Merge remote-tracking branch 'remotes/origin/brillo/linux-3.10.y' into Brillo-20170221-3.10_kernel

Conflicts:
	crypto/blkcipher.c
	drivers/platform/x86/intel_scu_ipcutil.c
	drivers/usb/gadget/u_ether.c
	fs/ext4/extents.c
	net/bluetooth/sco.c
	net/wireless/scan.c

Change-Id: Iaea6d22acb3da6b0477d71cf546858d35295e3d2
diff --git a/.gitignore b/.gitignore
index 3b8b9b3..3557999 100644
--- a/.gitignore
+++ b/.gitignore
@@ -64,11 +64,11 @@
 arch/*/include/generated
 
 # stgit generated dirs
-patches-*
+#patches-*
 
 # quilt's files
-patches
-series
+#patches
+#series
 
 # cscope files
 cscope.*
diff --git a/Documentation/ABI/testing/debugfs-aufs b/Documentation/ABI/testing/debugfs-aufs
new file mode 100644
index 0000000..a58f0d0
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-aufs
@@ -0,0 +1,50 @@
+What:		/debug/aufs/si_<id>/
+Date:		March 2009
+Contact:	J. R. Okajima <hooanon05@yahoo.co.jp>
+Description:
+		Under /debug/aufs, a directory named si_<id> is created
+		per aufs mount, where <id> is a unique id generated
+		internally.
+
+What:		/debug/aufs/si_<id>/plink
+Date:		Apr 2013
+Contact:	J. R. Okajima <hooanon05@yahoo.co.jp>
+Description:
+		It has three lines and shows information about the
+		pseudo-links. The first line is a single number
+		representing the number of buckets. The second line is
+		the number of pseudo-links per bucket (separated by
+		blanks). The last line is a single number representing
+		the total number of pseudo-links.
+		When the aufs mount option 'noplink' is specified, it
+		will show "1\n0\n0\n".
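+
+		A hedged, illustrative example (the <id> and the counts
+		depend entirely on the mount; the values below are made
+		up, but follow the format described above):
+
+		# cat /debug/aufs/si_1c6e6f640dd01236/plink
+		2
+		1 0
+		1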
+
+What:		/debug/aufs/si_<id>/xib
+Date:		March 2009
+Contact:	J. R. Okajima <hooanon05@yahoo.co.jp>
+Description:
+		It shows the consumed blocks by xib (External Inode Number
+		Bitmap), its block size and file size.
+		When the aufs mount option 'noxino' is specified, it
+		will be empty. About XINO files, see the aufs manual.
+
+What:		/debug/aufs/si_<id>/xino0, xino1 ... xinoN
+Date:		March 2009
+Contact:	J. R. Okajima <hooanon05@yahoo.co.jp>
+Description:
+		It shows the consumed blocks by xino (External Inode Number
+		Translation Table), its link count, block size and file
+		size.
+		When the aufs mount option 'noxino' is specified, it
+		will be empty. About XINO files, see the aufs manual.
+
+What:		/debug/aufs/si_<id>/xigen
+Date:		March 2009
+Contact:	J. R. Okajima <hooanon05@yahoo.co.jp>
+Description:
+		It shows the consumed blocks by xigen (External Inode
+		Generation Table), its block size and file size.
+		If CONFIG_AUFS_EXPORT is disabled, this entry will not
+		be created.
+		When the aufs mount option 'noxino' is specified, it
+		will be empty. About XINO files, see the aufs manual.
diff --git a/Documentation/ABI/testing/sysfs-aufs b/Documentation/ABI/testing/sysfs-aufs
new file mode 100644
index 0000000..7af6dc0
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-aufs
@@ -0,0 +1,24 @@
+What:		/sys/fs/aufs/si_<id>/
+Date:		March 2009
+Contact:	J. R. Okajima <hooanon05@yahoo.co.jp>
+Description:
+		Under /sys/fs/aufs, a directory named si_<id> is created
+		per aufs mount, where <id> is a unique id generated
+		internally.
+
+What:		/sys/fs/aufs/si_<id>/br0, br1 ... brN
+Date:		March 2009
+Contact:	J. R. Okajima <hooanon05@yahoo.co.jp>
+Description:
+		It shows the absolute path of a member directory (which
+		is called a branch) in aufs, and its permission.
+
+What:		/sys/fs/aufs/si_<id>/xi_path
+Date:		March 2009
+Contact:	J. R. Okajima <hooanon05@yahoo.co.jp>
+Description:
+		It shows the absolute path of the XINO (External Inode Number
+		Bitmap, Translation Table and Generation Table) file
+		even if it is the default path.
+		When the aufs mount option 'noxino' is specified, it
+		will be empty. About XINO files, see the aufs manual.
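+
+		A hedged, illustrative example of reading these
+		attributes (the <id>, branch path and xino path are
+		assumptions; they depend on the mount):
+
+		# cat /sys/fs/aufs/si_1c6e6f640dd01236/br0
+		/tmp/rw=rw
+		# cat /sys/fs/aufs/si_1c6e6f640dd01236/xi_path
+		/tmp/rw/.aufs.xino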
diff --git a/Documentation/ABI/testing/sysfs-class-dual-role-usb b/Documentation/ABI/testing/sysfs-class-dual-role-usb
new file mode 100644
index 0000000..a900fd7
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-dual-role-usb
@@ -0,0 +1,71 @@
+What:		/sys/class/dual_role_usb/.../
+Date:		June 2015
+Contact:	Badhri Jagan Sridharan <badhri@google.com>
+Description:
+		Provide a generic interface to monitor and change
+		the state of dual role usb ports. The name here
+		refers to the name mentioned in the
+		dual_role_phy_desc that is passed while registering
+		the dual_role_phy_instance through
+		devm_dual_role_instance_register.
+
+What:		/sys/class/dual_role_usb/.../supported_modes
+Date:		June 2015
+Contact:	Badhri Jagan Sridharan <badhri@google.com>
+Description:
+		This is a static node, once initialized this
+		is not expected to change during runtime. "dfp"
+		refers to "downstream facing port" i.e. port can
+		only act as host. "ufp" refers to "upstream
+		facing port" i.e. port can only act as device.
+		"dfp ufp" refers to "dual role port" i.e. the port
+		can either be a host port or a device port.
+
+What:		/sys/class/dual_role_usb/.../mode
+Date:		June 2015
+Contact:	Badhri Jagan Sridharan <badhri@google.com>
+Description:
+		The mode node refers to the current mode in which the
+		port is operating. "dfp" for host ports. "ufp" for device
+		ports and "none" when cable is not connected.
+
+		On devices where the USB mode is software-controllable,
+		userspace can change the mode by writing "dfp" or "ufp".
+		On devices where the USB mode is fixed in hardware,
+		this attribute is read-only.
+
+What:		/sys/class/dual_role_usb/.../power_role
+Date:		June 2015
+Contact:	Badhri Jagan Sridharan <badhri@google.com>
+Description:
+		The power_role node shows whether the port
+		is "sink"ing or "source"ing power. "none" if
+		the port is not connected.
+
+		On devices implementing USB Power Delivery,
+		userspace can control the power role by writing "sink" or
+		"source". On devices without USB-PD, this attribute is
+		read-only.
+
+What:		/sys/class/dual_role_usb/.../data_role
+Date:		June 2015
+Contact:	Badhri Jagan Sridharan <badhri@google.com>
+Description:
+		The data_role node shows whether the port
+		is acting as "host" or "device" for the USB data connection.
+		"none" if there is no active data link.
+
+		On devices implementing USB Power Delivery, userspace
+		can control the data role by writing "host" or "device".
+		On devices without USB-PD, this attribute is read-only.
+
+What:		/sys/class/dual_role_usb/.../powers_vconn
+Date:		June 2015
+Contact:	Badhri Jagan Sridharan <badhri@google.com>
+Description:
+		The powers_vconn node shows whether the port
+		is supplying power to the VCONN pin.
+
+		On devices with software control of VCONN,
+		userspace can disable the power supply to VCONN by writing "n",
+		or enable the power supply by writing "y".
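+
+		A hedged, illustrative shell example (the port name
+		"otg_port" is an assumption; real names come from the
+		dual_role_phy_desc passed at registration):
+
+		# cat /sys/class/dual_role_usb/otg_port/mode
+		ufp
+		# echo dfp > /sys/class/dual_role_usb/otg_port/mode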
diff --git a/Documentation/ABI/testing/sysfs-kernel-wakeup_reasons b/Documentation/ABI/testing/sysfs-kernel-wakeup_reasons
new file mode 100644
index 0000000..acb19b9
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-wakeup_reasons
@@ -0,0 +1,16 @@
+What:		/sys/kernel/wakeup_reasons/last_resume_reason
+Date:		February 2014
+Contact:	Ruchi Kandoi <kandoiruchi@google.com>
+Description:
+		The /sys/kernel/wakeup_reasons/last_resume_reason is
+		used to report wakeup reasons after the system exits suspend.
+
+What:		/sys/kernel/wakeup_reasons/last_suspend_time
+Date:		March 2015
+Contact:	jinqian <jinqian@google.com>
+Description:
+		The /sys/kernel/wakeup_reasons/last_suspend_time is
+		used to report the time spent in the last suspend cycle.
+		It contains two numbers (in seconds) separated by a space.
+		The first number is the time spent in the suspend and
+		resume processes. The second number is the time spent in
+		the sleep state.
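+
+		A hedged, illustrative example (the values and the wakeup
+		reason shown are made up):
+
+		# cat /sys/kernel/wakeup_reasons/last_resume_reason
+		173 qpnp_rtc_alarm
+		# cat /sys/kernel/wakeup_reasons/last_suspend_time
+		0.123456 100.123456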
\ No newline at end of file
diff --git a/Documentation/android.txt b/Documentation/android.txt
new file mode 100644
index 0000000..0f40a78
--- /dev/null
+++ b/Documentation/android.txt
@@ -0,0 +1,121 @@
+				=============
+				A N D R O I D
+				=============
+
+Copyright (C) 2009 Google, Inc.
+Written by Mike Chan <mike@android.com>
+
+CONTENTS:
+---------
+
+1. Android
+  1.1 Required enabled config options
+  1.2 Required disabled config options
+  1.3 Recommended enabled config options
+2. Contact
+
+
+1. Android
+==========
+
+Android (www.android.com) is an open source operating system for mobile devices.
+This document describes configurations needed to run the Android framework on
+top of the Linux kernel.
+
+To see a working defconfig, look at msm_defconfig or goldfish_defconfig,
+which can be found at http://android.git.kernel.org in kernel/common.git
+and kernel/msm.git.
+
+
+1.1 Required enabled config options
+-----------------------------------
+After building a standard defconfig, ensure that these options are enabled
+in your .config or defconfig if they are not already. This list is based
+on msm_defconfig. You should keep the rest of the default options enabled
+in the defconfig unless you know what you are doing; a sketch of toggling
+options follows the list.
+
+ANDROID_PARANOID_NETWORK
+ASHMEM
+CONFIG_FB_MODE_HELPERS
+CONFIG_FONT_8x16
+CONFIG_FONT_8x8
+CONFIG_YAFFS_SHORT_NAMES_IN_RAM
+DAB
+EARLYSUSPEND
+FB
+FB_CFB_COPYAREA
+FB_CFB_FILLRECT
+FB_CFB_IMAGEBLIT
+FB_DEFERRED_IO
+FB_TILEBLITTING
+HIGH_RES_TIMERS
+INOTIFY
+INOTIFY_USER
+INPUT_EVDEV
+INPUT_GPIO
+INPUT_MISC
+LEDS_CLASS
+LEDS_GPIO
+LOCK_KERNEL
+LOGGER
+LOW_MEMORY_KILLER
+MISC_DEVICES
+NEW_LEDS
+NO_HZ
+POWER_SUPPLY
+PREEMPT
+RAMFS
+RTC_CLASS
+RTC_LIB
+SWITCH
+SWITCH_GPIO
+TMPFS
+UID_STAT
+UID16
+USB_FUNCTION
+USB_FUNCTION_ADB
+USER_WAKELOCK
+VIDEO_OUTPUT_CONTROL
+WAKELOCK
+YAFFS_AUTO_YAFFS2
+YAFFS_FS
+YAFFS_YAFFS1
+YAFFS_YAFFS2
+
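+As a sketch, options can be toggled with the kernel's own scripts/config
+helper before building (the two option names here are just examples taken
+from the list above):
+
+	$ scripts/config --enable CONFIG_ASHMEM --enable CONFIG_INPUT_EVDEV
+	$ make oldconfig
+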
+
+1.2 Required disabled config options
+------------------------------------
+CONFIG_YAFFS_DISABLE_LAZY_LOAD
+DNOTIFY
+
+
+1.3 Recommended enabled config options
+--------------------------------------
+ANDROID_PMEM
+PSTORE_CONSOLE
+PSTORE_RAM
+SCHEDSTATS
+DEBUG_PREEMPT
+DEBUG_MUTEXES
+DEBUG_SPINLOCK_SLEEP
+DEBUG_INFO
+FRAME_POINTER
+CPU_FREQ
+CPU_FREQ_TABLE
+CPU_FREQ_DEFAULT_GOV_ONDEMAND
+CPU_FREQ_GOV_ONDEMAND
+CRC_CCITT
+EMBEDDED
+INPUT_TOUCHSCREEN
+I2C
+I2C_BOARDINFO
+LOG_BUF_SHIFT=17
+SERIAL_CORE
+SERIAL_CORE_CONSOLE
+
+
+2. Contact
+==========
+website: http://android.git.kernel.org
+
+mailing-lists: android-kernel@googlegroups.com
diff --git a/Documentation/arm64/tagged-pointers.txt b/Documentation/arm64/tagged-pointers.txt
new file mode 100644
index 0000000..d9995f1
--- /dev/null
+++ b/Documentation/arm64/tagged-pointers.txt
@@ -0,0 +1,34 @@
+		Tagged virtual addresses in AArch64 Linux
+		=========================================
+
+Author: Will Deacon <will.deacon@arm.com>
+Date  : 12 June 2013
+
+This document briefly describes the provision of tagged virtual
+addresses in the AArch64 translation system and their potential uses
+in AArch64 Linux.
+
+The kernel configures the translation tables so that translations made
+via TTBR0 (i.e. userspace mappings) have the top byte (bits 63:56) of
+the virtual address ignored by the translation hardware. This frees up
+this byte for application use, with the following caveats:
+
+	(1) The kernel requires that all user addresses passed to EL1
+	    are tagged with tag 0x00. This means that any syscall
+	    parameters containing user virtual addresses *must* have
+	    their top byte cleared before trapping to the kernel.
+
+	(2) Non-zero tags are not preserved when delivering signals.
+	    This means that signal handlers in applications making use
+	    of tags cannot rely on the tag information for user virtual
+	    addresses being maintained for fields inside siginfo_t.
+	    One exception to this rule is for signals raised in response
+	    to watchpoint debug exceptions, where the tag information
+	    will be preserved.
+
+	(3) Special care should be taken when using tagged pointers,
+	    since it is likely that C compilers will not hazard two
+	    virtual addresses differing only in the upper byte.
+
+The architecture prevents the use of a tagged PC, so the upper byte will
+be set to a sign-extension of bit 55 on exception return.
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index 638bf17..61dc0ec 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -598,6 +598,15 @@
 be called for a newly-created cgroup if an error occurs after this
 subsystem's create() method has been called for the new cgroup).
 
+int allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+(cgroup_mutex held by caller)
+
+Called prior to moving a task into a cgroup; if the subsystem
+returns an error, this will abort the attach operation.  Used
+to extend the permission checks - if all subsystems in a cgroup
+return 0, the attach will be allowed to proceed, even if the
+default permission check (root or same user) fails.
+
 int can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 (cgroup_mutex held by caller)
 
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index 219970b..875eecb 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -28,6 +28,7 @@
 2.3  Userspace
 2.4  Ondemand
 2.5  Conservative
+2.6  Interactive
 
 3.   The Governor Interface in the CPUfreq Core
 
@@ -218,6 +219,90 @@
 speed. Load for frequency increase is still evaluated every
 sampling rate.
 
+2.6 Interactive
+---------------
+
+The CPUfreq governor "interactive" is designed for latency-sensitive,
+interactive workloads. This governor sets the CPU speed depending on
+usage, similar to "ondemand" and "conservative" governors, but with a
+different set of configurable behaviors.
+
+The tuneable values for this governor are:
+
+target_loads: CPU load values used to adjust speed to influence the
+current CPU load toward that value.  In general, the lower the target
+load, the more often the governor will raise CPU speeds to bring load
+below the target.  The format is a single target load, optionally
+followed by pairs of CPU speeds and CPU loads to target at or above
+those speeds.  Colons can be used between the speeds and associated
+target loads for readability.  For example:
+
+   85 1000000:90 1700000:99
+
+targets CPU load 85% below 1 GHz, 90% at or above 1 GHz up to (but not
+including) 1.7 GHz, and 99% at or above 1.7 GHz.  If speeds are
+specified they must appear in ascending order.  Higher target load
+values are typically specified for higher speeds, that is, target load
+values also usually appear in ascending order.  The default is a
+target load of 90% for all speeds.
+
+min_sample_time: The minimum amount of time to spend at the current
+frequency before ramping down. Default is 80000 uS.
+
+hispeed_freq: An intermediate "hi speed" at which to initially ramp
+when CPU load hits the value specified in go_hispeed_load.  If load
+stays high for the amount of time specified in above_hispeed_delay,
+then speed may be bumped higher.  Default is the maximum speed
+allowed by the policy at governor initialization time.
+
+go_hispeed_load: The CPU load at which to ramp to hispeed_freq.
+Default is 99%.
+
+above_hispeed_delay: When speed is at or above hispeed_freq, wait for
+this long before raising speed in response to continued high load.
+The format is a single delay value, optionally followed by pairs of
+CPU speeds and the delay to use at or above those speeds.  Colons can
+be used between the speeds and associated delays for readability.  For
+example:
+
+   80000 1300000:200000 1500000:40000
+
+uses delay 80000 uS until CPU speed 1.3 GHz, at which speed delay
+200000 uS is used until speed 1.5 GHz, at which speed (and above)
+delay 40000 uS is used.  If speeds are specified they must appear in
+ascending order.  Default is 20000 uS.
+
+timer_rate: Sample rate for reevaluating CPU load when the CPU is not
+idle.  A deferrable timer is used, such that the CPU will not be woken
+from idle to service this timer until something else needs to run.
+(The maximum time to allow deferring this timer when not running at
+minimum speed is configurable via timer_slack.)  Default is 20000 uS.
+
+timer_slack: Maximum additional time to defer handling the governor
+sampling timer beyond timer_rate when running at speeds above the
+minimum.  For platforms that consume additional power at idle when
+CPUs are running at speeds greater than minimum, this places an upper
+bound on how long the timer will be deferred prior to re-evaluating
+load and dropping speed.  For example, if timer_rate is 20000uS and
+timer_slack is 10000uS then timers will be deferred for up to 30msec
+when not at lowest speed.  A value of -1 means defer timers
+indefinitely at all speeds.  Default is 80000 uS.
+
+boost: If non-zero, immediately boost speed of all CPUs to at least
+hispeed_freq until zero is written to this attribute.  If zero, allow
+CPU speeds to drop below hispeed_freq according to load as usual.
+Default is zero.
+
+boostpulse: On each write, immediately boost speed of all CPUs to
+hispeed_freq for at least the period of time specified by
+boostpulse_duration, after which speeds are allowed to drop below
+hispeed_freq according to load as usual.
+
+boostpulse_duration: Length of time to hold CPU speed at hispeed_freq
+on a write to boostpulse, before allowing speed to drop according to
+load as usual.  Default is 80000 uS.
+
+
 3. The Governor Interface in the CPUfreq Core
 =============================================
 
diff --git a/Documentation/device-mapper/verity.txt b/Documentation/device-mapper/verity.txt
index 9884681..2929f6b 100644
--- a/Documentation/device-mapper/verity.txt
+++ b/Documentation/device-mapper/verity.txt
@@ -10,7 +10,7 @@
     <version> <dev> <hash_dev>
     <data_block_size> <hash_block_size>
     <num_data_blocks> <hash_start_block>
-    <algorithm> <digest> <salt>
+    <algorithm> <digest> <salt> <mode>
 
 <version>
     This is the type of the on-disk hash format.
@@ -62,6 +62,16 @@
 <salt>
     The hexadecimal encoding of the salt value.
 
+<mode>
+    Optional. The mode of operation.
+
+    0 is the normal mode of operation where a corrupted block will result in an
+      I/O error.
+
+    1 is logging mode where corrupted blocks are logged and a uevent is sent to
+      notify user space.
+
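+    As a hedged example, a dmsetup table for a 1 GiB data device might
+    append the mode as the last parameter (the device names, sizes,
+    digest and salt below are placeholders, not working values):
+
+      dmsetup create vroot --readonly --table \
+        "0 2097152 verity 1 /dev/sda1 /dev/sda2 4096 4096 262144 1 "\
+        "sha256 <digest> <salt> 1"
+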
+
 Theory of operation
 ===================
 
diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt
index 85aada2..458b57f 100644
--- a/Documentation/devicetree/bindings/mmc/mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/mmc.txt
@@ -28,6 +28,7 @@
 - cap-mmc-highspeed: MMC high-speed timing is supported
 - cap-power-off-card: powering off the card is safe
 - cap-sdio-irq: enable SDIO IRQ signalling on this interface
+- full-pwr-cycle: full power cycle of the card is supported
 
 *NOTE* on CD and WP polarity. To use common for all SD/MMC host controllers line
 polarity properties, we have to fix the meaning of the "normal" and "inverted"
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 0706d32..cbbac3f 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -189,7 +189,7 @@
 				loff_t pos, unsigned len, unsigned copied,
 				struct page *page, void *fsdata);
 	sector_t (*bmap)(struct address_space *, sector_t);
-	int (*invalidatepage) (struct page *, unsigned long);
+	void (*invalidatepage) (struct page *, unsigned int, unsigned int);
 	int (*releasepage) (struct page *, int);
 	void (*freepage)(struct page *);
 	int (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
@@ -310,8 +310,8 @@
 keep it that way and don't breed new callers.
 
 	->invalidatepage() is called when the filesystem must attempt to drop
-some or all of the buffers from the page when it is being truncated.  It
-returns zero on success.  If ->invalidatepage is zero, the kernel uses
+some or all of the buffers from the page when it is being truncated. It
+returns zero on success. If ->invalidatepage is zero, the kernel uses
 block_invalidatepage() instead.
 
 	->releasepage() is called when the kernel is about to try to drop the
diff --git a/Documentation/filesystems/aufs/README b/Documentation/filesystems/aufs/README
new file mode 100644
index 0000000..617f81a
--- /dev/null
+++ b/Documentation/filesystems/aufs/README
@@ -0,0 +1,346 @@
+
+Aufs3 -- advanced multi layered unification filesystem version 3.x
+http://aufs.sf.net
+Junjiro R. Okajima
+
+
+0. Introduction
+----------------------------------------
+In the early days, aufs was an entirely re-designed and re-implemented
+Unionfs Version 1.x series. After many original ideas, approaches,
+improvements and implementations, it became totally different from
+Unionfs while keeping the basic features.
+More recently, the Unionfs Version 2.x series began taking some of the
+same approaches as aufs1.
+Unionfs is being developed by Professor Erez Zadok at Stony Brook
+University and his team.
+
+Aufs3 supports linux-3.0 and later.
+If you want support for an older kernel version, try the aufs2-2.6.git
+or aufs2-standalone.git repository, or aufs1 from CVS on SourceForge.
+
+Note: it became clear that "Aufs was rejected. Let's give it up."
+According to Christoph Hellwig, Linux rejects all union-type filesystems
+but UnionMount.
+<http://marc.info/?l=linux-kernel&m=123938533724484&w=2>
+
+
+1. Features
+----------------------------------------
+- unite several directories into a single virtual filesystem. The member
+  directory is called a branch.
+- you can specify permission flags for each branch, which are 'readonly',
+  'readwrite' and 'whiteout-able.'
+- via the upper writable branch, internal copyup and whiteout, files/dirs
+  on readonly branches are logically modifiable.
+- dynamic branch manipulation, add, del.
+- etc...
+
+Also there are many enhancements in aufs1, such as:
+- readdir(3) in userspace.
+- keep inode number by external inode number table
+- keep the timestamps of file/dir in internal copyup operation
+- seekable directory, supporting NFS readdir.
+- whiteout is hardlinked in order to reduce the consumption of inodes
+  on branch
+- do not copyup, nor create a whiteout when it is unnecessary
+- revert a single systemcall when an error occurs in aufs
+- remount interface instead of ioctl
+- maintain /etc/mtab by an external command, /sbin/mount.aufs.
+- loopback mounted filesystem as a branch
+- a kernel thread for removing a dir which has plenty of whiteouts
+- support copyup sparse file (a file which has a 'hole' in it)
+- default permission flags for branches
+- selectable permission flags for ro branch, whether whiteout can
+  exist or not
+- export via NFS.
+- support <sysfs>/fs/aufs and <debugfs>/aufs.
+- support multiple writable branches, some policies to select one
+  among multiple writable branches.
+- a new semantics for link(2) and rename(2) to support multiple
+  writable branches.
+- no glibc changes are required.
+- pseudo hardlink (hardlink over branches)
+- allow a direct access manually to a file on branch, e.g. bypassing aufs.
+  including NFS or remote filesystem branch.
+- userspace wrapper for pathconf(3)/fpathconf(3) with _PC_LINK_MAX.
+- and more...
+
+Currently these features are temporarily dropped from aufs3.
+See design/08plan.txt for details.
+- test only the highest one for the directory permission (dirperm1)
+- copyup on open (coo=)
+- nested mount, i.e. aufs as readonly no-whiteout branch of another aufs
+  (robr)
+- statistics of aufs thread (/sys/fs/aufs/stat)
+- delegation mode (dlgt)
+  a delegation of the internal branch access to support task I/O
+  accounting, which also supports Linux Security Modules (LSM) mainly
+  for Suse AppArmor.
+- intent.open/create (file open in a single lookup)
+
+Features or just ideas for the future (see also design/*.txt),
+- reorder the branch index without del/re-add.
+- permanent xino files for NFSD
+- an option for refreshing the opened files after add/del branches
+- 'move' policy for copy-up between two writable branches, after
+  checking free space.
+- light version, without branch manipulation. (unnecessary?)
+- copyup in userspace
+- inotify in userspace
+- readv/writev
+- xattr, acl
+
+
+2. Download
+----------------------------------------
+There are three GIT trees for aufs3: aufs3-linux.git,
+aufs3-standalone.git, and aufs-util.git. Note that there is no "3" in
+"aufs-util.git."
+While the aufs-util is always necessary, you need either of aufs3-linux
+or aufs3-standalone.
+
+The aufs3-linux tree includes the whole linux mainline GIT tree,
+git://git.kernel.org/.../torvalds/linux.git.
+And you cannot select CONFIG_AUFS_FS=m for this version, i.e. you cannot
+build aufs3 as an external kernel module.
+
+On the other hand, the aufs3-standalone tree has only aufs source files
+and necessary patches, and you can select CONFIG_AUFS_FS=m.
+
+You will find GIT branches whose names are in the form "aufs3.x" where "x"
+represents the linux kernel version, "linux-3.x". For instance,
+"aufs3.0" is for linux-3.0. For latest "linux-3.x-rcN", use
+"aufs3.x-rcN" branch.
+
+o aufs3-linux tree
+$ git clone --reference /your/linux/git/tree \
+	git://git.code.sf.net/p/aufs/aufs3-linux \
+	aufs3-linux.git
+- if you don't have linux GIT tree, then remove "--reference ..."
+$ cd aufs3-linux.git
+$ git checkout origin/aufs3.0
+
+o aufs3-standalone tree
+$ git clone git://git.code.sf.net/p/aufs/aufs3-standalone \
+	aufs3-standalone.git
+$ cd aufs3-standalone.git
+$ git checkout origin/aufs3.0
+
+o aufs-util tree
+$ git clone git://git.code.sf.net/p/aufs/aufs-util \
+	aufs-util.git
+$ cd aufs-util.git
+$ git checkout origin/aufs3.0
+
+Note: The 3.x-rcN branch is to be used with `rc' kernel versions ONLY.
+The minor version number, 'x' in '3.x', of aufs may not always
+follow the minor version number of the kernel, because changes in
+the kernel that cause the use of a new minor version number do not
+always require changes to aufs-util.
+
+Since aufs-util has its own minor version number, you may not be
+able to find a GIT branch in aufs-util for your kernel's
+exact minor version number.
+In this case, you should git-checkout the branch for the
+nearest lower number.
+
+For (an unreleased) example:
+If you are using "linux-3.10" and the "aufs3.10" branch
+does not exist in aufs-util repository, then "aufs3.9", "aufs3.8"
+or something numerically smaller is the branch for your kernel.
+
+Also you can view all branches by
+	$ git branch -a
+
+
+3. Configuration and Compilation
+----------------------------------------
+Make sure you have git-checkout'ed the correct branch.
+
+For aufs3-linux tree,
+- enable CONFIG_AUFS_FS.
+- set other aufs configurations if necessary.
+
+For aufs3-standalone tree,
+there are several ways to build.
+
+1.
+- apply ./aufs3-kbuild.patch to your kernel source files.
+- apply ./aufs3-base.patch too.
+- apply ./aufs3-proc_map.patch too, if you want to make /proc/PID/maps (and
+  others including lsof(1)) show the file path on aufs instead of the
+  path on the branch fs.
+- apply ./aufs3-standalone.patch too, if you have a plan to set
+  CONFIG_AUFS_FS=m. Otherwise you don't need ./aufs3-standalone.patch.
+- copy ./{Documentation,fs,include/uapi/linux/aufs_type.h} files to your
+  kernel source tree. Never copy $PWD/include/uapi/linux/Kbuild.
+- enable CONFIG_AUFS_FS, you can select either
+  =m or =y.
+- and build your kernel as usual.
+- install the built kernel.
+  Note: Since linux-3.9, every filesystem module requires an alias
+  "fs-<fsname>". You should make sure that "fs-aufs" is listed in your
+  modules.aliases file if you set CONFIG_AUFS_FS=m.
+- install the header files too by "make headers_install" to the
+  directory you specify. By default, it is $PWD/usr.
+  "make help" shows a brief note for headers_install.
+- and reboot your system.
+
+2.
+- module only (CONFIG_AUFS_FS=m).
+- apply ./aufs3-base.patch to your kernel source files.
+- apply ./aufs3-proc_map.patch too to your kernel source files,
+  if you want to make /proc/PID/maps (and others including lsof(1)) show
+  the file path on aufs instead of the path on the branch fs.
+- apply ./aufs3-standalone.patch too.
+- build your kernel, don't forget "make headers_install", and reboot.
+- edit ./config.mk and set other aufs configurations if necessary.
+  Note: You should read $PWD/fs/aufs/Kconfig carefully, which describes
+  every aufs configuration.
+- build the module by simple "make".
+  Note: Since linux-3.9, every filesystem module requires an alias
+  "fs-<fsname>". You should make sure that "fs-aufs" is listed in your
+  modules.aliases file.
+- you can specify the ${KDIR} make variable which points to your kernel
+  source tree.
+- install the files
+  + run "make install" to install the aufs module, or copy the built
+    $PWD/aufs.ko to /lib/modules/... and run depmod -a (or reboot simply).
+  + run "make install_headers" (instead of headers_install) to install
+    the modified aufs header file (you can specify DESTDIR which is
+    available in aufs standalone version's Makefile only), or copy
+    $PWD/usr/include/linux/aufs_type.h to /usr/include/linux or wherever
+    you like manually. By default, the target directory is $PWD/usr.
+- no need to apply aufs3-kbuild.patch, nor copying source files to your
+  kernel source tree.
+
+Note: The header file aufs_type.h is necessary to build aufs-util
+      as well as "make headers_install" in the kernel source tree.
+      headers_install tends to be forgotten, but it is essentially
+      necessary, not only for building aufs-util.
+      You may not run into problems without headers_install on some
+      older versions, though.
+
+And then,
+- read README in aufs-util, build and install it
+- note that your distribution may contain an obsoleted version of
+  aufs_type.h in /usr/include/linux or something. When you build aufs
+  utilities, make sure that your compiler refers to the correct aufs header
+  file which is built by "make headers_install."
+- if you want to use readdir(3) in userspace or the pathconf(3) wrapper,
+  then run "make install_ulib" too. And refer to the aufs manual for
+  details.
+
+
+4. Usage
+----------------------------------------
+At first, make sure aufs-util is installed, and please read the aufs
+manual, aufs.5 in aufs-util.git tree.
+$ man -l aufs.5
+
+And then,
+$ mkdir /tmp/rw /tmp/aufs
+# mount -t aufs -o br=/tmp/rw:${HOME} none /tmp/aufs
+
+Here is another example. The result is equivalent.
+# mount -t aufs -o br=/tmp/rw=rw:${HOME}=ro none /tmp/aufs
+  Or
+# mount -t aufs -o br:/tmp/rw none /tmp/aufs
+# mount -o remount,append:${HOME} /tmp/aufs
+
+Then, you can see the whole tree of your home dir through /tmp/aufs. If
+you modify a file under /tmp/aufs, the one in your home directory is
+not affected; instead a file with the same name will be newly created
+under /tmp/rw. And all of your modifications to a file will be applied
+to the one under /tmp/rw. This is called the file-based Copy on Write
+(COW) method.
+Aufs mount options are described in aufs.5.
+If you run chroot or something similar and make your aufs the root
+directory, then you need to customize the shutdown script. See the
+aufs manual for details.
+
+Additionally, there are some sample usages of aufs, such as a diskless
+system with network booting, and a LiveCD over NFS.
+See sample dir in CVS tree on SourceForge.
+
+
+5. Contact
+----------------------------------------
+When you have any problems or strange behaviour in aufs, please let me
+know with:
+- /proc/mounts (instead of the output of mount(8))
+- /sys/module/aufs/*
+- /sys/fs/aufs/* (if you have them)
+- /debug/aufs/* (if you have them)
+- linux kernel version
+  if your kernel is not plain, for example modified by a distributor,
+  the URL where I can download its source is necessary too.
+- aufs version which was printed when loading the module or booting the
+  system, instead of the date you downloaded it.
+- configuration (define/undefine CONFIG_AUFS_xxx)
+- kernel configuration or /proc/config.gz (if you have it)
+- the behaviour which you think is incorrect
+- the actual operation; a reproducible one is better
+- mailto: aufs-users at lists.sourceforge.net
+
+Usually, I don't watch the Public Areas (Bugs, Support Requests, Patches,
+and Feature Requests) on SourceForge. Please join and write to
+aufs-users ML.
+
+
+6. Acknowledgements
+----------------------------------------
+Thanks to everyone who has tried and is using aufs, and to everyone
+who has reported a bug or given any feedback.
+
+Especially donators:
+Tomas Matejicek (slax.org) made a donation (much more than once).
+	Since Apr 2010, Tomas M (the author of Slax and Linux Live
+	scripts) is making "doubling" donations.
+	Unfortunately I cannot list all of the donators, but I really
+	appreciate them all.
+	It ended in Aug 2010, but the ordinary donation URL is still available.
+	<http://sourceforge.net/donate/index.php?group_id=167503>
+Dai Itasaka made a donation (2007/8).
+Chuck Smith made a donation (2008/4, 10 and 12).
+Henk Schoneveld made a donation (2008/9).
+Chih-Wei Huang, ASUS, CTC donated Eee PC 4G (2008/10).
+Francois Dupoux made a donation (2008/11).
+Bruno Cesar Ribas and Luis Carlos Erpen de Bona, C3SL serves public
+	aufs2 GIT tree (2009/2).
+William Grant made a donation (2009/3).
+Patrick Lane made a donation (2009/4).
+The Mail Archive (mail-archive.com) made donations (2009/5).
+Nippy Networks (Ed Wildgoose) made a donation (2009/7).
+New Dream Network, LLC (www.dreamhost.com) made a donation (2009/11).
+Pavel Pronskiy made a donation (2011/2).
+Iridium and Inmarsat satellite phone retailer (www.mailasail.com), Nippy
+	Networks (Ed Wildgoose) made a donation for hardware (2011/3).
+Max Lekomcev (DOM-TV project) made a donation (2011/7, 12, 2012/3, 6 and
+11).
+Sam Liddicott made a donation (2011/9).
+Era Scarecrow made a donation (2013/4).
+Bor Ratajc made a donation (2013/4).
+Alessandro Gorreta made a donation (2013/4).
+POIRETTE Marc made a donation (2013/4).
+Alessandro Gorreta made a donation (2013/4).
+lauri kasvandik made a donation (2013/5).
+pemasu from Finland made a donation (2013/7).
+
+Thank you very much.
+Donations, including future donations, are always very important and
+helpful for me to keep on developing aufs.
+
+
+7.
+----------------------------------------
+If you are an experienced user, no explanation is needed. Aufs is
+just a linux filesystem.
+
+
+Enjoy!
+
+# Local variables: ;
+# mode: text;
+# End: ;
diff --git a/Documentation/filesystems/aufs/design/01intro.txt b/Documentation/filesystems/aufs/design/01intro.txt
new file mode 100644
index 0000000..e60f8c6
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/01intro.txt
@@ -0,0 +1,162 @@
+
+# Copyright (C) 2005-2013 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+Introduction
+----------------------------------------
+
+aufs [ei ju: ef es] | [a u f s]
+1. abbrev. for "advanced multi-layered unification filesystem".
+2. abbrev. for "another unionfs".
+3. abbrev. for "auf das" in German which means "on the" in English.
+   Ex. "Butter aufs Brot"(G) means "butter onto bread"(E).
+   But "Filesystem aufs Filesystem" is hard to understand.
+
+AUFS is a filesystem with features:
+- multi layered stackable unification filesystem, the member directory
+  is called a branch.
+- branch permission and attribute, 'readonly', 'real-readonly',
+  'readwrite', 'whiteout-able', 'link-able whiteout' and their
+  combination.
+- internal "file copy-on-write".
+- logical deletion, whiteout.
+- dynamic branch manipulation, adding, deleting and changing permission.
+- allow bypassing aufs, user's direct branch access.
+- external inode number translation table and bitmap which maintains the
+  persistent aufs inode number.
+- seekable directory, including NFS readdir.
+- file mapping, mmap and sharing pages.
+- pseudo-link, hardlink over branches.
+- loopback mounted filesystem as a branch.
+- several policies to select one among multiple writable branches.
+- revert a single systemcall when an error occurs in aufs.
+- and more...
+
+
+Multi Layered Stackable Unification Filesystem
+----------------------------------------------------------------------
+Most people already know what it is.
+It is a filesystem which unifies several directories and provides a
+merged single directory. When users access a file, the access will be
+passed/re-directed/converted (sorry, I am not sure which English word is
+correct) to the real file on the member filesystem. The member
+filesystem is called 'lower filesystem' or 'branch' and has a mode
+'readonly' and 'readwrite.' And the deletion for a file on the lower
+readonly branch is handled by creating 'whiteout' on the upper writable
+branch.
+
+On LKML, there have been discussions about UnionMount (Jan Blunck,
+Bharata B Rao and Valerie Aurora) and Unionfs (Erez Zadok). They took
+different approaches to implement the merged-view.
+The former tries putting it into VFS, and the latter implements as a
+separate filesystem.
+(If I misunderstand about these implementations, please let me know and
+I shall correct it. Because it is a long time ago when I read their
+source files last time).
+
+UnionMount's approach may be able to stay small, but it may be hard to
+share branches between several UnionMounts, since the whiteout in it is
+implemented in the inode on the branch filesystem and is always
+shared. According to Bharata's post, readdir does not seem to be
+finished yet.
+There are several missing features known in this implementation, such as
+- for users, the inode number may change silently. eg. copy-up.
+- link(2) may break by copy-up.
+- read(2) may get an obsoleted filedata (fstat(2) too).
+- fcntl(F_SETLK) may be broken by copy-up.
+- unnecessary copy-up may happen, for example mmap(MAP_PRIVATE) after
+  open(O_RDWR).
+
+Unionfs has a longer history. When I started implementing a stacking filesystem
+(Aug 2005), it already existed. It has virtual super_block, inode,
+dentry and file objects, and they have an array pointing to lower
+objects of the same kind. After contributing many patches for Unionfs, I
+re-started my project AUFS (Jun 2006).
+
+In AUFS, the structure of the filesystem resembles Unionfs's, but I
+implemented my own ideas, approaches and enhancements, and it became a
+totally different one.
+
+Comparing a DM snapshot and an fs-based implementation:
+- the number of bytes to be copied between devices is much smaller.
+- the type of filesystem must be one and only.
+- the fs must be writable; no readonly fs, even for the lower original
+  device. So a compression fs will not be usable. But if we use a
+  loopback mount, we may address this issue.
+  for instance,
+	mount /cdrom/squashfs.img /sq
+	losetup /dev/loop0 /sq/ext2.img
+	losetup /dev/loop1 /somewhere/cow
+	dmsetup "snapshot /dev/loop0 /dev/loop1 ..."
+- it will be difficult (or needs more operations) to extract the
+  difference between the original device and COW.
+- DM snapshot-merge may help a lot when users try merging. In the
+  fs-layer union, users will use rsync(1).
+
+
+Several characters/aspects of aufs
+----------------------------------------------------------------------
+
+Aufs has several characters or aspects.
+1. a filesystem, callee of VFS helper
+2. sub-VFS, caller of VFS helper for branches
+3. a virtual filesystem which maintains persistent inode number
+4. reader/writer of files on branches, like an application
+
+1. Callee of VFS Helper
+As an ordinary linux filesystem, aufs is a callee of VFS. For instance,
+unlink(2) from an application reaches the sys_unlink() kernel function,
+and then vfs_unlink() is called. vfs_unlink() is one of the VFS helpers
+and it calls the filesystem-specific unlink operation. Actually aufs
+implements the unlink operation, but it behaves like a redirector.
+
+2. Caller of VFS Helper for Branches
+aufs_unlink() passes the unlink request to the branch filesystem as if
+it were called from VFS. So the called unlink operation of the branch
+filesystem acts as usual. As a caller of VFS helper, aufs should handle
+every necessary pre/post operation for the branch filesystem.
+- acquire the lock for the parent dir on a branch
+- lookup in a branch
+- revalidate dentry on a branch
+- mnt_want_write() for a branch
+- vfs_unlink() for a branch
+- mnt_drop_write() for a branch
+- release the lock on a branch
+
+3. Persistent Inode Number
+One of the most important issues for a filesystem is to maintain inode
+numbers. This is particularly important to support exporting a
+filesystem via NFS. Aufs is a virtual filesystem which doesn't have a
+backend block device of its own. But some storage is necessary to
+maintain inode numbers. It may be a large space and may not be suitable
+to keep in memory. Aufs rents some space from its first writable branch
+filesystem (by default) and creates file(s) on it. These files are
+created by aufs internally and removed soon (currently), but kept open.
+Note: Because these files are removed, they are totally gone after
+      unmounting aufs. It means the inode numbers are not persistent
+      across unmount or reboot. I have a plan to make them really
+      persistent which will be important for aufs on NFS server.
+
+4. Read/Write Files Internally (copy-on-write)
+Because a branch can be readonly, when you write to a file on it, aufs
+will "copy-up" the file to the upper writable branch internally, and
+then write the originally requested thing to the file. Generally the
+kernel doesn't open/read/write files actively. In aufs, even a single
+write may cause an internal "file copy". This behaviour is very similar
+to the cp(1) command.
+
+Some people may think it is better to pass such work to a user space
+helper, instead of doing it in kernel space. Actually I am still thinking
+about it. But currently I have implemented it in kernel space.
diff --git a/Documentation/filesystems/aufs/design/02struct.txt b/Documentation/filesystems/aufs/design/02struct.txt
new file mode 100644
index 0000000..083c36e
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/02struct.txt
@@ -0,0 +1,243 @@
+
+# Copyright (C) 2005-2013 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+Basic Aufs Internal Structure
+
+Superblock/Inode/Dentry/File Objects
+----------------------------------------------------------------------
+Like an ordinary filesystem, aufs has its own
+superblock/inode/dentry/file objects. All these objects have a
+dynamically allocated array and store the same kind of pointers to the
+lower filesystem, the branch.
+For example, when you build a union with one readwrite branch and one
+readonly branch, mounted on /au, /rw and /ro respectively:
+- /au = /rw + /ro
+- /ro/fileA exists but /rw/fileA does not
+
+Aufs lookup operation finds /ro/fileA and gets a dentry for it. These
+pointers are stored in an aufs dentry. The array in the aufs dentry will be,
+- [0] = NULL
+- [1] = /ro/fileA
+
+This style of array is essentially the same in the aufs
+superblock/inode/dentry/file objects.
+
+Because aufs supports manipulating branches, ie. adding/deleting/changing
+them dynamically, these objects have their own generation. When branches
+are changed, the generation in the aufs superblock is incremented. And
+the generation in each other object is compared when it is accessed.
+When the generation in an object is obsoleted, aufs refreshes the
+internal array.
+
+
+Superblock
+----------------------------------------------------------------------
+Additionally the aufs superblock has some data for policies to select one
+among multiple writable branches, XIB files, pseudo-links and a kobject.
+See below for details.
+About the policies which support copying-down a directory, see policy.txt
+too.
+
+
+Branch and XINO(External Inode Number Translation Table)
+----------------------------------------------------------------------
+Every branch has its own xino (external inode number translation table)
+file. The xino file is created and unlinked by aufs internally. When two
+members of a union exist on the same filesystem, they share the single
+xino file.
+The structure of a xino file is simple, just a sequence of aufs inode
+numbers which is indexed by the lower inode number.
+In the above sample, assume the inode number of /ro/fileA is i111 and
+aufs assigns the inode number i999 for fileA. Then aufs writes 999 as
+4(8) bytes at 111 * 4(8) bytes offset in the xino file.
+
+When the inode numbers are not contiguous, the xino file will be sparse
+which has a hole in it and doesn't consume as much disk space as it
+might appear. If your branch filesystem consumes disk space for such
+holes, then you should specify 'xino=' option at mounting aufs.
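+
+For example, a hedged mount line specifying an explicit xino path (the
+branch and xino paths here are illustrative):
+
+	# mount -t aufs -o br=/rw:/ro,xino=/rw/.aufs.xino none /mnt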
+
+Also a writable branch has three kinds of "whiteout bases". All of these
+exist when the branch is joined to aufs, and their names are
+whiteout-ed doubly, so that users will never see their names in the aufs
+hierarchy.
+1. a regular file which will be linked to all whiteouts.
+2. a directory to store a pseudo-link.
+3. a directory to store an "orphan-ed" file temporarily.
+
+1. Whiteout Base
+   When you remove a file on a readonly branch, aufs handles it as a
+   logical deletion and creates a whiteout on the upper writable branch
+   as a hardlink of this file in order not to consume inode on the
+   writable branch.
+2. Pseudo-link Dir
+   See below, Pseudo-link.
+3. Step-Parent Dir
+   When "fileC" exists on the lower readonly branch only and it is
+   opened and removed with its parent dir, and then user writes
+   something into it, then aufs copies-up fileC to this
+   directory. Because there is no other dir to store fileC. After
+   creating a file under this dir, the file is unlinked.
+
+Because aufs supports manipulating branches, ie. add/delete/change
+dynamically, a branch has its own id. When the branch order changes, aufs
+finds the new index by searching the branch id.
+
+
+Pseudo-link
+----------------------------------------------------------------------
+Assume "fileA" exists on the lower readonly branch only and it is
+hardlinked to "fileB" on the branch. When you write something to fileA,
+aufs copies it up to the upper writable branch. Additionally aufs
+creates a hardlink under the Pseudo-link Directory of the writable
+branch. The inode of a pseudo-link is kept in the aufs super_block as a
+simple list. If fileB is read after unlinking fileA, aufs returns
+filedata from the pseudo-link instead of the lower readonly
+branch. Because the pseudo-link is based upon the inode, keeping the
+inode number by xino (see above) is important.
+
+All the hardlinks under the Pseudo-link Directory of the writable branch
+should be restored in a proper location later. Aufs provides a utility
+to do this. The userspace helper is executed at remounting and unmounting
+aufs by default.
+While this utility is running, it puts aufs into the pseudo-link
+maintenance mode. In this mode, only the process which began the
+maintenance mode (and its child processes) is allowed to operate in
+aufs. Some other processes which are not related to the pseudo-link will
+be allowed to run too, but the rest have to return an error or wait
+until the maintenance mode ends. If a process already holds an inode
+mutex (in VFS), it has to return an error.
+
+
+XIB(external inode number bitmap)
+----------------------------------------------------------------------
+In addition to the xino file per branch, aufs has an external inode number
+bitmap in a superblock object. It is also a file, like a xino file.
+It is a simple bitmap to mark whether the aufs inode number is in-use or
+not.
+To reduce the file I/O, aufs prepares a single memory page to cache xib.
+
+Aufs implements a feature to truncate/refresh both of xino and xib to
+reduce the number of consumed disk blocks for these files.
+
+
+Virtual or Vertical Dir, and Readdir in Userspace
+----------------------------------------------------------------------
+In order to support multiple layers (branches), the aufs readdir
+operation constructs a virtual dir block in memory. For readdir, aufs
+calls vfs_readdir() internally for each dir on the branches, merges
+their entries while eliminating the whiteout-ed ones, and sets the
+result in the file (dir) object. So the file object has its entry list
+until it is closed. The entry list will be updated when the file
+position is zero and has become old. This decision is made in aufs
+automatically.
+
+The dynamically allocated memory block for the name of entries has a
+unit of 512 bytes (by default) and stores the names contiguously (no
+padding). Another block for each entry is handled by kmem_cache too.
+While building dir blocks, aufs creates a hash list to judge whether
+the entry is whiteout-ed by its upper branch or already listed.
+The merged result is cached in the corresponding inode object and
+maintained by a customizable life-time option.
+
+Some people may say this can be a security hole or invite DoS attacks,
+since an opened and once readdir-ed dir (file object) holds its entry
+list and puts pressure on system memory. But I'd say it is similar
+to files under /proc or /sys. The virtual files in them also hold a
+memory page (generally) while they are opened. When an idea to reduce
+memory for them is introduced, it will be applied to aufs too.
+For those who really hate this situation, I've developed a readdir(3)
+library which does this merging in userspace. You just need to set the
+LD_PRELOAD environment variable, and aufs will not consume any memory in
+kernel space for readdir(3).
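+For instance (the library path here is an assumption; see the aufs-util
+documentation for the actual one):
+	$ LD_PRELOAD=/usr/lib/libau.so ls /your/aufs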
+
+
+Workqueue
+----------------------------------------------------------------------
+Aufs sometimes requires privileged access to a branch, for instance
+in a copy-up/down operation: a user process may be going to make changes
+to a file which exists only in the lower readonly branch, while the mode
+of one of the ancestor directories is not writable by that user
+process. Here aufs copies-up the file with its ancestors, and this may
+require privilege to set their owner/group/mode/etc.
+This is a typical case of the application character of aufs (see
+Introduction).
+
+Aufs uses a workqueue synchronously for this case. It creates its own
+workqueue. The workqueue is a kernel thread and has privilege. Aufs
+passes the request to call mkdir or write (for example), and waits for
+its completion. This approach simply solves the problem of signal
+handlers.
+If aufs didn't adopt the workqueue and instead changed the privilege of
+the process, and if the mkdir/write call raised SIGXFSZ or another
+signal, then the user process might gain privilege, or the generated
+core file might be owned by a superuser.
+
+Aufs also uses the system global workqueue ("events" kernel thread)
+for asynchronous tasks, such as handling inotify/fsnotify, re-creating a
+whiteout base, etc. This is unrelated to privilege.
+Most aufs operations try to acquire an rw_semaphore for the aufs
+superblock at the beginning, and at the same time wait for the
+completion of all queued asynchronous tasks.
+
+
+Whiteout
+----------------------------------------------------------------------
+The whiteout in aufs is very similar to Unionfs's. That is represented
+by its filename. UnionMount takes an approach of a file mode, but I am
+afraid several utilities (find(1) or something) will have to support it.
+
+Basically the whiteout represents "logical deletion", which stops aufs
+from looking up further, but it also represents "dir is opaque", which
+also stops lookup.
+
+In aufs, rmdir(2) and rename(2) for a dir use whiteouts alternatively.
+In order to make the several operations in a single systemcall
+revertible, aufs adopts an approach of renaming a directory to a
+temporary unique whiteout-ed name.
+For example, in rename(2) of a dir where the target dir already exists,
+aufs renames the target dir to a temporary unique whiteout-ed name
+before the actual rename on a branch, and then handles other actions
+(make it opaque, update the attributes, etc). If an error happens in
+these actions, aufs simply renames the whiteout-ed name back and returns
+an error. If all succeed, aufs registers a function to remove the
+whiteout-ed unique temporary name completely and asynchronously to the
+system global workqueue.
+
+
+Copy-up
+----------------------------------------------------------------------
+It is a well-known feature or concept.
+When a user modifies a file on a readonly branch, aufs operates
+"copy-up" internally and makes the change to the new file on the upper
+writable branch.
+When the trigger systemcall does not update the timestamps of the parent
+dir, aufs reverts them after copy-up.
+
+
+Move-down (aufs3.9 and later)
+----------------------------------------------------------------------
+"Copy-up" is one of the essential feature in aufs. It copies a file from
+the lower readonly branch to the upper writable branch when a user
+changes something about the file.
+"Move-down" is an opposite action of copy-up. Basically this action is
+ran manually instead of automatically and internally.
+
+Sometimes users want to move-down a file from the upper writable branch
+to the lower readonly or writable branch. For instance,
+- the free space of the upper writable branch is going to run out.
+- create a new intermediate branch between the upper and lower branch.
+- etc.
+
+For this purpose, use "aumvdown" command in aufs-util.git.
diff --git a/Documentation/filesystems/aufs/design/03lookup.txt b/Documentation/filesystems/aufs/design/03lookup.txt
new file mode 100644
index 0000000..d3ca527
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/03lookup.txt
@@ -0,0 +1,106 @@
+
+# Copyright (C) 2005-2013 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+Lookup in a Branch
+----------------------------------------------------------------------
+Since aufs has the character of a sub-VFS (see Introduction), it operates
+lookup on branches as VFS does. This may be heavy work. Generally
+speaking, struct nameidata is a big structure and includes much
+information. But almost every lookup operation in aufs is the simplest
+case, ie. looking up only an entry directly connected to its parent.
+Digging down the directory hierarchy is unnecessary.
+
+VFS has a function lookup_one_len() for that use, but it is not usable
+for a branch filesystem which requires struct nameidata. So aufs
+implements a simple lookup wrapper function. When a branch filesystem
+allows NULL as the nameidata, it calls lookup_one_len(). Otherwise it
+builds the simplest nameidata and calls lookup_hash().
+Here aufs applies "a principle in NFSD", ie. if the filesystem supports
+NFS-export, then it has to support NULL as a nameidata parameter for
+->create(), ->lookup() and ->d_revalidate(). So the lookup wrapper in
+aufs tests if ->s_export_op in the branch is NULL or not.
+
+When a branch is a remote filesystem, aufs basically trusts its
+->d_revalidate(); aufs also forces the hardest revalidate tests for
+them.
+For d_revalidate, aufs implements three levels of revalidate tests. See
+"Revalidate Dentry and UDBA" in detail.
+
+
+Loopback Mount
+----------------------------------------------------------------------
+Basically aufs supports any type of filesystem and block device for a
+branch (actually there are some exceptions). But it is prohibited to add
+a loopback mounted one whose backend file exists in a filesystem which
+is already added to aufs. The reason is to protect aufs from recursive
+lookups. If it were allowed, the aufs lookup operation might re-enter a
+lookup for the loopback mounted branch in the same context and cause a
+deadlock.
+
+
+Revalidate Dentry and UDBA (User's Direct Branch Access)
+----------------------------------------------------------------------
+Generally VFS helpers re-validate a dentry as a part of lookup.
+0. digging down the directory hierarchy.
+1. lock the parent dir by its i_mutex.
+2. lookup the final (child) entry.
+3. revalidate it.
+4. call the actual operation (create, unlink, etc.)
+5. unlock the parent dir.
+
+If the filesystem implements ->d_revalidate() (step 3), then it is
+called. Aufs implements it and checks whether the dentry on a branch is
+still valid.
+But that is not enough, because aufs has to release the lock for the
+parent dir on a branch at the end of ->lookup() (step 2) and
+->d_revalidate() (step 3) while the i_mutex of the aufs dir is still
+held by VFS.
+If the file on a branch is changed directly, eg. bypassing aufs, after
+aufs has released the lock, then the subsequent operation may cause an
+unpleasant result.
+
+This situation is a result of the VFS architecture: ->lookup() and
+->d_revalidate() are separated. But I never say it is wrong; it is a
+good design from VFS's point of view. It is just not suitable for the
+sub-VFS character of aufs.
+
+Aufs supports such cases with three levels of revalidation, selectable
+by the user.
+1. Simple Revalidate
+   In addition to the native VFS flow, confirm the child-parent
+   relationship on the branch just after locking the parent dir on the
+   branch in the "actual operation" (step 4). When this validation
+   fails, aufs returns EBUSY. ->d_revalidate() (step 3) in aufs still
+   checks the validity of the dentry on the branches.
+2. Monitor Changes Internally by Inotify/Fsnotify
+   In addition to the above, in the "actual operation" (step 4) aufs
+   re-looks-up the dentry on the branch, and returns EBUSY if it finds
+   a different dentry.
+   Additionally, aufs sets an inotify/fsnotify watch for every dir on
+   the branches while it is cached. When an event is notified, aufs
+   registers a function with the kernel 'events' thread by
+   schedule_work(), and the function sets a special status in the
+   cached aufs dentry and inode private data. If they are not cached,
+   then aufs has nothing to do. When the same file is accessed through
+   aufs (steps 0-3) later, aufs will detect the status and refresh all
+   the necessary data.
+   In this mode, aufs has to ignore events which are fired by aufs
+   itself.
+3. No Extra Validation
+   This is the simplest mode: it doesn't add any additional
+   revalidation test, and skips the revalidation in step 4. It is
+   useful and improves aufs performance when the system surely hides
+   the aufs branches from users, by over-mounting something (or by
+   another method).
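+
+These levels are selected per mount; assuming the 'udba=' option names
+from the aufs manual (illustrative):
+# level 1: mount -t aufs -o br:/rw:/ro,udba=reval none /au
+# level 2: mount -t aufs -o br:/rw:/ro,udba=notify none /au
+# level 3: mount -t aufs -o br:/rw:/ro,udba=none none /au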
diff --git a/Documentation/filesystems/aufs/design/04branch.txt b/Documentation/filesystems/aufs/design/04branch.txt
new file mode 100644
index 0000000..f85f3a8
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/04branch.txt
@@ -0,0 +1,76 @@
+
+# Copyright (C) 2005-2013 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+Branch Manipulation
+
+Since aufs supports dynamic branch manipulation, ie. adding/removing a
+branch and changing its permission/attribute, there is a lot of work to
+do.
+
+
+Add a Branch
+----------------------------------------------------------------------
+o Confirm the dir being added exists outside of aufs, including any
+  loopback mount, and check other various attributes...
+o Initialize the xino file and whiteout bases if necessary.
+  See struct.txt.
+
+o Check the owner/group/mode of the directory
+  When the owner/group/mode of the directory being added differ from
+  those of the existing branches, aufs issues a warning because it may
+  impose a security risk.
+  For example, when an upper writable branch has a world-writable empty
+  top directory, a malicious user can create any file on the writable
+  branch directly, as if doing copy-up and modifying it manually. If
+  something like /etc/{passwd,shadow} exists on the lower readonly
+  branch but not on the upper writable branch, and the writable branch
+  is world-writable, then a malicious user may create /etc/passwd on
+  the writable branch directly and the infected file will be valid in
+  aufs.
+  I am afraid it can be a security issue, but there is nothing to do
+  except produce a warning.
+
+
+Delete a Branch
+----------------------------------------------------------------------
+o Confirm the branch being deleted is not busy
+  Generally, one merit of adopting the "remount" interface to
+  manipulate branches is that it discards caches. At branch deletion,
+  aufs checks the still cached (and connected) dentries and inodes. If
+  there are any, then they are all in use. An inode without its
+  corresponding dentry can be alive alone (for example, in the
+  inotify/fsnotify case).
+
+  For the cached ones, aufs checks whether the same named entry exists
+  on other branches.
+  If the cached one is a directory, then because aufs provides a merged
+  view to users, as long as one dir is left on any branch, aufs can
+  still show the dir to users. In this case, the branch can be removed
+  from aufs. Otherwise aufs rejects deleting the branch.
+
+  If any file on the deleting branch is opened by aufs, then aufs
+  rejects deleting.
+
+
+Modify the Permission of a Branch
+----------------------------------------------------------------------
+o Re-initialize or remove the xino file and whiteout bases if necessary.
+  See struct.txt.
+
+o rw --> ro: Confirm the branch being modified is not busy
+  Aufs rejects the request if any of these conditions are true.
+  - a file on the branch is mmap-ed.
+  - a regular file on the branch is opened for write and there is no
+    same named entry on the upper branch.
diff --git a/Documentation/filesystems/aufs/design/05wbr_policy.txt b/Documentation/filesystems/aufs/design/05wbr_policy.txt
new file mode 100644
index 0000000..2bb8e58
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/05wbr_policy.txt
@@ -0,0 +1,65 @@
+
+# Copyright (C) 2005-2013 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+Policies to Select One among Multiple Writable Branches
+----------------------------------------------------------------------
+When the number of writable branches is more than one, aufs has to
+decide the target branch for file creation or copy-up. By default, the
+highest writable branch which has the parent (or an ancestor) dir of
+the target file is chosen (the top-down-parent policy).
+By users' request, aufs implements some other policies to select the
+writable branch: for file creation, the round-robin and most-free-space
+policies; for copy-up, the top-down-parent, bottom-up-parent and
+bottom-up policies.
+
+As expected, the round-robin policy selects the branches in a circular
+manner. When you have two writable branches and create 10 new files, 5
+files will be created on each branch. The mkdir(2) system call is an
+exception: when you create 10 new directories, all of them will be
+created on the same branch.
+The most-free-space policy selects the branch which has the most free
+space among the writable branches. The amount of free space is checked
+by aufs internally, and users can specify the time interval between
+checks.
+
+The policies for copy-up are simpler:
+top-down-parent is equivalent to the same-named one in the create
+policy;
+bottom-up-parent selects the writable branch where the parent dir
+exists and which is the nearest upper one from the copyup-source;
+bottom-up selects the nearest upper writable branch from the
+copyup-source, regardless of the existence of the parent dir.
+
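+These policies are selected by mount options; assuming the option names
+from the aufs manual (illustrative):
+# round-robin for creation, bottom-up for copy-up
+# mount -t aufs -o br:/rw0:/rw1:/ro,create=rr,cpup=bu none /au
+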
+There are some rules or exceptions in applying these policies.
+- If there is a readonly branch above the policy-selected branch and
+  the parent dir is marked as opaque (a variation of whiteout), or the
+  target (creating) file is whiteout-ed on the upper readonly branch,
+  then the result of the policy is ignored and the target file will be
+  created on the nearest writable branch above the readonly branch.
+- If there is a writable branch above the policy-selected branch and
+  the parent dir is marked as opaque or the target file is whiteout-ed
+  on the branch, then the result of the policy is ignored and the target
+  file will be created on the highest one among the upper writable
+  branches which has the diropq or whiteout. In the whiteout case, aufs
+  removes it as usual.
+- The link(2) and rename(2) system calls are exceptions in every policy.
+  They try to select the branch where the source exists whenever
+  possible, since copying-up a large file takes a long time. If that is
+  impossible, ie. the branch where the source exists is readonly, then
+  they follow the copyup policy.
+- There is an exception for rename(2) when the target exists.
+  If the rename target exists, aufs compares the indices of the branches
+  where the source and the target exist and selects the higher
+  one. If the selected branch is readonly, then aufs follows the
+  copyup policy.
diff --git a/Documentation/filesystems/aufs/design/06mmap.txt b/Documentation/filesystems/aufs/design/06mmap.txt
new file mode 100644
index 0000000..55524d6
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/06mmap.txt
@@ -0,0 +1,47 @@
+
+# Copyright (C) 2005-2013 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+mmap(2) -- File Memory Mapping
+----------------------------------------------------------------------
+In aufs, the file-mapped pages are handled by the branch fs directly,
+with no interaction with aufs. That means aufs_mmap() calls the branch
+fs's ->mmap().
+This approach is simple and good, but there is one problem.
+Under /proc, several entries show the mmapped files by their path (with
+device and inode number), and the printed path will be the path on the
+branch fs instead of the virtual aufs one.
+This is not a problem in most cases, but some utilities such as lsof(1)
+(and their users) may expect the path on aufs.
+
+To address this issue, aufs adds a new member called vm_prfile to struct
+vm_area_struct (and struct vm_region). The original vm_file points to
+the file on the branch fs in order to handle everything correctly as
+usual. The new vm_prfile points to a virtual file in aufs, and the
+show-functions in procfs refer to vm_prfile if it is set.
+We also need to maintain several other places that touch vm_file,
+such as:
+- fork()/clone() copies the vma and increments the reference count of
+  vm_file.
+- merging vmas maintains the ref count too.
+
+This is not an elegant approach; it just fakes the printed path. But it
+leaves all behaviour around f_mapping unchanged, which is surely an
+advantage.
+Aufs previously adopted another, more complicated approach which called
+generic_file_mmap() and handled struct vm_operations_struct. With that
+approach aufs met a hard problem which I could not solve without
+switching approaches.
diff --git a/Documentation/filesystems/aufs/design/07export.txt b/Documentation/filesystems/aufs/design/07export.txt
new file mode 100644
index 0000000..ecf42a4
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/07export.txt
@@ -0,0 +1,59 @@
+
+# Copyright (C) 2005-2013 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+Export Aufs via NFS
+----------------------------------------------------------------------
+Here is an approach.
+- like xino/xib, add a new file 'xigen' which stores aufs inode
+  generation.
+- iget_locked(): initialize the aufs inode generation for a new inode,
+  and store it in the xigen file.
+- destroy_inode(): increment the aufs inode generation and store it in
+  the xigen file. this is necessary even if the inode is not unlinked,
+  because any data of the inode may be changed by UDBA.
+- encode_fh(): for a root dir, simply return FILEID_ROOT. otherwise
+  build the file handle (see the struct sketch after this list) by
+  + branch id (4 bytes)
+  + superblock generation (4 bytes)
+  + inode number (4 or 8 bytes)
+  + parent dir inode number (4 or 8 bytes)
+  + inode generation (4 bytes)
+  + return value of exportfs_encode_fh() for the parent on a branch (4
+    bytes)
+  + file handle for a branch (by exportfs_encode_fh())
+- fh_to_dentry():
+  + find the index of a branch from its id in the handle, and check
+    that it still exists in aufs.
+  + 1st level: get the inode number from the handle and search it in
+    the cache.
+  + 2nd level: if not found, get the parent inode number from the
+    handle and search it in the cache. then open the parent dir, find
+    the matching inode number by vfs_readdir() and get its name, and
+    call lookup_one_len() for the target dentry.
+  + 3rd level: if the parent dir is not cached, call
+    exportfs_decode_fh() for a branch and get the parent on the branch,
+    build a pathname of it, convert it to a pathname in aufs, and call
+    path_lookup(). now aufs gets a parent dir dentry, then handles it
+    as in the 2nd level.
+  + to open the dir, aufs needs a struct vfsmount. aufs keeps a
+    vfsmount for every branch, but not for itself. to get this,
+    (currently) aufs searches the current->nsproxy->mnt_ns list. it may
+    not be a good idea, but I didn't find another approach.
+  + test the generation of the obtained inode.
+- every inode operation: they may get EBUSY due to UDBA. in this case,
+  convert it into ESTALE for NFSD.
+- readdir(): call lockdep_on/off() because filldir in NFSD calls
+  lookup_one_len(), vfs_getattr(), encode_fh() and others.
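+
+The file handle layout sketched for encode_fh() above, expressed as an
+illustrative C struct (the field names are not the actual aufs
+definitions):
+	struct aufs_fh {
+		__u32	brid;		/* branch id */
+		__u32	sigen;		/* superblock generation */
+		__u64	ino;		/* inode number */
+		__u64	dir_ino;	/* parent dir inode number */
+		__u32	igen;		/* inode generation */
+		__u32	h_type;		/* exportfs_encode_fh() result
+					 * for the parent on a branch */
+		__u32	h_fh[0];	/* file handle for the branch */
+	};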
diff --git a/Documentation/filesystems/aufs/design/08shwh.txt b/Documentation/filesystems/aufs/design/08shwh.txt
new file mode 100644
index 0000000..18b889c
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/08shwh.txt
@@ -0,0 +1,53 @@
+
+# Copyright (C) 2005-2013 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+Show Whiteout Mode (shwh)
+----------------------------------------------------------------------
+Generally aufs hides the names of whiteouts. But in some cases, showing
+them is very useful for users; for instance, when creating a new middle
+layer (branch) by merging existing layers.
+
+(borrowing aufs1 HOW-TO from a user, Michael Towers)
+When you have three branches,
+- Bottom: 'system', squashfs (underlying base system), read-only
+- Middle: 'mods', squashfs, read-only
+- Top: 'overlay', ram (tmpfs), read-write
+
+The top layer is loaded at boot time and saved at shutdown, to preserve
+the changes made to the system during the session.
+When larger changes have been made, or smaller changes have accumulated,
+the size of the saved top layer data grows. At this point, it would be
+nice to be able to merge the two overlay branches ('mods' and 'overlay')
+and rewrite the 'mods' squashfs, clearing the top layer and thus
+restoring save and load speed.
+
+This merging is simplified by using another aufs mount of just the two
+overlay branches with the 'shwh' option.
+# mount -t aufs -o ro,shwh,br:/livesys/overlay=ro+wh:/livesys/mods=rr+wh \
+	aufs /livesys/merge_union
+
+A merged view of these two branches is then available at
+/livesys/merge_union, and the new feature is that the whiteouts are
+visible!
+Note that in 'shwh' mode the aufs mount must be 'ro', which will disable
+writing to all branches. Also the default mode for all branches is 'ro'.
+It is now possible to save the combined contents of the two overlay
+branches to a new squashfs, e.g.:
+# mksquashfs /livesys/merge_union /path/to/newmods.squash
+
+This new squashfs archive can be stored on the boot device and the
+initramfs will use it to replace the old one at the next boot.
diff --git a/Documentation/filesystems/aufs/design/10dynop.txt b/Documentation/filesystems/aufs/design/10dynop.txt
new file mode 100644
index 0000000..49e9a53
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/10dynop.txt
@@ -0,0 +1,47 @@
+
+# Copyright (C) 2010-2013 Junjiro R. Okajima
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+Dynamically customizable FS operations
+----------------------------------------------------------------------
+Generally FS operations (struct inode_operations, struct
+address_space_operations, struct file_operations, etc.) are defined as
+"static const", but that never means an FS has only one set of
+operations. Some FSs have multiple sets of them. For instance, ext2 has
+three sets: one for XIP, one for NOBH, and one for normal use.
+Since aufs overrides and redirects these operations, sometimes aufs has
+to change its behaviour according to the branch FS type. More
+importantly, VFS acts differently depending on whether a function (a
+member of the struct) is set or not. It means aufs should have several
+sets of operations and select one among them according to the branch FS
+definition.
+
+In order to solve this problem without affecting the behaviour of VFS,
+aufs defines these operations dynamically. For instance, aufs defines
+an aio_read function for struct file_operations, but it may not be set
+in the file_operations. When the branch FS doesn't have it, aufs
+doesn't set it in its file_operations, while the function definition
+itself remains alive. So the behaviour of io_submit(2) will not change:
+it will return an error when aio_read is not defined.
+
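+A minimal sketch of the idea (illustrative only, not the actual aufs
+code):
+
+	/* build a per-branch file_operations at runtime; keep aio_read
+	 * only when the branch fs itself provides one */
+	static void dy_fill_fop(struct file_operations *fop,
+				const struct file_operations *h_fop)
+	{
+		*fop = aufs_fop_template; /* hypothetical template */
+		if (!h_fop->aio_read)
+			fop->aio_read = NULL;
+	}
+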
+The lifetime of these dynamically generated operation objects is
+maintained by the aufs branch object. When the branch is removed from
+aufs, the reference counter of the object is decremented. When it
+reaches zero, the dynamically generated operation object is freed.
+
+This approach is designed mainly to support AIO (io_submit), Direct I/O
+and XIP.
+Currently this approach is applied to file_operations and
+vm_operations_struct for regular files only.
diff --git a/Documentation/filesystems/aufs/design/99plan.txt b/Documentation/filesystems/aufs/design/99plan.txt
new file mode 100644
index 0000000..a21f133
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/99plan.txt
@@ -0,0 +1,96 @@
+
+# Copyright (C) 2005-2013 Junjiro R. Okajima
+# 
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+Plan
+
+Restoring some features which were implemented in aufs1.
+They were dropped in aufs2 in order to make the source files simpler
+and easier to review.
+
+
+Test Only the Highest One for the Directory Permission (dirperm1 option)
+----------------------------------------------------------------------
+Let's try a case study.
+- aufs has two branches, the upper readwrite and the lower readonly.
+  /au = /rw + /ro
+- "dirA" exists under /ro, but not under /rw, and its mode is 0700.
+- a user invoked "chmod a+rx /au/dirA"
+- does "dirA" then become world readable?
+
+In this case, /ro/dirA is still 0700 since it exists on a readonly
+branch, which may even be a natively readonly filesystem. If aufs
+respects the lower branch, it should not respond to readdir requests
+from other users. But the user allowed it by chmod. Should aufs really
+reject showing the entries under /ro/dirA?
+
+To be honest, I don't have a best solution for this case. So I
+implemented the 'dirperm1' and 'nodirperm1' options in aufs1, and leave
+the choice to users.
+When dirperm1 is specified, aufs checks only the highest branch for the
+directory permission, and shows the entries. Otherwise, as usual, it
+checks every dir existing on all the branches and rejects the request.
+
+As a side effect, the dirperm1 option improves the performance of aufs
+because the number of permission checks is reduced.
+
+
+Being Another Aufs's Readonly Branch (robr)
+----------------------------------------------------------------------
+Aufs1 allows aufs to be another aufs's readonly branch.
+This feature was developed at a user's request. But it may not be used
+currently.
+
+
+Copy-up on Open (coo=)
+----------------------------------------------------------------------
+By default the internal copy-up is executed only when it is really
+necessary. It is not done when a file is opened for writing, but when
+write(2) is done. Users who have many (over 100) branches want to know
+and analyse when and which files are copied-up. Inserting a new upper
+branch which contains only such files may improve the performance of
+aufs.
+
+Aufs1 implemented the "coo=none | leaf | all" option.
+
+
+Refresh the Opened File (refrof)
+----------------------------------------------------------------------
+This option was implemented in aufs1 but is incomplete.
+
+Generally, when a user reads from a file, he expects to get its latest
+filedata. If the file is removed and a new file with the same name is
+created, the content he gets is unchanged, ie. the old unlinked
+filedata.
+
+Let's try a case study again.
+- aufs has two branches.
+  /au = /rw + /ro
+- "fileA" exists under /ro, but not under /rw.
+- a user opened "/au/fileA".
+- he or someone else inserts a branch (/new) between /rw and /ro.
+  /au = /rw + /new + /ro
+- the new branch has "fileA".
+- the user reads from the opened "fileA".
+- which filedata should aufs return, from /ro or /new?
+
+Some people say it has to be "from /ro", and that is the semantics of
+Unix. Others say it should be "from /new", because the file is not
+removed and it is equivalent to the case of someone else modifying the
+file.
+
+Here again I don't have a best and final answer. I got an idea to
+implement the 'refrof' and 'norefrof' options. When 'refrof' (REFResh
+the Opened File) is specified (the default), aufs returns the filedata
+from /new.
+Otherwise from /ro.
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 4db22f6..85a4a03 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -445,3 +445,6 @@
 [mandatory]
 	FS_REVAL_DOT is gone; if you used to have it, add ->d_weak_revalidate()
 in your dentry operations instead.
+--
+[mandatory]
+	vfs_readdir() is gone; switch to iterate_dir() instead
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 954eab8..be35913 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -369,6 +369,8 @@
  [stack:1001]             = the stack of the thread with tid 1001
  [vdso]                   = the "virtual dynamic shared object",
                             the kernel system call handler
+ [anon:<name>]            = an anonymous mapping that has been
+                            named by userspace
 
  or if empty, the mapping is anonymous.
 
@@ -419,6 +421,7 @@
 MMUPageSize:           4 kB
 Locked:              374 kB
 VmFlags: rd ex mr mw me de
+Name:           name from userspace
 
 the first of these lines shows the same information as is displayed for the
 mapping in /proc/PID/maps.  The remaining lines show the size of the mapping
@@ -469,6 +472,9 @@
 be present in all further kernel releases. Things get changed, the flags may
 be vanished or the reverse -- new added.
 
+The "Name" field will only be present on a mapping that has been named by
+userspace, and will show the name passed in by userspace.
+
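+In the Android kernel patches which introduce this feature, a mapping
+is named with prctl(2); a sketch (the PR_* constants come from those
+patches, not from the mainline uapi headers):
+
+    #include <sys/prctl.h>
+
+    #define PR_SET_VMA		0x53564d41
+    #define PR_SET_VMA_ANON_NAME	0
+
+    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
+          (unsigned long)addr, length, (unsigned long)"myheap");
+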
 This file is only present if the CONFIG_MMU kernel configuration option is
 enabled.
 
@@ -484,6 +490,10 @@
     > echo 3 > /proc/PID/clear_refs
 Any other value written to /proc/PID/clear_refs will have no effect.
 
+To reset the peak resident set size ("high water mark") to the process's
+current value:
+    > echo 5 > /proc/PID/clear_refs
+
 The /proc/pid/pagemap gives the PFN, which can be used to find the pageflags
 using /proc/kpageflags and number of times a page is mapped using
 /proc/kpagecount. For detailed explanation, see Documentation/vm/pagemap.txt.
diff --git a/Documentation/filesystems/squashfs.txt b/Documentation/filesystems/squashfs.txt
index 403c090..e5274f8 100644
--- a/Documentation/filesystems/squashfs.txt
+++ b/Documentation/filesystems/squashfs.txt
@@ -2,10 +2,10 @@
 =======================
 
 Squashfs is a compressed read-only filesystem for Linux.
-It uses zlib/lzo/xz compression to compress files, inodes and directories.
-Inodes in the system are very small and all blocks are packed to minimise
-data overhead. Block sizes greater than 4K are supported up to a maximum
-of 1Mbytes (default block size 128K).
+It uses zlib, lz4, lzo, or xz compression to compress files, inodes and
+directories.  Inodes in the system are very small and all blocks are packed to
+minimise data overhead. Block sizes greater than 4K are supported up to a
+maximum of 1Mbytes (default block size 128K).
 
 Squashfs is intended for general read-only filesystem use, for archival
 use (i.e. in cases where a .tar.gz file may be used), and in constrained
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index bc4b06b..e445b95 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -549,7 +549,7 @@
 -------------------------------
 
 This describes how the VFS can manipulate mapping of a file to page cache in
-your filesystem. As of kernel 2.6.22, the following members are defined:
+your filesystem. The following members are defined:
 
 struct address_space_operations {
 	int (*writepage)(struct page *page, struct writeback_control *wbc);
@@ -566,7 +566,7 @@
 				loff_t pos, unsigned len, unsigned copied,
 				struct page *page, void *fsdata);
 	sector_t (*bmap)(struct address_space *, sector_t);
-	int (*invalidatepage) (struct page *, unsigned long);
+	void (*invalidatepage) (struct page *, unsigned int, unsigned int);
 	int (*releasepage) (struct page *, int);
 	void (*freepage)(struct page *);
 	ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
@@ -685,14 +685,14 @@
   invalidatepage: If a page has PagePrivate set, then invalidatepage
         will be called when part or all of the page is to be removed
 	from the address space.  This generally corresponds to either a
-	truncation or a complete invalidation of the address space
-	(in the latter case 'offset' will always be 0).
-	Any private data associated with the page should be updated
-	to reflect this truncation.  If offset is 0, then
-	the private data should be released, because the page
-	must be able to be completely discarded.  This may be done by
-        calling the ->releasepage function, but in this case the
-        release MUST succeed.
+	truncation, punch hole, or a complete invalidation of the address
+	space (in the latter case 'offset' will always be 0 and 'length'
+	will be PAGE_CACHE_SIZE). Any private data associated with the page
+	should be updated to reflect this truncation.  If offset is 0 and
+	length is PAGE_CACHE_SIZE, then the private data should be released,
+	because the page must be able to be completely discarded.  This may
+	be done by calling the ->releasepage function, but in this case the
+	release MUST succeed.
 
   releasepage: releasepage is called on PagePrivate pages to indicate
         that the page should be freed if possible.  ->releasepage
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 98da831..6b13480 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3350,11 +3350,11 @@
 			default x2apic cluster mode on platforms
 			supporting x2apic.
 
-	x86_mrst_timer= [X86-32,APBT]
-			Choose timer option for x86 Moorestown MID platform.
+	x86_intel_mid_timer= [X86-32,APBT]
+			Choose timer option for x86 Intel MID platform.
 			Two valid options are apbt timer only and lapic timer
 			plus one apbt timer for broadcast timer.
-			x86_mrst_timer=apbt_only | lapic_and_apbt
+			x86_intel_mid_timer=apbt_only | lapic_and_apbt
 
 	xen_emul_unplug=		[HW,X86,XEN]
 			Unplug Xen emulated devices
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index a59ee43..b4ec119 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -22,6 +22,15 @@
 min_pmtu - INTEGER
 	default 552 - minimum discovered Path MTU
 
+fwmark_reflect - BOOLEAN
+	Controls the fwmark of kernel-generated IPv4 reply packets that are not
+	associated with a socket (for example, TCP RSTs or ICMP echo replies).
+	If unset, these packets have a fwmark of zero. If set, they have the
+	fwmark of the packet they are replying to. Similarly affects the fwmark
+	used by internal routing lookups triggered by incoming packets, such as
+	the ones used for Path MTU Discovery.
+	Default: 0
+
 route/max_size - INTEGER
 	Maximum number of routes allowed in the kernel.  Increase
 	this when using large numbers of interfaces and/or routes.
@@ -468,6 +477,16 @@
 
 	See include/net/tcp.h and the code for more details.
 
+tcp_fwmark_accept - BOOLEAN
+	If set, incoming connections to listening sockets that do not have a
+	socket mark will set the mark of the accepting socket to the fwmark of
+	the incoming SYN packet. This will cause all packets on that connection
+	(starting from the first SYNACK) to be sent with that fwmark. The
+	listening socket's mark is unchanged. Listening sockets that already
+	have a fwmark set via setsockopt(SOL_SOCKET, SO_MARK, ...) are
+	unaffected.
+	Default: 0
+
 tcp_syn_retries - INTEGER
 	Number of times initial SYNs for an active TCP connection attempt
 	will be retransmitted. Should not be higher than 255. Default value
@@ -1093,6 +1112,15 @@
 proxy_ndp - BOOLEAN
 	Do proxy ndp.
 
+fwmark_reflect - BOOLEAN
+	Controls the fwmark of kernel-generated IPv6 reply packets that are not
+	associated with a socket (for example, TCP RSTs or ICMPv6 echo replies).
+	If unset, these packets have a fwmark of zero. If set, they have the
+	fwmark of the packet they are replying to. Similarly affects the fwmark
+	used by internal routing lookups triggered by incoming packets, such as
+	the ones used for Path MTU Discovery.
+	Default: 0
+
 conf/interface/*:
 	Change special settings per interface.
 
@@ -1230,6 +1258,13 @@
 	routers are present.
 	Default: 3
 
+use_oif_addrs_only - BOOLEAN
+	When enabled, the candidate source addresses for destinations
+	routed via this interface are restricted to the set of addresses
+	configured on this interface (vis. RFC 6724, section 4).
+
+	Default: false
+
 use_tempaddr - INTEGER
 	Preference for Privacy Extensions (RFC3041).
 	  <= 0 : disable Privacy Extensions
@@ -1311,6 +1346,19 @@
 	1 - Generate unsolicited neighbour advertisements when device is brought
 	    up or hardware address changes.
 
+optimistic_dad - BOOLEAN
+	Whether to perform Optimistic Duplicate Address Detection (RFC 4429).
+		0: disabled (default)
+		1: enabled
+
+use_optimistic - BOOLEAN
+	If enabled, do not classify optimistic addresses as deprecated during
+	source address selection.  Preferred addresses will still be chosen
+	before optimistic addresses, subject to other ranking in the source
+	address selection algorithm.
+		0: disabled (default)
+		1: enabled
+
 icmp/*:
 ratelimit - INTEGER
 	Limit the maximal rates for sending ICMPv6 packets.
diff --git a/Documentation/sync.txt b/Documentation/sync.txt
new file mode 100644
index 0000000..a2d05e7
--- /dev/null
+++ b/Documentation/sync.txt
@@ -0,0 +1,75 @@
+Motivation:
+
+In complicated DMA pipelines such as graphics (multimedia, camera, gpu, display)
+a consumer of a buffer needs to know when the producer has finished producing
+it.  Likewise the producer needs to know when the consumer is finished with the
+buffer so it can reuse it.  A particular buffer may be consumed by multiple
+consumers which will retain the buffer for different amounts of time.  In
+addition, a consumer may consume multiple buffers atomically.
+The sync framework adds an API which allows synchronization between the
+producers and consumers in a generic way while also allowing platforms which
+have shared hardware synchronization primitives to exploit them.
+
+Goals:
+	* provide a generic API for expressing synchronization dependencies
+	* allow drivers to exploit hardware synchronization between hardware
+	  blocks
+	* provide a userspace API that allows a compositor to manage
+	  dependencies.
+	* provide rich telemetry data to allow debugging slowdowns and stalls of
+	   the graphics pipeline.
+
+Objects:
+	* sync_timeline
+	* sync_pt
+	* sync_fence
+
+sync_timeline:
+
+A sync_timeline is an abstract monotonically increasing counter. In general,
+each driver/hardware block context will have one of these.  They can be backed
+by the appropriate hardware or rely on the generic sw_sync implementation.
+Timelines are only ever created through their specific implementations
+(i.e. sw_sync.)
+
+sync_pt:
+
+A sync_pt is an abstract value which marks a point on a sync_timeline. Sync_pts
+have a single timeline parent.  They have 3 states: active, signaled, and error.
+They start in active state and transition, once, to either signaled (when the
+timeline counter advances beyond the sync_pt’s value) or error state.
+
+sync_fence:
+
+Sync_fences are the primary primitives used by drivers to coordinate
+synchronization of their buffers.  They are a collection of sync_pts which may
+or may not have the same timeline parent.  A sync_pt can only exist in one fence
+and the fence's list of sync_pts is immutable once created.  Fences can be
+waited on synchronously or asynchronously.  Two fences can also be merged to
+create a third fence containing a copy of the two fences’ sync_pts.  Fences are
+backed by file descriptors to allow userspace to coordinate the display pipeline
+dependencies.
+
+Use:
+
+A driver implementing sync support should have a work submission function which:
+     * takes a fence argument specifying when to begin work
+     * asynchronously queues that work to kick off when the fence is signaled
+     * returns a fence to indicate when its work will be done.
+     * signals the returned fence once the work is completed.
+
+Consider an imaginary display driver that has the following API:
+/*
+ * assumes buf is ready to be displayed.
+ * blocks until the buffer is on screen.
+ */
+    void display_buffer(struct dma_buf *buf);
+
+The new API will become:
+/*
+ * will display buf when fence is signaled.
+ * returns immediately with a fence that will signal when buf
+ * is no longer displayed.
+ */
+struct sync_fence* display_buffer(struct dma_buf *buf,
+                                 struct sync_fence *fence);
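+
+From userspace, the file descriptor backing a sync_fence can be waited
+on with poll(2); a sketch:
+
+    #include <poll.h>
+
+    struct pollfd pfd = { .fd = fence_fd, .events = POLLIN };
+    /* wait up to 100ms; POLLIN means every sync_pt has signaled */
+    int ret = poll(&pfd, 1, 100);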
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 8d90c42..9b9d019 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -182,6 +182,7 @@
 	%<NUL>	'%' is dropped
 	%%	output one '%'
 	%p	pid
+	%P	global pid (init PID namespace)
 	%u	uid
 	%g	gid
 	%d	dump mode, matches PR_SET_DUMPABLE and
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index dcc75a9..c7539ec 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -29,6 +29,7 @@
 - dirty_writeback_centisecs
 - drop_caches
 - extfrag_threshold
+- extra_free_kbytes
 - hugepages_treat_as_movable
 - hugetlb_shm_group
 - laptop_mode
@@ -41,6 +42,8 @@
 - min_slab_ratio
 - min_unmapped_ratio
 - mmap_min_addr
+- mmap_rnd_bits
+- mmap_rnd_compat_bits
 - nr_hugepages
 - nr_overcommit_hugepages
 - nr_trim_pages         (only if CONFIG_MMU=n)
@@ -198,6 +201,21 @@
 
 ==============================================================
 
+extra_free_kbytes
+
+This parameter tells the VM to keep extra free memory between the threshold
+where background reclaim (kswapd) kicks in, and the threshold where direct
+reclaim (by allocating processes) kicks in.
+
+This is useful for workloads that require low-latency memory
+allocations and have a bounded burstiness in memory allocations; for
+example, a realtime application that receives and transmits network
+traffic (causing in-kernel memory allocations) with a maximum total
+message burst size of 200MB may need 200MB of extra free memory to
+avoid direct reclaim related latencies.
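+
+For instance, to reserve the 200MB from the example above:
+    # echo 204800 > /proc/sys/vm/extra_free_kbytes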
+
+==============================================================
+
 hugepages_treat_as_movable
 
 This parameter is only useful when kernelcore= is specified at boot time to
@@ -439,6 +457,33 @@
 
 ==============================================================
 
+mmap_rnd_bits:
+
+This value can be used to select the number of bits to use to
+determine the random offset to the base address of vma regions
+resulting from mmap allocations on architectures which support
+tuning address space randomization.  This value will be bounded
+by the architecture's minimum and maximum supported values.
+
+This value can be changed after boot using the
+/proc/sys/vm/mmap_rnd_bits tunable
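+For example (the accepted range depends on the architecture):
+    # echo 16 > /proc/sys/vm/mmap_rnd_bits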
+
+==============================================================
+
+mmap_rnd_compat_bits:
+
+This value can be used to select the number of bits to use to
+determine the random offset to the base address of vma regions
+resulting from mmap allocations for applications run in
+compatibility mode on architectures which support tuning address
+space randomization.  This value will be bounded by the
+architecture's minimum and maximum supported values.
+
+This value can be changed after boot using the
+/proc/sys/vm/mmap_rnd_compat_bits tunable
+
+==============================================================
+
 nr_hugepages
 
 Change the minimum size of the hugepage pool.
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index bfe8c29..29064c3 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -2013,6 +2013,35 @@
  1)   1.449 us    |             }
 
 
+You can disable the hierarchical function call formatting and instead print a
+flat list of function entry and return events.  This uses the format described
+in the Output Formatting section and respects all the trace options that
+control that formatting.  Hierarchical formatting is the default.
+
+	hierarchical: echo nofuncgraph-flat > trace_options
+	flat: echo funcgraph-flat > trace_options
+
+  ie:
+
+  # tracer: function_graph
+  #
+  # entries-in-buffer/entries-written: 68355/68355   #P:2
+  #
+  #                              _-----=> irqs-off
+  #                             / _----=> need-resched
+  #                            | / _---=> hardirq/softirq
+  #                            || / _--=> preempt-depth
+  #                            ||| /     delay
+  #           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION
+  #              | |       |   ||||       |         |
+                sh-1806  [001] d...   198.843443: graph_ent: func=_raw_spin_lock
+                sh-1806  [001] d...   198.843445: graph_ent: func=__raw_spin_lock
+                sh-1806  [001] d..1   198.843447: graph_ret: func=__raw_spin_lock
+                sh-1806  [001] d..1   198.843449: graph_ret: func=_raw_spin_lock
+                sh-1806  [001] d..1   198.843451: graph_ent: func=_raw_spin_unlock_irqrestore
+                sh-1806  [001] d...   198.843453: graph_ret: func=_raw_spin_unlock_irqrestore
+
+
 You might find other useful features for this tracer in the
 following "dynamic ftrace" section such as tracing only specific
 functions or tasks.
diff --git a/Makefile b/Makefile
index 80e180e..3ba1951 100644
--- a/Makefile
+++ b/Makefile
@@ -408,7 +408,7 @@
 # Files to ignore in find ... statements
 
 RCS_FIND_IGNORE := \( -name SCCS -o -name BitKeeper -o -name .svn -o -name CVS \
-		   -o -name .pc -o -name .hg -o -name .git \) -prune -o
+		   -o -name .pc -o -name .hg -o -name .git -o -name meta \) -prune -o
 export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
 			 --exclude CVS --exclude .pc --exclude .hg --exclude .git
 
diff --git a/android/configs/README b/android/configs/README
new file mode 100644
index 0000000..8798731
--- /dev/null
+++ b/android/configs/README
@@ -0,0 +1,15 @@
+The files in this directory are meant to be used as a base for an Android
+kernel config. All devices should have the options in android-base.cfg enabled.
+While not mandatory, the options in android-recommended.cfg enable advanced
+Android features.
+
+Assuming you already have a minimalist defconfig for your device, a possible
+way to enable these options would be:
+
+     ARCH=<arch> scripts/kconfig/merge_config.sh <path_to>/<device>_defconfig android/configs/android-base.cfg android/configs/android-recommended.cfg
+
+This will generate a .config that can then be used to save a new defconfig or
+compile a new kernel with Android features enabled.
+
+Because there is no tool to consistently generate these config fragments,
+let's keep them alphabetically sorted instead of in random order.
diff --git a/android/configs/android-base.cfg b/android/configs/android-base.cfg
new file mode 100644
index 0000000..1bee5d6
--- /dev/null
+++ b/android/configs/android-base.cfg
@@ -0,0 +1,152 @@
+#  KEEP ALPHABETICALLY SORTED
+# CONFIG_DEVKMEM is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_MODULES is not set
+# CONFIG_OABI_COMPAT is not set
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ARMV7_COMPAT=y
+CONFIG_ASHMEM=y
+CONFIG_AUDIT=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_VERITY=y
+CONFIG_EMBEDDED=y
+CONFIG_FB=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_INET=y
+CONFIG_INET_ESP=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_TARGET_REJECT_SKERR=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_REJECT_SKERR=y
+CONFIG_NET=y
+CONFIG_NETDEVICES=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_KEY=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_NAT=y
+CONFIG_NO_HZ=y
+CONFIG_PACKET=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PPP=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PREEMPT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_RTC_CLASS=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SND=y
+CONFIG_SOUND=y
+CONFIG_STAGING=y
+CONFIG_SWITCH=y
+CONFIG_SYNC=y
+CONFIG_SYSVIPC=y
+CONFIG_TUN=y
+CONFIG_UNIX=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_G_ANDROID=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_XFRM_USER=y
diff --git a/android/configs/android-recommended.cfg b/android/configs/android-recommended.cfg
new file mode 100644
index 0000000..960b9de
--- /dev/null
+++ b/android/configs/android-recommended.cfg
@@ -0,0 +1,121 @@
+#  KEEP ALPHABETICALLY SORTED
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_NF_CONNTRACK_SIP is not set
+# CONFIG_PM_WAKELOCKS_GC is not set
+# CONFIG_VT is not set
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_COMPACTION=y
+CONFIG_DM_UEVENT=y
+CONFIG_DRAGONRISE_FF=y
+CONFIG_ENABLE_DEFAULT_TRACERS=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_FUSE_FS=y
+CONFIG_GREENASIA_FF=y
+CONFIG_HIDRAW=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_ACRUX=y
+CONFIG_HID_ACRUX_FF=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_EMS_FF=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_GREENASIA=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_HOLTEK=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_KEYTOUCH=y
+CONFIG_HID_KYE=y
+CONFIG_HID_LCPOWER=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_ORTEK=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_PICOLCD=y
+CONFIG_HID_PRIMAX=y
+CONFIG_HID_PRODIKEYS=y
+CONFIG_HID_ROCCAT=y
+CONFIG_HID_SAITEK=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SPEEDLINK=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_TIVO=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_UCLOGIC=y
+CONFIG_HID_WACOM=y
+CONFIG_HID_WALTOP=y
+CONFIG_HID_WIIMOTE=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_HID_ZYDACRON=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_GPIO=y
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_KEYCHORD=y
+CONFIG_INPUT_KEYRESET=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_TABLET=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_ION=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_JOYSTICK_XPAD_FF=y
+CONFIG_JOYSTICK_XPAD_LEDS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KSM=y
+CONFIG_LOGIG940_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGITECH_FF=y
+CONFIG_MD=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MSDOS_FS=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANTHERLORD_FF=y
+CONFIG_PERF_EVENTS=y
+CONFIG_PM_DEBUG=y
+CONFIG_PM_RUNTIME=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+CONFIG_POWER_SUPPLY=y
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_RAM=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_SND=y
+CONFIG_SOUND=y
+CONFIG_SUSPEND_TIME=y
+CONFIG_TABLET_USB_ACECAD=y
+CONFIG_TABLET_USB_AIPTEK=y
+CONFIG_TABLET_USB_GTCO=y
+CONFIG_TABLET_USB_HANWANG=y
+CONFIG_TABLET_USB_KBTAB=y
+CONFIG_TABLET_USB_WACOM=y
+CONFIG_TIMER_STATS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_UHID=y
+CONFIG_UID_STAT=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_USBNET=y
+CONFIG_VFAT_FS=y
diff --git a/arch/Kconfig b/arch/Kconfig
index 00e3702..6dbcfc4 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -331,6 +331,7 @@
 	  - secure_computing is called from a ptrace_event()-safe context
 	  - secure_computing return value is checked and a return value of -1
 	    results in the system call being skipped immediately.
+	  - seccomp syscall wired up
 
 config SECCOMP_FILTER
 	def_bool y
@@ -390,6 +391,74 @@
 	  Some architectures generate an _ in front of C symbols; things like
 	  module loading and assembly files need to know about this.
 
+config HAVE_ARCH_MMAP_RND_BITS
+	bool
+	help
+	  An arch should select this symbol if it supports setting a variable
+	  number of bits for use in establishing the base address for mmap
+	  allocations, has MMU enabled and provides values for both:
+	  - ARCH_MMAP_RND_BITS_MIN
+	  - ARCH_MMAP_RND_BITS_MAX
+
+config ARCH_MMAP_RND_BITS_MIN
+	int
+
+config ARCH_MMAP_RND_BITS_MAX
+	int
+
+config ARCH_MMAP_RND_BITS_DEFAULT
+	int
+
+config ARCH_MMAP_RND_BITS
+	int "Number of bits to use for ASLR of mmap base address" if EXPERT
+	range ARCH_MMAP_RND_BITS_MIN ARCH_MMAP_RND_BITS_MAX
+	default ARCH_MMAP_RND_BITS_DEFAULT if ARCH_MMAP_RND_BITS_DEFAULT
+	default ARCH_MMAP_RND_BITS_MIN
+	depends on HAVE_ARCH_MMAP_RND_BITS
+	help
+	  This value can be used to select the number of bits to use to
+	  determine the random offset to the base address of vma regions
+	  resulting from mmap allocations. This value will be bounded
+	  by the architecture's minimum and maximum supported values.
+
+	  This value can be changed after boot using the
+	  /proc/sys/vm/mmap_rnd_bits tunable
+
+config HAVE_ARCH_MMAP_RND_COMPAT_BITS
+	bool
+	help
+	  An arch should select this symbol if it supports running applications
+	  in compatibility mode, supports setting a variable number of bits for
+	  use in establishing the base address for mmap allocations, has MMU
+	  enabled and provides values for both:
+	  - ARCH_MMAP_RND_COMPAT_BITS_MIN
+	  - ARCH_MMAP_RND_COMPAT_BITS_MAX
+
+config ARCH_MMAP_RND_COMPAT_BITS_MIN
+	int
+
+config ARCH_MMAP_RND_COMPAT_BITS_MAX
+	int
+
+config ARCH_MMAP_RND_COMPAT_BITS_DEFAULT
+	int
+
+config ARCH_MMAP_RND_COMPAT_BITS
+	int "Number of bits to use for ASLR of mmap base address for compatible applications" if EXPERT
+	range ARCH_MMAP_RND_COMPAT_BITS_MIN ARCH_MMAP_RND_COMPAT_BITS_MAX
+	default ARCH_MMAP_RND_COMPAT_BITS_DEFAULT if ARCH_MMAP_RND_COMPAT_BITS_DEFAULT
+	default ARCH_MMAP_RND_COMPAT_BITS_MIN
+	depends on HAVE_ARCH_MMAP_RND_COMPAT_BITS
+	help
+	  This value can be used to select the number of bits to use to
+	  determine the random offset to the base address of vma regions
+	  resulting from mmap allocations for compatible applications. This
+	  value will be bounded by the architecture's minimum and maximum
+	  supported values.
+
+	  This value can be changed after boot using the
+	  /proc/sys/vm/mmap_rnd_compat_bits tunable
+
 #
 # ABI hall of shame
 #
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index b9e37ad..1402fcc 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -96,6 +96,7 @@
 };
 
 struct osf_dirent_callback {
+	struct dir_context ctx;
 	struct osf_dirent __user *dirent;
 	long __user *basep;
 	unsigned int count;
@@ -146,17 +147,17 @@
 {
 	int error;
 	struct fd arg = fdget(fd);
-	struct osf_dirent_callback buf;
+	struct osf_dirent_callback buf = {
+		.ctx.actor = osf_filldir,
+		.dirent = dirent,
+		.basep = basep,
+		.count = count
+	};
 
 	if (!arg.file)
 		return -EBADF;
 
-	buf.dirent = dirent;
-	buf.basep = basep;
-	buf.count = count;
-	buf.error = 0;
-
-	error = vfs_readdir(arg.file, osf_filldir, &buf);
+	error = iterate_dir(arg.file, &buf.ctx);
 	if (error >= 0)
 		error = buf.error;
 	if (count != buf.count)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index d419512..fd46f71 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -22,6 +22,7 @@
 	select HARDIRQS_SW_RESEND
 	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
 	select HAVE_ARCH_KGDB
+	select HAVE_ARCH_MMAP_RND_BITS if MMU
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_BPF_JIT
@@ -279,6 +280,14 @@
 	  Select if you want MMU-based virtualised addressing space
 	  support by paged memory management. If unsure, say 'Y'.
 
+config ARCH_MMAP_RND_BITS_MIN
+	default 8
+
+config ARCH_MMAP_RND_BITS_MAX
+	default 14 if PAGE_OFFSET=0x40000000
+	default 15 if PAGE_OFFSET=0x80000000
+	default 16
+
 #
 # The "ARM system type" choice list is ordered alphabetically by option
 # text.  Please add new entries in the option alphabetic order.
@@ -1820,6 +1829,15 @@
 	help
 	  Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
 
+config ARM_FLUSH_CONSOLE_ON_RESTART
+	bool "Force flush the console on restart"
+	help
+	  If the console is locked while the system is rebooted, the messages
+	  in the temporary log buffer would not have propagated to all the
+	  console drivers. This option forces the console lock to be
+	  released if it failed to be acquired, which will cause all the
+	  pending messages to be flushed.
+
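A sketch of the behaviour this option enables, simplified from the Android restart path (the helper name is hypothetical; console_trylock() and console_unlock() are the real APIs):

#include <linux/console.h>
#include <linux/delay.h>
#include <linux/irqflags.h>
#include <linux/printk.h>

static void flush_console_on_restart(void)
{
	if (console_trylock()) {
		console_unlock();	/* nothing was stuck */
		return;
	}

	mdelay(50);			/* give the lock holder a moment */

	local_irq_disable();
	if (!console_trylock())
		pr_emerg("restart: console was locked - busting\n");
	/* Release even though we may not own the lock, so the pending
	 * log-buffer messages still reach the console drivers. */
	console_unlock();
}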
 endmenu
 
 menu "Boot options"
@@ -1849,6 +1867,21 @@
 	  This was deprecated in 2001 and announced to live on for 5 years.
 	  Some old boot loaders still use this way.
 
+config BUILD_ARM_APPENDED_DTB_IMAGE
+	bool "Build a concatenated zImage/dtb by default"
+	depends on OF
+	help
+	  Enabling this option will cause a concatenated zImage and list of
+	  DTBs to be built by default (instead of a standalone zImage).
+	  The image will be built as arch/arm/boot/zImage-dtb.
+
+config BUILD_ARM_APPENDED_DTB_IMAGE_NAMES
+	string "Default dtb names"
+	depends on BUILD_ARM_APPENDED_DTB_IMAGE
+	help
+	  Space-separated list of names of dtbs to append when
+	  building a concatenated zImage-dtb.
+
 # Compressed boot loader in ROM.  Yes, we really want to ask about
 # TEXT and BSS so we preserve their values in the config files.
 config ZBOOT_ROM_TEXT
@@ -2198,6 +2231,13 @@
 	  Say Y to include support code for NEON, the ARMv7 Advanced SIMD
 	  Extension.
 
+config KERNEL_MODE_NEON
+	bool "Support for NEON in kernel mode"
+	default n
+	depends on NEON
+	help
+	  Say Y to include support for NEON in kernel mode.
+
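In-kernel NEON users must bracket their NEON code so the user-space VFP/NEON state is saved and restored; a minimal sketch (the worker body is a placeholder):

#include <asm/neon.h>

static void neon_accelerated_work(void)
{
	kernel_neon_begin();	/* saves the user NEON/VFP state */
	/* ... NEON instructions, e.g. a bit-sliced AES round ... */
	kernel_neon_end();	/* restores state; do not sleep in between */
}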
 endmenu
 
 menu "Userspace binary formats"
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 1d41908..a640f09 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -63,6 +63,27 @@
 	      8 - SIGSEGV faults
 	     16 - SIGBUS faults
 
+config DEBUG_RODATA
+	bool "Write protect kernel text section"
+	default n
+	depends on DEBUG_KERNEL && MMU
+	---help---
+	  Mark the kernel text section as write-protected in the pagetables,
+	  in order to catch accidental (and incorrect) writes to such const
+	  data. This will cause the kernel text, plus up to 4MB, to
+	  be mapped as pages instead of sections, which will increase TLB
+	  pressure.
+	  If in doubt, say "N".
+
+config DEBUG_RODATA_TEST
+	bool "Testcase for the DEBUG_RODATA feature"
+	depends on DEBUG_RODATA
+	default n
+	---help---
+	  This option enables a testcase for the DEBUG_RODATA
+	  feature.
+	  If in doubt, say "N".
+
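What the testcase amounts to, sketched with probe_kernel_write() (which returns -EFAULT instead of oopsing when the target page is write-protected); the symbol names are illustrative:

#include <linux/kernel.h>
#include <linux/uaccess.h>

static const int rodata_test_data = 0xC3;

static void rodata_write_test(void)
{
	int zero = 0;

	/* With DEBUG_RODATA this store must be rejected by the MMU. */
	if (probe_kernel_write((void *)&rodata_test_data, &zero,
			       sizeof(zero)))
		pr_info("rodata: write correctly blocked\n");
	else
		pr_err("rodata: write to const data went through!\n");
}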
 # These options are only for real kernel hackers who want to get their hands dirty.
 config DEBUG_LL
 	bool "Kernel low-level debugging functions (read help!)"
@@ -669,6 +690,14 @@
 	  kernel low-level debugging functions. Add earlyprintk to your
 	  kernel parameters to enable this console.
 
+config EARLY_PRINTK_DIRECT
+	bool "Early printk direct"
+	depends on DEBUG_LL
+	help
+	  Say Y here if you want to have an early console using the
+	  kernel low-level debugging functions when EARLY_PRINTK is
+	  not early enough.
+
 config OC_ETM
 	bool "On-chip ETM and ETB"
 	depends on ARM_AMBA
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 7d4ce43..5cae9d8 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -272,6 +272,8 @@
 # Default target when executing plain make
 ifeq ($(CONFIG_XIP_KERNEL),y)
 KBUILD_IMAGE := xipImage
+else ifeq ($(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE),y)
+KBUILD_IMAGE := zImage-dtb
 else
 KBUILD_IMAGE := zImage
 endif
@@ -291,6 +293,9 @@
 # Convert bzImage to zImage
 bzImage: zImage
 
+# These targets cannot be built in parallel
+.NOTPARALLEL: zImage Image xipImage bootpImage uImage
+
 zImage Image xipImage bootpImage uImage: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
@@ -303,6 +308,9 @@
 dtbs: scripts
 	$(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) dtbs
 
+zImage-dtb: vmlinux scripts dtbs
+	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
+
 # We use MRPROPER_FILES and CLEAN_FILES now
 archclean:
 	$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/arm/boot/.gitignore b/arch/arm/boot/.gitignore
index 3c79f85..ad7a025 100644
--- a/arch/arm/boot/.gitignore
+++ b/arch/arm/boot/.gitignore
@@ -4,3 +4,4 @@
 bootpImage
 uImage
 *.dtb
+zImage-dtb
\ No newline at end of file
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 84aa2ca..65285bb 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -14,6 +14,7 @@
 ifneq ($(MACHINE),)
 include $(srctree)/$(MACHINE)/Makefile.boot
 endif
+include $(srctree)/arch/arm/boot/dts/Makefile
 
 # Note: the following conditions must always be true:
 #   ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET)
@@ -27,6 +28,14 @@
 
 targets := Image zImage xipImage bootpImage uImage
 
+DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE_NAMES))
+ifneq ($(DTB_NAMES),)
+DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
+else
+DTB_LIST := $(dtb-y)
+endif
+DTB_OBJS := $(addprefix $(obj)/dts/,$(DTB_LIST))
+
 ifeq ($(CONFIG_XIP_KERNEL),y)
 
 $(obj)/xipImage: vmlinux FORCE
@@ -55,6 +64,10 @@
 	$(call if_changed,objcopy)
 	@$(kecho) '  Kernel: $@ is ready'
 
+$(obj)/zImage-dtb:	$(obj)/zImage $(DTB_OBJS) FORCE
+	$(call if_changed,cat)
+	@echo '  Kernel: $@ is ready'
+
 endif
 
 ifneq ($(LOADADDR),)
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 9fef67a..d76329e 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -717,6 +717,8 @@
 		bic     r6, r6, #1 << 31        @ 32-bit translation system
 		bic     r6, r6, #(7 << 0) | (1 << 4)	@ use only ttbr0
 		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
+		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
+		mcr	p15, 0, r0, c7, c5, 4	@ ISB
 		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
 		mcrne   p15, 0, r6, c2, c0, 2   @ load ttb control
 #endif
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index f0895c5..b83cc50 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -210,13 +210,20 @@
 	wm8850-w70v2.dtb
 dtb-$(CONFIG_ARCH_ZYNQ) += zynq-zc702.dtb
 
+DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE_NAMES))
+ifneq ($(DTB_NAMES),)
+DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
+else
+DTB_LIST := $(dtb-y)
+endif
+
 targets += dtbs
-targets += $(dtb-y)
+targets += $(DTB_LIST)
 endif
 
 # *.dtb used to be generated in the directory above. Clean out the
 # old build results so people don't accidentally use them.
-dtbs: $(addprefix $(obj)/, $(dtb-y))
+dtbs: $(addprefix $(obj)/, $(DTB_LIST))
 	$(Q)rm -f $(obj)/../*.dtb
 
 clean-files := *.dtb
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 9353184..ce01364 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -17,3 +17,7 @@
 
 config SHARP_SCOOP
 	bool
+
+config FIQ_GLUE
+	bool
+	select FIQ
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 48434cb..0a0821f 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -4,6 +4,7 @@
 
 obj-y				+= firmware.o
 
+obj-$(CONFIG_FIQ_GLUE)		+= fiq_glue.o fiq_glue_setup.o
 obj-$(CONFIG_ICST)		+= icst.o
 obj-$(CONFIG_SA1111)		+= sa1111.o
 obj-$(CONFIG_PCI_HOST_VIA82C505) += via82c505.o
diff --git a/arch/arm/common/fiq_glue.S b/arch/arm/common/fiq_glue.S
new file mode 100644
index 0000000..24b42ce
--- /dev/null
+++ b/arch/arm/common/fiq_glue.S
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+		.text
+
+		.global fiq_glue_end
+
+		/* fiq stack: r0-r15,cpsr,spsr of interrupted mode */
+
+ENTRY(fiq_glue)
+		/* store pc, cpsr from previous mode, reserve space for spsr */
+		mrs	r12, spsr
+		sub	lr, lr, #4
+		subs	r10, #1
+		bne	nested_fiq
+
+		str	r12, [sp, #-8]!
+		str	lr, [sp, #-4]!
+
+		/* store r8-r14 from previous mode */
+		sub	sp, sp, #(7 * 4)
+		stmia	sp, {r8-r14}^
+		nop
+
+		/* store r0-r7 from previous mode */
+		stmfd	sp!, {r0-r7}
+
+		/* setup func(data,regs) arguments */
+		mov	r0, r9
+		mov	r1, sp
+		mov	r3, r8
+
+		mov	r7, sp
+
+		/* Get sp and lr from non-user modes */
+		and	r4, r12, #MODE_MASK
+		cmp	r4, #USR_MODE
+		beq	fiq_from_usr_mode
+
+		mov	r7, sp
+		orr	r4, r4, #(PSR_I_BIT | PSR_F_BIT)
+		msr	cpsr_c, r4
+		str	sp, [r7, #(4 * 13)]
+		str	lr, [r7, #(4 * 14)]
+		mrs	r5, spsr
+		str	r5, [r7, #(4 * 17)]
+
+		cmp	r4, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
+		/* use fiq stack if we reenter this mode */
+		subne	sp, r7, #(4 * 3)
+
+fiq_from_usr_mode:
+		msr	cpsr_c, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
+		mov	r2, sp
+		sub	sp, r7, #12
+		stmfd	sp!, {r2, ip, lr}
+		/* call func(data,regs) */
+		blx	r3
+		ldmfd	sp, {r2, ip, lr}
+		mov	sp, r2
+
+		/* restore/discard saved state */
+		cmp	r4, #USR_MODE
+		beq	fiq_from_usr_mode_exit
+
+		msr	cpsr_c, r4
+		ldr	sp, [r7, #(4 * 13)]
+		ldr	lr, [r7, #(4 * 14)]
+		msr	spsr_cxsf, r5
+
+fiq_from_usr_mode_exit:
+		msr	cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
+
+		ldmfd	sp!, {r0-r7}
+		ldr	lr, [sp, #(4 * 7)]
+		ldr	r12, [sp, #(4 * 8)]
+		add	sp, sp, #(10 * 4)
+exit_fiq:
+		msr	spsr_cxsf, r12
+		add	r10, #1
+		cmp	r11, #0
+		moveqs	pc, lr
+		bx	r11 /* jump to custom fiq return function */
+
+nested_fiq:
+		orr	r12, r12, #(PSR_F_BIT)
+		b	exit_fiq
+
+fiq_glue_end:
+
+ENTRY(fiq_glue_setup) /* func, data, sp, smc call number */
+		stmfd		sp!, {r4}
+		mrs		r4, cpsr
+		msr		cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
+		movs		r8, r0
+		mov		r9, r1
+		mov		sp, r2
+		mov		r11, r3
+		moveq		r10, #0
+		movne		r10, #1
+		msr		cpsr_c, r4
+		ldmfd		sp!, {r4}
+		bx		lr
+
diff --git a/arch/arm/common/fiq_glue_setup.c b/arch/arm/common/fiq_glue_setup.c
new file mode 100644
index 0000000..8cb1b61
--- /dev/null
+++ b/arch/arm/common/fiq_glue_setup.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <asm/fiq.h>
+#include <asm/fiq_glue.h>
+
+extern unsigned char fiq_glue, fiq_glue_end;
+extern void fiq_glue_setup(void *func, void *data, void *sp,
+			   fiq_return_handler_t fiq_return_handler);
+
+static struct fiq_handler fiq_debugger_fiq_handler = {
+	.name = "fiq_glue",
+};
+DEFINE_PER_CPU(void *, fiq_stack);
+static struct fiq_glue_handler *current_handler;
+static fiq_return_handler_t fiq_return_handler;
+static DEFINE_MUTEX(fiq_glue_lock);
+
+static void fiq_glue_setup_helper(void *info)
+{
+	struct fiq_glue_handler *handler = info;
+	fiq_glue_setup(handler->fiq, handler,
+		__get_cpu_var(fiq_stack) + THREAD_START_SP,
+		fiq_return_handler);
+}
+
+int fiq_glue_register_handler(struct fiq_glue_handler *handler)
+{
+	int ret;
+	int cpu;
+
+	if (!handler || !handler->fiq)
+		return -EINVAL;
+
+	mutex_lock(&fiq_glue_lock);
+	if (fiq_stack) {
+		ret = -EBUSY;
+		goto err_busy;
+	}
+
+	for_each_possible_cpu(cpu) {
+		void *stack;
+		stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+		if (WARN_ON(!stack)) {
+			ret = -ENOMEM;
+			goto err_alloc_fiq_stack;
+		}
+		per_cpu(fiq_stack, cpu) = stack;
+	}
+
+	ret = claim_fiq(&fiq_debugger_fiq_handler);
+	if (WARN_ON(ret))
+		goto err_claim_fiq;
+
+	current_handler = handler;
+	on_each_cpu(fiq_glue_setup_helper, handler, true);
+	set_fiq_handler(&fiq_glue, &fiq_glue_end - &fiq_glue);
+
+	mutex_unlock(&fiq_glue_lock);
+	return 0;
+
+err_claim_fiq:
+err_alloc_fiq_stack:
+	for_each_possible_cpu(cpu) {
+		free_pages((unsigned long)per_cpu(fiq_stack, cpu),
+			   THREAD_SIZE_ORDER);
+		per_cpu(fiq_stack, cpu) = NULL;
+	}
+err_busy:
+	mutex_unlock(&fiq_glue_lock);
+	return ret;
+}
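A hypothetical client of the API above; the handler layout (->fiq, ->resume) is assumed from asm/fiq_glue.h as used by this file. The ->fiq callback runs in FIQ mode on the per-cpu stack allocated by fiq_glue_register_handler(), so it must not take locks or allocate memory:

#include <linux/init.h>
#include <asm/fiq_glue.h>

static void my_fiq(struct fiq_glue_handler *h, void *regs, void *svc_sp)
{
	/* minimal FIQ-mode work only: poke hardware, set a flag */
}

static struct fiq_glue_handler my_fiq_handler = {
	.fiq = my_fiq,
};

static int __init my_fiq_init(void)
{
	return fiq_glue_register_handler(&my_fiq_handler);
}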
+
+static void fiq_glue_update_return_handler(void (*fiq_return)(void))
+{
+	fiq_return_handler = fiq_return;
+	if (current_handler)
+		on_each_cpu(fiq_glue_setup_helper, current_handler, true);
+}
+
+int fiq_glue_set_return_handler(void (*fiq_return)(void))
+{
+	int ret;
+
+	mutex_lock(&fiq_glue_lock);
+	if (fiq_return_handler) {
+		ret = -EBUSY;
+		goto err_busy;
+	}
+	fiq_glue_update_return_handler(fiq_return);
+	ret = 0;
+err_busy:
+	mutex_unlock(&fiq_glue_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(fiq_glue_set_return_handler);
+
+int fiq_glue_clear_return_handler(void (*fiq_return)(void))
+{
+	int ret;
+
+	mutex_lock(&fiq_glue_lock);
+	if (WARN_ON(fiq_return_handler != fiq_return)) {
+		ret = -EINVAL;
+		goto err_inval;
+	}
+	fiq_glue_update_return_handler(NULL);
+	ret = 0;
+err_inval:
+	mutex_unlock(&fiq_glue_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(fiq_glue_clear_return_handler);
+
+/**
+ * fiq_glue_resume - Restore fiqs after suspend or low power idle states
+ *
+ * This must be called before calling local_fiq_enable after returning from a
+ * power state where the fiq mode registers were lost. If a driver provided
+ * a resume hook when it registered the handler it will be called.
+ */
+
+void fiq_glue_resume(void)
+{
+	if (!current_handler)
+		return;
+	fiq_glue_setup(current_handler->fiq, current_handler,
+		__get_cpu_var(fiq_stack) + THREAD_START_SP,
+		fiq_return_handler);
+	if (current_handler->resume)
+		current_handler->resume(current_handler);
+}
+
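The ordering constraint in the comment above, as a sketch of a platform resume path (restore_fiq_mode_regs() is a hypothetical platform hook):

#include <linux/irqflags.h>
#include <asm/fiq_glue.h>

/* hypothetical platform hook: restore the banked FIQ mode registers */
static void restore_fiq_mode_regs(void) { }

static void platform_fiq_resume(void)
{
	restore_fiq_mode_regs();
	fiq_glue_resume();	/* re-install glue handler and per-cpu stack */
	local_fiq_enable();	/* only safe after fiq_glue_resume() */
}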
diff --git a/arch/arm/crypto/.gitignore b/arch/arm/crypto/.gitignore
new file mode 100644
index 0000000..6231d36
--- /dev/null
+++ b/arch/arm/crypto/.gitignore
@@ -0,0 +1 @@
+aesbs-core.S
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index a2c8385..2cee53b 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -3,7 +3,27 @@
 #
 
 obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o
+obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o
 obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o
+obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
+obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
+obj-$(CONFIG_CRYPTO_SHA512_ARM_NEON) += sha512-arm-neon.o
 
-aes-arm-y  := aes-armv4.o aes_glue.o
-sha1-arm-y := sha1-armv4-large.o sha1_glue.o
+aes-arm-y	:= aes-armv4.o aes_glue.o
+aes-arm-bs-y	:= aesbs-core.o aesbs-glue.o
+sha1-arm-y	:= sha1-armv4-large.o sha1_glue.o
+sha1-arm-neon-y	:= sha1-armv7-neon.o sha1_neon_glue.o
+sha256-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha256_neon_glue.o
+sha256-arm-y	:= sha256-core.o sha256_glue.o $(sha256-arm-neon-y)
+sha512-arm-neon-y := sha512-armv7-neon.o sha512_neon_glue.o
+
+quiet_cmd_perl = PERL    $@
+      cmd_perl = $(PERL) $(<) > $(@)
+
+$(src)/aesbs-core.S_shipped: $(src)/bsaes-armv7.pl
+	$(call cmd,perl)
+
+$(src)/sha256-core.S_shipped: $(src)/sha256-armv4.pl
+	$(call cmd,perl)
+
+.PRECIOUS: $(obj)/aesbs-core.S $(obj)/sha256-core.S
diff --git a/arch/arm/crypto/aes_glue.c b/arch/arm/crypto/aes_glue.c
index e73ec2a..0409b8f 100644
--- a/arch/arm/crypto/aes_glue.c
+++ b/arch/arm/crypto/aes_glue.c
@@ -6,22 +6,12 @@
 #include <linux/crypto.h>
 #include <crypto/aes.h>
 
-#define AES_MAXNR 14
+#include "aes_glue.h"
 
-typedef struct {
-	unsigned int rd_key[4 *(AES_MAXNR + 1)];
-	int rounds;
-} AES_KEY;
-
-struct AES_CTX {
-	AES_KEY enc_key;
-	AES_KEY dec_key;
-};
-
-asmlinkage void AES_encrypt(const u8 *in, u8 *out, AES_KEY *ctx);
-asmlinkage void AES_decrypt(const u8 *in, u8 *out, AES_KEY *ctx);
-asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
-asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
+EXPORT_SYMBOL(AES_encrypt);
+EXPORT_SYMBOL(AES_decrypt);
+EXPORT_SYMBOL(private_AES_set_encrypt_key);
+EXPORT_SYMBOL(private_AES_set_decrypt_key);
 
 static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
@@ -81,7 +71,7 @@
 		.cipher	= {
 			.cia_min_keysize	= AES_MIN_KEY_SIZE,
 			.cia_max_keysize	= AES_MAX_KEY_SIZE,
-			.cia_setkey			= aes_set_key,
+			.cia_setkey		= aes_set_key,
 			.cia_encrypt		= aes_encrypt,
 			.cia_decrypt		= aes_decrypt
 		}
diff --git a/arch/arm/crypto/aes_glue.h b/arch/arm/crypto/aes_glue.h
new file mode 100644
index 0000000..cca3e51
--- /dev/null
+++ b/arch/arm/crypto/aes_glue.h
@@ -0,0 +1,19 @@
+
+#define AES_MAXNR 14
+
+struct AES_KEY {
+	unsigned int rd_key[4 * (AES_MAXNR + 1)];
+	int rounds;
+};
+
+struct AES_CTX {
+	struct AES_KEY enc_key;
+	struct AES_KEY dec_key;
+};
+
+asmlinkage void AES_encrypt(const u8 *in, u8 *out, struct AES_KEY *ctx);
+asmlinkage void AES_decrypt(const u8 *in, u8 *out, struct AES_KEY *ctx);
+asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey,
+					   const int bits, struct AES_KEY *key);
+asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey,
+					   const int bits, struct AES_KEY *key);
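A minimal sketch of driving the asm entry points declared above (the key and block values are placeholders; private_AES_set_encrypt_key() is assumed to return 0 on success, per the OpenSSL convention this code derives from):

#include <linux/errno.h>
#include <linux/types.h>

#include "aes_glue.h"

static int aes_arm_smoke_test(void)
{
	struct AES_KEY enc;
	static const u8 key[16];	/* all-zero placeholder key */
	u8 block[16] = { 0 };		/* one 16-byte block, in place */

	if (private_AES_set_encrypt_key(key, 128, &enc) != 0)
		return -EINVAL;
	AES_encrypt(block, block, &enc);
	return 0;
}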
diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped
new file mode 100644
index 0000000..71e5fc7
--- /dev/null
+++ b/arch/arm/crypto/aesbs-core.S_shipped
@@ -0,0 +1,2544 @@
+
+@ ====================================================================
+@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+@ project. The module is, however, dual licensed under OpenSSL and
+@ CRYPTOGAMS licenses depending on where you obtain it. For further
+@ details see http://www.openssl.org/~appro/cryptogams/.
+@
+@ Specific modes and adaptation for Linux kernel by Ard Biesheuvel
+@ <ard.biesheuvel@linaro.org>. Permission to use under GPL terms is
+@ granted.
+@ ====================================================================
+
+@ Bit-sliced AES for ARM NEON
+@
+@ February 2012.
+@
+@ This implementation is direct adaptation of bsaes-x86_64 module for
+@ ARM NEON. Except that this module is endian-neutral [in sense that
+@ it can be compiled for either endianness] by courtesy of vld1.8's
+@ neutrality. Initial version doesn't implement interface to OpenSSL,
+@ only low-level primitives and unsupported entry points, just enough
+@ to collect performance results, which for Cortex-A8 core are:
+@
+@ encrypt	19.5 cycles per byte processed with 128-bit key
+@ decrypt	22.1 cycles per byte processed with 128-bit key
+@ key conv.	440  cycles per 128-bit key/0.18 of 8x block
+@
+@ Snapdragon S4 encrypts byte in 17.6 cycles and decrypts in 19.7,
+@ which is [much] worse than anticipated (for further details see
+@ http://www.openssl.org/~appro/Snapdragon-S4.html).
+@
+@ Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code
+@ manages in 20.0 cycles].
+@
+@ When comparing to x86_64 results keep in mind that NEON unit is
+@ [mostly] single-issue and thus can't [fully] benefit from
+@ instruction-level parallelism. And when comparing to aes-armv4
+@ results keep in mind key schedule conversion overhead (see
+@ bsaes-x86_64.pl for further details)...
+@
+@						<appro@openssl.org>
+
+@ April-August 2013
+@
+@ Add CBC, CTR and XTS subroutines, adapt for kernel use.
+@
+@					<ard.biesheuvel@linaro.org>
+
+#ifndef __KERNEL__
+# include "arm_arch.h"
+
+# define VFP_ABI_PUSH	vstmdb	sp!,{d8-d15}
+# define VFP_ABI_POP	vldmia	sp!,{d8-d15}
+# define VFP_ABI_FRAME	0x40
+#else
+# define VFP_ABI_PUSH
+# define VFP_ABI_POP
+# define VFP_ABI_FRAME	0
+# define BSAES_ASM_EXTENDED_KEY
+# define XTS_CHAIN_TWEAK
+# define __ARM_ARCH__	7
+#endif
+
+#ifdef __thumb__
+# define adrl adr
+#endif
+
+#if __ARM_ARCH__>=7
+.text
+.syntax	unified 	@ ARMv7-capable assembler is expected to handle this
+#ifdef __thumb2__
+.thumb
+#else
+.code   32
+#endif
+
+.fpu	neon
+
+.type	_bsaes_decrypt8,%function
+.align	4
+_bsaes_decrypt8:
+	adr	r6,_bsaes_decrypt8
+	vldmia	r4!, {q9}		@ round 0 key
+	add	r6,r6,#.LM0ISR-_bsaes_decrypt8
+
+	vldmia	r6!, {q8}		@ .LM0ISR
+	veor	q10, q0, q9	@ xor with round0 key
+	veor	q11, q1, q9
+	 vtbl.8	d0, {q10}, d16
+	 vtbl.8	d1, {q10}, d17
+	veor	q12, q2, q9
+	 vtbl.8	d2, {q11}, d16
+	 vtbl.8	d3, {q11}, d17
+	veor	q13, q3, q9
+	 vtbl.8	d4, {q12}, d16
+	 vtbl.8	d5, {q12}, d17
+	veor	q14, q4, q9
+	 vtbl.8	d6, {q13}, d16
+	 vtbl.8	d7, {q13}, d17
+	veor	q15, q5, q9
+	 vtbl.8	d8, {q14}, d16
+	 vtbl.8	d9, {q14}, d17
+	veor	q10, q6, q9
+	 vtbl.8	d10, {q15}, d16
+	 vtbl.8	d11, {q15}, d17
+	veor	q11, q7, q9
+	 vtbl.8	d12, {q10}, d16
+	 vtbl.8	d13, {q10}, d17
+	 vtbl.8	d14, {q11}, d16
+	 vtbl.8	d15, {q11}, d17
+	vmov.i8	q8,#0x55			@ compose .LBS0
+	vmov.i8	q9,#0x33			@ compose .LBS1
+	vshr.u64	q10, q6, #1
+	 vshr.u64	q11, q4, #1
+	veor		q10, q10, q7
+	 veor		q11, q11, q5
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q7, q7, q10
+	vshl.u64	q10, q10, #1
+	 veor		q5, q5, q11
+	 vshl.u64	q11, q11, #1
+	veor		q6, q6, q10
+	 veor		q4, q4, q11
+	vshr.u64	q10, q2, #1
+	 vshr.u64	q11, q0, #1
+	veor		q10, q10, q3
+	 veor		q11, q11, q1
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q3, q3, q10
+	vshl.u64	q10, q10, #1
+	 veor		q1, q1, q11
+	 vshl.u64	q11, q11, #1
+	veor		q2, q2, q10
+	 veor		q0, q0, q11
+	vmov.i8	q8,#0x0f			@ compose .LBS2
+	vshr.u64	q10, q5, #2
+	 vshr.u64	q11, q4, #2
+	veor		q10, q10, q7
+	 veor		q11, q11, q6
+	vand		q10, q10, q9
+	 vand		q11, q11, q9
+	veor		q7, q7, q10
+	vshl.u64	q10, q10, #2
+	 veor		q6, q6, q11
+	 vshl.u64	q11, q11, #2
+	veor		q5, q5, q10
+	 veor		q4, q4, q11
+	vshr.u64	q10, q1, #2
+	 vshr.u64	q11, q0, #2
+	veor		q10, q10, q3
+	 veor		q11, q11, q2
+	vand		q10, q10, q9
+	 vand		q11, q11, q9
+	veor		q3, q3, q10
+	vshl.u64	q10, q10, #2
+	 veor		q2, q2, q11
+	 vshl.u64	q11, q11, #2
+	veor		q1, q1, q10
+	 veor		q0, q0, q11
+	vshr.u64	q10, q3, #4
+	 vshr.u64	q11, q2, #4
+	veor		q10, q10, q7
+	 veor		q11, q11, q6
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q7, q7, q10
+	vshl.u64	q10, q10, #4
+	 veor		q6, q6, q11
+	 vshl.u64	q11, q11, #4
+	veor		q3, q3, q10
+	 veor		q2, q2, q11
+	vshr.u64	q10, q1, #4
+	 vshr.u64	q11, q0, #4
+	veor		q10, q10, q5
+	 veor		q11, q11, q4
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q5, q5, q10
+	vshl.u64	q10, q10, #4
+	 veor		q4, q4, q11
+	 vshl.u64	q11, q11, #4
+	veor		q1, q1, q10
+	 veor		q0, q0, q11
+	sub	r5,r5,#1
+	b	.Ldec_sbox
+.align	4
+.Ldec_loop:
+	vldmia	r4!, {q8-q11}
+	veor	q8, q8, q0
+	veor	q9, q9, q1
+	vtbl.8	d0, {q8}, d24
+	vtbl.8	d1, {q8}, d25
+	vldmia	r4!, {q8}
+	veor	q10, q10, q2
+	vtbl.8	d2, {q9}, d24
+	vtbl.8	d3, {q9}, d25
+	vldmia	r4!, {q9}
+	veor	q11, q11, q3
+	vtbl.8	d4, {q10}, d24
+	vtbl.8	d5, {q10}, d25
+	vldmia	r4!, {q10}
+	vtbl.8	d6, {q11}, d24
+	vtbl.8	d7, {q11}, d25
+	vldmia	r4!, {q11}
+	veor	q8, q8, q4
+	veor	q9, q9, q5
+	vtbl.8	d8, {q8}, d24
+	vtbl.8	d9, {q8}, d25
+	veor	q10, q10, q6
+	vtbl.8	d10, {q9}, d24
+	vtbl.8	d11, {q9}, d25
+	veor	q11, q11, q7
+	vtbl.8	d12, {q10}, d24
+	vtbl.8	d13, {q10}, d25
+	vtbl.8	d14, {q11}, d24
+	vtbl.8	d15, {q11}, d25
+.Ldec_sbox:
+	 veor	q1, q1, q4
+	veor	q3, q3, q4
+
+	veor	q4, q4, q7
+	 veor	q1, q1, q6
+	veor	q2, q2, q7
+	veor	q6, q6, q4
+
+	veor	q0, q0, q1
+	veor	q2, q2, q5
+	 veor	q7, q7, q6
+	veor	q3, q3, q0
+	veor	q5, q5, q0
+	veor	q1, q1, q3
+	veor	q11, q3, q0
+	veor	q10, q7, q4
+	veor	q9, q1, q6
+	veor	q13, q4, q0
+	 vmov	q8, q10
+	veor	q12, q5, q2
+
+	vorr	q10, q10, q9
+	veor	q15, q11, q8
+	vand	q14, q11, q12
+	vorr	q11, q11, q12
+	veor	q12, q12, q9
+	vand	q8, q8, q9
+	veor	q9, q6, q2
+	vand	q15, q15, q12
+	vand	q13, q13, q9
+	veor	q9, q3, q7
+	veor	q12, q1, q5
+	veor	q11, q11, q13
+	veor	q10, q10, q13
+	vand	q13, q9, q12
+	vorr	q9, q9, q12
+	veor	q11, q11, q15
+	veor	q8, q8, q13
+	veor	q10, q10, q14
+	veor	q9, q9, q15
+	veor	q8, q8, q14
+	vand	q12, q4, q6
+	veor	q9, q9, q14
+	vand	q13, q0, q2
+	vand	q14, q7, q1
+	vorr	q15, q3, q5
+	veor	q11, q11, q12
+	veor	q9, q9, q14
+	veor	q8, q8, q15
+	veor	q10, q10, q13
+
+	@ Inv_GF16 	0, 	1, 	2, 	3, s0, s1, s2, s3
+
+	@ new smaller inversion
+
+	vand	q14, q11, q9
+	vmov	q12, q8
+
+	veor	q13, q10, q14
+	veor	q15, q8, q14
+	veor	q14, q8, q14	@ q14=q15
+
+	vbsl	q13, q9, q8
+	vbsl	q15, q11, q10
+	veor	q11, q11, q10
+
+	vbsl	q12, q13, q14
+	vbsl	q8, q14, q13
+
+	vand	q14, q12, q15
+	veor	q9, q9, q8
+
+	veor	q14, q14, q11
+	veor	q12, q5, q2
+	veor	q8, q1, q6
+	veor 	q10, q15, q14
+	vand	q10, q10, q5
+	veor	q5, q5, q1
+	vand	q11, q1, q15
+	vand	q5, q5, q14
+	veor	q1, q11, q10
+	veor	q5, q5, q11
+	veor	q15, q15, q13
+	veor	q14, q14, q9
+	veor	q11, q15, q14
+	 veor 	q10, q13, q9
+	vand	q11, q11, q12
+	 vand	q10, q10, q2
+	veor	q12, q12, q8
+	 veor	q2, q2, q6
+	vand	q8, q8, q15
+	 vand	q6, q6, q13
+	vand	q12, q12, q14
+	 vand	q2, q2, q9
+	veor	q8, q8, q12
+	 veor	q2, q2, q6
+	veor	q12, q12, q11
+	 veor	q6, q6, q10
+	veor	q5, q5, q12
+	veor	q2, q2, q12
+	veor	q1, q1, q8
+	veor	q6, q6, q8
+
+	veor	q12, q3, q0
+	veor	q8, q7, q4
+	veor	q11, q15, q14
+	 veor 	q10, q13, q9
+	vand	q11, q11, q12
+	 vand	q10, q10, q0
+	veor	q12, q12, q8
+	 veor	q0, q0, q4
+	vand	q8, q8, q15
+	 vand	q4, q4, q13
+	vand	q12, q12, q14
+	 vand	q0, q0, q9
+	veor	q8, q8, q12
+	 veor	q0, q0, q4
+	veor	q12, q12, q11
+	 veor	q4, q4, q10
+	veor	q15, q15, q13
+	veor	q14, q14, q9
+	veor 	q10, q15, q14
+	vand	q10, q10, q3
+	veor	q3, q3, q7
+	vand	q11, q7, q15
+	vand	q3, q3, q14
+	veor	q7, q11, q10
+	veor	q3, q3, q11
+	veor	q3, q3, q12
+	veor	q0, q0, q12
+	veor	q7, q7, q8
+	veor	q4, q4, q8
+	veor	q1, q1, q7
+	veor	q6, q6, q5
+
+	veor	q4, q4, q1
+	veor	q2, q2, q7
+	veor	q5, q5, q7
+	veor	q4, q4, q2
+	 veor 	q7, q7, q0
+	veor	q4, q4, q5
+	 veor	q3, q3, q6
+	 veor	q6, q6, q1
+	veor	q3, q3, q4
+
+	veor	q4, q4, q0
+	veor	q7, q7, q3
+	subs	r5,r5,#1
+	bcc	.Ldec_done
+	@ multiplication by 0x05-0x00-0x04-0x00
+	vext.8	q8, q0, q0, #8
+	vext.8	q14, q3, q3, #8
+	vext.8	q15, q5, q5, #8
+	veor	q8, q8, q0
+	vext.8	q9, q1, q1, #8
+	veor	q14, q14, q3
+	vext.8	q10, q6, q6, #8
+	veor	q15, q15, q5
+	vext.8	q11, q4, q4, #8
+	veor	q9, q9, q1
+	vext.8	q12, q2, q2, #8
+	veor	q10, q10, q6
+	vext.8	q13, q7, q7, #8
+	veor	q11, q11, q4
+	veor	q12, q12, q2
+	veor	q13, q13, q7
+
+	 veor	q0, q0, q14
+	 veor	q1, q1, q14
+	 veor	q6, q6, q8
+	 veor	q2, q2, q10
+	 veor	q4, q4, q9
+	 veor	q1, q1, q15
+	 veor	q6, q6, q15
+	 veor	q2, q2, q14
+	 veor	q7, q7, q11
+	 veor	q4, q4, q14
+	 veor	q3, q3, q12
+	 veor	q2, q2, q15
+	 veor	q7, q7, q15
+	 veor	q5, q5, q13
+	vext.8	q8, q0, q0, #12	@ x0 <<< 32
+	vext.8	q9, q1, q1, #12
+	 veor	q0, q0, q8		@ x0 ^ (x0 <<< 32)
+	vext.8	q10, q6, q6, #12
+	 veor	q1, q1, q9
+	vext.8	q11, q4, q4, #12
+	 veor	q6, q6, q10
+	vext.8	q12, q2, q2, #12
+	 veor	q4, q4, q11
+	vext.8	q13, q7, q7, #12
+	 veor	q2, q2, q12
+	vext.8	q14, q3, q3, #12
+	 veor	q7, q7, q13
+	vext.8	q15, q5, q5, #12
+	 veor	q3, q3, q14
+
+	veor	q9, q9, q0
+	 veor	q5, q5, q15
+	 vext.8	q0, q0, q0, #8		@ (x0 ^ (x0 <<< 32)) <<< 64
+	veor	q10, q10, q1
+	veor	q8, q8, q5
+	veor	q9, q9, q5
+	 vext.8	q1, q1, q1, #8
+	veor	q13, q13, q2
+	 veor	q0, q0, q8
+	veor	q14, q14, q7
+	 veor	q1, q1, q9
+	 vext.8	q8, q2, q2, #8
+	veor	q12, q12, q4
+	 vext.8	q9, q7, q7, #8
+	veor	q15, q15, q3
+	 vext.8	q2, q4, q4, #8
+	veor	q11, q11, q6
+	 vext.8	q7, q5, q5, #8
+	veor	q12, q12, q5
+	 vext.8	q4, q3, q3, #8
+	veor	q11, q11, q5
+	 vext.8	q3, q6, q6, #8
+	veor	q5, q9, q13
+	veor	q11, q11, q2
+	veor	q7, q7, q15
+	veor	q6, q4, q14
+	veor	q4, q8, q12
+	veor	q2, q3, q10
+	vmov	q3, q11
+	 @ vmov	q5, q9
+	vldmia	r6, {q12}		@ .LISR
+	ite	eq				@ Thumb2 thing, sanity check in ARM
+	addeq	r6,r6,#0x10
+	bne	.Ldec_loop
+	vldmia	r6, {q12}		@ .LISRM0
+	b	.Ldec_loop
+.align	4
+.Ldec_done:
+	vmov.i8	q8,#0x55			@ compose .LBS0
+	vmov.i8	q9,#0x33			@ compose .LBS1
+	vshr.u64	q10, q3, #1
+	 vshr.u64	q11, q2, #1
+	veor		q10, q10, q5
+	 veor		q11, q11, q7
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q5, q5, q10
+	vshl.u64	q10, q10, #1
+	 veor		q7, q7, q11
+	 vshl.u64	q11, q11, #1
+	veor		q3, q3, q10
+	 veor		q2, q2, q11
+	vshr.u64	q10, q6, #1
+	 vshr.u64	q11, q0, #1
+	veor		q10, q10, q4
+	 veor		q11, q11, q1
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q4, q4, q10
+	vshl.u64	q10, q10, #1
+	 veor		q1, q1, q11
+	 vshl.u64	q11, q11, #1
+	veor		q6, q6, q10
+	 veor		q0, q0, q11
+	vmov.i8	q8,#0x0f			@ compose .LBS2
+	vshr.u64	q10, q7, #2
+	 vshr.u64	q11, q2, #2
+	veor		q10, q10, q5
+	 veor		q11, q11, q3
+	vand		q10, q10, q9
+	 vand		q11, q11, q9
+	veor		q5, q5, q10
+	vshl.u64	q10, q10, #2
+	 veor		q3, q3, q11
+	 vshl.u64	q11, q11, #2
+	veor		q7, q7, q10
+	 veor		q2, q2, q11
+	vshr.u64	q10, q1, #2
+	 vshr.u64	q11, q0, #2
+	veor		q10, q10, q4
+	 veor		q11, q11, q6
+	vand		q10, q10, q9
+	 vand		q11, q11, q9
+	veor		q4, q4, q10
+	vshl.u64	q10, q10, #2
+	 veor		q6, q6, q11
+	 vshl.u64	q11, q11, #2
+	veor		q1, q1, q10
+	 veor		q0, q0, q11
+	vshr.u64	q10, q4, #4
+	 vshr.u64	q11, q6, #4
+	veor		q10, q10, q5
+	 veor		q11, q11, q3
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q5, q5, q10
+	vshl.u64	q10, q10, #4
+	 veor		q3, q3, q11
+	 vshl.u64	q11, q11, #4
+	veor		q4, q4, q10
+	 veor		q6, q6, q11
+	vshr.u64	q10, q1, #4
+	 vshr.u64	q11, q0, #4
+	veor		q10, q10, q7
+	 veor		q11, q11, q2
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q7, q7, q10
+	vshl.u64	q10, q10, #4
+	 veor		q2, q2, q11
+	 vshl.u64	q11, q11, #4
+	veor		q1, q1, q10
+	 veor		q0, q0, q11
+	vldmia	r4, {q8}			@ last round key
+	veor	q6, q6, q8
+	veor	q4, q4, q8
+	veor	q2, q2, q8
+	veor	q7, q7, q8
+	veor	q3, q3, q8
+	veor	q5, q5, q8
+	veor	q0, q0, q8
+	veor	q1, q1, q8
+	bx	lr
+.size	_bsaes_decrypt8,.-_bsaes_decrypt8
+
+.type	_bsaes_const,%object
+.align	6
+_bsaes_const:
+.LM0ISR:	@ InvShiftRows constants
+	.quad	0x0a0e0206070b0f03, 0x0004080c0d010509
+.LISR:
+	.quad	0x0504070602010003, 0x0f0e0d0c080b0a09
+.LISRM0:
+	.quad	0x01040b0e0205080f, 0x0306090c00070a0d
+.LM0SR:		@ ShiftRows constants
+	.quad	0x0a0e02060f03070b, 0x0004080c05090d01
+.LSR:
+	.quad	0x0504070600030201, 0x0f0e0d0c0a09080b
+.LSRM0:
+	.quad	0x0304090e00050a0f, 0x01060b0c0207080d
+.LM0:
+	.quad	0x02060a0e03070b0f, 0x0004080c0105090d
+.LREVM0SR:
+	.quad	0x090d01050c000408, 0x03070b0f060a0e02
+.asciz	"Bit-sliced AES for NEON, CRYPTOGAMS by <appro@openssl.org>"
+.align	6
+.size	_bsaes_const,.-_bsaes_const
+
+.type	_bsaes_encrypt8,%function
+.align	4
+_bsaes_encrypt8:
+	adr	r6,_bsaes_encrypt8
+	vldmia	r4!, {q9}		@ round 0 key
+	sub	r6,r6,#_bsaes_encrypt8-.LM0SR
+
+	vldmia	r6!, {q8}		@ .LM0SR
+_bsaes_encrypt8_alt:
+	veor	q10, q0, q9	@ xor with round0 key
+	veor	q11, q1, q9
+	 vtbl.8	d0, {q10}, d16
+	 vtbl.8	d1, {q10}, d17
+	veor	q12, q2, q9
+	 vtbl.8	d2, {q11}, d16
+	 vtbl.8	d3, {q11}, d17
+	veor	q13, q3, q9
+	 vtbl.8	d4, {q12}, d16
+	 vtbl.8	d5, {q12}, d17
+	veor	q14, q4, q9
+	 vtbl.8	d6, {q13}, d16
+	 vtbl.8	d7, {q13}, d17
+	veor	q15, q5, q9
+	 vtbl.8	d8, {q14}, d16
+	 vtbl.8	d9, {q14}, d17
+	veor	q10, q6, q9
+	 vtbl.8	d10, {q15}, d16
+	 vtbl.8	d11, {q15}, d17
+	veor	q11, q7, q9
+	 vtbl.8	d12, {q10}, d16
+	 vtbl.8	d13, {q10}, d17
+	 vtbl.8	d14, {q11}, d16
+	 vtbl.8	d15, {q11}, d17
+_bsaes_encrypt8_bitslice:
+	vmov.i8	q8,#0x55			@ compose .LBS0
+	vmov.i8	q9,#0x33			@ compose .LBS1
+	vshr.u64	q10, q6, #1
+	 vshr.u64	q11, q4, #1
+	veor		q10, q10, q7
+	 veor		q11, q11, q5
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q7, q7, q10
+	vshl.u64	q10, q10, #1
+	 veor		q5, q5, q11
+	 vshl.u64	q11, q11, #1
+	veor		q6, q6, q10
+	 veor		q4, q4, q11
+	vshr.u64	q10, q2, #1
+	 vshr.u64	q11, q0, #1
+	veor		q10, q10, q3
+	 veor		q11, q11, q1
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q3, q3, q10
+	vshl.u64	q10, q10, #1
+	 veor		q1, q1, q11
+	 vshl.u64	q11, q11, #1
+	veor		q2, q2, q10
+	 veor		q0, q0, q11
+	vmov.i8	q8,#0x0f			@ compose .LBS2
+	vshr.u64	q10, q5, #2
+	 vshr.u64	q11, q4, #2
+	veor		q10, q10, q7
+	 veor		q11, q11, q6
+	vand		q10, q10, q9
+	 vand		q11, q11, q9
+	veor		q7, q7, q10
+	vshl.u64	q10, q10, #2
+	 veor		q6, q6, q11
+	 vshl.u64	q11, q11, #2
+	veor		q5, q5, q10
+	 veor		q4, q4, q11
+	vshr.u64	q10, q1, #2
+	 vshr.u64	q11, q0, #2
+	veor		q10, q10, q3
+	 veor		q11, q11, q2
+	vand		q10, q10, q9
+	 vand		q11, q11, q9
+	veor		q3, q3, q10
+	vshl.u64	q10, q10, #2
+	 veor		q2, q2, q11
+	 vshl.u64	q11, q11, #2
+	veor		q1, q1, q10
+	 veor		q0, q0, q11
+	vshr.u64	q10, q3, #4
+	 vshr.u64	q11, q2, #4
+	veor		q10, q10, q7
+	 veor		q11, q11, q6
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q7, q7, q10
+	vshl.u64	q10, q10, #4
+	 veor		q6, q6, q11
+	 vshl.u64	q11, q11, #4
+	veor		q3, q3, q10
+	 veor		q2, q2, q11
+	vshr.u64	q10, q1, #4
+	 vshr.u64	q11, q0, #4
+	veor		q10, q10, q5
+	 veor		q11, q11, q4
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q5, q5, q10
+	vshl.u64	q10, q10, #4
+	 veor		q4, q4, q11
+	 vshl.u64	q11, q11, #4
+	veor		q1, q1, q10
+	 veor		q0, q0, q11
+	sub	r5,r5,#1
+	b	.Lenc_sbox
+.align	4
+.Lenc_loop:
+	vldmia	r4!, {q8-q11}
+	veor	q8, q8, q0
+	veor	q9, q9, q1
+	vtbl.8	d0, {q8}, d24
+	vtbl.8	d1, {q8}, d25
+	vldmia	r4!, {q8}
+	veor	q10, q10, q2
+	vtbl.8	d2, {q9}, d24
+	vtbl.8	d3, {q9}, d25
+	vldmia	r4!, {q9}
+	veor	q11, q11, q3
+	vtbl.8	d4, {q10}, d24
+	vtbl.8	d5, {q10}, d25
+	vldmia	r4!, {q10}
+	vtbl.8	d6, {q11}, d24
+	vtbl.8	d7, {q11}, d25
+	vldmia	r4!, {q11}
+	veor	q8, q8, q4
+	veor	q9, q9, q5
+	vtbl.8	d8, {q8}, d24
+	vtbl.8	d9, {q8}, d25
+	veor	q10, q10, q6
+	vtbl.8	d10, {q9}, d24
+	vtbl.8	d11, {q9}, d25
+	veor	q11, q11, q7
+	vtbl.8	d12, {q10}, d24
+	vtbl.8	d13, {q10}, d25
+	vtbl.8	d14, {q11}, d24
+	vtbl.8	d15, {q11}, d25
+.Lenc_sbox:
+	veor	q2, q2, q1
+	veor	q5, q5, q6
+	veor	q3, q3, q0
+	veor	q6, q6, q2
+	veor	q5, q5, q0
+
+	veor	q6, q6, q3
+	veor	q3, q3, q7
+	veor	q7, q7, q5
+	veor	q3, q3, q4
+	veor	q4, q4, q5
+
+	veor	q2, q2, q7
+	veor	q3, q3, q1
+	veor	q1, q1, q5
+	veor	q11, q7, q4
+	veor	q10, q1, q2
+	veor	q9, q5, q3
+	veor	q13, q2, q4
+	 vmov	q8, q10
+	veor	q12, q6, q0
+
+	vorr	q10, q10, q9
+	veor	q15, q11, q8
+	vand	q14, q11, q12
+	vorr	q11, q11, q12
+	veor	q12, q12, q9
+	vand	q8, q8, q9
+	veor	q9, q3, q0
+	vand	q15, q15, q12
+	vand	q13, q13, q9
+	veor	q9, q7, q1
+	veor	q12, q5, q6
+	veor	q11, q11, q13
+	veor	q10, q10, q13
+	vand	q13, q9, q12
+	vorr	q9, q9, q12
+	veor	q11, q11, q15
+	veor	q8, q8, q13
+	veor	q10, q10, q14
+	veor	q9, q9, q15
+	veor	q8, q8, q14
+	vand	q12, q2, q3
+	veor	q9, q9, q14
+	vand	q13, q4, q0
+	vand	q14, q1, q5
+	vorr	q15, q7, q6
+	veor	q11, q11, q12
+	veor	q9, q9, q14
+	veor	q8, q8, q15
+	veor	q10, q10, q13
+
+	@ Inv_GF16 	0, 	1, 	2, 	3, s0, s1, s2, s3
+
+	@ new smaller inversion
+
+	vand	q14, q11, q9
+	vmov	q12, q8
+
+	veor	q13, q10, q14
+	veor	q15, q8, q14
+	veor	q14, q8, q14	@ q14=q15
+
+	vbsl	q13, q9, q8
+	vbsl	q15, q11, q10
+	veor	q11, q11, q10
+
+	vbsl	q12, q13, q14
+	vbsl	q8, q14, q13
+
+	vand	q14, q12, q15
+	veor	q9, q9, q8
+
+	veor	q14, q14, q11
+	veor	q12, q6, q0
+	veor	q8, q5, q3
+	veor 	q10, q15, q14
+	vand	q10, q10, q6
+	veor	q6, q6, q5
+	vand	q11, q5, q15
+	vand	q6, q6, q14
+	veor	q5, q11, q10
+	veor	q6, q6, q11
+	veor	q15, q15, q13
+	veor	q14, q14, q9
+	veor	q11, q15, q14
+	 veor 	q10, q13, q9
+	vand	q11, q11, q12
+	 vand	q10, q10, q0
+	veor	q12, q12, q8
+	 veor	q0, q0, q3
+	vand	q8, q8, q15
+	 vand	q3, q3, q13
+	vand	q12, q12, q14
+	 vand	q0, q0, q9
+	veor	q8, q8, q12
+	 veor	q0, q0, q3
+	veor	q12, q12, q11
+	 veor	q3, q3, q10
+	veor	q6, q6, q12
+	veor	q0, q0, q12
+	veor	q5, q5, q8
+	veor	q3, q3, q8
+
+	veor	q12, q7, q4
+	veor	q8, q1, q2
+	veor	q11, q15, q14
+	 veor 	q10, q13, q9
+	vand	q11, q11, q12
+	 vand	q10, q10, q4
+	veor	q12, q12, q8
+	 veor	q4, q4, q2
+	vand	q8, q8, q15
+	 vand	q2, q2, q13
+	vand	q12, q12, q14
+	 vand	q4, q4, q9
+	veor	q8, q8, q12
+	 veor	q4, q4, q2
+	veor	q12, q12, q11
+	 veor	q2, q2, q10
+	veor	q15, q15, q13
+	veor	q14, q14, q9
+	veor 	q10, q15, q14
+	vand	q10, q10, q7
+	veor	q7, q7, q1
+	vand	q11, q1, q15
+	vand	q7, q7, q14
+	veor	q1, q11, q10
+	veor	q7, q7, q11
+	veor	q7, q7, q12
+	veor	q4, q4, q12
+	veor	q1, q1, q8
+	veor	q2, q2, q8
+	veor	q7, q7, q0
+	veor	q1, q1, q6
+	veor	q6, q6, q0
+	veor	q4, q4, q7
+	veor	q0, q0, q1
+
+	veor	q1, q1, q5
+	veor	q5, q5, q2
+	veor	q2, q2, q3
+	veor	q3, q3, q5
+	veor	q4, q4, q5
+
+	veor	q6, q6, q3
+	subs	r5,r5,#1
+	bcc	.Lenc_done
+	vext.8	q8, q0, q0, #12	@ x0 <<< 32
+	vext.8	q9, q1, q1, #12
+	 veor	q0, q0, q8		@ x0 ^ (x0 <<< 32)
+	vext.8	q10, q4, q4, #12
+	 veor	q1, q1, q9
+	vext.8	q11, q6, q6, #12
+	 veor	q4, q4, q10
+	vext.8	q12, q3, q3, #12
+	 veor	q6, q6, q11
+	vext.8	q13, q7, q7, #12
+	 veor	q3, q3, q12
+	vext.8	q14, q2, q2, #12
+	 veor	q7, q7, q13
+	vext.8	q15, q5, q5, #12
+	 veor	q2, q2, q14
+
+	veor	q9, q9, q0
+	 veor	q5, q5, q15
+	 vext.8	q0, q0, q0, #8		@ (x0 ^ (x0 <<< 32)) <<< 64
+	veor	q10, q10, q1
+	veor	q8, q8, q5
+	veor	q9, q9, q5
+	 vext.8	q1, q1, q1, #8
+	veor	q13, q13, q3
+	 veor	q0, q0, q8
+	veor	q14, q14, q7
+	 veor	q1, q1, q9
+	 vext.8	q8, q3, q3, #8
+	veor	q12, q12, q6
+	 vext.8	q9, q7, q7, #8
+	veor	q15, q15, q2
+	 vext.8	q3, q6, q6, #8
+	veor	q11, q11, q4
+	 vext.8	q7, q5, q5, #8
+	veor	q12, q12, q5
+	 vext.8	q6, q2, q2, #8
+	veor	q11, q11, q5
+	 vext.8	q2, q4, q4, #8
+	veor	q5, q9, q13
+	veor	q4, q8, q12
+	veor	q3, q3, q11
+	veor	q7, q7, q15
+	veor	q6, q6, q14
+	 @ vmov	q4, q8
+	veor	q2, q2, q10
+	 @ vmov	q5, q9
+	vldmia	r6, {q12}		@ .LSR
+	ite	eq				@ Thumb2 thing, sanity check in ARM
+	addeq	r6,r6,#0x10
+	bne	.Lenc_loop
+	vldmia	r6, {q12}		@ .LSRM0
+	b	.Lenc_loop
+.align	4
+.Lenc_done:
+	vmov.i8	q8,#0x55			@ compose .LBS0
+	vmov.i8	q9,#0x33			@ compose .LBS1
+	vshr.u64	q10, q2, #1
+	 vshr.u64	q11, q3, #1
+	veor		q10, q10, q5
+	 veor		q11, q11, q7
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q5, q5, q10
+	vshl.u64	q10, q10, #1
+	 veor		q7, q7, q11
+	 vshl.u64	q11, q11, #1
+	veor		q2, q2, q10
+	 veor		q3, q3, q11
+	vshr.u64	q10, q4, #1
+	 vshr.u64	q11, q0, #1
+	veor		q10, q10, q6
+	 veor		q11, q11, q1
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q6, q6, q10
+	vshl.u64	q10, q10, #1
+	 veor		q1, q1, q11
+	 vshl.u64	q11, q11, #1
+	veor		q4, q4, q10
+	 veor		q0, q0, q11
+	vmov.i8	q8,#0x0f			@ compose .LBS2
+	vshr.u64	q10, q7, #2
+	 vshr.u64	q11, q3, #2
+	veor		q10, q10, q5
+	 veor		q11, q11, q2
+	vand		q10, q10, q9
+	 vand		q11, q11, q9
+	veor		q5, q5, q10
+	vshl.u64	q10, q10, #2
+	 veor		q2, q2, q11
+	 vshl.u64	q11, q11, #2
+	veor		q7, q7, q10
+	 veor		q3, q3, q11
+	vshr.u64	q10, q1, #2
+	 vshr.u64	q11, q0, #2
+	veor		q10, q10, q6
+	 veor		q11, q11, q4
+	vand		q10, q10, q9
+	 vand		q11, q11, q9
+	veor		q6, q6, q10
+	vshl.u64	q10, q10, #2
+	 veor		q4, q4, q11
+	 vshl.u64	q11, q11, #2
+	veor		q1, q1, q10
+	 veor		q0, q0, q11
+	vshr.u64	q10, q6, #4
+	 vshr.u64	q11, q4, #4
+	veor		q10, q10, q5
+	 veor		q11, q11, q2
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q5, q5, q10
+	vshl.u64	q10, q10, #4
+	 veor		q2, q2, q11
+	 vshl.u64	q11, q11, #4
+	veor		q6, q6, q10
+	 veor		q4, q4, q11
+	vshr.u64	q10, q1, #4
+	 vshr.u64	q11, q0, #4
+	veor		q10, q10, q7
+	 veor		q11, q11, q3
+	vand		q10, q10, q8
+	 vand		q11, q11, q8
+	veor		q7, q7, q10
+	vshl.u64	q10, q10, #4
+	 veor		q3, q3, q11
+	 vshl.u64	q11, q11, #4
+	veor		q1, q1, q10
+	 veor		q0, q0, q11
+	vldmia	r4, {q8}			@ last round key
+	veor	q4, q4, q8
+	veor	q6, q6, q8
+	veor	q3, q3, q8
+	veor	q7, q7, q8
+	veor	q2, q2, q8
+	veor	q5, q5, q8
+	veor	q0, q0, q8
+	veor	q1, q1, q8
+	bx	lr
+.size	_bsaes_encrypt8,.-_bsaes_encrypt8
+.type	_bsaes_key_convert,%function
+.align	4
+_bsaes_key_convert:
+	adr	r6,_bsaes_key_convert
+	vld1.8	{q7},  [r4]!		@ load round 0 key
+	sub	r6,r6,#_bsaes_key_convert-.LM0
+	vld1.8	{q15}, [r4]!		@ load round 1 key
+
+	vmov.i8	q8,  #0x01			@ bit masks
+	vmov.i8	q9,  #0x02
+	vmov.i8	q10, #0x04
+	vmov.i8	q11, #0x08
+	vmov.i8	q12, #0x10
+	vmov.i8	q13, #0x20
+	vldmia	r6, {q14}		@ .LM0
+
+#ifdef __ARMEL__
+	vrev32.8	q7,  q7
+	vrev32.8	q15, q15
+#endif
+	sub	r5,r5,#1
+	vstmia	r12!, {q7}		@ save round 0 key
+	b	.Lkey_loop
+
+.align	4
+.Lkey_loop:
+	vtbl.8	d14,{q15},d28
+	vtbl.8	d15,{q15},d29
+	vmov.i8	q6,  #0x40
+	vmov.i8	q15, #0x80
+
+	vtst.8	q0, q7, q8
+	vtst.8	q1, q7, q9
+	vtst.8	q2, q7, q10
+	vtst.8	q3, q7, q11
+	vtst.8	q4, q7, q12
+	vtst.8	q5, q7, q13
+	vtst.8	q6, q7, q6
+	vtst.8	q7, q7, q15
+	vld1.8	{q15}, [r4]!		@ load next round key
+	vmvn	q0, q0		@ "pnot"
+	vmvn	q1, q1
+	vmvn	q5, q5
+	vmvn	q6, q6
+#ifdef __ARMEL__
+	vrev32.8	q15, q15
+#endif
+	subs	r5,r5,#1
+	vstmia	r12!,{q0-q7}		@ write bit-sliced round key
+	bne	.Lkey_loop
+
+	vmov.i8	q7,#0x63			@ compose .L63
+	@ don't save last round key
+	bx	lr
+.size	_bsaes_key_convert,.-_bsaes_key_convert
+.extern AES_cbc_encrypt
+.extern AES_decrypt
+
+.global	bsaes_cbc_encrypt
+.type	bsaes_cbc_encrypt,%function
+.align	5
+bsaes_cbc_encrypt:
+#ifndef	__KERNEL__
+	cmp	r2, #128
+#ifndef	__thumb__
+	blo	AES_cbc_encrypt
+#else
+	bhs	1f
+	b	AES_cbc_encrypt
+1:
+#endif
+#endif
+
+	@ it is up to the caller to make sure we are called with enc == 0
+
+	mov	ip, sp
+	stmdb	sp!, {r4-r10, lr}
+	VFP_ABI_PUSH
+	ldr	r8, [ip]			@ IV is 1st arg on the stack
+	mov	r2, r2, lsr#4		@ len in 16 byte blocks
+	sub	sp, #0x10			@ scratch space to carry over the IV
+	mov	r9, sp				@ save sp
+
+	ldr	r10, [r3, #240]		@ get # of rounds
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	@ allocate the key schedule on the stack
+	sub	r12, sp, r10, lsl#7		@ 128 bytes per inner round key
+	add	r12, #96			@ size of bit-sliced key schedule
+
+	@ populate the key schedule
+	mov	r4, r3			@ pass key
+	mov	r5, r10			@ pass # of rounds
+	mov	sp, r12				@ sp is sp
+	bl	_bsaes_key_convert
+	vldmia	sp, {q6}
+	vstmia	r12,  {q15}		@ save last round key
+	veor	q7, q7, q6	@ fix up round 0 key
+	vstmia	sp, {q7}
+#else
+	ldr	r12, [r3, #244]
+	eors	r12, #1
+	beq	0f
+
+	@ populate the key schedule
+	str	r12, [r3, #244]
+	mov	r4, r3			@ pass key
+	mov	r5, r10			@ pass # of rounds
+	add	r12, r3, #248			@ pass key schedule
+	bl	_bsaes_key_convert
+	add	r4, r3, #248
+	vldmia	r4, {q6}
+	vstmia	r12, {q15}			@ save last round key
+	veor	q7, q7, q6	@ fix up round 0 key
+	vstmia	r4, {q7}
+
+.align	2
+0:
+#endif
+
+	vld1.8	{q15}, [r8]		@ load IV
+	b	.Lcbc_dec_loop
+
+.align	4
+.Lcbc_dec_loop:
+	subs	r2, r2, #0x8
+	bmi	.Lcbc_dec_loop_finish
+
+	vld1.8	{q0-q1}, [r0]!	@ load input
+	vld1.8	{q2-q3}, [r0]!
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	mov	r4, sp			@ pass the key
+#else
+	add	r4, r3, #248
+#endif
+	vld1.8	{q4-q5}, [r0]!
+	mov	r5, r10
+	vld1.8	{q6-q7}, [r0]
+	sub	r0, r0, #0x60
+	vstmia	r9, {q15}			@ put aside IV
+
+	bl	_bsaes_decrypt8
+
+	vldmia	r9, {q14}			@ reload IV
+	vld1.8	{q8-q9}, [r0]!	@ reload input
+	veor	q0, q0, q14	@ ^= IV
+	vld1.8	{q10-q11}, [r0]!
+	veor	q1, q1, q8
+	veor	q6, q6, q9
+	vld1.8	{q12-q13}, [r0]!
+	veor	q4, q4, q10
+	veor	q2, q2, q11
+	vld1.8	{q14-q15}, [r0]!
+	veor	q7, q7, q12
+	vst1.8	{q0-q1}, [r1]!	@ write output
+	veor	q3, q3, q13
+	vst1.8	{q6}, [r1]!
+	veor	q5, q5, q14
+	vst1.8	{q4}, [r1]!
+	vst1.8	{q2}, [r1]!
+	vst1.8	{q7}, [r1]!
+	vst1.8	{q3}, [r1]!
+	vst1.8	{q5}, [r1]!
+
+	b	.Lcbc_dec_loop
+
+.Lcbc_dec_loop_finish:
+	adds	r2, r2, #8
+	beq	.Lcbc_dec_done
+
+	vld1.8	{q0}, [r0]!		@ load input
+	cmp	r2, #2
+	blo	.Lcbc_dec_one
+	vld1.8	{q1}, [r0]!
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	mov	r4, sp			@ pass the key
+#else
+	add	r4, r3, #248
+#endif
+	mov	r5, r10
+	vstmia	r9, {q15}			@ put aside IV
+	beq	.Lcbc_dec_two
+	vld1.8	{q2}, [r0]!
+	cmp	r2, #4
+	blo	.Lcbc_dec_three
+	vld1.8	{q3}, [r0]!
+	beq	.Lcbc_dec_four
+	vld1.8	{q4}, [r0]!
+	cmp	r2, #6
+	blo	.Lcbc_dec_five
+	vld1.8	{q5}, [r0]!
+	beq	.Lcbc_dec_six
+	vld1.8	{q6}, [r0]!
+	sub	r0, r0, #0x70
+
+	bl	_bsaes_decrypt8
+
+	vldmia	r9, {q14}			@ reload IV
+	vld1.8	{q8-q9}, [r0]!	@ reload input
+	veor	q0, q0, q14	@ ^= IV
+	vld1.8	{q10-q11}, [r0]!
+	veor	q1, q1, q8
+	veor	q6, q6, q9
+	vld1.8	{q12-q13}, [r0]!
+	veor	q4, q4, q10
+	veor	q2, q2, q11
+	vld1.8	{q15}, [r0]!
+	veor	q7, q7, q12
+	vst1.8	{q0-q1}, [r1]!	@ write output
+	veor	q3, q3, q13
+	vst1.8	{q6}, [r1]!
+	vst1.8	{q4}, [r1]!
+	vst1.8	{q2}, [r1]!
+	vst1.8	{q7}, [r1]!
+	vst1.8	{q3}, [r1]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_six:
+	sub	r0, r0, #0x60
+	bl	_bsaes_decrypt8
+	vldmia	r9,{q14}			@ reload IV
+	vld1.8	{q8-q9}, [r0]!	@ reload input
+	veor	q0, q0, q14	@ ^= IV
+	vld1.8	{q10-q11}, [r0]!
+	veor	q1, q1, q8
+	veor	q6, q6, q9
+	vld1.8	{q12}, [r0]!
+	veor	q4, q4, q10
+	veor	q2, q2, q11
+	vld1.8	{q15}, [r0]!
+	veor	q7, q7, q12
+	vst1.8	{q0-q1}, [r1]!	@ write output
+	vst1.8	{q6}, [r1]!
+	vst1.8	{q4}, [r1]!
+	vst1.8	{q2}, [r1]!
+	vst1.8	{q7}, [r1]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_five:
+	sub	r0, r0, #0x50
+	bl	_bsaes_decrypt8
+	vldmia	r9, {q14}			@ reload IV
+	vld1.8	{q8-q9}, [r0]!	@ reload input
+	veor	q0, q0, q14	@ ^= IV
+	vld1.8	{q10-q11}, [r0]!
+	veor	q1, q1, q8
+	veor	q6, q6, q9
+	vld1.8	{q15}, [r0]!
+	veor	q4, q4, q10
+	vst1.8	{q0-q1}, [r1]!	@ write output
+	veor	q2, q2, q11
+	vst1.8	{q6}, [r1]!
+	vst1.8	{q4}, [r1]!
+	vst1.8	{q2}, [r1]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_four:
+	sub	r0, r0, #0x40
+	bl	_bsaes_decrypt8
+	vldmia	r9, {q14}			@ reload IV
+	vld1.8	{q8-q9}, [r0]!	@ reload input
+	veor	q0, q0, q14	@ ^= IV
+	vld1.8	{q10}, [r0]!
+	veor	q1, q1, q8
+	veor	q6, q6, q9
+	vld1.8	{q15}, [r0]!
+	veor	q4, q4, q10
+	vst1.8	{q0-q1}, [r1]!	@ write output
+	vst1.8	{q6}, [r1]!
+	vst1.8	{q4}, [r1]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_three:
+	sub	r0, r0, #0x30
+	bl	_bsaes_decrypt8
+	vldmia	r9, {q14}			@ reload IV
+	vld1.8	{q8-q9}, [r0]!	@ reload input
+	veor	q0, q0, q14	@ ^= IV
+	vld1.8	{q15}, [r0]!
+	veor	q1, q1, q8
+	veor	q6, q6, q9
+	vst1.8	{q0-q1}, [r1]!	@ write output
+	vst1.8	{q6}, [r1]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_two:
+	sub	r0, r0, #0x20
+	bl	_bsaes_decrypt8
+	vldmia	r9, {q14}			@ reload IV
+	vld1.8	{q8}, [r0]!		@ reload input
+	veor	q0, q0, q14	@ ^= IV
+	vld1.8	{q15}, [r0]!		@ reload input
+	veor	q1, q1, q8
+	vst1.8	{q0-q1}, [r1]!	@ write output
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_one:
+	sub	r0, r0, #0x10
+	mov	r10, r1			@ save original out pointer
+	mov	r1, r9			@ use the iv scratch space as out buffer
+	mov	r2, r3
+	vmov	q4,q15		@ just in case ensure that IV
+	vmov	q5,q0			@ and input are preserved
+	bl	AES_decrypt
+	vld1.8	{q0}, [r9,:64]		@ load result
+	veor	q0, q0, q4	@ ^= IV
+	vmov	q15, q5		@ q5 holds input
+	vst1.8	{q0}, [r10]		@ write output
+
+.Lcbc_dec_done:
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+.Lcbc_dec_bzero:				@ wipe key schedule [if any]
+	vstmia		sp!, {q0-q1}
+	cmp		sp, r9
+	bne		.Lcbc_dec_bzero
+#endif
+
+	mov	sp, r9
+	add	sp, #0x10			@ add sp,r9,#0x10 is no good for thumb
+	vst1.8	{q15}, [r8]		@ return IV
+	VFP_ABI_POP
+	ldmia	sp!, {r4-r10, pc}
+.size	bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
+.extern	AES_encrypt
+.global	bsaes_ctr32_encrypt_blocks
+.type	bsaes_ctr32_encrypt_blocks,%function
+.align	5
+bsaes_ctr32_encrypt_blocks:
+	cmp	r2, #8			@ use plain AES for
+	blo	.Lctr_enc_short			@ small sizes
+
+	mov	ip, sp
+	stmdb	sp!, {r4-r10, lr}
+	VFP_ABI_PUSH
+	ldr	r8, [ip]			@ ctr is 1st arg on the stack
+	sub	sp, sp, #0x10			@ scratch space to carry over the ctr
+	mov	r9, sp				@ save sp
+
+	ldr	r10, [r3, #240]		@ get # of rounds
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	@ allocate the key schedule on the stack
+	sub	r12, sp, r10, lsl#7		@ 128 bytes per inner round key
+	add	r12, #96			@ size of bit-sliced key schedule
+
+	@ populate the key schedule
+	mov	r4, r3			@ pass key
+	mov	r5, r10			@ pass # of rounds
+	mov	sp, r12				@ sp is sp
+	bl	_bsaes_key_convert
+	veor	q7,q7,q15	@ fix up last round key
+	vstmia	r12, {q7}			@ save last round key
+
+	vld1.8	{q0}, [r8]		@ load counter
+	add	r8, r6, #.LREVM0SR-.LM0	@ borrow r8
+	vldmia	sp, {q4}		@ load round0 key
+#else
+	ldr	r12, [r3, #244]
+	eors	r12, #1
+	beq	0f
+
+	@ populate the key schedule
+	str	r12, [r3, #244]
+	mov	r4, r3			@ pass key
+	mov	r5, r10			@ pass # of rounds
+	add	r12, r3, #248			@ pass key schedule
+	bl	_bsaes_key_convert
+	veor	q7,q7,q15	@ fix up last round key
+	vstmia	r12, {q7}			@ save last round key
+
+.align	2
+0:	add	r12, r3, #248
+	vld1.8	{q0}, [r8]		@ load counter
+	adrl	r8, .LREVM0SR			@ borrow r8
+	vldmia	r12, {q4}			@ load round0 key
+	sub	sp, #0x10			@ place for adjusted round0 key
+#endif
+
+	vmov.i32	q8,#1		@ compose 1<<96
+	veor		q9,q9,q9
+	vrev32.8	q0,q0
+	vext.8		q8,q9,q8,#4
+	vrev32.8	q4,q4
+	vadd.u32	q9,q8,q8	@ compose 2<<96
+	vstmia	sp, {q4}		@ save adjusted round0 key
+	b	.Lctr_enc_loop
+
+.align	4
+.Lctr_enc_loop:
+	vadd.u32	q10, q8, q9	@ compose 3<<96
+	vadd.u32	q1, q0, q8	@ +1
+	vadd.u32	q2, q0, q9	@ +2
+	vadd.u32	q3, q0, q10	@ +3
+	vadd.u32	q4, q1, q10
+	vadd.u32	q5, q2, q10
+	vadd.u32	q6, q3, q10
+	vadd.u32	q7, q4, q10
+	vadd.u32	q10, q5, q10	@ next counter
+
+	@ Borrow prologue from _bsaes_encrypt8 to use the opportunity
+	@ to flip byte order in 32-bit counter
+
+	vldmia		sp, {q9}		@ load round0 key
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x10		@ pass next round key
+#else
+	add		r4, r3, #264
+#endif
+	vldmia		r8, {q8}			@ .LREVM0SR
+	mov		r5, r10			@ pass rounds
+	vstmia		r9, {q10}			@ save next counter
+	sub		r6, r8, #.LREVM0SR-.LSR	@ pass constants
+
+	bl		_bsaes_encrypt8_alt
+
+	subs		r2, r2, #8
+	blo		.Lctr_enc_loop_done
+
+	vld1.8		{q8-q9}, [r0]!	@ load input
+	vld1.8		{q10-q11}, [r0]!
+	veor		q0, q8
+	veor		q1, q9
+	vld1.8		{q12-q13}, [r0]!
+	veor		q4, q10
+	veor		q6, q11
+	vld1.8		{q14-q15}, [r0]!
+	veor		q3, q12
+	vst1.8		{q0-q1}, [r1]!	@ write output
+	veor		q7, q13
+	veor		q2, q14
+	vst1.8		{q4}, [r1]!
+	veor		q5, q15
+	vst1.8		{q6}, [r1]!
+	vmov.i32	q8, #1			@ compose 1<<96
+	vst1.8		{q3}, [r1]!
+	veor		q9, q9, q9
+	vst1.8		{q7}, [r1]!
+	vext.8		q8, q9, q8, #4
+	vst1.8		{q2}, [r1]!
+	vadd.u32	q9,q8,q8		@ compose 2<<96
+	vst1.8		{q5}, [r1]!
+	vldmia		r9, {q0}			@ load counter
+
+	bne		.Lctr_enc_loop
+	b		.Lctr_enc_done
+
+.align	4
+.Lctr_enc_loop_done:
+	add		r2, r2, #8
+	vld1.8		{q8}, [r0]!	@ load input
+	veor		q0, q8
+	vst1.8		{q0}, [r1]!	@ write output
+	cmp		r2, #2
+	blo		.Lctr_enc_done
+	vld1.8		{q9}, [r0]!
+	veor		q1, q9
+	vst1.8		{q1}, [r1]!
+	beq		.Lctr_enc_done
+	vld1.8		{q10}, [r0]!
+	veor		q4, q10
+	vst1.8		{q4}, [r1]!
+	cmp		r2, #4
+	blo		.Lctr_enc_done
+	vld1.8		{q11}, [r0]!
+	veor		q6, q11
+	vst1.8		{q6}, [r1]!
+	beq		.Lctr_enc_done
+	vld1.8		{q12}, [r0]!
+	veor		q3, q12
+	vst1.8		{q3}, [r1]!
+	cmp		r2, #6
+	blo		.Lctr_enc_done
+	vld1.8		{q13}, [r0]!
+	veor		q7, q13
+	vst1.8		{q7}, [r1]!
+	beq		.Lctr_enc_done
+	vld1.8		{q14}, [r0]
+	veor		q2, q14
+	vst1.8		{q2}, [r1]!
+
+.Lctr_enc_done:
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+#ifndef	BSAES_ASM_EXTENDED_KEY
+.Lctr_enc_bzero:			@ wipe key schedule [if any]
+	vstmia		sp!, {q0-q1}
+	cmp		sp, r9
+	bne		.Lctr_enc_bzero
+#else
+	vstmia		sp, {q0-q1}
+#endif
+
+	mov	sp, r9
+	add	sp, #0x10		@ add sp,r9,#0x10 is no good for thumb
+	VFP_ABI_POP
+	ldmia	sp!, {r4-r10, pc}	@ return
+
+.align	4
+.Lctr_enc_short:
+	ldr	ip, [sp]		@ ctr pointer is passed on stack
+	stmdb	sp!, {r4-r8, lr}
+
+	mov	r4, r0		@ copy arguments
+	mov	r5, r1
+	mov	r6, r2
+	mov	r7, r3
+	ldr	r8, [ip, #12]		@ load counter LSW
+	vld1.8	{q1}, [ip]		@ load whole counter value
+#ifdef __ARMEL__
+	rev	r8, r8
+#endif
+	sub	sp, sp, #0x10
+	vst1.8	{q1}, [sp,:64]	@ copy counter value
+	sub	sp, sp, #0x10
+
+.Lctr_enc_short_loop:
+	add	r0, sp, #0x10		@ input counter value
+	mov	r1, sp			@ output on the stack
+	mov	r2, r7			@ key
+
+	bl	AES_encrypt
+
+	vld1.8	{q0}, [r4]!	@ load input
+	vld1.8	{q1}, [sp,:64]	@ load encrypted counter
+	add	r8, r8, #1
+#ifdef __ARMEL__
+	rev	r0, r8
+	str	r0, [sp, #0x1c]		@ next counter value
+#else
+	str	r8, [sp, #0x1c]		@ next counter value
+#endif
+	veor	q0,q0,q1
+	vst1.8	{q0}, [r5]!	@ store output
+	subs	r6, r6, #1
+	bne	.Lctr_enc_short_loop
+
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+	vstmia		sp!, {q0-q1}
+
+	ldmia	sp!, {r4-r8, pc}
+.size	bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
+.globl	bsaes_xts_encrypt
+.type	bsaes_xts_encrypt,%function
+.align	4
+bsaes_xts_encrypt:
+	mov	ip, sp
+	stmdb	sp!, {r4-r10, lr}		@ 0x20
+	VFP_ABI_PUSH
+	mov	r6, sp				@ future r3
+
+	mov	r7, r0
+	mov	r8, r1
+	mov	r9, r2
+	mov	r10, r3
+
+	sub	r0, sp, #0x10			@ 0x10
+	bic	r0, #0xf			@ align at 16 bytes
+	mov	sp, r0
+
+#ifdef	XTS_CHAIN_TWEAK
+	ldr	r0, [ip]			@ pointer to input tweak
+#else
+	@ generate initial tweak
+	ldr	r0, [ip, #4]			@ iv[]
+	mov	r1, sp
+	ldr	r2, [ip, #0]			@ key2
+	bl	AES_encrypt
+	mov	r0,sp				@ pointer to initial tweak
+#endif
+
+	ldr	r1, [r10, #240]		@ get # of rounds
+	mov	r3, r6
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	@ allocate the key schedule on the stack
+	sub	r12, sp, r1, lsl#7		@ 128 bytes per inner round key
+	@ add	r12, #96			@ size of bit-sliced key schedule
+	sub	r12, #48			@ place for tweak[9]
+
+	@ populate the key schedule
+	mov	r4, r10			@ pass key
+	mov	r5, r1			@ pass # of rounds
+	mov	sp, r12
+	add	r12, #0x90			@ pass key schedule
+	bl	_bsaes_key_convert
+	veor	q7, q7, q15	@ fix up last round key
+	vstmia	r12, {q7}			@ save last round key
+#else
+	ldr	r12, [r10, #244]
+	eors	r12, #1
+	beq	0f
+
+	str	r12, [r10, #244]
+	mov	r4, r10			@ pass key
+	mov	r5, r1			@ pass # of rounds
+	add	r12, r10, #248			@ pass key schedule
+	bl	_bsaes_key_convert
+	veor	q7, q7, q15	@ fix up last round key
+	vstmia	r12, {q7}
+
+.align	2
+0:	sub	sp, #0x90			@ place for tweak[9]
+#endif
+
+	vld1.8	{q8}, [r0]			@ initial tweak
+	adr	r2, .Lxts_magic
+
+	subs	r9, #0x80
+	blo	.Lxts_enc_short
+	b	.Lxts_enc_loop
+
+.align	4
+.Lxts_enc_loop:
+	vldmia		r2, {q5}	@ load XTS magic
+	vshr.s64	q6, q8, #63
+	mov		r0, sp
+	vand		q6, q6, q5
+	vadd.u64	q9, q8, q8
+	vst1.64		{q8}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q9, #63
+	veor		q9, q9, q6
+	vand		q7, q7, q5
+	vadd.u64	q10, q9, q9
+	vst1.64		{q9}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q10, #63
+	veor		q10, q10, q7
+	vand		q6, q6, q5
+	vld1.8		{q0}, [r7]!
+	vadd.u64	q11, q10, q10
+	vst1.64		{q10}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q11, #63
+	veor		q11, q11, q6
+	vand		q7, q7, q5
+	vld1.8		{q1}, [r7]!
+	veor		q0, q0, q8
+	vadd.u64	q12, q11, q11
+	vst1.64		{q11}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q12, #63
+	veor		q12, q12, q7
+	vand		q6, q6, q5
+	vld1.8		{q2}, [r7]!
+	veor		q1, q1, q9
+	vadd.u64	q13, q12, q12
+	vst1.64		{q12}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q13, #63
+	veor		q13, q13, q6
+	vand		q7, q7, q5
+	vld1.8		{q3}, [r7]!
+	veor		q2, q2, q10
+	vadd.u64	q14, q13, q13
+	vst1.64		{q13}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q14, #63
+	veor		q14, q14, q7
+	vand		q6, q6, q5
+	vld1.8		{q4}, [r7]!
+	veor		q3, q3, q11
+	vadd.u64	q15, q14, q14
+	vst1.64		{q14}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q15, #63
+	veor		q15, q15, q6
+	vand		q7, q7, q5
+	vld1.8		{q5}, [r7]!
+	veor		q4, q4, q12
+	vadd.u64	q8, q15, q15
+	vst1.64		{q15}, [r0,:128]!
+	vswp		d15,d14
+	veor		q8, q8, q7
+	vst1.64		{q8}, [r0,:128]		@ next round tweak
+
+	vld1.8		{q6-q7}, [r7]!
+	veor		q5, q5, q13
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q6, q6, q14
+	mov		r5, r1			@ pass rounds
+	veor		q7, q7, q15
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	vld1.64		{q10-q11}, [r0,:128]!
+	veor		q0, q0, q8
+	vld1.64		{q12-q13}, [r0,:128]!
+	veor		q1, q1, q9
+	veor		q8, q4, q10
+	vst1.8		{q0-q1}, [r8]!
+	veor		q9, q6, q11
+	vld1.64		{q14-q15}, [r0,:128]!
+	veor		q10, q3, q12
+	vst1.8		{q8-q9}, [r8]!
+	veor		q11, q7, q13
+	veor		q12, q2, q14
+	vst1.8		{q10-q11}, [r8]!
+	veor		q13, q5, q15
+	vst1.8		{q12-q13}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+
+	subs		r9, #0x80
+	bpl		.Lxts_enc_loop
+
+.Lxts_enc_short:
+	adds		r9, #0x70
+	bmi		.Lxts_enc_done
+
+	vldmia		r2, {q5}	@ load XTS magic
+	vshr.s64	q7, q8, #63
+	mov		r0, sp
+	vand		q7, q7, q5
+	vadd.u64	q9, q8, q8
+	vst1.64		{q8}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q9, #63
+	veor		q9, q9, q7
+	vand		q6, q6, q5
+	vadd.u64	q10, q9, q9
+	vst1.64		{q9}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q10, #63
+	veor		q10, q10, q6
+	vand		q7, q7, q5
+	vld1.8		{q0}, [r7]!
+	subs		r9, #0x10
+	bmi		.Lxts_enc_1
+	vadd.u64	q11, q10, q10
+	vst1.64		{q10}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q11, #63
+	veor		q11, q11, q7
+	vand		q6, q6, q5
+	vld1.8		{q1}, [r7]!
+	subs		r9, #0x10
+	bmi		.Lxts_enc_2
+	veor		q0, q0, q8
+	vadd.u64	q12, q11, q11
+	vst1.64		{q11}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q12, #63
+	veor		q12, q12, q6
+	vand		q7, q7, q5
+	vld1.8		{q2}, [r7]!
+	subs		r9, #0x10
+	bmi		.Lxts_enc_3
+	veor		q1, q1, q9
+	vadd.u64	q13, q12, q12
+	vst1.64		{q12}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q13, #63
+	veor		q13, q13, q7
+	vand		q6, q6, q5
+	vld1.8		{q3}, [r7]!
+	subs		r9, #0x10
+	bmi		.Lxts_enc_4
+	veor		q2, q2, q10
+	vadd.u64	q14, q13, q13
+	vst1.64		{q13}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q14, #63
+	veor		q14, q14, q6
+	vand		q7, q7, q5
+	vld1.8		{q4}, [r7]!
+	subs		r9, #0x10
+	bmi		.Lxts_enc_5
+	veor		q3, q3, q11
+	vadd.u64	q15, q14, q14
+	vst1.64		{q14}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q15, #63
+	veor		q15, q15, q7
+	vand		q6, q6, q5
+	vld1.8		{q5}, [r7]!
+	subs		r9, #0x10
+	bmi		.Lxts_enc_6
+	veor		q4, q4, q12
+	sub		r9, #0x10
+	vst1.64		{q15}, [r0,:128]		@ next round tweak
+
+	vld1.8		{q6}, [r7]!
+	veor		q5, q5, q13
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q6, q6, q14
+	mov		r5, r1			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	vld1.64		{q10-q11}, [r0,:128]!
+	veor		q0, q0, q8
+	vld1.64		{q12-q13}, [r0,:128]!
+	veor		q1, q1, q9
+	veor		q8, q4, q10
+	vst1.8		{q0-q1}, [r8]!
+	veor		q9, q6, q11
+	vld1.64		{q14}, [r0,:128]!
+	veor		q10, q3, q12
+	vst1.8		{q8-q9}, [r8]!
+	veor		q11, q7, q13
+	veor		q12, q2, q14
+	vst1.8		{q10-q11}, [r8]!
+	vst1.8		{q12}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_6:
+	vst1.64		{q14}, [r0,:128]		@ next round tweak
+
+	veor		q4, q4, q12
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q5, q5, q13
+	mov		r5, r1			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	vld1.64		{q10-q11}, [r0,:128]!
+	veor		q0, q0, q8
+	vld1.64		{q12-q13}, [r0,:128]!
+	veor		q1, q1, q9
+	veor		q8, q4, q10
+	vst1.8		{q0-q1}, [r8]!
+	veor		q9, q6, q11
+	veor		q10, q3, q12
+	vst1.8		{q8-q9}, [r8]!
+	veor		q11, q7, q13
+	vst1.8		{q10-q11}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+
+@ put this in range for both ARM and Thumb mode adr instructions
+.align	5
+.Lxts_magic:
+	.quad	1, 0x87
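+	@ {1, 0x87} implements doubling in GF(2^128): the XTS reduction
+	@ polynomial x^128 + x^7 + x^2 + x + 1 is folded in as 0x87 whenever
+	@ the shifted-out top bit is set; the vshr/vand/vswp sequences above
+	@ build that conditional mask and carry it across the two 64-bit lanes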
+
+.align	5
+.Lxts_enc_5:
+	vst1.64		{q13}, [r0,:128]		@ next round tweak
+
+	veor		q3, q3, q11
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q4, q4, q12
+	mov		r5, r1			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	vld1.64		{q10-q11}, [r0,:128]!
+	veor		q0, q0, q8
+	vld1.64		{q12}, [r0,:128]!
+	veor		q1, q1, q9
+	veor		q8, q4, q10
+	vst1.8		{q0-q1}, [r8]!
+	veor		q9, q6, q11
+	veor		q10, q3, q12
+	vst1.8		{q8-q9}, [r8]!
+	vst1.8		{q10}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_4:
+	vst1.64		{q12}, [r0,:128]		@ next round tweak
+
+	veor		q2, q2, q10
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q3, q3, q11
+	mov		r5, r1			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	vld1.64		{q10-q11}, [r0,:128]!
+	veor		q0, q0, q8
+	veor		q1, q1, q9
+	veor		q8, q4, q10
+	vst1.8		{q0-q1}, [r8]!
+	veor		q9, q6, q11
+	vst1.8		{q8-q9}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_3:
+	vst1.64		{q11}, [r0,:128]		@ next round tweak
+
+	veor		q1, q1, q9
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q2, q2, q10
+	mov		r5, r1			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	vld1.64		{q10}, [r0,:128]!
+	veor		q0, q0, q8
+	veor		q1, q1, q9
+	veor		q8, q4, q10
+	vst1.8		{q0-q1}, [r8]!
+	vst1.8		{q8}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_2:
+	vst1.64		{q10}, [r0,:128]		@ next round tweak
+
+	veor		q0, q0, q8
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q1, q1, q9
+	mov		r5, r1			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	veor		q0, q0, q8
+	veor		q1, q1, q9
+	vst1.8		{q0-q1}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_1:
+	mov		r0, sp
+	veor		q0, q8
+	mov		r1, sp
+	vst1.8		{q0}, [sp,:128]
+	mov		r2, r10
+	mov		r4, r3				@ preserve fp
+
+	bl		AES_encrypt
+
+	vld1.8		{q0}, [sp,:128]
+	veor		q0, q0, q8
+	vst1.8		{q0}, [r8]!
+	mov		r3, r4
+
+	vmov		q8, q9		@ next round tweak
+
+.Lxts_enc_done:
+#ifndef	XTS_CHAIN_TWEAK
+	adds		r9, #0x10
+	beq		.Lxts_enc_ret
+	sub		r6, r8, #0x10
+
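+	@ ciphertext stealing: swap the tail of the plaintext with the
+	@ leading bytes of the last complete ciphertext block, then
+	@ re-encrypt that block below with the final tweak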
+.Lxts_enc_steal:
+	ldrb		r0, [r7], #1
+	ldrb		r1, [r8, #-0x10]
+	strb		r0, [r8, #-0x10]
+	strb		r1, [r8], #1
+
+	subs		r9, #1
+	bhi		.Lxts_enc_steal
+
+	vld1.8		{q0}, [r6]
+	mov		r0, sp
+	veor		q0, q0, q8
+	mov		r1, sp
+	vst1.8		{q0}, [sp,:128]
+	mov		r2, r10
+	mov		r4, r3			@ preserve fp
+
+	bl		AES_encrypt
+
+	vld1.8		{q0}, [sp,:128]
+	veor		q0, q0, q8
+	vst1.8		{q0}, [r6]
+	mov		r3, r4
+#endif
+
+.Lxts_enc_ret:
+	bic		r0, r3, #0xf
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+#ifdef	XTS_CHAIN_TWEAK
+	ldr		r1, [r3, #0x20+VFP_ABI_FRAME]	@ chain tweak
+#endif
+.Lxts_enc_bzero:				@ wipe key schedule [if any]
+	vstmia		sp!, {q0-q1}
+	cmp		sp, r0
+	bne		.Lxts_enc_bzero
+
+	mov		sp, r3
+#ifdef	XTS_CHAIN_TWEAK
+	vst1.8		{q8}, [r1]
+#endif
+	VFP_ABI_POP
+	ldmia		sp!, {r4-r10, pc}	@ return
+
+.size	bsaes_xts_encrypt,.-bsaes_xts_encrypt
+
+.globl	bsaes_xts_decrypt
+.type	bsaes_xts_decrypt,%function
+.align	4
+bsaes_xts_decrypt:
+	mov	ip, sp
+	stmdb	sp!, {r4-r10, lr}		@ 0x20
+	VFP_ABI_PUSH
+	mov	r6, sp				@ future r3
+
+	mov	r7, r0
+	mov	r8, r1
+	mov	r9, r2
+	mov	r10, r3
+
+	sub	r0, sp, #0x10			@ 0x10
+	bic	r0, #0xf			@ align at 16 bytes
+	mov	sp, r0
+
+#ifdef	XTS_CHAIN_TWEAK
+	ldr	r0, [ip]			@ pointer to input tweak
+#else
+	@ generate initial tweak
+	ldr	r0, [ip, #4]			@ iv[]
+	mov	r1, sp
+	ldr	r2, [ip, #0]			@ key2
+	bl	AES_encrypt
+	mov	r0, sp				@ pointer to initial tweak
+#endif
+
+	ldr	r1, [r10, #240]		@ get # of rounds
+	mov	r3, r6
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	@ allocate the key schedule on the stack
+	sub	r12, sp, r1, lsl#7		@ 128 bytes per inner round key
+	@ add	r12, #96			@ size of bit-sliced key schedule
+	sub	r12, #48			@ place for tweak[9]
+
+	@ populate the key schedule
+	mov	r4, r10			@ pass key
+	mov	r5, r1			@ pass # of rounds
+	mov	sp, r12
+	add	r12, #0x90			@ pass key schedule
+	bl	_bsaes_key_convert
+	add	r4, sp, #0x90
+	vldmia	r4, {q6}
+	vstmia	r12,  {q15}		@ save last round key
+	veor	q7, q7, q6	@ fix up round 0 key
+	vstmia	r4, {q7}
+#else
+	ldr	r12, [r10, #244]
+	eors	r12, #1
+	beq	0f
+
+	str	r12, [r10, #244]
+	mov	r4, r10			@ pass key
+	mov	r5, r1			@ pass # of rounds
+	add	r12, r10, #248			@ pass key schedule
+	bl	_bsaes_key_convert
+	add	r4, r10, #248
+	vldmia	r4, {q6}
+	vstmia	r12,  {q15}		@ save last round key
+	veor	q7, q7, q6	@ fix up round 0 key
+	vstmia	r4, {q7}
+
+.align	2
+0:	sub	sp, #0x90			@ place for tweak[9]
+#endif
+	vld1.8	{q8}, [r0]			@ initial tweak
+	adr	r2, .Lxts_magic
+
+	tst	r9, #0xf			@ if not multiple of 16
+	it	ne				@ Thumb2 thing, sanity check in ARM
+	subne	r9, #0x10			@ subtract another 16 bytes
+	subs	r9, #0x80
+
+	blo	.Lxts_dec_short
+	b	.Lxts_dec_loop
+
+.align	4
+.Lxts_dec_loop:
+	vldmia		r2, {q5}	@ load XTS magic
+	vshr.s64	q6, q8, #63
+	mov		r0, sp
+	vand		q6, q6, q5
+	vadd.u64	q9, q8, q8
+	vst1.64		{q8}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q9, #63
+	veor		q9, q9, q6
+	vand		q7, q7, q5
+	vadd.u64	q10, q9, q9
+	vst1.64		{q9}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q10, #63
+	veor		q10, q10, q7
+	vand		q6, q6, q5
+	vld1.8		{q0}, [r7]!
+	vadd.u64	q11, q10, q10
+	vst1.64		{q10}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q11, #63
+	veor		q11, q11, q6
+	vand		q7, q7, q5
+	vld1.8		{q1}, [r7]!
+	veor		q0, q0, q8
+	vadd.u64	q12, q11, q11
+	vst1.64		{q11}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q12, #63
+	veor		q12, q12, q7
+	vand		q6, q6, q5
+	vld1.8		{q2}, [r7]!
+	veor		q1, q1, q9
+	vadd.u64	q13, q12, q12
+	vst1.64		{q12}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q13, #63
+	veor		q13, q13, q6
+	vand		q7, q7, q5
+	vld1.8		{q3}, [r7]!
+	veor		q2, q2, q10
+	vadd.u64	q14, q13, q13
+	vst1.64		{q13}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q14, #63
+	veor		q14, q14, q7
+	vand		q6, q6, q5
+	vld1.8		{q4}, [r7]!
+	veor		q3, q3, q11
+	vadd.u64	q15, q14, q14
+	vst1.64		{q14}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q15, #63
+	veor		q15, q15, q6
+	vand		q7, q7, q5
+	vld1.8		{q5}, [r7]!
+	veor		q4, q4, q12
+	vadd.u64	q8, q15, q15
+	vst1.64		{q15}, [r0,:128]!
+	vswp		d15,d14
+	veor		q8, q8, q7
+	vst1.64		{q8}, [r0,:128]		@ next round tweak
+
+	vld1.8		{q6-q7}, [r7]!
+	veor		q5, q5, q13
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q6, q6, q14
+	mov		r5, r1			@ pass rounds
+	veor		q7, q7, q15
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	vld1.64		{q10-q11}, [r0,:128]!
+	veor		q0, q0, q8
+	vld1.64		{q12-q13}, [r0,:128]!
+	veor		q1, q1, q9
+	veor		q8, q6, q10
+	vst1.8		{q0-q1}, [r8]!
+	veor		q9, q4, q11
+	vld1.64		{q14-q15}, [r0,:128]!
+	veor		q10, q2, q12
+	vst1.8		{q8-q9}, [r8]!
+	veor		q11, q7, q13
+	veor		q12, q3, q14
+	vst1.8		{q10-q11}, [r8]!
+	veor		q13, q5, q15
+	vst1.8		{q12-q13}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+
+	subs		r9, #0x80
+	bpl		.Lxts_dec_loop
+
+.Lxts_dec_short:
+	adds		r9, #0x70
+	bmi		.Lxts_dec_done
+
+	vldmia		r2, {q5}	@ load XTS magic
+	vshr.s64	q7, q8, #63
+	mov		r0, sp
+	vand		q7, q7, q5
+	vadd.u64	q9, q8, q8
+	vst1.64		{q8}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q9, #63
+	veor		q9, q9, q7
+	vand		q6, q6, q5
+	vadd.u64	q10, q9, q9
+	vst1.64		{q9}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q10, #63
+	veor		q10, q10, q6
+	vand		q7, q7, q5
+	vld1.8		{q0}, [r7]!
+	subs		r9, #0x10
+	bmi		.Lxts_dec_1
+	vadd.u64	q11, q10, q10
+	vst1.64		{q10}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q11, #63
+	veor		q11, q11, q7
+	vand		q6, q6, q5
+	vld1.8		{q1}, [r7]!
+	subs		r9, #0x10
+	bmi		.Lxts_dec_2
+	veor		q0, q0, q8
+	vadd.u64	q12, q11, q11
+	vst1.64		{q11}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q12, #63
+	veor		q12, q12, q6
+	vand		q7, q7, q5
+	vld1.8		{q2}, [r7]!
+	subs		r9, #0x10
+	bmi		.Lxts_dec_3
+	veor		q1, q1, q9
+	vadd.u64	q13, q12, q12
+	vst1.64		{q12}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q13, #63
+	veor		q13, q13, q7
+	vand		q6, q6, q5
+	vld1.8		{q3}, [r7]!
+	subs		r9, #0x10
+	bmi		.Lxts_dec_4
+	veor		q2, q2, q10
+	vadd.u64	q14, q13, q13
+	vst1.64		{q13}, [r0,:128]!
+	vswp		d13,d12
+	vshr.s64	q7, q14, #63
+	veor		q14, q14, q6
+	vand		q7, q7, q5
+	vld1.8		{q4}, [r7]!
+	subs		r9, #0x10
+	bmi		.Lxts_dec_5
+	veor		q3, q3, q11
+	vadd.u64	q15, q14, q14
+	vst1.64		{q14}, [r0,:128]!
+	vswp		d15,d14
+	vshr.s64	q6, q15, #63
+	veor		q15, q15, q7
+	vand		q6, q6, q5
+	vld1.8		{q5}, [r7]!
+	subs		r9, #0x10
+	bmi		.Lxts_dec_6
+	veor		q4, q4, q12
+	sub		r9, #0x10
+	vst1.64		{q15}, [r0,:128]		@ next round tweak
+
+	vld1.8		{q6}, [r7]!
+	veor		q5, q5, q13
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q6, q6, q14
+	mov		r5, r1			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	vld1.64		{q10-q11}, [r0,:128]!
+	veor		q0, q0, q8
+	vld1.64		{q12-q13}, [r0,:128]!
+	veor		q1, q1, q9
+	veor		q8, q6, q10
+	vst1.8		{q0-q1}, [r8]!
+	veor		q9, q4, q11
+	vld1.64		{q14}, [r0,:128]!
+	veor		q10, q2, q12
+	vst1.8		{q8-q9}, [r8]!
+	veor		q11, q7, q13
+	veor		q12, q3, q14
+	vst1.8		{q10-q11}, [r8]!
+	vst1.8		{q12}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_6:
+	vst1.64		{q14}, [r0,:128]		@ next round tweak
+
+	veor		q4, q4, q12
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q5, q5, q13
+	mov		r5, r1			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	vld1.64		{q10-q11}, [r0,:128]!
+	veor		q0, q0, q8
+	vld1.64		{q12-q13}, [r0,:128]!
+	veor		q1, q1, q9
+	veor		q8, q6, q10
+	vst1.8		{q0-q1}, [r8]!
+	veor		q9, q4, q11
+	veor		q10, q2, q12
+	vst1.8		{q8-q9}, [r8]!
+	veor		q11, q7, q13
+	vst1.8		{q10-q11}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_5:
+	vst1.64		{q13}, [r0,:128]		@ next round tweak
+
+	veor		q3, q3, q11
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q4, q4, q12
+	mov		r5, r1			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	vld1.64		{q10-q11}, [r0,:128]!
+	veor		q0, q0, q8
+	vld1.64		{q12}, [r0,:128]!
+	veor		q1, q1, q9
+	veor		q8, q6, q10
+	vst1.8		{q0-q1}, [r8]!
+	veor		q9, q4, q11
+	veor		q10, q2, q12
+	vst1.8		{q8-q9}, [r8]!
+	vst1.8		{q10}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_4:
+	vst1.64		{q12}, [r0,:128]		@ next round tweak
+
+	veor		q2, q2, q10
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q3, q3, q11
+	mov		r5, r1			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	vld1.64		{q10-q11}, [r0,:128]!
+	veor		q0, q0, q8
+	veor		q1, q1, q9
+	veor		q8, q6, q10
+	vst1.8		{q0-q1}, [r8]!
+	veor		q9, q4, q11
+	vst1.8		{q8-q9}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_3:
+	vst1.64		{q11}, [r0,:128]		@ next round tweak
+
+	veor		q1, q1, q9
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q2, q2, q10
+	mov		r5, r1			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	vld1.64		{q10}, [r0,:128]!
+	veor		q0, q0, q8
+	veor		q1, q1, q9
+	veor		q8, q6, q10
+	vst1.8		{q0-q1}, [r8]!
+	vst1.8		{q8}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_2:
+	vst1.64		{q10}, [r0,:128]		@ next round tweak
+
+	veor		q0, q0, q8
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, r10, #248			@ pass key schedule
+#endif
+	veor		q1, q1, q9
+	mov		r5, r1			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{q8-q9}, [r0,:128]!
+	veor		q0, q0, q8
+	veor		q1, q1, q9
+	vst1.8		{q0-q1}, [r8]!
+
+	vld1.64		{q8}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_1:
+	mov		r0, sp
+	veor		q0, q8
+	mov		r1, sp
+	vst1.8		{q0}, [sp,:128]
+	mov		r2, r10
+	mov		r4, r3				@ preserve fp
+	mov		r5, r2			@ preserve magic
+
+	bl		AES_decrypt
+
+	vld1.8		{q0}, [sp,:128]
+	veor		q0, q0, q8
+	vst1.8		{q0}, [r8]!
+	mov		r3, r4
+	mov		r2, r5
+
+	vmov		q8, q9		@ next round tweak
+
+.Lxts_dec_done:
+#ifndef	XTS_CHAIN_TWEAK
+	adds		r9, #0x10
+	beq		.Lxts_dec_ret
+
+	@ calculate one round of extra tweak for the stolen ciphertext
+	vldmia		r2, {q5}
+	vshr.s64	q6, q8, #63
+	vand		q6, q6, q5
+	vadd.u64	q9, q8, q8
+	vswp		d13,d12
+	veor		q9, q9, q6
+
+	@ perform the final decryption with the last tweak value
+	vld1.8		{q0}, [r7]!
+	mov		r0, sp
+	veor		q0, q0, q9
+	mov		r1, sp
+	vst1.8		{q0}, [sp,:128]
+	mov		r2, r10
+	mov		r4, r3			@ preserve fp
+
+	bl		AES_decrypt
+
+	vld1.8		{q0}, [sp,:128]
+	veor		q0, q0, q9
+	vst1.8		{q0}, [r8]
+
+	mov		r6, r8
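+	@ ciphertext stealing: move the head of the block just produced to
+	@ the output tail, splice the remaining ciphertext bytes in, then
+	@ decrypt the reassembled block with tweak q8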
+.Lxts_dec_steal:
+	ldrb		r1, [r8]
+	ldrb		r0, [r7], #1
+	strb		r1, [r8, #0x10]
+	strb		r0, [r8], #1
+
+	subs		r9, #1
+	bhi		.Lxts_dec_steal
+
+	vld1.8		{q0}, [r6]
+	mov		r0, sp
+	veor		q0, q8
+	mov		r1, sp
+	vst1.8		{q0}, [sp,:128]
+	mov		r2, r10
+
+	bl		AES_decrypt
+
+	vld1.8		{q0}, [sp,:128]
+	veor		q0, q0, q8
+	vst1.8		{q0}, [r6]
+	mov		r3, r4
+#endif
+
+.Lxts_dec_ret:
+	bic		r0, r3, #0xf
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+#ifdef	XTS_CHAIN_TWEAK
+	ldr		r1, [r3, #0x20+VFP_ABI_FRAME]	@ chain tweak
+#endif
+.Lxts_dec_bzero:				@ wipe key schedule [if any]
+	vstmia		sp!, {q0-q1}
+	cmp		sp, r0
+	bne		.Lxts_dec_bzero
+
+	mov		sp, r3
+#ifdef	XTS_CHAIN_TWEAK
+	vst1.8		{q8}, [r1]
+#endif
+	VFP_ABI_POP
+	ldmia		sp!, {r4-r10, pc}	@ return
+
+.size	bsaes_xts_decrypt,.-bsaes_xts_decrypt
+#endif
diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c
new file mode 100644
index 0000000..15468fb
--- /dev/null
+++ b/arch/arm/crypto/aesbs-glue.c
@@ -0,0 +1,434 @@
+/*
+ * linux/arch/arm/crypto/aesbs-glue.c - glue code for NEON bit sliced AES
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <crypto/aes.h>
+#include <crypto/ablk_helper.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+
+#include "aes_glue.h"
+
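+/*
+ * Room for the bit-sliced key schedule: 128 bytes per inner round key
+ * (see the layout comments in the generated .S file) plus two unsliced
+ * 16-byte round keys.
+ */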
+#define BIT_SLICED_KEY_MAXSIZE	(128 * (AES_MAXNR - 1) + 2 * AES_BLOCK_SIZE)
+
+struct BS_KEY {
+	struct AES_KEY	rk;
+	int		converted;
+	u8 __aligned(8)	bs[BIT_SLICED_KEY_MAXSIZE];
+} __aligned(8);
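+/*
+ * 'converted' sits directly after the AES_KEY round count (offset 244 in
+ * the assembly): the BSAES_ASM_EXTENDED_KEY code tests it and bit-slices
+ * the round keys into bs[] on first use, so setkey only clears the flag.
+ */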
+
+asmlinkage void bsaes_enc_key_convert(u8 out[], struct AES_KEY const *in);
+asmlinkage void bsaes_dec_key_convert(u8 out[], struct AES_KEY const *in);
+
+asmlinkage void bsaes_cbc_encrypt(u8 const in[], u8 out[], u32 bytes,
+				  struct BS_KEY *key, u8 iv[]);
+
+asmlinkage void bsaes_ctr32_encrypt_blocks(u8 const in[], u8 out[], u32 blocks,
+					   struct BS_KEY *key, u8 const iv[]);
+
+asmlinkage void bsaes_xts_encrypt(u8 const in[], u8 out[], u32 bytes,
+				  struct BS_KEY *key, u8 tweak[]);
+
+asmlinkage void bsaes_xts_decrypt(u8 const in[], u8 out[], u32 bytes,
+				  struct BS_KEY *key, u8 tweak[]);
+
+struct aesbs_cbc_ctx {
+	struct AES_KEY	enc;
+	struct BS_KEY	dec;
+};
+
+struct aesbs_ctr_ctx {
+	struct BS_KEY	enc;
+};
+
+struct aesbs_xts_ctx {
+	struct BS_KEY	enc;
+	struct BS_KEY	dec;
+	struct AES_KEY	twkey;
+};
+
+static int aesbs_cbc_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+			     unsigned int key_len)
+{
+	struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+	int bits = key_len * 8;
+
+	if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc)) {
+		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		return -EINVAL;
+	}
+	ctx->dec.rk = ctx->enc;
+	private_AES_set_decrypt_key(in_key, bits, &ctx->dec.rk);
+	ctx->dec.converted = 0;
+	return 0;
+}
+
+static int aesbs_ctr_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+			     unsigned int key_len)
+{
+	struct aesbs_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+	int bits = key_len * 8;
+
+	if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
+		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		return -EINVAL;
+	}
+	ctx->enc.converted = 0;
+	return 0;
+}
+
+static int aesbs_xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+			     unsigned int key_len)
+{
+	struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+	int bits = key_len * 4;
+
+	if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
+		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		return -EINVAL;
+	}
+	ctx->dec.rk = ctx->enc.rk;
+	private_AES_set_decrypt_key(in_key, bits, &ctx->dec.rk);
+	private_AES_set_encrypt_key(in_key + key_len / 2, bits, &ctx->twkey);
+	ctx->enc.converted = ctx->dec.converted = 0;
+	return 0;
+}
+
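+/*
+ * CBC encryption is inherently serial (each block is chained through the
+ * previous ciphertext), so it cannot use the 8-way bit-sliced NEON path
+ * and processes one block at a time with the scalar AES_encrypt instead.
+ */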
+static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
+			     struct scatterlist *dst,
+			     struct scatterlist *src, unsigned int nbytes)
+{
+	struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while (walk.nbytes) {
+		u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
+		u8 *src = walk.src.virt.addr;
+
+		if (walk.dst.virt.addr == walk.src.virt.addr) {
+			u8 *iv = walk.iv;
+
+			do {
+				crypto_xor(src, iv, AES_BLOCK_SIZE);
+				AES_encrypt(src, src, &ctx->enc);
+				iv = src;
+				src += AES_BLOCK_SIZE;
+			} while (--blocks);
+			memcpy(walk.iv, iv, AES_BLOCK_SIZE);
+		} else {
+			u8 *dst = walk.dst.virt.addr;
+
+			do {
+				crypto_xor(walk.iv, src, AES_BLOCK_SIZE);
+				AES_encrypt(walk.iv, dst, &ctx->enc);
+				memcpy(walk.iv, dst, AES_BLOCK_SIZE);
+				src += AES_BLOCK_SIZE;
+				dst += AES_BLOCK_SIZE;
+			} while (--blocks);
+		}
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+	}
+	return err;
+}
+
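+/*
+ * CBC decryption parallelizes, so full groups of 8 blocks go through the
+ * bit-sliced NEON routine; the remainder is decrypted one block at a time
+ * with the scalar AES_decrypt, buffering ciphertext in bk[] so that
+ * in-place operation does not clobber the chaining value.
+ */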
+static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
+			     struct scatterlist *dst,
+			     struct scatterlist *src, unsigned int nbytes)
+{
+	struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+	while ((walk.nbytes / AES_BLOCK_SIZE) >= 8) {
+		kernel_neon_begin();
+		bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
+				  walk.nbytes, &ctx->dec, walk.iv);
+		kernel_neon_end();
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+	}
+	while (walk.nbytes) {
+		u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
+		u8 *dst = walk.dst.virt.addr;
+		u8 *src = walk.src.virt.addr;
+		u8 bk[2][AES_BLOCK_SIZE];
+		u8 *iv = walk.iv;
+
+		do {
+			if (walk.dst.virt.addr == walk.src.virt.addr)
+				memcpy(bk[blocks & 1], src, AES_BLOCK_SIZE);
+
+			AES_decrypt(src, dst, &ctx->dec.rk);
+			crypto_xor(dst, iv, AES_BLOCK_SIZE);
+
+			if (walk.dst.virt.addr == walk.src.virt.addr)
+				iv = bk[blocks & 1];
+			else
+				iv = src;
+
+			dst += AES_BLOCK_SIZE;
+			src += AES_BLOCK_SIZE;
+		} while (--blocks);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+	}
+	return err;
+}
+
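+/*
+ * Add 'addend' to a 128-bit big-endian counter: start at the least
+ * significant 32-bit word and only continue into the next word when the
+ * addition wrapped (n < addend), i.e. to propagate the carry.
+ */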
+static void inc_be128_ctr(__be32 ctr[], u32 addend)
+{
+	int i;
+
+	for (i = 3; i >= 0; i--, addend = 1) {
+		u32 n = be32_to_cpu(ctr[i]) + addend;
+
+		ctr[i] = cpu_to_be32(n);
+		if (n >= addend)
+			break;
+	}
+}
+
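+/*
+ * CTR mode: the NEON routine only increments the low 32 bits of the
+ * counter, so each batch is capped at 'headroom + 1' blocks and
+ * inc_be128_ctr() then carries into the upper words.  A trailing partial
+ * block is handled by generating one keystream block with the scalar
+ * AES_encrypt and XORing it in.
+ */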
+static int aesbs_ctr_encrypt(struct blkcipher_desc *desc,
+			     struct scatterlist *dst, struct scatterlist *src,
+			     unsigned int nbytes)
+{
+	struct aesbs_ctr_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	u32 blocks;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+	while ((blocks = walk.nbytes / AES_BLOCK_SIZE)) {
+		u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+		__be32 *ctr = (__be32 *)walk.iv;
+		u32 headroom = UINT_MAX - be32_to_cpu(ctr[3]);
+
+		/* avoid 32 bit counter overflow in the NEON code */
+		if (unlikely(headroom < blocks)) {
+			blocks = headroom + 1;
+			tail = walk.nbytes - blocks * AES_BLOCK_SIZE;
+		}
+		kernel_neon_begin();
+		bsaes_ctr32_encrypt_blocks(walk.src.virt.addr,
+					   walk.dst.virt.addr, blocks,
+					   &ctx->enc, walk.iv);
+		kernel_neon_end();
+		inc_be128_ctr(ctr, blocks);
+
+		nbytes -= blocks * AES_BLOCK_SIZE;
+		if (nbytes && nbytes == tail && nbytes <= AES_BLOCK_SIZE)
+			break;
+
+		err = blkcipher_walk_done(desc, &walk, tail);
+	}
+	if (walk.nbytes) {
+		u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
+		u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+		u8 ks[AES_BLOCK_SIZE];
+
+		AES_encrypt(walk.iv, ks, &ctx->enc.rk);
+		if (tdst != tsrc)
+			memcpy(tdst, tsrc, nbytes);
+		crypto_xor(tdst, ks, nbytes);
+		err = blkcipher_walk_done(desc, &walk, 0);
+	}
+	return err;
+}
+
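+/*
+ * XTS: the IV is encrypted once with the tweak key to produce the
+ * initial tweak; the assembly routines then derive the per-block tweaks
+ * themselves by doubling in GF(2^128).
+ */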
+static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
+			     struct scatterlist *dst,
+			     struct scatterlist *src, unsigned int nbytes)
+{
+	struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+	/* generate the initial tweak */
+	AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
+
+	while (walk.nbytes) {
+		kernel_neon_begin();
+		bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
+				  walk.nbytes, &ctx->enc, walk.iv);
+		kernel_neon_end();
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+	}
+	return err;
+}
+
+static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
+			     struct scatterlist *dst,
+			     struct scatterlist *src, unsigned int nbytes)
+{
+	struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+	/* generate the initial tweak */
+	AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
+
+	while (walk.nbytes) {
+		kernel_neon_begin();
+		bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
+				  walk.nbytes, &ctx->dec, walk.iv);
+		kernel_neon_end();
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+	}
+	return err;
+}
+
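+/*
+ * The "__driver-*" blkcipher entries (priority 0) are the synchronous
+ * internal implementations; users reach them through the ablk_helper
+ * based "cbc/ctr/xts(aes)" wrappers below, which hand the request off to
+ * an asynchronous worker whenever the NEON unit cannot be used in the
+ * calling context.
+ */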
+static struct crypto_alg aesbs_algs[] = { {
+	.cra_name		= "__cbc-aes-neonbs",
+	.cra_driver_name	= "__driver-cbc-aes-neonbs",
+	.cra_priority		= 0,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct aesbs_cbc_ctx),
+	.cra_alignmask		= 7,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_blkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= aesbs_cbc_set_key,
+		.encrypt	= aesbs_cbc_encrypt,
+		.decrypt	= aesbs_cbc_decrypt,
+	},
+}, {
+	.cra_name		= "__ctr-aes-neonbs",
+	.cra_driver_name	= "__driver-ctr-aes-neonbs",
+	.cra_priority		= 0,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= 1,
+	.cra_ctxsize		= sizeof(struct aesbs_ctr_ctx),
+	.cra_alignmask		= 7,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_blkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= aesbs_ctr_set_key,
+		.encrypt	= aesbs_ctr_encrypt,
+		.decrypt	= aesbs_ctr_encrypt,
+	},
+}, {
+	.cra_name		= "__xts-aes-neonbs",
+	.cra_driver_name	= "__driver-xts-aes-neonbs",
+	.cra_priority		= 0,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct aesbs_xts_ctx),
+	.cra_alignmask		= 7,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_blkcipher = {
+		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
+		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= aesbs_xts_set_key,
+		.encrypt	= aesbs_xts_encrypt,
+		.decrypt	= aesbs_xts_decrypt,
+	},
+}, {
+	.cra_name		= "cbc(aes)",
+	.cra_driver_name	= "cbc-aes-neonbs",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct async_helper_ctx),
+	.cra_alignmask		= 7,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= ablk_init,
+	.cra_exit		= ablk_exit,
+	.cra_ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= ablk_set_key,
+		.encrypt	= __ablk_encrypt,
+		.decrypt	= ablk_decrypt,
+	}
+}, {
+	.cra_name		= "ctr(aes)",
+	.cra_driver_name	= "ctr-aes-neonbs",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= 1,
+	.cra_ctxsize		= sizeof(struct async_helper_ctx),
+	.cra_alignmask		= 7,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= ablk_init,
+	.cra_exit		= ablk_exit,
+	.cra_ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= ablk_set_key,
+		.encrypt	= ablk_encrypt,
+		.decrypt	= ablk_decrypt,
+	}
+}, {
+	.cra_name		= "xts(aes)",
+	.cra_driver_name	= "xts-aes-neonbs",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct async_helper_ctx),
+	.cra_alignmask		= 7,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= ablk_init,
+	.cra_exit		= ablk_exit,
+	.cra_ablkcipher = {
+		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
+		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= ablk_set_key,
+		.encrypt	= ablk_encrypt,
+		.decrypt	= ablk_decrypt,
+	}
+} };
+
+static int __init aesbs_mod_init(void)
+{
+	if (!cpu_has_neon())
+		return -ENODEV;
+
+	return crypto_register_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
+}
+
+static void __exit aesbs_mod_exit(void)
+{
+	crypto_unregister_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
+}
+
+module_init(aesbs_mod_init);
+module_exit(aesbs_mod_exit);
+
+MODULE_DESCRIPTION("Bit sliced AES in CBC/CTR/XTS modes using NEON");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl
new file mode 100644
index 0000000..be068db
--- /dev/null
+++ b/arch/arm/crypto/bsaes-armv7.pl
@@ -0,0 +1,2467 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+#
+# Specific modes and adaptation for Linux kernel by Ard Biesheuvel
+# <ard.biesheuvel@linaro.org>. Permission to use under GPL terms is
+# granted.
+# ====================================================================
+
+# Bit-sliced AES for ARM NEON
+#
+# February 2012.
+#
+# This implementation is a direct adaptation of the bsaes-x86_64 module
+# for ARM NEON, except that this module is endian-neutral [in the sense
+# that it can be compiled for either endianness] courtesy of vld1.8's
+# neutrality. The initial version doesn't implement an interface to
+# OpenSSL, only low-level primitives and unsupported entry points, just
+# enough to collect performance results, which for the Cortex-A8 core are:
+#
+# encrypt	19.5 cycles per byte processed with 128-bit key
+# decrypt	22.1 cycles per byte processed with 128-bit key
+# key conv.	440  cycles per 128-bit key/0.18 of 8x block
+#
+# Snapdragon S4 encrypts a byte in 17.6 cycles and decrypts in 19.7,
+# which is [much] worse than anticipated (for further details see
+# http://www.openssl.org/~appro/Snapdragon-S4.html).
+#
+# Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code
+# manages in 20.0 cycles].
+#
+# When comparing to x86_64 results keep in mind that NEON unit is
+# [mostly] single-issue and thus can't [fully] benefit from
+# instruction-level parallelism. And when comparing to aes-armv4
+# results keep in mind key schedule conversion overhead (see
+# bsaes-x86_64.pl for further details)...
+#
+#						<appro@openssl.org>
+
+# April-August 2013
+#
+# Add CBC, CTR and XTS subroutines, adapt for kernel use.
+#
+#					<ard.biesheuvel@linaro.org>
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+my ($inp,$out,$len,$key)=("r0","r1","r2","r3");
+my @XMM=map("q$_",(0..15));
+
+{
+my ($key,$rounds,$const)=("r4","r5","r6");
+
+sub Dlo()   { shift=~m|q([1]?[0-9])|?"d".($1*2):"";     }
+sub Dhi()   { shift=~m|q([1]?[0-9])|?"d".($1*2+1):"";   }
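+# Dlo/Dhi map a NEON quad register qN onto its low and high double-word
+# halves d(2N) and d(2N+1), which is the granularity vtbl.8 works at.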
+
+sub Sbox {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b0, b1, b4, b6, b3, b7, b2, b5] < msb
+my @b=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+	&InBasisChange	(@b);
+	&Inv_GF256	(@b[6,5,0,3,7,1,4,2],@t,@s);
+	&OutBasisChange	(@b[7,1,4,2,6,5,0,3]);
+}
+
+sub InBasisChange {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b6, b5, b0, b3, b7, b1, b4, b2] < msb 
+my @b=@_[0..7];
+$code.=<<___;
+	veor	@b[2], @b[2], @b[1]
+	veor	@b[5], @b[5], @b[6]
+	veor	@b[3], @b[3], @b[0]
+	veor	@b[6], @b[6], @b[2]
+	veor	@b[5], @b[5], @b[0]
+
+	veor	@b[6], @b[6], @b[3]
+	veor	@b[3], @b[3], @b[7]
+	veor	@b[7], @b[7], @b[5]
+	veor	@b[3], @b[3], @b[4]
+	veor	@b[4], @b[4], @b[5]
+
+	veor	@b[2], @b[2], @b[7]
+	veor	@b[3], @b[3], @b[1]
+	veor	@b[1], @b[1], @b[5]
+___
+}
+
+sub OutBasisChange {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b6, b1, b2, b4, b7, b0, b3, b5] < msb
+my @b=@_[0..7];
+$code.=<<___;
+	veor	@b[0], @b[0], @b[6]
+	veor	@b[1], @b[1], @b[4]
+	veor	@b[4], @b[4], @b[6]
+	veor	@b[2], @b[2], @b[0]
+	veor	@b[6], @b[6], @b[1]
+
+	veor	@b[1], @b[1], @b[5]
+	veor	@b[5], @b[5], @b[3]
+	veor	@b[3], @b[3], @b[7]
+	veor	@b[7], @b[7], @b[5]
+	veor	@b[2], @b[2], @b[5]
+
+	veor	@b[4], @b[4], @b[7]
+___
+}
+
+sub InvSbox {
+# input in lsb 	> [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb	> [b0, b1, b6, b4, b2, b7, b3, b5] < msb
+my @b=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+	&InvInBasisChange	(@b);
+	&Inv_GF256		(@b[5,1,2,6,3,7,0,4],@t,@s);
+	&InvOutBasisChange	(@b[3,7,0,4,5,1,2,6]);
+}
+
+sub InvInBasisChange {		# OutBasisChange in reverse (with twist)
+my @b=@_[5,1,2,6,3,7,0,4];
+$code.=<<___
+	 veor	@b[1], @b[1], @b[7]
+	veor	@b[4], @b[4], @b[7]
+
+	veor	@b[7], @b[7], @b[5]
+	 veor	@b[1], @b[1], @b[3]
+	veor	@b[2], @b[2], @b[5]
+	veor	@b[3], @b[3], @b[7]
+
+	veor	@b[6], @b[6], @b[1]
+	veor	@b[2], @b[2], @b[0]
+	 veor	@b[5], @b[5], @b[3]
+	veor	@b[4], @b[4], @b[6]
+	veor	@b[0], @b[0], @b[6]
+	veor	@b[1], @b[1], @b[4]
+___
+}
+
+sub InvOutBasisChange {		# InBasisChange in reverse
+my @b=@_[2,5,7,3,6,1,0,4];
+$code.=<<___;
+	veor	@b[1], @b[1], @b[5]
+	veor	@b[2], @b[2], @b[7]
+
+	veor	@b[3], @b[3], @b[1]
+	veor	@b[4], @b[4], @b[5]
+	veor	@b[7], @b[7], @b[5]
+	veor	@b[3], @b[3], @b[4]
+	 veor 	@b[5], @b[5], @b[0]
+	veor	@b[3], @b[3], @b[7]
+	 veor	@b[6], @b[6], @b[2]
+	 veor	@b[2], @b[2], @b[1]
+	veor	@b[6], @b[6], @b[3]
+
+	veor	@b[3], @b[3], @b[0]
+	veor	@b[5], @b[5], @b[6]
+___
+}
+
+sub Mul_GF4 {
+#;*************************************************************
+#;* Mul_GF4: Input x0-x1,y0-y1 Output x0-x1 Temp t0 (8) *
+#;*************************************************************
+my ($x0,$x1,$y0,$y1,$t0,$t1)=@_;
+$code.=<<___;
+	veor 	$t0, $y0, $y1
+	vand	$t0, $t0, $x0
+	veor	$x0, $x0, $x1
+	vand	$t1, $x1, $y0
+	vand	$x0, $x0, $y1
+	veor	$x1, $t1, $t0
+	veor	$x0, $x0, $t1
+___
+}
+
+sub Mul_GF4_N {				# not used, see next subroutine
+# multiply and scale by N
+my ($x0,$x1,$y0,$y1,$t0)=@_;
+$code.=<<___;
+	veor	$t0, $y0, $y1
+	vand	$t0, $t0, $x0
+	veor	$x0, $x0, $x1
+	vand	$x1, $x1, $y0
+	vand	$x0, $x0, $y1
+	veor	$x1, $x1, $x0
+	veor	$x0, $x0, $t0
+___
+}
+
+sub Mul_GF4_N_GF4 {
+# interleaved Mul_GF4_N and Mul_GF4
+my ($x0,$x1,$y0,$y1,$t0,
+    $x2,$x3,$y2,$y3,$t1)=@_;
+$code.=<<___;
+	veor	$t0, $y0, $y1
+	 veor 	$t1, $y2, $y3
+	vand	$t0, $t0, $x0
+	 vand	$t1, $t1, $x2
+	veor	$x0, $x0, $x1
+	 veor	$x2, $x2, $x3
+	vand	$x1, $x1, $y0
+	 vand	$x3, $x3, $y2
+	vand	$x0, $x0, $y1
+	 vand	$x2, $x2, $y3
+	veor	$x1, $x1, $x0
+	 veor	$x2, $x2, $x3
+	veor	$x0, $x0, $t0
+	 veor	$x3, $x3, $t1
+___
+}
+
+sub Mul_GF16_2 {
+my @x=@_[0..7];
+my @y=@_[8..11];
+my @t=@_[12..15];
+$code.=<<___;
+	veor	@t[0], @x[0], @x[2]
+	veor	@t[1], @x[1], @x[3]
+___
+	&Mul_GF4  	(@x[0], @x[1], @y[0], @y[1], @t[2..3]);
+$code.=<<___;
+	veor	@y[0], @y[0], @y[2]
+	veor	@y[1], @y[1], @y[3]
+___
+	Mul_GF4_N_GF4	(@t[0], @t[1], @y[0], @y[1], @t[3],
+			 @x[2], @x[3], @y[2], @y[3], @t[2]);
+$code.=<<___;
+	veor	@x[0], @x[0], @t[0]
+	veor	@x[2], @x[2], @t[0]
+	veor	@x[1], @x[1], @t[1]
+	veor	@x[3], @x[3], @t[1]
+
+	veor	@t[0], @x[4], @x[6]
+	veor	@t[1], @x[5], @x[7]
+___
+	&Mul_GF4_N_GF4	(@t[0], @t[1], @y[0], @y[1], @t[3],
+			 @x[6], @x[7], @y[2], @y[3], @t[2]);
+$code.=<<___;
+	veor	@y[0], @y[0], @y[2]
+	veor	@y[1], @y[1], @y[3]
+___
+	&Mul_GF4  	(@x[4], @x[5], @y[0], @y[1], @t[2..3]);
+$code.=<<___;
+	veor	@x[4], @x[4], @t[0]
+	veor	@x[6], @x[6], @t[0]
+	veor	@x[5], @x[5], @t[1]
+	veor	@x[7], @x[7], @t[1]
+___
+}
+
+sub Inv_GF256 {
+#;********************************************************************
+#;* Inv_GF256: Input x0-x7 Output x0-x7 Temp t0-t3,s0-s3 (144)       *
+#;********************************************************************
+my @x=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+# direct optimizations from hardware
+$code.=<<___;
+	veor	@t[3], @x[4], @x[6]
+	veor	@t[2], @x[5], @x[7]
+	veor	@t[1], @x[1], @x[3]
+	veor	@s[1], @x[7], @x[6]
+	 vmov	@t[0], @t[2]
+	veor	@s[0], @x[0], @x[2]
+
+	vorr	@t[2], @t[2], @t[1]
+	veor	@s[3], @t[3], @t[0]
+	vand	@s[2], @t[3], @s[0]
+	vorr	@t[3], @t[3], @s[0]
+	veor	@s[0], @s[0], @t[1]
+	vand	@t[0], @t[0], @t[1]
+	veor	@t[1], @x[3], @x[2]
+	vand	@s[3], @s[3], @s[0]
+	vand	@s[1], @s[1], @t[1]
+	veor	@t[1], @x[4], @x[5]
+	veor	@s[0], @x[1], @x[0]
+	veor	@t[3], @t[3], @s[1]
+	veor	@t[2], @t[2], @s[1]
+	vand	@s[1], @t[1], @s[0]
+	vorr	@t[1], @t[1], @s[0]
+	veor	@t[3], @t[3], @s[3]
+	veor	@t[0], @t[0], @s[1]
+	veor	@t[2], @t[2], @s[2]
+	veor	@t[1], @t[1], @s[3]
+	veor	@t[0], @t[0], @s[2]
+	vand	@s[0], @x[7], @x[3]
+	veor	@t[1], @t[1], @s[2]
+	vand	@s[1], @x[6], @x[2]
+	vand	@s[2], @x[5], @x[1]
+	vorr	@s[3], @x[4], @x[0]
+	veor	@t[3], @t[3], @s[0]
+	veor	@t[1], @t[1], @s[2]
+	veor	@t[0], @t[0], @s[3]
+	veor	@t[2], @t[2], @s[1]
+
+	@ Inv_GF16 \t0, \t1, \t2, \t3, \s0, \s1, \s2, \s3
+
+	@ new smaller inversion
+
+	vand	@s[2], @t[3], @t[1]
+	vmov	@s[0], @t[0]
+
+	veor	@s[1], @t[2], @s[2]
+	veor	@s[3], @t[0], @s[2]
+	veor	@s[2], @t[0], @s[2]	@ @s[2]=@s[3]
+
+	vbsl	@s[1], @t[1], @t[0]
+	vbsl	@s[3], @t[3], @t[2]
+	veor	@t[3], @t[3], @t[2]
+
+	vbsl	@s[0], @s[1], @s[2]
+	vbsl	@t[0], @s[2], @s[1]
+
+	vand	@s[2], @s[0], @s[3]
+	veor	@t[1], @t[1], @t[0]
+
+	veor	@s[2], @s[2], @t[3]
+___
+# output in s3, s2, s1, t1
+
+# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \t2, \t3, \t0, \t1, \s0, \s1, \s2, \s3
+
+# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \s3, \s2, \s1, \t1, \s0, \t0, \t2, \t3
+	&Mul_GF16_2(@x,@s[3,2,1],@t[1],@s[0],@t[0,2,3]);
+
+### output msb > [x3,x2,x1,x0,x7,x6,x5,x4] < lsb
+}
+
+# AES linear components
+
+sub ShiftRows {
+my @x=@_[0..7];
+my @t=@_[8..11];
+my $mask=pop;
+$code.=<<___;
+	vldmia	$key!, {@t[0]-@t[3]}
+	veor	@t[0], @t[0], @x[0]
+	veor	@t[1], @t[1], @x[1]
+	vtbl.8	`&Dlo(@x[0])`, {@t[0]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[0])`, {@t[0]}, `&Dhi($mask)`
+	vldmia	$key!, {@t[0]}
+	veor	@t[2], @t[2], @x[2]
+	vtbl.8	`&Dlo(@x[1])`, {@t[1]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[1])`, {@t[1]}, `&Dhi($mask)`
+	vldmia	$key!, {@t[1]}
+	veor	@t[3], @t[3], @x[3]
+	vtbl.8	`&Dlo(@x[2])`, {@t[2]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[2])`, {@t[2]}, `&Dhi($mask)`
+	vldmia	$key!, {@t[2]}
+	vtbl.8	`&Dlo(@x[3])`, {@t[3]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[3])`, {@t[3]}, `&Dhi($mask)`
+	vldmia	$key!, {@t[3]}
+	veor	@t[0], @t[0], @x[4]
+	veor	@t[1], @t[1], @x[5]
+	vtbl.8	`&Dlo(@x[4])`, {@t[0]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[4])`, {@t[0]}, `&Dhi($mask)`
+	veor	@t[2], @t[2], @x[6]
+	vtbl.8	`&Dlo(@x[5])`, {@t[1]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[5])`, {@t[1]}, `&Dhi($mask)`
+	veor	@t[3], @t[3], @x[7]
+	vtbl.8	`&Dlo(@x[6])`, {@t[2]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[6])`, {@t[2]}, `&Dhi($mask)`
+	vtbl.8	`&Dlo(@x[7])`, {@t[3]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[7])`, {@t[3]}, `&Dhi($mask)`
+___
+}
+
+sub MixColumns {
+# modified to emit output in order suitable for feeding back to aesenc[last]
+my @x=@_[0..7];
+my @t=@_[8..15];
+my $inv=@_[16];	# optional
+$code.=<<___;
+	vext.8	@t[0], @x[0], @x[0], #12	@ x0 <<< 32
+	vext.8	@t[1], @x[1], @x[1], #12
+	 veor	@x[0], @x[0], @t[0]		@ x0 ^ (x0 <<< 32)
+	vext.8	@t[2], @x[2], @x[2], #12
+	 veor	@x[1], @x[1], @t[1]
+	vext.8	@t[3], @x[3], @x[3], #12
+	 veor	@x[2], @x[2], @t[2]
+	vext.8	@t[4], @x[4], @x[4], #12
+	 veor	@x[3], @x[3], @t[3]
+	vext.8	@t[5], @x[5], @x[5], #12
+	 veor	@x[4], @x[4], @t[4]
+	vext.8	@t[6], @x[6], @x[6], #12
+	 veor	@x[5], @x[5], @t[5]
+	vext.8	@t[7], @x[7], @x[7], #12
+	 veor	@x[6], @x[6], @t[6]
+
+	veor	@t[1], @t[1], @x[0]
+	 veor	@x[7], @x[7], @t[7]
+	 vext.8	@x[0], @x[0], @x[0], #8		@ (x0 ^ (x0 <<< 32)) <<< 64)
+	veor	@t[2], @t[2], @x[1]
+	veor	@t[0], @t[0], @x[7]
+	veor	@t[1], @t[1], @x[7]
+	 vext.8	@x[1], @x[1], @x[1], #8
+	veor	@t[5], @t[5], @x[4]
+	 veor	@x[0], @x[0], @t[0]
+	veor	@t[6], @t[6], @x[5]
+	 veor	@x[1], @x[1], @t[1]
+	 vext.8	@t[0], @x[4], @x[4], #8
+	veor	@t[4], @t[4], @x[3]
+	 vext.8	@t[1], @x[5], @x[5], #8
+	veor	@t[7], @t[7], @x[6]
+	 vext.8	@x[4], @x[3], @x[3], #8
+	veor	@t[3], @t[3], @x[2]
+	 vext.8	@x[5], @x[7], @x[7], #8
+	veor	@t[4], @t[4], @x[7]
+	 vext.8	@x[3], @x[6], @x[6], #8
+	veor	@t[3], @t[3], @x[7]
+	 vext.8	@x[6], @x[2], @x[2], #8
+	veor	@x[7], @t[1], @t[5]
+___
+$code.=<<___ if (!$inv);
+	veor	@x[2], @t[0], @t[4]
+	veor	@x[4], @x[4], @t[3]
+	veor	@x[5], @x[5], @t[7]
+	veor	@x[3], @x[3], @t[6]
+	 @ vmov	@x[2], @t[0]
+	veor	@x[6], @x[6], @t[2]
+	 @ vmov	@x[7], @t[1]
+___
+$code.=<<___ if ($inv);
+	veor	@t[3], @t[3], @x[4]
+	veor	@x[5], @x[5], @t[7]
+	veor	@x[2], @x[3], @t[6]
+	veor	@x[3], @t[0], @t[4]
+	veor	@x[4], @x[6], @t[2]
+	vmov	@x[6], @t[3]
+	 @ vmov	@x[7], @t[1]
+___
+}
+
+sub InvMixColumns_orig {
+my @x=@_[0..7];
+my @t=@_[8..15];
+
+$code.=<<___;
+	@ multiplication by 0x0e
+	vext.8	@t[7], @x[7], @x[7], #12
+	vmov	@t[2], @x[2]
+	veor	@x[2], @x[2], @x[5]		@ 2 5
+	veor	@x[7], @x[7], @x[5]		@ 7 5
+	vext.8	@t[0], @x[0], @x[0], #12
+	vmov	@t[5], @x[5]
+	veor	@x[5], @x[5], @x[0]		@ 5 0		[1]
+	veor	@x[0], @x[0], @x[1]		@ 0 1
+	vext.8	@t[1], @x[1], @x[1], #12
+	veor	@x[1], @x[1], @x[2]		@ 1 25
+	veor	@x[0], @x[0], @x[6]		@ 01 6		[2]
+	vext.8	@t[3], @x[3], @x[3], #12
+	veor	@x[1], @x[1], @x[3]		@ 125 3		[4]
+	veor	@x[2], @x[2], @x[0]		@ 25 016	[3]
+	veor	@x[3], @x[3], @x[7]		@ 3 75
+	veor	@x[7], @x[7], @x[6]		@ 75 6		[0]
+	vext.8	@t[6], @x[6], @x[6], #12
+	vmov	@t[4], @x[4]
+	veor	@x[6], @x[6], @x[4]		@ 6 4
+	veor	@x[4], @x[4], @x[3]		@ 4 375		[6]
+	veor	@x[3], @x[3], @x[7]		@ 375 756=36
+	veor	@x[6], @x[6], @t[5]		@ 64 5		[7]
+	veor	@x[3], @x[3], @t[2]		@ 36 2
+	vext.8	@t[5], @t[5], @t[5], #12
+	veor	@x[3], @x[3], @t[4]		@ 362 4		[5]
+___
+					my @y = @x[7,5,0,2,1,3,4,6];
+$code.=<<___;
+	@ multiplication by 0x0b
+	veor	@y[1], @y[1], @y[0]
+	veor	@y[0], @y[0], @t[0]
+	vext.8	@t[2], @t[2], @t[2], #12
+	veor	@y[1], @y[1], @t[1]
+	veor	@y[0], @y[0], @t[5]
+	vext.8	@t[4], @t[4], @t[4], #12
+	veor	@y[1], @y[1], @t[6]
+	veor	@y[0], @y[0], @t[7]
+	veor	@t[7], @t[7], @t[6]		@ clobber t[7]
+
+	veor	@y[3], @y[3], @t[0]
+	 veor	@y[1], @y[1], @y[0]
+	vext.8	@t[0], @t[0], @t[0], #12
+	veor	@y[2], @y[2], @t[1]
+	veor	@y[4], @y[4], @t[1]
+	vext.8	@t[1], @t[1], @t[1], #12
+	veor	@y[2], @y[2], @t[2]
+	veor	@y[3], @y[3], @t[2]
+	veor	@y[5], @y[5], @t[2]
+	veor	@y[2], @y[2], @t[7]
+	vext.8	@t[2], @t[2], @t[2], #12
+	veor	@y[3], @y[3], @t[3]
+	veor	@y[6], @y[6], @t[3]
+	veor	@y[4], @y[4], @t[3]
+	veor	@y[7], @y[7], @t[4]
+	vext.8	@t[3], @t[3], @t[3], #12
+	veor	@y[5], @y[5], @t[4]
+	veor	@y[7], @y[7], @t[7]
+	veor	@t[7], @t[7], @t[5]		@ clobber t[7] even more
+	veor	@y[3], @y[3], @t[5]
+	veor	@y[4], @y[4], @t[4]
+
+	veor	@y[5], @y[5], @t[7]
+	vext.8	@t[4], @t[4], @t[4], #12
+	veor	@y[6], @y[6], @t[7]
+	veor	@y[4], @y[4], @t[7]
+
+	veor	@t[7], @t[7], @t[5]
+	vext.8	@t[5], @t[5], @t[5], #12
+
+	@ multiplication by 0x0d
+	veor	@y[4], @y[4], @y[7]
+	 veor	@t[7], @t[7], @t[6]		@ restore t[7]
+	veor	@y[7], @y[7], @t[4]
+	vext.8	@t[6], @t[6], @t[6], #12
+	veor	@y[2], @y[2], @t[0]
+	veor	@y[7], @y[7], @t[5]
+	vext.8	@t[7], @t[7], @t[7], #12
+	veor	@y[2], @y[2], @t[2]
+
+	veor	@y[3], @y[3], @y[1]
+	veor	@y[1], @y[1], @t[1]
+	veor	@y[0], @y[0], @t[0]
+	veor	@y[3], @y[3], @t[0]
+	veor	@y[1], @y[1], @t[5]
+	veor	@y[0], @y[0], @t[5]
+	vext.8	@t[0], @t[0], @t[0], #12
+	veor	@y[1], @y[1], @t[7]
+	veor	@y[0], @y[0], @t[6]
+	veor	@y[3], @y[3], @y[1]
+	veor	@y[4], @y[4], @t[1]
+	vext.8	@t[1], @t[1], @t[1], #12
+
+	veor	@y[7], @y[7], @t[7]
+	veor	@y[4], @y[4], @t[2]
+	veor	@y[5], @y[5], @t[2]
+	veor	@y[2], @y[2], @t[6]
+	veor	@t[6], @t[6], @t[3]		@ clobber t[6]
+	vext.8	@t[2], @t[2], @t[2], #12
+	veor	@y[4], @y[4], @y[7]
+	veor	@y[3], @y[3], @t[6]
+
+	veor	@y[6], @y[6], @t[6]
+	veor	@y[5], @y[5], @t[5]
+	vext.8	@t[5], @t[5], @t[5], #12
+	veor	@y[6], @y[6], @t[4]
+	vext.8	@t[4], @t[4], @t[4], #12
+	veor	@y[5], @y[5], @t[6]
+	veor	@y[6], @y[6], @t[7]
+	vext.8	@t[7], @t[7], @t[7], #12
+	veor	@t[6], @t[6], @t[3]		@ restore t[6]
+	vext.8	@t[3], @t[3], @t[3], #12
+
+	@ multiplication by 0x09
+	veor	@y[4], @y[4], @y[1]
+	veor	@t[1], @t[1], @y[1]		@ t[1]=y[1]
+	veor	@t[0], @t[0], @t[5]		@ clobber t[0]
+	vext.8	@t[6], @t[6], @t[6], #12
+	veor	@t[1], @t[1], @t[5]
+	veor	@y[3], @y[3], @t[0]
+	veor	@t[0], @t[0], @y[0]		@ t[0]=y[0]
+	veor	@t[1], @t[1], @t[6]
+	veor	@t[6], @t[6], @t[7]		@ clobber t[6]
+	veor	@y[4], @y[4], @t[1]
+	veor	@y[7], @y[7], @t[4]
+	veor	@y[6], @y[6], @t[3]
+	veor	@y[5], @y[5], @t[2]
+	veor	@t[4], @t[4], @y[4]		@ t[4]=y[4]
+	veor	@t[3], @t[3], @y[3]		@ t[3]=y[3]
+	veor	@t[5], @t[5], @y[5]		@ t[5]=y[5]
+	veor	@t[2], @t[2], @y[2]		@ t[2]=y[2]
+	veor	@t[3], @t[3], @t[7]
+	veor	@XMM[5], @t[5], @t[6]
+	veor	@XMM[6], @t[6], @y[6]		@ t[6]=y[6]
+	veor	@XMM[2], @t[2], @t[6]
+	veor	@XMM[7], @t[7], @y[7]		@ t[7]=y[7]
+
+	vmov	@XMM[0], @t[0]
+	vmov	@XMM[1], @t[1]
+	@ vmov	@XMM[2], @t[2]
+	vmov	@XMM[3], @t[3]
+	vmov	@XMM[4], @t[4]
+	@ vmov	@XMM[5], @t[5]
+	@ vmov	@XMM[6], @t[6]
+	@ vmov	@XMM[7], @t[7]
+___
+}
+
+sub InvMixColumns {
+my @x=@_[0..7];
+my @t=@_[8..15];
+
+# Thanks to Jussi Kivilinna for providing pointer to
+#
+# | 0e 0b 0d 09 |   | 02 03 01 01 |   | 05 00 04 00 |
+# | 09 0e 0b 0d | = | 01 02 03 01 | x | 00 05 00 04 |
+# | 0d 09 0e 0b |   | 01 01 02 03 |   | 04 00 05 00 |
+# | 0b 0d 09 0e |   | 03 01 01 02 |   | 00 04 00 05 |
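+#
+# As a quick check of one entry, in GF(2^8): row 1 of the middle matrix
+# against column 1 of the right one gives
+# 02*05 ^ 03*00 ^ 01*04 ^ 01*00 = 0x0a ^ 0x04 = 0x0e, matching the (1,1)
+# entry of the InvMixColumns matrix on the left.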
+
+$code.=<<___;
+	@ multiplication by 0x05-0x00-0x04-0x00
+	vext.8	@t[0], @x[0], @x[0], #8
+	vext.8	@t[6], @x[6], @x[6], #8
+	vext.8	@t[7], @x[7], @x[7], #8
+	veor	@t[0], @t[0], @x[0]
+	vext.8	@t[1], @x[1], @x[1], #8
+	veor	@t[6], @t[6], @x[6]
+	vext.8	@t[2], @x[2], @x[2], #8
+	veor	@t[7], @t[7], @x[7]
+	vext.8	@t[3], @x[3], @x[3], #8
+	veor	@t[1], @t[1], @x[1]
+	vext.8	@t[4], @x[4], @x[4], #8
+	veor	@t[2], @t[2], @x[2]
+	vext.8	@t[5], @x[5], @x[5], #8
+	veor	@t[3], @t[3], @x[3]
+	veor	@t[4], @t[4], @x[4]
+	veor	@t[5], @t[5], @x[5]
+
+	 veor	@x[0], @x[0], @t[6]
+	 veor	@x[1], @x[1], @t[6]
+	 veor	@x[2], @x[2], @t[0]
+	 veor	@x[4], @x[4], @t[2]
+	 veor	@x[3], @x[3], @t[1]
+	 veor	@x[1], @x[1], @t[7]
+	 veor	@x[2], @x[2], @t[7]
+	 veor	@x[4], @x[4], @t[6]
+	 veor	@x[5], @x[5], @t[3]
+	 veor	@x[3], @x[3], @t[6]
+	 veor	@x[6], @x[6], @t[4]
+	 veor	@x[4], @x[4], @t[7]
+	 veor	@x[5], @x[5], @t[7]
+	 veor	@x[7], @x[7], @t[5]
+___
+	&MixColumns	(@x,@t,1);	# flipped 2<->3 and 4<->6
+}
+
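+# swapmove is the classic delta-swap: it exchanges the bit positions
+# selected by $mask between $a and $b in six instructions and one
+# temporary register.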
+sub swapmove {
+my ($a,$b,$n,$mask,$t)=@_;
+$code.=<<___;
+	vshr.u64	$t, $b, #$n
+	veor		$t, $t, $a
+	vand		$t, $t, $mask
+	veor		$a, $a, $t
+	vshl.u64	$t, $t, #$n
+	veor		$b, $b, $t
+___
+}
+
+sub swapmove2x {
+my ($a0,$b0,$a1,$b1,$n,$mask,$t0,$t1)=@_;
+$code.=<<___;
+	vshr.u64	$t0, $b0, #$n
+	 vshr.u64	$t1, $b1, #$n
+	veor		$t0, $t0, $a0
+	 veor		$t1, $t1, $a1
+	vand		$t0, $t0, $mask
+	 vand		$t1, $t1, $mask
+	veor		$a0, $a0, $t0
+	vshl.u64	$t0, $t0, #$n
+	 veor		$a1, $a1, $t1
+	 vshl.u64	$t1, $t1, #$n
+	veor		$b0, $b0, $t0
+	 veor		$b1, $b1, $t1
+___
+}
+
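+# bitslice converts eight AES states into bit-sliced form with three
+# rounds of delta-swaps: the 0x55, 0x33 and 0x0f masks exchange bit
+# groups of width 1, 2 and 4 respectively.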
+sub bitslice {
+my @x=reverse(@_[0..7]);
+my ($t0,$t1,$t2,$t3)=@_[8..11];
+$code.=<<___;
+	vmov.i8	$t0,#0x55			@ compose .LBS0
+	vmov.i8	$t1,#0x33			@ compose .LBS1
+___
+	&swapmove2x(@x[0,1,2,3],1,$t0,$t2,$t3);
+	&swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
+$code.=<<___;
+	vmov.i8	$t0,#0x0f			@ compose .LBS2
+___
+	&swapmove2x(@x[0,2,1,3],2,$t1,$t2,$t3);
+	&swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
+
+	&swapmove2x(@x[0,4,1,5],4,$t0,$t2,$t3);
+	&swapmove2x(@x[2,6,3,7],4,$t0,$t2,$t3);
+}
+
+$code.=<<___;
+#ifndef __KERNEL__
+# include "arm_arch.h"
+
+# define VFP_ABI_PUSH	vstmdb	sp!,{d8-d15}
+# define VFP_ABI_POP	vldmia	sp!,{d8-d15}
+# define VFP_ABI_FRAME	0x40
+#else
+# define VFP_ABI_PUSH
+# define VFP_ABI_POP
+# define VFP_ABI_FRAME	0
+# define BSAES_ASM_EXTENDED_KEY
+# define XTS_CHAIN_TWEAK
+# define __ARM_ARCH__	7
+#endif
+
+#ifdef __thumb__
+# define adrl adr
+#endif
+
+#if __ARM_ARCH__>=7
+.text
+.syntax	unified 	@ ARMv7-capable assembler is expected to handle this
+#ifdef __thumb2__
+.thumb
+#else
+.code   32
+#endif
+
+.fpu	neon
+
+.type	_bsaes_decrypt8,%function
+.align	4
+_bsaes_decrypt8:
+	adr	$const,_bsaes_decrypt8
+	vldmia	$key!, {@XMM[9]}		@ round 0 key
+	add	$const,$const,#.LM0ISR-_bsaes_decrypt8
+
+	vldmia	$const!, {@XMM[8]}		@ .LM0ISR
+	veor	@XMM[10], @XMM[0], @XMM[9]	@ xor with round0 key
+	veor	@XMM[11], @XMM[1], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[0])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[0])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+	veor	@XMM[12], @XMM[2], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[1])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[1])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+	veor	@XMM[13], @XMM[3], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[2])`, {@XMM[12]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[2])`, {@XMM[12]}, `&Dhi(@XMM[8])`
+	veor	@XMM[14], @XMM[4], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[3])`, {@XMM[13]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[3])`, {@XMM[13]}, `&Dhi(@XMM[8])`
+	veor	@XMM[15], @XMM[5], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[4])`, {@XMM[14]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[4])`, {@XMM[14]}, `&Dhi(@XMM[8])`
+	veor	@XMM[10], @XMM[6], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[5])`, {@XMM[15]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[5])`, {@XMM[15]}, `&Dhi(@XMM[8])`
+	veor	@XMM[11], @XMM[7], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[6])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[6])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+	 vtbl.8	`&Dlo(@XMM[7])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[7])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+___
+	&bitslice	(@XMM[0..7, 8..11]);
+$code.=<<___;
+	sub	$rounds,$rounds,#1
+	b	.Ldec_sbox
+.align	4
+.Ldec_loop:
+___
+	&ShiftRows	(@XMM[0..7, 8..12]);
+$code.=".Ldec_sbox:\n";
+	&InvSbox	(@XMM[0..7, 8..15]);
+$code.=<<___;
+	subs	$rounds,$rounds,#1
+	bcc	.Ldec_done
+___
+	&InvMixColumns	(@XMM[0,1,6,4,2,7,3,5, 8..15]);
+$code.=<<___;
+	vldmia	$const, {@XMM[12]}		@ .LISR
+	ite	eq				@ Thumb2 thing, sanity check in ARM
+	addeq	$const,$const,#0x10
+	bne	.Ldec_loop
+	vldmia	$const, {@XMM[12]}		@ .LISRM0
+	b	.Ldec_loop
+.align	4
+.Ldec_done:
+___
+	&bitslice	(@XMM[0,1,6,4,2,7,3,5, 8..11]);
+$code.=<<___;
+	vldmia	$key, {@XMM[8]}			@ last round key
+	veor	@XMM[6], @XMM[6], @XMM[8]
+	veor	@XMM[4], @XMM[4], @XMM[8]
+	veor	@XMM[2], @XMM[2], @XMM[8]
+	veor	@XMM[7], @XMM[7], @XMM[8]
+	veor	@XMM[3], @XMM[3], @XMM[8]
+	veor	@XMM[5], @XMM[5], @XMM[8]
+	veor	@XMM[0], @XMM[0], @XMM[8]
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	bx	lr
+.size	_bsaes_decrypt8,.-_bsaes_decrypt8
+
+.type	_bsaes_const,%object
+.align	6
+_bsaes_const:
+.LM0ISR:	@ InvShiftRows constants
+	.quad	0x0a0e0206070b0f03, 0x0004080c0d010509
+.LISR:
+	.quad	0x0504070602010003, 0x0f0e0d0c080b0a09
+.LISRM0:
+	.quad	0x01040b0e0205080f, 0x0306090c00070a0d
+.LM0SR:		@ ShiftRows constants
+	.quad	0x0a0e02060f03070b, 0x0004080c05090d01
+.LSR:
+	.quad	0x0504070600030201, 0x0f0e0d0c0a09080b
+.LSRM0:
+	.quad	0x0304090e00050a0f, 0x01060b0c0207080d
+.LM0:
+	.quad	0x02060a0e03070b0f, 0x0004080c0105090d
+.LREVM0SR:
+	.quad	0x090d01050c000408, 0x03070b0f060a0e02
+.asciz	"Bit-sliced AES for NEON, CRYPTOGAMS by <appro\@openssl.org>"
+.align	6
+.size	_bsaes_const,.-_bsaes_const
+
+.type	_bsaes_encrypt8,%function
+.align	4
+_bsaes_encrypt8:
+	adr	$const,_bsaes_encrypt8
+	vldmia	$key!, {@XMM[9]}		@ round 0 key
+	sub	$const,$const,#_bsaes_encrypt8-.LM0SR
+
+	vldmia	$const!, {@XMM[8]}		@ .LM0SR
+_bsaes_encrypt8_alt:
+	veor	@XMM[10], @XMM[0], @XMM[9]	@ xor with round0 key
+	veor	@XMM[11], @XMM[1], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[0])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[0])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+	veor	@XMM[12], @XMM[2], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[1])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[1])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+	veor	@XMM[13], @XMM[3], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[2])`, {@XMM[12]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[2])`, {@XMM[12]}, `&Dhi(@XMM[8])`
+	veor	@XMM[14], @XMM[4], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[3])`, {@XMM[13]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[3])`, {@XMM[13]}, `&Dhi(@XMM[8])`
+	veor	@XMM[15], @XMM[5], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[4])`, {@XMM[14]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[4])`, {@XMM[14]}, `&Dhi(@XMM[8])`
+	veor	@XMM[10], @XMM[6], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[5])`, {@XMM[15]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[5])`, {@XMM[15]}, `&Dhi(@XMM[8])`
+	veor	@XMM[11], @XMM[7], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[6])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[6])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+	 vtbl.8	`&Dlo(@XMM[7])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[7])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+_bsaes_encrypt8_bitslice:
+___
+	&bitslice	(@XMM[0..7, 8..11]);
+$code.=<<___;
+	sub	$rounds,$rounds,#1
+	b	.Lenc_sbox
+.align	4
+.Lenc_loop:
+___
+	&ShiftRows	(@XMM[0..7, 8..12]);
+$code.=".Lenc_sbox:\n";
+	&Sbox		(@XMM[0..7, 8..15]);
+$code.=<<___;
+	subs	$rounds,$rounds,#1
+	bcc	.Lenc_done
+___
+	&MixColumns	(@XMM[0,1,4,6,3,7,2,5, 8..15]);
+$code.=<<___;
+	vldmia	$const, {@XMM[12]}		@ .LSR
+	ite	eq				@ Thumb2 thing, sanity check in ARM
+	addeq	$const,$const,#0x10
+	bne	.Lenc_loop
+	vldmia	$const, {@XMM[12]}		@ .LSRM0
+	b	.Lenc_loop
+.align	4
+.Lenc_done:
+___
+	# output in lsb > [t0, t1, t4, t6, t3, t7, t2, t5] < msb
+	&bitslice	(@XMM[0,1,4,6,3,7,2,5, 8..11]);
+$code.=<<___;
+	vldmia	$key, {@XMM[8]}			@ last round key
+	veor	@XMM[4], @XMM[4], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[8]
+	veor	@XMM[3], @XMM[3], @XMM[8]
+	veor	@XMM[7], @XMM[7], @XMM[8]
+	veor	@XMM[2], @XMM[2], @XMM[8]
+	veor	@XMM[5], @XMM[5], @XMM[8]
+	veor	@XMM[0], @XMM[0], @XMM[8]
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	bx	lr
+.size	_bsaes_encrypt8,.-_bsaes_encrypt8
+___
+}
+{
+my ($out,$inp,$rounds,$const)=("r12","r4","r5","r6");
+
+sub bitslice_key {
+my @x=reverse(@_[0..7]);
+my ($bs0,$bs1,$bs2,$t2,$t3)=@_[8..12];
+
+	&swapmove	(@x[0,1],1,$bs0,$t2,$t3);
+$code.=<<___;
+	@ &swapmove(@x[2,3],1,$t0,$t2,$t3);
+	vmov	@x[2], @x[0]
+	vmov	@x[3], @x[1]
+___
+	#&swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
+
+	&swapmove2x	(@x[0,2,1,3],2,$bs1,$t2,$t3);
+$code.=<<___;
+	@ &swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
+	vmov	@x[4], @x[0]
+	vmov	@x[6], @x[2]
+	vmov	@x[5], @x[1]
+	vmov	@x[7], @x[3]
+___
+	&swapmove2x	(@x[0,4,1,5],4,$bs2,$t2,$t3);
+	&swapmove2x	(@x[2,6,3,7],4,$bs2,$t2,$t3);
+}
+
+$code.=<<___;
+.type	_bsaes_key_convert,%function
+.align	4
+_bsaes_key_convert:
+	adr	$const,_bsaes_key_convert
+	vld1.8	{@XMM[7]},  [$inp]!		@ load round 0 key
+	sub	$const,$const,#_bsaes_key_convert-.LM0
+	vld1.8	{@XMM[15]}, [$inp]!		@ load round 1 key
+
+	vmov.i8	@XMM[8],  #0x01			@ bit masks
+	vmov.i8	@XMM[9],  #0x02
+	vmov.i8	@XMM[10], #0x04
+	vmov.i8	@XMM[11], #0x08
+	vmov.i8	@XMM[12], #0x10
+	vmov.i8	@XMM[13], #0x20
+	vldmia	$const, {@XMM[14]}		@ .LM0
+
+#ifdef __ARMEL__
+	vrev32.8	@XMM[7],  @XMM[7]
+	vrev32.8	@XMM[15], @XMM[15]
+#endif
+	sub	$rounds,$rounds,#1
+	vstmia	$out!, {@XMM[7]}		@ save round 0 key
+	b	.Lkey_loop
+
+.align	4
+.Lkey_loop:
+	vtbl.8	`&Dlo(@XMM[7])`,{@XMM[15]},`&Dlo(@XMM[14])`
+	vtbl.8	`&Dhi(@XMM[7])`,{@XMM[15]},`&Dhi(@XMM[14])`
+	vmov.i8	@XMM[6],  #0x40
+	vmov.i8	@XMM[15], #0x80
+
+	vtst.8	@XMM[0], @XMM[7], @XMM[8]
+	vtst.8	@XMM[1], @XMM[7], @XMM[9]
+	vtst.8	@XMM[2], @XMM[7], @XMM[10]
+	vtst.8	@XMM[3], @XMM[7], @XMM[11]
+	vtst.8	@XMM[4], @XMM[7], @XMM[12]
+	vtst.8	@XMM[5], @XMM[7], @XMM[13]
+	vtst.8	@XMM[6], @XMM[7], @XMM[6]
+	vtst.8	@XMM[7], @XMM[7], @XMM[15]
+	vld1.8	{@XMM[15]}, [$inp]!		@ load next round key
+	vmvn	@XMM[0], @XMM[0]		@ "pnot"
+	vmvn	@XMM[1], @XMM[1]
+	vmvn	@XMM[5], @XMM[5]
+	vmvn	@XMM[6], @XMM[6]
+#ifdef __ARMEL__
+	vrev32.8	@XMM[15], @XMM[15]
+#endif
+	subs	$rounds,$rounds,#1
+	vstmia	$out!,{@XMM[0]-@XMM[7]}		@ write bit-sliced round key
+	bne	.Lkey_loop
+
+	vmov.i8	@XMM[7],#0x63			@ compose .L63
+	@ don't save last round key
+	bx	lr
+.size	_bsaes_key_convert,.-_bsaes_key_convert
+___
+}
+
+if (0) {		# the following four functions are an unsupported interface
+			# used for benchmarking...
+$code.=<<___;
+.globl	bsaes_enc_key_convert
+.type	bsaes_enc_key_convert,%function
+.align	4
+bsaes_enc_key_convert:
+	stmdb	sp!,{r4-r6,lr}
+	vstmdb	sp!,{d8-d15}		@ ABI specification says so
+
+	ldr	r5,[$inp,#240]			@ pass rounds
+	mov	r4,$inp				@ pass key
+	mov	r12,$out			@ pass key schedule
+	bl	_bsaes_key_convert
+	veor	@XMM[7],@XMM[7],@XMM[15]	@ fix up last round key
+	vstmia	r12, {@XMM[7]}			@ save last round key
+
+	vldmia	sp!,{d8-d15}
+	ldmia	sp!,{r4-r6,pc}
+.size	bsaes_enc_key_convert,.-bsaes_enc_key_convert
+
+.globl	bsaes_encrypt_128
+.type	bsaes_encrypt_128,%function
+.align	4
+bsaes_encrypt_128:
+	stmdb	sp!,{r4-r6,lr}
+	vstmdb	sp!,{d8-d15}		@ ABI specification says so
+.Lenc128_loop:
+	vld1.8	{@XMM[0]-@XMM[1]}, [$inp]!	@ load input
+	vld1.8	{@XMM[2]-@XMM[3]}, [$inp]!
+	mov	r4,$key				@ pass the key
+	vld1.8	{@XMM[4]-@XMM[5]}, [$inp]!
+	mov	r5,#10				@ pass rounds
+	vld1.8	{@XMM[6]-@XMM[7]}, [$inp]!
+
+	bl	_bsaes_encrypt8
+
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	vst1.8	{@XMM[4]}, [$out]!
+	vst1.8	{@XMM[6]}, [$out]!
+	vst1.8	{@XMM[3]}, [$out]!
+	vst1.8	{@XMM[7]}, [$out]!
+	vst1.8	{@XMM[2]}, [$out]!
+	subs	$len,$len,#0x80
+	vst1.8	{@XMM[5]}, [$out]!
+	bhi	.Lenc128_loop
+
+	vldmia	sp!,{d8-d15}
+	ldmia	sp!,{r4-r6,pc}
+.size	bsaes_encrypt_128,.-bsaes_encrypt_128
+
+.globl	bsaes_dec_key_convert
+.type	bsaes_dec_key_convert,%function
+.align	4
+bsaes_dec_key_convert:
+	stmdb	sp!,{r4-r6,lr}
+	vstmdb	sp!,{d8-d15}		@ ABI specification says so
+
+	ldr	r5,[$inp,#240]			@ pass rounds
+	mov	r4,$inp				@ pass key
+	mov	r12,$out			@ pass key schedule
+	bl	_bsaes_key_convert
+	vldmia	$out, {@XMM[6]}
+	vstmia	r12,  {@XMM[15]}		@ save last round key
+	veor	@XMM[7], @XMM[7], @XMM[6]	@ fix up round 0 key
+	vstmia	$out, {@XMM[7]}
+
+	vldmia	sp!,{d8-d15}
+	ldmia	sp!,{r4-r6,pc}
+.size	bsaes_dec_key_convert,.-bsaes_dec_key_convert
+
+.globl	bsaes_decrypt_128
+.type	bsaes_decrypt_128,%function
+.align	4
+bsaes_decrypt_128:
+	stmdb	sp!,{r4-r6,lr}
+	vstmdb	sp!,{d8-d15}		@ ABI specification says so
+.Ldec128_loop:
+	vld1.8	{@XMM[0]-@XMM[1]}, [$inp]!	@ load input
+	vld1.8	{@XMM[2]-@XMM[3]}, [$inp]!
+	mov	r4,$key				@ pass the key
+	vld1.8	{@XMM[4]-@XMM[5]}, [$inp]!
+	mov	r5,#10				@ pass rounds
+	vld1.8	{@XMM[6]-@XMM[7]}, [$inp]!
+
+	bl	_bsaes_decrypt8
+
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	vst1.8	{@XMM[6]}, [$out]!
+	vst1.8	{@XMM[4]}, [$out]!
+	vst1.8	{@XMM[2]}, [$out]!
+	vst1.8	{@XMM[7]}, [$out]!
+	vst1.8	{@XMM[3]}, [$out]!
+	subs	$len,$len,#0x80
+	vst1.8	{@XMM[5]}, [$out]!
+	bhi	.Ldec128_loop
+
+	vldmia	sp!,{d8-d15}
+	ldmia	sp!,{r4-r6,pc}
+.size	bsaes_decrypt_128,.-bsaes_decrypt_128
+___
+}
+{
+my ($inp,$out,$len,$key, $ivp,$fp,$rounds)=map("r$_",(0..3,8..10));
+my ($keysched)=("sp");
+
+$code.=<<___;
+.extern AES_cbc_encrypt
+.extern AES_decrypt
+
+.global	bsaes_cbc_encrypt
+.type	bsaes_cbc_encrypt,%function
+.align	5
+bsaes_cbc_encrypt:
+#ifndef	__KERNEL__
+	cmp	$len, #128
+#ifndef	__thumb__
+	blo	AES_cbc_encrypt
+#else
+	bhs	1f
+	b	AES_cbc_encrypt
+1:
+#endif
+#endif
+
+	@ it is up to the caller to make sure we are called with enc == 0
+
+	mov	ip, sp
+	stmdb	sp!, {r4-r10, lr}
+	VFP_ABI_PUSH
+	ldr	$ivp, [ip]			@ IV is 1st arg on the stack
+	mov	$len, $len, lsr#4		@ len in 16 byte blocks
+	sub	sp, #0x10			@ scratch space to carry over the IV
+	mov	$fp, sp				@ save sp
+
+	ldr	$rounds, [$key, #240]		@ get # of rounds
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	@ allocate the key schedule on the stack
+	sub	r12, sp, $rounds, lsl#7		@ 128 bytes per inner round key
+	add	r12, #`128-32`			@ size of bit-sliced key schedule
+
+	@ populate the key schedule
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	mov	sp, r12				@ sp is $keysched
+	bl	_bsaes_key_convert
+	vldmia	$keysched, {@XMM[6]}
+	vstmia	r12,  {@XMM[15]}		@ save last round key
+	veor	@XMM[7], @XMM[7], @XMM[6]	@ fix up round 0 key
+	vstmia	$keysched, {@XMM[7]}
+#else
+	ldr	r12, [$key, #244]
+	eors	r12, #1
+	beq	0f
+
+	@ populate the key schedule
+	str	r12, [$key, #244]
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	add	r12, $key, #248			@ pass key schedule
+	bl	_bsaes_key_convert
+	add	r4, $key, #248
+	vldmia	r4, {@XMM[6]}
+	vstmia	r12, {@XMM[15]}			@ save last round key
+	veor	@XMM[7], @XMM[7], @XMM[6]	@ fix up round 0 key
+	vstmia	r4, {@XMM[7]}
+
+.align	2
+0:
+#endif
+
+	vld1.8	{@XMM[15]}, [$ivp]		@ load IV
+	b	.Lcbc_dec_loop
+
+.align	4
+.Lcbc_dec_loop:
+	subs	$len, $len, #0x8
+	bmi	.Lcbc_dec_loop_finish
+
+	vld1.8	{@XMM[0]-@XMM[1]}, [$inp]!	@ load input
+	vld1.8	{@XMM[2]-@XMM[3]}, [$inp]!
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	mov	r4, $keysched			@ pass the key
+#else
+	add	r4, $key, #248
+#endif
+	vld1.8	{@XMM[4]-@XMM[5]}, [$inp]!
+	mov	r5, $rounds
+	vld1.8	{@XMM[6]-@XMM[7]}, [$inp]
+	sub	$inp, $inp, #0x60
+	vstmia	$fp, {@XMM[15]}			@ put aside IV
+
+	bl	_bsaes_decrypt8
+
+	vldmia	$fp, {@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]-@XMM[9]}, [$inp]!	@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[10]-@XMM[11]}, [$inp]!
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[9]
+	vld1.8	{@XMM[12]-@XMM[13]}, [$inp]!
+	veor	@XMM[4], @XMM[4], @XMM[10]
+	veor	@XMM[2], @XMM[2], @XMM[11]
+	vld1.8	{@XMM[14]-@XMM[15]}, [$inp]!
+	veor	@XMM[7], @XMM[7], @XMM[12]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	veor	@XMM[3], @XMM[3], @XMM[13]
+	vst1.8	{@XMM[6]}, [$out]!
+	veor	@XMM[5], @XMM[5], @XMM[14]
+	vst1.8	{@XMM[4]}, [$out]!
+	vst1.8	{@XMM[2]}, [$out]!
+	vst1.8	{@XMM[7]}, [$out]!
+	vst1.8	{@XMM[3]}, [$out]!
+	vst1.8	{@XMM[5]}, [$out]!
+
+	b	.Lcbc_dec_loop
+
+.Lcbc_dec_loop_finish:
+	adds	$len, $len, #8
+	beq	.Lcbc_dec_done
+
+	vld1.8	{@XMM[0]}, [$inp]!		@ load input
+	cmp	$len, #2
+	blo	.Lcbc_dec_one
+	vld1.8	{@XMM[1]}, [$inp]!
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	mov	r4, $keysched			@ pass the key
+#else
+	add	r4, $key, #248
+#endif
+	mov	r5, $rounds
+	vstmia	$fp, {@XMM[15]}			@ put aside IV
+	beq	.Lcbc_dec_two
+	vld1.8	{@XMM[2]}, [$inp]!
+	cmp	$len, #4
+	blo	.Lcbc_dec_three
+	vld1.8	{@XMM[3]}, [$inp]!
+	beq	.Lcbc_dec_four
+	vld1.8	{@XMM[4]}, [$inp]!
+	cmp	$len, #6
+	blo	.Lcbc_dec_five
+	vld1.8	{@XMM[5]}, [$inp]!
+	beq	.Lcbc_dec_six
+	vld1.8	{@XMM[6]}, [$inp]!
+	sub	$inp, $inp, #0x70
+
+	bl	_bsaes_decrypt8
+
+	vldmia	$fp, {@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]-@XMM[9]}, [$inp]!	@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[10]-@XMM[11]}, [$inp]!
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[9]
+	vld1.8	{@XMM[12]-@XMM[13]}, [$inp]!
+	veor	@XMM[4], @XMM[4], @XMM[10]
+	veor	@XMM[2], @XMM[2], @XMM[11]
+	vld1.8	{@XMM[15]}, [$inp]!
+	veor	@XMM[7], @XMM[7], @XMM[12]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	veor	@XMM[3], @XMM[3], @XMM[13]
+	vst1.8	{@XMM[6]}, [$out]!
+	vst1.8	{@XMM[4]}, [$out]!
+	vst1.8	{@XMM[2]}, [$out]!
+	vst1.8	{@XMM[7]}, [$out]!
+	vst1.8	{@XMM[3]}, [$out]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_six:
+	sub	$inp, $inp, #0x60
+	bl	_bsaes_decrypt8
+	vldmia	$fp,{@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]-@XMM[9]}, [$inp]!	@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[10]-@XMM[11]}, [$inp]!
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[9]
+	vld1.8	{@XMM[12]}, [$inp]!
+	veor	@XMM[4], @XMM[4], @XMM[10]
+	veor	@XMM[2], @XMM[2], @XMM[11]
+	vld1.8	{@XMM[15]}, [$inp]!
+	veor	@XMM[7], @XMM[7], @XMM[12]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	vst1.8	{@XMM[6]}, [$out]!
+	vst1.8	{@XMM[4]}, [$out]!
+	vst1.8	{@XMM[2]}, [$out]!
+	vst1.8	{@XMM[7]}, [$out]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_five:
+	sub	$inp, $inp, #0x50
+	bl	_bsaes_decrypt8
+	vldmia	$fp, {@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]-@XMM[9]}, [$inp]!	@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[10]-@XMM[11]}, [$inp]!
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[9]
+	vld1.8	{@XMM[15]}, [$inp]!
+	veor	@XMM[4], @XMM[4], @XMM[10]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	veor	@XMM[2], @XMM[2], @XMM[11]
+	vst1.8	{@XMM[6]}, [$out]!
+	vst1.8	{@XMM[4]}, [$out]!
+	vst1.8	{@XMM[2]}, [$out]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_four:
+	sub	$inp, $inp, #0x40
+	bl	_bsaes_decrypt8
+	vldmia	$fp, {@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]-@XMM[9]}, [$inp]!	@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[10]}, [$inp]!
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[9]
+	vld1.8	{@XMM[15]}, [$inp]!
+	veor	@XMM[4], @XMM[4], @XMM[10]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	vst1.8	{@XMM[6]}, [$out]!
+	vst1.8	{@XMM[4]}, [$out]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_three:
+	sub	$inp, $inp, #0x30
+	bl	_bsaes_decrypt8
+	vldmia	$fp, {@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]-@XMM[9]}, [$inp]!	@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[15]}, [$inp]!
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[9]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	vst1.8	{@XMM[6]}, [$out]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_two:
+	sub	$inp, $inp, #0x20
+	bl	_bsaes_decrypt8
+	vldmia	$fp, {@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]}, [$inp]!		@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[15]}, [$inp]!		@ reload input
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_one:
+	sub	$inp, $inp, #0x10
+	mov	$rounds, $out			@ save original out pointer
+	mov	$out, $fp			@ use the iv scratch space as out buffer
+	mov	r2, $key
+	vmov	@XMM[4],@XMM[15]		@ just in case ensure that IV
+	vmov	@XMM[5],@XMM[0]			@ and input are preserved
+	bl	AES_decrypt
+	vld1.8	{@XMM[0]}, [$fp,:64]		@ load result
+	veor	@XMM[0], @XMM[0], @XMM[4]	@ ^= IV
+	vmov	@XMM[15], @XMM[5]		@ @XMM[5] holds input
+	vst1.8	{@XMM[0]}, [$rounds]		@ write output
+
+.Lcbc_dec_done:
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+.Lcbc_dec_bzero:				@ wipe key schedule [if any]
+	vstmia		$keysched!, {q0-q1}
+	cmp		$keysched, $fp
+	bne		.Lcbc_dec_bzero
+#endif
+
+	mov	sp, $fp
+	add	sp, #0x10			@ add sp,$fp,#0x10 is no good for thumb
+	vst1.8	{@XMM[15]}, [$ivp]		@ return IV
+	VFP_ABI_POP
+	ldmia	sp!, {r4-r10, pc}
+.size	bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
+___
+}
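
For reference, the CBC decrypt path above works in batches of eight blocks through _bsaes_decrypt8, drops into the .Lcbc_dec_two..six ladder for a 2-7 block tail, sends a single final block through scalar AES_decrypt, and writes the last ciphertext block back through $ivp so successive calls chain. A rough C model of that control flow; decrypt8 and decrypt1 are hypothetical stand-ins for the assembly routines, not real kernel symbols:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical stand-ins for the assembly routines. */
void decrypt8(uint8_t blk[8][16], const void *ks, int rounds);
void decrypt1(uint8_t blk[16], const void *key);	/* AES_decrypt path */

/* Rough model of the control flow in bsaes_cbc_encrypt (decrypting). */
static void cbc_dec(uint8_t *out, const uint8_t *in, size_t nblocks,
		    const void *ks, int rounds, uint8_t iv[16])
{
	uint8_t buf[8][16];

	while (nblocks) {
		size_t n = nblocks < 8 ? nblocks : 8;

		memcpy(buf, in, n * 16);
		if (n > 1)
			decrypt8(buf, ks, rounds);	/* unused lanes are don't-care */
		else
			decrypt1(buf[0], ks);		/* single tail block */
		for (size_t i = 0; i < n; i++)		/* CBC chain: previous ciphertext */
			for (int j = 0; j < 16; j++)
				buf[i][j] ^= i ? in[(i - 1) * 16 + j] : iv[j];
		memcpy(iv, in + (n - 1) * 16, 16);	/* last ciphertext -> next IV */
		memcpy(out, buf, n * 16);
		in += n * 16;
		out += n * 16;
		nblocks -= n;
	}
}
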
+{
+my ($inp,$out,$len,$key, $ctr,$fp,$rounds)=(map("r$_",(0..3,8..10)));
+my $const = "r6";	# shared with _bsaes_encrypt8_alt
+my $keysched = "sp";
+
+$code.=<<___;
+.extern	AES_encrypt
+.global	bsaes_ctr32_encrypt_blocks
+.type	bsaes_ctr32_encrypt_blocks,%function
+.align	5
+bsaes_ctr32_encrypt_blocks:
+	cmp	$len, #8			@ use plain AES for
+	blo	.Lctr_enc_short			@ small sizes
+
+	mov	ip, sp
+	stmdb	sp!, {r4-r10, lr}
+	VFP_ABI_PUSH
+	ldr	$ctr, [ip]			@ ctr is 1st arg on the stack
+	sub	sp, sp, #0x10			@ scratch space to carry over the ctr
+	mov	$fp, sp				@ save sp
+
+	ldr	$rounds, [$key, #240]		@ get # of rounds
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	@ allocate the key schedule on the stack
+	sub	r12, sp, $rounds, lsl#7		@ 128 bytes per inner round key
+	add	r12, #`128-32`			@ size of bit-sliced key schedule
+
+	@ populate the key schedule
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	mov	sp, r12				@ sp is $keysched
+	bl	_bsaes_key_convert
+	veor	@XMM[7],@XMM[7],@XMM[15]	@ fix up last round key
+	vstmia	r12, {@XMM[7]}			@ save last round key
+
+	vld1.8	{@XMM[0]}, [$ctr]		@ load counter
+	add	$ctr, $const, #.LREVM0SR-.LM0	@ borrow $ctr
+	vldmia	$keysched, {@XMM[4]}		@ load round0 key
+#else
+	ldr	r12, [$key, #244]
+	eors	r12, #1
+	beq	0f
+
+	@ populate the key schedule
+	str	r12, [$key, #244]
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	add	r12, $key, #248			@ pass key schedule
+	bl	_bsaes_key_convert
+	veor	@XMM[7],@XMM[7],@XMM[15]	@ fix up last round key
+	vstmia	r12, {@XMM[7]}			@ save last round key
+
+.align	2
+0:	add	r12, $key, #248
+	vld1.8	{@XMM[0]}, [$ctr]		@ load counter
+	adrl	$ctr, .LREVM0SR			@ borrow $ctr
+	vldmia	r12, {@XMM[4]}			@ load round0 key
+	sub	sp, #0x10			@ place for adjusted round0 key
+#endif
+
+	vmov.i32	@XMM[8],#1		@ compose 1<<96
+	veor		@XMM[9],@XMM[9],@XMM[9]
+	vrev32.8	@XMM[0],@XMM[0]
+	vext.8		@XMM[8],@XMM[9],@XMM[8],#4
+	vrev32.8	@XMM[4],@XMM[4]
+	vadd.u32	@XMM[9],@XMM[8],@XMM[8]	@ compose 2<<96
+	vstmia	$keysched, {@XMM[4]}		@ save adjusted round0 key
+	b	.Lctr_enc_loop
+
+.align	4
+.Lctr_enc_loop:
+	vadd.u32	@XMM[10], @XMM[8], @XMM[9]	@ compose 3<<96
+	vadd.u32	@XMM[1], @XMM[0], @XMM[8]	@ +1
+	vadd.u32	@XMM[2], @XMM[0], @XMM[9]	@ +2
+	vadd.u32	@XMM[3], @XMM[0], @XMM[10]	@ +3
+	vadd.u32	@XMM[4], @XMM[1], @XMM[10]
+	vadd.u32	@XMM[5], @XMM[2], @XMM[10]
+	vadd.u32	@XMM[6], @XMM[3], @XMM[10]
+	vadd.u32	@XMM[7], @XMM[4], @XMM[10]
+	vadd.u32	@XMM[10], @XMM[5], @XMM[10]	@ next counter
+
+	@ Borrow prologue from _bsaes_encrypt8 to use the opportunity
+	@ to flip byte order in 32-bit counter
+
+	vldmia		$keysched, {@XMM[9]}		@ load round0 key
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, $keysched, #0x10		@ pass next round key
+#else
+	add		r4, $key, #`248+16`
+#endif
+	vldmia		$ctr, {@XMM[8]}			@ .LREVM0SR
+	mov		r5, $rounds			@ pass rounds
+	vstmia		$fp, {@XMM[10]}			@ save next counter
+	sub		$const, $ctr, #.LREVM0SR-.LSR	@ pass constants
+
+	bl		_bsaes_encrypt8_alt
+
+	subs		$len, $len, #8
+	blo		.Lctr_enc_loop_done
+
+	vld1.8		{@XMM[8]-@XMM[9]}, [$inp]!	@ load input
+	vld1.8		{@XMM[10]-@XMM[11]}, [$inp]!
+	veor		@XMM[0], @XMM[8]
+	veor		@XMM[1], @XMM[9]
+	vld1.8		{@XMM[12]-@XMM[13]}, [$inp]!
+	veor		@XMM[4], @XMM[10]
+	veor		@XMM[6], @XMM[11]
+	vld1.8		{@XMM[14]-@XMM[15]}, [$inp]!
+	veor		@XMM[3], @XMM[12]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	veor		@XMM[7], @XMM[13]
+	veor		@XMM[2], @XMM[14]
+	vst1.8		{@XMM[4]}, [$out]!
+	veor		@XMM[5], @XMM[15]
+	vst1.8		{@XMM[6]}, [$out]!
+	vmov.i32	@XMM[8], #1			@ compose 1<<96
+	vst1.8		{@XMM[3]}, [$out]!
+	veor		@XMM[9], @XMM[9], @XMM[9]
+	vst1.8		{@XMM[7]}, [$out]!
+	vext.8		@XMM[8], @XMM[9], @XMM[8], #4
+	vst1.8		{@XMM[2]}, [$out]!
+	vadd.u32	@XMM[9],@XMM[8],@XMM[8]		@ compose 2<<96
+	vst1.8		{@XMM[5]}, [$out]!
+	vldmia		$fp, {@XMM[0]}			@ load counter
+
+	bne		.Lctr_enc_loop
+	b		.Lctr_enc_done
+
+.align	4
+.Lctr_enc_loop_done:
+	add		$len, $len, #8
+	vld1.8		{@XMM[8]}, [$inp]!	@ load input
+	veor		@XMM[0], @XMM[8]
+	vst1.8		{@XMM[0]}, [$out]!	@ write output
+	cmp		$len, #2
+	blo		.Lctr_enc_done
+	vld1.8		{@XMM[9]}, [$inp]!
+	veor		@XMM[1], @XMM[9]
+	vst1.8		{@XMM[1]}, [$out]!
+	beq		.Lctr_enc_done
+	vld1.8		{@XMM[10]}, [$inp]!
+	veor		@XMM[4], @XMM[10]
+	vst1.8		{@XMM[4]}, [$out]!
+	cmp		$len, #4
+	blo		.Lctr_enc_done
+	vld1.8		{@XMM[11]}, [$inp]!
+	veor		@XMM[6], @XMM[11]
+	vst1.8		{@XMM[6]}, [$out]!
+	beq		.Lctr_enc_done
+	vld1.8		{@XMM[12]}, [$inp]!
+	veor		@XMM[3], @XMM[12]
+	vst1.8		{@XMM[3]}, [$out]!
+	cmp		$len, #6
+	blo		.Lctr_enc_done
+	vld1.8		{@XMM[13]}, [$inp]!
+	veor		@XMM[7], @XMM[13]
+	vst1.8		{@XMM[7]}, [$out]!
+	beq		.Lctr_enc_done
+	vld1.8		{@XMM[14]}, [$inp]
+	veor		@XMM[2], @XMM[14]
+	vst1.8		{@XMM[2]}, [$out]!
+
+.Lctr_enc_done:
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+#ifndef	BSAES_ASM_EXTENDED_KEY
+.Lctr_enc_bzero:			@ wipe key schedule [if any]
+	vstmia		$keysched!, {q0-q1}
+	cmp		$keysched, $fp
+	bne		.Lctr_enc_bzero
+#else
+	vstmia		$keysched, {q0-q1}
+#endif
+
+	mov	sp, $fp
+	add	sp, #0x10		@ add sp,$fp,#0x10 is no good for thumb
+	VFP_ABI_POP
+	ldmia	sp!, {r4-r10, pc}	@ return
+
+.align	4
+.Lctr_enc_short:
+	ldr	ip, [sp]		@ ctr pointer is passed on stack
+	stmdb	sp!, {r4-r8, lr}
+
+	mov	r4, $inp		@ copy arguments
+	mov	r5, $out
+	mov	r6, $len
+	mov	r7, $key
+	ldr	r8, [ip, #12]		@ load counter LSW
+	vld1.8	{@XMM[1]}, [ip]		@ load whole counter value
+#ifdef __ARMEL__
+	rev	r8, r8
+#endif
+	sub	sp, sp, #0x10
+	vst1.8	{@XMM[1]}, [sp,:64]	@ copy counter value
+	sub	sp, sp, #0x10
+
+.Lctr_enc_short_loop:
+	add	r0, sp, #0x10		@ input counter value
+	mov	r1, sp			@ output on the stack
+	mov	r2, r7			@ key
+
+	bl	AES_encrypt
+
+	vld1.8	{@XMM[0]}, [r4]!	@ load input
+	vld1.8	{@XMM[1]}, [sp,:64]	@ load encrypted counter
+	add	r8, r8, #1
+#ifdef __ARMEL__
+	rev	r0, r8
+	str	r0, [sp, #0x1c]		@ next counter value
+#else
+	str	r8, [sp, #0x1c]		@ next counter value
+#endif
+	veor	@XMM[0],@XMM[0],@XMM[1]
+	vst1.8	{@XMM[0]}, [r5]!	@ store output
+	subs	r6, r6, #1
+	bne	.Lctr_enc_short_loop
+
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+	vstmia		sp!, {q0-q1}
+
+	ldmia	sp!, {r4-r8, pc}
+.size	bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
+___
+}
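
For reference, the vmov.i32/vext.8 sequence above composes the constant 1<<96 so that vadd.u32 bumps only the last 32-bit lane: after vrev32.8, the counter is the big-endian 32-bit word in bytes 12-15 of the block, which is also what the .Lctr_enc_short path computes with add r8,#1 plus rev. A C sketch of that increment (illustrative, not a kernel API):

#include <stdint.h>

/*
 * Sketch of the ctr32 increment: only bytes 12..15 of the counter
 * block form a big-endian 32-bit counter; the upper 96 bits (the
 * nonce) never receive a carry, so the count wraps mod 2^32 by design.
 */
static void ctr32_inc(uint8_t block[16])
{
	uint32_t c = ((uint32_t)block[12] << 24) | ((uint32_t)block[13] << 16) |
		     ((uint32_t)block[14] <<  8) |  (uint32_t)block[15];

	c++;
	block[12] = c >> 24;
	block[13] = c >> 16;
	block[14] = c >> 8;
	block[15] = c;
}
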
+{
+######################################################################
+# void bsaes_xts_[en|de]crypt(const char *inp,char *out,size_t len,
+#	const AES_KEY *key1, const AES_KEY *key2,
+#	const unsigned char iv[16]);
+#
+my ($inp,$out,$len,$key,$rounds,$magic,$fp)=(map("r$_",(7..10,1..3)));
+my $const="r6";		# returned by _bsaes_key_convert
+my $twmask=@XMM[5];
+my @T=@XMM[6..7];
+
+$code.=<<___;
+.globl	bsaes_xts_encrypt
+.type	bsaes_xts_encrypt,%function
+.align	4
+bsaes_xts_encrypt:
+	mov	ip, sp
+	stmdb	sp!, {r4-r10, lr}		@ 0x20
+	VFP_ABI_PUSH
+	mov	r6, sp				@ future $fp
+
+	mov	$inp, r0
+	mov	$out, r1
+	mov	$len, r2
+	mov	$key, r3
+
+	sub	r0, sp, #0x10			@ 0x10
+	bic	r0, #0xf			@ align at 16 bytes
+	mov	sp, r0
+
+#ifdef	XTS_CHAIN_TWEAK
+	ldr	r0, [ip]			@ pointer to input tweak
+#else
+	@ generate initial tweak
+	ldr	r0, [ip, #4]			@ iv[]
+	mov	r1, sp
+	ldr	r2, [ip, #0]			@ key2
+	bl	AES_encrypt
+	mov	r0,sp				@ pointer to initial tweak
+#endif
+
+	ldr	$rounds, [$key, #240]		@ get # of rounds
+	mov	$fp, r6
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	@ allocate the key schedule on the stack
+	sub	r12, sp, $rounds, lsl#7		@ 128 bytes per inner round key
+	@ add	r12, #`128-32`			@ size of bit-sliced key schedule
+	sub	r12, #`32+16`			@ place for tweak[9]
+
+	@ populate the key schedule
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	mov	sp, r12
+	add	r12, #0x90			@ pass key schedule
+	bl	_bsaes_key_convert
+	veor	@XMM[7], @XMM[7], @XMM[15]	@ fix up last round key
+	vstmia	r12, {@XMM[7]}			@ save last round key
+#else
+	ldr	r12, [$key, #244]
+	eors	r12, #1
+	beq	0f
+
+	str	r12, [$key, #244]
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	add	r12, $key, #248			@ pass key schedule
+	bl	_bsaes_key_convert
+	veor	@XMM[7], @XMM[7], @XMM[15]	@ fix up last round key
+	vstmia	r12, {@XMM[7]}
+
+.align	2
+0:	sub	sp, #0x90			@ place for tweak[9]
+#endif
+
+	vld1.8	{@XMM[8]}, [r0]			@ initial tweak
+	adr	$magic, .Lxts_magic
+
+	subs	$len, #0x80
+	blo	.Lxts_enc_short
+	b	.Lxts_enc_loop
+
+.align	4
+.Lxts_enc_loop:
+	vldmia		$magic, {$twmask}	@ load XTS magic
+	vshr.s64	@T[0], @XMM[8], #63
+	mov		r0, sp
+	vand		@T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+	vadd.u64	@XMM[$i], @XMM[$i-1], @XMM[$i-1]
+	vst1.64		{@XMM[$i-1]}, [r0,:128]!
+	vswp		`&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+	vshr.s64	@T[1], @XMM[$i], #63
+	veor		@XMM[$i], @XMM[$i], @T[0]
+	vand		@T[1], @T[1], $twmask
+___
+	@T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+	vld1.8		{@XMM[$i-10]}, [$inp]!
+___
+$code.=<<___ if ($i>=11);
+	veor		@XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+	vadd.u64	@XMM[8], @XMM[15], @XMM[15]
+	vst1.64		{@XMM[15]}, [r0,:128]!
+	vswp		`&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+	veor		@XMM[8], @XMM[8], @T[0]
+	vst1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+
+	vld1.8		{@XMM[6]-@XMM[7]}, [$inp]!
+	veor		@XMM[5], @XMM[5], @XMM[13]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[6], @XMM[6], @XMM[14]
+	mov		r5, $rounds			@ pass rounds
+	veor		@XMM[7], @XMM[7], @XMM[15]
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]-@XMM[13]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[4], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[6], @XMM[11]
+	vld1.64		{@XMM[14]-@XMM[15]}, [r0,:128]!
+	veor		@XMM[10], @XMM[3], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	veor		@XMM[11], @XMM[7], @XMM[13]
+	veor		@XMM[12], @XMM[2], @XMM[14]
+	vst1.8		{@XMM[10]-@XMM[11]}, [$out]!
+	veor		@XMM[13], @XMM[5], @XMM[15]
+	vst1.8		{@XMM[12]-@XMM[13]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+
+	subs		$len, #0x80
+	bpl		.Lxts_enc_loop
+
+.Lxts_enc_short:
+	adds		$len, #0x70
+	bmi		.Lxts_enc_done
+
+	vldmia		$magic, {$twmask}	@ load XTS magic
+	vshr.s64	@T[0], @XMM[8], #63
+	mov		r0, sp
+	vand		@T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+	vadd.u64	@XMM[$i], @XMM[$i-1], @XMM[$i-1]
+	vst1.64		{@XMM[$i-1]}, [r0,:128]!
+	vswp		`&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+	vshr.s64	@T[1], @XMM[$i], #63
+	veor		@XMM[$i], @XMM[$i], @T[0]
+	vand		@T[1], @T[1], $twmask
+___
+	@T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+	vld1.8		{@XMM[$i-10]}, [$inp]!
+	subs		$len, #0x10
+	bmi		.Lxts_enc_`$i-9`
+___
+$code.=<<___ if ($i>=11);
+	veor		@XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+	sub		$len, #0x10
+	vst1.64		{@XMM[15]}, [r0,:128]		@ next round tweak
+
+	vld1.8		{@XMM[6]}, [$inp]!
+	veor		@XMM[5], @XMM[5], @XMM[13]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[6], @XMM[6], @XMM[14]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]-@XMM[13]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[4], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[6], @XMM[11]
+	vld1.64		{@XMM[14]}, [r0,:128]!
+	veor		@XMM[10], @XMM[3], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	veor		@XMM[11], @XMM[7], @XMM[13]
+	veor		@XMM[12], @XMM[2], @XMM[14]
+	vst1.8		{@XMM[10]-@XMM[11]}, [$out]!
+	vst1.8		{@XMM[12]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_6:
+	vst1.64		{@XMM[14]}, [r0,:128]		@ next round tweak
+
+	veor		@XMM[4], @XMM[4], @XMM[12]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[5], @XMM[5], @XMM[13]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]-@XMM[13]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[4], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[6], @XMM[11]
+	veor		@XMM[10], @XMM[3], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	veor		@XMM[11], @XMM[7], @XMM[13]
+	vst1.8		{@XMM[10]-@XMM[11]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+
+@ put this in range for both ARM and Thumb mode adr instructions
+.align	5
+.Lxts_magic:
+	.quad	1, 0x87
+
+.align	5
+.Lxts_enc_5:
+	vst1.64		{@XMM[13]}, [r0,:128]		@ next round tweak
+
+	veor		@XMM[3], @XMM[3], @XMM[11]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[4], @XMM[4], @XMM[12]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[4], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[6], @XMM[11]
+	veor		@XMM[10], @XMM[3], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	vst1.8		{@XMM[10]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_4:
+	vst1.64		{@XMM[12]}, [r0,:128]		@ next round tweak
+
+	veor		@XMM[2], @XMM[2], @XMM[10]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[3], @XMM[3], @XMM[11]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[4], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[6], @XMM[11]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_3:
+	vst1.64		{@XMM[11]}, [r0,:128]		@ next round tweak
+
+	veor		@XMM[1], @XMM[1], @XMM[9]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[2], @XMM[2], @XMM[10]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[8]-@XMM[9]}, [r0,:128]!
+	vld1.64		{@XMM[10]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[4], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	vst1.8		{@XMM[8]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_2:
+	vst1.64		{@XMM[10]}, [r0,:128]		@ next round tweak
+
+	veor		@XMM[0], @XMM[0], @XMM[8]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[1], @XMM[1], @XMM[9]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[8]-@XMM[9]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_1:
+	mov		r0, sp
+	veor		@XMM[0], @XMM[8]
+	mov		r1, sp
+	vst1.8		{@XMM[0]}, [sp,:128]
+	mov		r2, $key
+	mov		r4, $fp				@ preserve fp
+
+	bl		AES_encrypt
+
+	vld1.8		{@XMM[0]}, [sp,:128]
+	veor		@XMM[0], @XMM[0], @XMM[8]
+	vst1.8		{@XMM[0]}, [$out]!
+	mov		$fp, r4
+
+	vmov		@XMM[8], @XMM[9]		@ next round tweak
+
+.Lxts_enc_done:
+#ifndef	XTS_CHAIN_TWEAK
+	adds		$len, #0x10
+	beq		.Lxts_enc_ret
+	sub		r6, $out, #0x10
+
+.Lxts_enc_steal:
+	ldrb		r0, [$inp], #1
+	ldrb		r1, [$out, #-0x10]
+	strb		r0, [$out, #-0x10]
+	strb		r1, [$out], #1
+
+	subs		$len, #1
+	bhi		.Lxts_enc_steal
+
+	vld1.8		{@XMM[0]}, [r6]
+	mov		r0, sp
+	veor		@XMM[0], @XMM[0], @XMM[8]
+	mov		r1, sp
+	vst1.8		{@XMM[0]}, [sp,:128]
+	mov		r2, $key
+	mov		r4, $fp			@ preserve fp
+
+	bl		AES_encrypt
+
+	vld1.8		{@XMM[0]}, [sp,:128]
+	veor		@XMM[0], @XMM[0], @XMM[8]
+	vst1.8		{@XMM[0]}, [r6]
+	mov		$fp, r4
+#endif
+
+.Lxts_enc_ret:
+	bic		r0, $fp, #0xf
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+#ifdef	XTS_CHAIN_TWEAK
+	ldr		r1, [$fp, #0x20+VFP_ABI_FRAME]	@ chain tweak
+#endif
+.Lxts_enc_bzero:				@ wipe key schedule [if any]
+	vstmia		sp!, {q0-q1}
+	cmp		sp, r0
+	bne		.Lxts_enc_bzero
+
+	mov		sp, $fp
+#ifdef	XTS_CHAIN_TWEAK
+	vst1.8		{@XMM[8]}, [r1]
+#endif
+	VFP_ABI_POP
+	ldmia		sp!, {r4-r10, pc}	@ return
+
+.size	bsaes_xts_encrypt,.-bsaes_xts_encrypt
+
+.globl	bsaes_xts_decrypt
+.type	bsaes_xts_decrypt,%function
+.align	4
+bsaes_xts_decrypt:
+	mov	ip, sp
+	stmdb	sp!, {r4-r10, lr}		@ 0x20
+	VFP_ABI_PUSH
+	mov	r6, sp				@ future $fp
+
+	mov	$inp, r0
+	mov	$out, r1
+	mov	$len, r2
+	mov	$key, r3
+
+	sub	r0, sp, #0x10			@ 0x10
+	bic	r0, #0xf			@ align at 16 bytes
+	mov	sp, r0
+
+#ifdef	XTS_CHAIN_TWEAK
+	ldr	r0, [ip]			@ pointer to input tweak
+#else
+	@ generate initial tweak
+	ldr	r0, [ip, #4]			@ iv[]
+	mov	r1, sp
+	ldr	r2, [ip, #0]			@ key2
+	bl	AES_encrypt
+	mov	r0, sp				@ pointer to initial tweak
+#endif
+
+	ldr	$rounds, [$key, #240]		@ get # of rounds
+	mov	$fp, r6
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	@ allocate the key schedule on the stack
+	sub	r12, sp, $rounds, lsl#7		@ 128 bytes per inner round key
+	@ add	r12, #`128-32`			@ size of bit-sliced key schedule
+	sub	r12, #`32+16`			@ place for tweak[9]
+
+	@ populate the key schedule
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	mov	sp, r12
+	add	r12, #0x90			@ pass key schedule
+	bl	_bsaes_key_convert
+	add	r4, sp, #0x90
+	vldmia	r4, {@XMM[6]}
+	vstmia	r12,  {@XMM[15]}		@ save last round key
+	veor	@XMM[7], @XMM[7], @XMM[6]	@ fix up round 0 key
+	vstmia	r4, {@XMM[7]}
+#else
+	ldr	r12, [$key, #244]
+	eors	r12, #1
+	beq	0f
+
+	str	r12, [$key, #244]
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	add	r12, $key, #248			@ pass key schedule
+	bl	_bsaes_key_convert
+	add	r4, $key, #248
+	vldmia	r4, {@XMM[6]}
+	vstmia	r12,  {@XMM[15]}		@ save last round key
+	veor	@XMM[7], @XMM[7], @XMM[6]	@ fix up round 0 key
+	vstmia	r4, {@XMM[7]}
+
+.align	2
+0:	sub	sp, #0x90			@ place for tweak[9]
+#endif
+	vld1.8	{@XMM[8]}, [r0]			@ initial tweak
+	adr	$magic, .Lxts_magic
+
+	tst	$len, #0xf			@ if not multiple of 16
+	it	ne				@ Thumb2 thing, sanity check in ARM
+	subne	$len, #0x10			@ subtract another 16 bytes
+	subs	$len, #0x80
+
+	blo	.Lxts_dec_short
+	b	.Lxts_dec_loop
+
+.align	4
+.Lxts_dec_loop:
+	vldmia		$magic, {$twmask}	@ load XTS magic
+	vshr.s64	@T[0], @XMM[8], #63
+	mov		r0, sp
+	vand		@T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+	vadd.u64	@XMM[$i], @XMM[$i-1], @XMM[$i-1]
+	vst1.64		{@XMM[$i-1]}, [r0,:128]!
+	vswp		`&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+	vshr.s64	@T[1], @XMM[$i], #63
+	veor		@XMM[$i], @XMM[$i], @T[0]
+	vand		@T[1], @T[1], $twmask
+___
+	@T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+	vld1.8		{@XMM[$i-10]}, [$inp]!
+___
+$code.=<<___ if ($i>=11);
+	veor		@XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+	vadd.u64	@XMM[8], @XMM[15], @XMM[15]
+	vst1.64		{@XMM[15]}, [r0,:128]!
+	vswp		`&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+	veor		@XMM[8], @XMM[8], @T[0]
+	vst1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+
+	vld1.8		{@XMM[6]-@XMM[7]}, [$inp]!
+	veor		@XMM[5], @XMM[5], @XMM[13]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[6], @XMM[6], @XMM[14]
+	mov		r5, $rounds			@ pass rounds
+	veor		@XMM[7], @XMM[7], @XMM[15]
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]-@XMM[13]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[6], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[4], @XMM[11]
+	vld1.64		{@XMM[14]-@XMM[15]}, [r0,:128]!
+	veor		@XMM[10], @XMM[2], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	veor		@XMM[11], @XMM[7], @XMM[13]
+	veor		@XMM[12], @XMM[3], @XMM[14]
+	vst1.8		{@XMM[10]-@XMM[11]}, [$out]!
+	veor		@XMM[13], @XMM[5], @XMM[15]
+	vst1.8		{@XMM[12]-@XMM[13]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+
+	subs		$len, #0x80
+	bpl		.Lxts_dec_loop
+
+.Lxts_dec_short:
+	adds		$len, #0x70
+	bmi		.Lxts_dec_done
+
+	vldmia		$magic, {$twmask}	@ load XTS magic
+	vshr.s64	@T[0], @XMM[8], #63
+	mov		r0, sp
+	vand		@T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+	vadd.u64	@XMM[$i], @XMM[$i-1], @XMM[$i-1]
+	vst1.64		{@XMM[$i-1]}, [r0,:128]!
+	vswp		`&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+	vshr.s64	@T[1], @XMM[$i], #63
+	veor		@XMM[$i], @XMM[$i], @T[0]
+	vand		@T[1], @T[1], $twmask
+___
+	@T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+	vld1.8		{@XMM[$i-10]}, [$inp]!
+	subs		$len, #0x10
+	bmi		.Lxts_dec_`$i-9`
+___
+$code.=<<___ if ($i>=11);
+	veor		@XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+	sub		$len, #0x10
+	vst1.64		{@XMM[15]}, [r0,:128]		@ next round tweak
+
+	vld1.8		{@XMM[6]}, [$inp]!
+	veor		@XMM[5], @XMM[5], @XMM[13]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[6], @XMM[6], @XMM[14]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]-@XMM[13]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[6], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[4], @XMM[11]
+	vld1.64		{@XMM[14]}, [r0,:128]!
+	veor		@XMM[10], @XMM[2], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	veor		@XMM[11], @XMM[7], @XMM[13]
+	veor		@XMM[12], @XMM[3], @XMM[14]
+	vst1.8		{@XMM[10]-@XMM[11]}, [$out]!
+	vst1.8		{@XMM[12]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_6:
+	vst1.64		{@XMM[14]}, [r0,:128]		@ next round tweak
+
+	veor		@XMM[4], @XMM[4], @XMM[12]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[5], @XMM[5], @XMM[13]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]-@XMM[13]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[6], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[4], @XMM[11]
+	veor		@XMM[10], @XMM[2], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	veor		@XMM[11], @XMM[7], @XMM[13]
+	vst1.8		{@XMM[10]-@XMM[11]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_5:
+	vst1.64		{@XMM[13]}, [r0,:128]		@ next round tweak
+
+	veor		@XMM[3], @XMM[3], @XMM[11]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[4], @XMM[4], @XMM[12]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[6], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[4], @XMM[11]
+	veor		@XMM[10], @XMM[2], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	vst1.8		{@XMM[10]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_4:
+	vst1.64		{@XMM[12]}, [r0,:128]		@ next round tweak
+
+	veor		@XMM[2], @XMM[2], @XMM[10]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[3], @XMM[3], @XMM[11]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[6], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[4], @XMM[11]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_3:
+	vst1.64		{@XMM[11]}, [r0,:128]		@ next round tweak
+
+	veor		@XMM[1], @XMM[1], @XMM[9]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[2], @XMM[2], @XMM[10]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[8]-@XMM[9]}, [r0,:128]!
+	vld1.64		{@XMM[10]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[6], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	vst1.8		{@XMM[8]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_2:
+	vst1.64		{@XMM[10]}, [r0,:128]		@ next round tweak
+
+	veor		@XMM[0], @XMM[0], @XMM[8]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[1], @XMM[1], @XMM[9]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[8]-@XMM[9]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_1:
+	mov		r0, sp
+	veor		@XMM[0], @XMM[8]
+	mov		r1, sp
+	vst1.8		{@XMM[0]}, [sp,:128]
+	mov		r2, $key
+	mov		r4, $fp				@ preserve fp
+	mov		r5, $magic			@ preserve magic
+
+	bl		AES_decrypt
+
+	vld1.8		{@XMM[0]}, [sp,:128]
+	veor		@XMM[0], @XMM[0], @XMM[8]
+	vst1.8		{@XMM[0]}, [$out]!
+	mov		$fp, r4
+	mov		$magic, r5
+
+	vmov		@XMM[8], @XMM[9]		@ next round tweak
+
+.Lxts_dec_done:
+#ifndef	XTS_CHAIN_TWEAK
+	adds		$len, #0x10
+	beq		.Lxts_dec_ret
+
+	@ calculate one round of extra tweak for the stolen ciphertext
+	vldmia		$magic, {$twmask}
+	vshr.s64	@XMM[6], @XMM[8], #63
+	vand		@XMM[6], @XMM[6], $twmask
+	vadd.u64	@XMM[9], @XMM[8], @XMM[8]
+	vswp		`&Dhi("@XMM[6]")`,`&Dlo("@XMM[6]")`
+	veor		@XMM[9], @XMM[9], @XMM[6]
+
+	@ perform the final decryption with the last tweak value
+	vld1.8		{@XMM[0]}, [$inp]!
+	mov		r0, sp
+	veor		@XMM[0], @XMM[0], @XMM[9]
+	mov		r1, sp
+	vst1.8		{@XMM[0]}, [sp,:128]
+	mov		r2, $key
+	mov		r4, $fp			@ preserve fp
+
+	bl		AES_decrypt
+
+	vld1.8		{@XMM[0]}, [sp,:128]
+	veor		@XMM[0], @XMM[0], @XMM[9]
+	vst1.8		{@XMM[0]}, [$out]
+
+	mov		r6, $out
+.Lxts_dec_steal:
+	ldrb		r1, [$out]
+	ldrb		r0, [$inp], #1
+	strb		r1, [$out, #0x10]
+	strb		r0, [$out], #1
+
+	subs		$len, #1
+	bhi		.Lxts_dec_steal
+
+	vld1.8		{@XMM[0]}, [r6]
+	mov		r0, sp
+	veor		@XMM[0], @XMM[8]
+	mov		r1, sp
+	vst1.8		{@XMM[0]}, [sp,:128]
+	mov		r2, $key
+
+	bl		AES_decrypt
+
+	vld1.8		{@XMM[0]}, [sp,:128]
+	veor		@XMM[0], @XMM[0], @XMM[8]
+	vst1.8		{@XMM[0]}, [r6]
+	mov		$fp, r4
+#endif
+
+.Lxts_dec_ret:
+	bic		r0, $fp, #0xf
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+#ifdef	XTS_CHAIN_TWEAK
+	ldr		r1, [$fp, #0x20+VFP_ABI_FRAME]	@ chain tweak
+#endif
+.Lxts_dec_bzero:				@ wipe key schedule [if any]
+	vstmia		sp!, {q0-q1}
+	cmp		sp, r0
+	bne		.Lxts_dec_bzero
+
+	mov		sp, $fp
+#ifdef	XTS_CHAIN_TWEAK
+	vst1.8		{@XMM[8]}, [r1]
+#endif
+	VFP_ABI_POP
+	ldmia		sp!, {r4-r10, pc}	@ return
+
+.size	bsaes_xts_decrypt,.-bsaes_xts_decrypt
+___
+}
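
For reference, .Lxts_magic (.quad 1, 0x87) drives the per-block tweak update: multiplication by x in GF(2^128) modulo x^128 + x^7 + x^2 + x + 1. vshr.s64 #63 builds per-lane carry masks, vand picks 1 for the low-to-high carry and 0x87 for the reduction, vswp routes each mask to the opposite lane, and veor applies them after the vadd.u64 doubling. A C sketch of the same update on two little-endian 64-bit halves:

#include <stdint.h>

/*
 * Sketch of the per-block XTS tweak update: multiply by x in GF(2^128)
 * modulo x^128 + x^7 + x^2 + x + 1.  t[0] is the low little-endian
 * 64-bit half, t[1] the high one, matching the NEON lane layout.
 */
static void xts_gf128_mul_x(uint64_t t[2])
{
	uint64_t hi_msb = t[1] >> 63;		/* bit that falls off the top */

	t[1] = (t[1] << 1) | (t[0] >> 63);	/* carry low half into high half */
	t[0] = (t[0] << 1) ^ (hi_msb ? 0x87 : 0); /* reduce with the 0x87 mask */
}
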
+$code.=<<___;
+#endif
+___
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+open SELF,$0;
+while(<SELF>) {
+	next if (/^#!/);
+	last if (!s/^#/@/ and !/^$/);
+	print;
+}
+close SELF;
+
+print $code;
+
+close STDOUT;
diff --git a/arch/arm/crypto/sha1-armv7-neon.S b/arch/arm/crypto/sha1-armv7-neon.S
new file mode 100644
index 0000000..50013c0
--- /dev/null
+++ b/arch/arm/crypto/sha1-armv7-neon.S
@@ -0,0 +1,634 @@
+/* sha1-armv7-neon.S - ARM/NEON accelerated SHA-1 transform function
+ *
+ * Copyright © 2013-2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/linkage.h>
+
+
+.syntax unified
+.code   32
+.fpu neon
+
+.text
+
+
+/* Context structure */
+
+#define state_h0 0
+#define state_h1 4
+#define state_h2 8
+#define state_h3 12
+#define state_h4 16
+
+
+/* Constants */
+
+#define K1  0x5A827999
+#define K2  0x6ED9EBA1
+#define K3  0x8F1BBCDC
+#define K4  0xCA62C1D6
+.align 4
+.LK_VEC:
+.LK1:	.long K1, K1, K1, K1
+.LK2:	.long K2, K2, K2, K2
+.LK3:	.long K3, K3, K3, K3
+.LK4:	.long K4, K4, K4, K4
+
+
+/* Register macros */
+
+#define RSTATE r0
+#define RDATA r1
+#define RNBLKS r2
+#define ROLDSTACK r3
+#define RWK lr
+
+#define _a r4
+#define _b r5
+#define _c r6
+#define _d r7
+#define _e r8
+
+#define RT0 r9
+#define RT1 r10
+#define RT2 r11
+#define RT3 r12
+
+#define W0 q0
+#define W1 q1
+#define W2 q2
+#define W3 q3
+#define W4 q4
+#define W5 q5
+#define W6 q6
+#define W7 q7
+
+#define tmp0 q8
+#define tmp1 q9
+#define tmp2 q10
+#define tmp3 q11
+
+#define qK1 q12
+#define qK2 q13
+#define qK3 q14
+#define qK4 q15
+
+
+/* Round function macros. */
+
+#define WK_offs(i) (((i) & 15) * 4)
+
+#define _R_F1(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
+	      W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	ldr RT3, [sp, WK_offs(i)]; \
+		pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+	bic RT0, d, b; \
+	add e, e, a, ror #(32 - 5); \
+	and RT1, c, b; \
+		pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+	add RT0, RT0, RT3; \
+	add e, e, RT1; \
+	ror b, #(32 - 30); \
+		pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+	add e, e, RT0;
+
+#define _R_F2(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
+	      W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	ldr RT3, [sp, WK_offs(i)]; \
+		pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+	eor RT0, d, b; \
+	add e, e, a, ror #(32 - 5); \
+	eor RT0, RT0, c; \
+		pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+	add e, e, RT3; \
+	ror b, #(32 - 30); \
+		pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+	add e, e, RT0; \
+
+#define _R_F3(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
+	      W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	ldr RT3, [sp, WK_offs(i)]; \
+		pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+	eor RT0, b, c; \
+	and RT1, b, c; \
+	add e, e, a, ror #(32 - 5); \
+		pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+	and RT0, RT0, d; \
+	add RT1, RT1, RT3; \
+	add e, e, RT0; \
+	ror b, #(32 - 30); \
+		pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+	add e, e, RT1;
+
+#define _R_F4(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
+	      W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	_R_F2(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
+	      W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
+
+#define _R(a,b,c,d,e,f,i,pre1,pre2,pre3,i16,\
+           W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	_R_##f(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
+	       W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
+
+#define R(a,b,c,d,e,f,i) \
+	_R_##f(a,b,c,d,e,i,dummy,dummy,dummy,i16,\
+	       W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
+
+#define dummy(...)
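
For reference, stripped of the interleaved precalc hooks (pre1..pre3), the _R_F1.._R_F4 macros are the four standard SHA-1 round bodies; RT3 holds W[i]+K, fetched from the stack ring at WK_offs(i). A scalar C model of one round (F3 is written the way the asm computes it, as ((b^c)&d) plus (b&c), whose terms are bit-disjoint, so the add equals the usual OR):

#include <stdint.h>

static inline uint32_t rol32(uint32_t x, int n)
{
	return (x << n) | (x >> (32 - n));
}

/* Scalar model of one SHA-1 round; wk is W[i] + K from the stack slot. */
static void sha1_round(uint32_t a, uint32_t *b, uint32_t c, uint32_t d,
		       uint32_t *e, int i, uint32_t wk)
{
	uint32_t f;

	if (i < 20)
		f = (*b & c) | (~*b & d);	/* F1: the bic/and pair */
	else if (i < 40 || i >= 60)
		f = *b ^ c ^ d;			/* F2 and F4 (parity) */
	else
		f = ((*b ^ c) & d) | (*b & c);	/* F3 (majority) */

	*e += rol32(a, 5) + f + wk;
	*b = rol32(*b, 30);			/* the ror b, #(32 - 30) */
}
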
+
+
+/* Input expansion macros. */
+
+/********* Precalc macros for rounds 0-15 *************************************/
+
+#define W_PRECALC_00_15() \
+	add       RWK, sp, #(WK_offs(0));			\
+	\
+	vld1.32   {tmp0, tmp1}, [RDATA]!;			\
+	vrev32.8  W0, tmp0;		/* big => little */	\
+	vld1.32   {tmp2, tmp3}, [RDATA]!;			\
+	vadd.u32  tmp0, W0, curK;				\
+	vrev32.8  W7, tmp1;		/* big => little */	\
+	vrev32.8  W6, tmp2;		/* big => little */	\
+	vadd.u32  tmp1, W7, curK;				\
+	vrev32.8  W5, tmp3;		/* big => little */	\
+	vadd.u32  tmp2, W6, curK;				\
+	vst1.32   {tmp0, tmp1}, [RWK]!;				\
+	vadd.u32  tmp3, W5, curK;				\
+	vst1.32   {tmp2, tmp3}, [RWK];				\
+
+#define WPRECALC_00_15_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vld1.32   {tmp0, tmp1}, [RDATA]!;			\
+
+#define WPRECALC_00_15_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	add       RWK, sp, #(WK_offs(0));			\
+
+#define WPRECALC_00_15_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vrev32.8  W0, tmp0;		/* big => little */	\
+
+#define WPRECALC_00_15_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vld1.32   {tmp2, tmp3}, [RDATA]!;			\
+
+#define WPRECALC_00_15_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vadd.u32  tmp0, W0, curK;				\
+
+#define WPRECALC_00_15_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vrev32.8  W7, tmp1;		/* big => little */	\
+
+#define WPRECALC_00_15_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vrev32.8  W6, tmp2;		/* big => little */	\
+
+#define WPRECALC_00_15_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vadd.u32  tmp1, W7, curK;				\
+
+#define WPRECALC_00_15_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vrev32.8  W5, tmp3;		/* big => little */	\
+
+#define WPRECALC_00_15_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vadd.u32  tmp2, W6, curK;				\
+
+#define WPRECALC_00_15_10(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vst1.32   {tmp0, tmp1}, [RWK]!;				\
+
+#define WPRECALC_00_15_11(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vadd.u32  tmp3, W5, curK;				\
+
+#define WPRECALC_00_15_12(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vst1.32   {tmp2, tmp3}, [RWK];				\
+
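
For reference, the rounds 0-15 precalc above only byte-swaps the 16 big-endian message words into host order (vrev32.8) and adds the round constant, parking W[i]+K in the 16-entry stack ring the round macros read back with ldr. A scalar C sketch:

#include <stdint.h>

#define SHA1_K1 0x5A827999

/* Scalar sketch of W_PRECALC_00_15: byte-swap and add the constant. */
static void precalc_00_15(const uint8_t data[64], uint32_t w[16],
			  uint32_t wk[16])
{
	for (int i = 0; i < 16; i++) {
		w[i] = ((uint32_t)data[4 * i]     << 24) |
		       ((uint32_t)data[4 * i + 1] << 16) |
		       ((uint32_t)data[4 * i + 2] <<  8) |
			(uint32_t)data[4 * i + 3];
		wk[i] = w[i] + SHA1_K1;		/* rounds 0-15 all use K1 */
	}
}
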
+
+/********* Precalc macros for rounds 16-31 ************************************/
+
+#define WPRECALC_16_31_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	veor      tmp0, tmp0;			\
+	vext.8    W, W_m16, W_m12, #8;		\
+
+#define WPRECALC_16_31_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	add       RWK, sp, #(WK_offs(i));	\
+	vext.8    tmp0, W_m04, tmp0, #4;	\
+
+#define WPRECALC_16_31_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	veor      tmp0, tmp0, W_m16;		\
+	veor.32   W, W, W_m08;			\
+
+#define WPRECALC_16_31_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	veor      tmp1, tmp1;			\
+	veor      W, W, tmp0;			\
+
+#define WPRECALC_16_31_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vshl.u32  tmp0, W, #1;			\
+
+#define WPRECALC_16_31_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vext.8    tmp1, tmp1, W, #(16-12);	\
+	vshr.u32  W, W, #31;			\
+
+#define WPRECALC_16_31_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vorr      tmp0, tmp0, W;		\
+	vshr.u32  W, tmp1, #30;			\
+
+#define WPRECALC_16_31_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vshl.u32  tmp1, tmp1, #2;		\
+
+#define WPRECALC_16_31_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	veor      tmp0, tmp0, W;		\
+
+#define WPRECALC_16_31_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	veor      W, tmp0, tmp1;		\
+
+#define WPRECALC_16_31_10(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vadd.u32  tmp0, W, curK;		\
+
+#define WPRECALC_16_31_11(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vst1.32   {tmp0}, [RWK];
+
+
+/********* Precalc macros for rounds 32-79 ************************************/
+
+#define WPRECALC_32_79_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	veor W, W_m28; \
+
+#define WPRECALC_32_79_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vext.8 tmp0, W_m08, W_m04, #8; \
+
+#define WPRECALC_32_79_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	veor W, W_m16; \
+
+#define WPRECALC_32_79_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	veor W, tmp0; \
+
+#define WPRECALC_32_79_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	add RWK, sp, #(WK_offs(i&~3)); \
+
+#define WPRECALC_32_79_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vshl.u32 tmp1, W, #2; \
+
+#define WPRECALC_32_79_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vshr.u32 tmp0, W, #30; \
+
+#define WPRECALC_32_79_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vorr W, tmp0, tmp1; \
+
+#define WPRECALC_32_79_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vadd.u32 tmp0, W, curK; \
+
+#define WPRECALC_32_79_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vst1.32 {tmp0}, [RWK];
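
For reference, the 16-31 macros evaluate the standard schedule recurrence W[i] = rol1(W[i-3] ^ W[i-8] ^ W[i-14] ^ W[i-16]) four words per q register, with the vshl/vshr pairs forming the vector rotate. The 32-79 macros switch to the unrolled equivalent W[i] = rol2(W[i-6] ^ W[i-16] ^ W[i-28] ^ W[i-32]), which removes the W[i-3] feedback that would otherwise cross lanes inside the vector. Scalar C equivalents of the two forms:

#include <stdint.h>

static inline uint32_t rol32(uint32_t x, int n)
{
	return (x << n) | (x >> (32 - n));
}

static uint32_t w_16_31(const uint32_t w[], int i)	/* 16 <= i < 32 */
{
	return rol32(w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16], 1);
}

/* Unrolling the recurrence once gives a form with no i-3 term. */
static uint32_t w_32_79(const uint32_t w[], int i)	/* 32 <= i < 80 */
{
	return rol32(w[i - 6] ^ w[i - 16] ^ w[i - 28] ^ w[i - 32], 2);
}
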
+
+
+/*
+ * Transform nblks*64 bytes (nblks*16 32-bit words) at DATA.
+ *
+ * unsigned int
+ * sha1_transform_neon (void *ctx, const unsigned char *data,
+ *                      unsigned int nblks)
+ */
+.align 3
+ENTRY(sha1_transform_neon)
+  /* input:
+   *	r0: ctx, CTX
+   *	r1: data (64*nblks bytes)
+   *	r2: nblks
+   */
+
+  cmp RNBLKS, #0;
+  beq .Ldo_nothing;
+
+  push {r4-r12, lr};
+  /*vpush {q4-q7};*/
+
+  adr RT3, .LK_VEC;
+
+  mov ROLDSTACK, sp;
+
+  /* Align stack. */
+  sub RT0, sp, #(16*4);
+  and RT0, #(~(16-1));
+  mov sp, RT0;
+
+  vld1.32 {qK1-qK2}, [RT3]!; /* Load K1,K2 */
+
+  /* Get the values of the chaining variables. */
+  ldm RSTATE, {_a-_e};
+
+  vld1.32 {qK3-qK4}, [RT3]; /* Load K3,K4 */
+
+#undef curK
+#define curK qK1
+  /* Precalc 0-15. */
+  W_PRECALC_00_15();
+
+.Loop:
+  /* Transform 0-15 + Precalc 16-31. */
+  _R( _a, _b, _c, _d, _e, F1,  0,
+      WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 16,
+      W4, W5, W6, W7, W0, _, _, _ );
+  _R( _e, _a, _b, _c, _d, F1,  1,
+      WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 16,
+      W4, W5, W6, W7, W0, _, _, _ );
+  _R( _d, _e, _a, _b, _c, F1,  2,
+      WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 16,
+      W4, W5, W6, W7, W0, _, _, _ );
+  _R( _c, _d, _e, _a, _b, F1,  3,
+      WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,16,
+      W4, W5, W6, W7, W0, _, _, _ );
+
+#undef curK
+#define curK qK2
+  _R( _b, _c, _d, _e, _a, F1,  4,
+      WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 20,
+      W3, W4, W5, W6, W7, _, _, _ );
+  _R( _a, _b, _c, _d, _e, F1,  5,
+      WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 20,
+      W3, W4, W5, W6, W7, _, _, _ );
+  _R( _e, _a, _b, _c, _d, F1,  6,
+      WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 20,
+      W3, W4, W5, W6, W7, _, _, _ );
+  _R( _d, _e, _a, _b, _c, F1,  7,
+      WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,20,
+      W3, W4, W5, W6, W7, _, _, _ );
+
+  _R( _c, _d, _e, _a, _b, F1,  8,
+      WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 24,
+      W2, W3, W4, W5, W6, _, _, _ );
+  _R( _b, _c, _d, _e, _a, F1,  9,
+      WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 24,
+      W2, W3, W4, W5, W6, _, _, _ );
+  _R( _a, _b, _c, _d, _e, F1, 10,
+      WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 24,
+      W2, W3, W4, W5, W6, _, _, _ );
+  _R( _e, _a, _b, _c, _d, F1, 11,
+      WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,24,
+      W2, W3, W4, W5, W6, _, _, _ );
+
+  _R( _d, _e, _a, _b, _c, F1, 12,
+      WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 28,
+      W1, W2, W3, W4, W5, _, _, _ );
+  _R( _c, _d, _e, _a, _b, F1, 13,
+      WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 28,
+      W1, W2, W3, W4, W5, _, _, _ );
+  _R( _b, _c, _d, _e, _a, F1, 14,
+      WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 28,
+      W1, W2, W3, W4, W5, _, _, _ );
+  _R( _a, _b, _c, _d, _e, F1, 15,
+      WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,28,
+      W1, W2, W3, W4, W5, _, _, _ );
+
+  /* Transform 16-63 + Precalc 32-79. */
+  _R( _e, _a, _b, _c, _d, F1, 16,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 32,
+      W0, W1, W2, W3, W4, W5, W6, W7);
+  _R( _d, _e, _a, _b, _c, F1, 17,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 32,
+      W0, W1, W2, W3, W4, W5, W6, W7);
+  _R( _c, _d, _e, _a, _b, F1, 18,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 32,
+      W0, W1, W2, W3, W4, W5, W6, W7);
+  _R( _b, _c, _d, _e, _a, F1, 19,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 32,
+      W0, W1, W2, W3, W4, W5, W6, W7);
+
+  _R( _a, _b, _c, _d, _e, F2, 20,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 36,
+      W7, W0, W1, W2, W3, W4, W5, W6);
+  _R( _e, _a, _b, _c, _d, F2, 21,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 36,
+      W7, W0, W1, W2, W3, W4, W5, W6);
+  _R( _d, _e, _a, _b, _c, F2, 22,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 36,
+      W7, W0, W1, W2, W3, W4, W5, W6);
+  _R( _c, _d, _e, _a, _b, F2, 23,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 36,
+      W7, W0, W1, W2, W3, W4, W5, W6);
+
+#undef curK
+#define curK qK3
+  _R( _b, _c, _d, _e, _a, F2, 24,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 40,
+      W6, W7, W0, W1, W2, W3, W4, W5);
+  _R( _a, _b, _c, _d, _e, F2, 25,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 40,
+      W6, W7, W0, W1, W2, W3, W4, W5);
+  _R( _e, _a, _b, _c, _d, F2, 26,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 40,
+      W6, W7, W0, W1, W2, W3, W4, W5);
+  _R( _d, _e, _a, _b, _c, F2, 27,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 40,
+      W6, W7, W0, W1, W2, W3, W4, W5);
+
+  _R( _c, _d, _e, _a, _b, F2, 28,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 44,
+      W5, W6, W7, W0, W1, W2, W3, W4);
+  _R( _b, _c, _d, _e, _a, F2, 29,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 44,
+      W5, W6, W7, W0, W1, W2, W3, W4);
+  _R( _a, _b, _c, _d, _e, F2, 30,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 44,
+      W5, W6, W7, W0, W1, W2, W3, W4);
+  _R( _e, _a, _b, _c, _d, F2, 31,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 44,
+      W5, W6, W7, W0, W1, W2, W3, W4);
+
+  _R( _d, _e, _a, _b, _c, F2, 32,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 48,
+      W4, W5, W6, W7, W0, W1, W2, W3);
+  _R( _c, _d, _e, _a, _b, F2, 33,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 48,
+      W4, W5, W6, W7, W0, W1, W2, W3);
+  _R( _b, _c, _d, _e, _a, F2, 34,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 48,
+      W4, W5, W6, W7, W0, W1, W2, W3);
+  _R( _a, _b, _c, _d, _e, F2, 35,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 48,
+      W4, W5, W6, W7, W0, W1, W2, W3);
+
+  _R( _e, _a, _b, _c, _d, F2, 36,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 52,
+      W3, W4, W5, W6, W7, W0, W1, W2);
+  _R( _d, _e, _a, _b, _c, F2, 37,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 52,
+      W3, W4, W5, W6, W7, W0, W1, W2);
+  _R( _c, _d, _e, _a, _b, F2, 38,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 52,
+      W3, W4, W5, W6, W7, W0, W1, W2);
+  _R( _b, _c, _d, _e, _a, F2, 39,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 52,
+      W3, W4, W5, W6, W7, W0, W1, W2);
+
+  _R( _a, _b, _c, _d, _e, F3, 40,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 56,
+      W2, W3, W4, W5, W6, W7, W0, W1);
+  _R( _e, _a, _b, _c, _d, F3, 41,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 56,
+      W2, W3, W4, W5, W6, W7, W0, W1);
+  _R( _d, _e, _a, _b, _c, F3, 42,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 56,
+      W2, W3, W4, W5, W6, W7, W0, W1);
+  _R( _c, _d, _e, _a, _b, F3, 43,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 56,
+      W2, W3, W4, W5, W6, W7, W0, W1);
+
+#undef curK
+#define curK qK4
+  _R( _b, _c, _d, _e, _a, F3, 44,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 60,
+      W1, W2, W3, W4, W5, W6, W7, W0);
+  _R( _a, _b, _c, _d, _e, F3, 45,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 60,
+      W1, W2, W3, W4, W5, W6, W7, W0);
+  _R( _e, _a, _b, _c, _d, F3, 46,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 60,
+      W1, W2, W3, W4, W5, W6, W7, W0);
+  _R( _d, _e, _a, _b, _c, F3, 47,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 60,
+      W1, W2, W3, W4, W5, W6, W7, W0);
+
+  _R( _c, _d, _e, _a, _b, F3, 48,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 64,
+      W0, W1, W2, W3, W4, W5, W6, W7);
+  _R( _b, _c, _d, _e, _a, F3, 49,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 64,
+      W0, W1, W2, W3, W4, W5, W6, W7);
+  _R( _a, _b, _c, _d, _e, F3, 50,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 64,
+      W0, W1, W2, W3, W4, W5, W6, W7);
+  _R( _e, _a, _b, _c, _d, F3, 51,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 64,
+      W0, W1, W2, W3, W4, W5, W6, W7);
+
+  _R( _d, _e, _a, _b, _c, F3, 52,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 68,
+      W7, W0, W1, W2, W3, W4, W5, W6);
+  _R( _c, _d, _e, _a, _b, F3, 53,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 68,
+      W7, W0, W1, W2, W3, W4, W5, W6);
+  _R( _b, _c, _d, _e, _a, F3, 54,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 68,
+      W7, W0, W1, W2, W3, W4, W5, W6);
+  _R( _a, _b, _c, _d, _e, F3, 55,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 68,
+      W7, W0, W1, W2, W3, W4, W5, W6);
+
+  _R( _e, _a, _b, _c, _d, F3, 56,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 72,
+      W6, W7, W0, W1, W2, W3, W4, W5);
+  _R( _d, _e, _a, _b, _c, F3, 57,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 72,
+      W6, W7, W0, W1, W2, W3, W4, W5);
+  _R( _c, _d, _e, _a, _b, F3, 58,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 72,
+      W6, W7, W0, W1, W2, W3, W4, W5);
+  _R( _b, _c, _d, _e, _a, F3, 59,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 72,
+      W6, W7, W0, W1, W2, W3, W4, W5);
+
+  subs RNBLKS, #1;
+
+  _R( _a, _b, _c, _d, _e, F4, 60,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 76,
+      W5, W6, W7, W0, W1, W2, W3, W4);
+  _R( _e, _a, _b, _c, _d, F4, 61,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 76,
+      W5, W6, W7, W0, W1, W2, W3, W4);
+  _R( _d, _e, _a, _b, _c, F4, 62,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 76,
+      W5, W6, W7, W0, W1, W2, W3, W4);
+  _R( _c, _d, _e, _a, _b, F4, 63,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 76,
+      W5, W6, W7, W0, W1, W2, W3, W4);
+
+  beq .Lend;
+
+  /* Transform 64-79 + Precalc 0-15 of next block. */
+#undef curK
+#define curK qK1
+  _R( _b, _c, _d, _e, _a, F4, 64,
+      WPRECALC_00_15_0, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _a, _b, _c, _d, _e, F4, 65,
+      WPRECALC_00_15_1, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _e, _a, _b, _c, _d, F4, 66,
+      WPRECALC_00_15_2, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _d, _e, _a, _b, _c, F4, 67,
+      WPRECALC_00_15_3, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+
+  _R( _c, _d, _e, _a, _b, F4, 68,
+      dummy,            dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _b, _c, _d, _e, _a, F4, 69,
+      dummy,            dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _a, _b, _c, _d, _e, F4, 70,
+      WPRECALC_00_15_4, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _e, _a, _b, _c, _d, F4, 71,
+      WPRECALC_00_15_5, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+
+  _R( _d, _e, _a, _b, _c, F4, 72,
+      dummy,            dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _c, _d, _e, _a, _b, F4, 73,
+      dummy,            dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _b, _c, _d, _e, _a, F4, 74,
+      WPRECALC_00_15_6, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _a, _b, _c, _d, _e, F4, 75,
+      WPRECALC_00_15_7, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+
+  _R( _e, _a, _b, _c, _d, F4, 76,
+      WPRECALC_00_15_8, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _d, _e, _a, _b, _c, F4, 77,
+      WPRECALC_00_15_9, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _c, _d, _e, _a, _b, F4, 78,
+      WPRECALC_00_15_10, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _b, _c, _d, _e, _a, F4, 79,
+      WPRECALC_00_15_11, dummy, WPRECALC_00_15_12, _, _, _, _, _, _, _, _, _ );
+
+  /* Update the chaining variables. */
+  ldm RSTATE, {RT0-RT3};
+  add _a, RT0;
+  ldr RT0, [RSTATE, #state_h4];
+  add _b, RT1;
+  add _c, RT2;
+  add _d, RT3;
+  add _e, RT0;
+  stm RSTATE, {_a-_e};
+
+  b .Loop;
+
+.Lend:
+  /* Transform 64-79 */
+  R( _b, _c, _d, _e, _a, F4, 64 );
+  R( _a, _b, _c, _d, _e, F4, 65 );
+  R( _e, _a, _b, _c, _d, F4, 66 );
+  R( _d, _e, _a, _b, _c, F4, 67 );
+  R( _c, _d, _e, _a, _b, F4, 68 );
+  R( _b, _c, _d, _e, _a, F4, 69 );
+  R( _a, _b, _c, _d, _e, F4, 70 );
+  R( _e, _a, _b, _c, _d, F4, 71 );
+  R( _d, _e, _a, _b, _c, F4, 72 );
+  R( _c, _d, _e, _a, _b, F4, 73 );
+  R( _b, _c, _d, _e, _a, F4, 74 );
+  R( _a, _b, _c, _d, _e, F4, 75 );
+  R( _e, _a, _b, _c, _d, F4, 76 );
+  R( _d, _e, _a, _b, _c, F4, 77 );
+  R( _c, _d, _e, _a, _b, F4, 78 );
+  R( _b, _c, _d, _e, _a, F4, 79 );
+
+  mov sp, ROLDSTACK;
+
+  /* Update the chaining variables. */
+  ldm RSTATE, {RT0-RT3};
+  add _a, RT0;
+  ldr RT0, [RSTATE, #state_h4];
+  add _b, RT1;
+  add _c, RT2;
+  add _d, RT3;
+  /*vpop {q4-q7};*/
+  add _e, RT0;
+  stm RSTATE, {_a-_e};
+
+  pop {r4-r12, pc};
+
+.Ldo_nothing:
+  bx lr
+ENDPROC(sha1_transform_neon)
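
The epilogue above (both the .Loop continuation and the .Lend tail) implements
the standard SHA-1 feed-forward: the five working variables _a.._e are added
back into the h0..h4 words reloaded from RSTATE before the next block starts.
As a reference for what sha1_transform_neon(state, data, nblocks) computes,
here is a minimal portable C sketch of the same multi-block driver and of the
80 rounds the assembly unrolls; it is illustrative only (compress_block and
sha1_blocks are hypothetical names, not the kernel's code):

#include <stdint.h>
#include <string.h>

static uint32_t rol32(uint32_t x, unsigned int n)
{
	return (x << n) | (x >> (32 - n));
}

/* One 64-byte block: the 80 rounds (F1..F4 with K1..K4) that the
 * _R()/R() macros above unroll and interleave with W precalculation. */
static void compress_block(uint32_t v[5], const uint8_t p[64])
{
	uint32_t w[16], a = v[0], b = v[1], c = v[2], d = v[3], e = v[4];
	int i;

	for (i = 0; i < 16; i++)	/* big-endian message load */
		w[i] = (uint32_t)p[4 * i] << 24 | (uint32_t)p[4 * i + 1] << 16 |
		       (uint32_t)p[4 * i + 2] << 8 | p[4 * i + 3];

	for (i = 0; i < 80; i++) {
		uint32_t f, k, tmp;

		if (i < 20)      { f = (b & c) | (~b & d);          k = 0x5a827999; } /* F1/K1 */
		else if (i < 40) { f = b ^ c ^ d;                   k = 0x6ed9eba1; } /* F2/K2 */
		else if (i < 60) { f = (b & c) | (b & d) | (c & d); k = 0x8f1bbcdc; } /* F3/K3 */
		else             { f = b ^ c ^ d;                   k = 0xca62c1d6; } /* F4/K4 */

		if (i >= 16) {	/* in-place message schedule (the WPRECALC_* work) */
			tmp = w[i & 15] ^ w[(i + 2) & 15] ^
			      w[(i + 8) & 15] ^ w[(i + 13) & 15];
			w[i & 15] = rol32(tmp, 1);
		}
		tmp = rol32(a, 5) + f + e + k + w[i & 15];
		e = d; d = c; c = rol32(b, 30); b = a; a = tmp;
	}
	v[0] = a; v[1] = b; v[2] = c; v[3] = d; v[4] = e;
}

/* Driver loop: feed the working variables back into the chaining state
 * after every block, exactly as the ldm/add/stm sequence above does. */
static void sha1_blocks(uint32_t state[5], const uint8_t *data,
			unsigned int nblocks)
{
	while (nblocks--) {
		uint32_t v[5];

		memcpy(v, state, sizeof(v));
		compress_block(v, data);
		state[0] += v[0];
		state[1] += v[1];
		state[2] += v[2];
		state[3] += v[3];
		state[4] += v[4];
		data += 64;
	}
}
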
diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c
index ace4cd6..e31b044 100644
--- a/arch/arm/crypto/sha1_glue.c
+++ b/arch/arm/crypto/sha1_glue.c
@@ -23,32 +23,27 @@
 #include <linux/types.h>
 #include <crypto/sha.h>
 #include <asm/byteorder.h>
+#include <asm/crypto/sha1.h>
 
-struct SHA1_CTX {
-	uint32_t h0,h1,h2,h3,h4;
-	u64 count;
-	u8 data[SHA1_BLOCK_SIZE];
-};
 
-asmlinkage void sha1_block_data_order(struct SHA1_CTX *digest,
+asmlinkage void sha1_block_data_order(u32 *digest,
 		const unsigned char *data, unsigned int rounds);
 
 
 static int sha1_init(struct shash_desc *desc)
 {
-	struct SHA1_CTX *sctx = shash_desc_ctx(desc);
-	memset(sctx, 0, sizeof(*sctx));
-	sctx->h0 = SHA1_H0;
-	sctx->h1 = SHA1_H1;
-	sctx->h2 = SHA1_H2;
-	sctx->h3 = SHA1_H3;
-	sctx->h4 = SHA1_H4;
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+
+	*sctx = (struct sha1_state){
+		.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
+	};
+
 	return 0;
 }
 
 
-static int __sha1_update(struct SHA1_CTX *sctx, const u8 *data,
-			       unsigned int len, unsigned int partial)
+static int __sha1_update(struct sha1_state *sctx, const u8 *data,
+			 unsigned int len, unsigned int partial)
 {
 	unsigned int done = 0;
 
@@ -56,43 +51,44 @@
 
 	if (partial) {
 		done = SHA1_BLOCK_SIZE - partial;
-		memcpy(sctx->data + partial, data, done);
-		sha1_block_data_order(sctx, sctx->data, 1);
+		memcpy(sctx->buffer + partial, data, done);
+		sha1_block_data_order(sctx->state, sctx->buffer, 1);
 	}
 
 	if (len - done >= SHA1_BLOCK_SIZE) {
 		const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;
-		sha1_block_data_order(sctx, data + done, rounds);
+		sha1_block_data_order(sctx->state, data + done, rounds);
 		done += rounds * SHA1_BLOCK_SIZE;
 	}
 
-	memcpy(sctx->data, data + done, len - done);
+	memcpy(sctx->buffer, data + done, len - done);
 	return 0;
 }
 
 
-static int sha1_update(struct shash_desc *desc, const u8 *data,
-			     unsigned int len)
+int sha1_update_arm(struct shash_desc *desc, const u8 *data,
+		    unsigned int len)
 {
-	struct SHA1_CTX *sctx = shash_desc_ctx(desc);
+	struct sha1_state *sctx = shash_desc_ctx(desc);
 	unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
 	int res;
 
 	/* Handle the fast case right here */
 	if (partial + len < SHA1_BLOCK_SIZE) {
 		sctx->count += len;
-		memcpy(sctx->data + partial, data, len);
+		memcpy(sctx->buffer + partial, data, len);
 		return 0;
 	}
 	res = __sha1_update(sctx, data, len, partial);
 	return res;
 }
+EXPORT_SYMBOL_GPL(sha1_update_arm);
 
 
 /* Add padding and return the message digest. */
 static int sha1_final(struct shash_desc *desc, u8 *out)
 {
-	struct SHA1_CTX *sctx = shash_desc_ctx(desc);
+	struct sha1_state *sctx = shash_desc_ctx(desc);
 	unsigned int i, index, padlen;
 	__be32 *dst = (__be32 *)out;
 	__be64 bits;
@@ -106,7 +102,7 @@
 	/* We need to fill a whole block for __sha1_update() */
 	if (padlen <= 56) {
 		sctx->count += padlen;
-		memcpy(sctx->data + index, padding, padlen);
+		memcpy(sctx->buffer + index, padding, padlen);
 	} else {
 		__sha1_update(sctx, padding, padlen, index);
 	}
@@ -114,7 +110,7 @@
 
 	/* Store state in digest */
 	for (i = 0; i < 5; i++)
-		dst[i] = cpu_to_be32(((u32 *)sctx)[i]);
+		dst[i] = cpu_to_be32(sctx->state[i]);
 
 	/* Wipe context */
 	memset(sctx, 0, sizeof(*sctx));
@@ -124,7 +120,7 @@
 
 static int sha1_export(struct shash_desc *desc, void *out)
 {
-	struct SHA1_CTX *sctx = shash_desc_ctx(desc);
+	struct sha1_state *sctx = shash_desc_ctx(desc);
 	memcpy(out, sctx, sizeof(*sctx));
 	return 0;
 }
@@ -132,7 +128,7 @@
 
 static int sha1_import(struct shash_desc *desc, const void *in)
 {
-	struct SHA1_CTX *sctx = shash_desc_ctx(desc);
+	struct sha1_state *sctx = shash_desc_ctx(desc);
 	memcpy(sctx, in, sizeof(*sctx));
 	return 0;
 }
@@ -141,12 +137,12 @@
 static struct shash_alg alg = {
 	.digestsize	=	SHA1_DIGEST_SIZE,
 	.init		=	sha1_init,
-	.update		=	sha1_update,
+	.update		=	sha1_update_arm,
 	.final		=	sha1_final,
 	.export		=	sha1_export,
 	.import		=	sha1_import,
-	.descsize	=	sizeof(struct SHA1_CTX),
-	.statesize	=	sizeof(struct SHA1_CTX),
+	.descsize	=	sizeof(struct sha1_state),
+	.statesize	=	sizeof(struct sha1_state),
 	.base		=	{
 		.cra_name	=	"sha1",
 		.cra_driver_name=	"sha1-asm",
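
With sha1_update_arm() now exported, the NEON glue added below can fall back
to this scalar assembler path whenever NEON is unusable. Both drivers register
under cra_name "sha1" and differ only in cra_driver_name and cra_priority, so
in-kernel users are unaffected. As a rough sketch (not part of this patch;
sha1_digest_example is a hypothetical caller, and error handling is trimmed to
the essentials), a typical consumer of the registered shash looks like:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Hash 'len' bytes at 'data' into 'out' (20 bytes) via the "sha1"
 * shash registered above. The crypto core picks the highest-priority
 * provider, so "sha1-asm" (or "sha1-neon" below, at priority 250)
 * wins over the generic C implementation when available. */
static int sha1_digest_example(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
		       GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;
	desc->flags = 0;	/* descriptor flags as in this kernel generation */

	err = crypto_shash_digest(desc, data, len, out);

	kfree(desc);
	crypto_free_shash(tfm);
	return err;
}
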
diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c
new file mode 100644
index 0000000..6f1b411
--- /dev/null
+++ b/arch/arm/crypto/sha1_neon_glue.c
@@ -0,0 +1,197 @@
+/*
+ * Glue code for the SHA1 Secure Hash Algorithm assembler implementation using
+ * ARM NEON instructions.
+ *
+ * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ *
+ * This file is based on sha1_generic.c and sha1_ssse3_glue.c:
+ *  Copyright (c) Alan Smithee.
+ *  Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ *  Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
+ *  Copyright (c) Mathias Krause <minipli@googlemail.com>
+ *  Copyright (c) Chandramouli Narayanan <mouli@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/cryptohash.h>
+#include <linux/types.h>
+#include <crypto/sha.h>
+#include <asm/byteorder.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+#include <asm/crypto/sha1.h>
+
+
+asmlinkage void sha1_transform_neon(void *state_h, const char *data,
+				    unsigned int rounds);
+
+
+static int sha1_neon_init(struct shash_desc *desc)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+
+	*sctx = (struct sha1_state){
+		.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
+	};
+
+	return 0;
+}
+
+static int __sha1_neon_update(struct shash_desc *desc, const u8 *data,
+			       unsigned int len, unsigned int partial)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+	unsigned int done = 0;
+
+	sctx->count += len;
+
+	if (partial) {
+		done = SHA1_BLOCK_SIZE - partial;
+		memcpy(sctx->buffer + partial, data, done);
+		sha1_transform_neon(sctx->state, sctx->buffer, 1);
+	}
+
+	if (len - done >= SHA1_BLOCK_SIZE) {
+		const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;
+
+		sha1_transform_neon(sctx->state, data + done, rounds);
+		done += rounds * SHA1_BLOCK_SIZE;
+	}
+
+	memcpy(sctx->buffer, data + done, len - done);
+
+	return 0;
+}
+
+static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
+			     unsigned int len)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+	unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
+	int res;
+
+	/* Handle the fast case right here */
+	if (partial + len < SHA1_BLOCK_SIZE) {
+		sctx->count += len;
+		memcpy(sctx->buffer + partial, data, len);
+
+		return 0;
+	}
+
+	if (!may_use_simd()) {
+		res = sha1_update_arm(desc, data, len);
+	} else {
+		kernel_neon_begin();
+		res = __sha1_neon_update(desc, data, len, partial);
+		kernel_neon_end();
+	}
+
+	return res;
+}
+
+
+/* Add padding and return the message digest. */
+static int sha1_neon_final(struct shash_desc *desc, u8 *out)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+	unsigned int i, index, padlen;
+	__be32 *dst = (__be32 *)out;
+	__be64 bits;
+	static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };
+
+	bits = cpu_to_be64(sctx->count << 3);
+
+	/* Pad out to 56 mod 64 and append length */
+	index = sctx->count % SHA1_BLOCK_SIZE;
+	padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE+56) - index);
+	if (!may_use_simd()) {
+		sha1_update_arm(desc, padding, padlen);
+		sha1_update_arm(desc, (const u8 *)&bits, sizeof(bits));
+	} else {
+		kernel_neon_begin();
+		/* We need to fill a whole block for __sha1_neon_update() */
+		if (padlen <= 56) {
+			sctx->count += padlen;
+			memcpy(sctx->buffer + index, padding, padlen);
+		} else {
+			__sha1_neon_update(desc, padding, padlen, index);
+		}
+		__sha1_neon_update(desc, (const u8 *)&bits, sizeof(bits), 56);
+		kernel_neon_end();
+	}
+
+	/* Store state in digest */
+	for (i = 0; i < 5; i++)
+		dst[i] = cpu_to_be32(sctx->state[i]);
+
+	/* Wipe context */
+	memset(sctx, 0, sizeof(*sctx));
+
+	return 0;
+}
+
+static int sha1_neon_export(struct shash_desc *desc, void *out)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+
+	memcpy(out, sctx, sizeof(*sctx));
+
+	return 0;
+}
+
+static int sha1_neon_import(struct shash_desc *desc, const void *in)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+
+	memcpy(sctx, in, sizeof(*sctx));
+
+	return 0;
+}
+
+static struct shash_alg alg = {
+	.digestsize	=	SHA1_DIGEST_SIZE,
+	.init		=	sha1_neon_init,
+	.update		=	sha1_neon_update,
+	.final		=	sha1_neon_final,
+	.export		=	sha1_neon_export,
+	.import		=	sha1_neon_import,
+	.descsize	=	sizeof(struct sha1_state),
+	.statesize	=	sizeof(struct sha1_state),
+	.base		=	{
+		.cra_name		= "sha1",
+		.cra_driver_name	= "sha1-neon",
+		.cra_priority		= 250,
+		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize		= SHA1_BLOCK_SIZE,
+		.cra_module		= THIS_MODULE,
+	}
+};
+
+static int __init sha1_neon_mod_init(void)
+{
+	if (!cpu_has_neon())
+		return -ENODEV;
+
+	return crypto_register_shash(&alg);
+}
+
+static void __exit sha1_neon_mod_fini(void)
+{
+	crypto_unregister_shash(&alg);
+}
+
+module_init(sha1_neon_mod_init);
+module_exit(sha1_neon_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, NEON accelerated");
+MODULE_ALIAS("sha1");
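
The padlen expression in sha1_neon_final() above pads the message to 56 mod 64
so that the 8-byte big-endian bit count exactly completes the final 64-byte
block. A small standalone check of that arithmetic (userspace sketch;
sha1_padlen is a hypothetical name for the inline expression):

#include <assert.h>
#include <stdint.h>

#define SHA1_BLOCK_SIZE 64

/* The padlen formula from sha1_neon_final(): pad to 56 mod 64 so the
 * 8-byte length field lands at the end of a full block. */
static unsigned int sha1_padlen(uint64_t count)
{
	unsigned int index = count % SHA1_BLOCK_SIZE;

	return (index < 56) ? (56 - index)
			    : ((SHA1_BLOCK_SIZE + 56) - index);
}

int main(void)
{
	uint64_t count;

	/* e.g. count = 60: index 60 >= 56, so padlen = 64 + 56 - 60 = 60,
	 * and 60 + 60 + 8 = 128, i.e. exactly two full blocks. */
	assert(sha1_padlen(60) == 60);

	for (count = 0; count < 512; count++)
		assert((count + sha1_padlen(count) + 8) %
		       SHA1_BLOCK_SIZE == 0);
	return 0;
}
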
diff --git a/arch/arm/crypto/sha256-armv4.pl b/arch/arm/crypto/sha256-armv4.pl
new file mode 100644
index 0000000..fac0533
--- /dev/null
+++ b/arch/arm/crypto/sha256-armv4.pl
@@ -0,0 +1,716 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+#
+# Permission to use under GPL terms is granted.
+# ====================================================================
+
+# SHA256 block procedure for ARMv4. May 2007.
+
+# Performance is ~2x better than gcc 3.4 generated code and in
+# "absolute" terms is ~2250 cycles per 64-byte block or ~35 cycles
+# per byte [on single-issue Xscale PXA250 core].
+
+# July 2010.
+#
+# Rescheduling for dual-issue pipeline resulted in 22% improvement on
+# Cortex A8 core and ~20 cycles per processed byte.
+
+# February 2011.
+#
+# Profiler-assisted and platform-specific optimization resulted in 16%
+# improvement on Cortex A8 core and ~15.4 cycles per processed byte.
+
+# September 2013.
+#
+# Add NEON implementation. On Cortex A8 it was measured to process one
+# byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon
+# S4 does it in 12.5 cycles too, but it's 50% faster than integer-only
+# code (meaning that the latter performs sub-optimally; nothing was done
+# about it).
+
+# May 2014.
+#
+# Add ARMv8 code path performing at 2.0 cpb on Apple A7.
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+$ctx="r0";	$t0="r0";
+$inp="r1";	$t4="r1";
+$len="r2";	$t1="r2";
+$T1="r3";	$t3="r3";
+$A="r4";
+$B="r5";
+$C="r6";
+$D="r7";
+$E="r8";
+$F="r9";
+$G="r10";
+$H="r11";
+@V=($A,$B,$C,$D,$E,$F,$G,$H);
+$t2="r12";
+$Ktbl="r14";
+
+@Sigma0=( 2,13,22);
+@Sigma1=( 6,11,25);
+@sigma0=( 7,18, 3);
+@sigma1=(17,19,10);
+
+sub BODY_00_15 {
+my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
+
+$code.=<<___ if ($i<16);
+#if __ARM_ARCH__>=7
+	@ ldr	$t1,[$inp],#4			@ $i
+# if $i==15
+	str	$inp,[sp,#17*4]			@ make room for $t4
+# endif
+	eor	$t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]`
+	add	$a,$a,$t2			@ h+=Maj(a,b,c) from the past
+	eor	$t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]`	@ Sigma1(e)
+# ifndef __ARMEB__
+	rev	$t1,$t1
+# endif
+#else
+	@ ldrb	$t1,[$inp,#3]			@ $i
+	add	$a,$a,$t2			@ h+=Maj(a,b,c) from the past
+	ldrb	$t2,[$inp,#2]
+	ldrb	$t0,[$inp,#1]
+	orr	$t1,$t1,$t2,lsl#8
+	ldrb	$t2,[$inp],#4
+	orr	$t1,$t1,$t0,lsl#16
+# if $i==15
+	str	$inp,[sp,#17*4]			@ make room for $t4
+# endif
+	eor	$t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]`
+	orr	$t1,$t1,$t2,lsl#24
+	eor	$t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]`	@ Sigma1(e)
+#endif
+___
+$code.=<<___;
+	ldr	$t2,[$Ktbl],#4			@ *K256++
+	add	$h,$h,$t1			@ h+=X[i]
+	str	$t1,[sp,#`$i%16`*4]
+	eor	$t1,$f,$g
+	add	$h,$h,$t0,ror#$Sigma1[0]	@ h+=Sigma1(e)
+	and	$t1,$t1,$e
+	add	$h,$h,$t2			@ h+=K256[i]
+	eor	$t1,$t1,$g			@ Ch(e,f,g)
+	eor	$t0,$a,$a,ror#`$Sigma0[1]-$Sigma0[0]`
+	add	$h,$h,$t1			@ h+=Ch(e,f,g)
+#if $i==31
+	and	$t2,$t2,#0xff
+	cmp	$t2,#0xf2			@ done?
+#endif
+#if $i<15
+# if __ARM_ARCH__>=7
+	ldr	$t1,[$inp],#4			@ prefetch
+# else
+	ldrb	$t1,[$inp,#3]
+# endif
+	eor	$t2,$a,$b			@ a^b, b^c in next round
+#else
+	ldr	$t1,[sp,#`($i+2)%16`*4]		@ from future BODY_16_xx
+	eor	$t2,$a,$b			@ a^b, b^c in next round
+	ldr	$t4,[sp,#`($i+15)%16`*4]	@ from future BODY_16_xx
+#endif
+	eor	$t0,$t0,$a,ror#`$Sigma0[2]-$Sigma0[0]`	@ Sigma0(a)
+	and	$t3,$t3,$t2			@ (b^c)&=(a^b)
+	add	$d,$d,$h			@ d+=h
+	eor	$t3,$t3,$b			@ Maj(a,b,c)
+	add	$h,$h,$t0,ror#$Sigma0[0]	@ h+=Sigma0(a)
+	@ add	$h,$h,$t3			@ h+=Maj(a,b,c)
+___
+	($t2,$t3)=($t3,$t2);
+}
+
+sub BODY_16_XX {
+my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
+
+$code.=<<___;
+	@ ldr	$t1,[sp,#`($i+1)%16`*4]		@ $i
+	@ ldr	$t4,[sp,#`($i+14)%16`*4]
+	mov	$t0,$t1,ror#$sigma0[0]
+	add	$a,$a,$t2			@ h+=Maj(a,b,c) from the past
+	mov	$t2,$t4,ror#$sigma1[0]
+	eor	$t0,$t0,$t1,ror#$sigma0[1]
+	eor	$t2,$t2,$t4,ror#$sigma1[1]
+	eor	$t0,$t0,$t1,lsr#$sigma0[2]	@ sigma0(X[i+1])
+	ldr	$t1,[sp,#`($i+0)%16`*4]
+	eor	$t2,$t2,$t4,lsr#$sigma1[2]	@ sigma1(X[i+14])
+	ldr	$t4,[sp,#`($i+9)%16`*4]
+
+	add	$t2,$t2,$t0
+	eor	$t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]`	@ from BODY_00_15
+	add	$t1,$t1,$t2
+	eor	$t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]`	@ Sigma1(e)
+	add	$t1,$t1,$t4			@ X[i]
+___
+	&BODY_00_15(@_);
+}
+
+$code=<<___;
+#ifndef __KERNEL__
+# include "arm_arch.h"
+#else
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_MAX_ARCH__ 7
+#endif
+
+.text
+#if __ARM_ARCH__<7
+.code	32
+#else
+.syntax unified
+# ifdef __thumb2__
+#  define adrl adr
+.thumb
+# else
+.code   32
+# endif
+#endif
+
+.type	K256,%object
+.align	5
+K256:
+.word	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+.word	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+.word	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+.word	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+.word	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+.word	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+.word	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+.word	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+.word	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+.word	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+.word	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+.word	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+.word	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+.word	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+.word	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+.word	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+.size	K256,.-K256
+.word	0				@ terminator
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+.LOPENSSL_armcap:
+.word	OPENSSL_armcap_P-sha256_block_data_order
+#endif
+.align	5
+
+.global	sha256_block_data_order
+.type	sha256_block_data_order,%function
+sha256_block_data_order:
+#if __ARM_ARCH__<7
+	sub	r3,pc,#8		@ sha256_block_data_order
+#else
+	adr	r3,sha256_block_data_order
+#endif
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+	ldr	r12,.LOPENSSL_armcap
+	ldr	r12,[r3,r12]		@ OPENSSL_armcap_P
+	tst	r12,#ARMV8_SHA256
+	bne	.LARMv8
+	tst	r12,#ARMV7_NEON
+	bne	.LNEON
+#endif
+	add	$len,$inp,$len,lsl#6	@ len to point at the end of inp
+	stmdb	sp!,{$ctx,$inp,$len,r4-r11,lr}
+	ldmia	$ctx,{$A,$B,$C,$D,$E,$F,$G,$H}
+	sub	$Ktbl,r3,#256+32	@ K256
+	sub	sp,sp,#16*4		@ alloca(X[16])
+.Loop:
+# if __ARM_ARCH__>=7
+	ldr	$t1,[$inp],#4
+# else
+	ldrb	$t1,[$inp,#3]
+# endif
+	eor	$t3,$B,$C		@ magic
+	eor	$t2,$t2,$t2
+___
+for($i=0;$i<16;$i++)	{ &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
+$code.=".Lrounds_16_xx:\n";
+for (;$i<32;$i++)	{ &BODY_16_XX($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+#if __ARM_ARCH__>=7
+	ite	eq			@ Thumb2 thing, sanity check in ARM
+#endif
+	ldreq	$t3,[sp,#16*4]		@ pull ctx
+	bne	.Lrounds_16_xx
+
+	add	$A,$A,$t2		@ h+=Maj(a,b,c) from the past
+	ldr	$t0,[$t3,#0]
+	ldr	$t1,[$t3,#4]
+	ldr	$t2,[$t3,#8]
+	add	$A,$A,$t0
+	ldr	$t0,[$t3,#12]
+	add	$B,$B,$t1
+	ldr	$t1,[$t3,#16]
+	add	$C,$C,$t2
+	ldr	$t2,[$t3,#20]
+	add	$D,$D,$t0
+	ldr	$t0,[$t3,#24]
+	add	$E,$E,$t1
+	ldr	$t1,[$t3,#28]
+	add	$F,$F,$t2
+	ldr	$inp,[sp,#17*4]		@ pull inp
+	ldr	$t2,[sp,#18*4]		@ pull inp+len
+	add	$G,$G,$t0
+	add	$H,$H,$t1
+	stmia	$t3,{$A,$B,$C,$D,$E,$F,$G,$H}
+	cmp	$inp,$t2
+	sub	$Ktbl,$Ktbl,#256	@ rewind Ktbl
+	bne	.Loop
+
+	add	sp,sp,#`16+3`*4	@ destroy frame
+#if __ARM_ARCH__>=5
+	ldmia	sp!,{r4-r11,pc}
+#else
+	ldmia	sp!,{r4-r11,lr}
+	tst	lr,#1
+	moveq	pc,lr			@ be binary compatible with V4, yet
+	bx	lr			@ interoperable with Thumb ISA:-)
+#endif
+.size	sha256_block_data_order,.-sha256_block_data_order
+___
+######################################################################
+# NEON stuff
+#
+{{{
+my @X=map("q$_",(0..3));
+my ($T0,$T1,$T2,$T3,$T4,$T5)=("q8","q9","q10","q11","d24","d25");
+my $Xfer=$t4;
+my $j=0;
+
+sub Dlo()   { shift=~m|q([1]?[0-9])|?"d".($1*2):"";     }
+sub Dhi()   { shift=~m|q([1]?[0-9])|?"d".($1*2+1):"";   }
+
+sub AUTOLOAD()          # thunk [simplified] x86-style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
+  my $arg = pop;
+    $arg = "#$arg" if ($arg*1 eq $arg);
+    $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
+}
+
+sub Xupdate()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);
+  my ($a,$b,$c,$d,$e,$f,$g,$h);
+
+	&vext_8		($T0,@X[0],@X[1],4);	# X[1..4]
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vext_8		($T1,@X[2],@X[3],4);	# X[9..12]
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vshr_u32	($T2,$T0,$sigma0[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vadd_i32	(@X[0],@X[0],$T1);	# X[0..3] += X[9..12]
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vshr_u32	($T1,$T0,$sigma0[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vsli_32	($T2,$T0,32-$sigma0[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vshr_u32	($T3,$T0,$sigma0[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&veor		($T1,$T1,$T2);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vsli_32	($T3,$T0,32-$sigma0[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vshr_u32	($T4,&Dhi(@X[3]),$sigma1[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&veor		($T1,$T1,$T3);		# sigma0(X[1..4])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vsli_32	($T4,&Dhi(@X[3]),32-$sigma1[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vshr_u32	($T5,&Dhi(@X[3]),$sigma1[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vadd_i32	(@X[0],@X[0],$T1);	# X[0..3] += sigma0(X[1..4])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &veor		($T5,$T5,$T4);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vshr_u32	($T4,&Dhi(@X[3]),$sigma1[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vsli_32	($T4,&Dhi(@X[3]),32-$sigma1[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &veor		($T5,$T5,$T4);		# sigma1(X[14..15])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vadd_i32	(&Dlo(@X[0]),&Dlo(@X[0]),$T5);# X[0..1] += sigma1(X[14..15])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vshr_u32	($T4,&Dlo(@X[0]),$sigma1[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vsli_32	($T4,&Dlo(@X[0]),32-$sigma1[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vshr_u32	($T5,&Dlo(@X[0]),$sigma1[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &veor		($T5,$T5,$T4);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vshr_u32	($T4,&Dlo(@X[0]),$sigma1[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vld1_32	("{$T0}","[$Ktbl,:128]!");
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vsli_32	($T4,&Dlo(@X[0]),32-$sigma1[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &veor		($T5,$T5,$T4);		# sigma1(X[16..17])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vadd_i32	(&Dhi(@X[0]),&Dhi(@X[0]),$T5);# X[2..3] += sigma1(X[16..17])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vadd_i32	($T0,$T0,@X[0]);
+	 while($#insns>=2) { eval(shift(@insns)); }
+	&vst1_32	("{$T0}","[$Xfer,:128]!");
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	push(@X,shift(@X));		# "rotate" X[]
+}
+
+sub Xpreload()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);
+  my ($a,$b,$c,$d,$e,$f,$g,$h);
+
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vld1_32	("{$T0}","[$Ktbl,:128]!");
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vrev32_8	(@X[0],@X[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vadd_i32	($T0,$T0,@X[0]);
+	 foreach (@insns) { eval; }	# remaining instructions
+	&vst1_32	("{$T0}","[$Xfer,:128]!");
+
+	push(@X,shift(@X));		# "rotate" X[]
+}
+
+sub body_00_15 () {
+	(
+	'($a,$b,$c,$d,$e,$f,$g,$h)=@V;'.
+	'&add	($h,$h,$t1)',			# h+=X[i]+K[i]
+	'&eor	($t1,$f,$g)',
+	'&eor	($t0,$e,$e,"ror#".($Sigma1[1]-$Sigma1[0]))',
+	'&add	($a,$a,$t2)',			# h+=Maj(a,b,c) from the past
+	'&and	($t1,$t1,$e)',
+	'&eor	($t2,$t0,$e,"ror#".($Sigma1[2]-$Sigma1[0]))',	# Sigma1(e)
+	'&eor	($t0,$a,$a,"ror#".($Sigma0[1]-$Sigma0[0]))',
+	'&eor	($t1,$t1,$g)',			# Ch(e,f,g)
+	'&add	($h,$h,$t2,"ror#$Sigma1[0]")',	# h+=Sigma1(e)
+	'&eor	($t2,$a,$b)',			# a^b, b^c in next round
+	'&eor	($t0,$t0,$a,"ror#".($Sigma0[2]-$Sigma0[0]))',	# Sigma0(a)
+	'&add	($h,$h,$t1)',			# h+=Ch(e,f,g)
+	'&ldr	($t1,sprintf "[sp,#%d]",4*(($j+1)&15))	if (($j&15)!=15);'.
+	'&ldr	($t1,"[$Ktbl]")				if ($j==15);'.
+	'&ldr	($t1,"[sp,#64]")			if ($j==31)',
+	'&and	($t3,$t3,$t2)',			# (b^c)&=(a^b)
+	'&add	($d,$d,$h)',			# d+=h
+	'&add	($h,$h,$t0,"ror#$Sigma0[0]");'.	# h+=Sigma0(a)
+	'&eor	($t3,$t3,$b)',			# Maj(a,b,c)
+	'$j++;	unshift(@V,pop(@V)); ($t2,$t3)=($t3,$t2);'
+	)
+}
+
+$code.=<<___;
+#if __ARM_MAX_ARCH__>=7
+.arch	armv7-a
+.fpu	neon
+
+.global	sha256_block_data_order_neon
+.type	sha256_block_data_order_neon,%function
+.align	4
+sha256_block_data_order_neon:
+.LNEON:
+	stmdb	sp!,{r4-r12,lr}
+
+	sub	$H,sp,#16*4+16
+	adrl	$Ktbl,K256
+	bic	$H,$H,#15		@ align for 128-bit stores
+	mov	$t2,sp
+	mov	sp,$H			@ alloca
+	add	$len,$inp,$len,lsl#6	@ len to point at the end of inp
+
+	vld1.8		{@X[0]},[$inp]!
+	vld1.8		{@X[1]},[$inp]!
+	vld1.8		{@X[2]},[$inp]!
+	vld1.8		{@X[3]},[$inp]!
+	vld1.32		{$T0},[$Ktbl,:128]!
+	vld1.32		{$T1},[$Ktbl,:128]!
+	vld1.32		{$T2},[$Ktbl,:128]!
+	vld1.32		{$T3},[$Ktbl,:128]!
+	vrev32.8	@X[0],@X[0]		@ yes, even on
+	str		$ctx,[sp,#64]
+	vrev32.8	@X[1],@X[1]		@ big-endian
+	str		$inp,[sp,#68]
+	mov		$Xfer,sp
+	vrev32.8	@X[2],@X[2]
+	str		$len,[sp,#72]
+	vrev32.8	@X[3],@X[3]
+	str		$t2,[sp,#76]		@ save original sp
+	vadd.i32	$T0,$T0,@X[0]
+	vadd.i32	$T1,$T1,@X[1]
+	vst1.32		{$T0},[$Xfer,:128]!
+	vadd.i32	$T2,$T2,@X[2]
+	vst1.32		{$T1},[$Xfer,:128]!
+	vadd.i32	$T3,$T3,@X[3]
+	vst1.32		{$T2},[$Xfer,:128]!
+	vst1.32		{$T3},[$Xfer,:128]!
+
+	ldmia		$ctx,{$A-$H}
+	sub		$Xfer,$Xfer,#64
+	ldr		$t1,[sp,#0]
+	eor		$t2,$t2,$t2
+	eor		$t3,$B,$C
+	b		.L_00_48
+
+.align	4
+.L_00_48:
+___
+	&Xupdate(\&body_00_15);
+	&Xupdate(\&body_00_15);
+	&Xupdate(\&body_00_15);
+	&Xupdate(\&body_00_15);
+$code.=<<___;
+	teq	$t1,#0				@ check for K256 terminator
+	ldr	$t1,[sp,#0]
+	sub	$Xfer,$Xfer,#64
+	bne	.L_00_48
+
+	ldr		$inp,[sp,#68]
+	ldr		$t0,[sp,#72]
+	sub		$Ktbl,$Ktbl,#256	@ rewind $Ktbl
+	teq		$inp,$t0
+	it		eq
+	subeq		$inp,$inp,#64		@ avoid SEGV
+	vld1.8		{@X[0]},[$inp]!		@ load next input block
+	vld1.8		{@X[1]},[$inp]!
+	vld1.8		{@X[2]},[$inp]!
+	vld1.8		{@X[3]},[$inp]!
+	it		ne
+	strne		$inp,[sp,#68]
+	mov		$Xfer,sp
+___
+	&Xpreload(\&body_00_15);
+	&Xpreload(\&body_00_15);
+	&Xpreload(\&body_00_15);
+	&Xpreload(\&body_00_15);
+$code.=<<___;
+	ldr	$t0,[$t1,#0]
+	add	$A,$A,$t2			@ h+=Maj(a,b,c) from the past
+	ldr	$t2,[$t1,#4]
+	ldr	$t3,[$t1,#8]
+	ldr	$t4,[$t1,#12]
+	add	$A,$A,$t0			@ accumulate
+	ldr	$t0,[$t1,#16]
+	add	$B,$B,$t2
+	ldr	$t2,[$t1,#20]
+	add	$C,$C,$t3
+	ldr	$t3,[$t1,#24]
+	add	$D,$D,$t4
+	ldr	$t4,[$t1,#28]
+	add	$E,$E,$t0
+	str	$A,[$t1],#4
+	add	$F,$F,$t2
+	str	$B,[$t1],#4
+	add	$G,$G,$t3
+	str	$C,[$t1],#4
+	add	$H,$H,$t4
+	str	$D,[$t1],#4
+	stmia	$t1,{$E-$H}
+
+	ittte	ne
+	movne	$Xfer,sp
+	ldrne	$t1,[sp,#0]
+	eorne	$t2,$t2,$t2
+	ldreq	sp,[sp,#76]			@ restore original sp
+	itt	ne
+	eorne	$t3,$B,$C
+	bne	.L_00_48
+
+	ldmia	sp!,{r4-r12,pc}
+.size	sha256_block_data_order_neon,.-sha256_block_data_order_neon
+#endif
+___
+}}}
+######################################################################
+# ARMv8 stuff
+#
+{{{
+my ($ABCD,$EFGH,$abcd)=map("q$_",(0..2));
+my @MSG=map("q$_",(8..11));
+my ($W0,$W1,$ABCD_SAVE,$EFGH_SAVE)=map("q$_",(12..15));
+my $Ktbl="r3";
+
+$code.=<<___;
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+
+# ifdef __thumb2__
+#  define INST(a,b,c,d)	.byte	c,d|0xc,a,b
+# else
+#  define INST(a,b,c,d)	.byte	a,b,c,d
+# endif
+
+.type	sha256_block_data_order_armv8,%function
+.align	5
+sha256_block_data_order_armv8:
+.LARMv8:
+	vld1.32	{$ABCD,$EFGH},[$ctx]
+# ifdef __thumb2__
+	adr	$Ktbl,.LARMv8
+	sub	$Ktbl,$Ktbl,#.LARMv8-K256
+# else
+	adrl	$Ktbl,K256
+# endif
+	add	$len,$inp,$len,lsl#6	@ len to point at the end of inp
+
+.Loop_v8:
+	vld1.8		{@MSG[0]-@MSG[1]},[$inp]!
+	vld1.8		{@MSG[2]-@MSG[3]},[$inp]!
+	vld1.32		{$W0},[$Ktbl]!
+	vrev32.8	@MSG[0],@MSG[0]
+	vrev32.8	@MSG[1],@MSG[1]
+	vrev32.8	@MSG[2],@MSG[2]
+	vrev32.8	@MSG[3],@MSG[3]
+	vmov		$ABCD_SAVE,$ABCD	@ offload
+	vmov		$EFGH_SAVE,$EFGH
+	teq		$inp,$len
+___
+for($i=0;$i<12;$i++) {
+$code.=<<___;
+	vld1.32		{$W1},[$Ktbl]!
+	vadd.i32	$W0,$W0,@MSG[0]
+	sha256su0	@MSG[0],@MSG[1]
+	vmov		$abcd,$ABCD
+	sha256h		$ABCD,$EFGH,$W0
+	sha256h2	$EFGH,$abcd,$W0
+	sha256su1	@MSG[0],@MSG[2],@MSG[3]
+___
+	($W0,$W1)=($W1,$W0);	push(@MSG,shift(@MSG));
+}
+$code.=<<___;
+	vld1.32		{$W1},[$Ktbl]!
+	vadd.i32	$W0,$W0,@MSG[0]
+	vmov		$abcd,$ABCD
+	sha256h		$ABCD,$EFGH,$W0
+	sha256h2	$EFGH,$abcd,$W0
+
+	vld1.32		{$W0},[$Ktbl]!
+	vadd.i32	$W1,$W1,@MSG[1]
+	vmov		$abcd,$ABCD
+	sha256h		$ABCD,$EFGH,$W1
+	sha256h2	$EFGH,$abcd,$W1
+
+	vld1.32		{$W1},[$Ktbl]
+	vadd.i32	$W0,$W0,@MSG[2]
+	sub		$Ktbl,$Ktbl,#256-16	@ rewind
+	vmov		$abcd,$ABCD
+	sha256h		$ABCD,$EFGH,$W0
+	sha256h2	$EFGH,$abcd,$W0
+
+	vadd.i32	$W1,$W1,@MSG[3]
+	vmov		$abcd,$ABCD
+	sha256h		$ABCD,$EFGH,$W1
+	sha256h2	$EFGH,$abcd,$W1
+
+	vadd.i32	$ABCD,$ABCD,$ABCD_SAVE
+	vadd.i32	$EFGH,$EFGH,$EFGH_SAVE
+	it		ne
+	bne		.Loop_v8
+
+	vst1.32		{$ABCD,$EFGH},[$ctx]
+
+	ret		@ bx lr
+.size	sha256_block_data_order_armv8,.-sha256_block_data_order_armv8
+#endif
+___
+}}}
+$code.=<<___;
+.asciz  "SHA256 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
+.align	2
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+.comm   OPENSSL_armcap_P,4,4
+#endif
+___
+
+open SELF,$0;
+while(<SELF>) {
+	next if (/^#!/);
+	last if (!s/^#/@/ and !/^$/);
+	print;
+}
+close SELF;
+
+{   my  %opcode = (
+	"sha256h"	=> 0xf3000c40,	"sha256h2"	=> 0xf3100c40,
+	"sha256su0"	=> 0xf3ba03c0,	"sha256su1"	=> 0xf3200c40	);
+
+    sub unsha256 {
+	my ($mnemonic,$arg)=@_;
+
+	if ($arg =~ m/q([0-9]+)(?:,\s*q([0-9]+))?,\s*q([0-9]+)/o) {
+	    my $word = $opcode{$mnemonic}|(($1&7)<<13)|(($1&8)<<19)
+					 |(($2&7)<<17)|(($2&8)<<4)
+					 |(($3&7)<<1) |(($3&8)<<2);
+	    # Emit the four bytes in this order since ARMv7 instructions
+	    # are always encoded little-endian. The correct solution is
+	    # to use the .inst directive, but older assemblers don't
+	    # implement it:-(
+	    sprintf "INST(0x%02x,0x%02x,0x%02x,0x%02x)\t@ %s %s",
+			$word&0xff,($word>>8)&0xff,
+			($word>>16)&0xff,($word>>24)&0xff,
+			$mnemonic,$arg;
+	}
+    }
+}
+
+foreach (split($/,$code)) {
+
+	s/\`([^\`]*)\`/eval $1/geo;
+
+	s/\b(sha256\w+)\s+(q.*)/unsha256($1,$2)/geo;
+
+	s/\bret\b/bx	lr/go		or
+	s/\bbx\s+lr\b/.word\t0xe12fff1e/go;	# make it possible to compile with -march=armv4
+
+	print $_,"\n";
+}
+
+close STDOUT; # enforce flush
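
The rotation tables near the top of the script (@Sigma0=(2,13,22),
@Sigma1=(6,11,25), @sigma0=(7,18,3), @sigma1=(17,19,10)) encode the FIPS 180-4
SHA-256 functions. The integer code factors out the smallest rotation: it
computes e ^ ror(e,11-6) ^ ror(e,25-6) and applies the remaining ror #6 while
accumulating, which is why the generated assembly shows ror#5, ror#19 and a
final ror#6 (ror#11, ror#20 and ror#2 for Sigma0). In plain C the four
functions come out as below (a reference sketch, not code from this patch):

#include <stdint.h>

static inline uint32_t ror32(uint32_t x, unsigned int n)
{
	return (x >> n) | (x << (32 - n));
}

static inline uint32_t Sigma0(uint32_t a)	/* @Sigma0=(2,13,22) */
{
	return ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22);
}

static inline uint32_t Sigma1(uint32_t e)	/* @Sigma1=(6,11,25) */
{
	return ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25);
}

/* Note: the last element of each lower-case table is a plain shift
 * (the 'lsr' in the generated code), not a rotation. */
static inline uint32_t sigma0(uint32_t x)	/* @sigma0=(7,18,3) */
{
	return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3);
}

static inline uint32_t sigma1(uint32_t x)	/* @sigma1=(17,19,10) */
{
	return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10);
}

The NEON path vectorizes the same functions: each rotation is emulated with a
vshr/vsli pair (shift right, then shift-left-and-insert by 32-n), which is the
pattern visible throughout Xupdate() above.
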
diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped
new file mode 100644
index 0000000..555a1a8
--- /dev/null
+++ b/arch/arm/crypto/sha256-core.S_shipped
@@ -0,0 +1,2808 @@
+
+@ ====================================================================
+@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+@ project. The module is, however, dual licensed under OpenSSL and
+@ CRYPTOGAMS licenses depending on where you obtain it. For further
+@ details see http://www.openssl.org/~appro/cryptogams/.
+@
+@ Permission to use under GPL terms is granted.
+@ ====================================================================
+
+@ SHA256 block procedure for ARMv4. May 2007.
+
+@ Performance is ~2x better than gcc 3.4 generated code and in
+@ "absolute" terms is ~2250 cycles per 64-byte block or ~35 cycles
+@ per byte [on single-issue Xscale PXA250 core].
+
+@ July 2010.
+@
+@ Rescheduling for dual-issue pipeline resulted in 22% improvement on
+@ Cortex A8 core and ~20 cycles per processed byte.
+
+@ February 2011.
+@
+@ Profiler-assisted and platform-specific optimization resulted in 16%
+@ improvement on Cortex A8 core and ~15.4 cycles per processed byte.
+
+@ September 2013.
+@
+@ Add NEON implementation. On Cortex A8 it was measured to process one
+@ byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon
+@ S4 does it in 12.5 cycles too, but it's 50% faster than integer-only
+@ code (meaning that latter performs sub-optimally, nothing was done
+@ about it).
+
+@ May 2014.
+@
+@ Add ARMv8 code path performing at 2.0 cpb on Apple A7.
+
+#ifndef __KERNEL__
+# include "arm_arch.h"
+#else
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_MAX_ARCH__ 7
+#endif
+
+.text
+#if __ARM_ARCH__<7
+.code	32
+#else
+.syntax unified
+# ifdef __thumb2__
+#  define adrl adr
+.thumb
+# else
+.code   32
+# endif
+#endif
+
+.type	K256,%object
+.align	5
+K256:
+.word	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+.word	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+.word	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+.word	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+.word	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+.word	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+.word	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+.word	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+.word	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+.word	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+.word	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+.word	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+.word	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+.word	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+.word	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+.word	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+.size	K256,.-K256
+.word	0				@ terminator
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+.LOPENSSL_armcap:
+.word	OPENSSL_armcap_P-sha256_block_data_order
+#endif
+.align	5
+
+.global	sha256_block_data_order
+.type	sha256_block_data_order,%function
+sha256_block_data_order:
+#if __ARM_ARCH__<7
+	sub	r3,pc,#8		@ sha256_block_data_order
+#else
+	adr	r3,sha256_block_data_order
+#endif
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+	ldr	r12,.LOPENSSL_armcap
+	ldr	r12,[r3,r12]		@ OPENSSL_armcap_P
+	tst	r12,#ARMV8_SHA256
+	bne	.LARMv8
+	tst	r12,#ARMV7_NEON
+	bne	.LNEON
+#endif
+	add	r2,r1,r2,lsl#6	@ len to point at the end of inp
+	stmdb	sp!,{r0,r1,r2,r4-r11,lr}
+	ldmia	r0,{r4,r5,r6,r7,r8,r9,r10,r11}
+	sub	r14,r3,#256+32	@ K256
+	sub	sp,sp,#16*4		@ alloca(X[16])
+.Loop:
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r3,r5,r6		@ magic
+	eor	r12,r12,r12
+#if __ARM_ARCH__>=7
+	@ ldr	r2,[r1],#4			@ 0
+# if 0==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r8,r8,ror#5
+	add	r4,r4,r12			@ h+=Maj(a,b,c) from the past
+	eor	r0,r0,r8,ror#19	@ Sigma1(e)
+# ifndef __ARMEB__
+	rev	r2,r2
+# endif
+#else
+	@ ldrb	r2,[r1,#3]			@ 0
+	add	r4,r4,r12			@ h+=Maj(a,b,c) from the past
+	ldrb	r12,[r1,#2]
+	ldrb	r0,[r1,#1]
+	orr	r2,r2,r12,lsl#8
+	ldrb	r12,[r1],#4
+	orr	r2,r2,r0,lsl#16
+# if 0==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r8,r8,ror#5
+	orr	r2,r2,r12,lsl#24
+	eor	r0,r0,r8,ror#19	@ Sigma1(e)
+#endif
+	ldr	r12,[r14],#4			@ *K256++
+	add	r11,r11,r2			@ h+=X[i]
+	str	r2,[sp,#0*4]
+	eor	r2,r9,r10
+	add	r11,r11,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r8
+	add	r11,r11,r12			@ h+=K256[i]
+	eor	r2,r2,r10			@ Ch(e,f,g)
+	eor	r0,r4,r4,ror#11
+	add	r11,r11,r2			@ h+=Ch(e,f,g)
+#if 0==31
+	and	r12,r12,#0xff
+	cmp	r12,#0xf2			@ done?
+#endif
+#if 0<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r12,r4,r5			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#2*4]		@ from future BODY_16_xx
+	eor	r12,r4,r5			@ a^b, b^c in next round
+	ldr	r1,[sp,#15*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r4,ror#20	@ Sigma0(a)
+	and	r3,r3,r12			@ (b^c)&=(a^b)
+	add	r7,r7,r11			@ d+=h
+	eor	r3,r3,r5			@ Maj(a,b,c)
+	add	r11,r11,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r11,r11,r3			@ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+	@ ldr	r2,[r1],#4			@ 1
+# if 1==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r7,r7,ror#5
+	add	r11,r11,r3			@ h+=Maj(a,b,c) from the past
+	eor	r0,r0,r7,ror#19	@ Sigma1(e)
+# ifndef __ARMEB__
+	rev	r2,r2
+# endif
+#else
+	@ ldrb	r2,[r1,#3]			@ 1
+	add	r11,r11,r3			@ h+=Maj(a,b,c) from the past
+	ldrb	r3,[r1,#2]
+	ldrb	r0,[r1,#1]
+	orr	r2,r2,r3,lsl#8
+	ldrb	r3,[r1],#4
+	orr	r2,r2,r0,lsl#16
+# if 1==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r7,r7,ror#5
+	orr	r2,r2,r3,lsl#24
+	eor	r0,r0,r7,ror#19	@ Sigma1(e)
+#endif
+	ldr	r3,[r14],#4			@ *K256++
+	add	r10,r10,r2			@ h+=X[i]
+	str	r2,[sp,#1*4]
+	eor	r2,r8,r9
+	add	r10,r10,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r7
+	add	r10,r10,r3			@ h+=K256[i]
+	eor	r2,r2,r9			@ Ch(e,f,g)
+	eor	r0,r11,r11,ror#11
+	add	r10,r10,r2			@ h+=Ch(e,f,g)
+#if 1==31
+	and	r3,r3,#0xff
+	cmp	r3,#0xf2			@ done?
+#endif
+#if 1<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r3,r11,r4			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#3*4]		@ from future BODY_16_xx
+	eor	r3,r11,r4			@ a^b, b^c in next round
+	ldr	r1,[sp,#0*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r11,ror#20	@ Sigma0(a)
+	and	r12,r12,r3			@ (b^c)&=(a^b)
+	add	r6,r6,r10			@ d+=h
+	eor	r12,r12,r4			@ Maj(a,b,c)
+	add	r10,r10,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r10,r10,r12			@ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+	@ ldr	r2,[r1],#4			@ 2
+# if 2==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r6,r6,ror#5
+	add	r10,r10,r12			@ h+=Maj(a,b,c) from the past
+	eor	r0,r0,r6,ror#19	@ Sigma1(e)
+# ifndef __ARMEB__
+	rev	r2,r2
+# endif
+#else
+	@ ldrb	r2,[r1,#3]			@ 2
+	add	r10,r10,r12			@ h+=Maj(a,b,c) from the past
+	ldrb	r12,[r1,#2]
+	ldrb	r0,[r1,#1]
+	orr	r2,r2,r12,lsl#8
+	ldrb	r12,[r1],#4
+	orr	r2,r2,r0,lsl#16
+# if 2==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r6,r6,ror#5
+	orr	r2,r2,r12,lsl#24
+	eor	r0,r0,r6,ror#19	@ Sigma1(e)
+#endif
+	ldr	r12,[r14],#4			@ *K256++
+	add	r9,r9,r2			@ h+=X[i]
+	str	r2,[sp,#2*4]
+	eor	r2,r7,r8
+	add	r9,r9,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r6
+	add	r9,r9,r12			@ h+=K256[i]
+	eor	r2,r2,r8			@ Ch(e,f,g)
+	eor	r0,r10,r10,ror#11
+	add	r9,r9,r2			@ h+=Ch(e,f,g)
+#if 2==31
+	and	r12,r12,#0xff
+	cmp	r12,#0xf2			@ done?
+#endif
+#if 2<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r12,r10,r11			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#4*4]		@ from future BODY_16_xx
+	eor	r12,r10,r11			@ a^b, b^c in next round
+	ldr	r1,[sp,#1*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r10,ror#20	@ Sigma0(a)
+	and	r3,r3,r12			@ (b^c)&=(a^b)
+	add	r5,r5,r9			@ d+=h
+	eor	r3,r3,r11			@ Maj(a,b,c)
+	add	r9,r9,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r9,r9,r3			@ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+	@ ldr	r2,[r1],#4			@ 3
+# if 3==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r5,r5,ror#5
+	add	r9,r9,r3			@ h+=Maj(a,b,c) from the past
+	eor	r0,r0,r5,ror#19	@ Sigma1(e)
+# ifndef __ARMEB__
+	rev	r2,r2
+# endif
+#else
+	@ ldrb	r2,[r1,#3]			@ 3
+	add	r9,r9,r3			@ h+=Maj(a,b,c) from the past
+	ldrb	r3,[r1,#2]
+	ldrb	r0,[r1,#1]
+	orr	r2,r2,r3,lsl#8
+	ldrb	r3,[r1],#4
+	orr	r2,r2,r0,lsl#16
+# if 3==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r5,r5,ror#5
+	orr	r2,r2,r3,lsl#24
+	eor	r0,r0,r5,ror#19	@ Sigma1(e)
+#endif
+	ldr	r3,[r14],#4			@ *K256++
+	add	r8,r8,r2			@ h+=X[i]
+	str	r2,[sp,#3*4]
+	eor	r2,r6,r7
+	add	r8,r8,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r5
+	add	r8,r8,r3			@ h+=K256[i]
+	eor	r2,r2,r7			@ Ch(e,f,g)
+	eor	r0,r9,r9,ror#11
+	add	r8,r8,r2			@ h+=Ch(e,f,g)
+#if 3==31
+	and	r3,r3,#0xff
+	cmp	r3,#0xf2			@ done?
+#endif
+#if 3<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r3,r9,r10			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#5*4]		@ from future BODY_16_xx
+	eor	r3,r9,r10			@ a^b, b^c in next round
+	ldr	r1,[sp,#2*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r9,ror#20	@ Sigma0(a)
+	and	r12,r12,r3			@ (b^c)&=(a^b)
+	add	r4,r4,r8			@ d+=h
+	eor	r12,r12,r10			@ Maj(a,b,c)
+	add	r8,r8,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r8,r8,r12			@ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+	@ ldr	r2,[r1],#4			@ 4
+# if 4==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r4,r4,ror#5
+	add	r8,r8,r12			@ h+=Maj(a,b,c) from the past
+	eor	r0,r0,r4,ror#19	@ Sigma1(e)
+# ifndef __ARMEB__
+	rev	r2,r2
+# endif
+#else
+	@ ldrb	r2,[r1,#3]			@ 4
+	add	r8,r8,r12			@ h+=Maj(a,b,c) from the past
+	ldrb	r12,[r1,#2]
+	ldrb	r0,[r1,#1]
+	orr	r2,r2,r12,lsl#8
+	ldrb	r12,[r1],#4
+	orr	r2,r2,r0,lsl#16
+# if 4==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r4,r4,ror#5
+	orr	r2,r2,r12,lsl#24
+	eor	r0,r0,r4,ror#19	@ Sigma1(e)
+#endif
+	ldr	r12,[r14],#4			@ *K256++
+	add	r7,r7,r2			@ h+=X[i]
+	str	r2,[sp,#4*4]
+	eor	r2,r5,r6
+	add	r7,r7,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r4
+	add	r7,r7,r12			@ h+=K256[i]
+	eor	r2,r2,r6			@ Ch(e,f,g)
+	eor	r0,r8,r8,ror#11
+	add	r7,r7,r2			@ h+=Ch(e,f,g)
+#if 4==31
+	and	r12,r12,#0xff
+	cmp	r12,#0xf2			@ done?
+#endif
+#if 4<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r12,r8,r9			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#6*4]		@ from future BODY_16_xx
+	eor	r12,r8,r9			@ a^b, b^c in next round
+	ldr	r1,[sp,#3*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r8,ror#20	@ Sigma0(a)
+	and	r3,r3,r12			@ (b^c)&=(a^b)
+	add	r11,r11,r7			@ d+=h
+	eor	r3,r3,r9			@ Maj(a,b,c)
+	add	r7,r7,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r7,r7,r3			@ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+	@ ldr	r2,[r1],#4			@ 5
+# if 5==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r11,r11,ror#5
+	add	r7,r7,r3			@ h+=Maj(a,b,c) from the past
+	eor	r0,r0,r11,ror#19	@ Sigma1(e)
+# ifndef __ARMEB__
+	rev	r2,r2
+# endif
+#else
+	@ ldrb	r2,[r1,#3]			@ 5
+	add	r7,r7,r3			@ h+=Maj(a,b,c) from the past
+	ldrb	r3,[r1,#2]
+	ldrb	r0,[r1,#1]
+	orr	r2,r2,r3,lsl#8
+	ldrb	r3,[r1],#4
+	orr	r2,r2,r0,lsl#16
+# if 5==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r11,r11,ror#5
+	orr	r2,r2,r3,lsl#24
+	eor	r0,r0,r11,ror#19	@ Sigma1(e)
+#endif
+	ldr	r3,[r14],#4			@ *K256++
+	add	r6,r6,r2			@ h+=X[i]
+	str	r2,[sp,#5*4]
+	eor	r2,r4,r5
+	add	r6,r6,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r11
+	add	r6,r6,r3			@ h+=K256[i]
+	eor	r2,r2,r5			@ Ch(e,f,g)
+	eor	r0,r7,r7,ror#11
+	add	r6,r6,r2			@ h+=Ch(e,f,g)
+#if 5==31
+	and	r3,r3,#0xff
+	cmp	r3,#0xf2			@ done?
+#endif
+#if 5<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r3,r7,r8			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#7*4]		@ from future BODY_16_xx
+	eor	r3,r7,r8			@ a^b, b^c in next round
+	ldr	r1,[sp,#4*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r7,ror#20	@ Sigma0(a)
+	and	r12,r12,r3			@ (b^c)&=(a^b)
+	add	r10,r10,r6			@ d+=h
+	eor	r12,r12,r8			@ Maj(a,b,c)
+	add	r6,r6,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r6,r6,r12			@ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+	@ ldr	r2,[r1],#4			@ 6
+# if 6==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r10,r10,ror#5
+	add	r6,r6,r12			@ h+=Maj(a,b,c) from the past
+	eor	r0,r0,r10,ror#19	@ Sigma1(e)
+# ifndef __ARMEB__
+	rev	r2,r2
+# endif
+#else
+	@ ldrb	r2,[r1,#3]			@ 6
+	add	r6,r6,r12			@ h+=Maj(a,b,c) from the past
+	ldrb	r12,[r1,#2]
+	ldrb	r0,[r1,#1]
+	orr	r2,r2,r12,lsl#8
+	ldrb	r12,[r1],#4
+	orr	r2,r2,r0,lsl#16
+# if 6==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r10,r10,ror#5
+	orr	r2,r2,r12,lsl#24
+	eor	r0,r0,r10,ror#19	@ Sigma1(e)
+#endif
+	ldr	r12,[r14],#4			@ *K256++
+	add	r5,r5,r2			@ h+=X[i]
+	str	r2,[sp,#6*4]
+	eor	r2,r11,r4
+	add	r5,r5,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r10
+	add	r5,r5,r12			@ h+=K256[i]
+	eor	r2,r2,r4			@ Ch(e,f,g)
+	eor	r0,r6,r6,ror#11
+	add	r5,r5,r2			@ h+=Ch(e,f,g)
+#if 6==31
+	and	r12,r12,#0xff
+	cmp	r12,#0xf2			@ done?
+#endif
+#if 6<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r12,r6,r7			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#8*4]		@ from future BODY_16_xx
+	eor	r12,r6,r7			@ a^b, b^c in next round
+	ldr	r1,[sp,#5*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r6,ror#20	@ Sigma0(a)
+	and	r3,r3,r12			@ (b^c)&=(a^b)
+	add	r9,r9,r5			@ d+=h
+	eor	r3,r3,r7			@ Maj(a,b,c)
+	add	r5,r5,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r5,r5,r3			@ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+	@ ldr	r2,[r1],#4			@ 7
+# if 7==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r9,r9,ror#5
+	add	r5,r5,r3			@ h+=Maj(a,b,c) from the past
+	eor	r0,r0,r9,ror#19	@ Sigma1(e)
+# ifndef __ARMEB__
+	rev	r2,r2
+# endif
+#else
+	@ ldrb	r2,[r1,#3]			@ 7
+	add	r5,r5,r3			@ h+=Maj(a,b,c) from the past
+	ldrb	r3,[r1,#2]
+	ldrb	r0,[r1,#1]
+	orr	r2,r2,r3,lsl#8
+	ldrb	r3,[r1],#4
+	orr	r2,r2,r0,lsl#16
+# if 7==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r9,r9,ror#5
+	orr	r2,r2,r3,lsl#24
+	eor	r0,r0,r9,ror#19	@ Sigma1(e)
+#endif
+	ldr	r3,[r14],#4			@ *K256++
+	add	r4,r4,r2			@ h+=X[i]
+	str	r2,[sp,#7*4]
+	eor	r2,r10,r11
+	add	r4,r4,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r9
+	add	r4,r4,r3			@ h+=K256[i]
+	eor	r2,r2,r11			@ Ch(e,f,g)
+	eor	r0,r5,r5,ror#11
+	add	r4,r4,r2			@ h+=Ch(e,f,g)
+#if 7==31
+	and	r3,r3,#0xff
+	cmp	r3,#0xf2			@ done?
+#endif
+#if 7<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r3,r5,r6			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#9*4]		@ from future BODY_16_xx
+	eor	r3,r5,r6			@ a^b, b^c in next round
+	ldr	r1,[sp,#6*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r5,ror#20	@ Sigma0(a)
+	and	r12,r12,r3			@ (b^c)&=(a^b)
+	add	r8,r8,r4			@ d+=h
+	eor	r12,r12,r6			@ Maj(a,b,c)
+	add	r4,r4,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r4,r4,r12			@ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+	@ ldr	r2,[r1],#4			@ 8
+# if 8==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r8,r8,ror#5
+	add	r4,r4,r12			@ h+=Maj(a,b,c) from the past
+	eor	r0,r0,r8,ror#19	@ Sigma1(e)
+# ifndef __ARMEB__
+	rev	r2,r2
+# endif
+#else
+	@ ldrb	r2,[r1,#3]			@ 8
+	add	r4,r4,r12			@ h+=Maj(a,b,c) from the past
+	ldrb	r12,[r1,#2]
+	ldrb	r0,[r1,#1]
+	orr	r2,r2,r12,lsl#8
+	ldrb	r12,[r1],#4
+	orr	r2,r2,r0,lsl#16
+# if 8==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r8,r8,ror#5
+	orr	r2,r2,r12,lsl#24
+	eor	r0,r0,r8,ror#19	@ Sigma1(e)
+#endif
+	ldr	r12,[r14],#4			@ *K256++
+	add	r11,r11,r2			@ h+=X[i]
+	str	r2,[sp,#8*4]
+	eor	r2,r9,r10
+	add	r11,r11,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r8
+	add	r11,r11,r12			@ h+=K256[i]
+	eor	r2,r2,r10			@ Ch(e,f,g)
+	eor	r0,r4,r4,ror#11
+	add	r11,r11,r2			@ h+=Ch(e,f,g)
+#if 8==31
+	and	r12,r12,#0xff
+	cmp	r12,#0xf2			@ done?
+#endif
+#if 8<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r12,r4,r5			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#10*4]		@ from future BODY_16_xx
+	eor	r12,r4,r5			@ a^b, b^c in next round
+	ldr	r1,[sp,#7*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r4,ror#20	@ Sigma0(a)
+	and	r3,r3,r12			@ (b^c)&=(a^b)
+	add	r7,r7,r11			@ d+=h
+	eor	r3,r3,r5			@ Maj(a,b,c)
+	add	r11,r11,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r11,r11,r3			@ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+	@ ldr	r2,[r1],#4			@ 9
+# if 9==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r7,r7,ror#5
+	add	r11,r11,r3			@ h+=Maj(a,b,c) from the past
+	eor	r0,r0,r7,ror#19	@ Sigma1(e)
+# ifndef __ARMEB__
+	rev	r2,r2
+# endif
+#else
+	@ ldrb	r2,[r1,#3]			@ 9
+	add	r11,r11,r3			@ h+=Maj(a,b,c) from the past
+	ldrb	r3,[r1,#2]
+	ldrb	r0,[r1,#1]
+	orr	r2,r2,r3,lsl#8
+	ldrb	r3,[r1],#4
+	orr	r2,r2,r0,lsl#16
+# if 9==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r7,r7,ror#5
+	orr	r2,r2,r3,lsl#24
+	eor	r0,r0,r7,ror#19	@ Sigma1(e)
+#endif
+	ldr	r3,[r14],#4			@ *K256++
+	add	r10,r10,r2			@ h+=X[i]
+	str	r2,[sp,#9*4]
+	eor	r2,r8,r9
+	add	r10,r10,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r7
+	add	r10,r10,r3			@ h+=K256[i]
+	eor	r2,r2,r9			@ Ch(e,f,g)
+	eor	r0,r11,r11,ror#11
+	add	r10,r10,r2			@ h+=Ch(e,f,g)
+#if 9==31
+	and	r3,r3,#0xff
+	cmp	r3,#0xf2			@ done?
+#endif
+#if 9<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r3,r11,r4			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#11*4]		@ from future BODY_16_xx
+	eor	r3,r11,r4			@ a^b, b^c in next round
+	ldr	r1,[sp,#8*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r11,ror#20	@ Sigma0(a)
+	and	r12,r12,r3			@ (b^c)&=(a^b)
+	add	r6,r6,r10			@ d+=h
+	eor	r12,r12,r4			@ Maj(a,b,c)
+	add	r10,r10,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r10,r10,r12			@ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+	@ ldr	r2,[r1],#4			@ 10
+# if 10==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r6,r6,ror#5
+	add	r10,r10,r12			@ h+=Maj(a,b,c) from the past
+	eor	r0,r0,r6,ror#19	@ Sigma1(e)
+# ifndef __ARMEB__
+	rev	r2,r2
+# endif
+#else
+	@ ldrb	r2,[r1,#3]			@ 10
+	add	r10,r10,r12			@ h+=Maj(a,b,c) from the past
+	ldrb	r12,[r1,#2]
+	ldrb	r0,[r1,#1]
+	orr	r2,r2,r12,lsl#8
+	ldrb	r12,[r1],#4
+	orr	r2,r2,r0,lsl#16
+# if 10==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r6,r6,ror#5
+	orr	r2,r2,r12,lsl#24
+	eor	r0,r0,r6,ror#19	@ Sigma1(e)
+#endif
+	ldr	r12,[r14],#4			@ *K256++
+	add	r9,r9,r2			@ h+=X[i]
+	str	r2,[sp,#10*4]
+	eor	r2,r7,r8
+	add	r9,r9,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r6
+	add	r9,r9,r12			@ h+=K256[i]
+	eor	r2,r2,r8			@ Ch(e,f,g)
+	eor	r0,r10,r10,ror#11
+	add	r9,r9,r2			@ h+=Ch(e,f,g)
+#if 10==31
+	and	r12,r12,#0xff
+	cmp	r12,#0xf2			@ done?
+#endif
+#if 10<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r12,r10,r11			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#12*4]		@ from future BODY_16_xx
+	eor	r12,r10,r11			@ a^b, b^c in next round
+	ldr	r1,[sp,#9*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r10,ror#20	@ Sigma0(a)
+	and	r3,r3,r12			@ (b^c)&=(a^b)
+	add	r5,r5,r9			@ d+=h
+	eor	r3,r3,r11			@ Maj(a,b,c)
+	add	r9,r9,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r9,r9,r3			@ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+	@ ldr	r2,[r1],#4			@ 11
+# if 11==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r5,r5,ror#5
+	add	r9,r9,r3			@ h+=Maj(a,b,c) from the past
+	eor	r0,r0,r5,ror#19	@ Sigma1(e)
+# ifndef __ARMEB__
+	rev	r2,r2
+# endif
+#else
+	@ ldrb	r2,[r1,#3]			@ 11
+	add	r9,r9,r3			@ h+=Maj(a,b,c) from the past
+	ldrb	r3,[r1,#2]
+	ldrb	r0,[r1,#1]
+	orr	r2,r2,r3,lsl#8
+	ldrb	r3,[r1],#4
+	orr	r2,r2,r0,lsl#16
+# if 11==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r5,r5,ror#5
+	orr	r2,r2,r3,lsl#24
+	eor	r0,r0,r5,ror#19	@ Sigma1(e)
+#endif
+	ldr	r3,[r14],#4			@ *K256++
+	add	r8,r8,r2			@ h+=X[i]
+	str	r2,[sp,#11*4]
+	eor	r2,r6,r7
+	add	r8,r8,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r5
+	add	r8,r8,r3			@ h+=K256[i]
+	eor	r2,r2,r7			@ Ch(e,f,g)
+	eor	r0,r9,r9,ror#11
+	add	r8,r8,r2			@ h+=Ch(e,f,g)
+#if 11==31
+	and	r3,r3,#0xff
+	cmp	r3,#0xf2			@ done?
+#endif
+#if 11<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r3,r9,r10			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#13*4]		@ from future BODY_16_xx
+	eor	r3,r9,r10			@ a^b, b^c in next round
+	ldr	r1,[sp,#10*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r9,ror#20	@ Sigma0(a)
+	and	r12,r12,r3			@ (b^c)&=(a^b)
+	add	r4,r4,r8			@ d+=h
+	eor	r12,r12,r10			@ Maj(a,b,c)
+	add	r8,r8,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r8,r8,r12			@ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+	@ ldr	r2,[r1],#4			@ 12
+# if 12==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r4,r4,ror#5
+	add	r8,r8,r12			@ h+=Maj(a,b,c) from the past
+	eor	r0,r0,r4,ror#19	@ Sigma1(e)
+# ifndef __ARMEB__
+	rev	r2,r2
+# endif
+#else
+	@ ldrb	r2,[r1,#3]			@ 12
+	add	r8,r8,r12			@ h+=Maj(a,b,c) from the past
+	ldrb	r12,[r1,#2]
+	ldrb	r0,[r1,#1]
+	orr	r2,r2,r12,lsl#8
+	ldrb	r12,[r1],#4
+	orr	r2,r2,r0,lsl#16
+# if 12==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r4,r4,ror#5
+	orr	r2,r2,r12,lsl#24
+	eor	r0,r0,r4,ror#19	@ Sigma1(e)
+#endif
+	ldr	r12,[r14],#4			@ *K256++
+	add	r7,r7,r2			@ h+=X[i]
+	str	r2,[sp,#12*4]
+	eor	r2,r5,r6
+	add	r7,r7,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r4
+	add	r7,r7,r12			@ h+=K256[i]
+	eor	r2,r2,r6			@ Ch(e,f,g)
+	eor	r0,r8,r8,ror#11
+	add	r7,r7,r2			@ h+=Ch(e,f,g)
+#if 12==31
+	and	r12,r12,#0xff
+	cmp	r12,#0xf2			@ done?
+#endif
+#if 12<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r12,r8,r9			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#14*4]		@ from future BODY_16_xx
+	eor	r12,r8,r9			@ a^b, b^c in next round
+	ldr	r1,[sp,#11*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r8,ror#20	@ Sigma0(a)
+	and	r3,r3,r12			@ (b^c)&=(a^b)
+	add	r11,r11,r7			@ d+=h
+	eor	r3,r3,r9			@ Maj(a,b,c)
+	add	r7,r7,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r7,r7,r3			@ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+	@ ldr	r2,[r1],#4			@ 13
+# if 13==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r11,r11,ror#5
+	add	r7,r7,r3			@ h+=Maj(a,b,c) from the past
+	eor	r0,r0,r11,ror#19	@ Sigma1(e)
+# ifndef __ARMEB__
+	rev	r2,r2
+# endif
+#else
+	@ ldrb	r2,[r1,#3]			@ 13
+	add	r7,r7,r3			@ h+=Maj(a,b,c) from the past
+	ldrb	r3,[r1,#2]
+	ldrb	r0,[r1,#1]
+	orr	r2,r2,r3,lsl#8
+	ldrb	r3,[r1],#4
+	orr	r2,r2,r0,lsl#16
+# if 13==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r11,r11,ror#5
+	orr	r2,r2,r3,lsl#24
+	eor	r0,r0,r11,ror#19	@ Sigma1(e)
+#endif
+	ldr	r3,[r14],#4			@ *K256++
+	add	r6,r6,r2			@ h+=X[i]
+	str	r2,[sp,#13*4]
+	eor	r2,r4,r5
+	add	r6,r6,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r11
+	add	r6,r6,r3			@ h+=K256[i]
+	eor	r2,r2,r5			@ Ch(e,f,g)
+	eor	r0,r7,r7,ror#11
+	add	r6,r6,r2			@ h+=Ch(e,f,g)
+#if 13==31
+	and	r3,r3,#0xff
+	cmp	r3,#0xf2			@ done?
+#endif
+#if 13<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r3,r7,r8			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#15*4]		@ from future BODY_16_xx
+	eor	r3,r7,r8			@ a^b, b^c in next round
+	ldr	r1,[sp,#12*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r7,ror#20	@ Sigma0(a)
+	and	r12,r12,r3			@ (b^c)&=(a^b)
+	add	r10,r10,r6			@ d+=h
+	eor	r12,r12,r8			@ Maj(a,b,c)
+	add	r6,r6,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r6,r6,r12			@ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+	@ ldr	r2,[r1],#4			@ 14
+# if 14==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r10,r10,ror#5
+	add	r6,r6,r12			@ h+=Maj(a,b,c) from the past
+	eor	r0,r0,r10,ror#19	@ Sigma1(e)
+# ifndef __ARMEB__
+	rev	r2,r2
+# endif
+#else
+	@ ldrb	r2,[r1,#3]			@ 14
+	add	r6,r6,r12			@ h+=Maj(a,b,c) from the past
+	ldrb	r12,[r1,#2]
+	ldrb	r0,[r1,#1]
+	orr	r2,r2,r12,lsl#8
+	ldrb	r12,[r1],#4
+	orr	r2,r2,r0,lsl#16
+# if 14==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r10,r10,ror#5
+	orr	r2,r2,r12,lsl#24
+	eor	r0,r0,r10,ror#19	@ Sigma1(e)
+#endif
+	ldr	r12,[r14],#4			@ *K256++
+	add	r5,r5,r2			@ h+=X[i]
+	str	r2,[sp,#14*4]
+	eor	r2,r11,r4
+	add	r5,r5,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r10
+	add	r5,r5,r12			@ h+=K256[i]
+	eor	r2,r2,r4			@ Ch(e,f,g)
+	eor	r0,r6,r6,ror#11
+	add	r5,r5,r2			@ h+=Ch(e,f,g)
+#if 14==31
+	and	r12,r12,#0xff
+	cmp	r12,#0xf2			@ done?
+#endif
+#if 14<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r12,r6,r7			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#0*4]		@ from future BODY_16_xx
+	eor	r12,r6,r7			@ a^b, b^c in next round
+	ldr	r1,[sp,#13*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r6,ror#20	@ Sigma0(a)
+	and	r3,r3,r12			@ (b^c)&=(a^b)
+	add	r9,r9,r5			@ d+=h
+	eor	r3,r3,r7			@ Maj(a,b,c)
+	add	r5,r5,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r5,r5,r3			@ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+	@ ldr	r2,[r1],#4			@ 15
+# if 15==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r9,r9,ror#5
+	add	r5,r5,r3			@ h+=Maj(a,b,c) from the past
+	eor	r0,r0,r9,ror#19	@ Sigma1(e)
+# ifndef __ARMEB__
+	rev	r2,r2
+# endif
+#else
+	@ ldrb	r2,[r1,#3]			@ 15
+	add	r5,r5,r3			@ h+=Maj(a,b,c) from the past
+	ldrb	r3,[r1,#2]
+	ldrb	r0,[r1,#1]
+	orr	r2,r2,r3,lsl#8
+	ldrb	r3,[r1],#4
+	orr	r2,r2,r0,lsl#16
+# if 15==15
+	str	r1,[sp,#17*4]			@ make room for r1
+# endif
+	eor	r0,r9,r9,ror#5
+	orr	r2,r2,r3,lsl#24
+	eor	r0,r0,r9,ror#19	@ Sigma1(e)
+#endif
+	ldr	r3,[r14],#4			@ *K256++
+	add	r4,r4,r2			@ h+=X[i]
+	str	r2,[sp,#15*4]
+	eor	r2,r10,r11
+	add	r4,r4,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r9
+	add	r4,r4,r3			@ h+=K256[i]
+	eor	r2,r2,r11			@ Ch(e,f,g)
+	eor	r0,r5,r5,ror#11
+	add	r4,r4,r2			@ h+=Ch(e,f,g)
+#if 15==31
+	and	r3,r3,#0xff
+	cmp	r3,#0xf2			@ done?
+#endif
+#if 15<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r3,r5,r6			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#1*4]		@ from future BODY_16_xx
+	eor	r3,r5,r6			@ a^b, b^c in next round
+	ldr	r1,[sp,#14*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r5,ror#20	@ Sigma0(a)
+	and	r12,r12,r3			@ (b^c)&=(a^b)
+	add	r8,r8,r4			@ d+=h
+	eor	r12,r12,r6			@ Maj(a,b,c)
+	add	r4,r4,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r4,r4,r12			@ h+=Maj(a,b,c)
+.Lrounds_16_xx:
+	@ ldr	r2,[sp,#1*4]		@ 16
+	@ ldr	r1,[sp,#14*4]
+	mov	r0,r2,ror#7
+	add	r4,r4,r12			@ h+=Maj(a,b,c) from the past
+	mov	r12,r1,ror#17
+	eor	r0,r0,r2,ror#18
+	eor	r12,r12,r1,ror#19
+	eor	r0,r0,r2,lsr#3	@ sigma0(X[i+1])
+	ldr	r2,[sp,#0*4]
+	eor	r12,r12,r1,lsr#10	@ sigma1(X[i+14])
+	ldr	r1,[sp,#9*4]
+
+	add	r12,r12,r0
+	eor	r0,r8,r8,ror#5	@ from BODY_00_15
+	add	r2,r2,r12
+	eor	r0,r0,r8,ror#19	@ Sigma1(e)
+	add	r2,r2,r1			@ X[i]
+	ldr	r12,[r14],#4			@ *K256++
+	add	r11,r11,r2			@ h+=X[i]
+	str	r2,[sp,#0*4]
+	eor	r2,r9,r10
+	add	r11,r11,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r8
+	add	r11,r11,r12			@ h+=K256[i]
+	eor	r2,r2,r10			@ Ch(e,f,g)
+	eor	r0,r4,r4,ror#11
+	add	r11,r11,r2			@ h+=Ch(e,f,g)
+#if 16==31
+	and	r12,r12,#0xff
+	cmp	r12,#0xf2			@ done?
+#endif
+#if 16<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r12,r4,r5			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#2*4]		@ from future BODY_16_xx
+	eor	r12,r4,r5			@ a^b, b^c in next round
+	ldr	r1,[sp,#15*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r4,ror#20	@ Sigma0(a)
+	and	r3,r3,r12			@ (b^c)&=(a^b)
+	add	r7,r7,r11			@ d+=h
+	eor	r3,r3,r5			@ Maj(a,b,c)
+	add	r11,r11,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r11,r11,r3			@ h+=Maj(a,b,c)
+	@ ldr	r2,[sp,#2*4]		@ 17
+	@ ldr	r1,[sp,#15*4]
+	mov	r0,r2,ror#7
+	add	r11,r11,r3			@ h+=Maj(a,b,c) from the past
+	mov	r3,r1,ror#17
+	eor	r0,r0,r2,ror#18
+	eor	r3,r3,r1,ror#19
+	eor	r0,r0,r2,lsr#3	@ sigma0(X[i+1])
+	ldr	r2,[sp,#1*4]
+	eor	r3,r3,r1,lsr#10	@ sigma1(X[i+14])
+	ldr	r1,[sp,#10*4]
+
+	add	r3,r3,r0
+	eor	r0,r7,r7,ror#5	@ from BODY_00_15
+	add	r2,r2,r3
+	eor	r0,r0,r7,ror#19	@ Sigma1(e)
+	add	r2,r2,r1			@ X[i]
+	ldr	r3,[r14],#4			@ *K256++
+	add	r10,r10,r2			@ h+=X[i]
+	str	r2,[sp,#1*4]
+	eor	r2,r8,r9
+	add	r10,r10,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r7
+	add	r10,r10,r3			@ h+=K256[i]
+	eor	r2,r2,r9			@ Ch(e,f,g)
+	eor	r0,r11,r11,ror#11
+	add	r10,r10,r2			@ h+=Ch(e,f,g)
+#if 17==31
+	and	r3,r3,#0xff
+	cmp	r3,#0xf2			@ done?
+#endif
+#if 17<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r3,r11,r4			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#3*4]		@ from future BODY_16_xx
+	eor	r3,r11,r4			@ a^b, b^c in next round
+	ldr	r1,[sp,#0*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r11,ror#20	@ Sigma0(a)
+	and	r12,r12,r3			@ (b^c)&=(a^b)
+	add	r6,r6,r10			@ d+=h
+	eor	r12,r12,r4			@ Maj(a,b,c)
+	add	r10,r10,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r10,r10,r12			@ h+=Maj(a,b,c)
+	@ ldr	r2,[sp,#3*4]		@ 18
+	@ ldr	r1,[sp,#0*4]
+	mov	r0,r2,ror#7
+	add	r10,r10,r12			@ h+=Maj(a,b,c) from the past
+	mov	r12,r1,ror#17
+	eor	r0,r0,r2,ror#18
+	eor	r12,r12,r1,ror#19
+	eor	r0,r0,r2,lsr#3	@ sigma0(X[i+1])
+	ldr	r2,[sp,#2*4]
+	eor	r12,r12,r1,lsr#10	@ sigma1(X[i+14])
+	ldr	r1,[sp,#11*4]
+
+	add	r12,r12,r0
+	eor	r0,r6,r6,ror#5	@ from BODY_00_15
+	add	r2,r2,r12
+	eor	r0,r0,r6,ror#19	@ Sigma1(e)
+	add	r2,r2,r1			@ X[i]
+	ldr	r12,[r14],#4			@ *K256++
+	add	r9,r9,r2			@ h+=X[i]
+	str	r2,[sp,#2*4]
+	eor	r2,r7,r8
+	add	r9,r9,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r6
+	add	r9,r9,r12			@ h+=K256[i]
+	eor	r2,r2,r8			@ Ch(e,f,g)
+	eor	r0,r10,r10,ror#11
+	add	r9,r9,r2			@ h+=Ch(e,f,g)
+#if 18==31
+	and	r12,r12,#0xff
+	cmp	r12,#0xf2			@ done?
+#endif
+#if 18<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r12,r10,r11			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#4*4]		@ from future BODY_16_xx
+	eor	r12,r10,r11			@ a^b, b^c in next round
+	ldr	r1,[sp,#1*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r10,ror#20	@ Sigma0(a)
+	and	r3,r3,r12			@ (b^c)&=(a^b)
+	add	r5,r5,r9			@ d+=h
+	eor	r3,r3,r11			@ Maj(a,b,c)
+	add	r9,r9,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r9,r9,r3			@ h+=Maj(a,b,c)
+	@ ldr	r2,[sp,#4*4]		@ 19
+	@ ldr	r1,[sp,#1*4]
+	mov	r0,r2,ror#7
+	add	r9,r9,r3			@ h+=Maj(a,b,c) from the past
+	mov	r3,r1,ror#17
+	eor	r0,r0,r2,ror#18
+	eor	r3,r3,r1,ror#19
+	eor	r0,r0,r2,lsr#3	@ sigma0(X[i+1])
+	ldr	r2,[sp,#3*4]
+	eor	r3,r3,r1,lsr#10	@ sigma1(X[i+14])
+	ldr	r1,[sp,#12*4]
+
+	add	r3,r3,r0
+	eor	r0,r5,r5,ror#5	@ from BODY_00_15
+	add	r2,r2,r3
+	eor	r0,r0,r5,ror#19	@ Sigma1(e)
+	add	r2,r2,r1			@ X[i]
+	ldr	r3,[r14],#4			@ *K256++
+	add	r8,r8,r2			@ h+=X[i]
+	str	r2,[sp,#3*4]
+	eor	r2,r6,r7
+	add	r8,r8,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r5
+	add	r8,r8,r3			@ h+=K256[i]
+	eor	r2,r2,r7			@ Ch(e,f,g)
+	eor	r0,r9,r9,ror#11
+	add	r8,r8,r2			@ h+=Ch(e,f,g)
+#if 19==31
+	and	r3,r3,#0xff
+	cmp	r3,#0xf2			@ done?
+#endif
+#if 19<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r3,r9,r10			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#5*4]		@ from future BODY_16_xx
+	eor	r3,r9,r10			@ a^b, b^c in next round
+	ldr	r1,[sp,#2*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r9,ror#20	@ Sigma0(a)
+	and	r12,r12,r3			@ (b^c)&=(a^b)
+	add	r4,r4,r8			@ d+=h
+	eor	r12,r12,r10			@ Maj(a,b,c)
+	add	r8,r8,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r8,r8,r12			@ h+=Maj(a,b,c)
+	@ ldr	r2,[sp,#5*4]		@ 20
+	@ ldr	r1,[sp,#2*4]
+	mov	r0,r2,ror#7
+	add	r8,r8,r12			@ h+=Maj(a,b,c) from the past
+	mov	r12,r1,ror#17
+	eor	r0,r0,r2,ror#18
+	eor	r12,r12,r1,ror#19
+	eor	r0,r0,r2,lsr#3	@ sigma0(X[i+1])
+	ldr	r2,[sp,#4*4]
+	eor	r12,r12,r1,lsr#10	@ sigma1(X[i+14])
+	ldr	r1,[sp,#13*4]
+
+	add	r12,r12,r0
+	eor	r0,r4,r4,ror#5	@ from BODY_00_15
+	add	r2,r2,r12
+	eor	r0,r0,r4,ror#19	@ Sigma1(e)
+	add	r2,r2,r1			@ X[i]
+	ldr	r12,[r14],#4			@ *K256++
+	add	r7,r7,r2			@ h+=X[i]
+	str	r2,[sp,#4*4]
+	eor	r2,r5,r6
+	add	r7,r7,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r4
+	add	r7,r7,r12			@ h+=K256[i]
+	eor	r2,r2,r6			@ Ch(e,f,g)
+	eor	r0,r8,r8,ror#11
+	add	r7,r7,r2			@ h+=Ch(e,f,g)
+#if 20==31
+	and	r12,r12,#0xff
+	cmp	r12,#0xf2			@ done?
+#endif
+#if 20<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r12,r8,r9			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#6*4]		@ from future BODY_16_xx
+	eor	r12,r8,r9			@ a^b, b^c in next round
+	ldr	r1,[sp,#3*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r8,ror#20	@ Sigma0(a)
+	and	r3,r3,r12			@ (b^c)&=(a^b)
+	add	r11,r11,r7			@ d+=h
+	eor	r3,r3,r9			@ Maj(a,b,c)
+	add	r7,r7,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r7,r7,r3			@ h+=Maj(a,b,c)
+	@ ldr	r2,[sp,#6*4]		@ 21
+	@ ldr	r1,[sp,#3*4]
+	mov	r0,r2,ror#7
+	add	r7,r7,r3			@ h+=Maj(a,b,c) from the past
+	mov	r3,r1,ror#17
+	eor	r0,r0,r2,ror#18
+	eor	r3,r3,r1,ror#19
+	eor	r0,r0,r2,lsr#3	@ sigma0(X[i+1])
+	ldr	r2,[sp,#5*4]
+	eor	r3,r3,r1,lsr#10	@ sigma1(X[i+14])
+	ldr	r1,[sp,#14*4]
+
+	add	r3,r3,r0
+	eor	r0,r11,r11,ror#5	@ from BODY_00_15
+	add	r2,r2,r3
+	eor	r0,r0,r11,ror#19	@ Sigma1(e)
+	add	r2,r2,r1			@ X[i]
+	ldr	r3,[r14],#4			@ *K256++
+	add	r6,r6,r2			@ h+=X[i]
+	str	r2,[sp,#5*4]
+	eor	r2,r4,r5
+	add	r6,r6,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r11
+	add	r6,r6,r3			@ h+=K256[i]
+	eor	r2,r2,r5			@ Ch(e,f,g)
+	eor	r0,r7,r7,ror#11
+	add	r6,r6,r2			@ h+=Ch(e,f,g)
+#if 21==31
+	and	r3,r3,#0xff
+	cmp	r3,#0xf2			@ done?
+#endif
+#if 21<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r3,r7,r8			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#7*4]		@ from future BODY_16_xx
+	eor	r3,r7,r8			@ a^b, b^c in next round
+	ldr	r1,[sp,#4*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r7,ror#20	@ Sigma0(a)
+	and	r12,r12,r3			@ (b^c)&=(a^b)
+	add	r10,r10,r6			@ d+=h
+	eor	r12,r12,r8			@ Maj(a,b,c)
+	add	r6,r6,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r6,r6,r12			@ h+=Maj(a,b,c)
+	@ ldr	r2,[sp,#7*4]		@ 22
+	@ ldr	r1,[sp,#4*4]
+	mov	r0,r2,ror#7
+	add	r6,r6,r12			@ h+=Maj(a,b,c) from the past
+	mov	r12,r1,ror#17
+	eor	r0,r0,r2,ror#18
+	eor	r12,r12,r1,ror#19
+	eor	r0,r0,r2,lsr#3	@ sigma0(X[i+1])
+	ldr	r2,[sp,#6*4]
+	eor	r12,r12,r1,lsr#10	@ sigma1(X[i+14])
+	ldr	r1,[sp,#15*4]
+
+	add	r12,r12,r0
+	eor	r0,r10,r10,ror#5	@ from BODY_00_15
+	add	r2,r2,r12
+	eor	r0,r0,r10,ror#19	@ Sigma1(e)
+	add	r2,r2,r1			@ X[i]
+	ldr	r12,[r14],#4			@ *K256++
+	add	r5,r5,r2			@ h+=X[i]
+	str	r2,[sp,#6*4]
+	eor	r2,r11,r4
+	add	r5,r5,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r10
+	add	r5,r5,r12			@ h+=K256[i]
+	eor	r2,r2,r4			@ Ch(e,f,g)
+	eor	r0,r6,r6,ror#11
+	add	r5,r5,r2			@ h+=Ch(e,f,g)
+#if 22==31
+	and	r12,r12,#0xff
+	cmp	r12,#0xf2			@ done?
+#endif
+#if 22<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r12,r6,r7			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#8*4]		@ from future BODY_16_xx
+	eor	r12,r6,r7			@ a^b, b^c in next round
+	ldr	r1,[sp,#5*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r6,ror#20	@ Sigma0(a)
+	and	r3,r3,r12			@ (b^c)&=(a^b)
+	add	r9,r9,r5			@ d+=h
+	eor	r3,r3,r7			@ Maj(a,b,c)
+	add	r5,r5,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r5,r5,r3			@ h+=Maj(a,b,c)
+	@ ldr	r2,[sp,#8*4]		@ 23
+	@ ldr	r1,[sp,#5*4]
+	mov	r0,r2,ror#7
+	add	r5,r5,r3			@ h+=Maj(a,b,c) from the past
+	mov	r3,r1,ror#17
+	eor	r0,r0,r2,ror#18
+	eor	r3,r3,r1,ror#19
+	eor	r0,r0,r2,lsr#3	@ sigma0(X[i+1])
+	ldr	r2,[sp,#7*4]
+	eor	r3,r3,r1,lsr#10	@ sigma1(X[i+14])
+	ldr	r1,[sp,#0*4]
+
+	add	r3,r3,r0
+	eor	r0,r9,r9,ror#5	@ from BODY_00_15
+	add	r2,r2,r3
+	eor	r0,r0,r9,ror#19	@ Sigma1(e)
+	add	r2,r2,r1			@ X[i]
+	ldr	r3,[r14],#4			@ *K256++
+	add	r4,r4,r2			@ h+=X[i]
+	str	r2,[sp,#7*4]
+	eor	r2,r10,r11
+	add	r4,r4,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r9
+	add	r4,r4,r3			@ h+=K256[i]
+	eor	r2,r2,r11			@ Ch(e,f,g)
+	eor	r0,r5,r5,ror#11
+	add	r4,r4,r2			@ h+=Ch(e,f,g)
+#if 23==31
+	and	r3,r3,#0xff
+	cmp	r3,#0xf2			@ done?
+#endif
+#if 23<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r3,r5,r6			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#9*4]		@ from future BODY_16_xx
+	eor	r3,r5,r6			@ a^b, b^c in next round
+	ldr	r1,[sp,#6*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r5,ror#20	@ Sigma0(a)
+	and	r12,r12,r3			@ (b^c)&=(a^b)
+	add	r8,r8,r4			@ d+=h
+	eor	r12,r12,r6			@ Maj(a,b,c)
+	add	r4,r4,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r4,r4,r12			@ h+=Maj(a,b,c)
+	@ ldr	r2,[sp,#9*4]		@ 24
+	@ ldr	r1,[sp,#6*4]
+	mov	r0,r2,ror#7
+	add	r4,r4,r12			@ h+=Maj(a,b,c) from the past
+	mov	r12,r1,ror#17
+	eor	r0,r0,r2,ror#18
+	eor	r12,r12,r1,ror#19
+	eor	r0,r0,r2,lsr#3	@ sigma0(X[i+1])
+	ldr	r2,[sp,#8*4]
+	eor	r12,r12,r1,lsr#10	@ sigma1(X[i+14])
+	ldr	r1,[sp,#1*4]
+
+	add	r12,r12,r0
+	eor	r0,r8,r8,ror#5	@ from BODY_00_15
+	add	r2,r2,r12
+	eor	r0,r0,r8,ror#19	@ Sigma1(e)
+	add	r2,r2,r1			@ X[i]
+	ldr	r12,[r14],#4			@ *K256++
+	add	r11,r11,r2			@ h+=X[i]
+	str	r2,[sp,#8*4]
+	eor	r2,r9,r10
+	add	r11,r11,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r8
+	add	r11,r11,r12			@ h+=K256[i]
+	eor	r2,r2,r10			@ Ch(e,f,g)
+	eor	r0,r4,r4,ror#11
+	add	r11,r11,r2			@ h+=Ch(e,f,g)
+#if 24==31
+	and	r12,r12,#0xff
+	cmp	r12,#0xf2			@ done?
+#endif
+#if 24<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r12,r4,r5			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#10*4]		@ from future BODY_16_xx
+	eor	r12,r4,r5			@ a^b, b^c in next round
+	ldr	r1,[sp,#7*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r4,ror#20	@ Sigma0(a)
+	and	r3,r3,r12			@ (b^c)&=(a^b)
+	add	r7,r7,r11			@ d+=h
+	eor	r3,r3,r5			@ Maj(a,b,c)
+	add	r11,r11,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r11,r11,r3			@ h+=Maj(a,b,c)
+	@ ldr	r2,[sp,#10*4]		@ 25
+	@ ldr	r1,[sp,#7*4]
+	mov	r0,r2,ror#7
+	add	r11,r11,r3			@ h+=Maj(a,b,c) from the past
+	mov	r3,r1,ror#17
+	eor	r0,r0,r2,ror#18
+	eor	r3,r3,r1,ror#19
+	eor	r0,r0,r2,lsr#3	@ sigma0(X[i+1])
+	ldr	r2,[sp,#9*4]
+	eor	r3,r3,r1,lsr#10	@ sigma1(X[i+14])
+	ldr	r1,[sp,#2*4]
+
+	add	r3,r3,r0
+	eor	r0,r7,r7,ror#5	@ from BODY_00_15
+	add	r2,r2,r3
+	eor	r0,r0,r7,ror#19	@ Sigma1(e)
+	add	r2,r2,r1			@ X[i]
+	ldr	r3,[r14],#4			@ *K256++
+	add	r10,r10,r2			@ h+=X[i]
+	str	r2,[sp,#9*4]
+	eor	r2,r8,r9
+	add	r10,r10,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r7
+	add	r10,r10,r3			@ h+=K256[i]
+	eor	r2,r2,r9			@ Ch(e,f,g)
+	eor	r0,r11,r11,ror#11
+	add	r10,r10,r2			@ h+=Ch(e,f,g)
+#if 25==31
+	and	r3,r3,#0xff
+	cmp	r3,#0xf2			@ done?
+#endif
+#if 25<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r3,r11,r4			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#11*4]		@ from future BODY_16_xx
+	eor	r3,r11,r4			@ a^b, b^c in next round
+	ldr	r1,[sp,#8*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r11,ror#20	@ Sigma0(a)
+	and	r12,r12,r3			@ (b^c)&=(a^b)
+	add	r6,r6,r10			@ d+=h
+	eor	r12,r12,r4			@ Maj(a,b,c)
+	add	r10,r10,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r10,r10,r12			@ h+=Maj(a,b,c)
+	@ ldr	r2,[sp,#11*4]		@ 26
+	@ ldr	r1,[sp,#8*4]
+	mov	r0,r2,ror#7
+	add	r10,r10,r12			@ h+=Maj(a,b,c) from the past
+	mov	r12,r1,ror#17
+	eor	r0,r0,r2,ror#18
+	eor	r12,r12,r1,ror#19
+	eor	r0,r0,r2,lsr#3	@ sigma0(X[i+1])
+	ldr	r2,[sp,#10*4]
+	eor	r12,r12,r1,lsr#10	@ sigma1(X[i+14])
+	ldr	r1,[sp,#3*4]
+
+	add	r12,r12,r0
+	eor	r0,r6,r6,ror#5	@ from BODY_00_15
+	add	r2,r2,r12
+	eor	r0,r0,r6,ror#19	@ Sigma1(e)
+	add	r2,r2,r1			@ X[i]
+	ldr	r12,[r14],#4			@ *K256++
+	add	r9,r9,r2			@ h+=X[i]
+	str	r2,[sp,#10*4]
+	eor	r2,r7,r8
+	add	r9,r9,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r6
+	add	r9,r9,r12			@ h+=K256[i]
+	eor	r2,r2,r8			@ Ch(e,f,g)
+	eor	r0,r10,r10,ror#11
+	add	r9,r9,r2			@ h+=Ch(e,f,g)
+#if 26==31
+	and	r12,r12,#0xff
+	cmp	r12,#0xf2			@ done?
+#endif
+#if 26<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r12,r10,r11			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#12*4]		@ from future BODY_16_xx
+	eor	r12,r10,r11			@ a^b, b^c in next round
+	ldr	r1,[sp,#9*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r10,ror#20	@ Sigma0(a)
+	and	r3,r3,r12			@ (b^c)&=(a^b)
+	add	r5,r5,r9			@ d+=h
+	eor	r3,r3,r11			@ Maj(a,b,c)
+	add	r9,r9,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r9,r9,r3			@ h+=Maj(a,b,c)
+	@ ldr	r2,[sp,#12*4]		@ 27
+	@ ldr	r1,[sp,#9*4]
+	mov	r0,r2,ror#7
+	add	r9,r9,r3			@ h+=Maj(a,b,c) from the past
+	mov	r3,r1,ror#17
+	eor	r0,r0,r2,ror#18
+	eor	r3,r3,r1,ror#19
+	eor	r0,r0,r2,lsr#3	@ sigma0(X[i+1])
+	ldr	r2,[sp,#11*4]
+	eor	r3,r3,r1,lsr#10	@ sigma1(X[i+14])
+	ldr	r1,[sp,#4*4]
+
+	add	r3,r3,r0
+	eor	r0,r5,r5,ror#5	@ from BODY_00_15
+	add	r2,r2,r3
+	eor	r0,r0,r5,ror#19	@ Sigma1(e)
+	add	r2,r2,r1			@ X[i]
+	ldr	r3,[r14],#4			@ *K256++
+	add	r8,r8,r2			@ h+=X[i]
+	str	r2,[sp,#11*4]
+	eor	r2,r6,r7
+	add	r8,r8,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r5
+	add	r8,r8,r3			@ h+=K256[i]
+	eor	r2,r2,r7			@ Ch(e,f,g)
+	eor	r0,r9,r9,ror#11
+	add	r8,r8,r2			@ h+=Ch(e,f,g)
+#if 27==31
+	and	r3,r3,#0xff
+	cmp	r3,#0xf2			@ done?
+#endif
+#if 27<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r3,r9,r10			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#13*4]		@ from future BODY_16_xx
+	eor	r3,r9,r10			@ a^b, b^c in next round
+	ldr	r1,[sp,#10*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r9,ror#20	@ Sigma0(a)
+	and	r12,r12,r3			@ (b^c)&=(a^b)
+	add	r4,r4,r8			@ d+=h
+	eor	r12,r12,r10			@ Maj(a,b,c)
+	add	r8,r8,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r8,r8,r12			@ h+=Maj(a,b,c)
+	@ ldr	r2,[sp,#13*4]		@ 28
+	@ ldr	r1,[sp,#10*4]
+	mov	r0,r2,ror#7
+	add	r8,r8,r12			@ h+=Maj(a,b,c) from the past
+	mov	r12,r1,ror#17
+	eor	r0,r0,r2,ror#18
+	eor	r12,r12,r1,ror#19
+	eor	r0,r0,r2,lsr#3	@ sigma0(X[i+1])
+	ldr	r2,[sp,#12*4]
+	eor	r12,r12,r1,lsr#10	@ sigma1(X[i+14])
+	ldr	r1,[sp,#5*4]
+
+	add	r12,r12,r0
+	eor	r0,r4,r4,ror#5	@ from BODY_00_15
+	add	r2,r2,r12
+	eor	r0,r0,r4,ror#19	@ Sigma1(e)
+	add	r2,r2,r1			@ X[i]
+	ldr	r12,[r14],#4			@ *K256++
+	add	r7,r7,r2			@ h+=X[i]
+	str	r2,[sp,#12*4]
+	eor	r2,r5,r6
+	add	r7,r7,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r4
+	add	r7,r7,r12			@ h+=K256[i]
+	eor	r2,r2,r6			@ Ch(e,f,g)
+	eor	r0,r8,r8,ror#11
+	add	r7,r7,r2			@ h+=Ch(e,f,g)
+#if 28==31
+	and	r12,r12,#0xff
+	cmp	r12,#0xf2			@ done?
+#endif
+#if 28<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r12,r8,r9			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#14*4]		@ from future BODY_16_xx
+	eor	r12,r8,r9			@ a^b, b^c in next round
+	ldr	r1,[sp,#11*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r8,ror#20	@ Sigma0(a)
+	and	r3,r3,r12			@ (b^c)&=(a^b)
+	add	r11,r11,r7			@ d+=h
+	eor	r3,r3,r9			@ Maj(a,b,c)
+	add	r7,r7,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r7,r7,r3			@ h+=Maj(a,b,c)
+	@ ldr	r2,[sp,#14*4]		@ 29
+	@ ldr	r1,[sp,#11*4]
+	mov	r0,r2,ror#7
+	add	r7,r7,r3			@ h+=Maj(a,b,c) from the past
+	mov	r3,r1,ror#17
+	eor	r0,r0,r2,ror#18
+	eor	r3,r3,r1,ror#19
+	eor	r0,r0,r2,lsr#3	@ sigma0(X[i+1])
+	ldr	r2,[sp,#13*4]
+	eor	r3,r3,r1,lsr#10	@ sigma1(X[i+14])
+	ldr	r1,[sp,#6*4]
+
+	add	r3,r3,r0
+	eor	r0,r11,r11,ror#5	@ from BODY_00_15
+	add	r2,r2,r3
+	eor	r0,r0,r11,ror#19	@ Sigma1(e)
+	add	r2,r2,r1			@ X[i]
+	ldr	r3,[r14],#4			@ *K256++
+	add	r6,r6,r2			@ h+=X[i]
+	str	r2,[sp,#13*4]
+	eor	r2,r4,r5
+	add	r6,r6,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r11
+	add	r6,r6,r3			@ h+=K256[i]
+	eor	r2,r2,r5			@ Ch(e,f,g)
+	eor	r0,r7,r7,ror#11
+	add	r6,r6,r2			@ h+=Ch(e,f,g)
+#if 29==31
+	and	r3,r3,#0xff
+	cmp	r3,#0xf2			@ done?
+#endif
+#if 29<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r3,r7,r8			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#15*4]		@ from future BODY_16_xx
+	eor	r3,r7,r8			@ a^b, b^c in next round
+	ldr	r1,[sp,#12*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r7,ror#20	@ Sigma0(a)
+	and	r12,r12,r3			@ (b^c)&=(a^b)
+	add	r10,r10,r6			@ d+=h
+	eor	r12,r12,r8			@ Maj(a,b,c)
+	add	r6,r6,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r6,r6,r12			@ h+=Maj(a,b,c)
+	@ ldr	r2,[sp,#15*4]		@ 30
+	@ ldr	r1,[sp,#12*4]
+	mov	r0,r2,ror#7
+	add	r6,r6,r12			@ h+=Maj(a,b,c) from the past
+	mov	r12,r1,ror#17
+	eor	r0,r0,r2,ror#18
+	eor	r12,r12,r1,ror#19
+	eor	r0,r0,r2,lsr#3	@ sigma0(X[i+1])
+	ldr	r2,[sp,#14*4]
+	eor	r12,r12,r1,lsr#10	@ sigma1(X[i+14])
+	ldr	r1,[sp,#7*4]
+
+	add	r12,r12,r0
+	eor	r0,r10,r10,ror#5	@ from BODY_00_15
+	add	r2,r2,r12
+	eor	r0,r0,r10,ror#19	@ Sigma1(e)
+	add	r2,r2,r1			@ X[i]
+	ldr	r12,[r14],#4			@ *K256++
+	add	r5,r5,r2			@ h+=X[i]
+	str	r2,[sp,#14*4]
+	eor	r2,r11,r4
+	add	r5,r5,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r10
+	add	r5,r5,r12			@ h+=K256[i]
+	eor	r2,r2,r4			@ Ch(e,f,g)
+	eor	r0,r6,r6,ror#11
+	add	r5,r5,r2			@ h+=Ch(e,f,g)
+#if 30==31
+	and	r12,r12,#0xff
+	cmp	r12,#0xf2			@ done?
+#endif
+#if 30<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r12,r6,r7			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#0*4]		@ from future BODY_16_xx
+	eor	r12,r6,r7			@ a^b, b^c in next round
+	ldr	r1,[sp,#13*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r6,ror#20	@ Sigma0(a)
+	and	r3,r3,r12			@ (b^c)&=(a^b)
+	add	r9,r9,r5			@ d+=h
+	eor	r3,r3,r7			@ Maj(a,b,c)
+	add	r5,r5,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r5,r5,r3			@ h+=Maj(a,b,c)
+	@ ldr	r2,[sp,#0*4]		@ 31
+	@ ldr	r1,[sp,#13*4]
+	mov	r0,r2,ror#7
+	add	r5,r5,r3			@ h+=Maj(a,b,c) from the past
+	mov	r3,r1,ror#17
+	eor	r0,r0,r2,ror#18
+	eor	r3,r3,r1,ror#19
+	eor	r0,r0,r2,lsr#3	@ sigma0(X[i+1])
+	ldr	r2,[sp,#15*4]
+	eor	r3,r3,r1,lsr#10	@ sigma1(X[i+14])
+	ldr	r1,[sp,#8*4]
+
+	add	r3,r3,r0
+	eor	r0,r9,r9,ror#5	@ from BODY_00_15
+	add	r2,r2,r3
+	eor	r0,r0,r9,ror#19	@ Sigma1(e)
+	add	r2,r2,r1			@ X[i]
+	ldr	r3,[r14],#4			@ *K256++
+	add	r4,r4,r2			@ h+=X[i]
+	str	r2,[sp,#15*4]
+	eor	r2,r10,r11
+	add	r4,r4,r0,ror#6	@ h+=Sigma1(e)
+	and	r2,r2,r9
+	add	r4,r4,r3			@ h+=K256[i]
+	eor	r2,r2,r11			@ Ch(e,f,g)
+	eor	r0,r5,r5,ror#11
+	add	r4,r4,r2			@ h+=Ch(e,f,g)
+#if 31==31
+	and	r3,r3,#0xff
+	cmp	r3,#0xf2			@ done?
+#endif
+#if 31<15
+# if __ARM_ARCH__>=7
+	ldr	r2,[r1],#4			@ prefetch
+# else
+	ldrb	r2,[r1,#3]
+# endif
+	eor	r3,r5,r6			@ a^b, b^c in next round
+#else
+	ldr	r2,[sp,#1*4]		@ from future BODY_16_xx
+	eor	r3,r5,r6			@ a^b, b^c in next round
+	ldr	r1,[sp,#14*4]	@ from future BODY_16_xx
+#endif
+	eor	r0,r0,r5,ror#20	@ Sigma0(a)
+	and	r12,r12,r3			@ (b^c)&=(a^b)
+	add	r8,r8,r4			@ d+=h
+	eor	r12,r12,r6			@ Maj(a,b,c)
+	add	r4,r4,r0,ror#2	@ h+=Sigma0(a)
+	@ add	r4,r4,r12			@ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+	ite	eq			@ Thumb2 thing, sanity check in ARM
+#endif
+	ldreq	r3,[sp,#16*4]		@ pull ctx
+	bne	.Lrounds_16_xx
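+	@ Note: the 16-round loop above exits once the low byte of the K256
+	@ constant just consumed equals 0xf2, which matches only the final
+	@ constant 0xc67178f2 -- that is what the "done?" compare tests.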
+
+	add	r4,r4,r12		@ h+=Maj(a,b,c) from the past
+	ldr	r0,[r3,#0]
+	ldr	r2,[r3,#4]
+	ldr	r12,[r3,#8]
+	add	r4,r4,r0
+	ldr	r0,[r3,#12]
+	add	r5,r5,r2
+	ldr	r2,[r3,#16]
+	add	r6,r6,r12
+	ldr	r12,[r3,#20]
+	add	r7,r7,r0
+	ldr	r0,[r3,#24]
+	add	r8,r8,r2
+	ldr	r2,[r3,#28]
+	add	r9,r9,r12
+	ldr	r1,[sp,#17*4]		@ pull inp
+	ldr	r12,[sp,#18*4]		@ pull inp+len
+	add	r10,r10,r0
+	add	r11,r11,r2
+	stmia	r3,{r4,r5,r6,r7,r8,r9,r10,r11}
+	cmp	r1,r12
+	sub	r14,r14,#256	@ rewind Ktbl
+	bne	.Loop
+
+	add	sp,sp,#19*4	@ destroy frame
+#if __ARM_ARCH__>=5
+	ldmia	sp!,{r4-r11,pc}
+#else
+	ldmia	sp!,{r4-r11,lr}
+	tst	lr,#1
+	moveq	pc,lr			@ be binary compatible with V4, yet
+	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
+#endif
+.size	sha256_block_data_order,.-sha256_block_data_order
+#if __ARM_MAX_ARCH__>=7
+.arch	armv7-a
+.fpu	neon
+
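+@ NEON variant: the integer pipeline runs the SHA-256 rounds while the NEON
+@ unit expands the message schedule in q0-q3 and pre-adds the K256 constants,
+@ staging X[i]+K[i] on the stack for the scalar rounds to consume.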
+.global	sha256_block_data_order_neon
+.type	sha256_block_data_order_neon,%function
+.align	4
+sha256_block_data_order_neon:
+.LNEON:
+	stmdb	sp!,{r4-r12,lr}
+
+	sub	r11,sp,#16*4+16
+	adrl	r14,K256
+	bic	r11,r11,#15		@ align for 128-bit stores
+	mov	r12,sp
+	mov	sp,r11			@ alloca
+	add	r2,r1,r2,lsl#6	@ len to point at the end of inp
+
+	vld1.8		{q0},[r1]!
+	vld1.8		{q1},[r1]!
+	vld1.8		{q2},[r1]!
+	vld1.8		{q3},[r1]!
+	vld1.32		{q8},[r14,:128]!
+	vld1.32		{q9},[r14,:128]!
+	vld1.32		{q10},[r14,:128]!
+	vld1.32		{q11},[r14,:128]!
+	vrev32.8	q0,q0		@ yes, even on
+	str		r0,[sp,#64]
+	vrev32.8	q1,q1		@ big-endian
+	str		r1,[sp,#68]
+	mov		r1,sp
+	vrev32.8	q2,q2
+	str		r2,[sp,#72]
+	vrev32.8	q3,q3
+	str		r12,[sp,#76]		@ save original sp
+	vadd.i32	q8,q8,q0
+	vadd.i32	q9,q9,q1
+	vst1.32		{q8},[r1,:128]!
+	vadd.i32	q10,q10,q2
+	vst1.32		{q9},[r1,:128]!
+	vadd.i32	q11,q11,q3
+	vst1.32		{q10},[r1,:128]!
+	vst1.32		{q11},[r1,:128]!
+
+	ldmia		r0,{r4-r11}
+	sub		r1,r1,#64
+	ldr		r2,[sp,#0]
+	eor		r12,r12,r12
+	eor		r3,r5,r6
+	b		.L_00_48
+
+.align	4
+.L_00_48:
+	vext.8	q8,q0,q1,#4
+	add	r11,r11,r2
+	eor	r2,r9,r10
+	eor	r0,r8,r8,ror#5
+	vext.8	q9,q2,q3,#4
+	add	r4,r4,r12
+	and	r2,r2,r8
+	eor	r12,r0,r8,ror#19
+	vshr.u32	q10,q8,#7
+	eor	r0,r4,r4,ror#11
+	eor	r2,r2,r10
+	vadd.i32	q0,q0,q9
+	add	r11,r11,r12,ror#6
+	eor	r12,r4,r5
+	vshr.u32	q9,q8,#3
+	eor	r0,r0,r4,ror#20
+	add	r11,r11,r2
+	vsli.32	q10,q8,#25
+	ldr	r2,[sp,#4]
+	and	r3,r3,r12
+	vshr.u32	q11,q8,#18
+	add	r7,r7,r11
+	add	r11,r11,r0,ror#2
+	eor	r3,r3,r5
+	veor	q9,q9,q10
+	add	r10,r10,r2
+	vsli.32	q11,q8,#14
+	eor	r2,r8,r9
+	eor	r0,r7,r7,ror#5
+	vshr.u32	d24,d7,#17
+	add	r11,r11,r3
+	and	r2,r2,r7
+	veor	q9,q9,q11
+	eor	r3,r0,r7,ror#19
+	eor	r0,r11,r11,ror#11
+	vsli.32	d24,d7,#15
+	eor	r2,r2,r9
+	add	r10,r10,r3,ror#6
+	vshr.u32	d25,d7,#10
+	eor	r3,r11,r4
+	eor	r0,r0,r11,ror#20
+	vadd.i32	q0,q0,q9
+	add	r10,r10,r2
+	ldr	r2,[sp,#8]
+	veor	d25,d25,d24
+	and	r12,r12,r3
+	add	r6,r6,r10
+	vshr.u32	d24,d7,#19
+	add	r10,r10,r0,ror#2
+	eor	r12,r12,r4
+	vsli.32	d24,d7,#13
+	add	r9,r9,r2
+	eor	r2,r7,r8
+	veor	d25,d25,d24
+	eor	r0,r6,r6,ror#5
+	add	r10,r10,r12
+	vadd.i32	d0,d0,d25
+	and	r2,r2,r6
+	eor	r12,r0,r6,ror#19
+	vshr.u32	d24,d0,#17
+	eor	r0,r10,r10,ror#11
+	eor	r2,r2,r8
+	vsli.32	d24,d0,#15
+	add	r9,r9,r12,ror#6
+	eor	r12,r10,r11
+	vshr.u32	d25,d0,#10
+	eor	r0,r0,r10,ror#20
+	add	r9,r9,r2
+	veor	d25,d25,d24
+	ldr	r2,[sp,#12]
+	and	r3,r3,r12
+	vshr.u32	d24,d0,#19
+	add	r5,r5,r9
+	add	r9,r9,r0,ror#2
+	eor	r3,r3,r11
+	vld1.32	{q8},[r14,:128]!
+	add	r8,r8,r2
+	vsli.32	d24,d0,#13
+	eor	r2,r6,r7
+	eor	r0,r5,r5,ror#5
+	veor	d25,d25,d24
+	add	r9,r9,r3
+	and	r2,r2,r5
+	vadd.i32	d1,d1,d25
+	eor	r3,r0,r5,ror#19
+	eor	r0,r9,r9,ror#11
+	vadd.i32	q8,q8,q0
+	eor	r2,r2,r7
+	add	r8,r8,r3,ror#6
+	eor	r3,r9,r10
+	eor	r0,r0,r9,ror#20
+	add	r8,r8,r2
+	ldr	r2,[sp,#16]
+	and	r12,r12,r3
+	add	r4,r4,r8
+	vst1.32	{q8},[r1,:128]!
+	add	r8,r8,r0,ror#2
+	eor	r12,r12,r10
+	vext.8	q8,q1,q2,#4
+	add	r7,r7,r2
+	eor	r2,r5,r6
+	eor	r0,r4,r4,ror#5
+	vext.8	q9,q3,q0,#4
+	add	r8,r8,r12
+	and	r2,r2,r4
+	eor	r12,r0,r4,ror#19
+	vshr.u32	q10,q8,#7
+	eor	r0,r8,r8,ror#11
+	eor	r2,r2,r6
+	vadd.i32	q1,q1,q9
+	add	r7,r7,r12,ror#6
+	eor	r12,r8,r9
+	vshr.u32	q9,q8,#3
+	eor	r0,r0,r8,ror#20
+	add	r7,r7,r2
+	vsli.32	q10,q8,#25
+	ldr	r2,[sp,#20]
+	and	r3,r3,r12
+	vshr.u32	q11,q8,#18
+	add	r11,r11,r7
+	add	r7,r7,r0,ror#2
+	eor	r3,r3,r9
+	veor	q9,q9,q10
+	add	r6,r6,r2
+	vsli.32	q11,q8,#14
+	eor	r2,r4,r5
+	eor	r0,r11,r11,ror#5
+	vshr.u32	d24,d1,#17
+	add	r7,r7,r3
+	and	r2,r2,r11
+	veor	q9,q9,q11
+	eor	r3,r0,r11,ror#19
+	eor	r0,r7,r7,ror#11
+	vsli.32	d24,d1,#15
+	eor	r2,r2,r5
+	add	r6,r6,r3,ror#6
+	vshr.u32	d25,d1,#10
+	eor	r3,r7,r8
+	eor	r0,r0,r7,ror#20
+	vadd.i32	q1,q1,q9
+	add	r6,r6,r2
+	ldr	r2,[sp,#24]
+	veor	d25,d25,d24
+	and	r12,r12,r3
+	add	r10,r10,r6
+	vshr.u32	d24,d1,#19
+	add	r6,r6,r0,ror#2
+	eor	r12,r12,r8
+	vsli.32	d24,d1,#13
+	add	r5,r5,r2
+	eor	r2,r11,r4
+	veor	d25,d25,d24
+	eor	r0,r10,r10,ror#5
+	add	r6,r6,r12
+	vadd.i32	d2,d2,d25
+	and	r2,r2,r10
+	eor	r12,r0,r10,ror#19
+	vshr.u32	d24,d2,#17
+	eor	r0,r6,r6,ror#11
+	eor	r2,r2,r4
+	vsli.32	d24,d2,#15
+	add	r5,r5,r12,ror#6
+	eor	r12,r6,r7
+	vshr.u32	d25,d2,#10
+	eor	r0,r0,r6,ror#20
+	add	r5,r5,r2
+	veor	d25,d25,d24
+	ldr	r2,[sp,#28]
+	and	r3,r3,r12
+	vshr.u32	d24,d2,#19
+	add	r9,r9,r5
+	add	r5,r5,r0,ror#2
+	eor	r3,r3,r7
+	vld1.32	{q8},[r14,:128]!
+	add	r4,r4,r2
+	vsli.32	d24,d2,#13
+	eor	r2,r10,r11
+	eor	r0,r9,r9,ror#5
+	veor	d25,d25,d24
+	add	r5,r5,r3
+	and	r2,r2,r9
+	vadd.i32	d3,d3,d25
+	eor	r3,r0,r9,ror#19
+	eor	r0,r5,r5,ror#11
+	vadd.i32	q8,q8,q1
+	eor	r2,r2,r11
+	add	r4,r4,r3,ror#6
+	eor	r3,r5,r6
+	eor	r0,r0,r5,ror#20
+	add	r4,r4,r2
+	ldr	r2,[sp,#32]
+	and	r12,r12,r3
+	add	r8,r8,r4
+	vst1.32	{q8},[r1,:128]!
+	add	r4,r4,r0,ror#2
+	eor	r12,r12,r6
+	vext.8	q8,q2,q3,#4
+	add	r11,r11,r2
+	eor	r2,r9,r10
+	eor	r0,r8,r8,ror#5
+	vext.8	q9,q0,q1,#4
+	add	r4,r4,r12
+	and	r2,r2,r8
+	eor	r12,r0,r8,ror#19
+	vshr.u32	q10,q8,#7
+	eor	r0,r4,r4,ror#11
+	eor	r2,r2,r10
+	vadd.i32	q2,q2,q9
+	add	r11,r11,r12,ror#6
+	eor	r12,r4,r5
+	vshr.u32	q9,q8,#3
+	eor	r0,r0,r4,ror#20
+	add	r11,r11,r2
+	vsli.32	q10,q8,#25
+	ldr	r2,[sp,#36]
+	and	r3,r3,r12
+	vshr.u32	q11,q8,#18
+	add	r7,r7,r11
+	add	r11,r11,r0,ror#2
+	eor	r3,r3,r5
+	veor	q9,q9,q10
+	add	r10,r10,r2
+	vsli.32	q11,q8,#14
+	eor	r2,r8,r9
+	eor	r0,r7,r7,ror#5
+	vshr.u32	d24,d3,#17
+	add	r11,r11,r3
+	and	r2,r2,r7
+	veor	q9,q9,q11
+	eor	r3,r0,r7,ror#19
+	eor	r0,r11,r11,ror#11
+	vsli.32	d24,d3,#15
+	eor	r2,r2,r9
+	add	r10,r10,r3,ror#6
+	vshr.u32	d25,d3,#10
+	eor	r3,r11,r4
+	eor	r0,r0,r11,ror#20
+	vadd.i32	q2,q2,q9
+	add	r10,r10,r2
+	ldr	r2,[sp,#40]
+	veor	d25,d25,d24
+	and	r12,r12,r3
+	add	r6,r6,r10
+	vshr.u32	d24,d3,#19
+	add	r10,r10,r0,ror#2
+	eor	r12,r12,r4
+	vsli.32	d24,d3,#13
+	add	r9,r9,r2
+	eor	r2,r7,r8
+	veor	d25,d25,d24
+	eor	r0,r6,r6,ror#5
+	add	r10,r10,r12
+	vadd.i32	d4,d4,d25
+	and	r2,r2,r6
+	eor	r12,r0,r6,ror#19
+	vshr.u32	d24,d4,#17
+	eor	r0,r10,r10,ror#11
+	eor	r2,r2,r8
+	vsli.32	d24,d4,#15
+	add	r9,r9,r12,ror#6
+	eor	r12,r10,r11
+	vshr.u32	d25,d4,#10
+	eor	r0,r0,r10,ror#20
+	add	r9,r9,r2
+	veor	d25,d25,d24
+	ldr	r2,[sp,#44]
+	and	r3,r3,r12
+	vshr.u32	d24,d4,#19
+	add	r5,r5,r9
+	add	r9,r9,r0,ror#2
+	eor	r3,r3,r11
+	vld1.32	{q8},[r14,:128]!
+	add	r8,r8,r2
+	vsli.32	d24,d4,#13
+	eor	r2,r6,r7
+	eor	r0,r5,r5,ror#5
+	veor	d25,d25,d24
+	add	r9,r9,r3
+	and	r2,r2,r5
+	vadd.i32	d5,d5,d25
+	eor	r3,r0,r5,ror#19
+	eor	r0,r9,r9,ror#11
+	vadd.i32	q8,q8,q2
+	eor	r2,r2,r7
+	add	r8,r8,r3,ror#6
+	eor	r3,r9,r10
+	eor	r0,r0,r9,ror#20
+	add	r8,r8,r2
+	ldr	r2,[sp,#48]
+	and	r12,r12,r3
+	add	r4,r4,r8
+	vst1.32	{q8},[r1,:128]!
+	add	r8,r8,r0,ror#2
+	eor	r12,r12,r10
+	vext.8	q8,q3,q0,#4
+	add	r7,r7,r2
+	eor	r2,r5,r6
+	eor	r0,r4,r4,ror#5
+	vext.8	q9,q1,q2,#4
+	add	r8,r8,r12
+	and	r2,r2,r4
+	eor	r12,r0,r4,ror#19
+	vshr.u32	q10,q8,#7
+	eor	r0,r8,r8,ror#11
+	eor	r2,r2,r6
+	vadd.i32	q3,q3,q9
+	add	r7,r7,r12,ror#6
+	eor	r12,r8,r9
+	vshr.u32	q9,q8,#3
+	eor	r0,r0,r8,ror#20
+	add	r7,r7,r2
+	vsli.32	q10,q8,#25
+	ldr	r2,[sp,#52]
+	and	r3,r3,r12
+	vshr.u32	q11,q8,#18
+	add	r11,r11,r7
+	add	r7,r7,r0,ror#2
+	eor	r3,r3,r9
+	veor	q9,q9,q10
+	add	r6,r6,r2
+	vsli.32	q11,q8,#14
+	eor	r2,r4,r5
+	eor	r0,r11,r11,ror#5
+	vshr.u32	d24,d5,#17
+	add	r7,r7,r3
+	and	r2,r2,r11
+	veor	q9,q9,q11
+	eor	r3,r0,r11,ror#19
+	eor	r0,r7,r7,ror#11
+	vsli.32	d24,d5,#15
+	eor	r2,r2,r5
+	add	r6,r6,r3,ror#6
+	vshr.u32	d25,d5,#10
+	eor	r3,r7,r8
+	eor	r0,r0,r7,ror#20
+	vadd.i32	q3,q3,q9
+	add	r6,r6,r2
+	ldr	r2,[sp,#56]
+	veor	d25,d25,d24
+	and	r12,r12,r3
+	add	r10,r10,r6
+	vshr.u32	d24,d5,#19
+	add	r6,r6,r0,ror#2
+	eor	r12,r12,r8
+	vsli.32	d24,d5,#13
+	add	r5,r5,r2
+	eor	r2,r11,r4
+	veor	d25,d25,d24
+	eor	r0,r10,r10,ror#5
+	add	r6,r6,r12
+	vadd.i32	d6,d6,d25
+	and	r2,r2,r10
+	eor	r12,r0,r10,ror#19
+	vshr.u32	d24,d6,#17
+	eor	r0,r6,r6,ror#11
+	eor	r2,r2,r4
+	vsli.32	d24,d6,#15
+	add	r5,r5,r12,ror#6
+	eor	r12,r6,r7
+	vshr.u32	d25,d6,#10
+	eor	r0,r0,r6,ror#20
+	add	r5,r5,r2
+	veor	d25,d25,d24
+	ldr	r2,[sp,#60]
+	and	r3,r3,r12
+	vshr.u32	d24,d6,#19
+	add	r9,r9,r5
+	add	r5,r5,r0,ror#2
+	eor	r3,r3,r7
+	vld1.32	{q8},[r14,:128]!
+	add	r4,r4,r2
+	vsli.32	d24,d6,#13
+	eor	r2,r10,r11
+	eor	r0,r9,r9,ror#5
+	veor	d25,d25,d24
+	add	r5,r5,r3
+	and	r2,r2,r9
+	vadd.i32	d7,d7,d25
+	eor	r3,r0,r9,ror#19
+	eor	r0,r5,r5,ror#11
+	vadd.i32	q8,q8,q3
+	eor	r2,r2,r11
+	add	r4,r4,r3,ror#6
+	eor	r3,r5,r6
+	eor	r0,r0,r5,ror#20
+	add	r4,r4,r2
+	ldr	r2,[r14]
+	and	r12,r12,r3
+	add	r8,r8,r4
+	vst1.32	{q8},[r1,:128]!
+	add	r4,r4,r0,ror#2
+	eor	r12,r12,r6
+	teq	r2,#0				@ check for K256 terminator
+	ldr	r2,[sp,#0]
+	sub	r1,r1,#64
+	bne	.L_00_48
+
+	ldr		r1,[sp,#68]
+	ldr		r0,[sp,#72]
+	sub		r14,r14,#256	@ rewind r14
+	teq		r1,r0
+	it		eq
+	subeq		r1,r1,#64		@ avoid SEGV
+	vld1.8		{q0},[r1]!		@ load next input block
+	vld1.8		{q1},[r1]!
+	vld1.8		{q2},[r1]!
+	vld1.8		{q3},[r1]!
+	it		ne
+	strne		r1,[sp,#68]
+	mov		r1,sp
+	add	r11,r11,r2
+	eor	r2,r9,r10
+	eor	r0,r8,r8,ror#5
+	add	r4,r4,r12
+	vld1.32	{q8},[r14,:128]!
+	and	r2,r2,r8
+	eor	r12,r0,r8,ror#19
+	eor	r0,r4,r4,ror#11
+	eor	r2,r2,r10
+	vrev32.8	q0,q0
+	add	r11,r11,r12,ror#6
+	eor	r12,r4,r5
+	eor	r0,r0,r4,ror#20
+	add	r11,r11,r2
+	vadd.i32	q8,q8,q0
+	ldr	r2,[sp,#4]
+	and	r3,r3,r12
+	add	r7,r7,r11
+	add	r11,r11,r0,ror#2
+	eor	r3,r3,r5
+	add	r10,r10,r2
+	eor	r2,r8,r9
+	eor	r0,r7,r7,ror#5
+	add	r11,r11,r3
+	and	r2,r2,r7
+	eor	r3,r0,r7,ror#19
+	eor	r0,r11,r11,ror#11
+	eor	r2,r2,r9
+	add	r10,r10,r3,ror#6
+	eor	r3,r11,r4
+	eor	r0,r0,r11,ror#20
+	add	r10,r10,r2
+	ldr	r2,[sp,#8]
+	and	r12,r12,r3
+	add	r6,r6,r10
+	add	r10,r10,r0,ror#2
+	eor	r12,r12,r4
+	add	r9,r9,r2
+	eor	r2,r7,r8
+	eor	r0,r6,r6,ror#5
+	add	r10,r10,r12
+	and	r2,r2,r6
+	eor	r12,r0,r6,ror#19
+	eor	r0,r10,r10,ror#11
+	eor	r2,r2,r8
+	add	r9,r9,r12,ror#6
+	eor	r12,r10,r11
+	eor	r0,r0,r10,ror#20
+	add	r9,r9,r2
+	ldr	r2,[sp,#12]
+	and	r3,r3,r12
+	add	r5,r5,r9
+	add	r9,r9,r0,ror#2
+	eor	r3,r3,r11
+	add	r8,r8,r2
+	eor	r2,r6,r7
+	eor	r0,r5,r5,ror#5
+	add	r9,r9,r3
+	and	r2,r2,r5
+	eor	r3,r0,r5,ror#19
+	eor	r0,r9,r9,ror#11
+	eor	r2,r2,r7
+	add	r8,r8,r3,ror#6
+	eor	r3,r9,r10
+	eor	r0,r0,r9,ror#20
+	add	r8,r8,r2
+	ldr	r2,[sp,#16]
+	and	r12,r12,r3
+	add	r4,r4,r8
+	add	r8,r8,r0,ror#2
+	eor	r12,r12,r10
+	vst1.32	{q8},[r1,:128]!
+	add	r7,r7,r2
+	eor	r2,r5,r6
+	eor	r0,r4,r4,ror#5
+	add	r8,r8,r12
+	vld1.32	{q8},[r14,:128]!
+	and	r2,r2,r4
+	eor	r12,r0,r4,ror#19
+	eor	r0,r8,r8,ror#11
+	eor	r2,r2,r6
+	vrev32.8	q1,q1
+	add	r7,r7,r12,ror#6
+	eor	r12,r8,r9
+	eor	r0,r0,r8,ror#20
+	add	r7,r7,r2
+	vadd.i32	q8,q8,q1
+	ldr	r2,[sp,#20]
+	and	r3,r3,r12
+	add	r11,r11,r7
+	add	r7,r7,r0,ror#2
+	eor	r3,r3,r9
+	add	r6,r6,r2
+	eor	r2,r4,r5
+	eor	r0,r11,r11,ror#5
+	add	r7,r7,r3
+	and	r2,r2,r11
+	eor	r3,r0,r11,ror#19
+	eor	r0,r7,r7,ror#11
+	eor	r2,r2,r5
+	add	r6,r6,r3,ror#6
+	eor	r3,r7,r8
+	eor	r0,r0,r7,ror#20
+	add	r6,r6,r2
+	ldr	r2,[sp,#24]
+	and	r12,r12,r3
+	add	r10,r10,r6
+	add	r6,r6,r0,ror#2
+	eor	r12,r12,r8
+	add	r5,r5,r2
+	eor	r2,r11,r4
+	eor	r0,r10,r10,ror#5
+	add	r6,r6,r12
+	and	r2,r2,r10
+	eor	r12,r0,r10,ror#19
+	eor	r0,r6,r6,ror#11
+	eor	r2,r2,r4
+	add	r5,r5,r12,ror#6
+	eor	r12,r6,r7
+	eor	r0,r0,r6,ror#20
+	add	r5,r5,r2
+	ldr	r2,[sp,#28]
+	and	r3,r3,r12
+	add	r9,r9,r5
+	add	r5,r5,r0,ror#2
+	eor	r3,r3,r7
+	add	r4,r4,r2
+	eor	r2,r10,r11
+	eor	r0,r9,r9,ror#5
+	add	r5,r5,r3
+	and	r2,r2,r9
+	eor	r3,r0,r9,ror#19
+	eor	r0,r5,r5,ror#11
+	eor	r2,r2,r11
+	add	r4,r4,r3,ror#6
+	eor	r3,r5,r6
+	eor	r0,r0,r5,ror#20
+	add	r4,r4,r2
+	ldr	r2,[sp,#32]
+	and	r12,r12,r3
+	add	r8,r8,r4
+	add	r4,r4,r0,ror#2
+	eor	r12,r12,r6
+	vst1.32	{q8},[r1,:128]!
+	add	r11,r11,r2
+	eor	r2,r9,r10
+	eor	r0,r8,r8,ror#5
+	add	r4,r4,r12
+	vld1.32	{q8},[r14,:128]!
+	and	r2,r2,r8
+	eor	r12,r0,r8,ror#19
+	eor	r0,r4,r4,ror#11
+	eor	r2,r2,r10
+	vrev32.8	q2,q2
+	add	r11,r11,r12,ror#6
+	eor	r12,r4,r5
+	eor	r0,r0,r4,ror#20
+	add	r11,r11,r2
+	vadd.i32	q8,q8,q2
+	ldr	r2,[sp,#36]
+	and	r3,r3,r12
+	add	r7,r7,r11
+	add	r11,r11,r0,ror#2
+	eor	r3,r3,r5
+	add	r10,r10,r2
+	eor	r2,r8,r9
+	eor	r0,r7,r7,ror#5
+	add	r11,r11,r3
+	and	r2,r2,r7
+	eor	r3,r0,r7,ror#19
+	eor	r0,r11,r11,ror#11
+	eor	r2,r2,r9
+	add	r10,r10,r3,ror#6
+	eor	r3,r11,r4
+	eor	r0,r0,r11,ror#20
+	add	r10,r10,r2
+	ldr	r2,[sp,#40]
+	and	r12,r12,r3
+	add	r6,r6,r10
+	add	r10,r10,r0,ror#2
+	eor	r12,r12,r4
+	add	r9,r9,r2
+	eor	r2,r7,r8
+	eor	r0,r6,r6,ror#5
+	add	r10,r10,r12
+	and	r2,r2,r6
+	eor	r12,r0,r6,ror#19
+	eor	r0,r10,r10,ror#11
+	eor	r2,r2,r8
+	add	r9,r9,r12,ror#6
+	eor	r12,r10,r11
+	eor	r0,r0,r10,ror#20
+	add	r9,r9,r2
+	ldr	r2,[sp,#44]
+	and	r3,r3,r12
+	add	r5,r5,r9
+	add	r9,r9,r0,ror#2
+	eor	r3,r3,r11
+	add	r8,r8,r2
+	eor	r2,r6,r7
+	eor	r0,r5,r5,ror#5
+	add	r9,r9,r3
+	and	r2,r2,r5
+	eor	r3,r0,r5,ror#19
+	eor	r0,r9,r9,ror#11
+	eor	r2,r2,r7
+	add	r8,r8,r3,ror#6
+	eor	r3,r9,r10
+	eor	r0,r0,r9,ror#20
+	add	r8,r8,r2
+	ldr	r2,[sp,#48]
+	and	r12,r12,r3
+	add	r4,r4,r8
+	add	r8,r8,r0,ror#2
+	eor	r12,r12,r10
+	vst1.32	{q8},[r1,:128]!
+	add	r7,r7,r2
+	eor	r2,r5,r6
+	eor	r0,r4,r4,ror#5
+	add	r8,r8,r12
+	vld1.32	{q8},[r14,:128]!
+	and	r2,r2,r4
+	eor	r12,r0,r4,ror#19
+	eor	r0,r8,r8,ror#11
+	eor	r2,r2,r6
+	vrev32.8	q3,q3
+	add	r7,r7,r12,ror#6
+	eor	r12,r8,r9
+	eor	r0,r0,r8,ror#20
+	add	r7,r7,r2
+	vadd.i32	q8,q8,q3
+	ldr	r2,[sp,#52]
+	and	r3,r3,r12
+	add	r11,r11,r7
+	add	r7,r7,r0,ror#2
+	eor	r3,r3,r9
+	add	r6,r6,r2
+	eor	r2,r4,r5
+	eor	r0,r11,r11,ror#5
+	add	r7,r7,r3
+	and	r2,r2,r11
+	eor	r3,r0,r11,ror#19
+	eor	r0,r7,r7,ror#11
+	eor	r2,r2,r5
+	add	r6,r6,r3,ror#6
+	eor	r3,r7,r8
+	eor	r0,r0,r7,ror#20
+	add	r6,r6,r2
+	ldr	r2,[sp,#56]
+	and	r12,r12,r3
+	add	r10,r10,r6
+	add	r6,r6,r0,ror#2
+	eor	r12,r12,r8
+	add	r5,r5,r2
+	eor	r2,r11,r4
+	eor	r0,r10,r10,ror#5
+	add	r6,r6,r12
+	and	r2,r2,r10
+	eor	r12,r0,r10,ror#19
+	eor	r0,r6,r6,ror#11
+	eor	r2,r2,r4
+	add	r5,r5,r12,ror#6
+	eor	r12,r6,r7
+	eor	r0,r0,r6,ror#20
+	add	r5,r5,r2
+	ldr	r2,[sp,#60]
+	and	r3,r3,r12
+	add	r9,r9,r5
+	add	r5,r5,r0,ror#2
+	eor	r3,r3,r7
+	add	r4,r4,r2
+	eor	r2,r10,r11
+	eor	r0,r9,r9,ror#5
+	add	r5,r5,r3
+	and	r2,r2,r9
+	eor	r3,r0,r9,ror#19
+	eor	r0,r5,r5,ror#11
+	eor	r2,r2,r11
+	add	r4,r4,r3,ror#6
+	eor	r3,r5,r6
+	eor	r0,r0,r5,ror#20
+	add	r4,r4,r2
+	ldr	r2,[sp,#64]
+	and	r12,r12,r3
+	add	r8,r8,r4
+	add	r4,r4,r0,ror#2
+	eor	r12,r12,r6
+	vst1.32	{q8},[r1,:128]!
+	ldr	r0,[r2,#0]
+	add	r4,r4,r12			@ h+=Maj(a,b,c) from the past
+	ldr	r12,[r2,#4]
+	ldr	r3,[r2,#8]
+	ldr	r1,[r2,#12]
+	add	r4,r4,r0			@ accumulate
+	ldr	r0,[r2,#16]
+	add	r5,r5,r12
+	ldr	r12,[r2,#20]
+	add	r6,r6,r3
+	ldr	r3,[r2,#24]
+	add	r7,r7,r1
+	ldr	r1,[r2,#28]
+	add	r8,r8,r0
+	str	r4,[r2],#4
+	add	r9,r9,r12
+	str	r5,[r2],#4
+	add	r10,r10,r3
+	str	r6,[r2],#4
+	add	r11,r11,r1
+	str	r7,[r2],#4
+	stmia	r2,{r8-r11}
+
+	ittte	ne
+	movne	r1,sp
+	ldrne	r2,[sp,#0]
+	eorne	r12,r12,r12
+	ldreq	sp,[sp,#76]			@ restore original sp
+	itt	ne
+	eorne	r3,r5,r6
+	bne	.L_00_48
+
+	ldmia	sp!,{r4-r12,pc}
+.size	sha256_block_data_order_neon,.-sha256_block_data_order_neon
+#endif
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+
+# ifdef __thumb2__
+#  define INST(a,b,c,d)	.byte	c,d|0xc,a,b
+# else
+#  define INST(a,b,c,d)	.byte	a,b,c,d
+# endif
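+
+@ INST() emits the ARMv8 sha256h/sha256h2/sha256su0/sha256su1 opcodes as raw
+@ bytes so the file assembles even where the assembler lacks the crypto
+@ extensions; the Thumb-2 form swaps the halfwords and, via d|0xc, turns the
+@ ARM NEON prefix byte 0xf3 into its Thumb-2 equivalent 0xff.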
+
+.type	sha256_block_data_order_armv8,%function
+.align	5
+sha256_block_data_order_armv8:
+.LARMv8:
+	vld1.32	{q0,q1},[r0]
+# ifdef __thumb2__
+	adr	r3,.LARMv8
+	sub	r3,r3,#.LARMv8-K256
+# else
+	adrl	r3,K256
+# endif
+	add	r2,r1,r2,lsl#6	@ len to point at the end of inp
+
+.Loop_v8:
+	vld1.8		{q8-q9},[r1]!
+	vld1.8		{q10-q11},[r1]!
+	vld1.32		{q12},[r3]!
+	vrev32.8	q8,q8
+	vrev32.8	q9,q9
+	vrev32.8	q10,q10
+	vrev32.8	q11,q11
+	vmov		q14,q0	@ offload
+	vmov		q15,q1
+	teq		r1,r2
+	vld1.32		{q13},[r3]!
+	vadd.i32	q12,q12,q8
+	INST(0xe2,0x03,0xfa,0xf3)	@ sha256su0 q8,q9
+	vmov		q2,q0
+	INST(0x68,0x0c,0x02,0xf3)	@ sha256h q0,q1,q12
+	INST(0x68,0x2c,0x14,0xf3)	@ sha256h2 q1,q2,q12
+	INST(0xe6,0x0c,0x64,0xf3)	@ sha256su1 q8,q10,q11
+	vld1.32		{q12},[r3]!
+	vadd.i32	q13,q13,q9
+	INST(0xe4,0x23,0xfa,0xf3)	@ sha256su0 q9,q10
+	vmov		q2,q0
+	INST(0x6a,0x0c,0x02,0xf3)	@ sha256h q0,q1,q13
+	INST(0x6a,0x2c,0x14,0xf3)	@ sha256h2 q1,q2,q13
+	INST(0xe0,0x2c,0x66,0xf3)	@ sha256su1 q9,q11,q8
+	vld1.32		{q13},[r3]!
+	vadd.i32	q12,q12,q10
+	INST(0xe6,0x43,0xfa,0xf3)	@ sha256su0 q10,q11
+	vmov		q2,q0
+	INST(0x68,0x0c,0x02,0xf3)	@ sha256h q0,q1,q12
+	INST(0x68,0x2c,0x14,0xf3)	@ sha256h2 q1,q2,q12
+	INST(0xe2,0x4c,0x60,0xf3)	@ sha256su1 q10,q8,q9
+	vld1.32		{q12},[r3]!
+	vadd.i32	q13,q13,q11
+	INST(0xe0,0x63,0xfa,0xf3)	@ sha256su0 q11,q8
+	vmov		q2,q0
+	INST(0x6a,0x0c,0x02,0xf3)	@ sha256h q0,q1,q13
+	INST(0x6a,0x2c,0x14,0xf3)	@ sha256h2 q1,q2,q13
+	INST(0xe4,0x6c,0x62,0xf3)	@ sha256su1 q11,q9,q10
+	vld1.32		{q13},[r3]!
+	vadd.i32	q12,q12,q8
+	INST(0xe2,0x03,0xfa,0xf3)	@ sha256su0 q8,q9
+	vmov		q2,q0
+	INST(0x68,0x0c,0x02,0xf3)	@ sha256h q0,q1,q12
+	INST(0x68,0x2c,0x14,0xf3)	@ sha256h2 q1,q2,q12
+	INST(0xe6,0x0c,0x64,0xf3)	@ sha256su1 q8,q10,q11
+	vld1.32		{q12},[r3]!
+	vadd.i32	q13,q13,q9
+	INST(0xe4,0x23,0xfa,0xf3)	@ sha256su0 q9,q10
+	vmov		q2,q0
+	INST(0x6a,0x0c,0x02,0xf3)	@ sha256h q0,q1,q13
+	INST(0x6a,0x2c,0x14,0xf3)	@ sha256h2 q1,q2,q13
+	INST(0xe0,0x2c,0x66,0xf3)	@ sha256su1 q9,q11,q8
+	vld1.32		{q13},[r3]!
+	vadd.i32	q12,q12,q10
+	INST(0xe6,0x43,0xfa,0xf3)	@ sha256su0 q10,q11
+	vmov		q2,q0
+	INST(0x68,0x0c,0x02,0xf3)	@ sha256h q0,q1,q12
+	INST(0x68,0x2c,0x14,0xf3)	@ sha256h2 q1,q2,q12
+	INST(0xe2,0x4c,0x60,0xf3)	@ sha256su1 q10,q8,q9
+	vld1.32		{q12},[r3]!
+	vadd.i32	q13,q13,q11
+	INST(0xe0,0x63,0xfa,0xf3)	@ sha256su0 q11,q8
+	vmov		q2,q0
+	INST(0x6a,0x0c,0x02,0xf3)	@ sha256h q0,q1,q13
+	INST(0x6a,0x2c,0x14,0xf3)	@ sha256h2 q1,q2,q13
+	INST(0xe4,0x6c,0x62,0xf3)	@ sha256su1 q11,q9,q10
+	vld1.32		{q13},[r3]!
+	vadd.i32	q12,q12,q8
+	INST(0xe2,0x03,0xfa,0xf3)	@ sha256su0 q8,q9
+	vmov		q2,q0
+	INST(0x68,0x0c,0x02,0xf3)	@ sha256h q0,q1,q12
+	INST(0x68,0x2c,0x14,0xf3)	@ sha256h2 q1,q2,q12
+	INST(0xe6,0x0c,0x64,0xf3)	@ sha256su1 q8,q10,q11
+	vld1.32		{q12},[r3]!
+	vadd.i32	q13,q13,q9
+	INST(0xe4,0x23,0xfa,0xf3)	@ sha256su0 q9,q10
+	vmov		q2,q0
+	INST(0x6a,0x0c,0x02,0xf3)	@ sha256h q0,q1,q13
+	INST(0x6a,0x2c,0x14,0xf3)	@ sha256h2 q1,q2,q13
+	INST(0xe0,0x2c,0x66,0xf3)	@ sha256su1 q9,q11,q8
+	vld1.32		{q13},[r3]!
+	vadd.i32	q12,q12,q10
+	INST(0xe6,0x43,0xfa,0xf3)	@ sha256su0 q10,q11
+	vmov		q2,q0
+	INST(0x68,0x0c,0x02,0xf3)	@ sha256h q0,q1,q12
+	INST(0x68,0x2c,0x14,0xf3)	@ sha256h2 q1,q2,q12
+	INST(0xe2,0x4c,0x60,0xf3)	@ sha256su1 q10,q8,q9
+	vld1.32		{q12},[r3]!
+	vadd.i32	q13,q13,q11
+	INST(0xe0,0x63,0xfa,0xf3)	@ sha256su0 q11,q8
+	vmov		q2,q0
+	INST(0x6a,0x0c,0x02,0xf3)	@ sha256h q0,q1,q13
+	INST(0x6a,0x2c,0x14,0xf3)	@ sha256h2 q1,q2,q13
+	INST(0xe4,0x6c,0x62,0xf3)	@ sha256su1 q11,q9,q10
+	vld1.32		{q13},[r3]!
+	vadd.i32	q12,q12,q8
+	vmov		q2,q0
+	INST(0x68,0x0c,0x02,0xf3)	@ sha256h q0,q1,q12
+	INST(0x68,0x2c,0x14,0xf3)	@ sha256h2 q1,q2,q12
+
+	vld1.32		{q12},[r3]!
+	vadd.i32	q13,q13,q9
+	vmov		q2,q0
+	INST(0x6a,0x0c,0x02,0xf3)	@ sha256h q0,q1,q13
+	INST(0x6a,0x2c,0x14,0xf3)	@ sha256h2 q1,q2,q13
+
+	vld1.32		{q13},[r3]
+	vadd.i32	q12,q12,q10
+	sub		r3,r3,#256-16	@ rewind
+	vmov		q2,q0
+	INST(0x68,0x0c,0x02,0xf3)	@ sha256h q0,q1,q12
+	INST(0x68,0x2c,0x14,0xf3)	@ sha256h2 q1,q2,q12
+
+	vadd.i32	q13,q13,q11
+	vmov		q2,q0
+	INST(0x6a,0x0c,0x02,0xf3)	@ sha256h q0,q1,q13
+	INST(0x6a,0x2c,0x14,0xf3)	@ sha256h2 q1,q2,q13
+
+	vadd.i32	q0,q0,q14
+	vadd.i32	q1,q1,q15
+	it		ne
+	bne		.Loop_v8
+
+	vst1.32		{q0,q1},[r0]
+
+	bx	lr		@ bx lr
+.size	sha256_block_data_order_armv8,.-sha256_block_data_order_armv8
+#endif
+.asciz  "SHA256 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro@openssl.org>"
+.align	2
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+.comm   OPENSSL_armcap_P,4,4
+#endif
diff --git a/arch/arm/crypto/sha256_glue.c b/arch/arm/crypto/sha256_glue.c
new file mode 100644
index 0000000..0d6fc43
--- /dev/null
+++ b/arch/arm/crypto/sha256_glue.c
@@ -0,0 +1,246 @@
+/*
+ * Glue code for the SHA256 Secure Hash Algorithm assembly implementation
+ * using optimized ARM assembler and NEON instructions.
+ *
+ * Copyright © 2015 Google Inc.
+ *
+ * This file is based on sha256_ssse3_glue.c:
+ *   Copyright (C) 2013 Intel Corporation
+ *   Author: Tim Chen <tim.c.chen@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/internal/hash.h>
+#include <linux/crypto.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/cryptohash.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <crypto/sha.h>
+#include <asm/byteorder.h>
+#include <asm/simd.h>
+#include <asm/neon.h>
+#include "sha256_glue.h"
+
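+/*
+ * Entry point of the accompanying ARMv4 assembly: processes num_blks
+ * 64-byte blocks from data and updates the eight 32-bit state words at
+ * digest. The caller buffers any partial block.
+ */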
+asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
+				      unsigned int num_blks);
+
+int sha256_init(struct shash_desc *desc)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+
+	sctx->state[0] = SHA256_H0;
+	sctx->state[1] = SHA256_H1;
+	sctx->state[2] = SHA256_H2;
+	sctx->state[3] = SHA256_H3;
+	sctx->state[4] = SHA256_H4;
+	sctx->state[5] = SHA256_H5;
+	sctx->state[6] = SHA256_H6;
+	sctx->state[7] = SHA256_H7;
+	sctx->count = 0;
+
+	return 0;
+}
+
+int sha224_init(struct shash_desc *desc)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+
+	sctx->state[0] = SHA224_H0;
+	sctx->state[1] = SHA224_H1;
+	sctx->state[2] = SHA224_H2;
+	sctx->state[3] = SHA224_H3;
+	sctx->state[4] = SHA224_H4;
+	sctx->state[5] = SHA224_H5;
+	sctx->state[6] = SHA224_H6;
+	sctx->state[7] = SHA224_H7;
+	sctx->count = 0;
+
+	return 0;
+}
+
+int __sha256_update(struct shash_desc *desc, const u8 *data, unsigned int len,
+		    unsigned int partial)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+	unsigned int done = 0;
+
+	sctx->count += len;
+
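+	/* Complete the buffered partial block before bulk processing. */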
+	if (partial) {
+		done = SHA256_BLOCK_SIZE - partial;
+		memcpy(sctx->buf + partial, data, done);
+		sha256_block_data_order(sctx->state, sctx->buf, 1);
+	}
+
+	if (len - done >= SHA256_BLOCK_SIZE) {
+		const unsigned int rounds = (len - done) / SHA256_BLOCK_SIZE;
+
+		sha256_block_data_order(sctx->state, data + done, rounds);
+		done += rounds * SHA256_BLOCK_SIZE;
+	}
+
+	memcpy(sctx->buf, data + done, len - done);
+
+	return 0;
+}
+
+int sha256_update(struct shash_desc *desc, const u8 *data, unsigned int len)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+	unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
+
+	/* Handle the fast case right here */
+	if (partial + len < SHA256_BLOCK_SIZE) {
+		sctx->count += len;
+		memcpy(sctx->buf + partial, data, len);
+
+		return 0;
+	}
+
+	return __sha256_update(desc, data, len, partial);
+}
+
+/* Add padding and return the message digest. */
+static int sha256_final(struct shash_desc *desc, u8 *out)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+	unsigned int i, index, padlen;
+	__be32 *dst = (__be32 *)out;
+	__be64 bits;
+	static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, };
+
+	/* save number of bits */
+	bits = cpu_to_be64(sctx->count << 3);
+
+	/* Pad out to 56 mod 64 and append length */
+	index = sctx->count % SHA256_BLOCK_SIZE;
+	padlen = (index < 56) ? (56 - index) : ((SHA256_BLOCK_SIZE+56)-index);
+
+	/* We need to fill a whole block for __sha256_update */
+	if (padlen <= 56) {
+		sctx->count += padlen;
+		memcpy(sctx->buf + index, padding, padlen);
+	} else {
+		__sha256_update(desc, padding, padlen, index);
+	}
+	__sha256_update(desc, (const u8 *)&bits, sizeof(bits), 56);
+
+	/* Store state in digest */
+	for (i = 0; i < 8; i++)
+		dst[i] = cpu_to_be32(sctx->state[i]);
+
+	/* Wipe context */
+	memset(sctx, 0, sizeof(*sctx));
+
+	return 0;
+}
+
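+/*
+ * SHA-224 reuses the SHA-256 transform; only the initial state differs
+ * and the output is truncated to 28 bytes.
+ */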
+static int sha224_final(struct shash_desc *desc, u8 *out)
+{
+	u8 D[SHA256_DIGEST_SIZE];
+
+	sha256_final(desc, D);
+
+	memcpy(out, D, SHA224_DIGEST_SIZE);
+	memset(D, 0, SHA256_DIGEST_SIZE);
+
+	return 0;
+}
+
+int sha256_export(struct shash_desc *desc, void *out)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+
+	memcpy(out, sctx, sizeof(*sctx));
+
+	return 0;
+}
+
+int sha256_import(struct shash_desc *desc, const void *in)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+
+	memcpy(sctx, in, sizeof(*sctx));
+
+	return 0;
+}
+
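+/*
+ * Priority 150 ranks these above the generic C implementation, while the
+ * NEON variants registered from sha256_neon_glue.c use 250 and therefore
+ * win on NEON-capable parts.
+ */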
+static struct shash_alg algs[] = { {
+	.digestsize	=	SHA256_DIGEST_SIZE,
+	.init		=	sha256_init,
+	.update		=	sha256_update,
+	.final		=	sha256_final,
+	.export		=	sha256_export,
+	.import		=	sha256_import,
+	.descsize	=	sizeof(struct sha256_state),
+	.statesize	=	sizeof(struct sha256_state),
+	.base		=	{
+		.cra_name	=	"sha256",
+		.cra_driver_name =	"sha256-asm",
+		.cra_priority	=	150,
+		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize	=	SHA256_BLOCK_SIZE,
+		.cra_module	=	THIS_MODULE,
+	}
+}, {
+	.digestsize	=	SHA224_DIGEST_SIZE,
+	.init		=	sha224_init,
+	.update		=	sha256_update,
+	.final		=	sha224_final,
+	.export		=	sha256_export,
+	.import		=	sha256_import,
+	.descsize	=	sizeof(struct sha256_state),
+	.statesize	=	sizeof(struct sha256_state),
+	.base		=	{
+		.cra_name	=	"sha224",
+		.cra_driver_name =	"sha224-asm",
+		.cra_priority	=	150,
+		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize	=	SHA224_BLOCK_SIZE,
+		.cra_module	=	THIS_MODULE,
+	}
+} };
+
+static int __init sha256_mod_init(void)
+{
+	int res = crypto_register_shashes(algs, ARRAY_SIZE(algs));
+
+	if (res < 0)
+		return res;
+
+	if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && cpu_has_neon()) {
+		res = crypto_register_shashes(sha256_neon_algs,
+					      ARRAY_SIZE(sha256_neon_algs));
+
+		if (res < 0)
+			crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+	}
+
+	return res;
+}
+
+static void __exit sha256_mod_fini(void)
+{
+	crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+
+	if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && cpu_has_neon())
+		crypto_unregister_shashes(sha256_neon_algs,
+					  ARRAY_SIZE(sha256_neon_algs));
+}
+
+module_init(sha256_mod_init);
+module_exit(sha256_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm (ARM), including NEON");
+
+MODULE_ALIAS("sha256");
diff --git a/arch/arm/crypto/sha256_glue.h b/arch/arm/crypto/sha256_glue.h
new file mode 100644
index 0000000..0312f4f
--- /dev/null
+++ b/arch/arm/crypto/sha256_glue.h
@@ -0,0 +1,23 @@
+#ifndef _CRYPTO_SHA256_GLUE_H
+#define _CRYPTO_SHA256_GLUE_H
+
+#include <linux/crypto.h>
+#include <crypto/sha.h>
+
+extern struct shash_alg sha256_neon_algs[2];
+
+extern int sha256_init(struct shash_desc *desc);
+
+extern int sha224_init(struct shash_desc *desc);
+
+extern int __sha256_update(struct shash_desc *desc, const u8 *data,
+			   unsigned int len, unsigned int partial);
+
+extern int sha256_update(struct shash_desc *desc, const u8 *data,
+			 unsigned int len);
+
+extern int sha256_export(struct shash_desc *desc, void *out);
+
+extern int sha256_import(struct shash_desc *desc, const void *in);
+
+#endif /* _CRYPTO_SHA256_GLUE_H */
diff --git a/arch/arm/crypto/sha256_neon_glue.c b/arch/arm/crypto/sha256_neon_glue.c
new file mode 100644
index 0000000..3ff0a7f
--- /dev/null
+++ b/arch/arm/crypto/sha256_neon_glue.c
@@ -0,0 +1,172 @@
+/*
+ * Glue code for the SHA256 Secure Hash Algorithm assembly implementation
+ * using NEON instructions.
+ *
+ * Copyright © 2015 Google Inc.
+ *
+ * This file is based on sha512_neon_glue.c:
+ *   Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/internal/hash.h>
+#include <linux/cryptohash.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <crypto/sha.h>
+#include <asm/byteorder.h>
+#include <asm/simd.h>
+#include <asm/neon.h>
+#include "sha256_glue.h"
+
+asmlinkage void sha256_block_data_order_neon(u32 *digest, const void *data,
+				      unsigned int num_blks);
+
+static int __sha256_neon_update(struct shash_desc *desc, const u8 *data,
+				unsigned int len, unsigned int partial)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+	unsigned int done = 0;
+
+	sctx->count += len;
+
+	if (partial) {
+		done = SHA256_BLOCK_SIZE - partial;
+		memcpy(sctx->buf + partial, data, done);
+		sha256_block_data_order_neon(sctx->state, sctx->buf, 1);
+	}
+
+	if (len - done >= SHA256_BLOCK_SIZE) {
+		const unsigned int rounds = (len - done) / SHA256_BLOCK_SIZE;
+
+		sha256_block_data_order_neon(sctx->state, data + done, rounds);
+		done += rounds * SHA256_BLOCK_SIZE;
+	}
+
+	memcpy(sctx->buf, data + done, len - done);
+
+	return 0;
+}
+
+static int sha256_neon_update(struct shash_desc *desc, const u8 *data,
+			      unsigned int len)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+	unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
+	int res;
+
+	/* Handle the fast case right here */
+	if (partial + len < SHA256_BLOCK_SIZE) {
+		sctx->count += len;
+		memcpy(sctx->buf + partial, data, len);
+
+		return 0;
+	}
+
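+	/*
+	 * NEON registers cannot be used in every context (e.g. from
+	 * interrupts), so fall back to the scalar assembly whenever
+	 * may_use_simd() says kernel_neon_begin() is not safe here.
+	 */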
+	if (!may_use_simd()) {
+		res = __sha256_update(desc, data, len, partial);
+	} else {
+		kernel_neon_begin();
+		res = __sha256_neon_update(desc, data, len, partial);
+		kernel_neon_end();
+	}
+
+	return res;
+}
+
+/* Add padding and return the message digest. */
+static int sha256_neon_final(struct shash_desc *desc, u8 *out)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+	unsigned int i, index, padlen;
+	__be32 *dst = (__be32 *)out;
+	__be64 bits;
+	static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, };
+
+	/* save number of bits */
+	bits = cpu_to_be64(sctx->count << 3);
+
+	/* Pad out to 56 mod 64 and append length */
+	index = sctx->count % SHA256_BLOCK_SIZE;
+	padlen = (index < 56) ? (56 - index) : ((SHA256_BLOCK_SIZE+56)-index);
+
+	if (!may_use_simd()) {
+		sha256_update(desc, padding, padlen);
+		sha256_update(desc, (const u8 *)&bits, sizeof(bits));
+	} else {
+		kernel_neon_begin();
+		/* We need to fill a whole block for __sha256_neon_update() */
+		if (padlen <= 56) {
+			sctx->count += padlen;
+			memcpy(sctx->buf + index, padding, padlen);
+		} else {
+			__sha256_neon_update(desc, padding, padlen, index);
+		}
+		__sha256_neon_update(desc, (const u8 *)&bits,
+					sizeof(bits), 56);
+		kernel_neon_end();
+	}
+
+	/* Store state in digest */
+	for (i = 0; i < 8; i++)
+		dst[i] = cpu_to_be32(sctx->state[i]);
+
+	/* Wipe context */
+	memset(sctx, 0, sizeof(*sctx));
+
+	return 0;
+}
+
+static int sha224_neon_final(struct shash_desc *desc, u8 *out)
+{
+	u8 D[SHA256_DIGEST_SIZE];
+
+	sha256_neon_final(desc, D);
+
+	memcpy(out, D, SHA224_DIGEST_SIZE);
+	memset(D, 0, SHA256_DIGEST_SIZE);
+
+	return 0;
+}
+
+struct shash_alg sha256_neon_algs[] = { {
+	.digestsize	=	SHA256_DIGEST_SIZE,
+	.init		=	sha256_init,
+	.update		=	sha256_neon_update,
+	.final		=	sha256_neon_final,
+	.export		=	sha256_export,
+	.import		=	sha256_import,
+	.descsize	=	sizeof(struct sha256_state),
+	.statesize	=	sizeof(struct sha256_state),
+	.base		=	{
+		.cra_name	=	"sha256",
+		.cra_driver_name =	"sha256-neon",
+		.cra_priority	=	250,
+		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize	=	SHA256_BLOCK_SIZE,
+		.cra_module	=	THIS_MODULE,
+	}
+}, {
+	.digestsize	=	SHA224_DIGEST_SIZE,
+	.init		=	sha224_init,
+	.update		=	sha256_neon_update,
+	.final		=	sha224_neon_final,
+	.export		=	sha256_export,
+	.import		=	sha256_import,
+	.descsize	=	sizeof(struct sha256_state),
+	.statesize	=	sizeof(struct sha256_state),
+	.base		=	{
+		.cra_name	=	"sha224",
+		.cra_driver_name =	"sha224-neon",
+		.cra_priority	=	250,
+		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize	=	SHA224_BLOCK_SIZE,
+		.cra_module	=	THIS_MODULE,
+	}
+} };
diff --git a/arch/arm/crypto/sha512-armv7-neon.S b/arch/arm/crypto/sha512-armv7-neon.S
new file mode 100644
index 0000000..fe99472
--- /dev/null
+++ b/arch/arm/crypto/sha512-armv7-neon.S
@@ -0,0 +1,455 @@
+/* sha512-armv7-neon.S  -  ARM/NEON assembly implementation of SHA-512 transform
+ *
+ * Copyright © 2013-2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/linkage.h>
+
+
+.syntax unified
+.code   32
+.fpu neon
+
+.text
+
+/* structure of SHA512_CONTEXT */
+#define hd_a 0
+#define hd_b ((hd_a) + 8)
+#define hd_c ((hd_b) + 8)
+#define hd_d ((hd_c) + 8)
+#define hd_e ((hd_d) + 8)
+#define hd_f ((hd_e) + 8)
+#define hd_g ((hd_f) + 8)
+
+/* register macros */
+#define RK %r2
+
+#define RA d0
+#define RB d1
+#define RC d2
+#define RD d3
+#define RE d4
+#define RF d5
+#define RG d6
+#define RH d7
+
+#define RT0 d8
+#define RT1 d9
+#define RT2 d10
+#define RT3 d11
+#define RT4 d12
+#define RT5 d13
+#define RT6 d14
+#define RT7 d15
+
+#define RT01q q4
+#define RT23q q5
+#define RT45q q6
+#define RT67q q7
+
+#define RW0 d16
+#define RW1 d17
+#define RW2 d18
+#define RW3 d19
+#define RW4 d20
+#define RW5 d21
+#define RW6 d22
+#define RW7 d23
+#define RW8 d24
+#define RW9 d25
+#define RW10 d26
+#define RW11 d27
+#define RW12 d28
+#define RW13 d29
+#define RW14 d30
+#define RW15 d31
+
+#define RW01q q8
+#define RW23q q9
+#define RW45q q10
+#define RW67q q11
+#define RW89q q12
+#define RW1011q q13
+#define RW1213q q14
+#define RW1415q q15
+
+/***********************************************************************
+ * ARM assembly implementation of sha512 transform
+ ***********************************************************************/
+#define rounds2_0_63(ra, rb, rc, rd, re, rf, rg, rh, rw0, rw1, rw01q, rw2, \
+                     rw23q, rw1415q, rw9, rw10, interleave_op, arg1) \
+	/* t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[t]; */ \
+	vshr.u64 RT2, re, #14; \
+	vshl.u64 RT3, re, #64 - 14; \
+	interleave_op(arg1); \
+	vshr.u64 RT4, re, #18; \
+	vshl.u64 RT5, re, #64 - 18; \
+	vld1.64 {RT0}, [RK]!; \
+	veor.64 RT23q, RT23q, RT45q; \
+	vshr.u64 RT4, re, #41; \
+	vshl.u64 RT5, re, #64 - 41; \
+	vadd.u64 RT0, RT0, rw0; \
+	veor.64 RT23q, RT23q, RT45q; \
+	vmov.64 RT7, re; \
+	veor.64 RT1, RT2, RT3; \
+	vbsl.64 RT7, rf, rg; \
+	\
+	vadd.u64 RT1, RT1, rh; \
+	vshr.u64 RT2, ra, #28; \
+	vshl.u64 RT3, ra, #64 - 28; \
+	vadd.u64 RT1, RT1, RT0; \
+	vshr.u64 RT4, ra, #34; \
+	vshl.u64 RT5, ra, #64 - 34; \
+	vadd.u64 RT1, RT1, RT7; \
+	\
+	/* h = Sum0 (a) + Maj (a, b, c); */ \
+	veor.64 RT23q, RT23q, RT45q; \
+	vshr.u64 RT4, ra, #39; \
+	vshl.u64 RT5, ra, #64 - 39; \
+	veor.64 RT0, ra, rb; \
+	veor.64 RT23q, RT23q, RT45q; \
+	vbsl.64 RT0, rc, rb; \
+	vadd.u64 rd, rd, RT1; /* d+=t1; */ \
+	veor.64 rh, RT2, RT3; \
+	\
+	/* t1 = g + Sum1 (d) + Ch (d, e, f) + k[t] + w[t]; */ \
+	vshr.u64 RT2, rd, #14; \
+	vshl.u64 RT3, rd, #64 - 14; \
+	vadd.u64 rh, rh, RT0; \
+	vshr.u64 RT4, rd, #18; \
+	vshl.u64 RT5, rd, #64 - 18; \
+	vadd.u64 rh, rh, RT1; /* h+=t1; */ \
+	vld1.64 {RT0}, [RK]!; \
+	veor.64 RT23q, RT23q, RT45q; \
+	vshr.u64 RT4, rd, #41; \
+	vshl.u64 RT5, rd, #64 - 41; \
+	vadd.u64 RT0, RT0, rw1; \
+	veor.64 RT23q, RT23q, RT45q; \
+	vmov.64 RT7, rd; \
+	veor.64 RT1, RT2, RT3; \
+	vbsl.64 RT7, re, rf; \
+	\
+	vadd.u64 RT1, RT1, rg; \
+	vshr.u64 RT2, rh, #28; \
+	vshl.u64 RT3, rh, #64 - 28; \
+	vadd.u64 RT1, RT1, RT0; \
+	vshr.u64 RT4, rh, #34; \
+	vshl.u64 RT5, rh, #64 - 34; \
+	vadd.u64 RT1, RT1, RT7; \
+	\
+	/* g = Sum0 (h) + Maj (h, a, b); */ \
+	veor.64 RT23q, RT23q, RT45q; \
+	vshr.u64 RT4, rh, #39; \
+	vshl.u64 RT5, rh, #64 - 39; \
+	veor.64 RT0, rh, ra; \
+	veor.64 RT23q, RT23q, RT45q; \
+	vbsl.64 RT0, rb, ra; \
+	vadd.u64 rc, rc, RT1; /* c+=t1; */ \
+	veor.64 rg, RT2, RT3; \
+	\
+	/* w[0] += S1 (w[14]) + w[9] + S0 (w[1]); */ \
+	/* w[1] += S1 (w[15]) + w[10] + S0 (w[2]); */ \
+	\
+	/**** S0(w[1:2]) */ \
+	\
+	/* w[0:1] += w[9:10] */ \
+	/* RT23q = rw1:rw2 */ \
+	vext.u64 RT23q, rw01q, rw23q, #1; \
+	vadd.u64 rw0, rw9; \
+	vadd.u64 rg, rg, RT0; \
+	vadd.u64 rw1, rw10;\
+	vadd.u64 rg, rg, RT1; /* g+=t1; */ \
+	\
+	vshr.u64 RT45q, RT23q, #1; \
+	vshl.u64 RT67q, RT23q, #64 - 1; \
+	vshr.u64 RT01q, RT23q, #8; \
+	veor.u64 RT45q, RT45q, RT67q; \
+	vshl.u64 RT67q, RT23q, #64 - 8; \
+	veor.u64 RT45q, RT45q, RT01q; \
+	vshr.u64 RT01q, RT23q, #7; \
+	veor.u64 RT45q, RT45q, RT67q; \
+	\
+	/**** S1(w[14:15]) */ \
+	vshr.u64 RT23q, rw1415q, #6; \
+	veor.u64 RT01q, RT01q, RT45q; \
+	vshr.u64 RT45q, rw1415q, #19; \
+	vshl.u64 RT67q, rw1415q, #64 - 19; \
+	veor.u64 RT23q, RT23q, RT45q; \
+	vshr.u64 RT45q, rw1415q, #61; \
+	veor.u64 RT23q, RT23q, RT67q; \
+	vshl.u64 RT67q, rw1415q, #64 - 61; \
+	veor.u64 RT23q, RT23q, RT45q; \
+	vadd.u64 rw01q, RT01q; /* w[0:1] += S(w[1:2]) */ \
+	veor.u64 RT01q, RT23q, RT67q;
+#define vadd_RT01q(rw01q) \
+	/* w[0:1] += S(w[14:15]) */ \
+	vadd.u64 rw01q, RT01q;
+
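+/*
+ * Message schedule reference (sketch): each rounds2_0_63 invocation
+ * also expands two w[] words in the rolling 16-word window,
+ *
+ *	w[t] += s1(w[t-2]) + w[t-7] + s0(w[t-15])	(indices mod 16)
+ *
+ * with s0(x) = ror64(x,1) ^ ror64(x,8) ^ (x >> 7) and
+ *      s1(x) = ror64(x,19) ^ ror64(x,61) ^ (x >> 6). The s1 addition
+ * is deferred into the next invocation via vadd_RT01q above; the last
+ * 16 rounds consume no new w[] words, hence the slimmer rounds2_64_79.
+ */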
+#define dummy(_) /*_*/
+
+#define rounds2_64_79(ra, rb, rc, rd, re, rf, rg, rh, rw0, rw1, \
+	              interleave_op1, arg1, interleave_op2, arg2) \
+	/* t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[t]; */ \
+	vshr.u64 RT2, re, #14; \
+	vshl.u64 RT3, re, #64 - 14; \
+	interleave_op1(arg1); \
+	vshr.u64 RT4, re, #18; \
+	vshl.u64 RT5, re, #64 - 18; \
+	interleave_op2(arg2); \
+	vld1.64 {RT0}, [RK]!; \
+	veor.64 RT23q, RT23q, RT45q; \
+	vshr.u64 RT4, re, #41; \
+	vshl.u64 RT5, re, #64 - 41; \
+	vadd.u64 RT0, RT0, rw0; \
+	veor.64 RT23q, RT23q, RT45q; \
+	vmov.64 RT7, re; \
+	veor.64 RT1, RT2, RT3; \
+	vbsl.64 RT7, rf, rg; \
+	\
+	vadd.u64 RT1, RT1, rh; \
+	vshr.u64 RT2, ra, #28; \
+	vshl.u64 RT3, ra, #64 - 28; \
+	vadd.u64 RT1, RT1, RT0; \
+	vshr.u64 RT4, ra, #34; \
+	vshl.u64 RT5, ra, #64 - 34; \
+	vadd.u64 RT1, RT1, RT7; \
+	\
+	/* h = Sum0 (a) + Maj (a, b, c); */ \
+	veor.64 RT23q, RT23q, RT45q; \
+	vshr.u64 RT4, ra, #39; \
+	vshl.u64 RT5, ra, #64 - 39; \
+	veor.64 RT0, ra, rb; \
+	veor.64 RT23q, RT23q, RT45q; \
+	vbsl.64 RT0, rc, rb; \
+	vadd.u64 rd, rd, RT1; /* d+=t1; */ \
+	veor.64 rh, RT2, RT3; \
+	\
+	/* t1 = g + Sum1 (d) + Ch (d, e, f) + k[t] + w[t]; */ \
+	vshr.u64 RT2, rd, #14; \
+	vshl.u64 RT3, rd, #64 - 14; \
+	vadd.u64 rh, rh, RT0; \
+	vshr.u64 RT4, rd, #18; \
+	vshl.u64 RT5, rd, #64 - 18; \
+	vadd.u64 rh, rh, RT1; /* h+=t1; */ \
+	vld1.64 {RT0}, [RK]!; \
+	veor.64 RT23q, RT23q, RT45q; \
+	vshr.u64 RT4, rd, #41; \
+	vshl.u64 RT5, rd, #64 - 41; \
+	vadd.u64 RT0, RT0, rw1; \
+	veor.64 RT23q, RT23q, RT45q; \
+	vmov.64 RT7, rd; \
+	veor.64 RT1, RT2, RT3; \
+	vbsl.64 RT7, re, rf; \
+	\
+	vadd.u64 RT1, RT1, rg; \
+	vshr.u64 RT2, rh, #28; \
+	vshl.u64 RT3, rh, #64 - 28; \
+	vadd.u64 RT1, RT1, RT0; \
+	vshr.u64 RT4, rh, #34; \
+	vshl.u64 RT5, rh, #64 - 34; \
+	vadd.u64 RT1, RT1, RT7; \
+	\
+	/* g = Sum0 (h) + Maj (h, a, b); */ \
+	veor.64 RT23q, RT23q, RT45q; \
+	vshr.u64 RT4, rh, #39; \
+	vshl.u64 RT5, rh, #64 - 39; \
+	veor.64 RT0, rh, ra; \
+	veor.64 RT23q, RT23q, RT45q; \
+	vbsl.64 RT0, rb, ra; \
+	vadd.u64 rc, rc, RT1; /* c+=t1; */ \
+	veor.64 rg, RT2, RT3;
+#define vadd_rg_RT0(rg) \
+	vadd.u64 rg, rg, RT0;
+#define vadd_rg_RT1(rg) \
+	vadd.u64 rg, rg, RT1; /* g+=t1; */
+
+.align 3
+ENTRY(sha512_transform_neon)
+	/* Input:
+	 *	%r0: u64 state[8] (SHA-512 state words)
+	 *	%r1: data
+	 *	%r2: u64 k[] constants
+	 *	%r3: nblks
+	 */
+	push {%lr};
+
+	mov %lr, #0;
+
+	/* Load context to d0-d7 */
+	vld1.64 {RA-RD}, [%r0]!;
+	vld1.64 {RE-RH}, [%r0];
+	sub %r0, #(4*8);
+
+	/* Load input to w[16], d16-d31 */
+	/* NOTE: Assumes that on ARMv7 unaligned accesses are always allowed. */
+	vld1.64 {RW0-RW3}, [%r1]!;
+	vld1.64 {RW4-RW7}, [%r1]!;
+	vld1.64 {RW8-RW11}, [%r1]!;
+	vld1.64 {RW12-RW15}, [%r1]!;
+#ifdef __ARMEL__
+	/* byteswap */
+	vrev64.8 RW01q, RW01q;
+	vrev64.8 RW23q, RW23q;
+	vrev64.8 RW45q, RW45q;
+	vrev64.8 RW67q, RW67q;
+	vrev64.8 RW89q, RW89q;
+	vrev64.8 RW1011q, RW1011q;
+	vrev64.8 RW1213q, RW1213q;
+	vrev64.8 RW1415q, RW1415q;
+#endif
+
+	/* EABI says that d8-d15 must be preserved by callee. */
+	/*vpush {RT0-RT7};*/
+
+.Loop:
+	rounds2_0_63(RA, RB, RC, RD, RE, RF, RG, RH, RW0, RW1, RW01q, RW2,
+		     RW23q, RW1415q, RW9, RW10, dummy, _);
+	b .Lenter_rounds;
+
+.Loop_rounds:
+	rounds2_0_63(RA, RB, RC, RD, RE, RF, RG, RH, RW0, RW1, RW01q, RW2,
+		     RW23q, RW1415q, RW9, RW10, vadd_RT01q, RW1415q);
+.Lenter_rounds:
+	rounds2_0_63(RG, RH, RA, RB, RC, RD, RE, RF, RW2, RW3, RW23q, RW4,
+		     RW45q, RW01q, RW11, RW12, vadd_RT01q, RW01q);
+	rounds2_0_63(RE, RF, RG, RH, RA, RB, RC, RD, RW4, RW5, RW45q, RW6,
+		     RW67q, RW23q, RW13, RW14, vadd_RT01q, RW23q);
+	rounds2_0_63(RC, RD, RE, RF, RG, RH, RA, RB, RW6, RW7, RW67q, RW8,
+		     RW89q, RW45q, RW15, RW0, vadd_RT01q, RW45q);
+	rounds2_0_63(RA, RB, RC, RD, RE, RF, RG, RH, RW8, RW9, RW89q, RW10,
+		     RW1011q, RW67q, RW1, RW2, vadd_RT01q, RW67q);
+	rounds2_0_63(RG, RH, RA, RB, RC, RD, RE, RF, RW10, RW11, RW1011q, RW12,
+		     RW1213q, RW89q, RW3, RW4, vadd_RT01q, RW89q);
+	add %lr, #16;
+	rounds2_0_63(RE, RF, RG, RH, RA, RB, RC, RD, RW12, RW13, RW1213q, RW14,
+		     RW1415q, RW1011q, RW5, RW6, vadd_RT01q, RW1011q);
+	cmp %lr, #64;
+	rounds2_0_63(RC, RD, RE, RF, RG, RH, RA, RB, RW14, RW15, RW1415q, RW0,
+		     RW01q, RW1213q, RW7, RW8, vadd_RT01q, RW1213q);
+	bne .Loop_rounds;
+
+	subs %r3, #1;
+
+	rounds2_64_79(RA, RB, RC, RD, RE, RF, RG, RH, RW0, RW1,
+		      vadd_RT01q, RW1415q, dummy, _);
+	rounds2_64_79(RG, RH, RA, RB, RC, RD, RE, RF, RW2, RW3,
+		      vadd_rg_RT0, RG, vadd_rg_RT1, RG);
+	beq .Lhandle_tail;
+	vld1.64 {RW0-RW3}, [%r1]!;
+	rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW4, RW5,
+		      vadd_rg_RT0, RE, vadd_rg_RT1, RE);
+	rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW6, RW7,
+		      vadd_rg_RT0, RC, vadd_rg_RT1, RC);
+#ifdef __ARMEL__
+	vrev64.8 RW01q, RW01q;
+	vrev64.8 RW23q, RW23q;
+#endif
+	vld1.64 {RW4-RW7}, [%r1]!;
+	rounds2_64_79(RA, RB, RC, RD, RE, RF, RG, RH, RW8, RW9,
+		      vadd_rg_RT0, RA, vadd_rg_RT1, RA);
+	rounds2_64_79(RG, RH, RA, RB, RC, RD, RE, RF, RW10, RW11,
+		      vadd_rg_RT0, RG, vadd_rg_RT1, RG);
+#ifdef __ARMEL__
+	vrev64.8 RW45q, RW45q;
+	vrev64.8 RW67q, RW67q;
+#endif
+	vld1.64 {RW8-RW11}, [%r1]!;
+	rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW12, RW13,
+		      vadd_rg_RT0, RE, vadd_rg_RT1, RE);
+	rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW14, RW15,
+		      vadd_rg_RT0, RC, vadd_rg_RT1, RC);
+#ifdef __ARMEL__
+	vrev64.8 RW89q, RW89q;
+	vrev64.8 RW1011q, RW1011q;
+#endif
+	vld1.64 {RW12-RW15}, [%r1]!;
+	vadd_rg_RT0(RA);
+	vadd_rg_RT1(RA);
+
+	/* Load context */
+	vld1.64 {RT0-RT3}, [%r0]!;
+	vld1.64 {RT4-RT7}, [%r0];
+	sub %r0, #(4*8);
+
+#ifdef __ARMEL__
+	vrev64.8 RW1213q, RW1213q;
+	vrev64.8 RW1415q, RW1415q;
+#endif
+
+	vadd.u64 RA, RT0;
+	vadd.u64 RB, RT1;
+	vadd.u64 RC, RT2;
+	vadd.u64 RD, RT3;
+	vadd.u64 RE, RT4;
+	vadd.u64 RF, RT5;
+	vadd.u64 RG, RT6;
+	vadd.u64 RH, RT7;
+
+	/* Store the first half of context */
+	vst1.64 {RA-RD}, [%r0]!;
+	sub RK, $(8*80);
+	vst1.64 {RE-RH}, [%r0]; /* Store the last half of context */
+	mov %lr, #0;
+	sub %r0, #(4*8);
+
+	b .Loop;
+
+.Lhandle_tail:
+	rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW4, RW5,
+		      vadd_rg_RT0, RE, vadd_rg_RT1, RE);
+	rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW6, RW7,
+		      vadd_rg_RT0, RC, vadd_rg_RT1, RC);
+	rounds2_64_79(RA, RB, RC, RD, RE, RF, RG, RH, RW8, RW9,
+		      vadd_rg_RT0, RA, vadd_rg_RT1, RA);
+	rounds2_64_79(RG, RH, RA, RB, RC, RD, RE, RF, RW10, RW11,
+		      vadd_rg_RT0, RG, vadd_rg_RT1, RG);
+	rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW12, RW13,
+		      vadd_rg_RT0, RE, vadd_rg_RT1, RE);
+	rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW14, RW15,
+		      vadd_rg_RT0, RC, vadd_rg_RT1, RC);
+
+	/* Load context to d16-d23 */
+	vld1.64 {RW0-RW3}, [%r0]!;
+	vadd_rg_RT0(RA);
+	vld1.64 {RW4-RW7}, [%r0];
+	vadd_rg_RT1(RA);
+	sub %r0, #(4*8);
+
+	vadd.u64 RA, RW0;
+	vadd.u64 RB, RW1;
+	vadd.u64 RC, RW2;
+	vadd.u64 RD, RW3;
+	vadd.u64 RE, RW4;
+	vadd.u64 RF, RW5;
+	vadd.u64 RG, RW6;
+	vadd.u64 RH, RW7;
+
+	/* Store the first half of context */
+	vst1.64 {RA-RD}, [%r0]!;
+
+	/* Clear used registers */
+	/* d16-d31 */
+	veor.u64 RW01q, RW01q;
+	veor.u64 RW23q, RW23q;
+	veor.u64 RW45q, RW45q;
+	veor.u64 RW67q, RW67q;
+	vst1.64 {RE-RH}, [%r0]; /* Store the last half of context */
+	veor.u64 RW89q, RW89q;
+	veor.u64 RW1011q, RW1011q;
+	veor.u64 RW1213q, RW1213q;
+	veor.u64 RW1415q, RW1415q;
+	/* d8-d15 */
+	/*vpop {RT0-RT7};*/
+	/* d0-d7 (q0-q3) */
+	veor.u64 %q0, %q0;
+	veor.u64 %q1, %q1;
+	veor.u64 %q2, %q2;
+	veor.u64 %q3, %q3;
+
+	pop {%pc};
+ENDPROC(sha512_transform_neon)
diff --git a/arch/arm/crypto/sha512_neon_glue.c b/arch/arm/crypto/sha512_neon_glue.c
new file mode 100644
index 0000000..0d2758f
--- /dev/null
+++ b/arch/arm/crypto/sha512_neon_glue.c
@@ -0,0 +1,305 @@
+/*
+ * Glue code for the SHA512 Secure Hash Algorithm assembly implementation
+ * using NEON instructions.
+ *
+ * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ *
+ * This file is based on sha512_ssse3_glue.c:
+ *   Copyright (C) 2013 Intel Corporation
+ *   Author: Tim Chen <tim.c.chen@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/cryptohash.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <crypto/sha.h>
+#include <asm/byteorder.h>
+#include <asm/simd.h>
+#include <asm/neon.h>
+
+
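+/*
+ * SHA-512 round constants from FIPS 180-4: the first 64 bits of the
+ * fractional parts of the cube roots of the first eighty primes.
+ */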
+static const u64 sha512_k[] = {
+	0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL,
+	0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL,
+	0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
+	0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL,
+	0xd807aa98a3030242ULL, 0x12835b0145706fbeULL,
+	0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
+	0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL,
+	0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL,
+	0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL,
+	0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL,
+	0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL,
+	0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
+	0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL,
+	0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL,
+	0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL,
+	0x06ca6351e003826fULL, 0x142929670a0e6e70ULL,
+	0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL,
+	0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
+	0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL,
+	0x81c2c92e47edaee6ULL, 0x92722c851482353bULL,
+	0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL,
+	0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL,
+	0xd192e819d6ef5218ULL, 0xd69906245565a910ULL,
+	0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
+	0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL,
+	0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL,
+	0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL,
+	0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL,
+	0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL,
+	0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
+	0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL,
+	0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL,
+	0xca273eceea26619cULL, 0xd186b8c721c0c207ULL,
+	0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL,
+	0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL,
+	0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
+	0x28db77f523047d84ULL, 0x32caab7b40c72493ULL,
+	0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL,
+	0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL,
+	0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL
+};
+
+
+asmlinkage void sha512_transform_neon(u64 *digest, const void *data,
+				      const u64 k[], unsigned int num_blks);
+
+
+static int sha512_neon_init(struct shash_desc *desc)
+{
+	struct sha512_state *sctx = shash_desc_ctx(desc);
+
+	sctx->state[0] = SHA512_H0;
+	sctx->state[1] = SHA512_H1;
+	sctx->state[2] = SHA512_H2;
+	sctx->state[3] = SHA512_H3;
+	sctx->state[4] = SHA512_H4;
+	sctx->state[5] = SHA512_H5;
+	sctx->state[6] = SHA512_H6;
+	sctx->state[7] = SHA512_H7;
+	sctx->count[0] = sctx->count[1] = 0;
+
+	return 0;
+}
+
+static int __sha512_neon_update(struct shash_desc *desc, const u8 *data,
+				unsigned int len, unsigned int partial)
+{
+	struct sha512_state *sctx = shash_desc_ctx(desc);
+	unsigned int done = 0;
+
+	sctx->count[0] += len;
+	if (sctx->count[0] < len)
+		sctx->count[1]++;
+
+	if (partial) {
+		done = SHA512_BLOCK_SIZE - partial;
+		memcpy(sctx->buf + partial, data, done);
+		sha512_transform_neon(sctx->state, sctx->buf, sha512_k, 1);
+	}
+
+	if (len - done >= SHA512_BLOCK_SIZE) {
+		const unsigned int rounds = (len - done) / SHA512_BLOCK_SIZE;
+
+		sha512_transform_neon(sctx->state, data + done, sha512_k,
+				      rounds);
+
+		done += rounds * SHA512_BLOCK_SIZE;
+	}
+
+	memcpy(sctx->buf, data + done, len - done);
+
+	return 0;
+}
+
+static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
+			     unsigned int len)
+{
+	struct sha512_state *sctx = shash_desc_ctx(desc);
+	unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
+	int res;
+
+	/* Handle the fast case right here */
+	if (partial + len < SHA512_BLOCK_SIZE) {
+		sctx->count[0] += len;
+		if (sctx->count[0] < len)
+			sctx->count[1]++;
+		memcpy(sctx->buf + partial, data, len);
+
+		return 0;
+	}
+
+	if (!may_use_simd()) {
+		res = crypto_sha512_update(desc, data, len);
+	} else {
+		kernel_neon_begin();
+		res = __sha512_neon_update(desc, data, len, partial);
+		kernel_neon_end();
+	}
+
+	return res;
+}
+
+
+/* Add padding and return the message digest. */
+static int sha512_neon_final(struct shash_desc *desc, u8 *out)
+{
+	struct sha512_state *sctx = shash_desc_ctx(desc);
+	unsigned int i, index, padlen;
+	__be64 *dst = (__be64 *)out;
+	__be64 bits[2];
+	static const u8 padding[SHA512_BLOCK_SIZE] = { 0x80, };
+
+	/* save number of bits */
+	bits[1] = cpu_to_be64(sctx->count[0] << 3);
+	bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
+
+	/* Pad out to 112 mod 128 and append length */
+	index = sctx->count[0] & 0x7f;
+	padlen = (index < 112) ? (112 - index) : ((128+112) - index);
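+	/*
+	 * Example (sketch): with count[0] = 200 bytes hashed, index =
+	 * 200 & 0x7f = 72; since 72 < 112, padlen = 40, and appending
+	 * the 16 length bytes ends exactly on a 128-byte block boundary.
+	 */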
+
+	if (!may_use_simd()) {
+		crypto_sha512_update(desc, padding, padlen);
+		crypto_sha512_update(desc, (const u8 *)&bits, sizeof(bits));
+	} else {
+		kernel_neon_begin();
+		/* We need to fill a whole block for __sha512_neon_update() */
+		if (padlen <= 112) {
+			sctx->count[0] += padlen;
+			if (sctx->count[0] < padlen)
+				sctx->count[1]++;
+			memcpy(sctx->buf + index, padding, padlen);
+		} else {
+			__sha512_neon_update(desc, padding, padlen, index);
+		}
+		__sha512_neon_update(desc, (const u8 *)&bits,
+					sizeof(bits), 112);
+		kernel_neon_end();
+	}
+
+	/* Store state in digest */
+	for (i = 0; i < 8; i++)
+		dst[i] = cpu_to_be64(sctx->state[i]);
+
+	/* Wipe context */
+	memset(sctx, 0, sizeof(*sctx));
+
+	return 0;
+}
+
+static int sha512_neon_export(struct shash_desc *desc, void *out)
+{
+	struct sha512_state *sctx = shash_desc_ctx(desc);
+
+	memcpy(out, sctx, sizeof(*sctx));
+
+	return 0;
+}
+
+static int sha512_neon_import(struct shash_desc *desc, const void *in)
+{
+	struct sha512_state *sctx = shash_desc_ctx(desc);
+
+	memcpy(sctx, in, sizeof(*sctx));
+
+	return 0;
+}
+
+static int sha384_neon_init(struct shash_desc *desc)
+{
+	struct sha512_state *sctx = shash_desc_ctx(desc);
+
+	sctx->state[0] = SHA384_H0;
+	sctx->state[1] = SHA384_H1;
+	sctx->state[2] = SHA384_H2;
+	sctx->state[3] = SHA384_H3;
+	sctx->state[4] = SHA384_H4;
+	sctx->state[5] = SHA384_H5;
+	sctx->state[6] = SHA384_H6;
+	sctx->state[7] = SHA384_H7;
+
+	sctx->count[0] = sctx->count[1] = 0;
+
+	return 0;
+}
+
+static int sha384_neon_final(struct shash_desc *desc, u8 *hash)
+{
+	u8 D[SHA512_DIGEST_SIZE];
+
+	sha512_neon_final(desc, D);
+
+	memcpy(hash, D, SHA384_DIGEST_SIZE);
+	memset(D, 0, SHA512_DIGEST_SIZE);
+
+	return 0;
+}
+
+static struct shash_alg algs[] = { {
+	.digestsize	=	SHA512_DIGEST_SIZE,
+	.init		=	sha512_neon_init,
+	.update		=	sha512_neon_update,
+	.final		=	sha512_neon_final,
+	.export		=	sha512_neon_export,
+	.import		=	sha512_neon_import,
+	.descsize	=	sizeof(struct sha512_state),
+	.statesize	=	sizeof(struct sha512_state),
+	.base		=	{
+		.cra_name	=	"sha512",
+		.cra_driver_name =	"sha512-neon",
+		.cra_priority	=	250,
+		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize	=	SHA512_BLOCK_SIZE,
+		.cra_module	=	THIS_MODULE,
+	}
+},  {
+	.digestsize	=	SHA384_DIGEST_SIZE,
+	.init		=	sha384_neon_init,
+	.update		=	sha512_neon_update,
+	.final		=	sha384_neon_final,
+	.export		=	sha512_neon_export,
+	.import		=	sha512_neon_import,
+	.descsize	=	sizeof(struct sha512_state),
+	.statesize	=	sizeof(struct sha512_state),
+	.base		=	{
+		.cra_name	=	"sha384",
+		.cra_driver_name =	"sha384-neon",
+		.cra_priority	=	250,
+		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize	=	SHA384_BLOCK_SIZE,
+		.cra_module	=	THIS_MODULE,
+	}
+} };
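+/*
+ * cra_priority 250 outranks the generic C implementations, so the
+ * crypto API resolves "sha512"/"sha384" to these transforms whenever
+ * this module is loaded and NEON is available.
+ */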
+
+static int __init sha512_neon_mod_init(void)
+{
+	if (!cpu_has_neon())
+		return -ENODEV;
+
+	return crypto_register_shashes(algs, ARRAY_SIZE(algs));
+}
+
+static void __exit sha512_neon_mod_fini(void)
+{
+	crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+}
+
+module_init(sha512_neon_mod_init);
+module_exit(sha512_neon_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, NEON accelerated");
+
+MODULE_ALIAS("sha512");
+MODULE_ALIAS("sha384");
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index d3db398..6577b8a 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -24,6 +24,7 @@
 generic-y += serial.h
 generic-y += shmbuf.h
 generic-y += siginfo.h
+generic-y += simd.h
 generic-y += sizes.h
 generic-y += socket.h
 generic-y += sockios.h
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 8dcd9c7..b00ef07 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -59,6 +59,21 @@
 #define smp_wmb()	dmb()
 #endif
 
+#define smp_store_release(p, v)						\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	smp_mb();							\
+	ACCESS_ONCE(*p) = (v);						\
+} while (0)
+
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	smp_mb();							\
+	___p1;								\
+})
+
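+/*
+ * Usage sketch (hypothetical flag/data pair): the release store keeps
+ * the data write ordered before the flag update, and the acquire load
+ * keeps the flag check ordered before the data read:
+ *
+ *	writer:	data = compute();
+ *		smp_store_release(&ready, 1);
+ *	reader:	if (smp_load_acquire(&ready))
+ *			consume(data);
+ *
+ * On this architecture both expand to a full smp_mb(), which is
+ * stronger than the minimum the semantics require.
+ */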
 #define read_barrier_depends()		do { } while(0)
 #define smp_read_barrier_depends()	do { } while(0)
 
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index a25e62d..36324ec 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -16,6 +16,7 @@
 #include <asm/shmparam.h>
 #include <asm/cachetype.h>
 #include <asm/outercache.h>
+#include <asm/rodata.h>
 
 #define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
 
diff --git a/arch/arm/include/asm/crypto/sha1.h b/arch/arm/include/asm/crypto/sha1.h
new file mode 100644
index 0000000..75e6a41
--- /dev/null
+++ b/arch/arm/include/asm/crypto/sha1.h
@@ -0,0 +1,10 @@
+#ifndef ASM_ARM_CRYPTO_SHA1_H
+#define ASM_ARM_CRYPTO_SHA1_H
+
+#include <linux/crypto.h>
+#include <crypto/sha.h>
+
+extern int sha1_update_arm(struct shash_desc *desc, const u8 *data,
+			   unsigned int len);
+
+#endif
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index ce6e306..051b726 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -19,8 +19,6 @@
 
 typedef struct user_fp elf_fpregset_t;
 
-#define EM_ARM	40
-
 #define EF_ARM_EABI_MASK	0xff000000
 #define EF_ARM_EABI_UNKNOWN	0x00000000
 #define EF_ARM_EABI_VER1	0x01000000
diff --git a/arch/arm/include/asm/fiq_glue.h b/arch/arm/include/asm/fiq_glue.h
new file mode 100644
index 0000000..a9e244f9
--- /dev/null
+++ b/arch/arm/include/asm/fiq_glue.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_FIQ_GLUE_H
+#define __ASM_FIQ_GLUE_H
+
+struct fiq_glue_handler {
+	void (*fiq)(struct fiq_glue_handler *h, void *regs, void *svc_sp);
+	void (*resume)(struct fiq_glue_handler *h);
+};
+typedef void (*fiq_return_handler_t)(void);
+
+int fiq_glue_register_handler(struct fiq_glue_handler *handler);
+int fiq_glue_set_return_handler(fiq_return_handler_t fiq_return);
+int fiq_glue_clear_return_handler(fiq_return_handler_t fiq_return);
+
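+/*
+ * Registration sketch (hypothetical handler; the .fiq callback runs in
+ * FIQ mode on a limited stack and must not block or take locks):
+ *
+ *	static void my_fiq(struct fiq_glue_handler *h, void *regs,
+ *			   void *svc_sp)
+ *	{
+ *		... poke hardware, stash state for later ...
+ *	}
+ *	static struct fiq_glue_handler my_handler = { .fiq = my_fiq };
+ *	...
+ *	ret = fiq_glue_register_handler(&my_handler);
+ */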
+#ifdef CONFIG_FIQ_GLUE
+void fiq_glue_resume(void);
+#else
+static inline void fiq_glue_resume(void) {}
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 2740c2a..3d7351c 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -5,7 +5,7 @@
 #include <linux/threads.h>
 #include <asm/irq.h>
 
-#define NR_IPI	6
+#define NR_IPI	7
 
 typedef struct {
 	unsigned int __softirq_pending;
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 3b2c40b..0ca0f5a 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -66,6 +66,7 @@
 #define   L2X0_STNDBY_MODE_EN		(1 << 0)
 
 /* Registers shifts and masks */
+#define L2X0_CACHE_ID_REV_MASK		(0x3f)
 #define L2X0_CACHE_ID_PART_MASK		(0xf << 6)
 #define L2X0_CACHE_ID_PART_L210		(1 << 6)
 #define L2X0_CACHE_ID_PART_L310		(3 << 6)
@@ -106,6 +107,8 @@
 
 #define L2X0_WAY_SIZE_SHIFT		3
 
+#define REV_PL310_R2P0				4
+
 #ifndef __ASSEMBLY__
 extern void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask);
 #if defined(CONFIG_CACHE_L2X0) && defined(CONFIG_OF)
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
index 0cf7a6b..4aee45d 100644
--- a/arch/arm/include/asm/hardware/coresight.h
+++ b/arch/arm/include/asm/hardware/coresight.h
@@ -17,15 +17,23 @@
 #define TRACER_ACCESSED_BIT	0
 #define TRACER_RUNNING_BIT	1
 #define TRACER_CYCLE_ACC_BIT	2
+#define TRACER_TRACE_DATA_BIT	3
+#define TRACER_TIMESTAMP_BIT	4
+#define TRACER_BRANCHOUTPUT_BIT	5
+#define TRACER_RETURN_STACK_BIT	6
 #define TRACER_ACCESSED		BIT(TRACER_ACCESSED_BIT)
 #define TRACER_RUNNING		BIT(TRACER_RUNNING_BIT)
 #define TRACER_CYCLE_ACC	BIT(TRACER_CYCLE_ACC_BIT)
+#define TRACER_TRACE_DATA	BIT(TRACER_TRACE_DATA_BIT)
+#define TRACER_TIMESTAMP	BIT(TRACER_TIMESTAMP_BIT)
+#define TRACER_BRANCHOUTPUT	BIT(TRACER_BRANCHOUTPUT_BIT)
+#define TRACER_RETURN_STACK	BIT(TRACER_RETURN_STACK_BIT)
 
 #define TRACER_TIMEOUT 10000
 
-#define etm_writel(t, v, x) \
-	(__raw_writel((v), (t)->etm_regs + (x)))
-#define etm_readl(t, x) (__raw_readl((t)->etm_regs + (x)))
+#define etm_writel(t, id, v, x) \
+	(__raw_writel((v), (t)->etm_regs[(id)] + (x)))
+#define etm_readl(t, id, x) (__raw_readl((t)->etm_regs[(id)] + (x)))
 
 /* CoreSight Management Registers */
 #define CSMR_LOCKACCESS 0xfb0
@@ -43,7 +51,7 @@
 #define ETMCTRL_POWERDOWN	1
 #define ETMCTRL_PROGRAM		(1 << 10)
 #define ETMCTRL_PORTSEL		(1 << 11)
-#define ETMCTRL_DO_CONTEXTID	(3 << 14)
+#define ETMCTRL_CONTEXTIDSIZE(x) (((x) & 3) << 14)
 #define ETMCTRL_PORTMASK1	(7 << 4)
 #define ETMCTRL_PORTMASK2	(1 << 21)
 #define ETMCTRL_PORTMASK	(ETMCTRL_PORTMASK1 | ETMCTRL_PORTMASK2)
@@ -55,9 +63,12 @@
 #define ETMCTRL_DATA_DO_BOTH	(ETMCTRL_DATA_DO_DATA | ETMCTRL_DATA_DO_ADDR)
 #define ETMCTRL_BRANCH_OUTPUT	(1 << 8)
 #define ETMCTRL_CYCLEACCURATE	(1 << 12)
+#define ETMCTRL_TIMESTAMP_EN	(1 << 28)
+#define ETMCTRL_RETURN_STACK_EN	(1 << 29)
 
 /* ETM configuration code register */
 #define ETMR_CONFCODE		(0x04)
+#define ETMCCR_ETMIDR_PRESENT	BIT(31)
 
 /* ETM trace start/stop resource control register */
 #define ETMR_TRACESSCTRL	(0x18)
@@ -113,10 +124,25 @@
 #define ETMR_TRACEENCTRL	0x24
 #define ETMTE_INCLEXCL		BIT(24)
 #define ETMR_TRACEENEVT		0x20
-#define ETMCTRL_OPTS		(ETMCTRL_DO_CPRT | \
-				ETMCTRL_DATA_DO_ADDR | \
-				ETMCTRL_BRANCH_OUTPUT | \
-				ETMCTRL_DO_CONTEXTID)
+
+#define ETMR_VIEWDATAEVT	0x30
+#define ETMR_VIEWDATACTRL1	0x34
+#define ETMR_VIEWDATACTRL2	0x38
+#define ETMR_VIEWDATACTRL3	0x3c
+#define ETMVDC3_EXCLONLY	BIT(16)
+
+#define ETMCTRL_OPTS		(ETMCTRL_DO_CPRT)
+
+#define ETMR_ID			0x1e4
+#define ETMIDR_VERSION(x)	(((x) >> 4) & 0xff)
+#define ETMIDR_VERSION_3_1	0x21
+#define ETMIDR_VERSION_PFT_1_0	0x30
+
+#define ETMR_CCE		0x1e8
+#define ETMCCER_RETURN_STACK_IMPLEMENTED	BIT(23)
+#define ETMCCER_TIMESTAMPING_IMPLEMENTED	BIT(22)
+
+#define ETMR_TRACEIDR		0x200
 
 /* ETM management registers, "ETM Architecture", 3.5.24 */
 #define ETMMR_OSLAR	0x300
@@ -140,14 +166,16 @@
 #define ETBFF_TRIGIN		BIT(8)
 #define ETBFF_TRIGEVT		BIT(9)
 #define ETBFF_TRIGFL		BIT(10)
+#define ETBFF_STOPFL		BIT(12)
 
 #define etb_writel(t, v, x) \
 	(__raw_writel((v), (t)->etb_regs + (x)))
 #define etb_readl(t, x) (__raw_readl((t)->etb_regs + (x)))
 
-#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
-#define etm_unlock(t) \
-	do { etm_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
+#define etm_lock(t, id) \
+	do { etm_writel((t), (id), 0, CSMR_LOCKACCESS); } while (0)
+#define etm_unlock(t, id) \
+	do { etm_writel((t), (id), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
 
 #define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)
 #define etb_unlock(t) \
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 53c15de..809203a 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -35,6 +35,9 @@
 extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
 #endif
 
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+
 #endif
 
 #endif
diff --git a/arch/arm/include/asm/mach/mmc.h b/arch/arm/include/asm/mach/mmc.h
new file mode 100644
index 0000000..bca864a
--- /dev/null
+++ b/arch/arm/include/asm/mach/mmc.h
@@ -0,0 +1,28 @@
+/*
+ *  arch/arm/include/asm/mach/mmc.h
+ */
+#ifndef ASMARM_MACH_MMC_H
+#define ASMARM_MACH_MMC_H
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+
+struct embedded_sdio_data {
+	struct sdio_cis cis;
+	struct sdio_cccr cccr;
+	struct sdio_embedded_func *funcs;
+	int num_funcs;
+};
+
+struct mmc_platform_data {
+	unsigned int ocr_mask;			/* available voltages */
+	int built_in;				/* built-in device flag */
+	int card_present;			/* card detect state */
+	u32 (*translate_vdd)(struct device *, unsigned int);
+	unsigned int (*status)(struct device *);
+	struct embedded_sdio_data *embedded_sdio;
+	int (*register_status_notify)(void (*callback)(int card_present, void *dev_id), void *dev_id);
+};
+
+#endif
diff --git a/arch/arm/include/asm/neon.h b/arch/arm/include/asm/neon.h
new file mode 100644
index 0000000..8f730fe
--- /dev/null
+++ b/arch/arm/include/asm/neon.h
@@ -0,0 +1,36 @@
+/*
+ * linux/arch/arm/include/asm/neon.h
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/hwcap.h>
+
+#define cpu_has_neon()		(!!(elf_hwcap & HWCAP_NEON))
+
+#ifdef __ARM_NEON__
+
+/*
+ * If you are affected by the BUILD_BUG below, it probably means that you are
+ * using NEON code /and/ calling the kernel_neon_begin() function from the same
+ * compilation unit. To prevent issues that may arise from GCC reordering or
+ * generating(1) NEON instructions outside of these begin/end functions, the
+ * only supported way of using NEON code in the kernel is by isolating it in a
+ * separate compilation unit, and calling it from another unit from inside a
+ * kernel_neon_begin/kernel_neon_end pair.
+ *
+ * (1) Current GCC (4.7) might generate NEON instructions at O3 level if
+ *     -mfpu=neon is set.
+ */
+
+#define kernel_neon_begin() \
+	BUILD_BUG_ON_MSG(1, "kernel_neon_begin() called from NEON code")
+
+#else
+void kernel_neon_begin(void);
+#endif
+void kernel_neon_end(void);
diff --git a/arch/arm/include/asm/rodata.h b/arch/arm/include/asm/rodata.h
new file mode 100644
index 0000000..8c8add8
--- /dev/null
+++ b/arch/arm/include/asm/rodata.h
@@ -0,0 +1,32 @@
+/*
+ *  arch/arm/include/asm/rodata.h
+ *
+ *  Copyright (C) 2011 Google, Inc.
+ *
+ *  Author: Colin Cross <ccross@android.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASMARM_RODATA_H
+#define _ASMARM_RODATA_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_DEBUG_RODATA
+
+int set_memory_rw(unsigned long virt, int numpages);
+int set_memory_ro(unsigned long virt, int numpages);
+
+void mark_rodata_ro(void);
+void set_kernel_text_rw(void);
+void set_kernel_text_ro(void);
+#else
+static inline void set_kernel_text_rw(void) { }
+static inline void set_kernel_text_ro(void) { }
+#endif
+
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index d3a22be..c5aa088 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -81,6 +81,8 @@
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
 
+extern void smp_send_all_cpu_backtrace(void);
+
 struct smp_operations {
 #ifdef CONFIG_SMP
 	/*
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
index 73ddd72..ed805f1 100644
--- a/arch/arm/include/asm/syscall.h
+++ b/arch/arm/include/asm/syscall.h
@@ -103,8 +103,7 @@
 	memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0]));
 }
 
-static inline int syscall_get_arch(struct task_struct *task,
-				   struct pt_regs *regs)
+static inline int syscall_get_arch(void)
 {
 	/* ARM tasks don't change audit architectures on the fly. */
 	return AUDIT_ARCH_ARM;
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index cbd6197..4387624 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -15,7 +15,7 @@
 
 #include <uapi/asm/unistd.h>
 
-#define __NR_syscalls  (380)
+#define __NR_syscalls  (384)
 #define __ARM_NR_cmpxchg		(__ARM_NR_BASE+0x00fff0)
 
 #define __ARCH_WANT_STAT64
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index af33b44..17407c9 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -406,6 +406,12 @@
 #define __NR_process_vm_writev		(__NR_SYSCALL_BASE+377)
 #define __NR_kcmp			(__NR_SYSCALL_BASE+378)
 #define __NR_finit_module		(__NR_SYSCALL_BASE+379)
+/* Reserve for later
+#define __NR_sched_setattr		(__NR_SYSCALL_BASE+380)
+#define __NR_sched_getattr		(__NR_SYSCALL_BASE+381)
+#define __NR_renameat2			(__NR_SYSCALL_BASE+382)
+*/
+#define __NR_seccomp			(__NR_SYSCALL_BASE+383)
 
 /*
  * This may need to be greater than __NR_last_syscall+1 in order to
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index c6ca7e3..725f844 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -389,6 +389,11 @@
 		CALL(sys_process_vm_writev)
 		CALL(sys_kcmp)
 		CALL(sys_finit_module)
+/* 380 */	CALL(sys_ni_syscall)		/* reserved sys_sched_setattr */
+		CALL(sys_ni_syscall)		/* reserved sys_sched_getattr */
+		CALL(sys_ni_syscall)		/* reserved sys_renameat2     */
+		CALL(sys_seccomp)
+
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index f264693..179e96c3 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -502,7 +502,8 @@
  * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall.
  */
 #define ABI(native, compat) native
-#ifdef CONFIG_AEABI
+
+#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
 #define OBSOLETE(syscall) sys_ni_syscall
 #else
 #define OBSOLETE(syscall) syscall
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
index 8ff0ecd..7db3247 100644
--- a/arch/arm/kernel/etm.c
+++ b/arch/arm/kernel/etm.c
@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/io.h>
+#include <linux/slab.h>
 #include <linux/sysrq.h>
 #include <linux/device.h>
 #include <linux/clk.h>
@@ -37,26 +38,37 @@
 struct tracectx {
 	unsigned int	etb_bufsz;
 	void __iomem	*etb_regs;
-	void __iomem	*etm_regs;
+	void __iomem	**etm_regs;
+	int		etm_regs_count;
 	unsigned long	flags;
 	int		ncmppairs;
 	int		etm_portsz;
+	int		etm_contextid_size;
+	u32		etb_fc;
+	unsigned long	range_start;
+	unsigned long	range_end;
+	unsigned long	data_range_start;
+	unsigned long	data_range_end;
+	bool		dump_initial_etb;
 	struct device	*dev;
 	struct clk	*emu_clk;
 	struct mutex	mutex;
 };
 
-static struct tracectx tracer;
+static struct tracectx tracer = {
+	.range_start = (unsigned long)_stext,
+	.range_end = (unsigned long)_etext,
+};
 
 static inline bool trace_isrunning(struct tracectx *t)
 {
 	return !!(t->flags & TRACER_RUNNING);
 }
 
-static int etm_setup_address_range(struct tracectx *t, int n,
+static int etm_setup_address_range(struct tracectx *t, int id, int n,
 		unsigned long start, unsigned long end, int exclude, int data)
 {
-	u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_NSONLY | \
+	u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_IGNSECURITY |
 		    ETMAAT_NOVALCMP;
 
 	if (n < 1 || n > t->ncmppairs)
@@ -72,95 +84,185 @@
 		flags |= ETMAAT_IEXEC;
 
 	/* first comparator for the range */
-	etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2));
-	etm_writel(t, start, ETMR_COMP_VAL(n * 2));
+	etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2));
+	etm_writel(t, id, start, ETMR_COMP_VAL(n * 2));
 
 	/* second comparator is right next to it */
-	etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1));
-	etm_writel(t, end, ETMR_COMP_VAL(n * 2 + 1));
+	etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1));
+	etm_writel(t, id, end, ETMR_COMP_VAL(n * 2 + 1));
 
-	flags = exclude ? ETMTE_INCLEXCL : 0;
-	etm_writel(t, flags | (1 << n), ETMR_TRACEENCTRL);
+	if (data) {
+		flags = exclude ? ETMVDC3_EXCLONLY : 0;
+		if (exclude)
+			n += 8;
+		etm_writel(t, id, flags | BIT(n), ETMR_VIEWDATACTRL3);
+	} else {
+		flags = exclude ? ETMTE_INCLEXCL : 0;
+		etm_writel(t, id, flags | (1 << n), ETMR_TRACEENCTRL);
+	}
 
 	return 0;
 }
 
+static int trace_start_etm(struct tracectx *t, int id)
+{
+	u32 v;
+	unsigned long timeout = TRACER_TIMEOUT;
+
+	v = ETMCTRL_OPTS | ETMCTRL_PROGRAM | ETMCTRL_PORTSIZE(t->etm_portsz);
+	v |= ETMCTRL_CONTEXTIDSIZE(t->etm_contextid_size);
+
+	if (t->flags & TRACER_CYCLE_ACC)
+		v |= ETMCTRL_CYCLEACCURATE;
+
+	if (t->flags & TRACER_BRANCHOUTPUT)
+		v |= ETMCTRL_BRANCH_OUTPUT;
+
+	if (t->flags & TRACER_TRACE_DATA)
+		v |= ETMCTRL_DATA_DO_ADDR;
+
+	if (t->flags & TRACER_TIMESTAMP)
+		v |= ETMCTRL_TIMESTAMP_EN;
+
+	if (t->flags & TRACER_RETURN_STACK)
+		v |= ETMCTRL_RETURN_STACK_EN;
+
+	etm_unlock(t, id);
+
+	etm_writel(t, id, v, ETMR_CTRL);
+
+	while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
+		;
+	if (!timeout) {
+		dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
+		etm_lock(t, id);
+		return -EFAULT;
+	}
+
+	if (t->range_start || t->range_end)
+		etm_setup_address_range(t, id, 1,
+					t->range_start, t->range_end, 0, 0);
+	else
+		etm_writel(t, id, ETMTE_INCLEXCL, ETMR_TRACEENCTRL);
+
+	etm_writel(t, id, 0, ETMR_TRACEENCTRL2);
+	etm_writel(t, id, 0, ETMR_TRACESSCTRL);
+	etm_writel(t, id, 0x6f, ETMR_TRACEENEVT);
+
+	etm_writel(t, id, 0, ETMR_VIEWDATACTRL1);
+	etm_writel(t, id, 0, ETMR_VIEWDATACTRL2);
+
+	if (t->data_range_start || t->data_range_end)
+		etm_setup_address_range(t, id, 2, t->data_range_start,
+					t->data_range_end, 0, 1);
+	else
+		etm_writel(t, id, ETMVDC3_EXCLONLY, ETMR_VIEWDATACTRL3);
+
+	etm_writel(t, id, 0x6f, ETMR_VIEWDATAEVT);
+
+	v &= ~ETMCTRL_PROGRAM;
+	v |= ETMCTRL_PORTSEL;
+
+	etm_writel(t, id, v, ETMR_CTRL);
+
+	timeout = TRACER_TIMEOUT;
+	while (etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout)
+		;
+	if (!timeout) {
+		dev_dbg(t->dev, "Waiting for progbit to deassert timed out\n");
+		etm_lock(t, id);
+		return -EFAULT;
+	}
+
+	etm_lock(t, id);
+	return 0;
+}
+
 static int trace_start(struct tracectx *t)
 {
-	u32 v;
-	unsigned long timeout = TRACER_TIMEOUT;
+	int ret;
+	int id;
+	u32 etb_fc = t->etb_fc;
 
 	etb_unlock(t);
 
-	etb_writel(t, 0, ETBR_FORMATTERCTRL);
+	t->dump_initial_etb = false;
+	etb_writel(t, 0, ETBR_WRITEADDR);
+	etb_writel(t, etb_fc, ETBR_FORMATTERCTRL);
 	etb_writel(t, 1, ETBR_CTRL);
 
 	etb_lock(t);
 
-	/* configure etm */
-	v = ETMCTRL_OPTS | ETMCTRL_PROGRAM | ETMCTRL_PORTSIZE(t->etm_portsz);
-
-	if (t->flags & TRACER_CYCLE_ACC)
-		v |= ETMCTRL_CYCLEACCURATE;
-
-	etm_unlock(t);
-
-	etm_writel(t, v, ETMR_CTRL);
-
-	while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
-		;
-	if (!timeout) {
-		dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
-		etm_lock(t);
-		return -EFAULT;
+	/* configure etm(s) */
+	for (id = 0; id < t->etm_regs_count; id++) {
+		ret = trace_start_etm(t, id);
+		if (ret)
+			return ret;
 	}
 
-	etm_setup_address_range(t, 1, (unsigned long)_stext,
-			(unsigned long)_etext, 0, 0);
-	etm_writel(t, 0, ETMR_TRACEENCTRL2);
-	etm_writel(t, 0, ETMR_TRACESSCTRL);
-	etm_writel(t, 0x6f, ETMR_TRACEENEVT);
-
-	v &= ~ETMCTRL_PROGRAM;
-	v |= ETMCTRL_PORTSEL;
-
-	etm_writel(t, v, ETMR_CTRL);
-
-	timeout = TRACER_TIMEOUT;
-	while (etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout)
-		;
-	if (!timeout) {
-		dev_dbg(t->dev, "Waiting for progbit to deassert timed out\n");
-		etm_lock(t);
-		return -EFAULT;
-	}
-
-	etm_lock(t);
-
 	t->flags |= TRACER_RUNNING;
 
 	return 0;
 }
 
-static int trace_stop(struct tracectx *t)
+static int trace_stop_etm(struct tracectx *t, int id)
 {
 	unsigned long timeout = TRACER_TIMEOUT;
 
-	etm_unlock(t);
+	etm_unlock(t, id);
 
-	etm_writel(t, 0x440, ETMR_CTRL);
-	while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
+	etm_writel(t, id, 0x440, ETMR_CTRL);
+	while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
 		;
 	if (!timeout) {
-		dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
-		etm_lock(t);
+		dev_err(t->dev,
+			"etm%d: Waiting for progbit to assert timed out\n",
+			id);
+		etm_lock(t, id);
 		return -EFAULT;
 	}
 
-	etm_lock(t);
+	etm_lock(t, id);
+	return 0;
+}
+
+static int trace_power_down_etm(struct tracectx *t, int id)
+{
+	unsigned long timeout = TRACER_TIMEOUT;
+
+	etm_unlock(t, id);
+	while (!(etm_readl(t, id, ETMR_STATUS) & ETMST_PROGBIT) && --timeout)
+		;
+	if (!timeout) {
+		dev_err(t->dev, "etm%d: Waiting for status progbit to assert timed out\n",
+			id);
+		etm_lock(t, id);
+		return -EFAULT;
+	}
+
+	etm_writel(t, id, 0x441, ETMR_CTRL);
+
+	etm_lock(t, id);
+	return 0;
+}
+
+static int trace_stop(struct tracectx *t)
+{
+	int id;
+	unsigned long timeout = TRACER_TIMEOUT;
+	u32 etb_fc = t->etb_fc;
+
+	for (id = 0; id < t->etm_regs_count; id++)
+		trace_stop_etm(t, id);
+
+	for (id = 0; id < t->etm_regs_count; id++)
+		trace_power_down_etm(t, id);
 
 	etb_unlock(t);
-	etb_writel(t, ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL);
+	if (etb_fc) {
+		etb_fc |= ETBFF_STOPFL;
+		etb_writel(t, t->etb_fc, ETBR_FORMATTERCTRL);
+	}
+	etb_writel(t, etb_fc | ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL);
 
 	timeout = TRACER_TIMEOUT;
 	while (etb_readl(t, ETBR_FORMATTERCTRL) &
@@ -185,24 +287,15 @@
 static int etb_getdatalen(struct tracectx *t)
 {
 	u32 v;
-	int rp, wp;
+	int wp;
 
 	v = etb_readl(t, ETBR_STATUS);
 
 	if (v & 1)
 		return t->etb_bufsz;
 
-	rp = etb_readl(t, ETBR_READADDR);
 	wp = etb_readl(t, ETBR_WRITEADDR);
-
-	if (rp > wp) {
-		etb_writel(t, 0, ETBR_READADDR);
-		etb_writel(t, 0, ETBR_WRITEADDR);
-
-		return 0;
-	}
-
-	return wp - rp;
+	return wp;
 }
 
 /* sysrq+v will always stop the running trace and leave it at that */
@@ -235,21 +328,18 @@
 		printk("%08x", cpu_to_be32(etb_readl(t, ETBR_READMEM)));
 	printk(KERN_INFO "\n--- ETB buffer end ---\n");
 
-	/* deassert the overflow bit */
-	etb_writel(t, 1, ETBR_CTRL);
-	etb_writel(t, 0, ETBR_CTRL);
-
-	etb_writel(t, 0, ETBR_TRIGGERCOUNT);
-	etb_writel(t, 0, ETBR_READADDR);
-	etb_writel(t, 0, ETBR_WRITEADDR);
-
 	etb_lock(t);
 }
 
 static void sysrq_etm_dump(int key)
 {
+	if (!mutex_trylock(&tracer.mutex)) {
+		printk(KERN_INFO "Tracing hardware busy\n");
+		return;
+	}
 	dev_dbg(tracer.dev, "Dumping ETB buffer\n");
 	etm_dump();
+	mutex_unlock(&tracer.mutex);
 }
 
 static struct sysrq_key_op sysrq_etm_op = {
@@ -276,6 +366,10 @@
 	struct tracectx *t = file->private_data;
 	u32 first = 0;
 	u32 *buf;
+	int wpos;
+	int skip;
+	long wlength;
+	loff_t pos = *ppos;
 
 	mutex_lock(&t->mutex);
 
@@ -287,31 +381,39 @@
 	etb_unlock(t);
 
 	total = etb_getdatalen(t);
+	if (total == 0 && t->dump_initial_etb)
+		total = t->etb_bufsz;
 	if (total == t->etb_bufsz)
 		first = etb_readl(t, ETBR_WRITEADDR);
 
+	if (pos > total * 4) {
+		skip = 0;
+		wpos = total;
+	} else {
+		skip = (int)pos % 4;
+		wpos = (int)pos / 4;
+	}
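+	/*
+	 * Example (sketch): *ppos == 6 within a populated buffer gives
+	 * wpos = 1 (advance the read pointer one whole 32-bit word) and
+	 * skip = 2 (drop two bytes of the first word actually read).
+	 */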
+	total -= wpos;
+	first = (first + wpos) % t->etb_bufsz;
+
 	etb_writel(t, first, ETBR_READADDR);
 
-	length = min(total * 4, (int)len);
-	buf = vmalloc(length);
+	wlength = min(total, DIV_ROUND_UP(skip + (int)len, 4));
+	length = min(total * 4 - skip, (int)len);
+	buf = vmalloc(wlength * 4);
 
-	dev_dbg(t->dev, "ETB buffer length: %d\n", total);
+	dev_dbg(t->dev, "ETB read %ld bytes to %lld from %ld words at %d\n",
+		length, pos, wlength, first);
+	dev_dbg(t->dev, "ETB buffer length: %d\n", total + wpos);
 	dev_dbg(t->dev, "ETB status reg: %x\n", etb_readl(t, ETBR_STATUS));
-	for (i = 0; i < length / 4; i++)
+	for (i = 0; i < wlength; i++)
 		buf[i] = etb_readl(t, ETBR_READMEM);
 
-	/* the only way to deassert overflow bit in ETB status is this */
-	etb_writel(t, 1, ETBR_CTRL);
-	etb_writel(t, 0, ETBR_CTRL);
-
-	etb_writel(t, 0, ETBR_WRITEADDR);
-	etb_writel(t, 0, ETBR_READADDR);
-	etb_writel(t, 0, ETBR_TRIGGERCOUNT);
-
 	etb_lock(t);
 
-	length -= copy_to_user(data, buf, length);
+	length -= copy_to_user(data, (u8 *)buf + skip, length);
 	vfree(buf);
+	*ppos = pos + length;
 
 out:
 	mutex_unlock(&t->mutex);
@@ -348,28 +450,17 @@
 	if (ret)
 		goto out;
 
+	mutex_lock(&t->mutex);
 	t->etb_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
 	if (!t->etb_regs) {
 		ret = -ENOMEM;
 		goto out_release;
 	}
 
+	t->dev = &dev->dev;
+	t->dump_initial_etb = true;
 	amba_set_drvdata(dev, t);
 
-	etb_miscdev.parent = &dev->dev;
-
-	ret = misc_register(&etb_miscdev);
-	if (ret)
-		goto out_unmap;
-
-	t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
-	if (IS_ERR(t->emu_clk)) {
-		dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
-		return -EFAULT;
-	}
-
-	clk_enable(t->emu_clk);
-
 	etb_unlock(t);
 	t->etb_bufsz = etb_readl(t, ETBR_DEPTH);
 	dev_dbg(&dev->dev, "Size: %x\n", t->etb_bufsz);
@@ -378,6 +469,20 @@
 	etb_writel(t, 0, ETBR_CTRL);
 	etb_writel(t, 0x1000, ETBR_FORMATTERCTRL);
 	etb_lock(t);
+	mutex_unlock(&t->mutex);
+
+	etb_miscdev.parent = &dev->dev;
+
+	ret = misc_register(&etb_miscdev);
+	if (ret)
+		goto out_unmap;
+
+	/* Get optional clock. Currently used to select clock source on omap3 */
+	t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
+	if (IS_ERR(t->emu_clk))
+		dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
+	else
+		clk_enable(t->emu_clk);
 
 	dev_dbg(&dev->dev, "ETB AMBA driver initialized.\n");
 
@@ -385,10 +490,13 @@
 	return ret;
 
 out_unmap:
+	mutex_lock(&t->mutex);
 	amba_set_drvdata(dev, NULL);
 	iounmap(t->etb_regs);
+	t->etb_regs = NULL;
 
 out_release:
+	mutex_unlock(&t->mutex);
 	amba_release_regions(dev);
 
 	return ret;
@@ -403,8 +511,10 @@
 	iounmap(t->etb_regs);
 	t->etb_regs = NULL;
 
-	clk_disable(t->emu_clk);
-	clk_put(t->emu_clk);
+	if (!IS_ERR(t->emu_clk)) {
+		clk_disable(t->emu_clk);
+		clk_put(t->emu_clk);
+	}
 
 	amba_release_regions(dev);
 
@@ -448,7 +558,10 @@
 		return -EINVAL;
 
 	mutex_lock(&tracer.mutex);
-	ret = value ? trace_start(&tracer) : trace_stop(&tracer);
+	if (!tracer.etb_regs)
+		ret = -ENODEV;
+	else
+		ret = value ? trace_start(&tracer) : trace_stop(&tracer);
 	mutex_unlock(&tracer.mutex);
 
 	return ret ? : n;
@@ -463,36 +576,50 @@
 {
 	u32 etb_wa, etb_ra, etb_st, etb_fc, etm_ctrl, etm_st;
 	int datalen;
+	int id;
+	int ret;
 
-	etb_unlock(&tracer);
-	datalen = etb_getdatalen(&tracer);
-	etb_wa = etb_readl(&tracer, ETBR_WRITEADDR);
-	etb_ra = etb_readl(&tracer, ETBR_READADDR);
-	etb_st = etb_readl(&tracer, ETBR_STATUS);
-	etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL);
-	etb_lock(&tracer);
+	mutex_lock(&tracer.mutex);
+	if (tracer.etb_regs) {
+		etb_unlock(&tracer);
+		datalen = etb_getdatalen(&tracer);
+		etb_wa = etb_readl(&tracer, ETBR_WRITEADDR);
+		etb_ra = etb_readl(&tracer, ETBR_READADDR);
+		etb_st = etb_readl(&tracer, ETBR_STATUS);
+		etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL);
+		etb_lock(&tracer);
+	} else {
+		etb_wa = etb_ra = etb_st = etb_fc = ~0;
+		datalen = -1;
+	}
 
-	etm_unlock(&tracer);
-	etm_ctrl = etm_readl(&tracer, ETMR_CTRL);
-	etm_st = etm_readl(&tracer, ETMR_STATUS);
-	etm_lock(&tracer);
-
-	return sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
+	ret = sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
 			"ETBR_WRITEADDR:\t%08x\n"
 			"ETBR_READADDR:\t%08x\n"
 			"ETBR_STATUS:\t%08x\n"
-			"ETBR_FORMATTERCTRL:\t%08x\n"
-			"ETMR_CTRL:\t%08x\n"
-			"ETMR_STATUS:\t%08x\n",
+			"ETBR_FORMATTERCTRL:\t%08x\n",
 			datalen,
 			tracer.ncmppairs,
 			etb_wa,
 			etb_ra,
 			etb_st,
-			etb_fc,
+			etb_fc
+			);
+
+	for (id = 0; id < tracer.etm_regs_count; id++) {
+		etm_unlock(&tracer, id);
+		etm_ctrl = etm_readl(&tracer, id, ETMR_CTRL);
+		etm_st = etm_readl(&tracer, id, ETMR_STATUS);
+		etm_lock(&tracer, id);
+		ret += sprintf(buf + ret, "ETMR_CTRL:\t%08x\n"
+			"ETMR_STATUS:\t%08x\n",
 			etm_ctrl,
 			etm_st
 			);
+	}
+	mutex_unlock(&tracer.mutex);
+
+	return ret;
 }
 
 static struct kobj_attribute trace_info_attr =
@@ -531,42 +658,260 @@
 static struct kobj_attribute trace_mode_attr =
 	__ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store);
 
+static ssize_t trace_contextid_size_show(struct kobject *kobj,
+					 struct kobj_attribute *attr,
+					 char *buf)
+{
+	/*
+	 * etm_contextid_size holds the ETMCTRL encoding (0-3); print the
+	 * byte count it stands for: 0, 1, 2 or 4.
+	 */
+	return sprintf(buf, "%d\n", (1 << tracer.etm_contextid_size) >> 1);
+}
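+/*
+ * Example: writing 4 stores fls(4) = 3 (the ETMCTRL encoding); the
+ * show path above then prints (1 << 3) >> 1 = 4 bytes again.
+ */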
+
+static ssize_t trace_contextid_size_store(struct kobject *kobj,
+					  struct kobj_attribute *attr,
+					  const char *buf, size_t n)
+{
+	unsigned int contextid_size;
+
+	if (sscanf(buf, "%u", &contextid_size) != 1)
+		return -EINVAL;
+
+	if (contextid_size == 3 || contextid_size > 4)
+		return -EINVAL;
+
+	mutex_lock(&tracer.mutex);
+	tracer.etm_contextid_size = fls(contextid_size);
+	mutex_unlock(&tracer.mutex);
+
+	return n;
+}
+
+static struct kobj_attribute trace_contextid_size_attr =
+	__ATTR(trace_contextid_size, 0644,
+		trace_contextid_size_show, trace_contextid_size_store);
+
+static ssize_t trace_branch_output_show(struct kobject *kobj,
+					struct kobj_attribute *attr,
+					char *buf)
+{
+	return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_BRANCHOUTPUT));
+}
+
+static ssize_t trace_branch_output_store(struct kobject *kobj,
+					 struct kobj_attribute *attr,
+					 const char *buf, size_t n)
+{
+	unsigned int branch_output;
+
+	if (sscanf(buf, "%u", &branch_output) != 1)
+		return -EINVAL;
+
+	mutex_lock(&tracer.mutex);
+	if (branch_output) {
+		tracer.flags |= TRACER_BRANCHOUTPUT;
+		/* Branch broadcasting is incompatible with the return stack */
+		tracer.flags &= ~TRACER_RETURN_STACK;
+	} else {
+		tracer.flags &= ~TRACER_BRANCHOUTPUT;
+	}
+	mutex_unlock(&tracer.mutex);
+
+	return n;
+}
+
+static struct kobj_attribute trace_branch_output_attr =
+	__ATTR(trace_branch_output, 0644,
+		trace_branch_output_show, trace_branch_output_store);
+
+static ssize_t trace_return_stack_show(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  char *buf)
+{
+	return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_RETURN_STACK));
+}
+
+static ssize_t trace_return_stack_store(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   const char *buf, size_t n)
+{
+	unsigned int return_stack;
+
+	if (sscanf(buf, "%u", &return_stack) != 1)
+		return -EINVAL;
+
+	mutex_lock(&tracer.mutex);
+	if (return_stack) {
+		tracer.flags |= TRACER_RETURN_STACK;
+		/* Return stack is incompatible with branch broadcasting */
+		tracer.flags &= ~TRACER_BRANCHOUTPUT;
+	} else {
+		tracer.flags &= ~TRACER_RETURN_STACK;
+	}
+	mutex_unlock(&tracer.mutex);
+
+	return n;
+}
+
+static struct kobj_attribute trace_return_stack_attr =
+	__ATTR(trace_return_stack, 0644,
+		trace_return_stack_show, trace_return_stack_store);
+
+static ssize_t trace_timestamp_show(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  char *buf)
+{
+	return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_TIMESTAMP));
+}
+
+static ssize_t trace_timestamp_store(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   const char *buf, size_t n)
+{
+	unsigned int timestamp;
+
+	if (sscanf(buf, "%u", &timestamp) != 1)
+		return -EINVAL;
+
+	mutex_lock(&tracer.mutex);
+	if (timestamp)
+		tracer.flags |= TRACER_TIMESTAMP;
+	else
+		tracer.flags &= ~TRACER_TIMESTAMP;
+	mutex_unlock(&tracer.mutex);
+
+	return n;
+}
+
+static struct kobj_attribute trace_timestamp_attr =
+	__ATTR(trace_timestamp, 0644,
+		trace_timestamp_show, trace_timestamp_store);
+
+static ssize_t trace_range_show(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  char *buf)
+{
+	return sprintf(buf, "%08lx %08lx\n",
+			tracer.range_start, tracer.range_end);
+}
+
+static ssize_t trace_range_store(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   const char *buf, size_t n)
+{
+	unsigned long range_start, range_end;
+
+	if (sscanf(buf, "%lx %lx", &range_start, &range_end) != 2)
+		return -EINVAL;
+
+	mutex_lock(&tracer.mutex);
+	tracer.range_start = range_start;
+	tracer.range_end = range_end;
+	mutex_unlock(&tracer.mutex);
+
+	return n;
+}
+
+static struct kobj_attribute trace_range_attr =
+	__ATTR(trace_range, 0644, trace_range_show, trace_range_store);
+
+static ssize_t trace_data_range_show(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  char *buf)
+{
+	unsigned long range_start;
+	u64 range_end;
+
+	mutex_lock(&tracer.mutex);
+	range_start = tracer.data_range_start;
+	range_end = tracer.data_range_end;
+	if (!range_end && (tracer.flags & TRACER_TRACE_DATA))
+		range_end = 0x100000000ULL;
+	mutex_unlock(&tracer.mutex);
+	return sprintf(buf, "%08lx %08llx\n", range_start, range_end);
+}
+
+static ssize_t trace_data_range_store(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   const char *buf, size_t n)
+{
+	unsigned long range_start;
+	u64 range_end;
+
+	if (sscanf(buf, "%lx %llx", &range_start, &range_end) != 2)
+		return -EINVAL;
+
+	mutex_lock(&tracer.mutex);
+	tracer.data_range_start = range_start;
+	tracer.data_range_end = (unsigned long)range_end;
+	if (range_end)
+		tracer.flags |= TRACER_TRACE_DATA;
+	else
+		tracer.flags &= ~TRACER_TRACE_DATA;
+	mutex_unlock(&tracer.mutex);
+
+	return n;
+
+
+static struct kobj_attribute trace_data_range_attr =
+	__ATTR(trace_data_range, 0644,
+		trace_data_range_show, trace_data_range_store);
+
 static int etm_probe(struct amba_device *dev, const struct amba_id *id)
 {
 	struct tracectx *t = &tracer;
 	int ret = 0;
+	void __iomem **new_regs;
+	int new_count;
+	u32 etmccr;
+	u32 etmidr;
+	u32 etmccer = 0;
+	u8 etm_version = 0;
 
-	if (t->etm_regs) {
-		dev_dbg(&dev->dev, "ETM already initialized\n");
-		ret = -EBUSY;
+	mutex_lock(&t->mutex);
+	new_count = t->etm_regs_count + 1;
+	new_regs = krealloc(t->etm_regs,
+				sizeof(t->etm_regs[0]) * new_count, GFP_KERNEL);
+
+	if (!new_regs) {
+		dev_dbg(&dev->dev, "Failed to allocate ETM register array\n");
+		ret = -ENOMEM;
 		goto out;
 	}
+	t->etm_regs = new_regs;
 
 	ret = amba_request_regions(dev, NULL);
 	if (ret)
 		goto out;
 
-	t->etm_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
-	if (!t->etm_regs) {
+	t->etm_regs[t->etm_regs_count] =
+		ioremap_nocache(dev->res.start, resource_size(&dev->res));
+	if (!t->etm_regs[t->etm_regs_count]) {
 		ret = -ENOMEM;
 		goto out_release;
 	}
 
-	amba_set_drvdata(dev, t);
+	amba_set_drvdata(dev, t->etm_regs[t->etm_regs_count]);
 
-	mutex_init(&t->mutex);
-	t->dev = &dev->dev;
-	t->flags = TRACER_CYCLE_ACC;
+	t->flags = TRACER_CYCLE_ACC | TRACER_TRACE_DATA | TRACER_BRANCHOUTPUT;
 	t->etm_portsz = 1;
+	t->etm_contextid_size = 3;
 
-	etm_unlock(t);
-	(void)etm_readl(t, ETMMR_PDSR);
+	etm_unlock(t, t->etm_regs_count);
+	(void)etm_readl(t, t->etm_regs_count, ETMMR_PDSR);
 	/* dummy first read */
-	(void)etm_readl(&tracer, ETMMR_OSSRR);
+	(void)etm_readl(&tracer, t->etm_regs_count, ETMMR_OSSRR);
 
-	t->ncmppairs = etm_readl(t, ETMR_CONFCODE) & 0xf;
-	etm_writel(t, 0x440, ETMR_CTRL);
-	etm_lock(t);
+	etmccr = etm_readl(t, t->etm_regs_count, ETMR_CONFCODE);
+	t->ncmppairs = etmccr & 0xf;
+	if (etmccr & ETMCCR_ETMIDR_PRESENT) {
+		etmidr = etm_readl(t, t->etm_regs_count, ETMR_ID);
+		etm_version = ETMIDR_VERSION(etmidr);
+		if (etm_version >= ETMIDR_VERSION_3_1)
+			etmccer = etm_readl(t, t->etm_regs_count, ETMR_CCE);
+	}
+	etm_writel(t, t->etm_regs_count, 0x441, ETMR_CTRL);
+	etm_writel(t, t->etm_regs_count, new_count, ETMR_TRACEIDR);
+	etm_lock(t, t->etm_regs_count);
 
 	ret = sysfs_create_file(&dev->dev.kobj,
 			&trace_running_attr.attr);
@@ -582,35 +927,100 @@
 	if (ret)
 		dev_dbg(&dev->dev, "Failed to create trace_mode in sysfs\n");
 
-	dev_dbg(t->dev, "ETM AMBA driver initialized.\n");
+	ret = sysfs_create_file(&dev->dev.kobj,
+				&trace_contextid_size_attr.attr);
+	if (ret)
+		dev_dbg(&dev->dev,
+			"Failed to create trace_contextid_size in sysfs\n");
+
+	ret = sysfs_create_file(&dev->dev.kobj,
+				&trace_branch_output_attr.attr);
+	if (ret)
+		dev_dbg(&dev->dev,
+			"Failed to create trace_branch_output in sysfs\n");
+
+	if (etmccer & ETMCCER_RETURN_STACK_IMPLEMENTED) {
+		ret = sysfs_create_file(&dev->dev.kobj,
+					&trace_return_stack_attr.attr);
+		if (ret)
+			dev_dbg(&dev->dev,
+			      "Failed to create trace_return_stack in sysfs\n");
+	}
+
+	if (etmccer & ETMCCER_TIMESTAMPING_IMPLEMENTED) {
+		ret = sysfs_create_file(&dev->dev.kobj,
+					&trace_timestamp_attr.attr);
+		if (ret)
+			dev_dbg(&dev->dev,
+				"Failed to create trace_timestamp in sysfs\n");
+	}
+
+	ret = sysfs_create_file(&dev->dev.kobj, &trace_range_attr.attr);
+	if (ret)
+		dev_dbg(&dev->dev, "Failed to create trace_range in sysfs\n");
+
+	if (etm_version < ETMIDR_VERSION_PFT_1_0) {
+		ret = sysfs_create_file(&dev->dev.kobj,
+					&trace_data_range_attr.attr);
+		if (ret)
+			dev_dbg(&dev->dev,
+				"Failed to create trace_data_range in sysfs\n");
+	} else {
+		tracer.flags &= ~TRACER_TRACE_DATA;
+	}
+
+	dev_dbg(&dev->dev, "ETM AMBA driver initialized.\n");
+
+	/* Enable formatter if there are multiple trace sources */
+	if (new_count > 1)
+		t->etb_fc = ETBFF_ENFCONT | ETBFF_ENFTC;
+
+	t->etm_regs_count = new_count;
 
 out:
+	mutex_unlock(&t->mutex);
 	return ret;
 
 out_unmap:
 	amba_set_drvdata(dev, NULL);
-	iounmap(t->etm_regs);
+	iounmap(t->etm_regs[t->etm_regs_count]);
 
 out_release:
 	amba_release_regions(dev);
 
+	mutex_unlock(&t->mutex);
 	return ret;
 }
 
 static int etm_remove(struct amba_device *dev)
 {
-	struct tracectx *t = amba_get_drvdata(dev);
-
-	amba_set_drvdata(dev, NULL);
-
-	iounmap(t->etm_regs);
-	t->etm_regs = NULL;
-
-	amba_release_regions(dev);
+	int i;
+	struct tracectx *t = &tracer;
+	void __iomem	*etm_regs = amba_get_drvdata(dev);
 
 	sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr);
 	sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr);
 	sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr);
+	sysfs_remove_file(&dev->dev.kobj, &trace_range_attr.attr);
+	sysfs_remove_file(&dev->dev.kobj, &trace_data_range_attr.attr);
+
+	amba_set_drvdata(dev, NULL);
+
+	mutex_lock(&t->mutex);
+	for (i = 0; i < t->etm_regs_count; i++)
+		if (t->etm_regs[i] == etm_regs)
+			break;
+	for (; i < t->etm_regs_count - 1; i++)
+		t->etm_regs[i] = t->etm_regs[i + 1];
+	t->etm_regs_count--;
+	if (!t->etm_regs_count) {
+		kfree(t->etm_regs);
+		t->etm_regs = NULL;
+	}
+	mutex_unlock(&t->mutex);
+
+	iounmap(etm_regs);
+	amba_release_regions(dev);
 
 	return 0;
 }
@@ -620,6 +1030,10 @@
 		.id	= 0x0003b921,
 		.mask	= 0x0007ffff,
 	},
+	{
+		.id	= 0x0003b950,
+		.mask	= 0x0007ffff,
+	},
 	{ 0, 0 },
 };
 
@@ -637,6 +1051,8 @@
 {
 	int retval;
 
+	mutex_init(&tracer.mutex);
+
 	retval = amba_driver_register(&etb_driver);
 	if (retval) {
 		printk(KERN_ERR "Failed to register etb\n");
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 34e5664..6a740a9 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -13,6 +13,7 @@
  */
 
 #include <linux/ftrace.h>
+#include <linux/module.h>
 #include <linux/uaccess.h>
 
 #include <asm/cacheflush.h>
@@ -63,6 +64,20 @@
 }
 #endif
 
+int ftrace_arch_code_modify_prepare(void)
+{
+	set_kernel_text_rw();
+	set_all_modules_text_rw();
+	return 0;
+}
+
+int ftrace_arch_code_modify_post_process(void)
+{
+	set_all_modules_text_ro();
+	set_kernel_text_ro();
+	return 0;
+}
+
 static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
 {
 	return arm_gen_branch_link(pc, addr);
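
For context, the ftrace core brackets every batch of code patching with the two hooks added above, so kernel and module text are writable only for the duration of the update. A hedged sketch of the calling pattern (simplified, not code from this patch):

/* Simplified sketch of how the ftrace core uses the arch hooks above. */
static void update_call_sites(void)
{
	ftrace_arch_code_modify_prepare();	/* text -> read-write */
	/* ... rewrite each mcount call site to a branch-and-link ... */
	ftrace_arch_code_modify_post_process();	/* text -> read-only  */
}
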
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index 778c2f7..b321c8f 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -144,6 +144,8 @@
 
 static int kgdb_brk_fn(struct pt_regs *regs, unsigned int instr)
 {
+	if (user_mode(regs))
+		return -1;
 	kgdb_handle_exception(1, SIGTRAP, 0, regs);
 
 	return 0;
@@ -151,6 +153,8 @@
 
 static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr)
 {
+	if (user_mode(regs))
+		return -1;
 	compiled_break = 1;
 	kgdb_handle_exception(1, SIGTRAP, 0, regs);
 
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index ac4c2e5..978002e 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -32,6 +32,7 @@
 #include <linux/hw_breakpoint.h>
 #include <linux/cpuidle.h>
 #include <linux/leds.h>
+#include <linux/console.h>
 
 #include <asm/cacheflush.h>
 #include <asm/idmap.h>
@@ -57,9 +58,46 @@
   "ARM" , "Thumb" , "Jazelle", "ThumbEE"
 };
 
+#ifdef CONFIG_SMP
+void arch_trigger_all_cpu_backtrace(void)
+{
+	smp_send_all_cpu_backtrace();
+}
+#else
+void arch_trigger_all_cpu_backtrace(void)
+{
+	dump_stack();
+}
+#endif
+
 extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
 typedef void (*phys_reset_t)(unsigned long);
 
+#ifdef CONFIG_ARM_FLUSH_CONSOLE_ON_RESTART
+void arm_machine_flush_console(void)
+{
+	printk("\n");
+	pr_emerg("Restarting %s\n", linux_banner);
+	if (console_trylock()) {
+		console_unlock();
+		return;
+	}
+
+	mdelay(50);
+
+	local_irq_disable();
+	if (!console_trylock())
+		pr_emerg("arm_restart: Console was locked! Busting\n");
+	else
+		pr_emerg("arm_restart: Console was locked!\n");
+	console_unlock();
+}
+#else
+void arm_machine_flush_console(void)
+{
+}
+#endif
+
 /*
  * A temporary stack to use for CPU reset. This is static so that we
  * don't clobber it with the identity mapping. When running with this
@@ -147,6 +185,7 @@
 
 void arch_cpu_idle_enter(void)
 {
+	idle_notifier_call_chain(IDLE_START);
 	ledtrig_cpu(CPU_LED_IDLE_START);
 #ifdef CONFIG_PL310_ERRATA_769419
 	wmb();
@@ -156,6 +195,7 @@
 void arch_cpu_idle_exit(void)
 {
 	ledtrig_cpu(CPU_LED_IDLE_END);
+	idle_notifier_call_chain(IDLE_END);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -195,6 +235,16 @@
  */
 void machine_shutdown(void)
 {
+#ifdef CONFIG_SMP
+	/*
+	 * Disable preemption so we're guaranteed to
+	 * run to power off or reboot and prevent
+	 * the possibility of switching to another
+	 * thread that might wind up blocking on
+	 * one of the stopped CPUs.
+	 */
+	preempt_disable();
+#endif
 	disable_nonboot_cpus();
 }
 
@@ -240,6 +290,10 @@
 {
 	smp_send_stop();
 
+	/* Flush the console to make sure all the relevant messages make it
+	 * out to the console drivers */
+	arm_machine_flush_console();
+
 	arm_pm_restart(reboot_mode, cmd);
 
 	/* Give a grace period for failure to restart of 1s */
@@ -251,6 +305,77 @@
 	while (1);
 }
 
+/*
+ * dump a block of kernel memory from around the given address
+ */
+static void show_data(unsigned long addr, int nbytes, const char *name)
+{
+	int	i, j;
+	int	nlines;
+	u32	*p;
+
+	/*
+	 * don't attempt to dump non-kernel addresses or
+	 * values that are probably just small negative numbers
+	 */
+	if (addr < PAGE_OFFSET || addr > -256UL)
+		return;
+
+	printk("\n%s: %#lx:\n", name, addr);
+
+	/*
+	 * round address down to a 32 bit boundary
+	 * and always dump a multiple of 32 bytes
+	 */
+	p = (u32 *)(addr & ~(sizeof(u32) - 1));
+	nbytes += (addr & (sizeof(u32) - 1));
+	nlines = (nbytes + 31) / 32;
+
+	for (i = 0; i < nlines; i++) {
+		/*
+		 * just display low 16 bits of address to keep
+		 * each line of the dump < 80 characters
+		 */
+		printk("%04lx ", (unsigned long)p & 0xffff);
+		for (j = 0; j < 8; j++) {
+			u32	data;
+			if (probe_kernel_address(p, data)) {
+				printk(" ********");
+			} else {
+				printk(" %08x", data);
+			}
+			++p;
+		}
+		printk("\n");
+	}
+}
+
+static void show_extra_register_data(struct pt_regs *regs, int nbytes)
+{
+	mm_segment_t fs;
+
+	fs = get_fs();
+	set_fs(KERNEL_DS);
+	show_data(regs->ARM_pc - nbytes, nbytes * 2, "PC");
+	show_data(regs->ARM_lr - nbytes, nbytes * 2, "LR");
+	show_data(regs->ARM_sp - nbytes, nbytes * 2, "SP");
+	show_data(regs->ARM_ip - nbytes, nbytes * 2, "IP");
+	show_data(regs->ARM_fp - nbytes, nbytes * 2, "FP");
+	show_data(regs->ARM_r0 - nbytes, nbytes * 2, "R0");
+	show_data(regs->ARM_r1 - nbytes, nbytes * 2, "R1");
+	show_data(regs->ARM_r2 - nbytes, nbytes * 2, "R2");
+	show_data(regs->ARM_r3 - nbytes, nbytes * 2, "R3");
+	show_data(regs->ARM_r4 - nbytes, nbytes * 2, "R4");
+	show_data(regs->ARM_r5 - nbytes, nbytes * 2, "R5");
+	show_data(regs->ARM_r6 - nbytes, nbytes * 2, "R6");
+	show_data(regs->ARM_r7 - nbytes, nbytes * 2, "R7");
+	show_data(regs->ARM_r8 - nbytes, nbytes * 2, "R8");
+	show_data(regs->ARM_r9 - nbytes, nbytes * 2, "R9");
+	show_data(regs->ARM_r10 - nbytes, nbytes * 2, "R10");
+	set_fs(fs);
+}
+
 void __show_regs(struct pt_regs *regs)
 {
 	unsigned long flags;
@@ -307,6 +432,8 @@
 		printk("Control: %08x%s\n", ctrl, buf);
 	}
 #endif
+
+	show_extra_register_data(regs, 128);
 }
 
 void show_regs(struct pt_regs * regs)
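
A worked example of the dump geometry (illustrative arithmetic only): __show_regs() passes nbytes = 128, so show_data() receives 256 bytes per register and prints (256 + 31) / 32 = 8 lines of eight 32-bit words each, centred on the register value.

#include <stdio.h>

/* Illustrative check of the show_data() line arithmetic above. */
int main(void)
{
	int nbytes = 128 * 2;			/* show_extra_register_data doubles it */
	int nlines = (nbytes + 31) / 32;	/* 8 lines of eight u32 words */

	printf("%d lines, %d bytes dumped per register\n", nlines, nlines * 32);
	return 0;
}
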
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 4e2110d..fe8c5ec 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -916,7 +916,7 @@
 	PTRACE_SYSCALL_EXIT,
 };
 
-static int tracehook_report_syscall(struct pt_regs *regs,
+static void tracehook_report_syscall(struct pt_regs *regs,
 				    enum ptrace_syscall_dir dir)
 {
 	unsigned long ip;
@@ -934,7 +934,6 @@
 		current_thread_info()->syscall = -1;
 
 	regs->ARM_ip = ip;
-	return current_thread_info()->syscall;
 }
 
 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
@@ -946,7 +945,9 @@
 		return -1;
 
 	if (test_thread_flag(TIF_SYSCALL_TRACE))
-		scno = tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+
+	scno = current_thread_info()->syscall;
 
 	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
 		trace_sys_enter(regs, scno);
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 5919eb4..0c96b2d 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -66,6 +66,7 @@
 	IPI_CALL_FUNC,
 	IPI_CALL_FUNC_SINGLE,
 	IPI_CPU_STOP,
+	IPI_CPU_BACKTRACE,
 };
 
 static DECLARE_COMPLETION(cpu_running);
@@ -463,6 +464,7 @@
 	S(IPI_CALL_FUNC, "Function call interrupts"),
 	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
 	S(IPI_CPU_STOP, "CPU stop interrupts"),
+	S(IPI_CPU_BACKTRACE, "CPU backtrace"),
 };
 
 void show_ipi_list(struct seq_file *p, int prec)
@@ -588,6 +590,58 @@
 		cpu_relax();
 }
 
+static cpumask_t backtrace_mask;
+static DEFINE_RAW_SPINLOCK(backtrace_lock);
+
+/* "in progress" flag of arch_trigger_all_cpu_backtrace */
+static unsigned long backtrace_flag;
+
+void smp_send_all_cpu_backtrace(void)
+{
+	unsigned int this_cpu = smp_processor_id();
+	int i;
+
+	if (test_and_set_bit(0, &backtrace_flag))
+		/*
+		 * If there is already a trigger_all_cpu_backtrace() in progress
+		 * (backtrace_flag == 1), avoid emitting a duplicate dump.
+		 */
+		return;
+
+	cpumask_copy(&backtrace_mask, cpu_online_mask);
+	cpu_clear(this_cpu, backtrace_mask);
+
+	pr_info("Backtrace for cpu %d (current):\n", this_cpu);
+	dump_stack();
+
+	pr_info("\nsending IPI to all other CPUs:\n");
+	smp_cross_call(&backtrace_mask, IPI_CPU_BACKTRACE);
+
+	/* Wait for up to 10 seconds for all other CPUs to do the backtrace */
+	for (i = 0; i < 10 * 1000; i++) {
+		if (cpumask_empty(&backtrace_mask))
+			break;
+		mdelay(1);
+	}
+
+	clear_bit(0, &backtrace_flag);
+	smp_mb__after_clear_bit();
+}
+
+/*
+ * ipi_cpu_backtrace - handle IPI from smp_send_all_cpu_backtrace()
+ */
+static void ipi_cpu_backtrace(unsigned int cpu, struct pt_regs *regs)
+{
+	if (cpu_isset(cpu, backtrace_mask)) {
+		raw_spin_lock(&backtrace_lock);
+		pr_warning("IPI backtrace for cpu %d\n", cpu);
+		show_regs(regs);
+		raw_spin_unlock(&backtrace_lock);
+		cpu_clear(cpu, backtrace_mask);
+	}
+}
+
 /*
  * Main handler for inter-processor interrupts
  */
@@ -638,6 +692,10 @@
 		irq_exit();
 		break;
 
+	case IPI_CPU_BACKTRACE:
+		ipi_cpu_backtrace(cpu, regs);
+		break;
+
 	default:
 		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
 		       cpu, ipinr);
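
A hedged usage sketch for the new IPI path: any context that can tolerate the wait loop above (up to ten seconds of mdelay) may request a dump of every online CPU. The caller name below is hypothetical.

/* Hypothetical caller: dump all CPUs when a watchdog fires. Output from
 * the other CPUs is serialized by backtrace_lock in ipi_cpu_backtrace(). */
static void watchdog_dump_all_cpus(void)
{
	arch_trigger_all_cpu_backtrace();
}
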
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 9e51be9..8045a48 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -7,6 +7,7 @@
 
 obj-$(CONFIG_MMU)		+= fault-armv.o flush.o idmap.o ioremap.o \
 				   mmap.o pgd.o mmu.o
+obj-$(CONFIG_DEBUG_RODATA)	+= rodata.o
 
 ifneq ($(CONFIG_MMU),y)
 obj-y				+= nommu.o
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index c465fac..90a130f 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -33,6 +33,9 @@
 static DEFINE_RAW_SPINLOCK(l2x0_lock);
 static u32 l2x0_way_mask;	/* Bitmask of active ways */
 static u32 l2x0_size;
+static u32 l2x0_cache_id;
+static unsigned int l2x0_sets;
+static unsigned int l2x0_ways;
 static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
 
 /* Aurora don't have the cache ID register available, so we have to
@@ -49,6 +52,13 @@
 
 static bool of_init = false;
 
+static inline bool is_pl310_rev(int rev)
+{
+	return (l2x0_cache_id &
+		(L2X0_CACHE_ID_PART_MASK | L2X0_CACHE_ID_REV_MASK)) ==
+			(L2X0_CACHE_ID_PART_L310 | rev);
+}
+
 static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
 {
 	/* wait for cache operation by line or way to complete */
@@ -137,6 +147,23 @@
 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
+#ifdef CONFIG_PL310_ERRATA_727915
+static void l2x0_for_each_set_way(void __iomem *reg)
+{
+	int set;
+	int way;
+	unsigned long flags;
+
+	for (way = 0; way < l2x0_ways; way++) {
+		raw_spin_lock_irqsave(&l2x0_lock, flags);
+		for (set = 0; set < l2x0_sets; set++)
+			writel_relaxed((way << 28) | (set << 5), reg);
+		cache_sync();
+		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+	}
+}
+#endif
+
 static void __l2x0_flush_all(void)
 {
 	debug_writel(0x03);
@@ -150,6 +177,13 @@
 {
 	unsigned long flags;
 
+#ifdef CONFIG_PL310_ERRATA_727915
+	if (is_pl310_rev(REV_PL310_R2P0)) {
+		l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_INV_LINE_IDX);
+		return;
+	}
+#endif
+
 	/* clean all ways */
 	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	__l2x0_flush_all();
@@ -160,11 +194,20 @@
 {
 	unsigned long flags;
 
+#ifdef CONFIG_PL310_ERRATA_727915
+	if (is_pl310_rev(REV_PL310_R2P0)) {
+		l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_LINE_IDX);
+		return;
+	}
+#endif
+
 	/* clean all ways */
 	raw_spin_lock_irqsave(&l2x0_lock, flags);
+	debug_writel(0x03);
 	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
 	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
 	cache_sync();
+	debug_writel(0x00);
 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
@@ -323,65 +366,64 @@
 void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 {
 	u32 aux;
-	u32 cache_id;
 	u32 way_size = 0;
-	int ways;
 	int way_size_shift = L2X0_WAY_SIZE_SHIFT;
 	const char *type;
 
 	l2x0_base = base;
 	if (cache_id_part_number_from_dt)
-		cache_id = cache_id_part_number_from_dt;
+		l2x0_cache_id = cache_id_part_number_from_dt;
 	else
-		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
+		l2x0_cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
 	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 
 	aux &= aux_mask;
 	aux |= aux_val;
 
 	/* Determine the number of ways */
-	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
+	switch (l2x0_cache_id & L2X0_CACHE_ID_PART_MASK) {
 	case L2X0_CACHE_ID_PART_L310:
 		if (aux & (1 << 16))
-			ways = 16;
+			l2x0_ways = 16;
 		else
-			ways = 8;
+			l2x0_ways = 8;
 		type = "L310";
 #ifdef CONFIG_PL310_ERRATA_753970
 		/* Unmapped register. */
 		sync_reg_offset = L2X0_DUMMY_REG;
 #endif
-		if ((cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0)
+		if ((l2x0_cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0)
 			outer_cache.set_debug = pl310_set_debug;
 		break;
 	case L2X0_CACHE_ID_PART_L210:
-		ways = (aux >> 13) & 0xf;
+		l2x0_ways = (aux >> 13) & 0xf;
 		type = "L210";
 		break;
 
 	case AURORA_CACHE_ID:
 		sync_reg_offset = AURORA_SYNC_REG;
-		ways = (aux >> 13) & 0xf;
-		ways = 2 << ((ways + 1) >> 2);
+		l2x0_ways = (aux >> 13) & 0xf;
+		l2x0_ways = 2 << ((l2x0_ways + 1) >> 2);
 		way_size_shift = AURORA_WAY_SIZE_SHIFT;
 		type = "Aurora";
 		break;
 	default:
 		/* Assume unknown chips have 8 ways */
-		ways = 8;
+		l2x0_ways = 8;
 		type = "L2x0 series";
 		break;
 	}
 
-	l2x0_way_mask = (1 << ways) - 1;
+	l2x0_way_mask = (1 << l2x0_ways) - 1;
 
 	/*
 	 * L2 cache Size =  Way size * Number of ways
 	 */
 	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
-	way_size = 1 << (way_size + way_size_shift);
+	way_size = SZ_1K << (way_size + way_size_shift);
 
-	l2x0_size = ways * way_size * SZ_1K;
+	l2x0_size = l2x0_ways * way_size;
+	l2x0_sets = way_size / CACHE_LINE_SIZE;
 
 	/*
 	 * Check if l2x0 controller is already enabled.
@@ -390,7 +432,7 @@
 	 */
 	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
 		/* Make sure that I&D is not locked down when starting */
-		l2x0_unlock(cache_id);
+		l2x0_unlock(l2x0_cache_id);
 
 		/* l2x0 controller is disabled */
 		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
@@ -419,7 +461,7 @@
 
 	printk(KERN_INFO "%s cache controller enabled\n", type);
 	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
-			ways, cache_id, aux, l2x0_size);
+			l2x0_ways, l2x0_cache_id, aux, l2x0_size);
 }
 
 #ifdef CONFIG_OF
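
A worked example of the geometry computed above (values assumed for illustration; L2X0_WAY_SIZE_SHIFT is 3 and the PL310 line size is 32 bytes): a way-size field of 3 gives SZ_1K << 6 = 64 KiB per way, so 16 ways make a 1 MiB cache with 2048 sets per way.

#include <stdio.h>

/* Illustrative recomputation of l2x0_size and l2x0_sets above. */
int main(void)
{
	unsigned way_field = 3, way_size_shift = 3, ways = 16, line = 32;
	unsigned way_size = 1024u << (way_field + way_size_shift);	/* 64 KiB */
	unsigned size = ways * way_size;				/* 1 MiB  */
	unsigned sets = way_size / line;				/* 2048   */

	printf("way=%u B total=%u B sets=%u\n", way_size, size, sets);
	return 0;
}
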
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index d8fd4d4..7a3d3d8 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -270,6 +270,11 @@
  *	- end     - virtual end address of region
  */
 ENTRY(v6_dma_flush_range)
+#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT
+	sub	r2, r1, r0
+	cmp	r2, #CONFIG_CACHE_FLUSH_RANGE_LIMIT
+	bhi	v6_dma_flush_dcache_all
+#endif
 #ifdef CONFIG_DMA_CACHE_RWFO
 	ldrb	r2, [r0]		@ read for ownership
 	strb	r2, [r0]		@ write for ownership
@@ -292,6 +297,18 @@
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
 	mov	pc, lr
 
+#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT
+v6_dma_flush_dcache_all:
+	mov	r0, #0
+#ifdef HARVARD_CACHE
+	mcr	p15, 0, r0, c7, c14, 0		@ D cache clean+invalidate
+#else
+	mcr	p15, 0, r0, c7, c15, 0		@ Cache clean+invalidate
+#endif
+	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
+	mov	pc, lr
+#endif
+
 /*
  *	dma_map_area(start, size, dir)
  *	- start	- kernel virtual start address
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 160da6d..9820ad4 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -274,10 +274,10 @@
 		local_irq_enable();
 
 	/*
-	 * If we're in an interrupt or have no user
+	 * If we're in an interrupt, have IRQs disabled, or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_atomic() || !mm)
+	if (in_atomic() || irqs_disabled() || !mm)
 		goto no_context;
 
 	if (user_mode(regs))
@@ -449,8 +449,16 @@
 
 	if (pud_none(*pud_k))
 		goto bad_area;
-	if (!pud_present(*pud))
+	if (!pud_present(*pud)) {
 		set_pud(pud, *pud_k);
+		/*
+		 * There is a small window during free_pgtables() where the
+		 * user *pud entry is 0 but the TLB has not been invalidated
+		 * and we get a level 2 (pmd) translation fault caused by the
+		 * intermediate TLB caching of the old level 1 (pud) entry.
+		 */
+		flush_tlb_kernel_page(addr);
+	}
 
 	pmd = pmd_offset(pud, addr);
 	pmd_k = pmd_offset(pud_k, addr);
@@ -473,8 +481,9 @@
 #endif
 	if (pmd_none(pmd_k[index]))
 		goto bad_area;
+	if (!pmd_present(pmd[index]))
+		copy_pmd(pmd, pmd_k);
 
-	copy_pmd(pmd, pmd_k);
 	return 0;
 
 bad_area:
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 5ef506c..0a23581 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -173,10 +173,9 @@
 {
 	unsigned long random_factor = 0UL;
 
-	/* 8 bits of randomness in 20 address space bits */
 	if ((current->flags & PF_RANDOMIZE) &&
 	    !(current->personality & ADDR_NO_RANDOMIZE))
-		random_factor = (get_random_int() % (1 << 8)) << PAGE_SHIFT;
+		random_factor = (get_random_long() & ((1UL << mmap_rnd_bits) - 1)) << PAGE_SHIFT;
 
 	if (mmap_is_legacy()) {
 		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
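
The replacement keeps the old default while letting mmap_rnd_bits scale the entropy. As a worked example (illustrative), mmap_rnd_bits = 8 with 4 KiB pages spans 2^8 pages, i.e. the same 1 MiB window the hard-coded 8 bits gave.

#include <stdio.h>

/* Illustrative: span of the randomized mmap base for given rnd bits. */
int main(void)
{
	unsigned mmap_rnd_bits = 8, page_shift = 12;
	unsigned long span = (1UL << mmap_rnd_bits) << page_shift;

	printf("randomization span: %lu bytes\n", span);	/* 1048576 */
	return 0;
}
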
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4c7d5cd..66088a2 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -605,11 +605,25 @@
 	return early_alloc_aligned(sz, sz);
 }
 
-static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
+static pte_t * __init early_pte_alloc(pmd_t *pmd)
+{
+	if (pmd_none(*pmd) || pmd_bad(*pmd))
+		return early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
+	return pmd_page_vaddr(*pmd);
+}
+
+static void __init early_pte_install(pmd_t *pmd, pte_t *pte, unsigned long prot)
+{
+	__pmd_populate(pmd, __pa(pte), prot);
+	BUG_ON(pmd_bad(*pmd));
+}
+
+static pte_t * __init early_pte_alloc_and_install(pmd_t *pmd,
+	unsigned long addr, unsigned long prot)
 {
 	if (pmd_none(*pmd)) {
-		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
-		__pmd_populate(pmd, __pa(pte), prot);
+		pte_t *pte = early_pte_alloc(pmd);
+		early_pte_install(pmd, pte, prot);
 	}
 	BUG_ON(pmd_bad(*pmd));
 	return pte_offset_kernel(pmd, addr);
@@ -619,11 +633,17 @@
 				  unsigned long end, unsigned long pfn,
 				  const struct mem_type *type)
 {
-	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
+	pte_t *start_pte = early_pte_alloc(pmd);
+	pte_t *pte = start_pte + pte_index(addr);
+
+	/* If replacing a section mapping, the whole section must be replaced */
+	BUG_ON(!pmd_none(*pmd) && pmd_bad(*pmd) && ((addr | end) & ~PMD_MASK));
+
 	do {
 		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
 		pfn++;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
+	early_pte_install(pmd, start_pte, type->prot_l1);
 }
 
 static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
@@ -655,7 +675,8 @@
 
 static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 				      unsigned long end, phys_addr_t phys,
-				      const struct mem_type *type)
+				      const struct mem_type *type,
+				      bool force_pages)
 {
 	pmd_t *pmd = pmd_offset(pud, addr);
 	unsigned long next;
@@ -672,7 +693,8 @@
 		 * aligned to a section boundary.
 		 */
 		if (type->prot_sect &&
-				((addr | next | phys) & ~SECTION_MASK) == 0) {
+				((addr | next | phys) & ~SECTION_MASK) == 0 &&
+				!force_pages) {
 			__map_init_section(pmd, addr, next, phys, type);
 		} else {
 			alloc_init_pte(pmd, addr, next,
@@ -685,15 +707,15 @@
 }
 
 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
-				  unsigned long end, phys_addr_t phys,
-				  const struct mem_type *type)
+	unsigned long end, unsigned long phys, const struct mem_type *type,
+	bool force_pages)
 {
 	pud_t *pud = pud_offset(pgd, addr);
 	unsigned long next;
 
 	do {
 		next = pud_addr_end(addr, end);
-		alloc_init_pmd(pud, addr, next, phys, type);
+		alloc_init_pmd(pud, addr, next, phys, type, force_pages);
 		phys += next - addr;
 	} while (pud++, addr = next, addr != end);
 }
@@ -767,7 +789,7 @@
  * offsets, and we take full advantage of sections and
  * supersections.
  */
-static void __init create_mapping(struct map_desc *md)
+static void __init create_mapping(struct map_desc *md, bool force_pages)
 {
 	unsigned long addr, length, end;
 	phys_addr_t phys;
@@ -817,7 +839,7 @@
 	do {
 		unsigned long next = pgd_addr_end(addr, end);
 
-		alloc_init_pud(pgd, addr, next, phys, type);
+		alloc_init_pud(pgd, addr, next, phys, type, force_pages);
 
 		phys += next - addr;
 		addr = next;
@@ -839,7 +861,7 @@
 	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
 
 	for (md = io_desc; nr; md++, nr--) {
-		create_mapping(md);
+		create_mapping(md, false);
 
 		vm = &svm->vm;
 		vm->addr = (void *)(md->virtual & PAGE_MASK);
@@ -960,7 +982,7 @@
 	map.virtual &= PAGE_MASK;
 	map.length = PAGE_SIZE;
 	map.type = MT_DEVICE;
-	create_mapping(&map);
+	create_mapping(&map, false);
 }
 #endif
 
@@ -1005,6 +1027,28 @@
 		struct membank *bank = &meminfo.bank[j];
 		*bank = meminfo.bank[i];
 
+#ifdef CONFIG_SPARSEMEM
+		if (pfn_to_section_nr(bank_pfn_start(bank)) !=
+		    pfn_to_section_nr(bank_pfn_end(bank) - 1)) {
+			phys_addr_t sz;
+			unsigned long start_pfn = bank_pfn_start(bank);
+			unsigned long end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
+			sz = ((phys_addr_t)(end_pfn - start_pfn) << PAGE_SHIFT);
+
+			if (meminfo.nr_banks >= NR_BANKS) {
+				pr_crit("NR_BANKS too low, ignoring %lld bytes of memory\n",
+					(unsigned long long)(bank->size - sz));
+			} else {
+				memmove(bank + 1, bank,
+					(meminfo.nr_banks - i) * sizeof(*bank));
+				meminfo.nr_banks++;
+				bank[1].size -= sz;
+				bank[1].start = __pfn_to_phys(end_pfn);
+			}
+			bank->size = sz;
+		}
+#endif
+
 		if (bank->start > ULONG_MAX)
 			highmem = 1;
 
@@ -1202,7 +1246,7 @@
 	map.virtual = MODULES_VADDR;
 	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
 	map.type = MT_ROM;
-	create_mapping(&map);
+	create_mapping(&map, false);
 #endif
 
 	/*
@@ -1213,14 +1257,14 @@
 	map.virtual = FLUSH_BASE;
 	map.length = SZ_1M;
 	map.type = MT_CACHECLEAN;
-	create_mapping(&map);
+	create_mapping(&map, false);
 #endif
 #ifdef FLUSH_BASE_MINICACHE
 	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
 	map.virtual = FLUSH_BASE_MINICACHE;
 	map.length = SZ_1M;
 	map.type = MT_MINICLEAN;
-	create_mapping(&map);
+	create_mapping(&map, false);
 #endif
 
 	/*
@@ -1236,13 +1280,13 @@
 #else
 	map.type = MT_LOW_VECTORS;
 #endif
-	create_mapping(&map);
+	create_mapping(&map, false);
 
 	if (!vectors_high()) {
 		map.virtual = 0;
 		map.length = PAGE_SIZE * 2;
 		map.type = MT_LOW_VECTORS;
-		create_mapping(&map);
+		create_mapping(&map, false);
 	}
 
 	/* Now create a kernel read-only mapping */
@@ -1275,20 +1319,23 @@
 static void __init kmap_init(void)
 {
 #ifdef CONFIG_HIGHMEM
-	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
+	pkmap_page_table = early_pte_alloc_and_install(pmd_off_k(PKMAP_BASE),
 		PKMAP_BASE, _PAGE_KERNEL_TABLE);
 #endif
 }
 
+
 static void __init map_lowmem(void)
 {
 	struct memblock_region *reg;
+	phys_addr_t start;
+	phys_addr_t end;
+	struct map_desc map;
 
 	/* Map all the lowmem memory banks. */
 	for_each_memblock(memory, reg) {
-		phys_addr_t start = reg->base;
-		phys_addr_t end = start + reg->size;
-		struct map_desc map;
+		start = reg->base;
+		end = start + reg->size;
 
 		if (end > arm_lowmem_limit)
 			end = arm_lowmem_limit;
@@ -1300,8 +1347,20 @@
 		map.length = end - start;
 		map.type = MT_MEMORY;
 
-		create_mapping(&map);
+		create_mapping(&map, false);
 	}
+
+#ifdef CONFIG_DEBUG_RODATA
+	start = __pa(_stext) & PMD_MASK;
+	end = ALIGN(__pa(__end_rodata), PMD_SIZE);
+
+	map.pfn = __phys_to_pfn(start);
+	map.virtual = __phys_to_virt(start);
+	map.length = end - start;
+	map.type = MT_MEMORY;
+
+	create_mapping(&map, true);
+#endif
 }
 
 /*
diff --git a/arch/arm/mm/rodata.c b/arch/arm/mm/rodata.c
new file mode 100644
index 0000000..9a8eb84
--- /dev/null
+++ b/arch/arm/mm/rodata.c
@@ -0,0 +1,159 @@
+/*
+ *  linux/arch/arm/mm/rodata.c
+ *
+ *  Copyright (C) 2011 Google, Inc.
+ *
+ *  Author: Colin Cross <ccross@android.com>
+ *
+ *  Based on x86 implementation in arch/x86/mm/init_32.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#include <asm/cache.h>
+#include <asm/pgtable.h>
+#include <asm/rodata.h>
+#include <asm/sections.h>
+#include <asm/tlbflush.h>
+
+#include "mm.h"
+
+static int kernel_set_to_readonly __read_mostly;
+
+#ifdef CONFIG_DEBUG_RODATA_TEST
+static const int rodata_test_data = 0xC3;
+
+static noinline void rodata_test(void)
+{
+	int result;
+
+	pr_info("%s: attempting to write to read-only section:\n", __func__);
+
+	if (*(volatile int *)&rodata_test_data != 0xC3) {
+		pr_err("read only data changed before test\n");
+		return;
+	}
+
+	/*
+	 * Attempt to write to rodata_test_data, trapping the expected
+	 * data abort.  If the trap executed, result will be 1.  If it didn't,
+	 * result will be 0xFF.
+	 */
+	asm volatile(
+		"0:	str	%[zero], [%[rodata_test_data]]\n"
+		"	mov	%[result], #0xFF\n"
+		"	b	2f\n"
+		"1:	mov	%[result], #1\n"
+		"2:\n"
+
+		/* Exception fixup - if store at label 0 faults, jumps to 1 */
+		".pushsection __ex_table, \"a\"\n"
+		"	.long	0b, 1b\n"
+		".popsection\n"
+
+		: [result] "=r" (result)
+		: [rodata_test_data] "r" (&rodata_test_data), [zero] "r" (0)
+		: "memory"
+	);
+
+	if (result == 1)
+		pr_info("write to read-only section trapped, success\n");
+	else
+		pr_err("write to read-only section NOT trapped, test failed\n");
+
+	if (*(volatile int *)&rodata_test_data != 0xC3)
+		pr_err("read only data changed during write\n");
+}
+#else
+static inline void rodata_test(void) { }
+#endif
+
+static int set_page_attributes(unsigned long virt, int numpages,
+	pte_t (*f)(pte_t))
+{
+	pmd_t *pmd;
+	pte_t *pte;
+	unsigned long start = virt;
+	unsigned long end = virt + (numpages << PAGE_SHIFT);
+	unsigned long pmd_end;
+
+	while (virt < end) {
+		pmd = pmd_off_k(virt);
+		pmd_end = min(ALIGN(virt + 1, PMD_SIZE), end);
+
+		if ((pmd_val(*pmd) & PMD_TYPE_MASK) != PMD_TYPE_TABLE) {
+			pr_err("%s: pmd %p=%08lx for %08lx not page table\n",
+				__func__, pmd, pmd_val(*pmd), virt);
+			virt = pmd_end;
+			continue;
+		}
+
+		while (virt < pmd_end) {
+			pte = pte_offset_kernel(pmd, virt);
+			set_pte_ext(pte, f(*pte), 0);
+			virt += PAGE_SIZE;
+		}
+	}
+
+	flush_tlb_kernel_range(start, end);
+
+	return 0;
+}
+
+int set_memory_ro(unsigned long virt, int numpages)
+{
+	return set_page_attributes(virt, numpages, pte_wrprotect);
+}
+EXPORT_SYMBOL(set_memory_ro);
+
+int set_memory_rw(unsigned long virt, int numpages)
+{
+	return set_page_attributes(virt, numpages, pte_mkwrite);
+}
+EXPORT_SYMBOL(set_memory_rw);
+
+void set_kernel_text_rw(void)
+{
+	unsigned long start = PAGE_ALIGN((unsigned long)_text);
+	unsigned long size = PAGE_ALIGN((unsigned long)__end_rodata) - start;
+
+	if (!kernel_set_to_readonly)
+		return;
+
+	pr_debug("Set kernel text: %lx - %lx to read-write\n",
+		 start, start + size);
+
+	set_memory_rw(start, size >> PAGE_SHIFT);
+}
+
+void set_kernel_text_ro(void)
+{
+	unsigned long start = PAGE_ALIGN((unsigned long)_text);
+	unsigned long size = PAGE_ALIGN((unsigned long)__end_rodata) - start;
+
+	if (!kernel_set_to_readonly)
+		return;
+
+	pr_info_once("Write protecting the kernel text section %lx - %lx\n",
+		start, start + size);
+
+	pr_debug("Set kernel text: %lx - %lx to read only\n",
+		 start, start + size);
+
+	set_memory_ro(start, size >> PAGE_SHIFT);
+}
+
+void mark_rodata_ro(void)
+{
+	kernel_set_to_readonly = 1;
+
+	set_kernel_text_ro();
+
+	rodata_test();
+}
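
A hedged usage sketch tying this back to the ftrace hooks earlier in the series: a caller makes a page of text writable, patches it, then restores protection. The function name below is hypothetical and the address is rounded down to a page boundary.

/* Hedged sketch; 'target_func' is illustrative, not from this patch. */
unsigned long addr = (unsigned long)target_func & PAGE_MASK;

set_memory_rw(addr, 1);		/* one page -> read-write              */
/* ... patch an instruction within the page ...                       */
set_memory_ro(addr, 1);		/* restore RO; both calls flush TLBs   */
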
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 5dfbb0b..452fb3a 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <linux/uaccess.h>
 #include <linux/user.h>
+#include <linux/export.h>
 
 #include <asm/cp15.h>
 #include <asm/cputype.h>
@@ -648,6 +649,52 @@
 	return NOTIFY_OK;
 }
 
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+/*
+ * Kernel-side NEON support functions
+ */
+void kernel_neon_begin(void)
+{
+	struct thread_info *thread = current_thread_info();
+	unsigned int cpu;
+	u32 fpexc;
+
+	/*
+	 * Kernel mode NEON is only allowed outside of interrupt context
+	 * with preemption disabled. This will make sure that the kernel
+	 * mode NEON register contents never need to be preserved.
+	 */
+	BUG_ON(in_interrupt());
+	cpu = get_cpu();
+
+	fpexc = fmrx(FPEXC) | FPEXC_EN;
+	fmxr(FPEXC, fpexc);
+
+	/*
+	 * Save the userland NEON/VFP state. Under UP,
+	 * the owner could be a task other than 'current'
+	 */
+	if (vfp_state_in_hw(cpu, thread))
+		vfp_save_state(&thread->vfpstate, fpexc);
+#ifndef CONFIG_SMP
+	else if (vfp_current_hw_state[cpu] != NULL)
+		vfp_save_state(vfp_current_hw_state[cpu], fpexc);
+#endif
+	vfp_current_hw_state[cpu] = NULL;
+}
+EXPORT_SYMBOL(kernel_neon_begin);
+
+void kernel_neon_end(void)
+{
+	/* Disable the NEON/VFP unit. */
+	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+	put_cpu();
+}
+EXPORT_SYMBOL(kernel_neon_end);
+
+#endif /* CONFIG_KERNEL_MODE_NEON */
+
 /*
  * VFP support code initialisation.
  */
@@ -731,4 +778,4 @@
 	return 0;
 }
 
-late_initcall(vfp_init);
+core_initcall(vfp_init);
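
A hedged usage sketch of the new kernel-mode NEON API (the worker function is hypothetical): callers must be in process context and must not sleep between the two calls, since kernel_neon_begin() pins the CPU with get_cpu().

/* Hedged sketch; neon_accelerated_work() is illustrative only. */
kernel_neon_begin();		/* saves userland VFP/NEON state, get_cpu() */
neon_accelerated_work();	/* NEON registers are free to clobber here  */
kernel_neon_end();		/* disables the VFP/NEON unit, put_cpu()    */
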
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 661ccf8..8f15737 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -8,15 +8,22 @@
 	select ARM_AMBA
 	select ARM_ARCH_TIMER
 	select ARM_GIC
+	select AUDIT_ARCH_COMPAT_GENERIC
 	select CLONE_BACKWARDS
 	select COMMON_CLK
 	select GENERIC_CLOCKEVENTS
+	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+	select GENERIC_CPU_AUTOPROBE
 	select GENERIC_IOMAP
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_TIME_VSYSCALL
 	select HARDIRQS_SW_RESEND
+	select HAVE_ARCH_AUDITSYSCALL
+	select HAVE_ARCH_SECCOMP_FILTER
+	select HAVE_ARCH_MMAP_RND_BITS
+	select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_DEBUG_BUGVERBOSE
 	select HAVE_DEBUG_KMEMLEAK
@@ -53,6 +60,10 @@
 config NO_IOPORT
 	def_bool y
 
+config ILLEGAL_POINTER_VALUE
+	hex
+	default 0xdead000000000000
+
 config STACKTRACE_SUPPORT
 	def_bool y
 
@@ -155,6 +166,31 @@
 	depends on SMP
 	default "4"
 
+config SWP_EMULATE
+	bool "Emulate SWP/SWPB instructions"
+	help
+	  ARMv6 architecture deprecates use of the SWP/SWPB instructions. ARMv8
+	  obsoletes the use of SWP/SWPB instructions. ARMv7 multiprocessing
+	  extensions introduce the ability to disable these instructions,
+	  triggering an undefined instruction exception when executed. Say Y
+	  here to enable software emulation of these instructions for userspace
+	  (not kernel) using LDREX/STREX. Also creates /proc/cpu/swp_emulation
+	  for statistics.
+
+	  In some older versions of glibc [<=2.8] SWP is used during futex
+	  trylock() operations with the assumption that the code will not
+	  be preempted. This invalid assumption may be more likely to fail
+	  with SWP emulation enabled, leading to deadlock of the user
+	  application.
+
+	  NOTE: when accessing uncached shared regions, LDREX/STREX rely
+	  on an external transaction monitoring block called a global
+	  monitor to maintain update atomicity. If your system does not
+	  implement a global monitor, this option can cause programs that
+	  perform SWP operations to uncached memory to deadlock.
+
+	  If unsure, say Y.
+
 source kernel/Kconfig.preempt
 
 config HZ
@@ -185,8 +221,43 @@
 	  Enable hardware performance counter support for perf events. If
 	  disabled, perf events will use software events only.
 
+config ARMV7_COMPAT
+	bool "Kernel support for ARMv7 applications"
+	depends on COMPAT
+	select SWP_EMULATE
+	help
+	 This option enables features that allow applications that ran on an
+	 ARMv7 or older processor to continue functioning.
+
+	 If you want to execute ARMv7 applications, say Y
+
+config ARMV7_COMPAT_CPUINFO
+	bool "Report backwards compatible cpu features in /proc/cpuinfo"
+	depends on ARMV7_COMPAT
+	default y
+	help
+	 This option makes /proc/cpuinfo list CPU features that an ARMv7 or
+	 earlier kernel would report, but are not optional on an ARMv8 or later
+	 processor.
+
+	 If you want to execute ARMv7 applications, say Y
+
 source "mm/Kconfig"
 
+config SECCOMP
+	bool "Enable seccomp to safely compute untrusted bytecode"
+	---help---
+	  This kernel feature is useful for number crunching applications
+	  that may need to compute untrusted bytecode during their
+	  execution. By using pipes or other transports made available to
+	  the process as file descriptors supporting the read/write
+	  syscalls, it's possible to isolate those applications in
+	  their own address space using seccomp. Once seccomp is
+	  enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
+	  and the task is only allowed to execute a few safe syscalls
+	  defined by each seccomp mode.
+
+
 endmenu
 
 menu "Boot options"
@@ -199,6 +270,23 @@
 	  entering them here. As a minimum, you should specify the
 	  root device (e.g. root=/dev/nfs).
 
+choice
+	prompt "Kernel command line type" if CMDLINE != ""
+	default CMDLINE_FROM_BOOTLOADER
+
+config CMDLINE_FROM_BOOTLOADER
+	bool "Use bootloader kernel arguments if available"
+	help
+	  Uses the command-line options passed by the boot loader. If
+	  the boot loader doesn't provide any, the default kernel command
+	  string provided in CMDLINE will be used.
+
+config CMDLINE_EXTEND
+	bool "Extend bootloader kernel arguments"
+	help
+	  The command-line arguments provided by the boot loader will be
+	  appended to the default kernel command string.
+
 config CMDLINE_FORCE
 	bool "Always use the default kernel command string"
 	help
@@ -206,6 +294,22 @@
 	  loader passes other arguments to the kernel.
 	  This is useful if you cannot or don't want to change the
 	  command-line options your boot loader passes to the kernel.
+endchoice
+
+config BUILD_ARM64_APPENDED_DTB_IMAGE
+	bool "Build a concatenated Image.gz/dtb by default"
+	depends on OF
+	help
+	  Enabling this option will cause a concatenated Image.gz and a list
+	  of DTBs to be built by default (instead of a standalone Image.gz).
+	  The image will be built in arch/arm64/boot/Image.gz-dtb.
+
+config BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES
+	string "Default dtb names"
+	depends on BUILD_ARM64_APPENDED_DTB_IMAGE
+	help
+	  Space-separated list of dtb names to append when
+	  building a concatenated Image.gz-dtb.
 
 endmenu
 
@@ -245,5 +349,8 @@
 source "security/Kconfig"
 
 source "crypto/Kconfig"
+if CRYPTO
+source "arch/arm64/crypto/Kconfig"
+endif
 
 source "lib/Kconfig"
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index c95c5cb..2b27721 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -20,6 +20,7 @@
 KBUILD_DEFCONFIG := defconfig
 
 KBUILD_CFLAGS	+= -mgeneral-regs-only
+KBUILD_CFLAGS	+= -fno-pic
 KBUILD_CPPFLAGS	+= -mlittle-endian
 AS		+= -EL
 LD		+= -EL
@@ -37,11 +38,17 @@
 export	TEXT_OFFSET GZFLAGS
 
 core-y		+= arch/arm64/kernel/ arch/arm64/mm/
+core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
 libs-y		:= arch/arm64/lib/ $(libs-y)
 libs-y		+= $(LIBGCC)
 
 # Default target when executing plain make
+ifeq ($(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE),y)
+KBUILD_IMAGE	:= Image.gz-dtb
+else
 KBUILD_IMAGE	:= Image.gz
+endif
+
 KBUILD_DTBS	:= dtbs
 
 all:	$(KBUILD_IMAGE) $(KBUILD_DTBS)
@@ -60,6 +67,9 @@
 dtbs: scripts
 	$(Q)$(MAKE) $(build)=$(boot)/dts dtbs
 
+Image.gz-dtb: vmlinux scripts dtbs
+	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
 # We use MRPROPER_FILES and CLEAN_FILES now
 archclean:
 	$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/arm64/boot/.gitignore b/arch/arm64/boot/.gitignore
index 8dab0bb..eb35511 100644
--- a/arch/arm64/boot/.gitignore
+++ b/arch/arm64/boot/.gitignore
@@ -1,2 +1,3 @@
 Image
 Image.gz
+Image.gz-dtb
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index 5a0e3ab..df51984 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -14,14 +14,27 @@
 # Based on the ia64 boot/Makefile.
 #
 
+include $(srctree)/arch/arm64/boot/dts/Makefile
+
 targets := Image Image.gz
 
+DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES))
+ifneq ($(DTB_NAMES),)
+DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
+else
+DTB_LIST := $(dtb-y)
+endif
+DTB_OBJS := $(addprefix $(obj)/dts/,$(DTB_LIST))
+
 $(obj)/Image: vmlinux FORCE
 	$(call if_changed,objcopy)
 
 $(obj)/Image.gz: $(obj)/Image FORCE
 	$(call if_changed,gzip)
 
+$(obj)/Image.gz-dtb: $(obj)/Image.gz $(DTB_OBJS) FORCE
+	$(call if_changed,cat)
+
 install: $(obj)/Image
 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
 	$(obj)/Image System.map "$(INSTALL_PATH)"
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index 68457e9..d58ea71 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -1,8 +1,15 @@
 dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb foundation-v8.dtb
 
 targets += dtbs
-targets += $(dtb-y)
 
-dtbs: $(addprefix $(obj)/, $(dtb-y))
+DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES))
+ifneq ($(DTB_NAMES),)
+DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
+else
+DTB_LIST := $(dtb-y)
+endif
+targets += $(DTB_LIST)
 
-clean-files := *.dtb
+dtbs: $(addprefix $(obj)/, $(DTB_LIST))
+
+clean-files := dts/*.dtb *.dtb
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 8d9696ad..8fd6e02 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -67,9 +67,16 @@
 # CONFIG_USB_SUPPORT is not set
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS is not set
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_USE_FOR_EXT23=y
+# CONFIG_EXT4_FS_POSIX_ACL is not set
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION=y
+# CONFIG_EXT4_DEBUG is not set
 CONFIG_FUSE_FS=y
 CONFIG_CUSE=y
 CONFIG_VFAT_FS=y
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
new file mode 100644
index 0000000..5562652
--- /dev/null
+++ b/arch/arm64/crypto/Kconfig
@@ -0,0 +1,53 @@
+
+menuconfig ARM64_CRYPTO
+	bool "ARM64 Accelerated Cryptographic Algorithms"
+	depends on ARM64
+	help
+	  Say Y here to choose from a selection of cryptographic algorithms
+	  implemented using ARM64 specific CPU features or instructions.
+
+if ARM64_CRYPTO
+
+config CRYPTO_SHA1_ARM64_CE
+	tristate "SHA-1 digest algorithm (ARMv8 Crypto Extensions)"
+	depends on ARM64 && KERNEL_MODE_NEON
+	select CRYPTO_HASH
+
+config CRYPTO_SHA2_ARM64_CE
+	tristate "SHA-224/SHA-256 digest algorithm (ARMv8 Crypto Extensions)"
+	depends on ARM64 && KERNEL_MODE_NEON
+	select CRYPTO_HASH
+
+config CRYPTO_GHASH_ARM64_CE
+	tristate "GHASH (for GCM chaining mode) using ARMv8 Crypto Extensions"
+	depends on ARM64 && KERNEL_MODE_NEON
+	select CRYPTO_HASH
+
+config CRYPTO_AES_ARM64_CE
+	tristate "AES core cipher using ARMv8 Crypto Extensions"
+	depends on ARM64 && KERNEL_MODE_NEON
+	select CRYPTO_ALGAPI
+	select CRYPTO_AES
+
+config CRYPTO_AES_ARM64_CE_CCM
+	tristate "AES in CCM mode using ARMv8 Crypto Extensions"
+	depends on ARM64 && KERNEL_MODE_NEON
+	select CRYPTO_ALGAPI
+	select CRYPTO_AES
+	select CRYPTO_AEAD
+
+config CRYPTO_AES_ARM64_CE_BLK
+	tristate "AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions"
+	depends on ARM64 && KERNEL_MODE_NEON
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_AES
+	select CRYPTO_ABLK_HELPER
+
+config CRYPTO_AES_ARM64_NEON_BLK
+	tristate "AES in ECB/CBC/CTR/XTS modes using NEON instructions"
+	depends on ARM64 && KERNEL_MODE_NEON
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_AES
+	select CRYPTO_ABLK_HELPER
+
+endif
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
new file mode 100644
index 0000000..a3f935f
--- /dev/null
+++ b/arch/arm64/crypto/Makefile
@@ -0,0 +1,38 @@
+#
+# linux/arch/arm64/crypto/Makefile
+#
+# Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+obj-$(CONFIG_CRYPTO_SHA1_ARM64_CE) += sha1-ce.o
+sha1-ce-y := sha1-ce-glue.o sha1-ce-core.o
+
+obj-$(CONFIG_CRYPTO_SHA2_ARM64_CE) += sha2-ce.o
+sha2-ce-y := sha2-ce-glue.o sha2-ce-core.o
+
+obj-$(CONFIG_CRYPTO_GHASH_ARM64_CE) += ghash-ce.o
+ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o
+
+obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o
+CFLAGS_aes-ce-cipher.o += -march=armv8-a+crypto
+
+obj-$(CONFIG_CRYPTO_AES_ARM64_CE_CCM) += aes-ce-ccm.o
+aes-ce-ccm-y := aes-ce-ccm-glue.o aes-ce-ccm-core.o
+
+obj-$(CONFIG_CRYPTO_AES_ARM64_CE_BLK) += aes-ce-blk.o
+aes-ce-blk-y := aes-glue-ce.o aes-ce.o
+
+obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o
+aes-neon-blk-y := aes-glue-neon.o aes-neon.o
+
+AFLAGS_aes-ce.o		:= -DINTERLEAVE=2 -DINTERLEAVE_INLINE
+AFLAGS_aes-neon.o	:= -DINTERLEAVE=4
+
+CFLAGS_aes-glue-ce.o	:= -DUSE_V8_CRYPTO_EXTENSIONS
+
+$(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
+	$(call if_changed_rule,cc_o_c)
diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S
new file mode 100644
index 0000000..432e484
--- /dev/null
+++ b/arch/arm64/crypto/aes-ce-ccm-core.S
@@ -0,0 +1,222 @@
+/*
+ * aesce-ccm-core.S - AES-CCM transform for ARMv8 with Crypto Extensions
+ *
+ * Copyright (C) 2013 - 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+
+	.text
+	.arch	armv8-a+crypto
+
+	/*
+	 * void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
+	 *			     u32 *macp, u8 const rk[], u32 rounds);
+	 */
+ENTRY(ce_aes_ccm_auth_data)
+	ldr	w8, [x3]			/* leftover from prev round? */
+	ld1	{v0.2d}, [x0]			/* load mac */
+	cbz	w8, 1f
+	sub	w8, w8, #16
+	eor	v1.16b, v1.16b, v1.16b
+0:	ldrb	w7, [x1], #1			/* get 1 byte of input */
+	subs	w2, w2, #1
+	add	w8, w8, #1
+	ins	v1.b[0], w7
+	ext	v1.16b, v1.16b, v1.16b, #1	/* rotate in the input bytes */
+	beq	8f				/* out of input? */
+	cbnz	w8, 0b
+	eor	v0.16b, v0.16b, v1.16b
+1:	ld1	{v3.2d}, [x4]			/* load first round key */
+	prfm	pldl1strm, [x1]
+	cmp	w5, #12				/* which key size? */
+	add	x6, x4, #16
+	sub	w7, w5, #2			/* modified # of rounds */
+	bmi	2f
+	bne	5f
+	mov	v5.16b, v3.16b
+	b	4f
+2:	mov	v4.16b, v3.16b
+	ld1	{v5.2d}, [x6], #16		/* load 2nd round key */
+3:	aese	v0.16b, v4.16b
+	aesmc	v0.16b, v0.16b
+4:	ld1	{v3.2d}, [x6], #16		/* load next round key */
+	aese	v0.16b, v5.16b
+	aesmc	v0.16b, v0.16b
+5:	ld1	{v4.2d}, [x6], #16		/* load next round key */
+	subs	w7, w7, #3
+	aese	v0.16b, v3.16b
+	aesmc	v0.16b, v0.16b
+	ld1	{v5.2d}, [x6], #16		/* load next round key */
+	bpl	3b
+	aese	v0.16b, v4.16b
+	subs	w2, w2, #16			/* last data? */
+	eor	v0.16b, v0.16b, v5.16b		/* final round */
+	bmi	6f
+	ld1	{v1.16b}, [x1], #16		/* load next input block */
+	eor	v0.16b, v0.16b, v1.16b		/* xor with mac */
+	bne	1b
+6:	st1	{v0.2d}, [x0]			/* store mac */
+	beq	10f
+	adds	w2, w2, #16
+	beq	10f
+	mov	w8, w2
+7:	ldrb	w7, [x1], #1
+	umov	w6, v0.b[0]
+	eor	w6, w6, w7
+	strb	w6, [x0], #1
+	subs	w2, w2, #1
+	beq	10f
+	ext	v0.16b, v0.16b, v0.16b, #1	/* rotate out the mac bytes */
+	b	7b
+8:	mov	w7, w8
+	add	w8, w8, #16
+9:	ext	v1.16b, v1.16b, v1.16b, #1
+	adds	w7, w7, #1
+	bne	9b
+	eor	v0.16b, v0.16b, v1.16b
+	st1	{v0.2d}, [x0]
+10:	str	w8, [x3]
+	ret
+ENDPROC(ce_aes_ccm_auth_data)
+
+	/*
+	 * void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u8 const rk[],
+	 * 			 u32 rounds);
+	 */
+ENTRY(ce_aes_ccm_final)
+	ld1	{v3.2d}, [x2], #16		/* load first round key */
+	ld1	{v0.2d}, [x0]			/* load mac */
+	cmp	w3, #12				/* which key size? */
+	sub	w3, w3, #2			/* modified # of rounds */
+	ld1	{v1.2d}, [x1]			/* load 1st ctriv */
+	bmi	0f
+	bne	3f
+	mov	v5.16b, v3.16b
+	b	2f
+0:	mov	v4.16b, v3.16b
+1:	ld1	{v5.2d}, [x2], #16		/* load next round key */
+	aese	v0.16b, v4.16b
+	aese	v1.16b, v4.16b
+	aesmc	v0.16b, v0.16b
+	aesmc	v1.16b, v1.16b
+2:	ld1	{v3.2d}, [x2], #16		/* load next round key */
+	aese	v0.16b, v5.16b
+	aese	v1.16b, v5.16b
+	aesmc	v0.16b, v0.16b
+	aesmc	v1.16b, v1.16b
+3:	ld1	{v4.2d}, [x2], #16		/* load next round key */
+	subs	w3, w3, #3
+	aese	v0.16b, v3.16b
+	aese	v1.16b, v3.16b
+	aesmc	v0.16b, v0.16b
+	aesmc	v1.16b, v1.16b
+	bpl	1b
+	aese	v0.16b, v4.16b
+	aese	v1.16b, v4.16b
+	/* final round key cancels out */
+	eor	v0.16b, v0.16b, v1.16b		/* en-/decrypt the mac */
+	st1	{v0.2d}, [x0]			/* store result */
+	ret
+ENDPROC(ce_aes_ccm_final)
+
+	.macro	aes_ccm_do_crypt,enc
+	ldr	x8, [x6, #8]			/* load lower ctr */
+	ld1	{v0.2d}, [x5]			/* load mac */
+	rev	x8, x8				/* keep swabbed ctr in reg */
+0:	/* outer loop */
+	ld1	{v1.1d}, [x6]			/* load upper ctr */
+	prfm	pldl1strm, [x1]
+	add	x8, x8, #1
+	rev	x9, x8
+	cmp	w4, #12				/* which key size? */
+	sub	w7, w4, #2			/* get modified # of rounds */
+	ins	v1.d[1], x9			/* no carry in lower ctr */
+	ld1	{v3.2d}, [x3]			/* load first round key */
+	add	x10, x3, #16
+	bmi	1f
+	bne	4f
+	mov	v5.16b, v3.16b
+	b	3f
+1:	mov	v4.16b, v3.16b
+	ld1	{v5.2d}, [x10], #16		/* load 2nd round key */
+2:	/* inner loop: 3 rounds, 2x interleaved */
+	aese	v0.16b, v4.16b
+	aese	v1.16b, v4.16b
+	aesmc	v0.16b, v0.16b
+	aesmc	v1.16b, v1.16b
+3:	ld1	{v3.2d}, [x10], #16		/* load next round key */
+	aese	v0.16b, v5.16b
+	aese	v1.16b, v5.16b
+	aesmc	v0.16b, v0.16b
+	aesmc	v1.16b, v1.16b
+4:	ld1	{v4.2d}, [x10], #16		/* load next round key */
+	subs	w7, w7, #3
+	aese	v0.16b, v3.16b
+	aese	v1.16b, v3.16b
+	aesmc	v0.16b, v0.16b
+	aesmc	v1.16b, v1.16b
+	ld1	{v5.2d}, [x10], #16		/* load next round key */
+	bpl	2b
+	aese	v0.16b, v4.16b
+	aese	v1.16b, v4.16b
+	subs	w2, w2, #16
+	bmi	6f				/* partial block? */
+	ld1	{v2.16b}, [x1], #16		/* load next input block */
+	.if	\enc == 1
+	eor	v2.16b, v2.16b, v5.16b		/* final round enc+mac */
+	eor	v1.16b, v1.16b, v2.16b		/* xor with crypted ctr */
+	.else
+	eor	v2.16b, v2.16b, v1.16b		/* xor with crypted ctr */
+	eor	v1.16b, v2.16b, v5.16b		/* final round enc */
+	.endif
+	eor	v0.16b, v0.16b, v2.16b		/* xor mac with pt ^ rk[last] */
+	st1	{v1.16b}, [x0], #16		/* write output block */
+	bne	0b
+	rev	x8, x8
+	st1	{v0.2d}, [x5]			/* store mac */
+	str	x8, [x6, #8]			/* store lsb end of ctr (BE) */
+5:	ret
+
+6:	eor	v0.16b, v0.16b, v5.16b		/* final round mac */
+	eor	v1.16b, v1.16b, v5.16b		/* final round enc */
+	st1	{v0.2d}, [x5]			/* store mac */
+	add	w2, w2, #16			/* process partial tail block */
+7:	ldrb	w9, [x1], #1			/* get 1 byte of input */
+	umov	w6, v1.b[0]			/* get top crypted ctr byte */
+	umov	w7, v0.b[0]			/* get top mac byte */
+	.if	\enc == 1
+	eor	w7, w7, w9
+	eor	w9, w9, w6
+	.else
+	eor	w9, w9, w6
+	eor	w7, w7, w9
+	.endif
+	strb	w9, [x0], #1			/* store out byte */
+	strb	w7, [x5], #1			/* store mac byte */
+	subs	w2, w2, #1
+	beq	5b
+	ext	v0.16b, v0.16b, v0.16b, #1	/* shift out mac byte */
+	ext	v1.16b, v1.16b, v1.16b, #1	/* shift out ctr byte */
+	b	7b
+	.endm
+
+	/*
+	 * void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
+	 * 			   u8 const rk[], u32 rounds, u8 mac[],
+	 * 			   u8 ctr[]);
+	 * void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
+	 * 			   u8 const rk[], u32 rounds, u8 mac[],
+	 * 			   u8 ctr[]);
+	 */
+ENTRY(ce_aes_ccm_encrypt)
+	aes_ccm_do_crypt	1
+ENDPROC(ce_aes_ccm_encrypt)
+
+ENTRY(ce_aes_ccm_decrypt)
+	aes_ccm_do_crypt	0
+ENDPROC(ce_aes_ccm_decrypt)
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
new file mode 100644
index 0000000..9e6cdde
--- /dev/null
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -0,0 +1,297 @@
+/*
+ * aes-ccm-glue.c - AES-CCM transform for ARMv8 with Crypto Extensions
+ *
+ * Copyright (C) 2013 - 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <asm/unaligned.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+static int num_rounds(struct crypto_aes_ctx *ctx)
+{
+	/*
+	 * # of rounds specified by AES:
+	 * 128 bit key		10 rounds
+	 * 192 bit key		12 rounds
+	 * 256 bit key		14 rounds
+	 * => n byte key	=> 6 + (n/4) rounds
+	 */
+	return 6 + ctx->key_length / 4;
+}
+
+asmlinkage void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
+				     u32 *macp, u32 const rk[], u32 rounds);
+
+asmlinkage void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
+				   u32 const rk[], u32 rounds, u8 mac[],
+				   u8 ctr[]);
+
+asmlinkage void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
+				   u32 const rk[], u32 rounds, u8 mac[],
+				   u8 ctr[]);
+
+asmlinkage void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u32 const rk[],
+				 u32 rounds);
+
+static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key,
+		      unsigned int key_len)
+{
+	struct crypto_aes_ctx *ctx = crypto_aead_ctx(tfm);
+	int ret;
+
+	ret = crypto_aes_expand_key(ctx, in_key, key_len);
+	if (!ret)
+		return 0;
+
+	tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+	return -EINVAL;
+}
+
+static int ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+	if ((authsize & 1) || authsize < 4)
+		return -EINVAL;
+	return 0;
+}
+
+static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	__be32 *n = (__be32 *)&maciv[AES_BLOCK_SIZE - 8];
+	u32 l = req->iv[0] + 1;
+
+	/* verify that CCM dimension 'L' is set correctly in the IV */
+	if (l < 2 || l > 8)
+		return -EINVAL;
+
+	/* verify that msglen can in fact be represented in L bytes */
+	if (l < 4 && msglen >> (8 * l))
+		return -EOVERFLOW;
+
+	/*
+	 * Even if the CCM spec allows L values of up to 8, the Linux cryptoapi
+	 * uses a u32 type to represent msglen so the top 4 bytes are always 0.
+	 */
+	n[0] = 0;
+	n[1] = cpu_to_be32(msglen);
+
+	memcpy(maciv, req->iv, AES_BLOCK_SIZE - l);
+
+	/*
+	 * Meaning of byte 0 according to CCM spec (RFC 3610/NIST 800-38C)
+	 * - bits 0..2	: max # of bytes required to represent msglen, minus 1
+	 *                (already set by caller)
+	 * - bits 3..5	: size of auth tag (1 => 4 bytes, 2 => 6 bytes, etc)
+	 * - bit 6	: indicates presence of authenticate-only data
+	 */
+	maciv[0] |= (crypto_aead_authsize(aead) - 2) << 2;
+	if (req->assoclen)
+		maciv[0] |= 0x40;
+
+	memset(&req->iv[AES_BLOCK_SIZE - l], 0, l);
+	return 0;
+}
+
+static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
+	struct __packed { __be16 l; __be32 h; u16 len; } ltag;
+	struct scatter_walk walk;
+	u32 len = req->assoclen;
+	u32 macp = 0;
+
+	/* prepend the AAD with a length tag */
+	if (len < 0xff00) {
+		ltag.l = cpu_to_be16(len);
+		ltag.len = 2;
+	} else  {
+		ltag.l = cpu_to_be16(0xfffe);
+		put_unaligned_be32(len, &ltag.h);
+		ltag.len = 6;
+	}
+
+	ce_aes_ccm_auth_data(mac, (u8 *)&ltag, ltag.len, &macp, ctx->key_enc,
+			     num_rounds(ctx));
+	scatterwalk_start(&walk, req->assoc);
+
+	do {
+		u32 n = scatterwalk_clamp(&walk, len);
+		u8 *p;
+
+		if (!n) {
+			scatterwalk_start(&walk, sg_next(walk.sg));
+			n = scatterwalk_clamp(&walk, len);
+		}
+		p = scatterwalk_map(&walk);
+		ce_aes_ccm_auth_data(mac, p, n, &macp, ctx->key_enc,
+				     num_rounds(ctx));
+		len -= n;
+
+		scatterwalk_unmap(p);
+		scatterwalk_advance(&walk, n);
+		scatterwalk_done(&walk, 0, len);
+	} while (len);
+}
+
+static int ccm_encrypt(struct aead_request *req)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
+	struct blkcipher_desc desc = { .info = req->iv };
+	struct blkcipher_walk walk;
+	u8 __aligned(8) mac[AES_BLOCK_SIZE];
+	u8 buf[AES_BLOCK_SIZE];
+	u32 len = req->cryptlen;
+	int err;
+
+	err = ccm_init_mac(req, mac, len);
+	if (err)
+		return err;
+
+	kernel_neon_begin_partial(6);
+
+	if (req->assoclen)
+		ccm_calculate_auth_mac(req, mac);
+
+	/* preserve the original iv for the final round */
+	memcpy(buf, req->iv, AES_BLOCK_SIZE);
+
+	blkcipher_walk_init(&walk, req->dst, req->src, len);
+	err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
+					     AES_BLOCK_SIZE);
+
+	while (walk.nbytes) {
+		u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+
+		if (walk.nbytes == len)
+			tail = 0;
+
+		ce_aes_ccm_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+				   walk.nbytes - tail, ctx->key_enc,
+				   num_rounds(ctx), mac, walk.iv);
+
+		len -= walk.nbytes - tail;
+		err = blkcipher_walk_done(&desc, &walk, tail);
+	}
+	if (!err)
+		ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
+
+	kernel_neon_end();
+
+	if (err)
+		return err;
+
+	/* copy authtag to end of dst */
+	scatterwalk_map_and_copy(mac, req->dst, req->cryptlen,
+				 crypto_aead_authsize(aead), 1);
+
+	return 0;
+}
+
+static int ccm_decrypt(struct aead_request *req)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
+	unsigned int authsize = crypto_aead_authsize(aead);
+	struct blkcipher_desc desc = { .info = req->iv };
+	struct blkcipher_walk walk;
+	u8 __aligned(8) mac[AES_BLOCK_SIZE];
+	u8 buf[AES_BLOCK_SIZE];
+	u32 len = req->cryptlen - authsize;
+	int err;
+
+	err = ccm_init_mac(req, mac, len);
+	if (err)
+		return err;
+
+	kernel_neon_begin_partial(6);
+
+	if (req->assoclen)
+		ccm_calculate_auth_mac(req, mac);
+
+	/* preserve the original iv for the final round */
+	memcpy(buf, req->iv, AES_BLOCK_SIZE);
+
+	blkcipher_walk_init(&walk, req->dst, req->src, len);
+	err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
+					     AES_BLOCK_SIZE);
+
+	while (walk.nbytes) {
+		u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+
+		if (walk.nbytes == len)
+			tail = 0;
+
+		ce_aes_ccm_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+				   walk.nbytes - tail, ctx->key_enc,
+				   num_rounds(ctx), mac, walk.iv);
+
+		len -= walk.nbytes - tail;
+		err = blkcipher_walk_done(&desc, &walk, tail);
+	}
+	if (!err)
+		ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
+
+	kernel_neon_end();
+
+	if (err)
+		return err;
+
+	/* compare calculated auth tag with the stored one */
+	scatterwalk_map_and_copy(buf, req->src, req->cryptlen - authsize,
+				 authsize, 0);
+
+	if (memcmp(mac, buf, authsize))
+		return -EBADMSG;
+	return 0;
+}
+
+static struct crypto_alg ccm_aes_alg = {
+	.cra_name		= "ccm(aes)",
+	.cra_driver_name	= "ccm-aes-ce",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_AEAD,
+	.cra_blocksize		= 1,
+	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
+	.cra_alignmask		= 7,
+	.cra_type		= &crypto_aead_type,
+	.cra_module		= THIS_MODULE,
+	.cra_aead = {
+		.ivsize		= AES_BLOCK_SIZE,
+		.maxauthsize	= AES_BLOCK_SIZE,
+		.setkey		= ccm_setkey,
+		.setauthsize	= ccm_setauthsize,
+		.encrypt	= ccm_encrypt,
+		.decrypt	= ccm_decrypt,
+	}
+};
+
+static int __init aes_mod_init(void)
+{
+	if (!(elf_hwcap & HWCAP_AES))
+		return -ENODEV;
+	return crypto_register_alg(&ccm_aes_alg);
+}
+
+static void __exit aes_mod_exit(void)
+{
+	crypto_unregister_alg(&ccm_aes_alg);
+}
+
+module_init(aes_mod_init);
+module_exit(aes_mod_exit);
+
+MODULE_DESCRIPTION("Synchronous AES in CCM mode using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("ccm(aes)");
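
Two worked numbers for the glue code above (illustrative arithmetic only): num_rounds() maps a 32-byte AES-256 key to 6 + 32/4 = 14 rounds, and ccm_init_mac() builds the B0 flag byte for a 16-byte tag with associated data as ((16 - 2) << 2) | 0x40 = 0x78, with the low three bits (L - 1) already supplied by the caller's IV.

#include <stdio.h>

/* Illustrative recomputation of num_rounds() and the CCM B0 flags. */
int main(void)
{
	unsigned key_length = 32;			/* AES-256 */
	unsigned rounds = 6 + key_length / 4;		/* 14      */
	unsigned authsize = 16, have_assoc = 1;
	unsigned flags = ((authsize - 2) << 2) | (have_assoc ? 0x40 : 0);

	printf("rounds=%u B0 flags=%#x (low 3 bits hold L-1)\n",
	       rounds, flags);				/* rounds=14, 0x78 */
	return 0;
}
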
diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-cipher.c
new file mode 100644
index 0000000..2075e1a
--- /dev/null
+++ b/arch/arm64/crypto/aes-ce-cipher.c
@@ -0,0 +1,155 @@
+/*
+ * aes-ce-cipher.c - core AES cipher using ARMv8 Crypto Extensions
+ *
+ * Copyright (C) 2013 - 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <crypto/aes.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("Synchronous AES cipher using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+struct aes_block {
+	u8 b[AES_BLOCK_SIZE];
+};
+
+static int num_rounds(struct crypto_aes_ctx *ctx)
+{
+	/*
+	 * # of rounds specified by AES:
+	 * 128 bit key		10 rounds
+	 * 192 bit key		12 rounds
+	 * 256 bit key		14 rounds
+	 * => n byte key	=> 6 + (n/4) rounds
+	 */
+	return 6 + ctx->key_length / 4;
+}
+
+static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
+{
+	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aes_block *out = (struct aes_block *)dst;
+	struct aes_block const *in = (struct aes_block *)src;
+	void *dummy0;
+	int dummy1;
+
+	kernel_neon_begin_partial(4);
+
+	__asm__("	ld1	{v0.16b}, %[in]			;"
+		"	ld1	{v1.2d}, [%[key]], #16		;"
+		"	cmp	%w[rounds], #10			;"
+		"	bmi	0f				;"
+		"	bne	3f				;"
+		"	mov	v3.16b, v1.16b			;"
+		"	b	2f				;"
+		"0:	mov	v2.16b, v1.16b			;"
+		"	ld1	{v3.2d}, [%[key]], #16		;"
+		"1:	aese	v0.16b, v2.16b			;"
+		"	aesmc	v0.16b, v0.16b			;"
+		"2:	ld1	{v1.2d}, [%[key]], #16		;"
+		"	aese	v0.16b, v3.16b			;"
+		"	aesmc	v0.16b, v0.16b			;"
+		"3:	ld1	{v2.2d}, [%[key]], #16		;"
+		"	subs	%w[rounds], %w[rounds], #3	;"
+		"	aese	v0.16b, v1.16b			;"
+		"	aesmc	v0.16b, v0.16b			;"
+		"	ld1	{v3.2d}, [%[key]], #16		;"
+		"	bpl	1b				;"
+		"	aese	v0.16b, v2.16b			;"
+		"	eor	v0.16b, v0.16b, v3.16b		;"
+		"	st1	{v0.16b}, %[out]		;"
+
+	:	[out]		"=Q"(*out),
+		[key]		"=r"(dummy0),
+		[rounds]	"=r"(dummy1)
+	:	[in]		"Q"(*in),
+				"1"(ctx->key_enc),
+				"2"(num_rounds(ctx) - 2)
+	:	"cc");
+
+	kernel_neon_end();
+}
+
+static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
+{
+	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aes_block *out = (struct aes_block *)dst;
+	struct aes_block const *in = (struct aes_block *)src;
+	void *dummy0;
+	int dummy1;
+
+	kernel_neon_begin_partial(4);
+
+	__asm__("	ld1	{v0.16b}, %[in]			;"
+		"	ld1	{v1.2d}, [%[key]], #16		;"
+		"	cmp	%w[rounds], #10			;"
+		"	bmi	0f				;"
+		"	bne	3f				;"
+		"	mov	v3.16b, v1.16b			;"
+		"	b	2f				;"
+		"0:	mov	v2.16b, v1.16b			;"
+		"	ld1	{v3.2d}, [%[key]], #16		;"
+		"1:	aesd	v0.16b, v2.16b			;"
+		"	aesimc	v0.16b, v0.16b			;"
+		"2:	ld1	{v1.2d}, [%[key]], #16		;"
+		"	aesd	v0.16b, v3.16b			;"
+		"	aesimc	v0.16b, v0.16b			;"
+		"3:	ld1	{v2.2d}, [%[key]], #16		;"
+		"	subs	%w[rounds], %w[rounds], #3	;"
+		"	aesd	v0.16b, v1.16b			;"
+		"	aesimc	v0.16b, v0.16b			;"
+		"	ld1	{v3.2d}, [%[key]], #16		;"
+		"	bpl	1b				;"
+		"	aesd	v0.16b, v2.16b			;"
+		"	eor	v0.16b, v0.16b, v3.16b		;"
+		"	st1	{v0.16b}, %[out]		;"
+
+	:	[out]		"=Q"(*out),
+		[key]		"=r"(dummy0),
+		[rounds]	"=r"(dummy1)
+	:	[in]		"Q"(*in),
+				"1"(ctx->key_dec),
+				"2"(num_rounds(ctx) - 2)
+	:	"cc");
+
+	kernel_neon_end();
+}
+
+static struct crypto_alg aes_alg = {
+	.cra_name		= "aes",
+	.cra_driver_name	= "aes-ce",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
+	.cra_module		= THIS_MODULE,
+	.cra_cipher = {
+		.cia_min_keysize	= AES_MIN_KEY_SIZE,
+		.cia_max_keysize	= AES_MAX_KEY_SIZE,
+		.cia_setkey		= crypto_aes_set_key,
+		.cia_encrypt		= aes_cipher_encrypt,
+		.cia_decrypt		= aes_cipher_decrypt
+	}
+};
+
+static int __init aes_mod_init(void)
+{
+	return crypto_register_alg(&aes_alg);
+}
+
+static void __exit aes_mod_exit(void)
+{
+	crypto_unregister_alg(&aes_alg);
+}
+
+module_cpu_feature_match(AES, aes_mod_init);
+module_exit(aes_mod_exit);
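num_rounds() above folds the AES round schedule into a single expression. A standalone check of that rule, with key lengths in bytes as they appear in struct crypto_aes_ctx:

#include <stdio.h>

static int num_rounds(int key_length)	/* bytes, as in crypto_aes_ctx */
{
	return 6 + key_length / 4;
}

int main(void)
{
	int len;

	for (len = 16; len <= 32; len += 8)
		printf("%d-bit key -> %d rounds\n", len * 8, num_rounds(len));
	return 0;	/* prints 10, 12 and 14 rounds */
}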
diff --git a/arch/arm64/crypto/aes-ce.S b/arch/arm64/crypto/aes-ce.S
new file mode 100644
index 0000000..685a18f
--- /dev/null
+++ b/arch/arm64/crypto/aes-ce.S
@@ -0,0 +1,133 @@
+/*
+ * linux/arch/arm64/crypto/aes-ce.S - AES cipher for ARMv8 with
+ *                                    Crypto Extensions
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+
+#define AES_ENTRY(func)		ENTRY(ce_ ## func)
+#define AES_ENDPROC(func)	ENDPROC(ce_ ## func)
+
+	.arch		armv8-a+crypto
+
+	/* preload all round keys */
+	.macro		load_round_keys, rounds, rk
+	cmp		\rounds, #12
+	blo		2222f		/* 128 bits */
+	beq		1111f		/* 192 bits */
+	ld1		{v17.16b-v18.16b}, [\rk], #32
+1111:	ld1		{v19.16b-v20.16b}, [\rk], #32
+2222:	ld1		{v21.16b-v24.16b}, [\rk], #64
+	ld1		{v25.16b-v28.16b}, [\rk], #64
+	ld1		{v29.16b-v31.16b}, [\rk]
+	.endm
+
+	/* prepare for encryption with key in rk[] */
+	.macro		enc_prepare, rounds, rk, ignore
+	load_round_keys	\rounds, \rk
+	.endm
+
+	/* prepare for encryption (again) but with new key in rk[] */
+	.macro		enc_switch_key, rounds, rk, ignore
+	load_round_keys	\rounds, \rk
+	.endm
+
+	/* prepare for decryption with key in rk[] */
+	.macro		dec_prepare, rounds, rk, ignore
+	load_round_keys	\rounds, \rk
+	.endm
+
+	.macro		do_enc_Nx, de, mc, k, i0, i1, i2, i3
+	aes\de		\i0\().16b, \k\().16b
+	.ifnb		\i1
+	aes\de		\i1\().16b, \k\().16b
+	.ifnb		\i3
+	aes\de		\i2\().16b, \k\().16b
+	aes\de		\i3\().16b, \k\().16b
+	.endif
+	.endif
+	aes\mc		\i0\().16b, \i0\().16b
+	.ifnb		\i1
+	aes\mc		\i1\().16b, \i1\().16b
+	.ifnb		\i3
+	aes\mc		\i2\().16b, \i2\().16b
+	aes\mc		\i3\().16b, \i3\().16b
+	.endif
+	.endif
+	.endm
+
+	/* up to 4 interleaved encryption rounds with the same round key */
+	.macro		round_Nx, enc, k, i0, i1, i2, i3
+	.ifc		\enc, e
+	do_enc_Nx	e, mc, \k, \i0, \i1, \i2, \i3
+	.else
+	do_enc_Nx	d, imc, \k, \i0, \i1, \i2, \i3
+	.endif
+	.endm
+
+	/* up to 4 interleaved final rounds */
+	.macro		fin_round_Nx, de, k, k2, i0, i1, i2, i3
+	aes\de		\i0\().16b, \k\().16b
+	.ifnb		\i1
+	aes\de		\i1\().16b, \k\().16b
+	.ifnb		\i3
+	aes\de		\i2\().16b, \k\().16b
+	aes\de		\i3\().16b, \k\().16b
+	.endif
+	.endif
+	eor		\i0\().16b, \i0\().16b, \k2\().16b
+	.ifnb		\i1
+	eor		\i1\().16b, \i1\().16b, \k2\().16b
+	.ifnb		\i3
+	eor		\i2\().16b, \i2\().16b, \k2\().16b
+	eor		\i3\().16b, \i3\().16b, \k2\().16b
+	.endif
+	.endif
+	.endm
+
+	/* up to 4 interleaved blocks */
+	.macro		do_block_Nx, enc, rounds, i0, i1, i2, i3
+	cmp		\rounds, #12
+	blo		2222f		/* 128 bits */
+	beq		1111f		/* 192 bits */
+	round_Nx	\enc, v17, \i0, \i1, \i2, \i3
+	round_Nx	\enc, v18, \i0, \i1, \i2, \i3
+1111:	round_Nx	\enc, v19, \i0, \i1, \i2, \i3
+	round_Nx	\enc, v20, \i0, \i1, \i2, \i3
+2222:	.irp		key, v21, v22, v23, v24, v25, v26, v27, v28, v29
+	round_Nx	\enc, \key, \i0, \i1, \i2, \i3
+	.endr
+	fin_round_Nx	\enc, v30, v31, \i0, \i1, \i2, \i3
+	.endm
+
+	.macro		encrypt_block, in, rounds, t0, t1, t2
+	do_block_Nx	e, \rounds, \in
+	.endm
+
+	.macro		encrypt_block2x, i0, i1, rounds, t0, t1, t2
+	do_block_Nx	e, \rounds, \i0, \i1
+	.endm
+
+	.macro		encrypt_block4x, i0, i1, i2, i3, rounds, t0, t1, t2
+	do_block_Nx	e, \rounds, \i0, \i1, \i2, \i3
+	.endm
+
+	.macro		decrypt_block, in, rounds, t0, t1, t2
+	do_block_Nx	d, \rounds, \in
+	.endm
+
+	.macro		decrypt_block2x, i0, i1, rounds, t0, t1, t2
+	do_block_Nx	d, \rounds, \i0, \i1
+	.endm
+
+	.macro		decrypt_block4x, i0, i1, i2, i3, rounds, t0, t1, t2
+	do_block_Nx	d, \rounds, \i0, \i1, \i2, \i3
+	.endm
+
+#include "aes-modes.S"
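load_round_keys above keeps all rounds+1 round keys resident in v17-v31 and lets shorter keys enter the load sequence partway through. A hypothetical C restatement of that branch structure (first_resident_key() is an invented name used only to show the arithmetic):

#include <stdio.h>

/* offset of the first occupied register, counting from v17 */
static int first_resident_key(int rounds)
{
	if (rounds < 12)	/* 128-bit key: 11 round keys */
		return 4;
	if (rounds == 12)	/* 192-bit key: 13 round keys */
		return 2;
	return 0;		/* 256-bit key: 15 round keys */
}

int main(void)
{
	int rounds;

	for (rounds = 10; rounds <= 14; rounds += 2)
		printf("%d rounds: round keys in v%d..v31\n",
		       rounds, 17 + first_resident_key(rounds));
	return 0;
}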
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
new file mode 100644
index 0000000..79cd911
--- /dev/null
+++ b/arch/arm64/crypto/aes-glue.c
@@ -0,0 +1,446 @@
+/*
+ * linux/arch/arm64/crypto/aes-glue.c - wrapper code for ARMv8 AES
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <asm/hwcap.h>
+#include <crypto/aes.h>
+#include <crypto/ablk_helper.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+#include <linux/cpufeature.h>
+
+#ifdef USE_V8_CRYPTO_EXTENSIONS
+#define MODE			"ce"
+#define PRIO			300
+#define aes_ecb_encrypt		ce_aes_ecb_encrypt
+#define aes_ecb_decrypt		ce_aes_ecb_decrypt
+#define aes_cbc_encrypt		ce_aes_cbc_encrypt
+#define aes_cbc_decrypt		ce_aes_cbc_decrypt
+#define aes_ctr_encrypt		ce_aes_ctr_encrypt
+#define aes_xts_encrypt		ce_aes_xts_encrypt
+#define aes_xts_decrypt		ce_aes_xts_decrypt
+MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
+#else
+#define MODE			"neon"
+#define PRIO			200
+#define aes_ecb_encrypt		neon_aes_ecb_encrypt
+#define aes_ecb_decrypt		neon_aes_ecb_decrypt
+#define aes_cbc_encrypt		neon_aes_cbc_encrypt
+#define aes_cbc_decrypt		neon_aes_cbc_decrypt
+#define aes_ctr_encrypt		neon_aes_ctr_encrypt
+#define aes_xts_encrypt		neon_aes_xts_encrypt
+#define aes_xts_decrypt		neon_aes_xts_decrypt
+MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON");
+MODULE_ALIAS("ecb(aes)");
+MODULE_ALIAS("cbc(aes)");
+MODULE_ALIAS("ctr(aes)");
+MODULE_ALIAS("xts(aes)");
+#endif
+
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+/* defined in aes-modes.S */
+asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
+				int rounds, int blocks, int first);
+asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
+				int rounds, int blocks, int first);
+
+asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[],
+				int rounds, int blocks, u8 iv[], int first);
+asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
+				int rounds, int blocks, u8 iv[], int first);
+
+asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
+				int rounds, int blocks, u8 ctr[], int first);
+
+asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[],
+				int rounds, int blocks, u8 const rk2[], u8 iv[],
+				int first);
+asmlinkage void aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[],
+				int rounds, int blocks, u8 const rk2[], u8 iv[],
+				int first);
+
+struct crypto_aes_xts_ctx {
+	struct crypto_aes_ctx key1;
+	struct crypto_aes_ctx __aligned(8) key2;
+};
+
+static int xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+		       unsigned int key_len)
+{
+	struct crypto_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+	int ret;
+
+	ret = crypto_aes_expand_key(&ctx->key1, in_key, key_len / 2);
+	if (!ret)
+		ret = crypto_aes_expand_key(&ctx->key2, &in_key[key_len / 2],
+					    key_len / 2);
+	if (!ret)
+		return 0;
+
+	tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+	return -EINVAL;
+}
+
+static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
+{
+	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	int err, first, rounds = 6 + ctx->key_length / 4;
+	struct blkcipher_walk walk;
+	unsigned int blocks;
+
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	kernel_neon_begin();
+	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+		aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+				(u8 *)ctx->key_enc, rounds, blocks, first);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+	}
+	kernel_neon_end();
+	return err;
+}
+
+static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
+{
+	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	int err, first, rounds = 6 + ctx->key_length / 4;
+	struct blkcipher_walk walk;
+	unsigned int blocks;
+
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	kernel_neon_begin();
+	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+		aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+				(u8 *)ctx->key_dec, rounds, blocks, first);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+	}
+	kernel_neon_end();
+	return err;
+}
+
+static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
+{
+	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	int err, first, rounds = 6 + ctx->key_length / 4;
+	struct blkcipher_walk walk;
+	unsigned int blocks;
+
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	kernel_neon_begin();
+	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+		aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+				(u8 *)ctx->key_enc, rounds, blocks, walk.iv,
+				first);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+	}
+	kernel_neon_end();
+	return err;
+}
+
+static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
+{
+	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	int err, first, rounds = 6 + ctx->key_length / 4;
+	struct blkcipher_walk walk;
+	unsigned int blocks;
+
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	kernel_neon_begin();
+	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+		aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+				(u8 *)ctx->key_dec, rounds, blocks, walk.iv,
+				first);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+	}
+	kernel_neon_end();
+	return err;
+}
+
+static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
+{
+	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	int err, first, rounds = 6 + ctx->key_length / 4;
+	struct blkcipher_walk walk;
+	int blocks;
+
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+
+	first = 1;
+	kernel_neon_begin();
+	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+		aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+				(u8 *)ctx->key_enc, rounds, blocks, walk.iv,
+				first);
+		first = 0;
+		nbytes -= blocks * AES_BLOCK_SIZE;
+		if (nbytes && nbytes == walk.nbytes % AES_BLOCK_SIZE)
+			break;
+		err = blkcipher_walk_done(desc, &walk,
+					  walk.nbytes % AES_BLOCK_SIZE);
+	}
+	if (nbytes) {
+		u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
+		u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+		u8 __aligned(8) tail[AES_BLOCK_SIZE];
+
+		/*
+		 * Minimum alignment is 8 bytes, so if nbytes is <= 8, we need
+		 * to tell aes_ctr_encrypt() to only read half a block.
+		 */
+		blocks = (nbytes <= 8) ? -1 : 1;
+
+		aes_ctr_encrypt(tail, tsrc, (u8 *)ctx->key_enc, rounds,
+				blocks, walk.iv, first);
+		memcpy(tdst, tail, nbytes);
+		err = blkcipher_walk_done(desc, &walk, 0);
+	}
+	kernel_neon_end();
+
+	return err;
+}
+
+static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
+{
+	struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	int err, first, rounds = 6 + ctx->key1.key_length / 4;
+	struct blkcipher_walk walk;
+	unsigned int blocks;
+
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	kernel_neon_begin();
+	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+		aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+				(u8 *)ctx->key1.key_enc, rounds, blocks,
+				(u8 *)ctx->key2.key_enc, walk.iv, first);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+	}
+	kernel_neon_end();
+
+	return err;
+}
+
+static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
+{
+	struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	int err, first, rounds = 6 + ctx->key1.key_length / 4;
+	struct blkcipher_walk walk;
+	unsigned int blocks;
+
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	kernel_neon_begin();
+	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+		aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+				(u8 *)ctx->key1.key_dec, rounds, blocks,
+				(u8 *)ctx->key2.key_enc, walk.iv, first);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+	}
+	kernel_neon_end();
+
+	return err;
+}
+
+static struct crypto_alg aes_algs[] = { {
+	.cra_name		= "__ecb-aes-" MODE,
+	.cra_driver_name	= "__driver-ecb-aes-" MODE,
+	.cra_priority		= 0,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
+	.cra_alignmask		= 7,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_blkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= crypto_aes_set_key,
+		.encrypt	= ecb_encrypt,
+		.decrypt	= ecb_decrypt,
+	},
+}, {
+	.cra_name		= "__cbc-aes-" MODE,
+	.cra_driver_name	= "__driver-cbc-aes-" MODE,
+	.cra_priority		= 0,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
+	.cra_alignmask		= 7,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_blkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= crypto_aes_set_key,
+		.encrypt	= cbc_encrypt,
+		.decrypt	= cbc_decrypt,
+	},
+}, {
+	.cra_name		= "__ctr-aes-" MODE,
+	.cra_driver_name	= "__driver-ctr-aes-" MODE,
+	.cra_priority		= 0,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= 1,
+	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
+	.cra_alignmask		= 7,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_blkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= crypto_aes_set_key,
+		.encrypt	= ctr_encrypt,
+		.decrypt	= ctr_encrypt,
+	},
+}, {
+	.cra_name		= "__xts-aes-" MODE,
+	.cra_driver_name	= "__driver-xts-aes-" MODE,
+	.cra_priority		= 0,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct crypto_aes_xts_ctx),
+	.cra_alignmask		= 7,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_blkcipher = {
+		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
+		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= xts_set_key,
+		.encrypt	= xts_encrypt,
+		.decrypt	= xts_decrypt,
+	},
+}, {
+	.cra_name		= "ecb(aes)",
+	.cra_driver_name	= "ecb-aes-" MODE,
+	.cra_priority		= PRIO,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct async_helper_ctx),
+	.cra_alignmask		= 7,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= ablk_init,
+	.cra_exit		= ablk_exit,
+	.cra_ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= ablk_set_key,
+		.encrypt	= ablk_encrypt,
+		.decrypt	= ablk_decrypt,
+	}
+}, {
+	.cra_name		= "cbc(aes)",
+	.cra_driver_name	= "cbc-aes-" MODE,
+	.cra_priority		= PRIO,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct async_helper_ctx),
+	.cra_alignmask		= 7,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= ablk_init,
+	.cra_exit		= ablk_exit,
+	.cra_ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= ablk_set_key,
+		.encrypt	= ablk_encrypt,
+		.decrypt	= ablk_decrypt,
+	}
+}, {
+	.cra_name		= "ctr(aes)",
+	.cra_driver_name	= "ctr-aes-" MODE,
+	.cra_priority		= PRIO,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= 1,
+	.cra_ctxsize		= sizeof(struct async_helper_ctx),
+	.cra_alignmask		= 7,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= ablk_init,
+	.cra_exit		= ablk_exit,
+	.cra_ablkcipher = {
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= ablk_set_key,
+		.encrypt	= ablk_encrypt,
+		.decrypt	= ablk_decrypt,
+	}
+}, {
+	.cra_name		= "xts(aes)",
+	.cra_driver_name	= "xts-aes-" MODE,
+	.cra_priority		= PRIO,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct async_helper_ctx),
+	.cra_alignmask		= 7,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= ablk_init,
+	.cra_exit		= ablk_exit,
+	.cra_ablkcipher = {
+		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
+		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= ablk_set_key,
+		.encrypt	= ablk_encrypt,
+		.decrypt	= ablk_decrypt,
+	}
+} };
+
+static int __init aes_init(void)
+{
+	return crypto_register_algs(aes_algs, ARRAY_SIZE(aes_algs));
+}
+
+static void __exit aes_exit(void)
+{
+	crypto_unregister_algs(aes_algs, ARRAY_SIZE(aes_algs));
+}
+
+#ifdef USE_V8_CRYPTO_EXTENSIONS
+module_cpu_feature_match(AES, aes_init);
+#else
+module_init(aes_init);
+#endif
+module_exit(aes_exit);
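The tail handling in ctr_encrypt() above encrypts a final partial block into a bounce buffer and copies out only the bytes that exist; passing blocks = -1 additionally tells the asm to read just half a block when nbytes <= 8. The bounce-buffer idea in plain C, with keystream_block() standing in (it is not real AES, just a stub so the sketch runs):

#include <stdio.h>
#include <string.h>

#define AES_BLOCK_SIZE	16

/* stand-in: the real code encrypts the counter block with AES */
static void keystream_block(unsigned char out[AES_BLOCK_SIZE],
			    const unsigned char ctr[AES_BLOCK_SIZE])
{
	memcpy(out, ctr, AES_BLOCK_SIZE);
}

static void ctr_tail(unsigned char *dst, const unsigned char *src,
		     size_t nbytes, const unsigned char ctr[AES_BLOCK_SIZE])
{
	unsigned char tail[AES_BLOCK_SIZE];
	size_t i;

	keystream_block(tail, ctr);
	for (i = 0; i < nbytes; i++)	/* nbytes < AES_BLOCK_SIZE */
		tail[i] ^= src[i];
	memcpy(dst, tail, nbytes);	/* never write past the tail */
}

int main(void)
{
	unsigned char ctr[AES_BLOCK_SIZE] = { 0 };
	unsigned char src[5] = "hello", dst[5];

	ctr_tail(dst, src, sizeof(dst), ctr);
	printf("%.5s\n", (char *)dst);	/* all-zero keystream: "hello" */
	return 0;
}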
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
new file mode 100644
index 0000000..f6e372c
--- /dev/null
+++ b/arch/arm64/crypto/aes-modes.S
@@ -0,0 +1,532 @@
+/*
+ * linux/arch/arm64/crypto/aes-modes.S - chaining mode wrappers for AES
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* included by aes-ce.S and aes-neon.S */
+
+	.text
+	.align		4
+
+/*
+ * There are several ways to instantiate this code:
+ * - no interleave, all inline
+ * - 2-way interleave, 2x calls out of line (-DINTERLEAVE=2)
+ * - 2-way interleave, all inline (-DINTERLEAVE=2 -DINTERLEAVE_INLINE)
+ * - 4-way interleave, 4x calls out of line (-DINTERLEAVE=4)
+ * - 4-way interleave, all inline (-DINTERLEAVE=4 -DINTERLEAVE_INLINE)
+ *
+ * Macros imported by this code:
+ * - enc_prepare	- setup NEON registers for encryption
+ * - dec_prepare	- setup NEON registers for decryption
+ * - enc_switch_key	- change to new key after having prepared for encryption
+ * - encrypt_block	- encrypt a single block
+ * - decrypt_block	- decrypt a single block
+ * - encrypt_block2x	- encrypt 2 blocks in parallel (if INTERLEAVE == 2)
+ * - decrypt_block2x	- decrypt 2 blocks in parallel (if INTERLEAVE == 2)
+ * - encrypt_block4x	- encrypt 4 blocks in parallel (if INTERLEAVE == 4)
+ * - decrypt_block4x	- decrypt 4 blocks in parallel (if INTERLEAVE == 4)
+ */
+
+#if defined(INTERLEAVE) && !defined(INTERLEAVE_INLINE)
+#define FRAME_PUSH	stp x29, x30, [sp,#-16]! ; mov x29, sp
+#define FRAME_POP	ldp x29, x30, [sp],#16
+
+#if INTERLEAVE == 2
+
+aes_encrypt_block2x:
+	encrypt_block2x	v0, v1, w3, x2, x6, w7
+	ret
+ENDPROC(aes_encrypt_block2x)
+
+aes_decrypt_block2x:
+	decrypt_block2x	v0, v1, w3, x2, x6, w7
+	ret
+ENDPROC(aes_decrypt_block2x)
+
+#elif INTERLEAVE == 4
+
+aes_encrypt_block4x:
+	encrypt_block4x	v0, v1, v2, v3, w3, x2, x6, w7
+	ret
+ENDPROC(aes_encrypt_block4x)
+
+aes_decrypt_block4x:
+	decrypt_block4x	v0, v1, v2, v3, w3, x2, x6, w7
+	ret
+ENDPROC(aes_decrypt_block4x)
+
+#else
+#error INTERLEAVE should equal 2 or 4
+#endif
+
+	.macro		do_encrypt_block2x
+	bl		aes_encrypt_block2x
+	.endm
+
+	.macro		do_decrypt_block2x
+	bl		aes_decrypt_block2x
+	.endm
+
+	.macro		do_encrypt_block4x
+	bl		aes_encrypt_block4x
+	.endm
+
+	.macro		do_decrypt_block4x
+	bl		aes_decrypt_block4x
+	.endm
+
+#else
+#define FRAME_PUSH
+#define FRAME_POP
+
+	.macro		do_encrypt_block2x
+	encrypt_block2x	v0, v1, w3, x2, x6, w7
+	.endm
+
+	.macro		do_decrypt_block2x
+	decrypt_block2x	v0, v1, w3, x2, x6, w7
+	.endm
+
+	.macro		do_encrypt_block4x
+	encrypt_block4x	v0, v1, v2, v3, w3, x2, x6, w7
+	.endm
+
+	.macro		do_decrypt_block4x
+	decrypt_block4x	v0, v1, v2, v3, w3, x2, x6, w7
+	.endm
+
+#endif
+
+	/*
+	 * aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
+	 *		   int blocks, int first)
+	 * aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
+	 *		   int blocks, int first)
+	 */
+
+AES_ENTRY(aes_ecb_encrypt)
+	FRAME_PUSH
+	cbz		w5, .LecbencloopNx
+
+	enc_prepare	w3, x2, x5
+
+.LecbencloopNx:
+#if INTERLEAVE >= 2
+	subs		w4, w4, #INTERLEAVE
+	bmi		.Lecbenc1x
+#if INTERLEAVE == 2
+	ld1		{v0.16b-v1.16b}, [x1], #32	/* get 2 pt blocks */
+	do_encrypt_block2x
+	st1		{v0.16b-v1.16b}, [x0], #32
+#else
+	ld1		{v0.16b-v3.16b}, [x1], #64	/* get 4 pt blocks */
+	do_encrypt_block4x
+	st1		{v0.16b-v3.16b}, [x0], #64
+#endif
+	b		.LecbencloopNx
+.Lecbenc1x:
+	adds		w4, w4, #INTERLEAVE
+	beq		.Lecbencout
+#endif
+.Lecbencloop:
+	ld1		{v0.16b}, [x1], #16		/* get next pt block */
+	encrypt_block	v0, w3, x2, x5, w6
+	st1		{v0.16b}, [x0], #16
+	subs		w4, w4, #1
+	bne		.Lecbencloop
+.Lecbencout:
+	FRAME_POP
+	ret
+AES_ENDPROC(aes_ecb_encrypt)
+
+
+AES_ENTRY(aes_ecb_decrypt)
+	FRAME_PUSH
+	cbz		w5, .LecbdecloopNx
+
+	dec_prepare	w3, x2, x5
+
+.LecbdecloopNx:
+#if INTERLEAVE >= 2
+	subs		w4, w4, #INTERLEAVE
+	bmi		.Lecbdec1x
+#if INTERLEAVE == 2
+	ld1		{v0.16b-v1.16b}, [x1], #32	/* get 2 ct blocks */
+	do_decrypt_block2x
+	st1		{v0.16b-v1.16b}, [x0], #32
+#else
+	ld1		{v0.16b-v3.16b}, [x1], #64	/* get 4 ct blocks */
+	do_decrypt_block4x
+	st1		{v0.16b-v3.16b}, [x0], #64
+#endif
+	b		.LecbdecloopNx
+.Lecbdec1x:
+	adds		w4, w4, #INTERLEAVE
+	beq		.Lecbdecout
+#endif
+.Lecbdecloop:
+	ld1		{v0.16b}, [x1], #16		/* get next ct block */
+	decrypt_block	v0, w3, x2, x5, w6
+	st1		{v0.16b}, [x0], #16
+	subs		w4, w4, #1
+	bne		.Lecbdecloop
+.Lecbdecout:
+	FRAME_POP
+	ret
+AES_ENDPROC(aes_ecb_decrypt)
+
+
+	/*
+	 * aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
+	 *		   int blocks, u8 iv[], int first)
+	 * aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
+	 *		   int blocks, u8 iv[], int first)
+	 */
+
+AES_ENTRY(aes_cbc_encrypt)
+	cbz		w6, .Lcbcencloop
+
+	ld1		{v0.16b}, [x5]			/* get iv */
+	enc_prepare	w3, x2, x5
+
+.Lcbcencloop:
+	ld1		{v1.16b}, [x1], #16		/* get next pt block */
+	eor		v0.16b, v0.16b, v1.16b		/* ..and xor with iv */
+	encrypt_block	v0, w3, x2, x5, w6
+	st1		{v0.16b}, [x0], #16
+	subs		w4, w4, #1
+	bne		.Lcbcencloop
+	ret
+AES_ENDPROC(aes_cbc_encrypt)
+
+
+AES_ENTRY(aes_cbc_decrypt)
+	FRAME_PUSH
+	cbz		w6, .LcbcdecloopNx
+
+	ld1		{v7.16b}, [x5]			/* get iv */
+	dec_prepare	w3, x2, x5
+
+.LcbcdecloopNx:
+#if INTERLEAVE >= 2
+	subs		w4, w4, #INTERLEAVE
+	bmi		.Lcbcdec1x
+#if INTERLEAVE == 2
+	ld1		{v0.16b-v1.16b}, [x1], #32	/* get 2 ct blocks */
+	mov		v2.16b, v0.16b
+	mov		v3.16b, v1.16b
+	do_decrypt_block2x
+	eor		v0.16b, v0.16b, v7.16b
+	eor		v1.16b, v1.16b, v2.16b
+	mov		v7.16b, v3.16b
+	st1		{v0.16b-v1.16b}, [x0], #32
+#else
+	ld1		{v0.16b-v3.16b}, [x1], #64	/* get 4 ct blocks */
+	mov		v4.16b, v0.16b
+	mov		v5.16b, v1.16b
+	mov		v6.16b, v2.16b
+	do_decrypt_block4x
+	sub		x1, x1, #16
+	eor		v0.16b, v0.16b, v7.16b
+	eor		v1.16b, v1.16b, v4.16b
+	ld1		{v7.16b}, [x1], #16		/* reload 1 ct block */
+	eor		v2.16b, v2.16b, v5.16b
+	eor		v3.16b, v3.16b, v6.16b
+	st1		{v0.16b-v3.16b}, [x0], #64
+#endif
+	b		.LcbcdecloopNx
+.Lcbcdec1x:
+	adds		w4, w4, #INTERLEAVE
+	beq		.Lcbcdecout
+#endif
+.Lcbcdecloop:
+	ld1		{v1.16b}, [x1], #16		/* get next ct block */
+	mov		v0.16b, v1.16b			/* ...and copy to v0 */
+	decrypt_block	v0, w3, x2, x5, w6
+	eor		v0.16b, v0.16b, v7.16b		/* xor with iv => pt */
+	mov		v7.16b, v1.16b			/* ct is next iv */
+	st1		{v0.16b}, [x0], #16
+	subs		w4, w4, #1
+	bne		.Lcbcdecloop
+.Lcbcdecout:
+	FRAME_POP
+	ret
+AES_ENDPROC(aes_cbc_decrypt)
+
+
+	/*
+	 * aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
+	 *		   int blocks, u8 ctr[], int first)
+	 */
+
+AES_ENTRY(aes_ctr_encrypt)
+	FRAME_PUSH
+	cbnz		w6, .Lctrfirst		/* 1st time around? */
+	umov		x5, v4.d[1]		/* keep swabbed ctr in reg */
+	rev		x5, x5
+#if INTERLEAVE >= 2
+	cmn		w5, w4			/* 32 bit overflow? */
+	bcs		.Lctrinc
+	add		x5, x5, #1		/* increment BE ctr */
+	b		.LctrincNx
+#else
+	b		.Lctrinc
+#endif
+.Lctrfirst:
+	enc_prepare	w3, x2, x6
+	ld1		{v4.16b}, [x5]
+	umov		x5, v4.d[1]		/* keep swabbed ctr in reg */
+	rev		x5, x5
+#if INTERLEAVE >= 2
+	cmn		w5, w4			/* 32 bit overflow? */
+	bcs		.Lctrloop
+.LctrloopNx:
+	subs		w4, w4, #INTERLEAVE
+	bmi		.Lctr1x
+#if INTERLEAVE == 2
+	mov		v0.8b, v4.8b
+	mov		v1.8b, v4.8b
+	rev		x7, x5
+	add		x5, x5, #1
+	ins		v0.d[1], x7
+	rev		x7, x5
+	add		x5, x5, #1
+	ins		v1.d[1], x7
+	ld1		{v2.16b-v3.16b}, [x1], #32	/* get 2 input blocks */
+	do_encrypt_block2x
+	eor		v0.16b, v0.16b, v2.16b
+	eor		v1.16b, v1.16b, v3.16b
+	st1		{v0.16b-v1.16b}, [x0], #32
+#else
+	ldr		q8, =0x30000000200000001	/* addends 1,2,3[,0] */
+	dup		v7.4s, w5
+	mov		v0.16b, v4.16b
+	add		v7.4s, v7.4s, v8.4s
+	mov		v1.16b, v4.16b
+	rev32		v8.16b, v7.16b
+	mov		v2.16b, v4.16b
+	mov		v3.16b, v4.16b
+	mov		v1.s[3], v8.s[0]
+	mov		v2.s[3], v8.s[1]
+	mov		v3.s[3], v8.s[2]
+	ld1		{v5.16b-v7.16b}, [x1], #48	/* get 3 input blocks */
+	do_encrypt_block4x
+	eor		v0.16b, v5.16b, v0.16b
+	ld1		{v5.16b}, [x1], #16		/* get 1 input block  */
+	eor		v1.16b, v6.16b, v1.16b
+	eor		v2.16b, v7.16b, v2.16b
+	eor		v3.16b, v5.16b, v3.16b
+	st1		{v0.16b-v3.16b}, [x0], #64
+	add		x5, x5, #INTERLEAVE
+#endif
+	cbz		w4, .LctroutNx
+.LctrincNx:
+	rev		x7, x5
+	ins		v4.d[1], x7
+	b		.LctrloopNx
+.LctroutNx:
+	sub		x5, x5, #1
+	rev		x7, x5
+	ins		v4.d[1], x7
+	b		.Lctrout
+.Lctr1x:
+	adds		w4, w4, #INTERLEAVE
+	beq		.Lctrout
+#endif
+.Lctrloop:
+	mov		v0.16b, v4.16b
+	encrypt_block	v0, w3, x2, x6, w7
+	subs		w4, w4, #1
+	bmi		.Lctrhalfblock		/* blocks < 0 means 1/2 block */
+	ld1		{v3.16b}, [x1], #16
+	eor		v3.16b, v0.16b, v3.16b
+	st1		{v3.16b}, [x0], #16
+	beq		.Lctrout
+.Lctrinc:
+	adds		x5, x5, #1		/* increment BE ctr */
+	rev		x7, x5
+	ins		v4.d[1], x7
+	bcc		.Lctrloop		/* no overflow? */
+	umov		x7, v4.d[0]		/* load upper word of ctr  */
+	rev		x7, x7			/* ... to handle the carry */
+	add		x7, x7, #1
+	rev		x7, x7
+	ins		v4.d[0], x7
+	b		.Lctrloop
+.Lctrhalfblock:
+	ld1		{v3.8b}, [x1]
+	eor		v3.8b, v0.8b, v3.8b
+	st1		{v3.8b}, [x0]
+.Lctrout:
+	FRAME_POP
+	ret
+AES_ENDPROC(aes_ctr_encrypt)
+	.ltorg
+
+
+	/*
+	 * aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
+	 *		   int blocks, u8 const rk2[], u8 iv[], int first)
+	 * aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
+	 *		   int blocks, u8 const rk2[], u8 iv[], int first)
+	 */
+
+	.macro		next_tweak, out, in, const, tmp
+	sshr		\tmp\().2d,  \in\().2d,   #63
+	and		\tmp\().16b, \tmp\().16b, \const\().16b
+	add		\out\().2d,  \in\().2d,   \in\().2d
+	ext		\tmp\().16b, \tmp\().16b, \tmp\().16b, #8
+	eor		\out\().16b, \out\().16b, \tmp\().16b
+	.endm
+
+.Lxts_mul_x:
+	.word		1, 0, 0x87, 0
+
+AES_ENTRY(aes_xts_encrypt)
+	FRAME_PUSH
+	cbz		w7, .LxtsencloopNx
+
+	ld1		{v4.16b}, [x6]
+	enc_prepare	w3, x5, x6
+	encrypt_block	v4, w3, x5, x6, w7		/* first tweak */
+	enc_switch_key	w3, x2, x6
+	ldr		q7, .Lxts_mul_x
+	b		.LxtsencNx
+
+.LxtsencloopNx:
+	ldr		q7, .Lxts_mul_x
+	next_tweak	v4, v4, v7, v8
+.LxtsencNx:
+#if INTERLEAVE >= 2
+	subs		w4, w4, #INTERLEAVE
+	bmi		.Lxtsenc1x
+#if INTERLEAVE == 2
+	ld1		{v0.16b-v1.16b}, [x1], #32	/* get 2 pt blocks */
+	next_tweak	v5, v4, v7, v8
+	eor		v0.16b, v0.16b, v4.16b
+	eor		v1.16b, v1.16b, v5.16b
+	do_encrypt_block2x
+	eor		v0.16b, v0.16b, v4.16b
+	eor		v1.16b, v1.16b, v5.16b
+	st1		{v0.16b-v1.16b}, [x0], #32
+	cbz		w4, .LxtsencoutNx
+	next_tweak	v4, v5, v7, v8
+	b		.LxtsencNx
+.LxtsencoutNx:
+	mov		v4.16b, v5.16b
+	b		.Lxtsencout
+#else
+	ld1		{v0.16b-v3.16b}, [x1], #64	/* get 4 pt blocks */
+	next_tweak	v5, v4, v7, v8
+	eor		v0.16b, v0.16b, v4.16b
+	next_tweak	v6, v5, v7, v8
+	eor		v1.16b, v1.16b, v5.16b
+	eor		v2.16b, v2.16b, v6.16b
+	next_tweak	v7, v6, v7, v8
+	eor		v3.16b, v3.16b, v7.16b
+	do_encrypt_block4x
+	eor		v3.16b, v3.16b, v7.16b
+	eor		v0.16b, v0.16b, v4.16b
+	eor		v1.16b, v1.16b, v5.16b
+	eor		v2.16b, v2.16b, v6.16b
+	st1		{v0.16b-v3.16b}, [x0], #64
+	mov		v4.16b, v7.16b
+	cbz		w4, .Lxtsencout
+	b		.LxtsencloopNx
+#endif
+.Lxtsenc1x:
+	adds		w4, w4, #INTERLEAVE
+	beq		.Lxtsencout
+#endif
+.Lxtsencloop:
+	ld1		{v1.16b}, [x1], #16
+	eor		v0.16b, v1.16b, v4.16b
+	encrypt_block	v0, w3, x2, x6, w7
+	eor		v0.16b, v0.16b, v4.16b
+	st1		{v0.16b}, [x0], #16
+	subs		w4, w4, #1
+	beq		.Lxtsencout
+	next_tweak	v4, v4, v7, v8
+	b		.Lxtsencloop
+.Lxtsencout:
+	FRAME_POP
+	ret
+AES_ENDPROC(aes_xts_encrypt)
+
+
+AES_ENTRY(aes_xts_decrypt)
+	FRAME_PUSH
+	cbz		w7, .LxtsdecloopNx
+
+	ld1		{v4.16b}, [x6]
+	enc_prepare	w3, x5, x6
+	encrypt_block	v4, w3, x5, x6, w7		/* first tweak */
+	dec_prepare	w3, x2, x6
+	ldr		q7, .Lxts_mul_x
+	b		.LxtsdecNx
+
+.LxtsdecloopNx:
+	ldr		q7, .Lxts_mul_x
+	next_tweak	v4, v4, v7, v8
+.LxtsdecNx:
+#if INTERLEAVE >= 2
+	subs		w4, w4, #INTERLEAVE
+	bmi		.Lxtsdec1x
+#if INTERLEAVE == 2
+	ld1		{v0.16b-v1.16b}, [x1], #32	/* get 2 ct blocks */
+	next_tweak	v5, v4, v7, v8
+	eor		v0.16b, v0.16b, v4.16b
+	eor		v1.16b, v1.16b, v5.16b
+	do_decrypt_block2x
+	eor		v0.16b, v0.16b, v4.16b
+	eor		v1.16b, v1.16b, v5.16b
+	st1		{v0.16b-v1.16b}, [x0], #32
+	cbz		w4, .LxtsdecoutNx
+	next_tweak	v4, v5, v7, v8
+	b		.LxtsdecNx
+.LxtsdecoutNx:
+	mov		v4.16b, v5.16b
+	b		.Lxtsdecout
+#else
+	ld1		{v0.16b-v3.16b}, [x1], #64	/* get 4 ct blocks */
+	next_tweak	v5, v4, v7, v8
+	eor		v0.16b, v0.16b, v4.16b
+	next_tweak	v6, v5, v7, v8
+	eor		v1.16b, v1.16b, v5.16b
+	eor		v2.16b, v2.16b, v6.16b
+	next_tweak	v7, v6, v7, v8
+	eor		v3.16b, v3.16b, v7.16b
+	do_decrypt_block4x
+	eor		v3.16b, v3.16b, v7.16b
+	eor		v0.16b, v0.16b, v4.16b
+	eor		v1.16b, v1.16b, v5.16b
+	eor		v2.16b, v2.16b, v6.16b
+	st1		{v0.16b-v3.16b}, [x0], #64
+	mov		v4.16b, v7.16b
+	cbz		w4, .Lxtsdecout
+	b		.LxtsdecloopNx
+#endif
+.Lxtsdec1x:
+	adds		w4, w4, #INTERLEAVE
+	beq		.Lxtsdecout
+#endif
+.Lxtsdecloop:
+	ld1		{v1.16b}, [x1], #16
+	eor		v0.16b, v1.16b, v4.16b
+	decrypt_block	v0, w3, x2, x6, w7
+	eor		v0.16b, v0.16b, v4.16b
+	st1		{v0.16b}, [x0], #16
+	subs		w4, w4, #1
+	beq		.Lxtsdecout
+	next_tweak	v4, v4, v7, v8
+	b		.Lxtsdecloop
+.Lxtsdecout:
+	FRAME_POP
+	ret
+AES_ENDPROC(aes_xts_decrypt)
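The next_tweak macro above multiplies the 128-bit XTS tweak by 'x' in GF(2^128), folding the bit shifted out of position 127 back in with the constant 0x87 encoded in .Lxts_mul_x. The same step in scalar C, with the tweak held as two little-endian 64-bit words:

#include <stdint.h>
#include <stdio.h>

static void next_tweak(uint64_t t[2])	/* t[0] = low half */
{
	uint64_t carry = t[1] >> 63;	/* bit 127 */

	t[1] = (t[1] << 1) | (t[0] >> 63);
	t[0] <<= 1;
	if (carry)
		t[0] ^= 0x87;		/* x^128 = x^7 + x^2 + x + 1 */
}

int main(void)
{
	uint64_t t[2] = { ~0ULL, ~0ULL };

	next_tweak(t);
	printf("%016llx %016llx\n", (unsigned long long)t[1],
	       (unsigned long long)t[0]);
	return 0;	/* ffffffffffffffff ffffffffffffff79 */
}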
diff --git a/arch/arm64/crypto/aes-neon.S b/arch/arm64/crypto/aes-neon.S
new file mode 100644
index 0000000..b93170e
--- /dev/null
+++ b/arch/arm64/crypto/aes-neon.S
@@ -0,0 +1,382 @@
+/*
+ * linux/arch/arm64/crypto/aes-neon.S - AES cipher for ARMv8 NEON
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+
+#define AES_ENTRY(func)		ENTRY(neon_ ## func)
+#define AES_ENDPROC(func)	ENDPROC(neon_ ## func)
+
+	/* multiply by polynomial 'x' in GF(2^8) */
+	.macro		mul_by_x, out, in, temp, const
+	sshr		\temp, \in, #7
+	add		\out, \in, \in
+	and		\temp, \temp, \const
+	eor		\out, \out, \temp
+	.endm
+
+	/* preload the entire Sbox */
+	.macro		prepare, sbox, shiftrows, temp
+	adr		\temp, \sbox
+	movi		v12.16b, #0x40
+	ldr		q13, \shiftrows
+	movi		v14.16b, #0x1b
+	ld1		{v16.16b-v19.16b}, [\temp], #64
+	ld1		{v20.16b-v23.16b}, [\temp], #64
+	ld1		{v24.16b-v27.16b}, [\temp], #64
+	ld1		{v28.16b-v31.16b}, [\temp]
+	.endm
+
+	/* do preload for encryption */
+	.macro		enc_prepare, ignore0, ignore1, temp
+	prepare		.LForward_Sbox, .LForward_ShiftRows, \temp
+	.endm
+
+	.macro		enc_switch_key, ignore0, ignore1, temp
+	/* do nothing */
+	.endm
+
+	/* do preload for decryption */
+	.macro		dec_prepare, ignore0, ignore1, temp
+	prepare		.LReverse_Sbox, .LReverse_ShiftRows, \temp
+	.endm
+
+	/* apply SubBytes transformation using the preloaded Sbox */
+	.macro		sub_bytes, in
+	sub		v9.16b, \in\().16b, v12.16b
+	tbl		\in\().16b, {v16.16b-v19.16b}, \in\().16b
+	sub		v10.16b, v9.16b, v12.16b
+	tbx		\in\().16b, {v20.16b-v23.16b}, v9.16b
+	sub		v11.16b, v10.16b, v12.16b
+	tbx		\in\().16b, {v24.16b-v27.16b}, v10.16b
+	tbx		\in\().16b, {v28.16b-v31.16b}, v11.16b
+	.endm
+
+	/* apply MixColumns transformation */
+	.macro		mix_columns, in
+	mul_by_x	v10.16b, \in\().16b, v9.16b, v14.16b
+	rev32		v8.8h, \in\().8h
+	eor		\in\().16b, v10.16b, \in\().16b
+	shl		v9.4s, v8.4s, #24
+	shl		v11.4s, \in\().4s, #24
+	sri		v9.4s, v8.4s, #8
+	sri		v11.4s, \in\().4s, #8
+	eor		v9.16b, v9.16b, v8.16b
+	eor		v10.16b, v10.16b, v9.16b
+	eor		\in\().16b, v10.16b, v11.16b
+	.endm
+
+	/* Inverse MixColumns: pre-multiply by { 5, 0, 4, 0 } */
+	.macro		inv_mix_columns, in
+	mul_by_x	v11.16b, \in\().16b, v10.16b, v14.16b
+	mul_by_x	v11.16b, v11.16b, v10.16b, v14.16b
+	eor		\in\().16b, \in\().16b, v11.16b
+	rev32		v11.8h, v11.8h
+	eor		\in\().16b, \in\().16b, v11.16b
+	mix_columns	\in
+	.endm
+
+	.macro		do_block, enc, in, rounds, rk, rkp, i
+	ld1		{v15.16b}, [\rk]
+	add		\rkp, \rk, #16
+	mov		\i, \rounds
+1111:	eor		\in\().16b, \in\().16b, v15.16b		/* ^round key */
+	tbl		\in\().16b, {\in\().16b}, v13.16b	/* ShiftRows */
+	sub_bytes	\in
+	ld1		{v15.16b}, [\rkp], #16
+	subs		\i, \i, #1
+	beq		2222f
+	.if		\enc == 1
+	mix_columns	\in
+	.else
+	inv_mix_columns	\in
+	.endif
+	b		1111b
+2222:	eor		\in\().16b, \in\().16b, v15.16b		/* ^round key */
+	.endm
+
+	.macro		encrypt_block, in, rounds, rk, rkp, i
+	do_block	1, \in, \rounds, \rk, \rkp, \i
+	.endm
+
+	.macro		decrypt_block, in, rounds, rk, rkp, i
+	do_block	0, \in, \rounds, \rk, \rkp, \i
+	.endm
+
+	/*
+	 * Interleaved versions: functionally equivalent to the
+	 * ones above, but applied to 2 or 4 AES states in parallel.
+	 */
+
+	.macro		sub_bytes_2x, in0, in1
+	sub		v8.16b, \in0\().16b, v12.16b
+	sub		v9.16b, \in1\().16b, v12.16b
+	tbl		\in0\().16b, {v16.16b-v19.16b}, \in0\().16b
+	tbl		\in1\().16b, {v16.16b-v19.16b}, \in1\().16b
+	sub		v10.16b, v8.16b, v12.16b
+	sub		v11.16b, v9.16b, v12.16b
+	tbx		\in0\().16b, {v20.16b-v23.16b}, v8.16b
+	tbx		\in1\().16b, {v20.16b-v23.16b}, v9.16b
+	sub		v8.16b, v10.16b, v12.16b
+	sub		v9.16b, v11.16b, v12.16b
+	tbx		\in0\().16b, {v24.16b-v27.16b}, v10.16b
+	tbx		\in1\().16b, {v24.16b-v27.16b}, v11.16b
+	tbx		\in0\().16b, {v28.16b-v31.16b}, v8.16b
+	tbx		\in1\().16b, {v28.16b-v31.16b}, v9.16b
+	.endm
+
+	.macro		sub_bytes_4x, in0, in1, in2, in3
+	sub		v8.16b, \in0\().16b, v12.16b
+	tbl		\in0\().16b, {v16.16b-v19.16b}, \in0\().16b
+	sub		v9.16b, \in1\().16b, v12.16b
+	tbl		\in1\().16b, {v16.16b-v19.16b}, \in1\().16b
+	sub		v10.16b, \in2\().16b, v12.16b
+	tbl		\in2\().16b, {v16.16b-v19.16b}, \in2\().16b
+	sub		v11.16b, \in3\().16b, v12.16b
+	tbl		\in3\().16b, {v16.16b-v19.16b}, \in3\().16b
+	tbx		\in0\().16b, {v20.16b-v23.16b}, v8.16b
+	tbx		\in1\().16b, {v20.16b-v23.16b}, v9.16b
+	sub		v8.16b, v8.16b, v12.16b
+	tbx		\in2\().16b, {v20.16b-v23.16b}, v10.16b
+	sub		v9.16b, v9.16b, v12.16b
+	tbx		\in3\().16b, {v20.16b-v23.16b}, v11.16b
+	sub		v10.16b, v10.16b, v12.16b
+	tbx		\in0\().16b, {v24.16b-v27.16b}, v8.16b
+	sub		v11.16b, v11.16b, v12.16b
+	tbx		\in1\().16b, {v24.16b-v27.16b}, v9.16b
+	sub		v8.16b, v8.16b, v12.16b
+	tbx		\in2\().16b, {v24.16b-v27.16b}, v10.16b
+	sub		v9.16b, v9.16b, v12.16b
+	tbx		\in3\().16b, {v24.16b-v27.16b}, v11.16b
+	sub		v10.16b, v10.16b, v12.16b
+	tbx		\in0\().16b, {v28.16b-v31.16b}, v8.16b
+	sub		v11.16b, v11.16b, v12.16b
+	tbx		\in1\().16b, {v28.16b-v31.16b}, v9.16b
+	tbx		\in2\().16b, {v28.16b-v31.16b}, v10.16b
+	tbx		\in3\().16b, {v28.16b-v31.16b}, v11.16b
+	.endm
+
+	.macro		mul_by_x_2x, out0, out1, in0, in1, tmp0, tmp1, const
+	sshr		\tmp0\().16b, \in0\().16b,  #7
+	add		\out0\().16b, \in0\().16b,  \in0\().16b
+	sshr		\tmp1\().16b, \in1\().16b,  #7
+	and		\tmp0\().16b, \tmp0\().16b, \const\().16b
+	add		\out1\().16b, \in1\().16b,  \in1\().16b
+	and		\tmp1\().16b, \tmp1\().16b, \const\().16b
+	eor		\out0\().16b, \out0\().16b, \tmp0\().16b
+	eor		\out1\().16b, \out1\().16b, \tmp1\().16b
+	.endm
+
+	.macro		mix_columns_2x, in0, in1
+	mul_by_x_2x	v8, v9, \in0, \in1, v10, v11, v14
+	rev32		v10.8h, \in0\().8h
+	rev32		v11.8h, \in1\().8h
+	eor		\in0\().16b, v8.16b, \in0\().16b
+	eor		\in1\().16b, v9.16b, \in1\().16b
+	shl		v12.4s, v10.4s, #24
+	shl		v13.4s, v11.4s, #24
+	eor		v8.16b, v8.16b, v10.16b
+	sri		v12.4s, v10.4s, #8
+	shl		v10.4s, \in0\().4s, #24
+	eor		v9.16b, v9.16b, v11.16b
+	sri		v13.4s, v11.4s, #8
+	shl		v11.4s, \in1\().4s, #24
+	sri		v10.4s, \in0\().4s, #8
+	eor		\in0\().16b, v8.16b, v12.16b
+	sri		v11.4s, \in1\().4s, #8
+	eor		\in1\().16b, v9.16b, v13.16b
+	eor		\in0\().16b, v10.16b, \in0\().16b
+	eor		\in1\().16b, v11.16b, \in1\().16b
+	.endm
+
+	.macro		inv_mix_cols_2x, in0, in1
+	mul_by_x_2x	v8, v9, \in0, \in1, v10, v11, v14
+	mul_by_x_2x	v8, v9, v8, v9, v10, v11, v14
+	eor		\in0\().16b, \in0\().16b, v8.16b
+	eor		\in1\().16b, \in1\().16b, v9.16b
+	rev32		v8.8h, v8.8h
+	rev32		v9.8h, v9.8h
+	eor		\in0\().16b, \in0\().16b, v8.16b
+	eor		\in1\().16b, \in1\().16b, v9.16b
+	mix_columns_2x	\in0, \in1
+	.endm
+
+	.macro		inv_mix_cols_4x, in0, in1, in2, in3
+	mul_by_x_2x	v8, v9, \in0, \in1, v10, v11, v14
+	mul_by_x_2x	v10, v11, \in2, \in3, v12, v13, v14
+	mul_by_x_2x	v8, v9, v8, v9, v12, v13, v14
+	mul_by_x_2x	v10, v11, v10, v11, v12, v13, v14
+	eor		\in0\().16b, \in0\().16b, v8.16b
+	eor		\in1\().16b, \in1\().16b, v9.16b
+	eor		\in2\().16b, \in2\().16b, v10.16b
+	eor		\in3\().16b, \in3\().16b, v11.16b
+	rev32		v8.8h, v8.8h
+	rev32		v9.8h, v9.8h
+	rev32		v10.8h, v10.8h
+	rev32		v11.8h, v11.8h
+	eor		\in0\().16b, \in0\().16b, v8.16b
+	eor		\in1\().16b, \in1\().16b, v9.16b
+	eor		\in2\().16b, \in2\().16b, v10.16b
+	eor		\in3\().16b, \in3\().16b, v11.16b
+	mix_columns_2x	\in0, \in1
+	mix_columns_2x	\in2, \in3
+	.endm
+
+	.macro		do_block_2x, enc, in0, in1, rounds, rk, rkp, i
+	ld1		{v15.16b}, [\rk]
+	add		\rkp, \rk, #16
+	mov		\i, \rounds
+1111:	eor		\in0\().16b, \in0\().16b, v15.16b	/* ^round key */
+	eor		\in1\().16b, \in1\().16b, v15.16b	/* ^round key */
+	sub_bytes_2x	\in0, \in1
+	tbl		\in0\().16b, {\in0\().16b}, v13.16b	/* ShiftRows */
+	tbl		\in1\().16b, {\in1\().16b}, v13.16b	/* ShiftRows */
+	ld1		{v15.16b}, [\rkp], #16
+	subs		\i, \i, #1
+	beq		2222f
+	.if		\enc == 1
+	mix_columns_2x	\in0, \in1
+	ldr		q13, .LForward_ShiftRows
+	.else
+	inv_mix_cols_2x	\in0, \in1
+	ldr		q13, .LReverse_ShiftRows
+	.endif
+	movi		v12.16b, #0x40
+	b		1111b
+2222:	eor		\in0\().16b, \in0\().16b, v15.16b	/* ^round key */
+	eor		\in1\().16b, \in1\().16b, v15.16b	/* ^round key */
+	.endm
+
+	.macro		do_block_4x, enc, in0, in1, in2, in3, rounds, rk, rkp, i
+	ld1		{v15.16b}, [\rk]
+	add		\rkp, \rk, #16
+	mov		\i, \rounds
+1111:	eor		\in0\().16b, \in0\().16b, v15.16b	/* ^round key */
+	eor		\in1\().16b, \in1\().16b, v15.16b	/* ^round key */
+	eor		\in2\().16b, \in2\().16b, v15.16b	/* ^round key */
+	eor		\in3\().16b, \in3\().16b, v15.16b	/* ^round key */
+	sub_bytes_4x	\in0, \in1, \in2, \in3
+	tbl		\in0\().16b, {\in0\().16b}, v13.16b	/* ShiftRows */
+	tbl		\in1\().16b, {\in1\().16b}, v13.16b	/* ShiftRows */
+	tbl		\in2\().16b, {\in2\().16b}, v13.16b	/* ShiftRows */
+	tbl		\in3\().16b, {\in3\().16b}, v13.16b	/* ShiftRows */
+	ld1		{v15.16b}, [\rkp], #16
+	subs		\i, \i, #1
+	beq		2222f
+	.if		\enc == 1
+	mix_columns_2x	\in0, \in1
+	mix_columns_2x	\in2, \in3
+	ldr		q13, .LForward_ShiftRows
+	.else
+	inv_mix_cols_4x	\in0, \in1, \in2, \in3
+	ldr		q13, .LReverse_ShiftRows
+	.endif
+	movi		v12.16b, #0x40
+	b		1111b
+2222:	eor		\in0\().16b, \in0\().16b, v15.16b	/* ^round key */
+	eor		\in1\().16b, \in1\().16b, v15.16b	/* ^round key */
+	eor		\in2\().16b, \in2\().16b, v15.16b	/* ^round key */
+	eor		\in3\().16b, \in3\().16b, v15.16b	/* ^round key */
+	.endm
+
+	.macro		encrypt_block2x, in0, in1, rounds, rk, rkp, i
+	do_block_2x	1, \in0, \in1, \rounds, \rk, \rkp, \i
+	.endm
+
+	.macro		decrypt_block2x, in0, in1, rounds, rk, rkp, i
+	do_block_2x	0, \in0, \in1, \rounds, \rk, \rkp, \i
+	.endm
+
+	.macro		encrypt_block4x, in0, in1, in2, in3, rounds, rk, rkp, i
+	do_block_4x	1, \in0, \in1, \in2, \in3, \rounds, \rk, \rkp, \i
+	.endm
+
+	.macro		decrypt_block4x, in0, in1, in2, in3, rounds, rk, rkp, i
+	do_block_4x	0, \in0, \in1, \in2, \in3, \rounds, \rk, \rkp, \i
+	.endm
+
+#include "aes-modes.S"
+
+	.text
+	.align		4
+.LForward_ShiftRows:
+	.byte		0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3
+	.byte		0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb
+
+.LReverse_ShiftRows:
+	.byte		0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb
+	.byte		0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3
+
+.LForward_Sbox:
+	.byte		0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
+	.byte		0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
+	.byte		0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
+	.byte		0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
+	.byte		0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
+	.byte		0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
+	.byte		0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
+	.byte		0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
+	.byte		0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
+	.byte		0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
+	.byte		0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
+	.byte		0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
+	.byte		0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
+	.byte		0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
+	.byte		0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
+	.byte		0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
+	.byte		0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
+	.byte		0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
+	.byte		0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
+	.byte		0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
+	.byte		0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
+	.byte		0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
+	.byte		0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
+	.byte		0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
+	.byte		0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
+	.byte		0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
+	.byte		0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
+	.byte		0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
+	.byte		0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
+	.byte		0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
+	.byte		0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
+	.byte		0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+
+.LReverse_Sbox:
+	.byte		0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
+	.byte		0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+	.byte		0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+	.byte		0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+	.byte		0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+	.byte		0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+	.byte		0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+	.byte		0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+	.byte		0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+	.byte		0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+	.byte		0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+	.byte		0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+	.byte		0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+	.byte		0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+	.byte		0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+	.byte		0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+	.byte		0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+	.byte		0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+	.byte		0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+	.byte		0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+	.byte		0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+	.byte		0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+	.byte		0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+	.byte		0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+	.byte		0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+	.byte		0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+	.byte		0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+	.byte		0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+	.byte		0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+	.byte		0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+	.byte		0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+	.byte		0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
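The mul_by_x macro near the top of this file is the lane-wise form of GF(2^8) doubling: the arithmetic right shift of the sign bit produces an all-ones mask exactly when reduction by the AES polynomial is needed. Its scalar equivalent:

#include <stdint.h>
#include <stdio.h>

static uint8_t mul_by_x(uint8_t b)
{
	/* sign-extended top bit: 0x00 or 0xff, like sshr #7 */
	uint8_t mask = (uint8_t)((int8_t)b >> 7);

	return (uint8_t)((b << 1) ^ (mask & 0x1b));
}

int main(void)
{
	printf("0x57 * x = 0x%02x\n", mul_by_x(0x57));	/* 0xae */
	printf("0x80 * x = 0x%02x\n", mul_by_x(0x80));	/* 0x1b */
	return 0;
}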
diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S
new file mode 100644
index 0000000..dc45701
--- /dev/null
+++ b/arch/arm64/crypto/ghash-ce-core.S
@@ -0,0 +1,79 @@
+/*
+ * Accelerated GHASH implementation with ARMv8 PMULL instructions.
+ *
+ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+	SHASH	.req	v0
+	SHASH2	.req	v1
+	T1	.req	v2
+	T2	.req	v3
+	MASK	.req	v4
+	XL	.req	v5
+	XM	.req	v6
+	XH	.req	v7
+	IN1	.req	v7
+
+	.text
+	.arch		armv8-a+crypto
+
+	/*
+	 * void pmull_ghash_update(int blocks, u64 dg[], const char *src,
+	 *			   struct ghash_key const *k, const char *head)
+	 */
+ENTRY(pmull_ghash_update)
+	ld1		{SHASH.16b}, [x3]
+	ld1		{XL.16b}, [x1]
+	movi		MASK.16b, #0xe1
+	ext		SHASH2.16b, SHASH.16b, SHASH.16b, #8
+	shl		MASK.2d, MASK.2d, #57
+	eor		SHASH2.16b, SHASH2.16b, SHASH.16b
+
+	/* do the head block first, if supplied */
+	cbz		x4, 0f
+	ld1		{T1.2d}, [x4]
+	b		1f
+
+0:	ld1		{T1.2d}, [x2], #16
+	sub		w0, w0, #1
+
+1:	/* multiply XL by SHASH in GF(2^128) */
+CPU_LE(	rev64		T1.16b, T1.16b	)
+
+	ext		T2.16b, XL.16b, XL.16b, #8
+	ext		IN1.16b, T1.16b, T1.16b, #8
+	eor		T1.16b, T1.16b, T2.16b
+	eor		XL.16b, XL.16b, IN1.16b
+
+	pmull2		XH.1q, SHASH.2d, XL.2d		// a1 * b1
+	eor		T1.16b, T1.16b, XL.16b
+	pmull		XL.1q, SHASH.1d, XL.1d		// a0 * b0
+	pmull		XM.1q, SHASH2.1d, T1.1d		// (a1 + a0)(b1 + b0)
+
+	ext		T1.16b, XL.16b, XH.16b, #8
+	eor		T2.16b, XL.16b, XH.16b
+	eor		XM.16b, XM.16b, T1.16b
+	eor		XM.16b, XM.16b, T2.16b
+	pmull		T2.1q, XL.1d, MASK.1d
+
+	mov		XH.d[0], XM.d[1]
+	mov		XM.d[1], XL.d[0]
+
+	eor		XL.16b, XM.16b, T2.16b
+	ext		T2.16b, XL.16b, XL.16b, #8
+	pmull		XL.1q, XL.1d, MASK.1d
+	eor		T2.16b, T2.16b, XH.16b
+	eor		XL.16b, XL.16b, T2.16b
+
+	cbnz		w0, 0b
+
+	st1		{XL.16b}, [x1]
+	ret
+ENDPROC(pmull_ghash_update)
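pmull_ghash_update() above evaluates one GF(2^128) multiply per block using three PMULL instructions (a Karatsuba split into a1*b1, a0*b0 and (a1+a0)(b1+b0)) followed by reduction with the 0xe1 constant. A slow bit-serial reference for the same field multiplication, useful only for cross-checking; gf128_mul() is an invented name, and its word layout (index 1 = most significant) need not match the in-kernel digest layout:

#include <stdint.h>

static void gf128_mul(uint64_t x[2], const uint64_t h[2])
{
	uint64_t z0 = 0, z1 = 0;
	uint64_t v0 = h[0], v1 = h[1];	/* [1] = most significant word */
	int i;

	for (i = 0; i < 128; i++) {
		uint64_t word = (i < 64) ? x[1] : x[0];
		uint64_t lsb = v0 & 1;

		if ((word >> (63 - (i & 63))) & 1) {
			z0 ^= v0;
			z1 ^= v1;
		}
		/* V *= x: shift right, fold the carry with 0xe1... */
		v0 = (v0 >> 1) | (v1 << 63);
		v1 >>= 1;
		if (lsb)
			v1 ^= 0xe100000000000000ULL;
	}
	x[0] = z0;
	x[1] = z1;
}

int main(void)
{
	/* GCM's multiplicative identity has only its first bit set */
	uint64_t one[2] = { 0, 0x8000000000000000ULL };
	uint64_t x[2] = { 0x123456789abcdef0ULL, 0x0fedcba987654321ULL };
	uint64_t h0 = x[0], h1 = x[1];

	gf128_mul(x, one);
	return !(x[0] == h0 && x[1] == h1);	/* 0: x * 1 == x */
}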
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
new file mode 100644
index 0000000..833ec1e
--- /dev/null
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -0,0 +1,156 @@
+/*
+ * Accelerated GHASH implementation with ARMv8 PMULL instructions.
+ *
+ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/hash.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("GHASH secure hash using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+#define GHASH_BLOCK_SIZE	16
+#define GHASH_DIGEST_SIZE	16
+
+struct ghash_key {
+	u64 a;
+	u64 b;
+};
+
+struct ghash_desc_ctx {
+	u64 digest[GHASH_DIGEST_SIZE/sizeof(u64)];
+	u8 buf[GHASH_BLOCK_SIZE];
+	u32 count;
+};
+
+asmlinkage void pmull_ghash_update(int blocks, u64 dg[], const char *src,
+				   struct ghash_key const *k, const char *head);
+
+static int ghash_init(struct shash_desc *desc)
+{
+	struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
+
+	*ctx = (struct ghash_desc_ctx){};
+	return 0;
+}
+
+static int ghash_update(struct shash_desc *desc, const u8 *src,
+			unsigned int len)
+{
+	struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
+	unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
+
+	ctx->count += len;
+
+	if ((partial + len) >= GHASH_BLOCK_SIZE) {
+		struct ghash_key *key = crypto_shash_ctx(desc->tfm);
+		int blocks;
+
+		if (partial) {
+			int p = GHASH_BLOCK_SIZE - partial;
+
+			memcpy(ctx->buf + partial, src, p);
+			src += p;
+			len -= p;
+		}
+
+		blocks = len / GHASH_BLOCK_SIZE;
+		len %= GHASH_BLOCK_SIZE;
+
+		kernel_neon_begin_partial(8);
+		pmull_ghash_update(blocks, ctx->digest, src, key,
+				   partial ? ctx->buf : NULL);
+		kernel_neon_end();
+		src += blocks * GHASH_BLOCK_SIZE;
+		partial = 0;
+	}
+	if (len)
+		memcpy(ctx->buf + partial, src, len);
+	return 0;
+}
+
+static int ghash_final(struct shash_desc *desc, u8 *dst)
+{
+	struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
+	unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
+
+	if (partial) {
+		struct ghash_key *key = crypto_shash_ctx(desc->tfm);
+
+		memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);
+
+		kernel_neon_begin_partial(8);
+		pmull_ghash_update(1, ctx->digest, ctx->buf, key, NULL);
+		kernel_neon_end();
+	}
+	put_unaligned_be64(ctx->digest[1], dst);
+	put_unaligned_be64(ctx->digest[0], dst + 8);
+
+	*ctx = (struct ghash_desc_ctx){};
+	return 0;
+}
+
+static int ghash_setkey(struct crypto_shash *tfm,
+			const u8 *inkey, unsigned int keylen)
+{
+	struct ghash_key *key = crypto_shash_ctx(tfm);
+	u64 a, b;
+
+	if (keylen != GHASH_BLOCK_SIZE) {
+		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	/* perform multiplication by 'x' in GF(2^128) */
+	b = get_unaligned_be64(inkey);
+	a = get_unaligned_be64(inkey + 8);
+
+	key->a = (a << 1) | (b >> 63);
+	key->b = (b << 1) | (a >> 63);
+
+	if (b >> 63)
+		key->b ^= 0xc200000000000000UL;
+
+	return 0;
+}
+
+static struct shash_alg ghash_alg = {
+	.digestsize	= GHASH_DIGEST_SIZE,
+	.init		= ghash_init,
+	.update		= ghash_update,
+	.final		= ghash_final,
+	.setkey		= ghash_setkey,
+	.descsize	= sizeof(struct ghash_desc_ctx),
+	.base		= {
+		.cra_name		= "ghash",
+		.cra_driver_name	= "ghash-ce",
+		.cra_priority		= 200,
+		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize		= GHASH_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct ghash_key),
+		.cra_module		= THIS_MODULE,
+	},
+};
+
+static int __init ghash_ce_mod_init(void)
+{
+	return crypto_register_shash(&ghash_alg);
+}
+
+static void __exit ghash_ce_mod_exit(void)
+{
+	crypto_unregister_shash(&ghash_alg);
+}
+
+module_cpu_feature_match(PMULL, ghash_ce_mod_init);
+module_exit(ghash_ce_mod_exit);
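ghash_setkey() above stores H premultiplied by 'x' in GF(2^128); the conditional xor applies the reduction, and 0xc2... is the GCM constant 0xe1... shifted left one bit to match. Restated as standalone C (ghash_derive_key() is a hypothetical name; b holds the first eight key bytes and a the last eight, both read big-endian, exactly as in the setkey above):

#include <stdint.h>
#include <stdio.h>

struct ghash_key { uint64_t a, b; };

static void ghash_derive_key(struct ghash_key *key, uint64_t b, uint64_t a)
{
	key->a = (a << 1) | (b >> 63);
	key->b = (b << 1) | (a >> 63);

	if (b >> 63)
		key->b ^= 0xc200000000000000ULL;	/* reduce */
}

int main(void)
{
	struct ghash_key k;

	ghash_derive_key(&k, 0x8000000000000000ULL, 0);
	printf("a=%016llx b=%016llx\n", (unsigned long long)k.a,
	       (unsigned long long)k.b);
	return 0;	/* a=0000000000000001 b=c200000000000000 */
}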
diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S
new file mode 100644
index 0000000..09d57d9
--- /dev/null
+++ b/arch/arm64/crypto/sha1-ce-core.S
@@ -0,0 +1,153 @@
+/*
+ * sha1-ce-core.S - SHA-1 secure hash using ARMv8 Crypto Extensions
+ *
+ * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+	.text
+	.arch		armv8-a+crypto
+
+	k0		.req	v0
+	k1		.req	v1
+	k2		.req	v2
+	k3		.req	v3
+
+	t0		.req	v4
+	t1		.req	v5
+
+	dga		.req	q6
+	dgav		.req	v6
+	dgb		.req	s7
+	dgbv		.req	v7
+
+	dg0q		.req	q12
+	dg0s		.req	s12
+	dg0v		.req	v12
+	dg1s		.req	s13
+	dg1v		.req	v13
+	dg2s		.req	s14
+
+	.macro		add_only, op, ev, rc, s0, dg1
+	.ifc		\ev, ev
+	add		t1.4s, v\s0\().4s, \rc\().4s
+	sha1h		dg2s, dg0s
+	.ifnb		\dg1
+	sha1\op		dg0q, \dg1, t0.4s
+	.else
+	sha1\op		dg0q, dg1s, t0.4s
+	.endif
+	.else
+	.ifnb		\s0
+	add		t0.4s, v\s0\().4s, \rc\().4s
+	.endif
+	sha1h		dg1s, dg0s
+	sha1\op		dg0q, dg2s, t1.4s
+	.endif
+	.endm
+
+	.macro		add_update, op, ev, rc, s0, s1, s2, s3, dg1
+	sha1su0		v\s0\().4s, v\s1\().4s, v\s2\().4s
+	add_only	\op, \ev, \rc, \s1, \dg1
+	sha1su1		v\s0\().4s, v\s3\().4s
+	.endm
+
+	/*
+	 * The SHA1 round constants
+	 */
+	.align		4
+.Lsha1_rcon:
+	.word		0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6
+
+	/*
+	 * void sha1_ce_transform(int blocks, u8 const *src, u32 *state,
+	 * 			  u8 *head, long bytes)
+	 */
+ENTRY(sha1_ce_transform)
+	/* load round constants */
+	adr		x6, .Lsha1_rcon
+	ld1r		{k0.4s}, [x6], #4
+	ld1r		{k1.4s}, [x6], #4
+	ld1r		{k2.4s}, [x6], #4
+	ld1r		{k3.4s}, [x6]
+
+	/* load state */
+	ldr		dga, [x2]
+	ldr		dgb, [x2, #16]
+
+	/* load partial state (if supplied) */
+	cbz		x3, 0f
+	ld1		{v8.4s-v11.4s}, [x3]
+	b		1f
+
+	/* load input */
+0:	ld1		{v8.4s-v11.4s}, [x1], #64
+	sub		w0, w0, #1
+
+1:
+CPU_LE(	rev32		v8.16b, v8.16b		)
+CPU_LE(	rev32		v9.16b, v9.16b		)
+CPU_LE(	rev32		v10.16b, v10.16b	)
+CPU_LE(	rev32		v11.16b, v11.16b	)
+
+2:	add		t0.4s, v8.4s, k0.4s
+	mov		dg0v.16b, dgav.16b
+
+	add_update	c, ev, k0,  8,  9, 10, 11, dgb
+	add_update	c, od, k0,  9, 10, 11,  8
+	add_update	c, ev, k0, 10, 11,  8,  9
+	add_update	c, od, k0, 11,  8,  9, 10
+	add_update	c, ev, k1,  8,  9, 10, 11
+
+	add_update	p, od, k1,  9, 10, 11,  8
+	add_update	p, ev, k1, 10, 11,  8,  9
+	add_update	p, od, k1, 11,  8,  9, 10
+	add_update	p, ev, k1,  8,  9, 10, 11
+	add_update	p, od, k2,  9, 10, 11,  8
+
+	add_update	m, ev, k2, 10, 11,  8,  9
+	add_update	m, od, k2, 11,  8,  9, 10
+	add_update	m, ev, k2,  8,  9, 10, 11
+	add_update	m, od, k2,  9, 10, 11,  8
+	add_update	m, ev, k3, 10, 11,  8,  9
+
+	add_update	p, od, k3, 11,  8,  9, 10
+	add_only	p, ev, k3,  9
+	add_only	p, od, k3, 10
+	add_only	p, ev, k3, 11
+	add_only	p, od
+
+	/* update state */
+	add		dgbv.2s, dgbv.2s, dg1v.2s
+	add		dgav.4s, dgav.4s, dg0v.4s
+
+	cbnz		w0, 0b
+
+	/*
+	 * Final block: add padding and total bit count.
+	 * Skip if we have no total byte count in x4. In that case, the input
+	 * size was not a round multiple of the block size, and the padding is
+	 * handled by the C code.
+	 */
+	cbz		x4, 3f
+	movi		v9.2d, #0
+	mov		x8, #0x80000000
+	movi		v10.2d, #0
+	ror		x7, x4, #29		// ror(lsl(x4, 3), 32)
+	fmov		d8, x8
+	mov		x4, #0
+	mov		v11.d[0], xzr
+	mov		v11.d[1], x7
+	b		2b
+
+	/* store new state */
+3:	str		dga, [x2]
+	str		dgb, [x2, #16]
+	ret
+ENDPROC(sha1_ce_transform)
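
Editor's note: the one subtle instruction in the padding path is "ror x7, x4, #29". As its comment hints, this equals ror(lsl(x4, 3), 32): shifting the byte count left by 3 multiplies it by 8 (bytes to bits), and rotating by 32 swaps the two 32-bit halves, so that once x7 lands in v11.d[1] the two lanes hold the bit count in the big-endian word order the rounds expect (the branch to 2b deliberately skips the rev32 fixup at label 1). The equivalent arithmetic in C, for illustration only:

	uint64_t bits = nbytes << 3;			/* byte count -> bit count */
	uint64_t x7   = (bits >> 32) | (bits << 32);	/* swap 32-bit halves */
	/* identical to a 64-bit rotate of nbytes right by 29 */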
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
new file mode 100644
index 0000000..6fe83f3
--- /dev/null
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -0,0 +1,174 @@
+/*
+ * sha1-ce-glue.c - SHA-1 secure hash using ARMv8 Crypto Extensions
+ *
+ * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+asmlinkage void sha1_ce_transform(int blocks, u8 const *src, u32 *state,
+				  u8 *head, long bytes);
+
+static int sha1_init(struct shash_desc *desc)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+
+	*sctx = (struct sha1_state){
+		.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
+	};
+	return 0;
+}
+
+static int sha1_update(struct shash_desc *desc, const u8 *data,
+		       unsigned int len)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+	unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
+
+	sctx->count += len;
+
+	if ((partial + len) >= SHA1_BLOCK_SIZE) {
+		int blocks;
+
+		if (partial) {
+			int p = SHA1_BLOCK_SIZE - partial;
+
+			memcpy(sctx->buffer + partial, data, p);
+			data += p;
+			len -= p;
+		}
+
+		blocks = len / SHA1_BLOCK_SIZE;
+		len %= SHA1_BLOCK_SIZE;
+
+		kernel_neon_begin_partial(16);
+		sha1_ce_transform(blocks, data, sctx->state,
+				  partial ? sctx->buffer : NULL, 0);
+		kernel_neon_end();
+
+		data += blocks * SHA1_BLOCK_SIZE;
+		partial = 0;
+	}
+	if (len)
+		memcpy(sctx->buffer + partial, data, len);
+	return 0;
+}
+
+static int sha1_final(struct shash_desc *desc, u8 *out)
+{
+	static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };
+
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+	__be64 bits = cpu_to_be64(sctx->count << 3);
+	__be32 *dst = (__be32 *)out;
+	int i;
+
+	u32 padlen = SHA1_BLOCK_SIZE
+		     - ((sctx->count + sizeof(bits)) % SHA1_BLOCK_SIZE);
+
+	sha1_update(desc, padding, padlen);
+	sha1_update(desc, (const u8 *)&bits, sizeof(bits));
+
+	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
+		put_unaligned_be32(sctx->state[i], dst++);
+
+	*sctx = (struct sha1_state){};
+	return 0;
+}
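
Editor's note: the padlen arithmetic above always yields a value in [1, 64], so the 0x80 marker byte is emitted exactly once and the padded message length is a multiple of the block size. Two worked cases:

	/*
	 *   count = 55:  (55 + 8) % 64 = 63  ->  padlen = 1
	 *                55 + 1 + 8  = 64    (fits in one final block)
	 *   count = 56:  (56 + 8) % 64 = 0   ->  padlen = 64
	 *                56 + 64 + 8 = 128   (marker and length spill into
	 *                                     a second block, as SHA-1
	 *                                     requires)
	 */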
+
+static int sha1_finup(struct shash_desc *desc, const u8 *data,
+		      unsigned int len, u8 *out)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+	__be32 *dst = (__be32 *)out;
+	int blocks;
+	int i;
+
+	if (sctx->count || !len || (len % SHA1_BLOCK_SIZE)) {
+		sha1_update(desc, data, len);
+		return sha1_final(desc, out);
+	}
+
+	/*
+	 * Use a fast path if the input is a multiple of 64 bytes. In
+	 * this case, there is no need to copy data around, and we can
+	 * perform the entire digest calculation in a single invocation
+	 * of sha1_ce_transform()
+	 */
+	blocks = len / SHA1_BLOCK_SIZE;
+
+	kernel_neon_begin_partial(16);
+	sha1_ce_transform(blocks, data, sctx->state, NULL, len);
+	kernel_neon_end();
+
+	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
+		put_unaligned_be32(sctx->state[i], dst++);
+
+	*sctx = (struct sha1_state){};
+	return 0;
+}
+
+static int sha1_export(struct shash_desc *desc, void *out)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+	struct sha1_state *dst = out;
+
+	*dst = *sctx;
+	return 0;
+}
+
+static int sha1_import(struct shash_desc *desc, const void *in)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+	struct sha1_state const *src = in;
+
+	*sctx = *src;
+	return 0;
+}
+
+static struct shash_alg alg = {
+	.init			= sha1_init,
+	.update			= sha1_update,
+	.final			= sha1_final,
+	.finup			= sha1_finup,
+	.export			= sha1_export,
+	.import			= sha1_import,
+	.descsize		= sizeof(struct sha1_state),
+	.digestsize		= SHA1_DIGEST_SIZE,
+	.statesize		= sizeof(struct sha1_state),
+	.base			= {
+		.cra_name		= "sha1",
+		.cra_driver_name	= "sha1-ce",
+		.cra_priority		= 200,
+		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize		= SHA1_BLOCK_SIZE,
+		.cra_module		= THIS_MODULE,
+	}
+};
+
+static int __init sha1_ce_mod_init(void)
+{
+	return crypto_register_shash(&alg);
+}
+
+static void __exit sha1_ce_mod_fini(void)
+{
+	crypto_unregister_shash(&alg);
+}
+
+module_cpu_feature_match(SHA1, sha1_ce_mod_init);
+module_exit(sha1_ce_mod_fini);
diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S
new file mode 100644
index 0000000..7f29fc0
--- /dev/null
+++ b/arch/arm64/crypto/sha2-ce-core.S
@@ -0,0 +1,156 @@
+/*
+ * sha2-ce-core.S - core SHA-224/SHA-256 transform using v8 Crypto Extensions
+ *
+ * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+	.text
+	.arch		armv8-a+crypto
+
+	dga		.req	q20
+	dgav		.req	v20
+	dgb		.req	q21
+	dgbv		.req	v21
+
+	t0		.req	v22
+	t1		.req	v23
+
+	dg0q		.req	q24
+	dg0v		.req	v24
+	dg1q		.req	q25
+	dg1v		.req	v25
+	dg2q		.req	q26
+	dg2v		.req	v26
+
+	.macro		add_only, ev, rc, s0
+	mov		dg2v.16b, dg0v.16b
+	.ifeq		\ev
+	add		t1.4s, v\s0\().4s, \rc\().4s
+	sha256h		dg0q, dg1q, t0.4s
+	sha256h2	dg1q, dg2q, t0.4s
+	.else
+	.ifnb		\s0
+	add		t0.4s, v\s0\().4s, \rc\().4s
+	.endif
+	sha256h		dg0q, dg1q, t1.4s
+	sha256h2	dg1q, dg2q, t1.4s
+	.endif
+	.endm
+
+	.macro		add_update, ev, rc, s0, s1, s2, s3
+	sha256su0	v\s0\().4s, v\s1\().4s
+	add_only	\ev, \rc, \s1
+	sha256su1	v\s0\().4s, v\s2\().4s, v\s3\().4s
+	.endm
+
+	/*
+	 * The SHA-256 round constants
+	 */
+	.align		4
+.Lsha2_rcon:
+	.word		0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
+	.word		0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5
+	.word		0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3
+	.word		0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174
+	.word		0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc
+	.word		0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da
+	.word		0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7
+	.word		0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967
+	.word		0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13
+	.word		0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85
+	.word		0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3
+	.word		0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070
+	.word		0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5
+	.word		0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3
+	.word		0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
+	.word		0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
+
+	/*
+	 * void sha2_ce_transform(int blocks, u8 const *src, u32 *state,
+	 *                        u8 *head, long bytes)
+	 */
+ENTRY(sha2_ce_transform)
+	/* load round constants */
+	adr		x8, .Lsha2_rcon
+	ld1		{ v0.4s- v3.4s}, [x8], #64
+	ld1		{ v4.4s- v7.4s}, [x8], #64
+	ld1		{ v8.4s-v11.4s}, [x8], #64
+	ld1		{v12.4s-v15.4s}, [x8]
+
+	/* load state */
+	ldp		dga, dgb, [x2]
+
+	/* load partial input (if supplied) */
+	cbz		x3, 0f
+	ld1		{v16.4s-v19.4s}, [x3]
+	b		1f
+
+	/* load input */
+0:	ld1		{v16.4s-v19.4s}, [x1], #64
+	sub		w0, w0, #1
+
+1:
+CPU_LE(	rev32		v16.16b, v16.16b	)
+CPU_LE(	rev32		v17.16b, v17.16b	)
+CPU_LE(	rev32		v18.16b, v18.16b	)
+CPU_LE(	rev32		v19.16b, v19.16b	)
+
+2:	add		t0.4s, v16.4s, v0.4s
+	mov		dg0v.16b, dgav.16b
+	mov		dg1v.16b, dgbv.16b
+
+	add_update	0,  v1, 16, 17, 18, 19
+	add_update	1,  v2, 17, 18, 19, 16
+	add_update	0,  v3, 18, 19, 16, 17
+	add_update	1,  v4, 19, 16, 17, 18
+
+	add_update	0,  v5, 16, 17, 18, 19
+	add_update	1,  v6, 17, 18, 19, 16
+	add_update	0,  v7, 18, 19, 16, 17
+	add_update	1,  v8, 19, 16, 17, 18
+
+	add_update	0,  v9, 16, 17, 18, 19
+	add_update	1, v10, 17, 18, 19, 16
+	add_update	0, v11, 18, 19, 16, 17
+	add_update	1, v12, 19, 16, 17, 18
+
+	add_only	0, v13, 17
+	add_only	1, v14, 18
+	add_only	0, v15, 19
+	add_only	1
+
+	/* update state */
+	add		dgav.4s, dgav.4s, dg0v.4s
+	add		dgbv.4s, dgbv.4s, dg1v.4s
+
+	/* handled all input blocks? */
+	cbnz		w0, 0b
+
+	/*
+	 * Final block: add padding and total bit count.
+	 * Skip if we have no total byte count in x4. In that case, the input
+	 * size was not a round multiple of the block size, and the padding is
+	 * handled by the C code.
+	 */
+	cbz		x4, 3f
+	movi		v17.2d, #0
+	mov		x8, #0x80000000
+	movi		v18.2d, #0
+	ror		x7, x4, #29		// ror(lsl(x4, 3), 32)
+	fmov		d16, x8
+	mov		x4, #0
+	mov		v19.d[0], xzr
+	mov		v19.d[1], x7
+	b		2b
+
+	/* store new state */
+3:	stp		dga, dgb, [x2]
+	ret
+ENDPROC(sha2_ce_transform)
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
new file mode 100644
index 0000000..c294e67
--- /dev/null
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -0,0 +1,255 @@
+/*
+ * sha2-ce-glue.c - SHA-224/SHA-256 using ARMv8 Crypto Extensions
+ *
+ * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+asmlinkage int sha2_ce_transform(int blocks, u8 const *src, u32 *state,
+				 u8 *head, long bytes);
+
+static int sha224_init(struct shash_desc *desc)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+
+	*sctx = (struct sha256_state){
+		.state = {
+			SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
+			SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
+		}
+	};
+	return 0;
+}
+
+static int sha256_init(struct shash_desc *desc)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+
+	*sctx = (struct sha256_state){
+		.state = {
+			SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+			SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
+		}
+	};
+	return 0;
+}
+
+static int sha2_update(struct shash_desc *desc, const u8 *data,
+		       unsigned int len)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+	unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
+
+	sctx->count += len;
+
+	if ((partial + len) >= SHA256_BLOCK_SIZE) {
+		int blocks;
+
+		if (partial) {
+			int p = SHA256_BLOCK_SIZE - partial;
+
+			memcpy(sctx->buf + partial, data, p);
+			data += p;
+			len -= p;
+		}
+
+		blocks = len / SHA256_BLOCK_SIZE;
+		len %= SHA256_BLOCK_SIZE;
+
+		kernel_neon_begin_partial(28);
+		sha2_ce_transform(blocks, data, sctx->state,
+				  partial ? sctx->buf : NULL, 0);
+		kernel_neon_end();
+
+		data += blocks * SHA256_BLOCK_SIZE;
+		partial = 0;
+	}
+	if (len)
+		memcpy(sctx->buf + partial, data, len);
+	return 0;
+}
+
+static void sha2_final(struct shash_desc *desc)
+{
+	static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, };
+
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+	__be64 bits = cpu_to_be64(sctx->count << 3);
+	u32 padlen = SHA256_BLOCK_SIZE
+		     - ((sctx->count + sizeof(bits)) % SHA256_BLOCK_SIZE);
+
+	sha2_update(desc, padding, padlen);
+	sha2_update(desc, (const u8 *)&bits, sizeof(bits));
+}
+
+static int sha224_final(struct shash_desc *desc, u8 *out)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+	__be32 *dst = (__be32 *)out;
+	int i;
+
+	sha2_final(desc);
+
+	for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(__be32); i++)
+		put_unaligned_be32(sctx->state[i], dst++);
+
+	*sctx = (struct sha256_state){};
+	return 0;
+}
+
+static int sha256_final(struct shash_desc *desc, u8 *out)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+	__be32 *dst = (__be32 *)out;
+	int i;
+
+	sha2_final(desc);
+
+	for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(__be32); i++)
+		put_unaligned_be32(sctx->state[i], dst++);
+
+	*sctx = (struct sha256_state){};
+	return 0;
+}
+
+static void sha2_finup(struct shash_desc *desc, const u8 *data,
+		       unsigned int len)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+	int blocks;
+
+	if (sctx->count || !len || (len % SHA256_BLOCK_SIZE)) {
+		sha2_update(desc, data, len);
+		sha2_final(desc);
+		return;
+	}
+
+	/*
+	 * Use a fast path if the input is a multiple of 64 bytes. In
+	 * this case, there is no need to copy data around, and we can
+	 * perform the entire digest calculation in a single invocation
+	 * of sha2_ce_transform()
+	 */
+	blocks = len / SHA256_BLOCK_SIZE;
+
+	kernel_neon_begin_partial(28);
+	sha2_ce_transform(blocks, data, sctx->state, NULL, len);
+	kernel_neon_end();
+	data += blocks * SHA256_BLOCK_SIZE;
+}
+
+static int sha224_finup(struct shash_desc *desc, const u8 *data,
+			unsigned int len, u8 *out)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+	__be32 *dst = (__be32 *)out;
+	int i;
+
+	sha2_finup(desc, data, len);
+
+	for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(__be32); i++)
+		put_unaligned_be32(sctx->state[i], dst++);
+
+	*sctx = (struct sha256_state){};
+	return 0;
+}
+
+static int sha256_finup(struct shash_desc *desc, const u8 *data,
+			unsigned int len, u8 *out)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+	__be32 *dst = (__be32 *)out;
+	int i;
+
+	sha2_finup(desc, data, len);
+
+	for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(__be32); i++)
+		put_unaligned_be32(sctx->state[i], dst++);
+
+	*sctx = (struct sha256_state){};
+	return 0;
+}
+
+static int sha2_export(struct shash_desc *desc, void *out)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+	struct sha256_state *dst = out;
+
+	*dst = *sctx;
+	return 0;
+}
+
+static int sha2_import(struct shash_desc *desc, const void *in)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+	struct sha256_state const *src = in;
+
+	*sctx = *src;
+	return 0;
+}
+
+static struct shash_alg algs[] = { {
+	.init			= sha224_init,
+	.update			= sha2_update,
+	.final			= sha224_final,
+	.finup			= sha224_finup,
+	.export			= sha2_export,
+	.import			= sha2_import,
+	.descsize		= sizeof(struct sha256_state),
+	.digestsize		= SHA224_DIGEST_SIZE,
+	.statesize		= sizeof(struct sha256_state),
+	.base			= {
+		.cra_name		= "sha224",
+		.cra_driver_name	= "sha224-ce",
+		.cra_priority		= 200,
+		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize		= SHA256_BLOCK_SIZE,
+		.cra_module		= THIS_MODULE,
+	}
+}, {
+	.init			= sha256_init,
+	.update			= sha2_update,
+	.final			= sha256_final,
+	.finup			= sha256_finup,
+	.export			= sha2_export,
+	.import			= sha2_import,
+	.descsize		= sizeof(struct sha256_state),
+	.digestsize		= SHA256_DIGEST_SIZE,
+	.statesize		= sizeof(struct sha256_state),
+	.base			= {
+		.cra_name		= "sha256",
+		.cra_driver_name	= "sha256-ce",
+		.cra_priority		= 200,
+		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize		= SHA256_BLOCK_SIZE,
+		.cra_module		= THIS_MODULE,
+	}
+} };
+
+static int __init sha2_ce_mod_init(void)
+{
+	return crypto_register_shashes(algs, ARRAY_SIZE(algs));
+}
+
+static void __exit sha2_ce_mod_fini(void)
+{
+	crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+}
+
+module_cpu_feature_match(SHA2, sha2_ce_mod_init);
+module_exit(sha2_ce_mod_fini);
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 79a642d..fb6e0c4 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -36,6 +36,7 @@
 generic-y += sembuf.h
 generic-y += serial.h
 generic-y += shmbuf.h
+generic-y += simd.h
 generic-y += sizes.h
 generic-y += socket.h
 generic-y += sockios.h
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index d4a6333..78e20ba 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -35,10 +35,60 @@
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
+
+#define smp_store_release(p, v)						\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	smp_mb();							\
+	ACCESS_ONCE(*p) = (v);						\
+} while (0)
+
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	smp_mb();							\
+	___p1;								\
+})
+
 #else
+
 #define smp_mb()	asm volatile("dmb ish" : : : "memory")
 #define smp_rmb()	asm volatile("dmb ishld" : : : "memory")
 #define smp_wmb()	asm volatile("dmb ishst" : : : "memory")
+
+#define smp_store_release(p, v)						\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	switch (sizeof(*p)) {						\
+	case 4:								\
+		asm volatile ("stlr %w1, %0"				\
+				: "=Q" (*p) : "r" (v) : "memory");	\
+		break;							\
+	case 8:								\
+		asm volatile ("stlr %1, %0"				\
+				: "=Q" (*p) : "r" (v) : "memory");	\
+		break;							\
+	}								\
+} while (0)
+
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1;						\
+	compiletime_assert_atomic_type(*p);				\
+	switch (sizeof(*p)) {						\
+	case 4:								\
+		asm volatile ("ldar %w0, %1"				\
+			: "=r" (___p1) : "Q" (*p) : "memory");		\
+		break;							\
+	case 8:								\
+		asm volatile ("ldar %0, %1"				\
+			: "=r" (___p1) : "Q" (*p) : "memory");		\
+		break;							\
+	}								\
+	___p1;								\
+})
+
 #endif
 
 #define read_barrier_depends()		do { } while(0)
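
Editor's note: the LDAR/STLR forms give one-way barriers: a release store orders all earlier accesses before it, and an acquire load orders all later accesses after it. The canonical use is a publish/consume handshake; a minimal sketch (producer/consumer/payload names are illustrative):

	static int payload;
	static int ready;

	static void producer(void)
	{
		payload = 42;			/* plain store... */
		smp_store_release(&ready, 1);	/* ...cannot sink below this */
	}

	static int consumer(void)
	{
		if (smp_load_acquire(&ready))	/* later loads cannot hoist above */
			return payload;		/* guaranteed to observe 42 */
		return -1;			/* flag not set yet */
	}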
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 8a8ce0e..a8b44ca 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -158,17 +158,23 @@
 	return ret;
 }
 
-#define cmpxchg(ptr,o,n)						\
-	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\
-					  (unsigned long)(o),		\
-					  (unsigned long)(n),		\
-					  sizeof(*(ptr))))
+#define cmpxchg(ptr, o, n) \
+({ \
+	__typeof__(*(ptr)) __ret; \
+	__ret = (__typeof__(*(ptr))) \
+		__cmpxchg_mb((ptr), (unsigned long)(o), (unsigned long)(n), \
+			     sizeof(*(ptr))); \
+	__ret; \
+})
 
-#define cmpxchg_local(ptr,o,n)						\
-	((__typeof__(*(ptr)))__cmpxchg((ptr),				\
-				       (unsigned long)(o),		\
-				       (unsigned long)(n),		\
-				       sizeof(*(ptr))))
+#define cmpxchg_local(ptr, o, n) \
+({ \
+	__typeof__(*(ptr)) __ret; \
+	__ret = (__typeof__(*(ptr))) \
+		__cmpxchg((ptr), (unsigned long)(o), \
+			  (unsigned long)(n), sizeof(*(ptr))); \
+	__ret; \
+})
 
 #define cmpxchg64(ptr,o,n)		cmpxchg((ptr),(o),(n))
 #define cmpxchg64_local(ptr,o,n)	cmpxchg_local((ptr),(o),(n))
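
Editor's note: the statement-expression rewrite, assigning to a named __ret, apparently lets callers discard the result without the "value computed is not used" warning the bare cast form could produce, while expression uses behave as before. Illustrative (cmpxchg_demo is not from the patch):

	static atomic_t v;

	static void cmpxchg_demo(void)
	{
		int old = atomic_read(&v);

		/* result consumed, as before */
		if (cmpxchg(&v.counter, old, old + 1) == old)
			return;

		/* result deliberately discarded: no warning with the new form */
		cmpxchg(&v.counter, old, old + 1);
	}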
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index c30a548..a203e97 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -191,6 +191,13 @@
 			compat_long_t _band;	/* POLL_IN, POLL_OUT, POLL_MSG */
 			int _fd;
 		} _sigpoll;
+
+		/* SIGSYS */
+		struct {
+			compat_uptr_t _call_addr; /* calling user insn */
+			int _syscall;	/* triggering system call number */
+			unsigned int _arch;	/* AUDIT_ARCH_* of syscall */
+		} _sigsys;
 	} _sifields;
 } compat_siginfo_t;
 
@@ -214,7 +221,7 @@
 	return (u32)(unsigned long)uptr;
 }
 
-#define compat_user_stack_pointer() (current_pt_regs()->compat_sp)
+#define compat_user_stack_pointer() (user_stack_pointer(current_pt_regs()))
 
 static inline void __user *arch_compat_alloc_user_space(long len)
 {
@@ -291,11 +298,6 @@
 
 #else /* !CONFIG_COMPAT */
 
-static inline int is_compat_task(void)
-{
-	return 0;
-}
-
 static inline int is_compat_thread(struct thread_info *thread)
 {
 	return 0;
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
new file mode 100644
index 0000000..cd4ac05
--- /dev/null
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_CPUFEATURE_H
+#define __ASM_CPUFEATURE_H
+
+#include <asm/hwcap.h>
+
+/*
+ * In the arm64 world (as in the ARM world), elf_hwcap is used both internally
+ * in the kernel and for user space to keep track of which optional features
+ * are supported by the current system. So let's map feature 'x' to HWCAP_x.
+ * Note that HWCAP_x constants are bit fields so we need to take the log.
+ */
+
+#define MAX_CPU_FEATURES	(8 * sizeof(elf_hwcap))
+#define cpu_feature(x)		ilog2(HWCAP_ ## x)
+
+static inline bool cpu_have_feature(unsigned int num)
+{
+	return elf_hwcap & (1UL << num);
+}
+
+#endif
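
Editor's note: since each HWCAP_x is a single bit, cpu_feature() recovers its index with ilog2() and cpu_have_feature() tests it; the ghash driver's module_cpu_feature_match(PMULL, ...) earlier in this series rests on exactly this mapping. For example, assuming HWCAP_AES is defined as (1 << 3):

	BUILD_BUG_ON(cpu_feature(AES) != 3);	/* ilog2(1 << 3) */

	if (cpu_have_feature(cpu_feature(AES)))	/* elf_hwcap & (1UL << 3) */
		pr_info("AES instructions available\n");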
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index be9b5ca..76b7b3c 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -16,23 +16,13 @@
 #ifndef __ASM_CPUTYPE_H
 #define __ASM_CPUTYPE_H
 
-#define ID_MIDR_EL1		"midr_el1"
-#define ID_MPIDR_EL1		"mpidr_el1"
-#define ID_CTR_EL0		"ctr_el0"
-
-#define ID_AA64PFR0_EL1		"id_aa64pfr0_el1"
-#define ID_AA64DFR0_EL1		"id_aa64dfr0_el1"
-#define ID_AA64AFR0_EL1		"id_aa64afr0_el1"
-#define ID_AA64ISAR0_EL1	"id_aa64isar0_el1"
-#define ID_AA64MMFR0_EL1	"id_aa64mmfr0_el1"
-
 #define INVALID_HWID		ULONG_MAX
 
 #define MPIDR_HWID_BITMASK	0xff00ffffff
 
 #define read_cpuid(reg) ({						\
 	u64 __val;							\
-	asm("mrs	%0, " reg : "=r" (__val));			\
+	asm("mrs	%0, " #reg : "=r" (__val));			\
 	__val;								\
 })
 
@@ -51,12 +41,12 @@
  */
 static inline u32 __attribute_const__ read_cpuid_id(void)
 {
-	return read_cpuid(ID_MIDR_EL1);
+	return read_cpuid(MIDR_EL1);
 }
 
 static inline u64 __attribute_const__ read_cpuid_mpidr(void)
 {
-	return read_cpuid(ID_MPIDR_EL1);
+	return read_cpuid(MPIDR_EL1);
 }
 
 static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
@@ -71,7 +61,7 @@
 
 static inline u32 __attribute_const__ read_cpuid_cachetype(void)
 {
-	return read_cpuid(ID_CTR_EL0);
+	return read_cpuid(CTR_EL0);
 }
 
 void cpuinfo_store_cpu(void);
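
Editor's note: with this change read_cpuid() takes the system register as a bare token and pastes it into the mrs template via #reg, so the per-register ID_* string #defines become dead weight and are dropped. Roughly, read_cpuid(MIDR_EL1) now expands to (sketch):

	u64 __val;
	asm("mrs	%0, MIDR_EL1" : "=r" (__val));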
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 7eaa0b3..ef8235c 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -83,6 +83,15 @@
 }
 #endif
 
+#ifdef CONFIG_COMPAT
+int aarch32_break_handler(struct pt_regs *regs);
+#else
+static inline int aarch32_break_handler(struct pt_regs *regs)
+{
+	return -EFAULT;
+}
+#endif
+
 #endif	/* __ASSEMBLY */
 #endif	/* __KERNEL__ */
 #endif	/* __ASM_DEBUG_MONITORS_H */
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index e647e6d..a4e1758 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -33,8 +33,6 @@
 typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 typedef struct user_fpsimd_state elf_fpregset_t;
 
-#define EM_AARCH64		183
-
 /*
  * AArch64 static relocation types.
  */
@@ -152,7 +150,6 @@
 #define arch_randomize_brk arch_randomize_brk
 
 #ifdef CONFIG_COMPAT
-#define EM_ARM				40
 #define COMPAT_ELF_PLATFORM		("v8l")
 
 #define COMPAT_ELF_ET_DYN_BASE		(randomize_et_dyn(2 * TASK_SIZE_32 / 3))
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index c43b4ac..50f559f 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -37,8 +37,21 @@
 			u32 fpcr;
 		};
 	};
+	/* the id of the last cpu to have restored this state */
+	unsigned int cpu;
 };
 
+/*
+ * Struct for stacking the bottom 'n' FP/SIMD registers.
+ */
+struct fpsimd_partial_state {
+	u32		fpsr;
+	u32		fpcr;
+	u32		num_regs;
+	__uint128_t	vregs[32];
+};
+
 #if defined(__KERNEL__) && defined(CONFIG_COMPAT)
 /* Masks for extracting the FPSR and FPCR from the FPSCR */
 #define VFP_FPSCR_STAT_MASK	0xf800009f
@@ -58,6 +71,16 @@
 extern void fpsimd_thread_switch(struct task_struct *next);
 extern void fpsimd_flush_thread(void);
 
+extern void fpsimd_preserve_current_state(void);
+extern void fpsimd_restore_current_state(void);
+extern void fpsimd_update_current_state(struct fpsimd_state *state);
+
+extern void fpsimd_flush_task_state(struct task_struct *target);
+
+extern void fpsimd_save_partial_state(struct fpsimd_partial_state *state,
+				      u32 num_regs);
+extern void fpsimd_load_partial_state(struct fpsimd_partial_state *state);
+
 #endif
 
 #endif
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index bbec599..768414d 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -62,3 +62,38 @@
 	ldr	w\tmpnr, [\state, #16 * 2 + 4]
 	msr	fpcr, x\tmpnr
 .endm
+
+.altmacro
+.macro fpsimd_save_partial state, numnr, tmpnr1, tmpnr2
+	mrs	x\tmpnr1, fpsr
+	str	w\numnr, [\state, #8]
+	mrs	x\tmpnr2, fpcr
+	stp	w\tmpnr1, w\tmpnr2, [\state]
+	adr	x\tmpnr1, 0f
+	add	\state, \state, x\numnr, lsl #4
+	sub	x\tmpnr1, x\tmpnr1, x\numnr, lsl #1
+	br	x\tmpnr1
+	.irp	qa, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0
+	.irp	qb, %(qa + 1)
+	stp	q\qa, q\qb, [\state, # -16 * \qa - 16]
+	.endr
+	.endr
+0:
+.endm
+
+.macro fpsimd_restore_partial state, tmpnr1, tmpnr2
+	ldp	w\tmpnr1, w\tmpnr2, [\state]
+	msr	fpsr, x\tmpnr1
+	msr	fpcr, x\tmpnr2
+	adr	x\tmpnr1, 0f
+	ldr	w\tmpnr2, [\state, #8]
+	add	\state, \state, x\tmpnr2, lsl #4
+	sub	x\tmpnr1, x\tmpnr1, x\tmpnr2, lsl #1
+	br	x\tmpnr1
+	.irp	qa, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0
+	.irp	qb, %(qa + 1)
+	ldp	q\qa, q\qb, [\state, # -16 * \qa - 16]
+	.endr
+	.endr
+0:
+.endm
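
Editor's note: both macros use the same trick: adr takes the address of the terminating label 0, and subtracting num_regs * 2 bytes (each stp/ldp is 4 bytes and transfers two registers) before the br lands execution num_regs/2 instructions into the unrolled sequence, so exactly q0..q(num_regs-1) are transferred. The C analogue is a Duff's-device style switch with fall-through (sketch only; middle cases elided, names illustrative):

	static void save_partial(const __uint128_t *vregs, __uint128_t *out,
				 unsigned int num_regs)
	{
		switch (num_regs) {	/* deliberate fall-through */
		case 32: out[30] = vregs[30]; out[31] = vregs[31];
		case 30: out[28] = vregs[28]; out[29] = vregs[29];
		/* ... cases 28 down to 4 elided ... */
		case 2:  out[0]  = vregs[0];  out[1]  = vregs[1];
		}
	}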
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 6d4482f..024c461 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -30,6 +30,13 @@
 #define COMPAT_HWCAP_IDIVA	(1 << 17)
 #define COMPAT_HWCAP_IDIVT	(1 << 18)
 #define COMPAT_HWCAP_IDIV	(COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT)
+#define COMPAT_HWCAP_EVTSTRM	(1 << 21)
+
+#define COMPAT_HWCAP2_AES	(1 << 0)
+#define COMPAT_HWCAP2_PMULL	(1 << 1)
+#define COMPAT_HWCAP2_SHA1	(1 << 2)
+#define COMPAT_HWCAP2_SHA2	(1 << 3)
+#define COMPAT_HWCAP2_CRC32	(1 << 4)
 
 #ifndef __ASSEMBLY__
 /*
@@ -37,12 +44,13 @@
  * instruction set this cpu supports.
  */
 #define ELF_HWCAP		(elf_hwcap)
-#define COMPAT_ELF_HWCAP	(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
-				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
-				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
-				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
-				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
 
-extern unsigned int elf_hwcap;
+#ifdef CONFIG_COMPAT
+#define COMPAT_ELF_HWCAP	(compat_elf_hwcap)
+#define COMPAT_ELF_HWCAP2	(compat_elf_hwcap2)
+extern unsigned int compat_elf_hwcap, compat_elf_hwcap2;
+#endif
+
+extern unsigned long elf_hwcap;
 #endif
 #endif
diff --git a/arch/arm64/include/asm/neon.h b/arch/arm64/include/asm/neon.h
new file mode 100644
index 0000000..13ce4cc
--- /dev/null
+++ b/arch/arm64/include/asm/neon.h
@@ -0,0 +1,18 @@
+/*
+ * linux/arch/arm64/include/asm/neon.h
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+
+#define cpu_has_neon()		(1)
+
+#define kernel_neon_begin()	kernel_neon_begin_partial(32)
+
+void kernel_neon_begin_partial(u32 num_regs);
+void kernel_neon_end(void);
diff --git a/arch/arm64/include/asm/opcodes.h b/arch/arm64/include/asm/opcodes.h
new file mode 100644
index 0000000..fd189a5
--- /dev/null
+++ b/arch/arm64/include/asm/opcodes.h
@@ -0,0 +1,231 @@
+/*
+ *  Copied from arch/arm/include/asm/opcodes.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARM_OPCODES_H
+#define __ASM_ARM_OPCODES_H
+
+#ifndef __ASSEMBLY__
+#include <linux/linkage.h>
+extern asmlinkage unsigned int arm_check_condition(u32 opcode, u64 psr);
+#endif
+
+#define ARM_OPCODE_CONDTEST_FAIL   0
+#define ARM_OPCODE_CONDTEST_PASS   1
+#define ARM_OPCODE_CONDTEST_UNCOND 2
+
+
+/*
+ * Assembler opcode byteswap helpers.
+ * These are only intended for use by this header: don't use them directly,
+ * because they will be suboptimal in most cases.
+ */
+#define ___asm_opcode_swab32(x) (	\
+	  (((x) << 24) & 0xFF000000)	\
+	| (((x) <<  8) & 0x00FF0000)	\
+	| (((x) >>  8) & 0x0000FF00)	\
+	| (((x) >> 24) & 0x000000FF)	\
+)
+#define ___asm_opcode_swab16(x) (	\
+	  (((x) << 8) & 0xFF00)		\
+	| (((x) >> 8) & 0x00FF)		\
+)
+#define ___asm_opcode_swahb32(x) (	\
+	  (((x) << 8) & 0xFF00FF00)	\
+	| (((x) >> 8) & 0x00FF00FF)	\
+)
+#define ___asm_opcode_swahw32(x) (	\
+	  (((x) << 16) & 0xFFFF0000)	\
+	| (((x) >> 16) & 0x0000FFFF)	\
+)
+#define ___asm_opcode_identity32(x) ((x) & 0xFFFFFFFF)
+#define ___asm_opcode_identity16(x) ((x) & 0xFFFF)
+
+
+/*
+ * Opcode byteswap helpers
+ *
+ * These macros help with converting instructions between a canonical integer
+ * format and in-memory representation, in an endianness-agnostic manner.
+ *
+ * __mem_to_opcode_*() convert from in-memory representation to canonical form.
+ * __opcode_to_mem_*() convert from canonical form to in-memory representation.
+ *
+ *
+ * Canonical instruction representation:
+ *
+ *	ARM:		0xKKLLMMNN
+ *	Thumb 16-bit:	0x0000KKLL, where KK < 0xE8
+ *	Thumb 32-bit:	0xKKLLMMNN, where KK >= 0xE8
+ *
+ * There is no way to distinguish an ARM instruction in canonical representation
+ * from a Thumb instruction (just as these cannot be distinguished in memory).
+ * Where this distinction is important, it needs to be tracked separately.
+ *
+ * Note that values in the range 0x0000E800..0xE7FFFFFF intentionally do not
+ * represent any valid Thumb-2 instruction.  For this range,
+ * __opcode_is_thumb32() and __opcode_is_thumb16() will both be false.
+ *
+ * The ___asm variants are intended only for use by this header, in situations
+ * involving inline assembler.  For .S files, the normal __opcode_*() macros
+ * should do the right thing.
+ */
+#ifdef __ASSEMBLY__
+
+#define ___opcode_swab32(x) ___asm_opcode_swab32(x)
+#define ___opcode_swab16(x) ___asm_opcode_swab16(x)
+#define ___opcode_swahb32(x) ___asm_opcode_swahb32(x)
+#define ___opcode_swahw32(x) ___asm_opcode_swahw32(x)
+#define ___opcode_identity32(x) ___asm_opcode_identity32(x)
+#define ___opcode_identity16(x) ___asm_opcode_identity16(x)
+
+#else /* ! __ASSEMBLY__ */
+
+#include <linux/types.h>
+#include <linux/swab.h>
+
+#define ___opcode_swab32(x) swab32(x)
+#define ___opcode_swab16(x) swab16(x)
+#define ___opcode_swahb32(x) swahb32(x)
+#define ___opcode_swahw32(x) swahw32(x)
+#define ___opcode_identity32(x) ((u32)(x))
+#define ___opcode_identity16(x) ((u16)(x))
+
+#endif /* ! __ASSEMBLY__ */
+
+
+#ifdef CONFIG_CPU_ENDIAN_BE8
+
+#define __opcode_to_mem_arm(x) ___opcode_swab32(x)
+#define __opcode_to_mem_thumb16(x) ___opcode_swab16(x)
+#define __opcode_to_mem_thumb32(x) ___opcode_swahb32(x)
+#define ___asm_opcode_to_mem_arm(x) ___asm_opcode_swab32(x)
+#define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_swab16(x)
+#define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahb32(x)
+
+#else /* ! CONFIG_CPU_ENDIAN_BE8 */
+
+#define __opcode_to_mem_arm(x) ___opcode_identity32(x)
+#define __opcode_to_mem_thumb16(x) ___opcode_identity16(x)
+#define ___asm_opcode_to_mem_arm(x) ___asm_opcode_identity32(x)
+#define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_identity16(x)
+#ifndef CONFIG_CPU_ENDIAN_BE32
+/*
+ * On BE32 systems, using 32-bit accesses to store Thumb instructions will not
+ * work in all cases, due to alignment constraints.  For now, a correct
+ * version is not provided for BE32.
+ */
+#define __opcode_to_mem_thumb32(x) ___opcode_swahw32(x)
+#define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahw32(x)
+#endif
+
+#endif /* ! CONFIG_CPU_ENDIAN_BE8 */
+
+#define __mem_to_opcode_arm(x) __opcode_to_mem_arm(x)
+#define __mem_to_opcode_thumb16(x) __opcode_to_mem_thumb16(x)
+#ifndef CONFIG_CPU_ENDIAN_BE32
+#define __mem_to_opcode_thumb32(x) __opcode_to_mem_thumb32(x)
+#endif
+
+/* Operations specific to Thumb opcodes */
+
+/* Instruction size checks: */
+#define __opcode_is_thumb32(x) (		\
+	   ((x) & 0xF8000000) == 0xE8000000	\
+	|| ((x) & 0xF0000000) == 0xF0000000	\
+)
+#define __opcode_is_thumb16(x) (					\
+	   ((x) & 0xFFFF0000) == 0					\
+	&& !(((x) & 0xF800) == 0xE800 || ((x) & 0xF000) == 0xF000)	\
+)
+
+/* Operations to construct or split 32-bit Thumb instructions: */
+#define __opcode_thumb32_first(x) (___opcode_identity16((x) >> 16))
+#define __opcode_thumb32_second(x) (___opcode_identity16(x))
+#define __opcode_thumb32_compose(first, second) (			\
+	  (___opcode_identity32(___opcode_identity16(first)) << 16)	\
+	| ___opcode_identity32(___opcode_identity16(second))		\
+)
+#define ___asm_opcode_thumb32_first(x) (___asm_opcode_identity16((x) >> 16))
+#define ___asm_opcode_thumb32_second(x) (___asm_opcode_identity16(x))
+#define ___asm_opcode_thumb32_compose(first, second) (			    \
+	  (___asm_opcode_identity32(___asm_opcode_identity16(first)) << 16) \
+	| ___asm_opcode_identity32(___asm_opcode_identity16(second))	    \
+)
+
+/*
+ * Opcode injection helpers
+ *
+ * In rare cases it is necessary to assemble an opcode which the
+ * assembler does not support directly, or which would normally be
+ * rejected because of the CFLAGS or AFLAGS used to build the affected
+ * file.
+ *
+ * Before using these macros, consider carefully whether it is feasible
+ * instead to change the build flags for your file, or whether it really
+ * makes sense to support old assembler versions when building that
+ * particular kernel feature.
+ *
+ * The macros defined here should only be used where there is no viable
+ * alternative.
+ *
+ *
+ * __inst_arm(x): emit the specified ARM opcode
+ * __inst_thumb16(x): emit the specified 16-bit Thumb opcode
+ * __inst_thumb32(x): emit the specified 32-bit Thumb opcode
+ *
+ * __inst_arm_thumb16(arm, thumb): emit either the specified arm or
+ *	16-bit Thumb opcode, depending on whether an ARM or Thumb-2
+ *	kernel is being built
+ *
+ * __inst_arm_thumb32(arm, thumb): emit either the specified arm or
+ *	32-bit Thumb opcode, depending on whether an ARM or Thumb-2
+ *	kernel is being built
+ *
+ *
+ * Note that using these macros directly is poor practice.  Instead, you
+ * should use them to define human-readable wrapper macros to encode the
+ * instructions that you care about.  In code which might run on ARMv7 or
+ * above, you can usually use the __inst_arm_thumb{16,32} macros to
+ * specify the ARM and Thumb alternatives at the same time.  This ensures
+ * that the correct opcode gets emitted depending on the instruction set
+ * used for the kernel build.
+ *
+ * Look at opcodes-virt.h for an example of how to use these macros.
+ */
+#include <linux/stringify.h>
+
+#define __inst_arm(x) ___inst_arm(___asm_opcode_to_mem_arm(x))
+#define __inst_thumb32(x) ___inst_thumb32(				\
+	___asm_opcode_to_mem_thumb16(___asm_opcode_thumb32_first(x)),	\
+	___asm_opcode_to_mem_thumb16(___asm_opcode_thumb32_second(x))	\
+)
+#define __inst_thumb16(x) ___inst_thumb16(___asm_opcode_to_mem_thumb16(x))
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define __inst_arm_thumb16(arm_opcode, thumb_opcode) \
+	__inst_thumb16(thumb_opcode)
+#define __inst_arm_thumb32(arm_opcode, thumb_opcode) \
+	__inst_thumb32(thumb_opcode)
+#else
+#define __inst_arm_thumb16(arm_opcode, thumb_opcode) __inst_arm(arm_opcode)
+#define __inst_arm_thumb32(arm_opcode, thumb_opcode) __inst_arm(arm_opcode)
+#endif
+
+/* Helpers for the helpers.  Don't use these directly. */
+#ifdef __ASSEMBLY__
+#define ___inst_arm(x) .long x
+#define ___inst_thumb16(x) .short x
+#define ___inst_thumb32(first, second) .short first, second
+#else
+#define ___inst_arm(x) ".long " __stringify(x) "\n\t"
+#define ___inst_thumb16(x) ".short " __stringify(x) "\n\t"
+#define ___inst_thumb32(first, second) \
+	".short " __stringify(first) ", " __stringify(second) "\n\t"
+#endif
+
+#endif /* __ASM_ARM_OPCODES_H */
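
Editor's note: a quick illustration of the canonical-form helpers: composing and splitting a 32-bit Thumb opcode round-trips, and the predicates agree with the 0xE8 boundary described above (the halfword values are chosen arbitrarily for the sketch):

	u32 insn = __opcode_thumb32_compose(0xf000, 0xd000); /* 0xf000d000 */

	BUG_ON(!__opcode_is_thumb32(insn));	/* first half >= 0xe800 */
	BUG_ON(__opcode_is_thumb16(insn));
	BUG_ON(__opcode_thumb32_first(insn)  != 0xf000);
	BUG_ON(__opcode_thumb32_second(insn) != 0xd000);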
diff --git a/arch/arm64/include/asm/pgtable-3level-types.h b/arch/arm64/include/asm/pgtable-3level-types.h
index 4489615..4e94424 100644
--- a/arch/arm64/include/asm/pgtable-3level-types.h
+++ b/arch/arm64/include/asm/pgtable-3level-types.h
@@ -16,6 +16,8 @@
 #ifndef __ASM_PGTABLE_3LEVEL_TYPES_H
 #define __ASM_PGTABLE_3LEVEL_TYPES_H
 
+#include <asm/types.h>
+
 typedef u64 pteval_t;
 typedef u64 pmdval_t;
 typedef u64 pgdval_t;
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 75fd13d..7eeed1a 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -92,5 +92,6 @@
 #define TCR_TG1_64K		(UL(1) << 30)
 #define TCR_IPS_40BIT		(UL(2) << 32)
 #define TCR_ASID16		(UL(1) << 36)
+#define TCR_TBI0		(UL(1) << 37)
 
 #endif
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index ab239b2..3b7bb03 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -131,8 +131,8 @@
 #define task_pt_regs(p) \
 	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
 
-#define KSTK_EIP(tsk)	task_pt_regs(tsk)->pc
-#define KSTK_ESP(tsk)	task_pt_regs(tsk)->sp
+#define KSTK_EIP(tsk)	((unsigned long)task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk)	user_stack_pointer(task_pt_regs(tsk))
 
 /*
  * Prefetching support
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 7257c36..33d3001 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -60,6 +60,15 @@
 #define COMPAT_PT_TEXT_ADDR		0x10000
 #define COMPAT_PT_DATA_ADDR		0x10004
 #define COMPAT_PT_TEXT_END_ADDR		0x10008
+
+/*
+ * used to skip a system call when tracer changes its number to -1
+ * with ptrace(PTRACE_SET_SYSCALL)
+ */
+#define RET_SKIP_SYSCALL	-1
+#define RET_SKIP_SYSCALL_TRACE	-2
+#define IS_SKIP_SYSCALL(no)	((int)((no) & 0xffffffff) == -1)
+
 #ifndef __ASSEMBLY__
 
 /* sizeof(struct user) for AArch32 */
@@ -131,7 +140,12 @@
 	(!((regs)->pstate & PSR_F_BIT))
 
 #define user_stack_pointer(regs) \
-	((regs)->sp)
+	(!compat_user_mode(regs) ? (regs)->sp : (regs)->compat_sp)
+
+static inline unsigned long regs_return_value(struct pt_regs *regs)
+{
+	return regs->regs[0];
+}
 
 /*
  * Are the current registers suitable for user mode? (used to maintain
@@ -171,7 +185,13 @@
 #define profile_pc(regs) instruction_pointer(regs)
 #endif
 
-extern int aarch32_break_trap(struct pt_regs *regs);
+/*
+ * True if instr is a 32-bit thumb instruction. This works if instr
+ * is the first or only half-word of a thumb instruction. It also works
+ * when instr holds all 32 bits of a wide thumb instruction if stored
+ * in the form (first_half<<16)|(second_half)
+ */
+#define is_wide_instruction(instr)	((unsigned)(instr) >= 0xe800)
 
 #endif /* __ASSEMBLY__ */
 #endif
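
Editor's note: a worked check of the new predicate (illustrative values):

	/*
	 *	is_wide_instruction(0x4770)	-> false  (16-bit "bx lr")
	 *	is_wide_instruction(0xf7ff)	-> true   (first halfword of a
	 *						   32-bit Thumb insn)
	 *	is_wide_instruction(0xf7fffffe)	-> true   (full 32-bit form,
	 *						   (first<<16)|second)
	 */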
diff --git a/arch/arm64/include/asm/seccomp.h b/arch/arm64/include/asm/seccomp.h
new file mode 100644
index 0000000..bec3a43
--- /dev/null
+++ b/arch/arm64/include/asm/seccomp.h
@@ -0,0 +1,25 @@
+/*
+ * arch/arm64/include/asm/seccomp.h
+ *
+ * Copyright (C) 2014 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
+
+#include <asm/unistd.h>
+
+#ifdef CONFIG_COMPAT
+#define __NR_seccomp_read_32		__NR_compat_read
+#define __NR_seccomp_write_32		__NR_compat_write
+#define __NR_seccomp_exit_32		__NR_compat_exit
+#define __NR_seccomp_sigreturn_32	__NR_compat_rt_sigreturn
+#endif /* CONFIG_COMPAT */
+
+#include <asm-generic/seccomp.h>
+
+#endif /* _ASM_SECCOMP_H */
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index 70ba9d4..35cfb7e 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -16,6 +16,8 @@
 #ifndef __ASM_SYSCALL_H
 #define __ASM_SYSCALL_H
 
+#include <uapi/linux/audit.h>
+#include <linux/compat.h>
 #include <linux/err.h>
 
 
@@ -104,4 +106,16 @@
 	memcpy(&regs->regs[i], args, n * sizeof(args[0]));
 }
 
+/*
+ * We don't care about endianness (__AUDIT_ARCH_LE bit) here because
+ * AArch64 has the same system calls both on little- and big- endian.
+ */
+static inline int syscall_get_arch(void)
+{
+	if (is_compat_task())
+		return AUDIT_ARCH_ARM;
+
+	return AUDIT_ARCH_AARCH64;
+}
+
 #endif	/* __ASM_SYSCALL_H */
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 23a3c47..c09cbf6 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -97,6 +97,9 @@
 /*
  * thread information flags:
  *  TIF_SYSCALL_TRACE	- syscall trace active
+ *  TIF_SYSCALL_TRACEPOINT - syscall tracepoint for ftrace
+ *  TIF_SYSCALL_AUDIT	- syscall auditing
+ *  TIF_SECCOMP		- syscall secure computing
  *  TIF_SIGPENDING	- signal pending
  *  TIF_NEED_RESCHED	- rescheduling necessary
  *  TIF_NOTIFY_RESUME	- callback before returning to user
@@ -106,7 +109,11 @@
 #define TIF_SIGPENDING		0
 #define TIF_NEED_RESCHED	1
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
+#define TIF_FOREIGN_FPSTATE	3	/* CPU's FP state is not current's */
 #define TIF_SYSCALL_TRACE	8
+#define TIF_SYSCALL_AUDIT	9
+#define TIF_SYSCALL_TRACEPOINT	10
+#define TIF_SECCOMP		11
 #define TIF_POLLING_NRFLAG	16
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_FREEZE		19
@@ -118,10 +125,18 @@
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
+#define _TIF_FOREIGN_FPSTATE	(1 << TIF_FOREIGN_FPSTATE)
+#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
+#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
+#define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
+#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 #define _TIF_32BIT		(1 << TIF_32BIT)
 
 #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
-				 _TIF_NOTIFY_RESUME)
+				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
+
+#define _TIF_SYSCALL_WORK	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_THREAD_INFO_H */
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index 10ca8ff..75f51ea 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -18,6 +18,19 @@
 #ifndef __ASM_TRAP_H
 #define __ASM_TRAP_H
 
+#include <linux/list.h>
+
+struct undef_hook {
+	struct list_head node;
+	u32 instr_mask;
+	u32 instr_val;
+	u32 pstate_mask;
+	u32 pstate_val;
+	int (*fn)(struct pt_regs *regs, unsigned int instr);
+};
+
+void register_undef_hook(struct undef_hook *hook);
+
 static inline int in_exception_text(unsigned long ptr)
 {
 	extern char __exception_text_start[];
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 82ce217..2a3957f 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -25,6 +25,26 @@
 #define __ARCH_WANT_COMPAT_SYS_SENDFILE
 #define __ARCH_WANT_SYS_FORK
 #define __ARCH_WANT_SYS_VFORK
+
+/*
+ * Compat syscall numbers used by the AArch64 kernel.
+ */
+#define __NR_compat_restart_syscall	0
+#define __NR_compat_exit		1
+#define __NR_compat_read		3
+#define __NR_compat_write		4
+#define __NR_compat_sigreturn		119
+#define __NR_compat_rt_sigreturn	173
+
+/*
+ * The following SVCs are ARM private.
+ */
+#define __ARM_NR_COMPAT_BASE		0x0f0000
+#define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE+2)
+#define __ARM_NR_compat_set_tls		(__ARM_NR_COMPAT_BASE+5)
+
+#define __NR_compat_syscalls		384
 #endif
+
 #define __ARCH_WANT_SYS_CLONE
 #include <uapi/asm/unistd.h>
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 58125bf..76d0945 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -21,399 +21,771 @@
 #define __SYSCALL(x, y)
 #endif
 
-__SYSCALL(0,   sys_restart_syscall)
-__SYSCALL(1,   sys_exit)
-__SYSCALL(2,   sys_fork)
-__SYSCALL(3,   sys_read)
-__SYSCALL(4,   sys_write)
-__SYSCALL(5,   compat_sys_open)
-__SYSCALL(6,   sys_close)
-__SYSCALL(7,   sys_ni_syscall)			/* 7 was sys_waitpid */
-__SYSCALL(8,   sys_creat)
-__SYSCALL(9,   sys_link)
-__SYSCALL(10,  sys_unlink)
-__SYSCALL(11,  compat_sys_execve)
-__SYSCALL(12,  sys_chdir)
-__SYSCALL(13,  sys_ni_syscall)			/* 13 was sys_time */
-__SYSCALL(14,  sys_mknod)
-__SYSCALL(15,  sys_chmod)
-__SYSCALL(16,  sys_lchown16)
-__SYSCALL(17,  sys_ni_syscall)			/* 17 was sys_break */
-__SYSCALL(18,  sys_ni_syscall)			/* 18 was sys_stat */
-__SYSCALL(19,  compat_sys_lseek)
-__SYSCALL(20,  sys_getpid)
-__SYSCALL(21,  compat_sys_mount)
-__SYSCALL(22,  sys_ni_syscall)			/* 22 was sys_umount */
-__SYSCALL(23,  sys_setuid16)
-__SYSCALL(24,  sys_getuid16)
-__SYSCALL(25,  sys_ni_syscall)			/* 25 was sys_stime */
-__SYSCALL(26,  compat_sys_ptrace)
-__SYSCALL(27,  sys_ni_syscall)			/* 27 was sys_alarm */
-__SYSCALL(28,  sys_ni_syscall)			/* 28 was sys_fstat */
-__SYSCALL(29,  sys_pause)
-__SYSCALL(30,  sys_ni_syscall)			/* 30 was sys_utime */
-__SYSCALL(31,  sys_ni_syscall)			/* 31 was sys_stty */
-__SYSCALL(32,  sys_ni_syscall)			/* 32 was sys_gtty */
-__SYSCALL(33,  sys_access)
-__SYSCALL(34,  sys_nice)
-__SYSCALL(35,  sys_ni_syscall)			/* 35 was sys_ftime */
-__SYSCALL(36,  sys_sync)
-__SYSCALL(37,  sys_kill)
-__SYSCALL(38,  sys_rename)
-__SYSCALL(39,  sys_mkdir)
-__SYSCALL(40,  sys_rmdir)
-__SYSCALL(41,  sys_dup)
-__SYSCALL(42,  sys_pipe)
-__SYSCALL(43,  compat_sys_times)
-__SYSCALL(44,  sys_ni_syscall)			/* 44 was sys_prof */
-__SYSCALL(45,  sys_brk)
-__SYSCALL(46,  sys_setgid16)
-__SYSCALL(47,  sys_getgid16)
-__SYSCALL(48,  sys_ni_syscall)			/* 48 was sys_signal */
-__SYSCALL(49,  sys_geteuid16)
-__SYSCALL(50,  sys_getegid16)
-__SYSCALL(51,  sys_acct)
-__SYSCALL(52,  sys_umount)
-__SYSCALL(53,  sys_ni_syscall)			/* 53 was sys_lock */
-__SYSCALL(54,  compat_sys_ioctl)
-__SYSCALL(55,  compat_sys_fcntl)
-__SYSCALL(56,  sys_ni_syscall)			/* 56 was sys_mpx */
-__SYSCALL(57,  sys_setpgid)
-__SYSCALL(58,  sys_ni_syscall)			/* 58 was sys_ulimit */
-__SYSCALL(59,  sys_ni_syscall)			/* 59 was sys_olduname */
-__SYSCALL(60,  sys_umask)
-__SYSCALL(61,  sys_chroot)
-__SYSCALL(62,  compat_sys_ustat)
-__SYSCALL(63,  sys_dup2)
-__SYSCALL(64,  sys_getppid)
-__SYSCALL(65,  sys_getpgrp)
-__SYSCALL(66,  sys_setsid)
-__SYSCALL(67,  compat_sys_sigaction)
-__SYSCALL(68,  sys_ni_syscall)			/* 68 was sys_sgetmask */
-__SYSCALL(69,  sys_ni_syscall)			/* 69 was sys_ssetmask */
-__SYSCALL(70,  sys_setreuid16)
-__SYSCALL(71,  sys_setregid16)
-__SYSCALL(72,  sys_sigsuspend)
-__SYSCALL(73,  compat_sys_sigpending)
-__SYSCALL(74,  sys_sethostname)
-__SYSCALL(75,  compat_sys_setrlimit)
-__SYSCALL(76,  sys_ni_syscall)			/* 76 was compat_sys_getrlimit */
-__SYSCALL(77,  compat_sys_getrusage)
-__SYSCALL(78,  compat_sys_gettimeofday)
-__SYSCALL(79,  compat_sys_settimeofday)
-__SYSCALL(80,  sys_getgroups16)
-__SYSCALL(81,  sys_setgroups16)
-__SYSCALL(82,  sys_ni_syscall)			/* 82 was compat_sys_select */
-__SYSCALL(83,  sys_symlink)
-__SYSCALL(84,  sys_ni_syscall)			/* 84 was sys_lstat */
-__SYSCALL(85,  sys_readlink)
-__SYSCALL(86,  sys_uselib)
-__SYSCALL(87,  sys_swapon)
-__SYSCALL(88,  sys_reboot)
-__SYSCALL(89,  sys_ni_syscall)			/* 89 was sys_readdir */
-__SYSCALL(90,  sys_ni_syscall)			/* 90 was sys_mmap */
-__SYSCALL(91,  sys_munmap)
-__SYSCALL(92,  compat_sys_truncate)
-__SYSCALL(93,  compat_sys_ftruncate)
-__SYSCALL(94,  sys_fchmod)
-__SYSCALL(95,  sys_fchown16)
-__SYSCALL(96,  sys_getpriority)
-__SYSCALL(97,  sys_setpriority)
-__SYSCALL(98,  sys_ni_syscall)			/* 98 was sys_profil */
-__SYSCALL(99,  compat_sys_statfs)
-__SYSCALL(100, compat_sys_fstatfs)
-__SYSCALL(101, sys_ni_syscall)			/* 101 was sys_ioperm */
-__SYSCALL(102, sys_ni_syscall)			/* 102 was sys_socketcall */
-__SYSCALL(103, sys_syslog)
-__SYSCALL(104, compat_sys_setitimer)
-__SYSCALL(105, compat_sys_getitimer)
-__SYSCALL(106, compat_sys_newstat)
-__SYSCALL(107, compat_sys_newlstat)
-__SYSCALL(108, compat_sys_newfstat)
-__SYSCALL(109, sys_ni_syscall)			/* 109 was sys_uname */
-__SYSCALL(110, sys_ni_syscall)			/* 110 was sys_iopl */
-__SYSCALL(111, sys_vhangup)
-__SYSCALL(112, sys_ni_syscall)			/* 112 was sys_idle */
-__SYSCALL(113, sys_ni_syscall)			/* 113 was sys_syscall */
-__SYSCALL(114, compat_sys_wait4)
-__SYSCALL(115, sys_swapoff)
-__SYSCALL(116, compat_sys_sysinfo)
-__SYSCALL(117, sys_ni_syscall)			/* 117 was sys_ipc */
-__SYSCALL(118, sys_fsync)
-__SYSCALL(119, compat_sys_sigreturn_wrapper)
-__SYSCALL(120, sys_clone)
-__SYSCALL(121, sys_setdomainname)
-__SYSCALL(122, sys_newuname)
-__SYSCALL(123, sys_ni_syscall)			/* 123 was sys_modify_ldt */
-__SYSCALL(124, compat_sys_adjtimex)
-__SYSCALL(125, sys_mprotect)
-__SYSCALL(126, compat_sys_sigprocmask)
-__SYSCALL(127, sys_ni_syscall)			/* 127 was sys_create_module */
-__SYSCALL(128, sys_init_module)
-__SYSCALL(129, sys_delete_module)
-__SYSCALL(130, sys_ni_syscall)			/* 130 was sys_get_kernel_syms */
-__SYSCALL(131, sys_quotactl)
-__SYSCALL(132, sys_getpgid)
-__SYSCALL(133, sys_fchdir)
-__SYSCALL(134, sys_bdflush)
-__SYSCALL(135, sys_sysfs)
-__SYSCALL(136, sys_personality)
-__SYSCALL(137, sys_ni_syscall)			/* 137 was sys_afs_syscall */
-__SYSCALL(138, sys_setfsuid16)
-__SYSCALL(139, sys_setfsgid16)
-__SYSCALL(140, sys_llseek)
-__SYSCALL(141, compat_sys_getdents)
-__SYSCALL(142, compat_sys_select)
-__SYSCALL(143, sys_flock)
-__SYSCALL(144, sys_msync)
-__SYSCALL(145, compat_sys_readv)
-__SYSCALL(146, compat_sys_writev)
-__SYSCALL(147, sys_getsid)
-__SYSCALL(148, sys_fdatasync)
-__SYSCALL(149, compat_sys_sysctl)
-__SYSCALL(150, sys_mlock)
-__SYSCALL(151, sys_munlock)
-__SYSCALL(152, sys_mlockall)
-__SYSCALL(153, sys_munlockall)
-__SYSCALL(154, sys_sched_setparam)
-__SYSCALL(155, sys_sched_getparam)
-__SYSCALL(156, sys_sched_setscheduler)
-__SYSCALL(157, sys_sched_getscheduler)
-__SYSCALL(158, sys_sched_yield)
-__SYSCALL(159, sys_sched_get_priority_max)
-__SYSCALL(160, sys_sched_get_priority_min)
-__SYSCALL(161, compat_sys_sched_rr_get_interval)
-__SYSCALL(162, compat_sys_nanosleep)
-__SYSCALL(163, sys_mremap)
-__SYSCALL(164, sys_setresuid16)
-__SYSCALL(165, sys_getresuid16)
-__SYSCALL(166, sys_ni_syscall)			/* 166 was sys_vm86 */
-__SYSCALL(167, sys_ni_syscall)			/* 167 was sys_query_module */
-__SYSCALL(168, sys_poll)
-__SYSCALL(169, sys_ni_syscall)
-__SYSCALL(170, sys_setresgid16)
-__SYSCALL(171, sys_getresgid16)
-__SYSCALL(172, sys_prctl)
-__SYSCALL(173, compat_sys_rt_sigreturn_wrapper)
-__SYSCALL(174, compat_sys_rt_sigaction)
-__SYSCALL(175, compat_sys_rt_sigprocmask)
-__SYSCALL(176, compat_sys_rt_sigpending)
-__SYSCALL(177, compat_sys_rt_sigtimedwait)
-__SYSCALL(178, compat_sys_rt_sigqueueinfo)
-__SYSCALL(179, compat_sys_rt_sigsuspend)
-__SYSCALL(180, compat_sys_pread64_wrapper)
-__SYSCALL(181, compat_sys_pwrite64_wrapper)
-__SYSCALL(182, sys_chown16)
-__SYSCALL(183, sys_getcwd)
-__SYSCALL(184, sys_capget)
-__SYSCALL(185, sys_capset)
-__SYSCALL(186, compat_sys_sigaltstack)
-__SYSCALL(187, compat_sys_sendfile)
-__SYSCALL(188, sys_ni_syscall)			/* 188 reserved */
-__SYSCALL(189, sys_ni_syscall)			/* 189 reserved */
-__SYSCALL(190, sys_vfork)
-__SYSCALL(191, compat_sys_getrlimit)		/* SuS compliant getrlimit */
-__SYSCALL(192, sys_mmap_pgoff)
-__SYSCALL(193, compat_sys_truncate64_wrapper)
-__SYSCALL(194, compat_sys_ftruncate64_wrapper)
-__SYSCALL(195, sys_stat64)
-__SYSCALL(196, sys_lstat64)
-__SYSCALL(197, sys_fstat64)
-__SYSCALL(198, sys_lchown)
-__SYSCALL(199, sys_getuid)
-__SYSCALL(200, sys_getgid)
-__SYSCALL(201, sys_geteuid)
-__SYSCALL(202, sys_getegid)
-__SYSCALL(203, sys_setreuid)
-__SYSCALL(204, sys_setregid)
-__SYSCALL(205, sys_getgroups)
-__SYSCALL(206, sys_setgroups)
-__SYSCALL(207, sys_fchown)
-__SYSCALL(208, sys_setresuid)
-__SYSCALL(209, sys_getresuid)
-__SYSCALL(210, sys_setresgid)
-__SYSCALL(211, sys_getresgid)
-__SYSCALL(212, sys_chown)
-__SYSCALL(213, sys_setuid)
-__SYSCALL(214, sys_setgid)
-__SYSCALL(215, sys_setfsuid)
-__SYSCALL(216, sys_setfsgid)
-__SYSCALL(217, compat_sys_getdents64)
-__SYSCALL(218, sys_pivot_root)
-__SYSCALL(219, sys_mincore)
-__SYSCALL(220, sys_madvise)
-__SYSCALL(221, compat_sys_fcntl64)
-__SYSCALL(222, sys_ni_syscall)			/* 222 for tux */
-__SYSCALL(223, sys_ni_syscall)			/* 223 is unused */
-__SYSCALL(224, sys_gettid)
-__SYSCALL(225, compat_sys_readahead_wrapper)
-__SYSCALL(226, sys_setxattr)
-__SYSCALL(227, sys_lsetxattr)
-__SYSCALL(228, sys_fsetxattr)
-__SYSCALL(229, sys_getxattr)
-__SYSCALL(230, sys_lgetxattr)
-__SYSCALL(231, sys_fgetxattr)
-__SYSCALL(232, sys_listxattr)
-__SYSCALL(233, sys_llistxattr)
-__SYSCALL(234, sys_flistxattr)
-__SYSCALL(235, sys_removexattr)
-__SYSCALL(236, sys_lremovexattr)
-__SYSCALL(237, sys_fremovexattr)
-__SYSCALL(238, sys_tkill)
-__SYSCALL(239, sys_sendfile64)
-__SYSCALL(240, compat_sys_futex)
-__SYSCALL(241, compat_sys_sched_setaffinity)
-__SYSCALL(242, compat_sys_sched_getaffinity)
-__SYSCALL(243, compat_sys_io_setup)
-__SYSCALL(244, sys_io_destroy)
-__SYSCALL(245, compat_sys_io_getevents)
-__SYSCALL(246, compat_sys_io_submit)
-__SYSCALL(247, sys_io_cancel)
-__SYSCALL(248, sys_exit_group)
-__SYSCALL(249, compat_sys_lookup_dcookie)
-__SYSCALL(250, sys_epoll_create)
-__SYSCALL(251, sys_epoll_ctl)
-__SYSCALL(252, sys_epoll_wait)
-__SYSCALL(253, sys_remap_file_pages)
-__SYSCALL(254, sys_ni_syscall)			/* 254 for set_thread_area */
-__SYSCALL(255, sys_ni_syscall)			/* 255 for get_thread_area */
-__SYSCALL(256, sys_set_tid_address)
-__SYSCALL(257, compat_sys_timer_create)
-__SYSCALL(258, compat_sys_timer_settime)
-__SYSCALL(259, compat_sys_timer_gettime)
-__SYSCALL(260, sys_timer_getoverrun)
-__SYSCALL(261, sys_timer_delete)
-__SYSCALL(262, compat_sys_clock_settime)
-__SYSCALL(263, compat_sys_clock_gettime)
-__SYSCALL(264, compat_sys_clock_getres)
-__SYSCALL(265, compat_sys_clock_nanosleep)
-__SYSCALL(266, compat_sys_statfs64_wrapper)
-__SYSCALL(267, compat_sys_fstatfs64_wrapper)
-__SYSCALL(268, sys_tgkill)
-__SYSCALL(269, compat_sys_utimes)
-__SYSCALL(270, compat_sys_fadvise64_64_wrapper)
-__SYSCALL(271, sys_pciconfig_iobase)
-__SYSCALL(272, sys_pciconfig_read)
-__SYSCALL(273, sys_pciconfig_write)
-__SYSCALL(274, compat_sys_mq_open)
-__SYSCALL(275, sys_mq_unlink)
-__SYSCALL(276, compat_sys_mq_timedsend)
-__SYSCALL(277, compat_sys_mq_timedreceive)
-__SYSCALL(278, compat_sys_mq_notify)
-__SYSCALL(279, compat_sys_mq_getsetattr)
-__SYSCALL(280, compat_sys_waitid)
-__SYSCALL(281, sys_socket)
-__SYSCALL(282, sys_bind)
-__SYSCALL(283, sys_connect)
-__SYSCALL(284, sys_listen)
-__SYSCALL(285, sys_accept)
-__SYSCALL(286, sys_getsockname)
-__SYSCALL(287, sys_getpeername)
-__SYSCALL(288, sys_socketpair)
-__SYSCALL(289, sys_send)
-__SYSCALL(290, sys_sendto)
-__SYSCALL(291, compat_sys_recv)
-__SYSCALL(292, compat_sys_recvfrom)
-__SYSCALL(293, sys_shutdown)
-__SYSCALL(294, compat_sys_setsockopt)
-__SYSCALL(295, compat_sys_getsockopt)
-__SYSCALL(296, compat_sys_sendmsg)
-__SYSCALL(297, compat_sys_recvmsg)
-__SYSCALL(298, sys_semop)
-__SYSCALL(299, sys_semget)
-__SYSCALL(300, compat_sys_semctl)
-__SYSCALL(301, compat_sys_msgsnd)
-__SYSCALL(302, compat_sys_msgrcv)
-__SYSCALL(303, sys_msgget)
-__SYSCALL(304, compat_sys_msgctl)
-__SYSCALL(305, compat_sys_shmat)
-__SYSCALL(306, sys_shmdt)
-__SYSCALL(307, sys_shmget)
-__SYSCALL(308, compat_sys_shmctl)
-__SYSCALL(309, sys_add_key)
-__SYSCALL(310, sys_request_key)
-__SYSCALL(311, compat_sys_keyctl)
-__SYSCALL(312, compat_sys_semtimedop)
-__SYSCALL(313, sys_ni_syscall)
-__SYSCALL(314, sys_ioprio_set)
-__SYSCALL(315, sys_ioprio_get)
-__SYSCALL(316, sys_inotify_init)
-__SYSCALL(317, sys_inotify_add_watch)
-__SYSCALL(318, sys_inotify_rm_watch)
-__SYSCALL(319, compat_sys_mbind)
-__SYSCALL(320, compat_sys_get_mempolicy)
-__SYSCALL(321, compat_sys_set_mempolicy)
-__SYSCALL(322, compat_sys_openat)
-__SYSCALL(323, sys_mkdirat)
-__SYSCALL(324, sys_mknodat)
-__SYSCALL(325, sys_fchownat)
-__SYSCALL(326, compat_sys_futimesat)
-__SYSCALL(327, sys_fstatat64)
-__SYSCALL(328, sys_unlinkat)
-__SYSCALL(329, sys_renameat)
-__SYSCALL(330, sys_linkat)
-__SYSCALL(331, sys_symlinkat)
-__SYSCALL(332, sys_readlinkat)
-__SYSCALL(333, sys_fchmodat)
-__SYSCALL(334, sys_faccessat)
-__SYSCALL(335, compat_sys_pselect6)
-__SYSCALL(336, compat_sys_ppoll)
-__SYSCALL(337, sys_unshare)
-__SYSCALL(338, compat_sys_set_robust_list)
-__SYSCALL(339, compat_sys_get_robust_list)
-__SYSCALL(340, sys_splice)
-__SYSCALL(341, compat_sys_sync_file_range2_wrapper)
-__SYSCALL(342, sys_tee)
-__SYSCALL(343, compat_sys_vmsplice)
-__SYSCALL(344, compat_sys_move_pages)
-__SYSCALL(345, sys_getcpu)
-__SYSCALL(346, compat_sys_epoll_pwait)
-__SYSCALL(347, compat_sys_kexec_load)
-__SYSCALL(348, compat_sys_utimensat)
-__SYSCALL(349, compat_sys_signalfd)
-__SYSCALL(350, sys_timerfd_create)
-__SYSCALL(351, sys_eventfd)
-__SYSCALL(352, compat_sys_fallocate_wrapper)
-__SYSCALL(353, compat_sys_timerfd_settime)
-__SYSCALL(354, compat_sys_timerfd_gettime)
-__SYSCALL(355, compat_sys_signalfd4)
-__SYSCALL(356, sys_eventfd2)
-__SYSCALL(357, sys_epoll_create1)
-__SYSCALL(358, sys_dup3)
-__SYSCALL(359, sys_pipe2)
-__SYSCALL(360, sys_inotify_init1)
-__SYSCALL(361, compat_sys_preadv)
-__SYSCALL(362, compat_sys_pwritev)
-__SYSCALL(363, compat_sys_rt_tgsigqueueinfo)
-__SYSCALL(364, sys_perf_event_open)
-__SYSCALL(365, compat_sys_recvmmsg)
-__SYSCALL(366, sys_accept4)
-__SYSCALL(367, sys_fanotify_init)
-__SYSCALL(368, compat_sys_fanotify_mark)
-__SYSCALL(369, sys_prlimit64)
-__SYSCALL(370, sys_name_to_handle_at)
-__SYSCALL(371, compat_sys_open_by_handle_at)
-__SYSCALL(372, compat_sys_clock_adjtime)
-__SYSCALL(373, sys_syncfs)
-__SYSCALL(374, compat_sys_sendmmsg)
-__SYSCALL(375, sys_setns)
-__SYSCALL(376, compat_sys_process_vm_readv)
-__SYSCALL(377, compat_sys_process_vm_writev)
-__SYSCALL(378, sys_ni_syscall)			/* 378 for kcmp */
-
-#define __NR_compat_syscalls		379
-
-/*
- * Compat syscall numbers used by the AArch64 kernel.
- */
-#define __NR_compat_restart_syscall	0
-#define __NR_compat_sigreturn		119
-#define __NR_compat_rt_sigreturn	173
-
-
-/*
- * The following SVCs are ARM private.
- */
-#define __ARM_NR_COMPAT_BASE		0x0f0000
-#define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE+2)
-#define __ARM_NR_compat_set_tls		(__ARM_NR_COMPAT_BASE+5)
+#define __NR_restart_syscall 0
+__SYSCALL(__NR_restart_syscall, sys_restart_syscall)
+#define __NR_exit 1
+__SYSCALL(__NR_exit, sys_exit)
+#define __NR_fork 2
+__SYSCALL(__NR_fork, sys_fork)
+#define __NR_read 3
+__SYSCALL(__NR_read, sys_read)
+#define __NR_write 4
+__SYSCALL(__NR_write, sys_write)
+#define __NR_open 5
+__SYSCALL(__NR_open, compat_sys_open)
+#define __NR_close 6
+__SYSCALL(__NR_close, sys_close)
+			/* 7 was sys_waitpid */
+__SYSCALL(7, sys_ni_syscall)
+#define __NR_creat 8
+__SYSCALL(__NR_creat, sys_creat)
+#define __NR_link 9
+__SYSCALL(__NR_link, sys_link)
+#define __NR_unlink 10
+__SYSCALL(__NR_unlink, sys_unlink)
+#define __NR_execve 11
+__SYSCALL(__NR_execve, compat_sys_execve)
+#define __NR_chdir 12
+__SYSCALL(__NR_chdir, sys_chdir)
+			/* 13 was sys_time */
+__SYSCALL(13, sys_ni_syscall)
+#define __NR_mknod 14
+__SYSCALL(__NR_mknod, sys_mknod)
+#define __NR_chmod 15
+__SYSCALL(__NR_chmod, sys_chmod)
+#define __NR_lchown 16
+__SYSCALL(__NR_lchown, sys_lchown16)
+			/* 17 was sys_break */
+__SYSCALL(17, sys_ni_syscall)
+			/* 18 was sys_stat */
+__SYSCALL(18, sys_ni_syscall)
+#define __NR_lseek 19
+__SYSCALL(__NR_lseek, compat_sys_lseek)
+#define __NR_getpid 20
+__SYSCALL(__NR_getpid, sys_getpid)
+#define __NR_mount 21
+__SYSCALL(__NR_mount, compat_sys_mount)
+			/* 22 was sys_umount */
+__SYSCALL(22, sys_ni_syscall)
+#define __NR_setuid 23
+__SYSCALL(__NR_setuid, sys_setuid16)
+#define __NR_getuid 24
+__SYSCALL(__NR_getuid, sys_getuid16)
+			/* 25 was sys_stime */
+__SYSCALL(25, sys_ni_syscall)
+#define __NR_ptrace 26
+__SYSCALL(__NR_ptrace, compat_sys_ptrace)
+			/* 27 was sys_alarm */
+__SYSCALL(27, sys_ni_syscall)
+			/* 28 was sys_fstat */
+__SYSCALL(28, sys_ni_syscall)
+#define __NR_pause 29
+__SYSCALL(__NR_pause, sys_pause)
+			/* 30 was sys_utime */
+__SYSCALL(30, sys_ni_syscall)
+			/* 31 was sys_stty */
+__SYSCALL(31, sys_ni_syscall)
+			/* 32 was sys_gtty */
+__SYSCALL(32, sys_ni_syscall)
+#define __NR_access 33
+__SYSCALL(__NR_access, sys_access)
+#define __NR_nice 34
+__SYSCALL(__NR_nice, sys_nice)
+			/* 35 was sys_ftime */
+__SYSCALL(35, sys_ni_syscall)
+#define __NR_sync 36
+__SYSCALL(__NR_sync, sys_sync)
+#define __NR_kill 37
+__SYSCALL(__NR_kill, sys_kill)
+#define __NR_rename 38
+__SYSCALL(__NR_rename, sys_rename)
+#define __NR_mkdir 39
+__SYSCALL(__NR_mkdir, sys_mkdir)
+#define __NR_rmdir 40
+__SYSCALL(__NR_rmdir, sys_rmdir)
+#define __NR_dup 41
+__SYSCALL(__NR_dup, sys_dup)
+#define __NR_pipe 42
+__SYSCALL(__NR_pipe, sys_pipe)
+#define __NR_times 43
+__SYSCALL(__NR_times, compat_sys_times)
+			/* 44 was sys_prof */
+__SYSCALL(44, sys_ni_syscall)
+#define __NR_brk 45
+__SYSCALL(__NR_brk, sys_brk)
+#define __NR_setgid 46
+__SYSCALL(__NR_setgid, sys_setgid16)
+#define __NR_getgid 47
+__SYSCALL(__NR_getgid, sys_getgid16)
+			/* 48 was sys_signal */
+__SYSCALL(48, sys_ni_syscall)
+#define __NR_geteuid 49
+__SYSCALL(__NR_geteuid, sys_geteuid16)
+#define __NR_getegid 50
+__SYSCALL(__NR_getegid, sys_getegid16)
+#define __NR_acct 51
+__SYSCALL(__NR_acct, sys_acct)
+#define __NR_umount2 52
+__SYSCALL(__NR_umount2, sys_umount)
+			/* 53 was sys_lock */
+__SYSCALL(53, sys_ni_syscall)
+#define __NR_ioctl 54
+__SYSCALL(__NR_ioctl, compat_sys_ioctl)
+#define __NR_fcntl 55
+__SYSCALL(__NR_fcntl, compat_sys_fcntl)
+			/* 56 was sys_mpx */
+__SYSCALL(56, sys_ni_syscall)
+#define __NR_setpgid 57
+__SYSCALL(__NR_setpgid, sys_setpgid)
+			/* 58 was sys_ulimit */
+__SYSCALL(58, sys_ni_syscall)
+			/* 59 was sys_olduname */
+__SYSCALL(59, sys_ni_syscall)
+#define __NR_umask 60
+__SYSCALL(__NR_umask, sys_umask)
+#define __NR_chroot 61
+__SYSCALL(__NR_chroot, sys_chroot)
+#define __NR_ustat 62
+__SYSCALL(__NR_ustat, compat_sys_ustat)
+#define __NR_dup2 63
+__SYSCALL(__NR_dup2, sys_dup2)
+#define __NR_getppid 64
+__SYSCALL(__NR_getppid, sys_getppid)
+#define __NR_getpgrp 65
+__SYSCALL(__NR_getpgrp, sys_getpgrp)
+#define __NR_setsid 66
+__SYSCALL(__NR_setsid, sys_setsid)
+#define __NR_sigaction 67
+__SYSCALL(__NR_sigaction, compat_sys_sigaction)
+			/* 68 was sys_sgetmask */
+__SYSCALL(68, sys_ni_syscall)
+			/* 69 was sys_ssetmask */
+__SYSCALL(69, sys_ni_syscall)
+#define __NR_setreuid 70
+__SYSCALL(__NR_setreuid, sys_setreuid16)
+#define __NR_setregid 71
+__SYSCALL(__NR_setregid, sys_setregid16)
+#define __NR_sigsuspend 72
+__SYSCALL(__NR_sigsuspend, sys_sigsuspend)
+#define __NR_sigpending 73
+__SYSCALL(__NR_sigpending, compat_sys_sigpending)
+#define __NR_sethostname 74
+__SYSCALL(__NR_sethostname, sys_sethostname)
+#define __NR_setrlimit 75
+__SYSCALL(__NR_setrlimit, compat_sys_setrlimit)
+			/* 76 was compat_sys_getrlimit */
+__SYSCALL(76, sys_ni_syscall)
+#define __NR_getrusage 77
+__SYSCALL(__NR_getrusage, compat_sys_getrusage)
+#define __NR_gettimeofday 78
+__SYSCALL(__NR_gettimeofday, compat_sys_gettimeofday)
+#define __NR_settimeofday 79
+__SYSCALL(__NR_settimeofday, compat_sys_settimeofday)
+#define __NR_getgroups 80
+__SYSCALL(__NR_getgroups, sys_getgroups16)
+#define __NR_setgroups 81
+__SYSCALL(__NR_setgroups, sys_setgroups16)
+			/* 82 was compat_sys_select */
+__SYSCALL(82, sys_ni_syscall)
+#define __NR_symlink 83
+__SYSCALL(__NR_symlink, sys_symlink)
+			/* 84 was sys_lstat */
+__SYSCALL(84, sys_ni_syscall)
+#define __NR_readlink 85
+__SYSCALL(__NR_readlink, sys_readlink)
+#define __NR_uselib 86
+__SYSCALL(__NR_uselib, sys_uselib)
+#define __NR_swapon 87
+__SYSCALL(__NR_swapon, sys_swapon)
+#define __NR_reboot 88
+__SYSCALL(__NR_reboot, sys_reboot)
+			/* 89 was sys_readdir */
+__SYSCALL(89, sys_ni_syscall)
+			/* 90 was sys_mmap */
+__SYSCALL(90, sys_ni_syscall)
+#define __NR_munmap 91
+__SYSCALL(__NR_munmap, sys_munmap)
+#define __NR_truncate 92
+__SYSCALL(__NR_truncate, compat_sys_truncate)
+#define __NR_ftruncate 93
+__SYSCALL(__NR_ftruncate, compat_sys_ftruncate)
+#define __NR_fchmod 94
+__SYSCALL(__NR_fchmod, sys_fchmod)
+#define __NR_fchown 95
+__SYSCALL(__NR_fchown, sys_fchown16)
+#define __NR_getpriority 96
+__SYSCALL(__NR_getpriority, sys_getpriority)
+#define __NR_setpriority 97
+__SYSCALL(__NR_setpriority, sys_setpriority)
+			/* 98 was sys_profil */
+__SYSCALL(98, sys_ni_syscall)
+#define __NR_statfs 99
+__SYSCALL(__NR_statfs, compat_sys_statfs)
+#define __NR_fstatfs 100
+__SYSCALL(__NR_fstatfs, compat_sys_fstatfs)
+			/* 101 was sys_ioperm */
+__SYSCALL(101, sys_ni_syscall)
+			/* 102 was sys_socketcall */
+__SYSCALL(102, sys_ni_syscall)
+#define __NR_syslog 103
+__SYSCALL(__NR_syslog, sys_syslog)
+#define __NR_setitimer 104
+__SYSCALL(__NR_setitimer, compat_sys_setitimer)
+#define __NR_getitimer 105
+__SYSCALL(__NR_getitimer, compat_sys_getitimer)
+#define __NR_stat 106
+__SYSCALL(__NR_stat, compat_sys_newstat)
+#define __NR_lstat 107
+__SYSCALL(__NR_lstat, compat_sys_newlstat)
+#define __NR_fstat 108
+__SYSCALL(__NR_fstat, compat_sys_newfstat)
+			/* 109 was sys_uname */
+__SYSCALL(109, sys_ni_syscall)
+			/* 110 was sys_iopl */
+__SYSCALL(110, sys_ni_syscall)
+#define __NR_vhangup 111
+__SYSCALL(__NR_vhangup, sys_vhangup)
+			/* 112 was sys_idle */
+__SYSCALL(112, sys_ni_syscall)
+			/* 113 was sys_syscall */
+__SYSCALL(113, sys_ni_syscall)
+#define __NR_wait4 114
+__SYSCALL(__NR_wait4, compat_sys_wait4)
+#define __NR_swapoff 115
+__SYSCALL(__NR_swapoff, sys_swapoff)
+#define __NR_sysinfo 116
+__SYSCALL(__NR_sysinfo, compat_sys_sysinfo)
+			/* 117 was sys_ipc */
+__SYSCALL(117, sys_ni_syscall)
+#define __NR_fsync 118
+__SYSCALL(__NR_fsync, sys_fsync)
+#define __NR_sigreturn 119
+__SYSCALL(__NR_sigreturn, compat_sys_sigreturn_wrapper)
+#define __NR_clone 120
+__SYSCALL(__NR_clone, sys_clone)
+#define __NR_setdomainname 121
+__SYSCALL(__NR_setdomainname, sys_setdomainname)
+#define __NR_uname 122
+__SYSCALL(__NR_uname, sys_newuname)
+			/* 123 was sys_modify_ldt */
+__SYSCALL(123, sys_ni_syscall)
+#define __NR_adjtimex 124
+__SYSCALL(__NR_adjtimex, compat_sys_adjtimex)
+#define __NR_mprotect 125
+__SYSCALL(__NR_mprotect, sys_mprotect)
+#define __NR_sigprocmask 126
+__SYSCALL(__NR_sigprocmask, compat_sys_sigprocmask)
+			/* 127 was sys_create_module */
+__SYSCALL(127, sys_ni_syscall)
+#define __NR_init_module 128
+__SYSCALL(__NR_init_module, sys_init_module)
+#define __NR_delete_module 129
+__SYSCALL(__NR_delete_module, sys_delete_module)
+			/* 130 was sys_get_kernel_syms */
+__SYSCALL(130, sys_ni_syscall)
+#define __NR_quotactl 131
+__SYSCALL(__NR_quotactl, sys_quotactl)
+#define __NR_getpgid 132
+__SYSCALL(__NR_getpgid, sys_getpgid)
+#define __NR_fchdir 133
+__SYSCALL(__NR_fchdir, sys_fchdir)
+#define __NR_bdflush 134
+__SYSCALL(__NR_bdflush, sys_bdflush)
+#define __NR_sysfs 135
+__SYSCALL(__NR_sysfs, sys_sysfs)
+#define __NR_personality 136
+__SYSCALL(__NR_personality, sys_personality)
+			/* 137 was sys_afs_syscall */
+__SYSCALL(137, sys_ni_syscall)
+#define __NR_setfsuid 138
+__SYSCALL(__NR_setfsuid, sys_setfsuid16)
+#define __NR_setfsgid 139
+__SYSCALL(__NR_setfsgid, sys_setfsgid16)
+#define __NR__llseek 140
+__SYSCALL(__NR__llseek, sys_llseek)
+#define __NR_getdents 141
+__SYSCALL(__NR_getdents, compat_sys_getdents)
+#define __NR__newselect 142
+__SYSCALL(__NR__newselect, compat_sys_select)
+#define __NR_flock 143
+__SYSCALL(__NR_flock, sys_flock)
+#define __NR_msync 144
+__SYSCALL(__NR_msync, sys_msync)
+#define __NR_readv 145
+__SYSCALL(__NR_readv, compat_sys_readv)
+#define __NR_writev 146
+__SYSCALL(__NR_writev, compat_sys_writev)
+#define __NR_getsid 147
+__SYSCALL(__NR_getsid, sys_getsid)
+#define __NR_fdatasync 148
+__SYSCALL(__NR_fdatasync, sys_fdatasync)
+#define __NR__sysctl 149
+__SYSCALL(__NR__sysctl, compat_sys_sysctl)
+#define __NR_mlock 150
+__SYSCALL(__NR_mlock, sys_mlock)
+#define __NR_munlock 151
+__SYSCALL(__NR_munlock, sys_munlock)
+#define __NR_mlockall 152
+__SYSCALL(__NR_mlockall, sys_mlockall)
+#define __NR_munlockall 153
+__SYSCALL(__NR_munlockall, sys_munlockall)
+#define __NR_sched_setparam 154
+__SYSCALL(__NR_sched_setparam, sys_sched_setparam)
+#define __NR_sched_getparam 155
+__SYSCALL(__NR_sched_getparam, sys_sched_getparam)
+#define __NR_sched_setscheduler 156
+__SYSCALL(__NR_sched_setscheduler, sys_sched_setscheduler)
+#define __NR_sched_getscheduler 157
+__SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler)
+#define __NR_sched_yield 158
+__SYSCALL(__NR_sched_yield, sys_sched_yield)
+#define __NR_sched_get_priority_max 159
+__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
+#define __NR_sched_get_priority_min 160
+__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
+#define __NR_sched_rr_get_interval 161
+__SYSCALL(__NR_sched_rr_get_interval, compat_sys_sched_rr_get_interval)
+#define __NR_nanosleep 162
+__SYSCALL(__NR_nanosleep, compat_sys_nanosleep)
+#define __NR_mremap 163
+__SYSCALL(__NR_mremap, sys_mremap)
+#define __NR_setresuid 164
+__SYSCALL(__NR_setresuid, sys_setresuid16)
+#define __NR_getresuid 165
+__SYSCALL(__NR_getresuid, sys_getresuid16)
+			/* 166 was sys_vm86 */
+__SYSCALL(166, sys_ni_syscall)
+			/* 167 was sys_query_module */
+__SYSCALL(167, sys_ni_syscall)
+#define __NR_poll 168
+__SYSCALL(__NR_poll, sys_poll)
+#define __NR_nfsservctl 169
+__SYSCALL(__NR_nfsservctl, sys_ni_syscall)
+#define __NR_setresgid 170
+__SYSCALL(__NR_setresgid, sys_setresgid16)
+#define __NR_getresgid 171
+__SYSCALL(__NR_getresgid, sys_getresgid16)
+#define __NR_prctl 172
+__SYSCALL(__NR_prctl, sys_prctl)
+#define __NR_rt_sigreturn 173
+__SYSCALL(__NR_rt_sigreturn, compat_sys_rt_sigreturn_wrapper)
+#define __NR_rt_sigaction 174
+__SYSCALL(__NR_rt_sigaction, compat_sys_rt_sigaction)
+#define __NR_rt_sigprocmask 175
+__SYSCALL(__NR_rt_sigprocmask, compat_sys_rt_sigprocmask)
+#define __NR_rt_sigpending 176
+__SYSCALL(__NR_rt_sigpending, compat_sys_rt_sigpending)
+#define __NR_rt_sigtimedwait 177
+__SYSCALL(__NR_rt_sigtimedwait, compat_sys_rt_sigtimedwait)
+#define __NR_rt_sigqueueinfo 178
+__SYSCALL(__NR_rt_sigqueueinfo, compat_sys_rt_sigqueueinfo)
+#define __NR_rt_sigsuspend 179
+__SYSCALL(__NR_rt_sigsuspend, compat_sys_rt_sigsuspend)
+#define __NR_pread64 180
+__SYSCALL(__NR_pread64, compat_sys_pread64_wrapper)
+#define __NR_pwrite64 181
+__SYSCALL(__NR_pwrite64, compat_sys_pwrite64_wrapper)
+#define __NR_chown 182
+__SYSCALL(__NR_chown, sys_chown16)
+#define __NR_getcwd 183
+__SYSCALL(__NR_getcwd, sys_getcwd)
+#define __NR_capget 184
+__SYSCALL(__NR_capget, sys_capget)
+#define __NR_capset 185
+__SYSCALL(__NR_capset, sys_capset)
+#define __NR_sigaltstack 186
+__SYSCALL(__NR_sigaltstack, compat_sys_sigaltstack)
+#define __NR_sendfile 187
+__SYSCALL(__NR_sendfile, compat_sys_sendfile)
+			/* 188 reserved */
+__SYSCALL(188, sys_ni_syscall)
+			/* 189 reserved */
+__SYSCALL(189, sys_ni_syscall)
+#define __NR_vfork 190
+__SYSCALL(__NR_vfork, sys_vfork)
+#define __NR_ugetrlimit 191	/* SuS compliant getrlimit */
+__SYSCALL(__NR_ugetrlimit, compat_sys_getrlimit)
+#define __NR_mmap2 192
+__SYSCALL(__NR_mmap2, sys_mmap_pgoff)
+#define __NR_truncate64 193
+__SYSCALL(__NR_truncate64, compat_sys_truncate64_wrapper)
+#define __NR_ftruncate64 194
+__SYSCALL(__NR_ftruncate64, compat_sys_ftruncate64_wrapper)
+#define __NR_stat64 195
+__SYSCALL(__NR_stat64, sys_stat64)
+#define __NR_lstat64 196
+__SYSCALL(__NR_lstat64, sys_lstat64)
+#define __NR_fstat64 197
+__SYSCALL(__NR_fstat64, sys_fstat64)
+#define __NR_lchown32 198
+__SYSCALL(__NR_lchown32, sys_lchown)
+#define __NR_getuid32 199
+__SYSCALL(__NR_getuid32, sys_getuid)
+#define __NR_getgid32 200
+__SYSCALL(__NR_getgid32, sys_getgid)
+#define __NR_geteuid32 201
+__SYSCALL(__NR_geteuid32, sys_geteuid)
+#define __NR_getegid32 202
+__SYSCALL(__NR_getegid32, sys_getegid)
+#define __NR_setreuid32 203
+__SYSCALL(__NR_setreuid32, sys_setreuid)
+#define __NR_setregid32 204
+__SYSCALL(__NR_setregid32, sys_setregid)
+#define __NR_getgroups32 205
+__SYSCALL(__NR_getgroups32, sys_getgroups)
+#define __NR_setgroups32 206
+__SYSCALL(__NR_setgroups32, sys_setgroups)
+#define __NR_fchown32 207
+__SYSCALL(__NR_fchown32, sys_fchown)
+#define __NR_setresuid32 208
+__SYSCALL(__NR_setresuid32, sys_setresuid)
+#define __NR_getresuid32 209
+__SYSCALL(__NR_getresuid32, sys_getresuid)
+#define __NR_setresgid32 210
+__SYSCALL(__NR_setresgid32, sys_setresgid)
+#define __NR_getresgid32 211
+__SYSCALL(__NR_getresgid32, sys_getresgid)
+#define __NR_chown32 212
+__SYSCALL(__NR_chown32, sys_chown)
+#define __NR_setuid32 213
+__SYSCALL(__NR_setuid32, sys_setuid)
+#define __NR_setgid32 214
+__SYSCALL(__NR_setgid32, sys_setgid)
+#define __NR_setfsuid32 215
+__SYSCALL(__NR_setfsuid32, sys_setfsuid)
+#define __NR_setfsgid32 216
+__SYSCALL(__NR_setfsgid32, sys_setfsgid)
+#define __NR_getdents64 217
+__SYSCALL(__NR_getdents64, compat_sys_getdents64)
+#define __NR_pivot_root 218
+__SYSCALL(__NR_pivot_root, sys_pivot_root)
+#define __NR_mincore 219
+__SYSCALL(__NR_mincore, sys_mincore)
+#define __NR_madvise 220
+__SYSCALL(__NR_madvise, sys_madvise)
+#define __NR_fcntl64 221
+__SYSCALL(__NR_fcntl64, compat_sys_fcntl64)
+			/* 222 for tux */
+__SYSCALL(222, sys_ni_syscall)
+			/* 223 is unused */
+__SYSCALL(223, sys_ni_syscall)
+#define __NR_gettid 224
+__SYSCALL(__NR_gettid, sys_gettid)
+#define __NR_readahead 225
+__SYSCALL(__NR_readahead, compat_sys_readahead_wrapper)
+#define __NR_setxattr 226
+__SYSCALL(__NR_setxattr, sys_setxattr)
+#define __NR_lsetxattr 227
+__SYSCALL(__NR_lsetxattr, sys_lsetxattr)
+#define __NR_fsetxattr 228
+__SYSCALL(__NR_fsetxattr, sys_fsetxattr)
+#define __NR_getxattr 229
+__SYSCALL(__NR_getxattr, sys_getxattr)
+#define __NR_lgetxattr 230
+__SYSCALL(__NR_lgetxattr, sys_lgetxattr)
+#define __NR_fgetxattr 231
+__SYSCALL(__NR_fgetxattr, sys_fgetxattr)
+#define __NR_listxattr 232
+__SYSCALL(__NR_listxattr, sys_listxattr)
+#define __NR_llistxattr 233
+__SYSCALL(__NR_llistxattr, sys_llistxattr)
+#define __NR_flistxattr 234
+__SYSCALL(__NR_flistxattr, sys_flistxattr)
+#define __NR_removexattr 235
+__SYSCALL(__NR_removexattr, sys_removexattr)
+#define __NR_lremovexattr 236
+__SYSCALL(__NR_lremovexattr, sys_lremovexattr)
+#define __NR_fremovexattr 237
+__SYSCALL(__NR_fremovexattr, sys_fremovexattr)
+#define __NR_tkill 238
+__SYSCALL(__NR_tkill, sys_tkill)
+#define __NR_sendfile64 239
+__SYSCALL(__NR_sendfile64, sys_sendfile64)
+#define __NR_futex 240
+__SYSCALL(__NR_futex, compat_sys_futex)
+#define __NR_sched_setaffinity 241
+__SYSCALL(__NR_sched_setaffinity, compat_sys_sched_setaffinity)
+#define __NR_sched_getaffinity 242
+__SYSCALL(__NR_sched_getaffinity, compat_sys_sched_getaffinity)
+#define __NR_io_setup 243
+__SYSCALL(__NR_io_setup, compat_sys_io_setup)
+#define __NR_io_destroy 244
+__SYSCALL(__NR_io_destroy, sys_io_destroy)
+#define __NR_io_getevents 245
+__SYSCALL(__NR_io_getevents, compat_sys_io_getevents)
+#define __NR_io_submit 246
+__SYSCALL(__NR_io_submit, compat_sys_io_submit)
+#define __NR_io_cancel 247
+__SYSCALL(__NR_io_cancel, sys_io_cancel)
+#define __NR_exit_group 248
+__SYSCALL(__NR_exit_group, sys_exit_group)
+#define __NR_lookup_dcookie 249
+__SYSCALL(__NR_lookup_dcookie, compat_sys_lookup_dcookie)
+#define __NR_epoll_create 250
+__SYSCALL(__NR_epoll_create, sys_epoll_create)
+#define __NR_epoll_ctl 251
+__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
+#define __NR_epoll_wait 252
+__SYSCALL(__NR_epoll_wait, sys_epoll_wait)
+#define __NR_remap_file_pages 253
+__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
+			/* 254 for set_thread_area */
+__SYSCALL(254, sys_ni_syscall)
+			/* 255 for get_thread_area */
+__SYSCALL(255, sys_ni_syscall)
+#define __NR_set_tid_address 256
+__SYSCALL(__NR_set_tid_address, sys_set_tid_address)
+#define __NR_timer_create 257
+__SYSCALL(__NR_timer_create, compat_sys_timer_create)
+#define __NR_timer_settime 258
+__SYSCALL(__NR_timer_settime, compat_sys_timer_settime)
+#define __NR_timer_gettime 259
+__SYSCALL(__NR_timer_gettime, compat_sys_timer_gettime)
+#define __NR_timer_getoverrun 260
+__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
+#define __NR_timer_delete 261
+__SYSCALL(__NR_timer_delete, sys_timer_delete)
+#define __NR_clock_settime 262
+__SYSCALL(__NR_clock_settime, compat_sys_clock_settime)
+#define __NR_clock_gettime 263
+__SYSCALL(__NR_clock_gettime, compat_sys_clock_gettime)
+#define __NR_clock_getres 264
+__SYSCALL(__NR_clock_getres, compat_sys_clock_getres)
+#define __NR_clock_nanosleep 265
+__SYSCALL(__NR_clock_nanosleep, compat_sys_clock_nanosleep)
+#define __NR_statfs64 266
+__SYSCALL(__NR_statfs64, compat_sys_statfs64_wrapper)
+#define __NR_fstatfs64 267
+__SYSCALL(__NR_fstatfs64, compat_sys_fstatfs64_wrapper)
+#define __NR_tgkill 268
+__SYSCALL(__NR_tgkill, sys_tgkill)
+#define __NR_utimes 269
+__SYSCALL(__NR_utimes, compat_sys_utimes)
+#define __NR_arm_fadvise64_64 270
+__SYSCALL(__NR_arm_fadvise64_64, compat_sys_fadvise64_64_wrapper)
+#define __NR_pciconfig_iobase 271
+__SYSCALL(__NR_pciconfig_iobase, sys_pciconfig_iobase)
+#define __NR_pciconfig_read 272
+__SYSCALL(__NR_pciconfig_read, sys_pciconfig_read)
+#define __NR_pciconfig_write 273
+__SYSCALL(__NR_pciconfig_write, sys_pciconfig_write)
+#define __NR_mq_open 274
+__SYSCALL(__NR_mq_open, compat_sys_mq_open)
+#define __NR_mq_unlink 275
+__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
+#define __NR_mq_timedsend 276
+__SYSCALL(__NR_mq_timedsend, compat_sys_mq_timedsend)
+#define __NR_mq_timedreceive 277
+__SYSCALL(__NR_mq_timedreceive, compat_sys_mq_timedreceive)
+#define __NR_mq_notify 278
+__SYSCALL(__NR_mq_notify, compat_sys_mq_notify)
+#define __NR_mq_getsetattr 279
+__SYSCALL(__NR_mq_getsetattr, compat_sys_mq_getsetattr)
+#define __NR_waitid 280
+__SYSCALL(__NR_waitid, compat_sys_waitid)
+#define __NR_socket 281
+__SYSCALL(__NR_socket, sys_socket)
+#define __NR_bind 282
+__SYSCALL(__NR_bind, sys_bind)
+#define __NR_connect 283
+__SYSCALL(__NR_connect, sys_connect)
+#define __NR_listen 284
+__SYSCALL(__NR_listen, sys_listen)
+#define __NR_accept 285
+__SYSCALL(__NR_accept, sys_accept)
+#define __NR_getsockname 286
+__SYSCALL(__NR_getsockname, sys_getsockname)
+#define __NR_getpeername 287
+__SYSCALL(__NR_getpeername, sys_getpeername)
+#define __NR_socketpair 288
+__SYSCALL(__NR_socketpair, sys_socketpair)
+#define __NR_send 289
+__SYSCALL(__NR_send, sys_send)
+#define __NR_sendto 290
+__SYSCALL(__NR_sendto, sys_sendto)
+#define __NR_recv 291
+__SYSCALL(__NR_recv, compat_sys_recv)
+#define __NR_recvfrom 292
+__SYSCALL(__NR_recvfrom, compat_sys_recvfrom)
+#define __NR_shutdown 293
+__SYSCALL(__NR_shutdown, sys_shutdown)
+#define __NR_setsockopt 294
+__SYSCALL(__NR_setsockopt, compat_sys_setsockopt)
+#define __NR_getsockopt 295
+__SYSCALL(__NR_getsockopt, compat_sys_getsockopt)
+#define __NR_sendmsg 296
+__SYSCALL(__NR_sendmsg, compat_sys_sendmsg)
+#define __NR_recvmsg 297
+__SYSCALL(__NR_recvmsg, compat_sys_recvmsg)
+#define __NR_semop 298
+__SYSCALL(__NR_semop, sys_semop)
+#define __NR_semget 299
+__SYSCALL(__NR_semget, sys_semget)
+#define __NR_semctl 300
+__SYSCALL(__NR_semctl, compat_sys_semctl)
+#define __NR_msgsnd 301
+__SYSCALL(__NR_msgsnd, compat_sys_msgsnd)
+#define __NR_msgrcv 302
+__SYSCALL(__NR_msgrcv, compat_sys_msgrcv)
+#define __NR_msgget 303
+__SYSCALL(__NR_msgget, sys_msgget)
+#define __NR_msgctl 304
+__SYSCALL(__NR_msgctl, compat_sys_msgctl)
+#define __NR_shmat 305
+__SYSCALL(__NR_shmat, compat_sys_shmat)
+#define __NR_shmdt 306
+__SYSCALL(__NR_shmdt, sys_shmdt)
+#define __NR_shmget 307
+__SYSCALL(__NR_shmget, sys_shmget)
+#define __NR_shmctl 308
+__SYSCALL(__NR_shmctl, compat_sys_shmctl)
+#define __NR_add_key 309
+__SYSCALL(__NR_add_key, sys_add_key)
+#define __NR_request_key 310
+__SYSCALL(__NR_request_key, sys_request_key)
+#define __NR_keyctl 311
+__SYSCALL(__NR_keyctl, compat_sys_keyctl)
+#define __NR_semtimedop 312
+__SYSCALL(__NR_semtimedop, compat_sys_semtimedop)
+#define __NR_vserver 313
+__SYSCALL(__NR_vserver, sys_ni_syscall)
+#define __NR_ioprio_set 314
+__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
+#define __NR_ioprio_get 315
+__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
+#define __NR_inotify_init 316
+__SYSCALL(__NR_inotify_init, sys_inotify_init)
+#define __NR_inotify_add_watch 317
+__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
+#define __NR_inotify_rm_watch 318
+__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
+#define __NR_mbind 319
+__SYSCALL(__NR_mbind, compat_sys_mbind)
+#define __NR_get_mempolicy 320
+__SYSCALL(__NR_get_mempolicy, compat_sys_get_mempolicy)
+#define __NR_set_mempolicy 321
+__SYSCALL(__NR_set_mempolicy, compat_sys_set_mempolicy)
+#define __NR_openat 322
+__SYSCALL(__NR_openat, compat_sys_openat)
+#define __NR_mkdirat 323
+__SYSCALL(__NR_mkdirat, sys_mkdirat)
+#define __NR_mknodat 324
+__SYSCALL(__NR_mknodat, sys_mknodat)
+#define __NR_fchownat 325
+__SYSCALL(__NR_fchownat, sys_fchownat)
+#define __NR_futimesat 326
+__SYSCALL(__NR_futimesat, compat_sys_futimesat)
+#define __NR_fstatat64 327
+__SYSCALL(__NR_fstatat64, sys_fstatat64)
+#define __NR_unlinkat 328
+__SYSCALL(__NR_unlinkat, sys_unlinkat)
+#define __NR_renameat 329
+__SYSCALL(__NR_renameat, sys_renameat)
+#define __NR_linkat 330
+__SYSCALL(__NR_linkat, sys_linkat)
+#define __NR_symlinkat 331
+__SYSCALL(__NR_symlinkat, sys_symlinkat)
+#define __NR_readlinkat 332
+__SYSCALL(__NR_readlinkat, sys_readlinkat)
+#define __NR_fchmodat 333
+__SYSCALL(__NR_fchmodat, sys_fchmodat)
+#define __NR_faccessat 334
+__SYSCALL(__NR_faccessat, sys_faccessat)
+#define __NR_pselect6 335
+__SYSCALL(__NR_pselect6, compat_sys_pselect6)
+#define __NR_ppoll 336
+__SYSCALL(__NR_ppoll, compat_sys_ppoll)
+#define __NR_unshare 337
+__SYSCALL(__NR_unshare, sys_unshare)
+#define __NR_set_robust_list 338
+__SYSCALL(__NR_set_robust_list, compat_sys_set_robust_list)
+#define __NR_get_robust_list 339
+__SYSCALL(__NR_get_robust_list, compat_sys_get_robust_list)
+#define __NR_splice 340
+__SYSCALL(__NR_splice, sys_splice)
+#define __NR_sync_file_range2 341
+__SYSCALL(__NR_sync_file_range2, compat_sys_sync_file_range2_wrapper)
+#define __NR_tee 342
+__SYSCALL(__NR_tee, sys_tee)
+#define __NR_vmsplice 343
+__SYSCALL(__NR_vmsplice, compat_sys_vmsplice)
+#define __NR_move_pages 344
+__SYSCALL(__NR_move_pages, compat_sys_move_pages)
+#define __NR_getcpu 345
+__SYSCALL(__NR_getcpu, sys_getcpu)
+#define __NR_epoll_pwait 346
+__SYSCALL(__NR_epoll_pwait, compat_sys_epoll_pwait)
+#define __NR_kexec_load 347
+__SYSCALL(__NR_kexec_load, compat_sys_kexec_load)
+#define __NR_utimensat 348
+__SYSCALL(__NR_utimensat, compat_sys_utimensat)
+#define __NR_signalfd 349
+__SYSCALL(__NR_signalfd, compat_sys_signalfd)
+#define __NR_timerfd_create 350
+__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
+#define __NR_eventfd 351
+__SYSCALL(__NR_eventfd, sys_eventfd)
+#define __NR_fallocate 352
+__SYSCALL(__NR_fallocate, compat_sys_fallocate_wrapper)
+#define __NR_timerfd_settime 353
+__SYSCALL(__NR_timerfd_settime, compat_sys_timerfd_settime)
+#define __NR_timerfd_gettime 354
+__SYSCALL(__NR_timerfd_gettime, compat_sys_timerfd_gettime)
+#define __NR_signalfd4 355
+__SYSCALL(__NR_signalfd4, compat_sys_signalfd4)
+#define __NR_eventfd2 356
+__SYSCALL(__NR_eventfd2, sys_eventfd2)
+#define __NR_epoll_create1 357
+__SYSCALL(__NR_epoll_create1, sys_epoll_create1)
+#define __NR_dup3 358
+__SYSCALL(__NR_dup3, sys_dup3)
+#define __NR_pipe2 359
+__SYSCALL(__NR_pipe2, sys_pipe2)
+#define __NR_inotify_init1 360
+__SYSCALL(__NR_inotify_init1, sys_inotify_init1)
+#define __NR_preadv 361
+__SYSCALL(__NR_preadv, compat_sys_preadv)
+#define __NR_pwritev 362
+__SYSCALL(__NR_pwritev, compat_sys_pwritev)
+#define __NR_rt_tgsigqueueinfo 363
+__SYSCALL(__NR_rt_tgsigqueueinfo, compat_sys_rt_tgsigqueueinfo)
+#define __NR_perf_event_open 364
+__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
+#define __NR_recvmmsg 365
+__SYSCALL(__NR_recvmmsg, compat_sys_recvmmsg)
+#define __NR_accept4 366
+__SYSCALL(__NR_accept4, sys_accept4)
+#define __NR_fanotify_init 367
+__SYSCALL(__NR_fanotify_init, sys_fanotify_init)
+#define __NR_fanotify_mark 368
+__SYSCALL(__NR_fanotify_mark, compat_sys_fanotify_mark)
+#define __NR_prlimit64 369
+__SYSCALL(__NR_prlimit64, sys_prlimit64)
+#define __NR_name_to_handle_at 370
+__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
+#define __NR_open_by_handle_at 371
+__SYSCALL(__NR_open_by_handle_at, compat_sys_open_by_handle_at)
+#define __NR_clock_adjtime 372
+__SYSCALL(__NR_clock_adjtime, compat_sys_clock_adjtime)
+#define __NR_syncfs 373
+__SYSCALL(__NR_syncfs, sys_syncfs)
+#define __NR_sendmmsg 374
+__SYSCALL(__NR_sendmmsg, compat_sys_sendmmsg)
+#define __NR_setns 375
+__SYSCALL(__NR_setns, sys_setns)
+#define __NR_process_vm_readv 376
+__SYSCALL(__NR_process_vm_readv, compat_sys_process_vm_readv)
+#define __NR_process_vm_writev 377
+__SYSCALL(__NR_process_vm_writev, compat_sys_process_vm_writev)
+#define __NR_kcmp 378
+__SYSCALL(__NR_kcmp, sys_kcmp)
+#define __NR_finit_module 379
+__SYSCALL(__NR_finit_module, sys_finit_module)
+/* #define __NR_sched_setattr 380 */
+__SYSCALL(380, sys_ni_syscall)
+/* #define __NR_sched_getattr 381 */
+__SYSCALL(381, sys_ni_syscall)
+/* #define __NR_renameat2 382 */
+__SYSCALL(382, sys_ni_syscall)
+#define __NR_seccomp 383
+__SYSCALL(__NR_seccomp, sys_seccomp)
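
The __SYSCALL() entries above are inert on their own: they only expand once the including file defines the macro. A minimal sketch of how a consumer is assumed to turn this header into the AArch32 compat call table (the macro body, table type and the __NR_compat_syscalls definition are illustrative, not taken from this patch):

#undef __SYSCALL
#define __SYSCALL(nr, sym)	[nr] = sym,

/* Slots not named in the header fall back to sys_ni_syscall. */
void *compat_sys_call_table[__NR_compat_syscalls] = {
	[0 ... __NR_compat_syscalls - 1] = sys_ni_syscall,
#include <asm/unistd32.h>
};
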
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index eea4975..73cf0f5 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -21,6 +21,11 @@
  */
 #define HWCAP_FP		(1 << 0)
 #define HWCAP_ASIMD		(1 << 1)
-
+#define HWCAP_EVTSTRM		(1 << 2)
+#define HWCAP_AES		(1 << 3)
+#define HWCAP_PMULL		(1 << 4)
+#define HWCAP_SHA1		(1 << 5)
+#define HWCAP_SHA2		(1 << 6)
+#define HWCAP_CRC32		(1 << 7)
 
 #endif /* _UAPI__ASM_HWCAP_H */
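
Each new bit lands in the ELF auxiliary vector, so user space can test for the corresponding instructions without parsing /proc/cpuinfo. A user-space sketch (the numeric bit positions mirror the defines above; HWCAP_* could equally be taken from the uapi header itself):

/* User-space probe for the new hwcap bits via the aux vector. */
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	if (hwcap & (1UL << 3))		/* HWCAP_AES */
		printf("AES instructions available\n");
	if (hwcap & (1UL << 7))		/* HWCAP_CRC32 */
		printf("CRC32 instructions available\n");
	return 0;
}
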
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index 6913643..49c6174 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -23,6 +23,7 @@
 
 #include <asm/hwcap.h>
 
+#define PTRACE_SET_SYSCALL	23
 
 /*
  * PSR bits
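
PTRACE_SET_SYSCALL gives a tracer a way to rewrite the pending syscall number; the arch_ptrace() hunk in ptrace.c below simply stores the value into the tracee's syscallno. A tracer-side sketch, assuming 'pid' is a tracee stopped at a syscall-entry stop (the helper name is hypothetical):

#include <sys/ptrace.h>
#include <sys/types.h>

#define PTRACE_SET_SYSCALL	23	/* matches the definition above */

static long set_syscall_nr(pid_t pid, long nr)
{
	/* nr == -1 conventionally asks the kernel to skip the syscall. */
	return ptrace((enum __ptrace_request)PTRACE_SET_SYSCALL,
		      pid, 0, (void *)nr);
}
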
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 7b4b564..3e67066 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -9,15 +9,18 @@
 arm64-obj-y		:= cputable.o debug-monitors.o entry.o irq.o fpsimd.o	\
 			   entry-fpsimd.o process.o ptrace.o setup.o signal.o	\
 			   sys.o stacktrace.o time.o traps.o io.o vdso.o	\
-			   hyp-stub.o psci.o
+			   hyp-stub.o psci.o opcodes.o
 
-arm64-obj-$(CONFIG_COMPAT)		+= sys32.o kuser32.o signal32.o 	\
+arm64-obj-$(CONFIG_COMPAT)		+= sys32.o kuser32.o signal32.o	\
 					   sys_compat.o
 arm64-obj-$(CONFIG_MODULES)		+= arm64ksyms.o module.o
 arm64-obj-$(CONFIG_SMP)			+= smp.o smp_spin_table.o smp_psci.o
 arm64-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
 arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o
 arm64-obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
+arm64-obj-$(CONFIG_ARM_CPU_SUSPEND)	+= sleep.o suspend.o
+
+obj-$(CONFIG_SWP_EMULATE)	+= swp_emulate.o
 
 obj-y					+= $(arm64-obj-y) vdso/
 obj-m					+= $(arm64-obj-m)
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 49e6e30..e5532ef 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -24,6 +24,7 @@
 #include <linux/init.h>
 #include <linux/ptrace.h>
 #include <linux/stat.h>
+#include <linux/uaccess.h>
 
 #include <asm/debug-monitors.h>
 #include <asm/local.h>
@@ -226,13 +227,74 @@
 	return 0;
 }
 
-static int __init single_step_init(void)
+static int brk_handler(unsigned long addr, unsigned int esr,
+		       struct pt_regs *regs)
+{
+	siginfo_t info;
+
+	if (!user_mode(regs))
+		return -EFAULT;
+
+	info = (siginfo_t) {
+		.si_signo = SIGTRAP,
+		.si_errno = 0,
+		.si_code  = TRAP_BRKPT,
+		.si_addr  = (void __user *)instruction_pointer(regs),
+	};
+
+	force_sig_info(SIGTRAP, &info, current);
+	return 0;
+}
+
+int aarch32_break_handler(struct pt_regs *regs)
+{
+	siginfo_t info;
+	unsigned int instr;
+	bool bp = false;
+	void __user *pc = (void __user *)instruction_pointer(regs);
+
+	if (!compat_user_mode(regs))
+		return -EFAULT;
+
+	if (compat_thumb_mode(regs)) {
+		/* get 16-bit Thumb instruction */
+		get_user(instr, (u16 __user *)pc);
+		if (instr == AARCH32_BREAK_THUMB2_LO) {
+			/* get second half of 32-bit Thumb-2 instruction */
+			get_user(instr, (u16 __user *)(pc + 2));
+			bp = instr == AARCH32_BREAK_THUMB2_HI;
+		} else {
+			bp = instr == AARCH32_BREAK_THUMB;
+		}
+	} else {
+		/* 32-bit ARM instruction */
+		get_user(instr, (u32 __user *)pc);
+		bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM;
+	}
+
+	if (!bp)
+		return -EFAULT;
+
+	info = (siginfo_t) {
+		.si_signo = SIGTRAP,
+		.si_errno = 0,
+		.si_code  = TRAP_BRKPT,
+		.si_addr  = pc,
+	};
+
+	force_sig_info(SIGTRAP, &info, current);
+	return 0;
+}
+
+static int __init debug_traps_init(void)
 {
 	hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP,
 			      TRAP_HWBKPT, "single-step handler");
+	hook_debug_fault_code(DBG_ESR_EVT_BRK, brk_handler, SIGTRAP,
+			      TRAP_BRKPT, "ptrace BRK handler");
 	return 0;
 }
-arch_initcall(single_step_init);
+arch_initcall(debug_traps_init);
 
 /* Re-enable single step for syscall restarting. */
 void user_rewind_single_step(struct task_struct *task)
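
brk_handler() turns a BRK hit in user space into SIGTRAP/TRAP_BRKPT, which is what a debugger relies on when it plants a software breakpoint. A debugger-side sketch, assuming a little-endian tracee and omitting error checks (A64_BRK encodes BRK #imm16; plant_breakpoint is hypothetical):

#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#define A64_BRK(imm16)	(0xd4200000u | ((uint32_t)(imm16) << 5))

static long plant_breakpoint(pid_t pid, uintptr_t addr)
{
	/* POKETEXT writes a full 8-byte word on arm64, so read-modify-write
	 * to preserve the neighbouring instruction. */
	long word = ptrace(PTRACE_PEEKTEXT, pid, (void *)addr, 0);

	word = (word & ~0xffffffffL) | A64_BRK(0);
	return ptrace(PTRACE_POKETEXT, pid, (void *)addr, (void *)word);
}
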
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index 6a27cd6..d358cca 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -41,3 +41,27 @@
 	fpsimd_restore x0, 8
 	ret
 ENDPROC(fpsimd_load_state)
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+/*
+ * Save the bottom n FP registers.
+ *
+ * x0 - pointer to struct fpsimd_partial_state
+ */
+ENTRY(fpsimd_save_partial_state)
+	fpsimd_save_partial x0, 1, 8, 9
+	ret
+ENDPROC(fpsimd_save_partial_state)
+
+/*
+ * Load the bottom n FP registers.
+ *
+ * x0 - pointer to struct fpsimd_partial_state
+ */
+ENTRY(fpsimd_load_partial_state)
+	fpsimd_restore_partial x0, 8, 9
+	ret
+ENDPROC(fpsimd_load_partial_state)
+
+#endif
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 5d515e6..d24b4c4 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -25,9 +25,9 @@
 #include <asm/asm-offsets.h>
 #include <asm/errno.h>
 #include <asm/esr.h>
+#include <asm/ptrace.h>
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
-#include <asm/unistd32.h>
 
 /*
  * Bad Abort numbers
@@ -422,6 +422,7 @@
 	 * Data abort handling
 	 */
 	mrs	x0, far_el1
+	bic	x0, x0, #(0xff << 56)
 	disable_step x1
 	isb
 	enable_dbg
@@ -585,7 +586,7 @@
 	str	x0, [sp, #S_X0]			// returned x0
 work_pending:
 	tbnz	x1, #TIF_NEED_RESCHED, work_resched
-	/* TIF_SIGPENDING or TIF_NOTIFY_RESUME case */
+	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
 	ldr	x2, [sp, #S_PSTATE]
 	mov	x0, sp				// 'regs'
 	tst	x2, #PSR_MODE_MASK		// user mode regs?
@@ -640,8 +641,9 @@
 	enable_irq
 
 	get_thread_info tsk
-	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall tracing
-	tbnz	x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls?
+	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
+	tst	x16, #_TIF_SYSCALL_WORK
+	b.ne	__sys_trace
 	adr	lr, ret_fast_syscall		// return address
 	cmp     scno, sc_nr                     // check upper syscall limit
 	b.hs	ni_sys
@@ -657,10 +659,13 @@
 	 * switches, and waiting for our parent to respond.
 	 */
 __sys_trace:
-	mov	x1, sp
-	mov	w0, #0				// trace entry
-	bl	syscall_trace
+	mov	x0, sp
+	bl	syscall_trace_enter
 	adr	lr, __sys_trace_return		// return address
+	cmp	w0, #RET_SKIP_SYSCALL_TRACE	// skip syscall and tracing?
+	b.eq	ret_to_user
+	cmp	w0, #RET_SKIP_SYSCALL		// skip syscall?
+	b.eq	__sys_trace_return_skipped
 	uxtw	scno, w0			// syscall number (possibly new)
 	mov	x1, sp				// pointer to regs
 	cmp	scno, sc_nr			// check upper syscall limit
@@ -674,9 +679,9 @@
 
 __sys_trace_return:
 	str	x0, [sp]			// save returned x0
-	mov	x1, sp
-	mov	w0, #1				// trace exit
-	bl	syscall_trace
+__sys_trace_return_skipped:			// x0 already in regs[0]
+	mov	x0, sp
+	bl	syscall_trace_exit
 	b	ret_to_user
 
 /*
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 2fa308e..2df693e 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -33,6 +33,60 @@
 #define FPEXC_IDF	(1 << 7)
 
 /*
+ * In order to reduce the number of times the FPSIMD state is needlessly saved
+ * and restored, we need to keep track of two things:
+ * (a) for each task, we need to remember which CPU was the last one to have
+ *     the task's FPSIMD state loaded into its FPSIMD registers;
+ * (b) for each CPU, we need to remember which task's userland FPSIMD state has
+ *     been loaded into its FPSIMD registers most recently, or whether it has
+ *     been used to perform kernel mode NEON in the meantime.
+ *
+ * For (a), we add a 'cpu' field to struct fpsimd_state, which gets updated to
+ * the id of the current CPU every time the state is loaded onto a CPU. For (b),
+ * we add the per-cpu variable 'fpsimd_last_state' (below), which contains the
+ * address of the userland FPSIMD state of the task most recently loaded onto
+ * the CPU, or NULL if kernel mode NEON has been performed since then.
+ *
+ * With this in place, we no longer have to restore the next FPSIMD state right
+ * when switching between tasks. Instead, we can defer this check to userland
+ * resume, at which time we verify whether the CPU's fpsimd_last_state and the
+ * task's fpsimd_state.cpu are still mutually in sync. If this is the case, we
+ * can omit the FPSIMD restore.
+ *
+ * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to
+ * indicate whether or not the userland FPSIMD state of the current task is
+ * present in the registers. The flag is set unless the FPSIMD registers of this
+ * CPU currently contain the most recent userland FPSIMD state of the current
+ * task.
+ *
+ * For a certain task, the sequence may look something like this:
+ * - the task gets scheduled in; if the task's fpsimd_state.cpu field contains
+ *   the id of the current CPU and the CPU's fpsimd_last_state per-cpu variable
+ *   points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is
+ *   cleared; otherwise it is set;
+ *
+ * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's
+ *   userland FPSIMD state is copied from memory to the registers, the task's
+ *   fpsimd_state.cpu field is set to the id of the current CPU, the current
+ *   CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the
+ *   TIF_FOREIGN_FPSTATE flag is cleared;
+ *
+ * - the task executes an ordinary syscall; upon return to userland, the
+ *   TIF_FOREIGN_FPSTATE flag will still be cleared, so no FPSIMD state is
+ *   restored;
+ *
+ * - the task executes a syscall which executes some NEON instructions; this is
+ *   preceded by a call to kernel_neon_begin(), which copies the task's FPSIMD
+ *   register contents to memory, clears the fpsimd_last_state per-cpu variable
+ *   and sets the TIF_FOREIGN_FPSTATE flag;
+ *
+ * - the task gets preempted after kernel_neon_end() is called; as we have not
+ *   returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
+ *   whatever is in the FPSIMD registers is not saved to memory, but discarded.
+ */
+static DEFINE_PER_CPU(struct fpsimd_state *, fpsimd_last_state);
+
+/*
  * Trapped FP/ASIMD access.
  */
 void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
@@ -70,22 +124,179 @@
 
 void fpsimd_thread_switch(struct task_struct *next)
 {
-	/* check if not kernel threads */
-	if (current->mm)
+	/*
+	 * Save the current FPSIMD state to memory, but only if whatever is in
+	 * the registers is in fact the most recent userland FPSIMD state of
+	 * 'current'.
+	 */
+	if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE))
 		fpsimd_save_state(&current->thread.fpsimd_state);
-	if (next->mm)
-		fpsimd_load_state(&next->thread.fpsimd_state);
+
+	if (next->mm) {
+		/*
+		 * If we are switching to a task whose most recent userland
+		 * FPSIMD state is already in the registers of *this* cpu,
+		 * we can skip loading the state from memory. Otherwise, set
+		 * the TIF_FOREIGN_FPSTATE flag so the state will be loaded
+		 * upon the next return to userland.
+		 */
+		struct fpsimd_state *st = &next->thread.fpsimd_state;
+
+		if (__this_cpu_read(fpsimd_last_state) == st
+		    && st->cpu == smp_processor_id())
+			clear_ti_thread_flag(task_thread_info(next),
+					     TIF_FOREIGN_FPSTATE);
+		else
+			set_ti_thread_flag(task_thread_info(next),
+					   TIF_FOREIGN_FPSTATE);
+	}
 }
 
 void fpsimd_flush_thread(void)
 {
 	preempt_disable();
 	memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
-	fpsimd_load_state(&current->thread.fpsimd_state);
+	fpsimd_load_state(&current->thread.fpsimd_state);
+	preempt_enable();
+}
+
+/*
+ * Save the userland FPSIMD state of 'current' to memory, but only if the state
+ * currently held in the registers does in fact belong to 'current'
+ */
+void fpsimd_preserve_current_state(void)
+{
+	preempt_disable();
+	if (!test_thread_flag(TIF_FOREIGN_FPSTATE))
+		fpsimd_save_state(&current->thread.fpsimd_state);
 	preempt_enable();
 }
 
 /*
+ * Load the userland FPSIMD state of 'current' from memory, but only if the
+ * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
+ * state of 'current'
+ */
+void fpsimd_restore_current_state(void)
+{
+	preempt_disable();
+	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
+		struct fpsimd_state *st = &current->thread.fpsimd_state;
+
+		fpsimd_load_state(st);
+		this_cpu_write(fpsimd_last_state, st);
+		st->cpu = smp_processor_id();
+	}
+	preempt_enable();
+}
+
+/*
+ * Load an updated userland FPSIMD state for 'current' from memory and set the
+ * flag that indicates that the FPSIMD register contents are the most recent
+ * FPSIMD state of 'current'
+ */
+void fpsimd_update_current_state(struct fpsimd_state *state)
+{
+	preempt_disable();
+	fpsimd_load_state(state);
+	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
+		struct fpsimd_state *st = &current->thread.fpsimd_state;
+
+		this_cpu_write(fpsimd_last_state, st);
+		st->cpu = smp_processor_id();
+	}
+	preempt_enable();
+}
+
+/*
+ * Invalidate live CPU copies of task t's FPSIMD state
+ */
+void fpsimd_flush_task_state(struct task_struct *t)
+{
+	t->thread.fpsimd_state.cpu = NR_CPUS;
+}
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+static DEFINE_PER_CPU(struct fpsimd_partial_state, hardirq_fpsimdstate);
+static DEFINE_PER_CPU(struct fpsimd_partial_state, softirq_fpsimdstate);
+
+/*
+ * Kernel-side NEON support functions
+ */
+void kernel_neon_begin_partial(u32 num_regs)
+{
+	if (in_interrupt()) {
+		struct fpsimd_partial_state *s = this_cpu_ptr(
+			in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate);
+
+		BUG_ON(num_regs > 32);
+		fpsimd_save_partial_state(s, roundup(num_regs, 2));
+	} else {
+		/*
+		 * Save the userland FPSIMD state if we have one and if we
+		 * haven't done so already. Clear fpsimd_last_state to indicate
+		 * that there is no longer userland FPSIMD state in the
+		 * registers.
+		 */
+		preempt_disable();
+		if (current->mm &&
+		    !test_and_set_thread_flag(TIF_FOREIGN_FPSTATE))
+			fpsimd_save_state(&current->thread.fpsimd_state);
+		this_cpu_write(fpsimd_last_state, NULL);
+	}
+}
+EXPORT_SYMBOL(kernel_neon_begin_partial);
+
+void kernel_neon_end(void)
+{
+	if (in_interrupt()) {
+		struct fpsimd_partial_state *s = this_cpu_ptr(
+			in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate);
+		fpsimd_load_partial_state(s);
+	} else {
+		preempt_enable();
+	}
+}
+EXPORT_SYMBOL(kernel_neon_end);
+
+#endif /* CONFIG_KERNEL_MODE_NEON */
+
+#ifdef CONFIG_CPU_PM
+static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
+				  unsigned long cmd, void *v)
+{
+	switch (cmd) {
+	case CPU_PM_ENTER:
+		if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE))
+			fpsimd_save_state(&current->thread.fpsimd_state);
+		this_cpu_write(fpsimd_last_state, NULL);
+		break;
+	case CPU_PM_EXIT:
+		if (current->mm)
+			set_thread_flag(TIF_FOREIGN_FPSTATE);
+		break;
+	case CPU_PM_ENTER_FAILED:
+	default:
+		return NOTIFY_DONE;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block fpsimd_cpu_pm_notifier_block = {
+	.notifier_call = fpsimd_cpu_pm_notifier,
+};
+
+static void fpsimd_pm_init(void)
+{
+	cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
+}
+
+#else
+static inline void fpsimd_pm_init(void) { }
+#endif /* CONFIG_CPU_PM */
+
+/*
  * FP/SIMD support code initialisation.
  */
 static int __init fpsimd_init(void)
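
kernel_neon_begin_partial()/kernel_neon_end() bracket any kernel-mode use of the FPSIMD registers; asking for only the registers you touch keeps the interrupt-context spill cheap. A sketch of a hypothetical in-kernel caller (toy_neon_op and its body are illustrative, not part of this patch):

#include <linux/types.h>
#include <asm/neon.h>

static void toy_neon_op(u8 *dst, const u8 *src, size_t len)
{
	kernel_neon_begin_partial(4);	/* claim v0-v3 only */
	/* ... NEON instructions restricted to v0-v3 would go here ... */
	kernel_neon_end();
}
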
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 5ab825c..68e371e 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -20,13 +20,13 @@
 
 #define pr_fmt(fmt) "hw-breakpoint: " fmt
 
+#include <linux/compat.h>
 #include <linux/errno.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/perf_event.h>
 #include <linux/ptrace.h>
 #include <linux/smp.h>
 
-#include <asm/compat.h>
 #include <asm/current.h>
 #include <asm/debug-monitors.h>
 #include <asm/hw_breakpoint.h>
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S
index 8b69ecb..2f5b3ff 100644
--- a/arch/arm64/kernel/kuser32.S
+++ b/arch/arm64/kernel/kuser32.S
@@ -27,6 +27,9 @@
  *
  * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
  */
+
+#include <asm/unistd.h>
+
 	.align	5
 	.globl	__kuser_helper_start
 __kuser_helper_start:
diff --git a/arch/arm64/kernel/opcodes.c b/arch/arm64/kernel/opcodes.c
new file mode 100644
index 0000000..ceb5a04
--- /dev/null
+++ b/arch/arm64/kernel/opcodes.c
@@ -0,0 +1,72 @@
+/*
+ *  Copied from linux/arch/arm/kernel/opcodes.c
+ *
+ *  A32 condition code lookup feature moved from nwfpe/fpopcode.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <asm/opcodes.h>
+
+#define ARM_OPCODE_CONDITION_UNCOND 0xf
+
+/*
+ * condition code lookup table
+ * index into the table is test code: EQ, NE, ... LT, GT, AL, NV
+ *
+ * bit position in short is condition code: NZCV
+ */
+static const unsigned short cc_map[16] = {
+	0xF0F0,			/* EQ == Z set            */
+	0x0F0F,			/* NE                     */
+	0xCCCC,			/* CS == C set            */
+	0x3333,			/* CC                     */
+	0xFF00,			/* MI == N set            */
+	0x00FF,			/* PL                     */
+	0xAAAA,			/* VS == V set            */
+	0x5555,			/* VC                     */
+	0x0C0C,			/* HI == C set && Z clear */
+	0xF3F3,			/* LS == C clear || Z set */
+	0xAA55,			/* GE == (N==V)           */
+	0x55AA,			/* LT == (N!=V)           */
+	0x0A05,			/* GT == (!Z && (N==V))   */
+	0xF5FA,			/* LE == (Z || (N!=V))    */
+	0xFFFF,			/* AL always              */
+	0			/* NV                     */
+};
+
+/*
+ * Returns:
+ * ARM_OPCODE_CONDTEST_FAIL   - if condition fails
+ * ARM_OPCODE_CONDTEST_PASS   - if condition passes (including AL)
+ * ARM_OPCODE_CONDTEST_UNCOND - if NV condition, or separate unconditional
+ *                              opcode space from v5 onwards
+ *
+ * Code that tests whether a conditional instruction would pass its condition
+ * check should check that return value == ARM_OPCODE_CONDTEST_PASS.
+ *
+ * Code that tests if a condition means that the instruction would be executed
+ * (regardless of conditional or unconditional) should instead check that the
+ * return value != ARM_OPCODE_CONDTEST_FAIL.
+ */
+asmlinkage unsigned int arm_check_condition(u32 opcode, u64 psr)
+{
+	u32 cc_bits  = opcode >> 28;
+	u32 psr_cond = (u32)(psr & 0xffffffff) >> 28;
+	unsigned int ret;
+
+	if (cc_bits != ARM_OPCODE_CONDITION_UNCOND) {
+		if ((cc_map[cc_bits] >> (psr_cond)) & 1)
+			ret = ARM_OPCODE_CONDTEST_PASS;
+		else
+			ret = ARM_OPCODE_CONDTEST_FAIL;
+	} else {
+		ret = ARM_OPCODE_CONDTEST_UNCOND;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(arm_check_condition);
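
A quick worked example of the lookup: opcode 0x0a000000 is BEQ (condition field 0); with only PSR.Z set, psr_cond is 0b0100, and bit 4 of cc_map[0] (0xF0F0) is set, so the call returns ARM_OPCODE_CONDTEST_PASS. A sketch of a caller that only cares whether the instruction would execute at all:

#include <linux/types.h>
#include <asm/opcodes.h>

static bool insn_would_execute(u32 opcode, u64 psr)
{
	/* AL and the unconditional space both count as "would execute". */
	return arm_check_condition(opcode, psr) != ARM_OPCODE_CONDTEST_FAIL;
}
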
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 0860fc3..78259d0 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -20,6 +20,7 @@
 
 #include <stdarg.h>
 
+#include <linux/compat.h>
 #include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -141,6 +142,70 @@
 	while (1);
 }
 
+/*
+ * dump a block of kernel memory from around the given address
+ */
+static void show_data(unsigned long addr, int nbytes, const char *name)
+{
+	int	i, j;
+	int	nlines;
+	u32	*p;
+
+	/*
+	 * don't attempt to dump non-kernel addresses or
+	 * values that are probably just small negative numbers
+	 */
+	if (addr < PAGE_OFFSET || addr > -256UL)
+		return;
+
+	printk("\n%s: %#lx:\n", name, addr);
+
+	/*
+	 * round address down to a 32 bit boundary
+	 * and always dump a multiple of 32 bytes
+	 */
+	p = (u32 *)(addr & ~(sizeof(u32) - 1));
+	nbytes += (addr & (sizeof(u32) - 1));
+	nlines = (nbytes + 31) / 32;
+
+	for (i = 0; i < nlines; i++) {
+		/*
+		 * just display low 16 bits of address to keep
+		 * each line of the dump < 80 characters
+		 */
+		printk("%04lx ", (unsigned long)p & 0xffff);
+		for (j = 0; j < 8; j++) {
+			u32	data;
+			if (probe_kernel_address(p, data)) {
+				printk(" ********");
+			} else {
+				printk(" %08x", data);
+			}
+			++p;
+		}
+		printk("\n");
+	}
+}
+
+static void show_extra_register_data(struct pt_regs *regs, int nbytes)
+{
+	mm_segment_t fs;
+	unsigned int i;
+
+	fs = get_fs();
+	set_fs(KERNEL_DS);
+	show_data(regs->pc - nbytes, nbytes * 2, "PC");
+	show_data(regs->regs[30] - nbytes, nbytes * 2, "LR");
+	show_data(regs->sp - nbytes, nbytes * 2, "SP");
+	for (i = 0; i < 30; i++) {
+		char name[4];
+		snprintf(name, sizeof(name), "X%u", i);
+		show_data(regs->regs[i] - nbytes, nbytes * 2, name);
+	}
+	set_fs(fs);
+}
+
 void __show_regs(struct pt_regs *regs)
 {
 	int i;
@@ -156,6 +221,8 @@
 		if (i % 2 == 0)
 			printk("\n");
 	}
+	if (!user_mode(regs))
+		show_extra_register_data(regs, 128);
 	printk("\n");
 }
 
@@ -202,7 +269,7 @@
 
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
-	fpsimd_save_state(&current->thread.fpsimd_state);
+	fpsimd_preserve_current_state();
 	*dst = *src;
 	return 0;
 }
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index dfad98f..8027b1d 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -19,12 +19,15 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/audit.h>
+#include <linux/compat.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
 #include <linux/ptrace.h>
 #include <linux/user.h>
+#include <linux/seccomp.h>
 #include <linux/security.h>
 #include <linux/init.h>
 #include <linux/signal.h>
@@ -38,6 +41,7 @@
 #include <asm/compat.h>
 #include <asm/debug-monitors.h>
 #include <asm/pgtable.h>
+#include <asm/syscall.h>
 #include <asm/traps.h>
 #include <asm/system_misc.h>
 
@@ -59,28 +63,6 @@
 	user_disable_single_step(child);
 }
 
-/*
- * Handle hitting a breakpoint.
- */
-static int ptrace_break(struct pt_regs *regs)
-{
-	siginfo_t info = {
-		.si_signo = SIGTRAP,
-		.si_errno = 0,
-		.si_code  = TRAP_BRKPT,
-		.si_addr  = (void __user *)instruction_pointer(regs),
-	};
-
-	force_sig_info(SIGTRAP, &info, current);
-	return 0;
-}
-
-static int arm64_break_trap(unsigned long addr, unsigned int esr,
-			    struct pt_regs *regs)
-{
-	return ptrace_break(regs);
-}
-
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 /*
  * Handle hitting a HW-breakpoint.
@@ -546,6 +528,7 @@
 		return ret;
 
 	target->thread.fpsimd_state.user_fpsimd = newstate;
+	fpsimd_flush_task_state(target);
 	return ret;
 }
 
@@ -795,6 +778,7 @@
 		uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
 	}
 
+	fpsimd_flush_task_state(target);
 	return ret;
 }
 
@@ -822,33 +806,6 @@
 	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
 };
 
-int aarch32_break_trap(struct pt_regs *regs)
-{
-	unsigned int instr;
-	bool bp = false;
-	void __user *pc = (void __user *)instruction_pointer(regs);
-
-	if (compat_thumb_mode(regs)) {
-		/* get 16-bit Thumb instruction */
-		get_user(instr, (u16 __user *)pc);
-		if (instr == AARCH32_BREAK_THUMB2_LO) {
-			/* get second half of 32-bit Thumb-2 instruction */
-			get_user(instr, (u16 __user *)(pc + 2));
-			bp = instr == AARCH32_BREAK_THUMB2_HI;
-		} else {
-			bp = instr == AARCH32_BREAK_THUMB;
-		}
-	} else {
-		/* 32-bit ARM instruction */
-		get_user(instr, (u32 __user *)pc);
-		bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM;
-	}
-
-	if (bp)
-		return ptrace_break(regs);
-	return 1;
-}
-
 static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
 				   compat_ulong_t __user *ret)
 {
@@ -1117,48 +1074,87 @@
 long arch_ptrace(struct task_struct *child, long request,
 		 unsigned long addr, unsigned long data)
 {
-	return ptrace_request(child, request, addr, data);
-}
+	int ret;
 
-
-static int __init ptrace_break_init(void)
-{
-	hook_debug_fault_code(DBG_ESR_EVT_BRK, arm64_break_trap, SIGTRAP,
-			      TRAP_BRKPT, "ptrace BRK handler");
-	return 0;
-}
-core_initcall(ptrace_break_init);
-
-
-asmlinkage int syscall_trace(int dir, struct pt_regs *regs)
-{
-	unsigned long saved_reg;
-
-	if (!test_thread_flag(TIF_SYSCALL_TRACE))
-		return regs->syscallno;
-
-	if (is_compat_task()) {
-		/* AArch32 uses ip (r12) for scratch */
-		saved_reg = regs->regs[12];
-		regs->regs[12] = dir;
-	} else {
-		/*
-		 * Save X7. X7 is used to denote syscall entry/exit:
-		 *   X7 = 0 -> entry, = 1 -> exit
-		 */
-		saved_reg = regs->regs[7];
-		regs->regs[7] = dir;
+	switch (request) {
+		case PTRACE_SET_SYSCALL:
+			task_pt_regs(child)->syscallno = data;
+			ret = 0;
+			break;
+		default:
+			ret = ptrace_request(child, request, addr, data);
+			break;
 	}
 
-	if (dir)
+	return ret;
+}
+
+enum ptrace_syscall_dir {
+	PTRACE_SYSCALL_ENTER = 0,
+	PTRACE_SYSCALL_EXIT,
+};
+
+static void tracehook_report_syscall(struct pt_regs *regs,
+				     enum ptrace_syscall_dir dir)
+{
+	int regno;
+	unsigned long saved_reg;
+
+	/*
+	 * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
+	 * used to denote syscall entry/exit:
+	 */
+	regno = (is_compat_task() ? 12 : 7);
+	saved_reg = regs->regs[regno];
+	regs->regs[regno] = dir;
+
+	if (dir == PTRACE_SYSCALL_EXIT)
 		tracehook_report_syscall_exit(regs, 0);
 	else if (tracehook_report_syscall_entry(regs))
 		regs->syscallno = ~0UL;
 
-	if (is_compat_task())
-		regs->regs[12] = saved_reg;
-	else
-		regs->regs[7] = saved_reg;
+	regs->regs[regno] = saved_reg;
+}
+
+asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+{
+	unsigned int saved_syscallno = regs->syscallno;
+
+	/* Do the secure computing check first; failures should be fast. */
+	if (secure_computing(regs->syscallno) == -1)
+		return RET_SKIP_SYSCALL_TRACE;
+
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+
+	if (IS_SKIP_SYSCALL(regs->syscallno)) {
+		/*
+		 * RESTRICTION: we can't modify the return value of a
+		 * user-issued syscall(-1) here. To cope with this, we
+		 * treat whatever value is in x0 as the return value,
+		 * which might result in a bogus value being returned.
+		 */
+		/*
+		 * NOTE: syscallno may also be set to -1 if a fatal signal is
+		 * detected in tracehook_report_syscall_entry(), but since
+		 * the value set in x0 is not used in that case, we can
+		 * safely ignore it.
+		 */
+		if (!test_thread_flag(TIF_SYSCALL_TRACE) ||
+				(IS_SKIP_SYSCALL(saved_syscallno)))
+			regs->regs[0] = -ENOSYS;
+	}
+
+	audit_syscall_entry(syscall_get_arch(), regs->syscallno,
+		regs->orig_x0, regs->regs[1], regs->regs[2], regs->regs[3]);
 
 	return regs->syscallno;
 }
+
+asmlinkage void syscall_trace_exit(struct pt_regs *regs)
+{
+	audit_syscall_exit(regs);
+
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
+}
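
The x7/r12 convention above is what lets a tracer tell a syscall-entry stop from a syscall-exit stop. A user-space sketch, assuming an AArch64 tracee already stopped under PTRACE_SYSCALL (report_stop is hypothetical; on arm64 the syscall number sits in x8):

#include <elf.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>

static void report_stop(pid_t pid)
{
	struct user_regs_struct regs;
	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };

	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
	/* x7 == 0 -> syscall entry, x7 == 1 -> syscall exit */
	printf("syscall %llu: %s\n", regs.regs[8],
	       regs.regs[7] ? "exit" : "entry");
}
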
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 7cc551d..323f779 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -58,9 +58,20 @@
 unsigned int processor_id;
 EXPORT_SYMBOL(processor_id);
 
-unsigned int elf_hwcap __read_mostly;
+unsigned long elf_hwcap __read_mostly;
 EXPORT_SYMBOL_GPL(elf_hwcap);
 
+#ifdef CONFIG_COMPAT
+#define COMPAT_ELF_HWCAP_DEFAULT	\
+				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
+				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
+				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
+				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
+				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
+unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
+unsigned int compat_elf_hwcap2 __read_mostly;
+#endif
+
 static const char *cpu_name;
 static const char *machine_name;
 phys_addr_t __fdt_pointer __initdata;
@@ -114,6 +125,7 @@
 static void __init setup_processor(void)
 {
 	struct cpu_info *cpu_info;
+	u64 features, block;
 
 	/*
 	 * locate processor in the list of supported processor
@@ -134,6 +146,69 @@
 
 	sprintf(init_utsname()->machine, "aarch64");
 	elf_hwcap = 0;
+
+	/*
+	 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
+	 * The blocks we test below represent incremental functionality
+	 * for non-negative values. Negative values are reserved.
+	 */
+	features = read_cpuid(ID_AA64ISAR0_EL1);
+	block = (features >> 4) & 0xf;
+	if (!(block & 0x8)) {
+		switch (block) {
+		default:
+		case 2:
+			elf_hwcap |= HWCAP_PMULL;
+		case 1:
+			elf_hwcap |= HWCAP_AES;
+		case 0:
+			break;
+		}
+	}
+
+	block = (features >> 8) & 0xf;
+	if (block && !(block & 0x8))
+		elf_hwcap |= HWCAP_SHA1;
+
+	block = (features >> 12) & 0xf;
+	if (block && !(block & 0x8))
+		elf_hwcap |= HWCAP_SHA2;
+
+	block = (features >> 16) & 0xf;
+	if (block && !(block & 0x8))
+		elf_hwcap |= HWCAP_CRC32;
+
+#ifdef CONFIG_COMPAT
+	/*
+	 * ID_ISAR5_EL1 carries similar information to the above, but
+	 * pertains to the AArch32 execution state.
+	 */
+	features = read_cpuid(ID_ISAR5_EL1);
+	block = (features >> 4) & 0xf;
+	if (!(block & 0x8)) {
+		switch (block) {
+		default:
+		case 2:
+			compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
+		case 1:
+			compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
+		case 0:
+			break;
+		}
+	}
+
+	block = (features >> 8) & 0xf;
+	if (block && !(block & 0x8))
+		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;
+
+	block = (features >> 12) & 0xf;
+	if (block && !(block & 0x8))
+		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;
+
+	block = (features >> 16) & 0xf;
+	if (block && !(block & 0x8))
+		compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
+#endif
 }
 
 static void __init setup_machine_fdt(phys_addr_t dt_phys)
@@ -323,6 +398,12 @@
 static const char *hwcap_str[] = {
 	"fp",
 	"asimd",
+	"evtstrm",
+	"aes",
+	"pmull",
+	"sha1",
+	"sha2",
+	"crc32",
 	NULL
 };
 
@@ -372,34 +453,36 @@
 		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
 			   loops_per_jiffy / (500000UL/HZ),
 			   loops_per_jiffy / (5000UL/HZ) % 100);
-
-		/*
-		 * Dump out the common processor features in a single line.
-		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
-		 * rather than attempting to parse this, but there's a body of
-		 * software which does already (at least for 32-bit).
-		 */
-		seq_puts(m, "Features\t:");
-		if (personality(current->personality) == PER_LINUX32) {
-#ifdef CONFIG_COMPAT
-			for (j = 0; compat_hwcap_str[j]; j++)
-				if (COMPAT_ELF_HWCAP & (1 << j))
-					seq_printf(m, " %s", compat_hwcap_str[j]);
-#endif /* CONFIG_COMPAT */
-		} else {
-			for (j = 0; hwcap_str[j]; j++)
-				if (elf_hwcap & (1 << j))
-					seq_printf(m, " %s", hwcap_str[j]);
-		}
-		seq_puts(m, "\n");
-
-		seq_printf(m, "CPU implementer\t: 0x%02x\n", (midr >> 24));
-		seq_printf(m, "CPU architecture: 8\n");
-		seq_printf(m, "CPU variant\t: 0x%x\n", ((midr >> 20) & 0xf));
-		seq_printf(m, "CPU part\t: 0x%03x\n", ((midr >> 4) & 0xfff));
-		seq_printf(m, "CPU revision\t: %d\n\n", (midr & 0xf));
 	}
 
+	/* dump out the processor features */
+	seq_puts(m, "Features\t: ");
+
+	for (i = 0; hwcap_str[i]; i++)
+		if (elf_hwcap & (1 << i))
+			seq_printf(m, "%s ", hwcap_str[i]);
+#ifdef CONFIG_ARMV7_COMPAT_CPUINFO
+	if (is_compat_task()) {
+		/* Print out the non-optional ARMv8 HW capabilities */
+		seq_printf(m, "wp half thumb fastmult vfp edsp neon vfpv3 tlsi ");
+		seq_printf(m, "vfpv4 idiva idivt ");
+	}
+#endif
+
+	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
+	seq_printf(m, "CPU architecture: %s\n",
+#if IS_ENABLED(CONFIG_ARMV7_COMPAT_CPUINFO)
+			is_compat_task() ? "8" :
+#endif
+			"AArch64");
+	seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15);
+	seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff);
+	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
+
+	seq_puts(m, "\n");
+
+	seq_printf(m, "Hardware\t: %s\n", machine_name);
+
 	return 0;
 }
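
The comment removed above already directs userspace to getauxval(AT_HWCAP) instead of parsing the Features line; a minimal check for one of the new bits looks like the sketch below (the HWCAP_AES fallback assumes the bit order of the hwcap_str table above, fp/asimd/evtstrm/aes, when asm/hwcap.h is not available):

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_AES
#define HWCAP_AES	(1 << 3)	/* assumption: fp=0, asimd=1, evtstrm=2, aes=3 */
#endif

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("aes: %s\n", (hwcap & HWCAP_AES) ? "yes" : "no");
	return 0;
}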
 
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 890a591..182b6fc 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -17,6 +17,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/compat.h>
 #include <linux/errno.h>
 #include <linux/signal.h>
 #include <linux/personality.h>
@@ -25,7 +26,6 @@
 #include <linux/tracehook.h>
 #include <linux/ratelimit.h>
 
-#include <asm/compat.h>
 #include <asm/debug-monitors.h>
 #include <asm/elf.h>
 #include <asm/cacheflush.h>
@@ -51,7 +51,7 @@
 	int err;
 
 	/* dump the hardware registers to the fpsimd_state structure */
-	fpsimd_save_state(fpsimd);
+	fpsimd_preserve_current_state();
 
 	/* copy the FP and status/control registers */
 	err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
@@ -86,11 +86,8 @@
 	__get_user_error(fpsimd.fpcr, &ctx->fpcr, err);
 
 	/* load the hardware registers from the fpsimd_state structure */
-	if (!err) {
-		preempt_disable();
-		fpsimd_load_state(&fpsimd);
-		preempt_enable();
-	}
+	if (!err)
+		fpsimd_update_current_state(&fpsimd);
 
 	return err ? -EFAULT : 0;
 }
@@ -416,4 +413,8 @@
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
 	}
+
+	if (thread_flags & _TIF_FOREIGN_FPSTATE)
+		fpsimd_restore_current_state();
+
 }
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 1e60acc..ec6cac5 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -26,7 +26,7 @@
 #include <asm/fpsimd.h>
 #include <asm/signal32.h>
 #include <asm/uaccess.h>
-#include <asm/unistd32.h>
+#include <asm/unistd.h>
 
 struct compat_sigcontext {
 	/* We always set these two fields to 0 */
@@ -211,6 +211,14 @@
 		err |= __put_user(from->si_uid, &to->si_uid);
 		err |= __put_user(from->si_int, &to->si_int);
 		break;
+#ifdef __ARCH_SIGSYS
+	case __SI_SYS:
+		err |= __put_user((compat_uptr_t)(unsigned long)
+				from->si_call_addr, &to->si_call_addr);
+		err |= __put_user(from->si_syscall, &to->si_syscall);
+		err |= __put_user(from->si_arch, &to->si_arch);
+		break;
+#endif
 	default: /* this is just in case for now ... */
 		err |= __put_user(from->si_pid, &to->si_pid);
 		err |= __put_user(from->si_uid, &to->si_uid);
@@ -263,7 +271,7 @@
 	 * Note that this also saves V16-31, which aren't visible
 	 * in AArch32.
 	 */
-	fpsimd_save_state(fpsimd);
+	fpsimd_preserve_current_state();
 
 	/* Place structure header on the stack */
 	__put_user_error(magic, &frame->magic, err);
@@ -333,11 +341,8 @@
 	 * We don't need to touch the exception register, so
 	 * reload the hardware state.
 	 */
-	if (!err) {
-		preempt_disable();
-		fpsimd_load_state(&fpsimd);
-		preempt_enable();
-	}
+	if (!err)
+		fpsimd_update_current_state(&fpsimd);
 
 	return err ? -EFAULT : 0;
 }
diff --git a/arch/arm64/kernel/swp_emulate.c b/arch/arm64/kernel/swp_emulate.c
new file mode 100644
index 0000000..508fd2e
--- /dev/null
+++ b/arch/arm64/kernel/swp_emulate.c
@@ -0,0 +1,223 @@
+/*
+ *  Derived from linux/arch/arm/kernel/swp_emulate.c
+ *
+ *  Copyright (C) 2009 ARM Limited
+ *  Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Implements emulation of the SWP/SWPB instructions using load-exclusive and
+ *  store-exclusive for processors that have them disabled (or future ones that
+ *  might not implement them).
+ *
+ *  Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>]
+ *  Where: Rt  = destination
+ *	   Rt2 = source
+ *	   Rn  = address
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/sched.h>
+#include <linux/syscalls.h>
+#include <linux/perf_event.h>
+
+#include <asm/opcodes.h>
+#include <asm/traps.h>
+#include <asm/uaccess.h>
+#include <asm/system_misc.h>
+#include <linux/debugfs.h>
+
+/*
+ * Error-checking SWP routines implemented using ldxr{b}/stxr{b}
+ */
+
+static int swpb(u8 in, u8 *out, u8 *addr)
+{
+	u8 _out;
+	int res;
+	int err;
+
+	do {
+		__asm__ __volatile__(
+		"0:	ldxrb	%w1, %4\n"
+		"1:	stxrb	%w0, %w3, %4\n"
+		"	mov	%w2, #0\n"
+		"2:\n"
+		"	.section	 .fixup,\"ax\"\n"
+		"	.align		2\n"
+		"3:	mov	%w2, %5\n"
+		"	b	2b\n"
+		"	.previous\n"
+		"	.section	 __ex_table,\"a\"\n"
+		"	.align		3\n"
+		"	.quad		0b, 3b\n"
+		"	.quad		1b, 3b\n"
+		"	.previous"
+		: "=&r" (res), "=r" (_out), "=r" (err)
+		: "r" (in), "Q" (*addr), "i" (-EFAULT)
+		: "cc", "memory");
+	} while (err == 0 && res != 0);
+
+	if (err == 0)
+		*out = _out;
+	return err;
+}
+
+static int swp(u32 in, u32 *out, u32 *addr)
+{
+	u32 _out;
+	int res;
+	int err = 0;
+
+	do {
+		__asm__ __volatile__(
+		"0:	ldxr	%w1, %4\n"
+		"1:	stxr	%w0, %w3, %4\n"
+		"	mov	%w2, #0\n"
+		"2:\n"
+		"	.section	 .fixup,\"ax\"\n"
+		"	.align		2\n"
+		"3:	mov	%w2, %5\n"
+		"	b	2b\n"
+		"	.previous\n"
+		"	.section	 __ex_table,\"a\"\n"
+		"	.align		3\n"
+		"	.quad		0b, 3b\n"
+		"	.quad		1b, 3b\n"
+		"	.previous"
+		: "=&r" (res), "=r" (_out), "=r" (err)
+		: "r" (in), "Q" (*addr), "i" (-EFAULT)
+		: "cc", "memory");
+	} while (err == 0 && res != 0);
+
+	if (err == 0)
+		*out = _out;
+	return err;
+}
+/*
+ * Macros/defines for extracting register numbers from instruction.
+ */
+#define EXTRACT_REG_NUM(instruction, offset) \
+	(((instruction) & (0xf << (offset))) >> (offset))
+#define RN_OFFSET  16
+#define RT_OFFSET  12
+#define RT2_OFFSET  0
+/*
+ * Bit 22 of the instruction encoding distinguishes between
+ * the SWP and SWPB variants (bit set means SWPB).
+ */
+#define TYPE_SWPB (1 << 22)
+
+static pid_t previous_pid;
+
+u64 swpb_count;
+u64 swp_count;
+
+/*
+ * swp_handler logs the id of the calling process, dissects the instruction,
+ * sanity-checks the memory location, calls swp/swpb for the actual operation
+ * and deals with fixup/error handling before returning
+ */
+static int swp_handler(struct pt_regs *regs, unsigned int instr)
+{
+	u32 destreg, data, type;
+	uintptr_t address;
+	unsigned int res = 0;
+	int err;
+	u32 temp32;
+	u8 temp8;
+
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
+
+	res = arm_check_condition(instr, regs->pstate);
+	switch (res) {
+	case ARM_OPCODE_CONDTEST_PASS:
+		break;
+	case ARM_OPCODE_CONDTEST_FAIL:
+		/* Condition failed - return to next instruction */
+		regs->pc += 4;
+		return 0;
+	case ARM_OPCODE_CONDTEST_UNCOND:
+		/* If unconditional encoding - not a SWP, undef */
+		return -EFAULT;
+	default:
+		return -EINVAL;
+	}
+
+	if (current->pid != previous_pid) {
+		pr_warn("\"%s\" (%ld) uses obsolete SWP{B} instruction\n",
+			 current->comm, (unsigned long)current->pid);
+		previous_pid = current->pid;
+	}
+
+	address = regs->regs[EXTRACT_REG_NUM(instr, RN_OFFSET)] & 0xffffffff;
+	data = regs->regs[EXTRACT_REG_NUM(instr, RT2_OFFSET)];
+	destreg = EXTRACT_REG_NUM(instr, RT_OFFSET);
+
+	type = instr & TYPE_SWPB;
+
+	/* Check access in reasonable access range for both SWP and SWPB */
+	if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
+		pr_debug("SWP{B} emulation: access to %p not allowed!\n",
+			 (void *)address);
+		return -EFAULT;
+	}
+	if (type == TYPE_SWPB) {
+		err = swpb((u8) data, &temp8, (u8 *) address);
+		if (err)
+			return err;
+		regs->regs[destreg] = temp8;
+		regs->pc += 4;
+		swpb_count++;
+	} else if (address & 0x3) {
+		/* SWP to unaligned address not permitted */
+		pr_debug("SWP instruction on unaligned pointer!\n");
+		return -EFAULT;
+	} else {
+		err = swp((u32) data, &temp32, (u32 *) address);
+		if (err)
+			return err;
+		regs->regs[destreg] = temp32;
+		regs->pc += 4;
+		swp_count++;
+	}
+
+	return 0;
+}
+
+/*
+ * Only emulate SWP/SWPB executed in ARM state/User mode.
+ * The kernel must be SWP free and SWP{B} does not exist in Thumb/ThumbEE.
+ */
+static struct undef_hook swp_hook = {
+	.instr_mask	= 0x0fb00ff0,
+	.instr_val	= 0x01000090,
+	.pstate_mask	= COMPAT_PSR_MODE_MASK | COMPAT_PSR_T_BIT,
+	.pstate_val	= COMPAT_PSR_MODE_USR,
+	.fn		= swp_handler
+};
+
+/*
+ * Register handler and create status file in /proc/cpu
+ * Invoked as late_initcall, since not needed before init spawned.
+ */
+static int __init swp_emulation_init(void)
+{
+	struct dentry *dir;
+	dir = debugfs_create_dir("swp_emulate", NULL);
+	debugfs_create_u64("swp_count", S_IRUGO | S_IWUSR, dir, &swp_count);
+	debugfs_create_u64("swpb_count", S_IRUGO | S_IWUSR, dir, &swpb_count);
+
+	pr_notice("Registering SWP/SWPB emulation handler\n");
+	register_undef_hook(&swp_hook);
+
+	return 0;
+}
+
+late_initcall(swp_emulation_init);
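
To see what the extraction macros above pull out of an encoding, here is a small host-side sketch run on a hand-assembled `swp r0, r1, [r2]` (0xE1020091, built from the 0x01000090 base pattern in swp_hook plus the three register fields; the value is illustrative):

#include <stdio.h>

#define EXTRACT_REG_NUM(instruction, offset) \
	(((instruction) & (0xf << (offset))) >> (offset))
#define RN_OFFSET  16
#define RT_OFFSET  12
#define RT2_OFFSET  0
#define TYPE_SWPB (1 << 22)

int main(void)
{
	unsigned int instr = 0xE1020091;	/* swp r0, r1, [r2] */

	printf("Rn=r%u Rt=r%u Rt2=r%u byte=%s\n",
	       EXTRACT_REG_NUM(instr, RN_OFFSET),
	       EXTRACT_REG_NUM(instr, RT_OFFSET),
	       EXTRACT_REG_NUM(instr, RT2_OFFSET),
	       (instr & TYPE_SWPB) ? "yes" : "no");
	return 0;
}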
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index 7803992..dc47e53 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -26,7 +26,7 @@
 #include <linux/uaccess.h>
 
 #include <asm/cacheflush.h>
-#include <asm/unistd32.h>
+#include <asm/unistd.h>
 
 static inline void
 do_compat_cache_op(unsigned long start, unsigned long end, int flags)
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 488a7b3..5c4d778 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -3,6 +3,7 @@
  *
  * Copyright (C) 1995-2009 Russell King
  * Copyright (C) 2012 ARM Ltd.
+ * Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -32,6 +33,7 @@
 #include <linux/syscalls.h>
 
 #include <asm/atomic.h>
+#include <asm/debug-monitors.h>
 #include <asm/traps.h>
 #include <asm/stacktrace.h>
 #include <asm/exception.h>
@@ -256,17 +258,58 @@
 		die(str, regs, err);
 }
 
+static LIST_HEAD(undef_hook);
+
+void register_undef_hook(struct undef_hook *hook)
+{
+	list_add(&hook->node, &undef_hook);
+}
+
+static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
+{
+	struct undef_hook *hook;
+	int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;
+
+	list_for_each_entry(hook, &undef_hook, node)
+		if ((instr & hook->instr_mask) == hook->instr_val &&
+		    (regs->pstate & hook->pstate_mask) == hook->pstate_val)
+			fn = hook->fn;
+
+	return fn ? fn(regs, instr) : 1;
+}
+
 asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 {
+	u32 instr;
 	siginfo_t info;
 	void __user *pc = (void __user *)instruction_pointer(regs);
 
-#ifdef CONFIG_COMPAT
 	/* check for AArch32 breakpoint instructions */
-	if (compat_user_mode(regs) && aarch32_break_trap(regs) == 0)
+	if (!aarch32_break_handler(regs))
 		return;
-#endif
+	if (user_mode(regs)) {
+		if (compat_thumb_mode(regs)) {
+			if (get_user(instr, (u16 __user *)pc))
+				goto die_sig;
+			if (is_wide_instruction(instr)) {
+				u32 instr2;
+				if (get_user(instr2, (u16 __user *)pc+1))
+					goto die_sig;
+				instr <<= 16;
+				instr |= instr2;
+			}
+		} else if (get_user(instr, (u32 __user *)pc)) {
+			goto die_sig;
+		}
+	} else {
+		/* kernel mode */
+		instr = *((u32 *)pc);
+	}
 
+	if (call_undef_hook(regs, instr) == 0)
+		return;
+
+die_sig:
 	if (show_unhandled_signals && unhandled_signal(current, SIGILL) &&
 	    printk_ratelimit()) {
 		pr_info("%s[%d]: undefined instruction: pc=%p\n",
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 5c55166..b016660 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -109,9 +109,11 @@
 }
 
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
+#define PFN_MASK ((1UL << (64 - PAGE_SHIFT)) - 1)
+
 int pfn_valid(unsigned long pfn)
 {
-	return memblock_is_memory(pfn << PAGE_SHIFT);
+	return (pfn & PFN_MASK) == pfn && memblock_is_memory(pfn << PAGE_SHIFT);
 }
 EXPORT_SYMBOL(pfn_valid);
 #endif
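
The new mask guards against a wild pfn whose shifted value wraps modulo 2^64 and thereby aliases a perfectly valid physical address; a sketch of the failure mode, assuming PAGE_SHIFT == 12:

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumption for illustration */
#define PFN_MASK ((1UL << (64 - PAGE_SHIFT)) - 1)

int main(void)
{
	unsigned long good = 0x1000;			/* sane pfn      */
	unsigned long wild = (1UL << 52) | 0x1000;	/* high bits set */

	/* both shift to the same physical address: the wild pfn wraps */
	printf("%#lx %#lx\n", good << PAGE_SHIFT, wild << PAGE_SHIFT);
	printf("mask rejects wild pfn: %d\n", (wild & PFN_MASK) != wild);
	return 0;
}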
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 8aaf073..83c3df2 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -51,9 +51,14 @@
 {
 	unsigned long rnd = 0;
 
-	if (current->flags & PF_RANDOMIZE)
-		rnd = (long)get_random_int() & STACK_RND_MASK;
-
+	if (current->flags & PF_RANDOMIZE) {
+#ifdef CONFIG_COMPAT
+		if (test_thread_flag(TIF_32BIT))
+			rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
+		else
+#endif
+			rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
+	}
 	return rnd << PAGE_SHIFT;
 }
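
The shift turns mmap_rnd_bits of entropy into a page-aligned offset, so the randomisation window scales as 2^(mmap_rnd_bits + PAGE_SHIFT); a quick sanity check with illustrative values (28 bits and 4 KiB pages, matching the x86 64-bit minimum added later in this patch):

#include <stdio.h>

int main(void)
{
	const unsigned int mmap_rnd_bits = 28;	/* assumption       */
	const unsigned int page_shift = 12;	/* 4 KiB pages      */
	unsigned long span = 1UL << (mmap_rnd_bits + page_shift);

	/* 28 random bits << 12 => the base varies over a 1 TiB window */
	printf("randomisation window: %lu GiB\n", span >> 30);
	return 0;
}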
 
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index f84fcf7..25929f6 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -147,7 +147,7 @@
 	 * both user and kernel.
 	 */
 	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_FLAGS | TCR_IPS_40BIT | \
-		      TCR_ASID16 | (1 << 31)
+		      TCR_ASID16 | TCR_TBI0 | (1 << 31)
 #ifdef CONFIG_ARM64_64K_PAGES
 	orr	x10, x10, TCR_TG0_64K
 	orr	x10, x10, TCR_TG1_64K
@@ -156,6 +156,20 @@
 	ret					// return to head.S
 ENDPROC(__cpu_setup)
 
+#ifdef CONFIG_ARMV7_COMPAT
+	/*
+	 *                 n n            T
+	 *       U E      WT T UD     US IHBS
+	 *       CE0      XWHW CZ     ME TEEA S
+	 * .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
+	 * 0011 0... 1101 ..0. ..0. 10.. .... .... < hardware reserved
+	 * .... .100 .... 01.1 11.1 ..01 0011 1101 < software settings
+	 */
+	.type	crval, #object
+crval:
+	.word	0x030802e2			// clear
+	.word	0x0405d03d			// set
+#else
 	/*
 	 *                 n n            T
 	 *       U E      WT T UD     US IHBS
@@ -168,3 +182,4 @@
 crval:
 	.word	0x030802e2			// clear
 	.word	0x0405d11d			// set
+#endif
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index 60576e0..d0a69aa 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -45,14 +45,37 @@
 # define smp_rmb()	rmb()
 # define smp_wmb()	wmb()
 # define smp_read_barrier_depends()	read_barrier_depends()
+
 #else
+
 # define smp_mb()	barrier()
 # define smp_rmb()	barrier()
 # define smp_wmb()	barrier()
 # define smp_read_barrier_depends()	do { } while(0)
+
 #endif
 
 /*
+ * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq;
+ * no need for asm trickery!
+ */
+
+#define smp_store_release(p, v)						\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	barrier();							\
+	ACCESS_ONCE(*p) = (v);						\
+} while (0)
+
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	barrier();							\
+	___p1;								\
+})
+
+/*
  * XXX check on this ---I suspect what Linus really wants here is
  * acquire vs release semantics but we can't discuss this stuff with
  * Linus just yet.  Grrr...
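
The same publish/consume pattern motivates every per-arch copy of these macros in this patch (metag, mips, powerpc, s390 and sparc64 below); a generic kernel-style sketch with illustrative names:

struct foo {
	int payload;
};

static struct foo *global_foo;

/* producer: initialise the object fully, then publish the pointer */
static void publish(struct foo *f)
{
	f->payload = 42;
	smp_store_release(&global_foo, f);	/* writes above stay above */
}

/* consumer: an acquire read orders all later accesses after it */
static int consume(void)
{
	struct foo *f = smp_load_acquire(&global_foo);

	return f ? f->payload : -1;	/* sees payload == 42 if f != NULL */
}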
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
index e355a4c..2d6f0de 100644
--- a/arch/metag/include/asm/barrier.h
+++ b/arch/metag/include/asm/barrier.h
@@ -85,4 +85,19 @@
 #define smp_read_barrier_depends()     do { } while (0)
 #define set_mb(var, value) do { var = value; smp_mb(); } while (0)
 
+#define smp_store_release(p, v)						\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	smp_mb();							\
+	ACCESS_ONCE(*p) = (v);						\
+} while (0)
+
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	smp_mb();							\
+	___p1;								\
+})
+
 #endif /* _ASM_METAG_BARRIER_H */
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index 314ab55..52c5b61 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -180,4 +180,19 @@
 #define nudge_writes() mb()
 #endif
 
+#define smp_store_release(p, v)						\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	smp_mb();							\
+	ACCESS_ONCE(*p) = (v);						\
+} while (0)
+
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	smp_mb();							\
+	___p1;								\
+})
+
 #endif /* __ASM_BARRIER_H */
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index acb3437..5d2a69c 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -23,7 +23,7 @@
 	unsigned int version = cpu_data[n].processor_id;
 	unsigned int fp_vers = cpu_data[n].fpu_id;
 	char fmt [64];
-	int i;
+	int cpu, i;
 
 #ifdef CONFIG_SMP
 	if (!cpu_online(n))
@@ -43,9 +43,11 @@
 	seq_printf(m, "processor\t\t: %ld\n", n);
 	sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n",
 		      cpu_data[n].options & MIPS_CPU_FPU ? "  FPU V%d.%d" : "");
-	seq_printf(m, fmt, __cpu_name[n],
+	cpu = get_cpu();
+	seq_printf(m, fmt, __cpu_name[cpu],
 		      (version >> 4) & 0x0f, version & 0x0f,
 		      (fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
+	put_cpu();
 	seq_printf(m, "BogoMIPS\t\t: %u.%02u\n",
 		      cpu_data[n].udelay_val / (500000/HZ),
 		      (cpu_data[n].udelay_val / (5000/HZ)) % 100);
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index c2ec87e..ea8138a 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -386,12 +386,12 @@
 
 static void r4k__flush_cache_vmap(void)
 {
-	r4k_blast_dcache();
+	r4k_on_each_cpu((void *)r4k_blast_dcache, NULL);
 }
 
 static void r4k__flush_cache_vunmap(void)
 {
-	r4k_blast_dcache();
+	r4k_on_each_cpu((void *)r4k_blast_dcache, NULL);
 }
 
 static inline void local_r4k_flush_cache_range(void * args)
@@ -619,7 +619,7 @@
 	 * explicitly
 	 */
 	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
-		r4k_blast_dcache();
+		r4k_on_each_cpu((void *)r4k_blast_dcache, NULL);
 	} else {
 		R4600_HIT_CACHEOP_WAR_IMPL;
 		blast_dcache_range(addr, addr + size);
@@ -656,7 +656,7 @@
 	}
 
 	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
-		r4k_blast_dcache();
+		r4k_on_each_cpu((void *)r4k_blast_dcache, NULL);
 	} else {
 		R4600_HIT_CACHEOP_WAR_IMPL;
 		blast_inv_dcache_range(addr, addr + size);
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 7e5fe27..358820b 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -147,7 +147,7 @@
 	unsigned long random_factor = 0UL;
 
 	if (current->flags & PF_RANDOMIZE) {
-		random_factor = get_random_int();
+		random_factor = get_random_long();
 		random_factor = random_factor << PAGE_SHIFT;
 		if (TASK_IS_32BIT_ADDR)
 			random_factor &= 0xfffffful;
@@ -168,7 +168,7 @@
 
 static inline unsigned long brk_rnd(void)
 {
-	unsigned long rnd = get_random_int();
+	unsigned long rnd = get_random_long();
 
 	rnd = rnd << PAGE_SHIFT;
 	/* 8MB for 32bit, 256MB for 64bit */
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index a91a7a9..4c02e93 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -484,6 +484,7 @@
 	case CPU_R4700:
 	case CPU_R5000:
 	case CPU_NEVADA:
+	case CPU_4KC:
 		uasm_i_nop(p);
 		uasm_i_tlbp(p);
 		break;
@@ -550,6 +551,7 @@
 
 	case CPU_R4600:
 	case CPU_R4700:
+	case CPU_4KC:
 		uasm_i_nop(p);
 		tlbw(p);
 		uasm_i_nop(p);
@@ -574,7 +576,6 @@
 	case CPU_R10000:
 	case CPU_R12000:
 	case CPU_R14000:
-	case CPU_4KC:
 	case CPU_4KEC:
 	case CPU_M14KC:
 	case CPU_M14KEC:
diff --git a/arch/parisc/hpux/fs.c b/arch/parisc/hpux/fs.c
index 838b479..88d0962 100644
--- a/arch/parisc/hpux/fs.c
+++ b/arch/parisc/hpux/fs.c
@@ -60,6 +60,7 @@
 };
 
 struct getdents_callback {
+	struct dir_context ctx;
 	struct hpux_dirent __user *current_dir;
 	struct hpux_dirent __user *previous;
 	int count;
@@ -110,24 +111,23 @@
 {
 	struct fd arg;
 	struct hpux_dirent __user * lastdirent;
-	struct getdents_callback buf;
+	struct getdents_callback buf = {
+		.ctx.actor = filldir,
+		.current_dir = dirent,
+		.count = count
+	};
 	int error;
 
 	arg = fdget(fd);
 	if (!arg.file)
 		return -EBADF;
 
-	buf.current_dir = dirent;
-	buf.previous = NULL;
-	buf.count = count;
-	buf.error = 0;
-
-	error = vfs_readdir(arg.file, filldir, &buf);
+	error = iterate_dir(arg.file, &buf.ctx);
 	if (error >= 0)
 		error = buf.error;
 	lastdirent = buf.previous;
 	if (lastdirent) {
-		if (put_user(arg.file->f_pos, &lastdirent->d_off))
+		if (put_user(buf.ctx.pos, &lastdirent->d_off))
 			error = -EFAULT;
 		else
 			error = count - buf.count;
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 56a4a5d..d05cfed 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -194,6 +194,9 @@
 zImage: relocs_check
 endif
 
+# These targets cannot be built in parallel
+.NOTPARALLEL: $(BOOT_TARGETS)
+
 $(BOOT_TARGETS1): vmlinux
 	$(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
 $(BOOT_TARGETS2): vmlinux
@@ -240,7 +243,7 @@
 archclean:
 	$(Q)$(MAKE) $(clean)=$(boot)
 
-archprepare: checkbin
+archprepare: checkbin arch/powerpc/lib/crtsavres.o
 
 # Use the file '.tmp_gas_check' for binutils tests, as gas won't output
 # to stdout and these checks are run even on install targets.
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 6a15c96..61a5cdf 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -20,7 +20,7 @@
 all: $(obj)/zImage
 
 BOOTCFLAGS    := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-		 -fno-strict-aliasing -Os -msoft-float -pipe \
+		 -fno-strict-aliasing -msoft-float -pipe \
 		 -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
 		 -isystem $(shell $(CROSS32CC) -print-file-name=include)
 BOOTAFLAGS	:= -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index ae78225..f89da80 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -45,11 +45,15 @@
 #    define SMPWMB      eieio
 #endif
 
+#define __lwsync()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
+
 #define smp_mb()	mb()
-#define smp_rmb()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
+#define smp_rmb()	__lwsync()
 #define smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
 #define smp_read_barrier_depends()	read_barrier_depends()
 #else
+#define __lwsync()	barrier()
+
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
@@ -65,4 +69,19 @@
 #define data_barrier(x)	\
 	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
 
+#define smp_store_release(p, v)						\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	__lwsync();							\
+	ACCESS_ONCE(*p) = (v);						\
+} while (0)
+
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	__lwsync();							\
+	___p1;								\
+})
+
 #endif /* _ASM_POWERPC_BARRIER_H */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index d55357e..47936dc 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1450,9 +1450,9 @@
 
 	/* 8MB for 32bit, 1GB for 64bit */
 	if (is_32bit_task())
-		rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
+		rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
 	else
-		rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
+		rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
 
 	return rnd << PAGE_SHIFT;
 }
diff --git a/arch/powerpc/kernel/vdso32/sigtramp.S b/arch/powerpc/kernel/vdso32/sigtramp.S
index cf0c9c9..78c88af 100644
--- a/arch/powerpc/kernel/vdso32/sigtramp.S
+++ b/arch/powerpc/kernel/vdso32/sigtramp.S
@@ -251,6 +251,40 @@
   vsave_msr1 (31);							\
   vsave_msr2 (33, 32*16+12);						\
   vsave      (32, 32*16)
+#elif defined(CONFIG_SPE)
+#define EH_FRAME_VMX \
+  rsave (113, VREGS);							\
+  rsave (114, VREGS + 1*4);						\
+  rsave (115, VREGS + 2*4);						\
+  rsave (116, VREGS + 3*4);						\
+  rsave (117, VREGS + 4*4);						\
+  rsave (118, VREGS + 5*4);						\
+  rsave (119, VREGS + 6*4);						\
+  rsave (120, VREGS + 7*4);						\
+  rsave (121, VREGS + 8*4);						\
+  rsave (122, VREGS + 9*4);						\
+  rsave (123, VREGS + 10*4);						\
+  rsave (124, VREGS + 11*4);						\
+  rsave (125, VREGS + 12*4);						\
+  rsave (126, VREGS + 13*4);						\
+  rsave (127, VREGS + 14*4);						\
+  rsave (128, VREGS + 15*4);						\
+  rsave (129, VREGS + 16*4);						\
+  rsave (130, VREGS + 17*4);						\
+  rsave (131, VREGS + 18*4);						\
+  rsave (132, VREGS + 19*4);						\
+  rsave (133, VREGS + 20*4);						\
+  rsave (134, VREGS + 21*4);						\
+  rsave (135, VREGS + 22*4);						\
+  rsave (136, VREGS + 23*4);						\
+  rsave (137, VREGS + 24*4);						\
+  rsave (138, VREGS + 25*4);						\
+  rsave (139, VREGS + 26*4);						\
+  rsave (140, VREGS + 27*4);						\
+  rsave (141, VREGS + 28*4);						\
+  rsave (142, VREGS + 29*4);						\
+  rsave (143, VREGS + 30*4);						\
+  rsave (144, VREGS + 31*4);
 #else
 #define EH_FRAME_VMX
 #endif
diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
index 67a42ed..2803d90 100644
--- a/arch/powerpc/mm/mmap_64.c
+++ b/arch/powerpc/mm/mmap_64.c
@@ -60,9 +60,9 @@
 	if (current->flags & PF_RANDOMIZE) {
 		/* 8MB for 32bit, 1GB for 64bit */
 		if (is_32bit_task())
-			rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
+			rnd = get_random_long() % (1<<(23-PAGE_SHIFT));
 		else
-			rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
+			rnd = get_random_long() % (1UL<<(30-PAGE_SHIFT));
 	}
 	return rnd << PAGE_SHIFT;
 }
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index a538c80..eb42124 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -313,6 +313,7 @@
 	asm volatile("isync":::"memory");
 	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
 	asm volatile("isync; slbia; isync":::"memory");
+	mb();
 	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);
 
 	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 16760ee..578680f 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -32,4 +32,19 @@
 
 #define set_mb(var, value)		do { var = value; mb(); } while (0)
 
+#define smp_store_release(p, v)						\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	barrier();							\
+	ACCESS_ONCE(*p) = (v);						\
+} while (0)
+
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	barrier();							\
+	___p1;								\
+})
+
 #endif /* __ASM_BARRIER_H */
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index 749313b..25b4b1f 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -89,11 +89,10 @@
 		regs->orig_gpr2 = args[0];
 }
 
-static inline int syscall_get_arch(struct task_struct *task,
-				   struct pt_regs *regs)
+static inline int syscall_get_arch(void)
 {
 #ifdef CONFIG_COMPAT
-	if (test_tsk_thread_flag(task, TIF_31BIT))
+	if (test_tsk_thread_flag(current, TIF_31BIT))
 		return AUDIT_ARCH_S390;
 #endif
 	return sizeof(long) == 8 ? AUDIT_ARCH_S390X : AUDIT_ARCH_S390;
diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
index 95d4598..b5aad96 100644
--- a/arch/sparc/include/asm/barrier_64.h
+++ b/arch/sparc/include/asm/barrier_64.h
@@ -53,4 +53,19 @@
 
 #define smp_read_barrier_depends()	do { } while(0)
 
+#define smp_store_release(p, v)						\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	barrier();							\
+	ACCESS_ONCE(*p) = (v);						\
+} while (0)
+
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	barrier();							\
+	___p1;								\
+})
+
 #endif /* !(__SPARC64_BARRIER_H) */
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 666510b..c2c11c7 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -265,7 +265,7 @@
 	unsigned long rnd = 0UL;
 
 	if (current->flags & PF_RANDOMIZE) {
-		unsigned long val = get_random_int();
+		unsigned long val = get_random_long();
 		if (test_thread_flag(TIF_32BIT))
 			rnd = (val % (1UL << (23UL-PAGE_SHIFT)));
 		else
diff --git a/arch/um/include/shared/frame_kern.h b/arch/um/include/shared/frame_kern.h
index e584e40..f2ca570 100644
--- a/arch/um/include/shared/frame_kern.h
+++ b/arch/um/include/shared/frame_kern.h
@@ -6,13 +6,13 @@
 #ifndef __FRAME_KERN_H_
 #define __FRAME_KERN_H_
 
-extern int setup_signal_stack_sc(unsigned long stack_top, int sig, 
+extern int setup_signal_stack_sc(unsigned long stack_top, int sig,
 				 struct k_sigaction *ka,
-				 struct pt_regs *regs, 
+				 struct pt_regs *regs,
 				 sigset_t *mask);
-extern int setup_signal_stack_si(unsigned long stack_top, int sig, 
+extern int setup_signal_stack_si(unsigned long stack_top, int sig,
 				 struct k_sigaction *ka,
-				 struct pt_regs *regs, siginfo_t *info, 
+				 struct pt_regs *regs, struct siginfo *info,
 				 sigset_t *mask);
 
 #endif
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index 3e831b3..f57e02e 100644
--- a/arch/um/kernel/signal.c
+++ b/arch/um/kernel/signal.c
@@ -19,7 +19,7 @@
  * OK, we're invoking a handler
  */
 static void handle_signal(struct pt_regs *regs, unsigned long signr,
-			 struct k_sigaction *ka, siginfo_t *info)
+			 struct k_sigaction *ka, struct siginfo *info)
 {
 	sigset_t *oldset = sigmask_to_save();
 	int singlestep = 0;
@@ -71,7 +71,7 @@
 static int kern_do_signal(struct pt_regs *regs)
 {
 	struct k_sigaction ka_copy;
-	siginfo_t info;
+	struct siginfo info;
 	int sig, handled_sig = 0;
 
 	while ((sig = get_signal_to_deliver(&info, &ka_copy, regs, NULL)) > 0) {
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index 9d9f1b4..905924b 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -25,7 +25,7 @@
 	[SIGIO]		= sigio_handler,
 	[SIGVTALRM]	= timer_handler };
 
-static void sig_handler_common(int sig, siginfo_t *si, mcontext_t *mc)
+static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc)
 {
 	struct uml_pt_regs r;
 	int save_errno = errno;
@@ -61,7 +61,7 @@
 static int signals_enabled;
 static unsigned int signals_pending;
 
-void sig_handler(int sig, siginfo_t *si, mcontext_t *mc)
+void sig_handler(int sig, struct siginfo *si, mcontext_t *mc)
 {
 	int enabled;
 
@@ -120,7 +120,7 @@
 		panic("enabling signal stack failed, errno = %d\n", errno);
 }
 
-static void (*handlers[_NSIG])(int sig, siginfo_t *si, mcontext_t *mc) = {
+static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = {
 	[SIGSEGV] = sig_handler,
 	[SIGBUS] = sig_handler,
 	[SIGILL] = sig_handler,
@@ -162,7 +162,7 @@
 		while ((sig = ffs(pending)) != 0){
 			sig--;
 			pending &= ~(1 << sig);
-			(*handlers[sig])(sig, si, mc);
+			(*handlers[sig])(sig, (struct siginfo *)si, mc);
 		}
 
 		/*
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index 4625949..908579f 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -409,7 +409,7 @@
 		if (WIFSTOPPED(status)) {
 			int sig = WSTOPSIG(status);
 
-			ptrace(PTRACE_GETSIGINFO, pid, 0, &si);
+			ptrace(PTRACE_GETSIGINFO, pid, 0, (struct siginfo *)&si);
 
 			switch (sig) {
 			case SIGSEGV:
@@ -417,7 +417,7 @@
 				    !ptrace_faultinfo) {
 					get_skas_faultinfo(pid,
 							   &regs->faultinfo);
-					(*sig_info[SIGSEGV])(SIGSEGV, &si,
+					(*sig_info[SIGSEGV])(SIGSEGV, (struct siginfo *)&si,
 							     regs);
 				}
 				else handle_segv(pid, regs);
@@ -426,14 +426,14 @@
 			        handle_trap(pid, regs, local_using_sysemu);
 				break;
 			case SIGTRAP:
-				relay_signal(SIGTRAP, &si, regs);
+				relay_signal(SIGTRAP, (struct siginfo *)&si, regs);
 				break;
 			case SIGVTALRM:
 				now = os_nsecs();
 				if (now < nsecs)
 					break;
 				block_signals();
-				(*sig_info[sig])(sig, &si, regs);
+				(*sig_info[sig])(sig, (struct siginfo *)&si, regs);
 				unblock_signals();
 				nsecs = timer.it_value.tv_sec *
 					UM_NSEC_PER_SEC +
@@ -447,7 +447,7 @@
 			case SIGFPE:
 			case SIGWINCH:
 				block_signals();
-				(*sig_info[sig])(sig, &si, regs);
+				(*sig_info[sig])(sig, (struct siginfo *)&si, regs);
 				unblock_signals();
 				break;
 			default:
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 105ae30..f0f33ae 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -99,6 +99,8 @@
 	select DCACHE_WORD_ACCESS
 	select GENERIC_SMP_IDLE_THREAD
 	select ARCH_WANT_IPC_PARSE_VERSION if X86_32
+	select HAVE_ARCH_MMAP_RND_BITS		if MMU
+	select HAVE_ARCH_MMAP_RND_COMPAT_BITS	if MMU && COMPAT
 	select HAVE_ARCH_SECCOMP_FILTER
 	select BUILDTIME_EXTABLE_SORT
 	select GENERIC_CMOS_UPDATE
@@ -149,6 +151,20 @@
 config MMU
 	def_bool y
 
+config ARCH_MMAP_RND_BITS_MIN
+	default 28 if 64BIT
+	default 8
+
+config ARCH_MMAP_RND_BITS_MAX
+	default 32 if 64BIT
+	default 16
+
+config ARCH_MMAP_RND_COMPAT_BITS_MIN
+	default 8
+
+config ARCH_MMAP_RND_COMPAT_BITS_MAX
+	default 16
+
 config SBUS
 	bool
 
@@ -443,29 +459,65 @@
 if X86_WANT_INTEL_MID
 
 config X86_INTEL_MID
-	bool
-
-config X86_MDFLD
-       bool "Medfield MID platform"
+	bool "Intel MID platform"
 	depends on PCI
 	depends on PCI_GOANY
 	depends on X86_IO_APIC
-	select X86_INTEL_MID
 	select SFI
+	select INTEL_SCU_IPC
+	select X86_PLATFORM_DEVICES
+	select ARCH_HAVE_CUSTOM_GPIO_H
+	---help---
+	  Intel MID is Intel's Low Power Intel Architecture (LPIA) based Mobile
+	  Internet Device (MID) platform.
+	  Unlike standard x86 PCs, Intel MID does not have many legacy devices
+	  nor standard legacy replacement devices/features, e.g. it does not
+	  contain an i8259, i8254, HPET, legacy BIOS, or most of the I/O ports.
+
+config X86_MDFLD
+	bool "Medfield MID platform"
+	depends on X86_INTEL_MID
 	select DW_APB_TIMER
 	select APB_TIMER
 	select I2C
 	select SPI
-	select INTEL_SCU_IPC
-	select X86_PLATFORM_DEVICES
 	select MFD_INTEL_MSIC
 	---help---
 	  Medfield is Intel's Low Power Intel Architecture (LPIA) based Moblin
-	  Internet Device(MID) platform. 
+	  Internet Device (MID) platform.
 	  Unlike standard x86 PCs, Medfield does not have many legacy devices
 	  nor standard legacy replacement devices/features. e.g. Medfield does
 	  not contain i8259, i8254, HPET, legacy BIOS, most of the io ports.
 
+config ATOM_SOC_POWER
+	bool "Select Atom SOC Power"
+
+choice
+	prompt "Select PMU support"
+	depends on ATOM_SOC_POWER
+	default REMOVEME_INTEL_ATOM_MDFLD_POWER
+
+config REMOVEME_INTEL_ATOM_MDFLD_POWER
+	bool "Medfield"
+
+config REMOVEME_INTEL_ATOM_CLV_POWER
+	bool "Clovertrail"
+
+config REMOVEME_INTEL_ATOM_MRFLD_POWER
+	bool "Merrifield"
+
+endchoice
+
+config INTEL_DEBUG_FEATURE
+	bool "Debug feature interface on Intel MID platform"
+	depends on X86_INTEL_MID
+	---help---
+	  Provides an interface to list the debug features
+	  that are enabled on an Intel MID platform. The
+	  enabling of the debug features depends on the mode
+	  the device is in (e.g. manufacturing, production,
+	  end user, etc.).
+
 endif
 
 config X86_INTEL_LPSS
@@ -724,6 +776,16 @@
          as it is off-chip. APB timers are always running regardless of CPU
          C states, they are used as per CPU clockevent device when possible.
 
+config ARCH_NR_GPIO
+	int
+	depends on ARCH_HAVE_CUSTOM_GPIO_H
+	default 512 if X86_INTEL_MID
+	default 0
+	help
+	  Maximum number of GPIOs in the system.
+
+	  If unsure, leave the default value.
+
 # Mark as expert because too many people got it wrong.
 # The code disables itself when not needed.
 config DMI
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index c026cca..0a7e986 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -269,6 +269,15 @@
 	  accordingly optimized code. Use a recent GCC with specific Atom
 	  support in order to fully benefit from selecting this option.
 
+config MSLM
+	bool "Intel Silvermont (Atom)"
+	---help---
+	  Select this for the Intel Silvermont (Atom) platform. Intel Atom
+	  CPUs have an in-order pipelining architecture and thus can benefit
+	  from accordingly optimized code. Use a recent GCC with specific
+	  Atom support in order to fully benefit from selecting this option.
+
 config GENERIC_CPU
 	bool "Generic-x86-64"
 	depends on X86_64
@@ -300,7 +309,7 @@
 config X86_L1_CACHE_SHIFT
 	int
 	default "7" if MPENTIUM4 || MPSC
-	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
+	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MSLM || MVIAC7 || X86_GENERIC || GENERIC_CPU
 	default "4" if MELAN || M486 || MGEODEGX1
 	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
 
@@ -335,7 +344,7 @@
 
 config X86_USE_PPRO_CHECKSUM
 	def_bool y
-	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
+	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM || MSLM
 
 config X86_USE_3DNOW
 	def_bool y
@@ -363,17 +372,17 @@
 
 config X86_TSC
 	def_bool y
-	depends on ((MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) && !X86_NUMAQ) || X86_64
+	depends on ((MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM || MSLM) && !X86_NUMAQ) || X86_64
 
 config X86_CMPXCHG64
 	def_bool y
-	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
+	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM || MSLM
 
 # this should be set for all -march=.. options where the compiler
 # generates cmov.
 config X86_CMOV
 	def_bool y
-	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
+	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MSLM || MGEODE_LX)
 
 config X86_MINIMUM_CPU_FAMILY
 	int
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 412189d..328cd2a 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -74,6 +74,8 @@
                 $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
 	cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
 		$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
+        cflags-$(CONFIG_MSLM) += $(call cc-option,-march=slm) \
+                $(call cc-option,-mtune=slm,$(call cc-option,-mtune=generic))
         cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
         KBUILD_CFLAGS += $(cflags-y)
 
diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
index 6647ed4..a2eb6c6 100644
--- a/arch/x86/Makefile_32.cpu
+++ b/arch/x86/Makefile_32.cpu
@@ -34,6 +34,8 @@
 cflags-$(CONFIG_MCORE2)		+= -march=i686 $(call tune,core2)
 cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
 	$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
+cflags-$(CONFIG_MSLM)		+= $(call cc-option,-march=slm,$(call cc-option,-march=core2,-march=i686)) \
+	$(call cc-option,-mtune=slm,$(call cc-option,-mtune=generic))
 
 # AMD Elan support
 cflags-$(CONFIG_MELAN)		+= -march=i486
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 9444708..590eea2 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -1,311 +1,2958 @@
 # CONFIG_64BIT is not set
-CONFIG_EXPERIMENTAL=y
-# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_X86_32=y
+CONFIG_X86=y
+CONFIG_INSTRUCTION_DECODER=y
+CONFIG_OUTPUT_FORMAT="elf32-i386"
+CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_MMU=y
+CONFIG_NEED_SG_DMA_LENGTH=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_ARCH_HAS_CPU_RELAX=y
+CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
+CONFIG_ARCH_HAS_CPU_AUTOPROBE=y
+CONFIG_HAVE_SETUP_PER_CPU_AREA=y
+CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
+CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_ZONE_DMA32 is not set
+# CONFIG_AUDIT_ARCH is not set
+CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
+CONFIG_X86_32_SMP=y
+CONFIG_X86_HT=y
+CONFIG_X86_32_LAZY_GS=y
+CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-ecx -fcall-saved-edx"
+CONFIG_ARCH_CPU_PROBE_RELEASE=y
+CONFIG_ARCH_SUPPORTS_UPROBES=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_IRQ_WORK=y
+CONFIG_BUILDTIME_EXTABLE_SORT=y
+
+#
+# General setup
+#
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE=""
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_XZ=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_XZ is not set
+# CONFIG_KERNEL_LZO is not set
+CONFIG_DEFAULT_HOSTNAME="(none)"
+CONFIG_SWAP=y
 CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+CONFIG_FHANDLE=y
+CONFIG_AUDIT=y
+CONFIG_AUDITSYSCALL=y
+CONFIG_AUDIT_WATCH=y
+CONFIG_AUDIT_TREE=y
+# CONFIG_AUDIT_LOGINUID_IMMUTABLE is not set
+CONFIG_HAVE_GENERIC_HARDIRQS=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_PENDING_IRQ=y
+CONFIG_IRQ_DOMAIN=y
+# CONFIG_IRQ_DOMAIN_DEBUG is not set
+CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_CLOCKSOURCE_WATCHDOG=y
+CONFIG_KTIME_SCALAR=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+
+#
+# Timers subsystem
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ_COMMON=y
+# CONFIG_HZ_PERIODIC is not set
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+
+#
+# CPU/Task time and stats accounting
+#
+CONFIG_TICK_CPU_ACCOUNTING=y
+# CONFIG_IRQ_TIME_ACCOUNTING is not set
 CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
 CONFIG_TASK_DELAY_ACCT=y
 CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_PREEMPT_RCU is not set
+CONFIG_RCU_STALL_COMMON=y
+CONFIG_RCU_FANOUT=32
+CONFIG_RCU_FANOUT_LEAF=16
+# CONFIG_RCU_FANOUT_EXACT is not set
+CONFIG_RCU_FAST_NO_HZ=y
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_RCU_NOCB_CPU is not set
+# CONFIG_IKCONFIG is not set
 CONFIG_LOG_BUF_SHIFT=18
+CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
+CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
+CONFIG_ARCH_WANTS_PROT_NUMA_PROT_NONE=y
 CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
 CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
 CONFIG_CPUSETS=y
+CONFIG_PROC_PID_CPUSET=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_RESOURCE_COUNTERS=y
-CONFIG_CGROUP_SCHED=y
+# CONFIG_MEMCG is not set
+# CONFIG_CGROUP_HUGETLB is not set
+# CONFIG_CGROUP_PERF is not set
+# CONFIG_CGROUP_SCHED is not set
+# CONFIG_BLK_CGROUP is not set
+# CONFIG_CHECKPOINT_RESTORE is not set
+CONFIG_NAMESPACES=y
+CONFIG_UTS_NS=y
+CONFIG_IPC_NS=y
+# CONFIG_USER_NS is not set
+CONFIG_PID_NS=y
+# CONFIG_NET_NS is not set
+CONFIG_UIDGID_CONVERTED=y
+# CONFIG_UIDGID_STRICT_TYPE_CHECKS is not set
+# CONFIG_SCHED_AUTOGROUP is not set
+# CONFIG_SYSFS_DEPRECATED is not set
+CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_COMPAT_BRK is not set
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_RD_XZ=y
+CONFIG_RD_LZO=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_HAVE_UID16=y
+CONFIG_SYSCTL_EXCEPTION_TRACE=y
+CONFIG_HOTPLUG=y
+CONFIG_HAVE_PCSPKR_PLATFORM=y
+CONFIG_PANIC_TIMEOUT=0
+# CONFIG_EXPERT is not set
+CONFIG_UID16=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_PCSPKR_PLATFORM=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_PCI_QUIRKS=y
+# CONFIG_EMBEDDED is not set
+CONFIG_HAVE_PERF_EVENTS=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_COMPAT_BRK=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
 CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
+# CONFIG_OPROFILE is not set
+CONFIG_HAVE_OPROFILE=y
+CONFIG_OPROFILE_NMI_TIMER=y
 CONFIG_KPROBES=y
+# CONFIG_JUMP_LABEL is not set
+CONFIG_OPTPROBES=y
+CONFIG_KPROBES_ON_FTRACE=y
+# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_ARCH_USE_BUILTIN_BSWAP=y
+CONFIG_KRETPROBES=y
+CONFIG_USER_RETURN_NOTIFIER=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_OPTPROBES=y
+CONFIG_HAVE_KPROBES_ON_FTRACE=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_DMA_ATTRS=y
+CONFIG_HAVE_DMA_CONTIGUOUS=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
+CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
+CONFIG_HAVE_USER_RETURN_NOTIFIER=y
+CONFIG_HAVE_PERF_EVENTS_NMI=y
+CONFIG_HAVE_PERF_REGS=y
+CONFIG_HAVE_PERF_USER_STACK_DUMP=y
+CONFIG_HAVE_ARCH_JUMP_LABEL=y
+CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
+CONFIG_HAVE_CMPXCHG_LOCAL=y
+CONFIG_HAVE_CMPXCHG_DOUBLE=y
+CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y
+CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
+CONFIG_SECCOMP_FILTER=y
+CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
+CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
+CONFIG_MODULES_USE_ELF_REL=y
+CONFIG_CLONE_BACKWARDS=y
+CONFIG_OLD_SIGSUSPEND3=y
+CONFIG_OLD_SIGACTION=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_GCOV_TOOLCHAIN_IS_ANDROID is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_MODVERSIONS is not set
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_MODULE_SIG is not set
+CONFIG_STOP_MACHINE=y
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_BSGLIB is not set
+CONFIG_BLK_DEV_INTEGRITY=y
+
+#
+# Partition Types
+#
 CONFIG_PARTITION_ADVANCED=y
-CONFIG_OSF_PARTITION=y
-CONFIG_AMIGA_PARTITION=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+CONFIG_ATARI_PARTITION=y
 CONFIG_MAC_PARTITION=y
+CONFIG_MSDOS_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
 CONFIG_MINIX_SUBPARTITION=y
 CONFIG_SOLARIS_X86_PARTITION=y
 CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_LDM_PARTITION=y
+# CONFIG_LDM_DEBUG is not set
 CONFIG_SGI_PARTITION=y
+# CONFIG_ULTRIX_PARTITION is not set
 CONFIG_SUN_PARTITION=y
-CONFIG_KARMA_PARTITION=y
+# CONFIG_KARMA_PARTITION is not set
 CONFIG_EFI_PARTITION=y
+CONFIG_SYSV68_PARTITION=y
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_PREEMPT_NOTIFIERS=y
+CONFIG_UNINLINE_SPIN_UNLOCK=y
+CONFIG_FREEZER=y
+
+#
+# Processor type and features
+#
+CONFIG_ZONE_DMA=y
 CONFIG_SMP=y
+CONFIG_X86_MPPARSE=y
+# CONFIG_X86_BIGSMP is not set
+CONFIG_X86_EXTENDED_PLATFORM=y
+# CONFIG_X86_GOLDFISH is not set
+# CONFIG_X86_WANT_INTEL_MID is not set
+# CONFIG_X86_RDC321X is not set
+# CONFIG_X86_32_NON_STANDARD is not set
+CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y
+# CONFIG_X86_32_IRIS is not set
+# CONFIG_SCHED_OMIT_FRAME_POINTER is not set
+CONFIG_HYPERVISOR_GUEST=y
+CONFIG_PARAVIRT=y
+# CONFIG_PARAVIRT_DEBUG is not set
+# CONFIG_PARAVIRT_SPINLOCKS is not set
+# CONFIG_XEN is not set
+# CONFIG_XEN_PRIVILEGED_GUEST is not set
+# CONFIG_KVM_GUEST is not set
+CONFIG_LGUEST_GUEST=y
+# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set
+CONFIG_NO_BOOTMEM=y
+# CONFIG_MEMTEST is not set
+# CONFIG_M486 is not set
+# CONFIG_M586 is not set
+# CONFIG_M586TSC is not set
+# CONFIG_M586MMX is not set
+# CONFIG_M686 is not set
+# CONFIG_MPENTIUMII is not set
+# CONFIG_MPENTIUMIII is not set
+# CONFIG_MPENTIUMM is not set
+# CONFIG_MPENTIUM4 is not set
+# CONFIG_MK6 is not set
+# CONFIG_MK7 is not set
+# CONFIG_MK8 is not set
+# CONFIG_MCRUSOE is not set
+# CONFIG_MEFFICEON is not set
+# CONFIG_MWINCHIPC6 is not set
+# CONFIG_MWINCHIP3D is not set
+# CONFIG_MELAN is not set
+# CONFIG_MGEODEGX1 is not set
+# CONFIG_MGEODE_LX is not set
+# CONFIG_MCYRIXIII is not set
+# CONFIG_MVIAC3_2 is not set
+# CONFIG_MVIAC7 is not set
+CONFIG_MCORE2=y
+# CONFIG_MATOM is not set
 CONFIG_X86_GENERIC=y
+CONFIG_X86_INTERNODE_CACHE_SHIFT=6
+CONFIG_X86_L1_CACHE_SHIFT=6
+CONFIG_X86_INTEL_USERCOPY=y
+CONFIG_X86_USE_PPRO_CHECKSUM=y
+CONFIG_X86_TSC=y
+CONFIG_X86_CMPXCHG64=y
+CONFIG_X86_CMOV=y
+CONFIG_X86_MINIMUM_CPU_FAMILY=5
+CONFIG_X86_DEBUGCTLMSR=y
+CONFIG_CPU_SUP_INTEL=y
+CONFIG_CPU_SUP_AMD=y
+CONFIG_CPU_SUP_CENTAUR=y
+CONFIG_CPU_SUP_TRANSMETA_32=y
 CONFIG_HPET_TIMER=y
+CONFIG_DMI=y
+CONFIG_NR_CPUS=8
 CONFIG_SCHED_SMT=y
+CONFIG_SCHED_MC=y
+# CONFIG_PREEMPT_NONE is not set
 CONFIG_PREEMPT_VOLUNTARY=y
-CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
+# CONFIG_PREEMPT is not set
+CONFIG_X86_LOCAL_APIC=y
+CONFIG_X86_IO_APIC=y
+# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set
 CONFIG_X86_MCE=y
+CONFIG_X86_MCE_INTEL=y
+CONFIG_X86_MCE_AMD=y
+# CONFIG_X86_ANCIENT_MCE is not set
+CONFIG_X86_MCE_THRESHOLD=y
+# CONFIG_X86_MCE_INJECT is not set
+CONFIG_X86_THERMAL_VECTOR=y
+CONFIG_VM86=y
+# CONFIG_TOSHIBA is not set
+# CONFIG_I8K is not set
 CONFIG_X86_REBOOTFIXUPS=y
-CONFIG_MICROCODE=y
-CONFIG_MICROCODE_AMD=y
-CONFIG_X86_MSR=y
+# CONFIG_MICROCODE is not set
+# CONFIG_X86_MSR is not set
 CONFIG_X86_CPUID=y
+# CONFIG_NOHIGHMEM is not set
+# CONFIG_HIGHMEM4G is not set
+CONFIG_HIGHMEM64G=y
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_HIGHMEM=y
+CONFIG_X86_PAE=y
+CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
+CONFIG_ARCH_DMA_ADDR_T_64BIT=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_ILLEGAL_POINTER_VALUE=0
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_SPARSEMEM_STATIC=y
+CONFIG_HAVE_MEMBLOCK=y
+CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
+CONFIG_ARCH_DISCARD_MEMBLOCK=y
+# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=999999
+# CONFIG_COMPACTION is not set
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_MMU_NOTIFIER=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
+# CONFIG_MEMORY_FAILURE is not set
+# CONFIG_TRANSPARENT_HUGEPAGE is not set
+CONFIG_CROSS_MEMORY_ATTACH=y
+# CONFIG_CLEANCACHE is not set
+# CONFIG_FRONTSWAP is not set
 CONFIG_HIGHPTE=y
-CONFIG_X86_CHECK_BIOS_CORRUPTION=y
+# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set
+CONFIG_X86_RESERVE_LOW=64
+# CONFIG_MATH_EMULATION is not set
+CONFIG_MTRR=y
 # CONFIG_MTRR_SANITIZER is not set
-CONFIG_EFI=y
-CONFIG_HZ_1000=y
+CONFIG_X86_PAT=y
+CONFIG_ARCH_USES_PG_UNCACHED=y
+CONFIG_ARCH_RANDOM=y
+CONFIG_X86_SMAP=y
+CONFIG_SECCOMP=y
+# CONFIG_CC_STACKPROTECTOR is not set
+# CONFIG_HZ_100 is not set
+# CONFIG_HZ_250 is not set
+CONFIG_HZ_300=y
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=300
+CONFIG_SCHED_HRTICK=y
 CONFIG_KEXEC=y
 CONFIG_CRASH_DUMP=y
-# CONFIG_COMPAT_VDSO is not set
-CONFIG_HIBERNATION=y
-CONFIG_PM_DEBUG=y
-CONFIG_PM_TRACE_RTC=y
-CONFIG_ACPI_PROCFS=y
-CONFIG_ACPI_DOCK=y
+CONFIG_PHYSICAL_START=0x100000
+CONFIG_RELOCATABLE=y
+CONFIG_X86_NEED_RELOCS=y
+CONFIG_PHYSICAL_ALIGN=0x100000
+CONFIG_HOTPLUG_CPU=y
+# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
+# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
+CONFIG_COMPAT_VDSO=y
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+
+#
+# Power management and ACPI options
+#
+# CONFIG_SUSPEND is not set
+# CONFIG_HAS_WAKELOCK is not set
+# CONFIG_WAKELOCK is not set
+# CONFIG_HIBERNATION is not set
+# CONFIG_PM_RUNTIME is not set
+# CONFIG_SUSPEND_TIME is not set
+# CONFIG_ACPI is not set
+# CONFIG_SFI is not set
+
+#
+# CPU Frequency scaling
+#
 CONFIG_CPU_FREQ=y
-# CONFIG_CPU_FREQ_STAT is not set
-CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set
 CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
-CONFIG_X86_ACPI_CPUFREQ=y
+# CONFIG_CPU_FREQ_GOV_INTERACTIVE is not set
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+
+#
+# x86 CPU frequency scaling drivers
+#
+# CONFIG_X86_INTEL_PSTATE is not set
+# CONFIG_X86_POWERNOW_K6 is not set
+# CONFIG_X86_POWERNOW_K7 is not set
+# CONFIG_X86_GX_SUSPMOD is not set
+# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
+# CONFIG_X86_SPEEDSTEP_ICH is not set
+# CONFIG_X86_SPEEDSTEP_SMI is not set
+# CONFIG_X86_P4_CLOCKMOD is not set
+# CONFIG_X86_CPUFREQ_NFORCE2 is not set
+# CONFIG_X86_LONGRUN is not set
+
+#
+# shared options
+#
+# CONFIG_X86_SPEEDSTEP_LIB is not set
+CONFIG_CPU_IDLE=y
+# CONFIG_CPU_IDLE_MULTIPLE_DRIVERS is not set
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set
+CONFIG_INTEL_IDLE=y
+
+#
+# Bus options (PCI etc.)
+#
+CONFIG_PCI=y
+# CONFIG_PCI_GOBIOS is not set
+# CONFIG_PCI_GOMMCONFIG is not set
+# CONFIG_PCI_GODIRECT is not set
+CONFIG_PCI_GOANY=y
+CONFIG_PCI_BIOS=y
+CONFIG_PCI_DIRECT=y
+CONFIG_PCI_DOMAINS=y
 CONFIG_PCIEPORTBUS=y
+# CONFIG_HOTPLUG_PCI_PCIE is not set
+CONFIG_PCIEAER=y
+# CONFIG_PCIE_ECRC is not set
+# CONFIG_PCIEAER_INJECT is not set
+CONFIG_PCIEASPM=y
+# CONFIG_PCIEASPM_DEBUG is not set
+CONFIG_PCIEASPM_DEFAULT=y
+# CONFIG_PCIEASPM_POWERSAVE is not set
+# CONFIG_PCIEASPM_PERFORMANCE is not set
+CONFIG_ARCH_SUPPORTS_MSI=y
 CONFIG_PCI_MSI=y
+# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
+# CONFIG_PCI_STUB is not set
+CONFIG_HT_IRQ=y
+# CONFIG_PCI_IOV is not set
+# CONFIG_PCI_PRI is not set
+# CONFIG_PCI_PASID is not set
+CONFIG_PCI_LABEL=y
+
+#
+# PCIE Host Controller Drivers
+#
+CONFIG_ISA_DMA_API=y
+CONFIG_ISA=y
+CONFIG_EISA=y
+CONFIG_EISA_VLB_PRIMING=y
+CONFIG_EISA_PCI_EISA=y
+CONFIG_EISA_VIRTUAL_ROOT=y
+CONFIG_EISA_NAMES=y
+# CONFIG_SCx200 is not set
+# CONFIG_ALIX is not set
+# CONFIG_NET5501 is not set
+# CONFIG_GEOS is not set
+CONFIG_AMD_NB=y
 CONFIG_PCCARD=y
+# CONFIG_PCMCIA is not set
+CONFIG_CARDBUS=y
+
+#
+# PC-card bridges
+#
 CONFIG_YENTA=y
+CONFIG_YENTA_O2=y
+CONFIG_YENTA_RICOH=y
+CONFIG_YENTA_TI=y
+CONFIG_YENTA_ENE_TUNE=y
+CONFIG_YENTA_TOSHIBA=y
+CONFIG_PCMCIA_PROBE=y
 CONFIG_HOTPLUG_PCI=y
-CONFIG_BINFMT_MISC=y
+# CONFIG_HOTPLUG_PCI_COMPAQ is not set
+# CONFIG_HOTPLUG_PCI_IBM is not set
+CONFIG_HOTPLUG_PCI_CPCI=y
+# CONFIG_HOTPLUG_PCI_CPCI_ZT5550 is not set
+# CONFIG_HOTPLUG_PCI_CPCI_GENERIC is not set
+CONFIG_HOTPLUG_PCI_SHPC=y
+# CONFIG_RAPIDIO is not set
+
+#
+# Executable file formats / Emulations
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_SCRIPT=y
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+CONFIG_COREDUMP=y
+CONFIG_HAVE_ATOMIC_IOMAP=y
+CONFIG_HAVE_TEXT_POKE_SMP=y
 CONFIG_NET=y
+
+#
+# Networking options
+#
 CONFIG_PACKET=y
+# CONFIG_PACKET_DIAG is not set
 CONFIG_UNIX=y
-CONFIG_XFRM_USER=y
+# CONFIG_UNIX_DIAG is not set
+CONFIG_XFRM=y
+CONFIG_XFRM_ALGO=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+# CONFIG_NET_KEY_MIGRATE is not set
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
+# CONFIG_IP_FIB_TRIE_STATS is not set
 CONFIG_IP_MULTIPLE_TABLES=y
 CONFIG_IP_ROUTE_MULTIPATH=y
 CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE_DEMUX is not set
+# CONFIG_NET_IP_TUNNEL is not set
 CONFIG_IP_MROUTE=y
+# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set
 CONFIG_IP_PIMSM_V1=y
 CONFIG_IP_PIMSM_V2=y
+# CONFIG_ARPD is not set
 CONFIG_SYN_COOKIES=y
+# CONFIG_NET_IPVTI is not set
+# CONFIG_INET_AH is not set
+CONFIG_INET_ESP=y
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TUNNEL=y
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_DIAG is not set
-CONFIG_TCP_CONG_ADVANCED=y
-# CONFIG_TCP_CONG_BIC is not set
-# CONFIG_TCP_CONG_WESTWOOD is not set
-# CONFIG_TCP_CONG_HTCP is not set
+CONFIG_INET_LRO=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_INET_UDP_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
 CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6=y
-CONFIG_INET6_AH=y
-CONFIG_INET6_ESP=y
-CONFIG_NETLABEL=y
+# CONFIG_IPV6 is not set
+# CONFIG_NETLABEL is not set
+# CONFIG_ANDROID_PARANOID_NETWORK is not set
+# CONFIG_NET_ACTIVITY_STATS is not set
+CONFIG_NETWORK_SECMARK=y
+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
 CONFIG_NETFILTER=y
-# CONFIG_NETFILTER_ADVANCED is not set
-CONFIG_NF_CONNTRACK=y
-CONFIG_NF_CONNTRACK_FTP=y
-CONFIG_NF_CONNTRACK_IRC=y
-CONFIG_NF_CONNTRACK_SIP=y
-CONFIG_NF_CT_NETLINK=y
-CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
-CONFIG_NETFILTER_XT_TARGET_NFLOG=y
-CONFIG_NETFILTER_XT_TARGET_SECMARK=y
-CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
-CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
-CONFIG_NETFILTER_XT_MATCH_POLICY=y
-CONFIG_NETFILTER_XT_MATCH_STATE=y
-CONFIG_NF_CONNTRACK_IPV4=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+
+#
+# Core Netfilter Configuration
+#
+# CONFIG_NETFILTER_NETLINK_ACCT is not set
+# CONFIG_NETFILTER_NETLINK_QUEUE is not set
+# CONFIG_NETFILTER_NETLINK_LOG is not set
+# CONFIG_NF_CONNTRACK is not set
+CONFIG_NETFILTER_XTABLES=y
+
+#
+# Xtables combined modules
+#
+# CONFIG_NETFILTER_XT_MARK is not set
+
+#
+# Xtables targets
+#
+# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set
+# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set
+# CONFIG_NETFILTER_XT_TARGET_HMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set
+# CONFIG_NETFILTER_XT_TARGET_LOG is not set
+# CONFIG_NETFILTER_XT_TARGET_MARK is not set
+# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
+# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set
+# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
+# CONFIG_NETFILTER_XT_TARGET_TEE is not set
+# CONFIG_NETFILTER_XT_TARGET_SECMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
+
+#
+# Xtables matches
+#
+# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_BPF is not set
+# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
+# CONFIG_NETFILTER_XT_MATCH_CPU is not set
+# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
+# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set
+# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
+# CONFIG_NETFILTER_XT_MATCH_ECN is not set
+# CONFIG_NETFILTER_XT_MATCH_ESP is not set
+# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_HL is not set
+# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
+# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set
+# CONFIG_NETFILTER_XT_MATCH_LIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_MAC is not set
+# CONFIG_NETFILTER_XT_MATCH_MARK is not set
+# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set
+# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set
+# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
+# CONFIG_NETFILTER_XT_MATCH_POLICY is not set
+# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set
+# CONFIG_NETFILTER_XT_MATCH_QUOTA2 is not set
+# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
+# CONFIG_NETFILTER_XT_MATCH_REALM is not set
+# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
+# CONFIG_NETFILTER_XT_MATCH_STRING is not set
+# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
+# CONFIG_NETFILTER_XT_MATCH_TIME is not set
+# CONFIG_NETFILTER_XT_MATCH_U32 is not set
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+# CONFIG_NF_DEFRAG_IPV4 is not set
 CONFIG_IP_NF_IPTABLES=y
+# CONFIG_IP_NF_MATCH_AH is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
 CONFIG_IP_NF_FILTER=y
-CONFIG_IP_NF_TARGET_REJECT=y
-CONFIG_IP_NF_TARGET_ULOG=y
-CONFIG_NF_NAT=y
-CONFIG_IP_NF_TARGET_MASQUERADE=y
-CONFIG_IP_NF_MANGLE=y
-CONFIG_NF_CONNTRACK_IPV6=y
-CONFIG_IP6_NF_IPTABLES=y
-CONFIG_IP6_NF_MATCH_IPV6HEADER=y
-CONFIG_IP6_NF_FILTER=y
-CONFIG_IP6_NF_TARGET_REJECT=y
-CONFIG_IP6_NF_MANGLE=y
+# CONFIG_IP_NF_TARGET_REJECT is not set
+# CONFIG_IP_NF_TARGET_ULOG is not set
+# CONFIG_IP_NF_MANGLE is not set
+# CONFIG_IP_NF_RAW is not set
+# CONFIG_IP_NF_SECURITY is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+CONFIG_ATM=y
+CONFIG_ATM_CLIP=y
+# CONFIG_ATM_CLIP_NO_ICMP is not set
+# CONFIG_ATM_LANE is not set
+# CONFIG_ATM_BR2684 is not set
+# CONFIG_L2TP is not set
+# CONFIG_BRIDGE is not set
+CONFIG_HAVE_NET_DSA=y
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_PHONET is not set
+# CONFIG_MHI is not set
+# CONFIG_IEEE802154 is not set
 CONFIG_NET_SCHED=y
+
+#
+# Queueing/Scheduling
+#
+# CONFIG_NET_SCH_CBQ is not set
+# CONFIG_NET_SCH_HTB is not set
+# CONFIG_NET_SCH_HFSC is not set
+# CONFIG_NET_SCH_ATM is not set
+# CONFIG_NET_SCH_PRIO is not set
+# CONFIG_NET_SCH_MULTIQ is not set
+# CONFIG_NET_SCH_RED is not set
+# CONFIG_NET_SCH_SFB is not set
+# CONFIG_NET_SCH_SFQ is not set
+# CONFIG_NET_SCH_TEQL is not set
+# CONFIG_NET_SCH_TBF is not set
+# CONFIG_NET_SCH_GRED is not set
+# CONFIG_NET_SCH_DSMARK is not set
+# CONFIG_NET_SCH_NETEM is not set
+# CONFIG_NET_SCH_DRR is not set
+# CONFIG_NET_SCH_MQPRIO is not set
+# CONFIG_NET_SCH_CHOKE is not set
+# CONFIG_NET_SCH_QFQ is not set
+# CONFIG_NET_SCH_CODEL is not set
+# CONFIG_NET_SCH_FQ_CODEL is not set
+# CONFIG_NET_SCH_INGRESS is not set
+# CONFIG_NET_SCH_PLUG is not set
+
+#
+# Classification
+#
+CONFIG_NET_CLS=y
+# CONFIG_NET_CLS_BASIC is not set
+# CONFIG_NET_CLS_TCINDEX is not set
+# CONFIG_NET_CLS_ROUTE4 is not set
+# CONFIG_NET_CLS_FW is not set
+# CONFIG_NET_CLS_U32 is not set
+# CONFIG_NET_CLS_RSVP is not set
+# CONFIG_NET_CLS_RSVP6 is not set
+# CONFIG_NET_CLS_FLOW is not set
+# CONFIG_NET_CLS_CGROUP is not set
 CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_STACK=32
+# CONFIG_NET_EMATCH_CMP is not set
+# CONFIG_NET_EMATCH_NBYTE is not set
+# CONFIG_NET_EMATCH_U32 is not set
+# CONFIG_NET_EMATCH_META is not set
+# CONFIG_NET_EMATCH_TEXT is not set
 CONFIG_NET_CLS_ACT=y
-CONFIG_HAMRADIO=y
-CONFIG_CFG80211=y
-CONFIG_MAC80211=y
-CONFIG_MAC80211_LEDS=y
-CONFIG_RFKILL=y
+# CONFIG_NET_ACT_POLICE is not set
+# CONFIG_NET_ACT_GACT is not set
+# CONFIG_NET_ACT_MIRRED is not set
+# CONFIG_NET_ACT_IPT is not set
+# CONFIG_NET_ACT_NAT is not set
+# CONFIG_NET_ACT_PEDIT is not set
+# CONFIG_NET_ACT_SIMP is not set
+# CONFIG_NET_ACT_SKBEDIT is not set
+# CONFIG_NET_ACT_CSUM is not set
+CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
+CONFIG_DNS_RESOLVER=y
+# CONFIG_BATMAN_ADV is not set
+# CONFIG_OPENVSWITCH is not set
+# CONFIG_VSOCKETS is not set
+# CONFIG_NETLINK_MMAP is not set
+# CONFIG_NETLINK_DIAG is not set
+# CONFIG_RMNET_DATA is not set
+# CONFIG_NETPRIO_CGROUP is not set
+CONFIG_BQL=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_TCPPROBE is not set
+# CONFIG_NET_DROP_MONITOR is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_FIB_RULES=y
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
+# CONFIG_CEPH_LIB is not set
+# CONFIG_NFC is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEBUG_DEVRES=y
-CONFIG_CONNECTOR=y
+CONFIG_DEVTMPFS=y
+# CONFIG_DEVTMPFS_MOUNT is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+CONFIG_FW_LOADER_USER_HELPER=y
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_GENERIC_CPU_DEVICES is not set
+CONFIG_HAVE_CPU_AUTOPROBE=y
+# CONFIG_DMA_SHARED_BUFFER is not set
+# CONFIG_CMA is not set
+
+#
+# Bus devices
+#
+# CONFIG_CONNECTOR is not set
+# CONFIG_MTD is not set
+CONFIG_PARPORT=y
+CONFIG_PARPORT_PC=y
+# CONFIG_PARPORT_SERIAL is not set
+CONFIG_PARPORT_PC_FIFO=y
+# CONFIG_PARPORT_PC_SUPERIO is not set
+# CONFIG_PARPORT_GSC is not set
+# CONFIG_PARPORT_AX88796 is not set
+CONFIG_PARPORT_1284=y
+CONFIG_PNP=y
+CONFIG_PNP_DEBUG_MESSAGES=y
+
+#
+# Protocols
+#
+CONFIG_ISAPNP=y
+CONFIG_PNPBIOS=y
+CONFIG_PNPBIOS_PROC_FS=y
+# CONFIG_PNPACPI is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_PARIDE is not set
+# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
 CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_DRBD is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_NVME is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=65536
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_VIRTIO_BLK=y
+# CONFIG_BLK_DEV_HD is not set
+# CONFIG_BLK_DEV_RBD is not set
+# CONFIG_BLK_DEV_RSXX is not set
+
+#
+# Misc devices
+#
+# CONFIG_SENSORS_LIS3LV02D is not set
+# CONFIG_AD525X_DPOT is not set
+# CONFIG_DUMMY_IRQ is not set
+# CONFIG_IBM_ASM is not set
+# CONFIG_PHANTOM is not set
+# CONFIG_INTEL_MID_PTI is not set
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
+# CONFIG_ICS932S401 is not set
+# CONFIG_ATMEL_SSC is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_KERNEL_DEBUGGER_CORE is not set
+# CONFIG_HP_ILO is not set
+# CONFIG_APDS9802ALS is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_ISL29020 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_SENSORS_BH1780 is not set
+# CONFIG_SENSORS_BH1770 is not set
+# CONFIG_SENSORS_APDS990X is not set
+# CONFIG_HMC6352 is not set
+# CONFIG_SENSORS_NCT1008 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_UID_STAT is not set
+# CONFIG_VMWARE_BALLOON is not set
+# CONFIG_BMP085_I2C is not set
+# CONFIG_PCH_PHUB is not set
+# CONFIG_USB_SWITCH_FSA9480 is not set
+# CONFIG_SRAM is not set
+# CONFIG_BCM4329_RFKILL is not set
+# CONFIG_TEGRA_CRYPTO_DEV is not set
+# CONFIG_APANIC is not set
+CONFIG_APANIC_PLABEL="kpanic"
+# CONFIG_THERM_EST is not set
+# CONFIG_FAN_THERM_EST is not set
+# CONFIG_BLUEDROID_PM is not set
+CONFIG_CPULOAD_MONITOR=y
+# CONFIG_MODS is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_CB710_CORE is not set
+
+#
+# Texas Instruments shared transport line discipline
+#
+# CONFIG_ST_GPS is not set
+# CONFIG_SENSORS_LIS3_I2C is not set
+
+#
+# Altera FPGA firmware download module
+#
+# CONFIG_ALTERA_STAPL is not set
+# CONFIG_INTEL_MEI is not set
+# CONFIG_INTEL_MEI_ME is not set
+# CONFIG_VMWARE_VMCI is not set
+# CONFIG_ISSP is not set
+# CONFIG_GPS is not set
+# CONFIG_HTC_HEADSET_MGR is not set
+# CONFIG_HEADSET_DEBUG_UART is not set
+
+#
+# Qualcomm Modem Drivers
+#
+CONFIG_HAVE_IDE=y
+CONFIG_IDE=y
+
+#
+# Please see Documentation/ide/ide.txt for help/info on IDE drives
+#
+CONFIG_IDE_ATAPI=y
+# CONFIG_BLK_DEV_IDE_SATA is not set
+CONFIG_IDE_GD=y
+CONFIG_IDE_GD_ATA=y
+# CONFIG_IDE_GD_ATAPI is not set
+# CONFIG_BLK_DEV_DELKIN is not set
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+CONFIG_IDE_PROC_FS=y
+
+#
+# IDE chipset support/bugfixes
+#
+# CONFIG_IDE_GENERIC is not set
+# CONFIG_BLK_DEV_PLATFORM is not set
+# CONFIG_BLK_DEV_CMD640 is not set
+# CONFIG_BLK_DEV_IDEPNP is not set
+
+#
+# PCI IDE chipsets support
+#
+# CONFIG_BLK_DEV_GENERIC is not set
+# CONFIG_BLK_DEV_OPTI621 is not set
+# CONFIG_BLK_DEV_RZ1000 is not set
+# CONFIG_BLK_DEV_AEC62XX is not set
+# CONFIG_BLK_DEV_ALI15X3 is not set
+# CONFIG_BLK_DEV_AMD74XX is not set
+# CONFIG_BLK_DEV_ATIIXP is not set
+# CONFIG_BLK_DEV_CMD64X is not set
+# CONFIG_BLK_DEV_TRIFLEX is not set
+# CONFIG_BLK_DEV_CS5520 is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_CS5535 is not set
+# CONFIG_BLK_DEV_CS5536 is not set
+# CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_JMICRON is not set
+# CONFIG_BLK_DEV_SC1200 is not set
+# CONFIG_BLK_DEV_PIIX is not set
+# CONFIG_BLK_DEV_IT8172 is not set
+# CONFIG_BLK_DEV_IT8213 is not set
+# CONFIG_BLK_DEV_IT821X is not set
+# CONFIG_BLK_DEV_NS87415 is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+# CONFIG_BLK_DEV_SVWKS is not set
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SIS5513 is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_TRM290 is not set
+# CONFIG_BLK_DEV_VIA82CXXX is not set
+# CONFIG_BLK_DEV_TC86C001 is not set
+
+#
+# Other IDE chipsets support
+#
+
+#
+# Note: most of these also require special kernel boot parameters
+#
+# CONFIG_BLK_DEV_4DRIVES is not set
+# CONFIG_BLK_DEV_ALI14XX is not set
+# CONFIG_BLK_DEV_DTC2278 is not set
+# CONFIG_BLK_DEV_HT6560B is not set
+# CONFIG_BLK_DEV_QD65XX is not set
+# CONFIG_BLK_DEV_UMC8672 is not set
+# CONFIG_BLK_DEV_IDEDMA is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
 CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
 CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=y
+# CONFIG_CHR_DEV_SCH is not set
+CONFIG_SCSI_MULTI_LUN=y
 CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SPI_ATTRS=y
-# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_SCSI_LOGGING=y
+# CONFIG_SCSI_SCAN_ASYNC is not set
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_ISCSI_BOOT_SYSFS is not set
+# CONFIG_SCSI_CXGB3_ISCSI is not set
+# CONFIG_SCSI_CXGB4_ISCSI is not set
+# CONFIG_SCSI_BNX2_ISCSI is not set
+# CONFIG_SCSI_BNX2X_FCOE is not set
+# CONFIG_BE2ISCSI is not set
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_HPSA is not set
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_3W_SAS is not set
+# CONFIG_SCSI_7000FASST is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AHA152X is not set
+# CONFIG_SCSI_AHA1542 is not set
+# CONFIG_SCSI_AHA1740 is not set
+# CONFIG_SCSI_AACRAID is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_MVSAS is not set
+# CONFIG_SCSI_MVUMI is not set
+# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_IN2000 is not set
+# CONFIG_SCSI_ARCMSR is not set
+CONFIG_MEGARAID_NEWGEN=y
+# CONFIG_MEGARAID_MM is not set
+# CONFIG_MEGARAID_LEGACY is not set
+# CONFIG_MEGARAID_SAS is not set
+# CONFIG_SCSI_MPT2SAS is not set
+# CONFIG_SCSI_MPT3SAS is not set
+# CONFIG_SCSI_UFSHCD is not set
+# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_VMWARE_PVSCSI is not set
+# CONFIG_LIBFC is not set
+# CONFIG_LIBFCOE is not set
+# CONFIG_FCOE is not set
+# CONFIG_FCOE_FNIC is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_DTC3280 is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_ISCI is not set
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_PPA is not set
+# CONFIG_SCSI_IMM is not set
+# CONFIG_SCSI_NCR53C406A is not set
+# CONFIG_SCSI_STEX is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_PAS16 is not set
+# CONFIG_SCSI_QLOGIC_FAS is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+# CONFIG_SCSI_QLA_FC is not set
+# CONFIG_SCSI_QLA_ISCSI is not set
+# CONFIG_SCSI_LPFC is not set
+# CONFIG_SCSI_SIM710 is not set
+# CONFIG_SCSI_SYM53C416 is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_T128 is not set
+# CONFIG_SCSI_U14_34F is not set
+# CONFIG_SCSI_ULTRASTOR is not set
+# CONFIG_SCSI_NSP32 is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_PMCRAID is not set
+# CONFIG_SCSI_PM8001 is not set
+# CONFIG_SCSI_SRP is not set
+# CONFIG_SCSI_BFA_FC is not set
+# CONFIG_SCSI_VIRTIO is not set
+# CONFIG_SCSI_CHELSIO_FCOE is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
 CONFIG_ATA=y
+# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
+CONFIG_SATA_PMP=y
+
+#
+# Controllers with non-SFF native interface
+#
 CONFIG_SATA_AHCI=y
+# CONFIG_SATA_AHCI_PLATFORM is not set
+# CONFIG_SATA_AHCI_TEGRA is not set
+# CONFIG_SATA_INIC162X is not set
+# CONFIG_SATA_ACARD_AHCI is not set
+# CONFIG_SATA_SIL24 is not set
+CONFIG_ATA_SFF=y
+
+#
+# SFF controllers with custom DMA interface
+#
+# CONFIG_PDC_ADMA is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_SX4 is not set
+CONFIG_ATA_BMDMA=y
+
+#
+# SATA SFF controllers with BMDMA
+#
 CONFIG_ATA_PIIX=y
-CONFIG_PATA_AMD=y
-CONFIG_PATA_OLDPIIX=y
-CONFIG_PATA_SCH=y
-CONFIG_PATA_MPIIX=y
+# CONFIG_SATA_HIGHBANK is not set
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_SVW is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+
+#
+# PATA SFF controllers with BMDMA
+#
+# CONFIG_PATA_ALI is not set
+# CONFIG_PATA_AMD is not set
+# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATIIXP is not set
+# CONFIG_PATA_ATP867X is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CS5520 is not set
+# CONFIG_PATA_CS5530 is not set
+# CONFIG_PATA_CS5535 is not set
+# CONFIG_PATA_CS5536 is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_IT8213 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_MARVELL is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NINJA32 is not set
+# CONFIG_PATA_NS87415 is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC2027X is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RDC is not set
+# CONFIG_PATA_SC1200 is not set
+# CONFIG_PATA_SCH is not set
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_SIL680 is not set
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_TOSHIBA is not set
+# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+
+#
+# PIO-only SFF controllers
+#
+# CONFIG_PATA_CMD640_PCI is not set
+# CONFIG_PATA_ISAPNP is not set
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_QDI is not set
+# CONFIG_PATA_RZ1000 is not set
+# CONFIG_PATA_WINBOND_VLB is not set
+
+#
+# Generic fallback / legacy drivers
+#
 CONFIG_ATA_GENERIC=y
+# CONFIG_PATA_LEGACY is not set
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
+# CONFIG_BLK_DEV_MD is not set
+# CONFIG_BCACHE is not set
+CONFIG_BLK_DEV_DM_BUILTIN=y
 CONFIG_BLK_DEV_DM=y
+# CONFIG_DM_DEBUG is not set
+# CONFIG_DM_CRYPT is not set
+CONFIG_DM_SNAPSHOT=y
+# CONFIG_DM_THIN_PROVISIONING is not set
+# CONFIG_DM_CACHE is not set
 CONFIG_DM_MIRROR=y
-CONFIG_DM_ZERO=y
+# CONFIG_DM_RAID is not set
+# CONFIG_DM_LOG_USERSPACE is not set
+# CONFIG_DM_ZERO is not set
+# CONFIG_DM_MULTIPATH is not set
+# CONFIG_DM_DELAY is not set
+CONFIG_DM_UEVENT=y
+# CONFIG_DM_FLAKEY is not set
+# CONFIG_DM_VERITY is not set
+# CONFIG_TARGET_CORE is not set
+CONFIG_FUSION=y
+# CONFIG_FUSION_SPI is not set
+# CONFIG_FUSION_FC is not set
+# CONFIG_FUSION_SAS is not set
+CONFIG_FUSION_MAX_SGE=128
+CONFIG_FUSION_LOGGING=y
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_FIREWIRE is not set
+# CONFIG_FIREWIRE_NOSY is not set
+# CONFIG_I2O is not set
 CONFIG_MACINTOSH_DRIVERS=y
 CONFIG_MAC_EMUMOUSEBTN=y
 CONFIG_NETDEVICES=y
-CONFIG_NETCONSOLE=y
-CONFIG_BNX2=y
+CONFIG_NET_CORE=y
+# CONFIG_BONDING is not set
+CONFIG_DUMMY=y
+# CONFIG_EQUALIZER is not set
+CONFIG_NET_FC=y
+CONFIG_MII=y
+# CONFIG_IFB is not set
+# CONFIG_NET_TEAM is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_VXLAN is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+CONFIG_TUN=y
+CONFIG_VETH=y
+CONFIG_VIRTIO_NET=y
+# CONFIG_ARCNET is not set
+CONFIG_ATM_DRIVERS=y
+# CONFIG_ATM_DUMMY is not set
+# CONFIG_ATM_TCP is not set
+# CONFIG_ATM_LANAI is not set
+# CONFIG_ATM_ENI is not set
+# CONFIG_ATM_FIRESTREAM is not set
+# CONFIG_ATM_ZATM is not set
+# CONFIG_ATM_NICSTAR is not set
+# CONFIG_ATM_IDT77252 is not set
+# CONFIG_ATM_AMBASSADOR is not set
+# CONFIG_ATM_HORIZON is not set
+# CONFIG_ATM_IA is not set
+# CONFIG_ATM_FORE200E is not set
+# CONFIG_ATM_HE is not set
+# CONFIG_ATM_SOLOS is not set
+
+#
+# CAIF transport drivers
+#
+# CONFIG_VHOST_NET is not set
+
+#
+# Distributed Switch Architecture drivers
+#
+# CONFIG_NET_DSA_MV88E6XXX is not set
+# CONFIG_NET_DSA_MV88E6060 is not set
+# CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set
+# CONFIG_NET_DSA_MV88E6131 is not set
+# CONFIG_NET_DSA_MV88E6123_61_65 is not set
+CONFIG_ETHERNET=y
+CONFIG_NET_VENDOR_3COM=y
+# CONFIG_EL3 is not set
+# CONFIG_3C515 is not set
+# CONFIG_VORTEX is not set
+# CONFIG_TYPHOON is not set
+CONFIG_NET_VENDOR_ADAPTEC=y
+# CONFIG_ADAPTEC_STARFIRE is not set
+CONFIG_NET_VENDOR_ALTEON=y
+# CONFIG_ACENIC is not set
+CONFIG_NET_VENDOR_AMD=y
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_LANCE is not set
+# CONFIG_PCNET32 is not set
+# CONFIG_NI65 is not set
+CONFIG_NET_VENDOR_ATHEROS=y
+# CONFIG_ATL2 is not set
+# CONFIG_ATL1 is not set
+# CONFIG_ATL1E is not set
+# CONFIG_ATL1C is not set
+# CONFIG_ALX is not set
+CONFIG_NET_CADENCE=y
+# CONFIG_ARM_AT91_ETHER is not set
+# CONFIG_MACB is not set
+CONFIG_NET_VENDOR_BROADCOM=y
+# CONFIG_B44 is not set
+# CONFIG_BNX2 is not set
+# CONFIG_CNIC is not set
 CONFIG_TIGON3=y
-CONFIG_NET_TULIP=y
-CONFIG_E100=y
-CONFIG_E1000=y
+# CONFIG_BNX2X is not set
+CONFIG_NET_VENDOR_BROCADE=y
+# CONFIG_BNA is not set
+# CONFIG_NET_CALXEDA_XGMAC is not set
+CONFIG_NET_VENDOR_CHELSIO=y
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_CHELSIO_T3 is not set
+# CONFIG_CHELSIO_T4 is not set
+# CONFIG_CHELSIO_T4VF is not set
+CONFIG_NET_VENDOR_CIRRUS=y
+# CONFIG_CS89x0 is not set
+CONFIG_NET_VENDOR_CISCO=y
+# CONFIG_ENIC is not set
+# CONFIG_DNET is not set
+CONFIG_NET_VENDOR_DEC=y
+# CONFIG_NET_TULIP is not set
+CONFIG_NET_VENDOR_DLINK=y
+# CONFIG_DL2K is not set
+# CONFIG_SUNDANCE is not set
+CONFIG_NET_VENDOR_EMULEX=y
+# CONFIG_BE2NET is not set
+CONFIG_NET_VENDOR_EXAR=y
+# CONFIG_S2IO is not set
+# CONFIG_VXGE is not set
+CONFIG_NET_VENDOR_FUJITSU=y
+CONFIG_NET_VENDOR_HP=y
+# CONFIG_HP100 is not set
+CONFIG_NET_VENDOR_INTEL=y
+# CONFIG_E100 is not set
+# CONFIG_E1000 is not set
 CONFIG_E1000E=y
-CONFIG_SKY2=y
-CONFIG_NE2K_PCI=y
-CONFIG_FORCEDETH=y
-CONFIG_8139TOO=y
-# CONFIG_8139TOO_PIO is not set
-CONFIG_R8169=y
+# CONFIG_IGB is not set
+# CONFIG_IGBVF is not set
+# CONFIG_IXGB is not set
+# CONFIG_IXGBE is not set
+# CONFIG_IXGBEVF is not set
+CONFIG_NET_VENDOR_I825XX=y
+# CONFIG_IP1000 is not set
+# CONFIG_JME is not set
+CONFIG_NET_VENDOR_MARVELL=y
+# CONFIG_MVMDIO is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+CONFIG_NET_VENDOR_MELLANOX=y
+# CONFIG_MLX4_EN is not set
+# CONFIG_MLX4_CORE is not set
+CONFIG_NET_VENDOR_MICREL=y
+# CONFIG_KS8851_MLL is not set
+# CONFIG_KSZ884X_PCI is not set
+CONFIG_NET_VENDOR_MYRI=y
+# CONFIG_MYRI10GE is not set
+# CONFIG_FEALNX is not set
+CONFIG_NET_VENDOR_NATSEMI=y
+# CONFIG_NATSEMI is not set
+# CONFIG_NS83820 is not set
+CONFIG_NET_VENDOR_8390=y
+# CONFIG_NE2000 is not set
+# CONFIG_NE2K_PCI is not set
+# CONFIG_ULTRA is not set
+# CONFIG_WD80x3 is not set
+CONFIG_NET_VENDOR_NVIDIA=y
+# CONFIG_FORCEDETH is not set
+CONFIG_NET_VENDOR_OKI=y
+# CONFIG_PCH_GBE is not set
+# CONFIG_ETHOC is not set
+CONFIG_NET_PACKET_ENGINE=y
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+CONFIG_NET_VENDOR_QLOGIC=y
+# CONFIG_QLA3XXX is not set
+# CONFIG_QLCNIC is not set
+# CONFIG_QLGE is not set
+# CONFIG_NETXEN_NIC is not set
+CONFIG_NET_VENDOR_REALTEK=y
+# CONFIG_ATP is not set
+# CONFIG_8139CP is not set
+# CONFIG_8139TOO is not set
+# CONFIG_R8169 is not set
+CONFIG_NET_VENDOR_RDC=y
+# CONFIG_R6040 is not set
+CONFIG_NET_VENDOR_SEEQ=y
+CONFIG_NET_VENDOR_SILAN=y
+# CONFIG_SC92031 is not set
+CONFIG_NET_VENDOR_SIS=y
+# CONFIG_SIS900 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SFC is not set
+CONFIG_NET_VENDOR_SMSC=y
+# CONFIG_SMC9194 is not set
+# CONFIG_EPIC100 is not set
+# CONFIG_SMSC9420 is not set
+CONFIG_NET_VENDOR_STMICRO=y
+# CONFIG_STMMAC_ETH is not set
+CONFIG_NET_VENDOR_SUN=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NIU is not set
+CONFIG_NET_VENDOR_TEHUTI=y
+# CONFIG_TEHUTI is not set
+CONFIG_NET_VENDOR_TI=y
+# CONFIG_TLAN is not set
+CONFIG_NET_VENDOR_VIA=y
+# CONFIG_VIA_RHINE is not set
+# CONFIG_VIA_VELOCITY is not set
+CONFIG_NET_VENDOR_WIZNET=y
+# CONFIG_WIZNET_W5100 is not set
+# CONFIG_WIZNET_W5300 is not set
 CONFIG_FDDI=y
-CONFIG_INPUT_POLLDEV=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_DEFXX is not set
+# CONFIG_SKFP is not set
+CONFIG_HIPPI=y
+# CONFIG_ROADRUNNER is not set
+# CONFIG_NET_SB1000 is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_AT803X_PHY is not set
+# CONFIG_AMD_PHY is not set
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_BCM87XX_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MICREL_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+# CONFIG_PLIP is not set
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+# CONFIG_PPP_MPPE is not set
+CONFIG_PPP_MULTILINK=y
+# CONFIG_PPPOATM is not set
+# CONFIG_PPPOE is not set
+# CONFIG_PPPOLAC is not set
+# CONFIG_PPPOPNS is not set
+CONFIG_PPP_ASYNC=y
+# CONFIG_PPP_SYNC_TTY is not set
+# CONFIG_SLIP is not set
+CONFIG_SLHC=y
+# CONFIG_WLAN is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+CONFIG_WAN=y
+# CONFIG_HDLC is not set
+# CONFIG_DLCI is not set
+# CONFIG_SBNI is not set
+# CONFIG_VMXNET3 is not set
+CONFIG_ISDN=y
+# CONFIG_ISDN_I4L is not set
+# CONFIG_ISDN_CAPI is not set
+# CONFIG_ISDN_DRV_GIGASET is not set
+# CONFIG_HYSDN is not set
+# CONFIG_MISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
+# CONFIG_INPUT_MATRIXKMAP is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
 CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+# CONFIG_INPUT_KEYRESET is not set
+# CONFIG_INPUT_KEYCOMBO is not set
+# CONFIG_INPUT_CFBOOST is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ADP5589 is not set
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_QT1070 is not set
+# CONFIG_KEYBOARD_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_TCA8418 is not set
+# CONFIG_KEYBOARD_LM8333 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_MCS is not set
+# CONFIG_KEYBOARD_MPR121 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_CYPRESS=y
+CONFIG_MOUSE_PS2_LIFEBOOK=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_ELANTECH is not set
+# CONFIG_MOUSE_PS2_SENTELIC is not set
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_CYAPA is not set
+# CONFIG_MOUSE_INPORT is not set
+# CONFIG_MOUSE_LOGIBM is not set
+# CONFIG_MOUSE_PC110PAD is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_SYNAPTICS_I2C is not set
 CONFIG_INPUT_JOYSTICK=y
+# CONFIG_JOYSTICK_ANALOG is not set
+# CONFIG_JOYSTICK_A3D is not set
+# CONFIG_JOYSTICK_ADI is not set
+# CONFIG_JOYSTICK_COBRA is not set
+# CONFIG_JOYSTICK_GF2K is not set
+# CONFIG_JOYSTICK_GRIP is not set
+# CONFIG_JOYSTICK_GRIP_MP is not set
+# CONFIG_JOYSTICK_GUILLEMOT is not set
+# CONFIG_JOYSTICK_INTERACT is not set
+# CONFIG_JOYSTICK_SIDEWINDER is not set
+# CONFIG_JOYSTICK_TMDC is not set
+# CONFIG_JOYSTICK_IFORCE is not set
+# CONFIG_JOYSTICK_WARRIOR is not set
+# CONFIG_JOYSTICK_MAGELLAN is not set
+# CONFIG_JOYSTICK_SPACEORB is not set
+# CONFIG_JOYSTICK_SPACEBALL is not set
+# CONFIG_JOYSTICK_STINGER is not set
+# CONFIG_JOYSTICK_TWIDJOY is not set
+# CONFIG_JOYSTICK_ZHENHUA is not set
+# CONFIG_JOYSTICK_DB9 is not set
+# CONFIG_JOYSTICK_GAMECON is not set
+# CONFIG_JOYSTICK_TURBOGRAFX is not set
+# CONFIG_JOYSTICK_AS5011 is not set
+# CONFIG_JOYSTICK_JOYDUMP is not set
+# CONFIG_JOYSTICK_WALKERA0701 is not set
 CONFIG_INPUT_TABLET=y
 CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set
+# CONFIG_TOUCHSCREEN_BU21013 is not set
+# CONFIG_CYPRESS_SAR is not set
+# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set
+# CONFIG_TOUCHSCREEN_DYNAPRO is not set
+# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_ILI210X is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_WACOM_I2C is not set
+# CONFIG_TOUCHSCREEN_MAX11801 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MMS114 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_HTCPEN is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set
+# CONFIG_TOUCHSCREEN_PANJIT_I2C is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_PIXCIR is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC_SERIO is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_ST1232 is not set
+# CONFIG_TOUCHSCREEN_TPS6507X is not set
+# CONFIG_TOUCHSCREEN_MAX1187X is not set
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_I2C=y
+# CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_HID_I2C is not set
+# CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE is not set
 CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_AD714X is not set
+# CONFIG_INPUT_BMA150 is not set
+# CONFIG_INPUT_PCSPKR is not set
+# CONFIG_INPUT_MMA8450 is not set
+# CONFIG_INPUT_MPU3050 is not set
+# CONFIG_INPUT_WISTRON_BTNS is not set
+# CONFIG_INPUT_KEYCHORD is not set
+# CONFIG_INPUT_KXTJ9 is not set
+CONFIG_INPUT_UINPUT=y
+# CONFIG_INPUT_GPIO is not set
+# CONFIG_INPUT_PCF8574 is not set
+# CONFIG_INPUT_ADXL34X is not set
+# CONFIG_INPUT_CMA3000 is not set
+# CONFIG_INPUT_DRV2603_VIBRATOR is not set
+# CONFIG_INV_AK8975 is not set
+# CONFIG_INV_MPU is not set
+# CONFIG_INV_BMP180 is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PARKBD is not set
+# CONFIG_SERIO_PCIPS2 is not set
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=y
+# CONFIG_SERIO_ALTERA_PS2 is not set
+# CONFIG_SERIO_PS2MULT is not set
+# CONFIG_SERIO_ARC_PS2 is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_TTY=y
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
 CONFIG_VT_HW_CONSOLE_BINDING=y
-# CONFIG_LEGACY_PTYS is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
 CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_ROCKETPORT is not set
+# CONFIG_CYCLADES is not set
+# CONFIG_MOXA_INTELLIO is not set
+# CONFIG_MOXA_SMARTIO is not set
+# CONFIG_SYNCLINK is not set
+# CONFIG_SYNCLINKMP is not set
+# CONFIG_SYNCLINK_GT is not set
+# CONFIG_NOZOMI is not set
+# CONFIG_ISI is not set
+# CONFIG_N_HDLC is not set
+# CONFIG_N_GSM is not set
+# CONFIG_TRACE_SINK is not set
+CONFIG_DEVMEM=y
+CONFIG_DEVKMEM=y
+CONFIG_STALDRV=y
+
+#
+# Serial drivers
+#
 CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y
+CONFIG_SERIAL_8250_PNP=y
 CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_FIX_EARLYCON_MEM=y
+CONFIG_SERIAL_8250_PCI=y
+CONFIG_SERIAL_8250_NR_UARTS=48
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
 CONFIG_SERIAL_8250_EXTENDED=y
 CONFIG_SERIAL_8250_MANY_PORTS=y
+# CONFIG_HAS_NS_SUPER_IO_CHIP is not set
+# CONFIG_SERIAL_8250_FOURPORT is not set
+# CONFIG_SERIAL_8250_ACCENT is not set
+# CONFIG_SERIAL_8250_BOCA is not set
+# CONFIG_SERIAL_8250_EXAR_ST16C554 is not set
+# CONFIG_SERIAL_8250_HUB6 is not set
 CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_SERIAL_8250_DETECT_IRQ=y
+# CONFIG_SERIAL_8250_DETECT_IRQ is not set
 CONFIG_SERIAL_8250_RSA=y
+# CONFIG_SERIAL_8250_DW is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MFD_HSU is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_SCCNXP is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_PCH_UART is not set
+# CONFIG_SERIAL_ARC is not set
+# CONFIG_SERIAL_RP2 is not set
+
+#
+# Diag Support
+#
+
+#
+# DIAG traffic over USB
+#
+
+#
+# SDIO support for DIAG
+#
+
+#
+# HSIC/SMUX support for DIAG
+#
+CONFIG_PRINTER=y
+# CONFIG_LP_CONSOLE is not set
+# CONFIG_PPDEV is not set
+CONFIG_HVC_DRIVER=y
+CONFIG_VIRTIO_CONSOLE=y
+# CONFIG_IPMI_HANDLER is not set
 CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_HW_RANDOM_INTEL is not set
+# CONFIG_HW_RANDOM_AMD is not set
+# CONFIG_HW_RANDOM_GEODE is not set
+# CONFIG_HW_RANDOM_VIA is not set
+CONFIG_HW_RANDOM_VIRTIO=y
 CONFIG_NVRAM=y
-CONFIG_HPET=y
-# CONFIG_HPET_MMAP is not set
-CONFIG_I2C_I801=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_SONYPI is not set
+# CONFIG_MWAVE is not set
+# CONFIG_PC8736x_GPIO is not set
+# CONFIG_NSC_GPIO is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_HANGCHECK_TIMER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+CONFIG_DEVPORT=y
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+# CONFIG_I2C_CHARDEV is not set
+# CONFIG_I2C_MUX is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_INPUT_CWSTM32 is not set
+# CONFIG_I2C_SLAVE is not set
+# CONFIG_I2C_HELPER_AUTO is not set
+# CONFIG_I2C_SMBUS is not set
+
+#
+# I2C Algorithms
+#
+CONFIG_I2C_ALGOBIT=y
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# PC SMBus host controller drivers
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+CONFIG_I2C_ISCH=y
+# CONFIG_I2C_ISMT is not set
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE_PCI is not set
+# CONFIG_I2C_EG20T is not set
+# CONFIG_I2C_INTEL_MID is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_PXA_PCI is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_ISA is not set
+# CONFIG_SCx200_ACB is not set
+# CONFIG_I2C_PCA_GMI is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_SPI is not set
+
+#
+# Qualcomm MSM SSBI bus support
+#
+# CONFIG_SSBI is not set
+# CONFIG_HSI is not set
+
+#
+# PPS support
+#
+CONFIG_PPS=y
+# CONFIG_PPS_DEBUG is not set
+
+#
+# PPS clients support
+#
+# CONFIG_PPS_CLIENT_KTIMER is not set
+# CONFIG_PPS_CLIENT_LDISC is not set
+# CONFIG_PPS_CLIENT_PARPORT is not set
+# CONFIG_PPS_CLIENT_GPIO is not set
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+CONFIG_PTP_1588_CLOCK=y
+
+#
+# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks.
+#
+# CONFIG_PTP_1588_CLOCK_PCH is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+CONFIG_GPIO_DEVRES=y
+# CONFIG_GPIOLIB is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_POWER_AVS is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_ABITUGURU3 is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7410 is not set
+# CONFIG_SENSORS_ADT7411 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
+# CONFIG_SENSORS_K8TEMP is not set
+# CONFIG_SENSORS_K10TEMP is not set
+# CONFIG_SENSORS_FAM15H_POWER is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS620 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_I5K_AMB is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_FSCHMD is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_HIH6130 is not set
+# CONFIG_SENSORS_CORETEMP is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_JC42 is not set
+# CONFIG_SENSORS_LINEAGE is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM73 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4151 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LTC4261 is not set
+# CONFIG_SENSORS_LM95234 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_LM95245 is not set
+# CONFIG_SENSORS_MAX16065 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX1668 is not set
+# CONFIG_SENSORS_MAX197 is not set
+# CONFIG_SENSORS_MAX6639 is not set
+# CONFIG_SENSORS_MAX6642 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_MAX6697 is not set
+# CONFIG_SENSORS_MCP3021 is not set
+# CONFIG_SENSORS_NCT6775 is not set
+# CONFIG_SENSORS_NTC_THERMISTOR is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_PMBUS is not set
+# CONFIG_SENSORS_SHT21 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_SMM665 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_EMC1403 is not set
+# CONFIG_SENSORS_EMC2103 is not set
+# CONFIG_SENSORS_EMC6W201 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_SCH56XX_COMMON is not set
+# CONFIG_SENSORS_SCH5627 is not set
+# CONFIG_SENSORS_SCH5636 is not set
+# CONFIG_SENSORS_ADS1015 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_AMC6821 is not set
+# CONFIG_SENSORS_INA209 is not set
+# CONFIG_SENSORS_INA2XX is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP102 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_TMON_TMP411 is not set
+# CONFIG_SENSORS_VIA_CPUTEMP is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83795 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_APPLESMC is not set
+# CONFIG_SENSORS_INA219 is not set
+# CONFIG_SENSORS_INA230 is not set
+# CONFIG_SENSORS_INA3221 is not set
+# CONFIG_BATTERY_SYSTEM_VOLTAGE_MONITOR is not set
+# CONFIG_CABLE_VBUS_MONITOR is not set
+# CONFIG_THERMAL is not set
+
+#
+# Trusty
+#
+# CONFIG_TRUSTY is not set
 CONFIG_WATCHDOG=y
-CONFIG_AGP=y
-CONFIG_AGP_AMD64=y
-CONFIG_AGP_INTEL=y
-CONFIG_DRM=y
-CONFIG_DRM_I915=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_TILEBLITTING=y
-CONFIG_FB_EFI=y
-# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_WATCHDOG_CORE=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_ACQUIRE_WDT is not set
+# CONFIG_ADVANTECH_WDT is not set
+# CONFIG_ALIM1535_WDT is not set
+# CONFIG_ALIM7101_WDT is not set
+# CONFIG_F71808E_WDT is not set
+# CONFIG_SP5100_TCO is not set
+# CONFIG_SC520_WDT is not set
+# CONFIG_SBC_FITPC2_WATCHDOG is not set
+# CONFIG_EUROTECH_WDT is not set
+# CONFIG_IB700_WDT is not set
+# CONFIG_IBMASR is not set
+# CONFIG_WAFER_WDT is not set
+# CONFIG_I6300ESB_WDT is not set
+# CONFIG_IE6XX_WDT is not set
+CONFIG_ITCO_WDT=y
+CONFIG_ITCO_VENDOR_SUPPORT=y
+# CONFIG_IT8712F_WDT is not set
+# CONFIG_IT87_WDT is not set
+# CONFIG_HP_WATCHDOG is not set
+# CONFIG_SC1200_WDT is not set
+# CONFIG_PC87413_WDT is not set
+# CONFIG_NV_TCO is not set
+# CONFIG_60XX_WDT is not set
+# CONFIG_SBC8360_WDT is not set
+# CONFIG_SBC7240_WDT is not set
+# CONFIG_CPU5_WDT is not set
+# CONFIG_SMSC_SCH311X_WDT is not set
+# CONFIG_SMSC37B787_WDT is not set
+# CONFIG_VIA_WDT is not set
+# CONFIG_W83627HF_WDT is not set
+# CONFIG_W83697HF_WDT is not set
+# CONFIG_W83697UG_WDT is not set
+# CONFIG_W83877F_WDT is not set
+# CONFIG_W83977F_WDT is not set
+# CONFIG_MACHZ_WDT is not set
+# CONFIG_SBC_EPX_C3_WATCHDOG is not set
+
+#
+# ISA-based Watchdog Cards
+#
+# CONFIG_PCWATCHDOG is not set
+# CONFIG_MIXCOMWD is not set
+# CONFIG_WDT is not set
+
+#
+# PCI-based Watchdog Cards
+#
+# CONFIG_PCIPCWATCHDOG is not set
+# CONFIG_WDTPCI is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+CONFIG_BCMA_POSSIBLE=y
+
+#
+# Broadcom specific AMBA
+#
+# CONFIG_BCMA is not set
+
+#
+# Multifunction device drivers
+#
+CONFIG_MFD_CORE=y
+# CONFIG_MFD_CS5535 is not set
+# CONFIG_MFD_AS3711 is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_AIC3256 is not set
+# CONFIG_MFD_AIC3256_I2C is not set
+# CONFIG_MFD_CROS_EC is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_DA9052_I2C is not set
+# CONFIG_MFD_DA9055 is not set
+# CONFIG_MFD_MC13XXX_I2C is not set
+# CONFIG_HTC_PASIC3 is not set
+CONFIG_LPC_ICH=y
+CONFIG_LPC_SCH=y
+# CONFIG_MFD_JANZ_CMODIO is not set
+# CONFIG_MFD_88PM800 is not set
+# CONFIG_MFD_88PM805 is not set
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_MAX77665 is not set
+# CONFIG_MFD_MAX77686 is not set
+# CONFIG_MFD_MAX77693 is not set
+# CONFIG_MFD_MAX8907 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_MAX8997 is not set
+# CONFIG_MFD_MAX8998 is not set
+# CONFIG_MFD_RETU is not set
+# CONFIG_AIC3XXX_I2C_REGMAP is not set
+# CONFIG_AIC3XXX_CORE is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_RDC321X is not set
+# CONFIG_MFD_RTSX_PCI is not set
+# CONFIG_MFD_RC5T583 is not set
+# CONFIG_MFD_SEC_CORE is not set
+# CONFIG_MFD_SI476X_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_SMSC is not set
+# CONFIG_ABX500_CORE is not set
+# CONFIG_MFD_MAX8831 is not set
+# CONFIG_MFD_STMPE is not set
+# CONFIG_MFD_AS3722 is not set
+# CONFIG_MFD_SYSCON is not set
+# CONFIG_MFD_TI_AM335X_TSCADC is not set
+# CONFIG_MFD_LP8788 is not set
+# CONFIG_MFD_PALMAS is not set
+# CONFIG_MFD_MAX8907C is not set
+# CONFIG_MFD_MAX77663 is not set
+# CONFIG_MFD_AS3720 is not set
+# CONFIG_TPS6105X is not set
+# CONFIG_TPS6507X is not set
+# CONFIG_MFD_TPS65090 is not set
+# CONFIG_MFD_TPS65217 is not set
+# CONFIG_MFD_TPS6586X is not set
+# CONFIG_MFD_TPS80031 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_TWL6040_CORE is not set
+# CONFIG_MFD_WL1273_CORE is not set
+# CONFIG_MFD_LM3533 is not set
+# CONFIG_MFD_TC3589X is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_VX855 is not set
+# CONFIG_MFD_ARIZONA_I2C is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X_I2C is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_MAX77660 is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_AGP is not set
+CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
+# CONFIG_DRM is not set
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_EXYNOS_VIDEO is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Console display driver support
+#
+CONFIG_VGA_CONSOLE=y
 CONFIG_VGACON_SOFT_SCROLLBACK=y
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_SEQUENCER=y
-CONFIG_SND_SEQ_DUMMY=y
-CONFIG_SND_MIXER_OSS=y
-CONFIG_SND_PCM_OSS=y
-CONFIG_SND_SEQUENCER_OSS=y
-CONFIG_SND_HRTIMER=y
-CONFIG_SND_HDA_INTEL=y
-CONFIG_SND_HDA_HWDEP=y
-CONFIG_HIDRAW=y
+CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=256
+# CONFIG_MDA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_DS90UH925Q_SER is not set
+# CONFIG_SOUND is not set
+
+#
+# HID support
+#
+CONFIG_HID=y
+# CONFIG_HIDRAW is not set
+# CONFIG_UHID is not set
+CONFIG_HID_GENERIC=y
+
+#
+# Special HID drivers
+#
+CONFIG_HID_A4TECH=y
+# CONFIG_HID_ACRUX is not set
+CONFIG_HID_APPLE=y
+# CONFIG_HID_AUREAL is not set
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+# CONFIG_DRAGONRISE_FF is not set
+# CONFIG_HID_EMS_FF is not set
+# CONFIG_HID_ELECOM is not set
+CONFIG_HID_EZKEY=y
+# CONFIG_HID_KEYTOUCH is not set
+CONFIG_HID_KYE=y
+# CONFIG_HID_UCLOGIC is not set
+# CONFIG_HID_WALTOP is not set
 CONFIG_HID_GYRATION=y
-CONFIG_LOGITECH_FF=y
-CONFIG_HID_NTRIG=y
+# CONFIG_HID_ICADE is not set
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_KENSINGTON=y
+# CONFIG_HID_LCPOWER is not set
+CONFIG_HID_LOGITECH=y
+# CONFIG_HID_LOGITECH_DJ is not set
+# CONFIG_LOGITECH_FF is not set
+# CONFIG_LOGIRUMBLEPAD2_FF is not set
+# CONFIG_LOGIG940_FF is not set
+# CONFIG_LOGIWHEELS_FF is not set
+# CONFIG_HID_MAGICMOUSE is not set
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+# CONFIG_HID_MULTITOUCH is not set
+# CONFIG_HID_ORTEK is not set
 CONFIG_HID_PANTHERLORD=y
-CONFIG_PANTHERLORD_FF=y
+# CONFIG_PANTHERLORD_FF is not set
 CONFIG_HID_PETALYNX=y
+# CONFIG_HID_PICOLCD is not set
+# CONFIG_HID_PRIMAX is not set
+# CONFIG_HID_PS3REMOTE is not set
+# CONFIG_HID_SAITEK is not set
 CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
+# CONFIG_HID_SPEEDLINK is not set
+# CONFIG_HID_STEELSERIES is not set
 CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+# CONFIG_GREENASIA_FF is not set
+CONFIG_HID_SMARTJOYPLUS=y
+# CONFIG_SMARTJOYPLUS_FF is not set
+# CONFIG_HID_TIVO is not set
 CONFIG_HID_TOPSEED=y
-CONFIG_HID_PID=y
-CONFIG_USB_HIDDEV=y
-CONFIG_USB=y
-CONFIG_USB_DEBUG=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=y
-# CONFIG_USB_EHCI_TT_NEWSCHED is not set
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_UHCI_HCD=y
-CONFIG_USB_PRINTER=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_LIBUSUAL=y
+CONFIG_HID_THRUSTMASTER=y
+# CONFIG_THRUSTMASTER_FF is not set
+CONFIG_HID_ZEROPLUS=y
+# CONFIG_ZEROPLUS_FF is not set
+# CONFIG_HID_ZYDACRON is not set
+# CONFIG_HID_SENSOR_HUB is not set
+
+#
+# I2C HID support
+#
+# CONFIG_I2C_HID is not set
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB_ARCH_HAS_XHCI=y
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_UWB is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_SWITCH is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
 CONFIG_EDAC=y
-CONFIG_RTC_CLASS=y
-# CONFIG_RTC_HCTOSYS is not set
-CONFIG_DMADEVICES=y
-CONFIG_EEEPC_LAPTOP=y
-CONFIG_EFI_VARS=y
+CONFIG_EDAC_LEGACY_SYSFS=y
+# CONFIG_EDAC_DEBUG is not set
+CONFIG_EDAC_DECODE_MCE=y
+# CONFIG_EDAC_MCE_INJ is not set
+# CONFIG_EDAC_MM_EDAC is not set
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+# CONFIG_VIRT_DRIVERS is not set
+CONFIG_VIRTIO=y
+
+#
+# Virtio drivers
+#
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+# CONFIG_VIRTIO_MMIO is not set
+
+#
+# Microsoft Hyper-V guest support
+#
+# CONFIG_STAGING is not set
+# CONFIG_ANDROID is not set
+CONFIG_X86_PLATFORM_DEVICES=y
+# CONFIG_CHROMEOS_LAPTOP is not set
+# CONFIG_SENSORS_HDAPS is not set
+# CONFIG_IBM_RTL is not set
+# CONFIG_SAMSUNG_Q10 is not set
+
+#
+# Hardware Spinlock drivers
+#
+CONFIG_CLKSRC_I8253=y
+CONFIG_CLKEVT_I8253=y
+CONFIG_I8253_LOCK=y
+CONFIG_CLKBLD_I8253=y
+# CONFIG_MAILBOX is not set
+# CONFIG_IOMMU_SUPPORT is not set
+
+#
+# Remoteproc drivers
+#
+# CONFIG_STE_MODEM_RPROC is not set
+
+#
+# Rpmsg drivers
+#
+# CONFIG_PM_DEVFREQ is not set
+# CONFIG_EXTCON is not set
+# CONFIG_MIPI_BIF is not set
+# CONFIG_MEMORY is not set
+# CONFIG_IIO is not set
+# CONFIG_VME_BUS is not set
+# CONFIG_PWM is not set
+# CONFIG_IPACK_BUS is not set
+# CONFIG_RESET_CONTROLLER is not set
+
+#
+# SYSEDP Framework
+#
+# CONFIG_SYSEDP_FRAMEWORK is not set
+# CONFIG_GK20A is not set
+
+#
+# Android
+#
+
+#
+# Firmware Drivers
+#
+# CONFIG_EDD is not set
+CONFIG_FIRMWARE_MEMMAP=y
+# CONFIG_DELL_RBU is not set
+# CONFIG_DCDBAS is not set
+CONFIG_DMIID=y
+# CONFIG_DMI_SYSFS is not set
+CONFIG_ISCSI_IBFT_FIND=y
+# CONFIG_ISCSI_IBFT is not set
+# CONFIG_GOOGLE_FIRMWARE is not set
+
+#
+# File systems
+#
+CONFIG_DCACHE_WORD_ACCESS=y
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
 CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_USE_FOR_EXT23=y
+# CONFIG_EXT4_FS_POSIX_ACL is not set
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_DEBUG=y
+CONFIG_JBD2=y
+CONFIG_JBD2_DEBUG=y
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_EXPORTFS=y
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_FANOTIFY is not set
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
-# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_QUOTA_DEBUG is not set
+CONFIG_QUOTA_TREE=y
+CONFIG_QFMT_V1=y
 CONFIG_QFMT_V2=y
-CONFIG_AUTOFS4_FS=y
+CONFIG_QUOTACTL=y
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+CONFIG_GENERIC_ACL=y
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
-CONFIG_MSDOS_FS=y
+CONFIG_UDF_FS=y
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+# CONFIG_MSDOS_FS is not set
 CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+CONFIG_NTFS_FS=y
+# CONFIG_NTFS_DEBUG is not set
+CONFIG_NTFS_RW=y
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
 CONFIG_PROC_KCORE=y
+CONFIG_PROC_VMCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+# CONFIG_REPORT_PRESENT_CPUS is not set
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_XATTR=y
 CONFIG_HUGETLBFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_NLS_DEFAULT="utf8"
+CONFIG_HUGETLB_PAGE=y
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+# CONFIG_ECRYPT_FS_MESSAGING is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
+CONFIG_CRAMFS=y
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX6FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_PSTORE is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_F2FS_FS is not set
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="cp437"
 CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+CONFIG_NLS_CODEPAGE_850=y
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
 CONFIG_NLS_ASCII=y
 CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_UTF8=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_MAC_ROMAN is not set
+# CONFIG_NLS_MAC_CELTIC is not set
+# CONFIG_NLS_MAC_CENTEURO is not set
+# CONFIG_NLS_MAC_CROATIAN is not set
+# CONFIG_NLS_MAC_CYRILLIC is not set
+# CONFIG_NLS_MAC_GAELIC is not set
+# CONFIG_NLS_MAC_GREEK is not set
+# CONFIG_NLS_MAC_ICELAND is not set
+# CONFIG_NLS_MAC_INUIT is not set
+# CONFIG_NLS_MAC_ROMANIAN is not set
+# CONFIG_NLS_MAC_TURKISH is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Kernel hacking
+#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
 CONFIG_PRINTK_TIME=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-CONFIG_FRAME_WARN=2048
+CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
+CONFIG_ENABLE_WARN_DEPRECATED=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
 CONFIG_MAGIC_SYSRQ=y
-# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_STRIP_ASM_SYMS=y
+# CONFIG_READABLE_ASM is not set
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_SECTION_MISMATCH is not set
 CONFIG_DEBUG_KERNEL=y
-# CONFIG_SCHED_DEBUG is not set
+# CONFIG_LESS_GCC_OPT is not set
+# CONFIG_DEBUG_SHIRQ is not set
+# CONFIG_LOCKUP_DETECTOR is not set
+# CONFIG_PANIC_ON_OOPS is not set
+CONFIG_PANIC_ON_OOPS_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
 CONFIG_SCHEDSTATS=y
 CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+CONFIG_HAVE_DEBUG_KMEMLEAK=y
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_LOCK_ALLOC=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_LOCKDEP=y
+CONFIG_LOCK_STAT=y
+# CONFIG_DEBUG_LOCKDEP is not set
+CONFIG_TRACE_IRQFLAGS=y
+# CONFIG_DEBUG_ATOMIC_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
 CONFIG_DEBUG_STACK_USAGE=y
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_HIGHMEM is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_INFO_REDUCED is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_VIRTUAL is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_TEST_LIST_SORT is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+CONFIG_ARCH_WANT_FRAME_POINTERS=y
+CONFIG_FRAME_POINTER=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+
+#
+# RCU Debugging
+#
+# CONFIG_PROVE_RCU is not set
+# CONFIG_SPARSE_RCU_POINTER is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+# CONFIG_RCU_CPU_STALL_INFO is not set
+# CONFIG_RCU_TRACE is not set
+# CONFIG_KPROBES_SANITY_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_DEBUG_PER_CPU_MAPS is not set
+# CONFIG_LKDTM is not set
+# CONFIG_NOTIFIER_ERROR_INJECTION is not set
+# CONFIG_FAULT_INJECTION is not set
+CONFIG_LATENCYTOP=y
+CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS=y
+# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_WANT_PAGE_DEBUG_FLAGS=y
+CONFIG_PAGE_GUARD=y
+CONFIG_USER_STACKTRACE_SUPPORT=y
+CONFIG_NOP_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_TRACER_MAX_TRACE=y
+CONFIG_TRACE_CLOCK=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_TRACING=y
+CONFIG_GENERIC_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+CONFIG_FUNCTION_TRACER=y
+# CONFIG_IRQSOFF_TRACER is not set
+CONFIG_SCHED_TRACER=y
+# CONFIG_FTRACE_SYSCALLS is not set
+CONFIG_TRACER_SNAPSHOT=y
+# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_KPROBE_EVENT=y
+# CONFIG_UPROBE_EVENT is not set
+CONFIG_PROBE_EVENTS=y
+CONFIG_DYNAMIC_FTRACE=y
+CONFIG_DYNAMIC_FTRACE_WITH_REGS=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_FTRACE_MCOUNT_RECORD=y
+# CONFIG_FTRACE_STARTUP_TEST is not set
+# CONFIG_MMIOTRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_RING_BUFFER_STARTUP_TEST is not set
+# CONFIG_TRACELEVEL is not set
+# CONFIG_TRACEDUMP is not set
+# CONFIG_RBTREE_TEST is not set
+# CONFIG_INTERVAL_TREE_TEST is not set
 CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
-CONFIG_EARLY_PRINTK_DBGP=y
+CONFIG_DYNAMIC_DEBUG=y
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_HAVE_ARCH_KMEMCHECK=y
+# CONFIG_TEST_STRING_HELPERS is not set
+# CONFIG_TEST_KSTRTOX is not set
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_X86_VERBOSE_BOOTUP=y
+CONFIG_EARLY_PRINTK=y
+# CONFIG_EARLY_PRINTK_DBGP is not set
 CONFIG_DEBUG_STACKOVERFLOW=y
+# CONFIG_X86_PTDUMP is not set
+CONFIG_DEBUG_RODATA=y
 # CONFIG_DEBUG_RODATA_TEST is not set
+# CONFIG_DEBUG_SET_MODULE_RONX is not set
+# CONFIG_DEBUG_NX_TEST is not set
+CONFIG_DOUBLEFAULT=y
+# CONFIG_DEBUG_TLBFLUSH is not set
+# CONFIG_IOMMU_STRESS is not set
+CONFIG_HAVE_MMIOTRACE_SUPPORT=y
+# CONFIG_X86_DECODER_SELFTEST is not set
+CONFIG_IO_DELAY_TYPE_0X80=0
+CONFIG_IO_DELAY_TYPE_0XED=1
+CONFIG_IO_DELAY_TYPE_UDELAY=2
+CONFIG_IO_DELAY_TYPE_NONE=3
+CONFIG_IO_DELAY_0X80=y
+# CONFIG_IO_DELAY_0XED is not set
+# CONFIG_IO_DELAY_UDELAY is not set
+# CONFIG_IO_DELAY_NONE is not set
+CONFIG_DEFAULT_IO_DELAY_TYPE=0
 CONFIG_DEBUG_BOOT_PARAMS=y
+# CONFIG_CPA_DEBUG is not set
 CONFIG_OPTIMIZE_INLINING=y
+# CONFIG_DEBUG_NMI_SELFTEST is not set
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+CONFIG_ENCRYPTED_KEYS=y
 CONFIG_KEYS_DEBUG_PROC_KEYS=y
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
 CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
 CONFIG_SECURITY_NETWORK=y
-CONFIG_SECURITY_SELINUX=y
-CONFIG_SECURITY_SELINUX_BOOTPARAM=y
-CONFIG_SECURITY_SELINUX_DISABLE=y
+# CONFIG_SECURITY_NETWORK_XFRM is not set
+# CONFIG_SECURITY_PATH is not set
+# CONFIG_SECURITY_SELINUX is not set
+# CONFIG_SECURITY_SMACK is not set
+# CONFIG_SECURITY_TOMOYO is not set
+# CONFIG_SECURITY_APPARMOR is not set
+# CONFIG_TRUSTED_LITTLE_KERNEL is not set
+# CONFIG_SECURITY_YAMA is not set
+# CONFIG_IMA is not set
+# CONFIG_EVM is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_USER is not set
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
+CONFIG_CRYPTO_GF128MUL=y
+CONFIG_CRYPTO_NULL=y
+# CONFIG_CRYPTO_PCRYPT is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+CONFIG_CRYPTO_SEQIV=y
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CTR=y
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+CONFIG_CRYPTO_XTS=y
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_CMAC is not set
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=y
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=y
+# CONFIG_CRYPTO_CRC32C_INTEL is not set
+# CONFIG_CRYPTO_CRC32 is not set
+# CONFIG_CRYPTO_CRC32_PCLMUL is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=y
+# CONFIG_CRYPTO_RMD128 is not set
+CONFIG_CRYPTO_RMD160=y
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA512=y
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_AES_586=y
+# CONFIG_CRYPTO_AES_NI_INTEL is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_BLOWFISH=y
+CONFIG_CRYPTO_BLOWFISH_COMMON=y
+CONFIG_CRYPTO_CAMELLIA=y
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SALSA20_586 is not set
+# CONFIG_CRYPTO_SEED is not set
+CONFIG_CRYPTO_SERPENT=y
+# CONFIG_CRYPTO_SERPENT_SSE2_586 is not set
+# CONFIG_CRYPTO_TEA is not set
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_TWOFISH_COMMON=y
+# CONFIG_CRYPTO_TWOFISH_586 is not set
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_USER_API_HASH is not set
+# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
+CONFIG_CRYPTO_HW=y
+CONFIG_CRYPTO_DEV_PADLOCK=y
+# CONFIG_CRYPTO_DEV_PADLOCK_AES is not set
+# CONFIG_CRYPTO_DEV_PADLOCK_SHA is not set
+# CONFIG_CRYPTO_DEV_GEODE is not set
+# CONFIG_CRYPTO_DEV_TEGRA_SE is not set
+# CONFIG_ASYMMETRIC_KEY_TYPE is not set
+CONFIG_HAVE_KVM=y
+CONFIG_HAVE_KVM_IRQCHIP=y
+CONFIG_HAVE_KVM_IRQ_ROUTING=y
+CONFIG_HAVE_KVM_EVENTFD=y
+CONFIG_KVM_APIC_ARCHITECTURE=y
+CONFIG_KVM_MMIO=y
+CONFIG_KVM_ASYNC_PF=y
+CONFIG_HAVE_KVM_MSI=y
+CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_KVM_INTEL=y
+# CONFIG_KVM_AMD is not set
+# CONFIG_KVM_MMU_AUDIT is not set
+# CONFIG_LGUEST is not set
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_STRNCPY_FROM_USER=y
+CONFIG_GENERIC_STRNLEN_USER=y
+CONFIG_GENERIC_FIND_FIRST_BIT=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_IOMAP=y
+CONFIG_GENERIC_IO=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=y
+CONFIG_CRC_T10DIF=y
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC32=y
+# CONFIG_CRC32_SELFTEST is not set
+CONFIG_CRC32_SLICEBY8=y
+# CONFIG_CRC32_SLICEBY4 is not set
+# CONFIG_CRC32_SARWATE is not set
+# CONFIG_CRC32_BIT is not set
+# CONFIG_CRC7 is not set
+CONFIG_LIBCRC32C=y
+# CONFIG_CRC8 is not set
+CONFIG_AUDIT_GENERIC=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_XZ_DEC=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_BCJ=y
+# CONFIG_XZ_DEC_TEST is not set
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_DECOMPRESS_XZ=y
+CONFIG_DECOMPRESS_LZO=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_DQL=y
+CONFIG_NLATTR=y
+CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y
+CONFIG_AVERAGE=y
+# CONFIG_CORDIC is not set
+# CONFIG_DDR is not set
diff --git a/arch/x86/configs/i386_edison_defconfig b/arch/x86/configs/i386_edison_defconfig
new file mode 100644
index 0000000..4e68b91
--- /dev/null
+++ b/arch/x86/configs/i386_edison_defconfig
@@ -0,0 +1,3864 @@
+#
+# Automatically generated file; DO NOT EDIT.
+# Linux/x86 3.10.17 Kernel Configuration
+#
+# CONFIG_64BIT is not set
+CONFIG_X86_32=y
+CONFIG_X86=y
+CONFIG_INSTRUCTION_DECODER=y
+CONFIG_OUTPUT_FORMAT="elf32-i386"
+CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_MMU=y
+CONFIG_NEED_SG_DMA_LENGTH=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_ARCH_HAS_CPU_RELAX=y
+CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
+CONFIG_ARCH_HAS_CPU_AUTOPROBE=y
+CONFIG_HAVE_SETUP_PER_CPU_AREA=y
+CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
+CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_ZONE_DMA32 is not set
+# CONFIG_AUDIT_ARCH is not set
+CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
+CONFIG_X86_32_SMP=y
+CONFIG_X86_HT=y
+CONFIG_X86_32_LAZY_GS=y
+CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-ecx -fcall-saved-edx"
+CONFIG_ARCH_CPU_PROBE_RELEASE=y
+CONFIG_ARCH_SUPPORTS_UPROBES=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_IRQ_WORK=y
+CONFIG_BUILDTIME_EXTABLE_SORT=y
+
+#
+# General setup
+#
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE=""
+CONFIG_LOCALVERSION="-poky-edison"
+CONFIG_LOCALVERSION_AUTO=n
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_XZ=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_XZ is not set
+# CONFIG_KERNEL_LZO is not set
+CONFIG_DEFAULT_HOSTNAME="(none)"
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+CONFIG_FHANDLE=y
+CONFIG_AUDIT=y
+CONFIG_AUDITSYSCALL=y
+CONFIG_AUDIT_WATCH=y
+CONFIG_AUDIT_TREE=y
+# CONFIG_AUDIT_LOGINUID_IMMUTABLE is not set
+CONFIG_HAVE_GENERIC_HARDIRQS=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_PENDING_IRQ=y
+CONFIG_IRQ_DOMAIN=y
+# CONFIG_IRQ_DOMAIN_DEBUG is not set
+CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_CLOCKSOURCE_WATCHDOG=y
+CONFIG_KTIME_SCALAR=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+
+#
+# Timers subsystem
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ_COMMON=y
+# CONFIG_HZ_PERIODIC is not set
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+
+#
+# CPU/Task time and stats accounting
+#
+# CONFIG_TICK_CPU_ACCOUNTING is not set
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_PREEMPT_RCU=y
+CONFIG_RCU_STALL_COMMON=y
+CONFIG_RCU_FANOUT=32
+CONFIG_RCU_FANOUT_LEAF=16
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_RCU_FAST_NO_HZ is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_RCU_BOOST is not set
+# CONFIG_RCU_NOCB_CPU is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
+CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
+CONFIG_ARCH_WANTS_PROT_NUMA_PROT_NONE=y
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_PROC_PID_CPUSET=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+# CONFIG_MEMCG is not set
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_CFS_BANDWIDTH is not set
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_CGROUP=y
+# CONFIG_DEBUG_BLK_CGROUP is not set
+# CONFIG_CHECKPOINT_RESTORE is not set
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_NET_NS=y
+CONFIG_UIDGID_CONVERTED=y
+# CONFIG_UIDGID_STRICT_TYPE_CHECKS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_HAVE_UID16=y
+CONFIG_SYSCTL_EXCEPTION_TRACE=y
+CONFIG_HOTPLUG=y
+CONFIG_HAVE_PCSPKR_PLATFORM=y
+CONFIG_EXPERT=y
+# CONFIG_UPTIME_LIMITED_KERNEL is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_PCSPKR_PLATFORM=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_PCI_QUIRKS=y
+CONFIG_EMBEDDED=y
+CONFIG_HAVE_PERF_EVENTS=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
+CONFIG_OPROFILE=y
+# CONFIG_OPROFILE_EVENT_MULTIPLEX is not set
+CONFIG_HAVE_OPROFILE=y
+CONFIG_OPROFILE_NMI_TIMER=y
+CONFIG_KPROBES=y
+# CONFIG_JUMP_LABEL is not set
+CONFIG_KPROBES_ON_FTRACE=y
+# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_ARCH_USE_BUILTIN_BSWAP=y
+CONFIG_KRETPROBES=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_OPTPROBES=y
+CONFIG_HAVE_KPROBES_ON_FTRACE=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_DMA_ATTRS=y
+CONFIG_HAVE_DMA_CONTIGUOUS=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
+CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
+CONFIG_HAVE_USER_RETURN_NOTIFIER=y
+CONFIG_HAVE_PERF_EVENTS_NMI=y
+CONFIG_HAVE_PERF_REGS=y
+CONFIG_HAVE_PERF_USER_STACK_DUMP=y
+CONFIG_HAVE_ARCH_JUMP_LABEL=y
+CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
+CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
+CONFIG_HAVE_CMPXCHG_LOCAL=y
+CONFIG_HAVE_CMPXCHG_DOUBLE=y
+CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y
+CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
+CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
+CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
+CONFIG_MODULES_USE_ELF_REL=y
+CONFIG_CLONE_BACKWARDS=y
+CONFIG_OLD_SIGSUSPEND3=y
+CONFIG_OLD_SIGACTION=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+# CONFIG_MODULE_SIG is not set
+CONFIG_STOP_MACHINE=y
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_BSGLIB is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+CONFIG_BLK_DEV_THROTTLING=y
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+CONFIG_OSF_PARTITION=y
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+# CONFIG_LDM_PARTITION is not set
+CONFIG_SGI_PARTITION=y
+# CONFIG_ULTRIX_PARTITION is not set
+CONFIG_SUN_PARTITION=y
+# CONFIG_KARMA_PARTITION is not set
+CONFIG_EFI_PARTITION=y
+# CONFIG_SYSV68_PARTITION is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_CFQ_GROUP_IOSCHED is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_ASN1=y
+CONFIG_UNINLINE_SPIN_UNLOCK=y
+CONFIG_FREEZER=y
+
+#
+# Processor type and features
+#
+CONFIG_ZONE_DMA=y
+CONFIG_SMP=y
+CONFIG_X86_MPPARSE=y
+# CONFIG_X86_BIGSMP is not set
+CONFIG_X86_EXTENDED_PLATFORM=y
+# CONFIG_X86_GOLDFISH is not set
+CONFIG_X86_WANT_INTEL_MID=y
+CONFIG_X86_INTEL_MID=y
+# CONFIG_X86_MDFLD is not set
+CONFIG_ATOM_SOC_POWER=y
+# CONFIG_REMOVEME_INTEL_ATOM_MDFLD_POWER is not set
+# CONFIG_REMOVEME_INTEL_ATOM_CLV_POWER is not set
+CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER=y
+CONFIG_INTEL_DEBUG_FEATURE=y
+# CONFIG_X86_RDC321X is not set
+# CONFIG_X86_32_NON_STANDARD is not set
+CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y
+# CONFIG_X86_32_IRIS is not set
+# CONFIG_SCHED_OMIT_FRAME_POINTER is not set
+# CONFIG_HYPERVISOR_GUEST is not set
+CONFIG_NO_BOOTMEM=y
+# CONFIG_MEMTEST is not set
+# CONFIG_M486 is not set
+# CONFIG_M586 is not set
+# CONFIG_M586TSC is not set
+# CONFIG_M586MMX is not set
+# CONFIG_M686 is not set
+# CONFIG_MPENTIUMII is not set
+# CONFIG_MPENTIUMIII is not set
+# CONFIG_MPENTIUMM is not set
+# CONFIG_MPENTIUM4 is not set
+# CONFIG_MK6 is not set
+# CONFIG_MK7 is not set
+# CONFIG_MK8 is not set
+# CONFIG_MCRUSOE is not set
+# CONFIG_MEFFICEON is not set
+# CONFIG_MWINCHIPC6 is not set
+# CONFIG_MWINCHIP3D is not set
+# CONFIG_MELAN is not set
+# CONFIG_MGEODEGX1 is not set
+# CONFIG_MGEODE_LX is not set
+# CONFIG_MCYRIXIII is not set
+# CONFIG_MVIAC3_2 is not set
+# CONFIG_MVIAC7 is not set
+# CONFIG_MCORE2 is not set
+# CONFIG_MATOM is not set
+CONFIG_MSLM=y
+CONFIG_X86_GENERIC=y
+CONFIG_X86_INTERNODE_CACHE_SHIFT=6
+CONFIG_X86_L1_CACHE_SHIFT=6
+CONFIG_X86_INTEL_USERCOPY=y
+CONFIG_X86_USE_PPRO_CHECKSUM=y
+CONFIG_X86_TSC=y
+CONFIG_X86_CMPXCHG64=y
+CONFIG_X86_CMOV=y
+CONFIG_X86_MINIMUM_CPU_FAMILY=5
+CONFIG_X86_DEBUGCTLMSR=y
+# CONFIG_PROCESSOR_SELECT is not set
+CONFIG_CPU_SUP_INTEL=y
+CONFIG_CPU_SUP_CYRIX_32=y
+CONFIG_CPU_SUP_AMD=y
+CONFIG_CPU_SUP_CENTAUR=y
+CONFIG_CPU_SUP_TRANSMETA_32=y
+CONFIG_CPU_SUP_UMC_32=y
+# CONFIG_HPET_TIMER is not set
+# CONFIG_APB_TIMER is not set
+CONFIG_ARCH_NR_GPIO=512
+CONFIG_DMI=y
+CONFIG_NR_CPUS=2
+CONFIG_SCHED_SMT=y
+CONFIG_SCHED_MC=y
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+CONFIG_PREEMPT_COUNT=y
+CONFIG_X86_LOCAL_APIC=y
+CONFIG_X86_IO_APIC=y
+# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set
+CONFIG_X86_MCE=y
+CONFIG_X86_MCE_INTEL=y
+# CONFIG_X86_MCE_AMD is not set
+# CONFIG_X86_ANCIENT_MCE is not set
+CONFIG_X86_MCE_THRESHOLD=y
+# CONFIG_X86_MCE_INJECT is not set
+CONFIG_X86_THERMAL_VECTOR=y
+CONFIG_VM86=y
+# CONFIG_TOSHIBA is not set
+# CONFIG_I8K is not set
+CONFIG_X86_REBOOTFIXUPS=y
+# CONFIG_MICROCODE is not set
+CONFIG_X86_MSR=y
+CONFIG_X86_CPUID=y
+# CONFIG_NOHIGHMEM is not set
+# CONFIG_HIGHMEM4G is not set
+CONFIG_HIGHMEM64G=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_HIGHMEM=y
+CONFIG_X86_PAE=y
+CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
+CONFIG_ARCH_DMA_ADDR_T_64BIT=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_ILLEGAL_POINTER_VALUE=0
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_SPARSEMEM_STATIC=y
+CONFIG_HAVE_MEMBLOCK=y
+CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
+CONFIG_ARCH_DISCARD_MEMBLOCK=y
+# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=999999
+# CONFIG_COMPACTION is not set
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
+CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
+# CONFIG_MEMORY_FAILURE is not set
+# CONFIG_TRANSPARENT_HUGEPAGE is not set
+CONFIG_CROSS_MEMORY_ATTACH=y
+# CONFIG_CLEANCACHE is not set
+# CONFIG_HIGHPTE is not set
+CONFIG_X86_CHECK_BIOS_CORRUPTION=y
+# CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK is not set
+CONFIG_X86_RESERVE_LOW=64
+# CONFIG_MATH_EMULATION is not set
+CONFIG_MTRR=y
+CONFIG_MTRR_SANITIZER=y
+CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
+CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
+CONFIG_X86_PAT=y
+CONFIG_ARCH_USES_PG_UNCACHED=y
+# CONFIG_ARCH_RANDOM is not set
+CONFIG_X86_SMAP=y
+# CONFIG_SECCOMP is not set
+# CONFIG_CC_STACKPROTECTOR is not set
+CONFIG_HZ_100=y
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=100
+CONFIG_SCHED_HRTICK=y
+CONFIG_KEXEC=y
+# CONFIG_CRASH_DUMP is not set
+CONFIG_PHYSICAL_START=0x1200000
+# CONFIG_RELOCATABLE is not set
+CONFIG_PHYSICAL_ALIGN=0x100000
+CONFIG_HOTPLUG_CPU=y
+# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
+# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
+# CONFIG_COMPAT_VDSO is not set
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+
+#
+# Power management and ACPI options
+#
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_PM_SLEEP=y
+CONFIG_PM_SLEEP_SMP=y
+# CONFIG_PM_AUTOSLEEP is not set
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=100
+CONFIG_PM_WAKELOCKS_GC=y
+CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
+CONFIG_PM_ADVANCED_DEBUG=y
+# CONFIG_PM_TEST_SUSPEND is not set
+CONFIG_PM_SLEEP_DEBUG=y
+# CONFIG_PM_TRACE_RTC is not set
+# CONFIG_ACPI is not set
+CONFIG_SFI=y
+# CONFIG_APM is not set
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+
+#
+# x86 CPU frequency scaling drivers
+#
+# CONFIG_X86_INTEL_PSTATE is not set
+CONFIG_X86_SFI_CPUFREQ=y
+# CONFIG_X86_POWERNOW_K6 is not set
+# CONFIG_X86_POWERNOW_K7 is not set
+# CONFIG_X86_GX_SUSPMOD is not set
+# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
+# CONFIG_X86_SPEEDSTEP_ICH is not set
+# CONFIG_X86_SPEEDSTEP_SMI is not set
+# CONFIG_X86_P4_CLOCKMOD is not set
+# CONFIG_X86_CPUFREQ_NFORCE2 is not set
+# CONFIG_X86_LONGRUN is not set
+
+#
+# shared options
+#
+# CONFIG_X86_SPEEDSTEP_LIB is not set
+CONFIG_CPU_IDLE=y
+# CONFIG_CPU_IDLE_MULTIPLE_DRIVERS is not set
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set
+CONFIG_INTEL_IDLE=y
+
+#
+# Bus options (PCI etc.)
+#
+CONFIG_PCI=y
+# CONFIG_PCI_GOBIOS is not set
+# CONFIG_PCI_GOMMCONFIG is not set
+# CONFIG_PCI_GODIRECT is not set
+CONFIG_PCI_GOANY=y
+CONFIG_PCI_BIOS=y
+CONFIG_PCI_DIRECT=y
+CONFIG_PCI_MMCONFIG=y
+CONFIG_PCI_DOMAINS=y
+# CONFIG_PCI_CNB20LE_QUIRK is not set
+CONFIG_PCIEPORTBUS=y
+# CONFIG_PCIEAER is not set
+CONFIG_PCIEASPM=y
+# CONFIG_PCIEASPM_DEBUG is not set
+# CONFIG_PCIEASPM_DEFAULT is not set
+# CONFIG_PCIEASPM_POWERSAVE is not set
+CONFIG_PCIEASPM_PERFORMANCE=y
+CONFIG_PCIE_PME=y
+CONFIG_ARCH_SUPPORTS_MSI=y
+CONFIG_PCI_MSI=y
+# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
+# CONFIG_PCI_STUB is not set
+CONFIG_HT_IRQ=y
+# CONFIG_PCI_IOV is not set
+# CONFIG_PCI_PRI is not set
+# CONFIG_PCI_PASID is not set
+CONFIG_PCI_LABEL=y
+CONFIG_ISA_DMA_API=y
+# CONFIG_ISA is not set
+# CONFIG_SCx200 is not set
+# CONFIG_ALIX is not set
+# CONFIG_NET5501 is not set
+# CONFIG_GEOS is not set
+CONFIG_AMD_NB=y
+# CONFIG_PCCARD is not set
+# CONFIG_HOTPLUG_PCI is not set
+# CONFIG_RAPIDIO is not set
+
+#
+# Executable file formats / Emulations
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+CONFIG_BINFMT_SCRIPT=y
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_COREDUMP=y
+CONFIG_HAVE_ATOMIC_IOMAP=y
+CONFIG_HAVE_TEXT_POKE_SMP=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_DIAG is not set
+CONFIG_UNIX=y
+# CONFIG_UNIX_DIAG is not set
+CONFIG_XFRM=y
+CONFIG_XFRM_ALGO=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_MIGRATE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+# CONFIG_IP_FIB_TRIE_STATS is not set
+CONFIG_IP_MULTIPLE_TABLES=y
+# CONFIG_IP_ROUTE_MULTIPATH is not set
+# CONFIG_IP_ROUTE_VERBOSE is not set
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE_DEMUX is not set
+CONFIG_NET_IP_TUNNEL=y
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_NET_IPVTI is not set
+# CONFIG_INET_AH is not set
+CONFIG_INET_ESP=y
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+CONFIG_INET_TUNNEL=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+CONFIG_INET_LRO=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_INET_UDP_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+CONFIG_INET6_XFRM_MODE_TRANSPORT=y
+CONFIG_INET6_XFRM_MODE_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_BEET=y
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=y
+# CONFIG_IPV6_SIT_6RD is not set
+CONFIG_IPV6_NDISC_NODETYPE=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_GRE is not set
+CONFIG_IPV6_MULTIPLE_TABLES=y
+# CONFIG_IPV6_SUBTREES is not set
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_NETLABEL is not set
+CONFIG_NETWORK_SECMARK=y
+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_BRIDGE_NETFILTER=y
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_NETLINK=y
+# CONFIG_NETFILTER_NETLINK_ACCT is not set
+# CONFIG_NETFILTER_NETLINK_QUEUE is not set
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_MARK=y
+# CONFIG_NF_CONNTRACK_SECMARK is not set
+CONFIG_NF_CONNTRACK_PROCFS=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+# CONFIG_NF_CONNTRACK_TIMEOUT is not set
+# CONFIG_NF_CONNTRACK_TIMESTAMP is not set
+# CONFIG_NF_CT_PROTO_DCCP is not set
+# CONFIG_NF_CT_PROTO_SCTP is not set
+# CONFIG_NF_CT_PROTO_UDPLITE is not set
+# CONFIG_NF_CONNTRACK_AMANDA is not set
+# CONFIG_NF_CONNTRACK_FTP is not set
+# CONFIG_NF_CONNTRACK_H323 is not set
+# CONFIG_NF_CONNTRACK_IRC is not set
+# CONFIG_NF_CONNTRACK_NETBIOS_NS is not set
+# CONFIG_NF_CONNTRACK_SNMP is not set
+# CONFIG_NF_CONNTRACK_PPTP is not set
+# CONFIG_NF_CONNTRACK_SANE is not set
+# CONFIG_NF_CONNTRACK_SIP is not set
+# CONFIG_NF_CONNTRACK_TFTP is not set
+# CONFIG_NF_CT_NETLINK is not set
+# CONFIG_NF_CT_NETLINK_TIMEOUT is not set
+CONFIG_NF_NAT=y
+CONFIG_NF_NAT_NEEDED=y
+# CONFIG_NF_NAT_AMANDA is not set
+# CONFIG_NF_NAT_FTP is not set
+# CONFIG_NF_NAT_IRC is not set
+# CONFIG_NF_NAT_SIP is not set
+# CONFIG_NF_NAT_TFTP is not set
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NETFILTER_XTABLES=y
+
+#
+# Xtables combined modules
+#
+CONFIG_NETFILTER_XT_MARK=y
+CONFIG_NETFILTER_XT_CONNMARK=y
+
+#
+# Xtables targets
+#
+# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set
+# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set
+# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+# CONFIG_NETFILTER_XT_TARGET_CT is not set
+# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
+CONFIG_NETFILTER_XT_TARGET_HL=y
+# CONFIG_NETFILTER_XT_TARGET_HMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set
+# CONFIG_NETFILTER_XT_TARGET_LED is not set
+# CONFIG_NETFILTER_XT_TARGET_LOG is not set
+# CONFIG_NETFILTER_XT_TARGET_MARK is not set
+CONFIG_NETFILTER_XT_TARGET_NETMAP=y
+# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
+# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set
+# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
+# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
+CONFIG_NETFILTER_XT_TARGET_REDIRECT=y
+# CONFIG_NETFILTER_XT_TARGET_TEE is not set
+# CONFIG_NETFILTER_XT_TARGET_TPROXY is not set
+# CONFIG_NETFILTER_XT_TARGET_TRACE is not set
+# CONFIG_NETFILTER_XT_TARGET_SECMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
+# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
+
+#
+# Xtables matches
+#
+# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_BPF is not set
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=y
+# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNLABEL is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNLIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNMARK is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNTRACK is not set
+# CONFIG_NETFILTER_XT_MATCH_CPU is not set
+# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
+# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set
+# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
+# CONFIG_NETFILTER_XT_MATCH_ECN is not set
+# CONFIG_NETFILTER_XT_MATCH_ESP is not set
+# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_HL=y
+# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
+# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set
+# CONFIG_NETFILTER_XT_MATCH_LIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_MAC is not set
+# CONFIG_NETFILTER_XT_MATCH_MARK is not set
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set
+# CONFIG_NETFILTER_XT_MATCH_OSF is not set
+# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
+# CONFIG_NETFILTER_XT_MATCH_POLICY is not set
+# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set
+# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
+# CONFIG_NETFILTER_XT_MATCH_REALM is not set
+# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
+# CONFIG_NETFILTER_XT_MATCH_STRING is not set
+# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
+# CONFIG_NETFILTER_XT_MATCH_TIME is not set
+# CONFIG_NETFILTER_XT_MATCH_U32 is not set
+# CONFIG_IP_SET is not set
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV4=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y
+CONFIG_IP_NF_IPTABLES=y
+# CONFIG_IP_NF_MATCH_AH is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_RPFILTER is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+# CONFIG_IP_NF_TARGET_ULOG is not set
+CONFIG_NF_NAT_IPV4=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+# CONFIG_NF_NAT_PPTP is not set
+# CONFIG_NF_NAT_H323 is not set
+CONFIG_IP_NF_MANGLE=y
+# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
+# CONFIG_IP_NF_TARGET_ECN is not set
+# CONFIG_IP_NF_TARGET_TTL is not set
+CONFIG_IP_NF_RAW=y
+# CONFIG_IP_NF_SECURITY is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV6=y
+# CONFIG_NF_CONNTRACK_IPV6 is not set
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_EUI64=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+# CONFIG_IP6_NF_MATCH_RPFILTER is not set
+CONFIG_IP6_NF_MATCH_RT=y
+CONFIG_IP6_NF_TARGET_HL=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+# CONFIG_IP6_NF_SECURITY is not set
+# CONFIG_BRIDGE_NF_EBTABLES is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+CONFIG_L2TP=y
+# CONFIG_L2TP_DEBUGFS is not set
+# CONFIG_L2TP_V3 is not set
+CONFIG_STP=y
+CONFIG_BRIDGE=y
+CONFIG_BRIDGE_IGMP_SNOOPING=y
+CONFIG_HAVE_NET_DSA=y
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=y
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+CONFIG_DNS_RESOLVER=y
+# CONFIG_BATMAN_ADV is not set
+# CONFIG_OPENVSWITCH is not set
+# CONFIG_VSOCKETS is not set
+# CONFIG_NETLINK_MMAP is not set
+# CONFIG_NETLINK_DIAG is not set
+CONFIG_RPS=y
+CONFIG_RFS_ACCEL=y
+CONFIG_XPS=y
+# CONFIG_NETPRIO_CGROUP is not set
+CONFIG_BQL=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_TCPPROBE is not set
+# CONFIG_NET_DROP_MONITOR is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+
+#
+# Bluetooth device drivers
+#
+# CONFIG_BT_HCIBTUSB is not set
+# CONFIG_BT_HCIBTSDIO is not set
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+# CONFIG_BT_HCIUART_BCSP is not set
+# CONFIG_BT_HCIUART_ATH3K is not set
+# CONFIG_BT_HCIUART_LL is not set
+# CONFIG_BT_HCIUART_3WIRE is not set
+# CONFIG_BT_HCIBCM203X is not set
+# CONFIG_BT_HCIBPA10X is not set
+# CONFIG_BT_HCIBFUSB is not set
+# CONFIG_BT_HCIVHCI is not set
+# CONFIG_BT_MRVL is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
+CONFIG_CFG80211=y
+# CONFIG_NL80211_TESTMODE is not set
+CONFIG_CFG80211_DEVELOPER_WARNINGS=y
+# CONFIG_CFG80211_REG_DEBUG is not set
+# CONFIG_CFG80211_CERTIFICATION_ONUS is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_DEBUGFS is not set
+# CONFIG_CFG80211_INTERNAL_REGDB is not set
+CONFIG_CFG80211_WEXT=y
+# CONFIG_LIB80211 is not set
+CONFIG_MAC80211=m
+CONFIG_MAC80211_HAS_RC=y
+# CONFIG_MAC80211_RC_PID is not set
+CONFIG_MAC80211_RC_MINSTREL=y
+CONFIG_MAC80211_RC_MINSTREL_HT=y
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
+# CONFIG_MAC80211_MESH is not set
+# CONFIG_MAC80211_LEDS is not set
+# CONFIG_MAC80211_DEBUGFS is not set
+# CONFIG_MAC80211_MESSAGE_TRACING is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
+CONFIG_RFKILL=y
+CONFIG_RFKILL_LEDS=y
+CONFIG_RFKILL_INPUT=y
+# CONFIG_RFKILL_REGULATOR is not set
+# CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
+# CONFIG_CEPH_LIB is not set
+# CONFIG_NFC is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH=""
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+CONFIG_FW_LOADER_USER_HELPER=y
+# CONFIG_DEBUG_DRIVER is not set
+CONFIG_DEBUG_DEVRES=y
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_GENERIC_CPU_DEVICES is not set
+CONFIG_REGMAP=y
+CONFIG_REGMAP_I2C=y
+CONFIG_REGMAP_SPI=y
+CONFIG_REGMAP_IRQ=y
+CONFIG_DMA_SHARED_BUFFER=y
+# CONFIG_CMA is not set
+
+#
+# Bus devices
+#
+CONFIG_CONNECTOR=y
+CONFIG_PROC_EVENTS=y
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_DRBD is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_NVME is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_VIRTIO_BLK is not set
+# CONFIG_BLK_DEV_HD is not set
+# CONFIG_BLK_DEV_RBD is not set
+# CONFIG_BLK_DEV_RSXX is not set
+
+#
+# Misc devices
+#
+# CONFIG_SENSORS_LIS3LV02D is not set
+# CONFIG_AD525X_DPOT is not set
+# CONFIG_DUMMY_IRQ is not set
+# CONFIG_IBM_ASM is not set
+# CONFIG_PHANTOM is not set
+CONFIG_INTEL_MID_PTI=y
+CONFIG_INTEL_PTI_STM=y
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
+# CONFIG_ICS932S401 is not set
+# CONFIG_ATMEL_SSC is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_HP_ILO is not set
+# CONFIG_APDS9802ALS is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_ISL29020 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_SENSORS_BH1780 is not set
+# CONFIG_SENSORS_BH1770 is not set
+# CONFIG_SENSORS_APDS990X is not set
+# CONFIG_HMC6352 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_TI_DAC7512 is not set
+# CONFIG_BMP085_I2C is not set
+# CONFIG_BMP085_SPI is not set
+# CONFIG_PCH_PHUB is not set
+# CONFIG_USB_SWITCH_FSA9480 is not set
+# CONFIG_LATTICE_ECP3_CONFIG is not set
+# CONFIG_SRAM is not set
+CONFIG_EMMC_IPANIC=y
+CONFIG_EMMC_IPANIC_PLABEL="panic"
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_EEPROM_93XX46 is not set
+# CONFIG_CB710_CORE is not set
+
+#
+# Texas Instruments shared transport line discipline
+#
+# CONFIG_TI_ST is not set
+# CONFIG_SENSORS_LIS3_SPI is not set
+# CONFIG_SENSORS_LIS3_I2C is not set
+
+#
+# Altera FPGA firmware download module
+#
+# CONFIG_ALTERA_STAPL is not set
+# CONFIG_VMWARE_VMCI is not set
+CONFIG_BCM_BT_LPM=m
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_CHR_DEV_SCH is not set
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_TARGET_CORE is not set
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_FIREWIRE is not set
+# CONFIG_FIREWIRE_NOSY is not set
+# CONFIG_I2O is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+CONFIG_NETDEVICES=y
+CONFIG_NET_CORE=y
+# CONFIG_BONDING is not set
+# CONFIG_DUMMY is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_NET_FC is not set
+CONFIG_MII=y
+# CONFIG_NET_TEAM is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_VXLAN is not set
+CONFIG_NETCONSOLE=y
+# CONFIG_NETCONSOLE_DYNAMIC is not set
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
+CONFIG_TUN=y
+# CONFIG_VETH is not set
+# CONFIG_VIRTIO_NET is not set
+# CONFIG_ARCNET is not set
+
+#
+# CAIF transport drivers
+#
+# CONFIG_VHOST_NET is not set
+
+#
+# Distributed Switch Architecture drivers
+#
+# CONFIG_NET_DSA_MV88E6XXX is not set
+# CONFIG_NET_DSA_MV88E6060 is not set
+# CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set
+# CONFIG_NET_DSA_MV88E6131 is not set
+# CONFIG_NET_DSA_MV88E6123_61_65 is not set
+# CONFIG_ETHERNET is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_AT803X_PHY is not set
+# CONFIG_AMD_PHY is not set
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_BCM87XX_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MICREL_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+# CONFIG_MICREL_KS8995MA is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_RTL8152 is not set
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_AX8817X=y
+# CONFIG_USB_NET_AX88179_178A is not set
+# CONFIG_USB_NET_CDCETHER is not set
+# CONFIG_USB_NET_CDC_EEM is not set
+CONFIG_USB_NET_CDC_NCM=y
+# CONFIG_USB_NET_CDC_MBIM is not set
+# CONFIG_USB_NET_DM9601 is not set
+# CONFIG_USB_NET_SMSC75XX is not set
+# CONFIG_USB_NET_SMSC95XX is not set
+# CONFIG_USB_NET_GL620A is not set
+# CONFIG_USB_NET_NET1080 is not set
+# CONFIG_USB_NET_PLUSB is not set
+# CONFIG_USB_NET_MCS7830 is not set
+# CONFIG_USB_NET_RNDIS_HOST is not set
+CONFIG_USB_NET_CDC_SUBSET=y
+# CONFIG_USB_ALI_M5632 is not set
+# CONFIG_USB_AN2720 is not set
+# CONFIG_USB_BELKIN is not set
+# CONFIG_USB_ARMLINUX is not set
+# CONFIG_USB_EPSON2888 is not set
+# CONFIG_USB_KC2190 is not set
+# CONFIG_USB_NET_ZAURUS is not set
+# CONFIG_USB_NET_CX82310_ETH is not set
+# CONFIG_USB_NET_KALMIA is not set
+# CONFIG_USB_NET_QMI_WWAN is not set
+# CONFIG_USB_HSO is not set
+# CONFIG_USB_NET_INT51X1 is not set
+# CONFIG_USB_IPHETH is not set
+# CONFIG_USB_SIERRA_NET is not set
+CONFIG_WLAN=y
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_AIRO is not set
+# CONFIG_ATMEL is not set
+# CONFIG_AT76C50X_USB is not set
+# CONFIG_PRISM54 is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+# CONFIG_RTL8180 is not set
+# CONFIG_RTL8187 is not set
+# CONFIG_ADM8211 is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_MWL8K is not set
+CONFIG_WIFI_CONTROL_FUNC=y
+CONFIG_WIFI_PLATFORM_DATA=y
+# CONFIG_ATH_CARDS is not set
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+# CONFIG_BRCMFMAC is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_IPW2100 is not set
+# CONFIG_IPW2200 is not set
+# CONFIG_IWLWIFI is not set
+# CONFIG_IWL4965 is not set
+# CONFIG_IWL3945 is not set
+# CONFIG_LIBERTAS is not set
+# CONFIG_HERMES is not set
+# CONFIG_P54_COMMON is not set
+# CONFIG_RT2X00 is not set
+# CONFIG_RTLWIFI is not set
+# CONFIG_WL_TI is not set
+# CONFIG_ZD1211RW is not set
+# CONFIG_MWIFIEX is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+# CONFIG_VMXNET3 is not set
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+CONFIG_INPUT_POLLDEV=y
+CONFIG_INPUT_SPARSEKMAP=y
+# CONFIG_INPUT_MATRIXKMAP is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ADP5589 is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_QT1070 is not set
+# CONFIG_KEYBOARD_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_GPIO_POLLED=y
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_TCA8418 is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_LM8333 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_MCS is not set
+# CONFIG_KEYBOARD_MPR121 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+CONFIG_INPUT_MOUSE=y
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_CYAPA is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_GPIO is not set
+# CONFIG_MOUSE_SYNAPTICS_I2C is not set
+# CONFIG_MOUSE_SYNAPTICS_USB is not set
+CONFIG_INPUT_JOYSTICK=y
+# CONFIG_JOYSTICK_ANALOG is not set
+# CONFIG_JOYSTICK_A3D is not set
+# CONFIG_JOYSTICK_ADI is not set
+# CONFIG_JOYSTICK_COBRA is not set
+# CONFIG_JOYSTICK_GF2K is not set
+# CONFIG_JOYSTICK_GRIP is not set
+# CONFIG_JOYSTICK_GRIP_MP is not set
+# CONFIG_JOYSTICK_GUILLEMOT is not set
+# CONFIG_JOYSTICK_INTERACT is not set
+# CONFIG_JOYSTICK_SIDEWINDER is not set
+# CONFIG_JOYSTICK_TMDC is not set
+# CONFIG_JOYSTICK_IFORCE is not set
+# CONFIG_JOYSTICK_WARRIOR is not set
+# CONFIG_JOYSTICK_MAGELLAN is not set
+# CONFIG_JOYSTICK_SPACEORB is not set
+# CONFIG_JOYSTICK_SPACEBALL is not set
+# CONFIG_JOYSTICK_STINGER is not set
+# CONFIG_JOYSTICK_TWIDJOY is not set
+# CONFIG_JOYSTICK_ZHENHUA is not set
+# CONFIG_JOYSTICK_AS5011 is not set
+# CONFIG_JOYSTICK_JOYDUMP is not set
+# CONFIG_JOYSTICK_XPAD is not set
+CONFIG_INPUT_TABLET=y
+# CONFIG_TABLET_USB_ACECAD is not set
+# CONFIG_TABLET_USB_AIPTEK is not set
+# CONFIG_TABLET_USB_GTCO is not set
+# CONFIG_TABLET_USB_HANWANG is not set
+# CONFIG_TABLET_USB_KBTAB is not set
+# CONFIG_TABLET_USB_WACOM is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set
+# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set
+# CONFIG_TOUCHSCREEN_BU21013 is not set
+# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set
+# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set
+# CONFIG_TOUCHSCREEN_DYNAPRO is not set
+# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_ILI210X is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_WACOM_I2C is not set
+# CONFIG_TOUCHSCREEN_MAX11801 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MMS114 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_INTEL_MID is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_PIXCIR is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC_SERIO is not set
+# CONFIG_TOUCHSCREEN_TSC2005 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_ST1232 is not set
+# CONFIG_TOUCHSCREEN_TPS6507X is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_AD714X is not set
+# CONFIG_INPUT_BMA150 is not set
+# CONFIG_INPUT_PCSPKR is not set
+# CONFIG_INPUT_MMA8450 is not set
+# CONFIG_INPUT_MPU3050 is not set
+# CONFIG_INPUT_APANEL is not set
+# CONFIG_INPUT_GP2A is not set
+# CONFIG_INPUT_GPIO_TILT_POLLED is not set
+# CONFIG_INPUT_WISTRON_BTNS is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_KXTJ9 is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+CONFIG_INPUT_UINPUT=y
+# CONFIG_INPUT_PCF8574 is not set
+# CONFIG_INPUT_PWM_BEEPER is not set
+# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
+# CONFIG_INPUT_ADXL34X is not set
+# CONFIG_INPUT_IMS_PCU is not set
+# CONFIG_INPUT_CMA3000 is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+# CONFIG_SERIO_I8042 is not set
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PCIPS2 is not set
+# CONFIG_SERIO_LIBPS2 is not set
+# CONFIG_SERIO_RAW is not set
+# CONFIG_SERIO_ALTERA_PS2 is not set
+# CONFIG_SERIO_PS2MULT is not set
+# CONFIG_SERIO_ARC_PS2 is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_TTY=y
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_VT_CONSOLE_SLEEP=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_ROCKETPORT is not set
+# CONFIG_CYCLADES is not set
+# CONFIG_MOXA_INTELLIO is not set
+# CONFIG_MOXA_SMARTIO is not set
+# CONFIG_SYNCLINK is not set
+# CONFIG_SYNCLINKMP is not set
+# CONFIG_SYNCLINK_GT is not set
+# CONFIG_NOZOMI is not set
+# CONFIG_ISI is not set
+# CONFIG_N_HDLC is not set
+CONFIG_N_GSM=y
+# CONFIG_TRACE_SINK is not set
+CONFIG_DEVKMEM=y
+# CONFIG_STALDRV is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+CONFIG_FIX_EARLYCON_MEM=y
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_MAX310X is not set
+CONFIG_SERIAL_MRST_MAX3110=y
+CONFIG_SERIAL_MFD_HSU=y
+CONFIG_SERIAL_MFD_HSU_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_SCCNXP is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_IFX6X60 is not set
+# CONFIG_SERIAL_PCH_UART is not set
+# CONFIG_SERIAL_ARC is not set
+# CONFIG_SERIAL_RP2 is not set
+# CONFIG_TTY_PRINTK is not set
+# CONFIG_VIRTIO_CONSOLE is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+CONFIG_HW_RANDOM_INTEL=y
+CONFIG_HW_RANDOM_AMD=y
+CONFIG_HW_RANDOM_GEODE=y
+CONFIG_HW_RANDOM_VIA=y
+# CONFIG_HW_RANDOM_VIRTIO is not set
+CONFIG_NVRAM=y
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_SONYPI is not set
+# CONFIG_MWAVE is not set
+# CONFIG_PC8736x_GPIO is not set
+# CONFIG_NSC_GPIO is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_HANGCHECK_TIMER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+CONFIG_DEVPORT=y
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_MUX is not set
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# PC SMBus host controller drivers
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_ISCH is not set
+# CONFIG_I2C_ISMT is not set
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_CBUS_GPIO is not set
+CONFIG_I2C_DESIGNWARE_CORE_FORK=y
+CONFIG_I2C_DESIGNWARE_PCI_FORK=y
+# CONFIG_I2C_DESIGNWARE_PLATFORM_FORK is not set
+CONFIG_I2C_DW_SPEED_MODE_DEBUG=y
+# CONFIG_I2C_PMIC is not set
+# CONFIG_I2C_DESIGNWARE_PCI is not set
+# CONFIG_I2C_EG20T is not set
+CONFIG_I2C_GPIO=y
+# CONFIG_I2C_INTEL_MID is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_PXA_PCI is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_DIOLAN_U2C is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_SCx200_ACB is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_ALTERA is not set
+CONFIG_SPI_BITBANG=y
+CONFIG_SPI_GPIO=y
+CONFIG_SPI_INTEL_MID_SSP=y
+# CONFIG_SPI_OC_TINY is not set
+# CONFIG_SPI_PXA2XX is not set
+# CONFIG_SPI_PXA2XX_PCI is not set
+# CONFIG_SPI_SC18IS602 is not set
+# CONFIG_SPI_TOPCLIFF_PCH is not set
+# CONFIG_SPI_XCOMM is not set
+# CONFIG_SPI_XILINX is not set
+CONFIG_SPI_DESIGNWARE=y
+CONFIG_SPI_DW_PCI=y
+CONFIG_SPI_DW_MID_DMA=y
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_SPIDEV=y
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# Qualcomm MSM SSBI bus support
+#
+# CONFIG_SSBI is not set
+# CONFIG_HSI is not set
+
+#
+# PPS support
+#
+CONFIG_PPS=y
+# CONFIG_PPS_DEBUG is not set
+
+#
+# PPS clients support
+#
+# CONFIG_PPS_CLIENT_KTIMER is not set
+# CONFIG_PPS_CLIENT_LDISC is not set
+# CONFIG_PPS_CLIENT_GPIO is not set
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+CONFIG_PTP_1588_CLOCK=y
+
+#
+# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks.
+#
+# CONFIG_PTP_1588_CLOCK_PCH is not set
+CONFIG_ARCH_HAVE_CUSTOM_GPIO_H=y
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+CONFIG_GPIO_DEVRES=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIODEBUG=y
+
+#
+# Memory mapped GPIO drivers:
+#
+# CONFIG_GPIO_GENERIC_PLATFORM is not set
+# CONFIG_GPIO_IT8761E is not set
+# CONFIG_GPIO_TS5500 is not set
+# CONFIG_GPIO_SCH is not set
+# CONFIG_GPIO_ICH is not set
+# CONFIG_GPIO_VX855 is not set
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCA953X_IRQ=y
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_SX150X is not set
+# CONFIG_GPIO_WM8994 is not set
+# CONFIG_GPIO_ADP5588 is not set
+
+#
+# PCI GPIO expanders:
+#
+# CONFIG_GPIO_BT8XX is not set
+# CONFIG_GPIO_AMD8111 is not set
+CONFIG_GPIO_LANGWELL=y
+# CONFIG_GPIO_PCH is not set
+# CONFIG_GPIO_ML_IOH is not set
+# CONFIG_GPIO_RDC321X is not set
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+# CONFIG_GPIO_74X164 is not set
+
+#
+# AC97 GPIO expanders:
+#
+
+#
+# MODULbus GPIO expanders:
+#
+# CONFIG_GPIO_MSIC is not set
+
+#
+# USB GPIO expanders:
+#
+# CONFIG_W1 is not set
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+CONFIG_PMIC_CCSM=y
+CONFIG_BQ24261_CHARGER=y
+# CONFIG_PDA_POWER is not set
+# CONFIG_GENERIC_ADC_BATTERY is not set
+# CONFIG_TEST_POWER is not set
+# CONFIG_BATTERY_DS2780 is not set
+# CONFIG_BATTERY_DS2781 is not set
+# CONFIG_BATTERY_DS2782 is not set
+# CONFIG_BATTERY_SBS is not set
+# CONFIG_BATTERY_BQ27x00 is not set
+# CONFIG_BATTERY_MAX17040 is not set
+CONFIG_BATTERY_MAX17042=y
+# CONFIG_BATTERY_INTEL_MID is not set
+# CONFIG_CHARGER_ISP1704 is not set
+# CONFIG_CHARGER_MAX8903 is not set
+# CONFIG_CHARGER_LP8727 is not set
+# CONFIG_CHARGER_GPIO is not set
+# CONFIG_CHARGER_MANAGER is not set
+# CONFIG_CHARGER_BQ2415X is not set
+# CONFIG_CHARGER_SMB347 is not set
+# CONFIG_BATTERY_GOLDFISH is not set
+# CONFIG_POWER_RESET is not set
+# CONFIG_POWER_AVS is not set
+CONFIG_HWMON=y
+CONFIG_INTEL_MCU=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_ABITUGURU3 is not set
+# CONFIG_SENSORS_AD7314 is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7310 is not set
+# CONFIG_SENSORS_ADT7410 is not set
+# CONFIG_SENSORS_ADT7411 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
+# CONFIG_SENSORS_K8TEMP is not set
+# CONFIG_SENSORS_K10TEMP is not set
+# CONFIG_SENSORS_FAM15H_POWER is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS620 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_I5K_AMB is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_FSCHMD is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_GPIO_FAN is not set
+# CONFIG_SENSORS_HIH6130 is not set
+CONFIG_SENSORS_CORETEMP=y
+CONFIG_SENSORS_CORETEMP_INTERRUPT=y
+# CONFIG_SENSORS_IIO_HWMON is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_JC42 is not set
+# CONFIG_SENSORS_LINEAGE is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_LM73 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4151 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LTC4261 is not set
+# CONFIG_SENSORS_LM95234 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_LM95245 is not set
+CONFIG_MSIC_GPADC=y
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX16065 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX1668 is not set
+# CONFIG_SENSORS_MAX197 is not set
+# CONFIG_SENSORS_MAX6639 is not set
+# CONFIG_SENSORS_MAX6642 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_MAX6697 is not set
+# CONFIG_SENSORS_MCP3021 is not set
+# CONFIG_SENSORS_NCT6775 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_PMBUS is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_SHT21 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_SMM665 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_EMC1403 is not set
+# CONFIG_SENSORS_EMC2103 is not set
+# CONFIG_SENSORS_EMC6W201 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_SCH56XX_COMMON is not set
+# CONFIG_SENSORS_SCH5627 is not set
+# CONFIG_SENSORS_SCH5636 is not set
+# CONFIG_SENSORS_ADS1015 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_ADS7871 is not set
+# CONFIG_SENSORS_AMC6821 is not set
+# CONFIG_SENSORS_INA209 is not set
+# CONFIG_SENSORS_INA2XX is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP102 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_VIA_CPUTEMP is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83795 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_APPLESMC is not set
+CONFIG_THERMAL=y
+CONFIG_THERMAL_HWMON=y
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
+# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
+# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
+# CONFIG_THERMAL_GOV_FAIR_SHARE is not set
+CONFIG_THERMAL_GOV_STEP_WISE=y
+# CONFIG_THERMAL_GOV_USER_SPACE is not set
+# CONFIG_CPU_THERMAL is not set
+# CONFIG_THERMAL_EMULATION is not set
+CONFIG_INTEL_POWERCLAMP=y
+CONFIG_SENSORS_THERMAL_MRFLD=y
+CONFIG_SOC_THERMAL=y
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_CORE is not set
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_ACQUIRE_WDT is not set
+# CONFIG_ADVANTECH_WDT is not set
+# CONFIG_ALIM1535_WDT is not set
+# CONFIG_ALIM7101_WDT is not set
+# CONFIG_F71808E_WDT is not set
+# CONFIG_SP5100_TCO is not set
+# CONFIG_SC520_WDT is not set
+# CONFIG_SBC_FITPC2_WATCHDOG is not set
+# CONFIG_EUROTECH_WDT is not set
+# CONFIG_IB700_WDT is not set
+# CONFIG_IBMASR is not set
+# CONFIG_WAFER_WDT is not set
+# CONFIG_I6300ESB_WDT is not set
+# CONFIG_IE6XX_WDT is not set
+# CONFIG_INTEL_SCU_WATCHDOG is not set
+CONFIG_INTEL_SCU_WATCHDOG_EVO=y
+CONFIG_DISABLE_SCU_WATCHDOG=y
+# CONFIG_ITCO_WDT is not set
+# CONFIG_IT8712F_WDT is not set
+# CONFIG_IT87_WDT is not set
+# CONFIG_HP_WATCHDOG is not set
+# CONFIG_SC1200_WDT is not set
+# CONFIG_PC87413_WDT is not set
+# CONFIG_NV_TCO is not set
+# CONFIG_60XX_WDT is not set
+# CONFIG_SBC8360_WDT is not set
+# CONFIG_SBC7240_WDT is not set
+# CONFIG_CPU5_WDT is not set
+# CONFIG_SMSC_SCH311X_WDT is not set
+# CONFIG_SMSC37B787_WDT is not set
+# CONFIG_VIA_WDT is not set
+# CONFIG_W83627HF_WDT is not set
+# CONFIG_W83697HF_WDT is not set
+# CONFIG_W83697UG_WDT is not set
+# CONFIG_W83877F_WDT is not set
+# CONFIG_W83977F_WDT is not set
+# CONFIG_MACHZ_WDT is not set
+# CONFIG_SBC_EPX_C3_WATCHDOG is not set
+
+#
+# PCI-based Watchdog Cards
+#
+# CONFIG_PCIPCWATCHDOG is not set
+# CONFIG_WDTPCI is not set
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+CONFIG_BCMA_POSSIBLE=y
+
+#
+# Broadcom specific AMBA
+#
+# CONFIG_BCMA is not set
+
+#
+# Multifunction device drivers
+#
+CONFIG_MFD_CORE=y
+# CONFIG_MFD_CS5535 is not set
+# CONFIG_MFD_AS3711 is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_AAT2870_CORE is not set
+# CONFIG_MFD_CROS_EC is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_DA9052_SPI is not set
+# CONFIG_MFD_DA9052_I2C is not set
+# CONFIG_MFD_DA9055 is not set
+# CONFIG_MFD_MC13XXX_SPI is not set
+# CONFIG_MFD_MC13XXX_I2C is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
+# CONFIG_LPC_ICH is not set
+# CONFIG_LPC_SCH is not set
+CONFIG_MFD_INTEL_MSIC=y
+# CONFIG_MFD_JANZ_CMODIO is not set
+# CONFIG_MFD_88PM800 is not set
+# CONFIG_MFD_88PM805 is not set
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_MAX77686 is not set
+# CONFIG_MFD_MAX77693 is not set
+# CONFIG_MFD_MAX8907 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_MAX8997 is not set
+# CONFIG_MFD_MAX8998 is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_MFD_VIPERBOARD is not set
+# CONFIG_MFD_RETU is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_RDC321X is not set
+# CONFIG_MFD_RTSX_PCI is not set
+# CONFIG_MFD_RC5T583 is not set
+# CONFIG_MFD_SEC_CORE is not set
+# CONFIG_MFD_SI476X_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_SMSC is not set
+# CONFIG_ABX500_CORE is not set
+# CONFIG_MFD_STMPE is not set
+# CONFIG_MFD_SYSCON is not set
+# CONFIG_MFD_TI_AM335X_TSCADC is not set
+# CONFIG_MFD_LP8788 is not set
+# CONFIG_MFD_PALMAS is not set
+# CONFIG_TPS6105X is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TPS6507X is not set
+# CONFIG_MFD_TPS65090 is not set
+# CONFIG_MFD_TPS65217 is not set
+# CONFIG_MFD_TPS6586X is not set
+# CONFIG_MFD_TPS65910 is not set
+# CONFIG_MFD_TPS65912 is not set
+# CONFIG_MFD_TPS65912_I2C is not set
+# CONFIG_MFD_TPS65912_SPI is not set
+# CONFIG_MFD_TPS80031 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_TWL6040_CORE is not set
+# CONFIG_MFD_WL1273_CORE is not set
+# CONFIG_MFD_LM3533 is not set
+# CONFIG_MFD_TIMBERDALE is not set
+# CONFIG_MFD_TC3589X is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_VX855 is not set
+# CONFIG_MFD_ARIZONA_I2C is not set
+# CONFIG_MFD_ARIZONA_SPI is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X_I2C is not set
+# CONFIG_MFD_WM831X_SPI is not set
+# CONFIG_MFD_WM8350_I2C is not set
+CONFIG_MFD_WM8994=y
+CONFIG_REGULATOR=y
+# CONFIG_REGULATOR_DEBUG is not set
+# CONFIG_REGULATOR_DUMMY is not set
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
+# CONFIG_REGULATOR_GPIO is not set
+# CONFIG_REGULATOR_AD5398 is not set
+# CONFIG_REGULATOR_FAN53555 is not set
+# CONFIG_REGULATOR_ISL6271A is not set
+# CONFIG_REGULATOR_MAX1586 is not set
+# CONFIG_REGULATOR_MAX8649 is not set
+# CONFIG_REGULATOR_MAX8660 is not set
+# CONFIG_REGULATOR_MAX8952 is not set
+# CONFIG_REGULATOR_MAX8973 is not set
+# CONFIG_REGULATOR_LP3971 is not set
+# CONFIG_REGULATOR_LP3972 is not set
+# CONFIG_REGULATOR_LP872X is not set
+# CONFIG_REGULATOR_LP8755 is not set
+# CONFIG_REGULATOR_TPS51632 is not set
+# CONFIG_REGULATOR_TPS62360 is not set
+# CONFIG_REGULATOR_TPS65023 is not set
+# CONFIG_REGULATOR_TPS6507X is not set
+# CONFIG_REGULATOR_TPS6524X is not set
+CONFIG_REGULATOR_WM8994=y
+CONFIG_REGULATOR_PMIC_BASIN_COVE=y
+CONFIG_MEDIA_SUPPORT=y
+
+#
+# Multimedia core support
+#
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set
+# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set
+# CONFIG_MEDIA_RADIO_SUPPORT is not set
+# CONFIG_MEDIA_RC_SUPPORT is not set
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_DEV=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_V4L2=y
+# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
+CONFIG_VIDEOBUF2_CORE=m
+CONFIG_VIDEOBUF2_MEMOPS=m
+CONFIG_VIDEOBUF2_VMALLOC=m
+# CONFIG_VIDEO_V4L2_INT_DEVICE is not set
+# CONFIG_TTPCI_EEPROM is not set
+
+#
+# Media drivers
+#
+CONFIG_MEDIA_USB_SUPPORT=y
+
+#
+# Webcam devices
+#
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
+CONFIG_USB_GSPCA=m
+# CONFIG_USB_M5602 is not set
+# CONFIG_USB_STV06XX is not set
+# CONFIG_USB_GL860 is not set
+# CONFIG_USB_GSPCA_BENQ is not set
+# CONFIG_USB_GSPCA_CONEX is not set
+# CONFIG_USB_GSPCA_CPIA1 is not set
+# CONFIG_USB_GSPCA_ETOMS is not set
+# CONFIG_USB_GSPCA_FINEPIX is not set
+# CONFIG_USB_GSPCA_JEILINJ is not set
+# CONFIG_USB_GSPCA_JL2005BCD is not set
+# CONFIG_USB_GSPCA_KINECT is not set
+# CONFIG_USB_GSPCA_KONICA is not set
+# CONFIG_USB_GSPCA_MARS is not set
+# CONFIG_USB_GSPCA_MR97310A is not set
+# CONFIG_USB_GSPCA_NW80X is not set
+# CONFIG_USB_GSPCA_OV519 is not set
+# CONFIG_USB_GSPCA_OV534 is not set
+# CONFIG_USB_GSPCA_OV534_9 is not set
+# CONFIG_USB_GSPCA_PAC207 is not set
+# CONFIG_USB_GSPCA_PAC7302 is not set
+# CONFIG_USB_GSPCA_PAC7311 is not set
+# CONFIG_USB_GSPCA_SE401 is not set
+# CONFIG_USB_GSPCA_SN9C2028 is not set
+# CONFIG_USB_GSPCA_SN9C20X is not set
+# CONFIG_USB_GSPCA_SONIXB is not set
+# CONFIG_USB_GSPCA_SONIXJ is not set
+# CONFIG_USB_GSPCA_SPCA500 is not set
+# CONFIG_USB_GSPCA_SPCA501 is not set
+# CONFIG_USB_GSPCA_SPCA505 is not set
+# CONFIG_USB_GSPCA_SPCA506 is not set
+# CONFIG_USB_GSPCA_SPCA508 is not set
+# CONFIG_USB_GSPCA_SPCA561 is not set
+# CONFIG_USB_GSPCA_SPCA1528 is not set
+# CONFIG_USB_GSPCA_SQ905 is not set
+# CONFIG_USB_GSPCA_SQ905C is not set
+# CONFIG_USB_GSPCA_SQ930X is not set
+# CONFIG_USB_GSPCA_STK014 is not set
+# CONFIG_USB_GSPCA_STV0680 is not set
+# CONFIG_USB_GSPCA_SUNPLUS is not set
+# CONFIG_USB_GSPCA_T613 is not set
+# CONFIG_USB_GSPCA_TOPRO is not set
+# CONFIG_USB_GSPCA_TV8532 is not set
+# CONFIG_USB_GSPCA_VC032X is not set
+# CONFIG_USB_GSPCA_VICAM is not set
+# CONFIG_USB_GSPCA_XIRLINK_CIT is not set
+# CONFIG_USB_GSPCA_ZC3XX is not set
+# CONFIG_USB_PWC is not set
+# CONFIG_VIDEO_CPIA2 is not set
+# CONFIG_USB_ZR364XX is not set
+# CONFIG_USB_STKWEBCAM is not set
+# CONFIG_USB_S2255 is not set
+# CONFIG_USB_SN9C102 is not set
+
+#
+# Webcam, TV (analog/digital) USB devices
+#
+# CONFIG_VIDEO_EM28XX is not set
+# CONFIG_MEDIA_PCI_SUPPORT is not set
+# CONFIG_V4L_PLATFORM_DRIVERS is not set
+# CONFIG_V4L_MEM2MEM_DRIVERS is not set
+# CONFIG_V4L_TEST_DRIVERS is not set
+
+#
+# Supported MMC/SDIO adapters
+#
+# CONFIG_CYPRESS_FIRMWARE is not set
+
+#
+# Media ancillary drivers (tuners, sensors, i2c, frontends)
+#
+# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
+
+#
+# Encoders, decoders, sensors and other helper chips
+#
+
+#
+# Audio decoders, processors and mixers
+#
+# CONFIG_VIDEO_TVAUDIO is not set
+# CONFIG_VIDEO_TDA7432 is not set
+# CONFIG_VIDEO_TDA9840 is not set
+# CONFIG_VIDEO_TEA6415C is not set
+# CONFIG_VIDEO_TEA6420 is not set
+# CONFIG_VIDEO_MSP3400 is not set
+# CONFIG_VIDEO_CS5345 is not set
+# CONFIG_VIDEO_CS53L32A is not set
+# CONFIG_VIDEO_TLV320AIC23B is not set
+# CONFIG_VIDEO_UDA1342 is not set
+# CONFIG_VIDEO_WM8775 is not set
+# CONFIG_VIDEO_WM8739 is not set
+# CONFIG_VIDEO_VP27SMPX is not set
+# CONFIG_VIDEO_SONY_BTF_MPX is not set
+
+#
+# RDS decoders
+#
+# CONFIG_VIDEO_SAA6588 is not set
+
+#
+# Video decoders
+#
+# CONFIG_VIDEO_ADV7180 is not set
+# CONFIG_VIDEO_ADV7183 is not set
+# CONFIG_VIDEO_ADV7604 is not set
+# CONFIG_VIDEO_BT819 is not set
+# CONFIG_VIDEO_BT856 is not set
+# CONFIG_VIDEO_BT866 is not set
+# CONFIG_VIDEO_KS0127 is not set
+# CONFIG_VIDEO_SAA7110 is not set
+# CONFIG_VIDEO_SAA711X is not set
+# CONFIG_VIDEO_SAA7191 is not set
+# CONFIG_VIDEO_TVP514X is not set
+# CONFIG_VIDEO_TVP5150 is not set
+# CONFIG_VIDEO_TVP7002 is not set
+# CONFIG_VIDEO_TW2804 is not set
+# CONFIG_VIDEO_TW9903 is not set
+# CONFIG_VIDEO_TW9906 is not set
+# CONFIG_VIDEO_VPX3220 is not set
+
+#
+# Video and audio decoders
+#
+# CONFIG_VIDEO_SAA717X is not set
+# CONFIG_VIDEO_CX25840 is not set
+
+#
+# Video encoders
+#
+# CONFIG_VIDEO_SAA7127 is not set
+# CONFIG_VIDEO_SAA7185 is not set
+# CONFIG_VIDEO_ADV7170 is not set
+# CONFIG_VIDEO_ADV7175 is not set
+# CONFIG_VIDEO_ADV7343 is not set
+# CONFIG_VIDEO_ADV7393 is not set
+# CONFIG_VIDEO_AD9389B is not set
+# CONFIG_VIDEO_AK881X is not set
+
+#
+# Camera sensor devices
+#
+# CONFIG_VIDEO_OV7640 is not set
+# CONFIG_VIDEO_OV7670 is not set
+# CONFIG_VIDEO_OV9650 is not set
+# CONFIG_VIDEO_VS6624 is not set
+# CONFIG_VIDEO_MT9M032 is not set
+# CONFIG_VIDEO_MT9P031 is not set
+# CONFIG_VIDEO_MT9T001 is not set
+# CONFIG_VIDEO_MT9V011 is not set
+# CONFIG_VIDEO_MT9V032 is not set
+# CONFIG_VIDEO_SR030PC30 is not set
+# CONFIG_VIDEO_NOON010PC30 is not set
+# CONFIG_VIDEO_M5MOLS is not set
+# CONFIG_VIDEO_S5K6AA is not set
+# CONFIG_VIDEO_S5K4ECGX is not set
+# CONFIG_VIDEO_S5C73M3 is not set
+
+#
+# Flash devices
+#
+# CONFIG_VIDEO_ADP1653 is not set
+# CONFIG_VIDEO_AS3645A is not set
+
+#
+# Video improvement chips
+#
+# CONFIG_VIDEO_UPD64031A is not set
+# CONFIG_VIDEO_UPD64083 is not set
+
+#
+# Miscellaneous helper chips
+#
+# CONFIG_VIDEO_THS7303 is not set
+# CONFIG_VIDEO_M52790 is not set
+
+#
+# Sensors used on soc_camera driver
+#
+
+#
+# Customise DVB Frontends
+#
+# CONFIG_DVB_AU8522_V4L is not set
+CONFIG_DVB_TUNER_DIB0070=m
+CONFIG_DVB_TUNER_DIB0090=m
+
+#
+# Tools to develop new frontends
+#
+# CONFIG_DVB_DUMMY_FE is not set
+
+#
+# Graphics support
+#
+CONFIG_AGP=y
+# CONFIG_AGP_ALI is not set
+# CONFIG_AGP_ATI is not set
+# CONFIG_AGP_AMD is not set
+CONFIG_AGP_AMD64=y
+CONFIG_AGP_INTEL=y
+# CONFIG_AGP_NVIDIA is not set
+# CONFIG_AGP_SIS is not set
+# CONFIG_AGP_SWORKS is not set
+# CONFIG_AGP_VIA is not set
+# CONFIG_AGP_EFFICEON is not set
+CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
+CONFIG_DRM=y
+# CONFIG_DRM_TDFX is not set
+# CONFIG_DRM_R128 is not set
+# CONFIG_DRM_RADEON is not set
+# CONFIG_DRM_NOUVEAU is not set
+# CONFIG_DRM_I915 is not set
+# CONFIG_DRM_MGA is not set
+# CONFIG_DRM_SIS is not set
+# CONFIG_DRM_VIA is not set
+# CONFIG_DRM_SAVAGE is not set
+# CONFIG_DRM_VMWGFX is not set
+# CONFIG_DRM_GMA500 is not set
+# CONFIG_DRM_UDL is not set
+# CONFIG_DRM_AST is not set
+# CONFIG_DRM_MGAG200 is not set
+# CONFIG_DRM_CIRRUS_QEMU is not set
+# CONFIG_DRM_QXL is not set
+# CONFIG_VGASTATE is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_HDMI=y
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+# CONFIG_FB_CFB_FILLRECT is not set
+# CONFIG_FB_CFB_COPYAREA is not set
+# CONFIG_FB_CFB_IMAGEBLIT is not set
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_ARC is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_VGA16 is not set
+# CONFIG_FB_UVESA is not set
+# CONFIG_FB_VESA is not set
+# CONFIG_FB_N411 is not set
+# CONFIG_FB_HGA is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_I740 is not set
+# CONFIG_FB_I810 is not set
+# CONFIG_FB_LE80578 is not set
+# CONFIG_FB_INTEL is not set
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_VIA is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_CARMINE is not set
+# CONFIG_FB_GEODE is not set
+# CONFIG_FB_TMIO is not set
+# CONFIG_FB_SMSCUFX is not set
+# CONFIG_FB_UDL is not set
+# CONFIG_FB_GOLDFISH is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_AUO_K190X is not set
+# CONFIG_EXYNOS_VIDEO is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=y
+# CONFIG_BACKLIGHT_PWM is not set
+# CONFIG_BACKLIGHT_SAHARA is not set
+# CONFIG_BACKLIGHT_ADP8860 is not set
+# CONFIG_BACKLIGHT_ADP8870 is not set
+# CONFIG_BACKLIGHT_LM3630 is not set
+# CONFIG_BACKLIGHT_LM3639 is not set
+# CONFIG_BACKLIGHT_LP855X is not set
+
+#
+# Console display driver support
+#
+CONFIG_VGA_CONSOLE=y
+# CONFIG_VGACON_SOFT_SCROLLBACK is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_LOGO is not set
+CONFIG_SOUND=y
+# CONFIG_SOUND_OSS_CORE is not set
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+# CONFIG_SND_COMPRESS_OFFLOAD is not set
+# CONFIG_SND_EFFECTS_OFFLOAD is not set
+CONFIG_SND_JACK=y
+CONFIG_SND_SEQUENCER=y
+# CONFIG_SND_SEQ_DUMMY is not set
+# CONFIG_SND_MIXER_OSS is not set
+# CONFIG_SND_PCM_OSS is not set
+# CONFIG_SND_SEQUENCER_OSS is not set
+# CONFIG_SND_HRTIMER is not set
+CONFIG_SND_DYNAMIC_MINORS=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_DMA_SGBUF=y
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_DRIVERS=y
+# CONFIG_SND_PCSP is not set
+# CONFIG_SND_DUMMY is not set
+CONFIG_SND_ALOOP=y
+# CONFIG_SND_VIRMIDI is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+# CONFIG_SND_PCI is not set
+# CONFIG_SND_SPI is not set
+CONFIG_SND_USB=y
+CONFIG_SND_USB_AUDIO=y
+# CONFIG_SND_USB_UA101 is not set
+# CONFIG_SND_USB_USX2Y is not set
+# CONFIG_SND_USB_CAIAQ is not set
+# CONFIG_SND_USB_US122L is not set
+# CONFIG_SND_USB_6FIRE is not set
+CONFIG_SND_SOC=y
+# CONFIG_SND_ATMEL_SOC is not set
+# CONFIG_SND_MFLD_MACHINE is not set
+CONFIG_SND_INTEL_SST=y
+CONFIG_SND_MRFLD_MACHINE=y
+CONFIG_SND_SST_PLATFORM=y
+CONFIG_SST_MRFLD_DPCM=y
+CONFIG_SND_SST_MACHINE=y
+CONFIG_SND_SOC_I2C_AND_SPI=y
+# CONFIG_SND_SOC_ALL_CODECS is not set
+CONFIG_SND_SOC_WM_HUBS=y
+CONFIG_SND_SOC_WM8994=y
+# CONFIG_SND_SIMPLE_CARD is not set
+# CONFIG_SOUND_PRIME is not set
+
+#
+# HID support
+#
+CONFIG_HID=y
+# CONFIG_HID_BATTERY_STRENGTH is not set
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_GENERIC=y
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_ACRUX is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_APPLEIR is not set
+# CONFIG_HID_AUREAL is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_PRODIKEYS is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EMS_FF is not set
+# CONFIG_HID_ELECOM is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_HOLTEK is not set
+# CONFIG_HID_KEYTOUCH is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_UCLOGIC is not set
+# CONFIG_HID_WALTOP is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_ICADE is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LCPOWER is not set
+# CONFIG_HID_LENOVO_TPKBD is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MAGICMOUSE is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_MULTITOUCH is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_PICOLCD is not set
+# CONFIG_HID_PRIMAX is not set
+# CONFIG_HID_PS3REMOTE is not set
+# CONFIG_HID_ROCCAT is not set
+# CONFIG_HID_SAITEK is not set
+# CONFIG_HID_SAMSUNG is not set
+CONFIG_HID_SONY=y
+# CONFIG_HID_SPEEDLINK is not set
+# CONFIG_HID_STEELSERIES is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TIVO is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_THINGM is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_WACOM is not set
+# CONFIG_HID_WIIMOTE is not set
+# CONFIG_HID_ZEROPLUS is not set
+# CONFIG_HID_ZYDACRON is not set
+# CONFIG_HID_SENSOR_HUB is not set
+
+#
+# USB HID support
+#
+CONFIG_USB_HID=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+
+#
+# I2C HID support
+#
+# CONFIG_I2C_HID is not set
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB_ARCH_HAS_XHCI=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_COMMON=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEFAULT_PERSIST=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+CONFIG_USB_OTG=y
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=y
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PLATFORM=y
+# CONFIG_USB_XHCI_HCD_DEBUGGING is not set
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_PCI=y
+# CONFIG_USB_EHCI_HCD_PLATFORM is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+# CONFIG_USB_OHCI_HCD is not set
+# CONFIG_USB_UHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_RENESAS_USBHS is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=y
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_REALTEK is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_STORAGE_ENE_UB6250 is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+CONFIG_USB_DWC3=y
+# CONFIG_USB_DWC3_HOST is not set
+CONFIG_USB_DWC3_GADGET=y
+# CONFIG_USB_DWC3_DUAL_ROLE is not set
+
+#
+# Platform Glue Driver Support
+#
+# CONFIG_USB_DWC3_PCI is not set
+CONFIG_USB_DWC3_OTG=y
+CONFIG_USB_DWC3_INTEL_MRFL=y
+# CONFIG_USB_DWC3_INTEL_BYT is not set
+CONFIG_USB_DWC3_DEVICE_INTEL=y
+CONFIG_USB_DWC3_HOST_INTEL=y
+
+#
+# Debugging features
+#
+# CONFIG_USB_DWC3_DEBUG is not set
+# CONFIG_USB_CHIPIDEA is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=y
+# CONFIG_USB_SERIAL_CONSOLE is not set
+# CONFIG_USB_SERIAL_GENERIC is not set
+# CONFIG_USB_SERIAL_AIRCABLE is not set
+# CONFIG_USB_SERIAL_ARK3116 is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_CH341 is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+CONFIG_USB_SERIAL_CP210X=m
+# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+# CONFIG_USB_SERIAL_FTDI_SIO is not set
+# CONFIG_USB_SERIAL_FUNSOFT is not set
+# CONFIG_USB_SERIAL_VISOR is not set
+# CONFIG_USB_SERIAL_IPAQ is not set
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+# CONFIG_USB_SERIAL_F81232 is not set
+# CONFIG_USB_SERIAL_GARMIN is not set
+# CONFIG_USB_SERIAL_IPW is not set
+# CONFIG_USB_SERIAL_IUU is not set
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+# CONFIG_USB_SERIAL_KEYSPAN is not set
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+# CONFIG_USB_SERIAL_MCT_U232 is not set
+# CONFIG_USB_SERIAL_METRO is not set
+# CONFIG_USB_SERIAL_MOS7720 is not set
+# CONFIG_USB_SERIAL_MOS7840 is not set
+# CONFIG_USB_SERIAL_MOTOROLA is not set
+# CONFIG_USB_SERIAL_NAVMAN is not set
+CONFIG_USB_SERIAL_PL2303=y
+# CONFIG_USB_SERIAL_OTI6858 is not set
+# CONFIG_USB_SERIAL_QCAUX is not set
+# CONFIG_USB_SERIAL_QUALCOMM is not set
+# CONFIG_USB_SERIAL_SPCP8X5 is not set
+# CONFIG_USB_SERIAL_HP4X is not set
+# CONFIG_USB_SERIAL_SAFE is not set
+# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
+# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
+# CONFIG_USB_SERIAL_SYMBOL is not set
+# CONFIG_USB_SERIAL_TI is not set
+# CONFIG_USB_SERIAL_CYBERJACK is not set
+# CONFIG_USB_SERIAL_XIRCOM is not set
+# CONFIG_USB_SERIAL_OPTION is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set
+# CONFIG_USB_SERIAL_XSENS_MT is not set
+# CONFIG_USB_SERIAL_ZIO is not set
+# CONFIG_USB_SERIAL_WISHBONE is not set
+# CONFIG_USB_SERIAL_ZTE is not set
+# CONFIG_USB_SERIAL_SSU100 is not set
+# CONFIG_USB_SERIAL_QT2 is not set
+# CONFIG_USB_SERIAL_DEBUG is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_YUREX is not set
+# CONFIG_USB_EZUSB_FX2 is not set
+# CONFIG_USB_HSIC_USB3503 is not set
+CONFIG_USB_PHY=y
+CONFIG_NOP_USB_XCEIV=y
+# CONFIG_OMAP_CONTROL_USB is not set
+# CONFIG_OMAP_USB3 is not set
+# CONFIG_SAMSUNG_USBPHY is not set
+# CONFIG_SAMSUNG_USB2PHY is not set
+# CONFIG_SAMSUNG_USB3PHY is not set
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_USB_ISP1301 is not set
+# CONFIG_USB_RCAR_PHY is not set
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
+
+#
+# USB Peripheral Controller
+#
+# CONFIG_USB_R8A66597 is not set
+# CONFIG_USB_PXA27X is not set
+# CONFIG_USB_MV_UDC is not set
+# CONFIG_USB_MV_U3D is not set
+# CONFIG_USB_M66592 is not set
+# CONFIG_USB_AMD5536UDC is not set
+# CONFIG_USB_NET2272 is not set
+# CONFIG_USB_NET2280 is not set
+# CONFIG_USB_GOKU is not set
+# CONFIG_USB_EG20T is not set
+# CONFIG_USB_DUMMY_HCD is not set
+CONFIG_USB_LIBCOMPOSITE=m
+CONFIG_USB_F_ACM=m
+CONFIG_USB_U_SERIAL=m
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_AUDIO is not set
+# CONFIG_USB_ETH is not set
+# CONFIG_USB_G_NCM is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FUNCTIONFS is not set
+# CONFIG_USB_MASS_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_ACM_MS is not set
+CONFIG_USB_G_MULTI=m
+CONFIG_USB_G_MULTI_RNDIS=y
+CONFIG_USB_G_MULTI_CDC=y
+# CONFIG_USB_G_HID is not set
+# CONFIG_USB_G_DBGP is not set
+# CONFIG_USB_G_WEBCAM is not set
+# CONFIG_UWB is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_UNSAFE_RESUME=y
+# CONFIG_MMC_CLKGATE is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PCI=y
+# CONFIG_MMC_RICOH_MMC is not set
+# CONFIG_MMC_SDHCI_PLTFM is not set
+# CONFIG_MMC_WBSD is not set
+# CONFIG_MMC_TIFM_SD is not set
+# CONFIG_MMC_CB710 is not set
+# CONFIG_MMC_VIA_SDMMC is not set
+# CONFIG_MMC_VUB300 is not set
+# CONFIG_MMC_USHC is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_LM3530 is not set
+# CONFIG_LEDS_LM3642 is not set
+# CONFIG_LEDS_PCA9532 is not set
+# CONFIG_LEDS_GPIO is not set
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_LP5521 is not set
+# CONFIG_LEDS_LP5523 is not set
+# CONFIG_LEDS_LP5562 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_PCA9633 is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_PWM is not set
+# CONFIG_LEDS_REGULATOR is not set
+# CONFIG_LEDS_BD2802 is not set
+# CONFIG_LEDS_INTEL_SS4200 is not set
+# CONFIG_LEDS_LT3593 is not set
+# CONFIG_LEDS_TCA6507 is not set
+# CONFIG_LEDS_LM355x is not set
+# CONFIG_LEDS_OT200 is not set
+# CONFIG_LEDS_BLINKM is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+# CONFIG_LEDS_TRIGGER_TIMER is not set
+# CONFIG_LEDS_TRIGGER_ONESHOT is not set
+# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_CPU is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_LEDS_TRIGGER_TRANSIENT is not set
+# CONFIG_LEDS_TRIGGER_CAMERA is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
+CONFIG_EDAC=y
+CONFIG_EDAC_LEGACY_SYSFS=y
+# CONFIG_EDAC_DEBUG is not set
+# CONFIG_EDAC_MM_EDAC is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_SYSTOHC=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_DS3232 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_ISL12022 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8523 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+# CONFIG_RTC_DRV_EM3027 is not set
+# CONFIG_RTC_DRV_RV3029C2 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T93 is not set
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+# CONFIG_RTC_DRV_RX4581 is not set
+
+#
+# Platform RTC drivers
+#
+CONFIG_RTC_DRV_CMOS=y
+# CONFIG_RTC_DRV_VRTC is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+# CONFIG_RTC_DRV_DS2404 is not set
+
+#
+# on-CPU RTC drivers
+#
+
+#
+# HID Sensor RTC drivers
+#
+# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set
+CONFIG_DMADEVICES=y
+# CONFIG_DMADEVICES_DEBUG is not set
+
+#
+# DMA Devices
+#
+CONFIG_INTEL_MID_DMAC=y
+# CONFIG_INTEL_IOATDMA is not set
+# CONFIG_DW_DMAC is not set
+# CONFIG_TIMB_DMA is not set
+# CONFIG_PCH_DMA is not set
+CONFIG_DMA_ENGINE=y
+
+#
+# DMA Clients
+#
+# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
+# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+# CONFIG_VIRT_DRIVERS is not set
+CONFIG_VIRTIO=y
+
+#
+# Virtio drivers
+#
+# CONFIG_VIRTIO_PCI is not set
+# CONFIG_VIRTIO_BALLOON is not set
+# CONFIG_VIRTIO_MMIO is not set
+
+#
+# Microsoft Hyper-V guest support
+#
+CONFIG_STAGING=y
+# CONFIG_ET131X is not set
+# CONFIG_SLICOSS is not set
+# CONFIG_USBIP_CORE is not set
+# CONFIG_W35UND is not set
+# CONFIG_PRISM2_USB is not set
+# CONFIG_ECHO is not set
+# CONFIG_COMEDI is not set
+# CONFIG_ASUS_OLED is not set
+# CONFIG_R8187SE is not set
+# CONFIG_RTL8192U is not set
+# CONFIG_RTLLIB is not set
+# CONFIG_R8712U is not set
+# CONFIG_RTS5139 is not set
+# CONFIG_TRANZPORT is not set
+# CONFIG_LINE6_USB is not set
+# CONFIG_USB_SERIAL_QUATECH2 is not set
+# CONFIG_VT6655 is not set
+# CONFIG_VT6656 is not set
+# CONFIG_DX_SEP is not set
+
+#
+# IIO staging drivers
+#
+
+#
+# Accelerometers
+#
+# CONFIG_ADIS16201 is not set
+# CONFIG_ADIS16203 is not set
+# CONFIG_ADIS16204 is not set
+# CONFIG_ADIS16209 is not set
+# CONFIG_ADIS16220 is not set
+# CONFIG_ADIS16240 is not set
+# CONFIG_LIS3L02DQ is not set
+# CONFIG_SCA3000 is not set
+
+#
+# Analog to digital converters
+#
+# CONFIG_AD7291 is not set
+# CONFIG_AD7606 is not set
+# CONFIG_AD799X is not set
+# CONFIG_AD7780 is not set
+# CONFIG_AD7816 is not set
+# CONFIG_AD7192 is not set
+# CONFIG_AD7280 is not set
+
+#
+# Analog/digital bidirectional converters
+#
+# CONFIG_ADT7316 is not set
+
+#
+# Capacitance to digital converters
+#
+# CONFIG_AD7150 is not set
+# CONFIG_AD7152 is not set
+# CONFIG_AD7746 is not set
+
+#
+# Direct Digital Synthesis
+#
+# CONFIG_AD5930 is not set
+# CONFIG_AD9832 is not set
+# CONFIG_AD9834 is not set
+# CONFIG_AD9850 is not set
+# CONFIG_AD9852 is not set
+# CONFIG_AD9910 is not set
+# CONFIG_AD9951 is not set
+
+#
+# Digital gyroscope sensors
+#
+# CONFIG_ADIS16060 is not set
+# CONFIG_ADIS16130 is not set
+# CONFIG_ADIS16260 is not set
+
+#
+# Network Analyzer, Impedance Converters
+#
+# CONFIG_AD5933 is not set
+
+#
+# Light sensors
+#
+# CONFIG_SENSORS_ISL29018 is not set
+# CONFIG_SENSORS_ISL29028 is not set
+# CONFIG_TSL2583 is not set
+# CONFIG_TSL2x7x is not set
+
+#
+# Magnetometer sensors
+#
+# CONFIG_SENSORS_HMC5843 is not set
+
+#
+# Active energy metering IC
+#
+# CONFIG_ADE7753 is not set
+# CONFIG_ADE7754 is not set
+# CONFIG_ADE7758 is not set
+# CONFIG_ADE7759 is not set
+# CONFIG_ADE7854 is not set
+
+#
+# Resolver to digital converters
+#
+# CONFIG_AD2S90 is not set
+# CONFIG_AD2S1200 is not set
+# CONFIG_AD2S1210 is not set
+
+#
+# Triggers - standalone
+#
+# CONFIG_IIO_PERIODIC_RTC_TRIGGER is not set
+# CONFIG_IIO_GPIO_TRIGGER is not set
+CONFIG_IIO_SYSFS_TRIGGER=m
+# CONFIG_IIO_SIMPLE_DUMMY is not set
+# CONFIG_ZSMALLOC is not set
+# CONFIG_FB_SM7XX is not set
+# CONFIG_CRYSTALHD is not set
+# CONFIG_FB_XGI is not set
+# CONFIG_USB_ENESTORAGE is not set
+# CONFIG_BCM_WIMAX is not set
+# CONFIG_FT1000 is not set
+
+#
+# Speakup console speech
+#
+# CONFIG_SPEAKUP is not set
+# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set
+# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set
+# CONFIG_STAGING_MEDIA is not set
+
+#
+# Android
+#
+# CONFIG_ANDROID is not set
+# CONFIG_USB_WPAN_HCD is not set
+# CONFIG_WIMAX_GDM72XX is not set
+# CONFIG_CSR_WIFI is not set
+# CONFIG_NET_VENDOR_SILICOM is not set
+# CONFIG_CED1401 is not set
+# CONFIG_DGRP is not set
+# CONFIG_USB_DWC2 is not set
+CONFIG_X86_PLATFORM_DEVICES=y
+# CONFIG_CHROMEOS_LAPTOP is not set
+# CONFIG_AMILO_RFKILL is not set
+# CONFIG_SENSORS_HDAPS is not set
+CONFIG_INTEL_SCU_IPC=y
+CONFIG_INTEL_SCU_IPC_INTR_MODE=y
+# CONFIG_INTEL_SCU_IPC_POLL_MODE is not set
+CONFIG_INTEL_SCU_IPC_UTIL=y
+CONFIG_GPIO_INTEL_PMIC=y
+CONFIG_INTEL_MID_POWER_BUTTON=y
+# CONFIG_INTEL_MFLD_THERMAL is not set
+# CONFIG_IBM_RTL is not set
+# CONFIG_SAMSUNG_LAPTOP is not set
+CONFIG_INTEL_SCU_FLIS=y
+CONFIG_INTEL_PSH_IPC=y
+
+#
+# Hardware Spinlock drivers
+#
+CONFIG_CLKSRC_I8253=y
+CONFIG_CLKEVT_I8253=y
+CONFIG_I8253_LOCK=y
+CONFIG_CLKBLD_I8253=y
+# CONFIG_MAILBOX is not set
+CONFIG_IOMMU_SUPPORT=y
+
+#
+# Remoteproc drivers
+#
+CONFIG_REMOTEPROC=y
+# CONFIG_STE_MODEM_RPROC is not set
+CONFIG_INTEL_MID_REMOTEPROC=y
+
+#
+# Rpmsg drivers
+#
+CONFIG_RPMSG=y
+CONFIG_RPMSG_IPC=y
+CONFIG_PM_DEVFREQ=y
+
+#
+# DEVFREQ Governors
+#
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+
+#
+# DEVFREQ Drivers
+#
+CONFIG_EXTCON=y
+
+#
+# Extcon Device Drivers
+#
+# CONFIG_EXTCON_GPIO is not set
+# CONFIG_EXTCON_ADC_JACK is not set
+# CONFIG_MEMORY is not set
+CONFIG_IIO=y
+CONFIG_IIO_BUFFER=y
+# CONFIG_IIO_BUFFER_CB is not set
+CONFIG_IIO_KFIFO_BUF=y
+CONFIG_IIO_TRIGGERED_BUFFER=y
+CONFIG_IIO_TRIGGER=y
+CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
+
+#
+# Accelerometers
+#
+# CONFIG_KXSD9 is not set
+# CONFIG_IIO_ST_ACCEL_3AXIS is not set
+
+#
+# Analog to digital converters
+#
+# CONFIG_AD7266 is not set
+# CONFIG_AD7298 is not set
+# CONFIG_AD7923 is not set
+# CONFIG_AD7791 is not set
+# CONFIG_AD7793 is not set
+# CONFIG_AD7476 is not set
+# CONFIG_AD7887 is not set
+# CONFIG_MAX1363 is not set
+# CONFIG_TI_ADC081C is not set
+CONFIG_TI_ADS7955_ADC=y
+CONFIG_IIO_BASINCOVE_GPADC=y
+
+#
+# Amplifiers
+#
+# CONFIG_AD8366 is not set
+
+#
+# HID Sensor IIO Common
+#
+
+#
+# Digital to analog converters
+#
+# CONFIG_AD5064 is not set
+# CONFIG_AD5360 is not set
+# CONFIG_AD5380 is not set
+# CONFIG_AD5421 is not set
+# CONFIG_AD5624R_SPI is not set
+# CONFIG_AD5446 is not set
+# CONFIG_AD5449 is not set
+# CONFIG_AD5504 is not set
+# CONFIG_AD5755 is not set
+# CONFIG_AD5764 is not set
+# CONFIG_AD5791 is not set
+# CONFIG_AD5686 is not set
+# CONFIG_MAX517 is not set
+# CONFIG_MCP4725 is not set
+
+#
+# Frequency Synthesizers DDS/PLL
+#
+
+#
+# Clock Generator/Distribution
+#
+# CONFIG_AD9523 is not set
+
+#
+# Phase-Locked Loop (PLL) frequency synthesizers
+#
+# CONFIG_ADF4350 is not set
+
+#
+# Digital gyroscope sensors
+#
+# CONFIG_ADIS16080 is not set
+# CONFIG_ADIS16136 is not set
+# CONFIG_ADXRS450 is not set
+# CONFIG_IIO_ST_GYRO_3AXIS is not set
+# CONFIG_ITG3200 is not set
+
+#
+# Inertial measurement units
+#
+# CONFIG_ADIS16400 is not set
+# CONFIG_ADIS16480 is not set
+# CONFIG_INV_MPU6050_IIO is not set
+
+#
+# Light sensors
+#
+# CONFIG_ADJD_S311 is not set
+# CONFIG_SENSORS_TSL2563 is not set
+# CONFIG_VCNL4000 is not set
+
+#
+# Magnetometer sensors
+#
+# CONFIG_AK8975 is not set
+# CONFIG_IIO_ST_MAGN_3AXIS is not set
+# CONFIG_VME_BUS is not set
+CONFIG_PWM=y
+CONFIG_PWM_SYSFS=y
+CONFIG_PWM_INTEL_MID=y
+# CONFIG_IPACK_BUS is not set
+# CONFIG_RESET_CONTROLLER is not set
+
+#
+# Firmware Drivers
+#
+# CONFIG_EDD is not set
+CONFIG_FIRMWARE_MEMMAP=y
+# CONFIG_DELL_RBU is not set
+# CONFIG_DCDBAS is not set
+CONFIG_DMIID=y
+# CONFIG_DMI_SYSFS is not set
+# CONFIG_ISCSI_IBFT_FIND is not set
+# CONFIG_GOOGLE_FIRMWARE is not set
+
+#
+# File systems
+#
+CONFIG_DCACHE_WORD_ACCESS=y
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_POSIX_ACL is not set
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_EXT4_DEBUG is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_JBD2=y
+# CONFIG_JBD2_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_FANOTIFY=y
+# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set
+# CONFIG_QUOTA is not set
+# CONFIG_QUOTACTL is not set
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=y
+# CONFIG_CUSE is not set
+CONFIG_GENERIC_ACL=y
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+# CONFIG_VFAT_FS_NO_DUALNAMES is not set
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_VFAT_NO_CREATE_WITH_LONGNAMES is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+# CONFIG_PROC_KCORE is not set
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_XATTR=y
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_CONFIGFS_FS=y
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX6FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_FTRACE=y
+CONFIG_PSTORE_RAM=y
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_F2FS_FS is not set
+CONFIG_AUFS_FS=m
+CONFIG_AUFS_BRANCH_MAX_127=y
+# CONFIG_AUFS_BRANCH_MAX_511 is not set
+# CONFIG_AUFS_BRANCH_MAX_1023 is not set
+# CONFIG_AUFS_BRANCH_MAX_32767 is not set
+CONFIG_AUFS_SBILIST=y
+# CONFIG_AUFS_HNOTIFY is not set
+# CONFIG_AUFS_EXPORT is not set
+# CONFIG_AUFS_RDU is not set
+# CONFIG_AUFS_PROC_MAP is not set
+# CONFIG_AUFS_SP_IATTR is not set
+# CONFIG_AUFS_SHWH is not set
+# CONFIG_AUFS_BR_RAMFS is not set
+# CONFIG_AUFS_BR_FUSE is not set
+CONFIG_AUFS_BDEV_LOOP=y
+# CONFIG_AUFS_DEBUG is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V2=y
+CONFIG_NFS_DEF_FILE_IO_SIZE=4096
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_SWAP is not set
+# CONFIG_NFS_V4_1 is not set
+# CONFIG_NFS_USE_LEGACY_DNS is not set
+CONFIG_NFS_USE_KERNEL_DNS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V2_ACL=y
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V3_ACL=y
+# CONFIG_NFSD_V4 is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+# CONFIG_SUNRPC_DEBUG is not set
+# CONFIG_CEPH_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_MAC_ROMAN is not set
+# CONFIG_NLS_MAC_CELTIC is not set
+# CONFIG_NLS_MAC_CENTEURO is not set
+# CONFIG_NLS_MAC_CROATIAN is not set
+# CONFIG_NLS_MAC_CYRILLIC is not set
+# CONFIG_NLS_MAC_GAELIC is not set
+# CONFIG_NLS_MAC_GREEK is not set
+# CONFIG_NLS_MAC_ICELAND is not set
+# CONFIG_NLS_MAC_INUIT is not set
+# CONFIG_NLS_MAC_ROMANIAN is not set
+# CONFIG_NLS_MAC_TURKISH is not set
+CONFIG_NLS_UTF8=y
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=2048
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_READABLE_ASM is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=1
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=1
+# CONFIG_PANIC_ON_OOPS is not set
+CONFIG_PANIC_ON_OOPS_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+CONFIG_HAVE_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=400
+# CONFIG_DEBUG_KMEMLEAK_TEST is not set
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_PREEMPT=y
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+CONFIG_TRACE_IRQFLAGS=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
+CONFIG_DEBUG_STACK_USAGE=y
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_HIGHMEM is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_INFO_REDUCED is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_VIRTUAL is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_LIST=y
+CONFIG_TEST_LIST_SORT=y
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+# CONFIG_DEBUG_CREDENTIALS is not set
+CONFIG_ARCH_WANT_FRAME_POINTERS=y
+CONFIG_FRAME_POINTER=y
+CONFIG_BOOT_PRINTK_DELAY=y
+
+#
+# RCU Debugging
+#
+# CONFIG_PROVE_RCU_DELAY is not set
+CONFIG_SPARSE_RCU_POINTER=y
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_RCU_CPU_STALL_VERBOSE=y
+CONFIG_RCU_CPU_STALL_INFO=y
+# CONFIG_RCU_TRACE is not set
+# CONFIG_KPROBES_SANITY_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_DEBUG_PER_CPU_MAPS is not set
+# CONFIG_LKDTM is not set
+# CONFIG_NOTIFIER_ERROR_INJECTION is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS=y
+# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+CONFIG_USER_STACKTRACE_SUPPORT=y
+CONFIG_NOP_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_TRACER_MAX_TRACE=y
+CONFIG_TRACE_CLOCK=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_GENERIC_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_FUNCTION_GRAPH_TRACER=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_TRACER_SNAPSHOT=y
+CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_KPROBE_EVENT is not set
+# CONFIG_UPROBE_EVENT is not set
+# CONFIG_PROBE_EVENTS is not set
+CONFIG_DYNAMIC_FTRACE=y
+CONFIG_DYNAMIC_FTRACE_WITH_REGS=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_FTRACE_MCOUNT_RECORD=y
+# CONFIG_FTRACE_STARTUP_TEST is not set
+# CONFIG_MMIOTRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_RING_BUFFER_STARTUP_TEST is not set
+# CONFIG_RBTREE_TEST is not set
+# CONFIG_INTERVAL_TREE_TEST is not set
+CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
+CONFIG_DYNAMIC_DEBUG=y
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_HAVE_ARCH_KMEMCHECK=y
+# CONFIG_TEST_STRING_HELPERS is not set
+# CONFIG_TEST_KSTRTOX is not set
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_X86_VERBOSE_BOOTUP=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_EARLY_PRINTK_INTEL_MID=y
+# CONFIG_EARLY_PRINTK_DBGP is not set
+CONFIG_DEBUG_STACKOVERFLOW=y
+# CONFIG_X86_PTDUMP is not set
+CONFIG_DEBUG_RODATA=y
+# CONFIG_DEBUG_RODATA_TEST is not set
+CONFIG_DEBUG_SET_MODULE_RONX=y
+CONFIG_DEBUG_NX_TEST=m
+CONFIG_DOUBLEFAULT=y
+# CONFIG_DEBUG_TLBFLUSH is not set
+# CONFIG_IOMMU_STRESS is not set
+CONFIG_HAVE_MMIOTRACE_SUPPORT=y
+# CONFIG_X86_DECODER_SELFTEST is not set
+CONFIG_IO_DELAY_TYPE_0X80=0
+CONFIG_IO_DELAY_TYPE_0XED=1
+CONFIG_IO_DELAY_TYPE_UDELAY=2
+CONFIG_IO_DELAY_TYPE_NONE=3
+CONFIG_IO_DELAY_0X80=y
+# CONFIG_IO_DELAY_0XED is not set
+# CONFIG_IO_DELAY_UDELAY is not set
+# CONFIG_IO_DELAY_NONE is not set
+CONFIG_DEFAULT_IO_DELAY_TYPE=0
+CONFIG_DEBUG_BOOT_PARAMS=y
+# CONFIG_CPA_DEBUG is not set
+CONFIG_OPTIMIZE_INLINING=y
+# CONFIG_DEBUG_NMI_SELFTEST is not set
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_ENCRYPTED_KEYS is not set
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
+CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
+CONFIG_SECURITY_NETWORK=y
+# CONFIG_SECURITY_NETWORK_XFRM is not set
+# CONFIG_SECURITY_PATH is not set
+CONFIG_LSM_MMAP_MIN_ADDR=65536
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_SECURITY_SELINUX_DEVELOP=y
+CONFIG_SECURITY_SELINUX_AVC_STATS=y
+CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
+# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set
+# CONFIG_SECURITY_SMACK is not set
+# CONFIG_SECURITY_TOMOYO is not set
+# CONFIG_SECURITY_APPARMOR is not set
+# CONFIG_SECURITY_YAMA is not set
+# CONFIG_IMA is not set
+# CONFIG_EVM is not set
+CONFIG_DEFAULT_SECURITY_SELINUX=y
+# CONFIG_DEFAULT_SECURITY_DAC is not set
+CONFIG_DEFAULT_SECURITY="selinux"
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTODEV is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_USER is not set
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
+CONFIG_CRYPTO_GF128MUL=y
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_PCRYPT is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+CONFIG_CRYPTO_CRYPTD=y
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+CONFIG_CRYPTO_ABLK_HELPER_X86=y
+
+#
+# Authenticated Encryption with Associated Data
+#
+CONFIG_CRYPTO_CCM=y
+# CONFIG_CRYPTO_GCM is not set
+CONFIG_CRYPTO_SEQIV=y
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CTR=y
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=y
+# CONFIG_CRYPTO_PCBC is not set
+CONFIG_CRYPTO_XTS=y
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_CMAC is not set
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=y
+# CONFIG_CRYPTO_CRC32C_INTEL is not set
+# CONFIG_CRYPTO_CRC32 is not set
+# CONFIG_CRYPTO_CRC32_PCLMUL is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_AES_586=y
+CONFIG_CRYPTO_AES_NI_INTEL=y
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=y
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SALSA20_586 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_SERPENT_SSE2_586 is not set
+# CONFIG_CRYPTO_TEA is not set
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_TWOFISH_COMMON=y
+CONFIG_CRYPTO_TWOFISH_586=y
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_USER_API_HASH is not set
+# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_CRYPTO_DEV_PADLOCK is not set
+# CONFIG_CRYPTO_DEV_GEODE is not set
+CONFIG_ASYMMETRIC_KEY_TYPE=y
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
+CONFIG_PUBLIC_KEY_ALGO_RSA=y
+CONFIG_X509_CERTIFICATE_PARSER=y
+CONFIG_HAVE_KVM=y
+CONFIG_VIRTUALIZATION=y
+# CONFIG_KVM is not set
+# CONFIG_LGUEST is not set
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_STRNCPY_FROM_USER=y
+CONFIG_GENERIC_STRNLEN_USER=y
+CONFIG_GENERIC_FIND_FIRST_BIT=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_IOMAP=y
+CONFIG_GENERIC_IO=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=y
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC32_SELFTEST is not set
+# CONFIG_CRC32_SLICEBY8 is not set
+# CONFIG_CRC32_SLICEBY4 is not set
+# CONFIG_CRC32_SARWATE is not set
+CONFIG_CRC32_BIT=y
+# CONFIG_CRC7 is not set
+CONFIG_LIBCRC32C=y
+# CONFIG_CRC8 is not set
+CONFIG_AUDIT_GENERIC=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_XZ_DEC=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_BCJ=y
+# CONFIG_XZ_DEC_TEST is not set
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_REED_SOLOMON=y
+CONFIG_REED_SOLOMON_ENC8=y
+CONFIG_REED_SOLOMON_DEC8=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_CPU_RMAP=y
+CONFIG_DQL=y
+CONFIG_NLATTR=y
+CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y
+CONFIG_AVERAGE=y
+CONFIG_CLZ_TAB=y
+# CONFIG_CORDIC is not set
+# CONFIG_DDR is not set
+CONFIG_MPILIB=y
+CONFIG_OID_REGISTRY=y
diff --git a/arch/x86/configs/i386_iot_edison_defconfig b/arch/x86/configs/i386_iot_edison_defconfig
new file mode 100644
index 0000000..56a51be
--- /dev/null
+++ b/arch/x86/configs/i386_iot_edison_defconfig
@@ -0,0 +1,3899 @@
+#
+# Automatically generated file; DO NOT EDIT.
+# Linux/i386 3.10.89 Kernel Configuration
+#
+# CONFIG_64BIT is not set
+CONFIG_X86_32=y
+CONFIG_X86=y
+CONFIG_INSTRUCTION_DECODER=y
+CONFIG_OUTPUT_FORMAT="elf32-i386"
+CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_MMU=y
+CONFIG_NEED_SG_DMA_LENGTH=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_ARCH_HAS_CPU_RELAX=y
+CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
+CONFIG_ARCH_HAS_CPU_AUTOPROBE=y
+CONFIG_HAVE_SETUP_PER_CPU_AREA=y
+CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
+CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_ZONE_DMA32 is not set
+# CONFIG_AUDIT_ARCH is not set
+CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
+CONFIG_X86_32_SMP=y
+CONFIG_X86_HT=y
+CONFIG_X86_32_LAZY_GS=y
+CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-ecx -fcall-saved-edx"
+CONFIG_ARCH_CPU_PROBE_RELEASE=y
+CONFIG_ARCH_SUPPORTS_UPROBES=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_IRQ_WORK=y
+CONFIG_BUILDTIME_EXTABLE_SORT=y
+
+#
+# General setup
+#
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE=""
+CONFIG_LOCALVERSION="-brillo-edison"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_XZ=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_XZ is not set
+# CONFIG_KERNEL_LZO is not set
+CONFIG_DEFAULT_HOSTNAME="(none)"
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+CONFIG_FHANDLE=y
+CONFIG_AUDIT=y
+CONFIG_AUDITSYSCALL=y
+CONFIG_AUDIT_WATCH=y
+CONFIG_AUDIT_TREE=y
+# CONFIG_AUDIT_LOGINUID_IMMUTABLE is not set
+CONFIG_HAVE_GENERIC_HARDIRQS=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_PENDING_IRQ=y
+CONFIG_IRQ_DOMAIN=y
+# CONFIG_IRQ_DOMAIN_DEBUG is not set
+CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_CLOCKSOURCE_WATCHDOG=y
+CONFIG_KTIME_SCALAR=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+
+#
+# Timers subsystem
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ_COMMON=y
+# CONFIG_HZ_PERIODIC is not set
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+
+#
+# CPU/Task time and stats accounting
+#
+# CONFIG_TICK_CPU_ACCOUNTING is not set
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_PREEMPT_RCU=y
+CONFIG_RCU_STALL_COMMON=y
+CONFIG_RCU_FANOUT=32
+CONFIG_RCU_FANOUT_LEAF=16
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_RCU_FAST_NO_HZ is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_RCU_BOOST is not set
+# CONFIG_RCU_NOCB_CPU is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
+CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
+CONFIG_ARCH_WANTS_PROT_NUMA_PROT_NONE=y
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_PROC_PID_CPUSET=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+# CONFIG_MEMCG is not set
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_CFS_BANDWIDTH is not set
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_CGROUP=y
+# CONFIG_DEBUG_BLK_CGROUP is not set
+# CONFIG_CHECKPOINT_RESTORE is not set
+CONFIG_NAMESPACES=y
+CONFIG_UTS_NS=y
+CONFIG_IPC_NS=y
+# CONFIG_USER_NS is not set
+CONFIG_PID_NS=y
+CONFIG_NET_NS=y
+CONFIG_UIDGID_CONVERTED=y
+# CONFIG_UIDGID_STRICT_TYPE_CHECKS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_HAVE_UID16=y
+CONFIG_SYSCTL_EXCEPTION_TRACE=y
+CONFIG_HOTPLUG=y
+CONFIG_HAVE_PCSPKR_PLATFORM=y
+CONFIG_PANIC_TIMEOUT=0
+CONFIG_EXPERT=y
+# CONFIG_UPTIME_LIMITED_KERNEL is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_PCSPKR_PLATFORM=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_PCI_QUIRKS=y
+CONFIG_EMBEDDED=y
+CONFIG_HAVE_PERF_EVENTS=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
+CONFIG_OPROFILE=y
+# CONFIG_OPROFILE_EVENT_MULTIPLEX is not set
+CONFIG_HAVE_OPROFILE=y
+CONFIG_OPROFILE_NMI_TIMER=y
+# CONFIG_JUMP_LABEL is not set
+# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_ARCH_USE_BUILTIN_BSWAP=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_OPTPROBES=y
+CONFIG_HAVE_KPROBES_ON_FTRACE=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_DMA_ATTRS=y
+CONFIG_HAVE_DMA_CONTIGUOUS=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
+CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
+CONFIG_HAVE_USER_RETURN_NOTIFIER=y
+CONFIG_HAVE_PERF_EVENTS_NMI=y
+CONFIG_HAVE_PERF_REGS=y
+CONFIG_HAVE_PERF_USER_STACK_DUMP=y
+CONFIG_HAVE_ARCH_JUMP_LABEL=y
+CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
+CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
+CONFIG_HAVE_CMPXCHG_LOCAL=y
+CONFIG_HAVE_CMPXCHG_DOUBLE=y
+CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y
+CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
+CONFIG_SECCOMP_FILTER=y
+CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
+CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
+CONFIG_MODULES_USE_ELF_REL=y
+CONFIG_CLONE_BACKWARDS=y
+CONFIG_OLD_SIGSUSPEND3=y
+CONFIG_OLD_SIGACTION=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+# CONFIG_MODULES is not set
+CONFIG_STOP_MACHINE=y
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_BSGLIB is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+CONFIG_BLK_DEV_THROTTLING=y
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+CONFIG_OSF_PARTITION=y
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+# CONFIG_LDM_PARTITION is not set
+CONFIG_SGI_PARTITION=y
+# CONFIG_ULTRIX_PARTITION is not set
+CONFIG_SUN_PARTITION=y
+# CONFIG_KARMA_PARTITION is not set
+CONFIG_EFI_PARTITION=y
+# CONFIG_SYSV68_PARTITION is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_CFQ_GROUP_IOSCHED is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_ASN1=y
+CONFIG_UNINLINE_SPIN_UNLOCK=y
+CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
+CONFIG_FREEZER=y
+
+#
+# Processor type and features
+#
+CONFIG_ZONE_DMA=y
+CONFIG_SMP=y
+CONFIG_X86_MPPARSE=y
+# CONFIG_X86_BIGSMP is not set
+CONFIG_X86_EXTENDED_PLATFORM=y
+# CONFIG_X86_GOLDFISH is not set
+CONFIG_X86_WANT_INTEL_MID=y
+CONFIG_X86_INTEL_MID=y
+# CONFIG_X86_MDFLD is not set
+CONFIG_ATOM_SOC_POWER=y
+# CONFIG_REMOVEME_INTEL_ATOM_MDFLD_POWER is not set
+# CONFIG_REMOVEME_INTEL_ATOM_CLV_POWER is not set
+CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER=y
+CONFIG_INTEL_DEBUG_FEATURE=y
+# CONFIG_X86_RDC321X is not set
+# CONFIG_X86_32_NON_STANDARD is not set
+CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y
+# CONFIG_X86_32_IRIS is not set
+# CONFIG_SCHED_OMIT_FRAME_POINTER is not set
+# CONFIG_HYPERVISOR_GUEST is not set
+CONFIG_NO_BOOTMEM=y
+# CONFIG_MEMTEST is not set
+# CONFIG_M486 is not set
+# CONFIG_M586 is not set
+# CONFIG_M586TSC is not set
+# CONFIG_M586MMX is not set
+# CONFIG_M686 is not set
+# CONFIG_MPENTIUMII is not set
+# CONFIG_MPENTIUMIII is not set
+# CONFIG_MPENTIUMM is not set
+# CONFIG_MPENTIUM4 is not set
+# CONFIG_MK6 is not set
+# CONFIG_MK7 is not set
+# CONFIG_MK8 is not set
+# CONFIG_MCRUSOE is not set
+# CONFIG_MEFFICEON is not set
+# CONFIG_MWINCHIPC6 is not set
+# CONFIG_MWINCHIP3D is not set
+# CONFIG_MELAN is not set
+# CONFIG_MGEODEGX1 is not set
+# CONFIG_MGEODE_LX is not set
+# CONFIG_MCYRIXIII is not set
+# CONFIG_MVIAC3_2 is not set
+# CONFIG_MVIAC7 is not set
+# CONFIG_MCORE2 is not set
+# CONFIG_MATOM is not set
+CONFIG_MSLM=y
+CONFIG_X86_GENERIC=y
+CONFIG_X86_INTERNODE_CACHE_SHIFT=6
+CONFIG_X86_L1_CACHE_SHIFT=6
+CONFIG_X86_INTEL_USERCOPY=y
+CONFIG_X86_USE_PPRO_CHECKSUM=y
+CONFIG_X86_TSC=y
+CONFIG_X86_CMPXCHG64=y
+CONFIG_X86_CMOV=y
+CONFIG_X86_MINIMUM_CPU_FAMILY=5
+CONFIG_X86_DEBUGCTLMSR=y
+# CONFIG_PROCESSOR_SELECT is not set
+CONFIG_CPU_SUP_INTEL=y
+CONFIG_CPU_SUP_CYRIX_32=y
+CONFIG_CPU_SUP_AMD=y
+CONFIG_CPU_SUP_CENTAUR=y
+CONFIG_CPU_SUP_TRANSMETA_32=y
+CONFIG_CPU_SUP_UMC_32=y
+# CONFIG_HPET_TIMER is not set
+# CONFIG_APB_TIMER is not set
+CONFIG_ARCH_NR_GPIO=512
+CONFIG_DMI=y
+CONFIG_NR_CPUS=2
+CONFIG_SCHED_SMT=y
+CONFIG_SCHED_MC=y
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+CONFIG_PREEMPT_COUNT=y
+CONFIG_X86_LOCAL_APIC=y
+CONFIG_X86_IO_APIC=y
+# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set
+CONFIG_X86_MCE=y
+CONFIG_X86_MCE_INTEL=y
+# CONFIG_X86_MCE_AMD is not set
+# CONFIG_X86_ANCIENT_MCE is not set
+CONFIG_X86_MCE_THRESHOLD=y
+# CONFIG_X86_MCE_INJECT is not set
+CONFIG_X86_THERMAL_VECTOR=y
+CONFIG_VM86=y
+CONFIG_X86_16BIT=y
+CONFIG_X86_ESPFIX32=y
+# CONFIG_TOSHIBA is not set
+# CONFIG_I8K is not set
+CONFIG_X86_REBOOTFIXUPS=y
+# CONFIG_MICROCODE is not set
+CONFIG_X86_MSR=y
+CONFIG_X86_CPUID=y
+# CONFIG_NOHIGHMEM is not set
+# CONFIG_HIGHMEM4G is not set
+CONFIG_HIGHMEM64G=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_HIGHMEM=y
+CONFIG_X86_PAE=y
+CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
+CONFIG_ARCH_DMA_ADDR_T_64BIT=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_ILLEGAL_POINTER_VALUE=0
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_SPARSEMEM_STATIC=y
+CONFIG_HAVE_MEMBLOCK=y
+CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
+CONFIG_ARCH_DISCARD_MEMBLOCK=y
+# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=999999
+# CONFIG_COMPACTION is not set
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_KSM=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
+CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
+# CONFIG_MEMORY_FAILURE is not set
+# CONFIG_TRANSPARENT_HUGEPAGE is not set
+CONFIG_CROSS_MEMORY_ATTACH=y
+# CONFIG_CLEANCACHE is not set
+# CONFIG_HIGHPTE is not set
+CONFIG_X86_CHECK_BIOS_CORRUPTION=y
+# CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK is not set
+CONFIG_X86_RESERVE_LOW=64
+# CONFIG_MATH_EMULATION is not set
+CONFIG_MTRR=y
+CONFIG_MTRR_SANITIZER=y
+CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
+CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
+CONFIG_X86_PAT=y
+CONFIG_ARCH_USES_PG_UNCACHED=y
+# CONFIG_ARCH_RANDOM is not set
+CONFIG_X86_SMAP=y
+CONFIG_SECCOMP=y
+# CONFIG_CC_STACKPROTECTOR is not set
+CONFIG_HZ_100=y
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=100
+CONFIG_SCHED_HRTICK=y
+# CONFIG_KEXEC is not set
+# CONFIG_CRASH_DUMP is not set
+CONFIG_PHYSICAL_START=0x1200000
+# CONFIG_RELOCATABLE is not set
+CONFIG_PHYSICAL_ALIGN=0x100000
+CONFIG_HOTPLUG_CPU=y
+# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
+# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
+# CONFIG_COMPAT_VDSO is not set
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+
+#
+# Power management and ACPI options
+#
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_HAS_WAKELOCK=y
+CONFIG_WAKELOCK=y
+CONFIG_PM_SLEEP=y
+CONFIG_PM_SLEEP_SMP=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=100
+CONFIG_PM_WAKELOCKS_GC=y
+CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
+CONFIG_PM_ADVANCED_DEBUG=y
+# CONFIG_PM_TEST_SUSPEND is not set
+CONFIG_PM_SLEEP_DEBUG=y
+# CONFIG_PM_TRACE_RTC is not set
+# CONFIG_SUSPEND_TIME is not set
+# CONFIG_ACPI is not set
+CONFIG_SFI=y
+# CONFIG_APM is not set
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_GOV_INTERACTIVE is not set
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+
+#
+# x86 CPU frequency scaling drivers
+#
+# CONFIG_X86_INTEL_PSTATE is not set
+CONFIG_X86_SFI_CPUFREQ=y
+# CONFIG_X86_POWERNOW_K6 is not set
+# CONFIG_X86_POWERNOW_K7 is not set
+# CONFIG_X86_GX_SUSPMOD is not set
+# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
+# CONFIG_X86_SPEEDSTEP_ICH is not set
+# CONFIG_X86_SPEEDSTEP_SMI is not set
+# CONFIG_X86_P4_CLOCKMOD is not set
+# CONFIG_X86_CPUFREQ_NFORCE2 is not set
+# CONFIG_X86_LONGRUN is not set
+
+#
+# shared options
+#
+# CONFIG_X86_SPEEDSTEP_LIB is not set
+CONFIG_CPU_IDLE=y
+# CONFIG_CPU_IDLE_MULTIPLE_DRIVERS is not set
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set
+CONFIG_INTEL_IDLE=y
+
+#
+# Bus options (PCI etc.)
+#
+CONFIG_PCI=y
+# CONFIG_PCI_GOBIOS is not set
+# CONFIG_PCI_GOMMCONFIG is not set
+# CONFIG_PCI_GODIRECT is not set
+CONFIG_PCI_GOANY=y
+CONFIG_PCI_BIOS=y
+CONFIG_PCI_DIRECT=y
+CONFIG_PCI_MMCONFIG=y
+CONFIG_PCI_DOMAINS=y
+# CONFIG_PCI_CNB20LE_QUIRK is not set
+CONFIG_PCIEPORTBUS=y
+# CONFIG_PCIEAER is not set
+CONFIG_PCIEASPM=y
+# CONFIG_PCIEASPM_DEBUG is not set
+# CONFIG_PCIEASPM_DEFAULT is not set
+# CONFIG_PCIEASPM_POWERSAVE is not set
+CONFIG_PCIEASPM_PERFORMANCE=y
+CONFIG_PCIE_PME=y
+CONFIG_ARCH_SUPPORTS_MSI=y
+CONFIG_PCI_MSI=y
+# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
+# CONFIG_PCI_STUB is not set
+CONFIG_HT_IRQ=y
+# CONFIG_PCI_IOV is not set
+# CONFIG_PCI_PRI is not set
+# CONFIG_PCI_PASID is not set
+CONFIG_PCI_LABEL=y
+CONFIG_ISA_DMA_API=y
+# CONFIG_ISA is not set
+# CONFIG_SCx200 is not set
+# CONFIG_ALIX is not set
+# CONFIG_NET5501 is not set
+# CONFIG_GEOS is not set
+CONFIG_AMD_NB=y
+# CONFIG_PCCARD is not set
+# CONFIG_HOTPLUG_PCI is not set
+# CONFIG_RAPIDIO is not set
+
+#
+# Executable file formats / Emulations
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+CONFIG_BINFMT_SCRIPT=y
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+CONFIG_COREDUMP=y
+CONFIG_HAVE_ATOMIC_IOMAP=y
+CONFIG_HAVE_TEXT_POKE_SMP=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_DIAG is not set
+CONFIG_UNIX=y
+# CONFIG_UNIX_DIAG is not set
+CONFIG_XFRM=y
+CONFIG_XFRM_ALGO=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_MIGRATE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+# CONFIG_IP_FIB_TRIE_STATS is not set
+CONFIG_IP_MULTIPLE_TABLES=y
+# CONFIG_IP_ROUTE_MULTIPATH is not set
+# CONFIG_IP_ROUTE_VERBOSE is not set
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE_DEMUX is not set
+CONFIG_NET_IP_TUNNEL=y
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_NET_IPVTI is not set
+# CONFIG_INET_AH is not set
+CONFIG_INET_ESP=y
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+CONFIG_INET_TUNNEL=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+CONFIG_INET_LRO=y
+# CONFIG_INET_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+CONFIG_INET6_XFRM_MODE_TRANSPORT=y
+CONFIG_INET6_XFRM_MODE_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_BEET=y
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=y
+# CONFIG_IPV6_SIT_6RD is not set
+CONFIG_IPV6_NDISC_NODETYPE=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_GRE is not set
+CONFIG_IPV6_MULTIPLE_TABLES=y
+# CONFIG_IPV6_SUBTREES is not set
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_NETLABEL is not set
+CONFIG_ANDROID_PARANOID_NETWORK=y
+CONFIG_NET_ACTIVITY_STATS=y
+CONFIG_NETWORK_SECMARK=y
+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_BRIDGE_NETFILTER=y
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_NETLINK=y
+# CONFIG_NETFILTER_NETLINK_ACCT is not set
+# CONFIG_NETFILTER_NETLINK_QUEUE is not set
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_MARK=y
+# CONFIG_NF_CONNTRACK_SECMARK is not set
+CONFIG_NF_CONNTRACK_PROCFS=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+# CONFIG_NF_CONNTRACK_TIMEOUT is not set
+# CONFIG_NF_CONNTRACK_TIMESTAMP is not set
+# CONFIG_NF_CT_PROTO_DCCP is not set
+# CONFIG_NF_CT_PROTO_SCTP is not set
+# CONFIG_NF_CT_PROTO_UDPLITE is not set
+# CONFIG_NF_CONNTRACK_AMANDA is not set
+# CONFIG_NF_CONNTRACK_FTP is not set
+# CONFIG_NF_CONNTRACK_H323 is not set
+# CONFIG_NF_CONNTRACK_IRC is not set
+# CONFIG_NF_CONNTRACK_NETBIOS_NS is not set
+# CONFIG_NF_CONNTRACK_SNMP is not set
+# CONFIG_NF_CONNTRACK_PPTP is not set
+# CONFIG_NF_CONNTRACK_SANE is not set
+# CONFIG_NF_CONNTRACK_SIP is not set
+# CONFIG_NF_CONNTRACK_TFTP is not set
+# CONFIG_NF_CT_NETLINK is not set
+# CONFIG_NF_CT_NETLINK_TIMEOUT is not set
+CONFIG_NF_NAT=y
+CONFIG_NF_NAT_NEEDED=y
+# CONFIG_NF_NAT_AMANDA is not set
+# CONFIG_NF_NAT_FTP is not set
+# CONFIG_NF_NAT_IRC is not set
+# CONFIG_NF_NAT_SIP is not set
+# CONFIG_NF_NAT_TFTP is not set
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NETFILTER_XTABLES=y
+
+#
+# Xtables combined modules
+#
+CONFIG_NETFILTER_XT_MARK=y
+CONFIG_NETFILTER_XT_CONNMARK=y
+
+#
+# Xtables targets
+#
+# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set
+# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set
+# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+# CONFIG_NETFILTER_XT_TARGET_CT is not set
+# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
+CONFIG_NETFILTER_XT_TARGET_HL=y
+# CONFIG_NETFILTER_XT_TARGET_HMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set
+# CONFIG_NETFILTER_XT_TARGET_LED is not set
+# CONFIG_NETFILTER_XT_TARGET_LOG is not set
+# CONFIG_NETFILTER_XT_TARGET_MARK is not set
+CONFIG_NETFILTER_XT_TARGET_NETMAP=y
+# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
+# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set
+# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
+# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
+CONFIG_NETFILTER_XT_TARGET_REDIRECT=y
+# CONFIG_NETFILTER_XT_TARGET_TEE is not set
+# CONFIG_NETFILTER_XT_TARGET_TPROXY is not set
+# CONFIG_NETFILTER_XT_TARGET_TRACE is not set
+# CONFIG_NETFILTER_XT_TARGET_SECMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
+# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
+
+#
+# Xtables matches
+#
+# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_BPF is not set
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=y
+# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNLABEL is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNLIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNMARK is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNTRACK is not set
+# CONFIG_NETFILTER_XT_MATCH_CPU is not set
+# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
+# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set
+# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
+# CONFIG_NETFILTER_XT_MATCH_ECN is not set
+# CONFIG_NETFILTER_XT_MATCH_ESP is not set
+# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_HL=y
+# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
+# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set
+# CONFIG_NETFILTER_XT_MATCH_LIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_MAC is not set
+# CONFIG_NETFILTER_XT_MATCH_MARK is not set
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set
+# CONFIG_NETFILTER_XT_MATCH_OSF is not set
+# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
+# CONFIG_NETFILTER_XT_MATCH_POLICY is not set
+# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set
+# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_QTAGUID is not set
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+# CONFIG_NETFILTER_XT_MATCH_QUOTA2 is not set
+# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
+# CONFIG_NETFILTER_XT_MATCH_REALM is not set
+# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
+# CONFIG_NETFILTER_XT_MATCH_STRING is not set
+# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
+# CONFIG_NETFILTER_XT_MATCH_TIME is not set
+# CONFIG_NETFILTER_XT_MATCH_U32 is not set
+# CONFIG_IP_SET is not set
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV4=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y
+CONFIG_IP_NF_IPTABLES=y
+# CONFIG_IP_NF_MATCH_AH is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_RPFILTER is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+# CONFIG_IP_NF_TARGET_REJECT_SKERR is not set
+# CONFIG_IP_NF_TARGET_ULOG is not set
+CONFIG_NF_NAT_IPV4=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+# CONFIG_NF_NAT_PPTP is not set
+# CONFIG_NF_NAT_H323 is not set
+CONFIG_IP_NF_MANGLE=y
+# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
+# CONFIG_IP_NF_TARGET_ECN is not set
+# CONFIG_IP_NF_TARGET_TTL is not set
+CONFIG_IP_NF_RAW=y
+# CONFIG_IP_NF_SECURITY is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV6=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_EUI64=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+# CONFIG_IP6_NF_MATCH_RPFILTER is not set
+CONFIG_IP6_NF_MATCH_RT=y
+CONFIG_IP6_NF_TARGET_HL=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+# CONFIG_IP6_NF_TARGET_REJECT_SKERR is not set
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+# CONFIG_IP6_NF_SECURITY is not set
+# CONFIG_NF_NAT_IPV6 is not set
+# CONFIG_BRIDGE_NF_EBTABLES is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+CONFIG_L2TP=y
+# CONFIG_L2TP_DEBUGFS is not set
+# CONFIG_L2TP_V3 is not set
+CONFIG_STP=y
+CONFIG_BRIDGE=y
+CONFIG_BRIDGE_IGMP_SNOOPING=y
+CONFIG_HAVE_NET_DSA=y
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=y
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+CONFIG_DNS_RESOLVER=y
+# CONFIG_BATMAN_ADV is not set
+# CONFIG_OPENVSWITCH is not set
+# CONFIG_VSOCKETS is not set
+# CONFIG_NETLINK_MMAP is not set
+# CONFIG_NETLINK_DIAG is not set
+CONFIG_RPS=y
+CONFIG_RFS_ACCEL=y
+CONFIG_XPS=y
+# CONFIG_NETPRIO_CGROUP is not set
+CONFIG_BQL=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_DROP_MONITOR is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+
+#
+# Bluetooth device drivers
+#
+# CONFIG_BT_HCIBTUSB is not set
+# CONFIG_BT_HCIBTSDIO is not set
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+# CONFIG_BT_HCIUART_BCSP is not set
+# CONFIG_BT_HCIUART_ATH3K is not set
+# CONFIG_BT_HCIUART_LL is not set
+# CONFIG_BT_HCIUART_3WIRE is not set
+# CONFIG_BT_HCIBCM203X is not set
+# CONFIG_BT_HCIBPA10X is not set
+# CONFIG_BT_HCIBFUSB is not set
+# CONFIG_BT_HCIVHCI is not set
+# CONFIG_BT_MRVL is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
+CONFIG_CFG80211=y
+# CONFIG_NL80211_TESTMODE is not set
+CONFIG_CFG80211_DEVELOPER_WARNINGS=y
+# CONFIG_CFG80211_REG_DEBUG is not set
+# CONFIG_CFG80211_CERTIFICATION_ONUS is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_DEBUGFS is not set
+# CONFIG_CFG80211_INTERNAL_REGDB is not set
+CONFIG_CFG80211_WEXT=y
+# CONFIG_LIB80211 is not set
+# CONFIG_CFG80211_ALLOW_RECONNECT is not set
+CONFIG_MAC80211=y
+CONFIG_MAC80211_HAS_RC=y
+# CONFIG_MAC80211_RC_PID is not set
+CONFIG_MAC80211_RC_MINSTREL=y
+CONFIG_MAC80211_RC_MINSTREL_HT=y
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
+# CONFIG_MAC80211_MESH is not set
+# CONFIG_MAC80211_LEDS is not set
+# CONFIG_MAC80211_DEBUGFS is not set
+# CONFIG_MAC80211_MESSAGE_TRACING is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
+CONFIG_RFKILL=y
+CONFIG_RFKILL_PM=y
+CONFIG_RFKILL_LEDS=y
+CONFIG_RFKILL_INPUT=y
+# CONFIG_RFKILL_REGULATOR is not set
+# CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
+# CONFIG_CEPH_LIB is not set
+# CONFIG_NFC is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH=""
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+CONFIG_FW_LOADER_USER_HELPER=y
+# CONFIG_DEBUG_DRIVER is not set
+CONFIG_DEBUG_DEVRES=y
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_GENERIC_CPU_DEVICES is not set
+CONFIG_HAVE_CPU_AUTOPROBE=y
+CONFIG_REGMAP=y
+CONFIG_REGMAP_I2C=y
+CONFIG_REGMAP_SPI=y
+CONFIG_REGMAP_IRQ=y
+CONFIG_DMA_SHARED_BUFFER=y
+# CONFIG_CMA is not set
+
+#
+# Bus devices
+#
+CONFIG_CONNECTOR=y
+CONFIG_PROC_EVENTS=y
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_DRBD is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_NVME is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_VIRTIO_BLK is not set
+# CONFIG_BLK_DEV_HD is not set
+# CONFIG_BLK_DEV_RBD is not set
+# CONFIG_BLK_DEV_RSXX is not set
+
+#
+# Misc devices
+#
+# CONFIG_SENSORS_LIS3LV02D is not set
+# CONFIG_AD525X_DPOT is not set
+# CONFIG_DUMMY_IRQ is not set
+# CONFIG_IBM_ASM is not set
+# CONFIG_PHANTOM is not set
+CONFIG_INTEL_MID_PTI=y
+CONFIG_INTEL_PTI_STM=y
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
+# CONFIG_ICS932S401 is not set
+# CONFIG_ATMEL_SSC is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_HP_ILO is not set
+# CONFIG_APDS9802ALS is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_ISL29020 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_SENSORS_BH1780 is not set
+# CONFIG_SENSORS_BH1770 is not set
+# CONFIG_SENSORS_APDS990X is not set
+# CONFIG_HMC6352 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_TI_DAC7512 is not set
+# CONFIG_UID_STAT is not set
+# CONFIG_BMP085_I2C is not set
+# CONFIG_BMP085_SPI is not set
+# CONFIG_PCH_PHUB is not set
+# CONFIG_USB_SWITCH_FSA9480 is not set
+# CONFIG_LATTICE_ECP3_CONFIG is not set
+# CONFIG_SRAM is not set
+CONFIG_EMMC_IPANIC=y
+CONFIG_EMMC_IPANIC_PLABEL="panic"
+# CONFIG_UID_CPUTIME is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_EEPROM_93XX46 is not set
+# CONFIG_CB710_CORE is not set
+
+#
+# Texas Instruments shared transport line discipline
+#
+# CONFIG_TI_ST is not set
+# CONFIG_SENSORS_LIS3_SPI is not set
+# CONFIG_SENSORS_LIS3_I2C is not set
+
+#
+# Altera FPGA firmware download module
+#
+# CONFIG_ALTERA_STAPL is not set
+# CONFIG_INTEL_MEI is not set
+# CONFIG_INTEL_MEI_ME is not set
+# CONFIG_VMWARE_VMCI is not set
+CONFIG_BCM_BT_LPM=y
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_CHR_DEV_SCH is not set
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_TARGET_CORE is not set
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_FIREWIRE is not set
+# CONFIG_FIREWIRE_NOSY is not set
+# CONFIG_I2O is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+CONFIG_NETDEVICES=y
+CONFIG_NET_CORE=y
+# CONFIG_BONDING is not set
+# CONFIG_DUMMY is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_NET_FC is not set
+CONFIG_MII=y
+# CONFIG_NET_TEAM is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_VXLAN is not set
+CONFIG_NETCONSOLE=y
+# CONFIG_NETCONSOLE_DYNAMIC is not set
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
+CONFIG_TUN=y
+# CONFIG_VETH is not set
+# CONFIG_VIRTIO_NET is not set
+# CONFIG_ARCNET is not set
+
+#
+# CAIF transport drivers
+#
+# CONFIG_VHOST_NET is not set
+
+#
+# Distributed Switch Architecture drivers
+#
+# CONFIG_NET_DSA_MV88E6XXX is not set
+# CONFIG_NET_DSA_MV88E6060 is not set
+# CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set
+# CONFIG_NET_DSA_MV88E6131 is not set
+# CONFIG_NET_DSA_MV88E6123_61_65 is not set
+# CONFIG_ETHERNET is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_AT803X_PHY is not set
+# CONFIG_AMD_PHY is not set
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_BCM87XX_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MICREL_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+# CONFIG_MICREL_KS8995MA is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_RTL8152 is not set
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_AX8817X=y
+# CONFIG_USB_NET_AX88179_178A is not set
+# CONFIG_USB_NET_CDCETHER is not set
+# CONFIG_USB_NET_CDC_EEM is not set
+CONFIG_USB_NET_CDC_NCM=y
+# CONFIG_USB_NET_CDC_MBIM is not set
+# CONFIG_USB_NET_DM9601 is not set
+# CONFIG_USB_NET_SMSC75XX is not set
+# CONFIG_USB_NET_SMSC95XX is not set
+# CONFIG_USB_NET_GL620A is not set
+# CONFIG_USB_NET_NET1080 is not set
+# CONFIG_USB_NET_PLUSB is not set
+# CONFIG_USB_NET_MCS7830 is not set
+# CONFIG_USB_NET_RNDIS_HOST is not set
+CONFIG_USB_NET_CDC_SUBSET=y
+# CONFIG_USB_ALI_M5632 is not set
+# CONFIG_USB_AN2720 is not set
+# CONFIG_USB_BELKIN is not set
+# CONFIG_USB_ARMLINUX is not set
+# CONFIG_USB_EPSON2888 is not set
+# CONFIG_USB_KC2190 is not set
+# CONFIG_USB_NET_ZAURUS is not set
+# CONFIG_USB_NET_CX82310_ETH is not set
+# CONFIG_USB_NET_KALMIA is not set
+# CONFIG_USB_NET_QMI_WWAN is not set
+# CONFIG_USB_HSO is not set
+# CONFIG_USB_NET_INT51X1 is not set
+# CONFIG_USB_IPHETH is not set
+# CONFIG_USB_SIERRA_NET is not set
+CONFIG_WLAN=y
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_AIRO is not set
+# CONFIG_ATMEL is not set
+# CONFIG_AT76C50X_USB is not set
+# CONFIG_PRISM54 is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+# CONFIG_RTL8180 is not set
+# CONFIG_RTL8187 is not set
+# CONFIG_ADM8211 is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_MWL8K is not set
+# CONFIG_WIFI_CONTROL_FUNC is not set
+CONFIG_WIFI_PLATFORM_DATA=y
+# CONFIG_ATH_CARDS is not set
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+# CONFIG_BRCMFMAC is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_IPW2100 is not set
+# CONFIG_IPW2200 is not set
+# CONFIG_IWLWIFI is not set
+# CONFIG_IWL4965 is not set
+# CONFIG_IWL3945 is not set
+# CONFIG_LIBERTAS is not set
+# CONFIG_HERMES is not set
+# CONFIG_P54_COMMON is not set
+# CONFIG_RT2X00 is not set
+# CONFIG_RTLWIFI is not set
+# CONFIG_WL_TI is not set
+# CONFIG_ZD1211RW is not set
+# CONFIG_MWIFIEX is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+# CONFIG_VMXNET3 is not set
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+CONFIG_INPUT_POLLDEV=y
+CONFIG_INPUT_SPARSEKMAP=y
+# CONFIG_INPUT_MATRIXKMAP is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+# CONFIG_INPUT_KEYRESET is not set
+# CONFIG_INPUT_KEYCOMBO is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ADP5589 is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_QT1070 is not set
+# CONFIG_KEYBOARD_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_GPIO_POLLED=y
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_TCA8418 is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_LM8333 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_MCS is not set
+# CONFIG_KEYBOARD_MPR121 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+CONFIG_INPUT_MOUSE=y
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_CYAPA is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_GPIO is not set
+# CONFIG_MOUSE_SYNAPTICS_I2C is not set
+# CONFIG_MOUSE_SYNAPTICS_USB is not set
+CONFIG_INPUT_JOYSTICK=y
+# CONFIG_JOYSTICK_ANALOG is not set
+# CONFIG_JOYSTICK_A3D is not set
+# CONFIG_JOYSTICK_ADI is not set
+# CONFIG_JOYSTICK_COBRA is not set
+# CONFIG_JOYSTICK_GF2K is not set
+# CONFIG_JOYSTICK_GRIP is not set
+# CONFIG_JOYSTICK_GRIP_MP is not set
+# CONFIG_JOYSTICK_GUILLEMOT is not set
+# CONFIG_JOYSTICK_INTERACT is not set
+# CONFIG_JOYSTICK_SIDEWINDER is not set
+# CONFIG_JOYSTICK_TMDC is not set
+# CONFIG_JOYSTICK_IFORCE is not set
+# CONFIG_JOYSTICK_WARRIOR is not set
+# CONFIG_JOYSTICK_MAGELLAN is not set
+# CONFIG_JOYSTICK_SPACEORB is not set
+# CONFIG_JOYSTICK_SPACEBALL is not set
+# CONFIG_JOYSTICK_STINGER is not set
+# CONFIG_JOYSTICK_TWIDJOY is not set
+# CONFIG_JOYSTICK_ZHENHUA is not set
+# CONFIG_JOYSTICK_AS5011 is not set
+# CONFIG_JOYSTICK_JOYDUMP is not set
+# CONFIG_JOYSTICK_XPAD is not set
+CONFIG_INPUT_TABLET=y
+# CONFIG_TABLET_USB_ACECAD is not set
+# CONFIG_TABLET_USB_AIPTEK is not set
+# CONFIG_TABLET_USB_GTCO is not set
+# CONFIG_TABLET_USB_HANWANG is not set
+# CONFIG_TABLET_USB_KBTAB is not set
+# CONFIG_TABLET_USB_WACOM is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set
+# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set
+# CONFIG_TOUCHSCREEN_BU21013 is not set
+# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set
+# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set
+# CONFIG_TOUCHSCREEN_DYNAPRO is not set
+# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_ILI210X is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_WACOM_I2C is not set
+# CONFIG_TOUCHSCREEN_MAX11801 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MMS114 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_INTEL_MID is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_PIXCIR is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC_SERIO is not set
+# CONFIG_TOUCHSCREEN_TSC2005 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_ST1232 is not set
+# CONFIG_TOUCHSCREEN_TPS6507X is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_AD714X is not set
+# CONFIG_INPUT_BMA150 is not set
+# CONFIG_INPUT_PCSPKR is not set
+# CONFIG_INPUT_MMA8450 is not set
+# CONFIG_INPUT_MPU3050 is not set
+# CONFIG_INPUT_APANEL is not set
+# CONFIG_INPUT_GP2A is not set
+# CONFIG_INPUT_GPIO_TILT_POLLED is not set
+# CONFIG_INPUT_WISTRON_BTNS is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYCHORD is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_KXTJ9 is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+CONFIG_INPUT_UINPUT=y
+# CONFIG_INPUT_GPIO is not set
+# CONFIG_INPUT_PCF8574 is not set
+# CONFIG_INPUT_PWM_BEEPER is not set
+# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
+# CONFIG_INPUT_ADXL34X is not set
+# CONFIG_INPUT_IMS_PCU is not set
+# CONFIG_INPUT_CMA3000 is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+# CONFIG_SERIO_I8042 is not set
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PCIPS2 is not set
+# CONFIG_SERIO_LIBPS2 is not set
+# CONFIG_SERIO_RAW is not set
+# CONFIG_SERIO_ALTERA_PS2 is not set
+# CONFIG_SERIO_PS2MULT is not set
+# CONFIG_SERIO_ARC_PS2 is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_TTY=y
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_VT_CONSOLE_SLEEP=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_ROCKETPORT is not set
+# CONFIG_CYCLADES is not set
+# CONFIG_MOXA_INTELLIO is not set
+# CONFIG_MOXA_SMARTIO is not set
+# CONFIG_SYNCLINK is not set
+# CONFIG_SYNCLINKMP is not set
+# CONFIG_SYNCLINK_GT is not set
+# CONFIG_NOZOMI is not set
+# CONFIG_ISI is not set
+# CONFIG_N_HDLC is not set
+CONFIG_N_GSM=y
+# CONFIG_TRACE_SINK is not set
+CONFIG_DEVMEM=y
+# CONFIG_DEVKMEM is not set
+# CONFIG_STALDRV is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+CONFIG_FIX_EARLYCON_MEM=y
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_MAX310X is not set
+CONFIG_SERIAL_MRST_MAX3110=y
+CONFIG_SERIAL_MFD_HSU=y
+CONFIG_SERIAL_MFD_HSU_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_SCCNXP is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_IFX6X60 is not set
+# CONFIG_SERIAL_PCH_UART is not set
+# CONFIG_SERIAL_ARC is not set
+# CONFIG_SERIAL_RP2 is not set
+# CONFIG_TTY_PRINTK is not set
+# CONFIG_VIRTIO_CONSOLE is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+CONFIG_HW_RANDOM_INTEL=y
+CONFIG_HW_RANDOM_AMD=y
+CONFIG_HW_RANDOM_GEODE=y
+CONFIG_HW_RANDOM_VIA=y
+# CONFIG_HW_RANDOM_VIRTIO is not set
+CONFIG_NVRAM=y
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_SONYPI is not set
+# CONFIG_MWAVE is not set
+# CONFIG_PC8736x_GPIO is not set
+# CONFIG_NSC_GPIO is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_HANGCHECK_TIMER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+CONFIG_DEVPORT=y
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_MUX is not set
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# PC SMBus host controller drivers
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_ISCH is not set
+# CONFIG_I2C_ISMT is not set
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_CBUS_GPIO is not set
+CONFIG_I2C_DESIGNWARE_CORE_FORK=y
+CONFIG_I2C_DESIGNWARE_PCI_FORK=y
+# CONFIG_I2C_DESIGNWARE_PLATFORM_FORK is not set
+CONFIG_I2C_DW_SPEED_MODE_DEBUG=y
+# CONFIG_I2C_PMIC is not set
+# CONFIG_I2C_DESIGNWARE_PCI is not set
+# CONFIG_I2C_EG20T is not set
+CONFIG_I2C_GPIO=y
+# CONFIG_I2C_INTEL_MID is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_PXA_PCI is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_DIOLAN_U2C is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_SCx200_ACB is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_ALTERA is not set
+CONFIG_SPI_BITBANG=y
+CONFIG_SPI_GPIO=y
+CONFIG_SPI_INTEL_MID_SSP=y
+# CONFIG_SPI_OC_TINY is not set
+# CONFIG_SPI_PXA2XX is not set
+# CONFIG_SPI_PXA2XX_PCI is not set
+# CONFIG_SPI_SC18IS602 is not set
+# CONFIG_SPI_TOPCLIFF_PCH is not set
+# CONFIG_SPI_XCOMM is not set
+# CONFIG_SPI_XILINX is not set
+CONFIG_SPI_DESIGNWARE=y
+CONFIG_SPI_DW_PCI=y
+CONFIG_SPI_DW_MID_DMA=y
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_SPIDEV=y
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# Qualcomm MSM SSBI bus support
+#
+# CONFIG_SSBI is not set
+# CONFIG_HSI is not set
+
+#
+# PPS support
+#
+CONFIG_PPS=y
+# CONFIG_PPS_DEBUG is not set
+
+#
+# PPS clients support
+#
+# CONFIG_PPS_CLIENT_KTIMER is not set
+# CONFIG_PPS_CLIENT_LDISC is not set
+# CONFIG_PPS_CLIENT_GPIO is not set
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+CONFIG_PTP_1588_CLOCK=y
+
+#
+# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks.
+#
+# CONFIG_PTP_1588_CLOCK_PCH is not set
+CONFIG_ARCH_HAVE_CUSTOM_GPIO_H=y
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+CONFIG_GPIO_DEVRES=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIODEBUG=y
+
+#
+# Memory mapped GPIO drivers:
+#
+# CONFIG_GPIO_GENERIC_PLATFORM is not set
+# CONFIG_GPIO_IT8761E is not set
+# CONFIG_GPIO_TS5500 is not set
+# CONFIG_GPIO_SCH is not set
+# CONFIG_GPIO_ICH is not set
+# CONFIG_GPIO_VX855 is not set
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCA953X_IRQ=y
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_SX150X is not set
+# CONFIG_GPIO_WM8994 is not set
+# CONFIG_GPIO_ADP5588 is not set
+
+#
+# PCI GPIO expanders:
+#
+# CONFIG_GPIO_BT8XX is not set
+# CONFIG_GPIO_AMD8111 is not set
+CONFIG_GPIO_LANGWELL=y
+# CONFIG_GPIO_PCH is not set
+# CONFIG_GPIO_ML_IOH is not set
+# CONFIG_GPIO_RDC321X is not set
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+# CONFIG_GPIO_74X164 is not set
+
+#
+# AC97 GPIO expanders:
+#
+
+#
+# MODULbus GPIO expanders:
+#
+# CONFIG_GPIO_MSIC is not set
+
+#
+# USB GPIO expanders:
+#
+# CONFIG_W1 is not set
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+CONFIG_PMIC_CCSM=y
+CONFIG_BQ24261_CHARGER=y
+# CONFIG_PDA_POWER is not set
+# CONFIG_GENERIC_ADC_BATTERY is not set
+# CONFIG_TEST_POWER is not set
+# CONFIG_BATTERY_DS2780 is not set
+# CONFIG_BATTERY_DS2781 is not set
+# CONFIG_BATTERY_DS2782 is not set
+# CONFIG_BATTERY_SBS is not set
+# CONFIG_BATTERY_BQ27x00 is not set
+# CONFIG_BATTERY_MAX17040 is not set
+CONFIG_BATTERY_MAX17042=y
+# CONFIG_BATTERY_INTEL_MID is not set
+# CONFIG_CHARGER_ISP1704 is not set
+# CONFIG_CHARGER_MAX8903 is not set
+# CONFIG_CHARGER_LP8727 is not set
+# CONFIG_CHARGER_GPIO is not set
+# CONFIG_CHARGER_MANAGER is not set
+# CONFIG_CHARGER_BQ2415X is not set
+# CONFIG_CHARGER_SMB347 is not set
+# CONFIG_BATTERY_GOLDFISH is not set
+# CONFIG_POWER_RESET is not set
+# CONFIG_POWER_AVS is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_ABITUGURU3 is not set
+# CONFIG_SENSORS_AD7314 is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7310 is not set
+# CONFIG_SENSORS_ADT7410 is not set
+# CONFIG_SENSORS_ADT7411 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
+# CONFIG_SENSORS_K8TEMP is not set
+# CONFIG_SENSORS_K10TEMP is not set
+# CONFIG_SENSORS_FAM15H_POWER is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS620 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_I5K_AMB is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_FSCHMD is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_GPIO_FAN is not set
+# CONFIG_SENSORS_HIH6130 is not set
+CONFIG_SENSORS_CORETEMP=y
+CONFIG_SENSORS_CORETEMP_INTERRUPT=y
+# CONFIG_SENSORS_IIO_HWMON is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_JC42 is not set
+# CONFIG_SENSORS_LINEAGE is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_LM73 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4151 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LTC4261 is not set
+# CONFIG_SENSORS_LM95234 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_LM95245 is not set
+CONFIG_MSIC_GPADC=y
+CONFIG_INTEL_MCU=y
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX16065 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX1668 is not set
+# CONFIG_SENSORS_MAX197 is not set
+# CONFIG_SENSORS_MAX6639 is not set
+# CONFIG_SENSORS_MAX6642 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_MAX6697 is not set
+# CONFIG_SENSORS_MCP3021 is not set
+# CONFIG_SENSORS_NCT6775 is not set
+# CONFIG_SENSORS_NTC_THERMISTOR is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_PMBUS is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_SHT21 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_SMM665 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_EMC1403 is not set
+# CONFIG_SENSORS_EMC2103 is not set
+# CONFIG_SENSORS_EMC6W201 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_SCH56XX_COMMON is not set
+# CONFIG_SENSORS_SCH5627 is not set
+# CONFIG_SENSORS_SCH5636 is not set
+# CONFIG_SENSORS_ADS1015 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_ADS7871 is not set
+# CONFIG_SENSORS_AMC6821 is not set
+# CONFIG_SENSORS_INA209 is not set
+# CONFIG_SENSORS_INA2XX is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP102 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_VIA_CPUTEMP is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83795 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_APPLESMC is not set
+CONFIG_THERMAL=y
+CONFIG_THERMAL_HWMON=y
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
+# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
+# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
+# CONFIG_THERMAL_GOV_FAIR_SHARE is not set
+CONFIG_THERMAL_GOV_STEP_WISE=y
+# CONFIG_THERMAL_GOV_USER_SPACE is not set
+# CONFIG_CPU_THERMAL is not set
+# CONFIG_THERMAL_EMULATION is not set
+# CONFIG_INTEL_POWERCLAMP is not set
+CONFIG_SENSORS_THERMAL_MRFLD=y
+CONFIG_SOC_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_ACQUIRE_WDT is not set
+# CONFIG_ADVANTECH_WDT is not set
+# CONFIG_ALIM1535_WDT is not set
+# CONFIG_ALIM7101_WDT is not set
+# CONFIG_F71808E_WDT is not set
+# CONFIG_SP5100_TCO is not set
+# CONFIG_SC520_WDT is not set
+# CONFIG_SBC_FITPC2_WATCHDOG is not set
+# CONFIG_EUROTECH_WDT is not set
+# CONFIG_IB700_WDT is not set
+# CONFIG_IBMASR is not set
+# CONFIG_WAFER_WDT is not set
+# CONFIG_I6300ESB_WDT is not set
+# CONFIG_IE6XX_WDT is not set
+# CONFIG_INTEL_SCU_WATCHDOG is not set
+# CONFIG_INTEL_SCU_WATCHDOG_EVO is not set
+CONFIG_INTEL_MID_WATCHDOG=y
+# CONFIG_ITCO_WDT is not set
+# CONFIG_IT8712F_WDT is not set
+# CONFIG_IT87_WDT is not set
+# CONFIG_HP_WATCHDOG is not set
+# CONFIG_SC1200_WDT is not set
+# CONFIG_PC87413_WDT is not set
+# CONFIG_NV_TCO is not set
+# CONFIG_60XX_WDT is not set
+# CONFIG_SBC8360_WDT is not set
+# CONFIG_SBC7240_WDT is not set
+# CONFIG_CPU5_WDT is not set
+# CONFIG_SMSC_SCH311X_WDT is not set
+# CONFIG_SMSC37B787_WDT is not set
+# CONFIG_VIA_WDT is not set
+# CONFIG_W83627HF_WDT is not set
+# CONFIG_W83697HF_WDT is not set
+# CONFIG_W83697UG_WDT is not set
+# CONFIG_W83877F_WDT is not set
+# CONFIG_W83977F_WDT is not set
+# CONFIG_MACHZ_WDT is not set
+# CONFIG_SBC_EPX_C3_WATCHDOG is not set
+
+#
+# PCI-based Watchdog Cards
+#
+# CONFIG_PCIPCWATCHDOG is not set
+# CONFIG_WDTPCI is not set
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+CONFIG_BCMA_POSSIBLE=y
+
+#
+# Broadcom specific AMBA
+#
+# CONFIG_BCMA is not set
+
+#
+# Multifunction device drivers
+#
+CONFIG_MFD_CORE=y
+# CONFIG_MFD_CS5535 is not set
+# CONFIG_MFD_AS3711 is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_AAT2870_CORE is not set
+# CONFIG_MFD_CROS_EC is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_DA9052_SPI is not set
+# CONFIG_MFD_DA9052_I2C is not set
+# CONFIG_MFD_DA9055 is not set
+# CONFIG_MFD_MC13XXX_SPI is not set
+# CONFIG_MFD_MC13XXX_I2C is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
+# CONFIG_LPC_ICH is not set
+# CONFIG_LPC_SCH is not set
+CONFIG_MFD_INTEL_MSIC=y
+# CONFIG_MFD_JANZ_CMODIO is not set
+# CONFIG_MFD_88PM800 is not set
+# CONFIG_MFD_88PM805 is not set
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_MAX77686 is not set
+# CONFIG_MFD_MAX77693 is not set
+# CONFIG_MFD_MAX8907 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_MAX8997 is not set
+# CONFIG_MFD_MAX8998 is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_MFD_VIPERBOARD is not set
+# CONFIG_MFD_RETU is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_RDC321X is not set
+# CONFIG_MFD_RTSX_PCI is not set
+# CONFIG_MFD_RC5T583 is not set
+# CONFIG_MFD_SEC_CORE is not set
+# CONFIG_MFD_SI476X_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_SMSC is not set
+# CONFIG_ABX500_CORE is not set
+# CONFIG_MFD_STMPE is not set
+# CONFIG_MFD_SYSCON is not set
+# CONFIG_MFD_TI_AM335X_TSCADC is not set
+# CONFIG_MFD_LP8788 is not set
+# CONFIG_MFD_PALMAS is not set
+# CONFIG_TPS6105X is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TPS6507X is not set
+# CONFIG_MFD_TPS65090 is not set
+# CONFIG_MFD_TPS65217 is not set
+# CONFIG_MFD_TPS6586X is not set
+# CONFIG_MFD_TPS65910 is not set
+# CONFIG_MFD_TPS65912 is not set
+# CONFIG_MFD_TPS65912_I2C is not set
+# CONFIG_MFD_TPS65912_SPI is not set
+# CONFIG_MFD_TPS80031 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_TWL6040_CORE is not set
+# CONFIG_MFD_WL1273_CORE is not set
+# CONFIG_MFD_LM3533 is not set
+# CONFIG_MFD_TIMBERDALE is not set
+# CONFIG_MFD_TC3589X is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_VX855 is not set
+# CONFIG_MFD_ARIZONA_I2C is not set
+# CONFIG_MFD_ARIZONA_SPI is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X_I2C is not set
+# CONFIG_MFD_WM831X_SPI is not set
+# CONFIG_MFD_WM8350_I2C is not set
+CONFIG_MFD_WM8994=y
+CONFIG_REGULATOR=y
+# CONFIG_REGULATOR_DEBUG is not set
+# CONFIG_REGULATOR_DUMMY is not set
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
+# CONFIG_REGULATOR_GPIO is not set
+# CONFIG_REGULATOR_AD5398 is not set
+# CONFIG_REGULATOR_FAN53555 is not set
+# CONFIG_REGULATOR_ISL6271A is not set
+# CONFIG_REGULATOR_MAX1586 is not set
+# CONFIG_REGULATOR_MAX8649 is not set
+# CONFIG_REGULATOR_MAX8660 is not set
+# CONFIG_REGULATOR_MAX8952 is not set
+# CONFIG_REGULATOR_MAX8973 is not set
+# CONFIG_REGULATOR_LP3971 is not set
+# CONFIG_REGULATOR_LP3972 is not set
+# CONFIG_REGULATOR_LP872X is not set
+# CONFIG_REGULATOR_LP8755 is not set
+# CONFIG_REGULATOR_TPS51632 is not set
+# CONFIG_REGULATOR_TPS62360 is not set
+# CONFIG_REGULATOR_TPS65023 is not set
+# CONFIG_REGULATOR_TPS6507X is not set
+# CONFIG_REGULATOR_TPS6524X is not set
+CONFIG_REGULATOR_WM8994=y
+CONFIG_REGULATOR_PMIC_BASIN_COVE=y
+CONFIG_MEDIA_SUPPORT=y
+
+#
+# Multimedia core support
+#
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set
+# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set
+# CONFIG_MEDIA_RADIO_SUPPORT is not set
+# CONFIG_MEDIA_RC_SUPPORT is not set
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_DEV=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_V4L2=y
+# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
+CONFIG_VIDEOBUF2_CORE=y
+CONFIG_VIDEOBUF2_MEMOPS=y
+CONFIG_VIDEOBUF2_VMALLOC=y
+# CONFIG_VIDEO_V4L2_INT_DEVICE is not set
+# CONFIG_TTPCI_EEPROM is not set
+
+#
+# Media drivers
+#
+CONFIG_MEDIA_USB_SUPPORT=y
+
+#
+# Webcam devices
+#
+CONFIG_USB_VIDEO_CLASS=y
+CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
+CONFIG_USB_GSPCA=y
+# CONFIG_USB_M5602 is not set
+# CONFIG_USB_STV06XX is not set
+# CONFIG_USB_GL860 is not set
+# CONFIG_USB_GSPCA_BENQ is not set
+# CONFIG_USB_GSPCA_CONEX is not set
+# CONFIG_USB_GSPCA_CPIA1 is not set
+# CONFIG_USB_GSPCA_ETOMS is not set
+# CONFIG_USB_GSPCA_FINEPIX is not set
+# CONFIG_USB_GSPCA_JEILINJ is not set
+# CONFIG_USB_GSPCA_JL2005BCD is not set
+# CONFIG_USB_GSPCA_KINECT is not set
+# CONFIG_USB_GSPCA_KONICA is not set
+# CONFIG_USB_GSPCA_MARS is not set
+# CONFIG_USB_GSPCA_MR97310A is not set
+# CONFIG_USB_GSPCA_NW80X is not set
+# CONFIG_USB_GSPCA_OV519 is not set
+# CONFIG_USB_GSPCA_OV534 is not set
+# CONFIG_USB_GSPCA_OV534_9 is not set
+# CONFIG_USB_GSPCA_PAC207 is not set
+# CONFIG_USB_GSPCA_PAC7302 is not set
+# CONFIG_USB_GSPCA_PAC7311 is not set
+# CONFIG_USB_GSPCA_SE401 is not set
+# CONFIG_USB_GSPCA_SN9C2028 is not set
+# CONFIG_USB_GSPCA_SN9C20X is not set
+# CONFIG_USB_GSPCA_SONIXB is not set
+# CONFIG_USB_GSPCA_SONIXJ is not set
+# CONFIG_USB_GSPCA_SPCA500 is not set
+# CONFIG_USB_GSPCA_SPCA501 is not set
+# CONFIG_USB_GSPCA_SPCA505 is not set
+# CONFIG_USB_GSPCA_SPCA506 is not set
+# CONFIG_USB_GSPCA_SPCA508 is not set
+# CONFIG_USB_GSPCA_SPCA561 is not set
+# CONFIG_USB_GSPCA_SPCA1528 is not set
+# CONFIG_USB_GSPCA_SQ905 is not set
+# CONFIG_USB_GSPCA_SQ905C is not set
+# CONFIG_USB_GSPCA_SQ930X is not set
+# CONFIG_USB_GSPCA_STK014 is not set
+# CONFIG_USB_GSPCA_STV0680 is not set
+# CONFIG_USB_GSPCA_SUNPLUS is not set
+# CONFIG_USB_GSPCA_T613 is not set
+# CONFIG_USB_GSPCA_TOPRO is not set
+# CONFIG_USB_GSPCA_TV8532 is not set
+# CONFIG_USB_GSPCA_VC032X is not set
+# CONFIG_USB_GSPCA_VICAM is not set
+# CONFIG_USB_GSPCA_XIRLINK_CIT is not set
+# CONFIG_USB_GSPCA_ZC3XX is not set
+# CONFIG_USB_PWC is not set
+# CONFIG_VIDEO_CPIA2 is not set
+# CONFIG_USB_ZR364XX is not set
+# CONFIG_USB_STKWEBCAM is not set
+# CONFIG_USB_S2255 is not set
+# CONFIG_USB_SN9C102 is not set
+
+#
+# Webcam, TV (analog/digital) USB devices
+#
+# CONFIG_VIDEO_EM28XX is not set
+# CONFIG_MEDIA_PCI_SUPPORT is not set
+# CONFIG_V4L_PLATFORM_DRIVERS is not set
+# CONFIG_V4L_MEM2MEM_DRIVERS is not set
+# CONFIG_V4L_TEST_DRIVERS is not set
+
+#
+# Supported MMC/SDIO adapters
+#
+# CONFIG_CYPRESS_FIRMWARE is not set
+
+#
+# Media ancillary drivers (tuners, sensors, i2c, frontends)
+#
+# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
+
+#
+# Encoders, decoders, sensors and other helper chips
+#
+
+#
+# Audio decoders, processors and mixers
+#
+# CONFIG_VIDEO_TVAUDIO is not set
+# CONFIG_VIDEO_TDA7432 is not set
+# CONFIG_VIDEO_TDA9840 is not set
+# CONFIG_VIDEO_TEA6415C is not set
+# CONFIG_VIDEO_TEA6420 is not set
+# CONFIG_VIDEO_MSP3400 is not set
+# CONFIG_VIDEO_CS5345 is not set
+# CONFIG_VIDEO_CS53L32A is not set
+# CONFIG_VIDEO_TLV320AIC23B is not set
+# CONFIG_VIDEO_UDA1342 is not set
+# CONFIG_VIDEO_WM8775 is not set
+# CONFIG_VIDEO_WM8739 is not set
+# CONFIG_VIDEO_VP27SMPX is not set
+# CONFIG_VIDEO_SONY_BTF_MPX is not set
+
+#
+# RDS decoders
+#
+# CONFIG_VIDEO_SAA6588 is not set
+
+#
+# Video decoders
+#
+# CONFIG_VIDEO_ADV7180 is not set
+# CONFIG_VIDEO_ADV7183 is not set
+# CONFIG_VIDEO_ADV7604 is not set
+# CONFIG_VIDEO_BT819 is not set
+# CONFIG_VIDEO_BT856 is not set
+# CONFIG_VIDEO_BT866 is not set
+# CONFIG_VIDEO_KS0127 is not set
+# CONFIG_VIDEO_SAA7110 is not set
+# CONFIG_VIDEO_SAA711X is not set
+# CONFIG_VIDEO_SAA7191 is not set
+# CONFIG_VIDEO_TVP514X is not set
+# CONFIG_VIDEO_TVP5150 is not set
+# CONFIG_VIDEO_TVP7002 is not set
+# CONFIG_VIDEO_TW2804 is not set
+# CONFIG_VIDEO_TW9903 is not set
+# CONFIG_VIDEO_TW9906 is not set
+# CONFIG_VIDEO_VPX3220 is not set
+
+#
+# Video and audio decoders
+#
+# CONFIG_VIDEO_SAA717X is not set
+# CONFIG_VIDEO_CX25840 is not set
+
+#
+# Video encoders
+#
+# CONFIG_VIDEO_SAA7127 is not set
+# CONFIG_VIDEO_SAA7185 is not set
+# CONFIG_VIDEO_ADV7170 is not set
+# CONFIG_VIDEO_ADV7175 is not set
+# CONFIG_VIDEO_ADV7343 is not set
+# CONFIG_VIDEO_ADV7393 is not set
+# CONFIG_VIDEO_AD9389B is not set
+# CONFIG_VIDEO_AK881X is not set
+
+#
+# Camera sensor devices
+#
+# CONFIG_VIDEO_OV7640 is not set
+# CONFIG_VIDEO_OV7670 is not set
+# CONFIG_VIDEO_OV9650 is not set
+# CONFIG_VIDEO_VS6624 is not set
+# CONFIG_VIDEO_MT9M032 is not set
+# CONFIG_VIDEO_MT9P031 is not set
+# CONFIG_VIDEO_MT9T001 is not set
+# CONFIG_VIDEO_MT9V011 is not set
+# CONFIG_VIDEO_MT9V032 is not set
+# CONFIG_VIDEO_SR030PC30 is not set
+# CONFIG_VIDEO_NOON010PC30 is not set
+# CONFIG_VIDEO_M5MOLS is not set
+# CONFIG_VIDEO_S5K6AA is not set
+# CONFIG_VIDEO_S5K4ECGX is not set
+# CONFIG_VIDEO_S5C73M3 is not set
+
+#
+# Flash devices
+#
+# CONFIG_VIDEO_ADP1653 is not set
+# CONFIG_VIDEO_AS3645A is not set
+
+#
+# Video improvement chips
+#
+# CONFIG_VIDEO_UPD64031A is not set
+# CONFIG_VIDEO_UPD64083 is not set
+
+#
+# Miscellaneous helper chips
+#
+# CONFIG_VIDEO_THS7303 is not set
+# CONFIG_VIDEO_M52790 is not set
+
+#
+# Sensors used on soc_camera driver
+#
+
+#
+# Customise DVB Frontends
+#
+# CONFIG_DVB_AU8522_V4L is not set
+CONFIG_DVB_TUNER_DIB0070=y
+CONFIG_DVB_TUNER_DIB0090=y
+
+#
+# Tools to develop new frontends
+#
+# CONFIG_DVB_DUMMY_FE is not set
+
+#
+# Graphics support
+#
+CONFIG_AGP=y
+# CONFIG_AGP_ALI is not set
+# CONFIG_AGP_ATI is not set
+# CONFIG_AGP_AMD is not set
+CONFIG_AGP_AMD64=y
+CONFIG_AGP_INTEL=y
+# CONFIG_AGP_NVIDIA is not set
+# CONFIG_AGP_SIS is not set
+# CONFIG_AGP_SWORKS is not set
+# CONFIG_AGP_VIA is not set
+# CONFIG_AGP_EFFICEON is not set
+CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
+CONFIG_DRM=y
+# CONFIG_DRM_TDFX is not set
+# CONFIG_DRM_R128 is not set
+# CONFIG_DRM_RADEON is not set
+# CONFIG_DRM_NOUVEAU is not set
+# CONFIG_DRM_I915 is not set
+# CONFIG_DRM_MGA is not set
+# CONFIG_DRM_SIS is not set
+# CONFIG_DRM_VIA is not set
+# CONFIG_DRM_SAVAGE is not set
+# CONFIG_DRM_VMWGFX is not set
+# CONFIG_DRM_GMA500 is not set
+# CONFIG_DRM_UDL is not set
+# CONFIG_DRM_AST is not set
+# CONFIG_DRM_MGAG200 is not set
+# CONFIG_DRM_CIRRUS_QEMU is not set
+# CONFIG_DRM_QXL is not set
+# CONFIG_VGASTATE is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_HDMI=y
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+# CONFIG_FB_CFB_FILLRECT is not set
+# CONFIG_FB_CFB_COPYAREA is not set
+# CONFIG_FB_CFB_IMAGEBLIT is not set
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_ARC is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_VGA16 is not set
+# CONFIG_FB_UVESA is not set
+# CONFIG_FB_VESA is not set
+# CONFIG_FB_N411 is not set
+# CONFIG_FB_HGA is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_I740 is not set
+# CONFIG_FB_I810 is not set
+# CONFIG_FB_LE80578 is not set
+# CONFIG_FB_INTEL is not set
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_VIA is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_CARMINE is not set
+# CONFIG_FB_GEODE is not set
+# CONFIG_FB_TMIO is not set
+# CONFIG_FB_SMSCUFX is not set
+# CONFIG_FB_UDL is not set
+# CONFIG_FB_GOLDFISH is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_AUO_K190X is not set
+# CONFIG_EXYNOS_VIDEO is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=y
+# CONFIG_BACKLIGHT_PWM is not set
+# CONFIG_BACKLIGHT_SAHARA is not set
+# CONFIG_BACKLIGHT_ADP8860 is not set
+# CONFIG_BACKLIGHT_ADP8870 is not set
+# CONFIG_BACKLIGHT_LM3630 is not set
+# CONFIG_BACKLIGHT_LM3639 is not set
+# CONFIG_BACKLIGHT_LP855X is not set
+# CONFIG_ADF is not set
+
+#
+# Console display driver support
+#
+CONFIG_VGA_CONSOLE=y
+# CONFIG_VGACON_SOFT_SCROLLBACK is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_LOGO is not set
+CONFIG_SOUND=y
+# CONFIG_SOUND_OSS_CORE is not set
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+CONFIG_SND_HWDEP=y
+CONFIG_SND_RAWMIDI=y
+CONFIG_SND_COMPRESS_OFFLOAD=y
+CONFIG_SND_EFFECTS_OFFLOAD=y
+CONFIG_SND_JACK=y
+CONFIG_SND_SEQUENCER=y
+# CONFIG_SND_SEQ_DUMMY is not set
+# CONFIG_SND_MIXER_OSS is not set
+# CONFIG_SND_PCM_OSS is not set
+# CONFIG_SND_SEQUENCER_OSS is not set
+# CONFIG_SND_HRTIMER is not set
+CONFIG_SND_DYNAMIC_MINORS=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_DMA_SGBUF=y
+CONFIG_SND_RAWMIDI_SEQ=y
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_DRIVERS=y
+# CONFIG_SND_PCSP is not set
+# CONFIG_SND_DUMMY is not set
+CONFIG_SND_ALOOP=y
+# CONFIG_SND_VIRMIDI is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+# CONFIG_SND_PCI is not set
+# CONFIG_SND_SPI is not set
+CONFIG_SND_USB=y
+CONFIG_SND_USB_AUDIO=y
+# CONFIG_SND_USB_UA101 is not set
+# CONFIG_SND_USB_USX2Y is not set
+# CONFIG_SND_USB_CAIAQ is not set
+# CONFIG_SND_USB_US122L is not set
+# CONFIG_SND_USB_6FIRE is not set
+CONFIG_SND_SOC=y
+# CONFIG_SND_ATMEL_SOC is not set
+# CONFIG_SND_MFLD_MACHINE is not set
+CONFIG_SND_MRFLD_MACHINE=y
+CONFIG_SND_INTEL_SST=y
+CONFIG_SND_SST_PLATFORM=y
+CONFIG_SST_MRFLD_DPCM=y
+CONFIG_SND_SST_MACHINE=y
+CONFIG_SND_SOC_I2C_AND_SPI=y
+# CONFIG_SND_SOC_ALL_CODECS is not set
+CONFIG_SND_SOC_WM_HUBS=y
+CONFIG_SND_SOC_LM49453=y
+CONFIG_SND_SOC_WM8994=y
+# CONFIG_SND_SIMPLE_CARD is not set
+# CONFIG_SOUND_PRIME is not set
+
+#
+# HID support
+#
+CONFIG_HID=y
+# CONFIG_HID_BATTERY_STRENGTH is not set
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_GENERIC=y
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_ACRUX is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_APPLEIR is not set
+# CONFIG_HID_AUREAL is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_PRODIKEYS is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EMS_FF is not set
+# CONFIG_HID_ELECOM is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_HOLTEK is not set
+# CONFIG_HID_KEYTOUCH is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_UCLOGIC is not set
+# CONFIG_HID_WALTOP is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_ICADE is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LCPOWER is not set
+# CONFIG_HID_LENOVO_TPKBD is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MAGICMOUSE is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_MULTITOUCH is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_PICOLCD is not set
+# CONFIG_HID_PRIMAX is not set
+# CONFIG_HID_PS3REMOTE is not set
+# CONFIG_HID_ROCCAT is not set
+# CONFIG_HID_SAITEK is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_SPEEDLINK is not set
+# CONFIG_HID_STEELSERIES is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TIVO is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_THINGM is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_WACOM is not set
+# CONFIG_HID_WIIMOTE is not set
+# CONFIG_HID_ZEROPLUS is not set
+# CONFIG_HID_ZYDACRON is not set
+# CONFIG_HID_SENSOR_HUB is not set
+
+#
+# USB HID support
+#
+CONFIG_USB_HID=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+
+#
+# I2C HID support
+#
+# CONFIG_I2C_HID is not set
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB_ARCH_HAS_XHCI=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_COMMON=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEFAULT_PERSIST=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+CONFIG_USB_OTG=y
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=y
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PLATFORM=y
+# CONFIG_USB_XHCI_HCD_DEBUGGING is not set
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_PCI=y
+# CONFIG_USB_EHCI_HCD_PLATFORM is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+# CONFIG_USB_OHCI_HCD is not set
+# CONFIG_USB_UHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_RENESAS_USBHS is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=y
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_REALTEK is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_STORAGE_ENE_UB6250 is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+CONFIG_USB_DWC3=y
+# CONFIG_USB_DWC3_HOST is not set
+CONFIG_USB_DWC3_GADGET=y
+# CONFIG_USB_DWC3_DUAL_ROLE is not set
+
+#
+# Platform Glue Driver Support
+#
+# CONFIG_USB_DWC3_PCI is not set
+CONFIG_USB_DWC3_OTG=y
+CONFIG_USB_DWC3_INTEL_MRFL=y
+# CONFIG_USB_DWC3_INTEL_BYT is not set
+CONFIG_USB_DWC3_DEVICE_INTEL=y
+CONFIG_USB_DWC3_HOST_INTEL=y
+
+#
+# Debugging features
+#
+# CONFIG_USB_DWC3_DEBUG is not set
+# CONFIG_USB_CHIPIDEA is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=y
+# CONFIG_USB_SERIAL_CONSOLE is not set
+# CONFIG_USB_SERIAL_GENERIC is not set
+# CONFIG_USB_SERIAL_AIRCABLE is not set
+# CONFIG_USB_SERIAL_ARK3116 is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_CH341 is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+CONFIG_USB_SERIAL_CP210X=y
+# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+# CONFIG_USB_SERIAL_FTDI_SIO is not set
+# CONFIG_USB_SERIAL_FUNSOFT is not set
+# CONFIG_USB_SERIAL_VISOR is not set
+# CONFIG_USB_SERIAL_IPAQ is not set
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+# CONFIG_USB_SERIAL_F81232 is not set
+# CONFIG_USB_SERIAL_GARMIN is not set
+# CONFIG_USB_SERIAL_IPW is not set
+# CONFIG_USB_SERIAL_IUU is not set
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+# CONFIG_USB_SERIAL_KEYSPAN is not set
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+# CONFIG_USB_SERIAL_MCT_U232 is not set
+# CONFIG_USB_SERIAL_METRO is not set
+# CONFIG_USB_SERIAL_MOS7720 is not set
+# CONFIG_USB_SERIAL_MOS7840 is not set
+# CONFIG_USB_SERIAL_MOTOROLA is not set
+# CONFIG_USB_SERIAL_NAVMAN is not set
+CONFIG_USB_SERIAL_PL2303=y
+# CONFIG_USB_SERIAL_OTI6858 is not set
+# CONFIG_USB_SERIAL_QCAUX is not set
+# CONFIG_USB_SERIAL_QUALCOMM is not set
+# CONFIG_USB_SERIAL_SPCP8X5 is not set
+# CONFIG_USB_SERIAL_HP4X is not set
+# CONFIG_USB_SERIAL_SAFE is not set
+# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
+# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
+# CONFIG_USB_SERIAL_SYMBOL is not set
+# CONFIG_USB_SERIAL_TI is not set
+# CONFIG_USB_SERIAL_CYBERJACK is not set
+# CONFIG_USB_SERIAL_XIRCOM is not set
+# CONFIG_USB_SERIAL_OPTION is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set
+# CONFIG_USB_SERIAL_XSENS_MT is not set
+# CONFIG_USB_SERIAL_ZIO is not set
+# CONFIG_USB_SERIAL_WISHBONE is not set
+# CONFIG_USB_SERIAL_ZTE is not set
+# CONFIG_USB_SERIAL_SSU100 is not set
+# CONFIG_USB_SERIAL_QT2 is not set
+# CONFIG_USB_SERIAL_DEBUG is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_YUREX is not set
+# CONFIG_USB_EZUSB_FX2 is not set
+# CONFIG_USB_HSIC_USB3503 is not set
+CONFIG_USB_PHY=y
+# CONFIG_USB_OTG_WAKELOCK is not set
+CONFIG_NOP_USB_XCEIV=y
+# CONFIG_OMAP_CONTROL_USB is not set
+# CONFIG_OMAP_USB3 is not set
+# CONFIG_SAMSUNG_USBPHY is not set
+# CONFIG_SAMSUNG_USB2PHY is not set
+# CONFIG_SAMSUNG_USB3PHY is not set
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_USB_ISP1301 is not set
+# CONFIG_USB_RCAR_PHY is not set
+# CONFIG_DUAL_ROLE_USB_INTF is not set
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
+
+#
+# USB Peripheral Controller
+#
+# CONFIG_USB_R8A66597 is not set
+# CONFIG_USB_PXA27X is not set
+# CONFIG_USB_MV_UDC is not set
+# CONFIG_USB_MV_U3D is not set
+# CONFIG_USB_M66592 is not set
+# CONFIG_USB_AMD5536UDC is not set
+# CONFIG_USB_NET2272 is not set
+# CONFIG_USB_NET2280 is not set
+# CONFIG_USB_GOKU is not set
+# CONFIG_USB_EG20T is not set
+# CONFIG_USB_DUMMY_HCD is not set
+CONFIG_USB_LIBCOMPOSITE=y
+CONFIG_USB_F_ACM=y
+CONFIG_USB_U_SERIAL=y
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_AUDIO is not set
+# CONFIG_USB_ETH is not set
+# CONFIG_USB_G_NCM is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FUNCTIONFS is not set
+# CONFIG_USB_MASS_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+CONFIG_USB_G_ANDROID=y
+# CONFIG_USB_ANDROID_RNDIS_DWORD_ALIGNED is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_ACM_MS is not set
+# CONFIG_USB_G_MULTI is not set
+# CONFIG_USB_G_HID is not set
+# CONFIG_USB_G_DBGP is not set
+# CONFIG_USB_G_WEBCAM is not set
+# CONFIG_UWB is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_UNSAFE_RESUME=y
+# CONFIG_MMC_CLKGATE is not set
+# CONFIG_MMC_EMBEDDED_SDIO is not set
+# CONFIG_MMC_PARANOID_SD_INIT is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_MMC_BLOCK_DEFERRED_RESUME is not set
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PCI=y
+# CONFIG_MMC_RICOH_MMC is not set
+# CONFIG_MMC_SDHCI_PLTFM is not set
+# CONFIG_MMC_WBSD is not set
+# CONFIG_MMC_TIFM_SD is not set
+# CONFIG_MMC_CB710 is not set
+# CONFIG_MMC_VIA_SDMMC is not set
+# CONFIG_MMC_VUB300 is not set
+# CONFIG_MMC_USHC is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_LM3530 is not set
+# CONFIG_LEDS_LM3642 is not set
+# CONFIG_LEDS_PCA9532 is not set
+# CONFIG_LEDS_GPIO is not set
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_LP5521 is not set
+# CONFIG_LEDS_LP5523 is not set
+# CONFIG_LEDS_LP5562 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_PCA9633 is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_PWM is not set
+# CONFIG_LEDS_REGULATOR is not set
+# CONFIG_LEDS_BD2802 is not set
+# CONFIG_LEDS_INTEL_SS4200 is not set
+# CONFIG_LEDS_LT3593 is not set
+# CONFIG_LEDS_TCA6507 is not set
+# CONFIG_LEDS_LM355x is not set
+# CONFIG_LEDS_OT200 is not set
+# CONFIG_LEDS_BLINKM is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+# CONFIG_LEDS_TRIGGER_TIMER is not set
+# CONFIG_LEDS_TRIGGER_ONESHOT is not set
+# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_CPU is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_LEDS_TRIGGER_TRANSIENT is not set
+# CONFIG_LEDS_TRIGGER_CAMERA is not set
+# CONFIG_SWITCH is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
+CONFIG_EDAC=y
+CONFIG_EDAC_LEGACY_SYSFS=y
+# CONFIG_EDAC_DEBUG is not set
+# CONFIG_EDAC_MM_EDAC is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_SYSTOHC=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_DS3232 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_ISL12022 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8523 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+# CONFIG_RTC_DRV_EM3027 is not set
+# CONFIG_RTC_DRV_RV3029C2 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T93 is not set
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+# CONFIG_RTC_DRV_RX4581 is not set
+
+#
+# Platform RTC drivers
+#
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_RTC_DRV_CMOS_WAKEUP_FROM_LPSTATES=y
+CONFIG_RTC_DRV_CMOS_DAYOFMONTH_ALARM=y
+# CONFIG_RTC_DRV_VRTC is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+# CONFIG_RTC_DRV_DS2404 is not set
+
+#
+# on-CPU RTC drivers
+#
+
+#
+# HID Sensor RTC drivers
+#
+# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set
+CONFIG_DMADEVICES=y
+# CONFIG_DMADEVICES_DEBUG is not set
+
+#
+# DMA Devices
+#
+CONFIG_INTEL_MID_DMAC=y
+# CONFIG_INTEL_IOATDMA is not set
+# CONFIG_DW_DMAC is not set
+# CONFIG_TIMB_DMA is not set
+# CONFIG_PCH_DMA is not set
+CONFIG_DMA_ENGINE=y
+
+#
+# DMA Clients
+#
+# CONFIG_ASYNC_TX_DMA is not set
+# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+# CONFIG_VIRT_DRIVERS is not set
+CONFIG_VIRTIO=y
+
+#
+# Virtio drivers
+#
+# CONFIG_VIRTIO_PCI is not set
+# CONFIG_VIRTIO_BALLOON is not set
+# CONFIG_VIRTIO_MMIO is not set
+
+#
+# Microsoft Hyper-V guest support
+#
+CONFIG_STAGING=y
+# CONFIG_ET131X is not set
+# CONFIG_SLICOSS is not set
+# CONFIG_USBIP_CORE is not set
+# CONFIG_W35UND is not set
+# CONFIG_PRISM2_USB is not set
+# CONFIG_ECHO is not set
+# CONFIG_ASUS_OLED is not set
+# CONFIG_R8712U is not set
+# CONFIG_RTS5139 is not set
+# CONFIG_TRANZPORT is not set
+# CONFIG_LINE6_USB is not set
+# CONFIG_USB_SERIAL_QUATECH2 is not set
+# CONFIG_DX_SEP is not set
+CONFIG_DX_SEP54=y
+
+#
+# IIO staging drivers
+#
+
+#
+# Accelerometers
+#
+# CONFIG_ADIS16201 is not set
+# CONFIG_ADIS16203 is not set
+# CONFIG_ADIS16204 is not set
+# CONFIG_ADIS16209 is not set
+# CONFIG_ADIS16220 is not set
+# CONFIG_ADIS16240 is not set
+# CONFIG_LIS3L02DQ is not set
+# CONFIG_SCA3000 is not set
+
+#
+# Analog to digital converters
+#
+# CONFIG_AD7291 is not set
+# CONFIG_AD7606 is not set
+# CONFIG_AD799X is not set
+# CONFIG_AD7780 is not set
+# CONFIG_AD7816 is not set
+# CONFIG_AD7192 is not set
+# CONFIG_AD7280 is not set
+
+#
+# Analog-to-digital bi-directional converters
+#
+# CONFIG_ADT7316 is not set
+
+#
+# Capacitance to digital converters
+#
+# CONFIG_AD7150 is not set
+# CONFIG_AD7152 is not set
+# CONFIG_AD7746 is not set
+
+#
+# Direct Digital Synthesis
+#
+# CONFIG_AD5930 is not set
+# CONFIG_AD9832 is not set
+# CONFIG_AD9834 is not set
+# CONFIG_AD9850 is not set
+# CONFIG_AD9852 is not set
+# CONFIG_AD9910 is not set
+# CONFIG_AD9951 is not set
+
+#
+# Digital gyroscope sensors
+#
+# CONFIG_ADIS16060 is not set
+# CONFIG_ADIS16130 is not set
+# CONFIG_ADIS16260 is not set
+
+#
+# Network Analyzer, Impedance Converters
+#
+# CONFIG_AD5933 is not set
+
+#
+# Light sensors
+#
+# CONFIG_SENSORS_ISL29018 is not set
+# CONFIG_SENSORS_ISL29028 is not set
+# CONFIG_TSL2583 is not set
+# CONFIG_TSL2x7x is not set
+
+#
+# Magnetometer sensors
+#
+# CONFIG_SENSORS_HMC5843 is not set
+
+#
+# Active energy metering IC
+#
+# CONFIG_ADE7753 is not set
+# CONFIG_ADE7754 is not set
+# CONFIG_ADE7758 is not set
+# CONFIG_ADE7759 is not set
+# CONFIG_ADE7854 is not set
+
+#
+# Resolver to digital converters
+#
+# CONFIG_AD2S90 is not set
+# CONFIG_AD2S1200 is not set
+# CONFIG_AD2S1210 is not set
+
+#
+# Triggers - standalone
+#
+# CONFIG_IIO_PERIODIC_RTC_TRIGGER is not set
+# CONFIG_IIO_GPIO_TRIGGER is not set
+CONFIG_IIO_SYSFS_TRIGGER=y
+# CONFIG_IIO_SIMPLE_DUMMY is not set
+# CONFIG_ZSMALLOC is not set
+# CONFIG_FB_SM7XX is not set
+# CONFIG_CRYSTALHD is not set
+# CONFIG_FB_XGI is not set
+# CONFIG_BCM_WIMAX is not set
+# CONFIG_FT1000 is not set
+
+#
+# Speakup console speech
+#
+# CONFIG_SPEAKUP is not set
+# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set
+# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set
+# CONFIG_STAGING_MEDIA is not set
+
+#
+# Android
+#
+CONFIG_ANDROID=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_TIMED_OUTPUT=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
+CONFIG_SW_SYNC_USER=y
+# CONFIG_ION is not set
+# CONFIG_FIQ_WATCHDOG is not set
+CONFIG_INTEL_SCU_REBOOT_REASON=y
+CONFIG_INTEL_REBOOT_TARGET=y
+# CONFIG_USB_WPAN_HCD is not set
+# CONFIG_WIMAX_GDM72XX is not set
+# CONFIG_CSR_WIFI is not set
+# CONFIG_NET_VENDOR_SILICOM is not set
+# CONFIG_CED1401 is not set
+# CONFIG_DGRP is not set
+# CONFIG_USB_DWC2 is not set
+CONFIG_BCMDHD=y
+# CONFIG_BCMDHD_GENERIC_CHIP_SUPPORT is not set
+# CONFIG_BCM4330 is not set
+# CONFIG_BCM4334 is not set
+CONFIG_BCM4334X=y
+# CONFIG_BCM43241 is not set
+# CONFIG_BCM4335 is not set
+# CONFIG_BCM4339 is not set
+# CONFIG_BCM4354 is not set
+CONFIG_BCMDHD_SDIO=y
+# CONFIG_BCMDHD_PCIE is not set
+CONFIG_INTEL_PLATFORM=y
+CONFIG_BCMDHD_FW_PATH="system/vendor/firmware/bcm43340/fw_bcmdhd_apsta.bin"
+CONFIG_BCMDHD_NVRAM_PATH="system/vendor/firmware/bcm43340/bcmdhd.cal"
+# CONFIG_DHD_USE_STATIC_BUF is not set
+CONFIG_DHD_USE_SCHED_SCAN=y
+CONFIG_X86_PLATFORM_DEVICES=y
+# CONFIG_CHROMEOS_LAPTOP is not set
+# CONFIG_AMILO_RFKILL is not set
+# CONFIG_SENSORS_HDAPS is not set
+CONFIG_INTEL_SCU_IPC=y
+CONFIG_INTEL_SCU_IPC_INTR_MODE=y
+# CONFIG_INTEL_SCU_IPC_POLL_MODE is not set
+CONFIG_INTEL_SCU_IPC_UTIL=y
+CONFIG_GPIO_INTEL_PMIC=y
+CONFIG_INTEL_MID_POWER_BUTTON=y
+# CONFIG_INTEL_MFLD_THERMAL is not set
+# CONFIG_IBM_RTL is not set
+# CONFIG_SAMSUNG_LAPTOP is not set
+CONFIG_INTEL_SCU_FLIS=y
+CONFIG_INTEL_PSH_IPC=y
+
+#
+# Hardware Spinlock drivers
+#
+CONFIG_CLKSRC_I8253=y
+CONFIG_CLKEVT_I8253=y
+CONFIG_I8253_LOCK=y
+CONFIG_CLKBLD_I8253=y
+# CONFIG_MAILBOX is not set
+CONFIG_IOMMU_SUPPORT=y
+
+#
+# Remoteproc drivers
+#
+CONFIG_REMOTEPROC=y
+# CONFIG_STE_MODEM_RPROC is not set
+CONFIG_INTEL_MID_REMOTEPROC=y
+
+#
+# Rpmsg drivers
+#
+CONFIG_RPMSG=y
+CONFIG_RPMSG_IPC=y
+CONFIG_PM_DEVFREQ=y
+
+#
+# DEVFREQ Governors
+#
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+
+#
+# DEVFREQ Drivers
+#
+# CONFIG_EXTCON is not set
+
+#
+# Extcon Device Drivers
+#
+# CONFIG_EXTCON_GPIO is not set
+# CONFIG_EXTCON_ADC_JACK is not set
+# CONFIG_MEMORY is not set
+CONFIG_IIO=y
+CONFIG_IIO_BUFFER=y
+# CONFIG_IIO_BUFFER_CB is not set
+CONFIG_IIO_KFIFO_BUF=y
+CONFIG_IIO_TRIGGERED_BUFFER=y
+CONFIG_IIO_TRIGGER=y
+CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
+
+#
+# Accelerometers
+#
+# CONFIG_KXSD9 is not set
+# CONFIG_IIO_ST_ACCEL_3AXIS is not set
+
+#
+# Analog to digital converters
+#
+# CONFIG_AD7266 is not set
+# CONFIG_AD7298 is not set
+# CONFIG_AD7923 is not set
+# CONFIG_AD7791 is not set
+# CONFIG_AD7793 is not set
+# CONFIG_AD7476 is not set
+# CONFIG_AD7887 is not set
+# CONFIG_MAX1363 is not set
+# CONFIG_TI_ADC081C is not set
+CONFIG_TI_ADS7955_ADC=y
+CONFIG_IIO_BASINCOVE_GPADC=y
+
+#
+# Amplifiers
+#
+# CONFIG_AD8366 is not set
+
+#
+# Hid Sensor IIO Common
+#
+
+#
+# Digital to analog converters
+#
+# CONFIG_AD5064 is not set
+# CONFIG_AD5360 is not set
+# CONFIG_AD5380 is not set
+# CONFIG_AD5421 is not set
+# CONFIG_AD5624R_SPI is not set
+# CONFIG_AD5446 is not set
+# CONFIG_AD5449 is not set
+# CONFIG_AD5504 is not set
+# CONFIG_AD5755 is not set
+# CONFIG_AD5764 is not set
+# CONFIG_AD5791 is not set
+# CONFIG_AD5686 is not set
+# CONFIG_MAX517 is not set
+# CONFIG_MCP4725 is not set
+
+#
+# Frequency Synthesizers DDS/PLL
+#
+
+#
+# Clock Generator/Distribution
+#
+# CONFIG_AD9523 is not set
+
+#
+# Phase-Locked Loop (PLL) frequency synthesizers
+#
+# CONFIG_ADF4350 is not set
+
+#
+# Digital gyroscope sensors
+#
+# CONFIG_ADIS16080 is not set
+# CONFIG_ADIS16136 is not set
+# CONFIG_ADXRS450 is not set
+# CONFIG_IIO_ST_GYRO_3AXIS is not set
+# CONFIG_ITG3200 is not set
+
+#
+# Inertial measurement units
+#
+# CONFIG_ADIS16400 is not set
+# CONFIG_ADIS16480 is not set
+# CONFIG_INV_MPU6050_IIO is not set
+
+#
+# Light sensors
+#
+# CONFIG_ADJD_S311 is not set
+# CONFIG_SENSORS_TSL2563 is not set
+# CONFIG_VCNL4000 is not set
+
+#
+# Magnetometer sensors
+#
+# CONFIG_AK8975 is not set
+# CONFIG_IIO_ST_MAGN_3AXIS is not set
+# CONFIG_VME_BUS is not set
+CONFIG_PWM=y
+CONFIG_PWM_SYSFS=y
+CONFIG_PWM_INTEL_MID=y
+# CONFIG_IPACK_BUS is not set
+# CONFIG_RESET_CONTROLLER is not set
+
+#
+# Android
+#
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_BINDER_IPC_32BIT=y
+
+#
+# Firmware Drivers
+#
+# CONFIG_EDD is not set
+CONFIG_FIRMWARE_MEMMAP=y
+# CONFIG_DELL_RBU is not set
+# CONFIG_DCDBAS is not set
+CONFIG_DMIID=y
+# CONFIG_DMI_SYSFS is not set
+# CONFIG_ISCSI_IBFT_FIND is not set
+# CONFIG_GOOGLE_FIRMWARE is not set
+
+#
+# File systems
+#
+CONFIG_DCACHE_WORD_ACCESS=y
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_POSIX_ACL is not set
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_EXT4_FS_ENCRYPTION is not set
+# CONFIG_EXT4_DEBUG is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_JBD2=y
+# CONFIG_JBD2_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_EXPORTFS=y
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_FANOTIFY=y
+# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set
+# CONFIG_QUOTA is not set
+# CONFIG_QUOTACTL is not set
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=y
+# CONFIG_CUSE is not set
+CONFIG_GENERIC_ACL=y
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+# CONFIG_VFAT_FS_NO_DUALNAMES is not set
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_VFAT_NO_CREATE_WITH_LONGNAMES is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+# CONFIG_PROC_KCORE is not set
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_XATTR=y
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_CONFIGFS_FS=y
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX6FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+# CONFIG_PSTORE_PMSG is not set
+CONFIG_PSTORE_FTRACE=y
+CONFIG_PSTORE_RAM=y
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_F2FS_FS is not set
+# CONFIG_AUFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V2=y
+CONFIG_NFS_DEF_FILE_IO_SIZE=4096
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_SWAP is not set
+# CONFIG_NFS_V4_1 is not set
+# CONFIG_NFS_USE_LEGACY_DNS is not set
+CONFIG_NFS_USE_KERNEL_DNS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V2_ACL=y
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V3_ACL=y
+# CONFIG_NFSD_V4 is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+# CONFIG_SUNRPC_DEBUG is not set
+# CONFIG_CEPH_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_MAC_ROMAN is not set
+# CONFIG_NLS_MAC_CELTIC is not set
+# CONFIG_NLS_MAC_CENTEURO is not set
+# CONFIG_NLS_MAC_CROATIAN is not set
+# CONFIG_NLS_MAC_CYRILLIC is not set
+# CONFIG_NLS_MAC_GAELIC is not set
+# CONFIG_NLS_MAC_GREEK is not set
+# CONFIG_NLS_MAC_ICELAND is not set
+# CONFIG_NLS_MAC_INUIT is not set
+# CONFIG_NLS_MAC_ROMANIAN is not set
+# CONFIG_NLS_MAC_TURKISH is not set
+CONFIG_NLS_UTF8=y
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=2048
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_READABLE_ASM is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR_NMI=y
+CONFIG_HARDLOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=1
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=1
+# CONFIG_PANIC_ON_OOPS is not set
+CONFIG_PANIC_ON_OOPS_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+CONFIG_HAVE_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=400
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_PREEMPT=y
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+CONFIG_TRACE_IRQFLAGS=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
+CONFIG_DEBUG_STACK_USAGE=y
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_HIGHMEM is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_INFO_REDUCED is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_VIRTUAL is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_LIST=y
+CONFIG_TEST_LIST_SORT=y
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_DEBUG_CREDENTIALS=y
+CONFIG_ARCH_WANT_FRAME_POINTERS=y
+CONFIG_FRAME_POINTER=y
+CONFIG_BOOT_PRINTK_DELAY=y
+
+#
+# RCU Debugging
+#
+# CONFIG_PROVE_RCU_DELAY is not set
+CONFIG_SPARSE_RCU_POINTER=y
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_RCU_CPU_STALL_VERBOSE=y
+CONFIG_RCU_CPU_STALL_INFO=y
+# CONFIG_RCU_TRACE is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_DEBUG_PER_CPU_MAPS is not set
+# CONFIG_LKDTM is not set
+# CONFIG_NOTIFIER_ERROR_INJECTION is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS=y
+# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+CONFIG_USER_STACKTRACE_SUPPORT=y
+CONFIG_NOP_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_TRACER_MAX_TRACE=y
+CONFIG_TRACE_CLOCK=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_GENERIC_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_FUNCTION_GRAPH_TRACER=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_TRACER_SNAPSHOT=y
+CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_UPROBE_EVENT is not set
+# CONFIG_PROBE_EVENTS is not set
+CONFIG_DYNAMIC_FTRACE=y
+CONFIG_DYNAMIC_FTRACE_WITH_REGS=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_FTRACE_MCOUNT_RECORD=y
+# CONFIG_FTRACE_STARTUP_TEST is not set
+# CONFIG_MMIOTRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_RING_BUFFER_STARTUP_TEST is not set
+CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
+CONFIG_DYNAMIC_DEBUG=y
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_HAVE_ARCH_KMEMCHECK=y
+# CONFIG_TEST_STRING_HELPERS is not set
+# CONFIG_TEST_KSTRTOX is not set
+CONFIG_STRICT_DEVMEM=y
+CONFIG_X86_VERBOSE_BOOTUP=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_EARLY_PRINTK_INTEL_MID=y
+# CONFIG_EARLY_PRINTK_DBGP is not set
+CONFIG_DEBUG_STACKOVERFLOW=y
+# CONFIG_X86_PTDUMP is not set
+CONFIG_DEBUG_RODATA=y
+# CONFIG_DEBUG_RODATA_TEST is not set
+CONFIG_DOUBLEFAULT=y
+# CONFIG_DEBUG_TLBFLUSH is not set
+# CONFIG_IOMMU_STRESS is not set
+CONFIG_HAVE_MMIOTRACE_SUPPORT=y
+CONFIG_IO_DELAY_TYPE_0X80=0
+CONFIG_IO_DELAY_TYPE_0XED=1
+CONFIG_IO_DELAY_TYPE_UDELAY=2
+CONFIG_IO_DELAY_TYPE_NONE=3
+CONFIG_IO_DELAY_0X80=y
+# CONFIG_IO_DELAY_0XED is not set
+# CONFIG_IO_DELAY_UDELAY is not set
+# CONFIG_IO_DELAY_NONE is not set
+CONFIG_DEFAULT_IO_DELAY_TYPE=0
+CONFIG_DEBUG_BOOT_PARAMS=y
+# CONFIG_CPA_DEBUG is not set
+CONFIG_OPTIMIZE_INLINING=y
+# CONFIG_DEBUG_NMI_SELFTEST is not set
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_ENCRYPTED_KEYS is not set
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
+CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
+CONFIG_SECURITY_NETWORK=y
+# CONFIG_SECURITY_NETWORK_XFRM is not set
+# CONFIG_SECURITY_PATH is not set
+CONFIG_LSM_MMAP_MIN_ADDR=65536
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_SECURITY_SELINUX_DEVELOP=y
+CONFIG_SECURITY_SELINUX_AVC_STATS=y
+CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
+# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set
+# CONFIG_SECURITY_SMACK is not set
+# CONFIG_SECURITY_TOMOYO is not set
+# CONFIG_SECURITY_APPARMOR is not set
+# CONFIG_SECURITY_YAMA is not set
+# CONFIG_IMA is not set
+# CONFIG_EVM is not set
+CONFIG_DEFAULT_SECURITY_SELINUX=y
+# CONFIG_DEFAULT_SECURITY_DAC is not set
+CONFIG_DEFAULT_SECURITY="selinux"
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTODEV is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_USER is not set
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
+CONFIG_CRYPTO_GF128MUL=y
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_PCRYPT is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+CONFIG_CRYPTO_CRYPTD=y
+CONFIG_CRYPTO_AUTHENC=y
+CONFIG_CRYPTO_ABLK_HELPER_X86=y
+
+#
+# Authenticated Encryption with Associated Data
+#
+CONFIG_CRYPTO_CCM=y
+# CONFIG_CRYPTO_GCM is not set
+CONFIG_CRYPTO_SEQIV=y
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CTR=y
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=y
+# CONFIG_CRYPTO_PCBC is not set
+CONFIG_CRYPTO_XTS=y
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_CMAC is not set
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=y
+# CONFIG_CRYPTO_CRC32C_INTEL is not set
+# CONFIG_CRYPTO_CRC32 is not set
+# CONFIG_CRYPTO_CRC32_PCLMUL is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+# CONFIG_CRYPTO_SHA256_ARM is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_AES_586=y
+CONFIG_CRYPTO_AES_NI_INTEL=y
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=y
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SALSA20_586 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_SERPENT_SSE2_586 is not set
+# CONFIG_CRYPTO_TEA is not set
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_TWOFISH_COMMON=y
+CONFIG_CRYPTO_TWOFISH_586=y
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_USER_API_HASH is not set
+# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_CRYPTO_DEV_PADLOCK is not set
+# CONFIG_CRYPTO_DEV_GEODE is not set
+CONFIG_ASYMMETRIC_KEY_TYPE=y
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
+CONFIG_PUBLIC_KEY_ALGO_RSA=y
+CONFIG_X509_CERTIFICATE_PARSER=y
+CONFIG_HAVE_KVM=y
+CONFIG_VIRTUALIZATION=y
+# CONFIG_KVM is not set
+# CONFIG_LGUEST is not set
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_STRNCPY_FROM_USER=y
+CONFIG_GENERIC_STRNLEN_USER=y
+CONFIG_GENERIC_FIND_FIRST_BIT=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_IOMAP=y
+CONFIG_GENERIC_IO=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=y
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC32_SELFTEST is not set
+# CONFIG_CRC32_SLICEBY8 is not set
+# CONFIG_CRC32_SLICEBY4 is not set
+# CONFIG_CRC32_SARWATE is not set
+CONFIG_CRC32_BIT=y
+# CONFIG_CRC7 is not set
+CONFIG_LIBCRC32C=y
+# CONFIG_CRC8 is not set
+CONFIG_AUDIT_GENERIC=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_XZ_DEC=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_BCJ=y
+# CONFIG_XZ_DEC_TEST is not set
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_REED_SOLOMON=y
+CONFIG_REED_SOLOMON_ENC8=y
+CONFIG_REED_SOLOMON_DEC8=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_CPU_RMAP=y
+CONFIG_DQL=y
+CONFIG_NLATTR=y
+CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y
+CONFIG_AVERAGE=y
+CONFIG_CLZ_TAB=y
+# CONFIG_CORDIC is not set
+# CONFIG_DDR is not set
+CONFIG_MPILIB=y
+CONFIG_OID_REGISTRY=y
diff --git a/arch/x86/configs/i386_mrfl_defconfig b/arch/x86/configs/i386_mrfl_defconfig
new file mode 100644
index 0000000..db31fcc
--- /dev/null
+++ b/arch/x86/configs/i386_mrfl_defconfig
@@ -0,0 +1,3980 @@
+#
+# Automatically generated file; DO NOT EDIT.
+# Linux/i386 3.10.17 Kernel Configuration
+#
+# CONFIG_64BIT is not set
+CONFIG_X86_32=y
+CONFIG_X86=y
+CONFIG_INSTRUCTION_DECODER=y
+CONFIG_OUTPUT_FORMAT="elf32-i386"
+CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_MMU=y
+CONFIG_NEED_SG_DMA_LENGTH=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_ARCH_HAS_CPU_RELAX=y
+CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
+CONFIG_ARCH_HAS_CPU_AUTOPROBE=y
+CONFIG_HAVE_SETUP_PER_CPU_AREA=y
+CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
+CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_ZONE_DMA32 is not set
+# CONFIG_AUDIT_ARCH is not set
+CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
+CONFIG_X86_32_SMP=y
+CONFIG_X86_HT=y
+CONFIG_X86_32_LAZY_GS=y
+CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-ecx -fcall-saved-edx"
+CONFIG_ARCH_CPU_PROBE_RELEASE=y
+CONFIG_ARCH_SUPPORTS_UPROBES=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_IRQ_WORK=y
+CONFIG_BUILDTIME_EXTABLE_SORT=y
+
+#
+# General setup
+#
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE=""
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_XZ=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_XZ is not set
+# CONFIG_KERNEL_LZO is not set
+CONFIG_DEFAULT_HOSTNAME="(none)"
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+# CONFIG_FHANDLE is not set
+CONFIG_AUDIT=y
+CONFIG_AUDITSYSCALL=y
+CONFIG_AUDIT_WATCH=y
+CONFIG_AUDIT_TREE=y
+# CONFIG_AUDIT_LOGINUID_IMMUTABLE is not set
+CONFIG_HAVE_GENERIC_HARDIRQS=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_PENDING_IRQ=y
+CONFIG_IRQ_DOMAIN=y
+# CONFIG_IRQ_DOMAIN_DEBUG is not set
+CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_CLOCKSOURCE_WATCHDOG=y
+CONFIG_KTIME_SCALAR=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+
+#
+# Timers subsystem
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ_COMMON=y
+# CONFIG_HZ_PERIODIC is not set
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+
+#
+# CPU/Task time and stats accounting
+#
+# CONFIG_TICK_CPU_ACCOUNTING is not set
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_PREEMPT_RCU=y
+CONFIG_RCU_STALL_COMMON=y
+CONFIG_RCU_FANOUT=32
+CONFIG_RCU_FANOUT_LEAF=16
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_RCU_FAST_NO_HZ is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_RCU_BOOST is not set
+# CONFIG_RCU_NOCB_CPU is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
+CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
+CONFIG_ARCH_WANTS_PROT_NUMA_PROT_NONE=y
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_PROC_PID_CPUSET=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+# CONFIG_MEMCG is not set
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_CFS_BANDWIDTH is not set
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_CGROUP=y
+# CONFIG_DEBUG_BLK_CGROUP is not set
+# CONFIG_CHECKPOINT_RESTORE is not set
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+CONFIG_UIDGID_CONVERTED=y
+# CONFIG_UIDGID_STRICT_TYPE_CHECKS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_HAVE_UID16=y
+CONFIG_SYSCTL_EXCEPTION_TRACE=y
+CONFIG_HOTPLUG=y
+CONFIG_HAVE_PCSPKR_PLATFORM=y
+CONFIG_EXPERT=y
+# CONFIG_UPTIME_LIMITED_KERNEL is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_PCSPKR_PLATFORM=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_PCI_QUIRKS=y
+CONFIG_EMBEDDED=y
+CONFIG_HAVE_PERF_EVENTS=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
+CONFIG_OPROFILE=y
+# CONFIG_OPROFILE_EVENT_MULTIPLEX is not set
+CONFIG_HAVE_OPROFILE=y
+CONFIG_OPROFILE_NMI_TIMER=y
+CONFIG_KPROBES=y
+# CONFIG_JUMP_LABEL is not set
+CONFIG_KPROBES_ON_FTRACE=y
+# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_ARCH_USE_BUILTIN_BSWAP=y
+CONFIG_KRETPROBES=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_OPTPROBES=y
+CONFIG_HAVE_KPROBES_ON_FTRACE=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_DMA_ATTRS=y
+CONFIG_HAVE_DMA_CONTIGUOUS=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
+CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
+CONFIG_HAVE_USER_RETURN_NOTIFIER=y
+CONFIG_HAVE_PERF_EVENTS_NMI=y
+CONFIG_HAVE_PERF_REGS=y
+CONFIG_HAVE_PERF_USER_STACK_DUMP=y
+CONFIG_HAVE_ARCH_JUMP_LABEL=y
+CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
+CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
+CONFIG_HAVE_CMPXCHG_LOCAL=y
+CONFIG_HAVE_CMPXCHG_DOUBLE=y
+CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y
+CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
+CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
+CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
+CONFIG_MODULES_USE_ELF_REL=y
+CONFIG_CLONE_BACKWARDS=y
+CONFIG_OLD_SIGSUSPEND3=y
+CONFIG_OLD_SIGACTION=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_ALL=y
+# CONFIG_MODULE_SIG_SHA1 is not set
+# CONFIG_MODULE_SIG_SHA224 is not set
+CONFIG_MODULE_SIG_SHA256=y
+# CONFIG_MODULE_SIG_SHA384 is not set
+# CONFIG_MODULE_SIG_SHA512 is not set
+CONFIG_MODULE_SIG_HASH="sha256"
+CONFIG_STOP_MACHINE=y
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_BSGLIB is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+CONFIG_BLK_DEV_THROTTLING=y
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+CONFIG_OSF_PARTITION=y
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+# CONFIG_LDM_PARTITION is not set
+CONFIG_SGI_PARTITION=y
+# CONFIG_ULTRIX_PARTITION is not set
+CONFIG_SUN_PARTITION=y
+# CONFIG_KARMA_PARTITION is not set
+CONFIG_EFI_PARTITION=y
+# CONFIG_SYSV68_PARTITION is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_CFQ_GROUP_IOSCHED is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_ASN1=y
+CONFIG_UNINLINE_SPIN_UNLOCK=y
+CONFIG_FREEZER=y
+
+#
+# Processor type and features
+#
+CONFIG_ZONE_DMA=y
+CONFIG_SMP=y
+CONFIG_X86_MPPARSE=y
+# CONFIG_X86_BIGSMP is not set
+CONFIG_X86_EXTENDED_PLATFORM=y
+# CONFIG_X86_GOLDFISH is not set
+CONFIG_X86_WANT_INTEL_MID=y
+CONFIG_X86_INTEL_MID=y
+# CONFIG_X86_MDFLD is not set
+CONFIG_ATOM_SOC_POWER=y
+# CONFIG_REMOVEME_INTEL_ATOM_MDFLD_POWER is not set
+# CONFIG_REMOVEME_INTEL_ATOM_CLV_POWER is not set
+CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER=y
+CONFIG_INTEL_DEBUG_FEATURE=y
+# CONFIG_X86_RDC321X is not set
+# CONFIG_X86_32_NON_STANDARD is not set
+CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y
+# CONFIG_X86_32_IRIS is not set
+# CONFIG_SCHED_OMIT_FRAME_POINTER is not set
+# CONFIG_HYPERVISOR_GUEST is not set
+CONFIG_NO_BOOTMEM=y
+# CONFIG_MEMTEST is not set
+# CONFIG_M486 is not set
+# CONFIG_M586 is not set
+# CONFIG_M586TSC is not set
+# CONFIG_M586MMX is not set
+# CONFIG_M686 is not set
+# CONFIG_MPENTIUMII is not set
+# CONFIG_MPENTIUMIII is not set
+# CONFIG_MPENTIUMM is not set
+# CONFIG_MPENTIUM4 is not set
+# CONFIG_MK6 is not set
+# CONFIG_MK7 is not set
+# CONFIG_MK8 is not set
+# CONFIG_MCRUSOE is not set
+# CONFIG_MEFFICEON is not set
+# CONFIG_MWINCHIPC6 is not set
+# CONFIG_MWINCHIP3D is not set
+# CONFIG_MELAN is not set
+# CONFIG_MGEODEGX1 is not set
+# CONFIG_MGEODE_LX is not set
+# CONFIG_MCYRIXIII is not set
+# CONFIG_MVIAC3_2 is not set
+# CONFIG_MVIAC7 is not set
+# CONFIG_MCORE2 is not set
+# CONFIG_MATOM is not set
+CONFIG_MSLM=y
+CONFIG_X86_GENERIC=y
+CONFIG_X86_INTERNODE_CACHE_SHIFT=6
+CONFIG_X86_L1_CACHE_SHIFT=6
+CONFIG_X86_INTEL_USERCOPY=y
+CONFIG_X86_USE_PPRO_CHECKSUM=y
+CONFIG_X86_TSC=y
+CONFIG_X86_CMPXCHG64=y
+CONFIG_X86_CMOV=y
+CONFIG_X86_MINIMUM_CPU_FAMILY=5
+CONFIG_X86_DEBUGCTLMSR=y
+# CONFIG_PROCESSOR_SELECT is not set
+CONFIG_CPU_SUP_INTEL=y
+CONFIG_CPU_SUP_CYRIX_32=y
+CONFIG_CPU_SUP_AMD=y
+CONFIG_CPU_SUP_CENTAUR=y
+CONFIG_CPU_SUP_TRANSMETA_32=y
+CONFIG_CPU_SUP_UMC_32=y
+# CONFIG_HPET_TIMER is not set
+# CONFIG_APB_TIMER is not set
+CONFIG_DMI=y
+CONFIG_NR_CPUS=2
+CONFIG_SCHED_SMT=y
+CONFIG_SCHED_MC=y
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+CONFIG_PREEMPT_COUNT=y
+CONFIG_X86_LOCAL_APIC=y
+CONFIG_X86_IO_APIC=y
+# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set
+CONFIG_X86_MCE=y
+CONFIG_X86_MCE_INTEL=y
+# CONFIG_X86_MCE_AMD is not set
+# CONFIG_X86_ANCIENT_MCE is not set
+CONFIG_X86_MCE_THRESHOLD=y
+# CONFIG_X86_MCE_INJECT is not set
+CONFIG_X86_THERMAL_VECTOR=y
+CONFIG_VM86=y
+# CONFIG_TOSHIBA is not set
+# CONFIG_I8K is not set
+CONFIG_X86_REBOOTFIXUPS=y
+# CONFIG_MICROCODE is not set
+CONFIG_X86_MSR=y
+CONFIG_X86_CPUID=y
+# CONFIG_NOHIGHMEM is not set
+# CONFIG_HIGHMEM4G is not set
+CONFIG_HIGHMEM64G=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_HIGHMEM=y
+CONFIG_X86_PAE=y
+CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
+CONFIG_ARCH_DMA_ADDR_T_64BIT=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_ILLEGAL_POINTER_VALUE=0
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_SPARSEMEM_STATIC=y
+CONFIG_HAVE_MEMBLOCK=y
+CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
+CONFIG_ARCH_DISCARD_MEMBLOCK=y
+# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=999999
+# CONFIG_COMPACTION is not set
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
+CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
+# CONFIG_MEMORY_FAILURE is not set
+# CONFIG_TRANSPARENT_HUGEPAGE is not set
+CONFIG_CROSS_MEMORY_ATTACH=y
+# CONFIG_CLEANCACHE is not set
+# CONFIG_HIGHPTE is not set
+CONFIG_X86_CHECK_BIOS_CORRUPTION=y
+# CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK is not set
+CONFIG_X86_RESERVE_LOW=64
+# CONFIG_MATH_EMULATION is not set
+CONFIG_MTRR=y
+CONFIG_MTRR_SANITIZER=y
+CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
+CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
+CONFIG_X86_PAT=y
+CONFIG_ARCH_USES_PG_UNCACHED=y
+# CONFIG_ARCH_RANDOM is not set
+CONFIG_X86_SMAP=y
+# CONFIG_SECCOMP is not set
+# CONFIG_CC_STACKPROTECTOR is not set
+CONFIG_HZ_100=y
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=100
+CONFIG_SCHED_HRTICK=y
+CONFIG_KEXEC=y
+# CONFIG_CRASH_DUMP is not set
+CONFIG_PHYSICAL_START=0x1200000
+# CONFIG_RELOCATABLE is not set
+CONFIG_PHYSICAL_ALIGN=0x100000
+CONFIG_HOTPLUG_CPU=y
+# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
+# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
+# CONFIG_COMPAT_VDSO is not set
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+
+#
+# Power management and ACPI options
+#
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_PM_SLEEP=y
+CONFIG_PM_SLEEP_SMP=y
+# CONFIG_PM_AUTOSLEEP is not set
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=100
+CONFIG_PM_WAKELOCKS_GC=y
+CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
+CONFIG_PM_ADVANCED_DEBUG=y
+# CONFIG_PM_TEST_SUSPEND is not set
+CONFIG_PM_SLEEP_DEBUG=y
+# CONFIG_PM_TRACE_RTC is not set
+# CONFIG_ACPI is not set
+CONFIG_SFI=y
+# CONFIG_APM is not set
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+
+#
+# x86 CPU frequency scaling drivers
+#
+# CONFIG_X86_INTEL_PSTATE is not set
+CONFIG_X86_SFI_CPUFREQ=y
+# CONFIG_X86_POWERNOW_K6 is not set
+# CONFIG_X86_POWERNOW_K7 is not set
+# CONFIG_X86_GX_SUSPMOD is not set
+# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
+# CONFIG_X86_SPEEDSTEP_ICH is not set
+# CONFIG_X86_SPEEDSTEP_SMI is not set
+# CONFIG_X86_P4_CLOCKMOD is not set
+# CONFIG_X86_CPUFREQ_NFORCE2 is not set
+# CONFIG_X86_LONGRUN is not set
+
+#
+# shared options
+#
+# CONFIG_X86_SPEEDSTEP_LIB is not set
+CONFIG_CPU_IDLE=y
+# CONFIG_CPU_IDLE_MULTIPLE_DRIVERS is not set
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set
+CONFIG_INTEL_IDLE=y
+
+#
+# Bus options (PCI etc.)
+#
+CONFIG_PCI=y
+# CONFIG_PCI_GOBIOS is not set
+# CONFIG_PCI_GOMMCONFIG is not set
+# CONFIG_PCI_GODIRECT is not set
+CONFIG_PCI_GOANY=y
+CONFIG_PCI_BIOS=y
+CONFIG_PCI_DIRECT=y
+CONFIG_PCI_MMCONFIG=y
+CONFIG_PCI_DOMAINS=y
+# CONFIG_PCI_CNB20LE_QUIRK is not set
+CONFIG_PCIEPORTBUS=y
+# CONFIG_PCIEAER is not set
+CONFIG_PCIEASPM=y
+# CONFIG_PCIEASPM_DEBUG is not set
+# CONFIG_PCIEASPM_DEFAULT is not set
+# CONFIG_PCIEASPM_POWERSAVE is not set
+CONFIG_PCIEASPM_PERFORMANCE=y
+CONFIG_PCIE_PME=y
+CONFIG_ARCH_SUPPORTS_MSI=y
+CONFIG_PCI_MSI=y
+# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
+# CONFIG_PCI_STUB is not set
+CONFIG_HT_IRQ=y
+# CONFIG_PCI_IOV is not set
+# CONFIG_PCI_PRI is not set
+# CONFIG_PCI_PASID is not set
+CONFIG_PCI_LABEL=y
+CONFIG_ISA_DMA_API=y
+# CONFIG_ISA is not set
+# CONFIG_SCx200 is not set
+# CONFIG_ALIX is not set
+# CONFIG_NET5501 is not set
+# CONFIG_GEOS is not set
+CONFIG_AMD_NB=y
+# CONFIG_PCCARD is not set
+# CONFIG_HOTPLUG_PCI is not set
+# CONFIG_RAPIDIO is not set
+
+#
+# Executable file formats / Emulations
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+CONFIG_BINFMT_SCRIPT=y
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_COREDUMP=y
+CONFIG_HAVE_ATOMIC_IOMAP=y
+CONFIG_HAVE_TEXT_POKE_SMP=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_DIAG is not set
+CONFIG_UNIX=y
+# CONFIG_UNIX_DIAG is not set
+CONFIG_XFRM=y
+CONFIG_XFRM_ALGO=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_MIGRATE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+# CONFIG_IP_FIB_TRIE_STATS is not set
+CONFIG_IP_MULTIPLE_TABLES=y
+# CONFIG_IP_ROUTE_MULTIPATH is not set
+# CONFIG_IP_ROUTE_VERBOSE is not set
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE_DEMUX is not set
+CONFIG_NET_IP_TUNNEL=y
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_NET_IPVTI is not set
+# CONFIG_INET_AH is not set
+CONFIG_INET_ESP=y
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+CONFIG_INET_TUNNEL=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+CONFIG_INET_LRO=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_INET_UDP_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+CONFIG_INET6_XFRM_MODE_TRANSPORT=y
+CONFIG_INET6_XFRM_MODE_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_BEET=y
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=y
+# CONFIG_IPV6_SIT_6RD is not set
+CONFIG_IPV6_NDISC_NODETYPE=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_GRE is not set
+CONFIG_IPV6_MULTIPLE_TABLES=y
+# CONFIG_IPV6_SUBTREES is not set
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_NETLABEL is not set
+CONFIG_NETWORK_SECMARK=y
+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_NETLINK=y
+# CONFIG_NETFILTER_NETLINK_ACCT is not set
+# CONFIG_NETFILTER_NETLINK_QUEUE is not set
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_MARK=y
+# CONFIG_NF_CONNTRACK_SECMARK is not set
+CONFIG_NF_CONNTRACK_PROCFS=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+# CONFIG_NF_CONNTRACK_TIMEOUT is not set
+# CONFIG_NF_CONNTRACK_TIMESTAMP is not set
+# CONFIG_NF_CT_PROTO_DCCP is not set
+# CONFIG_NF_CT_PROTO_SCTP is not set
+# CONFIG_NF_CT_PROTO_UDPLITE is not set
+# CONFIG_NF_CONNTRACK_AMANDA is not set
+# CONFIG_NF_CONNTRACK_FTP is not set
+# CONFIG_NF_CONNTRACK_H323 is not set
+# CONFIG_NF_CONNTRACK_IRC is not set
+# CONFIG_NF_CONNTRACK_NETBIOS_NS is not set
+# CONFIG_NF_CONNTRACK_SNMP is not set
+# CONFIG_NF_CONNTRACK_PPTP is not set
+# CONFIG_NF_CONNTRACK_SANE is not set
+# CONFIG_NF_CONNTRACK_SIP is not set
+# CONFIG_NF_CONNTRACK_TFTP is not set
+# CONFIG_NF_CT_NETLINK is not set
+# CONFIG_NF_CT_NETLINK_TIMEOUT is not set
+CONFIG_NF_NAT=y
+CONFIG_NF_NAT_NEEDED=y
+# CONFIG_NF_NAT_AMANDA is not set
+# CONFIG_NF_NAT_FTP is not set
+# CONFIG_NF_NAT_IRC is not set
+# CONFIG_NF_NAT_SIP is not set
+# CONFIG_NF_NAT_TFTP is not set
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NETFILTER_XTABLES=y
+
+#
+# Xtables combined modules
+#
+CONFIG_NETFILTER_XT_MARK=y
+CONFIG_NETFILTER_XT_CONNMARK=y
+
+#
+# Xtables targets
+#
+# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set
+# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set
+# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+# CONFIG_NETFILTER_XT_TARGET_CT is not set
+# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
+CONFIG_NETFILTER_XT_TARGET_HL=y
+# CONFIG_NETFILTER_XT_TARGET_HMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set
+# CONFIG_NETFILTER_XT_TARGET_LED is not set
+# CONFIG_NETFILTER_XT_TARGET_LOG is not set
+# CONFIG_NETFILTER_XT_TARGET_MARK is not set
+CONFIG_NETFILTER_XT_TARGET_NETMAP=y
+# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
+# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set
+# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
+# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
+CONFIG_NETFILTER_XT_TARGET_REDIRECT=y
+# CONFIG_NETFILTER_XT_TARGET_TEE is not set
+# CONFIG_NETFILTER_XT_TARGET_TPROXY is not set
+# CONFIG_NETFILTER_XT_TARGET_TRACE is not set
+# CONFIG_NETFILTER_XT_TARGET_SECMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
+# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
+
+#
+# Xtables matches
+#
+# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_BPF is not set
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=y
+# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNLABEL is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNLIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNMARK is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNTRACK is not set
+# CONFIG_NETFILTER_XT_MATCH_CPU is not set
+# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
+# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set
+# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
+# CONFIG_NETFILTER_XT_MATCH_ECN is not set
+# CONFIG_NETFILTER_XT_MATCH_ESP is not set
+# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_HL=y
+# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
+# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set
+# CONFIG_NETFILTER_XT_MATCH_LIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_MAC is not set
+# CONFIG_NETFILTER_XT_MATCH_MARK is not set
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set
+# CONFIG_NETFILTER_XT_MATCH_OSF is not set
+# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
+# CONFIG_NETFILTER_XT_MATCH_POLICY is not set
+# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
+# CONFIG_NETFILTER_XT_MATCH_REALM is not set
+# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
+# CONFIG_NETFILTER_XT_MATCH_STRING is not set
+# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
+# CONFIG_NETFILTER_XT_MATCH_TIME is not set
+# CONFIG_NETFILTER_XT_MATCH_U32 is not set
+# CONFIG_IP_SET is not set
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV4=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y
+CONFIG_IP_NF_IPTABLES=y
+# CONFIG_IP_NF_MATCH_AH is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_RPFILTER is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+# CONFIG_IP_NF_TARGET_ULOG is not set
+CONFIG_NF_NAT_IPV4=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+# CONFIG_NF_NAT_PPTP is not set
+# CONFIG_NF_NAT_H323 is not set
+CONFIG_IP_NF_MANGLE=y
+# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
+# CONFIG_IP_NF_TARGET_ECN is not set
+# CONFIG_IP_NF_TARGET_TTL is not set
+CONFIG_IP_NF_RAW=y
+# CONFIG_IP_NF_SECURITY is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV6=y
+# CONFIG_NF_CONNTRACK_IPV6 is not set
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_EUI64=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+# CONFIG_IP6_NF_MATCH_RPFILTER is not set
+CONFIG_IP6_NF_MATCH_RT=y
+CONFIG_IP6_NF_TARGET_HL=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+# CONFIG_IP6_NF_SECURITY is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+CONFIG_L2TP=y
+# CONFIG_L2TP_DEBUGFS is not set
+# CONFIG_L2TP_V3 is not set
+# CONFIG_BRIDGE is not set
+CONFIG_HAVE_NET_DSA=y
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+CONFIG_DNS_RESOLVER=y
+# CONFIG_BATMAN_ADV is not set
+# CONFIG_OPENVSWITCH is not set
+# CONFIG_VSOCKETS is not set
+# CONFIG_NETLINK_MMAP is not set
+# CONFIG_NETLINK_DIAG is not set
+CONFIG_RPS=y
+CONFIG_RFS_ACCEL=y
+CONFIG_XPS=y
+# CONFIG_NETPRIO_CGROUP is not set
+CONFIG_BQL=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_TCPPROBE is not set
+# CONFIG_NET_DROP_MONITOR is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+CONFIG_CFG80211=m
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+# CONFIG_CFG80211_REG_DEBUG is not set
+# CONFIG_CFG80211_CERTIFICATION_ONUS is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_DEBUGFS is not set
+# CONFIG_CFG80211_INTERNAL_REGDB is not set
+# CONFIG_CFG80211_WEXT is not set
+# CONFIG_LIB80211 is not set
+CONFIG_MAC80211=m
+CONFIG_MAC80211_HAS_RC=y
+# CONFIG_MAC80211_RC_PID is not set
+CONFIG_MAC80211_RC_MINSTREL=y
+CONFIG_MAC80211_RC_MINSTREL_HT=y
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
+# CONFIG_MAC80211_MESH is not set
+# CONFIG_MAC80211_LEDS is not set
+# CONFIG_MAC80211_DEBUGFS is not set
+# CONFIG_MAC80211_MESSAGE_TRACING is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
+CONFIG_RFKILL=y
+CONFIG_RFKILL_LEDS=y
+# CONFIG_RFKILL_INPUT is not set
+# CONFIG_RFKILL_REGULATOR is not set
+# CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
+# CONFIG_CEPH_LIB is not set
+CONFIG_NFC=y
+# CONFIG_NFC_NCI is not set
+# CONFIG_NFC_HCI is not set
+
+#
+# Near Field Communication (NFC) devices
+#
+# CONFIG_NFC_PN533 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH=""
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+CONFIG_FW_LOADER_USER_HELPER=y
+# CONFIG_DEBUG_DRIVER is not set
+CONFIG_DEBUG_DEVRES=y
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_GENERIC_CPU_DEVICES is not set
+CONFIG_REGMAP=y
+CONFIG_REGMAP_I2C=y
+CONFIG_REGMAP_SPI=y
+CONFIG_REGMAP_IRQ=y
+CONFIG_DMA_SHARED_BUFFER=y
+# CONFIG_CMA is not set
+
+#
+# Bus devices
+#
+CONFIG_CONNECTOR=y
+CONFIG_PROC_EVENTS=y
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_DRBD is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_NVME is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_VIRTIO_BLK is not set
+# CONFIG_BLK_DEV_HD is not set
+# CONFIG_BLK_DEV_RBD is not set
+# CONFIG_BLK_DEV_RSXX is not set
+
+#
+# Misc devices
+#
+# CONFIG_SENSORS_LIS3LV02D is not set
+# CONFIG_AD525X_DPOT is not set
+# CONFIG_DUMMY_IRQ is not set
+# CONFIG_IBM_ASM is not set
+# CONFIG_PHANTOM is not set
+CONFIG_INTEL_MID_PTI=y
+CONFIG_INTEL_PTI_STM=y
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
+# CONFIG_ICS932S401 is not set
+# CONFIG_ATMEL_SSC is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_HP_ILO is not set
+# CONFIG_APDS9802ALS is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_ISL29020 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_SENSORS_BH1780 is not set
+# CONFIG_SENSORS_BH1770 is not set
+# CONFIG_SENSORS_APDS990X is not set
+# CONFIG_HMC6352 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_TI_DAC7512 is not set
+# CONFIG_BMP085_I2C is not set
+# CONFIG_BMP085_SPI is not set
+# CONFIG_PCH_PHUB is not set
+# CONFIG_USB_SWITCH_FSA9480 is not set
+# CONFIG_LATTICE_ECP3_CONFIG is not set
+# CONFIG_SRAM is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_EEPROM_93XX46 is not set
+# CONFIG_CB710_CORE is not set
+
+#
+# Texas Instruments shared transport line discipline
+#
+# CONFIG_TI_ST is not set
+# CONFIG_SENSORS_LIS3_SPI is not set
+# CONFIG_SENSORS_LIS3_I2C is not set
+
+#
+# Altera FPGA firmware download module
+#
+# CONFIG_ALTERA_STAPL is not set
+# CONFIG_VMWARE_VMCI is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_CHR_DEV_SCH is not set
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+CONFIG_ATA=y
+# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
+CONFIG_SATA_PMP=y
+
+#
+# Controllers with non-SFF native interface
+#
+CONFIG_SATA_AHCI=y
+# CONFIG_SATA_AHCI_PLATFORM is not set
+# CONFIG_SATA_INIC162X is not set
+# CONFIG_SATA_ACARD_AHCI is not set
+# CONFIG_SATA_SIL24 is not set
+CONFIG_ATA_SFF=y
+
+#
+# SFF controllers with custom DMA interface
+#
+# CONFIG_PDC_ADMA is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_SX4 is not set
+CONFIG_ATA_BMDMA=y
+
+#
+# SATA SFF controllers with BMDMA
+#
+CONFIG_ATA_PIIX=y
+# CONFIG_SATA_HIGHBANK is not set
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_SVW is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+
+#
+# PATA SFF controllers with BMDMA
+#
+# CONFIG_PATA_ALI is not set
+CONFIG_PATA_AMD=y
+# CONFIG_PATA_ARASAN_CF is not set
+# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATIIXP is not set
+# CONFIG_PATA_ATP867X is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CS5520 is not set
+# CONFIG_PATA_CS5530 is not set
+# CONFIG_PATA_CS5535 is not set
+# CONFIG_PATA_CS5536 is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_IT8213 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_MARVELL is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NINJA32 is not set
+# CONFIG_PATA_NS87415 is not set
+CONFIG_PATA_OLDPIIX=y
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC2027X is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RDC is not set
+# CONFIG_PATA_SC1200 is not set
+CONFIG_PATA_SCH=y
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_SIL680 is not set
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_TOSHIBA is not set
+# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+
+#
+# PIO-only SFF controllers
+#
+# CONFIG_PATA_CMD640_PCI is not set
+CONFIG_PATA_MPIIX=y
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_PLATFORM is not set
+# CONFIG_PATA_RZ1000 is not set
+
+#
+# Generic fallback / legacy drivers
+#
+CONFIG_ATA_GENERIC=y
+# CONFIG_PATA_LEGACY is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_AUTODETECT=y
+# CONFIG_MD_LINEAR is not set
+# CONFIG_MD_RAID0 is not set
+# CONFIG_MD_RAID1 is not set
+# CONFIG_MD_RAID10 is not set
+# CONFIG_MD_RAID456 is not set
+# CONFIG_MD_MULTIPATH is not set
+# CONFIG_MD_FAULTY is not set
+# CONFIG_BCACHE is not set
+CONFIG_BLK_DEV_DM=y
+# CONFIG_DM_DEBUG is not set
+CONFIG_DM_CRYPT=y
+# CONFIG_DM_SNAPSHOT is not set
+# CONFIG_DM_THIN_PROVISIONING is not set
+# CONFIG_DM_CACHE is not set
+CONFIG_DM_MIRROR=y
+# CONFIG_DM_RAID is not set
+# CONFIG_DM_LOG_USERSPACE is not set
+CONFIG_DM_ZERO=y
+# CONFIG_DM_MULTIPATH is not set
+# CONFIG_DM_DELAY is not set
+CONFIG_DM_UEVENT=y
+# CONFIG_DM_FLAKEY is not set
+# CONFIG_DM_VERITY is not set
+# CONFIG_TARGET_CORE is not set
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_FIREWIRE is not set
+# CONFIG_FIREWIRE_NOSY is not set
+# CONFIG_I2O is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+CONFIG_NETDEVICES=y
+CONFIG_NET_CORE=y
+# CONFIG_BONDING is not set
+# CONFIG_DUMMY is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_NET_FC is not set
+CONFIG_MII=y
+# CONFIG_NET_TEAM is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_VXLAN is not set
+CONFIG_NETCONSOLE=y
+# CONFIG_NETCONSOLE_DYNAMIC is not set
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
+CONFIG_TUN=y
+# CONFIG_VETH is not set
+# CONFIG_VIRTIO_NET is not set
+# CONFIG_ARCNET is not set
+
+#
+# CAIF transport drivers
+#
+# CONFIG_VHOST_NET is not set
+
+#
+# Distributed Switch Architecture drivers
+#
+# CONFIG_NET_DSA_MV88E6XXX is not set
+# CONFIG_NET_DSA_MV88E6060 is not set
+# CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set
+# CONFIG_NET_DSA_MV88E6131 is not set
+# CONFIG_NET_DSA_MV88E6123_61_65 is not set
+CONFIG_ETHERNET=y
+CONFIG_NET_VENDOR_3COM=y
+# CONFIG_VORTEX is not set
+# CONFIG_TYPHOON is not set
+CONFIG_NET_VENDOR_ADAPTEC=y
+# CONFIG_ADAPTEC_STARFIRE is not set
+CONFIG_NET_VENDOR_ALTEON=y
+# CONFIG_ACENIC is not set
+CONFIG_NET_VENDOR_AMD=y
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_PCNET32 is not set
+CONFIG_NET_VENDOR_ATHEROS=y
+# CONFIG_ATL2 is not set
+# CONFIG_ATL1 is not set
+# CONFIG_ATL1E is not set
+# CONFIG_ATL1C is not set
+# CONFIG_ALX is not set
+CONFIG_NET_CADENCE=y
+# CONFIG_ARM_AT91_ETHER is not set
+# CONFIG_MACB is not set
+CONFIG_NET_VENDOR_BROADCOM=y
+# CONFIG_B44 is not set
+CONFIG_BNX2=y
+# CONFIG_CNIC is not set
+CONFIG_TIGON3=y
+# CONFIG_BNX2X is not set
+CONFIG_NET_VENDOR_BROCADE=y
+# CONFIG_BNA is not set
+# CONFIG_NET_CALXEDA_XGMAC is not set
+CONFIG_NET_VENDOR_CHELSIO=y
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_CHELSIO_T3 is not set
+# CONFIG_CHELSIO_T4 is not set
+# CONFIG_CHELSIO_T4VF is not set
+CONFIG_NET_VENDOR_CISCO=y
+# CONFIG_ENIC is not set
+# CONFIG_DNET is not set
+CONFIG_NET_VENDOR_DEC=y
+CONFIG_NET_TULIP=y
+# CONFIG_DE2104X is not set
+# CONFIG_TULIP is not set
+# CONFIG_DE4X5 is not set
+# CONFIG_WINBOND_840 is not set
+# CONFIG_DM9102 is not set
+# CONFIG_ULI526X is not set
+CONFIG_NET_VENDOR_DLINK=y
+# CONFIG_DL2K is not set
+# CONFIG_SUNDANCE is not set
+CONFIG_NET_VENDOR_EMULEX=y
+# CONFIG_BE2NET is not set
+CONFIG_NET_VENDOR_EXAR=y
+# CONFIG_S2IO is not set
+# CONFIG_VXGE is not set
+CONFIG_NET_VENDOR_HP=y
+# CONFIG_HP100 is not set
+CONFIG_NET_VENDOR_INTEL=y
+CONFIG_E100=y
+CONFIG_E1000=y
+CONFIG_E1000E=y
+# CONFIG_IGB is not set
+# CONFIG_IGBVF is not set
+# CONFIG_IXGB is not set
+# CONFIG_IXGBE is not set
+# CONFIG_IXGBEVF is not set
+CONFIG_NET_VENDOR_I825XX=y
+# CONFIG_IP1000 is not set
+# CONFIG_JME is not set
+CONFIG_NET_VENDOR_MARVELL=y
+# CONFIG_MVMDIO is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+CONFIG_NET_VENDOR_MELLANOX=y
+# CONFIG_MLX4_EN is not set
+# CONFIG_MLX4_CORE is not set
+CONFIG_NET_VENDOR_MICREL=y
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_KSZ884X_PCI is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+CONFIG_NET_VENDOR_MYRI=y
+# CONFIG_MYRI10GE is not set
+# CONFIG_FEALNX is not set
+CONFIG_NET_VENDOR_NATSEMI=y
+# CONFIG_NATSEMI is not set
+# CONFIG_NS83820 is not set
+CONFIG_NET_VENDOR_8390=y
+CONFIG_NE2K_PCI=y
+CONFIG_NET_VENDOR_NVIDIA=y
+CONFIG_FORCEDETH=y
+CONFIG_NET_VENDOR_OKI=y
+# CONFIG_PCH_GBE is not set
+# CONFIG_ETHOC is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+CONFIG_NET_VENDOR_QLOGIC=y
+# CONFIG_QLA3XXX is not set
+# CONFIG_QLCNIC is not set
+# CONFIG_QLGE is not set
+# CONFIG_NETXEN_NIC is not set
+CONFIG_NET_VENDOR_REALTEK=y
+# CONFIG_8139CP is not set
+CONFIG_8139TOO=y
+# CONFIG_8139TOO_PIO is not set
+# CONFIG_8139TOO_TUNE_TWISTER is not set
+# CONFIG_8139TOO_8129 is not set
+# CONFIG_8139_OLD_RX_RESET is not set
+CONFIG_R8169=y
+CONFIG_NET_VENDOR_RDC=y
+# CONFIG_R6040 is not set
+CONFIG_NET_VENDOR_SEEQ=y
+CONFIG_NET_VENDOR_SILAN=y
+# CONFIG_SC92031 is not set
+CONFIG_NET_VENDOR_SIS=y
+# CONFIG_SIS900 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SFC is not set
+CONFIG_NET_VENDOR_SMSC=y
+# CONFIG_EPIC100 is not set
+# CONFIG_SMSC9420 is not set
+CONFIG_NET_VENDOR_STMICRO=y
+# CONFIG_STMMAC_ETH is not set
+CONFIG_NET_VENDOR_SUN=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NIU is not set
+CONFIG_NET_VENDOR_TEHUTI=y
+# CONFIG_TEHUTI is not set
+CONFIG_NET_VENDOR_TI=y
+# CONFIG_TLAN is not set
+CONFIG_NET_VENDOR_VIA=y
+# CONFIG_VIA_RHINE is not set
+# CONFIG_VIA_VELOCITY is not set
+CONFIG_NET_VENDOR_WIZNET=y
+# CONFIG_WIZNET_W5100 is not set
+# CONFIG_WIZNET_W5300 is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_AT803X_PHY is not set
+# CONFIG_AMD_PHY is not set
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_BCM87XX_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MICREL_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+# CONFIG_MICREL_KS8995MA is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_RTL8152 is not set
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_AX8817X=y
+# CONFIG_USB_NET_AX88179_178A is not set
+# CONFIG_USB_NET_CDCETHER is not set
+# CONFIG_USB_NET_CDC_EEM is not set
+CONFIG_USB_NET_CDC_NCM=y
+# CONFIG_USB_NET_CDC_MBIM is not set
+# CONFIG_USB_NET_DM9601 is not set
+# CONFIG_USB_NET_SMSC75XX is not set
+# CONFIG_USB_NET_SMSC95XX is not set
+# CONFIG_USB_NET_GL620A is not set
+# CONFIG_USB_NET_NET1080 is not set
+# CONFIG_USB_NET_PLUSB is not set
+# CONFIG_USB_NET_MCS7830 is not set
+# CONFIG_USB_NET_RNDIS_HOST is not set
+CONFIG_USB_NET_CDC_SUBSET=y
+# CONFIG_USB_ALI_M5632 is not set
+# CONFIG_USB_AN2720 is not set
+# CONFIG_USB_BELKIN is not set
+# CONFIG_USB_ARMLINUX is not set
+# CONFIG_USB_EPSON2888 is not set
+# CONFIG_USB_KC2190 is not set
+# CONFIG_USB_NET_ZAURUS is not set
+# CONFIG_USB_NET_CX82310_ETH is not set
+# CONFIG_USB_NET_KALMIA is not set
+# CONFIG_USB_NET_QMI_WWAN is not set
+# CONFIG_USB_HSO is not set
+# CONFIG_USB_NET_INT51X1 is not set
+# CONFIG_USB_IPHETH is not set
+# CONFIG_USB_SIERRA_NET is not set
+CONFIG_WLAN=y
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_AIRO is not set
+# CONFIG_ATMEL is not set
+# CONFIG_AT76C50X_USB is not set
+# CONFIG_PRISM54 is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+# CONFIG_RTL8180 is not set
+# CONFIG_RTL8187 is not set
+# CONFIG_ADM8211 is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_MWL8K is not set
+CONFIG_WIFI_CONTROL_FUNC=y
+CONFIG_WIFI_PLATFORM_DATA=y
+# CONFIG_ATH_CARDS is not set
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+# CONFIG_BRCMFMAC is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_IPW2100 is not set
+# CONFIG_IWLWIFI is not set
+# CONFIG_IWL4965 is not set
+# CONFIG_IWL3945 is not set
+# CONFIG_LIBERTAS is not set
+# CONFIG_P54_COMMON is not set
+# CONFIG_RT2X00 is not set
+# CONFIG_RTLWIFI is not set
+# CONFIG_WL_TI is not set
+# CONFIG_ZD1211RW is not set
+# CONFIG_MWIFIEX is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+# CONFIG_VMXNET3 is not set
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+CONFIG_INPUT_POLLDEV=y
+CONFIG_INPUT_SPARSEKMAP=y
+# CONFIG_INPUT_MATRIXKMAP is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ADP5589 is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_QT1070 is not set
+# CONFIG_KEYBOARD_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_GPIO_POLLED=y
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_TCA8418 is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_LM8333 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_MCS is not set
+# CONFIG_KEYBOARD_MPR121 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+CONFIG_INPUT_MOUSE=y
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_CYAPA is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_GPIO is not set
+# CONFIG_MOUSE_SYNAPTICS_I2C is not set
+# CONFIG_MOUSE_SYNAPTICS_USB is not set
+CONFIG_INPUT_JOYSTICK=y
+# CONFIG_JOYSTICK_ANALOG is not set
+# CONFIG_JOYSTICK_A3D is not set
+# CONFIG_JOYSTICK_ADI is not set
+# CONFIG_JOYSTICK_COBRA is not set
+# CONFIG_JOYSTICK_GF2K is not set
+# CONFIG_JOYSTICK_GRIP is not set
+# CONFIG_JOYSTICK_GRIP_MP is not set
+# CONFIG_JOYSTICK_GUILLEMOT is not set
+# CONFIG_JOYSTICK_INTERACT is not set
+# CONFIG_JOYSTICK_SIDEWINDER is not set
+# CONFIG_JOYSTICK_TMDC is not set
+# CONFIG_JOYSTICK_IFORCE is not set
+# CONFIG_JOYSTICK_WARRIOR is not set
+# CONFIG_JOYSTICK_MAGELLAN is not set
+# CONFIG_JOYSTICK_SPACEORB is not set
+# CONFIG_JOYSTICK_SPACEBALL is not set
+# CONFIG_JOYSTICK_STINGER is not set
+# CONFIG_JOYSTICK_TWIDJOY is not set
+# CONFIG_JOYSTICK_ZHENHUA is not set
+# CONFIG_JOYSTICK_AS5011 is not set
+# CONFIG_JOYSTICK_JOYDUMP is not set
+# CONFIG_JOYSTICK_XPAD is not set
+CONFIG_INPUT_TABLET=y
+# CONFIG_TABLET_USB_ACECAD is not set
+# CONFIG_TABLET_USB_AIPTEK is not set
+# CONFIG_TABLET_USB_GTCO is not set
+# CONFIG_TABLET_USB_HANWANG is not set
+# CONFIG_TABLET_USB_KBTAB is not set
+# CONFIG_TABLET_USB_WACOM is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set
+# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set
+# CONFIG_TOUCHSCREEN_BU21013 is not set
+# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set
+# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set
+# CONFIG_TOUCHSCREEN_DYNAPRO is not set
+# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_ILI210X is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_WACOM_I2C is not set
+# CONFIG_TOUCHSCREEN_MAX11801 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MMS114 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_INTEL_MID is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_PIXCIR is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC_SERIO is not set
+# CONFIG_TOUCHSCREEN_TSC2005 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_ST1232 is not set
+# CONFIG_TOUCHSCREEN_TPS6507X is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_AD714X is not set
+# CONFIG_INPUT_BMA150 is not set
+# CONFIG_INPUT_PCSPKR is not set
+# CONFIG_INPUT_MMA8450 is not set
+# CONFIG_INPUT_MPU3050 is not set
+# CONFIG_INPUT_APANEL is not set
+# CONFIG_INPUT_GP2A is not set
+# CONFIG_INPUT_GPIO_TILT_POLLED is not set
+# CONFIG_INPUT_WISTRON_BTNS is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_KXTJ9 is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+CONFIG_INPUT_UINPUT=y
+# CONFIG_INPUT_PCF8574 is not set
+# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
+# CONFIG_INPUT_ADXL34X is not set
+# CONFIG_INPUT_IMS_PCU is not set
+# CONFIG_INPUT_CMA3000 is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+# CONFIG_SERIO_I8042 is not set
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PCIPS2 is not set
+# CONFIG_SERIO_LIBPS2 is not set
+# CONFIG_SERIO_RAW is not set
+# CONFIG_SERIO_ALTERA_PS2 is not set
+# CONFIG_SERIO_PS2MULT is not set
+# CONFIG_SERIO_ARC_PS2 is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_TTY=y
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_VT_CONSOLE_SLEEP=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_ROCKETPORT is not set
+# CONFIG_CYCLADES is not set
+# CONFIG_MOXA_INTELLIO is not set
+# CONFIG_MOXA_SMARTIO is not set
+# CONFIG_SYNCLINK is not set
+# CONFIG_SYNCLINKMP is not set
+# CONFIG_SYNCLINK_GT is not set
+# CONFIG_NOZOMI is not set
+# CONFIG_ISI is not set
+# CONFIG_N_HDLC is not set
+CONFIG_N_GSM=y
+# CONFIG_TRACE_SINK is not set
+CONFIG_DEVKMEM=y
+# CONFIG_STALDRV is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+CONFIG_FIX_EARLYCON_MEM=y
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_MAX310X is not set
+CONFIG_SERIAL_MRST_MAX3110=y
+# CONFIG_SERIAL_MFD_HSU is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_SCCNXP is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_IFX6X60 is not set
+# CONFIG_SERIAL_PCH_UART is not set
+# CONFIG_SERIAL_ARC is not set
+# CONFIG_SERIAL_RP2 is not set
+# CONFIG_TTY_PRINTK is not set
+# CONFIG_VIRTIO_CONSOLE is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+CONFIG_HW_RANDOM_INTEL=y
+CONFIG_HW_RANDOM_AMD=y
+CONFIG_HW_RANDOM_GEODE=y
+CONFIG_HW_RANDOM_VIA=y
+# CONFIG_HW_RANDOM_VIRTIO is not set
+CONFIG_NVRAM=y
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_SONYPI is not set
+# CONFIG_MWAVE is not set
+# CONFIG_PC8736x_GPIO is not set
+# CONFIG_NSC_GPIO is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_HANGCHECK_TIMER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+CONFIG_DEVPORT=y
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_MUX is not set
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# PC SMBus host controller drivers
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_ISCH is not set
+# CONFIG_I2C_ISMT is not set
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_CBUS_GPIO is not set
+# CONFIG_I2C_DESIGNWARE_PCI is not set
+# CONFIG_I2C_EG20T is not set
+CONFIG_I2C_GPIO=y
+# CONFIG_I2C_INTEL_MID is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_PXA_PCI is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_DIOLAN_U2C is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_SCx200_ACB is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_ALTERA is not set
+CONFIG_SPI_BITBANG=y
+CONFIG_SPI_GPIO=y
+# CONFIG_SPI_OC_TINY is not set
+# CONFIG_SPI_PXA2XX is not set
+# CONFIG_SPI_PXA2XX_PCI is not set
+# CONFIG_SPI_SC18IS602 is not set
+# CONFIG_SPI_TOPCLIFF_PCH is not set
+# CONFIG_SPI_XCOMM is not set
+# CONFIG_SPI_XILINX is not set
+CONFIG_SPI_DESIGNWARE=y
+CONFIG_SPI_DW_PCI=y
+CONFIG_SPI_DW_MID_DMA=y
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# Qualcomm MSM SSBI bus support
+#
+# CONFIG_SSBI is not set
+# CONFIG_HSI is not set
+
+#
+# PPS support
+#
+CONFIG_PPS=y
+# CONFIG_PPS_DEBUG is not set
+
+#
+# PPS clients support
+#
+# CONFIG_PPS_CLIENT_KTIMER is not set
+# CONFIG_PPS_CLIENT_LDISC is not set
+# CONFIG_PPS_CLIENT_GPIO is not set
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+CONFIG_PTP_1588_CLOCK=y
+
+#
+# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks.
+#
+# CONFIG_PTP_1588_CLOCK_PCH is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+CONFIG_GPIO_DEVRES=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIODEBUG=y
+
+#
+# Memory mapped GPIO drivers:
+#
+# CONFIG_GPIO_GENERIC_PLATFORM is not set
+# CONFIG_GPIO_IT8761E is not set
+# CONFIG_GPIO_TS5500 is not set
+# CONFIG_GPIO_SCH is not set
+# CONFIG_GPIO_ICH is not set
+# CONFIG_GPIO_VX855 is not set
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_SX150X is not set
+# CONFIG_GPIO_WM8994 is not set
+# CONFIG_GPIO_ADP5588 is not set
+
+#
+# PCI GPIO expanders:
+#
+# CONFIG_GPIO_BT8XX is not set
+# CONFIG_GPIO_AMD8111 is not set
+CONFIG_GPIO_LANGWELL=y
+# CONFIG_GPIO_PCH is not set
+# CONFIG_GPIO_ML_IOH is not set
+# CONFIG_GPIO_RDC321X is not set
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+# CONFIG_GPIO_74X164 is not set
+
+#
+# AC97 GPIO expanders:
+#
+
+#
+# MODULbus GPIO expanders:
+#
+# CONFIG_GPIO_MSIC is not set
+
+#
+# USB GPIO expanders:
+#
+# CONFIG_W1 is not set
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+# CONFIG_PDA_POWER is not set
+# CONFIG_GENERIC_ADC_BATTERY is not set
+# CONFIG_TEST_POWER is not set
+# CONFIG_BATTERY_DS2780 is not set
+# CONFIG_BATTERY_DS2781 is not set
+# CONFIG_BATTERY_DS2782 is not set
+# CONFIG_BATTERY_SBS is not set
+# CONFIG_BATTERY_BQ27x00 is not set
+# CONFIG_BATTERY_MAX17040 is not set
+CONFIG_BATTERY_MAX17042=y
+# CONFIG_BATTERY_INTEL_MID is not set
+# CONFIG_CHARGER_ISP1704 is not set
+# CONFIG_CHARGER_MAX8903 is not set
+# CONFIG_CHARGER_LP8727 is not set
+# CONFIG_CHARGER_GPIO is not set
+# CONFIG_CHARGER_MANAGER is not set
+# CONFIG_CHARGER_BQ2415X is not set
+# CONFIG_CHARGER_SMB347 is not set
+# CONFIG_BATTERY_GOLDFISH is not set
+# CONFIG_POWER_RESET is not set
+# CONFIG_POWER_AVS is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_ABITUGURU3 is not set
+# CONFIG_SENSORS_AD7314 is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7310 is not set
+# CONFIG_SENSORS_ADT7410 is not set
+# CONFIG_SENSORS_ADT7411 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
+# CONFIG_SENSORS_K8TEMP is not set
+# CONFIG_SENSORS_K10TEMP is not set
+# CONFIG_SENSORS_FAM15H_POWER is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS620 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_I5K_AMB is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_FSCHMD is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_GPIO_FAN is not set
+# CONFIG_SENSORS_HIH6130 is not set
+CONFIG_SENSORS_CORETEMP=y
+CONFIG_SENSORS_CORETEMP_INTERRUPT=y
+# CONFIG_SENSORS_IIO_HWMON is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_JC42 is not set
+# CONFIG_SENSORS_LINEAGE is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_LM73 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4151 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LTC4261 is not set
+# CONFIG_SENSORS_LM95234 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_LM95245 is not set
+CONFIG_MSIC_GPADC=y
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX16065 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX1668 is not set
+# CONFIG_SENSORS_MAX197 is not set
+# CONFIG_SENSORS_MAX6639 is not set
+# CONFIG_SENSORS_MAX6642 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_MAX6697 is not set
+# CONFIG_SENSORS_MCP3021 is not set
+# CONFIG_SENSORS_NCT6775 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_PMBUS is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_SHT21 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_SMM665 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_EMC1403 is not set
+# CONFIG_SENSORS_EMC2103 is not set
+# CONFIG_SENSORS_EMC6W201 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_SCH56XX_COMMON is not set
+# CONFIG_SENSORS_SCH5627 is not set
+# CONFIG_SENSORS_SCH5636 is not set
+# CONFIG_SENSORS_ADS1015 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_ADS7871 is not set
+# CONFIG_SENSORS_AMC6821 is not set
+# CONFIG_SENSORS_INA209 is not set
+# CONFIG_SENSORS_INA2XX is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP102 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_VIA_CPUTEMP is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83795 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_APPLESMC is not set
+CONFIG_THERMAL=y
+CONFIG_THERMAL_HWMON=y
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
+# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
+# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
+# CONFIG_THERMAL_GOV_FAIR_SHARE is not set
+CONFIG_THERMAL_GOV_STEP_WISE=y
+# CONFIG_THERMAL_GOV_USER_SPACE is not set
+# CONFIG_CPU_THERMAL is not set
+# CONFIG_THERMAL_EMULATION is not set
+# CONFIG_INTEL_POWERCLAMP is not set
+CONFIG_SENSORS_THERMAL_MRFLD=y
+CONFIG_SOC_THERMAL=y
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_CORE is not set
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_ACQUIRE_WDT is not set
+# CONFIG_ADVANTECH_WDT is not set
+# CONFIG_ALIM1535_WDT is not set
+# CONFIG_ALIM7101_WDT is not set
+# CONFIG_F71808E_WDT is not set
+# CONFIG_SP5100_TCO is not set
+# CONFIG_SC520_WDT is not set
+# CONFIG_SBC_FITPC2_WATCHDOG is not set
+# CONFIG_EUROTECH_WDT is not set
+# CONFIG_IB700_WDT is not set
+# CONFIG_IBMASR is not set
+# CONFIG_WAFER_WDT is not set
+# CONFIG_I6300ESB_WDT is not set
+# CONFIG_IE6XX_WDT is not set
+# CONFIG_INTEL_SCU_WATCHDOG is not set
+CONFIG_INTEL_SCU_WATCHDOG_EVO=y
+CONFIG_DISABLE_SCU_WATCHDOG=y
+# CONFIG_ITCO_WDT is not set
+# CONFIG_IT8712F_WDT is not set
+# CONFIG_IT87_WDT is not set
+# CONFIG_HP_WATCHDOG is not set
+# CONFIG_SC1200_WDT is not set
+# CONFIG_PC87413_WDT is not set
+# CONFIG_NV_TCO is not set
+# CONFIG_60XX_WDT is not set
+# CONFIG_SBC8360_WDT is not set
+# CONFIG_SBC7240_WDT is not set
+# CONFIG_CPU5_WDT is not set
+# CONFIG_SMSC_SCH311X_WDT is not set
+# CONFIG_SMSC37B787_WDT is not set
+# CONFIG_VIA_WDT is not set
+# CONFIG_W83627HF_WDT is not set
+# CONFIG_W83697HF_WDT is not set
+# CONFIG_W83697UG_WDT is not set
+# CONFIG_W83877F_WDT is not set
+# CONFIG_W83977F_WDT is not set
+# CONFIG_MACHZ_WDT is not set
+# CONFIG_SBC_EPX_C3_WATCHDOG is not set
+
+#
+# PCI-based Watchdog Cards
+#
+# CONFIG_PCIPCWATCHDOG is not set
+# CONFIG_WDTPCI is not set
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+CONFIG_BCMA_POSSIBLE=y
+
+#
+# Broadcom specific AMBA
+#
+# CONFIG_BCMA is not set
+
+#
+# Multifunction device drivers
+#
+CONFIG_MFD_CORE=y
+# CONFIG_MFD_CS5535 is not set
+# CONFIG_MFD_AS3711 is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_AAT2870_CORE is not set
+# CONFIG_MFD_CROS_EC is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_DA9052_SPI is not set
+# CONFIG_MFD_DA9052_I2C is not set
+# CONFIG_MFD_DA9055 is not set
+# CONFIG_MFD_MC13XXX_SPI is not set
+# CONFIG_MFD_MC13XXX_I2C is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
+# CONFIG_LPC_ICH is not set
+# CONFIG_LPC_SCH is not set
+CONFIG_MFD_INTEL_MSIC=y
+# CONFIG_MFD_JANZ_CMODIO is not set
+# CONFIG_MFD_88PM800 is not set
+# CONFIG_MFD_88PM805 is not set
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_MAX77686 is not set
+# CONFIG_MFD_MAX77693 is not set
+# CONFIG_MFD_MAX8907 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_MAX8997 is not set
+# CONFIG_MFD_MAX8998 is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_MFD_VIPERBOARD is not set
+# CONFIG_MFD_RETU is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_RDC321X is not set
+# CONFIG_MFD_RTSX_PCI is not set
+# CONFIG_MFD_RC5T583 is not set
+# CONFIG_MFD_SEC_CORE is not set
+# CONFIG_MFD_SI476X_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_SMSC is not set
+# CONFIG_ABX500_CORE is not set
+# CONFIG_MFD_STMPE is not set
+# CONFIG_MFD_SYSCON is not set
+# CONFIG_MFD_TI_AM335X_TSCADC is not set
+# CONFIG_MFD_LP8788 is not set
+# CONFIG_MFD_PALMAS is not set
+# CONFIG_TPS6105X is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TPS6507X is not set
+# CONFIG_MFD_TPS65090 is not set
+# CONFIG_MFD_TPS65217 is not set
+# CONFIG_MFD_TPS6586X is not set
+# CONFIG_MFD_TPS65910 is not set
+# CONFIG_MFD_TPS65912 is not set
+# CONFIG_MFD_TPS65912_I2C is not set
+# CONFIG_MFD_TPS65912_SPI is not set
+# CONFIG_MFD_TPS80031 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_TWL6040_CORE is not set
+# CONFIG_MFD_WL1273_CORE is not set
+# CONFIG_MFD_LM3533 is not set
+# CONFIG_MFD_TIMBERDALE is not set
+# CONFIG_MFD_TC3589X is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_VX855 is not set
+# CONFIG_MFD_ARIZONA_I2C is not set
+# CONFIG_MFD_ARIZONA_SPI is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X_I2C is not set
+# CONFIG_MFD_WM831X_SPI is not set
+# CONFIG_MFD_WM8350_I2C is not set
+CONFIG_MFD_WM8994=y
+CONFIG_REGULATOR=y
+# CONFIG_REGULATOR_DEBUG is not set
+# CONFIG_REGULATOR_DUMMY is not set
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
+# CONFIG_REGULATOR_GPIO is not set
+# CONFIG_REGULATOR_AD5398 is not set
+# CONFIG_REGULATOR_FAN53555 is not set
+# CONFIG_REGULATOR_ISL6271A is not set
+# CONFIG_REGULATOR_MAX1586 is not set
+# CONFIG_REGULATOR_MAX8649 is not set
+# CONFIG_REGULATOR_MAX8660 is not set
+# CONFIG_REGULATOR_MAX8952 is not set
+# CONFIG_REGULATOR_MAX8973 is not set
+# CONFIG_REGULATOR_LP3971 is not set
+# CONFIG_REGULATOR_LP3972 is not set
+# CONFIG_REGULATOR_LP872X is not set
+# CONFIG_REGULATOR_LP8755 is not set
+# CONFIG_REGULATOR_TPS51632 is not set
+# CONFIG_REGULATOR_TPS62360 is not set
+# CONFIG_REGULATOR_TPS65023 is not set
+# CONFIG_REGULATOR_TPS6507X is not set
+# CONFIG_REGULATOR_TPS6524X is not set
+CONFIG_REGULATOR_WM8994=y
+CONFIG_REGULATOR_PMIC_BASIN_COVE=y
+CONFIG_MEDIA_SUPPORT=y
+
+#
+# Multimedia core support
+#
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set
+# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set
+# CONFIG_MEDIA_RADIO_SUPPORT is not set
+# CONFIG_MEDIA_RC_SUPPORT is not set
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_DEV=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_V4L2=y
+# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
+# CONFIG_VIDEO_V4L2_INT_DEVICE is not set
+# CONFIG_TTPCI_EEPROM is not set
+
+#
+# Media drivers
+#
+# CONFIG_MEDIA_USB_SUPPORT is not set
+# CONFIG_MEDIA_PCI_SUPPORT is not set
+# CONFIG_V4L_PLATFORM_DRIVERS is not set
+# CONFIG_V4L_MEM2MEM_DRIVERS is not set
+# CONFIG_V4L_TEST_DRIVERS is not set
+
+#
+# Supported MMC/SDIO adapters
+#
+# CONFIG_CYPRESS_FIRMWARE is not set
+
+#
+# Media ancillary drivers (tuners, sensors, i2c, frontends)
+#
+# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
+
+#
+# Encoders, decoders, sensors and other helper chips
+#
+
+#
+# Audio decoders, processors and mixers
+#
+# CONFIG_VIDEO_TVAUDIO is not set
+# CONFIG_VIDEO_TDA7432 is not set
+# CONFIG_VIDEO_TDA9840 is not set
+# CONFIG_VIDEO_TEA6415C is not set
+# CONFIG_VIDEO_TEA6420 is not set
+# CONFIG_VIDEO_MSP3400 is not set
+# CONFIG_VIDEO_CS5345 is not set
+# CONFIG_VIDEO_CS53L32A is not set
+# CONFIG_VIDEO_TLV320AIC23B is not set
+# CONFIG_VIDEO_UDA1342 is not set
+# CONFIG_VIDEO_WM8775 is not set
+# CONFIG_VIDEO_WM8739 is not set
+# CONFIG_VIDEO_VP27SMPX is not set
+# CONFIG_VIDEO_SONY_BTF_MPX is not set
+
+#
+# RDS decoders
+#
+# CONFIG_VIDEO_SAA6588 is not set
+
+#
+# Video decoders
+#
+# CONFIG_VIDEO_ADV7180 is not set
+# CONFIG_VIDEO_ADV7183 is not set
+# CONFIG_VIDEO_ADV7604 is not set
+# CONFIG_VIDEO_BT819 is not set
+# CONFIG_VIDEO_BT856 is not set
+# CONFIG_VIDEO_BT866 is not set
+# CONFIG_VIDEO_KS0127 is not set
+# CONFIG_VIDEO_SAA7110 is not set
+# CONFIG_VIDEO_SAA711X is not set
+# CONFIG_VIDEO_SAA7191 is not set
+# CONFIG_VIDEO_TVP514X is not set
+# CONFIG_VIDEO_TVP5150 is not set
+# CONFIG_VIDEO_TVP7002 is not set
+# CONFIG_VIDEO_TW2804 is not set
+# CONFIG_VIDEO_TW9903 is not set
+# CONFIG_VIDEO_TW9906 is not set
+# CONFIG_VIDEO_VPX3220 is not set
+
+#
+# Video and audio decoders
+#
+# CONFIG_VIDEO_SAA717X is not set
+# CONFIG_VIDEO_CX25840 is not set
+
+#
+# Video encoders
+#
+# CONFIG_VIDEO_SAA7127 is not set
+# CONFIG_VIDEO_SAA7185 is not set
+# CONFIG_VIDEO_ADV7170 is not set
+# CONFIG_VIDEO_ADV7175 is not set
+# CONFIG_VIDEO_ADV7343 is not set
+# CONFIG_VIDEO_ADV7393 is not set
+# CONFIG_VIDEO_AD9389B is not set
+# CONFIG_VIDEO_AK881X is not set
+
+#
+# Camera sensor devices
+#
+# CONFIG_VIDEO_OV7640 is not set
+# CONFIG_VIDEO_OV7670 is not set
+# CONFIG_VIDEO_OV9650 is not set
+# CONFIG_VIDEO_VS6624 is not set
+# CONFIG_VIDEO_MT9M032 is not set
+# CONFIG_VIDEO_MT9P031 is not set
+# CONFIG_VIDEO_MT9T001 is not set
+# CONFIG_VIDEO_MT9V011 is not set
+# CONFIG_VIDEO_MT9V032 is not set
+# CONFIG_VIDEO_SR030PC30 is not set
+# CONFIG_VIDEO_NOON010PC30 is not set
+# CONFIG_VIDEO_M5MOLS is not set
+# CONFIG_VIDEO_S5K6AA is not set
+# CONFIG_VIDEO_S5K4ECGX is not set
+# CONFIG_VIDEO_S5C73M3 is not set
+
+#
+# Flash devices
+#
+# CONFIG_VIDEO_ADP1653 is not set
+# CONFIG_VIDEO_AS3645A is not set
+
+#
+# Video improvement chips
+#
+# CONFIG_VIDEO_UPD64031A is not set
+# CONFIG_VIDEO_UPD64083 is not set
+
+#
+# Miscellaneous helper chips
+#
+# CONFIG_VIDEO_THS7303 is not set
+# CONFIG_VIDEO_M52790 is not set
+
+#
+# Sensors used by the soc_camera driver
+#
+
+#
+# Customise DVB Frontends
+#
+# CONFIG_DVB_AU8522_V4L is not set
+CONFIG_DVB_TUNER_DIB0070=m
+CONFIG_DVB_TUNER_DIB0090=m
+
+#
+# Tools to develop new frontends
+#
+# CONFIG_DVB_DUMMY_FE is not set
+
+#
+# Graphics support
+#
+CONFIG_AGP=y
+# CONFIG_AGP_ALI is not set
+# CONFIG_AGP_ATI is not set
+# CONFIG_AGP_AMD is not set
+CONFIG_AGP_AMD64=y
+CONFIG_AGP_INTEL=y
+# CONFIG_AGP_NVIDIA is not set
+# CONFIG_AGP_SIS is not set
+# CONFIG_AGP_SWORKS is not set
+# CONFIG_AGP_VIA is not set
+# CONFIG_AGP_EFFICEON is not set
+CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
+CONFIG_DRM=y
+# CONFIG_DRM_TDFX is not set
+# CONFIG_DRM_R128 is not set
+# CONFIG_DRM_RADEON is not set
+# CONFIG_DRM_NOUVEAU is not set
+# CONFIG_DRM_I915 is not set
+# CONFIG_DRM_MGA is not set
+# CONFIG_DRM_SIS is not set
+# CONFIG_DRM_VIA is not set
+# CONFIG_DRM_SAVAGE is not set
+# CONFIG_DRM_VMWGFX is not set
+# CONFIG_DRM_GMA500 is not set
+# CONFIG_DRM_UDL is not set
+# CONFIG_DRM_AST is not set
+# CONFIG_DRM_MGAG200 is not set
+# CONFIG_DRM_CIRRUS_QEMU is not set
+# CONFIG_DRM_QXL is not set
+# CONFIG_VGASTATE is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_HDMI=y
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+# CONFIG_FB_CFB_FILLRECT is not set
+# CONFIG_FB_CFB_COPYAREA is not set
+# CONFIG_FB_CFB_IMAGEBLIT is not set
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_ARC is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_VGA16 is not set
+# CONFIG_FB_UVESA is not set
+# CONFIG_FB_VESA is not set
+# CONFIG_FB_N411 is not set
+# CONFIG_FB_HGA is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_I740 is not set
+# CONFIG_FB_I810 is not set
+# CONFIG_FB_LE80578 is not set
+# CONFIG_FB_INTEL is not set
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_VIA is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_CARMINE is not set
+# CONFIG_FB_GEODE is not set
+# CONFIG_FB_TMIO is not set
+# CONFIG_FB_SMSCUFX is not set
+# CONFIG_FB_UDL is not set
+# CONFIG_FB_GOLDFISH is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_AUO_K190X is not set
+# CONFIG_EXYNOS_VIDEO is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=y
+# CONFIG_BACKLIGHT_SAHARA is not set
+# CONFIG_BACKLIGHT_ADP8860 is not set
+# CONFIG_BACKLIGHT_ADP8870 is not set
+# CONFIG_BACKLIGHT_LM3630 is not set
+# CONFIG_BACKLIGHT_LM3639 is not set
+# CONFIG_BACKLIGHT_LP855X is not set
+
+#
+# Console display driver support
+#
+CONFIG_VGA_CONSOLE=y
+# CONFIG_VGACON_SOFT_SCROLLBACK is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_LOGO is not set
+CONFIG_SOUND=y
+# CONFIG_SOUND_OSS_CORE is not set
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+CONFIG_SND_COMPRESS_OFFLOAD=y
+CONFIG_SND_JACK=y
+CONFIG_SND_SEQUENCER=y
+# CONFIG_SND_SEQ_DUMMY is not set
+# CONFIG_SND_MIXER_OSS is not set
+# CONFIG_SND_PCM_OSS is not set
+# CONFIG_SND_SEQUENCER_OSS is not set
+# CONFIG_SND_HRTIMER is not set
+CONFIG_SND_DYNAMIC_MINORS=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_DMA_SGBUF=y
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_DRIVERS=y
+# CONFIG_SND_PCSP is not set
+# CONFIG_SND_DUMMY is not set
+CONFIG_SND_ALOOP=y
+# CONFIG_SND_VIRMIDI is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+# CONFIG_SND_PCI is not set
+# CONFIG_SND_SPI is not set
+CONFIG_SND_USB=y
+# CONFIG_SND_USB_AUDIO is not set
+# CONFIG_SND_USB_UA101 is not set
+# CONFIG_SND_USB_USX2Y is not set
+# CONFIG_SND_USB_CAIAQ is not set
+# CONFIG_SND_USB_US122L is not set
+# CONFIG_SND_USB_6FIRE is not set
+CONFIG_SND_SOC=y
+# CONFIG_SND_ATMEL_SOC is not set
+# CONFIG_SND_MFLD_MACHINE is not set
+CONFIG_SND_SOC_I2C_AND_SPI=y
+# CONFIG_SND_SOC_ALL_CODECS is not set
+# CONFIG_SND_SIMPLE_CARD is not set
+# CONFIG_SOUND_PRIME is not set
+
+#
+# HID support
+#
+CONFIG_HID=y
+# CONFIG_HID_BATTERY_STRENGTH is not set
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_GENERIC=y
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_ACRUX is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_APPLEIR is not set
+# CONFIG_HID_AUREAL is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_PRODIKEYS is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EMS_FF is not set
+# CONFIG_HID_ELECOM is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_HOLTEK is not set
+# CONFIG_HID_KEYTOUCH is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_UCLOGIC is not set
+# CONFIG_HID_WALTOP is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_ICADE is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LCPOWER is not set
+# CONFIG_HID_LENOVO_TPKBD is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MAGICMOUSE is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_MULTITOUCH is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_PICOLCD is not set
+# CONFIG_HID_PRIMAX is not set
+# CONFIG_HID_PS3REMOTE is not set
+# CONFIG_HID_ROCCAT is not set
+# CONFIG_HID_SAITEK is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_SPEEDLINK is not set
+# CONFIG_HID_STEELSERIES is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TIVO is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_THINGM is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_WACOM is not set
+# CONFIG_HID_WIIMOTE is not set
+# CONFIG_HID_ZEROPLUS is not set
+# CONFIG_HID_ZYDACRON is not set
+# CONFIG_HID_SENSOR_HUB is not set
+
+#
+# USB HID support
+#
+CONFIG_USB_HID=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+
+#
+# I2C HID support
+#
+# CONFIG_I2C_HID is not set
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB_ARCH_HAS_XHCI=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_COMMON=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEFAULT_PERSIST=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+CONFIG_USB_OTG=y
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=y
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PLATFORM=y
+# CONFIG_USB_XHCI_HCD_DEBUGGING is not set
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_PCI=y
+# CONFIG_USB_EHCI_HCD_PLATFORM is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+# CONFIG_USB_OHCI_HCD is not set
+# CONFIG_USB_UHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_RENESAS_USBHS is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=y
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_REALTEK is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_STORAGE_ENE_UB6250 is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+CONFIG_USB_DWC3=y
+# CONFIG_USB_DWC3_HOST is not set
+CONFIG_USB_DWC3_GADGET=y
+# CONFIG_USB_DWC3_DUAL_ROLE is not set
+
+#
+# Platform Glue Driver Support
+#
+# CONFIG_USB_DWC3_PCI is not set
+CONFIG_USB_DWC3_OTG=y
+CONFIG_USB_DWC3_INTEL_MRFL=y
+# CONFIG_USB_DWC3_INTEL_BYT is not set
+CONFIG_USB_DWC3_DEVICE_INTEL=y
+CONFIG_USB_DWC3_HOST_INTEL=y
+
+#
+# Debugging features
+#
+# CONFIG_USB_DWC3_DEBUG is not set
+# CONFIG_USB_CHIPIDEA is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=y
+# CONFIG_USB_SERIAL_CONSOLE is not set
+# CONFIG_USB_SERIAL_GENERIC is not set
+# CONFIG_USB_SERIAL_AIRCABLE is not set
+# CONFIG_USB_SERIAL_ARK3116 is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_CH341 is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+# CONFIG_USB_SERIAL_CP210X is not set
+# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+# CONFIG_USB_SERIAL_FTDI_SIO is not set
+# CONFIG_USB_SERIAL_FUNSOFT is not set
+# CONFIG_USB_SERIAL_VISOR is not set
+# CONFIG_USB_SERIAL_IPAQ is not set
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+# CONFIG_USB_SERIAL_F81232 is not set
+# CONFIG_USB_SERIAL_GARMIN is not set
+# CONFIG_USB_SERIAL_IPW is not set
+# CONFIG_USB_SERIAL_IUU is not set
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+# CONFIG_USB_SERIAL_KEYSPAN is not set
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+# CONFIG_USB_SERIAL_MCT_U232 is not set
+# CONFIG_USB_SERIAL_METRO is not set
+# CONFIG_USB_SERIAL_MOS7720 is not set
+# CONFIG_USB_SERIAL_MOS7840 is not set
+# CONFIG_USB_SERIAL_MOTOROLA is not set
+# CONFIG_USB_SERIAL_NAVMAN is not set
+CONFIG_USB_SERIAL_PL2303=y
+# CONFIG_USB_SERIAL_OTI6858 is not set
+# CONFIG_USB_SERIAL_QCAUX is not set
+# CONFIG_USB_SERIAL_QUALCOMM is not set
+# CONFIG_USB_SERIAL_SPCP8X5 is not set
+# CONFIG_USB_SERIAL_HP4X is not set
+# CONFIG_USB_SERIAL_SAFE is not set
+# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
+# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
+# CONFIG_USB_SERIAL_SYMBOL is not set
+# CONFIG_USB_SERIAL_TI is not set
+# CONFIG_USB_SERIAL_CYBERJACK is not set
+# CONFIG_USB_SERIAL_XIRCOM is not set
+# CONFIG_USB_SERIAL_OPTION is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set
+# CONFIG_USB_SERIAL_XSENS_MT is not set
+# CONFIG_USB_SERIAL_ZIO is not set
+# CONFIG_USB_SERIAL_WISHBONE is not set
+# CONFIG_USB_SERIAL_ZTE is not set
+# CONFIG_USB_SERIAL_SSU100 is not set
+# CONFIG_USB_SERIAL_QT2 is not set
+# CONFIG_USB_SERIAL_DEBUG is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_YUREX is not set
+# CONFIG_USB_EZUSB_FX2 is not set
+# CONFIG_USB_HSIC_USB3503 is not set
+CONFIG_USB_PHY=y
+CONFIG_NOP_USB_XCEIV=y
+# CONFIG_OMAP_CONTROL_USB is not set
+# CONFIG_OMAP_USB3 is not set
+# CONFIG_SAMSUNG_USBPHY is not set
+# CONFIG_SAMSUNG_USB2PHY is not set
+# CONFIG_SAMSUNG_USB3PHY is not set
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_USB_ISP1301 is not set
+# CONFIG_USB_RCAR_PHY is not set
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
+
+#
+# USB Peripheral Controller
+#
+# CONFIG_USB_R8A66597 is not set
+# CONFIG_USB_PXA27X is not set
+# CONFIG_USB_MV_UDC is not set
+# CONFIG_USB_MV_U3D is not set
+# CONFIG_USB_M66592 is not set
+# CONFIG_USB_AMD5536UDC is not set
+# CONFIG_USB_NET2272 is not set
+# CONFIG_USB_NET2280 is not set
+# CONFIG_USB_GOKU is not set
+# CONFIG_USB_EG20T is not set
+# CONFIG_USB_DUMMY_HCD is not set
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_AUDIO is not set
+# CONFIG_USB_ETH is not set
+# CONFIG_USB_G_NCM is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FUNCTIONFS is not set
+# CONFIG_USB_MASS_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_ACM_MS is not set
+# CONFIG_USB_G_MULTI is not set
+# CONFIG_USB_G_HID is not set
+# CONFIG_USB_G_DBGP is not set
+# CONFIG_USB_G_WEBCAM is not set
+# CONFIG_UWB is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_UNSAFE_RESUME=y
+# CONFIG_MMC_CLKGATE is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_MINORS=10
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PCI=y
+# CONFIG_MMC_RICOH_MMC is not set
+# CONFIG_MMC_SDHCI_PLTFM is not set
+# CONFIG_MMC_WBSD is not set
+# CONFIG_MMC_TIFM_SD is not set
+# CONFIG_MMC_CB710 is not set
+# CONFIG_MMC_VIA_SDMMC is not set
+# CONFIG_MMC_VUB300 is not set
+# CONFIG_MMC_USHC is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_LM3530 is not set
+# CONFIG_LEDS_LM3642 is not set
+# CONFIG_LEDS_PCA9532 is not set
+# CONFIG_LEDS_GPIO is not set
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_LP5521 is not set
+# CONFIG_LEDS_LP5523 is not set
+# CONFIG_LEDS_LP5562 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_PCA9633 is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_REGULATOR is not set
+# CONFIG_LEDS_BD2802 is not set
+# CONFIG_LEDS_INTEL_SS4200 is not set
+# CONFIG_LEDS_LT3593 is not set
+# CONFIG_LEDS_TCA6507 is not set
+# CONFIG_LEDS_LM355x is not set
+# CONFIG_LEDS_OT200 is not set
+# CONFIG_LEDS_BLINKM is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+# CONFIG_LEDS_TRIGGER_TIMER is not set
+# CONFIG_LEDS_TRIGGER_ONESHOT is not set
+# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_CPU is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_LEDS_TRIGGER_TRANSIENT is not set
+# CONFIG_LEDS_TRIGGER_CAMERA is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
+CONFIG_EDAC=y
+CONFIG_EDAC_LEGACY_SYSFS=y
+# CONFIG_EDAC_DEBUG is not set
+# CONFIG_EDAC_MM_EDAC is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_SYSTOHC=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_DS3232 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_ISL12022 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8523 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+# CONFIG_RTC_DRV_EM3027 is not set
+# CONFIG_RTC_DRV_RV3029C2 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T93 is not set
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+# CONFIG_RTC_DRV_RX4581 is not set
+
+#
+# Platform RTC drivers
+#
+CONFIG_RTC_DRV_CMOS=y
+# CONFIG_RTC_DRV_VRTC is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+# CONFIG_RTC_DRV_DS2404 is not set
+
+#
+# on-CPU RTC drivers
+#
+
+#
+# HID Sensor RTC drivers
+#
+# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set
+CONFIG_DMADEVICES=y
+# CONFIG_DMADEVICES_DEBUG is not set
+
+#
+# DMA Devices
+#
+CONFIG_INTEL_MID_DMAC=y
+# CONFIG_INTEL_IOATDMA is not set
+# CONFIG_DW_DMAC is not set
+# CONFIG_TIMB_DMA is not set
+# CONFIG_PCH_DMA is not set
+CONFIG_DMA_ENGINE=y
+
+#
+# DMA Clients
+#
+# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
+# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+# CONFIG_VIRT_DRIVERS is not set
+CONFIG_VIRTIO=y
+
+#
+# Virtio drivers
+#
+# CONFIG_VIRTIO_PCI is not set
+# CONFIG_VIRTIO_BALLOON is not set
+# CONFIG_VIRTIO_MMIO is not set
+
+#
+# Microsoft Hyper-V guest support
+#
+CONFIG_STAGING=y
+# CONFIG_ET131X is not set
+# CONFIG_SLICOSS is not set
+# CONFIG_USBIP_CORE is not set
+# CONFIG_W35UND is not set
+# CONFIG_PRISM2_USB is not set
+# CONFIG_ECHO is not set
+# CONFIG_COMEDI is not set
+# CONFIG_ASUS_OLED is not set
+# CONFIG_R8187SE is not set
+# CONFIG_RTL8192U is not set
+# CONFIG_RTLLIB is not set
+# CONFIG_R8712U is not set
+# CONFIG_RTS5139 is not set
+# CONFIG_TRANZPORT is not set
+# CONFIG_IDE_PHISON is not set
+# CONFIG_LINE6_USB is not set
+# CONFIG_USB_SERIAL_QUATECH2 is not set
+# CONFIG_VT6655 is not set
+# CONFIG_VT6656 is not set
+# CONFIG_DX_SEP is not set
+
+#
+# IIO staging drivers
+#
+
+#
+# Accelerometers
+#
+# CONFIG_ADIS16201 is not set
+# CONFIG_ADIS16203 is not set
+# CONFIG_ADIS16204 is not set
+# CONFIG_ADIS16209 is not set
+# CONFIG_ADIS16220 is not set
+# CONFIG_ADIS16240 is not set
+# CONFIG_LIS3L02DQ is not set
+
+#
+# Analog to digital converters
+#
+# CONFIG_AD7291 is not set
+# CONFIG_AD7606 is not set
+# CONFIG_AD799X is not set
+# CONFIG_AD7780 is not set
+# CONFIG_AD7816 is not set
+# CONFIG_AD7192 is not set
+# CONFIG_AD7280 is not set
+
+#
+# Analog/digital bi-directional converters
+#
+# CONFIG_ADT7316 is not set
+
+#
+# Capacitance to digital converters
+#
+# CONFIG_AD7150 is not set
+# CONFIG_AD7152 is not set
+# CONFIG_AD7746 is not set
+
+#
+# Direct Digital Synthesis
+#
+# CONFIG_AD5930 is not set
+# CONFIG_AD9832 is not set
+# CONFIG_AD9834 is not set
+# CONFIG_AD9850 is not set
+# CONFIG_AD9852 is not set
+# CONFIG_AD9910 is not set
+# CONFIG_AD9951 is not set
+
+#
+# Digital gyroscope sensors
+#
+# CONFIG_ADIS16060 is not set
+# CONFIG_ADIS16130 is not set
+# CONFIG_ADIS16260 is not set
+
+#
+# Network Analyzer, Impedance Converters
+#
+# CONFIG_AD5933 is not set
+
+#
+# Light sensors
+#
+# CONFIG_SENSORS_ISL29018 is not set
+# CONFIG_SENSORS_ISL29028 is not set
+# CONFIG_TSL2583 is not set
+# CONFIG_TSL2x7x is not set
+
+#
+# Magnetometer sensors
+#
+# CONFIG_SENSORS_HMC5843 is not set
+
+#
+# Active energy metering IC
+#
+# CONFIG_ADE7753 is not set
+# CONFIG_ADE7754 is not set
+# CONFIG_ADE7758 is not set
+# CONFIG_ADE7759 is not set
+# CONFIG_ADE7854 is not set
+
+#
+# Resolver to digital converters
+#
+# CONFIG_AD2S90 is not set
+# CONFIG_AD2S1200 is not set
+# CONFIG_AD2S1210 is not set
+
+#
+# Triggers - standalone
+#
+# CONFIG_IIO_SIMPLE_DUMMY is not set
+# CONFIG_ZSMALLOC is not set
+# CONFIG_FB_SM7XX is not set
+# CONFIG_CRYSTALHD is not set
+# CONFIG_FB_XGI is not set
+# CONFIG_USB_ENESTORAGE is not set
+# CONFIG_BCM_WIMAX is not set
+# CONFIG_FT1000 is not set
+
+#
+# Speakup console speech
+#
+# CONFIG_SPEAKUP is not set
+# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set
+# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set
+# CONFIG_STAGING_MEDIA is not set
+
+#
+# Android
+#
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOGGER=y
+CONFIG_ANDROID_TIMED_OUTPUT=y
+# CONFIG_ANDROID_TIMED_GPIO is not set
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ANDROID_INTF_ALARM_DEV=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
+CONFIG_SW_SYNC_USER=y
+# CONFIG_USB_WPAN_HCD is not set
+# CONFIG_WIMAX_GDM72XX is not set
+# CONFIG_NET_VENDOR_SILICOM is not set
+# CONFIG_CED1401 is not set
+# CONFIG_DGRP is not set
+# CONFIG_USB_DWC2 is not set
+CONFIG_X86_PLATFORM_DEVICES=y
+# CONFIG_CHROMEOS_LAPTOP is not set
+# CONFIG_AMILO_RFKILL is not set
+# CONFIG_SENSORS_HDAPS is not set
+CONFIG_INTEL_SCU_IPC=y
+CONFIG_INTEL_SCU_IPC_INTR_MODE=y
+# CONFIG_INTEL_SCU_IPC_POLL_MODE is not set
+CONFIG_INTEL_SCU_IPC_UTIL=y
+CONFIG_GPIO_INTEL_PMIC=y
+CONFIG_INTEL_MID_POWER_BUTTON=y
+# CONFIG_INTEL_MFLD_THERMAL is not set
+# CONFIG_IBM_RTL is not set
+# CONFIG_SAMSUNG_LAPTOP is not set
+CONFIG_INTEL_SCU_FLIS=y
+
+#
+# Hardware Spinlock drivers
+#
+CONFIG_CLKSRC_I8253=y
+CONFIG_CLKEVT_I8253=y
+CONFIG_I8253_LOCK=y
+CONFIG_CLKBLD_I8253=y
+# CONFIG_MAILBOX is not set
+CONFIG_IOMMU_SUPPORT=y
+
+#
+# Remoteproc drivers
+#
+CONFIG_REMOTEPROC=y
+# CONFIG_STE_MODEM_RPROC is not set
+CONFIG_INTEL_MID_REMOTEPROC=y
+
+#
+# Rpmsg drivers
+#
+CONFIG_RPMSG=y
+CONFIG_RPMSG_IPC=y
+CONFIG_PM_DEVFREQ=y
+
+#
+# DEVFREQ Governors
+#
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+
+#
+# DEVFREQ Drivers
+#
+CONFIG_EXTCON=y
+
+#
+# Extcon Device Drivers
+#
+# CONFIG_EXTCON_GPIO is not set
+# CONFIG_EXTCON_ADC_JACK is not set
+# CONFIG_MEMORY is not set
+CONFIG_IIO=y
+# CONFIG_IIO_BUFFER is not set
+# CONFIG_IIO_TRIGGER is not set
+
+#
+# Accelerometers
+#
+# CONFIG_KXSD9 is not set
+# CONFIG_IIO_ST_ACCEL_3AXIS is not set
+
+#
+# Analog to digital converters
+#
+# CONFIG_AD7266 is not set
+# CONFIG_AD7298 is not set
+# CONFIG_AD7923 is not set
+# CONFIG_AD7791 is not set
+# CONFIG_AD7793 is not set
+# CONFIG_AD7476 is not set
+# CONFIG_AD7887 is not set
+# CONFIG_MAX1363 is not set
+# CONFIG_TI_ADC081C is not set
+CONFIG_IIO_BASINCOVE_GPADC=y
+
+#
+# Amplifiers
+#
+# CONFIG_AD8366 is not set
+
+#
+# HID Sensor IIO Common
+#
+
+#
+# Digital to analog converters
+#
+# CONFIG_AD5064 is not set
+# CONFIG_AD5360 is not set
+# CONFIG_AD5380 is not set
+# CONFIG_AD5421 is not set
+# CONFIG_AD5624R_SPI is not set
+# CONFIG_AD5446 is not set
+# CONFIG_AD5449 is not set
+# CONFIG_AD5504 is not set
+# CONFIG_AD5755 is not set
+# CONFIG_AD5764 is not set
+# CONFIG_AD5791 is not set
+# CONFIG_AD5686 is not set
+# CONFIG_MAX517 is not set
+# CONFIG_MCP4725 is not set
+
+#
+# Frequency Synthesizers DDS/PLL
+#
+
+#
+# Clock Generator/Distribution
+#
+# CONFIG_AD9523 is not set
+
+#
+# Phase-Locked Loop (PLL) frequency synthesizers
+#
+# CONFIG_ADF4350 is not set
+
+#
+# Digital gyroscope sensors
+#
+# CONFIG_ADIS16080 is not set
+# CONFIG_ADIS16136 is not set
+# CONFIG_ADXRS450 is not set
+# CONFIG_IIO_ST_GYRO_3AXIS is not set
+# CONFIG_ITG3200 is not set
+
+#
+# Inertial measurement units
+#
+# CONFIG_ADIS16400 is not set
+# CONFIG_ADIS16480 is not set
+# CONFIG_INV_MPU6050_IIO is not set
+
+#
+# Light sensors
+#
+# CONFIG_ADJD_S311 is not set
+# CONFIG_SENSORS_TSL2563 is not set
+# CONFIG_VCNL4000 is not set
+
+#
+# Magnetometer sensors
+#
+# CONFIG_AK8975 is not set
+# CONFIG_IIO_ST_MAGN_3AXIS is not set
+# CONFIG_VME_BUS is not set
+# CONFIG_PWM is not set
+# CONFIG_IPACK_BUS is not set
+# CONFIG_RESET_CONTROLLER is not set
+
+#
+# Firmware Drivers
+#
+# CONFIG_EDD is not set
+CONFIG_FIRMWARE_MEMMAP=y
+# CONFIG_DELL_RBU is not set
+# CONFIG_DCDBAS is not set
+CONFIG_DMIID=y
+# CONFIG_DMI_SYSFS is not set
+# CONFIG_ISCSI_IBFT_FIND is not set
+# CONFIG_GOOGLE_FIRMWARE is not set
+
+#
+# File systems
+#
+CONFIG_DCACHE_WORD_ACCESS=y
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_POSIX_ACL is not set
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_EXT4_DEBUG is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_JBD2=y
+# CONFIG_JBD2_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_FANOTIFY=y
+# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set
+# CONFIG_QUOTA is not set
+# CONFIG_QUOTACTL is not set
+# CONFIG_AUTOFS4_FS is not set
+CONFIG_FUSE_FS=y
+# CONFIG_CUSE is not set
+CONFIG_GENERIC_ACL=y
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+# CONFIG_VFAT_FS_NO_DUALNAMES is not set
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_VFAT_NO_CREATE_WITH_LONGNAMES is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+# CONFIG_PROC_KCORE is not set
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_XATTR=y
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_CONFIGFS_FS=y
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX6FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_FTRACE=y
+CONFIG_PSTORE_RAM=y
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_F2FS_FS is not set
+# CONFIG_AUFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V2=y
+CONFIG_NFS_DEF_FILE_IO_SIZE=4096
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_SWAP is not set
+# CONFIG_NFS_V4_1 is not set
+# CONFIG_NFS_USE_LEGACY_DNS is not set
+CONFIG_NFS_USE_KERNEL_DNS=y
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+# CONFIG_SUNRPC_DEBUG is not set
+# CONFIG_CEPH_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_MAC_ROMAN is not set
+# CONFIG_NLS_MAC_CELTIC is not set
+# CONFIG_NLS_MAC_CENTEURO is not set
+# CONFIG_NLS_MAC_CROATIAN is not set
+# CONFIG_NLS_MAC_CYRILLIC is not set
+# CONFIG_NLS_MAC_GAELIC is not set
+# CONFIG_NLS_MAC_GREEK is not set
+# CONFIG_NLS_MAC_ICELAND is not set
+# CONFIG_NLS_MAC_INUIT is not set
+# CONFIG_NLS_MAC_ROMANIAN is not set
+# CONFIG_NLS_MAC_TURKISH is not set
+CONFIG_NLS_UTF8=y
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=2048
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_READABLE_ASM is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=1
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=1
+# CONFIG_PANIC_ON_OOPS is not set
+CONFIG_PANIC_ON_OOPS_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+CONFIG_HAVE_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=400
+# CONFIG_DEBUG_KMEMLEAK_TEST is not set
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_PREEMPT=y
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+CONFIG_TRACE_IRQFLAGS=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
+CONFIG_DEBUG_STACK_USAGE=y
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_HIGHMEM is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_INFO_REDUCED is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_VIRTUAL is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_LIST=y
+CONFIG_TEST_LIST_SORT=y
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+# CONFIG_DEBUG_CREDENTIALS is not set
+CONFIG_ARCH_WANT_FRAME_POINTERS=y
+CONFIG_FRAME_POINTER=y
+CONFIG_BOOT_PRINTK_DELAY=y
+
+#
+# RCU Debugging
+#
+# CONFIG_PROVE_RCU_DELAY is not set
+CONFIG_SPARSE_RCU_POINTER=y
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_RCU_CPU_STALL_VERBOSE=y
+CONFIG_RCU_CPU_STALL_INFO=y
+# CONFIG_RCU_TRACE is not set
+# CONFIG_KPROBES_SANITY_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_DEBUG_PER_CPU_MAPS is not set
+# CONFIG_LKDTM is not set
+# CONFIG_NOTIFIER_ERROR_INJECTION is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS=y
+# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+CONFIG_USER_STACKTRACE_SUPPORT=y
+CONFIG_NOP_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_TRACER_MAX_TRACE=y
+CONFIG_TRACE_CLOCK=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_GENERIC_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_FUNCTION_GRAPH_TRACER=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_TRACER_SNAPSHOT=y
+CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_KPROBE_EVENT is not set
+# CONFIG_UPROBE_EVENT is not set
+# CONFIG_PROBE_EVENTS is not set
+CONFIG_DYNAMIC_FTRACE=y
+CONFIG_DYNAMIC_FTRACE_WITH_REGS=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_FTRACE_MCOUNT_RECORD=y
+# CONFIG_FTRACE_STARTUP_TEST is not set
+# CONFIG_MMIOTRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_RING_BUFFER_STARTUP_TEST is not set
+# CONFIG_RBTREE_TEST is not set
+# CONFIG_INTERVAL_TREE_TEST is not set
+CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
+CONFIG_DYNAMIC_DEBUG=y
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_HAVE_ARCH_KMEMCHECK=y
+# CONFIG_TEST_STRING_HELPERS is not set
+# CONFIG_TEST_KSTRTOX is not set
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_X86_VERBOSE_BOOTUP=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_EARLY_PRINTK_INTEL_MID=y
+# CONFIG_EARLY_PRINTK_DBGP is not set
+CONFIG_DEBUG_STACKOVERFLOW=y
+# CONFIG_X86_PTDUMP is not set
+CONFIG_DEBUG_RODATA=y
+# CONFIG_DEBUG_RODATA_TEST is not set
+CONFIG_DEBUG_SET_MODULE_RONX=y
+CONFIG_DEBUG_NX_TEST=m
+CONFIG_DOUBLEFAULT=y
+# CONFIG_DEBUG_TLBFLUSH is not set
+# CONFIG_IOMMU_STRESS is not set
+CONFIG_HAVE_MMIOTRACE_SUPPORT=y
+# CONFIG_X86_DECODER_SELFTEST is not set
+CONFIG_IO_DELAY_TYPE_0X80=0
+CONFIG_IO_DELAY_TYPE_0XED=1
+CONFIG_IO_DELAY_TYPE_UDELAY=2
+CONFIG_IO_DELAY_TYPE_NONE=3
+CONFIG_IO_DELAY_0X80=y
+# CONFIG_IO_DELAY_0XED is not set
+# CONFIG_IO_DELAY_UDELAY is not set
+# CONFIG_IO_DELAY_NONE is not set
+CONFIG_DEFAULT_IO_DELAY_TYPE=0
+CONFIG_DEBUG_BOOT_PARAMS=y
+# CONFIG_CPA_DEBUG is not set
+CONFIG_OPTIMIZE_INLINING=y
+# CONFIG_DEBUG_NMI_SELFTEST is not set
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_ENCRYPTED_KEYS is not set
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
+CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
+CONFIG_SECURITY_NETWORK=y
+# CONFIG_SECURITY_NETWORK_XFRM is not set
+# CONFIG_SECURITY_PATH is not set
+CONFIG_LSM_MMAP_MIN_ADDR=65536
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_SECURITY_SELINUX_DEVELOP=y
+CONFIG_SECURITY_SELINUX_AVC_STATS=y
+CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
+# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set
+# CONFIG_SECURITY_SMACK is not set
+# CONFIG_SECURITY_TOMOYO is not set
+# CONFIG_SECURITY_APPARMOR is not set
+# CONFIG_SECURITY_YAMA is not set
+# CONFIG_IMA is not set
+# CONFIG_EVM is not set
+CONFIG_DEFAULT_SECURITY_SELINUX=y
+# CONFIG_DEFAULT_SECURITY_DAC is not set
+CONFIG_DEFAULT_SECURITY="selinux"
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTODEV is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_USER is not set
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
+CONFIG_CRYPTO_GF128MUL=y
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_PCRYPT is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+CONFIG_CRYPTO_CRYPTD=y
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+CONFIG_CRYPTO_ABLK_HELPER_X86=y
+
+#
+# Authenticated Encryption with Associated Data
+#
+CONFIG_CRYPTO_CCM=y
+# CONFIG_CRYPTO_GCM is not set
+CONFIG_CRYPTO_SEQIV=y
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CTR=y
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=y
+# CONFIG_CRYPTO_PCBC is not set
+CONFIG_CRYPTO_XTS=y
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_CMAC is not set
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=y
+# CONFIG_CRYPTO_CRC32C_INTEL is not set
+# CONFIG_CRYPTO_CRC32 is not set
+# CONFIG_CRYPTO_CRC32_PCLMUL is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_AES_586=y
+CONFIG_CRYPTO_AES_NI_INTEL=y
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=y
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SALSA20_586 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_SERPENT_SSE2_586 is not set
+# CONFIG_CRYPTO_TEA is not set
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_TWOFISH_COMMON=y
+CONFIG_CRYPTO_TWOFISH_586=y
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_USER_API_HASH is not set
+# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_CRYPTO_DEV_PADLOCK is not set
+# CONFIG_CRYPTO_DEV_GEODE is not set
+CONFIG_ASYMMETRIC_KEY_TYPE=y
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
+CONFIG_PUBLIC_KEY_ALGO_RSA=y
+CONFIG_X509_CERTIFICATE_PARSER=y
+CONFIG_HAVE_KVM=y
+CONFIG_VIRTUALIZATION=y
+# CONFIG_KVM is not set
+# CONFIG_LGUEST is not set
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_STRNCPY_FROM_USER=y
+CONFIG_GENERIC_STRNLEN_USER=y
+CONFIG_GENERIC_FIND_FIRST_BIT=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_IOMAP=y
+CONFIG_GENERIC_IO=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=y
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC32_SELFTEST is not set
+# CONFIG_CRC32_SLICEBY8 is not set
+# CONFIG_CRC32_SLICEBY4 is not set
+# CONFIG_CRC32_SARWATE is not set
+CONFIG_CRC32_BIT=y
+# CONFIG_CRC7 is not set
+CONFIG_LIBCRC32C=y
+# CONFIG_CRC8 is not set
+CONFIG_AUDIT_GENERIC=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_XZ_DEC=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_BCJ=y
+# CONFIG_XZ_DEC_TEST is not set
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_REED_SOLOMON=y
+CONFIG_REED_SOLOMON_ENC8=y
+CONFIG_REED_SOLOMON_DEC8=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_CPU_RMAP=y
+CONFIG_DQL=y
+CONFIG_NLATTR=y
+CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y
+CONFIG_AVERAGE=y
+CONFIG_CLZ_TAB=y
+# CONFIG_CORDIC is not set
+# CONFIG_DDR is not set
+CONFIG_MPILIB=y
+CONFIG_OID_REGISTRY=y
diff --git a/arch/x86/configs/i386_qemu_defconfig b/arch/x86/configs/i386_qemu_defconfig
new file mode 100644
index 0000000..6bbf453
--- /dev/null
+++ b/arch/x86/configs/i386_qemu_defconfig
@@ -0,0 +1,2959 @@
+# CONFIG_64BIT is not set
+CONFIG_X86_32=y
+CONFIG_X86=y
+CONFIG_INSTRUCTION_DECODER=y
+CONFIG_OUTPUT_FORMAT="elf32-i386"
+CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_MMU=y
+CONFIG_NEED_SG_DMA_LENGTH=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_ARCH_HAS_CPU_RELAX=y
+CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
+CONFIG_ARCH_HAS_CPU_AUTOPROBE=y
+CONFIG_HAVE_SETUP_PER_CPU_AREA=y
+CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
+CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_ZONE_DMA32 is not set
+# CONFIG_AUDIT_ARCH is not set
+CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
+CONFIG_X86_32_SMP=y
+CONFIG_X86_HT=y
+CONFIG_X86_32_LAZY_GS=y
+CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-ecx -fcall-saved-edx"
+CONFIG_ARCH_CPU_PROBE_RELEASE=y
+CONFIG_ARCH_SUPPORTS_UPROBES=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_IRQ_WORK=y
+CONFIG_BUILDTIME_EXTABLE_SORT=y
+
+#
+# General setup
+#
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE=""
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_XZ=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_XZ is not set
+# CONFIG_KERNEL_LZO is not set
+CONFIG_DEFAULT_HOSTNAME="(none)"
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+CONFIG_FHANDLE=y
+CONFIG_AUDIT=y
+CONFIG_AUDITSYSCALL=y
+CONFIG_AUDIT_WATCH=y
+CONFIG_AUDIT_TREE=y
+# CONFIG_AUDIT_LOGINUID_IMMUTABLE is not set
+CONFIG_HAVE_GENERIC_HARDIRQS=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_PENDING_IRQ=y
+CONFIG_IRQ_DOMAIN=y
+# CONFIG_IRQ_DOMAIN_DEBUG is not set
+CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_CLOCKSOURCE_WATCHDOG=y
+CONFIG_KTIME_SCALAR=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+
+#
+# Timers subsystem
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ_COMMON=y
+# CONFIG_HZ_PERIODIC is not set
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+
+#
+# CPU/Task time and stats accounting
+#
+CONFIG_TICK_CPU_ACCOUNTING=y
+# CONFIG_IRQ_TIME_ACCOUNTING is not set
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_PREEMPT_RCU is not set
+CONFIG_RCU_STALL_COMMON=y
+CONFIG_RCU_FANOUT=32
+CONFIG_RCU_FANOUT_LEAF=16
+# CONFIG_RCU_FANOUT_EXACT is not set
+CONFIG_RCU_FAST_NO_HZ=y
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_RCU_NOCB_CPU is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
+CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
+CONFIG_ARCH_WANTS_PROT_NUMA_PROT_NONE=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_PROC_PID_CPUSET=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+# CONFIG_MEMCG is not set
+# CONFIG_CGROUP_HUGETLB is not set
+# CONFIG_CGROUP_PERF is not set
+# CONFIG_CGROUP_SCHED is not set
+# CONFIG_BLK_CGROUP is not set
+# CONFIG_CHECKPOINT_RESTORE is not set
+CONFIG_NAMESPACES=y
+CONFIG_UTS_NS=y
+CONFIG_IPC_NS=y
+# CONFIG_USER_NS is not set
+CONFIG_PID_NS=y
+# CONFIG_NET_NS is not set
+CONFIG_UIDGID_CONVERTED=y
+# CONFIG_UIDGID_STRICT_TYPE_CHECKS is not set
+# CONFIG_SCHED_AUTOGROUP is not set
+# CONFIG_SYSFS_DEPRECATED is not set
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_RD_XZ=y
+CONFIG_RD_LZO=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_HAVE_UID16=y
+CONFIG_SYSCTL_EXCEPTION_TRACE=y
+CONFIG_HOTPLUG=y
+CONFIG_HAVE_PCSPKR_PLATFORM=y
+CONFIG_PANIC_TIMEOUT=0
+# CONFIG_EXPERT is not set
+CONFIG_UID16=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_PCSPKR_PLATFORM=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_PCI_QUIRKS=y
+# CONFIG_EMBEDDED is not set
+CONFIG_HAVE_PERF_EVENTS=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_COMPAT_BRK=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
+# CONFIG_OPROFILE is not set
+CONFIG_HAVE_OPROFILE=y
+CONFIG_OPROFILE_NMI_TIMER=y
+CONFIG_KPROBES=y
+# CONFIG_JUMP_LABEL is not set
+CONFIG_OPTPROBES=y
+CONFIG_KPROBES_ON_FTRACE=y
+# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_ARCH_USE_BUILTIN_BSWAP=y
+CONFIG_KRETPROBES=y
+CONFIG_USER_RETURN_NOTIFIER=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_OPTPROBES=y
+CONFIG_HAVE_KPROBES_ON_FTRACE=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_DMA_ATTRS=y
+CONFIG_HAVE_DMA_CONTIGUOUS=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
+CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
+CONFIG_HAVE_USER_RETURN_NOTIFIER=y
+CONFIG_HAVE_PERF_EVENTS_NMI=y
+CONFIG_HAVE_PERF_REGS=y
+CONFIG_HAVE_PERF_USER_STACK_DUMP=y
+CONFIG_HAVE_ARCH_JUMP_LABEL=y
+CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
+CONFIG_HAVE_CMPXCHG_LOCAL=y
+CONFIG_HAVE_CMPXCHG_DOUBLE=y
+CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y
+CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
+CONFIG_SECCOMP_FILTER=y
+CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
+CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
+CONFIG_MODULES_USE_ELF_REL=y
+CONFIG_CLONE_BACKWARDS=y
+CONFIG_OLD_SIGSUSPEND3=y
+CONFIG_OLD_SIGACTION=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_GCOV_TOOLCHAIN_IS_ANDROID is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_MODVERSIONS is not set
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_MODULE_SIG is not set
+CONFIG_STOP_MACHINE=y
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_BSGLIB is not set
+CONFIG_BLK_DEV_INTEGRITY=y
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_MSDOS_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_LDM_PARTITION=y
+# CONFIG_LDM_DEBUG is not set
+CONFIG_SGI_PARTITION=y
+# CONFIG_ULTRIX_PARTITION is not set
+CONFIG_SUN_PARTITION=y
+# CONFIG_KARMA_PARTITION is not set
+CONFIG_EFI_PARTITION=y
+CONFIG_SYSV68_PARTITION=y
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_PREEMPT_NOTIFIERS=y
+CONFIG_UNINLINE_SPIN_UNLOCK=y
+CONFIG_FREEZER=y
+
+#
+# Processor type and features
+#
+CONFIG_ZONE_DMA=y
+CONFIG_SMP=y
+CONFIG_X86_MPPARSE=y
+# CONFIG_X86_BIGSMP is not set
+CONFIG_X86_EXTENDED_PLATFORM=y
+# CONFIG_X86_GOLDFISH is not set
+# CONFIG_X86_WANT_INTEL_MID is not set
+# CONFIG_X86_RDC321X is not set
+# CONFIG_X86_32_NON_STANDARD is not set
+CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y
+# CONFIG_X86_32_IRIS is not set
+# CONFIG_SCHED_OMIT_FRAME_POINTER is not set
+CONFIG_HYPERVISOR_GUEST=y
+CONFIG_PARAVIRT=y
+# CONFIG_PARAVIRT_DEBUG is not set
+# CONFIG_PARAVIRT_SPINLOCKS is not set
+# CONFIG_XEN is not set
+# CONFIG_XEN_PRIVILEGED_GUEST is not set
+# CONFIG_KVM_GUEST is not set
+CONFIG_LGUEST_GUEST=y
+# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set
+CONFIG_NO_BOOTMEM=y
+# CONFIG_MEMTEST is not set
+# CONFIG_M486 is not set
+# CONFIG_M586 is not set
+# CONFIG_M586TSC is not set
+# CONFIG_M586MMX is not set
+# CONFIG_M686 is not set
+# CONFIG_MPENTIUMII is not set
+# CONFIG_MPENTIUMIII is not set
+# CONFIG_MPENTIUMM is not set
+# CONFIG_MPENTIUM4 is not set
+# CONFIG_MK6 is not set
+# CONFIG_MK7 is not set
+# CONFIG_MK8 is not set
+# CONFIG_MCRUSOE is not set
+# CONFIG_MEFFICEON is not set
+# CONFIG_MWINCHIPC6 is not set
+# CONFIG_MWINCHIP3D is not set
+# CONFIG_MELAN is not set
+# CONFIG_MGEODEGX1 is not set
+# CONFIG_MGEODE_LX is not set
+# CONFIG_MCYRIXIII is not set
+# CONFIG_MVIAC3_2 is not set
+# CONFIG_MVIAC7 is not set
+CONFIG_MCORE2=y
+# CONFIG_MATOM is not set
+CONFIG_X86_GENERIC=y
+CONFIG_X86_INTERNODE_CACHE_SHIFT=6
+CONFIG_X86_L1_CACHE_SHIFT=6
+CONFIG_X86_INTEL_USERCOPY=y
+CONFIG_X86_USE_PPRO_CHECKSUM=y
+CONFIG_X86_TSC=y
+CONFIG_X86_CMPXCHG64=y
+CONFIG_X86_CMOV=y
+CONFIG_X86_MINIMUM_CPU_FAMILY=5
+CONFIG_X86_DEBUGCTLMSR=y
+CONFIG_CPU_SUP_INTEL=y
+CONFIG_CPU_SUP_AMD=y
+CONFIG_CPU_SUP_CENTAUR=y
+CONFIG_CPU_SUP_TRANSMETA_32=y
+CONFIG_HPET_TIMER=y
+CONFIG_DMI=y
+CONFIG_NR_CPUS=8
+CONFIG_SCHED_SMT=y
+CONFIG_SCHED_MC=y
+# CONFIG_PREEMPT_NONE is not set
+CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_PREEMPT is not set
+CONFIG_X86_LOCAL_APIC=y
+CONFIG_X86_IO_APIC=y
+# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set
+CONFIG_X86_MCE=y
+CONFIG_X86_MCE_INTEL=y
+CONFIG_X86_MCE_AMD=y
+# CONFIG_X86_ANCIENT_MCE is not set
+CONFIG_X86_MCE_THRESHOLD=y
+# CONFIG_X86_MCE_INJECT is not set
+CONFIG_X86_THERMAL_VECTOR=y
+CONFIG_VM86=y
+# CONFIG_TOSHIBA is not set
+# CONFIG_I8K is not set
+CONFIG_X86_REBOOTFIXUPS=y
+# CONFIG_MICROCODE is not set
+# CONFIG_X86_MSR is not set
+CONFIG_X86_CPUID=y
+# CONFIG_NOHIGHMEM is not set
+# CONFIG_HIGHMEM4G is not set
+CONFIG_HIGHMEM64G=y
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_HIGHMEM=y
+CONFIG_X86_PAE=y
+CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
+CONFIG_ARCH_DMA_ADDR_T_64BIT=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_ILLEGAL_POINTER_VALUE=0
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_SPARSEMEM_STATIC=y
+CONFIG_HAVE_MEMBLOCK=y
+CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
+CONFIG_ARCH_DISCARD_MEMBLOCK=y
+# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=999999
+# CONFIG_COMPACTION is not set
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_MMU_NOTIFIER=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
+# CONFIG_MEMORY_FAILURE is not set
+# CONFIG_TRANSPARENT_HUGEPAGE is not set
+CONFIG_CROSS_MEMORY_ATTACH=y
+# CONFIG_CLEANCACHE is not set
+# CONFIG_FRONTSWAP is not set
+CONFIG_HIGHPTE=y
+# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set
+CONFIG_X86_RESERVE_LOW=64
+# CONFIG_MATH_EMULATION is not set
+CONFIG_MTRR=y
+# CONFIG_MTRR_SANITIZER is not set
+CONFIG_X86_PAT=y
+CONFIG_ARCH_USES_PG_UNCACHED=y
+CONFIG_ARCH_RANDOM=y
+CONFIG_X86_SMAP=y
+CONFIG_SECCOMP=y
+# CONFIG_CC_STACKPROTECTOR is not set
+# CONFIG_HZ_100 is not set
+# CONFIG_HZ_250 is not set
+CONFIG_HZ_300=y
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=300
+CONFIG_SCHED_HRTICK=y
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
+CONFIG_PHYSICAL_START=0x100000
+CONFIG_RELOCATABLE=y
+CONFIG_X86_NEED_RELOCS=y
+CONFIG_PHYSICAL_ALIGN=0x100000
+CONFIG_HOTPLUG_CPU=y
+# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
+# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
+CONFIG_COMPAT_VDSO=y
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+
+#
+# Power management and ACPI options
+#
+# CONFIG_SUSPEND is not set
+# CONFIG_HAS_WAKELOCK is not set
+# CONFIG_WAKELOCK is not set
+# CONFIG_HIBERNATION is not set
+# CONFIG_PM_RUNTIME is not set
+# CONFIG_SUSPEND_TIME is not set
+# CONFIG_ACPI is not set
+# CONFIG_SFI is not set
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_GOV_INTERACTIVE is not set
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+
+#
+# x86 CPU frequency scaling drivers
+#
+# CONFIG_X86_INTEL_PSTATE is not set
+# CONFIG_X86_POWERNOW_K6 is not set
+# CONFIG_X86_POWERNOW_K7 is not set
+# CONFIG_X86_GX_SUSPMOD is not set
+# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
+# CONFIG_X86_SPEEDSTEP_ICH is not set
+# CONFIG_X86_SPEEDSTEP_SMI is not set
+# CONFIG_X86_P4_CLOCKMOD is not set
+# CONFIG_X86_CPUFREQ_NFORCE2 is not set
+# CONFIG_X86_LONGRUN is not set
+
+#
+# shared options
+#
+# CONFIG_X86_SPEEDSTEP_LIB is not set
+CONFIG_CPU_IDLE=y
+# CONFIG_CPU_IDLE_MULTIPLE_DRIVERS is not set
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set
+CONFIG_INTEL_IDLE=y
+
+#
+# Bus options (PCI etc.)
+#
+CONFIG_PCI=y
+# CONFIG_PCI_GOBIOS is not set
+# CONFIG_PCI_GOMMCONFIG is not set
+# CONFIG_PCI_GODIRECT is not set
+CONFIG_PCI_GOANY=y
+CONFIG_PCI_BIOS=y
+CONFIG_PCI_DIRECT=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCIEPORTBUS=y
+# CONFIG_HOTPLUG_PCI_PCIE is not set
+CONFIG_PCIEAER=y
+# CONFIG_PCIE_ECRC is not set
+# CONFIG_PCIEAER_INJECT is not set
+CONFIG_PCIEASPM=y
+# CONFIG_PCIEASPM_DEBUG is not set
+CONFIG_PCIEASPM_DEFAULT=y
+# CONFIG_PCIEASPM_POWERSAVE is not set
+# CONFIG_PCIEASPM_PERFORMANCE is not set
+CONFIG_ARCH_SUPPORTS_MSI=y
+CONFIG_PCI_MSI=y
+# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
+# CONFIG_PCI_STUB is not set
+CONFIG_HT_IRQ=y
+# CONFIG_PCI_IOV is not set
+# CONFIG_PCI_PRI is not set
+# CONFIG_PCI_PASID is not set
+CONFIG_PCI_LABEL=y
+
+#
+# PCIE Host Controller Drivers
+#
+CONFIG_ISA_DMA_API=y
+CONFIG_ISA=y
+CONFIG_EISA=y
+CONFIG_EISA_VLB_PRIMING=y
+CONFIG_EISA_PCI_EISA=y
+CONFIG_EISA_VIRTUAL_ROOT=y
+CONFIG_EISA_NAMES=y
+# CONFIG_SCx200 is not set
+# CONFIG_ALIX is not set
+# CONFIG_NET5501 is not set
+# CONFIG_GEOS is not set
+CONFIG_AMD_NB=y
+CONFIG_PCCARD=y
+# CONFIG_PCMCIA is not set
+CONFIG_CARDBUS=y
+
+#
+# PC-card bridges
+#
+CONFIG_YENTA=y
+CONFIG_YENTA_O2=y
+CONFIG_YENTA_RICOH=y
+CONFIG_YENTA_TI=y
+CONFIG_YENTA_ENE_TUNE=y
+CONFIG_YENTA_TOSHIBA=y
+CONFIG_PCMCIA_PROBE=y
+CONFIG_HOTPLUG_PCI=y
+# CONFIG_HOTPLUG_PCI_COMPAQ is not set
+# CONFIG_HOTPLUG_PCI_IBM is not set
+CONFIG_HOTPLUG_PCI_CPCI=y
+# CONFIG_HOTPLUG_PCI_CPCI_ZT5550 is not set
+# CONFIG_HOTPLUG_PCI_CPCI_GENERIC is not set
+CONFIG_HOTPLUG_PCI_SHPC=y
+# CONFIG_RAPIDIO is not set
+
+#
+# Executable file formats / Emulations
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_SCRIPT=y
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+CONFIG_COREDUMP=y
+CONFIG_HAVE_ATOMIC_IOMAP=y
+CONFIG_HAVE_TEXT_POKE_SMP=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_DIAG is not set
+CONFIG_UNIX=y
+# CONFIG_UNIX_DIAG is not set
+CONFIG_XFRM=y
+CONFIG_XFRM_ALGO=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+# CONFIG_IP_FIB_TRIE_STATS is not set
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE_DEMUX is not set
+# CONFIG_NET_IP_TUNNEL is not set
+CONFIG_IP_MROUTE=y
+# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_NET_IPVTI is not set
+# CONFIG_INET_AH is not set
+CONFIG_INET_ESP=y
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+# CONFIG_INET_XFRM_MODE_BEET is not set
+CONFIG_INET_LRO=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_INET_UDP_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+CONFIG_TCP_MD5SIG=y
+# CONFIG_IPV6 is not set
+# CONFIG_NETLABEL is not set
+# CONFIG_ANDROID_PARANOID_NETWORK is not set
+# CONFIG_NET_ACTIVITY_STATS is not set
+CONFIG_NETWORK_SECMARK=y
+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+
+#
+# Core Netfilter Configuration
+#
+# CONFIG_NETFILTER_NETLINK_ACCT is not set
+# CONFIG_NETFILTER_NETLINK_QUEUE is not set
+# CONFIG_NETFILTER_NETLINK_LOG is not set
+# CONFIG_NF_CONNTRACK is not set
+CONFIG_NETFILTER_XTABLES=y
+
+#
+# Xtables combined modules
+#
+# CONFIG_NETFILTER_XT_MARK is not set
+
+#
+# Xtables targets
+#
+# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set
+# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set
+# CONFIG_NETFILTER_XT_TARGET_HMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set
+# CONFIG_NETFILTER_XT_TARGET_LOG is not set
+# CONFIG_NETFILTER_XT_TARGET_MARK is not set
+# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
+# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set
+# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
+# CONFIG_NETFILTER_XT_TARGET_TEE is not set
+# CONFIG_NETFILTER_XT_TARGET_SECMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
+
+#
+# Xtables matches
+#
+# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_BPF is not set
+# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
+# CONFIG_NETFILTER_XT_MATCH_CPU is not set
+# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
+# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set
+# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
+# CONFIG_NETFILTER_XT_MATCH_ECN is not set
+# CONFIG_NETFILTER_XT_MATCH_ESP is not set
+# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_HL is not set
+# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
+# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set
+# CONFIG_NETFILTER_XT_MATCH_LIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_MAC is not set
+# CONFIG_NETFILTER_XT_MATCH_MARK is not set
+# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set
+# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set
+# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
+# CONFIG_NETFILTER_XT_MATCH_POLICY is not set
+# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set
+# CONFIG_NETFILTER_XT_MATCH_QUOTA2 is not set
+# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
+# CONFIG_NETFILTER_XT_MATCH_REALM is not set
+# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
+# CONFIG_NETFILTER_XT_MATCH_STRING is not set
+# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
+# CONFIG_NETFILTER_XT_MATCH_TIME is not set
+# CONFIG_NETFILTER_XT_MATCH_U32 is not set
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+# CONFIG_NF_DEFRAG_IPV4 is not set
+CONFIG_IP_NF_IPTABLES=y
+# CONFIG_IP_NF_MATCH_AH is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+CONFIG_IP_NF_FILTER=y
+# CONFIG_IP_NF_TARGET_REJECT is not set
+# CONFIG_IP_NF_TARGET_ULOG is not set
+# CONFIG_IP_NF_MANGLE is not set
+# CONFIG_IP_NF_RAW is not set
+# CONFIG_IP_NF_SECURITY is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+CONFIG_ATM=y
+CONFIG_ATM_CLIP=y
+# CONFIG_ATM_CLIP_NO_ICMP is not set
+# CONFIG_ATM_LANE is not set
+# CONFIG_ATM_BR2684 is not set
+# CONFIG_L2TP is not set
+# CONFIG_BRIDGE is not set
+CONFIG_HAVE_NET_DSA=y
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_PHONET is not set
+# CONFIG_MHI is not set
+# CONFIG_IEEE802154 is not set
+CONFIG_NET_SCHED=y
+
+#
+# Queueing/Scheduling
+#
+# CONFIG_NET_SCH_CBQ is not set
+# CONFIG_NET_SCH_HTB is not set
+# CONFIG_NET_SCH_HFSC is not set
+# CONFIG_NET_SCH_ATM is not set
+# CONFIG_NET_SCH_PRIO is not set
+# CONFIG_NET_SCH_MULTIQ is not set
+# CONFIG_NET_SCH_RED is not set
+# CONFIG_NET_SCH_SFB is not set
+# CONFIG_NET_SCH_SFQ is not set
+# CONFIG_NET_SCH_TEQL is not set
+# CONFIG_NET_SCH_TBF is not set
+# CONFIG_NET_SCH_GRED is not set
+# CONFIG_NET_SCH_DSMARK is not set
+# CONFIG_NET_SCH_NETEM is not set
+# CONFIG_NET_SCH_DRR is not set
+# CONFIG_NET_SCH_MQPRIO is not set
+# CONFIG_NET_SCH_CHOKE is not set
+# CONFIG_NET_SCH_QFQ is not set
+# CONFIG_NET_SCH_CODEL is not set
+# CONFIG_NET_SCH_FQ_CODEL is not set
+# CONFIG_NET_SCH_INGRESS is not set
+# CONFIG_NET_SCH_PLUG is not set
+
+#
+# Classification
+#
+CONFIG_NET_CLS=y
+# CONFIG_NET_CLS_BASIC is not set
+# CONFIG_NET_CLS_TCINDEX is not set
+# CONFIG_NET_CLS_ROUTE4 is not set
+# CONFIG_NET_CLS_FW is not set
+# CONFIG_NET_CLS_U32 is not set
+# CONFIG_NET_CLS_RSVP is not set
+# CONFIG_NET_CLS_RSVP6 is not set
+# CONFIG_NET_CLS_FLOW is not set
+# CONFIG_NET_CLS_CGROUP is not set
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_STACK=32
+# CONFIG_NET_EMATCH_CMP is not set
+# CONFIG_NET_EMATCH_NBYTE is not set
+# CONFIG_NET_EMATCH_U32 is not set
+# CONFIG_NET_EMATCH_META is not set
+# CONFIG_NET_EMATCH_TEXT is not set
+CONFIG_NET_CLS_ACT=y
+# CONFIG_NET_ACT_POLICE is not set
+# CONFIG_NET_ACT_GACT is not set
+# CONFIG_NET_ACT_MIRRED is not set
+# CONFIG_NET_ACT_IPT is not set
+# CONFIG_NET_ACT_NAT is not set
+# CONFIG_NET_ACT_PEDIT is not set
+# CONFIG_NET_ACT_SIMP is not set
+# CONFIG_NET_ACT_SKBEDIT is not set
+# CONFIG_NET_ACT_CSUM is not set
+CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
+CONFIG_DNS_RESOLVER=y
+# CONFIG_BATMAN_ADV is not set
+# CONFIG_OPENVSWITCH is not set
+# CONFIG_VSOCKETS is not set
+# CONFIG_NETLINK_MMAP is not set
+# CONFIG_NETLINK_DIAG is not set
+# CONFIG_RMNET_DATA is not set
+# CONFIG_NETPRIO_CGROUP is not set
+CONFIG_BQL=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_TCPPROBE is not set
+# CONFIG_NET_DROP_MONITOR is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_FIB_RULES=y
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
+# CONFIG_CEPH_LIB is not set
+# CONFIG_NFC is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+# CONFIG_DEVTMPFS_MOUNT is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+CONFIG_FW_LOADER_USER_HELPER=y
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_GENERIC_CPU_DEVICES is not set
+CONFIG_HAVE_CPU_AUTOPROBE=y
+# CONFIG_DMA_SHARED_BUFFER is not set
+# CONFIG_CMA is not set
+
+#
+# Bus devices
+#
+# CONFIG_CONNECTOR is not set
+# CONFIG_MTD is not set
+CONFIG_PARPORT=y
+CONFIG_PARPORT_PC=y
+# CONFIG_PARPORT_SERIAL is not set
+CONFIG_PARPORT_PC_FIFO=y
+# CONFIG_PARPORT_PC_SUPERIO is not set
+# CONFIG_PARPORT_GSC is not set
+# CONFIG_PARPORT_AX88796 is not set
+CONFIG_PARPORT_1284=y
+CONFIG_PNP=y
+CONFIG_PNP_DEBUG_MESSAGES=y
+
+#
+# Protocols
+#
+CONFIG_ISAPNP=y
+CONFIG_PNPBIOS=y
+CONFIG_PNPBIOS_PROC_FS=y
+# CONFIG_PNPACPI is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_PARIDE is not set
+# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_DRBD is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_NVME is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=65536
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_VIRTIO_BLK=y
+# CONFIG_BLK_DEV_HD is not set
+# CONFIG_BLK_DEV_RBD is not set
+# CONFIG_BLK_DEV_RSXX is not set
+
+#
+# Misc devices
+#
+# CONFIG_SENSORS_LIS3LV02D is not set
+# CONFIG_AD525X_DPOT is not set
+# CONFIG_DUMMY_IRQ is not set
+# CONFIG_IBM_ASM is not set
+# CONFIG_PHANTOM is not set
+# CONFIG_INTEL_MID_PTI is not set
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
+# CONFIG_ICS932S401 is not set
+# CONFIG_ATMEL_SSC is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_KERNEL_DEBUGGER_CORE is not set
+# CONFIG_HP_ILO is not set
+# CONFIG_APDS9802ALS is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_ISL29020 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_SENSORS_BH1780 is not set
+# CONFIG_SENSORS_BH1770 is not set
+# CONFIG_SENSORS_APDS990X is not set
+# CONFIG_HMC6352 is not set
+# CONFIG_SENSORS_NCT1008 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_UID_STAT is not set
+# CONFIG_VMWARE_BALLOON is not set
+# CONFIG_BMP085_I2C is not set
+# CONFIG_PCH_PHUB is not set
+# CONFIG_USB_SWITCH_FSA9480 is not set
+# CONFIG_SRAM is not set
+# CONFIG_BCM4329_RFKILL is not set
+# CONFIG_TEGRA_CRYPTO_DEV is not set
+# CONFIG_APANIC is not set
+CONFIG_APANIC_PLABEL="kpanic"
+# CONFIG_THERM_EST is not set
+# CONFIG_FAN_THERM_EST is not set
+# CONFIG_BLUEDROID_PM is not set
+CONFIG_CPULOAD_MONITOR=y
+# CONFIG_MODS is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_CB710_CORE is not set
+
+#
+# Texas Instruments shared transport line discipline
+#
+# CONFIG_ST_GPS is not set
+# CONFIG_SENSORS_LIS3_I2C is not set
+
+#
+# Altera FPGA firmware download module
+#
+# CONFIG_ALTERA_STAPL is not set
+# CONFIG_INTEL_MEI is not set
+# CONFIG_INTEL_MEI_ME is not set
+# CONFIG_VMWARE_VMCI is not set
+# CONFIG_ISSP is not set
+# CONFIG_GPS is not set
+# CONFIG_HTC_HEADSET_MGR is not set
+# CONFIG_HEADSET_DEBUG_UART is not set
+
+#
+# Qualcomm Modem Drivers
+#
+CONFIG_HAVE_IDE=y
+CONFIG_IDE=y
+
+#
+# Please see Documentation/ide/ide.txt for help/info on IDE drives
+#
+CONFIG_IDE_ATAPI=y
+# CONFIG_BLK_DEV_IDE_SATA is not set
+CONFIG_IDE_GD=y
+CONFIG_IDE_GD_ATA=y
+# CONFIG_IDE_GD_ATAPI is not set
+# CONFIG_BLK_DEV_DELKIN is not set
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+CONFIG_IDE_PROC_FS=y
+
+#
+# IDE chipset support/bugfixes
+#
+# CONFIG_IDE_GENERIC is not set
+# CONFIG_BLK_DEV_PLATFORM is not set
+# CONFIG_BLK_DEV_CMD640 is not set
+# CONFIG_BLK_DEV_IDEPNP is not set
+
+#
+# PCI IDE chipsets support
+#
+# CONFIG_BLK_DEV_GENERIC is not set
+# CONFIG_BLK_DEV_OPTI621 is not set
+# CONFIG_BLK_DEV_RZ1000 is not set
+# CONFIG_BLK_DEV_AEC62XX is not set
+# CONFIG_BLK_DEV_ALI15X3 is not set
+# CONFIG_BLK_DEV_AMD74XX is not set
+# CONFIG_BLK_DEV_ATIIXP is not set
+# CONFIG_BLK_DEV_CMD64X is not set
+# CONFIG_BLK_DEV_TRIFLEX is not set
+# CONFIG_BLK_DEV_CS5520 is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_CS5535 is not set
+# CONFIG_BLK_DEV_CS5536 is not set
+# CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_JMICRON is not set
+# CONFIG_BLK_DEV_SC1200 is not set
+# CONFIG_BLK_DEV_PIIX is not set
+# CONFIG_BLK_DEV_IT8172 is not set
+# CONFIG_BLK_DEV_IT8213 is not set
+# CONFIG_BLK_DEV_IT821X is not set
+# CONFIG_BLK_DEV_NS87415 is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+# CONFIG_BLK_DEV_SVWKS is not set
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SIS5513 is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_TRM290 is not set
+# CONFIG_BLK_DEV_VIA82CXXX is not set
+# CONFIG_BLK_DEV_TC86C001 is not set
+
+#
+# Other IDE chipsets support
+#
+
+#
+# Note: most of these also require special kernel boot parameters
+#
+# CONFIG_BLK_DEV_4DRIVES is not set
+# CONFIG_BLK_DEV_ALI14XX is not set
+# CONFIG_BLK_DEV_DTC2278 is not set
+# CONFIG_BLK_DEV_HT6560B is not set
+# CONFIG_BLK_DEV_QD65XX is not set
+# CONFIG_BLK_DEV_UMC8672 is not set
+# CONFIG_BLK_DEV_IDEDMA is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_CHR_DEV_SCH is not set
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+# CONFIG_SCSI_SCAN_ASYNC is not set
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_ISCSI_BOOT_SYSFS is not set
+# CONFIG_SCSI_CXGB3_ISCSI is not set
+# CONFIG_SCSI_CXGB4_ISCSI is not set
+# CONFIG_SCSI_BNX2_ISCSI is not set
+# CONFIG_SCSI_BNX2X_FCOE is not set
+# CONFIG_BE2ISCSI is not set
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_HPSA is not set
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_3W_SAS is not set
+# CONFIG_SCSI_7000FASST is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AHA152X is not set
+# CONFIG_SCSI_AHA1542 is not set
+# CONFIG_SCSI_AHA1740 is not set
+# CONFIG_SCSI_AACRAID is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_MVSAS is not set
+# CONFIG_SCSI_MVUMI is not set
+# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_IN2000 is not set
+# CONFIG_SCSI_ARCMSR is not set
+CONFIG_MEGARAID_NEWGEN=y
+# CONFIG_MEGARAID_MM is not set
+# CONFIG_MEGARAID_LEGACY is not set
+# CONFIG_MEGARAID_SAS is not set
+# CONFIG_SCSI_MPT2SAS is not set
+# CONFIG_SCSI_MPT3SAS is not set
+# CONFIG_SCSI_UFSHCD is not set
+# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_VMWARE_PVSCSI is not set
+# CONFIG_LIBFC is not set
+# CONFIG_LIBFCOE is not set
+# CONFIG_FCOE is not set
+# CONFIG_FCOE_FNIC is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_DTC3280 is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_ISCI is not set
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_PPA is not set
+# CONFIG_SCSI_IMM is not set
+# CONFIG_SCSI_NCR53C406A is not set
+# CONFIG_SCSI_STEX is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_PAS16 is not set
+# CONFIG_SCSI_QLOGIC_FAS is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+# CONFIG_SCSI_QLA_FC is not set
+# CONFIG_SCSI_QLA_ISCSI is not set
+# CONFIG_SCSI_LPFC is not set
+# CONFIG_SCSI_SIM710 is not set
+# CONFIG_SCSI_SYM53C416 is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_T128 is not set
+# CONFIG_SCSI_U14_34F is not set
+# CONFIG_SCSI_ULTRASTOR is not set
+# CONFIG_SCSI_NSP32 is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_PMCRAID is not set
+# CONFIG_SCSI_PM8001 is not set
+# CONFIG_SCSI_SRP is not set
+# CONFIG_SCSI_BFA_FC is not set
+# CONFIG_SCSI_VIRTIO is not set
+# CONFIG_SCSI_CHELSIO_FCOE is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+CONFIG_ATA=y
+# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
+CONFIG_SATA_PMP=y
+
+#
+# Controllers with non-SFF native interface
+#
+CONFIG_SATA_AHCI=y
+# CONFIG_SATA_AHCI_PLATFORM is not set
+# CONFIG_SATA_AHCI_TEGRA is not set
+# CONFIG_SATA_INIC162X is not set
+# CONFIG_SATA_ACARD_AHCI is not set
+# CONFIG_SATA_SIL24 is not set
+CONFIG_ATA_SFF=y
+
+#
+# SFF controllers with custom DMA interface
+#
+# CONFIG_PDC_ADMA is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_SX4 is not set
+CONFIG_ATA_BMDMA=y
+
+#
+# SATA SFF controllers with BMDMA
+#
+CONFIG_ATA_PIIX=y
+# CONFIG_SATA_HIGHBANK is not set
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_SVW is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+
+#
+# PATA SFF controllers with BMDMA
+#
+# CONFIG_PATA_ALI is not set
+# CONFIG_PATA_AMD is not set
+# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATIIXP is not set
+# CONFIG_PATA_ATP867X is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CS5520 is not set
+# CONFIG_PATA_CS5530 is not set
+# CONFIG_PATA_CS5535 is not set
+# CONFIG_PATA_CS5536 is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_IT8213 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_MARVELL is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NINJA32 is not set
+# CONFIG_PATA_NS87415 is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC2027X is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RDC is not set
+# CONFIG_PATA_SC1200 is not set
+# CONFIG_PATA_SCH is not set
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_SIL680 is not set
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_TOSHIBA is not set
+# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+
+#
+# PIO-only SFF controllers
+#
+# CONFIG_PATA_CMD640_PCI is not set
+# CONFIG_PATA_ISAPNP is not set
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_QDI is not set
+# CONFIG_PATA_RZ1000 is not set
+# CONFIG_PATA_WINBOND_VLB is not set
+
+#
+# Generic fallback / legacy drivers
+#
+CONFIG_ATA_GENERIC=y
+# CONFIG_PATA_LEGACY is not set
+CONFIG_MD=y
+# CONFIG_BLK_DEV_MD is not set
+# CONFIG_BCACHE is not set
+CONFIG_BLK_DEV_DM_BUILTIN=y
+CONFIG_BLK_DEV_DM=y
+# CONFIG_DM_DEBUG is not set
+# CONFIG_DM_CRYPT is not set
+CONFIG_DM_SNAPSHOT=y
+# CONFIG_DM_THIN_PROVISIONING is not set
+# CONFIG_DM_CACHE is not set
+CONFIG_DM_MIRROR=y
+# CONFIG_DM_RAID is not set
+# CONFIG_DM_LOG_USERSPACE is not set
+# CONFIG_DM_ZERO is not set
+# CONFIG_DM_MULTIPATH is not set
+# CONFIG_DM_DELAY is not set
+CONFIG_DM_UEVENT=y
+# CONFIG_DM_FLAKEY is not set
+# CONFIG_DM_VERITY is not set
+# CONFIG_TARGET_CORE is not set
+CONFIG_FUSION=y
+# CONFIG_FUSION_SPI is not set
+# CONFIG_FUSION_FC is not set
+# CONFIG_FUSION_SAS is not set
+CONFIG_FUSION_MAX_SGE=128
+CONFIG_FUSION_LOGGING=y
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_FIREWIRE is not set
+# CONFIG_FIREWIRE_NOSY is not set
+# CONFIG_I2O is not set
+CONFIG_MACINTOSH_DRIVERS=y
+CONFIG_MAC_EMUMOUSEBTN=y
+CONFIG_NETDEVICES=y
+CONFIG_NET_CORE=y
+# CONFIG_BONDING is not set
+CONFIG_DUMMY=y
+# CONFIG_EQUALIZER is not set
+CONFIG_NET_FC=y
+CONFIG_MII=y
+# CONFIG_IFB is not set
+# CONFIG_NET_TEAM is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_VXLAN is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+CONFIG_TUN=y
+CONFIG_VETH=y
+CONFIG_VIRTIO_NET=y
+# CONFIG_ARCNET is not set
+CONFIG_ATM_DRIVERS=y
+# CONFIG_ATM_DUMMY is not set
+# CONFIG_ATM_TCP is not set
+# CONFIG_ATM_LANAI is not set
+# CONFIG_ATM_ENI is not set
+# CONFIG_ATM_FIRESTREAM is not set
+# CONFIG_ATM_ZATM is not set
+# CONFIG_ATM_NICSTAR is not set
+# CONFIG_ATM_IDT77252 is not set
+# CONFIG_ATM_AMBASSADOR is not set
+# CONFIG_ATM_HORIZON is not set
+# CONFIG_ATM_IA is not set
+# CONFIG_ATM_FORE200E is not set
+# CONFIG_ATM_HE is not set
+# CONFIG_ATM_SOLOS is not set
+
+#
+# CAIF transport drivers
+#
+# CONFIG_VHOST_NET is not set
+
+#
+# Distributed Switch Architecture drivers
+#
+# CONFIG_NET_DSA_MV88E6XXX is not set
+# CONFIG_NET_DSA_MV88E6060 is not set
+# CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set
+# CONFIG_NET_DSA_MV88E6131 is not set
+# CONFIG_NET_DSA_MV88E6123_61_65 is not set
+CONFIG_ETHERNET=y
+CONFIG_NET_VENDOR_3COM=y
+# CONFIG_EL3 is not set
+# CONFIG_3C515 is not set
+# CONFIG_VORTEX is not set
+# CONFIG_TYPHOON is not set
+CONFIG_NET_VENDOR_ADAPTEC=y
+# CONFIG_ADAPTEC_STARFIRE is not set
+CONFIG_NET_VENDOR_ALTEON=y
+# CONFIG_ACENIC is not set
+CONFIG_NET_VENDOR_AMD=y
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_LANCE is not set
+# CONFIG_PCNET32 is not set
+# CONFIG_NI65 is not set
+CONFIG_NET_VENDOR_ATHEROS=y
+# CONFIG_ATL2 is not set
+# CONFIG_ATL1 is not set
+# CONFIG_ATL1E is not set
+# CONFIG_ATL1C is not set
+# CONFIG_ALX is not set
+CONFIG_NET_CADENCE=y
+# CONFIG_ARM_AT91_ETHER is not set
+# CONFIG_MACB is not set
+CONFIG_NET_VENDOR_BROADCOM=y
+# CONFIG_B44 is not set
+# CONFIG_BNX2 is not set
+# CONFIG_CNIC is not set
+CONFIG_TIGON3=y
+# CONFIG_BNX2X is not set
+CONFIG_NET_VENDOR_BROCADE=y
+# CONFIG_BNA is not set
+# CONFIG_NET_CALXEDA_XGMAC is not set
+CONFIG_NET_VENDOR_CHELSIO=y
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_CHELSIO_T3 is not set
+# CONFIG_CHELSIO_T4 is not set
+# CONFIG_CHELSIO_T4VF is not set
+CONFIG_NET_VENDOR_CIRRUS=y
+# CONFIG_CS89x0 is not set
+CONFIG_NET_VENDOR_CISCO=y
+# CONFIG_ENIC is not set
+# CONFIG_DNET is not set
+CONFIG_NET_VENDOR_DEC=y
+# CONFIG_NET_TULIP is not set
+CONFIG_NET_VENDOR_DLINK=y
+# CONFIG_DL2K is not set
+# CONFIG_SUNDANCE is not set
+CONFIG_NET_VENDOR_EMULEX=y
+# CONFIG_BE2NET is not set
+CONFIG_NET_VENDOR_EXAR=y
+# CONFIG_S2IO is not set
+# CONFIG_VXGE is not set
+CONFIG_NET_VENDOR_FUJITSU=y
+CONFIG_NET_VENDOR_HP=y
+# CONFIG_HP100 is not set
+CONFIG_NET_VENDOR_INTEL=y
+# CONFIG_E100 is not set
+# CONFIG_E1000 is not set
+CONFIG_E1000E=y
+# CONFIG_IGB is not set
+# CONFIG_IGBVF is not set
+# CONFIG_IXGB is not set
+# CONFIG_IXGBE is not set
+# CONFIG_IXGBEVF is not set
+CONFIG_NET_VENDOR_I825XX=y
+# CONFIG_IP1000 is not set
+# CONFIG_JME is not set
+CONFIG_NET_VENDOR_MARVELL=y
+# CONFIG_MVMDIO is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+CONFIG_NET_VENDOR_MELLANOX=y
+# CONFIG_MLX4_EN is not set
+# CONFIG_MLX4_CORE is not set
+CONFIG_NET_VENDOR_MICREL=y
+# CONFIG_KS8851_MLL is not set
+# CONFIG_KSZ884X_PCI is not set
+CONFIG_NET_VENDOR_MYRI=y
+# CONFIG_MYRI10GE is not set
+# CONFIG_FEALNX is not set
+CONFIG_NET_VENDOR_NATSEMI=y
+# CONFIG_NATSEMI is not set
+# CONFIG_NS83820 is not set
+CONFIG_NET_VENDOR_8390=y
+# CONFIG_NE2000 is not set
+# CONFIG_NE2K_PCI is not set
+# CONFIG_ULTRA is not set
+# CONFIG_WD80x3 is not set
+CONFIG_NET_VENDOR_NVIDIA=y
+# CONFIG_FORCEDETH is not set
+CONFIG_NET_VENDOR_OKI=y
+# CONFIG_PCH_GBE is not set
+# CONFIG_ETHOC is not set
+CONFIG_NET_PACKET_ENGINE=y
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+CONFIG_NET_VENDOR_QLOGIC=y
+# CONFIG_QLA3XXX is not set
+# CONFIG_QLCNIC is not set
+# CONFIG_QLGE is not set
+# CONFIG_NETXEN_NIC is not set
+CONFIG_NET_VENDOR_REALTEK=y
+# CONFIG_ATP is not set
+# CONFIG_8139CP is not set
+# CONFIG_8139TOO is not set
+# CONFIG_R8169 is not set
+CONFIG_NET_VENDOR_RDC=y
+# CONFIG_R6040 is not set
+CONFIG_NET_VENDOR_SEEQ=y
+CONFIG_NET_VENDOR_SILAN=y
+# CONFIG_SC92031 is not set
+CONFIG_NET_VENDOR_SIS=y
+# CONFIG_SIS900 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SFC is not set
+CONFIG_NET_VENDOR_SMSC=y
+# CONFIG_SMC9194 is not set
+# CONFIG_EPIC100 is not set
+# CONFIG_SMSC9420 is not set
+CONFIG_NET_VENDOR_STMICRO=y
+# CONFIG_STMMAC_ETH is not set
+CONFIG_NET_VENDOR_SUN=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NIU is not set
+CONFIG_NET_VENDOR_TEHUTI=y
+# CONFIG_TEHUTI is not set
+CONFIG_NET_VENDOR_TI=y
+# CONFIG_TLAN is not set
+CONFIG_NET_VENDOR_VIA=y
+# CONFIG_VIA_RHINE is not set
+# CONFIG_VIA_VELOCITY is not set
+CONFIG_NET_VENDOR_WIZNET=y
+# CONFIG_WIZNET_W5100 is not set
+# CONFIG_WIZNET_W5300 is not set
+CONFIG_FDDI=y
+# CONFIG_DEFXX is not set
+# CONFIG_SKFP is not set
+CONFIG_HIPPI=y
+# CONFIG_ROADRUNNER is not set
+# CONFIG_NET_SB1000 is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_AT803X_PHY is not set
+# CONFIG_AMD_PHY is not set
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_BCM87XX_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MICREL_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+# CONFIG_PLIP is not set
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+# CONFIG_PPP_MPPE is not set
+CONFIG_PPP_MULTILINK=y
+# CONFIG_PPPOATM is not set
+# CONFIG_PPPOE is not set
+# CONFIG_PPPOLAC is not set
+# CONFIG_PPPOPNS is not set
+CONFIG_PPP_ASYNC=y
+# CONFIG_PPP_SYNC_TTY is not set
+# CONFIG_SLIP is not set
+CONFIG_SLHC=y
+# CONFIG_WLAN is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+CONFIG_WAN=y
+# CONFIG_HDLC is not set
+# CONFIG_DLCI is not set
+# CONFIG_SBNI is not set
+# CONFIG_VMXNET3 is not set
+CONFIG_ISDN=y
+# CONFIG_ISDN_I4L is not set
+# CONFIG_ISDN_CAPI is not set
+# CONFIG_ISDN_DRV_GIGASET is not set
+# CONFIG_HYSDN is not set
+# CONFIG_MISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
+# CONFIG_INPUT_MATRIXKMAP is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+# CONFIG_INPUT_KEYRESET is not set
+# CONFIG_INPUT_KEYCOMBO is not set
+# CONFIG_INPUT_CFBOOST is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ADP5589 is not set
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_QT1070 is not set
+# CONFIG_KEYBOARD_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_TCA8418 is not set
+# CONFIG_KEYBOARD_LM8333 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_MCS is not set
+# CONFIG_KEYBOARD_MPR121 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_CYPRESS=y
+CONFIG_MOUSE_PS2_LIFEBOOK=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_ELANTECH is not set
+# CONFIG_MOUSE_PS2_SENTELIC is not set
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_CYAPA is not set
+# CONFIG_MOUSE_INPORT is not set
+# CONFIG_MOUSE_LOGIBM is not set
+# CONFIG_MOUSE_PC110PAD is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_SYNAPTICS_I2C is not set
+CONFIG_INPUT_JOYSTICK=y
+# CONFIG_JOYSTICK_ANALOG is not set
+# CONFIG_JOYSTICK_A3D is not set
+# CONFIG_JOYSTICK_ADI is not set
+# CONFIG_JOYSTICK_COBRA is not set
+# CONFIG_JOYSTICK_GF2K is not set
+# CONFIG_JOYSTICK_GRIP is not set
+# CONFIG_JOYSTICK_GRIP_MP is not set
+# CONFIG_JOYSTICK_GUILLEMOT is not set
+# CONFIG_JOYSTICK_INTERACT is not set
+# CONFIG_JOYSTICK_SIDEWINDER is not set
+# CONFIG_JOYSTICK_TMDC is not set
+# CONFIG_JOYSTICK_IFORCE is not set
+# CONFIG_JOYSTICK_WARRIOR is not set
+# CONFIG_JOYSTICK_MAGELLAN is not set
+# CONFIG_JOYSTICK_SPACEORB is not set
+# CONFIG_JOYSTICK_SPACEBALL is not set
+# CONFIG_JOYSTICK_STINGER is not set
+# CONFIG_JOYSTICK_TWIDJOY is not set
+# CONFIG_JOYSTICK_ZHENHUA is not set
+# CONFIG_JOYSTICK_DB9 is not set
+# CONFIG_JOYSTICK_GAMECON is not set
+# CONFIG_JOYSTICK_TURBOGRAFX is not set
+# CONFIG_JOYSTICK_AS5011 is not set
+# CONFIG_JOYSTICK_JOYDUMP is not set
+# CONFIG_JOYSTICK_WALKERA0701 is not set
+CONFIG_INPUT_TABLET=y
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set
+# CONFIG_TOUCHSCREEN_BU21013 is not set
+# CONFIG_CYPRESS_SAR is not set
+# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set
+# CONFIG_TOUCHSCREEN_DYNAPRO is not set
+# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_ILI210X is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_WACOM_I2C is not set
+# CONFIG_TOUCHSCREEN_MAX11801 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MMS114 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_HTCPEN is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set
+# CONFIG_TOUCHSCREEN_PANJIT_I2C is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_PIXCIR is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC_SERIO is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_ST1232 is not set
+# CONFIG_TOUCHSCREEN_TPS6507X is not set
+# CONFIG_TOUCHSCREEN_MAX1187X is not set
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_I2C=y
+# CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_HID_I2C is not set
+# CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_AD714X is not set
+# CONFIG_INPUT_BMA150 is not set
+# CONFIG_INPUT_PCSPKR is not set
+# CONFIG_INPUT_MMA8450 is not set
+# CONFIG_INPUT_MPU3050 is not set
+# CONFIG_INPUT_WISTRON_BTNS is not set
+# CONFIG_INPUT_KEYCHORD is not set
+# CONFIG_INPUT_KXTJ9 is not set
+CONFIG_INPUT_UINPUT=y
+# CONFIG_INPUT_GPIO is not set
+# CONFIG_INPUT_PCF8574 is not set
+# CONFIG_INPUT_ADXL34X is not set
+# CONFIG_INPUT_CMA3000 is not set
+# CONFIG_INPUT_DRV2603_VIBRATOR is not set
+# CONFIG_INV_AK8975 is not set
+# CONFIG_INV_MPU is not set
+# CONFIG_INV_BMP180 is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PARKBD is not set
+# CONFIG_SERIO_PCIPS2 is not set
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=y
+# CONFIG_SERIO_ALTERA_PS2 is not set
+# CONFIG_SERIO_PS2MULT is not set
+# CONFIG_SERIO_ARC_PS2 is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_TTY=y
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_ROCKETPORT is not set
+# CONFIG_CYCLADES is not set
+# CONFIG_MOXA_INTELLIO is not set
+# CONFIG_MOXA_SMARTIO is not set
+# CONFIG_SYNCLINK is not set
+# CONFIG_SYNCLINKMP is not set
+# CONFIG_SYNCLINK_GT is not set
+# CONFIG_NOZOMI is not set
+# CONFIG_ISI is not set
+# CONFIG_N_HDLC is not set
+# CONFIG_N_GSM is not set
+# CONFIG_TRACE_SINK is not set
+CONFIG_DEVMEM=y
+CONFIG_DEVKMEM=y
+CONFIG_STALDRV=y
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y
+CONFIG_SERIAL_8250_PNP=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_FIX_EARLYCON_MEM=y
+CONFIG_SERIAL_8250_PCI=y
+CONFIG_SERIAL_8250_NR_UARTS=48
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+# CONFIG_HAS_NS_SUPER_IO_CHIP is not set
+# CONFIG_SERIAL_8250_FOURPORT is not set
+# CONFIG_SERIAL_8250_ACCENT is not set
+# CONFIG_SERIAL_8250_BOCA is not set
+# CONFIG_SERIAL_8250_EXAR_ST16C554 is not set
+# CONFIG_SERIAL_8250_HUB6 is not set
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_SERIAL_8250_DETECT_IRQ is not set
+CONFIG_SERIAL_8250_RSA=y
+# CONFIG_SERIAL_8250_DW is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MFD_HSU is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_SCCNXP is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_PCH_UART is not set
+# CONFIG_SERIAL_ARC is not set
+# CONFIG_SERIAL_RP2 is not set
+
+#
+# Diag Support
+#
+
+#
+# DIAG traffic over USB
+#
+
+#
+# SDIO support for DIAG
+#
+
+#
+# HSIC/SMUX support for DIAG
+#
+CONFIG_PRINTER=y
+# CONFIG_LP_CONSOLE is not set
+# CONFIG_PPDEV is not set
+CONFIG_HVC_DRIVER=y
+CONFIG_VIRTIO_CONSOLE=y
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_HW_RANDOM_INTEL is not set
+# CONFIG_HW_RANDOM_AMD is not set
+# CONFIG_HW_RANDOM_GEODE is not set
+# CONFIG_HW_RANDOM_VIA is not set
+CONFIG_HW_RANDOM_VIRTIO=y
+CONFIG_NVRAM=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_SONYPI is not set
+# CONFIG_MWAVE is not set
+# CONFIG_PC8736x_GPIO is not set
+# CONFIG_NSC_GPIO is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_HANGCHECK_TIMER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+CONFIG_DEVPORT=y
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+# CONFIG_I2C_CHARDEV is not set
+# CONFIG_I2C_MUX is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_INPUT_CWSTM32 is not set
+# CONFIG_I2C_SLAVE is not set
+# CONFIG_I2C_HELPER_AUTO is not set
+# CONFIG_I2C_SMBUS is not set
+
+#
+# I2C Algorithms
+#
+CONFIG_I2C_ALGOBIT=y
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# PC SMBus host controller drivers
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+CONFIG_I2C_ISCH=y
+# CONFIG_I2C_ISMT is not set
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE_PCI is not set
+# CONFIG_I2C_EG20T is not set
+# CONFIG_I2C_INTEL_MID is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_PXA_PCI is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_ISA is not set
+# CONFIG_SCx200_ACB is not set
+# CONFIG_I2C_PCA_GMI is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_SPI is not set
+
+#
+# Qualcomm MSM SSBI bus support
+#
+# CONFIG_SSBI is not set
+# CONFIG_HSI is not set
+
+#
+# PPS support
+#
+CONFIG_PPS=y
+# CONFIG_PPS_DEBUG is not set
+
+#
+# PPS clients support
+#
+# CONFIG_PPS_CLIENT_KTIMER is not set
+# CONFIG_PPS_CLIENT_LDISC is not set
+# CONFIG_PPS_CLIENT_PARPORT is not set
+# CONFIG_PPS_CLIENT_GPIO is not set
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+CONFIG_PTP_1588_CLOCK=y
+
+#
+# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks.
+#
+# CONFIG_PTP_1588_CLOCK_PCH is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+CONFIG_GPIO_DEVRES=y
+# CONFIG_GPIOLIB is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_POWER_AVS is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_ABITUGURU3 is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7410 is not set
+# CONFIG_SENSORS_ADT7411 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
+# CONFIG_SENSORS_K8TEMP is not set
+# CONFIG_SENSORS_K10TEMP is not set
+# CONFIG_SENSORS_FAM15H_POWER is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS620 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_I5K_AMB is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_FSCHMD is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_HIH6130 is not set
+# CONFIG_SENSORS_CORETEMP is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_JC42 is not set
+# CONFIG_SENSORS_LINEAGE is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM73 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4151 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LTC4261 is not set
+# CONFIG_SENSORS_LM95234 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_LM95245 is not set
+# CONFIG_SENSORS_MAX16065 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX1668 is not set
+# CONFIG_SENSORS_MAX197 is not set
+# CONFIG_SENSORS_MAX6639 is not set
+# CONFIG_SENSORS_MAX6642 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_MAX6697 is not set
+# CONFIG_SENSORS_MCP3021 is not set
+# CONFIG_SENSORS_NCT6775 is not set
+# CONFIG_SENSORS_NTC_THERMISTOR is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_PMBUS is not set
+# CONFIG_SENSORS_SHT21 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_SMM665 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_EMC1403 is not set
+# CONFIG_SENSORS_EMC2103 is not set
+# CONFIG_SENSORS_EMC6W201 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_SCH56XX_COMMON is not set
+# CONFIG_SENSORS_SCH5627 is not set
+# CONFIG_SENSORS_SCH5636 is not set
+# CONFIG_SENSORS_ADS1015 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_AMC6821 is not set
+# CONFIG_SENSORS_INA209 is not set
+# CONFIG_SENSORS_INA2XX is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP102 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_TMON_TMP411 is not set
+# CONFIG_SENSORS_VIA_CPUTEMP is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83795 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_APPLESMC is not set
+# CONFIG_SENSORS_INA219 is not set
+# CONFIG_SENSORS_INA230 is not set
+# CONFIG_SENSORS_INA3221 is not set
+# CONFIG_BATTERY_SYSTEM_VOLTAGE_MONITOR is not set
+# CONFIG_CABLE_VBUS_MONITOR is not set
+# CONFIG_THERMAL is not set
+
+#
+# Trusty
+#
+# CONFIG_TRUSTY is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_ACQUIRE_WDT is not set
+# CONFIG_ADVANTECH_WDT is not set
+# CONFIG_ALIM1535_WDT is not set
+# CONFIG_ALIM7101_WDT is not set
+# CONFIG_F71808E_WDT is not set
+# CONFIG_SP5100_TCO is not set
+# CONFIG_SC520_WDT is not set
+# CONFIG_SBC_FITPC2_WATCHDOG is not set
+# CONFIG_EUROTECH_WDT is not set
+# CONFIG_IB700_WDT is not set
+# CONFIG_IBMASR is not set
+# CONFIG_WAFER_WDT is not set
+# CONFIG_I6300ESB_WDT is not set
+# CONFIG_IE6XX_WDT is not set
+CONFIG_ITCO_WDT=y
+CONFIG_ITCO_VENDOR_SUPPORT=y
+# CONFIG_IT8712F_WDT is not set
+# CONFIG_IT87_WDT is not set
+# CONFIG_HP_WATCHDOG is not set
+# CONFIG_SC1200_WDT is not set
+# CONFIG_PC87413_WDT is not set
+# CONFIG_NV_TCO is not set
+# CONFIG_60XX_WDT is not set
+# CONFIG_SBC8360_WDT is not set
+# CONFIG_SBC7240_WDT is not set
+# CONFIG_CPU5_WDT is not set
+# CONFIG_SMSC_SCH311X_WDT is not set
+# CONFIG_SMSC37B787_WDT is not set
+# CONFIG_VIA_WDT is not set
+# CONFIG_W83627HF_WDT is not set
+# CONFIG_W83697HF_WDT is not set
+# CONFIG_W83697UG_WDT is not set
+# CONFIG_W83877F_WDT is not set
+# CONFIG_W83977F_WDT is not set
+# CONFIG_MACHZ_WDT is not set
+# CONFIG_SBC_EPX_C3_WATCHDOG is not set
+
+#
+# ISA-based Watchdog Cards
+#
+# CONFIG_PCWATCHDOG is not set
+# CONFIG_MIXCOMWD is not set
+# CONFIG_WDT is not set
+
+#
+# PCI-based Watchdog Cards
+#
+# CONFIG_PCIPCWATCHDOG is not set
+# CONFIG_WDTPCI is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+CONFIG_BCMA_POSSIBLE=y
+
+#
+# Broadcom specific AMBA
+#
+# CONFIG_BCMA is not set
+
+#
+# Multifunction device drivers
+#
+CONFIG_MFD_CORE=y
+# CONFIG_MFD_CS5535 is not set
+# CONFIG_MFD_AS3711 is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_AIC3256 is not set
+# CONFIG_MFD_AIC3256_I2C is not set
+# CONFIG_MFD_CROS_EC is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_DA9052_I2C is not set
+# CONFIG_MFD_DA9055 is not set
+# CONFIG_MFD_MC13XXX_I2C is not set
+# CONFIG_HTC_PASIC3 is not set
+CONFIG_LPC_ICH=y
+CONFIG_LPC_SCH=y
+# CONFIG_MFD_JANZ_CMODIO is not set
+# CONFIG_MFD_88PM800 is not set
+# CONFIG_MFD_88PM805 is not set
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_MAX77665 is not set
+# CONFIG_MFD_MAX77686 is not set
+# CONFIG_MFD_MAX77693 is not set
+# CONFIG_MFD_MAX8907 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_MAX8997 is not set
+# CONFIG_MFD_MAX8998 is not set
+# CONFIG_MFD_RETU is not set
+# CONFIG_AIC3XXX_I2C_REGMAP is not set
+# CONFIG_AIC3XXX_CORE is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_RDC321X is not set
+# CONFIG_MFD_RTSX_PCI is not set
+# CONFIG_MFD_RC5T583 is not set
+# CONFIG_MFD_SEC_CORE is not set
+# CONFIG_MFD_SI476X_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_SMSC is not set
+# CONFIG_ABX500_CORE is not set
+# CONFIG_MFD_MAX8831 is not set
+# CONFIG_MFD_STMPE is not set
+# CONFIG_MFD_AS3722 is not set
+# CONFIG_MFD_SYSCON is not set
+# CONFIG_MFD_TI_AM335X_TSCADC is not set
+# CONFIG_MFD_LP8788 is not set
+# CONFIG_MFD_PALMAS is not set
+# CONFIG_MFD_MAX8907C is not set
+# CONFIG_MFD_MAX77663 is not set
+# CONFIG_MFD_AS3720 is not set
+# CONFIG_TPS6105X is not set
+# CONFIG_TPS6507X is not set
+# CONFIG_MFD_TPS65090 is not set
+# CONFIG_MFD_TPS65217 is not set
+# CONFIG_MFD_TPS6586X is not set
+# CONFIG_MFD_TPS80031 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_TWL6040_CORE is not set
+# CONFIG_MFD_WL1273_CORE is not set
+# CONFIG_MFD_LM3533 is not set
+# CONFIG_MFD_TC3589X is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_VX855 is not set
+# CONFIG_MFD_ARIZONA_I2C is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X_I2C is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_MAX77660 is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_AGP is not set
+CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
+# CONFIG_DRM is not set
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_EXYNOS_VIDEO is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Console display driver support
+#
+CONFIG_VGA_CONSOLE=y
+CONFIG_VGACON_SOFT_SCROLLBACK=y
+CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=256
+# CONFIG_MDA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_DS90UH925Q_SER is not set
+# CONFIG_SOUND is not set
+
+#
+# HID support
+#
+CONFIG_HID=y
+# CONFIG_HIDRAW is not set
+# CONFIG_UHID is not set
+CONFIG_HID_GENERIC=y
+
+#
+# Special HID drivers
+#
+CONFIG_HID_A4TECH=y
+# CONFIG_HID_ACRUX is not set
+CONFIG_HID_APPLE=y
+# CONFIG_HID_AUREAL is not set
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+# CONFIG_DRAGONRISE_FF is not set
+# CONFIG_HID_EMS_FF is not set
+# CONFIG_HID_ELECOM is not set
+CONFIG_HID_EZKEY=y
+# CONFIG_HID_KEYTOUCH is not set
+CONFIG_HID_KYE=y
+# CONFIG_HID_UCLOGIC is not set
+# CONFIG_HID_WALTOP is not set
+CONFIG_HID_GYRATION=y
+# CONFIG_HID_ICADE is not set
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_KENSINGTON=y
+# CONFIG_HID_LCPOWER is not set
+CONFIG_HID_LOGITECH=y
+# CONFIG_HID_LOGITECH_DJ is not set
+# CONFIG_LOGITECH_FF is not set
+# CONFIG_LOGIRUMBLEPAD2_FF is not set
+# CONFIG_LOGIG940_FF is not set
+# CONFIG_LOGIWHEELS_FF is not set
+# CONFIG_HID_MAGICMOUSE is not set
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+# CONFIG_HID_MULTITOUCH is not set
+# CONFIG_HID_ORTEK is not set
+CONFIG_HID_PANTHERLORD=y
+# CONFIG_PANTHERLORD_FF is not set
+CONFIG_HID_PETALYNX=y
+# CONFIG_HID_PICOLCD is not set
+# CONFIG_HID_PRIMAX is not set
+# CONFIG_HID_PS3REMOTE is not set
+# CONFIG_HID_SAITEK is not set
+CONFIG_HID_SAMSUNG=y
+# CONFIG_HID_SPEEDLINK is not set
+# CONFIG_HID_STEELSERIES is not set
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+# CONFIG_GREENASIA_FF is not set
+CONFIG_HID_SMARTJOYPLUS=y
+# CONFIG_SMARTJOYPLUS_FF is not set
+# CONFIG_HID_TIVO is not set
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
+# CONFIG_THRUSTMASTER_FF is not set
+CONFIG_HID_ZEROPLUS=y
+# CONFIG_ZEROPLUS_FF is not set
+# CONFIG_HID_ZYDACRON is not set
+# CONFIG_HID_SENSOR_HUB is not set
+
+#
+# I2C HID support
+#
+# CONFIG_I2C_HID is not set
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB_ARCH_HAS_XHCI=y
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_UWB is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_SWITCH is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
+CONFIG_EDAC=y
+CONFIG_EDAC_LEGACY_SYSFS=y
+# CONFIG_EDAC_DEBUG is not set
+CONFIG_EDAC_DECODE_MCE=y
+# CONFIG_EDAC_MCE_INJ is not set
+# CONFIG_EDAC_MM_EDAC is not set
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+# CONFIG_VIRT_DRIVERS is not set
+CONFIG_VIRTIO=y
+
+#
+# Virtio drivers
+#
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+# CONFIG_VIRTIO_MMIO is not set
+
+#
+# Microsoft Hyper-V guest support
+#
+# CONFIG_STAGING is not set
+# CONFIG_ANDROID is not set
+CONFIG_X86_PLATFORM_DEVICES=y
+# CONFIG_CHROMEOS_LAPTOP is not set
+# CONFIG_SENSORS_HDAPS is not set
+# CONFIG_IBM_RTL is not set
+# CONFIG_SAMSUNG_Q10 is not set
+
+#
+# Hardware Spinlock drivers
+#
+CONFIG_CLKSRC_I8253=y
+CONFIG_CLKEVT_I8253=y
+CONFIG_I8253_LOCK=y
+CONFIG_CLKBLD_I8253=y
+# CONFIG_MAILBOX is not set
+# CONFIG_IOMMU_SUPPORT is not set
+
+#
+# Remoteproc drivers
+#
+# CONFIG_STE_MODEM_RPROC is not set
+
+#
+# Rpmsg drivers
+#
+# CONFIG_PM_DEVFREQ is not set
+# CONFIG_EXTCON is not set
+# CONFIG_MIPI_BIF is not set
+# CONFIG_MEMORY is not set
+# CONFIG_IIO is not set
+# CONFIG_VME_BUS is not set
+# CONFIG_PWM is not set
+# CONFIG_IPACK_BUS is not set
+# CONFIG_RESET_CONTROLLER is not set
+
+#
+# SYSEDP Framework
+#
+# CONFIG_SYSEDP_FRAMEWORK is not set
+# CONFIG_GK20A is not set
+
+#
+# Android
+#
+
+#
+# Firmware Drivers
+#
+# CONFIG_EDD is not set
+CONFIG_FIRMWARE_MEMMAP=y
+# CONFIG_DELL_RBU is not set
+# CONFIG_DCDBAS is not set
+CONFIG_DMIID=y
+# CONFIG_DMI_SYSFS is not set
+CONFIG_ISCSI_IBFT_FIND=y
+# CONFIG_ISCSI_IBFT is not set
+# CONFIG_GOOGLE_FIRMWARE is not set
+
+#
+# File systems
+#
+CONFIG_DCACHE_WORD_ACCESS=y
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_USE_FOR_EXT23=y
+# CONFIG_EXT4_FS_POSIX_ACL is not set
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION=y
+CONFIG_EXT4_DEBUG=y
+CONFIG_JBD2=y
+CONFIG_JBD2_DEBUG=y
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_EXPORTFS=y
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_FANOTIFY is not set
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_QUOTA_DEBUG is not set
+CONFIG_QUOTA_TREE=y
+CONFIG_QFMT_V1=y
+CONFIG_QFMT_V2=y
+CONFIG_QUOTACTL=y
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+CONFIG_GENERIC_ACL=y
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=y
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+# CONFIG_MSDOS_FS is not set
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+CONFIG_NTFS_FS=y
+# CONFIG_NTFS_DEBUG is not set
+CONFIG_NTFS_RW=y
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_VMCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+# CONFIG_REPORT_PRESENT_CPUS is not set
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_HUGETLBFS=y
+CONFIG_HUGETLB_PAGE=y
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+CONFIG_ECRYPT_FS=y
+# CONFIG_ECRYPT_FS_MESSAGING is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
+CONFIG_CRAMFS=y
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX6FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_PSTORE is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_F2FS_FS is not set
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="cp437"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+CONFIG_NLS_CODEPAGE_850=y
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_MAC_ROMAN is not set
+# CONFIG_NLS_MAC_CELTIC is not set
+# CONFIG_NLS_MAC_CENTEURO is not set
+# CONFIG_NLS_MAC_CROATIAN is not set
+# CONFIG_NLS_MAC_CYRILLIC is not set
+# CONFIG_NLS_MAC_GAELIC is not set
+# CONFIG_NLS_MAC_GREEK is not set
+# CONFIG_NLS_MAC_ICELAND is not set
+# CONFIG_NLS_MAC_INUIT is not set
+# CONFIG_NLS_MAC_ROMANIAN is not set
+# CONFIG_NLS_MAC_TURKISH is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Kernel hacking
+#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
+CONFIG_ENABLE_WARN_DEPRECATED=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_STRIP_ASM_SYMS=y
+# CONFIG_READABLE_ASM is not set
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_SECTION_MISMATCH is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_LESS_GCC_OPT is not set
+# CONFIG_DEBUG_SHIRQ is not set
+# CONFIG_LOCKUP_DETECTOR is not set
+# CONFIG_PANIC_ON_OOPS is not set
+CONFIG_PANIC_ON_OOPS_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+CONFIG_HAVE_DEBUG_KMEMLEAK=y
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_LOCK_ALLOC=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_LOCKDEP=y
+CONFIG_LOCK_STAT=y
+# CONFIG_DEBUG_LOCKDEP is not set
+CONFIG_TRACE_IRQFLAGS=y
+# CONFIG_DEBUG_ATOMIC_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
+CONFIG_DEBUG_STACK_USAGE=y
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_HIGHMEM is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_INFO_REDUCED is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_VIRTUAL is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_TEST_LIST_SORT is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+CONFIG_ARCH_WANT_FRAME_POINTERS=y
+CONFIG_FRAME_POINTER=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+
+#
+# RCU Debugging
+#
+# CONFIG_PROVE_RCU is not set
+# CONFIG_SPARSE_RCU_POINTER is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+# CONFIG_RCU_CPU_STALL_INFO is not set
+# CONFIG_RCU_TRACE is not set
+# CONFIG_KPROBES_SANITY_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_DEBUG_PER_CPU_MAPS is not set
+# CONFIG_LKDTM is not set
+# CONFIG_NOTIFIER_ERROR_INJECTION is not set
+# CONFIG_FAULT_INJECTION is not set
+CONFIG_LATENCYTOP=y
+CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS=y
+# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_WANT_PAGE_DEBUG_FLAGS=y
+CONFIG_PAGE_GUARD=y
+CONFIG_USER_STACKTRACE_SUPPORT=y
+CONFIG_NOP_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_TRACER_MAX_TRACE=y
+CONFIG_TRACE_CLOCK=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_TRACING=y
+CONFIG_GENERIC_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+CONFIG_FUNCTION_TRACER=y
+# CONFIG_IRQSOFF_TRACER is not set
+CONFIG_SCHED_TRACER=y
+# CONFIG_FTRACE_SYSCALLS is not set
+CONFIG_TRACER_SNAPSHOT=y
+# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+CONFIG_STACK_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_KPROBE_EVENT=y
+# CONFIG_UPROBE_EVENT is not set
+CONFIG_PROBE_EVENTS=y
+CONFIG_DYNAMIC_FTRACE=y
+CONFIG_DYNAMIC_FTRACE_WITH_REGS=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_FTRACE_MCOUNT_RECORD=y
+# CONFIG_FTRACE_STARTUP_TEST is not set
+# CONFIG_MMIOTRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_RING_BUFFER_STARTUP_TEST is not set
+# CONFIG_TRACELEVEL is not set
+# CONFIG_TRACEDUMP is not set
+# CONFIG_RBTREE_TEST is not set
+# CONFIG_INTERVAL_TREE_TEST is not set
+CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
+CONFIG_DYNAMIC_DEBUG=y
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_HAVE_ARCH_KMEMCHECK=y
+# CONFIG_TEST_STRING_HELPERS is not set
+# CONFIG_TEST_KSTRTOX is not set
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_X86_VERBOSE_BOOTUP=y
+CONFIG_EARLY_PRINTK=y
+# CONFIG_EARLY_PRINTK_DBGP is not set
+CONFIG_DEBUG_STACKOVERFLOW=y
+# CONFIG_X86_PTDUMP is not set
+CONFIG_DEBUG_RODATA=y
+# CONFIG_DEBUG_RODATA_TEST is not set
+# CONFIG_DEBUG_SET_MODULE_RONX is not set
+# CONFIG_DEBUG_NX_TEST is not set
+CONFIG_DOUBLEFAULT=y
+# CONFIG_DEBUG_TLBFLUSH is not set
+# CONFIG_IOMMU_STRESS is not set
+CONFIG_HAVE_MMIOTRACE_SUPPORT=y
+# CONFIG_X86_DECODER_SELFTEST is not set
+CONFIG_IO_DELAY_TYPE_0X80=0
+CONFIG_IO_DELAY_TYPE_0XED=1
+CONFIG_IO_DELAY_TYPE_UDELAY=2
+CONFIG_IO_DELAY_TYPE_NONE=3
+CONFIG_IO_DELAY_0X80=y
+# CONFIG_IO_DELAY_0XED is not set
+# CONFIG_IO_DELAY_UDELAY is not set
+# CONFIG_IO_DELAY_NONE is not set
+CONFIG_DEFAULT_IO_DELAY_TYPE=0
+CONFIG_DEBUG_BOOT_PARAMS=y
+# CONFIG_CPA_DEBUG is not set
+CONFIG_OPTIMIZE_INLINING=y
+# CONFIG_DEBUG_NMI_SELFTEST is not set
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_ENCRYPTED_KEYS is not set
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
+CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
+CONFIG_SECURITY_NETWORK=y
+# CONFIG_SECURITY_NETWORK_XFRM is not set
+# CONFIG_SECURITY_PATH is not set
+# CONFIG_SECURITY_SELINUX is not set
+# CONFIG_SECURITY_SMACK is not set
+# CONFIG_SECURITY_TOMOYO is not set
+# CONFIG_SECURITY_APPARMOR is not set
+# CONFIG_TRUSTED_LITTLE_KERNEL is not set
+# CONFIG_SECURITY_YAMA is not set
+# CONFIG_IMA is not set
+# CONFIG_EVM is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_USER is not set
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
+# CONFIG_CRYPTO_GF128MUL is not set
+CONFIG_CRYPTO_NULL=y
+# CONFIG_CRYPTO_PCRYPT is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+CONFIG_CRYPTO_SEQIV=y
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CTR=y
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_CMAC is not set
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=y
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=y
+# CONFIG_CRYPTO_CRC32C_INTEL is not set
+# CONFIG_CRYPTO_CRC32 is not set
+# CONFIG_CRYPTO_CRC32_PCLMUL is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=y
+# CONFIG_CRYPTO_RMD128 is not set
+CONFIG_CRYPTO_RMD160=y
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_AES_586=y
+# CONFIG_CRYPTO_AES_NI_INTEL is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_BLOWFISH=y
+CONFIG_CRYPTO_BLOWFISH_COMMON=y
+CONFIG_CRYPTO_CAMELLIA=y
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SALSA20_586 is not set
+# CONFIG_CRYPTO_SEED is not set
+CONFIG_CRYPTO_SERPENT=y
+# CONFIG_CRYPTO_SERPENT_SSE2_586 is not set
+# CONFIG_CRYPTO_TEA is not set
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_TWOFISH_COMMON=y
+# CONFIG_CRYPTO_TWOFISH_586 is not set
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_USER_API_HASH is not set
+# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
+CONFIG_CRYPTO_HW=y
+CONFIG_CRYPTO_DEV_PADLOCK=y
+# CONFIG_CRYPTO_DEV_PADLOCK_AES is not set
+# CONFIG_CRYPTO_DEV_PADLOCK_SHA is not set
+# CONFIG_CRYPTO_DEV_GEODE is not set
+# CONFIG_CRYPTO_DEV_TEGRA_SE is not set
+# CONFIG_ASYMMETRIC_KEY_TYPE is not set
+CONFIG_HAVE_KVM=y
+CONFIG_HAVE_KVM_IRQCHIP=y
+CONFIG_HAVE_KVM_IRQ_ROUTING=y
+CONFIG_HAVE_KVM_EVENTFD=y
+CONFIG_KVM_APIC_ARCHITECTURE=y
+CONFIG_KVM_MMIO=y
+CONFIG_KVM_ASYNC_PF=y
+CONFIG_HAVE_KVM_MSI=y
+CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_KVM_INTEL=y
+# CONFIG_KVM_AMD is not set
+# CONFIG_KVM_MMU_AUDIT is not set
+# CONFIG_LGUEST is not set
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_STRNCPY_FROM_USER=y
+CONFIG_GENERIC_STRNLEN_USER=y
+CONFIG_GENERIC_FIND_FIRST_BIT=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_IOMAP=y
+CONFIG_GENERIC_IO=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=y
+CONFIG_CRC_T10DIF=y
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC32=y
+# CONFIG_CRC32_SELFTEST is not set
+CONFIG_CRC32_SLICEBY8=y
+# CONFIG_CRC32_SLICEBY4 is not set
+# CONFIG_CRC32_SARWATE is not set
+# CONFIG_CRC32_BIT is not set
+# CONFIG_CRC7 is not set
+CONFIG_LIBCRC32C=y
+# CONFIG_CRC8 is not set
+CONFIG_AUDIT_GENERIC=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_XZ_DEC=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_BCJ=y
+# CONFIG_XZ_DEC_TEST is not set
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_DECOMPRESS_XZ=y
+CONFIG_DECOMPRESS_LZO=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_DQL=y
+CONFIG_NLATTR=y
+CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y
+CONFIG_AVERAGE=y
+# CONFIG_CORDIC is not set
+# CONFIG_DDR is not set
diff --git a/arch/x86/include/asm/apb_timer.h b/arch/x86/include/asm/apb_timer.h
index 0acbac2..2a67dda 100644
--- a/arch/x86/include/asm/apb_timer.h
+++ b/arch/x86/include/asm/apb_timer.h
@@ -44,6 +44,7 @@
 
 static inline unsigned long apbt_quick_calibrate(void) {return 0; }
 static inline void apbt_time_init(void) { }
+static inline void apbt_setup_secondary_clock(void) { }
 
 #endif
 #endif /* ASM_X86_APBT_H */
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index c6cd358..04a4890 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -92,12 +92,53 @@
 #endif
 #define smp_read_barrier_depends()	read_barrier_depends()
 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
-#else
+#else /* !SMP */
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
 #define smp_read_barrier_depends()	do { } while (0)
 #define set_mb(var, value) do { var = value; barrier(); } while (0)
+#endif /* SMP */
+
+#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+
+/*
+ * For either of these options x86 doesn't have a strong TSO memory
+ * model and we should fall back to full barriers.
+ */
+
+#define smp_store_release(p, v)						\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	smp_mb();							\
+	ACCESS_ONCE(*p) = (v);						\
+} while (0)
+
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	smp_mb();							\
+	___p1;								\
+})
+
+#else /* regular x86 TSO memory ordering */
+
+#define smp_store_release(p, v)						\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	barrier();							\
+	ACCESS_ONCE(*p) = (v);						\
+} while (0)
+
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	barrier();							\
+	___p1;								\
+})
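+
+/*
+ * A minimal pairing sketch (illustrative, not part of the original
+ * patch; 'ready' and 'data' are assumed shared variables):
+ *
+ *	writer:	data = 42;
+ *		smp_store_release(&ready, 1);
+ *
+ *	reader:	if (smp_load_acquire(&ready))
+ *			do_something(data);	(guaranteed to see data == 42)
+ */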
+
 #endif
 
 /*
diff --git a/arch/x86/include/asm/bcm_bt_lpm.h b/arch/x86/include/asm/bcm_bt_lpm.h
new file mode 100644
index 0000000..d6d0557
--- /dev/null
+++ b/arch/x86/include/asm/bcm_bt_lpm.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef BCM_BT_LPM_H
+#define BCM_BT_LPM_H
+
+#include <linux/serial_core.h>
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+/* UART driver must call this every time it begins TX, to ensure
+ * this driver keeps WAKE asserted during TX. Called with the uart
+ * spinlock held. */
+extern void bcm_bt_lpm_exit_lpm_locked(struct device *dev,
+							struct hci_dev *hdev);
+
+struct bcm_bt_lpm_platform_data {
+	int gpio_wake;		/* CPU -> BCM wakeup gpio */
+	int gpio_host_wake;	/* BCM -> CPU wakeup gpio */
+	int int_host_wake;	/* BCM -> CPU wakeup irq */
+	int gpio_enable;	/* GPIO enable/disable BT/FM */
+
+	int port;			/* UART port to use with BT/FM */
+	/*
+	 * Callback to request the uart driver to clock off.
+	 * Called with uart spinlock held.
+	 */
+	void (*uart_disable)(struct device *tty);
+	/*
+	 * Callback to request the uart driver to clock on.
+	 * Called with uart spinlock held.
+	 */
+	void (*uart_enable)(struct device *tty);
+};
+
+#endif
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 9d7d36c..fcc715e 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -119,6 +119,7 @@
 	FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
 #ifdef	CONFIG_X86_INTEL_MID
 	FIX_LNW_VRTC,
+	FIX_CLOCK_CTL,
 #endif
 	__end_of_permanent_fixed_addresses,
 
diff --git a/arch/x86/include/asm/gpio.h b/arch/x86/include/asm/gpio.h
index b3799d8..131c96d 100644
--- a/arch/x86/include/asm/gpio.h
+++ b/arch/x86/include/asm/gpio.h
@@ -1,4 +1,27 @@
+#ifndef _ARCH_X86_GPIO_H
+#define _ARCH_X86_GPIO_H
+
+#ifdef CONFIG_ARCH_HAVE_CUSTOM_GPIO_H
+
+#if CONFIG_ARCH_NR_GPIO > 0
+#define ARCH_NR_GPIOS CONFIG_ARCH_NR_GPIO
+#endif
+
+#include <asm-generic/gpio.h>
+
+/* The trivial gpiolib dispatchers */
+#define gpio_get_value	__gpio_get_value
+#define gpio_set_value	__gpio_set_value
+#define gpio_cansleep	__gpio_cansleep
+#define gpio_to_irq	__gpio_to_irq
+
+#else /* ! CONFIG_ARCH_HAVE_CUSTOM_GPIO_H */
+
 #ifndef __LINUX_GPIO_H
 #warning Include linux/gpio.h instead of asm/gpio.h
 #include <linux/gpio.h>
 #endif
+
+#endif /* ! CONFIG_ARCH_HAVE_CUSTOM_GPIO_H */
+
+#endif /* _ARCH_X86_GPIO_H */
diff --git a/arch/x86/include/asm/idle.h b/arch/x86/include/asm/idle.h
index c5d1785..02bab09 100644
--- a/arch/x86/include/asm/idle.h
+++ b/arch/x86/include/asm/idle.h
@@ -1,13 +1,6 @@
 #ifndef _ASM_X86_IDLE_H
 #define _ASM_X86_IDLE_H
 
-#define IDLE_START 1
-#define IDLE_END 2
-
-struct notifier_block;
-void idle_notifier_register(struct notifier_block *n);
-void idle_notifier_unregister(struct notifier_block *n);
-
 #ifdef CONFIG_X86_64
 void enter_idle(void);
 void exit_idle(void);
diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h
new file mode 100644
index 0000000..c5860b4
--- /dev/null
+++ b/arch/x86/include/asm/intel-mid.h
@@ -0,0 +1,248 @@
+/*
+ * intel-mid.h: Intel MID specific setup code
+ *
+ * (C) Copyright 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _ASM_X86_INTEL_MID_H
+#define _ASM_X86_INTEL_MID_H
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <asm/intel_mid_pcihelpers.h>
+
+#ifdef CONFIG_SFI
+extern int get_gpio_by_name(const char *name);
+extern void install_irq_resource(struct platform_device *pdev, int irq);
+#else
+static inline int get_gpio_by_name(const char *name) { return -ENODEV; }
+/* Dummy function to prevent a compilation error on BYT */
+static inline void install_irq_resource(struct platform_device *pdev, int irq)
+{}
+#endif
+
+extern int intel_mid_pci_init(void);
+extern void *get_oem0_table(void);
+extern void intel_delayed_device_register(void *dev,
+			void (*delayed_callback)(void *dev_desc));
+extern void intel_scu_device_register(struct platform_device *pdev);
+extern struct devs_id *get_device_id(u8 type, char *name);
+extern int __init sfi_parse_mrtc(struct sfi_table_header *table);
+extern int __init sfi_parse_mtmr(struct sfi_table_header *table);
+extern int sfi_mrtc_num;
+extern struct sfi_rtc_table_entry sfi_mrtc_array[];
+extern void register_rpmsg_service(char *name, int id, u32 addr);
+extern int sdhci_pci_request_regulators(void);
+
+/*
+ * Soft platform ID (SPID) is defined only to comply with the OEMB table
+ * format; SPID itself is not supported.
+ */
+#define INTEL_PLATFORM_SSN_SIZE 32
+struct soft_platform_id {
+	u16 customer_id; /* Defines the final customer for the product */
+	u16 vendor_id; /* Defines who owns the final product delivery */
+	u16 manufacturer_id; /* Defines who builds the hardware. This can be
+			      * different for the same product */
+	u16 platform_family_id; /* Defined by vendor and defines the family of
+				 * the product with the same root components */
+	u16 product_line_id; /* Defined by vendor and defines the name of the
+			      * product. This can be used to differentiate the
+			      * feature set for the same product family (low
+			      * cost vs full feature). */
+	u16 hardware_id; /* Defined by vendor and defines the physical hardware
+			  * component set present on the PCB/FAB */
+	u8  fru[SPID_FRU_SIZE]; /* Field Replaceable Unit */
+} __packed;
+
+/* OEMB table */
+struct sfi_table_oemb {
+	struct sfi_table_header header;
+	u32 board_id;
+	u32 board_fab;
+	u8 iafw_major_version;
+	u8 iafw_main_version;
+	u8 val_hooks_major_version;
+	u8 val_hooks_minor_version;
+	u8 ia_suppfw_major_version;
+	u8 ia_suppfw_minor_version;
+	u8 scu_runtime_major_version;
+	u8 scu_runtime_minor_version;
+	u8 ifwi_major_version;
+	u8 ifwi_minor_version;
+	struct soft_platform_id spid;
+	u8 ssn[INTEL_PLATFORM_SSN_SIZE];
+} __packed;
+
+/*
+ * This defines the array of device platform data that IAFW exports
+ * through the SFI "DEVS" table; we use name and type to match a device
+ * to its platform data.
+ */
+struct devs_id {
+	char name[SFI_NAME_LEN + 1];
+	u8 type;
+	u8 delay;
+	void *(*get_platform_data)(void *info);
+	void (*device_handler)(struct sfi_device_table_entry *pentry,
+				struct devs_id *dev);
+	/* Custom handler for devices */
+	u8 trash_itp; /* true if this driver uses a pin muxed with the XDB connector */
+};
+
+#define SD_NAME_SIZE 16
+/**
+ * struct sd_board_info - template for device creation
+ * @name: Initializes sdio_device.name; identifies the driver.
+ * @bus_num: board-specific identifier for a given SDIO controller.
+ * @addr: board-specific device address.
+ * @board_ref_clock: Initializes sd_device.board_ref_clock.
+ * @platform_data: Initializes sd_device.platform_data; the particular
+ *      data stored there is driver-specific.
+ */
+struct sd_board_info {
+	char            name[SD_NAME_SIZE];
+	int             bus_num;
+	unsigned short  addr;
+	u32             board_ref_clock;
+	void            *platform_data;
+};
+
+
+/*
+ * Medfield is the follow-up to Moorestown: it combines a two-chip solution
+ * into one. It also adds always-running, constant TSC and LAPIC timers.
+ * Medfield is the platform name and the chip is called Penwell; we treat
+ * Medfield/Penwell as a variant of Moorestown. Penwell can be identified
+ * via MSRs.
+ */
+enum intel_mid_cpu_type {
+	INTEL_CPU_CHIP_NOTMID = 0,
+	/* 1 was Moorestown */
+	INTEL_MID_CPU_CHIP_PENWELL = 2,
+	INTEL_MID_CPU_CHIP_CLOVERVIEW,
+	INTEL_MID_CPU_CHIP_TANGIER,
+	INTEL_MID_CPU_CHIP_VALLEYVIEW2,
+	INTEL_MID_CPU_CHIP_ANNIEDALE,
+	INTEL_MID_CPU_CHIP_CARBONCANYON,
+};
+
+extern enum intel_mid_cpu_type __intel_mid_cpu_chip;
+
+/**
+ * struct intel_mid_ops - Interface between intel-mid & sub archs
+ * @arch_setup: arch_setup function to re-initialize platform
+ *             structures (x86_init, x86_platform_init)
+ *
+ * This structure can be extended if any new interface is required
+ * between intel-mid & its sub arch files.
+ */
+struct intel_mid_ops {
+	void (*arch_setup)(void);
+};
+
+/* Helper API's for INTEL_MID_OPS_INIT */
+#define DECLARE_INTEL_MID_OPS_INIT(cpuname, cpuid)[cpuid] = \
+		get_##cpuname##_ops,
+
+/* Maximum number of CPU ops */
+#define MAX_CPU_OPS(a) (sizeof(a)/sizeof(void *))
+
+/*
+ * For every new cpu addition, a weak get_<cpuname>_ops() function needs to be
+ * declared in arch/x86/platform/intel_mid/intel_mid_weak_decls.h.
+ */
+#define INTEL_MID_OPS_INIT {\
+	DECLARE_INTEL_MID_OPS_INIT(penwell, INTEL_MID_CPU_CHIP_PENWELL) \
+	DECLARE_INTEL_MID_OPS_INIT(cloverview, INTEL_MID_CPU_CHIP_CLOVERVIEW) \
+	DECLARE_INTEL_MID_OPS_INIT(tangier, INTEL_MID_CPU_CHIP_TANGIER) \
+};
+
+static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void)
+{
+#ifdef CONFIG_X86_INTEL_MID
+	return __intel_mid_cpu_chip;
+#else
+	return INTEL_CPU_CHIP_NOTMID;
+#endif
+}
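+
+/*
+ * A dispatch sketch (mirrors the INTEL_MID_OPS_INIT comment above;
+ * intel_mid_ops and the weak get_<cpuname>_ops() functions are assumed
+ * to be provided elsewhere):
+ *
+ *	static void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
+ *	enum intel_mid_cpu_type id = intel_mid_identify_cpu();
+ *
+ *	if (id < MAX_CPU_OPS(get_intel_mid_ops) && get_intel_mid_ops[id])
+ *		intel_mid_ops = get_intel_mid_ops[id]();
+ */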
+
+enum intel_mid_timer_options {
+	INTEL_MID_TIMER_DEFAULT,
+	INTEL_MID_TIMER_APBT_ONLY,
+	INTEL_MID_TIMER_LAPIC_APBT,
+};
+
+extern enum intel_mid_timer_options intel_mid_timer_options;
+
+/*
+ * Penwell uses a spread-spectrum clock, so the frequency numbers are not
+ * exactly what the MSR would report according to the SDM.
+ */
+#define FSB_FREQ_83SKU	83200
+#define FSB_FREQ_100SKU	99840
+#define FSB_FREQ_133SKU	133000
+
+#define FSB_FREQ_167SKU	167000
+#define FSB_FREQ_200SKU	200000
+#define FSB_FREQ_267SKU	267000
+#define FSB_FREQ_333SKU	333000
+#define FSB_FREQ_400SKU	400000
+
+/* Bus Select SoC Fuse value */
+#define BSEL_SOC_FUSE_MASK	0x7
+#define BSEL_SOC_FUSE_001	0x1 /* FSB 133MHz */
+#define BSEL_SOC_FUSE_101	0x5 /* FSB 100MHz */
+#define BSEL_SOC_FUSE_111	0x7 /* FSB 83MHz */
+
+#define SFI_MTMR_MAX_NUM 8
+#define SFI_MRTC_MAX	8
+
+extern struct console early_mrst_console;
+extern void mrst_early_console_init(void);
+
+extern struct console early_mrfld_console;
+extern void mrfld_early_console_init(void);
+
+extern struct console early_hsu_console;
+extern void hsu_early_console_init(const char *);
+
+extern struct console early_pti_console;
+
+extern void intel_scu_devices_create(void);
+extern void intel_scu_devices_destroy(void);
+extern void intel_psh_devices_create(void);
+extern void intel_psh_devices_destroy(void);
+
+/* VRTC timer */
+#define MRST_VRTC_MAP_SZ	(1024)
+/*#define MRST_VRTC_PGOFFSET	(0xc00) */
+
+extern void intel_mid_rtc_init(void);
+
+enum intel_mid_sim_type {
+	INTEL_MID_CPU_SIMULATION_NONE = 0,
+	INTEL_MID_CPU_SIMULATION_VP,
+	INTEL_MID_CPU_SIMULATION_SLE,
+	INTEL_MID_CPU_SIMULATION_HVP,
+};
+extern enum intel_mid_sim_type __intel_mid_sim_platform;
+static inline enum intel_mid_sim_type intel_mid_identify_sim(void)
+{
+#ifdef CONFIG_X86_INTEL_MID
+	return __intel_mid_sim_platform;
+#else
+	return INTEL_MID_CPU_SIMULATION_NONE;
+#endif
+}
+
+#define INTEL_MID_IRQ_OFFSET 0x100
+
+extern void pstore_ram_reserve_memory(void);
+
+#endif /* _ASM_X86_INTEL_MID_H */
diff --git a/arch/x86/include/asm/intel_basincove_gpadc.h b/arch/x86/include/asm/intel_basincove_gpadc.h
new file mode 100644
index 0000000..d4d1ffa
--- /dev/null
+++ b/arch/x86/include/asm/intel_basincove_gpadc.h
@@ -0,0 +1,61 @@
+#ifndef __INTEL_BASINCOVE_GPADC_H__
+#define __INTEL_BASINCOVE_GPADC_H__
+
+#define GPADC_VBAT	(1 << 0)
+#define GPADC_BATID	(1 << 1)
+#define GPADC_IBAT	(1 << 2)
+#define GPADC_PMICTEMP	(1 << 3)
+#define GPADC_BATTEMP0	(1 << 4)
+#define GPADC_BATTEMP1	(1 << 5)
+#define GPADC_SYSTEMP0	(1 << 6)
+#define GPADC_SYSTEMP1	(1 << 7)
+#define GPADC_SYSTEMP2	(1 << 8)
+#define GPADC_CH_NUM	9
+
+#define MBATTEMP	(1 << 2)
+#define MSYSTEMP	(1 << 3)
+#define MBATT		(1 << 4)
+#define MVIBATT		(1 << 5)
+#define MCCTICK		(1 << 7)
+
+#define GPADC_RSL(channel, res) (res->data[ffs(channel)-1])
+
+struct gpadc_regmap_t {
+	char *name;
+	int cntl;       /* GPADC Conversion Control Bit indicator */
+	int rslth;      /* GPADC Conversion Result Register Addr High */
+	int rsltl;      /* GPADC Conversion Result Register Addr Low */
+};
+
+struct gpadc_regs_t {
+	u16 gpadcreq;
+	u16 gpadcreq_irqen;
+	u16 gpadcreq_busy;
+	u16 mirqlvl1;
+	u16 mirqlvl1_adc;
+	u16 adc1cntl;
+	u16 adcirq;
+	u16 madcirq;
+};
+
+struct iio_dev;
+
+struct intel_basincove_gpadc_platform_data {
+	int channel_num;
+	unsigned long intr;
+	u8 intr_mask;
+	struct iio_map *gpadc_iio_maps;
+	struct gpadc_regmap_t *gpadc_regmaps;
+	struct gpadc_regs_t *gpadc_regs;
+	const struct iio_chan_spec *gpadc_channels;
+};
+
+struct gpadc_result {
+	int data[GPADC_CH_NUM];
+};
+
+int iio_basincove_gpadc_sample(struct iio_dev *indio_dev,
+				int ch, struct gpadc_result *res);
+
+int intel_basincove_gpadc_sample(int ch, struct gpadc_result *res);
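+
+/*
+ * A caller sketch (illustrative; assumes 0 is returned on success):
+ *
+ *	struct gpadc_result res;
+ *
+ *	if (!intel_basincove_gpadc_sample(GPADC_VBAT, &res))
+ *		pr_info("vbat raw: %d\n", GPADC_RSL(GPADC_VBAT, &res));
+ */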
+#endif
diff --git a/arch/x86/include/asm/intel_basincove_ocd.h b/arch/x86/include/asm/intel_basincove_ocd.h
new file mode 100644
index 0000000..eff495e
--- /dev/null
+++ b/arch/x86/include/asm/intel_basincove_ocd.h
@@ -0,0 +1,161 @@
+#ifndef __INTEL_BASINCOVE_OCD_H__
+#define __INTEL_BASINCOVE_OCD_H__
+
+#define DRIVER_NAME "bcove_bcu"
+#define DEVICE_NAME "mrfl_pmic_bcu"
+
+/* Generic bit representation macros */
+#define B0	(1 << 0)
+#define B1	(1 << 1)
+#define B2	(1 << 2)
+#define B3	(1 << 3)
+#define B4	(1 << 4)
+#define B5	(1 << 5)
+#define B6	(1 << 6)
+#define B7	(1 << 7)
+
+/* 30 second delay macro for the VWARN2 interrupt unmask (enable) */
+#define VWARN2_INTR_EN_DELAY	(30 * HZ)
+
+/* IRQ registers */
+#define BCUIRQ                  0x05
+#define IRQLVL1                 0x01
+#define MIRQLVL1                0x0C
+
+/* Status registers */
+#define S_BCUINT                0x3B
+#define S_BCUCTRL               0x49
+
+/* PMIC SRAM address for BCU register */
+#define PMIC_SRAM_BCU_ADDR      0xFFFFF614
+#define IOMAP_LEN               1
+
+#define NUM_VOLT_LEVELS         3
+#define NUM_CURR_LEVELS         2
+
+#define VWARN_EN_MASK		B3
+#define ICCMAXVCC_EN_MASK	B6
+
+#define MVWARN1_MASK		B0
+#define MVWARN2_MASK		B1
+#define MVCRIT_MASK		B2
+
+#define MVCRIT			B2
+#define MVWARN2			B1
+#define MVWARN1			B0
+
+#define ICCMAXVCC_EN		(1 << 6)
+#define VWARN_EN		(1 << 3)
+#define VCRIT_SHUTDOWN		(1 << 4)
+
+#define BCU_ALERT               (1 << 3)
+#define VWARN1_IRQ              (1 << 0)
+#define VWARN2_IRQ              (1 << 1)
+#define VCRIT_IRQ               (1 << 2)
+#define GSMPULSE_IRQ            (1 << 3)
+#define TXPWRTH_IRQ             (1 << 4)
+
+/* Number of configurable thresholds for current and voltage */
+#define NUM_THRESHOLDS          8
+
+/* BCU real time status flags for corresponding input signals */
+#define SVWARN1                 (1<<0)
+#define SVWARN2                 (1<<1)
+#define SVCRIT                  (1<<2)
+
+/* S_BCUCTRL register status bits */
+#define S_CAMFLTORCH		B3
+#define S_CAMFLDIS		B2
+#define S_BCUDISW2		B1
+
+#define S_BCUDISW2_MASK		B1
+#define S_CAMFLDIS_MASK		B2
+#define S_CAMFLTORCH_MASK	B3
+
+/* Check whether a bit is sticky or not by checking its 5th bit */
+#define IS_STICKY(data)         (!!(data & 0x10))
+
+/* Check whether the signal is asserted for VW1/VW2/VC */
+#define IS_ASSRT_ON_VW1(data)   (!!(data & 0x01))
+#define IS_ASSRT_ON_VW2(data)   (!!(data & 0x02))
+#define IS_ASSRT_ON_VC(data)    (!!(data & 0x04))
+
+/* Configuration registers that monitor the voltage drop */
+#define VWARN1_CFG              0x3C
+#define VWARN2_CFG              0x3D
+#define VCRIT_CFG               0x3E
+#define ICCMAXVSYS_CFG          0x3F
+#define ICCMAXVCC_CFG           0x40
+#define ICCMAXVNN_CFG           0x41
+
+/* Behaviour registers */
+#define VFLEXSRC_BEH            0x42
+#define VFLEXDIS_BEH            0x43
+#define VIBDIS_BEH              0x44
+#define CAMFLTORCH_BEH          0x45
+#define CAMFLDIS_BEH            0x46
+#define BCUDISW2_BEH            0x47
+#define BCUDISCRIT_BEH          0x48
+
+/* IRQ mask register */
+#define MBCUIRQ                 0x10
+
+#define MRFL_SMIP_SRAM_ADDR	0xFFFCE000
+
+/* SMIP offset address from where the BCU related info should be read */
+#define BCU_SMIP_OFFSET		0x3BA
+
+/* Number of bytes to read from SMIP, starting at the BCU SMIP offset */
+#define NUM_SMIP_BYTES          14
+
+/* Max length of the register name string */
+#define MAX_REGNAME_LEN		15
+
+/* String to send the uevent along with env info to user space */
+#define EVT_STR	"BCUEVT="
+
+/* Macro to get the access mode for the BCU registers */
+#define MODE(m)	(((m != S_BCUINT) && (m != BCUIRQ) && (m != IRQLVL1))	\
+			? (S_IRUGO | S_IWUSR) : S_IRUGO)
+
+/* Generic macro to assign the parameters (reg name and address) */
+#define reg_info(x)	{ .name = #x, .addr = x, .mode = MODE(x) }
+
+/* Generic macro to fill the environmental data for bcu uevent */
+#define get_envp(evt)	(EVT_STR#evt)
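+
+/*
+ * Expansion sketch for the helpers above (illustrative only):
+ *
+ *	reg_info(VWARN1_CFG)
+ *		=> { .name = "VWARN1_CFG", .addr = 0x3C,
+ *		     .mode = S_IRUGO | S_IWUSR }
+ *	get_envp(VWARN1)
+ *		=> "BCUEVT=VWARN1"
+ */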
+
+/*
+ * These values are read from SMIP.
+ * SMIP contains these entries as default register configurations;
+ * the BCU is programmed to these default values during boot.
+ */
+
+struct ocd_bcove_config_data {
+	uint8_t vwarn1_cfg;
+	uint8_t vwarn2_cfg;
+	uint8_t vcrit_cfg;
+	uint8_t iccmaxvsys_cfg;
+	uint8_t iccmaxvcc_cfg;
+	uint8_t iccmaxvnn_cfg;
+	uint8_t vflexsrc_beh;
+	uint8_t vflexdis_beh;
+	uint8_t vibdis_beh;
+	uint8_t camfltorch_beh;
+	uint8_t camfldis_beh;
+	uint8_t bcudisw2_beh;
+	uint8_t bcudiscrit_beh;
+	uint8_t mbcuirq;
+} __packed;
+
+struct ocd_platform_data {
+	int (*bcu_config_data) (struct ocd_bcove_config_data *);
+};
+
+struct bcu_reg_info {
+	char	name[MAX_REGNAME_LEN];	/* register name   */
+	u16	addr;			/* offset address  */
+	u16	mode;			/* permission mode */
+};
+
+#endif
+
diff --git a/arch/x86/include/asm/intel_mid_gpadc.h b/arch/x86/include/asm/intel_mid_gpadc.h
new file mode 100644
index 0000000..ba62f83
--- /dev/null
+++ b/arch/x86/include/asm/intel_mid_gpadc.h
@@ -0,0 +1,19 @@
+#ifndef __INTEL_MID_GPADC_H__
+#define __INTEL_MID_GPADC_H__
+
+struct intel_mid_gpadc_platform_data {
+	unsigned long intr;
+};
+
+#define CH_NEED_VREF		(1 << 8)
+#define CH_NEED_VCALIB		(1 << 9)
+#define CH_NEED_ICALIB		(1 << 10)
+
+int intel_mid_gpadc_gsmpulse_sample(int *vol, int *cur);
+int intel_mid_gpadc_sample(void *handle, int sample_count, ...);
+int get_gpadc_sample(void *handle, int sample_count, int *buffer);
+void intel_mid_gpadc_free(void *handle);
+void *intel_mid_gpadc_alloc(int count, ...);
+void *gpadc_alloc_channels(int count, int *channel_info);
+#endif
+
diff --git a/arch/x86/include/asm/intel_mid_hsu.h b/arch/x86/include/asm/intel_mid_hsu.h
new file mode 100644
index 0000000..36b91fc
--- /dev/null
+++ b/arch/x86/include/asm/intel_mid_hsu.h
@@ -0,0 +1,69 @@
+#ifndef __INTEL_MID_HSU_H__
+#define __INTEL_MID_HSU_H__
+
+#define hsu_port_func_max 4
+
+enum {
+	hsu_port0,
+	hsu_port1,
+	hsu_port2,
+	hsu_port_share,
+	hsu_port_max,
+	hsu_dma,
+};
+
+enum {
+	bt_port,
+	modem_port,
+	gps_port,
+	debug_port,
+};
+
+enum {
+	hsu_intel,
+	hsu_dw,
+};
+
+struct hsu_port_cfg {
+	int type;
+	int hw_ip;
+	int index;
+	char *name;
+	int idle;
+	int has_alt;
+	int alt;
+	int force_suspend;
+	int preamble;
+	int hw_context_save;
+	int hw_ctrl_cts;
+	struct device *dev;
+	int (*hw_init)(struct device *dev, int port);
+	void(*hw_set_alt)(int port);
+	void(*hw_set_rts)(int port, int value);
+	void(*hw_suspend)(int port, struct device *dev, irq_handler_t wake_isr);
+	void(*hw_suspend_post)(int port);
+	void(*hw_resume)(int port, struct device *dev);
+	unsigned int (*hw_get_clk)(void);
+	void (*wake_peer)(struct device *tty);
+	void (*set_clk)(unsigned int m, unsigned int n,
+			void __iomem *addr);
+	void (*hw_reset)(void __iomem *addr);
+};
+
+
+void intel_mid_hsu_suspend(int port, struct device *dev,
+				irq_handler_t wake_isr);
+void intel_mid_hsu_resume(int port, struct device *dev);
+void intel_mid_hsu_rts(int port, int value);
+void intel_mid_hsu_switch(int port);
+int intel_mid_hsu_init(struct device *dev, int port);
+int intel_mid_hsu_func_to_port(unsigned int func);
+unsigned int intel_mid_hsu_get_clk(void);
+int hsu_register_board_info(void *inf);
+void intel_mid_hsu_suspend_post(int port);
+struct device *intel_mid_hsu_set_wake_peer(int port,
+			void (*wake_peer)(struct device *));
+void intel_mid_hsu_reset(void __iomem *addr);
+void intel_mid_hsu_set_clk(unsigned int m, unsigned int n,
+			void __iomem *addr);
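+
+/*
+ * A minimal port-config sketch (the field values are assumptions for
+ * illustration; a board file would register this via
+ * hsu_register_board_info()):
+ *
+ *	static struct hsu_port_cfg bt_cfg = {
+ *		.type	= bt_port,
+ *		.hw_ip	= hsu_intel,
+ *		.index	= hsu_port0,
+ *		.name	= "hsu_bt_port",
+ *	};
+ */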
+#endif
diff --git a/arch/x86/include/asm/intel_mid_pcihelpers.h b/arch/x86/include/asm/intel_mid_pcihelpers.h
new file mode 100644
index 0000000..d48026f
--- /dev/null
+++ b/arch/x86/include/asm/intel_mid_pcihelpers.h
@@ -0,0 +1,25 @@
+/*
+ * Access to message bus through three registers
+ * in CUNIT(0:0:0) PCI configuration space.
+ * MSGBUS_CTRL_REG(0xD0):
+ *   31:24      = message bus opcode
+ *   23:16      = message bus port
+ *   15:8       = message bus address, low 8 bits.
+ *   7:4        = message bus byte enables
+ * MSGBUS_CTRL_EXT_REG(0xD8):
+ *   31:8       = message bus address, high 24 bits.
+ * MSGBUS_DATA_REG(0xD4):
+ *   holds the data for write or read
+ */
+#define PCI_ROOT_MSGBUS_CTRL_REG        0xD0
+#define PCI_ROOT_MSGBUS_DATA_REG        0xD4
+#define PCI_ROOT_MSGBUS_CTRL_EXT_REG    0xD8
+#define PCI_ROOT_MSGBUS_READ            0x10
+#define PCI_ROOT_MSGBUS_WRITE           0x11
+#define PCI_ROOT_MSGBUS_DWORD_ENABLE    0xf0
+
+u32 intel_mid_msgbus_read32_raw(u32 cmd);
+u32 intel_mid_msgbus_read32(u8 port, u32 addr);
+void intel_mid_msgbus_write32_raw(u32 cmd, u32 data);
+void intel_mid_msgbus_write32(u8 port, u32 addr, u32 data);
+u32 intel_mid_soc_stepping(void);
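+
+/*
+ * A read-modify-write sketch (port 0x04 and address 0x30 are made-up
+ * values for illustration):
+ *
+ *	u32 val = intel_mid_msgbus_read32(0x04, 0x30);
+ *	intel_mid_msgbus_write32(0x04, 0x30, val | 0x1);
+ */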
diff --git a/arch/x86/include/asm/intel_mid_powerbtn.h b/arch/x86/include/asm/intel_mid_powerbtn.h
new file mode 100644
index 0000000..a0b4c87
--- /dev/null
+++ b/arch/x86/include/asm/intel_mid_powerbtn.h
@@ -0,0 +1,14 @@
+#ifndef __INTEL_MID_POWERBTN_H__
+#define __INTEL_MID_POWERBTN_H__
+
+struct intel_msic_power_btn_platform_data {
+	u32 pbstat;
+	u16 pb_level;
+	u16 irq_lvl1_mask;
+	int (*irq_ack)(struct intel_msic_power_btn_platform_data *);
+};
+
+#define MSIC_PB_LEN	1
+#define MSIC_PWRBTNM	(1 << 0)
+
+#endif
diff --git a/arch/x86/include/asm/intel_mid_pwm.h b/arch/x86/include/asm/intel_mid_pwm.h
new file mode 100644
index 0000000..fdd5221
--- /dev/null
+++ b/arch/x86/include/asm/intel_mid_pwm.h
@@ -0,0 +1,30 @@
+#ifndef __INTEL_MID_PWM_H__
+#define __INTEL_MID_PWM_H__
+
+#define MAX_DUTYCYCLE_PERCENTAGE 100
+
+enum {
+	PWM_LED = 0,
+	PWM_VIBRATOR,
+	PWM_LCD_BACKLIGHT,
+	PWM_NUM,
+};
+
+struct intel_mid_pwm_device_data {
+	u16 reg_clkdiv0;
+	u16 reg_clkdiv1;
+	u16 reg_dutycyc;
+	u8 val_clkdiv0;
+	u8 val_clkdiv1;
+};
+
+struct intel_mid_pwm_platform_data {
+	int pwm_num;
+	struct intel_mid_pwm_device_data *ddata;
+	u16 reg_clksel;
+	u8 val_clksel;
+};
+
+int intel_mid_pwm(int id, int value);
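+
+/*
+ * A caller sketch (assumes 'value' is a duty cycle in percent, bounded
+ * by MAX_DUTYCYCLE_PERCENTAGE):
+ *
+ *	intel_mid_pwm(PWM_LCD_BACKLIGHT, 80);
+ */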
+#endif
+
diff --git a/arch/x86/include/asm/intel_mid_remoteproc.h b/arch/x86/include/asm/intel_mid_remoteproc.h
new file mode 100644
index 0000000..d65a8139
--- /dev/null
+++ b/arch/x86/include/asm/intel_mid_remoteproc.h
@@ -0,0 +1,117 @@
+/*
+ * Intel MID Remote Processor Header File
+ *
+ * Copyright (C) 2012 Intel, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _ASM_INTEL_MID_REMOTEPROC_H
+#define _ASM_INTEL_MID_REMOTEPROC_H
+
+#define RP_IPC_COMMAND		0xA0
+#define RP_IPC_SIMPLE_COMMAND	0xA1
+#define RP_IPC_RAW_COMMAND	0xA2
+
+#define	RP_PMIC_ACCESS		0xFF
+#define	RP_DFU_REQUEST		0xFE
+#define	RP_SET_WATCHDOG		0xF8
+#define	RP_FLIS_ACCESS		0xF5
+#define	RP_GET_FW_REVISION	0xF4
+#define	RP_COLD_BOOT		0xF3
+#define	RP_COLD_RESET		0xF1
+#define	RP_COLD_OFF		0x80
+#define	RP_MIP_ACCESS		0xEC
+#define RP_GET_HOBADDR		0xE5
+#define RP_OSC_CLK_CTRL		0xE6
+#define RP_S0IX_COUNTER		0xE8
+#define RP_WRITE_OSNIB		0xE4
+#define RP_FW_UPDATE		0xFE
+#define RP_VRTC			0xFA
+#define RP_PMDB			0xE0
+#define RP_INDIRECT_WRITE	0x05
+
+/*
+ * Assign some temporary ids for the following devices.
+ * TODO: change these to meaningful values.
+ */
+#define RP_PMIC_GPIO		0X02
+#define RP_PMIC_AUDIO		0x03
+#define RP_MSIC_GPIO		0x05
+#define RP_MSIC_AUDIO		0x06
+#define RP_MSIC_OCD		0x07
+#define RP_MSIC_BATTERY		0XEF
+#define RP_MSIC_THERMAL		0x09
+#define RP_MSIC_POWER_BTN	0x10
+#define RP_IPC			0X11
+#define RP_IPC_UTIL		0X12
+#define RP_FW_ACCESS		0X13
+#define RP_UMIP_ACCESS		0x14
+#define RP_OSIP_ACCESS		0x15
+#define RP_MSIC_ADC		0x16
+#define RP_BQ24192		0x17
+#define RP_MSIC_CLV_AUDIO	0x18
+#define RP_PMIC_CCSM		0x19
+#define RP_PMIC_I2C		0x20
+#define RP_MSIC_MRFLD_AUDIO	0x21
+#define RP_MSIC_PWM		0x22
+#define RP_MSIC_KPD_LED		0x23
+#define RP_BCOVE_ADC		0x24
+#define RP_BCOVE_THERMAL	0x25
+#define RP_MRFL_OCD		0x26
+#define RP_FW_LOGGING		0x27
+#define RP_PMIC_CHARGER		0x28
+
+enum rproc_type {
+	RPROC_SCU = 0,
+	RPROC_PSH,
+	RPROC_NUM,
+};
+
+struct rproc_ops;
+struct platform_device;
+struct rpmsg_ns_msg;
+
+struct rpmsg_ns_info {
+	enum rproc_type type;
+	char name[RPMSG_NAME_SIZE];
+	u32 addr;
+	u32 flags;
+	struct list_head node;
+};
+
+struct rpmsg_ns_list {
+	struct list_head list;
+	struct mutex lock;
+};
+
+extern struct rpmsg_ns_info *rpmsg_ns_alloc(const char *name,
+						int id, u32 addr);
+extern void rpmsg_ns_add_to_list(struct rpmsg_ns_info *info,
+					struct rpmsg_ns_list *nslist);
+
+/*
+ * struct intel_mid_rproc_pdata - intel mid remoteproc's platform data
+ * @name: the remoteproc's name
+ * @firmware: name of firmware file to load
+ * @ops: start/stop rproc handlers
+ * @device_enable: handler for enabling a device
+ * @device_shutdown: handler for shutting down a device
+ */
+struct intel_mid_rproc_pdata {
+	const char *name;
+	const char *firmware;
+	const struct rproc_ops *ops;
+	int (*device_enable) (struct platform_device *pdev);
+	int (*device_shutdown) (struct platform_device *pdev);
+	struct rpmsg_ns_list *nslist;
+};
+
+#endif /* _ASM_INTEL_MID_REMOTEPROC_H */
diff --git a/arch/x86/include/asm/intel_mid_rpmsg.h b/arch/x86/include/asm/intel_mid_rpmsg.h
new file mode 100644
index 0000000..6a666a2
--- /dev/null
+++ b/arch/x86/include/asm/intel_mid_rpmsg.h
@@ -0,0 +1,77 @@
+#ifndef _INTEL_MID_RPMSG_H_
+#define _INTEL_MID_RPMSG_H_
+
+#include <asm/scu_ipc_rpmsg.h>
+#include <linux/rpmsg.h>
+
+#ifdef ANDROID_BUILD
+#include <linux/wakelock.h>
+#endif
+
+#define RPMSG_TX_TIMEOUT   (5 * HZ)
+
+struct rpmsg_instance {
+	struct rpmsg_channel *rpdev;
+	struct mutex instance_lock;
+	struct tx_ipc_msg *tx_msg;
+	struct rx_ipc_msg *rx_msg;
+	struct mutex rx_lock;
+	struct completion reply_arrived;
+	struct rpmsg_endpoint *endpoint;
+};
+
+struct rpmsg_lock {
+	struct mutex lock;
+	int locked_prev; /* locked prev flag */
+	atomic_t pending;
+};
+
+extern int rpmsg_send_command(struct rpmsg_instance *instance, u32 cmd,
+						u32 sub, u8 *in,
+						u32 *out, u32 inlen,
+						u32 outlen);
+
+extern int rpmsg_send_raw_command(struct rpmsg_instance *instance, u32 cmd,
+						u32 sub, u8 *in,
+						u32 *out, u32 inlen,
+						u32 outlen, u32 sptr,
+						u32 dptr);
+
+extern int rpmsg_send_simple_command(struct rpmsg_instance *instance, u32 cmd,
+						u32 sub);
+
+extern int alloc_rpmsg_instance(struct rpmsg_channel *rpdev,
+				struct rpmsg_instance **pInstance);
+
+extern void free_rpmsg_instance(struct rpmsg_channel *rpdev,
+				struct rpmsg_instance **pInstance);
+
+extern void init_rpmsg_instance(struct rpmsg_instance *instance);
+
+extern int rpmsg_send_generic_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
+				u32 *out, u32 outlen);
+
+extern int rpmsg_send_generic_simple_command(u32 cmd, u32 sub);
+
+extern int rpmsg_send_generic_raw_command(u32 cmd, u32 sub,
+				   u8 *in, u32 inlen,
+				   u32 *out, u32 outlen,
+				   u32 dptr, u32 sptr);
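+
+/*
+ * A caller sketch (illustrative; the command id is taken from
+ * intel_mid_remoteproc.h and the output length is an assumption):
+ *
+ *	u32 fw_rev;
+ *	int err = rpmsg_send_generic_command(RP_GET_FW_REVISION, 0,
+ *					     NULL, 0, &fw_rev, 1);
+ *	if (err)
+ *		pr_err("scu command failed: %d\n", err);
+ */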
+
+struct rpmsg_device_data {
+	char name[RPMSG_NAME_SIZE];
+	struct rpmsg_channel *rpdev;
+	struct rpmsg_instance *rpmsg_instance;
+};
+
+enum rpmsg_ipc_command_type {
+	RPMSG_IPC_COMMAND = 0,
+	RPMSG_IPC_SIMPLE_COMMAND,
+	RPMSG_IPC_RAW_COMMAND,
+	RPMSG_IPC_COMMAND_TYPE_NUM,
+};
+
+extern void rpmsg_global_lock(void);
+extern void rpmsg_global_unlock(void);
+
+#endif
diff --git a/arch/x86/include/asm/intel_mid_thermal.h b/arch/x86/include/asm/intel_mid_thermal.h
new file mode 100644
index 0000000..6a604a1
--- /dev/null
+++ b/arch/x86/include/asm/intel_mid_thermal.h
@@ -0,0 +1,77 @@
+#ifndef __INTEL_MID_THERMAL_H__
+#define __INTEL_MID_THERMAL_H__
+
+#include <linux/thermal.h>
+
+#define BPTHERM_NAME	"bptherm"
+#define SKIN0_NAME	"skin0"
+#define SKIN1_NAME	"skin1"
+#define MSIC_DIE_NAME	"msicdie"
+#define MSIC_SYS_NAME	"sys"
+#define SYSTHERM2       "systherm2"
+/**
+ * struct intel_mid_thermal_sensor - intel_mid_thermal sensor information
+ * @name:		name of the sensor
+ * @index:		index number of sensor
+ * @slope:		slope used for temp calculation
+ * @intercept:		intercept used for temp calculation
+ * @adc_channel:	adc channel id|flags
+ * @direct:		If true then direct conversion is used.
+ * @priv:		private sensor data
+ * @temp_correlation:	temp correlation function
+ */
+struct intel_mid_thermal_sensor {
+	char name[THERMAL_NAME_LENGTH];
+	int index;
+	long slope;
+	long intercept;
+	int adc_channel;
+	bool direct;
+	void *priv;
+	int (*temp_correlation)(void *info, long temp, long *res);
+};
+
+/**
+ * struct soc_throttle_data - SoC level power limits for thermal throttling
+ * @power_limit:	power limit value
+ * @floor_freq:		The CPU frequency may not go below this value
+ */
+struct soc_throttle_data {
+	int power_limit;
+	int floor_freq;
+};
+
+/**
+ * struct intel_mid_thermal_platform_data - Platform data for
+ *		intel mid thermal driver
+ *
+ * @num_sensors:	Maximum number of sensors supported
+ * @sensors:		sensor info
+ * @soc_cooling:	True or false
+ */
+struct intel_mid_thermal_platform_data {
+	int num_sensors;
+	struct intel_mid_thermal_sensor *sensors;
+	bool soc_cooling;
+};
+
+/**
+ * struct skin1_private_info - skin1 sensor private data
+ *
+ * @dependent:		dependency on other sensors
+ *			0   - no dependency,
+ *			> 0 - depends on other sensors
+ * @sensors:		dependent sensor address.
+ */
+struct skin1_private_info {
+	int dependent;
+	struct intel_mid_thermal_sensor **sensors;
+};
+
+/* skin0 sensor temperature correlation function */
+int skin0_temp_correlation(void *info, long temp, long *res);
+/* skin1 sensor temperature correlation function */
+int skin1_temp_correlation(void *info, long temp, long *res);
+/* bptherm sensor temperature correlation function */
+int bptherm_temp_correlation(void *info, long temp, long *res);
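+
+/*
+ * A correlation sketch (an assumed linear model, for illustration only):
+ *
+ *	static int linear_temp_correlation(void *info, long temp, long *res)
+ *	{
+ *		struct intel_mid_thermal_sensor *s = info;
+ *
+ *		*res = (temp * s->slope + s->intercept) / 1000;
+ *		return 0;
+ *	}
+ */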
+#endif
diff --git a/arch/x86/include/asm/intel_mid_vrtc.h b/arch/x86/include/asm/intel_mid_vrtc.h
new file mode 100644
index 0000000..11ababf
--- /dev/null
+++ b/arch/x86/include/asm/intel_mid_vrtc.h
@@ -0,0 +1,9 @@
+#ifndef _INTEL_MID_VRTC_H
+#define _INTEL_MID_VRTC_H
+
+extern unsigned char vrtc_cmos_read(unsigned char reg);
+extern void vrtc_cmos_write(unsigned char val, unsigned char reg);
+extern unsigned long vrtc_get_time(void);
+extern int vrtc_set_mmss(unsigned long nowtime);
+
+#endif
diff --git a/arch/x86/include/asm/intel_mip.h b/arch/x86/include/asm/intel_mip.h
new file mode 100644
index 0000000..f05fc05
--- /dev/null
+++ b/arch/x86/include/asm/intel_mip.h
@@ -0,0 +1,32 @@
+#ifndef _ASM_X86_INTEL_MIP_H_
+#define _ASM_X86_INTEL_MIP_H_
+
+#include <asm/intel-mid.h>
+
+/* SMIP property related definitions */
+#define SCU_MIP_DEV_NAME		"intel_scu_mip"
+#define SMIP_NUM_CONFIG_PROPS		6
+#define SMIP_MAX_PROP_LEN		4
+
+enum platform_prop {
+	USB_COMPLIANCE,
+	CHARGE_TERMINATION,
+	SHUTDOWN_METHODOLOGY,
+	MOS_TRANS_CAPACITY,
+	NFC_RESV_CAPACITY,
+	TEMP_CRIT_SHUTDOWN,
+};
+
+struct smip_platform_prop {
+	unsigned int offset;
+	unsigned int len;
+	bool is_bit_field;
+	unsigned int mask;
+};
+
+struct scu_mip_platform_data {
+	struct smip_platform_prop smip_prop[SMIP_NUM_CONFIG_PROPS];
+};
+
+int get_smip_property_by_name(enum platform_prop);
+#endif
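
A brief usage sketch of the accessor declared above; the negative-errno failure convention is an assumption, since the header does not document the return value:

static void example_smip_query(void)
{
	int prop = get_smip_property_by_name(TEMP_CRIT_SHUTDOWN);

	if (prop < 0)
		pr_err("SMIP property read failed: %d\n", prop);
}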
diff --git a/arch/x86/include/asm/intel_psh_ipc.h b/arch/x86/include/asm/intel_psh_ipc.h
new file mode 100644
index 0000000..ed4d3c14
--- /dev/null
+++ b/arch/x86/include/asm/intel_psh_ipc.h
@@ -0,0 +1,35 @@
+#ifndef _ASM_X86_INTEL_PSH_IPC_H_
+#define _ASM_X86_INTEL_PSH_IPC_H_
+
+#define CHANNEL_BUSY		(1 << 31)
+#define PSH_IPC_CONTINUE	(1 << 30)
+
+struct psh_msg {
+	u32 msg;
+	u32 param;
+};
+
+enum psh_channel {
+	PSH_SEND_CH0 = 0,
+	PSH_SEND_CH1,
+	PSH_SEND_CH2,
+	PSH_SEND_CH3,
+	NUM_IA2PSH_IPC,
+	PSH_RECV_CH0 = NUM_IA2PSH_IPC,
+	PSH_RECV_CH1,
+	PSH_RECV_CH2,
+	PSH_RECV_CH3,
+	PSH_RECV_END,
+	NUM_PSH2IA_IPC = PSH_RECV_END - PSH_RECV_CH0,
+	NUM_ALL_CH = NUM_IA2PSH_IPC + NUM_PSH2IA_IPC,
+};
+
+typedef void(*psh_channel_handle_t)(u32 msg, u32 param, void *data);
+int intel_ia2psh_command(struct psh_msg *in, struct psh_msg *out,
+			 int ch, int timeout);
+int intel_psh_ipc_bind(int ch, psh_channel_handle_t handle, void *data);
+void intel_psh_ipc_unbind(int ch);
+
+void intel_psh_ipc_disable_irq(void);
+void intel_psh_ipc_enable_irq(void);
+#endif
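
To illustrate the bind interface above, a hypothetical consumer could register a callback on a receive channel as below (handler and function names are invented; zero-on-success is assumed):

static void example_psh_handler(u32 msg, u32 param, void *data)
{
	pr_info("PSH: msg 0x%x param 0x%x\n", msg, param);
}

static int example_psh_init(void)
{
	int ret = intel_psh_ipc_bind(PSH_RECV_CH0, example_psh_handler, NULL);

	if (ret)
		pr_err("PSH bind failed: %d\n", ret);
	return ret;
}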
diff --git a/arch/x86/include/asm/intel_scu_flis.h b/arch/x86/include/asm/intel_scu_flis.h
new file mode 100644
index 0000000..7cea735
--- /dev/null
+++ b/arch/x86/include/asm/intel_scu_flis.h
@@ -0,0 +1,326 @@
+#ifndef _ASM_X86_INTEL_SCU_FLIS_H_
+#define _ASM_X86_INTEL_SCU_FLIS_H_
+
+enum flis_param_t {
+	PULL,
+	MUX,
+	OPEN_DRAIN,
+};
+
+/* For MERR */
+
+#define PULL_MASK	((7 << 4) | (3 << 8))
+#define MUX_MASK	(0xF << 12)
+#define OPEN_DRAIN_MASK	BIT(21)
+
+#define PULL_UP		(1 << 8)
+#define PULL_DOWN	(2 << 8)
+#define R2Kohms		(0 << 4)
+#define R20Kohms	(1 << 4)
+#define R50Kohms	(2 << 4)
+#define R910ohms	(3 << 4)
+
+#define UP_2K		(PULL_UP | R2Kohms)
+#define UP_20K		(PULL_UP | R20Kohms)
+#define UP_50K		(PULL_UP | R50Kohms)
+#define UP_910		(PULL_UP | R910ohms)
+#define DOWN_2K		(PULL_DOWN | R2Kohms)
+#define DOWN_20K	(PULL_DOWN | R20Kohms)
+#define DOWN_50K	(PULL_DOWN | R50Kohms)
+#define DOWN_910	(PULL_DOWN | R910ohms)
+
+#define OD_DISABLE	(0 << 21)
+#define OD_ENABLE	(1 << 21)
+
+#define MUX_EN_INPUT_EN		(2 << 12)
+#define INPUT_EN		(1 << 12)
+#define MUX_EN_OUTPUT_EN	(8 << 12)
+#define OUTPUT_EN		(4 << 12)
+
+/* Add prefix "tng_" to avoid name duplication with ctp pins */
+enum tng_pinname_t {
+	tng_usb_ulpi_0_clk = 0,
+	tng_usb_ulpi_0_data_0 = 1,
+	tng_usb_ulpi_0_data_1 = 2,
+	tng_usb_ulpi_0_data_2 = 3,
+	tng_usb_ulpi_0_data_3 = 4,
+	tng_usb_ulpi_0_data_4 = 5,
+	tng_usb_ulpi_0_data_5 = 6,
+	tng_usb_ulpi_0_data_6 = 7,
+	tng_usb_ulpi_0_data_7 = 8,
+	tng_usb_ulpi_0_dir = 9,
+	tng_usb_ulpi_0_nxt = 10,
+	tng_usb_ulpi_0_refclk = 11,
+	tng_usb_ulpi_0_stp = 12,
+	tng_emmc_0_clk = 13,
+	tng_emmc_0_cmd = 14,
+	tng_emmc_0_d_0 = 15,
+	tng_emmc_0_d_1 = 16,
+	tng_emmc_0_d_2 = 17,
+	tng_emmc_0_d_3 = 18,
+	tng_emmc_0_d_4 = 19,
+	tng_emmc_0_d_5 = 20,
+	tng_emmc_0_d_6 = 21,
+	tng_emmc_0_d_7 = 22,
+	tng_emmc_0_rst_b = 23,
+	tng_gp_emmc_1_clk = 24,
+	tng_gp_emmc_1_cmd = 25,
+	tng_gp_emmc_1_d_0 = 26,
+	tng_gp_emmc_1_d_1 = 27,
+	tng_gp_emmc_1_d_2 = 28,
+	tng_gp_emmc_1_d_3 = 29,
+	tng_gp_emmc_1_d_4 = 30,
+	tng_gp_emmc_1_d_5 = 31,
+	tng_gp_emmc_1_d_6 = 32,
+	tng_gp_emmc_1_d_7 = 33,
+	tng_gp_emmc_1_rst_b = 34,
+	tng_gp_28 = 35,
+	tng_gp_29 = 36,
+	tng_gp_sdio_0_cd_b = 37,
+	tng_gp_sdio_0_clk = 38,
+	tng_gp_sdio_0_cmd = 39,
+	tng_gp_sdio_0_dat_0 = 40,
+	tng_gp_sdio_0_dat_1 = 41,
+	tng_gp_sdio_0_dat_2 = 42,
+	tng_gp_sdio_0_dat_3 = 43,
+	tng_gp_sdio_0_lvl_clk_fb = 44,
+	tng_gp_sdio_0_lvl_cmd_dir = 45,
+	tng_gp_sdio_0_lvl_dat_dir = 46,
+	tng_gp_sdio_0_lvl_sel = 47,
+	tng_gp_sdio_0_powerdown_b = 48,
+	tng_gp_sdio_0_wp = 49,
+	tng_gp_sdio_1_clk = 50,
+	tng_gp_sdio_1_cmd = 51,
+	tng_gp_sdio_1_dat_0 = 52,
+	tng_gp_sdio_1_dat_1 = 53,
+	tng_gp_sdio_1_dat_2 = 54,
+	tng_gp_sdio_1_dat_3 = 55,
+	tng_gp_sdio_1_powerdown_b = 56,
+	tng_mhsi_acdata = 57,
+	tng_mhsi_acflag = 58,
+	tng_mhsi_acready = 59,
+	tng_mhsi_acwake = 60,
+	tng_mhsi_cadata = 61,
+	tng_mhsi_caflag = 62,
+	tng_mhsi_caready = 63,
+	tng_mhsi_cawake = 64,
+	tng_gp_mslim_0_bclk = 65,
+	tng_gp_mslim_0_bdat = 66,
+	tng_gp_ssp_0_clk = 67,
+	tng_gp_ssp_0_fs = 68,
+	tng_gp_ssp_0_rxd = 69,
+	tng_gp_ssp_0_txd = 70,
+	tng_gp_ssp_1_clk = 71,
+	tng_gp_ssp_1_fs = 72,
+	tng_gp_ssp_1_rxd = 73,
+	tng_gp_ssp_1_txd = 74,
+	tng_gp_ssp_2_clk = 75,
+	tng_gp_ssp_2_fs = 76,
+	tng_gp_ssp_2_rxd = 77,
+	tng_gp_ssp_2_txd = 78,
+	tng_gp_ssp_3_clk = 79,
+	tng_gp_ssp_3_fs = 80,
+	tng_gp_ssp_3_rxd = 81,
+	tng_gp_ssp_3_txd = 82,
+	tng_gp_ssp_4_clk = 83,
+	tng_gp_ssp_4_fs_0 = 84,
+	tng_gp_ssp_4_fs_1 = 85,
+	tng_gp_ssp_4_fs_2 = 86,
+	tng_gp_ssp_4_fs_3 = 87,
+	tng_gp_ssp_4_rxd = 88,
+	tng_gp_ssp_4_txd = 89,
+	tng_gp_ssp_5_clk = 90,
+	tng_gp_ssp_5_fs_0 = 91,
+	tng_gp_ssp_5_fs_1 = 92,
+	tng_gp_ssp_5_fs_2 = 93,
+	tng_gp_ssp_5_fs_3 = 94,
+	tng_gp_ssp_5_rxd = 95,
+	tng_gp_ssp_5_txd = 96,
+	tng_gp_ssp_6_clk = 97,
+	tng_gp_ssp_6_fs = 98,
+	tng_gp_ssp_6_rxd = 99,
+	tng_gp_ssp_6_txd = 100,
+	tng_gp_i2c_1_scl = 101,
+	tng_gp_i2c_1_sda = 102,
+	tng_gp_i2c_2_scl = 103,
+	tng_gp_i2c_2_sda = 104,
+	tng_gp_i2c_3_scl = 105,
+	tng_gp_i2c_3_sda = 106,
+	tng_gp_i2c_4_scl = 107,
+	tng_gp_i2c_4_sda = 108,
+	tng_gp_i2c_5_scl = 109,
+	tng_gp_i2c_5_sda = 110,
+	tng_gp_i2c_6_scl = 111,
+	tng_gp_i2c_6_sda = 112,
+	tng_gp_i2c_7_scl = 113,
+	tng_gp_i2c_7_sda = 114,
+	tng_gp_uart_0_cts = 115,
+	tng_gp_uart_0_rts = 116,
+	tng_gp_uart_0_rx = 117,
+	tng_gp_uart_0_tx = 118,
+	tng_gp_uart_1_cts = 119,
+	tng_gp_uart_1_rts = 120,
+	tng_gp_uart_1_rx = 121,
+	tng_gp_uart_1_tx = 122,
+	tng_gp_uart_2_cts = 123,
+	tng_gp_uart_2_rts = 124,
+	tng_gp_uart_2_rx = 125,
+	tng_gp_uart_2_tx = 126,
+	tng_gp_13 = 127,
+	tng_gp_14 = 128,
+	tng_gp_15 = 129,
+	tng_gp_16 = 130,
+	tng_gp_17 = 131,
+	tng_gp_18 = 132,
+	tng_gp_19 = 133,
+	tng_gp_20 = 134,
+	tng_gp_21 = 135,
+	tng_gp_22 = 136,
+	tng_gp_23 = 137,
+	tng_gp_24 = 138,
+	tng_gp_25 = 139,
+	tng_gp_fast_int_0 = 140,
+	tng_gp_fast_int_1 = 141,
+	tng_gp_fast_int_2 = 142,
+	tng_gp_fast_int_3 = 143,
+	tng_gp_pwm_0 = 144,
+	tng_gp_pwm_1 = 145,
+	tng_gp_camerasb_0 = 146,
+	tng_gp_camerasb_1 = 147,
+	tng_gp_camerasb_2 = 148,
+	tng_gp_camerasb_3 = 149,
+	tng_gp_camerasb_4 = 150,
+	tng_gp_camerasb_5 = 151,
+	tng_gp_camerasb_6 = 152,
+	tng_gp_camerasb_7 = 153,
+	tng_gp_camerasb_8 = 154,
+	tng_gp_camerasb_9 = 155,
+	tng_gp_camerasb_10 = 156,
+	tng_gp_camerasb_11 = 157,
+	tng_gp_clkph_0 = 158,
+	tng_gp_clkph_1 = 159,
+	tng_gp_clkph_2 = 160,
+	tng_gp_clkph_3 = 161,
+	tng_gp_clkph_4 = 162,
+	tng_gp_clkph_5 = 163,
+	tng_gp_hdmi_hpd = 164,
+	tng_gp_intd_dsi_te1 = 165,
+	tng_gp_intd_dsi_te2 = 166,
+	tng_osc_clk_ctrl_0 = 167,
+	tng_osc_clk_ctrl_1 = 168,
+	tng_osc_clk_out_0 = 169,
+	tng_osc_clk_out_1 = 170,
+	tng_osc_clk_out_2 = 171,
+	tng_osc_clk_out_3 = 172,
+	tng_osc_clk_out_4 = 173,
+	tng_resetout_b = 174,
+	tng_xxpmode = 175,
+	tng_xxprdy = 176,
+	tng_xxpreq_b = 177,
+	tng_gp_26 = 178,
+	tng_gp_27 = 179,
+	tng_i2c_0_scl = 180,
+	tng_i2c_0_sda = 181,
+	tng_ierr_b = 182,
+	tng_jtag_tckc = 183,
+	tng_jtag_tdic = 184,
+	tng_jtag_tdoc = 185,
+	tng_jtag_tmsc = 186,
+	tng_jtag_trst_b = 187,
+	tng_prochot_b = 188,
+	tng_rtc_clk = 189,
+	tng_svid_vclk = 190,
+	tng_svid_vdio = 191,
+	tng_thermtrip_b = 192,
+	tng_standby = 193,
+	tng_gp_kbd_dkin_0 = 194,
+	tng_gp_kbd_dkin_1 = 195,
+	tng_gp_kbd_dkin_2 = 196,
+	tng_gp_kbd_dkin_3 = 197,
+	tng_gp_kbd_mkin_0 = 198,
+	tng_gp_kbd_mkin_1 = 199,
+	tng_gp_kbd_mkin_2 = 200,
+	tng_gp_kbd_mkin_3 = 201,
+	tng_gp_kbd_mkin_4 = 202,
+	tng_gp_kbd_mkin_5 = 203,
+	tng_gp_kbd_mkin_6 = 204,
+	tng_gp_kbd_mkin_7 = 205,
+	tng_gp_kbd_mkout_0 = 206,
+	tng_gp_kbd_mkout_1 = 207,
+	tng_gp_kbd_mkout_2 = 208,
+	tng_gp_kbd_mkout_3 = 209,
+	tng_gp_kbd_mkout_4 = 210,
+	tng_gp_kbd_mkout_5 = 211,
+	tng_gp_kbd_mkout_6 = 212,
+	tng_gp_kbd_mkout_7 = 213,
+	tng_gp_0 = 214,
+	tng_gp_1 = 215,
+	tng_gp_2 = 216,
+	tng_gp_3 = 217,
+	tng_gp_4 = 218,
+	tng_gp_5 = 219,
+	tng_gp_6 = 220,
+	tng_gp_7 = 221,
+	tng_gp_8 = 222,
+	tng_gp_9 = 223,
+	tng_gp_10 = 224,
+	tng_gp_11 = 225,
+	tng_gp_12 = 226,
+	tng_gp_mpti_clk = 227,
+	tng_gp_mpti_data_0 = 228,
+	tng_gp_mpti_data_1 = 229,
+	tng_gp_mpti_data_2 = 230,
+	tng_gp_mpti_data_3 = 231,
+	TNG_PIN_NUM,
+};
+
+struct pinstruct_t {
+	bool valid;	/* whether the pin is allowed to be configured */
+	u8 bus_address;
+	u8 pullup_offset;
+	u8 pullup_lsb_pos;
+	u8 direction_offset;
+	u8 direction_lsb_pos;
+	u8 open_drain_offset;
+	u8 open_drain_bit;
+};
+
+enum ACCESS_CTRL {
+	readonly = (1 << 0),
+	writable = (1 << 1),
+};
+
+struct pin_mmio_flis_t {
+	u8 access_ctrl; /* mmio flis access control */
+	u32 offset;	/* pin offset from flis base address */
+};
+
+struct intel_scu_flis_platform_data {
+	struct pinstruct_t *pin_t;
+	int pin_num;
+	u32 flis_base;
+	u32 flis_len;
+	struct pin_mmio_flis_t *mmio_flis_t;
+};
+
+#define OPS_STR_LEN 10
+
+enum {
+	DBG_SHIM_FLIS_ADDR,
+	DBG_SHIM_OFFSET,
+	DBG_SHIM_DATA,
+
+	DBG_PARAM_VAL,
+	DBG_PARAM_TYPE,
+	DBG_PIN_NAME,
+};
+
+int intel_scu_ipc_write_shim(u32 data, u32 flis_addr, u32 offset);
+int intel_scu_ipc_read_shim(u32 *data, u32 flis_addr, u32 offset);
+int intel_scu_ipc_update_shim(u32 data, u32 mask, u32 flis_addr, u32 offset);
+int config_pin_flis(unsigned int name, enum flis_param_t param, u32 val);
+int get_pin_flis(unsigned int name, enum flis_param_t param, u32 *val);
+u32 get_flis_value(u32 offset);
+void set_flis_value(u32 value, u32 offset);
+
+#endif
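
As a sketch of how the pull macros compose with the accessors above (zero-on-success assumed), configuring a 20K pull-up on an I2C pin and reading it back could look like:

static int example_flis_setup(void)
{
	u32 val;
	int ret;

	ret = config_pin_flis(tng_gp_i2c_1_scl, PULL, UP_20K);
	if (!ret)
		ret = get_pin_flis(tng_gp_i2c_1_scl, PULL, &val);
	return ret;
}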
diff --git a/arch/x86/include/asm/intel_scu_ipc.h b/arch/x86/include/asm/intel_scu_ipc.h
index 925b605..3055f51 100644
--- a/arch/x86/include/asm/intel_scu_ipc.h
+++ b/arch/x86/include/asm/intel_scu_ipc.h
@@ -2,53 +2,92 @@
 #define  _ASM_X86_INTEL_SCU_IPC_H_
 
 #include <linux/notifier.h>
+#include <asm/intel-mid.h>
 
+/* IPC defines the following message types */
+#define IPCMSG_GET_HOBADDR	0xE5 /* OSHOB access. */
+#define IPCMSG_BATTERY          0xEF /* Coulomb Counter Accumulator */
+#define IPCMSG_MIP_ACCESS       0xEC /* IA MIP access */
+#define IPCMSG_PMDB_CMD		0xE0
 #define IPCMSG_WARM_RESET	0xF0
 #define IPCMSG_COLD_RESET	0xF1
 #define IPCMSG_SOFT_RESET	0xF2
 #define IPCMSG_COLD_BOOT	0xF3
-
+#define IPCMSG_COLD_OFF		0x80 /* for TNG only */
+#define IPCMSG_FW_REVISION      0xF4 /* Get firmware revision */
+#define IPCMSG_SHIM_CONFIG	0xF5 /* Configure SHIM */
+#define IPCMSG_WATCHDOG_TIMER   0xF8 /* Set Kernel Watchdog Threshold */
 #define IPCMSG_VRTC		0xFA	 /* Set vRTC device */
-	/* Command id associated with message IPCMSG_VRTC */
-	#define IPC_CMD_VRTC_SETTIME      1 /* Set time */
-	#define IPC_CMD_VRTC_SETALARM     2 /* Set alarm */
+#define IPCMSG_FW_UPDATE        0xFE /* Firmware update */
+#define IPCMSG_PCNTRL           0xFF /* Power controller unit read/write */
+#define IPCMSG_OSC_CLK		0xE6 /* Turn on/off osc clock */
+#define IPCMSG_S0IX_COUNTER	0xEB /* Get S0ix residency */
+#define IPCMSG_CLEAR_FABERROR	0xE3 /* Clear fabric error log */
+#define IPCMSG_STORE_NV_DATA	0xCD /* Store the Non Volatile data to RAM */
 
-/* Read single register */
-int intel_scu_ipc_ioread8(u16 addr, u8 *data);
+#define IPC_CMD_UMIP_RD     0
+#define IPC_CMD_UMIP_WR     1
+#define IPC_CMD_SMIP_RD     2
 
-/* Read two sequential registers */
-int intel_scu_ipc_ioread16(u16 addr, u16 *data);
+/* Command id associated with message IPCMSG_PCNTRL */
+#define IPC_CMD_PCNTRL_W      0 /* Register write */
+#define IPC_CMD_PCNTRL_R      1 /* Register read */
+#define IPC_CMD_PCNTRL_M      2 /* Register read-modify-write */
 
-/* Read four sequential registers */
-int intel_scu_ipc_ioread32(u16 addr, u32 *data);
+#define IPC_ERR_NONE			0
+#define IPC_ERR_CMD_NOT_SUPPORTED	1
+#define IPC_ERR_CMD_NOT_SERVICED	2
+#define IPC_ERR_UNABLE_TO_SERVICE	3
+#define IPC_ERR_CMD_INVALID		4
+#define IPC_ERR_CMD_FAILED		5
+#define IPC_ERR_EMSECURITY		6
+#define IPC_ERR_UNSIGNEDKERNEL		7
 
-/* Read a vector */
-int intel_scu_ipc_readv(u16 *addr, u8 *data, int len);
+#define MSIC_DEBUG_FILE "msic"
+#define MSIC_ALL_DEBUG_FILE "msic_all"
+#define MAX_MSIC_REG   0x3FF
+#define MIN_MSIC_REG   0x0
 
-/* Write single register */
-int intel_scu_ipc_iowrite8(u16 addr, u8 data);
 
-/* Write two sequential registers */
-int intel_scu_ipc_iowrite16(u16 addr, u16 data);
 
-/* Write four sequential registers */
-int intel_scu_ipc_iowrite32(u16 addr, u32 data);
+/* Command id associated with message IPCMSG_VRTC */
+#define IPC_CMD_VRTC_SETTIME      1 /* Set time */
+#define IPC_CMD_VRTC_SETALARM     2 /* Set alarm */
+#define IPC_CMD_VRTC_SYNC_RTC     3 /* Sync MSIC/PMIC RTC to VRTC */
 
-/* Write a vector */
-int intel_scu_ipc_writev(u16 *addr, u8 *data, int len);
+/* Command id associated with message IPCMSG_SHIM_CONFIG */
+#define IPC_CMD_SHIM_RD		0 /* SHIM read */
+#define IPC_CMD_SHIM_WR		1 /* SHIM write */
 
-/* Update single register based on the mask */
-int intel_scu_ipc_update_register(u16 addr, u8 data, u8 mask);
+/* check ipc status */
+int intel_scu_ipc_check_status(void);
 
 /* Issue commands to the SCU with or without data */
 int intel_scu_ipc_simple_command(int cmd, int sub);
-int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
-							u32 *out, int outlen);
+int intel_scu_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
+				u32 *out, u32 outlen);
 /* I2C control api */
 int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data);
 
 /* Update FW version */
-int intel_scu_ipc_fw_update(u8 *buffer, u32 length);
+int intel_scu_ipc_fw_update(void);
+int intel_scu_ipc_mrstfw_update(u8 *buffer, u32 length);
+int intel_scu_ipc_medfw_prepare(void __user *arg);
+
+int intel_scu_ipc_read_mip(u8 *data, int len, int offset, int issigned);
+int intel_scu_ipc_write_umip(u8 *data, int len, int offset);
+
+/* NVRAM access */
+u32 intel_scu_ipc_get_nvram_size(void);
+u32 intel_scu_ipc_get_nvram_addr(void);
+
+/* Penwell has 4 osc clocks */
+#define OSC_CLK_AUDIO	0	/* Audio */
+#define OSC_CLK_CAM0	1	/* Primary camera */
+#define OSC_CLK_CAM1	2	/* Secondary camera */
+#define OSC_CLK_DISP	3	/* Display buffer */
+
+int intel_scu_ipc_osc_clk(u8 clk, unsigned int khz);
 
 extern struct blocking_notifier_head intel_scu_notifier;
 
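
Note the revised intel_scu_ipc_command() prototype above: the input buffer is now a byte pointer with a byte-count inlen, while the output stays dword-based (consistent with the inlen/outlen comments in scu_ipc_rpmsg.h further below). A hedged call sketch, with an invented payload and illustrative buffer sizes:

static int example_vrtc_settime(void)
{
	u8 wbuf[4] = { 0x00, 0x01, 0x02, 0x03 };	/* payload invented */
	u32 rbuf[1];

	return intel_scu_ipc_command(IPCMSG_VRTC, IPC_CMD_VRTC_SETTIME,
				     wbuf, sizeof(wbuf), rbuf, 1);
}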
diff --git a/arch/x86/include/asm/intel_scu_ipcutil.h b/arch/x86/include/asm/intel_scu_ipcutil.h
new file mode 100644
index 0000000..0153882
--- /dev/null
+++ b/arch/x86/include/asm/intel_scu_ipcutil.h
@@ -0,0 +1,135 @@
+#ifndef _ASM_X86_INTEL_SCU_IPCUTIL_H_
+#define _ASM_X86_INTEL_SCU_IPCUTIL_H_
+
+#include <linux/types.h>
+
+/* ioctl commands */
+#define INTEL_SCU_IPC_REGISTER_READ	0
+#define INTEL_SCU_IPC_REGISTER_WRITE	1
+#define INTEL_SCU_IPC_REGISTER_UPDATE	2
+#define INTEL_SCU_IPC_FW_UPDATE			    0xA2
+#define INTEL_SCU_IPC_MEDFIELD_FW_UPDATE	    0xA3
+#define INTEL_SCU_IPC_FW_REVISION_GET		    0xB0
+#define INTEL_SCU_IPC_FW_REVISION_EXT_GET	    0xB1
+#define INTEL_SCU_IPC_S0IX_RESIDENCY		    0xB8
+#define INTEL_SCU_IPC_READ_RR_FROM_OSNIB	    0xC1
+#define INTEL_SCU_IPC_WRITE_RR_TO_OSNIB		    0xC2
+#define INTEL_SCU_IPC_READ_VBATTCRIT		    0xC4
+#define INTEL_SCU_IPC_WRITE_ALARM_FLAG_TO_OSNIB	    0xC5
+#define INTEL_SCU_IPC_OSC_CLK_CNTL		    0xC6
+#define INTEL_SCU_IPC_PMDB_ACCESS		    0xD0
+
+#define SIGNED_MOS_ATTR		0x0
+#define SIGNED_COS_ATTR		0x0A
+#define SIGNED_RECOVERY_ATTR	0x0C
+#define SIGNED_POS_ATTR		0x0E
+#define SIGNED_FACTORY_ATTR	0x12
+
+enum intel_scu_ipc_wake_src {
+	WAKE_BATT_INSERT,
+	WAKE_PWR_BUTTON_PRESS,
+	WAKE_RTC_TIMER,
+	WAKE_USB_CHRG_INSERT,
+	WAKE_RESERVED,
+	WAKE_REAL_RESET,
+	WAKE_COLD_BOOT,
+	WAKE_UNKNOWN,
+	WAKE_KERNEL_WATCHDOG_RESET,
+	WAKE_SECURITY_WATCHDOG_RESET,
+	WAKE_WATCHDOG_COUNTER_EXCEEDED,
+	WAKE_POWER_SUPPLY_DETECTED,
+	WAKE_FASTBOOT_BUTTONS_COMBO,
+	WAKE_NO_MATCHING_OSIP_ENTRY,
+	WAKE_CRITICAL_BATTERY,
+	WAKE_INVALID_CHECKSUM,
+	WAKE_FORCED_RESET,
+	WAKE_ACDC_CHRG_INSERT,
+	WAKE_PMIC_WATCHDOG_RESET,
+	WAKE_PLATFORM_WATCHDOG_RESET,
+	WAKE_SC_WATCHDOG_RESET
+};
+
+struct scu_ipc_data {
+	__u32	count;  /* No. of registers */
+	__u16	addr[5]; /* Register addresses */
+	__u8	data[5]; /* Register data */
+	__u8	mask; /* Valid for read-modify-write */
+};
+
+struct scu_ipc_version {
+	__u32	count;  /* length of version info */
+	__u8	data[16]; /* version data */
+};
+
+struct osc_clk_t {
+	__u32	id; /* clock id */
+	__u32	khz; /* clock frequency */
+};
+
+/* PMDB buffer, cmd, and limits */
+#define PMDB_SIZE              512
+#define PMDB_WMDB_SIZE         76
+#define PMDB_OTPDB_SIZE        384
+#define PMDB_OTPCTL_SIZE       48
+#define PMDB_ACCESS_SIZE       16
+
+#define PMDB_SUB_CMD_R_WMDB    0
+#define PMDB_SUB_CMD_R_OTPDB   1
+#define PMDB_SUB_CMD_W_WMDB    2
+#define PMDB_SUB_CMD_W_OTPDB   3
+#define PMDB_SUB_CMD_R_OTPCTL  4
+
+struct scu_ipc_pmdb_buffer {
+	__u32	sub; /* sub cmd of SCU's PMDB IPC commands */
+	__u32	count; /* length of PMDB buffer */
+	__u32	offset; /* buffer start offset for each PMDB component */
+	__u8	data[PMDB_SIZE]; /* PMDB buffer */
+};
+
+/* Penwell has 4 osc clocks */
+#define OSC_CLK_AUDIO	0	/* Audio */
+#define OSC_CLK_CAM0	1	/* Primary camera */
+#define OSC_CLK_CAM1	2	/* Secondary camera */
+#define OSC_CLK_DISP	3	/* Display buffer */
+
+#ifdef __KERNEL__
+
+int intel_scu_ipc_osc_clk(u8 clk, unsigned int khz);
+
+enum clk0_mode {
+	CLK0_AUDIENCE = 0x4,
+	CLK0_VIBRA1 = 0x8,
+	CLK0_VIBRA2 = 0x10,
+	CLK0_MSIC = 0x20,
+	CLK0_DEBUG = 0x100,
+	CLK0_QUERY = 0x1000,
+};
+
+int intel_scu_ipc_set_osc_clk0(unsigned int enable, enum clk0_mode mode);
+
+/* Helpers to turn on/off msic vprog1 and vprog2 */
+int intel_scu_ipc_msic_vprog1(int on);
+int intel_scu_ipc_msic_vprog2(int on);
+
+/* OSHOB-OS Handoff Buffer read */
+int intel_scu_ipc_get_oshob_base(void);
+int intel_scu_ipc_get_oshob_size(void);
+
+/* SCU trace buffer interface */
+u32 intel_scu_ipc_get_scu_trace_buffer(void);
+u32 intel_scu_ipc_get_scu_trace_buffer_size(void);
+u32 intel_scu_ipc_get_fabricerror_buf1_offset(void);
+u32 intel_scu_ipc_get_fabricerror_buf2_offset(void);
+
+/* OSNIB interface. */
+int intel_scu_ipc_write_osnib(u8 *data, int len, int offset);
+int intel_scu_ipc_read_osnib(u8 *data, int len, int offset);
+int intel_scu_ipc_write_osnib_extend(u8 *data, int len, int offset);
+int intel_scu_ipc_read_osnib_extend(u8 *data, int len, int offset);
+int intel_scu_ipc_write_osnib_rr(u8 rr);
+int intel_scu_ipc_read_osnib_rr(u8 *rr);
+int intel_scu_ipc_read_osnib_wd(u8 *wd);
+int intel_scu_ipc_write_osnib_wd(u8 *wd);
+#endif
+
+#endif
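
A hypothetical userspace sketch of the register-read ioctl defined above; the device node path is an assumption, not something this patch establishes, and the struct and ioctl numbers are taken from this header:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>

int main(void)
{
	struct scu_ipc_data d = { .count = 1, .addr = { 0x100 } };
	int fd = open("/dev/mid_ipc", O_RDWR);	/* node name assumed */

	if (fd >= 0 && ioctl(fd, INTEL_SCU_IPC_REGISTER_READ, &d) == 0)
		printf("reg 0x%x = 0x%x\n", d.addr[0], d.data[0]);
	return 0;
}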
diff --git a/arch/x86/include/asm/intel_scu_pmic.h b/arch/x86/include/asm/intel_scu_pmic.h
new file mode 100644
index 0000000..308afa6
--- /dev/null
+++ b/arch/x86/include/asm/intel_scu_pmic.h
@@ -0,0 +1,16 @@
+#ifndef __INTEL_SCU_PMIC_H__
+#define __INTEL_SCU_PMIC_H__
+
+#include <asm/types.h>
+
+#define KOBJ_PMIC_ATTR(_name, _mode, _show, _store) \
+	struct kobj_attribute _name##_attr = __ATTR(_name, _mode, _show, _store)
+
+extern int intel_scu_ipc_ioread8(u16 addr, u8 *data);
+extern int intel_scu_ipc_ioread32(u16 addr, u32 *data);
+extern int intel_scu_ipc_readv(u16 *addr, u8 *data, int len);
+extern int intel_scu_ipc_iowrite8(u16 addr, u8 data);
+extern int intel_scu_ipc_writev(u16 *addr, u8 *data, int len);
+extern int intel_scu_ipc_update_register(u16 addr, u8 data, u8 mask);
+
+#endif /*__INTEL_SCU_PMIC_H__ */
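
A one-liner to show the read-modify-write helper above in use; the register address is invented for illustration:

static int example_pmic_set_bit0(void)
{
	/* Set bit 0 of PMIC register 0x1F0 (address illustrative),
	 * leaving the remaining bits untouched. */
	return intel_scu_ipc_update_register(0x1F0, 0x01, 0x01);
}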
diff --git a/arch/x86/include/asm/intel_soc_debug.h b/arch/x86/include/asm/intel_soc_debug.h
new file mode 100644
index 0000000..9edb166
--- /dev/null
+++ b/arch/x86/include/asm/intel_soc_debug.h
@@ -0,0 +1,43 @@
+/*
+ * intel_soc_debug.h
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef INTEL_SOC_DEBUG_H
+#define INTEL_SOC_DEBUG_H
+
+#define DEBUG_FEATURE_PTI      0x00000001
+#define DEBUG_FEATURE_RTIT     0x00000002
+#define DEBUG_FEATURE_LAKEMORE 0x00000004
+#define DEBUG_FEATURE_SOCHAPS  0x00000008
+#define DEBUG_FEATURE_USB3DFX  0x00000010
+
+/* cpu_has_debug_feature() checks whether the debug
+ * feature passed as a parameter is enabled.
+ * The parameter must be exactly one of the above
+ * values (DEBUG_FEATURE_XXX).
+ * The function returns 1 if the debug feature is
+ * enabled and 0 otherwise.
+ */
+
+#ifdef CONFIG_INTEL_DEBUG_FEATURE
+extern int cpu_has_debug_feature(u32 bit);
+#else
+static inline int cpu_has_debug_feature(u32 bit) { return 0; }
+#endif
+
+#endif
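
Callers would typically gate optional trace setup on this check, e.g. (sketch only):

	if (cpu_has_debug_feature(DEBUG_FEATURE_PTI))
		pr_info("PTI debug feature fused in, enabling trace output\n");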
diff --git a/arch/x86/include/asm/intel_sst_mrfld.h b/arch/x86/include/asm/intel_sst_mrfld.h
new file mode 100644
index 0000000..041ff85
--- /dev/null
+++ b/arch/x86/include/asm/intel_sst_mrfld.h
@@ -0,0 +1,44 @@
+/* intel_sst_mrfld.h - Common enums for the Merrifield platform
+ *
+ *  Copyright (C) 2013 Intel Corp
+ *  Author: Samreen Nilofer <samreen.nilofer@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#ifndef _INTEL_SST_MRFLD_H
+#define _INTEL_SST_MRFLD_H
+
+enum {
+	MERR_SALTBAY_AUDIO = 0,
+	MERR_SALTBAY_COMPR,
+	MERR_SALTBAY_VOIP,
+	MERR_SALTBAY_PROBE,
+	MERR_SALTBAY_AWARE,
+	MERR_SALTBAY_VAD,
+	MERR_SALTBAY_POWER,
+};
+
+enum {
+	MERR_DPCM_AUDIO = 0,
+	MERR_DPCM_DB,
+	MERR_DPCM_LL,
+	MERR_DPCM_COMPR,
+	MERR_DPCM_VOIP,
+	MERR_DPCM_PROBE,
+};
+
+#endif
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
index e3b7819..a11269b 100644
--- a/arch/x86/include/asm/module.h
+++ b/arch/x86/include/asm/module.h
@@ -15,7 +15,7 @@
 #define MODULE_PROC_FAMILY "586MMX "
 #elif defined CONFIG_MCORE2
 #define MODULE_PROC_FAMILY "CORE2 "
-#elif defined CONFIG_MATOM
+#elif (defined CONFIG_MATOM) || (defined CONFIG_MSLM)
 #define MODULE_PROC_FAMILY "ATOM "
 #elif defined CONFIG_M686
 #define MODULE_PROC_FAMILY "686 "
diff --git a/arch/x86/include/asm/mrst-vrtc.h b/arch/x86/include/asm/mrst-vrtc.h
deleted file mode 100644
index 73668ab..0000000
--- a/arch/x86/include/asm/mrst-vrtc.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _MRST_VRTC_H
-#define _MRST_VRTC_H
-
-extern unsigned char vrtc_cmos_read(unsigned char reg);
-extern void vrtc_cmos_write(unsigned char val, unsigned char reg);
-extern unsigned long vrtc_get_time(void);
-extern int vrtc_set_mmss(unsigned long nowtime);
-
-#endif
diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h
deleted file mode 100644
index fc18bf3..0000000
--- a/arch/x86/include/asm/mrst.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * mrst.h: Intel Moorestown platform specific setup code
- *
- * (C) Copyright 2009 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-#ifndef _ASM_X86_MRST_H
-#define _ASM_X86_MRST_H
-
-#include <linux/sfi.h>
-
-extern int pci_mrst_init(void);
-extern int __init sfi_parse_mrtc(struct sfi_table_header *table);
-extern int sfi_mrtc_num;
-extern struct sfi_rtc_table_entry sfi_mrtc_array[];
-
-/*
- * Medfield is the follow-up of Moorestown, it combines two chip solution into
- * one. Other than that it also added always-on and constant tsc and lapic
- * timers. Medfield is the platform name, and the chip name is called Penwell
- * we treat Medfield/Penwell as a variant of Moorestown. Penwell can be
- * identified via MSRs.
- */
-enum mrst_cpu_type {
-	/* 1 was Moorestown */
-	MRST_CPU_CHIP_PENWELL = 2,
-};
-
-extern enum mrst_cpu_type __mrst_cpu_chip;
-
-#ifdef CONFIG_X86_INTEL_MID
-
-static inline enum mrst_cpu_type mrst_identify_cpu(void)
-{
-	return __mrst_cpu_chip;
-}
-
-#else /* !CONFIG_X86_INTEL_MID */
-
-#define mrst_identify_cpu()    (0)
-
-#endif /* !CONFIG_X86_INTEL_MID */
-
-enum mrst_timer_options {
-	MRST_TIMER_DEFAULT,
-	MRST_TIMER_APBT_ONLY,
-	MRST_TIMER_LAPIC_APBT,
-};
-
-extern enum mrst_timer_options mrst_timer_options;
-
-/*
- * Penwell uses spread spectrum clock, so the freq number is not exactly
- * the same as reported by MSR based on SDM.
- */
-#define PENWELL_FSB_FREQ_83SKU         83200
-#define PENWELL_FSB_FREQ_100SKU        99840
-
-#define SFI_MTMR_MAX_NUM 8
-#define SFI_MRTC_MAX	8
-
-extern struct console early_mrst_console;
-extern void mrst_early_console_init(void);
-
-extern struct console early_hsu_console;
-extern void hsu_early_console_init(const char *);
-
-extern void intel_scu_devices_create(void);
-extern void intel_scu_devices_destroy(void);
-
-/* VRTC timer */
-#define MRST_VRTC_MAP_SZ	(1024)
-/*#define MRST_VRTC_PGOFFSET	(0xc00) */
-
-extern void mrst_rtc_init(void);
-
-#endif /* _ASM_X86_MRST_H */
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index 2f366d0..2d34917 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -13,4 +13,10 @@
 
 #define MWAIT_ECX_INTERRUPT_BREAK	0x1
 
+#ifdef CONFIG_ATOM_SOC_POWER
+#define MWAIT_MAX_NUM_CSTATES		10
+#else
+#define MWAIT_MAX_NUM_CSTATES		8
+#endif
+
 #endif /* _ASM_X86_MWAIT_H */
diff --git a/arch/x86/include/asm/platform_byt_audio.h b/arch/x86/include/asm/platform_byt_audio.h
new file mode 100644
index 0000000..fed2b1b
--- /dev/null
+++ b/arch/x86/include/asm/platform_byt_audio.h
@@ -0,0 +1,49 @@
+/*
+ * platform_byt_audio.h: Baytrail audio platform data header file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Omair Md Abdullah <omair.m.abdullah@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_BYT_AUDIO_H_
+#define _PLATFORM_BYT_AUDIO_H_
+
+enum {
+	BYT_AUD_AIF1 = 0,
+	BYT_AUD_AIF2,
+	BYT_AUD_COMPR_DEV,
+#ifdef CONFIG_SND_SOC_COMMS_SSP
+	BYT_COMMS_BT,
+	BYT_COMMS_MODEM,
+#endif /* CONFIG_SND_SOC_COMMS_SSP */
+	BYT_AUD_PROBE_DEV,
+};
+
+enum {
+	BYT_CR_AUD_AIF1 = 0,
+	BYT_CR_AUD_COMPR_DEV,
+	BYT_CR_COMMS_BT,
+};
+/* LPE viewpoint addresses */
+/* TODO: move to DSDT */
+#define SST_BYT_IRAM_PHY_START	0xff2c0000
+#define SST_BYT_IRAM_PHY_END	0xff2d4000
+#define SST_BYT_DRAM_PHY_START	0xff300000
+#define SST_BYT_DRAM_PHY_END	0xff320000
+#define SST_BYT_IMR_VIRT_START	0xc0000000 /* virtual addr in LPE */
+#define SST_BYT_IMR_VIRT_END	0xc01fffff
+#define SST_BYT_SHIM_PHY_ADDR	0xff340000
+#define SST_BYT_MBOX_PHY_ADDR	0xff344000
+#define SST_BYT_DMA0_PHY_ADDR	0xff298000
+#define SST_BYT_DMA1_PHY_ADDR	0xff29c000
+#define SST_BYT_SSP0_PHY_ADDR	0xff2a0000
+#define SST_BYT_SSP2_PHY_ADDR	0xff2a2000
+
+#define BYT_FW_MOD_TABLE_OFFSET 0x80000
+#define BYT_FW_MOD_TABLE_SIZE   0x100
+
+#endif
diff --git a/arch/x86/include/asm/platform_sst.h b/arch/x86/include/asm/platform_sst.h
new file mode 100644
index 0000000..e752ed9
--- /dev/null
+++ b/arch/x86/include/asm/platform_sst.h
@@ -0,0 +1,132 @@
+
+/*
+ * platform_sst.h:  sst audio platform data header file
+ *
+ * Copyright (C) 2013 Intel Corporation
+ * Author: Dharageswari R <dharageswari.r@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#ifndef _PLATFORM_SST_H_
+#define _PLATFORM_SST_H_
+
+#include <linux/sfi.h>
+
+#define MAX_NUM_STREAMS_CTP	5
+#define MAX_NUM_STREAMS_MRFLD	25
+#define MAX_NUM_STREAMS	MAX_NUM_STREAMS_MRFLD
+
+#define SST_MAX_SSP_PORTS 4
+#define SST_MAX_DMA 2
+
+enum {
+	SST_SSP_AUDIO = 0,
+	SST_SSP_MODEM,
+	SST_SSP_BT,
+	SST_SSP_FM,
+};
+
+struct sst_gpio_config {
+	u32 i2s_rx_alt;
+	u32 i2s_tx_alt;
+	u32 i2s_frame;
+	u32 i2s_clock;
+	u32 alt_function;
+};
+
+struct sst_ssp_info {
+	u32 base_add;
+	struct sst_gpio_config gpio;
+	bool gpio_in_use;
+};
+
+struct sst_info {
+	u32 iram_start;
+	u32 iram_end;
+	bool iram_use;
+	u32 dram_start;
+	u32 dram_end;
+	bool dram_use;
+	u32 imr_start;
+	u32 imr_end;
+	bool imr_use;
+	u32 mailbox_start;
+	bool use_elf;
+	bool lpe_viewpt_rqd;
+	unsigned int max_streams;
+	u32 dma_max_len;
+	u8 num_probes;
+};
+
+struct sst_ssp_platform_cfg {
+	u8 ssp_cfg_sst;
+	u8 port_number;
+	u8 is_master;
+	u8 pack_mode;
+	u8 num_slots_per_frame;
+	u8 num_bits_per_slot;
+	u8 active_tx_map;
+	u8 active_rx_map;
+	u8 ssp_frame_format;
+	u8 frame_polarity;
+	u8 serial_bitrate_clk_mode;
+	u8 frame_sync_width;
+	u8 dma_handshake_interface_tx;
+	u8 dma_handshake_interface_rx;
+	u8 network_mode;
+	u8 start_delay;
+	u32 ssp_base_add;
+} __packed;
+
+struct sst_board_config_data {
+	struct sst_ssp_platform_cfg ssp_platform_data[SST_MAX_SSP_PORTS];
+	u8 active_ssp_ports;
+	u8 platform_id;
+	u8 board_id;
+	u8 ihf_num_chan;
+	u32 osc_clk_freq;
+} __packed;
+
+struct sst_platform_config_data {
+	u32 sst_sram_buff_base;
+	u32 sst_dma_base[SST_MAX_DMA];
+} __packed;
+
+struct sst_platform_debugfs_data {
+	u32 ssp_reg_size;
+	u32 dma_reg_size;
+	u32 checkpoint_offset;
+	u32 checkpoint_size;
+	u8 num_ssp;
+	u8 num_dma;
+};
+
+struct sst_ipc_info {
+	int ipc_offset;
+	bool use_32bit_ops;
+	unsigned int mbox_recv_off;
+};
+
+struct sst_lib_dnld_info {
+	unsigned int mod_base;
+	unsigned int mod_end;
+	unsigned int mod_table_offset;
+	unsigned int mod_table_size;
+	bool mod_ddr_dnld;
+};
+
+struct sst_platform_info {
+	const struct sst_info *probe_data;
+	const struct sst_ssp_info *ssp_data;
+	const struct sst_board_config_data *bdata;
+	const struct sst_platform_config_data *pdata;
+	const struct sst_ipc_info *ipc_info;
+	const struct sst_platform_debugfs_data *debugfs_data;
+	const struct sst_lib_dnld_info *lib_info;
+};
+
+#endif
diff --git a/arch/x86/include/asm/platform_sst_audio.h b/arch/x86/include/asm/platform_sst_audio.h
new file mode 100644
index 0000000..6333631
--- /dev/null
+++ b/arch/x86/include/asm/platform_sst_audio.h
@@ -0,0 +1,161 @@
+/*
+ * platform_sst_audio.h:  sst audio platform data header file
+ *
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Jeeja KP <jeeja.kp@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_SST_AUDIO_H_
+#define _PLATFORM_SST_AUDIO_H_
+
+#include <linux/sfi.h>
+
+/* The stream map status is used to dynamically assign
+ * a device-id to a device, for example the probe device. If
+ * a stream map entry is free for a device, the device-id
+ * for that device is populated when the device is
+ * opened and the status is set to IN_USE. When the device
+ * is closed, the stream map status is set back to FREE.
+ */
+enum sst_strm_map_status {
+	SST_DEV_MAP_FREE = 0,
+	SST_DEV_MAP_IN_USE,
+};
+
+/* Device IDs for CTP are the same as stream IDs */
+enum sst_audio_device_id_ctp {
+	SST_PCM_OUT0 = 1,
+	SST_PCM_OUT1 = 2,
+	SST_COMPRESSED_OUT = 3,
+	SST_CAPTURE_IN = 4,
+	SST_PROBE_IN = 5,
+};
+
+enum sst_audio_task_id_mrfld {
+	SST_TASK_ID_NONE = 0,
+	SST_TASK_ID_SBA = 1,
+	SST_TASK_ID_FBA_UL = 2,
+	SST_TASK_ID_MEDIA = 3,
+	SST_TASK_ID_AWARE = 4,
+	SST_TASK_ID_FBA_DL = 5,
+	SST_TASK_ID_MAX = SST_TASK_ID_FBA_DL,
+};
+
+/* Device IDs for Merrifield are Pipe IDs,
+ * ref: LPE DSP command interface spec v0.75 */
+enum sst_audio_device_id_mrfld {
+	/* Output pipeline IDs */
+	PIPE_ID_OUT_START = 0x0,
+	PIPE_MODEM_OUT = 0x0,
+	PIPE_BT_OUT = 0x1,
+	PIPE_CODEC_OUT0 = 0x2,
+	PIPE_CODEC_OUT1 = 0x3,
+	PIPE_SPROT_LOOP_OUT = 0x4,
+	PIPE_MEDIA_LOOP1_OUT = 0x5,
+	PIPE_MEDIA_LOOP2_OUT = 0x6,
+	PIPE_PROBE_OUT = 0x7,
+	PIPE_HF_SNS_OUT = 0x8, /* VOCIE_UPLINK_REF2 */
+	PIPE_HF_OUT = 0x9, /* VOICE_UPLINK_REF1 */
+	PIPE_SPEECH_OUT = 0xA, /* VOICE UPLINK */
+	PIPE_RxSPEECH_OUT = 0xB, /* VOICE_DOWNLINK */
+	PIPE_VOIP_OUT = 0xC,
+	PIPE_PCM0_OUT = 0xD,
+	PIPE_PCM1_OUT = 0xE,
+	PIPE_PCM2_OUT = 0xF,
+	PIPE_AWARE_OUT = 0x10,
+	PIPE_VAD_OUT = 0x11,
+	PIPE_MEDIA0_OUT = 0x12,
+	PIPE_MEDIA1_OUT = 0x13,
+	PIPE_FM_OUT = 0x14,
+	PIPE_PROBE1_OUT = 0x15,
+	PIPE_PROBE2_OUT = 0x16,
+	PIPE_PROBE3_OUT = 0x17,
+	PIPE_PROBE4_OUT = 0x18,
+	PIPE_PROBE5_OUT = 0x19,
+	PIPE_PROBE6_OUT = 0x1A,
+	PIPE_PROBE7_OUT = 0x1B,
+	PIPE_PROBE8_OUT = 0x1C,
+/* Input Pipeline IDs */
+	PIPE_ID_IN_START = 0x80,
+	PIPE_MODEM_IN = 0x80,
+	PIPE_BT_IN = 0x81,
+	PIPE_CODEC_IN0 = 0x82,
+	PIPE_CODEC_IN1 = 0x83,
+	PIPE_SPROT_LOOP_IN = 0x84,
+	PIPE_MEDIA_LOOP1_IN = 0x85,
+	PIPE_MEDIA_LOOP2_IN = 0x86,
+	PIPE_PROBE_IN = 0x87,
+	PIPE_SIDETONE_IN = 0x88,
+	PIPE_TxSPEECH_IN = 0x89,
+	PIPE_SPEECH_IN = 0x8A,
+	PIPE_TONE_IN = 0x8B,
+	PIPE_VOIP_IN = 0x8C,
+	PIPE_PCM0_IN = 0x8D,
+	PIPE_PCM1_IN = 0x8E,
+	PIPE_MEDIA0_IN = 0x8F,
+	PIPE_MEDIA1_IN = 0x90,
+	PIPE_MEDIA2_IN = 0x91,
+	PIPE_FM_IN = 0x92,
+	PIPE_PROBE1_IN = 0x93,
+	PIPE_PROBE2_IN = 0x94,
+	PIPE_PROBE3_IN = 0x95,
+	PIPE_PROBE4_IN = 0x96,
+	PIPE_PROBE5_IN = 0x97,
+	PIPE_PROBE6_IN = 0x98,
+	PIPE_PROBE7_IN = 0x99,
+	PIPE_PROBE8_IN = 0x9A,
+	PIPE_MEDIA3_IN = 0x9C,
+	PIPE_LOW_PCM0_IN = 0x9D,
+	PIPE_RSVD = 0xFF,
+};
+
+/* The stream map for each platform consists of an array of the
+ * stream map structure below. The array index is used as the static
+ * stream-id associated with a device, and a match on the
+ * (dev_num, subdev_num, direction) tuple gives the device_id for
+ * the device.
+ */
+struct sst_dev_stream_map {
+	u8 dev_num;
+	u8 subdev_num;
+	u8 direction;
+	u8 device_id;
+	u8 task_id;
+	u8 status;
+};
+
+#define MAX_DESCRIPTOR_SIZE 172
+
+struct sst_dev_effects_map {
+	char	uuid[16];
+	u16	algo_id;
+	char	descriptor[MAX_DESCRIPTOR_SIZE];
+};
+
+struct sst_dev_effects_resource_map {
+	char  uuid[16];
+	unsigned int flags;
+	u16 cpuLoad;
+	u16 memoryUsage;
+};
+
+struct sst_dev_effects {
+	struct sst_dev_effects_map *effs_map;
+	struct sst_dev_effects_resource_map *effs_res_map;
+	unsigned int effs_num_map;
+};
+
+struct sst_platform_data {
+	/* Intel software platform id */
+	const struct soft_platform_id *spid;
+	struct sst_dev_stream_map *pdev_strm_map;
+	struct sst_dev_effects pdev_effs;
+	unsigned int strm_map_size;
+};
+
+int add_sst_platform_device(void);
+#endif
+
diff --git a/arch/x86/include/asm/pmic_pdata.h b/arch/x86/include/asm/pmic_pdata.h
new file mode 100644
index 0000000..d88c64a
--- /dev/null
+++ b/arch/x86/include/asm/pmic_pdata.h
@@ -0,0 +1,45 @@
+#ifndef __PMIC_PDATA_H__
+#define __PMIC_PDATA_H__
+
+struct temp_lookup {
+	int adc_val;
+	int temp;
+	int temp_err;
+};
+
+/*
+ * pmic cove charger driver info
+ */
+struct pmic_platform_data {
+	void (*cc_to_reg)(int, u8*);
+	void (*cv_to_reg)(int, u8*);
+	void (*inlmt_to_reg)(int, u8*);
+	int max_tbl_row_cnt;
+	struct temp_lookup *adc_tbl;
+};
+
+extern int pmic_get_status(void);
+extern int pmic_enable_charging(bool);
+extern int pmic_set_cc(int);
+extern int pmic_set_cv(int);
+extern int pmic_set_ilimma(int);
+extern int pmic_enable_vbus(bool enable);
+/* Workaround for the ShadyCove VBUS removal detection issue */
+extern int pmic_handle_low_supply(void);
+
+extern void dump_pmic_regs(void);
+#ifdef CONFIG_PMIC_CCSM
+extern int pmic_get_health(void);
+extern int pmic_get_battery_pack_temp(int *);
+#else
+static inline int pmic_get_health(void)
+{
+	return 0;
+}
+static inline int pmic_get_battery_pack_temp(int *temp)
+{
+	return 0;
+}
+#endif
+
+#endif
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 22224b3..57437a1 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -660,6 +660,10 @@
 	rep_nop();
 }
 
+#ifndef cpu_read_relax
+#define cpu_read_relax() cpu_relax()
+#endif
+
 /* Stop speculative execution and prefetching of modified code. */
 static inline void sync_core(void)
 {
diff --git a/arch/x86/include/asm/relaxed.h b/arch/x86/include/asm/relaxed.h
new file mode 100644
index 0000000..4b38a06
--- /dev/null
+++ b/arch/x86/include/asm/relaxed.h
@@ -0,0 +1,22 @@
+/*
+ * x86/include/asm/relaxed.h
+ *
+ * (copied from arm/include/asm/relaxed.h)
+ *
+ * Copyright (c) 2014 NVIDIA Corporation. All rights reserved.
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RELAXED_H_
+#define _ASM_RELAXED_H_
+
+#include <asm-generic/relaxed.h>
+
+#endif /*_ASM_RELAXED_H_*/
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index 5c6e4fb..ee177f7 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -47,7 +47,7 @@
 # define NEED_NOPL	0
 #endif
 
-#ifdef CONFIG_MATOM
+#if defined(CONFIG_MATOM) || defined(CONFIG_MSLM)
 # define NEED_MOVBE	(1<<(X86_FEATURE_MOVBE & 31))
 #else
 # define NEED_MOVBE	0
diff --git a/arch/x86/include/asm/scu_ipc_rpmsg.h b/arch/x86/include/asm/scu_ipc_rpmsg.h
new file mode 100644
index 0000000..f4aded0
--- /dev/null
+++ b/arch/x86/include/asm/scu_ipc_rpmsg.h
@@ -0,0 +1,19 @@
+#ifndef _SCU_IPC_RPMSG_H_
+#define _SCU_IPC_RPMSG_H_
+
+struct tx_ipc_msg {
+	u32 cmd;
+	u32 sub;
+	u8 *in;
+	u32 *out;
+	u32 inlen;	/* number of bytes to be written */
+	u32 outlen;	/* number of dwords to be read */
+	u32 sptr;	/* needed for raw ipc command */
+	u32 dptr;	/* needed for raw ipc command */
+};
+
+struct rx_ipc_msg {
+	u32 status;	/* IPC status: 0 - success, 1 - fail */
+};
+
+#endif
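
A minimal sketch of populating the request side for a simple, data-less command (field values invented; whether unused pointers may be NULL is an assumption):

static struct tx_ipc_msg example_msg = {
	.cmd	= IPCMSG_COLD_RESET,	/* command chosen for illustration */
	.sub	= 0,
	.in	= NULL,
	.out	= NULL,
	.inlen	= 0,
	.outlen	= 0,
};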
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 2e327f1..e993660 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -49,9 +49,9 @@
 extern void setup_default_timer_irq(void);
 
 #ifdef CONFIG_X86_INTEL_MID
-extern void x86_mrst_early_setup(void);
+extern void x86_intel_mid_early_setup(void);
 #else
-static inline void x86_mrst_early_setup(void) { }
+static inline void x86_intel_mid_early_setup(void) { }
 #endif
 
 #ifdef CONFIG_X86_INTEL_CE
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index 2e188d6..f106908 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -90,8 +90,7 @@
 	memcpy(&regs->bx + i, args, n * sizeof(args[0]));
 }
 
-static inline int syscall_get_arch(struct task_struct *task,
-				   struct pt_regs *regs)
+static inline int syscall_get_arch(void)
 {
 	return AUDIT_ARCH_I386;
 }
@@ -220,8 +219,7 @@
 		}
 }
 
-static inline int syscall_get_arch(struct task_struct *task,
-				   struct pt_regs *regs)
+static inline int syscall_get_arch(void)
 {
 #ifdef CONFIG_IA32_EMULATION
 	/*
@@ -233,7 +231,7 @@
 	 *
 	 * x32 tasks should be considered AUDIT_ARCH_X86_64.
 	 */
-	if (task_thread_info(task)->status & TS_COMPAT)
+	if (task_thread_info(current)->status & TS_COMPAT)
 		return AUDIT_ARCH_I386;
 #endif
 	/* Both x32 and x86_64 are considered "64-bit". */
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index c15ddaf..9c3733c 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -158,7 +158,7 @@
 	X86_SUBARCH_PC = 0,
 	X86_SUBARCH_LGUEST,
 	X86_SUBARCH_XEN,
-	X86_SUBARCH_MRST,
+	X86_SUBARCH_INTEL_MID,
 	X86_SUBARCH_CE4100,
 	X86_NR_SUBARCHS,
 };
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index d3fd447..27256e6 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -311,6 +311,11 @@
 #define MSR_AMD_PERF_STATUS		0xc0010063
 #define MSR_AMD_PERF_CTL		0xc0010062
 
+#define MSR_IA32_POWER_MISC		0x00000120
+
+#define ENABLE_ULFM_AUTOCM		(1 << 2)
+#define ENABLE_INDP_AUTOCM		(1 << 3)
+
 #define MSR_IA32_MPERF			0x000000e7
 #define MSR_IA32_APERF			0x000000e8
 
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index c9876ef..af5b08a 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -40,7 +40,7 @@
 
 #include <asm/fixmap.h>
 #include <asm/apb_timer.h>
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
 #include <asm/time.h>
 
 #define APBT_CLOCKEVENT_RATING		110
@@ -157,13 +157,13 @@
 
 	adev->num = smp_processor_id();
 	adev->timer = dw_apb_clockevent_init(smp_processor_id(), "apbt0",
-		mrst_timer_options == MRST_TIMER_LAPIC_APBT ?
+		intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ?
 		APBT_CLOCKEVENT_RATING - 100 : APBT_CLOCKEVENT_RATING,
 		adev_virt_addr(adev), 0, apbt_freq);
 	/* Firmware does EOI handling for us. */
 	adev->timer->eoi = NULL;
 
-	if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
+	if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT) {
 		global_clock_event = &adev->timer->ced;
 		printk(KERN_DEBUG "%s clockevent registered as global\n",
 		       global_clock_event->name);
@@ -253,7 +253,7 @@
 
 static __init int apbt_late_init(void)
 {
-	if (mrst_timer_options == MRST_TIMER_LAPIC_APBT ||
+	if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ||
 		!apb_timer_block_enabled)
 		return 0;
 	/* This notifier should be called after workqueue is ready */
@@ -340,7 +340,7 @@
 	}
 #ifdef CONFIG_SMP
 	/* kernel cmdline disable apb timer, so we will use lapic timers */
-	if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
+	if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT) {
 		printk(KERN_INFO "apbt: disabled per cpu timer\n");
 		return;
 	}
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 3cd8bfc..bdafde6 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -54,6 +54,7 @@
 #include <asm/mce.h>
 #include <asm/tsc.h>
 #include <asm/hypervisor.h>
+#include <asm/intel-mid.h>
 
 unsigned int num_processors;
 
@@ -707,7 +708,7 @@
 		lapic_clockevent.mult = div_sc(lapic_timer_frequency/APIC_DIVISOR,
 					TICK_NSEC, lapic_clockevent.shift);
 		lapic_clockevent.max_delta_ns =
-			clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
+			clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent);
 		lapic_clockevent.min_delta_ns =
 			clockevent_delta2ns(0xF, &lapic_clockevent);
 		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
@@ -2234,6 +2235,19 @@
 	unsigned long flags;
 	int maxlvt;
 
+	/*
+	 * On intel_mid the suspend flow is a bit different: the lapic
+	 * hardware implementation and integration do not support the
+	 * standard suspend path. Instead, this implementation only
+	 * programs a high value into the timer, so that the AONT global
+	 * timer is updated with this big value at s0i3 entry and won't
+	 * produce a timer-based wake-up event.
+	 */
+	if (intel_mid_identify_cpu() != 0) {
+		apic_write(APIC_TMICT, ~0);
+		return 0;
+	}
+
 	if (!apic_pm_state.active)
 		return 0;
 
@@ -2272,6 +2286,15 @@
 	unsigned long flags;
 	int maxlvt;
 
+	/*
+	 * On intel_mid, the resume flow is a bit different.
+	 * See the explanation in lapic_suspend().
+	 */
+	if (intel_mid_identify_cpu() != 0) {
+		apic_write(APIC_TMICT, 10);
+		return;
+	}
+
 	if (!apic_pm_state.active)
 		return;
 
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 9ed796c..d80c6d2 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -316,6 +316,24 @@
 	writel(vector, &io_apic->eoi);
 }
 
+/*
+ * This index corresponds to address 1024 - 4 in the SCU RTE table
+ * area, which is otherwise unused. Works on CLVP only.
+ */
+#define LAST_INDEX_IN_IO_APIC_SPACE 255
+#define KERNEL_TO_SCU_PANIC_REQUEST (0x0515dead)
+void apic_scu_panic_dump(void)
+{
+	unsigned long flags;
+
+	printk(KERN_ERR "Request SCU panic dump");
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
+	io_apic_write(0, LAST_INDEX_IN_IO_APIC_SPACE,
+		      KERNEL_TO_SCU_PANIC_REQUEST);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
+}
+EXPORT_SYMBOL_GPL(apic_scu_panic_dump);
+
 unsigned int native_io_apic_read(unsigned int apic, unsigned int reg)
 {
 	struct io_apic __iomem *io_apic = io_apic_base(apic);
@@ -2355,6 +2373,10 @@
 	return ret;
 }
 
+static int ioapic_set_wake(struct irq_data *data, unsigned int on)
+{
+	return 0;
+}
 static void ack_apic_edge(struct irq_data *data)
 {
 	irq_complete_move(data->chip_data);
@@ -2519,7 +2541,9 @@
 	.irq_ack		= ack_apic_edge,
 	.irq_eoi		= ack_apic_level,
 	.irq_set_affinity	= native_ioapic_set_affinity,
+	.irq_set_wake		= ioapic_set_wake,
 	.irq_retrigger		= ioapic_retrigger_irq,
+	.flags			= IRQCHIP_SKIP_SET_WAKE,
 };
 
 static inline void init_IO_APIC_traps(void)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 81e0fe4..ac15e64 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1187,15 +1187,17 @@
  */
 static void clear_all_debug_regs(void)
 {
-	int i;
+/*	int i;
 
 	for (i = 0; i < 8; i++) {
+*/
 		/* Ignore db4, db5 */
-		if ((i == 4) || (i == 5))
+/*		if ((i == 4) || (i == 5))
 			continue;
 
 		set_debugreg(0, i);
 	}
+*/
 }
 
 #ifdef CONFIG_KGDB
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 8533e69..78537fc 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -101,6 +101,7 @@
 		switch (c->x86_model) {
 		case 0x27:	/* Penwell */
 		case 0x35:	/* Cloverview */
+		case 0x4A:	/* Merrifield */
 			set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
 			break;
 		default:
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 47a1870..95db10f 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -30,8 +30,17 @@
 #include <asm/mce.h>
 #include <asm/msr.h>
 
-/* How long to wait between reporting thermal events */
+/*
+ * How long to wait between reporting thermal events.
+ * If the interrupt is enabled for the coretemp driver, the BIOS
+ * takes care of hysteresis, so no spurious interrupts are
+ * expected. Hence this interval is set to 0.
+ */
+#ifdef CONFIG_SENSORS_CORETEMP_INTERRUPT
+#define CHECK_INTERVAL         (0)
+#else
 #define CHECK_INTERVAL		(300 * HZ)
+#endif
 
 #define THERMAL_THROTTLING_EVENT	0
 #define POWER_LIMIT_EVENT		1
@@ -177,12 +186,12 @@
 	/* if we just entered the thermal event */
 	if (new_event) {
 		if (event == THERMAL_THROTTLING_EVENT)
-			printk(KERN_CRIT "CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
+			pr_crit_ratelimited("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
 				this_cpu,
 				level == CORE_LEVEL ? "Core" : "Package",
 				state->count);
 		else
-			printk(KERN_CRIT "CPU%d: %s power limit notification (total events = %lu)\n",
+			pr_crit_ratelimited("CPU%d: %s power limit notification (total events = %lu)\n",
 				this_cpu,
 				level == CORE_LEVEL ? "Core" : "Package",
 				state->count);
@@ -190,11 +199,11 @@
 	}
 	if (old_event) {
 		if (event == THERMAL_THROTTLING_EVENT)
-			printk(KERN_INFO "CPU%d: %s temperature/speed normal\n",
+			pr_info_ratelimited("CPU%d: %s temperature/speed normal\n",
 				this_cpu,
 				level == CORE_LEVEL ? "Core" : "Package");
 		else
-			printk(KERN_INFO "CPU%d: %s power limit normal\n",
+			pr_info_ratelimited("CPU%d: %s power limit normal\n",
 				this_cpu,
 				level == CORE_LEVEL ? "Core" : "Package");
 		return 1;
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 03a3632..4c77d69 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -21,6 +21,7 @@
  *
  */
 
+#include <linux/jiffies.h>
 #include <linux/dmi.h>
 #include <linux/module.h>
 #include <asm/div64.h>
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index d15f575..3b78cb7 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -14,7 +14,7 @@
 #include <xen/hvc-console.h>
 #include <asm/pci-direct.h>
 #include <asm/fixmap.h>
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
 #include <asm/pgtable.h>
 #include <linux/usb/ehci_def.h>
 
@@ -228,11 +228,16 @@
 			mrst_early_console_init();
 			early_console_register(&early_mrst_console, keep);
 		}
-
+		if (!strncmp(buf, "mrfld", 5)) {
+			mrfld_early_console_init();
+			early_console_register(&early_mrfld_console, keep);
+		}
 		if (!strncmp(buf, "hsu", 3)) {
 			hsu_early_console_init(buf + 3);
 			early_console_register(&early_hsu_console, keep);
 		}
+		if (!strncmp(buf, "pti", 3))
+			early_console_register(&early_pti_console, keep);
 #endif
 		buf++;
 	}
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 138463a..8f344e7 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -35,8 +35,8 @@
 
 	/* Call the subarch specific early setup function */
 	switch (boot_params.hdr.hardware_subarch) {
-	case X86_SUBARCH_MRST:
-		x86_mrst_early_setup();
+	case X86_SUBARCH_INTEL_MID:
+		x86_intel_mid_early_setup();
 		break;
 	case X86_SUBARCH_CE4100:
 		x86_ce4100_early_setup();
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index ac0631d..bfa0766 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -273,7 +273,9 @@
 
 		data = irq_desc_get_irq_data(desc);
 		affinity = data->affinity;
-		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
+		/* include IRQs that have no action but are chained */
+		if ((!irq_has_action(irq) && !irq_is_chained(irq)) ||
+			irqd_is_per_cpu(data) ||
 		    cpumask_subset(affinity, cpu_online_mask)) {
 			raw_spin_unlock(&desc->lock);
 			continue;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 59b9037..48f4399 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -40,19 +40,6 @@
 
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU(unsigned char, is_idle);
-static ATOMIC_NOTIFIER_HEAD(idle_notifier);
-
-void idle_notifier_register(struct notifier_block *n)
-{
-	atomic_notifier_chain_register(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_register);
-
-void idle_notifier_unregister(struct notifier_block *n)
-{
-	atomic_notifier_chain_unregister(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_unregister);
 #endif
 
 struct kmem_cache *task_xstate_cachep;
@@ -257,14 +244,14 @@
 void enter_idle(void)
 {
 	this_cpu_write(is_idle, 1);
-	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
+	idle_notifier_call_chain(IDLE_START);
 }
 
 static void __exit_idle(void)
 {
 	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
 		return;
-	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+	idle_notifier_call_chain(IDLE_END);
 }
 
 /* Called from interrupts to signify idle end */
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 198eb20..bf00e20 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -11,9 +11,10 @@
 
 #include <asm/vsyscall.h>
 #include <asm/x86_init.h>
+#include <asm/intel-mid.h>
 #include <asm/time.h>
-#include <asm/mrst.h>
 #include <asm/rtc.h>
+#include <asm/io_apic.h>
 
 #ifdef CONFIG_X86_32
 /*
@@ -149,6 +150,26 @@
 	ts->tv_nsec = 0;
 }
 
+static int handle_mrfl_dev_ioapic(int irq)
+{
+	int ret = 0;
+	int ioapic;
+	struct io_apic_irq_attr irq_attr;
+
+	ioapic = mp_find_ioapic(irq);
+	if (ioapic >= 0) {
+		irq_attr.ioapic = ioapic;
+		irq_attr.ioapic_pin = irq;
+		irq_attr.trigger = 1;
+		irq_attr.polarity = 0; /* Active high */
+		io_apic_set_pci_routing(NULL, irq, &irq_attr);
+	} else {
+		pr_warn("can not find interrupt %d in ioapic\n", irq);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
 
 static struct resource rtc_resources[] = {
 	[0] = {
@@ -172,6 +193,8 @@
 
 static __init int add_rtc_cmos(void)
 {
+	int ret;
+
 #ifdef CONFIG_PNP
	    static const char * const ids[] __initconst =
 	    { "PNP0b00", "PNP0b01", "PNP0b02", };
@@ -191,10 +214,17 @@
 	if (of_have_populated_dt())
 		return 0;
 
-	/* Intel MID platforms don't have ioport rtc */
-	if (mrst_identify_cpu())
+	/* Intel MID platforms don't have ioport rtc
+	 * except Tangier platform, which doesn't have vRTC
+	 */
+	if (intel_mid_identify_cpu() &&
+	    intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
 		return -ENODEV;
 
+	ret = handle_mrfl_dev_ioapic(RTC_IRQ);
+	if (ret)
+		return ret;
+
 	platform_device_register(&rtc_device);
 	dev_info(&rtc_device.dev,
 		 "registered platform RTC device (no PNP device found)\n");
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 87084ab..ff290ca 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1325,21 +1325,25 @@
 	return 0;
 }
 
+/*
+ * We let each cpu's idle task announce its own death to complete
+ * the logical cpu unplug sequence.
+ */
+DECLARE_COMPLETION(cpu_die_comp);
+
 void native_cpu_die(unsigned int cpu)
 {
 	/* We don't do anything here: idle task is faking death itself. */
-	unsigned int i;
+	unsigned long timeout = HZ; /* 1 sec */
 
-	for (i = 0; i < 10; i++) {
-		/* They ack this in play_dead by setting CPU_DEAD */
-		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
-			if (system_state == SYSTEM_RUNNING)
-				pr_info("CPU %u is now offline\n", cpu);
-			return;
-		}
-		msleep(100);
-	}
-	pr_err("CPU %u didn't die...\n", cpu);
+	/* They ack this in play_dead by setting CPU_DEAD */
+	wait_for_completion_timeout(&cpu_die_comp, timeout);
+	if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+		if (system_state == SYSTEM_RUNNING)
+			pr_info("CPU %u is now offline\n", cpu);
+		return;
+	}
+	pr_err("CPU %u didn't die...\n", cpu);
 }
 
 void play_dead_common(void)
@@ -1351,6 +1355,7 @@
 	mb();
 	/* Ack it */
 	__this_cpu_write(cpu_state, CPU_DEAD);
+	complete(&cpu_die_comp);
 
 	/*
 	 * With physical CPU hotplug, we should halt the cpu
@@ -1403,8 +1408,15 @@
 				highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
 			}
 		}
-		eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
-			(highest_subcstate - 1);
+
+		if (highest_cstate < 6) {
+			eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
+				(highest_subcstate - 1);
+		} else {
+			/* For s0i3 substate code is 4 */
+			eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
+				((highest_subcstate - 1) * 2);
+		}
 	}
 
 	/*
@@ -1416,6 +1428,13 @@
 
 	wbinvd();
 
+	/*
+	 * FIXME: SCU will abort S3 entry with ACK C6 timeout
+	 * if the lapic timer value programmed is low.
+	 * Hence program a high value before offlining the CPU.
+	 */
+	apic_write(APIC_TMICT, ~0);
+
 	while (1) {
 		/*
 		 * The CLFLUSH is a workaround for erratum AAI65 for
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 75f9e5d..75b36cc 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -69,15 +69,15 @@
 {
 	unsigned long rnd = 0;
 
-	/*
-	*  8 bits of randomness in 32bit mmaps, 20 address space bits
-	* 28 bits of randomness in 64bit mmaps, 40 address space bits
-	*/
 	if (current->flags & PF_RANDOMIZE) {
 		if (mmap_is_ia32())
-			rnd = get_random_int() % (1<<8);
+#ifdef CONFIG_COMPAT
+			rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
+#else
+			rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
+#endif
 		else
-			rnd = get_random_int() % (1<<28);
+			rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
 	}
 	return rnd << PAGE_SHIFT;
 }
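
To make the mask arithmetic concrete (a worked example, not code from this patch): with mmap_rnd_bits == 28 and 4 KiB pages, the mask keeps 28 random bits which, shifted left by PAGE_SHIFT, randomize roughly 1 TiB of mmap base placement:

	/* Illustrative numbers only: 28 random bits, 4 KiB pages. */
	unsigned long rnd = get_random_long() & ((1UL << 28) - 1);
	unsigned long offset = rnd << 12;	/* up to (2^28 - 1) pages */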
diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
index 6eb18c4..f02406b 100644
--- a/arch/x86/pci/mrst.c
+++ b/arch/x86/pci/mrst.c
@@ -31,6 +31,7 @@
 #include <asm/pci_x86.h>
 #include <asm/hw_irq.h>
 #include <asm/io_apic.h>
+#include <asm/intel-mid.h>
 
 #define PCIE_CAP_OFFSET	0x100
 
@@ -203,7 +204,7 @@
 			       where, size, value);
 }
 
-static int mrst_pci_irq_enable(struct pci_dev *dev)
+static int intel_mid_pci_irq_enable(struct pci_dev *dev)
 {
 	u8 pin;
 	struct io_apic_irq_attr irq_attr;
@@ -214,31 +215,36 @@
 	 * IOAPIC RTE entries, so we just enable RTE for the device.
 	 */
 	irq_attr.ioapic = mp_find_ioapic(dev->irq);
+	if (irq_attr.ioapic < 0)
+		return -1;
 	irq_attr.ioapic_pin = dev->irq;
 	irq_attr.trigger = 1; /* level */
-	irq_attr.polarity = 1; /* active low */
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER)
+		irq_attr.polarity = 0; /* active high */
+	else
+		irq_attr.polarity = 1; /* active low */
 	io_apic_set_pci_routing(&dev->dev, dev->irq, &irq_attr);
 
 	return 0;
 }
 
-struct pci_ops pci_mrst_ops = {
+struct pci_ops intel_mid_pci_ops = {
 	.read = pci_read,
 	.write = pci_write,
 };
 
 /**
- * pci_mrst_init - installs pci_mrst_ops
+ * intel_mid_pci_init - installs intel_mid_pci_ops
  *
  * Moorestown has an interesting PCI implementation (see above).
  * Called when the early platform detection installs it.
  */
-int __init pci_mrst_init(void)
+int __init intel_mid_pci_init(void)
 {
 	printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
 	pci_mmcfg_late_init();
-	pcibios_enable_irq = mrst_pci_irq_enable;
-	pci_root_ops = pci_mrst_ops;
+	pcibios_enable_irq = intel_mid_pci_irq_enable;
+	pci_root_ops = intel_mid_pci_ops;
 	pci_soc_mode = 1;
 	/* Continue with standard init */
 	return 1;
@@ -259,6 +265,7 @@
 	if (type1_access_ok(dev->bus->number, dev->devfn, PCI_DEVICE_ID))
 		return;
 	dev->d3_delay = 0;
+	dev->d3cold_delay = 0;
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_d3delay_fixup);
 
diff --git a/arch/x86/platform/Makefile b/arch/x86/platform/Makefile
index 01e0231..20342d4 100644
--- a/arch/x86/platform/Makefile
+++ b/arch/x86/platform/Makefile
@@ -4,7 +4,7 @@
 obj-y	+= geode/
 obj-y	+= goldfish/
 obj-y	+= iris/
-obj-y	+= mrst/
+obj-y	+= intel-mid/
 obj-y	+= olpc/
 obj-y	+= scx200/
 obj-y	+= sfi/
diff --git a/arch/x86/platform/intel-mid/Makefile b/arch/x86/platform/intel-mid/Makefile
new file mode 100644
index 0000000..57886ea
--- /dev/null
+++ b/arch/x86/platform/intel-mid/Makefile
@@ -0,0 +1,27 @@
+obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o
+obj-$(CONFIG_X86_INTEL_MID) += intel_mid_vrtc.o
+obj-$(CONFIG_EARLY_PRINTK_INTEL_MID) += early_printk_intel_mid.o
+
+# SFI specific code
+obj-$(CONFIG_SFI) += intel_mid_sfi.o
+
+# platform configuration for board devices
+obj-y += device_libs/
+
+# SoC specific files
+obj-$(CONFIG_X86_INTEL_MID) += mfld.o mrfl.o
+obj-$(CONFIG_X86_WANT_INTEL_MID) += intel_mid_pcihelpers.o
+obj-$(CONFIG_X86_INTEL_MID) += intel_mid_scu.o
+
+# BOARD files
+obj-$(CONFIG_X86_INTEL_MID) += board.o
+
+# PMU driver
+obj-$(CONFIG_ATOM_SOC_POWER) += intel_soc_pmu.o intel_soc_pm_debug.o intel_soc_dump.o
+obj-$(CONFIG_REMOVEME_INTEL_ATOM_MDFLD_POWER) += intel_soc_mdfld.o intel_soc_mdfld_clv_common.o
+obj-$(CONFIG_REMOVEME_INTEL_ATOM_CLV_POWER) += intel_soc_clv.o intel_soc_mdfld_clv_common.o
+obj-$(CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER) += intel_soc_mrfld.o
+obj-$(CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER) += pmu_tng.o
+
+# Debug features driver
+obj-$(CONFIG_INTEL_DEBUG_FEATURE) += intel_soc_debug.o
diff --git a/arch/x86/platform/intel-mid/board.c b/arch/x86/platform/intel-mid/board.c
new file mode 100644
index 0000000..eb3d8a4
--- /dev/null
+++ b/arch/x86/platform/intel-mid/board.c
@@ -0,0 +1,165 @@
+/*
+ * board.c: Intel Medfield based board (Blackbay)
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/sfi.h>
+#include <linux/intel_pmic_gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/i2c.h>
+#include <linux/i2c/pca953x.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/intel_msic.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/i2c-gpio.h>
+
+#include <asm/setup.h>
+#include <asm/mpspec_def.h>
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <asm/io_apic.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_vrtc.h>
+#include <asm/io.h>
+#include <asm/i8259.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/apb_timer.h>
+#include <asm/reboot.h>
+
+/*
+ * IPC devices
+ */
+#include "device_libs/platform_ipc.h"
+#include "device_libs/platform_pmic_gpio.h"
+#include "device_libs/platform_msic.h"
+#include "device_libs/platform_msic_battery.h"
+#include "device_libs/platform_msic_gpio.h"
+#include "device_libs/platform_msic_audio.h"
+#include "device_libs/platform_msic_power_btn.h"
+#include "device_libs/platform_msic_ocd.h"
+#include "device_libs/platform_mrfl_pmic.h"
+#include "device_libs/platform_mrfl_pmic_i2c.h"
+#include "device_libs/platform_mrfl_ocd.h"
+#include "device_libs/platform_msic_thermal.h"
+#include "device_libs/platform_soc_thermal.h"
+#include "device_libs/platform_msic_adc.h"
+#include "device_libs/platform_bcove_adc.h"
+#include "device_libs/platform_mrfld_audio.h"
+#include "device_libs/platform_mrfl_thermal.h"
+
+/*
+ * I2C devices
+ */
+#include "device_libs/platform_max7315.h"
+#include "device_libs/platform_tca6416.h"
+#include "device_libs/platform_mpu3050.h"
+#include "device_libs/platform_emc1403.h"
+#include "device_libs/platform_lis331.h"
+#include "device_libs/platform_mpu3050.h"
+#include "device_libs/platform_tc35876x.h"
+#include "device_libs/platform_bq24261.h"
+#include "device_libs/platform_pcal9555a.h"
+
+#include "device_libs/platform_wm8994.h"
+
+/*
+ * SPI devices
+ */
+#include "device_libs/platform_max3111.h"
+#include "device_libs/platform_spidev.h"
+#include "device_libs/platform_ads7955.h"
+
+/* WIFI devices */
+#include "device_libs/platform_wl12xx.h"
+#include "device_libs/platform_wifi.h"
+
+static void __init *no_platform_data(void *info)
+{
+	return NULL;
+}
+
+struct devs_id __initconst device_ids[] = {
+	/* SD devices */
+	{"wl12xx_clk_vmmc", SFI_DEV_TYPE_SD, 0, &wl12xx_platform_data, NULL},
+	{"bcm43xx_clk_vmmc", SFI_DEV_TYPE_SD, 0, &wifi_platform_data, NULL},
+	{"bcm43xx_vmmc", SFI_DEV_TYPE_SD, 0, &wifi_platform_data, NULL},
+	{"iwlwifi_clk_vmmc", SFI_DEV_TYPE_SD, 0, &wifi_platform_data, NULL},
+	{"WLAN_FAST_IRQ", SFI_DEV_TYPE_SD, 0, &no_platform_data,
+	 &wifi_platform_data_fastirq},
+
+	/* I2C devices*/
+	{"pcal9555a-1", SFI_DEV_TYPE_I2C, 1, &pcal9555a_platform_data, NULL},
+	{"pcal9555a-2", SFI_DEV_TYPE_I2C, 1, &pcal9555a_platform_data, NULL},
+	{"pcal9555a-3", SFI_DEV_TYPE_I2C, 1, &pcal9555a_platform_data, NULL},
+	{"pcal9555a-4", SFI_DEV_TYPE_I2C, 1, &pcal9555a_platform_data, NULL},
+
+	/* SPI devices */
+	{"spidev", SFI_DEV_TYPE_SPI, 0, &spidev_platform_data, NULL},
+	{"ads7955", SFI_DEV_TYPE_SPI, 0, &ads7955_platform_data, NULL},
+	{"bma023", SFI_DEV_TYPE_I2C, 1, &no_platform_data, NULL},
+	{"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data, NULL},
+	{"pmic_gpio", SFI_DEV_TYPE_IPC, 1, &pmic_gpio_platform_data,
+					&ipc_device_handler},
+	{"spi_max3111", SFI_DEV_TYPE_SPI, 0, &max3111_platform_data, NULL},
+	{"i2c_max7315", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data, NULL},
+	{"i2c_max7315_2", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data, NULL},
+	{"tca6416", SFI_DEV_TYPE_I2C, 1, &tca6416_platform_data, NULL},
+	{"emc1403", SFI_DEV_TYPE_I2C, 1, &emc1403_platform_data, NULL},
+	{"i2c_accel", SFI_DEV_TYPE_I2C, 0, &lis331dl_platform_data, NULL},
+	{"pmic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data,
+					&ipc_device_handler},
+	{"mpu3050", SFI_DEV_TYPE_I2C, 1, &mpu3050_platform_data, NULL},
+	{"i2c_disp_brig", SFI_DEV_TYPE_I2C, 0, &tc35876x_platform_data, NULL},
+
+	/* MSIC subdevices */
+	{"msic_adc", SFI_DEV_TYPE_IPC, 1, &msic_adc_platform_data,
+						&ipc_device_handler},
+	{"bcove_power_btn", SFI_DEV_TYPE_IPC, 1, &msic_power_btn_platform_data,
+					&ipc_device_handler},
+	{"msic_battery", SFI_DEV_TYPE_IPC, 1, &msic_battery_platform_data,
+					&ipc_device_handler},
+	{"msic_gpio", SFI_DEV_TYPE_IPC, 1, &msic_gpio_platform_data,
+					&ipc_device_handler},
+	{"msic_audio", SFI_DEV_TYPE_IPC, 1, &msic_audio_platform_data,
+					&ipc_device_handler},
+	{"msic_power_btn", SFI_DEV_TYPE_IPC, 1, &msic_power_btn_platform_data,
+					&ipc_device_handler},
+	{"msic_ocd", SFI_DEV_TYPE_IPC, 1, &msic_ocd_platform_data,
+					&ipc_device_handler},
+	{"bcove_bcu", SFI_DEV_TYPE_IPC, 1, &mrfl_ocd_platform_data,
+					&ipc_device_handler},
+	{"msic_thermal", SFI_DEV_TYPE_IPC, 1, &msic_thermal_platform_data,
+					&ipc_device_handler},
+	{"bcove_adc", SFI_DEV_TYPE_IPC, 1, &bcove_adc_platform_data,
+					&ipc_device_handler},
+	{"bcove_thrm", SFI_DEV_TYPE_IPC, 1, &mrfl_thermal_platform_data,
+					&ipc_device_handler},
+	{"wm8994", SFI_DEV_TYPE_I2C, 0, &wm8994_platform_data, NULL},
+	/* IPC devices */
+	{"pmic_charger", SFI_DEV_TYPE_IPC, 1, &no_platform_data, NULL},
+	{"pmic_ccsm", SFI_DEV_TYPE_IPC, 1, &mrfl_pmic_ccsm_platform_data,
+						&ipc_device_handler},
+	{"i2c_pmic_adap", SFI_DEV_TYPE_IPC, 1, &mrfl_pmic_i2c_platform_data,
+						&ipc_device_handler},
+	{"mrfld_sst", SFI_DEV_TYPE_IPC, 1, &mrfld_sst_audio_platform_data,
+						&ipc_device_handler},
+	{"soc_thrm", SFI_DEV_TYPE_IPC, 1, &no_platform_data,
+						&soc_thrm_device_handler},
+	{},
+};
diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile
new file mode 100644
index 0000000..20b06c5
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/Makefile
@@ -0,0 +1,67 @@
+# IPC Devices
+obj-y += platform_sst_audio.o
+obj-y += platform_mrfl_regulator.o
+obj-y += platform_soc_thermal.o
+obj-$(subst m,y,$(CONFIG_SND_BYT_MACHINE)) += platform_byt_audio.o
+obj-$(subst m,y,$(CONFIG_SND_MRFLD_MACHINE)) += platform_mrfld_audio.o
+obj-$(subst m,y,$(CONFIG_SND_CTP_MACHINE)) += platform_ctp_audio.o
+obj-y += platform_ipc.o
+obj-y += platform_msic.o
+obj-y += platform_msic_audio.o
+obj-y += platform_msic_gpio.o
+obj-y += platform_msic_ocd.o
+obj-y += platform_tc35876x.o
+obj-y += pci/
+obj-$(subst m,y,$(CONFIG_BATTERY_INTEL_MDF)) += platform_msic_battery.o
+obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_msic_power_btn.o
+obj-$(subst m,y,$(CONFIG_GPIO_INTEL_PMIC)) += platform_pmic_gpio.o
+obj-$(subst m,y,$(CONFIG_MID_PWM)) += platform_mid_pwm.o
+obj-$(subst m,y,$(CONFIG_INTEL_MFLD_THERMAL)) += platform_msic_thermal.o
+obj-$(subst m,y,$(CONFIG_SENSORS_MID_VDD)) += platform_msic_vdd.o
+obj-$(subst m,y,$(CONFIG_SENSORS_MRFL_OCD)) += platform_mrfl_ocd.o
+obj-$(subst m,y,$(CONFIG_PMIC_CCSM)) += platform_mrfl_pmic.o
+obj-$(subst m,y,$(CONFIG_I2C_PMIC)) += platform_mrfl_pmic_i2c.o
+ifdef CONFIG_INTEL_BYT_THERMAL
+obj-$(subst m,y,$(CONFIG_INTEL_BYT_THERMAL)) += platform_byt_thermal.o
+else
+obj-$(subst m,y,$(CONFIG_INTEL_BYT_EC_THERMAL)) += platform_byt_thermal.o
+endif
+obj-$(subst m,y,$(CONFIG_SENSORS_THERMAL_MRFLD)) += platform_mrfl_thermal.o
+obj-$(subst m,y,$(CONFIG_INTEL_SCU_FLIS)) += platform_scu_flis.o
+# I2C Devices
+obj-$(subst m,y,$(CONFIG_I2C_DESIGNWARE_CORE_FORK)) += platform_dw_i2c.o
+obj-$(subst m,y,$(CONFIG_SENSORS_EMC1403)) += platform_emc1403.o
+obj-$(subst m,y,$(CONFIG_SENSORS_LIS3LV02D)) += platform_lis331.o
+obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_max7315.o
+obj-$(subst m,y,$(CONFIG_SENSORS_MPU3050)) += platform_mpu3050.o
+obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
+obj-$(subst m,y,$(CONFIG_BQ24261_CHARGER)) += platform_bq24261.o
+obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o
+obj-$(subst m,y,$(CONFIG_SND_SOC_WM8994)) += platform_wm8994.o
+# SPI Devices
+obj-$(subst m,y,$(CONFIG_SERIAL_MRST_MAX3110)) += platform_max3111.o
+obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_spidev.o
+obj-$(subst m,y,$(CONFIG_TI_ADS7955_ADC)) += platform_ads7955.o
+# MISC Devices
+obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
+obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_wdt.o
+# ADC
+obj-$(subst m,y,$(CONFIG_MSIC_GPADC))	+= platform_msic_adc.o
+obj-$(subst m,y,$(CONFIG_IIO_BASINCOVE_GPADC))	+= platform_bcove_adc.o
+# UART Devices
+obj-$(subst m,y,$(CONFIG_SERIAL_MFD_HSU)) += platform_hsu.o
+# SD Devices
+obj-$(subst m,y,$(CONFIG_WILINK_PLATFORM_DATA)) += platform_wl12xx.o
+ifndef CONFIG_ACPI
+obj-$(subst m,y,$(CONFIG_BCM_BT_LPM)) += platform_btlpm.o
+endif
+# Panel Control Device
+obj-$(subst m,y,$(CONFIG_DRM_MRFLD)) += platform_panel.o
+# GPS
+obj-$(subst m,y,$(CONFIG_INTEL_MID_GPS)) += platform_gps.o
+# WIFI devices
+obj-$(subst m,y,$(CONFIG_WIFI_PLATFORM_DATA)) += platform_wifi.o
+obj-$(subst m,y,$(CONFIG_MMC_SDHCI_ACPI)) += platform_sdio_regulator.o
+# SCU log
+obj-$(subst m,y,$(CONFIG_SCU_LOGGING)) += platform_scu_log.o
diff --git a/arch/x86/platform/intel-mid/device_libs/pci/Makefile b/arch/x86/platform/intel-mid/device_libs/pci/Makefile
new file mode 100644
index 0000000..47c7b78
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/pci/Makefile
@@ -0,0 +1,6 @@
+# MMC SDHCI PCI host controller platform data
+obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += platform_sdhci_pci.o
+# USB OTG controller platform data
+obj-$(subst m,y,$(CONFIG_USB_PENWELL_OTG)) += platform_usb_otg.o
+obj-$(subst m,y,$(CONFIG_USB_DWC3_OTG)) += platform_usb_otg.o
+obj-$(subst m,y,$(CONFIG_SND_INTEL_SST)) += platform_sst_pci.o
diff --git a/arch/x86/platform/intel-mid/device_libs/pci/platform_sdhci_pci.c b/arch/x86/platform/intel-mid/device_libs/pci/platform_sdhci_pci.c
new file mode 100644
index 0000000..3b22e3b
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/pci/platform_sdhci_pci.c
@@ -0,0 +1,588 @@
+/*
+ * platform_sdhci_pci.c: MMC SDHCI PCI platform data initialization file
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/intel-mid.h>
+#include <linux/mmc/sdhci-pci-data.h>
+#include <linux/gpio.h>
+#include <linux/lnw_gpio.h>
+#include <linux/delay.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_scu_pmic.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/hardirq.h>
+#include <linux/mmc/sdhci.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
+#include <linux/acpi.h>
+#include <linux/acpi_gpio.h>
+
+#include "platform_sdhci_pci.h"
+
+#ifdef CONFIG_ATOM_SOC_POWER
+static int panic_mode_emmc0_power_up(void *data)
+{
+	int ret;
+	bool atomic_context;
+	/*
+	 * pmu_set_emmc_to_d0i0_atomic() may only be called in atomic
+	 * context, so verify that we really are in atomic context
+	 * before calling it.
+	 */
+	atomic_context = (!preemptible() || in_atomic_preempt_off());
+
+	if (!atomic_context) {
+		pr_err("%s: not in atomic context!\n", __func__);
+		return -EPERM;
+	}
+
+	ret = pmu_set_emmc_to_d0i0_atomic();
+	if (ret) {
+		pr_err("%s: power up host failed with err %d\n",
+				__func__, ret);
+	}
+
+	return ret;
+}
+#else
+static int panic_mode_emmc0_power_up(void *data)
+{
+	return 0;
+}
+#endif
+
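+/*
+ * Default SDIO quirks; board code can override these via
+ * sdhci_pdata_set_quirks().
+ */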
+static unsigned int sdhci_pdata_quirks = SDHCI_QUIRK2_ADVERTISE_2V0_FORCE_1V8
+		| SDHCI_QUIRK2_ENABLE_MMC_PM_IGNORE_PM_NOTIFY;
+
+int sdhci_pdata_set_quirks(const unsigned int quirks)
+{
+	sdhci_pdata_quirks = quirks;
+	return 0;
+}
+
+static int mrfl_flis_check(void *data, unsigned int clk);
+static int mrfl_sdio_setup(struct sdhci_pci_data *data);
+static void mrfl_sdio_cleanup(struct sdhci_pci_data *data);
+
+static void (*sdhci_embedded_control)(void *dev_id, void (*virtual_cd)
+					(void *dev_id, int card_present));
+
+/*****************************************************************************\
+ *                                                                           *
+ *  Regulator declaration for WLAN SDIO card                                 *
+ *                                                                           *
+\*****************************************************************************/
+
+#define DELAY_ONOFF 250
+
+static struct regulator_consumer_supply wlan_vmmc_supply = {
+	.supply		= "vmmc",
+};
+
+static struct regulator_init_data wlan_vmmc_data = {
+	.constraints = {
+		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
+	},
+	.num_consumer_supplies	= 1,
+	.consumer_supplies = &wlan_vmmc_supply,
+};
+
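+/*
+ * Fixed 1.8 V regulator that toggles the WLAN-enable GPIO; the actual
+ * GPIO number is filled in by the board-specific setup hooks below.
+ */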
+static struct fixed_voltage_config vwlan = {
+	.supply_name		= "vwlan",
+	.microvolts		= 1800000,
+	.gpio			= -EINVAL,
+	.startup_delay		= 1000 * DELAY_ONOFF,
+	.enable_high		= 1,
+	.enabled_at_boot	= 0,
+	.init_data		= &wlan_vmmc_data,
+};
+
+static void vwlan_device_release(struct device *dev) {}
+
+static struct platform_device vwlan_device = {
+	.name		= "reg-fixed-voltage",
+	.id		= PLATFORM_DEVID_AUTO,
+	.dev = {
+		.platform_data	= &vwlan,
+		.release = vwlan_device_release,
+	},
+};
+
+
+/* Board specific setup related to SDIO goes here */
+static int mfld_sdio_setup(struct sdhci_pci_data *data)
+{
+	struct pci_dev *pdev = data->pdev;
+	/* Control card power through a regulator */
+	wlan_vmmc_supply.dev_name = dev_name(&pdev->dev);
+	vwlan.gpio = get_gpio_by_name("WLAN-enable");
+	if (vwlan.gpio < 0)
+		pr_err("%s: No WLAN-enable GPIO in SFI table\n",
+		       __func__);
+	pr_info("vwlan gpio %d\n", vwlan.gpio);
+	/* add a regulator to control wlan enable gpio */
+	if (platform_device_register(&vwlan_device))
+		pr_err("regulator register failed\n");
+	else
+		sdhci_pci_request_regulators();
+
+	return 0;
+}
+
+
+/* MFLD platform data */
+static struct sdhci_pci_data mfld_sdhci_pci_data[] = {
+	[EMMC0_INDEX] = {
+			.pdev = NULL,
+			.slotno = 0,
+			.rst_n_gpio = -EINVAL,
+			.cd_gpio = -EINVAL,
+			.setup = 0,
+			.cleanup = 0,
+			.power_up = panic_mode_emmc0_power_up,
+	},
+	[EMMC1_INDEX] = {
+			.pdev = NULL,
+			.slotno = 0,
+			.rst_n_gpio = -EINVAL,
+			.cd_gpio = -EINVAL,
+			.setup = 0,
+			.cleanup = 0,
+	},
+	[SD_INDEX] = {
+			.pdev = NULL,
+			.slotno = 0,
+			.rst_n_gpio = -EINVAL,
+			.cd_gpio = 69,
+			.setup = 0,
+			.cleanup = 0,
+	},
+	[SDIO_INDEX] = {
+			.pdev = NULL,
+			.slotno = 0,
+			.rst_n_gpio = -EINVAL,
+			.cd_gpio = -EINVAL,
+			.quirks = 0,
+			.platform_quirks = 0,
+			.setup = mfld_sdio_setup,
+			.cleanup = 0,
+	},
+};
+
+/* Board specific setup related to SDIO goes here */
+static int clv_sdio_setup(struct sdhci_pci_data *data)
+{
+	struct pci_dev *pdev = data->pdev;
+	/* Control card power through a regulator */
+	wlan_vmmc_supply.dev_name = dev_name(&pdev->dev);
+	vwlan.gpio = get_gpio_by_name("WLAN-enable");
+	if (vwlan.gpio < 0)
+		pr_err("%s: No WLAN-enable GPIO in SFI table\n",
+		       __func__);
+	pr_info("vwlan gpio %d\n", vwlan.gpio);
+	/* add a regulator to control wlan enable gpio */
+	if (platform_device_register(&vwlan_device))
+		pr_err("regulator register failed\n");
+	else
+		sdhci_pci_request_regulators();
+
+	return 0;
+}
+
+/* CLV platform data */
+static struct sdhci_pci_data clv_sdhci_pci_data[] = {
+	[EMMC0_INDEX] = {
+			.pdev = NULL,
+			.slotno = 0,
+			.rst_n_gpio = -EINVAL,
+			.cd_gpio = -EINVAL,
+			.setup = 0,
+			.cleanup = 0,
+			.power_up = panic_mode_emmc0_power_up,
+	},
+	[EMMC1_INDEX] = {
+			.pdev = NULL,
+			.slotno = 0,
+			.rst_n_gpio = -EINVAL,
+			.cd_gpio = -EINVAL,
+			.setup = 0,
+			.cleanup = 0,
+	},
+	[SD_INDEX] = {
+			.pdev = NULL,
+			.slotno = 0,
+			.rst_n_gpio = -EINVAL,
+			.cd_gpio = 69,
+			.setup = 0,
+			.cleanup = 0,
+	},
+	[SDIO_INDEX] = {
+			.pdev = NULL,
+			.slotno = 0,
+			.rst_n_gpio = -EINVAL,
+			.cd_gpio = -EINVAL,
+			.quirks = 0,
+			.platform_quirks = 0,
+			.setup = clv_sdio_setup,
+			.cleanup = 0,
+	},
+};
+
+#define TNG_EMMC_0_FLIS_ADDR		0xff0c0900
+#define TNG_EMMC_FLIS_SLEW		0x00000400
+#define TNG_EMMC_0_CLK_PULLDOWN		0x00000200
+static int mrfl_flis_slew_change(int slew)
+{
+	void __iomem *flis_addr;
+	unsigned int reg;
+	int i;
+
+	flis_addr = ioremap_nocache(TNG_EMMC_0_FLIS_ADDR, 64);
+	if (!flis_addr) {
+		pr_err("flis_addr ioremap fail!\n");
+		return -ENOMEM;
+	}
+	pr_info("flis_addr mapped addr: %p\n", flis_addr);
+
+	/*
+	 * Change the TNG gpio FLIS settings for all eMMC0
+	 * CLK/CMD/DAT pins, i.e. emmc_0_clk, emmc_0_cmd and
+	 * emmc_0_d_0 through emmc_0_d_7.
+	 */
+	for (i = 0; i < 10; i++) {
+		reg = readl(flis_addr + (i * 4));
+		if (slew)
+			reg |= TNG_EMMC_FLIS_SLEW; /* SLEW B */
+		else
+			reg &= ~TNG_EMMC_FLIS_SLEW; /* SLEW A */
+		writel(reg, flis_addr + (i * 4));
+	}
+
+	/* Disable the pull-down for emmc_0_clk */
+	reg = readl(flis_addr);
+	reg &= ~TNG_EMMC_0_CLK_PULLDOWN;
+	writel(reg, flis_addr);
+
+	iounmap(flis_addr);
+
+	return 0;
+}
+
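+/*
+ * The eMMC0 pads use slew setting B above 52 MHz (HS200) and setting A
+ * at or below it; switch whenever a clock change crosses that boundary.
+ */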
+static int mrfl_flis_check(void *data, unsigned int clk)
+{
+	struct sdhci_host *host = data;
+	int ret = 0;
+
+	if ((host->clock <= 52000000) && (clk > 52000000))
+		ret = mrfl_flis_slew_change(1);
+	else if ((host->clock > 52000000) && (clk <= 52000000))
+		ret = mrfl_flis_slew_change(0);
+
+	return ret;
+}
+
+/* Board specific setup related to eMMC goes here */
+static int mrfl_emmc_setup(struct sdhci_pci_data *data)
+{
+	struct pci_dev *pdev = data->pdev;
+	int ret = 0;
+
+	if (pdev->revision == 0x01) /* TNG B0 stepping */
+		ret = mrfl_flis_slew_change(1); /* HS200 FLIS slew setting */
+
+	return ret;
+}
+
+/* Board specific setup related to SD goes here */
+static int mrfl_sd_setup(struct sdhci_pci_data *data)
+{
+	u8 vldocnt = 0;
+	int err;
+
+	/*
+	 * Change the necessary GPIO pin mode so the SD card can work.
+	 * This should really be done by IA firmware, but do it here
+	 * anyway in case the firmware forgot to.
+	 */
+	lnw_gpio_set_alt(MRFLD_GPIO_SDIO_0_CD, 0);
+
+	err = intel_scu_ipc_ioread8(MRFLD_PMIC_VLDOCNT, &vldocnt);
+	if (err) {
+		pr_err("PMIC vldocnt IPC read error: %d\n", err);
+		return err;
+	}
+
+	vldocnt |= MRFLD_PMIC_VLDOCNT_VSWITCH_BIT;
+	err = intel_scu_ipc_iowrite8(MRFLD_PMIC_VLDOCNT, vldocnt);
+	if (err) {
+		pr_err("PMIC vldocnt IPC write error: %d\n", err);
+		return err;
+	}
+	msleep(20);
+
+	return 0;
+}
+
+/* Board specific cleanup related to SD goes here */
+static void mrfl_sd_cleanup(struct sdhci_pci_data *data)
+{
+}
+
+/* Board specific setup related to SDIO goes here */
+static int mrfl_sdio_setup(struct sdhci_pci_data *data)
+{
+	struct pci_dev *pdev = data->pdev;
+	/* Control card power through a regulator */
+	wlan_vmmc_supply.dev_name = dev_name(&pdev->dev);
+	vwlan.gpio = get_gpio_by_name("WLAN-enable");
+	if (vwlan.gpio < 0)
+		pr_err("%s: No WLAN-enable GPIO in SFI table\n",
+		       __func__);
+	pr_info("vwlan gpio %d\n", vwlan.gpio);
+	/* add a regulator to control wlan enable gpio */
+	if (platform_device_register(&vwlan_device))
+		pr_err("regulator register failed\n");
+	else
+		sdhci_pci_request_regulators();
+
+	return 0;
+}
+
+/* Board specific cleanup related to SDIO goes here */
+static void mrfl_sdio_cleanup(struct sdhci_pci_data *data)
+{
+}
+
+/* MRFL platform data */
+static struct sdhci_pci_data mrfl_sdhci_pci_data[] = {
+	[EMMC0_INDEX] = {
+			.pdev = NULL,
+			.slotno = EMMC0_INDEX,
+			.rst_n_gpio = -EINVAL,
+			.cd_gpio = -EINVAL,
+			.quirks = 0,
+			.platform_quirks = 0,
+			.setup = mrfl_emmc_setup,
+			.cleanup = 0,
+			.power_up = panic_mode_emmc0_power_up,
+	},
+	[SD_INDEX] = {
+			.pdev = NULL,
+			.slotno = SD_INDEX,
+			.rst_n_gpio = -EINVAL,
+			.cd_gpio = 77,
+			.quirks = 0,
+			.platform_quirks = 0,
+			.setup = mrfl_sd_setup,
+			.cleanup = mrfl_sd_cleanup,
+	},
+	[SDIO_INDEX] = {
+			.pdev = NULL,
+			.slotno = SDIO_INDEX,
+			.rst_n_gpio = -EINVAL,
+			.cd_gpio = -EINVAL,
+			.quirks = 0,
+			.platform_quirks = 0,
+			.setup = mrfl_sdio_setup,
+			.cleanup = mrfl_sdio_cleanup,
+	},
+};
+
+/* Board specific setup related to SDIO goes here */
+static int byt_sdio_setup(struct sdhci_pci_data *data)
+{
+	struct pci_dev *pdev = data->pdev;
+#ifdef CONFIG_ACPI
+	acpi_handle handle;
+	acpi_status status;
+#endif
+
+	/* Control card power through a regulator */
+	wlan_vmmc_supply.dev_name = dev_name(&pdev->dev);
+
+#ifdef CONFIG_ACPI
+	status = acpi_get_handle(NULL, "\\_SB.SDHB", &handle);
+	if (ACPI_FAILURE(status))
+		pr_err("wlan: cannot get SDHB acpi handle\n");
+	ACPI_HANDLE_SET(&pdev->dev, handle);
+	vwlan.gpio = acpi_get_gpio_by_index(&pdev->dev, 0, NULL);
+#endif
+	if (vwlan.gpio < 0)
+		pr_err("%s: No wlan-enable GPIO in SDHB ACPI block\n",
+		       __func__);
+
+	pr_info("vwlan gpio %d\n", vwlan.gpio);
+
+	/* add a regulator to control wlan enable gpio */
+	if (platform_device_register(&vwlan_device))
+		pr_err("regulator register failed\n");
+	else
+		sdhci_pci_request_regulators();
+
+	return 0;
+}
+
+
+/* BYT platform data */
+static struct sdhci_pci_data byt_sdhci_pci_data[] = {
+	[SDIO_INDEX] = {
+			.pdev = NULL,
+			.slotno = 0,
+			.rst_n_gpio = -EINVAL,
+			.cd_gpio = -EINVAL,
+			.quirks = 0,
+			.platform_quirks = 0,
+			.setup = byt_sdio_setup,
+			.cleanup = NULL,
+	},
+};
+
+static struct sdhci_pci_data *get_sdhci_platform_data(struct pci_dev *pdev)
+{
+	struct sdhci_pci_data *pdata = NULL;
+
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_MFD_EMMC0:
+		pdata = &mfld_sdhci_pci_data[EMMC0_INDEX];
+		break;
+	case PCI_DEVICE_ID_INTEL_MFD_EMMC1:
+		pdata = &mfld_sdhci_pci_data[EMMC1_INDEX];
+		break;
+	case PCI_DEVICE_ID_INTEL_MFD_SD:
+		pdata = &mfld_sdhci_pci_data[SD_INDEX];
+		break;
+	case PCI_DEVICE_ID_INTEL_MFD_SDIO1:
+		pdata = &mfld_sdhci_pci_data[SDIO_INDEX];
+		pdata->quirks = sdhci_pdata_quirks;
+		pdata->register_embedded_control = sdhci_embedded_control;
+		break;
+	case PCI_DEVICE_ID_INTEL_CLV_EMMC0:
+		pdata = &clv_sdhci_pci_data[EMMC0_INDEX];
+		pdata->rst_n_gpio = get_gpio_by_name("emmc0_rst");
+		break;
+	case PCI_DEVICE_ID_INTEL_CLV_EMMC1:
+		pdata = &clv_sdhci_pci_data[EMMC1_INDEX];
+		pdata->rst_n_gpio = get_gpio_by_name("emmc1_rst");
+		break;
+	case PCI_DEVICE_ID_INTEL_CLV_SDIO0:
+		pdata = &clv_sdhci_pci_data[SD_INDEX];
+		break;
+	case PCI_DEVICE_ID_INTEL_CLV_SDIO1:
+		pdata = &clv_sdhci_pci_data[SDIO_INDEX];
+		pdata->quirks = sdhci_pdata_quirks;
+		pdata->register_embedded_control = sdhci_embedded_control;
+		break;
+	case PCI_DEVICE_ID_INTEL_MRFL_MMC:
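+		/*
+		 * On Merrifield a single PCI device hosts all four SDHCI
+		 * controllers; PCI functions 0-3 map to eMMC0, eMMC1,
+		 * SD and SDIO respectively.
+		 */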
+		switch (PCI_FUNC(pdev->devfn)) {
+		case 0:
+			pdata = &mrfl_sdhci_pci_data[EMMC0_INDEX];
+			/*
+			 * The current eMMC device simulation in Merrifield
+			 * VP only implements boot partition 0, not boot
+			 * partition 1, and the VP will crash if boot
+			 * partition 1 is accessed during kernel boot. So
+			 * just disable boot partition support on the
+			 * Merrifield VP platform.
+			 */
+			if (intel_mid_identify_sim() ==
+					INTEL_MID_CPU_SIMULATION_VP)
+				pdata->platform_quirks |=
+					PLFM_QUIRK_NO_EMMC_BOOT_PART;
+			if (intel_mid_identify_sim() ==
+					INTEL_MID_CPU_SIMULATION_HVP)
+				pdata->platform_quirks |=
+					PLFM_QUIRK_NO_HIGH_SPEED;
+			break;
+		case 1:
+			pdata = &mrfl_sdhci_pci_data[EMMC1_INDEX];
+			if (intel_mid_identify_sim() ==
+					INTEL_MID_CPU_SIMULATION_VP)
+				pdata->platform_quirks |=
+					PLFM_QUIRK_NO_EMMC_BOOT_PART;
+			/*
+			 * The Merrifield HVP platform only implements the
+			 * eMMC0 host controller in its FPGA; the other
+			 * three Merrifield SDHCI host controllers are not
+			 * implemented.
+			 */
+			if (intel_mid_identify_sim() ==
+					INTEL_MID_CPU_SIMULATION_HVP)
+				pdata->platform_quirks |=
+					PLFM_QUIRK_NO_HOST_CTRL_HW;
+			break;
+		case 2:
+			pdata = &mrfl_sdhci_pci_data[SD_INDEX];
+			if (intel_mid_identify_sim() ==
+					INTEL_MID_CPU_SIMULATION_HVP)
+				pdata->platform_quirks |=
+					PLFM_QUIRK_NO_HOST_CTRL_HW;
+			break;
+		case 3:
+			pdata = &mrfl_sdhci_pci_data[SDIO_INDEX];
+			if (intel_mid_identify_sim() ==
+					INTEL_MID_CPU_SIMULATION_HVP)
+				pdata->platform_quirks |=
+					PLFM_QUIRK_NO_HOST_CTRL_HW;
+			pdata->quirks = sdhci_pdata_quirks;
+			pdata->register_embedded_control =
+				sdhci_embedded_control;
+			break;
+		default:
+			pr_err("%s func %s: Invalid PCI Dev func no. (%d)\n",
+				__FILE__, __func__, PCI_FUNC(pdev->devfn));
+			break;
+		}
+		break;
+	case PCI_DEVICE_ID_INTEL_BYT_SDIO:
+		pr_info("setting quirks/embedded controls on SDIO\n");
+		pdata = &byt_sdhci_pci_data[SDIO_INDEX];
+		pdata->quirks = sdhci_pdata_quirks;
+		pdata->register_embedded_control = sdhci_embedded_control;
+		break;
+	default:
+		break;
+	}
+	return pdata;
+}
+
+int sdhci_pdata_set_embedded_control(void (*fnp)
+			(void *dev_id, void (*virtual_cd)
+			(void *dev_id, int card_present)))
+{
+	WARN_ON(sdhci_embedded_control);
+	sdhci_embedded_control = fnp;
+	return 0;
+}
+
+struct sdhci_pci_data *mmc_sdhci_pci_get_data(struct pci_dev *pci_dev, int slotno)
+{
+	return get_sdhci_platform_data(pci_dev);
+}
+
+static int __init init_sdhci_get_data(void)
+{
+	sdhci_pci_get_data = mmc_sdhci_pci_get_data;
+
+	return 0;
+}
+
+arch_initcall(init_sdhci_get_data);
diff --git a/arch/x86/platform/intel-mid/device_libs/pci/platform_sdhci_pci.h b/arch/x86/platform/intel-mid/device_libs/pci/platform_sdhci_pci.h
new file mode 100644
index 0000000..9f14952
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/pci/platform_sdhci_pci.h
@@ -0,0 +1,30 @@
+/*
+ * platform_sdhci_pci.h: MMC SDHCI PCI platform data header file
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MMC_SDHCI_PCI_H_
+#define _PLATFORM_MMC_SDHCI_PCI_H_
+
+#define EMMC0_INDEX	0
+#define EMMC1_INDEX	1
+#define SD_INDEX	2
+#define SDIO_INDEX	3
+
+#define MRFLD_GPIO_SDIO_0_CD		77
+
+#define MRFLD_PMIC_VLDOCNT		0xaf
+#define MRFLD_PMIC_VLDOCNT_VSWITCH_BIT	0x02
+
+int sdhci_pdata_set_quirks(const unsigned int quirks);
+int sdhci_pdata_set_embedded_control(void (*fnp)
+			(void *dev_id, void (*virtual_cd)
+			(void *dev_id, int card_present)));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/pci/platform_sst_pci.c b/arch/x86/platform/intel-mid/device_libs/pci/platform_sst_pci.c
new file mode 100644
index 0000000..8721c3a
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/pci/platform_sst_pci.c
@@ -0,0 +1,229 @@
+/*
+ * platform_sst_pci.c: SST platform data initialization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author:  Dharageswari R <dharageswari.r@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/lnw_gpio.h>
+#include <linux/delay.h>
+#include <linux/intel_mid_dma.h>
+#include <asm/intel-mid.h>
+#include <asm/platform_sst.h>
+
+#define CTP_SSP_BASE 0xffa23000
+#define CTP_DMA_BASE 0xffaf8000
+#define MRFLD_SSP_BASE 0xff2a0000
+#define MRFLD_DMA_BASE 0xff298000
+#define CTP_MAX_CONFIG_SIZE 500
+
+#define SST_CTP_IRAM_START	0
+#define SST_CTP_IRAM_END	0x80000
+#define SST_CTP_DRAM_START	0x400000
+#define SST_CTP_DRAM_END	0x480000
+#define SSP_SIZE 0x1000
+#define DMA_SIZE_CTP 0x1000
+#define DMA_SIZE_MRFLD 0x4000
+#define SST_CHECKPOINT_OFFSET 0x1C00
+#define SST_CHECKPOINT_OFFSET_MRFLD 0x0C00
+#define CHECKPOINT_DUMP_SZ 256
+
+#define SST_V1_MAILBOX_RECV	0x800
+#define SST_V2_MAILBOX_RECV	0x400
+
+#define MRFLD_FW_LSP_DDR_BASE 0xC5E00000
+#define MRFLD_FW_MOD_END (MRFLD_FW_LSP_DDR_BASE + 0x1FFFFF)
+#define MRFLD_FW_MOD_TABLE_OFFSET 0x80000
+#define MRFLD_FW_MOD_TABLE_SIZE 0x100
+
+struct sst_platform_info sst_data;
+
+static struct sst_ssp_info ssp_inf_ctp = {
+	.base_add = CTP_SSP_BASE,
+	.gpio = {
+		.alt_function = LNW_ALT_2,
+	},
+	.gpio_in_use = true,
+};
+
+static struct sst_ssp_info ssp_inf_mrfld = {
+	.base_add = MRFLD_SSP_BASE,
+	.gpio_in_use = false,
+};
+
+static const struct sst_platform_config_data sst_ctp_pdata = {
+	.sst_sram_buff_base = 0xfffc0000,
+	.sst_dma_base[0] = CTP_DMA_BASE,
+	.sst_dma_base[1] = 0x0,
+};
+
+static struct sst_platform_config_data sst_mrfld_pdata = {
+	.sst_dma_base[0] = MRFLD_DMA_BASE,
+	.sst_dma_base[1] = 0x0,
+};
+
+static const struct sst_board_config_data sst_ctp_bdata = {
+	.active_ssp_ports = 4,
+	.platform_id = 2, /* FIXME: once the firmware fix is available */
+	.board_id = 1, /* FIXME: once the firmware fix is available */
+	.ihf_num_chan = 2,
+	.osc_clk_freq = 19200000,
+	.ssp_platform_data = {
+		[SST_SSP_AUDIO] = {
+				.ssp_cfg_sst = 1,
+				.port_number = 3,
+				.is_master = 1,
+				.pack_mode = 1,
+				.num_slots_per_frame = 2,
+				.num_bits_per_slot = 25,
+				.active_tx_map = 3,
+				.active_rx_map = 3,
+				.ssp_frame_format = 3,
+				.frame_polarity = 0,
+				.serial_bitrate_clk_mode = 0,
+				.frame_sync_width = 24,
+				.dma_handshake_interface_tx = 5,
+				.dma_handshake_interface_rx = 4,
+				.ssp_base_add = 0xFFA23000,
+		},
+		[SST_SSP_MODEM] = {0},
+		[SST_SSP_BT] = {0},
+		[SST_SSP_FM] = {0},
+	},
+};
+
+static const struct sst_info ctp_sst_info = {
+	.iram_start = SST_CTP_IRAM_START,
+	.iram_end = SST_CTP_IRAM_END,
+	.iram_use = true,
+	.dram_start = SST_CTP_DRAM_START,
+	.dram_end = SST_CTP_DRAM_END,
+	.dram_use = true,
+	.imr_start = 0,
+	.imr_end = 0,
+	.imr_use = false,
+	.mailbox_start = 0,
+	.lpe_viewpt_rqd = false,
+	.use_elf = false,
+	.max_streams = MAX_NUM_STREAMS_CTP,
+	.dma_max_len = (SST_MAX_DMA_LEN * 4),
+	.num_probes = 1,
+};
+
+static const struct sst_ipc_info ctp_ipc_info = {
+	.use_32bit_ops = true,
+	.ipc_offset = 0,
+	.mbox_recv_off = SST_V1_MAILBOX_RECV,
+};
+
+static const struct sst_info mrfld_sst_info = {
+	.iram_start = 0,
+	.iram_end = 0,
+	.iram_use = false,
+	.dram_start = 0,
+	.dram_end = 0,
+	.dram_use = false,
+	.imr_start = 0,
+	.imr_end = 0,
+	.imr_use = false,
+	.mailbox_start = 0,
+	.use_elf = true,
+	.lpe_viewpt_rqd = false,
+	.max_streams = MAX_NUM_STREAMS_MRFLD,
+	.dma_max_len = SST_MAX_DMA_LEN_MRFLD,
+	.num_probes = 16,
+};
+
+static struct sst_platform_debugfs_data ctp_debugfs_data = {
+	.ssp_reg_size = SSP_SIZE,
+	.dma_reg_size = DMA_SIZE_CTP,
+	.num_ssp = 1,
+	.num_dma = 1,
+	.checkpoint_offset = SST_CHECKPOINT_OFFSET,
+	.checkpoint_size = CHECKPOINT_DUMP_SZ,
+};
+
+static struct sst_platform_debugfs_data mrfld_debugfs_data = {
+	.ssp_reg_size = SSP_SIZE,
+	.dma_reg_size = DMA_SIZE_MRFLD,
+	.num_ssp = 3,
+	.num_dma = 2,
+	.checkpoint_offset = SST_CHECKPOINT_OFFSET_MRFLD,
+	.checkpoint_size = CHECKPOINT_DUMP_SZ,
+};
+
+static const struct sst_ipc_info mrfld_ipc_info = {
+	.use_32bit_ops = false,
+	.ipc_offset = 0,
+	.mbox_recv_off = SST_V2_MAILBOX_RECV,
+};
+
+static const struct sst_lib_dnld_info  mrfld_lib_dnld_info = {
+	.mod_base           = MRFLD_FW_LSP_DDR_BASE,
+	.mod_end            = MRFLD_FW_MOD_END,
+	.mod_table_offset   = MRFLD_FW_MOD_TABLE_OFFSET,
+	.mod_table_size     = MRFLD_FW_MOD_TABLE_SIZE,
+	.mod_ddr_dnld       = true,
+};
+
+static int set_ctp_sst_config(struct sst_platform_info *sst_info)
+{
+	unsigned int conf_len;
+
+	ssp_inf_ctp.gpio.i2s_rx_alt = get_gpio_by_name("gpio_i2s3_rx");
+	ssp_inf_ctp.gpio.i2s_tx_alt = get_gpio_by_name("gpio_i2s3_tx");
+	ssp_inf_ctp.gpio.i2s_frame = get_gpio_by_name("gpio_i2s3_fs");
+	ssp_inf_ctp.gpio.i2s_clock = get_gpio_by_name("gpio_i2s3_clk");
+
+	sst_info->ssp_data = &ssp_inf_ctp;
+	conf_len = sizeof(sst_ctp_pdata) + sizeof(sst_ctp_bdata);
+	if (conf_len > CTP_MAX_CONFIG_SIZE)
+		return -EINVAL;
+	sst_info->pdata = &sst_ctp_pdata;
+	sst_info->bdata = &sst_ctp_bdata;
+	sst_info->probe_data = &ctp_sst_info;
+	sst_info->ipc_info = &ctp_ipc_info;
+	sst_info->debugfs_data = &ctp_debugfs_data;
+	sst_info->lib_info = NULL;
+
+	return 0;
+}
+
+static void set_mrfld_sst_config(struct sst_platform_info *sst_info)
+{
+	sst_info->ssp_data = &ssp_inf_mrfld;
+	sst_info->pdata = &sst_mrfld_pdata;
+	sst_info->bdata = NULL;
+	sst_info->probe_data = &mrfld_sst_info;
+	sst_info->ipc_info = &mrfld_ipc_info;
+	sst_info->debugfs_data = &mrfld_debugfs_data;
+	sst_info->lib_info = &mrfld_lib_dnld_info;
+}
+
+static struct sst_platform_info *get_sst_platform_data(struct pci_dev *pdev)
+{
+	struct sst_platform_info *sst_pinfo = NULL;
+
+	set_mrfld_sst_config(&sst_data);
+	sst_pinfo = &sst_data;
+
+	return sst_pinfo;
+}
+
+static void sst_pci_early_quirks(struct pci_dev *pci_dev)
+{
+	pci_dev->dev.platform_data = get_sst_platform_data(pci_dev);
+}
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SST_MRFLD,
+							sst_pci_early_quirks);
diff --git a/arch/x86/platform/intel-mid/device_libs/pci/platform_usb_otg.c b/arch/x86/platform/intel-mid/device_libs/pci/platform_usb_otg.c
new file mode 100644
index 0000000..4deead8
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/pci/platform_usb_otg.c
@@ -0,0 +1,93 @@
+/*
+ * platform_usb_otg.c: USB OTG platform data initialization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/pci.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_scu_ipc.h>
+#include <linux/dma-mapping.h>
+
+#ifdef CONFIG_USB_DWC3_OTG
+#include <linux/usb/dwc3-intel-mid.h>
+static struct intel_dwc_otg_pdata dwc_otg_pdata;
+
+static bool dwc_otg_get_usbspecoverride(void)
+{
+	void __iomem *usb_comp_iomap;
+	bool usb_spec_override;
+
+	/* Read MISCFLAGS byte from offset 0x717 */
+	usb_comp_iomap = ioremap_nocache(0xFFFCE717, 4);
+	/* MISCFLAGS.BIT[6] indicates USB spec override */
+	usb_spec_override = ioread8(usb_comp_iomap) & 0x40;
+	iounmap(usb_comp_iomap);
+
+	return usb_spec_override;
+}
+
+static struct intel_dwc_otg_pdata *get_otg_platform_data(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_MRFL_DWC3_OTG:
+		dwc_otg_pdata.pmic_type = BASIN_COVE;
+		dwc_otg_pdata.charger_detect_enable = 1;
+
+		dwc_otg_pdata.charging_compliance =
+			dwc_otg_get_usbspecoverride();
+
+		return &dwc_otg_pdata;
+	default:
+		break;
+	}
+
+	return NULL;
+}
+#endif
+
+#ifdef CONFIG_USB_PENWELL_OTG
+#include <linux/usb/penwell_otg.h>
+static struct intel_mid_otg_pdata otg_pdata = {
+	.gpio_vbus = 0,
+	.gpio_cs = 0,
+	.gpio_reset = 0,
+	.charging_compliance = 0,
+	.hnp_poll_support = 0,
+	.power_budget = 500
+};
+
+static struct intel_mid_otg_pdata *get_otg_platform_data(struct pci_dev *pdev)
+{
+	struct intel_mid_otg_pdata *pdata = &otg_pdata;
+
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_MRFL_DWC3_OTG:
+		dwc_otg_pdata.pmic_type = BASIN_COVE;
+		dwc_otg_pdata.charger_detect_enable = 1;
+
+		dwc_otg_pdata.charging_compliance =
+			dwc_otg_get_usbspecoverride();
+		return &dwc_otg_pdata;
+
+	default:
+		break;
+	}
+
+	return pdata;
+}
+#endif
+
+static void otg_pci_early_quirks(struct pci_dev *pci_dev)
+{
+	pci_dev->dev.platform_data = get_otg_platform_data(pci_dev);
+}
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFL_DWC3_OTG,
+			otg_pci_early_quirks);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_ads7955.c b/arch/x86/platform/intel-mid/device_libs/platform_ads7955.c
new file mode 100644
index 0000000..c0d07aa
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_ads7955.c
@@ -0,0 +1,37 @@
+/*
+ * platform_ads7955.c: ads7955 platform data initialization file
+ *
+ * (C) Copyright 2014 Intel Corporation
+ * Author: Dan O'Donovan <dan@emutex.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/lnw_gpio.h>
+#include <linux/spi/intel_mid_ssp_spi.h>
+#include <asm/intel-mid.h>
+#include "platform_ads7955.h"
+
+static struct intel_mid_ssp_spi_chip chip = {
+	.burst_size = DFLT_FIFO_BURST_SIZE,
+	.timeout = DFLT_TIMEOUT_VAL,
+	/* SPI DMA is currently not usable on Tangier */
+	.dma_enabled = false,
+};
+
+void __init *ads7955_platform_data(void *info)
+{
+	struct spi_board_info *spi_info = info;
+
+	spi_info->mode = SPI_MODE_0;
+
+	spi_info->controller_data = &chip;
+	spi_info->bus_num = FORCE_SPI_BUS_NUM;
+
+	return NULL;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_ads7955.h b/arch/x86/platform/intel-mid/device_libs/platform_ads7955.h
new file mode 100644
index 0000000..114958f
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_ads7955.h
@@ -0,0 +1,20 @@
+/*
+ * platform_ads7955.h: ads7955 platform data header file
+ *
+ * (C) Copyright 2014 Intel Corporation
+ * Author: Dan O'Donovan <dan@emutex.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_ADS7955_H_
+#define _PLATFORM_ADS7955_H_
+
+/* REVERT ME workaround[MRFL] for invalid bus number in IAFW .25 */
+#define FORCE_SPI_BUS_NUM	5
+#define FORCE_CHIP_SELECT	0
+
+extern void *ads7955_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bcove_adc.c b/arch/x86/platform/intel-mid/device_libs/platform_bcove_adc.c
new file mode 100644
index 0000000..88125ac
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_bcove_adc.c
@@ -0,0 +1,143 @@
+/*
+ * platform_bcove_adc.c: Platform data for Merrifield Basincove GPADC driver
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/machine.h>
+#include <linux/iio/types.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_basincove_gpadc.h>
+#include <asm/intel_mid_remoteproc.h>
+
+#include "platform_bcove_adc.h"
+
+/* SRAM address where the GPADC interrupt register is cached */
+#define GPADC_SRAM_INTR_ADDR	0xfffff615
+
+static struct gpadc_regmap_t basincove_gpadc_regmaps[GPADC_CH_NUM] = {
+	{"VBAT",        5, 0xE9, 0xEA, },
+	{"BATID",       4, 0xEB, 0xEC, },
+	{"IBAT",        5, 0xED, 0xEE, },
+	{"PMICTEMP",    3, 0xCC, 0xCD, },
+	{"BATTEMP0",    2, 0xC8, 0xC9, },
+	{"BATTEMP1",    2, 0xCA, 0xCB, },
+	{"SYSTEMP0",    3, 0xC2, 0xC3, },
+	{"SYSTEMP1",    3, 0xC4, 0xC5, },
+	{"SYSTEMP2",    3, 0xC6, 0xC7, },
+};
+
+static struct gpadc_regs_t basincove_gpadc_regs = {
+	.gpadcreq		= 0xDC,
+	.gpadcreq_irqen		= (1 << 1),
+	.gpadcreq_busy		= (1 << 0),
+	.mirqlvl1		= 0x0C,
+	.mirqlvl1_adc		= (1 << 4),
+	.adc1cntl		= 0xDD,
+	.adcirq			= 0x06,
+	.madcirq		= 0x11,
+};
+
+#define MSIC_ADC_MAP(_adc_channel_label,			\
+		     _consumer_dev_name,                        \
+		     _consumer_channel)                         \
+	{                                                       \
+		.adc_channel_label = _adc_channel_label,        \
+		.consumer_dev_name = _consumer_dev_name,        \
+		.consumer_channel = _consumer_channel,          \
+	}
+
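+/* Map each GPADC channel to the consumer devices that read it via IIO. */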
+struct iio_map basincove_iio_maps[] = {
+	MSIC_ADC_MAP("CH0", "VIBAT", "VBAT"),
+	MSIC_ADC_MAP("CH1", "BATID", "BATID"),
+	MSIC_ADC_MAP("CH2", "VIBAT", "IBAT"),
+	MSIC_ADC_MAP("CH3", "PMICTEMP", "PMICTEMP"),
+	MSIC_ADC_MAP("CH4", "BATTEMP", "BATTEMP0"),
+	MSIC_ADC_MAP("CH5", "BATTEMP", "BATTEMP1"),
+	MSIC_ADC_MAP("CH6", "SYSTEMP", "SYSTEMP0"),
+	MSIC_ADC_MAP("CH7", "SYSTEMP", "SYSTEMP1"),
+	MSIC_ADC_MAP("CH8", "SYSTEMP", "SYSTEMP2"),
+	MSIC_ADC_MAP("CH6", "bcove_thrm", "SYSTEMP0"),
+	MSIC_ADC_MAP("CH7", "bcove_thrm", "SYSTEMP1"),
+	MSIC_ADC_MAP("CH8", "bcove_thrm", "SYSTEMP2"),
+	MSIC_ADC_MAP("CH3", "bcove_thrm", "PMICTEMP"),
+	{ },
+};
+
+#define MSIC_ADC_CHANNEL(_type, _channel, _datasheet_name) \
+	{                               \
+		.indexed = 1,           \
+		.type = _type,          \
+		.channel = _channel,    \
+		.datasheet_name = _datasheet_name,      \
+	}
+
+static const struct iio_chan_spec basincove_adc_channels[] = {
+	MSIC_ADC_CHANNEL(IIO_VOLTAGE, 0, "CH0"),
+	MSIC_ADC_CHANNEL(IIO_RESISTANCE, 1, "CH1"),
+	MSIC_ADC_CHANNEL(IIO_CURRENT, 2, "CH2"),
+	MSIC_ADC_CHANNEL(IIO_TEMP, 3, "CH3"),
+	MSIC_ADC_CHANNEL(IIO_TEMP, 4, "CH4"),
+	MSIC_ADC_CHANNEL(IIO_TEMP, 5, "CH5"),
+	MSIC_ADC_CHANNEL(IIO_TEMP, 6, "CH6"),
+	MSIC_ADC_CHANNEL(IIO_TEMP, 7, "CH7"),
+	MSIC_ADC_CHANNEL(IIO_TEMP, 8, "CH8"),
+};
+
+static struct intel_basincove_gpadc_platform_data bcove_adc_pdata = {
+	.channel_num = GPADC_CH_NUM,
+	.intr = GPADC_SRAM_INTR_ADDR,
+	.gpadc_iio_maps = basincove_iio_maps,
+	.gpadc_regmaps = basincove_gpadc_regmaps,
+	.gpadc_regs = &basincove_gpadc_regs,
+	.gpadc_channels = basincove_adc_channels,
+};
+
+void __init *bcove_adc_platform_data(void *info)
+{
+	struct platform_device *pdev = NULL;
+	struct sfi_device_table_entry *entry = info;
+	int ret;
+
+	pdev = platform_device_alloc(BCOVE_ADC_DEV_NAME, -1);
+
+	if (!pdev) {
+		pr_err("out of memory for SFI platform dev %s\n",
+					BCOVE_ADC_DEV_NAME);
+		goto out;
+	}
+
+	bcove_adc_pdata.intr_mask = MBATTEMP | MSYSTEMP | MBATT
+		| MVIBATT | MCCTICK;
+
+	pdev->dev.platform_data = &bcove_adc_pdata;
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("failed to add bcove adc platform device\n");
+		platform_device_put(pdev);
+		goto out;
+	}
+
+	install_irq_resource(pdev, entry->irq);
+
+	register_rpmsg_service("rpmsg_bcove_adc", RPROC_SCU,
+				RP_BCOVE_ADC);
+out:
+	return &bcove_adc_pdata;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bcove_adc.h b/arch/x86/platform/intel-mid/device_libs/platform_bcove_adc.h
new file mode 100644
index 0000000..044adc7
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_bcove_adc.h
@@ -0,0 +1,17 @@
+/*
+ * platform_bcove_adc.h: Header file for the Merrifield Basincove GPADC driver
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_BCOVE_ADC_H_
+#define _PLATFORM_BCOVE_ADC_H_
+
+#define BCOVE_ADC_DEV_NAME	"bcove_adc"
+
+extern void __init *bcove_adc_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bq24261.c b/arch/x86/platform/intel-mid/device_libs/platform_bq24261.c
new file mode 100644
index 0000000..1c1a583
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_bq24261.c
@@ -0,0 +1,77 @@
+/*
+ * platform_bq24261.c: Platform data for bq24261 charger driver
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/power_supply.h>
+#include <asm/pmic_pdata.h>
+#include <linux/power/bq24261_charger.h>
+#include <asm/intel-mid.h>
+
+#include "platform_ipc.h"
+#include "platform_bq24261.h"
+
+#define BOOST_CUR_LIM 500
+
+static struct power_supply_throttle bq24261_throttle_states[] = {
+	{
+		.throttle_action = PSY_THROTTLE_CC_LIMIT,
+		.throttle_val = BQ24261_CHRG_CUR_NOLIMIT,
+	},
+	{
+		.throttle_action = PSY_THROTTLE_CC_LIMIT,
+		.throttle_val = BQ24261_CHRG_CUR_MEDIUM,
+	},
+	{
+		.throttle_action = PSY_THROTTLE_DISABLE_CHARGING,
+	},
+	{
+		.throttle_action = PSY_THROTTLE_DISABLE_CHARGER,
+	},
+};
+
+char *bq24261_supplied_to[] = {
+				"max170xx_battery",
+				"max17047_battery",
+};
+
+
+{
+	static struct bq24261_plat_data bq24261_pdata;
+
+
+	bq24261_pdata.irq_mask = PMIC_EXT_INTR_MASK;
+	bq24261_pdata.supplied_to = bq24261_supplied_to;
+	bq24261_pdata.num_supplicants = ARRAY_SIZE(bq24261_supplied_to);
+	bq24261_pdata.throttle_states = bq24261_throttle_states;
+	bq24261_pdata.num_throttle_states = ARRAY_SIZE(bq24261_throttle_states);
+	bq24261_pdata.enable_charger = NULL;
+#ifdef CONFIG_PMIC_CCSM
+	bq24261_pdata.enable_charging = pmic_enable_charging;
+	bq24261_pdata.set_inlmt = pmic_set_ilimma;
+	bq24261_pdata.set_cc = pmic_set_cc;
+	bq24261_pdata.set_cv = pmic_set_cv;
+	bq24261_pdata.dump_master_regs = dump_pmic_regs;
+	bq24261_pdata.enable_vbus = pmic_enable_vbus;
+#endif
+	bq24261_pdata.set_iterm = NULL;
+	bq24261_pdata.boost_mode_ma = BOOST_CUR_LIM;
+
+	return &bq24261_pdata;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bq24261.h b/arch/x86/platform/intel-mid/device_libs/platform_bq24261.h
new file mode 100644
index 0000000..6b97380
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_bq24261.h
@@ -0,0 +1,26 @@
+/*
+ * platform_bq24261.h: platform data for bq24261 driver
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MRFL_BQ24261_H_
+#define _PLATFORM_MRFL_BQ24261_H_
+
+#define MRFL_CHRGR_DEV_NAME	"bq24261_charger"
+
+#define PMIC_SRAM_INTR_MAP 0xFFFFF616
+#define PMIC_EXT_INTR_MASK 0x01
+
+#define BQ24261_CHRG_CUR_LOW		100	/* 100mA */
+#define BQ24261_CHRG_CUR_MEDIUM		500	/* 500mA */
+#define BQ24261_CHRG_CUR_HIGH		900	/* 900mA */
+#define BQ24261_CHRG_CUR_NOLIMIT	1500	/* 1500mA */
+
+extern void __init *bq24261_platform_data(
+			void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_btlpm.c b/arch/x86/platform/intel-mid/device_libs/platform_btlpm.c
new file mode 100644
index 0000000..084452d
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_btlpm.c
@@ -0,0 +1,80 @@
+/*
+ * platform_btlpm: btlpm platform data initialization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/init.h>
+#include <linux/pm_runtime.h>
+#include <asm/bcm_bt_lpm.h>
+#include <asm/intel-mid.h>
+#include <linux/gpio.h>
+
+#define UART_PORT_NO 0 /* Bluetooth is using UART port number 0 */
+
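+/* Define LPM_ON to wire up the low-power-mode wake/host-wake GPIOs. */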
+#define LPM_ON
+
+static struct bcm_bt_lpm_platform_data bcm_bt_lpm_pdata = {
+	.gpio_wake = -EINVAL,
+	.gpio_host_wake = -EINVAL,
+	.int_host_wake = -EINVAL,
+	.gpio_enable = -EINVAL,
+	.port = UART_PORT_NO,
+};
+
+struct platform_device bcm_bt_lpm_device = {
+	.name = "bcm_bt_lpm",
+	.id = 0,
+	.dev = {
+		.platform_data = &bcm_bt_lpm_pdata,
+	},
+};
+
+static int __init bluetooth_init(void)
+{
+	int error_reg;
+
+	/* Get the GPIO numbers from the SFI table */
+	bcm_bt_lpm_pdata.gpio_enable = get_gpio_by_name("BT-reset");
+	if (!gpio_is_valid(bcm_bt_lpm_pdata.gpio_enable)) {
+		pr_err("%s: gpio %s not found\n", __func__, "BT-reset");
+		return -ENODEV;
+	}
+
+#ifdef LPM_ON
+	bcm_bt_lpm_pdata.gpio_host_wake = get_gpio_by_name("bt_uart_enable");
+	if (!gpio_is_valid(bcm_bt_lpm_pdata.gpio_host_wake)) {
+		pr_err("%s: gpio %s not found\n", __func__, "bt_uart_enable");
+		return -ENODEV;
+	}
+
+	bcm_bt_lpm_pdata.int_host_wake =
+				gpio_to_irq(bcm_bt_lpm_pdata.gpio_host_wake);
+
+	bcm_bt_lpm_pdata.gpio_wake = get_gpio_by_name("bt_wakeup");
+	if (!gpio_is_valid(bcm_bt_lpm_pdata.gpio_wake)) {
+		pr_err("%s: gpio %s not found\n", __func__, "bt_wakeup");
+		return -ENODEV;
+	}
+
+	pr_debug("%s: gpio_wake %d, gpio_host_wake %d\n", __func__,
+		bcm_bt_lpm_pdata.gpio_wake, bcm_bt_lpm_pdata.gpio_host_wake);
+#endif
+
+	error_reg = platform_device_register(&bcm_bt_lpm_device);
+	if (error_reg < 0) {
+		pr_err("%s: platform_device_register for %s failed\n",
+					__func__, bcm_bt_lpm_device.name);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+device_initcall(bluetooth_init);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_dw_i2c.c b/arch/x86/platform/intel-mid/device_libs/platform_dw_i2c.c
new file mode 100644
index 0000000..9f1822d
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_dw_i2c.c
@@ -0,0 +1,113 @@
+/*
+ * platform_dw_i2c.c: I2C platform data initialization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/lnw_gpio.h>
+#include <linux/gpio.h>
+#include <asm/intel-mid.h>
+
+struct i2c_pin_cfg {
+	int scl_gpio;
+	int scl_alt;
+	int sda_gpio;
+	int sda_alt;
+};
+
+enum {
+	BOARD_NONE = 0,
+	BOARD_VTB,
+	BOARD_SALTBAY,
+};
+
+static struct i2c_pin_cfg dw_i2c_pin_cfgs[][10] = {
+	[BOARD_NONE] =  {},
+	[BOARD_VTB] =  {
+		[1] = {27, 1, 26, 1},
+	},
+	[BOARD_SALTBAY] =  {
+		[1] = {19, 1, 20, 1},
+	},
+};
+
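+/*
+ * Generic I2C bus recovery: drive SCL as a GPIO and clock it up to nine
+ * times so a stuck slave releases SDA, then toggle the lines once more
+ * before handing the pins back to the I2C controller.
+ */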
+int intel_mid_dw_i2c_abort(int busnum)
+{
+	int i;
+	int ret = -EBUSY;
+	struct i2c_pin_cfg *pins = &dw_i2c_pin_cfgs[BOARD_NONE][busnum];
+
+	switch (intel_mid_identify_cpu()) {
+	case INTEL_MID_CPU_CHIP_CLOVERVIEW:
+		pins = &dw_i2c_pin_cfgs[BOARD_VTB][busnum];
+		break;
+	case INTEL_MID_CPU_CHIP_TANGIER:
+		pins = &dw_i2c_pin_cfgs[BOARD_SALTBAY][busnum];
+		break;
+	default:
+		break;
+	}
+
+	if (!pins->scl_gpio || !pins->sda_gpio) {
+		pr_err("i2c-%d: recovery ignore\n", busnum);
+		return 0;
+	}
+	pr_err("i2c-%d: try to abort xfer, scl_gpio %d, sda_gpio %d\n",
+			busnum, pins->scl_gpio, pins->sda_gpio);
+	gpio_request(pins->scl_gpio, "scl");
+	gpio_request(pins->sda_gpio, "sda");
+	lnw_gpio_set_alt(pins->scl_gpio, LNW_GPIO);
+	lnw_gpio_set_alt(pins->sda_gpio, LNW_GPIO);
+	gpio_direction_input(pins->scl_gpio);
+	gpio_direction_input(pins->sda_gpio);
+	usleep_range(10, 10);
+	pr_err("i2c-%d: scl_gpio val %d, sda_gpio val %d\n",
+			busnum,
+			gpio_get_value(pins->scl_gpio) ? 1 : 0,
+			gpio_get_value(pins->sda_gpio) ? 1 : 0);
+	gpio_direction_output(pins->scl_gpio, 1);
+	pr_err("i2c-%d: toggle begin\n", busnum);
+	for (i = 0; i < 9; i++) {
+		if (gpio_get_value(pins->sda_gpio)) {
+			if (gpio_get_value(pins->scl_gpio)) {
+				pr_err("i2c-%d: recovery success\n", busnum);
+				break;
+			} else {
+				gpio_direction_output(pins->scl_gpio, 0);
+				pr_err("i2c-%d: scl_gpio val 0, sda_gpio val 1\n",
+					busnum);
+			}
+		}
+		gpio_set_value(pins->scl_gpio, 0);
+		usleep_range(10, 20);
+		gpio_set_value(pins->scl_gpio, 1);
+		usleep_range(10, 20);
+		pr_err("i2c-%d: toggle SCL loop %d\n", busnum, i);
+	}
+	pr_err("i2c-%d: toggle end\n", busnum);
+	gpio_direction_output(pins->scl_gpio, 1);
+	gpio_direction_output(pins->sda_gpio, 0);
+	gpio_set_value(pins->scl_gpio, 0);
+	usleep_range(10, 20);
+	gpio_set_value(pins->scl_gpio, 1);
+	usleep_range(10, 20);
+	gpio_set_value(pins->sda_gpio, 0);
+	lnw_gpio_set_alt(pins->scl_gpio, pins->scl_alt);
+	lnw_gpio_set_alt(pins->sda_gpio, pins->sda_alt);
+	usleep_range(10, 10);
+	gpio_free(pins->scl_gpio);
+	gpio_free(pins->sda_gpio);
+
+	return ret;
+}
+EXPORT_SYMBOL(intel_mid_dw_i2c_abort);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c b/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c
new file mode 100644
index 0000000..b7760f2
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c
@@ -0,0 +1,33 @@
+/*
+ * platform_emc1403.c: emc1403 platform data initialization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <asm/intel-mid.h>
+#include "platform_emc1403.h"
+
+void __init *emc1403_platform_data(void *info)
+{
+	static short intr2nd_pdata;
+	struct i2c_board_info *i2c_info = info;
+	int intr = get_gpio_by_name("thermal_int");
+	int intr2nd = get_gpio_by_name("thermal_alert");
+
+	if (intr == -1 || intr2nd == -1)
+		return NULL;
+
+	i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
+	intr2nd_pdata = intr2nd + INTEL_MID_IRQ_OFFSET;
+
+	return &intr2nd_pdata;
+}
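+
+/*
+ * Note on the return value: the SFI device enumeration code is assumed
+ * to store the returned pointer as the i2c_board_info platform data,
+ * so the emc1403 driver would read the second (alert) IRQ roughly as
+ * (sketch, field access hypothetical):
+ *
+ *	short alert_irq = *(short *)client->dev.platform_data;
+ */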
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_emc1403.h b/arch/x86/platform/intel-mid/device_libs/platform_emc1403.h
new file mode 100644
index 0000000..0229438
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_emc1403.h
@@ -0,0 +1,16 @@
+/*
+ * platform_emc1403.h: emc1403 platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_EMC1403_H_
+#define _PLATFORM_EMC1403_H_
+
+extern void __init *emc1403_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c
new file mode 100644
index 0000000..395e051
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c
@@ -0,0 +1,89 @@
+/*
+ * platform_gpio_keys.c: gpio_keys platform data initialization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/input.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/platform_device.h>
+#include <asm/intel-mid.h>
+#include "platform_gpio_keys.h"
+
+/*
+ * We will search for these buttons in the SFI GPIO table (by name)
+ * and register them dynamically. Please add all possible buttons
+ * here; we will drop the ones for which no GPIO is found.
+ */
+static struct gpio_keys_button gpio_button[] = {
+	{
+		.code = KEY_POWER,
+		.gpio = -1, /* GPIO number */
+		.active_low = 1,
+		.desc = "power_btn", /* button description */
+		.type = EV_KEY,
+		.wakeup = 0,
+		.debounce_interval = 3000,
+	},
+	{
+		.code = KEY_PROG1,
+		.gpio = 61,
+		.active_low = 1,
+		.desc = "SW1UI4",
+		.type = EV_KEY,
+		.wakeup = 0,
+		.debounce_interval = 50,
+	},
+};
+
+static struct gpio_keys_platform_data gpio_keys = {
+	.buttons	= gpio_button,
+	.rep		= 1,
+	.nbuttons	= -1, /* filled in after the search */
+};
+
+static struct platform_device pb_device = {
+	.name		= DEVICE_NAME,
+	.id		= -1,
+	.dev		= {
+		.platform_data	= &gpio_keys,
+	},
+};
+
+/*
+ * Shrink the non-existent buttons, register the gpio button
+ * device if there is some
+ */
+static int __init pb_keys_init(void)
+{
+	struct gpio_keys_button *gb = gpio_button;
+	int i, num, good = 0;
+
+	num = ARRAY_SIZE(gpio_button);
+	for (i = 0; i < num; i++) {
+		pr_info("info[%2d]: name = %s, gpio = %d\n",
+			 i, gb[i].desc, gb[i].gpio);
+		if (gb[i].gpio == -1)
+			continue;
+
+		if (i != good)
+			gb[good] = gb[i];
+		good++;
+	}
+
+	if (good) {
+		gpio_keys.nbuttons = good;
+		return platform_device_register(&pb_device);
+	}
+	return 0;
+}
+late_initcall(pb_keys_init);
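+
+/*
+ * Worked example of the compaction above: with the table as defined,
+ * "power_btn" has .gpio == -1 and is skipped, while "SW1UI4" (gpio 61)
+ * is kept, so nbuttons becomes 1 and only that key is registered.
+ */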
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.h b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.h
new file mode 100644
index 0000000..5095329
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.h
@@ -0,0 +1,16 @@
+/*
+ * platform_gpio_keys.h: gpio_keys platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_GPIO_KEYS_H_
+#define _PLATFORM_GPIO_KEYS_H_
+
+#define DEVICE_NAME "gpio-keys"
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_hsu.c b/arch/x86/platform/intel-mid/device_libs/platform_hsu.c
new file mode 100644
index 0000000..f1e3d29
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_hsu.c
@@ -0,0 +1,440 @@
+/*
+ * platform_hsu.c: hsu platform data initialization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/lnw_gpio.h>
+#include <linux/gpio.h>
+#include <asm/setup.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_hsu.h>
+
+#include "platform_hsu.h"
+
+#define TNG_CLOCK_CTL 0xFF00B830
+#define TNG_CLOCK_SC  0xFF00B868
+
+#define VLV_HSU_CLOCK	0x0800
+#define VLV_HSU_RESET	0x0804
+
+static unsigned int clock;	/* HSU core clock, in kHz */
+static struct hsu_port_pin_cfg *hsu_port_gpio_mux;
+static struct hsu_port_cfg *platform_hsu_info;
+
+static struct
+hsu_port_pin_cfg hsu_port_pin_cfgs[][hsu_pid_max][hsu_port_max] = {
+	[hsu_tng] = {
+		[hsu_pid_def] = {
+			[hsu_port0] = {
+				.id = 0,
+				.name = HSU_BT_PORT,
+				.rx_gpio = 126,
+				.rx_alt = 1,
+				.tx_gpio = 127,
+				.tx_alt = 1,
+				.cts_gpio = 124,
+				.cts_alt = 1,
+				.rts_gpio = 125,
+				.rts_alt = 1,
+			},
+			[hsu_port1] = {
+				.id = 1,
+				.name = HSU_UART1_PORT,
+				.wake_gpio = 130,
+				.rx_gpio = 130,
+				.rx_alt = 1,
+				.tx_gpio = 131,
+				.tx_alt = 1,
+				.cts_gpio = 128,
+				.cts_alt = 1,
+				.rts_gpio = 129,
+				.rts_alt = 1,
+			},
+			[hsu_port2] = {
+				.id = 2,
+				.name = HSU_UART2_PORT,
+				.wake_gpio = 134,
+				.rx_gpio = 134,
+				.rx_alt = 1,
+				.cts_gpio = 132,
+				.cts_alt = 1,
+				.rts_gpio = 133,
+				.rts_alt = 1,
+			},
+		},
+	},
+
+};
+
+static struct hsu_port_cfg hsu_port_cfgs[][hsu_port_max] = {
+	[hsu_tng] = {
+		[hsu_port0] = {
+			.type = bt_port,
+			.hw_ip = hsu_intel,
+			.index = 0,
+			.name = HSU_BT_PORT,
+			.idle = 20,
+			.hw_init = intel_mid_hsu_init,
+			.hw_set_alt = intel_mid_hsu_switch,
+			.hw_set_rts = intel_mid_hsu_rts,
+			.hw_suspend = intel_mid_hsu_suspend,
+			.hw_resume = intel_mid_hsu_resume,
+			.hw_get_clk = intel_mid_hsu_get_clk,
+			.hw_context_save = 1,
+		},
+		[hsu_port1] = {
+			.type = gps_port,
+			.hw_ip = hsu_intel,
+			.index = 1,
+			.name = HSU_UART1_PORT,
+			.idle = 30,
+			.preamble = 1,
+			.hw_init = intel_mid_hsu_init,
+			.hw_set_alt = intel_mid_hsu_switch,
+			.hw_set_rts = intel_mid_hsu_rts,
+			.hw_suspend = intel_mid_hsu_suspend,
+			.hw_suspend_post = intel_mid_hsu_suspend_post,
+			.hw_resume = intel_mid_hsu_resume,
+			.hw_get_clk = intel_mid_hsu_get_clk,
+			.hw_context_save = 1,
+		},
+		[hsu_port2] = {
+			.type = debug_port,
+			.hw_ip = hsu_intel,
+			.index = 2,
+			.name = HSU_UART2_PORT,
+			.idle = 5000,
+			.hw_init = intel_mid_hsu_init,
+			.hw_set_alt = intel_mid_hsu_switch,
+			.hw_suspend = intel_mid_hsu_suspend,
+			.hw_resume = intel_mid_hsu_resume,
+			.hw_get_clk = intel_mid_hsu_get_clk,
+			.hw_context_save = 1,
+		},
+	},
+};
+
+static struct hsu_func2port hsu_port_func_id_tlb[][hsu_port_func_max] = {
+	[hsu_tng] = {
+		[0] = {
+			.func = 0,
+			.port = -1,
+		},
+		[1] = {
+			.func = 1,
+			.port = hsu_port0,
+		},
+		[2] = {
+			.func = 2,
+			.port = hsu_port1,
+		},
+		[3] = {
+			.func = 3,
+			.port = hsu_port2,
+		},
+	},
+};
+
+static void hsu_port_enable(int port)
+{
+	struct hsu_port_pin_cfg *info = hsu_port_gpio_mux + port;
+
+	if (info->rx_gpio) {
+		lnw_gpio_set_alt(info->rx_gpio, info->rx_alt);
+		gpio_direction_input(info->rx_gpio);
+	}
+	if (info->tx_gpio) {
+		gpio_direction_output(info->tx_gpio, 1);
+		lnw_gpio_set_alt(info->tx_gpio, info->tx_alt);
+		usleep_range(10, 10);
+		gpio_direction_output(info->tx_gpio, 0);
+	}
+	if (info->cts_gpio) {
+		lnw_gpio_set_alt(info->cts_gpio, info->cts_alt);
+		gpio_direction_input(info->cts_gpio);
+	}
+	if (info->rts_gpio) {
+		gpio_direction_output(info->rts_gpio, 0);
+		lnw_gpio_set_alt(info->rts_gpio, info->rts_alt);
+	}
+}
+
+static void hsu_port_disable(int port)
+{
+	struct hsu_port_pin_cfg *info = hsu_port_gpio_mux + port;
+
+	if (info->rx_gpio) {
+		lnw_gpio_set_alt(info->rx_gpio, LNW_GPIO);
+		gpio_direction_input(info->rx_gpio);
+	}
+	if (info->tx_gpio) {
+		gpio_direction_output(info->tx_gpio, 1);
+		lnw_gpio_set_alt(info->tx_gpio, LNW_GPIO);
+		usleep_range(10, 10);
+		gpio_direction_input(info->tx_gpio);
+	}
+	if (info->cts_gpio) {
+		lnw_gpio_set_alt(info->cts_gpio, LNW_GPIO);
+		gpio_direction_input(info->cts_gpio);
+	}
+	if (info->rts_gpio) {
+		lnw_gpio_set_alt(info->rts_gpio, LNW_GPIO);
+		gpio_direction_input(info->rts_gpio);
+	}
+}
+
+void intel_mid_hsu_suspend(int port, struct device *dev,
+							irq_handler_t wake_isr)
+{
+	int ret;
+	struct hsu_port_pin_cfg *info = hsu_port_gpio_mux + port;
+
+	info->dev = dev;
+	info->wake_isr = wake_isr;
+
+	if (info->wake_gpio) {
+		lnw_gpio_set_alt(info->wake_gpio, LNW_GPIO);
+		gpio_direction_input(info->wake_gpio);
+		udelay(10);
+		ret = request_irq(gpio_to_irq(info->wake_gpio), info->wake_isr,
+				IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING,
+				info->name, info->dev);
+		if (ret)
+			dev_err(info->dev, "failed to register wakeup irq\n");
+	}
+}
+
+void intel_mid_hsu_resume(int port, struct device *dev)
+{
+	struct hsu_port_pin_cfg *info = hsu_port_gpio_mux + port;
+
+	if (info->wake_gpio)
+		free_irq(gpio_to_irq(info->wake_gpio), info->dev);
+
+	if (info->rx_gpio) {
+		lnw_gpio_set_alt(info->rx_gpio, info->rx_alt);
+		gpio_direction_input(info->rx_gpio);
+	}
+	if (info->tx_gpio) {
+		gpio_direction_output(info->tx_gpio, 1);
+		lnw_gpio_set_alt(info->tx_gpio, info->tx_alt);
+		usleep_range(10, 10);
+		gpio_direction_output(info->tx_gpio, 0);
+	}
+	if (info->cts_gpio) {
+		lnw_gpio_set_alt(info->cts_gpio, info->cts_alt);
+		gpio_direction_input(info->cts_gpio);
+	}
+}
+
+void intel_mid_hsu_switch(int port)
+{
+	int i;
+	struct hsu_port_pin_cfg *tmp;
+	struct hsu_port_pin_cfg *info = hsu_port_gpio_mux + port;
+
+	for (i = 0; i < hsu_port_max; i++) {
+		tmp = hsu_port_gpio_mux + i;
+		if (tmp != info && tmp->id == info->id)
+			hsu_port_disable(i);
+	}
+	hsu_port_enable(port);
+}
+
+void intel_mid_hsu_rts(int port, int value)
+{
+	struct hsu_port_pin_cfg *info = hsu_port_gpio_mux + port;
+
+	if (!info->rts_gpio)
+		return;
+
+	if (value) {
+		gpio_direction_output(info->rts_gpio, 1);
+		lnw_gpio_set_alt(info->rts_gpio, LNW_GPIO);
+	} else
+		lnw_gpio_set_alt(info->rts_gpio, info->rts_alt);
+}
+
+void intel_mid_hsu_suspend_post(int port)
+{
+	struct hsu_port_pin_cfg *info = hsu_port_gpio_mux + port;
+
+	if (info->rts_gpio && info->wake_gpio
+		&& info->wake_gpio == info->rx_gpio) {
+		gpio_direction_output(info->rts_gpio, 0);
+		lnw_gpio_set_alt(info->rts_gpio, LNW_GPIO);
+	}
+}
+
+void intel_mid_hsu_set_clk(unsigned int m, unsigned int n,
+				void __iomem *addr)
+{
+	unsigned int param, update_bit;
+
+	update_bit = 1U << 31;
+	param = (m << 1) | (n << 16) | 0x1;
+
+	writel(param, addr + VLV_HSU_CLOCK);
+	writel((param | update_bit), addr + VLV_HSU_CLOCK);
+	writel(param, addr + VLV_HSU_CLOCK);
+}
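+
+/*
+ * The write sequence above follows the usual M/N divider handshake:
+ * program the divisor with the enable bit set, pulse the update bit
+ * (bit 31) to latch it, then clear the update bit again. Assuming the
+ * fractional divider scales the input as out = in * m / n, a caller
+ * would do something like (values illustrative only):
+ *
+ *	intel_mid_hsu_set_clk(0x3d09, 0x7fff, port->membase);
+ */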
+
+void intel_mid_hsu_reset(void __iomem *addr)
+{
+	writel(0, addr + VLV_HSU_RESET);
+	writel(3, addr + VLV_HSU_RESET);
+}
+
+unsigned int intel_mid_hsu_get_clk(void)
+{
+	return clock;
+}
+
+int intel_mid_hsu_func_to_port(unsigned int func)
+{
+	int i;
+	struct hsu_func2port *tbl = NULL;
+
+	switch (intel_mid_identify_cpu()) {
+	case INTEL_MID_CPU_CHIP_TANGIER:
+		tbl = &hsu_port_func_id_tlb[hsu_tng][0];
+		break;
+	default:
+		/* FIXME: VALLEYVIEW2? */
+		/* 1e.3 and 1e.4 */
+		tbl = &hsu_port_func_id_tlb[hsu_vlv2][0];
+		break;
+	}
+
+	for (i = 0; i < hsu_port_func_max; i++) {
+		if (tbl->func == func)
+			return tbl->port;
+		tbl++;
+	}
+
+	return -1;
+}
+
+int intel_mid_hsu_init(struct device *dev, int port)
+{
+	struct hsu_port_cfg *port_cfg = platform_hsu_info + port;
+	struct hsu_port_pin_cfg *info;
+
+	if (port >= hsu_port_max)
+		return -ENODEV;
+
+	port_cfg->dev = dev;
+
+	info = hsu_port_gpio_mux + port;
+	if (info->wake_gpio)
+		gpio_request(info->wake_gpio, "hsu");
+	if (info->rx_gpio) {
+		gpio_request(info->rx_gpio, "hsu");
+		gpio_export(info->rx_gpio, 1);
+	}
+	if (info->tx_gpio) {
+		gpio_request(info->tx_gpio, "hsu");
+		gpio_export(info->tx_gpio, 1);
+	}
+	if (info->cts_gpio) {
+		gpio_request(info->cts_gpio, "hsu");
+		gpio_export(info->cts_gpio, 1);
+	}
+	if (info->rts_gpio) {
+		gpio_request(info->rts_gpio, "hsu");
+		gpio_export(info->rts_gpio, 1);
+	}
+
+	return 1;
+}
+
+static void hsu_platform_clk(enum intel_mid_cpu_type cpu_type)
+{
+	void __iomem *clkctl, *clksc;
+	u32 clk_src, clk_div;
+
+	switch (cpu_type) {
+	case INTEL_MID_CPU_CHIP_TANGIER:
+		clock = 100000;
+		clkctl = ioremap_nocache(TNG_CLOCK_CTL, 4);
+		if (!clkctl) {
+			pr_err("tng scu clk ctl ioremap error\n");
+			break;
+		}
+
+		clksc = ioremap_nocache(TNG_CLOCK_SC, 4);
+		if (!clksc) {
+			pr_err("tng scu clk sc ioremap error\n");
+			iounmap(clkctl);
+			break;
+		}
+
+		clk_src = readl(clkctl);
+		clk_div = readl(clksc);
+
+		if (clk_src & (1 << 16))
+			/* source SCU fabric 100M */
+			clock = clock / ((clk_div & 0x7) + 1);
+		else {
+			if (clk_src & (1 << 31))
+				/* source OSCX2 38.4M */
+				clock = 38400;
+			else
+				/* source OSC clock 19.2M */
+				clock = 19200;
+		}
+
+		iounmap(clkctl);
+		iounmap(clksc);
+		break;
+
+	default:
+		/* FIXME: VALLEYVIEW2? */
+		clock = 100000;
+		break;
+	}
+
+	pr_info("hsu core clock %u M\n", clock / 1000);
+}
+
+static __init int hsu_dev_platform_data(void)
+{
+	switch (intel_mid_identify_cpu()) {
+	case INTEL_MID_CPU_CHIP_TANGIER:
+		platform_hsu_info = &hsu_port_cfgs[hsu_tng][0];
+		hsu_port_gpio_mux = &hsu_port_pin_cfgs[hsu_tng][hsu_pid_def][0];
+		break;
+	default:
+		platform_hsu_info = NULL;
+		hsu_port_gpio_mux = NULL;
+		break;
+	}
+
+	if (platform_hsu_info == NULL)
+		return -ENODEV;
+
+	if (hsu_port_gpio_mux == NULL)
+		return -ENODEV;
+
+	hsu_register_board_info(platform_hsu_info);
+	hsu_platform_clk(intel_mid_identify_cpu());
+
+	return 0;
+}
+
+fs_initcall(hsu_dev_platform_data);
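+
+/*
+ * fs_initcall runs before device_initcall, so the HSU board info and
+ * core clock are registered before any HSU port driver probes.
+ */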
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_hsu.h b/arch/x86/platform/intel-mid/device_libs/platform_hsu.h
new file mode 100644
index 0000000..7bc6eaf
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_hsu.h
@@ -0,0 +1,56 @@
+/*
+ * platform_hsu.h: hsu platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_HSU_H_
+#define _PLATFORM_HSU_H_
+
+#define HSU_BT_PORT "hsu_bt_port"
+#define HSU_UART0_PORT "hsu_uart0_port"
+#define HSU_UART1_PORT "hsu_uart1_port"
+#define HSU_UART2_PORT "hsu_uart2_port"
+
+enum hsu_core {
+	hsu_pnw,
+	hsu_clv,
+	hsu_tng,
+	hsu_vlv2,
+};
+
+enum hsu_pid {
+	hsu_pid_def = 0,
+	hsu_pid_rhb = 0,
+	hsu_pid_vtb_pro = 1,
+	hsu_pid_vtb_eng = 2,
+	hsu_pid_max,
+};
+
+struct hsu_func2port {
+	int func;
+	int port;
+};
+
+struct hsu_port_pin_cfg {
+	char *name;
+	int id;
+	int wake_gpio;
+	int rx_gpio;
+	int rx_alt;
+	int tx_gpio;
+	int tx_alt;
+	int cts_gpio;
+	int cts_alt;
+	int rts_gpio;
+	int rts_alt;
+	struct device *dev;
+	irq_handler_t wake_isr;
+};
+
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_ipc.c b/arch/x86/platform/intel-mid/device_libs/platform_ipc.c
new file mode 100644
index 0000000..5a71b82
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_ipc.c
@@ -0,0 +1,38 @@
+/*
+ * platform_ipc.c: IPC platform library file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/sfi.h>
+#include <linux/gpio.h>
+#include <asm/intel-mid.h>
+#include "platform_ipc.h"
+
+void ipc_device_handler(struct sfi_device_table_entry *pentry,
+				struct devs_id *dev)
+{
+	void *pdata = NULL;
+	/*
+	 * IPC device creation is handled by the MSIC
+	 * MFD driver so we don't need to do it here.
+	 */
+
+	/*
+	 * We need to call the platform init of IPC devices to fill
+	 * the msic_pdata structure. It will be used in msic_init for
+	 * initialization.
+	 */
+	pr_info("IPC bus, name = %16.16s, irq = 0x%2x\n",
+		pentry->name, pentry->irq);
+	if (dev != NULL)
+		pdata = dev->get_platform_data(pentry);
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_ipc.h b/arch/x86/platform/intel-mid/device_libs/platform_ipc.h
new file mode 100644
index 0000000..984b549
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_ipc.h
@@ -0,0 +1,17 @@
+/*
+ * platform_ipc.h: IPC platform library header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_IPC_H_
+#define _PLATFORM_IPC_H_
+
+extern void ipc_device_handler(struct sfi_device_table_entry *pentry,
+			struct devs_id *dev) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_lis331.c b/arch/x86/platform/intel-mid/device_libs/platform_lis331.c
new file mode 100644
index 0000000..05536ea
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_lis331.c
@@ -0,0 +1,32 @@
+/*
+ * platform_lis331.c: lis331 platform data initialization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <asm/intel-mid.h>
+#include "platform_lis331.h"
+
+void __init *lis331dl_platform_data(void *info)
+{
+	static short intr2nd_pdata;
+	struct i2c_board_info *i2c_info = info;
+	int intr = get_gpio_by_name("accel_int");
+	int intr2nd = get_gpio_by_name("accel_2");
+
+	if (intr == -1 || intr2nd == -1)
+		return NULL;
+
+	i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
+	intr2nd_pdata = intr2nd + INTEL_MID_IRQ_OFFSET;
+
+	return &intr2nd_pdata;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_lis331.h b/arch/x86/platform/intel-mid/device_libs/platform_lis331.h
new file mode 100644
index 0000000..37fbe0c
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_lis331.h
@@ -0,0 +1,16 @@
+/*
+ * platform_lis331.h: lis331 platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_LIS331_H_
+#define _PLATFORM_LIS331_H_
+
+extern void __init *lis331dl_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_max3111.c b/arch/x86/platform/intel-mid/device_libs/platform_max3111.c
new file mode 100644
index 0000000..cf6b88b
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_max3111.c
@@ -0,0 +1,61 @@
+/*
+ * platform_max3111.c: max3111 platform data initialization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/lnw_gpio.h>
+#include <linux/serial_max3110.h>
+#include <linux/spi/intel_mid_ssp_spi.h>
+#include <asm/intel-mid.h>
+#include "platform_max3111.h"
+
+static struct intel_mid_ssp_spi_chip chip = {
+	.burst_size = DFLT_FIFO_BURST_SIZE,
+	.timeout = DFLT_TIMEOUT_VAL,
+	/* UART DMA is not supported in VP */
+	.dma_enabled = false,
+};
+
+void __init *max3111_platform_data(void *info)
+{
+	struct spi_board_info *spi_info = info;
+	int intr;
+	static struct plat_max3110 max3110_pdata;
+
+	spi_info->mode = SPI_MODE_0;
+
+	/* the max3110 interrupt is not supported on simulation platforms */
+	if (intel_mid_identify_sim()) {
+		spi_info->controller_data = &chip;
+		spi_info->bus_num = FORCE_SPI_BUS_NUM;
+		return &max3110_pdata;
+	}
+
+	spi_info->controller_data = &chip;
+	spi_info->bus_num = FORCE_SPI_BUS_NUM;
+
+	/* use fast_int_1 (IRQ 41) on MRFL */
+	max3110_pdata.irq_edge_triggered = 0;
+
+	/*
+	 * Force polling for HVP and VP simulation platforms
+	 * on TANGIER and ANNIEDALE.
+	 */
+	if ((intel_mid_identify_sim() == INTEL_MID_CPU_SIMULATION_VP) ||
+	    (intel_mid_identify_sim() == INTEL_MID_CPU_SIMULATION_HVP)) {
+		if ((intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) ||
+		   (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE)) {
+			spi_info->irq = 0;
+		}
+	}
+
+	return &max3110_pdata;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_max3111.h b/arch/x86/platform/intel-mid/device_libs/platform_max3111.h
new file mode 100644
index 0000000..fe2c361
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_max3111.h
@@ -0,0 +1,20 @@
+/*
+ * platform_max3111.h: max3111 platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MAX3111_H_
+#define _PLATFORM_MAX3111_H_
+
+/* REVERT ME workaround[MRFL] for invalid bus number in IAFW .25 */
+#define FORCE_SPI_BUS_NUM	5
+#define FORCE_CHIP_SELECT	0
+
+extern void *max3111_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_max7315.c b/arch/x86/platform/intel-mid/device_libs/platform_max7315.c
new file mode 100644
index 0000000..b682f0a
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_max7315.c
@@ -0,0 +1,64 @@
+/*
+ * platform_max7315.c: max7315 platform data initialization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/i2c/pca953x.h>
+#include <asm/intel-mid.h>
+#include "platform_max7315.h"
+
+
+void __init *max7315_platform_data(void *info)
+{
+	static struct pca953x_platform_data max7315_pdata[MAX7315_NUM];
+	static int nr;
+	struct pca953x_platform_data *max7315 = &max7315_pdata[nr];
+	struct i2c_board_info *i2c_info = info;
+	int gpio_base, intr;
+	char base_pin_name[SFI_NAME_LEN + 1];
+	char intr_pin_name[SFI_NAME_LEN + 1];
+
+	if (nr >= MAX7315_NUM) {
+		pr_err("too many max7315s, we only support %d\n",
+				MAX7315_NUM);
+		return NULL;
+	}
+	/*
+	 * We may have several max7315 chips on the board; we only need
+	 * to load several instances of the same pca953x driver to
+	 * cover them.
+	 */
+	strcpy(i2c_info->type, "max7315");
+	if (nr++) {
+		snprintf(base_pin_name, sizeof(base_pin_name),
+							"max7315_%d_base", nr);
+		snprintf(intr_pin_name, sizeof(intr_pin_name),
+							"max7315_%d_int", nr);
+	} else {
+		strcpy(base_pin_name, "max7315_base");
+		strcpy(intr_pin_name, "max7315_int");
+	}
+
+	gpio_base = get_gpio_by_name(base_pin_name);
+	intr = get_gpio_by_name(intr_pin_name);
+
+	if (gpio_base == -1)
+		return NULL;
+	max7315->gpio_base = gpio_base;
+	if (intr != -1) {
+		i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
+		max7315->irq_base = gpio_base + INTEL_MID_IRQ_OFFSET;
+	} else {
+		i2c_info->irq = -1;
+		max7315->irq_base = -1;
+	}
+	return max7315;
+}
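+
+/*
+ * Naming example implied by the nr++ logic above: the first max7315
+ * looks up "max7315_base"/"max7315_int" in the SFI GPIO table, while
+ * the second looks up "max7315_2_base"/"max7315_2_int" (nr has already
+ * been incremented to 2 when the names are formatted).
+ */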
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_max7315.h b/arch/x86/platform/intel-mid/device_libs/platform_max7315.h
new file mode 100644
index 0000000..d62daa5
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_max7315.h
@@ -0,0 +1,19 @@
+/*
+ * platform_max7315.h: max7315 platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MAX7315_H_
+#define _PLATFORM_MAX7315_H_
+
+/* we have multiple max7315 on the board ... */
+#define MAX7315_NUM 2
+
+extern void __init *max7315_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mid_pwm.c b/arch/x86/platform/intel-mid/device_libs/platform_mid_pwm.c
new file mode 100644
index 0000000..c184fed
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mid_pwm.c
@@ -0,0 +1,121 @@
+/*
+ * platform_mid_pwm.c: mid_pwm platform data initialization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/input.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/lnw_gpio.h>
+
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_pwm.h>
+#include <asm/intel_mid_remoteproc.h>
+
+#include "platform_mid_pwm.h"
+
+static struct intel_mid_pwm_device_data mfld_pwms[] = {
+	[PWM_LED] = {
+		.reg_clkdiv0 = 0x62,
+		.reg_clkdiv1 = 0x61,
+		.reg_dutycyc = 0x67,
+		.val_clkdiv1 = 0x00,
+		.val_clkdiv0 = 0x03,
+	},
+	[PWM_VIBRATOR] = {
+		.reg_clkdiv0 = 0x64,
+		.reg_clkdiv1 = 0x63,
+		.reg_dutycyc = 0x68,
+		.val_clkdiv1 = 0x00,
+		.val_clkdiv0 = 0x03,
+	},
+	[PWM_LCD_BACKLIGHT] = {
+		.reg_clkdiv0 = 0x66,
+		.reg_clkdiv1 = 0x65,
+		.reg_dutycyc = 0x69,
+		.val_clkdiv1 = 0x00,
+		.val_clkdiv0 = 0x03,
+	},
+};
+
+static struct intel_mid_pwm_device_data ctp_pwms[] = {
+	[PWM_LED] = {
+		.reg_clkdiv0 = 0x62,
+		.reg_clkdiv1 = 0x61,
+		.reg_dutycyc = 0x67,
+		.val_clkdiv1 = 0x00,
+		.val_clkdiv0 = 0x00,
+	},
+	[PWM_VIBRATOR] = {
+		.reg_clkdiv0 = 0x64,
+		.reg_clkdiv1 = 0x63,
+		.reg_dutycyc = 0x68,
+		.val_clkdiv1 = 0x00,
+		.val_clkdiv0 = 0x03,
+	},
+	[PWM_LCD_BACKLIGHT] = {
+		.reg_clkdiv0 = 0x66,
+		.reg_clkdiv1 = 0x65,
+		.reg_dutycyc = 0x69,
+		.val_clkdiv1 = 0x00,
+		.val_clkdiv0 = 0x03,
+	},
+};
+
+static struct intel_mid_pwm_platform_data pdata[] = {
+	[mfld_pwm] = {
+		.pwm_num = PWM_NUM,
+		.ddata = mfld_pwms,
+		.reg_clksel = 0x38F,
+		.val_clksel = 0x01,
+	},
+	[ctp_pwm] = {
+		.pwm_num = PWM_NUM,
+		.ddata = ctp_pwms,
+		.reg_clksel = 0x38F,
+		.val_clksel = 0x00,
+	},
+};
+
+static void *get_pwm_platform_data(void)
+{
+	pr_info("%s, MFLD board detected\n", __func__);
+	return &pdata[mfld_pwm];
+}
+
+static int __init intel_mid_pwm_init(void)
+{
+	struct platform_device *pdev = NULL;
+	int ret = 0;
+
+	pdev = platform_device_alloc(DEVICE_NAME, -1);
+
+	if (!pdev) {
+		pr_err("out of memory for platform dev %s\n",
+					DEVICE_NAME);
+		return -ENOMEM;
+	}
+
+	pdev->dev.platform_data = get_pwm_platform_data();
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("failed to add platform device %s\n",
+					DEVICE_NAME);
+		platform_device_put(pdev);
+		return ret;
+	}
+
+	return 0;
+}
+
+fs_initcall(intel_mid_pwm_init);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mid_pwm.h b/arch/x86/platform/intel-mid/device_libs/platform_mid_pwm.h
new file mode 100644
index 0000000..fb9a26c
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mid_pwm.h
@@ -0,0 +1,21 @@
+/*
+ * platform_mid_pwm.h: mid_pwm platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MID_PWM_H_
+#define _PLATFORM_MID_PWM_H_
+
+#define DEVICE_NAME "intel_mid_pwm"
+
+enum {
+	mfld_pwm,
+	ctp_pwm,
+};
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c b/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c
new file mode 100644
index 0000000..715274f
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c
@@ -0,0 +1,28 @@
+/*
+ * platform_mpu3050.c: mpu3050 platform data initialization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <asm/intel-mid.h>
+#include "platform_mpu3050.h"
+
+void *mpu3050_platform_data(void *info)
+{
+	struct i2c_board_info *i2c_info = info;
+	int intr = get_gpio_by_name("mpu3050_int");
+
+	if (intr == -1)
+		return NULL;
+
+	i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
+	return NULL;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.h b/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.h
new file mode 100644
index 0000000..c324c16
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.h
@@ -0,0 +1,16 @@
+/*
+ * platform_mpu3050.h: mpu3050 platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MPU3050_H_
+#define _PLATFORM_MPU3050_H_
+
+extern void *mpu3050_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfl_ocd.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_ocd.c
new file mode 100644
index 0000000..a179255
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_ocd.c
@@ -0,0 +1,68 @@
+/*
+ * platform_mrfl_ocd.c: Platform data for Merrifield Platform OCD driver
+ *
+ * (C) Copyright 2013 Intel Corporation
+ *  Author: Saranya Gopal <saranya.gopal@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_remoteproc.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_basincove_ocd.h>
+
+#include "platform_msic.h"
+#include "platform_mrfl_ocd.h"
+
+static int get_bcu_config(struct ocd_bcove_config_data *ocd_smip_data)
+{
+	int i;
+	void __iomem *bcu_smip_sram_addr;
+	u8 *plat_smip_data;
+
+	if (!ocd_smip_data)
+		return -ENXIO;
+
+	plat_smip_data = (u8 *)ocd_smip_data;
+	bcu_smip_sram_addr = ioremap_nocache(MRFL_SMIP_SRAM_ADDR +
+					BCU_SMIP_OFFSET, NUM_SMIP_BYTES);
+	if (!bcu_smip_sram_addr)
+		return -ENOMEM;
+
+	for (i = 0; i < NUM_SMIP_BYTES; i++)
+		*(plat_smip_data + i) = ioread8(bcu_smip_sram_addr + i);
+
+	iounmap(bcu_smip_sram_addr);
+
+	return 0;
+}
+
+static struct ocd_platform_data ocd_data;
+
+void __init *mrfl_ocd_platform_data(void *info)
+{
+	struct sfi_device_table_entry *entry = info;
+	struct platform_device *pdev;
+
+	pdev = platform_device_alloc(MRFL_OCD_DEV_NAME, -1);
+	if (!pdev) {
+		pr_err("out of memory for SFI platform dev %s\n",
+			MRFL_OCD_DEV_NAME);
+		return NULL;
+	}
+
+	if (platform_device_add(pdev)) {
+		pr_err("failed to add merrifield ocd platform device\n");
+		platform_device_put(pdev);
+		return NULL;
+	}
+
+	install_irq_resource(pdev, entry->irq);
+	ocd_data.bcu_config_data = &get_bcu_config;
+	pdev->dev.platform_data	= &ocd_data;
+	register_rpmsg_service("rpmsg_mrfl_ocd", RPROC_SCU, RP_MRFL_OCD);
+
+	return &ocd_data;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfl_ocd.h b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_ocd.h
new file mode 100644
index 0000000..d817e7d
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_ocd.h
@@ -0,0 +1,19 @@
+/*
+ * platform_mrfl_ocd.h: Merrifield OCD platform data header file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Saranya Gopal <saranya.gopal@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MRFL_OCD_H_
+#define _PLATFORM_MRFL_OCD_H_
+
+#define MRFL_OCD_DEV_NAME "bcove_bcu"
+
+extern void __init *mrfl_ocd_platform_data(void *info)
+			__attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfl_pmic.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_pmic.c
new file mode 100644
index 0000000..9f69867
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_pmic.c
@@ -0,0 +1,54 @@
+/*
+ * platform_mrfl_pmic.c: Platform data for Merrifield PMIC driver
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <asm/intel-mid.h>
+#include <asm/pmic_pdata.h>
+#include <asm/intel_mid_remoteproc.h>
+#include <linux/power/bq24261_charger.h>
+
+#include "platform_ipc.h"
+#include "platform_mrfl_pmic.h"
+
+void __init *mrfl_pmic_ccsm_platform_data(void *info)
+{
+	struct sfi_device_table_entry *entry = info;
+	static struct pmic_platform_data pmic_pdata;
+	struct platform_device *pdev = NULL;
+	int ret;
+
+	pdev = platform_device_alloc(entry->name, -1);
+	if (!pdev) {
+		pr_err("Out of memory for SFI platform dev %s\n", entry->name);
+		goto out;
+	}
+	pdev->dev.platform_data = &pmic_pdata;
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("Failed to add adc platform device\n");
+		platform_device_put(pdev);
+		goto out;
+	}
+	install_irq_resource(pdev, entry->irq);
+#ifdef CONFIG_BQ24261_CHARGER
+	pmic_pdata.cc_to_reg = bq24261_cc_to_reg;
+	pmic_pdata.cv_to_reg = bq24261_cv_to_reg;
+#endif
+	register_rpmsg_service("rpmsg_pmic_ccsm", RPROC_SCU,
+				RP_PMIC_CCSM);
+out:
+	return &pmic_pdata;
+}
+
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfl_pmic.h b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_pmic.h
new file mode 100644
index 0000000..5eb3ed5
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_pmic.h
@@ -0,0 +1,17 @@
+/*
+ * platform_mrfl_pmic.h: platform data for pmic driver
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MRFL_PMIC_H_
+#define _PLATFORM_MRFL_PMIC_H_
+
+extern void __init *mrfl_pmic_ccsm_platform_data(
+				void *info) __attribute__((weak));
+
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfl_pmic_i2c.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_pmic_i2c.c
new file mode 100644
index 0000000..c6ad12b
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_pmic_i2c.c
@@ -0,0 +1,49 @@
+/*
+ * platform_mrfl_pmic_i2c.c: Platform data for Merrifield PMIC I2C
+ * adapter driver.
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <asm/intel-mid.h>
+#include <asm/pmic_pdata.h>
+#include <asm/intel_mid_remoteproc.h>
+#include <linux/power/bq24261_charger.h>
+
+#include "platform_ipc.h"
+#include "platform_mrfl_pmic_i2c.h"
+
+void __init *mrfl_pmic_i2c_platform_data(void *info)
+{
+	struct sfi_device_table_entry *entry = info;
+	struct platform_device *pdev = NULL;
+	int ret;
+
+	pdev = platform_device_alloc(entry->name, -1);
+	if (!pdev) {
+		pr_err("Out of memory for SFI platform dev %s\n", entry->name);
+		goto out;
+	}
+	pdev->dev.platform_data = NULL;
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("Failed to add adc platform device\n");
+		platform_device_put(pdev);
+		goto out;
+	}
+	install_irq_resource(pdev, entry->irq);
+	register_rpmsg_service("rpmsg_i2c_pmic_adap", RPROC_SCU,
+				RP_PMIC_I2C);
+out:
+	return NULL;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfl_pmic_i2c.h b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_pmic_i2c.h
new file mode 100644
index 0000000..3034a7f
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_pmic_i2c.h
@@ -0,0 +1,17 @@
+/*
+ * platform_mrfl_pmic_i2c.h: platform data for pmic i2c adapter driver
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MRFL_PMIC_I2C_H_
+#define _PLATFORM_MRFL_PMIC_I2C_H_
+
+extern void __init *mrfl_pmic_i2c_platform_data(
+				void *info) __attribute__((weak));
+
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfl_regulator.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_regulator.c
new file mode 100644
index 0000000..f7d38fa
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_regulator.c
@@ -0,0 +1,125 @@
+/*
+ * platform_mrfl_regulator.c - Merrifield regulator machine driver
+ * Copyright (c) 2012, Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/regulator/intel_basin_cove_pmic.h>
+#include <linux/regulator/machine.h>
+
+#include <asm/intel-mid.h>
+
+/***********VPROG1 REGULATOR platform data*************/
+static struct regulator_consumer_supply vprog1_consumer[] = {
+};
+static struct regulator_init_data vprog1_data = {
+	.constraints = {
+		.min_uV			= 1500000,
+		.max_uV			= 2800000,
+		.valid_ops_mask		= REGULATOR_CHANGE_STATUS
+			| REGULATOR_CHANGE_VOLTAGE,
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL,
+	},
+	.num_consumer_supplies	= ARRAY_SIZE(vprog1_consumer),
+	.consumer_supplies	= vprog1_consumer,
+};
+
+static struct intel_pmic_info vprog1_info = {
+	.pmic_reg   = VPROG1CNT_ADDR,
+	.init_data  = &vprog1_data,
+	.table_len  = ARRAY_SIZE(VPROG1_VSEL_table),
+	.table      = VPROG1_VSEL_table,
+};
+static struct platform_device vprog1_device = {
+	.name = "intel_regulator",
+	.id = VPROG1,
+	.dev = {
+		.platform_data = &vprog1_info,
+	},
+};
+/***********VPROG2 REGULATOR platform data*************/
+static struct regulator_consumer_supply vprog2_consumer[] = {
+};
+static struct regulator_init_data vprog2_data = {
+	.constraints = {
+		.min_uV			= 1500000,
+		.max_uV			= 2850000,
+		.valid_ops_mask		= REGULATOR_CHANGE_STATUS
+			| REGULATOR_CHANGE_VOLTAGE,
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL,
+	},
+	.num_consumer_supplies	= ARRAY_SIZE(vprog2_consumer),
+	.consumer_supplies	= vprog2_consumer,
+};
+static struct intel_pmic_info vprog2_info = {
+	.pmic_reg   = VPROG2CNT_ADDR,
+	.init_data  = &vprog2_data,
+	.table_len  = ARRAY_SIZE(VPROG2_VSEL_table),
+	.table      = VPROG2_VSEL_table,
+};
+static struct platform_device vprog2_device = {
+	.name = "intel_regulator",
+	.id = VPROG2,
+	.dev = {
+		.platform_data = &vprog2_info,
+	},
+};
+
+/***********VPROG3 REGULATOR platform data*************/
+static struct regulator_consumer_supply vprog3_consumer[] = {
+};
+static struct regulator_init_data vprog3_data = {
+	.constraints = {
+		.min_uV			= 1050000,
+		.max_uV			= 2800000,
+		.valid_ops_mask		= REGULATOR_CHANGE_STATUS
+			| REGULATOR_CHANGE_VOLTAGE,
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL,
+	},
+	.num_consumer_supplies	= ARRAY_SIZE(vprog3_consumer),
+	.consumer_supplies	= vprog3_consumer,
+};
+static struct intel_pmic_info vprog3_info = {
+	.pmic_reg   = VPROG3CNT_ADDR,
+	.init_data  = &vprog3_data,
+	.table_len  = ARRAY_SIZE(VPROG3_VSEL_table),
+	.table      = VPROG3_VSEL_table,
+};
+static struct platform_device vprog3_device = {
+	.name = "intel_regulator",
+	.id = VPROG3,
+	.dev = {
+		.platform_data = &vprog3_info,
+	},
+};
+
+static struct platform_device *regulator_devices[] __initdata = {
+	&vprog1_device,
+	&vprog2_device,
+	&vprog3_device,
+};
+
+static int __init regulator_init(void)
+{
+	/* register the regulator only if SoC is Tangier */
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER)
+		platform_add_devices(regulator_devices,
+				ARRAY_SIZE(regulator_devices));
+
+	return 0;
+}
+device_initcall(regulator_init);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfl_thermal.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_thermal.c
new file mode 100644
index 0000000..8e118a4
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_thermal.c
@@ -0,0 +1,133 @@
+/*
+ * platform_mrfl_thermal.c: Platform data initialization file for
+ *			Intel Merrifield Platform thermal driver
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Durgadoss R <durgadoss.r@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/mfd/intel_msic.h>
+#include <linux/platform_device.h>
+#include <asm/intel_mid_thermal.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_remoteproc.h>
+#include "platform_mrfl_thermal.h"
+
+/* 'enum' of Thermal ADC channels */
+enum thermal_adc_channels { SYS0, SYS1, SYS2, PMIC_DIE };
+
+static int linear_temp_correlation(void *info, long temp, long *res)
+{
+	struct intel_mid_thermal_sensor *sensor = info;
+
+	*res = ((temp * sensor->slope) / 1000) + sensor->intercept;
+
+	return 0;
+}
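+
+/*
+ * Worked example with the SKIN0 entry below (slope 969, intercept
+ * -3741): a raw reading of 30000 maps to 30000 * 969 / 1000 - 3741 =
+ * 25329, i.e. about 25.3 degrees C assuming milli-degree units.
+ */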
+
+/*
+ * Naming convention:
+ * skin0 -> front skin
+ * skin1 -> back skin
+ */
+
+static struct intel_mid_thermal_sensor mrfl_sensors[] = {
+	{
+		.name = SKIN0_NAME,
+		.index = SYS2,
+		.slope = 969,
+		.intercept = -3741,
+		.temp_correlation = linear_temp_correlation,
+		.direct = false,
+	},
+	{
+		.name = SKIN1_NAME,
+		.index = SYS0,
+		.slope = 966,
+		.intercept = -2052,
+		.temp_correlation = linear_temp_correlation,
+		.direct = false,
+	},
+	{
+		.name = MSIC_DIE_NAME,
+		.index = PMIC_DIE,
+		.slope = 1000,
+		.intercept = 0,
+		.temp_correlation = linear_temp_correlation,
+		.direct = true,
+	},
+};
+
+/* Bodegabay - PRh thermal sensor list */
+static struct intel_mid_thermal_sensor bdgb_sensors[] = {
+	{
+		.name = SKIN0_NAME,
+		.index = SYS0,
+		.slope = 410,
+		.intercept = 16808,
+		.temp_correlation = linear_temp_correlation,
+		.direct = false,
+	},
+	{
+		.name = SKIN1_NAME,
+		.index = SYS0,
+		.slope = 665,
+		.intercept = 8375,
+		.temp_correlation = linear_temp_correlation,
+		.direct = false,
+	},
+	{
+		.name = MSIC_DIE_NAME,
+		.index = PMIC_DIE,
+		.slope = 1000,
+		.intercept = 0,
+		.temp_correlation = linear_temp_correlation,
+		.direct = true,
+	},
+};
+
+static struct intel_mid_thermal_platform_data pdata[] = {
+	[mrfl_thermal] = {
+		.num_sensors = 3,
+		.sensors = mrfl_sensors,
+	},
+	[bdgb_thermal] = {
+		.num_sensors = 3,
+		.sensors = bdgb_sensors,
+	},
+};
+
+void __init *mrfl_thermal_platform_data(void *info)
+{
+	struct platform_device *pdev;
+	struct sfi_device_table_entry *entry = info;
+
+	pdev = platform_device_alloc(MRFL_THERM_DEV_NAME, -1);
+	if (!pdev) {
+		pr_err("out of memory for SFI platform dev %s\n",
+			MRFL_THERM_DEV_NAME);
+		return NULL;
+	}
+
+	if (platform_device_add(pdev)) {
+		pr_err("failed to add thermal platform device\n");
+		platform_device_put(pdev);
+		return NULL;
+	}
+
+	pdev->dev.platform_data = &pdata[mrfl_thermal];
+
+	install_irq_resource(pdev, entry->irq);
+	register_rpmsg_service("rpmsg_mrfl_thermal", RPROC_SCU,
+				RP_BCOVE_THERMAL);
+
+	return NULL;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfl_thermal.h b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_thermal.h
new file mode 100644
index 0000000..f1011a1
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfl_thermal.h
@@ -0,0 +1,26 @@
+/*
+ * platform_mrfl_thermal.h: Platform data initialization file for
+ *			Intel Merrifield Platform thermal driver
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Durgadoss R <durgadoss.r@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MRFL_THERMAL_H_
+#define _PLATFORM_MRFL_THERMAL_H_
+
+#define MRFL_THERM_DEV_NAME "bcove_thrm"
+
+extern void __init *mrfl_thermal_platform_data(void *)
+			__attribute__((weak));
+
+enum {
+	mrfl_thermal,
+	bdgb_thermal,
+};
+
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_audio.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_audio.c
new file mode 100644
index 0000000..0e8738b
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_audio.c
@@ -0,0 +1,151 @@
+/*
+ * platform_mrfld_audio.c: MRFLD audio platform data initialization file
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author: Dharageswari R <dharageswari.r@intel.com>
+ *	Vinod Koul <vinod.koul@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/platform_device.h>
+#include <asm/intel-mid.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+#include <asm/platform_sst_audio.h>
+#include "platform_mrfld_audio.h"
+#include "platform_msic.h"
+#include "platform_wm8994.h"
+
+static char *audio_codec = "dummy";
+module_param(audio_codec, charp, S_IRUSR);
+MODULE_PARM_DESC(audio_codec, "Name of the hardware codec in use");
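+
+/*
+ * The codec is selected at boot, e.g. on the kernel command line
+ * (module name assumed for illustration, the parameter is real):
+ *
+ *	intel_mid_mrfld_audio.audio_codec=wm8958
+ *
+ * Any value other than "wm8958" falls back to the dummy codec in
+ * mrfld_sst_audio_platform_data() below.
+ */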
+
+static struct mrfld_audio_platform_data mrfld_audio_pdata;
+
+void *merfld_audio_platform_data(void *info)
+{
+	struct platform_device *pdev;
+	int ret;
+
+	pr_debug("in %s\n", __func__);
+
+	ret = add_sst_platform_device();
+	if (ret < 0) {
+		pr_err("%s failed to sst_platform device\n", __func__);
+		return NULL;
+	}
+
+	pdev = platform_device_alloc("hdmi-audio", -1);
+	if (!pdev) {
+		pr_err("failed to allocate hdmi-audio platform device\n");
+		return NULL;
+	}
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("failed to add hdmi-audio platform device\n");
+		platform_device_put(pdev);
+		return NULL;
+	}
+
+	/* request the gpios for audio */
+	mrfld_audio_pdata.codec_gpio = get_gpio_by_name("audiocodec_int");
+	mrfld_audio_pdata.codec_rst = get_gpio_by_name("audiocodec_rst");
+
+	pdev = platform_device_alloc("mrfld_lm49453", -1);
+	if (!pdev) {
+		pr_err("failed to allocate mrfld_lm49453 platform device\n");
+		return NULL;
+	}
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("failed to add mrfld_lm49453 platform device\n");
+		platform_device_put(pdev);
+		return NULL;
+	}
+	if (platform_device_add_data(pdev, &mrfld_audio_pdata,
+				     sizeof(mrfld_audio_pdata))) {
+		pr_err("failed to add mrfld_lm49453 platform data\n");
+		platform_device_put(pdev);
+		return NULL;
+	}
+
+	register_rpmsg_service("rpmsg_msic_mrfld_audio", RPROC_SCU,
+				RP_MSIC_MRFLD_AUDIO);
+
+	return NULL;
+}
+
+void *mrfld_sst_audio_platform_data(void *info)
+{
+	struct platform_device *pdev;
+	int ret;
+
+	ret = add_sst_platform_device();
+	if (ret < 0) {
+		pr_err("%s failed to sst_platform device\n", __func__);
+		return NULL;
+	}
+
+	if (!audio_codec || !strcmp(audio_codec, "dummy")) {
+		pdev = platform_device_register_simple("merr_dpcm_dummy",
+					0, NULL, 0);
+		if (IS_ERR(pdev)) {
+			pr_err("failed to register merr_dpcm_dummy platform device\n");
+			return NULL;
+		}
+	} else if (!strcmp(audio_codec, "wm8958")) {
+		/* Register i2c audio codec wm8958 */
+		wm8958_platform_data(NULL);
+
+		pdev = platform_device_alloc("mrfld_wm8958", -1);
+		if (!pdev) {
+			pr_err("failed to allocate mrfld_wm8958 platform device\n");
+			return NULL;
+		}
+
+		ret = platform_device_add(pdev);
+		if (ret) {
+			pr_err("failed to add mrfld_wm8958 platform device\n");
+			platform_device_put(pdev);
+			return NULL;
+		}
+		if (platform_device_add_data(pdev, &mrfld_audio_pdata,
+						 sizeof(mrfld_audio_pdata))) {
+			pr_err("failed to add mrfld_wm8958 platform data\n");
+			platform_device_put(pdev);
+			return NULL;
+		}
+
+		register_rpmsg_service("rpmsg_mrfld_wm8958_audio", RPROC_SCU,
+					RP_MSIC_MRFLD_AUDIO);
+	} else {
+		/*
+		 * To add a new codec, add an "else if" branch with its
+		 * name and its specific implementation here.
+		 */
+		pr_info("Codec %s is not implemented. Dummy codec selected...\n",
+				audio_codec);
+
+		pdev = platform_device_register_simple("merr_dpcm_dummy",
+							0, NULL, 0);
+		if (IS_ERR(pdev)) {
+			pr_err("failed to register merr_dpcm_dummy platform device\n");
+			return NULL;
+		}
+	}
+
+	return NULL;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_audio.h b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_audio.h
new file mode 100644
index 0000000..2a57556
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_audio.h
@@ -0,0 +1,25 @@
+/*
+ * platform_mrfld_audio.h: MRFLD audio platform data header file
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author: Vinod Koul <vinod.koul@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MRFLD_AUDIO_H_
+#define _PLATFORM_MRFLD_AUDIO_H_
+
+#include <linux/sfi.h>
+
+struct mrfld_audio_platform_data {
+	const struct soft_platform_id *spid;
+	int codec_gpio;
+	int codec_rst;
+};
+
+extern void __init *merfld_audio_platform_data(void *info) __attribute__((weak));
+extern void __init *mrfld_sst_audio_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic.c b/arch/x86/platform/intel-mid/device_libs/platform_msic.c
new file mode 100644
index 0000000..a601f2c
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic.c
@@ -0,0 +1,92 @@
+/*
+ * platform_msic.c: MSIC platform data initialization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/mfd/intel_msic.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel-mid.h>
+#include "platform_msic.h"
+
+struct intel_msic_platform_data msic_pdata;
+
+static struct resource msic_resources[] = {
+	{
+		.start	= INTEL_MSIC_IRQ_PHYS_BASE,
+		.end	= INTEL_MSIC_IRQ_PHYS_BASE + 64 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device msic_device = {
+	.name		= "intel_msic",
+	.id		= -1,
+	.dev		= {
+		.platform_data	= &msic_pdata,
+	},
+	.num_resources	= ARRAY_SIZE(msic_resources),
+	.resource	= msic_resources,
+};
+
+inline bool intel_mid_has_msic(void)
+{
+	return (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL);
+}
+
+static int msic_scu_status_change(struct notifier_block *nb,
+				  unsigned long code, void *data)
+{
+	if (code == SCU_DOWN) {
+		platform_device_unregister(&msic_device);
+		return 0;
+	}
+
+	return platform_device_register(&msic_device);
+}
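+
+/*
+ * The notifier above keeps the MSIC device in step with the SCU
+ * firmware: the device is registered when the SCU comes up and
+ * unregistered again on SCU_DOWN.
+ */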
+
+static int __init msic_init(void)
+{
+	static struct notifier_block msic_scu_notifier = {
+		.notifier_call	= msic_scu_status_change,
+	};
+
+	/*
+	 * We need to be sure that the SCU IPC is ready before MSIC device
+	 * can be registered.
+	 */
+	if (intel_mid_has_msic())
+		intel_scu_notifier_add(&msic_scu_notifier);
+
+	return 0;
+}
+arch_initcall(msic_init);
+
+/*
+ * msic_generic_platform_data - sets generic platform data for the block
+ * @info: pointer to the SFI device table entry for this block
+ * @block: MSIC block
+ *
+ * Function sets IRQ number from the SFI table entry for given device to
+ * the MSIC platform data.
+ */
+void *msic_generic_platform_data(void *info, enum intel_msic_block block)
+{
+	struct sfi_device_table_entry *entry = info;
+
+	BUG_ON(block < 0 || block >= INTEL_MSIC_BLOCK_LAST);
+	msic_pdata.irq[block] = entry->irq;
+
+	return NULL;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic.h b/arch/x86/platform/intel-mid/device_libs/platform_msic.h
new file mode 100644
index 0000000..6abcfc7
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic.h
@@ -0,0 +1,21 @@
+/*
+ * platform_msic.h: MSIC platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MSIC_H_
+#define _PLATFORM_MSIC_H_
+
+#include <linux/mfd/intel_msic.h>
+
+extern struct intel_msic_platform_data msic_pdata;
+
+extern void *msic_generic_platform_data(void *info,
+			enum intel_msic_block block) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_adc.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_adc.c
new file mode 100644
index 0000000..f1ed88e
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_adc.c
@@ -0,0 +1,56 @@
+/*
+ * platform_msic_adc.c: MSIC ADC platform data initialization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_gpadc.h>
+#include <asm/intel_mid_remoteproc.h>
+#include "platform_msic.h"
+#include "platform_msic_adc.h"
+
+void __init *msic_adc_platform_data(void *info)
+{
+	struct platform_device *pdev = NULL;
+	struct sfi_device_table_entry *entry = info;
+	static struct intel_mid_gpadc_platform_data msic_adc_pdata;
+	int ret = 0;
+
+	pdev = platform_device_alloc(ADC_DEVICE_NAME, -1);
+
+	if (!pdev) {
+		pr_err("out of memory for SFI platform dev %s\n",
+					ADC_DEVICE_NAME);
+		goto out;
+	}
+
+	msic_adc_pdata.intr = 0xffff7fc0;
+
+	pdev->dev.platform_data = &msic_adc_pdata;
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("failed to add adc platform device\n");
+		platform_device_put(pdev);
+		goto out;
+	}
+
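+	/*
+	 * Attach the IRQ from the SFI table entry as a device resource and
+	 * announce the ADC to the SCU firmware over rpmsg.
+	 */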
+	install_irq_resource(pdev, entry->irq);
+
+	register_rpmsg_service("rpmsg_msic_adc", RPROC_SCU,
+				RP_MSIC_ADC);
+out:
+	return &msic_adc_pdata;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_adc.h b/arch/x86/platform/intel-mid/device_libs/platform_msic_adc.h
new file mode 100644
index 0000000..8e1f901
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_adc.h
@@ -0,0 +1,18 @@
+/*
+ * platform_msic_adc.h: MSIC ADC platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MSIC_ADC_H_
+#define _PLATFORM_MSIC_ADC_H_
+
+#define ADC_DEVICE_NAME "msic_adc"
+
+extern void __init *msic_adc_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c
new file mode 100644
index 0000000..a8aadfb
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.c
@@ -0,0 +1,36 @@
+/*
+ * platform_msic_audio.c: MSIC audio platform data initialization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/intel_msic.h>
+#include <asm/intel-mid.h>
+#include "platform_msic.h"
+#include "platform_msic_audio.h"
+
+void __init *msic_audio_platform_data(void *info)
+{
+	struct platform_device *pdev;
+
+	pdev = platform_device_register_simple("sst-platform", -1, NULL, 0);
+
+	if (IS_ERR(pdev)) {
+		pr_err("failed to create audio platform device\n");
+		return NULL;
+	}
+
+	return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_AUDIO);
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.h b/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.h
new file mode 100644
index 0000000..1fca68f
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_audio.h
@@ -0,0 +1,16 @@
+/*
+ * platform_msic_audio.h: MSIC audio platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MSIC_AUDIO_H_
+#define _PLATFORM_MSIC_AUDIO_H_
+
+extern void __init *msic_audio_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_battery.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_battery.c
new file mode 100644
index 0000000..133448b
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_battery.c
@@ -0,0 +1,26 @@
+/*
+ * platform_msic_battery.c: MSIC battery platform data initialization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/mfd/intel_msic.h>
+#include <asm/intel-mid.h>
+#include "platform_msic.h"
+#include "platform_msic_battery.h"
+
+void __init *msic_battery_platform_data(void *info)
+{
+	return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_BATTERY);
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_battery.h b/arch/x86/platform/intel-mid/device_libs/platform_msic_battery.h
new file mode 100644
index 0000000..9be705d
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_battery.h
@@ -0,0 +1,17 @@
+/*
+ * platform_msic_battery.h: MSIC battery platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MSIC_BATTERY_H_
+#define _PLATFORM_MSIC_BATTERY_H_
+
+extern void __init *msic_battery_platform_data(void *info)
+			__attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c
new file mode 100644
index 0000000..1b11038
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.c
@@ -0,0 +1,79 @@
+/*
+ * platform_msic_gpio.c: MSIC GPIO platform data initialization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/mfd/intel_msic.h>
+#include <asm/intel-mid.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+#include "platform_msic.h"
+#include "platform_msic_gpio.h"
+
+void __init *msic_gpio_platform_data(void *info)
+{
+	struct platform_device *pdev = NULL;
+	struct sfi_device_table_entry *entry = info;
+	static struct intel_msic_gpio_pdata msic_gpio_pdata;
+	int ret;
+	int gpio;
+	struct resource res;
+
+	pdev = platform_device_alloc(MSIC_GPIO_DEVICE_NAME, -1);
+
+	if (!pdev) {
+		pr_err("out of memory for SFI platform dev %s\n",
+					MSIC_GPIO_DEVICE_NAME);
+		return NULL;
+	}
+
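+	/* the base pin number is published by the firmware in the SFI GPIO table */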
+	gpio = get_gpio_by_name("msic_gpio_base");
+
+	if (gpio < 0)
+		return NULL;
+
+	/* The Basin Cove PMIC provides 8 GPIO pins in total:
+	 * GPIO[5:2,0] support 1.8V, while GPIO[7:6,1] support both 1.8V
+	 * and 3.3V. We group GPIO[5:0] as low voltage and GPIO[7:6] as
+	 * high voltage. Because the CTL registers are contiguous, this
+	 * grouping does not affect driver usage, but it makes sharing the
+	 * driver among multiple platforms easier.
+	 */
+	msic_gpio_pdata.ngpio_lv = 6;
+	msic_gpio_pdata.ngpio_hv = 2;
+	msic_gpio_pdata.gpio0_lv_ctlo = 0x7E;
+	msic_gpio_pdata.gpio0_lv_ctli = 0x8E;
+	msic_gpio_pdata.gpio0_hv_ctlo = 0x84;
+	msic_gpio_pdata.gpio0_hv_ctli = 0x94;
+
+	msic_gpio_pdata.can_sleep = 1;
+	msic_gpio_pdata.gpio_base = gpio;
+
+	pdev->dev.platform_data = &msic_gpio_pdata;
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("failed to add msic gpio platform device\n");
+		platform_device_put(pdev);
+		return NULL;
+	}
+
+	res.name = "IRQ",
+	res.flags = IORESOURCE_IRQ,
+	res.start = entry->irq;
+	platform_device_add_resources(pdev, &res, 1);
+
+	register_rpmsg_service("rpmsg_msic_gpio", RPROC_SCU, RP_MSIC_GPIO);
+
+	return &msic_gpio_pdata;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.h b/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.h
new file mode 100644
index 0000000..b78b236
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_gpio.h
@@ -0,0 +1,18 @@
+/*
+ * platform_msic_gpio.h: MSIC GPIO platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MSIC_GPIO_H_
+#define _PLATFORM_MSIC_GPIO_H_
+
+#define MSIC_GPIO_DEVICE_NAME "msic_gpio"
+
+extern void __init *msic_gpio_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c
new file mode 100644
index 0000000..1782389
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.c
@@ -0,0 +1,39 @@
+/*
+ * platform_msic_ocd.c: MSIC OCD platform data initialization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/sfi.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/mfd/intel_msic.h>
+#include <asm/intel-mid.h>
+#include "platform_msic.h"
+#include "platform_msic_ocd.h"
+
+void __init *msic_ocd_platform_data(void *info)
+{
+	static struct intel_msic_ocd_pdata msic_ocd_pdata;
+	int gpio;
+
+	gpio = get_gpio_by_name("ocd_gpio");
+
+	if (gpio < 0)
+		return NULL;
+
+	msic_ocd_pdata.gpio = gpio;
+	msic_pdata.ocd = &msic_ocd_pdata;
+
+	return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_OCD);
+}
+
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.h b/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.h
new file mode 100644
index 0000000..7caa13b
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_ocd.h
@@ -0,0 +1,16 @@
+/*
+ * platform_msic_ocd.h: MSIC OCD platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MSIC_OCD_H_
+#define _PLATFORM_MSIC_OCD_H_
+
+extern void __init *msic_ocd_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c
new file mode 100644
index 0000000..f745750
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.c
@@ -0,0 +1,73 @@
+/*
+ * platform_msic_power_btn.c: MSIC power btn platform data initialization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/sfi.h>
+#include <linux/init.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_powerbtn.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_scu_pmic.h>
+#include "platform_msic_power_btn.h"
+#include <linux/platform_data/intel_mid_remoteproc.h>
+
+#define BCOVE_PBIRQ 0x02
+#define BCOVE_PBIRQMASK	0x0d
+
+static struct intel_msic_power_btn_platform_data msic_power_btn_pdata;
+
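+/*
+ * Acknowledge a power-button interrupt by clearing the power-button mask
+ * bit in the PBIRQ and PBIRQMASK registers via SCU IPC.
+ */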
+static int mrfl_pb_irq_ack(struct intel_msic_power_btn_platform_data *pdata)
+{
+	intel_scu_ipc_update_register(BCOVE_PBIRQ, 0, MSIC_PWRBTNM);
+	intel_scu_ipc_update_register(BCOVE_PBIRQMASK, 0, MSIC_PWRBTNM);
+
+	return 0;
+}
+
+void __init *msic_power_btn_platform_data(void *info)
+{
+	int ret;
+	struct platform_device *pdev;
+	struct sfi_device_table_entry *entry = info;
+	struct resource res;
+
+	pdev = platform_device_alloc(INTEL_MID_POWERBTN_DEV_NAME, -1);
+	if (!pdev) {
+		pr_err("%s(): out of memory\n", __func__);
+		return NULL;
+	}
+
+	msic_power_btn_pdata.pbstat = 0xfffff61a;
+	msic_power_btn_pdata.pb_level = (1 << 4);
+	msic_power_btn_pdata.irq_lvl1_mask = 0x0c;
+	msic_power_btn_pdata.irq_ack = mrfl_pb_irq_ack;
+
+	pdev->dev.platform_data = &msic_power_btn_pdata;
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("%s(): platform_device_add() failed\n", __func__);
+		platform_device_put(pdev);
+		return NULL;
+	}
+
+	res.name = "IRQ",
+	res.flags = IORESOURCE_IRQ,
+	res.start = entry->irq;
+	platform_device_add_resources(pdev, &res, 1);
+
+	register_rpmsg_service("rpmsg_mid_powerbtn",
+			RPROC_SCU, RP_MSIC_POWER_BTN);
+
+	return &msic_power_btn_pdata;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.h b/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.h
new file mode 100644
index 0000000..3de94ca
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_power_btn.h
@@ -0,0 +1,19 @@
+/*
+ * platform_msic_power_btn.h: MSIC power btn platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MSIC_POWER_BTN_H_
+#define _PLATFORM_MSIC_POWER_BTN_H_
+
+#define INTEL_MID_POWERBTN_DEV_NAME "mid_powerbtn"
+
+extern void __init *msic_power_btn_platform_data(void *info)
+		__attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.c b/arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.c
new file mode 100644
index 0000000..7c24a14
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.c
@@ -0,0 +1,185 @@
+/*
+ * platform_msic_thermal.c: msic_thermal platform data initialization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/input.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/intel_msic.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_gpadc.h>
+#include <asm/intel_mid_thermal.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+#include "platform_msic.h"
+#include "platform_msic_thermal.h"
+
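+/*
+ * Each sensor entry carries the linear calibration (slope and intercept)
+ * used to convert its raw ADC reading into a temperature; sensors marked
+ * 'direct' report a temperature without that conversion.
+ */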
+/* ctp thermal sensor list */
+static struct intel_mid_thermal_sensor ctp_sensors[] = {
+	{
+		.name = SKIN0_NAME,
+		.index = 0,
+		.slope = 410,
+		.intercept = 16808,
+		.adc_channel = 0x04 | CH_NEED_VREF | CH_NEED_VCALIB,
+		.temp_correlation = skin0_temp_correlation,
+		.direct = false,
+	},
+	{
+		.name = SKIN1_NAME,
+		.index = 1,
+		.slope = 665,
+		.intercept = 8375,
+		.adc_channel = 0x04 | CH_NEED_VREF | CH_NEED_VCALIB,
+		.temp_correlation = skin0_temp_correlation,
+		.direct = false,
+	},
+	{
+		.name = MSIC_DIE_NAME,
+		.index = 2,
+		.slope = 368,
+		.intercept = 219560,
+		.adc_channel = 0x03 | CH_NEED_VCALIB,
+		.direct = true,
+	},
+	{
+		.name = BPTHERM_NAME,
+		.index = 3,
+		.slope = 788,
+		.intercept = 5065,
+		.adc_channel = 0x09 | CH_NEED_VREF | CH_NEED_VCALIB,
+		.temp_correlation = bptherm_temp_correlation,
+		.direct = false,
+	},
+
+};
+
+/* mfld thermal sensor list */
+static struct intel_mid_thermal_sensor mfld_sensors[] = {
+	{
+		.name = SKIN0_NAME,
+		.index = 0,
+		.slope = 851,
+		.intercept = 2800,
+		.adc_channel = 0x08 | CH_NEED_VREF | CH_NEED_VCALIB,
+		.temp_correlation = skin0_temp_correlation,
+		.direct = false,
+	},
+	{
+		.name = SKIN1_NAME,
+		.index = 1,
+		.slope = 806,
+		.intercept = 1800,
+		.adc_channel = 0x08 | CH_NEED_VREF | CH_NEED_VCALIB,
+		.temp_correlation = skin1_temp_correlation,
+		.direct = false,
+	},
+	{
+		.name = MSIC_SYS_NAME,
+		.index = 2,
+		.slope = 0,
+		.intercept = 0,
+		.adc_channel = 0x0A | CH_NEED_VREF | CH_NEED_VCALIB,
+		.direct = false,
+	},
+	{
+		.name = MSIC_DIE_NAME,
+		.index = 3,
+		.slope = 368,
+		.intercept = 219560,
+		.adc_channel = 0x03 | CH_NEED_VCALIB,
+		.direct = true,
+	},
+
+};
+
+/* LEX thermal sensor list */
+static struct intel_mid_thermal_sensor lex_sensors[] = {
+	{
+		.name = SKIN0_NAME,
+		.index = 0,
+		.slope = 851,
+		.intercept = 2800,
+		.adc_channel = 0x08 | CH_NEED_VREF | CH_NEED_VCALIB,
+		.temp_correlation = skin0_temp_correlation,
+		.direct = false,
+	},
+	{
+		.name = SKIN1_NAME,
+		.index = 1,
+		.slope = 806,
+		.intercept = 1800,
+		.adc_channel = 0x08 | CH_NEED_VREF | CH_NEED_VCALIB,
+		.temp_correlation = skin1_temp_correlation,
+		.direct = false,
+	},
+	{
+		.name = MSIC_SYS_NAME,
+		.index = 2,
+		.slope = 0,
+		.intercept = 0,
+		.adc_channel = 0x0A | CH_NEED_VREF | CH_NEED_VCALIB,
+		.direct = false,
+	},
+	{
+		.name = MSIC_DIE_NAME,
+		.index = 3,
+		.slope = 368,
+		.intercept = 219560,
+		.adc_channel = 0x03 | CH_NEED_VCALIB,
+		.direct = true,
+	},
+
+};
+
+static struct intel_mid_thermal_platform_data pdata[] = {
+	[mfld_thermal] = {
+		.num_sensors = 4,
+		.sensors = mfld_sensors,
+		.soc_cooling = false,
+	},
+	[ctp_thermal] = {
+		.num_sensors = 4,
+		.sensors = ctp_sensors,
+		.soc_cooling = true,
+	},
+	[lex_thermal] = {
+		.num_sensors = 4,
+		.sensors = lex_sensors,
+		.soc_cooling = false,
+	},
+};
+
+void __init *msic_thermal_platform_data(void *info)
+{
+	struct platform_device *pdev;
+
+	pdev = platform_device_alloc(MSIC_THERM_DEV_NAME, -1);
+	if (!pdev) {
+		pr_err("out of memory for SFI platform dev %s\n",
+			MSIC_THERM_DEV_NAME);
+		return NULL;
+	}
+
+	/* set platform data before adding the device so it is already
+	 * visible when the driver probes
+	 */
+	pdev->dev.platform_data = &pdata[mfld_thermal];
+
+	if (platform_device_add(pdev)) {
+		pr_err("failed to add thermal platform device\n");
+		platform_device_put(pdev);
+		return NULL;
+	}
+
+	register_rpmsg_service("rpmsg_mid_thermal", RPROC_SCU, RP_MSIC_THERMAL);
+
+	return NULL;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.h b/arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.h
new file mode 100644
index 0000000..c4f3404
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic_thermal.h
@@ -0,0 +1,26 @@
+/*
+ * platform_msic_thermal.h: msic_thermal platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_MSIC_THERMAL_H_
+#define _PLATFORM_MSIC_THERMAL_H_
+
+#define MSIC_THERM_DEV_NAME "msic_thermal"
+
+extern void __init *msic_thermal_platform_data(void *info)
+			__attribute__((weak));
+enum {
+	mfld_thermal,
+	ctp_thermal,
+	lex_thermal,
+	vb_thermal,
+};
+
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c b/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c
new file mode 100644
index 0000000..6625edc
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c
@@ -0,0 +1,68 @@
+/*
+ * platform_pcal9555a.c: pcal9555a platform data initialization file
+ *
+ * (C) Copyright 2014 Intel Corporation
+ * Author: Dan O'Donovan <dan@emutex.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/i2c/pca953x.h>
+#include <asm/intel-mid.h>
+#include "platform_pcal9555a.h"
+
+
+void __init *pcal9555a_platform_data(void *info)
+{
+	static struct pca953x_platform_data pcal9555a_pdata[PCAL9555A_NUM];
+	static int nr;
+	struct pca953x_platform_data *pcal9555a;
+	struct i2c_board_info *i2c_info = info;
+	int gpio_base, intr;
+	char base_pin_name[SFI_NAME_LEN + 1];
+	char intr_pin_name[SFI_NAME_LEN + 1];
+
+	if (!info) {
+		pr_err("%s: invalid info pointer\n", __func__);
+		return NULL;
+	}
+
+	if (nr >= PCAL9555A_NUM) {
+		pr_err("%s: too many pcal9555a, we only support %d\n",
+			__func__, PCAL9555A_NUM);
+		return NULL;
+	}
+	pcal9555a = &pcal9555a_pdata[nr++];
+
+	/* there are several pcal9555a expanders on the board; we only need
+	 * to load multiple instances of the same pca953x driver to cover them
+	 */
+
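+	/* SFI names the expander's pins after the device type, i.e.
+	 * "<type>_base" and "<type>_int"
+	 */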
+	snprintf(base_pin_name, sizeof(base_pin_name),
+		 "%s_base", i2c_info->type);
+	snprintf(intr_pin_name, sizeof(intr_pin_name),
+		 "%s_int", i2c_info->type);
+
+	strcpy(i2c_info->type, "pcal9555a");
+
+	gpio_base = get_gpio_by_name(base_pin_name);
+	intr = get_gpio_by_name(intr_pin_name);
+
+	if (gpio_base == -1)
+		return NULL;
+	pcal9555a->gpio_base = gpio_base;
+	if (intr != -1) {
+		i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
+		pcal9555a->irq_base = gpio_base + INTEL_MID_IRQ_OFFSET;
+	} else {
+		i2c_info->irq = -1;
+		pcal9555a->irq_base = -1;
+	}
+	return pcal9555a;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.h b/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.h
new file mode 100644
index 0000000..5593db2
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.h
@@ -0,0 +1,19 @@
+/*
+ * platform_pcal9555a.h: pcal9555a platform data header file
+ *
+ * (C) Copyright 2014 Intel Corporation
+ * Author: Dan O'Donovan <dan@emutex.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_PCAL9555A_H_
+#define _PLATFORM_PCAL9555A_H_
+
+/* we have multiple pcal9555a on the board ... */
+#define PCAL9555A_NUM 4
+
+extern void __init *pcal9555a_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c b/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c
new file mode 100644
index 0000000..1526663
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c
@@ -0,0 +1,36 @@
+/*
+ * platform_pmic_gpio.c: PMIC GPIO platform data initialization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <asm/intel-mid.h>
+#include <linux/intel_pmic_gpio.h>
+#include "platform_pmic_gpio.h"
+
+void __init *pmic_gpio_platform_data(void *info)
+{
+	static struct intel_pmic_gpio_platform_data pmic_gpio_pdata;
+	int gpio_base = get_gpio_by_name("pmic_gpio_base");
+
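+	/* fall back to a fixed base when the firmware table has no entry */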
+	if (gpio_base == -1)
+		gpio_base = 64;
+	pmic_gpio_pdata.gpio_base = gpio_base;
+	pmic_gpio_pdata.irq_base = gpio_base + INTEL_MID_IRQ_OFFSET;
+	pmic_gpio_pdata.gpiointr = 0xffffeff8;
+
+	return &pmic_gpio_pdata;
+}
+
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.h b/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.h
new file mode 100644
index 0000000..0bce0de
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.h
@@ -0,0 +1,16 @@
+/*
+ * platform_pmic_gpio.h: PMIC GPIO platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_PMIC_GPIO_H_
+#define _PLATFORM_PMIC_GPIO_H_
+
+extern void __init *pmic_gpio_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_scu_flis.c b/arch/x86/platform/intel-mid/device_libs/platform_scu_flis.c
new file mode 100644
index 0000000..778f70c
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_scu_flis.c
@@ -0,0 +1,290 @@
+/*
+ * platform_scu_flis.c: scu_flis platform data initialization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Ning Li <ning.li@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/input.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_remoteproc.h>
+#include <asm/intel_scu_flis.h>
+#include "platform_scu_flis.h"
+
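+/*
+ * Map from each logical Tangier pin to the MMIO offset of its FLIS
+ * (pin configuration) register within the FLIS register block.
+ */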
+static struct pin_mmio_flis_t tng_pin_mmio_flis_table[TNG_PIN_NUM] = {
+	[tng_usb_ulpi_0_clk] = { writable, 0x0500 },
+	[tng_usb_ulpi_0_data_0] = { writable, 0x0504 },
+	[tng_usb_ulpi_0_data_1] = { writable, 0x0508 },
+	[tng_usb_ulpi_0_data_2] = { writable, 0x050C },
+	[tng_usb_ulpi_0_data_3] = { writable, 0x0510 },
+	[tng_usb_ulpi_0_data_4] = { writable, 0x0514 },
+	[tng_usb_ulpi_0_data_5] = { writable, 0x0518 },
+	[tng_usb_ulpi_0_data_6] = { writable, 0x051C },
+	[tng_usb_ulpi_0_data_7] = { writable, 0x0520 },
+	[tng_usb_ulpi_0_dir] = { writable, 0x0524 },
+	[tng_usb_ulpi_0_nxt] = { writable, 0x0528 },
+	[tng_usb_ulpi_0_refclk] = { writable, 0x052C },
+	[tng_usb_ulpi_0_stp] = { writable, 0x0530 },
+	[tng_emmc_0_clk] = { writable, 0x0900 },
+	[tng_emmc_0_cmd] = { writable, 0x0904 },
+	[tng_emmc_0_d_0] = { writable, 0x0908 },
+	[tng_emmc_0_d_1] = { writable, 0x090C },
+	[tng_emmc_0_d_2] = { writable, 0x0910 },
+	[tng_emmc_0_d_3] = { writable, 0x0914 },
+	[tng_emmc_0_d_4] = { writable, 0x0918 },
+	[tng_emmc_0_d_5] = { writable, 0x091C },
+	[tng_emmc_0_d_6] = { writable, 0x0920 },
+	[tng_emmc_0_d_7] = { writable, 0x0924 },
+	[tng_emmc_0_rst_b] = { writable, 0x0928 },
+	[tng_gp_emmc_1_clk] = { writable, 0x092C },
+	[tng_gp_emmc_1_cmd] = { writable, 0x0930 },
+	[tng_gp_emmc_1_d_0] = { writable, 0x0934 },
+	[tng_gp_emmc_1_d_1] = { writable, 0x0938 },
+	[tng_gp_emmc_1_d_2] = { writable, 0x093C },
+	[tng_gp_emmc_1_d_3] = { writable, 0x0940 },
+	[tng_gp_emmc_1_d_4] = { writable, 0x0944 },
+	[tng_gp_emmc_1_d_5] = { writable, 0x0948 },
+	[tng_gp_emmc_1_d_6] = { writable, 0x094C },
+	[tng_gp_emmc_1_d_7] = { writable, 0x0950 },
+	[tng_gp_emmc_1_rst_b] = { writable, 0x0954 },
+	[tng_gp_28] = { writable, 0x0958 },
+	[tng_gp_29] = { writable, 0x095C },
+	[tng_gp_sdio_0_cd_b] = { writable, 0x0D00 },
+	[tng_gp_sdio_0_clk] = { writable, 0x0D04 },
+	[tng_gp_sdio_0_cmd] = { writable, 0x0D08 },
+	[tng_gp_sdio_0_dat_0] = { writable, 0x0D0C },
+	[tng_gp_sdio_0_dat_1] = { writable, 0x0D10 },
+	[tng_gp_sdio_0_dat_2] = { writable, 0x0D14 },
+	[tng_gp_sdio_0_dat_3] = { writable, 0x0D18 },
+	[tng_gp_sdio_0_lvl_clk_fb] = { writable, 0x0D1C },
+	[tng_gp_sdio_0_lvl_cmd_dir] = { writable, 0x0D20 },
+	[tng_gp_sdio_0_lvl_dat_dir] = { writable, 0x0D24 },
+	[tng_gp_sdio_0_lvl_sel] = { writable, 0x0D28 },
+	[tng_gp_sdio_0_powerdown_b] = { writable, 0x0D2C },
+	[tng_gp_sdio_0_wp] = { writable, 0x0D30 },
+	[tng_gp_sdio_1_clk] = { writable, 0x0D34 },
+	[tng_gp_sdio_1_cmd] = { writable, 0x0D38 },
+	[tng_gp_sdio_1_dat_0] = { writable, 0x0D3C },
+	[tng_gp_sdio_1_dat_1] = { writable, 0x0D40 },
+	[tng_gp_sdio_1_dat_2] = { writable, 0x0D44 },
+	[tng_gp_sdio_1_dat_3] = { writable, 0x0D48 },
+	[tng_gp_sdio_1_powerdown_b] = { writable, 0x0D4C },
+	[tng_mhsi_acdata] = { writable, 0x1100 },
+	[tng_mhsi_acflag] = { writable, 0x1104 },
+	[tng_mhsi_acready] = { writable, 0x1108 },
+	[tng_mhsi_acwake] = { writable, 0x110C },
+	[tng_mhsi_cadata] = { writable, 0x1110 },
+	[tng_mhsi_caflag] = { writable, 0x1114 },
+	[tng_mhsi_caready] = { writable, 0x1118 },
+	[tng_mhsi_cawake] = { writable, 0x111C },
+	[tng_gp_mslim_0_bclk] = { writable, 0x1500 },
+	[tng_gp_mslim_0_bdat] = { writable, 0x1504 },
+	[tng_gp_ssp_0_clk] = { writable, 0x1508 },
+	[tng_gp_ssp_0_fs] = { writable, 0x150C },
+	[tng_gp_ssp_0_rxd] = { writable, 0x1510 },
+	[tng_gp_ssp_0_txd] = { writable, 0x1514 },
+	[tng_gp_ssp_1_clk] = { writable, 0x1518 },
+	[tng_gp_ssp_1_fs] = { writable, 0x151C },
+	[tng_gp_ssp_1_rxd] = { writable, 0x1520 },
+	[tng_gp_ssp_1_txd] = { writable, 0x1524 },
+	[tng_gp_ssp_2_clk] = { writable, 0x1528 },
+	[tng_gp_ssp_2_fs] = { writable, 0x152C },
+	[tng_gp_ssp_2_rxd] = { writable, 0x1530 },
+	[tng_gp_ssp_2_txd] = { writable, 0x1534 },
+	[tng_gp_ssp_3_clk] = { writable, 0x1900 },
+	[tng_gp_ssp_3_fs] = { writable, 0x1904 },
+	[tng_gp_ssp_3_rxd] = { writable, 0x1908 },
+	[tng_gp_ssp_3_txd] = { writable, 0x190C },
+	[tng_gp_ssp_4_clk] = { writable, 0x1910 },
+	[tng_gp_ssp_4_fs_0] = { writable, 0x1914 },
+	[tng_gp_ssp_4_fs_1] = { writable, 0x1918 },
+	[tng_gp_ssp_4_fs_2] = { writable, 0x191C },
+	[tng_gp_ssp_4_fs_3] = { writable, 0x1920 },
+	[tng_gp_ssp_4_rxd] = { writable, 0x1924 },
+	[tng_gp_ssp_4_txd] = { writable, 0x1928 },
+	[tng_gp_ssp_5_clk] = { writable, 0x192C },
+	[tng_gp_ssp_5_fs_0] = { writable, 0x1930 },
+	[tng_gp_ssp_5_fs_1] = { writable, 0x1934 },
+	[tng_gp_ssp_5_fs_2] = { writable, 0x1938 },
+	[tng_gp_ssp_5_fs_3] = { writable, 0x193C },
+	[tng_gp_ssp_5_rxd] = { writable, 0x1940 },
+	[tng_gp_ssp_5_txd] = { writable, 0x1944 },
+	[tng_gp_ssp_6_clk] = { writable, 0x1948 },
+	[tng_gp_ssp_6_fs] = { writable, 0x194C },
+	[tng_gp_ssp_6_rxd] = { writable, 0x1950 },
+	[tng_gp_ssp_6_txd] = { writable, 0x1954 },
+	[tng_gp_i2c_1_scl] = { writable, 0x1D00 },
+	[tng_gp_i2c_1_sda] = { writable, 0x1D04 },
+	[tng_gp_i2c_2_scl] = { writable, 0x1D08 },
+	[tng_gp_i2c_2_sda] = { writable, 0x1D0C },
+	[tng_gp_i2c_3_scl] = { writable, 0x1D10 },
+	[tng_gp_i2c_3_sda] = { writable, 0x1D14 },
+	[tng_gp_i2c_4_scl] = { writable, 0x1D18 },
+	[tng_gp_i2c_4_sda] = { writable, 0x1D1C },
+	[tng_gp_i2c_5_scl] = { writable, 0x1D20 },
+	[tng_gp_i2c_5_sda] = { writable, 0x1D24 },
+	[tng_gp_i2c_6_scl] = { writable, 0x1D28 },
+	[tng_gp_i2c_6_sda] = { writable, 0x1D2C },
+	[tng_gp_i2c_7_scl] = { writable, 0x1D30 },
+	[tng_gp_i2c_7_sda] = { writable, 0x1D34 },
+	[tng_gp_uart_0_cts] = { writable, 0x2100 },
+	[tng_gp_uart_0_rts] = { writable, 0x2104 },
+	[tng_gp_uart_0_rx] = { writable, 0x2108 },
+	[tng_gp_uart_0_tx] = { writable, 0x210C },
+	[tng_gp_uart_1_cts] = { writable, 0x2110 },
+	[tng_gp_uart_1_rts] = { writable, 0x2114 },
+	[tng_gp_uart_1_rx] = { writable, 0x2118 },
+	[tng_gp_uart_1_tx] = { writable, 0x211C },
+	[tng_gp_uart_2_cts] = { writable, 0x2120 },
+	[tng_gp_uart_2_rts] = { writable, 0x2124 },
+	[tng_gp_uart_2_rx] = { writable, 0x2128 },
+	[tng_gp_uart_2_tx] = { writable, 0x212C },
+	[tng_gp_13] = { writable, 0x2500 },
+	[tng_gp_14] = { writable, 0x2504 },
+	[tng_gp_15] = { writable, 0x2508 },
+	[tng_gp_16] = { writable, 0x250C },
+	[tng_gp_17] = { writable, 0x2510 },
+	[tng_gp_18] = { writable, 0x2514 },
+	[tng_gp_19] = { writable, 0x2518 },
+	[tng_gp_20] = { writable, 0x251C },
+	[tng_gp_21] = { writable, 0x2520 },
+	[tng_gp_22] = { writable, 0x2524 },
+	[tng_gp_23] = { writable, 0x2528 },
+	[tng_gp_24] = { writable, 0x252C },
+	[tng_gp_25] = { writable, 0x2530 },
+	[tng_gp_fast_int_0] = { writable, 0x2534 },
+	[tng_gp_fast_int_1] = { writable, 0x2538 },
+	[tng_gp_fast_int_2] = { writable, 0x253C },
+	[tng_gp_fast_int_3] = { writable, 0x2540 },
+	[tng_gp_pwm_0] = { writable, 0x2544 },
+	[tng_gp_pwm_1] = { writable, 0x2548 },
+	[tng_gp_camerasb_0] = { writable, 0x2900 },
+	[tng_gp_camerasb_1] = { writable, 0x2904 },
+	[tng_gp_camerasb_2] = { writable, 0x2908 },
+	[tng_gp_camerasb_3] = { writable, 0x290C },
+	[tng_gp_camerasb_4] = { writable, 0x2910 },
+	[tng_gp_camerasb_5] = { writable, 0x2914 },
+	[tng_gp_camerasb_6] = { writable, 0x2918 },
+	[tng_gp_camerasb_7] = { writable, 0x291C },
+	[tng_gp_camerasb_8] = { writable, 0x2920 },
+	[tng_gp_camerasb_9] = { writable, 0x2924 },
+	[tng_gp_camerasb_10] = { writable, 0x2928 },
+	[tng_gp_camerasb_11] = { writable, 0x292C },
+	[tng_gp_clkph_0] = { writable, 0x2D00 },
+	[tng_gp_clkph_1] = { writable, 0x2D04 },
+	[tng_gp_clkph_2] = { writable, 0x2D08 },
+	[tng_gp_clkph_3] = { writable, 0x2D0C },
+	[tng_gp_clkph_4] = { writable, 0x2D10 },
+	[tng_gp_clkph_5] = { writable, 0x2D14 },
+	[tng_gp_hdmi_hpd] = { writable, 0x2D18 },
+	[tng_gp_intd_dsi_te1] = { writable, 0x2D1C },
+	[tng_gp_intd_dsi_te2] = { writable, 0x2D20 },
+	[tng_osc_clk_ctrl_0] = { writable, 0x2D24 },
+	[tng_osc_clk_ctrl_1] = { writable, 0x2D28 },
+	[tng_osc_clk_out_0] = { writable, 0x2D2C },
+	[tng_osc_clk_out_1] = { writable, 0x2D30 },
+	[tng_osc_clk_out_2] = { writable, 0x2D34 },
+	[tng_osc_clk_out_3] = { writable, 0x2D38 },
+	[tng_osc_clk_out_4] = { writable, 0x2D3C },
+	[tng_resetout_b] = { writable, 0x2D40 },
+	[tng_xxpmode] = { writable, 0x2D44 },
+	[tng_xxprdy] = { writable, 0x2D48 },
+	[tng_xxpreq_b] = { writable, 0x2D4C },
+	[tng_gp_26] = { writable, 0x2D50 },
+	[tng_gp_27] = { writable, 0x2D54 },
+	[tng_i2c_0_scl] = { writable, 0x3100 },
+	[tng_i2c_0_sda] = { writable, 0x3104 },
+	[tng_ierr_b] = { writable, 0x3108 },
+	[tng_jtag_tckc] = { writable, 0x310C },
+	[tng_jtag_tdic] = { writable, 0x3110 },
+	[tng_jtag_tdoc] = { writable, 0x3114 },
+	[tng_jtag_tmsc] = { writable, 0x3118 },
+	[tng_jtag_trst_b] = { writable, 0x311C },
+	[tng_prochot_b] = { writable, 0x3120 },
+	[tng_rtc_clk] = { writable, 0x3124 },
+	[tng_svid_vclk] = { writable, 0x3128 },
+	[tng_svid_vdio] = { writable, 0x3130 },
+	[tng_thermtrip_b] = { writable, 0x3134 },
+	[tng_standby] = { writable, 0x3138 },
+	[tng_gp_kbd_dkin_0] = { writable, 0x3500 },
+	[tng_gp_kbd_dkin_1] = { writable, 0x3504 },
+	[tng_gp_kbd_dkin_2] = { writable, 0x3508 },
+	[tng_gp_kbd_dkin_3] = { writable, 0x350C },
+	[tng_gp_kbd_mkin_0] = { writable, 0x3510 },
+	[tng_gp_kbd_mkin_1] = { writable, 0x3514 },
+	[tng_gp_kbd_mkin_2] = { writable, 0x3518 },
+	[tng_gp_kbd_mkin_3] = { writable, 0x351C },
+	[tng_gp_kbd_mkin_4] = { writable, 0x3520 },
+	[tng_gp_kbd_mkin_5] = { writable, 0x3524 },
+	[tng_gp_kbd_mkin_6] = { writable, 0x3528 },
+	[tng_gp_kbd_mkin_7] = { writable, 0x352C },
+	[tng_gp_kbd_mkout_0] = { writable, 0x3530 },
+	[tng_gp_kbd_mkout_1] = { writable, 0x3534 },
+	[tng_gp_kbd_mkout_2] = { writable, 0x3538 },
+	[tng_gp_kbd_mkout_3] = { writable, 0x353C },
+	[tng_gp_kbd_mkout_4] = { writable, 0x3540 },
+	[tng_gp_kbd_mkout_5] = { writable, 0x3544 },
+	[tng_gp_kbd_mkout_6] = { writable, 0x3548 },
+	[tng_gp_kbd_mkout_7] = { writable, 0x354C },
+	[tng_gp_0] = { writable, 0x3900 },
+	[tng_gp_1] = { writable, 0x3904 },
+	[tng_gp_2] = { writable, 0x3908 },
+	[tng_gp_3] = { writable, 0x390C },
+	[tng_gp_4] = { writable, 0x3910 },
+	[tng_gp_5] = { writable, 0x3914 },
+	[tng_gp_6] = { writable, 0x3918 },
+	[tng_gp_7] = { writable, 0x391C },
+	[tng_gp_8] = { writable, 0x3920 },
+	[tng_gp_9] = { writable, 0x3924 },
+	[tng_gp_10] = { writable, 0x3928 },
+	[tng_gp_11] = { writable, 0x392C },
+	[tng_gp_12] = { writable, 0x3930 },
+	[tng_gp_mpti_clk] = { writable, 0x3D00 },
+	[tng_gp_mpti_data_0] = { writable, 0x3D04 },
+	[tng_gp_mpti_data_1] = { writable, 0x3D08 },
+	[tng_gp_mpti_data_2] = { writable, 0x3D0C },
+	[tng_gp_mpti_data_3] = { writable, 0x3D10 },
+};
+
+static int __init intel_scu_flis_init(void)
+{
+	int ret;
+	struct platform_device *pdev = NULL;
+	static struct intel_scu_flis_platform_data flis_pdata;
+
+	flis_pdata.pin_t = NULL;
+	flis_pdata.pin_num = TNG_PIN_NUM;
+	flis_pdata.flis_base = 0xFF0C0000;
+	flis_pdata.flis_len = 0x8000;
+	flis_pdata.mmio_flis_t = tng_pin_mmio_flis_table;
+
+	pdev = platform_device_alloc(FLIS_DEVICE_NAME, -1);
+	if (!pdev) {
+		pr_err("out of memory for platform dev %s\n", FLIS_DEVICE_NAME);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	pdev->dev.platform_data = &flis_pdata;
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("failed to add flis platform device\n");
+		platform_device_put(pdev);
+		goto out;
+	}
+
+	pr_info("intel_scu_flis platform device created\n");
+out:
+	return ret;
+}
+fs_initcall(intel_scu_flis_init);
+
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_scu_flis.h b/arch/x86/platform/intel-mid/device_libs/platform_scu_flis.h
new file mode 100644
index 0000000..cf48ae5
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_scu_flis.h
@@ -0,0 +1,17 @@
+/*
+ * platform_scu_flis.h: scu_flis platform data header file
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_SCU_FLIS_H_
+#define _PLATFORM_SCU_FLIS_H_
+
+#define FLIS_DEVICE_NAME "intel_scu_flis"
+
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_sdio_regulator.c b/arch/x86/platform/intel-mid/device_libs/platform_sdio_regulator.c
new file mode 100644
index 0000000..01953a6
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_sdio_regulator.c
@@ -0,0 +1,235 @@
+/*
+ * platform_sdio_regulator.c: sdio regulator platform device initialization file
+ *
+ * (C) Copyright 2011 Intel Corporation
+ * Author: chuanxiao.dong@intel.com, feiyix.ning@intel.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <asm/intel-mid.h>
+#include <linux/gpio.h>
+#include <linux/lnw_gpio.h>
+#include <linux/delay.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
+#include <linux/acpi.h>
+#include <linux/acpi_gpio.h>
+
+#define DELAY_ONOFF 250
+
+struct acpi_ids { char *hid; char *uid; };
+
+static struct acpi_ids intel_sdio_ids[] = {
+	{"INT33BB", "2"}, /* BYT SDIO */
+	{ },
+};
+
+static struct acpi_ids intel_brc_ids[] = {
+	{"BCM4321", NULL}, /* BYT SDIO */
+	{ },
+};
+
+static struct regulator_consumer_supply wlan_vmmc_supply = {
+	.supply = "vmmc",
+};
+
+static struct regulator_init_data wlan_vmmc_data = {
+	.constraints = {
+		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+	},
+	.num_consumer_supplies  = 1,
+	.consumer_supplies = &wlan_vmmc_supply,
+};
+
+static struct fixed_voltage_config vwlan = {
+	.supply_name            = "wlan_en_acpi",
+	.microvolts             = 1800000,
+	.gpio                   = -EINVAL,
+	.startup_delay          = 1000 * DELAY_ONOFF,
+	.enable_high            = 1,
+	.enabled_at_boot        = 0,
+	.init_data              = &wlan_vmmc_data,
+};
+
+static void vwlan_device_release(struct device *dev) {}
+
+static struct platform_device vwlan_device = {
+	.name   = "reg-fixed-voltage",
+	.id             = PLATFORM_DEVID_AUTO,
+	.dev = {
+		.platform_data  = &vwlan,
+		.release = vwlan_device_release,
+	},
+};
+
+static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
+{
+	struct acpi_device *device = NULL;
+	acpi_status status;
+	int result;
+	struct acpi_device *acpi_root;
+
+	result = acpi_bus_get_device(ACPI_ROOT_OBJECT, &acpi_root);
+	if (result)
+		return NULL;
+
+	/*
+	 * Fixed hardware devices do not appear in the namespace and do not
+	 * have handles, but we fabricate acpi_devices for them, so we have
+	 * to deal with them specially.
+	 */
+	if (!handle)
+		return acpi_root;
+
+	do {
+		status = acpi_get_parent(handle, &handle);
+		if (ACPI_FAILURE(status))
+			return status == AE_NULL_ENTRY ? NULL : acpi_root;
+	} while (acpi_bus_get_device(handle, &device));
+
+	return device;
+}
+
+static int sdio_acpi_match(struct device *dev, void *data)
+{
+	struct acpi_ids *ids = data;
+	struct acpi_handle *handle = ACPI_HANDLE(dev);
+	struct acpi_device_info *info;
+	acpi_status status;
+	bool match = false;
+
+	status = acpi_get_object_info(handle, &info);
+	if (ACPI_FAILURE(status))
+		return false;
+
+	if (info->valid & ACPI_VALID_UID)
+		match = !strncmp(ids->hid, dev_name(dev), strlen(ids->hid)) &&
+			!strcmp(ids->uid, info->unique_id.string);
+
+	/* the info buffer is allocated by ACPICA and must be freed here */
+	kfree(info);
+
+	return match;
+}
+
+static int brc_acpi_match(struct device *dev, void *data)
+{
+	struct acpi_ids *ids = data;
+
+	if (!strncmp(ids->hid, dev_name(dev), strlen(ids->hid)))
+		return true;
+
+	return false;
+}
+
+
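+/*
+ * Find the Broadcom WLAN device in ACPI, read its wlan-enable GPIO and
+ * register a fixed-voltage regulator on that GPIO so the card can be
+ * powered through the "vmmc" supply.
+ */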
+static int brc_fixed_regulator_register_by_acpi(struct platform_device *pdev)
+{
+	struct device *dev;
+	struct acpi_ids *brc_ids;
+	struct fixed_voltage_config *fixedcfg = NULL;
+	struct regulator_init_data *data = NULL;
+	struct acpi_handle *handle;
+	struct acpi_device *parent;
+
+	if (!pdev)
+		return -ENODEV;
+	fixedcfg = pdev->dev.platform_data;
+	if (!fixedcfg)
+		return -ENODEV;
+	data = fixedcfg->init_data;
+	if (!data || !data->consumer_supplies)
+		return -ENODEV;
+
+	/* get the GPIO pin from ACPI device first */
+	for (brc_ids = intel_brc_ids; brc_ids->hid; brc_ids++) {
+		dev = bus_find_device(&platform_bus_type, NULL,
+				brc_ids, brc_acpi_match);
+		if (dev) {
+			handle = ACPI_HANDLE(dev);
+			if (!handle)
+				continue;
+			parent = acpi_bus_get_parent(handle);
+			if (!parent)
+				continue;
+
+			data->consumer_supplies->dev_name =
+						dev_name(&parent->dev);
+			fixedcfg->gpio = acpi_get_gpio_by_index(dev, 1, NULL);
+			if (fixedcfg->gpio < 0) {
+				dev_info(dev, "No wlan-enable GPIO\n");
+				continue;
+			}
+			dev_info(dev, "wlan-enable GPIO %d found\n",
+					fixedcfg->gpio);
+			break;
+		}
+	}
+
+	if (brc_ids->hid) {
+		/* add a regulator to control wlan enable gpio */
+		return platform_device_register(&vwlan_device);
+	}
+
+	return -ENODEV;
+}
+
+static int sdio_fixed_regulator_register_by_acpi(struct platform_device *pdev)
+{
+	struct device *dev;
+	struct acpi_ids *sdio_ids;
+	struct fixed_voltage_config *fixedcfg = NULL;
+	struct regulator_init_data *data = NULL;
+
+	if (!pdev)
+		return -ENODEV;
+	fixedcfg = pdev->dev.platform_data;
+	if (!fixedcfg)
+		return -ENODEV;
+	data = fixedcfg->init_data;
+	if (!data || !data->consumer_supplies)
+		return -ENODEV;
+
+	/* get the GPIO pin from ACPI device first */
+	for (sdio_ids = intel_sdio_ids; sdio_ids->hid; sdio_ids++) {
+		dev = bus_find_device(&platform_bus_type, NULL,
+				sdio_ids, sdio_acpi_match);
+		if (dev) {
+			data->consumer_supplies->dev_name = dev_name(dev);
+
+			fixedcfg->gpio = acpi_get_gpio_by_index(dev, 0, NULL);
+			if (fixedcfg->gpio < 0) {
+				dev_info(dev, "No wlan-enable GPIO\n");
+				continue;
+			}
+			dev_info(dev, "wlan-enable GPIO %d found\n",
+					fixedcfg->gpio);
+			break;
+		}
+	}
+
+	if (sdio_ids->hid) {
+		/* add a regulator to control wlan enable gpio */
+		return platform_device_register(&vwlan_device);
+	}
+
+	return -ENODEV;
+}
+
+static int __init wifi_regulator_init(void)
+{
+	int ret;
+
+	/* register the fixed regulator through the ACPI device; try the
+	 * Broadcom child device first, then fall back to matching the
+	 * SDIO host itself
+	 */
+	ret = brc_fixed_regulator_register_by_acpi(&vwlan_device);
+	if (!ret)
+		return ret;
+
+	ret = sdio_fixed_regulator_register_by_acpi(&vwlan_device);
+	if (!ret)
+		return ret;
+
+	pr_err("%s: no WLAN-enable device found in ACPI\n", __func__);
+	return ret;
+}
+rootfs_initcall(wifi_regulator_init);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_soc_thermal.c b/arch/x86/platform/intel-mid/device_libs/platform_soc_thermal.c
new file mode 100644
index 0000000..004784a
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_soc_thermal.c
@@ -0,0 +1,136 @@
+/*
+ * platform_soc_thermal.c: Platform data for SoC DTS driver
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Durgadoss R <durgadoss.r@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#define pr_fmt(fmt)  "intel_soc_thermal: " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#include "platform_soc_thermal.h"
+
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_thermal.h>
+
+#define BYT_SOC_THRM_IRQ	86
+#define BYT_SOC_THRM		"soc_thrm"
+
+static struct resource res = {
+		.flags = IORESOURCE_IRQ,
+};
+
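+/*
+ * Per-state SoC throttle settings; successive entries lower the power
+ * limit, and floor_freq can additionally pin a minimum frequency.
+ */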
+static struct soc_throttle_data tng_soc_data[] = {
+	{
+		.power_limit = 0x9C,
+		.floor_freq = 0x00,
+	},
+	{
+		.power_limit = 0x8C,
+		.floor_freq = 0x00,
+	},
+	{
+		.power_limit = 0x7C,
+		.floor_freq = 0x00,
+	},
+	{
+		.power_limit = 0x6C,
+		.floor_freq = 0x00,
+	},
+};
+
+static struct soc_throttle_data vlv2_soc_data[] = {
+	{
+		.power_limit = 0xDA, /* 7W */
+		.floor_freq = 0x00,
+	},
+	{
+		.power_limit = 0x6D, /* 3.5W */
+		.floor_freq = 0x01,
+	},
+	{
+		.power_limit = 0x2E, /* 1.5W */
+		.floor_freq = 0x01,
+	},
+	{
+		.power_limit = 0x2E, /* 1.5W */
+		.floor_freq = 0x01,
+	},
+};
+
+void soc_thrm_device_handler(struct sfi_device_table_entry *pentry,
+				struct devs_id *dev)
+{
+	int ret;
+	struct platform_device *pdev;
+
+	pr_info("IPC bus = %d, name = %16.16s, irq = 0x%2x\n",
+		pentry->host_num, pentry->name, pentry->irq);
+
+	res.start = pentry->irq;
+
+	pdev = platform_device_register_simple(pentry->name, -1,
+					(const struct resource *)&res, 1);
+	if (IS_ERR(pdev)) {
+		ret = PTR_ERR(pdev);
+		pr_err("platform_soc_thermal:pdev_register failed: %d\n", ret);
+		return;
+	}
+
+	pdev->dev.platform_data = tng_soc_data;
+}
+
+static inline int byt_program_ioapic(int irq, int trigger, int polarity)
+{
+	struct io_apic_irq_attr irq_attr;
+	int ioapic;
+
+	ioapic = mp_find_ioapic(irq);
+	if (ioapic < 0)
+		return -EINVAL;
+	irq_attr.ioapic = ioapic;
+	irq_attr.ioapic_pin = irq;
+	irq_attr.trigger = trigger;
+	irq_attr.polarity = polarity;
+	return io_apic_set_pci_routing(NULL, irq, &irq_attr);
+}
+
+static int __init byt_soc_thermal_init(void)
+{
+	int ret;
+	struct platform_device *pdev;
+
+	res.start = BYT_SOC_THRM_IRQ;
+
+	pdev = platform_device_register_simple(BYT_SOC_THRM, -1,
+					(const struct resource *)&res, 1);
+	if (IS_ERR(pdev)) {
+		ret = PTR_ERR(pdev);
+		pr_err("byt_soc_thermal:pdev_register failed: %d\n", ret);
+		return ret;
+	}
+
+	ret = byt_program_ioapic(BYT_SOC_THRM_IRQ, 0, 1);
+	if (ret) {
+		pr_err("%s: ioapic programming failed\n", __func__);
+		platform_device_unregister(pdev);
+		return ret;
+	}
+
+	pdev->dev.platform_data = vlv2_soc_data;
+
+	return 0;
+}
+
+static int __init platform_soc_thermal_init(void)
+{
+	return 0;
+}
+device_initcall(platform_soc_thermal_init);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_soc_thermal.h b/arch/x86/platform/intel-mid/device_libs/platform_soc_thermal.h
new file mode 100644
index 0000000..c8ece32
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_soc_thermal.h
@@ -0,0 +1,20 @@
+/*
+ * platform_soc_thermal.h: platform SoC thermal driver library header file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ * Author: Durgadoss R <durgadoss.r@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_SOC_THERMAL_H_
+#define _PLATFORM_SOC_THERMAL_H_
+
+#include <linux/sfi.h>
+#include <asm/intel-mid.h>
+
+extern void soc_thrm_device_handler(struct sfi_device_table_entry *,
+			struct devs_id *) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_spidev.c b/arch/x86/platform/intel-mid/device_libs/platform_spidev.c
new file mode 100644
index 0000000..40827d6
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_spidev.c
@@ -0,0 +1,73 @@
+/*
+ * platform_spidev.c: spidev platform data initialization file
+ *
+ * (C) Copyright 2014 Intel Corporation
+ * Author: Dan O'Donovan <dan@emutex.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/spi/spi.h>
+#include <linux/spi/intel_mid_ssp_spi.h>
+#include <asm/intel-mid.h>
+#include <linux/gpio.h>
+#include <linux/lnw_gpio.h>
+#include "platform_spidev.h"
+
+static void tng_ssp_spi_cs_control(u32 command);
+static void tng_ssp_spi_platform_pinmux(void);
+
+static int tng_ssp_spi2_FS_gpio = 111;
+
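+/*
+ * The SSP2 frame-select pin is driven by hand as a plain GPIO so that
+ * chip select can be toggled from software (see tng_ssp_spi_cs_control).
+ */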
+static struct intel_mid_ssp_spi_chip chip = {
+	.burst_size = DFLT_FIFO_BURST_SIZE,
+	.timeout = DFLT_TIMEOUT_VAL,
+	/* SPI DMA is currently usable on Tangier */
+	.dma_enabled = true,
+	.cs_control = tng_ssp_spi_cs_control,
+	.platform_pinmux = tng_ssp_spi_platform_pinmux,
+};
+
+static void tng_ssp_spi_cs_control(u32 command)
+{
+	gpio_set_value(tng_ssp_spi2_FS_gpio, (command != 0) ? 1 : 0);
+}
+
+static void tng_ssp_spi_platform_pinmux(void)
+{
+	int err;
+	int saved_muxing;
+	/* Request Chip Select gpios */
+	saved_muxing = gpio_get_alt(tng_ssp_spi2_FS_gpio);
+
+	lnw_gpio_set_alt(tng_ssp_spi2_FS_gpio, LNW_GPIO);
+	err = gpio_request_one(tng_ssp_spi2_FS_gpio,
+			GPIOF_DIR_OUT|GPIOF_INIT_HIGH, "Arduino Shield SS");
+	if (err) {
+		pr_err("%s: unable to get Chip Select GPIO,\
+				fallback to legacy CS mode \n", __func__);
+		lnw_gpio_set_alt(tng_ssp_spi2_FS_gpio, saved_muxing);
+		chip.cs_control = NULL;
+		chip.platform_pinmux = NULL;
+	}
+}
+
+void __init *spidev_platform_data(void *info)
+{
+	struct spi_board_info *spi_info = info;
+
+	if (!spi_info) {
+		pr_err("%s: invalid info pointer\n", __func__);
+		return NULL;
+	}
+
+	spi_info->mode = SPI_MODE_0;
+
+	spi_info->controller_data = &chip;
+	spi_info->bus_num = FORCE_SPI_BUS_NUM;
+
+	return NULL;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_spidev.h b/arch/x86/platform/intel-mid/device_libs/platform_spidev.h
new file mode 100644
index 0000000..6ae0db2
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_spidev.h
@@ -0,0 +1,22 @@
+/*
+ * platform_spidev.h: spidev platform data header file
+ *
+ * (C) Copyright 2014 Intel Corporation
+ * Author: Dan O'Donovan <dan@emutex.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_SPIDEV_H_
+#define _PLATFORM_SPIDEV_H_
+
+/* REVERT ME workaround[MRFL] for invalid bus number in IAFW .25 */
+#define FORCE_SPI_BUS_NUM	5
+#ifndef FORCE_CHIP_SELECT
+#define FORCE_CHIP_SELECT	1
+#endif
+
+extern void __init *spidev_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_sst_audio.c b/arch/x86/platform/intel-mid/device_libs/platform_sst_audio.c
new file mode 100644
index 0000000..7270162
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_sst_audio.c
@@ -0,0 +1,152 @@
+/*
+ * platform_sst_audio.c: SST platform data initialization file
+ *
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Jeeja KP <jeeja.kp@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/platform_device.h>
+#include <asm/platform_sst_audio.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_sst_mrfld.h>
+#include <sound/asound.h>
+
+static struct sst_platform_data sst_platform_pdata;
+
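+/*
+ * Each map entry binds an ALSA {device, subdevice, direction} triple to
+ * an SST firmware pipe and the task that owns it; entries marked
+ * SST_DEV_MAP_FREE are assigned to a pipe at runtime.
+ */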
+#if IS_BUILTIN(CONFIG_SST_MRFLD_DPCM)
+static struct sst_dev_stream_map mrfld_strm_map[] = {
+	{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, /* Reserved, not in use */
+	{MERR_DPCM_AUDIO, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_MEDIA1_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_DB,    0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_MEDIA3_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_LL,    0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_LOW_PCM0_IN, SST_TASK_ID_SBA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_COMPR, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_MEDIA0_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_VOIP,  0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_VOIP_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_PROBE1_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 1, SNDRV_PCM_STREAM_PLAYBACK, PIPE_PROBE2_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 2, SNDRV_PCM_STREAM_PLAYBACK, PIPE_PROBE3_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 3, SNDRV_PCM_STREAM_PLAYBACK, PIPE_PROBE4_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 4, SNDRV_PCM_STREAM_PLAYBACK, PIPE_PROBE5_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 5, SNDRV_PCM_STREAM_PLAYBACK, PIPE_PROBE6_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 6, SNDRV_PCM_STREAM_PLAYBACK, PIPE_PROBE7_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 7, SNDRV_PCM_STREAM_PLAYBACK, PIPE_PROBE8_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_AUDIO, 0, SNDRV_PCM_STREAM_CAPTURE, PIPE_PCM1_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_VOIP,  0, SNDRV_PCM_STREAM_CAPTURE, PIPE_VOIP_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 0, SNDRV_PCM_STREAM_CAPTURE, PIPE_PROBE1_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 1, SNDRV_PCM_STREAM_CAPTURE, PIPE_PROBE2_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 2, SNDRV_PCM_STREAM_CAPTURE, PIPE_PROBE3_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 3, SNDRV_PCM_STREAM_CAPTURE, PIPE_PROBE4_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 4, SNDRV_PCM_STREAM_CAPTURE, PIPE_PROBE5_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 5, SNDRV_PCM_STREAM_CAPTURE, PIPE_PROBE6_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 6, SNDRV_PCM_STREAM_CAPTURE, PIPE_PROBE7_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_DPCM_PROBE, 7, SNDRV_PCM_STREAM_CAPTURE, PIPE_PROBE8_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+};
+#else
+static struct sst_dev_stream_map mrfld_strm_map[] = {
+	{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, /* Reserved, not in use */
+	{MERR_SALTBAY_AUDIO, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_AUDIO, 1, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_AUDIO, 2, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_COMPR, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_MEDIA0_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_SALTBAY_VOIP, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_VOIP_IN, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_SALTBAY_AUDIO, 0, SNDRV_PCM_STREAM_CAPTURE, PIPE_PCM1_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_SALTBAY_VOIP, 0, SNDRV_PCM_STREAM_CAPTURE, PIPE_VOIP_OUT, SST_TASK_ID_MEDIA, SST_DEV_MAP_IN_USE},
+	{MERR_SALTBAY_PROBE, 0, SNDRV_PCM_STREAM_CAPTURE, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 1, SNDRV_PCM_STREAM_CAPTURE, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 2, SNDRV_PCM_STREAM_CAPTURE, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 3, SNDRV_PCM_STREAM_CAPTURE, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 4, SNDRV_PCM_STREAM_CAPTURE, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 5, SNDRV_PCM_STREAM_CAPTURE, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 6, SNDRV_PCM_STREAM_CAPTURE, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 7, SNDRV_PCM_STREAM_CAPTURE, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 1, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 2, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 3, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 4, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 5, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 6, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_PROBE, 7, SNDRV_PCM_STREAM_PLAYBACK, PIPE_RSVD, SST_TASK_ID_MEDIA, SST_DEV_MAP_FREE},
+	{MERR_SALTBAY_AWARE, 0, SNDRV_PCM_STREAM_CAPTURE, PIPE_AWARE_OUT, SST_TASK_ID_AWARE, SST_DEV_MAP_IN_USE},
+	{MERR_SALTBAY_VAD, 0, SNDRV_PCM_STREAM_CAPTURE, PIPE_VAD_OUT, SST_TASK_ID_AWARE, SST_DEV_MAP_IN_USE},
+};
+#endif
+
+#define EQ_EFFECT_ALGO_ID 0x99
+static struct sst_dev_effects_map mrfld_effs_map[] = {
+	{
+	  {0xc1, 0x47, 0xa2, 0xf7, 0x7b, 0x1a, 0xe0, 0x11, 0x0d, 0xbb, 0x2a, 0x30, 0xdf, 0xd7, 0x20, 0x45},/* uuid */
+	   EQ_EFFECT_ALGO_ID,										   /* algo id */
+	  {0x00, 0x43, 0xed, 0x0b, 0xd6, 0xdd, 0xdb, 0x11, 0x34, 0x8f, 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b, /* descriptor */
+	   0xc1, 0x47, 0xa2, 0xf7, 0x7b, 0x1a, 0xe0, 0x11, 0x0d, 0xbb, 0x2a, 0x30, 0xdf, 0xd7, 0x20, 0x45,
+	   0x12, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x45, 0x71, 0x75, 0x61,
+	   0x6c, 0x69, 0x7a, 0x65, 0x72, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x6e, 0x74, 0x65,
+	   0x6c, 0x20, 0x43, 0x6f, 0x72, 0x70, 0x6f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00,
+	   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+	  },
+	}
+};
+
+static struct sst_dev_effects_resource_map mrfld_effs_res_map[] = {
+	{
+	 {0xc1, 0x47, 0xa2, 0xf7, 0x7b, 0x1a, 0xe0, 0x11, 0x0d, 0xbb, 0x2a, 0x30, 0xdf, 0xd7, 0x20, 0x45}, /* uuid */
+	  0x50, /* flags */
+	  0x00, /* CPU load */
+	  0x01, /* memory usage */
+	 }
+};
+
+static void set_mrfld_platform_config(void)
+{
+	sst_platform_pdata.pdev_strm_map = mrfld_strm_map;
+	sst_platform_pdata.strm_map_size = ARRAY_SIZE(mrfld_strm_map);
+	sst_platform_pdata.pdev_effs.effs_map = mrfld_effs_map;
+	sst_platform_pdata.pdev_effs.effs_res_map = mrfld_effs_res_map;
+	sst_platform_pdata.pdev_effs.effs_num_map = ARRAY_SIZE(mrfld_effs_map);
+}
+
+static void populate_platform_data(void)
+{
+	set_mrfld_platform_config();
+}
+
+int add_sst_platform_device(void)
+{
+	struct platform_device *pdev = NULL;
+	int ret;
+
+	populate_platform_data();
+
+	pdev = platform_device_alloc("sst-platform", -1);
+	if (!pdev) {
+		pr_err("failed to allocate audio platform device\n");
+		return -EINVAL;
+	}
+
+	ret = platform_device_add_data(pdev, &sst_platform_pdata,
+					sizeof(sst_platform_pdata));
+	if (ret) {
+		pr_err("failed to add sst platform data\n");
+		platform_device_put(pdev);
+		return -EINVAL;
+	}
+	ret = platform_device_add(pdev);
+	if (ret) {
+		pr_err("failed to add audio platform device\n");
+		platform_device_put(pdev);
+		return -EINVAL;
+	}
+	return ret;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c b/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c
new file mode 100644
index 0000000..127091d
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.c
@@ -0,0 +1,26 @@
+/*
+ * platform_tc35876x.c: tc35876x platform data initialization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/i2c/tc35876x.h>
+#include <asm/intel-mid.h>
+#include "platform_tc35876x.h"
+
+/* tc35876x DSI_LVDS bridge chip and panel platform data */
+void *tc35876x_platform_data(void *data)
+{
+	static struct tc35876x_platform_data pdata;
+
+	pdata.gpio_bridge_reset = get_gpio_by_name("LCMB_RXEN");
+	pdata.gpio_panel_bl_en = get_gpio_by_name("6S6P_BL_EN");
+	pdata.gpio_panel_vadd = get_gpio_by_name("EN_VREG_LCD_V3P3");
+	return &pdata;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.h b/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.h
new file mode 100644
index 0000000..56a74eb
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_tc35876x.h
@@ -0,0 +1,16 @@
+/*
+ * platform_tc35876x.h: tc35876x platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_TC35876X_H_
+#define _PLATFORM_TC35876X_H_
+
+extern void *tc35876x_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c b/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c
new file mode 100644
index 0000000..98a6cd1
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c
@@ -0,0 +1,45 @@
+/*
+ * platform_tca6416.c: tca6416 platform data initialization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/i2c/pca953x.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <asm/intel-mid.h>
+#include "platform_tca6416.h"
+
+void *tca6416_platform_data(void *info)
+{
+	static struct pca953x_platform_data tca6416;
+	struct i2c_board_info *i2c_info = info;
+	int gpio_base, intr;
+	char base_pin_name[SFI_NAME_LEN + 1];
+	char intr_pin_name[SFI_NAME_LEN + 1];
+
+	strcpy(i2c_info->type, TCA6416_NAME);
+	strcpy(base_pin_name, TCA6416_BASE);
+	strcpy(intr_pin_name, TCA6416_INTR);
+
+	gpio_base = get_gpio_by_name(base_pin_name);
+	intr = get_gpio_by_name(intr_pin_name);
+
+	if (gpio_base == -1)
+		return NULL;
+	tca6416.gpio_base = gpio_base;
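+	/*
+	 * SFI hands back GPIO numbers; adding INTEL_MID_IRQ_OFFSET is the
+	 * platform's (assumed) GPIO-number-to-IRQ-number translation,
+	 * applied to both the chip IRQ and the expander's irq_base below.
+	 */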
+	if (intr != -1) {
+		i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
+		tca6416.irq_base = gpio_base + INTEL_MID_IRQ_OFFSET;
+	} else {
+		i2c_info->irq = -1;
+		tca6416.irq_base = -1;
+	}
+	return &tca6416;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_tca6416.h b/arch/x86/platform/intel-mid/device_libs/platform_tca6416.h
new file mode 100644
index 0000000..69802d6
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_tca6416.h
@@ -0,0 +1,20 @@
+/*
+ * platform_tca6416.h: tca6416 platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_TCA6416_H_
+#define _PLATFORM_TCA6416_H_
+
+#define TCA6416_NAME	"tca6416"
+#define TCA6416_BASE	"tca6416_base"
+#define TCA6416_INTR	"tca6416_int"
+
+extern void *tca6416_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_wdt.c
new file mode 100644
index 0000000..973cf3b
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_wdt.c
@@ -0,0 +1,72 @@
+/*
+ * platform_wdt.c: Watchdog platform library file
+ *
+ * (C) Copyright 2014 Intel Corporation
+ * Author: David Cohen <david.a.cohen@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/intel-mid_wdt.h>
+#include <asm/intel-mid.h>
+#include <asm/io_apic.h>
+
+#define TANGIER_EXT_TIMER0_MSI 15
+
+static struct platform_device wdt_dev = {
+	.name = "intel_mid_wdt",
+	.id = -1,
+};
+
+static int tangier_probe(struct platform_device *pdev)
+{
+	int ioapic;
+	int irq;
+	struct intel_mid_wdt_pdata *pdata = pdev->dev.platform_data;
+	struct io_apic_irq_attr irq_attr = { 0 };
+
+	if (!pdata)
+		return -EINVAL;
+
+	irq = pdata->irq;
+	ioapic = mp_find_ioapic(irq);
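+	/*
+	 * On Tangier the watchdog IRQ number doubles as its IO-APIC pin,
+	 * so the pin is routed 1:1, level-triggered and (per the comment
+	 * below) active high.
+	 */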
+	if (ioapic >= 0) {
+		int ret;
+		irq_attr.ioapic = ioapic;
+		irq_attr.ioapic_pin = irq;
+		irq_attr.trigger = 1;
+		/* irq_attr.polarity = 0; -> Active high */
+		ret = io_apic_set_pci_routing(NULL, irq, &irq_attr);
+		if (ret)
+			return ret;
+	} else {
+		dev_warn(&pdev->dev, "cannot find interrupt %d in ioapic\n",
+			 irq);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct intel_mid_wdt_pdata tangier_pdata = {
+	.irq = TANGIER_EXT_TIMER0_MSI,
+	.probe = tangier_probe,
+};
+
+static int __init register_mid_wdt(void)
+{
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) {
+		wdt_dev.dev.platform_data = &tangier_pdata;
+		return platform_device_register(&wdt_dev);
+	}
+
+	return -ENODEV;
+}
+
+rootfs_initcall(register_mid_wdt);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_wifi.c b/arch/x86/platform/intel-mid/device_libs/platform_wifi.c
new file mode 100644
index 0000000..ad0b6b4
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_wifi.c
@@ -0,0 +1,128 @@
+/*
+ * platform_wifi.c: WiFi platform data initialization file
+ *
+ * (C) Copyright 2011 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/lnw_gpio.h>
+#include <asm/intel-mid.h>
+#include <linux/wlan_plat.h>
+#include <linux/interrupt.h>
+#include <linux/mmc/sdhci.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include "pci/platform_sdhci_pci.h"
+#include "platform_wifi.h"
+
+static struct resource wifi_res[] = {
+	{
+	.name = "wlan_irq",
+	.start = -1,
+	.end = -1,
+	.flags = IORESOURCE_IRQ | IRQF_TRIGGER_FALLING,
+	},
+};
+
+static struct wifi_platform_data pdata;
+
+static struct platform_device wifi_device = {
+	.name = "wlan",
+	.dev = {
+		.platform_data = &pdata,
+		},
+	.num_resources = ARRAY_SIZE(wifi_res),
+	.resource = wifi_res,
+};
+
+static const unsigned int sdhci_quirk = SDHCI_QUIRK2_ADVERTISE_2V0_FORCE_1V8
+		| SDHCI_QUIRK2_ENABLE_MMC_PM_IGNORE_PM_NOTIFY
+		| SDHCI_QUIRK2_ADVERTISE_3V0_FORCE_1V8
+		| SDHCI_QUIRK2_NON_STD_CIS;
+
+static void __init wifi_platform_data_init_sfi_fastirq(struct sfi_device_table_entry *pentry,
+						       bool should_register)
+{
+	/*
+	 * If the GPIO mode was previously called, this code overloads
+	 * the IRQ anyway.
+	 */
+	wifi_res[0].start = wifi_res[0].end = pentry->irq;
+	wifi_res[0].flags = IORESOURCE_IRQ | IRQF_TRIGGER_HIGH;
+
+	pr_info("wifi_platform_data: IRQ == %d\n", pentry->irq);
+
+	if (should_register && platform_device_register(&wifi_device) < 0)
+		pr_err("platform_device_register failed for wifi_device\n");
+}
+
+/* Called if SFI device WLAN is present */
+void __init wifi_platform_data_fastirq(struct sfi_device_table_entry *pe,
+				       struct devs_id *dev)
+{
+	/* This is used in the driver to know if it is GPIO/FastIRQ */
+	pdata.use_fast_irq = true;
+
+	if (wifi_res[0].start == -1) {
+		pr_info("Using WiFi platform data (Fast IRQ)\n");
+
+		/* Set vendor specific SDIO quirks */
+		sdhci_pdata_set_quirks(sdhci_quirk);
+		wifi_platform_data_init_sfi_fastirq(pe, true);
+	} else {
+		pr_info("Using WiFi platform data (Fast IRQ, overloading GPIO mode set previously)\n");
+		/*
+		 * We do not register the platform device, as that has
+		 * already been done by wifi_platform_data.
+		 */
+		wifi_platform_data_init_sfi_fastirq(pe, false);
+	}
+}
+
+/* GPIO legacy code path */
+static void __init wifi_platform_data_init_sfi_gpio(void)
+{
+	int wifi_irq_gpio = -1;
+
+	/* Get GPIO numbers from the SFI table */
+	wifi_irq_gpio = get_gpio_by_name(WIFI_SFI_GPIO_IRQ_NAME);
+	if (wifi_irq_gpio < 0) {
+		pr_err("%s: Unable to find " WIFI_SFI_GPIO_IRQ_NAME
+		       " WLAN-interrupt GPIO in the SFI table\n",
+		       __func__);
+		return;
+	}
+
+	wifi_res[0].start = wifi_res[0].end = wifi_irq_gpio;
+	pr_info("wifi_platform_data: GPIO == %d\n", wifi_irq_gpio);
+
+	if (platform_device_register(&wifi_device) < 0)
+		pr_err("platform_device_register failed for wifi_device\n");
+}
+
+/* Called from board.c */
+void __init *wifi_platform_data(void *info)
+{
+	/* If the fast IRQ platform data hook ran first, don't go further */
+	if (wifi_res[0].start != -1)
+		return NULL;
+
+	pr_info("Using generic wifi platform data\n");
+
+	/* Set vendor specific SDIO quirks */
+#ifdef CONFIG_MMC_SDHCI_PCI
+	sdhci_pdata_set_quirks(sdhci_quirk);
+#endif
+
+#ifndef CONFIG_ACPI
+	/* We are SFI here, register platform device */
+	wifi_platform_data_init_sfi_gpio();
+#endif
+
+	return &wifi_device;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_wifi.h b/arch/x86/platform/intel-mid/device_libs/platform_wifi.h
new file mode 100644
index 0000000..7f1f710
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_wifi.h
@@ -0,0 +1,21 @@
+/*
+ * platform_wifi.h: WiFi platform data header file
+ *
+ * (C) Copyright 2011 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_WIFI_H_
+#define _PLATFORM_WIFI_H_
+
+#define WIFI_SFI_GPIO_IRQ_NAME "WLAN-interrupt"
+#define WIFI_SFI_GPIO_ENABLE_NAME "WLAN-enable"
+
+extern void __init *wifi_platform_data(void *info) __attribute__((weak));
+extern void wifi_platform_data_fastirq(struct sfi_device_table_entry *pe,
+				       struct devs_id *dev) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_wl12xx.c b/arch/x86/platform/intel-mid/device_libs/platform_wl12xx.c
new file mode 100644
index 0000000..53b8544
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_wl12xx.c
@@ -0,0 +1,182 @@
+/*
+ * platform_wl12xx.c: wl12xx platform data initialization file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/lnw_gpio.h>
+#include <linux/wl12xx.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
+#include <asm/intel-mid.h>
+#include "platform_wl12xx.h"
+
+static int wl12xx_platform_init(struct wl12xx_platform_data *platform_data);
+static void wl12xx_platform_deinit(struct wl12xx_platform_data *platform_data);
+
+static struct wl12xx_platform_data mid_wifi_control = {
+	.board_ref_clock = 1,
+	.irq = 2,
+	.gpio = -EINVAL,
+	.board_tcxo_clock = 1,
+	.platform_quirks = WL12XX_PLATFORM_QUIRK_EDGE_IRQ,
+	.hw_init = wl12xx_platform_init,
+	.hw_deinit = wl12xx_platform_deinit,
+};
+
+static char wl12xx_vmmc3_dev_name[] = "0000:00:00.0"; /* default value */
+
+static struct regulator_consumer_supply wl12xx_vmmc3_supply = {
+	.supply		= "vmmc",
+	.dev_name	= wl12xx_vmmc3_dev_name,
+};
+
+static struct regulator_init_data wl12xx_vmmc3 = {
+	.constraints = {
+		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
+	},
+	.num_consumer_supplies	= 1,
+	.consumer_supplies = &wl12xx_vmmc3_supply,
+};
+
+static struct fixed_voltage_config wl12xx_vwlan = {
+	.supply_name		= "vwl1271",
+	.microvolts		= 1800000,
+	.gpio			= 75,
+	.startup_delay		= 70000,
+	.enable_high		= 1,
+	.enabled_at_boot	= 0,
+	.init_data		= &wl12xx_vmmc3,
+};
+
+static struct platform_device wl12xx_vwlan_device = {
+	.name		= "reg-fixed-voltage",
+	.id		= 1,
+	.dev = {
+		.platform_data	= &wl12xx_vwlan,
+	},
+};
+
+void __init wl12xx_platform_data_init(void *info)
+{
+	struct sd_board_info *sd_info = info;
+	int err;
+
+	/* Get GPIO numbers from the SFI table */
+	mid_wifi_control.gpio = get_gpio_by_name(WL12XX_SFI_GPIO_IRQ_NAME);
+	if (mid_wifi_control.gpio == -1) {
+		pr_err("%s: Unable to find WLAN-interrupt GPIO in the SFI table\n",
+				__func__);
+		return;
+	}
+
+	/* Set our board_ref_clock from SFI SD board info */
+	if (sd_info->board_ref_clock == ICDK_BOARD_REF_CLK)
+		/* iCDK board: 26 MHz TCXO clock ref */
+		mid_wifi_control.board_ref_clock = 1;
+	else if (sd_info->board_ref_clock == NCDK_BOARD_REF_CLK)
+		/* nCDK board: 38.4 MHz TCXO clock ref */
+		mid_wifi_control.board_ref_clock = 2;
+	err = wl12xx_set_platform_data(&mid_wifi_control);
+	if (err < 0)
+		pr_err("error setting wl12xx data\n");
+
+	/*
+	 * This is the fake regulator that the MMC stack uses to power off
+	 * the WiFi SDIO card via the runtime PM APIs.
+	 */
+	wl12xx_vwlan.gpio = get_gpio_by_name(WL12XX_SFI_GPIO_ENABLE_NAME);
+	if (wl12xx_vwlan.gpio == -1) {
+		pr_err("%s: Unable to find WLAN-enable GPIO in the SFI table\n",
+		       __func__);
+		return;
+	}
+	/* Format the vmmc regulator's consumer dev_name from the SFI table */
+	snprintf(wl12xx_vmmc3_dev_name, sizeof(wl12xx_vmmc3_dev_name),
+		 "0000:00:%02x.%01x", (sd_info->addr) >> 8, sd_info->addr & 0xFF);
+
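+	/*
+	 * Illustration (assumed SFI encoding): an SFI addr of 0x0102
+	 * formats to "0000:00:01.2", i.e. the PCI device comes from the
+	 * high byte and the function from the low byte.
+	 */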
+	err = platform_device_register(&wl12xx_vwlan_device);
+	if (err < 0)
+		pr_err("error platform_device_register\n");
+
+}
+
+void __init *wl12xx_platform_data(void *info)
+{
+	wl12xx_platform_data_init(info);
+
+	return &mid_wifi_control;
+}
+
+static int wl12xx_platform_init(struct wl12xx_platform_data *platform_data)
+{
+	int err = 0;
+
+	if (IS_ERR(platform_data)) {
+		err = PTR_ERR(platform_data);
+		pr_err("%s: missing wlan platform data: %d\n", __func__, err);
+		goto out;
+	}
+
+	/*
+	 * The GPIO must be set to -EINVAL by platform code if a
+	 * GPIO-based IRQ is not used.
+	 */
+	if (gpio_is_valid(platform_data->gpio)) {
+		if (!platform_data->gpio)
+			pr_warn("using GPIO %d for wl12xx\n",
+						platform_data->gpio);
+
+		/* Request gpio */
+		err = gpio_request(platform_data->gpio, "wl12xx");
+		if (err < 0) {
+			pr_err("%s: Unable to request GPIO:%d, err:%d\n",
+					__func__, platform_data->gpio, err);
+			goto out;
+		}
+
+		/* set gpio direction */
+		err = gpio_direction_input(platform_data->gpio);
+		if (err < 0) {
+			pr_err("%s: Unable to set GPIO:%d direction, err:%d\n",
+			 __func__, platform_data->gpio, err);
+			goto out;
+		}
+
+		/* convert gpio to irq */
+		platform_data->irq = gpio_to_irq(platform_data->gpio);
+		if (platform_data->irq < 0) {
+			pr_err("%s: Error gpio_to_irq:%d->%d\n", __func__,
+					platform_data->gpio,
+					platform_data->irq);
+			goto out;
+		}
+	}
+
+	sdhci_pci_request_regulators();
+
+	pr_info("%s done\n", __func__);
+out:
+	return err;
+}
+
+static void wl12xx_platform_deinit(struct wl12xx_platform_data *pdata)
+{
+	/* get platform data and free the gpio */
+	if (IS_ERR(pdata)) {
+		pr_err("%s: missing wlan platform data\n", __func__);
+		goto out;
+	}
+
+	if (gpio_is_valid(pdata->gpio)) {
+		if (!pdata->gpio)
+			pr_warn("using GPIO %d for wl12xx\n", pdata->gpio);
+		gpio_free(pdata->gpio);
+	}
+out:
+	return;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_wl12xx.h b/arch/x86/platform/intel-mid/device_libs/platform_wl12xx.h
new file mode 100644
index 0000000..909f697
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_wl12xx.h
@@ -0,0 +1,21 @@
+/*
+ * platform_wl12xx.h: wl12xx platform data header file
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_WL12XX_H_
+#define _PLATFORM_WL12XX_H_
+
+#define WL12XX_SFI_GPIO_IRQ_NAME "WLAN-interrupt"
+#define WL12XX_SFI_GPIO_ENABLE_NAME "WLAN-enable"
+#define ICDK_BOARD_REF_CLK 26000000
+#define NCDK_BOARD_REF_CLK 38400000
+
+extern void __init *wl12xx_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_wm8994.c b/arch/x86/platform/intel-mid/device_libs/platform_wm8994.c
new file mode 100644
index 0000000..51a6352
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_wm8994.c
@@ -0,0 +1,243 @@
+/*
+ * platform_wm8994.c: wm8994 platform data initialization file
+ *
+ * (C) Copyright 2013 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/lnw_gpio.h>
+#include <asm/intel-mid.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/mfd/wm8994/pdata.h>
+#include "platform_wm8994.h"
+
+/***********WM89941 REGULATOR platform data*************/
+static struct regulator_consumer_supply vwm89941_consumer[] = {
+	REGULATOR_SUPPLY("DBVDD", "1-001a"),
+	REGULATOR_SUPPLY("DBVDD1", "1-001a"),
+	REGULATOR_SUPPLY("DBVDD2", "1-001a"),
+	REGULATOR_SUPPLY("DBVDD3", "1-001a"),
+	REGULATOR_SUPPLY("AVDD2", "1-001a"),
+	REGULATOR_SUPPLY("CPVDD", "1-001a"),
+};
+
+static struct regulator_init_data vwm89941_data = {
+		.constraints = {
+			.always_on = 1,
+		},
+		.num_consumer_supplies	=	ARRAY_SIZE(vwm89941_consumer),
+		.consumer_supplies	=	vwm89941_consumer,
+};
+
+static struct fixed_voltage_config vwm89941_config = {
+	.supply_name	= "VCC_1.8V_PDA",
+	.microvolts	= 1800000,
+	.gpio		= -EINVAL,
+	.init_data	= &vwm89941_data,
+};
+
+static struct platform_device vwm89941_device = {
+	.name = "reg-fixed-voltage",
+	.id = 0,
+	.dev = {
+		.platform_data = &vwm89941_config,
+	},
+};
+
+/***********WM89942 REGULATOR platform data*************/
+static struct regulator_consumer_supply vwm89942_consumer[] = {
+	REGULATOR_SUPPLY("SPKVDD1", "1-001a"),
+	REGULATOR_SUPPLY("SPKVDD2", "1-001a"),
+};
+
+static struct regulator_init_data vwm89942_data = {
+		.constraints = {
+			.always_on = 1,
+		},
+		.num_consumer_supplies	=	ARRAY_SIZE(vwm89942_consumer),
+		.consumer_supplies	=	vwm89942_consumer,
+};
+
+static struct fixed_voltage_config vwm89942_config = {
+	.supply_name	= "V_BAT",
+	.microvolts	= 3700000,
+	.gpio		= -EINVAL,
+	.init_data  = &vwm89942_data,
+};
+
+static struct platform_device vwm89942_device = {
+	.name = "reg-fixed-voltage",
+	.id = 1,
+	.dev = {
+		.platform_data = &vwm89942_config,
+	},
+};
+
+static struct platform_device wm8994_ldo1_device;
+static struct platform_device wm8994_ldo2_device;
+static struct platform_device *wm1811a_reg_devices[] __initdata = {
+	&vwm89941_device,
+	&vwm89942_device,
+	&wm8994_ldo1_device,
+	&wm8994_ldo2_device
+};
+
+static struct platform_device *wm8958_reg_devices[] __initdata = {
+	&vwm89941_device,
+	&vwm89942_device
+};
+
+static struct regulator_consumer_supply wm8994_avdd1_supply =
+	REGULATOR_SUPPLY("AVDD1", "1-001a");
+
+static struct regulator_consumer_supply wm8994_dcvdd_supply =
+	REGULATOR_SUPPLY("DCVDD", "1-001a");
+
+static struct regulator_init_data wm8994_ldo1_data = {
+	.constraints	= {
+		.always_on	= 1,
+		.name		= "AVDD1_3.0V",
+		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
+	},
+	.num_consumer_supplies	= 1,
+	.consumer_supplies	= &wm8994_avdd1_supply,
+};
+
+static struct fixed_voltage_config wm8994_ldo1_config = {
+	.supply_name	= "V_BAT_X",
+	.microvolts	= 3700000,
+	.gpio		= -EINVAL,
+	.init_data  = &wm8994_ldo1_data,
+};
+
+static struct platform_device wm8994_ldo1_device = {
+	.name = "reg-fixed-voltage",
+	.id = 2,
+	.dev = {
+		.platform_data = &wm8994_ldo1_config,
+	},
+};
+
+
+static struct regulator_init_data wm8994_ldo2_data = {
+	.constraints	= {
+		.always_on	= 1,
+		.name		= "DCVDD_1.0V",
+	},
+	.num_consumer_supplies	= 1,
+	.consumer_supplies	= &wm8994_dcvdd_supply,
+};
+
+static struct fixed_voltage_config wm8994_ldo2_config = {
+	.supply_name	= "V_BAT_Y",
+	.microvolts	= 3700000,
+	.gpio		= -EINVAL,
+	.init_data  = &wm8994_ldo2_data,
+};
+
+static struct platform_device wm8994_ldo2_device = {
+	.name = "reg-fixed-voltage",
+	.id = 3,
+	.dev = {
+		.platform_data = &wm8994_ldo2_config,
+	},
+};
+
+static struct  wm8958_custom_config custom_config = {
+	.format = 6,
+	.rate = 48000,
+	.channels = 2,
+};
+
+static struct wm8994_pdata wm8994_pdata = {
+	/* configure gpio1 function: logic level input/output */
+	.gpio_defaults[0] = 0x0003,
+	.irq_flags = IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+	/* FIXME: Below are 1811A specific; we need to use SPID for these */
+
+	/* configure gpio3/4/5/7 function for AIF2 voice */
+	.gpio_defaults[2] = 0x8100,
+	.gpio_defaults[3] = 0x8100,
+	.gpio_defaults[4] = 0x8100,
+	.gpio_defaults[6] = 0x0100,
+	/* configure gpio8/9/10/11 function for AIF3 BT */
+	/* gpio7 is codec intr pin for GV M2 */
+	.gpio_defaults[7] = 0x0003,
+	.gpio_defaults[8] = 0x0105,
+	.gpio_defaults[9] = 0x0100,
+	.gpio_defaults[10] = 0x0100,
+	.ldo[0]	= { 0, &wm8994_ldo1_data }, /* set actual value at wm8994_platform_data() */
+	.ldo[1]	= { 0, &wm8994_ldo2_data },
+	.ldo_ena_always_driven = 1,
+
+	.mic_id_delay = 300, /*300ms delay*/
+	.micdet_delay = 500,
+	.micb_en_delay = 5000, /* Keeps MICBIAS2 high for 5sec during jack insertion/removal */
+
+	.custom_cfg = &custom_config,
+};
+
+static int wm8994_get_irq_data(struct wm8994_pdata *pdata,
+			struct i2c_board_info *i2c_info, char *name)
+{
+	int codec_gpio;
+
+	/*
+	 * The driver registers a new IRQ chip for the codec, so give it
+	 * an IRQ base that is known to be unused; 256 + 192 is used here.
+	 */
+	pdata->irq_base = (256 + 192);
+	codec_gpio = get_gpio_by_name(name);
+	if (codec_gpio < 0) {
+		pr_err("%s failed for : %d\n", __func__, codec_gpio);
+		return -EINVAL;
+	}
+	i2c_info->irq = codec_gpio + INTEL_MID_IRQ_OFFSET;
+	return codec_gpio;
+}
+
+void __init *wm8994_platform_data(void *info)
+{
+	struct i2c_board_info *i2c_info = (struct i2c_board_info *)info;
+	int irq = 0;
+
+	platform_add_devices(wm8958_reg_devices,
+		ARRAY_SIZE(wm8958_reg_devices));
+
+	irq = wm8994_get_irq_data(&wm8994_pdata, i2c_info,
+					"audiocodec_int");
+	if (irq < 0)
+		return NULL;
+
+	return &wm8994_pdata;
+}
+
+static struct i2c_board_info wm8958_info = {
+	I2C_BOARD_INFO("wm8958", 0x1a),
+};
+
+void __init *wm8958_platform_data(void *info)
+{
+	int irq, bus_num = 1;
+
+	platform_add_devices(wm8958_reg_devices,
+			ARRAY_SIZE(wm8958_reg_devices));
+
+	irq = wm8994_get_irq_data(&wm8994_pdata, &wm8958_info,
+					"audiocodec_int");
+	if (irq >= 0)
+		wm8958_info.platform_data = &wm8994_pdata;
+
+	i2c_register_board_info(bus_num, &wm8958_info, 1);
+
+	return NULL;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_wm8994.h b/arch/x86/platform/intel-mid/device_libs/platform_wm8994.h
new file mode 100644
index 0000000..ac3b2b6
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_wm8994.h
@@ -0,0 +1,6 @@
+#ifndef _PLATFORM_WM8994_H_
+#define _PLATFORM_WM8994_H_
+
+extern void *wm8994_platform_data(void *info) __attribute__((weak));
+extern void *wm8958_platform_data(void *info) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/early_printk_intel_mid.c b/arch/x86/platform/intel-mid/early_printk_intel_mid.c
new file mode 100644
index 0000000..3b361bd
--- /dev/null
+++ b/arch/x86/platform/intel-mid/early_printk_intel_mid.c
@@ -0,0 +1,637 @@
+/*
+ * early_printk_intel_mid.c - early consoles for Intel MID platforms
+ *
+ * Copyright (c) 2008-2010, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+/*
+ * Currently we have 3 types of early printk consoles: PTI, HSU and
+ * MAX3110 SPI-UART.
+ * PTI is available for mdfld, clv and mrfld.
+ * HSU is available for mdfld, clv and mrfld. But it depends on board design.
+ * Some boards don't have HSU UART pins routed to the connector so we can't
+ * use it.
+ * MAX3110 SPI-UART is a stand-alone chip with an SPI interface, located
+ * on the debug card. Drivers can access this chip via the SoC's SPI
+ * controller or an SSP controller working in SPI mode.
+ * MAX3110 is available for mrst, mdfld, clv and mrfld, but for mrst, mdfld
+ * and clv it is connected to the SPI controller, while for mrfld it is
+ * connected to the SSP controller.
+ */
+
+#include <linux/serial_reg.h>
+#include <linux/serial_mfd.h>
+#include <linux/kmsg_dump.h>
+#include <linux/console.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/hardirq.h>
+#include <linux/pti.h>
+
+#include <asm/fixmap.h>
+#include <asm/pgtable.h>
+#include <asm/intel-mid.h>
+
+#define MRST_SPI_TIMEOUT		0x200000
+#define MRST_REGBASE_SPI0		0xff128000
+#define MRST_REGBASE_SPI1		0xff128400
+#define CLV_REGBASE_SPI1		0xff135000
+#define MRST_CLK_SPI0_REG		0xff11d86c
+#define MRFLD_SSP_TIMEOUT		0x200000
+#define MRFLD_REGBASE_SSP5		0xff189000
+
+/* Bit fields in CTRLR0 */
+#define SPI_DFS_OFFSET			0
+
+#define SPI_FRF_OFFSET			4
+#define SPI_FRF_SPI			0x0
+#define SPI_FRF_SSP			0x1
+#define SPI_FRF_MICROWIRE		0x2
+#define SPI_FRF_RESV			0x3
+
+#define SPI_MODE_OFFSET			6
+#define SPI_SCPH_OFFSET			6
+#define SPI_SCOL_OFFSET			7
+#define SPI_TMOD_OFFSET			8
+#define	SPI_TMOD_TR			0x0		/* xmit & recv */
+#define SPI_TMOD_TO			0x1		/* xmit only */
+#define SPI_TMOD_RO			0x2		/* recv only */
+#define SPI_TMOD_EPROMREAD		0x3		/* eeprom read mode */
+
+#define SPI_SLVOE_OFFSET		10
+#define SPI_SRL_OFFSET			11
+#define SPI_CFS_OFFSET			12
+
+/* Bit fields in SR, 7 bits */
+#define SR_MASK				0x7f		/* cover 7 bits */
+#define SR_BUSY				(1 << 0)
+#define SR_TF_NOT_FULL			(1 << 1)
+#define SR_TF_EMPT			(1 << 2)
+#define SR_RF_NOT_EMPT			(1 << 3)
+#define SR_RF_FULL			(1 << 4)
+#define SR_TX_ERR			(1 << 5)
+#define SR_DCOL				(1 << 6)
+
+/* SR bit fields for SSP */
+#define SSP_SR_TF_NOT_FULL		(1 << 2)
+
+static int ssp_timing_wr; /* Tangier A0 SSP timing workaround */
+
+static unsigned int early_pti_console_channel;
+static unsigned int early_pti_control_channel;
+
+/* SPI controller registers */
+struct dw_spi_reg {
+	u32	ctrl0;
+	u32	ctrl1;
+	u32	ssienr;
+	u32	mwcr;
+	u32	ser;
+	u32	baudr;
+	u32	txfltr;
+	u32	rxfltr;
+	u32	txflr;
+	u32	rxflr;
+	u32	sr;
+	u32	imr;
+	u32	isr;
+	u32	risr;
+	u32	txoicr;
+	u32	rxoicr;
+	u32	rxuicr;
+	u32	msticr;
+	u32	icr;
+	u32	dmacr;
+	u32	dmatdlr;
+	u32	dmardlr;
+	u32	idr;
+	u32	version;
+
+	/* Currently operates as 32 bits, though only the low 16 bits matter */
+	u32	dr;
+} __packed;
+
+/* SSP controller registers */
+struct dw_ssp_reg {
+	u32 ctrl0;
+	u32 ctrl1;
+	u32 sr;
+	u32 ssitr;
+	u32 dr;
+} __packed;
+
+#define dw_readl(dw, name)		__raw_readl(&(dw)->name)
+#define dw_writel(dw, name, val)	__raw_writel((val), &(dw)->name)
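+
+/*
+ * The register structs above mirror the MMIO layout, so &(dw)->name
+ * resolves to the register's address; e.g. dw_readl(pspi, sr) reads
+ * offset 0x28 of the SPI block (ten 32-bit registers precede sr).
+ */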
+
+/* Default to the SPI0 registers for mrst; Penwell/Cloverview switch to SPI1 */
+static unsigned long mrst_spi_paddr = MRST_REGBASE_SPI0;
+
+static u32 *pclk_spi0;
+/* Once initialized, always contains an accessible address */
+static struct dw_spi_reg *pspi;
+static struct dw_ssp_reg *pssp;
+
+static struct kmsg_dumper dw_dumper;
+static int dumper_registered;
+
+static void dw_kmsg_dump(struct kmsg_dumper *dumper,
+			 enum kmsg_dump_reason reason)
+{
+	static char line[1024];
+	size_t len;
+
+	/* If we get here, the HW may be uninitialized, so re-init it */
+	mrst_early_console_init();
+
+	while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len))
+		early_mrst_console.write(&early_mrst_console, line, len);
+}
+
+/* Set the baud rate to 115200, 8n1, IRQ disabled */
+static void max3110_spi_write_config(void)
+{
+	u16 config;
+
+	config = 0xc001;
+	dw_writel(pspi, dr, config);
+}
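+
+/*
+ * Background on the MAX3110 16-bit frame (assumed from the datasheet):
+ * the top two bits select the command, so 0xc001 above is a
+ * write-configuration frame and the 0x8000 | c pattern below is a
+ * write-data frame carrying the character in bits 7:0.
+ */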
+
+/* Translate the char to an eligible word and send it to the max3110 */
+static void max3110_spi_write_data(char c)
+{
+	u16 data;
+
+	data = 0x8000 | c;
+	dw_writel(pspi, dr, data);
+}
+
+/* similar to max3110_spi_write_config, but via SSP controller */
+static void max3110_ssp_write_config(void)
+{
+	u16 config;
+
+	config = 0xc001;
+	dw_writel(pssp, dr, config);
+	dw_readl(pssp, dr);
+	udelay(10);
+}
+
+/* similar to max3110_spi_write_data, but via SSP controller */
+static void max3110_ssp_write_data(char c)
+{
+	u16 data;
+
+	data = 0x8000 | c;
+	dw_writel(pssp, dr, data);
+	dw_readl(pssp, dr);
+	udelay(10);
+}
+
+void mrst_early_console_init(void)
+{
+	u32 ctrlr0 = 0;
+	u32 spi0_cdiv;
+	u32 freq; /* Frequency info only needs to be read once */
+
+	/* Base clk is 100 MHz, the actual clk = 100M / (clk_divider + 1) */
+	pclk_spi0 = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
+							MRST_CLK_SPI0_REG);
+	spi0_cdiv = ((*pclk_spi0) & 0xe00) >> 9;
+	freq = 100000000 / (spi0_cdiv + 1);
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL)
+		mrst_spi_paddr = MRST_REGBASE_SPI1;
+	else if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW)
+		mrst_spi_paddr = CLV_REGBASE_SPI1;
+
+	pspi = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
+						mrst_spi_paddr);
+
+	/* Disable SPI controller */
+	dw_writel(pspi, ssienr, 0);
+
+	/* Set control param, 8 bits, transmit only mode */
+	ctrlr0 = dw_readl(pspi, ctrl0);
+
+	ctrlr0 &= 0xfcc0;
+	ctrlr0 |= 0xf | (SPI_FRF_SPI << SPI_FRF_OFFSET)
+		      | (SPI_TMOD_TO << SPI_TMOD_OFFSET);
+	dw_writel(pspi, ctrl0, ctrlr0);
+
+	/*
+	 * Change the spi0 clk to comply with 115200 bps; use 100000 to
+	 * calculate the clk divisor so the clock runs a little slower
+	 * than the real baud rate.
+	 */
+	dw_writel(pspi, baudr, freq/100000);
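+	/*
+	 * Worked example: with spi0_cdiv == 0, freq is 100 MHz and the
+	 * divisor written above is 1000, giving a 100 kHz bit clock,
+	 * just under the requested 115200 bps.
+	 */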
+
+	/* Disable all INT for early phase */
+	dw_writel(pspi, imr, 0x0);
+
+	/* Set the cs to spi-uart */
+	dw_writel(pspi, ser, 0x2);
+
+	/* Enable the HW, the last step for HW init */
+	dw_writel(pspi, ssienr, 0x1);
+
+	/* Set the default configuration */
+	max3110_spi_write_config();
+
+	/* Register the kmsg dumper */
+	if (!dumper_registered) {
+		dw_dumper.dump = dw_kmsg_dump;
+		kmsg_dump_register(&dw_dumper);
+		dumper_registered = 1;
+	}
+}
+
+/* Slave select is expected to be handled in the read/write functions */
+static void early_mrst_spi_putc(char c)
+{
+	unsigned int timeout;
+	u32 sr;
+
+	timeout = MRST_SPI_TIMEOUT;
+	/* Early putc needs to make sure the TX FIFO is not full */
+	while (--timeout) {
+		sr = dw_readl(pspi, sr);
+		if (!(sr & SR_TF_NOT_FULL))
+			cpu_relax();
+		else
+			break;
+	}
+
+	if (!timeout)
+		pr_warn("MRST earlycon: timed out\n");
+	else
+		max3110_spi_write_data(c);
+}
+
+/* Early SPI only uses polling mode */
+static void early_mrst_spi_write(struct console *con, const char *str,
+				unsigned n)
+{
+	int i;
+
+	for (i = 0; i < n && *str; i++) {
+		if (*str == '\n')
+			early_mrst_spi_putc('\r');
+		early_mrst_spi_putc(*str);
+		str++;
+	}
+}
+
+struct console early_mrst_console = {
+	.name =		"earlymrst",
+	.write =	early_mrst_spi_write,
+	.flags =	CON_PRINTBUFFER,
+	.index =	-1,
+};
+
+void mrfld_early_console_init(void)
+{
+	u32 ctrlr0 = 0;
+
+	set_fixmap_nocache(FIX_EARLYCON_MEM_BASE, MRFLD_REGBASE_SSP5);
+
+	pssp = (void *)(__fix_to_virt(FIX_EARLYCON_MEM_BASE) +
+			(MRFLD_REGBASE_SSP5 & (PAGE_SIZE - 1)));
+
+	if (intel_mid_identify_sim() == INTEL_MID_CPU_SIMULATION_NONE)
+		ssp_timing_wr = 1;
+
+	/* mask interrupts, clear enable and set DSS config */
+	/* SSPSCLK on active transfers only */
+	if (ssp_timing_wr) {
+		dw_writel(pssp, ctrl0, 0xc12c0f);
+		dw_writel(pssp, ctrl1, 0x0);
+	} else {
+		dw_writel(pssp, ctrl0, 0xc0000f);
+		dw_writel(pssp, ctrl1, 0x10000000);
+	}
+
+	dw_readl(pssp, sr);
+
+	/* enable port */
+	ctrlr0 = dw_readl(pssp, ctrl0);
+	ctrlr0 |= 0x80;
+	dw_writel(pssp, ctrl0, ctrlr0);
+}
+
+/* slave select is expected to be handled in the read/write functions */
+static int early_mrfld_putc(char c)
+{
+	unsigned int timeout;
+	u32 sr;
+
+	timeout = MRFLD_SSP_TIMEOUT;
+	/* early putc needs to make sure the TX FIFO is not full */
+	while (timeout--) {
+		sr = dw_readl(pssp, sr);
+		if (ssp_timing_wr) {
+			if (sr & 0xF00)
+				cpu_relax();
+			else
+				break;
+		} else {
+			if (!(sr & SSP_SR_TF_NOT_FULL))
+				cpu_relax();
+			else
+				break;
+		}
+	}
+
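+	/*
+	 * The post-decrement in while (timeout--) leaves the unsigned
+	 * counter wrapped to 0xffffffff once it fully expires, which is
+	 * what the check below tests for.
+	 */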
+	if (timeout == 0xffffffff) {
+		pr_info("SSP: waiting timeout\n");
+		return -1;
+	}
+
+	max3110_ssp_write_data(c);
+	return 0;
+}
+
+static void early_mrfld_write(struct console *con,
+				const char *str, unsigned n)
+{
+	int  i;
+
+	for (i = 0; i < n && *str; i++) {
+		if (*str == '\n')
+			early_mrfld_putc('\r');
+		early_mrfld_putc(*str);
+
+		str++;
+	}
+}
+
+struct console early_mrfld_console = {
+	.name =		"earlymrfld",
+	.write =	early_mrfld_write,
+	.flags =	CON_PRINTBUFFER,
+	.index =	-1,
+};
+
+void mrfld_early_printk(const char *fmt, ...)
+{
+	char buf[512];
+	int n;
+	va_list ap;
+
+	va_start(ap, fmt);
+	n = vscnprintf(buf, 512, fmt, ap);
+	va_end(ap);
+
+	early_mrfld_console.write(&early_mrfld_console, buf, n);
+}
+
+/*
+ * The following is the early console based on the High Speed UART (HSU)
+ * device.
+ */
+#define MERR_HSU_PORT_BASE	0xff010180
+#define MERR_HSU_CLK_CTL	0xff00b830
+#define MFLD_HSU_PORT_BASE	0xffa28080
+
+static void __iomem *phsu;
+
+void hsu_early_console_init(const char *s)
+{
+	unsigned long paddr, port = 0;
+	u8 lcr;
+	int *clkctl;
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) {
+		paddr = MERR_HSU_PORT_BASE;
+		clkctl = (int *)set_fixmap_offset_nocache(FIX_CLOCK_CTL,
+							  MERR_HSU_CLK_CTL);
+	} else {
+		paddr = MFLD_HSU_PORT_BASE;
+		clkctl = NULL;
+	}
+
+	/*
+	 * Select the early HSU console port if specified by user in the
+	 * kernel command line.
+	 */
+	if (*s && !kstrtoul(s, 10, &port))
+		port = clamp_val(port, 0, 2);
+
+	paddr += port * 0x80;
+	phsu = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE, paddr);
+
+	/* Disable FIFO */
+	writeb(0x0, phsu + UART_FCR);
+
+	/* Set to default 115200 bps, 8n1 */
+	lcr = readb(phsu + UART_LCR);
+	writeb((0x80 | lcr), phsu + UART_LCR);
+	writeb(0x01, phsu + UART_DLL);
+	writeb(0x00, phsu + UART_DLM);
+	writeb(lcr,  phsu + UART_LCR);
+	writel(0x0010, phsu + UART_ABR * 4);
+	writel(0x0010, phsu + UART_PS * 4);
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) {
+		/* Detect whether the HSU clock is 50 MHz or 19.2 MHz */
+		if (clkctl && *clkctl & (1 << 16))
+			writel(0x0120, phsu + UART_MUL * 4); /* for 50M */
+		else
+			writel(0x05DC, phsu + UART_MUL * 4);  /* for 19.2M */
+	} else
+		writel(0x0240, phsu + UART_MUL * 4);
+
+	writel(0x3D09, phsu + UART_DIV * 4);
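+	/*
+	 * Sanity check (assuming the usual 16x-oversampled fractional
+	 * scaling): 19.2 MHz * 0x05DC(1500) / 0x3D09(15625) = 1.8432 MHz
+	 * = 16 * 115200, which matches the DLL=1/DLM=0 divisor above.
+	 */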
+
+	writeb(0x8, phsu + UART_MCR);
+	writeb(0x7, phsu + UART_FCR);
+	writeb(0x3, phsu + UART_LCR);
+
+	/* Clear IRQ status */
+	readb(phsu + UART_LSR);
+	readb(phsu + UART_RX);
+	readb(phsu + UART_IIR);
+	readb(phsu + UART_MSR);
+
+	/* Enable FIFO */
+	writeb(0x7, phsu + UART_FCR);
+}
+
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+static void early_hsu_putc(char ch)
+{
+	unsigned int timeout = 10000; /* 10ms */
+	u8 status;
+
+	while (--timeout) {
+		status = readb(phsu + UART_LSR);
+		if (status & BOTH_EMPTY)
+			break;
+		udelay(1);
+	}
+
+	/* Only write the char when there was no timeout */
+	if (timeout)
+		writeb(ch, phsu + UART_TX);
+}
+
+static void early_hsu_write(struct console *con, const char *str, unsigned n)
+{
+	int i;
+
+	for (i = 0; i < n && *str; i++) {
+		if (*str == '\n')
+			early_hsu_putc('\r');
+		early_hsu_putc(*str);
+		str++;
+	}
+}
+
+struct console early_hsu_console = {
+	.name =		"earlyhsu",
+	.write =	early_hsu_write,
+	.flags =	CON_PRINTBUFFER,
+	.index =	-1,
+};
+
+void hsu_early_printk(const char *fmt, ...)
+{
+	char buf[512];
+	int n;
+	va_list ap;
+
+	va_start(ap, fmt);
+	n = vscnprintf(buf, 512, fmt, ap);
+	va_end(ap);
+
+	early_hsu_console.write(&early_hsu_console, buf, n);
+}
+
+#define PTI_ADDRESS		0xfd800000
+#define CONTROL_FRAME_LEN 32    /* PTI control frame maximum size */
+
+static void early_pti_write_to_aperture(struct pti_masterchannel *mc,
+					 u8 *buf, int len)
+{
+	int dwordcnt, final, i;
+	u32 ptiword;
+	u8 *p;
+	u32 pti_phys_address;
+	u32 __iomem *aperture;
+
+	p = buf;
+
+	/*
+	 * Calculate the aperture offset from the base using the master and
+	 * channel IDs.
+	 */
+	pti_phys_address = PTI_ADDRESS +
+				(mc->master << 15) + (mc->channel << 8);
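+	/*
+	 * E.g. master 73, channel 0 maps to 0xfd800000 + (73 << 15) =
+	 * 0xfda48000, and each channel adds a further 0x100.
+	 */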
+
+	set_fixmap_nocache(FIX_EARLYCON_MEM_BASE, pti_phys_address);
+	aperture = (void *)(__fix_to_virt(FIX_EARLYCON_MEM_BASE) +
+				(pti_phys_address & (PAGE_SIZE - 1)));
+
+	dwordcnt = len >> 2;
+	final = len - (dwordcnt << 2);		/* final = trailing bytes */
+	if (final == 0 && dwordcnt != 0) {	/* always have a final dword */
+		final += 4;
+		dwordcnt--;
+	}
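+	/*
+	 * E.g. len = 5 gives dwordcnt = 1, final = 1, while len = 4 is
+	 * folded into dwordcnt = 0, final = 4, so a final dword always
+	 * goes out through the DTS-marked aperture below.
+	 */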
+
+	for (i = 0; i < dwordcnt; i++) {
+		ptiword = be32_to_cpu(*(u32 *)p);
+		p += 4;
+		iowrite32(ptiword, aperture);
+	}
+
+	aperture += PTI_LASTDWORD_DTS;	/* adding the DTS offset signals EOM */
+	ptiword = 0;
+
+	for (i = 0; i < final; i++)
+		ptiword |= *p++ << (24-(8*i));
+
+	iowrite32(ptiword, aperture);
+}
+
+static int pti_early_console_init(void)
+{
+	early_pti_console_channel = 0;
+	early_pti_control_channel = 0;
+	return 0;
+}
+
+static void early_pti_write(struct console *con,
+			const char *str, unsigned n)
+{
+	static struct pti_masterchannel mccontrol = {.master = 72,
+						     .channel = 0};
+	static struct pti_masterchannel mcconsole = {.master = 73,
+						     .channel = 0};
+	const char *control_format = "%3d %3d %s";
+
+	/*
+	 * Since we access the comm member in current's task_struct,
+	 * we only need to be as large as what 'comm' in that
+	 * structure is.
+	 */
+	char comm[TASK_COMM_LEN];
+	u8 control_frame[CONTROL_FRAME_LEN];
+
+	/* task information */
+	if (in_irq())
+		strncpy(comm, "hardirq", sizeof(comm));
+	else if (in_softirq())
+		strncpy(comm, "softirq", sizeof(comm));
+	else
+		strncpy(comm, current->comm, sizeof(comm));
+
+	/* Absolutely ensure our buffer is zero terminated */
+	comm[TASK_COMM_LEN-1] = 0;
+
+	mccontrol.channel = early_pti_control_channel;
+	early_pti_control_channel = (early_pti_control_channel + 1) & 0x7f;
+
+	mcconsole.channel = early_pti_console_channel;
+	early_pti_console_channel = (early_pti_console_channel + 1) & 0x7f;
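+	/*
+	 * The channels rotate through 0..127 on every write, presumably so
+	 * the PTI decoder can pair each console payload with the control
+	 * frame that describes it.
+	 */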
+
+	snprintf(control_frame, CONTROL_FRAME_LEN, control_format,
+		mcconsole.master, mcconsole.channel, comm);
+
+	early_pti_write_to_aperture(&mccontrol, control_frame,
+					strlen(control_frame));
+	early_pti_write_to_aperture(&mcconsole, (u8 *)str, n);
+}
+
+struct console early_pti_console = {
+	.name =		"earlypti",
+	.early_setup =  pti_early_console_init,
+	.write =	early_pti_write,
+	.flags =	CON_PRINTBUFFER,
+	.index =	-1,
+};
+
+void pti_early_printk(const char *fmt, ...)
+{
+	char buf[512];
+	int n;
+	va_list ap;
+
+	va_start(ap, fmt);
+	n = vscnprintf(buf, sizeof(buf), fmt, ap);
+	va_end(ap);
+
+	early_pti_console.write(&early_pti_console, buf, n);
+}
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
new file mode 100644
index 0000000..a7635b3
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -0,0 +1,239 @@
+/*
+ * intel-mid.c: Intel MID platform setup code
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author: Jacob Pan (jacob.jun.pan@intel.com)
+ * Author: Sathyanarayanan KN(sathyanarayanan.kuppuswamy@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#define	SFI_SIG_OEM0	"OEM0"
+#define pr_fmt(fmt) "intel_mid: " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/sfi.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+
+#include <asm/setup.h>
+#include <asm/mpspec_def.h>
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <asm/io_apic.h>
+#include <asm/intel-mid.h>
+#include <asm/io.h>
+#include <asm/i8259.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/apb_timer.h>
+#include <asm/reboot.h>
+#include "intel_mid_weak_decls.h"
+#include "intel_soc_pmu.h"
+
+/*
+ * the clockevent devices on Moorestown/Medfield can be APBT or LAPIC clock,
+ * cmdline option x86_intel_mid_timer can be used to override the configuration
+ * to prefer one or the other.
+ * at runtime, there are basically three timer configurations:
+ * 1. per cpu apbt clock only
+ * 2. per cpu always-on lapic clocks only, this is Penwell/Medfield only
+ * 3. per cpu lapic clock (C3STOP) and one apbt clock, with broadcast.
+ *
+ * by default (without cmdline option), platform code first detects cpu type
+ * to see if we are on lincroft or penwell, then set up both lapic or apbt
+ * clocks accordingly.
+ * i.e. by default, medfield uses configuration #2, moorestown uses #1.
+ * config #3 is supported but not recommended on medfield.
+ *
+ * rating and feature summary:
+ * lapic (with C3STOP) --------- 100
+ * apbt (always-on) ------------ 110
+ * lapic (always-on,ARAT) ------ 150
+ */
+__cpuinitdata enum intel_mid_timer_options intel_mid_timer_options;
+
+/* intel_mid_ops to store sub arch ops */
+struct intel_mid_ops *intel_mid_ops;
+/* getter function for sub arch ops*/
+static void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
+enum intel_mid_cpu_type __intel_mid_cpu_chip;
+EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip);
+
+static int force_cold_boot;
+module_param(force_cold_boot, int, 0644);
+MODULE_PARM_DESC(force_cold_boot,
+		 "Set to Y to force a COLD BOOT instead of a COLD RESET "
+		 "on the next reboot system call.");
+
+u32 nbr_hsi_clients = 2;
+
+static void intel_mid_power_off(void)
+{
+	pmu_power_off();
+}
+
+static void intel_mid_reboot(void)
+{
+	if (intel_scu_ipc_fw_update())
+		pr_debug("intel_scu_fw_update: IFWI upgrade failed...\n");
+
+	if (force_cold_boot)
+		rpmsg_send_generic_simple_command(IPCMSG_COLD_BOOT, 0);
+	else
+		rpmsg_send_generic_simple_command(IPCMSG_COLD_RESET, 0);
+}
+
+static unsigned long __init intel_mid_calibrate_tsc(void)
+{
+	return 0;
+}
+
+static void __init intel_mid_time_init(void)
+{
+#ifdef CONFIG_SFI
+	sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, sfi_parse_mtmr);
+#endif
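+
+	/*
+	 * Per the mode summary above: APBT_ONLY falls through to the APB
+	 * timer init below, LAPIC_APBT additionally wires up the LAPIC
+	 * clockevents, and the default path uses the LAPIC alone when it
+	 * is always-on (ARAT), skipping the APB timer init entirely.
+	 */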
+	switch (intel_mid_timer_options) {
+	case INTEL_MID_TIMER_APBT_ONLY:
+		break;
+	case INTEL_MID_TIMER_LAPIC_APBT:
+		x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
+		x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
+		break;
+	default:
+		if (!boot_cpu_has(X86_FEATURE_ARAT))
+			break;
+		x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
+		x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
+		return;
+	}
+	/* we need at least one APB timer */
+	pre_init_apic_IRQ0();
+	apbt_time_init();
+}
+
+static void __cpuinit intel_mid_arch_setup(void)
+{
+	if (boot_cpu_data.x86 != 6) {
+		pr_err("Unknown Intel MID CPU (%d:%d), default to Penwell\n",
+			boot_cpu_data.x86, boot_cpu_data.x86_model);
+		__intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL;
+		return;
+	}
+	switch (boot_cpu_data.x86_model) {
+	case 0x35:
+		__intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_CLOVERVIEW;
+		break;
+	case 0x3C:
+	case 0x4A:
+		__intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_TANGIER;
+		break;
+	case 0x5A:
+		__intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_ANNIEDALE;
+		break;
+	case 0x5D:
+		__intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_CARBONCANYON;
+		break;
+	case 0x27:
+	default:
+		__intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL;
+		break;
+	}
+
+	if (__intel_mid_cpu_chip < MAX_CPU_OPS(get_intel_mid_ops)) {
+		intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip]();
+	} else {
+		intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL]();
+		pr_info("ARCH: Unknown SoC, assuming PENWELL!\n");
+	}
+
+	if (intel_mid_ops->arch_setup)
+		intel_mid_ops->arch_setup();
+}
+
+/* MID systems don't have an i8042 controller */
+static int intel_mid_i8042_detect(void)
+{
+	return 0;
+}
+
+/*
+ * Moorestown does not have external NMI source nor port 0x61 to report
+ * NMI status. The possible NMI sources are from pmu as a result of NMI
+ * watchdog or lock debug. Reading io port 0x61 results in 0xff, which
+ * would mislead the NMI handler.
+ */
+static unsigned char intel_mid_get_nmi_reason(void)
+{
+	return 0;
+}
+
+/*
+ * Moorestown specific x86_init function overrides and early setup
+ * calls.
+ */
+void __init x86_intel_mid_early_setup(void)
+{
+	x86_init.resources.probe_roms = x86_init_noop;
+	x86_init.resources.reserve_resources = x86_init_noop;
+	x86_init.oem.arch_setup = intel_mid_arch_setup;
+
+	x86_init.timers.timer_init = intel_mid_time_init;
+	x86_init.timers.setup_percpu_clockev = x86_init_noop;
+	x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock;
+
+	x86_init.irqs.pre_vector_init = x86_init_noop;
+
+	x86_platform.calibrate_tsc = intel_mid_calibrate_tsc;
+	x86_platform.i8042_detect = intel_mid_i8042_detect;
+	x86_init.timers.wallclock_init = intel_mid_rtc_init;
+	x86_platform.get_nmi_reason = intel_mid_get_nmi_reason;
+
+	x86_init.pci.init = intel_mid_pci_init;
+	x86_init.pci.fixup_irqs = x86_init_noop;
+
+	legacy_pic = &null_legacy_pic;
+
+	pm_power_off = intel_mid_power_off;
+	machine_ops.emergency_restart  = intel_mid_reboot;
+
+	/* Avoid searching for BIOS MP tables */
+	x86_init.mpparse.find_smp_config = x86_init_noop;
+	x86_init.mpparse.get_smp_config = x86_init_uint_noop;
+	set_bit(MP_BUS_ISA, mp_bus_not_pci);
+}
+
+/*
+ * If the user does not want to use the per-CPU APB timer, just give it a
+ * lower rating than the local APIC timer and skip the late per-CPU timer
+ * init.
+static inline int __init setup_x86_intel_mid_timer(char *arg)
+{
+	if (!arg)
+		return -EINVAL;
+
+	if (strcmp("apbt_only", arg) == 0)
+		intel_mid_timer_options = INTEL_MID_TIMER_APBT_ONLY;
+	else if (strcmp("lapic_and_apbt", arg) == 0)
+		intel_mid_timer_options = INTEL_MID_TIMER_LAPIC_APBT;
+	else {
+		pr_warn("X86 INTEL_MID timer option %s not recognised"
+			   " use x86_intel_mid_timer=apbt_only or lapic_and_apbt\n",
+			   arg);
+		return -EINVAL;
+	}
+	return 0;
+}
+__setup("x86_intel_mid_timer=", setup_x86_intel_mid_timer);
diff --git a/arch/x86/platform/intel-mid/intel_mid_pcihelpers.c b/arch/x86/platform/intel-mid/intel_mid_pcihelpers.c
new file mode 100644
index 0000000..85d4be8
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_mid_pcihelpers.c
@@ -0,0 +1,105 @@
+#include <linux/export.h>
+#include <linux/pci.h>
+
+#include <asm/intel_mid_pcihelpers.h>
+
+/* Unified message bus read/write operation */
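+/*
+ * Command encoding, as read from the code below (not from a datasheet):
+ * CTRL is [31:24] opcode, [23:16] destination port, [15:8] low address
+ * byte, [7:0] byte enables; address bits above 7:0 are staged through
+ * CTRL_EXT first whenever they are non-zero.
+ */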
+static DEFINE_SPINLOCK(msgbus_lock);
+
+static struct pci_dev *pci_root;
+
+static int intel_mid_msgbus_init(void)
+{
+	pci_root = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+	if (!pci_root) {
+		printk(KERN_ALERT "%s: Error: msgbus PCI handle NULL\n",
+			__func__);
+		return -ENODEV;
+	}
+	return 0;
+}
+fs_initcall(intel_mid_msgbus_init);
+
+u32 intel_mid_msgbus_read32_raw(u32 cmd)
+{
+	unsigned long irq_flags;
+	u32 data;
+
+	spin_lock_irqsave(&msgbus_lock, irq_flags);
+	pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
+	pci_read_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, &data);
+	spin_unlock_irqrestore(&msgbus_lock, irq_flags);
+
+	return data;
+}
+EXPORT_SYMBOL(intel_mid_msgbus_read32_raw);
+
+void intel_mid_msgbus_write32_raw(u32 cmd, u32 data)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&msgbus_lock, irq_flags);
+	pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, data);
+	pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
+	spin_unlock_irqrestore(&msgbus_lock, irq_flags);
+}
+EXPORT_SYMBOL(intel_mid_msgbus_write32_raw);
+
+u32 intel_mid_msgbus_read32(u8 port, u32 addr)
+{
+	unsigned long irq_flags;
+	u32 data;
+	u32 cmd;
+	u32 cmdext;
+
+	cmd = (PCI_ROOT_MSGBUS_READ << 24) | (port << 16) |
+		((addr & 0xff) << 8) | PCI_ROOT_MSGBUS_DWORD_ENABLE;
+	cmdext = addr & 0xffffff00;
+
+	spin_lock_irqsave(&msgbus_lock, irq_flags);
+
+	if (cmdext) {
+		/* This resets to 0 automatically, no need to write 0 */
+		pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG,
+			cmdext);
+	}
+
+	pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
+	pci_read_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, &data);
+	spin_unlock_irqrestore(&msgbus_lock, irq_flags);
+
+	return data;
+}
+EXPORT_SYMBOL(intel_mid_msgbus_read32);
+
+void intel_mid_msgbus_write32(u8 port, u32 addr, u32 data)
+{
+	unsigned long irq_flags;
+	u32 cmd;
+	u32 cmdext;
+
+	cmd = (PCI_ROOT_MSGBUS_WRITE << 24) | (port << 16) |
+		((addr & 0xFF) << 8) | PCI_ROOT_MSGBUS_DWORD_ENABLE;
+	cmdext = addr & 0xffffff00;
+
+	spin_lock_irqsave(&msgbus_lock, irq_flags);
+	pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, data);
+
+	if (cmdext) {
+		/* This resets to 0 automatically, no need to write 0 */
+		pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG,
+			cmdext);
+	}
+
+	pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
+	spin_unlock_irqrestore(&msgbus_lock, irq_flags);
+}
+EXPORT_SYMBOL(intel_mid_msgbus_write32);
+
+/* Called only from code that runs later than fs_initcall */
+u32 intel_mid_soc_stepping(void)
+{
+	return pci_root->revision;
+}
+EXPORT_SYMBOL(intel_mid_soc_stepping);
diff --git a/arch/x86/platform/intel-mid/intel_mid_scu.c b/arch/x86/platform/intel-mid/intel_mid_scu.c
new file mode 100644
index 0000000..a8e633c
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_mid_scu.c
@@ -0,0 +1,92 @@
+/*
+ * intel_mid_scu.c: Intel MID SCU platform initialization code
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/sfi.h>
+#include <linux/intel_pmic_gpio.h>
+#include <linux/irq.h>
+#include <linux/rpmsg.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/platform_data/intel_mid_remoteproc.h>
+
+struct rpmsg_ns_list nslist = {
+	.list = LIST_HEAD_INIT(nslist.list),
+	.lock = __MUTEX_INITIALIZER(nslist.lock),
+};
+
+static struct intel_mid_rproc_pdata intel_scu_pdata = {
+	.name		= "intel_rproc_scu",
+	.firmware	= "intel_mid/intel_mid_remoteproc.fw",
+	.nslist		= &nslist,
+};
+
+static u64 intel_scu_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device intel_scu_device = {
+	.name		= "intel_rproc_scu",
+	.id		= -1,
+	.dev		= {
+		.platform_data = &intel_scu_pdata,
+		.dma_mask = &intel_scu_dmamask,
+	},
+};
+
+void register_rpmsg_service(char *name, int id, u32 addr)
+{
+	struct rpmsg_ns_info *info;
+
+	info = rpmsg_ns_alloc(name, id, addr);
+	if (!info)	/* guard against a failed allocation */
+		return;
+	rpmsg_ns_add_to_list(info, &nslist);
+}
+
+int intel_mid_rproc_init(void)
+{
+	int err;
+
+	/* generic rpmsg channels */
+	register_rpmsg_service("rpmsg_ipc_command", RPROC_SCU, RP_IPC_COMMAND);
+	register_rpmsg_service("rpmsg_ipc_simple_command",
+				RPROC_SCU, RP_IPC_SIMPLE_COMMAND);
+	register_rpmsg_service("rpmsg_ipc_raw_command",
+				RPROC_SCU, RP_IPC_RAW_COMMAND);
+
+	register_rpmsg_service("rpmsg_pmic", RPROC_SCU, RP_PMIC_ACCESS);
+	register_rpmsg_service("rpmsg_mip", RPROC_SCU, RP_MIP_ACCESS);
+	register_rpmsg_service("rpmsg_fw_update",
+					RPROC_SCU, RP_FW_ACCESS);
+	register_rpmsg_service("rpmsg_ipc_util",
+					RPROC_SCU, RP_IPC_UTIL);
+	register_rpmsg_service("rpmsg_flis", RPROC_SCU, RP_FLIS_ACCESS);
+	register_rpmsg_service("rpmsg_watchdog", RPROC_SCU, RP_SET_WATCHDOG);
+	register_rpmsg_service("rpmsg_umip", RPROC_SCU, RP_UMIP_ACCESS);
+	register_rpmsg_service("rpmsg_osip", RPROC_SCU, RP_OSIP_ACCESS);
+	register_rpmsg_service("rpmsg_vrtc", RPROC_SCU, RP_VRTC);
+	register_rpmsg_service("rpmsg_fw_logging", RPROC_SCU, RP_FW_LOGGING);
+	register_rpmsg_service("rpmsg_kpd_led", RPROC_SCU,
+				RP_MSIC_KPD_LED);
+	register_rpmsg_service("rpmsg_modem_nvram", RPROC_SCU,
+					RP_IPC_RAW_COMMAND);
+	register_rpmsg_service("rpmsg_mid_pwm", RPROC_SCU,
+				RP_MSIC_PWM);
+
+	err = platform_device_register(&intel_scu_device);
+	if (err < 0)
+		pr_err("Failed to register intel-mid-rproc platform device.\n");
+
+	return 0;
+}
+arch_initcall_sync(intel_mid_rproc_init);
diff --git a/arch/x86/platform/intel-mid/intel_mid_scu.h b/arch/x86/platform/intel-mid/intel_mid_scu.h
new file mode 100644
index 0000000..0601fe9
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_mid_scu.h
@@ -0,0 +1,15 @@
+/*
+ * intel_mid_scu.h: SCU initialization header file
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _INTEL_MID_SCU_H_
+#define _INTEL_MID_SCU_H_
+extern int intel_mid_rproc_init(void) __attribute__((weak));
+#endif
diff --git a/arch/x86/platform/intel-mid/intel_mid_sfi.c b/arch/x86/platform/intel-mid/intel_mid_sfi.c
new file mode 100644
index 0000000..f73b84e
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_mid_sfi.c
@@ -0,0 +1,608 @@
+/*
+ * intel_mid_sfi.c: Intel MID SFI initialization code
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author: Sathyanarayanan KN
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/sfi.h>
+#include <linux/intel_pmic_gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/i2c.h>
+#include <linux/skbuff.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/blkdev.h>
+
+#include <asm/setup.h>
+#include <asm/mpspec_def.h>
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <asm/io_apic.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_vrtc.h>
+#include <asm/io.h>
+#include <asm/i8259.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/apb_timer.h>
+#include <asm/reboot.h>
+#include "intel_mid_weak_decls.h"
+
+#define	SFI_SIG_OEM0	"OEM0"
+#define MAX_IPCDEVS	24
+#define MAX_SCU_SPI	24
+#define MAX_SCU_I2C	24
+
+static struct platform_device *ipc_devs[MAX_IPCDEVS];
+static struct spi_board_info *spi_devs[MAX_SCU_SPI];
+static struct i2c_board_info *i2c_devs[MAX_SCU_I2C];
+static struct sfi_gpio_table_entry *gpio_table;
+static struct sfi_timer_table_entry sfi_mtimer_array[SFI_MTMR_MAX_NUM];
+static int ipc_next_dev;
+static int spi_next_dev;
+static int i2c_next_dev;
+static int i2c_bus[MAX_SCU_I2C];
+static int gpio_num_entry;
+static u32 sfi_mtimer_usage[SFI_MTMR_MAX_NUM];
+int sfi_mrtc_num;
+int sfi_mtimer_num;
+
+struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
+EXPORT_SYMBOL_GPL(sfi_mrtc_array);
+
+struct blocking_notifier_head intel_scu_notifier =
+			BLOCKING_NOTIFIER_INIT(intel_scu_notifier);
+EXPORT_SYMBOL_GPL(intel_scu_notifier);
+
+/* parse all the mtimer info to a static mtimer array */
+int __init sfi_parse_mtmr(struct sfi_table_header *table)
+{
+	struct sfi_table_simple *sb;
+	struct sfi_timer_table_entry *pentry;
+	struct mpc_intsrc mp_irq;
+	int totallen;
+
+	sb = (struct sfi_table_simple *)table;
+	if (!sfi_mtimer_num) {
+		sfi_mtimer_num = SFI_GET_NUM_ENTRIES(sb,
+					struct sfi_timer_table_entry);
+		pentry = (struct sfi_timer_table_entry *) sb->pentry;
+		totallen = sfi_mtimer_num * sizeof(*pentry);
+		memcpy(sfi_mtimer_array, pentry, totallen);
+	}
+
+	pr_debug("SFI MTIMER info (num = %d):\n", sfi_mtimer_num);
+	pentry = sfi_mtimer_array;
+	for (totallen = 0; totallen < sfi_mtimer_num; totallen++, pentry++) {
+		pr_debug("timer[%d]: paddr = 0x%08x, freq = %dHz, irq = %d\n",
+			totallen, (u32)pentry->phys_addr,
+			pentry->freq_hz, pentry->irq);
+		if (!pentry->irq)
+			continue;
+		mp_irq.type = MP_INTSRC;
+		mp_irq.irqtype = mp_INT;
+		/* triggering mode edge bit 2-3, active high polarity bit 0-1 */
+		mp_irq.irqflag = 5;
+		mp_irq.srcbus = MP_BUS_ISA;
+		mp_irq.srcbusirq = pentry->irq;	/* IRQ */
+		mp_irq.dstapic = MP_APIC_ALL;
+		mp_irq.dstirq = pentry->irq;
+		mp_save_irq(&mp_irq);
+	}
+
+	return 0;
+}
+
+struct sfi_timer_table_entry *sfi_get_mtmr(int hint)
+{
+	int i;
+	if (hint < sfi_mtimer_num) {
+		if (!sfi_mtimer_usage[hint]) {
+			pr_debug("hint taken for timer %d irq %d\n",
+				hint, sfi_mtimer_array[hint].irq);
+			sfi_mtimer_usage[hint] = 1;
+			return &sfi_mtimer_array[hint];
+		}
+	}
+	/* take the first timer available */
+	for (i = 0; i < sfi_mtimer_num; i++) {
+		if (!sfi_mtimer_usage[i]) {
+			sfi_mtimer_usage[i] = 1;
+			return &sfi_mtimer_array[i];
+		}
+	}
+	return NULL;
+}
+
+void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr)
+{
+	int i;
+	for (i = 0; i < sfi_mtimer_num; i++) {
+		if (mtmr->irq == sfi_mtimer_array[i].irq) {
+			sfi_mtimer_usage[i] = 0;
+			return;
+		}
+	}
+}
+
+/* parse all the mrtc info to a global mrtc array */
+int __init sfi_parse_mrtc(struct sfi_table_header *table)
+{
+	struct sfi_table_simple *sb;
+	struct sfi_rtc_table_entry *pentry;
+	struct mpc_intsrc mp_irq;
+
+	int totallen;
+
+	sb = (struct sfi_table_simple *)table;
+	if (!sfi_mrtc_num) {
+		sfi_mrtc_num = SFI_GET_NUM_ENTRIES(sb,
+						struct sfi_rtc_table_entry);
+		pentry = (struct sfi_rtc_table_entry *)sb->pentry;
+		totallen = sfi_mrtc_num * sizeof(*pentry);
+		memcpy(sfi_mrtc_array, pentry, totallen);
+	}
+
+	pr_debug("SFI RTC info (num = %d):\n", sfi_mrtc_num);
+	pentry = sfi_mrtc_array;
+	for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) {
+		pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n",
+			totallen, (u32)pentry->phys_addr, pentry->irq);
+		mp_irq.type = MP_INTSRC;
+		mp_irq.irqtype = mp_INT;
+		mp_irq.irqflag = 0xf;	/* level trigger and active low */
+		mp_irq.srcbus = MP_BUS_ISA;
+		mp_irq.srcbusirq = pentry->irq;	/* IRQ */
+		mp_irq.dstapic = MP_APIC_ALL;
+		mp_irq.dstirq = pentry->irq;
+		mp_save_irq(&mp_irq);
+	}
+	return 0;
+}
+
+
+/*
+ * Parsing GPIO table first, since the DEVS table will need this table
+ * to map the pin name to the actual pin.
+ */
+static int __init sfi_parse_gpio(struct sfi_table_header *table)
+{
+	struct sfi_table_simple *sb;
+	struct sfi_gpio_table_entry *pentry;
+	int num, i;
+
+	if (gpio_table)
+		return 0;
+	sb = (struct sfi_table_simple *)table;
+	num = SFI_GET_NUM_ENTRIES(sb, struct sfi_gpio_table_entry);
+	pentry = (struct sfi_gpio_table_entry *)sb->pentry;
+
+	gpio_table = kmalloc(num * sizeof(*pentry), GFP_KERNEL);
+	if (!gpio_table)
+		return -ENOMEM;
+	memcpy(gpio_table, pentry, num * sizeof(*pentry));
+	gpio_num_entry = num;
+
+	pr_debug("GPIO pin info:\n");
+	for (i = 0; i < num; i++, pentry++)
+		pr_debug("info[%2d]: controller = %16.16s, pin_name = %16.16s, pin = %d\n",
+			i, pentry->controller_name, pentry->pin_name,
+			pentry->pin_no);
+	return 0;
+}
+
+int get_gpio_by_name(const char *name)
+{
+	struct sfi_gpio_table_entry *pentry = gpio_table;
+	int i;
+
+	if (!pentry)
+		return -1;
+	for (i = 0; i < gpio_num_entry; i++, pentry++) {
+		if (!strncmp(name, pentry->pin_name, SFI_NAME_LEN))
+			return pentry->pin_no;
+	}
+	return -1;
+}
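+
+/*
+ * Example (illustrative): resolve a pin by its SFI name; the name below is
+ * hypothetical. A negative return means the pin is not in the table:
+ *
+ *	int pin = get_gpio_by_name("cam_pwr_en");
+ *	if (pin >= 0)
+ *		gpio_request(pin, "cam_pwr_en");
+ */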
+
+void __init intel_scu_device_register(struct platform_device *pdev)
+{
+	if (ipc_next_dev == MAX_IPCDEVS)
+		pr_err("too many SCU IPC devices\n");
+	else
+		ipc_devs[ipc_next_dev++] = pdev;
+}
+
+static void __init intel_scu_spi_device_register(struct spi_board_info *sdev)
+{
+	struct spi_board_info *new_dev;
+
+	if (spi_next_dev == MAX_SCU_SPI) {
+		pr_err("too many SCU SPI devices\n");
+		return;
+	}
+
+	new_dev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+	if (!new_dev) {
+		pr_err("failed to alloc mem for delayed spi dev %s\n",
+			sdev->modalias);
+		return;
+	}
+	memcpy(new_dev, sdev, sizeof(*sdev));
+
+	spi_devs[spi_next_dev++] = new_dev;
+}
+
+static void __init intel_scu_i2c_device_register(int bus,
+						struct i2c_board_info *idev)
+{
+	struct i2c_board_info *new_dev;
+
+	if (i2c_next_dev == MAX_SCU_I2C) {
+		pr_err("too many SCU I2C devices\n");
+		return;
+	}
+
+	new_dev = kzalloc(sizeof(*idev), GFP_KERNEL);
+	if (!new_dev) {
+		pr_err("failed to alloc mem for delayed i2c dev %s\n",
+			idev->type);
+		return;
+	}
+	memcpy(new_dev, idev, sizeof(*idev));
+
+	i2c_bus[i2c_next_dev] = bus;
+	i2c_devs[i2c_next_dev++] = new_dev;
+}
+
+/*
+ * Called by the SCU IPC driver once the SCU is up: register all devices
+ * whose creation was deferred until SCU services became available.
+ */
+void intel_scu_devices_create(void)
+{
+	int i;
+
+	for (i = 0; i < ipc_next_dev; i++)
+		platform_device_add(ipc_devs[i]);
+
+	for (i = 0; i < spi_next_dev; i++)
+		spi_register_board_info(spi_devs[i], 1);
+
+	for (i = 0; i < i2c_next_dev; i++) {
+		struct i2c_adapter *adapter;
+		struct i2c_client *client;
+
+		adapter = i2c_get_adapter(i2c_bus[i]);
+		if (adapter) {
+			client = i2c_new_device(adapter, i2c_devs[i]);
+			if (!client)
+				pr_err("can't create i2c device %s\n",
+					i2c_devs[i]->type);
+		} else {
+			i2c_register_board_info(i2c_bus[i], i2c_devs[i], 1);
+		}
+	}
+	intel_scu_notifier_post(SCU_AVAILABLE, NULL);
+}
+EXPORT_SYMBOL_GPL(intel_scu_devices_create);
+
+/* Called by the SCU IPC driver when the SCU goes down */
+void intel_scu_devices_destroy(void)
+{
+	int i;
+
+	intel_scu_notifier_post(SCU_DOWN, NULL);
+
+	for (i = 0; i < ipc_next_dev; i++)
+		platform_device_del(ipc_devs[i]);
+}
+EXPORT_SYMBOL_GPL(intel_scu_devices_destroy);
+
+static struct platform_device *psh_ipc;
+void intel_psh_devices_create(void)
+{
+	psh_ipc = platform_device_alloc("intel_psh_ipc", 0);
+	if (psh_ipc == NULL) {
+		pr_err("out of memory for platform device psh_ipc.\n");
+		return;
+	}
+
+	platform_device_add(psh_ipc);
+}
+EXPORT_SYMBOL_GPL(intel_psh_devices_create);
+
+void intel_psh_devices_destroy(void)
+{
+	if (psh_ipc)
+		platform_device_del(psh_ipc);
+}
+EXPORT_SYMBOL_GPL(intel_psh_devices_destroy);
+
+void __init install_irq_resource(struct platform_device *pdev, int irq)
+{
+	/* Single-threaded init path, so reusing a static resource is safe */
+	static struct resource __initdata res = {
+		.name = "IRQ",
+		.flags = IORESOURCE_IRQ,
+	};
+	res.start = irq;
+	platform_device_add_resources(pdev, &res, 1);
+}
+
+static void __init sfi_handle_ipc_dev(struct sfi_device_table_entry *pentry,
+					struct devs_id *dev)
+{
+	struct platform_device *pdev;
+	void *pdata = NULL;
+
+	pr_info("IPC bus, name = %16.16s, irq = 0x%2x\n",
+		pentry->name, pentry->irq);
+	pdata = dev->get_platform_data(pentry);
+	pdev = platform_device_alloc(pentry->name, 0);
+	if (pdev == NULL) {
+		pr_err("out of memory for SFI platform device '%s'.\n",
+			pentry->name);
+		return;
+	}
+	install_irq_resource(pdev, pentry->irq);
+
+	pdev->dev.platform_data = pdata;
+	intel_scu_device_register(pdev);
+}
+
+static void __init sfi_handle_spi_dev(struct sfi_device_table_entry *pentry,
+					struct devs_id *dev)
+{
+	struct spi_board_info spi_info;
+	void *pdata = NULL;
+
+	memset(&spi_info, 0, sizeof(spi_info));
+	strncpy(spi_info.modalias, pentry->name, SFI_NAME_LEN);
+	spi_info.irq = ((pentry->irq == (u8)0xff) ? 0 : pentry->irq);
+	spi_info.bus_num = pentry->host_num;
+	spi_info.chip_select = pentry->addr;
+	spi_info.max_speed_hz = pentry->max_freq;
+	pr_info("SPI bus=%d, name=%16.16s, irq=0x%2x, max_freq=%d, cs=%d\n",
+		spi_info.bus_num,
+		spi_info.modalias,
+		spi_info.irq,
+		spi_info.max_speed_hz,
+		spi_info.chip_select);
+
+	pdata = dev->get_platform_data(&spi_info);
+
+	spi_info.platform_data = pdata;
+	if (dev->delay)
+		intel_scu_spi_device_register(&spi_info);
+	else
+		spi_register_board_info(&spi_info, 1);
+}
+
+static void __init sfi_handle_i2c_dev(struct sfi_device_table_entry *pentry,
+					struct devs_id *dev)
+{
+	struct i2c_board_info i2c_info;
+	void *pdata = NULL;
+
+	memset(&i2c_info, 0, sizeof(i2c_info));
+	strncpy(i2c_info.type, pentry->name, SFI_NAME_LEN);
+	i2c_info.irq = ((pentry->irq == (u8)0xff) ? 0 : pentry->irq);
+	i2c_info.addr = pentry->addr;
+	pr_info("I2C bus = %d, name = %16.16s, irq = 0x%2x, addr = 0x%x\n",
+		pentry->host_num,
+		i2c_info.type,
+		i2c_info.irq,
+		i2c_info.addr);
+	pdata = dev->get_platform_data(&i2c_info);
+	i2c_info.platform_data = pdata;
+
+	if (dev->delay)
+		intel_scu_i2c_device_register(pentry->host_num, &i2c_info);
+	else
+		i2c_register_board_info(pentry->host_num, &i2c_info, 1);
+}
+
+static void __init sfi_handle_sd_dev(struct sfi_device_table_entry *pentry,
+					struct devs_id *dev)
+{
+	struct sd_board_info sd_info;
+	void *pdata = NULL;
+
+	memset(&sd_info, 0, sizeof(sd_info));
+	strncpy(sd_info.name, pentry->name, 16);
+	sd_info.bus_num = pentry->host_num;
+	sd_info.board_ref_clock = pentry->max_freq;
+	sd_info.addr = pentry->addr;
+	pr_info("SDIO bus = %d, name = %16.16s, ref_clock = %d, addr = 0x%x\n",
+			sd_info.bus_num,
+			sd_info.name,
+			sd_info.board_ref_clock,
+			sd_info.addr);
+	pdata = dev->get_platform_data(&sd_info);
+	sd_info.platform_data = pdata;
+}
+
+struct devs_id __init *get_device_id(u8 type, char *name)
+{
+	struct devs_id *dev = device_ids;
+
+	if (device_ids == NULL)
+		return NULL;
+
+	while (dev->name[0]) {
+		if (dev->type == type &&
+			!strncmp(dev->name, name, SFI_NAME_LEN)) {
+			return dev;
+		}
+		dev++;
+	}
+
+	return NULL;
+}
+
+static int __init sfi_parse_devs(struct sfi_table_header *table)
+{
+	struct sfi_table_simple *sb;
+	struct sfi_device_table_entry *pentry;
+	struct devs_id *dev = NULL;
+	int num, i;
+	int ioapic;
+	struct io_apic_irq_attr irq_attr;
+
+	sb = (struct sfi_table_simple *)table;
+	num = SFI_GET_NUM_ENTRIES(sb, struct sfi_device_table_entry);
+	pentry = (struct sfi_device_table_entry *)sb->pentry;
+
+	for (i = 0; i < num; i++, pentry++) {
+		int irq = pentry->irq;
+
+		if (irq != (u8)0xff) { /* native RTE case */
+			/* these SPI2 devices are not exposed to system as PCI
+			 * devices, but they have separate RTE entry in IOAPIC
+			 * so we have to enable them one by one here
+			 */
+			ioapic = mp_find_ioapic(irq);
+			if (ioapic >= 0) {
+				irq_attr.ioapic = ioapic;
+				irq_attr.ioapic_pin = irq;
+				irq_attr.trigger = 1;
+				if (intel_mid_identify_cpu() ==
+						INTEL_MID_CPU_CHIP_TANGIER) {
+					if (!strncmp(pentry->name,
+							"r69001-ts-i2c", 13))
+						/* active low */
+						irq_attr.polarity = 1;
+					else if (!strncmp(pentry->name,
+							"synaptics_3202", 14))
+						/* active low */
+						irq_attr.polarity = 1;
+					else if (irq == 41)
+						/* fast_int_1 */
+						irq_attr.polarity = 1;
+					else
+						/* active high */
+						irq_attr.polarity = 0;
+				} else {
+					/* PNW and CLV go with active low */
+					irq_attr.polarity = 1;
+				}
+				io_apic_set_pci_routing(NULL, irq, &irq_attr);
+			} else {
+				pr_info("APIC entry not found for: name=%s, irq=%d, ioapic=%d\n",
+					pentry->name, irq, ioapic);
+			}
+		}
+		dev = get_device_id(pentry->type, pentry->name);
+
+		if ((dev == NULL) || (dev->get_platform_data == NULL))
+			continue;
+
+		if (dev->device_handler) {
+			dev->device_handler(pentry, dev);
+		} else {
+			switch (pentry->type) {
+			case SFI_DEV_TYPE_IPC:
+				sfi_handle_ipc_dev(pentry, dev);
+				break;
+			case SFI_DEV_TYPE_SPI:
+				sfi_handle_spi_dev(pentry, dev);
+				break;
+			case SFI_DEV_TYPE_I2C:
+				sfi_handle_i2c_dev(pentry, dev);
+				break;
+			case SFI_DEV_TYPE_SD:
+				sfi_handle_sd_dev(pentry, dev);
+				break;
+			case SFI_DEV_TYPE_HSI:
+			case SFI_DEV_TYPE_UART:
+			default:
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int __init sfi_parse_oemb(struct sfi_table_header *table)
+{
+	struct sfi_table_oemb *oemb;
+	u32 board_id;
+	u8 sig[SFI_SIGNATURE_SIZE + 1] = {'\0'};
+	u8 oem_id[SFI_OEM_ID_SIZE + 1] = {'\0'};
+	u8 oem_table_id[SFI_OEM_TABLE_ID_SIZE + 1] = {'\0'};
+
+	oemb = (struct sfi_table_oemb *) table;
+	if (!oemb) {
+		pr_err("%s: failed to read SFI OEMB layout\n",
+			__func__);
+		return -ENODEV;
+	}
+
+	board_id = oemb->board_id | (oemb->board_fab << 4);
+
+	snprintf(sig, (SFI_SIGNATURE_SIZE + 1), "%s", oemb->header.sig);
+	snprintf(oem_id, (SFI_OEM_ID_SIZE + 1), "%s", oemb->header.oem_id);
+	snprintf(oem_table_id, (SFI_OEM_TABLE_ID_SIZE + 1), "%s",
+		 oemb->header.oem_table_id);
+	pr_info("SFI OEMB Layout\n");
+	pr_info("\tOEMB signature               : %s\n"
+		"\tOEMB length                  : %d\n"
+		"\tOEMB revision                : %d\n"
+		"\tOEMB checksum                : 0x%X\n"
+		"\tOEMB oem_id                  : %s\n"
+		"\tOEMB oem_table_id            : %s\n"
+		"\tOEMB board_id                : 0x%02X\n"
+		"\tOEMB iafw version            : %03d.%03d\n"
+		"\tOEMB val_hooks version       : %03d.%03d\n"
+		"\tOEMB ia suppfw version       : %03d.%03d\n"
+		"\tOEMB scu runtime version     : %03d.%03d\n"
+		"\tOEMB ifwi version            : %03d.%03d\n",
+		sig,
+		oemb->header.len,
+		oemb->header.rev,
+		oemb->header.csum,
+		oem_id,
+		oem_table_id,
+		board_id,
+		oemb->iafw_major_version,
+		oemb->iafw_main_version,
+		oemb->val_hooks_major_version,
+		oemb->val_hooks_minor_version,
+		oemb->ia_suppfw_major_version,
+		oemb->ia_suppfw_minor_version,
+		oemb->scu_runtime_major_version,
+		oemb->scu_runtime_minor_version,
+		oemb->ifwi_major_version,
+		oemb->ifwi_minor_version
+		);
+	return 0;
+}
+
+static int __init intel_mid_platform_init(void)
+{
+	/* Get SFI OEMB Layout */
+	sfi_table_parse(SFI_SIG_OEMB, NULL, NULL, sfi_parse_oemb);
+	sfi_table_parse(SFI_SIG_GPIO, NULL, NULL, sfi_parse_gpio);
+	sfi_table_parse(SFI_SIG_DEVS, NULL, NULL, sfi_parse_devs);
+
+	return 0;
+}
+arch_initcall(intel_mid_platform_init);
diff --git a/arch/x86/platform/intel-mid/intel_mid_vrtc.c b/arch/x86/platform/intel-mid/intel_mid_vrtc.c
new file mode 100644
index 0000000..80f3edd
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_mid_vrtc.c
@@ -0,0 +1,169 @@
+/*
+ * intel_mid_vrtc.c: Driver for virtual RTC device on Intel MID platform
+ *
+ * (C) Copyright 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ *
+ * Note:
+ * VRTC is emulated by system controller firmware, the real HW
+ * RTC is located in the PMIC device. SCU FW shadows PMIC RTC
+ * in a memory mapped IO space that is visible to the host IA
+ * processor.
+ *
+ * This driver is based on RTC CMOS driver.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/platform_device.h>
+
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_vrtc.h>
+#include <asm/time.h>
+#include <asm/fixmap.h>
+
+static unsigned char __iomem *vrtc_virt_base;
+
+unsigned char vrtc_cmos_read(unsigned char reg)
+{
+	unsigned char retval;
+
+	/* vRTC's registers range from 0x0 to 0xD */
+	if (reg > 0xd || !vrtc_virt_base)
+		return 0xff;
+
+	lock_cmos_prefix(reg);
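+	/* vRTC registers are mapped at a 4-byte stride, hence the reg << 2 */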
+	retval = __raw_readb(vrtc_virt_base + (reg << 2));
+	lock_cmos_suffix(reg);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(vrtc_cmos_read);
+
+void vrtc_cmos_write(unsigned char val, unsigned char reg)
+{
+	if (reg > 0xd || !vrtc_virt_base)
+		return;
+
+	lock_cmos_prefix(reg);
+	__raw_writeb(val, vrtc_virt_base + (reg << 2));
+	lock_cmos_suffix(reg);
+}
+EXPORT_SYMBOL_GPL(vrtc_cmos_write);
+
+unsigned long vrtc_get_time(void)
+{
+	u8 sec, min, hour, mday, mon;
+	unsigned long flags;
+	u32 year;
+
+	spin_lock_irqsave(&rtc_lock, flags);
+
+	while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP))
+		cpu_relax();
+
+	sec = vrtc_cmos_read(RTC_SECONDS);
+	min = vrtc_cmos_read(RTC_MINUTES);
+	hour = vrtc_cmos_read(RTC_HOURS);
+	mday = vrtc_cmos_read(RTC_DAY_OF_MONTH);
+	mon = vrtc_cmos_read(RTC_MONTH);
+	year = vrtc_cmos_read(RTC_YEAR);
+
+	spin_unlock_irqrestore(&rtc_lock, flags);
+
+	/* vRTC YEAR reg contains the offset to 1972 */
+	year += 1972;
+
+	pr_info("vRTC: sec: %d min: %d hour: %d day: %d mon: %d year: %d\n",
+		sec, min, hour, mday, mon, year);
+
+	return mktime(year, mon, mday, hour, min, sec);
+}
+
+/* Only care about the minutes and seconds */
+int vrtc_set_mmss(unsigned long nowtime)
+{
+	int real_sec, real_min;
+	unsigned long flags;
+	int vrtc_min;
+
+	spin_lock_irqsave(&rtc_lock, flags);
+	vrtc_min = vrtc_cmos_read(RTC_MINUTES);
+
+	real_sec = nowtime % 60;
+	real_min = nowtime / 60;
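+	/*
+	 * Only minutes and seconds are written back, so compensate for a
+	 * possible half-hour zone offset without touching the hour register
+	 * (same approach as the legacy CMOS set_rtc_mmss path).
+	 */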
+	if (((abs(real_min - vrtc_min) + 15)/30) & 1)
+		real_min += 30;
+	real_min %= 60;
+
+	vrtc_cmos_write(real_sec, RTC_SECONDS);
+	vrtc_cmos_write(real_min, RTC_MINUTES);
+	spin_unlock_irqrestore(&rtc_lock, flags);
+
+	return 0;
+}
+
+void __init intel_mid_rtc_init(void)
+{
+	unsigned long vrtc_paddr;
+
+	sfi_table_parse(SFI_SIG_MRTC, NULL, NULL, sfi_parse_mrtc);
+
+	vrtc_paddr = sfi_mrtc_array[0].phys_addr;
+	if (!sfi_mrtc_num || !vrtc_paddr)
+		return;
+
+	vrtc_virt_base = (void __iomem *)set_fixmap_offset_nocache(FIX_LNW_VRTC,
+								vrtc_paddr);
+	x86_platform.get_wallclock = vrtc_get_time;
+	x86_platform.set_wallclock = vrtc_set_mmss;
+}
+
+/*
+ * The Moorestown platform has a memory mapped virtual RTC device that emulates
+ * the programming interface of the RTC.
+ */
+
+static struct resource vrtc_resources[] = {
+	[0] = {
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.flags	= IORESOURCE_IRQ,
+	}
+};
+
+static struct platform_device vrtc_device = {
+	.name		= "rtc_mrst",
+	.id		= -1,
+	.resource	= vrtc_resources,
+	.num_resources	= ARRAY_SIZE(vrtc_resources),
+};
+
+/* Register the RTC device if appropriate */
+static int __init intel_mid_device_create(void)
+{
+	/* No Moorestown, no device */
+	if (!intel_mid_identify_cpu())
+		return -ENODEV;
+	/* No timer, no device */
+	if (!sfi_mrtc_num)
+		return -ENODEV;
+
+	/* iomem resource */
+	vrtc_resources[0].start = sfi_mrtc_array[0].phys_addr;
+	vrtc_resources[0].end = sfi_mrtc_array[0].phys_addr +
+				MRST_VRTC_MAP_SZ;
+	/* irq resource */
+	vrtc_resources[1].start = sfi_mrtc_array[0].irq;
+	vrtc_resources[1].end = sfi_mrtc_array[0].irq;
+
+	return platform_device_register(&vrtc_device);
+}
+
+module_init(intel_mid_device_create);
diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
new file mode 100644
index 0000000..015bf42
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
@@ -0,0 +1,22 @@
+/*
+ * intel_mid_weak_decls.h: Weak declarations of intel-mid.c
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+
+/* __attribute__((weak)) makes these declarations overridable */
+extern struct devs_id __initconst device_ids[] __attribute__((weak));
+/*
+ * For every CPU addition a new get_<cpuname>_ops interface needs
+ * to be added.
+ */
+extern void * __init get_penwell_ops(void) __attribute__((weak));
+extern void * __init get_cloverview_ops(void) __attribute__((weak));
+extern void * __init get_tangier_ops(void) __attribute__((weak));
+extern void * __init get_anniedale_ops(void) __attribute__((weak));
diff --git a/arch/x86/platform/intel-mid/intel_soc_clv.c b/arch/x86/platform/intel-mid/intel_soc_clv.c
new file mode 100644
index 0000000..76303b1
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_soc_clv.c
@@ -0,0 +1,146 @@
+/*
+ * intel_soc_clv.c - This driver provides utility APIs for the
+ * Cloverview platform
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include "intel_soc_pmu.h"
+
+
+static unsigned short fastonoff_flag;
+
+static ssize_t fastonoff_show(struct kobject *kobj,
+					struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%hu\n", fastonoff_flag);
+}
+
+static ssize_t fastonoff_store(struct kobject *kobj,
+			struct kobj_attribute *attr, const char *buf, size_t n)
+{
+	unsigned short value;
+	if (sscanf(buf, "%hu", &value) != 1 ||
+	    (value != 0 && value != 1)) {
+		printk(KERN_ERR "fastonoff_store: Invalid value\n");
+		return -EINVAL;
+	}
+	fastonoff_flag = value;
+	return n;
+}
+
+static struct kobj_attribute fast_onoff_attr =
+	__ATTR(fastonoff, 0644, fastonoff_show, fastonoff_store);
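+
+/*
+ * Example (illustrative): once this attribute is registered on power_kobj,
+ * user space selects the fast on/off S3 path with:
+ *
+ *	echo 1 > /sys/power/fastonoff
+ */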
+
+
+static void clv_init_sysfsfs(void)
+{
+	int error;
+	error = sysfs_create_file(power_kobj, &fast_onoff_attr.attr);
+	if (error)
+		printk(KERN_ERR "sysfs_create_file failed: %d\n", error);
+}
+
+static int clv_pmu_init(void)
+{
+	mid_pmu_cxt->s3_hint = C6_HINT;
+	clv_init_sysfsfs();
+	return 0;
+}
+
+static bool clv_pmu_enter(int s0ix_state)
+{
+	u32 s0ix_value;
+	int num_retry = PMU_MISC_SET_TIMEOUT;
+
+	if (fastonoff_flag && (s0ix_state == MID_S3_STATE))
+		s0ix_value = get_s0ix_val_set_pm_ssc(MID_FAST_ON_OFF_STATE);
+	else
+		s0ix_value = get_s0ix_val_set_pm_ssc(s0ix_state);
+
+	/* issue a command to SCU */
+	pmu_set_interrupt_enable();
+	writel(s0ix_value, &mid_pmu_cxt->pmu_reg->pm_cmd);
+
+	do {
+		if (readl(&mid_pmu_cxt->pmu_reg->pm_msic))
+			break;
+		udelay(1);
+	} while (--num_retry);
+
+	if (!num_retry && !readl(&mid_pmu_cxt->pmu_reg->pm_msic))
+		WARN(1, "%s: pm_msic not set.\n", __func__);
+
+	mid_pmu_cxt->s0ix_entered = s0ix_state;
+
+	return true;
+}
+
+static void clv_pmu_remove(void)
+{
+	/* Place holder */
+}
+
+static void clv_pmu_wakeup(void)
+{
+	/* Wake up all other CPUs */
+	if (mid_pmu_cxt->s0ix_entered)
+		apic->send_IPI_allbutself(RESCHEDULE_VECTOR);
+}
+
+static pci_power_t clv_pmu_choose_state(int device_lss)
+{
+	pci_power_t state;
+
+	switch (device_lss) {
+	case PMU_SECURITY_LSS_04:
+		state = PCI_D2;
+		break;
+
+	case PMU_USB_OTG_LSS_06:
+	case PMU_USB_HSIC_LSS_07:
+	case PMU_UART2_LSS_41:
+		state = PCI_D1;
+		break;
+
+	default:
+		state = PCI_D3hot;
+		break;
+	}
+
+	return state;
+}
+
+/**
+ * platform_set_pmu_ops - Set the global pmu method table to the
+ * Cloverview implementation.
+ */
+void platform_set_pmu_ops(void)
+{
+	pmu_ops = &clv_pmu_ops;
+}
+
+struct platform_pmu_ops clv_pmu_ops = {
+	.init		= clv_pmu_init,
+	.enter		= clv_pmu_enter,
+	.wakeup		= clv_pmu_wakeup,
+	.remove		= clv_pmu_remove,
+	.pci_choose_state = clv_pmu_choose_state,
+	.set_power_state_ops = pmu_set_s0ix_possible,
+	.set_s0ix_complete = s0ix_complete,
+	.nc_set_power_state = mdfld_clv_nc_set_power_state,
+};
diff --git a/arch/x86/platform/intel-mid/intel_soc_clv.h b/arch/x86/platform/intel-mid/intel_soc_clv.h
new file mode 100644
index 0000000..0b8294e
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_soc_clv.h
@@ -0,0 +1,352 @@
+/*
+ * intel_soc_clv.h
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifdef CONFIG_REMOVEME_INTEL_ATOM_CLV_POWER
+
+#define   PM_SUPPORT				0x21
+
+#define ISP_POS			7
+#define ISP_SUB_CLASS		0x80
+#define PMU_MISC_SET_TIMEOUT	15000
+
+#define PMU1_MAX_DEVS   8
+#define PMU2_MAX_DEVS   55
+
+#define GFX_LSS_INDEX			1
+#define PMU_SDIO0_LSS_00		0
+#define PMU_EMMC0_LSS_01		1
+#define PMU_AONT_LSS_02			2
+#define PMU_HSI_LSS_03			3
+#define PMU_SECURITY_LSS_04		4
+#define PMU_EMMC1_LSS_05		5
+#define PMU_USB_OTG_LSS_06		6
+#define PMU_USB_HSIC_LSS_07		7
+#define PMU_AUDIO_ENGINE_LSS_08		8
+#define PMU_AUDIO_DMA_LSS_09		9
+#define PMU_SRAM_LSS_10			10
+#define PMU_SRAM_LSS_11			11
+#define PMU_SRAM_LSS_12			12
+#define PMU_SRAM_LSS_13			13
+#define PMU_SDIO2_LSS_14		14
+#define PMU_PTI_DAFCA_LSS_15		15
+#define PMU_SC_DMA_LSS_16		16
+#define PMU_SPIO_LSS_17			17
+#define PMU_SPI1_LSS_18			18
+#define PMU_SPI2_LSS_19			19
+#define PMU_I2C0_LSS_20			20
+#define PMU_I2C1_LSS_21			21
+#define PMU_HPET_LSS_22			22
+#define PMU_EXTTMR_LSS_23		23
+#define PMU_SC_FABRIC_LSS_24		24
+#define PMU_AUDIO_RAM_LSS_25		25
+#define PMU_SCU_ROM_LSS_26		26
+#define PMU_I2C2_LSS_27			27
+#define PMU_SSC_LSS_28			28
+#define PMU_SECURITY_LSS_29		29
+#define PMU_SDIO1_LSS_30		30
+#define PMU_vRTC_LSS_31			31
+#define PMU_SEC_TIMER_LSS_32		32
+#define PMU_I2C3_LSS_33			33
+#define PMU_I2C4_LSS_34			34
+#define PMU_I2C5_LSS_35			35
+#define PMU_SPI3_LSS_36			36
+#define PMU_GPIO1_LSS_37		37
+#define PMU_PWR_BUTTON_LSS_38		38
+#define PMU_GPIO0_LSS_39		39
+#define PMU_KEYBRD_LSS_40		40
+#define PMU_UART2_LSS_41		41
+#define PMU_ADC_LSS_42			42
+#define PMU_CHARGER_LSS_43		43
+#define PMU_SEC_TAPC_LSS_44		44
+#define PMU_RTC_LSS_45			45
+#define PMU_GPI_LSS_46			46
+#define PMU_BCU_LSS_47			47
+#define PMU_SSP2_LSS_48			48
+#define PMU_AUDIO_SLIM1_LSS_49		49
+#define PMU_AUDIO_SLIM2_LSS_50		50
+#define PMU_AUDIO_SSP0_LSS_51		51
+#define PMU_AUDIO_SSP1_LSS_52		52
+#define PMU_IOSF_OCP_BRG_LSS_53		53
+#define PMU_GP_DMA_LSS_54		54
+#define PMU_MSIC_RESET_LSS_55		55
+#define PMU_SOC_FUSE_LSS_56		56
+#define PMU_RSVD3_LSS_57		57
+#define PMU_SSP4_LSS_58			58
+#define PMU_RSVD5_LSS_59		59
+#define PMU_RSVD6_LSS_60		60
+#define PMU_RSVD7_LSS_61		61
+#define PMU_RSVD8_LSS_62		62
+#define PMU_RSVD9_LSS_63		63
+
+#define PMU_MAX_LSS			63
+#define PMU_LSS_IN_FIRST_DWORD		32
+
+#define EMMC0_LSS			PMU_EMMC0_LSS_01
+
+#define S0IX_TARGET_SSS0_MASK ( \
+	SSMSK(D0I3_MASK, PMU_SDIO0_LSS_00) | \
+	SSMSK(D0I3_MASK, PMU_EMMC0_LSS_01) | \
+	SSMSK(D0I3_MASK, PMU_HSI_LSS_03) | \
+	SSMSK(D0I3_MASK, PMU_SECURITY_LSS_04) | \
+	SSMSK(D0I3_MASK, PMU_EMMC1_LSS_05) | \
+	SSMSK(D0I3_MASK, PMU_USB_OTG_LSS_06) | \
+	SSMSK(D0I3_MASK, PMU_USB_HSIC_LSS_07) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_ENGINE_LSS_08) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_DMA_LSS_09) | \
+	SSMSK(D0I3_MASK, PMU_SDIO2_LSS_14))
+
+#define S0IX_TARGET_SSS1_MASK ( \
+	SSMSK(D0I3_MASK, PMU_SPI1_LSS_18-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C0_LSS_20-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C1_LSS_21-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C2_LSS_27-16) | \
+	SSMSK(D0I3_MASK, PMU_SDIO1_LSS_30-16))
+#define S0IX_TARGET_SSS2_MASK ( \
+	SSMSK(D0I3_MASK, PMU_I2C3_LSS_33-32) | \
+	SSMSK(D0I3_MASK, PMU_I2C4_LSS_34-32) | \
+	SSMSK(D0I3_MASK, PMU_I2C5_LSS_35-32) | \
+	SSMSK(D0I3_MASK, PMU_SPI3_LSS_36-32) | \
+	SSMSK(D0I3_MASK, PMU_UART2_LSS_41-32))
+
+#define S0IX_TARGET_SSS3_MASK ( \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SSP0_LSS_51-48) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SSP1_LSS_52-48))
+
+#define S0IX_TARGET_SSS0 ( \
+	SSMSK(D0I3_MASK, PMU_SDIO0_LSS_00) | \
+	SSMSK(D0I3_MASK, PMU_EMMC0_LSS_01) | \
+	SSMSK(D0I3_MASK, PMU_HSI_LSS_03) | \
+	SSMSK(D0I2_MASK, PMU_SECURITY_LSS_04) | \
+	SSMSK(D0I3_MASK, PMU_EMMC1_LSS_05) | \
+	SSMSK(D0I1_MASK, PMU_USB_OTG_LSS_06) | \
+	SSMSK(D0I1_MASK, PMU_USB_HSIC_LSS_07) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_ENGINE_LSS_08) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_DMA_LSS_09) | \
+	SSMSK(D0I3_MASK, PMU_SDIO2_LSS_14))
+
+#define S0IX_TARGET_SSS1 ( \
+	SSMSK(D0I3_MASK, PMU_SPI1_LSS_18-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C0_LSS_20-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C1_LSS_21-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C2_LSS_27-16) | \
+	SSMSK(D0I3_MASK, PMU_SDIO1_LSS_30-16))
+
+#define S0IX_TARGET_SSS2 ( \
+	SSMSK(D0I3_MASK, PMU_I2C3_LSS_33-32) | \
+	SSMSK(D0I3_MASK, PMU_I2C4_LSS_34-32) | \
+	SSMSK(D0I3_MASK, PMU_I2C5_LSS_35-32) | \
+	SSMSK(D0I3_MASK, PMU_SPI3_LSS_36-32) | \
+	SSMSK(D0I1_MASK, PMU_UART2_LSS_41-32))
+
+#define S0IX_TARGET_SSS3 ( \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SSP0_LSS_51-48) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SSP1_LSS_52-48))
+
+#define LPMP3_TARGET_SSS0_MASK ( \
+	SSMSK(D0I3_MASK, PMU_SDIO0_LSS_00) | \
+	SSMSK(D0I3_MASK, PMU_EMMC0_LSS_01) | \
+	SSMSK(D0I3_MASK, PMU_HSI_LSS_03) | \
+	SSMSK(D0I3_MASK, PMU_SECURITY_LSS_04) | \
+	SSMSK(D0I3_MASK, PMU_EMMC1_LSS_05) | \
+	SSMSK(D0I3_MASK, PMU_USB_OTG_LSS_06) | \
+	SSMSK(D0I3_MASK, PMU_USB_HSIC_LSS_07) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_ENGINE_LSS_08) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_DMA_LSS_09) | \
+	SSMSK(D0I3_MASK, PMU_SDIO2_LSS_14))
+
+#define LPMP3_TARGET_SSS1_MASK ( \
+	SSMSK(D0I3_MASK, PMU_SPI1_LSS_18-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C0_LSS_20-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C1_LSS_21-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C2_LSS_27-16) | \
+	SSMSK(D0I3_MASK, PMU_SDIO1_LSS_30-16))
+
+#define LPMP3_TARGET_SSS2_MASK ( \
+	SSMSK(D0I3_MASK, PMU_I2C3_LSS_33-32) | \
+	SSMSK(D0I3_MASK, PMU_I2C4_LSS_34-32) | \
+	SSMSK(D0I3_MASK, PMU_I2C5_LSS_35-32) | \
+	SSMSK(D0I3_MASK, PMU_SPI3_LSS_36-32) | \
+	SSMSK(D0I3_MASK, PMU_UART2_LSS_41-32))
+
+#define LPMP3_TARGET_SSS3_MASK ( \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SSP0_LSS_51-48) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SSP1_LSS_52-48))
+
+#define LPMP3_TARGET_SSS0 ( \
+	SSMSK(D0I3_MASK, PMU_SDIO0_LSS_00) | \
+	SSMSK(D0I3_MASK, PMU_EMMC0_LSS_01) | \
+	SSMSK(D0I3_MASK, PMU_HSI_LSS_03) | \
+	SSMSK(D0I2_MASK, PMU_SECURITY_LSS_04) | \
+	SSMSK(D0I3_MASK, PMU_EMMC1_LSS_05) | \
+	SSMSK(D0I1_MASK, PMU_USB_OTG_LSS_06) | \
+	SSMSK(D0I1_MASK, PMU_USB_HSIC_LSS_07) | \
+	SSMSK(D0I0_MASK, PMU_AUDIO_ENGINE_LSS_08) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_DMA_LSS_09) | \
+	SSMSK(D0I3_MASK, PMU_SDIO2_LSS_14))
+
+#define LPMP3_TARGET_SSS1 ( \
+	SSMSK(D0I3_MASK, PMU_SPI1_LSS_18-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C0_LSS_20-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C1_LSS_21-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C2_LSS_27-16) | \
+	SSMSK(D0I3_MASK, PMU_SDIO1_LSS_30-16))
+
+#define LPMP3_TARGET_SSS2 ( \
+	SSMSK(D0I3_MASK, PMU_I2C3_LSS_33-32) | \
+	SSMSK(D0I3_MASK, PMU_I2C4_LSS_34-32) | \
+	SSMSK(D0I3_MASK, PMU_I2C5_LSS_35-32) | \
+	SSMSK(D0I3_MASK, PMU_SPI3_LSS_36-32) | \
+	SSMSK(D0I1_MASK, PMU_UART2_LSS_41-32))
+
+#define LPMP3_TARGET_SSS3 ( \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SSP0_LSS_51-48) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SSP1_LSS_52-48))
+
+#define IGNORE_SSS0 ( \
+	SSMSK(D0I3_MASK, PMU_SRAM_LSS_10) | \
+	SSMSK(D0I3_MASK, PMU_SRAM_LSS_11) | \
+	SSMSK(D0I3_MASK, PMU_SRAM_LSS_12) | \
+	SSMSK(D0I3_MASK, PMU_SRAM_LSS_13) | \
+	SSMSK(D0I3_MASK, PMU_PTI_DAFCA_LSS_15))
+
+#define IGNORE_SSS1 ( \
+	SSMSK(D0I3_MASK, PMU_SC_DMA_LSS_16-16) | \
+	SSMSK(D0I3_MASK, PMU_SPIO_LSS_17-16) | \
+	SSMSK(D0I3_MASK, PMU_HPET_LSS_22-16) | \
+	SSMSK(D0I3_MASK, PMU_EXTTMR_LSS_23-16) | \
+	SSMSK(D0I3_MASK, PMU_SC_FABRIC_LSS_24-16) | \
+	SSMSK(D0I3_MASK, PMU_SCU_ROM_LSS_26-16) | \
+	SSMSK(D0I3_MASK, PMU_SSC_LSS_28-16) | \
+	SSMSK(D0I3_MASK, PMU_SECURITY_LSS_29-16) | \
+	SSMSK(D0I3_MASK, PMU_vRTC_LSS_31-16))
+
+#define IGNORE_SSS2 ( \
+	SSMSK(D0I3_MASK, PMU_SEC_TIMER_LSS_32-32) | \
+	SSMSK(D0I3_MASK, PMU_GPIO1_LSS_37-32) | \
+	SSMSK(D0I3_MASK, PMU_PWR_BUTTON_LSS_38-32) | \
+	SSMSK(D0I3_MASK, PMU_GPIO0_LSS_39-32) | \
+	SSMSK(D0I3_MASK, PMU_ADC_LSS_42-32) | \
+	SSMSK(D0I3_MASK, PMU_CHARGER_LSS_43-32) | \
+	SSMSK(D0I3_MASK, PMU_SEC_TAPC_LSS_44-32) | \
+	SSMSK(D0I3_MASK, PMU_RTC_LSS_45-32) | \
+	SSMSK(D0I3_MASK, PMU_GPI_LSS_46-32) | \
+	SSMSK(D0I3_MASK, PMU_BCU_LSS_47-32))
+
+#define IGNORE_SSS3 ( \
+	SSMSK(D0I3_MASK, PMU_IOSF_OCP_BRG_LSS_53-48) | \
+	SSMSK(D0I3_MASK, PMU_MSIC_RESET_LSS_55-48) | \
+	SSMSK(D0I3_MASK, PMU_SOC_FUSE_LSS_56-48) | \
+	SSMSK(D0I3_MASK, PMU_RSVD3_LSS_57-48) | \
+	SSMSK(D0I3_MASK, PMU_SSP4_LSS_58-48) | \
+	SSMSK(D0I3_MASK, PMU_RSVD5_LSS_59-48) | \
+	SSMSK(D0I3_MASK, PMU_RSVD6_LSS_60-48) | \
+	SSMSK(D0I3_MASK, PMU_RSVD7_LSS_61-48) | \
+	SSMSK(D0I3_MASK, PMU_RSVD8_LSS_62-48) | \
+	SSMSK(D0I3_MASK, PMU_RSVD9_LSS_63-48))
+
+#define IGNORE_S3_WKC0 SSWKC(PMU_AONT_LSS_02)
+#define IGNORE_S3_WKC1 SSWKC(PMU_ADC_LSS_42-32)
+
+/* FIXME:: CVT Platform gives SRAM Error if SRAM is put in D0i3 */
+#define S0I3_SSS0 ( \
+	SSMSK(D0I3_MASK, PMU_SDIO0_LSS_00) | \
+	SSMSK(D0I3_MASK, PMU_EMMC0_LSS_01) | \
+	SSMSK(D0I3_MASK, PMU_AONT_LSS_02) | \
+	SSMSK(D0I3_MASK, PMU_HSI_LSS_03) | \
+	SSMSK(D0I2_MASK, PMU_SECURITY_LSS_04) | \
+	SSMSK(D0I3_MASK, PMU_EMMC1_LSS_05) | \
+	SSMSK(D0I1_MASK, PMU_USB_OTG_LSS_06) | \
+	SSMSK(D0I1_MASK, PMU_USB_HSIC_LSS_07) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_ENGINE_LSS_08) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_DMA_LSS_09) | \
+	SSMSK(D0I3_MASK, PMU_SDIO2_LSS_14))
+
+#define S0I3_SSS1 ( \
+	SSMSK(D0I3_MASK, PMU_SPI1_LSS_18-16) | \
+	SSMSK(D0I3_MASK, PMU_SPI2_LSS_19-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C0_LSS_20-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C1_LSS_21-16) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_RAM_LSS_25-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C2_LSS_27-16) | \
+	SSMSK(D0I3_MASK, PMU_SDIO1_LSS_30-16))
+
+#define S0I3_SSS2 ( \
+	SSMSK(D0I3_MASK, PMU_I2C3_LSS_33-32) | \
+	SSMSK(D0I3_MASK, PMU_I2C4_LSS_34-32) | \
+	SSMSK(D0I3_MASK, PMU_I2C5_LSS_35-32) | \
+	SSMSK(D0I3_MASK, PMU_SPI3_LSS_36-32) | \
+	SSMSK(D0I3_MASK, PMU_GPIO1_LSS_37-32) | \
+	SSMSK(D0I3_MASK, PMU_PWR_BUTTON_LSS_38-32) | \
+	SSMSK(D0I3_MASK, PMU_KEYBRD_LSS_40-32) | \
+	SSMSK(D0I1_MASK, PMU_UART2_LSS_41-32))
+
+#define S0I3_SSS3 ( \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SLIM1_LSS_49-48) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SLIM2_LSS_50-48) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SSP0_LSS_51-48) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SSP1_LSS_52-48) | \
+	SSMSK(D0I3_MASK, PMU_GP_DMA_LSS_54-48))
+
+#define S0I1_SSS0 S0I3_SSS0
+#define S0I1_SSS1 S0I3_SSS1
+#define S0I1_SSS2 S0I3_SSS2
+#define S0I1_SSS3 S0I3_SSS3
+
+#define LPMP3_SSS0 ( \
+	SSMSK(D0I3_MASK, PMU_SDIO0_LSS_00) | \
+	SSMSK(D0I3_MASK, PMU_EMMC0_LSS_01) | \
+	SSMSK(D0I3_MASK, PMU_AONT_LSS_02) | \
+	SSMSK(D0I3_MASK, PMU_HSI_LSS_03) | \
+	SSMSK(D0I2_MASK, PMU_SECURITY_LSS_04) | \
+	SSMSK(D0I3_MASK, PMU_EMMC1_LSS_05) | \
+	SSMSK(D0I1_MASK, PMU_USB_OTG_LSS_06) | \
+	SSMSK(D0I1_MASK, PMU_USB_HSIC_LSS_07) | \
+	SSMSK(D0I3_MASK, PMU_SDIO2_LSS_14))
+
+#define LPMP3_SSS1 ( \
+	SSMSK(D0I3_MASK, PMU_SPI1_LSS_18-16) | \
+	SSMSK(D0I3_MASK, PMU_SPI2_LSS_19-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C0_LSS_20-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C1_LSS_21-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C2_LSS_27-16) | \
+	SSMSK(D0I3_MASK, PMU_SDIO1_LSS_30-16))
+
+#define LPMP3_SSS2 ( \
+	SSMSK(D0I3_MASK, PMU_I2C3_LSS_33-32) | \
+	SSMSK(D0I3_MASK, PMU_I2C4_LSS_34-32) | \
+	SSMSK(D0I3_MASK, PMU_I2C5_LSS_35-32) | \
+	SSMSK(D0I3_MASK, PMU_SPI3_LSS_36-32) | \
+	SSMSK(D0I3_MASK, PMU_GPIO1_LSS_37-32) | \
+	SSMSK(D0I3_MASK, PMU_PWR_BUTTON_LSS_38-32) | \
+	SSMSK(D0I3_MASK, PMU_KEYBRD_LSS_40-32) | \
+	SSMSK(D0I1_MASK, PMU_UART2_LSS_41-32))
+
+#define LPMP3_SSS3 ( \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SLIM1_LSS_49-48) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SLIM2_LSS_50-48) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SSP0_LSS_51-48) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SSP1_LSS_52-48) | \
+	SSMSK(D0I3_MASK, PMU_GP_DMA_LSS_54-48))
+
+extern void pmu_set_s0ix_possible(int state);
+extern void log_wakeup_irq(void);
+extern void s0ix_complete(void);
+extern int mdfld_clv_nc_set_power_state(int, int, int, int *);
+
+#endif
diff --git a/arch/x86/platform/intel-mid/intel_soc_debug.c b/arch/x86/platform/intel-mid/intel_soc_debug.c
new file mode 100644
index 0000000..d824e59
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_soc_debug.c
@@ -0,0 +1,202 @@
+/*
+ * intel_soc_debug.c - This driver provides utility debug APIs
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_soc_debug.h>
+
+/* This module currently only supports Intel Tangier
+ * and Anniedale SOCs (CONFIG_INTEL_DEBUG_FEATURE will
+ * only be set in i386_mrfl_defconfig and i386_moor_defconfig).
+ * In addition, a platform check is done in soc_debug_init()
+ * to make sure that this module is only used by appropriate
+ * platforms.
+ */
+#define PGRR_BASE           0xff03a0bc
+#define MAX_MODE_NUMBER     9
+#define MAX_DEBUG_NUMBER    5
+
+static struct dentry *dfs_entry;
+
+enum pgrr_mode {
+	manufacturing_mode = 0x0F,
+	production_mode = 0x07,
+	intel_production_mode = 0x04,
+	oem_production_mode = 0x05,
+	gfx_production_mode = 0x0E,
+	end_user_mode = 0x0B,
+	intel_end_user_mode = 0x08,
+	rma_mode = 0x03,
+	permanent_mode = 0x00
+};
+
+static struct debug_mode {
+	enum pgrr_mode mode;
+	u32 bitmask;
+	char *name;
+} asset_array[] = {
+	{ manufacturing_mode,
+	  DEBUG_FEATURE_PTI | DEBUG_FEATURE_RTIT | DEBUG_FEATURE_USB3DFX |
+	  DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+	  "ManufacturingMode",
+	},
+	{ production_mode,
+	  DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+	  "ProductionMode",
+	},
+	{ intel_production_mode,
+	  DEBUG_FEATURE_PTI | DEBUG_FEATURE_RTIT | DEBUG_FEATURE_USB3DFX |
+	  DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+	  "IntelProductionMode",
+	},
+	{ oem_production_mode,
+	  DEBUG_FEATURE_PTI | DEBUG_FEATURE_RTIT | DEBUG_FEATURE_USB3DFX |
+	  DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+	  "OemProductionMode",
+	},
+	{ gfx_production_mode,
+	  DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+	  "GfxProductionMode",
+	},
+	{ intel_end_user_mode,
+	  DEBUG_FEATURE_PTI | DEBUG_FEATURE_RTIT | DEBUG_FEATURE_USB3DFX |
+	  DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+	  "IntelEndUserMode",
+	},
+	{ end_user_mode,
+	  DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+	  "EndUserMode",
+	},
+	{ rma_mode,
+	  DEBUG_FEATURE_PTI | DEBUG_FEATURE_RTIT | DEBUG_FEATURE_USB3DFX |
+	  DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+	  "RmaMode",
+	},
+	{ permanent_mode,
+	  DEBUG_FEATURE_SOCHAPS | DEBUG_FEATURE_LAKEMORE,
+	  "PermanentMode",
+	}
+};
+
+static int debug_mode_idx; /* index in asset_array */
+
+static struct debug_feature {
+	u32 bit;
+	char *name;
+} debug_feature_array[] = {
+	{ DEBUG_FEATURE_PTI,
+	  "PTI",
+	},
+	{ DEBUG_FEATURE_RTIT,
+	  "RTIT",
+	},
+	{ DEBUG_FEATURE_LAKEMORE,
+	  "LAKEMORE",
+	},
+	{ DEBUG_FEATURE_SOCHAPS,
+	  "SOCHAPS",
+	},
+	{ DEBUG_FEATURE_USB3DFX,
+	  "USB3DFX",
+	},
+};
+
+int cpu_has_debug_feature(u32 bit)
+{
+	if (asset_array[debug_mode_idx].bitmask & bit)
+		return 1;
+
+	return 0;
+}
+EXPORT_SYMBOL(cpu_has_debug_feature);
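+
+/*
+ * Example (illustrative): callers gate debug plumbing on the fused
+ * profile, e.g.:
+ *
+ *	if (cpu_has_debug_feature(DEBUG_FEATURE_PTI))
+ *		setup_pti_tracing();	(hypothetical helper)
+ */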
+
+static int show_debug_feature(struct seq_file *s, void *unused)
+{
+	int i = 0;
+
+	if (debug_mode_idx >= 0 && (debug_mode_idx < MAX_MODE_NUMBER)) {
+		seq_printf(s, "Profile: %s\n",
+			   asset_array[debug_mode_idx].name);
+
+		for (i = 0; i < MAX_DEBUG_NUMBER; i++)
+			if (cpu_has_debug_feature(debug_feature_array[i].bit))
+				seq_printf(s, "%s\n",
+					   debug_feature_array[i].name);
+	}
+
+	return 0;
+}
+
+static int debug_feature_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, show_debug_feature, NULL);
+}
+
+static const struct file_operations debug_feature_ops = {
+	.open		= debug_feature_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+int __init soc_debug_init(void)
+{
+	u32 __iomem *pgrr;
+	int i = 0;
+	enum pgrr_mode soc_debug_setting = 0;
+
+	if ((intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER) &&
+	    (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_ANNIEDALE))
+		return -EINVAL;
+
+	/* Read Policy Generator Result Register */
+	pgrr = ioremap_nocache(PGRR_BASE, sizeof(u32));
+	if (pgrr == NULL)
+		return -EFAULT;
+
+	pr_info("pgrr = %08x\n", *pgrr);
+	soc_debug_setting = *pgrr & 0x0F;
+	iounmap(pgrr);
+
+	for (i = 0; i < MAX_MODE_NUMBER; i++)
+		if (asset_array[i].mode == soc_debug_setting)
+			break;
+
+	if (i == MAX_MODE_NUMBER)
+		return -EFAULT;
+
+	debug_mode_idx = i;
+
+	dfs_entry = debugfs_create_file("debug_feature", S_IFREG | S_IRUGO,
+					NULL, NULL, &debug_feature_ops);
+
+	return 0;
+}
+arch_initcall(soc_debug_init);
+
+void __exit soc_debug_exit(void)
+{
+	debugfs_remove(dfs_entry);
+}
+module_exit(soc_debug_exit);
diff --git a/arch/x86/platform/intel-mid/intel_soc_dump.c b/arch/x86/platform/intel-mid/intel_soc_dump.c
new file mode 100644
index 0000000..2441b1c
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_soc_dump.c
@@ -0,0 +1,1586 @@
+/*
+ * intel_soc_dump.c - This driver provides a debugfs interface to read or
+ * write any register inside the SoC. Supported access methods are:
+ * mmio, msg_bus, pci and i2c.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ * Author: Bin Gao <bin.gao@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+/*
+ * Two files are created in the debugfs root folder: dump_cmd and dump_output.
+ * Echo a dump command to the file dump_cmd, and then cat the file dump_output.
+ * Even for a write command, you still have to run "cat dump_output";
+ * otherwise the data will not actually be written.
+ *
+ * It works like this:
+ * $ echo "dump command" > dump_cmd
+ * $ cat dump_output
+ *
+ * I/O memory read: echo "r[1|2|4] mmio <addr> [<len>]" > dump_cmd
+ *     e.g.  echo "r mmio 0xff180000" > dump_cmd
+ *
+ * I/O memory write: echo "w[1|2|4] mmio <addr> <val>" > dump_cmd
+ *     e.g.  echo "w mmio 0xff190000 0xf0107a08" > dump_cmd
+ *
+ * I/O port read: echo "r[1|2|4] port <port>" > dump_cmd
+ *     e.g.  echo "r port 0xcf8" > dump_cmd
+ *
+ * I/O port write: echo "w[1|2|4] port <port> <val>" > dump_cmd
+ *     e.g.  echo "w4 port 0xcfc 0x80002188" > dump_cmd
+ *
+ * message bus read: echo "r msg_bus <port> <addr> [<len>]" > dump_cmd
+ *     e.g.  echo "r msg_bus 0x02 0x30" > dump_cmd
+ *
+ * message bus write: echo "w msg_bus <port> <addr> <val>" > dump_cmd
+ *     e.g.  echo "w msg_bus 0x02 0x30 0x1020003f" > dump_cmd
+ *
+ * pci config read: echo "r[1|2|4] pci <bus> <dev> <func> <reg> [<len>]" >
+ * dump_cmd
+ *     e.g.  echo "r1 pci 0 2 0 0x20" > dump_cmd
+ *
+ * pci config write: echo "w[1|2|4] pci <bus> <dev> <func> <reg> <value>" >
+ * dump_cmd
+ *     e.g.  echo "w pci 0 2 0 0x20 0x380020f3" > dump_cmd
+ *
+ * msr read: echo "r[4|8] msr [<cpu>|all] <reg>" > dump_cmd
+ * read can be 32bit(r4) or 64bit(r8), default is r8 (=r)
+ * cpu can be 0, 1, 2, 3, ... or all, default is all
+ *     e.g.  echo "r msr 0 0xcd" > dump_cmd
+ *     (read all cpu's msr reg 0xcd in 64bit mode)
+ *
+ * msr write: echo "w[4|8] msr [<cpu>|all] <reg> <val>" > dump_cmd
+ * write can be 32bit(w4) or 64bit(w8), default is w8 (=w)
+ * cpu can be 0, 1, 2, 3, ... or all, default is all
+ *     e.g.  echo "w msr 1 289 0xf03090a0cc73be64" > dump_cmd
+ *     (write value 0xf03090a0cc73be64 to cpu 1's msr reg 289 in 64bit mode)
+ *
+ * i2c read:  echo "r i2c <bus> <addr>" > dump_cmd
+ *     e.g.  echo "r i2c 1 0x3e" > dump_cmd
+ *
+ * i2c write: echo "w i2c <bus> <addr> <val>" > dump_cmd
+ *      e.g.  echo "w i2c 2 0x70 0x0f" > dump_cmd
+ *
+ * SCU indirect memory read: echo "r[4] scu <addr>" > dump_cmd
+ *     e.g.  echo "r scu 0xff108194" > dump_cmd
+ *
+ * SCU indirect memory write: echo "w[4] scu <addr> <val>" > dump_cmd
+ *     e.g.  echo "w scu 0xff108194 0x03000001" > dump_cmd
+ *
+ *  SCU indirect read/write is limited to those addresses in
+ *  IndRdWrValidAddrRange array in SCU FW.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/seq_file.h>
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+#include <asm/uaccess.h>
+#include <asm/intel-mid.h>
+#include <asm/processor.h>
+#include <asm/msr.h>
+#include <asm/intel_mid_rpmsg.h>
+
+#define MAX_CMDLEN		96
+#define MAX_ERRLEN		255
+#define MIN_ARGS_NUM		3
+#define MAX_ARGS_NUM		8
+#define MAX_MMIO_PCI_LEN	4096
+#define MAX_MSG_BUS_LEN		64
+
+#define ACCESS_WIDTH_DEFAULT	0
+#define ACCESS_WIDTH_8BIT	1
+#define ACCESS_WIDTH_16BIT	2
+#define ACCESS_WIDTH_32BIT	4
+#define ACCESS_WIDTH_64BIT	8
+
+#define ACCESS_BUS_MMIO		1 /* I/O memory */
+#define ACCESS_BUS_PORT		2 /* I/O port */
+#define ACCESS_BUS_MSG_BUS	3 /* message bus */
+#define ACCESS_BUS_PCI		4 /* PCI bus */
+#define ACCESS_BUS_MSR		5 /* MSR registers */
+#define ACCESS_BUS_I2C		6 /* I2C bus */
+#define ACCESS_BUS_SCU_INDRW	7 /* SCU indirect read/write */
+
+#define ACCESS_DIR_READ		1
+#define ACCESS_DIR_WRITE	2
+
+#define RP_INDIRECT_READ	0x02 /* MSG_ID for indirect read via SCU */
+#define RP_INDIRECT_WRITE	0x05 /* MSG_ID for indirect write via SCU */
+
+#define SHOW_NUM_PER_LINE	(32 / access_width)
+#define LINE_WIDTH		(access_width * SHOW_NUM_PER_LINE)
+#define IS_WHITESPACE(c)	((c) == ' ' || (c) == '\t' || (c) == '\n')
+#define ADDR_RANGE(start, size, addr) \
+	(((addr) >= (start)) && ((addr) < ((start) + (size))))
+
+/* mmio <--> device map */
+struct mmio_pci_map {
+	u32 start;
+	size_t size;
+	u32 pci_bus:8;
+	u32 pci_dev:8;
+	u32 pci_func:8;
+	char name[24];
+};
+
+static struct dentry *dump_cmd_dentry, *dump_output_dentry;
+static int dump_cmd_was_set;
+static char dump_cmd_buf[MAX_CMDLEN], err_buf[MAX_ERRLEN + 1];
+
+static int access_dir, access_width, access_bus, access_len;
+static u32 access_value;
+static u64 access_value_64;
+
+/* I/O memory */
+static u32 mmio_addr;
+
+/* I/O port */
+static unsigned port_addr;
+
+/* msg_bus */
+static u8 msg_bus_port;
+static u32 msg_bus_addr;
+
+/* pci */
+static u8 pci_bus, pci_dev, pci_func;
+static u16 pci_reg;
+
+/* msr */
+static int msr_cpu;
+static u32 msr_reg;
+
+/* i2c */
+static u8 i2c_bus;
+static u32 i2c_addr;
+
+/* scu */
+static u32 scu_addr;
+
+static const struct mmio_pci_map soc_pnw_map[] = {
+	{ 0xff128000, 0x400, 0, 0, 1, "SPI0" },
+	{ 0xff128400, 0x400, 0, 0, 2, "SPI1" },
+	{ 0xff128800, 0x400, 0, 2, 4, "SPI2" },
+
+	{ 0xff12a000, 0x400, 0, 0, 3, "I2C0" },
+	{ 0xff12a400, 0x400, 0, 0, 4, "I2C1" },
+	{ 0xff12a800, 0x400, 0, 0, 5, "I2C2" },
+	{ 0xff12ac00, 0x400, 0, 3, 2, "I2C3" },
+	{ 0xff12b000, 0x400, 0, 3, 3, "I2C4" },
+	{ 0xff12b400, 0x400, 0, 3, 4, "I2C5" },
+
+	{ 0xffae5800, 0x400, 0, 2, 7, "SSP0" },
+	{ 0xffae6000, 0x400, 0, 1, 4, "SSP1" },
+	{ 0xffae6400, 0x400, 0, 1, 3, "SSP2" },
+	{ 0xffaf0000, 0x800, 0, 2, 6, "LPE DMA1" },
+
+	{ 0xff0d0000, 0x10000, 0, 1, 5, "SEP SECURITY" },
+	{ 0xff11c000, 0x400, 0, 1, 7, "SCU IPC1" },
+
+	{ 0xdff00000, 0x100000, 0, 2, 0, "GVD BAR0" },
+	{ 0x40000000, 0x10000000, 0, 2, 0, "GVD BAR2" },
+	{ 0xdfec0000, 0x40000, 0, 2, 0, "GVD BAR3" },
+
+	{ 0xff11d000, 0x1000, 0, 2, 2, "PMU" },
+	{ 0xffa60000, 0x20000, 0, 2, 3, "USB OTG" },
+
+	{ 0xdf800000, 0x400000, 0, 3, 0, "ISP" },
+
+	{ 0xff12c000, 0x800, 0, 2, 1, "GPIO0" },
+	{ 0xff12c800, 0x800, 0, 3, 5, "GPIO1" },
+	{ 0xff12b800, 0x800, 0, 2, 5, "GP DMA" },
+
+	{ 0xffa58000, 0x100, 0, 4, 0, "SDIO0(HC2)" },
+	{ 0xffa5c000, 0x100, 0, 4, 1, "SDIO1(HC1a)" },
+	{ 0xffa2a000, 0x100, 0, 4, 2, "SDIO3(HC1b)" },
+	{ 0xffa50000, 0x100, 0, 1, 0, "SDIO3/eMMC0(HC0a)" },
+	{ 0xffa54000, 0x100, 0, 1, 1, "SDIO4/eMMC1(HC0b)" },
+
+	{ 0xffa28080, 0x80, 0, 5, 0, "UART0" },
+	{ 0xffa28100, 0x80, 0, 5, 1, "UART1" },
+	{ 0xffa28180, 0x80, 0, 5, 2, "UART2" },
+	{ 0xffa28400, 0x400, 0, 5, 3, "UART DMA" },
+
+	{ 0xffa2e000, 0x400, 0, 6, 0, "PTI" },
+
+	/* no address assigned:	{ 0x0, 0, 0, 6, 1, "xx" }, */
+
+	{ 0xffa29000, 0x800, 0, 6, 3, "HSI" },
+	{ 0xffa29800, 0x800, 0, 6, 4, "HSI DMA" },
+};
+
+static const struct mmio_pci_map soc_clv_map[] = {
+	{ 0xff138000, 0x400, 0, 0, 3, "I2C0" },
+	{ 0xff139000, 0x400, 0, 0, 4, "I2C1" },
+	{ 0xff13a000, 0x400, 0, 0, 5, "I2C2" },
+	{ 0xff13b000, 0x400, 0, 3, 2, "I2C3" },
+	{ 0xff13c000, 0x400, 0, 3, 3, "I2C4" },
+	{ 0xff13d000, 0x400, 0, 3, 4, "I2C5" },
+
+	{ 0xff128000, 0x400, 0, 0, 1, "SPI0/MSIC" },
+	{ 0xff135000, 0x400, 0, 0, 2, "SPI1" },
+	{ 0xff136000, 0x400, 0, 2, 4, "SPI2" },
+	/* invisible to IA: { 0xff137000, 0, -1, -1, -1, "SPI3" }, */
+
+	{ 0xffa58000, 0x100, 0, 4, 0, "SDIO0 (HC2)" },
+	{ 0xffa48000, 0x100, 0, 4, 1, "SDIO1 (HC1a)" },
+	{ 0xffa4c000, 0x100, 0, 4, 2, "SDIO2 (HC1b)" },
+	{ 0xffa50000, 0x100, 0, 1, 0, "SDIO3/eMMC0 (HC0a)" },
+	{ 0xffa54000, 0x100, 0, 1, 1, "SDIO4/eMMC1 (HC0b)" },
+
+	{ 0xff119000, 0x800, 0, 2, 1, "GPIO0" },
+	{ 0xff13f000, 0x800, 0, 3, 5, "GPIO1" },
+	{ 0xff13e000, 0x800, 0, 2, 5, "GP DMA" },
+
+	{ 0xffa20000, 0x400, 0, 2, 7, "SSP0" },
+	{ 0xffa21000, 0x400, 0, 1, 4, "SSP1" },
+	{ 0xffa22000, 0x400, 0, 1, 3, "SSP2" },
+	/* invisible to IA: { 0xffa23000, 0, -1, -1, -1, "SSP3" }, */
+
+	/* invisible to IA: { 0xffaf8000, 0, -1, -1, -1, "LPE DMA0" }, */
+	{ 0xffaf0000, 0x800, 0, 2, 6, "LPE DMA1" },
+	{ 0xffae8000, 0x1000, 0, 1, 3, "LPE SHIM" },
+	/* { 0xffae9000, 0, 0, 6, 5, "VIBRA" }, LPE SHIM BASE + 0x1000 */
+
+	{ 0xffa28080, 0x80, 0, 5, 0, "UART0" },
+	{ 0xffa28100, 0x80, 0, 5, 1, "UART1" },
+	{ 0xffa28180, 0x80, 0, 5, 2, "UART2" },
+	{ 0xffa28400, 0x400, 0, 5, 3, "UART DMA" },
+
+	{ 0xffa29000, 0x800, 0, 6, 3, "HSI" },
+	{ 0xffa2a000, 0x800, 0, 6, 4, "HSI DMA" },
+
+	{ 0xffa60000, 0x20000, 0, 2, 3, "USB OTG" },
+	{ 0xffa80000, 0x60000, 0, 6, 5, "USB SPH" },
+
+	{ 0xff0d0000, 0x10000, 0, 1, 5, "SEP SECURITY" },
+
+	{ 0xdff00000, 0x100000, 0, 2, 0, "GVD BAR0" },
+	{ 0x40000000, 0x10000000, 0, 2, 0, "GVD BAR2" },
+	{ 0xdfec0000, 0x40000, 0, 2, 0, "GVD BAR3" },
+	/* No address assigned: { 0x0, 0, 0, 6, 1, "HDMI HOTPLUG" }, */
+
+	{ 0xdf800000, 0x400000, 0, 3, 0, "ISP" },
+
+	{ 0xffa2e000, 0x400, 0, 6, 0, "PTI" },
+	{ 0xff11c000, 0x400, 0, 1, 7, "SCU IPC1" },
+	{ 0xff11d000, 0x1000, 0, 2, 2, "PMU" },
+};
+
+static const struct mmio_pci_map soc_tng_map[] = {
+	/* I2C0 is reserved for SCU<-->PMIC communication */
+	{ 0xff18b000, 0x400, 0, 8, 0, "I2C1" },
+	{ 0xff18c000, 0x400, 0, 8, 1, "I2C2" },
+	{ 0xff18d000, 0x400, 0, 8, 2, "I2C3" },
+	{ 0xff18e000, 0x400, 0, 8, 3, "I2C4" },
+	{ 0xff18f000, 0x400, 0, 9, 0, "I2C5" },
+	{ 0xff190000, 0x400, 0, 9, 1, "I2C6" },
+	{ 0xff191000, 0x400, 0, 9, 2, "I2C7" },
+
+	/* SDIO controllers number: 4 (compared to 5 of PNW/CLV) */
+	{ 0xff3fa000, 0x100, 0, 1, 2, "SDIO0 (HC2)" },
+	{ 0xff3fb000, 0x100, 0, 1, 3, "SDIO1 (HC1a)" },
+	{ 0xff3fc000, 0x100, 0, 1, 0, "SDIO3/eMMC0 (HC0a)" },
+	{ 0xff3fd000, 0x100, 0, 1, 1, "SDIO4/eMMC1 (HC0b)" },
+
+	/* GPIO0 and GPIO1 are merged to one GPIO controller in TNG */
+	{ 0xff008000, 0x1000, 0, 12, 0, "GPIO" },
+	{ 0xff192000, 0x1000, 0, 21, 0, "GP DMA" },
+
+	/* SSP Audio: SSP0: Modem, SSP1: Audio Codec, SSP2: Bluetooth */
+
+	/* LPE */
+	{ 0xff340000, 0x4000, 0, 13, 0, "LPE SHIM" },
+	{ 0xff344000, 0x1000, 0, 13, 0, "MAILBOX RAM" },
+	{ 0xff2c0000, 0x14000, 0, 13, 0, "ICCM" },
+	{ 0xff300000, 0x28000, 0, 13, 0, "DCCM" },
+	{ 0xff298000, 0x4000, 0, 14, 0, "LPE DMA0" },
+	/* invisible to IA: { 0xff29c000, 0x4000, -1, -1, -1, "LPE DMA1" }, */
+
+	/* SSP SC: SSP4: used by SCU for SPI Debug Card */
+	/* invisible to IA: { 0xff00e000, 0x1000, -1, -1, -1, "SSP SC" }, */
+
+	/* SSP General Purpose */
+	{ 0xff188000, 0x1000, 0, 7, 0, "SSP3" },
+	{ 0xff189000, 0x1000, 0, 7, 1, "SSP5" },
+	{ 0xff18a000, 0x1000, 0, 7, 2, "SSP6" },
+
+	/* UART */
+	{ 0xff010080, 0x80, 0, 4, 1, "UART0" },
+	{ 0xff011000, 0x80, 0, 4, 2, "UART1" },
+	{ 0xff011080, 0x80, 0, 4, 3, "UART2" },
+	{ 0xff011400, 0x400, 0, 5, 0, "UART DMA" },
+
+	/* HSI */
+	{ 0xff3f8000, 0x1000, 0, 10, 0, "HSI" },
+
+	/* USB */
+	{ 0xf9040000, 0x20000, 0, 15, 0, "USB2 OTG" },
+	{ 0xf9060000, 0x20000, 0, 16, 0, "USB2 MPH/HSIC" },
+	{ 0xf9100000, 0x100000, 0, 17, 0, "USB3 OTG" },
+	/* { 0xf90f0000, 0x1000, -1, -1, -1, "USB3 PHY" }, */
+	/* { 0xf90a0000, 0x10000, -1, -1, -1, "USB3 DMA FETCH" }, */
+
+	/* Security/Chaabi */
+	{ 0xf9030000, 0x1000, 0, 11, 0, "SEP SECURITY" },
+
+	/* Graphics/Display */
+	{ 0xc0000000, 0x2000000, 0, 2, 0, "GVD BAR0" },
+	{ 0x80000000, 0x10000000, 0, 2, 0, "GVD BAR2" },
+
+	/* ISP */
+	{ 0xc2000000, 0x400000, 0, 3, 0, "ISP" },
+
+	/* PTI */
+	{ 0xf9009000, 0x1000, 0, 18, 0, "PTI STM" },
+	{ 0xf90a0000, 0x10000, 0, 18, 0, "PTI USB3 DMA FETCH" },
+	{ 0xfa000000, 0x1000000, 0, 18, 0, "PTI APERTURE A" },
+
+	{ 0xff009000, 0x1000, 0, 19, 0, "SCU-IA IPC" },
+	{ 0xff00b000, 0x1000, 0, 20, 0, "PMU" },
+};
+
+static struct pci_dev *mmio_to_pci(u32 addr, char **name)
+{
+	int i, count;
+	struct mmio_pci_map *map;
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL) {
+		count = ARRAY_SIZE(soc_pnw_map);
+		map = (struct mmio_pci_map *) &soc_pnw_map[0];
+	} else if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW) {
+		count = ARRAY_SIZE(soc_clv_map);
+		map = (struct mmio_pci_map *) &soc_clv_map[0];
+	} else if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) {
+		count = ARRAY_SIZE(soc_tng_map);
+		map = (struct mmio_pci_map *) &soc_tng_map[0];
+	} else {
+		return NULL;
+	}
+
+	for (i = 0; i < count; i++) {
+		if (ADDR_RANGE(map[i].start, map[i].size, addr))
+			break;
+	}
+
+	if (i >= count)
+		return NULL;
+
+	*name = &map[i].name[0];
+	return pci_get_bus_and_slot(map[i].pci_bus,
+		PCI_DEVFN(map[i].pci_dev, map[i].pci_func));
+}
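+
+/*
+ * Illustrative lookup (hypothetical address; assuming ADDR_RANGE() tests
+ * addr against [start, start + size)): on Tangier, addr 0xff008010 falls
+ * inside the { 0xff008000, 0x1000, 0, 12, 0, "GPIO" } entry of
+ * soc_tng_map above, so *name is set to "GPIO" and the pci_dev at bus 0,
+ * devfn PCI_DEVFN(12, 0) is returned.
+ */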
+
+static int parse_argument(char *input, char **args)
+{
+	int count, located;
+	char *p = input;
+	int input_len = strlen(input);
+
+	count = 0;
+	located = 0;
+	while (*p != 0) {
+		if (p - input >= input_len)
+			break;
+
+		/* Locate the first character of an argument */
+		if (!IS_WHITESPACE(*p)) {
+			if (!located) {
+				located = 1;
+				/* don't write past args[]; still bump count
+				 * so the caller can detect the overflow
+				 */
+				if (count >= MAX_ARGS_NUM) {
+					count++;
+					break;
+				}
+				args[count++] = p;
+			}
+		} else {
+			if (located) {
+				*p = 0;
+				located = 0;
+			}
+		}
+		p++;
+	}
+
+	return count;
+}
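+
+/*
+ * Example of the in-place tokenization above (hypothetical input): for a
+ * writable buffer containing "r4  mmio 0xff008000", each whitespace run
+ * following an argument is overwritten with '\0', args[] becomes
+ * { "r4", "mmio", "0xff008000" } and 3 is returned.
+ */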
+
+static int dump_cmd_show(struct seq_file *s, void *unused)
+{
+	seq_printf(s, "%s", dump_cmd_buf);
+	return 0;
+}
+
+static int dump_cmd_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dump_cmd_show, NULL);
+}
+
+static int parse_mmio_args(char **arg_list, int arg_num)
+{
+	int ret;
+
+	if (arg_num < 3) {
+		snprintf(err_buf, MAX_ERRLEN, "too few arguments\n"
+			"usage: r[1|2|4] <mmio> <addr> [<len>]\n"
+			"       w[1|2|4] <mmio> <addr> <val>\n");
+		goto failed;
+	}
+
+	if (access_width == ACCESS_WIDTH_DEFAULT)
+		access_width = ACCESS_WIDTH_32BIT;
+
+	ret = kstrtou32(arg_list[2], 0, &mmio_addr);
+	if (ret) {
+		snprintf(err_buf, MAX_ERRLEN, "invalid mmio address %s\n",
+							 arg_list[2]);
+		goto failed;
+	}
+
+	if ((access_width == ACCESS_WIDTH_32BIT) &&
+		(mmio_addr % 4)) {
+		snprintf(err_buf, MAX_ERRLEN,
+			"addr %x is not 4 bytes aligned!\n",
+						mmio_addr);
+		goto failed;
+	}
+
+	if ((access_width == ACCESS_WIDTH_16BIT) &&
+		(mmio_addr % 2)) {
+		snprintf(err_buf, MAX_ERRLEN,
+			"addr %x is not 2 bytes aligned!\n",
+						mmio_addr);
+		goto failed;
+	}
+
+	if (access_dir == ACCESS_DIR_READ) {
+		if (arg_num == 4) {
+			ret = kstrtou32(arg_list[3], 0, &access_len);
+			if (ret) {
+				snprintf(err_buf, MAX_ERRLEN,
+					"invalid mmio read length %s\n",
+							arg_list[3]);
+				goto failed;
+			}
+		} else if (arg_num > 4) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"usage: r[1|2|4] mmio <addr> "
+						"[<len>]\n");
+			goto failed;
+		}
+	}
+
+	if (access_dir == ACCESS_DIR_WRITE) {
+		if (arg_num != 4) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"need exact 4 arguments for "
+					"mmio write.\n");
+			goto failed;
+		}
+		ret = kstrtou32(arg_list[3], 0, &access_value);
+		if (ret) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"invalid mmio address %s\n",
+						arg_list[3]);
+			goto failed;
+		}
+	}
+
+	return 0;
+
+failed:
+	return -EINVAL;
+}
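+
+/*
+ * Commands accepted by the parser above (illustrative, hypothetical
+ * addresses):
+ *   r4 mmio 0xff008000 8      - read 8 dwords starting at 0xff008000
+ *   w2 mmio 0xff008002 0x1234 - 16-bit write; the address must be
+ *                               2-byte aligned, as checked above
+ */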
+
+static int parse_port_args(char **arg_list, int arg_num)
+{
+	int ret;
+
+	if (arg_num < 2) {
+		snprintf(err_buf, MAX_ERRLEN, "too few arguments\n"
+			"usage: r[1|2|4] port <port>\n"
+			"       w[1|2|4] port <port> <val>\n");
+		goto failed;
+	}
+
+	if (access_width == ACCESS_WIDTH_DEFAULT)
+		access_width = ACCESS_WIDTH_8BIT;
+
+	ret = kstrtou16(arg_list[2], 0, (u16 *)&port_addr);
+	if (ret) {
+		snprintf(err_buf, MAX_ERRLEN, "invalid port address %s\n",
+							 arg_list[2]);
+		goto failed;
+	}
+
+	if ((access_width == ACCESS_WIDTH_32BIT) &&
+		(port_addr % ACCESS_WIDTH_32BIT)) {
+		snprintf(err_buf, MAX_ERRLEN,
+			"port %x is not 4 bytes aligned!\n", port_addr);
+		goto failed;
+	}
+
+	if ((access_width == ACCESS_WIDTH_16BIT) &&
+		(port_addr % ACCESS_WIDTH_16BIT)) {
+		snprintf(err_buf, MAX_ERRLEN,
+			"port %x is not 2 bytes aligned!\n", port_addr);
+		goto failed;
+	}
+
+	if (access_dir == ACCESS_DIR_READ) {
+		if (arg_num != 3) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"usage: r[1|2|4] port <port>\n");
+			goto failed;
+		}
+	}
+
+	if (access_dir == ACCESS_DIR_WRITE) {
+		if (arg_num != 4) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"need exact 4 arguments for port write.\n");
+			goto failed;
+		}
+		ret = kstrtou32(arg_list[3], 0, &access_value);
+		if (ret) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"invalid value %s\n", arg_list[3]);
+			goto failed;
+		}
+	}
+
+	return 0;
+
+failed:
+	return -EINVAL;
+}
+
+static int parse_msg_bus_args(char **arg_list, int arg_num)
+{
+	int ret;
+
+	if (arg_num < 4) {
+		snprintf(err_buf, MAX_ERRLEN, "too few arguments\n"
+			"usage: r msg_bus <port> <addr> [<len>]\n"
+			"       w msg_bus <port> <addr> <val>\n");
+		goto failed;
+	}
+
+	if (access_width == ACCESS_WIDTH_DEFAULT)
+		access_width = ACCESS_WIDTH_32BIT;
+
+	if (access_width != ACCESS_WIDTH_32BIT) {
+		snprintf(err_buf, MAX_ERRLEN,
+			"only 32bit read/write are supported.\n");
+		goto failed;
+	}
+
+	ret = kstrtou8(arg_list[2], 0, &msg_bus_port);
+	if (ret) {
+		snprintf(err_buf, MAX_ERRLEN, "invalid msg_bus port %s\n",
+								arg_list[2]);
+		goto failed;
+	}
+
+	ret = kstrtou32(arg_list[3], 0, &msg_bus_addr);
+	if (ret) {
+		snprintf(err_buf, MAX_ERRLEN, "invalid msg_bus address %s\n",
+								arg_list[3]);
+		goto failed;
+	}
+
+	if (access_dir == ACCESS_DIR_READ) {
+		if (arg_num == 5) {
+			ret = kstrtou32(arg_list[4], 0, &access_len);
+			if (ret) {
+				snprintf(err_buf, MAX_ERRLEN,
+					"invalid msg_bus read length %s\n",
+								arg_list[4]);
+				goto failed;
+			}
+		} else if (arg_num > 5) {
+			snprintf(err_buf, MAX_ERRLEN, "too many arguments\n"
+						"usage: r[1|2|4] msg_bus "
+						"<port> <addr> [<len>]\n");
+			goto failed;
+		}
+	}
+
+	if (access_dir == ACCESS_DIR_WRITE) {
+		if (arg_num != 5) {
+			snprintf(err_buf, MAX_ERRLEN, "too few arguments\n"
+				"usage: w msg_bus <port> <addr> <val>]\n");
+			goto failed;
+		}
+		ret = kstrtou32(arg_list[4], 0, &access_value);
+		if (ret) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"invalid value for msg_bus write %s\n",
+							 arg_list[4]);
+			goto failed;
+		}
+	}
+
+	return 0;
+
+failed:
+	return -EINVAL;
+}
+
+static int parse_pci_args(char **arg_list, int arg_num)
+{
+	int ret;
+
+	if (arg_num < 6) {
+		snprintf(err_buf, MAX_ERRLEN, "too few arguments\n"
+			"usage: r[1|2|4] pci <bus> <dev> <func> <reg> [<len>]\n"
+			"       w[1|2|4] pci <bus> <dev> <func> <reg> <val>\n");
+		goto failed;
+	}
+
+	if (access_width == ACCESS_WIDTH_DEFAULT)
+		access_width = ACCESS_WIDTH_32BIT;
+
+	ret = kstrtou8(arg_list[2], 0, &pci_bus);
+	if (ret) {
+		snprintf(err_buf, MAX_ERRLEN, "invalid pci bus %s\n",
+							arg_list[2]);
+		goto failed;
+	}
+
+	ret = kstrtou8(arg_list[3], 0, &pci_dev);
+	if (ret || pci_dev > 31) {
+		snprintf(err_buf, MAX_ERRLEN, "invalid pci device %s\n",
+							arg_list[3]);
+		goto failed;
+	}
+
+	ret = kstrtou8(arg_list[4], 0, &pci_func);
+	if (ret || pci_func > 7) {
+		snprintf(err_buf, MAX_ERRLEN, "invalid pci function %s\n",
+							arg_list[4]);
+		goto failed;
+	}
+
+	ret = kstrtou16(arg_list[5], 0, &pci_reg);
+	if (ret || pci_reg >= 4 * 1024) {
+		snprintf(err_buf, MAX_ERRLEN, "invalid pci register %s\n",
+							arg_list[5]);
+		goto failed;
+	}
+
+	if ((access_width == ACCESS_WIDTH_32BIT) && (pci_reg % 4)) {
+		snprintf(err_buf, MAX_ERRLEN, "reg %x is not 4 bytes aligned!\n"
+							 , (u32) pci_reg);
+		goto failed;
+	}
+
+	if ((access_width == ACCESS_WIDTH_16BIT) && (pci_reg % 2)) {
+		snprintf(err_buf, MAX_ERRLEN, "reg %x is not 2 bytes aligned\n",
+								pci_reg);
+		goto failed;
+	}
+
+	if (access_dir == ACCESS_DIR_READ) {
+		if (arg_num == 7) {
+			ret = kstrtou32(arg_list[6], 0, &access_len);
+			if (ret || access_len > 4 * 1024) {
+				snprintf(err_buf, MAX_ERRLEN,
+					"invalid pci read length %s\n",
+							arg_list[6]);
+				goto failed;
+			}
+		} else if (arg_num > 7) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"max 7 args are allowed for pci read\n"
+				"usage: r[1|2|4] pci <bus> <dev> <func> "
+							"<reg> [<len>]\n");
+			goto failed;
+		}
+	}
+
+	if (access_dir == ACCESS_DIR_WRITE) {
+		if (arg_num != 7) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"need exact 7 args for pci write.\n");
+			goto failed;
+		}
+		ret = kstrtou32(arg_list[6], 0, &access_value);
+		if (ret) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"invalid value for pci write %s\n",
+							 arg_list[6]);
+			goto failed;
+		}
+	}
+
+	return 0;
+
+failed:
+	return -EINVAL;
+}
+
+static int parse_msr_args(char **arg_list, int arg_num)
+{
+	int ret, arg_reg, arg_val;
+
+	if (((access_dir == ACCESS_DIR_READ) && (arg_num < 3)) ||
+		((access_dir == ACCESS_DIR_WRITE) && (arg_num < 4))) {
+		snprintf(err_buf, MAX_ERRLEN, "too few arguments\n"
+			"usage: r[4|8] msr [<cpu> | all] <reg>]\n"
+			"       w[4|8] msr [<cpu> | all] <reg> <val>]\n");
+		goto failed;
+	}
+
+	if (((access_dir == ACCESS_DIR_READ) && (arg_num > 4)) ||
+		((access_dir == ACCESS_DIR_WRITE) && (arg_num > 5))) {
+		snprintf(err_buf, MAX_ERRLEN, "too many arguments\n"
+			"usage: r[4|8] msr [<cpu> | all] <reg>]\n"
+			"       w[4|8] msr [<cpu> | all] <reg> <val>]\n");
+		goto failed;
+	}
+
+	if (access_width == ACCESS_WIDTH_DEFAULT)
+		access_width = ACCESS_WIDTH_64BIT;
+
+	if (!strncmp(arg_list[2], "all", 3)) {
+		msr_cpu = -1;
+		arg_reg = 3;
+		arg_val = 4;
+	} else if ((access_dir == ACCESS_DIR_READ && arg_num == 4) ||
+		(access_dir == ACCESS_DIR_WRITE && arg_num == 5)) {
+		ret = kstrtou32(arg_list[2], 0, &msr_cpu);
+		if (ret) {
+			snprintf(err_buf, MAX_ERRLEN, "invalid cpu: %s\n",
+							arg_list[2]);
+			goto failed;
+		}
+		arg_reg = 3;
+		arg_val = 4;
+	} else {
+		/* Default cpu for msr read is all, for msr write is 0 */
+		if (access_dir == ACCESS_DIR_READ)
+			msr_cpu = -1;
+		else
+			msr_cpu = 0;
+		arg_reg = 2;
+		arg_val = 3;
+	}
+
+	ret = kstrtou32(arg_list[arg_reg], 0, &msr_reg);
+	if (ret) {
+		snprintf(err_buf, MAX_ERRLEN, "invalid msr reg: %s\n",
+							arg_list[arg_reg]);
+		goto failed;
+	}
+	if (access_dir == ACCESS_DIR_WRITE) {
+		if (access_width == ACCESS_WIDTH_32BIT)
+			ret = kstrtou32(arg_list[arg_val], 0, &access_value);
+		else
+			ret = kstrtou64(arg_list[arg_val], 0, &access_value_64);
+		if (ret) {
+			snprintf(err_buf, MAX_ERRLEN, "invalid value: %s\n",
+							arg_list[arg_val]);
+			goto failed;
+		}
+	}
+
+	return 0;
+
+failed:
+	return -EINVAL;
+}
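+
+/*
+ * Examples for the cpu-selection logic above (illustrative):
+ *   r8 msr 0x10        - no cpu given, so the read defaults to all cpus
+ *   r8 msr 2 0x10      - read MSR 0x10 on cpu 2 only
+ *   w4 msr all 0x10 0  - 32-bit write to MSR 0x10 on every cpu
+ */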
+
+static int parse_i2c_args(char **arg_list, int arg_num)
+{
+	int ret;
+
+	if ((access_dir == ACCESS_DIR_READ && arg_num != 4) ||
+		(access_dir == ACCESS_DIR_WRITE && arg_num != 5)) {
+		snprintf(err_buf, MAX_ERRLEN, "usage: r i2c <bus> <addr>\n"
+			"       w i2c <bus> <addr> <val>\n");
+		goto failed;
+	}
+
+	if (access_width == ACCESS_WIDTH_DEFAULT)
+		access_width = ACCESS_WIDTH_8BIT;
+
+	if (access_width != ACCESS_WIDTH_8BIT) {
+		snprintf(err_buf, MAX_ERRLEN, "only 8bit access is allowed\n");
+		goto failed;
+	}
+
+	ret = kstrtou8(arg_list[2], 0, &i2c_bus);
+	if (ret || i2c_bus > 9) {
+		snprintf(err_buf, MAX_ERRLEN, "invalid i2c bus %s\n",
+							arg_list[2]);
+		goto failed;
+	}
+
+	ret = kstrtou32(arg_list[3], 0, &i2c_addr);
+	if (ret || (i2c_addr > 0x3ff)) {
+		snprintf(err_buf, MAX_ERRLEN, "invalid i2c address %s\n",
+							arg_list[3]);
+		goto failed;
+	}
+
+	if (access_dir == ACCESS_DIR_WRITE) {
+		ret = kstrtou32(arg_list[4], 0, &access_value);
+		if (ret) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"invalid value for i2c write %s\n",
+							 arg_list[4]);
+			goto failed;
+		}
+	}
+	return 0;
+
+failed:
+	return -EINVAL;
+}
+
+static int parse_scu_args(char **arg_list, int arg_num)
+{
+	int ret;
+
+	if (arg_num < 3) {
+		snprintf(err_buf, MAX_ERRLEN, "too few arguments\n"
+			"usage: r[4] scu <addr>\n"
+			"       w[4] scu <addr> <val>\n");
+		goto failed;
+	}
+
+	/* SCU indirect accesses are always 32-bit */
+	access_width = ACCESS_WIDTH_32BIT;
+
+	ret = kstrtou32(arg_list[2], 0, &scu_addr);
+	if (ret) {
+		snprintf(err_buf, MAX_ERRLEN, "invalid scu address %s\n",
+							arg_list[2]);
+		goto failed;
+	}
+
+	if (scu_addr % 4) {
+		snprintf(err_buf, MAX_ERRLEN,
+			"addr %x is not 4 bytes aligned!\n",
+						scu_addr);
+		goto failed;
+	}
+
+	if (access_dir == ACCESS_DIR_READ) {
+		if (arg_num != 3) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"usage: r[4] scu <addr>\n");
+			goto failed;
+		}
+	}
+
+	if (access_dir == ACCESS_DIR_WRITE) {
+		if (arg_num != 4) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"usage: w[4] scu <addr> <val>\n");
+			goto failed;
+		}
+		ret = kstrtou32(arg_list[3], 0, &access_value);
+		if (ret) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"invalid scu write value %s\n",
+						arg_list[3]);
+			goto failed;
+		}
+	}
+
+	return 0;
+
+failed:
+	return -EINVAL;
+}
+
+static ssize_t dump_cmd_write(struct file *file, const char __user *buf,
+				size_t len, loff_t *offset)
+{
+	char cmd[MAX_CMDLEN];
+	char *arg_list[MAX_ARGS_NUM];
+	int arg_num, ret = -EINVAL;
+
+	err_buf[0] = 0;
+
+	if (len >= MAX_CMDLEN) {
+		snprintf(err_buf, MAX_ERRLEN, "input command is too long.\n"
+					"max allowed input length is %d\n",
+							MAX_CMDLEN - 1);
+		goto done;
+	}
+
+	if (copy_from_user(cmd, buf, len)) {
+		snprintf(err_buf, MAX_ERRLEN, "copy_from_user() failed.\n");
+		goto done;
+	}
+	cmd[len] = 0;
+
+	dump_cmd_buf[0] = 0;
+	strncpy(dump_cmd_buf, cmd, len);
+	dump_cmd_buf[len] = 0;
+
+	arg_num = parse_argument(cmd, arg_list);
+	if (arg_num < MIN_ARGS_NUM) {
+		snprintf(err_buf, MAX_ERRLEN,
+			"invalid command(too few arguments): "
+					"%s\n", dump_cmd_buf);
+		goto done;
+	}
+	if (arg_num > MAX_ARGS_NUM) {
+		snprintf(err_buf, MAX_ERRLEN,
+			"invalid command(too many arguments): "
+					"%s\n", dump_cmd_buf);
+		goto done;
+	}
+
+	/* arg 1: direction(read/write) and mode (8/16/32/64 bit) */
+	if (!strncmp(arg_list[0], "r8", 2)) {
+		access_dir = ACCESS_DIR_READ;
+		access_width = ACCESS_WIDTH_64BIT;
+	} else if (!strncmp(arg_list[0], "r4", 2)) {
+		access_dir = ACCESS_DIR_READ;
+		access_width = ACCESS_WIDTH_32BIT;
+	} else if (!strncmp(arg_list[0], "r2", 2)) {
+		access_dir = ACCESS_DIR_READ;
+		access_width = ACCESS_WIDTH_16BIT;
+	} else if (!strncmp(arg_list[0], "r1", 2)) {
+		access_dir = ACCESS_DIR_READ;
+		access_width = ACCESS_WIDTH_8BIT;
+	} else if (!strncmp(arg_list[0], "r", 1)) {
+		access_dir = ACCESS_DIR_READ;
+		access_width = ACCESS_WIDTH_DEFAULT;
+	} else if (!strncmp(arg_list[0], "w8", 2)) {
+		access_dir = ACCESS_DIR_WRITE;
+		access_width = ACCESS_WIDTH_64BIT;
+	} else if (!strncmp(arg_list[0], "w4", 2)) {
+		access_dir = ACCESS_DIR_WRITE;
+		access_width = ACCESS_WIDTH_32BIT;
+	} else if (!strncmp(arg_list[0], "w2", 2)) {
+		access_dir = ACCESS_DIR_WRITE;
+		access_width = ACCESS_WIDTH_16BIT;
+	} else if (!strncmp(arg_list[0], "w1", 2)) {
+		access_dir = ACCESS_DIR_WRITE;
+		access_width = ACCESS_WIDTH_8BIT;
+	} else if (!strncmp(arg_list[0], "w", 1)) {
+		access_dir = ACCESS_DIR_WRITE;
+		access_width = ACCESS_WIDTH_DEFAULT;
+	} else {
+		snprintf(err_buf, MAX_ERRLEN, "unknown argument: %s\n",
+							arg_list[0]);
+		goto done;
+	}
+
+	/* arg 2: bus type (mmio, port, msg_bus, pci, msr, i2c or scu) */
+	access_len = 1;
+	if (!strncmp(arg_list[1], "mmio", 4)) {
+		access_bus = ACCESS_BUS_MMIO;
+		ret = parse_mmio_args(arg_list, arg_num);
+	} else if (!strncmp(arg_list[1], "port", 4)) {
+		access_bus = ACCESS_BUS_PORT;
+		ret = parse_port_args(arg_list, arg_num);
+	} else if (!strncmp(arg_list[1], "msg_bus", 7)) {
+		access_bus = ACCESS_BUS_MSG_BUS;
+		ret = parse_msg_bus_args(arg_list, arg_num);
+	} else if (!strncmp(arg_list[1], "pci", 3)) {
+		access_bus = ACCESS_BUS_PCI;
+		ret = parse_pci_args(arg_list, arg_num);
+	} else if (!strncmp(arg_list[1], "msr", 3)) {
+		access_bus = ACCESS_BUS_MSR;
+		ret = parse_msr_args(arg_list, arg_num);
+	} else if (!strncmp(arg_list[1], "i2c", 3)) {
+		access_bus = ACCESS_BUS_I2C;
+		ret = parse_i2c_args(arg_list, arg_num);
+	} else if (!strncmp(arg_list[1], "scu", 3)) {
+		access_bus = ACCESS_BUS_SCU_INDRW;
+		ret = parse_scu_args(arg_list, arg_num);
+	} else {
+		snprintf(err_buf, MAX_ERRLEN, "unknown argument: %s\n",
+							arg_list[1]);
+	}
+
+	if (access_len == 0) {
+		snprintf(err_buf, MAX_ERRLEN,
+			"access length must be larger than 0\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if ((access_bus == ACCESS_BUS_MMIO || access_bus == ACCESS_BUS_PCI) &&
+					 (access_len > MAX_MMIO_PCI_LEN)) {
+		snprintf(err_buf, MAX_ERRLEN,
+			"%d exceeds max mmio/pci read length(%d)\n",
+					access_len, MAX_MMIO_PCI_LEN);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if ((access_bus == ACCESS_BUS_MSG_BUS) &&
+		(access_len > MAX_MSG_BUS_LEN)) {
+		snprintf(err_buf, MAX_ERRLEN,
+			"%d exceeds max msg_bus read length(%d)\n",
+					access_len, MAX_MSG_BUS_LEN);
+		ret = -EINVAL;
+	}
+
+	if (access_bus == ACCESS_BUS_MSR) {
+		if ((access_width != ACCESS_WIDTH_32BIT) &&
+			(access_width != ACCESS_WIDTH_64BIT) &&
+			(access_width != ACCESS_WIDTH_DEFAULT)) {
+			snprintf(err_buf, MAX_ERRLEN,
+				"only 32bit or 64bit is allowed for msr\n");
+			ret = -EINVAL;
+		}
+	}
+
+done:
+	dump_cmd_was_set = ret ? 0 : 1;
+	return ret ? ret : len;
+}
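+
+/*
+ * Typical usage (illustrative; assumes debugfs is mounted at
+ * /sys/kernel/debug, where intel_mid_dump_init() below creates the two
+ * files):
+ *   echo "r4 mmio 0xff008000 8" > /sys/kernel/debug/dump_cmd
+ *   cat /sys/kernel/debug/dump_output
+ * The command is only parsed and validated here; the actual register
+ * access happens when dump_output is read.
+ */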
+
+static int dump_output_show_mmio(struct seq_file *s)
+{
+	void __iomem *base;
+	int i, comp1, comp2;
+	u32 start, end, end_natural;
+	struct pci_dev *pdev;
+	char *name;
+
+	pdev = mmio_to_pci(mmio_addr, &name);
+	if (pdev && pm_runtime_get_sync(&pdev->dev) < 0) {
+		seq_printf(s, "can't put device %s into D0i0 state\n", name);
+		return 0;
+	}
+
+	if (access_dir == ACCESS_DIR_WRITE) {
+		base = ioremap_nocache(mmio_addr, access_width);
+		if (!base) {
+			seq_printf(s, "can't map physical address: %x\n",
+				mmio_addr);
+			if (pdev)
+				pm_runtime_put_sync(&pdev->dev);
+			return 0;
+		}
+		switch (access_width) {
+		case ACCESS_WIDTH_8BIT:
+			iowrite8((u8) access_value, base);
+			break;
+		case ACCESS_WIDTH_16BIT:
+			iowrite16((u16) access_value, base);
+			break;
+		case ACCESS_WIDTH_32BIT:
+		case ACCESS_WIDTH_DEFAULT:
+			iowrite32(access_value, base);
+			break;
+		default:
+			break; /* never happens */
+		}
+		seq_printf(s, "write succeeded\n");
+	} else {
+		start = (mmio_addr / LINE_WIDTH) * LINE_WIDTH;
+		end_natural = mmio_addr + (access_len - 1) * access_width;
+		end = (end_natural / LINE_WIDTH + 1) * LINE_WIDTH -
+						access_width;
+		comp1 = (mmio_addr - start) / access_width;
+		comp2 = (end - end_natural) / access_width;
+
+		base = ioremap_nocache(start, (comp1 + comp2 +
+			access_len) * access_width);
+		if (!base) {
+			seq_printf(s, "can't map physical address: %x\n",
+				mmio_addr);
+			if (pdev)
+				pm_runtime_put_sync(&pdev->dev);
+			return 0;
+		}
+
+		for (i = 0; i < comp1 + comp2 + access_len; i++) {
+			if ((i % SHOW_NUM_PER_LINE) == 0)
+					seq_printf(s, "[%08x]", start + i * 4);
+
+			if (i < comp1 || i >= access_len + comp1) {
+				switch (access_width) {
+				case ACCESS_WIDTH_32BIT:
+					seq_printf(s, "         ");
+					break;
+				case ACCESS_WIDTH_16BIT:
+					seq_printf(s, "     ");
+					break;
+				case ACCESS_WIDTH_8BIT:
+					seq_printf(s, "   ");
+					break;
+				}
+
+			} else {
+				switch (access_width) {
+				case ACCESS_WIDTH_32BIT:
+					seq_printf(s, " %08x",
+						ioread32(base + i * 4));
+					break;
+				case ACCESS_WIDTH_16BIT:
+					seq_printf(s, " %04x",
+						(u16) ioread16(base + i * 2));
+					break;
+				case ACCESS_WIDTH_8BIT:
+					seq_printf(s, " %02x",
+						(u8) ioread8(base + i));
+					break;
+				}
+			}
+
+			if ((i + 1) % SHOW_NUM_PER_LINE == 0)
+				seq_printf(s, "\n");
+		}
+	}
+
+	iounmap(base);
+	if (pdev)
+		pm_runtime_put_sync(&pdev->dev);
+	return 0;
+}
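+
+/*
+ * Worked example of the alignment math above (assuming LINE_WIDTH is 16
+ * and SHOW_NUM_PER_LINE is 4): a 32-bit read of 2 dwords at mmio_addr
+ * 0xff008004 gives start = 0xff008000, end_natural = 0xff008008,
+ * end = 0xff00800c, comp1 = 1 and comp2 = 1, so one full 16-byte line is
+ * mapped and printed with blanks padding the slots outside the requested
+ * range.
+ */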
+
+static int dump_output_show_port(struct seq_file *s)
+{
+	if (access_dir == ACCESS_DIR_WRITE) {
+		switch (access_width) {
+		case ACCESS_WIDTH_8BIT:
+		case ACCESS_WIDTH_DEFAULT:
+			outb((u8) access_value, port_addr);
+			break;
+		case ACCESS_WIDTH_16BIT:
+			outw((u16) access_value, port_addr);
+			break;
+		case ACCESS_WIDTH_32BIT:
+			outl(access_value, port_addr);
+			break;
+		default:
+			break; /* never happens */
+		}
+		seq_printf(s, "write succeeded\n");
+	} else {
+		switch (access_width) {
+		case ACCESS_WIDTH_32BIT:
+			seq_printf(s, " %08x\n", inl(port_addr));
+			break;
+		case ACCESS_WIDTH_16BIT:
+			seq_printf(s, " %04x\n", (u16) inw(port_addr));
+			break;
+		case ACCESS_WIDTH_8BIT:
+			seq_printf(s, " %02x\n", (u8) inb(port_addr));
+			break;
+		default:
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int dump_output_show_msg_bus(struct seq_file *s)
+{
+	int i, comp1, comp2;
+	u32 start, end, end_natural;
+
+	if (access_dir == ACCESS_DIR_WRITE) {
+		intel_mid_msgbus_write32(msg_bus_port,
+			msg_bus_addr, access_value);
+		seq_printf(s, "write succeeded\n");
+	} else {
+		start = (msg_bus_addr / LINE_WIDTH) * LINE_WIDTH;
+		end_natural = msg_bus_addr + (access_len - 1) * access_width;
+		end = (end_natural / LINE_WIDTH + 1) * LINE_WIDTH -
+						access_width;
+		comp1 = (msg_bus_addr - start) / access_width;
+		comp2 = (end - end_natural) / access_width;
+
+		for (i = 0; i < comp1 + comp2 + access_len; i++) {
+			if ((i % SHOW_NUM_PER_LINE) == 0)
+					seq_printf(s, "[%08x]", start + i * 4);
+
+			if (i < comp1 || i >= access_len + comp1)
+				seq_printf(s, "         ");
+
+			else
+				seq_printf(s, " %08x", intel_mid_msgbus_read32(
+					msg_bus_port,
+					msg_bus_addr + (i - comp1)));
+
+			if ((i + 1) % SHOW_NUM_PER_LINE == 0)
+				seq_printf(s, "\n");
+		}
+	}
+
+	return 0;
+}
+
+static int dump_output_show_pci(struct seq_file *s)
+{
+	int i, comp1, comp2;
+	u32 start, end, end_natural, val;
+	struct pci_dev *pdev;
+
+	pdev = pci_get_bus_and_slot(pci_bus, PCI_DEVFN(pci_dev, pci_func));
+	if (!pdev) {
+		seq_printf(s, "pci bus %d:%d:%d doesn't exist\n",
+			pci_bus, pci_dev, pci_func);
+		return 0;
+	}
+
+	if (pm_runtime_get_sync(&pdev->dev) < 0) {
+		seq_printf(s, "can't put pci device %d:%d:%d into D0i0 state\n",
+			pci_bus, pci_dev, pci_func);
+		return 0;
+	}
+
+	if (access_dir == ACCESS_DIR_WRITE) {
+		switch (access_width) {
+		case ACCESS_WIDTH_8BIT:
+			pci_write_config_byte(pdev, (int)pci_reg,
+					(u8)access_value);
+			break;
+		case ACCESS_WIDTH_16BIT:
+			pci_write_config_word(pdev, (int)pci_reg,
+				(u16)access_value);
+			break;
+		case ACCESS_WIDTH_32BIT:
+		case ACCESS_WIDTH_DEFAULT:
+			pci_write_config_dword(pdev, (int)pci_reg,
+				access_value);
+			break;
+		default:
+			break; /* never happens */
+		}
+		seq_printf(s, "write succeeded\n");
+	} else {
+		start = (pci_reg / LINE_WIDTH) * LINE_WIDTH;
+		end_natural = pci_reg + (access_len - 1) * access_width;
+		end = (end_natural / LINE_WIDTH + 1) * LINE_WIDTH -
+						access_width;
+		comp1 = (pci_reg - start) / access_width;
+		comp2 = (end - end_natural) / access_width;
+
+		for (i = 0; i < comp1 + comp2 + access_len; i++) {
+			if ((i % SHOW_NUM_PER_LINE) == 0)
+					seq_printf(s, "[%08x]", start + i * 4);
+
+			if (i < comp1 || i >= access_len + comp1) {
+				switch (access_width) {
+				case ACCESS_WIDTH_32BIT:
+					seq_printf(s, "         ");
+					break;
+				case ACCESS_WIDTH_16BIT:
+					seq_printf(s, "     ");
+					break;
+				case ACCESS_WIDTH_8BIT:
+					seq_printf(s, "   ");
+					break;
+				}
+
+			} else {
+				switch (access_width) {
+				case ACCESS_WIDTH_32BIT:
+					pci_read_config_dword(pdev,
+						start + i * 4, &val);
+					seq_printf(s, " %08x", val);
+					break;
+				case ACCESS_WIDTH_16BIT:
+					pci_read_config_word(pdev,
+						start + i * 2, (u16 *) &val);
+					seq_printf(s, " %04x", (u16)val);
+					break;
+				case ACCESS_WIDTH_8BIT:
+					pci_read_config_byte(pdev,
+						start + i, (u8 *) &val);
+					seq_printf(s, " %04x", (u8)val);
+					break;
+				}
+			}
+
+			if ((i + 1) % SHOW_NUM_PER_LINE == 0)
+				seq_printf(s, "\n");
+		}
+	}
+
+	return 0;
+}
+
+static int dump_output_show_msr(struct seq_file *s)
+{
+	int ret, i, count;
+	u32 data[2];
+
+	if (access_dir == ACCESS_DIR_READ) {
+		if (msr_cpu < 0) {
+			/* loop for all cpus */
+			i = 0;
+			count = nr_cpu_ids;
+		} else if (msr_cpu >= nr_cpu_ids) {
+			seq_printf(s, "cpu should be between 0 - %d\n",
+							nr_cpu_ids - 1);
+			return 0;
+		} else {
+			/* loop for one cpu */
+			i = msr_cpu;
+			count = msr_cpu + 1;
+		}
+		for (; i < count; i++) {
+			ret = rdmsr_safe_on_cpu(i, msr_reg, &data[0], &data[1]);
+			if (ret) {
+				seq_printf(s, "msr read error: %d\n", ret);
+				return 0;
+			} else {
+				if (access_width == ACCESS_WIDTH_32BIT)
+					seq_printf(s, "[cpu %1d] %08x\n",
+							i, data[0]);
+				else
+					seq_printf(s, "[cpu %1d] %08x%08x\n",
+						 i, data[1], data[0]);
+			}
+		}
+	} else {
+		if (access_width != ACCESS_WIDTH_32BIT) {
+			data[0] = (u32)access_value_64;
+			data[1] = (u32)(access_value_64 >> 32);
+		}
+		if (msr_cpu < 0) {
+			/* loop for all cpus */
+			i = 0;
+			count = nr_cpu_ids;
+		} else {
+			if (msr_cpu >= nr_cpu_ids) {
+				seq_printf(s, "cpu should be between 0 - %d\n",
+						nr_cpu_ids - 1);
+				return 0;
+			}
+			/* loop for one cpu */
+			i = msr_cpu;
+			count = msr_cpu + 1;
+		}
+		for (; i < count; i++) {
+			if (access_width == ACCESS_WIDTH_32BIT) {
+				/* read-modify-write per cpu so the upper
+				 * 32 bits of the MSR are preserved
+				 */
+				ret = rdmsr_safe_on_cpu(i, msr_reg,
+						&data[0], &data[1]);
+				if (ret) {
+					seq_printf(s, "msr read error: %d\n",
+									ret);
+					return 0;
+				}
+				data[0] = access_value;
+			}
+			ret = wrmsr_safe_on_cpu(i, msr_reg, data[0], data[1]);
+			if (ret) {
+				seq_printf(s, "msr write error: %d\n", ret);
+				return 0;
+			}
+			seq_printf(s, "write succeeded.\n");
+		}
+	}
+
+	return 0;
+}
+
+static int dump_output_show_i2c(struct seq_file *s)
+{
+	int ret;
+	struct i2c_adapter *adap;
+	struct i2c_msg msg;
+	u8 val;
+
+	adap = i2c_get_adapter(i2c_bus);
+	if (!adap) {
+		seq_printf(s, "can't find bus adapter for i2c bus %d\n",
+							i2c_bus);
+		return 0;
+	}
+
+	if (access_dir == ACCESS_DIR_WRITE) {
+		msg.addr = i2c_addr;
+		msg.flags = 0;
+		msg.len = 1;
+		msg.buf = (u8 *) &access_value;
+		ret = i2c_transfer(adap, &msg, 1);
+		if (ret != 1)
+			seq_printf(s, "i2c write error: %d\n", ret);
+		else
+			seq_printf(s, "write succeeded.\n");
+	} else {
+		msg.addr = i2c_addr;
+		msg.flags = I2C_M_RD;
+		msg.len = 1;
+		msg.buf = &val;
+		ret = i2c_transfer(adap, &msg, 1);
+		if (ret != 1)
+			seq_printf(s, "i2c read error: %d\n", ret);
+		else
+			seq_printf(s, "%02x\n", val);
+	}
+
+	i2c_put_adapter(adap);
+	return 0;
+}
+
+static int dump_output_show_scu(struct seq_file *s)
+{
+	struct pci_dev *pdev;
+	char *name;
+	int ret;
+	u32 cmd, sub = 0, dptr = 0, sptr = 0;
+	u8 wbuflen = 4, rbuflen = 4;
+	u8 wbuf[16];
+	u8 rbuf[16];
+
+	memset(wbuf, 0, 16);
+	memset(rbuf, 0, 16);
+
+	pdev = mmio_to_pci(scu_addr, &name);
+	if (pdev && pm_runtime_get_sync(&pdev->dev) < 0) {
+		seq_printf(s, "can't put device %s into D0i0 state\n", name);
+		return 0;
+	}
+
+	if (access_dir == ACCESS_DIR_WRITE) {
+		cmd = RP_INDIRECT_WRITE;
+		dptr = scu_addr;
+		wbuf[0] = (u8) (access_value & 0xff);
+		wbuf[1] = (u8) ((access_value >> 8) & 0xff);
+		wbuf[2] = (u8) ((access_value >> 16) & 0xff);
+		wbuf[3] = (u8) ((access_value >> 24) & 0xff);
+
+		ret = rpmsg_send_generic_raw_command(cmd, sub, wbuf, wbuflen,
+			(u32 *)rbuf, rbuflen, dptr, sptr);
+
+		if (ret) {
+			seq_printf(s,
+				"Indirect write failed (check dmesg): "
+						"[%08x]\n", scu_addr);
+		} else {
+			seq_printf(s, "write succeeded\n");
+		}
+	} else if (access_dir == ACCESS_DIR_READ) {
+		cmd = RP_INDIRECT_READ;
+		sptr = scu_addr;
+
+		ret = rpmsg_send_generic_raw_command(cmd, sub, wbuf, wbuflen,
+			(u32 *)rbuf, rbuflen, dptr, sptr);
+
+		if (ret) {
+			seq_printf(s,
+				"Indirect read failed (check dmesg): "
+						"[%08x]\n", scu_addr);
+		} else {
+			access_value = (rbuf[3] << 24) | (rbuf[2] << 16) |
+				(rbuf[1] << 8) | (rbuf[0]);
+			seq_printf(s, "[%08x] %08x\n", scu_addr, access_value);
+		}
+	}
+
+	if (pdev)
+		pm_runtime_put_sync(&pdev->dev);
+
+	return 0;
+}
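+
+/*
+ * The indirect SCU access above round-trips one 32-bit word through the
+ * SCU mailbox: for writes the value is split little-endian into
+ * wbuf[0..3], and for reads rbuf[0..3] is reassembled the same way.
+ */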
+
+static int dump_output_show(struct seq_file *s, void *unused)
+{
+	int ret = 0;
+
+	if (!dump_cmd_was_set) {
+		seq_printf(s, "%s", err_buf);
+		return 0;
+	}
+
+	switch (access_bus) {
+	case ACCESS_BUS_MMIO:
+		ret = dump_output_show_mmio(s);
+		break;
+	case ACCESS_BUS_PORT:
+		ret = dump_output_show_port(s);
+		break;
+	case ACCESS_BUS_MSG_BUS:
+		ret = dump_output_show_msg_bus(s);
+		break;
+	case ACCESS_BUS_PCI:
+		ret = dump_output_show_pci(s);
+		break;
+	case ACCESS_BUS_MSR:
+		ret = dump_output_show_msr(s);
+		break;
+	case ACCESS_BUS_I2C:
+		ret = dump_output_show_i2c(s);
+		break;
+	case ACCESS_BUS_SCU_INDRW:
+		ret = dump_output_show_scu(s);
+		break;
+	default:
+		seq_printf(s, "unknown bus type: %d\n", access_bus);
+		break;
+	}
+
+	return ret;
+}
+
+static const struct file_operations dump_cmd_fops = {
+	.owner		= THIS_MODULE,
+	.open		= dump_cmd_open,
+	.read		= seq_read,
+	.write		= dump_cmd_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int dump_output_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dump_output_show, NULL);
+}
+
+static const struct file_operations dump_output_fops = {
+	.owner		= THIS_MODULE,
+	.open		= dump_output_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int __init intel_mid_dump_init(void)
+{
+	dump_cmd_dentry = debugfs_create_file("dump_cmd",
+		S_IFREG | S_IRUGO | S_IWUSR, NULL, NULL, &dump_cmd_fops);
+	dump_output_dentry = debugfs_create_file("dump_output",
+		S_IFREG | S_IRUGO, NULL, NULL, &dump_output_fops);
+	if (!dump_cmd_dentry || !dump_output_dentry) {
+		pr_err("intel_mid_dump: can't create debugfs node\n");
+		debugfs_remove(dump_cmd_dentry);
+		debugfs_remove(dump_output_dentry);
+		return -ENOMEM;
+	}
+	return 0;
+}
+module_init(intel_mid_dump_init);
+
+static void __exit intel_mid_dump_exit(void)
+{
+	if (dump_cmd_dentry)
+		debugfs_remove(dump_cmd_dentry);
+	if (dump_output_dentry)
+		debugfs_remove(dump_output_dentry);
+}
+module_exit(intel_mid_dump_exit);
+
+MODULE_DESCRIPTION("Intel Atom SoC register dump driver");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Bin Gao <bin.gao@intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/x86/platform/intel-mid/intel_soc_mdfld.c b/arch/x86/platform/intel-mid/intel_soc_mdfld.c
new file mode 100644
index 0000000..e86d1b5
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_soc_mdfld.c
@@ -0,0 +1,178 @@
+/*
+ * intel_soc_mdfld.c - This driver provides utility APIs for the Medfield
+ * platform
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include "intel_soc_pmu.h"
+
+/* Clear the C6 offload bit (LSB) in MSR 120 */
+static inline void clear_c6offload_bit(void)
+{
+	u32 msr_low, msr_high;
+
+	rdmsr(MSR_C6OFFLOAD_CTL_REG, msr_low, msr_high);
+	msr_low = msr_low & ~MSR_C6OFFLOAD_SET_LOW;
+	msr_high = msr_high & ~MSR_C6OFFLOAD_SET_HIGH;
+	wrmsr(MSR_C6OFFLOAD_CTL_REG, msr_low, msr_high);
+}
+
+/* Set the C6 offload bit (LSB) in MSR 120 */
+static inline void set_c6offload_bit(void)
+{
+	u32 msr_low, msr_high;
+
+	rdmsr(MSR_C6OFFLOAD_CTL_REG, msr_low, msr_high);
+	msr_low = msr_low | MSR_C6OFFLOAD_SET_LOW;
+	msr_high = msr_high | MSR_C6OFFLOAD_SET_HIGH;
+	wrmsr(MSR_C6OFFLOAD_CTL_REG, msr_low, msr_high);
+}
+
+static bool mfld_pmu_enter(int s0ix_state)
+{
+	u32 s0ix_value;
+	u32 ssw_val;
+	int num_retry = PMU_MISC_SET_TIMEOUT;
+
+	s0ix_value = get_s0ix_val_set_pm_ssc(s0ix_state);
+
+	clear_c6offload_bit();
+
+	/* issue a command to SCU */
+	pmu_set_interrupt_enable();
+	writel(s0ix_value, &mid_pmu_cxt->pmu_reg->pm_cmd);
+
+	pmu_log_command(s0ix_value, NULL);
+
+	do {
+		if (readl(&mid_pmu_cxt->pmu_reg->pm_msic))
+			break;
+		udelay(1);
+	} while (--num_retry);
+
+	if (!num_retry && !readl(&mid_pmu_cxt->pmu_reg->pm_msic))
+		WARN(1, "%s: pm_msic not set.\n", __func__);
+
+	num_retry = PMU_C6OFFLOAD_ACCESS_TIMEOUT;
+
+	/* At this point we have committed an S0ix command and
+	 * will have to wait for the SCU s0ix complete
+	 * interrupt to proceed further.
+	 */
+	mid_pmu_cxt->s0ix_entered = s0ix_state;
+
+	if (s0ix_value == S0I3_VALUE) {
+		do {
+			ssw_val = readl(mid_pmu_cxt->base_addr.offload_reg);
+			if ((ssw_val & C6OFFLOAD_BIT_MASK) == C6OFFLOAD_BIT) {
+				set_c6offload_bit();
+				break;
+			}
+
+			udelay(1);
+		} while (--num_retry);
+
+		if (unlikely(!num_retry)) {
+			WARN(1, "mid_pmu: error cpu offload bit not set.\n");
+			pmu_stat_clear();
+			return false;
+		}
+	}
+
+	return true;
+}
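+
+/*
+ * Note on the entry sequence above: the C6 offload bit is cleared before
+ * the S0ix command is issued, and is set again only once the SCU reports
+ * (via the offload status register) that S0i3 entry may proceed; on
+ * timeout the attempt is abandoned and the statistics are cleared.
+ */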
+
+static void mfld_pmu_wakeup(void)
+{
+	/* Wake up all other CPUs */
+	if (mid_pmu_cxt->s0ix_entered)
+		apic->send_IPI_allbutself(RESCHEDULE_VECTOR);
+
+	clear_c6offload_bit();
+}
+
+static void mfld_pmu_remove(void)
+{
+	/* Unmap the offload register mapped in mfld_pmu_init() */
+	iounmap(mid_pmu_cxt->base_addr.offload_reg);
+	mid_pmu_cxt->base_addr.offload_reg = NULL;
+}
+
+static pci_power_t mfld_pmu_choose_state(int device_lss)
+{
+	pci_power_t state;
+
+	switch (device_lss) {
+	case PMU_SECURITY_LSS_04:
+		state = PCI_D2;
+		break;
+
+	case PMU_USB_OTG_LSS_06:
+	case PMU_USB_HSIC_LSS_07:
+	case PMU_UART2_LSS_41:
+		state = PCI_D1;
+		break;
+
+	default:
+		state = PCI_D3hot;
+		break;
+	}
+
+	return state;
+}
+
+static int mfld_pmu_init(void)
+{
+	int ret = PMU_SUCCESS;
+
+	/* Map the memory of offload_reg */
+	mid_pmu_cxt->base_addr.offload_reg =
+				ioremap_nocache(C6_OFFLOAD_REG_ADDR, 4);
+	if (mid_pmu_cxt->base_addr.offload_reg == NULL) {
+		dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+		"Unable to map the offload_reg address space\n");
+		ret = PMU_FAILED;
+		goto out_err;
+	}
+
+	mid_pmu_cxt->s3_hint = C6_HINT;
+
+out_err:
+	return ret;
+}
+
+/**
+ * platform_set_pmu_ops - Set the global pmu method table to the
+ * Medfield ops (mfld_pmu_ops).
+ */
+void platform_set_pmu_ops(void)
+{
+	pmu_ops = &mfld_pmu_ops;
+}
+
+struct platform_pmu_ops mfld_pmu_ops = {
+	.init	 = mfld_pmu_init,
+	.enter	 = mfld_pmu_enter,
+	.wakeup = mfld_pmu_wakeup,
+	.remove = mfld_pmu_remove,
+	.pci_choose_state = mfld_pmu_choose_state,
+	.set_power_state_ops = pmu_set_s0ix_possible,
+	.set_s0ix_complete = s0ix_complete,
+	.nc_set_power_state = mdfld_clv_nc_set_power_state,
+};
diff --git a/arch/x86/platform/intel-mid/intel_soc_mdfld.h b/arch/x86/platform/intel-mid/intel_soc_mdfld.h
new file mode 100644
index 0000000..85c25b5
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_soc_mdfld.h
@@ -0,0 +1,353 @@
+/*
+ * intel_soc_mdfld.h
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifdef CONFIG_REMOVEME_INTEL_REMOVEME_ATOM_MDFLD_POWER
+
+#define   PM_SUPPORT				0x21
+
+#define ISP_POS			7
+#define ISP_SUB_CLASS		0x80
+#define C6_OFFLOAD_REG_ADDR	0xffd01ffc
+#define PMU_MISC_SET_TIMEOUT	50 /* 50usec timeout */
+#define PMU_C6OFFLOAD_ACCESS_TIMEOUT 1500 /* 1.5msecs timeout */
+
+#define PMU1_MAX_DEVS   8
+#define PMU2_MAX_DEVS   55
+
+#define GFX_LSS_INDEX			1
+#define PMU_SDIO0_LSS_00		0
+#define PMU_EMMC0_LSS_01		1
+#define PMU_AONT_LSS_02			2
+#define PMU_HSI_LSS_03			3
+#define PMU_SECURITY_LSS_04		4
+#define PMU_EMMC1_LSS_05		5
+#define PMU_USB_OTG_LSS_06		6
+#define PMU_USB_HSIC_LSS_07		7
+#define PMU_AUDIO_ENGINE_LSS_08		8
+#define PMU_AUDIO_DMA_LSS_09		9
+#define PMU_SRAM_LSS_10			10
+#define PMU_SRAM_LSS_11			11
+#define PMU_SRAM_LSS_12			12
+#define PMU_SRAM_LSS_13			13
+#define PMU_SDIO2_LSS_14		14
+#define PMU_PTI_DAFCA_LSS_15		15
+#define PMU_SC_DMA_LSS_16		16
+#define PMU_SPIO_LSS_17			17
+#define PMU_SPI1_LSS_18			18
+#define PMU_SPI2_LSS_19			19
+#define PMU_I2C0_LSS_20			20
+#define PMU_I2C1_LSS_21			21
+#define PMU_MAIN_FABRIC_LSS_22		22
+#define PMU_SEC_FABRIC_LSS_23		23
+#define PMU_SC_FABRIC_LSS_24		24
+#define PMU_AUDIO_RAM_LSS_25		25
+#define PMU_SCU_ROM_LSS_26		26
+#define PMU_I2C2_LSS_27			27
+#define PMU_SSC_LSS_28			28
+#define PMU_SECURITY_LSS_29		29
+#define PMU_SDIO1_LSS_30		30
+#define PMU_SCU_RAM0_LSS_31		31
+#define PMU_SCU_RAM1_LSS_32		32
+#define PMU_I2C3_LSS_33			33
+#define PMU_I2C4_LSS_34			34
+#define PMU_I2C5_LSS_35			35
+#define PMU_SPI3_LSS_36			36
+#define PMU_GPIO1_LSS_37		37
+#define PMU_PWR_BUTTON_LSS_38		38
+#define PMU_GPIO0_LSS_39		39
+#define PMU_KEYBRD_LSS_40		40
+#define PMU_UART2_LSS_41		41
+#define PMU_ADC_LSS_42			42
+#define PMU_CHARGER_LSS_43		43
+#define PMU_SEC_TAPC_LSS_44		44
+#define PMU_RTC_LSS_45			45
+#define PMU_GPI_LSS_46			46
+#define PMU_HDMI_VREG_LSS_47		47
+#define PMU_RESERVED_LSS_48		48
+#define PMU_AUDIO_SLIM1_LSS_49		49
+#define PMU_RESET_LSS_50		50
+#define PMU_AUDIO_SSP0_LSS_51		51
+#define PMU_AUDIO_SSP1_LSS_52		52
+#define PMU_IOSF_OCP_BRG_LSS_53		53
+#define PMU_GP_DMA_LSS_54		54
+#define PMU_SVID_LSS_55			55
+#define PMU_SOC_FUSE_LSS_56		56
+#define PMU_RSVD3_LSS_57		57
+#define PMU_RSVD4_LSS_58		58
+#define PMU_RSVD5_LSS_59		59
+#define PMU_RSVD6_LSS_60		60
+#define PMU_RSVD7_LSS_61		61
+#define PMU_RSVD8_LSS_62		62
+#define PMU_RSVD9_LSS_63		63
+
+#define PMU_MAX_LSS			63
+#define PMU_LSS_IN_FIRST_DWORD		32
+
+#define EMMC0_LSS			PMU_EMMC0_LSS_01
+
+#define S0IX_TARGET_SSS0_MASK ( \
+	SSMSK(D0I3_MASK, PMU_SDIO0_LSS_00) | \
+	SSMSK(D0I3_MASK, PMU_EMMC0_LSS_01) | \
+	SSMSK(D0I3_MASK, PMU_HSI_LSS_03) | \
+	SSMSK(D0I3_MASK, PMU_SECURITY_LSS_04) | \
+	SSMSK(D0I3_MASK, PMU_EMMC1_LSS_05) | \
+	SSMSK(D0I3_MASK, PMU_USB_OTG_LSS_06) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_ENGINE_LSS_08) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_DMA_LSS_09) | \
+	SSMSK(D0I3_MASK, PMU_SDIO2_LSS_14))
+
+#define S0IX_TARGET_SSS1_MASK ( \
+	SSMSK(D0I3_MASK, PMU_SPI1_LSS_18-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C0_LSS_20-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C1_LSS_21-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C2_LSS_27-16) | \
+	SSMSK(D0I3_MASK, PMU_SDIO1_LSS_30-16))
+#define S0IX_TARGET_SSS2_MASK ( \
+	SSMSK(D0I3_MASK, PMU_I2C3_LSS_33-32) | \
+	SSMSK(D0I3_MASK, PMU_I2C4_LSS_34-32) | \
+	SSMSK(D0I3_MASK, PMU_I2C5_LSS_35-32) | \
+	SSMSK(D0I3_MASK, PMU_SPI3_LSS_36-32) | \
+	SSMSK(D0I3_MASK, PMU_UART2_LSS_41-32))
+
+#define S0IX_TARGET_SSS3_MASK ( \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SSP0_LSS_51-48) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SSP1_LSS_52-48))
+
+#define S0IX_TARGET_SSS0 ( \
+	SSMSK(D0I3_MASK, PMU_SDIO0_LSS_00) | \
+	SSMSK(D0I3_MASK, PMU_EMMC0_LSS_01) | \
+	SSMSK(D0I3_MASK, PMU_HSI_LSS_03) | \
+	SSMSK(D0I2_MASK, PMU_SECURITY_LSS_04) | \
+	SSMSK(D0I3_MASK, PMU_EMMC1_LSS_05) | \
+	SSMSK(D0I1_MASK, PMU_USB_OTG_LSS_06) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_ENGINE_LSS_08) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_DMA_LSS_09) | \
+	SSMSK(D0I3_MASK, PMU_SDIO2_LSS_14))
+
+#define S0IX_TARGET_SSS1 ( \
+	SSMSK(D0I3_MASK, PMU_SPI1_LSS_18-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C0_LSS_20-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C1_LSS_21-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C2_LSS_27-16) | \
+	SSMSK(D0I3_MASK, PMU_SDIO1_LSS_30-16))
+
+#define S0IX_TARGET_SSS2 ( \
+	SSMSK(D0I3_MASK, PMU_I2C3_LSS_33-32) | \
+	SSMSK(D0I3_MASK, PMU_I2C4_LSS_34-32) | \
+	SSMSK(D0I3_MASK, PMU_I2C5_LSS_35-32) | \
+	SSMSK(D0I3_MASK, PMU_SPI3_LSS_36-32) | \
+	SSMSK(D0I1_MASK, PMU_UART2_LSS_41-32))
+
+#define S0IX_TARGET_SSS3 ( \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SSP0_LSS_51-48) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SSP1_LSS_52-48))
+
+#define LPMP3_TARGET_SSS0_MASK ( \
+	SSMSK(D0I3_MASK, PMU_SDIO0_LSS_00) | \
+	SSMSK(D0I3_MASK, PMU_EMMC0_LSS_01) | \
+	SSMSK(D0I3_MASK, PMU_HSI_LSS_03) | \
+	SSMSK(D0I3_MASK, PMU_SECURITY_LSS_04) | \
+	SSMSK(D0I3_MASK, PMU_EMMC1_LSS_05) | \
+	SSMSK(D0I3_MASK, PMU_USB_OTG_LSS_06) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_ENGINE_LSS_08) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_DMA_LSS_09) | \
+	SSMSK(D0I3_MASK, PMU_SDIO2_LSS_14))
+
+#define LPMP3_TARGET_SSS1_MASK ( \
+	SSMSK(D0I3_MASK, PMU_SPI1_LSS_18-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C0_LSS_20-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C1_LSS_21-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C2_LSS_27-16) | \
+	SSMSK(D0I3_MASK, PMU_SDIO1_LSS_30-16))
+
+#define LPMP3_TARGET_SSS2_MASK ( \
+	SSMSK(D0I3_MASK, PMU_I2C3_LSS_33-32) | \
+	SSMSK(D0I3_MASK, PMU_I2C4_LSS_34-32) | \
+	SSMSK(D0I3_MASK, PMU_I2C5_LSS_35-32) | \
+	SSMSK(D0I3_MASK, PMU_SPI3_LSS_36-32) | \
+	SSMSK(D0I3_MASK, PMU_UART2_LSS_41-32))
+
+#define LPMP3_TARGET_SSS3_MASK ( \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SSP0_LSS_51-48) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SSP1_LSS_52-48))
+
+#define LPMP3_TARGET_SSS0 ( \
+	SSMSK(D0I3_MASK, PMU_SDIO0_LSS_00) | \
+	SSMSK(D0I3_MASK, PMU_EMMC0_LSS_01) | \
+	SSMSK(D0I3_MASK, PMU_HSI_LSS_03) | \
+	SSMSK(D0I2_MASK, PMU_SECURITY_LSS_04) | \
+	SSMSK(D0I3_MASK, PMU_EMMC1_LSS_05) | \
+	SSMSK(D0I1_MASK, PMU_USB_OTG_LSS_06) | \
+	SSMSK(D0I0_MASK, PMU_AUDIO_ENGINE_LSS_08) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_DMA_LSS_09) | \
+	SSMSK(D0I3_MASK, PMU_SDIO2_LSS_14))
+
+#define LPMP3_TARGET_SSS1 ( \
+	SSMSK(D0I3_MASK, PMU_SPI1_LSS_18-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C0_LSS_20-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C1_LSS_21-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C2_LSS_27-16) | \
+	SSMSK(D0I3_MASK, PMU_SDIO1_LSS_30-16))
+
+#define LPMP3_TARGET_SSS2 ( \
+	SSMSK(D0I3_MASK, PMU_I2C3_LSS_33-32) | \
+	SSMSK(D0I3_MASK, PMU_I2C4_LSS_34-32) | \
+	SSMSK(D0I3_MASK, PMU_I2C5_LSS_35-32) | \
+	SSMSK(D0I3_MASK, PMU_SPI3_LSS_36-32) | \
+	SSMSK(D0I1_MASK, PMU_UART2_LSS_41-32))
+
+#define LPMP3_TARGET_SSS3 ( \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SSP0_LSS_51-48) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SSP1_LSS_52-48))
+
+#define IGNORE_SSS0 ( \
+	SSMSK(D0I3_MASK, PMU_USB_HSIC_LSS_07) | \
+	SSMSK(D0I3_MASK, PMU_SRAM_LSS_10) | \
+	SSMSK(D0I3_MASK, PMU_SRAM_LSS_11) | \
+	SSMSK(D0I3_MASK, PMU_SRAM_LSS_12) | \
+	SSMSK(D0I3_MASK, PMU_SRAM_LSS_13) | \
+	SSMSK(D0I3_MASK, PMU_PTI_DAFCA_LSS_15))
+
+#define IGNORE_SSS1 ( \
+	SSMSK(D0I3_MASK, PMU_SC_DMA_LSS_16-16) | \
+	SSMSK(D0I3_MASK, PMU_SPIO_LSS_17-16) | \
+	SSMSK(D0I3_MASK, PMU_MAIN_FABRIC_LSS_22-16) | \
+	SSMSK(D0I3_MASK, PMU_SEC_FABRIC_LSS_23-16) | \
+	SSMSK(D0I3_MASK, PMU_SC_FABRIC_LSS_24-16) | \
+	SSMSK(D0I3_MASK, PMU_SCU_ROM_LSS_26-16) | \
+	SSMSK(D0I3_MASK, PMU_SSC_LSS_28-16) | \
+	SSMSK(D0I3_MASK, PMU_SECURITY_LSS_29-16) | \
+	SSMSK(D0I3_MASK, PMU_SCU_RAM0_LSS_31-16))
+
+#define IGNORE_SSS2 ( \
+	SSMSK(D0I3_MASK, PMU_SCU_RAM1_LSS_32-32) | \
+	SSMSK(D0I3_MASK, PMU_GPIO1_LSS_37-32) | \
+	SSMSK(D0I3_MASK, PMU_PWR_BUTTON_LSS_38-32) | \
+	SSMSK(D0I3_MASK, PMU_GPIO0_LSS_39-32) | \
+	SSMSK(D0I3_MASK, PMU_ADC_LSS_42-32) | \
+	SSMSK(D0I3_MASK, PMU_CHARGER_LSS_43-32) | \
+	SSMSK(D0I3_MASK, PMU_SEC_TAPC_LSS_44-32) | \
+	SSMSK(D0I3_MASK, PMU_RTC_LSS_45-32) | \
+	SSMSK(D0I3_MASK, PMU_GPI_LSS_46-32) | \
+	SSMSK(D0I3_MASK, PMU_HDMI_VREG_LSS_47-32))
+
+#define IGNORE_SSS3 ( \
+	SSMSK(D0I3_MASK, PMU_IOSF_OCP_BRG_LSS_53-48) | \
+	SSMSK(D0I3_MASK, PMU_SVID_LSS_55-48) | \
+	SSMSK(D0I3_MASK, PMU_SOC_FUSE_LSS_56-48) | \
+	SSMSK(D0I3_MASK, PMU_RSVD3_LSS_57-48) | \
+	SSMSK(D0I3_MASK, PMU_RSVD4_LSS_58-48) | \
+	SSMSK(D0I3_MASK, PMU_RSVD5_LSS_59-48) | \
+	SSMSK(D0I3_MASK, PMU_RSVD6_LSS_60-48) | \
+	SSMSK(D0I3_MASK, PMU_RSVD7_LSS_61-48) | \
+	SSMSK(D0I3_MASK, PMU_RSVD8_LSS_62-48) | \
+	SSMSK(D0I3_MASK, PMU_RSVD9_LSS_63-48))
+
+#define IGNORE_S3_WKC0 SSWKC(PMU_AONT_LSS_02)
+#define IGNORE_S3_WKC1 SSWKC(PMU_ADC_LSS_42-32)
+
+#define S0I3_SSS0 ( \
+	SSMSK(D0I3_MASK, PMU_SDIO0_LSS_00) | \
+	SSMSK(D0I3_MASK, PMU_EMMC0_LSS_01) | \
+	SSMSK(D0I3_MASK, PMU_AONT_LSS_02) | \
+	SSMSK(D0I3_MASK, PMU_HSI_LSS_03) | \
+	SSMSK(D0I2_MASK, PMU_SECURITY_LSS_04) | \
+	SSMSK(D0I3_MASK, PMU_EMMC1_LSS_05) | \
+	SSMSK(D0I1_MASK, PMU_USB_OTG_LSS_06) | \
+	SSMSK(D0I1_MASK, PMU_USB_HSIC_LSS_07) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_ENGINE_LSS_08) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_DMA_LSS_09) | \
+	SSMSK(D0I3_MASK, PMU_SRAM_LSS_12) | \
+	SSMSK(D0I3_MASK, PMU_SRAM_LSS_13) | \
+	SSMSK(D0I3_MASK, PMU_SDIO2_LSS_14))
+
+#define S0I3_SSS1 ( \
+	SSMSK(D0I3_MASK, PMU_SPI1_LSS_18-16) | \
+	SSMSK(D0I3_MASK, PMU_SPI2_LSS_19-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C0_LSS_20-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C1_LSS_21-16) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_RAM_LSS_25-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C2_LSS_27-16) | \
+	SSMSK(D0I3_MASK, PMU_SDIO1_LSS_30-16))
+
+#define S0I3_SSS2 ( \
+	SSMSK(D0I3_MASK, PMU_I2C3_LSS_33-32) | \
+	SSMSK(D0I3_MASK, PMU_I2C4_LSS_34-32) | \
+	SSMSK(D0I3_MASK, PMU_I2C5_LSS_35-32) | \
+	SSMSK(D0I3_MASK, PMU_SPI3_LSS_36-32) | \
+	SSMSK(D0I3_MASK, PMU_GPIO1_LSS_37-32) | \
+	SSMSK(D0I3_MASK, PMU_PWR_BUTTON_LSS_38-32) | \
+	SSMSK(D0I3_MASK, PMU_KEYBRD_LSS_40-32) | \
+	SSMSK(D0I1_MASK, PMU_UART2_LSS_41-32))
+
+#define S0I3_SSS3 ( \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SLIM1_LSS_49-48) | \
+	SSMSK(D0I3_MASK, PMU_RESET_LSS_50-48) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SSP0_LSS_51-48) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SSP1_LSS_52-48) | \
+	SSMSK(D0I3_MASK, PMU_GP_DMA_LSS_54-48))
+
+#define S0I1_SSS0 S0I3_SSS0
+#define S0I1_SSS1 S0I3_SSS1
+#define S0I1_SSS2 S0I3_SSS2
+#define S0I1_SSS3 S0I3_SSS3
+
+#define LPMP3_SSS0 ( \
+	SSMSK(D0I3_MASK, PMU_SDIO0_LSS_00) | \
+	SSMSK(D0I3_MASK, PMU_EMMC0_LSS_01) | \
+	SSMSK(D0I3_MASK, PMU_AONT_LSS_02) | \
+	SSMSK(D0I3_MASK, PMU_HSI_LSS_03) | \
+	SSMSK(D0I2_MASK, PMU_SECURITY_LSS_04) | \
+	SSMSK(D0I3_MASK, PMU_EMMC1_LSS_05) | \
+	SSMSK(D0I1_MASK, PMU_USB_OTG_LSS_06) | \
+	SSMSK(D0I1_MASK, PMU_USB_HSIC_LSS_07) | \
+	SSMSK(D0I3_MASK, PMU_SDIO2_LSS_14))
+
+#define LPMP3_SSS1 ( \
+	SSMSK(D0I3_MASK, PMU_SPI1_LSS_18-16) | \
+	SSMSK(D0I3_MASK, PMU_SPI2_LSS_19-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C0_LSS_20-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C1_LSS_21-16) | \
+	SSMSK(D0I3_MASK, PMU_I2C2_LSS_27-16) | \
+	SSMSK(D0I3_MASK, PMU_SDIO1_LSS_30-16))
+
+#define LPMP3_SSS2 ( \
+	SSMSK(D0I3_MASK, PMU_I2C3_LSS_33-32) | \
+	SSMSK(D0I3_MASK, PMU_I2C4_LSS_34-32) | \
+	SSMSK(D0I3_MASK, PMU_I2C5_LSS_35-32) | \
+	SSMSK(D0I3_MASK, PMU_SPI3_LSS_36-32) | \
+	SSMSK(D0I3_MASK, PMU_GPIO1_LSS_37-32) | \
+	SSMSK(D0I3_MASK, PMU_PWR_BUTTON_LSS_38-32) | \
+	SSMSK(D0I3_MASK, PMU_KEYBRD_LSS_40-32) | \
+	SSMSK(D0I1_MASK, PMU_UART2_LSS_41-32))
+
+#define LPMP3_SSS3 ( \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SLIM1_LSS_49-48) | \
+	SSMSK(D0I3_MASK, PMU_RESET_LSS_50-48) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SSP0_LSS_51-48) | \
+	SSMSK(D0I3_MASK, PMU_AUDIO_SSP1_LSS_52-48) | \
+	SSMSK(D0I3_MASK, PMU_GP_DMA_LSS_54-48))
+
+extern void pmu_set_s0ix_possible(int state);
+extern void log_wakeup_irq(void);
+extern void s0ix_complete(void);
+extern int mdfld_clv_nc_set_power_state(int, int, int, int *);
+
+#endif
diff --git a/arch/x86/platform/intel-mid/intel_soc_mdfld_clv_common.c b/arch/x86/platform/intel-mid/intel_soc_mdfld_clv_common.c
new file mode 100644
index 0000000..8ae0930
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_soc_mdfld_clv_common.c
@@ -0,0 +1,451 @@
+/*
+ * intel_soc_mdfld_clv_common.c - This driver provides utility APIs common
+ * to the mdfld and clv platforms
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include "intel_soc_pmu.h"
+
+static int extended_cstate_mode = MID_S0IX_STATE;
+int set_extended_cstate_mode(const char *val, struct kernel_param *kp)
+{
+	char valcp[5];
+	int cstate_mode;
+
+	/* copy at most 4 characters plus NUL; a plain memcpy could read
+	 * past the end of a shorter input string
+	 */
+	strlcpy(valcp, val, sizeof(valcp));
+
+	if (strcmp(valcp, "s0i1") == 0)
+		cstate_mode = MID_S0I1_STATE;
+	else if (strcmp(valcp, "lmp3") == 0)
+		cstate_mode = MID_LPMP3_STATE;
+	else if (strcmp(valcp, "s0i3") == 0)
+		cstate_mode = MID_S0I3_STATE;
+	else if (strcmp(valcp, "i1i3") == 0)
+		cstate_mode = MID_I1I3_STATE;
+	else if (strcmp(valcp, "lpi1") == 0)
+		cstate_mode = MID_LPI1_STATE;
+	else if (strcmp(valcp, "lpi3") == 0)
+		cstate_mode = MID_LPI3_STATE;
+	else if (strcmp(valcp, "s0ix") == 0)
+		cstate_mode = MID_S0IX_STATE;
+	else {
+		cstate_mode = 0;
+		strncpy(valcp, "none", 5);
+	}
+	memcpy(s0ix, valcp, 5);
+
+	down(&mid_pmu_cxt->scu_ready_sem);
+	extended_cstate_mode = cstate_mode;
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	return 0;
+}
+
+int get_extended_cstate_mode(char *buffer, struct kernel_param *kp)
+{
+	strcpy(buffer, s0ix);
+	return 4;
+}
+
+/*
+ * Decide which state the platform can go to, based on user and
+ * platform inputs.
+ */
+static int get_final_state(unsigned long *eax)
+{
+	int ret = 0;
+	int possible = mid_pmu_cxt->s0ix_possible;
+
+	switch (extended_cstate_mode) {
+	case MID_S0I1_STATE:
+	case MID_S0I3_STATE:
+	case MID_I1I3_STATE:
+		/* if the user asks for s0i1/s0i3, only do
+		 * s0i1/s0i3; don't do lpmp3
+		 */
+		if (possible == MID_S0IX_STATE)
+			ret = extended_cstate_mode & possible;
+		break;
+
+	case MID_LPMP3_STATE:
+		/* if the user asks for lpmp3, only do lpmp3 */
+		if (possible == MID_LPMP3_STATE)
+			ret = MID_LPMP3_STATE;
+		break;
+
+	case MID_LPI1_STATE:
+	case MID_LPI3_STATE:
+		/* if the user asks for lpmp3/i1/i3, only do
+		 * lpmp3/i1/i3
+		 */
+		if (possible == MID_LPMP3_STATE)
+			ret = MID_LPMP3_STATE;
+		else if (possible == MID_S0IX_STATE)
+			ret = extended_cstate_mode >> REMOVE_LP_FROM_LPIX;
+		break;
+
+	case MID_S0IX_STATE:
+		ret = possible;
+		break;
+	}
+
+	if ((ret == MID_S0IX_STATE) &&
+			(*eax == MID_LPMP3_STATE))
+		ret = MID_S0I1_STATE;
+	else if ((ret <= *eax ||
+			(ret == MID_S0IX_STATE)))
+		ret = ret & *eax;
+	else
+		ret = 0;
+
+	return ret;
+}
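+
+/*
+ * Illustrative decision (assuming the MID_*_STATE values are the bit
+ * masks this arithmetic implies): with extended_cstate_mode "s0ix" and
+ * s0ix_possible reporting MID_LPMP3_STATE, only LPMP3 is returned; and
+ * if the cpuidle hint in *eax is LPMP3 while full S0ix is possible, the
+ * result is downgraded to MID_S0I1_STATE.
+ */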
+
+static bool check_s0ix_possible(struct pmu_ss_states *pmsss)
+{
+	if (((pmsss->pmu2_states[0] & S0IX_TARGET_SSS0_MASK) ==
+					S0IX_TARGET_SSS0) &&
+		((pmsss->pmu2_states[1] & S0IX_TARGET_SSS1_MASK) ==
+					S0IX_TARGET_SSS1) &&
+		((pmsss->pmu2_states[2] & S0IX_TARGET_SSS2_MASK) ==
+					S0IX_TARGET_SSS2) &&
+		((pmsss->pmu2_states[3] & S0IX_TARGET_SSS3_MASK) ==
+					S0IX_TARGET_SSS3))
+		return true;
+
+	return false;
+}
+
+static bool check_lpmp3_possible(struct pmu_ss_states *pmsss)
+{
+	if (((pmsss->pmu2_states[0] & LPMP3_TARGET_SSS0_MASK) ==
+					LPMP3_TARGET_SSS0) &&
+		((pmsss->pmu2_states[1] & LPMP3_TARGET_SSS1_MASK) ==
+					LPMP3_TARGET_SSS1) &&
+		((pmsss->pmu2_states[2] & LPMP3_TARGET_SSS2_MASK) ==
+					LPMP3_TARGET_SSS2) &&
+		((pmsss->pmu2_states[3] & LPMP3_TARGET_SSS3_MASK) ==
+					LPMP3_TARGET_SSS3))
+		return true;
+
+	return false;
+}
+
+void pmu_set_s0ix_possible(int state)
+{
+	/* assume S0ix not possible */
+	mid_pmu_cxt->s0ix_possible = 0;
+
+	if (state != PCI_D0) {
+		struct pmu_ss_states cur_pmsss;
+
+		pmu_read_sss(&cur_pmsss);
+
+		if (likely(check_s0ix_possible(&cur_pmsss)))
+			mid_pmu_cxt->s0ix_possible = MID_S0IX_STATE;
+		else if (check_lpmp3_possible(&cur_pmsss))
+			mid_pmu_cxt->s0ix_possible = MID_LPMP3_STATE;
+	}
+}
+
+int get_target_platform_state(unsigned long *eax)
+{
+	int ret = 0;
+
+	if (unlikely(!pmu_initialized))
+		goto ret;
+
+	/* dont do s0ix if suspend in progress */
+	if (unlikely(mid_pmu_cxt->suspend_started))
+		goto ret;
+
+	/* dont do s0ix if shutdown in progress */
+	if (unlikely(mid_pmu_cxt->shutdown_started))
+		goto ret;
+
+	if (nc_device_state())
+		goto ret;
+
+	ret = get_final_state(eax);
+
+ret:
+	*eax = C6_HINT;
+	return ret;
+}
+EXPORT_SYMBOL(get_target_platform_state);
+
+u32 get_s0ix_val_set_pm_ssc(int s0ix_state)
+{
+	u32 s0ix_value = 0;
+
+	switch (s0ix_state) {
+	case MID_S0I1_STATE:
+		writel(S0I1_SSS0, &mid_pmu_cxt->pmu_reg->pm_ssc[0]);
+		writel(S0I1_SSS1, &mid_pmu_cxt->pmu_reg->pm_ssc[1]);
+		writel(S0I1_SSS2, &mid_pmu_cxt->pmu_reg->pm_ssc[2]);
+		writel(S0I1_SSS3, &mid_pmu_cxt->pmu_reg->pm_ssc[3]);
+		pmu_stat_start(SYS_STATE_S0I1);
+		s0ix_value = S0I1_VALUE;
+		break;
+	case MID_LPMP3_STATE:
+		writel(LPMP3_SSS0, &mid_pmu_cxt->pmu_reg->pm_ssc[0]);
+		writel(LPMP3_SSS1, &mid_pmu_cxt->pmu_reg->pm_ssc[1]);
+		writel(LPMP3_SSS2, &mid_pmu_cxt->pmu_reg->pm_ssc[2]);
+		writel(LPMP3_SSS3, &mid_pmu_cxt->pmu_reg->pm_ssc[3]);
+		pmu_stat_start(SYS_STATE_S0I2);
+		s0ix_value = LPMP3_VALUE;
+		break;
+	case MID_S0I3_STATE:
+		writel(S0I3_SSS0, &mid_pmu_cxt->pmu_reg->pm_ssc[0]);
+		writel(S0I3_SSS1, &mid_pmu_cxt->pmu_reg->pm_ssc[1]);
+		writel(S0I3_SSS2, &mid_pmu_cxt->pmu_reg->pm_ssc[2]);
+		writel(S0I3_SSS3, &mid_pmu_cxt->pmu_reg->pm_ssc[3]);
+		pmu_stat_start(SYS_STATE_S0I3);
+		s0ix_value = S0I3_VALUE;
+		break;
+	case MID_S3_STATE:
+		writel(S0I3_SSS0, &mid_pmu_cxt->pmu_reg->pm_ssc[0]);
+		writel(S0I3_SSS1, &mid_pmu_cxt->pmu_reg->pm_ssc[1]);
+		writel(S0I3_SSS2, &mid_pmu_cxt->pmu_reg->pm_ssc[2]);
+		writel(S0I3_SSS3, &mid_pmu_cxt->pmu_reg->pm_ssc[3]);
+		pmu_stat_start(SYS_STATE_S3);
+		s0ix_value = S0I3_VALUE;
+		break;
+	case MID_FAST_ON_OFF_STATE:
+		writel(S0I3_SSS0, &mid_pmu_cxt->pmu_reg->pm_ssc[0]);
+		writel(S0I3_SSS1, &mid_pmu_cxt->pmu_reg->pm_ssc[1]);
+		writel(S0I3_SSS2, &mid_pmu_cxt->pmu_reg->pm_ssc[2]);
+		writel(S0I3_SSS3, &mid_pmu_cxt->pmu_reg->pm_ssc[3]);
+		pmu_stat_start(SYS_STATE_S3);
+		s0ix_value = FAST_ON_OFF_VALUE;
+		break;
+	default:
+		pmu_dump_logs();
+		BUG_ON(1);
+	}
+	return s0ix_value;
+}
+
+void platform_update_all_lss_states(struct pmu_ss_states *pmu_config,
+					int *PCIALLDEV_CFG)
+{
+	/* We shut down devices that are in the target config and that are
+	 * not in the pci table; some devices are indeed not advertised in
+	 * the pci table for certain firmwares. This is the case for HSI
+	 * firmwares, where the SPI3 device is not advertised and would
+	 * otherwise prevent s0i3. Also take IGNORE_CFG into account
+	 * (e.g. for GPIO1).
+	 */
+	pmu_config->pmu2_states[0] |= S0IX_TARGET_SSS0_MASK & ~PCIALLDEV_CFG[0];
+	pmu_config->pmu2_states[0] &= ~IGNORE_SSS0;
+	pmu_config->pmu2_states[1] |= S0IX_TARGET_SSS1_MASK & ~PCIALLDEV_CFG[1];
+	pmu_config->pmu2_states[1] &= ~IGNORE_SSS1;
+	pmu_config->pmu2_states[2] |= S0IX_TARGET_SSS2_MASK & ~PCIALLDEV_CFG[2];
+	pmu_config->pmu2_states[2] &= ~IGNORE_SSS2;
+	pmu_config->pmu2_states[3] |= S0IX_TARGET_SSS3_MASK & ~PCIALLDEV_CFG[3];
+	pmu_config->pmu2_states[3] &= ~IGNORE_SSS3;
+}
+
+void s0ix_complete(void)
+{
+	if (unlikely(mid_pmu_cxt->s0ix_entered))
+		writel(0, &mid_pmu_cxt->pmu_reg->pm_msic);
+}
+
+/*
+ * Valid wake sources: lss_number 0 to 63.
+ * Returns true if 'lss_number' is a wake source, false otherwise.
+ */
+bool mid_pmu_is_wake_source(u32 lss_number)
+{
+	u32 wake = 0;
+	bool ret = false;
+
+	if (lss_number > PMU_MAX_LSS)
+		return ret;
+
+	if (lss_number < PMU_LSS_IN_FIRST_DWORD) {
+		wake = readl(&mid_pmu_cxt->pmu_reg->pm_wks[0]);
+		wake &= (1 << lss_number);
+	} else {
+		wake = readl(&mid_pmu_cxt->pmu_reg->pm_wks[1]);
+		wake &= (1 << (lss_number - PMU_LSS_IN_FIRST_DWORD));
+	}
+
+	if (wake)
+		ret = true;
+
+	return ret;
+}
+
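+/*
+ * Account a wake event against the LSS that caused it; the pr_info()
+ * messages below are only emitted for wakes from S3 while a suspend
+ * is still in progress.
+ */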
+static void log_wakeup_source(int source)
+{
+	enum sys_state type = mid_pmu_cxt->pmu_current_state;
+
+	mid_pmu_cxt->num_wakes[source][type]++;
+
+	trace_printk("wake_from_lss%d\n",
+		     source - mid_pmu_cxt->pmu1_max_devs);
+
+	if ((mid_pmu_cxt->pmu_current_state != SYS_STATE_S3)
+	    || !mid_pmu_cxt->suspend_started)
+		return;
+
+	switch (source - mid_pmu_cxt->pmu1_max_devs) {
+	case PMU_USB_OTG_LSS_06:
+		pr_info("wakeup from USB.\n");
+		break;
+	case PMU_GPIO0_LSS_39:
+		pr_info("wakeup from GPIO.\n");
+		break;
+	case PMU_HSI_LSS_03:
+		pr_info("wakeup from HSI.\n");
+		break;
+	default:
+		pr_info("wakeup from LSS%02d.\n",
+			source - mid_pmu_cxt->pmu1_max_devs);
+		break;
+	}
+}
+
+/* return the last wake source id, and make statistics about wake sources */
+int pmu_get_wake_source(void)
+{
+	u32 wake0, wake1;
+	int i;
+	int source = INVALID_WAKE_SRC;
+
+	wake0 = readl(&mid_pmu_cxt->pmu_reg->pm_wks[0]);
+	wake1 = readl(&mid_pmu_cxt->pmu_reg->pm_wks[1]);
+
+	if (!wake0 && !wake1) {
+		log_wakeup_irq();
+		goto out;
+	}
+
+	while (wake0) {
+		i = fls(wake0) - 1;
+		source = i + mid_pmu_cxt->pmu1_max_devs;
+		log_wakeup_source(source);
+		wake0 &= ~(1<<i);
+	}
+
+	while (wake1) {
+		i = fls(wake1) - 1;
+		source = i + 32 + mid_pmu_cxt->pmu1_max_devs;
+		log_wakeup_source(source);
+		wake1 &= ~(1<<i);
+	}
+out:
+	return source;
+}
+
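+/*
+ * Poll the APM/OSPM status register until the islands reach the
+ * requested state, waiting 10us between reads; warns once and gives
+ * up after 500000 iterations (roughly 5 seconds).
+ */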
+static int wait_for_nc_pmcmd_complete(int verify_mask, int state_type,
+					int reg_type)
+{
+	int pwr_sts;
+	int count = 0;
+	u32 addr;
+
+	switch (reg_type) {
+	case APM_REG_TYPE:
+		addr = mid_pmu_cxt->apm_base + APM_STS;
+		break;
+	case OSPM_REG_TYPE:
+		addr = mid_pmu_cxt->ospm_base + OSPM_PM_SSS;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	while (true) {
+		pwr_sts = inl(addr);
+		if (state_type == OSPM_ISLAND_DOWN) {
+			if ((pwr_sts & verify_mask) == verify_mask)
+				break;
+			else
+				udelay(10);
+		} else if (state_type == OSPM_ISLAND_UP) {
+			if (pwr_sts  == verify_mask)
+				break;
+			else
+				udelay(10);
+		}
+		count++;
+		if (WARN_ONCE(count > 500000, "Timed out waiting for P-Unit"))
+			return -EBUSY;
+	}
+	return 0;
+}
+
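+/**
+ * mdfld_clv_nc_set_power_state - set the power state of north complex islands
+ * @islands:	bitmask of islands to transition
+ * @state_type:	OSPM_ISLAND_UP or OSPM_ISLAND_DOWN
+ * @reg_type:	APM_REG_TYPE or OSPM_REG_TYPE
+ * @change:	set to 1 when a power state change was actually issued
+ */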
+int mdfld_clv_nc_set_power_state(int islands, int state_type,
+					int reg_type, int *change)
+{
+	u32 pwr_cnt = 0;
+	u32 pwr_mask = 0;
+	int i, lss, mask;
+	int ret = 0;
+
+	*change = 0;
+
+	switch (reg_type) {
+	case APM_REG_TYPE:
+		pwr_cnt = inl(mid_pmu_cxt->apm_base + APM_STS);
+		break;
+	case OSPM_REG_TYPE:
+		pwr_cnt = inl(mid_pmu_cxt->ospm_base + OSPM_PM_SSS);
+		break;
+	default:
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	pwr_mask = pwr_cnt;
+	for (i = 0; i < OSPM_MAX_POWER_ISLANDS; i++) {
+		lss = islands & (0x1 << i);
+		if (lss) {
+			mask = D0I3_MASK << (BITS_PER_LSS * i);
+			if (state_type == OSPM_ISLAND_DOWN)
+				pwr_mask |= mask;
+			else if (state_type == OSPM_ISLAND_UP)
+				pwr_mask &= ~mask;
+		}
+	}
+
+	if (pwr_mask != pwr_cnt) {
+		switch (reg_type) {
+		case APM_REG_TYPE:
+			outl(pwr_mask, mid_pmu_cxt->apm_base + APM_CMD);
+			break;
+		case OSPM_REG_TYPE:
+			outl(pwr_mask, mid_pmu_cxt->ospm_base + OSPM_PM_SSC);
+			break;
+		}
+
+		ret = wait_for_nc_pmcmd_complete(pwr_mask, state_type,
+						 reg_type);
+		if (!ret)
+			*change = 1;
+		if (nc_report_power_state)
+			nc_report_power_state(pwr_mask, reg_type);
+	}
+
+unlock:
+	return ret;
+}
diff --git a/arch/x86/platform/intel-mid/intel_soc_mrfld.c b/arch/x86/platform/intel-mid/intel_soc_mrfld.c
new file mode 100644
index 0000000..9e125f1
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_soc_mrfld.c
@@ -0,0 +1,446 @@
+/*
+ * intel_soc_mrfld.c - This driver provides utility APIs for the Merrifield
+ * platform
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include "intel_soc_pmu.h"
+
+u32 __iomem *residency[SYS_STATE_MAX];
+u32 __iomem *s0ix_counter[SYS_STATE_MAX];
+
+/* list of north complex devices */
+char *mrfl_nc_devices[] = {
+	"GFXSLC",
+	"GSDKCK",
+	"GRSCD",
+	"VED",
+	"VEC",
+	"DPA",
+	"DPB",
+	"DPC",
+	"VSP",
+	"ISP",
+	"MIO",
+	"HDMIO",
+	"GFXSLCLDO"
+};
+
+int mrfl_no_of_nc_devices = ARRAY_SIZE(mrfl_nc_devices);
+
+static int mrfld_pmu_init(void)
+{
+	mid_pmu_cxt->s3_hint = MRFLD_S3_HINT;
+
+	/* Put all unused LSS in D0i3 */
+	mid_pmu_cxt->os_sss[0] = (SSMSK(D0I3_MASK, PMU_RESERVED_LSS_03)	|
+				SSMSK(D0I3_MASK, PMU_HSI_LSS_05)	|
+				//SSMSK(D0I3_MASK, PMU_SECURITY_LSS_06)	|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_07)	|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_11)	|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_12)	|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_13)	|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_14)	|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_15));
+
+	/* Mark LSS8 as unused on Tangier */
+	mid_pmu_cxt->os_sss[0] |= SSMSK(D0I3_MASK, PMU_USB_MPH_LSS_08);
+
+	mid_pmu_cxt->os_sss[1] = (SSMSK(D0I3_MASK, PMU_RESERVED_LSS_16-16)|
+				SSMSK(D0I3_MASK, PMU_SSP3_LSS_17-16)|
+				SSMSK(D0I3_MASK, PMU_SSP6_LSS_19-16)|
+				SSMSK(D0I3_MASK, PMU_USB_OTG_LSS_28-16)|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_29-16)|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_30-16));
+
+	/* Except for LSS 35, keep all in D0i3 */
+	mid_pmu_cxt->os_sss[2] = 0xFFFFFFFF;
+	mid_pmu_cxt->os_sss[3] = 0xFFFFFFFF;
+
+	mid_pmu_cxt->os_sss[2] &= ~SSMSK(D0I3_MASK, PMU_SSP4_LSS_35-32);
+
+	/* Map S0ix residency counters */
+	residency[SYS_STATE_S0I1] = ioremap_nocache(S0I1_RES_ADDR, sizeof(u64));
+	if (residency[SYS_STATE_S0I1] == NULL)
+		goto err1;
+	residency[SYS_STATE_LPMP3] = ioremap_nocache(LPMP3_RES_ADDR,
+								sizeof(u64));
+	if (residency[SYS_STATE_LPMP3] == NULL)
+		goto err2;
+	residency[SYS_STATE_S0I2] = ioremap_nocache(S0I2_RES_ADDR, sizeof(u64));
+	if (residency[SYS_STATE_S0I2] == NULL)
+		goto err3;
+	residency[SYS_STATE_S0I3] = ioremap_nocache(S0I3_RES_ADDR, sizeof(u64));
+	if (residency[SYS_STATE_S0I3] == NULL)
+		goto err4;
+
+	/* Map S0ix iteration counters */
+	s0ix_counter[SYS_STATE_S0I1] = ioremap_nocache(S0I1_COUNT_ADDR,
+								sizeof(u32));
+	if (s0ix_counter[SYS_STATE_S0I1] == NULL)
+		goto err5;
+	s0ix_counter[SYS_STATE_LPMP3] = ioremap_nocache(LPMP3_COUNT_ADDR,
+								sizeof(u32));
+	if (s0ix_counter[SYS_STATE_LPMP3] == NULL)
+		goto err6;
+	s0ix_counter[SYS_STATE_S0I2] = ioremap_nocache(S0I2_COUNT_ADDR,
+								sizeof(u32));
+	if (s0ix_counter[SYS_STATE_S0I2] == NULL)
+		goto err7;
+	s0ix_counter[SYS_STATE_S0I3] = ioremap_nocache(S0I3_COUNT_ADDR,
+								sizeof(u32));
+	if (s0ix_counter[SYS_STATE_S0I3] == NULL)
+		goto err8;
+	/* Keep PSH LSSes 00, 33 and 34 in D0i0 if PM is disabled */
+	if (!enable_s0ix && !enable_s3) {
+		mid_pmu_cxt->os_sss[2] &=
+				~SSMSK(D0I3_MASK, PMU_I2C8_LSS_33-32);
+		mid_pmu_cxt->os_sss[2] &=
+				~SSMSK(D0I3_MASK, PMU_I2C9_LSS_34-32);
+	} else {
+		mid_pmu_cxt->os_sss[0] |= SSMSK(D0I3_MASK, PMU_PSH_LSS_00);
+	}
+
+	/* Disable the Interrupt Enable bit in PM ICS register */
+	pmu_clear_interrupt_enable();
+
+	return PMU_SUCCESS;
+
+err8:
+	iounmap(s0ix_counter[SYS_STATE_S0I3]);
+	s0ix_counter[SYS_STATE_S0I3] = NULL;
+err7:
+	iounmap(s0ix_counter[SYS_STATE_S0I2]);
+	s0ix_counter[SYS_STATE_S0I2] = NULL;
+err6:
+	iounmap(s0ix_counter[SYS_STATE_LPMP3]);
+	s0ix_counter[SYS_STATE_LPMP3] = NULL;
+err5:
+	iounmap(s0ix_counter[SYS_STATE_S0I1]);
+	s0ix_counter[SYS_STATE_S0I1] = NULL;
+err4:
+	iounmap(residency[SYS_STATE_S0I3]);
+	residency[SYS_STATE_S0I3] = NULL;
+err3:
+	iounmap(residency[SYS_STATE_S0I2]);
+	residency[SYS_STATE_S0I2] = NULL;
+err2:
+	iounmap(residency[SYS_STATE_LPMP3]);
+	residency[SYS_STATE_LPMP3] = NULL;
+err1:
+	iounmap(residency[SYS_STATE_S0I1]);
+	residency[SYS_STATE_S0I1] = NULL;
+
+	pr_err("Cannot map memory to read S0ix residency and count\n");
+	return PMU_FAILED;
+}
+
+/* This function checks north complex (NC) and
+ * south complex (SC) device status on MRFLD.
+ * Returns true if all NC and SC devices are in D0i3,
+ * false otherwise.
+ */
+static bool mrfld_nc_sc_status_check(void)
+{
+	int i;
+	u32 val, nc_pwr_sts;
+	struct pmu_ss_states cur_pmsss;
+	bool nc_status, sc_status;
+
+	/* assuming nc and sc are good */
+	nc_status = true;
+	sc_status = true;
+
+	/* Check south complex device status */
+	pmu_read_sss(&cur_pmsss);
+
+	if (!(((cur_pmsss.pmu2_states[0] & S0IX_TARGET_SSS0_MASK) ==
+					 S0IX_TARGET_SSS0) &&
+		((cur_pmsss.pmu2_states[1] & S0IX_TARGET_SSS1_MASK) ==
+					 S0IX_TARGET_SSS1) &&
+		((cur_pmsss.pmu2_states[2] & S0IX_TARGET_SSS2_MASK) ==
+					 S0IX_TARGET_SSS2) &&
+		((cur_pmsss.pmu2_states[3] & S0IX_TARGET_SSS3_MASK) ==
+					 S0IX_TARGET_SSS3))) {
+		sc_status = false;
+		pr_warn("SC device/devices not in d0i3!!\n");
+		for (i = 0; i < 4; i++)
+			pr_warn("pmu2_states[%d] = %08lX\n", i,
+					cur_pmsss.pmu2_states[i]);
+	}
+
+	if (sc_status) {
+		/* Check north complex status */
+		nc_pwr_sts =
+			 intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS);
+		/* loop through the status to see if any of the nc power
+		 * islands is not in the D0i3 state
+		 */
+		for (i = 0; i < mrfl_no_of_nc_devices; i++) {
+			val = nc_pwr_sts & 3;
+			if (val != 3) {
+				nc_status = false;
+				pr_warn("NC device (%s) is not in d0i3!!\n",
+							mrfl_nc_devices[i]);
+				pr_warn("nc_pm_sss = %08X\n", nc_pwr_sts);
+				break;
+			}
+			nc_pwr_sts >>= BITS_PER_LSS;
+		}
+	}
+
+	return nc_status && sc_status;
+}
+
+/* FIXME: Need to start the counter only if debug is
+ * needed. This will save SCU cycles if debug is
+ * disabled
+ */
+static int __init start_scu_s0ix_res_counters(void)
+{
+	int ret;
+
+	ret = intel_scu_ipc_simple_command(START_RES_COUNTER, 0);
+	if (ret) {
+		pr_err("IPC command to start res counter failed\n");
+		BUG();
+		return ret;
+	}
+	return 0;
+}
+late_initcall(start_scu_s0ix_res_counters);
+
+void platform_update_all_lss_states(struct pmu_ss_states *pmu_config,
+					int *PCIALLDEV_CFG)
+{
+	/* Overwrite the pmu_config values that we get */
+	pmu_config->pmu2_states[0] =
+				(SSMSK(D0I3_MASK, PMU_RESERVED_LSS_03)	|
+				SSMSK(D0I3_MASK, PMU_HSI_LSS_05)	|
+				//SSMSK(D0I3_MASK, PMU_SECURITY_LSS_06)	|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_07)	|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_11)	|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_12)	|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_13)	|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_14)	|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_15));
+
+	/* Mark LSS8 as unused on Tangier */
+	pmu_config->pmu2_states[0] |= SSMSK(D0I3_MASK, PMU_USB_MPH_LSS_08);
+
+	pmu_config->pmu2_states[1] =
+				(SSMSK(D0I3_MASK, PMU_RESERVED_LSS_16-16)|
+				SSMSK(D0I3_MASK, PMU_SSP3_LSS_17-16)|
+				SSMSK(D0I3_MASK, PMU_SSP6_LSS_19-16)|
+				SSMSK(D0I3_MASK, PMU_USB_OTG_LSS_28-16)	|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_29-16)|
+				SSMSK(D0I3_MASK, PMU_RESERVED_LSS_30-16));
+
+	pmu_config->pmu2_states[0] &= ~IGNORE_SSS0;
+	pmu_config->pmu2_states[1] &= ~IGNORE_SSS1;
+	pmu_config->pmu2_states[2] = ~IGNORE_SSS2;
+	pmu_config->pmu2_states[3] = ~IGNORE_SSS3;
+
+	/* Except for LSS 35, keep all in D0i3 */
+	pmu_config->pmu2_states[2] &= ~SSMSK(D0I3_MASK, PMU_SSP4_LSS_35-32);
+
+	/* Keep PSH LSSes 00, 33 and 34 in D0i0 if PM is disabled */
+	if (!enable_s0ix && !enable_s3) {
+		pmu_config->pmu2_states[2] &=
+				~SSMSK(D0I3_MASK, PMU_I2C8_LSS_33-32);
+		pmu_config->pmu2_states[2] &=
+				~SSMSK(D0I3_MASK, PMU_I2C9_LSS_34-32);
+	} else {
+		pmu_config->pmu2_states[0] |= SSMSK(D0I3_MASK, PMU_PSH_LSS_00);
+	}
+}
+
+/*
+ * On MDFLD and CLV this callback is used to issue PM_CMD,
+ * which is not required on MRFLD.
+ */
+static bool mrfld_pmu_enter(int s0ix_state)
+{
+	mid_pmu_cxt->s0ix_entered = s0ix_state;
+	if (s0ix_state == MID_S3_STATE) {
+		mid_pmu_cxt->pmu_current_state = SYS_STATE_S3;
+		pmu_set_interrupt_enable();
+	}
+
+	return true;
+}
+
+/**
+ * platform_set_pmu_ops - Set the global pmu method table (mrfld_pmu_ops).
+ */
+void platform_set_pmu_ops(void)
+{
+	pmu_ops = &mrfld_pmu_ops;
+}
+
+/*
+ * As of now there is no sequential mapping between the LSS
+ * and WKS bits, so the following two calls are dummies
+ */
+
+bool mid_pmu_is_wake_source(u32 lss_number)
+{
+	return false;
+}
+
+/* return the last wake source id, and make statistics about wake sources */
+int pmu_get_wake_source(void)
+{
+	return INVALID_WAKE_SRC;
+}
+
+
+int set_extended_cstate_mode(const char *val, struct kernel_param *kp)
+{
+	return 0;
+}
+
+int get_extended_cstate_mode(char *buffer, struct kernel_param *kp)
+{
+	const char *default_string = "not supported";
+	strcpy(buffer, default_string);
+	return strlen(default_string);
+}
+
+static int wait_for_nc_pmcmd_complete(int verify_mask,
+				int status_mask, int state_type, int reg)
+{
+	int pwr_sts;
+	int count = 0;
+
+	while (true) {
+		pwr_sts = intel_mid_msgbus_read32(PUNIT_PORT, reg);
+		pwr_sts = pwr_sts >> SSS_SHIFT;
+		if (state_type == OSPM_ISLAND_DOWN ||
+					state_type == OSPM_ISLAND_SR) {
+			if ((pwr_sts & status_mask) ==
+						(verify_mask & status_mask))
+				break;
+			else
+				udelay(10);
+		} else if (state_type == OSPM_ISLAND_UP) {
+			if ((~pwr_sts & status_mask)  ==
+						(~verify_mask & status_mask))
+				break;
+			else
+				udelay(10);
+		}
+
+		count++;
+		if (WARN_ONCE(count > 500000, "Timed out waiting for P-Unit"))
+			return -EBUSY;
+	}
+	return 0;
+}
+
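+/*
+ * Merrifield variant of the north complex power state request: the
+ * islands are programmed through the P-Unit message bus rather than
+ * the APM/OSPM I/O ports, and a soft-reset request (OSPM_ISLAND_SR)
+ * is supported in addition to up/down.
+ */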
+static int mrfld_nc_set_power_state(int islands, int state_type,
+							int reg, int *change)
+{
+	u32 pwr_sts = 0;
+	u32 pwr_mask = 0;
+	int i, lss, mask;
+	int ret = 0;
+	int status_mask = 0;
+
+	*change = 0;
+	pwr_sts = intel_mid_msgbus_read32(PUNIT_PORT, reg);
+	pwr_mask = pwr_sts;
+
+	for (i = 0; i < OSPM_MAX_POWER_ISLANDS; i++) {
+		lss = islands & (0x1 << i);
+		if (lss) {
+			mask = D0I3_MASK << (BITS_PER_LSS * i);
+			status_mask = status_mask | mask;
+			if (state_type == OSPM_ISLAND_DOWN)
+				pwr_mask |= mask;
+			else if (state_type == OSPM_ISLAND_UP)
+				pwr_mask &= ~mask;
+			/* Soft reset case */
+			else if (state_type == OSPM_ISLAND_SR) {
+				pwr_mask &= ~mask;
+				mask = SR_MASK << (BITS_PER_LSS * i);
+				pwr_mask |= mask;
+			}
+		}
+	}
+
+	if (pwr_mask != pwr_sts) {
+		intel_mid_msgbus_write32(PUNIT_PORT, reg, pwr_mask);
+		ret = wait_for_nc_pmcmd_complete(pwr_mask,
+					status_mask, state_type, reg);
+		if (!ret)
+			*change = 1;
+		if (nc_report_power_state)
+			nc_report_power_state(pwr_mask, reg);
+	}
+
+	return ret;
+}
+
+void s0ix_complete(void)
+{
+	if (mid_pmu_cxt->s0ix_entered) {
+		log_wakeup_irq();
+
+		if (mid_pmu_cxt->s0ix_entered == SYS_STATE_S3)
+			pmu_clear_interrupt_enable();
+
+		mid_pmu_cxt->pmu_current_state = mid_pmu_cxt->s0ix_entered = 0;
+	}
+}
+
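+/*
+ * Returns true when nothing blocks S0ix entry: the PMU is initialized,
+ * no suspend or shutdown is in progress and no north complex device
+ * is active.
+ */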
+bool could_do_s0ix(void)
+{
+	bool ret = false;
+	if (unlikely(!pmu_initialized))
+		goto ret;
+
+	/* don't do s0ix if suspend is in progress */
+	if (unlikely(mid_pmu_cxt->suspend_started))
+		goto ret;
+
+	/* don't do s0ix if shutdown is in progress */
+	if (unlikely(mid_pmu_cxt->shutdown_started))
+		goto ret;
+
+	if (nc_device_state())
+		goto ret;
+
+	ret = true;
+ret:
+	return ret;
+}
+EXPORT_SYMBOL(could_do_s0ix);
+
+struct platform_pmu_ops mrfld_pmu_ops = {
+	.init	 = mrfld_pmu_init,
+	.enter	 = mrfld_pmu_enter,
+	.set_s0ix_complete = s0ix_complete,
+	.nc_set_power_state = mrfld_nc_set_power_state,
+	.check_nc_sc_status = mrfld_nc_sc_status_check,
+};
diff --git a/arch/x86/platform/intel-mid/intel_soc_mrfld.h b/arch/x86/platform/intel-mid/intel_soc_mrfld.h
new file mode 100644
index 0000000..dd14cba
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_soc_mrfld.h
@@ -0,0 +1,161 @@
+/*
+ * intel_soc_mrfld.h
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifdef CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER
+
+#define PM_SUPPORT		0x21
+
+#define ISP_POS			7
+#define ISP_SUB_CLASS		0x80
+
+#define PUNIT_PORT		0x04
+#define SSS_SHIFT		24
+
+/* Soft reset mask */
+#define SR_MASK			0x2
+
+#define PMU1_MAX_DEVS			8
+#define PMU2_MAX_DEVS			55
+
+#define MRFLD_S3_HINT			0x64
+
+#define NC_PM_SSS			0x3F
+
+/* SRAM locations to get S0ix residency */
+#define S0I1_RES_ADDR		0xFFFFF560
+#define LPMP3_RES_ADDR		0xFFFFF578
+#define S0I2_RES_ADDR		0xFFFFF568
+#define S0I3_RES_ADDR		0xFFFFF570
+
+/* SRAM locations to get S0ix count */
+#define S0I1_COUNT_ADDR		0xFFFFF588
+#define LPMP3_COUNT_ADDR	0xFFFFF594
+#define S0I2_COUNT_ADDR		0xFFFFF58C
+#define S0I3_COUNT_ADDR		0xFFFFF590
+
+/* IPC commands to start, stop and
+ * dump S0ix residency counters */
+#define START_RES_COUNTER	0x00EB
+#define STOP_RES_COUNTER	0x10EB
+#define DUMP_RES_COUNTER	0x20EB
+
+/* IPC commands to start/reset and
+ * dump S0ix count */
+#define START_S0IX_COUNT	0x00E1
+#define DUMP_S0IX_COUNT		0x10E1
+
+#define GFX_LSS_INDEX			1
+
+#define PMU_PSH_LSS_00			0
+#define PMU_SDIO0_LSS_01		1
+#define PMU_EMMC0_LSS_02		2
+#define PMU_RESERVED_LSS_03		3
+#define PMU_SDIO1_LSS_04		4
+#define PMU_HSI_LSS_05			5
+#define PMU_SECURITY_LSS_06		6
+#define PMU_RESERVED_LSS_07		7
+#define PMU_USB_MPH_LSS_08		8
+#define PMU_USB3_LSS_09			9
+#define PMU_AUDIO_LSS_10		10
+#define PMU_RESERVED_LSS_11		11
+#define PMU_RESERVED_LSS_12		12
+#define PMU_RESERVED_LSS_13		13
+#define PMU_RESERVED_LSS_14		14
+#define PMU_RESERVED_LSS_15		15
+#define PMU_RESERVED_LSS_16		16
+#define PMU_SSP3_LSS_17			17
+#define PMU_SSP5_LSS_18			18
+#define PMU_SSP6_LSS_19			19
+#define PMU_I2C1_LSS_20			20
+#define PMU_I2C2_LSS_21			21
+#define PMU_I2C3_LSS_22			22
+#define PMU_I2C4_LSS_23			23
+#define PMU_I2C5_LSS_24			24
+#define PMU_GP_DMA_LSS_25		25
+#define PMU_I2C6_LSS_26			26
+#define PMU_I2C7_LSS_27			27
+#define PMU_USB_OTG_LSS_28		28
+#define PMU_RESERVED_LSS_29		29
+#define PMU_RESERVED_LSS_30		30
+#define PMU_UART0_LSS_31		31
+#define PMU_UART1_LSS_31		31
+#define PMU_UART2_LSS_31		31
+
+#define PMU_I2C8_LSS_33			33
+#define PMU_I2C9_LSS_34			34
+#define PMU_SSP4_LSS_35			35
+#define PMU_PMW_LSS_36			36
+
+#define EMMC0_LSS			PMU_EMMC0_LSS_02
+
+#define IGNORE_SSS0			0
+#define IGNORE_SSS1			0
+#define IGNORE_SSS2			0
+#define IGNORE_SSS3			0
+
+#define PMU_WAKE_GPIO0      (1 << 0)
+#define PMU_WAKE_GPIO1      (1 << 1)
+#define PMU_WAKE_GPIO2      (1 << 2)
+#define PMU_WAKE_GPIO3      (1 << 3)
+#define PMU_WAKE_GPIO4      (1 << 4)
+#define PMU_WAKE_GPIO5      (1 << 5)
+#define PMU_WAKE_TIMERS     (1 << 6)
+#define PMU_WAKE_SECURITY   (1 << 7)
+#define PMU_WAKE_AONT32K    (1 << 8)
+#define PMU_WAKE_AONT       (1 << 9)
+#define PMU_WAKE_SVID_ALERT (1 << 10)
+#define PMU_WAKE_AUDIO      (1 << 11)
+#define PMU_WAKE_USB2       (1 << 12)
+#define PMU_WAKE_USB3       (1 << 13)
+#define PMU_WAKE_ILB        (1 << 14)
+#define PMU_WAKE_TAP        (1 << 15)
+#define PMU_WAKE_WATCHDOG   (1 << 16)
+#define PMU_WAKE_HSIC       (1 << 17)
+#define PMU_WAKE_PSH        (1 << 18)
+#define PMU_WAKE_PSH_GPIO   (1 << 19)
+#define PMU_WAKE_PSH_AONT   (1 << 20)
+#define PMU_WAKE_PSH_HALT   (1 << 21)
+#define PMU_GLBL_WAKE_MASK  (1 << 31)
+
+/* Ignore AONT wakes and all wakes from WKC1 */
+#define IGNORE_S3_WKC0 (PMU_WAKE_AONT32K | PMU_WAKE_AONT)
+#define IGNORE_S3_WKC1 (~0)
+
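+/*
+ * Target sub-system states for S0ix/LPMP3 entry: the platform may only
+ * enter the state when (PM_SSS & *_MASK) equals the *_TARGET value,
+ * i.e. every LSS covered by the mask is in the required D0ix state.
+ */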
+#define S0IX_TARGET_SSS0_MASK (0xFFF3FFFF)
+#define S0IX_TARGET_SSS1_MASK (0xFFFFFFFF)
+#define S0IX_TARGET_SSS2_MASK (0xFFFFFFFF)
+#define S0IX_TARGET_SSS3_MASK (0xFFFFFFFF)
+
+#define S0IX_TARGET_SSS0 (0xFFF3FFFF)
+#define S0IX_TARGET_SSS1 (0xFFFFFFFF)
+#define S0IX_TARGET_SSS2 (0xFFFFFF3F)
+#define S0IX_TARGET_SSS3 (0xFFFFFFFF)
+
+#define LPMP3_TARGET_SSS0_MASK (0xFFF3FFFF)
+#define LPMP3_TARGET_SSS0 (0xFFC3FFFF)
+
+extern char *mrfl_nc_devices[];
+extern int mrfl_no_of_nc_devices;
+extern int intel_scu_ipc_simple_command(int, int);
+extern void log_wakeup_irq(void);
+extern void s0ix_complete(void);
+extern bool could_do_s0ix(void);
+
+#endif
diff --git a/arch/x86/platform/intel-mid/intel_soc_pm_debug.c b/arch/x86/platform/intel-mid/intel_soc_pm_debug.c
new file mode 100644
index 0000000..5cbcb89
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_soc_pm_debug.c
@@ -0,0 +1,2518 @@
+/*
+ * intel_soc_pm_debug.c - This driver provides debug utilities across
+ * multiple platforms
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#include <linux/time.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <linux/cpuidle.h>
+#include "intel_soc_pm_debug.h"
+#include <asm-generic/io-64-nonatomic-hi-lo.h>
+#include <asm/tsc.h>
+
+#ifdef CONFIG_PM_DEBUG
+#define MAX_CSTATES_POSSIBLE	32
+
+static struct latency_stat *lat_stat;
+
+static void latency_measure_enable_disable(bool enable_measure)
+{
+	int err;
+	u32 sub;
+
+	if (enable_measure == lat_stat->latency_measure)
+		return;
+
+	if (enable_measure)
+		sub = IPC_SUB_MEASURE_START_CLVP;
+	else
+		sub = IPC_SUB_MEASURE_STOP_CLVP;
+
+	err = rpmsg_send_generic_command(IPC_CMD_S0IX_LATENCY_CLVP,
+						sub, NULL, 0, NULL, 0);
+	if (unlikely(err)) {
+		pr_err("IPC to %s S0IX Latency Measurement failed!\n",
+					enable_measure ? "start" : "stop");
+		return;
+	}
+
+	if (enable_measure) {
+		memset(lat_stat->scu_latency, 0, sizeof(lat_stat->scu_latency));
+		memset(lat_stat->os_latency, 0, sizeof(lat_stat->os_latency));
+		memset(lat_stat->s3_parts_lat, 0,
+				sizeof(lat_stat->s3_parts_lat));
+		memset(lat_stat->count, 0, sizeof(lat_stat->count));
+	}
+
+	lat_stat->latency_measure = enable_measure;
+}
+
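+/*
+ * Print min/avg/max of a simple_stat, where avg is total/count. Values
+ * are scaled down by 'divisor' and the remainder by 'rem_div'; e.g.
+ * divisor=NSEC_PER_MSEC with rem_div=NSEC_PER_USEC turns nanosecond
+ * samples into "msec.usec".
+ */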
+static void print_simple_stat(struct seq_file *s, int divisor, int rem_div,
+					int count, struct simple_stat stat)
+{
+	unsigned long long min, avg, max;
+	unsigned long min_rem = 0, avg_rem = 0, max_rem = 0;
+
+	min = stat.min;
+	max = stat.max;
+	avg = stat.total;
+
+	if (count)
+		do_div(avg, count);
+
+	if (divisor > 1) {
+		min_rem = do_div(min, divisor);
+		max_rem = do_div(max, divisor);
+		avg_rem = do_div(avg, divisor);
+	}
+
+	if (rem_div > 1) {
+		min_rem /= rem_div;
+		max_rem /= rem_div;
+		avg_rem /= rem_div;
+	}
+
+	seq_printf(s, " %5llu.%03lu/%5llu.%03lu/%5llu.%03lu",
+			min, min_rem, avg, avg_rem, max, max_rem);
+}
+
+static int show_pmu_s0ix_lat(struct seq_file *s, void *unused)
+{
+	int i = 0;
+
+	char *states[] = {
+		"S0I1",
+		"LPMP3",
+		"S0I3",
+		"S3"
+	};
+
+	char *s3_parts_names[] = {
+		"PROC_FRZ",
+		"DEV_SUS",
+		"NB_CPU_OFF",
+		"NB_CPU_ON",
+		"DEV_RES",
+		"PROC_UNFRZ"
+	};
+
+	seq_printf(s, "%29s %35s\n", "SCU Latency", "OS Latency");
+	seq_printf(s, "%33s %35s\n", "min/avg/max(msec)", "min/avg/max(msec)");
+
+	for (i = SYS_STATE_S0I1; i < SYS_STATE_S3; i++) {
+		seq_printf(s, "\n%s(%llu)", states[i - SYS_STATE_S0I1],
+							lat_stat->count[i]);
+
+		seq_printf(s, "\n%5s", "entry");
+		print_simple_stat(s, USEC_PER_MSEC, 1, lat_stat->count[i],
+						lat_stat->scu_latency[i].entry);
+		seq_printf(s, "      ");
+		print_simple_stat(s, NSEC_PER_MSEC, NSEC_PER_USEC,
+			lat_stat->count[i], lat_stat->os_latency[i].entry);
+
+		seq_printf(s, "\n%5s", "exit");
+		print_simple_stat(s, USEC_PER_MSEC, 1, lat_stat->count[i],
+						lat_stat->scu_latency[i].exit);
+		seq_printf(s, "      ");
+		print_simple_stat(s, NSEC_PER_MSEC, NSEC_PER_USEC,
+			lat_stat->count[i], lat_stat->os_latency[i].exit);
+
+	}
+
+	seq_printf(s, "\n\n");
+
+	if (!lat_stat->count[SYS_STATE_S3])
+		return 0;
+
+	seq_printf(s, "S3 Latency dissection:\n");
+	seq_printf(s, "%38s\n", "min/avg/max(msec)");
+
+	for (i = 0; i < MAX_S3_PARTS; i++) {
+		seq_printf(s, "%10s\t", s3_parts_names[i]);
+		print_simple_stat(s, NSEC_PER_MSEC, NSEC_PER_USEC,
+					lat_stat->count[SYS_STATE_S3],
+					lat_stat->s3_parts_lat[i]);
+		seq_printf(s, "\n");
+	}
+
+	return 0;
+}
+
+static int pmu_s0ix_lat_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, show_pmu_s0ix_lat, NULL);
+}
+
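+/*
+ * Writing "start"/"stop" toggles the SCU latency measurement, e.g.
+ * (assuming debugfs is mounted at /sys/kernel/debug):
+ *	echo start > /sys/kernel/debug/s0ix_latency
+ *	echo stop > /sys/kernel/debug/s0ix_latency
+ */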
+static ssize_t pmu_s0ix_lat_write(struct file *file,
+		     const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int buf_size = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, userbuf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = 0;
+
+	if (((strlen("start") + 1) == buf_size) &&
+		!strncmp(buf, "start", strlen("start"))) {
+		latency_measure_enable_disable(true);
+	} else if (((strlen("stop") + 1) == buf_size) &&
+		!strncmp(buf, "stop", strlen("stop"))) {
+		latency_measure_enable_disable(false);
+	}
+
+	return buf_size;
+}
+
+static const struct file_operations s0ix_latency_ops = {
+	.open		= pmu_s0ix_lat_open,
+	.read		= seq_read,
+	.write		= pmu_s0ix_lat_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
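+/*
+ * Fold the current sample (simple_stat->curr) into the running
+ * min/max/total; the first sample (count == 0) initializes all three.
+ */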
+static void update_simple_stat(struct simple_stat *simple_stat, int count)
+{
+	u64 duration = simple_stat->curr;
+
+	if (!count) {
+		simple_stat->min =
+		simple_stat->max =
+		simple_stat->total = duration;
+	} else {
+		if (duration < simple_stat->min)
+			simple_stat->min = duration;
+		else if (duration > simple_stat->max)
+			simple_stat->max = duration;
+		simple_stat->total += duration;
+	}
+}
+
+void s0ix_scu_latency_stat(int type)
+{
+	if (!lat_stat || !lat_stat->latency_measure)
+		return;
+
+	if (type < SYS_STATE_S0I1 || type > SYS_STATE_S3)
+		return;
+
+	lat_stat->scu_latency[type].entry.curr =
+			readl(lat_stat->scu_s0ix_lat_addr);
+	lat_stat->scu_latency[type].exit.curr =
+			readl(lat_stat->scu_s0ix_lat_addr + 1);
+
+	update_simple_stat(&lat_stat->scu_latency[type].entry,
+					lat_stat->count[type]);
+	update_simple_stat(&lat_stat->scu_latency[type].exit,
+					lat_stat->count[type]);
+}
+
+void s0ix_lat_stat_init(void)
+{
+	if (!platform_is(INTEL_ATOM_CLV))
+		return;
+
+	lat_stat = kzalloc(sizeof(struct latency_stat), GFP_KERNEL);
+	if (unlikely(!lat_stat)) {
+		pr_err("Failed to allocate memory for s0ix latency!\n");
+		goto out_err0;
+	}
+
+	lat_stat->scu_s0ix_lat_addr =
+		ioremap_nocache(S0IX_LAT_SRAM_ADDR_CLVP,
+					S0IX_LAT_SRAM_SIZE_CLVP);
+	if (unlikely(!lat_stat->scu_s0ix_lat_addr)) {
+		pr_err("Failed to map SCU_S0IX_LAT_ADDR!\n");
+		goto out_err1;
+	}
+
+	lat_stat->dentry = debugfs_create_file("s0ix_latency",
+			S_IFREG | S_IRUGO, NULL, NULL, &s0ix_latency_ops);
+	if (unlikely(!lat_stat->dentry)) {
+		pr_err("Failed to create debugfs for s0ix latency!\n");
+		goto out_err2;
+	}
+
+	return;
+
+out_err2:
+	iounmap(lat_stat->scu_s0ix_lat_addr);
+out_err1:
+	kfree(lat_stat);
+	lat_stat = NULL;
+out_err0:
+	pr_err("%s: Initialization failed\n", __func__);
+}
+
+void s0ix_lat_stat_finish(void)
+{
+	if (!platform_is(INTEL_ATOM_CLV))
+		return;
+
+	if (unlikely(!lat_stat))
+		return;
+
+	if (likely(lat_stat->scu_s0ix_lat_addr))
+		iounmap(lat_stat->scu_s0ix_lat_addr);
+
+	if (likely(lat_stat->dentry))
+		debugfs_remove(lat_stat->dentry);
+
+	kfree(lat_stat);
+	lat_stat = NULL;
+}
+
+void time_stamp_in_suspend_flow(int mark, bool start)
+{
+	if (!lat_stat || !lat_stat->latency_measure)
+		return;
+
+	if (start) {
+		lat_stat->s3_parts_lat[mark].curr = cpu_clock(0);
+		return;
+	}
+
+	lat_stat->s3_parts_lat[mark].curr = cpu_clock(0) -
+				lat_stat->s3_parts_lat[mark].curr;
+}
+
+static void collect_sleep_state_latency_stat(int sleep_state)
+{
+	int i;
+	if (sleep_state == SYS_STATE_S3)
+		for (i = 0; i < MAX_S3_PARTS; i++)
+			update_simple_stat(&lat_stat->s3_parts_lat[i],
+						lat_stat->count[sleep_state]);
+
+	update_simple_stat(&lat_stat->os_latency[sleep_state].entry,
+						lat_stat->count[sleep_state]);
+	update_simple_stat(&lat_stat->os_latency[sleep_state].exit,
+						lat_stat->count[sleep_state]);
+	lat_stat->count[sleep_state]++;
+}
+
+void time_stamp_for_sleep_state_latency(int sleep_state, bool start, bool entry)
+{
+	if (!lat_stat || !lat_stat->latency_measure)
+		return;
+
+	if (start) {
+		if (entry)
+			lat_stat->os_latency[sleep_state].entry.curr =
+								cpu_clock(0);
+		else
+			lat_stat->os_latency[sleep_state].exit.curr =
+								cpu_clock(0);
+		return;
+	}
+
+	if (entry)
+		lat_stat->os_latency[sleep_state].entry.curr = cpu_clock(0) -
+				lat_stat->os_latency[sleep_state].entry.curr;
+	else {
+		lat_stat->os_latency[sleep_state].exit.curr = cpu_clock(0) -
+				lat_stat->os_latency[sleep_state].exit.curr;
+		collect_sleep_state_latency_stat(sleep_state);
+	}
+}
+#else /* CONFIG_PM_DEBUG */
+void s0ix_scu_latency_stat(int type) {}
+void s0ix_lat_stat_init(void) {}
+void s0ix_lat_stat_finish(void) {}
+void time_stamp_for_sleep_state_latency(int sleep_state, bool start,
+							bool entry) {}
+void time_stamp_in_suspend_flow(int mark, bool start) {}
+inline unsigned int pmu_get_new_cstate(unsigned int cstate, int *index)
+{
+	return cstate;
+}
+#endif /* CONFIG_PM_DEBUG */
+
+static char *dstates[] = {"D0", "D0i1", "D0i2", "D0i3"};
+
+/* This can be used to report NC power transitions */
+void (*nc_report_power_state) (u32, int);
+
+#if defined(CONFIG_REMOVEME_INTEL_ATOM_MDFLD_POWER)			\
+			|| defined(CONFIG_REMOVEME_INTEL_ATOM_CLV_POWER)
+
+#define PMU_DEBUG_PRINT_STATS	(1U << 0)
+static int debug_mask;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
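+/*
+ * Route debug output either to a seq_file (when dumping via debugfs)
+ * or to the kernel log, gated by the debug_mask module parameter.
+ */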
+#define DEBUG_PRINT(logging_type, s, debug_level_mask, args...)		\
+	do {								\
+		if (logging_type)					\
+			seq_printf(s, args);				\
+		else if (debug_mask &					\
+			PMU_DEBUG_PRINT_##debug_level_mask)		\
+			pr_info(args);					\
+	} while (0)
+
+static struct island display_islands[] = {
+	{APM_REG_TYPE, APM_GRAPHICS_ISLAND, "GFX"},
+	{APM_REG_TYPE, APM_VIDEO_DEC_ISLAND, "Video Decoder"},
+	{APM_REG_TYPE, APM_VIDEO_ENC_ISLAND, "Video Encoder"},
+	{APM_REG_TYPE, APM_GL3_CACHE_ISLAND, "GL3 Cache"},
+	{OSPM_REG_TYPE, OSPM_DISPLAY_A_ISLAND, "Display A"},
+	{OSPM_REG_TYPE, OSPM_DISPLAY_B_ISLAND, "Display B"},
+	{OSPM_REG_TYPE, OSPM_DISPLAY_C_ISLAND, "Display C"},
+	{OSPM_REG_TYPE, OSPM_MIPI_ISLAND, "MIPI-DSI"}
+};
+
+static struct island camera_islands[] = {
+	{APM_REG_TYPE, APM_ISP_ISLAND, "ISP"},
+	{APM_REG_TYPE, APM_IPH_ISLAND, "Iunit PHY"}
+};
+
+static char *lss_device_status[4] = { "D0i0", "D0i1", "D0i2", "D0i3" };
+
+static int lsses_num = ARRAY_SIZE(lsses);
+
+#ifdef LOG_PMU_EVENTS
+static void pmu_log_timestamp(struct timespec *ts)
+{
+	if (timekeeping_suspended) {
+		ts->tv_sec = 0;
+		ts->tv_nsec = 0;
+	} else {
+		ktime_get_ts(ts);
+	}
+}
+
+void pmu_log_pmu_irq(int status)
+{
+	struct mid_pmu_pmu_irq_log *log =
+		&mid_pmu_cxt->pmu_irq_log[mid_pmu_cxt->pmu_irq_log_idx];
+
+	log->status = status;
+	pmu_log_timestamp(&log->ts);
+	mid_pmu_cxt->pmu_irq_log_idx =
+		(mid_pmu_cxt->pmu_irq_log_idx + 1) % LOG_SIZE;
+}
+
+static void pmu_dump_pmu_irq_log(void)
+{
+	struct mid_pmu_pmu_irq_log *log;
+	int i = mid_pmu_cxt->pmu_irq_log_idx, j;
+
+	printk(KERN_ERR"%d last pmu irqs:\n", LOG_SIZE);
+
+	for (j = 0; j  < LOG_SIZE; j++) {
+		i ? i-- : (i = LOG_SIZE - 1);
+		log = &mid_pmu_cxt->pmu_irq_log[i];
+		printk(KERN_ERR"Timestamp: %lu.%09lu\n",
+			log->ts.tv_sec, log->ts.tv_nsec);
+		printk(KERN_ERR"Status = 0x%02x", log->status);
+		printk(KERN_ERR"\n");
+	}
+}
+
+void pmu_log_ipc_irq(void)
+{
+	struct mid_pmu_ipc_irq_log *log =
+		&mid_pmu_cxt->ipc_irq_log[mid_pmu_cxt->ipc_irq_log_idx];
+
+	pmu_log_timestamp(&log->ts);
+	mid_pmu_cxt->ipc_irq_log_idx =
+	(mid_pmu_cxt->ipc_irq_log_idx + 1) % LOG_SIZE;
+}
+
+static void pmu_dump_ipc_irq_log(void)
+{
+	struct mid_pmu_ipc_irq_log *log;
+	int i = mid_pmu_cxt->ipc_irq_log_idx, j;
+
+	printk(KERN_ERR"%d last ipc irqs:\n", LOG_SIZE);
+
+	for (j = 0; j  < LOG_SIZE; j++) {
+		i ? i-- : (i = LOG_SIZE - 1);
+		log = &mid_pmu_cxt->ipc_irq_log[i];
+		printk(KERN_ERR"Timestamp: %lu.%09lu\n",
+			log->ts.tv_sec, log->ts.tv_nsec);
+		printk(KERN_ERR"\n");
+	}
+}
+
+void pmu_log_ipc(u32 command)
+{
+	struct mid_pmu_ipc_log *log =
+	&mid_pmu_cxt->ipc_log[mid_pmu_cxt->ipc_log_idx];
+
+	log->command = command;
+	pmu_log_timestamp(&log->ts);
+	mid_pmu_cxt->ipc_log_idx = (mid_pmu_cxt->ipc_log_idx + 1) % LOG_SIZE;
+}
+
+static void pmu_dump_ipc_log(void)
+{
+	struct mid_pmu_ipc_log *log;
+	int i = mid_pmu_cxt->ipc_log_idx, j;
+
+	printk(KERN_ERR"%d last ipc commands:\n", LOG_SIZE);
+
+	for (j = 0; j  < LOG_SIZE; j++) {
+		i  ? i-- : (i = LOG_SIZE - 1);
+		log = &mid_pmu_cxt->ipc_log[i];
+		printk(KERN_ERR"Timestamp: %lu.%09lu\n",
+			log->ts.tv_sec, log->ts.tv_nsec);
+		printk(KERN_ERR"Command: 0x%08x", log->command);
+		printk(KERN_ERR"\n");
+	}
+}
+
+void pmu_log_command(u32 command, struct pmu_ss_states *pm_ssc)
+{
+	struct mid_pmu_cmd_log *log =
+		&mid_pmu_cxt->cmd_log[mid_pmu_cxt->cmd_log_idx];
+
+	if (pm_ssc != NULL)
+		memcpy(&log->pm_ssc, pm_ssc, sizeof(struct pmu_ss_states));
+	else
+		memset(&log->pm_ssc, 0, sizeof(struct pmu_ss_states));
+	log->command = command;
+	pmu_log_timestamp(&log->ts);
+	mid_pmu_cxt->cmd_log_idx = (mid_pmu_cxt->cmd_log_idx + 1) % LOG_SIZE;
+}
+
+static void pmu_dump_command_log(void)
+{
+	struct mid_pmu_cmd_log *log;
+	int i = mid_pmu_cxt->cmd_log_idx, j, k;
+	u32 cmd_state;
+	printk(KERN_ERR"%d last pmu commands:\n", LOG_SIZE);
+
+	for (j = 0; j  < LOG_SIZE; j++) {
+		i ? i-- : (i = LOG_SIZE - 1);
+		log = &mid_pmu_cxt->cmd_log[i];
+		cmd_state = log->command;
+		printk(KERN_ERR"Timestamp: %lu.%09lu\n",
+			log->ts.tv_sec, log->ts.tv_nsec);
+		switch (cmd_state) {
+		case INTERACTIVE_VALUE:
+			printk(KERN_ERR"PM_CMD = Interactive_CMD IOC bit not set.\n");
+			break;
+		case INTERACTIVE_IOC_VALUE:
+			printk(KERN_ERR"PM_CMD = Interactive_CMD IOC bit set.\n");
+			break;
+		case S0I1_VALUE:
+			printk(KERN_ERR"PM_CMD = S0i1_CMD\n");
+			break;
+		case S0I3_VALUE:
+			printk(KERN_ERR"PM_CMD = S0i3_CMD\n");
+			break;
+		case LPMP3_VALUE:
+			printk(KERN_ERR"PM_CMD = LPMP3_CMD\n");
+			break;
+		default:
+			printk(KERN_ERR "Invalid PM_CMD\n");
+			break;
+		}
+		for (k = 0; k < 4; k++)
+			printk(KERN_ERR"pmu2_states[%d]: 0x%08lx\n",
+				k, log->pm_ssc.pmu2_states[k]);
+		printk(KERN_ERR"\n");
+	}
+}
+
+void pmu_dump_logs(void)
+{
+	struct timespec ts;
+
+	pmu_log_timestamp(&ts);
+	printk(KERN_ERR"Dumping out pmu logs\n");
+	printk(KERN_ERR"Timestamp: %lu.%09lu\n\n", ts.tv_sec, ts.tv_nsec);
+	printk(KERN_ERR"---------------------------------------\n\n");
+	pmu_dump_command_log();
+	printk(KERN_ERR"---------------------------------------\n\n");
+	pmu_dump_pmu_irq_log();
+	printk(KERN_ERR"---------------------------------------\n\n");
+	pmu_dump_ipc_log();
+	printk(KERN_ERR"---------------------------------------\n\n");
+	pmu_dump_ipc_irq_log();
+}
+#else
+void pmu_log_pmu_irq(int status) {}
+void pmu_log_command(u32 command, struct pmu_ss_states *pm_ssc) {}
+void pmu_dump_logs(void) {}
+#endif /* LOG_PMU_EVENTS */
+
+void pmu_stat_start(enum sys_state type)
+{
+	mid_pmu_cxt->pmu_current_state = type;
+	mid_pmu_cxt->pmu_stats[type].last_try = cpu_clock(smp_processor_id());
+}
+
+void pmu_stat_end(void)
+{
+	enum sys_state type = mid_pmu_cxt->pmu_current_state;
+
+	if (type > SYS_STATE_S0I0 && type < SYS_STATE_MAX) {
+		mid_pmu_cxt->pmu_stats[type].last_entry =
+			mid_pmu_cxt->pmu_stats[type].last_try;
+
+		if (!mid_pmu_cxt->pmu_stats[type].count)
+			mid_pmu_cxt->pmu_stats[type].first_entry =
+				mid_pmu_cxt->pmu_stats[type].last_entry;
+
+		mid_pmu_cxt->pmu_stats[type].time +=
+			cpu_clock(smp_processor_id())
+			- mid_pmu_cxt->pmu_stats[type].last_entry;
+
+		mid_pmu_cxt->pmu_stats[type].count++;
+
+		s0ix_scu_latency_stat(type);
+		if (type >= SYS_STATE_S0I1 && type <= SYS_STATE_S0I3)
+			/* time stamp for end of s0ix exit */
+			time_stamp_for_sleep_state_latency(type, false, false);
+	}
+
+	mid_pmu_cxt->pmu_current_state = SYS_STATE_S0I0;
+}
+
+void pmu_stat_error(u8 err_type)
+{
+	enum sys_state type = mid_pmu_cxt->pmu_current_state;
+	u8 err_index;
+
+	if (type > SYS_STATE_S0I0 && type < SYS_STATE_MAX) {
+		switch (err_type) {
+		case SUBSYS_POW_ERR_INT:
+			trace_printk("S0ix_POW_ERR_INT\n");
+			err_index = 0;
+			break;
+		case S0ix_MISS_INT:
+			trace_printk("S0ix_MISS_INT\n");
+			err_index = 1;
+			break;
+		case NO_ACKC6_INT:
+			trace_printk("S0ix_NO_ACKC6_INT\n");
+			err_index = 2;
+			break;
+		default:
+			err_index = 3;
+			break;
+		}
+
+		if (err_index < 3)
+			mid_pmu_cxt->pmu_stats[type].err_count[err_index]++;
+	}
+}
+
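+/*
+ * Print one sleep-state row: entry count, error counts, accumulated
+ * residency and entry timestamps. The residency percentage is
+ * residency * 100 divided by the time since PMU init, using do_div()
+ * twice to recover three fractional digits without floating point.
+ */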
+static void pmu_stat_seq_printf(struct seq_file *s, int type, char *typestr)
+{
+	unsigned long long t;
+	unsigned long nanosec_rem, remainder;
+	unsigned long time, init_2_now_time;
+
+	seq_printf(s, "%s\t%5llu\t%10llu\t%9llu\t%9llu\t", typestr,
+		 mid_pmu_cxt->pmu_stats[type].count,
+		 mid_pmu_cxt->pmu_stats[type].err_count[0],
+		 mid_pmu_cxt->pmu_stats[type].err_count[1],
+		 mid_pmu_cxt->pmu_stats[type].err_count[2]);
+
+	t = mid_pmu_cxt->pmu_stats[type].time;
+	nanosec_rem = do_div(t, NANO_SEC);
+
+	/* convert time to seconds */
+	time = (unsigned long)t;
+
+	seq_printf(s, "%5lu.%06lu\t",
+	   (unsigned long) t, nanosec_rem / 1000);
+
+	t = mid_pmu_cxt->pmu_stats[type].last_entry;
+	nanosec_rem = do_div(t, NANO_SEC);
+	seq_printf(s, "%5lu.%06lu\t",
+	   (unsigned long) t, nanosec_rem / 1000);
+
+	t = mid_pmu_cxt->pmu_stats[type].first_entry;
+	nanosec_rem = do_div(t, NANO_SEC);
+	seq_printf(s, "%5lu.%06lu\t",
+	   (unsigned long) t, nanosec_rem / 1000);
+
+	t =  cpu_clock(raw_smp_processor_id());
+	t -= mid_pmu_cxt->pmu_init_time;
+	nanosec_rem = do_div(t, NANO_SEC);
+
+	init_2_now_time =  (unsigned long) t;
+
+	/* for calculating percentage residency */
+	t = (u64) time;
+	t *= 100;
+
+	/* take care of divide by zero */
+	if (init_2_now_time) {
+		remainder = do_div(t, init_2_now_time);
+		time = (unsigned long) t;
+
+		/* for getting 3-digit precision after
+		 * the decimal point */
+		t = (u64) remainder;
+		t *= 1000;
+		remainder = do_div(t, init_2_now_time);
+	} else
+		time = t = 0;
+
+	seq_printf(s, "%5lu.%03lu\n", time, (unsigned long) t);
+}
+
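+/*
+ * Compute a device's residency since pmu_dev_res[].start: D0ix
+ * residency when dev_state is true, D0i0 residency otherwise.
+ * Returns the integer part of the residency percentage, stores its
+ * three fractional digits in *precision and the residency itself
+ * (in seconds) in *sampled_time.
+ */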
+static unsigned long pmu_dev_res_print(int index, unsigned long *precision,
+				unsigned long *sampled_time, bool dev_state)
+{
+	unsigned long long t, delta_time = 0;
+	unsigned long nanosec_rem, remainder;
+	unsigned long time, init_to_now_time;
+
+	t =  cpu_clock(raw_smp_processor_id());
+
+	if (dev_state) {
+		/* print for d0ix */
+		if ((mid_pmu_cxt->pmu_dev_res[index].state != PCI_D0))
+			delta_time = t -
+				mid_pmu_cxt->pmu_dev_res[index].d0i3_entry;
+
+		delta_time += mid_pmu_cxt->pmu_dev_res[index].d0i3_acc;
+	} else {
+		/* print for d0i0 */
+		if ((mid_pmu_cxt->pmu_dev_res[index].state == PCI_D0))
+			delta_time = t -
+				mid_pmu_cxt->pmu_dev_res[index].d0i0_entry;
+
+		delta_time += mid_pmu_cxt->pmu_dev_res[index].d0i0_acc;
+	}
+
+	t -= mid_pmu_cxt->pmu_dev_res[index].start;
+	nanosec_rem = do_div(t, NANO_SEC);
+
+	init_to_now_time =  (unsigned long) t;
+
+	t = delta_time;
+	nanosec_rem = do_div(t, NANO_SEC);
+
+	/* convert time to seconds */
+	time = (unsigned long)t;
+	*sampled_time = time;
+
+	/* for calculating percentage residency */
+	t = (u64) time;
+	t *= 100;
+
+	/* take care of divide by zero */
+	if (init_to_now_time) {
+		remainder = do_div(t, init_to_now_time);
+		time = (unsigned long) t;
+
+		/* for getting 3-digit precision after
+		 * the decimal point */
+		t = (u64) remainder;
+		t *= 1000;
+		remainder = do_div(t, init_to_now_time);
+	} else
+		time = t = 0;
+
+	*precision = (unsigned long)t;
+
+	return time;
+}
+
+static void nc_device_state_show(struct seq_file *s, struct pci_dev *pdev)
+{
+	int off, i, islands_num, state;
+	struct island *islands;
+
+	if (PCI_SLOT(pdev->devfn) == DEV_GFX &&
+			PCI_FUNC(pdev->devfn) == FUNC_GFX) {
+		off = mid_pmu_cxt->display_off;
+		islands_num = ISLANDS_GFX;
+		islands = &display_islands[0];
+	} else if (PCI_SLOT(pdev->devfn) == DEV_ISP &&
+			PCI_FUNC(pdev->devfn) == FUNC_ISP) {
+		off = mid_pmu_cxt->camera_off;
+		islands_num = ISLANDS_ISP;
+		islands = &camera_islands[0];
+	} else {
+		return;
+	}
+
+	seq_printf(s, "pci %04x %04X %s %20s: %41s %s\n",
+		pdev->vendor, pdev->device, dev_name(&pdev->dev),
+		dev_driver_string(&pdev->dev),
+		"", off ? "" : "blocking s0ix");
+	for (i = 0; i < islands_num; i++) {
+		state = pmu_nc_get_power_state(islands[i].index,
+				islands[i].type);
+		seq_printf(s, "%52s %15s %17s %s\n",
+				 "|------->", islands[i].name, "",
+				(state >= 0) ? dstates[state & 3] : "ERR");
+	}
+}
+
+static int pmu_devices_state_show(struct seq_file *s, void *unused)
+{
+	struct pci_dev *pdev = NULL;
+	int index, i, pmu_num, ss_idx, ss_pos;
+	unsigned int base_class;
+	u32 target_mask, mask, val, needed;
+	struct pmu_ss_states cur_pmsss;
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	_pmu2_wait_not_busy();
+	pmu_read_sss(&cur_pmsss);
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	seq_printf(s, "TARGET_CFG: ");
+	seq_printf(s, "SSS0:%08X ", S0IX_TARGET_SSS0_MASK);
+	seq_printf(s, "SSS1:%08X ", S0IX_TARGET_SSS1_MASK);
+	seq_printf(s, "SSS2:%08X ", S0IX_TARGET_SSS2_MASK);
+	seq_printf(s, "SSS3:%08X ", S0IX_TARGET_SSS3_MASK);
+
+	seq_printf(s, "\n");
+	seq_printf(s, "CONDITION FOR S0I3: ");
+	seq_printf(s, "SSS0:%08X ", S0IX_TARGET_SSS0);
+	seq_printf(s, "SSS1:%08X ", S0IX_TARGET_SSS1);
+	seq_printf(s, "SSS2:%08X ", S0IX_TARGET_SSS2);
+	seq_printf(s, "SSS3:%08X ", S0IX_TARGET_SSS3);
+
+	seq_printf(s, "\n");
+	seq_printf(s, "SSS: ");
+
+	for (i = 0; i < 4; i++)
+		seq_printf(s, "%08lX ", cur_pmsss.pmu2_states[i]);
+
+	if (!mid_pmu_cxt->display_off)
+		seq_printf(s, "display not suspended: blocking s0ix\n");
+	else if (!mid_pmu_cxt->camera_off)
+		seq_printf(s, "camera not suspended: blocking s0ix\n");
+	else if (mid_pmu_cxt->s0ix_possible & MID_S0IX_STATE)
+		seq_printf(s, "can enter s0i1 or s0i3\n");
+	else if (mid_pmu_cxt->s0ix_possible & MID_LPMP3_STATE)
+		seq_printf(s, "can enter lpmp3\n");
+	else
+		seq_printf(s, "blocking s0ix\n");
+
+	seq_printf(s, "cmd_error_int count: %d\n", mid_pmu_cxt->cmd_error_int);
+
+	seq_printf(s,
+	"\tcount\tsubsys_pow\ts0ix_miss\tno_ack_c6\ttime (secs)\tlast_entry");
+	seq_printf(s, "\tfirst_entry\tresidency(%%)\n");
+
+	pmu_stat_seq_printf(s, SYS_STATE_S0I1, "s0i1");
+	pmu_stat_seq_printf(s, SYS_STATE_S0I2, "lpmp3");
+	pmu_stat_seq_printf(s, SYS_STATE_S0I3, "s0i3");
+	pmu_stat_seq_printf(s, SYS_STATE_S3, "s3");
+
+	for_each_pci_dev(pdev) {
+		/* find the base class info */
+		base_class = pdev->class >> 16;
+
+		if (base_class == PCI_BASE_CLASS_BRIDGE)
+			continue;
+
+		if (pmu_pci_to_indexes(pdev, &index, &pmu_num, &ss_idx,
+								  &ss_pos))
+			continue;
+
+		if (pmu_num == PMU_NUM_1) {
+			nc_device_state_show(s, pdev);
+			continue;
+		}
+
+		mask	= (D0I3_MASK << (ss_pos * BITS_PER_LSS));
+		val	= (cur_pmsss.pmu2_states[ss_idx] & mask) >>
+						(ss_pos * BITS_PER_LSS);
+		switch (ss_idx) {
+		case 0:
+			target_mask = S0IX_TARGET_SSS0_MASK;
+			break;
+		case 1:
+			target_mask = S0IX_TARGET_SSS1_MASK;
+			break;
+		case 2:
+			target_mask = S0IX_TARGET_SSS2_MASK;
+			break;
+		case 3:
+			target_mask = S0IX_TARGET_SSS3_MASK;
+			break;
+		default:
+			target_mask = 0;
+			break;
+		}
+		needed	= ((target_mask &  mask) != 0);
+
+		seq_printf(s, "pci %04x %04X %s %20s: lss:%02d reg:%d "
+			"mask:%08X wk:%02d:%02d:%02d:%03d %s  %s\n",
+			pdev->vendor, pdev->device, dev_name(&pdev->dev),
+			dev_driver_string(&pdev->dev),
+			index - mid_pmu_cxt->pmu1_max_devs, ss_idx, mask,
+			mid_pmu_cxt->num_wakes[index][SYS_STATE_S0I1],
+			mid_pmu_cxt->num_wakes[index][SYS_STATE_S0I2],
+			mid_pmu_cxt->num_wakes[index][SYS_STATE_S0I3],
+			mid_pmu_cxt->num_wakes[index][SYS_STATE_S3],
+			dstates[val & 3],
+			(needed && !val) ? "blocking s0ix" : "");
+
+	}
+
+	return 0;
+}
+
+static int devices_state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmu_devices_state_show, NULL);
+}
+
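+/*
+ * Writing "clear" resets all PMU statistics and wake counters, e.g.
+ * (assuming debugfs is mounted at /sys/kernel/debug):
+ *	echo clear > /sys/kernel/debug/mid_pmu_states
+ */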
+static ssize_t devices_state_write(struct file *file,
+		     const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int buf_size = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, userbuf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = 0;
+
+	if (((strlen("clear")+1) == buf_size) &&
+		!strncmp(buf, "clear", strlen("clear"))) {
+		down(&mid_pmu_cxt->scu_ready_sem);
+		memset(mid_pmu_cxt->pmu_stats, 0,
+					sizeof(mid_pmu_cxt->pmu_stats));
+		memset(mid_pmu_cxt->num_wakes, 0,
+					sizeof(mid_pmu_cxt->num_wakes));
+		mid_pmu_cxt->pmu_current_state = SYS_STATE_S0I0;
+		mid_pmu_cxt->pmu_init_time =
+			cpu_clock(raw_smp_processor_id());
+		clear_d0ix_stats();
+		up(&mid_pmu_cxt->scu_ready_sem);
+	}
+
+	return buf_size;
+}
+
+static const struct file_operations devices_state_operations = {
+	.open		= devices_state_open,
+	.read		= seq_read,
+	.write		= devices_state_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int show_pmu_lss_status(struct seq_file *s, void *unused)
+{
+	int sss_reg_index;
+	int offset;
+	int lss;
+	unsigned long status;
+	unsigned long sub_status;
+	unsigned long lss_status[4];
+	struct lss_definition *entry;
+
+	down(&mid_pmu_cxt->scu_ready_sem);
+
+	lss_status[0] = readl(&mid_pmu_cxt->pmu_reg->pm_sss[0]);
+	lss_status[1] = readl(&mid_pmu_cxt->pmu_reg->pm_sss[1]);
+	lss_status[2] = readl(&mid_pmu_cxt->pmu_reg->pm_sss[2]);
+	lss_status[3] = readl(&mid_pmu_cxt->pmu_reg->pm_sss[3]);
+
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	lss = 0;
+	seq_printf(s, "%5s\t%12s %35s %5s %4s %4s %4s %4s\n",
+			"lss", "block", "subsystem", "state", "D0i0", "D0i1",
+			"D0i2", "D0i3");
+	seq_printf(s, "====================================================="
+		      "=====================\n");
+	for (sss_reg_index = 0; sss_reg_index < 4; sss_reg_index++) {
+		status = lss_status[sss_reg_index];
+		for (offset = 0; offset < sizeof(unsigned long) * 8 / 2;
+								offset++) {
+			sub_status = status & 3;
+			if (lss >= lsses_num)
+				entry = &lsses[lsses_num - 1];
+			else
+				entry = &lsses[lss];
+			seq_printf(s, "%5s\t%12s %35s %4s %4d %4d %4d %4d\n",
+					entry->lss_name, entry->block,
+					entry->subsystem,
+					lss_device_status[sub_status],
+					get_d0ix_stat(lss, SS_STATE_D0I0),
+					get_d0ix_stat(lss, SS_STATE_D0I1),
+					get_d0ix_stat(lss, SS_STATE_D0I2),
+					get_d0ix_stat(lss, SS_STATE_D0I3));
+
+			status >>= 2;
+			lss++;
+		}
+	}
+
+	return 0;
+}
+
+static int pmu_sss_state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, show_pmu_lss_status, NULL);
+}
+
+static const struct file_operations pmu_sss_state_operations = {
+	.open		= pmu_sss_state_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int show_pmu_dev_stats(struct seq_file *s, void *unused)
+{
+	struct pci_dev *pdev = NULL;
+	unsigned long sampled_time, precision;
+	int index, pmu_num, ss_idx, ss_pos;
+	unsigned int base_class;
+
+	seq_printf(s, "%5s\t%20s\t%10s\t%10s\t%s\n",
+		"lss", "Name", "D0_res", "D0ix_res", "Sampled_Time");
+	seq_printf(s,
+	"==================================================================\n");
+
+	for_each_pci_dev(pdev) {
+		/* find the base class info */
+		base_class = pdev->class >> 16;
+
+		if (base_class == PCI_BASE_CLASS_BRIDGE)
+			continue;
+
+		if (pmu_pci_to_indexes(pdev, &index, &pmu_num, &ss_idx,
+							&ss_pos))
+			continue;
+
+		if (pmu_num == PMU_NUM_1) {
+			seq_printf(s,
+			"%5s%20s\t%5lu.%03lu%%\t%5lu.%03lu%%\t%lu\n",
+			"NC", dev_driver_string(&pdev->dev),
+			pmu_dev_res_print(index, &precision,
+				 &sampled_time, false),
+			precision,
+			pmu_dev_res_print(index, &precision,
+				 &sampled_time, true),
+			precision, sampled_time);
+			continue;
+		}
+
+		/* Print for South Complex devices */
+		seq_printf(s, "%5d\t%20s\t%5lu.%03lu%%\t%5lu.%03lu%%\t%lu\n",
+		index - mid_pmu_cxt->pmu1_max_devs,
+		dev_driver_string(&pdev->dev),
+		pmu_dev_res_print(index, &precision, &sampled_time, false),
+		precision,
+		pmu_dev_res_print(index, &precision, &sampled_time, true),
+		precision, sampled_time);
+	}
+	return 0;
+}
+
+static int pmu_dev_stat_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, show_pmu_dev_stats, NULL);
+}
+
+static const struct file_operations pmu_dev_stat_operations = {
+	.open		= pmu_dev_stat_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+#ifdef CONFIG_PM_DEBUG
+static int pmu_stats_interval = PMU_LOG_INTERVAL_SECS;
+module_param_named(pmu_stats_interval, pmu_stats_interval,
+				int, S_IRUGO | S_IWUSR | S_IWGRP);
+
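+/*
+ * Record why a requested S0ix state was demoted: count the
+ * requested/granted pair, then XOR the live sub-system states against
+ * the target to find, per LSS, which devices kept the platform from
+ * the requested state.
+ */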
+void pmu_s0ix_demotion_stat(int req_state, int grant_state)
+{
+	struct pmu_ss_states cur_pmsss;
+	int i, req_sys_state, offset;
+	unsigned long status, sub_status;
+	unsigned long s0ix_target_sss_mask[4] = {
+				S0IX_TARGET_SSS0_MASK,
+				S0IX_TARGET_SSS1_MASK,
+				S0IX_TARGET_SSS2_MASK,
+				S0IX_TARGET_SSS3_MASK};
+
+	unsigned long s0ix_target_sss[4] = {
+				S0IX_TARGET_SSS0,
+				S0IX_TARGET_SSS1,
+				S0IX_TARGET_SSS2,
+				S0IX_TARGET_SSS3};
+
+	unsigned long lpmp3_target_sss_mask[4] = {
+				LPMP3_TARGET_SSS0_MASK,
+				LPMP3_TARGET_SSS1_MASK,
+				LPMP3_TARGET_SSS2_MASK,
+				LPMP3_TARGET_SSS3_MASK};
+
+	unsigned long lpmp3_target_sss[4] = {
+				LPMP3_TARGET_SSS0,
+				LPMP3_TARGET_SSS1,
+				LPMP3_TARGET_SSS2,
+				LPMP3_TARGET_SSS3};
+
+	req_sys_state = mid_state_to_sys_state(req_state);
+	if ((grant_state >= C4_STATE_IDX) && (grant_state <= S0I3_STATE_IDX))
+		mid_pmu_cxt->pmu_stats
+			[req_sys_state].demote_count
+				[grant_state-C4_STATE_IDX]++;
+
+	if (down_trylock(&mid_pmu_cxt->scu_ready_sem))
+		return;
+
+	pmu_read_sss(&cur_pmsss);
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	if (!mid_pmu_cxt->camera_off)
+		mid_pmu_cxt->pmu_stats[req_sys_state].camera_blocker_count++;
+
+	if (!mid_pmu_cxt->display_off)
+		mid_pmu_cxt->pmu_stats[req_sys_state].display_blocker_count++;
+
+	if (!mid_pmu_cxt->s0ix_possible) {
+		for (i = 0; i < 4; i++) {
+			unsigned int lss_per_register;
+			if (req_state == MID_LPMP3_STATE)
+				status = lpmp3_target_sss[i] ^
+					(cur_pmsss.pmu2_states[i] &
+						lpmp3_target_sss_mask[i]);
+			else
+				status = s0ix_target_sss[i] ^
+					(cur_pmsss.pmu2_states[i] &
+						s0ix_target_sss_mask[i]);
+			if (!status)
+				continue;
+
+			lss_per_register =
+				(sizeof(unsigned long)*8)/BITS_PER_LSS;
+
+			for (offset = 0; offset < lss_per_register; offset++) {
+				sub_status = status & SS_IDX_MASK;
+				if (sub_status) {
+					mid_pmu_cxt->pmu_stats[req_sys_state].
+						blocker_count
+						[offset + lss_per_register*i]++;
+				}
+
+				status >>= BITS_PER_LSS;
+			}
+		}
+	}
+}
+EXPORT_SYMBOL(pmu_s0ix_demotion_stat);
+
+static void pmu_log_s0ix_status(int type, char *typestr,
+		struct seq_file *s, bool logging_type)
+{
+	unsigned long long t;
+	unsigned long time, remainder, init_2_now_time;
+
+	t = mid_pmu_cxt->pmu_stats[type].time;
+	remainder = do_div(t, NANO_SEC);
+
+	/* convert time to seconds */
+	time = (unsigned long)t;
+
+	t =  cpu_clock(0);
+	t -= mid_pmu_cxt->pmu_init_time;
+	remainder = do_div(t, NANO_SEC);
+
+	init_2_now_time =  (unsigned long) t;
+
+	/* for calculating percentage residency */
+	t = (u64) time;
+	t *= 100;
+
+	/* take care of divide by zero */
+	if (init_2_now_time) {
+		remainder = do_div(t, init_2_now_time);
+		time = (unsigned long) t;
+
+		/* for getting 3-digit precision after
+		 * the decimal point */
+		t = (u64) remainder;
+		t *= 1000;
+		remainder = do_div(t, init_2_now_time);
+	} else
+		time = t = 0;
+	DEBUG_PRINT(logging_type, s, STATS,
+			"%s\t%5llu\t%9llu\t%9llu\t%5lu.%03lu\n"
+			, typestr, mid_pmu_cxt->pmu_stats[type].count,
+			mid_pmu_cxt->pmu_stats[type].err_count[1],
+			mid_pmu_cxt->pmu_stats[type].err_count[2],
+			time, (unsigned long) t);
+}
+
+static void pmu_log_s0ix_demotion(int type, char *typestr,
+		struct seq_file *s, bool logging_type)
+{
+	DEBUG_PRINT(logging_type, s, STATS, "%s:\t%6d\t%6d\t%6d\t%6d\t%6d\n",
+		typestr,
+		mid_pmu_cxt->pmu_stats[type].demote_count[0],
+		mid_pmu_cxt->pmu_stats[type].demote_count[1],
+		mid_pmu_cxt->pmu_stats[type].demote_count[2],
+		mid_pmu_cxt->pmu_stats[type].demote_count[3],
+		mid_pmu_cxt->pmu_stats[type].demote_count[4]);
+}
+
+static void pmu_log_s0ix_lss_blocked(int type, char *typestr,
+		struct seq_file *s, bool logging_type)
+{
+	int i, block_count;
+
+	DEBUG_PRINT(logging_type, s, STATS, "%s: Block Count\n", typestr);
+
+	block_count = mid_pmu_cxt->pmu_stats[type].display_blocker_count;
+
+	if (block_count)
+		DEBUG_PRINT(logging_type, s, STATS,
+			 "\tDisplay blocked: %d times\n", block_count);
+
+	block_count = mid_pmu_cxt->pmu_stats[type].camera_blocker_count;
+
+	if (block_count)
+		DEBUG_PRINT(logging_type, s, STATS,
+			"\tCamera blocked: %d times\n", block_count);
+
+	DEBUG_PRINT(logging_type, s, STATS, "\tLSS\t #blocked\n");
+
+	for  (i = 0; i < MAX_LSS_POSSIBLE; i++) {
+		block_count = mid_pmu_cxt->pmu_stats[type].blocker_count[i];
+		if (block_count)
+			DEBUG_PRINT(logging_type, s, STATS, "\t%02d\t %6d\n", i,
+						block_count);
+	}
+	DEBUG_PRINT(logging_type, s, STATS, "\n");
+}
+
+static void pmu_stats_logger(bool logging_type, struct seq_file *s)
+{
+	if (!logging_type)
+		DEBUG_PRINT(logging_type, s, STATS,
+			"\n----MID_PMU_STATS_LOG_BEGIN----\n");
+
+	DEBUG_PRINT(logging_type, s, STATS,
+			"\tcount\ts0ix_miss\tno_ack_c6\tresidency(%%)\n");
+	pmu_log_s0ix_status(SYS_STATE_S0I1, "s0i1", s, logging_type);
+	pmu_log_s0ix_status(SYS_STATE_S0I2, "lpmp3", s, logging_type);
+	pmu_log_s0ix_status(SYS_STATE_S0I3, "s0i3", s, logging_type);
+	pmu_log_s0ix_status(SYS_STATE_S3, "s3", s, logging_type);
+
+	DEBUG_PRINT(logging_type, s, STATS, "\nFrom:\tTo\n");
+	DEBUG_PRINT(logging_type, s, STATS,
+		"\t    C4\t   C6\t  S0i1\t  Lpmp3\t  S0i3\n");
+
+	/* storing C6 demotion info in S0I0 */
+	pmu_log_s0ix_demotion(SYS_STATE_S0I0, "  C6", s, logging_type);
+
+	pmu_log_s0ix_demotion(SYS_STATE_S0I1, "s0i1", s, logging_type);
+	pmu_log_s0ix_demotion(SYS_STATE_S0I2, "lpmp3", s, logging_type);
+	pmu_log_s0ix_demotion(SYS_STATE_S0I3, "s0i3", s, logging_type);
+
+	DEBUG_PRINT(logging_type, s, STATS, "\n");
+	pmu_log_s0ix_lss_blocked(SYS_STATE_S0I1, "s0i1", s, logging_type);
+	pmu_log_s0ix_lss_blocked(SYS_STATE_S0I2, "lpmp3", s, logging_type);
+	pmu_log_s0ix_lss_blocked(SYS_STATE_S0I3, "s0i3", s, logging_type);
+
+	if (!logging_type)
+		DEBUG_PRINT(logging_type, s, STATS,
+				"\n----MID_PMU_STATS_LOG_END----\n");
+}
+
+static void pmu_log_stat(struct work_struct *work)
+{
+	pmu_stats_logger(false, NULL);
+
+	schedule_delayed_work(&mid_pmu_cxt->log_work,
+			msecs_to_jiffies(pmu_stats_interval*1000));
+}
+
+static int show_pmu_stats_log(struct seq_file *s, void *unused)
+{
+	pmu_stats_logger(true, s);
+	return 0;
+}
+
+static int pmu_stats_log_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, show_pmu_stats_log, NULL);
+}
+
+static const struct file_operations pmu_stats_log_operations = {
+	.open		= pmu_stats_log_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+#else
+void pmu_s0ix_demotion_stat(int req_state, int grant_state) {}
+EXPORT_SYMBOL(pmu_s0ix_demotion_stat);
+#endif
+
+void pmu_stats_init(void)
+{
+	struct dentry *fentry;
+
+	/* /sys/kernel/debug/mid_pmu_states */
+	(void) debugfs_create_file("mid_pmu_states", S_IFREG | S_IRUGO,
+				NULL, NULL, &devices_state_operations);
+
+	/* /sys/kernel/debug/pmu_sss_states */
+	(void) debugfs_create_file("pmu_sss_states", S_IFREG | S_IRUGO,
+				NULL, NULL, &pmu_sss_state_operations);
+
+	/* /sys/kernel/debug/pmu_dev_stats */
+	(void) debugfs_create_file("pmu_dev_stats", S_IFREG | S_IRUGO,
+				NULL, NULL, &pmu_dev_stat_operations);
+
+	s0ix_lat_stat_init();
+
+#ifdef CONFIG_PM_DEBUG
+	/* dynamic debug tracing every 5 minutes */
+	INIT_DEFERRABLE_WORK(&mid_pmu_cxt->log_work, pmu_log_stat);
+	schedule_delayed_work(&mid_pmu_cxt->log_work,
+				msecs_to_jiffies(pmu_stats_interval*1000));
+
+	debug_mask = PMU_DEBUG_PRINT_STATS;
+
+	/* /sys/kernel/debug/pmu_stats_log */
+	fentry = debugfs_create_file("pmu_stats_log", S_IFREG | S_IRUGO,
+				NULL, NULL, &pmu_stats_log_operations);
+	if (fentry == NULL)
+		printk(KERN_ERR "Failed to create pmu_stats_log debugfs\n");
+#endif
+}
+
+void pmu_s3_stats_update(int enter)
+{
+
+}
+
+void pmu_stats_finish(void)
+{
+#ifdef CONFIG_PM_DEBUG
+	cancel_delayed_work_sync(&mid_pmu_cxt->log_work);
+#endif
+	s0ix_lat_stat_finish();
+}
+
+#endif /*if CONFIG_X86_MDFLD_POWER || CONFIG_X86_CLV_POWER*/
+
+#ifdef CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER
+
+static u32 prev_s0ix_cnt[SYS_STATE_MAX];
+static unsigned long long prev_s0ix_res[SYS_STATE_MAX];
+static unsigned long long cur_s0ix_res[SYS_STATE_MAX];
+static unsigned long long cur_s0ix_cnt[SYS_STATE_MAX];
+static u32 S3_count;
+static unsigned long long S3_res;
+
+static void pmu_stat_seq_printf(struct seq_file *s, int type, char *typestr,
+							long long uptime)
+{
+	unsigned long long t;
+	u32 scu_val = 0, time = 0;
+	u32 remainder;
+	unsigned long init_2_now_time;
+	unsigned long long tsc_freq = 1330000;
+
+	/* If TSC calibration fails, use 1330 MHz as the default */
+	if (tsc_khz)
+		tsc_freq = tsc_khz;
+
+	/* Print S0ix residency counter */
+	if (type == SYS_STATE_S0I0) {
+		for (t = SYS_STATE_S0I1; t <= SYS_STATE_S3; t++)
+			time += cur_s0ix_res[t];
+	} else if (type < SYS_STATE_S3) {
+		t = readq(residency[type]);
+		if (t < prev_s0ix_res[type])
+			t += (((unsigned long long)~0) - prev_s0ix_res[type]);
+		else
+			t -= prev_s0ix_res[type];
+
+		if (type == SYS_STATE_S0I3)
+			t -= prev_s0ix_res[SYS_STATE_S3];
+	} else
+		t = prev_s0ix_res[SYS_STATE_S3];
+
+	if (type == SYS_STATE_S0I0) {
+		/* uptime (ns -> ms) minus the summed residencies (ms) */
+		t = uptime;
+		do_div(t, MICRO_SEC);
+		time = t - time;
+	} else {
+		/* s0ix residency counters are in the TSC cycle-count
+		 * domain; convert them to the millisecond time domain
+		 */
+		remainder = do_div(t, tsc_freq);
+
+		/* store time in millisecs */
+		time = (unsigned int)t;
+	}
+	cur_s0ix_res[type] = (unsigned int)time;
+
+	seq_printf(s, "%s\t%5lu.%03lu\t", typestr,
+		(unsigned long)(time/1000), (unsigned long)(time%1000));
+
+	t = uptime;
+	do_div(t, MICRO_SEC); /* time in milli secs */
+
+	/* Note: millisecond accuracy gives more precise
+	 * residency percentages, but trades off the maximum
+	 * time we can run without clearing the counters:
+	 * a 32-bit millisecond counter wraps after 2^32 ms,
+	 * i.e. ~49.7 days.
+	 */
+	init_2_now_time =  (unsigned long) t;
+
+	/* for calculating percentage residency */
+	t	= (u64)(time);
+	t	*= 100;
+
+	/* guard against divide by zero */
+	if (init_2_now_time) {
+		remainder = do_div(t, init_2_now_time);
+		time = (unsigned long) t;
+
+		/* get three digits of precision after
+		 * the decimal point */
+		t = (u64) remainder;
+		t *= 1000;
+		remainder = do_div(t, init_2_now_time);
+	} else
+		time = t = 0;
+
+	seq_printf(s, "%5lu.%03lu\t", (unsigned long) time, (unsigned long) t);
+
+	/* Print S0ix counters */
+	if (type == SYS_STATE_S0I0) {
+		for (t = SYS_STATE_S0I1; t <= SYS_STATE_S3; t++)
+			scu_val += cur_s0ix_cnt[t];
+		if (scu_val == 0) /* S0I0 residency 100% */
+			scu_val = 1;
+	} else if (type < SYS_STATE_S3) {
+		scu_val = readl(s0ix_counter[type]);
+		if (scu_val < prev_s0ix_cnt[type])
+			scu_val += (((u32)~0) - prev_s0ix_cnt[type]);
+		else
+			scu_val -= prev_s0ix_cnt[type];
+
+		if (type == SYS_STATE_S0I3)
+			scu_val -= prev_s0ix_cnt[SYS_STATE_S3];
+	} else
+		scu_val = prev_s0ix_cnt[SYS_STATE_S3];
+
+	if (type != SYS_STATE_S0I0)
+		cur_s0ix_cnt[type] = scu_val;
+
+	seq_printf(s, "%5lu\t", (unsigned long) scu_val);
+
+	remainder = 0;
+	t = cur_s0ix_res[type];
+	if (scu_val) { /* s0ix_time in millisecs */
+		do_div(t, scu_val);
+		remainder = do_div(t, 1000);
+	}
+	seq_printf(s, "%5lu.%03lu\n", (unsigned long) t,
+			(unsigned long) remainder);
+}
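+
+/*
+ * Illustrative sketch only: residency_percent() is a hypothetical helper
+ * (not called anywhere) that isolates the fixed-point percentage math
+ * used above.  'res' and 'total' are both in milliseconds; the whole
+ * percent is returned and three fractional digits are stored in *frac.
+ * The caller must guarantee total != 0.
+ */
+static inline unsigned long residency_percent(u64 res, unsigned long total,
+						unsigned long *frac)
+{
+	u64 t = res * 100;
+	u32 rem = do_div(t, total);	/* t is now the whole percent */
+	u64 f = (u64)rem * 1000;
+
+	do_div(f, total);		/* f is thousandths of a percent */
+	*frac = (unsigned long)f;
+	return (unsigned long)t;
+}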
+
+static int pmu_devices_state_show(struct seq_file *s, void *unused)
+{
+	struct pci_dev *pdev = NULL;
+	int index, i, pmu_num, ss_idx, ss_pos;
+	unsigned int base_class;
+	u32 mask, val, nc_pwr_sts;
+	struct pmu_ss_states cur_pmsss;
+	long long uptime;
+	int ret;
+
+	if (!pmu_initialized)
+		return 0;
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	_pmu2_wait_not_busy();
+	pmu_read_sss(&cur_pmsss);
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	seq_printf(s, "SSS: ");
+
+	for (i = 0; i < 4; i++)
+		seq_printf(s, "%08lX ", cur_pmsss.pmu2_states[i]);
+
+	seq_printf(s, "cmd_error_int count: %d\n", mid_pmu_cxt->cmd_error_int);
+
+	seq_printf(s, "\ttime(secs)\tresidency(%%)\tcount\tAvg.Res(Sec)\n");
+
+	down(&mid_pmu_cxt->scu_ready_sem);
+	/* Dump S0ix residency counters */
+	ret = intel_scu_ipc_simple_command(DUMP_RES_COUNTER, 0);
+	if (ret)
+		seq_printf(s, "IPC command to DUMP S0ix residency failed\n");
+
+	/* Dump the number of iterations of S0ix */
+	ret = intel_scu_ipc_simple_command(DUMP_S0IX_COUNT, 0);
+	if (ret)
+		seq_printf(s, "IPC command to DUMP S0ix count failed\n");
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	uptime =  cpu_clock(0);
+	uptime -= mid_pmu_cxt->pmu_init_time;
+	pmu_stat_seq_printf(s, SYS_STATE_S0I1, "s0i1", uptime);
+	pmu_stat_seq_printf(s, SYS_STATE_LPMP3, "lpmp3", uptime);
+	pmu_stat_seq_printf(s, SYS_STATE_S0I2, "s0i2", uptime);
+	pmu_stat_seq_printf(s, SYS_STATE_S0I3, "s0i3", uptime);
+	pmu_stat_seq_printf(s, SYS_STATE_S3, "s3", uptime);
+	pmu_stat_seq_printf(s, SYS_STATE_S0I0, "s0", uptime);
+
+	val = do_div(uptime, NANO_SEC);
+	seq_printf(s, "\n\nTotal time: %5lu.%03lu Sec\n", (unsigned long)uptime,
+		   (unsigned long) val/1000000);
+
+	seq_printf(s, "\nNORTH COMPLEX DEVICES :\n\n");
+
+	nc_pwr_sts = intel_mid_msgbus_read32(PUNIT_PORT, NC_PM_SSS);
+	for (i = 0; i < mrfl_no_of_nc_devices; i++) {
+		val = nc_pwr_sts & 3;
+		nc_pwr_sts >>= BITS_PER_LSS;
+		seq_printf(s, "%9s : %s\n", mrfl_nc_devices[i], dstates[val]);
+	}
+
+	seq_printf(s, "\nSOUTH COMPLEX DEVICES :\n\n");
+
+	for_each_pci_dev(pdev) {
+		/* find the base class info */
+		base_class = pdev->class >> 16;
+
+		if (base_class == PCI_BASE_CLASS_BRIDGE)
+			continue;
+
+		if (pmu_pci_to_indexes(pdev, &index, &pmu_num, &ss_idx,
+								  &ss_pos))
+			continue;
+
+		if (pmu_num == PMU_NUM_1)
+			continue;
+
+		mask	= (D0I3_MASK << (ss_pos * BITS_PER_LSS));
+		val	= (cur_pmsss.pmu2_states[ss_idx] & mask) >>
+						(ss_pos * BITS_PER_LSS);
+
+		seq_printf(s, "pci %04x %04X %s %20.20s: lss:%02d reg:%d ",
+			pdev->vendor, pdev->device, dev_name(&pdev->dev),
+			dev_driver_string(&pdev->dev),
+			index - mid_pmu_cxt->pmu1_max_devs, ss_idx);
+		seq_printf(s, "mask:%08X  %s\n",  mask, dstates[val & 3]);
+	}
+
+	return 0;
+}
+
+static int devices_state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmu_devices_state_show, NULL);
+}
+
+static ssize_t devices_state_write(struct file *file,
+		     const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int ret;
+	int buf_size = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, userbuf, buf_size))
+		return -EFAULT;
+	buf[buf_size] = 0;
+
+	if (((strlen("clear")+1) == buf_size) &&
+		!strncmp(buf, "clear", strlen("clear"))) {
+		down(&mid_pmu_cxt->scu_ready_sem);
+
+		/* Dump S0ix residency counters */
+		ret = intel_scu_ipc_simple_command(DUMP_RES_COUNTER, 0);
+		if (ret)
+			printk(KERN_ERR "IPC command to DUMP S0ix residency failed\n");
+
+		/* Dump the number of iterations of S0ix */
+		ret = intel_scu_ipc_simple_command(DUMP_S0IX_COUNT, 0);
+		if (ret)
+			printk(KERN_ERR "IPC command to DUMP S0ix count failed\n");
+		up(&mid_pmu_cxt->scu_ready_sem);
+
+		mid_pmu_cxt->pmu_init_time = cpu_clock(0);
+		prev_s0ix_cnt[SYS_STATE_S0I1] = readl(s0ix_counter[SYS_STATE_S0I1]);
+		prev_s0ix_cnt[SYS_STATE_LPMP3] = readl(s0ix_counter[SYS_STATE_LPMP3]);
+		prev_s0ix_cnt[SYS_STATE_S0I2] = readl(s0ix_counter[SYS_STATE_S0I2]);
+		prev_s0ix_cnt[SYS_STATE_S0I3] = readl(s0ix_counter[SYS_STATE_S0I3]);
+		prev_s0ix_cnt[SYS_STATE_S3] = 0;
+		prev_s0ix_res[SYS_STATE_S0I1] = readq(residency[SYS_STATE_S0I1]);
+		prev_s0ix_res[SYS_STATE_LPMP3] = readq(residency[SYS_STATE_LPMP3]);
+		prev_s0ix_res[SYS_STATE_S0I2] = readq(residency[SYS_STATE_S0I2]);
+		prev_s0ix_res[SYS_STATE_S0I3] = readq(residency[SYS_STATE_S0I3]);
+		prev_s0ix_res[SYS_STATE_S3] = 0;
+	}
+	return buf_size;
+}
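+
+/*
+ * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
+ * writing the literal string "clear" re-baselines all S0ix counters
+ * and residencies, e.g.:
+ *
+ *	echo clear > /sys/kernel/debug/mid_pmu_states
+ */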
+
+static const struct file_operations devices_state_operations = {
+	.open		= devices_state_open,
+	.read		= seq_read,
+	.write		= devices_state_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+#ifdef CONFIG_PM_DEBUG
+static int ignore_lss_show(struct seq_file *s, void *unused)
+{
+	u32 local_ignore_lss[4];
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	memcpy(local_ignore_lss, mid_pmu_cxt->ignore_lss, (sizeof(u32)*4));
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	seq_printf(s, "IGNORE_LSS[0]: %08X\n", local_ignore_lss[0]);
+	seq_printf(s, "IGNORE_LSS[1]: %08X\n", local_ignore_lss[1]);
+	seq_printf(s, "IGNORE_LSS[2]: %08X\n", local_ignore_lss[2]);
+	seq_printf(s, "IGNORE_LSS[3]: %08X\n", local_ignore_lss[3]);
+
+	return 0;
+}
+
+static int ignore_add_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ignore_lss_show, NULL);
+}
+
+static ssize_t ignore_add_write(struct file *file,
+		     const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int res;
+	int buf_size = min(count, sizeof(buf)-1);
+	int sub_sys_pos, sub_sys_index;
+	u32 lss, local_ignore_lss[4];
+	u32 pm_cmd_val;
+
+	if (copy_from_user(buf, userbuf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = 0;
+
+	res = kstrtou32(buf, 10, &lss);
+
+	if (res)
+		return -EINVAL;
+
+	if (lss > MAX_LSS_POSSIBLE)
+		return -EINVAL;
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	memcpy(local_ignore_lss, mid_pmu_cxt->ignore_lss, (sizeof(u32)*4));
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	/* If set to MAX_LSS_POSSIBLE it means
+	 * ignore all.
+	 */
+	if (lss == MAX_LSS_POSSIBLE) {
+		local_ignore_lss[0] = 0xFFFFFFFF;
+		local_ignore_lss[1] = 0xFFFFFFFF;
+		local_ignore_lss[2] = 0xFFFFFFFF;
+		local_ignore_lss[3] = 0xFFFFFFFF;
+	} else {
+		sub_sys_index	= lss / mid_pmu_cxt->ss_per_reg;
+		sub_sys_pos	= lss % mid_pmu_cxt->ss_per_reg;
+
+		pm_cmd_val =
+			(D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+		local_ignore_lss[sub_sys_index] |= pm_cmd_val;
+	}
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	memcpy(mid_pmu_cxt->ignore_lss, local_ignore_lss, (sizeof(u32)*4));
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	return buf_size;
+}
+
+static const struct file_operations ignore_add_ops = {
+	.open		= ignore_add_open,
+	.read		= seq_read,
+	.write		= ignore_add_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int ignore_remove_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ignore_lss_show, NULL);
+}
+
+static ssize_t ignore_remove_write(struct file *file,
+		     const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int res;
+	int buf_size = min(count, sizeof(buf)-1);
+	int sub_sys_pos, sub_sys_index;
+	u32 lss, local_ignore_lss[4];
+	u32 pm_cmd_val;
+
+	if (copy_from_user(buf, userbuf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = 0;
+
+	res = kstrtou32(buf, 10, &lss);
+
+	if (res)
+		return -EINVAL;
+
+	if (lss > MAX_LSS_POSSIBLE)
+		return -EINVAL;
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	memcpy(local_ignore_lss, mid_pmu_cxt->ignore_lss, (sizeof(u32)*4));
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	/* If set to MAX_LSS_POSSIBLE it means
+	 * remove all from ignore list.
+	 */
+	if (lss == MAX_LSS_POSSIBLE) {
+		local_ignore_lss[0] = 0;
+		local_ignore_lss[1] = 0;
+		local_ignore_lss[2] = 0;
+		local_ignore_lss[3] = 0;
+	} else {
+		sub_sys_index	= lss / mid_pmu_cxt->ss_per_reg;
+		sub_sys_pos	= lss % mid_pmu_cxt->ss_per_reg;
+
+		pm_cmd_val =
+			(D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+		local_ignore_lss[sub_sys_index] &= ~pm_cmd_val;
+	}
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	memcpy(mid_pmu_cxt->ignore_lss, local_ignore_lss, (sizeof(u32)*4));
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	return buf_size;
+}
+
+static const struct file_operations ignore_remove_ops = {
+	.open		= ignore_remove_open,
+	.read		= seq_read,
+	.write		= ignore_remove_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int pmu_sync_d0ix_show(struct seq_file *s, void *unused)
+{
+	int i;
+	u32 local_os_sss[4];
+	struct pmu_ss_states cur_pmsss;
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	_pmu2_wait_not_busy();
+	/* Read SCU SSS */
+	pmu_read_sss(&cur_pmsss);
+	/* Read OS SSS */
+	memcpy(local_os_sss, mid_pmu_cxt->os_sss, (sizeof(u32)*4));
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	for (i = 0; i < 4; i++)
+		seq_printf(s, "OS_SSS[%d]: %08X\tSSS[%d]: %08lX\n", i,
+				local_os_sss[i], i, cur_pmsss.pmu2_states[i]);
+
+	return 0;
+}
+
+static int pmu_sync_d0ix_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmu_sync_d0ix_show, NULL);
+}
+
+static ssize_t pmu_sync_d0ix_write(struct file *file,
+		     const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int res, i;
+	bool send_cmd;
+	int buf_size = min(count, sizeof(buf)-1);
+	u32 lss, local_os_sss[4];
+	int sub_sys_pos, sub_sys_index;
+	u32 pm_cmd_val;
+	u32 temp_sss;
+
+	struct pmu_ss_states cur_pmsss;
+
+	if (copy_from_user(buf, userbuf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = 0;
+
+	res = kstrtou32(buf, 10, &lss);
+
+	if (res)
+		return -EINVAL;
+
+	if (lss > MAX_LSS_POSSIBLE)
+		return -EINVAL;
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	_pmu2_wait_not_busy();
+	/* Read SCU SSS */
+	pmu_read_sss(&cur_pmsss);
+
+	for (i = 0; i < 4; i++)
+		local_os_sss[i] = mid_pmu_cxt->os_sss[i] &
+				~mid_pmu_cxt->ignore_lss[i];
+
+	send_cmd = false;
+	for (i = 0; i < 4; i++) {
+		if (local_os_sss[i] != cur_pmsss.pmu2_states[i]) {
+			send_cmd = true;
+			break;
+		}
+	}
+
+	if (send_cmd) {
+		int status;
+
+		if (lss == MAX_LSS_POSSIBLE) {
+			memcpy(cur_pmsss.pmu2_states, local_os_sss,
+							 (sizeof(u32)*4));
+		} else {
+			bool same;
+			sub_sys_index	= lss / mid_pmu_cxt->ss_per_reg;
+			sub_sys_pos	= lss % mid_pmu_cxt->ss_per_reg;
+			pm_cmd_val =
+				(D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+
+			/* don't send a d0ix request if the state is the same */
+			same =
+			((cur_pmsss.pmu2_states[sub_sys_index] & pm_cmd_val)
+			== (mid_pmu_cxt->os_sss[sub_sys_index] & pm_cmd_val));
+
+			if (same)
+				goto unlock;
+
+			cur_pmsss.pmu2_states[sub_sys_index] &= ~pm_cmd_val;
+			temp_sss =
+				mid_pmu_cxt->os_sss[sub_sys_index] & pm_cmd_val;
+			cur_pmsss.pmu2_states[sub_sys_index] |= temp_sss;
+		}
+
+		/* Issue the pmu command to PMU 2
+		 * flag is needed to distinguish between
+		 * S0ix vs interactive command in pmu_sc_irq()
+		 */
+		status = pmu_issue_interactive_command(&cur_pmsss, false,
+							false);
+
+		if (unlikely(status != PMU_SUCCESS)) {
+			dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+				 "Failed to Issue a PM command to PMU2\n");
+			goto unlock;
+		}
+
+		/*
+		 * Wait for the interactive command to complete.
+		 * If we don't wait, the driver may access the
+		 * device before the SCU has powered it on.
+		 */
+		status = _pmu2_wait_not_busy();
+		if (unlikely(status)) {
+			printk(KERN_CRIT "%s: D0ix transition failure\n",
+				__func__);
+		}
+	}
+
+unlock:
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	return buf_size;
+}
+
+static const struct file_operations pmu_sync_d0ix_ops = {
+	.open		= pmu_sync_d0ix_open,
+	.read		= seq_read,
+	.write		= pmu_sync_d0ix_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int pmu_force_d0ix_show(struct seq_file *s, void *unused)
+{
+	int i;
+	u32 local_os_sss[4];
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+	/* Read OS SSS */
+	memcpy(local_os_sss, mid_pmu_cxt->os_sss, (sizeof(u32)*4));
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	for (i = 0; i < 4; i++)
+		seq_printf(s, "OS_SSS[%d]: %08X\n", i, local_os_sss[i]);
+
+	return 0;
+}
+
+static int pmu_force_d0ix_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmu_force_d0ix_show, NULL);
+}
+
+static ssize_t pmu_force_d0i3_write(struct file *file,
+		     const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int res;
+	int buf_size = min(count, sizeof(buf)-1);
+	u32 lss, local_os_sss[4];
+	int sub_sys_pos, sub_sys_index;
+	u32 pm_cmd_val;
+
+	if (copy_from_user(buf, userbuf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = 0;
+
+	res = kstrtou32(buf, 10, &lss);
+
+	if (res)
+		return -EINVAL;
+
+	if (lss > MAX_LSS_POSSIBLE)
+		return -EINVAL;
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+
+	if (lss == MAX_LSS_POSSIBLE) {
+		local_os_sss[0] =
+		local_os_sss[1] =
+		local_os_sss[2] =
+		local_os_sss[3] = 0xFFFFFFFF;
+	} else {
+		memcpy(local_os_sss, mid_pmu_cxt->os_sss, (sizeof(u32)*4));
+		sub_sys_index	= lss / mid_pmu_cxt->ss_per_reg;
+		sub_sys_pos	= lss % mid_pmu_cxt->ss_per_reg;
+		pm_cmd_val =
+			(D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+
+		local_os_sss[sub_sys_index] |= pm_cmd_val;
+	}
+
+	memcpy(mid_pmu_cxt->os_sss, local_os_sss, (sizeof(u32)*4));
+
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	return buf_size;
+}
+
+static const struct file_operations pmu_force_d0i3_ops = {
+	.open		= pmu_force_d0ix_open,
+	.read		= seq_read,
+	.write		= pmu_force_d0i3_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static ssize_t pmu_force_d0i0_write(struct file *file,
+		     const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int res;
+	int buf_size = min(count, sizeof(buf)-1);
+	u32 lss, local_os_sss[4];
+	int sub_sys_pos, sub_sys_index;
+	u32 pm_cmd_val;
+
+	if (copy_from_user(buf, userbuf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = 0;
+
+	res = kstrtou32(buf, 10, &lss);
+
+	if (res)
+		return -EINVAL;
+
+	if (lss > MAX_LSS_POSSIBLE)
+		return -EINVAL;
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+
+	if (lss == MAX_LSS_POSSIBLE) {
+		local_os_sss[0] =
+		local_os_sss[1] =
+		local_os_sss[2] =
+		local_os_sss[3] = 0;
+	} else {
+		memcpy(local_os_sss, mid_pmu_cxt->os_sss, (sizeof(u32)*4));
+		sub_sys_index	= lss / mid_pmu_cxt->ss_per_reg;
+		sub_sys_pos	= lss % mid_pmu_cxt->ss_per_reg;
+		pm_cmd_val =
+			(D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+
+		local_os_sss[sub_sys_index] &= ~pm_cmd_val;
+	}
+
+	memcpy(mid_pmu_cxt->os_sss, local_os_sss, (sizeof(u32)*4));
+
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	return buf_size;
+}
+
+static const struct file_operations pmu_force_d0i0_ops = {
+	.open		= pmu_force_d0ix_open,
+	.read		= seq_read,
+	.write		= pmu_force_d0i0_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int cstate_ignore_add_show(struct seq_file *s, void *unused)
+{
+	int i;
+	seq_printf(s, "CSTATES IGNORED: ");
+	for (i = 0; i < CPUIDLE_STATE_MAX; i++)
+		if ((mid_pmu_cxt->cstate_ignore & (1 << i)))
+			seq_printf(s, "%d, ", i+1);
+
+	seq_printf(s, "\n");
+	return 0;
+}
+
+static int cstate_ignore_add_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, cstate_ignore_add_show, NULL);
+}
+
+static ssize_t cstate_ignore_add_write(struct file *file,
+		     const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int res;
+	u32 cstate;
+	int buf_size = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, userbuf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = 0;
+
+	res = kstrtou32(buf, 10, &cstate);
+
+	if (res)
+		return -EINVAL;
+
+	if (cstate > MAX_CSTATES_POSSIBLE)
+		return -EINVAL;
+
+	/* cannot add/remove C0, C1 */
+	if (((cstate == 0) || (cstate == 1))) {
+		printk(KERN_CRIT "C0 C1 state cannot be used.\n");
+		return -EINVAL;
+	}
+
+	if (!mid_pmu_cxt->cstate_qos)
+		return -EINVAL;
+
+	if (cstate == MAX_CSTATES_POSSIBLE) {
+		mid_pmu_cxt->cstate_ignore = ((1 << CPUIDLE_STATE_MAX) - 1);
+		pm_qos_update_request(mid_pmu_cxt->cstate_qos,
+					CSTATE_EXIT_LATENCY_C1 - 1);
+	} else {
+		u32 cstate_exit_latency[CPUIDLE_STATE_MAX+1];
+		u32 local_cstate_allowed;
+		int max_cstate_allowed;
+
+		/* bit 0 is the C1 state */
+		cstate--;
+		mid_pmu_cxt->cstate_ignore |= (1 << cstate);
+
+		/* by default remove C1 from ignore list */
+		mid_pmu_cxt->cstate_ignore &= ~(1 << 0);
+
+		/* populate cstate latency table */
+		cstate_exit_latency[0] = CSTATE_EXIT_LATENCY_C1;
+		cstate_exit_latency[1] = CSTATE_EXIT_LATENCY_C2;
+		cstate_exit_latency[2] = CSTATE_EXIT_LATENCY_C2;
+		cstate_exit_latency[3] = CSTATE_EXIT_LATENCY_C2;
+		cstate_exit_latency[4] = CSTATE_EXIT_LATENCY_C2;
+		cstate_exit_latency[5] = CSTATE_EXIT_LATENCY_C6;
+		cstate_exit_latency[6] = CSTATE_EXIT_LATENCY_S0i1;
+		cstate_exit_latency[7] = CSTATE_EXIT_LATENCY_S0i2;
+		cstate_exit_latency[8] = CSTATE_EXIT_LATENCY_S0i3;
+		cstate_exit_latency[9] = PM_QOS_DEFAULT_VALUE;
+		cstate_exit_latency[10] = PM_QOS_DEFAULT_VALUE;
+
+		local_cstate_allowed = ~mid_pmu_cxt->cstate_ignore;
+
+		/* restrict to max c-states */
+		local_cstate_allowed &= ((1<<CPUIDLE_STATE_MAX)-1);
+
+		/* If no states allowed will return 0 */
+		max_cstate_allowed = fls(local_cstate_allowed);
+
+		printk(KERN_CRIT "max_cstate: %d local_cstate_allowed = %x\n",
+			max_cstate_allowed, local_cstate_allowed);
+		printk(KERN_CRIT "exit latency = %d\n",
+				(cstate_exit_latency[max_cstate_allowed]-1));
+		pm_qos_update_request(mid_pmu_cxt->cstate_qos,
+				(cstate_exit_latency[max_cstate_allowed]-1));
+	}
+
+	return buf_size;
+}
+
+static const struct file_operations cstate_ignore_add_ops = {
+	.open		= cstate_ignore_add_open,
+	.read		= seq_read,
+	.write		= cstate_ignore_add_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
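+
+/*
+ * Usage sketch (assuming debugfs at /sys/kernel/debug): writing a
+ * C-state number adds it to the ignore list and tightens the PM QoS
+ * latency request accordingly, e.g.:
+ *
+ *	echo 6 > /sys/kernel/debug/cstate_ignore_add
+ *
+ * Writing MAX_CSTATES_POSSIBLE ignores all C-states.
+ */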
+
+static int cstate_ignore_remove_show(struct seq_file *s, void *unused)
+{
+	int i;
+	seq_printf(s, "CSTATES ALLOWED: ");
+	for (i = 0; i < CPUIDLE_STATE_MAX; i++)
+		if (!(mid_pmu_cxt->cstate_ignore & (1 << i)))
+			seq_printf(s, "%d, ", i+1);
+
+	seq_printf(s, "\n");
+
+	return 0;
+}
+
+static int cstate_ignore_remove_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, cstate_ignore_remove_show, NULL);
+}
+
+static ssize_t cstate_ignore_remove_write(struct file *file,
+		     const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int res;
+	u32 cstate;
+	int buf_size = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, userbuf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = 0;
+
+	res = kstrtou32(buf, 10, &cstate);
+
+	if (res)
+		return -EINVAL;
+
+	if (cstate > MAX_CSTATES_POSSIBLE)
+		return -EINVAL;
+
+	/* cannot add/remove C0, C1 */
+	if (((cstate == 0) || (cstate == 1))) {
+		printk(KERN_CRIT "C0 C1 state cannot be used.\n");
+		return -EINVAL;
+	}
+
+	if (!mid_pmu_cxt->cstate_qos)
+		return -EINVAL;
+
+	if (cstate == MAX_CSTATES_POSSIBLE) {
+		mid_pmu_cxt->cstate_ignore =
+				~((1 << CPUIDLE_STATE_MAX) - 1);
+		/* Ignore C2, C3, C5, C8 and C10 states */
+		mid_pmu_cxt->cstate_ignore |= (1 << 1);
+		mid_pmu_cxt->cstate_ignore |= (1 << 2);
+		mid_pmu_cxt->cstate_ignore |= (1 << 4);
+		mid_pmu_cxt->cstate_ignore |= (1 << 7);
+		mid_pmu_cxt->cstate_ignore |= (1 << 9);
+
+		pm_qos_update_request(mid_pmu_cxt->cstate_qos,
+						PM_QOS_DEFAULT_VALUE);
+	} else {
+		u32 cstate_exit_latency[CPUIDLE_STATE_MAX+1];
+		u32 local_cstate_allowed;
+		int max_cstate_allowed;
+
+		/* populate cstate latency table */
+		cstate_exit_latency[0] = CSTATE_EXIT_LATENCY_C1;
+		cstate_exit_latency[1] = CSTATE_EXIT_LATENCY_C2;
+		cstate_exit_latency[2] = CSTATE_EXIT_LATENCY_C2;
+		cstate_exit_latency[3] = CSTATE_EXIT_LATENCY_C2;
+		cstate_exit_latency[4] = CSTATE_EXIT_LATENCY_C2;
+		cstate_exit_latency[5] = CSTATE_EXIT_LATENCY_C6;
+		cstate_exit_latency[6] = CSTATE_EXIT_LATENCY_S0i1;
+		cstate_exit_latency[7] = CSTATE_EXIT_LATENCY_S0i2;
+		cstate_exit_latency[8] = CSTATE_EXIT_LATENCY_S0i3;
+		cstate_exit_latency[9] = PM_QOS_DEFAULT_VALUE;
+		cstate_exit_latency[10] = PM_QOS_DEFAULT_VALUE;
+
+		/* bit 0 is the C1 state */
+		cstate--;
+		mid_pmu_cxt->cstate_ignore &= ~(1 << cstate);
+
+		/* by default remove C1 from ignore list */
+		mid_pmu_cxt->cstate_ignore &= ~(1 << 0);
+
+		/* Ignore C2, C3, C5, C8 and C10 states */
+		mid_pmu_cxt->cstate_ignore |= (1 << 1);
+		mid_pmu_cxt->cstate_ignore |= (1 << 2);
+		mid_pmu_cxt->cstate_ignore |= (1 << 4);
+		mid_pmu_cxt->cstate_ignore |= (1 << 7);
+		mid_pmu_cxt->cstate_ignore |= (1 << 9);
+
+		local_cstate_allowed = ~mid_pmu_cxt->cstate_ignore;
+		/* restrict to max c-states */
+		local_cstate_allowed &= ((1<<CPUIDLE_STATE_MAX)-1);
+
+		/* If no states allowed will return 0 */
+		max_cstate_allowed = fls(local_cstate_allowed);
+		printk(KERN_CRIT "max_cstate: %d local_cstate_allowed = %x\n",
+			max_cstate_allowed, local_cstate_allowed);
+		printk(KERN_CRIT "exit latency = %d\n",
+				(cstate_exit_latency[max_cstate_allowed]-1));
+		pm_qos_update_request(mid_pmu_cxt->cstate_qos,
+				(cstate_exit_latency[max_cstate_allowed]-1));
+	}
+
+	return buf_size;
+}
+
+static const struct file_operations cstate_ignore_remove_ops = {
+	.open		= cstate_ignore_remove_open,
+	.read		= seq_read,
+	.write		= cstate_ignore_remove_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int s3_ctrl_show(struct seq_file *s, void *unused)
+{
+	seq_printf(s, "%d\n", enable_s3);
+	return 0;
+}
+
+static int s3_ctrl_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, s3_ctrl_show, NULL);
+}
+
+static ssize_t s3_ctrl_write(struct file *file,
+		     const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int res;
+	u32 local_s3_ctrl;
+	int buf_size = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, userbuf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = 0;
+
+	res = kstrtou32(buf, 10, &local_s3_ctrl);
+
+	if (res)
+		return -EINVAL;
+
+	enable_s3 = local_s3_ctrl ? 1 : 0;
+
+	if (enable_s3)
+		__pm_relax(mid_pmu_cxt->pmu_wake_lock);
+	else
+		__pm_stay_awake(mid_pmu_cxt->pmu_wake_lock);
+
+	return buf_size;
+}
+
+static const struct file_operations s3_ctrl_ops = {
+	.open		= s3_ctrl_open,
+	.read		= seq_read,
+	.write		= s3_ctrl_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+unsigned int pmu_get_new_cstate(unsigned int cstate, int *index)
+{
+	static int cstate_index_table[CPUIDLE_STATE_MAX] = {
+					1, 1, 1, 1, 1, 2, 3, 3, 4, 4};
+	unsigned int new_cstate = cstate;
+	u32 local_cstate = (u32)(cstate);
+	u32 local_cstate_allowed = ~mid_pmu_cxt->cstate_ignore;
+	u32 cstate_mask, cstate_no_s0ix_mask = (u32)((1 << 6) - 1);
+
+	if (platform_is(INTEL_ATOM_MRFLD)) {
+		/* cstate also reads 7 for C9, so correct it using the index */
+		if ((local_cstate == 7) && (*index == 4))
+			local_cstate = 9;
+
+		/* get next low cstate allowed */
+		cstate_mask = (u32)((1 << local_cstate)-1);
+		/* guard against cstate == 0, which should not happen */
+		cstate_mask |= 1;
+		local_cstate_allowed	&= ((1<<CPUIDLE_STATE_MAX)-1);
+		local_cstate_allowed	&= cstate_mask;
+		if (!could_do_s0ix())
+			local_cstate_allowed &= cstate_no_s0ix_mask;
+		new_cstate	= fls(local_cstate_allowed);
+		/* fls() returns 0 if nothing is allowed; fall back to C1 */
+		if (!new_cstate)
+			new_cstate = 1;
+
+		*index	= cstate_index_table[new_cstate-1];
+	}
+
+	return new_cstate;
+}
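+
+/*
+ * Worked example of the demotion math above: bit n of the allowed mask
+ * corresponds to C(n+1).  If C7 is requested, cstate_mask = 0x7F; if
+ * only C1 and C6 are allowed (local_cstate_allowed = 0x21), fls() picks
+ * bit 5, i.e. new_cstate = 6, and cstate_index_table[5] yields index 2.
+ */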
+#endif
+
+DEFINE_PER_CPU(u64[NUM_CSTATES_RES_MEASURE], c_states_res);
+
+static int read_c_states_res(void)
+{
+	int cpu, i;
+	u32 lo, hi;
+
+	u32 c_states_res_msr[NUM_CSTATES_RES_MEASURE] = {
+		PUNIT_CR_CORE_C1_RES_MSR,
+		PUNIT_CR_CORE_C4_RES_MSR,
+		PUNIT_CR_CORE_C6_RES_MSR
+	};
+
+	for_each_online_cpu(cpu)
+		for (i = 0; i < NUM_CSTATES_RES_MEASURE; i++) {
+			u64 temp;
+			rdmsr_on_cpu(cpu, c_states_res_msr[i], &lo, &hi);
+			temp = hi;
+			temp <<= 32;
+			temp |= lo;
+			per_cpu(c_states_res, cpu)[i] = temp;
+		}
+
+	return 0;
+}
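+
+/*
+ * The hi/lo combine above reconstructs the full 64-bit MSR value;
+ * rdmsrl_on_cpu(cpu, msr, &val) would be the one-call equivalent.
+ */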
+
+static int c_states_stat_show(struct seq_file *s, void *unused)
+{
+	char *c_states_name[] = {
+		"C1",
+		"C4",
+		"C6"
+	};
+
+	int i, cpu;
+
+	seq_printf(s, "C STATES: %20s\n", "Residency");
+	for_each_online_cpu(cpu)
+		seq_printf(s, "%18s %d", "Core", cpu);
+	seq_printf(s, "\n");
+
+	read_c_states_res();
+	for (i = 0; i < NUM_CSTATES_RES_MEASURE; i++) {
+		seq_printf(s, "%s", c_states_name[i]);
+		for_each_online_cpu(cpu)
+			seq_printf(s, "%18llu", per_cpu(c_states_res, cpu)[i]);
+		seq_printf(s, "\n");
+	}
+	return 0;
+}
+
+static int c_states_stat_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, c_states_stat_show, NULL);
+}
+
+static const struct file_operations c_states_stat_ops = {
+	.open		= c_states_stat_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
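+
+/*
+ * Usage sketch: 'cat /sys/kernel/debug/c_states_stat' prints the
+ * per-core C1/C4/C6 residency MSR values read above.
+ */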
+
+/* These are placeholders and will be enabled in the next patch */
+
+void pmu_log_pmu_irq(int status) {}
+void pmu_log_ipc_irq(void) {}
+void pmu_log_ipc(u32 command) {}
+void pmu_log_command(u32 command, struct pmu_ss_states *pm_ssc) {}
+void pmu_dump_logs(void) {}
+void pmu_stat_start(enum sys_state type) {}
+void pmu_stat_end(void) {}
+void pmu_stat_error(u8 err_type) {}
+void pmu_s0ix_demotion_stat(int req_state, int grant_state) {}
+EXPORT_SYMBOL(pmu_s0ix_demotion_stat);
+
+void pmu_stats_finish(void)
+{
+#ifdef CONFIG_PM_DEBUG
+	if (mid_pmu_cxt->cstate_qos) {
+		pm_qos_remove_request(mid_pmu_cxt->cstate_qos);
+		kfree(mid_pmu_cxt->cstate_qos);
+		mid_pmu_cxt->cstate_qos = NULL;
+	}
+#endif
+
+	return;
+}
+
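+/*
+ * The SCU accounts S3 time through the S0I3 counters, so we snapshot
+ * them on suspend entry and accumulate the delta into the S3 slots
+ * (prev_s0ix_cnt/prev_s0ix_res above) on exit.
+ */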
+void pmu_s3_stats_update(int enter)
+{
+#ifdef CONFIG_PM_DEBUG
+	int ret;
+
+	down(&mid_pmu_cxt->scu_ready_sem);
+	/* Dump S0ix residency counters */
+	ret = intel_scu_ipc_simple_command(DUMP_RES_COUNTER, 0);
+	if (ret)
+		printk(KERN_ERR "IPC command to DUMP S0ix residency failed\n");
+
+	/* Dump the number of iterations of S0ix */
+	ret = intel_scu_ipc_simple_command(DUMP_S0IX_COUNT, 0);
+	if (ret)
+		printk(KERN_ERR "IPC command to DUMP S0ix count failed\n");
+
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	if (enter == 1) {
+		S3_count  = readl(s0ix_counter[SYS_STATE_S0I3]);
+		S3_res = readq(residency[SYS_STATE_S0I3]);
+	} else {
+		prev_s0ix_cnt[SYS_STATE_S3] +=
+			(readl(s0ix_counter[SYS_STATE_S0I3])) - S3_count;
+		prev_s0ix_res[SYS_STATE_S3] += (readq(residency[SYS_STATE_S0I3])) - S3_res;
+	}
+
+#endif
+	return;
+}
+
+void pmu_stats_init(void)
+{
+	/* /sys/kernel/debug/mid_pmu_states */
+	(void) debugfs_create_file("mid_pmu_states", S_IFREG | S_IRUGO,
+				NULL, NULL, &devices_state_operations);
+
+	/* /sys/kernel/debug/c_states_stat */
+	(void) debugfs_create_file("c_states_stat", S_IFREG | S_IRUGO,
+				NULL, NULL, &c_states_stat_ops);
+#ifdef CONFIG_PM_DEBUG
+	if (platform_is(INTEL_ATOM_MRFLD)) {
+		/* If s0ix is disabled then restrict to C6 */
+		if (!enable_s0ix) {
+			mid_pmu_cxt->cstate_ignore =
+				~((1 << CPUIDLE_STATE_MAX) - 1);
+
+			/* Ignore C2, C3, C5 states */
+			mid_pmu_cxt->cstate_ignore |= (1 << 1);
+			mid_pmu_cxt->cstate_ignore |= (1 << 2);
+			mid_pmu_cxt->cstate_ignore |= (1 << 4);
+
+			/* For now ignore C7, C8, C9, C10 states */
+			mid_pmu_cxt->cstate_ignore |= (1 << 6);
+			mid_pmu_cxt->cstate_ignore |= (1 << 7);
+			mid_pmu_cxt->cstate_ignore |= (1 << 8);
+			mid_pmu_cxt->cstate_ignore |= (1 << 9);
+		} else {
+			mid_pmu_cxt->cstate_ignore =
+				~((1 << CPUIDLE_STATE_MAX) - 1);
+
+			/* Ignore C2, C3, C5, C8 and C10 states */
+			mid_pmu_cxt->cstate_ignore |= (1 << 1);
+			mid_pmu_cxt->cstate_ignore |= (1 << 2);
+			mid_pmu_cxt->cstate_ignore |= (1 << 4);
+			mid_pmu_cxt->cstate_ignore |= (1 << 7);
+			mid_pmu_cxt->cstate_ignore |= (1 << 9);
+		}
+
+		mid_pmu_cxt->cstate_qos =
+			kzalloc(sizeof(struct pm_qos_request), GFP_KERNEL);
+		if (mid_pmu_cxt->cstate_qos) {
+			pm_qos_add_request(mid_pmu_cxt->cstate_qos,
+				 PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+		}
+
+		/* If s0ix is disabled then restrict to C6 */
+		if (!enable_s0ix) {
+			/* Restrict platform Cx state to C6 */
+			pm_qos_update_request(mid_pmu_cxt->cstate_qos,
+						(CSTATE_EXIT_LATENCY_S0i1-1));
+		}
+
+		/* /sys/kernel/debug/ignore_add */
+		(void) debugfs_create_file("ignore_add", S_IFREG | S_IRUGO,
+					NULL, NULL, &ignore_add_ops);
+		/* /sys/kernel/debug/ignore_remove */
+		(void) debugfs_create_file("ignore_remove", S_IFREG | S_IRUGO,
+					NULL, NULL, &ignore_remove_ops);
+		/* /sys/kernel/debug/pmu_sync_d0ix */
+		(void) debugfs_create_file("pmu_sync_d0ix", S_IFREG | S_IRUGO,
+					NULL, NULL, &pmu_sync_d0ix_ops);
+		/* /sys/kernel/debug/pmu_force_d0i0 */
+		(void) debugfs_create_file("pmu_force_d0i0", S_IFREG | S_IRUGO,
+					NULL, NULL, &pmu_force_d0i0_ops);
+		/* /sys/kernel/debug/pmu_force_d0i3 */
+		(void) debugfs_create_file("pmu_force_d0i3", S_IFREG | S_IRUGO,
+					NULL, NULL, &pmu_force_d0i3_ops);
+		/* /sys/kernel/debug/cstate_ignore_add */
+		(void) debugfs_create_file("cstate_ignore_add",
+			S_IFREG | S_IRUGO, NULL, NULL, &cstate_ignore_add_ops);
+		/* /sys/kernel/debug/cstate_ignore_remove */
+		(void) debugfs_create_file("cstate_ignore_remove",
+		S_IFREG | S_IRUGO, NULL, NULL, &cstate_ignore_remove_ops);
+		/* /sys/kernel/debug/s3_ctrl */
+		(void) debugfs_create_file("s3_ctrl",
+		S_IFREG | S_IRUGO, NULL, NULL, &s3_ctrl_ops);
+	}
+#endif
+}
+
+#endif /*if CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER*/
diff --git a/arch/x86/platform/intel-mid/intel_soc_pm_debug.h b/arch/x86/platform/intel-mid/intel_soc_pm_debug.h
new file mode 100644
index 0000000..51e0871
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_soc_pm_debug.h
@@ -0,0 +1,234 @@
+/*
+ * intel_soc_pm_debug.h
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#ifndef _INTEL_SOC_PM_DEBUG_H
+#define _INTEL_SOC_PM_DEBUG_H
+#include <linux/intel_mid_pm.h>
+
+#include "intel_soc_pmu.h"
+
+#define NANO_SEC		1000000000UL /* 10^9 ns per sec */
+#define MICRO_SEC		1000000UL /* 10^6 us per sec */
+#define PMU_LOG_INTERVAL_SECS	(60*5) /* 5 minutes, in seconds */
+
+#define S0IX_LAT_SRAM_ADDR_CLVP		0xFFFF7FD0
+#define S0IX_LAT_SRAM_SIZE_CLVP		8
+
+#define IPC_CMD_S0IX_LATENCY_CLVP	0xCE
+#define IPC_SUB_MEASURE_START_CLVP	0x00
+#define IPC_SUB_MEASURE_STOP_CLVP	0x01
+
+struct simple_stat {
+	u64 min;
+	u64 max;
+	u64 total;
+	u64 curr;
+};
+
+struct entry_exit_stat {
+	struct simple_stat entry;
+	struct simple_stat exit;
+};
+
+struct latency_stat {
+	struct entry_exit_stat scu_latency[SYS_STATE_MAX];
+	struct entry_exit_stat os_latency[SYS_STATE_MAX];
+	struct simple_stat s3_parts_lat[MAX_S3_PARTS];
+	u64 count[SYS_STATE_MAX];
+	u32 __iomem *scu_s0ix_lat_addr;
+	struct dentry *dentry;
+	bool latency_measure;
+};
+
+struct island {
+	int type;
+	int index;
+	char *name;
+};
+
+struct lss_definition {
+	char *lss_name;
+	char *block;
+	char *subsystem;
+};
+
+#ifdef CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER
+#define PUNIT_CR_CORE_C1_RES_MSR	0x660
+#define PUNIT_CR_CORE_C4_RES_MSR	0x3fc
+#define PUNIT_CR_CORE_C6_RES_MSR	0x3fd
+
+#define NUM_CSTATES_RES_MEASURE		3
+
+extern unsigned int enable_s3;
+extern unsigned int enable_s0ix;
+
+extern u32 __iomem *residency[];
+extern u32 __iomem *s0ix_counter[];
+
+#endif
+
+/* platform dependency starts */
+#ifdef CONFIG_INTEL_REMOVEME_ATOM_MDFLD_POWER
+
+#define DEV_GFX		2
+#define FUNC_GFX	0
+#define ISLANDS_GFX	8
+#define DEV_ISP		3
+#define FUNC_ISP	0
+#define ISLANDS_ISP	2
+#define NC_DEVS		2
+
+static struct lss_definition lsses[] = {
+	{"Lss00", "Storage", "SDIO0 (HC2)"},
+	{"Lss01", "Storage", "eMMC0 (HC0a)"},
+	{"NA", "Storage", "ND_CTL (Note 5)"},
+	{"Lss03", "H S I", "H S I DMA"},
+	{"Lss04", "Security", "RNG"},
+	{"Lss05", "Storage", "eMMC1 (HC0b)"},
+	{"Lss06", "USB", "USB OTG (ULPI)"},
+	{"Lss07", "USB", "USB_SPH"},
+	{"Lss08", "Audio", ""},
+	{"Lss09", "Audio", ""},
+	{"Lss10", "SRAM", " SRAM CTL+SRAM_16KB"},
+	{"Lss11", "SRAM", " SRAM CTL+SRAM_16KB"},
+	{"Lss12", "SRAM", "SRAM BANK (16KB+3x32KB)"},
+	{"Lss13", "SRAM", "SRAM BANK(4x32KB)"},
+	{"Lss14", "SDIO COMMS", "SDIO2 (HC1b)"},
+	{"Lss15", "PTI, DAFCA", " DFX Blocks"},
+	{"Lss16", "SC", " DMA"},
+	{"NA", "SC", "SPI0/MSIC"},
+	{"Lss18", "GP", "SPI1"},
+	{"Lss19", "GP", " SPI2"},
+	{"Lss20", "GP", " I2C0"},
+	{"Lss21", "GP", " I2C1"},
+	{"NA", "Fabrics", " Main Fabric"},
+	{"NA", "Fabrics", " Secondary Fabric"},
+	{"NA", "SC", "SC Fabric"},
+	{"Lss25", "Audio", " I-RAM BANK1 (32 + 256KB)"},
+	{"NA", "SCU", " ROM BANK1 (18KB+18KB+18KB)"},
+	{"Lss27", "GP", "I2C2"},
+	{"NA", "SSC", "SSC (serial bus controller to FLIS)"},
+	{"Lss29", "Security", "Chaabi AON Registers"},
+	{"Lss30", "SDIO COMMS", "SDIO1 (HC1a)"},
+	{"NA", "SCU", "I-RAM BANK0 (32KB)"},
+	{"NA", "SCU", "I-RAM BANK1 (32KB)"},
+	{"Lss33", "GP", "I2C3 (HDMI)"},
+	{"Lss34", "GP", "I2C4"},
+	{"Lss35", "GP", "I2C5"},
+	{"Lss36", "GP", "SSP (SPI3)"},
+	{"Lss37", "GP", "GPIO1"},
+	{"NA", "GP", "GP Fabric"},
+	{"Lss39", "SC", "GPIO0"},
+	{"Lss40", "SC", "KBD"},
+	{"Lss41", "SC", "UART2:0"},
+	{"NA", "NA", "NA"},
+	{"NA", "NA", "NA"},
+	{"Lss44", "Security", " Security TAPC"},
+	{"NA", "MISC", "AON Timers"},
+	{"NA", "PLL", "LFHPLL and Spread Spectrum"},
+	{"NA", "PLL", "USB PLL"},
+	{"NA", "NA", "NA"},
+	{"NA", "Audio", "SLIMBUS CTL 1 (note 5)"},
+	{"NA", "Audio", "SLIMBUS CTL 2 (note 5)"},
+	{"Lss51", "Audio", "SSP0"},
+	{"Lss52", "Audio", "SSP1"},
+	{"NA", "Bridge", "IOSF to OCP Bridge"},
+	{"Lss54", "GP", "DMA"},
+	{"NA", "SC", "SVID (Serial Voltage ID)"},
+	{"NA", "SOC Fuse", "SoC Fuse Block (note 3)"},
+	{"NA", "NA", "NA"},
+};
+#endif
+
+#ifdef CONFIG_REMOVEME_INTEL_ATOM_CLV_POWER
+
+#define DEV_GFX		2
+#define FUNC_GFX	0
+#define ISLANDS_GFX	8
+#define DEV_ISP		3
+#define FUNC_ISP	0
+#define ISLANDS_ISP	2
+#define NC_DEVS		2
+
+static struct lss_definition lsses[] = {
+	{"Lss00", "Storage", "SDIO0 (HC2)"},
+	{"Lss01", "Storage", "eMMC0 (HC0a)"},
+	{"NA", "Timer", "AONT"},
+	{"Lss03", "H S I", "H S I DMA"},
+	{"Lss04", "Security", "RNG"},
+	{"Lss05", "Storage", "eMMC1 (HC0b)"},
+	{"Lss06", "USB", "USB OTG (ULPI)"},
+	{"Lss07", "USB", "USB_SPH"},
+	{"Lss08", "Audio", "Audio ENGINE"},
+	{"Lss09", "Audio", "Audio DMA"},
+	{"Lss10", "SRAM", " SRAM CTL+SRAM_16KB"},
+	{"Lss11", "SRAM", " SRAM CTL+SRAM_16KB"},
+	{"Lss12", "SRAM", "SRAM BANK (16KB+3x32KB)"},
+	{"Lss13", "SRAM", "SRAM BANK(4x32KB)"},
+	{"Lss14", "SDIO COMMS", "SDIO2 (HC1b)"},
+	{"Lss15", "PTI, DAFCA", " DFX Blocks"},
+	{"Lss16", "SC", " DMA"},
+	{"NA", "SC", "SPI0/MSIC"},
+	{"Lss18", "GP", "SPI1"},
+	{"Lss19", "GP", " SPI2"},
+	{"Lss20", "GP", " I2C0"},
+	{"Lss21", "GP", " I2C1"},
+	{"NA", "Timer", "HPET"},
+	{"NA", "Timer", "External Timer"},
+	{"NA", "SC", "SC Fabric"},
+	{"Lss25", "Audio", " I-RAM BANK1 (32 + 256KB)"},
+	{"NA", "SCU", " ROM BANK1 (18KB+18KB+18KB)"},
+	{"Lss27", "GP", "I2C2"},
+	{"NA", "SSC", "SSC (serial bus controller to FLIS)"},
+	{"Lss29", "Security", "Chaabi AON Registers"},
+	{"Lss30", "SDIO COMMS", "SDIO1 (HC1a)"},
+	{"NA", "Timer", "vRTC"},
+	{"NA", "Security", "Security Timer"},
+	{"Lss33", "GP", "I2C3 (HDMI)"},
+	{"Lss34", "GP", "I2C4"},
+	{"Lss35", "GP", "I2C5"},
+	{"Lss36", "GP", "SSP (SPI3)"},
+	{"Lss37", "GP", "GPIO1"},
+	{"NA", "MSIC", "Power Button"},
+	{"Lss39", "SC", "GPIO0"},
+	{"Lss40", "SC", "KBD"},
+	{"Lss41", "SC", "UART2:0"},
+	{"NA", "MSIC", "ADC"},
+	{"NA", "MSIC", "Charger"},
+	{"Lss44", "Security", " Security TAPC"},
+	{"NA", "MSIC", "AON Timers"},
+	{"NA", "MSIC", "GPI"},
+	{"NA", "MSIC", "BCU"},
+	{"NA", "NA", "SSP2"},
+	{"NA", "Audio", "SLIMBUS CTL 1 (note 5)"},
+	{"NA", "Audio", "SLIMBUS CTL 2 (note 5)"},
+	{"Lss51", "Audio", "SSP0"},
+	{"Lss52", "Audio", "SSP1"},
+	{"NA", "Bridge", "IOSF to OCP Bridge"},
+	{"Lss54", "GP", "DMA"},
+	{"NA", "MSIC", "RESET"},
+	{"NA", "SOC Fuse", "SoC Fuse Block (note 3)"},
+	{"NA", "NA", "NA"},
+	{"Lss58", "NA", "SSP4"},
+};
+#endif
+/* platform dependency ends */
+
+#endif
diff --git a/arch/x86/platform/intel-mid/intel_soc_pmu.c b/arch/x86/platform/intel-mid/intel_soc_pmu.c
new file mode 100644
index 0000000..94a3805
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_soc_pmu.c
@@ -0,0 +1,2143 @@
+/*
+ * intel_soc_pmu.c - This driver provides the interface to configure the two PMUs
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include "intel_soc_pmu.h"
+#include <linux/cpuidle.h>
+#include <linux/proc_fs.h>
+#include <asm/intel_mid_rpmsg.h>
+
+#ifdef CONFIG_DRM_INTEL_MID
+#define GFX_ENABLE
+#endif
+
+bool pmu_initialized;
+
+DEFINE_MUTEX(pci_root_lock);
+
+/* mid_pmu context structure */
+struct mid_pmu_dev *mid_pmu_cxt;
+
+struct platform_pmu_ops *pmu_ops;
+/*
+ * Locking strategy:
+ *
+ * One semaphore (scu_ready_sem) is used for accessing the busy bit
+ * and issuing interactive commands.
+ * The entry points into the pmu driver are pmu_pci_set_power_state()
+ * and the PMU interrupt handler contexts, so here is the flow of how
+ * the semaphore is used.
+ *
+ * In the D0ix command case:
+ * set_power_state process context:
+ * set_power_state()->acquire_scu_ready_sem()->issue_interactive_cmd->
+ * wait_for_interactive_complete->release scu_ready sem
+ *
+ * PMU Interrupt context:
+ * pmu_interrupt_handler()->release interactive_complete->return
+ *
+ * In the Idle handler case:
+ * Idle context:
+ * idle_handler()->try_acquire_scu_ready_sem->if acquired->
+ * issue s0ix command->return
+ *
+ * PMU Interrupt context:
+ * pmu_Interrupt_handler()->release scu_ready_sem->return
+ *
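+ * A condensed sketch of the two flows (names as in this file):
+ *
+ *	D0ix:	down(&scu_ready_sem); issue cmd; wait for complete; up()
+ *	S0ix:	if (!down_trylock(&scu_ready_sem)) issue s0ix command;
+ *		(the PMU interrupt handler then releases the semaphore)
+ *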
+ */
+
+/* Maps pci power states to SCU D0ix mask */
+static int pci_to_platform_state(pci_power_t pci_state)
+{
+	static int mask[]  = {D0I0_MASK, D0I1_MASK,
+				D0I2_MASK, D0I3_MASK, D0I3_MASK};
+
+	int state = D0I0_MASK;
+
+	if (pci_state > 4)
+		WARN(1, "%s: wrong pci_state received.\n", __func__);
+	else
+		state = mask[pci_state];
+
+	return state;
+}
+
+/* Maps power states to pmu driver's internal indexes */
+int mid_state_to_sys_state(int mid_state)
+{
+	int sys_state = 0;
+	switch (mid_state) {
+	case MID_S0I1_STATE:
+		sys_state = SYS_STATE_S0I1;
+		break;
+	case MID_LPMP3_STATE:
+		sys_state = SYS_STATE_S0I2;
+		break;
+	case MID_S0I3_STATE:
+		sys_state = SYS_STATE_S0I3;
+		break;
+	case MID_S3_STATE:
+		sys_state = SYS_STATE_S3;
+		break;
+
+	case C6_HINT:
+		sys_state = SYS_STATE_S0I0;
+		break;
+	}
+
+	return sys_state;
+}
+
+/* PCI Device Id structure */
+static DEFINE_PCI_DEVICE_TABLE(mid_pm_ids) = {
+	{PCI_VDEVICE(INTEL, MID_PMU_MFLD_DRV_DEV_ID), 0},
+	{PCI_VDEVICE(INTEL, MID_PMU_CLV_DRV_DEV_ID), 0},
+	{PCI_VDEVICE(INTEL, MID_PMU_MRFL_DRV_DEV_ID), 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(pci, mid_pm_ids);
+
+char s0ix[5] = "s0ix";
+
+module_param_call(s0ix, set_extended_cstate_mode,
+		get_extended_cstate_mode, NULL, 0644);
+
+MODULE_PARM_DESC(s0ix,
+	"setup extended c state s0ix mode [s0i3|s0i1|lmp3|"
+				"i1i3|lpi1|lpi3|s0ix|none]");
+
+/**
+ * This function sets all devices in d0i0 and deactivates the pmu driver.
+ * It is used before an IFWI update, since the update needs all devices
+ * to be in d0i0. A reboot is needed for the pmu driver to work
+ * properly again. After calling this function and the IFWI update,
+ * the system is always rebooted, as the IFWI update function,
+ * intel_scu_ipc_medfw_upgrade(), is called from mrst_emergency_reboot().
+ */
+int pmu_set_devices_in_d0i0(void)
+{
+	int status;
+	struct pmu_ss_states cur_pmssc;
+
+	/* Ignore request until we have initialized */
+	if (unlikely((!pmu_initialized)))
+		return 0;
+
+	cur_pmssc.pmu2_states[0] = D0I0_MASK;
+	cur_pmssc.pmu2_states[1] = D0I0_MASK;
+	cur_pmssc.pmu2_states[2] = D0I0_MASK;
+	cur_pmssc.pmu2_states[3] = D0I0_MASK;
+
+	/* Restrict platform Cx state to C6 */
+	pm_qos_update_request(mid_pmu_cxt->s3_restrict_qos,
+				(CSTATE_EXIT_LATENCY_S0i1-1));
+
+	down(&mid_pmu_cxt->scu_ready_sem);
+
+	mid_pmu_cxt->shutdown_started = true;
+
+	/* Issue the pmu command to PMU 2
+	 * flag is needed to distinguish between
+	 * S0ix vs interactive command in pmu_sc_irq()
+	 */
+	status = pmu_issue_interactive_command(&cur_pmssc, false, false);
+
+	if (unlikely(status != PMU_SUCCESS)) {	/* pmu command failed */
+		printk(KERN_CRIT "%s: Failed to Issue a PM command to PMU2\n",
+								__func__);
+		mid_pmu_cxt->shutdown_started = false;
+
+		/* allow s0ix now */
+		pm_qos_update_request(mid_pmu_cxt->s3_restrict_qos,
+						PM_QOS_DEFAULT_VALUE);
+		goto unlock;
+	}
+
+	if (_pmu2_wait_not_busy()) {
+		pmu_dump_logs();
+		BUG();
+	}
+
+unlock:
+	up(&mid_pmu_cxt->scu_ready_sem);
+	return status;
+}
+EXPORT_SYMBOL(pmu_set_devices_in_d0i0);
+
+static int _pmu_read_status(int type)
+{
+	u32 temp;
+	union pmu_pm_status result;
+
+	temp = readl(&mid_pmu_cxt->pmu_reg->pm_sts);
+
+	/* extract the busy bit */
+	result.pmu_status_value = temp;
+
+	if (type == PMU_BUSY_STATUS)
+		return result.pmu_status_parts.pmu_busy;
+	else if (type == PMU_MODE_ID)
+		return result.pmu_status_parts.mode_id;
+
+	return 0;
+}
+
+int _pmu2_wait_not_busy(void)
+{
+	int pmu_busy_retry = PMU2_BUSY_TIMEOUT;
+
+	/* wait up to 500ms for the latest pmu command to finish */
+	do {
+		if (_pmu_read_status(PMU_BUSY_STATUS) == 0)
+			return 0;
+
+		udelay(1);
+	} while (--pmu_busy_retry);
+
+	WARN(1, "pmu2 busy!");
+
+	return -EBUSY;
+}
+
+static int _pmu2_wait_not_busy_yield(void)
+{
+	int pmu_busy_retry = PMU2_BUSY_TIMEOUT;
+
+	/* wait at most 500ms for the latest pmu command to finish */
+	do {
+		if (_pmu_read_status(PMU_BUSY_STATUS) == 0)
+			return 0;
+
+		usleep_range(10, 12);
+		pmu_busy_retry -= 11;
+	} while (pmu_busy_retry > 0);
+
+	WARN(1, "pmu2 busy!");
+
+	return -EBUSY;
+}
+
+static void pmu_write_subsys_config(struct pmu_ss_states *pm_ssc)
+{
+	/* South complex in Penwell has multiple registers for
+	 * PM_SSC, etc.
+	 */
+	writel(pm_ssc->pmu2_states[0], &mid_pmu_cxt->pmu_reg->pm_ssc[0]);
+	writel(pm_ssc->pmu2_states[1], &mid_pmu_cxt->pmu_reg->pm_ssc[1]);
+	writel(pm_ssc->pmu2_states[2], &mid_pmu_cxt->pmu_reg->pm_ssc[2]);
+	writel(pm_ssc->pmu2_states[3], &mid_pmu_cxt->pmu_reg->pm_ssc[3]);
+}
+
+void log_wakeup_irq(void)
+{
+	unsigned int irr = 0, vector = 0;
+	int offset = 0, irq = 0;
+	struct irq_desc *desc;
+	const char *act_name;
+
+	if ((mid_pmu_cxt->pmu_current_state != SYS_STATE_S3)
+	    || !mid_pmu_cxt->suspend_started)
+		return;
+
+	for (offset = (FIRST_EXTERNAL_VECTOR/32);
+			offset < (NR_VECTORS/32); offset++) {
+		irr = apic_read(APIC_IRR + (offset * 0x10));
+		while (irr) {
+			vector = __ffs(irr);
+			irr &= ~(1 << vector);
+			irq = __this_cpu_read(
+					vector_irq[vector + (offset * 32)]);
+			if (irq < 0)
+				continue;
+			pr_info("wakeup from IRQ %d\n", irq);
+
+			desc = irq_to_desc(irq);
+
+			if ((desc) && (desc->action)) {
+				act_name = desc->action->name;
+				pr_info("IRQ %d,action name:%s\n",
+					irq,
+					(act_name) ? (act_name) : "no action");
+			}
+		}
+	}
+	return;
+}
+
+static inline int pmu_interrupt_pending(void)
+{
+	u32 temp;
+	union pmu_pm_ics result;
+
+	/* read the pm interrupt status register */
+	temp = readl(&mid_pmu_cxt->pmu_reg->pm_ics);
+	result.pmu_pm_ics_value = temp;
+
+	/* return the pm interrupt status int pending bit info */
+	return result.pmu_pm_ics_parts.int_pend;
+}
+
+static inline void pmu_clear_pending_interrupt(void)
+{
+	u32 temp;
+
+	/* read the pm interrupt status register */
+	temp = readl(&mid_pmu_cxt->pmu_reg->pm_ics);
+
+	/* write into the PM_ICS register */
+	writel(temp, &mid_pmu_cxt->pmu_reg->pm_ics);
+}
+
+void pmu_set_interrupt_enable(void)
+{
+	u32 temp;
+	union pmu_pm_ics result;
+
+	/* read the pm interrupt status register */
+	temp = readl(&mid_pmu_cxt->pmu_reg->pm_ics);
+	result.pmu_pm_ics_value = temp;
+
+	/* Set the interrupt enable bit */
+	result.pmu_pm_ics_parts.int_enable = 1;
+
+	temp = result.pmu_pm_ics_value;
+
+	/* write into the PM_ICS register */
+	writel(temp, &mid_pmu_cxt->pmu_reg->pm_ics);
+}
+
+void pmu_clear_interrupt_enable(void)
+{
+	u32 temp;
+	union pmu_pm_ics result;
+
+	/* read the pm interrupt status register */
+	temp = readl(&mid_pmu_cxt->pmu_reg->pm_ics);
+	result.pmu_pm_ics_value = temp;
+
+	/* Clear the interrupt enable bit */
+	result.pmu_pm_ics_parts.int_enable = 0;
+
+	temp = result.pmu_pm_ics_value;
+
+	/* write into the PM_ICS register */
+	writel(temp, &mid_pmu_cxt->pmu_reg->pm_ics);
+}
+
+static inline int pmu_read_interrupt_status(void)
+{
+	u32 temp;
+	union pmu_pm_ics result;
+
+	/* read the pm interrupt status register */
+	temp = readl(&mid_pmu_cxt->pmu_reg->pm_ics);
+
+	result.pmu_pm_ics_value = temp;
+
+	if (result.pmu_pm_ics_parts.int_status == 0)
+		return PMU_FAILED;
+
+	/* return the pm interrupt status int pending bit info */
+	return result.pmu_pm_ics_parts.int_status;
+}
+
+/*This function is used for programming the wake capable devices*/
+static void pmu_prepare_wake(int s0ix_state)
+{
+	struct pmu_ss_states cur_pmsss;
+
+	/* setup the wake capable devices */
+	if (s0ix_state == MID_S3_STATE) {
+		writel(~IGNORE_S3_WKC0, &mid_pmu_cxt->pmu_reg->pm_wkc[0]);
+		writel(~IGNORE_S3_WKC1, &mid_pmu_cxt->pmu_reg->pm_wkc[1]);
+	}
+
+	if (platform_is(INTEL_ATOM_MFLD) || platform_is(INTEL_ATOM_CLV)) {
+
+		/* Re-program the sub systems state on wakeup as
+		 * the current SSS
+		 */
+		pmu_read_sss(&cur_pmsss);
+
+		writel(cur_pmsss.pmu2_states[0],
+				&mid_pmu_cxt->pmu_reg->pm_wssc[0]);
+		writel(cur_pmsss.pmu2_states[1],
+				&mid_pmu_cxt->pmu_reg->pm_wssc[1]);
+		writel(cur_pmsss.pmu2_states[2],
+				&mid_pmu_cxt->pmu_reg->pm_wssc[2]);
+		writel(cur_pmsss.pmu2_states[3],
+				&mid_pmu_cxt->pmu_reg->pm_wssc[3]);
+	}
+}
+
+int mid_s0ix_enter(int s0ix_state)
+{
+	int ret = 0;
+
+	if (unlikely(!pmu_ops || !pmu_ops->enter))
+		goto ret;
+
+	/* check if we can acquire scu_ready_sem;
+	 * if we cannot, just do a C6 */
+	if (down_trylock(&mid_pmu_cxt->scu_ready_sem))
+		goto ret;
+
+	/* If PMU is busy, we'll retry on next C6 */
+	if (unlikely(_pmu_read_status(PMU_BUSY_STATUS))) {
+		up(&mid_pmu_cxt->scu_ready_sem);
+		pr_debug("mid_pmu_cxt->scu_read_sem is up\n");
+		goto ret;
+	}
+
+	pmu_prepare_wake(s0ix_state);
+
+	/* no need to proceed if schedule pending */
+	if (unlikely(need_resched())) {
+		pmu_stat_clear();
+		/*set wkc to appropriate value suitable for s0ix*/
+		writel(mid_pmu_cxt->ss_config->wake_state.wake_enable[0],
+		       &mid_pmu_cxt->pmu_reg->pm_wkc[0]);
+		writel(mid_pmu_cxt->ss_config->wake_state.wake_enable[1],
+		       &mid_pmu_cxt->pmu_reg->pm_wkc[1]);
+		up(&mid_pmu_cxt->scu_ready_sem);
+		goto ret;
+	}
+
+	/* entry function for pmu driver ops */
+	if (pmu_ops->enter(s0ix_state))
+		ret = s0ix_state;
+	else  {
+		/*set wkc to appropriate value suitable for s0ix*/
+		writel(mid_pmu_cxt->ss_config->wake_state.wake_enable[0],
+		       &mid_pmu_cxt->pmu_reg->pm_wkc[0]);
+		writel(mid_pmu_cxt->ss_config->wake_state.wake_enable[1],
+		       &mid_pmu_cxt->pmu_reg->pm_wkc[1]);
+	}
+
+ret:
+	return ret;
+}
+
+/**
+ * pmu_sc_irq - pmu driver interrupt handler
+ * Context: interrupt context
+ */
+static irqreturn_t pmu_sc_irq(int irq, void *ignored)
+{
+	int status;
+	irqreturn_t ret = IRQ_NONE;
+	int wake_source;
+
+	/* check if the interrupt pending bit is set; if not, ignore it */
+	if (unlikely(!pmu_interrupt_pending()))
+		goto ret_no_clear;
+
+	/* read the interrupt status */
+	status = pmu_read_interrupt_status();
+	if (unlikely(status == PMU_FAILED))
+		dev_dbg(&mid_pmu_cxt->pmu_dev->dev, "Invalid interrupt source\n");
+
+	switch (status) {
+	case INVALID_INT:
+		goto ret_no_clear;
+
+	case CMD_COMPLETE_INT:
+		break;
+
+	case CMD_ERROR_INT:
+		mid_pmu_cxt->cmd_error_int++;
+		break;
+
+	case SUBSYS_POW_ERR_INT:
+	case NO_ACKC6_INT:
+	case S0ix_MISS_INT:
+		pmu_stat_error(status);
+		break;
+
+	case WAKE_RECEIVED_INT:
+		wake_source = pmu_get_wake_source();
+		trace_printk("wake_from_lss%d\n", wake_source);
+		pmu_stat_end();
+		break;
+	case TRIGGERERR:
+		pmu_dump_logs();
+		WARN(1, "%s: TRIGGERERR caused, but proceeding...\n", __func__);
+		break;
+	}
+
+	pmu_stat_clear();
+
+	/* clear the interrupt pending bit */
+	pmu_clear_pending_interrupt();
+
+	if (pmu_ops->wakeup)
+		pmu_ops->wakeup();
+
+	if (platform_is(INTEL_ATOM_MFLD) ||
+				platform_is(INTEL_ATOM_CLV)) {
+		mid_pmu_cxt->s0ix_entered = 0;
+		/* S0ix case release it */
+		up(&mid_pmu_cxt->scu_ready_sem);
+	}
+
+	ret = IRQ_HANDLED;
+ret_no_clear:
+	/* clear interrupt enable bit */
+	pmu_clear_interrupt_enable();
+
+	return ret;
+}
+
+void pmu_set_s0ix_complete(void)
+{
+	if (pmu_ops->set_s0ix_complete)
+		pmu_ops->set_s0ix_complete();
+}
+EXPORT_SYMBOL(pmu_set_s0ix_complete);
+
+bool pmu_is_s0ix_in_progress(void)
+{
+	bool state = false;
+
+	if (pmu_initialized && mid_pmu_cxt->s0ix_entered)
+		state = true;
+
+	return state;
+}
+EXPORT_SYMBOL(pmu_is_s0ix_in_progress);
+
+static inline u32 find_index_in_hash(struct pci_dev *pdev, int *found)
+{
+	u32 h_index;
+	int i;
+
+	/* assuming pdev is not null */
+	WARN_ON(pdev == NULL);
+
+	/* assuming the pdev pointer will not change from platform
+	 * boot to shutdown */
+	h_index = jhash_1word((u32) (long) pdev,
+		 MID_PCI_INDEX_HASH_INITVALUE) & MID_PCI_INDEX_HASH_MASK;
+
+	/* assume not found */
+	*found = 0;
+
+	for (i = 0; i < MID_PCI_INDEX_HASH_SIZE; i++) {
+		if (likely(mid_pmu_cxt->pci_dev_hash[h_index].pdev == pdev)) {
+			*found = 1;
+			break;
+		}
+
+		/* assume no deletions, hence there shouldn't be any
+		 * gaps, i.e., NULLs */
+		if (unlikely(mid_pmu_cxt->pci_dev_hash[h_index].pdev == NULL)) {
+			/* found NULL, which means we won't have
+			 * it in the hash */
+			break;
+		}
+
+		h_index = (h_index+1)%MID_PCI_INDEX_HASH_SIZE;
+	}
+
+	/* Assume the hash table won't be full */
+	WARN_ON(i == MID_PCI_INDEX_HASH_SIZE);
+
+	return h_index;
+}
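+
+/*
+ * Probe-sequence example: with linear probing and no deletions, a
+ * lookup for pdev P walks h(P), h(P)+1, ... until it hits P (found)
+ * or a NULL slot (not present; also the insertion point).
+ */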
+
+static bool is_display_subclass(unsigned int sub_class)
+{
+	/* On MDFLD and CLV, we have display PCI device class 0x30000,
+	 * On MRFLD, we have display PCI device class 0x38000
+	 */
+
+	if ((sub_class == 0x0 &&
+		(platform_is(INTEL_ATOM_MFLD) ||
+		platform_is(INTEL_ATOM_CLV))) ||
+		(sub_class == 0x80 && platform_is(INTEL_ATOM_MRFLD)))
+		return true;
+
+	return false;
+}
+
+static int get_pci_to_pmu_index(struct pci_dev *pdev)
+{
+	int pm, type;
+	unsigned int base_class;
+	unsigned int sub_class;
+	u8 ss;
+	int index = PMU_FAILED;
+	u32 h_index;
+	int found;
+
+	h_index = find_index_in_hash(pdev, &found);
+
+	if (found)
+		return (int)mid_pmu_cxt->pci_dev_hash[h_index].index;
+
+	/* if not found, h_index would be where
+	 * we can insert this */
+
+	base_class = pdev->class >> 16;
+	sub_class  = (pdev->class & SUB_CLASS_MASK) >> 8;
+	pm = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
+
+	/* read the logical sub system id & cap if present */
+	pci_read_config_byte(pdev, pm + 4, &ss);
+
+	type = ss & LOG_SS_MASK;
+	ss = ss & LOG_ID_MASK;
+
+	if ((base_class == PCI_BASE_CLASS_DISPLAY) &&
+			is_display_subclass(sub_class))
+		index = 1;
+	else if ((base_class == PCI_BASE_CLASS_MULTIMEDIA) &&
+			(sub_class == ISP_SUB_CLASS))
+				index = ISP_POS;
+	else if (type) {
+		WARN_ON(ss >= MAX_LSS_POSSIBLE);
+		index = mid_pmu_cxt->pmu1_max_devs + ss;
+	}
+
+	if (index != PMU_FAILED) {
+		/* insert into hash table */
+		mid_pmu_cxt->pci_dev_hash[h_index].pdev = pdev;
+
+		/* assume index never exceeds 0xff */
+		WARN_ON(index > 0xFF);
+
+		mid_pmu_cxt->pci_dev_hash[h_index].index = (u8)index;
+
+		if (index < mid_pmu_cxt->pmu1_max_devs) {
+			set_mid_pci_ss_idx(index, 0);
+			set_mid_pci_ss_pos(index, (u8)index);
+			set_mid_pci_pmu_num(index, PMU_NUM_1);
+		} else if (index >= mid_pmu_cxt->pmu1_max_devs &&
+			   index < (mid_pmu_cxt->pmu1_max_devs +
+						mid_pmu_cxt->pmu2_max_devs)) {
+			set_mid_pci_ss_idx(index,
+					(u8)(ss / mid_pmu_cxt->ss_per_reg));
+			set_mid_pci_ss_pos(index,
+					(u8)(ss % mid_pmu_cxt->ss_per_reg));
+			set_mid_pci_pmu_num(index, PMU_NUM_2);
+		} else {
+			index = PMU_FAILED;
+		}
+
+		WARN_ON(index == PMU_FAILED);
+	}
+
+	return index;
+}
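+
+/*
+ * Index layout (derived from the code above): indices below
+ * pmu1_max_devs belong to PMU1; a PMU2-managed LSS <ss> maps to index
+ * pmu1_max_devs + ss, with its 32-bit register selected by
+ * ss / ss_per_reg and its bit position by ss % ss_per_reg.
+ * Illustrative example with ss_per_reg = 16: ss = 17 lands in
+ * register 1 at position 1.
+ */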
+
+static void get_pci_lss_info(struct pci_dev *pdev)
+{
+	int index, pm;
+	unsigned int base_class;
+	unsigned int sub_class;
+	u8 ss, cap;
+	int i;
+	base_class = pdev->class >> 16;
+	sub_class  = (pdev->class & SUB_CLASS_MASK) >> 8;
+
+	pm = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
+
+	/* read the logical sub system id & cap if present */
+	pci_read_config_byte(pdev, pm + 4, &ss);
+	pci_read_config_byte(pdev, pm + 5, &cap);
+
+	/* get the index for the copying of ss info */
+	index = get_pci_to_pmu_index(pdev);
+
+	if ((index == PMU_FAILED) || (index >= MAX_DEVICES))
+		return;
+
+	/* initialize gfx subsystem info */
+	if ((base_class == PCI_BASE_CLASS_DISPLAY) &&
+			is_display_subclass(sub_class)) {
+		set_mid_pci_log_id(index, (u32)index);
+		set_mid_pci_cap(index, PM_SUPPORT);
+	} else if ((base_class == PCI_BASE_CLASS_MULTIMEDIA) &&
+		(sub_class == ISP_SUB_CLASS)) {
+			set_mid_pci_log_id(index, (u32)index);
+			set_mid_pci_cap(index, PM_SUPPORT);
+	} else if (ss && cap) {
+		set_mid_pci_log_id(index, (u32)(ss & LOG_ID_MASK));
+		set_mid_pci_cap(index, cap);
+	}
+
+	for (i = 0; i < PMU_MAX_LSS_SHARE &&
+		get_mid_pci_drv(index, i); i++) {
+		/* do nothing */
+	}
+
+	WARN_ON(i >= PMU_MAX_LSS_SHARE);
+
+	if (i < PMU_MAX_LSS_SHARE) {
+		set_mid_pci_drv(index, i, pdev);
+		set_mid_pci_power_state(index, i, PCI_D3hot);
+	}
+}
+
+static void pmu_enumerate(void)
+{
+	struct pci_dev *pdev = NULL;
+	unsigned int base_class;
+
+	for_each_pci_dev(pdev) {
+		if (platform_is(INTEL_ATOM_MRFLD) &&
+			pdev->device == MID_MRFL_HDMI_DRV_DEV_ID)
+			continue;
+
+		/* find the base class info */
+		base_class = pdev->class >> 16;
+
+		if (base_class == PCI_BASE_CLASS_BRIDGE)
+			continue;
+
+		get_pci_lss_info(pdev);
+	}
+}
+
+void pmu_read_sss(struct pmu_ss_states *pm_ssc)
+{
+	pm_ssc->pmu2_states[0] =
+			readl(&mid_pmu_cxt->pmu_reg->pm_sss[0]);
+	pm_ssc->pmu2_states[1] =
+			readl(&mid_pmu_cxt->pmu_reg->pm_sss[1]);
+	pm_ssc->pmu2_states[2] =
+			readl(&mid_pmu_cxt->pmu_reg->pm_sss[2]);
+	pm_ssc->pmu2_states[3] =
+			readl(&mid_pmu_cxt->pmu_reg->pm_sss[3]);
+}
+
+
+/*
+ * For all devices sharing this LSS, find the weakest (shallowest)
+ * power state requested, so that we don't power down the LSS while
+ * another device still needs more power.
+ */
+
+static pci_power_t  pmu_pci_get_weakest_state_for_lss(int lss_index,
+				struct pci_dev *pdev, pci_power_t state)
+{
+	int i;
+	pci_power_t weakest = state;
+
+	for (i = 0; i < PMU_MAX_LSS_SHARE; i++) {
+		if (get_mid_pci_drv(lss_index, i) == pdev)
+			set_mid_pci_power_state(lss_index, i, state);
+
+		if (get_mid_pci_drv(lss_index, i) &&
+			(get_mid_pci_power_state(lss_index, i) < weakest))
+			weakest = get_mid_pci_power_state(lss_index, i);
+	}
+	return weakest;
+}
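+
+/*
+ * "Weakest" here means the numerically lowest pci_power_t, i.e. the
+ * shallowest state: if one sharer of the LSS requests PCI_D3hot while
+ * another is still in PCI_D0, the function returns PCI_D0 and the LSS
+ * stays powered.
+ */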
+
+int pmu_pci_to_indexes(struct pci_dev *pdev, int *index,
+				int *pmu_num, int *ss_idx, int *ss_pos)
+{
+	int i;
+
+	i = get_pci_to_pmu_index(pdev);
+	if (i == PMU_FAILED)
+		return PMU_FAILED;
+
+	*index		= i;
+	*ss_pos		= get_mid_pci_ss_pos(i);
+	*ss_idx		= get_mid_pci_ss_idx(i);
+	*pmu_num	= get_mid_pci_pmu_num(i);
+
+	return PMU_SUCCESS;
+}
+
+static bool update_nc_device_states(int i, pci_power_t state)
+{
+	int status = 0;
+	int islands = 0;
+	int reg;
+
+	/* store the display status */
+	if (i == GFX_LSS_INDEX) {
+		mid_pmu_cxt->display_off = (state != PCI_D0);
+		return true;
+	}
+
+	/* Update the camera status when the ISP driver is suspended or
+	 * resumed. The ISP power islands are also updated accordingly,
+	 * otherwise the Dx state in PMCSR refuses to change.
+	 */
+	else if (i == ISP_POS) {
+		if (platform_is(INTEL_ATOM_MFLD) ||
+				 platform_is(INTEL_ATOM_CLV)) {
+			islands = APM_ISP_ISLAND | APM_IPH_ISLAND;
+			reg = APM_REG_TYPE;
+		} else if (platform_is(INTEL_ATOM_MRFLD)) {
+			islands = TNG_ISP_ISLAND;
+			reg = ISP_SS_PM0;
+		} else
+			return false;
+		status = pmu_nc_set_power_state(islands,
+			(state != PCI_D0) ?
+			OSPM_ISLAND_DOWN : OSPM_ISLAND_UP,
+			reg);
+		if (status)
+			return false;
+		mid_pmu_cxt->camera_off = (state != PCI_D0);
+		return true;
+	}
+
+	return false;
+}
+
+void init_nc_device_states(void)
+{
+#if !IS_ENABLED(CONFIG_VIDEO_ATOMISP)
+	mid_pmu_cxt->camera_off = true;
+#endif
+
+#ifndef GFX_ENABLE
+	/* If Gfx is disabled
+	 * assume s0ix is not blocked
+	 * from gfx side
+	 */
+	mid_pmu_cxt->display_off = true;
+#endif
+
+	return;
+}
+
+/* FIXME: Currently the HSI Modem 7060 (BZ# 28529) has an issue and
+ * will not go to a low power state on CVT, so standby will not work
+ * if HSI is enabled.
+ * We can choose between standby and HSI based on enable_standby 1/0.
+ */
+unsigned int enable_standby __read_mostly;
+module_param(enable_standby, uint, 0000);
+
+/* FIXME:: We have issues with S0ix/S3 enabling by default
+ * with display lockup, HSIC etc., so have a boot time option
+ * to enable S0ix/S3
+ */
+unsigned int enable_s3 __read_mostly = 1;
+int set_enable_s3(const char *val, struct kernel_param *kp)
+{
+	int rv = param_set_int(val, kp);
+	if (rv)
+		return rv;
+
+	if (unlikely((!pmu_initialized)))
+		return 0;
+
+	if (platform_is(INTEL_ATOM_MRFLD)) {
+		if (!enable_s3)
+			__pm_stay_awake(mid_pmu_cxt->pmu_wake_lock);
+		else
+			__pm_relax(mid_pmu_cxt->pmu_wake_lock);
+	}
+
+	return 0;
+}
+module_param_call(enable_s3, set_enable_s3, param_get_uint,
+				&enable_s3, S_IRUGO | S_IWUSR);
+
+/* FIXME:: We have issues with S0ix/S3 enabling by default
+ * with display lockup, HSIC etc., so have a boot time option
+ * to enable S0ix/S3
+ */
+unsigned int enable_s0ix __read_mostly = 1;
+int set_enable_s0ix(const char *val, struct kernel_param *kp)
+{
+	int rv = param_set_int(val, kp);
+	if (rv)
+		return rv;
+
+	if (unlikely((!pmu_initialized)))
+		return 0;
+
+	if (platform_is(INTEL_ATOM_MRFLD)) {
+		if (!enable_s0ix) {
+			mid_pmu_cxt->cstate_ignore =
+				~((1 << CPUIDLE_STATE_MAX) - 1);
+
+			/* Ignore C2, C3, C5 states */
+			mid_pmu_cxt->cstate_ignore |= (1 << 1);
+			mid_pmu_cxt->cstate_ignore |= (1 << 2);
+			mid_pmu_cxt->cstate_ignore |= (1 << 4);
+
+			/* For now ignore C7, C8, C9, C10 states */
+			mid_pmu_cxt->cstate_ignore |= (1 << 6);
+			mid_pmu_cxt->cstate_ignore |= (1 << 7);
+			mid_pmu_cxt->cstate_ignore |= (1 << 8);
+			mid_pmu_cxt->cstate_ignore |= (1 << 9);
+
+			/* Restrict platform Cx state to C6 */
+			pm_qos_update_request(mid_pmu_cxt->cstate_qos,
+						(CSTATE_EXIT_LATENCY_S0i1-1));
+		} else {
+			mid_pmu_cxt->cstate_ignore =
+				~((1 << CPUIDLE_STATE_MAX) - 1);
+
+			/* Ignore C2, C3, C5, C8 and C10 states */
+			mid_pmu_cxt->cstate_ignore |= (1 << 1);
+			mid_pmu_cxt->cstate_ignore |= (1 << 2);
+			mid_pmu_cxt->cstate_ignore |= (1 << 4);
+			mid_pmu_cxt->cstate_ignore |= (1 << 7);
+			mid_pmu_cxt->cstate_ignore |= (1 << 9);
+
+			pm_qos_update_request(mid_pmu_cxt->cstate_qos,
+							PM_QOS_DEFAULT_VALUE);
+		}
+	}
+
+	return 0;
+}
+module_param_call(enable_s0ix, set_enable_s0ix, param_get_uint,
+				&enable_s0ix, S_IRUGO | S_IWUSR);
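+
+/*
+ * A note on the cstate_ignore arithmetic in set_enable_s0ix() above:
+ * ~((1 << CPUIDLE_STATE_MAX) - 1) pre-sets every bit outside the
+ * valid cpuidle state range, and bit (n - 1) then masks off Cn, so
+ * e.g. (1 << 1) ignores C2 and (1 << 9) ignores C10, matching the
+ * in-line comments above.
+ */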
+
+unsigned int pmu_ignore_lss0 __read_mostly = IGNORE_SSS0;
+module_param(pmu_ignore_lss0, uint, S_IRUGO | S_IWUSR);
+
+unsigned int pmu_ignore_lss1 __read_mostly = IGNORE_SSS1;
+module_param(pmu_ignore_lss1, uint, S_IRUGO | S_IWUSR);
+
+unsigned int pmu_ignore_lss2 __read_mostly = IGNORE_SSS2;
+module_param(pmu_ignore_lss2, uint, S_IRUGO | S_IWUSR);
+
+unsigned int pmu_ignore_lss3 __read_mostly = IGNORE_SSS3;
+module_param(pmu_ignore_lss3, uint, S_IRUGO | S_IWUSR);
+
+int pmu_set_emmc_to_d0i0_atomic(void)
+{
+	u32 pm_cmd_val;
+	u32 new_value;
+	int sub_sys_pos, sub_sys_index;
+	struct pmu_ss_states cur_pmssc;
+	int status = 0;
+
+	if (unlikely((!pmu_initialized)))
+		return 0;
+
+	/* LSS 01 is index = 0, pos = 1 */
+	sub_sys_index	= EMMC0_LSS / mid_pmu_cxt->ss_per_reg;
+	sub_sys_pos	= EMMC0_LSS % mid_pmu_cxt->ss_per_reg;
+
+	memset(&cur_pmssc, 0, sizeof(cur_pmssc));
+
+	/*
+	 * Give any possible previous PMU operation time to finish when
+	 * the SCU is functioning normally. If the SCU has crashed, the
+	 * PMU may stay busy, but still check whether the eMMC is
+	 * accessible.
+	 */
+	status = _pmu2_wait_not_busy();
+	if (status) {
+		dev_err(&mid_pmu_cxt->pmu_dev->dev,
+			"PMU2 busy, ignoring as emmc might be already d0i0\n");
+		status = 0;
+	}
+
+	pmu_read_sss(&cur_pmssc);
+
+	/* set D0i0 the LSS bits */
+	pm_cmd_val =
+		(D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+	new_value = cur_pmssc.pmu2_states[sub_sys_index] &
+						(~pm_cmd_val);
+	if (new_value == cur_pmssc.pmu2_states[sub_sys_index])
+		goto err;
+
+	status = _pmu2_wait_not_busy();
+	if (status)
+		goto err;
+
+	cur_pmssc.pmu2_states[sub_sys_index] = new_value;
+
+	/* Request SCU for PM interrupt enabling */
+	writel(PMU_PANIC_EMMC_UP_REQ_CMD, mid_pmu_cxt->emergeny_emmc_up_addr);
+
+	status = pmu_issue_interactive_command(&cur_pmssc, false, false);
+
+	if (unlikely(status != PMU_SUCCESS)) {
+		dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+			 "Failed to Issue a PM command to PMU2\n");
+		goto err;
+
+	}
+
+	/*
+	 * Wait for the interactive command to complete.
+	 * If we don't wait, the driver may access the
+	 * device before the SCU has powered it on.
+	 */
+	if (_pmu2_wait_not_busy()) {
+		pmu_dump_logs();
+		BUG();
+	}
+
+err:
+
+	return status;
+}
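+
+/*
+ * Worked example for the mask arithmetic above, assuming EMMC0_LSS = 1
+ * (per the "LSS 01 is index = 0, pos = 1" comment) and ss_per_reg = 16
+ * as set in mid_pmu_probe(): sub_sys_index = 0, sub_sys_pos = 1, so
+ * pm_cmd_val = D0I3_MASK << 2 = 0xC. Clearing those two bits leaves
+ * the LSS field at 0, i.e. D0i0, since D0I0_MASK is 0.
+ */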
+
+
+#define SAVED_HISTORY_ADDRESS_NUM	10
+#define SAVED_HISTORY_NUM		20
+#define PCI_MAX_RECORD_NUM		10
+
+struct saved_nc_power_history {
+	unsigned long long ts;
+	unsigned short pci;
+	unsigned short cpu:4;
+	unsigned short state_type:8;
+	unsigned short real_change:2;
+	int reg_type;
+	int islands;
+	void *address[SAVED_HISTORY_ADDRESS_NUM];
+};
+
+static atomic_t saved_nc_power_history_current = ATOMIC_INIT(-1);
+static struct saved_nc_power_history all_history[SAVED_HISTORY_NUM];
+static struct saved_nc_power_history *get_new_record_history(void)
+{
+	unsigned int ret =
+		atomic_add_return(1, &saved_nc_power_history_current);
+	return &all_history[ret%SAVED_HISTORY_NUM];
+}
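+
+/*
+ * all_history[] is a simple ring buffer: the atomic counter only ever
+ * increments and each writer takes slot (counter % SAVED_HISTORY_NUM),
+ * so concurrent callers get distinct slots and the oldest records are
+ * overwritten once the counter wraps around the array.
+ */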
+
+static unsigned short pci_need_record[PCI_MAX_RECORD_NUM] = { 0x08c8, 0x0130, };
+static int num_pci_need_record = 2;
+module_param_array(pci_need_record, ushort, &num_pci_need_record, 0644);
+MODULE_PARM_DESC(pci_need_record,
+		"PCI device IDs whose power state transitions are traced.");
+
+static bool pci_need_record_power_state(struct pci_dev *pdev)
+{
+	int i;
+	for (i = 0; i < num_pci_need_record; i++)
+		if (pdev->device == pci_need_record[i])
+			return true;
+
+	return false;
+}
+
+static void print_saved_record(struct saved_nc_power_history *record)
+{
+	int i;
+	unsigned long long ts = record->ts;
+	unsigned long nanosec_rem = do_div(ts, 1000000000);
+
+	printk(KERN_INFO "----\n");
+	printk(KERN_INFO "ts[%5lu.%06lu] cpu[%d] is pci[%04x] reg_type[%d] "
+			"state_type[%d] islands[%x] real_change[%d]\n",
+		(unsigned long)ts,
+		nanosec_rem / 1000,
+		record->cpu,
+		record->pci,
+		record->reg_type,
+		record->state_type,
+		record->islands,
+		record->real_change);
+	for (i = 0; i < SAVED_HISTORY_ADDRESS_NUM; i++) {
+		printk(KERN_INFO "%pf real_addr[%p]\n",
+			record->address[i],
+			record->address[i]);
+	}
+}
+
+int verify_stack_ok(unsigned int *good_ebp, unsigned int *_ebp)
+{
+	return ((unsigned int)_ebp & 0xffffe000) ==
+		((unsigned int)good_ebp & 0xffffe000);
+}
+
+size_t backtrace_safe(void **array, size_t max_size)
+{
+	unsigned int *_ebp, *base_ebp;
+	unsigned int *caller;
+	unsigned int i;
+
+	asm ("movl %%ebp, %0"
+		: "=r" (_ebp)
+	    );
+
+	base_ebp = _ebp;
+	caller = (unsigned int *) *(_ebp+1);
+
+	for (i = 0; i < max_size; i++)
+		array[i] = 0;
+	for (i = 0; i < max_size; i++) {
+		array[i] = caller;
+		_ebp = (unsigned int *) *_ebp;
+		if (!verify_stack_ok(base_ebp, _ebp))
+			break;
+		caller = (unsigned int *) *(_ebp+1);
+	}
+
+	return i + 1;
+}
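+
+/*
+ * backtrace_safe() walks the classic 32-bit frame-pointer chain:
+ * *ebp is the caller's saved ebp and *(ebp + 1) its return address.
+ * verify_stack_ok() masks both pointers with 0xffffe000 (an 8 KB
+ * window, assuming an 8 KB kernel stack) so the walk stops as soon
+ * as it would leave the current stack.
+ */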
+
+void dump_nc_power_history(void)
+{
+	int i, start;
+	unsigned int total = atomic_read(&saved_nc_power_history_current);
+
+	start = total % SAVED_HISTORY_NUM;
+	printk(KERN_INFO "<----current timestamp\n");
+	printk(KERN_INFO "start[%d] saved[%d]\n",
+			start, total);
+	for (i = start; i >= 0; i--)
+		print_saved_record(&all_history[i]);
+	for (i = SAVED_HISTORY_NUM - 1; i > start; i--)
+		print_saved_record(&all_history[i]);
+}
+EXPORT_SYMBOL(dump_nc_power_history);
+
+static ssize_t debug_read_history(struct file *file, char __user *buffer,
+			size_t count, loff_t *pos)
+{
+	dump_nc_power_history();
+
+	return 0;
+}
+
+static ssize_t debug_write_read_history_entry(struct file *file,
+		const char __user *buffer, size_t count, loff_t *pos)
+{
+	char buf[20] = "0";
+	unsigned long len = min(sizeof(buf) - 1, count);
+	u32 islands;
+	u32 on;
+	int ret;
+
+	/* do nothing if the platform is neither Medfield nor CLV */
+	if (!platform_is(INTEL_ATOM_MFLD) && !platform_is(INTEL_ATOM_CLV))
+		return count;
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	buf[len] = 0;
+
+	ret = sscanf(buf, "%x%x", &islands, &on);
+	if (ret == 2)
+		pmu_nc_set_power_state(islands, on, OSPM_REG_TYPE);
+
+	return count;
+}
+
+static const struct file_operations proc_debug_operations = {
+	.owner	= THIS_MODULE,
+	.read	= debug_read_history,
+	.write	= debug_write_read_history_entry,
+};
+
+static int __init debug_read_history_entry(void)
+{
+	struct proc_dir_entry *res = NULL;
+
+	res = proc_create("debug_read_history", S_IRUGO | S_IWUSR, NULL,
+		&proc_debug_operations);
+
+	if (!res)
+		return -ENOMEM;
+
+	return 0;
+}
+device_initcall(debug_read_history_entry);
+
+/**
+ * pmu_nc_set_power_state - Callback used by all devices in the north
+ * complex for platform-specific device power on/shutdown.
+ * This function makes the following assumptions:
+ *
+ * Every new request starts from scratch, with no assumptions
+ * about previous/pending requests to the Punit.
+ * The caller is responsible for retrying if a request fails.
+ * Multiple requests to the Punit are avoided when the target
+ * is already in the expected state.
+ * Spinlocks guarantee serialized access to these registers
+ * and avoid concurrent access from 2d/3d, VED, VEC, ISP & IPH.
+ */
+int pmu_nc_set_power_state(int islands, int state_type, int reg)
+{
+	unsigned long flags;
+	struct saved_nc_power_history *record = NULL;
+	int ret = 0;
+	int change;
+
+	spin_lock_irqsave(&mid_pmu_cxt->nc_ready_lock, flags);
+
+	record = get_new_record_history();
+	record->cpu = raw_smp_processor_id();
+	record->ts = cpu_clock(record->cpu);
+	record->islands = islands;
+	record->pci = 0;
+	record->state_type = state_type;
+	backtrace_safe(record->address, SAVED_HISTORY_ADDRESS_NUM);
+	record->real_change = 0;
+	record->reg_type = reg;
+
+	if (pmu_ops->nc_set_power_state)	{
+		ret = pmu_ops->nc_set_power_state(islands, state_type,
+								reg, &change);
+		if (change) {
+			record->real_change = 1;
+			record->ts = cpu_clock(record->cpu);
+		}
+	}
+
+	spin_unlock_irqrestore(&mid_pmu_cxt->nc_ready_lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL(pmu_nc_set_power_state);
+
+/**
+ * pmu_nc_get_power_state - Callback used to query the power status
+ * of devices in the north complex.
+ * This function makes the following assumptions:
+ *
+ * Every new request starts from scratch, with no assumptions
+ * about previous/pending requests to the Punit.
+ * The caller is responsible for retrying if a request fails.
+ * Multiple requests to the Punit are avoided when the target
+ * is already in the expected state.
+ * Spinlocks guarantee serialized access to these registers
+ * and avoid concurrent access from 2d/3d, VED, VEC, ISP & IPH.
+ */
+int pmu_nc_get_power_state(int island, int reg_type)
+{
+	u32 pwr_sts;
+	unsigned long flags;
+	int i, lss;
+	int ret = -EINVAL;
+
+	/* do nothing if the platform is neither Medfield nor CLV */
+	if (!platform_is(INTEL_ATOM_MFLD) && !platform_is(INTEL_ATOM_CLV))
+		return 0;
+
+	spin_lock_irqsave(&mid_pmu_cxt->nc_ready_lock, flags);
+
+	switch (reg_type) {
+	case APM_REG_TYPE:
+		pwr_sts = inl(mid_pmu_cxt->apm_base + APM_STS);
+		break;
+	case OSPM_REG_TYPE:
+		pwr_sts = inl(mid_pmu_cxt->ospm_base + OSPM_PM_SSS);
+		break;
+	default:
+		pr_err("%s: invalid argument 'island': %d.\n",
+				 __func__, island);
+		goto unlock;
+	}
+
+	for (i = 0; i < OSPM_MAX_POWER_ISLANDS; i++) {
+		lss = island & (0x1 << i);
+		if (lss) {
+			ret = (pwr_sts >> (2 * i)) & 0x3;
+			break;
+		}
+	}
+
+unlock:
+	spin_unlock_irqrestore(&mid_pmu_cxt->nc_ready_lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL(pmu_nc_get_power_state);
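+
+/*
+ * The status registers read above encode each power island in two
+ * bits ((pwr_sts >> (2 * i)) & 0x3). Illustrative example: for an
+ * island whose bit is (1 << 3), the state is taken from bits 7:6 of
+ * APM_STS or OSPM_PM_SSS, depending on reg_type.
+ */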
+
+/*
+ * update_dev_res - Calculates and updates the device residency when
+ * a device state change occurs.
+ * Computation of a device's residency starts at its first state
+ * transition after the pmu driver is initialized.
+ */
+void update_dev_res(int index, pci_power_t state)
+{
+	if (state != PCI_D0) {
+		if (mid_pmu_cxt->pmu_dev_res[index].start == 0) {
+			mid_pmu_cxt->pmu_dev_res[index].start = cpu_clock(0);
+			mid_pmu_cxt->pmu_dev_res[index].d0i3_entry =
+				mid_pmu_cxt->pmu_dev_res[index].start;
+			mid_pmu_cxt->pmu_dev_res[index].d0i0_acc = 0;
+		} else {
+			mid_pmu_cxt->pmu_dev_res[index].d0i3_entry =
+							cpu_clock(0);
+			mid_pmu_cxt->pmu_dev_res[index].d0i0_acc +=
+			(mid_pmu_cxt->pmu_dev_res[index].d0i3_entry -
+				 mid_pmu_cxt->pmu_dev_res[index].d0i0_entry);
+		}
+	} else {
+		if (mid_pmu_cxt->pmu_dev_res[index].start == 0) {
+			mid_pmu_cxt->pmu_dev_res[index].start =
+						 cpu_clock(0);
+			mid_pmu_cxt->pmu_dev_res[index].d0i0_entry
+				= mid_pmu_cxt->pmu_dev_res[index].start;
+			mid_pmu_cxt->pmu_dev_res[index].d0i3_acc = 0;
+		} else {
+			mid_pmu_cxt->pmu_dev_res[index].d0i0_entry =
+						 cpu_clock(0);
+			mid_pmu_cxt->pmu_dev_res[index].d0i3_acc +=
+			(mid_pmu_cxt->pmu_dev_res[index].d0i0_entry -
+			mid_pmu_cxt->pmu_dev_res[index].d0i3_entry);
+		}
+	}
+	mid_pmu_cxt->pmu_dev_res[index].state = state;
+}
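+
+/*
+ * Residency bookkeeping sketch (as implemented above): d0i0_acc
+ * accumulates the time spent in D0 (d0i0_entry .. d0i3_entry) and
+ * d0i3_acc the time spent in a low-power state (d0i3_entry ..
+ * d0i0_entry); 'start' latches the timestamp of the device's first
+ * transition so total observed time can be derived later.
+ */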
+
+/**
+ * pmu_pci_set_power_state - Callback used by all PCI devices for
+ *			platform-specific device power on/shutdown.
+ */
+int __ref pmu_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
+{
+	u32 new_value;
+	int i = 0;
+	u32 pm_cmd_val, chk_val;
+	int sub_sys_pos, sub_sys_index;
+	int pmu_num;
+	struct pmu_ss_states cur_pmssc;
+	int status = 0;
+	int retry_times = 0;
+	ktime_t calltime, delta, rettime;
+	struct saved_nc_power_history *record = NULL;
+	bool d3_cold = false;
+
+	/* Ignore callback from devices until we have initialized */
+	if (unlikely((!pmu_initialized)) || !pdev->driver)
+		return 0;
+
+	might_sleep();
+
+	/* Try to acquire the scu_ready_sem; otherwise block
+	 * here until pmu_sc_irq() releases it */
+	down(&mid_pmu_cxt->scu_ready_sem);
+
+	/* get the LSS index corresponding to pdev, its position in
+	 * the 32-bit register and its register number */
+	status =
+		pmu_pci_to_indexes(pdev, &i, &pmu_num,
+				&sub_sys_index,  &sub_sys_pos);
+
+	if (status)
+		goto unlock;
+
+	if (pci_need_record_power_state(pdev)) {
+		record = get_new_record_history();
+		record->cpu = raw_smp_processor_id();
+		record->ts = cpu_clock(record->cpu);
+		record->islands = 0;
+		record->reg_type = 0;
+		record->pci = pdev->device;
+		record->state_type = state;
+		backtrace_safe(record->address, SAVED_HISTORY_ADDRESS_NUM);
+		record->real_change = 0;
+	}
+
+	/* Ignore HDMI HPD driver d0ix on LSS 0 on MRFLD */
+	if (platform_is(INTEL_ATOM_MRFLD) &&
+			pdev->device == MID_MRFL_HDMI_DRV_DEV_ID)
+			goto unlock;
+
+	/* in case an LSS is assigned to more than one pdev, we need
+	 * to find the shallowest state the LSS should be put into */
+	state = pmu_pci_get_weakest_state_for_lss(i, pdev, state);
+
+	/* If the LSS corresponds to a north complex device, update
+	 * the status and return */
+	if (update_nc_device_states(i, state)) {
+		if (mid_pmu_cxt->pmu_dev_res[i].state != state &&
+		    i < MAX_DEVICES)
+			update_dev_res(i, state);
+		goto nc_done;
+	}
+
+	/* initialize the current pmssc states */
+	memset(&cur_pmssc, 0, sizeof(cur_pmssc));
+
+	status = _pmu2_wait_not_busy();
+
+	if (status)
+		goto unlock;
+
+	pmu_read_sss(&cur_pmssc);
+
+	/* Read the pm_cmd val & update the value */
+	pm_cmd_val =
+		(D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+
+	/* First clear the LSS bits */
+	new_value = cur_pmssc.pmu2_states[sub_sys_index] &
+						(~pm_cmd_val);
+	mid_pmu_cxt->os_sss[sub_sys_index] &= ~pm_cmd_val;
+
+	if (state != PCI_D0) {
+		pm_cmd_val =
+			(pci_to_platform_state(state) <<
+				(sub_sys_pos * BITS_PER_LSS));
+
+		new_value |= pm_cmd_val;
+
+		mid_pmu_cxt->os_sss[sub_sys_index] |= pm_cmd_val;
+	}
+
+	new_value &= ~mid_pmu_cxt->ignore_lss[sub_sys_index];
+
+	/* nothing to do, so don't do it... */
+	if (new_value == cur_pmssc.pmu2_states[sub_sys_index])
+		goto unlock;
+
+	cur_pmssc.pmu2_states[sub_sys_index] = new_value;
+
+	/* Check whether the state is D3_cold or D3_hot on the TNG platform */
+	if (platform_is(INTEL_ATOM_MRFLD) && (state == PCI_D3cold))
+		d3_cold = true;
+
+	/* Issue the pmu command to PMU 2
+	 * flag is needed to distinguish between
+	 * S0ix vs interactive command in pmu_sc_irq()
+	 */
+	status = pmu_issue_interactive_command(&cur_pmssc, false, d3_cold);
+
+	if (unlikely(status != PMU_SUCCESS)) {
+		dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+			 "Failed to Issue a PM command to PMU2\n");
+		goto unlock;
+	}
+
+	calltime = ktime_get();
+retry:
+	/*
+	 * Wait for the interactive command to complete.
+	 * If we don't wait, the driver may access the
+	 * device before the SCU has powered it on.
+	 */
+	status = _pmu2_wait_not_busy_yield();
+	if (unlikely(status)) {
+		rettime = ktime_get();
+		delta = ktime_sub(rettime, calltime);
+		retry_times++;
+
+		printk(KERN_CRIT "%s: D0ix transition failure: %04x %04X %s %20s:\n",
+				__func__,
+				pdev->vendor, pdev->device,
+				dev_name(&pdev->dev),
+				dev_driver_string(&pdev->dev));
+		printk(KERN_CRIT "interrupt pending = %d\n",
+				pmu_interrupt_pending());
+		printk(KERN_CRIT "pmu_busy_status = %d\n",
+				_pmu_read_status(PMU_BUSY_STATUS));
+		printk(KERN_CRIT "suspend_started = %d\n",
+				mid_pmu_cxt->suspend_started);
+		printk(KERN_CRIT "shutdown_started = %d\n",
+				mid_pmu_cxt->shutdown_started);
+		printk(KERN_CRIT "camera_off = %d display_off = %d\n",
+				mid_pmu_cxt->camera_off,
+				mid_pmu_cxt->display_off);
+		printk(KERN_CRIT "s0ix_possible = 0x%x\n",
+				mid_pmu_cxt->s0ix_possible);
+		printk(KERN_CRIT "s0ix_entered = 0x%x\n",
+				mid_pmu_cxt->s0ix_entered);
+		printk(KERN_CRIT "pmu_current_state = %d\n",
+				mid_pmu_cxt->pmu_current_state);
+		printk(KERN_CRIT "PMU is BUSY! retry_times[%d] total_delay[%lli]ms. Retry ...\n",
+				retry_times, (long long) ktime_to_ms(delta));
+		pmu_dump_logs();
+
+		trigger_all_cpu_backtrace();
+		if (retry_times < 60)
+			goto retry;
+		else
+			BUG();
+	}
+	if (record) {
+		record->real_change = 1;
+		record->ts = cpu_clock(record->cpu);
+	}
+
+	if (pmu_ops->set_power_state_ops)
+		pmu_ops->set_power_state_ops(state);
+
+	/* update stats */
+	inc_d0ix_stat((i-mid_pmu_cxt->pmu1_max_devs),
+				pci_to_platform_state(state));
+
+	/* check if the transition to the requested state has happened */
+	pmu_read_sss(&cur_pmssc);
+
+	chk_val = cur_pmssc.pmu2_states[sub_sys_index] &
+		(D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+	new_value &= (D0I3_MASK << (sub_sys_pos * BITS_PER_LSS));
+
+	if ((chk_val == new_value) && (i < MAX_DEVICES))
+		update_dev_res(i, state);
+
+	WARN_ON(chk_val != new_value);
+
+nc_done:
+#if !IS_ENABLED(CONFIG_VIDEO_ATOMISP)
+	/* ATOMISP is always powered up on system-resume path. It needs
+	 * to be turned off here if there is no driver to do it. */
+	if (!mid_pmu_cxt->camera_off) {
+		/* power down isp */
+		pmu_nc_set_power_state(APM_ISP_ISLAND | APM_IPH_ISLAND,
+				       OSPM_ISLAND_DOWN, APM_REG_TYPE);
+		/* power down DPHY */
+		new_value = intel_mid_msgbus_read32(0x09, 0x03);
+		new_value |= 0x300;
+		intel_mid_msgbus_write32(0x09, 0x03, new_value);
+		mid_pmu_cxt->camera_off = true;
+	}
+#endif
+
+	/* FIXME: If S0ix is enabled while the North Complex is ON, we see
+	 * fabric errors (tracked in BZ: 115181); hence hold a pm_qos
+	 * request to restrict S0ix while a north island is in D0i0.
+	 */
+	if (nc_device_state()) {
+		if (!pm_qos_request_active(mid_pmu_cxt->nc_restrict_qos))
+			pm_qos_add_request(mid_pmu_cxt->nc_restrict_qos,
+			 PM_QOS_CPU_DMA_LATENCY, (CSTATE_EXIT_LATENCY_S0i1-1));
+	} else {
+		if (pm_qos_request_active(mid_pmu_cxt->nc_restrict_qos))
+			pm_qos_remove_request(mid_pmu_cxt->nc_restrict_qos);
+	}
+
+unlock:
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	return status;
+}
+
+pci_power_t platfrom_pmu_choose_state(int lss)
+{
+	pci_power_t state = PCI_D3hot;
+
+	if (pmu_ops->pci_choose_state)
+		state = pmu_ops->pci_choose_state(lss);
+
+	return state;
+}
+
+/* return platform specific deepest states that the device can enter */
+pci_power_t pmu_pci_choose_state(struct pci_dev *pdev)
+{
+	int i;
+	int sub_sys_pos, sub_sys_index;
+	int status;
+	int device_lss;
+	int pmu_num;
+
+	pci_power_t state = PCI_D3hot;
+
+	if (pmu_initialized) {
+		status =
+		pmu_pci_to_indexes(pdev, &i, &pmu_num,
+					&sub_sys_index,  &sub_sys_pos);
+
+		if ((status == PMU_SUCCESS) &&
+			(pmu_num == PMU_NUM_2)) {
+
+			device_lss =
+				(sub_sys_index * mid_pmu_cxt->ss_per_reg) +
+								sub_sys_pos;
+
+			state = platfrom_pmu_choose_state(device_lss);
+		}
+	}
+
+	return state;
+}
+
+int pmu_issue_interactive_command(struct pmu_ss_states *pm_ssc, bool ioc,
+					bool d3_cold)
+{
+	u32 command;
+
+	if (_pmu2_wait_not_busy()) {
+		dev_err(&mid_pmu_cxt->pmu_dev->dev,
+			"SCU BUSY. Operation not permitted\n");
+		return PMU_FAILED;
+	}
+
+	/* Enable interrupts in PMU2 so that the hardware interrupt is
+	 * propagated when the IOC bit of a particular set command
+	 * is set
+	 */
+	if (ioc)
+		pmu_set_interrupt_enable();
+
+	/* Configure the sub systems for pmu2 */
+	pmu_write_subsys_config(pm_ssc);
+
+	command = (ioc) ? INTERACTIVE_IOC_VALUE : INTERACTIVE_VALUE;
+
+	 /* Special handling for PCI_D3cold in Tangier */
+	if (d3_cold)
+		command |= PM_CMD_D3_COLD;
+
+	/* send interactive command to SCU */
+	writel(command, &mid_pmu_cxt->pmu_reg->pm_cmd);
+
+	pmu_log_command(command, pm_ssc);
+
+	return 0;
+}
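+
+/*
+ * Command word note: INTERACTIVE_IOC_VALUE (0x00002301) differs from
+ * INTERACTIVE_VALUE (0x00002201) only in bit 8, the IOC
+ * (interrupt-on-completion) bit, and PM_CMD_D3_COLD sets bit 21 for
+ * the Tangier D3cold case.
+ */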
+
+/* Reads the status of each driver and updates the LSS values.
+ * To be called with the scu_ready_sem semaphore held and pmu_config
+ * zero-initialized.
+ */
+static void update_all_lss_states(struct pmu_ss_states *pmu_config)
+{
+	int i;
+	u32 PCIALLDEV_CFG[4] = {0, 0, 0, 0};
+
+	if (platform_is(INTEL_ATOM_MFLD) || platform_is(INTEL_ATOM_CLV)) {
+		for (i = 0; i < MAX_DEVICES; i++) {
+			int pmu_num = get_mid_pci_pmu_num(i);
+			struct pci_dev *pdev = get_mid_pci_drv(i, 0);
+
+			if ((pmu_num == PMU_NUM_2) && pdev) {
+				int ss_idx, ss_pos;
+				pci_power_t state;
+
+				ss_idx = get_mid_pci_ss_idx(i);
+				ss_pos = get_mid_pci_ss_pos(i);
+				state = pdev->current_state;
+				/* The case of device not probed yet:
+				 * Force D0i3 */
+				if (state == PCI_UNKNOWN)
+					state = pmu_pci_choose_state(pdev);
+
+				/* By default it's set to '0', hence
+				 * no need to update the PCI_D0 state
+				 */
+				state = pmu_pci_get_weakest_state_for_lss
+							(i, pdev, state);
+
+				pmu_config->pmu2_states[ss_idx] |=
+				 (pci_to_platform_state(state) <<
+					(ss_pos * BITS_PER_LSS));
+
+				PCIALLDEV_CFG[ss_idx] |=
+					(D0I3_MASK << (ss_pos * BITS_PER_LSS));
+			}
+		}
+	}
+
+	platform_update_all_lss_states(pmu_config, PCIALLDEV_CFG);
+}
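+
+/*
+ * Packing reminder: each of the four pmu2_states words holds
+ * ss_per_reg (16) LSSes at BITS_PER_LSS (2) bits each, so
+ * pci_to_platform_state(state) << (ss_pos * BITS_PER_LSS) places a
+ * 2-bit D0ix code into the word selected by ss_idx.
+ */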
+
+static int pmu_init(void)
+{
+	int status;
+	struct pmu_ss_states pmu_config;
+	struct pmu_suspend_config *ss_config;
+	int ret = 0;
+	int retry_times = 0;
+
+
+	dev_dbg(&mid_pmu_cxt->pmu_dev->dev, "PMU Driver loaded\n");
+	spin_lock_init(&mid_pmu_cxt->nc_ready_lock);
+
+	/* enumerate the PCI configuration space */
+	pmu_enumerate();
+
+	/* initialize the stats for pmu driver */
+	pmu_stats_init();
+
+	/* register platform pmu ops */
+	platform_set_pmu_ops();
+
+	/* platform specific initialization */
+	if (pmu_ops->init) {
+		status = pmu_ops->init();
+		if (status) {
+			dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+				"pmu_ops->init failed\n");
+			goto out_err1;
+		}
+	}
+
+	/* initialize the state variables here */
+	ss_config = kzalloc(sizeof(struct pmu_suspend_config), GFP_KERNEL);
+
+	if (ss_config == NULL) {
+		dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+			"Allocation of memory for ss_config has failed\n");
+		status = PMU_FAILED;
+		goto out_err1;
+	}
+
+	memset(&pmu_config, 0, sizeof(pmu_config));
+
+	ss_config->ss_state = pmu_config;
+
+	/* initialize for the autonomous S0i3 */
+	mid_pmu_cxt->ss_config = ss_config;
+
+	/* setup the wake capable devices */
+	mid_pmu_cxt->ss_config->wake_state.wake_enable[0] = WAKE_ENABLE_0;
+	mid_pmu_cxt->ss_config->wake_state.wake_enable[1] = WAKE_ENABLE_1;
+
+	/* setup the ignore lss list */
+	mid_pmu_cxt->ignore_lss[0] = pmu_ignore_lss0;
+	mid_pmu_cxt->ignore_lss[1] = pmu_ignore_lss1;
+	mid_pmu_cxt->ignore_lss[2] = pmu_ignore_lss2;
+	mid_pmu_cxt->ignore_lss[3] = pmu_ignore_lss3;
+
+	/* set wkc to a value suitable for s0ix */
+	writel(mid_pmu_cxt->ss_config->wake_state.wake_enable[0],
+		       &mid_pmu_cxt->pmu_reg->pm_wkc[0]);
+	writel(mid_pmu_cxt->ss_config->wake_state.wake_enable[1],
+		       &mid_pmu_cxt->pmu_reg->pm_wkc[1]);
+
+	/* Acquire the scu_ready_sem */
+	down(&mid_pmu_cxt->scu_ready_sem);
+
+	/* Now that the driver is initialized, allow
+	 * drivers to block in pmu_pci_set_power_state()
+	 * until we finish the first interactive command.
+	 */
+
+	pmu_initialized = true;
+
+	/* get the current status of each of the driver
+	 * and update it in SCU
+	 */
+	update_all_lss_states(&pmu_config);
+
+	status = pmu_issue_interactive_command(&pmu_config, false,
+						false);
+	if (status != PMU_SUCCESS) {
+		dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+			"Failed to change pmu mode to interactive = %d\n",
+			status);
+		status = PMU_FAILED;
+		up(&mid_pmu_cxt->scu_ready_sem);
+		goto out_err2;
+	}
+
+	/*
+	 * Wait for the interactive command to complete.
+	 * If we don't wait, the driver may access the
+	 * device before the SCU has powered it on.
+	 */
+retry:
+	ret = _pmu2_wait_not_busy();
+	if (unlikely(ret)) {
+		retry_times++;
+		if (retry_times < 60) {
+			usleep_range(10, 500);
+			goto retry;
+		} else {
+			pmu_dump_logs();
+			BUG();
+		}
+	}
+
+	/* In cases where gfx is not enabled
+	 * this will enable s0ix immediately
+	 */
+	if (pmu_ops->set_power_state_ops)
+		pmu_ops->set_power_state_ops(PCI_D3hot);
+
+	up(&mid_pmu_cxt->scu_ready_sem);
+
+	return PMU_SUCCESS;
+
+out_err2:
+	kfree(ss_config);
+	mid_pmu_cxt->ss_config = NULL;
+out_err1:
+	return status;
+}
+
+/**
+ * mid_pmu_probe - This is the function where most of the PMU driver
+ *		   initialization happens.
+ */
+static int
+mid_pmu_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
+{
+	int ret;
+	struct mrst_pmu_reg __iomem *pmu;
+	u32 data;
+
+	mid_pmu_cxt->pmu_wake_lock =
+				wakeup_source_register("pmu_wake_lock");
+
+	if (!mid_pmu_cxt->pmu_wake_lock) {
+		pr_err("%s: unable to register pmu wake source.\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* Init the device */
+	ret = pci_enable_device(dev);
+	if (ret) {
+		pr_err("Mid PM device cant be enabled\n");
+		goto out_err0;
+	}
+
+	/* store the dev */
+	mid_pmu_cxt->pmu_dev = dev;
+	dev_warn(&dev->dev, "PMU DRIVER Probe called\n");
+
+	ret = pci_request_regions(dev, PMU_DRV_NAME);
+	if (ret < 0) {
+		pr_err("pci request region has failed\n");
+		goto out_err1;
+	}
+
+	mid_pmu_cxt->pmu1_max_devs = PMU1_MAX_DEVS;
+	mid_pmu_cxt->pmu2_max_devs = PMU2_MAX_DEVS;
+	mid_pmu_cxt->ss_per_reg = 16;
+
+	/* The following code maps the addresses required for NC PM,
+	 * which is not needed on all platforms
+	 */
+	if (platform_is(INTEL_ATOM_MFLD) || platform_is(INTEL_ATOM_CLV)) {
+		data = intel_mid_msgbus_read32(OSPM_PUNIT_PORT, OSPM_APMBA);
+		mid_pmu_cxt->apm_base = data & 0xffff;
+
+		data = intel_mid_msgbus_read32(OSPM_PUNIT_PORT, OSPM_OSPMBA);
+		mid_pmu_cxt->ospm_base = data & 0xffff;
+	}
+
+	/* Map the memory of pmu1 PMU reg base */
+	pmu = pci_iomap(dev, 0, 0);
+	if (pmu == NULL) {
+		dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+				"Unable to map the PMU2 address space\n");
+		ret = PMU_FAILED;
+		goto out_err2;
+	}
+
+	mid_pmu_cxt->pmu_reg = pmu;
+
+	/* Map the memory of emergency emmc up */
+	mid_pmu_cxt->emergeny_emmc_up_addr =
+		ioremap_nocache(PMU_PANIC_EMMC_UP_ADDR, 4);
+	if (mid_pmu_cxt->emergeny_emmc_up_addr == NULL) {
+		dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+		"Unable to map the emergency emmc up address space\n");
+		ret = PMU_FAILED;
+		goto out_err3;
+	}
+
+	if (request_irq(dev->irq, pmu_sc_irq, IRQF_NO_SUSPEND, PMU_DRV_NAME,
+			NULL)) {
+		dev_dbg(&mid_pmu_cxt->pmu_dev->dev, "Registering isr has failed\n");
+		ret = PMU_FAILED;
+		goto out_err4;
+	}
+
+	/* call pmu init() for initialization of pmu interface */
+	ret = pmu_init();
+	if (ret != PMU_SUCCESS) {
+		dev_dbg(&mid_pmu_cxt->pmu_dev->dev, "PMU initialization has failed\n");
+		goto out_err5;
+	}
+	dev_warn(&mid_pmu_cxt->pmu_dev->dev, "after pmu initialization\n");
+
+	mid_pmu_cxt->pmu_init_time =
+		cpu_clock(raw_smp_processor_id());
+
+#ifdef CONFIG_PM_DEBUG
+	/*
+	 * FIXME: Since S3 is not enabled yet we need to take
+	 * a wake lock here. Else S3 will be triggered on display
+	 * time out and platform will hang
+	 */
+	if (platform_is(INTEL_ATOM_MRFLD) && !enable_s3)
+		__pm_stay_awake(mid_pmu_cxt->pmu_wake_lock);
+#endif
+
+	return 0;
+
+out_err5:
+	free_irq(dev->irq, &pmu_sc_irq);
+out_err4:
+	iounmap(mid_pmu_cxt->emergeny_emmc_up_addr);
+	mid_pmu_cxt->emergeny_emmc_up_addr = NULL;
+out_err3:
+	iounmap(mid_pmu_cxt->pmu_reg);
+	mid_pmu_cxt->base_addr.pmu1_base = NULL;
+	mid_pmu_cxt->base_addr.pmu2_base = NULL;
+out_err2:
+	pci_release_region(dev, 0);
+out_err1:
+	pci_disable_device(dev);
+out_err0:
+	wakeup_source_unregister(mid_pmu_cxt->pmu_wake_lock);
+	return ret;
+}
+
+static void mid_pmu_remove(struct pci_dev *dev)
+{
+	/* Freeing up the irq */
+	free_irq(dev->irq, &pmu_sc_irq);
+
+	if (pmu_ops->remove)
+		pmu_ops->remove();
+
+	iounmap(mid_pmu_cxt->emergeny_emmc_up_addr);
+	mid_pmu_cxt->emergeny_emmc_up_addr = NULL;
+
+	pci_iounmap(dev, mid_pmu_cxt->pmu_reg);
+	mid_pmu_cxt->base_addr.pmu1_base = NULL;
+	mid_pmu_cxt->base_addr.pmu2_base = NULL;
+
+	/* disable the current PCI device */
+	pci_release_region(dev, 0);
+	pci_disable_device(dev);
+
+	wakeup_source_unregister(mid_pmu_cxt->pmu_wake_lock);
+}
+
+static void mid_pmu_shutdown(struct pci_dev *dev)
+{
+	dev_dbg(&mid_pmu_cxt->pmu_dev->dev, "Mid PM mid_pmu_shutdown called\n");
+
+	if (mid_pmu_cxt) {
+		/* Restrict platform Cx state to C6 */
+		pm_qos_update_request(mid_pmu_cxt->s3_restrict_qos,
+					(CSTATE_EXIT_LATENCY_S0i1-1));
+
+		down(&mid_pmu_cxt->scu_ready_sem);
+		mid_pmu_cxt->shutdown_started = true;
+		up(&mid_pmu_cxt->scu_ready_sem);
+	}
+}
+
+static struct pci_driver driver = {
+	.name = PMU_DRV_NAME,
+	.id_table = mid_pm_ids,
+	.probe = mid_pmu_probe,
+	.remove = mid_pmu_remove,
+	.shutdown = mid_pmu_shutdown
+};
+
+static int standby_enter(void)
+{
+	u32 temp = 0;
+	int s3_state = mid_state_to_sys_state(MID_S3_STATE);
+
+	if (mid_s0ix_enter(MID_S3_STATE) != MID_S3_STATE) {
+		pmu_set_s0ix_complete();
+		return -EINVAL;
+	}
+
+	/* time stamp for end of s3 entry */
+	time_stamp_for_sleep_state_latency(s3_state, false, true);
+
+	__monitor((void *) &temp, 0, 0);
+	smp_mb();
+	__mwait(mid_pmu_cxt->s3_hint, 1);
+
+	/* time stamp for start of s3 exit */
+	time_stamp_for_sleep_state_latency(s3_state, true, false);
+
+	pmu_set_s0ix_complete();
+
+	/* set wkc to a value suitable for s0ix */
+	writel(mid_pmu_cxt->ss_config->wake_state.wake_enable[0],
+		       &mid_pmu_cxt->pmu_reg->pm_wkc[0]);
+	writel(mid_pmu_cxt->ss_config->wake_state.wake_enable[1],
+		       &mid_pmu_cxt->pmu_reg->pm_wkc[1]);
+
+	if (platform_is(INTEL_ATOM_MRFLD))
+		up(&mid_pmu_cxt->scu_ready_sem);
+
+	return 0;
+}
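+
+/*
+ * standby_enter() relies on the MONITOR/MWAIT pair: __monitor() arms
+ * address monitoring on a dummy location and __mwait() then parks the
+ * CPU with the platform-specific s3_hint, which (on these SoCs) lets
+ * the SCU take the platform into S3; execution resumes past the
+ * mwait on wake.
+ */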
+
+static int mid_suspend_begin(suspend_state_t state)
+{
+	mid_pmu_cxt->suspend_started = true;
+	pmu_s3_stats_update(1);
+
+	/* Restrict to C6 during suspend */
+	pm_qos_update_request(mid_pmu_cxt->s3_restrict_qos,
+					(CSTATE_EXIT_LATENCY_S0i1-1));
+	return 0;
+}
+
+static int mid_suspend_valid(suspend_state_t state)
+{
+	int ret = 0;
+
+	switch (state) {
+	case PM_SUSPEND_ON:
+	case PM_SUSPEND_MEM:
+		/* check if we are ready */
+		if (likely(pmu_initialized))
+			ret = 1;
+	break;
+	}
+
+	return ret;
+}
+
+static int mid_suspend_prepare(void)
+{
+	return 0;
+}
+
+static int mid_suspend_prepare_late(void)
+{
+	return 0;
+}
+
+static int mid_suspend_enter(suspend_state_t state)
+{
+	int ret;
+
+	if (state != PM_SUSPEND_MEM)
+		return -EINVAL;
+
+	/* one last check before entering standby */
+	if (pmu_ops->check_nc_sc_status) {
+		if (!(pmu_ops->check_nc_sc_status())) {
+			trace_printk("Device d0ix status check failed! Aborting Standby entry!\n");
+//			WARN_ON(1);
+		}
+	}
+
+	trace_printk("s3_entry\n");
+	ret = standby_enter();
+	trace_printk("s3_exit %d\n", ret);
+	if (ret != 0)
+		dev_dbg(&mid_pmu_cxt->pmu_dev->dev,
+				"Failed to enter S3 status: %d\n", ret);
+
+	return ret;
+}
+
+static void mid_suspend_end(void)
+{
+	/* allow s0ix now */
+	pm_qos_update_request(mid_pmu_cxt->s3_restrict_qos,
+					PM_QOS_DEFAULT_VALUE);
+
+	pmu_s3_stats_update(0);
+	mid_pmu_cxt->suspend_started = false;
+}
+
+static const struct platform_suspend_ops mid_suspend_ops = {
+	.begin = mid_suspend_begin,
+	.valid = mid_suspend_valid,
+	.prepare = mid_suspend_prepare,
+	.prepare_late = mid_suspend_prepare_late,
+	.enter = mid_suspend_enter,
+	.end = mid_suspend_end,
+};
+
+/**
+ * mid_pci_register_init - register the PMU driver as PCI device
+ */
+static int __init mid_pci_register_init(void)
+{
+	int ret;
+
+	mid_pmu_cxt = kzalloc(sizeof(struct mid_pmu_dev), GFP_KERNEL);
+
+	if (mid_pmu_cxt == NULL)
+		return -ENOMEM;
+
+	mid_pmu_cxt->s3_restrict_qos =
+		kzalloc(sizeof(struct pm_qos_request), GFP_KERNEL);
+	if (mid_pmu_cxt->s3_restrict_qos) {
+		pm_qos_add_request(mid_pmu_cxt->s3_restrict_qos,
+			 PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+	} else {
+		return -ENOMEM;
+	}
+
+	init_nc_device_states();
+
+	mid_pmu_cxt->nc_restrict_qos =
+		kzalloc(sizeof(struct pm_qos_request), GFP_KERNEL);
+	if (mid_pmu_cxt->nc_restrict_qos == NULL)
+		return -ENOMEM;
+
+	/* initialize the semaphores */
+	sema_init(&mid_pmu_cxt->scu_ready_sem, 1);
+
+	/* register the PCI driver */
+	ret = pci_register_driver(&driver);
+	suspend_set_ops(&mid_suspend_ops);
+
+	return ret;
+}
+fs_initcall(mid_pci_register_init);
+
+void pmu_power_off(void)
+{
+	/* wait till SCU is ready */
+	if (!_pmu2_wait_not_busy())
+		writel(S5_VALUE, &mid_pmu_cxt->pmu_reg->pm_cmd);
+
+	if (!_pmu2_wait_not_busy())
+		rpmsg_send_generic_simple_command(IPCMSG_COLD_OFF, 1);
+}
+
+static void __exit mid_pci_cleanup(void)
+{
+	if (mid_pmu_cxt) {
+		if (mid_pmu_cxt->s3_restrict_qos)
+			pm_qos_remove_request(mid_pmu_cxt->s3_restrict_qos);
+
+		if (pm_qos_request_active(mid_pmu_cxt->nc_restrict_qos))
+			pm_qos_remove_request(mid_pmu_cxt->nc_restrict_qos);
+	}
+
+	suspend_set_ops(NULL);
+
+	/* unregister the PCI driver */
+	pci_unregister_driver(&driver);
+
+	if (mid_pmu_cxt) {
+		pmu_stats_finish();
+		kfree(mid_pmu_cxt->ss_config);
+	}
+
+	kfree(mid_pmu_cxt);
+}
+module_exit(mid_pci_cleanup);
diff --git a/arch/x86/platform/intel-mid/intel_soc_pmu.h b/arch/x86/platform/intel-mid/intel_soc_pmu.h
new file mode 100644
index 0000000..243ae47
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_soc_pmu.h
@@ -0,0 +1,511 @@
+/*
+ * intel_soc_pmu.h
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#ifndef _MID_PMU_H_
+#define _MID_PMU_H_
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/semaphore.h>
+#include <linux/completion.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/jhash.h>
+#include <linux/suspend.h>
+#include <linux/workqueue.h>
+#include <linux/nmi.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_wakeup.h>
+#include <asm/apic.h>
+#include <asm/intel_scu_ipc.h>
+#include <linux/intel_mid_pm.h>
+
+#include "intel_soc_mdfld.h"
+#include "intel_soc_clv.h"
+#include "intel_soc_mrfld.h"
+
+#define MID_PMU_MFLD_DRV_DEV_ID                 0x0828
+#define MID_PMU_CLV_DRV_DEV_ID			0x08EC
+#define MID_PMU_MRFL_DRV_DEV_ID			0x11A1
+
+#define MID_MRFL_HDMI_DRV_DEV_ID		0x11A6
+
+/* SRAM address where PANIC START is written */
+#define PMU_PANIC_EMMC_UP_ADDR			0xFFFF3080
+#define PMU_PANIC_EMMC_UP_REQ_CMD		0xDEADBEEF
+
+#define MAX_DEVICES	(PMU1_MAX_DEVS + PMU2_MAX_DEVS)
+#define PMU_MAX_LSS_SHARE 4
+
+#define PMU2_BUSY_TIMEOUT			500000
+#define HSU0_PCI_ID				0x81c
+#define HSU1_PCI_ID				0x81b
+#define HSI_PCI_ID				0x833
+
+#define MODE_ID_MAGIC_NUM			1
+
+#define   LOG_ID_MASK				0x7F
+#define   SUB_CLASS_MASK			0xFF00
+
+
+/* Definition for C6 Offload MSR Address */
+#define MSR_C6OFFLOAD_CTL_REG			0x120
+
+#define MSR_C6OFFLOAD_SET_LOW			1
+#define MSR_C6OFFLOAD_SET_HIGH			0
+
+#define C6OFFLOAD_BIT_MASK			0x2
+#define C6OFFLOAD_BIT				0x2
+
+#define PMU_DRV_NAME				"intel_pmu_driver"
+
+#define MID_PCI_INDEX_HASH_BITS		8 /*size 256*/
+#define MID_PCI_INDEX_HASH_SIZE		(1<<MID_PCI_INDEX_HASH_BITS)
+#define MID_PCI_INDEX_HASH_MASK		(MID_PCI_INDEX_HASH_SIZE-1)
+
+/* some random number for initvalue */
+#define	MID_PCI_INDEX_HASH_INITVALUE	0x27041975
+
+/*
+ * Values for programming the PM_CMD register based on the PM
+ * architecture specification
+ */
+
+#define S5_VALUE	0x309D2601
+#define S0I1_VALUE	0X30992601
+#define LPMP3_VALUE	0X40492601
+#define S0I3_VALUE	0X309B2601
+#define FAST_ON_OFF_VALUE	0X309E2601
+#define INTERACTIVE_VALUE	0X00002201
+#define INTERACTIVE_IOC_VALUE	0X00002301
+
+#define WAKE_ENABLE_0		0xffffffff
+#define WAKE_ENABLE_1		0xffffffff
+#define INVALID_WAKE_SRC	0xFFFF
+
+#define LOG_SS_MASK		0x80
+
+#define D0I0_MASK		0
+#define D0I1_MASK		1
+#define D0I2_MASK		2
+#define D0I3_MASK		3
+
+#define BITS_PER_LSS		2
+#define MAX_LSS_POSSIBLE	64
+#define SS_IDX_MASK		0x3
+#define SS_POS_MASK		0xF
+
+#define PMU_BASE_ADDR(pmu_num) ((pmu_num == 0) ? \
+				(u32) base_addr.pmu1_base : \
+				(u32) base_addr.pmu2_base)
+
+#define SSMSK(mask, lss) ((mask) << ((lss) * 2))
+#define SSWKC(lss) (1 << (lss))
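+
+/* Illustrative expansions: SSMSK(D0I3_MASK, 5) == 0x3 << 10 == 0xC00,
+ * selecting LSS 5's two state bits, and SSWKC(5) == 0x20, its wake
+ * control bit.
+ */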
+
+/* North Complex Power management */
+#define OSPM_PUNIT_PORT         0x04
+#define OSPM_OSPMBA             0x78
+#define OSPM_PM_SSC             0x20
+#define OSPM_PM_SSS             0x30
+
+#define OSPM_APMBA              0x7a
+#define APM_CMD                 0x0
+#define APM_STS                 0x04
+#define PM_CMD_D3_COLD		(0x1 << 21)
+
+/* Size of command logging array */
+#define LOG_SIZE	5
+
+enum sys_state {
+	SYS_STATE_S0I0,
+	SYS_STATE_S0I1,
+	SYS_STATE_LPMP3,
+	SYS_STATE_S0I2,
+	SYS_STATE_S0I3,
+	SYS_STATE_S3,
+	SYS_STATE_S5,
+	SYS_STATE_MAX
+};
+
+enum int_status {
+	INVALID_INT = 0,
+	CMD_COMPLETE_INT = 1,
+	CMD_ERROR_INT = 2,
+	WAKE_RECEIVED_INT = 3,
+	SUBSYS_POW_ERR_INT = 4,
+	S0ix_MISS_INT = 5,
+	NO_ACKC6_INT = 6,
+	TRIGGERERR = 7,
+	INVALID_SRC_INT
+};
+
+enum pmu_number {
+	PMU_NUM_1,
+	PMU_NUM_2,
+	PMU_MAX_DEVS
+};
+
+enum pmu_ss_state {
+	SS_STATE_D0I0 = 0,
+	SS_STATE_D0I1 = 1,
+	SS_STATE_D0I2 = 2,
+	SS_STATE_D0I3 = 3
+};
+
+
+struct pmu_ss_states {
+	unsigned long pmu1_states;
+	unsigned long pmu2_states[4];
+};
+
+struct pci_dev_info {
+	u8 ss_pos;
+	u8 ss_idx;
+	u8 pmu_num;
+
+	u32 log_id;
+	u32 cap;
+	struct pci_dev *drv[PMU_MAX_LSS_SHARE];
+	pci_power_t power_state[PMU_MAX_LSS_SHARE];
+};
+
+struct pmu_wake_ss_states {
+	unsigned long wake_enable[2];
+	unsigned long pmu1_wake_states;
+	unsigned long pmu2_wake_states[4];
+};
+
+struct pmu_suspend_config {
+	struct pmu_ss_states ss_state;
+	struct pmu_wake_ss_states wake_state;
+};
+
+struct pci_dev_index {
+	struct pci_dev	*pdev;
+	u8		index;
+};
+
+/* PMU register interface */
+struct mrst_pmu_reg {
+	u32 pm_sts;             /* 0x00 */
+	u32 pm_cmd;             /* 0x04 */
+	u32 pm_ics;             /* 0x08 */
+	u32 _resv1;
+	u32 pm_wkc[2];          /* 0x10 */
+	u32 pm_wks[2];          /* 0x18 */
+	u32 pm_ssc[4];          /* 0x20 */
+	u32 pm_sss[4];          /* 0x30 */
+	u32 pm_wssc[4];         /* 0x40 */
+	u32 pm_c3c4;            /* 0x50 */
+	u32 pm_c5c6;            /* 0x54 */
+	u32 pm_msic;            /* 0x58 */
+};
+
+struct mid_pmu_cmd_log {
+	struct timespec ts;
+	u32 command;
+	struct pmu_ss_states pm_ssc;
+};
+
+struct mid_pmu_irq_log {
+	struct timespec ts;
+	u32 status;
+};
+
+struct mid_pmu_ipc_log {
+	struct timespec ts;
+	u32 command;
+};
+
+struct mid_pmu_pmu_irq_log {
+	struct timespec ts;
+	u8 status;
+};
+
+struct mid_pmu_ipc_irq_log {
+	struct timespec ts;
+};
+
+union pmu_pm_status {
+	struct {
+		u32 pmu_rev:8;
+		u32 pmu_busy:1;
+		u32 mode_id:4;
+		u32 Reserved:19;
+	} pmu_status_parts;
+	u32 pmu_status_value;
+};
+
+union pmu_pm_ics {
+	struct {
+		u32 int_status:8;
+		u32 int_enable:1;
+		u32 int_pend:1;
+		/* New bit added in TNG to indicate device wakes */
+		u32 sw_int_status:1;
+		u32 reserved:21;
+	} pmu_pm_ics_parts;
+	u32 pmu_pm_ics_value;
+};
+
+struct intel_mid_base_addr {
+	u32 *pmu1_base;
+	void __iomem *pmu2_base;
+	u32 *pm_table_base;
+	u32 __iomem *offload_reg;
+};
+
+#define MAX_PMU_LOG_STATES	(S0I3_STATE_IDX - C4_STATE_IDX + 1)
+
+struct mid_pmu_stats {
+	u64 err_count[3];
+	u64 count;
+	u64 time;
+	u64 last_entry;
+	u64 last_try;
+	u64 first_entry;
+	u32 demote_count[MAX_PMU_LOG_STATES];
+	u32 display_blocker_count;
+	u32 camera_blocker_count;
+	u32 blocker_count[MAX_LSS_POSSIBLE];
+};
+
+struct device_residency {
+	u64 d0i0_entry;
+	u64 d0i3_entry;
+	u64 d0i0_acc;
+	u64 d0i3_acc;
+	u64 start;
+	pci_power_t state;
+};
+
+struct mid_pmu_dev {
+	bool suspend_started;
+	bool shutdown_started;
+	bool camera_off;
+	bool display_off;
+
+	u32 apm_base;
+	u32 ospm_base;
+	u32 pmu1_max_devs;
+	u32 pmu2_max_devs;
+	u32 ss_per_reg;
+	u32 d0ix_stat[MAX_LSS_POSSIBLE][SS_STATE_D0I3+1];
+	u32 num_wakes[MAX_DEVICES][SYS_STATE_MAX];
+	u32 ignore_lss[4];
+	u32 os_sss[4];
+#ifdef CONFIG_PM_DEBUG
+	u32 cstate_ignore;
+	struct pm_qos_request *cstate_qos;
+#endif
+
+	u32 __iomem *emergeny_emmc_up_addr;
+	u64 pmu_init_time;
+
+	int cmd_error_int;
+	int s0ix_possible;
+	int s0ix_entered;
+
+#ifdef LOG_PMU_EVENTS
+	int cmd_log_idx;
+	int ipc_log_idx;
+	int ipc_irq_log_idx;
+	int pmu_irq_log_idx;
+#endif
+
+	enum sys_state  pmu_current_state;
+
+	struct pci_dev_info pci_devs[MAX_DEVICES];
+	struct pci_dev_index
+		pci_dev_hash[MID_PCI_INDEX_HASH_SIZE];
+	struct intel_mid_base_addr base_addr;
+	struct mrst_pmu_reg	__iomem *pmu_reg;
+	struct semaphore scu_ready_sem;
+	struct mid_pmu_stats pmu_stats[SYS_STATE_MAX];
+	struct device_residency pmu_dev_res[MAX_DEVICES];
+	struct delayed_work log_work;
+	struct pm_qos_request *s3_restrict_qos;
+
+#ifdef LOG_PMU_EVENTS
+	struct mid_pmu_cmd_log cmd_log[LOG_SIZE];
+	struct mid_pmu_ipc_log ipc_log[LOG_SIZE];
+	struct mid_pmu_ipc_irq_log ipc_irq_log[LOG_SIZE];
+	struct mid_pmu_pmu_irq_log pmu_irq_log[LOG_SIZE];
+#endif
+	struct wakeup_source *pmu_wake_lock;
+
+	struct pmu_suspend_config *ss_config;
+	struct pci_dev *pmu_dev;
+	struct pm_qos_request *nc_restrict_qos;
+
+	spinlock_t nc_ready_lock;
+
+	int s3_hint;
+};
+
+struct platform_pmu_ops {
+	int (*init)(void);
+	void (*prepare)(int);
+	bool (*enter)(int);
+	void (*wakeup)(void);
+	void (*remove)(void);
+	pci_power_t (*pci_choose_state) (int);
+	void (*set_power_state_ops) (int);
+	void (*set_s0ix_complete) (void);
+	int (*nc_set_power_state) (int, int, int, int *);
+	bool (*check_nc_sc_status) (void);
+};
+
+extern char s0ix[5];
+extern struct platform_pmu_ops mfld_pmu_ops;
+extern struct platform_pmu_ops clv_pmu_ops;
+extern struct platform_pmu_ops mrfld_pmu_ops;
+extern struct platform_pmu_ops *get_platform_ops(void);
+extern void mfld_s0ix_sram_save_cleanup(void);
+extern void pmu_stats_init(void);
+extern void pmu_s3_stats_update(int enter);
+extern void pmu_stats_finish(void);
+extern void mfld_s0ix_sram_restore(u32 s0ix);
+extern void pmu_stat_error(u8 err_type);
+extern void pmu_stat_end(void);
+extern void pmu_stat_start(enum sys_state type);
+extern int pmu_pci_to_indexes(struct pci_dev *pdev, int *index,
+				int *pmu_num, int *ss_idx, int *ss_pos);
+extern struct mid_pmu_dev *mid_pmu_cxt;
+extern void platform_set_pmu_ops(void);
+extern void pmu_read_sss(struct pmu_ss_states *pm_ssc);
+extern int pmu_issue_interactive_command(struct pmu_ss_states *pm_ssc,
+				bool ioc, bool d3_cold);
+extern int _pmu2_wait_not_busy(void);
+extern u32 get_s0ix_val_set_pm_ssc(int);
+extern int pmu_get_wake_source(void);
+extern bool pmu_initialized;
+extern struct platform_pmu_ops *pmu_ops;
+extern void platform_update_all_lss_states(struct pmu_ss_states *, int *);
+extern int set_extended_cstate_mode(const char *val, struct kernel_param *kp);
+extern int get_extended_cstate_mode(char *buffer, struct kernel_param *kp);
+extern int byt_pmu_nc_set_power_state(int islands, int state_type, int reg);
+extern int byt_pmu_nc_get_power_state(int islands, int reg);
+extern void pmu_set_interrupt_enable(void);
+extern void pmu_clear_interrupt_enable(void);
+
+#ifdef LOG_PMU_EVENTS
+extern void pmu_log_pmu_irq(int status);
+extern void pmu_log_command(u32 command, struct pmu_ss_states *pm_ssc);
+extern void pmu_dump_logs(void);
+#endif
+
+/* Accessor function for pci_devs start */
+static inline void pmu_stat_clear(void)
+{
+	mid_pmu_cxt->pmu_current_state = SYS_STATE_S0I0;
+}
+
+static inline struct pci_dev *get_mid_pci_drv(int lss_index, int i)
+{
+	return mid_pmu_cxt->pci_devs[lss_index].drv[i];
+}
+
+static inline pci_power_t get_mid_pci_power_state(int lss_index, int i)
+{
+	return mid_pmu_cxt->pci_devs[lss_index].power_state[i];
+}
+
+static inline u8 get_mid_pci_ss_idx(int lss_index)
+{
+	return mid_pmu_cxt->pci_devs[lss_index].ss_idx & SS_IDX_MASK;
+}
+
+static inline u8 get_mid_pci_ss_pos(int lss_index)
+{
+	return mid_pmu_cxt->pci_devs[lss_index].ss_pos & SS_POS_MASK;
+}
+
+static inline u8 get_mid_pci_pmu_num(int lss_index)
+{
+	return mid_pmu_cxt->pci_devs[lss_index].pmu_num;
+}
+
+static inline void set_mid_pci_drv(int lss_index,
+					int i, struct pci_dev *pdev)
+{
+	mid_pmu_cxt->pci_devs[lss_index].drv[i] = pdev;
+}
+
+static inline void set_mid_pci_power_state(int lss_index,
+					int i, pci_power_t state)
+{
+	mid_pmu_cxt->pci_devs[lss_index].power_state[i] = state;
+}
+
+static inline void set_mid_pci_ss_idx(int lss_index, u8 ss_idx)
+{
+	mid_pmu_cxt->pci_devs[lss_index].ss_idx = ss_idx;
+}
+
+static inline void set_mid_pci_ss_pos(int lss_index, u8 ss_pos)
+{
+	mid_pmu_cxt->pci_devs[lss_index].ss_pos = ss_pos;
+}
+
+static inline void set_mid_pci_pmu_num(int lss_index, u8 pmu_num)
+{
+	mid_pmu_cxt->pci_devs[lss_index].pmu_num = pmu_num;
+}
+
+static inline void set_mid_pci_log_id(int lss_index, u32 log_id)
+{
+	mid_pmu_cxt->pci_devs[lss_index].log_id = log_id;
+}
+
+static inline void set_mid_pci_cap(int lss_index, u32 cap)
+{
+	mid_pmu_cxt->pci_devs[lss_index].cap = cap;
+}
+
+static inline u32 get_d0ix_stat(int lss_index, int state)
+{
+	return mid_pmu_cxt->d0ix_stat[lss_index][state];
+}
+
+static inline void inc_d0ix_stat(int lss_index, int state)
+{
+	mid_pmu_cxt->d0ix_stat[lss_index][state]++;
+}
+
+static inline void clear_d0ix_stats(void)
+{
+	memset(mid_pmu_cxt->d0ix_stat, 0, sizeof(mid_pmu_cxt->d0ix_stat));
+}
+
+/* Accessor functions for pci_devs end */
+
+static inline bool nc_device_state(void)
+{
+	return !mid_pmu_cxt->display_off || !mid_pmu_cxt->camera_off;
+}
+
+#endif
diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
new file mode 100644
index 0000000..d0aa010
--- /dev/null
+++ b/arch/x86/platform/intel-mid/mfld.c
@@ -0,0 +1,94 @@
+/*
+ * mfld.c: Intel Medfield platform setup code
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author:
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/intel_msic.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/intel_pmic_gpio.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+
+#include <asm/setup.h>
+#include <asm/mpspec_def.h>
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <asm/io_apic.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_vrtc.h>
+#include <asm/io.h>
+#include <asm/i8259.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/apb_timer.h>
+#include <asm/reboot.h>
+
+static void penwell_arch_setup(void);
+/* penwell arch ops */
+static struct intel_mid_ops penwell_ops = {
+	.arch_setup = penwell_arch_setup,
+};
+
+static void mfld_power_off(void)
+{
+}
+
+static unsigned long __init mfld_calibrate_tsc(void)
+{
+	unsigned long fast_calibrate;
+	u32 lo, hi, ratio, fsb;
+
+	rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+	pr_debug("IA32 perf status is 0x%x, 0x%0x\n", lo, hi);
+	ratio = (hi >> 8) & 0x1f;
+	pr_debug("ratio is %d\n", ratio);
+	if (!ratio) {
+		pr_err("read a zero ratio, should be incorrect!\n");
+		pr_err("force tsc ratio to 16 ...\n");
+		ratio = 16;
+	}
+	rdmsr(MSR_FSB_FREQ, lo, hi);
+	if ((lo & 0x7) == 0x7)
+		fsb = FSB_FREQ_83SKU;
+	else
+		fsb = FSB_FREQ_100SKU;
+	fast_calibrate = ratio * fsb;
+	pr_debug("read penwell tsc %lu khz\n", fast_calibrate);
+	lapic_timer_frequency = fsb * 1000 / HZ;
+	/* mark tsc clocksource as reliable */
+	set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
+
+	if (fast_calibrate)
+		return fast_calibrate;
+
+	return 0;
+}
+
+static void penwell_arch_setup(void)
+{
+	x86_platform.calibrate_tsc = mfld_calibrate_tsc;
+	pm_power_off = mfld_power_off;
+}
+
+void *get_penwell_ops(void)
+{
+	return &penwell_ops;
+}
+
+void *get_cloverview_ops(void)
+{
+	return &penwell_ops;
+}
diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
new file mode 100644
index 0000000..0e952f8
--- /dev/null
+++ b/arch/x86/platform/intel-mid/mrfl.c
@@ -0,0 +1,161 @@
+/*
+ * mrfl.c: Intel Merrifield platform specific setup code
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author: Mark F. Brown <mark.f.brown@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <asm/setup.h>
+#include <asm/intel-mid.h>
+#include <asm/processor.h>
+
+#define APIC_DIVISOR 16
+
+enum intel_mid_sim_type __intel_mid_sim_platform;
+EXPORT_SYMBOL_GPL(__intel_mid_sim_platform);
+
+static void (*intel_mid_timer_init)(void);
+
+static void tangier_arch_setup(void);
+
+/* tangier arch ops */
+static struct intel_mid_ops tangier_ops = {
+	.arch_setup = tangier_arch_setup,
+};
+
+static unsigned long __init tangier_calibrate_tsc(void)
+{
+	/* [REVERT ME] fast timer calibration method to be defined */
+	if ((intel_mid_identify_sim() == INTEL_MID_CPU_SIMULATION_VP) ||
+	    (intel_mid_identify_sim() == INTEL_MID_CPU_SIMULATION_HVP)) {
+		lapic_timer_frequency = 50000;
+		return 1000000;
+	}
+
+	if ((intel_mid_identify_sim() == INTEL_MID_CPU_SIMULATION_SLE) ||
+	    (intel_mid_identify_sim() == INTEL_MID_CPU_SIMULATION_NONE)) {
+
+		unsigned long fast_calibrate;
+		u32 lo, hi, ratio, fsb, bus_freq;
+
+		/* *********************** */
+		/* Compute TSC:Ratio * FSB */
+		/* *********************** */
+
+		/* Compute Ratio */
+		rdmsr(MSR_PLATFORM_INFO, lo, hi);
+		pr_debug("IA32 PLATFORM_INFO is 0x%x : %x\n", hi, lo);
+
+		ratio = (lo >> 8) & 0xFF;
+		pr_debug("ratio is %d\n", ratio);
+		if (!ratio) {
+			pr_err("Read a zero ratio, force tsc ratio to 4 ...\n");
+			ratio = 4;
+		}
+
+		/* Compute FSB */
+		rdmsr(MSR_FSB_FREQ, lo, hi);
+		pr_debug("Actual FSB frequency detected by SOC 0x%x : %x\n",
+			hi, lo);
+
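+		/* Bits 2:0 of MSR_FSB_FREQ encode the FSB SKU. */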
+		bus_freq = lo & 0x7;
+		pr_debug("bus_freq = 0x%x\n", bus_freq);
+
+		switch (bus_freq) {
+		case 0:
+		case 1:
+			fsb = FSB_FREQ_100SKU;
+			break;
+		case 2:
+			fsb = FSB_FREQ_133SKU;
+			break;
+		case 3:
+			fsb = FSB_FREQ_167SKU;
+			break;
+		case 4:
+			fsb = FSB_FREQ_83SKU;
+			break;
+		case 5:
+			fsb = FSB_FREQ_400SKU;
+			break;
+		case 6:
+			fsb = FSB_FREQ_267SKU;
+			break;
+		case 7:
+			fsb = FSB_FREQ_333SKU;
+			break;
+		default:
+			/* Unreachable: bus_freq is masked to 3 bits. */
+			pr_err("Invalid bus_freq! Setting to minimal value!\n");
+			fsb = FSB_FREQ_100SKU;
+			break;
+		}
+
+		/* TSC = FSB Freq * Resolved HFM Ratio */
+		fast_calibrate = ratio * fsb;
+		pr_debug("calculate tangier tsc %lu KHz\n", fast_calibrate);
+
+		/* ************************************ */
+		/* Calculate Local APIC Timer Frequency */
+		/* ************************************ */
+		lapic_timer_frequency = (fsb * 1000) / HZ;
+
+		pr_debug("Setting lapic_timer_frequency = %d\n",
+			lapic_timer_frequency);
+
+		/* mark tsc clocksource as reliable */
+		set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
+
+		if (fast_calibrate)
+			return fast_calibrate;
+	}
+	return 0;
+}
+
+/* Allow user to enable simulator quirks settings for kernel */
+static int __init set_simulation_platform(char *str)
+{
+	int platform;
+
+	__intel_mid_sim_platform = INTEL_MID_CPU_SIMULATION_NONE;
+	if (get_option(&str, &platform)) {
+		__intel_mid_sim_platform = platform;
+		pr_info("simulator mode %d enabled.\n",
+			__intel_mid_sim_platform);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+early_param("mrfld_simulation", set_simulation_platform);
+
+static void __init tangier_time_init(void)
+{
+	/* [REVERT ME] ARAT capability not set in VP. Force setting */
+	if (intel_mid_identify_sim() == INTEL_MID_CPU_SIMULATION_VP ||
+	    intel_mid_identify_sim() == INTEL_MID_CPU_SIMULATION_HVP)
+		set_cpu_cap(&boot_cpu_data, X86_FEATURE_ARAT);
+
+	if (intel_mid_timer_init)
+		intel_mid_timer_init();
+}
+
+static void __init tangier_arch_setup(void)
+{
+	x86_platform.calibrate_tsc = tangier_calibrate_tsc;
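+	/*
+	 * Save the default timer init; tangier_time_init() applies the
+	 * simulator quirks and then chains to it.
+	 */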
+	intel_mid_timer_init = x86_init.timers.timer_init;
+	x86_init.timers.timer_init = tangier_time_init;
+}
+
+void *get_tangier_ops(void)
+{
+	return &tangier_ops;
+}
+
+/* piggy back on anniedale ops right now */
+void *get_anniedale_ops(void)
+{
+	return &tangier_ops;
+}
diff --git a/arch/x86/platform/intel-mid/pmu_tng.c b/arch/x86/platform/intel-mid/pmu_tng.c
new file mode 100644
index 0000000..dd69354
--- /dev/null
+++ b/arch/x86/platform/intel-mid/pmu_tng.c
@@ -0,0 +1,218 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Dale B. Stimson <dale.b.stimson@intel.com>
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/delay.h>
+
+#include <asm/intel-mid.h>
+#include "pmu_tng.h"
+
+
+#if (defined DEBUG_PM_CMD) && DEBUG_PM_CMD
+const char *pm_cmd_reg_name(u32 reg_addr)
+{
+	const char *pstr;
+
+	switch (reg_addr) {
+	case GFX_SS_PM0:
+		pstr = "GFX_SS_PM0";
+		break;
+	case GFX_SS_PM1:
+		pstr = "GFX_SS_PM1";
+		break;
+	case VED_SS_PM0:
+		pstr = "VED_SS_PM0";
+		break;
+	case VED_SS_PM1:
+		pstr = "VED_SS_PM1";
+		break;
+	case VEC_SS_PM0:
+		pstr = "VEC_SS_PM0";
+		break;
+	case VEC_SS_PM1:
+		pstr = "VEC_SS_PM1";
+		break;
+	case DSP_SS_PM:
+		pstr = "DSP_SS_PM";
+		break;
+	case VSP_SS_PM0:
+		pstr = "VSP_SS_PM0";
+		break;
+	case VSP_SS_PM1:
+		pstr = "VSP_SS_PM1";
+		break;
+	case MIO_SS_PM:
+		pstr = "MIO_SS_PM";
+		break;
+	case HDMIO_SS_PM:
+		pstr = "HDMIO_SS_PM";
+		break;
+	case NC_PM_SSS:
+		pstr = "NC_PM_SSS";
+		break;
+	default:
+		pstr = "(unknown_pm_reg)";
+		break;
+	}
+
+	return pstr;
+}
+#endif /* if (defined DEBUG_PM_CMD) && DEBUG_PM_CMD */
+
+
+/**
+ * pmu_set_power_state_tng() - Send power management cmd to punit and
+ * wait for completion.
+ *
+ * This function implements Tangier/Merrifield punit-based power control.
+ *
+ * @reg_pm0: Address of the PM control register.  Example: GFX_SS_PM0
+ *
+ * @si_mask: Control bits.  "si" stands for "sub-islands".
+ * Bit mask specifying one or more of the power islands to be affected.
+ * Each power island is a two-bit field; set both bits of every island
+ * that this command should affect.
+ * For each island, either none or both of its bits must be set; it is
+ * an error to set only one of them.
+ *
+ * @ns_mask: "ns" stands for "new state".
+ * New power state for the islands selected by si_mask; bits of ns_mask
+ * that are not set in si_mask are ignored.
+ * An island's bits are 0b00 for full power off and 0b11 for full power
+ * on; the intermediate values (0b01 and 0b10) may also be specified.
+ *
+ * Bit field values:
+ *   TNG_SSC_I0    0b00      - i0 - power on, no clock or power gating
+ *   TNG_SSC_I1    0b01      - i1 - clock gated
+ *   TNG_SSC_I2    0b10      - i2 - soft reset
+ *   TNG_SSC_D3    0b11      - d3 - power off, hw state not retained
+ *
+ * NOTE: Bit mask ns_mask is inverted from the *actual* hardware register
+ * values being used for power control.  This convention was adopted so that
+ * the API accepts 0b11 for full power-on and 0b00 for full power-off.
+ *
+ * Return: 0 on success, or a negative error code.
+ *
+ * Example calls (ignoring return status):
+ * Turn on all gfx islands:
+ *   si_mask = GFX_SLC_LDO_SSC | GFX_SLC_SSC | GFX_SDKCK_SSC | GFX_RSCD_SSC;
+ *   ns_mask = GFX_SLC_LDO_SSC | GFX_SLC_SSC | GFX_SDKCK_SSC | GFX_RSCD_SSC;
+ *   pmu_set_power_state_tng(GFX_SS_PM0, si_mask, ns_mask);
+ * Turn on all gfx islands (another way):
+ *   si_mask = GFX_SLC_LDO_SSC | GFX_SLC_SSC | GFX_SDKCK_SSC | GFX_RSCD_SSC;
+ *   ns_mask = 0xFFFFFFFF;
+ *   pmu_set_power_state_tng(GFX_SS_PM0, si_mask, ns_mask);
+ * Turn off all gfx islands:
+ *   si_mask = GFX_SLC_LDO_SSC | GFX_SLC_SSC | GFX_SDKCK_SSC | GFX_RSCD_SSC;
+ *   ns_mask = 0;
+ *   pmu_set_power_state_tng(GFX_SS_PM0, si_mask, ns_mask);
+ *
+ * Replaces (for Tangier):
+ *    int pmu_nc_set_power_state(int islands, int state_type, int reg_type);
+ */
+int pmu_set_power_state_tng(u32 reg_pm0, u32 si_mask, u32 ns_mask)
+{
+	u32 pwr_cur;
+	u32 pwr_val;
+	int tcount;
+
+#if (defined DEBUG_PM_CMD) && DEBUG_PM_CMD
+	u32 pwr_prev;
+	int pwr_stored;
+#endif
+
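+	/* Discard new-state bits for islands that are not selected in si_mask. */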
+	ns_mask &= si_mask;
+
+#if (defined DEBUG_PM_CMD) && DEBUG_PM_CMD
+	printk(KERN_ALERT "%s(\"%s\"=%#x, %#x, %#x);\n", __func__,
+		pm_cmd_reg_name(reg_pm0), reg_pm0, si_mask, ns_mask);
+#endif
+
+	pwr_cur = intel_mid_msgbus_read32(PUNIT_PORT, reg_pm0);
+
+#if (defined DEBUG_PM_CMD) && DEBUG_PM_CMD
+	printk(KERN_ALERT "%s: before: %s: read: %#x\n",
+		__func__, pm_cmd_reg_name(reg_pm0), pwr_cur);
+#endif
+	/*  Return if already in desired state. */
+	if ((((pwr_cur >> SSC_TO_SSS_SHIFT) ^ ns_mask) & si_mask) == 0)
+		return 0;
+
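+	/* Preserve unselected islands' control bits; set the rest to ns_mask. */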
+	pwr_val = (pwr_cur & ~si_mask) | ns_mask;
+	intel_mid_msgbus_write32(PUNIT_PORT, reg_pm0, pwr_val);
+
+#if (defined DEBUG_PM_CMD) && DEBUG_PM_CMD
+	printk(KERN_ALERT "%s: %s: write: %#x\n",
+		__func__, pm_cmd_reg_name(reg_pm0), pwr_val);
+	pwr_prev = 0;
+	pwr_stored = 0;
+#endif
+
+	for (tcount = 0; ; tcount++) {
+		if (tcount > 50) {
+			WARN(1, "%s: P-Unit PM action request timeout",
+				__func__);
+			return -EBUSY;
+		}
+		pwr_cur = intel_mid_msgbus_read32(PUNIT_PORT, reg_pm0);
+
+#if (defined DEBUG_PM_CMD) && DEBUG_PM_CMD
+		if (!pwr_stored || (pwr_prev != pwr_cur)) {
+			printk(KERN_ALERT
+				"%s: tries=%d: %s: read: %#x\n",
+				__func__, tcount,
+				pm_cmd_reg_name(reg_pm0),
+				pwr_cur);
+			pwr_stored = 1;
+			pwr_prev = pwr_cur;
+		}
+#endif
+
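+		/*
+		 * Done once the status field (bits 31:24) matches the
+		 * requested state for every selected island.
+		 */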
+		if ((((pwr_cur >> SSC_TO_SSS_SHIFT) ^ ns_mask) & si_mask) == 0)
+			break;
+		udelay(10);
+	}
+
+	return 0;
+}
+
+static int __init pmu_nc_poweroff(void)
+{
+	/* Power off DPA */
+	pmu_set_power_state_tng(DSP_SS_PM, DPA_SSC, TNG_COMPOSITE_D3);
+	/* Power off MIO */
+	pmu_set_power_state_tng(MIO_SS_PM, MIO_SSC, TNG_COMPOSITE_D3);
+	/* Power off ISP */
+	pmu_set_power_state_tng(ISP_SS_PM0, ISP_SSC, TNG_COMPOSITE_D3);
+	return 0;
+}
+
+late_initcall(pmu_nc_poweroff);
+
diff --git a/arch/x86/platform/intel-mid/pmu_tng.h b/arch/x86/platform/intel-mid/pmu_tng.h
new file mode 100644
index 0000000..ef91013
--- /dev/null
+++ b/arch/x86/platform/intel-mid/pmu_tng.h
@@ -0,0 +1,180 @@
+/**************************************************************************
+ * Copyright (c) 2012, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Dale B. Stimson <dale.b.stimson@intel.com>
+ *    Austin Hu <austin.hu@intel.com>
+ */
+
+#ifndef _PMU_TNG_H_
+#define _PMU_TNG_H_
+
+#include <linux/types.h>
+
+/* Per TNG Punit HAS */
+
+#define PUNIT_PORT              0x04
+
+/*
+ * Registers on msgbus port 4 (p-unit) for power/freq control.
+ * Bits 7:0 of the PM0 (or just PM) registers are power control bits, whereas
+ * bits 31:24 are the corresponding status bits.
+ */
+
+/*  Subsystem status of all North Cluster IPs (bits NC_PM_SSS_*) */
+#define NC_PM_SSS               0x3f
+
+/*
+ * Bit masks for power islands, as present in PM0 or PM registers.
+ * These reside as control bits in bits 7:0 of each register and
+ * as status bits in bits 31:24 of each register.
+ * Each power island has a 2-bit field which contains a value of TNG_SSC_*.
+ */
+#define SSC_TO_SSS_SHIFT        24
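+/*
+ * Example: a control field at bits 1:0 reports its status at bits 25:24,
+ * so (reg >> SSC_TO_SSS_SHIFT) lines the status bits up with the control
+ * bits.
+ */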
+
+/* GFX_SS_PM0 island */
+#define GFX_SS_PM0              0x30
+#define GFX_SS_PM1              0x31
+
+#define GFX_SLC_SSC             0x03
+#define GFX_SDKCK_SSC           0x0c
+#define GFX_RSCD_SSC            0x30
+#define GFX_SLC_LDO_SSC         0xc0
+
+#define GFX_SLC_SHIFT           0
+#define GFX_SDKCK_SHIFT         2
+#define GFX_RSCD_SHIFT          4
+#define GFX_SLC_LDO_SHIFT       6
+
+/* VED_SS_PMx power island */
+#define VED_SS_PM0              0x32
+#define VED_SS_PM1              0x33
+
+#define VED_SSC                 0x03
+
+/* VEC_SS_PMx power island */
+#define VEC_SS_PM0              0x34
+#define VEC_SS_PM1              0x35
+
+#define VEC_SSC                 0x03
+
+/* DSP_SS_PM power islands */
+#define DSP_SS_PM               0x36
+
+#define DPA_SSC                 0x03
+#define DPB_SSC                 0x0c
+#define DPC_SSC                 0x30
+
+#define DPA_SHIFT               0
+#define DPB_SHIFT               2
+#define DPC_SHIFT               4
+
+/* VSP_SS_PMx power islands */
+#define VSP_SS_PM0              0x37
+#define VSP_SS_PM1              0x38
+
+#define VSP_SSC                 0x03
+
+/* ISP_SS_PMx power islands */
+#define ISP_SS_PM0              0x39
+#define ISP_SS_PM1              0x3a
+
+#define ISP_SSC                 0x03
+
+/* MIO_SS_PM power island */
+#define MIO_SS_PM               0x3b
+
+#define MIO_SSC                 0x03
+
+/* HDMIO_SS_PM power island */
+#define HDMIO_SS_PM             0x3c
+
+#define HDMIO_SSC               0x03
+
+/*
+ * Subsystem status bits for NC_PM_SSS.  Status of all North Cluster IPs.
+ * These correspond to the above bits.
+ */
+#define NC_PM_SSS_GFX_SLC       0x00000003
+#define NC_PM_SSS_GFX_SDKCK     0x0000000c
+#define NC_PM_SSS_GFX_RSCD      0x00000030
+#define NC_PM_SSS_VED           0x000000c0
+#define NC_PM_SSS_VEC           0x00000300
+#define NC_PM_SSS_DPA           0x00000c00
+#define NC_PM_SSS_DPB           0x00003000
+#define NC_PM_SSS_DPC           0x0000c000
+#define NC_PM_SSS_VSP           0x00030000
+#define NC_PM_SSS_ISP           0x000c0000
+#define NC_PM_SSS_MIO           0x00300000
+#define NC_PM_SSS_HDMIO         0x00c00000
+#define NC_PM_SSS_GFX_SLC_LDO   0x03000000
+
+/*
+ * Frequency bits for *_PM1 registers above.
+ */
+#define IP_FREQ_VALID     0x80     /* Freq is valid bit */
+
+#define IP_FREQ_SIZE         5     /* number of bits in freq fields */
+#define IP_FREQ_MASK      0x1f     /* Bit mask for freq field */
+
+/*  Positions of various frequency fields */
+#define IP_FREQ_POS          0     /* Freq control [4:0] */
+#define IP_FREQ_GUAR_POS     8     /* Freq guar   [12:8] */
+#define IP_FREQ_STAT_POS    24     /* Freq status [28:24] */
+
+#define IP_FREQ_100_00 0x1f        /* 0b11111 100.00 */
+#define IP_FREQ_106_67 0x1d        /* 0b11101 106.67 */
+#define IP_FREQ_133_30 0x17        /* 0b10111 133.30 */
+#define IP_FREQ_160_00 0x13        /* 0b10011 160.00 */
+#define IP_FREQ_177_78 0x11        /* 0b10001 177.78 */
+#define IP_FREQ_200_00 0x0f        /* 0b01111 200.00 */
+#define IP_FREQ_213_33 0x0e        /* 0b01110 213.33 */
+#define IP_FREQ_266_67 0x0b        /* 0b01011 266.67 */
+#define IP_FREQ_320_00 0x09        /* 0b01001 320.00 */
+#define IP_FREQ_355_56 0x08        /* 0b01000 355.56 */
+#define IP_FREQ_400_00 0x07        /* 0b00111 400.00 */
+#define IP_FREQ_457_14 0x06        /* 0b00110 457.14 */
+#define IP_FREQ_533_33 0x05        /* 0b00101 533.33 */
+#define IP_FREQ_640_00 0x04        /* 0b00100 640.00 */
+#define IP_FREQ_800_00 0x03        /* 0b00011 800.00 */
+#define IP_FREQ_RESUME_SET 0x64
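+/*
+ * Illustrative sequence implied by the field layout above (not verified
+ * against the Punit HAS): write IP_FREQ_VALID | (code << IP_FREQ_POS) to
+ * the IP's *_PM1 register, then poll the field at IP_FREQ_STAT_POS until
+ * it reports the requested code.
+ */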
+
+/*  Tangier power states for each island */
+#define TNG_SSC_I0    (0b00)    /* i0 - power on, no clock or power gating */
+#define TNG_SSC_I1    (0b01)    /* i1 - clock gated */
+#define TNG_SSC_I2    (0b10)    /* i2 - soft reset */
+#define TNG_SSC_D3    (0b11)    /* d3 - power off, hw state not retained */
+
+#define TNG_SSC_MASK  (0b11)    /* bit mask of all involved bits. */
+
+/*  Masks for the completely on and off states for 4 islands */
+#define TNG_COMPOSITE_I0    (0b00000000)
+#define TNG_COMPOSITE_D3    (0b11111111)
+
+/* Define DEBUG_PM_CMD as 1 (e.g. from the build) to trace PM commands. */
+#ifndef DEBUG_PM_CMD
+#define DEBUG_PM_CMD 0
+#endif
+
+int pmu_set_power_state_tng(u32 reg_pm0, u32 si_mask, u32 ns_mask);
+#endif /* ifndef _PMU_TNG_H_ */
diff --git a/arch/x86/platform/mrst/Makefile b/arch/x86/platform/mrst/Makefile
deleted file mode 100644
index af1da7e..0000000
--- a/arch/x86/platform/mrst/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_X86_INTEL_MID)	+= mrst.o
-obj-$(CONFIG_X86_INTEL_MID)	+= vrtc.o
-obj-$(CONFIG_EARLY_PRINTK_INTEL_MID)	+= early_printk_mrst.o
diff --git a/arch/x86/platform/mrst/early_printk_mrst.c b/arch/x86/platform/mrst/early_printk_mrst.c
deleted file mode 100644
index 028454f..0000000
--- a/arch/x86/platform/mrst/early_printk_mrst.c
+++ /dev/null
@@ -1,324 +0,0 @@
-/*
- * early_printk_mrst.c - early consoles for Intel MID platforms
- *
- * Copyright (c) 2008-2010, Intel Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-/*
- * This file implements two early consoles named mrst and hsu.
- * mrst is based on Maxim3110 spi-uart device, it exists in both
- * Moorestown and Medfield platforms, while hsu is based on a High
- * Speed UART device which only exists in the Medfield platform
- */
-
-#include <linux/serial_reg.h>
-#include <linux/serial_mfd.h>
-#include <linux/kmsg_dump.h>
-#include <linux/console.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/io.h>
-
-#include <asm/fixmap.h>
-#include <asm/pgtable.h>
-#include <asm/mrst.h>
-
-#define MRST_SPI_TIMEOUT		0x200000
-#define MRST_REGBASE_SPI0		0xff128000
-#define MRST_REGBASE_SPI1		0xff128400
-#define MRST_CLK_SPI0_REG		0xff11d86c
-
-/* Bit fields in CTRLR0 */
-#define SPI_DFS_OFFSET			0
-
-#define SPI_FRF_OFFSET			4
-#define SPI_FRF_SPI			0x0
-#define SPI_FRF_SSP			0x1
-#define SPI_FRF_MICROWIRE		0x2
-#define SPI_FRF_RESV			0x3
-
-#define SPI_MODE_OFFSET			6
-#define SPI_SCPH_OFFSET			6
-#define SPI_SCOL_OFFSET			7
-#define SPI_TMOD_OFFSET			8
-#define	SPI_TMOD_TR			0x0		/* xmit & recv */
-#define SPI_TMOD_TO			0x1		/* xmit only */
-#define SPI_TMOD_RO			0x2		/* recv only */
-#define SPI_TMOD_EPROMREAD		0x3		/* eeprom read mode */
-
-#define SPI_SLVOE_OFFSET		10
-#define SPI_SRL_OFFSET			11
-#define SPI_CFS_OFFSET			12
-
-/* Bit fields in SR, 7 bits */
-#define SR_MASK				0x7f		/* cover 7 bits */
-#define SR_BUSY				(1 << 0)
-#define SR_TF_NOT_FULL			(1 << 1)
-#define SR_TF_EMPT			(1 << 2)
-#define SR_RF_NOT_EMPT			(1 << 3)
-#define SR_RF_FULL			(1 << 4)
-#define SR_TX_ERR			(1 << 5)
-#define SR_DCOL				(1 << 6)
-
-struct dw_spi_reg {
-	u32	ctrl0;
-	u32	ctrl1;
-	u32	ssienr;
-	u32	mwcr;
-	u32	ser;
-	u32	baudr;
-	u32	txfltr;
-	u32	rxfltr;
-	u32	txflr;
-	u32	rxflr;
-	u32	sr;
-	u32	imr;
-	u32	isr;
-	u32	risr;
-	u32	txoicr;
-	u32	rxoicr;
-	u32	rxuicr;
-	u32	msticr;
-	u32	icr;
-	u32	dmacr;
-	u32	dmatdlr;
-	u32	dmardlr;
-	u32	idr;
-	u32	version;
-
-	/* Currently operates as 32 bits, though only the low 16 bits matter */
-	u32	dr;
-} __packed;
-
-#define dw_readl(dw, name)		__raw_readl(&(dw)->name)
-#define dw_writel(dw, name, val)	__raw_writel((val), &(dw)->name)
-
-/* Default use SPI0 register for mrst, we will detect Penwell and use SPI1 */
-static unsigned long mrst_spi_paddr = MRST_REGBASE_SPI0;
-
-static u32 *pclk_spi0;
-/* Always contains an accessible address, start with 0 */
-static struct dw_spi_reg *pspi;
-
-static struct kmsg_dumper dw_dumper;
-static int dumper_registered;
-
-static void dw_kmsg_dump(struct kmsg_dumper *dumper,
-			 enum kmsg_dump_reason reason)
-{
-	static char line[1024];
-	size_t len;
-
-	/* When run to this, we'd better re-init the HW */
-	mrst_early_console_init();
-
-	while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len))
-		early_mrst_console.write(&early_mrst_console, line, len);
-}
-
-/* Set the ratio rate to 115200, 8n1, IRQ disabled */
-static void max3110_write_config(void)
-{
-	u16 config;
-
-	config = 0xc001;
-	dw_writel(pspi, dr, config);
-}
-
-/* Translate char to a eligible word and send to max3110 */
-static void max3110_write_data(char c)
-{
-	u16 data;
-
-	data = 0x8000 | c;
-	dw_writel(pspi, dr, data);
-}
-
-void mrst_early_console_init(void)
-{
-	u32 ctrlr0 = 0;
-	u32 spi0_cdiv;
-	u32 freq; /* Freqency info only need be searched once */
-
-	/* Base clk is 100 MHz, the actual clk = 100M / (clk_divider + 1) */
-	pclk_spi0 = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
-							MRST_CLK_SPI0_REG);
-	spi0_cdiv = ((*pclk_spi0) & 0xe00) >> 9;
-	freq = 100000000 / (spi0_cdiv + 1);
-
-	if (mrst_identify_cpu() == MRST_CPU_CHIP_PENWELL)
-		mrst_spi_paddr = MRST_REGBASE_SPI1;
-
-	pspi = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
-						mrst_spi_paddr);
-
-	/* Disable SPI controller */
-	dw_writel(pspi, ssienr, 0);
-
-	/* Set control param, 8 bits, transmit only mode */
-	ctrlr0 = dw_readl(pspi, ctrl0);
-
-	ctrlr0 &= 0xfcc0;
-	ctrlr0 |= 0xf | (SPI_FRF_SPI << SPI_FRF_OFFSET)
-		      | (SPI_TMOD_TO << SPI_TMOD_OFFSET);
-	dw_writel(pspi, ctrl0, ctrlr0);
-
-	/*
-	 * Change the spi0 clk to comply with 115200 bps, use 100000 to
-	 * calculate the clk dividor to make the clock a little slower
-	 * than real baud rate.
-	 */
-	dw_writel(pspi, baudr, freq/100000);
-
-	/* Disable all INT for early phase */
-	dw_writel(pspi, imr, 0x0);
-
-	/* Set the cs to spi-uart */
-	dw_writel(pspi, ser, 0x2);
-
-	/* Enable the HW, the last step for HW init */
-	dw_writel(pspi, ssienr, 0x1);
-
-	/* Set the default configuration */
-	max3110_write_config();
-
-	/* Register the kmsg dumper */
-	if (!dumper_registered) {
-		dw_dumper.dump = dw_kmsg_dump;
-		kmsg_dump_register(&dw_dumper);
-		dumper_registered = 1;
-	}
-}
-
-/* Slave select should be called in the read/write function */
-static void early_mrst_spi_putc(char c)
-{
-	unsigned int timeout;
-	u32 sr;
-
-	timeout = MRST_SPI_TIMEOUT;
-	/* Early putc needs to make sure the TX FIFO is not full */
-	while (--timeout) {
-		sr = dw_readl(pspi, sr);
-		if (!(sr & SR_TF_NOT_FULL))
-			cpu_relax();
-		else
-			break;
-	}
-
-	if (!timeout)
-		pr_warning("MRST earlycon: timed out\n");
-	else
-		max3110_write_data(c);
-}
-
-/* Early SPI only uses polling mode */
-static void early_mrst_spi_write(struct console *con, const char *str, unsigned n)
-{
-	int i;
-
-	for (i = 0; i < n && *str; i++) {
-		if (*str == '\n')
-			early_mrst_spi_putc('\r');
-		early_mrst_spi_putc(*str);
-		str++;
-	}
-}
-
-struct console early_mrst_console = {
-	.name =		"earlymrst",
-	.write =	early_mrst_spi_write,
-	.flags =	CON_PRINTBUFFER,
-	.index =	-1,
-};
-
-/*
- * Following is the early console based on Medfield HSU (High
- * Speed UART) device.
- */
-#define HSU_PORT_BASE		0xffa28080
-
-static void __iomem *phsu;
-
-void hsu_early_console_init(const char *s)
-{
-	unsigned long paddr, port = 0;
-	u8 lcr;
-
-	/*
-	 * Select the early HSU console port if specified by user in the
-	 * kernel command line.
-	 */
-	if (*s && !kstrtoul(s, 10, &port))
-		port = clamp_val(port, 0, 2);
-
-	paddr = HSU_PORT_BASE + port * 0x80;
-	phsu = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE, paddr);
-
-	/* Disable FIFO */
-	writeb(0x0, phsu + UART_FCR);
-
-	/* Set to default 115200 bps, 8n1 */
-	lcr = readb(phsu + UART_LCR);
-	writeb((0x80 | lcr), phsu + UART_LCR);
-	writeb(0x18, phsu + UART_DLL);
-	writeb(lcr,  phsu + UART_LCR);
-	writel(0x3600, phsu + UART_MUL*4);
-
-	writeb(0x8, phsu + UART_MCR);
-	writeb(0x7, phsu + UART_FCR);
-	writeb(0x3, phsu + UART_LCR);
-
-	/* Clear IRQ status */
-	readb(phsu + UART_LSR);
-	readb(phsu + UART_RX);
-	readb(phsu + UART_IIR);
-	readb(phsu + UART_MSR);
-
-	/* Enable FIFO */
-	writeb(0x7, phsu + UART_FCR);
-}
-
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
-static void early_hsu_putc(char ch)
-{
-	unsigned int timeout = 10000; /* 10ms */
-	u8 status;
-
-	while (--timeout) {
-		status = readb(phsu + UART_LSR);
-		if (status & BOTH_EMPTY)
-			break;
-		udelay(1);
-	}
-
-	/* Only write the char when there was no timeout */
-	if (timeout)
-		writeb(ch, phsu + UART_TX);
-}
-
-static void early_hsu_write(struct console *con, const char *str, unsigned n)
-{
-	int i;
-
-	for (i = 0; i < n && *str; i++) {
-		if (*str == '\n')
-			early_hsu_putc('\r');
-		early_hsu_putc(*str);
-		str++;
-	}
-}
-
-struct console early_hsu_console = {
-	.name =		"earlyhsu",
-	.write =	early_hsu_write,
-	.flags =	CON_PRINTBUFFER,
-	.index =	-1,
-};
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
deleted file mode 100644
index a0a0a43..0000000
--- a/arch/x86/platform/mrst/mrst.c
+++ /dev/null
@@ -1,1052 +0,0 @@
-/*
- * mrst.c: Intel Moorestown platform specific setup code
- *
- * (C) Copyright 2008 Intel Corporation
- * Author: Jacob Pan (jacob.jun.pan@intel.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-#define pr_fmt(fmt) "mrst: " fmt
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/scatterlist.h>
-#include <linux/sfi.h>
-#include <linux/intel_pmic_gpio.h>
-#include <linux/spi/spi.h>
-#include <linux/i2c.h>
-#include <linux/i2c/pca953x.h>
-#include <linux/gpio_keys.h>
-#include <linux/input.h>
-#include <linux/platform_device.h>
-#include <linux/irq.h>
-#include <linux/module.h>
-#include <linux/notifier.h>
-#include <linux/mfd/intel_msic.h>
-#include <linux/gpio.h>
-#include <linux/i2c/tc35876x.h>
-
-#include <asm/setup.h>
-#include <asm/mpspec_def.h>
-#include <asm/hw_irq.h>
-#include <asm/apic.h>
-#include <asm/io_apic.h>
-#include <asm/mrst.h>
-#include <asm/mrst-vrtc.h>
-#include <asm/io.h>
-#include <asm/i8259.h>
-#include <asm/intel_scu_ipc.h>
-#include <asm/apb_timer.h>
-#include <asm/reboot.h>
-
-/*
- * the clockevent devices on Moorestown/Medfield can be APBT or LAPIC clock,
- * cmdline option x86_mrst_timer can be used to override the configuration
- * to prefer one or the other.
- * at runtime, there are basically three timer configurations:
- * 1. per cpu apbt clock only
- * 2. per cpu always-on lapic clocks only, this is Penwell/Medfield only
- * 3. per cpu lapic clock (C3STOP) and one apbt clock, with broadcast.
- *
- * by default (without cmdline option), platform code first detects cpu type
- * to see if we are on lincroft or penwell, then set up both lapic or apbt
- * clocks accordingly.
- * i.e. by default, medfield uses configuration #2, moorestown uses #1.
- * config #3 is supported but not recommended on medfield.
- *
- * rating and feature summary:
- * lapic (with C3STOP) --------- 100
- * apbt (always-on) ------------ 110
- * lapic (always-on,ARAT) ------ 150
- */
-
-__cpuinitdata enum mrst_timer_options mrst_timer_options;
-
-static u32 sfi_mtimer_usage[SFI_MTMR_MAX_NUM];
-static struct sfi_timer_table_entry sfi_mtimer_array[SFI_MTMR_MAX_NUM];
-enum mrst_cpu_type __mrst_cpu_chip;
-EXPORT_SYMBOL_GPL(__mrst_cpu_chip);
-
-int sfi_mtimer_num;
-
-struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
-EXPORT_SYMBOL_GPL(sfi_mrtc_array);
-int sfi_mrtc_num;
-
-static void mrst_power_off(void)
-{
-}
-
-static void mrst_reboot(void)
-{
-	intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
-}
-
-/* parse all the mtimer info to a static mtimer array */
-static int __init sfi_parse_mtmr(struct sfi_table_header *table)
-{
-	struct sfi_table_simple *sb;
-	struct sfi_timer_table_entry *pentry;
-	struct mpc_intsrc mp_irq;
-	int totallen;
-
-	sb = (struct sfi_table_simple *)table;
-	if (!sfi_mtimer_num) {
-		sfi_mtimer_num = SFI_GET_NUM_ENTRIES(sb,
-					struct sfi_timer_table_entry);
-		pentry = (struct sfi_timer_table_entry *) sb->pentry;
-		totallen = sfi_mtimer_num * sizeof(*pentry);
-		memcpy(sfi_mtimer_array, pentry, totallen);
-	}
-
-	pr_debug("SFI MTIMER info (num = %d):\n", sfi_mtimer_num);
-	pentry = sfi_mtimer_array;
-	for (totallen = 0; totallen < sfi_mtimer_num; totallen++, pentry++) {
-		pr_debug("timer[%d]: paddr = 0x%08x, freq = %dHz,"
-			" irq = %d\n", totallen, (u32)pentry->phys_addr,
-			pentry->freq_hz, pentry->irq);
-			if (!pentry->irq)
-				continue;
-			mp_irq.type = MP_INTSRC;
-			mp_irq.irqtype = mp_INT;
-/* triggering mode edge bit 2-3, active high polarity bit 0-1 */
-			mp_irq.irqflag = 5;
-			mp_irq.srcbus = MP_BUS_ISA;
-			mp_irq.srcbusirq = pentry->irq;	/* IRQ */
-			mp_irq.dstapic = MP_APIC_ALL;
-			mp_irq.dstirq = pentry->irq;
-			mp_save_irq(&mp_irq);
-	}
-
-	return 0;
-}
-
-struct sfi_timer_table_entry *sfi_get_mtmr(int hint)
-{
-	int i;
-	if (hint < sfi_mtimer_num) {
-		if (!sfi_mtimer_usage[hint]) {
-			pr_debug("hint taken for timer %d irq %d\n",\
-				hint, sfi_mtimer_array[hint].irq);
-			sfi_mtimer_usage[hint] = 1;
-			return &sfi_mtimer_array[hint];
-		}
-	}
-	/* take the first timer available */
-	for (i = 0; i < sfi_mtimer_num;) {
-		if (!sfi_mtimer_usage[i]) {
-			sfi_mtimer_usage[i] = 1;
-			return &sfi_mtimer_array[i];
-		}
-		i++;
-	}
-	return NULL;
-}
-
-void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr)
-{
-	int i;
-	for (i = 0; i < sfi_mtimer_num;) {
-		if (mtmr->irq == sfi_mtimer_array[i].irq) {
-			sfi_mtimer_usage[i] = 0;
-			return;
-		}
-		i++;
-	}
-}
-
-/* parse all the mrtc info to a global mrtc array */
-int __init sfi_parse_mrtc(struct sfi_table_header *table)
-{
-	struct sfi_table_simple *sb;
-	struct sfi_rtc_table_entry *pentry;
-	struct mpc_intsrc mp_irq;
-
-	int totallen;
-
-	sb = (struct sfi_table_simple *)table;
-	if (!sfi_mrtc_num) {
-		sfi_mrtc_num = SFI_GET_NUM_ENTRIES(sb,
-						struct sfi_rtc_table_entry);
-		pentry = (struct sfi_rtc_table_entry *)sb->pentry;
-		totallen = sfi_mrtc_num * sizeof(*pentry);
-		memcpy(sfi_mrtc_array, pentry, totallen);
-	}
-
-	pr_debug("SFI RTC info (num = %d):\n", sfi_mrtc_num);
-	pentry = sfi_mrtc_array;
-	for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) {
-		pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n",
-			totallen, (u32)pentry->phys_addr, pentry->irq);
-		mp_irq.type = MP_INTSRC;
-		mp_irq.irqtype = mp_INT;
-		mp_irq.irqflag = 0xf;	/* level trigger and active low */
-		mp_irq.srcbus = MP_BUS_ISA;
-		mp_irq.srcbusirq = pentry->irq;	/* IRQ */
-		mp_irq.dstapic = MP_APIC_ALL;
-		mp_irq.dstirq = pentry->irq;
-		mp_save_irq(&mp_irq);
-	}
-	return 0;
-}
-
-static unsigned long __init mrst_calibrate_tsc(void)
-{
-	unsigned long fast_calibrate;
-	u32 lo, hi, ratio, fsb;
-
-	rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
-	pr_debug("IA32 perf status is 0x%x, 0x%0x\n", lo, hi);
-	ratio = (hi >> 8) & 0x1f;
-	pr_debug("ratio is %d\n", ratio);
-	if (!ratio) {
-		pr_err("read a zero ratio, should be incorrect!\n");
-		pr_err("force tsc ratio to 16 ...\n");
-		ratio = 16;
-	}
-	rdmsr(MSR_FSB_FREQ, lo, hi);
-	if ((lo & 0x7) == 0x7)
-		fsb = PENWELL_FSB_FREQ_83SKU;
-	else
-		fsb = PENWELL_FSB_FREQ_100SKU;
-	fast_calibrate = ratio * fsb;
-	pr_debug("read penwell tsc %lu khz\n", fast_calibrate);
-	lapic_timer_frequency = fsb * 1000 / HZ;
-	/* mark tsc clocksource as reliable */
-	set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
-	
-	if (fast_calibrate)
-		return fast_calibrate;
-
-	return 0;
-}
-
-static void __init mrst_time_init(void)
-{
-	sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, sfi_parse_mtmr);
-	switch (mrst_timer_options) {
-	case MRST_TIMER_APBT_ONLY:
-		break;
-	case MRST_TIMER_LAPIC_APBT:
-		x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
-		x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
-		break;
-	default:
-		if (!boot_cpu_has(X86_FEATURE_ARAT))
-			break;
-		x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
-		x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
-		return;
-	}
-	/* we need at least one APB timer */
-	pre_init_apic_IRQ0();
-	apbt_time_init();
-}
-
-static void __cpuinit mrst_arch_setup(void)
-{
-	if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27)
-		__mrst_cpu_chip = MRST_CPU_CHIP_PENWELL;
-	else {
-		pr_err("Unknown Intel MID CPU (%d:%d), default to Penwell\n",
-			boot_cpu_data.x86, boot_cpu_data.x86_model);
-		__mrst_cpu_chip = MRST_CPU_CHIP_PENWELL;
-	}
-}
-
-/* MID systems don't have i8042 controller */
-static int mrst_i8042_detect(void)
-{
-	return 0;
-}
-
-/*
- * Moorestown does not have external NMI source nor port 0x61 to report
- * NMI status. The possible NMI sources are from pmu as a result of NMI
- * watchdog or lock debug. Reading io port 0x61 results in 0xff which
- * misled NMI handler.
- */
-static unsigned char mrst_get_nmi_reason(void)
-{
-	return 0;
-}
-
-/*
- * Moorestown specific x86_init function overrides and early setup
- * calls.
- */
-void __init x86_mrst_early_setup(void)
-{
-	x86_init.resources.probe_roms = x86_init_noop;
-	x86_init.resources.reserve_resources = x86_init_noop;
-
-	x86_init.timers.timer_init = mrst_time_init;
-	x86_init.timers.setup_percpu_clockev = x86_init_noop;
-
-	x86_init.irqs.pre_vector_init = x86_init_noop;
-
-	x86_init.oem.arch_setup = mrst_arch_setup;
-
-	x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock;
-
-	x86_platform.calibrate_tsc = mrst_calibrate_tsc;
-	x86_platform.i8042_detect = mrst_i8042_detect;
-	x86_init.timers.wallclock_init = mrst_rtc_init;
-	x86_platform.get_nmi_reason = mrst_get_nmi_reason;
-
-	x86_init.pci.init = pci_mrst_init;
-	x86_init.pci.fixup_irqs = x86_init_noop;
-
-	legacy_pic = &null_legacy_pic;
-
-	/* Moorestown specific power_off/restart method */
-	pm_power_off = mrst_power_off;
-	machine_ops.emergency_restart  = mrst_reboot;
-
-	/* Avoid searching for BIOS MP tables */
-	x86_init.mpparse.find_smp_config = x86_init_noop;
-	x86_init.mpparse.get_smp_config = x86_init_uint_noop;
-	set_bit(MP_BUS_ISA, mp_bus_not_pci);
-}
-
-/*
- * if user does not want to use per CPU apb timer, just give it a lower rating
- * than local apic timer and skip the late per cpu timer init.
- */
-static inline int __init setup_x86_mrst_timer(char *arg)
-{
-	if (!arg)
-		return -EINVAL;
-
-	if (strcmp("apbt_only", arg) == 0)
-		mrst_timer_options = MRST_TIMER_APBT_ONLY;
-	else if (strcmp("lapic_and_apbt", arg) == 0)
-		mrst_timer_options = MRST_TIMER_LAPIC_APBT;
-	else {
-		pr_warning("X86 MRST timer option %s not recognised"
-			   " use x86_mrst_timer=apbt_only or lapic_and_apbt\n",
-			   arg);
-		return -EINVAL;
-	}
-	return 0;
-}
-__setup("x86_mrst_timer=", setup_x86_mrst_timer);
-
-/*
- * Parsing GPIO table first, since the DEVS table will need this table
- * to map the pin name to the actual pin.
- */
-static struct sfi_gpio_table_entry *gpio_table;
-static int gpio_num_entry;
-
-static int __init sfi_parse_gpio(struct sfi_table_header *table)
-{
-	struct sfi_table_simple *sb;
-	struct sfi_gpio_table_entry *pentry;
-	int num, i;
-
-	if (gpio_table)
-		return 0;
-	sb = (struct sfi_table_simple *)table;
-	num = SFI_GET_NUM_ENTRIES(sb, struct sfi_gpio_table_entry);
-	pentry = (struct sfi_gpio_table_entry *)sb->pentry;
-
-	gpio_table = kmalloc(num * sizeof(*pentry), GFP_KERNEL);
-	if (!gpio_table)
-		return -1;
-	memcpy(gpio_table, pentry, num * sizeof(*pentry));
-	gpio_num_entry = num;
-
-	pr_debug("GPIO pin info:\n");
-	for (i = 0; i < num; i++, pentry++)
-		pr_debug("info[%2d]: controller = %16.16s, pin_name = %16.16s,"
-		" pin = %d\n", i,
-			pentry->controller_name,
-			pentry->pin_name,
-			pentry->pin_no);
-	return 0;
-}
-
-static int get_gpio_by_name(const char *name)
-{
-	struct sfi_gpio_table_entry *pentry = gpio_table;
-	int i;
-
-	if (!pentry)
-		return -1;
-	for (i = 0; i < gpio_num_entry; i++, pentry++) {
-		if (!strncmp(name, pentry->pin_name, SFI_NAME_LEN))
-			return pentry->pin_no;
-	}
-	return -1;
-}
-
-/*
- * Here defines the array of devices platform data that IAFW would export
- * through SFI "DEVS" table, we use name and type to match the device and
- * its platform data.
- */
-struct devs_id {
-	char name[SFI_NAME_LEN + 1];
-	u8 type;
-	u8 delay;
-	void *(*get_platform_data)(void *info);
-};
-
-/* the offset for the mapping of global gpio pin to irq */
-#define MRST_IRQ_OFFSET 0x100
-
-static void __init *pmic_gpio_platform_data(void *info)
-{
-	static struct intel_pmic_gpio_platform_data pmic_gpio_pdata;
-	int gpio_base = get_gpio_by_name("pmic_gpio_base");
-
-	if (gpio_base == -1)
-		gpio_base = 64;
-	pmic_gpio_pdata.gpio_base = gpio_base;
-	pmic_gpio_pdata.irq_base = gpio_base + MRST_IRQ_OFFSET;
-	pmic_gpio_pdata.gpiointr = 0xffffeff8;
-
-	return &pmic_gpio_pdata;
-}
-
-static void __init *max3111_platform_data(void *info)
-{
-	struct spi_board_info *spi_info = info;
-	int intr = get_gpio_by_name("max3111_int");
-
-	spi_info->mode = SPI_MODE_0;
-	if (intr == -1)
-		return NULL;
-	spi_info->irq = intr + MRST_IRQ_OFFSET;
-	return NULL;
-}
-
-/* we have multiple max7315 on the board ... */
-#define MAX7315_NUM 2
-static void __init *max7315_platform_data(void *info)
-{
-	static struct pca953x_platform_data max7315_pdata[MAX7315_NUM];
-	static int nr;
-	struct pca953x_platform_data *max7315 = &max7315_pdata[nr];
-	struct i2c_board_info *i2c_info = info;
-	int gpio_base, intr;
-	char base_pin_name[SFI_NAME_LEN + 1];
-	char intr_pin_name[SFI_NAME_LEN + 1];
-
-	if (nr == MAX7315_NUM) {
-		pr_err("too many max7315s, we only support %d\n",
-				MAX7315_NUM);
-		return NULL;
-	}
-	/* we have several max7315 on the board, we only need load several
-	 * instances of the same pca953x driver to cover them
-	 */
-	strcpy(i2c_info->type, "max7315");
-	if (nr++) {
-		sprintf(base_pin_name, "max7315_%d_base", nr);
-		sprintf(intr_pin_name, "max7315_%d_int", nr);
-	} else {
-		strcpy(base_pin_name, "max7315_base");
-		strcpy(intr_pin_name, "max7315_int");
-	}
-
-	gpio_base = get_gpio_by_name(base_pin_name);
-	intr = get_gpio_by_name(intr_pin_name);
-
-	if (gpio_base == -1)
-		return NULL;
-	max7315->gpio_base = gpio_base;
-	if (intr != -1) {
-		i2c_info->irq = intr + MRST_IRQ_OFFSET;
-		max7315->irq_base = gpio_base + MRST_IRQ_OFFSET;
-	} else {
-		i2c_info->irq = -1;
-		max7315->irq_base = -1;
-	}
-	return max7315;
-}
-
-static void *tca6416_platform_data(void *info)
-{
-	static struct pca953x_platform_data tca6416;
-	struct i2c_board_info *i2c_info = info;
-	int gpio_base, intr;
-	char base_pin_name[SFI_NAME_LEN + 1];
-	char intr_pin_name[SFI_NAME_LEN + 1];
-
-	strcpy(i2c_info->type, "tca6416");
-	strcpy(base_pin_name, "tca6416_base");
-	strcpy(intr_pin_name, "tca6416_int");
-
-	gpio_base = get_gpio_by_name(base_pin_name);
-	intr = get_gpio_by_name(intr_pin_name);
-
-	if (gpio_base == -1)
-		return NULL;
-	tca6416.gpio_base = gpio_base;
-	if (intr != -1) {
-		i2c_info->irq = intr + MRST_IRQ_OFFSET;
-		tca6416.irq_base = gpio_base + MRST_IRQ_OFFSET;
-	} else {
-		i2c_info->irq = -1;
-		tca6416.irq_base = -1;
-	}
-	return &tca6416;
-}
-
-static void *mpu3050_platform_data(void *info)
-{
-	struct i2c_board_info *i2c_info = info;
-	int intr = get_gpio_by_name("mpu3050_int");
-
-	if (intr == -1)
-		return NULL;
-
-	i2c_info->irq = intr + MRST_IRQ_OFFSET;
-	return NULL;
-}
-
-static void __init *emc1403_platform_data(void *info)
-{
-	static short intr2nd_pdata;
-	struct i2c_board_info *i2c_info = info;
-	int intr = get_gpio_by_name("thermal_int");
-	int intr2nd = get_gpio_by_name("thermal_alert");
-
-	if (intr == -1 || intr2nd == -1)
-		return NULL;
-
-	i2c_info->irq = intr + MRST_IRQ_OFFSET;
-	intr2nd_pdata = intr2nd + MRST_IRQ_OFFSET;
-
-	return &intr2nd_pdata;
-}
-
-static void __init *lis331dl_platform_data(void *info)
-{
-	static short intr2nd_pdata;
-	struct i2c_board_info *i2c_info = info;
-	int intr = get_gpio_by_name("accel_int");
-	int intr2nd = get_gpio_by_name("accel_2");
-
-	if (intr == -1 || intr2nd == -1)
-		return NULL;
-
-	i2c_info->irq = intr + MRST_IRQ_OFFSET;
-	intr2nd_pdata = intr2nd + MRST_IRQ_OFFSET;
-
-	return &intr2nd_pdata;
-}
-
-static void __init *no_platform_data(void *info)
-{
-	return NULL;
-}
-
-static struct resource msic_resources[] = {
-	{
-		.start	= INTEL_MSIC_IRQ_PHYS_BASE,
-		.end	= INTEL_MSIC_IRQ_PHYS_BASE + 64 - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-};
-
-static struct intel_msic_platform_data msic_pdata;
-
-static struct platform_device msic_device = {
-	.name		= "intel_msic",
-	.id		= -1,
-	.dev		= {
-		.platform_data	= &msic_pdata,
-	},
-	.num_resources	= ARRAY_SIZE(msic_resources),
-	.resource	= msic_resources,
-};
-
-static inline bool mrst_has_msic(void)
-{
-	return mrst_identify_cpu() == MRST_CPU_CHIP_PENWELL;
-}
-
-static int msic_scu_status_change(struct notifier_block *nb,
-				  unsigned long code, void *data)
-{
-	if (code == SCU_DOWN) {
-		platform_device_unregister(&msic_device);
-		return 0;
-	}
-
-	return platform_device_register(&msic_device);
-}
-
-static int __init msic_init(void)
-{
-	static struct notifier_block msic_scu_notifier = {
-		.notifier_call	= msic_scu_status_change,
-	};
-
-	/*
-	 * We need to be sure that the SCU IPC is ready before MSIC device
-	 * can be registered.
-	 */
-	if (mrst_has_msic())
-		intel_scu_notifier_add(&msic_scu_notifier);
-
-	return 0;
-}
-arch_initcall(msic_init);
-
-/*
- * msic_generic_platform_data - sets generic platform data for the block
- * @info: pointer to the SFI device table entry for this block
- * @block: MSIC block
- *
- * Function sets IRQ number from the SFI table entry for given device to
- * the MSIC platform data.
- */
-static void *msic_generic_platform_data(void *info, enum intel_msic_block block)
-{
-	struct sfi_device_table_entry *entry = info;
-
-	BUG_ON(block < 0 || block >= INTEL_MSIC_BLOCK_LAST);
-	msic_pdata.irq[block] = entry->irq;
-
-	return no_platform_data(info);
-}
-
-static void *msic_battery_platform_data(void *info)
-{
-	return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_BATTERY);
-}
-
-static void *msic_gpio_platform_data(void *info)
-{
-	static struct intel_msic_gpio_pdata pdata;
-	int gpio = get_gpio_by_name("msic_gpio_base");
-
-	if (gpio < 0)
-		return NULL;
-
-	pdata.gpio_base = gpio;
-	msic_pdata.gpio = &pdata;
-
-	return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_GPIO);
-}
-
-static void *msic_audio_platform_data(void *info)
-{
-	struct platform_device *pdev;
-
-	pdev = platform_device_register_simple("sst-platform", -1, NULL, 0);
-	if (IS_ERR(pdev)) {
-		pr_err("failed to create audio platform device\n");
-		return NULL;
-	}
-
-	return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_AUDIO);
-}
-
-static void *msic_power_btn_platform_data(void *info)
-{
-	return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_POWER_BTN);
-}
-
-static void *msic_ocd_platform_data(void *info)
-{
-	static struct intel_msic_ocd_pdata pdata;
-	int gpio = get_gpio_by_name("ocd_gpio");
-
-	if (gpio < 0)
-		return NULL;
-
-	pdata.gpio = gpio;
-	msic_pdata.ocd = &pdata;
-
-	return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_OCD);
-}
-
-static void *msic_thermal_platform_data(void *info)
-{
-	return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_THERMAL);
-}
-
-/* tc35876x DSI-LVDS bridge chip and panel platform data */
-static void *tc35876x_platform_data(void *data)
-{
-       static struct tc35876x_platform_data pdata;
-
-       /* gpio pins set to -1 will not be used by the driver */
-       pdata.gpio_bridge_reset = get_gpio_by_name("LCMB_RXEN");
-       pdata.gpio_panel_bl_en = get_gpio_by_name("6S6P_BL_EN");
-       pdata.gpio_panel_vadd = get_gpio_by_name("EN_VREG_LCD_V3P3");
-
-       return &pdata;
-}
-
-static const struct devs_id __initconst device_ids[] = {
-	{"bma023", SFI_DEV_TYPE_I2C, 1, &no_platform_data},
-	{"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data},
-	{"pmic_gpio", SFI_DEV_TYPE_IPC, 1, &pmic_gpio_platform_data},
-	{"spi_max3111", SFI_DEV_TYPE_SPI, 0, &max3111_platform_data},
-	{"i2c_max7315", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
-	{"i2c_max7315_2", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
-	{"tca6416", SFI_DEV_TYPE_I2C, 1, &tca6416_platform_data},
-	{"emc1403", SFI_DEV_TYPE_I2C, 1, &emc1403_platform_data},
-	{"i2c_accel", SFI_DEV_TYPE_I2C, 0, &lis331dl_platform_data},
-	{"pmic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data},
-	{"mpu3050", SFI_DEV_TYPE_I2C, 1, &mpu3050_platform_data},
-	{"i2c_disp_brig", SFI_DEV_TYPE_I2C, 0, &tc35876x_platform_data},
-
-	/* MSIC subdevices */
-	{"msic_battery", SFI_DEV_TYPE_IPC, 1, &msic_battery_platform_data},
-	{"msic_gpio", SFI_DEV_TYPE_IPC, 1, &msic_gpio_platform_data},
-	{"msic_audio", SFI_DEV_TYPE_IPC, 1, &msic_audio_platform_data},
-	{"msic_power_btn", SFI_DEV_TYPE_IPC, 1, &msic_power_btn_platform_data},
-	{"msic_ocd", SFI_DEV_TYPE_IPC, 1, &msic_ocd_platform_data},
-	{"msic_thermal", SFI_DEV_TYPE_IPC, 1, &msic_thermal_platform_data},
-
-	{},
-};
-
-#define MAX_IPCDEVS	24
-static struct platform_device *ipc_devs[MAX_IPCDEVS];
-static int ipc_next_dev;
-
-#define MAX_SCU_SPI	24
-static struct spi_board_info *spi_devs[MAX_SCU_SPI];
-static int spi_next_dev;
-
-#define MAX_SCU_I2C	24
-static struct i2c_board_info *i2c_devs[MAX_SCU_I2C];
-static int i2c_bus[MAX_SCU_I2C];
-static int i2c_next_dev;
-
-static void __init intel_scu_device_register(struct platform_device *pdev)
-{
-	if(ipc_next_dev == MAX_IPCDEVS)
-		pr_err("too many SCU IPC devices");
-	else
-		ipc_devs[ipc_next_dev++] = pdev;
-}
-
-static void __init intel_scu_spi_device_register(struct spi_board_info *sdev)
-{
-	struct spi_board_info *new_dev;
-
-	if (spi_next_dev == MAX_SCU_SPI) {
-		pr_err("too many SCU SPI devices");
-		return;
-	}
-
-	new_dev = kzalloc(sizeof(*sdev), GFP_KERNEL);
-	if (!new_dev) {
-		pr_err("failed to alloc mem for delayed spi dev %s\n",
-			sdev->modalias);
-		return;
-	}
-	memcpy(new_dev, sdev, sizeof(*sdev));
-
-	spi_devs[spi_next_dev++] = new_dev;
-}
-
-static void __init intel_scu_i2c_device_register(int bus,
-						struct i2c_board_info *idev)
-{
-	struct i2c_board_info *new_dev;
-
-	if (i2c_next_dev == MAX_SCU_I2C) {
-		pr_err("too many SCU I2C devices");
-		return;
-	}
-
-	new_dev = kzalloc(sizeof(*idev), GFP_KERNEL);
-	if (!new_dev) {
-		pr_err("failed to alloc mem for delayed i2c dev %s\n",
-			idev->type);
-		return;
-	}
-	memcpy(new_dev, idev, sizeof(*idev));
-
-	i2c_bus[i2c_next_dev] = bus;
-	i2c_devs[i2c_next_dev++] = new_dev;
-}
-
-BLOCKING_NOTIFIER_HEAD(intel_scu_notifier);
-EXPORT_SYMBOL_GPL(intel_scu_notifier);
-
-/* Called by IPC driver */
-void intel_scu_devices_create(void)
-{
-	int i;
-
-	for (i = 0; i < ipc_next_dev; i++)
-		platform_device_add(ipc_devs[i]);
-
-	for (i = 0; i < spi_next_dev; i++)
-		spi_register_board_info(spi_devs[i], 1);
-
-	for (i = 0; i < i2c_next_dev; i++) {
-		struct i2c_adapter *adapter;
-		struct i2c_client *client;
-
-		adapter = i2c_get_adapter(i2c_bus[i]);
-		if (adapter) {
-			client = i2c_new_device(adapter, i2c_devs[i]);
-			if (!client)
-				pr_err("can't create i2c device %s\n",
-					i2c_devs[i]->type);
-		} else
-			i2c_register_board_info(i2c_bus[i], i2c_devs[i], 1);
-	}
-	intel_scu_notifier_post(SCU_AVAILABLE, NULL);
-}
-EXPORT_SYMBOL_GPL(intel_scu_devices_create);
-
-/* Called by IPC driver */
-void intel_scu_devices_destroy(void)
-{
-	int i;
-
-	intel_scu_notifier_post(SCU_DOWN, NULL);
-
-	for (i = 0; i < ipc_next_dev; i++)
-		platform_device_del(ipc_devs[i]);
-}
-EXPORT_SYMBOL_GPL(intel_scu_devices_destroy);
-
-static void __init install_irq_resource(struct platform_device *pdev, int irq)
-{
-	/* Single threaded */
-	static struct resource __initdata res = {
-		.name = "IRQ",
-		.flags = IORESOURCE_IRQ,
-	};
-	res.start = irq;
-	platform_device_add_resources(pdev, &res, 1);
-}
-
-static void __init sfi_handle_ipc_dev(struct sfi_device_table_entry *entry)
-{
-	const struct devs_id *dev = device_ids;
-	struct platform_device *pdev;
-	void *pdata = NULL;
-
-	while (dev->name[0]) {
-		if (dev->type == SFI_DEV_TYPE_IPC &&
-			!strncmp(dev->name, entry->name, SFI_NAME_LEN)) {
-			pdata = dev->get_platform_data(entry);
-			break;
-		}
-		dev++;
-	}
-
-	/*
-	 * On Medfield the platform device creation is handled by the MSIC
-	 * MFD driver so we don't need to do it here.
-	 */
-	if (mrst_has_msic())
-		return;
-
-	pdev = platform_device_alloc(entry->name, 0);
-	if (pdev == NULL) {
-		pr_err("out of memory for SFI platform device '%s'.\n",
-			entry->name);
-		return;
-	}
-	install_irq_resource(pdev, entry->irq);
-
-	pdev->dev.platform_data = pdata;
-	intel_scu_device_register(pdev);
-}
-
-static void __init sfi_handle_spi_dev(struct spi_board_info *spi_info)
-{
-	const struct devs_id *dev = device_ids;
-	void *pdata = NULL;
-
-	while (dev->name[0]) {
-		if (dev->type == SFI_DEV_TYPE_SPI &&
-				!strncmp(dev->name, spi_info->modalias, SFI_NAME_LEN)) {
-			pdata = dev->get_platform_data(spi_info);
-			break;
-		}
-		dev++;
-	}
-	spi_info->platform_data = pdata;
-	if (dev->delay)
-		intel_scu_spi_device_register(spi_info);
-	else
-		spi_register_board_info(spi_info, 1);
-}
-
-static void __init sfi_handle_i2c_dev(int bus, struct i2c_board_info *i2c_info)
-{
-	const struct devs_id *dev = device_ids;
-	void *pdata = NULL;
-
-	while (dev->name[0]) {
-		if (dev->type == SFI_DEV_TYPE_I2C &&
-			!strncmp(dev->name, i2c_info->type, SFI_NAME_LEN)) {
-			pdata = dev->get_platform_data(i2c_info);
-			break;
-		}
-		dev++;
-	}
-	i2c_info->platform_data = pdata;
-
-	if (dev->delay)
-		intel_scu_i2c_device_register(bus, i2c_info);
-	else
-		i2c_register_board_info(bus, i2c_info, 1);
- }
-
-
-static int __init sfi_parse_devs(struct sfi_table_header *table)
-{
-	struct sfi_table_simple *sb;
-	struct sfi_device_table_entry *pentry;
-	struct spi_board_info spi_info;
-	struct i2c_board_info i2c_info;
-	int num, i, bus;
-	int ioapic;
-	struct io_apic_irq_attr irq_attr;
-
-	sb = (struct sfi_table_simple *)table;
-	num = SFI_GET_NUM_ENTRIES(sb, struct sfi_device_table_entry);
-	pentry = (struct sfi_device_table_entry *)sb->pentry;
-
-	for (i = 0; i < num; i++, pentry++) {
-		int irq = pentry->irq;
-
-		if (irq != (u8)0xff) { /* native RTE case */
-			/* these SPI2 devices are not exposed to system as PCI
-			 * devices, but they have separate RTE entry in IOAPIC
-			 * so we have to enable them one by one here
-			 */
-			ioapic = mp_find_ioapic(irq);
-			irq_attr.ioapic = ioapic;
-			irq_attr.ioapic_pin = irq;
-			irq_attr.trigger = 1;
-			irq_attr.polarity = 1;
-			io_apic_set_pci_routing(NULL, irq, &irq_attr);
-		} else
-			irq = 0; /* No irq */
-
-		switch (pentry->type) {
-		case SFI_DEV_TYPE_IPC:
-			pr_debug("info[%2d]: IPC bus, name = %16.16s, "
-				"irq = 0x%2x\n", i, pentry->name, pentry->irq);
-			sfi_handle_ipc_dev(pentry);
-			break;
-		case SFI_DEV_TYPE_SPI:
-			memset(&spi_info, 0, sizeof(spi_info));
-			strncpy(spi_info.modalias, pentry->name, SFI_NAME_LEN);
-			spi_info.irq = irq;
-			spi_info.bus_num = pentry->host_num;
-			spi_info.chip_select = pentry->addr;
-			spi_info.max_speed_hz = pentry->max_freq;
-			pr_debug("info[%2d]: SPI bus = %d, name = %16.16s, "
-				"irq = 0x%2x, max_freq = %d, cs = %d\n", i,
-				spi_info.bus_num,
-				spi_info.modalias,
-				spi_info.irq,
-				spi_info.max_speed_hz,
-				spi_info.chip_select);
-			sfi_handle_spi_dev(&spi_info);
-			break;
-		case SFI_DEV_TYPE_I2C:
-			memset(&i2c_info, 0, sizeof(i2c_info));
-			bus = pentry->host_num;
-			strncpy(i2c_info.type, pentry->name, SFI_NAME_LEN);
-			i2c_info.irq = irq;
-			i2c_info.addr = pentry->addr;
-			pr_debug("info[%2d]: I2C bus = %d, name = %16.16s, "
-				"irq = 0x%2x, addr = 0x%x\n", i, bus,
-				i2c_info.type,
-				i2c_info.irq,
-				i2c_info.addr);
-			sfi_handle_i2c_dev(bus, &i2c_info);
-			break;
-		case SFI_DEV_TYPE_UART:
-		case SFI_DEV_TYPE_HSI:
-		default:
-			;
-		}
-	}
-	return 0;
-}
-
-static int __init mrst_platform_init(void)
-{
-	sfi_table_parse(SFI_SIG_GPIO, NULL, NULL, sfi_parse_gpio);
-	sfi_table_parse(SFI_SIG_DEVS, NULL, NULL, sfi_parse_devs);
-	return 0;
-}
-arch_initcall(mrst_platform_init);
-
-/*
- * we will search these buttons in SFI GPIO table (by name)
- * and register them dynamically. Please add all possible
- * buttons here, we will shrink them if no GPIO found.
- */
-static struct gpio_keys_button gpio_button[] = {
-	{KEY_POWER,		-1, 1, "power_btn",	EV_KEY, 0, 3000},
-	{KEY_PROG1,		-1, 1, "prog_btn1",	EV_KEY, 0, 20},
-	{KEY_PROG2,		-1, 1, "prog_btn2",	EV_KEY, 0, 20},
-	{SW_LID,		-1, 1, "lid_switch",	EV_SW,  0, 20},
-	{KEY_VOLUMEUP,		-1, 1, "vol_up",	EV_KEY, 0, 20},
-	{KEY_VOLUMEDOWN,	-1, 1, "vol_down",	EV_KEY, 0, 20},
-	{KEY_CAMERA,		-1, 1, "camera_full",	EV_KEY, 0, 20},
-	{KEY_CAMERA_FOCUS,	-1, 1, "camera_half",	EV_KEY, 0, 20},
-	{SW_KEYPAD_SLIDE,	-1, 1, "MagSw1",	EV_SW,  0, 20},
-	{SW_KEYPAD_SLIDE,	-1, 1, "MagSw2",	EV_SW,  0, 20},
-};
-
-static struct gpio_keys_platform_data mrst_gpio_keys = {
-	.buttons	= gpio_button,
-	.rep		= 1,
-	.nbuttons	= -1, /* will fill it after search */
-};
-
-static struct platform_device pb_device = {
-	.name		= "gpio-keys",
-	.id		= -1,
-	.dev		= {
-		.platform_data	= &mrst_gpio_keys,
-	},
-};
-
-/*
- * Shrink the non-existent buttons, register the gpio button
- * device if there is some
- */
-static int __init pb_keys_init(void)
-{
-	struct gpio_keys_button *gb = gpio_button;
-	int i, num, good = 0;
-
-	num = sizeof(gpio_button) / sizeof(struct gpio_keys_button);
-	for (i = 0; i < num; i++) {
-		gb[i].gpio = get_gpio_by_name(gb[i].desc);
-		pr_debug("info[%2d]: name = %s, gpio = %d\n", i, gb[i].desc, gb[i].gpio);
-		if (gb[i].gpio == -1)
-			continue;
-
-		if (i != good)
-			gb[good] = gb[i];
-		good++;
-	}
-
-	if (good) {
-		mrst_gpio_keys.nbuttons = good;
-		return platform_device_register(&pb_device);
-	}
-	return 0;
-}
-late_initcall(pb_keys_init);
diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/mrst/vrtc.c
deleted file mode 100644
index d62b0a3..0000000
--- a/arch/x86/platform/mrst/vrtc.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * vrtc.c: Driver for virtual RTC device on Intel MID platform
- *
- * (C) Copyright 2009 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- *
- * Note:
- * VRTC is emulated by system controller firmware, the real HW
- * RTC is located in the PMIC device. SCU FW shadows PMIC RTC
- * in a memory mapped IO space that is visible to the host IA
- * processor.
- *
- * This driver is based on RTC CMOS driver.
- */
-
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/sfi.h>
-#include <linux/platform_device.h>
-
-#include <asm/mrst.h>
-#include <asm/mrst-vrtc.h>
-#include <asm/time.h>
-#include <asm/fixmap.h>
-
-static unsigned char __iomem *vrtc_virt_base;
-
-unsigned char vrtc_cmos_read(unsigned char reg)
-{
-	unsigned char retval;
-
-	/* vRTC's registers range from 0x0 to 0xD */
-	if (reg > 0xd || !vrtc_virt_base)
-		return 0xff;
-
-	lock_cmos_prefix(reg);
-	retval = __raw_readb(vrtc_virt_base + (reg << 2));
-	lock_cmos_suffix(reg);
-	return retval;
-}
-EXPORT_SYMBOL_GPL(vrtc_cmos_read);
-
-void vrtc_cmos_write(unsigned char val, unsigned char reg)
-{
-	if (reg > 0xd || !vrtc_virt_base)
-		return;
-
-	lock_cmos_prefix(reg);
-	__raw_writeb(val, vrtc_virt_base + (reg << 2));
-	lock_cmos_suffix(reg);
-}
-EXPORT_SYMBOL_GPL(vrtc_cmos_write);
-
-unsigned long vrtc_get_time(void)
-{
-	u8 sec, min, hour, mday, mon;
-	unsigned long flags;
-	u32 year;
-
-	spin_lock_irqsave(&rtc_lock, flags);
-
-	while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP))
-		cpu_relax();
-
-	sec = vrtc_cmos_read(RTC_SECONDS);
-	min = vrtc_cmos_read(RTC_MINUTES);
-	hour = vrtc_cmos_read(RTC_HOURS);
-	mday = vrtc_cmos_read(RTC_DAY_OF_MONTH);
-	mon = vrtc_cmos_read(RTC_MONTH);
-	year = vrtc_cmos_read(RTC_YEAR);
-
-	spin_unlock_irqrestore(&rtc_lock, flags);
-
-	/* vRTC YEAR reg contains the offset to 1972 */
-	year += 1972;
-
-	printk(KERN_INFO "vRTC: sec: %d min: %d hour: %d day: %d "
-		"mon: %d year: %d\n", sec, min, hour, mday, mon, year);
-
-	return mktime(year, mon, mday, hour, min, sec);
-}
-
-int vrtc_set_mmss(unsigned long nowtime)
-{
-	unsigned long flags;
-	struct rtc_time tm;
-	int year;
-	int retval = 0;
-
-	rtc_time_to_tm(nowtime, &tm);
-	if (!rtc_valid_tm(&tm) && tm.tm_year >= 72) {
-		/*
-		 * tm.year is the number of years since 1900, and the
-		 * vrtc need the years since 1972.
-		 */
-		year = tm.tm_year - 72;
-		spin_lock_irqsave(&rtc_lock, flags);
-		vrtc_cmos_write(year, RTC_YEAR);
-		vrtc_cmos_write(tm.tm_mon, RTC_MONTH);
-		vrtc_cmos_write(tm.tm_mday, RTC_DAY_OF_MONTH);
-		vrtc_cmos_write(tm.tm_hour, RTC_HOURS);
-		vrtc_cmos_write(tm.tm_min, RTC_MINUTES);
-		vrtc_cmos_write(tm.tm_sec, RTC_SECONDS);
-		spin_unlock_irqrestore(&rtc_lock, flags);
-	} else {
-		printk(KERN_ERR
-		       "%s: Invalid vRTC value: write of %lx to vRTC failed\n",
-			__FUNCTION__, nowtime);
-		retval = -EINVAL;
-	}
-	return retval;
-}
-
-void __init mrst_rtc_init(void)
-{
-	unsigned long vrtc_paddr;
-
-	sfi_table_parse(SFI_SIG_MRTC, NULL, NULL, sfi_parse_mrtc);
-
-	vrtc_paddr = sfi_mrtc_array[0].phys_addr;
-	if (!sfi_mrtc_num || !vrtc_paddr)
-		return;
-
-	vrtc_virt_base = (void __iomem *)set_fixmap_offset_nocache(FIX_LNW_VRTC,
-								vrtc_paddr);
-	x86_platform.get_wallclock = vrtc_get_time;
-	x86_platform.set_wallclock = vrtc_set_mmss;
-}
-
-/*
- * The Moorestown platform has a memory mapped virtual RTC device that emulates
- * the programming interface of the RTC.
- */
-
-static struct resource vrtc_resources[] = {
-	[0] = {
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.flags	= IORESOURCE_IRQ,
-	}
-};
-
-static struct platform_device vrtc_device = {
-	.name		= "rtc_mrst",
-	.id		= -1,
-	.resource	= vrtc_resources,
-	.num_resources	= ARRAY_SIZE(vrtc_resources),
-};
-
-/* Register the RTC device if appropriate */
-static int __init mrst_device_create(void)
-{
-	/* No Moorestown, no device */
-	if (!mrst_identify_cpu())
-		return -ENODEV;
-	/* No timer, no device */
-	if (!sfi_mrtc_num)
-		return -ENODEV;
-
-	/* iomem resource */
-	vrtc_resources[0].start = sfi_mrtc_array[0].phys_addr;
-	vrtc_resources[0].end = sfi_mrtc_array[0].phys_addr +
-				MRST_VRTC_MAP_SZ;
-	/* irq resource */
-	vrtc_resources[1].start = sfi_mrtc_array[0].irq;
-	vrtc_resources[1].end = sfi_mrtc_array[0].irq;
-
-	return platform_device_register(&vrtc_device);
-}
-
-module_init(mrst_device_create);
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
index aabfb83..01ed502 100644
--- a/arch/x86/syscalls/syscall_32.tbl
+++ b/arch/x86/syscalls/syscall_32.tbl
@@ -357,3 +357,7 @@
 348	i386	process_vm_writev	sys_process_vm_writev		compat_sys_process_vm_writev
 349	i386	kcmp			sys_kcmp
 350	i386	finit_module		sys_finit_module
+# 351	i386	sched_setattr		sys_sched_setattr
+# 352	i386	sched_getattr		sys_sched_getattr
+# 353	i386	renameat2		sys_renameat2
+354	i386	seccomp			sys_seccomp
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
index 63a8993..c7b4ac7 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/syscalls/syscall_64.tbl
@@ -320,6 +320,10 @@
 311	64	process_vm_writev	sys_process_vm_writev
 312	common	kcmp			sys_kcmp
 313	common	finit_module		sys_finit_module
+# 314	common	sched_setattr		sys_sched_setattr
+# 315	common	sched_getattr		sys_sched_getattr
+# 316	common	renameat2		sys_renameat2
+317	common	seccomp			sys_seccomp
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
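The new entries wire up seccomp(2) at 354 (i386) and 317 (x86_64) while keeping the sched_setattr/sched_getattr/renameat2 slots reserved as comments, so later backports keep upstream numbering. A quick userspace probe can confirm the syscall is reachable; this sketch is not part of the patch and assumes headers that provide __NR_seccomp and SECCOMP_SET_MODE_STRICT (on older toolchains, substitute the numbers from the tables above):

```c
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/seccomp.h>

int main(void)
{
	/* Invalid flags (1) must yield EINVAL when seccomp(2) is wired up;
	 * an unwired syscall slot returns ENOSYS instead. */
	long rc = syscall(__NR_seccomp, SECCOMP_SET_MODE_STRICT, 1, NULL);

	if (rc == -1 && errno == ENOSYS)
		puts("seccomp(2) is not wired up");
	else
		puts("seccomp(2) is reachable");
	return 0;
}
```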
diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c
index ae7319d..5e04a1c 100644
--- a/arch/x86/um/signal.c
+++ b/arch/x86/um/signal.c
@@ -508,7 +508,6 @@
 {
 	struct rt_sigframe __user *frame;
 	int err = 0;
-	struct task_struct *me = current;
 
 	frame = (struct rt_sigframe __user *)
 		round_down(stack_top - sizeof(struct rt_sigframe), 16);
diff --git a/block/genhd.c b/block/genhd.c
index 7af2f6a..fd1946b 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1117,6 +1117,22 @@
 		blk_put_queue(disk->queue);
 	kfree(disk);
 }
+
+static int disk_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct gendisk *disk = dev_to_disk(dev);
+	struct disk_part_iter piter;
+	struct hd_struct *part;
+	int cnt = 0;
+
+	disk_part_iter_init(&piter, disk, 0);
+	while ((part = disk_part_iter_next(&piter)))
+		cnt++;
+	disk_part_iter_exit(&piter);
+	add_uevent_var(env, "NPARTS=%u", cnt);
+	return 0;
+}
+
 struct class block_class = {
 	.name		= "block",
 };
@@ -1136,6 +1152,7 @@
 	.groups		= disk_attr_groups,
 	.release	= disk_release,
 	.devnode	= block_devnode,
+	.uevent		= disk_uevent,
 };
 
 #ifdef CONFIG_PROC_FS
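With disk_uevent() registered on disk_type, every disk uevent (and the sysfs uevent attribute, which is synthesized through the same callback) carries an NPARTS variable with the current partition count. A minimal reader, sketched here with an illustrative disk name:

```c
/* Sketch: print the uevent variables of a disk, including the NPARTS
 * count added by disk_uevent() above. "sda" is illustrative. */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/block/sda/uevent", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. DEVTYPE=disk, NPARTS=4 */
	fclose(f);
	return 0;
}
```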
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 0d9e5f9..47284e7 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -217,10 +217,21 @@
 	kfree(p);
 }
 
+static int part_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct hd_struct *part = dev_to_part(dev);
+
+	add_uevent_var(env, "PARTN=%u", part->partno);
+	if (part->info && part->info->volname[0])
+		add_uevent_var(env, "PARTNAME=%s", part->info->volname);
+	return 0;
+}
+
 struct device_type part_type = {
 	.name		= "partition",
 	.groups		= part_attr_groups,
 	.release	= part_release,
+	.uevent		= part_uevent,
 };
 
 static void delete_partition_rcu_cb(struct rcu_head *head)
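PARTN and PARTNAME are the variables Android's ueventd consumes to build by-name block-device symlinks; any userspace consumer can pick them out of the uevent text. A hedged sketch (the sda1 path is illustrative):

```c
/* Sketch: extract PARTNAME= from a partition uevent -- the key
 * part_uevent() above exports when the partition table carries a
 * volume name. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/block/sda/sda1/uevent", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "PARTNAME=", 9))
			printf("by-name link target would be: %s", line + 9);
	fclose(f);
	return 0;
}
```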
diff --git a/crypto/Kconfig b/crypto/Kconfig
index bf8148e..13c877d 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -21,6 +21,14 @@
 
 comment "Crypto core or helper"
 
+config CRYPTODEV
+        tristate "cryptodev module support"
+        help
+          This is a /dev/crypto device driver, equivalent to those in OpenBSD or
+          FreeBSD. The main idea is to allow access to existing ciphers in kernel
+          space from userspace, thus enabling re-use of a hardware implementation of a
+          cipher.
+
 config CRYPTO_FIPS
 	bool "FIPS 200 compliance"
 	depends on CRYPTO_ANSI_CPRNG && !CRYPTO_MANAGER_DISABLE_TESTS
@@ -179,6 +187,10 @@
 	depends on X86
 	select CRYPTO_CRYPTD
 
+config CRYPTO_ABLK_HELPER
+	tristate
+	select CRYPTO_CRYPTD
+
 config CRYPTO_GLUE_HELPER_X86
 	tristate
 	depends on X86
@@ -529,6 +541,17 @@
 	  This is the powerpc hardware accelerated implementation of the
 	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
 
+config CRYPTO_SHA1_ARM_NEON
+	tristate "SHA1 digest algorithm (ARM NEON)"
+	depends on ARM && KERNEL_MODE_NEON && !CPU_BIG_ENDIAN
+	select CRYPTO_SHA1_ARM
+	select CRYPTO_SHA1
+	select CRYPTO_HASH
+	help
+	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
+	  using optimized ARM NEON assembly, when NEON instructions are
+	  available.
+
 config CRYPTO_SHA256
 	tristate "SHA224 and SHA256 digest algorithm"
 	select CRYPTO_HASH
@@ -541,6 +564,13 @@
 	  This code also includes SHA-224, a 224 bit hash with 112 bits
 	  of security against collision attacks.
 
+config CRYPTO_SHA256_ARM
+	tristate "SHA-224/256 digest algorithm (ARM-asm and NEON)"
+	select CRYPTO_HASH
+	help
+	  SHA-256 secure hash standard (DFIPS 180-2) implemented
+	  using optimized ARM assembler and NEON, when available.
+
 config CRYPTO_SHA256_SPARC64
 	tristate "SHA224 and SHA256 digest algorithm (SPARC64)"
 	depends on SPARC64
@@ -562,6 +592,21 @@
 	  This code also includes SHA-384, a 384 bit hash with 192 bits
 	  of security against collision attacks.
 
+config CRYPTO_SHA512_ARM_NEON
+	tristate "SHA384 and SHA512 digest algorithm (ARM NEON)"
+	depends on ARM && KERNEL_MODE_NEON && !CPU_BIG_ENDIAN
+	select CRYPTO_SHA512
+	select CRYPTO_HASH
+	help
+	  SHA-512 secure hash standard (DFIPS 180-2) implemented
+	  using ARM NEON instructions, when available.
+
+	  This version of SHA implements a 512 bit hash with 256 bits of
+	  security against collision attacks.
+
+	  This code also includes SHA-384, a 384 bit hash with 192 bits
+	  of security against collision attacks.
+
 config CRYPTO_SHA512_SPARC64
 	tristate "SHA384 and SHA512 digest algorithm (SPARC64)"
 	depends on SPARC64
@@ -757,6 +802,22 @@
 
 	  See <http://csrc.nist.gov/encryption/aes/> for more information.
 
+config CRYPTO_AES_ARM_BS
+	tristate "Bit sliced AES using NEON instructions"
+	depends on ARM && KERNEL_MODE_NEON
+	select CRYPTO_ALGAPI
+	select CRYPTO_AES_ARM
+	select CRYPTO_ABLK_HELPER
+	help
+	  Use a faster and more secure NEON-based implementation of AES in CBC,
+	  CTR and XTS modes.
+
+	  Bit sliced AES gives around 45% speedup on Cortex-A15 for CTR mode
+	  and for XTS mode encryption, CBC and XTS mode decryption speedup is
+	  around 25%. (CBC encryption speed is not affected by this driver.)
+	  This implementation does not rely on any lookup tables so it is
+	  believed to be invulnerable to cache timing attacks.
+
 config CRYPTO_ANUBIS
 	tristate "Anubis cipher algorithm"
 	select CRYPTO_ALGAPI
diff --git a/crypto/Makefile b/crypto/Makefile
index a8e9b0f..22033d7 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -101,3 +101,5 @@
 obj-$(CONFIG_XOR_BLOCKS) += xor.o
 obj-$(CONFIG_ASYNC_CORE) += async_tx/
 obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/
+obj-$(CONFIG_CRYPTODEV) += cryptodev/
+obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o
diff --git a/crypto/ablk_helper.c b/crypto/ablk_helper.c
new file mode 100644
index 0000000..ffe7278
--- /dev/null
+++ b/crypto/ablk_helper.c
@@ -0,0 +1,150 @@
+/*
+ * Shared async block cipher helpers
+ *
+ * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ *
+ * Based on aesni-intel_glue.c by:
+ *  Copyright (C) 2008, Intel Corp.
+ *    Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
+ * USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/crypto.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/hardirq.h>
+#include <crypto/algapi.h>
+#include <crypto/cryptd.h>
+#include <crypto/ablk_helper.h>
+#include <asm/simd.h>
+
+int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
+		 unsigned int key_len)
+{
+	struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
+	int err;
+
+	crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+	crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
+				    & CRYPTO_TFM_REQ_MASK);
+	err = crypto_ablkcipher_setkey(child, key, key_len);
+	crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
+				    & CRYPTO_TFM_RES_MASK);
+	return err;
+}
+EXPORT_SYMBOL_GPL(ablk_set_key);
+
+int __ablk_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct blkcipher_desc desc;
+
+	desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
+	desc.info = req->info;
+	desc.flags = 0;
+
+	return crypto_blkcipher_crt(desc.tfm)->encrypt(
+		&desc, req->dst, req->src, req->nbytes);
+}
+EXPORT_SYMBOL_GPL(__ablk_encrypt);
+
+int ablk_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+	if (!may_use_simd()) {
+		struct ablkcipher_request *cryptd_req =
+			ablkcipher_request_ctx(req);
+
+		*cryptd_req = *req;
+		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
+
+		return crypto_ablkcipher_encrypt(cryptd_req);
+	} else {
+		return __ablk_encrypt(req);
+	}
+}
+EXPORT_SYMBOL_GPL(ablk_encrypt);
+
+int ablk_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+	if (!may_use_simd()) {
+		struct ablkcipher_request *cryptd_req =
+			ablkcipher_request_ctx(req);
+
+		*cryptd_req = *req;
+		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
+
+		return crypto_ablkcipher_decrypt(cryptd_req);
+	} else {
+		struct blkcipher_desc desc;
+
+		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
+		desc.info = req->info;
+		desc.flags = 0;
+
+		return crypto_blkcipher_crt(desc.tfm)->decrypt(
+			&desc, req->dst, req->src, req->nbytes);
+	}
+}
+EXPORT_SYMBOL_GPL(ablk_decrypt);
+
+void ablk_exit(struct crypto_tfm *tfm)
+{
+	struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	cryptd_free_ablkcipher(ctx->cryptd_tfm);
+}
+EXPORT_SYMBOL_GPL(ablk_exit);
+
+int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name)
+{
+	struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct cryptd_ablkcipher *cryptd_tfm;
+
+	cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, 0, 0);
+	if (IS_ERR(cryptd_tfm))
+		return PTR_ERR(cryptd_tfm);
+
+	ctx->cryptd_tfm = cryptd_tfm;
+	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
+		crypto_ablkcipher_reqsize(&cryptd_tfm->base);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ablk_init_common);
+
+int ablk_init(struct crypto_tfm *tfm)
+{
+	char drv_name[CRYPTO_MAX_ALG_NAME];
+
+	snprintf(drv_name, sizeof(drv_name), "__driver-%s",
+					crypto_tfm_alg_driver_name(tfm));
+
+	return ablk_init_common(tfm, drv_name);
+}
+EXPORT_SYMBOL_GPL(ablk_init);
+
+MODULE_LICENSE("GPL");
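The helper's contract: ablk_init() allocates a cryptd wrapper around the synchronous "__driver-..." implementation, and ablk_encrypt()/ablk_decrypt() run that inner blkcipher directly when may_use_simd() says the SIMD registers are usable, bouncing the request to the cryptd workqueue otherwise. A glue module adopting it looks roughly like this sketch (modeled on the ARM NEON AES glue; the "-sketch" identifiers are illustrative, not part of this patch):

```c
#include <crypto/ablk_helper.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/module.h>

static struct crypto_alg sketch_cbc_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-neon-sketch",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,	/* binds "__driver-cbc-aes-neon-sketch" via cryptd */
	.cra_exit		= ablk_exit,
	.cra_ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= ablk_set_key,
		.encrypt	= ablk_encrypt,
		.decrypt	= ablk_decrypt,
	},
};

static int __init sketch_init(void)
{
	return crypto_register_alg(&sketch_cbc_alg);
}

static void __exit sketch_exit(void)
{
	crypto_unregister_alg(&sketch_cbc_alg);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");
```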
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 39b09f25..8a66221 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -70,14 +70,12 @@
 	return max(start, end_page);
 }
 
-static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
-					       struct blkcipher_walk *walk,
+static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
 					       unsigned int bsize)
 {
 	u8 *addr;
-	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
 
-	addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
+	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
 	addr = blkcipher_get_spot(addr, bsize);
 	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
 	return bsize;
@@ -105,7 +103,6 @@
 int blkcipher_walk_done(struct blkcipher_desc *desc,
 			struct blkcipher_walk *walk, int err)
 {
-	struct crypto_blkcipher *tfm = desc->tfm;
 	unsigned int nbytes = 0;
 
 	if (likely(err >= 0)) {
@@ -117,7 +114,7 @@
 			err = -EINVAL;
 			goto err;
 		} else
-			n = blkcipher_done_slow(tfm, walk, n);
+			n = blkcipher_done_slow(walk, n);
 
 		nbytes = walk->total - n;
 		err = 0;
@@ -136,7 +133,7 @@
 	}
 
 	if (walk->iv != desc->info)
-		memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
+		memcpy(desc->info, walk->iv, walk->ivsize);
 	if (walk->buffer != walk->page)
 		kfree(walk->buffer);
 	if (walk->page)
@@ -226,24 +223,22 @@
 static int blkcipher_walk_next(struct blkcipher_desc *desc,
 			       struct blkcipher_walk *walk)
 {
-	struct crypto_blkcipher *tfm = desc->tfm;
-	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
 	unsigned int bsize;
 	unsigned int n;
 	int err;
 
 	n = walk->total;
-	if (unlikely(n < crypto_blkcipher_blocksize(tfm))) {
+	if (unlikely(n < walk->cipher_blocksize)) {
 		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
 		return blkcipher_walk_done(desc, walk, -EINVAL);
 	}
 
-	bsize = min(walk->blocksize, n);
+	bsize = min(walk->walk_blocksize, n);
 
 	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
 			 BLKCIPHER_WALK_DIFF);
-	if (!scatterwalk_aligned(&walk->in, alignmask) ||
-	    !scatterwalk_aligned(&walk->out, alignmask)) {
+	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
+	    !scatterwalk_aligned(&walk->out, walk->alignmask)) {
 		walk->flags |= BLKCIPHER_WALK_COPY;
 		if (!walk->page) {
 			walk->page = (void *)__get_free_page(GFP_ATOMIC);
@@ -256,7 +251,7 @@
 	n = scatterwalk_clamp(&walk->out, n);
 
 	if (unlikely(n < bsize)) {
-		err = blkcipher_next_slow(desc, walk, bsize, alignmask);
+		err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
 		goto set_phys_lowmem;
 	}
 
@@ -278,28 +273,26 @@
 	return err;
 }
 
-static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
-				    struct crypto_blkcipher *tfm,
-				    unsigned int alignmask)
+static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
 {
-	unsigned bs = walk->blocksize;
-	unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
-	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
-	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
-			    (alignmask + 1);
+	unsigned bs = walk->walk_blocksize;
+	unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
+	unsigned int size = aligned_bs * 2 +
+			    walk->ivsize + max(aligned_bs, walk->ivsize) -
+			    (walk->alignmask + 1);
 	u8 *iv;
 
-	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
+	size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
 	walk->buffer = kmalloc(size, GFP_ATOMIC);
 	if (!walk->buffer)
 		return -ENOMEM;
 
-	iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
+	iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
 	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
 	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
-	iv = blkcipher_get_spot(iv, ivsize);
+	iv = blkcipher_get_spot(iv, walk->ivsize);
 
-	walk->iv = memcpy(iv, walk->iv, ivsize);
+	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
 	return 0;
 }
 
@@ -307,7 +300,10 @@
 			struct blkcipher_walk *walk)
 {
 	walk->flags &= ~BLKCIPHER_WALK_PHYS;
-	walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
+	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
+	walk->cipher_blocksize = walk->walk_blocksize;
+	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
+	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
 	return blkcipher_walk_first(desc, walk);
 }
 EXPORT_SYMBOL_GPL(blkcipher_walk_virt);
@@ -316,7 +312,10 @@
 			struct blkcipher_walk *walk)
 {
 	walk->flags |= BLKCIPHER_WALK_PHYS;
-	walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
+	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
+	walk->cipher_blocksize = walk->walk_blocksize;
+	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
+	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
 	return blkcipher_walk_first(desc, walk);
 }
 EXPORT_SYMBOL_GPL(blkcipher_walk_phys);
@@ -324,9 +323,6 @@
 static int blkcipher_walk_first(struct blkcipher_desc *desc,
 				struct blkcipher_walk *walk)
 {
-	struct crypto_blkcipher *tfm = desc->tfm;
-	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
-
 	if (WARN_ON_ONCE(in_irq()))
 		return -EDEADLK;
 
@@ -336,8 +332,8 @@
 
 	walk->buffer = NULL;
 	walk->iv = desc->info;
-	if (unlikely(((unsigned long)walk->iv & alignmask))) {
-		int err = blkcipher_copy_iv(walk, tfm, alignmask);
+	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
+		int err = blkcipher_copy_iv(walk);
 		if (err)
 			return err;
 	}
@@ -354,11 +350,28 @@
 			      unsigned int blocksize)
 {
 	walk->flags &= ~BLKCIPHER_WALK_PHYS;
-	walk->blocksize = blocksize;
+	walk->walk_blocksize = blocksize;
+	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
+	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
+	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
 	return blkcipher_walk_first(desc, walk);
 }
 EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);
 
+int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
+				   struct blkcipher_walk *walk,
+				   struct crypto_aead *tfm,
+				   unsigned int blocksize)
+{
+	walk->flags &= ~BLKCIPHER_WALK_PHYS;
+	walk->walk_blocksize = blocksize;
+	walk->cipher_blocksize = crypto_aead_blocksize(tfm);
+	walk->ivsize = crypto_aead_ivsize(tfm);
+	walk->alignmask = crypto_aead_alignmask(tfm);
+	return blkcipher_walk_first(desc, walk);
+}
+EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);
+
 static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
 			    unsigned int keylen)
 {
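The point of caching walk_blocksize, cipher_blocksize, ivsize and alignmask in the walk itself is that nothing past blkcipher_walk_first() dereferences desc->tfm anymore, so an AEAD (whose tfm is not a blkcipher) can drive the same walker through blkcipher_aead_walk_virt_block(). A minimal calling sketch, modeled on how hardware CCM/GCM glue uses this pattern; the surrounding function and the AES_BLOCK_SIZE choice are illustrative:

```c
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>

static int sketch_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct blkcipher_desc desc = { .info = req->iv };	/* note: no blkcipher tfm */
	struct blkcipher_walk walk;
	u32 remaining = req->cryptlen;
	int err;

	blkcipher_walk_init(&walk, req->dst, req->src, req->cryptlen);
	err = blkcipher_aead_walk_virt_block(&desc, &walk, aead, AES_BLOCK_SIZE);

	while (walk.nbytes) {
		u32 tail = walk.nbytes % AES_BLOCK_SIZE;

		if (walk.nbytes == remaining)
			tail = 0;	/* final chunk: consume the partial tail too */

		/* cipher walk.nbytes - tail bytes:
		 * walk.src.virt.addr -> walk.dst.virt.addr */

		remaining -= walk.nbytes - tail;
		err = blkcipher_walk_done(&desc, &walk, tail);
	}
	return err;
}
```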
diff --git a/crypto/cryptodev/AUTHORS b/crypto/cryptodev/AUTHORS
new file mode 100644
index 0000000..6f9408e
--- /dev/null
+++ b/crypto/cryptodev/AUTHORS
@@ -0,0 +1,19 @@
+Michal Ludvig: 
+  Initial implementation for linux 2.6.8
+
+Nikos Mavrogiannopoulos: 
+  Port to 2.6.27 and later, better compatibility
+  with OpenBSD (and FreeBSD) cryptodev, and maintenance.
+
+Michael Weiser: 
+  Porting to blkcipher async API. Several hardware drivers
+  only implemented this API.
+
+Phil Sutter: 
+  Implemented a zero copy version of the internal engine.
+
+Dmitry Kasatkin:
+  Multi-update support for hash calculation.
+
+
+Maintained by Nikos Mavrogiannopoulos (nmav [at] gnutls [dot] org)
diff --git a/crypto/cryptodev/COPYING b/crypto/cryptodev/COPYING
new file mode 100644
index 0000000..d159169
--- /dev/null
+++ b/crypto/cryptodev/COPYING
@@ -0,0 +1,339 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+                            NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/crypto/cryptodev/Makefile b/crypto/cryptodev/Makefile
new file mode 100644
index 0000000..d66ef26
--- /dev/null
+++ b/crypto/cryptodev/Makefile
@@ -0,0 +1,53 @@
+#
+# Since version 1.6 the asynchronous mode has been
+# disabled by default. To re-enable it, uncomment the
+# corresponding CFLAG.
+#
+CRYPTODEV_CFLAGS ?= #-DENABLE_ASYNC
+KBUILD_CFLAGS += -I$(src) $(CRYPTODEV_CFLAGS)
+KERNEL_DIR = /lib/modules/$(shell uname -r)/build
+VERSION = 1.6
+PREFIX =
+
+cryptodev-objs = ioctl.o main.o cryptlib.o authenc.o zc.o util.o
+
+obj-m += cryptodev.o
+
+build: version.h
+	make -C $(KERNEL_DIR) SUBDIRS=`pwd` modules
+
+version.h: Makefile
+	@echo "#define VERSION \"$(VERSION)\"" > version.h
+
+install: modules_install
+
+modules_install:
+	make -C $(KERNEL_DIR) SUBDIRS=`pwd` modules_install
+	@echo "Installing cryptodev.h in $(PREFIX)/usr/include/crypto ..."
+	@install -D crypto/cryptodev.h $(PREFIX)/usr/include/crypto/cryptodev.h
+
+clean:
+	make -C $(KERNEL_DIR) SUBDIRS=`pwd` clean
+	rm -f $(hostprogs) *~
+	CFLAGS=$(CRYPTODEV_CFLAGS) KERNEL_DIR=$(KERNEL_DIR) make -C tests clean
+
+check:
+	CFLAGS=$(CRYPTODEV_CFLAGS) KERNEL_DIR=$(KERNEL_DIR) make -C tests check
+
+FILEBASE = cryptodev-linux-$(VERSION)
+TMPDIR ?= /tmp
+OUTPUT = $(FILEBASE).tar.gz
+
+dist: clean
+	@echo Packing
+	@rm -f *.tar.gz
+	@mkdir $(TMPDIR)/$(FILEBASE)
+	@cp -ar crypto extras tests examples Makefile *.c *.h README NEWS \
+		AUTHORS COPYING $(TMPDIR)/$(FILEBASE)
+	@rm -rf $(TMPDIR)/$(FILEBASE)/.git* $(TMPDIR)/$(FILEBASE)/releases $(TMPDIR)/$(FILEBASE)/scripts
+	@tar -C /tmp -czf ./$(OUTPUT) $(FILEBASE)
+	@rm -rf $(TMPDIR)/$(FILEBASE)
+	@echo Signing $(OUTPUT)
+	@gpg --output $(OUTPUT).sig -sb $(OUTPUT)
+	@gpg --verify $(OUTPUT).sig $(OUTPUT)
+	@mv $(OUTPUT) $(OUTPUT).sig releases/
diff --git a/crypto/cryptodev/NEWS b/crypto/cryptodev/NEWS
new file mode 100644
index 0000000..bd1b666
--- /dev/null
+++ b/crypto/cryptodev/NEWS
@@ -0,0 +1,153 @@
+Version 1.6 (released 2013-03-20)
+
+* Added modules_install target in Makefile
+
+* Added SHA224. Patch by Yashpal Dutta.
+
+* Asynchronous operations will not be scheduled if zero
+copy is disabled.
+
+* Asynchronous operations are disabled by default, unless
+-DENABLE_ASYNC is enabled in the Makefile.
+
+
+Version 1.5 (released 2012-08-04)
+
+* Fixes in AEAD support. Patches by Jaren Johnston.
+
+* Simplifications in memory locking. Patch by Phil Sutter.
+
+* Allow empty plaintext and authenticated data in AEAD 
+ciphers. Patch by Jaren Johnston.
+
+
+Version 1.4 (released 2012-03-15)
+
+* Correctly report hw accelerated ciphers.
+
+
+Version 1.3 (released 2012-02-29)
+
+* Return EBADMSG instead of ECANCELED on tag verification
+failure in authenc modes.
+
+* COP_FLAG_RESET can be combined with COP_FLAG_UPDATE for
+efficiency.
+
+* Added more test cases.
+
+* Automatically set public permissions for the device
+
+
+Version 1.2 (released 2012-02-24)
+
+* In kernels that do not distinguish between hw-accelerated
+and software ciphers, set the SIOP_FLAG_KERNEL_DRIVER_ONLY
+flag based on the driver name.
+
+* camelia was renamed to camellia.
+
+* Added COP_FLAG_RESET to allow resetting the state
+in multi-update.
+
+* Corrected issue in ARM processors with mv_cesa.
+
+
+Version 1.1 (released 2012-02-20)
+
+* Fixed alignment issue in speed.c
+
+* Defined HASH_MAX_LEN in cryptodev.h
+
+* CIOCGSESSINFO ioctl() sets the SIOP_FLAG_KERNEL_DRIVER_ONLY 
+flag if the driver is only available through kernel
+driver (and is not just software cipher).
+
+* Added new encryption ioctl, CIOCAUTHCRYPT, which
+combines authentication and encryption. Operates
+in AEAD, TLS and SRTP modes (the API might change
+in later versions).
+
+
+Version 1.0 (released 2011-04-12)
+
+* Several fixes in the included examples. Based on
+patches by Vladimir Zapolskiy.
+
+
+Version 0.9 (released 2011-02-11)
+
+* Added additional test tools:
+  - sha_speed does performance testing of SHA1 and SHA256
+  - hashcrypt_speed additionally encrypts with AES128 and AES256
+
+* Allow updating the IV in userspace via the COP_FLAG_WRITE_IV
+flag.
+
+* Export the alignmask in an OCF compatible way.
+
+* Fix for kernel crash on passing incorrect session ID.
+
+* Added CIOCGSESSINFO to export additional information
+for each session.
+
+
+Version 0.8 (released 2010-11-06)
+
+* Made cryptodev aware of alignment constraints.
+
+* Added support for CRYPTO_AES_ECB.
+
+* Added asynchronous operation support using
+  CIOCASYNCCRYPT, CIOCASYNCFETCH ioctls and poll().
+
+
+Version 0.7 (released 2010-10-08)
+
+* Added COP_FLAG_FINAL to make multi-update
+more efficient.
+
+* Added CRIOGET_NOT_NEEDED definition to allow
+users of the API to distinguish from the bare
+OpenBSD API that requires the CRIOGET.
+
+
+Version 0.6 (released 2010-09-16)
+
+* multi-update support for hash calculation using
+the new flag COP_FLAG_UPDATE.
+
+* Relicensed under GPLv2.
+
+* Added AES-CTR.
+
+* Corrected fallback to non-zero copy when referenced
+pages were not writable.
+
+
+Version 0.5 (released 2010-07-06)
+
+* Corrected issue with zero copy on multiple pages.
+
+* Fallback to normal operation if user pages cannot be
+mapped.
+
+
+Version 0.4 (released 2010-07-03)
+
+* Internal engine supports operations with zero copy from
+user space. 
+
+
+Version 0.3 (released 2010-06-19)
+
+* Corrected bug when initializing unsupported algorithms.
+
+
+Version 0.2 (released 2010-06-18)
+
+* Added compat_ioctl() to allow working on systems where userspace is 32-bit
+and the kernel is operating in 64-bit mode (Phil Sutter)
+
+* Added several sanity checks to input.
+
diff --git a/crypto/cryptodev/README b/crypto/cryptodev/README
new file mode 100644
index 0000000..1176e70
--- /dev/null
+++ b/crypto/cryptodev/README
@@ -0,0 +1,21 @@
+This is a /dev/crypto device driver, equivalent to those in OpenBSD or
+FreeBSD. The main idea is to allow access to existing ciphers in kernel space
+from userspace, thus enabling re-use of a hardware implementation of a
+cipher.
+
+For questions and suggestions please use the mailing lists at:
+http://cryptodev-linux.org/lists.html
+
+
+=== How to combine with cryptographic libraries ===
+
+* GnuTLS: 
+
+GnuTLS needs to be compiled with --enable-cryptodev in order to take
+advantage of /dev/crypto. GnuTLS 3.0.14 or later is recommended.
+
+* OpenSSL:
+
+The current releases of openssl support /dev/crypto by replacing
+eng_cryptodev.c with the version available in the extras subdirectory. 
+To compile it, use the -DHAVE_CRYPTODEV -DUSE_CRYPTODEV_DIGESTS flags.
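The device can also be driven directly through its ioctl interface: CIOCGSESSION opens a cipher session, CIOCCRYPT runs data through it, and CIOCFSESSION tears it down. A minimal sketch (AES-128-CBC over a single block; the zeroed key/IV are placeholders and error handling is omitted):

```c
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <crypto/cryptodev.h>

int main(void)
{
	unsigned char key[16] = {0}, iv[16] = {0}, out[16];
	unsigned char in[16] = "sixteen bytes!!";	/* exactly one AES block */
	struct session_op sess = {0};
	struct crypt_op cop = {0};
	int fd = open("/dev/crypto", O_RDWR);

	sess.cipher = CRYPTO_AES_CBC;
	sess.keylen = sizeof(key);
	sess.key = key;
	ioctl(fd, CIOCGSESSION, &sess);		/* open a cipher session */

	cop.ses = sess.ses;
	cop.op = COP_ENCRYPT;
	cop.len = sizeof(in);
	cop.src = in;
	cop.dst = out;
	cop.iv = iv;
	ioctl(fd, CIOCCRYPT, &cop);		/* encrypt in -> out */

	ioctl(fd, CIOCFSESSION, &sess.ses);	/* tear the session down */
	close(fd);
	return 0;
}
```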
diff --git a/crypto/cryptodev/authenc.c b/crypto/cryptodev/authenc.c
new file mode 100644
index 0000000..8bff677
--- /dev/null
+++ b/crypto/cryptodev/authenc.c
@@ -0,0 +1,742 @@
+/*
+ * Driver for /dev/crypto device (aka CryptoDev)
+ *
+ * Copyright (c) 2011, 2012 OpenSSL Software Foundation, Inc.
+ *
+ * Author: Nikos Mavrogiannopoulos
+ *
+ * This file is part of linux cryptodev.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/*
+ * This file handles the AEAD part of /dev/crypto.
+ *
+ */
+
+#include <crypto/hash.h>
+#include <linux/crypto.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/ioctl.h>
+#include <linux/random.h>
+#include <linux/syscalls.h>
+#include <linux/pagemap.h>
+#include <linux/poll.h>
+#include <linux/uaccess.h>
+#include <crypto/cryptodev.h>
+#include <crypto/scatterwalk.h>
+#include <linux/scatterlist.h>
+#include "cryptodev_int.h"
+#include "zc.h"
+#include "util.h"
+#include "cryptlib.h"
+#include "version.h"
+
+
+/* Make caop->dst available as a scatterlist.
+ * (caop->src is assumed to be equal to caop->dst)
+ */
+static int get_userbuf_tls(struct csession *ses, struct kernel_crypt_auth_op *kcaop,
+			struct scatterlist **dst_sg)
+{
+	int pagecount = 0;
+	struct crypt_auth_op *caop = &kcaop->caop;
+	int rc;
+
+	if (caop->dst == NULL)
+		return -EINVAL;
+
+	if (ses->alignmask) {
+		if (!IS_ALIGNED((unsigned long)caop->dst, ses->alignmask))
+			dprintk(2, KERN_WARNING, "careful - source address %lx is not %d byte aligned\n",
+				(unsigned long)caop->dst, ses->alignmask + 1);
+	}
+
+	if (kcaop->dst_len == 0) {
+		dprintk(1, KERN_WARNING, "Destination length cannot be zero\n");
+		return -EINVAL;
+	}
+
+	pagecount = PAGECOUNT(caop->dst, kcaop->dst_len);
+
+	ses->used_pages = pagecount;
+	ses->readonly_pages = 0;
+
+	rc = adjust_sg_array(ses, pagecount);
+	if (rc)
+		return rc;
+
+	rc = __get_userbuf(caop->dst, kcaop->dst_len, 1, pagecount,
+	                   ses->pages, ses->sg, kcaop->task, kcaop->mm);
+	if (unlikely(rc)) {
+		dprintk(1, KERN_ERR,
+			"failed to get user pages for data input\n");
+		return -EINVAL;
+	}
+
+	(*dst_sg) = ses->sg;
+
+	return 0;
+}
+
+
+#define MAX_SRTP_AUTH_DATA_DIFF 256
+
+/* Makes caop->auth_src available as a scatterlist.
+ * It also provides a pointer to caop->dst, which, however,
+ * is assumed to be within the caop->auth_src buffer. If not
+ * (if their difference exceeds MAX_SRTP_AUTH_DATA_DIFF) it
+ * returns an error.
+ */
+static int get_userbuf_srtp(struct csession *ses, struct kernel_crypt_auth_op *kcaop,
+			struct scatterlist **auth_sg, struct scatterlist **dst_sg)
+{
+	int pagecount, diff;
+	int auth_pagecount = 0;
+	struct crypt_auth_op *caop = &kcaop->caop;
+	int rc;
+
+	if (caop->dst == NULL && caop->auth_src == NULL) {
+		dprintk(1, KERN_ERR, "dst and auth_src cannot be both null\n");
+		return -EINVAL;
+	}
+
+	if (ses->alignmask) {
+		if (!IS_ALIGNED((unsigned long)caop->dst, ses->alignmask))
+			dprintk(2, KERN_WARNING, "careful - source address %lx is not %d byte aligned\n",
+				(unsigned long)caop->dst, ses->alignmask + 1);
+		if (!IS_ALIGNED((unsigned long)caop->auth_src, ses->alignmask))
+			dprintk(2, KERN_WARNING, "careful - source address %lx is not %d byte aligned\n",
+				(unsigned long)caop->auth_src, ses->alignmask + 1);
+	}
+
+	if (unlikely(kcaop->dst_len == 0 || caop->auth_len == 0)) {
+		dprintk(1, KERN_WARNING, "Destination length cannot be zero\n");
+		return -EINVAL;
+	}
+
+	/* Note that in SRTP the auth data overlaps with the data to be encrypted (dst).
+	 */
+
+	auth_pagecount = PAGECOUNT(caop->auth_src, caop->auth_len);
+	diff = (int)(caop->src - caop->auth_src);
+	if (diff > MAX_SRTP_AUTH_DATA_DIFF || diff < 0) {
+		dprintk(1, KERN_WARNING, "auth_src must overlap with src (diff: %d).\n", diff);
+		return -EINVAL;
+	}
+
+	pagecount = auth_pagecount;
+
+	rc = adjust_sg_array(ses, pagecount*2); /* double pages to have pages for dst(=auth_src) */
+	if (rc) {
+		dprintk(1, KERN_ERR, "cannot adjust sg array\n");
+		return rc;
+	}
+
+	rc = __get_userbuf(caop->auth_src, caop->auth_len, 1, auth_pagecount,
+			   ses->pages, ses->sg, kcaop->task, kcaop->mm);
+	if (unlikely(rc)) {
+		dprintk(1, KERN_ERR,
+			"failed to get user pages for data input\n");
+		return -EINVAL;
+	}
+
+	ses->used_pages = pagecount;
+	ses->readonly_pages = 0;
+
+	(*auth_sg) = ses->sg;
+
+	(*dst_sg) = ses->sg + auth_pagecount;
+	sg_init_table(*dst_sg, auth_pagecount);
+	sg_copy(ses->sg, (*dst_sg), caop->auth_len);
+	(*dst_sg) = sg_advance(*dst_sg, diff);
+	if (*dst_sg == NULL) {
+		release_user_pages(ses);
+		dprintk(1, KERN_ERR,
+			"failed to get enough pages for auth data\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int fill_kcaop_from_caop(struct kernel_crypt_auth_op *kcaop, struct fcrypt *fcr)
+{
+	struct crypt_auth_op *caop = &kcaop->caop;
+	struct csession *ses_ptr;
+	int ret;
+
+	/* this also enters ses_ptr->sem */
+	ses_ptr = crypto_get_session_by_sid(fcr, caop->ses);
+	if (unlikely(!ses_ptr)) {
+		dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", caop->ses);
+		return -EINVAL;
+	}
+
+	if (caop->flags & COP_FLAG_AEAD_TLS_TYPE || caop->flags & COP_FLAG_AEAD_SRTP_TYPE) {
+		if (caop->src != caop->dst) {
+			dprintk(1, KERN_ERR,
+				"Non-inplace encryption and decryption is not efficient and not implemented\n");
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+	}
+
+	if (caop->tag_len == 0)
+		caop->tag_len = ses_ptr->hdata.digestsize;
+
+	kcaop->ivlen = caop->iv ? ses_ptr->cdata.ivsize : 0;
+
+	if (caop->flags & COP_FLAG_AEAD_TLS_TYPE)
+		kcaop->dst_len = caop->len + ses_ptr->cdata.blocksize /* pad */ + caop->tag_len;
+	else
+		kcaop->dst_len = caop->len;
+
+	kcaop->task = current;
+	kcaop->mm = current->mm;
+
+	if (caop->iv) {
+		ret = copy_from_user(kcaop->iv, caop->iv, kcaop->ivlen);
+		if (unlikely(ret)) {
+			dprintk(1, KERN_ERR,
+				"error copying IV (%d bytes), copy_from_user returned %d for address %lx\n",
+				kcaop->ivlen, ret, (unsigned long)caop->iv);
+			ret = -EFAULT;
+			goto out_unlock;
+		}
+	}
+
+	ret = 0;
+
+out_unlock:
+	crypto_put_session(ses_ptr);
+	return ret;
+
+}
+
+static int fill_caop_from_kcaop(struct kernel_crypt_auth_op *kcaop, struct fcrypt *fcr)
+{
+	int ret;
+
+	kcaop->caop.len = kcaop->dst_len;
+
+	if (kcaop->ivlen && kcaop->caop.flags & COP_FLAG_WRITE_IV) {
+		ret = copy_to_user(kcaop->caop.iv,
+				kcaop->iv, kcaop->ivlen);
+		if (unlikely(ret)) {
+			dprintk(1, KERN_ERR, "Error in copying to userspace\n");
+			return -EFAULT;
+		}
+	}
+	return 0;
+}
+
+
+int kcaop_from_user(struct kernel_crypt_auth_op *kcaop,
+			struct fcrypt *fcr, void __user *arg)
+{
+	if (unlikely(copy_from_user(&kcaop->caop, arg, sizeof(kcaop->caop)))) {
+		dprintk(1, KERN_ERR, "Error in copying from userspace\n");
+		return -EFAULT;
+	}
+
+	return fill_kcaop_from_caop(kcaop, fcr);
+}
+
+int kcaop_to_user(struct kernel_crypt_auth_op *kcaop,
+		struct fcrypt *fcr, void __user *arg)
+{
+	int ret;
+
+	ret = fill_caop_from_kcaop(kcaop, fcr);
+	if (unlikely(ret)) {
+		dprintk(1, KERN_ERR, "fill_caop_from_kcaop\n");
+		return ret;
+	}
+
+	if (unlikely(copy_to_user(arg, &kcaop->caop, sizeof(kcaop->caop)))) {
+		dprintk(1, KERN_ERR, "Error in copying to userspace\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void copy_tls_hash(struct scatterlist *dst_sg, int len, void *hash, int hash_len)
+{
+	scatterwalk_map_and_copy(hash, dst_sg, len, hash_len, 1);
+}
+
+static void read_tls_hash(struct scatterlist *dst_sg, int len, void *hash, int hash_len)
+{
+	scatterwalk_map_and_copy(hash, dst_sg, len-hash_len, hash_len, 0);
+}
+
+static int pad_record(struct scatterlist *dst_sg, int len, int block_size)
+{
+	uint8_t pad[block_size];
+	int pad_size = block_size - (len % block_size);
+
+	memset(pad, pad_size-1, pad_size);
+
+	scatterwalk_map_and_copy(pad, dst_sg, len, pad_size, 1);
+
+	return pad_size;
+}
+
+static int verify_tls_record_pad(struct scatterlist *dst_sg, int len, int block_size)
+{
+	uint8_t pad[256]; /* the maximum allowed */
+	uint8_t pad_size;
+	int i;
+
+	scatterwalk_map_and_copy(&pad_size, dst_sg, len-1, 1, 0);
+
+	if (pad_size+1 > len) {
+		dprintk(1, KERN_ERR, "Pad size: %d\n", pad_size);
+		return -EBADMSG;
+	}
+
+	scatterwalk_map_and_copy(pad, dst_sg, len-pad_size-1, pad_size+1, 0);
+
+	for (i = 0; i < pad_size; i++)
+		if (pad[i] != pad_size) {
+			dprintk(1, KERN_ERR, "Pad size: %d, pad: %d\n", pad_size, (int)pad[i]);
+			return -EBADMSG;
+		}
+
+	return pad_size+1;
+}
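A worked instance of the arithmetic above: for a 13-byte record with a 16-byte block cipher, pad_record() appends three bytes of value 2 (two pad bytes plus the pad-length byte), and verify_tls_record_pad() later strips pad_size + 1 = 3 bytes. The standalone check below is a sketch, not part of the driver:

```c
#include <assert.h>
#include <string.h>

int main(void)
{
	int len = 13, block_size = 16;
	unsigned char pad[16];
	int pad_size = block_size - (len % block_size);	/* 3 */

	memset(pad, pad_size - 1, pad_size);	/* appends 02 02 02 */
	assert(pad_size == 3 && pad[0] == 2 && pad[2] == 2);
	/* verify_tls_record_pad() reads the last byte (2), checks the two
	 * preceding bytes against it, and returns pad_size = 2 + 1 = 3. */
	return 0;
}
```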
+
+/* Authenticate and encrypt the TLS way (also perform padding).
+ * During decryption it verifies the pad and tag and returns -EBADMSG on error.
+ */
+static int
+tls_auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
+		 struct scatterlist *auth_sg, uint32_t auth_len,
+		 struct scatterlist *dst_sg, uint32_t len)
+{
+	int ret, fail = 0;
+	struct crypt_auth_op *caop = &kcaop->caop;
+	uint8_t vhash[AALG_MAX_RESULT_LEN];
+	uint8_t hash_output[AALG_MAX_RESULT_LEN];
+
+	/* TLS authenticates the plaintext except for the padding.
+	 */
+	if (caop->op == COP_ENCRYPT) {
+		if (ses_ptr->hdata.init != 0) {
+			if (auth_len > 0) {
+				ret = cryptodev_hash_update(&ses_ptr->hdata,
+								auth_sg, auth_len);
+				if (unlikely(ret)) {
+					dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
+					return ret;
+				}
+			}
+
+			if (len > 0) {
+				ret = cryptodev_hash_update(&ses_ptr->hdata,
+								dst_sg, len);
+				if (unlikely(ret)) {
+					dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
+					return ret;
+				}
+			}
+
+			ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
+			if (unlikely(ret)) {
+				dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
+				return ret;
+			}
+
+			copy_tls_hash(dst_sg, len, hash_output, caop->tag_len);
+			len += caop->tag_len;
+		}
+
+		if (ses_ptr->cdata.init != 0) {
+			if (ses_ptr->cdata.blocksize > 1) {
+				ret = pad_record(dst_sg, len, ses_ptr->cdata.blocksize);
+				len += ret;
+			}
+
+			ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
+							dst_sg, dst_sg, len);
+			if (unlikely(ret)) {
+				dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
+				return ret;
+			}
+		}
+	} else {
+		if (ses_ptr->cdata.init != 0) {
+			ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
+							dst_sg, dst_sg, len);
+
+			if (unlikely(ret)) {
+				dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
+				return ret;
+			}
+
+			if (ses_ptr->cdata.blocksize > 1) {
+				ret = verify_tls_record_pad(dst_sg, len, ses_ptr->cdata.blocksize);
+				if (unlikely(ret < 0)) {
+					dprintk(2, KERN_ERR, "verify_record_pad: %d\n", ret);
+					fail = 1;
+				} else {
+					len -= ret;
+				}
+			}
+		}
+
+		if (ses_ptr->hdata.init != 0) {
+			if (unlikely(caop->tag_len > sizeof(vhash) || caop->tag_len > len)) {
+				dprintk(1, KERN_ERR, "Illegal tag len size\n");
+				return -EINVAL;
+			}
+
+			read_tls_hash(dst_sg, len, vhash, caop->tag_len);
+			len -= caop->tag_len;
+
+			if (auth_len > 0) {
+				ret = cryptodev_hash_update(&ses_ptr->hdata,
+								auth_sg, auth_len);
+				if (unlikely(ret)) {
+					dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
+					return ret;
+				}
+			}
+
+			if (len > 0) {
+				ret = cryptodev_hash_update(&ses_ptr->hdata,
+									dst_sg, len);
+				if (unlikely(ret)) {
+					dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
+					return ret;
+				}
+			}
+
+			ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
+			if (unlikely(ret)) {
+				dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
+				return ret;
+			}
+
+			if (memcmp(vhash, hash_output, caop->tag_len) != 0 || fail != 0) {
+				dprintk(2, KERN_ERR, "MAC verification failed (tag_len: %d)\n", caop->tag_len);
+				return -EBADMSG;
+			}
+		}
+	}
+	kcaop->dst_len = len;
+	return 0;
+}
+
+/* Authenticate and encrypt the SRTP way. During decryption
+ * it verifies the tag and returns -EBADMSG on error.
+ */
+static int
+srtp_auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
+		  struct scatterlist *auth_sg, uint32_t auth_len,
+		  struct scatterlist *dst_sg, uint32_t len)
+{
+	int ret, fail = 0;
+	struct crypt_auth_op *caop = &kcaop->caop;
+	uint8_t vhash[AALG_MAX_RESULT_LEN];
+	uint8_t hash_output[AALG_MAX_RESULT_LEN];
+
+	/* SRTP authenticates the encrypted data.
+	 */
+	if (caop->op == COP_ENCRYPT) {
+		if (ses_ptr->cdata.init != 0) {
+			ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
+							dst_sg, dst_sg, len);
+			if (unlikely(ret)) {
+				dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
+				return ret;
+			}
+		}
+
+		if (ses_ptr->hdata.init != 0) {
+			if (auth_len > 0) {
+				ret = cryptodev_hash_update(&ses_ptr->hdata,
+								auth_sg, auth_len);
+				if (unlikely(ret)) {
+					dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
+					return ret;
+				}
+			}
+
+			ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
+			if (unlikely(ret)) {
+				dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
+				return ret;
+			}
+
+			if (unlikely(copy_to_user(caop->tag, hash_output, caop->tag_len))) {
+				return -EFAULT;
+			}
+		}
+
+	} else {
+		if (ses_ptr->hdata.init != 0) {
+			if (unlikely(caop->tag_len > sizeof(vhash) || caop->tag_len > len)) {
+				dprintk(1, KERN_ERR, "Illegal tag len size\n");
+				return -EINVAL;
+			}
+
+			if (unlikely(copy_from_user(vhash, caop->tag, caop->tag_len))) {
+				return -EFAULT;
+			}
+
+			ret = cryptodev_hash_update(&ses_ptr->hdata,
+							auth_sg, auth_len);
+			if (unlikely(ret)) {
+				dprintk(0, KERN_ERR, "cryptodev_hash_update: %d\n", ret);
+				return ret;
+			}
+
+			ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
+			if (unlikely(ret)) {
+				dprintk(0, KERN_ERR, "cryptodev_hash_final: %d\n", ret);
+				return ret;
+			}
+
+			if (memcmp(vhash, hash_output, caop->tag_len) != 0 || fail != 0) {
+				dprintk(2, KERN_ERR, "MAC verification failed\n");
+				return -EBADMSG;
+			}
+		}
+
+		if (ses_ptr->cdata.init != 0) {
+			ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
+							dst_sg, dst_sg, len);
+
+			if (unlikely(ret)) {
+				dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
+				return ret;
+			}
+		}
+
+	}
+	kcaop->dst_len = len;
+	return 0;
+}
+
+/* Typical AEAD (i.e. GCM) encryption/decryption.
+ * During decryption the tag is verified.
+ */
+static int
+auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
+		  struct scatterlist *auth_sg, uint32_t auth_len,
+		  struct scatterlist *src_sg,
+		  struct scatterlist *dst_sg, uint32_t len)
+{
+	int ret;
+	struct crypt_auth_op *caop = &kcaop->caop;
+	int max_tag_len;
+
+	max_tag_len = cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
+	if (unlikely(caop->tag_len > max_tag_len)) {
+		dprintk(0, KERN_ERR, "Illegal tag length: %d\n", caop->tag_len);
+		return -EINVAL;
+	}
+
+	if (caop->tag_len)
+		cryptodev_cipher_set_tag_size(&ses_ptr->cdata, caop->tag_len);
+	else
+		caop->tag_len = max_tag_len;
+
+	cryptodev_cipher_auth(&ses_ptr->cdata, auth_sg, auth_len);
+
+	if (caop->op == COP_ENCRYPT) {
+		ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
+						src_sg, dst_sg, len);
+		if (unlikely(ret)) {
+			dprintk(0, KERN_ERR, "cryptodev_cipher_encrypt: %d\n", ret);
+			return ret;
+		}
+		kcaop->dst_len = len + caop->tag_len;
+		caop->tag = caop->dst + len;
+	} else {
+		ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
+						src_sg, dst_sg, len);
+
+		if (unlikely(ret)) {
+			dprintk(0, KERN_ERR, "cryptodev_cipher_decrypt: %d\n", ret);
+			return ret;
+		}
+		kcaop->dst_len = len - caop->tag_len;
+		caop->tag = caop->dst + len - caop->tag_len;
+	}
+
+	return 0;
+}
+
+/* This is the main crypto function - zero-copy edition */
+static int
+__crypto_auth_run_zc(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop)
+{
+	struct scatterlist *dst_sg, *auth_sg, *src_sg;
+	struct crypt_auth_op *caop = &kcaop->caop;
+	int ret = 0;
+
+	if (caop->flags & COP_FLAG_AEAD_SRTP_TYPE) {
+		if (unlikely(ses_ptr->cdata.init != 0 &&
+			(ses_ptr->cdata.stream == 0 || ses_ptr->cdata.aead != 0)))
+		{
+			dprintk(0, KERN_ERR, "Only stream modes are allowed in SRTP mode (but not AEAD)\n");
+			return -EINVAL;
+		}
+
+		ret = get_userbuf_srtp(ses_ptr, kcaop, &auth_sg, &dst_sg);
+		if (unlikely(ret)) {
+			dprintk(1, KERN_ERR, "get_userbuf_srtp(): Error getting user pages.\n");
+			return ret;
+		}
+
+		ret = srtp_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
+			   dst_sg, caop->len);
+
+		release_user_pages(ses_ptr);
+	} else { /* TLS and normal cases. Here auth data are usually small
+	          * so we just copy them to a free page, instead of trying
+	          * to map them.
+	          */
+		unsigned char *auth_buf = NULL;
+		struct scatterlist tmp;
+
+		if (unlikely(caop->auth_len > PAGE_SIZE)) {
+			dprintk(1, KERN_ERR, "auth data len is excessive.\n");
+			return -EINVAL;
+		}
+
+		auth_buf = (unsigned char *)__get_free_page(GFP_KERNEL);
+		if (unlikely(!auth_buf)) {
+			dprintk(1, KERN_ERR, "unable to get a free page.\n");
+			return -ENOMEM;
+		}
+
+		if (caop->auth_src && caop->auth_len > 0) {
+			if (unlikely(copy_from_user(auth_buf, caop->auth_src, caop->auth_len))) {
+				dprintk(1, KERN_ERR, "unable to copy auth data from userspace.\n");
+				ret = -EFAULT;
+				goto free_auth_buf;
+			}
+
+			sg_init_one(&tmp, auth_buf, caop->auth_len);
+			auth_sg = &tmp;
+		} else {
+			auth_sg = NULL;
+		}
+
+		if (caop->flags & COP_FLAG_AEAD_TLS_TYPE && ses_ptr->cdata.aead == 0) {
+			ret = get_userbuf_tls(ses_ptr, kcaop, &dst_sg);
+			if (unlikely(ret)) {
+				dprintk(1, KERN_ERR, "get_userbuf_tls(): Error getting user pages.\n");
+				goto free_auth_buf;
+			}
+
+			ret = tls_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
+				   dst_sg, caop->len);
+		} else {
+			int dst_len;
+
+			if (unlikely(ses_ptr->cdata.init == 0 ||
+					ses_ptr->cdata.stream == 0 ||
+					ses_ptr->cdata.aead == 0))
+			{
+				dprintk(0, KERN_ERR, "Only stream and AEAD ciphers are allowed for authenc\n");
+				ret = -EINVAL;
+				goto free_auth_buf;
+			}
+
+			if (caop->op == COP_ENCRYPT)
+				dst_len = caop->len +
+					cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
+			else
+				dst_len = caop->len;
+
+			ret = get_userbuf(ses_ptr, caop->src, caop->len, caop->dst, dst_len,
+					  kcaop->task, kcaop->mm, &src_sg, &dst_sg);
+			if (unlikely(ret)) {
+				dprintk(1, KERN_ERR, "get_userbuf(): Error getting user pages.\n");
+				goto free_auth_buf;
+			}
+
+			ret = auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
+					   src_sg, dst_sg, caop->len);
+		}
+
+		release_user_pages(ses_ptr);
+
+free_auth_buf:
+		free_page((unsigned long)auth_buf);
+	}
+
+	return ret;
+}
+
+
+int crypto_auth_run(struct fcrypt *fcr, struct kernel_crypt_auth_op *kcaop)
+{
+	struct csession *ses_ptr;
+	struct crypt_auth_op *caop = &kcaop->caop;
+	int ret;
+
+	if (unlikely(caop->op != COP_ENCRYPT && caop->op != COP_DECRYPT)) {
+		dprintk(1, KERN_DEBUG, "invalid operation op=%u\n", caop->op);
+		return -EINVAL;
+	}
+
+	/* this also enters ses_ptr->sem */
+	ses_ptr = crypto_get_session_by_sid(fcr, caop->ses);
+	if (unlikely(!ses_ptr)) {
+		dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", caop->ses);
+		return -EINVAL;
+	}
+
+	if (unlikely(ses_ptr->cdata.init == 0)) {
+		dprintk(1, KERN_ERR, "cipher context not initialized\n");
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	/* If we have a hash/mac handle reset its state */
+	if (ses_ptr->hdata.init != 0) {
+		ret = cryptodev_hash_reset(&ses_ptr->hdata);
+		if (unlikely(ret)) {
+			dprintk(1, KERN_ERR,
+				"error in cryptodev_hash_reset()\n");
+			goto out_unlock;
+		}
+	}
+
+	cryptodev_cipher_set_iv(&ses_ptr->cdata, kcaop->iv,
+				min(ses_ptr->cdata.ivsize, kcaop->ivlen));
+
+	ret = __crypto_auth_run_zc(ses_ptr, kcaop);
+	if (unlikely(ret)) {
+		dprintk(1, KERN_ERR,
+			"error in __crypto_auth_run_zc()\n");
+		goto out_unlock;
+	}
+
+	ret = 0;
+
+	cryptodev_cipher_get_iv(&ses_ptr->cdata, kcaop->iv,
+				min(ses_ptr->cdata.ivsize, kcaop->ivlen));
+
+out_unlock:
+	crypto_put_session(ses_ptr);
+	return ret;
+}
diff --git a/crypto/cryptodev/cryptlib.c b/crypto/cryptodev/cryptlib.c
new file mode 100644
index 0000000..fad5ba6
--- /dev/null
+++ b/crypto/cryptodev/cryptlib.c
@@ -0,0 +1,377 @@
+/*
+ * Driver for /dev/crypto device (aka CryptoDev)
+ *
+ * Copyright (c) 2010,2011 Nikos Mavrogiannopoulos <nmav@gnutls.org>
+ * Portions Copyright (c) 2010 Michael Weiser
+ * Portions Copyright (c) 2010 Phil Sutter
+ *
+ * This file is part of linux cryptodev.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/crypto.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/ioctl.h>
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <linux/uaccess.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/cryptodev.h>
+#include <crypto/aead.h>
+#include "cryptodev_int.h"
+
+
+struct cryptodev_result {
+	struct completion completion;
+	int err;
+};
+
+static void cryptodev_complete(struct crypto_async_request *req, int err)
+{
+	struct cryptodev_result *res = req->data;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	res->err = err;
+	complete(&res->completion);
+}
+
+int cryptodev_cipher_init(struct cipher_data *out, const char *alg_name,
+				uint8_t *keyp, size_t keylen, int stream, int aead)
+{
+	int ret;
+
+	memset(out, 0, sizeof(*out));
+
+	if (aead == 0) {
+		struct ablkcipher_alg *alg;
+
+		out->async.s = crypto_alloc_ablkcipher(alg_name, 0, 0);
+		if (unlikely(IS_ERR(out->async.s))) {
+			dprintk(1, KERN_DEBUG, "Failed to load cipher %s\n", alg_name);
+				return -EINVAL;
+		}
+
+		alg = crypto_ablkcipher_alg(out->async.s);
+		if (alg != NULL) {
+			/* Was correct key length supplied? */
+			if (alg->max_keysize > 0 &&
+					unlikely((keylen < alg->min_keysize) ||
+					(keylen > alg->max_keysize))) {
+				dprintk(1, KERN_DEBUG,
+					"Wrong keylen '%zu' for algorithm '%s'. "
+					"Use %u to %u.\n",
+					keylen, alg_name, alg->min_keysize,
+					alg->max_keysize);
+				ret = -EINVAL;
+				goto error;
+			}
+		}
+
+		out->blocksize = crypto_ablkcipher_blocksize(out->async.s);
+		out->ivsize = crypto_ablkcipher_ivsize(out->async.s);
+		out->alignmask = crypto_ablkcipher_alignmask(out->async.s);
+
+		ret = crypto_ablkcipher_setkey(out->async.s, keyp, keylen);
+	} else {
+		out->async.as = crypto_alloc_aead(alg_name, 0, 0);
+		if (unlikely(IS_ERR(out->async.as))) {
+			dprintk(1, KERN_DEBUG, "Failed to load cipher %s\n", alg_name);
+			return -EINVAL;
+		}
+
+		out->blocksize = crypto_aead_blocksize(out->async.as);
+		out->ivsize = crypto_aead_ivsize(out->async.as);
+		out->alignmask = crypto_aead_alignmask(out->async.as);
+
+		ret = crypto_aead_setkey(out->async.as, keyp, keylen);
+	}
+
+	if (unlikely(ret)) {
+		dprintk(1, KERN_DEBUG, "Setting key failed for %s-%zu.\n",
+			alg_name, keylen*8);
+		ret = -EINVAL;
+		goto error;
+	}
+
+	out->stream = stream;
+	out->aead = aead;
+
+	out->async.result = kmalloc(sizeof(*out->async.result), GFP_KERNEL);
+	if (unlikely(!out->async.result)) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	memset(out->async.result, 0, sizeof(*out->async.result));
+	init_completion(&out->async.result->completion);
+
+	if (aead == 0) {
+		out->async.request = ablkcipher_request_alloc(out->async.s, GFP_KERNEL);
+		if (unlikely(!out->async.request)) {
+			dprintk(1, KERN_ERR, "error allocating async crypto request\n");
+			ret = -ENOMEM;
+			goto error;
+		}
+
+		ablkcipher_request_set_callback(out->async.request,
+					CRYPTO_TFM_REQ_MAY_BACKLOG,
+					cryptodev_complete, out->async.result);
+	} else {
+		out->async.arequest = aead_request_alloc(out->async.as, GFP_KERNEL);
+		if (unlikely(!out->async.arequest)) {
+			dprintk(1, KERN_ERR, "error allocating async crypto request\n");
+			ret = -ENOMEM;
+			goto error;
+		}
+
+		aead_request_set_callback(out->async.arequest,
+					CRYPTO_TFM_REQ_MAY_BACKLOG,
+					cryptodev_complete, out->async.result);
+	}
+
+	out->init = 1;
+	return 0;
+error:
+	if (aead == 0) {
+		if (out->async.request)
+			ablkcipher_request_free(out->async.request);
+		if (out->async.s)
+			crypto_free_ablkcipher(out->async.s);
+	} else {
+		if (out->async.arequest)
+			aead_request_free(out->async.arequest);
+		if (out->async.as)
+			crypto_free_aead(out->async.as);
+	}
+	kfree(out->async.result);
+
+	return ret;
+}
+
+void cryptodev_cipher_deinit(struct cipher_data *cdata)
+{
+	if (cdata->init) {
+		if (cdata->aead == 0) {
+			if (cdata->async.request)
+				ablkcipher_request_free(cdata->async.request);
+			if (cdata->async.s)
+				crypto_free_ablkcipher(cdata->async.s);
+		} else {
+			if (cdata->async.arequest)
+				aead_request_free(cdata->async.arequest);
+			if (cdata->async.as)
+				crypto_free_aead(cdata->async.as);
+		}
+
+		kfree(cdata->async.result);
+		cdata->init = 0;
+	}
+}
+
+static inline int waitfor(struct cryptodev_result *cr, ssize_t ret)
+{
+	switch (ret) {
+	case 0:
+		break;
+	case -EINPROGRESS:
+	case -EBUSY:
+		wait_for_completion(&cr->completion);
+		/* At this point we know for sure the request has finished,
+		 * because wait_for_completion above was not interruptible.
+		 * This is important because otherwise hardware or driver
+		 * might try to access memory which will be freed or reused for
+		 * another request. */
+
+		if (unlikely(cr->err)) {
+			dprintk(0, KERN_ERR, "error from async request: %d\n",
+				cr->err);
+			return cr->err;
+		}
+
+		break;
+	default:
+		return ret;
+	}
+
+	return 0;
+}
+
+ssize_t cryptodev_cipher_encrypt(struct cipher_data *cdata,
+		const struct scatterlist *src, struct scatterlist *dst,
+		size_t len)
+{
+	int ret;
+
+	INIT_COMPLETION(cdata->async.result->completion);
+
+	if (cdata->aead == 0) {
+		ablkcipher_request_set_crypt(cdata->async.request,
+			(struct scatterlist *)src, dst,
+			len, cdata->async.iv);
+		ret = crypto_ablkcipher_encrypt(cdata->async.request);
+	} else {
+		aead_request_set_crypt(cdata->async.arequest,
+			(struct scatterlist *)src, dst,
+			len, cdata->async.iv);
+		ret = crypto_aead_encrypt(cdata->async.arequest);
+	}
+
+	return waitfor(cdata->async.result, ret);
+}
+
+ssize_t cryptodev_cipher_decrypt(struct cipher_data *cdata,
+		const struct scatterlist *src, struct scatterlist *dst,
+		size_t len)
+{
+	int ret;
+
+	INIT_COMPLETION(cdata->async.result->completion);
+	if (cdata->aead == 0) {
+		ablkcipher_request_set_crypt(cdata->async.request,
+			(struct scatterlist *)src, dst,
+			len, cdata->async.iv);
+		ret = crypto_ablkcipher_decrypt(cdata->async.request);
+	} else {
+		aead_request_set_crypt(cdata->async.arequest,
+			(struct scatterlist *)src, dst,
+			len, cdata->async.iv);
+		ret = crypto_aead_decrypt(cdata->async.arequest);
+	}
+
+	return waitfor(cdata->async.result, ret);
+}
+
+/* Hash functions */
+
+int cryptodev_hash_init(struct hash_data *hdata, const char *alg_name,
+			int hmac_mode, void *mackey, size_t mackeylen)
+{
+	int ret;
+
+	hdata->async.s = crypto_alloc_ahash(alg_name, 0, 0);
+	if (unlikely(IS_ERR(hdata->async.s))) {
+		dprintk(1, KERN_DEBUG, "Failed to load transform for %s\n", alg_name);
+		return -EINVAL;
+	}
+
+	/* Copy the key from user and set to TFM. */
+	if (hmac_mode != 0) {
+		ret = crypto_ahash_setkey(hdata->async.s, mackey, mackeylen);
+		if (unlikely(ret)) {
+			dprintk(1, KERN_DEBUG,
+				"Setting hmac key failed for %s-%zu.\n",
+				alg_name, mackeylen*8);
+			ret = -EINVAL;
+			goto error;
+		}
+	}
+
+	hdata->digestsize = crypto_ahash_digestsize(hdata->async.s);
+	hdata->alignmask = crypto_ahash_alignmask(hdata->async.s);
+
+	hdata->async.result = kmalloc(sizeof(*hdata->async.result), GFP_KERNEL);
+	if (unlikely(!hdata->async.result)) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	memset(hdata->async.result, 0, sizeof(*hdata->async.result));
+	init_completion(&hdata->async.result->completion);
+
+	hdata->async.request = ahash_request_alloc(hdata->async.s, GFP_KERNEL);
+	if (unlikely(!hdata->async.request)) {
+		dprintk(0, KERN_ERR, "error allocating async crypto request\n");
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	ahash_request_set_callback(hdata->async.request,
+			CRYPTO_TFM_REQ_MAY_BACKLOG,
+			cryptodev_complete, hdata->async.result);
+
+	ret = crypto_ahash_init(hdata->async.request);
+	if (unlikely(ret)) {
+		dprintk(0, KERN_ERR, "error in crypto_hash_init()\n");
+		goto error_request;
+	}
+
+	hdata->init = 1;
+	return 0;
+
+error_request:
+	ahash_request_free(hdata->async.request);
+error:
+	kfree(hdata->async.result);
+	crypto_free_ahash(hdata->async.s);
+	return ret;
+}
+
+void cryptodev_hash_deinit(struct hash_data *hdata)
+{
+	if (hdata->init) {
+		if (hdata->async.request)
+			ahash_request_free(hdata->async.request);
+		kfree(hdata->async.result);
+		if (hdata->async.s)
+			crypto_free_ahash(hdata->async.s);
+		hdata->init = 0;
+	}
+}
+
+int cryptodev_hash_reset(struct hash_data *hdata)
+{
+	int ret;
+
+	ret = crypto_ahash_init(hdata->async.request);
+	if (unlikely(ret)) {
+		dprintk(0, KERN_ERR, "error in crypto_hash_init()\n");
+		return ret;
+	}
+
+	return 0;
+
+}
+
+ssize_t cryptodev_hash_update(struct hash_data *hdata,
+				struct scatterlist *sg, size_t len)
+{
+	int ret;
+
+	INIT_COMPLETION(hdata->async.result->completion);
+	ahash_request_set_crypt(hdata->async.request, sg, NULL, len);
+
+	ret = crypto_ahash_update(hdata->async.request);
+
+	return waitfor(hdata->async.result, ret);
+}
+
+int cryptodev_hash_final(struct hash_data *hdata, void* output)
+{
+	int ret;
+
+	INIT_COMPLETION(hdata->async.result->completion);
+	ahash_request_set_crypt(hdata->async.request, NULL, output, 0);
+
+	ret = crypto_ahash_final(hdata->async.request);
+
+	return waitfor(hdata->async.result, ret);
+}
+
diff --git a/crypto/cryptodev/cryptlib.h b/crypto/cryptodev/cryptlib.h
new file mode 100644
index 0000000..1745d0f
--- /dev/null
+++ b/crypto/cryptodev/cryptlib.h
@@ -0,0 +1,90 @@
+#ifndef CRYPTLIB_H
+# define CRYPTLIB_H
+
+struct cipher_data {
+	int init; /* 0 uninitialized */
+	int blocksize;
+	int aead;
+	int stream;
+	int ivsize;
+	int alignmask;
+	struct {
+		/* block ciphers */
+		struct crypto_ablkcipher *s;
+		struct ablkcipher_request *request;
+
+		/* AEAD ciphers */
+		struct crypto_aead *as;
+		struct aead_request *arequest;
+
+		struct cryptodev_result *result;
+		uint8_t iv[EALG_MAX_BLOCK_LEN];
+	} async;
+};
+
+int cryptodev_cipher_init(struct cipher_data *out, const char *alg_name,
+			  uint8_t *key, size_t keylen, int stream, int aead);
+void cryptodev_cipher_deinit(struct cipher_data *cdata);
+ssize_t cryptodev_cipher_decrypt(struct cipher_data *cdata,
+			const struct scatterlist *sg1,
+			struct scatterlist *sg2, size_t len);
+ssize_t cryptodev_cipher_encrypt(struct cipher_data *cdata,
+				const struct scatterlist *sg1,
+				struct scatterlist *sg2, size_t len);
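+
+/* Illustrative sketch: minimal in-kernel use of the cipher wrappers
+ * declared above, assuming key, iv and sg hold caller-provided data:
+ *
+ *	struct cipher_data cdata;
+ *
+ *	if (cryptodev_cipher_init(&cdata, "cbc(aes)", key, 16, 0, 0) == 0) {
+ *		cryptodev_cipher_set_iv(&cdata, iv, 16);
+ *		cryptodev_cipher_encrypt(&cdata, sg, sg, len);
+ *		cryptodev_cipher_deinit(&cdata);
+ *	}
+ */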
+
+/* AEAD */
+static inline void cryptodev_cipher_auth(struct cipher_data *cdata,
+					 struct scatterlist *sg1, size_t len)
+{
+	/* for some reason we _have_ to call that even for zero length sgs */
+	aead_request_set_assoc(cdata->async.arequest, len ? sg1 : NULL, len);
+}
+
+static inline void cryptodev_cipher_set_tag_size(struct cipher_data *cdata, int size)
+{
+	if (likely(cdata->aead != 0))
+		crypto_aead_setauthsize(cdata->async.as, size);
+}
+
+static inline int cryptodev_cipher_get_tag_size(struct cipher_data *cdata)
+{
+	if (likely(cdata->init && cdata->aead != 0))
+		return crypto_aead_authsize(cdata->async.as);
+	else
+		return 0;
+}
+
+static inline void cryptodev_cipher_set_iv(struct cipher_data *cdata,
+				void *iv, size_t iv_size)
+{
+	memcpy(cdata->async.iv, iv, min(iv_size, sizeof(cdata->async.iv)));
+}
+
+static inline void cryptodev_cipher_get_iv(struct cipher_data *cdata,
+				void *iv, size_t iv_size)
+{
+	memcpy(iv, cdata->async.iv, min(iv_size, sizeof(cdata->async.iv)));
+}
+
+/* Hash */
+struct hash_data {
+	int init; /* 0 uninitialized */
+	int digestsize;
+	int alignmask;
+	struct {
+		struct crypto_ahash *s;
+		struct cryptodev_result *result;
+		struct ahash_request *request;
+	} async;
+};
+
+int cryptodev_hash_final(struct hash_data *hdata, void *output);
+ssize_t cryptodev_hash_update(struct hash_data *hdata,
+			struct scatterlist *sg, size_t len);
+int cryptodev_hash_reset(struct hash_data *hdata);
+void cryptodev_hash_deinit(struct hash_data *hdata);
+int cryptodev_hash_init(struct hash_data *hdata, const char *alg_name,
+			int hmac_mode, void *mackey, size_t mackeylen);
+
+
+#endif
diff --git a/crypto/cryptodev/crypto/cryptodev.h b/crypto/cryptodev/crypto/cryptodev.h
new file mode 100644
index 0000000..a2f11b1
--- /dev/null
+++ b/crypto/cryptodev/crypto/cryptodev.h
@@ -0,0 +1,292 @@
+/* This is a source-compatible implementation of the original cryptodev API
+ * by Angelos D. Keromytis, as found in OpenBSD's cryptodev.h.
+ * Placed in the public domain. */
+
+#ifndef L_CRYPTODEV_H
+#define L_CRYPTODEV_H
+
+#include <linux/types.h>
+#ifndef __KERNEL__
+#define __user
+#endif
+
+/* API extensions for linux */
+#define CRYPTO_HMAC_MAX_KEY_LEN		512
+#define CRYPTO_CIPHER_MAX_KEY_LEN	64
+
+/* All the supported algorithms
+ */
+enum cryptodev_crypto_op_t {
+	CRYPTO_DES_CBC = 1,
+	CRYPTO_3DES_CBC = 2,
+	CRYPTO_BLF_CBC = 3,
+	CRYPTO_CAST_CBC = 4,
+	CRYPTO_SKIPJACK_CBC = 5,
+	CRYPTO_MD5_HMAC = 6,
+	CRYPTO_SHA1_HMAC = 7,
+	CRYPTO_RIPEMD160_HMAC = 8,
+	CRYPTO_MD5_KPDK = 9,
+	CRYPTO_SHA1_KPDK = 10,
+	CRYPTO_RIJNDAEL128_CBC = 11,
+	CRYPTO_AES_CBC = CRYPTO_RIJNDAEL128_CBC,
+	CRYPTO_ARC4 = 12,
+	CRYPTO_MD5 = 13,
+	CRYPTO_SHA1 = 14,
+	CRYPTO_DEFLATE_COMP = 15,
+	CRYPTO_NULL = 16,
+	CRYPTO_LZS_COMP = 17,
+	CRYPTO_SHA2_256_HMAC = 18,
+	CRYPTO_SHA2_384_HMAC = 19,
+	CRYPTO_SHA2_512_HMAC = 20,
+	CRYPTO_AES_CTR = 21,
+	CRYPTO_AES_XTS = 22,
+	CRYPTO_AES_ECB = 23,
+	CRYPTO_AES_GCM = 50,
+
+	CRYPTO_CAMELLIA_CBC = 101,
+	CRYPTO_RIPEMD160,
+	CRYPTO_SHA2_224,
+	CRYPTO_SHA2_256,
+	CRYPTO_SHA2_384,
+	CRYPTO_SHA2_512,
+	CRYPTO_SHA2_224_HMAC,
+	CRYPTO_ALGORITHM_ALL, /* Keep updated - see below */
+};
+
+#define	CRYPTO_ALGORITHM_MAX	(CRYPTO_ALGORITHM_ALL - 1)
+
+/* Values for ciphers */
+#define DES_BLOCK_LEN		8
+#define DES3_BLOCK_LEN		8
+#define RIJNDAEL128_BLOCK_LEN	16
+#define AES_BLOCK_LEN		RIJNDAEL128_BLOCK_LEN
+#define CAMELLIA_BLOCK_LEN      16
+#define BLOWFISH_BLOCK_LEN	8
+#define SKIPJACK_BLOCK_LEN	8
+#define CAST128_BLOCK_LEN	8
+
+/* the maximum of the above */
+#define EALG_MAX_BLOCK_LEN	16
+
+/* Values for hashes/MAC */
+#define AALG_MAX_RESULT_LEN		64
+
+/* maximum length of verbose alg names (depends on CRYPTO_MAX_ALG_NAME) */
+#define CRYPTODEV_MAX_ALG_NAME		64
+
+#define HASH_MAX_LEN 64
+
+/* input of CIOCGSESSION */
+struct session_op {
+	/* Specify either cipher or mac
+	 */
+	__u32	cipher;		/* cryptodev_crypto_op_t */
+	__u32	mac;		/* cryptodev_crypto_op_t */
+
+	__u32	keylen;
+	__u8	__user *key;
+	__u32	mackeylen;
+	__u8	__user *mackey;
+
+	__u32	ses;		/* session identifier */
+};
+
+struct session_info_op {
+	__u32 ses;		/* session identifier */
+
+	/* verbose names for the requested ciphers */
+	struct alg_info {
+		char cra_name[CRYPTODEV_MAX_ALG_NAME];
+		char cra_driver_name[CRYPTODEV_MAX_ALG_NAME];
+	} cipher_info, hash_info;
+
+	__u16	alignmask;	/* alignment constraints */
+	__u32   flags;          /* SIOP_FLAGS_* */
+};
+
+/* If this flag is set, the algorithm uses a driver that is only
+ * available in kernel space (software drivers, or drivers based on
+ * CPU instruction sets, do not set this flag).
+ *
+ * If multiple algorithms are involved (as in the AEAD case), the flag
+ * will be set if any one of them is kernel-driver-only.
+ */
+#define SIOP_FLAG_KERNEL_DRIVER_ONLY 1
+
+#define	COP_ENCRYPT	0
+#define COP_DECRYPT	1
+
+/* input of CIOCCRYPT */
+struct crypt_op {
+	__u32	ses;		/* session identifier */
+	__u16	op;		/* COP_ENCRYPT or COP_DECRYPT */
+	__u16	flags;		/* see COP_FLAG_* */
+	__u32	len;		/* length of source data */
+	__u8	__user *src;	/* source data */
+	__u8	__user *dst;	/* pointer to output data */
+	/* pointer to output data for hash/MAC operations */
+	__u8	__user *mac;
+	/* initialization vector for encryption operations */
+	__u8	__user *iv;
+};
+
+/* input of CIOCAUTHCRYPT */
+struct crypt_auth_op {
+	__u32	ses;		/* session identifier */
+	__u16	op;		/* COP_ENCRYPT or COP_DECRYPT */
+	__u16	flags;		/* see COP_FLAG_AEAD_* */
+	__u32	len;		/* length of source data */
+	__u32	auth_len;	/* length of auth data */
+	__u8	__user *auth_src;	/* authenticated-only data */
+
+	/* The current implementation is more efficient if data are
+	 * encrypted in-place (src==dst). */
+	__u8	__user *src;	/* data to be encrypted and authenticated */
+	__u8	__user *dst;	/* pointer to output data. Must have
+	                         * space for the tag. For TLS this should
+	                         * be at least len + tag_len + block_size
+	                         * for padding */
+
+	__u8    __user *tag;    /* where the tag will be copied to. TLS mode
+                                 * doesn't use this field, as the tag is
+                                 * copied to dst. SRTP mode copies the tag
+                                 * here. */
+	__u32	tag_len;	/* length of the tag. Use zero for the digest size or maximum tag. */
+
+	/* initialization vector for encryption operations */
+	__u8	__user *iv;
+	__u32   iv_len;
+};
+
+/* In plain AEAD mode the following are required:
+ *  flags   : 0
+ *  iv      : the initialization vector (12 bytes)
+ *  auth_len: the length of the data to be authenticated
+ *  auth_src: the data to be authenticated
+ *  len     : length of data to be encrypted
+ *  src     : the data to be encrypted
+ *  dst     : space to hold the encrypted data. It must have
+ *            at least a size of len + tag_len.
+ *  tag_len : the size of the desired authentication tag or zero to use
+ *            the maximum tag output.
+ *
+ * Note that the tag pointer isn't used, because the Linux AEAD interface
+ * copies the tag just after the data.
+ */
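+
+/* Illustrative sketch: filling in struct crypt_auth_op for a plain AEAD
+ * (e.g. AES-GCM) encryption. All names below (cfd, ses, iv, aad, plain,
+ * out and their lengths) are assumed caller-provided, not part of this API:
+ *
+ *	struct crypt_auth_op cao = {0};
+ *
+ *	cao.ses      = ses;
+ *	cao.op       = COP_ENCRYPT;
+ *	cao.flags    = 0;
+ *	cao.iv       = iv;            (12-byte nonce)
+ *	cao.iv_len   = 12;
+ *	cao.auth_src = aad;
+ *	cao.auth_len = aad_len;
+ *	cao.src      = plain;
+ *	cao.len      = plain_len;
+ *	cao.dst      = out;           (at least plain_len + tag length bytes)
+ *	cao.tag_len  = 0;             (zero selects the maximum tag size)
+ *
+ *	if (ioctl(cfd, CIOCAUTHCRYPT, &cao) < 0)
+ *		perror("CIOCAUTHCRYPT");
+ */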
+
+/* In TLS mode (used for CBC ciphers that require padding)
+ * the following are required:
+ *  flags   : COP_FLAG_AEAD_TLS_TYPE
+ *  iv      : the initialization vector
+ *  auth_len: the length of the data to be authenticated only
+ *  len     : length of data to be encrypted
+ *  auth_src: the data to be authenticated
+ *  src     : the data to be encrypted
+ *  dst     : space to hold the encrypted data (preferably in-place). It
+ *            must have at least a size of len + tag_len + blocksize.
+ *  tag_len : the size of the desired authentication tag or zero to use
+ *            the default MAC output.
+ *
+ * Note that the padding used is the minimum padding.
+ */
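+
+/* Illustrative sketch: a TLS-mode record encryption, assuming a CBC +
+ * HMAC session ses; record, tls_header and their lengths are assumed
+ * caller-provided, and dst must leave room for the MAC and padding:
+ *
+ *	struct crypt_auth_op cao = {0};
+ *
+ *	cao.ses      = ses;
+ *	cao.op       = COP_ENCRYPT;
+ *	cao.flags    = COP_FLAG_AEAD_TLS_TYPE;
+ *	cao.iv       = iv;
+ *	cao.iv_len   = 16;
+ *	cao.auth_src = tls_header;    (authenticated-only data)
+ *	cao.auth_len = 13;
+ *	cao.src      = record;
+ *	cao.len      = record_len;
+ *	cao.dst      = record;        (in-place; sized record_len + tag + block)
+ *	cao.tag_len  = 0;
+ *
+ *	ioctl(cfd, CIOCAUTHCRYPT, &cao);
+ */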
+
+/* In SRTP mode the following are required:
+ *  flags   : COP_FLAG_AEAD_SRTP_TYPE
+ *  iv      : the initialization vector
+ *  auth_len: the length of the data to be authenticated. This must
+ *            include the SRTP header + the SRTP payload (data to be
+ *            encrypted) + the rest of the SRTP packet.
+ *  len     : length of data to be encrypted
+ *  auth_src: pointer to the data to be authenticated. Should point at
+ *            the same buffer as src.
+ *  src     : pointer to the data to be encrypted.
+ *  dst     : must be the same as src (in-place only).
+ *  tag_len : the size of the desired authentication tag or zero to use
+ *            the default MAC output.
+ *  tag     : pointer to an address where the authentication tag will be
+ *            copied.
+ */
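+
+/* Illustrative sketch: an SRTP-mode operation, assuming a stream-cipher +
+ * HMAC session ses; packet, hdr_len, payload_len and tag_buf are assumed
+ * caller-provided. Encryption is in-place and the tag goes to tag_buf:
+ *
+ *	struct crypt_auth_op cao = {0};
+ *
+ *	cao.ses      = ses;
+ *	cao.op       = COP_ENCRYPT;
+ *	cao.flags    = COP_FLAG_AEAD_SRTP_TYPE;
+ *	cao.iv       = iv;
+ *	cao.auth_src = packet;        (header + payload + trailer)
+ *	cao.auth_len = packet_len;
+ *	cao.src      = packet + hdr_len;
+ *	cao.len      = payload_len;
+ *	cao.dst      = packet + hdr_len;   (must equal src)
+ *	cao.tag      = tag_buf;
+ *	cao.tag_len  = 10;            (e.g. HMAC-SHA1-80)
+ *
+ *	ioctl(cfd, CIOCAUTHCRYPT, &cao);
+ */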
+
+
+/* struct crypt_op flags */
+
+#define COP_FLAG_NONE		(0 << 0) /* totally no flag */
+#define COP_FLAG_UPDATE		(1 << 0) /* multi-update hash mode */
+#define COP_FLAG_FINAL		(1 << 1) /* multi-update final hash mode */
+#define COP_FLAG_WRITE_IV	(1 << 2) /* update the IV during operation */
+#define COP_FLAG_NO_ZC		(1 << 3) /* do not zero-copy */
+#define COP_FLAG_AEAD_TLS_TYPE  (1 << 4) /* authenticate and encrypt using the 
+                                          * TLS protocol rules */
+#define COP_FLAG_AEAD_SRTP_TYPE  (1 << 5) /* authenticate and encrypt using the 
+                                           * SRTP protocol rules */
+#define COP_FLAG_RESET		(1 << 6) /* multi-update reset the state.
+                                          * should be used in combination
+                                          * with COP_FLAG_UPDATE */
+
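+/* Illustrative sketch: hashing a stream in chunks with the multi-update
+ * flags above, assuming a MAC-only session ses; fd, buf, n and digest are
+ * caller-provided and digest holds at least the digest size:
+ *
+ *	struct crypt_op cop = {0};
+ *
+ *	cop.ses   = ses;
+ *	cop.flags = COP_FLAG_UPDATE;
+ *	while ((n = read(fd, buf, sizeof(buf))) > 0) {
+ *		cop.src = buf;
+ *		cop.len = n;
+ *		if (ioctl(cfd, CIOCCRYPT, &cop) < 0)
+ *			break;
+ *	}
+ *
+ *	cop.flags = COP_FLAG_FINAL;   (retrieves the digest)
+ *	cop.len   = 0;
+ *	cop.mac   = digest;
+ *	ioctl(cfd, CIOCCRYPT, &cop);
+ */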
+
+/* Stuff for bignum arithmetic and public key
+ * cryptography - not supported yet by linux
+ * cryptodev.
+ */
+
+#define	CRYPTO_ALG_FLAG_SUPPORTED	1
+#define	CRYPTO_ALG_FLAG_RNG_ENABLE	2
+#define	CRYPTO_ALG_FLAG_DSA_SHA		4
+
+struct crparam {
+	__u8	*crp_p;
+	__u32	crp_nbits;
+};
+
+#define CRK_MAXPARAM	8
+
+/* input of CIOCKEY */
+struct crypt_kop {
+	__u32	crk_op;		/* cryptodev_crk_ot_t */
+	__u32	crk_status;
+	__u16	crk_iparams;
+	__u16	crk_oparams;
+	__u32	crk_pad1;
+	struct crparam	crk_param[CRK_MAXPARAM];
+};
+
+enum cryptodev_crk_op_t {
+	CRK_MOD_EXP = 0,
+	CRK_MOD_EXP_CRT = 1,
+	CRK_DSA_SIGN = 2,
+	CRK_DSA_VERIFY = 3,
+	CRK_DH_COMPUTE_KEY = 4,
+	CRK_ALGORITHM_ALL
+};
+
+#define CRK_ALGORITHM_MAX	(CRK_ALGORITHM_ALL-1)
+
+/* features to be queried with CIOCASYMFEAT ioctl
+ */
+#define CRF_MOD_EXP		(1 << CRK_MOD_EXP)
+#define CRF_MOD_EXP_CRT		(1 << CRK_MOD_EXP_CRT)
+#define CRF_DSA_SIGN		(1 << CRK_DSA_SIGN)
+#define CRF_DSA_VERIFY		(1 << CRK_DSA_VERIFY)
+#define CRF_DH_COMPUTE_KEY	(1 << CRK_DH_COMPUTE_KEY)
+
+
+/* ioctl's. Compatible with old linux cryptodev.h
+ */
+#define CRIOGET         _IOWR('c', 101, __u32)
+#define CIOCGSESSION    _IOWR('c', 102, struct session_op)
+#define CIOCFSESSION    _IOW('c', 103, __u32)
+#define CIOCCRYPT       _IOWR('c', 104, struct crypt_op)
+#define CIOCKEY         _IOWR('c', 105, struct crypt_kop)
+#define CIOCASYMFEAT    _IOR('c', 106, __u32)
+#define CIOCGSESSINFO	_IOWR('c', 107, struct session_info_op)
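+
+/* Illustrative sketch: typical session lifecycle for one AES-CBC
+ * encryption, assuming /dev/crypto is open as cfd and key, iv, plain
+ * and out are caller-provided buffers:
+ *
+ *	struct session_op sop = {0};
+ *	struct crypt_op cop = {0};
+ *
+ *	sop.cipher = CRYPTO_AES_CBC;
+ *	sop.keylen = 16;
+ *	sop.key    = key;
+ *	if (ioctl(cfd, CIOCGSESSION, &sop) < 0)
+ *		perror("CIOCGSESSION");
+ *
+ *	cop.ses = sop.ses;
+ *	cop.op  = COP_ENCRYPT;
+ *	cop.len = plain_len;          (a multiple of the block size)
+ *	cop.src = plain;
+ *	cop.dst = out;
+ *	cop.iv  = iv;
+ *	if (ioctl(cfd, CIOCCRYPT, &cop) < 0)
+ *		perror("CIOCCRYPT");
+ *
+ *	ioctl(cfd, CIOCFSESSION, &sop.ses);
+ */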
+
+/* to indicate that CRIOGET is not required in linux
+ */
+#define CRIOGET_NOT_NEEDED 1
+
+/* additional ioctls for AEAD */
+#define CIOCAUTHCRYPT   _IOWR('c', 109, struct crypt_auth_op)
+
+/* additional ioctls for asynchronous operation.
+ * These are conditionally enabled since version 1.6.
+ */
+#define CIOCASYNCCRYPT    _IOW('c', 110, struct crypt_op)
+#define CIOCASYNCFETCH    _IOR('c', 111, struct crypt_op)
+
+#endif /* L_CRYPTODEV_H */
diff --git a/crypto/cryptodev/cryptodev_int.h b/crypto/cryptodev/cryptodev_int.h
new file mode 100644
index 0000000..12dd5b1
--- /dev/null
+++ b/crypto/cryptodev/cryptodev_int.h
@@ -0,0 +1,134 @@
+/* cipher stuff */
+#ifndef CRYPTODEV_INT_H
+# define CRYPTODEV_INT_H
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/fdtable.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/scatterlist.h>
+#include <crypto/cryptodev.h>
+#include <crypto/aead.h>
+
+#define PFX "cryptodev: "
+#define dprintk(level, severity, format, a...)			\
+	do {							\
+		if (level <= cryptodev_verbosity)		\
+			printk(severity PFX "%s[%u] (%s:%u): " format,	\
+			       current->comm, current->pid,	\
+			       __func__, __LINE__,		\
+			       ##a);				\
+	} while (0)
+
+extern int cryptodev_verbosity;
+
+struct fcrypt {
+	struct list_head list;
+	struct mutex sem;
+};
+
+/* compatibility stuff */
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+
+/* input of CIOCGSESSION */
+struct compat_session_op {
+	/* Specify either cipher or mac
+	 */
+	uint32_t	cipher;		/* cryptodev_crypto_op_t */
+	uint32_t	mac;		/* cryptodev_crypto_op_t */
+
+	uint32_t	keylen;
+	compat_uptr_t	key;		/* pointer to key data */
+	uint32_t	mackeylen;
+	compat_uptr_t	mackey;		/* pointer to mac key data */
+
+	uint32_t	ses;		/* session identifier */
+};
+
+/* input of CIOCCRYPT */
+struct compat_crypt_op {
+	uint32_t	ses;		/* session identifier */
+	uint16_t	op;		/* COP_ENCRYPT or COP_DECRYPT */
+	uint16_t	flags;		/* see COP_FLAG_* */
+	uint32_t	len;		/* length of source data */
+	compat_uptr_t	src;		/* source data */
+	compat_uptr_t	dst;		/* pointer to output data */
+	compat_uptr_t	mac;	/* pointer to output data for hash/MAC operations */
+	compat_uptr_t	iv;	/* initialization vector for encryption operations */
+};
+
+/* compat ioctls, defined for the above structs */
+#define COMPAT_CIOCGSESSION    _IOWR('c', 102, struct compat_session_op)
+#define COMPAT_CIOCCRYPT       _IOWR('c', 104, struct compat_crypt_op)
+#define COMPAT_CIOCASYNCCRYPT  _IOW('c', 107, struct compat_crypt_op)
+#define COMPAT_CIOCASYNCFETCH  _IOR('c', 108, struct compat_crypt_op)
+
+#endif /* CONFIG_COMPAT */
+
+/* kernel-internal extension to struct crypt_op */
+struct kernel_crypt_op {
+	struct crypt_op cop;
+
+	int ivlen;
+	__u8 iv[EALG_MAX_BLOCK_LEN];
+
+	int digestsize;
+	uint8_t hash_output[AALG_MAX_RESULT_LEN];
+
+	struct task_struct *task;
+	struct mm_struct *mm;
+};
+
+struct kernel_crypt_auth_op {
+	struct crypt_auth_op caop;
+
+	int dst_len; /* based on src_len + pad + tag */
+	int ivlen;
+	__u8 iv[EALG_MAX_BLOCK_LEN];
+
+	struct task_struct *task;
+	struct mm_struct *mm;
+};
+
+/* auth */
+
+int kcaop_from_user(struct kernel_crypt_auth_op *kcop,
+			struct fcrypt *fcr, void __user *arg);
+int kcaop_to_user(struct kernel_crypt_auth_op *kcaop,
+		struct fcrypt *fcr, void __user *arg);
+int crypto_auth_run(struct fcrypt *fcr, struct kernel_crypt_auth_op *kcaop);
+int crypto_run(struct fcrypt *fcr, struct kernel_crypt_op *kcop);
+
+#include <cryptlib.h>
+
+/* other internal structs */
+struct csession {
+	struct list_head entry;
+	struct mutex sem;
+	struct cipher_data cdata;
+	struct hash_data hdata;
+	uint32_t sid;
+	uint32_t alignmask;
+
+	unsigned int array_size;
+	unsigned int used_pages; /* the number of pages that are used */
+	/* the number of pages marked as NOT-writable; they precede the writable ones */
+	unsigned int readonly_pages;
+	struct page **pages;
+	struct scatterlist *sg;
+};
+
+struct csession *crypto_get_session_by_sid(struct fcrypt *fcr, uint32_t sid);
+
+static inline void crypto_put_session(struct csession *ses_ptr)
+{
+	mutex_unlock(&ses_ptr->sem);
+}
+int adjust_sg_array(struct csession *ses, int pagecount);
+
+#endif /* CRYPTODEV_INT_H */
diff --git a/crypto/cryptodev/ioctl.c b/crypto/cryptodev/ioctl.c
new file mode 100644
index 0000000..f26cf93
--- /dev/null
+++ b/crypto/cryptodev/ioctl.c
@@ -0,0 +1,1136 @@
+/*
+ * Driver for /dev/crypto device (aka CryptoDev)
+ *
+ * Copyright (c) 2004 Michal Ludvig <mludvig@logix.net.nz>, SuSE Labs
+ * Copyright (c) 2009,2010,2011 Nikos Mavrogiannopoulos <nmav@gnutls.org>
+ * Copyright (c) 2010 Phil Sutter
+ *
+ * This file is part of linux cryptodev.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/*
+ * Device /dev/crypto provides an interface for
+ * accessing kernel CryptoAPI algorithms (ciphers,
+ * hashes) from userspace programs.
+ *
+ * /dev/crypto interface was originally introduced in
+ * OpenBSD and this module attempts to keep the API.
+ *
+ */
+
+#include <crypto/hash.h>
+#include <linux/crypto.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/ioctl.h>
+#include <linux/random.h>
+#include <linux/syscalls.h>
+#include <linux/pagemap.h>
+#include <linux/poll.h>
+#include <linux/uaccess.h>
+#include <crypto/cryptodev.h>
+#include <linux/scatterlist.h>
+#include "cryptodev_int.h"
+#include "zc.h"
+#include "version.h"
+
+MODULE_AUTHOR("Nikos Mavrogiannopoulos <nmav@gnutls.org>");
+MODULE_DESCRIPTION("CryptoDev driver");
+MODULE_LICENSE("GPL");
+
+/* ====== Compile-time config ====== */
+
+/* Default (pre-allocated) and maximum size of the job queue.
+ * These are free, pending and done items all together. */
+#define DEF_COP_RINGSIZE 16
+#define MAX_COP_RINGSIZE 64
+
+/* ====== Module parameters ====== */
+
+int cryptodev_verbosity;
+module_param(cryptodev_verbosity, int, 0644);
+MODULE_PARM_DESC(cryptodev_verbosity, "0: normal, 1: verbose, 2: debug");
+
+/* ====== CryptoAPI ====== */
+struct todo_list_item {
+	struct list_head __hook;
+	struct kernel_crypt_op kcop;
+	int result;
+};
+
+struct locked_list {
+	struct list_head list;
+	struct mutex lock;
+};
+
+struct crypt_priv {
+	struct fcrypt fcrypt;
+	struct locked_list free, todo, done;
+	int itemcount;
+	struct work_struct cryptask;
+	wait_queue_head_t user_waiter;
+};
+
+#define FILL_SG(sg, ptr, len)					\
+	do {							\
+		(sg)->page = virt_to_page(ptr);			\
+		(sg)->offset = offset_in_page(ptr);		\
+		(sg)->length = len;				\
+		(sg)->dma_address = 0;				\
+	} while (0)
+
+/* cryptodev's own workqueue, keeps crypto tasks from disturbing the force */
+static struct workqueue_struct *cryptodev_wq;
+
+/* Prepare session for future use. */
+static int
+crypto_create_session(struct fcrypt *fcr, struct session_op *sop)
+{
+	struct csession	*ses_new = NULL, *ses_ptr;
+	int ret = 0;
+	const char *alg_name = NULL;
+	const char *hash_name = NULL;
+	int hmac_mode = 1, stream = 0, aead = 0;
+
+	/* Does the request make sense? */
+	if (unlikely(!sop->cipher && !sop->mac)) {
+		dprintk(1, KERN_DEBUG, "Both 'cipher' and 'mac' unset.\n");
+		return -EINVAL;
+	}
+
+	switch (sop->cipher) {
+	case 0:
+		break;
+	case CRYPTO_DES_CBC:
+		alg_name = "cbc(des)";
+		break;
+	case CRYPTO_3DES_CBC:
+		alg_name = "cbc(des3_ede)";
+		break;
+	case CRYPTO_BLF_CBC:
+		alg_name = "cbc(blowfish)";
+		break;
+	case CRYPTO_AES_CBC:
+		alg_name = "cbc(aes)";
+		break;
+	case CRYPTO_AES_ECB:
+		alg_name = "ecb(aes)";
+		break;
+	case CRYPTO_CAMELLIA_CBC:
+		alg_name = "cbc(camellia)";
+		break;
+	case CRYPTO_AES_CTR:
+		alg_name = "ctr(aes)";
+		stream = 1;
+		break;
+	case CRYPTO_AES_GCM:
+		alg_name = "gcm(aes)";
+		stream = 1;
+		aead = 1;
+		break;
+	case CRYPTO_NULL:
+		alg_name = "ecb(cipher_null)";
+		stream = 1;
+		break;
+	default:
+		dprintk(1, KERN_DEBUG, "bad cipher: %d\n", sop->cipher);
+		return -EINVAL;
+	}
+
+	switch (sop->mac) {
+	case 0:
+		break;
+	case CRYPTO_MD5_HMAC:
+		hash_name = "hmac(md5)";
+		break;
+	case CRYPTO_RIPEMD160_HMAC:
+		hash_name = "hmac(rmd160)";
+		break;
+	case CRYPTO_SHA1_HMAC:
+		hash_name = "hmac(sha1)";
+		break;
+	case CRYPTO_SHA2_224_HMAC:
+		hash_name = "hmac(sha224)";
+		break;
+
+	case CRYPTO_SHA2_256_HMAC:
+		hash_name = "hmac(sha256)";
+		break;
+	case CRYPTO_SHA2_384_HMAC:
+		hash_name = "hmac(sha384)";
+		break;
+	case CRYPTO_SHA2_512_HMAC:
+		hash_name = "hmac(sha512)";
+		break;
+
+	/* non-hmac cases */
+	case CRYPTO_MD5:
+		hash_name = "md5";
+		hmac_mode = 0;
+		break;
+	case CRYPTO_RIPEMD160:
+		hash_name = "rmd160";
+		hmac_mode = 0;
+		break;
+	case CRYPTO_SHA1:
+		hash_name = "sha1";
+		hmac_mode = 0;
+		break;
+	case CRYPTO_SHA2_224:
+		hash_name = "sha224";
+		hmac_mode = 0;
+		break;
+	case CRYPTO_SHA2_256:
+		hash_name = "sha256";
+		hmac_mode = 0;
+		break;
+	case CRYPTO_SHA2_384:
+		hash_name = "sha384";
+		hmac_mode = 0;
+		break;
+	case CRYPTO_SHA2_512:
+		hash_name = "sha512";
+		hmac_mode = 0;
+		break;
+	default:
+		dprintk(1, KERN_DEBUG, "bad mac: %d\n", sop->mac);
+		return -EINVAL;
+	}
+
+	/* Create a session and put it to the list. */
+	ses_new = kzalloc(sizeof(*ses_new), GFP_KERNEL);
+	if (!ses_new)
+		return -ENOMEM;
+
+	/* Set-up crypto transform. */
+	if (alg_name) {
+		uint8_t keyp[CRYPTO_CIPHER_MAX_KEY_LEN];
+
+		if (unlikely(sop->keylen > CRYPTO_CIPHER_MAX_KEY_LEN)) {
+			dprintk(1, KERN_DEBUG,
+				"Setting key failed for %s-%zu.\n",
+				alg_name, (size_t)sop->keylen*8);
+			ret = -EINVAL;
+			goto error_cipher;
+		}
+
+		if (unlikely(copy_from_user(keyp, sop->key, sop->keylen))) {
+			ret = -EFAULT;
+			goto error_cipher;
+		}
+
+		ret = cryptodev_cipher_init(&ses_new->cdata, alg_name, keyp,
+						sop->keylen, stream, aead);
+		if (ret < 0) {
+			dprintk(1, KERN_DEBUG,
+				"Failed to load cipher for %s\n", alg_name);
+			ret = -EINVAL;
+			goto error_cipher;
+		}
+	}
+
+	if (hash_name && aead == 0) {
+		uint8_t keyp[CRYPTO_HMAC_MAX_KEY_LEN];
+
+		if (unlikely(sop->mackeylen > CRYPTO_HMAC_MAX_KEY_LEN)) {
+			dprintk(1, KERN_DEBUG,
+				"Setting mac key failed for %s-%zu.\n",
+				hash_name, (size_t)sop->mackeylen*8);
+			ret = -EINVAL;
+			goto error_hash;
+		}
+
+		if (sop->mackey && unlikely(copy_from_user(keyp, sop->mackey,
+					    sop->mackeylen))) {
+			ret = -EFAULT;
+			goto error_hash;
+		}
+
+		ret = cryptodev_hash_init(&ses_new->hdata, hash_name, hmac_mode,
+							keyp, sop->mackeylen);
+		if (ret != 0) {
+			dprintk(1, KERN_DEBUG, "Failed to load hash for %s\n", hash_name);
+			ret = -EINVAL;
+			goto error_hash;
+		}
+	}
+
+	ses_new->alignmask = max(ses_new->cdata.alignmask,
+	                                          ses_new->hdata.alignmask);
+	dprintk(2, KERN_DEBUG, "got alignmask %d\n", ses_new->alignmask);
+
+	ses_new->array_size = DEFAULT_PREALLOC_PAGES;
+	dprintk(2, KERN_DEBUG, "preallocating for %d user pages\n",
+			ses_new->array_size);
+	ses_new->pages = kzalloc(ses_new->array_size *
+			sizeof(struct page *), GFP_KERNEL);
+	ses_new->sg = kzalloc(ses_new->array_size *
+			sizeof(struct scatterlist), GFP_KERNEL);
+	if (ses_new->sg == NULL || ses_new->pages == NULL) {
+		dprintk(0, KERN_DEBUG, "Memory error\n");
+		ret = -ENOMEM;
+		goto error_hash;
+	}
+
+	/* put the new session to the list */
+	get_random_bytes(&ses_new->sid, sizeof(ses_new->sid));
+	mutex_init(&ses_new->sem);
+
+	mutex_lock(&fcr->sem);
+restart:
+	list_for_each_entry(ses_ptr, &fcr->list, entry) {
+		/* Check for duplicate SID */
+		if (unlikely(ses_new->sid == ses_ptr->sid)) {
+			get_random_bytes(&ses_new->sid, sizeof(ses_new->sid));
+			/* Unless we have a broken RNG this
+			   shouldn't loop forever... ;-) */
+			goto restart;
+		}
+	}
+
+	list_add(&ses_new->entry, &fcr->list);
+	mutex_unlock(&fcr->sem);
+
+	/* Fill in some values for the user. */
+	sop->ses = ses_new->sid;
+
+	return 0;
+
+error_hash:
+	cryptodev_cipher_deinit(&ses_new->cdata);
+	kfree(ses_new->sg);
+	kfree(ses_new->pages);
+error_cipher:
+	kfree(ses_new);
+
+	return ret;
+
+}
+
+/* Everything that needs to be done when removing a session. */
+static inline void
+crypto_destroy_session(struct csession *ses_ptr)
+{
+	if (!mutex_trylock(&ses_ptr->sem)) {
+		dprintk(2, KERN_DEBUG, "Waiting for semaphore of sid=0x%08X\n",
+			ses_ptr->sid);
+		mutex_lock(&ses_ptr->sem);
+	}
+	dprintk(2, KERN_DEBUG, "Removed session 0x%08X\n", ses_ptr->sid);
+	cryptodev_cipher_deinit(&ses_ptr->cdata);
+	cryptodev_hash_deinit(&ses_ptr->hdata);
+	dprintk(2, KERN_DEBUG, "freeing space for %d user pages\n",
+			ses_ptr->array_size);
+	kfree(ses_ptr->pages);
+	kfree(ses_ptr->sg);
+	mutex_unlock(&ses_ptr->sem);
+	kfree(ses_ptr);
+}
+
+/* Look up a session by ID and remove. */
+static int
+crypto_finish_session(struct fcrypt *fcr, uint32_t sid)
+{
+	struct csession *tmp, *ses_ptr;
+	struct list_head *head;
+	int ret = -ENOENT;
+
+	mutex_lock(&fcr->sem);
+	head = &fcr->list;
+	list_for_each_entry_safe(ses_ptr, tmp, head, entry) {
+		if (ses_ptr->sid == sid) {
+			list_del(&ses_ptr->entry);
+			crypto_destroy_session(ses_ptr);
+			ret = 0;
+			break;
+		}
+	}
+
+	/* ses_ptr is never NULL after list_for_each_entry_safe(), so a
+	 * found flag (ret) is used instead of a pointer check. */
+	if (unlikely(ret)) {
+		dprintk(1, KERN_ERR, "Session with sid=0x%08X not found!\n",
+			sid);
+	}
+	mutex_unlock(&fcr->sem);
+
+	return ret;
+}
+
+/* Remove all sessions when closing the file */
+static int
+crypto_finish_all_sessions(struct fcrypt *fcr)
+{
+	struct csession *tmp, *ses_ptr;
+	struct list_head *head;
+
+	mutex_lock(&fcr->sem);
+
+	head = &fcr->list;
+	list_for_each_entry_safe(ses_ptr, tmp, head, entry) {
+		list_del(&ses_ptr->entry);
+		crypto_destroy_session(ses_ptr);
+	}
+	mutex_unlock(&fcr->sem);
+
+	return 0;
+}
+
+/* Look up session by session ID. The returned session is locked. */
+struct csession *
+crypto_get_session_by_sid(struct fcrypt *fcr, uint32_t sid)
+{
+	struct csession *ses_ptr, *retval = NULL;
+
+	if (unlikely(fcr == NULL))
+		return NULL;
+
+	mutex_lock(&fcr->sem);
+	list_for_each_entry(ses_ptr, &fcr->list, entry) {
+		if (ses_ptr->sid == sid) {
+			mutex_lock(&ses_ptr->sem);
+			retval = ses_ptr;
+			break;
+		}
+	}
+	mutex_unlock(&fcr->sem);
+
+	return retval;
+}
+
+static void cryptask_routine(struct work_struct *work)
+{
+	struct crypt_priv *pcr = container_of(work, struct crypt_priv, cryptask);
+	struct todo_list_item *item;
+	LIST_HEAD(tmp);
+
+	/* fetch all pending jobs into the temporary list */
+	mutex_lock(&pcr->todo.lock);
+	list_cut_position(&tmp, &pcr->todo.list, pcr->todo.list.prev);
+	mutex_unlock(&pcr->todo.lock);
+
+	/* handle each job locklessly */
+	list_for_each_entry(item, &tmp, __hook) {
+		item->result = crypto_run(&pcr->fcrypt, &item->kcop);
+		if (unlikely(item->result))
+			dprintk(0, KERN_ERR, "crypto_run() failed: %d\n",
+					item->result);
+	}
+
+	/* push all handled jobs to the done list at once */
+	mutex_lock(&pcr->done.lock);
+	list_splice_tail(&tmp, &pcr->done.list);
+	mutex_unlock(&pcr->done.lock);
+
+	/* wake for POLLIN */
+	wake_up_interruptible(&pcr->user_waiter);
+}
+
+/* ====== /dev/crypto ====== */
+
+static int
+cryptodev_open(struct inode *inode, struct file *filp)
+{
+	struct todo_list_item *tmp;
+	struct crypt_priv *pcr;
+	int i;
+
+	pcr = kmalloc(sizeof(*pcr), GFP_KERNEL);
+	if (!pcr)
+		return -ENOMEM;
+
+	memset(pcr, 0, sizeof(*pcr));
+	mutex_init(&pcr->fcrypt.sem);
+	INIT_LIST_HEAD(&pcr->fcrypt.list);
+
+	INIT_LIST_HEAD(&pcr->free.list);
+	INIT_LIST_HEAD(&pcr->todo.list);
+	INIT_LIST_HEAD(&pcr->done.list);
+	INIT_WORK(&pcr->cryptask, cryptask_routine);
+	mutex_init(&pcr->free.lock);
+	mutex_init(&pcr->todo.lock);
+	mutex_init(&pcr->done.lock);
+	init_waitqueue_head(&pcr->user_waiter);
+
+	for (i = 0; i < DEF_COP_RINGSIZE; i++) {
+		tmp = kzalloc(sizeof(struct todo_list_item), GFP_KERNEL);
+		if (!tmp) {
+			struct todo_list_item *item, *n;
+
+			/* roll back the partial allocation, do not leak pcr */
+			list_for_each_entry_safe(item, n, &pcr->free.list, __hook) {
+				list_del(&item->__hook);
+				kfree(item);
+			}
+			kfree(pcr);
+			return -ENOMEM;
+		}
+		pcr->itemcount++;
+		dprintk(2, KERN_DEBUG, "allocated new item at %lx\n",
+				(unsigned long)tmp);
+		list_add(&tmp->__hook, &pcr->free.list);
+	}
+
+	filp->private_data = pcr;
+	dprintk(2, KERN_DEBUG,
+	        "Cryptodev handle initialised, %d elements in queue\n",
+		DEF_COP_RINGSIZE);
+	return 0;
+}
+
+static int
+cryptodev_release(struct inode *inode, struct file *filp)
+{
+	struct crypt_priv *pcr = filp->private_data;
+	struct todo_list_item *item, *item_safe;
+	int items_freed = 0;
+
+	if (!pcr)
+		return 0;
+
+	cancel_work_sync(&pcr->cryptask);
+
+	mutex_destroy(&pcr->todo.lock);
+	mutex_destroy(&pcr->done.lock);
+	mutex_destroy(&pcr->free.lock);
+
+	list_splice_tail(&pcr->todo.list, &pcr->free.list);
+	list_splice_tail(&pcr->done.list, &pcr->free.list);
+
+	list_for_each_entry_safe(item, item_safe, &pcr->free.list, __hook) {
+		dprintk(2, KERN_DEBUG, "freeing item at %lx\n",
+				(unsigned long)item);
+		list_del(&item->__hook);
+		kfree(item);
+		items_freed++;
+
+	}
+	if (items_freed != pcr->itemcount) {
+		dprintk(0, KERN_ERR,
+		        "freed %d items, but %d should exist!\n",
+		        items_freed, pcr->itemcount);
+	}
+
+	crypto_finish_all_sessions(&pcr->fcrypt);
+	kfree(pcr);
+	filp->private_data = NULL;
+
+	dprintk(2, KERN_DEBUG,
+	        "Cryptodev handle deinitialised, %d elements freed\n",
+	        items_freed);
+	return 0;
+}
+
+static int
+clonefd(struct file *filp)
+{
+	int ret;
+	ret = get_unused_fd();
+	if (ret >= 0) {
+			get_file(filp);
+			fd_install(ret, filp);
+	}
+
+	return ret;
+}
+
+#ifdef ENABLE_ASYNC
+/* enqueue a job for asynchronous completion
+ *
+ * returns:
+ * -EBUSY when there are no free queue slots left
+ *        (and the number of slots has reached its maximum, MAX_COP_RINGSIZE)
+ * -EFAULT when there was a memory allocation error
+ * 0 on success */
+static int crypto_async_run(struct crypt_priv *pcr, struct kernel_crypt_op *kcop)
+{
+	struct todo_list_item *item = NULL;
+	
+	if (unlikely(kcop->cop.flags & COP_FLAG_NO_ZC))
+		return -EINVAL;
+
+	mutex_lock(&pcr->free.lock);
+	if (likely(!list_empty(&pcr->free.list))) {
+		item = list_first_entry(&pcr->free.list,
+				struct todo_list_item, __hook);
+		list_del(&item->__hook);
+	} else if (pcr->itemcount < MAX_COP_RINGSIZE) {
+		pcr->itemcount++;
+	} else {
+		mutex_unlock(&pcr->free.lock);
+		return -EBUSY;
+	}
+	mutex_unlock(&pcr->free.lock);
+
+	if (unlikely(!item)) {
+		item = kzalloc(sizeof(struct todo_list_item), GFP_KERNEL);
+		if (unlikely(!item))
+			return -EFAULT;
+		dprintk(1, KERN_INFO, "increased item count to %d\n",
+				pcr->itemcount);
+	}
+
+	memcpy(&item->kcop, kcop, sizeof(struct kernel_crypt_op));
+
+	mutex_lock(&pcr->todo.lock);
+	list_add_tail(&item->__hook, &pcr->todo.list);
+	mutex_unlock(&pcr->todo.lock);
+
+	queue_work(cryptodev_wq, &pcr->cryptask);
+	return 0;
+}
+
+/* get the first completed job from the "done" queue
+ *
+ * returns:
+ * -EBUSY if no completed jobs are ready (yet)
+ * the return value of crypto_run() otherwise */
+static int crypto_async_fetch(struct crypt_priv *pcr,
+		struct kernel_crypt_op *kcop)
+{
+	struct todo_list_item *item;
+	int retval;
+
+	mutex_lock(&pcr->done.lock);
+	if (list_empty(&pcr->done.list)) {
+		mutex_unlock(&pcr->done.lock);
+		return -EBUSY;
+	}
+	item = list_first_entry(&pcr->done.list, struct todo_list_item, __hook);
+	list_del(&item->__hook);
+	mutex_unlock(&pcr->done.lock);
+
+	memcpy(kcop, &item->kcop, sizeof(struct kernel_crypt_op));
+	retval = item->result;
+
+	mutex_lock(&pcr->free.lock);
+	list_add_tail(&item->__hook, &pcr->free.list);
+	mutex_unlock(&pcr->free.lock);
+
+	/* wake for POLLOUT */
+	wake_up_interruptible(&pcr->user_waiter);
+
+	return retval;
+}
+#endif
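+
+/* Illustrative sketch: driving the async queue from userspace, assuming
+ * the module is built with ENABLE_ASYNC, cfd is the open /dev/crypto fd
+ * and cop was filled in exactly as for CIOCCRYPT:
+ *
+ *	if (ioctl(cfd, CIOCASYNCCRYPT, &cop) < 0)
+ *		perror("CIOCASYNCCRYPT");        (EBUSY: the ring is full)
+ *
+ *	struct pollfd pfd = { .fd = cfd, .events = POLLIN };
+ *	poll(&pfd, 1, -1);                       (wait for a completed job)
+ *
+ *	if (ioctl(cfd, CIOCASYNCFETCH, &cop) < 0)
+ *		perror("CIOCASYNCFETCH");        (EBUSY: nothing done yet)
+ */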
+
+/* this function has to be called from process context */
+static int fill_kcop_from_cop(struct kernel_crypt_op *kcop, struct fcrypt *fcr)
+{
+	struct crypt_op *cop = &kcop->cop;
+	struct csession *ses_ptr;
+	int rc;
+
+	/* this also enters ses_ptr->sem */
+	ses_ptr = crypto_get_session_by_sid(fcr, cop->ses);
+	if (unlikely(!ses_ptr)) {
+		dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", cop->ses);
+		return -EINVAL;
+	}
+	kcop->ivlen = cop->iv ? ses_ptr->cdata.ivsize : 0;
+	kcop->digestsize = 0; /* will be updated during operation */
+
+	crypto_put_session(ses_ptr);
+
+	kcop->task = current;
+	kcop->mm = current->mm;
+
+	if (cop->iv) {
+		rc = copy_from_user(kcop->iv, cop->iv, kcop->ivlen);
+		if (unlikely(rc)) {
+			dprintk(1, KERN_ERR,
+				"error copying IV (%d bytes), copy_from_user returned %d for address %lx\n",
+				kcop->ivlen, rc, (unsigned long)cop->iv);
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+/* this function has to be called from process context */
+static int fill_cop_from_kcop(struct kernel_crypt_op *kcop, struct fcrypt *fcr)
+{
+	int ret;
+
+	if (kcop->digestsize) {
+		ret = copy_to_user(kcop->cop.mac,
+				kcop->hash_output, kcop->digestsize);
+		if (unlikely(ret))
+			return -EFAULT;
+	}
+	if (kcop->ivlen && kcop->cop.flags & COP_FLAG_WRITE_IV) {
+		ret = copy_to_user(kcop->cop.iv,
+				kcop->iv, kcop->ivlen);
+		if (unlikely(ret))
+			return -EFAULT;
+	}
+	return 0;
+}
+
+static int kcop_from_user(struct kernel_crypt_op *kcop,
+			struct fcrypt *fcr, void __user *arg)
+{
+	if (unlikely(copy_from_user(&kcop->cop, arg, sizeof(kcop->cop))))
+		return -EFAULT;
+
+	return fill_kcop_from_cop(kcop, fcr);
+}
+
+static int kcop_to_user(struct kernel_crypt_op *kcop,
+			struct fcrypt *fcr, void __user *arg)
+{
+	int ret;
+
+	ret = fill_cop_from_kcop(kcop, fcr);
+	if (unlikely(ret)) {
+		dprintk(1, KERN_ERR, "Error in fill_cop_from_kcop\n");
+		return ret;
+	}
+
+	if (unlikely(copy_to_user(arg, &kcop->cop, sizeof(kcop->cop)))) {
+		dprintk(1, KERN_ERR, "Cannot copy to userspace\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static inline void tfm_info_to_alg_info(struct alg_info *dst, struct crypto_tfm *tfm)
+{
+	snprintf(dst->cra_name, CRYPTODEV_MAX_ALG_NAME,
+			"%s", crypto_tfm_alg_name(tfm));
+	snprintf(dst->cra_driver_name, CRYPTODEV_MAX_ALG_NAME,
+			"%s", crypto_tfm_alg_driver_name(tfm));
+}
+
+static unsigned int is_known_accelerated(struct crypto_tfm *tfm)
+{
+	const char *name = crypto_tfm_alg_driver_name(tfm);
+
+	if (name == NULL)
+		return 1; /* assume accelerated */
+
+	if (strstr(name, "-talitos") ||
+	    strncmp(name, "mv-", 3) == 0 ||
+	    strstr(name, "geode") ||
+	    strstr(name, "hifn") ||
+	    strstr(name, "-ixp4xx") ||
+	    strstr(name, "-omap") ||
+	    strstr(name, "-picoxcell") ||
+	    strstr(name, "-s5p") ||
+	    strstr(name, "-ppc4xx") ||
+	    strstr(name, "-caam") ||
+	    strstr(name, "-n2"))
+		return 1;
+
+	return 0;
+}
+
+static int get_session_info(struct fcrypt *fcr, struct session_info_op *siop)
+{
+	struct csession *ses_ptr;
+	struct crypto_tfm *tfm;
+
+	/* this also enters ses_ptr->sem */
+	ses_ptr = crypto_get_session_by_sid(fcr, siop->ses);
+	if (unlikely(!ses_ptr)) {
+		dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", siop->ses);
+		return -EINVAL;
+	}
+
+	siop->flags = 0;
+
+	if (ses_ptr->cdata.init) {
+		if (ses_ptr->cdata.aead == 0) {
+			tfm = crypto_ablkcipher_tfm(ses_ptr->cdata.async.s);
+		} else {
+			tfm = crypto_aead_tfm(ses_ptr->cdata.async.as);
+		}
+		tfm_info_to_alg_info(&siop->cipher_info, tfm);
+#ifdef CRYPTO_ALG_KERN_DRIVER_ONLY
+		if (tfm->__crt_alg->cra_flags & CRYPTO_ALG_KERN_DRIVER_ONLY)
+			siop->flags |= SIOP_FLAG_KERNEL_DRIVER_ONLY;
+#else
+		if (is_known_accelerated(tfm))
+			siop->flags |= SIOP_FLAG_KERNEL_DRIVER_ONLY;
+#endif
+	}
+	if (ses_ptr->hdata.init) {
+		tfm = crypto_ahash_tfm(ses_ptr->hdata.async.s);
+		tfm_info_to_alg_info(&siop->hash_info, tfm);
+#ifdef CRYPTO_ALG_KERN_DRIVER_ONLY
+		if (tfm->__crt_alg->cra_flags & CRYPTO_ALG_KERN_DRIVER_ONLY)
+			siop->flags |= SIOP_FLAG_KERNEL_DRIVER_ONLY;
+#else
+		if (is_known_accelerated(tfm))
+			siop->flags |= SIOP_FLAG_KERNEL_DRIVER_ONLY;
+#endif
+	}
+
+	siop->alignmask = ses_ptr->alignmask;
+
+	crypto_put_session(ses_ptr);
+	return 0;
+}
+
+static long
+cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)
+{
+	void __user *arg = (void __user *)arg_;
+	int __user *p = arg;
+	struct session_op sop;
+	struct kernel_crypt_op kcop;
+	struct kernel_crypt_auth_op kcaop;
+	struct crypt_priv *pcr = filp->private_data;
+	struct fcrypt *fcr;
+	struct session_info_op siop;
+	uint32_t ses;
+	int ret, fd;
+
+	BUG_ON(!pcr);
+
+	fcr = &pcr->fcrypt;
+
+	switch (cmd) {
+	case CIOCASYMFEAT:
+		return put_user(0, p);
+	case CRIOGET:
+		fd = clonefd(filp);
+		ret = put_user(fd, p);
+		if (unlikely(ret)) {
+			sys_close(fd);
+			return ret;
+		}
+		return 0;
+	case CIOCGSESSION:
+		if (unlikely(copy_from_user(&sop, arg, sizeof(sop))))
+			return -EFAULT;
+
+		ret = crypto_create_session(fcr, &sop);
+		if (unlikely(ret))
+			return ret;
+		if (unlikely(copy_to_user(arg, &sop, sizeof(sop)))) {
+			crypto_finish_session(fcr, sop.ses);
+			return -EFAULT;
+		}
+		return 0;
+	case CIOCFSESSION:
+		ret = get_user(ses, (uint32_t __user *)arg);
+		if (unlikely(ret))
+			return ret;
+		ret = crypto_finish_session(fcr, ses);
+		return ret;
+	case CIOCGSESSINFO:
+		if (unlikely(copy_from_user(&siop, arg, sizeof(siop))))
+			return -EFAULT;
+
+		ret = get_session_info(fcr, &siop);
+		if (unlikely(ret))
+			return ret;
+		if (unlikely(copy_to_user(arg, &siop, sizeof(siop))))
+			return -EFAULT;
+		return 0;
+	case CIOCCRYPT:
+		ret = kcop_from_user(&kcop, fcr, arg);
+		if (unlikely(ret)) {
+			dprintk(1, KERN_WARNING, "Error copying from user\n");
+			return ret;
+		}
+
+		ret = crypto_run(fcr, &kcop);
+		if (unlikely(ret)) {
+			dprintk(1, KERN_WARNING, "Error in crypto_run\n");
+			return ret;
+		}
+
+		return kcop_to_user(&kcop, fcr, arg);
+	case CIOCAUTHCRYPT:
+		ret = kcaop_from_user(&kcaop, fcr, arg);
+		if (unlikely(ret)) {
+			dprintk(1, KERN_WARNING, "Error copying from user\n");
+			return ret;
+		}
+
+		ret = crypto_auth_run(fcr, &kcaop);
+		if (unlikely(ret)) {
+			dprintk(1, KERN_WARNING, "Error in crypto_auth_run\n");
+			return ret;
+		}
+		return kcaop_to_user(&kcaop, fcr, arg);
+#ifdef ENABLE_ASYNC
+	case CIOCASYNCCRYPT:
+		ret = kcop_from_user(&kcop, fcr, arg);
+		if (unlikely(ret))
+			return ret;
+
+		return crypto_async_run(pcr, &kcop);
+	case CIOCASYNCFETCH:
+		ret = crypto_async_fetch(pcr, &kcop);
+		if (unlikely(ret))
+			return ret;
+
+		return kcop_to_user(&kcop, fcr, arg);
+#endif
+	default:
+		return -EINVAL;
+	}
+}
+
+/* compatibility code for 32bit userlands */
+#ifdef CONFIG_COMPAT
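+
+/*
+ * A 32-bit process stores user pointers as 32-bit integers, so each
+ * ioctl argument is converted field by field: compat_ptr() widens a
+ * 32-bit user pointer into a native void __user *, and ptr_to_compat()
+ * narrows it back. As a rough sketch of the round trip (illustrative
+ * only, not part of the driver):
+ *
+ *	void __user *p = compat_ptr(compat->key);	// 32-bit -> native
+ *	compat->key = ptr_to_compat(p);			// native -> 32-bit
+ */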
+
+static inline void
+compat_to_session_op(struct compat_session_op *compat, struct session_op *sop)
+{
+	sop->cipher = compat->cipher;
+	sop->mac = compat->mac;
+	sop->keylen = compat->keylen;
+
+	sop->key       = compat_ptr(compat->key);
+	sop->mackeylen = compat->mackeylen;
+	sop->mackey    = compat_ptr(compat->mackey);
+	sop->ses       = compat->ses;
+}
+
+static inline void
+session_op_to_compat(struct session_op *sop, struct compat_session_op *compat)
+{
+	compat->cipher = sop->cipher;
+	compat->mac = sop->mac;
+	compat->keylen = sop->keylen;
+
+	compat->key       = ptr_to_compat(sop->key);
+	compat->mackeylen = sop->mackeylen;
+	compat->mackey    = ptr_to_compat(sop->mackey);
+	compat->ses       = sop->ses;
+}
+
+static inline void
+compat_to_crypt_op(struct compat_crypt_op *compat, struct crypt_op *cop)
+{
+	cop->ses = compat->ses;
+	cop->op = compat->op;
+	cop->flags = compat->flags;
+	cop->len = compat->len;
+
+	cop->src = compat_ptr(compat->src);
+	cop->dst = compat_ptr(compat->dst);
+	cop->mac = compat_ptr(compat->mac);
+	cop->iv  = compat_ptr(compat->iv);
+}
+
+static inline void
+crypt_op_to_compat(struct crypt_op *cop, struct compat_crypt_op *compat)
+{
+	compat->ses = cop->ses;
+	compat->op = cop->op;
+	compat->flags = cop->flags;
+	compat->len = cop->len;
+
+	compat->src = ptr_to_compat(cop->src);
+	compat->dst = ptr_to_compat(cop->dst);
+	compat->mac = ptr_to_compat(cop->mac);
+	compat->iv  = ptr_to_compat(cop->iv);
+}
+
+static int compat_kcop_from_user(struct kernel_crypt_op *kcop,
+                                 struct fcrypt *fcr, void __user *arg)
+{
+	struct compat_crypt_op compat_cop;
+
+	if (unlikely(copy_from_user(&compat_cop, arg, sizeof(compat_cop))))
+		return -EFAULT;
+	compat_to_crypt_op(&compat_cop, &kcop->cop);
+
+	return fill_kcop_from_cop(kcop, fcr);
+}
+
+static int compat_kcop_to_user(struct kernel_crypt_op *kcop,
+                                 struct fcrypt *fcr, void __user *arg)
+{
+	int ret;
+	struct compat_crypt_op compat_cop;
+
+	ret = fill_cop_from_kcop(kcop, fcr);
+	if (unlikely(ret)) {
+		dprintk(1, KERN_WARNING, "Error in fill_cop_from_kcop\n");
+		return ret;
+	}
+	crypt_op_to_compat(&kcop->cop, &compat_cop);
+
+	if (unlikely(copy_to_user(arg, &compat_cop, sizeof(compat_cop)))) {
+		dprintk(1, KERN_WARNING, "Error copying to user\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static long
+cryptodev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg_)
+{
+	void __user *arg = (void __user *)arg_;
+	struct crypt_priv *pcr = file->private_data;
+	struct fcrypt *fcr;
+	struct session_op sop;
+	struct compat_session_op compat_sop;
+	struct kernel_crypt_op kcop;
+	int ret;
+
+	BUG_ON(!pcr);
+
+	fcr = &pcr->fcrypt;
+
+	switch (cmd) {
+	case CIOCASYMFEAT:
+	case CRIOGET:
+	case CIOCFSESSION:
+	case CIOCGSESSINFO:
+		return cryptodev_ioctl(file, cmd, arg_);
+
+	case COMPAT_CIOCGSESSION:
+		if (unlikely(copy_from_user(&compat_sop, arg,
+					    sizeof(compat_sop))))
+			return -EFAULT;
+		compat_to_session_op(&compat_sop, &sop);
+
+		ret = crypto_create_session(fcr, &sop);
+		if (unlikely(ret))
+			return ret;
+
+		session_op_to_compat(&sop, &compat_sop);
+		if (unlikely(copy_to_user(arg, &compat_sop,
+					  sizeof(compat_sop)))) {
+			crypto_finish_session(fcr, sop.ses);
+			return -EFAULT;
+		}
+		return 0;
+
+	case COMPAT_CIOCCRYPT:
+		ret = compat_kcop_from_user(&kcop, fcr, arg);
+		if (unlikely(ret))
+			return ret;
+
+		ret = crypto_run(fcr, &kcop);
+		if (unlikely(ret))
+			return ret;
+
+		return compat_kcop_to_user(&kcop, fcr, arg);
+#ifdef ENABLE_ASYNC
+	case COMPAT_CIOCASYNCCRYPT:
+		ret = compat_kcop_from_user(&kcop, fcr, arg);
+		if (unlikely(ret))
+			return ret;
+
+		return crypto_async_run(pcr, &kcop);
+	case COMPAT_CIOCASYNCFETCH:
+		ret = crypto_async_fetch(pcr, &kcop);
+		if (unlikely(ret))
+			return ret;
+
+		return compat_kcop_to_user(&kcop, fcr, arg);
+#endif
+	default:
+		return -EINVAL;
+	}
+}
+
+#endif /* CONFIG_COMPAT */
+
+static unsigned int cryptodev_poll(struct file *file, poll_table *wait)
+{
+	struct crypt_priv *pcr = file->private_data;
+	int ret = 0;
+
+	poll_wait(file, &pcr->user_waiter, wait);
+
+	if (!list_empty_careful(&pcr->done.list))
+		ret |= POLLIN | POLLRDNORM;
+	if (!list_empty_careful(&pcr->free.list) || pcr->itemcount < MAX_COP_RINGSIZE)
+		ret |= POLLOUT | POLLWRNORM;
+
+	return ret;
+}
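+
+/*
+ * Userspace is expected to poll() the descriptor and issue
+ * CIOCASYNCFETCH once POLLIN is signalled. A minimal sketch of that
+ * loop (illustrative only; see tests/async_speed.c for a complete
+ * example):
+ *
+ *	struct pollfd pfd = { .fd = cfd, .events = POLLIN };
+ *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
+ *		ioctl(cfd, CIOCASYNCFETCH, &cop);
+ */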
+
+static const struct file_operations cryptodev_fops = {
+	.owner = THIS_MODULE,
+	.open = cryptodev_open,
+	.release = cryptodev_release,
+	.unlocked_ioctl = cryptodev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = cryptodev_compat_ioctl,
+#endif /* CONFIG_COMPAT */
+	.poll = cryptodev_poll,
+};
+
+static struct miscdevice cryptodev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "crypto",
+	.fops = &cryptodev_fops,
+	.mode = S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH,
+};
+
+static int __init
+cryptodev_register(void)
+{
+	int rc;
+
+	rc = misc_register(&cryptodev);
+	if (unlikely(rc)) {
+		printk(KERN_ERR PFX "registration of /dev/crypto failed\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+static void __exit
+cryptodev_deregister(void)
+{
+	misc_deregister(&cryptodev);
+}
+
+/* ====== Module init/exit ====== */
+static int __init init_cryptodev(void)
+{
+	int rc;
+
+	cryptodev_wq = create_workqueue("cryptodev_queue");
+	if (unlikely(!cryptodev_wq)) {
+		printk(KERN_ERR PFX "failed to allocate the cryptodev workqueue\n");
+		return -ENOMEM;
+	}
+
+	rc = cryptodev_register();
+	if (unlikely(rc)) {
+		destroy_workqueue(cryptodev_wq);
+		return rc;
+	}
+
+	printk(KERN_INFO PFX "driver %s loaded.\n", VERSION);
+
+	return 0;
+}
+
+static void __exit exit_cryptodev(void)
+{
+	flush_workqueue(cryptodev_wq);
+	destroy_workqueue(cryptodev_wq);
+
+	cryptodev_deregister();
+	printk(KERN_INFO PFX "driver unloaded.\n");
+}
+
+module_init(init_cryptodev);
+module_exit(exit_cryptodev);
+
diff --git a/crypto/cryptodev/main.c b/crypto/cryptodev/main.c
new file mode 100644
index 0000000..fe6d390
--- /dev/null
+++ b/crypto/cryptodev/main.c
@@ -0,0 +1,257 @@
+/*
+ * Driver for /dev/crypto device (aka CryptoDev)
+ *
+ * Copyright (c) 2004 Michal Ludvig <mludvig@logix.net.nz>, SuSE Labs
+ * Copyright (c) 2009,2010 Nikos Mavrogiannopoulos <nmav@gnutls.org>
+ *
+ * This file is part of linux cryptodev.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/*
+ * Device /dev/crypto provides an interface for
+ * accessing kernel CryptoAPI algorithms (ciphers,
+ * hashes) from userspace programs.
+ *
+ * /dev/crypto interface was originally introduced in
+ * OpenBSD and this module attempts to keep the API.
+ *
+ */
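+
+/*
+ * A minimal synchronous usage sketch from userspace (illustrative
+ * only; error handling omitted, see tests/cipher.c for a complete
+ * example):
+ *
+ *	int fd = open("/dev/crypto", O_RDWR);
+ *	struct session_op sess = { .cipher = CRYPTO_AES_CBC,
+ *				   .keylen = 16, .key = key };
+ *	ioctl(fd, CIOCGSESSION, &sess);
+ *	struct crypt_op cop = { .ses = sess.ses, .op = COP_ENCRYPT,
+ *				.len = len, .src = in, .dst = out,
+ *				.iv = iv };
+ *	ioctl(fd, CIOCCRYPT, &cop);
+ *	ioctl(fd, CIOCFSESSION, &sess.ses);
+ */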
+#include <crypto/hash.h>
+#include <linux/crypto.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/ioctl.h>
+#include <linux/random.h>
+#include <linux/syscalls.h>
+#include <linux/pagemap.h>
+#include <linux/poll.h>
+#include <linux/uaccess.h>
+#include <crypto/cryptodev.h>
+#include <crypto/scatterwalk.h>
+#include <linux/scatterlist.h>
+#include "cryptodev_int.h"
+#include "zc.h"
+#include "cryptlib.h"
+#include "version.h"
+
+/* This file contains the traditional operations of encryption
+ * and hashing of /dev/crypto.
+ */
+
+static int
+hash_n_crypt(struct csession *ses_ptr, struct crypt_op *cop,
+		struct scatterlist *src_sg, struct scatterlist *dst_sg,
+		uint32_t len)
+{
+	int ret;
+
+	/* Always hash before encryption and after decryption, i.e. the
+	 * MAC is computed over the plaintext in both directions
+	 * (MAC-then-encrypt). A flag to switch the order may be
+	 * introduced later.
+	 */
+	if (cop->op == COP_ENCRYPT) {
+		if (ses_ptr->hdata.init != 0) {
+			ret = cryptodev_hash_update(&ses_ptr->hdata,
+							src_sg, len);
+			if (unlikely(ret))
+				goto out_err;
+		}
+		if (ses_ptr->cdata.init != 0) {
+			ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
+							src_sg, dst_sg, len);
+
+			if (unlikely(ret))
+				goto out_err;
+		}
+	} else {
+		if (ses_ptr->cdata.init != 0) {
+			ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
+							src_sg, dst_sg, len);
+
+			if (unlikely(ret))
+				goto out_err;
+		}
+
+		if (ses_ptr->hdata.init != 0) {
+			ret = cryptodev_hash_update(&ses_ptr->hdata,
+								dst_sg, len);
+			if (unlikely(ret))
+				goto out_err;
+		}
+	}
+	return 0;
+out_err:
+	dprintk(0, KERN_ERR, "CryptoAPI failure: %d\n", ret);
+	return ret;
+}
+
+/* This is the main crypto function - feed it with plaintext
+   and get a ciphertext (or vice versa :-) */
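+/* Data is bounced through a single page-sized kernel buffer: each
+ * iteration copies up to PAGE_SIZE bytes in from userspace, runs the
+ * cipher/hash over that chunk in place, and copies the result back. */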
+static int
+__crypto_run_std(struct csession *ses_ptr, struct crypt_op *cop)
+{
+	char *data;
+	char __user *src, *dst;
+	struct scatterlist sg;
+	size_t nbytes, bufsize;
+	int ret = 0;
+
+	nbytes = cop->len;
+	data = (char *)__get_free_page(GFP_KERNEL);
+
+	if (unlikely(!data)) {
+		dprintk(1, KERN_ERR, "Error getting free page.\n");
+		return -ENOMEM;
+	}
+
+	bufsize = PAGE_SIZE < nbytes ? PAGE_SIZE : nbytes;
+
+	src = cop->src;
+	dst = cop->dst;
+
+	while (nbytes > 0) {
+		size_t current_len = nbytes > bufsize ? bufsize : nbytes;
+
+		if (unlikely(copy_from_user(data, src, current_len))) {
+			dprintk(1, KERN_ERR, "Error copying %d bytes from user address %p.\n",
+				(int)current_len, src);
+			ret = -EFAULT;
+			break;
+		}
+
+		sg_init_one(&sg, data, current_len);
+
+		ret = hash_n_crypt(ses_ptr, cop, &sg, &sg, current_len);
+
+		if (unlikely(ret)) {
+			dprintk(1, KERN_ERR, "hash_n_crypt failed.\n");
+			break;
+		}
+
+		if (ses_ptr->cdata.init != 0) {
+			if (unlikely(copy_to_user(dst, data, current_len))) {
+				dprintk(1, KERN_ERR, "could not copy to user.\n");
+				ret = -EFAULT;
+				break;
+			}
+		}
+
+		dst += current_len;
+		nbytes -= current_len;
+		src += current_len;
+	}
+
+	free_page((unsigned long)data);
+	return ret;
+}
+
+/* This is the main crypto function - zero-copy edition */
+static int
+__crypto_run_zc(struct csession *ses_ptr, struct kernel_crypt_op *kcop)
+{
+	struct scatterlist *src_sg, *dst_sg;
+	struct crypt_op *cop = &kcop->cop;
+	int ret = 0;
+
+	ret = get_userbuf(ses_ptr, cop->src, cop->len, cop->dst, cop->len,
+	                  kcop->task, kcop->mm, &src_sg, &dst_sg);
+	if (unlikely(ret)) {
+		dprintk(1, KERN_ERR, "Error getting user pages. "
+					"Falling back to non zero copy.\n");
+		return __crypto_run_std(ses_ptr, cop);
+	}
+
+	ret = hash_n_crypt(ses_ptr, cop, src_sg, dst_sg, cop->len);
+
+	release_user_pages(ses_ptr);
+	return ret;
+}
+
+int crypto_run(struct fcrypt *fcr, struct kernel_crypt_op *kcop)
+{
+	struct csession *ses_ptr;
+	struct crypt_op *cop = &kcop->cop;
+	int ret;
+
+	if (unlikely(cop->op != COP_ENCRYPT && cop->op != COP_DECRYPT)) {
+		dprintk(1, KERN_DEBUG, "invalid operation op=%u\n", cop->op);
+		return -EINVAL;
+	}
+
+	/* this also enters ses_ptr->sem */
+	ses_ptr = crypto_get_session_by_sid(fcr, cop->ses);
+	if (unlikely(!ses_ptr)) {
+		dprintk(1, KERN_ERR, "invalid session ID=0x%08X\n", cop->ses);
+		return -EINVAL;
+	}
+
+	if (ses_ptr->hdata.init != 0 && (cop->flags == 0 || cop->flags & COP_FLAG_RESET)) {
+		ret = cryptodev_hash_reset(&ses_ptr->hdata);
+		if (unlikely(ret)) {
+			dprintk(1, KERN_ERR,
+				"error in cryptodev_hash_reset()\n");
+			goto out_unlock;
+		}
+	}
+
+	if (ses_ptr->cdata.init != 0) {
+		int blocksize = ses_ptr->cdata.blocksize;
+
+		if (unlikely(cop->len % blocksize)) {
+			dprintk(1, KERN_ERR,
+				"data size (%u) isn't a multiple "
+				"of block size (%u)\n",
+				cop->len, blocksize);
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+
+		cryptodev_cipher_set_iv(&ses_ptr->cdata, kcop->iv,
+				min(ses_ptr->cdata.ivsize, kcop->ivlen));
+	}
+
+	if (likely(cop->len)) {
+		if (cop->flags & COP_FLAG_NO_ZC)
+			ret = __crypto_run_std(ses_ptr, &kcop->cop);
+		else
+			ret = __crypto_run_zc(ses_ptr, kcop);
+		if (unlikely(ret))
+			goto out_unlock;
+	}
+
+	if (ses_ptr->cdata.init != 0) {
+		cryptodev_cipher_get_iv(&ses_ptr->cdata, kcop->iv,
+				min(ses_ptr->cdata.ivsize, kcop->ivlen));
+	}
+
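+	/* Finalize the hash when COP_FLAG_FINAL is set, when the
+	 * operation is one-shot (no COP_FLAG_UPDATE), or when there was
+	 * no data; COP_FLAG_UPDATE requests keep the hash state for
+	 * later calls. */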
+	if (ses_ptr->hdata.init != 0 &&
+		((cop->flags & COP_FLAG_FINAL) ||
+		   (!(cop->flags & COP_FLAG_UPDATE) || cop->len == 0))) {
+
+		ret = cryptodev_hash_final(&ses_ptr->hdata, kcop->hash_output);
+		if (unlikely(ret)) {
+			dprintk(0, KERN_ERR, "CryptoAPI failure: %d\n", ret);
+			goto out_unlock;
+		}
+		kcop->digestsize = ses_ptr->hdata.digestsize;
+	}
+
+out_unlock:
+	crypto_put_session(ses_ptr);
+	return ret;
+}
diff --git a/crypto/cryptodev/tests/Makefile b/crypto/cryptodev/tests/Makefile
new file mode 100644
index 0000000..c9f04e8
--- /dev/null
+++ b/crypto/cryptodev/tests/Makefile
@@ -0,0 +1,35 @@
+KERNEL_DIR ?= /lib/modules/$(shell uname -r)/build
+KBUILD_CFLAGS += -I.. $(CRYPTODEV_CFLAGS)
+CFLAGS += -I.. $(CRYPTODEV_CFLAGS)
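+
+# A typical invocation might be (path is illustrative):
+#   make KERNEL_DIR=/usr/src/linux check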
+
+comp_progs := cipher_comp hash_comp hmac_comp
+
+hostprogs := cipher cipher-aead hmac speed async_cipher async_hmac \
+	async_speed sha_speed hashcrypt_speed fullspeed cipher-gcm \
+	cipher-aead-srtp ${comp_progs}
+
+example-cipher-objs := cipher.o
+example-cipher-aead-objs := cipher-aead.o
+example-hmac-objs := hmac.o
+example-speed-objs := speed.o
+example-fullspeed-objs := fullspeed.o
+example-sha-speed-objs := sha_speed.o
+example-async-cipher-objs := async_cipher.o
+example-async-hmac-objs := async_hmac.o
+example-async-speed-objs := async_speed.o
+example-hashcrypt-speed-objs := hashcrypt_speed.o
+
+check: $(hostprogs)
+	./cipher
+	./hmac
+	./async_cipher
+	./async_hmac
+	./cipher-aead-srtp
+	./cipher-gcm
+	./cipher-aead
+
+clean:
+	rm -f *.o *~ $(hostprogs)
+
+${comp_progs}: LDFLAGS += -lssl -lcrypto
+${comp_progs}: %: %.o openssl_wrapper.o
diff --git a/crypto/cryptodev/tests/async_cipher.c b/crypto/cryptodev/tests/async_cipher.c
new file mode 100644
index 0000000..162a695
--- /dev/null
+++ b/crypto/cryptodev/tests/async_cipher.c
@@ -0,0 +1,338 @@
+/*
+ * Demo on how to use /dev/crypto device for ciphering.
+ *
+ * Placed under public domain.
+ *
+ */
+#include <poll.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <sys/ioctl.h>
+#include <crypto/cryptodev.h>
+
+#include "testhelper.h"
+
+#ifdef ENABLE_ASYNC
+
+static int debug = 0;
+
+#define	DATA_SIZE	(8*1024)
+#define	BLOCK_SIZE	16
+#define	KEY_SIZE	16
+
+static int
+test_crypto(int cfd)
+{
+	char plaintext_raw[DATA_SIZE + 63], *plaintext;
+	char ciphertext_raw[DATA_SIZE + 63], *ciphertext;
+	char iv[BLOCK_SIZE];
+	char key[KEY_SIZE];
+
+	struct session_op sess;
+#ifdef CIOCGSESSINFO
+	struct session_info_op siop;
+#endif
+	struct crypt_op cryp;
+
+	if (debug) printf("running %s\n", __func__);
+
+	memset(&sess, 0, sizeof(sess));
+	memset(&cryp, 0, sizeof(cryp));
+
+	memset(key, 0x33,  sizeof(key));
+	memset(iv, 0x03,  sizeof(iv));
+
+	/* Get crypto session for AES128 */
+	sess.cipher = CRYPTO_AES_CBC;
+	sess.keylen = KEY_SIZE;
+	sess.key = key;
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+	if (debug) printf("%s: got the session\n", __func__);
+
+#ifdef CIOCGSESSINFO
+	siop.ses = sess.ses;
+	if (ioctl(cfd, CIOCGSESSINFO, &siop)) {
+		perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+	plaintext = (char *)(((unsigned long)plaintext_raw + siop.alignmask) & ~siop.alignmask);
+	ciphertext = (char *)(((unsigned long)ciphertext_raw + siop.alignmask) & ~siop.alignmask);
+#else
+	plaintext = plaintext_raw;
+	ciphertext = ciphertext_raw;
+#endif
+	memset(plaintext, 0x15, DATA_SIZE);
+
+	/* Encrypt data.in to data.encrypted */
+	cryp.ses = sess.ses;
+	cryp.len = DATA_SIZE;
+	cryp.src = plaintext;
+	cryp.dst = ciphertext;
+	cryp.iv = iv;
+	cryp.op = COP_ENCRYPT;
+
+	DO_OR_DIE(do_async_crypt(cfd, &cryp), 0);
+	DO_OR_DIE(do_async_fetch(cfd, &cryp), 0);
+
+	if (debug) printf("%s: data encrypted\n", __func__);
+
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+	if (debug) printf("%s: session finished\n", __func__);
+
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+	if (debug) printf("%s: got new session\n", __func__);
+
+	/* Decrypt data.encrypted to data.decrypted */
+	cryp.ses = sess.ses;
+	cryp.len = DATA_SIZE;
+	cryp.src = ciphertext;
+	cryp.dst = ciphertext;
+	cryp.iv = iv;
+	cryp.op = COP_DECRYPT;
+
+	DO_OR_DIE(do_async_crypt(cfd, &cryp), 0);
+	DO_OR_DIE(do_async_fetch(cfd, &cryp), 0);
+
+	if (debug) printf("%s: data decrypted\n", __func__);
+
+	/* Verify the result */
+	if (memcmp(plaintext, ciphertext, DATA_SIZE) != 0) {
+		fprintf(stderr,
+			"FAIL: Decrypted data are different from the input data.\n");
+		return 1;
+	} else if (debug)
+		printf("Test passed\n");
+
+	/* Finish crypto session */
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	return 0;
+}
+
+static int test_aes(int cfd)
+{
+	char plaintext1_raw[BLOCK_SIZE + 63], *plaintext1;
+	char ciphertext1[BLOCK_SIZE] = { 0xdf, 0x55, 0x6a, 0x33, 0x43, 0x8d, 0xb8, 0x7b, 0xc4, 0x1b, 0x17, 0x52, 0xc5, 0x5e, 0x5e, 0x49 };
+	char iv1[BLOCK_SIZE];
+	char key1[KEY_SIZE] = { 0xff, 0xff, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+	char plaintext2_data[BLOCK_SIZE] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc0, 0x00 };
+	char plaintext2_raw[BLOCK_SIZE + 63], *plaintext2;
+	char ciphertext2[BLOCK_SIZE] = { 0xb7, 0x97, 0x2b, 0x39, 0x41, 0xc4, 0x4b, 0x90, 0xaf, 0xa7, 0xb2, 0x64, 0xbf, 0xba, 0x73, 0x87 };
+	char iv2[BLOCK_SIZE];
+	char key2[KEY_SIZE];
+
+	struct session_op sess1, sess2;
+#ifdef CIOCGSESSINFO
+	struct session_info_op siop1, siop2;
+#endif
+	struct crypt_op cryp1, cryp2;
+
+	memset(&sess1, 0, sizeof(sess1));
+	memset(&sess2, 0, sizeof(sess2));
+	memset(&cryp1, 0, sizeof(cryp1));
+	memset(&cryp2, 0, sizeof(cryp2));
+
+	/* Get crypto session for AES128 */
+	sess1.cipher = CRYPTO_AES_CBC;
+	sess1.keylen = KEY_SIZE;
+	sess1.key = key1;
+	if (ioctl(cfd, CIOCGSESSION, &sess1)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+#ifdef CIOCGSESSINFO
+	siop1.ses = sess1.ses;
+	if (ioctl(cfd, CIOCGSESSINFO, &siop1)) {
+		perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+	plaintext1 = (char *)(((unsigned long)plaintext1_raw + siop1.alignmask) & ~siop1.alignmask);
+#else
+	plaintext1 = plaintext1_raw;
+#endif
+	memset(plaintext1, 0x0, BLOCK_SIZE);
+
+	memset(iv1, 0x0, sizeof(iv1));
+	memset(key2, 0x0, sizeof(key2));
+
+	/* Get second crypto session for AES128 */
+	sess2.cipher = CRYPTO_AES_CBC;
+	sess2.keylen = KEY_SIZE;
+	sess2.key = key2;
+	if (ioctl(cfd, CIOCGSESSION, &sess2)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+#ifdef CIOCGSESSINFO
+	siop2.ses = sess2.ses;
+	if (ioctl(cfd, CIOCGSESSINFO, &siop2)) {
+		perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+	plaintext2 = (char *)(((unsigned long)plaintext2_raw + siop2.alignmask) & ~siop2.alignmask);
+#else
+	plaintext2 = plaintext2_raw;
+#endif
+	memcpy(plaintext2, plaintext2_data, BLOCK_SIZE);
+
+	/* Encrypt data.in to data.encrypted */
+	cryp1.ses = sess1.ses;
+	cryp1.len = BLOCK_SIZE;
+	cryp1.src = plaintext1;
+	cryp1.dst = plaintext1;
+	cryp1.iv = iv1;
+	cryp1.op = COP_ENCRYPT;
+
+	DO_OR_DIE(do_async_crypt(cfd, &cryp1), 0);
+	if (debug) printf("cryp1 written out\n");
+
+	memset(iv2, 0x0, sizeof(iv2));
+
+	/* Encrypt data.in to data.encrypted */
+	cryp2.ses = sess2.ses;
+	cryp2.len = BLOCK_SIZE;
+	cryp2.src = plaintext2;
+	cryp2.dst = plaintext2;
+	cryp2.iv = iv2;
+	cryp2.op = COP_ENCRYPT;
+
+	DO_OR_DIE(do_async_crypt(cfd, &cryp2), 0);
+	if (debug) printf("cryp2 written out\n");
+
+	DO_OR_DIE(do_async_fetch(cfd, &cryp1), 0);
+	DO_OR_DIE(do_async_fetch(cfd, &cryp2), 0);
+	if (debug) printf("cryp1 + cryp2 successfully read\n");
+
+	/* Verify the result */
+	if (memcmp(plaintext1, ciphertext1, BLOCK_SIZE) != 0) {
+		int i;
+		fprintf(stderr,
+			"FAIL: Decrypted data are different from the input data.\n");
+		printf("plaintext:");
+		for (i = 0; i < BLOCK_SIZE; i++) {
+			if ((i % 30) == 0)
+				printf("\n");
+			printf("%02x ", plaintext1[i]);
+		}
+		printf("ciphertext:");
+		for (i = 0; i < BLOCK_SIZE; i++) {
+			if ((i % 30) == 0)
+				printf("\n");
+			printf("%02x ", ciphertext1[i]);
+		}
+		printf("\n");
+		return 1;
+	} else {
+		if (debug) printf("result 1 passed\n");
+	}
+
+	/* Test 2 */
+
+	/* Verify the result */
+	if (memcmp(plaintext2, ciphertext2, BLOCK_SIZE) != 0) {
+		int i;
+		fprintf(stderr,
+			"FAIL: Decrypted data are different from the input data.\n");
+		printf("plaintext:");
+		for (i = 0; i < BLOCK_SIZE; i++) {
+			if ((i % 30) == 0)
+				printf("\n");
+			printf("%02x ", plaintext2[i]);
+		}
+		printf("ciphertext:");
+		for (i = 0; i < BLOCK_SIZE; i++) {
+			if ((i % 30) == 0)
+				printf("\n");
+			printf("%02x ", ciphertext2[i]);
+		}
+		printf("\n");
+		return 1;
+	} else {
+		if (debug) printf("result 2 passed\n");
+	}
+
+	if (debug) printf("AES Test passed\n");
+
+	/* Finish crypto session */
+	if (ioctl(cfd, CIOCFSESSION, &sess1.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+	if (ioctl(cfd, CIOCFSESSION, &sess2.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	return 0;
+}
+
+int
+main(int argc, char** argv)
+{
+	int fd = -1, cfd = -1;
+
+	if (argc > 1) debug = 1;
+
+	/* Open the crypto device */
+	fd = open("/dev/crypto", O_RDWR, 0);
+	if (fd < 0) {
+		perror("open(/dev/crypto)");
+		return 1;
+	}
+
+	/* Clone file descriptor */
+	if (ioctl(fd, CRIOGET, &cfd)) {
+		perror("ioctl(CRIOGET)");
+		return 1;
+	}
+
+	/* Set close-on-exec (not really needed here) */
+	if (fcntl(cfd, F_SETFD, 1) == -1) {
+		perror("fcntl(F_SETFD)");
+		return 1;
+	}
+
+	/* Run the test itself */
+	if (test_aes(cfd))
+		return 1;
+
+	if (test_crypto(cfd))
+		return 1;
+
+	/* Close cloned descriptor */
+	if (close(cfd)) {
+		perror("close(cfd)");
+		return 1;
+	}
+
+	/* Close the original descriptor */
+	if (close(fd)) {
+		perror("close(fd)");
+		return 1;
+	}
+
+	return 0;
+}
+#else
+int
+main(int argc, char** argv)
+{
+	return (0);
+}
+#endif
diff --git a/crypto/cryptodev/tests/async_hmac.c b/crypto/cryptodev/tests/async_hmac.c
new file mode 100644
index 0000000..97fd0c5
--- /dev/null
+++ b/crypto/cryptodev/tests/async_hmac.c
@@ -0,0 +1,303 @@
+/*
+ * Demo on how to use /dev/crypto device for HMAC.
+ *
+ * Placed under public domain.
+ *
+ */
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <stdint.h>
+
+#include <sys/ioctl.h>
+#include <crypto/cryptodev.h>
+
+#include "testhelper.h"
+
+#ifdef ENABLE_ASYNC
+
+static int debug = 0;
+
+#define	DATA_SIZE	4096
+#define	BLOCK_SIZE	16
+#define	KEY_SIZE	16
+#define SHA1_HASH_LEN   20
+
+static int
+test_crypto(int cfd)
+{
+	struct {
+		uint8_t	in[DATA_SIZE],
+			encrypted[DATA_SIZE],
+			decrypted[DATA_SIZE],
+			iv[BLOCK_SIZE],
+			key[KEY_SIZE];
+	} data;
+	struct session_op sess;
+	struct crypt_op cryp;
+	uint8_t mac[AALG_MAX_RESULT_LEN];
+	uint8_t oldmac[AALG_MAX_RESULT_LEN];
+	uint8_t md5_hmac_out[] = "\x75\x0c\x78\x3e\x6a\xb0\xb5\x03\xea\xa8\x6e\x31\x0a\x5d\xb7\x38";
+	uint8_t sha1_out[] = "\x8f\x82\x03\x94\xf9\x53\x35\x18\x20\x45\xda\x24\xf3\x4d\xe5\x2b\xf8\xbc\x34\x32";
+	int i;
+
+	memset(&sess, 0, sizeof(sess));
+	memset(&cryp, 0, sizeof(cryp));
+
+	/* Use the garbage that is on the stack :-) */
+	/* memset(&data, 0, sizeof(data)); */
+
+	/* SHA1 plain test */
+	memset(mac, 0, sizeof(mac));
+
+	sess.cipher = 0;
+	sess.mac = CRYPTO_SHA1;
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+	cryp.ses = sess.ses;
+	cryp.len = sizeof("what do ya want for nothing?")-1;
+	cryp.src = "what do ya want for nothing?";
+	cryp.mac = mac;
+	cryp.op = COP_ENCRYPT;
+
+	DO_OR_DIE(do_async_crypt(cfd, &cryp), 0);
+	DO_OR_DIE(do_async_fetch(cfd, &cryp), 0);
+
+	if (memcmp(mac, sha1_out, 20)!=0) {
+		printf("mac: ");
+		for (i=0;i<SHA1_HASH_LEN;i++) {
+			printf("%.2x", (uint8_t)mac[i]);
+		}
+		puts("\n");
+		fprintf(stderr, "HASH test 1: failed\n");
+	} else {
+		if (debug) fprintf(stderr, "HASH test 1: passed\n");
+	}
+
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	/* MD5-HMAC test */
+	memset(mac, 0, sizeof(mac));
+
+	sess.cipher = 0;
+	sess.mackey = (uint8_t*)"Jefe";
+	sess.mackeylen = 4;
+	sess.mac = CRYPTO_MD5_HMAC;
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+	cryp.ses = sess.ses;
+	cryp.len = sizeof("what do ya want for nothing?")-1;
+	cryp.src = "what do ya want for nothing?";
+	cryp.mac = mac;
+	cryp.op = COP_ENCRYPT;
+
+	DO_OR_DIE(do_async_crypt(cfd, &cryp), 0);
+	DO_OR_DIE(do_async_fetch(cfd, &cryp), 0);
+
+	if (memcmp(mac, md5_hmac_out, 16)!=0) {
+		printf("mac: ");
+		for (i=0;i<16;i++) {
+			printf("%.2x", (uint8_t)mac[i]);
+		}
+		puts("\n");
+		fprintf(stderr, "HMAC test 1: failed\n");
+	} else {
+		if (debug) fprintf(stderr, "HMAC test 1: passed\n");
+	}
+
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	/* Hash and encryption in one step test */
+	sess.cipher = CRYPTO_AES_CBC;
+	sess.mac = CRYPTO_SHA1_HMAC;
+	sess.keylen = KEY_SIZE;
+	sess.key = data.key;
+	sess.mackeylen = 16;
+	sess.mackey = (uint8_t*)"\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b";
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+	/* Encrypt data.in to data.encrypted */
+	cryp.ses = sess.ses;
+	cryp.len = sizeof(data.in);
+	cryp.src = data.in;
+	cryp.dst = data.encrypted;
+	cryp.iv = data.iv;
+	cryp.mac = mac;
+	cryp.op = COP_ENCRYPT;
+
+	DO_OR_DIE(do_async_crypt(cfd, &cryp), 0);
+	DO_OR_DIE(do_async_fetch(cfd, &cryp), 0);
+
+	memcpy(oldmac, mac, sizeof(mac));
+
+	/* Decrypt data.encrypted to data.decrypted */
+	cryp.src = data.encrypted;
+	cryp.dst = data.decrypted;
+	cryp.op = COP_DECRYPT;
+
+	DO_OR_DIE(do_async_crypt(cfd, &cryp), 0);
+	DO_OR_DIE(do_async_fetch(cfd, &cryp), 0);
+
+	/* Verify the result */
+	if (memcmp(data.in, data.decrypted, sizeof(data.in)) != 0) {
+		fprintf(stderr,
+			"FAIL: Decrypted data are different from the input data.\n");
+		return 1;
+	} else if (debug) 
+		printf("Crypt Test: passed\n");
+
+	if (memcmp(mac, oldmac, 20) != 0) {
+		fprintf(stderr,
+			"FAIL: Hash in decrypted data different than in encrypted.\n");
+		return 1;
+	} else if (debug)
+		printf("HMAC Test 2: passed\n");
+
+	/* Finish crypto session */
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	return 0;
+}
+
+static int
+test_extras(int cfd)
+{
+	struct session_op sess;
+	struct crypt_op cryp;
+	uint8_t mac[AALG_MAX_RESULT_LEN];
+	uint8_t oldmac[AALG_MAX_RESULT_LEN];
+	uint8_t md5_hmac_out[] = "\x75\x0c\x78\x3e\x6a\xb0\xb5\x03\xea\xa8\x6e\x31\x0a\x5d\xb7\x38";
+	uint8_t sha1_out[] = "\x8f\x82\x03\x94\xf9\x53\x35\x18\x20\x45\xda\x24\xf3\x4d\xe5\x2b\xf8\xbc\x34\x32";
+	int i;
+
+	memset(&sess, 0, sizeof(sess));
+	memset(&cryp, 0, sizeof(cryp));
+
+	/* SHA1 plain test */
+	memset(mac, 0, sizeof(mac));
+
+	sess.cipher = 0;
+	sess.mac = CRYPTO_SHA1;
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+	cryp.ses = sess.ses;
+	cryp.len = sizeof("what do")-1;
+	cryp.src = "what do";
+	cryp.mac = mac;
+	cryp.op = COP_ENCRYPT;
+	cryp.flags = COP_FLAG_UPDATE;
+
+	DO_OR_DIE(do_async_crypt(cfd, &cryp), 0);
+	DO_OR_DIE(do_async_fetch(cfd, &cryp), 0);
+
+	cryp.ses = sess.ses;
+	cryp.len = sizeof(" ya want for nothing?")-1;
+	cryp.src = " ya want for nothing?";
+	cryp.mac = mac;
+	cryp.op = COP_ENCRYPT;
+	cryp.flags = COP_FLAG_FINAL;
+
+	DO_OR_DIE(do_async_crypt(cfd, &cryp), 0);
+	DO_OR_DIE(do_async_fetch(cfd, &cryp), 0);
+
+	if (memcmp(mac, sha1_out, 20)!=0) {
+		printf("mac: ");
+		for (i=0;i<SHA1_HASH_LEN;i++) {
+			printf("%.2x", (uint8_t)mac[i]);
+		}
+		puts("\n");
+		fprintf(stderr, "HASH test [update]: failed\n");
+	} else {
+		if (debug) fprintf(stderr, "HASH test [update]: passed\n");
+	}
+
+	memset(mac, 0, sizeof(mac));
+
+	/* Finish crypto session */
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	return 0;
+}
+
+
+int
+main()
+{
+	int fd = -1, cfd = -1;
+
+	/* Open the crypto device */
+	fd = open("/dev/crypto", O_RDWR, 0);
+	if (fd < 0) {
+		perror("open(/dev/crypto)");
+		return 1;
+	}
+
+	/* Clone file descriptor */
+	if (ioctl(fd, CRIOGET, &cfd)) {
+		perror("ioctl(CRIOGET)");
+		return 1;
+	}
+
+	/* Set close-on-exec (not really needed here) */
+	if (fcntl(cfd, F_SETFD, 1) == -1) {
+		perror("fcntl(F_SETFD)");
+		return 1;
+	}
+
+	/* Run the test itself */
+	if (test_crypto(cfd))
+		return 1;
+
+	if (test_extras(cfd))
+		return 1;
+
+	/* Close cloned descriptor */
+	if (close(cfd)) {
+		perror("close(cfd)");
+		return 1;
+	}
+
+	/* Close the original descriptor */
+	if (close(fd)) {
+		perror("close(fd)");
+		return 1;
+	}
+
+	return 0;
+}
+#else
+int
+main(int argc, char** argv)
+{
+	return (0);
+}
+#endif
diff --git a/crypto/cryptodev/tests/async_speed.c b/crypto/cryptodev/tests/async_speed.c
new file mode 100644
index 0000000..1188599
--- /dev/null
+++ b/crypto/cryptodev/tests/async_speed.c
@@ -0,0 +1,225 @@
+/*  cryptodev_test - simple benchmark tool for cryptodev
+ *
+ *    Copyright (C) 2010 by Phil Sutter <phil.sutter@viprinet.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <signal.h>
+#include <crypto/cryptodev.h>
+
+#ifdef ENABLE_ASYNC
+
+static double udifftimeval(struct timeval start, struct timeval end)
+{
+	return (double)(end.tv_usec - start.tv_usec) +
+	       (double)(end.tv_sec - start.tv_sec) * 1000 * 1000;
+}
+
+static int must_finish = 0;
+static struct pollfd pfd;
+
+static void alarm_handler(int signo)
+{
+	must_finish = 1;
+	pfd.events = POLLIN;
+}
+
+static char *units[] = { "", "Ki", "Mi", "Gi", "Ti", 0};
+
+static void value2human(double bytes, double time, double* data, double* speed,char* metric)
+{
+	int unit = 0;
+
+	*data = bytes;
+	while (*data > 1024 && units[unit + 1]) {
+		*data /= 1024;
+		unit++;
+	}
+	*speed = *data / time;
+	sprintf(metric, "%sB", units[unit]);
+}
+
+
+int encrypt_data(struct session_op *sess, int fdc, int chunksize, int alignmask)
+{
+	struct crypt_op cop;
+	char *buffer[64], iv[32];
+	static int val = 23;
+	struct timeval start, end;
+	double total = 0;
+	double secs, ddata, dspeed;
+	char metric[16];
+	int rc, wqueue = 0, bufidx = 0;
+
+	memset(iv, 0x23, 32);
+
+	printf("\tEncrypting in chunks of %d bytes: ", chunksize);
+	fflush(stdout);
+
+	for (rc = 0; rc < 64; rc++) {
+		if (alignmask) {
+			if (posix_memalign((void **)(buffer + rc), alignmask + 1, chunksize)) {
+				printf("posix_memalign() failed!\n");
+				return 1;
+			}
+		} else {
+			if (!(buffer[rc] = malloc(chunksize))) {
+				perror("malloc()");
+				return 1;
+			}
+		}
+		memset(buffer[rc], val++, chunksize);
+	}
+	pfd.fd = fdc;
+	pfd.events = POLLOUT | POLLIN;
+
+	must_finish = 0;
+	alarm(5);
+
+	gettimeofday(&start, NULL);
+	do {
+		if ((rc = poll(&pfd, 1, 100)) < 0) {
+			if (errno == ERESTART || errno == EINTR)
+				continue;
+			fprintf(stderr, "errno = %d ", errno);
+			perror("poll()");
+			return 1;
+		}
+
+		if (pfd.revents & POLLOUT) {
+			memset(&cop, 0, sizeof(cop));
+			cop.ses = sess->ses;
+			cop.len = chunksize;
+			cop.iv = (unsigned char *)iv;
+			cop.op = COP_ENCRYPT;
+			cop.src = cop.dst = (unsigned char *)buffer[bufidx];
+			bufidx = (bufidx + 1) % 64;
+
+			if (ioctl(fdc, CIOCASYNCCRYPT, &cop)) {
+				perror("ioctl(CIOCASYNCCRYPT)");
+				return 1;
+			}
+			wqueue++;
+		}
+		if (pfd.revents & POLLIN) {
+			if (ioctl(fdc, CIOCASYNCFETCH, &cop)) {
+				perror("ioctl(CIOCASYNCFETCH)");
+				return 1;
+			}
+			wqueue--;
+			total += cop.len;
+		}
+	} while(!must_finish || wqueue);
+	gettimeofday(&end, NULL);
+
+	secs = udifftimeval(start, end)/ 1000000.0;
+
+	value2human(total, secs, &ddata, &dspeed, metric);
+	printf("done. %.2f %s in %.2f secs: ", ddata, metric, secs);
+	printf("%.2f %s/sec\n", dspeed, metric);
+
+	for (rc = 0; rc < 64; rc++)
+		free(buffer[rc]);
+	return 0;
+}
+
+int main(void)
+{
+	int fd, i, fdc = -1, alignmask = 0;
+	struct session_op sess;
+#ifdef CIOCGSESSINFO
+	struct session_info_op siop;
+#endif
+	char keybuf[32];
+
+	signal(SIGALRM, alarm_handler);
+
+	if ((fd = open("/dev/crypto", O_RDWR, 0)) < 0) {
+		perror("open()");
+		return 1;
+	}
+	if (ioctl(fd, CRIOGET, &fdc)) {
+		perror("ioctl(CRIOGET)");
+		return 1;
+	}
+
+	fprintf(stderr, "Testing NULL cipher: \n");
+	memset(&sess, 0, sizeof(sess));
+	sess.cipher = CRYPTO_NULL;
+	sess.keylen = 0;
+	sess.key = (unsigned char *)keybuf;
+	if (ioctl(fdc, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+#ifdef CIOCGSESSINFO
+	siop.ses = sess.ses;
+	if (ioctl(fdc, CIOCGSESSINFO, &siop)) {
+		perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+	alignmask = siop.alignmask;
+#endif
+
+	for (i = 256; i <= (64 * 4096); i *= 2) {
+		if (encrypt_data(&sess, fdc, i, alignmask))
+			break;
+	}
+
+	fprintf(stderr, "\nTesting AES-128-CBC cipher: \n");
+	memset(&sess, 0, sizeof(sess));
+	sess.cipher = CRYPTO_AES_CBC;
+	sess.keylen = 16;
+	memset(keybuf, 0x42, 16);
+	sess.key = (unsigned char *)keybuf;
+	if (ioctl(fdc, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+#ifdef CIOCGSESSINFO
+	siop.ses = sess.ses;
+	if (ioctl(fdc, CIOCGSESSINFO, &siop)) {
+		perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+	alignmask = siop.alignmask;
+#endif
+
+	for (i = 256; i <= (64 * 1024); i *= 2) {
+		if (encrypt_data(&sess, fdc, i, alignmask))
+			break;
+	}
+
+	close(fdc);
+	close(fd);
+	return 0;
+}
+
+#else
+int
+main(int argc, char** argv)
+{
+	return (0);
+}
+#endif
diff --git a/crypto/cryptodev/tests/cipher-aead-srtp.c b/crypto/cryptodev/tests/cipher-aead-srtp.c
new file mode 100644
index 0000000..fae04e7
--- /dev/null
+++ b/crypto/cryptodev/tests/cipher-aead-srtp.c
@@ -0,0 +1,572 @@
+/*
+ * Demo on how to use /dev/crypto device for ciphering.
+ *
+ * Placed under public domain.
+ *
+ */
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <sys/ioctl.h>
+#include <crypto/cryptodev.h>
+
+#define	DATA_SIZE	(8*1024)
+#define HEADER_SIZE 193
+#define PLAINTEXT_SIZE 1021
+#define FOOTER_SIZE 15
+#define	BLOCK_SIZE	16
+#define	KEY_SIZE	16
+
+#define MAC_SIZE 20 /* SHA1 */
+
+static int debug = 0;
+
+static int
+get_sha1_hmac(int cfd, void* key, int key_size, void* data, int data_size, void* mac)
+{
+	struct session_op sess;
+	struct crypt_op cryp;
+
+	memset(&sess, 0, sizeof(sess));
+	memset(&cryp, 0, sizeof(cryp));
+
+	sess.cipher = 0;
+	sess.mac = CRYPTO_SHA1_HMAC;
+	sess.mackeylen = key_size;
+	sess.mackey = key;
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+	/* Compute the HMAC over the data */
+	cryp.ses = sess.ses;
+	cryp.len = data_size;
+	cryp.src = data;
+	cryp.dst = NULL;
+	cryp.iv = NULL;
+	cryp.mac = mac;
+	cryp.op = COP_ENCRYPT;
+	if (ioctl(cfd, CIOCCRYPT, &cryp)) {
+		perror("ioctl(CIOCCRYPT)");
+		return 1;
+	}
+
+	/* Finish crypto session */
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	return 0;
+}
+
+static void print_buf(char *desc, unsigned char *buf, int size)
+{
+	int i;
+
+	fputs(desc, stderr);
+	for (i = 0; i < size; i++)
+		fprintf(stderr, "%.2x", (uint8_t)buf[i]);
+	fputs("\n", stderr);
+}
+
+static int
+test_crypto(int cfd)
+{
+	char plaintext_raw[DATA_SIZE + 63], *plaintext;
+	char ciphertext_raw[DATA_SIZE + 63], *ciphertext;
+	char iv[BLOCK_SIZE];
+	char key[KEY_SIZE];
+	unsigned char sha1mac[20];
+	unsigned char tag[20];
+	unsigned char mackey[] = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b";
+	int mackey_len = 16;
+
+	struct session_op sess;
+	struct crypt_op co;
+	struct crypt_auth_op cao;
+	struct session_info_op siop;
+
+	memset(&sess, 0, sizeof(sess));
+	memset(&cao, 0, sizeof(cao));
+	memset(&co, 0, sizeof(co));
+
+	memset(key,0x33,  sizeof(key));
+	memset(iv, 0x03,  sizeof(iv));
+
+	/* Get crypto session for AES128 */
+	sess.cipher = CRYPTO_AES_CTR;
+	sess.keylen = KEY_SIZE;
+	sess.key = key;
+
+	sess.mac = CRYPTO_SHA1_HMAC;
+	sess.mackeylen = mackey_len;
+	sess.mackey = mackey;
+
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+	siop.ses = sess.ses;
+	if (ioctl(cfd, CIOCGSESSINFO, &siop)) {
+		perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+
+	if (debug)
+		printf("requested cipher CRYPTO_AES_CTR/HMAC-SHA1, got %s with driver %s\n",
+			siop.cipher_info.cra_name, siop.cipher_info.cra_driver_name);
+
+	plaintext = (char *)(((unsigned long)plaintext_raw + siop.alignmask) & ~siop.alignmask);
+	ciphertext = (char *)(((unsigned long)ciphertext_raw + siop.alignmask) & ~siop.alignmask);
+
+	memset(plaintext, 0x15, HEADER_SIZE); /* header */
+	memset(&plaintext[HEADER_SIZE], 0x17, PLAINTEXT_SIZE); /* payload */
+	memset(&plaintext[HEADER_SIZE+PLAINTEXT_SIZE], 0x22, FOOTER_SIZE);
+
+	memcpy(ciphertext, plaintext, DATA_SIZE);
+
+	/* Encrypt data.in to data.encrypted */
+	cao.ses = sess.ses;
+	cao.len = PLAINTEXT_SIZE;
+	cao.auth_len = HEADER_SIZE+PLAINTEXT_SIZE+FOOTER_SIZE;
+	cao.auth_src = ciphertext;
+	cao.src = ciphertext+HEADER_SIZE;
+	cao.dst = cao.src;
+	cao.iv = iv;
+	cao.op = COP_ENCRYPT;
+	cao.flags = COP_FLAG_AEAD_SRTP_TYPE;
+	cao.tag = tag;
+	cao.tag_len = 20;
+
+	if (ioctl(cfd, CIOCAUTHCRYPT, &cao)) {
+		perror("ioctl(CIOCAUTHCRYPT)");
+		return 1;
+	}
+
+
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	/* Get crypto session for AES128 */
+	memset(&sess, 0, sizeof(sess));
+	sess.cipher = CRYPTO_AES_CTR;
+	sess.keylen = KEY_SIZE;
+	sess.key = key;
+
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+	if (get_sha1_hmac(cfd, mackey, mackey_len, ciphertext, HEADER_SIZE + PLAINTEXT_SIZE + FOOTER_SIZE, sha1mac) != 0) {
+		fprintf(stderr, "SHA1 MAC failed\n");
+		return 1;
+	}
+
+	if (memcmp(tag, sha1mac, 20) != 0) {
+		fprintf(stderr, "AEAD SHA1 MAC does not match plain MAC\n");
+		print_buf("SHA1: ", sha1mac, 20);
+		print_buf("SHA1-SRTP: ", tag, 20);
+		return 1;
+	}
+
+	/* Decrypt data.encrypted to data.decrypted */
+	co.ses = sess.ses;
+	co.len = PLAINTEXT_SIZE;
+	co.src = ciphertext+HEADER_SIZE;
+	co.dst = ciphertext+HEADER_SIZE;
+	co.iv = iv;
+	co.op = COP_DECRYPT;
+	if (ioctl(cfd, CIOCCRYPT, &co)) {
+		perror("ioctl(CIOCCRYPT)");
+		return 1;
+	}
+
+	/* Verify the result */
+	if (memcmp(plaintext+HEADER_SIZE, ciphertext+HEADER_SIZE, PLAINTEXT_SIZE) != 0) {
+		int i;
+		fprintf(stderr,
+			"FAIL: Decrypted data are different from the input data.\n");
+		printf("plaintext:");
+		for (i = 0; i < DATA_SIZE; i++) {
+			if ((i % 30) == 0)
+				printf("\n");
+			printf("%02x ", (unsigned int)plaintext[i]);
+		}
+		printf("ciphertext:");
+		for (i = 0; i < DATA_SIZE; i++) {
+			if ((i % 30) == 0)
+				printf("\n");
+			printf("%02x ", (unsigned int)ciphertext[i]);
+		}
+		printf("\n");
+		return 1;
+	}
+
+	if (debug) printf("Test passed\n");
+
+	/* Finish crypto session */
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	return 0;
+}
+
+static int
+test_encrypt_decrypt(int cfd)
+{
+	char plaintext_raw[DATA_SIZE + 63], *plaintext;
+	char ciphertext_raw[DATA_SIZE + 63], *ciphertext;
+	char iv[BLOCK_SIZE];
+	char key[KEY_SIZE];
+	unsigned char tag[20];
+	unsigned char mackey[] = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b";
+	int mackey_len = 16;
+
+	struct session_op sess;
+	struct crypt_auth_op cao;
+	struct session_info_op siop;
+
+	memset(&sess, 0, sizeof(sess));
+	memset(&cao, 0, sizeof(cao));
+
+	memset(key,0x33,  sizeof(key));
+	memset(iv, 0x03,  sizeof(iv));
+
+	/* Get crypto session for AES128 */
+	sess.cipher = CRYPTO_AES_CTR;
+	sess.keylen = KEY_SIZE;
+	sess.key = key;
+
+	sess.mac = CRYPTO_SHA1_HMAC;
+	sess.mackeylen = mackey_len;
+	sess.mackey = mackey;
+
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+	siop.ses = sess.ses;
+	if (ioctl(cfd, CIOCGSESSINFO, &siop)) {
+		perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+//	printf("requested cipher CRYPTO_AES_CBC/HMAC-SHA1, got %s with driver %s\n",
+//			siop.cipher_info.cra_name, siop.cipher_info.cra_driver_name);
+
+	plaintext = (char *)(((unsigned long)plaintext_raw + siop.alignmask) & ~siop.alignmask);
+	ciphertext = (char *)(((unsigned long)ciphertext_raw + siop.alignmask) & ~siop.alignmask);
+
+	memset(plaintext, 0x15, HEADER_SIZE); /* header */
+	memset(&plaintext[HEADER_SIZE], 0x17, PLAINTEXT_SIZE); /* payload */
+	memset(&plaintext[HEADER_SIZE+PLAINTEXT_SIZE], 0x22, FOOTER_SIZE);
+
+	memcpy(ciphertext, plaintext, DATA_SIZE);
+
+	/* Encrypt data.in to data.encrypted */
+	cao.ses = sess.ses;
+	cao.len = PLAINTEXT_SIZE;
+	cao.auth_len = HEADER_SIZE+PLAINTEXT_SIZE+FOOTER_SIZE;
+	cao.auth_src = ciphertext;
+	cao.src = ciphertext+HEADER_SIZE;
+	cao.dst = cao.src;
+	cao.iv = iv;
+	cao.op = COP_ENCRYPT;
+	cao.flags = COP_FLAG_AEAD_SRTP_TYPE;
+	cao.tag = tag;
+	cao.tag_len = 20;
+
+	if (ioctl(cfd, CIOCAUTHCRYPT, &cao)) {
+		perror("ioctl(CIOCAUTHCRYPT)");
+		return 1;
+	}
+
+
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	/* Get crypto session for AES128 */
+	memset(&sess, 0, sizeof(sess));
+	sess.cipher = CRYPTO_AES_CTR;
+	sess.keylen = KEY_SIZE;
+	sess.key = key;
+
+	sess.mac = CRYPTO_SHA1_HMAC;
+	sess.mackeylen = mackey_len;
+	sess.mackey = mackey;
+
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+	/* Decrypt data.encrypted to data.decrypted */
+	cao.ses = sess.ses;
+	cao.len = PLAINTEXT_SIZE;
+	cao.auth_len = HEADER_SIZE+PLAINTEXT_SIZE+FOOTER_SIZE;
+	cao.auth_src = ciphertext;
+	cao.src = ciphertext+HEADER_SIZE;
+	cao.dst = cao.src;
+	cao.iv = iv;
+	cao.op = COP_DECRYPT;
+	cao.flags = COP_FLAG_AEAD_SRTP_TYPE;
+	cao.tag = tag;
+	cao.tag_len = 20;
+	if (ioctl(cfd, CIOCAUTHCRYPT, &cao)) {
+		perror("ioctl(CIOCCRYPT)");
+		return 1;
+	}
+
+	/* Verify the result */
+	if (memcmp(plaintext+HEADER_SIZE, ciphertext+HEADER_SIZE, PLAINTEXT_SIZE) != 0) {
+		int i;
+		fprintf(stderr,
+			"FAIL: Decrypted data are different from the input data.\n");
+		printf("plaintext:");
+		for (i = 0; i < DATA_SIZE; i++) {
+			if ((i % 30) == 0)
+				printf("\n");
+			printf("%02x ", (unsigned int)plaintext[i]);
+		}
+		printf("ciphertext:");
+		for (i = 0; i < DATA_SIZE; i++) {
+			if ((i % 30) == 0)
+				printf("\n");
+			printf("%02x ", (unsigned int)ciphertext[i]);
+		}
+		printf("\n");
+		return 1;
+	}
+
+	if (debug) printf("Test passed\n");
+
+
+	/* Finish crypto session */
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	return 0;
+}
+
+static int
+test_encrypt_decrypt_error(int cfd, int err)
+{
+	char plaintext_raw[DATA_SIZE + 63], *plaintext;
+	char ciphertext_raw[DATA_SIZE + 63], *ciphertext;
+	char iv[BLOCK_SIZE];
+	char key[KEY_SIZE];
+	unsigned char tag[20];
+	unsigned char mackey[] = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b";
+	int mackey_len = 16;
+
+	struct session_op sess;
+	struct crypt_auth_op cao;
+	struct session_info_op siop;
+
+	memset(&sess, 0, sizeof(sess));
+	memset(&cao, 0, sizeof(cao));
+
+	memset(key,0x33,  sizeof(key));
+	memset(iv, 0x03,  sizeof(iv));
+
+	/* Get crypto session for AES128 */
+	sess.cipher = CRYPTO_AES_CTR;
+	sess.keylen = KEY_SIZE;
+	sess.key = key;
+
+	sess.mac = CRYPTO_SHA1_HMAC;
+	sess.mackeylen = mackey_len;
+	sess.mackey = mackey;
+
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+	siop.ses = sess.ses;
+	if (ioctl(cfd, CIOCGSESSINFO, &siop)) {
+		perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+//	printf("requested cipher CRYPTO_AES_CBC/HMAC-SHA1, got %s with driver %s\n",
+//			siop.cipher_info.cra_name, siop.cipher_info.cra_driver_name);
+
+	plaintext = (char *)(((unsigned long)plaintext_raw + siop.alignmask) & ~siop.alignmask);
+	ciphertext = (char *)(((unsigned long)ciphertext_raw + siop.alignmask) & ~siop.alignmask);
+
+	memset(plaintext, 0x15, HEADER_SIZE); /* header */
+	memset(&plaintext[HEADER_SIZE], 0x17, PLAINTEXT_SIZE); /* payload */
+	memset(&plaintext[HEADER_SIZE+PLAINTEXT_SIZE], 0x22, FOOTER_SIZE);
+
+	memcpy(ciphertext, plaintext, DATA_SIZE);
+
+	/* Encrypt data.in to data.encrypted */
+	cao.ses = sess.ses;
+	cao.len = PLAINTEXT_SIZE;
+	cao.auth_len = HEADER_SIZE+PLAINTEXT_SIZE+FOOTER_SIZE;
+	cao.auth_src = ciphertext;
+	cao.src = ciphertext+HEADER_SIZE;
+	cao.dst = cao.src;
+	cao.iv = iv;
+	cao.op = COP_ENCRYPT;
+	cao.flags = COP_FLAG_AEAD_SRTP_TYPE;
+	cao.tag = tag;
+	cao.tag_len = 20;
+
+	if (ioctl(cfd, CIOCAUTHCRYPT, &cao)) {
+		perror("ioctl(CIOCAUTHCRYPT)");
+		return 1;
+	}
+
+
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	/* Get crypto session for AES128 */
+	memset(&sess, 0, sizeof(sess));
+	sess.cipher = CRYPTO_AES_CTR;
+	sess.keylen = KEY_SIZE;
+	sess.key = key;
+
+	sess.mac = CRYPTO_SHA1_HMAC;
+	sess.mackeylen = mackey_len;
+	sess.mackey = mackey;
+
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+	/* Corrupt the data, then try to decrypt; the AEAD decrypt is
+	 * expected to fail verification */
+	if (err == 0)
+		ciphertext[1]++;
+	else
+		ciphertext[HEADER_SIZE+3]++;
+	cao.ses = sess.ses;
+	cao.len = PLAINTEXT_SIZE;
+	cao.auth_len = HEADER_SIZE+PLAINTEXT_SIZE+FOOTER_SIZE;
+	cao.auth_src = ciphertext;
+	cao.src = ciphertext+HEADER_SIZE;
+	cao.dst = cao.src;
+	cao.iv = iv;
+	cao.op = COP_DECRYPT;
+	cao.flags = COP_FLAG_AEAD_SRTP_TYPE;
+	cao.tag = tag;
+	cao.tag_len = 20;
+	if (ioctl(cfd, CIOCAUTHCRYPT, &cao)) {
+		if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+			perror("ioctl(CIOCFSESSION)");
+			return 1;
+		}
+
+		if (debug) printf("Test passed\n");
+		return 0;
+	}
+
+	/* Verify the result */
+	if (memcmp(plaintext+HEADER_SIZE, ciphertext+HEADER_SIZE, PLAINTEXT_SIZE) != 0) {
+		int i;
+		fprintf(stderr,
+			"FAIL: Decrypted data are different from the input data.\n");
+		printf("plaintext:");
+		for (i = 0; i < DATA_SIZE; i++) {
+			if ((i % 30) == 0)
+				printf("\n");
+			printf("%02x ", (unsigned int)plaintext[i]);
+		}
+		printf("ciphertext:");
+		for (i = 0; i < DATA_SIZE; i++) {
+			if ((i % 30) == 0)
+				printf("\n");
+			printf("%02x ", (unsigned int)ciphertext[i]);
+		}
+		printf("\n");
+		return 1;
+	}
+
+	printf("Test failed\n");
+
+
+	/* Finish crypto session */
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	return 1;
+}
+
+int
+main(int argc, char** argv)
+{
+	int fd = -1, cfd = -1;
+
+	if (argc > 1) debug = 1;
+
+	/* Open the crypto device */
+	fd = open("/dev/crypto", O_RDWR, 0);
+	if (fd < 0) {
+		perror("open(/dev/crypto)");
+		return 1;
+	}
+
+	/* Clone file descriptor */
+	if (ioctl(fd, CRIOGET, &cfd)) {
+		perror("ioctl(CRIOGET)");
+		return 1;
+	}
+
+	/* Set close-on-exec (not really needed here) */
+	if (fcntl(cfd, F_SETFD, 1) == -1) {
+		perror("fcntl(F_SETFD)");
+		return 1;
+	}
+
+	/* Run the test itself */
+
+	if (test_crypto(cfd))
+		return 1;
+
+	if (test_encrypt_decrypt(cfd))
+		return 1;
+
+	if (test_encrypt_decrypt_error(cfd,0))
+		return 1;
+
+	if (test_encrypt_decrypt_error(cfd,1))
+		return 1;
+
+	/* Close cloned descriptor */
+	if (close(cfd)) {
+		perror("close(cfd)");
+		return 1;
+	}
+
+	/* Close the original descriptor */
+	if (close(fd)) {
+		perror("close(fd)");
+		return 1;
+	}
+
+	return 0;
+}
+
diff --git a/crypto/cryptodev/tests/cipher-aead.c b/crypto/cryptodev/tests/cipher-aead.c
new file mode 100644
index 0000000..164327f
--- /dev/null
+++ b/crypto/cryptodev/tests/cipher-aead.c
@@ -0,0 +1,574 @@
+/*
+ * Demo on how to use /dev/crypto device for ciphering.
+ *
+ * Placed under public domain.
+ *
+ */
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <sys/ioctl.h>
+#include <crypto/cryptodev.h>
+
+#define	DATA_SIZE	(8*1024)
+#define AUTH_SIZE       31
+#define	BLOCK_SIZE	16
+#define	KEY_SIZE	16
+
+#define MAC_SIZE 20 /* SHA1 */
+
+static int debug = 0;
+
+static int
+get_sha1_hmac(int cfd, void* key, int key_size, void* data1, int data1_size, void* data2, int data2_size, void* mac)
+{
+	struct session_op sess;
+	struct crypt_op cryp;
+
+	memset(&sess, 0, sizeof(sess));
+	memset(&cryp, 0, sizeof(cryp));
+
+	sess.cipher = 0;
+	sess.mac = CRYPTO_SHA1_HMAC;
+	sess.mackeylen = key_size;
+	sess.mackey = key;
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+	/* MAC the first data chunk (multi-part update) */
+	cryp.ses = sess.ses;
+	cryp.len = data1_size;
+	cryp.src = data1;
+	cryp.dst = NULL;
+	cryp.iv = NULL;
+	cryp.mac = mac;
+	cryp.op = COP_ENCRYPT;
+	cryp.flags = COP_FLAG_UPDATE;
+	if (ioctl(cfd, CIOCCRYPT, &cryp)) {
+		perror("ioctl(CIOCCRYPT)");
+		return 1;
+	}
+
+	cryp.ses = sess.ses;
+	cryp.len = data2_size;
+	cryp.src = data2;
+	cryp.dst = NULL;
+	cryp.iv = NULL;
+	cryp.mac = mac;
+	cryp.op = COP_ENCRYPT;
+	cryp.flags = COP_FLAG_FINAL;
+	if (ioctl(cfd, CIOCCRYPT, &cryp)) {
+		perror("ioctl(CIOCCRYPT)");
+		return 1;
+	}
+
+	/* Finish crypto session */
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	return 0;
+}
+
+static void print_buf(char *desc, unsigned char *buf, int size)
+{
+	int i;
+
+	fputs(desc, stdout);
+	for (i = 0; i < size; i++)
+		printf("%.2x", (uint8_t)buf[i]);
+	fputs("\n", stdout);
+}
+
+static int
+test_crypto(int cfd)
+{
+	char plaintext_raw[DATA_SIZE + 63], *plaintext;
+	char ciphertext_raw[DATA_SIZE + 63], *ciphertext;
+	char iv[BLOCK_SIZE];
+	char key[KEY_SIZE];
+	char auth[AUTH_SIZE];
+	unsigned char sha1mac[20];
+	int pad, i;
+
+	struct session_op sess;
+	struct crypt_op co;
+	struct crypt_auth_op cao;
+	struct session_info_op siop;
+
+	memset(&sess, 0, sizeof(sess));
+	memset(&cao, 0, sizeof(cao));
+	memset(&co, 0, sizeof(co));
+
+	memset(key,0x33,  sizeof(key));
+	memset(iv, 0x03,  sizeof(iv));
+	memset(auth, 0xf1,  sizeof(auth));
+
+	/* Get crypto session for AES128 */
+	sess.cipher = CRYPTO_AES_CBC;
+	sess.keylen = KEY_SIZE;
+	sess.key = (void*)key;
+
+	sess.mac = CRYPTO_SHA1_HMAC;
+	sess.mackeylen = 16;
+	sess.mackey = (uint8_t*)"\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b";
+
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+	siop.ses = sess.ses;
+	if (ioctl(cfd, CIOCGSESSINFO, &siop)) {
+		perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+	if (debug)
+		printf("requested cipher CRYPTO_AES_CBC/HMAC-SHA1, got %s with driver %s\n",
+			siop.cipher_info.cra_name, siop.cipher_info.cra_driver_name);
+
+	plaintext = (char *)(((unsigned long)plaintext_raw + siop.alignmask) & ~siop.alignmask);
+	ciphertext = (char *)(((unsigned long)ciphertext_raw + siop.alignmask) & ~siop.alignmask);
+	memset(plaintext, 0x15, DATA_SIZE);
+
+	if (get_sha1_hmac(cfd, sess.mackey, sess.mackeylen, auth, sizeof(auth), plaintext, DATA_SIZE, sha1mac) != 0) {
+		fprintf(stderr, "SHA1 MAC failed\n");
+		return 1;
+	}
+
+	memcpy(ciphertext, plaintext, DATA_SIZE);
+
+	/* Encrypt data.in to data.encrypted */
+	cao.ses = sess.ses;
+	cao.auth_src = auth;
+	cao.auth_len = sizeof(auth);
+	cao.len = DATA_SIZE;
+	cao.src = ciphertext;
+	cao.dst = ciphertext;
+	cao.iv = iv;
+	cao.op = COP_ENCRYPT;
+	cao.flags = COP_FLAG_AEAD_TLS_TYPE;
+
+	if (ioctl(cfd, CIOCAUTHCRYPT, &cao)) {
+		perror("ioctl(CIOCAUTHCRYPT)");
+		return 1;
+	}
+
+	//printf("Original plaintext size: %d, ciphertext: %d\n", DATA_SIZE, cao.len);
+
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	/* Get crypto session for AES128 */
+	memset(&sess, 0, sizeof(sess));
+	sess.cipher = CRYPTO_AES_CBC;
+	sess.keylen = KEY_SIZE;
+	sess.key = key;
+
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+	/* Decrypt data.encrypted to data.decrypted */
+	co.ses = sess.ses;
+	co.len = cao.len;
+	co.src = ciphertext;
+	co.dst = ciphertext;
+	co.iv = iv;
+	co.op = COP_DECRYPT;
+	if (ioctl(cfd, CIOCCRYPT, &co)) {
+		perror("ioctl(CIOCCRYPT)");
+		return 1;
+	}
+
+	/* Verify the result */
+	if (memcmp(plaintext, ciphertext, DATA_SIZE) != 0) {
+		int i;
+		fprintf(stderr,
+			"FAIL: Decrypted data are different from the input data.\n");
+		printf("plaintext:");
+		for (i = 0; i < DATA_SIZE; i++) {
+			if ((i % 30) == 0)
+				printf("\n");
+			printf("%02x ", plaintext[i]);
+		}
+		printf("ciphertext:");
+		for (i = 0; i < DATA_SIZE; i++) {
+			if ((i % 30) == 0)
+				printf("\n");
+			printf("%02x ", ciphertext[i]);
+		}
+		printf("\n");
+		return 1;
+	}
+
+	pad = ciphertext[cao.len-1];
+	if (memcmp(&ciphertext[cao.len-MAC_SIZE-pad-1], sha1mac, 20) != 0) {
+		fprintf(stderr, "AEAD SHA1 MAC does not match plain MAC\n");
+		print_buf("SHA1: ", sha1mac, 20);
+		print_buf("SHA1-TLS: ", &ciphertext[cao.len-MAC_SIZE-pad-1], 20);
+		return 1;
+	}
+
+	for (i=0;i<pad;i++)
+		if (ciphertext[cao.len-1-i] != pad) {
+			fprintf(stderr, "Pad does not match (expected %d)\n", pad);
+			print_buf("PAD: ", &ciphertext[cao.len-1-pad], pad);
+			return 1;
+		}
+
+	if (debug) printf("Test passed\n");
+
+	/* Finish crypto session */
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	return 0;
+}
+
+static int
+test_encrypt_decrypt(int cfd)
+{
+	char plaintext_raw[DATA_SIZE + 63], *plaintext;
+	char ciphertext_raw[DATA_SIZE + 63], *ciphertext;
+	char iv[BLOCK_SIZE];
+	char key[KEY_SIZE];
+	char auth[AUTH_SIZE];
+	unsigned char sha1mac[20];
+	int enc_len;
+
+	struct session_op sess;
+	struct crypt_auth_op cao;
+	struct session_info_op siop;
+
+	memset(&sess, 0, sizeof(sess));
+	memset(&cao, 0, sizeof(cao));
+
+	memset(key, 0x33, sizeof(key));
+	memset(iv, 0x03, sizeof(iv));
+	memset(auth, 0xf1, sizeof(auth));
+
+	/* Get crypto session for AES128 */
+	sess.cipher = CRYPTO_AES_CBC;
+	sess.keylen = KEY_SIZE;
+	sess.key = key;
+
+	sess.mac = CRYPTO_SHA1_HMAC;
+	sess.mackeylen = 16;
+	sess.mackey = (uint8_t*)"\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b";
+
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+	siop.ses = sess.ses;
+	if (ioctl(cfd, CIOCGSESSINFO, &siop)) {
+		perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+//	printf("requested cipher CRYPTO_AES_CBC/HMAC-SHA1, got %s with driver %s\n",
+//			siop.cipher_info.cra_name, siop.cipher_info.cra_driver_name);
+
+	plaintext = (char *)(((unsigned long)plaintext_raw + siop.alignmask) & ~siop.alignmask);
+	ciphertext = (char *)(((unsigned long)ciphertext_raw + siop.alignmask) & ~siop.alignmask);
+
+	memset(plaintext, 0x15, DATA_SIZE);
+
+	if (get_sha1_hmac(cfd, sess.mackey, sess.mackeylen, auth, sizeof(auth), plaintext, DATA_SIZE, sha1mac) != 0) {
+		fprintf(stderr, "SHA1 MAC failed\n");
+		return 1;
+	}
+
+	memcpy(ciphertext, plaintext, DATA_SIZE);
+
+	/* Encrypt the data in place (TLS-type AEAD) */
+	cao.ses = sess.ses;
+	cao.auth_src = (void*)auth;
+	cao.auth_len = sizeof(auth);
+	cao.len = DATA_SIZE;
+	cao.src = (void*)ciphertext;
+	cao.dst = (void*)ciphertext;
+	cao.iv = iv;
+	cao.op = COP_ENCRYPT;
+	cao.flags = COP_FLAG_AEAD_TLS_TYPE;
+
+	if (ioctl(cfd, CIOCAUTHCRYPT, &cao)) {
+		perror("ioctl(CIOCAUTHCRYPT)");
+		return 1;
+	}
+
+	enc_len = cao.len;
+	//printf("Original plaintext size: %d, ciphertext: %d\n", DATA_SIZE, enc_len);
+
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	/* Get crypto session for AES128 */
+	memset(&sess, 0, sizeof(sess));
+	sess.cipher = CRYPTO_AES_CBC;
+	sess.keylen = KEY_SIZE;
+	sess.key = key;
+	sess.mac = CRYPTO_SHA1_HMAC;
+	sess.mackeylen = 16;
+	sess.mackey = (uint8_t*)"\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b";
+
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+	/* Decrypt and verify the data in place */
+	cao.ses = sess.ses;
+	cao.auth_src = auth;
+	cao.auth_len = sizeof(auth);
+	cao.len = enc_len;
+	cao.src = ciphertext;
+	cao.dst = ciphertext;
+	cao.iv = iv;
+	cao.op = COP_DECRYPT;
+	cao.flags = COP_FLAG_AEAD_TLS_TYPE;
+	if (ioctl(cfd, CIOCAUTHCRYPT, &cao)) {
+		perror("ioctl(CIOCAUTHCRYPT)");
+		return 1;
+	}
+
+	if (cao.len != DATA_SIZE) {
+		fprintf(stderr, "decrypted data size incorrect!\n");
+		return 1;
+	}
+
+	/* Verify the result */
+	if (memcmp(plaintext, ciphertext, DATA_SIZE) != 0) {
+		int i;
+		fprintf(stderr,
+			"FAIL: Decrypted data are different from the input data.\n");
+		printf("plaintext:");
+		for (i = 0; i < DATA_SIZE; i++) {
+			if ((i % 30) == 0)
+				printf("\n");
+			printf("%02x ", plaintext[i]);
+		}
+		printf("ciphertext:");
+		for (i = 0; i < DATA_SIZE; i++) {
+			if ((i % 30) == 0)
+				printf("\n");
+			printf("%02x ", ciphertext[i]);
+		}
+		printf("\n");
+		return 1;
+	}
+
+	if (debug) printf("Test passed\n");
+
+	/* Finish crypto session */
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	return 0;
+}
+
+static int
+test_encrypt_decrypt_error(int cfd, int err)
+{
+	char plaintext_raw[DATA_SIZE + 63], *plaintext;
+	char ciphertext_raw[DATA_SIZE + 63], *ciphertext;
+	char iv[BLOCK_SIZE];
+	char key[KEY_SIZE];
+	char auth[AUTH_SIZE];
+	unsigned char sha1mac[20];
+	int enc_len;
+
+	struct session_op sess;
+	struct crypt_auth_op cao;
+	struct session_info_op siop;
+
+	memset(&sess, 0, sizeof(sess));
+	memset(&cao, 0, sizeof(cao));
+
+	memset(key, 0x33, sizeof(key));
+	memset(iv, 0x03, sizeof(iv));
+	memset(auth, 0xf1, sizeof(auth));
+
+	/* Get crypto session for AES128 */
+	sess.cipher = CRYPTO_AES_CBC;
+	sess.keylen = KEY_SIZE;
+	sess.key = key;
+
+	sess.mac = CRYPTO_SHA1_HMAC;
+	sess.mackeylen = 16;
+	sess.mackey = (uint8_t*)"\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b";
+
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+	siop.ses = sess.ses;
+	if (ioctl(cfd, CIOCGSESSINFO, &siop)) {
+		perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+//	printf("requested cipher CRYPTO_AES_CBC/HMAC-SHA1, got %s with driver %s\n",
+//			siop.cipher_info.cra_name, siop.cipher_info.cra_driver_name);
+
+	plaintext = (char *)(((unsigned long)plaintext_raw + siop.alignmask) & ~siop.alignmask);
+	ciphertext = (char *)(((unsigned long)ciphertext_raw + siop.alignmask) & ~siop.alignmask);
+	memset(plaintext, 0x15, DATA_SIZE);
+
+	if (get_sha1_hmac(cfd, sess.mackey, sess.mackeylen, auth, sizeof(auth), plaintext, DATA_SIZE, sha1mac) != 0) {
+		fprintf(stderr, "SHA1 MAC failed\n");
+		return 1;
+	}
+
+	memcpy(ciphertext, plaintext, DATA_SIZE);
+
+	/* Encrypt the data in place (TLS-type AEAD) */
+	cao.ses = sess.ses;
+	cao.auth_src = auth;
+	cao.auth_len = sizeof(auth);
+	cao.len = DATA_SIZE;
+	cao.src = ciphertext;
+	cao.dst = ciphertext;
+	cao.iv = iv;
+	cao.op = COP_ENCRYPT;
+	cao.flags = COP_FLAG_AEAD_TLS_TYPE;
+
+	if (ioctl(cfd, CIOCAUTHCRYPT, &cao)) {
+		perror("ioctl(CIOCAUTHCRYPT)");
+		return 1;
+	}
+
+	enc_len = cao.len;
+	//printf("Original plaintext size: %d, ciphertext: %d\n", DATA_SIZE, enc_len);
+
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	/* Get crypto session for AES128 */
+	memset(&sess, 0, sizeof(sess));
+	sess.cipher = CRYPTO_AES_CBC;
+	sess.keylen = KEY_SIZE;
+	sess.key = key;
+	sess.mac = CRYPTO_SHA1_HMAC;
+	sess.mackeylen = 16;
+	sess.mackey = (uint8_t*)"\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b";
+
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+	if (err == 0)
+		auth[2]++;
+	else
+		ciphertext[4]++;
+
+	/* Attempt to decrypt the tampered data */
+	cao.ses = sess.ses;
+	cao.auth_src = auth;
+	cao.auth_len = sizeof(auth);
+	cao.len = enc_len;
+	cao.src = ciphertext;
+	cao.dst = ciphertext;
+	cao.iv = iv;
+	cao.op = COP_DECRYPT;
+	cao.flags = COP_FLAG_AEAD_TLS_TYPE;
+	if (ioctl(cfd, CIOCAUTHCRYPT, &cao)) {
+		if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+			perror("ioctl(CIOCFSESSION)");
+			return 1;
+		}
+
+		if (debug) printf("Test passed\n");
+		return 0;
+	}
+
+	/* Finish crypto session */
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	fprintf(stderr, "Modification to ciphertext was not detected\n");
+	return 1;
+}
+
+int
+main(void)
+{
+	int fd = -1, cfd = -1;
+
+	/* Open the crypto device */
+	fd = open("/dev/crypto", O_RDWR, 0);
+	if (fd < 0) {
+		perror("open(/dev/crypto)");
+		return 1;
+	}
+
+	/* Clone file descriptor */
+	if (ioctl(fd, CRIOGET, &cfd)) {
+		perror("ioctl(CRIOGET)");
+		return 1;
+	}
+
+	/* Set close-on-exec (not really needed here) */
+	if (fcntl(cfd, F_SETFD, 1) == -1) {
+		perror("fcntl(F_SETFD)");
+		return 1;
+	}
+
+	/* Run the test itself */
+
+	if (test_crypto(cfd))
+		return 1;
+
+	if (test_encrypt_decrypt(cfd))
+		return 1;
+
+	if (test_encrypt_decrypt_error(cfd, 0))
+		return 1;
+
+	if (test_encrypt_decrypt_error(cfd, 1))
+		return 1;
+
+	/* Close cloned descriptor */
+	if (close(cfd)) {
+		perror("close(cfd)");
+		return 1;
+	}
+
+	/* Close the original descriptor */
+	if (close(fd)) {
+		perror("close(fd)");
+		return 1;
+	}
+
+	return 0;
+}
+
diff --git a/crypto/cryptodev/tests/cipher-gcm.c b/crypto/cryptodev/tests/cipher-gcm.c
new file mode 100644
index 0000000..599e70c
--- /dev/null
+++ b/crypto/cryptodev/tests/cipher-gcm.c
@@ -0,0 +1,529 @@
+/*
+ * Demo on how to use the /dev/crypto device for AEAD (AES-GCM) ciphering.
+ *
+ * Placed under public domain.
+ *
+ */
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <sys/ioctl.h>
+#include <crypto/cryptodev.h>
+
+#define	DATA_SIZE	(8*1024)
+#define AUTH_SIZE       31
+#define	BLOCK_SIZE	16
+#define	KEY_SIZE	16
+
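+/* perror() variant that also reports the failing function and line. */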
+#define my_perror(x) {fprintf(stderr, "%s: %d\n", __func__, __LINE__); perror(x); }
+
+static int debug = 0;
+
+static void print_buf(char *desc, const unsigned char *buf, int size)
+{
+	int i;
+	fputs(desc, stdout);
+	for (i = 0; i < size; i++) {
+		printf("%.2x", (uint8_t) buf[i]);
+	}
+	fputs("\n", stdout);
+}
+
+struct aes_gcm_vectors_st {
+	const uint8_t *key;
+	const uint8_t *auth;
+	int auth_size;
+	const uint8_t *plaintext;
+	int plaintext_size;
+	const uint8_t *iv;
+	const uint8_t *ciphertext;
+	const uint8_t *tag;
+};
+
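+/* Known-answer vectors; these match the AES-128-GCM test cases published
+ * with the original GCM specification by McGrew and Viega. */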
+struct aes_gcm_vectors_st aes_gcm_vectors[] = {
+	{
+	 .key =
+	 "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
+	 .auth = NULL,
+	 .auth_size = 0,
+	 .plaintext =
+	 "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
+	 .plaintext_size = 16,
+	 .ciphertext =
+	 "\x03\x88\xda\xce\x60\xb6\xa3\x92\xf3\x28\xc2\xb9\x71\xb2\xfe\x78",
+	 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
+	 .tag =
+	 "\xab\x6e\x47\xd4\x2c\xec\x13\xbd\xf5\x3a\x67\xb2\x12\x57\xbd\xdf"
+	},
+	{
+	 .key =
+	 "\xfe\xff\xe9\x92\x86\x65\x73\x1c\x6d\x6a\x8f\x94\x67\x30\x83\x08",
+	 .auth = NULL,
+	 .auth_size = 0,
+	 .plaintext =
+	 "\xd9\x31\x32\x25\xf8\x84\x06\xe5\xa5\x59\x09\xc5\xaf\xf5\x26\x9a\x86\xa7\xa9\x53\x15\x34\xf7\xda\x2e\x4c\x30\x3d\x8a\x31\x8a\x72\x1c\x3c\x0c\x95\x95\x68\x09\x53\x2f\xcf\x0e\x24\x49\xa6\xb5\x25\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57\xba\x63\x7b\x39\x1a\xaf\xd2\x55",
+	 .plaintext_size = 64,
+	 .ciphertext =
+	 "\x42\x83\x1e\xc2\x21\x77\x74\x24\x4b\x72\x21\xb7\x84\xd0\xd4\x9c\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0\x35\xc1\x7e\x23\x29\xac\xa1\x2e\x21\xd5\x14\xb2\x54\x66\x93\x1c\x7d\x8f\x6a\x5a\xac\x84\xaa\x05\x1b\xa3\x0b\x39\x6a\x0a\xac\x97\x3d\x58\xe0\x91\x47\x3f\x59\x85",
+	 .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad\xde\xca\xf8\x88",
+	 .tag = "\x4d\x5c\x2a\xf3\x27\xcd\x64\xa6\x2c\xf3\x5a\xbd\x2b\xa6\xfa\xb4"
+	},
+	{
+	 .key =
+	 "\xfe\xff\xe9\x92\x86\x65\x73\x1c\x6d\x6a\x8f\x94\x67\x30\x83\x08",
+	 .auth =
+	 "\xfe\xed\xfa\xce\xde\xad\xbe\xef\xfe\xed\xfa\xce\xde\xad\xbe\xef\xab\xad\xda\xd2",
+	 .auth_size = 20,
+	 .plaintext =
+	 "\xd9\x31\x32\x25\xf8\x84\x06\xe5\xa5\x59\x09\xc5\xaf\xf5\x26\x9a\x86\xa7\xa9\x53\x15\x34\xf7\xda\x2e\x4c\x30\x3d\x8a\x31\x8a\x72\x1c\x3c\x0c\x95\x95\x68\x09\x53\x2f\xcf\x0e\x24\x49\xa6\xb5\x25\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57\xba\x63\x7b\x39",
+	 .plaintext_size = 60,
+	 .ciphertext =
+	 "\x42\x83\x1e\xc2\x21\x77\x74\x24\x4b\x72\x21\xb7\x84\xd0\xd4\x9c\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0\x35\xc1\x7e\x23\x29\xac\xa1\x2e\x21\xd5\x14\xb2\x54\x66\x93\x1c\x7d\x8f\x6a\x5a\xac\x84\xaa\x05\x1b\xa3\x0b\x39\x6a\x0a\xac\x97\x3d\x58\xe0\x91",
+	 .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad\xde\xca\xf8\x88",
+	 .tag =
+	 "\x5b\xc9\x4f\xbc\x32\x21\xa5\xdb\x94\xfa\xe9\x5a\xe7\x12\x1a\x47"
+	}
+};
+
+
+/* Test against AES-GCM test vectors.
+ */
+static int test_crypto(int cfd)
+{
+	int i;
+	int8_t tmp[128];
+
+	struct session_op sess;
+	struct crypt_auth_op cao;
+
+	/* Get crypto session for AES128 */
+
+	if (debug) {
+		fprintf(stdout, "Tests on AES-GCM vectors: ");
+		fflush(stdout);
+	}
+	for (i = 0;
+	     i < sizeof(aes_gcm_vectors) / sizeof(aes_gcm_vectors[0]);
+	     i++) {
+		memset(&sess, 0, sizeof(sess));
+		memset(tmp, 0, sizeof(tmp));
+
+		sess.cipher = CRYPTO_AES_GCM;
+		sess.keylen = 16;
+		sess.key = (void *) aes_gcm_vectors[i].key;
+
+		if (ioctl(cfd, CIOCGSESSION, &sess)) {
+			my_perror("ioctl(CIOCGSESSION)");
+			return 1;
+		}
+
+		memset(&cao, 0, sizeof(cao));
+
+		cao.ses = sess.ses;
+		cao.dst = tmp;
+		cao.iv = (void *) aes_gcm_vectors[i].iv;
+		cao.iv_len = 12;
+		cao.op = COP_ENCRYPT;
+		cao.flags = 0;
+
+		if (aes_gcm_vectors[i].auth_size > 0) {
+			cao.auth_src = (void *) aes_gcm_vectors[i].auth;
+			cao.auth_len = aes_gcm_vectors[i].auth_size;
+		}
+
+		if (aes_gcm_vectors[i].plaintext_size > 0) {
+			cao.src = (void *) aes_gcm_vectors[i].plaintext;
+			cao.len = aes_gcm_vectors[i].plaintext_size;
+		}
+
+		if (ioctl(cfd, CIOCAUTHCRYPT, &cao)) {
+			my_perror("ioctl(CIOCAUTHCRYPT)");
+			return 1;
+		}
+
+		if (aes_gcm_vectors[i].plaintext_size > 0)
+			if (memcmp
+			    (tmp, aes_gcm_vectors[i].ciphertext,
+			     aes_gcm_vectors[i].plaintext_size) != 0) {
+				fprintf(stderr,
+					"AES-GCM test vector %d failed!\n",
+					i);
+
+				print_buf("Cipher: ", tmp, aes_gcm_vectors[i].plaintext_size);
+				print_buf("Expected: ", aes_gcm_vectors[i].ciphertext, aes_gcm_vectors[i].plaintext_size);
+				return 1;
+			}
+
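+		/* On encryption cryptodev appends the authentication tag to
+		 * dst and includes it in cao.len; cao.tag_len holds the tag
+		 * size (16 bytes for GCM here). */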
+		if (memcmp
+		    (&tmp[cao.len - cao.tag_len], aes_gcm_vectors[i].tag,
+		     16) != 0) {
+			fprintf(stderr,
+				"AES-GCM test vector %d failed (tag)!\n",
+				i);
+
+			print_buf("Tag: ", tmp, cao.tag_len);
+			print_buf("Expected tag: ",
+				  aes_gcm_vectors[i].tag, 16);
+			return 1;
+		}
+
+	}
+
+	if (debug) {
+		fprintf(stdout, "ok\n");
+		fprintf(stdout, "\n");
+	}
+
+	/* Finish crypto session */
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		my_perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	return 0;
+}
+
+/* Checks whether encryption and subsequent decryption
+ * produce the same data.
+ */
+static int test_encrypt_decrypt(int cfd)
+{
+	char plaintext_raw[DATA_SIZE + 63], *plaintext;
+	char ciphertext_raw[DATA_SIZE + 63], *ciphertext;
+	char iv[BLOCK_SIZE];
+	char key[KEY_SIZE];
+	char auth[AUTH_SIZE];
+	int enc_len;
+
+	struct session_op sess;
+	struct crypt_auth_op cao;
+	struct session_info_op siop;
+
+	if (debug) {
+		fprintf(stdout, "Tests on AES-GCM encryption/decryption: ");
+		fflush(stdout);
+	}
+
+	memset(&sess, 0, sizeof(sess));
+	memset(&cao, 0, sizeof(cao));
+
+	memset(key, 0x33, sizeof(key));
+	memset(iv, 0x03, sizeof(iv));
+	memset(auth, 0xf1, sizeof(auth));
+
+	/* Get crypto session for AES128 */
+	sess.cipher = CRYPTO_AES_GCM;
+	sess.keylen = KEY_SIZE;
+	sess.key = key;
+
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		my_perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+	siop.ses = sess.ses;
+	if (ioctl(cfd, CIOCGSESSINFO, &siop)) {
+		my_perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+//      printf("requested cipher CRYPTO_AES_GCM, got %s with driver %s\n",
+//                      siop.cipher_info.cra_name, siop.cipher_info.cra_driver_name);
+
+	plaintext =
+	    (char *) (((unsigned long) plaintext_raw + siop.alignmask) &
+		      ~siop.alignmask);
+	ciphertext =
+	    (char *) (((unsigned long) ciphertext_raw + siop.alignmask) &
+		      ~siop.alignmask);
+
+	memset(plaintext, 0x15, DATA_SIZE);
+
+	/* Encrypt plaintext into ciphertext */
+	cao.ses = sess.ses;
+	cao.auth_src = auth;
+	cao.auth_len = sizeof(auth);
+	cao.len = DATA_SIZE;
+	cao.src = plaintext;
+	cao.dst = ciphertext;
+	cao.iv = iv;
+	cao.iv_len = 12;
+	cao.op = COP_ENCRYPT;
+	cao.flags = 0;
+
+	if (ioctl(cfd, CIOCAUTHCRYPT, &cao)) {
+		my_perror("ioctl(CIOCAUTHCRYPT)");
+		return 1;
+	}
+
+	enc_len = cao.len;
+	//printf("Original plaintext size: %d, ciphertext: %d\n", DATA_SIZE, enc_len);
+
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		my_perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	/* Get crypto session for AES128 */
+	memset(&sess, 0, sizeof(sess));
+	sess.cipher = CRYPTO_AES_GCM;
+	sess.keylen = KEY_SIZE;
+	sess.key = key;
+
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		my_perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+	/* Decrypt the ciphertext in place */
+	cao.ses = sess.ses;
+	cao.auth_src = auth;
+	cao.auth_len = sizeof(auth);
+	cao.len = enc_len;
+	cao.src = ciphertext;
+	cao.dst = ciphertext;
+	cao.iv = iv;
+	cao.iv_len = 12;
+	cao.op = COP_DECRYPT;
+	cao.flags = 0;
+
+	if (ioctl(cfd, CIOCAUTHCRYPT, &cao)) {
+		my_perror("ioctl(CIOCAUTHCRYPT)");
+		return 1;
+	}
+
+	if (cao.len != DATA_SIZE) {
+		fprintf(stderr, "decrypted data size incorrect!\n");
+		return 1;
+	}
+
+	/* Verify the result */
+	if (memcmp(plaintext, ciphertext, DATA_SIZE) != 0) {
+		int i;
+		fprintf(stderr,
+			"FAIL: Decrypted data are different from the input data.\n");
+		printf("plaintext:");
+		for (i = 0; i < DATA_SIZE; i++) {
+			if ((i % 30) == 0)
+				printf("\n");
+			printf("%02x ", plaintext[i]);
+		}
+		printf("ciphertext:");
+		for (i = 0; i < DATA_SIZE; i++) {
+			if ((i % 30) == 0)
+				printf("\n");
+			printf("%02x ", ciphertext[i]);
+		}
+		printf("\n");
+		return 1;
+	}
+
+	/* Finish crypto session */
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		my_perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	if (debug) {
+		fprintf(stdout, "ok\n");
+		fprintf(stdout, "\n");
+	}
+
+	return 0;
+}
+
+static int test_encrypt_decrypt_error(int cfd, int err)
+{
+	char plaintext_raw[DATA_SIZE + 63], *plaintext;
+	char ciphertext_raw[DATA_SIZE + 63], *ciphertext;
+	char iv[BLOCK_SIZE];
+	char key[KEY_SIZE];
+	char auth[AUTH_SIZE];
+	int enc_len;
+
+	struct session_op sess;
+	struct crypt_auth_op cao;
+	struct session_info_op siop;
+
+	if (debug) {
+		fprintf(stdout, "Tests on AES-GCM tag verification: ");
+		fflush(stdout);
+	}
+
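+	/* Note: this tamper-detection test drives the TLS-type AEAD path
+	 * (AES-CBC with HMAC-SHA1), not GCM. */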
+	memset(&sess, 0, sizeof(sess));
+	memset(&cao, 0, sizeof(cao));
+
+	memset(key, 0x33, sizeof(key));
+	memset(iv, 0x03, sizeof(iv));
+	memset(auth, 0xf1, sizeof(auth));
+
+	/* Get crypto session for AES128 */
+	sess.cipher = CRYPTO_AES_CBC;
+	sess.keylen = KEY_SIZE;
+	sess.key = key;
+
+	sess.mac = CRYPTO_SHA1_HMAC;
+	sess.mackeylen = 16;
+	sess.mackey =
+	    (uint8_t *)
+	    "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b";
+
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		my_perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+	siop.ses = sess.ses;
+	if (ioctl(cfd, CIOCGSESSINFO, &siop)) {
+		my_perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+//      printf("requested cipher CRYPTO_AES_CBC/HMAC-SHA1, got %s with driver %s\n",
+//                      siop.cipher_info.cra_name, siop.cipher_info.cra_driver_name);
+
+	plaintext =
+	    (char *) (((unsigned long) plaintext_raw + siop.alignmask) &
+		      ~siop.alignmask);
+	ciphertext =
+	    (char *) (((unsigned long) ciphertext_raw + siop.alignmask) &
+		      ~siop.alignmask);
+
+	memset(plaintext, 0x15, DATA_SIZE);
+	memcpy(ciphertext, plaintext, DATA_SIZE);
+
+	/* Encrypt the data in place (TLS-type AEAD) */
+	cao.ses = sess.ses;
+	cao.auth_src = auth;
+	cao.auth_len = sizeof(auth);
+	cao.len = DATA_SIZE;
+	cao.src = ciphertext;
+	cao.dst = ciphertext;
+	cao.iv = iv;
+	cao.op = COP_ENCRYPT;
+	cao.flags = COP_FLAG_AEAD_TLS_TYPE;
+
+	if (ioctl(cfd, CIOCAUTHCRYPT, &cao)) {
+		my_perror("ioctl(CIOCAUTHCRYPT)");
+		return 1;
+	}
+
+	enc_len = cao.len;
+	//printf("Original plaintext size: %d, ciphertext: %d\n", DATA_SIZE, enc_len);
+
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		my_perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	/* Get crypto session for AES128 */
+	memset(&sess, 0, sizeof(sess));
+	sess.cipher = CRYPTO_AES_CBC;
+	sess.keylen = KEY_SIZE;
+	sess.key = key;
+	sess.mac = CRYPTO_SHA1_HMAC;
+	sess.mackeylen = 16;
+	sess.mackey =
+	    (uint8_t *)
+	    "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b";
+
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		my_perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+	if (err == 0)
+		auth[2]++;
+	else
+		ciphertext[4]++;
+
+	/* Attempt to decrypt the tampered data */
+	cao.ses = sess.ses;
+	cao.auth_src = auth;
+	cao.auth_len = sizeof(auth);
+	cao.len = enc_len;
+	cao.src = ciphertext;
+	cao.dst = ciphertext;
+	cao.iv = iv;
+	cao.op = COP_DECRYPT;
+	cao.flags = COP_FLAG_AEAD_TLS_TYPE;
+	if (ioctl(cfd, CIOCAUTHCRYPT, &cao)) {
+		if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+			my_perror("ioctl(CIOCFSESSION)");
+			return 1;
+		}
+
+		if (debug) {
+			fprintf(stdout, "ok\n");
+			fprintf(stdout, "\n");
+		}
+		return 0;
+	}
+
+	/* Finish crypto session */
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		my_perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	fprintf(stderr, "Modification to ciphertext was not detected\n");
+	return 1;
+}
+
+int main(int argc, char** argv)
+{
+	int fd = -1, cfd = -1;
+
+	if (argc > 1) debug = 1;
+
+	/* Open the crypto device */
+	fd = open("/dev/crypto", O_RDWR, 0);
+	if (fd < 0) {
+		my_perror("open(/dev/crypto)");
+		return 1;
+	}
+
+	/* Clone file descriptor */
+	if (ioctl(fd, CRIOGET, &cfd)) {
+		my_perror("ioctl(CRIOGET)");
+		return 1;
+	}
+
+	/* Set close-on-exec (not really needed here) */
+	if (fcntl(cfd, F_SETFD, 1) == -1) {
+		my_perror("fcntl(F_SETFD)");
+		return 1;
+	}
+
+	/* Run the test itself */
+
+	if (test_crypto(cfd))
+		return 1;
+
+	if (test_encrypt_decrypt(cfd))
+		return 1;
+
+	if (test_encrypt_decrypt_error(cfd, 0))
+		return 1;
+
+	if (test_encrypt_decrypt_error(cfd, 1))
+		return 1;
+
+	/* Close cloned descriptor */
+	if (close(cfd)) {
+		my_perror("close(cfd)");
+		return 1;
+	}
+
+	/* Close the original descriptor */
+	if (close(fd)) {
+		my_perror("close(fd)");
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/crypto/cryptodev/tests/cipher.c b/crypto/cryptodev/tests/cipher.c
new file mode 100644
index 0000000..07144f2
--- /dev/null
+++ b/crypto/cryptodev/tests/cipher.c
@@ -0,0 +1,326 @@
+/*
+ * Demo on how to use the /dev/crypto device for ciphering.
+ *
+ * Placed under public domain.
+ *
+ */
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <sys/ioctl.h>
+#include <crypto/cryptodev.h>
+
+static int debug = 0;
+
+#define	DATA_SIZE	8*1024
+#define	BLOCK_SIZE	16
+#define	KEY_SIZE	16
+
+static int
+test_crypto(int cfd)
+{
+	char plaintext_raw[DATA_SIZE + 63], *plaintext;
+	char ciphertext_raw[DATA_SIZE + 63], *ciphertext;
+	char iv[BLOCK_SIZE];
+	char key[KEY_SIZE];
+
+	struct session_op sess;
+#ifdef CIOCGSESSINFO
+	struct session_info_op siop;
+#endif
+	struct crypt_op cryp;
+
+	memset(&sess, 0, sizeof(sess));
+	memset(&cryp, 0, sizeof(cryp));
+
+	memset(key, 0x33,  sizeof(key));
+	memset(iv, 0x03,  sizeof(iv));
+
+	/* Get crypto session for AES128 */
+	sess.cipher = CRYPTO_AES_CBC;
+	sess.keylen = KEY_SIZE;
+	sess.key = key;
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+#ifdef CIOCGSESSINFO
+	siop.ses = sess.ses;
+	if (ioctl(cfd, CIOCGSESSINFO, &siop)) {
+		perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+	if (debug)
+		printf("requested cipher CRYPTO_AES_CBC, got %s with driver %s\n",
+			siop.cipher_info.cra_name, siop.cipher_info.cra_driver_name);
+
+	plaintext = (char *)(((unsigned long)plaintext_raw + siop.alignmask) & ~siop.alignmask);
+	ciphertext = (char *)(((unsigned long)ciphertext_raw + siop.alignmask) & ~siop.alignmask);
+#else
+	plaintext = plaintext_raw;
+	ciphertext = ciphertext_raw;
+#endif
+	memset(plaintext, 0x15, DATA_SIZE);
+
+	/* Encrypt plaintext into ciphertext */
+	cryp.ses = sess.ses;
+	cryp.len = DATA_SIZE;
+	cryp.src = plaintext;
+	cryp.dst = ciphertext;
+	cryp.iv = iv;
+	cryp.op = COP_ENCRYPT;
+	if (ioctl(cfd, CIOCCRYPT, &cryp)) {
+		perror("ioctl(CIOCCRYPT)");
+		return 1;
+	}
+
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+#ifdef CIOCGSESSINFO
+	siop.ses = sess.ses;
+	if (ioctl(cfd, CIOCGSESSINFO, &siop)) {
+		perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+	if (debug)
+		printf("requested cipher CRYPTO_AES_CBC, got %s with driver %s\n",
+			siop.cipher_info.cra_name, siop.cipher_info.cra_driver_name);
+#endif
+
+	/* Decrypt the ciphertext in place */
+	cryp.ses = sess.ses;
+	cryp.len = DATA_SIZE;
+	cryp.src = ciphertext;
+	cryp.dst = ciphertext;
+	cryp.iv = iv;
+	cryp.op = COP_DECRYPT;
+	if (ioctl(cfd, CIOCCRYPT, &cryp)) {
+		perror("ioctl(CIOCCRYPT)");
+		return 1;
+	}
+
+	/* Verify the result */
+	if (memcmp(plaintext, ciphertext, DATA_SIZE) != 0) {
+		int i;
+		fprintf(stderr,
+			"FAIL: Decrypted data are different from the input data.\n");
+		printf("plaintext:");
+		for (i = 0; i < DATA_SIZE; i++) {
+			if ((i % 30) == 0)
+				printf("\n");
+			printf("%02x ", plaintext[i]);
+		}
+		printf("ciphertext:");
+		for (i = 0; i < DATA_SIZE; i++) {
+			if ((i % 30) == 0)
+				printf("\n");
+			printf("%02x ", ciphertext[i]);
+		}
+		printf("\n");
+		return 1;
+	} else if (debug)
+		printf("Test passed\n");
+
+	/* Finish crypto session */
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	return 0;
+}
+
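+/* Two single-block known-answer checks. With an all-zero IV, one CBC
+ * block is identical to a raw AES-ECB encryption of the block. */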
+static int test_aes(int cfd)
+{
+	char plaintext1_raw[BLOCK_SIZE + 63], *plaintext1;
+	char ciphertext1[BLOCK_SIZE] = { 0xdf, 0x55, 0x6a, 0x33, 0x43, 0x8d, 0xb8, 0x7b, 0xc4, 0x1b, 0x17, 0x52, 0xc5, 0x5e, 0x5e, 0x49 };
+	char iv1[BLOCK_SIZE];
+	char key1[KEY_SIZE] = { 0xff, 0xff, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+	char plaintext2_data[BLOCK_SIZE] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc0, 0x00 };
+	char plaintext2_raw[BLOCK_SIZE + 63], *plaintext2;
+	char ciphertext2[BLOCK_SIZE] = { 0xb7, 0x97, 0x2b, 0x39, 0x41, 0xc4, 0x4b, 0x90, 0xaf, 0xa7, 0xb2, 0x64, 0xbf, 0xba, 0x73, 0x87 };
+	char iv2[BLOCK_SIZE];
+	char key2[KEY_SIZE];
+
+	struct session_op sess;
+#ifdef CIOCGSESSINFO
+	struct session_info_op siop;
+#endif
+	struct crypt_op cryp;
+
+	memset(&sess, 0, sizeof(sess));
+	memset(&cryp, 0, sizeof(cryp));
+
+	/* Get crypto session for AES128 */
+	sess.cipher = CRYPTO_AES_CBC;
+	sess.keylen = KEY_SIZE;
+	sess.key = key1;
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+#ifdef CIOCGSESSINFO
+	siop.ses = sess.ses;
+	if (ioctl(cfd, CIOCGSESSINFO, &siop)) {
+		perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+	plaintext1 = (char *)(((unsigned long)plaintext1_raw + siop.alignmask) & ~siop.alignmask);
+#else
+	plaintext1 = plaintext1_raw;
+#endif
+	memset(plaintext1, 0x0, BLOCK_SIZE);
+	memset(iv1, 0x0, sizeof(iv1));
+
+	/* Encrypt the block in place */
+	cryp.ses = sess.ses;
+	cryp.len = BLOCK_SIZE;
+	cryp.src = plaintext1;
+	cryp.dst = plaintext1;
+	cryp.iv = iv1;
+	cryp.op = COP_ENCRYPT;
+	if (ioctl(cfd, CIOCCRYPT, &cryp)) {
+		perror("ioctl(CIOCCRYPT)");
+		return 1;
+	}
+
+	/* Verify the result */
+	if (memcmp(plaintext1, ciphertext1, BLOCK_SIZE) != 0) {
+		fprintf(stderr,
+			"FAIL: Encrypted data differ from the expected ciphertext.\n");
+		return 1;
+	}
+
+	/* Test 2 */
+
+	memset(key2, 0x0, sizeof(key2));
+	memset(iv2, 0x0, sizeof(iv2));
+
+	/* Get crypto session for AES128 */
+	sess.cipher = CRYPTO_AES_CBC;
+	sess.keylen = KEY_SIZE;
+	sess.key = key2;
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+#ifdef CIOCGSESSINFO
+	siop.ses = sess.ses;
+	if (ioctl(cfd, CIOCGSESSINFO, &siop)) {
+		perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+	if (debug)
+		printf("requested cipher CRYPTO_AES_CBC, got %s with driver %s\n",
+			siop.cipher_info.cra_name, siop.cipher_info.cra_driver_name);
+
+	plaintext2 = (char *)(((unsigned long)plaintext2_raw + siop.alignmask) & ~siop.alignmask);
+#else
+	plaintext2 = plaintext2_raw;
+#endif
+	memcpy(plaintext2, plaintext2_data, BLOCK_SIZE);
+
+	/* Encrypt the block in place */
+	cryp.ses = sess.ses;
+	cryp.len = BLOCK_SIZE;
+	cryp.src = plaintext2;
+	cryp.dst = plaintext2;
+	cryp.iv = iv2;
+	cryp.op = COP_ENCRYPT;
+	if (ioctl(cfd, CIOCCRYPT, &cryp)) {
+		perror("ioctl(CIOCCRYPT)");
+		return 1;
+	}
+
+	/* Verify the result */
+	if (memcmp(plaintext2, ciphertext2, BLOCK_SIZE) != 0) {
+		int i;
+		fprintf(stderr,
+			"FAIL: Encrypted data differ from the expected ciphertext.\n");
+		printf("computed:");
+		for (i = 0; i < BLOCK_SIZE; i++) {
+			if ((i % 30) == 0)
+				printf("\n");
+			printf("%02x ", plaintext2[i]);
+		}
+		printf("ciphertext:");
+		for (i = 0; i < BLOCK_SIZE; i++) {
+			if ((i % 30) == 0)
+				printf("\n");
+			printf("%02x ", ciphertext2[i]);
+		}
+		printf("\n");
+		return 1;
+	}
+
+	if (debug) printf("AES Test passed\n");
+
+	/* Finish crypto session */
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	return 0;
+}
+
+int
+main(int argc, char** argv)
+{
+	int fd = -1, cfd = -1;
+
+	if (argc > 1) debug = 1;
+
+	/* Open the crypto device */
+	fd = open("/dev/crypto", O_RDWR, 0);
+	if (fd < 0) {
+		perror("open(/dev/crypto)");
+		return 1;
+	}
+
+	/* Clone file descriptor */
+	if (ioctl(fd, CRIOGET, &cfd)) {
+		perror("ioctl(CRIOGET)");
+		return 1;
+	}
+
+	/* Set close-on-exec (not really needed here) */
+	if (fcntl(cfd, F_SETFD, 1) == -1) {
+		perror("fcntl(F_SETFD)");
+		return 1;
+	}
+
+	/* Run the test itself */
+	if (test_aes(cfd))
+		return 1;
+
+	if (test_crypto(cfd))
+		return 1;
+
+	/* Close cloned descriptor */
+	if (close(cfd)) {
+		perror("close(cfd)");
+		return 1;
+	}
+
+	/* Close the original descriptor */
+	if (close(fd)) {
+		perror("close(fd)");
+		return 1;
+	}
+
+	return 0;
+}
+
diff --git a/crypto/cryptodev/tests/cipher_comp.c b/crypto/cryptodev/tests/cipher_comp.c
new file mode 100644
index 0000000..b2bc5af
--- /dev/null
+++ b/crypto/cryptodev/tests/cipher_comp.c
@@ -0,0 +1,159 @@
+/*
+ * Compare encryption results with those from OpenSSL.
+ *
+ * Placed under public domain.
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <stdint.h>
+
+#include <sys/ioctl.h>
+#include <crypto/cryptodev.h>
+
+#include "openssl_wrapper.h"
+
+#define	BLOCK_SIZE	16
+#define	KEY_SIZE	16
+#define MAX_DATALEN	(64 * 1024)
+
+
+static int
+test_crypto(int cfd, struct session_op *sess, int datalen)
+{
+	char *data, *encrypted;
+	char *encrypted_comp;
+
+	char iv_in[BLOCK_SIZE];
+	char iv[BLOCK_SIZE];
+	char iv_comp[BLOCK_SIZE];
+
+	struct crypt_op cryp;
+
+	int ret = 0;
+
+	data = malloc(datalen);
+	encrypted = malloc(datalen);
+	encrypted_comp = malloc(datalen);
+	memset(data, datalen & 0xff, datalen);
+	memset(encrypted, 0x27, datalen);
+	memset(encrypted_comp, 0x41, datalen);
+
+	memset(iv_in, 0x23, sizeof(iv_in));
+	memcpy(iv, iv_in, sizeof(iv));
+	memcpy(iv_comp, iv_in, sizeof(iv_comp));
+
+	memset(&cryp, 0, sizeof(cryp));
+
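+	/* COP_FLAG_WRITE_IV makes the kernel write the IV for the next
+	 * chained operation back into cryp.iv, so the OpenSSL reference run
+	 * below can be compared both on ciphertext and on IV state. */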
+	/* Encrypt the data via cryptodev */
+	cryp.ses = sess->ses;
+	cryp.len = datalen;
+	cryp.src = data;
+	cryp.dst = encrypted;
+	cryp.iv = iv;
+	cryp.op = COP_ENCRYPT;
+	cryp.flags = COP_FLAG_WRITE_IV;
+	if ((ret = ioctl(cfd, CIOCCRYPT, &cryp))) {
+		perror("ioctl(CIOCCRYPT)");
+		goto out;
+	}
+
+	cryp.dst = encrypted_comp;
+	cryp.iv = iv_comp;
+
+	if ((ret = openssl_cioccrypt(sess, &cryp))) {
+		fprintf(stderr, "openssl_cioccrypt() failed!\n");
+		goto out;
+	}
+
+	if ((ret = memcmp(encrypted, encrypted_comp, cryp.len))) {
+		printf("fail for datalen %d, cipher texts do not match!\n", datalen);
+	}
+	if ((ret = memcmp(iv, iv_comp, BLOCK_SIZE))) {
+		printf("fail for datalen %d, IVs do not match!\n", datalen);
+	}
+out:
+	free(data);
+	free(encrypted);
+	free(encrypted_comp);
+	return ret;
+}
+
+#define max(a, b) ((a) > (b) ? (a) : (b))
+#define min(a, b) ((a) < (b) ? (a) : (b))
+
+int
+main(int argc, char **argv)
+{
+	int fd;
+	struct session_op sess;
+	unsigned char key[KEY_SIZE];
+	int datalen = BLOCK_SIZE;
+	int datalen_end = MAX_DATALEN;
+	int i;
+
+	if (argc > 1) {
+		datalen = min(max(atoi(argv[1]), BLOCK_SIZE), MAX_DATALEN);
+		datalen_end = datalen;
+	}
+	if (argc > 2) {
+		datalen_end = min(atoi(argv[2]), MAX_DATALEN);
+		if (datalen_end < datalen)
+			datalen_end = datalen;
+	}
+
+	/* Open the crypto device */
+	fd = open("/dev/crypto", O_RDWR, 0);
+	if (fd < 0) {
+		perror("open(/dev/crypto)");
+		return 1;
+	}
+
+	for (i = 0; i < KEY_SIZE; i++)
+		key[i] = i & 0xff;
+	memset(&sess, 0, sizeof(sess));
+
+	/* encryption test */
+	sess.cipher = CRYPTO_AES_CBC;
+	sess.keylen = KEY_SIZE;
+	sess.key = key;
+	if (ioctl(fd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+#ifdef CIOCGSESSINFO
+	{
+		struct session_info_op siop = {
+			.ses = sess.ses,
+		};
+
+		if (ioctl(fd, CIOCGSESSINFO, &siop)) {
+			perror("ioctl(CIOCGSESSINFO)");
+		} else {
+			printf("requested cipher CRYPTO_AES_CBC and mac CRYPTO_SHA1_HMAC,"
+			       " got cipher %s with driver %s and hash %s with driver %s\n",
+					siop.cipher_info.cra_name, siop.cipher_info.cra_driver_name,
+					siop.hash_info.cra_name, siop.hash_info.cra_driver_name);
+		}
+	}
+#endif
+
+	for (; datalen <= datalen_end; datalen += BLOCK_SIZE) {
+		if (test_crypto(fd, &sess, datalen)) {
+			printf("test_crypto() failed for datalen of %d\n", datalen);
+			return 1;
+		}
+	}
+
+	/* Finish crypto session */
+	if (ioctl(fd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+	}
+
+	close(fd);
+	return 0;
+}
diff --git a/crypto/cryptodev/tests/fullspeed.c b/crypto/cryptodev/tests/fullspeed.c
new file mode 100644
index 0000000..611859d
--- /dev/null
+++ b/crypto/cryptodev/tests/fullspeed.c
@@ -0,0 +1,184 @@
+/*  cryptodev_test - simple benchmark tool for cryptodev
+ *
+ *    Copyright (C) 2010 by Phil Sutter <phil.sutter@viprinet.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <signal.h>
+#include <unistd.h>
+
+#include <crypto/cryptodev.h>
+
+static int si = 1; /* SI by default */
+
+static double udifftimeval(struct timeval start, struct timeval end)
+{
+	return (double)(end.tv_usec - start.tv_usec) +
+	       (double)(end.tv_sec - start.tv_sec) * 1000 * 1000;
+}
+
+static int must_finish = 0;
+
+static void alarm_handler(int signo)
+{
+	must_finish = 1;
+}
+
+static char *units[] = { "", "Ki", "Mi", "Gi", "Ti", 0};
+static char *si_units[] = { "", "K", "M", "G", "T", 0};
+
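+/* Scale a byte count into a human-readable unit: with si non-zero divide
+ * by 1000 (KB, MB, ...), otherwise by 1024 (KiB, MiB, ...); *speed is the
+ * scaled amount per second. */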
+static void value2human(int si, double bytes, double time, double* data, double* speed,char* metric)
+{
+	int unit = 0;
+
+	*data = bytes;
+
+	if (si) {
+		while (*data > 1000 && si_units[unit + 1]) {
+			*data /= 1000;
+			unit++;
+		}
+		*speed = *data / time;
+		sprintf(metric, "%sB", si_units[unit]);
+	} else {
+		while (*data > 1024 && units[unit + 1]) {
+			*data /= 1024;
+			unit++;
+		}
+		*speed = *data / time;
+		sprintf(metric, "%sB", units[unit]);
+	}
+}
+
+#define MAX(x,y) ((x)>(y)?(x):(y))
+
+int encrypt_data(int algo, void* keybuf, int key_size, int fdc, int chunksize)
+{
+	struct crypt_op cop;
+	char *buffer, iv[32];
+	static int val = 23;
+	struct timeval start, end;
+	double total = 0;
+	double secs, ddata, dspeed;
+	char metric[16];
+	struct session_op sess;
+
+	if (posix_memalign((void **)&buffer, 16, chunksize)) {
+		printf("posix_memalign() failed! (mask %x, size: %d)\n", 16, chunksize);
+		return 1;
+	}
+
+	memset(iv, 0x23, 32);
+
+	printf("\tEncrypting in chunks of %d bytes: ", chunksize);
+	fflush(stdout);
+
+	memset(buffer, val++, chunksize);
+
+	must_finish = 0;
+	alarm(5);
+
+	gettimeofday(&start, NULL);
+	do {
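+		/* A fresh session per iteration: the figures below include
+		 * CIOCGSESSION/CIOCFSESSION setup and teardown cost. */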
+		memset(&sess, 0, sizeof(sess));
+		sess.cipher = algo;
+		sess.keylen = key_size;
+		sess.key = keybuf;
+		if (ioctl(fdc, CIOCGSESSION, &sess)) {
+			perror("ioctl(CIOCGSESSION)");
+			return 1;
+		}
+
+		memset(&cop, 0, sizeof(cop));
+		cop.ses = sess.ses;
+		cop.len = chunksize;
+		cop.iv = (unsigned char *)iv;
+		cop.op = COP_ENCRYPT;
+		cop.src = (unsigned char *)buffer;
+		cop.dst = buffer;
+
+		if (ioctl(fdc, CIOCCRYPT, &cop)) {
+			perror("ioctl(CIOCCRYPT)");
+			return 1;
+		}
+
+		ioctl(fdc, CIOCFSESSION, &sess.ses);
+
+		total += chunksize;
+	} while (must_finish == 0);
+	gettimeofday(&end, NULL);
+
+	secs = udifftimeval(start, end) / 1000000.0;
+
+	value2human(si, total, secs, &ddata, &dspeed, metric);
+	printf("done. %.2f %s in %.2f secs: ", ddata, metric, secs);
+	printf("%.2f %s/sec\n", dspeed, metric);
+
+	free(buffer);
+	return 0;
+}
+
+int main(int argc, char** argv)
+{
+	int fd, i, fdc = -1;
+	char keybuf[32];
+
+	signal(SIGALRM, alarm_handler);
+
+	if (argc > 1) {
+		if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-h") == 0) {
+			printf("Usage: speed [--kib]\n");
+			exit(0);
+		}
+		if (strcmp(argv[1], "--kib") == 0) {
+			si = 0;
+		}
+	}
+
+	if ((fd = open("/dev/crypto", O_RDWR, 0)) < 0) {
+		perror("open()");
+		return 1;
+	}
+	if (ioctl(fd, CRIOGET, &fdc)) {
+		perror("ioctl(CRIOGET)");
+		return 1;
+	}
+
+	fprintf(stderr, "Testing NULL cipher: \n");
+
+	for (i = 512; i <= (64 * 1024); i *= 2) {
+		if (encrypt_data(CRYPTO_NULL, keybuf, 0, fdc, i))
+			break;
+	}
+
+	fprintf(stderr, "\nTesting AES-128-CBC cipher: \n");
+	memset(keybuf, 0x42, 16);
+
+	for (i = 512; i <= (64 * 1024); i *= 2) {
+		if (encrypt_data(CRYPTO_AES_CBC, keybuf, 16, fdc, i))
+			break;
+	}
+
+	close(fdc);
+	close(fd);
+	return 0;
+}
diff --git a/crypto/cryptodev/tests/hash_comp.c b/crypto/cryptodev/tests/hash_comp.c
new file mode 100644
index 0000000..9e700a1
--- /dev/null
+++ b/crypto/cryptodev/tests/hash_comp.c
@@ -0,0 +1,150 @@
+/*
+ * Compare digest results with those from OpenSSL.
+ *
+ * Placed under public domain.
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <stdint.h>
+
+#include <sys/ioctl.h>
+#include <crypto/cryptodev.h>
+
+#include "openssl_wrapper.h"
+
+#define	BLOCK_SIZE	16
+#define MAX_DATALEN	(64 * 1024)
+
+static void printhex(unsigned char *buf, int buflen)
+{
+	while (buflen-- > 0) {
+		printf("\\x%.2x", *(buf++));
+	}
+	printf("\n");
+}
+
+static int
+test_crypto(int cfd, struct session_op *sess, int datalen)
+{
+	unsigned char *data;
+
+	unsigned char mac[AALG_MAX_RESULT_LEN];
+
+	unsigned char mac_comp[AALG_MAX_RESULT_LEN];
+
+	struct crypt_op cryp;
+
+	int ret = 0;
+
+	data = malloc(datalen);
+	memset(data, datalen & 0xff, datalen);
+
+	memset(mac, 0, sizeof(mac));
+	memset(mac_comp, 0, sizeof(mac_comp));
+
+	memset(&cryp, 0, sizeof(cryp));
+
+	/* Compute the digest via cryptodev */
+	cryp.ses = sess->ses;
+	cryp.len = datalen;
+	cryp.src = data;
+	cryp.mac = mac;
+	cryp.op = COP_ENCRYPT;
+	if ((ret = ioctl(cfd, CIOCCRYPT, &cryp))) {
+		perror("ioctl(CIOCCRYPT)");
+		goto out;
+	}
+
+	cryp.mac = mac_comp;
+
+	if ((ret = openssl_cioccrypt(sess, &cryp))) {
+		fprintf(stderr, "openssl_cioccrypt() failed!\n");
+		goto out;
+	}
+
+	if (memcmp(mac, mac_comp, AALG_MAX_RESULT_LEN)) {
+		printf("fail for datalen %d, MACs do not match!\n", datalen);
+		ret = 1;
+		printf("wrong mac: ");
+		printhex(mac, 20);
+		printf("right mac: ");
+		printhex(mac_comp, 20);
+	}
+
+out:
+	free(data);
+	return ret;
+}
+
+#define max(a, b) ((a) > (b) ? (a) : (b))
+#define min(a, b) ((a) < (b) ? (a) : (b))
+
+int
+main(int argc, char **argv)
+{
+	int fd;
+	struct session_op sess;
+	int datalen = BLOCK_SIZE;
+	int datalen_end = MAX_DATALEN;
+	int i;
+
+	if (argc > 1) {
+		datalen = min(max(atoi(argv[1]), BLOCK_SIZE), MAX_DATALEN);
+		datalen_end = datalen;
+	}
+	if (argc > 2) {
+		datalen_end = min(atoi(argv[2]), MAX_DATALEN);
+		if (datalen_end < datalen)
+			datalen_end = datalen;
+	}
+
+	/* Open the crypto device */
+	fd = open("/dev/crypto", O_RDWR, 0);
+	if (fd < 0) {
+		perror("open(/dev/crypto)");
+		return 1;
+	}
+
+	memset(&sess, 0, sizeof(sess));
+
+	/* Hash test */
+	sess.mac = CRYPTO_SHA1;
+	if (ioctl(fd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+#ifdef CIOCGSESSINFO
+	{
+		struct session_info_op siop = {
+			.ses = sess.ses,
+		};
+
+		if (ioctl(fd, CIOCGSESSINFO, &siop)) {
+			perror("ioctl(CIOCGSESSINFO)");
+		} else {
+			printf("requested mac CRYPTO_SHA1, got hash %s with driver %s\n",
+					siop.hash_info.cra_name, siop.hash_info.cra_driver_name);
+		}
+	}
+#endif
+
+	for (; datalen <= datalen_end; datalen += BLOCK_SIZE) {
+		if (test_crypto(fd, &sess, datalen)) {
+			printf("test_crypto() failed for datalen of %d\n", datalen);
+			return 1;
+		}
+	}
+
+	/* Finish crypto session */
+	if (ioctl(fd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+	}
+
+	close(fd);
+	return 0;
+}
diff --git a/crypto/cryptodev/tests/hashcrypt_speed.c b/crypto/cryptodev/tests/hashcrypt_speed.c
new file mode 100644
index 0000000..e60b73d
--- /dev/null
+++ b/crypto/cryptodev/tests/hashcrypt_speed.c
@@ -0,0 +1,206 @@
+/*  hashcrypt_speed - simple SHA+AES benchmark tool for cryptodev
+ *
+ *    Copyright (C) 2011 by Phil Sutter <phil.sutter@viprinet.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <signal.h>
+#include <crypto/cryptodev.h>
+
+#define MAX(x,y) ((x)>(y)?(x):(y))
+
+static double udifftimeval(struct timeval start, struct timeval end)
+{
+	return (double)(end.tv_usec - start.tv_usec) +
+	       (double)(end.tv_sec - start.tv_sec) * 1000 * 1000;
+}
+
+static int must_finish = 0;
+
+static void alarm_handler(int signo)
+{
+	must_finish = 1;
+}
+
+static char *units[] = { "", "Ki", "Mi", "Gi", "Ti", 0};
+static char *si_units[] = { "", "K", "M", "G", "T", 0};
+
+static void value2human(int si, double bytes, double time, double* data, double* speed,char* metric)
+{
+	int unit = 0;
+
+	*data = bytes;
+
+	if (si) {
+		while (*data > 1000 && si_units[unit + 1]) {
+			*data /= 1000;
+			unit++;
+		}
+		*speed = *data / time;
+		sprintf(metric, "%sB", si_units[unit]);
+	} else {
+		while (*data > 1024 && units[unit + 1]) {
+			*data /= 1024;
+			unit++;
+		}
+		*speed = *data / time;
+		sprintf(metric, "%sB", units[unit]);
+	}
+}
+
+
+int hash_data(struct session_op *sess, int fdc, int chunksize, int align)
+{
+	struct crypt_op cop;
+	char *buffer;
+	static int val = 23;
+	struct timeval start, end;
+	double total = 0;
+	double secs, ddata, dspeed;
+	char metric[16];
+	uint8_t mac[AALG_MAX_RESULT_LEN];
+
+	if (align) {
+		if (posix_memalign((void **)&buffer, align, chunksize)) {
+			printf("posix_memalign() failed, align: %d, size: %d!\n", align, chunksize);
+			return 1;
+		}
+	} else {
+		if (!(buffer = malloc(chunksize))) {
+			perror("malloc()");
+			return 1;
+		}
+	}
+
+	printf("\tEncrypting in chunks of %d bytes: ", chunksize);
+	fflush(stdout);
+
+	memset(buffer, val++, chunksize);
+
+	must_finish = 0;
+	alarm(5);
+
+	gettimeofday(&start, NULL);
+	do {
+		memset(&cop, 0, sizeof(cop));
+		cop.ses = sess->ses;
+		cop.len = chunksize;
+		cop.op = COP_ENCRYPT;
+		cop.src = cop.dst = (unsigned char *)buffer;
+		cop.mac = mac;
+
+		if (ioctl(fdc, CIOCCRYPT, &cop)) {
+			perror("ioctl(CIOCCRYPT)");
+			return 1;
+		}
+		total += chunksize;
+	} while (must_finish == 0);
+	gettimeofday(&end, NULL);
+
+	secs = udifftimeval(start, end) / 1000000.0;
+
+	value2human(1, total, secs, &ddata, &dspeed, metric);
+	printf("done. %.2f %s in %.2f secs: ", ddata, metric, secs);
+	printf("%.2f %s/sec\n", dspeed, metric);
+
+	free(buffer);
+	return 0;
+}
+
+int main(void)
+{
+	int fd, i, fdc = -1, align = 0;
+	struct session_op sess;
+	char keybuf[32];
+#ifdef CIOCGSESSINFO
+	struct session_info_op siop;
+#endif
+
+	signal(SIGALRM, alarm_handler);
+
+	if ((fd = open("/dev/crypto", O_RDWR, 0)) < 0) {
+		perror("open()");
+		return 1;
+	}
+	if (ioctl(fd, CRIOGET, &fdc)) {
+		perror("ioctl(CRIOGET)");
+		return 1;
+	}
+
+	fprintf(stderr, "Testing AES128 with SHA1 Hash: \n");
+	memset(&sess, 0, sizeof(sess));
+	sess.cipher = CRYPTO_AES_CBC;
+	sess.keylen = 16;
+	memset(keybuf, 0x42, 32);
+	sess.key = (unsigned char *)keybuf;
+	sess.mac = CRYPTO_SHA1;
+	if (ioctl(fdc, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+#ifdef CIOCGSESSINFO
+	siop.ses = sess.ses;
+	if (ioctl(fdc, CIOCGSESSINFO, &siop)) {
+		perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+	printf("requested hash CRYPTO_SHA1, got %s with driver %s\n",
+			siop.hash_info.cra_name, siop.hash_info.cra_driver_name);
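+	/* Buffers must honor the driver's alignment mask; fall back to
+	 * pointer alignment when the reported mask is smaller. */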
+	align = MAX(sizeof(void*), siop.alignmask+1);
+#endif
+
+	for (i = 256; i <= (64 * 1024); i *= 4) {
+		if (hash_data(&sess, fdc, i, align))
+			break;
+	}
+
+	fprintf(stderr, "\nTesting AES256 with SHA256 Hash: \n");
+	memset(&sess, 0, sizeof(sess));
+	sess.cipher = CRYPTO_AES_CBC;
+	sess.keylen = 32;
+	sess.key = (unsigned char *)keybuf;
+	sess.mac = CRYPTO_SHA2_256;
+	if (ioctl(fdc, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+#ifdef CIOCGSESSINFO
+	siop.ses = sess.ses;
+	if (ioctl(fdc, CIOCGSESSINFO, &siop)) {
+		perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+	printf("requested hash CRYPTO_SHA2_256, got %s with driver %s\n",
+			siop.hash_info.cra_name, siop.hash_info.cra_driver_name);
+	align = MAX(sizeof(void*), siop.alignmask+1);
+#endif
+
+	for (i = 256; i <= (64 * 1024); i *= 4) {
+		if (hash_data(&sess, fdc, i, align))
+			break;
+	}
+
+	close(fdc);
+	close(fd);
+	return 0;
+}
diff --git a/crypto/cryptodev/tests/hmac.c b/crypto/cryptodev/tests/hmac.c
new file mode 100644
index 0000000..1d9349e
--- /dev/null
+++ b/crypto/cryptodev/tests/hmac.c
@@ -0,0 +1,338 @@
+/*
+ * Demo on how to use the /dev/crypto device for HMAC.
+ *
+ * Placed under public domain.
+ *
+ */
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <stdint.h>
+
+#include <sys/ioctl.h>
+#include <crypto/cryptodev.h>
+
+static int debug = 0;
+
+#define	DATA_SIZE	4096
+#define	BLOCK_SIZE	16
+#define	KEY_SIZE	16
+#define SHA1_HASH_LEN   20
+
+static int
+test_crypto(int cfd)
+{
+	struct {
+		uint8_t	in[DATA_SIZE],
+			encrypted[DATA_SIZE],
+			decrypted[DATA_SIZE],
+			iv[BLOCK_SIZE],
+			key[KEY_SIZE];
+	} data;
+	struct session_op sess;
+#ifdef CIOCGSESSINFO
+	struct session_info_op siop;
+#endif
+	struct crypt_op cryp;
+	uint8_t mac[AALG_MAX_RESULT_LEN];
+	uint8_t oldmac[AALG_MAX_RESULT_LEN];
+	uint8_t md5_hmac_out[] = "\x75\x0c\x78\x3e\x6a\xb0\xb5\x03\xea\xa8\x6e\x31\x0a\x5d\xb7\x38";
+	uint8_t sha1_out[] = "\x8f\x82\x03\x94\xf9\x53\x35\x18\x20\x45\xda\x24\xf3\x4d\xe5\x2b\xf8\xbc\x34\x32";
+	int i;
+
+	memset(&sess, 0, sizeof(sess));
+	memset(&cryp, 0, sizeof(cryp));
+
+	/* Use the garbage that is on the stack :-) */
+	/* memset(&data, 0, sizeof(data)); */
+
+	/* SHA1 plain test */
+	memset(mac, 0, sizeof(mac));
+
+	sess.cipher = 0;
+	sess.mac = CRYPTO_SHA1;
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+#ifdef CIOCGSESSINFO
+	siop.ses = sess.ses;
+	if (ioctl(cfd, CIOCGSESSINFO, &siop)) {
+		perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+	if (debug) printf("requested mac CRYPTO_SHA1, got %s with driver %s\n",
+			siop.hash_info.cra_name, siop.hash_info.cra_driver_name);
+#endif
+
+	cryp.ses = sess.ses;
+	cryp.len = sizeof("what do ya want for nothing?")-1;
+	cryp.src = "what do ya want for nothing?";
+	cryp.mac = mac;
+	cryp.op = COP_ENCRYPT;
+	if (ioctl(cfd, CIOCCRYPT, &cryp)) {
+		perror("ioctl(CIOCCRYPT)");
+		return 1;
+	}
+
+	if (memcmp(mac, sha1_out, 20)!=0) {
+		printf("mac: ");
+		for (i=0;i<SHA1_HASH_LEN;i++) {
+			printf("%.2x", (uint8_t)mac[i]);
+		}
+		puts("\n");
+		fprintf(stderr, "HASH test 1: failed\n");
+	} else {
+		if (debug) fprintf(stderr, "HASH test 1: passed\n");
+	}
+
+	/* MD5-HMAC test */
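+	/* Key "Jefe" with "what do ya want for nothing?" is HMAC-MD5 test
+	 * case 2 from RFC 2202; md5_hmac_out above is its expected digest. */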
+	memset(mac, 0, sizeof(mac));
+
+	sess.cipher = 0;
+	sess.mackey = (uint8_t*)"Jefe";
+	sess.mackeylen = 4;
+	sess.mac = CRYPTO_MD5_HMAC;
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+#ifdef CIOCGSESSINFO
+	siop.ses = sess.ses;
+	if (ioctl(cfd, CIOCGSESSINFO, &siop)) {
+		perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+	if (debug)
+		printf("requested mac CRYPTO_MD5_HMAC, got %s with driver %s\n",
+			siop.hash_info.cra_name, siop.hash_info.cra_driver_name);
+#endif
+
+	cryp.ses = sess.ses;
+	cryp.len = sizeof("what do ya want for nothing?")-1;
+	cryp.src = "what do ya want for nothing?";
+	cryp.mac = mac;
+	cryp.op = COP_ENCRYPT;
+	if (ioctl(cfd, CIOCCRYPT, &cryp)) {
+		perror("ioctl(CIOCCRYPT)");
+		return 1;
+	}
+
+	if (memcmp(mac, md5_hmac_out, 16)!=0) {
+		printf("mac: ");
+		for (i=0;i<SHA1_HASH_LEN;i++) {
+			printf("%.2x", (uint8_t)mac[i]);
+		}
+		puts("\n");
+		fprintf(stderr, "HMAC test 1: failed\n");
+	} else {
+		if (debug) fprintf(stderr, "HMAC test 1: passed\n");
+	}
+
+	/* Hash and encryption in one step test */
+	sess.cipher = CRYPTO_AES_CBC;
+	sess.mac = CRYPTO_SHA1_HMAC;
+	sess.keylen = KEY_SIZE;
+	sess.key = data.key;
+	sess.mackeylen = 16;
+	sess.mackey = (uint8_t*)"\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b";
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+#ifdef CIOCGSESSINFO
+	siop.ses = sess.ses;
+	if (ioctl(cfd, CIOCGSESSINFO, &siop)) {
+		perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+	if (debug)
+		printf("requested cipher CRYPTO_AES_CBC and mac CRYPTO_SHA1_HMAC,"
+	       " got cipher %s with driver %s and hash %s with driver %s\n",
+			siop.cipher_info.cra_name, siop.cipher_info.cra_driver_name,
+			siop.hash_info.cra_name, siop.hash_info.cra_driver_name);
+#endif
+
+	/* Encrypt data.in to data.encrypted */
+	cryp.ses = sess.ses;
+	cryp.len = sizeof(data.in);
+	cryp.src = data.in;
+	cryp.dst = data.encrypted;
+	cryp.iv = data.iv;
+	cryp.mac = mac;
+	cryp.op = COP_ENCRYPT;
+	if (ioctl(cfd, CIOCCRYPT, &cryp)) {
+		perror("ioctl(CIOCCRYPT)");
+		return 1;
+	}
+
+	memcpy(oldmac, mac, sizeof(mac));
+
+	/* Decrypt data.encrypted to data.decrypted */
+	cryp.src = data.encrypted;
+	cryp.dst = data.decrypted;
+	cryp.op = COP_DECRYPT;
+	if (ioctl(cfd, CIOCCRYPT, &cryp)) {
+		perror("ioctl(CIOCCRYPT)");
+		return 1;
+	}
+
+	/* Verify the result */
+	if (memcmp(data.in, data.decrypted, sizeof(data.in)) != 0) {
+		fprintf(stderr,
+			"FAIL: Decrypted data are different from the input data.\n");
+		return 1;
+	} else if (debug) printf("Crypt Test: passed\n");
+
+	if (memcmp(mac, oldmac, 20) != 0) {
+		fprintf(stderr,
+			"FAIL: Hash in decrypted data different than in encrypted.\n");
+		return 1;
+	} else if (debug) printf("HMAC Test 2: passed\n");
+
+	/* Finish crypto session */
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	return 0;
+}
+
+static int
+test_extras(int cfd)
+{
+	struct session_op sess;
+#ifdef CIOCGSESSINFO
+	struct session_info_op siop;
+#endif
+	struct crypt_op cryp;
+	uint8_t mac[AALG_MAX_RESULT_LEN];
+	uint8_t sha1_out[] = "\x8f\x82\x03\x94\xf9\x53\x35\x18\x20\x45\xda\x24\xf3\x4d\xe5\x2b\xf8\xbc\x34\x32";
+	int i;
+
+	memset(&sess, 0, sizeof(sess));
+	memset(&cryp, 0, sizeof(cryp));
+
+	/* SHA1 plain test */
+	memset(mac, 0, sizeof(mac));
+
+	sess.cipher = 0;
+	sess.mac = CRYPTO_SHA1;
+	if (ioctl(cfd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+#ifdef CIOCGSESSINFO
+	siop.ses = sess.ses;
+	if (ioctl(cfd, CIOCGSESSINFO, &siop)) {
+		perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+	if (debug)
+		printf("requested mac CRYPTO_SHA1, got %s with driver %s\n",
+			siop.hash_info.cra_name, siop.hash_info.cra_driver_name);
+#endif
+
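+	/* Hash the message in two steps: COP_FLAG_UPDATE feeds a chunk
+	 * without finalizing, COP_FLAG_FINAL feeds the last chunk and
+	 * writes the digest to cryp.mac. */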
+	cryp.ses = sess.ses;
+	cryp.len = sizeof("what do")-1;
+	cryp.src = "what do";
+	cryp.mac = mac;
+	cryp.op = COP_ENCRYPT;
+	cryp.flags = COP_FLAG_UPDATE;
+	if (ioctl(cfd, CIOCCRYPT, &cryp)) {
+		perror("ioctl(CIOCCRYPT)");
+		return 1;
+	}
+
+	cryp.ses = sess.ses;
+	cryp.len = sizeof(" ya want for nothing?")-1;
+	cryp.src = " ya want for nothing?";
+	cryp.mac = mac;
+	cryp.op = COP_ENCRYPT;
+	cryp.flags = COP_FLAG_FINAL;
+	if (ioctl(cfd, CIOCCRYPT, &cryp)) {
+		perror("ioctl(CIOCCRYPT)");
+		return 1;
+	}
+
+	if (memcmp(mac, sha1_out, 20)!=0) {
+		printf("mac: ");
+		for (i=0;i<SHA1_HASH_LEN;i++) {
+			printf("%.2x", (uint8_t)mac[i]);
+		}
+		puts("\n");
+		fprintf(stderr, "HASH test [update]: failed\n");
+	} else {
+		if (debug) fprintf(stderr, "HASH test [update]: passed\n");
+	}
+
+	memset(mac, 0, sizeof(mac));
+
+	/* Finish crypto session */
+	if (ioctl(cfd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+		return 1;
+	}
+
+	return 0;
+}
+
+
+int
+main(int argc, char** argv)
+{
+	int fd = -1, cfd = -1;
+
+	if (argc > 1) debug = 1;
+
+	/* Open the crypto device */
+	fd = open("/dev/crypto", O_RDWR, 0);
+	if (fd < 0) {
+		perror("open(/dev/crypto)");
+		return 1;
+	}
+
+	/* Clone file descriptor */
+	if (ioctl(fd, CRIOGET, &cfd)) {
+		perror("ioctl(CRIOGET)");
+		return 1;
+	}
+
+	/* Set close-on-exec (not really needed here) */
+	if (fcntl(cfd, F_SETFD, 1) == -1) {
+		perror("fcntl(F_SETFD)");
+		return 1;
+	}
+
+	/* Run the test itself */
+	if (test_crypto(cfd))
+		return 1;
+
+	if (test_extras(cfd))
+		return 1;
+
+	/* Close cloned descriptor */
+	if (close(cfd)) {
+		perror("close(cfd)");
+		return 1;
+	}
+
+	/* Close the original descriptor */
+	if (close(fd)) {
+		perror("close(fd)");
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/crypto/cryptodev/tests/hmac_comp.c b/crypto/cryptodev/tests/hmac_comp.c
new file mode 100644
index 0000000..451bedb
--- /dev/null
+++ b/crypto/cryptodev/tests/hmac_comp.c
@@ -0,0 +1,180 @@
+/*
+ * Compare HMAC results with those from OpenSSL.
+ *
+ * Placed under public domain.
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <stdint.h>
+
+#include <sys/ioctl.h>
+#include <crypto/cryptodev.h>
+
+#include "openssl_wrapper.h"
+
+#define	BLOCK_SIZE	16
+#define	KEY_SIZE	16
+#define MACKEY_SIZE	20
+#define MAX_DATALEN	(64 * 1024)
+
+static void printhex(unsigned char *buf, int buflen)
+{
+	while (buflen-- > 0) {
+		printf("\\x%.2x", *(buf++));
+	}
+	printf("\n");
+}
+
+static int
+test_crypto(int cfd, struct session_op *sess, int datalen)
+{
+	unsigned char *data, *encrypted;
+	unsigned char *encrypted_comp;
+
+	unsigned char iv[BLOCK_SIZE];
+	unsigned char mac[AALG_MAX_RESULT_LEN];
+
+	unsigned char mac_comp[AALG_MAX_RESULT_LEN];
+
+	struct crypt_op cryp;
+
+	int ret = 0;
+
+	data = malloc(datalen);
+	encrypted = malloc(datalen);
+	encrypted_comp = malloc(datalen);
+	memset(data, datalen & 0xff, datalen);
+	memset(encrypted, 0x27, datalen);
+	memset(encrypted_comp, 0x27, datalen);
+
+	memset(iv, 0x23, sizeof(iv));
+	memset(mac, 0, sizeof(mac));
+	memset(mac_comp, 0, sizeof(mac_comp));
+
+	memset(&cryp, 0, sizeof(cryp));
+
+	/* Encrypt and MAC the data via cryptodev */
+	cryp.ses = sess->ses;
+	cryp.len = datalen;
+	cryp.src = data;
+	cryp.dst = encrypted;
+	cryp.iv = iv;
+	cryp.mac = mac;
+	cryp.op = COP_ENCRYPT;
+	if ((ret = ioctl(cfd, CIOCCRYPT, &cryp))) {
+		perror("ioctl(CIOCCRYPT)");
+		goto out;
+	}
+
+	cryp.dst = encrypted_comp;
+	cryp.mac = mac_comp;
+
+	if ((ret = openssl_cioccrypt(sess, &cryp))) {
+		fprintf(stderr, "openssl_cioccrypt() failed!\n");
+		goto out;
+	}
+
+	if (memcmp(encrypted, encrypted_comp, cryp.len)) {
+		printf("fail for datalen %d, cipher texts do not match!\n", datalen);
+		ret = 1;
+	}
+	if (memcmp(mac, mac_comp, AALG_MAX_RESULT_LEN)) {
+		printf("fail for datalen %d, MACs do not match!\n", datalen);
+		printf("wrong mac: ");
+		printhex(mac, 20);
+		printf("right mac: ");
+		printhex(mac_comp, 20);
+		ret = 1;
+	}
+
+out:
+	free(data);
+	free(encrypted);
+	free(encrypted_comp);
+	return ret;
+}
+
+#define max(a, b) ((a) > (b) ? (a) : (b))
+#define min(a, b) ((a) < (b) ? (a) : (b))
+
+int
+main(int argc, char **argv)
+{
+	int fd;
+	struct session_op sess;
+	unsigned char key[KEY_SIZE], mackey[MACKEY_SIZE];
+	int datalen = BLOCK_SIZE;
+	int datalen_end = MAX_DATALEN;
+	int i;
+
+	if (argc > 1) {
+		datalen = min(max(atoi(argv[1]), BLOCK_SIZE), MAX_DATALEN);
+		datalen_end = datalen;
+	}
+	if (argc > 2) {
+		datalen_end = min(atoi(argv[2]), MAX_DATALEN);
+		if (datalen_end < datalen)
+			datalen_end = datalen;
+	}
+
+	/* Open the crypto device */
+	fd = open("/dev/crypto", O_RDWR, 0);
+	if (fd < 0) {
+		perror("open(/dev/crypto)");
+		return 1;
+	}
+
+	for (i = 0; i < KEY_SIZE; i++)
+		key[i] = i & 0xff;
+	for (i = 0; i < MACKEY_SIZE; i++)
+		mackey[i] = i & 0xff;
+
+	memset(&sess, 0, sizeof(sess));
+
+	/* Hash and encryption in one step test */
+	sess.cipher = CRYPTO_AES_CBC;
+	sess.mac = CRYPTO_SHA1_HMAC;
+	sess.keylen = KEY_SIZE;
+	sess.key = key;
+	sess.mackeylen = MACKEY_SIZE;
+	sess.mackey = mackey;
+	if (ioctl(fd, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+
+#ifdef CIOCGSESSINFO
+	{
+		struct session_info_op siop = {
+			.ses = sess.ses,
+		};
+
+		if (ioctl(fd, CIOCGSESSINFO, &siop)) {
+			perror("ioctl(CIOCGSESSINFO)");
+		} else {
+			printf("requested cipher CRYPTO_AES_CBC and mac CRYPTO_SHA1_HMAC,"
+			       " got cipher %s with driver %s and hash %s with driver %s\n",
+					siop.cipher_info.cra_name, siop.cipher_info.cra_driver_name,
+					siop.hash_info.cra_name, siop.hash_info.cra_driver_name);
+		}
+	}
+#endif
+
+	for (; datalen <= datalen_end; datalen += BLOCK_SIZE) {
+		if (test_crypto(fd, &sess, datalen)) {
+			printf("test_crypto() failed for datalen of %d\n", datalen);
+			return 1;
+		}
+	}
+
+	/* Finish crypto session */
+	if (ioctl(fd, CIOCFSESSION, &sess.ses)) {
+		perror("ioctl(CIOCFSESSION)");
+	}
+
+	close(fd);
+	return 0;
+}
diff --git a/crypto/cryptodev/tests/openssl_wrapper.c b/crypto/cryptodev/tests/openssl_wrapper.c
new file mode 100644
index 0000000..038c58f
--- /dev/null
+++ b/crypto/cryptodev/tests/openssl_wrapper.c
@@ -0,0 +1,267 @@
+#include <crypto/cryptodev.h>
+#include <stdio.h>
+#include <string.h>
+#include <openssl/aes.h>
+#include <openssl/evp.h>
+#include <openssl/hmac.h>
+
+//#define DEBUG
+
+#ifdef DEBUG
+#  define dbgp(...) { \
+	fprintf(stderr, "%s:%d: ", __FILE__, __LINE__); \
+	fprintf(stderr, __VA_ARGS__); \
+	fprintf(stderr, "\n"); \
+}
+#else
+#  define dbgp(...) /* nothing */
+#endif
+
+enum ctx_type {
+	ctx_type_none = 0,
+	ctx_type_hmac,
+	ctx_type_md,
+};
+
+union openssl_ctx {
+	HMAC_CTX hmac;
+	EVP_MD_CTX md;
+};
+
+struct ctx_mapping {
+	__u32 ses;
+	enum ctx_type type;
+	union openssl_ctx ctx;
+};
+
+static struct ctx_mapping ctx_map[512];
+
+static struct ctx_mapping *find_mapping(__u32 ses)
+{
+	int i;
+
+	for (i = 0; i < 512; i++) {
+		if (ctx_map[i].ses == ses)
+			return &ctx_map[i];
+	}
+	return NULL;
+}
+
+static struct ctx_mapping *new_mapping(void)
+{
+	return find_mapping(0);
+}
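+
+/*
+ * A session id of 0 marks a free slot, so new_mapping() is simply the
+ * first entry not yet bound to a session. Sketch of the intended
+ * lifecycle (the ses value is hypothetical):
+ *
+ *	struct ctx_mapping *m = new_mapping();
+ *	m->ses = sess->ses;		(bind to the cryptodev session)
+ *	m->type = ctx_type_hmac;	(then use m->ctx.hmac)
+ *	remove_mapping(sess->ses);	(cleans up and re-frees the slot)
+ */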
+
+static void remove_mapping(__u32 ses)
+{
+	struct ctx_mapping *mapping;
+
+	if (!(mapping = find_mapping(ses))) {
+		printf("%s: failed to find mapping for session %d\n", __func__, ses);
+		return;
+	}
+	switch (mapping->type) {
+	case ctx_type_none:
+		break;
+	case ctx_type_hmac:
+		dbgp("%s: calling HMAC_CTX_cleanup\n", __func__);
+		HMAC_CTX_cleanup(&mapping->ctx.hmac);
+		break;
+	case ctx_type_md:
+		dbgp("%s: calling EVP_MD_CTX_cleanup\n", __func__);
+		EVP_MD_CTX_cleanup(&mapping->ctx.md);
+		break;
+	}
+	memset(mapping, 0, sizeof(*mapping));
+}
+
+static union openssl_ctx *__ses_to_ctx(__u32 ses)
+{
+	struct ctx_mapping *mapping;
+
+	if (!(mapping = find_mapping(ses)))
+		return NULL;
+	return &mapping->ctx;
+}
+
+static HMAC_CTX *ses_to_hmac(__u32 ses) { return (HMAC_CTX *)__ses_to_ctx(ses); }
+static EVP_MD_CTX *ses_to_md(__u32 ses) { return (EVP_MD_CTX *)__ses_to_ctx(ses); }
+
+static const EVP_MD *sess_to_evp_md(struct session_op *sess)
+{
+	switch (sess->mac) {
+#ifndef OPENSSL_NO_MD5
+	case CRYPTO_MD5_HMAC: return EVP_md5();
+#endif
+#ifndef OPENSSL_NO_SHA
+	case CRYPTO_SHA1_HMAC:
+	case CRYPTO_SHA1:
+		return EVP_sha1();
+#endif
+#ifndef OPENSSL_NO_RIPEMD
+	case CRYPTO_RIPEMD160_HMAC: return EVP_ripemd160();
+#endif
+#ifndef OPENSSL_NO_SHA256
+	case CRYPTO_SHA2_256_HMAC: return EVP_sha256();
+#endif
+#ifndef OPENSSL_NO_SHA512
+	case CRYPTO_SHA2_384_HMAC: return EVP_sha384();
+	case CRYPTO_SHA2_512_HMAC: return EVP_sha512();
+#endif
+	default:
+		printf("%s: failed to get an EVP, things will be broken!\n", __func__);
+		return NULL;
+	}
+}
+
+static int openssl_hmac(struct session_op *sess, struct crypt_op *cop)
+{
+	HMAC_CTX *ctx = ses_to_hmac(sess->ses);
+
+	if (!ctx) {
+		struct ctx_mapping *mapping = new_mapping();
+		if (!mapping) {
+			printf("%s: failed to get new mapping\n", __func__);
+			return 1;
+		}
+
+		mapping->ses = sess->ses;
+		mapping->type = ctx_type_hmac;
+		ctx = &mapping->ctx.hmac;
+
+		dbgp("calling HMAC_CTX_init");
+		HMAC_CTX_init(ctx);
+		dbgp("calling HMAC_Init_ex");
+		if (!HMAC_Init_ex(ctx, sess->mackey, sess->mackeylen,
+				sess_to_evp_md(sess), NULL)) {
+			printf("%s: HMAC_Init_ex failed\n", __func__);
+			return 1;
+		}
+	}
+
+	if (cop->len) {
+		dbgp("calling HMAC_Update");
+		if (!HMAC_Update(ctx, cop->src, cop->len)) {
+			printf("%s: HMAC_Update failed\n", __func__);
+			return 1;
+		}
+	}
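+	/*
+	 * Finalize either when the caller asked for it explicitly, or on
+	 * a plain one-shot operation (data present, no UPDATE flag set).
+	 */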
+	if (cop->flags & COP_FLAG_FINAL ||
+	    (cop->len && !(cop->flags & COP_FLAG_UPDATE))) {
+		dbgp("calling HMAC_Final");
+		if (!HMAC_Final(ctx, cop->mac, 0)) {
+			printf("%s: HMAC_Final failed\n", __func__);
+			remove_mapping(sess->ses);
+			return 1;
+		}
+		remove_mapping(sess->ses);
+	}
+	return 0;
+}
+
+static int openssl_md(struct session_op *sess, struct crypt_op *cop)
+{
+	EVP_MD_CTX *ctx = ses_to_md(sess->ses);
+
+	if (!ctx) {
+		struct ctx_mapping *mapping = new_mapping();
+		if (!mapping) {
+			printf("%s: failed to get new mapping\n", __func__);
+			return 1;
+		}
+
+		mapping->ses = sess->ses;
+		mapping->type = ctx_type_md;
+		ctx = &mapping->ctx.md;
+
+		dbgp("calling EVP_MD_CTX_init");
+		EVP_MD_CTX_init(ctx);
+		dbgp("calling EVP_DigestInit");
+		EVP_DigestInit(ctx, sess_to_evp_md(sess));
+	}
+
+	if (cop->len) {
+		dbgp("calling EVP_DigestUpdate");
+		EVP_DigestUpdate(ctx, cop->src, cop->len);
+	}
+	if (cop->flags & COP_FLAG_FINAL ||
+	    (cop->len && !(cop->flags & COP_FLAG_UPDATE))) {
+		dbgp("calling EVP_DigestFinal");
+		EVP_DigestFinal(ctx, cop->mac, 0);
+		remove_mapping(sess->ses);
+	}
+
+	return 0;
+}
+
+static int openssl_aes(struct session_op *sess, struct crypt_op *cop)
+{
+	AES_KEY key;
+	int i, enc;
+	unsigned char ivec[AES_BLOCK_SIZE];
+
+	if (cop->len % AES_BLOCK_SIZE) {
+		printf("%s: illegal length passed, "
+		       "not a multiple of AES_BLOCK_SIZE\n", __func__);
+		return 1;
+	}
+
+	switch (cop->op) {
+	case COP_ENCRYPT:
+		AES_set_encrypt_key(sess->key, sess->keylen * 8, &key);
+		enc = 1;
+		break;
+	case COP_DECRYPT:
+		AES_set_decrypt_key(sess->key, sess->keylen * 8, &key);
+		enc = 0;
+		break;
+	default:
+		printf("%s: unknown cop->op received!\n", __func__);
+		return 1;
+	}
+
+	switch (sess->cipher) {
+	case CRYPTO_AES_CBC:
+		memcpy(ivec, cop->iv, AES_BLOCK_SIZE);
+		AES_cbc_encrypt(cop->src, cop->dst, cop->len, &key, ivec, enc);
+		if (cop->flags & COP_FLAG_WRITE_IV)
+			memcpy(cop->iv, ivec, AES_BLOCK_SIZE);
+		break;
+#if 0
+	/* XXX: TODO: implement this stuff */
+	case CRYPTO_AES_CTR:
+		AES_ctr128_encrypt(cop->src, cop->dst, &key, cop->iv,
+	case CRYPTO_AES_XTS:
+#endif
+	case CRYPTO_AES_ECB:
+		for (i = 0; i < cop->len; i += AES_BLOCK_SIZE)
+			AES_ecb_encrypt(cop->src + i, cop->dst + i, &key, enc);
+		break;
+	}
+	return 0;
+}
+
+int openssl_cioccrypt(struct session_op *sess, struct crypt_op *cop)
+{
+	if (sess->mac && sess->mackey && sess->mackeylen)
+		openssl_hmac(sess, cop);
+	else if (sess->mac)
+		openssl_md(sess, cop);
+
+	switch (sess->cipher) {
+	case CRYPTO_AES_CBC:
+	case CRYPTO_AES_CTR:
+	case CRYPTO_AES_XTS:
+	case CRYPTO_AES_ECB:
+		openssl_aes(sess, cop);
+		break;
+	case 0:
+		/* no encryption wanted, everything's fine */
+		break;
+	default:
+		printf("%s: unknown cipher passed!\n", __func__);
+		break;
+	}
+
+	return 0;
+}
diff --git a/crypto/cryptodev/tests/openssl_wrapper.h b/crypto/cryptodev/tests/openssl_wrapper.h
new file mode 100644
index 0000000..5f1f516
--- /dev/null
+++ b/crypto/cryptodev/tests/openssl_wrapper.h
@@ -0,0 +1,6 @@
+#ifndef __OPENSSL_WRAPPER_H
+#define __OPENSSL_WRAPPER_H
+
+int openssl_cioccrypt(struct session_op *, struct crypt_op *);
+
+#endif /* __OPENSSL_WRAPPER_H */
diff --git a/crypto/cryptodev/tests/sha_speed.c b/crypto/cryptodev/tests/sha_speed.c
new file mode 100644
index 0000000..e1dc54b
--- /dev/null
+++ b/crypto/cryptodev/tests/sha_speed.c
@@ -0,0 +1,198 @@
+/*  sha_speed - simple SHA benchmark tool for cryptodev
+ *
+ *    Copyright (C) 2011 by Phil Sutter <phil.sutter@viprinet.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <signal.h>
+
+#include <crypto/cryptodev.h>
+
+static double udifftimeval(struct timeval start, struct timeval end)
+{
+	return (double)(end.tv_usec - start.tv_usec) +
+	       (double)(end.tv_sec - start.tv_sec) * 1000 * 1000;
+}
+
+static int must_finish = 0;
+
+static void alarm_handler(int signo)
+{
+	must_finish = 1;
+}
+
+static char *units[] = { "", "Ki", "Mi", "Gi", "Ti", 0};
+static char *si_units[] = { "", "K", "M", "G", "T", 0};
+
+static void value2human(int si, double bytes, double time, double* data, double* speed,char* metric)
+{
+	int unit = 0;
+
+	*data = bytes;
+
+	if (si) {
+		while (*data > 1000 && si_units[unit + 1]) {
+			*data /= 1000;
+			unit++;
+		}
+		*speed = *data / time;
+		sprintf(metric, "%sB", si_units[unit]);
+	} else {
+		while (*data > 1024 && units[unit + 1]) {
+			*data /= 1024;
+			unit++;
+		}
+		*speed = *data / time;
+		sprintf(metric, "%sB", units[unit]);
+	}
+}
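+
+/*
+ * Worked example: value2human(1, 3500000000.0, 2.0, ...) reduces the
+ * byte count to 3.5 with metric "GB" and reports 1.75 GB/sec.
+ */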
+
+
+int hash_data(struct session_op *sess, int fdc, int chunksize, int alignmask)
+{
+	struct crypt_op cop;
+	char *buffer;
+	static int val = 23;
+	struct timeval start, end;
+	double total = 0;
+	double secs, ddata, dspeed;
+	char metric[16];
+	uint8_t mac[AALG_MAX_RESULT_LEN];
+
+	if (alignmask) {
+		if (posix_memalign((void **)&buffer, alignmask + 1, chunksize)) {
+			printf("posix_memalign() failed!\n");
+			return 1;
+		}
+	} else {
+		if (!(buffer = malloc(chunksize))) {
+			perror("malloc()");
+			return 1;
+		}
+	}
+
+	printf("\tHashing in chunks of %d bytes: ", chunksize);
+	fflush(stdout);
+
+	memset(buffer, val++, chunksize);
+
+	must_finish = 0;
+	alarm(5);
+
+	gettimeofday(&start, NULL);
+	do {
+		memset(&cop, 0, sizeof(cop));
+		cop.ses = sess->ses;
+		cop.len = chunksize;
+		cop.op = COP_ENCRYPT;
+		cop.src = (unsigned char *)buffer;
+		cop.mac = mac;
+
+		if (ioctl(fdc, CIOCCRYPT, &cop)) {
+			perror("ioctl(CIOCCRYPT)");
+			return 1;
+		}
+		total += chunksize;
+	} while (must_finish == 0);
+	gettimeofday(&end, NULL);
+
+	secs = udifftimeval(start, end) / 1000000.0;
+
+	value2human(1, total, secs, &ddata, &dspeed, metric);
+	printf("done. %.2f %s in %.2f secs: ", ddata, metric, secs);
+	printf("%.2f %s/sec\n", dspeed, metric);
+
+	free(buffer);
+	return 0;
+}
+
+int main(void)
+{
+	int fd, i, fdc = -1, alignmask = 0;
+	struct session_op sess;
+	char keybuf[32];
+#ifdef CIOCGSESSINFO
+	struct session_info_op siop;
+#endif
+
+	signal(SIGALRM, alarm_handler);
+
+	if ((fd = open("/dev/crypto", O_RDWR, 0)) < 0) {
+		perror("open()");
+		return 1;
+	}
+	if (ioctl(fd, CRIOGET, &fdc)) {
+		perror("ioctl(CRIOGET)");
+		return 1;
+	}
+
+	fprintf(stderr, "Testing SHA1 Hash: \n");
+	memset(&sess, 0, sizeof(sess));
+	sess.mac = CRYPTO_SHA1;
+	if (ioctl(fdc, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+#ifdef CIOCGSESSINFO
+	siop.ses = sess.ses;
+	if (ioctl(fdc, CIOCGSESSINFO, &siop)) {
+		perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+	printf("requested hash CRYPTO_SHA1, got %s with driver %s\n",
+			siop.hash_info.cra_name, siop.hash_info.cra_driver_name);
+	alignmask = siop.alignmask;
+#endif
+
+	for (i = 256; i <= (64 * 1024); i *= 4) {
+		if (hash_data(&sess, fdc, i, alignmask))
+			break;
+	}
+
+	fprintf(stderr, "\nTesting SHA256 Hash: \n");
+	memset(&sess, 0, sizeof(sess));
+	sess.mac = CRYPTO_SHA2_256;
+	if (ioctl(fdc, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+#ifdef CIOCGSESSINFO
+	siop.ses = sess.ses;
+	if (ioctl(fdc, CIOCGSESSINFO, &siop)) {
+		perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+	printf("requested hash CRYPTO_SHA2_256, got %s with driver %s\n",
+			siop.hash_info.cra_name, siop.hash_info.cra_driver_name);
+	alignmask = siop.alignmask;
+#endif
+
+	for (i = 256; i <= (64 * 1024); i *= 4) {
+		if (hash_data(&sess, fdc, i, alignmask))
+			break;
+	}
+
+	close(fdc);
+	close(fd);
+	return 0;
+}
diff --git a/crypto/cryptodev/tests/speed.c b/crypto/cryptodev/tests/speed.c
new file mode 100644
index 0000000..81c5a65
--- /dev/null
+++ b/crypto/cryptodev/tests/speed.c
@@ -0,0 +1,212 @@
+/*  cryptodev_test - simple benchmark tool for cryptodev
+ *
+ *    Copyright (C) 2010 by Phil Sutter <phil.sutter@viprinet.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <signal.h>
+
+#include <crypto/cryptodev.h>
+
+static int si = 1; /* SI by default */
+
+static double udifftimeval(struct timeval start, struct timeval end)
+{
+	return (double)(end.tv_usec - start.tv_usec) +
+	       (double)(end.tv_sec - start.tv_sec) * 1000 * 1000;
+}
+
+static int must_finish = 0;
+
+static void alarm_handler(int signo)
+{
+	must_finish = 1;
+}
+
+static char *units[] = { "", "Ki", "Mi", "Gi", "Ti", 0};
+static char *si_units[] = { "", "K", "M", "G", "T", 0};
+
+static void value2human(int si, double bytes, double time, double* data, double* speed,char* metric)
+{
+	int unit = 0;
+
+	*data = bytes;
+
+	if (si) {
+		while (*data > 1000 && si_units[unit + 1]) {
+			*data /= 1000;
+			unit++;
+		}
+		*speed = *data / time;
+		sprintf(metric, "%sB", si_units[unit]);
+	} else {
+		while (*data > 1024 && units[unit + 1]) {
+			*data /= 1024;
+			unit++;
+		}
+		*speed = *data / time;
+		sprintf(metric, "%sB", units[unit]);
+	}
+}
+
+#define MAX(x,y) ((x)>(y)?(x):(y))
+
+int encrypt_data(struct session_op *sess, int fdc, int chunksize, int alignmask)
+{
+	struct crypt_op cop;
+	char *buffer, iv[32];
+	static int val = 23;
+	struct timeval start, end;
+	double total = 0;
+	double secs, ddata, dspeed;
+	char metric[16];
+
+	if (alignmask) {
+		if (posix_memalign((void **)&buffer, MAX(alignmask + 1, sizeof(void*)), chunksize)) {
+			printf("posix_memalign() failed! (alignment %x, size: %d)\n", alignmask + 1, chunksize);
+			return 1;
+		}
+	} else {
+		if (!(buffer = malloc(chunksize))) {
+			perror("malloc()");
+			return 1;
+		}
+	}
+
+	memset(iv, 0x23, 32);
+
+	printf("\tEncrypting in chunks of %d bytes: ", chunksize);
+	fflush(stdout);
+
+	memset(buffer, val++, chunksize);
+
+	must_finish = 0;
+	alarm(5);
+
+	gettimeofday(&start, NULL);
+	do {
+		memset(&cop, 0, sizeof(cop));
+		cop.ses = sess->ses;
+		cop.len = chunksize;
+		cop.iv = (unsigned char *)iv;
+		cop.op = COP_ENCRYPT;
+		cop.src = cop.dst = (unsigned char *)buffer;
+
+		if (ioctl(fdc, CIOCCRYPT, &cop)) {
+			perror("ioctl(CIOCCRYPT)");
+			return 1;
+		}
+		total += chunksize;
+	} while (must_finish == 0);
+	gettimeofday(&end, NULL);
+
+	secs = udifftimeval(start, end) / 1000000.0;
+
+	value2human(si, total, secs, &ddata, &dspeed, metric);
+	printf("done. %.2f %s in %.2f secs: ", ddata, metric, secs);
+	printf("%.2f %s/sec\n", dspeed, metric);
+
+	free(buffer);
+	return 0;
+}
+
+int main(int argc, char** argv)
+{
+	int fd, i, fdc = -1, alignmask = 0;
+	struct session_op sess;
+#ifdef CIOCGSESSINFO
+	struct session_info_op siop;
+#endif
+	char keybuf[32];
+
+	signal(SIGALRM, alarm_handler);
+
+	if (argc > 1) {
+		if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-h") == 0) {
+			printf("Usage: speed [--kib]\n");
+			exit(0);
+		}
+		if (strcmp(argv[1], "--kib") == 0) {
+			si = 0;
+		}
+	}
+
+	if ((fd = open("/dev/crypto", O_RDWR, 0)) < 0) {
+		perror("open()");
+		return 1;
+	}
+	if (ioctl(fd, CRIOGET, &fdc)) {
+		perror("ioctl(CRIOGET)");
+		return 1;
+	}
+
+	fprintf(stderr, "Testing NULL cipher: \n");
+	memset(&sess, 0, sizeof(sess));
+	sess.cipher = CRYPTO_NULL;
+	sess.keylen = 0;
+	sess.key = (unsigned char *)keybuf;
+	if (ioctl(fdc, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+#ifdef CIOCGSESSINFO
+	siop.ses = sess.ses;
+	if (ioctl(fdc, CIOCGSESSINFO, &siop)) {
+		perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+	alignmask = siop.alignmask;
+#endif
+
+	for (i = 512; i <= (64 * 1024); i *= 2) {
+		if (encrypt_data(&sess, fdc, i, alignmask))
+			break;
+	}
+
+	fprintf(stderr, "\nTesting AES-128-CBC cipher: \n");
+	memset(&sess, 0, sizeof(sess));
+	sess.cipher = CRYPTO_AES_CBC;
+	sess.keylen = 16;
+	memset(keybuf, 0x42, 16);
+	sess.key = (unsigned char *)keybuf;
+	if (ioctl(fdc, CIOCGSESSION, &sess)) {
+		perror("ioctl(CIOCGSESSION)");
+		return 1;
+	}
+#ifdef CIOCGSESSINFO
+	siop.ses = sess.ses;
+	if (ioctl(fdc, CIOCGSESSINFO, &siop)) {
+		perror("ioctl(CIOCGSESSINFO)");
+		return 1;
+	}
+	alignmask = siop.alignmask;
+#endif
+
+	for (i = 512; i <= (64 * 1024); i *= 2) {
+		if (encrypt_data(&sess, fdc, i, alignmask))
+			break;
+	}
+
+	close(fdc);
+	close(fd);
+	return 0;
+}
diff --git a/crypto/cryptodev/tests/testhelper.h b/crypto/cryptodev/tests/testhelper.h
new file mode 100644
index 0000000..ea0b100
--- /dev/null
+++ b/crypto/cryptodev/tests/testhelper.h
@@ -0,0 +1,57 @@
+/*
+ * Some helper stuff shared between the sample programs.
+ */
+#ifndef _TESTHELPER_H
+#define _TESTHELPER_H
+
+/* poll until POLLOUT, then call CIOCASYNCCRYPT */
+inline int do_async_crypt(int cfd, struct crypt_op *cryp)
+{
+	struct pollfd pfd;
+
+	pfd.fd = cfd;
+	pfd.events = POLLOUT;
+
+	if (poll(&pfd, 1, -1) < 1) {
+		perror("poll()");
+		return 1;
+	}
+
+	if (ioctl(cfd, CIOCASYNCCRYPT, cryp)) {
+		perror("ioctl(CIOCASYNCCRYPT)");
+		return 1;
+	}
+	return 0;
+}
+
+/* poll until POLLIN, then call CIOCASYNCFETCH */
+inline int do_async_fetch(int cfd, struct crypt_op *cryp)
+{
+	struct pollfd pfd;
+
+	pfd.fd = cfd;
+	pfd.events = POLLIN;
+
+	if (poll(&pfd, 1, -1) < 1) {
+		perror("poll()");
+		return 1;
+	}
+
+	if (ioctl(cfd, CIOCASYNCFETCH, cryp)) {
+		perror("ioctl(CIOCASYNCFETCH)");
+		return 1;
+	}
+	return 0;
+}
+
+/* Check the return value of stmt for identity with goodval. If they
+ * don't match, print an error and return stmt's value from the
+ * enclosing function. */
+#define DO_OR_DIE(stmt, goodval) {                           \
+	int __rc_val;                                        \
+	if ((__rc_val = stmt) != goodval) {                  \
+		perror("DO_OR_DIE(" #stmt "," #goodval ")"); \
+		return __rc_val;                             \
+	}                                                    \
+}
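+
+/*
+ * Example (cfd/sess as set up by the caller):
+ * DO_OR_DIE(ioctl(cfd, CIOCGSESSION, &sess), 0) perror()s and returns
+ * the ioctl() result from the enclosing function unless it is 0.
+ */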
+
+#endif /* _TESTHELPER_H */
diff --git a/crypto/cryptodev/util.c b/crypto/cryptodev/util.c
new file mode 100644
index 0000000..9eba483
--- /dev/null
+++ b/crypto/cryptodev/util.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2011 Maxim Levitsky
+ *
+ * This file is part of linux cryptodev.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <crypto/scatterwalk.h>
+#include <linux/scatterlist.h>
+#include "util.h"
+
+/* These were taken from Maxim Levitsky's patch to lkml.
+ */
+struct scatterlist *sg_advance(struct scatterlist *sg, int consumed)
+{
+	while (consumed >= sg->length) {
+		consumed -= sg->length;
+
+		sg = sg_next(sg);
+		if (!sg)
+			break;
+	}
+
+	WARN_ON(!sg && consumed);
+
+	if (!sg)
+		return NULL;
+
+	sg->offset += consumed;
+	sg->length -= consumed;
+
+	if (sg->offset >= PAGE_SIZE) {
+		struct page *page =
+			nth_page(sg_page(sg), sg->offset / PAGE_SIZE);
+		sg_set_page(sg, page, sg->length, sg->offset % PAGE_SIZE);
+	}
+
+	return sg;
+}
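+
+/*
+ * Usage sketch: after consuming the first 100 bytes of a chain,
+ * sg = sg_advance(sg, 100) returns the entry containing the first
+ * unconsumed byte, with that entry's offset/length adjusted.
+ */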
+
+/**
+ * sg_copy - copies sg entries from sg_from to sg_to, such
+ * that sg_to covers the first 'len' bytes of sg_from.
+ */
+int sg_copy(struct scatterlist *sg_from, struct scatterlist *sg_to, int len)
+{
+	while (len > sg_from->length) {
+		len -= sg_from->length;
+
+		sg_set_page(sg_to, sg_page(sg_from),
+				sg_from->length, sg_from->offset);
+
+		sg_to = sg_next(sg_to);
+		sg_from = sg_next(sg_from);
+
+		if (len && (!sg_from || !sg_to))
+			return -ENOMEM;
+	}
+
+	if (len)
+		sg_set_page(sg_to, sg_page(sg_from),
+				len, sg_from->offset);
+	sg_mark_end(sg_to);
+	return 0;
+}
+
diff --git a/crypto/cryptodev/util.h b/crypto/cryptodev/util.h
new file mode 100644
index 0000000..204de75
--- /dev/null
+++ b/crypto/cryptodev/util.h
@@ -0,0 +1,2 @@
+int sg_copy(struct scatterlist *sg_from, struct scatterlist *sg_to, int len);
+struct scatterlist *sg_advance(struct scatterlist *sg, int consumed);
diff --git a/crypto/cryptodev/version.h b/crypto/cryptodev/version.h
new file mode 100644
index 0000000..83d49da
--- /dev/null
+++ b/crypto/cryptodev/version.h
@@ -0,0 +1 @@
+#define VERSION "1.6"
diff --git a/crypto/cryptodev/zc.c b/crypto/cryptodev/zc.c
new file mode 100644
index 0000000..884dbab
--- /dev/null
+++ b/crypto/cryptodev/zc.c
@@ -0,0 +1,217 @@
+/*
+ * Driver for /dev/crypto device (aka CryptoDev)
+ *
+ * Copyright (c) 2009-2011 Nikos Mavrogiannopoulos <nmav@gnutls.org>
+ * Copyright (c) 2010 Phil Sutter
+ * Copyright (c) 2011, 2012 OpenSSL Software Foundation, Inc.
+ *
+ * This file is part of linux cryptodev.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <crypto/hash.h>
+#include <linux/crypto.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/ioctl.h>
+#include <linux/random.h>
+#include <linux/syscalls.h>
+#include <linux/pagemap.h>
+#include <linux/uaccess.h>
+#include <crypto/scatterwalk.h>
+#include <linux/scatterlist.h>
+#include "cryptodev_int.h"
+#include "zc.h"
+#include "version.h"
+
+/* Helper functions to assist zero copy.
+ * This needs to be redesigned and moved out of the session. --nmav
+ */
+
+/* offset of buf in its first page */
+#define PAGEOFFSET(buf) ((unsigned long)(buf) & ~PAGE_MASK)
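+/* e.g. with 4 KiB pages, PAGEOFFSET(0x12345) == 0x345 */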
+
+/* fetch the pages that addr resides in into pg and initialise sg with them */
+int __get_userbuf(uint8_t __user *addr, uint32_t len, int write,
+		unsigned int pgcount, struct page **pg, struct scatterlist *sg,
+		struct task_struct *task, struct mm_struct *mm)
+{
+	int ret, pglen, i = 0;
+	struct scatterlist *sgp;
+
+	if (unlikely(!pgcount || !len || !addr)) {
+		sg_mark_end(sg);
+		return 0;
+	}
+
+	down_read(&mm->mmap_sem);
+	ret = get_user_pages(task, mm,
+			(unsigned long)addr, pgcount, write, 0, pg, NULL);
+	up_read(&mm->mmap_sem);
+	if (ret != pgcount)
+		return -EINVAL;
+
+	sg_init_table(sg, pgcount);
+
+	pglen = min((ptrdiff_t)(PAGE_SIZE - PAGEOFFSET(addr)), (ptrdiff_t)len);
+	sg_set_page(sg, pg[i++], pglen, PAGEOFFSET(addr));
+
+	len -= pglen;
+	for (sgp = sg_next(sg); len; sgp = sg_next(sgp)) {
+		pglen = min((uint32_t)PAGE_SIZE, len);
+		sg_set_page(sgp, pg[i++], pglen, 0);
+		len -= pglen;
+	}
+	sg_mark_end(sg_last(sg, pgcount));
+	return 0;
+}
+
+int adjust_sg_array(struct csession * ses, int pagecount)
+{
+	struct scatterlist *sg;
+	struct page **pages;
+	int array_size;
+
+	for (array_size = ses->array_size; array_size < pagecount;
+	     array_size *= 2)
+		;
+	dprintk(0, KERN_DEBUG, "reallocating from %d to %d pages\n",
+			ses->array_size, array_size);
+	pages = krealloc(ses->pages, array_size * sizeof(struct page *),
+			 GFP_KERNEL);
+	if (unlikely(!pages))
+		return -ENOMEM;
+	ses->pages = pages;
+	sg = krealloc(ses->sg, array_size * sizeof(struct scatterlist),
+		      GFP_KERNEL);
+	if (unlikely(!sg))
+		return -ENOMEM;
+	ses->sg = sg;
+	ses->array_size = array_size;
+
+	return 0;
+}
+
+void release_user_pages(struct csession *ses)
+{
+	unsigned int i;
+
+	for (i = 0; i < ses->used_pages; i++) {
+		if (!PageReserved(ses->pages[i]))
+			SetPageDirty(ses->pages[i]);
+
+		if (ses->readonly_pages == 0)
+			flush_dcache_page(ses->pages[i]);
+		else
+			ses->readonly_pages--;
+
+		page_cache_release(ses->pages[i]);
+	}
+	ses->used_pages = 0;
+}
+
+/* make src and dst available in scatterlists.
+ * dst might be the same as src.
+ */
+int get_userbuf(struct csession *ses,
+                void* __user src, unsigned int src_len,
+                void* __user dst, unsigned int dst_len,
+                struct task_struct *task, struct mm_struct *mm,
+                struct scatterlist **src_sg,
+                struct scatterlist **dst_sg)
+{
+	int src_pagecount, dst_pagecount;
+	int rc;
+
+	/* Empty input is a valid option to many algorithms & is tested by NIST/FIPS */
+	/* Make sure NULL input has 0 length */
+	if (!src && src_len)
+		src_len = 0;
+
+	/* I don't know that null output is ever useful, but we can handle it gracefully */
+	/* Make sure NULL output has 0 length */
+	if (!dst && dst_len)
+		dst_len = 0;
+
+	if (ses->alignmask && !IS_ALIGNED((unsigned long)src, ses->alignmask)) {
+		dprintk(2, KERN_WARNING, "careful - source address %lx is not %d byte aligned\n",
+				(unsigned long)src, ses->alignmask + 1);
+	}
+
+	if (ses->alignmask && !IS_ALIGNED((unsigned long)dst, ses->alignmask)) {
+		dprintk(2, KERN_WARNING, "careful - destination address %lx is not %d byte aligned\n",
+				(unsigned long)dst, ses->alignmask + 1);
+	}
+
+	src_pagecount = PAGECOUNT(src, src_len);
+	dst_pagecount = PAGECOUNT(dst, dst_len);
+
+	ses->used_pages = (src == dst) ? max(src_pagecount, dst_pagecount)
+	                               : src_pagecount + dst_pagecount;
+
+	ses->readonly_pages = (src == dst) ? 0 : src_pagecount;
+
+	if (ses->used_pages > ses->array_size) {
+		rc = adjust_sg_array(ses, ses->used_pages);
+		if (rc)
+			return rc;
+	}
+
+	if (src == dst) {	/* inplace operation */
+		rc = __get_userbuf(src, src_len, 1, ses->used_pages,
+			               ses->pages, ses->sg, task, mm);
+		if (unlikely(rc)) {
+			dprintk(1, KERN_ERR,
+				"failed to get user pages for data IO\n");
+			return rc;
+		}
+		(*src_sg) = (*dst_sg) = ses->sg;
+		return 0;
+	}
+
+	*src_sg = NULL;	/* default to no input */
+	*dst_sg = NULL;	/* default to ignore output */
+
+	if (likely(src)) {
+		rc = __get_userbuf(src, src_len, 0, ses->readonly_pages,
+					   ses->pages, ses->sg, task, mm);
+		if (unlikely(rc)) {
+			dprintk(1, KERN_ERR,
+				"failed to get user pages for data input\n");
+			return rc;
+		}
+		*src_sg = ses->sg;
+	}
+
+	if (likely(dst)) {
+		const unsigned int writable_pages =
+			ses->used_pages - ses->readonly_pages;
+		struct page **dst_pages = ses->pages + ses->readonly_pages;
+		*dst_sg = ses->sg + ses->readonly_pages;
+
+		rc = __get_userbuf(dst, dst_len, 1, writable_pages,
+					   dst_pages, *dst_sg, task, mm);
+		if (unlikely(rc)) {
+			dprintk(1, KERN_ERR,
+					"failed to get user pages for data output\n");
+			release_user_pages(ses);  /* FIXME: use __release_userbuf(src, ...) */
+			return rc;
+		}
+	}
+	return 0;
+}
+
diff --git a/crypto/cryptodev/zc.h b/crypto/cryptodev/zc.h
new file mode 100644
index 0000000..b52616e
--- /dev/null
+++ b/crypto/cryptodev/zc.h
@@ -0,0 +1,27 @@
+#ifndef ZC_H
+# define ZC_H
+
+#include "cryptodev_int.h"
+
+/* For zero copy */
+int __get_userbuf(uint8_t __user *addr, uint32_t len, int write,
+		unsigned int pgcount, struct page **pg, struct scatterlist *sg,
+		struct task_struct *task, struct mm_struct *mm);
+void release_user_pages(struct csession* ses);
+
+int get_userbuf(struct csession *ses,
+                void* __user src, unsigned int src_len,
+                void* __user dst, unsigned int dst_len,
+                struct task_struct *task, struct mm_struct *mm,
+                struct scatterlist **src_sg,
+                struct scatterlist **dst_sg);
+
+/* buflen ? (last page - first page + 1) : 0 */
+#define PAGECOUNT(buf, buflen) ((buflen) \
+	? ((((unsigned long)(buf + buflen - 1)) >> PAGE_SHIFT) - \
+	   (((unsigned long)(buf             )) >> PAGE_SHIFT) + 1) \
+	: 0)
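+
+/*
+ * Example with 4 KiB pages: PAGECOUNT(0x1ff8, 16) crosses the 0x2000
+ * boundary and yields 2, while PAGECOUNT(buf, 0) is always 0.
+ */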
+
+#define DEFAULT_PREALLOC_PAGES 32
+
+#endif
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 9953a42..f1a9506 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -100,6 +100,8 @@
 
 source "drivers/leds/Kconfig"
 
+source "drivers/switch/Kconfig"
+
 source "drivers/accessibility/Kconfig"
 
 source "drivers/infiniband/Kconfig"
@@ -166,4 +168,6 @@
 
 source "drivers/reset/Kconfig"
 
+source "drivers/android/Kconfig"
+
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 130abc1..fe5799a 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -111,6 +111,7 @@
 obj-y				+= mmc/
 obj-$(CONFIG_MEMSTICK)		+= memstick/
 obj-y				+= leds/
+obj-$(CONFIG_SWITCH)		+= switch/
 obj-$(CONFIG_INFINIBAND)	+= infiniband/
 obj-$(CONFIG_SGI_SN)		+= sn/
 obj-y				+= firmware/
@@ -152,3 +153,4 @@
 obj-$(CONFIG_VME_BUS)		+= vme/
 obj-$(CONFIG_IPACK_BUS)		+= ipack/
 obj-$(CONFIG_NTB)		+= ntb/
+obj-$(CONFIG_ANDROID)		+= android/
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index fafec5d..e355255 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -29,7 +29,8 @@
 static const struct acpi_device_id acpi_platform_device_ids[] = {
 
 	{ "PNP0D40" },
-
+	{ "BCM43241" },
+	{ "BCM2E1A" },
 	{ }
 };
 
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 11441ad..2762f61 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -707,6 +707,11 @@
 		table = acpi_os_map_memory(acpi_tables_addr + table_offset,
 					   ACPI_HEADER_SIZE);
 
+		if (table == NULL) {
+			pr_err("Error mapping ACPI memory\n");
+			return AE_ERROR;
+		}
+
 		if (table_offset + table->length > all_tables_size) {
 			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
 			WARN_ON(1);
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
new file mode 100644
index 0000000..bdfc6c6
--- /dev/null
+++ b/drivers/android/Kconfig
@@ -0,0 +1,37 @@
+menu "Android"
+
+config ANDROID
+	bool "Android Drivers"
+	---help---
+	  Enable support for various drivers needed on the Android platform
+
+if ANDROID
+
+config ANDROID_BINDER_IPC
+	bool "Android Binder IPC Driver"
+	depends on MMU
+	default n
+	---help---
+	  Binder is used in Android for both communication between processes,
+	  Binder is used in Android for both communication between
+	  processes and remote method invocation.
+	  This means one Android process can call a method/routine in another
+	  Android process, using Binder to identify, invoke and pass arguments
+	  between said processes.
+
+config ANDROID_BINDER_IPC_32BIT
+	bool
+	depends on !64BIT && ANDROID_BINDER_IPC
+	default y
+	---help---
+	  The Binder API has been changed to support both 32-bit and
+	  64-bit applications in a mixed environment.
+
+	  Enable this to support an old 32-bit Android user-space (v4.4 and
+	  earlier).
+
+	  Note that enabling this will break newer Android user-space.
+
+endif # if ANDROID
+
+endmenu
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
new file mode 100644
index 0000000..3b7e4b0
--- /dev/null
+++ b/drivers/android/Makefile
@@ -0,0 +1,3 @@
+ccflags-y += -I$(src)			# needed for trace events
+
+obj-$(CONFIG_ANDROID_BINDER_IPC)	+= binder.o
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
new file mode 100644
index 0000000..383aa21
--- /dev/null
+++ b/drivers/android/binder.c
@@ -0,0 +1,3719 @@
+/* binder.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/cacheflush.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/freezer.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/rtmutex.h>
+#include <linux/mutex.h>
+#include <linux/nsproxy.h>
+#include <linux/poll.h>
+#include <linux/debugfs.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/pid_namespace.h>
+#include <linux/security.h>
+
+#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
+#define BINDER_IPC_32BIT 1
+#endif
+
+#include <uapi/linux/android/binder.h>
+#include "binder_trace.h"
+
+static DEFINE_RT_MUTEX(binder_main_lock);
+static DEFINE_MUTEX(binder_deferred_lock);
+static DEFINE_MUTEX(binder_mmap_lock);
+
+static HLIST_HEAD(binder_procs);
+static HLIST_HEAD(binder_deferred_list);
+static HLIST_HEAD(binder_dead_nodes);
+
+static struct dentry *binder_debugfs_dir_entry_root;
+static struct dentry *binder_debugfs_dir_entry_proc;
+static struct binder_node *binder_context_mgr_node;
+static kuid_t binder_context_mgr_uid = INVALID_UID;
+static int binder_last_id;
+static struct workqueue_struct *binder_deferred_workqueue;
+
+#define BINDER_DEBUG_ENTRY(name) \
+static int binder_##name##_open(struct inode *inode, struct file *file) \
+{ \
+	return single_open(file, binder_##name##_show, inode->i_private); \
+} \
+\
+static const struct file_operations binder_##name##_fops = { \
+	.owner = THIS_MODULE, \
+	.open = binder_##name##_open, \
+	.read = seq_read, \
+	.llseek = seq_lseek, \
+	.release = single_release, \
+}
+
+static int binder_proc_show(struct seq_file *m, void *unused);
+BINDER_DEBUG_ENTRY(proc);
+
+/* This is only defined in include/asm-arm/sizes.h */
+#ifndef SZ_1K
+#define SZ_1K                               0x400
+#endif
+
+#ifndef SZ_4M
+#define SZ_4M                               0x400000
+#endif
+
+#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
+
+#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
+
+enum {
+	BINDER_DEBUG_USER_ERROR             = 1U << 0,
+	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
+	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
+	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
+	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
+	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
+	BINDER_DEBUG_READ_WRITE             = 1U << 6,
+	BINDER_DEBUG_USER_REFS              = 1U << 7,
+	BINDER_DEBUG_THREADS                = 1U << 8,
+	BINDER_DEBUG_TRANSACTION            = 1U << 9,
+	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
+	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
+	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
+	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
+	BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
+	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
+};
+static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
+	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
+module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
+
+static bool binder_debug_no_lock;
+module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
+
+static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
+static int binder_stop_on_user_error;
+
+static int binder_set_stop_on_user_error(const char *val,
+					 struct kernel_param *kp)
+{
+	int ret;
+
+	ret = param_set_int(val, kp);
+	if (binder_stop_on_user_error < 2)
+		wake_up(&binder_user_error_wait);
+	return ret;
+}
+module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
+	param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
+
+#define binder_debug(mask, x...) \
+	do { \
+		if (binder_debug_mask & mask) \
+			pr_info(x); \
+	} while (0)
+
+#define binder_user_error(x...) \
+	do { \
+		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
+			pr_info(x); \
+		if (binder_stop_on_user_error) \
+			binder_stop_on_user_error = 2; \
+	} while (0)
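+
+/*
+ * Example: binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%d: open\n", pid)
+ * logs via pr_info() only when that bit is set in the debug_mask
+ * module parameter (pid here is a hypothetical variable).
+ */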
+
+enum binder_stat_types {
+	BINDER_STAT_PROC,
+	BINDER_STAT_THREAD,
+	BINDER_STAT_NODE,
+	BINDER_STAT_REF,
+	BINDER_STAT_DEATH,
+	BINDER_STAT_TRANSACTION,
+	BINDER_STAT_TRANSACTION_COMPLETE,
+	BINDER_STAT_COUNT
+};
+
+struct binder_stats {
+	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
+	int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
+	int obj_created[BINDER_STAT_COUNT];
+	int obj_deleted[BINDER_STAT_COUNT];
+};
+
+static struct binder_stats binder_stats;
+
+static inline void binder_stats_deleted(enum binder_stat_types type)
+{
+	binder_stats.obj_deleted[type]++;
+}
+
+static inline void binder_stats_created(enum binder_stat_types type)
+{
+	binder_stats.obj_created[type]++;
+}
+
+struct binder_transaction_log_entry {
+	int debug_id;
+	int call_type;
+	int from_proc;
+	int from_thread;
+	int target_handle;
+	int to_proc;
+	int to_thread;
+	int to_node;
+	int data_size;
+	int offsets_size;
+};
+struct binder_transaction_log {
+	int next;
+	int full;
+	struct binder_transaction_log_entry entry[32];
+};
+static struct binder_transaction_log binder_transaction_log;
+static struct binder_transaction_log binder_transaction_log_failed;
+
+static struct binder_transaction_log_entry *binder_transaction_log_add(
+	struct binder_transaction_log *log)
+{
+	struct binder_transaction_log_entry *e;
+
+	e = &log->entry[log->next];
+	memset(e, 0, sizeof(*e));
+	log->next++;
+	if (log->next == ARRAY_SIZE(log->entry)) {
+		log->next = 0;
+		log->full = 1;
+	}
+	return e;
+}
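+
+/*
+ * The transaction log is a fixed 32-entry ring: 'next' wraps back to
+ * zero and 'full' records that older entries have been overwritten.
+ */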
+
+struct binder_work {
+	struct list_head entry;
+	enum {
+		BINDER_WORK_TRANSACTION = 1,
+		BINDER_WORK_TRANSACTION_COMPLETE,
+		BINDER_WORK_NODE,
+		BINDER_WORK_DEAD_BINDER,
+		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
+		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
+	} type;
+};
+
+struct binder_node {
+	int debug_id;
+	struct binder_work work;
+	union {
+		struct rb_node rb_node;
+		struct hlist_node dead_node;
+	};
+	struct binder_proc *proc;
+	struct hlist_head refs;
+	int internal_strong_refs;
+	int local_weak_refs;
+	int local_strong_refs;
+	binder_uintptr_t ptr;
+	binder_uintptr_t cookie;
+	unsigned has_strong_ref:1;
+	unsigned pending_strong_ref:1;
+	unsigned has_weak_ref:1;
+	unsigned pending_weak_ref:1;
+	unsigned has_async_transaction:1;
+	unsigned accept_fds:1;
+	unsigned min_priority:8;
+	struct list_head async_todo;
+};
+
+struct binder_ref_death {
+	struct binder_work work;
+	binder_uintptr_t cookie;
+};
+
+struct binder_ref {
+	/* Lookups needed: */
+	/*   node + proc => ref (transaction) */
+	/*   desc + proc => ref (transaction, inc/dec ref) */
+	/*   node => refs + procs (proc exit) */
+	int debug_id;
+	struct rb_node rb_node_desc;
+	struct rb_node rb_node_node;
+	struct hlist_node node_entry;
+	struct binder_proc *proc;
+	struct binder_node *node;
+	uint32_t desc;
+	int strong;
+	int weak;
+	struct binder_ref_death *death;
+};
+
+struct binder_buffer {
+	struct list_head entry; /* free and allocated entries by address */
+	struct rb_node rb_node; /* free entry by size or allocated entry */
+				/* by address */
+	unsigned free:1;
+	unsigned allow_user_free:1;
+	unsigned async_transaction:1;
+	unsigned debug_id:29;
+
+	struct binder_transaction *transaction;
+
+	struct binder_node *target_node;
+	size_t data_size;
+	size_t offsets_size;
+	uint8_t data[0];
+};
+
+enum binder_deferred_state {
+	BINDER_DEFERRED_PUT_FILES    = 0x01,
+	BINDER_DEFERRED_FLUSH        = 0x02,
+	BINDER_DEFERRED_RELEASE      = 0x04,
+};
+
+struct binder_proc {
+	struct hlist_node proc_node;
+	struct rb_root threads;
+	struct rb_root nodes;
+	struct rb_root refs_by_desc;
+	struct rb_root refs_by_node;
+	int pid;
+	struct vm_area_struct *vma;
+	struct mm_struct *vma_vm_mm;
+	struct task_struct *tsk;
+	struct files_struct *files;
+	struct hlist_node deferred_work_node;
+	int deferred_work;
+	void *buffer;
+	ptrdiff_t user_buffer_offset;
+
+	struct list_head buffers;
+	struct rb_root free_buffers;
+	struct rb_root allocated_buffers;
+	size_t free_async_space;
+
+	struct page **pages;
+	size_t buffer_size;
+	uint32_t buffer_free;
+	struct list_head todo;
+	wait_queue_head_t wait;
+	struct binder_stats stats;
+	struct list_head delivered_death;
+	int max_threads;
+	int requested_threads;
+	int requested_threads_started;
+	int ready_threads;
+	long default_priority;
+	struct dentry *debugfs_entry;
+};
+
+enum {
+	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
+	BINDER_LOOPER_STATE_ENTERED     = 0x02,
+	BINDER_LOOPER_STATE_EXITED      = 0x04,
+	BINDER_LOOPER_STATE_INVALID     = 0x08,
+	BINDER_LOOPER_STATE_WAITING     = 0x10,
+	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
+};
+
+struct binder_thread {
+	struct binder_proc *proc;
+	struct rb_node rb_node;
+	int pid;
+	int looper;
+	struct binder_transaction *transaction_stack;
+	struct list_head todo;
+	uint32_t return_error; /* Write failed, return error code in read buf */
+	uint32_t return_error2; /* Write failed, return error code in read */
+		/* buffer. Used when sending a reply to a dead process that */
+		/* we are also waiting on */
+	wait_queue_head_t wait;
+	struct binder_stats stats;
+};
+
+struct binder_transaction {
+	int debug_id;
+	struct binder_work work;
+	struct binder_thread *from;
+	struct binder_transaction *from_parent;
+	struct binder_proc *to_proc;
+	struct binder_thread *to_thread;
+	struct binder_transaction *to_parent;
+	unsigned need_reply:1;
+	/* unsigned is_dead:1; */	/* not used at the moment */
+
+	struct binder_buffer *buffer;
+	unsigned int	code;
+	unsigned int	flags;
+	long	priority;
+	long	saved_priority;
+	kuid_t	sender_euid;
+};
+
+static void
+binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
+
+static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
+{
+	struct files_struct *files = proc->files;
+	unsigned long rlim_cur;
+	unsigned long irqs;
+
+	if (files == NULL)
+		return -ESRCH;
+
+	if (!lock_task_sighand(proc->tsk, &irqs))
+		return -EMFILE;
+
+	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
+	unlock_task_sighand(proc->tsk, &irqs);
+
+	return __alloc_fd(files, 0, rlim_cur, flags);
+}
+
+/*
+ * copied from fd_install
+ */
+static void task_fd_install(
+	struct binder_proc *proc, unsigned int fd, struct file *file)
+{
+	if (proc->files)
+		__fd_install(proc->files, fd, file);
+}
+
+/*
+ * copied from sys_close
+ */
+static long task_close_fd(struct binder_proc *proc, unsigned int fd)
+{
+	int retval;
+
+	if (proc->files == NULL)
+		return -ESRCH;
+
+	retval = __close_fd(proc->files, fd);
+	/* can't restart close syscall because file table entry was cleared */
+	if (unlikely(retval == -ERESTARTSYS ||
+		     retval == -ERESTARTNOINTR ||
+		     retval == -ERESTARTNOHAND ||
+		     retval == -ERESTART_RESTARTBLOCK))
+		retval = -EINTR;
+
+	return retval;
+}
+
+static inline void binder_lock(const char *tag)
+{
+	trace_binder_lock(tag);
+	rt_mutex_lock(&binder_main_lock);
+	trace_binder_locked(tag);
+}
+
+static inline void binder_unlock(const char *tag)
+{
+	trace_binder_unlock(tag);
+	rt_mutex_unlock(&binder_main_lock);
+}
+
+static void binder_set_nice(long nice)
+{
+	long min_nice;
+
+	if (can_nice(current, nice)) {
+		set_user_nice(current, nice);
+		return;
+	}
+	min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
+	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+		     "%d: nice value %ld not allowed, using %ld instead\n",
+		      current->pid, nice, min_nice);
+	set_user_nice(current, min_nice);
+	if (min_nice < 20)
+		return;
+	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
+}
+
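+/*
+ * Buffers sit back to back in the mmap()ed region, so a buffer's
+ * usable size is the gap between its data[] and the start of the
+ * next buffer (or the end of the region for the last one).
+ */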
+static size_t binder_buffer_size(struct binder_proc *proc,
+				 struct binder_buffer *buffer)
+{
+	if (list_is_last(&buffer->entry, &proc->buffers))
+		return proc->buffer + proc->buffer_size - (void *)buffer->data;
+	return (size_t)list_entry(buffer->entry.next,
+			  struct binder_buffer, entry) - (size_t)buffer->data;
+}
+
+static void binder_insert_free_buffer(struct binder_proc *proc,
+				      struct binder_buffer *new_buffer)
+{
+	struct rb_node **p = &proc->free_buffers.rb_node;
+	struct rb_node *parent = NULL;
+	struct binder_buffer *buffer;
+	size_t buffer_size;
+	size_t new_buffer_size;
+
+	BUG_ON(!new_buffer->free);
+
+	new_buffer_size = binder_buffer_size(proc, new_buffer);
+
+	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+		     "%d: add free buffer, size %zd, at %p\n",
+		      proc->pid, new_buffer_size, new_buffer);
+
+	while (*p) {
+		parent = *p;
+		buffer = rb_entry(parent, struct binder_buffer, rb_node);
+		BUG_ON(!buffer->free);
+
+		buffer_size = binder_buffer_size(proc, buffer);
+
+		if (new_buffer_size < buffer_size)
+			p = &parent->rb_left;
+		else
+			p = &parent->rb_right;
+	}
+	rb_link_node(&new_buffer->rb_node, parent, p);
+	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
+}
+
+static void binder_insert_allocated_buffer(struct binder_proc *proc,
+					   struct binder_buffer *new_buffer)
+{
+	struct rb_node **p = &proc->allocated_buffers.rb_node;
+	struct rb_node *parent = NULL;
+	struct binder_buffer *buffer;
+
+	BUG_ON(new_buffer->free);
+
+	while (*p) {
+		parent = *p;
+		buffer = rb_entry(parent, struct binder_buffer, rb_node);
+		BUG_ON(buffer->free);
+
+		if (new_buffer < buffer)
+			p = &parent->rb_left;
+		else if (new_buffer > buffer)
+			p = &parent->rb_right;
+		else
+			BUG();
+	}
+	rb_link_node(&new_buffer->rb_node, parent, p);
+	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
+}
+
+static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
+						  uintptr_t user_ptr)
+{
+	struct rb_node *n = proc->allocated_buffers.rb_node;
+	struct binder_buffer *buffer;
+	struct binder_buffer *kern_ptr;
+
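+	/*
+	 * user_ptr points into the mmap()ed region; subtracting the
+	 * user/kernel offset and the data[] offset recovers the kernel
+	 * address of the containing struct binder_buffer.
+	 */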
+	kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
+		- offsetof(struct binder_buffer, data));
+
+	while (n) {
+		buffer = rb_entry(n, struct binder_buffer, rb_node);
+		BUG_ON(buffer->free);
+
+		if (kern_ptr < buffer)
+			n = n->rb_left;
+		else if (kern_ptr > buffer)
+			n = n->rb_right;
+		else
+			return buffer;
+	}
+	return NULL;
+}
+
+static int binder_update_page_range(struct binder_proc *proc, int allocate,
+				    void *start, void *end,
+				    struct vm_area_struct *vma)
+{
+	void *page_addr;
+	unsigned long user_page_addr;
+	struct vm_struct tmp_area;
+	struct page **page;
+	struct mm_struct *mm;
+
+	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+		     "%d: %s pages %p-%p\n", proc->pid,
+		     allocate ? "allocate" : "free", start, end);
+
+	if (end <= start)
+		return 0;
+
+	trace_binder_update_page_range(proc, allocate, start, end);
+
+	if (vma)
+		mm = NULL;
+	else
+		mm = get_task_mm(proc->tsk);
+
+	if (mm) {
+		down_write(&mm->mmap_sem);
+		vma = proc->vma;
+		if (vma && mm != proc->vma_vm_mm) {
+			pr_err("%d: vma mm and task mm mismatch\n",
+				proc->pid);
+			vma = NULL;
+		}
+	}
+
+	if (allocate == 0)
+		goto free_range;
+
+	if (vma == NULL) {
+		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
+			proc->pid);
+		goto err_no_vma;
+	}
+
+	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+		int ret;
+		struct page **page_array_ptr;
+
+		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+
+		BUG_ON(*page);
+		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+		if (*page == NULL) {
+			pr_err("%d: binder_alloc_buf failed for page at %p\n",
+				proc->pid, page_addr);
+			goto err_alloc_page_failed;
+		}
+		tmp_area.addr = page_addr;
+		tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
+		page_array_ptr = page;
+		ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
+		if (ret) {
+			pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
+			       proc->pid, page_addr);
+			goto err_map_kernel_failed;
+		}
+		user_page_addr =
+			(uintptr_t)page_addr + proc->user_buffer_offset;
+		ret = vm_insert_page(vma, user_page_addr, page[0]);
+		if (ret) {
+			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
+			       proc->pid, user_page_addr);
+			goto err_vm_insert_page_failed;
+		}
+		/* vm_insert_page does not seem to increment the refcount */
+	}
+	if (mm) {
+		up_write(&mm->mmap_sem);
+		mmput(mm);
+	}
+	return 0;
+
+free_range:
+	for (page_addr = end - PAGE_SIZE; page_addr >= start;
+	     page_addr -= PAGE_SIZE) {
+		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+		if (vma)
+			zap_page_range(vma, (uintptr_t)page_addr +
+				proc->user_buffer_offset, PAGE_SIZE, NULL);
+err_vm_insert_page_failed:
+		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+err_map_kernel_failed:
+		__free_page(*page);
+		*page = NULL;
+err_alloc_page_failed:
+		;
+	}
+err_no_vma:
+	if (mm) {
+		up_write(&mm->mmap_sem);
+		mmput(mm);
+	}
+	return -ENOMEM;
+}
+
+static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
+					      size_t data_size,
+					      size_t offsets_size, int is_async)
+{
+	struct rb_node *n = proc->free_buffers.rb_node;
+	struct binder_buffer *buffer;
+	size_t buffer_size;
+	struct rb_node *best_fit = NULL;
+	void *has_page_addr;
+	void *end_page_addr;
+	size_t size;
+
+	if (proc->vma == NULL) {
+		pr_err("%d: binder_alloc_buf, no vma\n",
+		       proc->pid);
+		return NULL;
+	}
+
+	size = ALIGN(data_size, sizeof(void *)) +
+		ALIGN(offsets_size, sizeof(void *));
+
+	if (size < data_size || size < offsets_size) {
+		binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
+				proc->pid, data_size, offsets_size);
+		return NULL;
+	}
+
+	if (is_async &&
+	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
+		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
+			      proc->pid, size);
+		return NULL;
+	}
+
+	while (n) {
+		buffer = rb_entry(n, struct binder_buffer, rb_node);
+		BUG_ON(!buffer->free);
+		buffer_size = binder_buffer_size(proc, buffer);
+
+		if (size < buffer_size) {
+			best_fit = n;
+			n = n->rb_left;
+		} else if (size > buffer_size)
+			n = n->rb_right;
+		else {
+			best_fit = n;
+			break;
+		}
+	}
+	if (best_fit == NULL) {
+		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
+			proc->pid, size);
+		return NULL;
+	}
+	if (n == NULL) {
+		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
+		buffer_size = binder_buffer_size(proc, buffer);
+	}
+
+	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+		     "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
+		      proc->pid, size, buffer, buffer_size);
+
+	has_page_addr =
+		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
+	if (n == NULL) {
+		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
+			buffer_size = size; /* no room for other buffers */
+		else
+			buffer_size = size + sizeof(struct binder_buffer);
+	}
+	end_page_addr =
+		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+	if (end_page_addr > has_page_addr)
+		end_page_addr = has_page_addr;
+	if (binder_update_page_range(proc, 1,
+	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
+		return NULL;
+
+	rb_erase(best_fit, &proc->free_buffers);
+	buffer->free = 0;
+	binder_insert_allocated_buffer(proc, buffer);
+	if (buffer_size != size) {
+		struct binder_buffer *new_buffer = (void *)buffer->data + size;
+
+		list_add(&new_buffer->entry, &buffer->entry);
+		new_buffer->free = 1;
+		binder_insert_free_buffer(proc, new_buffer);
+	}
+	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+		     "%d: binder_alloc_buf size %zd got %p\n",
+		      proc->pid, size, buffer);
+	buffer->data_size = data_size;
+	buffer->offsets_size = offsets_size;
+	buffer->async_transaction = is_async;
+	if (is_async) {
+		proc->free_async_space -= size + sizeof(struct binder_buffer);
+		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+			     "%d: binder_alloc_buf size %zd async free %zd\n",
+			      proc->pid, size, proc->free_async_space);
+	}
+
+	return buffer;
+}
+
+static void *buffer_start_page(struct binder_buffer *buffer)
+{
+	return (void *)((uintptr_t)buffer & PAGE_MASK);
+}
+
+static void *buffer_end_page(struct binder_buffer *buffer)
+{
+	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+}
+
+static void binder_delete_free_buffer(struct binder_proc *proc,
+				      struct binder_buffer *buffer)
+{
+	struct binder_buffer *prev, *next = NULL;
+	int free_page_end = 1;
+	int free_page_start = 1;
+
+	BUG_ON(proc->buffers.next == &buffer->entry);
+	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
+	BUG_ON(!prev->free);
+	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
+		free_page_start = 0;
+		if (buffer_end_page(prev) == buffer_end_page(buffer))
+			free_page_end = 0;
+		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+			     "%d: merge free, buffer %p share page with %p\n",
+			      proc->pid, buffer, prev);
+	}
+
+	if (!list_is_last(&buffer->entry, &proc->buffers)) {
+		next = list_entry(buffer->entry.next,
+				  struct binder_buffer, entry);
+		if (buffer_start_page(next) == buffer_end_page(buffer)) {
+			free_page_end = 0;
+			if (buffer_start_page(next) ==
+			    buffer_start_page(buffer))
+				free_page_start = 0;
+			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+				     "%d: merge free, buffer %p share page with %p\n",
+				      proc->pid, buffer, prev);
+		}
+	}
+	list_del(&buffer->entry);
+	if (free_page_start || free_page_end) {
+		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+			     "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
+			     proc->pid, buffer, free_page_start ? "" : " end",
+			     free_page_end ? "" : " start", prev, next);
+		binder_update_page_range(proc, 0, free_page_start ?
+			buffer_start_page(buffer) : buffer_end_page(buffer),
+			(free_page_end ? buffer_end_page(buffer) :
+			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+	}
+}
+
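+/*
+ * Return a buffer to the free list: release the pages backing its
+ * data, then coalesce with a free next and/or previous neighbour
+ * before reinserting the result into the free_buffers rbtree.
+ */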
+static void binder_free_buf(struct binder_proc *proc,
+			    struct binder_buffer *buffer)
+{
+	size_t size, buffer_size;
+
+	buffer_size = binder_buffer_size(proc, buffer);
+
+	size = ALIGN(buffer->data_size, sizeof(void *)) +
+		ALIGN(buffer->offsets_size, sizeof(void *));
+
+	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+		     "%d: binder_free_buf %p size %zd buffer_size %zd\n",
+		      proc->pid, buffer, size, buffer_size);
+
+	BUG_ON(buffer->free);
+	BUG_ON(size > buffer_size);
+	BUG_ON(buffer->transaction != NULL);
+	BUG_ON((void *)buffer < proc->buffer);
+	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
+
+	if (buffer->async_transaction) {
+		proc->free_async_space += size + sizeof(struct binder_buffer);
+
+		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+			     "%d: binder_free_buf size %zd async free %zd\n",
+			      proc->pid, size, proc->free_async_space);
+	}
+
+	binder_update_page_range(proc, 0,
+		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
+		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
+		NULL);
+	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
+	buffer->free = 1;
+	if (!list_is_last(&buffer->entry, &proc->buffers)) {
+		struct binder_buffer *next = list_entry(buffer->entry.next,
+						struct binder_buffer, entry);
+
+		if (next->free) {
+			rb_erase(&next->rb_node, &proc->free_buffers);
+			binder_delete_free_buffer(proc, next);
+		}
+	}
+	if (proc->buffers.next != &buffer->entry) {
+		struct binder_buffer *prev = list_entry(buffer->entry.prev,
+						struct binder_buffer, entry);
+
+		if (prev->free) {
+			binder_delete_free_buffer(proc, buffer);
+			rb_erase(&prev->rb_node, &proc->free_buffers);
+			buffer = prev;
+		}
+	}
+	binder_insert_free_buffer(proc, buffer);
+}
+
+static struct binder_node *binder_get_node(struct binder_proc *proc,
+					   binder_uintptr_t ptr)
+{
+	struct rb_node *n = proc->nodes.rb_node;
+	struct binder_node *node;
+
+	while (n) {
+		node = rb_entry(n, struct binder_node, rb_node);
+
+		if (ptr < node->ptr)
+			n = n->rb_left;
+		else if (ptr > node->ptr)
+			n = n->rb_right;
+		else
+			return node;
+	}
+	return NULL;
+}
+
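+/*
+ * Allocate a binder_node for (ptr, cookie) and insert it into the
+ * owning process's nodes rbtree; returns NULL if a node with that ptr
+ * already exists or the allocation fails.
+ */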
+static struct binder_node *binder_new_node(struct binder_proc *proc,
+					   binder_uintptr_t ptr,
+					   binder_uintptr_t cookie)
+{
+	struct rb_node **p = &proc->nodes.rb_node;
+	struct rb_node *parent = NULL;
+	struct binder_node *node;
+
+	while (*p) {
+		parent = *p;
+		node = rb_entry(parent, struct binder_node, rb_node);
+
+		if (ptr < node->ptr)
+			p = &(*p)->rb_left;
+		else if (ptr > node->ptr)
+			p = &(*p)->rb_right;
+		else
+			return NULL;
+	}
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (node == NULL)
+		return NULL;
+	binder_stats_created(BINDER_STAT_NODE);
+	rb_link_node(&node->rb_node, parent, p);
+	rb_insert_color(&node->rb_node, &proc->nodes);
+	node->debug_id = ++binder_last_id;
+	node->proc = proc;
+	node->ptr = ptr;
+	node->cookie = cookie;
+	node->work.type = BINDER_WORK_NODE;
+	INIT_LIST_HEAD(&node->work.entry);
+	INIT_LIST_HEAD(&node->async_todo);
+	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+		     "%d:%d node %d u%016llx c%016llx created\n",
+		     proc->pid, current->pid, node->debug_id,
+		     (u64)node->ptr, (u64)node->cookie);
+	return node;
+}
+
+static int binder_inc_node(struct binder_node *node, int strong, int internal,
+			   struct list_head *target_list)
+{
+	if (strong) {
+		if (internal) {
+			if (target_list == NULL &&
+			    node->internal_strong_refs == 0 &&
+			    !(node == binder_context_mgr_node &&
+			    node->has_strong_ref)) {
+				pr_err("invalid inc strong node for %d\n",
+					node->debug_id);
+				return -EINVAL;
+			}
+			node->internal_strong_refs++;
+		} else
+			node->local_strong_refs++;
+		if (!node->has_strong_ref && target_list) {
+			list_del_init(&node->work.entry);
+			list_add_tail(&node->work.entry, target_list);
+		}
+	} else {
+		if (!internal)
+			node->local_weak_refs++;
+		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
+			if (target_list == NULL) {
+				pr_err("invalid inc weak node for %d\n",
+					node->debug_id);
+				return -EINVAL;
+			}
+			list_add_tail(&node->work.entry, target_list);
+		}
+	}
+	return 0;
+}
+
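+/*
+ * Drop a strong or weak count on a node. Once the last counted
+ * reference is gone, the node is either queued back to its owning
+ * process so user space can release it, or freed outright when no
+ * refs remain at all.
+ */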
+static int binder_dec_node(struct binder_node *node, int strong, int internal)
+{
+	if (strong) {
+		if (internal)
+			node->internal_strong_refs--;
+		else
+			node->local_strong_refs--;
+		if (node->local_strong_refs || node->internal_strong_refs)
+			return 0;
+	} else {
+		if (!internal)
+			node->local_weak_refs--;
+		if (node->local_weak_refs || !hlist_empty(&node->refs))
+			return 0;
+	}
+	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
+		if (list_empty(&node->work.entry)) {
+			list_add_tail(&node->work.entry, &node->proc->todo);
+			wake_up_interruptible(&node->proc->wait);
+		}
+	} else {
+		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
+		    !node->local_weak_refs) {
+			list_del_init(&node->work.entry);
+			if (node->proc) {
+				rb_erase(&node->rb_node, &node->proc->nodes);
+				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+					     "refless node %d deleted\n",
+					     node->debug_id);
+			} else {
+				hlist_del(&node->dead_node);
+				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+					     "dead node %d deleted\n",
+					     node->debug_id);
+			}
+			kfree(node);
+			binder_stats_deleted(BINDER_STAT_NODE);
+		}
+	}
+
+	return 0;
+}
+
+static struct binder_ref *binder_get_ref(struct binder_proc *proc,
+					 uint32_t desc)
+{
+	struct rb_node *n = proc->refs_by_desc.rb_node;
+	struct binder_ref *ref;
+
+	while (n) {
+		ref = rb_entry(n, struct binder_ref, rb_node_desc);
+
+		if (desc < ref->desc)
+			n = n->rb_left;
+		else if (desc > ref->desc)
+			n = n->rb_right;
+		else
+			return ref;
+	}
+	return NULL;
+}
+
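+/*
+ * Look up, or create, the ref that 'proc' holds on 'node'. A new ref
+ * is assigned the lowest unused descriptor: desc 0 is reserved for the
+ * context manager node, so ordinary refs start at 1.
+ */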
+static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
+						  struct binder_node *node)
+{
+	struct rb_node *n;
+	struct rb_node **p = &proc->refs_by_node.rb_node;
+	struct rb_node *parent = NULL;
+	struct binder_ref *ref, *new_ref;
+
+	while (*p) {
+		parent = *p;
+		ref = rb_entry(parent, struct binder_ref, rb_node_node);
+
+		if (node < ref->node)
+			p = &(*p)->rb_left;
+		else if (node > ref->node)
+			p = &(*p)->rb_right;
+		else
+			return ref;
+	}
+	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+	if (new_ref == NULL)
+		return NULL;
+	binder_stats_created(BINDER_STAT_REF);
+	new_ref->debug_id = ++binder_last_id;
+	new_ref->proc = proc;
+	new_ref->node = node;
+	rb_link_node(&new_ref->rb_node_node, parent, p);
+	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
+
+	new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
+	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+		ref = rb_entry(n, struct binder_ref, rb_node_desc);
+		if (ref->desc > new_ref->desc)
+			break;
+		new_ref->desc = ref->desc + 1;
+	}
+
+	p = &proc->refs_by_desc.rb_node;
+	while (*p) {
+		parent = *p;
+		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
+
+		if (new_ref->desc < ref->desc)
+			p = &(*p)->rb_left;
+		else if (new_ref->desc > ref->desc)
+			p = &(*p)->rb_right;
+		else
+			BUG();
+	}
+	rb_link_node(&new_ref->rb_node_desc, parent, p);
+	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
+	if (node) {
+		hlist_add_head(&new_ref->node_entry, &node->refs);
+
+		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+			     "%d new ref %d desc %d for node %d\n",
+			      proc->pid, new_ref->debug_id, new_ref->desc,
+			      node->debug_id);
+	} else {
+		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+			     "%d new ref %d desc %d for dead node\n",
+			      proc->pid, new_ref->debug_id, new_ref->desc);
+	}
+	return new_ref;
+}
+
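+/*
+ * Tear down a ref: remove it from both rbtrees, drop the node counts
+ * it was holding, and discard any pending death notification.
+ */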
+static void binder_delete_ref(struct binder_ref *ref)
+{
+	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+		     "%d delete ref %d desc %d for node %d\n",
+		      ref->proc->pid, ref->debug_id, ref->desc,
+		      ref->node->debug_id);
+
+	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
+	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
+	if (ref->strong)
+		binder_dec_node(ref->node, 1, 1);
+	hlist_del(&ref->node_entry);
+	binder_dec_node(ref->node, 0, 1);
+	if (ref->death) {
+		binder_debug(BINDER_DEBUG_DEAD_BINDER,
+			     "%d delete ref %d desc %d has death notification\n",
+			      ref->proc->pid, ref->debug_id, ref->desc);
+		list_del(&ref->death->work.entry);
+		kfree(ref->death);
+		binder_stats_deleted(BINDER_STAT_DEATH);
+	}
+	kfree(ref);
+	binder_stats_deleted(BINDER_STAT_REF);
+}
+
+static int binder_inc_ref(struct binder_ref *ref, int strong,
+			  struct list_head *target_list)
+{
+	int ret;
+
+	if (strong) {
+		if (ref->strong == 0) {
+			ret = binder_inc_node(ref->node, 1, 1, target_list);
+			if (ret)
+				return ret;
+		}
+		ref->strong++;
+	} else {
+		if (ref->weak == 0) {
+			ret = binder_inc_node(ref->node, 0, 1, target_list);
+			if (ret)
+				return ret;
+		}
+		ref->weak++;
+	}
+	return 0;
+}
+
+static int binder_dec_ref(struct binder_ref *ref, int strong)
+{
+	if (strong) {
+		if (ref->strong == 0) {
+			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
+					  ref->proc->pid, ref->debug_id,
+					  ref->desc, ref->strong, ref->weak);
+			return -EINVAL;
+		}
+		ref->strong--;
+		if (ref->strong == 0) {
+			int ret;
+
+			ret = binder_dec_node(ref->node, strong, 1);
+			if (ret)
+				return ret;
+		}
+	} else {
+		if (ref->weak == 0) {
+			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
+					  ref->proc->pid, ref->debug_id,
+					  ref->desc, ref->strong, ref->weak);
+			return -EINVAL;
+		}
+		ref->weak--;
+	}
+	if (ref->strong == 0 && ref->weak == 0)
+		binder_delete_ref(ref);
+	return 0;
+}
+
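+/*
+ * Unwind one transaction from the target thread's stack: the buffer,
+ * if any, is detached from the transaction before the transaction
+ * itself is freed.
+ */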
+static void binder_pop_transaction(struct binder_thread *target_thread,
+				   struct binder_transaction *t)
+{
+	if (target_thread) {
+		BUG_ON(target_thread->transaction_stack != t);
+		BUG_ON(target_thread->transaction_stack->from != target_thread);
+		target_thread->transaction_stack =
+			target_thread->transaction_stack->from_parent;
+		t->from = NULL;
+	}
+	t->need_reply = 0;
+	if (t->buffer)
+		t->buffer->transaction = NULL;
+	kfree(t);
+	binder_stats_deleted(BINDER_STAT_TRANSACTION);
+}
+
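+/*
+ * Walk a failed transaction back toward its originator, popping one
+ * stack frame at a time; the first live sender thread found is woken
+ * with error_code, and dead frames are simply discarded.
+ */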
+static void binder_send_failed_reply(struct binder_transaction *t,
+				     uint32_t error_code)
+{
+	struct binder_thread *target_thread;
+	struct binder_transaction *next;
+
+	BUG_ON(t->flags & TF_ONE_WAY);
+	while (1) {
+		target_thread = t->from;
+		if (target_thread) {
+			if (target_thread->return_error != BR_OK &&
+			   target_thread->return_error2 == BR_OK) {
+				target_thread->return_error2 =
+					target_thread->return_error;
+				target_thread->return_error = BR_OK;
+			}
+			if (target_thread->return_error == BR_OK) {
+				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+					     "send failed reply for transaction %d to %d:%d\n",
+					      t->debug_id,
+					      target_thread->proc->pid,
+					      target_thread->pid);
+
+				binder_pop_transaction(target_thread, t);
+				target_thread->return_error = error_code;
+				wake_up_interruptible(&target_thread->wait);
+			} else {
+				pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
+					target_thread->proc->pid,
+					target_thread->pid,
+					target_thread->return_error);
+			}
+			return;
+		}
+		next = t->from_parent;
+
+		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+			     "send failed reply for transaction %d, target dead\n",
+			     t->debug_id);
+
+		binder_pop_transaction(target_thread, t);
+		if (next == NULL) {
+			binder_debug(BINDER_DEBUG_DEAD_BINDER,
+				     "reply failed, no target thread at root\n");
+			return;
+		}
+		t = next;
+		binder_debug(BINDER_DEBUG_DEAD_BINDER,
+			     "reply failed, no target thread -- retry %d\n",
+			      t->debug_id);
+	}
+}
+
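+/*
+ * Undo the reference counts taken while translating the objects in a
+ * transaction buffer. On the failure path, failed_at bounds the
+ * release to the objects that were actually processed.
+ */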
+static void binder_transaction_buffer_release(struct binder_proc *proc,
+					      struct binder_buffer *buffer,
+					      binder_size_t *failed_at)
+{
+	binder_size_t *offp, *off_end;
+	int debug_id = buffer->debug_id;
+
+	binder_debug(BINDER_DEBUG_TRANSACTION,
+		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
+		     proc->pid, buffer->debug_id,
+		     buffer->data_size, buffer->offsets_size, failed_at);
+
+	if (buffer->target_node)
+		binder_dec_node(buffer->target_node, 1, 0);
+
+	offp = (binder_size_t *)(buffer->data +
+				 ALIGN(buffer->data_size, sizeof(void *)));
+	if (failed_at)
+		off_end = failed_at;
+	else
+		off_end = (void *)offp + buffer->offsets_size;
+	for (; offp < off_end; offp++) {
+		struct flat_binder_object *fp;
+
+		if (*offp > buffer->data_size - sizeof(*fp) ||
+		    buffer->data_size < sizeof(*fp) ||
+		    !IS_ALIGNED(*offp, sizeof(u32))) {
+			pr_err("transaction release %d bad offset %lld, size %zd\n",
+			       debug_id, (u64)*offp, buffer->data_size);
+			continue;
+		}
+		fp = (struct flat_binder_object *)(buffer->data + *offp);
+		switch (fp->type) {
+		case BINDER_TYPE_BINDER:
+		case BINDER_TYPE_WEAK_BINDER: {
+			struct binder_node *node = binder_get_node(proc, fp->binder);
+
+			if (node == NULL) {
+				pr_err("transaction release %d bad node %016llx\n",
+				       debug_id, (u64)fp->binder);
+				break;
+			}
+			binder_debug(BINDER_DEBUG_TRANSACTION,
+				     "        node %d u%016llx\n",
+				     node->debug_id, (u64)node->ptr);
+			binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
+		} break;
+		case BINDER_TYPE_HANDLE:
+		case BINDER_TYPE_WEAK_HANDLE: {
+			struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+
+			if (ref == NULL) {
+				pr_err("transaction release %d bad handle %d\n",
+			       debug_id, fp->handle);
+				break;
+			}
+			binder_debug(BINDER_DEBUG_TRANSACTION,
+				     "        ref %d desc %d (node %d)\n",
+				     ref->debug_id, ref->desc, ref->node->debug_id);
+			binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
+		} break;
+
+		case BINDER_TYPE_FD:
+			binder_debug(BINDER_DEBUG_TRANSACTION,
+				     "        fd %d\n", fp->handle);
+			if (failed_at)
+				task_close_fd(proc, fp->handle);
+			break;
+
+		default:
+			pr_err("transaction release %d bad object type %x\n",
+				debug_id, fp->type);
+			break;
+		}
+	}
+}
+
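+/*
+ * Core of the driver: route a BC_TRANSACTION or BC_REPLY from 'thread'
+ * to its target, copy the payload into a buffer allocated in the
+ * target's address space, translate each flat_binder_object in it
+ * (nodes to handles, handles to nodes or new handles, fds to fds in
+ * the target process), and queue the work on the chosen todo list.
+ */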
+static void binder_transaction(struct binder_proc *proc,
+			       struct binder_thread *thread,
+			       struct binder_transaction_data *tr, int reply)
+{
+	struct binder_transaction *t;
+	struct binder_work *tcomplete;
+	binder_size_t *offp, *off_end;
+	binder_size_t off_min;
+	struct binder_proc *target_proc;
+	struct binder_thread *target_thread = NULL;
+	struct binder_node *target_node = NULL;
+	struct list_head *target_list;
+	wait_queue_head_t *target_wait;
+	struct binder_transaction *in_reply_to = NULL;
+	struct binder_transaction_log_entry *e;
+	uint32_t return_error;
+
+	e = binder_transaction_log_add(&binder_transaction_log);
+	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
+	e->from_proc = proc->pid;
+	e->from_thread = thread->pid;
+	e->target_handle = tr->target.handle;
+	e->data_size = tr->data_size;
+	e->offsets_size = tr->offsets_size;
+
+	if (reply) {
+		in_reply_to = thread->transaction_stack;
+		if (in_reply_to == NULL) {
+			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
+					  proc->pid, thread->pid);
+			return_error = BR_FAILED_REPLY;
+			goto err_empty_call_stack;
+		}
+		binder_set_nice(in_reply_to->saved_priority);
+		if (in_reply_to->to_thread != thread) {
+			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
+				proc->pid, thread->pid, in_reply_to->debug_id,
+				in_reply_to->to_proc ?
+				in_reply_to->to_proc->pid : 0,
+				in_reply_to->to_thread ?
+				in_reply_to->to_thread->pid : 0);
+			return_error = BR_FAILED_REPLY;
+			in_reply_to = NULL;
+			goto err_bad_call_stack;
+		}
+		thread->transaction_stack = in_reply_to->to_parent;
+		target_thread = in_reply_to->from;
+		if (target_thread == NULL) {
+			return_error = BR_DEAD_REPLY;
+			goto err_dead_binder;
+		}
+		if (target_thread->transaction_stack != in_reply_to) {
+			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
+				proc->pid, thread->pid,
+				target_thread->transaction_stack ?
+				target_thread->transaction_stack->debug_id : 0,
+				in_reply_to->debug_id);
+			return_error = BR_FAILED_REPLY;
+			in_reply_to = NULL;
+			target_thread = NULL;
+			goto err_dead_binder;
+		}
+		target_proc = target_thread->proc;
+	} else {
+		if (tr->target.handle) {
+			struct binder_ref *ref;
+
+			ref = binder_get_ref(proc, tr->target.handle);
+			if (ref == NULL) {
+				binder_user_error("%d:%d got transaction to invalid handle\n",
+					proc->pid, thread->pid);
+				return_error = BR_FAILED_REPLY;
+				goto err_invalid_target_handle;
+			}
+			target_node = ref->node;
+		} else {
+			target_node = binder_context_mgr_node;
+			if (target_node == NULL) {
+				return_error = BR_DEAD_REPLY;
+				goto err_no_context_mgr_node;
+			}
+		}
+		e->to_node = target_node->debug_id;
+		target_proc = target_node->proc;
+		if (target_proc == NULL) {
+			return_error = BR_DEAD_REPLY;
+			goto err_dead_binder;
+		}
+		if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) {
+			return_error = BR_FAILED_REPLY;
+			goto err_invalid_target_handle;
+		}
+		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
+			struct binder_transaction *tmp;
+
+			tmp = thread->transaction_stack;
+			if (tmp->to_thread != thread) {
+				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
+					proc->pid, thread->pid, tmp->debug_id,
+					tmp->to_proc ? tmp->to_proc->pid : 0,
+					tmp->to_thread ?
+					tmp->to_thread->pid : 0);
+				return_error = BR_FAILED_REPLY;
+				goto err_bad_call_stack;
+			}
+			while (tmp) {
+				if (tmp->from && tmp->from->proc == target_proc)
+					target_thread = tmp->from;
+				tmp = tmp->from_parent;
+			}
+		}
+	}
+	if (target_thread) {
+		e->to_thread = target_thread->pid;
+		target_list = &target_thread->todo;
+		target_wait = &target_thread->wait;
+	} else {
+		target_list = &target_proc->todo;
+		target_wait = &target_proc->wait;
+	}
+	e->to_proc = target_proc->pid;
+
+	/* TODO: reuse incoming transaction for reply */
+	t = kzalloc(sizeof(*t), GFP_KERNEL);
+	if (t == NULL) {
+		return_error = BR_FAILED_REPLY;
+		goto err_alloc_t_failed;
+	}
+	binder_stats_created(BINDER_STAT_TRANSACTION);
+
+	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
+	if (tcomplete == NULL) {
+		return_error = BR_FAILED_REPLY;
+		goto err_alloc_tcomplete_failed;
+	}
+	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
+
+	t->debug_id = ++binder_last_id;
+	e->debug_id = t->debug_id;
+
+	if (reply)
+		binder_debug(BINDER_DEBUG_TRANSACTION,
+			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
+			     proc->pid, thread->pid, t->debug_id,
+			     target_proc->pid, target_thread->pid,
+			     (u64)tr->data.ptr.buffer,
+			     (u64)tr->data.ptr.offsets,
+			     (u64)tr->data_size, (u64)tr->offsets_size);
+	else
+		binder_debug(BINDER_DEBUG_TRANSACTION,
+			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
+			     proc->pid, thread->pid, t->debug_id,
+			     target_proc->pid, target_node->debug_id,
+			     (u64)tr->data.ptr.buffer,
+			     (u64)tr->data.ptr.offsets,
+			     (u64)tr->data_size, (u64)tr->offsets_size);
+
+	if (!reply && !(tr->flags & TF_ONE_WAY))
+		t->from = thread;
+	else
+		t->from = NULL;
+	t->sender_euid = task_euid(proc->tsk);
+	t->to_proc = target_proc;
+	t->to_thread = target_thread;
+	t->code = tr->code;
+	t->flags = tr->flags;
+	t->priority = task_nice(current);
+
+	trace_binder_transaction(reply, t, target_node);
+
+	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
+		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
+	if (t->buffer == NULL) {
+		return_error = BR_FAILED_REPLY;
+		goto err_binder_alloc_buf_failed;
+	}
+	t->buffer->allow_user_free = 0;
+	t->buffer->debug_id = t->debug_id;
+	t->buffer->transaction = t;
+	t->buffer->target_node = target_node;
+	trace_binder_transaction_alloc_buf(t->buffer);
+	if (target_node)
+		binder_inc_node(target_node, 1, 0, NULL);
+
+	offp = (binder_size_t *)(t->buffer->data +
+				 ALIGN(tr->data_size, sizeof(void *)));
+
+	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
+			   tr->data.ptr.buffer, tr->data_size)) {
+		binder_user_error("%d:%d got transaction with invalid data ptr\n",
+				proc->pid, thread->pid);
+		return_error = BR_FAILED_REPLY;
+		goto err_copy_data_failed;
+	}
+	if (copy_from_user(offp, (const void __user *)(uintptr_t)
+			   tr->data.ptr.offsets, tr->offsets_size)) {
+		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
+				proc->pid, thread->pid);
+		return_error = BR_FAILED_REPLY;
+		goto err_copy_data_failed;
+	}
+	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
+		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
+				proc->pid, thread->pid, (u64)tr->offsets_size);
+		return_error = BR_FAILED_REPLY;
+		goto err_bad_offset;
+	}
+	off_end = (void *)offp + tr->offsets_size;
+	off_min = 0;
+	for (; offp < off_end; offp++) {
+		struct flat_binder_object *fp;
+
+		if (*offp > t->buffer->data_size - sizeof(*fp) ||
+		    *offp < off_min ||
+		    t->buffer->data_size < sizeof(*fp) ||
+		    !IS_ALIGNED(*offp, sizeof(u32))) {
+			binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
+					  proc->pid, thread->pid, (u64)*offp,
+					  (u64)off_min,
+					  (u64)(t->buffer->data_size -
+					  sizeof(*fp)));
+			return_error = BR_FAILED_REPLY;
+			goto err_bad_offset;
+		}
+		fp = (struct flat_binder_object *)(t->buffer->data + *offp);
+		off_min = *offp + sizeof(struct flat_binder_object);
+		switch (fp->type) {
+		case BINDER_TYPE_BINDER:
+		case BINDER_TYPE_WEAK_BINDER: {
+			struct binder_ref *ref;
+			struct binder_node *node = binder_get_node(proc, fp->binder);
+
+			if (node == NULL) {
+				node = binder_new_node(proc, fp->binder, fp->cookie);
+				if (node == NULL) {
+					return_error = BR_FAILED_REPLY;
+					goto err_binder_new_node_failed;
+				}
+				node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+				node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+			}
+			if (fp->cookie != node->cookie) {
+				binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
+					proc->pid, thread->pid,
+					(u64)fp->binder, node->debug_id,
+					(u64)fp->cookie, (u64)node->cookie);
+				return_error = BR_FAILED_REPLY;
+				goto err_binder_get_ref_for_node_failed;
+			}
+			if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+				return_error = BR_FAILED_REPLY;
+				goto err_binder_get_ref_for_node_failed;
+			}
+			ref = binder_get_ref_for_node(target_proc, node);
+			if (ref == NULL) {
+				return_error = BR_FAILED_REPLY;
+				goto err_binder_get_ref_for_node_failed;
+			}
+			if (fp->type == BINDER_TYPE_BINDER)
+				fp->type = BINDER_TYPE_HANDLE;
+			else
+				fp->type = BINDER_TYPE_WEAK_HANDLE;
+			fp->handle = ref->desc;
+			binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
+				       &thread->todo);
+
+			trace_binder_transaction_node_to_ref(t, node, ref);
+			binder_debug(BINDER_DEBUG_TRANSACTION,
+				     "        node %d u%016llx -> ref %d desc %d\n",
+				     node->debug_id, (u64)node->ptr,
+				     ref->debug_id, ref->desc);
+		} break;
+		case BINDER_TYPE_HANDLE:
+		case BINDER_TYPE_WEAK_HANDLE: {
+			struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+
+			if (ref == NULL) {
+				binder_user_error("%d:%d got transaction with invalid handle, %d\n",
+						proc->pid,
+						thread->pid, fp->handle);
+				return_error = BR_FAILED_REPLY;
+				goto err_binder_get_ref_failed;
+			}
+			if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+				return_error = BR_FAILED_REPLY;
+				goto err_binder_get_ref_failed;
+			}
+			if (ref->node->proc == target_proc) {
+				if (fp->type == BINDER_TYPE_HANDLE)
+					fp->type = BINDER_TYPE_BINDER;
+				else
+					fp->type = BINDER_TYPE_WEAK_BINDER;
+				fp->binder = ref->node->ptr;
+				fp->cookie = ref->node->cookie;
+				binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
+				trace_binder_transaction_ref_to_node(t, ref);
+				binder_debug(BINDER_DEBUG_TRANSACTION,
+					     "        ref %d desc %d -> node %d u%016llx\n",
+					     ref->debug_id, ref->desc, ref->node->debug_id,
+					     (u64)ref->node->ptr);
+			} else {
+				struct binder_ref *new_ref;
+
+				new_ref = binder_get_ref_for_node(target_proc, ref->node);
+				if (new_ref == NULL) {
+					return_error = BR_FAILED_REPLY;
+					goto err_binder_get_ref_for_node_failed;
+				}
+				fp->handle = new_ref->desc;
+				binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
+				trace_binder_transaction_ref_to_ref(t, ref,
+								    new_ref);
+				binder_debug(BINDER_DEBUG_TRANSACTION,
+					     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
+					     ref->debug_id, ref->desc, new_ref->debug_id,
+					     new_ref->desc, ref->node->debug_id);
+			}
+		} break;
+
+		case BINDER_TYPE_FD: {
+			int target_fd;
+			struct file *file;
+
+			if (reply) {
+				if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
+					binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
+						proc->pid, thread->pid, fp->handle);
+					return_error = BR_FAILED_REPLY;
+					goto err_fd_not_allowed;
+				}
+			} else if (!target_node->accept_fds) {
+				binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
+					proc->pid, thread->pid, fp->handle);
+				return_error = BR_FAILED_REPLY;
+				goto err_fd_not_allowed;
+			}
+
+			file = fget(fp->handle);
+			if (file == NULL) {
+				binder_user_error("%d:%d got transaction with invalid fd, %d\n",
+					proc->pid, thread->pid, fp->handle);
+				return_error = BR_FAILED_REPLY;
+				goto err_fget_failed;
+			}
+			if (security_binder_transfer_file(proc->tsk, target_proc->tsk, file) < 0) {
+				fput(file);
+				return_error = BR_FAILED_REPLY;
+				goto err_get_unused_fd_failed;
+			}
+			target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
+			if (target_fd < 0) {
+				fput(file);
+				return_error = BR_FAILED_REPLY;
+				goto err_get_unused_fd_failed;
+			}
+			task_fd_install(target_proc, target_fd, file);
+			trace_binder_transaction_fd(t, fp->handle, target_fd);
+			binder_debug(BINDER_DEBUG_TRANSACTION,
+				     "        fd %d -> %d\n", fp->handle, target_fd);
+			/* TODO: fput? */
+			fp->handle = target_fd;
+		} break;
+
+		default:
+			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
+				proc->pid, thread->pid, fp->type);
+			return_error = BR_FAILED_REPLY;
+			goto err_bad_object_type;
+		}
+	}
+	if (reply) {
+		BUG_ON(t->buffer->async_transaction != 0);
+		binder_pop_transaction(target_thread, in_reply_to);
+	} else if (!(t->flags & TF_ONE_WAY)) {
+		BUG_ON(t->buffer->async_transaction != 0);
+		t->need_reply = 1;
+		t->from_parent = thread->transaction_stack;
+		thread->transaction_stack = t;
+	} else {
+		BUG_ON(target_node == NULL);
+		BUG_ON(t->buffer->async_transaction != 1);
+		if (target_node->has_async_transaction) {
+			target_list = &target_node->async_todo;
+			target_wait = NULL;
+		} else
+			target_node->has_async_transaction = 1;
+	}
+	t->work.type = BINDER_WORK_TRANSACTION;
+	list_add_tail(&t->work.entry, target_list);
+	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+	list_add_tail(&tcomplete->entry, &thread->todo);
+	if (target_wait)
+		wake_up_interruptible(target_wait);
+	return;
+
+err_get_unused_fd_failed:
+err_fget_failed:
+err_fd_not_allowed:
+err_binder_get_ref_for_node_failed:
+err_binder_get_ref_failed:
+err_binder_new_node_failed:
+err_bad_object_type:
+err_bad_offset:
+err_copy_data_failed:
+	trace_binder_transaction_failed_buffer_release(t->buffer);
+	binder_transaction_buffer_release(target_proc, t->buffer, offp);
+	t->buffer->transaction = NULL;
+	binder_free_buf(target_proc, t->buffer);
+err_binder_alloc_buf_failed:
+	kfree(tcomplete);
+	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+err_alloc_tcomplete_failed:
+	kfree(t);
+	binder_stats_deleted(BINDER_STAT_TRANSACTION);
+err_alloc_t_failed:
+err_bad_call_stack:
+err_empty_call_stack:
+err_dead_binder:
+err_invalid_target_handle:
+err_no_context_mgr_node:
+	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+		     "%d:%d transaction failed %d, size %lld-%lld\n",
+		     proc->pid, thread->pid, return_error,
+		     (u64)tr->data_size, (u64)tr->offsets_size);
+
+	{
+		struct binder_transaction_log_entry *fe;
+
+		fe = binder_transaction_log_add(&binder_transaction_log_failed);
+		*fe = *e;
+	}
+
+	BUG_ON(thread->return_error != BR_OK);
+	if (in_reply_to) {
+		thread->return_error = BR_TRANSACTION_COMPLETE;
+		binder_send_failed_reply(in_reply_to, return_error);
+	} else
+		thread->return_error = return_error;
+}
+
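+/*
+ * Consume BC_* commands from the user-supplied write buffer until it
+ * is exhausted or a pending return error stops processing; *consumed
+ * is advanced past each fully handled command.
+ */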
+static int binder_thread_write(struct binder_proc *proc,
+			struct binder_thread *thread,
+			binder_uintptr_t binder_buffer, size_t size,
+			binder_size_t *consumed)
+{
+	uint32_t cmd;
+	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
+	void __user *ptr = buffer + *consumed;
+	void __user *end = buffer + size;
+
+	while (ptr < end && thread->return_error == BR_OK) {
+		if (get_user(cmd, (uint32_t __user *)ptr))
+			return -EFAULT;
+		ptr += sizeof(uint32_t);
+		trace_binder_command(cmd);
+		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
+			binder_stats.bc[_IOC_NR(cmd)]++;
+			proc->stats.bc[_IOC_NR(cmd)]++;
+			thread->stats.bc[_IOC_NR(cmd)]++;
+		}
+		switch (cmd) {
+		case BC_INCREFS:
+		case BC_ACQUIRE:
+		case BC_RELEASE:
+		case BC_DECREFS: {
+			uint32_t target;
+			struct binder_ref *ref;
+			const char *debug_string;
+
+			if (get_user(target, (uint32_t __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(uint32_t);
+			if (target == 0 && binder_context_mgr_node &&
+			    (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
+				ref = binder_get_ref_for_node(proc,
+					       binder_context_mgr_node);
+				if (ref->desc != target) {
+					binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
+						proc->pid, thread->pid,
+						ref->desc);
+				}
+			} else
+				ref = binder_get_ref(proc, target);
+			if (ref == NULL) {
+				binder_user_error("%d:%d refcount change on invalid ref %d\n",
+					proc->pid, thread->pid, target);
+				break;
+			}
+			switch (cmd) {
+			case BC_INCREFS:
+				debug_string = "IncRefs";
+				binder_inc_ref(ref, 0, NULL);
+				break;
+			case BC_ACQUIRE:
+				debug_string = "Acquire";
+				binder_inc_ref(ref, 1, NULL);
+				break;
+			case BC_RELEASE:
+				debug_string = "Release";
+				binder_dec_ref(ref, 1);
+				break;
+			case BC_DECREFS:
+			default:
+				debug_string = "DecRefs";
+				binder_dec_ref(ref, 0);
+				break;
+			}
+			binder_debug(BINDER_DEBUG_USER_REFS,
+				     "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
+				     proc->pid, thread->pid, debug_string, ref->debug_id,
+				     ref->desc, ref->strong, ref->weak, ref->node->debug_id);
+			break;
+		}
+		case BC_INCREFS_DONE:
+		case BC_ACQUIRE_DONE: {
+			binder_uintptr_t node_ptr;
+			binder_uintptr_t cookie;
+			struct binder_node *node;
+
+			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(binder_uintptr_t);
+			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(binder_uintptr_t);
+			node = binder_get_node(proc, node_ptr);
+			if (node == NULL) {
+				binder_user_error("%d:%d %s u%016llx no match\n",
+					proc->pid, thread->pid,
+					cmd == BC_INCREFS_DONE ?
+					"BC_INCREFS_DONE" :
+					"BC_ACQUIRE_DONE",
+					(u64)node_ptr);
+				break;
+			}
+			if (cookie != node->cookie) {
+				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
+					proc->pid, thread->pid,
+					cmd == BC_INCREFS_DONE ?
+					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
+					(u64)node_ptr, node->debug_id,
+					(u64)cookie, (u64)node->cookie);
+				break;
+			}
+			if (cmd == BC_ACQUIRE_DONE) {
+				if (node->pending_strong_ref == 0) {
+					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
+						proc->pid, thread->pid,
+						node->debug_id);
+					break;
+				}
+				node->pending_strong_ref = 0;
+			} else {
+				if (node->pending_weak_ref == 0) {
+					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
+						proc->pid, thread->pid,
+						node->debug_id);
+					break;
+				}
+				node->pending_weak_ref = 0;
+			}
+			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
+			binder_debug(BINDER_DEBUG_USER_REFS,
+				     "%d:%d %s node %d ls %d lw %d\n",
+				     proc->pid, thread->pid,
+				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
+				     node->debug_id, node->local_strong_refs, node->local_weak_refs);
+			break;
+		}
+		case BC_ATTEMPT_ACQUIRE:
+			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
+			return -EINVAL;
+		case BC_ACQUIRE_RESULT:
+			pr_err("BC_ACQUIRE_RESULT not supported\n");
+			return -EINVAL;
+
+		case BC_FREE_BUFFER: {
+			binder_uintptr_t data_ptr;
+			struct binder_buffer *buffer;
+
+			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(binder_uintptr_t);
+
+			buffer = binder_buffer_lookup(proc, data_ptr);
+			if (buffer == NULL) {
+				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
+					proc->pid, thread->pid, (u64)data_ptr);
+				break;
+			}
+			if (!buffer->allow_user_free) {
+				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
+					proc->pid, thread->pid, (u64)data_ptr);
+				break;
+			}
+			binder_debug(BINDER_DEBUG_FREE_BUFFER,
+				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
+				     proc->pid, thread->pid, (u64)data_ptr,
+				     buffer->debug_id,
+				     buffer->transaction ? "active" : "finished");
+
+			if (buffer->transaction) {
+				buffer->transaction->buffer = NULL;
+				buffer->transaction = NULL;
+			}
+			if (buffer->async_transaction && buffer->target_node) {
+				BUG_ON(!buffer->target_node->has_async_transaction);
+				if (list_empty(&buffer->target_node->async_todo))
+					buffer->target_node->has_async_transaction = 0;
+				else
+					list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
+			}
+			trace_binder_transaction_buffer_release(buffer);
+			binder_transaction_buffer_release(proc, buffer, NULL);
+			binder_free_buf(proc, buffer);
+			break;
+		}
+
+		case BC_TRANSACTION:
+		case BC_REPLY: {
+			struct binder_transaction_data tr;
+
+			if (copy_from_user(&tr, ptr, sizeof(tr)))
+				return -EFAULT;
+			ptr += sizeof(tr);
+			binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
+			break;
+		}
+
+		case BC_REGISTER_LOOPER:
+			binder_debug(BINDER_DEBUG_THREADS,
+				     "%d:%d BC_REGISTER_LOOPER\n",
+				     proc->pid, thread->pid);
+			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
+				thread->looper |= BINDER_LOOPER_STATE_INVALID;
+				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
+					proc->pid, thread->pid);
+			} else if (proc->requested_threads == 0) {
+				thread->looper |= BINDER_LOOPER_STATE_INVALID;
+				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
+					proc->pid, thread->pid);
+			} else {
+				proc->requested_threads--;
+				proc->requested_threads_started++;
+			}
+			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
+			break;
+		case BC_ENTER_LOOPER:
+			binder_debug(BINDER_DEBUG_THREADS,
+				     "%d:%d BC_ENTER_LOOPER\n",
+				     proc->pid, thread->pid);
+			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
+				thread->looper |= BINDER_LOOPER_STATE_INVALID;
+				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
+					proc->pid, thread->pid);
+			}
+			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
+			break;
+		case BC_EXIT_LOOPER:
+			binder_debug(BINDER_DEBUG_THREADS,
+				     "%d:%d BC_EXIT_LOOPER\n",
+				     proc->pid, thread->pid);
+			thread->looper |= BINDER_LOOPER_STATE_EXITED;
+			break;
+
+		case BC_REQUEST_DEATH_NOTIFICATION:
+		case BC_CLEAR_DEATH_NOTIFICATION: {
+			uint32_t target;
+			binder_uintptr_t cookie;
+			struct binder_ref *ref;
+			struct binder_ref_death *death;
+
+			if (get_user(target, (uint32_t __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(uint32_t);
+			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(binder_uintptr_t);
+			ref = binder_get_ref(proc, target);
+			if (ref == NULL) {
+				binder_user_error("%d:%d %s invalid ref %d\n",
+					proc->pid, thread->pid,
+					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
+					"BC_REQUEST_DEATH_NOTIFICATION" :
+					"BC_CLEAR_DEATH_NOTIFICATION",
+					target);
+				break;
+			}
+
+			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
+				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
+				     proc->pid, thread->pid,
+				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
+				     "BC_REQUEST_DEATH_NOTIFICATION" :
+				     "BC_CLEAR_DEATH_NOTIFICATION",
+				     (u64)cookie, ref->debug_id, ref->desc,
+				     ref->strong, ref->weak, ref->node->debug_id);
+
+			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
+				if (ref->death) {
+					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
+						proc->pid, thread->pid);
+					break;
+				}
+				death = kzalloc(sizeof(*death), GFP_KERNEL);
+				if (death == NULL) {
+					thread->return_error = BR_ERROR;
+					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+						     "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
+						     proc->pid, thread->pid);
+					break;
+				}
+				binder_stats_created(BINDER_STAT_DEATH);
+				INIT_LIST_HEAD(&death->work.entry);
+				death->cookie = cookie;
+				ref->death = death;
+				if (ref->node->proc == NULL) {
+					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+						list_add_tail(&ref->death->work.entry, &thread->todo);
+					} else {
+						list_add_tail(&ref->death->work.entry, &proc->todo);
+						wake_up_interruptible(&proc->wait);
+					}
+				}
+			} else {
+				if (ref->death == NULL) {
+					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
+						proc->pid, thread->pid);
+					break;
+				}
+				death = ref->death;
+				if (death->cookie != cookie) {
+					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
+						proc->pid, thread->pid,
+						(u64)death->cookie,
+						(u64)cookie);
+					break;
+				}
+				ref->death = NULL;
+				if (list_empty(&death->work.entry)) {
+					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
+					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+						list_add_tail(&death->work.entry, &thread->todo);
+					} else {
+						list_add_tail(&death->work.entry, &proc->todo);
+						wake_up_interruptible(&proc->wait);
+					}
+				} else {
+					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
+					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
+				}
+			}
+		} break;
+		case BC_DEAD_BINDER_DONE: {
+			struct binder_work *w;
+			binder_uintptr_t cookie;
+			struct binder_ref_death *death = NULL;
+
+			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+				return -EFAULT;
+
+			/* cookie is a binder_uintptr_t, which may be wider than void * */
+			ptr += sizeof(cookie);
+			list_for_each_entry(w, &proc->delivered_death, entry) {
+				struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
+
+				if (tmp_death->cookie == cookie) {
+					death = tmp_death;
+					break;
+				}
+			}
+			binder_debug(BINDER_DEBUG_DEAD_BINDER,
+				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
+				     proc->pid, thread->pid, (u64)cookie,
+				     death);
+			if (death == NULL) {
+				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
+					proc->pid, thread->pid, (u64)cookie);
+				break;
+			}
+
+			list_del_init(&death->work.entry);
+			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
+				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
+				if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+					list_add_tail(&death->work.entry, &thread->todo);
+				} else {
+					list_add_tail(&death->work.entry, &proc->todo);
+					wake_up_interruptible(&proc->wait);
+				}
+			}
+		} break;
+
+		default:
+			pr_err("%d:%d unknown command %d\n",
+			       proc->pid, thread->pid, cmd);
+			return -EINVAL;
+		}
+		*consumed = ptr - buffer;
+	}
+	return 0;
+}
+
+static void binder_stat_br(struct binder_proc *proc,
+			   struct binder_thread *thread, uint32_t cmd)
+{
+	trace_binder_return(cmd);
+	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
+		binder_stats.br[_IOC_NR(cmd)]++;
+		proc->stats.br[_IOC_NR(cmd)]++;
+		thread->stats.br[_IOC_NR(cmd)]++;
+	}
+}
+
+static int binder_has_proc_work(struct binder_proc *proc,
+				struct binder_thread *thread)
+{
+	return !list_empty(&proc->todo) ||
+		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+}
+
+static int binder_has_thread_work(struct binder_thread *thread)
+{
+	return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
+		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+}
+
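+/*
+ * Fill the user-supplied read buffer with BR_* work items. The thread
+ * services its own todo list or, when it has no work of its own,
+ * blocks as one of the process's ready threads and drains proc->todo.
+ * On return, BR_SPAWN_LOOPER may overwrite the leading BR_NOOP to ask
+ * user space to start another looper thread.
+ */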
+static int binder_thread_read(struct binder_proc *proc,
+			      struct binder_thread *thread,
+			      binder_uintptr_t binder_buffer, size_t size,
+			      binder_size_t *consumed, int non_block)
+{
+	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
+	void __user *ptr = buffer + *consumed;
+	void __user *end = buffer + size;
+
+	int ret = 0;
+	int wait_for_proc_work;
+
+	if (*consumed == 0) {
+		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
+			return -EFAULT;
+		ptr += sizeof(uint32_t);
+	}
+
+retry:
+	wait_for_proc_work = thread->transaction_stack == NULL &&
+				list_empty(&thread->todo);
+
+	if (thread->return_error != BR_OK && ptr < end) {
+		if (thread->return_error2 != BR_OK) {
+			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(uint32_t);
+			binder_stat_br(proc, thread, thread->return_error2);
+			if (ptr == end)
+				goto done;
+			thread->return_error2 = BR_OK;
+		}
+		if (put_user(thread->return_error, (uint32_t __user *)ptr))
+			return -EFAULT;
+		ptr += sizeof(uint32_t);
+		binder_stat_br(proc, thread, thread->return_error);
+		thread->return_error = BR_OK;
+		goto done;
+	}
+
+	thread->looper |= BINDER_LOOPER_STATE_WAITING;
+	if (wait_for_proc_work)
+		proc->ready_threads++;
+
+	binder_unlock(__func__);
+
+	trace_binder_wait_for_work(wait_for_proc_work,
+				   !!thread->transaction_stack,
+				   !list_empty(&thread->todo));
+	if (wait_for_proc_work) {
+		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
+					BINDER_LOOPER_STATE_ENTERED))) {
+			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
+				proc->pid, thread->pid, thread->looper);
+			wait_event_interruptible(binder_user_error_wait,
+						 binder_stop_on_user_error < 2);
+		}
+		binder_set_nice(proc->default_priority);
+		if (non_block) {
+			if (!binder_has_proc_work(proc, thread))
+				ret = -EAGAIN;
+		} else
+			ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
+	} else {
+		if (non_block) {
+			if (!binder_has_thread_work(thread))
+				ret = -EAGAIN;
+		} else
+			ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
+	}
+
+	binder_lock(__func__);
+
+	if (wait_for_proc_work)
+		proc->ready_threads--;
+	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
+
+	if (ret)
+		return ret;
+
+	while (1) {
+		uint32_t cmd;
+		struct binder_transaction_data tr;
+		struct binder_work *w;
+		struct binder_transaction *t = NULL;
+
+		if (!list_empty(&thread->todo)) {
+			w = list_first_entry(&thread->todo, struct binder_work,
+					     entry);
+		} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
+			w = list_first_entry(&proc->todo, struct binder_work,
+					     entry);
+		} else {
+			/* no data added */
+			if (ptr - buffer == 4 &&
+			    !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
+				goto retry;
+			break;
+		}
+
+		if (end - ptr < sizeof(tr) + 4)
+			break;
+
+		switch (w->type) {
+		case BINDER_WORK_TRANSACTION: {
+			t = container_of(w, struct binder_transaction, work);
+		} break;
+		case BINDER_WORK_TRANSACTION_COMPLETE: {
+			cmd = BR_TRANSACTION_COMPLETE;
+			if (put_user(cmd, (uint32_t __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(uint32_t);
+
+			binder_stat_br(proc, thread, cmd);
+			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
+				     "%d:%d BR_TRANSACTION_COMPLETE\n",
+				     proc->pid, thread->pid);
+
+			list_del(&w->entry);
+			kfree(w);
+			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+		} break;
+		case BINDER_WORK_NODE: {
+			struct binder_node *node = container_of(w, struct binder_node, work);
+			uint32_t cmd = BR_NOOP;
+			const char *cmd_name;
+			int strong = node->internal_strong_refs || node->local_strong_refs;
+			int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
+
+			if (weak && !node->has_weak_ref) {
+				cmd = BR_INCREFS;
+				cmd_name = "BR_INCREFS";
+				node->has_weak_ref = 1;
+				node->pending_weak_ref = 1;
+				node->local_weak_refs++;
+			} else if (strong && !node->has_strong_ref) {
+				cmd = BR_ACQUIRE;
+				cmd_name = "BR_ACQUIRE";
+				node->has_strong_ref = 1;
+				node->pending_strong_ref = 1;
+				node->local_strong_refs++;
+			} else if (!strong && node->has_strong_ref) {
+				cmd = BR_RELEASE;
+				cmd_name = "BR_RELEASE";
+				node->has_strong_ref = 0;
+			} else if (!weak && node->has_weak_ref) {
+				cmd = BR_DECREFS;
+				cmd_name = "BR_DECREFS";
+				node->has_weak_ref = 0;
+			}
+			if (cmd != BR_NOOP) {
+				if (put_user(cmd, (uint32_t __user *)ptr))
+					return -EFAULT;
+				ptr += sizeof(uint32_t);
+				if (put_user(node->ptr,
+					     (binder_uintptr_t __user *)ptr))
+					return -EFAULT;
+				ptr += sizeof(binder_uintptr_t);
+				if (put_user(node->cookie,
+					     (binder_uintptr_t __user *)ptr))
+					return -EFAULT;
+				ptr += sizeof(binder_uintptr_t);
+
+				binder_stat_br(proc, thread, cmd);
+				binder_debug(BINDER_DEBUG_USER_REFS,
+					     "%d:%d %s %d u%016llx c%016llx\n",
+					     proc->pid, thread->pid, cmd_name,
+					     node->debug_id,
+					     (u64)node->ptr, (u64)node->cookie);
+			} else {
+				list_del_init(&w->entry);
+				if (!weak && !strong) {
+					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+						     "%d:%d node %d u%016llx c%016llx deleted\n",
+						     proc->pid, thread->pid,
+						     node->debug_id,
+						     (u64)node->ptr,
+						     (u64)node->cookie);
+					rb_erase(&node->rb_node, &proc->nodes);
+					kfree(node);
+					binder_stats_deleted(BINDER_STAT_NODE);
+				} else {
+					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+						     "%d:%d node %d u%016llx c%016llx state unchanged\n",
+						     proc->pid, thread->pid,
+						     node->debug_id,
+						     (u64)node->ptr,
+						     (u64)node->cookie);
+				}
+			}
+		} break;
+		case BINDER_WORK_DEAD_BINDER:
+		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
+		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
+			struct binder_ref_death *death;
+			uint32_t cmd;
+
+			death = container_of(w, struct binder_ref_death, work);
+			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
+				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
+			else
+				cmd = BR_DEAD_BINDER;
+			if (put_user(cmd, (uint32_t __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(uint32_t);
+			if (put_user(death->cookie,
+				     (binder_uintptr_t __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(binder_uintptr_t);
+			binder_stat_br(proc, thread, cmd);
+			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
+				     "%d:%d %s %016llx\n",
+				      proc->pid, thread->pid,
+				      cmd == BR_DEAD_BINDER ?
+				      "BR_DEAD_BINDER" :
+				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
+				      (u64)death->cookie);
+
+			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
+				list_del(&w->entry);
+				kfree(death);
+				binder_stats_deleted(BINDER_STAT_DEATH);
+			} else
+				list_move(&w->entry, &proc->delivered_death);
+			if (cmd == BR_DEAD_BINDER)
+				goto done; /* DEAD_BINDER notifications can cause transactions */
+		} break;
+		}
+
+		if (!t)
+			continue;
+
+		BUG_ON(t->buffer == NULL);
+		if (t->buffer->target_node) {
+			struct binder_node *target_node = t->buffer->target_node;
+
+			tr.target.ptr = target_node->ptr;
+			tr.cookie = target_node->cookie;
+			t->saved_priority = task_nice(current);
+			if (t->priority < target_node->min_priority &&
+			    !(t->flags & TF_ONE_WAY))
+				binder_set_nice(t->priority);
+			else if (!(t->flags & TF_ONE_WAY) ||
+				 t->saved_priority > target_node->min_priority)
+				binder_set_nice(target_node->min_priority);
+			cmd = BR_TRANSACTION;
+		} else {
+			tr.target.ptr = 0;
+			tr.cookie = 0;
+			cmd = BR_REPLY;
+		}
+		tr.code = t->code;
+		tr.flags = t->flags;
+		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
+
+		if (t->from) {
+			struct task_struct *sender = t->from->proc->tsk;
+
+			tr.sender_pid = task_tgid_nr_ns(sender,
+							task_active_pid_ns(current));
+		} else {
+			tr.sender_pid = 0;
+		}
+
+		tr.data_size = t->buffer->data_size;
+		tr.offsets_size = t->buffer->offsets_size;
+		tr.data.ptr.buffer = (binder_uintptr_t)(
+					(uintptr_t)t->buffer->data +
+					proc->user_buffer_offset);
+		tr.data.ptr.offsets = tr.data.ptr.buffer +
+					ALIGN(t->buffer->data_size,
+					    sizeof(void *));
+
+		if (put_user(cmd, (uint32_t __user *)ptr))
+			return -EFAULT;
+		ptr += sizeof(uint32_t);
+		if (copy_to_user(ptr, &tr, sizeof(tr)))
+			return -EFAULT;
+		ptr += sizeof(tr);
+
+		trace_binder_transaction_received(t);
+		binder_stat_br(proc, thread, cmd);
+		binder_debug(BINDER_DEBUG_TRANSACTION,
+			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
+			     proc->pid, thread->pid,
+			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
+			     "BR_REPLY",
+			     t->debug_id, t->from ? t->from->proc->pid : 0,
+			     t->from ? t->from->pid : 0, cmd,
+			     t->buffer->data_size, t->buffer->offsets_size,
+			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
+
+		list_del(&t->work.entry);
+		t->buffer->allow_user_free = 1;
+		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
+			t->to_parent = thread->transaction_stack;
+			t->to_thread = thread;
+			thread->transaction_stack = t;
+		} else {
+			t->buffer->transaction = NULL;
+			kfree(t);
+			binder_stats_deleted(BINDER_STAT_TRANSACTION);
+		}
+		break;
+	}
+
+done:
+
+	*consumed = ptr - buffer;
+	if (proc->requested_threads + proc->ready_threads == 0 &&
+	    proc->requested_threads_started < proc->max_threads &&
+	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
+	     BINDER_LOOPER_STATE_ENTERED))
+	    /* user-space fails to spawn a new thread if we leave this out */) {
+		proc->requested_threads++;
+		binder_debug(BINDER_DEBUG_THREADS,
+			     "%d:%d BR_SPAWN_LOOPER\n",
+			     proc->pid, thread->pid);
+		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
+			return -EFAULT;
+		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
+	}
+	return 0;
+}
+
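+/*
+ * Drain a todo list at teardown: transactions that still expect a
+ * reply get a BR_DEAD_REPLY, everything else is logged and freed.
+ */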
+static void binder_release_work(struct list_head *list)
+{
+	struct binder_work *w;
+
+	while (!list_empty(list)) {
+		w = list_first_entry(list, struct binder_work, entry);
+		list_del_init(&w->entry);
+		switch (w->type) {
+		case BINDER_WORK_TRANSACTION: {
+			struct binder_transaction *t;
+
+			t = container_of(w, struct binder_transaction, work);
+			if (t->buffer->target_node &&
+			    !(t->flags & TF_ONE_WAY)) {
+				binder_send_failed_reply(t, BR_DEAD_REPLY);
+			} else {
+				binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+					"undelivered transaction %d\n",
+					t->debug_id);
+				t->buffer->transaction = NULL;
+				kfree(t);
+				binder_stats_deleted(BINDER_STAT_TRANSACTION);
+			}
+		} break;
+		case BINDER_WORK_TRANSACTION_COMPLETE: {
+			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+				"undelivered TRANSACTION_COMPLETE\n");
+			kfree(w);
+			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+		} break;
+		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
+		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
+			struct binder_ref_death *death;
+
+			death = container_of(w, struct binder_ref_death, work);
+			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+				"undelivered death notification, %016llx\n",
+				(u64)death->cookie);
+			kfree(death);
+			binder_stats_deleted(BINDER_STAT_DEATH);
+		} break;
+		default:
+			pr_err("unexpected work type, %d, not freed\n",
+			       w->type);
+			break;
+		}
+	}
+}
+
+static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+{
+	struct binder_thread *thread = NULL;
+	struct rb_node *parent = NULL;
+	struct rb_node **p = &proc->threads.rb_node;
+
+	while (*p) {
+		parent = *p;
+		thread = rb_entry(parent, struct binder_thread, rb_node);
+
+		if (current->pid < thread->pid)
+			p = &(*p)->rb_left;
+		else if (current->pid > thread->pid)
+			p = &(*p)->rb_right;
+		else
+			break;
+	}
+	if (*p == NULL) {
+		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+		if (thread == NULL)
+			return NULL;
+		binder_stats_created(BINDER_STAT_THREAD);
+		thread->proc = proc;
+		thread->pid = current->pid;
+		init_waitqueue_head(&thread->wait);
+		INIT_LIST_HEAD(&thread->todo);
+		rb_link_node(&thread->rb_node, parent, p);
+		rb_insert_color(&thread->rb_node, &proc->threads);
+		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+		thread->return_error = BR_OK;
+		thread->return_error2 = BR_OK;
+	}
+	return thread;
+}
+
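+/*
+ * Detach a dying thread from every transaction that references it,
+ * sending a failed reply for an in-progress incoming transaction, and
+ * free the thread; returns the number of still-active transactions.
+ */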
+static int binder_free_thread(struct binder_proc *proc,
+			      struct binder_thread *thread)
+{
+	struct binder_transaction *t;
+	struct binder_transaction *send_reply = NULL;
+	int active_transactions = 0;
+
+	rb_erase(&thread->rb_node, &proc->threads);
+	t = thread->transaction_stack;
+	if (t && t->to_thread == thread)
+		send_reply = t;
+	while (t) {
+		active_transactions++;
+		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+			     "release %d:%d transaction %d %s, still active\n",
+			      proc->pid, thread->pid,
+			     t->debug_id,
+			     (t->to_thread == thread) ? "in" : "out");
+
+		if (t->to_thread == thread) {
+			t->to_proc = NULL;
+			t->to_thread = NULL;
+			if (t->buffer) {
+				t->buffer->transaction = NULL;
+				t->buffer = NULL;
+			}
+			t = t->to_parent;
+		} else if (t->from == thread) {
+			t->from = NULL;
+			t = t->from_parent;
+		} else
+			BUG();
+	}
+	if (send_reply)
+		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
+	binder_release_work(&thread->todo);
+	kfree(thread);
+	binder_stats_deleted(BINDER_STAT_THREAD);
+	return active_transactions;
+}
+
+static unsigned int binder_poll(struct file *filp,
+				struct poll_table_struct *wait)
+{
+	struct binder_proc *proc = filp->private_data;
+	struct binder_thread *thread = NULL;
+	int wait_for_proc_work;
+
+	binder_lock(__func__);
+
+	thread = binder_get_thread(proc);
+	if (thread == NULL) {
+		binder_unlock(__func__);
+		return POLLERR;
+	}
+
+	wait_for_proc_work = thread->transaction_stack == NULL &&
+		list_empty(&thread->todo) && thread->return_error == BR_OK;
+
+	binder_unlock(__func__);
+
+	if (wait_for_proc_work) {
+		if (binder_has_proc_work(proc, thread))
+			return POLLIN;
+		poll_wait(filp, &proc->wait, wait);
+		if (binder_has_proc_work(proc, thread))
+			return POLLIN;
+	} else {
+		if (binder_has_thread_work(thread))
+			return POLLIN;
+		poll_wait(filp, &thread->wait, wait);
+		if (binder_has_thread_work(thread))
+			return POLLIN;
+	}
+	return 0;
+}
+
+static int binder_ioctl_write_read(struct file *filp,
+				unsigned int cmd, unsigned long arg,
+				struct binder_thread *thread)
+{
+	int ret = 0;
+	struct binder_proc *proc = filp->private_data;
+	unsigned int size = _IOC_SIZE(cmd);
+	void __user *ubuf = (void __user *)arg;
+	struct binder_write_read bwr;
+
+	if (size != sizeof(struct binder_write_read)) {
+		ret = -EINVAL;
+		goto out;
+	}
+	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
+		ret = -EFAULT;
+		goto out;
+	}
+	binder_debug(BINDER_DEBUG_READ_WRITE,
+		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
+		     proc->pid, thread->pid,
+		     (u64)bwr.write_size, (u64)bwr.write_buffer,
+		     (u64)bwr.read_size, (u64)bwr.read_buffer);
+
+	if (bwr.write_size > 0) {
+		ret = binder_thread_write(proc, thread,
+					  bwr.write_buffer,
+					  bwr.write_size,
+					  &bwr.write_consumed);
+		trace_binder_write_done(ret);
+		if (ret < 0) {
+			bwr.read_consumed = 0;
+			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+				ret = -EFAULT;
+			goto out;
+		}
+	}
+	if (bwr.read_size > 0) {
+		ret = binder_thread_read(proc, thread, bwr.read_buffer,
+					 bwr.read_size,
+					 &bwr.read_consumed,
+					 filp->f_flags & O_NONBLOCK);
+		trace_binder_read_done(ret);
+		if (!list_empty(&proc->todo))
+			wake_up_interruptible(&proc->wait);
+		if (ret < 0) {
+			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+				ret = -EFAULT;
+			goto out;
+		}
+	}
+	binder_debug(BINDER_DEBUG_READ_WRITE,
+		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
+		     proc->pid, thread->pid,
+		     (u64)bwr.write_consumed, (u64)bwr.write_size,
+		     (u64)bwr.read_consumed, (u64)bwr.read_size);
+	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
+		ret = -EFAULT;
+		goto out;
+	}
+out:
+	return ret;
+}
+
+static int binder_ioctl_set_ctx_mgr(struct file *filp)
+{
+	int ret = 0;
+	struct binder_proc *proc = filp->private_data;
+	kuid_t curr_euid = current_euid();
+
+	if (binder_context_mgr_node != NULL) {
+		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
+		ret = -EBUSY;
+		goto out;
+	}
+	ret = security_binder_set_context_mgr(proc->tsk);
+	if (ret < 0)
+		goto out;
+	if (uid_valid(binder_context_mgr_uid)) {
+		if (!uid_eq(binder_context_mgr_uid, curr_euid)) {
+			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
+			       from_kuid(&init_user_ns, curr_euid),
+			       from_kuid(&init_user_ns,
+					binder_context_mgr_uid));
+			ret = -EPERM;
+			goto out;
+		}
+	} else {
+		binder_context_mgr_uid = curr_euid;
+	}
+	binder_context_mgr_node = binder_new_node(proc, 0, 0);
+	if (binder_context_mgr_node == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	binder_context_mgr_node->local_weak_refs++;
+	binder_context_mgr_node->local_strong_refs++;
+	binder_context_mgr_node->has_strong_ref = 1;
+	binder_context_mgr_node->has_weak_ref = 1;
+out:
+	return ret;
+}
+
+static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int ret;
+	struct binder_proc *proc = filp->private_data;
+	struct binder_thread *thread;
+	unsigned int size = _IOC_SIZE(cmd);
+	void __user *ubuf = (void __user *)arg;
+
+	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
+			proc->pid, current->pid, cmd, arg);*/
+
+	trace_binder_ioctl(cmd, arg);
+
+	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
+	if (ret)
+		goto err_unlocked;
+
+	binder_lock(__func__);
+	thread = binder_get_thread(proc);
+	if (thread == NULL) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	switch (cmd) {
+	case BINDER_WRITE_READ:
+		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
+		if (ret)
+			goto err;
+		break;
+	case BINDER_SET_MAX_THREADS:
+		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
+			ret = -EINVAL;
+			goto err;
+		}
+		break;
+	case BINDER_SET_CONTEXT_MGR:
+		ret = binder_ioctl_set_ctx_mgr(filp);
+		if (ret)
+			goto err;
+		break;
+	case BINDER_THREAD_EXIT:
+		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
+			     proc->pid, thread->pid);
+		binder_free_thread(proc, thread);
+		thread = NULL;
+		break;
+	case BINDER_VERSION: {
+		struct binder_version __user *ver = ubuf;
+
+		if (size != sizeof(struct binder_version)) {
+			ret = -EINVAL;
+			goto err;
+		}
+		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
+			     &ver->protocol_version)) {
+			ret = -EINVAL;
+			goto err;
+		}
+		break;
+	}
+	default:
+		ret = -EINVAL;
+		goto err;
+	}
+	ret = 0;
+err:
+	if (thread)
+		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
+	binder_unlock(__func__);
+	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
+	if (ret && ret != -ERESTARTSYS)
+		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
+err_unlocked:
+	trace_binder_ioctl_done(ret);
+	return ret;
+}
+
+static void binder_vma_open(struct vm_area_struct *vma)
+{
+	struct binder_proc *proc = vma->vm_private_data;
+
+	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
+		     proc->pid, vma->vm_start, vma->vm_end,
+		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
+		     (unsigned long)pgprot_val(vma->vm_page_prot));
+}
+
+static void binder_vma_close(struct vm_area_struct *vma)
+{
+	struct binder_proc *proc = vma->vm_private_data;
+
+	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
+		     proc->pid, vma->vm_start, vma->vm_end,
+		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
+		     (unsigned long)pgprot_val(vma->vm_page_prot));
+	proc->vma = NULL;
+	proc->vma_vm_mm = NULL;
+	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
+}
+
+static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	return VM_FAULT_SIGBUS;
+}
+
+static struct vm_operations_struct binder_vm_ops = {
+	.open = binder_vma_open,
+	.close = binder_vma_close,
+	.fault = binder_vm_fault,
+};
+
+static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	int ret;
+	struct vm_struct *area;
+	struct binder_proc *proc = filp->private_data;
+	const char *failure_string;
+	struct binder_buffer *buffer;
+
+	if (proc->tsk != current)
+		return -EINVAL;
+
+	if ((vma->vm_end - vma->vm_start) > SZ_4M)
+		vma->vm_end = vma->vm_start + SZ_4M;
+
+	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+		     "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
+		     proc->pid, vma->vm_start, vma->vm_end,
+		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
+		     (unsigned long)pgprot_val(vma->vm_page_prot));
+
+	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
+		ret = -EPERM;
+		failure_string = "bad vm_flags";
+		goto err_bad_arg;
+	}
+	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
+
+	mutex_lock(&binder_mmap_lock);
+	if (proc->buffer) {
+		ret = -EBUSY;
+		failure_string = "already mapped";
+		goto err_already_mapped;
+	}
+
+	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
+	if (area == NULL) {
+		ret = -ENOMEM;
+		failure_string = "get_vm_area";
+		goto err_get_vm_area_failed;
+	}
+	proc->buffer = area->addr;
+	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
+	mutex_unlock(&binder_mmap_lock);
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+	if (cache_is_vipt_aliasing()) {
+		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
+			pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
+			vma->vm_start += PAGE_SIZE;
+		}
+	}
+#endif
+	proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
+	if (proc->pages == NULL) {
+		ret = -ENOMEM;
+		failure_string = "alloc page array";
+		goto err_alloc_pages_failed;
+	}
+	proc->buffer_size = vma->vm_end - vma->vm_start;
+
+	vma->vm_ops = &binder_vm_ops;
+	vma->vm_private_data = proc;
+
+	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
+		ret = -ENOMEM;
+		failure_string = "alloc small buf";
+		goto err_alloc_small_buf_failed;
+	}
+	buffer = proc->buffer;
+	INIT_LIST_HEAD(&proc->buffers);
+	list_add(&buffer->entry, &proc->buffers);
+	buffer->free = 1;
+	binder_insert_free_buffer(proc, buffer);
+	proc->free_async_space = proc->buffer_size / 2;
+	barrier();
+	proc->files = get_files_struct(current);
+	proc->vma = vma;
+	proc->vma_vm_mm = vma->vm_mm;
+
+	/*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
+		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
+	return 0;
+
+err_alloc_small_buf_failed:
+	kfree(proc->pages);
+	proc->pages = NULL;
+err_alloc_pages_failed:
+	mutex_lock(&binder_mmap_lock);
+	vfree(proc->buffer);
+	proc->buffer = NULL;
+err_get_vm_area_failed:
+err_already_mapped:
+	mutex_unlock(&binder_mmap_lock);
+err_bad_arg:
+	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
+	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
+	return ret;
+}
+
+static int binder_open(struct inode *nodp, struct file *filp)
+{
+	struct binder_proc *proc;
+
+	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
+		     current->group_leader->pid, current->pid);
+
+	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+	if (proc == NULL)
+		return -ENOMEM;
+	get_task_struct(current);
+	proc->tsk = current;
+	INIT_LIST_HEAD(&proc->todo);
+	init_waitqueue_head(&proc->wait);
+	proc->default_priority = task_nice(current);
+
+	binder_lock(__func__);
+
+	binder_stats_created(BINDER_STAT_PROC);
+	hlist_add_head(&proc->proc_node, &binder_procs);
+	proc->pid = current->group_leader->pid;
+	INIT_LIST_HEAD(&proc->delivered_death);
+	filp->private_data = proc;
+
+	binder_unlock(__func__);
+
+	if (binder_debugfs_dir_entry_proc) {
+		char strbuf[11];
+
+		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
+		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
+			binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
+	}
+
+	return 0;
+}
+
+static int binder_flush(struct file *filp, fl_owner_t id)
+{
+	struct binder_proc *proc = filp->private_data;
+
+	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
+
+	return 0;
+}
+
+static void binder_deferred_flush(struct binder_proc *proc)
+{
+	struct rb_node *n;
+	int wake_count = 0;
+
+	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
+		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
+
+		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
+			wake_up_interruptible(&thread->wait);
+			wake_count++;
+		}
+	}
+	wake_up_interruptible_all(&proc->wait);
+
+	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+		     "binder_flush: %d woke %d threads\n", proc->pid,
+		     wake_count);
+}
+
+static int binder_release(struct inode *nodp, struct file *filp)
+{
+	struct binder_proc *proc = filp->private_data;
+
+	debugfs_remove(proc->debugfs_entry);
+	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
+
+	return 0;
+}
+
+static int binder_node_release(struct binder_node *node, int refs)
+{
+	struct binder_ref *ref;
+	int death = 0;
+
+	list_del_init(&node->work.entry);
+	binder_release_work(&node->async_todo);
+
+	if (hlist_empty(&node->refs)) {
+		kfree(node);
+		binder_stats_deleted(BINDER_STAT_NODE);
+
+		return refs;
+	}
+
+	node->proc = NULL;
+	node->local_strong_refs = 0;
+	node->local_weak_refs = 0;
+	hlist_add_head(&node->dead_node, &binder_dead_nodes);
+
+	hlist_for_each_entry(ref, &node->refs, node_entry) {
+		refs++;
+
+		if (!ref->death)
+			continue;
+
+		death++;
+
+		if (list_empty(&ref->death->work.entry)) {
+			ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+			list_add_tail(&ref->death->work.entry,
+				      &ref->proc->todo);
+			wake_up_interruptible(&ref->proc->wait);
+		} else
+			BUG();
+	}
+
+	binder_debug(BINDER_DEBUG_DEAD_BINDER,
+		     "node %d now dead, refs %d, death %d\n",
+		     node->debug_id, refs, death);
+
+	return refs;
+}
+
+static void binder_deferred_release(struct binder_proc *proc)
+{
+	struct binder_transaction *t;
+	struct rb_node *n;
+	int threads, nodes, incoming_refs, outgoing_refs, buffers,
+		active_transactions, page_count;
+
+	BUG_ON(proc->vma);
+	BUG_ON(proc->files);
+
+	hlist_del(&proc->proc_node);
+
+	if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
+		binder_debug(BINDER_DEBUG_DEAD_BINDER,
+			     "%s: %d context_mgr_node gone\n",
+			     __func__, proc->pid);
+		binder_context_mgr_node = NULL;
+	}
+
+	threads = 0;
+	active_transactions = 0;
+	while ((n = rb_first(&proc->threads))) {
+		struct binder_thread *thread;
+
+		thread = rb_entry(n, struct binder_thread, rb_node);
+		threads++;
+		active_transactions += binder_free_thread(proc, thread);
+	}
+
+	nodes = 0;
+	incoming_refs = 0;
+	while ((n = rb_first(&proc->nodes))) {
+		struct binder_node *node;
+
+		node = rb_entry(n, struct binder_node, rb_node);
+		nodes++;
+		rb_erase(&node->rb_node, &proc->nodes);
+		incoming_refs = binder_node_release(node, incoming_refs);
+	}
+
+	outgoing_refs = 0;
+	while ((n = rb_first(&proc->refs_by_desc))) {
+		struct binder_ref *ref;
+
+		ref = rb_entry(n, struct binder_ref, rb_node_desc);
+		outgoing_refs++;
+		binder_delete_ref(ref);
+	}
+
+	binder_release_work(&proc->todo);
+	binder_release_work(&proc->delivered_death);
+
+	buffers = 0;
+	while ((n = rb_first(&proc->allocated_buffers))) {
+		struct binder_buffer *buffer;
+
+		buffer = rb_entry(n, struct binder_buffer, rb_node);
+
+		t = buffer->transaction;
+		if (t) {
+			t->buffer = NULL;
+			buffer->transaction = NULL;
+			pr_err("release proc %d, transaction %d, not freed\n",
+			       proc->pid, t->debug_id);
+			/*BUG();*/
+		}
+
+		binder_free_buf(proc, buffer);
+		buffers++;
+	}
+
+	binder_stats_deleted(BINDER_STAT_PROC);
+
+	page_count = 0;
+	if (proc->pages) {
+		int i;
+
+		for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
+			void *page_addr;
+
+			if (!proc->pages[i])
+				continue;
+
+			page_addr = proc->buffer + i * PAGE_SIZE;
+			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+				     "%s: %d: page %d at %p not freed\n",
+				     __func__, proc->pid, i, page_addr);
+			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+			__free_page(proc->pages[i]);
+			page_count++;
+		}
+		kfree(proc->pages);
+		vfree(proc->buffer);
+	}
+
+	put_task_struct(proc->tsk);
+
+	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
+		     __func__, proc->pid, threads, nodes, incoming_refs,
+		     outgoing_refs, active_transactions, buffers, page_count);
+
+	kfree(proc);
+}
+
+static void binder_deferred_func(struct work_struct *work)
+{
+	struct binder_proc *proc;
+	struct files_struct *files;
+
+	int defer;
+
+	do {
+		binder_lock(__func__);
+		mutex_lock(&binder_deferred_lock);
+		if (!hlist_empty(&binder_deferred_list)) {
+			proc = hlist_entry(binder_deferred_list.first,
+					struct binder_proc, deferred_work_node);
+			hlist_del_init(&proc->deferred_work_node);
+			defer = proc->deferred_work;
+			proc->deferred_work = 0;
+		} else {
+			proc = NULL;
+			defer = 0;
+		}
+		mutex_unlock(&binder_deferred_lock);
+
+		files = NULL;
+		if (defer & BINDER_DEFERRED_PUT_FILES) {
+			files = proc->files;
+			if (files)
+				proc->files = NULL;
+		}
+
+		if (defer & BINDER_DEFERRED_FLUSH)
+			binder_deferred_flush(proc);
+
+		if (defer & BINDER_DEFERRED_RELEASE)
+			binder_deferred_release(proc); /* frees proc */
+
+		binder_unlock(__func__);
+		if (files)
+			put_files_struct(files);
+	} while (proc);
+}
+static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
+
+static void
+binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
+{
+	mutex_lock(&binder_deferred_lock);
+	proc->deferred_work |= defer;
+	if (hlist_unhashed(&proc->deferred_work_node)) {
+		hlist_add_head(&proc->deferred_work_node,
+				&binder_deferred_list);
+		queue_work(binder_deferred_workqueue, &binder_deferred_work);
+	}
+	mutex_unlock(&binder_deferred_lock);
+}
+
+static void print_binder_transaction(struct seq_file *m, const char *prefix,
+				     struct binder_transaction *t)
+{
+	seq_printf(m,
+		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
+		   prefix, t->debug_id, t,
+		   t->from ? t->from->proc->pid : 0,
+		   t->from ? t->from->pid : 0,
+		   t->to_proc ? t->to_proc->pid : 0,
+		   t->to_thread ? t->to_thread->pid : 0,
+		   t->code, t->flags, t->priority, t->need_reply);
+	if (t->buffer == NULL) {
+		seq_puts(m, " buffer free\n");
+		return;
+	}
+	if (t->buffer->target_node)
+		seq_printf(m, " node %d",
+			   t->buffer->target_node->debug_id);
+	seq_printf(m, " size %zd:%zd data %p\n",
+		   t->buffer->data_size, t->buffer->offsets_size,
+		   t->buffer->data);
+}
+
+static void print_binder_buffer(struct seq_file *m, const char *prefix,
+				struct binder_buffer *buffer)
+{
+	seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
+		   prefix, buffer->debug_id, buffer->data,
+		   buffer->data_size, buffer->offsets_size,
+		   buffer->transaction ? "active" : "delivered");
+}
+
+static void print_binder_work(struct seq_file *m, const char *prefix,
+			      const char *transaction_prefix,
+			      struct binder_work *w)
+{
+	struct binder_node *node;
+	struct binder_transaction *t;
+
+	switch (w->type) {
+	case BINDER_WORK_TRANSACTION:
+		t = container_of(w, struct binder_transaction, work);
+		print_binder_transaction(m, transaction_prefix, t);
+		break;
+	case BINDER_WORK_TRANSACTION_COMPLETE:
+		seq_printf(m, "%stransaction complete\n", prefix);
+		break;
+	case BINDER_WORK_NODE:
+		node = container_of(w, struct binder_node, work);
+		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
+			   prefix, node->debug_id,
+			   (u64)node->ptr, (u64)node->cookie);
+		break;
+	case BINDER_WORK_DEAD_BINDER:
+		seq_printf(m, "%shas dead binder\n", prefix);
+		break;
+	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
+		seq_printf(m, "%shas cleared dead binder\n", prefix);
+		break;
+	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
+		seq_printf(m, "%shas cleared death notification\n", prefix);
+		break;
+	default:
+		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
+		break;
+	}
+}
+
+static void print_binder_thread(struct seq_file *m,
+				struct binder_thread *thread,
+				int print_always)
+{
+	struct binder_transaction *t;
+	struct binder_work *w;
+	size_t start_pos = m->count;
+	size_t header_pos;
+
+	seq_printf(m, "  thread %d: l %02x\n", thread->pid, thread->looper);
+	header_pos = m->count;
+	t = thread->transaction_stack;
+	while (t) {
+		if (t->from == thread) {
+			print_binder_transaction(m,
+						 "    outgoing transaction", t);
+			t = t->from_parent;
+		} else if (t->to_thread == thread) {
+			print_binder_transaction(m,
+						 "    incoming transaction", t);
+			t = t->to_parent;
+		} else {
+			print_binder_transaction(m, "    bad transaction", t);
+			t = NULL;
+		}
+	}
+	list_for_each_entry(w, &thread->todo, entry) {
+		print_binder_work(m, "    ", "    pending transaction", w);
+	}
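+	/* if only the header was printed, rewind and drop it */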
+	if (!print_always && m->count == header_pos)
+		m->count = start_pos;
+}
+
+static void print_binder_node(struct seq_file *m, struct binder_node *node)
+{
+	struct binder_ref *ref;
+	struct binder_work *w;
+	int count;
+
+	count = 0;
+	hlist_for_each_entry(ref, &node->refs, node_entry)
+		count++;
+
+	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
+		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
+		   node->has_strong_ref, node->has_weak_ref,
+		   node->local_strong_refs, node->local_weak_refs,
+		   node->internal_strong_refs, count);
+	if (count) {
+		seq_puts(m, " proc");
+		hlist_for_each_entry(ref, &node->refs, node_entry)
+			seq_printf(m, " %d", ref->proc->pid);
+	}
+	seq_puts(m, "\n");
+	list_for_each_entry(w, &node->async_todo, entry)
+		print_binder_work(m, "    ",
+				  "    pending async transaction", w);
+}
+
+static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
+{
+	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %p\n",
+		   ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
+		   ref->node->debug_id, ref->strong, ref->weak, ref->death);
+}
+
+static void print_binder_proc(struct seq_file *m,
+			      struct binder_proc *proc, int print_all)
+{
+	struct binder_work *w;
+	struct rb_node *n;
+	size_t start_pos = m->count;
+	size_t header_pos;
+
+	seq_printf(m, "proc %d\n", proc->pid);
+	header_pos = m->count;
+
+	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+		print_binder_thread(m, rb_entry(n, struct binder_thread,
+						rb_node), print_all);
+	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
+		struct binder_node *node = rb_entry(n, struct binder_node,
+						    rb_node);
+		if (print_all || node->has_async_transaction)
+			print_binder_node(m, node);
+	}
+	if (print_all) {
+		for (n = rb_first(&proc->refs_by_desc);
+		     n != NULL;
+		     n = rb_next(n))
+			print_binder_ref(m, rb_entry(n, struct binder_ref,
+						     rb_node_desc));
+	}
+	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
+		print_binder_buffer(m, "  buffer",
+				    rb_entry(n, struct binder_buffer, rb_node));
+	list_for_each_entry(w, &proc->todo, entry)
+		print_binder_work(m, "  ", "  pending transaction", w);
+	list_for_each_entry(w, &proc->delivered_death, entry) {
+		seq_puts(m, "  has delivered dead binder\n");
+		break;
+	}
+	if (!print_all && m->count == header_pos)
+		m->count = start_pos;
+}
+
+static const char * const binder_return_strings[] = {
+	"BR_ERROR",
+	"BR_OK",
+	"BR_TRANSACTION",
+	"BR_REPLY",
+	"BR_ACQUIRE_RESULT",
+	"BR_DEAD_REPLY",
+	"BR_TRANSACTION_COMPLETE",
+	"BR_INCREFS",
+	"BR_ACQUIRE",
+	"BR_RELEASE",
+	"BR_DECREFS",
+	"BR_ATTEMPT_ACQUIRE",
+	"BR_NOOP",
+	"BR_SPAWN_LOOPER",
+	"BR_FINISHED",
+	"BR_DEAD_BINDER",
+	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
+	"BR_FAILED_REPLY"
+};
+
+static const char * const binder_command_strings[] = {
+	"BC_TRANSACTION",
+	"BC_REPLY",
+	"BC_ACQUIRE_RESULT",
+	"BC_FREE_BUFFER",
+	"BC_INCREFS",
+	"BC_ACQUIRE",
+	"BC_RELEASE",
+	"BC_DECREFS",
+	"BC_INCREFS_DONE",
+	"BC_ACQUIRE_DONE",
+	"BC_ATTEMPT_ACQUIRE",
+	"BC_REGISTER_LOOPER",
+	"BC_ENTER_LOOPER",
+	"BC_EXIT_LOOPER",
+	"BC_REQUEST_DEATH_NOTIFICATION",
+	"BC_CLEAR_DEATH_NOTIFICATION",
+	"BC_DEAD_BINDER_DONE"
+};
+
+static const char * const binder_objstat_strings[] = {
+	"proc",
+	"thread",
+	"node",
+	"ref",
+	"death",
+	"transaction",
+	"transaction_complete"
+};
+
+static void print_binder_stats(struct seq_file *m, const char *prefix,
+			       struct binder_stats *stats)
+{
+	int i;
+
+	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
+		     ARRAY_SIZE(binder_command_strings));
+	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
+		if (stats->bc[i])
+			seq_printf(m, "%s%s: %d\n", prefix,
+				   binder_command_strings[i], stats->bc[i]);
+	}
+
+	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
+		     ARRAY_SIZE(binder_return_strings));
+	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
+		if (stats->br[i])
+			seq_printf(m, "%s%s: %d\n", prefix,
+				   binder_return_strings[i], stats->br[i]);
+	}
+
+	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
+		     ARRAY_SIZE(binder_objstat_strings));
+	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
+		     ARRAY_SIZE(stats->obj_deleted));
+	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
+		if (stats->obj_created[i] || stats->obj_deleted[i])
+			seq_printf(m, "%s%s: active %d total %d\n", prefix,
+				binder_objstat_strings[i],
+				stats->obj_created[i] - stats->obj_deleted[i],
+				stats->obj_created[i]);
+	}
+}
+
+static void print_binder_proc_stats(struct seq_file *m,
+				    struct binder_proc *proc)
+{
+	struct binder_work *w;
+	struct rb_node *n;
+	int count, strong, weak;
+
+	seq_printf(m, "proc %d\n", proc->pid);
+	count = 0;
+	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+		count++;
+	seq_printf(m, "  threads: %d\n", count);
+	seq_printf(m, "  requested threads: %d+%d/%d\n"
+			"  ready threads %d\n"
+			"  free async space %zd\n", proc->requested_threads,
+			proc->requested_threads_started, proc->max_threads,
+			proc->ready_threads, proc->free_async_space);
+	count = 0;
+	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
+		count++;
+	seq_printf(m, "  nodes: %d\n", count);
+	count = 0;
+	strong = 0;
+	weak = 0;
+	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+		struct binder_ref *ref = rb_entry(n, struct binder_ref,
+						  rb_node_desc);
+		count++;
+		strong += ref->strong;
+		weak += ref->weak;
+	}
+	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
+
+	count = 0;
+	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
+		count++;
+	seq_printf(m, "  buffers: %d\n", count);
+
+	count = 0;
+	list_for_each_entry(w, &proc->todo, entry) {
+		switch (w->type) {
+		case BINDER_WORK_TRANSACTION:
+			count++;
+			break;
+		default:
+			break;
+		}
+	}
+	seq_printf(m, "  pending transactions: %d\n", count);
+
+	print_binder_stats(m, "  ", &proc->stats);
+}
+
+
+static int binder_state_show(struct seq_file *m, void *unused)
+{
+	struct binder_proc *proc;
+	struct binder_node *node;
+	int do_lock = !binder_debug_no_lock;
+
+	if (do_lock)
+		binder_lock(__func__);
+
+	seq_puts(m, "binder state:\n");
+
+	if (!hlist_empty(&binder_dead_nodes))
+		seq_puts(m, "dead nodes:\n");
+	hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
+		print_binder_node(m, node);
+
+	hlist_for_each_entry(proc, &binder_procs, proc_node)
+		print_binder_proc(m, proc, 1);
+	if (do_lock)
+		binder_unlock(__func__);
+	return 0;
+}
+
+static int binder_stats_show(struct seq_file *m, void *unused)
+{
+	struct binder_proc *proc;
+	int do_lock = !binder_debug_no_lock;
+
+	if (do_lock)
+		binder_lock(__func__);
+
+	seq_puts(m, "binder stats:\n");
+
+	print_binder_stats(m, "", &binder_stats);
+
+	hlist_for_each_entry(proc, &binder_procs, proc_node)
+		print_binder_proc_stats(m, proc);
+	if (do_lock)
+		binder_unlock(__func__);
+	return 0;
+}
+
+static int binder_transactions_show(struct seq_file *m, void *unused)
+{
+	struct binder_proc *proc;
+	int do_lock = !binder_debug_no_lock;
+
+	if (do_lock)
+		binder_lock(__func__);
+
+	seq_puts(m, "binder transactions:\n");
+	hlist_for_each_entry(proc, &binder_procs, proc_node)
+		print_binder_proc(m, proc, 0);
+	if (do_lock)
+		binder_unlock(__func__);
+	return 0;
+}
+
+static int binder_proc_show(struct seq_file *m, void *unused)
+{
+	struct binder_proc *itr;
+	struct binder_proc *proc = m->private;
+	int do_lock = !binder_debug_no_lock;
+	bool valid_proc = false;
+
+	if (do_lock)
+		binder_lock(__func__);
+
+	hlist_for_each_entry(itr, &binder_procs, proc_node) {
+		if (itr == proc) {
+			valid_proc = true;
+			break;
+		}
+	}
+	if (valid_proc) {
+		seq_puts(m, "binder proc state:\n");
+		print_binder_proc(m, proc, 1);
+	}
+	if (do_lock)
+		binder_unlock(__func__);
+	return 0;
+}
+
+static void print_binder_transaction_log_entry(struct seq_file *m,
+					struct binder_transaction_log_entry *e)
+{
+	seq_printf(m,
+		   "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
+		   e->debug_id, (e->call_type == 2) ? "reply" :
+		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
+		   e->from_thread, e->to_proc, e->to_thread, e->to_node,
+		   e->target_handle, e->data_size, e->offsets_size);
+}
+
+static int binder_transaction_log_show(struct seq_file *m, void *unused)
+{
+	struct binder_transaction_log *log = m->private;
+	int i;
+
+	if (log->full) {
+		for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
+			print_binder_transaction_log_entry(m, &log->entry[i]);
+	}
+	for (i = 0; i < log->next; i++)
+		print_binder_transaction_log_entry(m, &log->entry[i]);
+	return 0;
+}
+
+static const struct file_operations binder_fops = {
+	.owner = THIS_MODULE,
+	.poll = binder_poll,
+	.unlocked_ioctl = binder_ioctl,
+	.compat_ioctl = binder_ioctl,
+	.mmap = binder_mmap,
+	.open = binder_open,
+	.flush = binder_flush,
+	.release = binder_release,
+};
+
+static struct miscdevice binder_miscdev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "binder",
+	.fops = &binder_fops
+};
+
+BINDER_DEBUG_ENTRY(state);
+BINDER_DEBUG_ENTRY(stats);
+BINDER_DEBUG_ENTRY(transactions);
+BINDER_DEBUG_ENTRY(transaction_log);
+
+static int __init binder_init(void)
+{
+	int ret;
+
+	binder_deferred_workqueue = create_singlethread_workqueue("binder");
+	if (!binder_deferred_workqueue)
+		return -ENOMEM;
+
+	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
+	if (binder_debugfs_dir_entry_root)
+		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
+						 binder_debugfs_dir_entry_root);
+	ret = misc_register(&binder_miscdev);
+	if (binder_debugfs_dir_entry_root) {
+		debugfs_create_file("state",
+				    S_IRUGO,
+				    binder_debugfs_dir_entry_root,
+				    NULL,
+				    &binder_state_fops);
+		debugfs_create_file("stats",
+				    S_IRUGO,
+				    binder_debugfs_dir_entry_root,
+				    NULL,
+				    &binder_stats_fops);
+		debugfs_create_file("transactions",
+				    S_IRUGO,
+				    binder_debugfs_dir_entry_root,
+				    NULL,
+				    &binder_transactions_fops);
+		debugfs_create_file("transaction_log",
+				    S_IRUGO,
+				    binder_debugfs_dir_entry_root,
+				    &binder_transaction_log,
+				    &binder_transaction_log_fops);
+		debugfs_create_file("failed_transaction_log",
+				    S_IRUGO,
+				    binder_debugfs_dir_entry_root,
+				    &binder_transaction_log_failed,
+				    &binder_transaction_log_fops);
+	}
+	return ret;
+}
+
+device_initcall(binder_init);
+
+#define CREATE_TRACE_POINTS
+#include "binder_trace.h"
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
new file mode 100644
index 0000000..7f20f3d
--- /dev/null
+++ b/drivers/android/binder_trace.h
@@ -0,0 +1,329 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM binder
+
+#if !defined(_BINDER_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _BINDER_TRACE_H
+
+#include <linux/tracepoint.h>
+
+struct binder_buffer;
+struct binder_node;
+struct binder_proc;
+struct binder_ref;
+struct binder_thread;
+struct binder_transaction;
+
+TRACE_EVENT(binder_ioctl,
+	TP_PROTO(unsigned int cmd, unsigned long arg),
+	TP_ARGS(cmd, arg),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cmd)
+		__field(unsigned long, arg)
+	),
+	TP_fast_assign(
+		__entry->cmd = cmd;
+		__entry->arg = arg;
+	),
+	TP_printk("cmd=0x%x arg=0x%lx", __entry->cmd, __entry->arg)
+);
+
+DECLARE_EVENT_CLASS(binder_lock_class,
+	TP_PROTO(const char *tag),
+	TP_ARGS(tag),
+	TP_STRUCT__entry(
+		__field(const char *, tag)
+	),
+	TP_fast_assign(
+		__entry->tag = tag;
+	),
+	TP_printk("tag=%s", __entry->tag)
+);
+
+#define DEFINE_BINDER_LOCK_EVENT(name)	\
+DEFINE_EVENT(binder_lock_class, name,	\
+	TP_PROTO(const char *func), \
+	TP_ARGS(func))
+
+DEFINE_BINDER_LOCK_EVENT(binder_lock);
+DEFINE_BINDER_LOCK_EVENT(binder_locked);
+DEFINE_BINDER_LOCK_EVENT(binder_unlock);
+
+DECLARE_EVENT_CLASS(binder_function_return_class,
+	TP_PROTO(int ret),
+	TP_ARGS(ret),
+	TP_STRUCT__entry(
+		__field(int, ret)
+	),
+	TP_fast_assign(
+		__entry->ret = ret;
+	),
+	TP_printk("ret=%d", __entry->ret)
+);
+
+#define DEFINE_BINDER_FUNCTION_RETURN_EVENT(name)	\
+DEFINE_EVENT(binder_function_return_class, name,	\
+	TP_PROTO(int ret), \
+	TP_ARGS(ret))
+
+DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_ioctl_done);
+DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_write_done);
+DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_read_done);
+
+TRACE_EVENT(binder_wait_for_work,
+	TP_PROTO(bool proc_work, bool transaction_stack, bool thread_todo),
+	TP_ARGS(proc_work, transaction_stack, thread_todo),
+
+	TP_STRUCT__entry(
+		__field(bool, proc_work)
+		__field(bool, transaction_stack)
+		__field(bool, thread_todo)
+	),
+	TP_fast_assign(
+		__entry->proc_work = proc_work;
+		__entry->transaction_stack = transaction_stack;
+		__entry->thread_todo = thread_todo;
+	),
+	TP_printk("proc_work=%d transaction_stack=%d thread_todo=%d",
+		  __entry->proc_work, __entry->transaction_stack,
+		  __entry->thread_todo)
+);
+
+TRACE_EVENT(binder_transaction,
+	TP_PROTO(bool reply, struct binder_transaction *t,
+		 struct binder_node *target_node),
+	TP_ARGS(reply, t, target_node),
+	TP_STRUCT__entry(
+		__field(int, debug_id)
+		__field(int, target_node)
+		__field(int, to_proc)
+		__field(int, to_thread)
+		__field(int, reply)
+		__field(unsigned int, code)
+		__field(unsigned int, flags)
+	),
+	TP_fast_assign(
+		__entry->debug_id = t->debug_id;
+		__entry->target_node = target_node ? target_node->debug_id : 0;
+		__entry->to_proc = t->to_proc->pid;
+		__entry->to_thread = t->to_thread ? t->to_thread->pid : 0;
+		__entry->reply = reply;
+		__entry->code = t->code;
+		__entry->flags = t->flags;
+	),
+	TP_printk("transaction=%d dest_node=%d dest_proc=%d dest_thread=%d reply=%d flags=0x%x code=0x%x",
+		  __entry->debug_id, __entry->target_node,
+		  __entry->to_proc, __entry->to_thread,
+		  __entry->reply, __entry->flags, __entry->code)
+);
+
+TRACE_EVENT(binder_transaction_received,
+	TP_PROTO(struct binder_transaction *t),
+	TP_ARGS(t),
+
+	TP_STRUCT__entry(
+		__field(int, debug_id)
+	),
+	TP_fast_assign(
+		__entry->debug_id = t->debug_id;
+	),
+	TP_printk("transaction=%d", __entry->debug_id)
+);
+
+TRACE_EVENT(binder_transaction_node_to_ref,
+	TP_PROTO(struct binder_transaction *t, struct binder_node *node,
+		 struct binder_ref *ref),
+	TP_ARGS(t, node, ref),
+
+	TP_STRUCT__entry(
+		__field(int, debug_id)
+		__field(int, node_debug_id)
+		__field(binder_uintptr_t, node_ptr)
+		__field(int, ref_debug_id)
+		__field(uint32_t, ref_desc)
+	),
+	TP_fast_assign(
+		__entry->debug_id = t->debug_id;
+		__entry->node_debug_id = node->debug_id;
+		__entry->node_ptr = node->ptr;
+		__entry->ref_debug_id = ref->debug_id;
+		__entry->ref_desc = ref->desc;
+	),
+	TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d",
+		  __entry->debug_id, __entry->node_debug_id,
+		  (u64)__entry->node_ptr,
+		  __entry->ref_debug_id, __entry->ref_desc)
+);
+
+TRACE_EVENT(binder_transaction_ref_to_node,
+	TP_PROTO(struct binder_transaction *t, struct binder_ref *ref),
+	TP_ARGS(t, ref),
+
+	TP_STRUCT__entry(
+		__field(int, debug_id)
+		__field(int, ref_debug_id)
+		__field(uint32_t, ref_desc)
+		__field(int, node_debug_id)
+		__field(binder_uintptr_t, node_ptr)
+	),
+	TP_fast_assign(
+		__entry->debug_id = t->debug_id;
+		__entry->ref_debug_id = ref->debug_id;
+		__entry->ref_desc = ref->desc;
+		__entry->node_debug_id = ref->node->debug_id;
+		__entry->node_ptr = ref->node->ptr;
+	),
+	TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx",
+		  __entry->debug_id, __entry->node_debug_id,
+		  __entry->ref_debug_id, __entry->ref_desc,
+		  (u64)__entry->node_ptr)
+);
+
+TRACE_EVENT(binder_transaction_ref_to_ref,
+	TP_PROTO(struct binder_transaction *t, struct binder_ref *src_ref,
+		 struct binder_ref *dest_ref),
+	TP_ARGS(t, src_ref, dest_ref),
+
+	TP_STRUCT__entry(
+		__field(int, debug_id)
+		__field(int, node_debug_id)
+		__field(int, src_ref_debug_id)
+		__field(uint32_t, src_ref_desc)
+		__field(int, dest_ref_debug_id)
+		__field(uint32_t, dest_ref_desc)
+	),
+	TP_fast_assign(
+		__entry->debug_id = t->debug_id;
+		__entry->node_debug_id = src_ref->node->debug_id;
+		__entry->src_ref_debug_id = src_ref->debug_id;
+		__entry->src_ref_desc = src_ref->desc;
+		__entry->dest_ref_debug_id = dest_ref->debug_id;
+		__entry->dest_ref_desc = dest_ref->desc;
+	),
+	TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ref=%d dest_desc=%d",
+		  __entry->debug_id, __entry->node_debug_id,
+		  __entry->src_ref_debug_id, __entry->src_ref_desc,
+		  __entry->dest_ref_debug_id, __entry->dest_ref_desc)
+);
+
+TRACE_EVENT(binder_transaction_fd,
+	TP_PROTO(struct binder_transaction *t, int src_fd, int dest_fd),
+	TP_ARGS(t, src_fd, dest_fd),
+
+	TP_STRUCT__entry(
+		__field(int, debug_id)
+		__field(int, src_fd)
+		__field(int, dest_fd)
+	),
+	TP_fast_assign(
+		__entry->debug_id = t->debug_id;
+		__entry->src_fd = src_fd;
+		__entry->dest_fd = dest_fd;
+	),
+	TP_printk("transaction=%d src_fd=%d ==> dest_fd=%d",
+		  __entry->debug_id, __entry->src_fd, __entry->dest_fd)
+);
+
+DECLARE_EVENT_CLASS(binder_buffer_class,
+	TP_PROTO(struct binder_buffer *buf),
+	TP_ARGS(buf),
+	TP_STRUCT__entry(
+		__field(int, debug_id)
+		__field(size_t, data_size)
+		__field(size_t, offsets_size)
+	),
+	TP_fast_assign(
+		__entry->debug_id = buf->debug_id;
+		__entry->data_size = buf->data_size;
+		__entry->offsets_size = buf->offsets_size;
+	),
+	TP_printk("transaction=%d data_size=%zd offsets_size=%zd",
+		  __entry->debug_id, __entry->data_size, __entry->offsets_size)
+);
+
+DEFINE_EVENT(binder_buffer_class, binder_transaction_alloc_buf,
+	TP_PROTO(struct binder_buffer *buffer),
+	TP_ARGS(buffer));
+
+DEFINE_EVENT(binder_buffer_class, binder_transaction_buffer_release,
+	TP_PROTO(struct binder_buffer *buffer),
+	TP_ARGS(buffer));
+
+DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release,
+	TP_PROTO(struct binder_buffer *buffer),
+	TP_ARGS(buffer));
+
+TRACE_EVENT(binder_update_page_range,
+	TP_PROTO(struct binder_proc *proc, bool allocate,
+		 void *start, void *end),
+	TP_ARGS(proc, allocate, start, end),
+	TP_STRUCT__entry(
+		__field(int, proc)
+		__field(bool, allocate)
+		__field(size_t, offset)
+		__field(size_t, size)
+	),
+	TP_fast_assign(
+		__entry->proc = proc->pid;
+		__entry->allocate = allocate;
+		__entry->offset = start - proc->buffer;
+		__entry->size = end - start;
+	),
+	TP_printk("proc=%d allocate=%d offset=%zu size=%zu",
+		  __entry->proc, __entry->allocate,
+		  __entry->offset, __entry->size)
+);
+
+TRACE_EVENT(binder_command,
+	TP_PROTO(uint32_t cmd),
+	TP_ARGS(cmd),
+	TP_STRUCT__entry(
+		__field(uint32_t, cmd)
+	),
+	TP_fast_assign(
+		__entry->cmd = cmd;
+	),
+	TP_printk("cmd=0x%x %s",
+		  __entry->cmd,
+		  _IOC_NR(__entry->cmd) < ARRAY_SIZE(binder_command_strings) ?
+			  binder_command_strings[_IOC_NR(__entry->cmd)] :
+			  "unknown")
+);
+
+TRACE_EVENT(binder_return,
+	TP_PROTO(uint32_t cmd),
+	TP_ARGS(cmd),
+	TP_STRUCT__entry(
+		__field(uint32_t, cmd)
+	),
+	TP_fast_assign(
+		__entry->cmd = cmd;
+	),
+	TP_printk("cmd=0x%x %s",
+		  __entry->cmd,
+		  _IOC_NR(__entry->cmd) < ARRAY_SIZE(binder_return_strings) ?
+			  binder_return_strings[_IOC_NR(__entry->cmd)] :
+			  "unknown")
+);
+
+#endif /* _BINDER_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE binder_trace
+#include <trace/define_trace.h>
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 07abd9d..9634800 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -187,6 +187,14 @@
 	bool
 	default n
 
+config HAVE_CPU_AUTOPROBE
+	def_bool ARCH_HAS_CPU_AUTOPROBE
+
+config GENERIC_CPU_AUTOPROBE
+	bool
+	depends on !ARCH_HAS_CPU_AUTOPROBE
+	select HAVE_CPU_AUTOPROBE
+
 config SOC_BUS
 	bool
 
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 3d48fc8..b566850 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -13,6 +13,7 @@
 #include <linux/gfp.h>
 #include <linux/slab.h>
 #include <linux/percpu.h>
+#include <linux/cpufeature.h>
 
 #include "base.h"
 
@@ -260,6 +261,45 @@
 	 */
 }
 
+#ifdef CONFIG_HAVE_CPU_AUTOPROBE
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+static ssize_t print_cpu_modalias(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	ssize_t n;
+	u32 i;
+
+	n = sprintf(buf, "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:",
+		    CPU_FEATURE_TYPEVAL);
+
+	for (i = 0; i < MAX_CPU_FEATURES; i++)
+		if (cpu_have_feature(i)) {
+			if (PAGE_SIZE < n + sizeof(",XXXX\n")) {
+				WARN(1, "CPU features overflow page\n");
+				break;
+			}
+			n += sprintf(&buf[n], ",%04X", i);
+		}
+	buf[n++] = '\n';
+	return n;
+}
+#else
+#define print_cpu_modalias	arch_print_cpu_modalias
+#endif
+
+static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (buf) {
+		print_cpu_modalias(NULL, NULL, buf);
+		add_uevent_var(env, "MODALIAS=%s", buf);
+		kfree(buf);
+	}
+	return 0;
+}
+#endif
+
 /*
  * register_cpu - Setup a sysfs device for a CPU.
  * @cpu - cpu->hotpluggable field set to 1 will generate a control file in
@@ -278,7 +318,7 @@
 	cpu->dev.bus = &cpu_subsys;
 	cpu->dev.release = cpu_device_release;
 #ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
-	cpu->dev.bus->uevent = arch_cpu_uevent;
+	cpu->dev.bus->uevent = cpu_uevent;
 #endif
 	error = device_register(&cpu->dev);
 	if (!error && cpu->hotpluggable)
@@ -307,8 +347,8 @@
 }
 EXPORT_SYMBOL_GPL(get_cpu_device);
 
-#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
-static DEVICE_ATTR(modalias, 0444, arch_print_cpu_modalias, NULL);
+#ifdef CONFIG_HAVE_CPU_AUTOPROBE
+static DEVICE_ATTR(modalias, 0444, print_cpu_modalias, NULL);
 #endif
 
 static struct attribute *cpu_root_attrs[] = {
@@ -321,7 +361,7 @@
 	&cpu_attrs[2].attr.attr,
 	&dev_attr_kernel_max.attr,
 	&dev_attr_offline.attr,
-#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
+#ifdef CONFIG_HAVE_CPU_AUTOPROBE
 	&dev_attr_modalias.attr,
 #endif
 	NULL
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index bc256b6..6ec0b77 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -1,3 +1,4 @@
+#if !defined(__i386__) && !defined(__amd64__)
 /*
  * Coherent per-device memory handling.
  * Borrowed from i386
@@ -218,3 +219,228 @@
 	return 0;
 }
 EXPORT_SYMBOL(dma_mmap_from_coherent);
+#else
+/* CMA is a wretched hive of scum and villainy --- and also doesn't
+ * compile on x86 */
+
+/*
+ * Coherent per-device memory handling.
+ * Borrowed from i386
+ */
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+
+struct dma_coherent_mem {
+	void		*virt_base;
+	dma_addr_t	device_base;
+	phys_addr_t	pfn_base;
+	int		size;
+	int		flags;
+	unsigned long	*bitmap;
+};
+
+int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+				dma_addr_t device_addr, size_t size, int flags)
+{
+	void __iomem *mem_base = NULL;
+	int pages = size >> PAGE_SHIFT;
+	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
+
+	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
+		goto out;
+	if (!size)
+		goto out;
+	if (dev->dma_mem)
+		goto out;
+
+	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
+
+	mem_base = ioremap(bus_addr, size);
+	if (!mem_base)
+		goto out;
+
+	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
+	if (!dev->dma_mem)
+		goto out;
+	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!dev->dma_mem->bitmap)
+		goto free1_out;
+
+	dev->dma_mem->virt_base = mem_base;
+	dev->dma_mem->device_base = device_addr;
+	dev->dma_mem->pfn_base = PFN_DOWN(bus_addr);
+	dev->dma_mem->size = pages;
+	dev->dma_mem->flags = flags;
+
+	if (flags & DMA_MEMORY_MAP)
+		return DMA_MEMORY_MAP;
+
+	return DMA_MEMORY_IO;
+
+ free1_out:
+	kfree(dev->dma_mem);
+ out:
+	if (mem_base)
+		iounmap(mem_base);
+	return 0;
+}
+EXPORT_SYMBOL(dma_declare_coherent_memory);
+
+void dma_release_declared_memory(struct device *dev)
+{
+	struct dma_coherent_mem *mem = dev->dma_mem;
+
+	if (!mem)
+		return;
+	dev->dma_mem = NULL;
+	iounmap(mem->virt_base);
+	kfree(mem->bitmap);
+	kfree(mem);
+}
+EXPORT_SYMBOL(dma_release_declared_memory);
+
+void *dma_mark_declared_memory_occupied(struct device *dev,
+					dma_addr_t device_addr, size_t size)
+{
+	struct dma_coherent_mem *mem = dev->dma_mem;
+	int pos, err;
+
+	size += device_addr & ~PAGE_MASK;
+
+	if (!mem)
+		return ERR_PTR(-EINVAL);
+
+	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
+	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
+	if (err != 0)
+		return ERR_PTR(err);
+	return mem->virt_base + (pos << PAGE_SHIFT);
+}
+EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
+
+/**
+ * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
+ *
+ * @dev:	device from which we allocate memory
+ * @size:	size of requested memory area
+ * @dma_handle:	This will be filled with the correct dma handle
+ * @ret:	This pointer will be filled with the virtual address
+ *		to allocated area.
+ *
+ * This function should be only called from per-arch dma_alloc_coherent()
+ * to support allocation from per-device coherent memory pools.
+ *
+ * Returns 0 if dma_alloc_coherent should continue with allocating from
+ * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
+ */
+int dma_alloc_from_coherent(struct device *dev, ssize_t size,
+				       dma_addr_t *dma_handle, void **ret)
+{
+	struct dma_coherent_mem *mem;
+	int order = get_order(size);
+	int pageno;
+
+	if (!dev)
+		return 0;
+	mem = dev->dma_mem;
+	if (!mem)
+		return 0;
+
+	*ret = NULL;
+
+	if (unlikely(size > (mem->size << PAGE_SHIFT)))
+		goto err;
+
+	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
+	if (unlikely(pageno < 0))
+		goto err;
+
+	/*
+	 * Memory was found in the per-device area.
+	 */
+	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
+	memset(*ret, 0, size);
+
+	return 1;
+
+err:
+	/*
+	 * In the case where the allocation can not be satisfied from the
+	 * per-device area, try to fall back to generic memory if the
+	 * constraints allow it.
+	 */
+	return mem->flags & DMA_MEMORY_EXCLUSIVE;
+}
+EXPORT_SYMBOL(dma_alloc_from_coherent);
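+
+/*
+ * Illustrative sketch (editor's example, not part of this patch): an
+ * arch-level dma_alloc_coherent() is expected to try the per-device
+ * pool first and fall back to its generic allocator only when this
+ * helper returns 0.  my_arch_dma_alloc() and my_generic_alloc() are
+ * hypothetical names.
+ *
+ *	static void *my_arch_dma_alloc(struct device *dev, size_t size,
+ *				       dma_addr_t *handle, gfp_t gfp)
+ *	{
+ *		void *ret;
+ *
+ *		if (dma_alloc_from_coherent(dev, size, handle, &ret))
+ *			return ret;	// NULL if an exclusive pool is full
+ *		return my_generic_alloc(dev, size, handle, gfp);
+ *	}
+ */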
+
+/**
+ * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
+ * @dev:	device from which the memory was allocated
+ * @order:	the order of pages allocated
+ * @vaddr:	virtual address of allocated pages
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, releases that memory.
+ *
+ * Returns 1 if we correctly released the memory, or 0 if
+ * dma_release_coherent() should proceed with releasing memory from
+ * generic pools.
+ */
+int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
+{
+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+	if (mem && vaddr >= mem->virt_base && vaddr <
+		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+
+		bitmap_release_region(mem->bitmap, page, order);
+		return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(dma_release_from_coherent);
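+
+/*
+ * Illustrative sketch (editor's example, not part of this patch): the
+ * matching arch dma_free_coherent() checks the per-device pool before
+ * handing the buffer back to the generic allocator.  my_arch_dma_free()
+ * is a hypothetical name.
+ *
+ *	static void my_arch_dma_free(struct device *dev, size_t size,
+ *				     void *vaddr, dma_addr_t handle)
+ *	{
+ *		int order = get_order(size);
+ *
+ *		if (dma_release_from_coherent(dev, order, vaddr))
+ *			return;		// buffer came from the device pool
+ *		free_pages((unsigned long)vaddr, order);
+ *	}
+ */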
+
+/**
+ * dma_mmap_from_coherent() - try to mmap the memory allocated from
+ * per-device coherent memory pool to userspace
+ * @dev:	device from which the memory was allocated
+ * @vma:	vm_area for the userspace memory
+ * @vaddr:	cpu address returned by dma_alloc_from_coherent
+ * @size:	size of the memory buffer allocated by dma_alloc_from_coherent
+ * @ret:	result from remap_pfn_range()
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+ *
+ * Returns 1 if we correctly mapped the memory, or 0 if the caller should
+ * proceed with mapping memory from generic pools.
+ */
+int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+			   void *vaddr, size_t size, int *ret)
+{
+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+	if (mem && vaddr >= mem->virt_base && vaddr + size <=
+		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+		unsigned long off = vma->vm_pgoff;
+		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+		int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+		int count = size >> PAGE_SHIFT;
+
+		*ret = -ENXIO;
+		if (off < count && user_count <= count - off) {
+			unsigned pfn = mem->pfn_base + start + off;
+			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
+					       user_count << PAGE_SHIFT,
+					       vma->vm_page_prot);
+		}
+		return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(dma_mmap_from_coherent);
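+
+/*
+ * Illustrative sketch (editor's example, not part of this patch): an
+ * arch mmap hook forwards to this helper and only maps generic memory
+ * itself when the helper returns 0.  my_arch_dma_mmap() and
+ * my_generic_dma_mmap() are hypothetical names.
+ *
+ *	static int my_arch_dma_mmap(struct device *dev,
+ *				    struct vm_area_struct *vma,
+ *				    void *cpu_addr, dma_addr_t handle,
+ *				    size_t size)
+ *	{
+ *		int ret;
+ *
+ *		if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ *			return ret;	// handled by the device pool
+ *		return my_generic_dma_mmap(dev, vma, cpu_addr, handle, size);
+ *	}
+ */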
+#endif
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 8e08fab..30d575a 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -27,6 +27,7 @@
 #include <linux/pm.h>
 #include <linux/suspend.h>
 #include <linux/syscore_ops.h>
+#include <linux/reboot.h>
 
 #include <generated/utsrelease.h>
 
@@ -130,6 +131,7 @@
 	struct page **pages;
 	int nr_pages;
 	int page_array_size;
+	struct list_head pending_list;
 #endif
 	char fw_id[];
 };
@@ -171,6 +173,9 @@
 	strcpy(buf->fw_id, fw_name);
 	buf->fwc = fwc;
 	init_completion(&buf->completion);
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+	INIT_LIST_HEAD(&buf->pending_list);
+#endif
 
 	pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);
 
@@ -446,10 +451,8 @@
 	return container_of(dev, struct firmware_priv, dev);
 }
 
-static void fw_load_abort(struct firmware_priv *fw_priv)
+static void __fw_load_abort(struct firmware_buf *buf)
 {
-	struct firmware_buf *buf = fw_priv->buf;
-
 	/*
 	 * There is a small window in which user can write to 'loading'
 	 * between loading done and disappearance of 'loading'
@@ -457,8 +460,16 @@
 	if (test_bit(FW_STATUS_DONE, &buf->status))
 		return;
 
+	list_del_init(&buf->pending_list);
 	set_bit(FW_STATUS_ABORT, &buf->status);
 	complete_all(&buf->completion);
+}
+
+static void fw_load_abort(struct firmware_priv *fw_priv)
+{
+	struct firmware_buf *buf = fw_priv->buf;
+
+	__fw_load_abort(buf);
 
 	/* avoid user action after loading abort */
 	fw_priv->buf = NULL;
@@ -467,6 +478,25 @@
 #define is_fw_load_aborted(buf)	\
 	test_bit(FW_STATUS_ABORT, &(buf)->status)
 
+static LIST_HEAD(pending_fw_head);
+
+/* reboot notifier to avoid deadlock with usermode_lock */
+static int fw_shutdown_notify(struct notifier_block *unused1,
+			      unsigned long unused2, void *unused3)
+{
+	mutex_lock(&fw_lock);
+	while (!list_empty(&pending_fw_head))
+		__fw_load_abort(list_first_entry(&pending_fw_head,
+					       struct firmware_buf,
+					       pending_list));
+	mutex_unlock(&fw_lock);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block fw_shutdown_nb = {
+	.notifier_call = fw_shutdown_notify,
+};
+
 static ssize_t firmware_timeout_show(struct class *class,
 				     struct class_attribute *attr,
 				     char *buf)
@@ -629,6 +659,7 @@
 			 * is completed.
 			 * */
 			fw_map_pages_buf(fw_buf);
+			list_del_init(&fw_buf->pending_list);
 			complete_all(&fw_buf->completion);
 			break;
 		}
@@ -863,8 +894,15 @@
 		goto err_del_dev;
 	}
 
+	mutex_lock(&fw_lock);
+	list_add(&buf->pending_list, &pending_fw_head);
+	mutex_unlock(&fw_lock);
+
 	retval = device_create_file(f_dev, &dev_attr_loading);
 	if (retval) {
+		mutex_lock(&fw_lock);
+		list_del_init(&buf->pending_list);
+		mutex_unlock(&fw_lock);
 		dev_err(f_dev, "%s: device_create_file failed\n", __func__);
 		goto err_del_bin_attr;
 	}
@@ -1539,6 +1577,7 @@
 {
 	fw_cache_init();
 #ifdef CONFIG_FW_LOADER_USER_HELPER
+	register_reboot_notifier(&fw_shutdown_nb);
 	return class_register(&firmware_class);
 #else
 	return 0;
@@ -1552,6 +1591,7 @@
 	unregister_pm_notifier(&fw_cache.pm_notify);
 #endif
 #ifdef CONFIG_FW_LOADER_USER_HELPER
+	unregister_reboot_notifier(&fw_shutdown_nb);
 	class_unregister(&firmware_class);
 #endif
 }
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 5a9b656..5131ad8 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -29,6 +29,9 @@
 #include <linux/async.h>
 #include <linux/suspend.h>
 #include <linux/cpuidle.h>
+#include <linux/timer.h>
+#include <linux/wakeup_reason.h>
+
 #include "../base.h"
 #include "power.h"
 
@@ -54,6 +57,12 @@
 static DEFINE_MUTEX(dpm_list_mtx);
 static pm_message_t pm_transition;
 
+struct dpm_watchdog {
+	struct device		*dev;
+	struct task_struct	*tsk;
+	struct timer_list	timer;
+};
+
 static int async_error;
 
 /**
@@ -384,6 +393,56 @@
 	return error;
 }
 
+/**
+ * dpm_wd_handler - Driver suspend / resume watchdog handler.
+ *
+ * Called when a driver has timed out suspending or resuming.
+ * There's not much we can do here to recover, so BUG() out for
+ * a crash dump.
+ */
+static void dpm_wd_handler(unsigned long data)
+{
+	struct dpm_watchdog *wd = (void *)data;
+	struct device *dev      = wd->dev;
+	struct task_struct *tsk = wd->tsk;
+
+	dev_emerg(dev, "**** DPM device timeout ****\n");
+	show_stack(tsk, NULL);
+
+	BUG();
+}
+
+/**
+ * dpm_wd_set - Enable pm watchdog for given device.
+ * @wd: Watchdog. Must be allocated on the stack.
+ * @dev: Device to handle.
+ */
+static void dpm_wd_set(struct dpm_watchdog *wd, struct device *dev)
+{
+	struct timer_list *timer = &wd->timer;
+
+	wd->dev = dev;
+	wd->tsk = get_current();
+
+	init_timer_on_stack(timer);
+	timer->expires = jiffies + HZ * 12;
+	timer->function = dpm_wd_handler;
+	timer->data = (unsigned long)wd;
+	add_timer(timer);
+}
+
+/**
+ * dpm_wd_clear - Disable pm watchdog.
+ * @wd: Watchdog to disable.
+ */
+static void dpm_wd_clear(struct dpm_watchdog *wd)
+{
+	struct timer_list *timer = &wd->timer;
+
+	del_timer_sync(timer);
+	destroy_timer_on_stack(timer);
+}
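+
+/*
+ * Illustrative sketch (editor's example, not part of this patch): the
+ * watchdog brackets a potentially slow suspend or resume callback, with
+ * the struct on the stack as dpm_wd_set() requires.  slow_pm_callback()
+ * is a hypothetical name.
+ *
+ *	struct dpm_watchdog wd;
+ *
+ *	dpm_wd_set(&wd, dev);
+ *	error = slow_pm_callback(dev);	// BUG()s if this takes over 12 s
+ *	dpm_wd_clear(&wd);
+ */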
+
 /*------------------------- Resume routines -------------------------*/
 
 /**
@@ -570,6 +629,7 @@
 	pm_callback_t callback = NULL;
 	char *info = NULL;
 	int error = 0;
+	struct dpm_watchdog wd;
 
 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);
@@ -585,6 +645,7 @@
 	 * a resumed device, even if the device hasn't been completed yet.
 	 */
 	dev->power.is_prepared = false;
+	dpm_wd_set(&wd, dev);
 
 	if (!dev->power.is_suspended)
 		goto Unlock;
@@ -636,6 +697,7 @@
 
  Unlock:
 	device_unlock(dev);
+	dpm_wd_clear(&wd);
 
  Complete:
 	complete_all(&dev->power.completion);
@@ -877,6 +939,7 @@
 static int dpm_suspend_noirq(pm_message_t state)
 {
 	ktime_t starttime = ktime_get();
+	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
 	int error = 0;
 
 	cpuidle_pause();
@@ -904,6 +967,9 @@
 		put_device(dev);
 
 		if (pm_wakeup_pending()) {
+			pm_get_active_wakeup_sources(suspend_abort,
+				MAX_SUSPEND_ABORT_LEN);
+			log_suspend_abort_reason(suspend_abort);
 			error = -EBUSY;
 			break;
 		}
@@ -962,6 +1028,7 @@
 static int dpm_suspend_late(pm_message_t state)
 {
 	ktime_t starttime = ktime_get();
+	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
 	int error = 0;
 
 	mutex_lock(&dpm_list_mtx);
@@ -987,6 +1054,9 @@
 		put_device(dev);
 
 		if (pm_wakeup_pending()) {
+			pm_get_active_wakeup_sources(suspend_abort,
+				MAX_SUSPEND_ABORT_LEN);
+			log_suspend_abort_reason(suspend_abort);
 			error = -EBUSY;
 			break;
 		}
@@ -1053,6 +1123,8 @@
 	pm_callback_t callback = NULL;
 	char *info = NULL;
 	int error = 0;
+	struct dpm_watchdog wd;
+	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
 
 	dpm_wait_for_children(dev, async);
 
@@ -1069,12 +1141,17 @@
 		pm_wakeup_event(dev, 0);
 
 	if (pm_wakeup_pending()) {
+		pm_get_active_wakeup_sources(suspend_abort,
+			MAX_SUSPEND_ABORT_LEN);
+		log_suspend_abort_reason(suspend_abort);
 		async_error = -EBUSY;
 		goto Complete;
 	}
 
 	if (dev->power.syscore)
 		goto Complete;
+
+	dpm_wd_set(&wd, dev);
 
 	device_lock(dev);
 
@@ -1131,6 +1208,8 @@
 
 	device_unlock(dev);
 
+	dpm_wd_clear(&wd);
+
  Complete:
 	complete_all(&dev->power.completion);
 	if (error)
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 79715e7..ebe6c73 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -14,6 +14,7 @@
 #include <linux/suspend.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
+#include <linux/types.h>
 #include <trace/events/power.h>
 
 #include "power.h"
@@ -53,6 +54,8 @@
 
 static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
 
+static ktime_t last_read_time;
+
 /**
  * wakeup_source_prepare - Prepare a new wakeup source for initialization.
  * @ws: Wakeup source to prepare.
@@ -342,6 +345,20 @@
 }
 EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
 
+/**
+ * wakeup_source_not_registered - validate the given wakeup source.
+ * @ws: Wakeup source to be validated.
+ */
+static bool wakeup_source_not_registered(struct wakeup_source *ws)
+{
+	/*
+	 * Use timer struct to check if the given source is initialized
+	 * by wakeup_source_add.
+	 */
+	return ws->timer.function != pm_wakeup_timer_fn ||
+		   ws->timer.data != (unsigned long)ws;
+}
+
 /*
  * The functions below use the observation that each wakeup event starts a
  * period in which the system should not be suspended.  The moment this period
@@ -382,6 +399,10 @@
 {
 	unsigned int cec;
 
+	if (WARN(wakeup_source_not_registered(ws),
+			"unregistered wakeup source\n"))
+		return;
+
 	/*
 	 * active wakeup source should bring the system
 	 * out of PM_SUSPEND_FREEZE state
@@ -659,6 +680,37 @@
 }
 EXPORT_SYMBOL_GPL(pm_wakeup_event);
 
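+/**
+ * pm_get_active_wakeup_sources - report which wakeup sources block suspend.
+ * @pending_wakeup_source: buffer the source names are formatted into.
+ * @max: size of the buffer.
+ *
+ * If any wakeup sources are currently active their names are listed;
+ * otherwise the name of the most recently active source is reported.
+ */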
+void pm_get_active_wakeup_sources(char *pending_wakeup_source, size_t max)
+{
+	struct wakeup_source *ws, *last_active_ws = NULL;
+	int len = 0;
+	bool active = false;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+		if (ws->active) {
+			if (!active)
+				len += scnprintf(pending_wakeup_source, max,
+						"Pending Wakeup Sources: ");
+			len += scnprintf(pending_wakeup_source + len, max - len,
+				"%s ", ws->name);
+			active = true;
+		} else if (!active &&
+			   (!last_active_ws ||
+			    ktime_to_ns(ws->last_time) >
+			    ktime_to_ns(last_active_ws->last_time))) {
+			last_active_ws = ws;
+		}
+	}
+	if (!active && last_active_ws) {
+		scnprintf(pending_wakeup_source, max,
+				"Last active Wakeup Source: %s",
+				last_active_ws->name);
+	}
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(pm_get_active_wakeup_sources);
+
 static void print_active_wakeup_sources(void)
 {
 	struct wakeup_source *ws;
@@ -728,10 +780,15 @@
 bool pm_get_wakeup_count(unsigned int *count, bool block)
 {
 	unsigned int cnt, inpr;
+	unsigned long flags;
 
 	if (block) {
 		DEFINE_WAIT(wait);
 
+		spin_lock_irqsave(&events_lock, flags);
+		last_read_time = ktime_get();
+		spin_unlock_irqrestore(&events_lock, flags);
+
 		for (;;) {
 			prepare_to_wait(&wakeup_count_wait_queue, &wait,
 					TASK_INTERRUPTIBLE);
@@ -763,6 +820,7 @@
 {
 	unsigned int cnt, inpr;
 	unsigned long flags;
+	struct wakeup_source *ws;
 
 	events_check_enabled = false;
 	spin_lock_irqsave(&events_lock, flags);
@@ -770,6 +828,15 @@
 	if (cnt == count && inpr == 0) {
 		saved_count = count;
 		events_check_enabled = true;
+	} else {
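+		/*
+		 * A wakeup event raced with the count read: charge it to
+		 * every source that is active now or has been active since
+		 * the count was last read.
+		 */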
+		rcu_read_lock();
+		list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+			if (ws->active ||
+			    ktime_compare(ws->last_time, last_read_time) > 0) {
+				ws->wakeup_count++;
+			}
+		}
+		rcu_read_unlock();
 	}
 	spin_unlock_irqrestore(&events_lock, flags);
 	return events_check_enabled;
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index e8d11b6..0ab5465 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -10,6 +10,7 @@
 #include <linux/mutex.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
+#include <linux/wakeup_reason.h>
 
 static LIST_HEAD(syscore_ops_list);
 static DEFINE_MUTEX(syscore_ops_lock);
@@ -73,6 +74,8 @@
 	return 0;
 
  err_out:
+	log_suspend_abort_reason("System core suspend callback %pF failed",
+		ops->suspend);
 	pr_err("PM: System core suspend callback %pF failed.\n", ops->suspend);
 
 	list_for_each_entry_continue(ops, &syscore_ops_list, node)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 3bb6fa3..6fcb9b0 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -6,6 +6,19 @@
 
 source "drivers/tty/Kconfig"
 
+config DEVMEM
+	bool "Memory device driver"
+	default y
+	help
+	  The memory driver provides two character devices, mem and kmem, which
+	  provide access to the system's memory. The mem device is a view of
+	  physical memory, and each byte in the device corresponds to the
+	  matching physical address. The kmem device is the same as mem, but
+	  the addresses correspond to the kernel's virtual address space rather
+	  than physical memory. These devices are standard parts of a Linux
+	  system and most users should say Y here. You might say N if you
+	  are very security conscious or memory is tight.
+
 config DEVKMEM
 	bool "/dev/kmem virtual device support"
 	default y
@@ -584,6 +597,10 @@
 	depends on ISA || PCI
 	default y
 
+config DCC_TTY
+	tristate "DCC tty driver"
+	depends on ARM
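+	help
+	  TTY and console driver over the ARM JTAG Debug Communications
+	  Channel (DCC).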
+
 source "drivers/s390/char/Kconfig"
 
 config MSM_SMD_PKT
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 7ff1d0d..e0047ed 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -56,6 +56,7 @@
 obj-$(CONFIG_HANGCHECK_TIMER)	+= hangcheck-timer.o
 obj-$(CONFIG_TCG_TPM)		+= tpm/
 
+obj-$(CONFIG_DCC_TTY)		+= dcc_tty.o
 obj-$(CONFIG_PS3_FLASH)		+= ps3flash.o
 
 obj-$(CONFIG_JS_RTC)		+= js-rtc.o
diff --git a/drivers/char/dcc_tty.c b/drivers/char/dcc_tty.c
new file mode 100644
index 0000000..a787acc
--- /dev/null
+++ b/drivers/char/dcc_tty.c
@@ -0,0 +1,326 @@
+/* drivers/char/dcc_tty.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/console.h>
+#include <linux/hrtimer.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+MODULE_DESCRIPTION("DCC TTY Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+
+static DEFINE_SPINLOCK(g_dcc_tty_lock);
+static struct hrtimer g_dcc_timer;
+static char g_dcc_buffer[16];
+static int g_dcc_buffer_head;
+static int g_dcc_buffer_count;
+static unsigned g_dcc_write_delay_usecs = 1;
+static struct tty_driver *g_dcc_tty_driver;
+static struct tty_struct *g_dcc_tty;
+static int g_dcc_tty_open_count;
+
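+/*
+ * Drain the output ring buffer to the ARM DCC write register, poll the
+ * DCC status for pending input, and re-arm the polling hrtimer.  The
+ * caller must hold g_dcc_tty_lock.
+ */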
+static void dcc_poll_locked(void)
+{
+	char ch;
+	int rch;
+	int written;
+
+	while (g_dcc_buffer_count) {
+		ch = g_dcc_buffer[g_dcc_buffer_head];
+		asm(
+			"mrc 14, 0, r15, c0, c1, 0\n"
+			"mcrcc 14, 0, %1, c0, c5, 0\n"
+			"movcc %0, #1\n"
+			"movcs %0, #0\n"
+			: "=r" (written)
+			: "r" (ch)
+		);
+		if (written) {
+			if (ch == '\n')
+				g_dcc_buffer[g_dcc_buffer_head] = '\r';
+			else {
+				g_dcc_buffer_head = (g_dcc_buffer_head + 1) % ARRAY_SIZE(g_dcc_buffer);
+				g_dcc_buffer_count--;
+				if (g_dcc_tty)
+					tty_wakeup(g_dcc_tty);
+			}
+			g_dcc_write_delay_usecs = 1;
+		} else {
+			if (g_dcc_write_delay_usecs > 0x100)
+				break;
+			g_dcc_write_delay_usecs <<= 1;
+			udelay(g_dcc_write_delay_usecs);
+		}
+	}
+
+	if (g_dcc_tty && !test_bit(TTY_THROTTLED, &g_dcc_tty->flags)) {
+		asm(
+			"mrc 14, 0, %0, c0, c1, 0\n"
+			"tst %0, #(1 << 30)\n"
+			"moveq %0, #-1\n"
+			"mrcne 14, 0, %0, c0, c5, 0\n"
+			: "=r" (rch)
+		);
+		if (rch >= 0) {
+			ch = rch;
+			tty_insert_flip_string(g_dcc_tty, &ch, 1);
+			tty_flip_buffer_push(g_dcc_tty);
+		}
+	}
+
+	if (g_dcc_buffer_count)
+		hrtimer_start(&g_dcc_timer, ktime_set(0, g_dcc_write_delay_usecs * NSEC_PER_USEC), HRTIMER_MODE_REL);
+	else
+		hrtimer_start(&g_dcc_timer, ktime_set(0, 20 * NSEC_PER_MSEC), HRTIMER_MODE_REL);
+}
+
+static int dcc_tty_open(struct tty_struct * tty, struct file * filp)
+{
+	int ret;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+	if (g_dcc_tty == NULL || g_dcc_tty == tty) {
+		g_dcc_tty = tty;
+		g_dcc_tty_open_count++;
+		ret = 0;
+	} else
+		ret = -EBUSY;
+	spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+
+	printk("dcc_tty_open, tty %p, f_flags %x, returned %d\n", tty, filp->f_flags, ret);
+
+	return ret;
+}
+
+static void dcc_tty_close(struct tty_struct * tty, struct file * filp)
+{
+	printk("dcc_tty_close, tty %p, f_flags %x\n", tty, filp->f_flags);
+	if (g_dcc_tty == tty) {
+		if (--g_dcc_tty_open_count == 0)
+			g_dcc_tty = NULL;
+	}
+}
+
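+/*
+ * Queue up to @count bytes into the circular g_dcc_buffer, handling the
+ * wrap-around case, and kick the poller.  Returns the number of bytes
+ * actually queued.
+ */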
+static int dcc_write(const unsigned char *buf_start, int count)
+{
+	const unsigned char *buf = buf_start;
+	unsigned long irq_flags;
+	int copy_len;
+	int space_left;
+	int tail;
+
+	if (count < 1)
+		return 0;
+
+	spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+	do {
+		tail = (g_dcc_buffer_head + g_dcc_buffer_count) % ARRAY_SIZE(g_dcc_buffer);
+		copy_len = ARRAY_SIZE(g_dcc_buffer) - tail;
+		space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+		if (copy_len > space_left)
+			copy_len = space_left;
+		if (copy_len > count)
+			copy_len = count;
+		memcpy(&g_dcc_buffer[tail], buf, copy_len);
+		g_dcc_buffer_count += copy_len;
+		buf += copy_len;
+		count -= copy_len;
+		if (copy_len < count && copy_len < space_left) {
+			space_left -= copy_len;
+			copy_len = count;
+			if (copy_len > space_left) {
+				copy_len = space_left;
+			}
+			memcpy(g_dcc_buffer, buf, copy_len);
+			buf += copy_len;
+			count -= copy_len;
+			g_dcc_buffer_count += copy_len;
+		}
+		dcc_poll_locked();
+		space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+	} while(count && space_left);
+	spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+	return buf - buf_start;
+}
+
+static int dcc_tty_write(struct tty_struct * tty, const unsigned char *buf, int count)
+{
+	int ret;
+	/* printk("dcc_tty_write %p, %d\n", buf, count); */
+	ret = dcc_write(buf, count);
+	if (ret != count)
+		printk("dcc_tty_write %p, %d, returned %d\n", buf, count, ret);
+	return ret;
+}
+
+static int dcc_tty_write_room(struct tty_struct *tty)
+{
+	int space_left;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+	space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+	spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+	return space_left;
+}
+
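+/* Return the RX-full bit of the DCC status register (1 if input pending). */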
+static int dcc_tty_chars_in_buffer(struct tty_struct *tty)
+{
+	int ret;
+	asm(
+		"mrc 14, 0, %0, c0, c1, 0\n"
+		"mov %0, %0, LSR #30\n"
+		"and %0, %0, #1\n"
+		: "=r" (ret)
+	);
+	return ret;
+}
+
+static void dcc_tty_unthrottle(struct tty_struct * tty)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+	dcc_poll_locked();
+	spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+}
+
+static enum hrtimer_restart dcc_tty_timer_func(struct hrtimer *timer)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+	dcc_poll_locked();
+	spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+	return HRTIMER_NORESTART;
+}
+
+void dcc_console_write(struct console *co, const char *b, unsigned count)
+{
+#if 1
+	dcc_write(b, count);
+#else
+	/* blocking printk */
+	while (count > 0) {
+		int written;
+		written = dcc_write(b, count);
+		if (written) {
+			b += written;
+			count -= written;
+		}
+	}
+#endif
+}
+
+static struct tty_driver *dcc_console_device(struct console *c, int *index)
+{
+	*index = 0;
+	return g_dcc_tty_driver;
+}
+
+static int __init dcc_console_setup(struct console *co, char *options)
+{
+	if (co->index != 0)
+		return -ENODEV;
+	return 0;
+}
+
+static struct console dcc_console =
+{
+	.name		= "ttyDCC",
+	.write		= dcc_console_write,
+	.device		= dcc_console_device,
+	.setup		= dcc_console_setup,
+	.flags		= CON_PRINTBUFFER,
+	.index		= -1,
+};
+
+static const struct tty_operations dcc_tty_ops = {
+	.open = dcc_tty_open,
+	.close = dcc_tty_close,
+	.write = dcc_tty_write,
+	.write_room = dcc_tty_write_room,
+	.chars_in_buffer = dcc_tty_chars_in_buffer,
+	.unthrottle = dcc_tty_unthrottle,
+};
+
+static int __init dcc_tty_init(void)
+{
+	int ret;
+
+	hrtimer_init(&g_dcc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	g_dcc_timer.function = dcc_tty_timer_func;
+
+	g_dcc_tty_driver = alloc_tty_driver(1);
+	if (!g_dcc_tty_driver) {
+		printk(KERN_ERR "dcc_tty_probe: alloc_tty_driver failed\n");
+		ret = -ENOMEM;
+		goto err_alloc_tty_driver_failed;
+	}
+	g_dcc_tty_driver->owner = THIS_MODULE;
+	g_dcc_tty_driver->driver_name = "dcc";
+	g_dcc_tty_driver->name = "ttyDCC";
+	g_dcc_tty_driver->major = 0; /* auto assign */
+	g_dcc_tty_driver->minor_start = 0;
+	g_dcc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+	g_dcc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+	g_dcc_tty_driver->init_termios = tty_std_termios;
+	g_dcc_tty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+	tty_set_operations(g_dcc_tty_driver, &dcc_tty_ops);
+	ret = tty_register_driver(g_dcc_tty_driver);
+	if (ret) {
+		printk(KERN_ERR "dcc_tty_probe: tty_register_driver failed, %d\n", ret);
+		goto err_tty_register_driver_failed;
+	}
+	tty_register_device(g_dcc_tty_driver, 0, NULL);
+
+	register_console(&dcc_console);
+	hrtimer_start(&g_dcc_timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+
+	return 0;
+
+err_tty_register_driver_failed:
+	put_tty_driver(g_dcc_tty_driver);
+	g_dcc_tty_driver = NULL;
+err_alloc_tty_driver_failed:
+	return ret;
+}
+
+static void __exit dcc_tty_exit(void)
+{
+	int ret;
+
+	tty_unregister_device(g_dcc_tty_driver, 0);
+	ret = tty_unregister_driver(g_dcc_tty_driver);
+	if (ret < 0) {
+		printk(KERN_ERR "dcc_tty_remove: tty_unregister_driver failed, %d\n", ret);
+	} else {
+		put_tty_driver(g_dcc_tty_driver);
+	}
+	g_dcc_tty_driver = NULL;
+}
+
+module_init(dcc_tty_init);
+module_exit(dcc_tty_exit);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 598ece7..d370021 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -60,6 +60,7 @@
 }
 #endif
 
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
 #ifdef CONFIG_STRICT_DEVMEM
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
@@ -81,7 +82,9 @@
 	return 1;
 }
 #endif
+#endif
 
+#ifdef CONFIG_DEVMEM
 void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
 {
 }
@@ -208,6 +211,9 @@
 	*ppos += written;
 	return written;
 }
+#endif	/* CONFIG_DEVMEM */
+
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
 
 int __weak phys_mem_access_prot_allowed(struct file *file,
 	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
@@ -329,6 +335,7 @@
 	}
 	return 0;
 }
+#endif	/* CONFIG_DEVMEM || CONFIG_DEVKMEM */
 
 #ifdef CONFIG_DEVKMEM
 static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
@@ -723,6 +730,8 @@
 	return file->f_pos = 0;
 }
 
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
+
 /*
  * The memory devices use the full 32/64 bits of the offset, and so we cannot
  * check against negative addresses: they are ok. The return value is weird,
@@ -756,10 +765,14 @@
 	return ret;
 }
 
+#endif
+
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
 static int open_port(struct inode *inode, struct file *filp)
 {
 	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
 }
+#endif
 
 #define zero_lseek	null_lseek
 #define full_lseek      null_lseek
@@ -770,6 +783,7 @@
 #define open_kmem	open_mem
 #define open_oldmem	open_mem
 
+#ifdef CONFIG_DEVMEM
 static const struct file_operations mem_fops = {
 	.llseek		= memory_lseek,
 	.read		= read_mem,
@@ -778,6 +792,7 @@
 	.open		= open_mem,
 	.get_unmapped_area = get_unmapped_area_mem,
 };
+#endif
 
 #ifdef CONFIG_DEVKMEM
 static const struct file_operations kmem_fops = {
@@ -847,7 +862,9 @@
 	const struct file_operations *fops;
 	struct backing_dev_info *dev_info;
 } devlist[] = {
+#ifdef CONFIG_DEVMEM
 	 [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
+#endif
 #ifdef CONFIG_DEVKMEM
 	 [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
 #endif
diff --git a/drivers/char/random.c b/drivers/char/random.c
index aee3464..b223267 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1495,6 +1495,28 @@
 EXPORT_SYMBOL(get_random_int);
 
 /*
+ * Same as get_random_int(), but returns unsigned long.
+ */
+unsigned long get_random_long(void)
+{
+	__u32 *hash;
+	unsigned long ret;
+
+	if (arch_get_random_long(&ret))
+		return ret;
+
+	hash = get_cpu_var(get_random_int_hash);
+
+	hash[0] += current->pid + jiffies + get_cycles();
+	md5_transform(hash, random_int_secret);
+	ret = *(unsigned long *)hash;
+	put_cpu_var(get_random_int_hash);
+
+	return ret;
+}
+EXPORT_SYMBOL(get_random_long);
+
+/*
  * randomize_range() returns a start address such that
  *
  *    [...... <range> .....]
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 534fcb8..62236d6 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -102,6 +102,16 @@
 	  Be aware that not all cpufreq drivers support the conservative
 	  governor. If unsure have a look at the help section of the
 	  driver. Fallback governor will be the performance governor.
+
+config CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+	bool "interactive"
+	select CPU_FREQ_GOV_INTERACTIVE
+	help
+	  Use the CPUFreq governor 'interactive' as default. This allows
+	  you to get a full dynamic cpu frequency capable system by simply
+	  loading your cpufreq low-level hardware driver, using the
+	  'interactive' governor for latency-sensitive workloads.
+
 endchoice
 
 config CPU_FREQ_GOV_PERFORMANCE
@@ -160,6 +170,23 @@
 
 	  If in doubt, say N.
 
+config CPU_FREQ_GOV_INTERACTIVE
+	tristate "'interactive' cpufreq policy governor"
+	help
+	  'interactive' - This driver adds a dynamic cpufreq policy governor
+	  designed for latency-sensitive workloads.
+
+	  This governor attempts to reduce the latency of clock
+	  increases so that the system is more responsive to
+	  interactive workloads.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called cpufreq_interactive.
+
+	  For details, take a look at <file:Documentation/cpu-freq/>.
+
+	  If in doubt, say N.
+
 config CPU_FREQ_GOV_CONSERVATIVE
 	tristate "'conservative' cpufreq governor"
 	depends on CPU_FREQ
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 6bd63d6..2a9d25e 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -58,6 +58,21 @@
 	  By enabling this option the acpi_cpufreq driver provides the old
 	  entry in addition to the new boost ones, for compatibility reasons.
 
+config X86_SFI_CPUFREQ
+	tristate "SFI Processor P-States driver"
+	select CPU_FREQ_TABLE
+	depends on SFI
+	help
+	  This driver adds a CPUFreq driver which utilizes the SFI
+	  Processor Performance States enumeration.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called sfi-cpufreq.
+
+	  For details, take a look at <file:Documentation/cpu-freq/>.
+
+	  If in doubt, say N.
+
 config ELAN_CPUFREQ
 	tristate "AMD Elan SC400 and SC410"
 	select CPU_FREQ_TABLE
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 3b95322..87d084a 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -9,6 +9,7 @@
 obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE)	+= cpufreq_userspace.o
 obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND)	+= cpufreq_ondemand.o
 obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE)	+= cpufreq_conservative.o
+obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE)	+= cpufreq_interactive.o
 obj-$(CONFIG_CPU_FREQ_GOV_COMMON)		+= cpufreq_governor.o
 
 # CPUfreq cross-arch helpers
@@ -25,6 +26,7 @@
 
 obj-$(CONFIG_X86_ACPI_CPUFREQ)		+= acpi-cpufreq.o mperf.o
 obj-$(CONFIG_X86_POWERNOW_K8)		+= powernow-k8.o
+obj-$(CONFIG_X86_SFI_CPUFREQ)		+= sfi-cpufreq.o mperf.o
 obj-$(CONFIG_X86_PCC_CPUFREQ)		+= pcc-cpufreq.o
 obj-$(CONFIG_X86_POWERNOW_K6)		+= powernow-k6.o
 obj-$(CONFIG_X86_POWERNOW_K7)		+= powernow-k7.o
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 66f6cf5..7b4f6770 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -133,6 +133,16 @@
 {
 	return cpufreq_driver->have_governor_per_policy;
 }
+EXPORT_SYMBOL_GPL(have_governor_per_policy);
+
+struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
+{
+	if (have_governor_per_policy())
+		return &policy->kobj;
+	else
+		return cpufreq_global_kobject;
+}
+EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
 
 static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
 {
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 27b0e2a..7c4c1cc 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -29,14 +29,6 @@
 
 #include "cpufreq_governor.h"
 
-static struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
-{
-	if (have_governor_per_policy())
-		return &policy->kobj;
-	else
-		return cpufreq_global_kobject;
-}
-
 static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
 {
 	if (have_governor_per_policy())
@@ -396,6 +388,7 @@
 
 		mutex_lock(&dbs_data->mutex);
 		mutex_destroy(&cpu_cdbs->timer_mutex);
+		cpu_cdbs->cur_policy = NULL;
 
 		mutex_unlock(&dbs_data->mutex);
 
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
new file mode 100644
index 0000000..0654e40
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -0,0 +1,1414 @@
+/*
+ * drivers/cpufreq/cpufreq_interactive.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ *
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
+#include <linux/tick.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/kernel_stat.h>
+#include <asm/cputime.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/cpufreq_interactive.h>
+
+struct cpufreq_interactive_cpuinfo {
+	struct timer_list cpu_timer;
+	struct timer_list cpu_slack_timer;
+	spinlock_t load_lock; /* protects the next 4 fields */
+	u64 time_in_idle;
+	u64 time_in_idle_timestamp;
+	u64 cputime_speedadj;
+	u64 cputime_speedadj_timestamp;
+	struct cpufreq_policy *policy;
+	struct cpufreq_frequency_table *freq_table;
+	spinlock_t target_freq_lock; /* protects target freq */
+	unsigned int target_freq;
+	unsigned int floor_freq;
+	unsigned int max_freq;
+	u64 floor_validate_time;
+	u64 hispeed_validate_time;
+	struct rw_semaphore enable_sem;
+	int governor_enabled;
+};
+
+static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
+
+/* realtime thread handles frequency scaling */
+static struct task_struct *speedchange_task;
+static cpumask_t speedchange_cpumask;
+static spinlock_t speedchange_cpumask_lock;
+static struct mutex gov_lock;
+
+/* Target load.  Lower values result in higher CPU speeds. */
+#define DEFAULT_TARGET_LOAD 90
+static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
+
+#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
+#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
+static unsigned int default_above_hispeed_delay[] = {
+	DEFAULT_ABOVE_HISPEED_DELAY };
+
+struct cpufreq_interactive_tunables {
+	int usage_count;
+	/* Speed to bump up to from low speed on a load burst (defaults to policy max) */
+	unsigned int hispeed_freq;
+	/* Go to hi speed when CPU load at or above this value. */
+#define DEFAULT_GO_HISPEED_LOAD 99
+	unsigned long go_hispeed_load;
+	/* Target load. Lower values result in higher CPU speeds. */
+	spinlock_t target_loads_lock;
+	unsigned int *target_loads;
+	int ntarget_loads;
+	/*
+	 * The minimum amount of time to spend at a frequency before we can ramp
+	 * down.
+	 */
+#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
+	unsigned long min_sample_time;
+	/*
+	 * The sample rate of the timer used to increase frequency
+	 */
+	unsigned long timer_rate;
+	/*
+	 * Wait this long before raising speed above hispeed, by default a
+	 * single timer interval.
+	 */
+	spinlock_t above_hispeed_delay_lock;
+	unsigned int *above_hispeed_delay;
+	int nabove_hispeed_delay;
+	/* Non-zero means indefinite speed boost active */
+	int boost_val;
+	/* Duration of a boost pulse in usecs */
+	int boostpulse_duration_val;
+	/* End time of boost pulse in ktime converted to usecs */
+	u64 boostpulse_endtime;
+	bool boosted;
+	/*
+	 * Max additional time to wait in idle, beyond timer_rate, at speeds
+	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
+	 */
+#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
+	int timer_slack_val;
+	bool io_is_busy;
+};
+
+/* For cases where we have single governor instance for system */
+static struct cpufreq_interactive_tunables *common_tunables;
+
+static struct attribute_group *get_sysfs_attr(void);
+
+static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
+						  cputime64_t *wall)
+{
+	u64 idle_time;
+	u64 cur_wall_time;
+	u64 busy_time;
+
+	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+
+	busy_time  = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
+
+	idle_time = cur_wall_time - busy_time;
+	if (wall)
+		*wall = jiffies_to_usecs(cur_wall_time);
+
+	return jiffies_to_usecs(idle_time);
+}
+
+static inline cputime64_t get_cpu_idle_time(
+	unsigned int cpu,
+	cputime64_t *wall,
+	bool io_is_busy)
+{
+	u64 idle_time = get_cpu_idle_time_us(cpu, wall);
+
+	if (idle_time == -1ULL)
+		idle_time = get_cpu_idle_time_jiffy(cpu, wall);
+	else if (!io_is_busy)
+		idle_time += get_cpu_iowait_time_us(cpu, wall);
+
+	return idle_time;
+}
+
+static void cpufreq_interactive_timer_resched(
+	struct cpufreq_interactive_cpuinfo *pcpu)
+{
+	struct cpufreq_interactive_tunables *tunables =
+		pcpu->policy->governor_data;
+	unsigned long expires;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pcpu->load_lock, flags);
+	pcpu->time_in_idle =
+		get_cpu_idle_time(smp_processor_id(),
+				  &pcpu->time_in_idle_timestamp,
+				  tunables->io_is_busy);
+	pcpu->cputime_speedadj = 0;
+	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
+	expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
+	mod_timer_pinned(&pcpu->cpu_timer, expires);
+
+	if (tunables->timer_slack_val >= 0 &&
+	    pcpu->target_freq > pcpu->policy->min) {
+		expires += usecs_to_jiffies(tunables->timer_slack_val);
+		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
+	}
+
+	spin_unlock_irqrestore(&pcpu->load_lock, flags);
+}
+
+/* The caller must hold the enable_sem write semaphore to avoid timer races.
+ * The cpu_timer and cpu_slack_timer must be deactivated when calling this
+ * function.
+ */
+static void cpufreq_interactive_timer_start(
+	struct cpufreq_interactive_tunables *tunables, int cpu)
+{
+	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
+	unsigned long expires = jiffies +
+		usecs_to_jiffies(tunables->timer_rate);
+	unsigned long flags;
+
+	pcpu->cpu_timer.expires = expires;
+	add_timer_on(&pcpu->cpu_timer, cpu);
+	if (tunables->timer_slack_val >= 0 &&
+	    pcpu->target_freq > pcpu->policy->min) {
+		expires += usecs_to_jiffies(tunables->timer_slack_val);
+		pcpu->cpu_slack_timer.expires = expires;
+		add_timer_on(&pcpu->cpu_slack_timer, cpu);
+	}
+
+	spin_lock_irqsave(&pcpu->load_lock, flags);
+	pcpu->time_in_idle =
+		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
+				  tunables->io_is_busy);
+	pcpu->cputime_speedadj = 0;
+	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
+	spin_unlock_irqrestore(&pcpu->load_lock, flags);
+}
+
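+/*
+ * above_hispeed_delay (like target_loads) is stored as a flattened
+ * "value freq value freq ..." array; return the entry whose frequency
+ * threshold matches @freq.
+ */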
+static unsigned int freq_to_above_hispeed_delay(
+	struct cpufreq_interactive_tunables *tunables,
+	unsigned int freq)
+{
+	int i;
+	unsigned int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
+
+	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
+			freq >= tunables->above_hispeed_delay[i+1]; i += 2)
+		;
+
+	ret = tunables->above_hispeed_delay[i];
+	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
+	return ret;
+}
+
+static unsigned int freq_to_targetload(
+	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
+{
+	int i;
+	unsigned int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tunables->target_loads_lock, flags);
+
+	for (i = 0; i < tunables->ntarget_loads - 1 &&
+		    freq >= tunables->target_loads[i+1]; i += 2)
+		;
+
+	ret = tunables->target_loads[i];
+	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
+	return ret;
+}
+
+/*
+ * If increasing frequencies never map to a lower target load then
+ * choose_freq() will find the minimum frequency that does not exceed its
+ * target load given the current load.
+ */
+static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
+		unsigned int loadadjfreq)
+{
+	unsigned int freq = pcpu->policy->cur;
+	unsigned int prevfreq, freqmin, freqmax;
+	unsigned int tl;
+	int index;
+
+	freqmin = 0;
+	freqmax = UINT_MAX;
+
+	do {
+		prevfreq = freq;
+		tl = freq_to_targetload(pcpu->policy->governor_data, freq);
+
+		/*
+		 * Find the lowest frequency where the computed load is less
+		 * than or equal to the target load.
+		 */
+
+		if (cpufreq_frequency_table_target(
+			    pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
+			    CPUFREQ_RELATION_L, &index))
+			break;
+		freq = pcpu->freq_table[index].frequency;
+
+		if (freq > prevfreq) {
+			/* The previous frequency is too low. */
+			freqmin = prevfreq;
+
+			if (freq >= freqmax) {
+				/*
+				 * Find the highest frequency that is less
+				 * than freqmax.
+				 */
+				if (cpufreq_frequency_table_target(
+					    pcpu->policy, pcpu->freq_table,
+					    freqmax - 1, CPUFREQ_RELATION_H,
+					    &index))
+					break;
+				freq = pcpu->freq_table[index].frequency;
+
+				if (freq == freqmin) {
+					/*
+					 * The first frequency below freqmax
+					 * has already been found to be too
+					 * low.  freqmax is the lowest speed
+					 * we found that is fast enough.
+					 */
+					freq = freqmax;
+					break;
+				}
+			}
+		} else if (freq < prevfreq) {
+			/* The previous frequency is high enough. */
+			freqmax = prevfreq;
+
+			if (freq <= freqmin) {
+				/*
+				 * Find the lowest frequency that is higher
+				 * than freqmin.
+				 */
+				if (cpufreq_frequency_table_target(
+					    pcpu->policy, pcpu->freq_table,
+					    freqmin + 1, CPUFREQ_RELATION_L,
+					    &index))
+					break;
+				freq = pcpu->freq_table[index].frequency;
+
+				/*
+				 * If freqmax is the first frequency above
+				 * freqmin then we have already found that
+				 * this speed is fast enough.
+				 */
+				if (freq == freqmax)
+					break;
+			}
+		}
+
+		/* If same frequency chosen as previous then done. */
+	} while (freq != prevfreq);
+
+	return freq;
+}
+
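+/*
+ * Accumulate (active time * current frequency) into cputime_speedadj so
+ * the sampling timer can derive a frequency-weighted load.  The caller
+ * holds load_lock.
+ */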
+static u64 update_load(int cpu)
+{
+	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
+	struct cpufreq_interactive_tunables *tunables =
+		pcpu->policy->governor_data;
+	u64 now;
+	u64 now_idle;
+	unsigned int delta_idle;
+	unsigned int delta_time;
+	u64 active_time;
+
+	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
+	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
+	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
+
+	if (delta_time <= delta_idle)
+		active_time = 0;
+	else
+		active_time = delta_time - delta_idle;
+
+	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;
+
+	pcpu->time_in_idle = now_idle;
+	pcpu->time_in_idle_timestamp = now;
+	return now;
+}
+
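+/*
+ * Per-CPU sampling timer: compute the load since the last sample, pick a
+ * new target via choose_freq() and the hispeed/boost rules, honor the
+ * floor and above-hispeed delays, then hand the CPU to the speedchange
+ * thread to apply the change.
+ */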
+static void cpufreq_interactive_timer(unsigned long data)
+{
+	u64 now;
+	unsigned int delta_time;
+	u64 cputime_speedadj;
+	int cpu_load;
+	struct cpufreq_interactive_cpuinfo *pcpu =
+		&per_cpu(cpuinfo, data);
+	struct cpufreq_interactive_tunables *tunables =
+		pcpu->policy->governor_data;
+	unsigned int new_freq;
+	unsigned int loadadjfreq;
+	unsigned int index;
+	unsigned long flags;
+
+	if (!down_read_trylock(&pcpu->enable_sem))
+		return;
+	if (!pcpu->governor_enabled)
+		goto exit;
+
+	spin_lock_irqsave(&pcpu->load_lock, flags);
+	now = update_load(data);
+	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
+	cputime_speedadj = pcpu->cputime_speedadj;
+	spin_unlock_irqrestore(&pcpu->load_lock, flags);
+
+	if (WARN_ON_ONCE(!delta_time))
+		goto rearm;
+
+	spin_lock_irqsave(&pcpu->target_freq_lock, flags);
+	do_div(cputime_speedadj, delta_time);
+	loadadjfreq = (unsigned int)cputime_speedadj * 100;
+	cpu_load = loadadjfreq / pcpu->policy->cur;
+	tunables->boosted = tunables->boost_val || now < tunables->boostpulse_endtime;
+
+	if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
+		if (pcpu->target_freq < tunables->hispeed_freq) {
+			new_freq = tunables->hispeed_freq;
+		} else {
+			new_freq = choose_freq(pcpu, loadadjfreq);
+
+			if (new_freq < tunables->hispeed_freq)
+				new_freq = tunables->hispeed_freq;
+		}
+	} else {
+		new_freq = choose_freq(pcpu, loadadjfreq);
+		if (new_freq > tunables->hispeed_freq &&
+				pcpu->target_freq < tunables->hispeed_freq)
+			new_freq = tunables->hispeed_freq;
+	}
+
+	if (pcpu->target_freq >= tunables->hispeed_freq &&
+	    new_freq > pcpu->target_freq &&
+	    now - pcpu->hispeed_validate_time <
+	    freq_to_above_hispeed_delay(tunables, pcpu->target_freq)) {
+		trace_cpufreq_interactive_notyet(
+			data, cpu_load, pcpu->target_freq,
+			pcpu->policy->cur, new_freq);
+		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
+		goto rearm;
+	}
+
+	pcpu->hispeed_validate_time = now;
+
+	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
+					   new_freq, CPUFREQ_RELATION_L,
+					   &index)) {
+		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
+		goto rearm;
+	}
+
+	new_freq = pcpu->freq_table[index].frequency;
+
+	/*
+	 * Do not scale below floor_freq unless we have been at or above the
+	 * floor frequency for the minimum sample time since last validated.
+	 */
+	if (new_freq < pcpu->floor_freq) {
+		if (now - pcpu->floor_validate_time <
+				tunables->min_sample_time) {
+			trace_cpufreq_interactive_notyet(
+				data, cpu_load, pcpu->target_freq,
+				pcpu->policy->cur, new_freq);
+			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
+			goto rearm;
+		}
+	}
+
+	/*
+	 * Update the timestamp for checking whether speed has been held at
+	 * or above the selected frequency for a minimum of min_sample_time,
+	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
+	 * allow the speed to drop as soon as the boostpulse duration expires
+	 * (or the indefinite boost is turned off).
+	 */
+
+	if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
+		pcpu->floor_freq = new_freq;
+		pcpu->floor_validate_time = now;
+	}
+
+	if (pcpu->target_freq == new_freq &&
+			pcpu->target_freq <= pcpu->policy->cur) {
+		trace_cpufreq_interactive_already(
+			data, cpu_load, pcpu->target_freq,
+			pcpu->policy->cur, new_freq);
+		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
+		goto rearm_if_notmax;
+	}
+
+	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
+					 pcpu->policy->cur, new_freq);
+
+	pcpu->target_freq = new_freq;
+	spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
+	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+	cpumask_set_cpu(data, &speedchange_cpumask);
+	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+	wake_up_process(speedchange_task);
+
+rearm_if_notmax:
+	/*
+	 * Already at max speed with no need to change it; wait until the
+	 * next idle to re-evaluate, no timer needed.
+	 */
+	if (pcpu->target_freq == pcpu->policy->max)
+		goto exit;
+
+rearm:
+	if (!timer_pending(&pcpu->cpu_timer))
+		cpufreq_interactive_timer_resched(pcpu);
+
+exit:
+	up_read(&pcpu->enable_sem);
+	return;
+}
+
+static void cpufreq_interactive_idle_start(void)
+{
+	struct cpufreq_interactive_cpuinfo *pcpu =
+		&per_cpu(cpuinfo, smp_processor_id());
+	int pending;
+
+	if (!down_read_trylock(&pcpu->enable_sem))
+		return;
+	if (!pcpu->governor_enabled) {
+		up_read(&pcpu->enable_sem);
+		return;
+	}
+
+	pending = timer_pending(&pcpu->cpu_timer);
+
+	if (pcpu->target_freq != pcpu->policy->min) {
+		/*
+		 * Entering idle while not at lowest speed.  On some
+		 * platforms this can hold the other CPU(s) at that speed
+		 * even though the CPU is idle. Set a timer to re-evaluate
+		 * speed so this idle CPU doesn't hold the other CPUs above
+		 * min indefinitely.  This should probably be a quirk of
+		 * the CPUFreq driver.
+		 */
+		if (!pending)
+			cpufreq_interactive_timer_resched(pcpu);
+	}
+
+	up_read(&pcpu->enable_sem);
+}
+
+static void cpufreq_interactive_idle_end(void)
+{
+	struct cpufreq_interactive_cpuinfo *pcpu =
+		&per_cpu(cpuinfo, smp_processor_id());
+
+	if (!down_read_trylock(&pcpu->enable_sem))
+		return;
+	if (!pcpu->governor_enabled) {
+		up_read(&pcpu->enable_sem);
+		return;
+	}
+
+	/* Arm the timer for 1-2 ticks later if not already. */
+	if (!timer_pending(&pcpu->cpu_timer)) {
+		cpufreq_interactive_timer_resched(pcpu);
+	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
+		del_timer(&pcpu->cpu_timer);
+		del_timer(&pcpu->cpu_slack_timer);
+		cpufreq_interactive_timer(smp_processor_id());
+	}
+
+	up_read(&pcpu->enable_sem);
+}
+
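+/*
+ * Realtime kthread: sleep until a CPU is flagged in speedchange_cpumask,
+ * then set each flagged policy to the highest target_freq requested by
+ * any CPU it covers.
+ */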
+static int cpufreq_interactive_speedchange_task(void *data)
+{
+	unsigned int cpu;
+	cpumask_t tmp_mask;
+	unsigned long flags;
+	struct cpufreq_interactive_cpuinfo *pcpu;
+
+	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+
+		if (cpumask_empty(&speedchange_cpumask)) {
+			spin_unlock_irqrestore(&speedchange_cpumask_lock,
+					       flags);
+			schedule();
+
+			if (kthread_should_stop())
+				break;
+
+			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+		}
+
+		set_current_state(TASK_RUNNING);
+		tmp_mask = speedchange_cpumask;
+		cpumask_clear(&speedchange_cpumask);
+		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+
+		for_each_cpu(cpu, &tmp_mask) {
+			unsigned int j;
+			unsigned int max_freq = 0;
+
+			pcpu = &per_cpu(cpuinfo, cpu);
+			if (!down_read_trylock(&pcpu->enable_sem))
+				continue;
+			if (!pcpu->governor_enabled) {
+				up_read(&pcpu->enable_sem);
+				continue;
+			}
+
+			for_each_cpu(j, pcpu->policy->cpus) {
+				struct cpufreq_interactive_cpuinfo *pjcpu =
+					&per_cpu(cpuinfo, j);
+
+				if (pjcpu->target_freq > max_freq)
+					max_freq = pjcpu->target_freq;
+			}
+
+			if (max_freq != pcpu->policy->cur)
+				__cpufreq_driver_target(pcpu->policy,
+							max_freq,
+							CPUFREQ_RELATION_H);
+			trace_cpufreq_interactive_setspeed(cpu,
+						     pcpu->target_freq,
+						     pcpu->policy->cur);
+
+			up_read(&pcpu->enable_sem);
+		}
+	}
+
+	return 0;
+}
+
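+/*
+ * Raise every online CPU governed by @tunables to at least hispeed_freq
+ * and reset its floor, waking the speedchange thread if anything changed.
+ */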
+static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
+{
+	int i;
+	int anyboost = 0;
+	unsigned long flags[2];
+	struct cpufreq_interactive_cpuinfo *pcpu;
+
+	tunables->boosted = true;
+
+	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);
+
+	for_each_online_cpu(i) {
+		pcpu = &per_cpu(cpuinfo, i);
+		if (tunables != pcpu->policy->governor_data)
+			continue;
+
+		spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
+		if (pcpu->target_freq < tunables->hispeed_freq) {
+			pcpu->target_freq = tunables->hispeed_freq;
+			cpumask_set_cpu(i, &speedchange_cpumask);
+			pcpu->hispeed_validate_time =
+				ktime_to_us(ktime_get());
+			anyboost = 1;
+		}
+
+		/*
+		 * Set floor freq and (re)start timer for when last
+		 * validated.
+		 */
+
+		pcpu->floor_freq = tunables->hispeed_freq;
+		pcpu->floor_validate_time = ktime_to_us(ktime_get());
+		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
+	}
+
+	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);
+
+	if (anyboost)
+		wake_up_process(speedchange_task);
+}
+
+static int cpufreq_interactive_notifier(
+	struct notifier_block *nb, unsigned long val, void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	struct cpufreq_interactive_cpuinfo *pcpu;
+	int cpu;
+	unsigned long flags;
+
+	if (val == CPUFREQ_POSTCHANGE) {
+		pcpu = &per_cpu(cpuinfo, freq->cpu);
+		if (!down_read_trylock(&pcpu->enable_sem))
+			return 0;
+		if (!pcpu->governor_enabled) {
+			up_read(&pcpu->enable_sem);
+			return 0;
+		}
+
+		for_each_cpu(cpu, pcpu->policy->cpus) {
+			struct cpufreq_interactive_cpuinfo *pjcpu =
+				&per_cpu(cpuinfo, cpu);
+			if (cpu != freq->cpu) {
+				if (!down_read_trylock(&pjcpu->enable_sem))
+					continue;
+				if (!pjcpu->governor_enabled) {
+					up_read(&pjcpu->enable_sem);
+					continue;
+				}
+			}
+			spin_lock_irqsave(&pjcpu->load_lock, flags);
+			update_load(cpu);
+			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
+			if (cpu != freq->cpu)
+				up_read(&pjcpu->enable_sem);
+		}
+
+		up_read(&pcpu->enable_sem);
+	}
+	return 0;
+}
+
+static struct notifier_block cpufreq_notifier_block = {
+	.notifier_call = cpufreq_interactive_notifier,
+};
+
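+/*
+ * Parse a space- or colon-separated list of unsigned ints (e.g. the
+ * target_loads format "85 1500000:90") into a kmalloc'd array; an even
+ * token count is rejected.  The caller frees the result.
+ */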
+static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
+{
+	const char *cp;
+	int i;
+	int ntokens = 1;
+	unsigned int *tokenized_data;
+	int err = -EINVAL;
+
+	cp = buf;
+	while ((cp = strpbrk(cp + 1, " :")))
+		ntokens++;
+
+	if (!(ntokens & 0x1))
+		goto err;
+
+	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
+	if (!tokenized_data) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	cp = buf;
+	i = 0;
+	while (i < ntokens) {
+		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
+			goto err_kfree;
+
+		cp = strpbrk(cp, " :");
+		if (!cp)
+			break;
+		cp++;
+	}
+
+	if (i != ntokens)
+		goto err_kfree;
+
+	*num_tokens = ntokens;
+	return tokenized_data;
+
+err_kfree:
+	kfree(tokenized_data);
+err:
+	return ERR_PTR(err);
+}
+
+static ssize_t show_target_loads(
+	struct cpufreq_interactive_tunables *tunables,
+	char *buf)
+{
+	int i;
+	ssize_t ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tunables->target_loads_lock, flags);
+
+	for (i = 0; i < tunables->ntarget_loads; i++)
+		ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
+			       i & 0x1 ? ":" : " ");
+
+	sprintf(buf + ret - 1, "\n");
+	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
+	return ret;
+}
+
+static ssize_t store_target_loads(
+	struct cpufreq_interactive_tunables *tunables,
+	const char *buf, size_t count)
+{
+	int ntokens;
+	unsigned int *new_target_loads = NULL;
+	unsigned long flags;
+
+	new_target_loads = get_tokenized_data(buf, &ntokens);
+	if (IS_ERR(new_target_loads))
+		return PTR_RET(new_target_loads);
+
+	spin_lock_irqsave(&tunables->target_loads_lock, flags);
+	if (tunables->target_loads != default_target_loads)
+		kfree(tunables->target_loads);
+	tunables->target_loads = new_target_loads;
+	tunables->ntarget_loads = ntokens;
+	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
+	return count;
+}
+
+static ssize_t show_above_hispeed_delay(
+	struct cpufreq_interactive_tunables *tunables, char *buf)
+{
+	int i;
+	ssize_t ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
+
+	for (i = 0; i < tunables->nabove_hispeed_delay; i++)
+		ret += sprintf(buf + ret, "%u%s",
+			       tunables->above_hispeed_delay[i],
+			       i & 0x1 ? ":" : " ");
+
+	sprintf(buf + ret - 1, "\n");
+	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
+	return ret;
+}
+
+static ssize_t store_above_hispeed_delay(
+	struct cpufreq_interactive_tunables *tunables,
+	const char *buf, size_t count)
+{
+	int ntokens;
+	unsigned int *new_above_hispeed_delay = NULL;
+	unsigned long flags;
+
+	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
+	if (IS_ERR(new_above_hispeed_delay))
+		return PTR_RET(new_above_hispeed_delay);
+
+	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
+	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
+		kfree(tunables->above_hispeed_delay);
+	tunables->above_hispeed_delay = new_above_hispeed_delay;
+	tunables->nabove_hispeed_delay = ntokens;
+	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
+	return count;
+}
+
+static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
+		char *buf)
+{
+	return sprintf(buf, "%u\n", tunables->hispeed_freq);
+}
+
+static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
+		const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = strict_strtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+	tunables->hispeed_freq = val;
+	return count;
+}
+
+static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
+		*tunables, char *buf)
+{
+	return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
+}
+
+static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
+		*tunables, const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = strict_strtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+	tunables->go_hispeed_load = val;
+	return count;
+}
+
+static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
+		*tunables, char *buf)
+{
+	return sprintf(buf, "%lu\n", tunables->min_sample_time);
+}
+
+static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
+		*tunables, const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = strict_strtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+	tunables->min_sample_time = val;
+	return count;
+}
+
+static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
+		char *buf)
+{
+	return sprintf(buf, "%lu\n", tunables->timer_rate);
+}
+
+static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
+		const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = strict_strtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+	tunables->timer_rate = val;
+	return count;
+}
+
+static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
+		char *buf)
+{
+	return sprintf(buf, "%d\n", tunables->timer_slack_val);
+}
+
+static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
+		const char *buf, size_t count)
+{
+	int ret;
+	long val;
+
+	ret = kstrtol(buf, 10, &val);
+	if (ret < 0)
+		return ret;
+
+	tunables->timer_slack_val = val;
+	return count;
+}
+
+static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
+			  char *buf)
+{
+	return sprintf(buf, "%d\n", tunables->boost_val);
+}
+
+static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
+			   const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	tunables->boost_val = val;
+
+	if (tunables->boost_val) {
+		trace_cpufreq_interactive_boost("on");
+		if (!tunables->boosted)
+			cpufreq_interactive_boost(tunables);
+	} else {
+		tunables->boostpulse_endtime = ktime_to_us(ktime_get());
+		trace_cpufreq_interactive_unboost("off");
+	}
+
+	return count;
+}
+
+static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
+				const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
+		tunables->boostpulse_duration_val;
+	trace_cpufreq_interactive_boost("pulse");
+	if (!tunables->boosted)
+		cpufreq_interactive_boost(tunables);
+	return count;
+}
+
+static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
+		*tunables, char *buf)
+{
+	return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
+}
+
+static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
+		*tunables, const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	tunables->boostpulse_duration_val = val;
+	return count;
+}
+
+static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
+		char *buf)
+{
+	return sprintf(buf, "%u\n", tunables->io_is_busy);
+}
+
+static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
+		const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+	tunables->io_is_busy = val;
+	return count;
+}
+
+/*
+ * Create show/store routines
+ * - sys: One governor instance for complete SYSTEM
+ * - pol: One governor instance per struct cpufreq_policy
+ */
+#define show_gov_pol_sys(file_name)					\
+static ssize_t show_##file_name##_gov_sys				\
+(struct kobject *kobj, struct attribute *attr, char *buf)		\
+{									\
+	return show_##file_name(common_tunables, buf);			\
+}									\
+									\
+static ssize_t show_##file_name##_gov_pol				\
+(struct cpufreq_policy *policy, char *buf)				\
+{									\
+	return show_##file_name(policy->governor_data, buf);		\
+}
+
+#define store_gov_pol_sys(file_name)					\
+static ssize_t store_##file_name##_gov_sys				\
+(struct kobject *kobj, struct attribute *attr, const char *buf,		\
+	size_t count)							\
+{									\
+	return store_##file_name(common_tunables, buf, count);		\
+}									\
+									\
+static ssize_t store_##file_name##_gov_pol				\
+(struct cpufreq_policy *policy, const char *buf, size_t count)		\
+{									\
+	return store_##file_name(policy->governor_data, buf, count);	\
+}
+
+#define show_store_gov_pol_sys(file_name)				\
+show_gov_pol_sys(file_name);						\
+store_gov_pol_sys(file_name)
+
+show_store_gov_pol_sys(target_loads);
+show_store_gov_pol_sys(above_hispeed_delay);
+show_store_gov_pol_sys(hispeed_freq);
+show_store_gov_pol_sys(go_hispeed_load);
+show_store_gov_pol_sys(min_sample_time);
+show_store_gov_pol_sys(timer_rate);
+show_store_gov_pol_sys(timer_slack);
+show_store_gov_pol_sys(boost);
+store_gov_pol_sys(boostpulse);
+show_store_gov_pol_sys(boostpulse_duration);
+show_store_gov_pol_sys(io_is_busy);
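+/*
+ * e.g. show_store_gov_pol_sys(boost) expands to show_boost_gov_sys(),
+ * show_boost_gov_pol(), store_boost_gov_sys() and store_boost_gov_pol(),
+ * all thin wrappers around show_boost()/store_boost() above.
+ */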
+
+#define gov_sys_attr_rw(_name)						\
+static struct global_attr _name##_gov_sys =				\
+__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
+
+#define gov_pol_attr_rw(_name)						\
+static struct freq_attr _name##_gov_pol =				\
+__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
+
+#define gov_sys_pol_attr_rw(_name)					\
+	gov_sys_attr_rw(_name);						\
+	gov_pol_attr_rw(_name)
+
+gov_sys_pol_attr_rw(target_loads);
+gov_sys_pol_attr_rw(above_hispeed_delay);
+gov_sys_pol_attr_rw(hispeed_freq);
+gov_sys_pol_attr_rw(go_hispeed_load);
+gov_sys_pol_attr_rw(min_sample_time);
+gov_sys_pol_attr_rw(timer_rate);
+gov_sys_pol_attr_rw(timer_slack);
+gov_sys_pol_attr_rw(boost);
+gov_sys_pol_attr_rw(boostpulse_duration);
+gov_sys_pol_attr_rw(io_is_busy);
+
+static struct global_attr boostpulse_gov_sys =
+	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);
+
+static struct freq_attr boostpulse_gov_pol =
+	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);
+
+/* One Governor instance for entire system */
+static struct attribute *interactive_attributes_gov_sys[] = {
+	&target_loads_gov_sys.attr,
+	&above_hispeed_delay_gov_sys.attr,
+	&hispeed_freq_gov_sys.attr,
+	&go_hispeed_load_gov_sys.attr,
+	&min_sample_time_gov_sys.attr,
+	&timer_rate_gov_sys.attr,
+	&timer_slack_gov_sys.attr,
+	&boost_gov_sys.attr,
+	&boostpulse_gov_sys.attr,
+	&boostpulse_duration_gov_sys.attr,
+	&io_is_busy_gov_sys.attr,
+	NULL,
+};
+
+static struct attribute_group interactive_attr_group_gov_sys = {
+	.attrs = interactive_attributes_gov_sys,
+	.name = "interactive",
+};
+
+/* Per policy governor instance */
+static struct attribute *interactive_attributes_gov_pol[] = {
+	&target_loads_gov_pol.attr,
+	&above_hispeed_delay_gov_pol.attr,
+	&hispeed_freq_gov_pol.attr,
+	&go_hispeed_load_gov_pol.attr,
+	&min_sample_time_gov_pol.attr,
+	&timer_rate_gov_pol.attr,
+	&timer_slack_gov_pol.attr,
+	&boost_gov_pol.attr,
+	&boostpulse_gov_pol.attr,
+	&boostpulse_duration_gov_pol.attr,
+	&io_is_busy_gov_pol.attr,
+	NULL,
+};
+
+static struct attribute_group interactive_attr_group_gov_pol = {
+	.attrs = interactive_attributes_gov_pol,
+	.name = "interactive",
+};
+
+static struct attribute_group *get_sysfs_attr(void)
+{
+	if (have_governor_per_policy())
+		return &interactive_attr_group_gov_pol;
+	else
+		return &interactive_attr_group_gov_sys;
+}
+
+static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
+					     unsigned long val,
+					     void *data)
+{
+	switch (val) {
+	case IDLE_START:
+		cpufreq_interactive_idle_start();
+		break;
+	case IDLE_END:
+		cpufreq_interactive_idle_end();
+		break;
+	}
+
+	return 0;
+}
+
+static struct notifier_block cpufreq_interactive_idle_nb = {
+	.notifier_call = cpufreq_interactive_idle_notifier,
+};
+
+static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
+		unsigned int event)
+{
+	int rc;
+	unsigned int j;
+	struct cpufreq_interactive_cpuinfo *pcpu;
+	struct cpufreq_frequency_table *freq_table;
+	struct cpufreq_interactive_tunables *tunables;
+	unsigned long flags;
+
+	if (have_governor_per_policy())
+		tunables = policy->governor_data;
+	else
+		tunables = common_tunables;
+
+	WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));
+
+	switch (event) {
+	case CPUFREQ_GOV_POLICY_INIT:
+		if (have_governor_per_policy()) {
+			WARN_ON(tunables);
+		} else if (tunables) {
+			tunables->usage_count++;
+			policy->governor_data = tunables;
+			return 0;
+		}
+
+		tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
+		if (!tunables) {
+			pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
+			return -ENOMEM;
+		}
+
+		tunables->usage_count = 1;
+		tunables->above_hispeed_delay = default_above_hispeed_delay;
+		tunables->nabove_hispeed_delay =
+			ARRAY_SIZE(default_above_hispeed_delay);
+		tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
+		tunables->target_loads = default_target_loads;
+		tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
+		tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
+		tunables->timer_rate = DEFAULT_TIMER_RATE;
+		tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
+		tunables->timer_slack_val = DEFAULT_TIMER_SLACK;
+
+		spin_lock_init(&tunables->target_loads_lock);
+		spin_lock_init(&tunables->above_hispeed_delay_lock);
+
+		policy->governor_data = tunables;
+		if (!have_governor_per_policy())
+			common_tunables = tunables;
+
+		rc = sysfs_create_group(get_governor_parent_kobj(policy),
+				get_sysfs_attr());
+		if (rc) {
+			kfree(tunables);
+			policy->governor_data = NULL;
+			if (!have_governor_per_policy())
+				common_tunables = NULL;
+			return rc;
+		}
+
+		if (!policy->governor->initialized) {
+			idle_notifier_register(&cpufreq_interactive_idle_nb);
+			cpufreq_register_notifier(&cpufreq_notifier_block,
+					CPUFREQ_TRANSITION_NOTIFIER);
+		}
+
+		break;
+
+	case CPUFREQ_GOV_POLICY_EXIT:
+		if (!--tunables->usage_count) {
+			if (policy->governor->initialized == 1) {
+				cpufreq_unregister_notifier(&cpufreq_notifier_block,
+						CPUFREQ_TRANSITION_NOTIFIER);
+				idle_notifier_unregister(&cpufreq_interactive_idle_nb);
+			}
+
+			sysfs_remove_group(get_governor_parent_kobj(policy),
+					get_sysfs_attr());
+			kfree(tunables);
+			common_tunables = NULL;
+		}
+
+		policy->governor_data = NULL;
+		break;
+
+	case CPUFREQ_GOV_START:
+		mutex_lock(&gov_lock);
+
+		freq_table = cpufreq_frequency_get_table(policy->cpu);
+		if (!tunables->hispeed_freq)
+			tunables->hispeed_freq = policy->max;
+
+		for_each_cpu(j, policy->cpus) {
+			pcpu = &per_cpu(cpuinfo, j);
+			pcpu->policy = policy;
+			pcpu->target_freq = policy->cur;
+			pcpu->freq_table = freq_table;
+			pcpu->floor_freq = pcpu->target_freq;
+			pcpu->floor_validate_time =
+				ktime_to_us(ktime_get());
+			pcpu->hispeed_validate_time =
+				pcpu->floor_validate_time;
+			pcpu->max_freq = policy->max;
+			down_write(&pcpu->enable_sem);
+			del_timer_sync(&pcpu->cpu_timer);
+			del_timer_sync(&pcpu->cpu_slack_timer);
+			cpufreq_interactive_timer_start(tunables, j);
+			pcpu->governor_enabled = 1;
+			up_write(&pcpu->enable_sem);
+		}
+
+		mutex_unlock(&gov_lock);
+		break;
+
+	case CPUFREQ_GOV_STOP:
+		mutex_lock(&gov_lock);
+		for_each_cpu(j, policy->cpus) {
+			pcpu = &per_cpu(cpuinfo, j);
+			down_write(&pcpu->enable_sem);
+			pcpu->governor_enabled = 0;
+			del_timer_sync(&pcpu->cpu_timer);
+			del_timer_sync(&pcpu->cpu_slack_timer);
+			up_write(&pcpu->enable_sem);
+		}
+
+		mutex_unlock(&gov_lock);
+		break;
+
+	case CPUFREQ_GOV_LIMITS:
+		if (policy->max < policy->cur)
+			__cpufreq_driver_target(policy,
+					policy->max, CPUFREQ_RELATION_H);
+		else if (policy->min > policy->cur)
+			__cpufreq_driver_target(policy,
+					policy->min, CPUFREQ_RELATION_L);
+		for_each_cpu(j, policy->cpus) {
+			pcpu = &per_cpu(cpuinfo, j);
+
+			down_read(&pcpu->enable_sem);
+			if (pcpu->governor_enabled == 0) {
+				up_read(&pcpu->enable_sem);
+				continue;
+			}
+
+			spin_lock_irqsave(&pcpu->target_freq_lock, flags);
+			if (policy->max < pcpu->target_freq)
+				pcpu->target_freq = policy->max;
+			else if (policy->min > pcpu->target_freq)
+				pcpu->target_freq = policy->min;
+
+			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
+			up_read(&pcpu->enable_sem);
+
+			/* Reschedule the timer only if policy->max is
+			 * raised. Delete the timers first; otherwise the
+			 * timer callback may fail to acquire the semaphore
+			 * and return without re-arming, leaving the timer
+			 * stopped unexpectedly.
+			 */
+
+			if (policy->max > pcpu->max_freq) {
+				down_write(&pcpu->enable_sem);
+				del_timer_sync(&pcpu->cpu_timer);
+				del_timer_sync(&pcpu->cpu_slack_timer);
+				cpufreq_interactive_timer_start(tunables, j);
+				up_write(&pcpu->enable_sem);
+			}
+
+			pcpu->max_freq = policy->max;
+		}
+		break;
+	}
+	return 0;
+}
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+static
+#endif
+struct cpufreq_governor cpufreq_gov_interactive = {
+	.name = "interactive",
+	.governor = cpufreq_governor_interactive,
+	.max_transition_latency = 10000000,
+	.owner = THIS_MODULE,
+};
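+
+/*
+ * Illustrative usage (an assumption, not part of the driver): once
+ * registered, the governor is selected per policy through the standard
+ * cpufreq sysfs interface, e.g.
+ *
+ *	echo interactive > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
+ *
+ * The tunables initialized in CPUFREQ_GOV_POLICY_INIT then appear under
+ * cpufreq/interactive/, globally or per policy depending on
+ * have_governor_per_policy().
+ */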
+
+static void cpufreq_interactive_nop_timer(unsigned long data)
+{
+}
+
+static int __init cpufreq_interactive_init(void)
+{
+	unsigned int i;
+	struct cpufreq_interactive_cpuinfo *pcpu;
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+
+	/* Initialize per-cpu timers */
+	for_each_possible_cpu(i) {
+		pcpu = &per_cpu(cpuinfo, i);
+		init_timer_deferrable(&pcpu->cpu_timer);
+		pcpu->cpu_timer.function = cpufreq_interactive_timer;
+		pcpu->cpu_timer.data = i;
+		init_timer(&pcpu->cpu_slack_timer);
+		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
+		spin_lock_init(&pcpu->load_lock);
+		spin_lock_init(&pcpu->target_freq_lock);
+		init_rwsem(&pcpu->enable_sem);
+	}
+
+	spin_lock_init(&speedchange_cpumask_lock);
+	mutex_init(&gov_lock);
+	speedchange_task =
+		kthread_create(cpufreq_interactive_speedchange_task, NULL,
+			       "cfinteractive");
+	if (IS_ERR(speedchange_task))
+		return PTR_ERR(speedchange_task);
+
+	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
+	get_task_struct(speedchange_task);
+
+	/* NB: wake up so the thread does not look hung to the freezer */
+	wake_up_process(speedchange_task);
+
+	return cpufreq_register_governor(&cpufreq_gov_interactive);
+}
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+fs_initcall(cpufreq_interactive_init);
+#else
+module_init(cpufreq_interactive_init);
+#endif
+
+static void __exit cpufreq_interactive_exit(void)
+{
+	cpufreq_unregister_governor(&cpufreq_gov_interactive);
+	kthread_stop(speedchange_task);
+	put_task_struct(speedchange_task);
+}
+
+module_exit(cpufreq_interactive_exit);
+
+MODULE_AUTHOR("Mike Chan <mike@android.com>");
+MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
+	"Latency sensitive workloads");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 7fb6002..0cfa547 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -20,6 +20,10 @@
 #include <linux/kobject.h>
 #include <linux/spinlock.h>
 #include <linux/notifier.h>
+#include <linux/sort.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/sched.h>
 #include <asm/cputime.h>
 
 static spinlock_t cpufreq_stats_lock;
@@ -38,7 +42,28 @@
 #endif
 };
 
+struct all_cpufreq_stats {
+	unsigned int state_num;
+	cputime64_t *time_in_state;
+	unsigned int *freq_table;
+};
+
+struct cpufreq_power_stats {
+	unsigned int state_num;
+	unsigned int *curr;
+	unsigned int *freq_table;
+};
+
+struct all_freq_table {
+	unsigned int *freq_table;
+	unsigned int table_size;
+};
+
+static struct all_freq_table *all_freq_table;
+
+static DEFINE_PER_CPU(struct all_cpufreq_stats *, all_cpufreq_stats);
 static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table);
+static DEFINE_PER_CPU(struct cpufreq_power_stats *, cpufreq_power_stats);
 
 struct cpufreq_stats_attribute {
 	struct attribute attr;
@@ -48,14 +73,24 @@
 static int cpufreq_stats_update(unsigned int cpu)
 {
 	struct cpufreq_stats *stat;
+	struct all_cpufreq_stats *all_stat;
 	unsigned long long cur_time;
 
 	cur_time = get_jiffies_64();
 	spin_lock(&cpufreq_stats_lock);
 	stat = per_cpu(cpufreq_stats_table, cpu);
-	if (stat->time_in_state)
+	all_stat = per_cpu(all_cpufreq_stats, cpu);
+	if (!stat) {
+		spin_unlock(&cpufreq_stats_lock);
+		return 0;
+	}
+	if (stat->time_in_state) {
 		stat->time_in_state[stat->last_index] +=
 			cur_time - stat->last_time;
+		if (all_stat)
+			all_stat->time_in_state[stat->last_index] +=
+					cur_time - stat->last_time;
+	}
 	stat->last_time = cur_time;
 	spin_unlock(&cpufreq_stats_lock);
 	return 0;
@@ -86,6 +121,104 @@
 	return len;
 }
 
+static int get_index_all_cpufreq_stat(struct all_cpufreq_stats *all_stat,
+		unsigned int freq)
+{
+	int i;
+	if (!all_stat)
+		return -1;
+	for (i = 0; i < all_stat->state_num; i++) {
+		if (all_stat->freq_table[i] == freq)
+			return i;
+	}
+	return -1;
+}
+
+void acct_update_power(struct task_struct *task, cputime_t cputime)
+{
+	struct cpufreq_power_stats *powerstats;
+	struct cpufreq_stats *stats;
+	unsigned int cpu_num, curr;
+
+	if (!task)
+		return;
+	cpu_num = task_cpu(task);
+	powerstats = per_cpu(cpufreq_power_stats, cpu_num);
+	stats = per_cpu(cpufreq_stats_table, cpu_num);
+	if (!powerstats || !stats)
+		return;
+
+	curr = powerstats->curr[stats->last_index];
+	if (task->cpu_power != ULLONG_MAX)
+		task->cpu_power += curr * cputime_to_usecs(cputime);
+}
+EXPORT_SYMBOL_GPL(acct_update_power);
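+
+/*
+ * Worked example for the accounting above (values are illustrative): with a
+ * table current of 300 mA at the task's last-seen frequency and 10 ms of
+ * newly charged cputime, cpu_power grows by 300 * 10000 = 3000000, i.e.
+ * charge is accumulated in mA * us units.
+ */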
+
+static ssize_t show_current_in_state(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	ssize_t len = 0;
+	unsigned int i, cpu;
+	struct cpufreq_power_stats *powerstats;
+
+	spin_lock(&cpufreq_stats_lock);
+	for_each_possible_cpu(cpu) {
+		powerstats = per_cpu(cpufreq_power_stats, cpu);
+		if (!powerstats)
+			continue;
+		len += scnprintf(buf + len, PAGE_SIZE - len, "CPU%d:", cpu);
+		for (i = 0; i < powerstats->state_num; i++)
+			len += scnprintf(buf + len, PAGE_SIZE - len,
+					"%d=%d ", powerstats->freq_table[i],
+					powerstats->curr[i]);
+		len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+	}
+	spin_unlock(&cpufreq_stats_lock);
+	return len;
+}
+
+static ssize_t show_all_time_in_state(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	ssize_t len = 0;
+	unsigned int i, cpu, freq, index;
+	struct all_cpufreq_stats *all_stat;
+	struct cpufreq_policy *policy;
+
+	len += scnprintf(buf + len, PAGE_SIZE - len, "freq\t\t");
+	for_each_possible_cpu(cpu) {
+		len += scnprintf(buf + len, PAGE_SIZE - len, "cpu%d\t\t", cpu);
+		if (cpu_online(cpu))
+			cpufreq_stats_update(cpu);
+	}
+
+	if (!all_freq_table)
+		goto out;
+	for (i = 0; i < all_freq_table->table_size; i++) {
+		freq = all_freq_table->freq_table[i];
+		len += scnprintf(buf + len, PAGE_SIZE - len, "\n%u\t\t", freq);
+		for_each_possible_cpu(cpu) {
+			policy = cpufreq_cpu_get(cpu);
+			if (policy == NULL)
+				continue;
+			all_stat = per_cpu(all_cpufreq_stats, policy->cpu);
+			index = get_index_all_cpufreq_stat(all_stat, freq);
+			if (index != -1) {
+				len += scnprintf(buf + len, PAGE_SIZE - len,
+					"%llu\t\t", (unsigned long long)
+					cputime64_to_clock_t(all_stat->time_in_state[index]));
+			} else {
+				len += scnprintf(buf + len, PAGE_SIZE - len,
+						"N/A\t\t");
+			}
+			cpufreq_cpu_put(policy);
+		}
+	}
+
+out:
+	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+	return len;
+}
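+
+/*
+ * Example all_time_in_state output (frequencies and counts are
+ * illustrative; times are reported in clock_t units):
+ *
+ *	freq		cpu0		cpu1
+ *	500000		123456		N/A
+ *	1000000		7890		4321
+ *
+ * "N/A" marks a frequency that is absent from that CPU's frequency table.
+ */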
+
 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
 static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
 {
@@ -149,6 +282,12 @@
 	.name = "stats"
 };
 
+static struct kobj_attribute _attr_all_time_in_state = __ATTR(all_time_in_state,
+		0444, show_all_time_in_state, NULL);
+
+static struct kobj_attribute _attr_current_in_state = __ATTR(current_in_state,
+		0444, show_current_in_state, NULL);
+
 static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
 {
 	int index;
@@ -195,10 +334,50 @@
 	cpufreq_cpu_put(policy);
 }
 
-static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
-		struct cpufreq_frequency_table *table)
+static void cpufreq_allstats_free(void)
 {
-	unsigned int i, j, count = 0, ret = 0;
+	int cpu;
+	struct all_cpufreq_stats *all_stat;
+
+	sysfs_remove_file(cpufreq_global_kobject,
+						&_attr_all_time_in_state.attr);
+
+	for_each_possible_cpu(cpu) {
+		all_stat = per_cpu(all_cpufreq_stats, cpu);
+		if (!all_stat)
+			continue;
+		kfree(all_stat->time_in_state);
+		kfree(all_stat);
+		per_cpu(all_cpufreq_stats, cpu) = NULL;
+	}
+	if (all_freq_table) {
+		kfree(all_freq_table->freq_table);
+		kfree(all_freq_table);
+		all_freq_table = NULL;
+	}
+}
+
+static void cpufreq_powerstats_free(void)
+{
+	int cpu;
+	struct cpufreq_power_stats *powerstats;
+
+	sysfs_remove_file(cpufreq_global_kobject, &_attr_current_in_state.attr);
+
+	for_each_possible_cpu(cpu) {
+		powerstats = per_cpu(cpufreq_power_stats, cpu);
+		if (!powerstats)
+			continue;
+		kfree(powerstats->curr);
+		kfree(powerstats);
+		per_cpu(cpufreq_power_stats, cpu) = NULL;
+	}
+}
+
+static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
+		struct cpufreq_frequency_table *table, int count)
+{
+	unsigned int i, j, ret = 0;
 	struct cpufreq_stats *stat;
 	struct cpufreq_policy *data;
 	unsigned int alloc_size;
@@ -222,12 +401,6 @@
 	stat->cpu = cpu;
 	per_cpu(cpufreq_stats_table, cpu) = stat;
 
-	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-		unsigned int freq = table[i].frequency;
-		if (freq == CPUFREQ_ENTRY_INVALID)
-			continue;
-		count++;
-	}
 
 	alloc_size = count * sizeof(int) + count * sizeof(u64);
 
@@ -281,13 +454,151 @@
 	stat->cpu = policy->cpu;
 }
 
+static void cpufreq_powerstats_create(unsigned int cpu,
+		struct cpufreq_frequency_table *table, int count)
+{
+	unsigned int alloc_size, i = 0, j = 0, ret = 0;
+	struct cpufreq_power_stats *powerstats;
+	struct device_node *cpu_node;
+	char device_path[16];
+
+	powerstats = kzalloc(sizeof(struct cpufreq_power_stats),
+			GFP_KERNEL);
+	if (!powerstats)
+		return;
+
+	/* Allocate memory for the per-cpu freq table as well as the
+	 * current value per freq */
+	alloc_size = count * sizeof(unsigned int) +
+		count * sizeof(unsigned int);
+	powerstats->curr = kzalloc(alloc_size, GFP_KERNEL);
+	if (!powerstats->curr) {
+		kfree(powerstats);
+		return;
+	}
+	powerstats->freq_table = powerstats->curr + count;
+
+	spin_lock(&cpufreq_stats_lock);
+	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END && j < count; i++) {
+		unsigned int freq = table[i].frequency;
+
+		if (freq == CPUFREQ_ENTRY_INVALID)
+			continue;
+		powerstats->freq_table[j++] = freq;
+	}
+	powerstats->state_num = j;
+
+	snprintf(device_path, sizeof(device_path), "/cpus/cpu@%d", cpu);
+	cpu_node = of_find_node_by_path(device_path);
+	if (cpu_node) {
+		ret = of_property_read_u32_array(cpu_node, "current",
+				powerstats->curr, count);
+		if (ret) {
+			kfree(powerstats->curr);
+			kfree(powerstats);
+			powerstats = NULL;
+		}
+	}
+	per_cpu(cpufreq_power_stats, cpu) = powerstats;
+	spin_unlock(&cpufreq_stats_lock);
+}
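+
+/*
+ * The of_property_read_u32_array() lookup above expects a per-cpu device
+ * tree node that carries one current value per valid frequency; an assumed
+ * example (three OPPs, values in mA):
+ *
+ *	cpus {
+ *		cpu@0 {
+ *			current = <120 180 250>;
+ *		};
+ *	};
+ */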
+
+static int compare_for_sort(const void *lhs_ptr, const void *rhs_ptr)
+{
+	unsigned int lhs = *(const unsigned int *)(lhs_ptr);
+	unsigned int rhs = *(const unsigned int *)(rhs_ptr);
+	if (lhs < rhs)
+		return -1;
+	if (lhs > rhs)
+		return 1;
+	return 0;
+}
+
+static bool check_all_freq_table(unsigned int freq)
+{
+	int i;
+	for (i = 0; i < all_freq_table->table_size; i++) {
+		if (freq == all_freq_table->freq_table[i])
+			return true;
+	}
+	return false;
+}
+
+static void create_all_freq_table(void)
+{
+	all_freq_table = kzalloc(sizeof(struct all_freq_table),
+			GFP_KERNEL);
+	if (!all_freq_table)
+		pr_warn("could not allocate memory for all_freq_table\n");
+	return;
+}
+
+static void add_all_freq_table(unsigned int freq)
+{
+	unsigned int size;
+	unsigned int *new_table;
+
+	size = sizeof(unsigned int) * (all_freq_table->table_size + 1);
+	new_table = krealloc(all_freq_table->freq_table, size, GFP_ATOMIC);
+	/* krealloc returns NULL on failure and leaves the old block intact */
+	if (!new_table) {
+		pr_warn("Could not reallocate memory for freq_table\n");
+		return;
+	}
+	all_freq_table->freq_table = new_table;
+	all_freq_table->freq_table[all_freq_table->table_size++] = freq;
+}
+
+static void cpufreq_allstats_create(unsigned int cpu,
+		struct cpufreq_frequency_table *table, int count)
+{
+	int i, j = 0;
+	unsigned int alloc_size;
+	struct all_cpufreq_stats *all_stat;
+	bool sort_needed = false;
+
+	all_stat = kzalloc(sizeof(struct all_cpufreq_stats),
+			GFP_KERNEL);
+	if (!all_stat) {
+		pr_warn("Cannot allocate memory for cpufreq stats\n");
+		return;
+	}
+
+	/* Allocate memory for the per-cpu freq table as well as
+	 * clockticks per freq */
+	alloc_size = count * sizeof(int) + count * sizeof(cputime64_t);
+	all_stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
+	if (!all_stat->time_in_state) {
+		pr_warn("Cannot allocate memory for cpufreq time_in_state\n");
+		kfree(all_stat);
+		all_stat = NULL;
+		return;
+	}
+	all_stat->freq_table = (unsigned int *)
+		(all_stat->time_in_state + count);
+
+	spin_lock(&cpufreq_stats_lock);
+	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+		unsigned int freq = table[i].frequency;
+		if (freq == CPUFREQ_ENTRY_INVALID)
+			continue;
+		all_stat->freq_table[j++] = freq;
+		if (all_freq_table && !check_all_freq_table(freq)) {
+			add_all_freq_table(freq);
+			sort_needed = true;
+		}
+	}
+	if (sort_needed)
+		sort(all_freq_table->freq_table, all_freq_table->table_size,
+				sizeof(unsigned int), &compare_for_sort, NULL);
+	all_stat->state_num = j;
+	per_cpu(all_cpufreq_stats, cpu) = all_stat;
+	spin_unlock(&cpufreq_stats_lock);
+}
+
 static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
 		unsigned long val, void *data)
 {
-	int ret;
+	int ret, count = 0, i;
 	struct cpufreq_policy *policy = data;
 	struct cpufreq_frequency_table *table;
-	unsigned int cpu = policy->cpu;
+	unsigned int cpu_num, cpu = policy->cpu;
 
 	if (val == CPUFREQ_UPDATE_POLICY_CPU) {
 		cpufreq_stats_update_policy_cpu(policy);
@@ -299,7 +610,24 @@
 	table = cpufreq_frequency_get_table(cpu);
 	if (!table)
 		return 0;
-	ret = cpufreq_stats_create_table(policy, table);
+
+	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+		unsigned int freq = table[i].frequency;
+
+		if (freq == CPUFREQ_ENTRY_INVALID)
+			continue;
+		count++;
+	}
+
+	if (!per_cpu(all_cpufreq_stats, cpu))
+		cpufreq_allstats_create(cpu, table, count);
+
+	for_each_possible_cpu(cpu_num) {
+		if (!per_cpu(cpufreq_power_stats, cpu_num))
+			cpufreq_powerstats_create(cpu_num, table, count);
+	}
+
+	ret = cpufreq_stats_create_table(policy, table, count);
 	if (ret)
 		return ret;
 	return 0;
@@ -341,6 +669,43 @@
 	return 0;
 }
 
+static int cpufreq_stats_create_table_cpu(unsigned int cpu)
+{
+	struct cpufreq_policy *policy;
+	struct cpufreq_frequency_table *table;
+	int i, count, cpu_num, ret = -ENODEV;
+
+	policy = cpufreq_cpu_get(cpu);
+	if (!policy)
+		return -ENODEV;
+
+	table = cpufreq_frequency_get_table(cpu);
+	if (!table)
+		goto out;
+
+	count = 0;
+	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+		unsigned int freq = table[i].frequency;
+
+		if (freq != CPUFREQ_ENTRY_INVALID)
+			count++;
+	}
+
+	if (!per_cpu(all_cpufreq_stats, cpu))
+		cpufreq_allstats_create(cpu, table, count);
+
+	for_each_possible_cpu(cpu_num) {
+		if (!per_cpu(cpufreq_power_stats, cpu_num))
+			cpufreq_powerstats_create(cpu_num, table, count);
+	}
+
+	ret = cpufreq_stats_create_table(policy, table, count);
+
+out:
+	cpufreq_cpu_put(policy);
+	return ret;
+}
+
 static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
 					       unsigned long action,
 					       void *hcpu)
@@ -360,6 +725,10 @@
 	case CPU_DEAD_FROZEN:
 		cpufreq_stats_free_table(cpu);
 		break;
+	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
+		cpufreq_stats_create_table_cpu(cpu);
+		break;
 	}
 	return NOTIFY_OK;
 }
@@ -404,6 +773,17 @@
 		return ret;
 	}
 
+	create_all_freq_table();
+	ret = sysfs_create_file(cpufreq_global_kobject,
+			&_attr_all_time_in_state.attr);
+	if (ret)
+		pr_warn("Cannot create sysfs file for cpufreq stats\n");
+
+	ret = sysfs_create_file(cpufreq_global_kobject,
+			&_attr_current_in_state.attr);
+	if (ret)
+		pr_warn("Cannot create sysfs file for cpufreq current stats\n");
+
 	return 0;
 }
 static void __exit cpufreq_stats_exit(void)
@@ -419,6 +799,8 @@
 		cpufreq_stats_free_table(cpu);
 		cpufreq_stats_free_sysfs(cpu);
 	}
+	cpufreq_allstats_free();
+	cpufreq_powerstats_free();
 }
 
 MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
diff --git a/drivers/cpufreq/sfi-cpufreq.c b/drivers/cpufreq/sfi-cpufreq.c
new file mode 100644
index 0000000..2f9f9a2
--- /dev/null
+++ b/drivers/cpufreq/sfi-cpufreq.c
@@ -0,0 +1,581 @@
+/*
+ * sfi_cpufreq.c - SFI Processor P-States Driver
+ *
+ * (C) 2010-2011 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Vishwesh M Rudramuni
+ * Contact information: Vishwesh Rudramuni <vishwesh.m.rudramuni@intel.com>
+ */
+
+/*
+ * This SFI Processor P-States Driver reuses most of the code available
+ * in the ACPI cpufreq driver.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/sfi.h>
+#include <linux/io.h>
+
+#include <asm/msr.h>
+#include <asm/processor.h>
+#include <asm/cpufeature.h>
+
+#include "sfi-cpufreq.h"
+#include "mperf.h"
+
+MODULE_AUTHOR("Vishwesh Rudramuni");
+MODULE_DESCRIPTION("SFI Processor P-States Driver");
+MODULE_LICENSE("GPL");
+
+DEFINE_PER_CPU(struct sfi_processor *, sfi_processors);
+
+static DEFINE_MUTEX(performance_mutex);
+static int sfi_cpufreq_num;
+static u32 sfi_cpu_num;
+
+#define SFI_FREQ_MAX		32
+#define INTEL_MSR_RANGE		0xffff
+#define INTEL_MSR_BUSRATIO_MASK	0xff00
+#define SFI_CPU_MAX		8
+
+#define X86_ATOM_ARCH_SLM	0x4a
+
+struct sfi_cpufreq_data {
+	struct sfi_processor_performance *sfi_data;
+	struct cpufreq_frequency_table *freq_table;
+	unsigned int max_freq;
+	unsigned int resume;
+};
+
+static DEFINE_PER_CPU(struct sfi_cpufreq_data *, drv_data);
+struct sfi_freq_table_entry sfi_cpufreq_array[SFI_FREQ_MAX];
+static struct sfi_cpu_table_entry sfi_cpu_array[SFI_CPU_MAX];
+
+/* sfi_perf_data is a pointer to percpu data. */
+static struct sfi_processor_performance *sfi_perf_data;
+
+static struct cpufreq_driver sfi_cpufreq_driver;
+
+static int parse_freq(struct sfi_table_header *table)
+{
+	struct sfi_table_simple *sb;
+	struct sfi_freq_table_entry *pentry;
+	int totallen;
+
+	sb = (struct sfi_table_simple *)table;
+	if (!sb) {
+		printk(KERN_WARNING "SFI: Unable to map FREQ\n");
+		return -ENODEV;
+	}
+
+	if (!sfi_cpufreq_num) {
+		sfi_cpufreq_num = SFI_GET_NUM_ENTRIES(sb,
+			 struct sfi_freq_table_entry);
+		pentry = (struct sfi_freq_table_entry *)sb->pentry;
+		totallen = sfi_cpufreq_num * sizeof(*pentry);
+		memcpy(sfi_cpufreq_array, pentry, totallen);
+	}
+
+	return 0;
+}
+
+static int sfi_processor_get_performance_states(struct sfi_processor *pr)
+{
+	int result = 0;
+	int i;
+
+	pr->performance->state_count = sfi_cpufreq_num;
+	pr->performance->states =
+	    kmalloc(sizeof(struct sfi_processor_px) * sfi_cpufreq_num,
+		    GFP_KERNEL);
+	if (!pr->performance->states)
+		return -ENOMEM;
+
+	printk(KERN_INFO "Num p-states %d\n", sfi_cpufreq_num);
+
+	/* Populate the P-states info from the SFI table here */
+	for (i = 0; i < sfi_cpufreq_num; i++) {
+		pr->performance->states[i].core_frequency =
+			sfi_cpufreq_array[i].freq_mhz;
+		pr->performance->states[i].transition_latency =
+			sfi_cpufreq_array[i].latency;
+		pr->performance->states[i].control =
+			sfi_cpufreq_array[i].ctrl_val;
+		printk(KERN_INFO "State [%d]: core_frequency[%d] transition_latency[%d] control[0x%x]\n",
+			i,
+			(u32) pr->performance->states[i].core_frequency,
+			(u32) pr->performance->states[i].transition_latency,
+			(u32) pr->performance->states[i].control);
+	}
+
+	return result;
+}
+
+static int sfi_processor_register_performance(struct sfi_processor_performance
+				    *performance, unsigned int cpu)
+{
+	struct sfi_processor *pr;
+
+	mutex_lock(&performance_mutex);
+
+	pr = per_cpu(sfi_processors, cpu);
+	if (!pr) {
+		mutex_unlock(&performance_mutex);
+		return -ENODEV;
+	}
+
+	if (pr->performance) {
+		mutex_unlock(&performance_mutex);
+		return -EBUSY;
+	}
+
+	WARN_ON(!performance);
+
+	pr->performance = performance;
+
+	/* parse the freq table from sfi */
+	sfi_cpufreq_num = 0;
+	sfi_table_parse(SFI_SIG_FREQ, NULL, NULL, parse_freq);
+
+	sfi_processor_get_performance_states(pr);
+
+	mutex_unlock(&performance_mutex);
+	return 0;
+}
+
+void sfi_processor_unregister_performance(struct sfi_processor_performance
+				      *performance, unsigned int cpu)
+{
+	struct sfi_processor *pr;
+
+	mutex_lock(&performance_mutex);
+
+	pr = per_cpu(sfi_processors, cpu);
+	if (!pr) {
+		mutex_unlock(&performance_mutex);
+		return;
+	}
+
+	if (pr->performance)
+		kfree(pr->performance->states);
+	pr->performance = NULL;
+
+	mutex_unlock(&performance_mutex);
+
+	return;
+}
+
+static unsigned extract_freq(u32 msr, struct sfi_cpufreq_data *data)
+{
+	int i;
+	struct sfi_processor_performance *perf;
+	u32 sfi_ctrl;
+
+	msr &= INTEL_MSR_BUSRATIO_MASK;
+	perf = data->sfi_data;
+
+	for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+		sfi_ctrl = perf->states[data->freq_table[i].index].control
+			& INTEL_MSR_BUSRATIO_MASK;
+		if (sfi_ctrl == msr)
+			return data->freq_table[i].frequency;
+	}
+	return data->freq_table[0].frequency;
+}
+
+static u32 get_cur_val(const struct cpumask *mask)
+{
+	u32 val, dummy;
+
+	if (unlikely(cpumask_empty(mask)))
+		return 0;
+
+	rdmsr_on_cpu(cpumask_any(mask), MSR_IA32_PERF_STATUS, &val, &dummy);
+
+	return val;
+}
+
+static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
+{
+	struct sfi_cpufreq_data *data = per_cpu(drv_data, cpu);
+	unsigned int freq;
+	unsigned int cached_freq;
+
+	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);
+
+	if (unlikely(data == NULL ||
+		data->sfi_data == NULL || data->freq_table == NULL)) {
+		return 0;
+	}
+
+	cached_freq = data->freq_table[data->sfi_data->state].frequency;
+	freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
+	if (freq != cached_freq) {
+		/*
+		 * The dreaded BIOS frequency change behind our back.
+		 * Force set the frequency on next target call.
+		 */
+		data->resume = 1;
+	}
+
+	pr_debug("cur freq = %u\n", freq);
+
+	return freq;
+}
+
+static int sfi_cpufreq_target(struct cpufreq_policy *policy,
+			       unsigned int target_freq, unsigned int relation)
+{
+	struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+	struct sfi_processor_performance *perf;
+	struct cpufreq_freqs freqs;
+	unsigned int next_state = 0; /* Index into freq_table */
+	unsigned int next_perf_state = 0; /* Index into perf table */
+	int result = 0;
+	u32 lo, hi;
+
+	pr_debug("sfi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
+
+	if (unlikely(data == NULL ||
+	     data->sfi_data == NULL || data->freq_table == NULL)) {
+		return -ENODEV;
+	}
+
+	perf = data->sfi_data;
+	result = cpufreq_frequency_table_target(policy,
+						data->freq_table,
+						target_freq,
+						relation, &next_state);
+	if (unlikely(result))
+		return -ENODEV;
+
+	next_perf_state = data->freq_table[next_state].index;
+	if (perf->state == next_perf_state) {
+		if (unlikely(data->resume)) {
+			pr_debug("Called after resume, resetting to P%d\n",
+				next_perf_state);
+			data->resume = 0;
+		} else {
+			pr_debug("Already at target state (P%d)\n",
+				next_perf_state);
+			return 0;
+		}
+	}
+
+	freqs.old = perf->states[perf->state].core_frequency * 1000;
+	freqs.new = data->freq_table[next_state].frequency;
+
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+	rdmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, &lo, &hi);
+	lo = (lo & ~INTEL_MSR_RANGE) |
+		((u32) perf->states[next_perf_state].control & INTEL_MSR_RANGE);
+	wrmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, lo, hi);
+
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+	perf->state = next_perf_state;
+
+	return result;
+}
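+
+/*
+ * Example of the PERF_CTL update above (illustrative values): with
+ * INTEL_MSR_RANGE = 0xffff, lo = 0x12340622 and a control value of 0x0815,
+ * the value written back is (0x12340000 | 0x0815) = 0x12340815; only the
+ * low 16 bits select the new P-state.
+ */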
+
+static int sfi_cpufreq_verify(struct cpufreq_policy *policy)
+{
+	struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+
+	pr_debug("sfi_cpufreq_verify\n");
+
+	return cpufreq_frequency_table_verify(policy, data->freq_table);
+}
+
+/*
+ * sfi_cpufreq_early_init - initialize SFI P-States library
+ *
+ * Initialize the SFI P-States library (drivers/sfi/processor_perflib.c)
+ * so that the correct frequency and voltage pairings can be determined.
+ */
+static int __init sfi_cpufreq_early_init(void)
+{
+	sfi_perf_data = alloc_percpu(struct sfi_processor_performance);
+	if (!sfi_perf_data) {
+		pr_debug("Memory allocation error for sfi_perf_data.\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int sfi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+	unsigned int i;
+	unsigned int valid_states = 0;
+	unsigned int cpu = policy->cpu;
+	struct sfi_cpufreq_data *data;
+	unsigned int result = 0;
+	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
+	struct sfi_processor_performance *perf;
+	u32 lo, hi;
+
+	pr_debug("sfi_cpufreq_cpu_init CPU:%d\n", policy->cpu);
+
+	data = kzalloc(sizeof(struct sfi_cpufreq_data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->sfi_data = per_cpu_ptr(sfi_perf_data, cpu);
+	per_cpu(drv_data, cpu) = data;
+
+	sfi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
+
+	result = sfi_processor_register_performance(data->sfi_data, cpu);
+	if (result)
+		goto err_free;
+
+	perf = data->sfi_data;
+	policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
+
+	cpumask_set_cpu(policy->cpu, policy->cpus);
+	cpumask_set_cpu(policy->cpu, policy->related_cpus);
+
+	/* capability check */
+	if (perf->state_count <= 1) {
+		pr_debug("No P-States\n");
+		result = -ENODEV;
+		goto err_unreg;
+	}
+
+	data->freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
+		    (perf->state_count+1), GFP_KERNEL);
+	if (!data->freq_table) {
+		result = -ENOMEM;
+		goto err_unreg;
+	}
+
+	/* detect transition latency */
+	policy->cpuinfo.transition_latency = 0;
+	for (i = 0; i < perf->state_count; i++) {
+		if ((perf->states[i].transition_latency * 1000) >
+		    policy->cpuinfo.transition_latency)
+			policy->cpuinfo.transition_latency =
+			    perf->states[i].transition_latency * 1000;
+	}
+
+	data->max_freq = perf->states[0].core_frequency * 1000;
+	/* table init */
+	for (i = 0; i < perf->state_count; i++) {
+		if (i > 0 && perf->states[i].core_frequency >=
+		    data->freq_table[valid_states-1].frequency / 1000)
+			continue;
+
+		data->freq_table[valid_states].index = i;
+		data->freq_table[valid_states].frequency =
+		    perf->states[i].core_frequency * 1000;
+		valid_states++;
+	}
+	data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
+	perf->state = 0;
+
+	result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
+	if (result)
+		goto err_freqfree;
+
+	policy->cur = get_cur_freq_on_cpu(cpu);
+
+	/* Check for APERF/MPERF support in hardware */
+	if (cpu_has(c, X86_FEATURE_APERFMPERF))
+		sfi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
+
+	/* enable eHALT for SLM */
+	if (boot_cpu_data.x86_model == X86_ATOM_ARCH_SLM) {
+		rdmsr_on_cpu(policy->cpu, MSR_IA32_POWER_MISC, &lo, &hi);
+		lo = lo | ENABLE_ULFM_AUTOCM | ENABLE_INDP_AUTOCM;
+		wrmsr_on_cpu(policy->cpu, MSR_IA32_POWER_MISC, lo, hi);
+	}
+
+	pr_debug("CPU%u - SFI performance management activated.\n", cpu);
+	for (i = 0; i < perf->state_count; i++)
+		pr_debug("     %cP%d: %d MHz, %d uS\n",
+			(i == perf->state ? '*' : ' '), i,
+			(u32) perf->states[i].core_frequency,
+			(u32) perf->states[i].transition_latency);
+
+	cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
+
+	/*
+	 * the first call to ->target() should result in us actually
+	 * writing something to the appropriate registers.
+	 */
+	data->resume = 1;
+
+	return result;
+
+err_freqfree:
+	kfree(data->freq_table);
+err_unreg:
+	sfi_processor_unregister_performance(perf, cpu);
+err_free:
+	kfree(data);
+	per_cpu(drv_data, cpu) = NULL;
+
+	return result;
+}
+
+static int sfi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+	struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+
+	pr_debug("sfi_cpufreq_cpu_exit\n");
+
+	if (data) {
+		cpufreq_frequency_table_put_attr(policy->cpu);
+		per_cpu(drv_data, policy->cpu) = NULL;
+		sfi_processor_unregister_performance(data->sfi_data,
+							policy->cpu);
+		kfree(data->freq_table);
+		kfree(data);
+	}
+
+	return 0;
+}
+
+static int sfi_cpufreq_resume(struct cpufreq_policy *policy)
+{
+	struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+
+	pr_debug("sfi_cpufreq_resume\n");
+
+	data->resume = 1;
+
+	return 0;
+}
+
+static struct freq_attr *sfi_cpufreq_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	NULL,
+};
+
+static struct cpufreq_driver sfi_cpufreq_driver = {
+	.get = get_cur_freq_on_cpu,
+	.verify = sfi_cpufreq_verify,
+	.target = sfi_cpufreq_target,
+	.init = sfi_cpufreq_cpu_init,
+	.exit = sfi_cpufreq_cpu_exit,
+	.resume = sfi_cpufreq_resume,
+	.name = "sfi-cpufreq",
+	.owner = THIS_MODULE,
+	.attr = sfi_cpufreq_attr,
+};
+
+static int __init parse_cpus(struct sfi_table_header *table)
+{
+	struct sfi_table_simple *sb;
+	struct sfi_cpu_table_entry *pentry;
+	int i;
+
+	sb = (struct sfi_table_simple *)table;
+
+	sfi_cpu_num = SFI_GET_NUM_ENTRIES(sb, struct sfi_cpu_table_entry);
+
+	pentry = (struct sfi_cpu_table_entry *) sb->pentry;
+	for (i = 0; i < sfi_cpu_num; i++) {
+		sfi_cpu_array[i].apic_id = pentry->apic_id;
+		printk(KERN_INFO "APIC ID: %d\n", pentry->apic_id);
+		pentry++;
+	}
+
+	return 0;
+}
+
+static int __init init_sfi_processor_list(void)
+{
+	struct sfi_processor *pr;
+	int i;
+	int result;
+
+	/* parse the cpus from the sfi table */
+	result = sfi_table_parse(SFI_SIG_CPUS, NULL, NULL, parse_cpus);
+
+	if (result < 0)
+		return result;
+
+	pr = kzalloc(sfi_cpu_num * sizeof(struct sfi_processor), GFP_KERNEL);
+	if (!pr)
+		return -ENOMEM;
+
+	for (i = 0; i < sfi_cpu_num; i++) {
+		pr->id = sfi_cpu_array[i].apic_id;
+		per_cpu(sfi_processors, i) = pr;
+		pr++;
+	}
+
+	return 0;
+}
+
+static int __init sfi_cpufreq_init(void)
+{
+	int ret;
+
+	pr_debug("sfi_cpufreq_init\n");
+
+	ret = init_sfi_processor_list();
+	if (ret)
+		return ret;
+
+	ret = sfi_cpufreq_early_init();
+	if (ret)
+		return ret;
+
+	return cpufreq_register_driver(&sfi_cpufreq_driver);
+}
+
+static void __exit sfi_cpufreq_exit(void)
+{
+	struct sfi_processor *pr;
+
+	pr_debug("sfi_cpufreq_exit\n");
+
+	pr = per_cpu(sfi_processors, 0);
+	kfree(pr);
+
+	cpufreq_unregister_driver(&sfi_cpufreq_driver);
+
+	free_percpu(sfi_perf_data);
+
+	return;
+}
+late_initcall(sfi_cpufreq_init);
+module_exit(sfi_cpufreq_exit);
+
+MODULE_ALIAS("sfi");
diff --git a/drivers/cpufreq/sfi-cpufreq.h b/drivers/cpufreq/sfi-cpufreq.h
new file mode 100644
index 0000000..7e01c1e
--- /dev/null
+++ b/drivers/cpufreq/sfi-cpufreq.h
@@ -0,0 +1,65 @@
+/*
+ * sfi_processor.h
+ * Copyright (c) 2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __SFI_PROCESSOR_H__
+#define __SFI_PROCESSOR_H__
+
+#include <linux/sfi.h>
+#include <linux/cpuidle.h>
+
+struct sfi_processor_power {
+	struct cpuidle_device dev;
+	u32 default_state;
+	int count;
+	struct cpuidle_state *states;
+	struct sfi_cstate_table_entry *sfi_cstates;
+};
+
+struct sfi_processor_flags {
+	u8 valid;
+	u8 power;
+};
+
+struct sfi_processor {
+	u32 id;
+	struct sfi_processor_flags flags;
+	struct sfi_processor_power power;
+	struct sfi_processor_performance *performance;
+};
+
+/* Performance management */
+struct sfi_processor_px {
+	u32 core_frequency;	/* megahertz */
+	u32 transition_latency;	/* microseconds */
+	u32 control;	/* control value */
+};
+
+struct sfi_processor_performance {
+	unsigned int state;
+	unsigned int state_count;
+	struct sfi_processor_px *states;
+};
+
+/* for communication between multiple parts of the processor kernel module */
+DECLARE_PER_CPU(struct sfi_processor *, sfi_processors);
+
+int sfi_processor_power_init(struct sfi_processor *pr);
+int sfi_processor_power_exit(struct sfi_processor *pr);
+
+#endif /*__SFI_PROCESSOR_H__*/
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 67fd901..16e8a59 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -173,7 +173,12 @@
 
 	/* for higher loadavg, we are more reluctant */
 
-	mult += 2 * get_loadavg();
+	/*
+	 * this doesn't work as intended - it is almost always 0, but can
+	 * sometimes, depending on workload, spike very high into the hundreds
+	 * even when the average cpu load is under 10%.
+	 */
+	/* mult += 2 * get_loadavg(); */
 
 	/* for IO wait tasks (per cpu!) we add 5x each */
 	mult += 10 * nr_iowait_cpu(smp_processor_id());
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a2b0df5..4b82961 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -7,7 +7,7 @@
 obj-$(CONFIG_DMA_OF) += of-dma.o
 
 obj-$(CONFIG_NET_DMA) += iovlock.o
-obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
+obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o intel_mid_dma_acpi.o
 obj-$(CONFIG_DMATEST) += dmatest.o
 obj-$(CONFIG_INTEL_IOATDMA) += ioat/
 obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index a0de82e..b0274ce 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -28,34 +28,47 @@
 #include <linux/pm_runtime.h>
 #include <linux/intel_mid_dma.h>
 #include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
 
 #include "dmaengine.h"
 
-#define MAX_CHAN	4 /*max ch across controllers*/
+#define MAX_CHAN	8 /* max channels across controllers */
 #include "intel_mid_dma_regs.h"
 
 #define INTEL_MID_DMAC1_ID		0x0814
 #define INTEL_MID_DMAC2_ID		0x0813
 #define INTEL_MID_GP_DMAC2_ID		0x0827
 #define INTEL_MFLD_DMAC1_ID		0x0830
-#define LNW_PERIPHRAL_MASK_BASE		0xFFAE8008
-#define LNW_PERIPHRAL_MASK_SIZE		0x10
-#define LNW_PERIPHRAL_STATUS		0x0
-#define LNW_PERIPHRAL_MASK		0x8
+#define INTEL_CLV_GP_DMAC2_ID		0x08EF
+#define INTEL_CLV_DMAC1_ID		0x08F0
+#define INTEL_MRFLD_GP_DMAC2_ID         0x11A2
+#define INTEL_MRFLD_DMAC0_ID		0x119B
+#define INTEL_BYT_LPIO1_DMAC_ID		0x0F06
+#define INTEL_BYT_LPIO2_DMAC_ID		0x0F40
+#define INTEL_BYT_DMAC0_ID		0x0F28
 
-struct intel_mid_dma_probe_info {
-	u8 max_chan;
-	u8 ch_base;
-	u16 block_size;
-	u32 pimr_mask;
-};
+#define PCI_DEVICE_ID_INTEL_GP_DMAC2_MOOR      0x1497
 
-#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \
+#define MRFL_INSTANCE_SPI3     3
+#define MRFL_INSTANCE_SPI5     5
+#define MRFL_INSTANCE_SPI6     6
+
+#define LNW_PERIPHRAL_MASK_SIZE		0x20
+
+#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask,	\
+		_pimr_base, _dword_trf, _pimr_offset, _pci_id,	\
+			_pdma_ops)				\
 	((kernel_ulong_t)&(struct intel_mid_dma_probe_info) {	\
 		.max_chan = (_max_chan),			\
 		.ch_base = (_ch_base),				\
 		.block_size = (_block_size),			\
 		.pimr_mask = (_pimr_mask),			\
+		.pimr_base = (_pimr_base),			\
+		.dword_trf = (_dword_trf),			\
+		.pimr_offset = (_pimr_offset),			\
+		.pci_id = (_pci_id),				\
+		.pdma_ops = (_pdma_ops)				\
 	})
 
 /*****************************************************************************
@@ -65,32 +78,90 @@
  * @status: status mask
  * @base: dma ch base value
  *
- * Modify the status mask and return the channel index needing
- * attention (or -1 if neither)
+ * Returns the channel index by checking the status bits.
+ * If none of the bits in status are set, then returns -1.
  */
-static int get_ch_index(int *status, unsigned int base)
+static int get_ch_index(int status, unsigned int base)
 {
 	int i;
 	for (i = 0; i < MAX_CHAN; i++) {
-		if (*status & (1 << (i + base))) {
-			*status = *status & ~(1 << (i + base));
-			pr_debug("MDMA: index %d New status %x\n", i, *status);
+		if (status & (1 << (i + base)))
 			return i;
-		}
 	}
 	return -1;
 }
 
+static inline bool is_byt_lpio_dmac(struct middma_device *mid)
+{
+	return (mid->pci_id == INTEL_BYT_LPIO1_DMAC_ID ||
+		mid->pci_id == INTEL_BYT_LPIO2_DMAC_ID);
+}
+
+static void dump_dma_reg(struct dma_chan *chan)
+{
+	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
+	struct middma_device	*mid = to_middma_device(chan->device);
+
+	if (!mid->pimr_base)
+		return;
+
+	pr_debug("<<<<<<<<<<<< DMA Dump Start >>>>>>>>>>>>");
+	pr_debug("DMA Dump for Channel id:%d & Chnl Base:%p",
+					midc->ch_id, midc->ch_regs);
+	/* dump common DMA registers */
+	pr_debug("PIMR:\t%#x", readl(mid->mask_reg) - 8);
+	pr_debug("ISRX:\t%#x", readl(mid->mask_reg));
+	pr_debug("ISRD:\t%#x", readl(mid->mask_reg + 0x8));
+	pr_debug("IMRX:\t%#x", readl(mid->mask_reg + 0x10));
+	pr_debug("IMRD:\t%#x", readl(mid->mask_reg + 0x18));
+	pr_debug("DMA_CHAN_EN:\t%#x", readl(midc->dma_base + DMA_CHAN_EN));
+	pr_debug("DMA_CFG:\t%#x", readl(midc->dma_base + DMA_CFG));
+	pr_debug("INTR_STATUS:\t%#x", readl(midc->dma_base + INTR_STATUS));
+	pr_debug("MASK_TFR:\t%#x", readl(midc->dma_base + MASK_TFR));
+	pr_debug("MASK_BLOCK:\t%#x", readl(midc->dma_base + MASK_BLOCK));
+	pr_debug("MASK_ERR:\t%#x", readl(midc->dma_base + MASK_ERR));
+	pr_debug("RAW_TFR:\t%#x", readl(midc->dma_base + RAW_TFR));
+	pr_debug("RAW_BLOCK:\t%#x", readl(midc->dma_base + RAW_BLOCK));
+	pr_debug("RAW_ERR:\t%#x", readl(midc->dma_base + RAW_ERR));
+	pr_debug("STATUS_TFR:\t%#x", readl(midc->dma_base + STATUS_TFR));
+	pr_debug("STATUS_BLOCK:\t%#x", readl(midc->dma_base + STATUS_BLOCK));
+	pr_debug("STATUS_ERR:\t%#x", readl(midc->dma_base + STATUS_ERR));
+	if (!mid->dword_trf) {
+		pr_debug("FIFO_PARTITION0_LO:\t%#x",
+				readl(midc->dma_base + FIFO_PARTITION0_LO));
+		pr_debug("FIFO_PARTITION0_HI:\t%#x",
+				readl(midc->dma_base + FIFO_PARTITION0_HI));
+		pr_debug("FIFO_PARTITION1_LO:\t%#x",
+				readl(midc->dma_base + FIFO_PARTITION1_LO));
+		pr_debug("FIFO_PARTITION1_HI:\t%#x",
+				readl(midc->dma_base + FIFO_PARTITION1_HI));
+		pr_debug("CH_SAI_ERR:\t%#x", readl(midc->dma_base + CH_SAI_ERR));
+	}
+
+	/* dump channel specific registers */
+	pr_debug("SAR:\t%#x", readl(midc->ch_regs + SAR));
+	pr_debug("DAR:\t%#x", readl(midc->ch_regs + DAR));
+	pr_debug("LLP:\t%#x", readl(midc->ch_regs + LLP));
+	pr_debug("CTL_LOW:\t%#x", readl(midc->ch_regs + CTL_LOW));
+	pr_debug("CTL_HIGH:\t%#x", readl(midc->ch_regs + CTL_HIGH));
+	pr_debug("CFG_LOW:\t%#x", readl(midc->ch_regs + CFG_LOW));
+	pr_debug("CFG_HIGH:\t%#x", readl(midc->ch_regs + CFG_HIGH));
+	pr_debug("<<<<<<<<<<<< DMA Dump ends >>>>>>>>>>>>");
+}
+
 /**
  * get_block_ts	-	calculates dma transaction length
  * @len: dma transfer length
  * @tx_width: dma transfer src width
  * @block_size: dma controller max block size
+ * @dword_trf: set when the transfer is dword aligned and the length must
+ *   be expressed in data items rather than bytes
  *
  * Based on src width calculate the DMA transaction length in data items
  * return data items or 0xFFFF if it exceeds the max block length
  */
-static int get_block_ts(int len, int tx_width, int block_size)
+static unsigned int get_block_ts(int len, int tx_width,
+				int block_size, int dword_trf)
 {
 	int byte_width = 0, block_ts = 0;
 
@@ -106,13 +177,46 @@
 		byte_width = 4;
 		break;
 	}
-
-	block_ts = len/byte_width;
+	if (dword_trf)
+		block_ts = len/byte_width;
+	else
+		block_ts = len;
 	if (block_ts > block_size)
 		block_ts = 0xFFFF;
 	return block_ts;
 }
 
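+/*
+ * Worked example for get_block_ts() (illustrative values): len = 64 bytes
+ * with a 4-byte source width and block_size = 0x7ff yields block_ts =
+ * 64 / 4 = 16 data items when dword_trf is set, and block_ts = 64 (bytes)
+ * when it is clear; any result above block_size is clamped to 0xFFFF.
+ */
+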
+/**
+ * get_reg_width	-	computes the DMA sample width
+ * @kernel_width: kernel DMA slave bus width
+ *
+ * Converts the kernel DMA slave bus width into the Intel DMA
+ * controller's bus width encoding.
+ */
+static int get_reg_width(enum dma_slave_buswidth kernel_width)
+{
+	int reg_width = -1;
+
+	switch (kernel_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		reg_width = 0;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		reg_width = 1;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		reg_width = 2;
+		break;
+	case DMA_SLAVE_BUSWIDTH_UNDEFINED:
+	case DMA_SLAVE_BUSWIDTH_8_BYTES:
+	default:
+		pr_err("ERR_MDMA: get_reg_width unsupported reg width\n");
+		break;
+	}
+	return reg_width;
+}
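+
+/*
+ * Resulting encoding, per the switch above: 1 byte -> 0b000, 2 bytes ->
+ * 0b001, 4 bytes -> 0b010; undefined and 8-byte widths are rejected with
+ * -1, which callers must check before programming CTL_LO.
+ */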
+
 /*****************************************************************************
 DMAC1 interrupt Functions*/
 
@@ -129,9 +233,9 @@
 	u32 pimr;
 
 	if (mid->pimr_mask) {
-		pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
+		pimr = readl(mid->mask_reg + mid->pimr_offset);
 		pimr |= mid->pimr_mask;
-		writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
+		writel(pimr, mid->mask_reg + mid->pimr_offset);
 	}
 	return;
 }
@@ -149,14 +253,37 @@
 	u32 pimr;
 	struct middma_device *mid = to_middma_device(midc->chan.device);
 
-	if (mid->pimr_mask) {
-		pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
+	if (mid->pimr_mask && mid->dword_trf) {
+		pimr = readl(mid->mask_reg + mid->pimr_offset);
 		pimr &= ~mid->pimr_mask;
-		writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
+		writel(pimr, mid->mask_reg + mid->pimr_offset);
+	}
+	if (mid->pimr_mask && !mid->dword_trf) {
+		pimr = readl(mid->mask_reg + mid->pimr_offset);
+		pimr &= ~(1 << (midc->ch_id + 16));
+		writel(pimr, mid->mask_reg + mid->pimr_offset);
 	}
 	return;
 }
 
+/*
+ * Some consumers may need to know how many bytes have actually been
+ * transferred on a specific DMA channel
+ */
+inline dma_addr_t intel_dma_get_src_addr(struct dma_chan *chan)
+{
+	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
+	return readl(midc->ch_regs + SAR);
+}
+EXPORT_SYMBOL(intel_dma_get_src_addr);
+
+inline dma_addr_t intel_dma_get_dst_addr(struct dma_chan *chan)
+{
+	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
+	return readl(midc->ch_regs + DAR);
+}
+EXPORT_SYMBOL(intel_dma_get_dst_addr);
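+
+/*
+ * Consumer-side sketch (assumed usage, not taken from this driver): the
+ * progress of a mem-to-dev transfer can be estimated from the moving
+ * source address, e.g.
+ *
+ *	dma_addr_t done = intel_dma_get_src_addr(chan) - start_sar;
+ *
+ * where start_sar is the SAR value the descriptor was programmed with.
+ */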
+
 /**
  * enable_dma_interrupt -	enable the peripheral interrupt
  * @midc: dma channel for which enable interrupt is required
@@ -167,10 +294,13 @@
  */
 static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
 {
+	struct middma_device *mid = to_middma_device(midc->chan.device);
+
 	dmac1_unmask_periphral_intr(midc);
 
 	/*en ch interrupts*/
 	iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
+	set_bit(midc->ch_id, &mid->tfr_intr_mask);
 	iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
 	return;
 }
@@ -185,10 +315,39 @@
  */
 static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
 {
+	struct middma_device *mid = to_middma_device(midc->chan.device);
+	u32 pimr;
+
 	/*Check LPE PISR, make sure fwd is disabled*/
 	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
+	clear_bit(midc->ch_id, &mid->block_intr_mask);
 	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
+	clear_bit(midc->ch_id, &mid->tfr_intr_mask);
 	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
+	if (mid->pimr_mask && !mid->dword_trf) {
+		pimr = readl(mid->mask_reg + mid->pimr_offset);
+		pimr |= (1 << (midc->ch_id + 16));
+		writel(pimr, mid->mask_reg + mid->pimr_offset);
+	}
+
+	return;
+}
+
+/**
+ * clear_dma_channel_interrupt - clear channel interrupt
+ * @midc: dma channel for which clear interrupt is required
+ *
+ */
+static void clear_dma_channel_interrupt(struct intel_mid_dma_chan *midc)
+{
+	struct middma_device *mid = to_middma_device(midc->chan.device);
+
+	/* clear these interrupts first */
+	iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
+	iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK);
+	iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR);
+
+
 }
 
@@ -243,7 +402,7 @@
  * Load a transaction into the engine. This must be called with midc->lock
  * held and bh disabled.
  */
-static void midc_dostart(struct intel_mid_dma_chan *midc,
+static int midc_dostart(struct intel_mid_dma_chan *midc,
 			struct intel_mid_dma_desc *first)
 {
 	struct middma_device *mid = to_middma_device(midc->chan.device);
@@ -253,7 +412,7 @@
 		/*error*/
 		pr_err("ERR_MDMA: channel is busy in start\n");
 		/* The tasklet will hopefully advance the queue... */
-		return;
+		return -EBUSY;
 	}
 	midc->busy = true;
 	/*write registers and en*/
@@ -264,12 +423,13 @@
 	iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
 	iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
 	iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH);
-	pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
+	pr_debug("MDMA:TX SAR %x,DAR %x,CFGH %x,CFGL %x,CTLH %x, CTLL %x LLI %x",
 		(int)first->sar, (int)first->dar, first->cfg_hi,
-		first->cfg_lo, first->ctl_hi, first->ctl_lo);
+		first->cfg_lo, first->ctl_hi, first->ctl_lo, (int)first->lli_phys);
 	first->status = DMA_IN_PROGRESS;
 
 	iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
+	return 0;
 }
 
 /**
@@ -303,46 +463,65 @@
 		else
 			desc->current_lli = 0;
 	}
-	spin_unlock_bh(&midc->lock);
+	if (midc->raw_tfr) {
+		list_del(&desc->desc_node);
+		desc->status = DMA_SUCCESS;
+		if (desc->lli != NULL && desc->lli->llp != 0)
+			dma_pool_free(desc->lli_pool, desc->lli,
+						desc->lli_phys);
+		list_add(&desc->desc_node, &midc->free_list);
+		midc->busy = false;
+		midc->raw_tfr = 0;
+		spin_unlock_bh(&midc->lock);
+	} else {
+		spin_unlock_bh(&midc->lock);
+	}
 	if (callback_txd) {
 		pr_debug("MDMA: TXD callback set ... calling\n");
 		callback_txd(param_txd);
 	}
-	if (midc->raw_tfr) {
-		desc->status = DMA_SUCCESS;
-		if (desc->lli != NULL) {
-			pci_pool_free(desc->lli_pool, desc->lli,
-						desc->lli_phys);
-			pci_pool_destroy(desc->lli_pool);
-			desc->lli = NULL;
-		}
-		list_move(&desc->desc_node, &midc->free_list);
-		midc->busy = false;
-	}
-	spin_lock_bh(&midc->lock);
 
+	spin_lock_bh(&midc->lock);
 }
-/**
- * midc_scan_descriptors -		check the descriptors in channel
- *					mark completed when tx is completete
- * @mid: device
- * @midc: channel to scan
- *
- * Walk the descriptor chain for the device and process any entries
- * that are complete.
- */
-static void midc_scan_descriptors(struct middma_device *mid,
+
+static struct
+intel_mid_dma_desc *midc_first_queued(struct intel_mid_dma_chan *midc)
+{
+	return list_entry(midc->queue.next, struct intel_mid_dma_desc, desc_node);
+}
+
+static void midc_collect_descriptors(struct middma_device *mid,
 				struct intel_mid_dma_chan *midc)
 {
 	struct intel_mid_dma_desc *desc = NULL, *_desc = NULL;
-
 	/*tx is complete*/
 	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
 		if (desc->status == DMA_IN_PROGRESS)
 			midc_descriptor_complete(midc, desc);
 	}
-	return;
+}
+
+/**
+ * midc_start_descriptors -		start the descriptors in queue
+ *
+ * @mid: device
+ * @midc: channel to scan
+ *
+ */
+static void midc_start_descriptors(struct middma_device *mid,
+				struct intel_mid_dma_chan *midc)
+{
+	if (!list_empty(&midc->queue)) {
+		pr_debug("MDMA: submitting txn in queue\n");
+		if (midc_dostart(midc, midc_first_queued(midc)) == 0)
+			list_splice_init(&midc->queue, &midc->active_list);
+		else
+			pr_warn("Submit failed as ch is busy\n");
 	}
+	return;
+}
+
 /**
  * midc_lli_fill_sg -		Helper function to convert
  *				SG list to Linked List Items.
@@ -357,7 +536,8 @@
  */
 static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
 				struct intel_mid_dma_desc *desc,
-				struct scatterlist *sglist,
+				struct scatterlist *src_sglist,
+				struct scatterlist *dst_sglist,
 				unsigned int sglen,
 				unsigned int flags)
 {
@@ -366,18 +546,18 @@
 	dma_addr_t lli_next, sg_phy_addr;
 	struct intel_mid_dma_lli *lli_bloc_desc;
 	union intel_mid_dma_ctl_lo ctl_lo;
-	union intel_mid_dma_ctl_hi ctl_hi;
+	u32 ctl_hi;
 	int i;
 
-	pr_debug("MDMA: Entered midc_lli_fill_sg\n");
+	pr_debug("MDMA: Entered %s\n", __func__);
 	mids = midc->mid_slave;
 
 	lli_bloc_desc = desc->lli;
 	lli_next = desc->lli_phys;
 
 	ctl_lo.ctl_lo = desc->ctl_lo;
-	ctl_hi.ctl_hi = desc->ctl_hi;
-	for_each_sg(sglist, sg, sglen, i) {
+	ctl_hi = desc->ctl_hi;
+	for_each_sg(src_sglist, sg, sglen, i) {
 		/*Populate CTL_LOW and LLI values*/
 		if (i != sglen - 1) {
 			lli_next = lli_next +
@@ -389,29 +569,37 @@
 				lli_next = desc->lli_phys;
 			} else {
 				lli_next = 0;
-				ctl_lo.ctlx.llp_dst_en = 0;
-				ctl_lo.ctlx.llp_src_en = 0;
+				/* llp_dst_en = 0 llp_src_en = 0 */
+				ctl_lo.ctl_lo &= ~(1 << CTL_LO_BIT_LLP_DST_EN);
+				ctl_lo.ctl_lo &= ~(1 << CTL_LO_BIT_LLP_SRC_EN);
 			}
 		}
 		/*Populate CTL_HI values*/
-		ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg),
-							desc->width,
-							midc->dma->block_size);
+		ctl_hi = get_block_ts(sg->length, desc->width,
+					midc->dma->block_size, midc->dma->dword_trf);
 		/*Populate SAR and DAR values*/
-		sg_phy_addr = sg_dma_address(sg);
+		sg_phy_addr = sg_phys(sg);
 		if (desc->dirn ==  DMA_MEM_TO_DEV) {
 			lli_bloc_desc->sar  = sg_phy_addr;
 			lli_bloc_desc->dar  = mids->dma_slave.dst_addr;
 		} else if (desc->dirn ==  DMA_DEV_TO_MEM) {
 			lli_bloc_desc->sar  = mids->dma_slave.src_addr;
 			lli_bloc_desc->dar  = sg_phy_addr;
+		} else if (desc->dirn == DMA_MEM_TO_MEM && dst_sglist) {
+			lli_bloc_desc->sar = sg_phy_addr;
+			lli_bloc_desc->dar = sg_phys(dst_sglist);
 		}
 		/* Copy values into block descriptor in system memory */
 		lli_bloc_desc->llp = lli_next;
 		lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo;
-		lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi;
+		lli_bloc_desc->ctl_hi = ctl_hi;
 
+		pr_debug("MDMA:Calc CTL LO %x, CTL HI %x src: %x dest: %x sg->l:%x\n",
+					ctl_lo.ctl_lo, lli_bloc_desc->ctl_hi,
+					lli_bloc_desc->sar, lli_bloc_desc->dar, sg->length);
 		lli_bloc_desc++;
+		if (dst_sglist)
+			dst_sglist = sg_next(dst_sglist);
 	}
 	/*Copy very first LLI values to descriptor*/
 	desc->ctl_lo = desc->lli->ctl_lo;
@@ -421,13 +609,14 @@
 
 	return 0;
 }
+
 /*****************************************************************************
 DMA engine callback Functions*/
 /**
  * intel_mid_dma_tx_submit -	callback to submit DMA transaction
  * @tx: dma engine descriptor
  *
- * Submit the DMA transaction for this descriptor, start if ch idle
+ * Submit the DMA transaction for this descriptor, start if ch idle
  */
 static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
@@ -436,6 +625,14 @@
 	dma_cookie_t		cookie;
 
 	spin_lock_bh(&midc->lock);
+
+	if (unlikely(!midc->in_use)) {
+		spin_unlock_bh(&midc->lock);
+		WARN(1, "chan[%d] gets new request after close",
+			tx->chan->chan_id);
+		return -EIO;
+	}
+
 	cookie = dma_cookie_assign(tx);
 
 	if (list_empty(&midc->active_list))
@@ -461,11 +658,74 @@
 
 	spin_lock_bh(&midc->lock);
 	if (!list_empty(&midc->queue))
-		midc_scan_descriptors(to_middma_device(chan->device), midc);
+		midc_start_descriptors(to_middma_device(chan->device), midc);
 	spin_unlock_bh(&midc->lock);
 }
 
 /**
+ * dma_wait_for_suspend - performs the following steps:
+ * 		1. Suspend the channel using the mask bits
+ * 		2. Wait for the FIFO to drain
+ * 		3. Disable the channel
+ * 		4. Restore the previously masked bits
+ *
+ * @chan: channel to be suspended
+ * @mask: mask bits to be used for the suspend operation
+ *
+ */
+static inline void dma_wait_for_suspend(struct dma_chan *chan, unsigned int mask)
+{
+	union intel_mid_dma_cfg_lo cfg_lo;
+	struct middma_device	*mid = to_middma_device(chan->device);
+	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
+	int i;
+
+	/* Suspend channel */
+	cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
+	cfg_lo.cfg_lo |= mask;
+	iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
+	/* wait till the FIFO drains */
+	/* the FIFO should be cleared within a couple of milliseconds */
+	for (i = 0; i < 10; i++) {
+		cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
+		if (cfg_lo.cfgx.fifo_empty)
+			break;
+	/* use mdelay() since this may be called from atomic context */
+		mdelay(1);
+	}
+	pr_debug("waited for %d ms for FIFO to get empty", i);
+	iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
+
+	cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
+	cfg_lo.cfg_lo &= ~mask;
+	iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
+}
+/**
+ * intel_mid_dma_chan_suspend_v1 - suspends the given channel, waits
+ *		till FIFO is cleared and disables channel.
+ * @chan: channel to be suspended
+ *
+ */
+static void intel_mid_dma_chan_suspend_v1(struct dma_chan *chan)
+{
+	pr_debug("%s", __func__);
+	dma_wait_for_suspend(chan, CH_SUSPEND);
+}
+
+/**
+ * intel_mid_dma_chan_suspend_v2 - suspends the given channel, waits
+ *		till FIFO is cleared and disables channel.
+ * @chan: channel to be suspended
+ *
+ */
+static void intel_mid_dma_chan_suspend_v2(struct dma_chan *chan)
+{
+	pr_debug("%s", __func__);
+	dma_wait_for_suspend(chan, CH_SUSPEND | CH_DRAIN);
+}
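+
+/*
+ * Note on the v1/v2 split above (an interpretation of the mask names): v1
+ * suspends with CH_SUSPEND only, preserving FIFO contents, while v2 also
+ * sets CH_DRAIN so residual FIFO data is flushed before the channel is
+ * disabled.
+ */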
+
+/**
  * intel_mid_dma_tx_status -	Return status of txn
  * @chan: chan for where status needs to be checked
  * @cookie: cookie for txn
@@ -483,7 +743,7 @@
 	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret != DMA_SUCCESS) {
 		spin_lock_bh(&midc->lock);
-		midc_scan_descriptors(to_middma_device(chan->device), midc);
+		midc_start_descriptors(to_middma_device(chan->device), midc);
 		spin_unlock_bh(&midc->lock);
 
 		ret = dma_cookie_status(chan, cookie, txstate);
@@ -509,6 +769,7 @@
 	midc->mid_slave = mid_slave;
 	return 0;
 }
+
 /**
  * intel_mid_dma_device_control -	DMA device control
  * @chan: chan for DMA control
@@ -523,8 +784,8 @@
 	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
 	struct middma_device	*mid = to_middma_device(chan->device);
 	struct intel_mid_dma_desc	*desc, *_desc;
-	union intel_mid_dma_cfg_lo cfg_lo;
 
+	pr_debug("%s:CMD:%d for channel:%d\n", __func__, cmd, midc->ch_id);
 	if (cmd == DMA_SLAVE_CONFIG)
 		return dma_slave_control(chan, arg);
 
@@ -536,30 +797,25 @@
 		spin_unlock_bh(&midc->lock);
 		return 0;
 	}
-	/*Suspend and disable the channel*/
-	cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
-	cfg_lo.cfgx.ch_susp = 1;
-	iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
-	iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
-	midc->busy = false;
-	/* Disable interrupts */
+	/* Disable CH interrupts */
 	disable_dma_interrupt(midc);
+	/* clear channel interrupts */
+	clear_dma_channel_interrupt(midc);
+	mid->dma_ops.dma_chan_suspend(chan);
+	midc->busy = false;
 	midc->descs_allocated = 0;
-
-	spin_unlock_bh(&midc->lock);
 	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
-		if (desc->lli != NULL) {
-			pci_pool_free(desc->lli_pool, desc->lli,
+		list_del(&desc->desc_node);
+		if (desc->lli != NULL)
+			dma_pool_free(desc->lli_pool, desc->lli,
 						desc->lli_phys);
-			pci_pool_destroy(desc->lli_pool);
-			desc->lli = NULL;
-		}
-		list_move(&desc->desc_node, &midc->free_list);
+		list_add(&desc->desc_node, &midc->free_list);
 	}
+	spin_unlock_bh(&midc->lock);
+
 	return 0;
 }
 
-
 /**
  * intel_mid_dma_prep_memcpy -	Prep memcpy txn
  * @chan: chan for DMA transfer
@@ -580,10 +836,12 @@
 	struct intel_mid_dma_desc *desc = NULL;
 	struct intel_mid_dma_slave *mids;
 	union intel_mid_dma_ctl_lo ctl_lo;
-	union intel_mid_dma_ctl_hi ctl_hi;
+	u32 ctl_hi;
 	union intel_mid_dma_cfg_lo cfg_lo;
 	union intel_mid_dma_cfg_hi cfg_hi;
 	enum dma_slave_buswidth width;
+	int dst_reg_width = 0;
+	int src_reg_width = 0;
 
 	pr_debug("MDMA: Prep for memcpy\n");
 	BUG_ON(!chan);
@@ -596,6 +854,11 @@
 	mids = midc->mid_slave;
 	BUG_ON(!mids);
 
+	if (unlikely(!midc->in_use)) {
+		pr_err("ERR_MDMA: %s: channel not in use", __func__);
+		return NULL;
+	}
+
 	pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
 				midc->dma->pci_id, midc->ch_id, len);
 	pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
@@ -634,36 +897,41 @@
 			}
 		} else {
 			cfg_hi.cfgx.protctl = 0x1; /*default value*/
-			cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per =
+			/* Baytrail DMAC uses dynamic device instance */
+			if (is_byt_lpio_dmac(midc->dma))
+				cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per =
+					mids->device_instance;
+			else
+				cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per =
 					midc->ch_id - midc->dma->chan_base;
 		}
 	}
-
 	/*calculate CTL_HI*/
-	ctl_hi.ctlx.reser = 0;
-	ctl_hi.ctlx.done  = 0;
 	width = mids->dma_slave.src_addr_width;
-
-	ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
+	ctl_hi = get_block_ts(len, width, midc->dma->block_size, midc->dma->dword_trf);
 	pr_debug("MDMA:calc len %d for block size %d\n",
-				ctl_hi.ctlx.block_ts, midc->dma->block_size);
+				ctl_hi, midc->dma->block_size);
 	/*calculate CTL_LO*/
 	ctl_lo.ctl_lo = 0;
 	ctl_lo.ctlx.int_en = 1;
+
+	dst_reg_width = get_reg_width(mids->dma_slave.dst_addr_width);
+	if (dst_reg_width < 0) {
+		pr_err("ERR_MDMA: Failed to get DST reg width\n");
+		return NULL;
+	}
+	ctl_lo.ctlx.dst_tr_width = dst_reg_width;
+
+	src_reg_width = get_reg_width(mids->dma_slave.src_addr_width);
+	if (src_reg_width < 0) {
+		pr_err("ERR_MDMA: Failed to get SRC reg width\n");
+		return NULL;
+	}
+	ctl_lo.ctlx.src_tr_width = src_reg_width;
 	ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
 	ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;
 
-	/*
-	 * Here we need some translation from "enum dma_slave_buswidth"
-	 * to the format for our dma controller
-	 *		standard	intel_mid_dmac's format
-	 *		 1 Byte			0b000
-	 *		 2 Bytes		0b001
-	 *		 4 Bytes		0b010
-	 */
-	ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2;
-	ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2;
-
 	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
 		ctl_lo.ctlx.tt_fc = 0;
 		ctl_lo.ctlx.sinc = 0;
@@ -681,7 +949,7 @@
 	}
 
 	pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
-		ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);
+		ctl_lo.ctl_lo, ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);
 
 	enable_dma_interrupt(midc);
 
@@ -694,7 +962,7 @@
 	desc->cfg_hi = cfg_hi.cfg_hi;
 	desc->cfg_lo = cfg_lo.cfg_lo;
 	desc->ctl_lo = ctl_lo.ctl_lo;
-	desc->ctl_hi = ctl_hi.ctl_hi;
+	desc->ctl_hi = ctl_hi;
 	desc->width = width;
 	desc->dirn = mids->dma_slave.direction;
 	desc->lli_phys = 0;
@@ -707,6 +975,315 @@
 	midc_desc_put(midc, desc);
 	return NULL;
 }
+
+/**
+ * intel_mid_dma_prep_memcpy_v2 - Prep memcpy txn
+ * @chan: chan for DMA transfer
+ * @dest: destination address
+ * @src: source address
+ * @len: DMA transfer len
+ * @flags: DMA flags
+ *
+ * Perform a DMA memcpy. Note we support slave peripheral DMA transfers only;
+ * the peripheral txn details should be filled in the slave structure properly.
+ * Returns the descriptor for this txn
+ */
+static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy_v2(
+			struct dma_chan *chan, dma_addr_t dest,
+			dma_addr_t src, size_t len, unsigned long flags)
+{
+	struct intel_mid_dma_chan *midc;
+	struct intel_mid_dma_desc *desc = NULL;
+	struct intel_mid_dma_slave *mids;
+	union intel_mid_dma_ctl_lo ctl_lo;
+	u32 ctl_hi;
+	union intel_mid_dma_cfg_lo cfg_lo;
+	union intel_mid_dma_cfg_hi cfg_hi;
+	enum dma_slave_buswidth width;
+	int dst_reg_width = 0;
+	int src_reg_width = 0;
+
+	pr_debug("MDMA:%s\n", __func__);
+	BUG_ON(!chan);
+	if (!len)
+		return NULL;
+
+	midc = to_intel_mid_dma_chan(chan);
+	BUG_ON(!midc);
+
+	mids = midc->mid_slave;
+	BUG_ON(!mids);
+
+	if (unlikely(!midc->in_use)) {
+		pr_err("ERR_MDMA: %s: channel not in use\n", __func__);
+		return NULL;
+	}
+
+	pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
+				midc->dma->pci_id, midc->ch_id, len);
+	pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
+			mids->cfg_mode, mids->dma_slave.direction,
+			mids->hs_mode, mids->dma_slave.src_addr_width);
+
+	/*calculate CFG_LO*/
+	cfg_lo.cfgx_v2.dst_burst_align = 1;
+	cfg_lo.cfgx_v2.src_burst_align = 1;
+
+	/* For mem to mem transfer, it's SW HS only */
+	cfg_hi.cfg_hi = 0;
+	/*calculate CFG_HI for mem to/from dev scenario */
+	if (mids->cfg_mode != LNW_DMA_MEM_TO_MEM) {
+		if (midc->dma->pimr_mask) {
+			/* device_instance => SSP0 = 0, SSP1 = 1, SSP2 = 2 */
+			if (mids->device_instance > 2) {
+				pr_err("Invalid SSP identifier\n");
+				return NULL;
+			}
+			cfg_hi.cfgx_v2.src_per = 0;
+			cfg_hi.cfgx_v2.dst_per = 0;
+			if (mids->dma_slave.direction == DMA_MEM_TO_DEV)
+				/* SSP DMA in Tx direction */
+				cfg_hi.cfgx_v2.dst_per = (2 * mids->device_instance) + 1;
+			else if (mids->dma_slave.direction == DMA_DEV_TO_MEM)
+				/* SSP DMA in Rx direction */
+				cfg_hi.cfgx_v2.src_per = (2 * mids->device_instance);
+			else
+				return NULL;
+
+		} else if ((midc->dma->pci_id == INTEL_MRFLD_GP_DMAC2_ID) ||
+				(midc->dma->pci_id == PCI_DEVICE_ID_INTEL_GP_DMAC2_MOOR)) {
+			if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
+				cfg_hi.cfgx_v2.src_per = 0;
+
+				if (mids->device_instance ==
+					MRFL_INSTANCE_SPI3)
+					cfg_hi.cfgx_v2.dst_per = 0xF;
+				else if (mids->device_instance ==
+					MRFL_INSTANCE_SPI5)
+					cfg_hi.cfgx_v2.dst_per = 0xD;
+				else if (mids->device_instance ==
+					MRFL_INSTANCE_SPI6)
+					cfg_hi.cfgx_v2.dst_per = 0xB;
+				else
+					cfg_hi.cfgx_v2.dst_per = midc->ch_id
+						- midc->dma->chan_base;
+			} else if (mids->dma_slave.direction
+				== DMA_DEV_TO_MEM) {
+				if (mids->device_instance ==
+					MRFL_INSTANCE_SPI3)
+					cfg_hi.cfgx_v2.src_per = 0xE;
+				else if (mids->device_instance ==
+					MRFL_INSTANCE_SPI5)
+					cfg_hi.cfgx_v2.src_per = 0xC;
+				else if (mids->device_instance ==
+					MRFL_INSTANCE_SPI6)
+					cfg_hi.cfgx_v2.src_per = 0xA;
+				else
+					cfg_hi.cfgx_v2.src_per = midc->ch_id
+						- midc->dma->chan_base;
+
+				cfg_hi.cfgx_v2.dst_per = 0;
+			} else {
+				cfg_hi.cfgx_v2.dst_per =
+					cfg_hi.cfgx_v2.src_per = 0;
+			}
+		} else {
+			cfg_hi.cfgx_v2.src_per =
+				cfg_hi.cfgx_v2.dst_per =
+				midc->ch_id - midc->dma->chan_base;
+		}
+	}
+	/*calculate CTL_HI*/
+	width = mids->dma_slave.src_addr_width;
+	ctl_hi = get_block_ts(len, width, midc->dma->block_size, midc->dma->dword_trf);
+	pr_debug("MDMA:calc len %d for block size %d\n",
+				ctl_hi, midc->dma->block_size);
+	/*calculate CTL_LO*/
+	ctl_lo.ctl_lo = 0;
+	ctl_lo.ctlx_v2.int_en = 1;
+
+	dst_reg_width = get_reg_width(mids->dma_slave.dst_addr_width);
+	if (dst_reg_width < 0) {
+		pr_err("ERR_MDMA: Failed to get DST reg width\n");
+		return NULL;
+	}
+	ctl_lo.ctlx_v2.dst_tr_width = dst_reg_width;
+
+	src_reg_width = get_reg_width(mids->dma_slave.src_addr_width);
+	if (src_reg_width < 0) {
+		pr_err("ERR_MDMA: Failed to get SRC reg width\n");
+		return NULL;
+	}
+	ctl_lo.ctlx_v2.src_tr_width = src_reg_width;
+	ctl_lo.ctlx_v2.dst_msize = mids->dma_slave.src_maxburst;
+	ctl_lo.ctlx_v2.src_msize = mids->dma_slave.dst_maxburst;
+
+	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
+		ctl_lo.ctlx_v2.tt_fc = 0;
+		ctl_lo.ctlx_v2.sinc = 0;
+		ctl_lo.ctlx_v2.dinc = 0;
+	} else {
+		if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
+			ctl_lo.ctlx_v2.sinc = 0;
+			ctl_lo.ctlx_v2.dinc = 1;
+			ctl_lo.ctlx_v2.tt_fc = 1;
+		} else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
+			ctl_lo.ctlx_v2.sinc = 1;
+			ctl_lo.ctlx_v2.dinc = 0;
+			ctl_lo.ctlx_v2.tt_fc = 2;
+		}
+	}
+
+	pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
+		ctl_lo.ctl_lo, ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);
+
+	enable_dma_interrupt(midc);
+
+	desc = midc_desc_get(midc);
+	if (desc == NULL)
+		goto err_desc_get;
+	desc->sar = src;
+	desc->dar = dest;
+	desc->len = len;
+	desc->cfg_hi = cfg_hi.cfg_hi;
+	desc->cfg_lo = cfg_lo.cfg_lo;
+	desc->ctl_lo = ctl_lo.ctl_lo;
+	desc->ctl_hi = ctl_hi;
+	desc->width = width;
+	desc->dirn = mids->dma_slave.direction;
+	desc->lli_phys = 0;
+	desc->lli = NULL;
+	desc->lli_pool = NULL;
+	return &desc->txd;
+
+err_desc_get:
+	pr_err("ERR_MDMA: Failed to get desc\n");
+	midc_desc_put(midc, desc);
+	return NULL;
+}
+
+/**
+ * intel_mid_dma_chan_prep_desc
+ * @chan: chan for DMA transfer
+ * @src_sg: source scatter gather list
+ * @dst_sg: destination scatter gather list
+ * @flags: DMA flags
+ * @src_sg_len: length of src sg list
+ * @direction: DMA transfer direction
+ *
+ * Prepares LLI based peripheral transfer
+ */
+static struct dma_async_tx_descriptor *intel_mid_dma_chan_prep_desc(
+			struct dma_chan *chan, struct scatterlist *src_sg,
+			struct scatterlist *dst_sg, unsigned long flags,
+			unsigned long src_sg_len,
+			enum dma_transfer_direction direction)
+{
+	struct middma_device *mid = NULL;
+	struct intel_mid_dma_chan *midc = NULL;
+	struct intel_mid_dma_slave *mids = NULL;
+	struct intel_mid_dma_desc *desc = NULL;
+	struct dma_async_tx_descriptor *txd = NULL;
+	union intel_mid_dma_ctl_lo ctl_lo;
+
+	pr_debug("MDMA:%s\n", __func__);
+
+	midc = to_intel_mid_dma_chan(chan);
+	BUG_ON(!midc);
+
+	mid = to_middma_device(midc->chan.device);
+	mids = midc->mid_slave;
+	BUG_ON(!mids);
+
+	if (!midc->dma->pimr_mask) {
+		pr_err("MDMA: SG list is not supported by this controller\n");
+		return  NULL;
+	}
+
+	txd = midc->dma->dma_ops.device_prep_dma_memcpy(chan, 0, 0, src_sg->length, flags);
+	if (NULL == txd) {
+		pr_err("MDMA: Prep memcpy failed\n");
+		return NULL;
+	}
+
+	desc = to_intel_mid_dma_desc(txd);
+	desc->dirn = direction;
+	ctl_lo.ctl_lo = desc->ctl_lo;
+	ctl_lo.ctl_lo |= (1 << CTL_LO_BIT_LLP_DST_EN);
+	ctl_lo.ctl_lo |= (1 << CTL_LO_BIT_LLP_SRC_EN);
+	desc->ctl_lo = ctl_lo.ctl_lo;
+	desc->lli_length = src_sg_len;
+	desc->current_lli = 0;
+	/* DMA coherent memory pool for LLI descriptors*/
+	desc->lli_pool = dma_pool_create("intel_mid_dma_lli_pool",
+				midc->dma->dev,
+				(sizeof(struct intel_mid_dma_lli)*src_sg_len),
+				32, 0);
+	if (NULL == desc->lli_pool) {
+		pr_err("MID_DMA:LLI pool create failed\n");
+		return NULL;
+	}
+	midc->lli_pool = desc->lli_pool;
+
+	desc->lli = dma_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys);
+	if (!desc->lli) {
+		pr_err("MID_DMA: LLI alloc failed\n");
+		dma_pool_destroy(desc->lli_pool);
+		return NULL;
+	}
+	midc_lli_fill_sg(midc, desc, src_sg, dst_sg, src_sg_len, flags);
+	if (flags & DMA_PREP_INTERRUPT) {
+		/*
+		 * Enable the block interrupt and disable the TFR interrupt.
+		 * Enabling TFR is not required when the block interrupt is
+		 * enabled; otherwise the txd callback would be invoked twice
+		 * for the last block.
+		 */
+
+		iowrite32(MASK_INTR_REG(midc->ch_id),
+					midc->dma_base + MASK_TFR);
+		clear_bit(midc->ch_id, &mid->tfr_intr_mask);
+		iowrite32(UNMASK_INTR_REG(midc->ch_id),
+					midc->dma_base + MASK_BLOCK);
+		set_bit(midc->ch_id, &mid->block_intr_mask);
+		midc->block_intr_status = true;
+		pr_debug("MDMA: Enabled Block Interrupt\n");
+	}
+	return &desc->txd;
+}
+
+/**
+ * intel_mid_dma_prep_sg -        Prep sg txn
+ * @chan: chan for DMA transfer
+ * @dst_sg: destination scatter gather list
+ * @dst_sg_len: length of dest sg list
+ * @src_sg: source scatter gather list
+ * @src_sg_len: length of src sg list
+ * @flags: DMA flags
+ *
+ * Prepares LLI based peripheral transfer
+ */
+static struct dma_async_tx_descriptor *intel_mid_dma_prep_sg(
+			struct dma_chan *chan, struct scatterlist *dst_sg,
+			unsigned int dst_sg_len, struct scatterlist *src_sg,
+			unsigned int src_sg_len, unsigned long flags)
+{
+	pr_debug("MDMA: Prep for memcpy SG\n");
+
+	if ((dst_sg_len != src_sg_len) || (dst_sg == NULL) ||
+							(src_sg == NULL)) {
+		pr_err("MDMA: Invalid SG length\n");
+		return NULL;
+	}
+
+	pr_debug("MDMA: SG Length = %d, Flags = %#lx, src_sg->length = %d\n",
+				src_sg_len, flags, src_sg->length);
+
+	return intel_mid_dma_chan_prep_desc(chan, src_sg, dst_sg, flags,
+						src_sg_len, DMA_MEM_TO_MEM);
+}
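+
+/*
+ * Usage sketch (hypothetical client code, for illustration only), via
+ * the generic dmaengine API:
+ *
+ *	struct dma_async_tx_descriptor *txd;
+ *
+ *	txd = chan->device->device_prep_dma_sg(chan, dst_sg, nents,
+ *						src_sg, nents,
+ *						DMA_PREP_INTERRUPT);
+ *	if (txd) {
+ *		txd->callback = done_cb;  (done_cb: caller-provided)
+ *		dmaengine_submit(txd);
+ *		dma_async_issue_pending(chan);
+ *	}
+ */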
+
 /**
  * intel_mid_dma_prep_slave_sg -	Prep slave sg txn
  * @chan: chan for DMA transfer
@@ -719,84 +1296,26 @@
  * Prepares LLI based periphral transfer
  */
 static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
-			struct dma_chan *chan, struct scatterlist *sgl,
+			struct dma_chan *chan, struct scatterlist *sg,
 			unsigned int sg_len, enum dma_transfer_direction direction,
 			unsigned long flags, void *context)
 {
-	struct intel_mid_dma_chan *midc = NULL;
-	struct intel_mid_dma_slave *mids = NULL;
-	struct intel_mid_dma_desc *desc = NULL;
-	struct dma_async_tx_descriptor *txd = NULL;
-	union intel_mid_dma_ctl_lo ctl_lo;
 
 	pr_debug("MDMA: Prep for slave SG\n");
 
-	if (!sg_len) {
+	if (!sg_len || sg == NULL) {
 		pr_err("MDMA: Invalid SG length\n");
 		return NULL;
 	}
-	midc = to_intel_mid_dma_chan(chan);
-	BUG_ON(!midc);
-
-	mids = midc->mid_slave;
-	BUG_ON(!mids);
-
-	if (!midc->dma->pimr_mask) {
-		/* We can still handle sg list with only one item */
-		if (sg_len == 1) {
-			txd = intel_mid_dma_prep_memcpy(chan,
-						mids->dma_slave.dst_addr,
-						mids->dma_slave.src_addr,
-						sg_dma_len(sgl),
-						flags);
-			return txd;
-		} else {
-			pr_warn("MDMA: SG list is not supported by this controller\n");
-			return  NULL;
-		}
-	}
-
 	pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
-			sg_len, direction, flags);
-
-	txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags);
-	if (NULL == txd) {
-		pr_err("MDMA: Prep memcpy failed\n");
+				sg_len, direction, flags);
+	if (direction != DMA_MEM_TO_MEM) {
+		return intel_mid_dma_chan_prep_desc(chan, sg, NULL, flags,
+							sg_len, direction);
+	} else {
+		pr_err("MDMA: Invalid Direction\n");
 		return NULL;
 	}
-
-	desc = to_intel_mid_dma_desc(txd);
-	desc->dirn = direction;
-	ctl_lo.ctl_lo = desc->ctl_lo;
-	ctl_lo.ctlx.llp_dst_en = 1;
-	ctl_lo.ctlx.llp_src_en = 1;
-	desc->ctl_lo = ctl_lo.ctl_lo;
-	desc->lli_length = sg_len;
-	desc->current_lli = 0;
-	/* DMA coherent memory pool for LLI descriptors*/
-	desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool",
-				midc->dma->pdev,
-				(sizeof(struct intel_mid_dma_lli)*sg_len),
-				32, 0);
-	if (NULL == desc->lli_pool) {
-		pr_err("MID_DMA:LLI pool create failed\n");
-		return NULL;
-	}
-
-	desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys);
-	if (!desc->lli) {
-		pr_err("MID_DMA: LLI alloc failed\n");
-		pci_pool_destroy(desc->lli_pool);
-		return NULL;
-	}
-
-	midc_lli_fill_sg(midc, desc, sgl, sg_len, flags);
-	if (flags & DMA_PREP_INTERRUPT) {
-		iowrite32(UNMASK_INTR_REG(midc->ch_id),
-				midc->dma_base + MASK_BLOCK);
-		pr_debug("MDMA:Enabled Block interrupt\n");
-	}
-	return &desc->txd;
 }
 
 /**
@@ -811,31 +1330,52 @@
 	struct middma_device	*mid = to_middma_device(chan->device);
 	struct intel_mid_dma_desc	*desc, *_desc;
 
+	pr_debug("entry:%s\n", __func__);
+	if (!midc->in_use) {
+		pr_err("ERR_MDMA: trying to free a channel that is already free\n");
+		return;
+	}
 	if (true == midc->busy) {
 		/*trying to free ch in use!!!!!*/
 		pr_err("ERR_MDMA: trying to free ch in use\n");
+		dump_dma_reg(chan);
 	}
+
+	/* Disable CH interrupts */
+	disable_dma_interrupt(midc);
+	clear_dma_channel_interrupt(midc);
+
+	midc->block_intr_status = false;
+	midc->in_use = false;
+	midc->busy = false;
+
+	tasklet_unlock_wait(&mid->tasklet);
+
 	spin_lock_bh(&midc->lock);
 	midc->descs_allocated = 0;
 	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
 		list_del(&desc->desc_node);
-		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
+		dma_pool_free(mid->dma_pool, desc, desc->txd.phys);
 	}
 	list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
 		list_del(&desc->desc_node);
-		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
+		dma_pool_free(mid->dma_pool, desc, desc->txd.phys);
 	}
 	list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) {
 		list_del(&desc->desc_node);
-		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
+		dma_pool_free(mid->dma_pool, desc, desc->txd.phys);
 	}
+	midc->raw_tfr = 0;
 	spin_unlock_bh(&midc->lock);
-	midc->in_use = false;
-	midc->busy = false;
-	/* Disable CH interrupts */
-	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
-	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
-	pm_runtime_put(&mid->pdev->dev);
+
+	if (midc->lli_pool) {
+		dma_pool_destroy(midc->lli_pool);
+		midc->lli_pool = NULL;
+	}
+
+	/* Disable the channel */
+	iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
+	pm_runtime_put(mid->dev);
 }
 
 /**
@@ -853,20 +1393,19 @@
 	dma_addr_t		phys;
 	int	i = 0;
 
-	pm_runtime_get_sync(&mid->pdev->dev);
+	pm_runtime_get_sync(mid->dev);
 
 	if (mid->state == SUSPENDED) {
-		if (dma_resume(&mid->pdev->dev)) {
+		if (dma_resume(mid->dev)) {
 			pr_err("ERR_MDMA: resume failed");
 			return -EFAULT;
 		}
 	}
 
 	/* ASSERT:  channel is idle */
-	if (test_ch_en(mid->dma_base, midc->ch_id)) {
-		/*ch is not idle*/
+	if (midc->in_use == true) {
 		pr_err("ERR_MDMA: ch not idle\n");
-		pm_runtime_put(&mid->pdev->dev);
+		pm_runtime_put(mid->dev);
 		return -EIO;
 	}
 	dma_cookie_init(chan);
@@ -874,10 +1413,10 @@
 	spin_lock_bh(&midc->lock);
 	while (midc->descs_allocated < DESCS_PER_CHANNEL) {
 		spin_unlock_bh(&midc->lock);
-		desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
+		desc = dma_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
 		if (!desc) {
 			pr_err("ERR_MDMA: desc failed\n");
-			pm_runtime_put(&mid->pdev->dev);
+			pm_runtime_put(mid->dev);
 			return -ENOMEM;
 			/*check*/
 		}
@@ -889,9 +1428,10 @@
 		i = ++midc->descs_allocated;
 		list_add_tail(&desc->desc_node, &midc->free_list);
 	}
+	midc->busy = false;
 	spin_unlock_bh(&midc->lock);
 	midc->in_use = true;
-	midc->busy = false;
+	midc->block_intr_status = false;
 	pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
 	return i;
 }
@@ -906,7 +1446,8 @@
 static void midc_handle_error(struct middma_device *mid,
 		struct intel_mid_dma_chan *midc)
 {
-	midc_scan_descriptors(mid, midc);
+	midc_collect_descriptors(mid, midc);
+	midc_start_descriptors(mid, midc);
 }
 
 /**
@@ -922,24 +1463,25 @@
 	struct intel_mid_dma_chan *midc = NULL;
 	u32 status, raw_tfr, raw_block;
 	int i;
-
 	mid = (struct middma_device *)data;
 	if (mid == NULL) {
 		pr_err("ERR_MDMA: tasklet Null param\n");
 		return;
 	}
-	pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
 	raw_tfr = ioread32(mid->dma_base + RAW_TFR);
-	raw_block = ioread32(mid->dma_base + RAW_BLOCK);
-	status = raw_tfr | raw_block;
-	status &= mid->intr_mask;
+	status = raw_tfr & mid->tfr_intr_mask;
+	pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
+	pr_debug("tfr_mask:%#lx, raw_tfr:%#x, status:%#x\n",
+			mid->tfr_intr_mask, raw_tfr, status);
 	while (status) {
 		/*txn interrupt*/
-		i = get_ch_index(&status, mid->chan_base);
+		i = get_ch_index(status, mid->chan_base);
 		if (i < 0) {
 			pr_err("ERR_MDMA:Invalid ch index %x\n", i);
 			return;
 		}
+		/* clear the status bit */
+		status = status & ~(1 << (i + mid->chan_base));
 		midc = &mid->ch[i];
 		if (midc == NULL) {
 			pr_err("ERR_MDMA:Null param midc\n");
@@ -947,40 +1489,71 @@
 		}
 		pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
 				status, midc->ch_id, i);
-		midc->raw_tfr = raw_tfr;
-		midc->raw_block = raw_block;
 		spin_lock_bh(&midc->lock);
+		midc->raw_tfr = raw_tfr;
 		/*clearing this interrupts first*/
 		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
-		if (raw_block) {
-			iowrite32((1 << midc->ch_id),
-				mid->dma_base + CLEAR_BLOCK);
+		if (likely(midc->in_use)) {
+			midc_collect_descriptors(mid, midc);
+			midc_start_descriptors(mid, midc);
 		}
-		midc_scan_descriptors(mid, midc);
 		pr_debug("MDMA:Scan of desc... complete, unmasking\n");
 		iowrite32(UNMASK_INTR_REG(midc->ch_id),
-				mid->dma_base + MASK_TFR);
-		if (raw_block) {
-			iowrite32(UNMASK_INTR_REG(midc->ch_id),
-				mid->dma_base + MASK_BLOCK);
-		}
+					mid->dma_base + MASK_TFR);
 		spin_unlock_bh(&midc->lock);
 	}
 
-	status = ioread32(mid->dma_base + RAW_ERR);
-	status &= mid->intr_mask;
+	raw_block = ioread32(mid->dma_base + RAW_BLOCK);
+	status = raw_block & mid->block_intr_mask;
+	pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
+	pr_debug("block_mask:%#lx, raw_block:%#x, status:%#x\n",
+			 mid->block_intr_mask, raw_block, status);
 	while (status) {
-		/*err interrupt*/
-		i = get_ch_index(&status, mid->chan_base);
+		/*txn interrupt*/
+		i = get_ch_index(status, mid->chan_base);
 		if (i < 0) {
 			pr_err("ERR_MDMA:Invalid ch index %x\n", i);
 			return;
 		}
+		/* clear the status bit */
+		status = status & ~(1 << (i + mid->chan_base));
 		midc = &mid->ch[i];
 		if (midc == NULL) {
 			pr_err("ERR_MDMA:Null param midc\n");
 			return;
 		}
+		pr_debug("MDMA:Tx complete interrupt raw block  %x, Ch No %d Index %d\n",
+				status, midc->ch_id, i);
+		spin_lock_bh(&midc->lock);
+		/*clearing this interrupts first*/
+
+		midc->raw_block = raw_block;
+		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK);
+		if (midc->block_intr_status) {
+			midc_collect_descriptors(mid, midc);
+			midc_start_descriptors(mid, midc);
+		}
+
+		iowrite32(UNMASK_INTR_REG(midc->ch_id),
+					mid->dma_base + MASK_BLOCK);
+		spin_unlock_bh(&midc->lock);
+	}
+
+	status = ioread32(mid->dma_base + RAW_ERR);
+	pr_debug("MDMA:raw error status:%#x\n", status);
+	while (status) {
+		/*err interrupt*/
+		i = get_ch_index(status, mid->chan_base);
+		if (i < 0) {
+			pr_err("ERR_MDMA:Invalid ch index %x (raw err)\n", i);
+			return;
+		}
+		status = status & ~(1 << (i + mid->chan_base));
+		midc = &mid->ch[i];
+		if (midc == NULL) {
+			pr_err("ERR_MDMA:Null param midc (raw err)\n");
+			return;
+		}
 		pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
 				status, midc->ch_id, i);
 
@@ -1018,33 +1591,59 @@
 static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
 {
 	struct middma_device *mid = data;
-	u32 tfr_status, err_status;
-	int call_tasklet = 0;
+	u32 tfr_status, err_status, block_status;
+	u32 isr;
 
-	tfr_status = ioread32(mid->dma_base + RAW_TFR);
-	err_status = ioread32(mid->dma_base + RAW_ERR);
-	if (!tfr_status && !err_status)
+	if (!mid) {
+		pr_err("ERR_MDMA:null pointer mid\n");
+		return IRQ_NONE;
+	}
+
+	/* On Baytrail, the DMAC is sharing IRQ with other devices */
+	if (is_byt_lpio_dmac(mid) && mid->state == SUSPENDED)
+		return IRQ_NONE;
 
 	/*DMA Interrupt*/
 	pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
-	pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
-	tfr_status &= mid->intr_mask;
+
+	/* Read the interrupt status registers */
+	tfr_status = ioread32(mid->dma_base + STATUS_TFR);
+	err_status = ioread32(mid->dma_base + STATUS_ERR);
+	block_status = ioread32(mid->dma_base + STATUS_BLOCK);
+
+	/* Common case if the IRQ is shared with other devices */
+	if (!tfr_status && !err_status && !block_status)
+		return IRQ_NONE;
+
+	pr_debug("MDMA: tfr_status %x, Mask %x\n", tfr_status, mid->intr_mask);
 	if (tfr_status) {
 		/*need to disable intr*/
-		iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR);
-		iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK);
-		pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
-		call_tasklet = 1;
+		iowrite32((tfr_status << INT_MASK_WE),
+						mid->dma_base + MASK_TFR);
 	}
-	err_status &= mid->intr_mask;
+	if (block_status) {
+		/*need to disable intr*/
+		iowrite32((block_status << INT_MASK_WE),
+						mid->dma_base + MASK_BLOCK);
+	}
 	if (err_status) {
 		iowrite32((err_status << INT_MASK_WE),
 			  mid->dma_base + MASK_ERR);
-		call_tasklet = 1;
 	}
-	if (call_tasklet)
-		tasklet_schedule(&mid->tasklet);
+	/*
+	 * On Merrifield we also need to clear the PISR bits to stop the
+	 * interrupt: read the PISR register and clear any status bits
+	 * that are set.
+	 */
+	if (mid->pimr_mask && !mid->dword_trf) {
+		isr = readl(mid->mask_reg);
+		pr_debug("isr says: %x", isr);
+		if (isr) {
+			isr &= mid->pimr_mask;
+			pr_debug("writing isr: %x", isr);
+			writel(isr, mid->mask_reg);
+		}
+	}
+
+	tasklet_schedule(&mid->tasklet);
 
 	return IRQ_HANDLED;
 }
@@ -1059,6 +1658,41 @@
 	return intel_mid_dma_interrupt(irq, data);
 }
 
+static void config_dma_fifo_partition(struct middma_device *dma)
+{
+	/* program FIFO Partition registers - 128 bytes for each ch */
+	iowrite32(DMA_FIFO_SIZE, dma->dma_base + FIFO_PARTITION0_HI);
+	iowrite32(DMA_FIFO_SIZE, dma->dma_base + FIFO_PARTITION1_LO);
+	iowrite32(DMA_FIFO_SIZE, dma->dma_base + FIFO_PARTITION1_HI);
+	iowrite32(DMA_FIFO_SIZE | BIT(26), dma->dma_base + FIFO_PARTITION0_LO);
+}
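+
+/*
+ * Note (layout inferred from the value, not from a published spec):
+ * DMA_FIFO_SIZE (0x100080) packs the 13-bit partition size field twice,
+ * i.e. 0x80 | (0x80 << 13), giving 128 bytes to each of the two channels
+ * covered by one partition register; BIT(26) in FIFO_PARTITION0_LO is
+ * presumably the update/commit bit.
+ */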
+
+/* v1 ops will be used for Medfield & CTP platforms */
+static struct intel_mid_dma_ops v1_dma_ops = {
+	.device_alloc_chan_resources	= intel_mid_dma_alloc_chan_resources,
+	.device_free_chan_resources	= intel_mid_dma_free_chan_resources,
+	.device_prep_dma_memcpy		= intel_mid_dma_prep_memcpy,
+	.device_prep_dma_sg		= intel_mid_dma_prep_sg,
+	.device_prep_slave_sg		= intel_mid_dma_prep_slave_sg,
+	.device_control			= intel_mid_dma_device_control,
+	.device_tx_status		= intel_mid_dma_tx_status,
+	.device_issue_pending		= intel_mid_dma_issue_pending,
+	.dma_chan_suspend		= intel_mid_dma_chan_suspend_v1,
+};
+
+/* v2 ops will be used on Merrifield and later platforms */
+static struct intel_mid_dma_ops v2_dma_ops = {
+	.device_alloc_chan_resources    = intel_mid_dma_alloc_chan_resources,
+	.device_free_chan_resources     = intel_mid_dma_free_chan_resources,
+	.device_prep_dma_memcpy         = intel_mid_dma_prep_memcpy_v2,
+	.device_prep_dma_sg             = intel_mid_dma_prep_sg,
+	.device_prep_slave_sg           = intel_mid_dma_prep_slave_sg,
+	.device_control                 = intel_mid_dma_device_control,
+	.device_tx_status               = intel_mid_dma_tx_status,
+	.device_issue_pending           = intel_mid_dma_issue_pending,
+	.dma_chan_suspend		= intel_mid_dma_chan_suspend_v2,
+};
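+
+/*
+ * These op tables are copied into the per-device context by
+ * mid_dma_setup_context() and wired into the generic dma_device in
+ * mid_setup_dma(), e.g.:
+ *
+ *	dma->common.device_prep_dma_memcpy =
+ *			dma->dma_ops.device_prep_dma_memcpy;
+ */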
+
 /**
  * mid_setup_dma -	Setup the DMA controller
  * @pdev: Controller PCI device structure
@@ -1066,33 +1700,33 @@
  * Initialize the DMA controller, channels, registers with DMA engine,
  * ISR. Initialize DMA controller channels.
  */
-static int mid_setup_dma(struct pci_dev *pdev)
+int mid_setup_dma(struct device *dev)
 {
-	struct middma_device *dma = pci_get_drvdata(pdev);
+	struct middma_device *dma = dev_get_drvdata(dev);
 	int err, i;
 
 	/* DMA coherent memory pool for DMA descriptor allocations */
-	dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
+	dma->dma_pool = dma_pool_create("intel_mid_dma_desc_pool", dev,
 					sizeof(struct intel_mid_dma_desc),
 					32, 0);
 	if (NULL == dma->dma_pool) {
-		pr_err("ERR_MDMA:pci_pool_create failed\n");
+		pr_err("ERR_MDMA:dma_pool_create failed\n");
 		err = -ENOMEM;
 		goto err_dma_pool;
 	}
 
 	INIT_LIST_HEAD(&dma->common.channels);
-	dma->pci_id = pdev->device;
 	if (dma->pimr_mask) {
-		dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE,
-					LNW_PERIPHRAL_MASK_SIZE);
+		dma->mask_reg = devm_ioremap(dma->dev, dma->pimr_base, LNW_PERIPHRAL_MASK_SIZE);
 		if (dma->mask_reg == NULL) {
 			pr_err("ERR_MDMA:Can't map periphral intr space !!\n");
 			err = -ENOMEM;
-			goto err_ioremap;
+			goto err_setup;
 		}
-	} else
+	} else {
 		dma->mask_reg = NULL;
+	}
 
 	pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan);
 	/*init CH structures*/
@@ -1137,18 +1771,17 @@
 	dma_cap_set(DMA_MEMCPY, dma->common.cap_mask);
 	dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
 	dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
-	dma->common.dev = &pdev->dev;
+	dma->common.dev = dev;
 
-	dma->common.device_alloc_chan_resources =
-					intel_mid_dma_alloc_chan_resources;
-	dma->common.device_free_chan_resources =
-					intel_mid_dma_free_chan_resources;
+	dma->common.device_alloc_chan_resources = dma->dma_ops.device_alloc_chan_resources;
+	dma->common.device_free_chan_resources = dma->dma_ops.device_free_chan_resources;
 
-	dma->common.device_tx_status = intel_mid_dma_tx_status;
-	dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
-	dma->common.device_issue_pending = intel_mid_dma_issue_pending;
-	dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
-	dma->common.device_control = intel_mid_dma_device_control;
+	dma->common.device_tx_status = dma->dma_ops.device_tx_status;
+	dma->common.device_prep_dma_memcpy = dma->dma_ops.device_prep_dma_memcpy;
+	dma->common.device_prep_dma_sg = dma->dma_ops.device_prep_dma_sg;
+	dma->common.device_issue_pending = dma->dma_ops.device_issue_pending;
+	dma->common.device_prep_slave_sg = dma->dma_ops.device_prep_slave_sg;
+	dma->common.device_control = dma->dma_ops.device_control;
 
 	/*enable dma cntrl*/
 	iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);
@@ -1156,23 +1789,23 @@
 	/*register irq */
 	if (dma->pimr_mask) {
 		pr_debug("MDMA:Requesting irq shared for DMAC1\n");
-		err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
+		err = devm_request_irq(dma->dev, dma->irq, intel_mid_dma_interrupt1,
 			IRQF_SHARED, "INTEL_MID_DMAC1", dma);
 		if (0 != err)
-			goto err_irq;
+			goto err_setup;
 	} else {
 		dma->intr_mask = 0x03;
 		pr_debug("MDMA:Requesting irq for DMAC2\n");
-		err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
+		err = devm_request_irq(dma->dev, dma->irq, intel_mid_dma_interrupt2,
 			IRQF_SHARED, "INTEL_MID_DMAC2", dma);
 		if (0 != err)
-			goto err_irq;
+			goto err_setup;
 	}
 	/*register device w/ engine*/
 	err = dma_async_device_register(&dma->common);
 	if (0 != err) {
 		pr_err("ERR_MDMA:device_register failed: %d\n", err);
-		goto err_engine;
+		goto err_dma_pool;
 	}
 	if (dma->pimr_mask) {
 		pr_debug("setting up tasklet1 for DMAC1\n");
@@ -1181,15 +1814,15 @@
 		pr_debug("setting up tasklet2 for DMAC2\n");
 		tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma);
 	}
+	if (!dma->dword_trf) {
+		config_dma_fifo_partition(dma);
+		/* Mask all interrupts from DMA controller to IA by default */
+		dmac1_mask_periphral_intr(dma);
+	}
 	return 0;
 
-err_engine:
-	free_irq(pdev->irq, dma);
-err_irq:
-	if (dma->mask_reg)
-		iounmap(dma->mask_reg);
-err_ioremap:
-	pci_pool_destroy(dma->dma_pool);
+err_setup:
+	dma_pool_destroy(dma->dma_pool);
 err_dma_pool:
 	pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
 	return err;
@@ -1198,25 +1831,42 @@
 
 /**
  * middma_shutdown -	Shutdown the DMA controller
- * @pdev: Controller PCI device structure
+ * @dev: Controller device structure
  *
  * Called by remove
  * Unregister DMa controller, clear all structures and free interrupt
  */
-static void middma_shutdown(struct pci_dev *pdev)
+void middma_shutdown(struct device *dev)
 {
-	struct middma_device *device = pci_get_drvdata(pdev);
+	struct middma_device *device = dev_get_drvdata(dev);
 
 	dma_async_device_unregister(&device->common);
-	pci_pool_destroy(device->dma_pool);
-	if (device->mask_reg)
-		iounmap(device->mask_reg);
-	if (device->dma_base)
-		iounmap(device->dma_base);
-	free_irq(pdev->irq, device);
+	dma_pool_destroy(device->dma_pool);
 	return;
 }
 
+struct middma_device *mid_dma_setup_context(struct device *dev,
+					    struct intel_mid_dma_probe_info *info)
+{
+	struct middma_device *mid_device;
+
+	mid_device = devm_kzalloc(dev, sizeof(*mid_device), GFP_KERNEL);
+	if (!mid_device) {
+		pr_err("ERR_MDMA:kzalloc failed probe\n");
+		return NULL;
+	}
+	mid_device->dev = dev;
+	mid_device->max_chan = info->max_chan;
+	mid_device->chan_base = info->ch_base;
+	mid_device->block_size = info->block_size;
+	mid_device->pimr_mask = info->pimr_mask;
+	mid_device->pimr_base = info->pimr_base;
+	mid_device->dword_trf = info->dword_trf;
+	mid_device->pimr_offset = info->pimr_offset;
+	mid_device->pci_id = info->pci_id;
+	memcpy(&mid_device->dma_ops, info->pdma_ops, sizeof(struct intel_mid_dma_ops));
+	return mid_device;
+}
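+
+/*
+ * mid_dma_setup_context() is shared by intel_mid_dma_probe() (PCI) and
+ * dma_acpi_probe() (ACPI): both allocate the device context here and
+ * then call mid_setup_dma().
+ */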
+
 /**
  * intel_mid_dma_probe -	PCI Probe
  * @pdev: Controller PCI device structure
@@ -1255,42 +1905,41 @@
 	if (err)
 		goto err_set_dma_mask;
 
-	device = kzalloc(sizeof(*device), GFP_KERNEL);
-	if (!device) {
-		pr_err("ERR_MDMA:kzalloc failed probe\n");
-		err = -ENOMEM;
+	pci_dev_get(pdev);
+	device = mid_dma_setup_context(&pdev->dev, info);
+	if (!device) {
+		err = -ENOMEM;
+		goto err_ioremap;
+	}
-	}
-	device->pdev = pci_dev_get(pdev);
+
+	device->pci_id = pdev->device;
 
 	base_addr = pci_resource_start(pdev, 0);
 	bar_size  = pci_resource_len(pdev, 0);
-	device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE);
+	device->dma_base = devm_ioremap_nocache(&pdev->dev, base_addr, DMA_REG_SIZE);
 	if (!device->dma_base) {
 		pr_err("ERR_MDMA:ioremap failed\n");
 		err = -ENOMEM;
 		goto err_ioremap;
 	}
+	device->irq = pdev->irq;
 	pci_set_drvdata(pdev, device);
 	pci_set_master(pdev);
-	device->max_chan = info->max_chan;
-	device->chan_base = info->ch_base;
-	device->block_size = info->block_size;
-	device->pimr_mask = info->pimr_mask;
 
-	err = mid_setup_dma(pdev);
+#ifdef CONFIG_PRH_TEMP_WA_FOR_SPID
+	/* PRH uses channels 4,5,6,7; override the info table data */
+	pr_info("Device is Bodegabay\n");
+	device->max_chan = 4;
+	device->chan_base = 4;
+#endif
+	err = mid_setup_dma(&pdev->dev);
 	if (err)
-		goto err_dma;
+		goto err_ioremap;
 
 	pm_runtime_put_noidle(&pdev->dev);
-	pm_runtime_allow(&pdev->dev);
+	pm_runtime_forbid(&pdev->dev);
 	return 0;
 
-err_dma:
-	iounmap(device->dma_base);
 err_ioremap:
 	pci_dev_put(pdev);
-	kfree(device);
 err_kzalloc:
 err_set_dma_mask:
 	pci_release_regions(pdev);
@@ -1310,126 +1959,135 @@
  */
 static void intel_mid_dma_remove(struct pci_dev *pdev)
 {
-	struct middma_device *device = pci_get_drvdata(pdev);
-
 	pm_runtime_get_noresume(&pdev->dev);
 	pm_runtime_forbid(&pdev->dev);
-	middma_shutdown(pdev);
+	middma_shutdown(&pdev->dev);
 	pci_dev_put(pdev);
-	kfree(device);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 }
 
 /* Power Management */
 /*
-* dma_suspend - PCI suspend function
+* dma_suspend - suspend function
 *
-* @pci: PCI device structure
-* @state: PM message
+* @dev: device structure
 *
 * This function is called by OS when a power event occurs
 */
-static int dma_suspend(struct device *dev)
+int dma_suspend(struct device *dev)
 {
-	struct pci_dev *pci = to_pci_dev(dev);
 	int i;
-	struct middma_device *device = pci_get_drvdata(pci);
+	struct middma_device *device = dev_get_drvdata(dev);
 	pr_debug("MDMA: dma_suspend called\n");
-
+#if 0
 	for (i = 0; i < device->max_chan; i++) {
 		if (device->ch[i].in_use)
 			return -EAGAIN;
 	}
+#endif
 	dmac1_mask_periphral_intr(device);
 	device->state = SUSPENDED;
-	pci_save_state(pci);
-	pci_disable_device(pci);
-	pci_set_power_state(pci, PCI_D3hot);
+
 	return 0;
 }
 
 /**
-* dma_resume - PCI resume function
+* dma_resume - resume function
 *
-* @pci:	PCI device structure
+* @dev:	device structure
 *
 * This function is called by OS when a power event occurs
 */
 int dma_resume(struct device *dev)
 {
-	struct pci_dev *pci = to_pci_dev(dev);
-	int ret;
-	struct middma_device *device = pci_get_drvdata(pci);
+	struct middma_device *device = dev_get_drvdata(dev);
 
 	pr_debug("MDMA: dma_resume called\n");
-	pci_set_power_state(pci, PCI_D0);
-	pci_restore_state(pci);
-	ret = pci_enable_device(pci);
-	if (ret) {
-		pr_err("MDMA: device can't be enabled for %x\n", pci->device);
-		return ret;
-	}
 	device->state = RUNNING;
 	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
+
+	if (!device->dword_trf)
+		config_dma_fifo_partition(device);
+
 	return 0;
 }
 
 static int dma_runtime_suspend(struct device *dev)
 {
-	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct middma_device *device = pci_get_drvdata(pci_dev);
-
-	device->state = SUSPENDED;
-	return 0;
+	return dma_suspend(dev);
 }
 
 static int dma_runtime_resume(struct device *dev)
 {
-	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct middma_device *device = pci_get_drvdata(pci_dev);
-
-	device->state = RUNNING;
-	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
-	return 0;
+	return dma_resume(dev);
 }
 
 static int dma_runtime_idle(struct device *dev)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct middma_device *device = pci_get_drvdata(pdev);
+	struct middma_device *device = dev_get_drvdata(dev);
 	int i;
 
 	for (i = 0; i < device->max_chan; i++) {
 		if (device->ch[i].in_use)
 			return -EAGAIN;
 	}
-
-	return pm_schedule_suspend(dev, 0);
+	return pm_schedule_suspend(dev, 0);
 }
 
 /******************************************************************************
 * PCI stuff
 */
 static struct pci_device_id intel_mid_dma_ids[] = {
-	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID),	INFO(2, 6, 4095, 0x200020)},
-	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID),	INFO(2, 0, 2047, 0)},
-	{ PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID),	INFO(2, 0, 2047, 0)},
-	{ PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID),	INFO(4, 0, 4095, 0x400040)},
+	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID),
+		INFO(2, 6, SST_MAX_DMA_LEN, 0x200020, 0xFFAE8008, 1, 0x8, INTEL_MID_DMAC1_ID, &v1_dma_ops)},
+	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID),
+		INFO(2, 0, 2047, 0, 0, 1, 0, INTEL_MID_DMAC2_ID, &v1_dma_ops)},
+	{ PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID),
+		INFO(2, 0, 2047, 0, 0, 1, 0, INTEL_MID_GP_DMAC2_ID, &v1_dma_ops)},
+	{ PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID),
+		INFO(4, 0, SST_MAX_DMA_LEN, 0x400040, 0xFFAE8008, 1, 0x8, INTEL_MFLD_DMAC1_ID, &v1_dma_ops)},
+	/* Cloverview support */
+	{ PCI_VDEVICE(INTEL, INTEL_CLV_GP_DMAC2_ID),
+		INFO(2, 0, 2047, 0, 0, 1, 0, INTEL_CLV_GP_DMAC2_ID, &v1_dma_ops)},
+	{ PCI_VDEVICE(INTEL, INTEL_CLV_DMAC1_ID),
+		INFO(4, 0, SST_MAX_DMA_LEN, 0x400040, 0xFFAE8008, 1, 0x8, INTEL_CLV_DMAC1_ID, &v1_dma_ops)},
+	/* Mrfld */
+	{ PCI_VDEVICE(INTEL, INTEL_MRFLD_GP_DMAC2_ID),
+		INFO(4, 0, SST_MAX_DMA_LEN_MRFLD, 0, 0, 0, 0, INTEL_MRFLD_GP_DMAC2_ID, &v2_dma_ops)},
+	{ PCI_VDEVICE(INTEL, INTEL_MRFLD_DMAC0_ID),
+		INFO(2, 6, SST_MAX_DMA_LEN_MRFLD, 0xFF0000, 0xFF340018, 0, 0x10, INTEL_MRFLD_DMAC0_ID, &v2_dma_ops)},
+	/* Baytrail Low Speed Peripheral DMA */
+	{ PCI_VDEVICE(INTEL, INTEL_BYT_LPIO1_DMAC_ID),
+		INFO(6, 0, 2047, 0, 0, 1, 0, INTEL_BYT_LPIO1_DMAC_ID, &v1_dma_ops)},
+	{ PCI_VDEVICE(INTEL, INTEL_BYT_LPIO2_DMAC_ID),
+		INFO(6, 0, 2047, 0, 0, 1, 0, INTEL_BYT_LPIO2_DMAC_ID, &v1_dma_ops)},
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);
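+
+/*
+ * For reference: the INFO() initializers in the id table above fill
+ * struct intel_mid_dma_probe_info in field order, i.e. (max_chan,
+ * ch_base, block_size, pimr_mask, pimr_base, dword_trf, pimr_offset,
+ * pci_id, pdma_ops); compare dma_byt_info below, which sets the same
+ * fields by name.
+ */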
 
+struct intel_mid_dma_probe_info dma_byt_info = {
+	.max_chan = 4,
+	.ch_base = 4,
+	.block_size = 131071,
+	.pimr_mask = 0x00FF0000,
+	.pimr_base = 0xDF540018,
+	.dword_trf = 0,
+	.pimr_offset = 0x10,
+	.pci_id = INTEL_BYT_DMAC0_ID,
+	.pdma_ops = &v2_dma_ops,
+};
+
 static const struct dev_pm_ops intel_mid_dma_pm = {
-	.runtime_suspend = dma_runtime_suspend,
-	.runtime_resume = dma_runtime_resume,
-	.runtime_idle = dma_runtime_idle,
-	.suspend = dma_suspend,
-	.resume = dma_resume,
+	SET_SYSTEM_SLEEP_PM_OPS(dma_suspend,
+			dma_resume)
+	SET_RUNTIME_PM_OPS(dma_runtime_suspend,
+			dma_runtime_resume,
+			dma_runtime_idle)
 };
 
 static struct pci_driver intel_mid_dma_pci_driver = {
-	.name		=	"Intel MID DMA",
+	.name		=	"intel_mid_dma",
 	.id_table	=	intel_mid_dma_ids,
 	.probe		=	intel_mid_dma_probe,
 	.remove		=	intel_mid_dma_remove,
@@ -1440,17 +2098,55 @@
 #endif
 };
 
+static const struct acpi_device_id dma_acpi_ids[];
+
+struct intel_mid_dma_probe_info *mid_get_acpi_driver_data(const char *hid)
+{
+	const struct acpi_device_id *id;
+
+	pr_debug("%s\n", __func__);
+	for (id = dma_acpi_ids; id->id[0]; id++)
+		if (!strncmp(id->id, hid, 16))
+			return (struct intel_mid_dma_probe_info *)id->driver_data;
+	return NULL;
+}
+
+static const struct acpi_device_id dma_acpi_ids[] = {
+	{ "DMA0F28", (kernel_ulong_t)&dma_byt_info },
+	{ },
+};
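+
+/*
+ * "DMA0F28" is the ACPI HID for the Baytrail DMAC; its driver_data
+ * points at dma_byt_info above, which mid_get_acpi_driver_data()
+ * returns to dma_acpi_probe().
+ */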
+
+static struct platform_driver intel_dma_acpi_driver = {
+	.driver = {
+		.name			= "intel_dma_acpi",
+		.owner			= THIS_MODULE,
+		.acpi_match_table	= dma_acpi_ids,
+		.pm			= &intel_mid_dma_pm,
+	},
+	.probe	= dma_acpi_probe,
+	.remove	= dma_acpi_remove,
+};
+
 static int __init intel_mid_dma_init(void)
 {
+	int ret;
+
 	pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n",
 			INTEL_MID_DMA_DRIVER_VERSION);
-	return pci_register_driver(&intel_mid_dma_pci_driver);
+	ret = pci_register_driver(&intel_mid_dma_pci_driver);
+	if (ret)
+		pr_err("PCI dev registration failed\n");
+
+	ret = platform_driver_register(&intel_dma_acpi_driver);
+	if (ret)
+		pr_err("Platform dev registration failed\n");
+	return ret;
 }
-fs_initcall(intel_mid_dma_init);
+module_init(intel_mid_dma_init);
 
 static void __exit intel_mid_dma_exit(void)
 {
 	pci_unregister_driver(&intel_mid_dma_pci_driver);
+	platform_driver_unregister(&intel_dma_acpi_driver);
 }
 module_exit(intel_mid_dma_exit);
 
@@ -1458,3 +2154,5 @@
 MODULE_DESCRIPTION("Intel (R) MID DMAC Driver");
 MODULE_LICENSE("GPL v2");
 MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION);
+MODULE_ALIAS("pci:intel_mid_dma");
+MODULE_ALIAS("acpi:intel_dma_acpi");
diff --git a/drivers/dma/intel_mid_dma_acpi.c b/drivers/dma/intel_mid_dma_acpi.c
new file mode 100644
index 0000000..2bef1ab
--- /dev/null
+++ b/drivers/dma/intel_mid_dma_acpi.c
@@ -0,0 +1,192 @@
+/* intel_mid_dma_acpi.c - Intel MID DMA driver init file for ACPI enumeration.
+ *
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ *  Authors:	Ramesh Babu K V <Ramesh.Babu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/intel_mid_dma.h>
+#include <linux/pm_runtime.h>
+#include <acpi/acpi_bus.h>
+
+#include "intel_mid_dma_regs.h"
+
+#define HID_MAX_SIZE 8
+
+LIST_HEAD(dma_dev_list);
+
+struct acpi_dma_dev_list {
+	struct list_head dmadev_list;
+	char dma_hid[HID_MAX_SIZE];
+	struct device *acpi_dma_dev;
+};
+
+struct device *intel_mid_get_acpi_dma(const char *hid)
+{
+	struct acpi_dma_dev_list *listnode;
+
+	if (list_empty(&dma_dev_list))
+		return NULL;
+
+	list_for_each_entry(listnode, &dma_dev_list, dmadev_list) {
+		if (!(strncmp(listnode->dma_hid, hid, HID_MAX_SIZE)))
+			return listnode->acpi_dma_dev;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(intel_mid_get_acpi_dma);
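+
+/*
+ * Usage sketch (hypothetical client code, for illustration only): a
+ * peripheral driver enumerated via ACPI can look up its DMA controller
+ * by HID before requesting channels:
+ *
+ *	struct device *dma_dev = intel_mid_get_acpi_dma("DMA0F28");
+ *
+ *	if (!dma_dev)
+ *		return -EPROBE_DEFER;
+ *
+ * Returning -EPROBE_DEFER lets the client retry after this controller
+ * has probed.
+ */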
+
+#if IS_ENABLED(CONFIG_ACPI)
+static int mid_get_and_map_rsrc(void **dest, struct platform_device *pdev,
+				unsigned int num)
+{
+	struct resource *rsrc;
+
+	rsrc = platform_get_resource(pdev, IORESOURCE_MEM, num);
+	if (!rsrc) {
+		pr_err("%s: Invalid resource - %d", __func__, num);
+		return -EIO;
+	}
+	pr_debug("rsrc #%d = %#x", num, rsrc->start);
+	*dest = devm_ioremap_nocache(&pdev->dev, rsrc->start, resource_size(rsrc));
+	if (!*dest) {
+		pr_err("%s: unable to map resource: %#x", __func__, rsrc->start);
+		return -EIO;
+	}
+	return 0;
+}
+
+static int mid_platform_get_resources(struct middma_device *mid_device,
+				      struct platform_device *pdev)
+{
+	int ret;
+
+	pr_debug("%s\n", __func__);
+
+	/* All ACPI resource request here */
+	/* Get DDR addr from platform resource table */
+	ret = mid_get_and_map_rsrc(&mid_device->dma_base, pdev, 0);
+	if (ret)
+		return ret;
+	pr_debug("dma_base:%p", mid_device->dma_base);
+
+	ret = mid_get_and_map_rsrc(&mid_device->mask_reg, pdev, 1);
+	if (ret)
+		return ret;
+	/* mask_reg should point to ISRX register */
+	mid_device->mask_reg += 0x18;
+	pr_debug("pimr_base:%p", mid_device->mask_reg);
+
+	mid_device->irq = platform_get_irq(pdev, 0);
+	if (mid_device->irq < 0) {
+		pr_err("invalid irq:%d", mid_device->irq);
+		return mid_device->irq;
+	}
+	pr_debug("irq from pdev is:%d", mid_device->irq);
+
+	return 0;
+}
+
+int dma_acpi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	acpi_handle handle = ACPI_HANDLE(dev);
+	struct acpi_device *device;
+	struct middma_device *mid_device;
+	struct intel_mid_dma_probe_info *info;
+	struct acpi_dma_dev_list *listnode;
+	const char *hid;
+	int ret;
+
+	ret = acpi_bus_get_device(handle, &device);
+	if (ret) {
+		pr_err("%s: could not get acpi device - %d\n", __func__, ret);
+		return -ENODEV;
+	}
+
+	if (acpi_bus_get_status(device) || !device->status.present) {
+		pr_err("%s: device has invalid status", __func__);
+		return -ENODEV;
+	}
+
+	hid = acpi_device_hid(device);
+	pr_info("%s for %s", __func__, hid);
+
+	/* Apply default dma_mask if needed */
+	if (!pdev->dev.dma_mask) {
+		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	}
+
+	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret) {
+		pr_err("dma_set_mask failed with err:%d", ret);
+		return ret;
+	}
+
+	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret) {
+		pr_err("_coherent_mask failed with err:%d", ret);
+		return ret;
+	}
+	info = mid_get_acpi_driver_data(hid);
+	if (!info) {
+		pr_err("acpi driver data is null\n");
+		ret = -ENODEV;
+		goto err_dma;
+	}
+
+	mid_device = mid_dma_setup_context(&pdev->dev, info);
+	if (!mid_device) {
+		ret = -ENOMEM;
+		goto err_dma;
+	}
+
+	ret = mid_platform_get_resources(mid_device, pdev);
+	if (ret) {
+		pr_err("Error while get resources:%d", ret);
+		goto err_dma;
+	}
+	platform_set_drvdata(pdev, mid_device);
+	ret = mid_setup_dma(&pdev->dev);
+	if (ret)
+		goto err_dma;
+	pm_runtime_enable(&pdev->dev);
+	/* Record this controller so intel_mid_get_acpi_dma() can find it */
+	listnode = devm_kzalloc(&pdev->dev, sizeof(*listnode), GFP_KERNEL);
+	if (!listnode) {
+		ret = -ENOMEM;
+		goto err_dma;
+	}
+	strlcpy(listnode->dma_hid, hid, HID_MAX_SIZE);
+	listnode->acpi_dma_dev = &pdev->dev;
+	list_add_tail(&listnode->dmadev_list, &dma_dev_list);
+	pr_debug("%s:completed\n", __func__);
+	return 0;
+err_dma:
+	pr_err("ERR_MDMA:Probe failed %d\n", ret);
+	return ret;
+}
+#else
+int dma_acpi_probe(struct platform_device *pdev)
+{
+	return -EIO;
+}
+#endif
+
+int dma_acpi_remove(struct platform_device *pdev)
+{
+	pm_runtime_forbid(&pdev->dev);
+	middma_shutdown(&pdev->dev);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h
index 17b4219..30160e4 100644
--- a/drivers/dma/intel_mid_dma_regs.h
+++ b/drivers/dma/intel_mid_dma_regs.h
@@ -31,6 +31,8 @@
 
 #define INTEL_MID_DMA_DRIVER_VERSION "1.1.0"
 
+#define MID_MAX_CHAN	8 /*max ch across controllers*/
+
 #define	REG_BIT0		0x00000001
 #define	REG_BIT8		0x00000100
 #define INT_MASK_WE		0x8
@@ -45,11 +47,13 @@
 #define DISABLE_CHANNEL(chan_num) \
 	(REG_BIT8 << chan_num)
 
-#define DESCS_PER_CHANNEL	16
+#define DESCS_PER_CHANNEL	128
 /*DMA Registers*/
 /*registers associated with channel programming*/
 #define DMA_REG_SIZE		0x400
 #define DMA_CH_SIZE		0x58
+#define DMA_FIFO_SIZE 0x100080
+
 
 /*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/
 #define SAR			0x00 /* Source Address Register*/
@@ -83,6 +87,17 @@
 #define INTR_STATUS		0x360
 #define DMA_CFG			0x398
 #define DMA_CHAN_EN		0x3A0
+#define FIFO_PARTITION0_LO	0x400
+#define FIFO_PARTITION0_HI	0x404
+#define FIFO_PARTITION1_LO	0x408
+#define FIFO_PARTITION1_HI	0x40C
+#define CH_SAI_ERR		0x410
+
+#define CTL_LO_BIT_LLP_DST_EN	27
+#define CTL_LO_BIT_LLP_SRC_EN	28
+
+#define CH_SUSPEND	(BIT(8))
+#define CH_DRAIN	(BIT(10))
 
 /*DMA channel control registers*/
 union intel_mid_dma_ctl_lo {
@@ -111,6 +126,34 @@
 		u32	llp_src_en:1;	/*enable/disable source LLP = 0*/
 		u32	reser2:3;
 	} ctlx;
+	struct {
+		u32	int_en:1;	/*enable or disable interrupts*/
+					/*should be 0*/
+		u32	dst_tr_width:3;	/*destination transfer width*/
+					/*usually 32 bits = 010*/
+		u32	src_tr_width:3; /*source transfer width*/
+					/*usually 32 bits = 010*/
+		u32	rsvd4:1;
+		u32	dinc:1;		/*destination address inc/dec*/
+		u32	rsvd3:1;
+					/*For mem:INC=00, Peripheral NoINC=11*/
+		u32	sinc:1;		/*source address inc or dec, as above*/
+		u32	dst_msize:3;	/*destination burst transaction length*/
+					/*always = 16 ie 011*/
+		u32	src_msize:3;	/*source burst transaction length*/
+					/*always = 16 ie 011*/
+		u32	src_gather_en:1;
+		u32	dst_scatter_en:1;
+		u32	rsvd2:1;
+		u32	tt_fc:2;	/*transfer type and flow controller*/
+					/*M-M = 000
+					  P-M = 010
+					  M-P = 001*/
+		u32	rsvd1:5;
+		u32	llp_dst_en:1;	/*enable/disable destination LLP = 0*/
+		u32	llp_src_en:1;	/*enable/disable source LLP = 0*/
+		u32	reser:3;
+	} ctlx_v2;
 	u32	ctl_lo;
 };
 
@@ -120,8 +163,13 @@
 		u32	done:1;		/*Done - updated by DMAC*/
 		u32	reser:19;	/*configured by DMAC*/
 	} ctlx;
+	struct {
+		u32	block_ts:12;	/*block transfer size*/
+		u32	done:1;		/*Done - updated by DMAC*/
+		u32	ch_weight:11;
+		u32	ch_class:2;
+	} ctlx_v2;
 	u32	ctl_hi;
-
 };
 
 /*DMA channel configuration registers*/
@@ -141,6 +189,33 @@
 		u32	reload_src:1;	/*auto reload src addr =1 if src is P*/
 		u32	reload_dst:1;	/*AR destn addr =1 if dstn is P*/
 	} cfgx;
+	struct {
+		u32	dst_burst_align:1;
+		u32	src_burst_align:1;
+		u32	all_np_wr:1;
+		u32	hshake_np_wr:1;
+		u32	rsvd4:1;
+		u32	ctl_hi_upd_en:1;
+		u32	ds_upd_en:1;
+		u32	ss_upd_en:1;
+		u32	ch_susp:1;
+		u32	fifo_empty:1;
+		u32	ch_drain:1;
+		u32	rsvd11:1;
+		u32	rd_snp:1;
+		u32	wr_snp:1;
+		u32	rd_llp_snp:1;
+		u32	rd_stat_snp:1;
+		u32	wr_stat_snp:1;
+		u32	wr_ctlhi_snp:1;
+		u32	dst_hs_pol:1;
+		u32	src_hs_pol:1;
+		u32	dst_opt_bl:1;
+		u32	src_opt_bl:1;
+		u32	rsvd_22_29:8;
+		u32	reload_src:1;
+		u32	reload_dst:1;
+	} cfgx_v2;
 	u32	cfg_lo;
 };
 
@@ -154,9 +229,43 @@
 		u32	dst_per:4;	/*dstn hw HS interface*/
 		u32	reser2:17;
 	} cfgx;
+	struct {
+		u32	src_per:4;	/*src hw HS interface*/
+		u32	dst_per:4;	/*dstn hw HS interface*/
+		u32	rd_issue_thd:10;
+		u32	wr_issue_thd:10;
+		u32	src_per_ext:2;
+		u32	dst_per_ext:2;
+	} cfgx_v2;
 	u32	cfg_hi;
 };
 
+struct intel_mid_dma_ops {
+	int (*device_alloc_chan_resources)(struct dma_chan *chan);
+	void (*device_free_chan_resources)(struct dma_chan *chan);
+
+	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
+		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+		size_t len, unsigned long flags);
+	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
+		struct dma_chan *chan,
+		struct scatterlist *dst_sg, unsigned int dst_nents,
+		struct scatterlist *src_sg, unsigned int src_nents,
+		unsigned long flags);
+
+	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
+		struct dma_chan *chan, struct scatterlist *sgl,
+		unsigned int sg_len, enum dma_transfer_direction direction,
+		unsigned long flags, void *context);
+	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+		unsigned long arg);
+
+	enum dma_status (*device_tx_status)(struct dma_chan *chan,
+					    dma_cookie_t cookie,
+					    struct dma_tx_state *txstate);
+	void (*device_issue_pending)(struct dma_chan *chan);
+	void (*dma_chan_suspend)(struct dma_chan *chan);
+};
 
 /**
  * struct intel_mid_dma_chan - internal mid representation of a DMA channel
@@ -168,13 +277,14 @@
  * @active_list: current active descriptors
  * @queue: current queued up descriptors
  * @free_list: current free descriptors
- * @slave: dma slave structure
- * @descs_allocated: total number of descriptors allocated
- * @dma: dma device structure pointer
+ * @slave: dma slave structure
+ * @descs_allocated: total number of descriptors allocated
+ * @dma: dma device structure pointer
  * @busy: bool representing if ch is busy (active txn) or not
  * @in_use: bool representing if ch is in use or not
  * @raw_tfr: raw trf interrupt received
  * @raw_block: raw block interrupt received
+ * @block_intr_status: bool representing if block intr is enabled or not
  */
 struct intel_mid_dma_chan {
 	struct dma_chan		chan;
@@ -192,6 +302,8 @@
 	u32			raw_tfr;
 	u32			raw_block;
 	struct intel_mid_dma_slave *mid_slave;
+	struct dma_pool		*lli_pool;
+	bool block_intr_status;
 };
 
 static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
@@ -207,6 +319,8 @@
 /**
  * struct middma_device - internal representation of a DMA device
  * @pdev: PCI device
+ * @dev : pointer to current device struct
+ * @irq : holds irq for the device
  * @dma_base: MMIO register space pointer of DMA
  * @dma_pool: for allocating DMA descriptors
  * @common: embedded struct dma_device
@@ -220,14 +334,17 @@
  * @block_size: Block size of DMA transfer supported (from drv_data)
  * @pimr_mask: MMIO register addr for periphral interrupt (from drv_data)
  * @state: dma PM device state
+ * @tfr_intr_mask: hold the status of tfr intr mask register
+ * @block_intr_mask: hold the status of block intr mask register
  */
 struct middma_device {
-	struct pci_dev		*pdev;
+	struct device		*dev;
+	unsigned int		irq;
 	void __iomem		*dma_base;
 	struct pci_pool		*dma_pool;
 	struct dma_device	common;
 	struct tasklet_struct   tasklet;
-	struct intel_mid_dma_chan ch[MAX_CHAN];
+	struct intel_mid_dma_chan ch[MID_MAX_CHAN];
 	unsigned int		pci_id;
 	unsigned int		intr_mask;
 	void __iomem		*mask_reg;
@@ -235,7 +352,13 @@
 	int			max_chan;
 	int			block_size;
 	unsigned int		pimr_mask;
+	unsigned int		pimr_base;
+	unsigned int		dword_trf;
+	unsigned int		pimr_offset;
+	unsigned long		tfr_intr_mask;
+	unsigned long		block_intr_mask;
 	enum intel_mid_dma_state state;
+	struct intel_mid_dma_ops	dma_ops;
 };
 
 static inline struct middma_device *to_middma_device(struct dma_device *common)
@@ -266,15 +389,30 @@
 	enum intel_mid_dma_mode		cfg_mode; /*mode configuration*/
 
 };
-
+/*
+ * struct intel_mid_dma_lli is used to provide the DMA IP with SAR, DAR,
+ * LLP etc. Use u32 for the elements of this structure irrespective of
+ * whether dma_addr_t is u32 or u64; this is necessary because the DMA IP
+ * expects these elements to be 32 bits wide.
+ */
 struct intel_mid_dma_lli {
-	dma_addr_t			sar;
-	dma_addr_t			dar;
-	dma_addr_t			llp;
+	u32				sar;
+	u32				dar;
+	u32				llp;
 	u32				ctl_lo;
 	u32				ctl_hi;
 } __attribute__ ((packed));
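+
+/*
+ * For illustration: with all five members forced to u32 the packed
+ * descriptor is exactly 20 bytes, which is the layout the 32-bit DMA
+ * engine walks via the LLP chain regardless of the kernel's dma_addr_t
+ * width.
+ */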
 
+struct intel_mid_dma_probe_info {
+	u8 max_chan;
+	u8 ch_base;
+	u32 block_size;
+	u32 pimr_mask;
+	u32 pimr_base;
+	u8 dword_trf;
+	u32 pimr_offset;
+	unsigned int		pci_id;
+	struct intel_mid_dma_ops *pdma_ops;
+};
+
 static inline int test_ch_en(void __iomem *dma, u32 ch_no)
 {
 	u32 en_reg = ioread32(dma + DMA_CHAN_EN);
@@ -294,6 +432,12 @@
 }
 
 
+struct middma_device *mid_dma_setup_context(struct device *dev,
+					    struct intel_mid_dma_probe_info *info);
 int dma_resume(struct device *dev);
-
+int dma_acpi_probe(struct platform_device *pdev);
+int dma_acpi_remove(struct platform_device *pdev);
+struct intel_mid_dma_probe_info *mid_get_acpi_driver_data(const char *hid);
+int mid_setup_dma(struct device *dev);
+void middma_shutdown(struct device *dev);
 #endif /*__INTEL_MID_DMAC_REGS_H__*/
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 573c449..9d527a7 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -81,6 +81,17 @@
 	  Kernel drivers may also request that a particular GPIO be
 	  exported to userspace; this can be useful when debugging.
 
+config GPIODEBUG
+	tristate "GPIO Setting DEBUG"
+	depends on DEBUG_FS
+	help
+	  Say yes here to support GPIO/FLIS Setting Debug.
+
+	  This is mostly useful to dump and set gpio/flis configuration.
+
+	  Kernel drivers may also request that a particular GPIO be
+	  exported to userspace; this can be useful when debugging.
+
 config GPIO_GENERIC
 	tristate
 
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 0cb2d65..2ecb796 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -6,6 +6,7 @@
 obj-$(CONFIG_GPIOLIB)		+= gpiolib.o
 obj-$(CONFIG_OF_GPIO)		+= gpiolib-of.o
 obj-$(CONFIG_GPIO_ACPI)		+= gpiolib-acpi.o
+obj-$(CONFIG_GPIODEBUG)		+= gpiodebug.o
 
 # Device drivers. Generally keep list sorted alphabetically
 obj-$(CONFIG_GPIO_GENERIC)	+= gpio-generic.o
diff --git a/drivers/gpio/gpio-langwell.c b/drivers/gpio/gpio-langwell.c
index 62ef10a..2c44199 100644
--- a/drivers/gpio/gpio-langwell.c
+++ b/drivers/gpio/gpio-langwell.c
@@ -1,7 +1,5 @@
-/*
- * Moorestown platform Langwell chip GPIO driver
- *
- * Copyright (c) 2008 - 2009,  Intel Corporation.
+/* gpio-langwell.c Moorestown platform Langwell chip GPIO driver
+ * Copyright (c) 2008 - 2013,  Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -35,8 +33,18 @@
 #include <linux/io.h>
 #include <linux/gpio.h>
 #include <linux/slab.h>
+#include <linux/lnw_gpio.h>
 #include <linux/pm_runtime.h>
+#include <asm/intel-mid.h>
 #include <linux/irqdomain.h>
+#include <asm/intel_scu_flis.h>
+#include "gpiodebug.h"
+
+#define IRQ_TYPE_EDGE	(1 << 0)
+#define IRQ_TYPE_LEVEL	(1 << 1)
+
+#define TANGIER_I2C_FLIS_START	0x1D00
+#define TANGIER_I2C_FLIS_END	0x1D34
 
 /*
  * Langwell chip has 64 pins and thus there are 2 32bit registers to control
@@ -61,14 +69,149 @@
 	GFER,		/* falling edge detect */
 	GEDR,		/* edge detect result */
 	GAFR,		/* alt function */
+	GFBR = 9,	/* glitch filter bypass */
+	GPIT,		/* interrupt type */
+	GPIP = GFER,	/* level interrupt polarity */
+	GPIM = GRER,	/* level interrupt mask */
+
+	/* the following registers only exist on MRFLD */
+	GFBR_TNG = 6,
+	GIMR,		/* interrupt mask */
+	GISR,		/* interrupt source */
+	GITR = 32,	/* interrupt type */
+	GLPR = 33,	/* level-input polarity */
+};
+
+enum GPIO_CONTROLLERS {
+	LINCROFT_GPIO,
+	PENWELL_GPIO_AON,
+	PENWELL_GPIO_CORE,
+	CLOVERVIEW_GPIO_AON,
+	CLOVERVIEW_GPIO_CORE,
+	TANGIER_GPIO,
+};
+
+/* langwell gpio driver data */
+struct lnw_gpio_ddata_t {
+	u16 ngpio;		/* number of gpio pins */
+	u32 gplr_offset;	/* offset of first GPLR register from base */
+	u32 (*get_flis_offset)(int gpio);
+	u32 chip_irq_type;	/* chip interrupt type */
+};
+
+struct gpio_flis_pair {
+	int gpio;	/* gpio number */
+	int offset;	/* register offset from FLIS base */
+};
+
+/*
+ * The following table lists the pin/FLIS-offset pairs of some key
+ * GPIO pins; the offsets of the remaining GPIOs can be calculated
+ * from it (see get_flis_offset_by_gpio() below).
+ */
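+/*
+ * Example, using the table below: GPIO 13 falls in the [12, 14) range
+ * that starts at 0x2544, so its FLIS offset is
+ * 0x2544 + (13 - 12) * 4 = 0x2548.
+ */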
+static struct gpio_flis_pair gpio_flis_mapping_table[] = {
+	{ 0,	0x2900 },
+	{ 12,	0x2544 },
+	{ 14,	0x0958 },
+	{ 16,	0x2D18 },
+	{ 17,	0x1D10 },
+	{ 19,	0x1D00 },
+	{ 23,	0x1D18 },
+	{ 31,	-EINVAL }, /* No GPIO 31 in pin list */
+	{ 32,	0x1508 },
+	{ 44,	0x3500 },
+	{ 64,	0x2534 },
+	{ 68,	0x2D1C },
+	{ 70,	0x1500 },
+	{ 72,	0x3D00 },
+	{ 77,	0x0D00 },
+	{ 97,	0x0954 },
+	{ 98,	-EINVAL }, /* No GPIO 98-101 in pin list */
+	{ 102,	0x1910 },
+	{ 120,	0x1900 },
+	{ 124,	0x2100 },
+	{ 136,	-EINVAL }, /* No GPIO 136 in pin list */
+	{ 137,	0x2D00 },
+	{ 143,	-EINVAL }, /* No GPIO 143-153 in pin list */
+	{ 154,	0x092C },
+	{ 164,	0x3900 },
+	{ 177,	0x2500 },
+	{ 190,	0x2D50 },
+};
+
+/*
+ * In newer versions of the Merrifield firmware the I2C FLIS registers
+ * cannot be written directly but must go through an IPC call, which may
+ * sleep, so we must not use spin_lock_irq to protect the access when
+ * is_merr_i2c_flis() returns true.
+ */
+static inline bool is_merr_i2c_flis(u32 offset)
+{
+	return ((offset >= TANGIER_I2C_FLIS_START)
+		&& (offset <= TANGIER_I2C_FLIS_END));
+}
+
+static u32 get_flis_offset_by_gpio(int gpio)
+{
+	int i;
+	int start;
+	u32 offset = -EINVAL;
+
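+	/* Find the table entry whose [gpio, next gpio) range contains
+	 * the requested pin; the last entry acts as a catch-all. */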
+	for (i = 0; i < ARRAY_SIZE(gpio_flis_mapping_table) - 1; i++) {
+		if (gpio >= gpio_flis_mapping_table[i].gpio
+			&& gpio < gpio_flis_mapping_table[i + 1].gpio)
+			break;
+	}
+
+	start = gpio_flis_mapping_table[i].gpio;
+
+	if (gpio_flis_mapping_table[i].offset != -EINVAL) {
+		offset = gpio_flis_mapping_table[i].offset
+				+ (gpio - start) * 4;
+	}
+
+	return offset;
+}
+
+static struct lnw_gpio_ddata_t lnw_gpio_ddata[] = {
+	[LINCROFT_GPIO] = {
+		.ngpio = 64,
+	},
+	[PENWELL_GPIO_AON] = {
+		.ngpio = 96,
+		.chip_irq_type = IRQ_TYPE_EDGE,
+	},
+	[PENWELL_GPIO_CORE] = {
+		.ngpio = 96,
+		.chip_irq_type = IRQ_TYPE_EDGE,
+	},
+	[CLOVERVIEW_GPIO_AON] = {
+		.ngpio = 96,
+		.chip_irq_type = IRQ_TYPE_EDGE | IRQ_TYPE_LEVEL,
+	},
+	[CLOVERVIEW_GPIO_CORE] = {
+		.ngpio = 96,
+		.chip_irq_type = IRQ_TYPE_EDGE,
+	},
+	[TANGIER_GPIO] = {
+		.ngpio = 192,
+		.gplr_offset = 4,
+		.get_flis_offset = get_flis_offset_by_gpio,
+		.chip_irq_type = IRQ_TYPE_EDGE | IRQ_TYPE_LEVEL,
+	},
 };
 
 struct lnw_gpio {
-	struct gpio_chip		chip;
-	void				*reg_base;
-	spinlock_t			lock;
-	struct pci_dev			*pdev;
-	struct irq_domain		*domain;
+	struct gpio_chip	chip;
+	void			*reg_base;
+	void			*reg_gplr;
+	spinlock_t		lock;
+	struct pci_dev		*pdev;
+	struct irq_domain	*domain;
+	u32			(*get_flis_offset)(int gpio);
+	u32			chip_irq_type;
+	int			type;
+	struct gpio_debug	*debug;
 };
 
 #define to_lnw_priv(chip)	container_of(chip, struct lnw_gpio, chip)
@@ -80,11 +223,162 @@
 	unsigned nreg = chip->ngpio / 32;
 	u8 reg = offset / 32;
 	void __iomem *ptr;
+	void *base;
 
-	ptr = (void __iomem *)(lnw->reg_base + reg_type * nreg * 4 + reg * 4);
+	/*
+	 * On TNG B0, GITR[0] is at 0xFF008300 while GPLR[0] is at
+	 * 0xFF008004, so GITR[0]'s address is easier to compute from
+	 * 0xFF008000.  Hence for GITR, GLPR, ... we switch the base to
+	 * reg_base.  This does not affect PNW/CLV, where reg_gplr equals
+	 * reg_base, whereas on TNG reg_gplr is at an offset of 0x4.
+	 */
+	base = reg_type < GITR ? lnw->reg_gplr : lnw->reg_base;
+	ptr = (void __iomem *)(base + reg_type * nreg * 4 + reg * 4);
 	return ptr;
 }
 
+void lnw_gpio_set_alt(int gpio, int alt)
+{
+	struct lnw_gpio *lnw;
+	u32 __iomem *mem;
+	int reg;
+	int bit;
+	u32 offset;
+	u32 value;
+	unsigned long flags;
+
+	/* use the irq chip data to look up the owning controller */
+	lnw = irq_get_chip_data(gpio_to_irq(gpio));
+	if (!lnw) {
+		pr_err("langwell_gpio: cannot find pin %d\n", gpio);
+		return;
+	}
+	if (gpio < lnw->chip.base || gpio >= lnw->chip.base + lnw->chip.ngpio) {
+		dev_err(lnw->chip.dev, "langwell_gpio: wrong pin %d to config alt\n", gpio);
+		return;
+	}
+#if 0
+	if (lnw->irq_base + gpio - lnw->chip.base != gpio_to_irq(gpio)) {
+		dev_err(lnw->chip.dev, "langwell_gpio: wrong chip data for pin %d\n", gpio);
+		return;
+	}
+#endif
+	gpio -= lnw->chip.base;
+
+	if (lnw->type != TANGIER_GPIO) {
+		reg = gpio / 16;
+		bit = gpio % 16;
+
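+		/* GAFR packs 2 bits of alternate-function select per pin,
+		 * i.e. 16 pins per 32-bit register. */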
+		mem = gpio_reg(&lnw->chip, 0, GAFR);
+		spin_lock_irqsave(&lnw->lock, flags);
+		value = readl(mem + reg);
+		value &= ~(3 << (bit * 2));
+		value |= (alt & 3) << (bit * 2);
+		writel(value, mem + reg);
+		spin_unlock_irqrestore(&lnw->lock, flags);
+		dev_dbg(lnw->chip.dev, "ALT: writing 0x%x to %p\n",
+			value, mem + reg);
+	} else {
+		offset = lnw->get_flis_offset(gpio);
+		if (WARN(offset == -EINVAL, "invalid pin %d\n", gpio))
+			return;
+
+		if (!is_merr_i2c_flis(offset))
+			spin_lock_irqsave(&lnw->lock, flags);
+
+		value = get_flis_value(offset);
+		value &= ~7;
+		value |= (alt & 7);
+		set_flis_value(value, offset);
+
+		if (!is_merr_i2c_flis(offset))
+			spin_unlock_irqrestore(&lnw->lock, flags);
+	}
+}
+EXPORT_SYMBOL_GPL(lnw_gpio_set_alt);
+
+int gpio_get_alt(int gpio)
+{
+	struct lnw_gpio *lnw;
+	u32 __iomem *mem;
+	int reg;
+	int bit;
+	u32 value;
+	u32 offset;
+
+	/* use the irq chip data to look up the owning controller */
+	lnw = irq_get_chip_data(gpio_to_irq(gpio));
+	if (!lnw) {
+		pr_err("langwell_gpio: cannot find pin %d\n", gpio);
+		return -1;
+	}
+	if (gpio < lnw->chip.base || gpio >= lnw->chip.base + lnw->chip.ngpio) {
+		dev_err(lnw->chip.dev,
+			"langwell_gpio: wrong pin %d to config alt\n", gpio);
+		return -1;
+	}
+#if 0
+	if (lnw->irq_base + gpio - lnw->chip.base != gpio_to_irq(gpio)) {
+		dev_err(lnw->chip.dev,
+			"langwell_gpio: wrong chip data for pin %d\n", gpio);
+		return -1;
+	}
+#endif
+	gpio -= lnw->chip.base;
+
+	if (lnw->type != TANGIER_GPIO) {
+		reg = gpio / 16;
+		bit = gpio % 16;
+
+		mem = gpio_reg(&lnw->chip, 0, GAFR);
+		value = readl(mem + reg);
+		value &= (3 << (bit * 2));
+		value >>= (bit * 2);
+	} else {
+		offset = lnw->get_flis_offset(gpio);
+		if (WARN(offset == -EINVAL, "invalid pin %d\n", gpio))
+			return -EINVAL;
+
+		value = get_flis_value(offset) & 7;
+	}
+
+	return value;
+}
+EXPORT_SYMBOL_GPL(gpio_get_alt);
+
+static int lnw_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
+				 unsigned debounce)
+{
+	struct lnw_gpio *lnw = to_lnw_priv(chip);
+	void __iomem *gfbr;
+	unsigned long flags;
+	u32 value;
+	enum GPIO_REG reg_type;
+
+	reg_type = (lnw->type == TANGIER_GPIO) ? GFBR_TNG : GFBR;
+	gfbr = gpio_reg(chip, offset, reg_type);
+
+	if (lnw->pdev)
+		pm_runtime_get(&lnw->pdev->dev);
+
+	spin_lock_irqsave(&lnw->lock, flags);
+	value = readl(gfbr);
+	if (debounce) {
+		/* debounce bypass disable */
+		value &= ~BIT(offset % 32);
+	} else {
+		/* debounce bypass enable */
+		value |= BIT(offset % 32);
+	}
+	writel(value, gfbr);
+	spin_unlock_irqrestore(&lnw->lock, flags);
+
+	if (lnw->pdev)
+		pm_runtime_put(&lnw->pdev->dev);
+
+	return 0;
+}
+
 static void __iomem *gpio_reg_2bit(struct gpio_chip *chip, unsigned offset,
 				   enum GPIO_REG reg_type)
 {
@@ -99,9 +393,18 @@
 
 static int lnw_gpio_request(struct gpio_chip *chip, unsigned offset)
 {
-	void __iomem *gafr = gpio_reg_2bit(chip, offset, GAFR);
-	u32 value = readl(gafr);
-	int shift = (offset % 16) << 1, af = (value >> shift) & 3;
+	struct lnw_gpio *lnw = to_lnw_priv(chip);
+	u32 value;
+	void __iomem *gafr;
+	int shift, af;
+
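+	/* Tangier has no GAFR; its alternate function is configured
+	 * through FLIS instead (see lnw_gpio_set_alt()), so there is
+	 * nothing to switch back to GPIO mode here. */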
+	if (lnw->type > CLOVERVIEW_GPIO_CORE)
+		return 0;
+
+	gafr = gpio_reg_2bit(chip, offset, GAFR);
+	value = readl(gafr);
+	shift = (offset % 16) << 1;
+	af = (value >> shift) & 3;
 
 	if (af) {
 		value &= ~(3 << shift);
@@ -117,10 +420,50 @@
 	return readl(gplr) & BIT(offset % 32);
 }
 
+#define PULLUP_ENABLE	(1 << 8)
+#define PULLDOWN_ENABLE	(1 << 9)
+#define PUPD_VAL_2K	(0 << 4)
+#define PUPD_VAL_20K	(1 << 4)
+#define PUPD_VAL_50K	(2 << 4)
+#define PUPD_VAL_910	(3 << 4)
+
+static int lnw_gpio_set_pull(struct gpio_chip *chip, unsigned gpio, int value)
+{
+	u32 flis_offset, flis_value;
+	struct lnw_gpio *lnw = to_lnw_priv(chip);
+	unsigned long flags;
+
+	if (lnw->type != TANGIER_GPIO)
+		return 0;
+
+	flis_offset = lnw->get_flis_offset(gpio);
+	if (WARN(flis_offset == -EINVAL, "invalid pin %d\n", gpio))
+		return -EINVAL;
+	if (is_merr_i2c_flis(flis_offset))
+		return 0;
+
+	spin_lock_irqsave(&lnw->lock, flags);
+	flis_value = get_flis_value(flis_offset);
+	if (value) {
+		flis_value |= PULLUP_ENABLE;
+		flis_value &= ~PULLDOWN_ENABLE;
+	} else {
+		flis_value |= PULLDOWN_ENABLE;
+		flis_value &= ~PULLUP_ENABLE;
+	}
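+	/* always select the 50K pull strength when enabling a pull */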
+	flis_value |= PUPD_VAL_50K;
+	set_flis_value(flis_value, flis_offset);
+	spin_unlock_irqrestore(&lnw->lock, flags);
+
+	return 0;
+}
+
 static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 {
 	void __iomem *gpsr, *gpcr;
 
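+	/* On Tangier, align the pull direction with the driven level
+	 * before toggling the pin (a no-op on other controllers). */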
+	lnw_gpio_set_pull(chip, offset, value);
+
 	if (value) {
 		gpsr = gpio_reg(chip, offset, GPSR);
 		writel(BIT(offset % 32), gpsr);
@@ -188,8 +531,10 @@
 	u32 gpio = irqd_to_hwirq(d);
 	unsigned long flags;
 	u32 value;
+	int ret = 0;
 	void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER);
 	void __iomem *gfer = gpio_reg(&lnw->chip, gpio, GFER);
+	void __iomem *gpit, *gpip;
 
 	if (gpio >= lnw->chip.ngpio)
 		return -EINVAL;
@@ -197,47 +542,217 @@
 	if (lnw->pdev)
 		pm_runtime_get(&lnw->pdev->dev);
 
-	spin_lock_irqsave(&lnw->lock, flags);
-	if (type & IRQ_TYPE_EDGE_RISING)
-		value = readl(grer) | BIT(gpio % 32);
-	else
-		value = readl(grer) & (~BIT(gpio % 32));
-	writel(value, grer);
+	/* Chips that support level interrupts have extra GPIT registers */
+	if (lnw->chip_irq_type & IRQ_TYPE_LEVEL) {
+		switch (lnw->type) {
+		case CLOVERVIEW_GPIO_AON:
+			gpit = gpio_reg(&lnw->chip, gpio, GPIT);
+			gpip = gpio_reg(&lnw->chip, gpio, GPIP);
+			break;
+		case TANGIER_GPIO:
+			gpit = gpio_reg(&lnw->chip, gpio, GITR);
+			gpip = gpio_reg(&lnw->chip, gpio, GLPR);
+			break;
+		default:
+			ret = -EINVAL;
+			goto out;
+		}
 
-	if (type & IRQ_TYPE_EDGE_FALLING)
-		value = readl(gfer) | BIT(gpio % 32);
-	else
-		value = readl(gfer) & (~BIT(gpio % 32));
-	writel(value, gfer);
-	spin_unlock_irqrestore(&lnw->lock, flags);
+		spin_lock_irqsave(&lnw->lock, flags);
+		if (type & IRQ_TYPE_LEVEL_MASK) {
+			/* To prevent glitches from triggering an unintended
+			 * level interrupt, configure GLPR register first
+			 * and then configure GITR.
+			 */
+			if (type & IRQ_TYPE_LEVEL_LOW)
+				value = readl(gpip) | BIT(gpio % 32);
+			else
+				value = readl(gpip) & (~BIT(gpio % 32));
+			writel(value, gpip);
 
+			value = readl(gpit) | BIT(gpio % 32);
+			writel(value, gpit);
+
+			__irq_set_handler_locked(d->irq, handle_level_irq);
+		} else if (type & IRQ_TYPE_EDGE_BOTH) {
+			value = readl(gpit) & (~BIT(gpio % 32));
+			writel(value, gpit);
+
+			if (type & IRQ_TYPE_EDGE_RISING)
+				value = readl(grer) | BIT(gpio % 32);
+			else
+				value = readl(grer) & (~BIT(gpio % 32));
+			writel(value, grer);
+
+			if (type & IRQ_TYPE_EDGE_FALLING)
+				value = readl(gfer) | BIT(gpio % 32);
+			else
+				value = readl(gfer) & (~BIT(gpio % 32));
+			writel(value, gfer);
+
+			__irq_set_handler_locked(d->irq, handle_edge_irq);
+		}
+		spin_unlock_irqrestore(&lnw->lock, flags);
+	} else {
+		if (type & IRQ_TYPE_LEVEL_MASK) {
+			ret = -EINVAL;
+		} else if (type & IRQ_TYPE_EDGE_BOTH) {
+			spin_lock_irqsave(&lnw->lock, flags);
+
+			if (type & IRQ_TYPE_EDGE_RISING)
+				value = readl(grer) | BIT(gpio % 32);
+			else
+				value = readl(grer) & (~BIT(gpio % 32));
+			writel(value, grer);
+
+			if (type & IRQ_TYPE_EDGE_FALLING)
+				value = readl(gfer) | BIT(gpio % 32);
+			else
+				value = readl(gfer) & (~BIT(gpio % 32));
+			writel(value, gfer);
+
+			spin_unlock_irqrestore(&lnw->lock, flags);
+		}
+	}
+
+out:
 	if (lnw->pdev)
 		pm_runtime_put(&lnw->pdev->dev);
 
+	return ret;
+}
+
+static int lnw_set_maskunmask(struct irq_data *d, enum GPIO_REG reg_type,
+				unsigned unmask)
+{
+	struct lnw_gpio *lnw = irq_data_get_irq_chip_data(d);
+	u32 gpio = irqd_to_hwirq(d);
+	unsigned long flags;
+	u32 value;
+	void __iomem *gp_reg;
+
+	gp_reg = gpio_reg(&lnw->chip, gpio, reg_type);
+
+	spin_lock_irqsave(&lnw->lock, flags);
+
+	if (unmask) {
+		/* enable interrupt from GPIO input pin */
+		value = readl(gp_reg) | BIT(gpio % 32);
+	} else {
+		/* disable interrupt from GPIO input pin */
+		value = readl(gp_reg) & (~BIT(gpio % 32));
+	}
+
+	writel(value, gp_reg);
+
+	spin_unlock_irqrestore(&lnw->lock, flags);
+
 	return 0;
 }
 
 static void lnw_irq_unmask(struct irq_data *d)
 {
+	struct lnw_gpio *lnw = irq_data_get_irq_chip_data(d);
+	u32 gpio = irqd_to_hwirq(d);
+	void __iomem *gpit;
+
+	if (gpio >= lnw->chip.ngpio)
+		return;
+
+	switch (lnw->type) {
+	case CLOVERVIEW_GPIO_AON:
+		gpit = gpio_reg(&lnw->chip, gpio, GPIT);
+
+		/* if it's level trigger, unmask GPIM */
+		if (readl(gpit) & BIT(gpio % 32))
+			lnw_set_maskunmask(d, GPIM, 1);
+
+		break;
+	case TANGIER_GPIO:
+		lnw_set_maskunmask(d, GIMR, 1);
+		break;
+	default:
+		break;
+	}
 }
 
 static void lnw_irq_mask(struct irq_data *d)
 {
+	struct lnw_gpio *lnw = irq_data_get_irq_chip_data(d);
+	u32 gpio = irqd_to_hwirq(d);
+	void __iomem *gpit;
+
+	if (gpio >= lnw->chip.ngpio)
+		return;
+
+	switch (lnw->type) {
+	case CLOVERVIEW_GPIO_AON:
+		gpit = gpio_reg(&lnw->chip, gpio, GPIT);
+
+		/* if it's level trigger, mask GPIM */
+		if (readl(gpit) & BIT(gpio % 32))
+			lnw_set_maskunmask(d, GPIM, 0);
+
+		break;
+	case TANGIER_GPIO:
+		lnw_set_maskunmask(d, GIMR, 0);
+		break;
+	default:
+		break;
+	}
 }
 
+static int lnw_irq_set_wake(struct irq_data *d, unsigned on)
+{
+	return 0;
+}
+
+static void lnw_irq_ack(struct irq_data *d)
+{
+}
+
+static void lnw_irq_shutdown(struct irq_data *d)
+{
+	struct lnw_gpio *lnw = irq_data_get_irq_chip_data(d);
+	u32 gpio = irqd_to_hwirq(d);
+	unsigned long flags;
+	u32 value;
+	void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER);
+	void __iomem *gfer = gpio_reg(&lnw->chip, gpio, GFER);
+
+	spin_lock_irqsave(&lnw->lock, flags);
+	value = readl(grer) & (~BIT(gpio % 32));
+	writel(value, grer);
+	value = readl(gfer) & (~BIT(gpio % 32));
+	writel(value, gfer);
+	spin_unlock_irqrestore(&lnw->lock, flags);
+};
+
+
 static struct irq_chip lnw_irqchip = {
 	.name		= "LNW-GPIO",
+	.flags		= IRQCHIP_SET_TYPE_MASKED,
 	.irq_mask	= lnw_irq_mask,
 	.irq_unmask	= lnw_irq_unmask,
 	.irq_set_type	= lnw_irq_type,
+	.irq_set_wake	= lnw_irq_set_wake,
+	.irq_ack	= lnw_irq_ack,
+	.irq_shutdown	= lnw_irq_shutdown,
 };
 
 static DEFINE_PCI_DEVICE_TABLE(lnw_gpio_ids) = {   /* pin number */
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f), .driver_data = 64 },
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081f), .driver_data = 96 },
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081a), .driver_data = 96 },
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08eb), .driver_data = 96 },
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08f7), .driver_data = 96 },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f),
+		.driver_data = LINCROFT_GPIO },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081f),
+		.driver_data = PENWELL_GPIO_AON },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081a),
+		.driver_data = PENWELL_GPIO_CORE },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08eb),
+		.driver_data = CLOVERVIEW_GPIO_AON },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08f7),
+		.driver_data = CLOVERVIEW_GPIO_CORE },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1199),
+		.driver_data = TANGIER_GPIO },
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);
@@ -245,28 +760,446 @@
 static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
 {
 	struct irq_data *data = irq_desc_get_irq_data(desc);
-	struct lnw_gpio *lnw = irq_data_get_irq_handler_data(data);
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
+	struct lnw_gpio *lnw;
+	struct gpio_debug *debug;
 	u32 base, gpio, mask;
 	unsigned long pending;
-	void __iomem *gedr;
+	void __iomem *gp_reg;
+	enum GPIO_REG reg_type;
+	struct irq_desc *lnw_irq_desc;
+	unsigned int lnw_irq;
+
+	lnw = irq_data_get_irq_handler_data(data);
+
+	debug = lnw->debug;
+
+	reg_type = (lnw->type == TANGIER_GPIO) ? GISR : GEDR;
 
 	/* check GPIO controller to check which pin triggered the interrupt */
 	for (base = 0; base < lnw->chip.ngpio; base += 32) {
-		gedr = gpio_reg(&lnw->chip, base, GEDR);
-		while ((pending = readl(gedr))) {
+		gp_reg = gpio_reg(&lnw->chip, base, reg_type);
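+		/*
+		 * On Tangier only interrupts that are both pending (GISR)
+		 * and unmasked (GIMR) are serviced; the other controllers
+		 * report pending edges directly in GEDR.
+		 */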
+		while ((pending = (lnw->type != TANGIER_GPIO) ?
+			readl(gp_reg) :
+			(readl(gp_reg) &
+			readl(gpio_reg(&lnw->chip, base, GIMR))))) {
 			gpio = __ffs(pending);
+			DEFINE_DEBUG_IRQ_CONUNT_INCREASE(lnw->chip.base +
+				base + gpio);
+			/* Mask irq if not requested in kernel */
+			lnw_irq = irq_find_mapping(lnw->domain, base + gpio);
+			lnw_irq_desc = irq_to_desc(lnw_irq);
+			if (lnw_irq_desc && unlikely(!lnw_irq_desc->action)) {
+				lnw_irq_mask(&lnw_irq_desc->irq_data);
+				continue;
+			}
+
 			mask = BIT(gpio);
 			/* Clear before handling so we can't lose an edge */
-			writel(mask, gedr);
-			generic_handle_irq(irq_find_mapping(lnw->domain,
-							    base + gpio));
+			writel(mask, gp_reg);
+			generic_handle_irq(lnw_irq);
 		}
 	}
 
 	chip->irq_eoi(data);
 }
 
+static char conf_reg_msg[] =
+	"\nGPIO configuration register:\n"
+	"\t[ 2: 0]\tpinmux\n"
+	"\t[ 6: 4]\tpull strength\n"
+	"\t[ 8: 8]\tpullup enable\n"
+	"\t[ 9: 9]\tpulldown enable\n"
+	"\t[10:10]\tslew A, B setting\n"
+	"\t[12:12]\toverride input enable\n"
+	"\t[13:13]\toverride input enable enable\n"
+	"\t[14:14]\toverride output enable\n"
+	"\t[15:15]\toverride output enable enable\n"
+	"\t[16:16]\toverride input value\n"
+	"\t[17:17]\tenable input data override\n"
+	"\t[18:18]\toverride output value\n"
+	"\t[19:19]\tenable output data override\n"
+	"\t[21:21]\topen drain enable\n"
+	"\t[22:22]\tenable OVR_IOSTBY_VAL\n"
+	"\t[23:23]\tOVR_IOSTBY_VAL\n"
+	"\t[24:24]\tSBY_OUTDATAOV_EN\n"
+	"\t[25:25]\tSBY_INDATAOV_EN\n"
+	"\t[26:26]\tSBY_OVOUTEN_EN\n"
+	"\t[27:27]\tSBY_OVINEN_EN\n"
+	"\t[29:28]\tstandby pullmode\n"
+	"\t[30:30]\tstandby open drain mode\n";
+
+static char *pinvalue[] = {"low", "high"};
+static char *pindirection[] = {"in", "out"};
+static char *irqtype[] = {"irq_none", "edge_rising", "edge_falling",
+			"edge_both"};
+static char *pinmux[] = {"mode0", "mode1", "mode2", "mode3", "mode4", "mode5",
+			"mode6", "mode7"};
+static char *pullmode[] = {"nopull", "pullup", "pulldown"};
+static char *pullstrength[] = {"2k", "20k", "50k", "910ohms"};
+static char *enable[] = {"disable", "enable"};
+static char *override_direction[] = {"no-override", "override-enable",
+			"override-disable"};
+static char *override_value[] = {"no-override", "override-high",
+			"override-low"};
+static char *standby_trigger[] = {"no-override", "override-trigger",
+			"override-notrigger"};
+static char *standby_pupd_state[] = {"keep", "pulldown", "pullup", "nopull"};
+
+static int gpio_get_pinvalue(struct gpio_control *control, void *private_data,
+		unsigned gpio)
+{
+	struct lnw_gpio *lnw = private_data;
+	u32 value;
+
+	value = lnw_gpio_get(&lnw->chip, gpio);
+
+	return value ? 1 : 0;
+}
+
+static int gpio_set_pinvalue(struct gpio_control *control, void *private_data,
+		unsigned gpio, unsigned int num)
+{
+	struct lnw_gpio *lnw = private_data;
+
+	lnw_gpio_set(&lnw->chip, gpio, num);
+	return 0;
+}
+
+static int gpio_get_normal(struct gpio_control *control, void *private_data,
+		unsigned gpio)
+{
+	struct lnw_gpio *lnw = private_data;
+	u32 __iomem *mem;
+	u32 value;
+
+	mem = gpio_reg(&lnw->chip, gpio, control->reg);
+
+	value = readl(mem);
+	value &= BIT(gpio % 32);
+
+	if (control->invert)
+		return value ? 0 : 1;
+	else
+		return value ? 1 : 0;
+}
+
+static int gpio_set_normal(struct gpio_control *control, void *private_data,
+		unsigned gpio, unsigned int num)
+{
+	struct lnw_gpio *lnw = private_data;
+	u32 __iomem *mem;
+	u32 value;
+	unsigned long flags;
+
+	mem = gpio_reg(&lnw->chip, gpio, control->reg);
+
+	spin_lock_irqsave(&lnw->lock, flags);
+	value = readl(mem);
+	value &= ~BIT(gpio % 32);
+	if (control->invert) {
+		if (num)
+			value &= ~BIT(gpio % 32);
+		else
+			value |= BIT(gpio % 32);
+	} else {
+		if (num)
+			value |= BIT(gpio % 32);
+		else
+			value &= ~BIT(gpio % 32);
+	}
+	writel(value, mem);
+	spin_unlock_irqrestore(&lnw->lock, flags);
+
+	return 0;
+}
+
+static int gpio_get_irqtype(struct gpio_control *control, void *private_data,
+		unsigned gpio)
+{
+	struct lnw_gpio *lnw = private_data;
+	void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER);
+	void __iomem *gfer = gpio_reg(&lnw->chip, gpio, GFER);
+	u32 value;
+	int num;
+
+	value = readl(grer) & BIT(gpio % 32);
+	num = value ? 1 : 0;
+	value = readl(gfer) & BIT(gpio % 32);
+	if (num)
+		num = value ? 3 : 1;
+	else
+		num = value ? 2 : 0;
+
+	return num;
+}
+
+static int flis_get_normal(struct gpio_control *control, void *private_data,
+		unsigned gpio)
+{
+	struct lnw_gpio *lnw = private_data;
+	u32 offset, value;
+	int num;
+
+	if (lnw->type == TANGIER_GPIO) {
+		offset = lnw->get_flis_offset(gpio);
+		if (WARN(offset == -EINVAL, "invalid pin %d\n", gpio))
+			return -1;
+
+		value = get_flis_value(offset);
+		num = (value >> control->shift) & control->mask;
+		if (num < control->num)
+			return num;
+	}
+
+	return -1;
+}
+
+static int flis_set_normal(struct gpio_control *control, void *private_data,
+		unsigned gpio, unsigned int num)
+{
+	struct lnw_gpio *lnw = private_data;
+	u32 shift = control->shift;
+	u32 mask = control->mask;
+	u32 offset, value;
+	unsigned long flags;
+
+	if (lnw->type == TANGIER_GPIO) {
+		offset = lnw->get_flis_offset(gpio);
+		if (WARN(offset == -EINVAL, "invalid pin %d\n", gpio))
+			return -1;
+
+		if (!is_merr_i2c_flis(offset))
+			spin_lock_irqsave(&lnw->lock, flags);
+		value = get_flis_value(offset);
+		value &= ~(mask << shift);
+		value |= ((num & mask) << shift);
+		set_flis_value(value, offset);
+		if (!is_merr_i2c_flis(offset))
+			spin_unlock_irqrestore(&lnw->lock, flags);
+		return 0;
+	}
+
+	return -1;
+}
+
+static int flis_get_override(struct gpio_control *control, void *private_data,
+		unsigned gpio)
+{
+	struct lnw_gpio *lnw = private_data;
+	u32 offset, value;
+	u32 val_bit, en_bit;
+	int num;
+
+	if (lnw->type == TANGIER_GPIO) {
+		offset = lnw->get_flis_offset(gpio);
+		if (WARN(offset == -EINVAL, "invalid pin %d\n", gpio))
+			return -1;
+
+		val_bit = 1 << control->shift;
+		en_bit = 1 << control->rshift;
+
+		value = get_flis_value(offset);
+
+		if (value & en_bit)
+			if (value & val_bit)
+				num = 1;
+			else
+				num = 2;
+		else
+			num = 0;
+
+		return num;
+	}
+
+	return -1;
+}
+
+static int flis_set_override(struct gpio_control *control, void *private_data,
+		unsigned gpio, unsigned int num)
+{
+	struct lnw_gpio *lnw = private_data;
+	u32 offset, value;
+	u32 val_bit, en_bit;
+	unsigned long flags;
+
+	if (lnw->type == TANGIER_GPIO) {
+		offset = lnw->get_flis_offset(gpio);
+		if (WARN(offset == -EINVAL, "invalid pin %d\n", gpio))
+			return -1;
+
+		val_bit = 1 << control->shift;
+		en_bit = 1 << control->rshift;
+
+		if (!is_merr_i2c_flis(offset))
+			spin_lock_irqsave(&lnw->lock, flags);
+		value = get_flis_value(offset);
+		switch (num) {
+		case 0:
+			value &= ~(en_bit | val_bit);
+			break;
+		case 1:
+			value |= (en_bit | val_bit);
+			break;
+		case 2:
+			value |= en_bit;
+			value &= ~val_bit;
+			break;
+		default:
+			break;
+		}
+		set_flis_value(value, offset);
+		if (!is_merr_i2c_flis(offset))
+			spin_unlock_irqrestore(&lnw->lock, flags);
+
+		return 0;
+	}
+
+	return -1;
+}
+
+#define GPIO_VALUE_CONTROL(xtype, xinfo, xnum) \
+{	.type = xtype, .pininfo = xinfo, .num = xnum, \
+	.get = gpio_get_pinvalue, .set = gpio_set_pinvalue}
+#define GPIO_NORMAL_CONTROL(xtype, xinfo, xnum, xreg, xinvert) \
+{	.type = xtype, .pininfo = xinfo, .num = xnum, .reg = xreg, \
+	.invert = xinvert, .get = gpio_get_normal, .set = gpio_set_normal}
+#define GPIO_IRQTYPE_CONTROL(xtype, xinfo, xnum) \
+{	.type = xtype, .pininfo = xinfo, .num = xnum, \
+	.get = gpio_get_irqtype, .set = NULL}
+#define FLIS_NORMAL_CONTROL(xtype, xinfo, xnum, xshift, xmask) \
+{	.type = xtype, .pininfo = xinfo, .num = xnum, .shift = xshift, \
+	.mask = xmask, .get = flis_get_normal, .set = flis_set_normal}
+#define FLIS_OVERRIDE_CONTROL(xtype, xinfo, xnum, xshift, xrshift) \
+{	.type = xtype, .pininfo = xinfo, .num = xnum, .shift = xshift, \
+	.rshift = xrshift, .get = flis_get_override, .set = flis_set_override}
+
+static struct gpio_control lnw_gpio_controls[] = {
+GPIO_VALUE_CONTROL(TYPE_PIN_VALUE, pinvalue, 2),
+GPIO_NORMAL_CONTROL(TYPE_DIRECTION, pindirection, 2, GPDR, 0),
+GPIO_IRQTYPE_CONTROL(TYPE_IRQ_TYPE, irqtype, 4),
+GPIO_NORMAL_CONTROL(TYPE_DEBOUNCE, enable, 2, GFBR_TNG, 1),
+FLIS_NORMAL_CONTROL(TYPE_PINMUX, pinmux, 8, 0, 0x7),
+FLIS_NORMAL_CONTROL(TYPE_PULLSTRENGTH, pullstrength, 4, 4, 0x7),
+FLIS_NORMAL_CONTROL(TYPE_PULLMODE, pullmode, 3, 8, 0x3),
+FLIS_NORMAL_CONTROL(TYPE_OPEN_DRAIN, enable, 2, 21, 0x1),
+FLIS_OVERRIDE_CONTROL(TYPE_OVERRIDE_INDIR, override_direction, 3, 12, 13),
+FLIS_OVERRIDE_CONTROL(TYPE_OVERRIDE_OUTDIR, override_direction, 3, 14, 15),
+FLIS_OVERRIDE_CONTROL(TYPE_OVERRIDE_INVAL, override_value, 3, 16, 17),
+FLIS_OVERRIDE_CONTROL(TYPE_OVERRIDE_OUTVAL, override_value, 3, 18, 19),
+FLIS_OVERRIDE_CONTROL(TYPE_SBY_OVR_IO, standby_trigger, 3, 23, 22),
+FLIS_OVERRIDE_CONTROL(TYPE_SBY_OVR_OUTVAL, override_value, 3, 18, 24),
+FLIS_OVERRIDE_CONTROL(TYPE_SBY_OVR_INVAL, override_value, 3, 16, 25),
+FLIS_OVERRIDE_CONTROL(TYPE_SBY_OVR_OUTDIR, override_direction, 3, 14, 26),
+FLIS_OVERRIDE_CONTROL(TYPE_SBY_OVR_INDIR, override_direction, 3, 12, 27),
+FLIS_NORMAL_CONTROL(TYPE_SBY_PUPD_STATE, standby_pupd_state, 4, 28, 0x3),
+FLIS_NORMAL_CONTROL(TYPE_SBY_OD_DIS, enable, 2, 30, 0x1),
+};
+
+static unsigned int lnw_get_conf_reg(struct gpio_debug *debug, unsigned gpio)
+{
+	struct lnw_gpio *lnw = debug->private_data;
+	u32 offset, value = 0;
+
+	if (lnw->type == TANGIER_GPIO) {
+		offset = lnw->get_flis_offset(gpio);
+		if (WARN(offset == -EINVAL, "invalid pin %d\n", gpio))
+			return -EINVAL;
+
+		value = get_flis_value(offset);
+	}
+
+	return value;
+}
+
+static void lnw_set_conf_reg(struct gpio_debug *debug, unsigned gpio,
+		unsigned int value)
+{
+	struct lnw_gpio *lnw = debug->private_data;
+	u32 offset;
+
+	if (lnw->type == TANGIER_GPIO) {
+		offset = lnw->get_flis_offset(gpio);
+		if (WARN(offset == -EINVAL, "invalid pin %d\n", gpio))
+			return;
+
+		set_flis_value(value, offset);
+	}
+
+	return;
+}
+
+static char **lnw_get_avl_pininfo(struct gpio_debug *debug, unsigned gpio,
+		unsigned int type, unsigned *num)
+{
+	struct gpio_control *control;
+
+	control = find_gpio_control(lnw_gpio_controls,
+			ARRAY_SIZE(lnw_gpio_controls), type);
+	if (control == NULL)
+		return NULL;
+
+	*num = control->num;
+
+	return control->pininfo;
+}
+
+static char *lnw_get_cul_pininfo(struct gpio_debug *debug, unsigned gpio,
+		unsigned int type)
+{
+	struct lnw_gpio *lnw = debug->private_data;
+	struct gpio_control *control;
+	int num;
+
+	control = find_gpio_control(lnw_gpio_controls,
+			ARRAY_SIZE(lnw_gpio_controls), type);
+	if (control == NULL)
+		return NULL;
+
+	num = control->get(control, lnw, gpio);
+	if (num == -1)
+		return NULL;
+
+	return *(control->pininfo + num);
+}
+
+static void lnw_set_pininfo(struct gpio_debug *debug, unsigned gpio,
+		unsigned int type, const char *info)
+{
+	struct lnw_gpio *lnw = debug->private_data;
+	struct gpio_control *control;
+	int num;
+
+	control = find_gpio_control(lnw_gpio_controls,
+			ARRAY_SIZE(lnw_gpio_controls), type);
+	if (control == NULL)
+		return;
+
+	num = find_pininfo_num(control, info);
+	if (num == -1)
+		return;
+
+	if (control->set)
+		control->set(control, lnw, gpio, num);
+}
+
+static int lnw_get_register_msg(char **buf, unsigned long *size)
+{
+	*buf = conf_reg_msg;
+	*size = strlen(conf_reg_msg);
+
+	return 0;
+}
+
+static struct gpio_debug_ops lnw_gpio_debug_ops = {
+	.get_conf_reg = lnw_get_conf_reg,
+	.set_conf_reg = lnw_set_conf_reg,
+	.get_avl_pininfo = lnw_get_avl_pininfo,
+	.get_cul_pininfo = lnw_get_cul_pininfo,
+	.set_pininfo = lnw_set_pininfo,
+	.get_register_msg = lnw_get_register_msg,
+};
+
 static void lnw_irq_init_hw(struct lnw_gpio *lnw)
 {
 	void __iomem *reg;
@@ -303,6 +1236,16 @@
 	.xlate = irq_domain_xlate_twocell,
 };
 
+static int lnw_gpio_runtime_resume(struct device *dev)
+{
+	return 0;
+}
+
+static int lnw_gpio_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
 static int lnw_gpio_runtime_idle(struct device *dev)
 {
 	int err = pm_schedule_suspend(dev, 500);
@@ -314,7 +1257,9 @@
 }
 
 static const struct dev_pm_ops lnw_gpio_pm_ops = {
-	SET_RUNTIME_PM_OPS(NULL, NULL, lnw_gpio_runtime_idle)
+	SET_RUNTIME_PM_OPS(lnw_gpio_runtime_suspend,
+			   lnw_gpio_runtime_resume,
+			   lnw_gpio_runtime_idle)
 };
 
 static int lnw_gpio_probe(struct pci_dev *pdev,
@@ -323,10 +1268,15 @@
 	void *base;
 	resource_size_t start, len;
 	struct lnw_gpio *lnw;
+	struct gpio_debug *debug;
 	u32 gpio_base;
 	u32 irq_base;
 	int retval;
-	int ngpio = id->driver_data;
+	struct lnw_gpio_ddata_t *ddata;
+	int pid;
+
+	pid = id->driver_data;
+	ddata = &lnw_gpio_ddata[pid];
 
 	retval = pci_enable_device(pdev);
 	if (retval)
@@ -367,20 +1317,29 @@
 		goto err_ioremap;
 	}
 
+	lnw->type = pid;
 	lnw->reg_base = base;
+	lnw->reg_gplr = lnw->reg_base + ddata->gplr_offset;
+	lnw->get_flis_offset = ddata->get_flis_offset;
+	lnw->chip_irq_type = ddata->chip_irq_type;
 	lnw->chip.label = dev_name(&pdev->dev);
 	lnw->chip.request = lnw_gpio_request;
 	lnw->chip.direction_input = lnw_gpio_direction_input;
 	lnw->chip.direction_output = lnw_gpio_direction_output;
+	lnw->chip.set_pinmux = lnw_gpio_set_alt;
+	lnw->chip.get_pinmux = gpio_get_alt;
 	lnw->chip.get = lnw_gpio_get;
 	lnw->chip.set = lnw_gpio_set;
 	lnw->chip.to_irq = lnw_gpio_to_irq;
 	lnw->chip.base = gpio_base;
-	lnw->chip.ngpio = ngpio;
+	lnw->chip.ngpio = ddata->ngpio;
 	lnw->chip.can_sleep = 0;
+	lnw->chip.set_debounce = lnw_gpio_set_debounce;
+	lnw->chip.dev = &pdev->dev;
 	lnw->pdev = pdev;
-
-	lnw->domain = irq_domain_add_simple(pdev->dev.of_node, ngpio, irq_base,
+	spin_lock_init(&lnw->lock);
+	lnw->domain = irq_domain_add_simple(pdev->dev.of_node,
+					    lnw->chip.ngpio, irq_base,
 					    &lnw_gpio_irq_ops, lnw);
 	if (!lnw->domain) {
 		retval = -ENOMEM;
@@ -395,15 +1354,40 @@
 	}
 
 	lnw_irq_init_hw(lnw);
-
 	irq_set_handler_data(pdev->irq, lnw);
 	irq_set_chained_handler(pdev->irq, lnw_irq_handler);
 
-	spin_lock_init(&lnw->lock);
-
 	pm_runtime_put_noidle(&pdev->dev);
 	pm_runtime_allow(&pdev->dev);
 
+	/* add for gpiodebug */
+	debug = gpio_debug_alloc();
+	if (debug) {
+		__set_bit(TYPE_OVERRIDE_OUTDIR, debug->typebit);
+		__set_bit(TYPE_OVERRIDE_OUTVAL, debug->typebit);
+		__set_bit(TYPE_OVERRIDE_INDIR, debug->typebit);
+		__set_bit(TYPE_OVERRIDE_INVAL, debug->typebit);
+		__set_bit(TYPE_SBY_OVR_IO, debug->typebit);
+		__set_bit(TYPE_SBY_OVR_OUTVAL, debug->typebit);
+		__set_bit(TYPE_SBY_OVR_INVAL, debug->typebit);
+		__set_bit(TYPE_SBY_OVR_OUTDIR, debug->typebit);
+		__set_bit(TYPE_SBY_OVR_INDIR, debug->typebit);
+		__set_bit(TYPE_SBY_PUPD_STATE, debug->typebit);
+		__set_bit(TYPE_SBY_OD_DIS, debug->typebit);
+
+		debug->chip = &lnw->chip;
+		debug->ops = &lnw_gpio_debug_ops;
+		debug->private_data = lnw;
+		lnw->debug = debug;
+
+		retval = gpio_debug_register(debug);
+		if (retval) {
+			dev_err(&pdev->dev, "langwell gpio_debug_register failed %d\n",
+				retval);
+			gpio_debug_remove(debug);
+		}
+	}
+
 	return 0;
 
 err_ioremap:
@@ -506,4 +1490,4 @@
 	return ret;
 }
 
-device_initcall(lnw_gpio_init);
+fs_initcall(lnw_gpio_init);
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index ac11e45..a57d090 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -29,6 +29,10 @@
 #define PCA953X_INVERT		2
 #define PCA953X_DIRECTION	3
 
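+/* extended registers only present on the PCAL953x variants */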
+#define PCAL953X_IN_LATCH	34
+#define PCAL953X_INT_MASK	37
+#define PCAL953X_INT_STAT	38
+
 #define REG_ADDR_AI		0x80
 
 #define PCA957X_IN		0
@@ -44,30 +48,34 @@
 #define PCA_INT			0x0100
 #define PCA953X_TYPE		0x1000
 #define PCA957X_TYPE		0x2000
+#define PCAL953X_TYPE		0x4000
 
 static const struct i2c_device_id pca953x_id[] = {
-	{ "pca9505", 40 | PCA953X_TYPE | PCA_INT, },
-	{ "pca9534", 8  | PCA953X_TYPE | PCA_INT, },
-	{ "pca9535", 16 | PCA953X_TYPE | PCA_INT, },
-	{ "pca9536", 4  | PCA953X_TYPE, },
-	{ "pca9537", 4  | PCA953X_TYPE | PCA_INT, },
-	{ "pca9538", 8  | PCA953X_TYPE | PCA_INT, },
-	{ "pca9539", 16 | PCA953X_TYPE | PCA_INT, },
-	{ "pca9554", 8  | PCA953X_TYPE | PCA_INT, },
-	{ "pca9555", 16 | PCA953X_TYPE | PCA_INT, },
-	{ "pca9556", 8  | PCA953X_TYPE, },
-	{ "pca9557", 8  | PCA953X_TYPE, },
-	{ "pca9574", 8  | PCA957X_TYPE | PCA_INT, },
-	{ "pca9575", 16 | PCA957X_TYPE | PCA_INT, },
+	{ "pca9505",   40 | PCA953X_TYPE | PCA_INT, },
+	{ "pca9534",   8  | PCA953X_TYPE | PCA_INT, },
+	{ "pca9535",   16 | PCA953X_TYPE | PCA_INT, },
+	{ "pca9536",   4  | PCA953X_TYPE, },
+	{ "pca9537",   4  | PCA953X_TYPE | PCA_INT, },
+	{ "pca9538",   8  | PCA953X_TYPE | PCA_INT, },
+	{ "pca9539",   16 | PCA953X_TYPE | PCA_INT, },
+	{ "pca9554",   8  | PCA953X_TYPE | PCA_INT, },
+	{ "pca9555",   16 | PCA953X_TYPE | PCA_INT, },
+	{ "pca9556",   8  | PCA953X_TYPE, },
+	{ "pca9557",   8  | PCA953X_TYPE, },
+	{ "pca9574",   8  | PCA957X_TYPE | PCA_INT, },
+	{ "pca9575",   16 | PCA957X_TYPE | PCA_INT, },
 
-	{ "max7310", 8  | PCA953X_TYPE, },
-	{ "max7312", 16 | PCA953X_TYPE | PCA_INT, },
-	{ "max7313", 16 | PCA953X_TYPE | PCA_INT, },
-	{ "max7315", 8  | PCA953X_TYPE | PCA_INT, },
-	{ "pca6107", 8  | PCA953X_TYPE | PCA_INT, },
-	{ "tca6408", 8  | PCA953X_TYPE | PCA_INT, },
-	{ "tca6416", 16 | PCA953X_TYPE | PCA_INT, },
-	{ "tca6424", 24 | PCA953X_TYPE | PCA_INT, },
+	{ "pcal9535a", 16 | PCAL953X_TYPE | PCA_INT, },
+	{ "pcal9555a", 16 | PCAL953X_TYPE | PCA_INT, },
+
+	{ "max7310",   8  | PCA953X_TYPE, },
+	{ "max7312",   16 | PCA953X_TYPE | PCA_INT, },
+	{ "max7313",   16 | PCA953X_TYPE | PCA_INT, },
+	{ "max7315",   8  | PCA953X_TYPE | PCA_INT, },
+	{ "pca6107",   8  | PCA953X_TYPE | PCA_INT, },
+	{ "tca6408",   8  | PCA953X_TYPE | PCA_INT, },
+	{ "tca6416",   16 | PCA953X_TYPE | PCA_INT, },
+	{ "tca6424",   24 | PCA953X_TYPE | PCA_INT, },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, pca953x_id);
@@ -148,6 +156,7 @@
 					NBANK(chip), val);
 	} else {
 		switch (chip->chip_type) {
+		case PCAL953X_TYPE:
 		case PCA953X_TYPE:
 			ret = i2c_smbus_write_word_data(chip->client,
 							reg << 1, (u16) *val);
@@ -210,6 +219,7 @@
 	reg_val = chip->reg_direction[off / BANK_SZ] | (1u << (off % BANK_SZ));
 
 	switch (chip->chip_type) {
+	case PCAL953X_TYPE:
 	case PCA953X_TYPE:
 		offset = PCA953X_DIRECTION;
 		break;
@@ -247,6 +257,7 @@
 			& ~(1u << (off % BANK_SZ));
 
 	switch (chip->chip_type) {
+	case PCAL953X_TYPE:
 	case PCA953X_TYPE:
 		offset = PCA953X_OUTPUT;
 		break;
@@ -263,6 +274,7 @@
 	/* then direction */
 	reg_val = chip->reg_direction[off / BANK_SZ] & ~(1u << (off % BANK_SZ));
 	switch (chip->chip_type) {
+	case PCAL953X_TYPE:
 	case PCA953X_TYPE:
 		offset = PCA953X_DIRECTION;
 		break;
@@ -291,6 +303,7 @@
 
 	mutex_lock(&chip->i2c_lock);
 	switch (chip->chip_type) {
+	case PCAL953X_TYPE:
 	case PCA953X_TYPE:
 		offset = PCA953X_INPUT;
 		break;
@@ -328,6 +341,7 @@
 			& ~(1u << (off % BANK_SZ));
 
 	switch (chip->chip_type) {
+	case PCAL953X_TYPE:
 	case PCA953X_TYPE:
 		offset = PCA953X_OUTPUT;
 		break;
@@ -411,6 +425,17 @@
 							level + (BANK_SZ * i));
 			new_irqs &= ~(1 << level);
 		}
+
+		if (chip->chip_type == PCAL953X_TYPE) {
+			/* Enable latch on interrupt-enabled inputs */
+			pca953x_write_single(chip, PCAL953X_IN_LATCH,
+						chip->irq_mask[i],
+						BANK_SZ * i);
+			/* Unmask enabled interrupts */
+			pca953x_write_single(chip, PCAL953X_INT_MASK,
+						~chip->irq_mask[i],
+						BANK_SZ * i);
+		}
 	}
 
 	mutex_unlock(&chip->irq_lock);
@@ -459,6 +484,7 @@
 	int ret, i, offset = 0;
 
 	switch (chip->chip_type) {
+	case PCAL953X_TYPE:
 	case PCA953X_TYPE:
 		offset = PCA953X_INPUT;
 		break;
@@ -496,14 +522,45 @@
 	return pendings;
 }
 
+static u32 pcal953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
+{
+	u8 cur_stat[MAX_BANK];
+	int ret, i = 0;
+	u8 pendings = 0;
+
+	/* Read the current interrupt status from the device */
+	ret = pca953x_read_regs(chip, PCAL953X_INT_STAT, pending);
+	if (ret)
+		return 0;
+
+	/* Check latched inputs and clear interrupt status */
+	ret = pca953x_read_regs(chip, PCA953X_INPUT, cur_stat);
+	if (ret)
+		return 0;
+
+	/* Apply filter for rising/falling edge selection */
+	for (i = 0; i < NBANK(chip); i++) {
+		pending[i] &= (~cur_stat[i] & chip->irq_trig_fall[i]) |
+			(cur_stat[i] & chip->irq_trig_raise[i]);
+		pendings += pending[i];
+	}
+
+	return pendings;
+}
+
 static irqreturn_t pca953x_irq_handler(int irq, void *devid)
 {
 	struct pca953x_chip *chip = devid;
 	u8 pending[MAX_BANK];
 	u8 level;
-	int i;
+	int i, pendings;
 
-	if (!pca953x_irq_pending(chip, pending))
+	if (chip->chip_type == PCAL953X_TYPE)
+		pendings = pcal953x_irq_pending(chip, pending);
+	else
+		pendings = pca953x_irq_pending(chip, pending);
+
+	if (!pendings)
 		return IRQ_HANDLED;
 
 	for (i = 0; i < NBANK(chip); i++) {
@@ -549,25 +606,27 @@
 	if (irq_base != -1
 			&& (id->driver_data & PCA_INT)) {
 
-		switch (chip->chip_type) {
-		case PCA953X_TYPE:
-			offset = PCA953X_INPUT;
-			break;
-		case PCA957X_TYPE:
-			offset = PCA957X_IN;
-			break;
-		}
-		ret = pca953x_read_regs(chip, offset, chip->irq_stat);
-		if (ret)
-			return ret;
+		if (chip->chip_type != PCAL953X_TYPE) {
+			switch (chip->chip_type) {
+			case PCA953X_TYPE:
+				offset = PCA953X_INPUT;
+				break;
+			case PCA957X_TYPE:
+				offset = PCA957X_IN;
+				break;
+			}
+			ret = pca953x_read_regs(chip, offset, chip->irq_stat);
+			if (ret)
+				return ret;
 
-		/*
-		 * There is no way to know which GPIO line generated the
-		 * interrupt.  We have to rely on the previous read for
-		 * this purpose.
-		 */
-		for (i = 0; i < NBANK(chip); i++)
-			chip->irq_stat[i] &= chip->reg_direction[i];
+			/*
+			 * There is no way to know which GPIO line generated the
+			 * interrupt.  We have to rely on the previous read for
+			 * this purpose.
+			 */
+			for (i = 0; i < NBANK(chip); i++)
+				chip->irq_stat[i] &= chip->reg_direction[i];
+		}
 		mutex_init(&chip->irq_lock);
 
 		chip->domain = irq_domain_add_simple(client->dev.of_node,
@@ -748,7 +807,8 @@
 
 	chip->client = client;
 
-	chip->chip_type = id->driver_data & (PCA953X_TYPE | PCA957X_TYPE);
+	chip->chip_type = id->driver_data &
+		(PCAL953X_TYPE | PCA953X_TYPE | PCA957X_TYPE);
 
 	mutex_init(&chip->i2c_lock);
 
@@ -757,7 +817,7 @@
 	 */
 	pca953x_setup_gpio(chip, id->driver_data & PCA_GPIO_MASK);
 
-	if (chip->chip_type == PCA953X_TYPE)
+	if (chip->chip_type & (PCA953X_TYPE | PCAL953X_TYPE))
 		ret = device_pca953x_init(chip, invert);
 	else
 		ret = device_pca957x_init(chip, invert);
@@ -824,6 +884,9 @@
 	{ .compatible = "nxp,pca9574", },
 	{ .compatible = "nxp,pca9575", },
 
+	{ .compatible = "nxp,pcal9535a", },
+	{ .compatible = "nxp,pcal9555a", },
+
 	{ .compatible = "maxim,max7310", },
 	{ .compatible = "maxim,max7312", },
 	{ .compatible = "maxim,max7313", },
diff --git a/drivers/gpio/gpiodebug.c b/drivers/gpio/gpiodebug.c
new file mode 100644
index 0000000..b83e1fc
--- /dev/null
+++ b/drivers/gpio/gpiodebug.c
@@ -0,0 +1,544 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/ctype.h>
+#include "gpiodebug.h"
+
+struct gpiodebug_data {
+	struct gpio_debug *debug;
+	int gpio;
+	unsigned int type;
+};
+
+enum {
+	REGISTER_FOPS = 0,
+	NORMAL_FOPS,
+	COUNT_FOPS,
+};
+
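+/*
+ * Map each debug attribute type to its debugfs file names and the
+ * file_operations flavour used to expose it.
+ */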
+static struct {
+	unsigned	fops_type;
+	unsigned	type;
+	char		*available_name;
+	char		*current_name;
+} global_array[] = {
+	{REGISTER_FOPS, TYPE_CONF_REG, "conf_reg", "conf_reg"},
+	{NORMAL_FOPS, TYPE_PIN_VALUE, "available_value",
+		"current_value"},
+	{NORMAL_FOPS, TYPE_DIRECTION, "available_direction",
+		"current_direction"},
+	{NORMAL_FOPS, TYPE_IRQ_TYPE, "available_irqtype",
+		"current_irqtype"},
+	{NORMAL_FOPS, TYPE_PINMUX, "available_pinmux",
+		"current_pinmux"},
+	{NORMAL_FOPS, TYPE_PULLMODE, "available_pullmode",
+		"current_pullmode"},
+	{NORMAL_FOPS, TYPE_PULLSTRENGTH, "available_pullstrength",
+		"current_pullstrength"},
+	{NORMAL_FOPS, TYPE_OPEN_DRAIN, "available_opendrain",
+		"current_opendrain"},
+	{COUNT_FOPS, TYPE_IRQ_COUNT, "irq_count", "irq_count"},
+	{NORMAL_FOPS, TYPE_WAKEUP, "available_wakeup", "current_wakeup"},
+	{COUNT_FOPS, TYPE_WAKEUP_COUNT, "wakeup_count", "wakeup_count"},
+	{NORMAL_FOPS, TYPE_DEBOUNCE, "available_debounce",
+		"current_debounce"},
+	{NORMAL_FOPS, TYPE_OVERRIDE_OUTDIR, "available_override_outdir",
+		"current_override_outdir"},
+	{NORMAL_FOPS, TYPE_OVERRIDE_OUTVAL, "available_override_outval",
+		"current_override_outval"},
+	{NORMAL_FOPS, TYPE_OVERRIDE_INDIR, "available_override_indir",
+		"current_override_indir"},
+	{NORMAL_FOPS, TYPE_OVERRIDE_INVAL, "available_override_inval",
+		"current_override_inval"},
+	{NORMAL_FOPS, TYPE_SBY_OVR_IO, "available_standby_trigger",
+		"current_standby_trigger"},
+	{NORMAL_FOPS, TYPE_SBY_OVR_OUTVAL, "available_standby_outval",
+		"current_standby_outval"},
+	{NORMAL_FOPS, TYPE_SBY_OVR_INVAL, "available_standby_inval",
+		"current_standby_inval"},
+	{NORMAL_FOPS, TYPE_SBY_OVR_OUTDIR, "available_standby_outdir",
+		"current_standby_outdir"},
+	{NORMAL_FOPS, TYPE_SBY_OVR_INDIR, "available_standby_indir",
+		"current_standby_indir"},
+	{NORMAL_FOPS, TYPE_SBY_PUPD_STATE, "available_standby_pullmode",
+		"current_standby_pullmode"},
+	{NORMAL_FOPS, TYPE_SBY_OD_DIS, "available_standby_opendrain",
+		"current_standby_opendrain"},
+
+};
+
+static struct dentry *gpio_root[ARCH_NR_GPIOS];
+static struct gpiodebug_data global_data[ARCH_NR_GPIOS][TYPE_MAX];
+
+static struct dentry *gpiodebug_debugfs_root;
+
+struct gpio_control *find_gpio_control(struct gpio_control *control, int num,
+			unsigned type)
+{
+	int i;
+
+	for (i = 0; i < num; i++) {
+		if ((control+i)->type == type)
+			break;
+	}
+
+	if (i < num)
+		return control+i;
+
+	return NULL;
+}
+
+int find_pininfo_num(struct gpio_control *control, const char *info)
+{
+	int num = 0;
+
+	while (num < control->num) {
+		if (!strcmp(*(control->pininfo+num), info))
+			break;
+		num++;
+	}
+
+	if (num < control->num)
+		return num;
+
+	return -1;
+}
+
+static struct dentry *gpiodebug_create_file(const char *name,
+			umode_t mode, struct dentry *parent,
+			void *data, const struct file_operations *fops)
+{
+	struct dentry *ret;
+
+	ret = debugfs_create_file(name, mode, parent, data, fops);
+	if (!ret)
+		pr_warn("Could not create debugfs '%s' entry\n", name);
+
+	return ret;
+}
+
+static int gpiodebug_open_file(struct inode *inode, struct file *filp)
+{
+	filp->private_data = inode->i_private;
+	return 0;
+}
+
+static const char readme_msg[] =
+	"\n GPIO Debug Tool-HOWTO (Example):\n\n"
+	"# mount -t debugfs nodev /sys/kernel/debug\n\n"
+	"# cat /sys/kernel/debug/gpio_debug/gpio0/available_pullmode\n"
+	"nopull	pullup	pulldown\n\n"
+	"# cat /sys/kernel/debug/gpio_debug/gpio0/current_pullmode\n"
+	"nopull\n"
+	"# echo pullup > /sys/kernel/debug/gpio_debug/gpio0/current_pullmode\n"
+	"# cat /sys/kernel/debug/gpio_debug/gpio0/current_pullmode\n"
+	"pullup\n\n"
+	"# cat conf_reg\n"
+	"0x00003120\n"
+	"# echo 0x00003121 > conf_reg\n"
+	"0x00003121\n\n"
+	"# cat irq_count\n"
+	"1\n";
+
+/* gpio_readme_fops */
+static ssize_t show_gpio_readme(struct file *filp, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	ssize_t ret = 0;
+
+	if (*ppos < 0 || !cnt)
+		return -EINVAL;
+
+	ret = simple_read_from_buffer(ubuf, cnt, ppos, readme_msg,
+		strlen(readme_msg));
+
+	return ret;
+}
+
+static const struct file_operations gpio_readme_fops = {
+	.open		= gpiodebug_open_file,
+	.read		= show_gpio_readme,
+	.llseek		= generic_file_llseek,
+};
+
+/* gpio_reginfo_fops */
+static ssize_t show_gpio_reginfo(struct file *filp, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	ssize_t ret = 0;
+	struct gpio_debug *debug = filp->private_data;
+	unsigned long size;
+	char *buf;
+
+	if (*ppos < 0 || !cnt)
+		return -EINVAL;
+
+	if (debug->ops->get_register_msg) {
+		debug->ops->get_register_msg(&buf, &size);
+		ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, size);
+	}
+
+	return ret;
+}
+
+static const struct file_operations gpio_reginfo_fops = {
+	.open		= gpiodebug_open_file,
+	.read		= show_gpio_reginfo,
+	.llseek		= generic_file_llseek,
+};
+
+
+/* gpio_conf_fops */
+static ssize_t gpio_conf_read(struct file *filp, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	ssize_t ret = 0;
+	struct gpiodebug_data *data = filp->private_data;
+	struct gpio_debug *debug = data->debug;
+	int gpio = data->gpio;
+	char *buf;
+	unsigned int value = 0;
+
+	if (*ppos < 0 || !cnt)
+		return -EINVAL;
+
+	buf = kzalloc(cnt, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (debug->ops->get_conf_reg)
+		value = debug->ops->get_conf_reg(debug, gpio);
+
+	if (value == -EINVAL)
+		ret = scnprintf(buf, cnt, "Invalid pin\n");
+	else
+		ret = scnprintf(buf, cnt, "0x%08x\n", value);
+
+	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, ret);
+
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t gpio_conf_write(struct file *filp, const char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	ssize_t ret = 0;
+	struct gpiodebug_data *data = filp->private_data;
+	struct gpio_debug *debug = data->debug;
+	int i, gpio = data->gpio, err;
+	char *buf, *start;
+	unsigned long int value;
+
+	ret = cnt;
+
+	if (*ppos < 0 || !cnt)
+		return -EINVAL;
+
+	buf = kzalloc(cnt + 1, GFP_KERNEL);	/* +1 keeps the string NUL-terminated */
+	if (!buf)
+		return -ENOMEM;
+
+	if (copy_from_user(buf, ubuf, cnt)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	start = buf;
+
+	while (*start == ' ')
+		start++;
+
+	/* strip ending whitespace. */
+	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
+		buf[i] = 0;
+
+	err = kstrtoul(start, 16, &value);
+	if (err) {
+		pr_warn("kstrtoul() failed with errno: %d\n", err);
+		kfree(buf);
+		return err;
+	}
+
+	if (debug->ops->set_conf_reg)
+		debug->ops->set_conf_reg(debug, gpio, value);
+
+	kfree(buf);
+	*ppos += ret;
+
+	return ret;
+}
+
+static const struct file_operations gpio_conf_fops = {
+	.open		= gpiodebug_open_file,
+	.read		= gpio_conf_read,
+	.write		= gpio_conf_write,
+	.llseek		= generic_file_llseek,
+};
+
+/* show_gpiodebug_fops */
+static ssize_t gpiodebug_show_read(struct file *filp, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	ssize_t ret = 0;
+	struct gpiodebug_data *data = filp->private_data;
+	struct gpio_debug *debug = data->debug;
+	unsigned int type = data->type;
+	int i, num = 0;
+	int gpio = data->gpio;
+	char *buf, **avl_buf = NULL;
+
+	if (*ppos < 0 || !cnt)
+		return -EINVAL;
+
+	buf = kzalloc(cnt, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (debug->ops->get_avl_pininfo) {
+		avl_buf = debug->ops->get_avl_pininfo(debug, gpio, type, &num);
+
+		/* sprintf() with overlapping src and dst is undefined
+		 * behaviour, so append at an explicit offset instead. */
+		for (i = 0; i < num; i++)
+			ret += scnprintf(buf + ret, cnt - ret, "%s\t",
+					 *(avl_buf + i));
+	}
+
+	ret += scnprintf(buf + ret, cnt - ret, "\n");
+
+	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, ret);
+
+	kfree(buf);
+
+	return ret;
+}
+
+static const struct file_operations show_gpiodebug_fops = {
+	.open		= gpiodebug_open_file,
+	.read		= gpiodebug_show_read,
+	.llseek		= generic_file_llseek,
+};
+
+/* set_gpiodebug_fops */
+static ssize_t gpiodebug_set_gpio_read(struct file *filp, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	ssize_t ret = 0;
+	struct gpiodebug_data *data = filp->private_data;
+	struct gpio_debug *debug = data->debug;
+	unsigned int type = data->type;
+	int gpio = data->gpio;
+	char *buf, *cur_info = NULL;
+
+	if (*ppos < 0 || !cnt)
+		return -EINVAL;
+
+	buf = kzalloc(cnt, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (debug->ops->get_cul_pininfo)
+		cur_info = debug->ops->get_cul_pininfo(debug, gpio, type);
+
+	if (cur_info)
+		ret = scnprintf(buf, cnt, "%s\n", cur_info);
+	else
+		ret = scnprintf(buf, cnt, "\n");
+
+	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, ret);
+
+	kfree(buf);
+
+	return ret;
+}
+
+static ssize_t gpiodebug_set_gpio_write(struct file *filp,
+		const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	ssize_t ret = 0;
+	struct gpiodebug_data *data = filp->private_data;
+	struct gpio_debug *debug = data->debug;
+	unsigned int type = data->type;
+	int i, gpio = data->gpio;
+	char *buf;
+
+	ret = cnt;
+
+	if (*ppos < 0 || !cnt)
+		return -EINVAL;
+
+	buf = kzalloc(cnt + 1, GFP_KERNEL);	/* +1 keeps the string NUL-terminated */
+	if (!buf)
+		return -ENOMEM;
+
+	if (copy_from_user(buf, ubuf, cnt)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	/* strip ending whitespace. */
+	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
+		buf[i] = 0;
+
+	if (debug->ops->set_pininfo)
+		debug->ops->set_pininfo(debug, gpio, type, buf);
+
+	kfree(buf);
+	*ppos += ret;
+
+	return ret;
+}
+
+static const struct file_operations set_gpiodebug_fops = {
+	.open		= gpiodebug_open_file,
+	.read		= gpiodebug_set_gpio_read,
+	.write		= gpiodebug_set_gpio_write,
+	.llseek		= generic_file_llseek,
+};
+
+/* show_count_fops */
+static ssize_t show_count_read(struct file *filp, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	ssize_t ret = 0;
+	struct gpiodebug_data *data = filp->private_data;
+	struct gpio_debug *debug = data->debug;
+	unsigned int type = data->type;
+	unsigned long count = 0;
+	int gpio = data->gpio;
+	char *buf;
+
+	if (*ppos < 0 || !cnt)
+		return -EINVAL;
+
+	buf = kzalloc(cnt, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (type == TYPE_IRQ_COUNT)
+		count = debug->irq_count[gpio];
+	else if (type == TYPE_WAKEUP_COUNT)
+		count = debug->wakeup_count[gpio];
+
+	ret = scnprintf(buf, cnt, "%lu\n", count);
+
+	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, ret);
+
+	kfree(buf);
+
+	return ret;
+}
+
+static const struct file_operations show_count_fops = {
+	.open		= gpiodebug_open_file,
+	.read		= show_count_read,
+	.llseek		= generic_file_llseek,
+};
+
+/******************************************************************************/
+struct gpio_debug *gpio_debug_alloc(void)
+{
+	struct gpio_debug *debug;
+
+	debug = kzalloc(sizeof(struct gpio_debug), GFP_KERNEL);
+	if (debug) {
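+		/* attribute types common to all chips; drivers may set
+		 * additional type bits before calling
+		 * gpio_debug_register() */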
+		__set_bit(TYPE_CONF_REG, debug->typebit);
+		__set_bit(TYPE_PIN_VALUE, debug->typebit);
+		__set_bit(TYPE_DIRECTION, debug->typebit);
+		__set_bit(TYPE_IRQ_TYPE, debug->typebit);
+		__set_bit(TYPE_PINMUX, debug->typebit);
+		__set_bit(TYPE_PULLMODE, debug->typebit);
+		__set_bit(TYPE_PULLSTRENGTH, debug->typebit);
+		__set_bit(TYPE_OPEN_DRAIN, debug->typebit);
+		__set_bit(TYPE_IRQ_COUNT, debug->typebit);
+		__set_bit(TYPE_DEBOUNCE, debug->typebit);
+	}
+
+	return debug;
+}
+
+void gpio_debug_remove(struct gpio_debug *debug)
+{
+	struct gpio_chip *chip = debug->chip;
+	int base = chip->base;
+	unsigned ngpio = chip->ngpio;
+	int i;
+
+	for (i = base; i < base+ngpio; i++)
+		debugfs_remove_recursive(gpio_root[i]);
+
+	kfree(debug);
+}
+
+int gpio_debug_register(struct gpio_debug *debug)
+{
+	struct gpio_chip *chip = debug->chip;
+	int base = chip->base;
+	unsigned ngpio = chip->ngpio;
+	int i, j;
+	char gpioname[32];
+
+	for (i = base; i < base+ngpio; i++) {
+		sprintf(gpioname, "gpio%d", i);
+		gpio_root[i] = debugfs_create_dir(gpioname,
+				gpiodebug_debugfs_root);
+		if (!gpio_root[i]) {
+			pr_warn("gpiodebug: Failed to create debugfs directory\n");
+			return -ENOMEM;
+		}
+
+		/* register info */
+		gpiodebug_create_file("register_info", 0444, gpio_root[i],
+			debug, &gpio_reginfo_fops);
+
+		for (j = 0; j < ARRAY_SIZE(global_array); j++) {
+			if (test_bit(global_array[j].type, debug->typebit)) {
+				global_data[i][j].gpio = i;
+				global_data[i][j].debug = debug;
+				global_data[i][j].type = global_array[j].type;
+
+				switch (global_array[j].fops_type) {
+				case REGISTER_FOPS:
+					gpiodebug_create_file(
+					  global_array[j].current_name, 0644,
+					  gpio_root[i], &global_data[i][j],
+					  &gpio_conf_fops);
+					break;
+				case NORMAL_FOPS:
+					gpiodebug_create_file(
+					  global_array[j].available_name, 0444,
+					  gpio_root[i], &global_data[i][j],
+					  &show_gpiodebug_fops);
+
+					gpiodebug_create_file(
+					  global_array[j].current_name, 0644,
+					  gpio_root[i], &global_data[i][j],
+					  &set_gpiodebug_fops);
+					break;
+				case COUNT_FOPS:
+					gpiodebug_create_file(
+					  global_array[j].current_name, 0444,
+					  gpio_root[i], &global_data[i][j],
+					  &show_count_fops);
+					break;
+				default:
+					break;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int __init gpio_debug_init(void)
+{
+	gpiodebug_debugfs_root = debugfs_create_dir("gpio_debug", NULL);
+	if (IS_ERR(gpiodebug_debugfs_root) || !gpiodebug_debugfs_root) {
+		pr_warn("gpiodebug: Failed to create debugfs directory\n");
+		gpiodebug_debugfs_root = NULL;
+	}
+
+	/* readme */
+	gpiodebug_create_file("readme", 0444, gpiodebug_debugfs_root,
+		NULL, &gpio_readme_fops);
+
+	return 0;
+}
+
+subsys_initcall(gpio_debug_init);
diff --git a/drivers/gpio/gpiodebug.h b/drivers/gpio/gpiodebug.h
new file mode 100644
index 0000000..f1d58a5
--- /dev/null
+++ b/drivers/gpio/gpiodebug.h
@@ -0,0 +1,108 @@
+#ifndef __GPIO_DEBUG_H_
+#define __GPIO_DEBUG_H_
+
+#include <linux/gpio.h>
+
+struct gpio_debug;
+
+#define TYPE_CONF_REG			0x00
+#define TYPE_PIN_VALUE			0x01
+#define TYPE_DIRECTION			0x02
+#define TYPE_IRQ_TYPE			0x03
+#define TYPE_PINMUX			0x04
+#define TYPE_PULLMODE			0x05
+#define TYPE_PULLSTRENGTH		0x06
+#define TYPE_OPEN_DRAIN			0x07
+
+#define TYPE_IRQ_COUNT			0x08
+#define TYPE_WAKEUP			0x09
+#define TYPE_WAKEUP_COUNT		0x0A
+#define TYPE_OVERRIDE_OUTDIR		0x0B
+#define TYPE_OVERRIDE_OUTVAL		0x0C
+#define TYPE_OVERRIDE_INDIR		0x0D
+#define TYPE_OVERRIDE_INVAL		0x0E
+#define TYPE_DEBOUNCE			0x0F
+
+#define TYPE_SBY_OVR_IO			0x10
+#define TYPE_SBY_OVR_OUTVAL		0x11
+#define TYPE_SBY_OVR_INVAL		0x12
+#define TYPE_SBY_OVR_OUTDIR		0x13
+#define TYPE_SBY_OVR_INDIR		0x14
+#define TYPE_SBY_PUPD_STATE		0x15
+#define TYPE_SBY_OD_DIS			0x16
+#define TYPE_MAX			0x17
+
+struct gpio_control {
+	unsigned type, num;
+	char	 **pininfo;
+	u32	reg, invert;
+	u32 shift, rshift;
+	u32	mask;
+	int (*get)(struct gpio_control *control, void *private_data,
+		unsigned gpio);
+	int (*set)(struct gpio_control *control, void *private_data,
+		unsigned gpio, unsigned int num);
+};
+
+struct gpio_debug_ops {
+	unsigned int (*get_conf_reg)(struct gpio_debug *debug, unsigned gpio);
+	void	(*set_conf_reg)(struct gpio_debug *debug, unsigned gpio,
+			unsigned int value);
+	char	**(*get_avl_pininfo)(struct gpio_debug *debug, unsigned gpio,
+			unsigned int type, unsigned *num);
+	char	*(*get_cul_pininfo)(struct gpio_debug *debug, unsigned gpio,
+			unsigned int type);
+	void	(*set_pininfo)(struct gpio_debug *debug, unsigned gpio,
+			unsigned int type, const char *info);
+	int	(*get_register_msg)(char **buf, unsigned long *size);
+};
+
+struct gpio_debug {
+	unsigned long		typebit[BITS_TO_LONGS(TYPE_MAX)];
+	struct gpio_chip	*chip;
+	struct gpio_debug_ops	*ops;
+	unsigned long		irq_count[ARCH_NR_GPIOS];
+	unsigned long		wakeup_count[ARCH_NR_GPIOS];
+	void			*private_data;
+};
+
+#ifdef CONFIG_GPIODEBUG
+
+#define DEFINE_DEBUG_IRQ_CONUNT_INCREASE(gpio) (debug->irq_count[gpio]++)
+
+struct gpio_control *find_gpio_control(struct gpio_control *control, int num,
+			unsigned type);
+int find_pininfo_num(struct gpio_control *control, const char *info);
+
+struct gpio_debug *gpio_debug_alloc(void);
+void gpio_debug_remove(struct gpio_debug *debug);
+int gpio_debug_register(struct gpio_debug *debug);
+#else
+
+#define DEFINE_DEBUG_IRQ_CONUNT_INCREASE(gpio)
+
+static inline struct gpio_control *find_gpio_control(
+			struct gpio_control *control, int num, unsigned type)
+{
+	return NULL;
+}
+static inline int find_pininfo_num(struct gpio_control *control,
+			const char *info)
+{
+	return 0;
+}
+static inline struct gpio_debug *gpio_debug_alloc(void)
+{
+	return NULL;
+}
+
+static inline void gpio_debug_remove(struct gpio_debug *debug)
+{
+}
+static inline int gpio_debug_register(struct gpio_debug *debug)
+{
+	return 0;
+}
+#endif
+#endif
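
A controller driver would consume this header roughly as below. This is a
minimal sketch, not part of the patch: the probe/remove wrappers and the
chosen typebit flags are invented for illustration; only the
gpio_debug_alloc()/gpio_debug_register()/gpio_debug_remove() lifecycle and
the typebit field come from the declarations above.

	#include "gpiodebug.h"

	static struct gpio_debug *debug;

	static int example_gpio_debug_probe(struct gpio_chip *chip,
					    struct gpio_debug_ops *ops)
	{
		debug = gpio_debug_alloc();
		if (!debug)
			return 0;	/* stub returns NULL when CONFIG_GPIODEBUG=n */

		debug->chip = chip;
		debug->ops = ops;
		/* advertise the attribute types this controller implements */
		set_bit(TYPE_PIN_VALUE, debug->typebit);
		set_bit(TYPE_DIRECTION, debug->typebit);

		return gpio_debug_register(debug);
	}

	static void example_gpio_debug_remove(void)
	{
		gpio_debug_remove(debug);
	}
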
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index c3768fa..0b7ecd5 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -136,7 +136,7 @@
  */
 static int desc_to_gpio(const struct gpio_desc *desc)
 {
-	return desc->chip->base + gpio_chip_hwgpio(desc);
+	return desc - &gpio_desc[0];
 }
 
 
@@ -244,6 +244,9 @@
 
 /*
  * /sys/class/gpio/gpioN... only for GPIOs that are exported
+ *   /pinmux
+ *      * selects plain GPIO operation or an alternate pin function
+ *      * is read/write as zero (plain GPIO) or the alternate function number
  *   /direction
  *      * MAY BE OMITTED if kernel won't allow direction changes
  *      * is read/write as "in" or "out"
@@ -262,6 +265,54 @@
  *      * also affects existing and subsequent "falling" and "rising"
  *        /edge configuration
  */
+static ssize_t gpio_pinmux_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	const struct gpio_desc	*desc = dev_get_drvdata(dev);
+	unsigned		gpio = desc - gpio_desc;
+	struct gpio_chip	*chip;
+	ssize_t			status = -EINVAL;
+
+	mutex_lock(&sysfs_lock);
+
+	chip = desc->chip;
+
+	if (!test_bit(FLAG_EXPORT, &desc->flags))
+		status = -EIO;
+	else if (chip->get_pinmux != NULL)
+		status = sprintf(buf, "%d\n", chip->get_pinmux(gpio));
+
+	mutex_unlock(&sysfs_lock);
+	return status;
+}
+
+static ssize_t gpio_pinmux_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	const struct gpio_desc	*desc = dev_get_drvdata(dev);
+	unsigned		gpio = desc - gpio_desc;
+	ssize_t			status = -EINVAL;
+	struct gpio_chip	*chip;
+	long	mux;
+
+	mutex_lock(&sysfs_lock);
+
+	chip = desc->chip;
+
+	if (!test_bit(FLAG_EXPORT, &desc->flags))
+		status = -EIO;
+	else if (chip->set_pinmux != NULL) {
+		status = kstrtol(buf, 0, &mux);
+		if (status == 0)
+			chip->set_pinmux(gpio, mux);
+	}
+
+	mutex_unlock(&sysfs_lock);
+	return status ? : size;
+}
+
+static DEVICE_ATTR(pinmux, 0644,
+		gpio_pinmux_show, gpio_pinmux_store);
 
 static ssize_t gpio_direction_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
@@ -586,6 +637,7 @@
 static struct attribute *gpio_attrs[] = {
 	&dev_attr_value.attr,
 	&dev_attr_active_low.attr,
+	&dev_attr_pinmux.attr,
 	NULL,
 };
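
The show/store handlers above call get_pinmux()/set_pinmux() hooks on
struct gpio_chip; their declarations are added by a linux/gpio.h hunk
elsewhere in this patch. A sketch of a controller wiring them up, with
EXAMPLE_MUX_REG() standing in for a real register map and the hook
prototypes assumed from the calls above:

	/* Hypothetical controller-side pinmux hooks: get returns the
	 * currently selected function number, set programs a new one. */
	static int example_get_pinmux(unsigned gpio)
	{
		return readl(EXAMPLE_MUX_REG(gpio)) & 0x7;
	}

	static void example_set_pinmux(unsigned gpio, int mux)
	{
		writel(mux & 0x7, EXAMPLE_MUX_REG(gpio));
	}

	static struct gpio_chip example_chip = {
		.label		= "example",
		.get_pinmux	= example_get_pinmux,
		.set_pinmux	= example_set_pinmux,
		/* ...plus the usual get/set/direction callbacks... */
	};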
 
@@ -1242,15 +1294,14 @@
 		}
 	}
 
+	spin_unlock_irqrestore(&gpio_lock, flags);
+
 #ifdef CONFIG_PINCTRL
 	INIT_LIST_HEAD(&chip->pin_ranges);
 #endif
 
 	of_gpiochip_add(chip);
 
-unlock:
-	spin_unlock_irqrestore(&gpio_lock, flags);
-
 	if (status)
 		goto fail;
 
@@ -1263,6 +1314,9 @@
 		chip->label ? : "generic");
 
 	return 0;
+
+unlock:
+	spin_unlock_irqrestore(&gpio_lock, flags);
 fail:
 	/* failures here can mean systems won't boot... */
 	pr_err("gpiochip_add: gpios %d..%d (%s) failed to register\n",
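
The reordering above is needed because of_gpiochip_add() may sleep, while
gpio_lock is a spinlock taken with interrupts disabled. Condensed, the
resulting pattern in gpiochip_add() is:

	spin_lock_irqsave(&gpio_lock, flags);
	/* ...publish the chip in the descriptor table... */
	spin_unlock_irqrestore(&gpio_lock, flags);

	of_gpiochip_add(chip);	/* may sleep: only safe after unlocking */
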
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.h b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
index 36eb074..61cea74 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.h
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
@@ -39,7 +39,7 @@
 #include "psb_intel_reg.h"
 #include "mdfld_output.h"
 
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
 
 #define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
 #define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 08747fd..7a9ce00 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -26,7 +26,7 @@
 #include "psb_drv.h"
 #include "psb_reg.h"
 #include "psb_intel_reg.h"
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
 #include <asm/intel_scu_ipc.h>
 #include "mid_bios.h"
 #include "intel_bios.h"
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 325013a..d011e91 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -22,7 +22,7 @@
 
 #include <linux/i2c.h>
 #include <drm/drmP.h>
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
 
 #include "intel_bios.h"
 #include "psb_drv.h"
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index c1a8cf2..75de860 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1215,6 +1215,12 @@
 }
 EXPORT_SYMBOL_GPL(hid_output_report);
 
+static int hid_report_len(struct hid_report *report)
+{
+	/* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
+	return ((report->size - 1) >> 3) + 1 + (report->id > 0);
+}
+
 /*
  * Allocator for buffer that is going to be passed to hid_output_report()
  */
@@ -1225,7 +1231,7 @@
 	 * of implement() working on 8 byte chunks
 	 */
 
-	int len = ((report->size - 1) >> 3) + 1 + (report->id > 0) + 7;
+	int len = hid_report_len(report) + 7;
 
 	return kmalloc(len, flags);
 }
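
A worked example of the new helper, matching the open-coded expression it
replaces here:

	/* report->size = 12 bits, report->id = 3:
	 *   ((12 - 1) >> 3) + 1 = 2 payload bytes
	 *   + 1 byte for the non-zero report ID        => 3
	 * hid_alloc_report_buf() then adds 7 more bytes so implement()
	 * can write in 8-byte chunks without overrunning the buffer.
	 */
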
@@ -1281,6 +1287,41 @@
 	return report;
 }
 
+/*
+ * Implement a generic .request() callback, using .raw_request()
+ * DO NOT USE in hid drivers directly, but through hid_hw_request instead.
+ */
+void __hid_request(struct hid_device *hid, struct hid_report *report,
+		int reqtype)
+{
+	char *buf;
+	int ret;
+	int len;
+
+	buf = hid_alloc_report_buf(report, GFP_KERNEL);
+	if (!buf)
+		return;
+
+	len = hid_report_len(report);
+
+	if (reqtype == HID_REQ_SET_REPORT)
+		hid_output_report(report, buf);
+
+	ret = hid->ll_driver->raw_request(hid, report->id, buf, len,
+					  report->type, reqtype);
+	if (ret < 0) {
+		dbg_hid("unable to complete request: %d\n", ret);
+		goto out;
+	}
+
+	if (reqtype == HID_REQ_GET_REPORT)
+		hid_input_report(hid, report->type, buf, ret, 0);
+
+out:
+	kfree(buf);
+}
+EXPORT_SYMBOL_GPL(__hid_request);
+
 int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
 		int interrupt)
 {
@@ -2378,6 +2419,14 @@
 		return -ENODEV;
 
 	/*
+	 * Check for the mandatory transport channel.
+	 */
+	if (!hdev->ll_driver->raw_request) {
+		hid_err(hdev, "transport driver missing .raw_request()\n");
+		return -EINVAL;
+	}
+
+	/*
 	 * Read the device report descriptor once and use as template
 	 * for the driver-specific modifications.
 	 */
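
With .raw_request() now mandatory, drivers issue transfers through the
hid_hw_* wrappers instead of the old hdev->hid_output_raw_report() and
hid_get_raw_report() pointers (see the hid-lg and hid-magicmouse
conversions below). A minimal sketch, assuming the hid_hw_raw_request()
wrapper this series adds to linux/hid.h and a made-up report ID:

	static int example_roundtrip_feature(struct hid_device *hdev)
	{
		__u8 buf[2] = { 0x42, 0x01 };	/* report ID 0x42, one data byte */
		int ret;

		ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf),
					 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
		if (ret < 0)
			return ret;

		/* read the same feature report back */
		return hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf),
					  HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
	}
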
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 8453214..941ab3c 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -768,6 +768,8 @@
 	[KEY_ALTERASE] = "AlternateErase",	[KEY_CANCEL] = "Cancel",
 	[KEY_BRIGHTNESSDOWN] = "BrightnessDown", [KEY_BRIGHTNESSUP] = "BrightnessUp",
 	[KEY_MEDIA] = "Media",			[KEY_UNKNOWN] = "Unknown",
+	[BTN_DPAD_UP] = "BtnDPadUp",		[BTN_DPAD_DOWN] = "BtnDPadDown",
+	[BTN_DPAD_LEFT] = "BtnDPadLeft",	[BTN_DPAD_RIGHT] = "BtnDPadRight",
 	[BTN_0] = "Btn0",			[BTN_1] = "Btn1",
 	[BTN_2] = "Btn2",			[BTN_3] = "Btn3",
 	[BTN_4] = "Btn4",			[BTN_5] = "Btn5",
@@ -797,7 +799,8 @@
 	[BTN_TOOL_MOUSE] = "ToolMouse",		[BTN_TOOL_LENS] = "ToolLens",
 	[BTN_TOUCH] = "Touch",			[BTN_STYLUS] = "Stylus",
 	[BTN_STYLUS2] = "Stylus2",		[BTN_TOOL_DOUBLETAP] = "ToolDoubleTap",
-	[BTN_TOOL_TRIPLETAP] = "ToolTripleTap", [BTN_GEAR_DOWN] = "WheelBtn",
+	[BTN_TOOL_TRIPLETAP] = "ToolTripleTap",	[BTN_TOOL_QUADTAP] = "ToolQuadrupleTap",
+	[BTN_GEAR_DOWN] = "WheelBtn",
 	[BTN_GEAR_UP] = "Gear up",		[KEY_OK] = "Ok",
 	[KEY_SELECT] = "Select",		[KEY_GOTO] = "Goto",
 	[KEY_CLEAR] = "Clear",			[KEY_POWER2] = "Power2",
@@ -852,6 +855,16 @@
 	[KEY_KBDILLUMDOWN] = "KbdIlluminationDown",
 	[KEY_KBDILLUMUP] = "KbdIlluminationUp",
 	[KEY_SWITCHVIDEOMODE] = "SwitchVideoMode",
+	[KEY_BUTTONCONFIG] = "ButtonConfig",
+	[KEY_TASKMANAGER] = "TaskManager",
+	[KEY_JOURNAL] = "Journal",
+	[KEY_CONTROLPANEL] = "ControlPanel",
+	[KEY_APPSELECT] = "AppSelect",
+	[KEY_SCREENSAVER] = "ScreenSaver",
+	[KEY_VOICECOMMAND] = "VoiceCommand",
+	[KEY_BRIGHTNESS_MIN] = "BrightnessMin",
+	[KEY_BRIGHTNESS_MAX] = "BrightnessMax",
+	[KEY_BRIGHTNESS_AUTO] = "BrightnessAuto",
 };
 
 static const char *relatives[REL_MAX + 1] = {
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index aa3fec0..20b28de 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -457,12 +457,22 @@
 {
 }
 
+static int mousevsc_hid_raw_request(struct hid_device *hid,
+				    unsigned char report_num,
+				    __u8 *buf, size_t len,
+				    unsigned char rtype,
+				    int reqtype)
+{
+	return 0;
+}
+
 static struct hid_ll_driver mousevsc_ll_driver = {
 	.parse = mousevsc_hid_parse,
 	.open = mousevsc_hid_open,
 	.close = mousevsc_hid_close,
 	.start = mousevsc_hid_start,
 	.stop = mousevsc_hid_stop,
+	.raw_request = mousevsc_hid_raw_request,
 };
 
 static struct hid_driver mousevsc_hid_driver;
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 45c593d..992b310 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -750,6 +750,8 @@
 #define USB_DEVICE_ID_SONY_PS3_BDREMOTE		0x0306
 #define USB_DEVICE_ID_SONY_PS3_CONTROLLER	0x0268
 #define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER	0x042f
+#define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER		0x0002
+#define USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER	0x1000
 
 #define USB_VENDOR_ID_SOUNDGRAPH	0x15c2
 #define USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST	0x0034
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index eb5700e..0fc2e67 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -358,9 +358,9 @@
 			ret = -ENOMEM;
 			break;
 		}
-		ret = dev->hid_get_raw_report(dev, dev->battery_report_id,
-					      buf, 2,
-					      dev->battery_report_type);
+		ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 2,
+					 dev->battery_report_type,
+					 HID_REQ_GET_REPORT);
 
 		if (ret != 2) {
 			if (ret >= 0)
@@ -729,6 +729,13 @@
 		case 0x06c: map_key_clear(KEY_YELLOW);		break;
 		case 0x06d: map_key_clear(KEY_ZOOM);		break;
 
+		case 0x06f: map_key_clear(KEY_BRIGHTNESSUP);		break;
+		case 0x070: map_key_clear(KEY_BRIGHTNESSDOWN);		break;
+		case 0x072: map_key_clear(KEY_BRIGHTNESS_TOGGLE);	break;
+		case 0x073: map_key_clear(KEY_BRIGHTNESS_MIN);		break;
+		case 0x074: map_key_clear(KEY_BRIGHTNESS_MAX);		break;
+		case 0x075: map_key_clear(KEY_BRIGHTNESS_AUTO);		break;
+
 		case 0x082: map_key_clear(KEY_VIDEO_NEXT);	break;
 		case 0x083: map_key_clear(KEY_LAST);		break;
 		case 0x084: map_key_clear(KEY_ENTER);		break;
@@ -769,6 +776,7 @@
 		case 0x0bf: map_key_clear(KEY_SLOW);		break;
 
 		case 0x0cd: map_key_clear(KEY_PLAYPAUSE);	break;
+		case 0x0cf: map_key_clear(KEY_VOICECOMMAND);	break;
 		case 0x0e0: map_abs_clear(ABS_VOLUME);		break;
 		case 0x0e2: map_key_clear(KEY_MUTE);		break;
 		case 0x0e5: map_key_clear(KEY_BASSBOOST);	break;
@@ -776,6 +784,7 @@
 		case 0x0ea: map_key_clear(KEY_VOLUMEDOWN);	break;
 		case 0x0f5: map_key_clear(KEY_SLOW);		break;
 
+		case 0x181: map_key_clear(KEY_BUTTONCONFIG);	break;
 		case 0x182: map_key_clear(KEY_BOOKMARKS);	break;
 		case 0x183: map_key_clear(KEY_CONFIG);		break;
 		case 0x184: map_key_clear(KEY_WORDPROCESSOR);	break;
@@ -789,6 +798,8 @@
 		case 0x18c: map_key_clear(KEY_VOICEMAIL);	break;
 		case 0x18d: map_key_clear(KEY_ADDRESSBOOK);	break;
 		case 0x18e: map_key_clear(KEY_CALENDAR);	break;
+		case 0x18f: map_key_clear(KEY_TASKMANAGER);	break;
+		case 0x190: map_key_clear(KEY_JOURNAL);		break;
 		case 0x191: map_key_clear(KEY_FINANCE);		break;
 		case 0x192: map_key_clear(KEY_CALC);		break;
 		case 0x193: map_key_clear(KEY_PLAYER);		break;
@@ -797,10 +808,16 @@
 		case 0x199: map_key_clear(KEY_CHAT);		break;
 		case 0x19c: map_key_clear(KEY_LOGOFF);		break;
 		case 0x19e: map_key_clear(KEY_COFFEE);		break;
+		case 0x19f: map_key_clear(KEY_CONTROLPANEL);		break;
+		case 0x1a2: map_key_clear(KEY_APPSELECT);		break;
+		case 0x1a3: map_key_clear(KEY_NEXT);		break;
+		case 0x1a4: map_key_clear(KEY_PREVIOUS);	break;
 		case 0x1a6: map_key_clear(KEY_HELP);		break;
 		case 0x1a7: map_key_clear(KEY_DOCUMENTS);	break;
 		case 0x1ab: map_key_clear(KEY_SPELLCHECK);	break;
 		case 0x1ae: map_key_clear(KEY_KEYBOARD);	break;
+		case 0x1b1: map_key_clear(KEY_SCREENSAVER);		break;
+		case 0x1b4: map_key_clear(KEY_FILE);		break;
 		case 0x1b6: map_key_clear(KEY_IMAGES);		break;
 		case 0x1b7: map_key_clear(KEY_AUDIO);		break;
 		case 0x1b8: map_key_clear(KEY_VIDEO);		break;
@@ -1164,6 +1181,77 @@
 }
 EXPORT_SYMBOL_GPL(hidinput_count_leds);
 
+static void hidinput_led_worker(struct work_struct *work)
+{
+	struct hid_device *hid = container_of(work, struct hid_device,
+					      led_work);
+	struct hid_field *field;
+	struct hid_report *report;
+	int len, ret;
+	__u8 *buf;
+
+	field = hidinput_get_led_field(hid);
+	if (!field)
+		return;
+
+	/*
+	 * field->report is accessed without holding any HID core lock, so an
+	 * incoming SET-LED request from user-space might concurrently change
+	 * the LED state while we assemble our outgoing buffer. However, this
+	 * doesn't matter as hid_output_report() correctly converts it into a
+	 * boolean value no matter what information is currently set on the LED
+	 * field (even garbage). So the remote device will always get a valid
+	 * request.
+	 * And even if we do send a stale value, a new LED worker is spawned
+	 * for every SET-LED request, so a following worker will send the
+	 * correct value.
+	 */
+
+	report = field->report;
+
+	/* use custom SET_REPORT request if possible (asynchronous) */
+	if (hid->ll_driver->request)
+		return hid->ll_driver->request(hid, report, HID_REQ_SET_REPORT);
+
+	/* fall back to generic raw-output-report */
+	len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
+	buf = hid_alloc_report_buf(report, GFP_KERNEL);
+	if (!buf)
+		return;
+
+	hid_output_report(report, buf);
+	/* synchronous output report */
+	ret = hid_hw_output_report(hid, buf, len);
+	if (ret == -ENOSYS)
+		hid_hw_raw_request(hid, report->id, buf, len, HID_OUTPUT_REPORT,
+				HID_REQ_SET_REPORT);
+	kfree(buf);
+}
+
+static int hidinput_input_event(struct input_dev *dev, unsigned int type,
+				unsigned int code, int value)
+{
+	struct hid_device *hid = input_get_drvdata(dev);
+	struct hid_field *field;
+	int offset;
+
+	if (type == EV_FF)
+		return input_ff_event(dev, type, code, value);
+
+	if (type != EV_LED)
+		return -1;
+
+	if ((offset = hidinput_find_field(hid, type, code, &field)) == -1) {
+		hid_warn(dev, "event field not found\n");
+		return -1;
+	}
+
+	hid_set_field(field, offset, value);
+
+	schedule_work(&hid->led_work);
+	return 0;
+}
+
 static int hidinput_open(struct input_dev *dev)
 {
 	struct hid_device *hid = input_get_drvdata(dev);
@@ -1215,7 +1303,7 @@
 	}
 
 	input_set_drvdata(input_dev, hid);
-	input_dev->event = hid->ll_driver->hidinput_input_event;
+	input_dev->event = hidinput_input_event;
 	input_dev->open = hidinput_open;
 	input_dev->close = hidinput_close;
 	input_dev->setkeycode = hidinput_setkeycode;
@@ -1310,6 +1398,7 @@
 	int i, j, k;
 
 	INIT_LIST_HEAD(&hid->inputs);
+	INIT_WORK(&hid->led_work, hidinput_led_worker);
 
 	if (!force) {
 		for (i = 0; i < hid->maxcollection; i++) {
@@ -1358,8 +1447,9 @@
 				 * UGCI) cram a lot of unrelated inputs into the
 				 * same interface. */
 				hidinput->report = report;
-				if (drv->input_configured)
-					drv->input_configured(hid, hidinput);
+				if (drv->input_configured &&
+				    drv->input_configured(hid, hidinput))
+					goto out_cleanup;
 				if (input_register_device(hidinput->input))
 					goto out_cleanup;
 				hidinput = NULL;
@@ -1380,8 +1470,9 @@
 	}
 
 	if (hidinput) {
-		if (drv->input_configured)
-			drv->input_configured(hid, hidinput);
+		if (drv->input_configured &&
+		    drv->input_configured(hid, hidinput))
+			goto out_cleanup;
 		if (input_register_device(hidinput->input))
 			goto out_cleanup;
 	}
@@ -1411,6 +1502,12 @@
 		input_unregister_device(hidinput->input);
 		kfree(hidinput);
 	}
+
+	/* led_work is spawned by input_dev callbacks, but doesn't access the
+	 * parent input_dev at all. Once all input devices are removed, we
+	 * know that led_work will never get restarted, so we can cancel it
+	 * synchronously and are safe. */
+	cancel_work_sync(&hid->led_work);
 }
 EXPORT_SYMBOL_GPL(hidinput_disconnect);
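
Taken together, these hunks give every transport one LED path instead of a
per-driver hidinput_input_event implementation. Condensed (a restatement of
the code above, not new API):

	/* input_event(dev, EV_LED, LED_CAPSL, 1)
	 *   -> hidinput_input_event()     atomic context, no I/O
	 *        hid_set_field(field, offset, 1);
	 *        schedule_work(&hid->led_work);
	 *   -> hidinput_led_worker()      process context, may sleep
	 *        ll_driver->request(), or hid_hw_output_report() with a
	 *        hid_hw_raw_request() fallback, sends the report out.
	 */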
 
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 12fc48c..f115375 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -692,7 +692,8 @@
 	if (hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) {
 		unsigned char buf[] = { 0x00, 0xAF,  0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
 
-		ret = hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT);
+		ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf),
+					HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
 
 		if (ret >= 0) {
 			/* insert a little delay of 10 jiffies ~ 40ms */
@@ -704,7 +705,8 @@
 			buf[1] = 0xB2;
 			get_random_bytes(&buf[2], 2);
 
-			ret = hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT);
+			ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf),
+					HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
 		}
 	}
 
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index d4c6d9f..896de25 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -44,14 +44,6 @@
 	0x19, 0xE0,		/*   USAGE_MINIMUM (Left Control)   */
 	0x29, 0xE7,		/*   USAGE_MAXIMUM (Right GUI)      */
 	0x81, 0x02,		/*   INPUT (Data,Var,Abs)       */
-	0x95, 0x05,		/*   REPORT COUNT (5)           */
-	0x05, 0x08,		/*   USAGE PAGE (LED page)      */
-	0x19, 0x01,		/*   USAGE MINIMUM (1)          */
-	0x29, 0x05,		/*   USAGE MAXIMUM (5)          */
-	0x91, 0x02,		/*   OUTPUT (Data, Variable, Absolute)  */
-	0x95, 0x01,		/*   REPORT COUNT (1)           */
-	0x75, 0x03,		/*   REPORT SIZE (3)            */
-	0x91, 0x01,		/*   OUTPUT (Constant)          */
 	0x95, 0x06,		/*   REPORT_COUNT (6)           */
 	0x75, 0x08,		/*   REPORT_SIZE (8)            */
 	0x15, 0x00,		/*   LOGICAL_MINIMUM (0)        */
@@ -60,6 +52,18 @@
 	0x19, 0x00,		/*   USAGE_MINIMUM (no event)       */
 	0x2A, 0xFF, 0x00,	/*   USAGE_MAXIMUM (reserved)       */
 	0x81, 0x00,		/*   INPUT (Data,Ary,Abs)       */
+	0x85, 0x0e,		/* REPORT_ID (14)               */
+	0x05, 0x08,		/*   USAGE PAGE (LED page)      */
+	0x95, 0x05,		/*   REPORT COUNT (5)           */
+	0x75, 0x01,		/*   REPORT SIZE (1)            */
+	0x15, 0x00,		/*   LOGICAL_MINIMUM (0)        */
+	0x25, 0x01,		/*   LOGICAL_MAXIMUM (1)        */
+	0x19, 0x01,		/*   USAGE MINIMUM (1)          */
+	0x29, 0x05,		/*   USAGE MAXIMUM (5)          */
+	0x91, 0x02,		/*   OUTPUT (Data, Variable, Absolute)  */
+	0x95, 0x01,		/*   REPORT COUNT (1)           */
+	0x75, 0x03,		/*   REPORT SIZE (3)            */
+	0x91, 0x01,		/*   OUTPUT (Constant)          */
 	0xC0
 };
 
@@ -251,7 +255,6 @@
 	}
 
 	dj_hiddev->ll_driver = &logi_dj_ll_driver;
-	dj_hiddev->hid_output_raw_report = logi_dj_output_hidraw_report;
 
 	dj_hiddev->dev.parent = &djrcv_hdev->dev;
 	dj_hiddev->bus = BUS_USB;
@@ -525,9 +528,10 @@
 	dbg_hid("%s:%s\n", __func__, hid->phys);
 }
 
-static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf,
-					size_t count,
-					unsigned char report_type)
+static int logi_dj_ll_raw_request(struct hid_device *hid,
+				  unsigned char reportnum, __u8 *buf,
+				  size_t count, unsigned char report_type,
+				  int reqtype)
 {
 	/* Called by hidraw to send data */
 	dbg_hid("%s\n", __func__);
@@ -598,58 +602,6 @@
 	return retval;
 }
 
-static int logi_dj_ll_input_event(struct input_dev *dev, unsigned int type,
-				  unsigned int code, int value)
-{
-	/* Sent by the input layer to handle leds and Force Feedback */
-	struct hid_device *dj_hiddev = input_get_drvdata(dev);
-	struct dj_device *dj_dev = dj_hiddev->driver_data;
-
-	struct dj_receiver_dev *djrcv_dev =
-	    dev_get_drvdata(dj_hiddev->dev.parent);
-	struct hid_device *dj_rcv_hiddev = djrcv_dev->hdev;
-	struct hid_report_enum *output_report_enum;
-
-	struct hid_field *field;
-	struct hid_report *report;
-	unsigned char *data;
-	int offset;
-
-	dbg_hid("%s: %s, type:%d | code:%d | value:%d\n",
-		__func__, dev->phys, type, code, value);
-
-	if (type != EV_LED)
-		return -1;
-
-	offset = hidinput_find_field(dj_hiddev, type, code, &field);
-
-	if (offset == -1) {
-		dev_warn(&dev->dev, "event field not found\n");
-		return -1;
-	}
-	hid_set_field(field, offset, value);
-
-	data = hid_alloc_report_buf(field->report, GFP_KERNEL);
-	if (!data) {
-		dev_warn(&dev->dev, "failed to allocate report buf memory\n");
-		return -1;
-	}
-
-	hid_output_report(field->report, &data[0]);
-
-	output_report_enum = &dj_rcv_hiddev->report_enum[HID_OUTPUT_REPORT];
-	report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT];
-	hid_set_field(report->field[0], 0, dj_dev->device_index);
-	hid_set_field(report->field[0], 1, REPORT_TYPE_LEDS);
-	hid_set_field(report->field[0], 2, data[1]);
-
-	hid_hw_request(dj_rcv_hiddev, report, HID_REQ_SET_REPORT);
-
-	kfree(data);
-
-	return 0;
-}
-
 static int logi_dj_ll_start(struct hid_device *hid)
 {
 	dbg_hid("%s\n", __func__);
@@ -668,7 +620,7 @@
 	.stop = logi_dj_ll_stop,
 	.open = logi_dj_ll_open,
 	.close = logi_dj_ll_close,
-	.hidinput_input_event = logi_dj_ll_input_event,
+	.raw_request = logi_dj_ll_raw_request,
 };
 
 
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index c24f3df..e539261 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -548,8 +548,8 @@
 	 * but there seems to be no other way of switching the mode.
 	 * Thus the super-ugly hacky success check below.
 	 */
-	ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
-			HID_FEATURE_REPORT);
+	ret = hid_hw_raw_request(hdev, feature[0], feature, sizeof(feature),
+				HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
 	if (ret != -EIO && ret != sizeof(feature)) {
 		hid_err(hdev, "unable to request touch data (%d)\n", ret);
 		goto err_stop_hw;
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 3d8e58a..fb9ac12 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -443,6 +443,16 @@
 	    (usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON)
 		td->mt_flags |= INPUT_MT_POINTER;
 
+	/* Only map fields from TouchScreen or TouchPad collections.
+	 * We need to ignore fields that belong to other collections
+	 * such as Mouse that might have the same GenericDesktop usages. */
+	if (field->application == HID_DG_TOUCHSCREEN)
+		set_bit(INPUT_PROP_DIRECT, hi->input->propbit);
+	else if (field->application == HID_DG_TOUCHPAD)
+		set_bit(INPUT_PROP_POINTER, hi->input->propbit);
+	else
+		return 0;
+
 	if (usage->usage_index)
 		prev_usage = &field->usage[usage->usage_index - 1];
 
@@ -772,12 +782,13 @@
 		mt_sync_frame(td, report->field[0]->hidinput->input);
 }
 
-static void mt_touch_input_configured(struct hid_device *hdev,
+static int mt_touch_input_configured(struct hid_device *hdev,
 					struct hid_input *hi)
 {
 	struct mt_device *td = hid_get_drvdata(hdev);
 	struct mt_class *cls = &td->mtclass;
 	struct input_dev *input = hi->input;
+	int ret;
 
 	if (!td->maxcontacts)
 		td->maxcontacts = MT_DEFAULT_MAXCONTACT;
@@ -792,9 +803,12 @@
 	if (cls->quirks & MT_QUIRK_NOT_SEEN_MEANS_UP)
 		td->mt_flags |= INPUT_MT_DROP_UNUSED;
 
-	input_mt_init_slots(input, td->maxcontacts, td->mt_flags);
+	ret = input_mt_init_slots(input, td->maxcontacts, td->mt_flags);
+	if (ret)
+		return ret;
 
 	td->mt_flags = 0;
+	return 0;
 }
 
 static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
@@ -927,19 +941,21 @@
 		cls->quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE;
 }
 
-static void mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
+static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
 {
 	struct mt_device *td = hid_get_drvdata(hdev);
 	char *name = kstrdup(hdev->name, GFP_KERNEL);
+	int ret = 0;
 
 	if (name)
 		hi->input->name = name;
 
 	if (hi->report->id == td->mt_report_id)
-		mt_touch_input_configured(hdev, hi);
+		ret = mt_touch_input_configured(hdev, hi);
 
 	if (hi->report->id == td->pen_report_id)
 		mt_pen_input_configured(hdev, hi);
+	return ret;
 }
 
 static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
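
Because ->input_configured() now returns int, hidinput_connect() aborts and
cleans up when a driver's callback fails instead of registering a
half-configured device. A sketch of a callback under the new contract, with
the slot count and flags picked arbitrarily:

	static int example_input_configured(struct hid_device *hdev,
					    struct hid_input *hi)
	{
		int ret;

		ret = input_mt_init_slots(hi->input, 10, INPUT_MT_DIRECT);
		if (ret)
			return ret;	/* propagated to hidinput_connect() */

		return 0;
	}
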
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 312098e..8004470 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -15,51 +15,386 @@
  * any later version.
  */
 
+/*
+ * NOTE: in order for the Sony PS3 BD Remote Control to be found by
+ * a Bluetooth host, the key combination Start+Enter has to be kept pressed
+ * for about 7 seconds with the Bluetooth Host Controller in discovery mode.
+ *
+ * There will be no PIN request from the device.
+ */
+
 #include <linux/device.h>
 #include <linux/hid.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/leds.h>
+#include <linux/power_supply.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/idr.h>
+#include <linux/input/mt.h>
 #include <linux/usb.h>
 
 #include "hid-ids.h"
 
-#define VAIO_RDESC_CONSTANT     (1 << 0)
-#define SIXAXIS_CONTROLLER_USB  (1 << 1)
-#define SIXAXIS_CONTROLLER_BT   (1 << 2)
+#define VAIO_RDESC_CONSTANT       BIT(0)
+#define SIXAXIS_CONTROLLER_USB    BIT(1)
+#define SIXAXIS_CONTROLLER_BT     BIT(2)
+#define BUZZ_CONTROLLER           BIT(3)
+#define PS3REMOTE                 BIT(4)
 
-static const u8 sixaxis_rdesc_fixup[] = {
-	0x95, 0x13, 0x09, 0x01, 0x81, 0x02, 0x95, 0x0C,
-	0x81, 0x01, 0x75, 0x10, 0x95, 0x04, 0x26, 0xFF,
-	0x03, 0x46, 0xFF, 0x03, 0x09, 0x01, 0x81, 0x02
+#define SIXAXIS_CONTROLLER (SIXAXIS_CONTROLLER_USB | SIXAXIS_CONTROLLER_BT)
+#define SONY_LED_SUPPORT (SIXAXIS_CONTROLLER | BUZZ_CONTROLLER)
+#define SONY_BATTERY_SUPPORT (SIXAXIS_CONTROLLER)
+
+#define MAX_LEDS 4
+
+static __u8 sixaxis_rdesc[] = {
+	0x05, 0x01,         /*  Usage Page (Desktop),               */
+	0x09, 0x04,         /*  Usage (Joystick),                   */
+	0xA1, 0x01,         /*  Collection (Application),           */
+	0xA1, 0x02,         /*      Collection (Logical),           */
+	0x85, 0x01,         /*          Report ID (1),              */
+	0x75, 0x08,         /*          Report Size (8),            */
+	0x95, 0x01,         /*          Report Count (1),           */
+	0x15, 0x00,         /*          Logical Minimum (0),        */
+	0x26, 0xFF, 0x00,   /*          Logical Maximum (255),      */
+	0x81, 0x03,         /*          Input (Constant, Variable), */
+	0x75, 0x01,         /*          Report Size (1),            */
+	0x95, 0x13,         /*          Report Count (19),          */
+	0x15, 0x00,         /*          Logical Minimum (0),        */
+	0x25, 0x01,         /*          Logical Maximum (1),        */
+	0x35, 0x00,         /*          Physical Minimum (0),       */
+	0x45, 0x01,         /*          Physical Maximum (1),       */
+	0x05, 0x09,         /*          Usage Page (Button),        */
+	0x19, 0x01,         /*          Usage Minimum (01h),        */
+	0x29, 0x13,         /*          Usage Maximum (13h),        */
+	0x81, 0x02,         /*          Input (Variable),           */
+	0x75, 0x01,         /*          Report Size (1),            */
+	0x95, 0x0D,         /*          Report Count (13),          */
+	0x06, 0x00, 0xFF,   /*          Usage Page (FF00h),         */
+	0x81, 0x03,         /*          Input (Constant, Variable), */
+	0x15, 0x00,         /*          Logical Minimum (0),        */
+	0x26, 0xFF, 0x00,   /*          Logical Maximum (255),      */
+	0x05, 0x01,         /*          Usage Page (Desktop),       */
+	0x09, 0x01,         /*          Usage (Pointer),            */
+	0xA1, 0x00,         /*          Collection (Physical),      */
+	0x75, 0x08,         /*              Report Size (8),        */
+	0x95, 0x04,         /*              Report Count (4),       */
+	0x35, 0x00,         /*              Physical Minimum (0),   */
+	0x46, 0xFF, 0x00,   /*              Physical Maximum (255), */
+	0x09, 0x30,         /*              Usage (X),              */
+	0x09, 0x31,         /*              Usage (Y),              */
+	0x09, 0x32,         /*              Usage (Z),              */
+	0x09, 0x35,         /*              Usage (Rz),             */
+	0x81, 0x02,         /*              Input (Variable),       */
+	0xC0,               /*          End Collection,             */
+	0x05, 0x01,         /*          Usage Page (Desktop),       */
+	0x95, 0x13,         /*          Report Count (19),          */
+	0x09, 0x01,         /*          Usage (Pointer),            */
+	0x81, 0x02,         /*          Input (Variable),           */
+	0x95, 0x0C,         /*          Report Count (12),          */
+	0x81, 0x01,         /*          Input (Constant),           */
+	0x75, 0x10,         /*          Report Size (16),           */
+	0x95, 0x04,         /*          Report Count (4),           */
+	0x26, 0xFF, 0x03,   /*          Logical Maximum (1023),     */
+	0x46, 0xFF, 0x03,   /*          Physical Maximum (1023),    */
+	0x09, 0x01,         /*          Usage (Pointer),            */
+	0x81, 0x02,         /*          Input (Variable),           */
+	0xC0,               /*      End Collection,                 */
+	0xA1, 0x02,         /*      Collection (Logical),           */
+	0x85, 0x02,         /*          Report ID (2),              */
+	0x75, 0x08,         /*          Report Size (8),            */
+	0x95, 0x30,         /*          Report Count (48),          */
+	0x09, 0x01,         /*          Usage (Pointer),            */
+	0xB1, 0x02,         /*          Feature (Variable),         */
+	0xC0,               /*      End Collection,                 */
+	0xA1, 0x02,         /*      Collection (Logical),           */
+	0x85, 0xEE,         /*          Report ID (238),            */
+	0x75, 0x08,         /*          Report Size (8),            */
+	0x95, 0x30,         /*          Report Count (48),          */
+	0x09, 0x01,         /*          Usage (Pointer),            */
+	0xB1, 0x02,         /*          Feature (Variable),         */
+	0xC0,               /*      End Collection,                 */
+	0xA1, 0x02,         /*      Collection (Logical),           */
+	0x85, 0xEF,         /*          Report ID (239),            */
+	0x75, 0x08,         /*          Report Size (8),            */
+	0x95, 0x30,         /*          Report Count (48),          */
+	0x09, 0x01,         /*          Usage (Pointer),            */
+	0xB1, 0x02,         /*          Feature (Variable),         */
+	0xC0,               /*      End Collection,                 */
+	0xC0                /*  End Collection                      */
 };
 
-static const u8 sixaxis_rdesc_fixup2[] = {
-	0x05, 0x01, 0x09, 0x04, 0xa1, 0x01, 0xa1, 0x02,
-	0x85, 0x01, 0x75, 0x08, 0x95, 0x01, 0x15, 0x00,
-	0x26, 0xff, 0x00, 0x81, 0x03, 0x75, 0x01, 0x95,
-	0x13, 0x15, 0x00, 0x25, 0x01, 0x35, 0x00, 0x45,
-	0x01, 0x05, 0x09, 0x19, 0x01, 0x29, 0x13, 0x81,
-	0x02, 0x75, 0x01, 0x95, 0x0d, 0x06, 0x00, 0xff,
-	0x81, 0x03, 0x15, 0x00, 0x26, 0xff, 0x00, 0x05,
-	0x01, 0x09, 0x01, 0xa1, 0x00, 0x75, 0x08, 0x95,
-	0x04, 0x35, 0x00, 0x46, 0xff, 0x00, 0x09, 0x30,
-	0x09, 0x31, 0x09, 0x32, 0x09, 0x35, 0x81, 0x02,
-	0xc0, 0x05, 0x01, 0x95, 0x13, 0x09, 0x01, 0x81,
-	0x02, 0x95, 0x0c, 0x81, 0x01, 0x75, 0x10, 0x95,
-	0x04, 0x26, 0xff, 0x03, 0x46, 0xff, 0x03, 0x09,
-	0x01, 0x81, 0x02, 0xc0, 0xa1, 0x02, 0x85, 0x02,
-	0x75, 0x08, 0x95, 0x30, 0x09, 0x01, 0xb1, 0x02,
-	0xc0, 0xa1, 0x02, 0x85, 0xee, 0x75, 0x08, 0x95,
-	0x30, 0x09, 0x01, 0xb1, 0x02, 0xc0, 0xa1, 0x02,
-	0x85, 0xef, 0x75, 0x08, 0x95, 0x30, 0x09, 0x01,
-	0xb1, 0x02, 0xc0, 0xc0,
+static __u8 ps3remote_rdesc[] = {
+	0x05, 0x01,          /* GUsagePage Generic Desktop */
+	0x09, 0x05,          /* LUsage 0x05 [Game Pad] */
+	0xA1, 0x01,          /* MCollection Application (mouse, keyboard) */
+
+	 /* Use collection 1 for joypad buttons */
+	 0xA1, 0x02,         /* MCollection Logical (interrelated data) */
+
+	  /* Ignore the 1st byte, maybe it is used for a controller
+	   * number but it's not needed for correct operation */
+	  0x75, 0x08,        /* GReportSize 0x08 [8] */
+	  0x95, 0x01,        /* GReportCount 0x01 [1] */
+	  0x81, 0x01,        /* MInput 0x01 (Const[0] Arr[1] Abs[2]) */
+
+	  /* Bytes from 2nd to 4th are a bitmap for joypad buttons, for these
+	   * buttons multiple keypresses are allowed */
+	  0x05, 0x09,        /* GUsagePage Button */
+	  0x19, 0x01,        /* LUsageMinimum 0x01 [Button 1 (primary/trigger)] */
+	  0x29, 0x18,        /* LUsageMaximum 0x18 [Button 24] */
+	  0x14,              /* GLogicalMinimum [0] */
+	  0x25, 0x01,        /* GLogicalMaximum 0x01 [1] */
+	  0x75, 0x01,        /* GReportSize 0x01 [1] */
+	  0x95, 0x18,        /* GReportCount 0x18 [24] */
+	  0x81, 0x02,        /* MInput 0x02 (Data[0] Var[1] Abs[2]) */
+
+	  0xC0,              /* MEndCollection */
+
+	 /* Use collection 2 for remote control buttons */
+	 0xA1, 0x02,         /* MCollection Logical (interrelated data) */
+
+	  /* 5th byte is used for remote control buttons */
+	  0x05, 0x09,        /* GUsagePage Button */
+	  0x18,              /* LUsageMinimum [No button pressed] */
+	  0x29, 0xFE,        /* LUsageMaximum 0xFE [Button 254] */
+	  0x14,              /* GLogicalMinimum [0] */
+	  0x26, 0xFE, 0x00,  /* GLogicalMaximum 0x00FE [254] */
+	  0x75, 0x08,        /* GReportSize 0x08 [8] */
+	  0x95, 0x01,        /* GReportCount 0x01 [1] */
+	  0x80,              /* MInput  */
+
+	  /* Ignore bytes from 6th to 11th, 6th to 10th are always constant at
+	   * 0xff and 11th is for press indication */
+	  0x75, 0x08,        /* GReportSize 0x08 [8] */
+	  0x95, 0x06,        /* GReportCount 0x06 [6] */
+	  0x81, 0x01,        /* MInput 0x01 (Const[0] Arr[1] Abs[2]) */
+
+	  /* 12th byte is for battery strength */
+	  0x05, 0x06,        /* GUsagePage Generic Device Controls */
+	  0x09, 0x20,        /* LUsage 0x20 [Battery Strength] */
+	  0x14,              /* GLogicalMinimum [0] */
+	  0x25, 0x05,        /* GLogicalMaximum 0x05 [5] */
+	  0x75, 0x08,        /* GReportSize 0x08 [8] */
+	  0x95, 0x01,        /* GReportCount 0x01 [1] */
+	  0x81, 0x02,        /* MInput 0x02 (Data[0] Var[1] Abs[2]) */
+
+	  0xC0,              /* MEndCollection */
+
+	 0xC0                /* MEndCollection [Game Pad] */
 };
 
+static const unsigned int ps3remote_keymap_joypad_buttons[] = {
+	[0x01] = KEY_SELECT,
+	[0x02] = BTN_THUMBL,		/* L3 */
+	[0x03] = BTN_THUMBR,		/* R3 */
+	[0x04] = BTN_START,
+	[0x05] = KEY_UP,
+	[0x06] = KEY_RIGHT,
+	[0x07] = KEY_DOWN,
+	[0x08] = KEY_LEFT,
+	[0x09] = BTN_TL2,		/* L2 */
+	[0x0a] = BTN_TR2,		/* R2 */
+	[0x0b] = BTN_TL,		/* L1 */
+	[0x0c] = BTN_TR,		/* R1 */
+	[0x0d] = KEY_OPTION,		/* options/triangle */
+	[0x0e] = KEY_BACK,		/* back/circle */
+	[0x0f] = BTN_0,			/* cross */
+	[0x10] = KEY_SCREEN,		/* view/square */
+	[0x11] = KEY_HOMEPAGE,		/* PS button */
+	[0x14] = KEY_ENTER,
+};
+static const unsigned int ps3remote_keymap_remote_buttons[] = {
+	[0x00] = KEY_1,
+	[0x01] = KEY_2,
+	[0x02] = KEY_3,
+	[0x03] = KEY_4,
+	[0x04] = KEY_5,
+	[0x05] = KEY_6,
+	[0x06] = KEY_7,
+	[0x07] = KEY_8,
+	[0x08] = KEY_9,
+	[0x09] = KEY_0,
+	[0x0e] = KEY_ESC,		/* return */
+	[0x0f] = KEY_CLEAR,
+	[0x16] = KEY_EJECTCD,
+	[0x1a] = KEY_MENU,		/* top menu */
+	[0x28] = KEY_TIME,
+	[0x30] = KEY_PREVIOUS,
+	[0x31] = KEY_NEXT,
+	[0x32] = KEY_PLAY,
+	[0x33] = KEY_REWIND,		/* scan back */
+	[0x34] = KEY_FORWARD,		/* scan forward */
+	[0x38] = KEY_STOP,
+	[0x39] = KEY_PAUSE,
+	[0x40] = KEY_CONTEXT_MENU,	/* pop up/menu */
+	[0x60] = KEY_FRAMEBACK,		/* slow/step back */
+	[0x61] = KEY_FRAMEFORWARD,	/* slow/step forward */
+	[0x63] = KEY_SUBTITLE,
+	[0x64] = KEY_AUDIO,
+	[0x65] = KEY_ANGLE,
+	[0x70] = KEY_INFO,		/* display */
+	[0x80] = KEY_BLUE,
+	[0x81] = KEY_RED,
+	[0x82] = KEY_GREEN,
+	[0x83] = KEY_YELLOW,
+};
+
+static const unsigned int buzz_keymap[] = {
+	/*
+	 * The controller has 4 remote buzzers, each with one LED and 5
+	 * buttons.
+	 *
+	 * We use the mapping chosen by the controller, which is:
+	 *
+	 * Key          Offset
+	 * -------------------
+	 * Buzz              1
+	 * Blue              5
+	 * Orange            4
+	 * Green             3
+	 * Yellow            2
+	 *
+	 * So, for example, the orange button on the third buzzer is mapped to
+	 * BTN_TRIGGER_HAPPY14
+	 */
+	[ 1] = BTN_TRIGGER_HAPPY1,
+	[ 2] = BTN_TRIGGER_HAPPY2,
+	[ 3] = BTN_TRIGGER_HAPPY3,
+	[ 4] = BTN_TRIGGER_HAPPY4,
+	[ 5] = BTN_TRIGGER_HAPPY5,
+	[ 6] = BTN_TRIGGER_HAPPY6,
+	[ 7] = BTN_TRIGGER_HAPPY7,
+	[ 8] = BTN_TRIGGER_HAPPY8,
+	[ 9] = BTN_TRIGGER_HAPPY9,
+	[10] = BTN_TRIGGER_HAPPY10,
+	[11] = BTN_TRIGGER_HAPPY11,
+	[12] = BTN_TRIGGER_HAPPY12,
+	[13] = BTN_TRIGGER_HAPPY13,
+	[14] = BTN_TRIGGER_HAPPY14,
+	[15] = BTN_TRIGGER_HAPPY15,
+	[16] = BTN_TRIGGER_HAPPY16,
+	[17] = BTN_TRIGGER_HAPPY17,
+	[18] = BTN_TRIGGER_HAPPY18,
+	[19] = BTN_TRIGGER_HAPPY19,
+	[20] = BTN_TRIGGER_HAPPY20,
+};
+
+static enum power_supply_property sony_battery_props[] = {
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_SCOPE,
+	POWER_SUPPLY_PROP_STATUS,
+};
+
+struct sixaxis_led {
+	__u8 time_enabled; /* the total time the led is active (0xff means forever) */
+	__u8 duty_length;  /* how long a cycle is in deciseconds (0 means "really fast") */
+	__u8 enabled;
+	__u8 duty_off; /* % of duty_length the led is off (0xff means 100%) */
+	__u8 duty_on;  /* % of duty_length the led is on (0xff means 100%) */
+} __packed;
+
+struct sixaxis_rumble {
+	__u8 padding;
+	__u8 right_duration; /* Right motor duration (0xff means forever) */
+	__u8 right_motor_on; /* Right (small) motor on/off, only supports values of 0 or 1 (off/on) */
+	__u8 left_duration;    /* Left motor duration (0xff means forever) */
+	__u8 left_motor_force; /* left (large) motor, supports force values from 0 to 255 */
+} __packed;
+
+struct sixaxis_output_report {
+	__u8 report_id;
+	struct sixaxis_rumble rumble;
+	__u8 padding[4];
+	__u8 leds_bitmap; /* bitmap of enabled LEDs: LED_1 = 0x02, LED_2 = 0x04, ... */
+	struct sixaxis_led led[4];    /* LEDx at (4 - x) */
+	struct sixaxis_led _reserved; /* LED5, not actually soldered */
+} __packed;
+
+union sixaxis_output_report_01 {
+	struct sixaxis_output_report data;
+	__u8 buf[36];
+};
+
+#define SIXAXIS_REPORT_0xF2_SIZE 18
+
+static DEFINE_SPINLOCK(sony_dev_list_lock);
+static LIST_HEAD(sony_device_list);
+static DEFINE_IDA(sony_device_id_allocator);
+
 struct sony_sc {
+	spinlock_t lock;
+	struct list_head list_node;
+	struct hid_device *hdev;
+	struct led_classdev *leds[MAX_LEDS];
 	unsigned long quirks;
+	struct work_struct state_worker;
+	struct power_supply battery;
+	int device_id;
+	__u8 *output_report_dmabuf;
+
+	__u8 mac_address[6];
+	__u8 worker_initialized;
+	__u8 cable_state;
+	__u8 battery_charging;
+	__u8 battery_capacity;
+	__u8 led_state[MAX_LEDS];
+	__u8 led_delay_on[MAX_LEDS];
+	__u8 led_delay_off[MAX_LEDS];
+	__u8 led_count;
 };
 
-/* Sony Vaio VGX has wrongly mouse pointer declared as constant */
+static __u8 *sixaxis_fixup(struct hid_device *hdev, __u8 *rdesc,
+			     unsigned int *rsize)
+{
+	*rsize = sizeof(sixaxis_rdesc);
+	return sixaxis_rdesc;
+}
+
+static __u8 *ps3remote_fixup(struct hid_device *hdev, __u8 *rdesc,
+			     unsigned int *rsize)
+{
+	*rsize = sizeof(ps3remote_rdesc);
+	return ps3remote_rdesc;
+}
+
+static int ps3remote_mapping(struct hid_device *hdev, struct hid_input *hi,
+			     struct hid_field *field, struct hid_usage *usage,
+			     unsigned long **bit, int *max)
+{
+	unsigned int key = usage->hid & HID_USAGE;
+
+	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_BUTTON)
+		return -1;
+
+	switch (usage->collection_index) {
+	case 1:
+		if (key >= ARRAY_SIZE(ps3remote_keymap_joypad_buttons))
+			return -1;
+
+		key = ps3remote_keymap_joypad_buttons[key];
+		if (!key)
+			return -1;
+		break;
+	case 2:
+		if (key >= ARRAY_SIZE(ps3remote_keymap_remote_buttons))
+			return -1;
+
+		key = ps3remote_keymap_remote_buttons[key];
+		if (!key)
+			return -1;
+		break;
+	default:
+		return -1;
+	}
+
+	hid_map_usage_clear(hi, usage, bit, max, EV_KEY, key);
+	return 1;
+}
+
 static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
 		unsigned int *rsize)
 {
@@ -81,79 +416,100 @@
 		rdesc[55] = 0x06;
 	}
 
-	/* The HID descriptor exposed over BT has a trailing zero byte */
-	if ((((sc->quirks & SIXAXIS_CONTROLLER_USB) && *rsize == 148) ||
-			((sc->quirks & SIXAXIS_CONTROLLER_BT) && *rsize == 149)) &&
-			rdesc[83] == 0x75) {
-		hid_info(hdev, "Fixing up Sony Sixaxis report descriptor\n");
-		memcpy((void *)&rdesc[83], (void *)&sixaxis_rdesc_fixup,
-			sizeof(sixaxis_rdesc_fixup));
-	} else if (sc->quirks & SIXAXIS_CONTROLLER_USB &&
-		   *rsize > sizeof(sixaxis_rdesc_fixup2)) {
-		hid_info(hdev, "Sony Sixaxis clone detected. Using original report descriptor (size: %d clone; %d new)\n",
-			 *rsize, (int)sizeof(sixaxis_rdesc_fixup2));
-		*rsize = sizeof(sixaxis_rdesc_fixup2);
-		memcpy(rdesc, &sixaxis_rdesc_fixup2, *rsize);
-	}
+	if (sc->quirks & SIXAXIS_CONTROLLER)
+		return sixaxis_fixup(hdev, rdesc, rsize);
+
+	if (sc->quirks & PS3REMOTE)
+		return ps3remote_fixup(hdev, rdesc, rsize);
+
 	return rdesc;
 }
 
+static void sixaxis_parse_report(struct sony_sc *sc, __u8 *rd, int size)
+{
+	static const __u8 sixaxis_battery_capacity[] = { 0, 1, 25, 50, 75, 100 };
+	unsigned long flags;
+	__u8 cable_state, battery_capacity, battery_charging;
+
+	/*
+	 * The sixaxis is charging if the battery value is 0xee
+	 * and it is fully charged if the value is 0xef.
+	 * It does not report the actual level while charging so it
+	 * is set to 100% while charging is in progress.
+	 */
+	if (rd[30] >= 0xee) {
+		battery_capacity = 100;
+		battery_charging = !(rd[30] & 0x01);
+		cable_state = 1;
+	} else {
+		__u8 index = rd[30] <= 5 ? rd[30] : 5;
+		battery_capacity = sixaxis_battery_capacity[index];
+		battery_charging = 0;
+		cable_state = 0;
+	}
+
+	spin_lock_irqsave(&sc->lock, flags);
+	sc->cable_state = cable_state;
+	sc->battery_capacity = battery_capacity;
+	sc->battery_charging = battery_charging;
+	spin_unlock_irqrestore(&sc->lock, flags);
+}
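
Some worked values for the decoding above:

	/* rd[30] = 0x03 -> index 3 -> 50% capacity, on battery
	 * rd[30] = 0x05 -> index 5 -> 100% capacity, on battery
	 * rd[30] = 0xee -> bit 0 clear -> charging, shown as 100%, on cable
	 * rd[30] = 0xef -> bit 0 set -> fully charged, 100%, on cable
	 */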
+
 static int sony_raw_event(struct hid_device *hdev, struct hid_report *report,
 		__u8 *rd, int size)
 {
 	struct sony_sc *sc = hid_get_drvdata(hdev);
 
-	/* Sixaxis HID report has acclerometers/gyro with MSByte first, this
+	/*
+	 * Sixaxis HID report has accelerometers/gyro with MSByte first, this
 	 * has to be BYTE_SWAPPED before passing up to joystick interface
 	 */
-	if ((sc->quirks & (SIXAXIS_CONTROLLER_USB | SIXAXIS_CONTROLLER_BT)) &&
-			rd[0] == 0x01 && size == 49) {
+	if ((sc->quirks & SIXAXIS_CONTROLLER) && rd[0] == 0x01 && size == 49) {
 		swap(rd[41], rd[42]);
 		swap(rd[43], rd[44]);
 		swap(rd[45], rd[46]);
 		swap(rd[47], rd[48]);
+
+		sixaxis_parse_report(sc, rd, size);
 	}
 
 	return 0;
 }
 
-/*
- * The Sony Sixaxis does not handle HID Output Reports on the Interrupt EP
- * like it should according to usbhid/hid-core.c::usbhid_output_raw_report()
- * so we need to override that forcing HID Output Reports on the Control EP.
- *
- * There is also another issue about HID Output Reports via USB, the Sixaxis
- * does not want the report_id as part of the data packet, so we have to
- * discard buf[0] when sending the actual control message, even for numbered
- * reports, humpf!
- */
-static int sixaxis_usb_output_raw_report(struct hid_device *hid, __u8 *buf,
-		size_t count, unsigned char report_type)
+static int sony_mapping(struct hid_device *hdev, struct hid_input *hi,
+			struct hid_field *field, struct hid_usage *usage,
+			unsigned long **bit, int *max)
 {
-	struct usb_interface *intf = to_usb_interface(hid->dev.parent);
-	struct usb_device *dev = interface_to_usbdev(intf);
-	struct usb_host_interface *interface = intf->cur_altsetting;
-	int report_id = buf[0];
-	int ret;
+	struct sony_sc *sc = hid_get_drvdata(hdev);
 
-	if (report_type == HID_OUTPUT_REPORT) {
-		/* Don't send the Report ID */
-		buf++;
-		count--;
+	if (sc->quirks & BUZZ_CONTROLLER) {
+		unsigned int key = usage->hid & HID_USAGE;
+
+		if ((usage->hid & HID_USAGE_PAGE) != HID_UP_BUTTON)
+			return -1;
+
+		switch (usage->collection_index) {
+		case 1:
+			if (key >= ARRAY_SIZE(buzz_keymap))
+				return -1;
+
+			key = buzz_keymap[key];
+			if (!key)
+				return -1;
+			break;
+		default:
+			return -1;
+		}
+
+		hid_map_usage_clear(hi, usage, bit, max, EV_KEY, key);
+		return 1;
 	}
 
-	ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
-		HID_REQ_SET_REPORT,
-		USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
-		((report_type + 1) << 8) | report_id,
-		interface->desc.bInterfaceNumber, buf, count,
-		USB_CTRL_SET_TIMEOUT);
+	if (sc->quirks & PS3REMOTE)
+		return ps3remote_mapping(hdev, hi, field, usage, bit, max);
 
-	/* Count also the Report ID, in case of an Output report. */
-	if (ret > 0 && report_type == HID_OUTPUT_REPORT)
-		ret++;
-
-	return ret;
+	/* Let hid-core decide for the others */
+	return 0;
 }
 
 /*
@@ -163,21 +519,15 @@
  */
 static int sixaxis_set_operational_usb(struct hid_device *hdev)
 {
-	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
-	struct usb_device *dev = interface_to_usbdev(intf);
-	__u16 ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
 	int ret;
 	char *buf = kmalloc(18, GFP_KERNEL);
 
 	if (!buf)
 		return -ENOMEM;
 
-	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
-				 HID_REQ_GET_REPORT,
-				 USB_DIR_IN | USB_TYPE_CLASS |
-				 USB_RECIP_INTERFACE,
-				 (3 << 8) | 0xf2, ifnum, buf, 17,
-				 USB_CTRL_GET_TIMEOUT);
+	ret = hid_hw_raw_request(hdev, 0xf2, buf, 17, HID_FEATURE_REPORT,
+				 HID_REQ_GET_REPORT);
+
 	if (ret < 0)
 		hid_err(hdev, "can't set operational mode\n");
 
@@ -188,8 +538,618 @@
 
 static int sixaxis_set_operational_bt(struct hid_device *hdev)
 {
-	unsigned char buf[] = { 0xf4,  0x42, 0x03, 0x00, 0x00 };
-	return hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT);
+	static const __u8 report[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 };
+	__u8 *buf;
+	int ret;
+
+	buf = kmemdup(report, sizeof(report), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(report),
+				  HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+
+	kfree(buf);
+
+	return ret;
+}
+
+static void sixaxis_set_leds_from_id(int id, __u8 values[MAX_LEDS])
+{
+	static const __u8 sixaxis_leds[10][4] = {
+				{ 0x01, 0x00, 0x00, 0x00 },
+				{ 0x00, 0x01, 0x00, 0x00 },
+				{ 0x00, 0x00, 0x01, 0x00 },
+				{ 0x00, 0x00, 0x00, 0x01 },
+				{ 0x01, 0x00, 0x00, 0x01 },
+				{ 0x00, 0x01, 0x00, 0x01 },
+				{ 0x00, 0x00, 0x01, 0x01 },
+				{ 0x01, 0x00, 0x01, 0x01 },
+				{ 0x00, 0x01, 0x01, 0x01 },
+				{ 0x01, 0x01, 0x01, 0x01 }
+	};
+
+	BUG_ON(MAX_LEDS < ARRAY_SIZE(sixaxis_leds[0]));
+
+	if (id < 0)
+		return;
+
+	id %= 10;
+	memcpy(values, sixaxis_leds[id], sizeof(sixaxis_leds[id]));
+}
+
+static void buzz_set_leds(struct hid_device *hdev, const __u8 *leds)
+{
+	struct list_head *report_list =
+		&hdev->report_enum[HID_OUTPUT_REPORT].report_list;
+	struct hid_report *report = list_entry(report_list->next,
+		struct hid_report, list);
+	__s32 *value = report->field[0]->value;
+
+	value[0] = 0x00;
+	value[1] = leds[0] ? 0xff : 0x00;
+	value[2] = leds[1] ? 0xff : 0x00;
+	value[3] = leds[2] ? 0xff : 0x00;
+	value[4] = leds[3] ? 0xff : 0x00;
+	value[5] = 0x00;
+	value[6] = 0x00;
+	hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
+}
+
+static void sony_set_leds(struct sony_sc *sc, const __u8 *leds, int count)
+{
+	int n;
+
+	BUG_ON(count > MAX_LEDS);
+
+	if (sc->quirks & BUZZ_CONTROLLER && count == 4) {
+		buzz_set_leds(sc->hdev, leds);
+	} else {
+		for (n = 0; n < count; n++)
+			sc->led_state[n] = leds[n];
+		schedule_work(&sc->state_worker);
+	}
+}
+
+static void sony_led_set_brightness(struct led_classdev *led,
+				    enum led_brightness value)
+{
+	struct device *dev = led->dev->parent;
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct sony_sc *drv_data;
+
+	int n;
+	int force_update;
+
+	drv_data = hid_get_drvdata(hdev);
+	if (!drv_data) {
+		hid_err(hdev, "No device data\n");
+		return;
+	}
+
+	/*
+	 * The Sixaxis on USB will override any LED settings sent to it
+	 * and keep flashing all of the LEDs until the PS button is pressed.
+	 * Updates, even if redundant, must always be sent to the
+	 * controller to avoid having to toggle the state of an LED just to
+	 * stop the flashing later on.
+	 */
+	force_update = !!(drv_data->quirks & SIXAXIS_CONTROLLER_USB);
+
+	for (n = 0; n < drv_data->led_count; n++) {
+		if (led == drv_data->leds[n] && (force_update ||
+			(value != drv_data->led_state[n] ||
+			drv_data->led_delay_on[n] ||
+			drv_data->led_delay_off[n]))) {
+
+			drv_data->led_state[n] = value;
+
+			/* Setting the brightness stops the blinking */
+			drv_data->led_delay_on[n] = 0;
+			drv_data->led_delay_off[n] = 0;
+
+			sony_set_leds(drv_data, drv_data->led_state,
+					drv_data->led_count);
+			break;
+		}
+	}
+}
+
+static enum led_brightness sony_led_get_brightness(struct led_classdev *led)
+{
+	struct device *dev = led->dev->parent;
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct sony_sc *drv_data;
+
+	int n;
+
+	drv_data = hid_get_drvdata(hdev);
+	if (!drv_data) {
+		hid_err(hdev, "No device data\n");
+		return LED_OFF;
+	}
+
+	for (n = 0; n < drv_data->led_count; n++) {
+		if (led == drv_data->leds[n])
+			return drv_data->led_state[n];
+	}
+
+	return LED_OFF;
+}
+
+static int sony_led_blink_set(struct led_classdev *led, unsigned long *delay_on,
+				unsigned long *delay_off)
+{
+	struct device *dev = led->dev->parent;
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct sony_sc *drv_data = hid_get_drvdata(hdev);
+	int n;
+	__u8 new_on, new_off;
+
+	if (!drv_data) {
+		hid_err(hdev, "No device data\n");
+		return -EINVAL;
+	}
+
+	/* Max delay is 2550 ms, i.e. 255 hardware units of 10 ms */
+	if (*delay_on > 2550)
+		*delay_on = 2550;
+	if (*delay_off > 2550)
+		*delay_off = 2550;
+
+	/* Blink at 1 Hz if both values are zero */
+	if (!*delay_on && !*delay_off)
+		*delay_on = *delay_off = 500;
+
+	new_on = *delay_on / 10;
+	new_off = *delay_off / 10;
+
+	for (n = 0; n < drv_data->led_count; n++) {
+		if (led == drv_data->leds[n])
+			break;
+	}
+
+	/* This LED is not registered on this device */
+	if (n >= drv_data->led_count)
+		return -EINVAL;
+
+	/* Don't schedule work if the values didn't change */
+	if (new_on != drv_data->led_delay_on[n] ||
+		new_off != drv_data->led_delay_off[n]) {
+		drv_data->led_delay_on[n] = new_on;
+		drv_data->led_delay_off[n] = new_off;
+		schedule_work(&drv_data->state_worker);
+	}
+
+	return 0;
+}
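
Worked values for the clamping and conversion above:

	/* delay_on = delay_off = 0  -> forced to 500 ms each (1 Hz blink)
	 * delay_on = 500 ms         -> new_on = 50 hardware units of 10 ms
	 * delay_on = 9999 ms        -> clamped to 2550 -> new_on = 255 (max)
	 */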
+
+static void sony_leds_remove(struct sony_sc *sc)
+{
+	struct led_classdev *led;
+	int n;
+
+	BUG_ON(!(sc->quirks & SONY_LED_SUPPORT));
+
+	for (n = 0; n < sc->led_count; n++) {
+		led = sc->leds[n];
+		sc->leds[n] = NULL;
+		if (!led)
+			continue;
+		led_classdev_unregister(led);
+		kfree(led);
+	}
+
+	sc->led_count = 0;
+}
+
+static int sony_leds_init(struct sony_sc *sc)
+{
+	struct hid_device *hdev = sc->hdev;
+	int n, ret = 0;
+	int use_ds4_names;
+	struct led_classdev *led;
+	size_t name_sz;
+	char *name;
+	size_t name_len;
+	const char *name_fmt;
+	static const char * const ds4_name_str[] = { "red", "green", "blue",
+						  "global" };
+	__u8 initial_values[MAX_LEDS] = { 0 };
+	__u8 max_brightness[MAX_LEDS] = { [0 ... (MAX_LEDS - 1)] = 1 };
+	__u8 use_hw_blink[MAX_LEDS] = { 0 };
+
+	BUG_ON(!(sc->quirks & SONY_LED_SUPPORT));
+
+	if (sc->quirks & BUZZ_CONTROLLER) {
+		sc->led_count = 4;
+		use_ds4_names = 0;
+		name_len = strlen("::buzz#");
+		name_fmt = "%s::buzz%d";
+		/* Validate expected report characteristics. */
+		if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 0, 0, 7))
+			return -ENODEV;
+	} else {
+		sixaxis_set_leds_from_id(sc->device_id, initial_values);
+		sc->led_count = 4;
+		memset(use_hw_blink, 1, 4);
+		use_ds4_names = 0;
+		name_len = strlen("::sony#");
+		name_fmt = "%s::sony%d";
+	}
+
+	/*
+	 * Clear LEDs as we have no way of reading their initial state. This is
+	 * only relevant if the driver is loaded after somebody actively
+	 * turned the LEDs on.
+	 */
+	sony_set_leds(sc, initial_values, sc->led_count);
+
+	name_sz = strlen(dev_name(&hdev->dev)) + name_len + 1;
+
+	for (n = 0; n < sc->led_count; n++) {
+
+		if (use_ds4_names)
+			name_sz = strlen(dev_name(&hdev->dev)) + strlen(ds4_name_str[n]) + 2;
+
+		led = kzalloc(sizeof(struct led_classdev) + name_sz, GFP_KERNEL);
+		if (!led) {
+			hid_err(hdev, "Couldn't allocate memory for LED %d\n", n);
+			ret = -ENOMEM;
+			goto error_leds;
+		}
+
+		name = (void *)(&led[1]);
+		if (use_ds4_names)
+			snprintf(name, name_sz, name_fmt, dev_name(&hdev->dev),
+			ds4_name_str[n]);
+		else
+			snprintf(name, name_sz, name_fmt, dev_name(&hdev->dev), n + 1);
+		led->name = name;
+		led->brightness = initial_values[n];
+		led->max_brightness = max_brightness[n];
+		led->brightness_get = sony_led_get_brightness;
+		led->brightness_set = sony_led_set_brightness;
+
+		if (use_hw_blink[n])
+			led->blink_set = sony_led_blink_set;
+
+		sc->leds[n] = led;
+
+		ret = led_classdev_register(&hdev->dev, led);
+		if (ret) {
+			hid_err(hdev, "Failed to register LED %d\n", n);
+			sc->leds[n] = NULL;
+			kfree(led);
+			goto error_leds;
+		}
+	}
+
+	return ret;
+
+error_leds:
+	sony_leds_remove(sc);
+
+	return ret;
+}
+
+static void sixaxis_state_worker(struct work_struct *work)
+{
+	static const union sixaxis_output_report_01 default_report = {
+		.buf = {
+			0x01,
+			0x00, 0xff, 0x00, 0xff, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00,
+			0xff, 0x27, 0x10, 0x00, 0x32,
+			0xff, 0x27, 0x10, 0x00, 0x32,
+			0xff, 0x27, 0x10, 0x00, 0x32,
+			0xff, 0x27, 0x10, 0x00, 0x32,
+			0x00, 0x00, 0x00, 0x00, 0x00
+		}
+	};
+	struct sony_sc *sc = container_of(work, struct sony_sc, state_worker);
+	struct sixaxis_output_report *report =
+		(struct sixaxis_output_report *)sc->output_report_dmabuf;
+	int n;
+
+	/* Initialize the report with default values */
+	memcpy(report, &default_report, sizeof(struct sixaxis_output_report));
+
+	report->leds_bitmap |= sc->led_state[0] << 1;
+	report->leds_bitmap |= sc->led_state[1] << 2;
+	report->leds_bitmap |= sc->led_state[2] << 3;
+	report->leds_bitmap |= sc->led_state[3] << 4;
+
+	/* Set flag for all leds off, required for 3rd party INTEC controller */
+	if ((report->leds_bitmap & 0x1E) == 0)
+		report->leds_bitmap |= 0x20;
+
+	/*
+	 * The LEDs in the report are indexed in reverse order to their
+	 * corresponding light on the controller.
+	 * Index 0 = LED 4, index 1 = LED 3, etc...
+	 *
+	 * In the case of both delay values being zero (blinking disabled) the
+	 * default report values should be used or the controller LED will be
+	 * always off.
+	 */
+	for (n = 0; n < 4; n++) {
+		if (sc->led_delay_on[n] || sc->led_delay_off[n]) {
+			report->led[3 - n].duty_off = sc->led_delay_off[n];
+			report->led[3 - n].duty_on = sc->led_delay_on[n];
+		}
+	}
+
+	hid_hw_raw_request(sc->hdev, report->report_id, (__u8 *)report,
+			sizeof(struct sixaxis_output_report),
+			HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
+}
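
A worked value for the bitmap assembly above:

	/* led_state = {1, 0, 0, 1}:
	 *   leds_bitmap = (1 << 1) | (1 << 4) = 0x12  (LED_1 and LED_4 lit)
	 * and with all four states zero, the 0x20 "all off" flag is set
	 * instead, which third-party INTEC controllers require.
	 */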
+
+static int sony_allocate_output_report(struct sony_sc *sc)
+{
+	if (sc->quirks & SIXAXIS_CONTROLLER)
+		sc->output_report_dmabuf =
+			kmalloc(sizeof(union sixaxis_output_report_01),
+				GFP_KERNEL);
+	else
+		return 0;
+
+	if (!sc->output_report_dmabuf)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int sony_battery_get_property(struct power_supply *psy,
+				     enum power_supply_property psp,
+				     union power_supply_propval *val)
+{
+	struct sony_sc *sc = container_of(psy, struct sony_sc, battery);
+	unsigned long flags;
+	int ret = 0;
+	u8 battery_charging, battery_capacity, cable_state;
+
+	spin_lock_irqsave(&sc->lock, flags);
+	battery_charging = sc->battery_charging;
+	battery_capacity = sc->battery_capacity;
+	cable_state = sc->cable_state;
+	spin_unlock_irqrestore(&sc->lock, flags);
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_PRESENT:
+		val->intval = 1;
+		break;
+	case POWER_SUPPLY_PROP_SCOPE:
+		val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		val->intval = battery_capacity;
+		break;
+	case POWER_SUPPLY_PROP_STATUS:
+		if (battery_charging)
+			val->intval = POWER_SUPPLY_STATUS_CHARGING;
+		else if (battery_capacity == 100 && cable_state)
+			val->intval = POWER_SUPPLY_STATUS_FULL;
+		else
+			val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+static int sony_battery_probe(struct sony_sc *sc)
+{
+	struct hid_device *hdev = sc->hdev;
+	int ret;
+
+	/*
+	 * Set the default battery level to 100% to avoid low battery warnings
+	 * if the battery is polled before the first device report is received.
+	 */
+	sc->battery_capacity = 100;
+
+	sc->battery.properties = sony_battery_props;
+	sc->battery.num_properties = ARRAY_SIZE(sony_battery_props);
+	sc->battery.get_property = sony_battery_get_property;
+	sc->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+	sc->battery.use_for_apm = 0;
+	sc->battery.name = kasprintf(GFP_KERNEL, "sony_controller_battery_%pMR",
+				     sc->mac_address);
+	if (!sc->battery.name)
+		return -ENOMEM;
+
+	ret = power_supply_register(&hdev->dev, &sc->battery);
+	if (ret) {
+		hid_err(hdev, "Unable to register battery device\n");
+		goto err_free;
+	}
+
+	power_supply_powers(&sc->battery, &hdev->dev);
+	return 0;
+
+err_free:
+	kfree(sc->battery.name);
+	sc->battery.name = NULL;
+	return ret;
+}
+
+static void sony_battery_remove(struct sony_sc *sc)
+{
+	if (!sc->battery.name)
+		return;
+
+	power_supply_unregister(&sc->battery);
+	kfree(sc->battery.name);
+	sc->battery.name = NULL;
+}
+
+/*
+ * If a controller is plugged in via USB while already connected via Bluetooth
+ * it will show up as two devices. A global list of connected controllers and
+ * their MAC addresses is maintained to ensure that a device is only connected
+ * once.
+ */
+static int sony_check_add_dev_list(struct sony_sc *sc)
+{
+	struct sony_sc *entry;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&sony_dev_list_lock, flags);
+
+	list_for_each_entry(entry, &sony_device_list, list_node) {
+		ret = memcmp(sc->mac_address, entry->mac_address,
+				sizeof(sc->mac_address));
+		if (!ret) {
+			ret = -EEXIST;
+			hid_info(sc->hdev, "controller with MAC address %pMR already connected\n",
+				sc->mac_address);
+			goto unlock;
+		}
+	}
+
+	ret = 0;
+	list_add(&(sc->list_node), &sony_device_list);
+
+unlock:
+	spin_unlock_irqrestore(&sony_dev_list_lock, flags);
+	return ret;
+}
+
+static void sony_remove_dev_list(struct sony_sc *sc)
+{
+	unsigned long flags;
+
+	if (sc->list_node.next) {
+		spin_lock_irqsave(&sony_dev_list_lock, flags);
+		list_del(&(sc->list_node));
+		spin_unlock_irqrestore(&sony_dev_list_lock, flags);
+	}
+}
+
+static int sony_get_bt_devaddr(struct sony_sc *sc)
+{
+	int ret;
+
+	/* HIDP stores the device MAC address as a string in the uniq field. */
+	ret = strlen(sc->hdev->uniq);
+	if (ret != 17)
+		return -EINVAL;
+
+	ret = sscanf(sc->hdev->uniq,
+		"%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
+		&sc->mac_address[5], &sc->mac_address[4], &sc->mac_address[3],
+		&sc->mac_address[2], &sc->mac_address[1], &sc->mac_address[0]);
+
+	if (ret != 6)
+		return -EINVAL;
+
+	return 0;
+}
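
A standalone sketch of the parse above (address value hypothetical); note that the reversed argument order stores the least-significant byte first, which is the layout the %pMR printk specifier expects:

	#include <stdio.h>

	int main(void)
	{
		unsigned char mac[6];
		const char *uniq = "00:1f:a7:11:22:33";	/* example uniq value */

		if (sscanf(uniq, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
			   &mac[5], &mac[4], &mac[3],
			   &mac[2], &mac[1], &mac[0]) != 6)
			return 1;
		/* mac[] now holds { 0x33, 0x22, 0x11, 0xa7, 0x1f, 0x00 } */
		return 0;
	}
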
+
+static int sony_check_add(struct sony_sc *sc)
+{
+	__u8 *buf = NULL;
+	int n, ret;
+
+	if ((sc->quirks & SIXAXIS_CONTROLLER_BT)) {
+		/*
+		 * sony_get_bt_devaddr() attempts to parse the Bluetooth MAC
+		 * address from the uniq string where HIDP stores it.
+		 * As uniq cannot be guaranteed to be a MAC address in all cases
+		 * a failure of this function should not prevent the connection.
+		 */
+		if (sony_get_bt_devaddr(sc) < 0) {
+			hid_warn(sc->hdev, "UNIQ does not contain a MAC address; duplicate check skipped\n");
+			return 0;
+		}
+	} else if (sc->quirks & SIXAXIS_CONTROLLER_USB) {
+		buf = kmalloc(SIXAXIS_REPORT_0xF2_SIZE, GFP_KERNEL);
+		if (!buf)
+			return -ENOMEM;
+
+		/*
+		 * The MAC address of a Sixaxis controller connected via USB can
+		 * be retrieved with feature report 0xf2. The address begins at
+		 * offset 4.
+		 */
+		ret = hid_hw_raw_request(sc->hdev, 0xf2, buf,
+				SIXAXIS_REPORT_0xF2_SIZE, HID_FEATURE_REPORT,
+				HID_REQ_GET_REPORT);
+
+		if (ret != SIXAXIS_REPORT_0xF2_SIZE) {
+			hid_err(sc->hdev, "failed to retrieve feature report 0xf2 with the Sixaxis MAC address\n");
+			ret = ret < 0 ? ret : -EINVAL;
+			goto out_free;
+		}
+
+		/*
+		 * The Sixaxis device MAC in the report is big-endian and must
+		 * be byte-swapped.
+		 */
+		for (n = 0; n < 6; n++)
+			sc->mac_address[5-n] = buf[4+n];
+	} else {
+		return 0;
+	}
+
+	ret = sony_check_add_dev_list(sc);
+
+out_free:
+
+	kfree(buf);
+
+	return ret;
+}
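
A worked example of the byte swap (values hypothetical): if the feature report carries buf[4..9] = { 0x00, 0x1f, 0xa7, 0x11, 0x22, 0x33 }, the loop stores mac_address[] = { 0x33, 0x22, 0x11, 0xa7, 0x1f, 0x00 }, the same layout sony_get_bt_devaddr() produces, so memcmp() in sony_check_add_dev_list() can compare USB- and Bluetooth-derived addresses directly.
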
+
+static int sony_set_device_id(struct sony_sc *sc)
+{
+	int ret;
+
+	/*
+	 * Only DualShock 4 or Sixaxis controllers get an id.
+	 * All others are set to -1.
+	 */
+	if (sc->quirks & SIXAXIS_CONTROLLER) {
+		ret = ida_simple_get(&sony_device_id_allocator, 0, 0,
+					GFP_KERNEL);
+		if (ret < 0) {
+			sc->device_id = -1;
+			return ret;
+		}
+		sc->device_id = ret;
+	} else {
+		sc->device_id = -1;
+	}
+
+	return 0;
+}
+
+static void sony_release_device_id(struct sony_sc *sc)
+{
+	if (sc->device_id >= 0) {
+		ida_simple_remove(&sony_device_id_allocator, sc->device_id);
+		sc->device_id = -1;
+	}
+}
+
+static inline void sony_init_work(struct sony_sc *sc,
+					void (*worker)(struct work_struct *))
+{
+	if (!sc->worker_initialized)
+		INIT_WORK(&sc->state_worker, worker);
+
+	sc->worker_initialized = 1;
+}
+
+static inline void sony_cancel_work_sync(struct sony_sc *sc)
+{
+	if (sc->worker_initialized)
+		cancel_work_sync(&sc->state_worker);
 }
 
 static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
@@ -197,53 +1157,137 @@
 	int ret;
 	unsigned long quirks = id->driver_data;
 	struct sony_sc *sc;
+	unsigned int connect_mask = HID_CONNECT_DEFAULT;
 
-	sc = kzalloc(sizeof(*sc), GFP_KERNEL);
+	sc = devm_kzalloc(&hdev->dev, sizeof(*sc), GFP_KERNEL);
 	if (sc == NULL) {
 		hid_err(hdev, "can't alloc sony descriptor\n");
 		return -ENOMEM;
 	}
 
+	spin_lock_init(&sc->lock);
+	spin_lock_init(&sony_dev_list_lock);
+
 	sc->quirks = quirks;
 	hid_set_drvdata(hdev, sc);
+	sc->hdev = hdev;
 
 	ret = hid_parse(hdev);
 	if (ret) {
 		hid_err(hdev, "parse failed\n");
-		goto err_free;
+		return ret;
 	}
 
-	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT |
-			HID_CONNECT_HIDDEV_FORCE);
+	if (sc->quirks & VAIO_RDESC_CONSTANT)
+		connect_mask |= HID_CONNECT_HIDDEV_FORCE;
+	else if (sc->quirks & SIXAXIS_CONTROLLER)
+		connect_mask |= HID_CONNECT_HIDDEV_FORCE;
+
+	ret = hid_hw_start(hdev, connect_mask);
 	if (ret) {
 		hid_err(hdev, "hw start failed\n");
-		goto err_free;
+		return ret;
+	}
+
+	ret = sony_allocate_output_report(sc);
+	if (ret < 0) {
+		hid_err(hdev, "failed to allocate the output report buffer\n");
+		goto err_stop;
+	}
+
+	ret = sony_set_device_id(sc);
+	if (ret < 0) {
+		hid_err(hdev, "failed to allocate the device id\n");
+		goto err_stop;
 	}
 
 	if (sc->quirks & SIXAXIS_CONTROLLER_USB) {
-		hdev->hid_output_raw_report = sixaxis_usb_output_raw_report;
+		/*
+		 * The Sony Sixaxis does not handle HID Output Reports on the
+		 * Interrupt EP like it could, so we need to force HID Output
+		 * Reports to use HID_REQ_SET_REPORT on the Control EP.
+		 *
+		 * There is also another issue about HID Output Reports via USB,
+		 * the Sixaxis does not want the report_id as part of the data
+		 * packet, so we have to discard buf[0] when sending the actual
+		 * control message, even for numbered reports, humpf!
+		 */
+		hdev->quirks |= HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP;
+		hdev->quirks |= HID_QUIRK_SKIP_OUTPUT_REPORT_ID;
 		ret = sixaxis_set_operational_usb(hdev);
-	}
-	else if (sc->quirks & SIXAXIS_CONTROLLER_BT)
+		sony_init_work(sc, sixaxis_state_worker);
+	} else if (sc->quirks & SIXAXIS_CONTROLLER_BT) {
+		/*
+		 * The Sixaxis wants output reports sent on the ctrl endpoint
+		 * when connected via Bluetooth.
+		 */
+		hdev->quirks |= HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP;
 		ret = sixaxis_set_operational_bt(hdev);
-	else
+		sony_init_work(sc, sixaxis_state_worker);
+	} else
 		ret = 0;
 
 	if (ret < 0)
 		goto err_stop;
 
+	ret = sony_check_add(sc);
+	if (ret < 0)
+		goto err_stop;
+
+	if (sc->quirks & SONY_LED_SUPPORT) {
+		ret = sony_leds_init(sc);
+		if (ret < 0)
+			goto err_stop;
+	}
+
+	if (sc->quirks & SONY_BATTERY_SUPPORT) {
+		ret = sony_battery_probe(sc);
+		if (ret < 0)
+			goto err_stop;
+
+		/* Open the device to receive reports with battery info */
+		ret = hid_hw_open(hdev);
+		if (ret < 0) {
+			hid_err(hdev, "hw open failed\n");
+			goto err_stop;
+		}
+	}
+
 	return 0;
 err_stop:
+	if (sc->quirks & SONY_LED_SUPPORT)
+		sony_leds_remove(sc);
+	if (sc->quirks & SONY_BATTERY_SUPPORT)
+		sony_battery_remove(sc);
+	sony_cancel_work_sync(sc);
+	kfree(sc->output_report_dmabuf);
+	sony_remove_dev_list(sc);
+	sony_release_device_id(sc);
 	hid_hw_stop(hdev);
-err_free:
-	kfree(sc);
 	return ret;
 }
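
The two quirks set above are consumed by usbhid (see the usbhid_set_raw_report() hunk further down): HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP steers output reports to the control endpoint, and HID_QUIRK_SKIP_OUTPUT_REPORT_ID zeroes buf[0] so the report ID is dropped from the data stage. Sketch with a hypothetical report: buf[] = { 0x01, 0xff, 0x00, ... } goes out as SET_REPORT with wValue 0x0201 and a payload starting at the 0xff byte; the report ID travels only in wValue.
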
 
 static void sony_remove(struct hid_device *hdev)
 {
+	struct sony_sc *sc = hid_get_drvdata(hdev);
+
+	if (sc->quirks & SONY_LED_SUPPORT)
+		sony_leds_remove(sc);
+
+	if (sc->quirks & SONY_BATTERY_SUPPORT) {
+		hid_hw_close(hdev);
+		sony_battery_remove(sc);
+	}
+
+	sony_cancel_work_sync(sc);
+
+	kfree(sc->output_report_dmabuf);
+
+	sony_remove_dev_list(sc);
+
+	sony_release_device_id(sc);
+
 	hid_hw_stop(hdev);
-	kfree(hid_get_drvdata(hdev));
 }
 
 static const struct hid_device_id sony_devices[] = {
@@ -257,18 +1301,47 @@
 		.driver_data = VAIO_RDESC_CONSTANT },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE),
 		.driver_data = VAIO_RDESC_CONSTANT },
+	/* Wired Buzz Controller. Reported as Sony Hub from its USB ID and as
+	 * Logitech joystick from the device descriptor. */
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER),
+		.driver_data = BUZZ_CONTROLLER },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER),
+		.driver_data = BUZZ_CONTROLLER },
+	/* PS3 BD Remote Control */
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_BDREMOTE),
+		.driver_data = PS3REMOTE },
+	/* Logitech Harmony Adapter for PS3 */
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3),
+		.driver_data = PS3REMOTE },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, sony_devices);
 
 static struct hid_driver sony_driver = {
-	.name = "sony",
-	.id_table = sony_devices,
-	.probe = sony_probe,
-	.remove = sony_remove,
-	.report_fixup = sony_report_fixup,
-	.raw_event = sony_raw_event
+	.name             = "sony",
+	.id_table         = sony_devices,
+	.input_mapping    = sony_mapping,
+	.probe            = sony_probe,
+	.remove           = sony_remove,
+	.report_fixup     = sony_report_fixup,
+	.raw_event        = sony_raw_event
 };
-module_hid_driver(sony_driver);
+
+static int __init sony_init(void)
+{
+	dbg_hid("Sony:%s\n", __func__);
+
+	return hid_register_driver(&sony_driver);
+}
+
+static void __exit sony_exit(void)
+{
+	dbg_hid("Sony:%s\n", __func__);
+
+	hid_unregister_driver(&sony_driver);
+	ida_destroy(&sony_device_id_allocator);
+}
+module_init(sony_init);
+module_exit(sony_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-thingm.c b/drivers/hid/hid-thingm.c
index 99342cf..a97c788 100644
--- a/drivers/hid/hid-thingm.c
+++ b/drivers/hid/hid-thingm.c
@@ -48,8 +48,8 @@
 			buf[0], buf[1], buf[2], buf[3], buf[4],
 			buf[5], buf[6], buf[7], buf[8]);
 
-	ret = data->hdev->hid_output_raw_report(data->hdev, buf,
-			BLINK1_CMD_SIZE, HID_FEATURE_REPORT);
+	ret = hid_hw_raw_request(data->hdev, buf[0], buf, BLINK1_CMD_SIZE,
+				 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
 
 	return ret < 0 ? ret : 0;
 }
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index a4a8bb0..4acd509 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -126,8 +126,8 @@
 
 	rep_data[0] = WAC_CMD_ICON_START_STOP;
 	rep_data[1] = 0;
-	ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
-				HID_FEATURE_REPORT);
+	ret = hid_hw_raw_request(hdev, rep_data[0], rep_data, 2,
+				 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
 	if (ret < 0)
 		goto err;
 
@@ -141,15 +141,15 @@
 			rep_data[j + 3] = p[(i << 6) + j];
 
 		rep_data[2] = i;
-		ret = hdev->hid_output_raw_report(hdev, rep_data, 67,
-					HID_FEATURE_REPORT);
+		ret = hid_hw_raw_request(hdev, rep_data[0], rep_data, 67,
+					HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
 	}
 
 	rep_data[0] = WAC_CMD_ICON_START_STOP;
 	rep_data[1] = 0;
 
-	ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
-				HID_FEATURE_REPORT);
+	ret = hid_hw_raw_request(hdev, rep_data[0], rep_data, 2,
+				 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
 
 err:
 	return;
@@ -181,7 +181,8 @@
 		buf[3] = value;
 		/* use fixed brightness for OLEDs */
 		buf[4] = 0x08;
-		hdev->hid_output_raw_report(hdev, buf, 9, HID_FEATURE_REPORT);
+		hid_hw_raw_request(hdev, buf[0], buf, 9, HID_FEATURE_REPORT,
+				   HID_REQ_SET_REPORT);
 		kfree(buf);
 	}
 
@@ -328,8 +329,8 @@
 		rep_data[0] = 0x03 ; rep_data[1] = 0x00;
 		limit = 3;
 		do {
-			ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
-					HID_FEATURE_REPORT);
+			ret = hid_hw_raw_request(hdev, rep_data[0], rep_data, 2,
+					HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
 		} while (ret < 0 && limit-- > 0);
 
 		if (ret >= 0) {
@@ -341,8 +342,9 @@
 			rep_data[1] = 0x00;
 			limit = 3;
 			do {
-				ret = hdev->hid_output_raw_report(hdev,
-					rep_data, 2, HID_FEATURE_REPORT);
+				ret = hid_hw_raw_request(hdev, rep_data[0],
+					rep_data, 2, HID_FEATURE_REPORT,
+					HID_REQ_SET_REPORT);
 			} while (ret < 0 && limit-- > 0);
 
 			if (ret >= 0) {
@@ -367,8 +369,8 @@
 		rep_data[0] = 0x03;
 		rep_data[1] = wdata->features;
 
-		ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
-					HID_FEATURE_REPORT);
+		ret = hid_hw_raw_request(hdev, rep_data[0], rep_data, 2,
+				HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
 		if (ret >= 0)
 			wdata->high_speed = speed;
 		break;
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
index e5ee1f2..0139e18 100644
--- a/drivers/hid/hid-wiimote-core.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -62,14 +62,14 @@
 	__u8 *buf;
 	ssize_t ret;
 
-	if (!hdev->hid_output_raw_report)
+	if (!hdev->ll_driver->output_report)
 		return -ENODEV;
 
 	buf = kmemdup(buffer, count, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
 
-	ret = hdev->hid_output_raw_report(hdev, buf, count, HID_OUTPUT_REPORT);
+	ret = hid_hw_output_report(hdev, buf, count);
 
 	kfree(buf);
 	return ret;
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 612a655..9c58826 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -120,11 +120,6 @@
 
 	dev = hidraw_table[minor]->hid;
 
-	if (!dev->hid_output_raw_report) {
-		ret = -ENODEV;
-		goto out;
-	}
-
 	if (count > HID_MAX_BUFFER_SIZE) {
 		hid_warn(dev, "pid %d passed too large report\n",
 			 task_pid_nr(current));
@@ -150,7 +145,21 @@
 		goto out_free;
 	}
 
-	ret = dev->hid_output_raw_report(dev, buf, count, report_type);
+	if ((report_type == HID_OUTPUT_REPORT) &&
+	    !(dev->quirks & HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP)) {
+		ret = hid_hw_output_report(dev, buf, count);
+		/*
+		 * compatibility with old implementations of USB-HID and I2C-HID:
+		 * if the device does not support receiving output reports on
+		 * an interrupt endpoint, fall back to the SET_REPORT HID command.
+		 */
+		if (ret != -ENOSYS)
+			goto out_free;
+	}
+
+	ret = hid_hw_raw_request(dev, buf[0], buf, count, report_type,
+				HID_REQ_SET_REPORT);
+
 out_free:
 	kfree(buf);
 out:
@@ -184,7 +193,7 @@
 
 	dev = hidraw_table[minor]->hid;
 
-	if (!dev->hid_get_raw_report) {
+	if (!dev->ll_driver->raw_request) {
 		ret = -ENODEV;
 		goto out;
 	}
@@ -209,14 +218,17 @@
 		goto out;
 	}
 
-	/* Read the first byte from the user. This is the report number,
-	 * which is passed to dev->hid_get_raw_report(). */
+	/*
+	 * Read the first byte from the user. This is the report number,
+	 * which is passed to hid_hw_raw_request().
+	 */
 	if (copy_from_user(&report_number, buffer, 1)) {
 		ret = -EFAULT;
 		goto out_free;
 	}
 
-	ret = dev->hid_get_raw_report(dev, report_number, buf, count, report_type);
+	ret = hid_hw_raw_request(dev, report_number, buf, count, report_type,
+				 HID_REQ_GET_REPORT);
 
 	if (ret < 0)
 		goto out_free;
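
From userspace the contract is unchanged: write() on a hidraw node still takes the report ID (0 for unnumbered devices) followed by the payload, and the kernel now prefers the interrupt-OUT path with a SET_REPORT fallback. A minimal sketch against a hypothetical /dev/hidraw0:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char buf[3] = { 0x01, 0xaa, 0x55 }; /* ID 1 + 2 data bytes */
		int fd = open("/dev/hidraw0", O_WRONLY);

		if (fd < 0)
			return 1;
		if (write(fd, buf, sizeof(buf)) != sizeof(buf)) {
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}
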
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index ccc2f36..6b1aa09 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -255,15 +255,27 @@
 	return 0;
 }
 
-static int i2c_hid_set_report(struct i2c_client *client, u8 reportType,
-		u8 reportID, unsigned char *buf, size_t data_len)
+/**
+ * i2c_hid_set_or_send_report: forward an incoming report to the device
+ * @client: the i2c_client of the device
+ * @reportType: 0x03 for HID_FEATURE_REPORT ; 0x02 for HID_OUTPUT_REPORT
+ * @reportID: the report ID
+ * @buf: the actual data to transfer, without the report ID
+ * @len: size of buf
+ * @use_data: true: use SET_REPORT HID command, false: send plain OUTPUT report
+ */
+static int i2c_hid_set_or_send_report(struct i2c_client *client, u8 reportType,
+		u8 reportID, unsigned char *buf, size_t data_len, bool use_data)
 {
 	struct i2c_hid *ihid = i2c_get_clientdata(client);
 	u8 *args = ihid->argsbuf;
+	const struct i2c_hid_cmd *hidcmd;
 	int ret;
 	u16 dataRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
+	u16 outputRegister = le16_to_cpu(ihid->hdesc.wOutputRegister);
+	u16 maxOutputLength = le16_to_cpu(ihid->hdesc.wMaxOutputLength);
 
-	/* hidraw already checked that data_len < HID_MAX_BUFFER_SIZE */
+	/* hid_hw_* already checked that data_len < HID_MAX_BUFFER_SIZE */
 	u16 size =	2			/* size */ +
 			(reportID ? 1 : 0)	/* reportID */ +
 			data_len		/* buf */;
@@ -274,13 +286,27 @@
 
 	i2c_hid_dbg(ihid, "%s\n", __func__);
 
+	if (!use_data && maxOutputLength == 0)
+		return -ENOSYS;
+
 	if (reportID >= 0x0F) {
 		args[index++] = reportID;
 		reportID = 0x0F;
 	}
 
-	args[index++] = dataRegister & 0xFF;
-	args[index++] = dataRegister >> 8;
+	/*
+	 * use the data register for feature reports or if the device does not
+	 * support the output register
+	 */
+	if (use_data) {
+		args[index++] = dataRegister & 0xFF;
+		args[index++] = dataRegister >> 8;
+		hidcmd = &hid_set_report_cmd;
+	} else {
+		args[index++] = outputRegister & 0xFF;
+		args[index++] = outputRegister >> 8;
+		hidcmd = &hid_no_cmd;
+	}
 
 	args[index++] = size & 0xFF;
 	args[index++] = size >> 8;
@@ -290,7 +316,7 @@
 
 	memcpy(&args[index], buf, data_len);
 
-	ret = __i2c_hid_command(client, &hid_set_report_cmd, reportID,
+	ret = __i2c_hid_command(client, hidcmd, reportID,
 		reportType, args, args_len, NULL, 0);
 	if (ret) {
 		dev_err(&client->dev, "failed to set a report to device.\n");
@@ -546,7 +572,7 @@
 }
 
 static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
-		size_t count, unsigned char report_type)
+		size_t count, unsigned char report_type, bool use_data)
 {
 	struct i2c_client *client = hid->driver_data;
 	int report_id = buf[0];
@@ -560,9 +586,9 @@
 		count--;
 	}
 
-	ret = i2c_hid_set_report(client,
+	ret = i2c_hid_set_or_send_report(client,
 				report_type == HID_FEATURE_REPORT ? 0x03 : 0x02,
-				report_id, buf, count);
+				report_id, buf, count, use_data);
 
 	if (report_id && ret >= 0)
 		ret++; /* add report_id to the number of transfered bytes */
@@ -570,34 +596,27 @@
 	return ret;
 }
 
-static void i2c_hid_request(struct hid_device *hid, struct hid_report *rep,
-		int reqtype)
+static int i2c_hid_output_report(struct hid_device *hid, __u8 *buf,
+		size_t count)
 {
-	struct i2c_client *client = hid->driver_data;
-	char *buf;
-	int ret;
-	int len = i2c_hid_get_report_length(rep) - 2;
+	return i2c_hid_output_raw_report(hid, buf, count, HID_OUTPUT_REPORT,
+			false);
+}
 
-	buf = kzalloc(len, GFP_KERNEL);
-	if (!buf)
-		return;
-
+static int i2c_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
+			       __u8 *buf, size_t len, unsigned char rtype,
+			       int reqtype)
+{
 	switch (reqtype) {
 	case HID_REQ_GET_REPORT:
-		ret = i2c_hid_get_raw_report(hid, rep->id, buf, len, rep->type);
-		if (ret < 0)
-			dev_err(&client->dev, "%s: unable to get report: %d\n",
-				__func__, ret);
-		else
-			hid_input_report(hid, rep->type, buf, ret, 0);
-		break;
+		return i2c_hid_get_raw_report(hid, reportnum, buf, len, rtype);
 	case HID_REQ_SET_REPORT:
-		hid_output_report(rep, buf);
-		i2c_hid_output_raw_report(hid, buf, len, rep->type);
-		break;
+		if (buf[0] != reportnum)
+			return -EINVAL;
+		return i2c_hid_output_raw_report(hid, buf, len, rtype, true);
+	default:
+		return -EIO;
 	}
-
-	kfree(buf);
 }
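
For reference, a sketch of the buffer i2c_hid_set_or_send_report() assembles for an output report with ID 1 and two data bytes, assuming a hypothetical wOutputRegister of 0x0005 (report IDs >= 0x0F would prepend one extra byte):

	args[] = { 0x05, 0x00,	/* output register, little-endian */
		   0x05, 0x00,	/* size = 2 (length field) + 1 (ID) + 2 (data) */
		   0x01,	/* report ID */
		   0xaa, 0x55 };	/* report data */
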
 
 static int i2c_hid_parse(struct hid_device *hid)
@@ -749,29 +768,6 @@
 	return ret;
 }
 
-static int i2c_hid_hidinput_input_event(struct input_dev *dev,
-		unsigned int type, unsigned int code, int value)
-{
-	struct hid_device *hid = input_get_drvdata(dev);
-	struct hid_field *field;
-	int offset;
-
-	if (type == EV_FF)
-		return input_ff_event(dev, type, code, value);
-
-	if (type != EV_LED)
-		return -1;
-
-	offset = hidinput_find_field(hid, type, code, &field);
-
-	if (offset == -1) {
-		hid_warn(dev, "event field not found\n");
-		return -1;
-	}
-
-	return hid_set_field(field, offset, value);
-}
-
 static struct hid_ll_driver i2c_hid_ll_driver = {
 	.parse = i2c_hid_parse,
 	.start = i2c_hid_start,
@@ -779,8 +775,8 @@
 	.open = i2c_hid_open,
 	.close = i2c_hid_close,
 	.power = i2c_hid_power,
-	.request = i2c_hid_request,
-	.hidinput_input_event = i2c_hid_hidinput_input_event,
+	.output_report = i2c_hid_output_report,
+	.raw_request = i2c_hid_raw_request,
 };
 
 static int i2c_hid_init_irq(struct i2c_client *client)
@@ -992,8 +988,6 @@
 
 	hid->driver_data = client;
 	hid->ll_driver = &i2c_hid_ll_driver;
-	hid->hid_get_raw_report = i2c_hid_get_raw_report;
-	hid->hid_output_raw_report = i2c_hid_output_raw_report;
 	hid->dev.parent = &client->dev;
 	ACPI_HANDLE_SET(&hid->dev, ACPI_HANDLE(&client->dev));
 	hid->bus = BUS_I2C;
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 0bb3bb8..53b78f2 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -44,10 +44,13 @@
 	__u8 tail;
 	struct uhid_event *outq[UHID_BUFSIZE];
 
+	/* blocking GET_REPORT support; state changes protected by qlock */
 	struct mutex report_lock;
 	wait_queue_head_t report_wait;
+	bool report_running;
 	atomic_t report_done;
-	atomic_t report_id;
+	u32 report_id;
+	u32 report_type;
 	struct uhid_event report_buf;
 };
 
@@ -116,30 +119,6 @@
 	uhid_queue_event(uhid, UHID_CLOSE);
 }
 
-static int uhid_hid_input(struct input_dev *input, unsigned int type,
-			  unsigned int code, int value)
-{
-	struct hid_device *hid = input_get_drvdata(input);
-	struct uhid_device *uhid = hid->driver_data;
-	unsigned long flags;
-	struct uhid_event *ev;
-
-	ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
-	if (!ev)
-		return -ENOMEM;
-
-	ev->type = UHID_OUTPUT_EV;
-	ev->u.output_ev.type = type;
-	ev->u.output_ev.code = code;
-	ev->u.output_ev.value = value;
-
-	spin_lock_irqsave(&uhid->qlock, flags);
-	uhid_queue(uhid, ev);
-	spin_unlock_irqrestore(&uhid->qlock, flags);
-
-	return 0;
-}
-
 static int uhid_hid_parse(struct hid_device *hid)
 {
 	struct uhid_device *uhid = hid->driver_data;
@@ -147,87 +126,148 @@
 	return hid_parse_report(hid, uhid->rd_data, uhid->rd_size);
 }
 
-static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum,
-			    __u8 *buf, size_t count, unsigned char rtype)
+/* must be called with report_lock held */
+static int __uhid_report_queue_and_wait(struct uhid_device *uhid,
+					struct uhid_event *ev,
+					__u32 *report_id)
 {
-	struct uhid_device *uhid = hid->driver_data;
-	__u8 report_type;
-	struct uhid_event *ev;
 	unsigned long flags;
 	int ret;
-	size_t uninitialized_var(len);
-	struct uhid_feature_answer_req *req;
+
+	spin_lock_irqsave(&uhid->qlock, flags);
+	*report_id = ++uhid->report_id;
+	uhid->report_type = ev->type + 1;
+	uhid->report_running = true;
+	uhid_queue(uhid, ev);
+	spin_unlock_irqrestore(&uhid->qlock, flags);
+
+	ret = wait_event_interruptible_timeout(uhid->report_wait,
+				!uhid->report_running || !uhid->running,
+				5 * HZ);
+	if (!ret || !uhid->running || uhid->report_running)
+		ret = -EIO;
+	else if (ret < 0)
+		ret = -ERESTARTSYS;
+	else
+		ret = 0;
+
+	uhid->report_running = false;
+
+	return ret;
+}
+
+static int uhid_hid_get_report(struct hid_device *hid, unsigned char rnum,
+			       u8 *buf, size_t count, u8 rtype)
+{
+	struct uhid_device *uhid = hid->driver_data;
+	struct uhid_get_report_reply_req *req;
+	struct uhid_event *ev;
+	int ret;
 
 	if (!uhid->running)
 		return -EIO;
 
+	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+	if (!ev)
+		return -ENOMEM;
+
+	ev->type = UHID_GET_REPORT;
+	ev->u.get_report.rnum = rnum;
+	ev->u.get_report.rtype = rtype;
+
+	ret = mutex_lock_interruptible(&uhid->report_lock);
+	if (ret) {
+		kfree(ev);
+		return ret;
+	}
+
+	/* this _always_ takes ownership of @ev */
+	ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.get_report.id);
+	if (ret)
+		goto unlock;
+
+	req = &uhid->report_buf.u.get_report_reply;
+	if (req->err) {
+		ret = -EIO;
+	} else {
+		ret = min3(count, (size_t)req->size, (size_t)UHID_DATA_MAX);
+		memcpy(buf, req->data, ret);
+	}
+
+unlock:
+	mutex_unlock(&uhid->report_lock);
+	return ret;
+}
+
+static int uhid_hid_set_report(struct hid_device *hid, unsigned char rnum,
+			       const u8 *buf, size_t count, u8 rtype)
+{
+	struct uhid_device *uhid = hid->driver_data;
+	struct uhid_event *ev;
+	int ret;
+
+	if (!uhid->running || count > UHID_DATA_MAX)
+		return -EIO;
+
+	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+	if (!ev)
+		return -ENOMEM;
+
+	ev->type = UHID_SET_REPORT;
+	ev->u.set_report.rnum = rnum;
+	ev->u.set_report.rtype = rtype;
+	ev->u.set_report.size = count;
+	memcpy(ev->u.set_report.data, buf, count);
+
+	ret = mutex_lock_interruptible(&uhid->report_lock);
+	if (ret) {
+		kfree(ev);
+		return ret;
+	}
+
+	/* this _always_ takes ownership of @ev */
+	ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.set_report.id);
+	if (ret)
+		goto unlock;
+
+	if (uhid->report_buf.u.set_report_reply.err)
+		ret = -EIO;
+	else
+		ret = count;
+
+unlock:
+	mutex_unlock(&uhid->report_lock);
+	return ret;
+}
+
+static int uhid_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
+				__u8 *buf, size_t len, unsigned char rtype,
+				int reqtype)
+{
+	u8 u_rtype;
+
 	switch (rtype) {
 	case HID_FEATURE_REPORT:
-		report_type = UHID_FEATURE_REPORT;
+		u_rtype = UHID_FEATURE_REPORT;
 		break;
 	case HID_OUTPUT_REPORT:
-		report_type = UHID_OUTPUT_REPORT;
+		u_rtype = UHID_OUTPUT_REPORT;
 		break;
 	case HID_INPUT_REPORT:
-		report_type = UHID_INPUT_REPORT;
+		u_rtype = UHID_INPUT_REPORT;
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	ret = mutex_lock_interruptible(&uhid->report_lock);
-	if (ret)
-		return ret;
-
-	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
-	if (!ev) {
-		ret = -ENOMEM;
-		goto unlock;
+	switch (reqtype) {
+	case HID_REQ_GET_REPORT:
+		return uhid_hid_get_report(hid, reportnum, buf, len, u_rtype);
+	case HID_REQ_SET_REPORT:
+		return uhid_hid_set_report(hid, reportnum, buf, len, u_rtype);
+	default:
+		return -EIO;
 	}
-
-	spin_lock_irqsave(&uhid->qlock, flags);
-	ev->type = UHID_FEATURE;
-	ev->u.feature.id = atomic_inc_return(&uhid->report_id);
-	ev->u.feature.rnum = rnum;
-	ev->u.feature.rtype = report_type;
-
-	atomic_set(&uhid->report_done, 0);
-	uhid_queue(uhid, ev);
-	spin_unlock_irqrestore(&uhid->qlock, flags);
-
-	ret = wait_event_interruptible_timeout(uhid->report_wait,
-				atomic_read(&uhid->report_done), 5 * HZ);
-
-	/*
-	 * Make sure "uhid->running" is cleared on shutdown before
-	 * "uhid->report_done" is set.
-	 */
-	smp_rmb();
-	if (!ret || !uhid->running) {
-		ret = -EIO;
-	} else if (ret < 0) {
-		ret = -ERESTARTSYS;
-	} else {
-		spin_lock_irqsave(&uhid->qlock, flags);
-		req = &uhid->report_buf.u.feature_answer;
-
-		if (req->err) {
-			ret = -EIO;
-		} else {
-			ret = 0;
-			len = min(count,
-				min_t(size_t, req->size, UHID_DATA_MAX));
-			memcpy(buf, req->data, len);
-		}
-
-		spin_unlock_irqrestore(&uhid->qlock, flags);
-	}
-
-	atomic_set(&uhid->report_done, 1);
-
-unlock:
-	mutex_unlock(&uhid->report_lock);
-	return ret ? ret : len;
 }
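
The request above blocks until the uhid userspace side answers with a matching id (or the 5 s timeout fires). A minimal sketch of that userspace half; the reply event name is assumed from the matching kernel series, fill_report() is a hypothetical helper, and error handling is trimmed:

	struct uhid_event ev, reply;

	read(uhid_fd, &ev, sizeof(ev));
	if (ev.type == UHID_GET_REPORT) {
		memset(&reply, 0, sizeof(reply));
		reply.type = UHID_GET_REPORT_REPLY;
		reply.u.get_report_reply.id = ev.u.get_report.id; /* must match */
		reply.u.get_report_reply.err = 0;
		reply.u.get_report_reply.size =
			fill_report(reply.u.get_report_reply.data,
				    ev.u.get_report.rnum, ev.u.get_report.rtype);
		write(uhid_fd, &reply, sizeof(reply));
	}
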
 
 static int uhid_hid_output_raw(struct hid_device *hid, __u8 *buf, size_t count,
@@ -268,13 +308,20 @@
 	return count;
 }
 
+static int uhid_hid_output_report(struct hid_device *hid, __u8 *buf,
+				  size_t count)
+{
+	return uhid_hid_output_raw(hid, buf, count, HID_OUTPUT_REPORT);
+}
+
 static struct hid_ll_driver uhid_hid_driver = {
 	.start = uhid_hid_start,
 	.stop = uhid_hid_stop,
 	.open = uhid_hid_open,
 	.close = uhid_hid_close,
-	.hidinput_input_event = uhid_hid_input,
 	.parse = uhid_hid_parse,
+	.raw_request = uhid_hid_raw_request,
+	.output_report = uhid_hid_output_report,
 };
 
 #ifdef CONFIG_COMPAT
@@ -402,8 +449,6 @@
 	hid->uniq[63] = 0;
 
 	hid->ll_driver = &uhid_hid_driver;
-	hid->hid_get_raw_report = uhid_hid_get_raw;
-	hid->hid_output_raw_report = uhid_hid_output_raw;
 	hid->bus = ev->u.create.bus;
 	hid->vendor = ev->u.create.vendor;
 	hid->product = ev->u.create.product;
@@ -460,31 +505,6 @@
 	return 0;
 }
 
-static int uhid_dev_feature_answer(struct uhid_device *uhid,
-				   struct uhid_event *ev)
-{
-	unsigned long flags;
-
-	if (!uhid->running)
-		return -EINVAL;
-
-	spin_lock_irqsave(&uhid->qlock, flags);
-
-	/* id for old report; drop it silently */
-	if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
-		goto unlock;
-	if (atomic_read(&uhid->report_done))
-		goto unlock;
-
-	memcpy(&uhid->report_buf, ev, sizeof(*ev));
-	atomic_set(&uhid->report_done, 1);
-	wake_up_interruptible(&uhid->report_wait);
-
-unlock:
-	spin_unlock_irqrestore(&uhid->qlock, flags);
-	return 0;
-}
-
 static int uhid_char_open(struct inode *inode, struct file *file)
 {
 	struct uhid_device *uhid;
@@ -602,9 +622,6 @@
 	case UHID_INPUT:
 		ret = uhid_dev_input(uhid, &uhid->input_buf);
 		break;
-	case UHID_FEATURE_ANSWER:
-		ret = uhid_dev_feature_answer(uhid, &uhid->input_buf);
-		break;
 	default:
 		ret = -EOPNOTSUPP;
 	}
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 92aef982..56dbc99 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -672,38 +672,6 @@
 	spin_unlock_irqrestore(&usbhid->lock, flags);
 }
 
-static int usb_hidinput_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
-{
-	struct hid_device *hid = input_get_drvdata(dev);
-	struct usbhid_device *usbhid = hid->driver_data;
-	struct hid_field *field;
-	unsigned long flags;
-	int offset;
-
-	if (type == EV_FF)
-		return input_ff_event(dev, type, code, value);
-
-	if (type != EV_LED)
-		return -1;
-
-	if ((offset = hidinput_find_field(hid, type, code, &field)) == -1) {
-		hid_warn(dev, "event field not found\n");
-		return -1;
-	}
-
-	spin_lock_irqsave(&usbhid->lock, flags);
-	hid_set_field(field, offset, value);
-	spin_unlock_irqrestore(&usbhid->lock, flags);
-
-	/*
-	 * Defer performing requested LED action.
-	 * This is more likely gather all LED changes into a single URB.
-	 */
-	schedule_work(&usbhid->led_work);
-
-	return 0;
-}
-
 static int usbhid_wait_io(struct hid_device *hid)
 {
 	struct usbhid_device *usbhid = hid->driver_data;
@@ -936,52 +904,66 @@
 	return ret;
 }
 
-static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t count,
-		unsigned char report_type)
+static int usbhid_set_raw_report(struct hid_device *hid, unsigned int reportnum,
+				 __u8 *buf, size_t count, unsigned char rtype)
 {
 	struct usbhid_device *usbhid = hid->driver_data;
 	struct usb_device *dev = hid_to_usb_dev(hid);
 	struct usb_interface *intf = usbhid->intf;
 	struct usb_host_interface *interface = intf->cur_altsetting;
-	int ret;
+	int ret, skipped_report_id = 0;
 
-	if (usbhid->urbout && report_type != HID_FEATURE_REPORT) {
-		int actual_length;
-		int skipped_report_id = 0;
+	/* Byte 0 is the report number. Report data starts at byte 1. */
+	if ((rtype == HID_OUTPUT_REPORT) &&
+	    (hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORT_ID))
+		buf[0] = 0;
+	else
+		buf[0] = reportnum;
 
-		if (buf[0] == 0x0) {
-			/* Don't send the Report ID */
-			buf++;
-			count--;
-			skipped_report_id = 1;
-		}
-		ret = usb_interrupt_msg(dev, usbhid->urbout->pipe,
-			buf, count, &actual_length,
-			USB_CTRL_SET_TIMEOUT);
-		/* return the number of bytes transferred */
-		if (ret == 0) {
-			ret = actual_length;
-			/* count also the report id */
-			if (skipped_report_id)
-				ret++;
-		}
-	} else {
-		int skipped_report_id = 0;
-		int report_id = buf[0];
-		if (buf[0] == 0x0) {
-			/* Don't send the Report ID */
-			buf++;
-			count--;
-			skipped_report_id = 1;
-		}
-		ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+	if (buf[0] == 0x0) {
+		/* Don't send the Report ID */
+		buf++;
+		count--;
+		skipped_report_id = 1;
+	}
+
+	ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
 			HID_REQ_SET_REPORT,
 			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
-			((report_type + 1) << 8) | report_id,
+			((rtype + 1) << 8) | reportnum,
 			interface->desc.bInterfaceNumber, buf, count,
 			USB_CTRL_SET_TIMEOUT);
-		/* count also the report id, if this was a numbered report. */
-		if (ret > 0 && skipped_report_id)
+	/* count also the report id, if this was a numbered report. */
+	if (ret > 0 && skipped_report_id)
+		ret++;
+
+	return ret;
+}
+
+static int usbhid_output_report(struct hid_device *hid, __u8 *buf, size_t count)
+{
+	struct usbhid_device *usbhid = hid->driver_data;
+	struct usb_device *dev = hid_to_usb_dev(hid);
+	int actual_length, skipped_report_id = 0, ret;
+
+	if (!usbhid->urbout)
+		return -ENOSYS;
+
+	if (buf[0] == 0x0) {
+		/* Don't send the Report ID */
+		buf++;
+		count--;
+		skipped_report_id = 1;
+	}
+
+	ret = usb_interrupt_msg(dev, usbhid->urbout->pipe,
+				buf, count, &actual_length,
+				USB_CTRL_SET_TIMEOUT);
+	/* return the number of bytes transferred */
+	if (ret == 0) {
+		ret = actual_length;
+		/* count also the report id */
+		if (skipped_report_id)
 			ret++;
 	}
 
@@ -1244,6 +1226,20 @@
 	}
 }
 
+static int usbhid_raw_request(struct hid_device *hid, unsigned char reportnum,
+			      __u8 *buf, size_t len, unsigned char rtype,
+			      int reqtype)
+{
+	switch (reqtype) {
+	case HID_REQ_GET_REPORT:
+		return usbhid_get_raw_report(hid, reportnum, buf, len, rtype);
+	case HID_REQ_SET_REPORT:
+		return usbhid_set_raw_report(hid, reportnum, buf, len, rtype);
+	default:
+		return -EIO;
+	}
+}
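
Worked example of the wValue encoding shared by both paths: the kernel numbers report types 0/1/2 (input/output/feature) while the USB HID class uses 1/2/3 on the wire, hence the (rtype + 1) shift. A GET_REPORT of feature report 0xf2 (the Sixaxis MAC query earlier in this patch) therefore uses wValue = ((2 + 1) << 8) | 0xf2 = 0x03f2.
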
+
 static int usbhid_idle(struct hid_device *hid, int report, int idle,
 		int reqtype)
 {
@@ -1265,8 +1261,9 @@
 	.open = usbhid_open,
 	.close = usbhid_close,
 	.power = usbhid_power,
-	.hidinput_input_event = usb_hidinput_input_event,
 	.request = usbhid_request,
+	.raw_request = usbhid_raw_request,
+	.output_report = usbhid_output_report,
 	.wait = usbhid_wait_io,
 	.idle = usbhid_idle,
 };
@@ -1298,8 +1295,6 @@
 
 	usb_set_intfdata(intf, hid);
 	hid->ll_driver = &usb_hid_driver;
-	hid->hid_get_raw_report = usbhid_get_raw_report;
-	hid->hid_output_raw_report = usbhid_output_raw_report;
 	hid->ff_init = hid_pidff_init;
 #ifdef CONFIG_USB_HIDDEV
 	hid->hiddev_connect = hiddev_connect;
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index f25f298..3d5f272 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -504,6 +504,16 @@
 	  sensor inside your CPU. Most of the family 6 CPUs
 	  are supported. Check Documentation/hwmon/coretemp for details.
 
+config SENSORS_CORETEMP_INTERRUPT
+	tristate "Intel Core/Core2/Atom temperature sensor Interrupts"
+	depends on SENSORS_CORETEMP
+	help
+	  If you say yes here you get support for interrupts when the
+	  CPU temperature crosses the programmed threshold.
+
+	  This has been tested only on specific platforms (e.g. Atom).
+	  If you are not sure, say N here.
+
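
A minimal .config fragment enabling the new option might look like this (a sketch; SENSORS_CORETEMP itself depends on X86 and HWMON):

	CONFIG_HWMON=y
	CONFIG_SENSORS_CORETEMP=y
	CONFIG_SENSORS_CORETEMP_INTERRUPT=y
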
 config SENSORS_IBMAEM
 	tristate "IBM Active Energy Manager temperature/power sensors and control"
 	select IPMI_SI
@@ -822,6 +832,21 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called lm95245.
 
+config MSIC_GPADC
+	tristate "MSIC GPADC driver for Intel Medfield platform"
+	depends on INTEL_SCU_IPC
+	help
+	  Say Y here to enable the MSIC GPADC driver on the Intel
+	  Medfield platform.
+
+config INTEL_MCU
+	tristate "Intel generic MCU control interface"
+	help
+	  Say Y here to enable the control interface for the Intel MCU.
+
+	  This driver provides a userspace tty interface for control and
+	  message output. Normal read/write calls can be used to perform
+	  these operations.
+
 config SENSORS_MAX1111
 	tristate "Maxim MAX1111 Serial 8-bit ADC chip and compatibles"
 	depends on SPI_MASTER
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index d17d3e6..11e757a 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -140,6 +140,8 @@
 obj-$(CONFIG_SENSORS_W83L786NG)	+= w83l786ng.o
 obj-$(CONFIG_SENSORS_WM831X)	+= wm831x-hwmon.o
 obj-$(CONFIG_SENSORS_WM8350)	+= wm8350-hwmon.o
+obj-$(CONFIG_MSIC_GPADC)        += intel_mid_gpadc.o
+obj-$(CONFIG_INTEL_MCU)        += intel_mcu_common.o
 
 obj-$(CONFIG_PMBUS)		+= pmbus/
 
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 0cf2510..7d091ab 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -37,6 +37,7 @@
 #include <linux/smp.h>
 #include <linux/moduleparam.h>
 #include <asm/msr.h>
+#include <asm/mce.h>
 #include <asm/processor.h>
 #include <asm/cpu_device_id.h>
 
@@ -52,9 +53,10 @@
 
 #define BASE_SYSFS_ATTR_NO	2	/* Sysfs Base attr no for coretemp */
 #define NUM_REAL_CORES		32	/* Number of Real cores per cpu */
-#define CORETEMP_NAME_LENGTH	19	/* String Length of attrs */
-#define MAX_CORE_ATTRS		4	/* Maximum no of basic attrs */
-#define TOTAL_ATTRS		(MAX_CORE_ATTRS + 1)
+#define CORETEMP_NAME_LENGTH	33	/* String Length of attrs */
+#define MAX_CORE_ATTRS		5	/* Maximum no of basic attrs */
+#define MAX_THRESH_ATTRS	4	/* Maximum no of threshold attrs */
+#define TOTAL_ATTRS		(MAX_CORE_ATTRS + MAX_THRESH_ATTRS)
 #define MAX_CORE_DATA		(NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
 
 #define TO_PHYS_ID(cpu)		(cpu_data(cpu).phys_proc_id)
@@ -75,6 +77,8 @@
  *		This value is passed as "id" field to rdmsr/wrmsr functions.
  * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS,
  *		from where the temperature values should be read.
+ * @intrpt_reg: One of IA32_THERM_INTERRUPT or IA32_PACKAGE_THERM_INTERRUPT,
+ *		from where the thresholds are read.
 * @attr_size:  Total number of per-core attrs displayed in the sysfs.
  * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data.
  *		Otherwise, temp_data holds coretemp data.
@@ -88,6 +92,7 @@
 	unsigned int cpu;
 	u32 cpu_core_id;
 	u32 status_reg;
+	u32 intrpt_reg;
 	int attr_size;
 	bool is_pkg_data;
 	bool valid;
@@ -102,6 +107,7 @@
 	u16 phys_proc_id;
 	struct temp_data *core_data[MAX_CORE_DATA];
 	struct device_attribute name_attr;
+
 };
 
 struct pdev_entry {
@@ -113,12 +119,119 @@
 static LIST_HEAD(pdev_list);
 static DEFINE_MUTEX(pdev_list_mutex);
 
+#ifdef CONFIG_SENSORS_CORETEMP_INTERRUPT
+static DEFINE_PER_CPU(struct delayed_work, core_threshold_work);
+#endif
 static ssize_t show_name(struct device *dev,
 			struct device_attribute *devattr, char *buf)
 {
 	return sprintf(buf, "%s\n", DRVNAME);
 }
 
+static ssize_t show_tx_triggered(struct device *dev,
+				 struct device_attribute *devattr, char *buf,
+				 u32 mask)
+{
+	u32 eax, edx;
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	struct platform_data *pdata = dev_get_drvdata(dev);
+	struct temp_data *tdata = pdata->core_data[attr->index];
+
+	rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
+
+	return sprintf(buf, "%d\n", !!(eax & mask));
+}
+
+static ssize_t show_t0_triggered(struct device *dev,
+				 struct device_attribute *devattr, char *buf)
+{
+	return show_tx_triggered(dev, devattr, buf, THERM_STATUS_THRESHOLD0);
+}
+
+static ssize_t show_t1_triggered(struct device *dev,
+				 struct device_attribute *devattr, char *buf)
+{
+	return show_tx_triggered(dev, devattr, buf, THERM_STATUS_THRESHOLD1);
+}
+
+static ssize_t show_tx(struct device *dev,
+		       struct device_attribute *devattr, char *buf,
+		       u32 mask, int shift)
+{
+	struct platform_data *pdata = dev_get_drvdata(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	struct temp_data *tdata = pdata->core_data[attr->index];
+	u32 eax, edx;
+	int t;
+
+	rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
+	t = tdata->tjmax - ((eax & mask) >> shift) * 1000;
+	return sprintf(buf, "%d\n", t);
+}
+
+static ssize_t store_tx(struct device *dev,
+			struct device_attribute *devattr,
+			const char *buf, size_t count,
+			u32 mask, int shift)
+{
+	struct platform_data *pdata = dev_get_drvdata(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	struct temp_data *tdata = pdata->core_data[attr->index];
+	u32 eax, edx;
+	unsigned long val;
+	int diff;
+
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+
+	/*
+	 * The thermal threshold mask is 7 bits wide. Values are entered in
+	 * millidegrees Celsius, hence don't accept val > (127 * 1000).
+	 */
+	if (val > tdata->tjmax || val > 127000)
+		return -EINVAL;
+
+	diff = (tdata->tjmax - val) / 1000;
+
+	mutex_lock(&tdata->update_lock);
+	rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
+	eax = (eax & ~mask) | (diff << shift);
+	wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
+	mutex_unlock(&tdata->update_lock);
+
+	return count;
+}
+
+static ssize_t show_t0(struct device *dev,
+		       struct device_attribute *devattr, char *buf)
+{
+	return show_tx(dev, devattr, buf, THERM_MASK_THRESHOLD0,
+		       THERM_SHIFT_THRESHOLD0);
+}
+
+static ssize_t store_t0(struct device *dev,
+			struct device_attribute *devattr,
+			const char *buf, size_t count)
+{
+	return store_tx(dev, devattr, buf, count, THERM_MASK_THRESHOLD0,
+			THERM_SHIFT_THRESHOLD0);
+}
+
+static ssize_t show_t1(struct device *dev,
+		       struct device_attribute *devattr, char *buf)
+{
+	return show_tx(dev, devattr, buf, THERM_MASK_THRESHOLD1,
+		       THERM_SHIFT_THRESHOLD1);
+}
+
+static ssize_t store_t1(struct device *dev,
+			struct device_attribute *devattr,
+			const char *buf, size_t count)
+{
+	return store_tx(dev, devattr, buf, count, THERM_MASK_THRESHOLD1,
+			THERM_SHIFT_THRESHOLD1);
+}
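
Worked example of the threshold arithmetic above: with tjmax = 100000 (100 °C) and a requested threshold of 85000 (85 °C), diff = (100000 - 85000) / 1000 = 15, so 15 is written into the 7-bit threshold field; show_tx() reverses it as 100000 - 15 * 1000 = 85000.
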
+
 static ssize_t show_label(struct device *dev,
 				struct device_attribute *devattr, char *buf)
 {
@@ -187,6 +300,7 @@
 	}
 
 	mutex_unlock(&tdata->update_lock);
+
 	return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN;
 }
 
@@ -198,7 +312,7 @@
 static const struct tjmax __cpuinitconst tjmax_table[] = {
 	{ "CPU  230", 100000 },		/* Model 0x1c, stepping 2	*/
 	{ "CPU  330", 125000 },		/* Model 0x1c, stepping 2	*/
-	{ "CPU CE4110", 110000 },	/* Model 0x1c, stepping 10 Sodaville */
+	{ "CPU CE4110", 110000 },	/* Model 0x1c, stepping 10	*/
 	{ "CPU CE4150", 110000 },	/* Model 0x1c, stepping 10	*/
 	{ "CPU CE4170", 110000 },	/* Model 0x1c, stepping 10	*/
 };
@@ -212,7 +326,7 @@
 #define ANY 0xff
 
 static const struct tjmax_model __cpuinitconst tjmax_model_table[] = {
-	{ 0x1c, 10, 100000 },	/* D4xx, K4xx, N4xx, D5xx, K5xx, N5xx */
+	{ 0x1c, 10, 100000 },	/* D4xx, N4xx, D5xx, N5xx */
 	{ 0x1c, ANY, 90000 },	/* Z5xx, N2xx, possibly others
 				 * Note: Also matches 230 and 330,
 				 * which are covered by tjmax_table
@@ -222,7 +336,7 @@
 				 * is undetectable by software
 				 */
 	{ 0x27, ANY, 90000 },	/* Atom Medfield (Z2460) */
-	{ 0x35, ANY, 90000 },	/* Atom Clover Trail/Cloverview (Z2760) */
+	{ 0x35, ANY, 90000 },	/* Atom Clovertrail */
 	{ 0x36, ANY, 100000 },	/* Atom Cedar Trail/Cedarview (N2xxx, D2xxx) */
 };
 
@@ -357,67 +471,7 @@
 	return adjust_tjmax(c, id, dev);
 }
 
-static int create_name_attr(struct platform_data *pdata,
-				      struct device *dev)
-{
-	sysfs_attr_init(&pdata->name_attr.attr);
-	pdata->name_attr.attr.name = "name";
-	pdata->name_attr.attr.mode = S_IRUGO;
-	pdata->name_attr.show = show_name;
-	return device_create_file(dev, &pdata->name_attr);
-}
-
-static int __cpuinit create_core_attrs(struct temp_data *tdata,
-				       struct device *dev, int attr_no)
-{
-	int err, i;
-	static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
-			struct device_attribute *devattr, char *buf) = {
-			show_label, show_crit_alarm, show_temp, show_tjmax,
-			show_ttarget };
-	static const char *const names[TOTAL_ATTRS] = {
-					"temp%d_label", "temp%d_crit_alarm",
-					"temp%d_input", "temp%d_crit",
-					"temp%d_max" };
-
-	for (i = 0; i < tdata->attr_size; i++) {
-		snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i],
-			attr_no);
-		sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
-		tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
-		tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
-		tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
-		tdata->sd_attrs[i].index = attr_no;
-		err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr);
-		if (err)
-			goto exit_free;
-	}
-	return 0;
-
-exit_free:
-	while (--i >= 0)
-		device_remove_file(dev, &tdata->sd_attrs[i].dev_attr);
-	return err;
-}
-
-
-static int __cpuinit chk_ucode_version(unsigned int cpu)
-{
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-	/*
-	 * Check if we have problem with errata AE18 of Core processors:
-	 * Readings might stop update when processor visited too deep sleep,
-	 * fixed for stepping D0 (6EC).
-	 */
-	if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) {
-		pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n");
-		return -ENODEV;
-	}
-	return 0;
-}
-
-static struct platform_device __cpuinit *coretemp_get_pdev(unsigned int cpu)
+static struct platform_device *coretemp_get_pdev(unsigned int cpu)
 {
 	u16 phys_proc_id = TO_PHYS_ID(cpu);
 	struct pdev_entry *p;
@@ -434,6 +488,228 @@
 	return NULL;
 }
 
+#ifdef CONFIG_SENSORS_CORETEMP_INTERRUPT
+/* Interrupt Handler for Core Threshold Events */
+static int coretemp_interrupt(__u64 msr_val)
+{
+	unsigned int cpu = smp_processor_id();
+
+	schedule_delayed_work_on(cpu, &per_cpu(core_threshold_work, cpu), 0);
+	return 0;
+}
+
+static void core_threshold_work_fn(struct work_struct *work)
+{
+	u32 eax, edx;
+	int thresh, event, t0, t1, temp;
+	char *thermal_event[5];
+	bool notify = false;
+	unsigned int cpu = smp_processor_id();
+	int indx = TO_ATTR_NO(cpu);
+	struct platform_device *pdev = coretemp_get_pdev(cpu);
+	struct platform_data *pdata = platform_get_drvdata(pdev);
+	struct temp_data *tdata = pdata->core_data[indx];
+
+	if (!tdata) {
+		pr_err("Could not retrieve temp_data\n");
+		return;
+	}
+
+	rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &eax, &edx);
+	if (eax & THERM_LOG_THRESHOLD0) {
+		thresh = 0;
+		event = !!(eax & THERM_STATUS_THRESHOLD0);
+
+		/* Reset the Threshold0 interrupt */
+		eax = eax & ~THERM_LOG_THRESHOLD0;
+		wrmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, eax, edx);
+
+		/* Notify only when we go below the lower threshold */
+		if (event != 1)
+			notify = true;
+
+	} else if (eax & THERM_LOG_THRESHOLD1) {
+		thresh = 1;
+		event = !!(eax & THERM_STATUS_THRESHOLD1);
+
+		/* Reset the Threshold1 interrupt */
+		eax = eax & ~THERM_LOG_THRESHOLD1;
+		wrmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, eax, edx);
+
+		/* Notify only when we go above the upper threshold */
+		if (event != 0)
+			notify = true;
+	}
+
+	/*
+	 * Read the current temperature and send it to userland so that
+	 * user space can avoid a sysfs read.
+	 */
+	temp = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000;
+
+	/* Read the threshold registers (only) to print threshold values. */
+	rdmsr_on_cpu(cpu, MSR_IA32_THERM_INTERRUPT, &eax, &edx);
+	t0 = tdata->tjmax - ((eax & THERM_MASK_THRESHOLD0) >> THERM_SHIFT_THRESHOLD0) * 1000;
+	t1 = tdata->tjmax - ((eax & THERM_MASK_THRESHOLD1) >> THERM_SHIFT_THRESHOLD1) * 1000;
+
+	if (!notify) {
+		pr_debug("Thermal Event: Sensor: Core %u, cur_temp: %d, event: %d, level: %d, t0: %d, t1: %d\n",
+			 tdata->cpu_core_id, temp, event, thresh, t0, t1);
+		return;
+	}
+
+	pr_info("Thermal Event: Sensor: Core %u, cur_temp: %d, event: %d, level: %d, t0: %d, t1: %d\n",
+		tdata->cpu_core_id, temp, event, thresh, t0, t1);
+
+	thermal_event[0] = kasprintf(GFP_KERNEL, "NAME=Core %u",
+						tdata->cpu_core_id);
+	thermal_event[1] = kasprintf(GFP_KERNEL, "TEMP=%d", temp);
+	thermal_event[2] = kasprintf(GFP_KERNEL, "EVENT=%d", event);
+	thermal_event[3] = kasprintf(GFP_KERNEL, "LEVEL=%d", thresh);
+	thermal_event[4] = NULL;
+
+	kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, thermal_event);
+
+	kfree(thermal_event[3]);
+	kfree(thermal_event[2]);
+	kfree(thermal_event[1]);
+	kfree(thermal_event[0]);
+}
+
+static void configure_apic(void *info)
+{
+	u32 l;
+	int *flag = (int *)info;
+
+	l = apic_read(APIC_LVTTHMR);
+
+	if (*flag)	/* Non-Zero flag Masks the APIC */
+		apic_write(APIC_LVTTHMR, l | APIC_LVT_MASKED);
+	else		/* Zero flag UnMasks the APIC */
+		apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
+}
+
+static int config_thresh_intrpt(struct temp_data *data, int enable)
+{
+	u32 eax, edx;
+	unsigned int cpu = data->cpu;
+	int flag = 1; /* Non-Zero Flag masks the apic */
+
+	smp_call_function_single(cpu, &configure_apic, &flag, 1);
+
+	rdmsr_on_cpu(cpu, MSR_IA32_THERM_INTERRUPT, &eax, &edx);
+
+	if (enable) {
+		INIT_DELAYED_WORK(&per_cpu(core_threshold_work, cpu),
+					core_threshold_work_fn);
+
+		eax |= (THERM_INT_THRESHOLD0_ENABLE |
+						THERM_INT_THRESHOLD1_ENABLE);
+		platform_thermal_notify = coretemp_interrupt;
+
+		pr_info("Enabled Aux0/Aux1 interrupts for coretemp\n");
+	} else {
+		eax &= (~(THERM_INT_THRESHOLD0_ENABLE |
+						THERM_INT_THRESHOLD1_ENABLE));
+		platform_thermal_notify = NULL;
+
+		cancel_delayed_work_sync(&per_cpu(core_threshold_work, cpu));
+	}
+
+	wrmsr_on_cpu(cpu, MSR_IA32_THERM_INTERRUPT, eax, edx);
+
+	flag = 0; /* Flag should be zero to unmask the apic */
+	smp_call_function_single(cpu, &configure_apic, &flag, 1);
+
+	return 0;
+}
+#else
+static inline int config_thresh_intrpt(struct temp_data *data, int enable)
+{
+	return 0;
+}
+#endif
+
+static int create_name_attr(struct platform_data *pdata,
+				      struct device *dev)
+{
+	sysfs_attr_init(&pdata->name_attr.attr);
+	pdata->name_attr.attr.name = "name";
+	pdata->name_attr.attr.mode = S_IRUGO;
+	pdata->name_attr.show = show_name;
+	return device_create_file(dev, &pdata->name_attr);
+}
+
+static int __cpuinit create_core_attrs(struct temp_data *tdata,
+			struct device *dev, int attr_no, bool have_ttarget)
+{
+	int err, i;
+	static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
+			struct device_attribute *devattr, char *buf) = {
+			show_label, show_crit_alarm, show_temp, show_tjmax,
+			show_ttarget, show_t0, show_t0_triggered,
+			show_t1, show_t1_triggered };
+	static ssize_t (*rw_ptr[TOTAL_ATTRS]) (struct device *dev,
+			struct device_attribute *devattr, const char *buf,
+			size_t count) = { NULL, NULL, NULL, NULL, NULL,
+					store_t0, NULL, store_t1, NULL };
+	static const char *const names[TOTAL_ATTRS] = {
+					"temp%d_label", "temp%d_crit_alarm",
+					"temp%d_input", "temp%d_crit",
+					"temp%d_max",
+					"temp%d_threshold1",
+					"temp%d_threshold1_triggered",
+					"temp%d_threshold2",
+					"temp%d_threshold2_triggered" };
+
+	for (i = 0; i < tdata->attr_size; i++) {
+		snprintf(tdata->attr_name[i], sizeof(tdata->attr_name[i]),
+				names[i], attr_no);
+		sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
+		tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
+		tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
+		if (rw_ptr[i]) {
+			tdata->sd_attrs[i].dev_attr.attr.mode |= S_IWUSR;
+			tdata->sd_attrs[i].dev_attr.store = rw_ptr[i];
+		}
+		tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
+		tdata->sd_attrs[i].index = attr_no;
+		err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr);
+		if (err)
+			goto exit_free;
+	}
+	return 0;
+
+exit_free:
+	while (--i >= 0) {
+		if (!tdata->sd_attrs[i].dev_attr.attr.name)
+			continue;
+		device_remove_file(dev, &tdata->sd_attrs[i].dev_attr);
+	}
+	return err;
+}
+
+
+static int __cpuinit chk_ucode_version(unsigned int cpu)
+{
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+	/*
+	 * Check if we have a problem with errata AE18 of Core processors:
+	 * readings might stop updating after the processor has visited too
+	 * deep a sleep state; fixed for stepping D0 (6EC).
+	 */
+	if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) {
+		pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n");
+		return -ENODEV;
+	}
+	return 0;
+}
+
 static struct temp_data __cpuinit *init_temp_data(unsigned int cpu,
 						  int pkg_flag)
 {
@@ -445,6 +721,8 @@
 
 	tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS :
 							MSR_IA32_THERM_STATUS;
+	tdata->intrpt_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_INTERRUPT :
+						MSR_IA32_THERM_INTERRUPT;
 	tdata->is_pkg_data = pkg_flag;
 	tdata->cpu = cpu;
 	tdata->cpu_core_id = TO_CORE_ID(cpu);
@@ -461,6 +739,7 @@
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	u32 eax, edx;
 	int err, attr_no;
+	bool have_ttarget = false;
 
 	/*
 	 * Find attr number for sysfs:
@@ -506,17 +785,28 @@
 		if (!err) {
 			tdata->ttarget
 			  = tdata->tjmax - ((eax >> 8) & 0xff) * 1000;
-			tdata->attr_size++;
+			have_ttarget = true;
 		}
 	}
 
+	/*
+	 * Test if we can access the intrpt register. If so, increase
+	 * 'size' enough to support t0 and t1 attributes.
+	 */
+	err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx);
+	if (!err)
+		tdata->attr_size += MAX_THRESH_ATTRS;
+
 	pdata->core_data[attr_no] = tdata;
 
 	/* Create sysfs interfaces */
-	err = create_core_attrs(tdata, &pdev->dev, attr_no);
+	err = create_core_attrs(tdata, &pdev->dev, attr_no, have_ttarget);
 	if (err)
 		goto exit_free;
 
+	/* Enable threshold interrupt support */
+	config_thresh_intrpt(tdata, 1);
+
 	return 0;
 exit_free:
 	pdata->core_data[attr_no] = NULL;
@@ -544,8 +834,14 @@
 	struct temp_data *tdata = pdata->core_data[indx];
 
 	/* Remove the sysfs attributes */
-	for (i = 0; i < tdata->attr_size; i++)
+	for (i = 0; i < tdata->attr_size; i++) {
+		if (!tdata->sd_attrs[i].dev_attr.attr.name)
+			continue;
 		device_remove_file(dev, &tdata->sd_attrs[i].dev_attr);
+	}
+
+	/* Disable threshold interrupt support */
+	config_thresh_intrpt(tdata, 0);
 
 	kfree(pdata->core_data[indx]);
 	pdata->core_data[indx] = NULL;
diff --git a/drivers/hwmon/intel_mcu_common.c b/drivers/hwmon/intel_mcu_common.c
new file mode 100644
index 0000000..cdf078c
--- /dev/null
+++ b/drivers/hwmon/intel_mcu_common.c
@@ -0,0 +1,700 @@
+/**
+ * intel_mcu_common.c - Intel MCU common interface file
+ *
+ * Copyright (C) 2014 Intel Inc. - http://www.intel.com
+ *
+ * Authors: Lei Wen <lei.wen@intel.com>,
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/completion.h>
+#include <linux/firmware.h>
+#include <asm/intel_psh_ipc.h>
+#include "intel_mcu_common.h"
+#include <linux/circ_buf.h>
+
+#define APP_IMR_SIZE (1024 * 126)
+#define DRIVER_AUTHOR "Lei Wen <lei.wen@intel.com>"
+#define DRIVER_DESC "Intel mcu common control interface"
+#define INTEL_MCU_TTY_MAJOR		168
+#define INTEL_MCU_TTY_MINORS		3
+
+#define LOAD_APP		"load mcu app"
+#define GET_VERSION		"get mcu app version"
+struct tty_driver *intel_mcu_tty_driver;
+
+#define VER_LEN		1024
+struct mcu {
+	char ver[VER_LEN];
+	uintptr_t ddr_phy[2];
+	void *ddr[2];
+	int load_in_progress;
+};
+
+struct mcu_data {
+	struct device *dev;
+	struct tty_port port;
+	struct mcu *mcu;
+	struct completion cmp;
+	struct loop_buffer lbuf;
+	int index;
+};
+
+static struct mcu_data *mcu_table[INTEL_MCU_TTY_MINORS];
+static int log_level = 1;
+static char *debug_msg[] = {
+	"fatal",
+	"error",
+	"warning",
+	"info",
+	"debug",
+};
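+/* debug_msg[] index 0..4 corresponds to log levels 1..5 (fatal..debug) */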
+
+static int send_cmd(struct mcu_data *data,
+		struct psh_msg *in, int ch, int wait)
+{
+	int ret;
+	ret = intel_ia2psh_command(in, NULL, ch, 1000000);
+	if (ret)
+		return ret;
+
+	if (wait) {
+		ret = wait_for_completion_timeout(&data->cmp, 3 * HZ);
+		if (ret == 0)
+			return -ETIME;
+	}
+
+	return 0;
+}
+
+static void lbuf_read_reset(struct loop_buffer *lbuf)
+{
+	if (lbuf) {
+		lbuf->off_head = lbuf->off_tail = 0;
+		lbuf->in_reading = 0;
+	}
+}
+
+static int lbuf_read_next(struct loop_buffer *lbuf, u8 **buf, u16 *size)
+{
+	struct frame_head *fhead =
+		(struct frame_head *)(lbuf->addr + lbuf->off_head);
+	*buf = NULL;
+	*size = 0;
+
+	if (lbuf->in_reading) {
+		lbuf->in_reading = 0;
+
+		/* skip past the previous frame, which has already been read */
+		lbuf->off_head += frame_size(fhead->length);
+		lbuf->off_tail = lbuf->off_head;
+		fhead = (struct frame_head *)(lbuf->addr + lbuf->off_head);
+	}
+
+	if (fhead->sign == LBUF_DISCARD_SIGN) {
+		fhead = (struct frame_head *)lbuf->addr;
+		lbuf->off_head = lbuf->off_tail = 0;
+	}
+
+	if (fhead->sign == LBUF_CELL_SIGN) {
+		*buf = lbuf->addr + lbuf->off_head + sizeof(*fhead);
+		*size = fhead->length;
+		lbuf->in_reading = 1;
+	}
+
+	return !lbuf->in_reading;
+}
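+
+/*
+ * Loop-buffer frame layout, as implied by struct frame_head in
+ * intel_mcu_common.h (payload padded to a 4-byte boundary):
+ *
+ *   | sign (u16) | length (u16) | payload (length bytes, padded) | ...
+ *
+ * LBUF_CELL_SIGN marks a valid frame; LBUF_DISCARD_SIGN tells the
+ * reader to wrap back to the start of the buffer. A typical consumer
+ * loops until no frame is left, e.g. (handle_frame() is illustrative):
+ *
+ *   while (!lbuf_read_next(lbuf, &buf, &size))
+ *           handle_frame(buf, size);
+ */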
+
+static int intel_mcu_mcudbg_level(struct mcu_data *data, int level)
+{
+	struct psh_msg in;
+	struct cmd_debug_param *param;
+
+	in.param = 0;
+	in.msg = CMD_MCU_APP_DEBUG;
+	param = (struct cmd_debug_param *) (&(in.param));
+	if (level > 0) {
+		param->level = level;
+		param->sub_cmd = CMD_DEBUG_SET_MASK;
+	} else
+		param->sub_cmd = CMD_DEBUG_GET_MASK;
+
+	return send_cmd(data, &in, PSH2IA_CHANNEL2, 1);
+}
+
+static void push_char_into_port(struct tty_port *port, const char *buf, int len)
+{
+	int count;
+
+	if (len <= 0)
+		return;
+
+	do {
+		count = tty_insert_flip_string(port, buf, len);
+		len -= count;
+		buf += count;
+	} while (len > 0);
+
+	tty_flip_buffer_push(port);
+}
+
+static int intel_mcu_tty_open(struct tty_struct *tty, struct file *filp)
+{
+	dev_dbg(tty->dev, "%s\n", __func__);
+	tty->driver_data = mcu_table[tty->index];
+	/*
+	 * Data may have been cached while this tty was closed, so
+	 * flush the buffer now so the reader gets the full content.
+	 */
+	if (tty->port)
+		tty_flip_buffer_push(tty->port);
+
+	return 0;
+}
+
+static void intel_mcu_tty_close(struct tty_struct *tty, struct file *filp)
+{
+	dev_dbg(tty->dev, "%s\n", __func__);
+	tty->driver_data = NULL;
+}
+
+static int do_get_ver(struct mcu_data *data)
+{
+	struct psh_msg in;
+
+	in.param = 0;
+	in.msg = CMD_MCU_APP_GET_VERSION;
+	return send_cmd(data, &in, PSH2IA_CHANNEL2, 1);
+}
+
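+/*
+ * One-shot firmware load (guarded by fw_load_done): copy intel_mcu.bin
+ * into the BAR0-backed IMR, point the PSH at it with CMD_MCU_LOAD_APP,
+ * then hand over the BAR1 DDR region with CMD_MCU_SETUP_DDR.
+ */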
+static int do_setup_ddr(struct mcu_data *data)
+{
+	struct mcu *mcu = data->mcu;
+	const struct firmware *fw_entry;
+	static int fw_load_done;
+	char fname[20];
+	struct psh_msg in;
+
+	if (fw_load_done)
+		return 0;
+
+	snprintf(fname, 20, "intel_mcu.bin");
+	if (!request_firmware(&fw_entry, fname, data->dev)) {
+		if (!fw_entry)
+			return -ENOMEM;
+
+		pr_debug("psh fw size %d virt:0x%p\n",
+				(int)fw_entry->size, fw_entry->data);
+		if (fw_entry->size > APP_IMR_SIZE) {
+			pr_err("psh fw size too big\n");
+		} else {
+			memcpy(mcu->ddr[0], fw_entry->data,
+					fw_entry->size);
+			in.msg = CMD_MCU_LOAD_APP;
+			in.param = mcu->ddr_phy[0];
+			mcu->load_in_progress = 1;
+			if (send_cmd(data, &in, PSH2IA_CHANNEL3, 1)) {
+				release_firmware(fw_entry);
+				return -1;
+			}
+			fw_load_done = 1;
+		}
+		release_firmware(fw_entry);
+	} else {
+		pr_err("cannot find psh firmware(%s)\n", fname);
+		return -ENODEV;
+	}
+	in.msg = CMD_MCU_SETUP_DDR;
+	in.param = mcu->ddr_phy[1];
+	return send_cmd(data, &in, PSH2IA_CHANNEL2, 1);
+}
+
+static ssize_t load_app_store(struct device *device,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int len = strlen(LOAD_APP);
+
+	if (count >= len && strncmp(buf, LOAD_APP, len) == 0) {
+		do_setup_ddr(mcu_table[2]);
+		return count;
+	}
+
+	pr_err("Please provide right string as [%s]!\n", LOAD_APP);
+	return -1;
+}
+
+static ssize_t get_ver_show(struct device *device,
+		struct device_attribute *attr, char *buf)
+{
+	struct mcu_data *data = mcu_table[2];
+	struct mcu *mcu = data->mcu;
+
+	if (do_get_ver(data))
+		return -1;
+
+	return scnprintf(buf, VER_LEN, "%s", mcu->ver);
+}
+
+static ssize_t mdbg_control_show(struct device *device,
+		struct device_attribute *attr, char *buf)
+{
+	if (intel_mcu_mcudbg_level(mcu_table[2], -1) < 0)
+		goto err;
+
+	if (log_level > 0 && log_level < 6)
+		return scnprintf(buf, 8, "%s\n", debug_msg[log_level - 1]);
+
+err:
+	pr_info("get log level err\n");
+	return -1;
+}
+/*
+ * Set the msg level: echo fatal|error|warning|info|debug > log_level
+ */
+#define LOG_LEVEL	"fatal|error|warning|info|debug"
+static ssize_t mdbg_control_store(struct device *device,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct mcu_data *data = mcu_table[2];
+	int level = 0;
+	long ltmp = 0;
+
+	if (!buf)
+		return -1;
+	if (!strncmp(buf, "fatal", strlen("fatal")))
+		level = 1;
+	else if (!strncmp(buf, "error", strlen("error")))
+		level = 2;
+	else if (!strncmp(buf, "warning", strlen("warning")))
+		level = 3;
+	else if (!strncmp(buf, "info", strlen("info")))
+		level = 4;
+	else if (!strncmp(buf, "debug", strlen("debug")))
+		level = 5;
+	else {
+		int err;
+		err = kstrtol(buf, 10, &ltmp);
+		if (!err && (ltmp > 0) && (ltmp < 6))
+			level = ltmp;
+		else {
+			pr_err("Please input words as [%s]\n", LOG_LEVEL);
+			return -1;
+		}
+	}
+	pr_info("set level:%d\n", level);
+	if (intel_mcu_mcudbg_level(data, level) < 0)
+		return -1;
+	return count;
+}
+
+static DEVICE_ATTR(control, 0200, NULL, load_app_store);
+static DEVICE_ATTR(fw_version, 0400, get_ver_show, NULL);
+static DEVICE_ATTR(log_level, 0600, mdbg_control_show, mdbg_control_store);
+
+static struct attribute *control_sysfs_attrs[] = {
+	&dev_attr_control.attr,
+	&dev_attr_fw_version.attr,
+	&dev_attr_log_level.attr,
+	NULL,
+
+};
+
+static struct attribute_group intel_mcu_tty_attribute_group = {
+	.name = NULL,
+	.attrs = control_sysfs_attrs,
+
+};
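+
+/*
+ * Illustrative sysfs usage (the exact path depends on where the
+ * "intel_mcu" platform device lands, e.g. /sys/devices/platform/intel_mcu):
+ *
+ *   echo "load mcu app" > control   # load intel_mcu.bin into the MCU
+ *   cat fw_version                  # query the running app version
+ *   echo debug > log_level          # raise the MCU log verbosity
+ */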
+
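+/*
+ * Feed a byte stream to the PSH as a series of 4-byte IPC messages;
+ * every message except the last is flagged PSH_IPC_CONTINUE.
+ */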
+static void raw_output(struct mcu_data *data, int ch,
+		const unsigned char *buf, int count)
+{
+	struct psh_msg in;
+	int i, left;
+
+	for (i = 0; i < count; i += 4) {
+		left = count - i;
+		if (left > 4) {
+			left = 4;
+			in.msg = PSH_IPC_CONTINUE;
+		} else
+			in.msg = 0;
+
+		memcpy(&in.param, buf, left);
+		buf += left;
+		send_cmd(data, &in, ch, 0);
+	}
+}
+
+#define TTY_WRITE_ROOM		512
+static int intel_mcu_tty_write(struct tty_struct *tty,
+		const unsigned char *buf, int count)
+{
+	struct mcu_data *data = tty->driver_data;
+
+	switch (tty->index) {
+	default:
+		pr_err("TTY index %d not supported!\n", tty->index);
+	case 1:
+		return -1;
+	case 0:
+		if (count > TTY_WRITE_ROOM) {
+			pr_err("Port 0's input size is limited by %d!\n",
+					TTY_WRITE_ROOM);
+			return -1;
+		}
+		raw_output(data, tty->index, buf, count);
+		break;
+	}
+	return count;
+}
+
+static int intel_mcu_tty_write_room(struct tty_struct *tty)
+{
+	return TTY_WRITE_ROOM;
+}
+
+static const struct tty_operations intel_mcu_ops = {
+	.open =			intel_mcu_tty_open,
+	.close =		intel_mcu_tty_close,
+	.write =		intel_mcu_tty_write,
+	.write_room =		intel_mcu_tty_write_room,
+};
+
+static int mem_alloc(struct pci_dev *pdev, uintptr_t *phy_addr,
+		void **virt_addr, int bar)
+{
+	void __iomem *mem;
+	int ret = 0;
+	unsigned long start = 0, len;
+
+	/* dedicate isolated memory region */
+	start = pci_resource_start(pdev, bar);
+	len = pci_resource_len(pdev, bar);
+	if (!start || !len) {
+		dev_err(&pdev->dev, "bar %d address not set\n", bar);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	ret = pci_request_region(pdev, bar, "intel_mcu");
+	if (ret) {
+		dev_err(&pdev->dev,
+				"failed to request psh region 0x%lx-0x%lx\n",
+				start,
+				(unsigned long)pci_resource_end(pdev, bar));
+		goto err;
+	}
+
+	mem = ioremap_nocache(start, len);
+	if (!mem) {
+		dev_err(&pdev->dev, "can not ioremap app imr address\n");
+		ret = -EINVAL;
+		goto err_ioremap;
+	}
+
+	*phy_addr = start;
+	*virt_addr = (void *)mem;
+	return 0;
+
+err_ioremap:
+	pci_release_region(pdev, bar);
+err:
+	return ret;
+}
+
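+/*
+ * Completion handler for the command channel: drain every pending
+ * cmd_resp frame from the shared-DDR loop buffer and dispatch on cmd_id.
+ */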
+static void cmd_handler(u32 msg, u32 param, void *_data)
+{
+	struct mcu_data *data = (struct mcu_data *)_data;
+	struct mcu *mcu = data->mcu;
+	struct cmd_resp *resp;
+	const struct version_resp *version;
+	struct debug_resp *debug_resp;
+	u8 *dbuf = NULL;
+	u16 size = 0;
+
+	if (mcu->load_in_progress) {
+		mcu->load_in_progress = 0;
+		goto done;
+	}
+
+	while (!lbuf_read_next(&data->lbuf, &dbuf, &size)) {
+		resp = (struct cmd_resp *)dbuf;
+
+		if (!resp->len)
+			continue;
+
+		switch (resp->cmd_id) {
+		case CMD_MCU_APP_GET_VERSION:
+			version = (struct version_resp *)resp->param;
+			if (version->total_length)
+				snprintf(mcu->ver, VER_LEN, "%.*s",
+						version->total_length,
+						version->buf);
+			break;
+		case CMD_MCU_APP_DEBUG:
+			debug_resp = (struct debug_resp *)resp->param;
+			log_level = debug_resp->level;
+			break;
+		default:
+			break;
+		}
+	}
+done:
+	complete(&data->cmp);
+}
+
+static void raw_data_handler(u32 msg, u32 param, void *_data)
+{
+	struct mcu_data *data = (struct mcu_data *)_data;
+	struct cmd_resp *resp;
+	u8 *dbuf = NULL;
+	u16 size = 0;
+
+	while (!lbuf_read_next(&data->lbuf, &dbuf, &size)) {
+		resp = (struct cmd_resp *)dbuf;
+		push_char_into_port(&data->port, resp->param, resp->len);
+	}
+	complete(&data->cmp);
+}
+
+static int mcu_platform_probe(struct platform_device *pdev)
+{
+	int ret, i;
+	struct mcu_data *data;
+	struct mcu *mcu;
+	u8 *base;
+
+	mcu = platform_get_drvdata(pdev);
+	intel_mcu_tty_driver = alloc_tty_driver(INTEL_MCU_TTY_MINORS);
+	if (!intel_mcu_tty_driver) {
+		dev_err(&pdev->dev, "fail to alloc tty driver\n");
+		return -ENODEV;
+	}
+
+	intel_mcu_tty_driver->name = "ttymcu";
+	intel_mcu_tty_driver->major = INTEL_MCU_TTY_MAJOR;
+	intel_mcu_tty_driver->minor_start = 0;
+	intel_mcu_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+	intel_mcu_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+	intel_mcu_tty_driver->flags = TTY_DRIVER_REAL_RAW
+		| TTY_DRIVER_DYNAMIC_DEV;
+	intel_mcu_tty_driver->init_termios = tty_std_termios;
+	intel_mcu_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD |
+		HUPCL | CLOCAL;
+	intel_mcu_tty_driver->init_termios.c_ispeed = 38400;
+	intel_mcu_tty_driver->init_termios.c_ospeed = 38400;
+	intel_mcu_tty_driver->init_termios.c_iflag = 0;
+	intel_mcu_tty_driver->init_termios.c_oflag = 0;
+	intel_mcu_tty_driver->init_termios.c_lflag = 0;
+	tty_set_operations(intel_mcu_tty_driver, &intel_mcu_ops);
+
+	ret = tty_register_driver(intel_mcu_tty_driver);
+	if (ret) {
+		dev_err(&pdev->dev, "fail to register tty driver\n");
+		goto tty_reg_fail;
+	}
+
+	base = (u8 *)mcu->ddr[1];
+	for (i = INTEL_MCU_TTY_MINORS - 1; i >= 0; i--) {
+		data = kzalloc(sizeof(struct mcu_data), GFP_KERNEL);
+		if (data == NULL) {
+			dev_err(&pdev->dev, "fail to alloc mcu data\n");
+			ret = -ENOMEM;
+			goto data_alloc_fail;
+		}
+
+		data->index = i;
+		tty_port_init(&data->port);
+		data->dev = tty_port_register_device(&data->port,
+				intel_mcu_tty_driver, i, &pdev->dev);
+		mcu_table[i] = data;
+		data->mcu = mcu;
+		init_completion(&data->cmp);
+		data->lbuf.addr = base;
+		data->lbuf.length = BUF_IA_DDR_SIZE;
+		lbuf_read_reset(&data->lbuf);
+		base += BUF_IA_DDR_SIZE;
+	}
+	ret = sysfs_create_group(&pdev->dev.kobj,
+			&intel_mcu_tty_attribute_group);
+	if (ret) {
+		pr_err("failed to create the mdbg sysfs attributes\n");
+		sysfs_remove_group(&pdev->dev.kobj,
+				&intel_mcu_tty_attribute_group);
+		goto data_alloc_fail;
+	}
+
+	intel_psh_ipc_bind(PSH_RECV_CH0, raw_data_handler, mcu_table[0]);
+	intel_psh_ipc_bind(PSH_RECV_CH1, raw_data_handler, mcu_table[1]);
+	intel_psh_ipc_bind(PSH_RECV_CH2, cmd_handler, mcu_table[2]);
+
+	pr_info("MCU detected and ready to used!\n");
+
+	return 0;
+
+data_alloc_fail:
+	for (i = 0; i < INTEL_MCU_TTY_MINORS; i++)
+		kfree(mcu_table[i]);
+	tty_unregister_driver(intel_mcu_tty_driver);
+tty_reg_fail:
+	put_tty_driver(intel_mcu_tty_driver);
+	return ret;
+}
+
+static int mcu_platform_remove(struct platform_device *pdev)
+{
+	struct mcu *mcu;
+	int i;
+
+	mcu = platform_get_drvdata(pdev);
+	sysfs_remove_group(&pdev->dev.kobj,
+			&intel_mcu_tty_attribute_group);
+
+	for (i = 0; i < INTEL_MCU_TTY_MINORS; i++)
+		kfree(mcu_table[i]);
+	tty_unregister_driver(intel_mcu_tty_driver);
+	put_tty_driver(intel_mcu_tty_driver);
+	kfree(mcu);
+
+	return 0;
+}
+
+static struct platform_driver intel_mcu_platform = {
+	.driver = {
+		.name	= "intel_mcu",
+	},
+	.probe		= mcu_platform_probe,
+	.remove		= mcu_platform_remove,
+};
+module_platform_driver(intel_mcu_platform);
+
+static int intel_mcu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct platform_device *dev;
+	struct mcu *mcu;
+	int ret;
+
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "fail to enable psh pci device\n");
+		return -ENODEV;
+	}
+
+	mcu = kzalloc(sizeof(struct mcu), GFP_KERNEL);
+	if (!mcu) {
+		dev_err(&pdev->dev, "cannot allocate memory for mcu\n");
+		ret = -ENOMEM;
+		goto mcu_err;
+	}
+
+	ret = mem_alloc(pdev, &mcu->ddr_phy[0], &mcu->ddr[0], 0);
+	if (ret)
+		goto plat_alloc_fail;
+
+	ret = mem_alloc(pdev, &mcu->ddr_phy[1], &mcu->ddr[1], 1);
+	if (ret)
+		goto plat_alloc_fail;
+
+	dev = platform_device_alloc("intel_mcu", -1);
+	if (!dev) {
+		ret = -ENODEV;
+		goto plat_alloc_fail;
+	}
+
+	dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
+	platform_set_drvdata(dev, mcu);
+	dev_set_drvdata(&pdev->dev, mcu);
+
+	ret = platform_device_add(dev);
+	return ret;
+
+plat_alloc_fail:
+	kfree(mcu);
+mcu_err:
+	pci_dev_put(pdev);
+	return ret;
+}
+
+static void intel_mcu_remove(struct pci_dev *pdev)
+{
+	struct mcu *mcu;
+
+	mcu = dev_get_drvdata(&pdev->dev);
+	iounmap((void __iomem *)mcu->ddr[0]);
+	iounmap((void __iomem *)mcu->ddr[1]);
+
+	pci_release_region(pdev, 0);
+	pci_release_region(pdev, 1);
+	pci_dev_put(pdev);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x11a4)},
+	{ 0,}
+};
+
+MODULE_DEVICE_TABLE(pci, pci_ids);
+static struct pci_driver intel_mcu_driver = {
+	.name = "intel_mcu",
+	.id_table = pci_ids,
+	.probe	= intel_mcu_probe,
+	.remove	= intel_mcu_remove,
+};
+
+static int __init intel_mcu_init(void)
+{
+	return pci_register_driver(&intel_mcu_driver);
+}
+
+static void __exit intel_mcu_exit(void)
+{
+	pci_unregister_driver(&intel_mcu_driver);
+}
+
+module_init(intel_mcu_init);
+module_exit(intel_mcu_exit);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(INTEL_MCU_TTY_MAJOR);
diff --git a/drivers/hwmon/intel_mcu_common.h b/drivers/hwmon/intel_mcu_common.h
new file mode 100644
index 0000000..1ffd081
--- /dev/null
+++ b/drivers/hwmon/intel_mcu_common.h
@@ -0,0 +1,79 @@
+#ifndef _INTEL_MCU_COMMON_H_
+#define _INTEL_MCU_COMMON_H_
+
+#define PSH2IA_CHANNEL0	0
+#define PSH2IA_CHANNEL1	1
+#define PSH2IA_CHANNEL2	2
+#define PSH2IA_CHANNEL3	3
+
+enum cmd_id {
+	CMD_MCU_LOAD_APP = 0,
+	CMD_MCU_SETUP_DDR,
+	CMD_MCU_APP_DEBUG,
+	CMD_MCU_APP_GET_VERSION,
+};
+
+#define CIRC_SIZE (1024 * 64)
+struct ddr_param {
+	u32 ddr;
+	u32 ddr1;
+} __packed;
+
+#define CMD_DEBUG_SET_MASK	((u8)0x1)
+#define CMD_DEBUG_GET_MASK	((u8)0x2)
+#define MCU_DBG_ALL		((u16)-1)
+#define MCU_DBG_FATAL		1
+#define MCU_DBG_ERR		2
+#define MCU_DBG_WARN	3
+#define MCU_DBG_INFO	4
+#define MCU_DBG_DBG		5
+
+struct cmd_debug_param {
+	u8 sub_cmd;
+	u16 level;
+	char tag[30];
+} __packed;
+
+#define RESP_PARAM_MAX_SIZE	56
+struct cmd_resp {
+	u8 cmd_id;
+	u8 len;
+	int ret;
+	char param[RESP_PARAM_MAX_SIZE];
+} __packed;
+
+struct debug_resp {
+	u16 level;
+} __packed;
+
+struct version_resp {
+	u8 total_length;
+	u8 segment_length;
+	u8 sequence_number;
+	char buf[0];
+} __packed;
+
+#define LBUF_CELL_SIGN ((u16)0x4853)
+#define LBUF_EMPTY_SIGN ((u16)0x0000)
+#define LBUF_DISCARD_SIGN ((u16)0x4944)
+#define size_align(size) (((size) % 4) ? ((size) + 4 - ((size) % 4)) : (size))
+#define frame_size(size) (size_align(size) + \
+		sizeof(struct frame_head))
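+/* e.g. a 5-byte payload pads to size_align(5) = 8 bytes plus the header */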
+
+struct frame_head {
+	u16 sign;
+	u16 length;
+	u8 buf[0];
+} __packed;
+
+#define BUF_IA_DDR_SIZE 8192
+struct loop_buffer {
+	int in_reading;
+	u8 *addr;
+	u16 length;
+
+	u16 off_head;
+	u16 off_tail;
+};
+
+#endif
diff --git a/drivers/hwmon/intel_mid_gpadc.c b/drivers/hwmon/intel_mid_gpadc.c
new file mode 100644
index 0000000..f4d0d2f
--- /dev/null
+++ b/drivers/hwmon/intel_mid_gpadc.c
@@ -0,0 +1,1212 @@
+/*
+ * intel_mid_gpadc.c - Intel Medfield MSIC GPADC Driver
+ *
+ * Copyright (C) 2010 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Jenny TC <jenny.tc@intel.com>
+ * Author: Bin Yang <bin.yang@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/pm_qos.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/workqueue.h>
+#include <linux/fs.h>
+#include <linux/rpmsg.h>
+
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_mid_remoteproc.h>
+#include <asm/intel_mid_gpadc.h>
+
+#define VAUDACNT		0x0DB
+#define MCCINT			0x013
+#define IRQLVL1			0x002
+#define IRQLVL1MSK		0x021
+#define ADC1INT			0x003
+#define ADC1ADDR0		0x1C5
+#define ADC1SNS0H		0x1D4
+#define ADC1OFFSETH		0x1C3
+#define ADC1OFFSETL		0x1C4
+#define ADC1CNTL1		0x1C0
+#define ADC1CNTL2		0x1C1
+#define ADC1CNTL3		0x1C2
+#define	ADC1BV0H		0x1F2
+#define ADC1BI0H		0x1FA
+
+#ifdef CONFIG_BOARD_CTP
+#define EEPROMCAL1		0x309
+#define EEPROMCAL2		0x30A
+#else
+#define EEPROMCAL1		0x317
+#define EEPROMCAL2		0x318
+#endif
+
+#define MCCINT_MCCCAL		(1 << 1)
+#define MCCINT_MOVERFLOW	(1 << 0)
+
+#define IRQLVL1MSK_ADCM		(1 << 1)
+
+#define ADC1CNTL1_AD1OFFSETEN	(1 << 6)
+#define ADC1CNTL1_AD1CALEN	(1 << 5)
+#define ADC1CNTL1_ADEN		(1 << 4)
+#define ADC1CNTL1_ADSTRT	(1 << 3)
+#define ADC1CNTL1_ADSLP		7
+#define ADC1CNTL1_ADSLP_DEF	1
+
+#define ADC1INT_ADC1CAL		(1 << 2)
+#define ADC1INT_GSM		(1 << 1)
+#define ADC1INT_RND		(1 << 0)
+
+#define ADC1CNTL3_ADCTHERM	(1 << 2)
+#define ADC1CNTL3_GSMDATARD	(1 << 1)
+#define ADC1CNTL3_RRDATARD	(1 << 0)
+
+#define ADC1CNTL2_DEF		0x7
+#define ADC1CNTL2_ADCGSMEN	(1 << 7)
+
+#define MSIC_STOPCH		(1 << 4)
+
+#define GPADC_CH_MAX		15
+
+#define GPADC_POWERON_DELAY	1
+
+#define SAMPLE_CH_MAX		2
+
+static void *adc_handle[GPADC_CH_MAX] = {};
+static int sample_result[GPADC_CH_MAX][SAMPLE_CH_MAX];
+static struct completion gsmadc_complete;
+static int vol_val;
+static int cur_val;
+
+struct gpadc_info {
+	int initialized;
+	int depth;
+
+	struct workqueue_struct *workq;
+	wait_queue_head_t trimming_wait;
+	struct work_struct trimming_work;
+	struct work_struct gsmpulse_work;
+	int trimming_start;
+
+	/*
+	 * This mutex protects gpadc sampling/configuration from
+	 * concurrent access: any function that samples or configures
+	 * must hold it. While it is held, the gpadc is in active mode.
+	 * GSM mode sampling does not need this lock; it can run
+	 * concurrently with normal sampling without a power-on.
+	 */
+	struct mutex lock;
+	struct device *dev;
+	int irq;
+	void __iomem *intr;
+	int irq_status;
+
+	int vzse;
+	int vge;
+	int izse;
+	int ige;
+	int addr_mask;
+
+	wait_queue_head_t wait;
+	int rnd_done;
+	int conv_done;
+	int gsmpulse_done;
+
+	struct pm_qos_request pm_qos_request;
+	void (*gsmadc_notify)(int vol, int cur);
+
+	int pmic_ipc_status;
+};
+
+struct gpadc_request {
+	int count;
+	int vref;
+	int ch[GPADC_CH_MAX];
+	int addr[GPADC_CH_MAX];
+};
+
+static struct gpadc_info gpadc_info;
+
+static inline int gpadc_clear_bits(u16 addr, u8 mask)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+	int ret;
+
+	if (mgi->pmic_ipc_status)
+		return -EINVAL;
+
+	ret = intel_scu_ipc_update_register(addr, 0, mask);
+	if (ret)
+		mgi->pmic_ipc_status = -EINVAL;
+
+	return ret;
+}
+
+static inline int gpadc_set_bits(u16 addr, u8 mask)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+	int ret;
+
+	if (mgi->pmic_ipc_status)
+		return -EINVAL;
+
+	ret = intel_scu_ipc_update_register(addr, 0xff, mask);
+	if (ret)
+		mgi->pmic_ipc_status = -EINVAL;
+
+	return ret;
+}
+
+static inline int gpadc_write(u16 addr, u8 data)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+	int ret;
+
+	if (mgi->pmic_ipc_status)
+		return -EINVAL;
+
+	ret = intel_scu_ipc_iowrite8(addr, data);
+	if (ret)
+		mgi->pmic_ipc_status = -EINVAL;
+
+	return ret;
+}
+
+static inline int gpadc_read(u16 addr, u8 *data)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+	int ret;
+
+	if (mgi->pmic_ipc_status)
+		return -EINVAL;
+
+	ret = intel_scu_ipc_ioread8(addr, data);
+	if (ret)
+		mgi->pmic_ipc_status = -EINVAL;
+
+	return ret;
+}
+
+static void gpadc_dump(struct gpadc_info *mgi)
+{
+	u8 data;
+	int i;
+
+	dev_err(mgi->dev, "pmic ipc status: %s\n",
+			mgi->pmic_ipc_status ? "bad" : "good");
+	gpadc_read(VAUDACNT, &data);
+	dev_err(mgi->dev, "VAUDACNT: 0x%x\n", data);
+	gpadc_read(IRQLVL1MSK, &data);
+	dev_err(mgi->dev, "IRQLVL1MSK: 0x%x\n", data);
+	gpadc_read(IRQLVL1, &data);
+	dev_err(mgi->dev, "IRQLVL1: 0x%x\n", data);
+	gpadc_read(ADC1INT, &data);
+	dev_err(mgi->dev, "ADC1INT: 0x%x\n", data);
+	gpadc_read(ADC1CNTL1, &data);
+	dev_err(mgi->dev, "ADC1CNTL1: 0x%x\n", data);
+	gpadc_read(ADC1CNTL2, &data);
+	dev_err(mgi->dev, "ADC1CNTL2: 0x%x\n", data);
+	gpadc_read(ADC1CNTL3, &data);
+	dev_err(mgi->dev, "ADC1CNTL3: 0x%x\n", data);
+	for (i = 0; i < GPADC_CH_MAX; i++) {
+		gpadc_read(ADC1ADDR0+i, &data);
+		dev_err(mgi->dev, "ADC1ADDR[%d]: 0x%x\n", i, data);
+	}
+}
+
+static int gpadc_poweron(struct gpadc_info *mgi, int vref)
+{
+	if (!mgi->depth++) {
+		if (gpadc_set_bits(ADC1CNTL1, ADC1CNTL1_ADEN) != 0)
+			return -EIO;
+		msleep(GPADC_POWERON_DELAY);
+	}
+	if (vref) {
+		if (gpadc_set_bits(ADC1CNTL3, ADC1CNTL3_ADCTHERM) != 0)
+			return -EIO;
+		msleep(GPADC_POWERON_DELAY);
+	}
+	return 0;
+}
+
+static int gpadc_poweroff(struct gpadc_info *mgi)
+{
+	if (!--mgi->depth) {
+		if (gpadc_clear_bits(ADC1CNTL1, ADC1CNTL1_ADEN) != 0)
+			return -EIO;
+		if (gpadc_clear_bits(ADC1CNTL3, ADC1CNTL3_ADCTHERM) != 0)
+			return -EIO;
+	}
+	return 0;
+}
+
+static int gpadc_calib(int rc, int zse, int ge)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+	int tmp;
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW) {
+		if (ge == 0) {
+			dev_err(mgi->dev, "calibration divider is zero\n");
+			return 0;
+		}
+
+		/**
+		 * For Cloverview, using the calibration data, we have the
+		 * voltage and current after calibration correction as below:
+		 * V_CAL_CODE = 213.33 * (V_RAW_CODE - VZSE) / VGE
+		 * I_CAL_CODE = 213.33 * (I_RAW_CODE - IZSE) / IGE
+		 */
+
+		/* note: the input zse is multiplied by 10 and the input
+		 * ge by 100; both scalings are handled here
+		 */
+		tmp = 21333 * (10 * rc - zse) / ge;
+	} else {
+		/**
+		 * For Medfield, using the calibration data, we have the
+		 * voltage and current after calibration correction as below:
+		 * V_CAL_CODE = V_RAW_CODE - (VZSE + (VGE)* VRAW_CODE/1023)
+		 * I_CAL_CODE = I_RAW_CODE - (IZSE + (IGE)* IRAW_CODE/1023)
+		 */
+		tmp = (10230 * rc - (10230 * zse + 10 * ge * rc)) / 1023;
+	}
+
+	/* tmp is 10 times the result value; rounding it recovers
+	 * the closest integer result
+	 */
+	return DIV_ROUND_CLOSEST(tmp, 10);
+
+}
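+/*
+ * Worked example with hypothetical Cloverview trim values: zse = 20
+ * (2.0 scaled by 10) and ge = 21333 (213.33 scaled by 100) turn a raw
+ * code of 512 into tmp = 21333 * (5120 - 20) / 21333 = 5100, which
+ * DIV_ROUND_CLOSEST(5100, 10) rounds to 510.
+ */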
+
+static void gpadc_calc_zse_ge(struct gpadc_info *mgi)
+{
+	u8 data;
+	int fse, zse, fse_sign, zse_sign, ge, ge_sign;
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW) {
+		gpadc_read(EEPROMCAL1, &data);
+		zse = data & 0xf;
+		ge = (data >> 4) & 0xf;
+		gpadc_read(EEPROMCAL2, &data);
+		zse_sign = (data & (1 << 6)) ? -1 : 1;
+		ge_sign = (data & (1 << 7)) ? -1 : 1;
+		zse *= zse_sign;
+		ge *= ge_sign;
+		/* zse/2 may yield 0.5, so scale by 10 to avoid floats */
+		mgi->vzse = mgi->izse = zse * 10 / 2;
+		/* vge is scaled by 100 to avoid floats */
+		mgi->vge = mgi->ige = 21333 - (ge * 100 / 4);
+	} else {
+		/* voltage trim */
+		gpadc_read(EEPROMCAL1, &data);
+		zse = (data & 0xf)/2;
+		fse = ((data >> 4) & 0xf)/2;
+		gpadc_read(EEPROMCAL2, &data);
+		zse_sign = (data & (1 << 6)) ? 1 : 0;
+		fse_sign = (data & (1 << 7)) ? 1 : 0;
+		zse *= zse_sign;
+		fse *= fse_sign;
+		mgi->vzse = zse;
+		mgi->vge = fse - zse;
+
+		/* current trim */
+		fse = (data & 0xf)/2;
+		fse_sign = (data & (1 << 5)) ? 1 : 0;
+		fse = ~(fse_sign * fse) + 1;
+		gpadc_read(ADC1OFFSETH, &data);
+		zse = data << 2;
+		gpadc_read(ADC1OFFSETL, &data);
+		zse += data & 0x3;
+		mgi->izse = zse;
+		mgi->ige = fse + zse;
+	}
+}
+
+static void gpadc_trimming(struct work_struct *work)
+{
+	u8 data;
+	struct gpadc_info *mgi =
+		container_of(work, struct gpadc_info, trimming_work);
+
+	mutex_lock(&mgi->lock);
+	mgi->trimming_start = 1;
+	wake_up(&mgi->trimming_wait);
+	if (gpadc_poweron(mgi, 1)) {
+		dev_err(mgi->dev, "power on failed\n");
+		goto failed;
+	}
+	/* calibration */
+	gpadc_read(ADC1CNTL1, &data);
+	data &= ~ADC1CNTL1_AD1OFFSETEN;
+	data |= ADC1CNTL1_AD1CALEN;
+	gpadc_write(ADC1CNTL1, data);
+	gpadc_read(ADC1INT, &data);
+
+	/* workaround: no calibration interrupt is delivered */
+	msleep(300);
+	gpadc_set_bits(ADC1INT, ADC1INT_ADC1CAL);
+	gpadc_clear_bits(ADC1CNTL1, ADC1CNTL1_AD1CALEN);
+
+	gpadc_calc_zse_ge(mgi);
+
+	if (gpadc_poweroff(mgi)) {
+		dev_err(mgi->dev, "power off failed\n");
+		goto failed;
+	}
+
+failed:
+	mutex_unlock(&mgi->lock);
+}
+
+static irqreturn_t msic_gpadc_isr(int irq, void *data)
+{
+	struct gpadc_info *mgi = data;
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW)
+		mgi->irq_status = ADC1INT_RND;
+	else
+		mgi->irq_status = readl(mgi->intr) >> 8 & 0xff;
+
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t msic_gpadc_irq(int irq, void *data)
+{
+	struct gpadc_info *mgi = data;
+
+	if (mgi->irq_status & ADC1INT_GSM) {
+		mgi->gsmpulse_done = 1;
+		queue_work(mgi->workq, &mgi->gsmpulse_work);
+	} else if (mgi->irq_status & ADC1INT_RND) {
+		mgi->rnd_done = 1;
+		wake_up(&mgi->wait);
+	} else if (mgi->irq_status & ADC1INT_ADC1CAL) {
+		mgi->conv_done = 1;
+		wake_up(&mgi->wait);
+	} else {
+		/* coulomb counter should be handled by firmware. Ignore it */
+		dev_dbg(mgi->dev, "coulomb counter is not support\n");
+	}
+	return IRQ_HANDLED;
+}
+
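+/*
+ * Channel address slots: conversion stops at the ADC1ADDRx slot flagged
+ * with MSIC_STOPCH, so the stop flag must always sit on the highest
+ * allocated slot; alloc/free below keep it there.
+ */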
+static int alloc_channel_addr(struct gpadc_info *mgi, int ch)
+{
+	int i;
+	int addr = -EBUSY;
+	int last = 0;
+
+	for (i = 0; i < GPADC_CH_MAX; i++)
+		if (mgi->addr_mask & (1 << i))
+			last = i;
+
+	for (i = 0; i < GPADC_CH_MAX; i++) {
+		if (!(mgi->addr_mask & (1 << i))) {
+			addr = i;
+			mgi->addr_mask |= 1 << i;
+			if (addr > last) {
+				gpadc_clear_bits(ADC1ADDR0+last, MSIC_STOPCH);
+				gpadc_write(ADC1ADDR0+addr, ch|MSIC_STOPCH);
+			} else {
+				gpadc_write(ADC1ADDR0+addr, ch);
+			}
+			break;
+		}
+	}
+	return addr;
+}
+
+static void free_channel_addr(struct gpadc_info *mgi, int addr)
+{
+	int last = 0;
+	int i;
+
+	mgi->addr_mask &= ~(1 << addr);
+	for (i = 0; i < GPADC_CH_MAX; i++)
+		if (mgi->addr_mask & (1 << i))
+			last = i;
+	if (addr > last)
+		gpadc_set_bits(ADC1ADDR0+last, MSIC_STOPCH);
+}
+
+static void gpadc_gsmpulse_work(struct work_struct *work)
+{
+	int i;
+	u8 data;
+	int tmp;
+	int vol, cur;
+	struct gpadc_info *mgi =
+		container_of(work, struct gpadc_info, gsmpulse_work);
+
+	mutex_lock(&mgi->lock);
+	gpadc_set_bits(ADC1CNTL3, ADC1CNTL3_GSMDATARD);
+
+	vol = 0;
+	cur = 0;
+	for (i = 0; i < 4; i++) {
+		gpadc_read(ADC1BV0H + i * 2, &data);
+		tmp = data << 2;
+		gpadc_read(ADC1BV0H + i * 2 + 1, &data);
+		tmp += data & 0x3;
+		if (tmp > vol)
+			vol = tmp;
+
+		gpadc_read(ADC1BI0H + i * 2, &data);
+		tmp = data << 2;
+		gpadc_read(ADC1BI0H + i * 2 + 1, &data);
+		tmp += data & 0x3;
+		if (tmp > cur)
+			cur = tmp;
+	}
+
+	vol = gpadc_calib(vol, mgi->vzse, mgi->vge);
+	cur = gpadc_calib(cur, mgi->izse, mgi->ige);
+
+	gpadc_set_bits(ADC1INT, ADC1INT_GSM);
+	gpadc_clear_bits(ADC1CNTL3, ADC1CNTL3_GSMDATARD);
+	if (mgi->gsmadc_notify)
+		mgi->gsmadc_notify(vol, cur);
+	mutex_unlock(&mgi->lock);
+}
+
+/**
+ * intel_mid_gpadc_gsmpulse_register - power on gsm adc and register a callback
+ * @fn: callback function after gsm adc conversion is completed
+ *
+ * Returns 0 on success or an error code.
+ *
+ * This function may sleep.
+ */
+int intel_mid_gpadc_gsmpulse_register(void(*fn)(int vol, int cur))
+{
+	int ret = 0;
+	struct gpadc_info *mgi = &gpadc_info;
+
+	if (!mgi->initialized)
+		return -ENODEV;
+	mutex_lock(&mgi->lock);
+	if (!mgi->gsmadc_notify) {
+		gpadc_write(ADC1CNTL2, ADC1CNTL2_DEF);
+		gpadc_set_bits(ADC1CNTL2, ADC1CNTL2_ADCGSMEN);
+		mgi->gsmadc_notify = fn;
+	} else {
+		ret = -EBUSY;
+	}
+	mutex_unlock(&mgi->lock);
+	return ret;
+}
+EXPORT_SYMBOL(intel_mid_gpadc_gsmpulse_register);
+
+/**
+ * intel_mid_gpadc_gsmpulse_unregister - power off gsm adc and unregister
+ *					the callback
+ * @fn: callback function after gsm adc conversion is completed
+ *
+ * Returns 0 on success or an error code.
+ *
+ * This function may sleep.
+ */
+int intel_mid_gpadc_gsmpulse_unregister(void(*fn)(int vol, int cur))
+{
+	int ret = 0;
+	struct gpadc_info *mgi = &gpadc_info;
+
+	if (!mgi->initialized)
+		return -ENODEV;
+	mutex_lock(&mgi->lock);
+	if (mgi->gsmadc_notify == fn) {
+		mgi->gsmadc_notify = NULL;
+		gpadc_clear_bits(ADC1CNTL2, ADC1CNTL2_ADCGSMEN);
+	}
+	mutex_unlock(&mgi->lock);
+	return ret;
+}
+EXPORT_SYMBOL(intel_mid_gpadc_gsmpulse_unregister);
+
+/**
+ * intel_mid_gpadc_sample - do gpadc sample.
+ * @handle: the gpadc handle
+ * @sample_count: sample several times and return the average value.
+ * @...: pointers receiving the sampled result of each channel
+ *       (as with sscanf); the caller must not access them before return.
+ *
+ * Returns 0 on success or an error code.
+ *
+ * This function may sleep.
+ */
+int intel_mid_gpadc_sample(void *handle, int sample_count, ...)
+{
+
+	struct gpadc_request *rq = handle;
+	struct gpadc_info *mgi = &gpadc_info;
+	int i;
+	u8 data;
+	int ret = 0;
+	int count;
+	int tmp;
+	int *val[GPADC_CH_MAX];
+	va_list args;
+
+	if (!mgi->initialized)
+		return -ENODEV;
+
+	mutex_lock(&mgi->lock);
+	mgi->pmic_ipc_status = 0;
+
+	va_start(args, sample_count);
+	for (i = 0; i < rq->count; i++) {
+		val[i] = va_arg(args, int*);
+		*val[i] = 0;
+	}
+	va_end(args);
+
+	pm_qos_add_request(&mgi->pm_qos_request,
+			PM_QOS_CPU_DMA_LATENCY,	CSTATE_EXIT_LATENCY_S0i1-1);
+	gpadc_poweron(mgi, rq->vref);
+	gpadc_clear_bits(ADC1CNTL1, ADC1CNTL1_AD1OFFSETEN);
+	gpadc_read(ADC1CNTL1, &data);
+	data = (data & ~ADC1CNTL1_ADSLP) + ADC1CNTL1_ADSLP_DEF;
+	gpadc_write(ADC1CNTL1, data);
+	mgi->rnd_done = 0;
+	gpadc_set_bits(ADC1CNTL1, ADC1CNTL1_ADSTRT);
+	for (count = 0; count < sample_count; count++) {
+		if (wait_event_timeout(mgi->wait, mgi->rnd_done, HZ) == 0) {
+			gpadc_dump(mgi);
+			dev_err(mgi->dev, "sample timeout\n");
+			ret = -ETIMEDOUT;
+			goto fail;
+		}
+		gpadc_set_bits(ADC1CNTL3, ADC1CNTL3_RRDATARD);
+		for (i = 0; i < rq->count; ++i) {
+			tmp = 0;
+			gpadc_read(ADC1SNS0H + 2 * rq->addr[i], &data);
+			tmp += data << 2;
+			gpadc_read(ADC1SNS0H + 2 * rq->addr[i] + 1, &data);
+			tmp += data & 0x3;
+
+			if (rq->ch[i] & CH_NEED_VCALIB)
+				tmp = gpadc_calib(tmp, mgi->vzse, mgi->vge);
+			if (rq->ch[i] & CH_NEED_ICALIB)
+				tmp = gpadc_calib(tmp, mgi->izse, mgi->ige);
+
+			*val[i] += tmp;
+		}
+		gpadc_clear_bits(ADC1CNTL3, ADC1CNTL3_RRDATARD);
+		mgi->rnd_done = 0;
+	}
+
+	for (i = 0; i < rq->count; ++i)
+		*val[i] /= sample_count;
+
+fail:
+	gpadc_clear_bits(ADC1CNTL1, ADC1CNTL1_ADSTRT);
+	gpadc_poweroff(mgi);
+	pm_qos_remove_request(&mgi->pm_qos_request);
+
+	if (mgi->pmic_ipc_status) {
+		dev_err(mgi->dev, "sample broken\n");
+		ret = mgi->pmic_ipc_status;
+	}
+	mutex_unlock(&mgi->lock);
+	return ret;
+}
+EXPORT_SYMBOL(intel_mid_gpadc_sample);
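+
+/*
+ * Illustrative caller (channel numbers are hypothetical):
+ *
+ *	void *h = intel_mid_gpadc_alloc(2, 0x08 | CH_NEED_VCALIB,
+ *					0x09 | CH_NEED_ICALIB);
+ *	int vol, cur;
+ *
+ *	if (h && !intel_mid_gpadc_sample(h, 16, &vol, &cur))
+ *		pr_info("vol=%d cur=%d\n", vol, cur);
+ *	if (h)
+ *		intel_mid_gpadc_free(h);
+ */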
+
+/**
+ * get_gpadc_sample() - get gpadc sample.
+ * @handle: the gpadc handle
+ * @sample_count: sample several times and return the average value.
+ * @buffer: array receiving the sampled result of each channel.
+ *
+ * Returns 0 on success or an error code.
+ *
+ * This function may sleep.
+ */
+int get_gpadc_sample(void *handle, int sample_count, int *buffer)
+{
+
+	struct gpadc_request *rq = handle;
+	struct gpadc_info *mgi = &gpadc_info;
+	int i;
+	u8 data;
+	int ret = 0;
+	int count;
+	int tmp;
+
+	if (!mgi->initialized)
+		return -ENODEV;
+
+	mutex_lock(&mgi->lock);
+	mgi->pmic_ipc_status = 0;
+
+	for (i = 0; i < rq->count; i++)
+		buffer[i] = 0;
+
+	pm_qos_add_request(&mgi->pm_qos_request,
+			PM_QOS_CPU_DMA_LATENCY,	CSTATE_EXIT_LATENCY_S0i1-1);
+	gpadc_poweron(mgi, rq->vref);
+	gpadc_clear_bits(ADC1CNTL1, ADC1CNTL1_AD1OFFSETEN);
+	gpadc_read(ADC1CNTL1, &data);
+	data = (data & ~ADC1CNTL1_ADSLP) + ADC1CNTL1_ADSLP_DEF;
+	gpadc_write(ADC1CNTL1, data);
+	mgi->rnd_done = 0;
+	gpadc_set_bits(ADC1CNTL1, ADC1CNTL1_ADSTRT);
+	for (count = 0; count < sample_count; count++) {
+		if (wait_event_timeout(mgi->wait, mgi->rnd_done, HZ) == 0) {
+			gpadc_dump(mgi);
+			dev_err(mgi->dev, "sample timeout\n");
+			ret = -ETIMEDOUT;
+			goto fail;
+		}
+		gpadc_set_bits(ADC1CNTL3, ADC1CNTL3_RRDATARD);
+		for (i = 0; i < rq->count; ++i) {
+			tmp = 0;
+			gpadc_read(ADC1SNS0H + 2 * rq->addr[i], &data);
+			tmp += data << 2;
+			gpadc_read(ADC1SNS0H + 2 * rq->addr[i] + 1, &data);
+			tmp += data & 0x3;
+
+			if (rq->ch[i] & CH_NEED_VCALIB)
+				tmp = gpadc_calib(tmp, mgi->vzse, mgi->vge);
+			if (rq->ch[i] & CH_NEED_ICALIB)
+				tmp = gpadc_calib(tmp, mgi->izse, mgi->ige);
+			buffer[i] += tmp;
+		}
+		gpadc_clear_bits(ADC1CNTL3, ADC1CNTL3_RRDATARD);
+		mgi->rnd_done = 0;
+	}
+
+	for (i = 0; i < rq->count; ++i)
+		buffer[i] /= sample_count;
+
+fail:
+	gpadc_clear_bits(ADC1CNTL1, ADC1CNTL1_ADSTRT);
+	gpadc_poweroff(mgi);
+	pm_qos_remove_request(&mgi->pm_qos_request);
+	if (mgi->pmic_ipc_status) {
+		dev_err(mgi->dev, "sample broken\n");
+		ret = mgi->pmic_ipc_status;
+	}
+	mutex_unlock(&mgi->lock);
+	return ret;
+}
+EXPORT_SYMBOL(get_gpadc_sample);
+
+/**
+ * intel_mid_gpadc_free - free gpadc
+ * @handle: the gpadc handle
+ *
+ * This function may sleep.
+ */
+void intel_mid_gpadc_free(void *handle)
+{
+	struct gpadc_request *rq = handle;
+	struct gpadc_info *mgi = &gpadc_info;
+	int i;
+
+	mutex_lock(&mgi->lock);
+	mgi->pmic_ipc_status = 0;
+	for (i = 0; i < rq->count; i++)
+		free_channel_addr(mgi, rq->addr[i]);
+
+	if (mgi->pmic_ipc_status)
+		dev_err(mgi->dev, "gpadc free broken\n");
+
+	mutex_unlock(&mgi->lock);
+	kfree(rq);
+}
+EXPORT_SYMBOL(intel_mid_gpadc_free);
+
+/**
+ * intel_mid_gpadc_alloc - allocate gpadc for channels
+ * @count: the count of channels
+ * @...: the channel parameters. (channel idx | flags)
+ *       flags:
+ *             CH_NEED_VCALIB   it needs voltage calibration
+ *             CH_NEED_ICALIB   it needs current calibration
+ *
+ * Returns gpadc handle on success or NULL on failure.
+ *
+ * This function may sleep.
+ */
+void *intel_mid_gpadc_alloc(int count, ...)
+{
+	struct gpadc_request *rq;
+	struct gpadc_info *mgi = &gpadc_info;
+	va_list args;
+	int ch;
+	int i;
+
+	if (!mgi->initialized)
+		return NULL;
+
+	rq = kzalloc(sizeof(struct gpadc_request), GFP_KERNEL);
+	if (rq == NULL)
+		return NULL;
+
+	va_start(args, count);
+	mutex_lock(&mgi->lock);
+	mgi->pmic_ipc_status = 0;
+
+	rq->count = count;
+	for (i = 0; i < count; i++) {
+		ch = va_arg(args, int);
+		rq->ch[i] = ch;
+		if (ch & CH_NEED_VREF)
+			rq->vref = 1;
+		ch &= 0xf;
+		rq->addr[i] = alloc_channel_addr(mgi, ch);
+		if (rq->addr[i] < 0) {
+			dev_err(mgi->dev, "alloc addr failed\n");
+			while (i-- > 0)
+				free_channel_addr(mgi, rq->addr[i]);
+			kfree(rq);
+			rq = NULL;
+			break;
+		}
+	}
+	if (mgi->pmic_ipc_status)
+		dev_err(mgi->dev, "gpadc alloc broken\n");
+
+	mutex_unlock(&mgi->lock);
+	va_end(args);
+
+	return rq;
+}
+EXPORT_SYMBOL(intel_mid_gpadc_alloc);
+
+/**
+ * gpadc_alloc_channels - allocate gpadc for channels
+ * @n: the number of channels
+ * @channel_info: array of channel parameters (channel idx | flags)
+ *       flags:
+ *             CH_NEED_VCALIB   it needs voltage calibration
+ *             CH_NEED_ICALIB   it needs current calibration
+ *
+ * Returns gpadc handle on success or NULL on failure.
+ *
+ * This function may sleep.
+ *
+ * TODO: Cleanup intel_mid_gpadc_alloc() once all its users
+ *       are moved to gpadc_alloc_channels()
+ *
+ */
+
+void *gpadc_alloc_channels(int n, int *channel_info)
+{
+	struct gpadc_request *rq;
+	struct gpadc_info *mgi = &gpadc_info;
+	int ch;
+	int i;
+
+	if (!mgi->initialized)
+		return NULL;
+
+	rq = kzalloc(sizeof(struct gpadc_request), GFP_KERNEL);
+	if (rq == NULL)
+		return NULL;
+
+	mutex_lock(&mgi->lock);
+	mgi->pmic_ipc_status = 0;
+
+	rq->count = n;
+	for (i = 0; i < n; i++) {
+		ch = channel_info[i];
+		rq->ch[i] = ch;
+		if (ch & CH_NEED_VREF)
+			rq->vref = 1;
+		ch &= 0xf;
+		rq->addr[i] = alloc_channel_addr(mgi, ch);
+		if (rq->addr[i] < 0) {
+			dev_err(mgi->dev, "alloc addr failed\n");
+			while (i-- > 0)
+				free_channel_addr(mgi, rq->addr[i]);
+			kfree(rq);
+			rq = NULL;
+			break;
+		}
+	}
+	if (mgi->pmic_ipc_status)
+		dev_err(mgi->dev, "gpadc alloc broken\n");
+
+	mutex_unlock(&mgi->lock);
+
+	return rq;
+}
+EXPORT_SYMBOL(gpadc_alloc_channels);
+
+static ssize_t intel_mid_gpadc_store_alloc_channel(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t size)
+{
+	int val, hdn;
+	int ch[SAMPLE_CH_MAX];
+
+	val = sscanf(buf, "%d %x %x", &hdn, &ch[0], &ch[1]);
+
+	if (val < 2 || val > 3) {
+		dev_err(dev, "invalid number of arguments");
+		return -EINVAL;
+	}
+
+	if (hdn < 1 || hdn > GPADC_CH_MAX) {
+		dev_err(dev, "invalid handle value");
+		return -EINVAL;
+	}
+
+	if (adc_handle[hdn - 1]) {
+		dev_err(dev, "adc handle %d has been occupied", hdn);
+		return -EBUSY;
+	}
+
+	if (val == 2)
+		adc_handle[hdn - 1] = intel_mid_gpadc_alloc(1, ch[0]);
+	else
+		adc_handle[hdn - 1] = intel_mid_gpadc_alloc(2, ch[0], ch[1]);
+
+	if (!adc_handle[hdn - 1]) {
+		dev_err(dev, "allocating adc handle %d failed", hdn);
+		return -ENOMEM;
+	}
+
+	return size;
+}
+
+static ssize_t intel_mid_gpadc_store_free_channel(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t size)
+{
+	int hdn;
+
+	if (sscanf(buf, "%d", &hdn) != 1) {
+		dev_err(dev, "invalid number of argument");
+		return -EINVAL;
+	}
+
+	if (hdn < 1 || hdn > GPADC_CH_MAX) {
+		dev_err(dev, "invalid handle value");
+		return -EINVAL;
+	}
+
+	if (adc_handle[hdn - 1]) {
+		intel_mid_gpadc_free(adc_handle[hdn - 1]);
+		adc_handle[hdn - 1] = NULL;
+	}
+
+	return size;
+}
+
+static ssize_t intel_mid_gpadc_store_sample(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t size)
+{
+	int hdn, spc;
+	int ret;
+	struct gpadc_request *rq;
+
+	if (sscanf(buf, "%d %d", &hdn, &spc) != 2) {
+		dev_err(dev, "invalid number of arguments");
+		return -EINVAL;
+	}
+
+	if (hdn < 1 || hdn > GPADC_CH_MAX) {
+		dev_err(dev, "invalid handle value");
+		return -EINVAL;
+	}
+
+	rq = adc_handle[hdn - 1];
+	if (!rq) {
+		dev_err(dev, "null handle");
+		return -EINVAL;
+	}
+
+	if (rq->count == 1)
+		ret = intel_mid_gpadc_sample(adc_handle[hdn-1],
+			spc, &sample_result[hdn - 1][0]);
+	else
+		ret = intel_mid_gpadc_sample(adc_handle[hdn - 1],
+			spc, &sample_result[hdn - 1][0],
+			&sample_result[hdn - 1][1]);
+
+	if (ret) {
+		dev_err(dev, "sampling failed. adc handle: %d", hdn);
+		return -EINVAL;
+	}
+
+	return size;
+}
+
+static ssize_t intel_mid_gpadc_show_sample(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int hdc;
+	int used = 0;
+	struct gpadc_request *rq;
+
+	for (hdc = 0; hdc < GPADC_CH_MAX; hdc++) {
+		if (adc_handle[hdc]) {
+			rq = adc_handle[hdc];
+			if (rq->count == 1)
+				used += snprintf(buf + used, PAGE_SIZE - used,
+					  "%d ", sample_result[hdc][0]);
+			else
+				used += snprintf(buf + used, PAGE_SIZE - used,
+					  "%d %d ", sample_result[hdc][0],
+					  sample_result[hdc][1]);
+		}
+	}
+
+	return used;
+}
+
+
+static void gsmpulse_sysfs_callback(int vol, int cur)
+{
+	vol_val = vol;
+	cur_val = cur;
+	complete(&gsmadc_complete);
+}
+
+static ssize_t intel_mid_gpadc_show_gsmpulse_sample(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int ret;
+
+	INIT_COMPLETION(gsmadc_complete);
+	intel_mid_gpadc_gsmpulse_register(gsmpulse_sysfs_callback);
+	ret = wait_for_completion_interruptible(&gsmadc_complete);
+	intel_mid_gpadc_gsmpulse_unregister(gsmpulse_sysfs_callback);
+	if (ret)
+		return 0;
+	else
+		return snprintf(buf, PAGE_SIZE, "%d %d", vol_val, cur_val);
+}
+
+static DEVICE_ATTR(alloc_channel, S_IWUSR, NULL,
+		intel_mid_gpadc_store_alloc_channel);
+static DEVICE_ATTR(free_channel, S_IWUSR, NULL,
+		intel_mid_gpadc_store_free_channel);
+static DEVICE_ATTR(sample, S_IRUGO | S_IWUSR,
+		intel_mid_gpadc_show_sample, intel_mid_gpadc_store_sample);
+static DEVICE_ATTR(gsmpulse_sample, S_IRUGO,
+		intel_mid_gpadc_show_gsmpulse_sample, NULL);
+
+static struct attribute *intel_mid_gpadc_attrs[] = {
+	&dev_attr_alloc_channel.attr,
+	&dev_attr_free_channel.attr,
+	&dev_attr_sample.attr,
+	&dev_attr_gsmpulse_sample.attr,
+	NULL,
+};
+
+static struct attribute_group intel_mid_gpadc_attr_group = {
+	.name = "mid_gpadc",
+	.attrs = intel_mid_gpadc_attrs,
+};
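+
+/*
+ * Illustrative sysfs usage under .../mid_gpadc (handle and channel
+ * numbers are hypothetical; channels are parsed as hex):
+ *
+ *   echo "1 8" > alloc_channel    # handle 1, one channel: 0x8
+ *   echo "1 16" > sample          # sample handle 1, 16 times, averaged
+ *   cat sample                    # read back the averaged result(s)
+ *   echo 1 > free_channel         # release handle 1
+ */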
+
+static int msic_gpadc_probe(struct platform_device *pdev)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+	struct intel_mid_gpadc_platform_data *pdata = pdev->dev.platform_data;
+	int err = 0;
+
+	mutex_init(&mgi->lock);
+	init_waitqueue_head(&mgi->wait);
+	init_waitqueue_head(&mgi->trimming_wait);
+	mgi->workq = create_singlethread_workqueue(dev_name(&pdev->dev));
+	if (mgi->workq == NULL)
+		return -ENOMEM;
+
+	mgi->dev = &pdev->dev;
+	mgi->intr = ioremap_nocache(pdata->intr, 4);
+	mgi->irq = platform_get_irq(pdev, 0);
+
+	gpadc_clear_bits(IRQLVL1MSK, IRQLVL1MSK_ADCM);
+	if (request_threaded_irq(mgi->irq, msic_gpadc_isr, msic_gpadc_irq,
+					IRQF_ONESHOT, "msic_adc", mgi)) {
+		dev_err(&pdev->dev, "unable to register irq %d\n", mgi->irq);
+		err = -ENODEV;
+		goto err_exit;
+	}
+
+	gpadc_write(ADC1ADDR0, MSIC_STOPCH);
+	INIT_WORK(&mgi->trimming_work, gpadc_trimming);
+	INIT_WORK(&mgi->gsmpulse_work, gpadc_gsmpulse_work);
+	queue_work(mgi->workq, &mgi->trimming_work);
+	wait_event(mgi->trimming_wait, mgi->trimming_start);
+	mgi->initialized = 1;
+
+	init_completion(&gsmadc_complete);
+
+	err = sysfs_create_group(&pdev->dev.kobj,
+			&intel_mid_gpadc_attr_group);
+	if (err) {
+		dev_err(&pdev->dev, "Unable to export sysfs interface, error: %d\n",
+			err);
+		goto err_release_irq;
+	}
+
+	return 0;
+
+err_release_irq:
+	free_irq(mgi->irq, mgi);
+err_exit:
+	if (mgi->intr)
+		iounmap(mgi->intr);
+	return err;
+}
+
+static int msic_gpadc_remove(struct platform_device *pdev)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+
+	sysfs_remove_group(&pdev->dev.kobj, &intel_mid_gpadc_attr_group);
+	free_irq(mgi->irq, mgi);
+	iounmap(mgi->intr);
+	flush_workqueue(mgi->workq);
+	destroy_workqueue(mgi->workq);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int msic_gpadc_suspend_noirq(struct device *dev)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+
+	/* If the gpadc is locked, it means gpadc is still in active mode. */
+	if (mutex_trylock(&mgi->lock))
+		return 0;
+	else
+		return -EBUSY;
+}
+
+static int msic_gpadc_resume_noirq(struct device *dev)
+{
+	struct gpadc_info *mgi = &gpadc_info;
+
+	mutex_unlock(&mgi->lock);
+	return 0;
+}
+#else
+#define msic_gpadc_suspend_noirq    NULL
+#define msic_gpadc_resume_noirq     NULL
+#endif
+
+static const struct dev_pm_ops msic_gpadc_driver_pm_ops = {
+	.suspend_noirq	= msic_gpadc_suspend_noirq,
+	.resume_noirq	= msic_gpadc_resume_noirq,
+};
+
+static struct platform_driver msic_gpadc_driver = {
+	.driver = {
+		   .name = "msic_adc",
+		   .owner = THIS_MODULE,
+		   .pm = &msic_gpadc_driver_pm_ops,
+		   },
+	.probe = msic_gpadc_probe,
+	.remove = msic_gpadc_remove,
+};
+
+static int msic_gpadc_module_init(void)
+{
+	return platform_driver_register(&msic_gpadc_driver);
+}
+
+static void msic_gpadc_module_exit(void)
+{
+	platform_driver_unregister(&msic_gpadc_driver);
+}
+
+static int msic_adc_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed msic_gpadc rpmsg device\n");
+
+	ret = msic_gpadc_module_init();
+
+out:
+	return ret;
+}
+
+static void msic_adc_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	msic_gpadc_module_exit();
+	dev_info(&rpdev->dev, "Removed msic_gpadc rpmsg device\n");
+}
+
+static void msic_adc_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id msic_adc_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_msic_adc" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, msic_adc_rpmsg_id_table);
+
+static struct rpmsg_driver msic_adc_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= msic_adc_rpmsg_id_table,
+	.probe		= msic_adc_rpmsg_probe,
+	.callback	= msic_adc_rpmsg_cb,
+	.remove		= msic_adc_rpmsg_remove,
+};
+
+static int __init msic_adc_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&msic_adc_rpmsg);
+}
+
+#ifdef MODULE
+module_init(msic_adc_rpmsg_init);
+#else
+rootfs_initcall(msic_adc_rpmsg_init);
+#endif
+
+static void __exit msic_adc_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&msic_adc_rpmsg);
+}
+module_exit(msic_adc_rpmsg_exit);
+
+MODULE_AUTHOR("Jenny TC <jenny.tc@intel.com>");
+MODULE_DESCRIPTION("Intel Medfield MSIC GPADC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index d4fe13e..14f16c4 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -398,6 +398,52 @@
 	  devices such as DaVinci NIC.
 	  For details please see http://www.ti.com/davinci
 
+config I2C_DESIGNWARE_CORE_FORK
+	tristate "Synopsys DesignWare Controller"
+	help
+	  If you say yes to this option, support will be included for the
+	  Synopsys DesignWare adapter core. Only master mode is supported.
+	  You also need to choose the platform or PCI driver for bus support.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called i2c-designware-core.
+
+config I2C_DESIGNWARE_PCI_FORK
+	tristate "Synopsys DesignWare PCI"
+	depends on PCI && I2C_DESIGNWARE_CORE_FORK
+	help
+	  If you say yes to this option, support will be included for the
+	  Synopsys DesignWare I2C adapter. Only master mode is supported.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called i2c-designware-pci.
+
+config I2C_DESIGNWARE_PLATFORM_FORK
+	tristate "Synopsys DesignWare Platform"
+	depends on I2C_DESIGNWARE_CORE_FORK
+	help
+	  If you say yes to this option, support will be included for the
+	  Synopsys DesignWare I2C adapter. Only master mode is supported.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called i2c-designware-platform.
+
+config I2C_DW_SPEED_MODE_DEBUG
+	bool "Designware I2C Speed Mode Debug"
+	depends on I2C_DESIGNWARE_CORE_FORK
+	help
+	  If you say yes to this option, you can change the I2C
+	  controller bus speed mode at runtime.
+
+config I2C_PMIC
+	bool "PMIC I2C Adapter"
+	depends on INTEL_SCU_IPC
+	help
+	  Say Y here if you have a PMIC I2C adapter.
+
+	  The PMIC-I2C adapter driver handles I2C transactions
+	  on the PMIC's I2C bus.
+
 config I2C_DESIGNWARE_CORE
 	tristate
 
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 8f4fc23..030b4dd 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -92,4 +92,12 @@
 obj-$(CONFIG_SCx200_ACB)	+= scx200_acb.o
 obj-$(CONFIG_SCx200_I2C)	+= scx200_i2c.o
 
+obj-$(CONFIG_I2C_DESIGNWARE_CORE_FORK)	+= i2c-designware-core.o
+obj-$(CONFIG_I2C_DESIGNWARE_PCI_FORK)	+= i2c-designware-pci.o
+i2c-designware-pci-objs := i2c-designware-pcidrv.o
+obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM_FORK)	+= i2c-designware-platform.o
+i2c-designware-platform-objs := i2c-designware-platdrv.o
+
+obj-$(CONFIG_I2C_PMIC)          += i2c-pmic.o
+
 ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index f24a738..859eb20d 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -25,7 +25,8 @@
  * ----------------------------------------------------------------------------
  *
  */
-#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/clk.h>
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -33,112 +34,22 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
+#include <linux/nmi.h>
 #include <linux/delay.h>
-#include <linux/module.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/acpi.h>
 #include "i2c-designware-core.h"
 
-/*
- * Registers offset
- */
-#define DW_IC_CON		0x0
-#define DW_IC_TAR		0x4
-#define DW_IC_DATA_CMD		0x10
-#define DW_IC_SS_SCL_HCNT	0x14
-#define DW_IC_SS_SCL_LCNT	0x18
-#define DW_IC_FS_SCL_HCNT	0x1c
-#define DW_IC_FS_SCL_LCNT	0x20
-#define DW_IC_INTR_STAT		0x2c
-#define DW_IC_INTR_MASK		0x30
-#define DW_IC_RAW_INTR_STAT	0x34
-#define DW_IC_RX_TL		0x38
-#define DW_IC_TX_TL		0x3c
-#define DW_IC_CLR_INTR		0x40
-#define DW_IC_CLR_RX_UNDER	0x44
-#define DW_IC_CLR_RX_OVER	0x48
-#define DW_IC_CLR_TX_OVER	0x4c
-#define DW_IC_CLR_RD_REQ	0x50
-#define DW_IC_CLR_TX_ABRT	0x54
-#define DW_IC_CLR_RX_DONE	0x58
-#define DW_IC_CLR_ACTIVITY	0x5c
-#define DW_IC_CLR_STOP_DET	0x60
-#define DW_IC_CLR_START_DET	0x64
-#define DW_IC_CLR_GEN_CALL	0x68
-#define DW_IC_ENABLE		0x6c
-#define DW_IC_STATUS		0x70
-#define DW_IC_TXFLR		0x74
-#define DW_IC_RXFLR		0x78
-#define DW_IC_TX_ABRT_SOURCE	0x80
-#define DW_IC_ENABLE_STATUS	0x9c
-#define DW_IC_COMP_PARAM_1	0xf4
-#define DW_IC_COMP_TYPE		0xfc
-#define DW_IC_COMP_TYPE_VALUE	0x44570140
-
-#define DW_IC_INTR_RX_UNDER	0x001
-#define DW_IC_INTR_RX_OVER	0x002
-#define DW_IC_INTR_RX_FULL	0x004
-#define DW_IC_INTR_TX_OVER	0x008
-#define DW_IC_INTR_TX_EMPTY	0x010
-#define DW_IC_INTR_RD_REQ	0x020
-#define DW_IC_INTR_TX_ABRT	0x040
-#define DW_IC_INTR_RX_DONE	0x080
-#define DW_IC_INTR_ACTIVITY	0x100
-#define DW_IC_INTR_STOP_DET	0x200
-#define DW_IC_INTR_START_DET	0x400
-#define DW_IC_INTR_GEN_CALL	0x800
-
-#define DW_IC_INTR_DEFAULT_MASK		(DW_IC_INTR_RX_FULL | \
-					 DW_IC_INTR_TX_EMPTY | \
-					 DW_IC_INTR_TX_ABRT | \
-					 DW_IC_INTR_STOP_DET)
-
-#define DW_IC_STATUS_ACTIVITY	0x1
-
-#define DW_IC_ERR_TX_ABRT	0x1
-
-/*
- * status codes
- */
-#define STATUS_IDLE			0x0
-#define STATUS_WRITE_IN_PROGRESS	0x1
-#define STATUS_READ_IN_PROGRESS		0x2
-
-#define TIMEOUT			20 /* ms */
-
-/*
- * hardware abort codes from the DW_IC_TX_ABRT_SOURCE register
- *
- * only expected abort codes are listed here
- * refer to the datasheet for the full list
- */
-#define ABRT_7B_ADDR_NOACK	0
-#define ABRT_10ADDR1_NOACK	1
-#define ABRT_10ADDR2_NOACK	2
-#define ABRT_TXDATA_NOACK	3
-#define ABRT_GCALL_NOACK	4
-#define ABRT_GCALL_READ		5
-#define ABRT_SBYTE_ACKDET	7
-#define ABRT_SBYTE_NORSTRT	9
-#define ABRT_10B_RD_NORSTRT	10
-#define ABRT_MASTER_DIS		11
-#define ARB_LOST		12
-
-#define DW_IC_TX_ABRT_7B_ADDR_NOACK	(1UL << ABRT_7B_ADDR_NOACK)
-#define DW_IC_TX_ABRT_10ADDR1_NOACK	(1UL << ABRT_10ADDR1_NOACK)
-#define DW_IC_TX_ABRT_10ADDR2_NOACK	(1UL << ABRT_10ADDR2_NOACK)
-#define DW_IC_TX_ABRT_TXDATA_NOACK	(1UL << ABRT_TXDATA_NOACK)
-#define DW_IC_TX_ABRT_GCALL_NOACK	(1UL << ABRT_GCALL_NOACK)
-#define DW_IC_TX_ABRT_GCALL_READ	(1UL << ABRT_GCALL_READ)
-#define DW_IC_TX_ABRT_SBYTE_ACKDET	(1UL << ABRT_SBYTE_ACKDET)
-#define DW_IC_TX_ABRT_SBYTE_NORSTRT	(1UL << ABRT_SBYTE_NORSTRT)
-#define DW_IC_TX_ABRT_10B_RD_NORSTRT	(1UL << ABRT_10B_RD_NORSTRT)
-#define DW_IC_TX_ABRT_MASTER_DIS	(1UL << ABRT_MASTER_DIS)
-#define DW_IC_TX_ARB_LOST		(1UL << ARB_LOST)
-
-#define DW_IC_TX_ABRT_NOACK		(DW_IC_TX_ABRT_7B_ADDR_NOACK | \
-					 DW_IC_TX_ABRT_10ADDR1_NOACK | \
-					 DW_IC_TX_ABRT_10ADDR2_NOACK | \
-					 DW_IC_TX_ABRT_TXDATA_NOACK | \
-					 DW_IC_TX_ABRT_GCALL_NOACK)
+int i2c_dw_init(struct dw_i2c_dev *dev);
+int i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num);
+u32 i2c_dw_func(struct i2c_adapter *adap);
+void i2c_dw_enable(struct dw_i2c_dev *dev);
+void i2c_dw_disable(struct dw_i2c_dev *dev);
+irqreturn_t i2c_dw_isr(int this_irq, void *dev_id);
+void i2c_dw_disable_int(struct dw_i2c_dev *dev);
+void i2c_dw_clear_int(struct dw_i2c_dev *dev);
 
 static char *abort_sources[] = {
 	[ABRT_7B_ADDR_NOACK] =
@@ -167,15 +78,9 @@
 
 u32 dw_readl(struct dw_i2c_dev *dev, int offset)
 {
-	u32 value;
+	u32 value = readl(dev->base + offset);
 
-	if (dev->accessor_flags & ACCESS_16BIT)
-		value = readw(dev->base + offset) |
-			(readw(dev->base + offset + 2) << 16);
-	else
-		value = readl(dev->base + offset);
-
-	if (dev->accessor_flags & ACCESS_SWAP)
+	if (dev->swab)
 		return swab32(value);
 	else
 		return value;
@@ -183,17 +88,778 @@
 
 void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset)
 {
-	if (dev->accessor_flags & ACCESS_SWAP)
+	if (dev->swab)
 		b = swab32(b);
 
-	if (dev->accessor_flags & ACCESS_16BIT) {
-		writew((u16)b, dev->base + offset);
-		writew((u16)(b >> 16), dev->base + offset + 2);
-	} else {
-		writel(b, dev->base + offset);
-	}
+	writel(b, dev->base + offset);
 }
 
+static void i2c_dw_dump(struct dw_i2c_dev *dev)
+{
+	u32 value;
+
+	dev_err(dev->dev, "===== REGISTER DUMP (i2c) =====\n");
+	value = dw_readl(dev, DW_IC_CON);
+	dev_err(dev->dev, "DW_IC_CON:               0x%x\n", value);
+	value = dw_readl(dev, DW_IC_TAR);
+	dev_err(dev->dev, "DW_IC_TAR:               0x%x\n", value);
+	value = dw_readl(dev, DW_IC_SS_SCL_HCNT);
+	dev_err(dev->dev, "DW_IC_SS_SCL_HCNT:       0x%x\n", value);
+	value = dw_readl(dev, DW_IC_SS_SCL_LCNT);
+	dev_err(dev->dev, "DW_IC_SS_SCL_LCNT:       0x%x\n", value);
+	value = dw_readl(dev, DW_IC_FS_SCL_HCNT);
+	dev_err(dev->dev, "DW_IC_FS_SCL_HCNT:       0x%x\n", value);
+	value = dw_readl(dev, DW_IC_FS_SCL_LCNT);
+	dev_err(dev->dev, "DW_IC_FS_SCL_LCNT:       0x%x\n", value);
+	value = dw_readl(dev, DW_IC_INTR_STAT);
+	dev_err(dev->dev, "DW_IC_INTR_STAT:         0x%x\n", value);
+	value = dw_readl(dev, DW_IC_INTR_MASK);
+	dev_err(dev->dev, "DW_IC_INTR_MASK:         0x%x\n", value);
+	value = dw_readl(dev, DW_IC_RAW_INTR_STAT);
+	dev_err(dev->dev, "DW_IC_RAW_INTR_STAT:     0x%x\n", value);
+	value = dw_readl(dev, DW_IC_RX_TL);
+	dev_err(dev->dev, "DW_IC_RX_TL:             0x%x\n", value);
+	value = dw_readl(dev, DW_IC_TX_TL);
+	dev_err(dev->dev, "DW_IC_TX_TL:             0x%x\n", value);
+	value = dw_readl(dev, DW_IC_ENABLE);
+	dev_err(dev->dev, "DW_IC_ENABLE:            0x%x\n", value);
+	value = dw_readl(dev, DW_IC_STATUS);
+	dev_err(dev->dev, "DW_IC_STATUS:            0x%x\n", value);
+	value = dw_readl(dev, DW_IC_TXFLR);
+	dev_err(dev->dev, "DW_IC_TXFLR:             0x%x\n", value);
+	value = dw_readl(dev, DW_IC_RXFLR);
+	dev_err(dev->dev, "DW_IC_RXFLR:             0x%x\n", value);
+	value = dw_readl(dev, DW_IC_TX_ABRT_SOURCE);
+	dev_err(dev->dev, "DW_IC_TX_ABRT_SOURCE:    0x%x\n", value);
+	value = dw_readl(dev, DW_IC_DATA_CMD);
+	dev_err(dev->dev, "DW_IC_DATA_CMD:          0x%x\n", value);
+	dev_err(dev->dev, "===============================\n");
+}
+
+/*
+ * VLV2 PCI config space MMIO access to the controller is enabled by:
+ * 1. Resetting the registers at offsets 0x804 and 0x808 from the base.
+ * 2. Setting the register at offset 0x804 from the base to 0x3.
+ */
+static void vlv2_reset(struct dw_i2c_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < 10; i++) {
+		dw_writel(dev, 0, 0x804);
+		dw_writel(dev, 0, 0x808);
+		usleep_range(10, 100);
+
+		dw_writel(dev, 3, 0x804);
+		usleep_range(10, 100);
+
+		if (dw_readl(dev, DW_IC_COMP_TYPE) != DW_IC_COMP_TYPE_VALUE)
+			continue;
+
+		return;
+	}
+
+	dev_warn(dev->dev, "vlv2 I2C reset failed\n");
+}
+
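+/*
+ * Per-SoC SCL timing hooks: program board-validated high/low counts
+ * instead of the generic values computed in i2c_dw_init().
+ */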
+static int mfld_i2c_scl_cfg(struct dw_i2c_dev *dev)
+{
+	dw_writel(dev, PNW_SS_SCLK_HCNT, DW_IC_SS_SCL_HCNT);
+	dw_writel(dev, PNW_SS_SCLK_LCNT, DW_IC_SS_SCL_LCNT);
+
+	dw_writel(dev, PNW_FS_SCLK_HCNT, DW_IC_FS_SCL_HCNT);
+	dw_writel(dev, PNW_FS_SCLK_LCNT, DW_IC_FS_SCL_LCNT);
+
+	return 0;
+}
+
+static int ctp_i2c_scl_cfg(struct dw_i2c_dev *dev)
+{
+	dw_writel(dev, CLV_SS_SCLK_HCNT, DW_IC_SS_SCL_HCNT);
+	dw_writel(dev, CLV_SS_SCLK_LCNT, DW_IC_SS_SCL_LCNT);
+
+	dw_writel(dev, CLV_FS_SCLK_HCNT, DW_IC_FS_SCL_HCNT);
+	dw_writel(dev, CLV_FS_SCLK_LCNT, DW_IC_FS_SCL_LCNT);
+
+	return 0;
+}
+
+static int merr_i2c_scl_cfg(struct dw_i2c_dev *dev)
+{
+	dw_writel(dev, MERR_SS_SCLK_HCNT, DW_IC_SS_SCL_HCNT);
+	dw_writel(dev, MERR_SS_SCLK_LCNT, DW_IC_SS_SCL_LCNT);
+
+	dw_writel(dev, MERR_FS_SCLK_HCNT, DW_IC_FS_SCL_HCNT);
+	dw_writel(dev, MERR_FS_SCLK_LCNT, DW_IC_FS_SCL_LCNT);
+
+	dw_writel(dev, MERR_HS_SCLK_HCNT, DW_IC_HS_SCL_HCNT);
+	dw_writel(dev, MERR_HS_SCLK_LCNT, DW_IC_HS_SCL_LCNT);
+
+	return 0;
+}
+
+static int vlv2_i2c_scl_cfg(struct dw_i2c_dev *dev)
+{
+	dw_writel(dev, VLV2_SS_SCLK_HCNT, DW_IC_SS_SCL_HCNT);
+	dw_writel(dev, VLV2_SS_SCLK_LCNT, DW_IC_SS_SCL_LCNT);
+
+	dw_writel(dev, VLV2_FS_SCLK_HCNT, DW_IC_FS_SCL_HCNT);
+	dw_writel(dev, VLV2_FS_SCLK_LCNT, DW_IC_FS_SCL_LCNT);
+
+	dw_writel(dev, VLV2_HS_SCLK_HCNT, DW_IC_HS_SCL_HCNT);
+	dw_writel(dev, VLV2_HS_SCLK_LCNT, DW_IC_HS_SCL_LCNT);
+
+	return 0;
+}
+
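+/*
+ * Static per-bus configuration, indexed by enum dw_ctl_id_t. Entries
+ * that omit clk_khz rely entirely on their scl_cfg hook to program
+ * the SCL counts, so the input clock rate is never used for them.
+ */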
+static struct dw_controller dw_controllers[] = {
+	[moorestown_0] = {
+		.bus_num     = 0,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 32,
+		.rx_fifo_depth = 32,
+		.clk_khz      = 25000,
+	},
+	[moorestown_1] = {
+		.bus_num     = 1,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 32,
+		.rx_fifo_depth = 32,
+		.clk_khz      = 25000,
+	},
+	[moorestown_2] = {
+		.bus_num     = 2,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 32,
+		.rx_fifo_depth = 32,
+		.clk_khz      = 25000,
+	},
+	[medfield_0] = {
+		.bus_num     = 0,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 256,
+		.rx_fifo_depth = 256,
+		.clk_khz      = 17000,
+		.scl_cfg = mfld_i2c_scl_cfg,
+	},
+	[medfield_1] = {
+		.bus_num     = 1,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_STD,
+		.tx_fifo_depth = 256,
+		.rx_fifo_depth = 256,
+		.clk_khz      = 20500,
+		.scl_cfg = mfld_i2c_scl_cfg,
+	},
+	[medfield_2] = {
+		.bus_num     = 2,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 256,
+		.rx_fifo_depth = 256,
+		.clk_khz      = 17000,
+		.scl_cfg = mfld_i2c_scl_cfg,
+	},
+	[medfield_3] = {
+		.bus_num     = 3,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_STD,
+		.tx_fifo_depth = 256,
+		.rx_fifo_depth = 256,
+		.clk_khz      = 20500,
+		.scl_cfg = mfld_i2c_scl_cfg,
+	},
+	[medfield_4] = {
+		.bus_num     = 4,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 256,
+		.rx_fifo_depth = 256,
+		.clk_khz      = 17000,
+		.scl_cfg = mfld_i2c_scl_cfg,
+	},
+	[medfield_5] = {
+		.bus_num     = 5,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 256,
+		.rx_fifo_depth = 256,
+		.clk_khz      = 17000,
+		.scl_cfg = mfld_i2c_scl_cfg,
+	},
+
+	[cloverview_0] = {
+		.bus_num     = 0,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 256,
+		.rx_fifo_depth = 256,
+		.clk_khz      = 17000,
+		.scl_cfg = ctp_i2c_scl_cfg,
+	},
+	[cloverview_1] = {
+		.bus_num     = 1,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 256,
+		.rx_fifo_depth = 256,
+		.clk_khz      = 17000,
+		.scl_cfg = ctp_i2c_scl_cfg,
+	},
+	[cloverview_2] = {
+		.bus_num     = 2,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 256,
+		.rx_fifo_depth = 256,
+		.clk_khz      = 17000,
+		.scl_cfg = ctp_i2c_scl_cfg,
+	},
+	[cloverview_3] = {
+		.bus_num     = 3,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_STD,
+		.tx_fifo_depth = 256,
+		.rx_fifo_depth = 256,
+		.clk_khz      = 20500,
+		.scl_cfg = ctp_i2c_scl_cfg,
+	},
+	[cloverview_4] = {
+		.bus_num     = 4,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 256,
+		.rx_fifo_depth = 256,
+		.clk_khz      = 17000,
+		.scl_cfg = ctp_i2c_scl_cfg,
+	},
+	[cloverview_5] = {
+		.bus_num     = 5,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 256,
+		.rx_fifo_depth = 256,
+		.clk_khz      = 17000,
+		.scl_cfg = ctp_i2c_scl_cfg,
+	},
+
+	[merrifield_0] = {
+		.bus_num     = 1,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 64,
+		.rx_fifo_depth = 64,
+		.enable_stop = 1,
+		.scl_cfg = merr_i2c_scl_cfg,
+	},
+	[merrifield_1] = {
+		.bus_num     = 2,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_STD,
+		.tx_fifo_depth = 64,
+		.rx_fifo_depth = 64,
+		.enable_stop = 1,
+		.scl_cfg = merr_i2c_scl_cfg,
+	},
+	[merrifield_2] = {
+		.bus_num     = 3,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 64,
+		.rx_fifo_depth = 64,
+		.enable_stop = 1,
+		.scl_cfg = merr_i2c_scl_cfg,
+	},
+	[merrifield_3] = {
+		.bus_num     = 4,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 64,
+		.rx_fifo_depth = 64,
+		.enable_stop = 1,
+		.scl_cfg = merr_i2c_scl_cfg,
+	},
+	[merrifield_4] = {
+		.bus_num     = 5,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 64,
+		.rx_fifo_depth = 64,
+		.enable_stop = 1,
+		.scl_cfg = merr_i2c_scl_cfg,
+	},
+	[merrifield_5] = {
+		.bus_num     = 6,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 64,
+		.rx_fifo_depth = 64,
+		.enable_stop = 1,
+		.scl_cfg = merr_i2c_scl_cfg,
+	},
+	[merrifield_6] = {
+		.bus_num     = 7,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 64,
+		.rx_fifo_depth = 64,
+		.enable_stop = 1,
+		.scl_cfg = merr_i2c_scl_cfg,
+	},
+	[valleyview_0] = {
+		.bus_num     = 1,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 64,
+		.rx_fifo_depth = 64,
+		.enable_stop = 1,
+		.scl_cfg = vlv2_i2c_scl_cfg,
+		.reset = vlv2_reset,
+		.share_irq = 1,
+		.acpi_name = "\\_SB.I2C1"
+	},
+	[valleyview_1] = {
+		.bus_num     = 2,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 64,
+		.rx_fifo_depth = 64,
+		.enable_stop = 1,
+		.scl_cfg = vlv2_i2c_scl_cfg,
+		.reset = vlv2_reset,
+		.share_irq = 1,
+		.acpi_name = "\\_SB.I2C2"
+	},
+	[valleyview_2] = {
+		.bus_num     = 3,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 64,
+		.rx_fifo_depth = 64,
+		.enable_stop = 1,
+		.scl_cfg = vlv2_i2c_scl_cfg,
+		.reset = vlv2_reset,
+		.share_irq = 1,
+		.acpi_name = "\\_SB.I2C3"
+	},
+	[valleyview_3] = {
+		.bus_num     = 4,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 64,
+		.rx_fifo_depth = 64,
+		.enable_stop = 1,
+		.scl_cfg = vlv2_i2c_scl_cfg,
+		.reset = vlv2_reset,
+		.share_irq = 1,
+		.acpi_name = "\\_SB.I2C4"
+	},
+	[valleyview_4] = {
+		.bus_num     = 5,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 64,
+		.rx_fifo_depth = 64,
+		.enable_stop = 1,
+		.scl_cfg = vlv2_i2c_scl_cfg,
+		.reset = vlv2_reset,
+		.share_irq = 1,
+		.acpi_name = "\\_SB.I2C5"
+	},
+	[valleyview_5] = {
+		.bus_num     = 6,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 64,
+		.rx_fifo_depth = 64,
+		.enable_stop = 1,
+		.scl_cfg = vlv2_i2c_scl_cfg,
+		.reset = vlv2_reset,
+		.share_irq = 1,
+		.acpi_name = "\\_SB.I2C6"
+	},
+	[valleyview_6] = {
+		.bus_num     = 7,
+		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+		.tx_fifo_depth = 64,
+		.rx_fifo_depth = 64,
+		.enable_stop = 1,
+		.scl_cfg = vlv2_i2c_scl_cfg,
+		.reset = vlv2_reset,
+		.share_irq = 1,
+		.acpi_name = "\\_SB.I2C7"
+	}
+};
+
+static struct i2c_algorithm i2c_dw_algo = {
+	.master_xfer	= i2c_dw_xfer,
+	.functionality	= i2c_dw_func,
+};
+
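+/*
+ * Runtime suspend only disables the adapter; system suspend also
+ * takes the bus semaphore (without blocking) so that no new transfer
+ * can start until resume releases it.
+ */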
+int i2c_dw_suspend(struct dw_i2c_dev *dev, bool runtime)
+{
+	if (runtime) {
+		i2c_dw_disable(dev);
+	} else {
+		if (down_trylock(&dev->lock))
+			return -EBUSY;
+		i2c_dw_disable(dev);
+		dev->status &= ~STATUS_POWERON;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(i2c_dw_suspend);
+
+int i2c_dw_resume(struct dw_i2c_dev *dev, bool runtime)
+{
+	i2c_dw_init(dev);
+	if (!runtime) {
+		dev->status |= STATUS_POWERON;
+		up(&dev->lock);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(i2c_dw_resume);
+
+static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
+{
+	return dev->controller->clk_khz;
+}
+
+#ifdef CONFIG_I2C_DW_SPEED_MODE_DEBUG
+static ssize_t show_bus_num(struct device *dev, struct device_attribute *attr,
+							char *buf)
+{
+	struct dw_i2c_dev *i2c = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", i2c->controller->bus_num);
+}
+
+#define MODE_NAME_SIZE	10
+
+static ssize_t store_mode(struct device *dev,
+			  struct device_attribute *attr,
+			  const char *buf, size_t size)
+{
+	struct dw_i2c_dev *i2c = dev_get_drvdata(dev);
+	int ret = 0;
+	char mode[MODE_NAME_SIZE];
+
+	if (sscanf(buf, "%9s", mode) != 1) {
+		dev_err(dev, "input I2C speed mode: std/fast/high\n");
+		return -EINVAL;
+	}
+
+	down(&i2c->lock);
+	pm_runtime_get_sync(i2c->dev);
+
+	if (!strncmp("std", mode, MODE_NAME_SIZE)) {
+		i2c->master_cfg &= ~DW_IC_SPEED_MASK;
+		i2c->master_cfg |= DW_IC_CON_SPEED_STD;
+	} else if (!strncmp("fast", mode, MODE_NAME_SIZE)) {
+		i2c->master_cfg &= ~DW_IC_SPEED_MASK;
+		i2c->master_cfg |= DW_IC_CON_SPEED_FAST;
+	} else if (!strncmp("high", mode, MODE_NAME_SIZE)) {
+		i2c->master_cfg &= ~DW_IC_SPEED_MASK;
+		i2c->master_cfg |= DW_IC_CON_SPEED_HIGH;
+	} else {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* init to configure the i2c master */
+	i2c_dw_init(i2c);
+
+	dev_info(dev, "I2C speed mode changed to %s\n", mode);
+
+out:
+	pm_runtime_mark_last_busy(i2c->dev);
+	pm_runtime_put_autosuspend(i2c->dev);
+	up(&i2c->lock);
+
+	return (ret < 0) ? ret : size;
+}
+
+static ssize_t show_mode(struct device *dev,
+			 struct device_attribute *attr,
+			 char *buf)
+{
+	struct dw_i2c_dev *i2c = dev_get_drvdata(dev);
+	int ret;
+
+	switch (i2c->master_cfg & DW_IC_SPEED_MASK) {
+	case DW_IC_CON_SPEED_STD:
+		ret = snprintf(buf, PAGE_SIZE, "%s\n", "std");
+		break;
+	case DW_IC_CON_SPEED_FAST:
+		ret = snprintf(buf, PAGE_SIZE, "%s\n", "fast");
+		break;
+	case DW_IC_CON_SPEED_HIGH:
+		ret = snprintf(buf, PAGE_SIZE, "%s\n", "high");
+		break;
+	default:
+		ret = snprintf(buf, PAGE_SIZE, "%s\n", "Not Supported");
+		break;
+	}
+
+	return ret;
+}
+
+static DEVICE_ATTR(bus_num, S_IRUGO, show_bus_num, NULL);
+static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, show_mode, store_mode);
+
+static struct attribute *i2c_dw_attrs[] = {
+	&dev_attr_bus_num.attr,
+	&dev_attr_mode.attr,
+	NULL,
+};
+
+static struct attribute_group i2c_dw_attr_group = {
+	.name = "i2c_dw_sysnode",
+	.attrs = i2c_dw_attrs,
+};
+#endif
+
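+/*
+ * The lock_xfer attribute lets user space hold the bus semaphore and
+ * keep the controller runtime-active across several transactions,
+ * blocking all other transfers until it is released.
+ */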
+static ssize_t store_lock_xfer(struct device *dev,
+			  struct device_attribute *attr,
+			  const char *buf, size_t size)
+{
+	struct dw_i2c_dev *i2c = dev_get_drvdata(dev->parent);
+	ssize_t	status;
+	long lock;
+
+	status = kstrtol(buf, 0, &lock);
+	if (status == 0) {
+		if (lock && !i2c->lock_flag) {
+			down(&i2c->lock);
+			pm_runtime_get_sync(i2c->dev);
+			i2c->lock_flag = 1;
+			dev_info(dev, "lock i2c xfer\n");
+		} else if (!lock && i2c->lock_flag) {
+			pm_runtime_mark_last_busy(i2c->dev);
+			pm_runtime_put_autosuspend(i2c->dev);
+			i2c->lock_flag = 0;
+			up(&i2c->lock);
+			dev_info(dev, "unlock i2c xfer\n");
+		} else {
+			return -EINVAL;
+		}
+	}
+
+	return status ? : size;
+}
+
+static DEVICE_ATTR(lock_xfer, S_IWUSR, NULL, store_lock_xfer);
+
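+/**
+ * i2c_dw_setup() - map and register a designware i2c controller
+ * @pdev: parent device (PCI or platform)
+ * @bus_idx: index into dw_controllers[]
+ * @start: physical base address of the register block
+ * @len: length of the register block
+ * @irq: interrupt line
+ *
+ * Returns the new device's private data, or an ERR_PTR on failure.
+ */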
+struct dw_i2c_dev *i2c_dw_setup(struct device *pdev, int bus_idx,
+	unsigned long start, unsigned long len, int irq)
+{
+	struct dw_i2c_dev *dev;
+	struct i2c_adapter *adap;
+	void __iomem *base;
+	struct  dw_controller *controller;
+	int r;
+
+	if (bus_idx >= ARRAY_SIZE(dw_controllers)) {
+		dev_err(pdev, "invalid bus index %d\n",
+			bus_idx);
+		r = -EINVAL;
+		goto exit;
+	}
+
+	controller = &dw_controllers[bus_idx];
+
+	base = ioremap_nocache(start, len);
+	if (!base) {
+		dev_err(pdev, "I/O memory remapping failed\n");
+		r = -ENOMEM;
+		goto exit;
+	}
+
+	dev = kzalloc(sizeof(struct dw_i2c_dev), GFP_KERNEL);
+	if (!dev) {
+		r = -ENOMEM;
+		goto err_iounmap;
+	}
+
+	init_completion(&dev->cmd_complete);
+	sema_init(&dev->lock, 1);
+	dev->status = STATUS_IDLE;
+	dev->clk = NULL;
+	dev->controller = controller;
+	dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz;
+	dev->base = base;
+	dev->dev = get_device(pdev);
+	dev->functionality =
+		I2C_FUNC_I2C |
+		I2C_FUNC_SMBUS_BYTE |
+		I2C_FUNC_SMBUS_BYTE_DATA |
+		I2C_FUNC_SMBUS_WORD_DATA |
+		I2C_FUNC_SMBUS_I2C_BLOCK;
+	dev->master_cfg =  controller->bus_cfg;
+	dev->get_scl_cfg = controller->scl_cfg;
+	dev->enable_stop = controller->enable_stop;
+	dev->clk_khz = controller->clk_khz;
+	dev->speed_cfg = dev->master_cfg & DW_IC_SPEED_MASK;
+	dev->use_dyn_clk = 0;
+	dev->reset = controller->reset;
+	dev->irq = irq;
+	dev->share_irq = controller->share_irq;
+	dev->abort = intel_mid_dw_i2c_abort;
+	dev->tx_fifo_depth = controller->tx_fifo_depth;
+	dev->rx_fifo_depth = controller->rx_fifo_depth;
+
+	r = i2c_dw_init(dev);
+	if (r)
+		goto err_kfree;
+
+	adap = &dev->adapter;
+	i2c_set_adapdata(adap, dev);
+	adap->owner = THIS_MODULE;
+	adap->class = 0;
+	adap->algo = &i2c_dw_algo;
+	adap->dev.parent = pdev;
+	adap->nr = controller->bus_num;
+	snprintf(adap->name, sizeof(adap->name), "i2c-designware-%d",
+		adap->nr);
+
+	r = request_irq(irq, i2c_dw_isr, IRQF_SHARED, adap->name, dev);
+	if (r) {
+		dev_err(pdev, "failure requesting irq %i\n", irq);
+		goto err_kfree;
+	}
+
+	i2c_dw_disable_int(dev);
+	i2c_dw_clear_int(dev);
+	r = i2c_add_numbered_adapter(adap);
+	if (r) {
+		dev_err(pdev, "failure adding adapter\n");
+		goto err_free_irq;
+	}
+
+#ifdef CONFIG_I2C_DW_SPEED_MODE_DEBUG
+	r = sysfs_create_group(&pdev->kobj, &i2c_dw_attr_group);
+	if (r) {
+		dev_err(pdev,
+			"Unable to export sysfs interface, error: %d\n", r);
+		goto err_del_adap;
+	}
+#endif
+	r = device_create_file(&adap->dev, &dev_attr_lock_xfer);
+	if (r < 0)
+		dev_err(&adap->dev,
+			"Failed to add lock_xfer sysfs files: %d\n", r);
+
+	return dev;
+
+#ifdef CONFIG_I2C_DW_SPEED_MODE_DEBUG
+err_del_adap:
+	i2c_del_adapter(&dev->adapter);
+#endif
+err_free_irq:
+	free_irq(irq, dev);
+err_kfree:
+	put_device(pdev);
+	kfree(dev);
+err_iounmap:
+	iounmap(base);
+exit:
+	return ERR_PTR(r);
+}
+EXPORT_SYMBOL(i2c_dw_setup);
+
+#ifdef CONFIG_ACPI
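+/*
+ * Resource walker: pick the connection speed out of every I2cSerialBus
+ * resource and reprogram the master configuration to match.
+ */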
+static int acpi_i2c_get_freq(struct acpi_resource *ares,
+					void *data)
+{
+	struct dw_i2c_dev *i2c = data;
+
+	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+		struct acpi_resource_i2c_serialbus *sb;
+
+		sb = &ares->data.i2c_serial_bus;
+		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
+			i2c->freq = sb->connection_speed;
+			if (i2c->freq == DW_STD_SPEED) {
+				i2c->master_cfg &= ~DW_IC_SPEED_MASK;
+				i2c->master_cfg |= DW_IC_CON_SPEED_STD;
+			} else if (i2c->freq == DW_FAST_SPEED) {
+				i2c->master_cfg &= ~DW_IC_SPEED_MASK;
+				i2c->master_cfg |= DW_IC_CON_SPEED_FAST;
+			} else if (i2c->freq == DW_HIGH_SPEED) {
+				i2c->master_cfg &= ~DW_IC_SPEED_MASK;
+				i2c->master_cfg |= DW_IC_CON_SPEED_HIGH;
+			}
+
+			down(&i2c->lock);
+			i2c_dw_init(i2c);
+			up(&i2c->lock);
+		}
+	}
+
+	return 1;
+}
+
+static acpi_status acpi_i2c_find_device_speed(acpi_handle handle, u32 level,
+					void *data, void **return_value)
+{
+	struct dw_i2c_dev *i2c = data;
+	struct list_head resource_list;
+	struct acpi_device *adev;
+	int ret;
+
+	if (acpi_bus_get_device(handle, &adev))
+		return AE_OK;
+	if (acpi_bus_get_status(adev) || !adev->status.present)
+		return AE_OK;
+
+	INIT_LIST_HEAD(&resource_list);
+	ret = acpi_dev_get_resources(adev, &resource_list,
+				     acpi_i2c_get_freq, i2c);
+	acpi_dev_free_resource_list(&resource_list);
+
+	if (ret < 0)
+		return AE_OK;
+
+	pr_debug("i2c device: %s, freq: %ukHz\n",
+			dev_name(&adev->dev), i2c->freq / 1000);
+
+	return AE_OK;
+}
+
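+/*
+ * Resolve the adapter's ACPI handle (falling back to the controller's
+ * hard-coded namespace path), register the slaves declared under it,
+ * and walk the namespace to learn the bus frequency they request.
+ */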
+void i2c_acpi_devices_setup(struct device *pdev, struct dw_i2c_dev *dev)
+{
+	acpi_handle pdev_handle = ACPI_HANDLE(pdev);
+	acpi_handle handle = NULL;
+	acpi_status status;
+
+	if (pdev_handle) {
+		handle = pdev_handle;
+	} else if (dev->controller->acpi_name) {
+		acpi_get_handle(NULL,
+			dev->controller->acpi_name, &handle);
+
+		ACPI_HANDLE_SET(pdev, handle);
+	}
+
+	if (handle == NULL)
+		return;
+
+	acpi_i2c_register_devices(&dev->adapter);
+
+	/* Find I2C adapter bus frequency */
+	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+				     acpi_i2c_find_device_speed, NULL,
+				     dev, NULL);
+	if (ACPI_FAILURE(status))
+		dev_warn(pdev, "failed to get I2C bus freq\n");
+
+	/* Set the handle back to its raw value */
+	ACPI_HANDLE_SET(pdev, pdev_handle);
+}
+#else
+void i2c_acpi_devices_setup(struct device *pdev, struct dw_i2c_dev *dev) { }
+#endif
+EXPORT_SYMBOL(i2c_acpi_devices_setup);
+
+void i2c_dw_free(struct device *pdev, struct dw_i2c_dev *dev)
+{
+	struct i2c_adapter *adap = &dev->adapter;
+
+	i2c_dw_disable(dev);
+
+	device_remove_file(&adap->dev, &dev_attr_lock_xfer);
+#ifdef CONFIG_I2C_DW_SPEED_MODE_DEBUG
+	sysfs_remove_group(&pdev->kobj, &i2c_dw_attr_group);
+#endif
+
+	i2c_del_adapter(&dev->adapter);
+	put_device(pdev);
+	free_irq(dev->irq, dev);
+	kfree(dev);
+}
+EXPORT_SYMBOL(i2c_dw_free);
+
 static u32
 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
 {
@@ -249,27 +915,6 @@
 	return ((ic_clk * (tLOW + tf) + 5000) / 10000) - 1 + offset;
 }
 
-static void __i2c_dw_enable(struct dw_i2c_dev *dev, bool enable)
-{
-	int timeout = 100;
-
-	do {
-		dw_writel(dev, enable, DW_IC_ENABLE);
-		if ((dw_readl(dev, DW_IC_ENABLE_STATUS) & 1) == enable)
-			return;
-
-		/*
-		 * Wait 10 times the signaling period of the highest I2C
-		 * transfer supported by the driver (for 400KHz this is
-		 * 25us) as described in the DesignWare I2C databook.
-		 */
-		usleep_range(25, 250);
-	} while (timeout--);
-
-	dev_warn(dev->dev, "timeout in %sabling adapter\n",
-		 enable ? "en" : "dis");
-}
-
 /**
  * i2c_dw_init() - initialize the designware i2c master hardware
  * @dev: device private data
@@ -284,63 +929,72 @@
 	u32 hcnt, lcnt;
 	u32 reg;
 
+	if (dev->reset)
+		dev->reset(dev);
+
 	input_clock_khz = dev->get_clk_rate_khz(dev);
 
+	/* Configure register endianness access */
 	reg = dw_readl(dev, DW_IC_COMP_TYPE);
 	if (reg == ___constant_swab32(DW_IC_COMP_TYPE_VALUE)) {
-		/* Configure register endianess access */
-		dev->accessor_flags |= ACCESS_SWAP;
-	} else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff)) {
-		/* Configure register access mode 16bit */
-		dev->accessor_flags |= ACCESS_16BIT;
-	} else if (reg != DW_IC_COMP_TYPE_VALUE) {
+		dev->swab = 1;
+		reg = DW_IC_COMP_TYPE_VALUE;
+	}
+
+	if (reg != DW_IC_COMP_TYPE_VALUE) {
 		dev_err(dev->dev, "Unknown Synopsys component type: "
 			"0x%08x\n", reg);
 		return -ENODEV;
 	}
 
 	/* Disable the adapter */
-	__i2c_dw_enable(dev, false);
+	i2c_dw_disable(dev);
 
-	/* set standard and fast speed deviders for high/low periods */
+	if (dev->get_scl_cfg)
+		dev->get_scl_cfg(dev);
+	else {
+		/* set standard and fast speed dividers for high/low periods */
 
-	/* Standard-mode */
-	hcnt = i2c_dw_scl_hcnt(input_clock_khz,
-				40,	/* tHD;STA = tHIGH = 4.0 us */
-				3,	/* tf = 0.3 us */
-				0,	/* 0: DW default, 1: Ideal */
-				0);	/* No offset */
-	lcnt = i2c_dw_scl_lcnt(input_clock_khz,
-				47,	/* tLOW = 4.7 us */
-				3,	/* tf = 0.3 us */
-				0);	/* No offset */
-	dw_writel(dev, hcnt, DW_IC_SS_SCL_HCNT);
-	dw_writel(dev, lcnt, DW_IC_SS_SCL_LCNT);
-	dev_dbg(dev->dev, "Standard-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
+		/* Standard-mode */
+		hcnt = i2c_dw_scl_hcnt(input_clock_khz,
+					227,	/* tHD;STA = tHIGH = 22.7 us */
+					3,	/* tf = 0.3 us */
+					0,	/* 0: DW default, 1: Ideal */
+					23);	/* offset = 23 */
+		lcnt = i2c_dw_scl_lcnt(input_clock_khz,
+					227,	/* tLOW = 22.7 us */
+					3,	/* tf = 0.3 us */
+					28);	/* offset = 28 */
+		dw_writel(dev, hcnt, DW_IC_SS_SCL_HCNT);
+		dw_writel(dev, lcnt, DW_IC_SS_SCL_LCNT);
+		dev_dbg(dev->dev, "Standard-mode HCNT:LCNT = %d:%d\n",
+					hcnt, lcnt);
 
-	/* Fast-mode */
-	hcnt = i2c_dw_scl_hcnt(input_clock_khz,
-				6,	/* tHD;STA = tHIGH = 0.6 us */
-				3,	/* tf = 0.3 us */
-				0,	/* 0: DW default, 1: Ideal */
-				0);	/* No offset */
-	lcnt = i2c_dw_scl_lcnt(input_clock_khz,
-				13,	/* tLOW = 1.3 us */
-				3,	/* tf = 0.3 us */
-				0);	/* No offset */
-	dw_writel(dev, hcnt, DW_IC_FS_SCL_HCNT);
-	dw_writel(dev, lcnt, DW_IC_FS_SCL_LCNT);
-	dev_dbg(dev->dev, "Fast-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
+		/* Fast-mode */
+		hcnt = i2c_dw_scl_hcnt(input_clock_khz,
+					52,	/* tHD;STA = tHIGH = 5.2 us */
+					3,	/* tf = 0.3 us */
+					0,	/* 0: DW default, 1: Ideal */
+					11);	/* offset = 11 */
+		lcnt = i2c_dw_scl_lcnt(input_clock_khz,
+					72,	/* tLOW = 7.2 us */
+					3,	/* tf = 0.3 us */
+					12);	/* offset = 12 */
+		dw_writel(dev, hcnt, DW_IC_FS_SCL_HCNT);
+		dw_writel(dev, lcnt, DW_IC_FS_SCL_LCNT);
+		dev_dbg(dev->dev, "Fast-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
+	}
 
 	/* Configure Tx/Rx FIFO threshold levels */
-	dw_writel(dev, dev->tx_fifo_depth - 1, DW_IC_TX_TL);
-	dw_writel(dev, 0, DW_IC_RX_TL);
+	dw_writel(dev, dev->tx_fifo_depth / 2, DW_IC_TX_TL);
+	dw_writel(dev, dev->rx_fifo_depth / 2, DW_IC_RX_TL);
 
 	/* configure the i2c master */
 	dw_writel(dev, dev->master_cfg , DW_IC_CON);
+
 	return 0;
 }
-EXPORT_SYMBOL_GPL(i2c_dw_init);
+EXPORT_SYMBOL(i2c_dw_init);
 
 /*
  * Waiting for bus not busy
@@ -355,7 +1009,7 @@
 			return -ETIMEDOUT;
 		}
 		timeout--;
-		usleep_range(1000, 1100);
+		mdelay(1);
 	}
 
 	return 0;
@@ -367,7 +1021,7 @@
 	u32 ic_con;
 
 	/* Disable the adapter */
-	__i2c_dw_enable(dev, false);
+	i2c_dw_disable(dev);
 
 	/* set the slave (target) address */
 	dw_writel(dev, msgs[dev->msg_write_idx].addr, DW_IC_TAR);
@@ -384,7 +1038,7 @@
 	i2c_dw_disable_int(dev);
 
 	/* Enable the adapter */
-	__i2c_dw_enable(dev, true);
+	i2c_dw_enable(dev);
 
 	/* Clear and enable interrupts */
 	i2c_dw_clear_int(dev);
@@ -397,19 +1051,32 @@
  * messages into the tx buffer.  Even if the size of i2c_msg data is
  * longer than the size of the tx buffer, it handles everything.
  */
-static void
+void
 i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
 {
 	struct i2c_msg *msgs = dev->msgs;
 	u32 intr_mask;
 	int tx_limit, rx_limit;
+	int cmd;
 	u32 addr = msgs[dev->msg_write_idx].addr;
 	u32 buf_len = dev->tx_buf_len;
 	u8 *buf = dev->tx_buf;
+	unsigned long flags;
 
 	intr_mask = DW_IC_INTR_DEFAULT_MASK;
 
+	raw_local_irq_save(flags);
+	/*
+	 * Without a hardware-generated STOP (!enable_stop) the TX FIFO
+	 * must not drain while a write is in progress, or the controller
+	 * ends the message early; treat an empty FIFO as an underrun.
+	 */
+	if (!dev->enable_stop && (dev->status & STATUS_WRITE_IN_PROGRESS) &&
+		(dw_readl(dev, DW_IC_TXFLR) < 1)) {
+		dev_err(dev->dev, "TX FIFO underrun, addr: 0x%x.\n", addr);
+		dev->msg_err = -EAGAIN;
+	}
+
 	for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
+		if (dev->msg_err)
+			break;
+
 		/*
 		 * if target address has changed, we need to
 		 * reprogram the target address in the i2c
@@ -439,27 +1106,12 @@
 		rx_limit = dev->rx_fifo_depth - dw_readl(dev, DW_IC_RXFLR);
 
 		while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
-			u32 cmd = 0;
-
-			/*
-			 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set we must
-			 * manually set the stop bit. However, it cannot be
-			 * detected from the registers so we set it always
-			 * when writing/reading the last byte.
-			 */
-			if (dev->msg_write_idx == dev->msgs_num - 1 &&
-			    buf_len == 1)
-				cmd |= BIT(9);
-
+			cmd = (dev->enable_stop && buf_len == 1
+				&& dev->msg_write_idx == dev->msgs_num - 1) ?
+				DW_IC_CMD_STOP : 0;
 			if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {
-
-				/* avoid rx buffer overrun */
-				if (rx_limit - dev->rx_outstanding <= 0)
-					break;
-
 				dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD);
 				rx_limit--;
-				dev->rx_outstanding++;
 			} else
 				dw_writel(dev, cmd | *buf++, DW_IC_DATA_CMD);
 			tx_limit--; buf_len--;
@@ -475,6 +1127,7 @@
 		} else
 			dev->status &= ~STATUS_WRITE_IN_PROGRESS;
 	}
+	raw_local_irq_restore(flags);
 
 	/*
 	 * If i2c_msg index search is completed, we don't need TX_EMPTY
@@ -512,10 +1165,8 @@
 
 		rx_valid = dw_readl(dev, DW_IC_RXFLR);
 
-		for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
+		for (; len > 0 && rx_valid > 0; len--, rx_valid--)
 			*buf++ = dw_readl(dev, DW_IC_DATA_CMD);
-			dev->rx_outstanding--;
-		}
 
 		if (len > 0) {
 			dev->status |= STATUS_READ_IN_PROGRESS;
@@ -558,10 +1209,11 @@
 {
 	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
 	int ret;
+	unsigned long timeout;
 
 	dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);
 
-	mutex_lock(&dev->lock);
+	down(&dev->lock);
 	pm_runtime_get_sync(dev->dev);
 
 	INIT_COMPLETION(dev->cmd_complete);
@@ -573,7 +1225,6 @@
 	dev->msg_err = 0;
 	dev->status = STATUS_IDLE;
 	dev->abort_source = 0;
-	dev->rx_outstanding = 0;
 
 	ret = i2c_dw_wait_bus_not_busy(dev);
 	if (ret < 0)
@@ -583,14 +1234,17 @@
 	i2c_dw_xfer_init(dev);
 
 	/* wait for tx to complete */
-	ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, HZ);
-	if (ret == 0) {
-		dev_err(dev->dev, "controller timed out\n");
+	timeout = wait_for_completion_timeout(&dev->cmd_complete, 3*HZ);
+	if (timeout == 0) {
+		dev_WARN(dev->dev, "controller timed out\n");
+		i2c_dw_dump(dev);
+		trigger_all_cpu_backtrace();
+		if (dev->abort)
+			dev->abort(adap->nr);
 		i2c_dw_init(dev);
 		ret = -ETIMEDOUT;
 		goto done;
-	} else if (ret < 0)
-		goto done;
+	}
 
 	if (dev->msg_err) {
 		ret = dev->msg_err;
@@ -600,7 +1254,7 @@
 	/* no error */
 	if (likely(!dev->cmd_err)) {
 		/* Disable the adapter */
-		__i2c_dw_enable(dev, false);
+		i2c_dw_disable(dev);
 		ret = num;
 		goto done;
 	}
@@ -615,18 +1269,16 @@
 done:
 	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_put_autosuspend(dev->dev);
-	mutex_unlock(&dev->lock);
+	up(&dev->lock);
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(i2c_dw_xfer);
 
 u32 i2c_dw_func(struct i2c_adapter *adap)
 {
 	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
 	return dev->functionality;
 }
-EXPORT_SYMBOL_GPL(i2c_dw_func);
 
 static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
 {
@@ -692,15 +1344,33 @@
 	struct dw_i2c_dev *dev = dev_id;
 	u32 stat, enabled;
 
+	pm_runtime_get(dev->dev);
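+	/*
+	 * If the controller is runtime-suspended the interrupt cannot be
+	 * ours; report IRQ_NONE only on shared lines so an exclusive line
+	 * is not shut down as spurious.
+	 */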
+#ifdef CONFIG_PM_RUNTIME
+	if (!pm_runtime_active(dev->dev)) {
+		pm_runtime_put_autosuspend(dev->dev);
+		if (dev->share_irq)
+			return IRQ_NONE;
+		else
+			return IRQ_HANDLED;
+	}
+#endif
 	enabled = dw_readl(dev, DW_IC_ENABLE);
 	stat = dw_readl(dev, DW_IC_RAW_INTR_STAT);
 	dev_dbg(dev->dev, "%s:  %s enabled= 0x%x stat=0x%x\n", __func__,
 		dev->adapter.name, enabled, stat);
-	if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY))
-		return IRQ_NONE;
+	if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY)) {
+		pm_runtime_put_autosuspend(dev->dev);
+		if (dev->share_irq)
+			return IRQ_NONE;
+		else
+			return IRQ_HANDLED;
+	}
 
 	stat = i2c_dw_read_clear_intrbits(dev);
 
+	if (stat & DW_IC_INTR_RX_OVER)
+		dev_warn(dev->dev, "RX fifo overrun\n");
+
 	if (stat & DW_IC_INTR_TX_ABRT) {
 		dev->cmd_err |= DW_IC_ERR_TX_ABRT;
 		dev->status = STATUS_IDLE;
@@ -726,25 +1396,49 @@
 	 */
 
 tx_aborted:
-	if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err)
-		complete(&dev->cmd_complete);
+	if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET))
+					|| dev->msg_err) {
+		/*
+		 * Check DW_IC_RXFLR register,
+		 * read from the RX FIFO if it's not empty.
+		 */
+		if ((stat & DW_IC_INTR_STOP_DET) &&
+			dw_readl(dev, DW_IC_RXFLR) > 0)
+			i2c_dw_read(dev);
 
+		complete(&dev->cmd_complete);
+	}
+
+	pm_runtime_put_autosuspend(dev->dev);
 	return IRQ_HANDLED;
 }
-EXPORT_SYMBOL_GPL(i2c_dw_isr);
+
+u32 i2c_dw_is_enabled(struct dw_i2c_dev *dev)
+{
+	return dw_readl(dev, DW_IC_ENABLE_STATUS);
+}
+
+static void __i2c_dw_enable(struct dw_i2c_dev *dev, bool enable)
+{
+	int timeout = 100;
+
+	do {
+		dw_writel(dev, enable, DW_IC_ENABLE);
+		if (i2c_dw_is_enabled(dev) == enable)
+			return;
+
+		usleep_range(25, 250);
+	} while (timeout-- > 0);
+
+	dev_warn(dev->dev, "timeout in %sabling adapter\n",
+		enable ? "en" : "dis");
+}
 
 void i2c_dw_enable(struct dw_i2c_dev *dev)
 {
        /* Enable the adapter */
 	__i2c_dw_enable(dev, true);
 }
-EXPORT_SYMBOL_GPL(i2c_dw_enable);
-
-u32 i2c_dw_is_enabled(struct dw_i2c_dev *dev)
-{
-	return dw_readl(dev, DW_IC_ENABLE);
-}
-EXPORT_SYMBOL_GPL(i2c_dw_is_enabled);
 
 void i2c_dw_disable(struct dw_i2c_dev *dev)
 {
@@ -755,25 +1449,18 @@
 	dw_writel(dev, 0, DW_IC_INTR_MASK);
 	dw_readl(dev, DW_IC_CLR_INTR);
 }
-EXPORT_SYMBOL_GPL(i2c_dw_disable);
 
 void i2c_dw_clear_int(struct dw_i2c_dev *dev)
 {
 	dw_readl(dev, DW_IC_CLR_INTR);
 }
-EXPORT_SYMBOL_GPL(i2c_dw_clear_int);
 
 void i2c_dw_disable_int(struct dw_i2c_dev *dev)
 {
 	dw_writel(dev, 0, DW_IC_INTR_MASK);
 }
-EXPORT_SYMBOL_GPL(i2c_dw_disable_int);
 
 u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev)
 {
 	return dw_readl(dev, DW_IC_COMP_PARAM_1);
 }
-EXPORT_SYMBOL_GPL(i2c_dw_read_comp_param);
-
-MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter core");
-MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index e761ad1..4eb23a8 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -26,15 +26,171 @@
  *
  */
 
+/*
+ * Registers offset
+ */
+#define DW_IC_CON		0x0
+#define DW_IC_TAR		0x4
+#define DW_IC_DATA_CMD		0x10
+#define DW_IC_SS_SCL_HCNT	0x14
+#define DW_IC_SS_SCL_LCNT	0x18
+#define DW_IC_FS_SCL_HCNT	0x1c
+#define DW_IC_FS_SCL_LCNT	0x20
+#define DW_IC_HS_SCL_HCNT	0x24
+#define DW_IC_HS_SCL_LCNT	0x28
+#define DW_IC_INTR_STAT		0x2c
+#define DW_IC_INTR_MASK		0x30
+#define DW_IC_RAW_INTR_STAT	0x34
+#define DW_IC_RX_TL		0x38
+#define DW_IC_TX_TL		0x3c
+#define DW_IC_CLR_INTR		0x40
+#define DW_IC_CLR_RX_UNDER	0x44
+#define DW_IC_CLR_RX_OVER	0x48
+#define DW_IC_CLR_TX_OVER	0x4c
+#define DW_IC_CLR_RD_REQ	0x50
+#define DW_IC_CLR_TX_ABRT	0x54
+#define DW_IC_CLR_RX_DONE	0x58
+#define DW_IC_CLR_ACTIVITY	0x5c
+#define DW_IC_CLR_STOP_DET	0x60
+#define DW_IC_CLR_START_DET	0x64
+#define DW_IC_CLR_GEN_CALL	0x68
+#define DW_IC_ENABLE		0x6c
+#define DW_IC_STATUS		0x70
+#define DW_IC_TXFLR		0x74
+#define DW_IC_RXFLR		0x78
+#define DW_IC_TX_ABRT_SOURCE	0x80
+#define DW_IC_ENABLE_STATUS	0x9c
+#define DW_IC_COMP_PARAM_1	0xf4
+#define DW_IC_COMP_TYPE		0xfc
+#define DW_IC_COMP_TYPE_VALUE	0x44570140
 
 #define DW_IC_CON_MASTER		0x1
 #define DW_IC_CON_SPEED_STD		0x2
 #define DW_IC_CON_SPEED_FAST		0x4
+#define DW_IC_CON_SPEED_HIGH		0x6
 #define DW_IC_CON_10BITADDR_MASTER	0x10
 #define DW_IC_CON_RESTART_EN		0x20
 #define DW_IC_CON_SLAVE_DISABLE		0x40
 
+#define INTEL_MID_STD_CFG  (DW_IC_CON_MASTER |			\
+				DW_IC_CON_SLAVE_DISABLE |	\
+				DW_IC_CON_RESTART_EN)
 
+#define DW_IC_INTR_RX_UNDER	0x001
+#define DW_IC_INTR_RX_OVER	0x002
+#define DW_IC_INTR_RX_FULL	0x004
+#define DW_IC_INTR_TX_OVER	0x008
+#define DW_IC_INTR_TX_EMPTY	0x010
+#define DW_IC_INTR_RD_REQ	0x020
+#define DW_IC_INTR_TX_ABRT	0x040
+#define DW_IC_INTR_RX_DONE	0x080
+#define DW_IC_INTR_ACTIVITY	0x100
+#define DW_IC_INTR_STOP_DET	0x200
+#define DW_IC_INTR_START_DET	0x400
+#define DW_IC_INTR_GEN_CALL	0x800
+
+#define DW_IC_INTR_DEFAULT_MASK		(DW_IC_INTR_RX_FULL | \
+					 DW_IC_INTR_TX_EMPTY | \
+					 DW_IC_INTR_TX_ABRT | \
+					 DW_IC_INTR_STOP_DET | \
+					 DW_IC_INTR_RX_OVER)
+
+#define DW_IC_STATUS_ACTIVITY	0x1
+
+#define DW_IC_ERR_TX_ABRT	0x1
+
+#define DW_IC_CMD_STOP		0x200
+
+#define DW_IC_SPEED_MASK	0x6
+
+/*
+ * status codes
+ */
+#define STATUS_POWERON			0x0
+#define STATUS_IDLE			STATUS_POWERON
+#define STATUS_WRITE_IN_PROGRESS	0x1
+#define STATUS_READ_IN_PROGRESS		0x2
+
+#define TIMEOUT			20 /* ms */
+
+/*
+ * hardware abort codes from the DW_IC_TX_ABRT_SOURCE register
+ *
+ * only expected abort codes are listed here
+ * refer to the datasheet for the full list
+ */
+#define ABRT_7B_ADDR_NOACK	0
+#define ABRT_10ADDR1_NOACK	1
+#define ABRT_10ADDR2_NOACK	2
+#define ABRT_TXDATA_NOACK	3
+#define ABRT_GCALL_NOACK	4
+#define ABRT_GCALL_READ		5
+#define ABRT_SBYTE_ACKDET	7
+#define ABRT_SBYTE_NORSTRT	9
+#define ABRT_10B_RD_NORSTRT	10
+#define ABRT_MASTER_DIS		11
+#define ARB_LOST		12
+
+#define DW_IC_TX_ABRT_7B_ADDR_NOACK	(1UL << ABRT_7B_ADDR_NOACK)
+#define DW_IC_TX_ABRT_10ADDR1_NOACK	(1UL << ABRT_10ADDR1_NOACK)
+#define DW_IC_TX_ABRT_10ADDR2_NOACK	(1UL << ABRT_10ADDR2_NOACK)
+#define DW_IC_TX_ABRT_TXDATA_NOACK	(1UL << ABRT_TXDATA_NOACK)
+#define DW_IC_TX_ABRT_GCALL_NOACK	(1UL << ABRT_GCALL_NOACK)
+#define DW_IC_TX_ABRT_GCALL_READ	(1UL << ABRT_GCALL_READ)
+#define DW_IC_TX_ABRT_SBYTE_ACKDET	(1UL << ABRT_SBYTE_ACKDET)
+#define DW_IC_TX_ABRT_SBYTE_NORSTRT	(1UL << ABRT_SBYTE_NORSTRT)
+#define DW_IC_TX_ABRT_10B_RD_NORSTRT	(1UL << ABRT_10B_RD_NORSTRT)
+#define DW_IC_TX_ABRT_MASTER_DIS	(1UL << ABRT_MASTER_DIS)
+#define DW_IC_TX_ARB_LOST		(1UL << ARB_LOST)
+
+#define DW_IC_TX_ABRT_NOACK		(DW_IC_TX_ABRT_7B_ADDR_NOACK | \
+					 DW_IC_TX_ABRT_10ADDR1_NOACK | \
+					 DW_IC_TX_ABRT_10ADDR2_NOACK | \
+					 DW_IC_TX_ABRT_TXDATA_NOACK | \
+					 DW_IC_TX_ABRT_GCALL_NOACK)
+
+/*
+ * i2c scl hcnt/lcnt setting
+ */
+#define PNW_SS_SCLK_HCNT		0x1EC
+#define PNW_SS_SCLK_LCNT		0x1F3
+#define PNW_FS_SCLK_HCNT		0x66
+#define PNW_FS_SCLK_LCNT		0x8B
+#define PNW_HS_SCLK_HCNT		0x9
+#define PNW_HS_SCLK_LCNT		0x17
+
+#define CLV_SS_SCLK_HCNT		0x1EC
+#define CLV_SS_SCLK_LCNT		0x1F3
+#define CLV_FS_SCLK_HCNT		0x59
+#define CLV_FS_SCLK_LCNT		0x98
+#define CLV_HS_SCLK_HCNT		0x8
+#define CLV_HS_SCLK_LCNT		0x17
+
+/* unofficial configuration
+#define MERR_SS_SCLK_HCNT 0x2c8
+#define MERR_SS_SCLK_LCNT 0x380
+#define MERR_FS_SCLK_HCNT 0x084
+#define MERR_FS_SCLK_LCNT 0x100
+*/
+#define MERR_SS_SCLK_HCNT 0x2f8
+#define MERR_SS_SCLK_LCNT 0x37b
+#define MERR_FS_SCLK_HCNT 0x087
+#define MERR_FS_SCLK_LCNT 0x10a
+#define MERR_HS_SCLK_HCNT 0x8
+#define MERR_HS_SCLK_LCNT 0x20
+
+#define VLV2_SS_SCLK_HCNT 0x214
+#define VLV2_SS_SCLK_LCNT 0x272
+#define VLV2_FS_SCLK_HCNT 0x50
+#define VLV2_FS_SCLK_LCNT 0xad
+#define VLV2_HS_SCLK_HCNT 0x6
+#define VLV2_HS_SCLK_LCNT 0x16
+
+#define DW_STD_SPEED	100000
+#define DW_FAST_SPEED	400000
+#define DW_HIGH_SPEED	3400000
+
+struct dw_controller;
 /**
  * struct dw_i2c_dev - private i2c-designware data
  * @dev: driver model device node
@@ -60,16 +216,20 @@
  * @adapter: i2c subsystem adapter node
  * @tx_fifo_depth: depth of the hardware tx fifo
  * @rx_fifo_depth: depth of the hardware rx fifo
- * @rx_outstanding: current master-rx elements in tx fifo
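+ * @controller: pointer to the per-bus static configuration
+ * @get_scl_cfg: optional hook programming board-validated SCL counts
+ * @reset: optional hook resetting the controller before init
+ * @abort: optional hook aborting a hung transfer on the given bus
+ * @enable_stop: nonzero if the hardware wants an explicit STOP command bit
+ * @share_irq: nonzero if the interrupt line is shared with other devices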
  */
 struct dw_i2c_dev {
 	struct device		*dev;
 	void __iomem		*base;
 	struct completion	cmd_complete;
-	struct mutex		lock;
+	struct semaphore	lock;
 	struct clk		*clk;
 	u32			(*get_clk_rate_khz) (struct dw_i2c_dev *dev);
-	struct dw_pci_controller *controller;
+	int			(*get_scl_cfg) (struct dw_i2c_dev *dev);
+	void			(*reset)(struct dw_i2c_dev *dev);
+	int			(*abort)(int busnum);
+	struct dw_controller	*controller;
+	int			enable_stop;
+	int			share_irq;
 	int			cmd_err;
 	struct i2c_msg		*msgs;
 	int			msgs_num;
@@ -83,28 +243,81 @@
 	unsigned int		status;
 	u32			abort_source;
 	int			irq;
-	u32			accessor_flags;
+	int			swab;
 	struct i2c_adapter	adapter;
 	u32			functionality;
 	u32			master_cfg;
 	unsigned int		tx_fifo_depth;
 	unsigned int		rx_fifo_depth;
-	int			rx_outstanding;
+	int			use_dyn_clk;	/* use dynamic clk setting */
+	u32			clk_khz;	/* input clock */
+	u32			speed_cfg;
+	u32			lock_flag;
+	u32			freq;
 };
 
-#define ACCESS_SWAP		0x00000001
-#define ACCESS_16BIT		0x00000002
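+/*
+ * struct dw_controller - static per-bus configuration, indexed by
+ * enum dw_ctl_id_t. bus_num is the fixed i2c adapter number, clk_khz
+ * the controller input clock (unused when scl_cfg programs the SCL
+ * counts directly).
+ */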
+struct dw_controller {
+	u32 bus_num;
+	u32 bus_cfg;
+	u32 tx_fifo_depth;
+	u32 rx_fifo_depth;
+	u32 clk_khz;
+	int enable_stop;
+	int share_irq;
+	char *acpi_name;
+	int (*scl_cfg) (struct dw_i2c_dev *dev);
+	void (*reset)(struct dw_i2c_dev *dev);
+};
 
-extern u32 dw_readl(struct dw_i2c_dev *dev, int offset);
-extern void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset);
-extern int i2c_dw_init(struct dw_i2c_dev *dev);
-extern int i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
-		int num);
-extern u32 i2c_dw_func(struct i2c_adapter *adap);
-extern irqreturn_t i2c_dw_isr(int this_irq, void *dev_id);
-extern void i2c_dw_enable(struct dw_i2c_dev *dev);
-extern u32 i2c_dw_is_enabled(struct dw_i2c_dev *dev);
-extern void i2c_dw_disable(struct dw_i2c_dev *dev);
-extern void i2c_dw_clear_int(struct dw_i2c_dev *dev);
-extern void i2c_dw_disable_int(struct dw_i2c_dev *dev);
-extern u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev);
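+/*
+ * Controller instance ids. The cherryview entries alias valleyview
+ * because the IP configuration is identical on both SoCs.
+ */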
+enum dw_ctl_id_t {
+	moorestown_0,
+	moorestown_1,
+	moorestown_2,
+
+	medfield_0,
+	medfield_1,
+	medfield_2,
+	medfield_3,
+	medfield_4,
+	medfield_5,
+
+	cloverview_0,
+	cloverview_1,
+	cloverview_2,
+	cloverview_3,
+	cloverview_4,
+	cloverview_5,
+
+	merrifield_0,
+	merrifield_1,
+	merrifield_2,
+	merrifield_3,
+	merrifield_4,
+	merrifield_5,
+	merrifield_6,
+
+	valleyview_0,
+	valleyview_1,
+	valleyview_2,
+	valleyview_3,
+	valleyview_4,
+	valleyview_5,
+	valleyview_6,
+
+	cherryview_0 = valleyview_0,
+	cherryview_1 = valleyview_1,
+	cherryview_2 = valleyview_2,
+	cherryview_3 = valleyview_3,
+	cherryview_4 = valleyview_4,
+	cherryview_5 = valleyview_5,
+	cherryview_6 = valleyview_6,
+};
+
+extern int intel_mid_dw_i2c_abort(int busnum);
+int i2c_dw_init(struct dw_i2c_dev *dev);
+struct dw_i2c_dev *i2c_dw_setup(struct device *pdev, int bus_idx,
+	unsigned long start, unsigned long len, int irq);
+void i2c_dw_free(struct device *pdev, struct dw_i2c_dev *dev);
+int i2c_dw_suspend(struct dw_i2c_dev *dev, bool runtime);
+int i2c_dw_resume(struct dw_i2c_dev *dev, bool runtime);
+void i2c_acpi_devices_setup(struct device *pdev, struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index f6ed06c..b150bc2 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -39,113 +39,32 @@
 #include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/pm_runtime.h>
+#include <linux/semaphore.h>
+#include <linux/fs.h>
+#include <linux/acpi.h>
 #include "i2c-designware-core.h"
 
 #define DRIVER_NAME "i2c-designware-pci"
-
-enum dw_pci_ctl_id_t {
-	moorestown_0,
-	moorestown_1,
-	moorestown_2,
-
-	medfield_0,
-	medfield_1,
-	medfield_2,
-	medfield_3,
-	medfield_4,
-	medfield_5,
-};
-
-struct dw_pci_controller {
-	u32 bus_num;
-	u32 bus_cfg;
-	u32 tx_fifo_depth;
-	u32 rx_fifo_depth;
-	u32 clk_khz;
-};
-
-#define INTEL_MID_STD_CFG  (DW_IC_CON_MASTER |			\
-				DW_IC_CON_SLAVE_DISABLE |	\
-				DW_IC_CON_RESTART_EN)
-
-static struct  dw_pci_controller  dw_pci_controllers[] = {
-	[moorestown_0] = {
-		.bus_num     = 0,
-		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
-		.tx_fifo_depth = 32,
-		.rx_fifo_depth = 32,
-		.clk_khz      = 25000,
-	},
-	[moorestown_1] = {
-		.bus_num     = 1,
-		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
-		.tx_fifo_depth = 32,
-		.rx_fifo_depth = 32,
-		.clk_khz      = 25000,
-	},
-	[moorestown_2] = {
-		.bus_num     = 2,
-		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
-		.tx_fifo_depth = 32,
-		.rx_fifo_depth = 32,
-		.clk_khz      = 25000,
-	},
-	[medfield_0] = {
-		.bus_num     = 0,
-		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
-		.tx_fifo_depth = 32,
-		.rx_fifo_depth = 32,
-		.clk_khz      = 25000,
-	},
-	[medfield_1] = {
-		.bus_num     = 1,
-		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
-		.tx_fifo_depth = 32,
-		.rx_fifo_depth = 32,
-		.clk_khz      = 25000,
-	},
-	[medfield_2] = {
-		.bus_num     = 2,
-		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
-		.tx_fifo_depth = 32,
-		.rx_fifo_depth = 32,
-		.clk_khz      = 25000,
-	},
-	[medfield_3] = {
-		.bus_num     = 3,
-		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_STD,
-		.tx_fifo_depth = 32,
-		.rx_fifo_depth = 32,
-		.clk_khz      = 25000,
-	},
-	[medfield_4] = {
-		.bus_num     = 4,
-		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
-		.tx_fifo_depth = 32,
-		.rx_fifo_depth = 32,
-		.clk_khz      = 25000,
-	},
-	[medfield_5] = {
-		.bus_num     = 5,
-		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
-		.tx_fifo_depth = 32,
-		.rx_fifo_depth = 32,
-		.clk_khz      = 25000,
-	},
-};
-static struct i2c_algorithm i2c_dw_algo = {
-	.master_xfer	= i2c_dw_xfer,
-	.functionality	= i2c_dw_func,
-};
+#define DW_I2C_STATIC_BUS_NUM	10
 
 static int i2c_dw_pci_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
 	struct dw_i2c_dev *i2c = pci_get_drvdata(pdev);
+
+	dev_dbg(dev, "suspend called\n");
+
+	return i2c_dw_suspend(i2c, false);
+}
+
+static int i2c_dw_pci_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+	struct dw_i2c_dev *i2c = pci_get_drvdata(pdev);
 	int err;
 
-
-	i2c_dw_disable(i2c);
+	dev_dbg(dev, "runtime suspend called\n");
+	i2c_dw_suspend(i2c, true);
 
 	err = pci_save_state(pdev);
 	if (err) {
@@ -166,130 +85,86 @@
 {
 	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
 	struct dw_i2c_dev *i2c = pci_get_drvdata(pdev);
+
+	dev_dbg(dev, "resume called\n");
+	return i2c_dw_resume(i2c, false);
+}
+
+static int i2c_dw_pci_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+	struct dw_i2c_dev *i2c = pci_get_drvdata(pdev);
 	int err;
-	u32 enabled;
 
-	enabled = i2c_dw_is_enabled(i2c);
-	if (enabled)
-		return 0;
-
+	dev_dbg(dev, "runtime resume called\n");
 	err = pci_set_power_state(pdev, PCI_D0);
 	if (err) {
 		dev_err(&pdev->dev, "pci_set_power_state() failed\n");
 		return err;
 	}
-
 	pci_restore_state(pdev);
+	i2c_dw_resume(i2c, true);
 
-	i2c_dw_init(i2c);
 	return 0;
 }
 
-static int i2c_dw_pci_runtime_idle(struct device *dev)
-{
-	int err = pm_schedule_suspend(dev, 500);
-	dev_dbg(dev, "runtime_idle called\n");
-
-	if (err != 0)
-		return 0;
-	return -EBUSY;
-}
-
 static const struct dev_pm_ops i2c_dw_pm_ops = {
-	.resume         = i2c_dw_pci_resume,
-	.suspend        = i2c_dw_pci_suspend,
-	SET_RUNTIME_PM_OPS(i2c_dw_pci_suspend, i2c_dw_pci_resume,
-			   i2c_dw_pci_runtime_idle)
+	.suspend_late = i2c_dw_pci_suspend,
+	.resume_early = i2c_dw_pci_resume,
+	SET_RUNTIME_PM_OPS(i2c_dw_pci_runtime_suspend,
+			   i2c_dw_pci_runtime_resume,
+			   NULL)
 };
 
-static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
-{
-	return dev->controller->clk_khz;
-}
-
 static int i2c_dw_pci_probe(struct pci_dev *pdev,
-			    const struct pci_device_id *id)
+			    const struct pci_device_id *id)
 {
 	struct dw_i2c_dev *dev;
-	struct i2c_adapter *adap;
+	unsigned long start, len;
 	int r;
-	struct  dw_pci_controller *controller;
+	int bus_idx;
+	static int bus_num;
 
-	if (id->driver_data >= ARRAY_SIZE(dw_pci_controllers)) {
-		dev_err(&pdev->dev, "%s: invalid driver data %ld\n", __func__,
-			id->driver_data);
-		return -EINVAL;
-	}
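+	/*
+	 * Every id in the device table maps to the first controller of
+	 * its family; this static counter advances the per-family index,
+	 * assuming the functions of one family probe in ascending order.
+	 */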
+	bus_idx = id->driver_data + bus_num;
+	bus_num++;
 
-	controller = &dw_pci_controllers[id->driver_data];
-
-	r = pcim_enable_device(pdev);
+	r = pci_enable_device(pdev);
 	if (r) {
 		dev_err(&pdev->dev, "Failed to enable I2C PCI device (%d)\n",
 			r);
 		return r;
 	}
 
-	r = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
+	/* Determine the address of the I2C area */
+	start = pci_resource_start(pdev, 0);
+	len = pci_resource_len(pdev, 0);
+	if (!start || len == 0) {
+		dev_err(&pdev->dev, "base address not set\n");
+		return -ENODEV;
+	}
+
+	r = pci_request_region(pdev, 0, DRIVER_NAME);
 	if (r) {
-		dev_err(&pdev->dev, "I/O memory remapping failed\n");
+		dev_err(&pdev->dev, "failed to request I2C region "
+			"0x%lx-0x%lx\n", start,
+			(unsigned long)pci_resource_end(pdev, 0));
 		return r;
 	}
 
-	dev = devm_kzalloc(&pdev->dev, sizeof(struct dw_i2c_dev), GFP_KERNEL);
-	if (!dev)
-		return -ENOMEM;
-
-	init_completion(&dev->cmd_complete);
-	mutex_init(&dev->lock);
-	dev->clk = NULL;
-	dev->controller = controller;
-	dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz;
-	dev->base = pcim_iomap_table(pdev)[0];
-	dev->dev = &pdev->dev;
-	dev->functionality =
-		I2C_FUNC_I2C |
-		I2C_FUNC_SMBUS_BYTE |
-		I2C_FUNC_SMBUS_BYTE_DATA |
-		I2C_FUNC_SMBUS_WORD_DATA |
-		I2C_FUNC_SMBUS_I2C_BLOCK;
-	dev->master_cfg =  controller->bus_cfg;
+	dev = i2c_dw_setup(&pdev->dev, bus_idx, start, len, pdev->irq);
+	if (IS_ERR(dev)) {
+		pci_release_region(pdev, 0);
+		dev_err(&pdev->dev, "failed to setup i2c\n");
+		return PTR_ERR(dev);
+	}
 
 	pci_set_drvdata(pdev, dev);
 
-	dev->tx_fifo_depth = controller->tx_fifo_depth;
-	dev->rx_fifo_depth = controller->rx_fifo_depth;
-	r = i2c_dw_init(dev);
-	if (r)
-		return r;
+	i2c_acpi_devices_setup(&pdev->dev, dev);
 
-	adap = &dev->adapter;
-	i2c_set_adapdata(adap, dev);
-	adap->owner = THIS_MODULE;
-	adap->class = 0;
-	adap->algo = &i2c_dw_algo;
-	adap->dev.parent = &pdev->dev;
-	adap->nr = controller->bus_num;
-	snprintf(adap->name, sizeof(adap->name), "i2c-designware-pci-%d",
-		adap->nr);
-
-	r = devm_request_irq(&pdev->dev, pdev->irq, i2c_dw_isr, IRQF_SHARED,
-			adap->name, dev);
-	if (r) {
-		dev_err(&pdev->dev, "failure requesting irq %i\n", dev->irq);
-		return r;
-	}
-
-	i2c_dw_disable_int(dev);
-	i2c_dw_clear_int(dev);
-	r = i2c_add_numbered_adapter(adap);
-	if (r) {
-		dev_err(&pdev->dev, "failure adding adapter\n");
-		return r;
-	}
-
-	pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
 	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_put_noidle(&pdev->dev);
 	pm_runtime_allow(&pdev->dev);
 
 	return 0;
@@ -299,35 +174,60 @@
 {
 	struct dw_i2c_dev *dev = pci_get_drvdata(pdev);
 
-	i2c_dw_disable(dev);
 	pm_runtime_forbid(&pdev->dev);
-	pm_runtime_get_noresume(&pdev->dev);
-
-	i2c_del_adapter(&dev->adapter);
+	i2c_dw_free(&pdev->dev, dev);
+	pci_set_drvdata(pdev, NULL);
+	pci_release_region(pdev, 0);
 }
 
 /* work with hotplug and coldplug */
 MODULE_ALIAS("i2c_designware-pci");
 
-static DEFINE_PCI_DEVICE_TABLE(i2_designware_pci_ids) = {
+static DEFINE_PCI_DEVICE_TABLE(i2c_designware_pci_ids) = {
 	/* Moorestown */
 	{ PCI_VDEVICE(INTEL, 0x0802), moorestown_0 },
-	{ PCI_VDEVICE(INTEL, 0x0803), moorestown_1 },
-	{ PCI_VDEVICE(INTEL, 0x0804), moorestown_2 },
+	{ PCI_VDEVICE(INTEL, 0x0803), moorestown_0 },
+	{ PCI_VDEVICE(INTEL, 0x0804), moorestown_0 },
 	/* Medfield */
-	{ PCI_VDEVICE(INTEL, 0x0817), medfield_3,},
-	{ PCI_VDEVICE(INTEL, 0x0818), medfield_4 },
-	{ PCI_VDEVICE(INTEL, 0x0819), medfield_5 },
+	{ PCI_VDEVICE(INTEL, 0x0817), medfield_0 },
+	{ PCI_VDEVICE(INTEL, 0x0818), medfield_0 },
+	{ PCI_VDEVICE(INTEL, 0x0819), medfield_0 },
 	{ PCI_VDEVICE(INTEL, 0x082C), medfield_0 },
-	{ PCI_VDEVICE(INTEL, 0x082D), medfield_1 },
-	{ PCI_VDEVICE(INTEL, 0x082E), medfield_2 },
+	{ PCI_VDEVICE(INTEL, 0x082D), medfield_0 },
+	{ PCI_VDEVICE(INTEL, 0x082E), medfield_0 },
+	/* Cloverview */
+	{ PCI_VDEVICE(INTEL, 0x08E2), cloverview_0 },
+	{ PCI_VDEVICE(INTEL, 0x08E3), cloverview_0 },
+	{ PCI_VDEVICE(INTEL, 0x08E4), cloverview_0 },
+	{ PCI_VDEVICE(INTEL, 0x08F4), cloverview_0 },
+	{ PCI_VDEVICE(INTEL, 0x08F5), cloverview_0 },
+	{ PCI_VDEVICE(INTEL, 0x08F6), cloverview_0 },
+	/* Merrifield */
+	{ PCI_VDEVICE(INTEL, 0x1195), merrifield_0 },
+	{ PCI_VDEVICE(INTEL, 0x1196), merrifield_0 },
+	/* Valleyview 2 */
+	{ PCI_VDEVICE(INTEL, 0x0F41), valleyview_0 },
+	{ PCI_VDEVICE(INTEL, 0x0F42), valleyview_0 },
+	{ PCI_VDEVICE(INTEL, 0x0F43), valleyview_0 },
+	{ PCI_VDEVICE(INTEL, 0x0F44), valleyview_0 },
+	{ PCI_VDEVICE(INTEL, 0x0F45), valleyview_0 },
+	{ PCI_VDEVICE(INTEL, 0x0F46), valleyview_0 },
+	{ PCI_VDEVICE(INTEL, 0x0F47), valleyview_0 },
+	/* Cherryview */
+	{ PCI_VDEVICE(INTEL, 0x22C1), cherryview_0 },
+	{ PCI_VDEVICE(INTEL, 0x22C2), cherryview_0 },
+	{ PCI_VDEVICE(INTEL, 0x22C3), cherryview_0 },
+	{ PCI_VDEVICE(INTEL, 0x22C4), cherryview_0 },
+	{ PCI_VDEVICE(INTEL, 0x22C5), cherryview_0 },
+	{ PCI_VDEVICE(INTEL, 0x22C6), cherryview_0 },
+	{ PCI_VDEVICE(INTEL, 0x22C7), cherryview_0 },
 	{ 0,}
 };
-MODULE_DEVICE_TABLE(pci, i2_designware_pci_ids);
+MODULE_DEVICE_TABLE(pci, i2c_designware_pci_ids);
 
 static struct pci_driver dw_i2c_driver = {
 	.name		= DRIVER_NAME,
-	.id_table	= i2_designware_pci_ids,
+	.id_table	= i2c_designware_pci_ids,
 	.probe		= i2c_dw_pci_probe,
 	.remove		= i2c_dw_pci_remove,
 	.driver         = {
@@ -335,7 +235,38 @@
 	},
 };
 
-module_pci_driver(dw_i2c_driver);
+static int __init dw_i2c_init_driver(void)
+{
+	return  pci_register_driver(&dw_i2c_driver);
+}
+module_init(dw_i2c_init_driver);
+
+static void __exit dw_i2c_exit_driver(void)
+{
+	pci_unregister_driver(&dw_i2c_driver);
+}
+module_exit(dw_i2c_exit_driver);
+
+#ifndef MODULE
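+/*
+ * Reserve the static bus number by registering a dummy board info
+ * on it, so that dynamically numbered adapters cannot claim it.
+ */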
+static int __init dw_i2c_reserve_static_bus(void)
+{
+	struct i2c_board_info dummy = {
+		I2C_BOARD_INFO("dummy", 0xff),
+	};
+
+	i2c_register_board_info(DW_I2C_STATIC_BUS_NUM, &dummy, 1);
+	return 0;
+}
+subsys_initcall(dw_i2c_reserve_static_bus);
+
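+/*
+ * Quirk for the 0x0F44 controller: hard-wire the PCI PM capability
+ * offset to 0x80, presumably because the device does not advertise
+ * the capability itself.
+ */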
+static void dw_i2c_pci_final_quirks(struct pci_dev *pdev)
+{
+	pdev->pm_cap = 0x80;
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0F44,
+				dw_i2c_pci_final_quirks);
+#endif
 
 MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
 MODULE_DESCRIPTION("Synopsys DesignWare PCI I2C bus adapter");
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 35b70a1..96a3bcf 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -34,58 +34,94 @@
 #include <linux/sched.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
-#include <linux/of_i2c.h>
 #include <linux/platform_device.h>
-#include <linux/pm.h>
-#include <linux/pm_runtime.h>
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
 #include "i2c-designware-core.h"
 
-static struct i2c_algorithm i2c_dw_algo = {
-	.master_xfer	= i2c_dw_xfer,
-	.functionality	= i2c_dw_func,
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id dw_i2c_acpi_ids[] = {
+	{ "80860F41", valleyview_0 },
+	{ "808622C1", cherryview_0 },
+	{ }
 };
-static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
+MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_ids);
+#endif
+
+static int dw_i2c_plat_suspend(struct device *dev)
 {
-	return clk_get_rate(dev->clk)/1000;
+	struct platform_device *pdev =
+		container_of(dev, struct platform_device, dev);
+	struct dw_i2c_dev *i2c = platform_get_drvdata(pdev);
+
+	dev_dbg(dev, "suspend called\n");
+	return i2c_dw_suspend(i2c, false);
 }
 
-#ifdef CONFIG_ACPI
-static int dw_i2c_acpi_configure(struct platform_device *pdev)
+static int dw_i2c_plat_runtime_suspend(struct device *dev)
 {
-	struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
+	struct platform_device *pdev =
+		container_of(dev, struct platform_device, dev);
+	struct dw_i2c_dev *i2c = platform_get_drvdata(pdev);
 
-	if (!ACPI_HANDLE(&pdev->dev))
-		return -ENODEV;
+	dev_dbg(dev, "runtime suspend called\n");
+	i2c_dw_suspend(i2c, true);
 
-	dev->adapter.nr = -1;
-	dev->tx_fifo_depth = 32;
-	dev->rx_fifo_depth = 32;
 	return 0;
 }
 
-static const struct acpi_device_id dw_i2c_acpi_match[] = {
-	{ "INT33C2", 0 },
-	{ "INT33C3", 0 },
-	{ "80860F41", 0 },
-	{ }
-};
-MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match);
-#else
-static inline int dw_i2c_acpi_configure(struct platform_device *pdev)
+static int dw_i2c_plat_resume(struct device *dev)
 {
-	return -ENODEV;
-}
-#endif
+	struct platform_device *pdev =
+		container_of(dev, struct platform_device, dev);
+	struct dw_i2c_dev *i2c = platform_get_drvdata(pdev);
 
-static int dw_i2c_probe(struct platform_device *pdev)
+	dev_dbg(dev, "resume called\n");
+	return i2c_dw_resume(i2c, false);
+}
+
+static int dw_i2c_plat_runtime_resume(struct device *dev)
+{
+	struct platform_device *pdev =
+		container_of(dev, struct platform_device, dev);
+	struct dw_i2c_dev *i2c = platform_get_drvdata(pdev);
+
+	dev_dbg(dev, "runtime resume called\n");
+	i2c_dw_resume(i2c, true);
+
+	return 0;
+}
+
+static const struct dev_pm_ops dw_i2c_plat_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend,
+				dw_i2c_plat_resume)
+	SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend,
+			   dw_i2c_plat_runtime_resume,
+			   NULL)
+};
+
+static int __init dw_i2c_probe(struct platform_device *pdev)
 {
 	struct dw_i2c_dev *dev;
-	struct i2c_adapter *adap;
-	struct resource *mem;
-	int irq, r;
+	struct resource *mem, *ioarea;
+	const struct acpi_device_id *id;
+	unsigned long start, len;
+	int bus_idx = 0;
+	static int bus_num;
+	int irq;
+
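+	/*
+	 * ACPI-enumerated platform devices are named after their ACPI
+	 * _HID, so matching the id prefix against the device name
+	 * recovers the controller family; the static counter then walks
+	 * the per-family index, as in the PCI driver.
+	 */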
+#ifdef CONFIG_ACPI
+	for (id = dw_i2c_acpi_ids; id->id[0]; id++)
+		if (!strncmp(id->id, dev_name(&pdev->dev), strlen(id->id))) {
+			bus_idx = id->driver_data + bus_num;
+			bus_num++;
+		}
+#else
+	bus_idx = pdev->id;
+#endif
 
 	/* NOTE: driver uses the static register mapping */
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -93,6 +129,8 @@
 		dev_err(&pdev->dev, "no mem resource?\n");
 		return -EINVAL;
 	}
+	start = mem->start;
+	len = resource_size(mem);
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
@@ -100,133 +138,47 @@
 		return irq; /* -ENXIO */
 	}
 
-	dev = devm_kzalloc(&pdev->dev, sizeof(struct dw_i2c_dev), GFP_KERNEL);
-	if (!dev)
-		return -ENOMEM;
+	ioarea = request_mem_region(mem->start, resource_size(mem),
+			pdev->name);
+	if (!ioarea) {
+		dev_err(&pdev->dev, "I2C region already claimed\n");
+		return -EBUSY;
+	}
 
-	dev->base = devm_ioremap_resource(&pdev->dev, mem);
-	if (IS_ERR(dev->base))
-		return PTR_ERR(dev->base);
+	dev = i2c_dw_setup(&pdev->dev, bus_idx, start, len, irq);
+	if (IS_ERR(dev)) {
+		release_mem_region(mem->start, resource_size(mem));
+		dev_err(&pdev->dev, "failed to setup i2c\n");
+		return PTR_ERR(dev);
+	}
 
-	init_completion(&dev->cmd_complete);
-	mutex_init(&dev->lock);
-	dev->dev = &pdev->dev;
-	dev->irq = irq;
 	platform_set_drvdata(pdev, dev);
 
-	dev->clk = devm_clk_get(&pdev->dev, NULL);
-	dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz;
+	acpi_i2c_register_devices(&dev->adapter);
 
-	if (IS_ERR(dev->clk))
-		return PTR_ERR(dev->clk);
-	clk_prepare_enable(dev->clk);
-
-	dev->functionality =
-		I2C_FUNC_I2C |
-		I2C_FUNC_10BIT_ADDR |
-		I2C_FUNC_SMBUS_BYTE |
-		I2C_FUNC_SMBUS_BYTE_DATA |
-		I2C_FUNC_SMBUS_WORD_DATA |
-		I2C_FUNC_SMBUS_I2C_BLOCK;
-	dev->master_cfg =  DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
-		DW_IC_CON_RESTART_EN | DW_IC_CON_SPEED_FAST;
-
-	/* Try first if we can configure the device from ACPI */
-	r = dw_i2c_acpi_configure(pdev);
-	if (r) {
-		u32 param1 = i2c_dw_read_comp_param(dev);
-
-		dev->tx_fifo_depth = ((param1 >> 16) & 0xff) + 1;
-		dev->rx_fifo_depth = ((param1 >> 8)  & 0xff) + 1;
-		dev->adapter.nr = pdev->id;
-	}
-	r = i2c_dw_init(dev);
-	if (r)
-		return r;
-
-	i2c_dw_disable_int(dev);
-	r = devm_request_irq(&pdev->dev, dev->irq, i2c_dw_isr, IRQF_SHARED,
-			pdev->name, dev);
-	if (r) {
-		dev_err(&pdev->dev, "failure requesting irq %i\n", dev->irq);
-		return r;
-	}
-
-	adap = &dev->adapter;
-	i2c_set_adapdata(adap, dev);
-	adap->owner = THIS_MODULE;
-	adap->class = I2C_CLASS_HWMON;
-	strlcpy(adap->name, "Synopsys DesignWare I2C adapter",
-			sizeof(adap->name));
-	adap->algo = &i2c_dw_algo;
-	adap->dev.parent = &pdev->dev;
-	adap->dev.of_node = pdev->dev.of_node;
-
-	r = i2c_add_numbered_adapter(adap);
-	if (r) {
-		dev_err(&pdev->dev, "failure adding adapter\n");
-		return r;
-	}
-	of_i2c_register_devices(adap);
-	acpi_i2c_register_devices(adap);
-
-	pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
-	pm_runtime_use_autosuspend(&pdev->dev);
 	pm_runtime_set_active(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
 
 	return 0;
 }
 
-static int dw_i2c_remove(struct platform_device *pdev)
+static int __exit dw_i2c_remove(struct platform_device *pdev)
 {
 	struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
+	struct resource *mem;
 
-	pm_runtime_get_sync(&pdev->dev);
-
-	i2c_del_adapter(&dev->adapter);
-
-	i2c_dw_disable(dev);
-
-	pm_runtime_put(&pdev->dev);
-	pm_runtime_disable(&pdev->dev);
-
+	pm_runtime_forbid(&pdev->dev);
+	i2c_dw_free(&pdev->dev, dev);
+	platform_set_drvdata(pdev, NULL);
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (mem)
+		release_mem_region(mem->start, resource_size(mem));
 	return 0;
 }
 
-#ifdef CONFIG_OF
-static const struct of_device_id dw_i2c_of_match[] = {
-	{ .compatible = "snps,designware-i2c", },
-	{},
-};
-MODULE_DEVICE_TABLE(of, dw_i2c_of_match);
-#endif
-
-#ifdef CONFIG_PM
-static int dw_i2c_suspend(struct device *dev)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
-
-	clk_disable_unprepare(i_dev->clk);
-
-	return 0;
-}
-
-static int dw_i2c_resume(struct device *dev)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
-
-	clk_prepare_enable(i_dev->clk);
-	i2c_dw_init(i_dev);
-
-	return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(dw_i2c_dev_pm_ops, dw_i2c_suspend, dw_i2c_resume);
-
 /* work with hotplug and coldplug */
 MODULE_ALIAS("platform:i2c_designware");
 
@@ -235,17 +187,32 @@
 	.driver		= {
 		.name	= "i2c_designware",
 		.owner	= THIS_MODULE,
-		.of_match_table = of_match_ptr(dw_i2c_of_match),
-		.acpi_match_table = ACPI_PTR(dw_i2c_acpi_match),
-		.pm	= &dw_i2c_dev_pm_ops,
+		.pm     = &dw_i2c_plat_pm_ops,
+#ifdef CONFIG_ACPI
+		.acpi_match_table = ACPI_PTR(dw_i2c_acpi_ids),
+#endif
 	},
 };
 
 static int __init dw_i2c_init_driver(void)
 {
+	struct pci_dev *dw_pci;
+
+	/*
+	 * Try to get the PCI device; if it exists, skip the ACPI
+	 * platform registration. The BYT FDK supports two enumeration
+	 * modes, PCI and ACPI; when a PCI device is found, the ACPI
+	 * enumeration is ignored.
+	 */
+	dw_pci = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0F41, NULL);
+	if (dw_pci) {
+		pr_info("DW I2C: Find I2C controller in PCI device, "
+			"exit ACPI platform register!\n");
+		return 0;
+	}
+
 	return platform_driver_probe(&dw_i2c_driver, dw_i2c_probe);
 }
-subsys_initcall(dw_i2c_init_driver);
+module_init(dw_i2c_init_driver);
 
 static void __exit dw_i2c_exit_driver(void)
 {
diff --git a/drivers/i2c/busses/i2c-pmic-regs.h b/drivers/i2c/busses/i2c-pmic-regs.h
new file mode 100644
index 0000000..e5a55ff
--- /dev/null
+++ b/drivers/i2c/busses/i2c-pmic-regs.h
@@ -0,0 +1,78 @@
+/*
+ * i2c-pmic-regs.h - PMIC I2C registers
+ *
+ * Copyright (C) 2011 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Yegnesh Iyer <yegnesh.s.iyer@intel.com>
+ */
+
+#ifndef __I2C_PMIC_REGS_H__
+#define __I2C_PMIC_REGS_H__
+
+#include <linux/mutex.h>
+
+/*********************************************************************
+ *		Generic defines
+ *********************************************************************/
+
+#define D7 (1 << 7)
+#define D6 (1 << 6)
+#define D5 (1 << 5)
+#define D4 (1 << 4)
+#define D3 (1 << 3)
+#define D2 (1 << 2)
+#define D1 (1 << 1)
+#define D0 (1 << 0)
+
+#define PMIC_SRAM_INTR_ADDR 0xFFFFF616
+
+#define I2C_MSG_LEN		4
+
+#define I2COVRCTRL_ADDR		0x58
+#define I2COVRDADDR_ADDR	0x59
+#define I2COVROFFSET_ADDR	0x5A
+#define I2COVRWRDATA_ADDR	0x5B
+#define I2COVRRDDATA_ADDR	0x5C
+
+#define IRQLVL1_ADDR			0x01
+#define IRQLVL1_MASK_ADDR		0x0c
+#define IRQLVL1_CHRGR_MASK		D5
+
+#define MCHGRIRQ1_ADDR			0x13
+#define MCHGRIRQ0_ADDR			0x12
+
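+/*
+ * I2C status bits in the SRAM interrupt byte: D1 = write complete,
+ * D2 = read complete, D3 = NACK, as decoded by pmic_i2c_handler(),
+ * which shifts these right by IRQ0_I2C_BIT_POS to form the
+ * I2C_WR/I2C_RD/I2C_NACK status values.
+ */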
+#define PMIC_I2C_INTR_MASK ((u8)(D3|D2|D1))
+#define I2COVRCTRL_I2C_RD D1
+#define I2COVRCTRL_I2C_WR D0
+#define CHGRIRQ0_ADDR			0x07
+
+#define IRQ0_I2C_BIT_POS 1
+
+struct pmic_i2c_dev {
+	int irq;
+	u32 pmic_intr_sram_addr;
+	struct i2c_adapter adapter;
+	int i2c_rw;
+	wait_queue_head_t i2c_wait;
+	struct mutex i2c_pmic_rw_lock;
+	void __iomem *pmic_intr_map;
+	struct device *dev;
+};
+
+#endif
diff --git a/drivers/i2c/busses/i2c-pmic.c b/drivers/i2c/busses/i2c-pmic.c
new file mode 100644
index 0000000..9e96f10
--- /dev/null
+++ b/drivers/i2c/busses/i2c-pmic.c
@@ -0,0 +1,452 @@
+/*
+ * i2c-pmic.c: PMIC I2C adapter driver.
+ *
+ * Copyright (C) 2011 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Yegnesh Iyer <yegnesh.s.iyer@intel.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/rpmsg.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/power_supply.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/pm_runtime.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_mid_remoteproc.h>
+#include "i2c-pmic-regs.h"
+
+#define DRIVER_NAME "i2c_pmic_adap"
+#define PMIC_I2C_ADAPTER 8
+
+enum I2C_STATUS {
+	I2C_WR = 1,
+	I2C_RD,
+	I2C_NACK = 4
+};
+
+static struct pmic_i2c_dev *pmic_dev;
+
+/* Function Definitions */
+
+/* PMIC I2C read-write completion interrupt handler */
+static irqreturn_t pmic_i2c_handler(int irq, void *data)
+{
+	u8 irq0_int;
+
+	irq0_int = ioread8(pmic_dev->pmic_intr_map);
+	irq0_int &= PMIC_I2C_INTR_MASK;
+
+	if (irq0_int) {
+		pmic_dev->i2c_rw = (irq0_int >> IRQ0_I2C_BIT_POS);
+		return IRQ_WAKE_THREAD;
+	}
+
+	return IRQ_NONE;
+}
+
+
+static irqreturn_t pmic_thread_handler(int id, void *data)
+{
+	dev_dbg(pmic_dev->dev, "Clearing IRQLVL1_MASK_ADDR\n");
+
+	intel_scu_ipc_update_register(IRQLVL1_MASK_ADDR, 0x00,
+			IRQLVL1_CHRGR_MASK);
+	wake_up(&(pmic_dev->i2c_wait));
+	return IRQ_HANDLED;
+}
+
+/* PMIC i2c read msg */
+static inline int pmic_i2c_read_xfer(struct i2c_msg msg)
+{
+	int ret;
+	u16 i;
+	u8 mask = (I2C_RD | I2C_NACK);
+	u16 regs[I2C_MSG_LEN] = {0};
+	u8 data[I2C_MSG_LEN] = {0};
+
+	for (i = 0; i < msg.len ; i++) {
+		pmic_dev->i2c_rw = 0;
+		regs[0] = I2COVRDADDR_ADDR;
+		data[0] = msg.addr;
+		regs[1] = I2COVROFFSET_ADDR;
+		data[1] = msg.buf[0] + i;
+		/*
+		 * The intel_scu_ipc functions only work for an even
+		 * number of bytes, hence the dummy byte transfer below.
+		 */
+		regs[2] = I2COVROFFSET_ADDR;
+		data[2] = msg.buf[0] + i;
+		regs[3] = I2COVRCTRL_ADDR;
+		data[3] = I2COVRCTRL_I2C_RD;
+		ret = intel_scu_ipc_writev(regs, data, I2C_MSG_LEN);
+		if (unlikely(ret))
+			return ret;
+
+		ret = wait_event_timeout(pmic_dev->i2c_wait,
+				(pmic_dev->i2c_rw & mask),
+				HZ);
+
+		if (ret == 0) {
+			ret = -ETIMEDOUT;
+			goto read_err_exit;
+		} else if (pmic_dev->i2c_rw == I2C_NACK) {
+			ret = -EIO;
+			goto read_err_exit;
+		} else {
+			ret = intel_scu_ipc_ioread8(I2COVRRDDATA_ADDR,
+					&(msg.buf[i]));
+			if (unlikely(ret)) {
+				ret = -EIO;
+				goto read_err_exit;
+			}
+		}
+	}
+	return 0;
+
+read_err_exit:
+	return ret;
+}
+
+/* PMIC i2c write msg */
+static inline int pmic_i2c_write_xfer(struct i2c_msg msg)
+{
+	int ret;
+	u16 i;
+	u8 mask = (I2C_WR | I2C_NACK);
+	u16 regs[I2C_MSG_LEN] = {0};
+	u8 data[I2C_MSG_LEN] = {0};
+
+	for (i = 1; i <= msg.len ; i++) {
+		pmic_dev->i2c_rw = 0;
+		regs[0] = I2COVRDADDR_ADDR;
+		data[0] = msg.addr;
+		regs[1] = I2COVRWRDATA_ADDR;
+		data[1] = msg.buf[i];
+		regs[2] = I2COVROFFSET_ADDR;
+		data[2] = msg.buf[0] + i - 1;
+		regs[3] = I2COVRCTRL_ADDR;
+		data[3] = I2COVRCTRL_I2C_WR;
+		ret = intel_scu_ipc_writev(regs, data, I2C_MSG_LEN);
+		if (unlikely(ret))
+			return ret;
+
+		ret = wait_event_timeout(pmic_dev->i2c_wait,
+				(pmic_dev->i2c_rw & mask),
+				HZ);
+		if (ret == 0)
+			return -ETIMEDOUT;
+		else if (pmic_dev->i2c_rw == I2C_NACK)
+			return -EIO;
+	}
+	return 0;
+}
+
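+/*
+ * Dispatch table indexed by (msgs[i].flags & I2C_M_RD): I2C_M_RD is
+ * bit 0 of the flags, so index 0 selects the write handler and
+ * index 1 the read handler.
+ */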
+static int (*xfer_fn[]) (struct i2c_msg) = {
+	pmic_i2c_write_xfer,
+	pmic_i2c_read_xfer
+};
+
+/* PMIC I2C Master transfer algorithm function */
+static int pmic_master_xfer(struct i2c_adapter *adap,
+				struct i2c_msg msgs[],
+				int num)
+{
+	int ret = 0;
+	int i;
+	u8 index;
+
+	mutex_lock(&pmic_dev->i2c_pmic_rw_lock);
+	pm_runtime_get_sync(pmic_dev->dev);
+	for (i = 0 ; i < num ; i++) {
+		index = msgs[i].flags & I2C_M_RD;
+		ret = (xfer_fn[index])(msgs[i]);
+
+		if (ret == -EACCES)
+			dev_info(pmic_dev->dev, "Blocked Access!\n");
+
+		/*
+		 * If access is restricted, report success to avoid
+		 * extra error handling in the client.
+		 */
+
+		if (ret != 0 && ret != -EACCES)
+			goto transfer_err_exit;
+	}
+
+	ret = num;
+
+transfer_err_exit:
+	mutex_unlock(&pmic_dev->i2c_pmic_rw_lock);
+	pm_runtime_put_sync(pmic_dev->dev);
+	intel_scu_ipc_update_register(IRQLVL1_MASK_ADDR, 0x00,
+			IRQLVL1_CHRGR_MASK);
+	return ret;
+}
+
+/* PMIC I2C adapter capability function */
+static u32 pmic_master_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_BYTE_DATA;
+}
+
+static int pmic_smbus_xfer(struct i2c_adapter *adap, u16 addr,
+				unsigned short flags, char read_write,
+				u8 command, int size,
+				union i2c_smbus_data *data)
+{
+	struct i2c_msg msg;
+	u8 buf[2];
+	int ret;
+
+	msg.addr = addr;
+	msg.flags = flags & I2C_M_TEN;
+	msg.buf = buf;
+	msg.buf[0] = command;
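+	/*
+	 * buf[0] carries the register offset (the SMBus command) and
+	 * buf[1] the data byte, matching the layout expected by
+	 * pmic_i2c_write_xfer().
+	 */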
+	if (read_write == I2C_SMBUS_WRITE) {
+		msg.len = 1;
+		msg.buf[1] = data->byte;
+	} else {
+		msg.flags |= I2C_M_RD;
+		msg.len = 1;
+	}
+
+	ret = pmic_master_xfer(adap, &msg, 1);
+	if (ret == 1) {
+		if (read_write == I2C_SMBUS_READ)
+			data->byte = msg.buf[0];
+		return 0;
+	}
+	return ret;
+}
+
+
+static const struct i2c_algorithm pmic_i2c_algo = {
+	.master_xfer = pmic_master_xfer,
+	.functionality = pmic_master_func,
+	.smbus_xfer = pmic_smbus_xfer,
+};
+
+static int pmic_i2c_probe(struct platform_device *pdev)
+{
+	struct i2c_adapter *adap;
+	int ret;
+
+	pmic_dev = kzalloc(sizeof(struct pmic_i2c_dev), GFP_KERNEL);
+	if (!pmic_dev)
+		return -ENOMEM;
+
+	pmic_dev->dev = &pdev->dev;
+	pmic_dev->irq = platform_get_irq(pdev, 0);
+
+	mutex_init(&pmic_dev->i2c_pmic_rw_lock);
+	init_waitqueue_head(&(pmic_dev->i2c_wait));
+
+	pmic_dev->pmic_intr_map = ioremap_nocache(PMIC_SRAM_INTR_ADDR, 8);
+	if (!pmic_dev->pmic_intr_map) {
+		dev_err(&pdev->dev, "ioremap Failed\n");
+		ret = -ENOMEM;
+		goto ioremap_failed;
+	}
+	ret = request_threaded_irq(pmic_dev->irq, pmic_i2c_handler,
+					pmic_thread_handler,
+					IRQF_SHARED|IRQF_NO_SUSPEND,
+					DRIVER_NAME, pmic_dev);
+	if (ret)
+		goto err_irq_request;
+
+	ret = intel_scu_ipc_update_register(IRQLVL1_MASK_ADDR, 0x00,
+			IRQLVL1_CHRGR_MASK);
+	if (unlikely(ret))
+		goto unmask_irq_failed;
+	ret = intel_scu_ipc_update_register(MCHGRIRQ0_ADDR, 0x00,
+			PMIC_I2C_INTR_MASK);
+	if (unlikely(ret))
+		goto unmask_irq_failed;
+
+	/* Init runtime PM state*/
+	pm_runtime_put_noidle(pmic_dev->dev);
+
+	adap = &pmic_dev->adapter;
+	adap->owner = THIS_MODULE;
+	adap->class = I2C_CLASS_HWMON;
+	adap->algo = &pmic_i2c_algo;
+	strlcpy(adap->name, "PMIC I2C Adapter", sizeof(adap->name));
+	adap->nr = PMIC_I2C_ADAPTER;
+	ret = i2c_add_numbered_adapter(adap);
+
+	if (ret) {
+		dev_err(&pdev->dev, "Error adding the adapter\n");
+		goto err_adap_add;
+	}
+
+	pm_schedule_suspend(pmic_dev->dev, MSEC_PER_SEC);
+	return 0;
+
+err_adap_add:
+	free_irq(pmic_dev->irq, pmic_dev);
+unmask_irq_failed:
+err_irq_request:
+	iounmap(pmic_dev->pmic_intr_map);
+ioremap_failed:
+	kfree(pmic_dev);
+	return ret;
+}
+
+static int pmic_i2c_remove(struct platform_device *pdev)
+{
+	iounmap(pmic_dev->pmic_intr_map);
+	free_irq(pmic_dev->irq, pmic_dev);
+	pm_runtime_get_noresume(pmic_dev->dev);
+	kfree(pmic_dev);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int pmic_i2c_suspend(struct device *dev)
+{
+	dev_info(dev, "%s\n", __func__);
+	return 0;
+}
+
+static int pmic_i2c_resume(struct device *dev)
+{
+	dev_info(dev, "%s\n", __func__);
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int pmic_i2c_runtime_suspend(struct device *dev)
+{
+	dev_info(dev, "%s\n", __func__);
+	return 0;
+}
+
+static int pmic_i2c_runtime_resume(struct device *dev)
+{
+	dev_info(dev, "%s\n", __func__);
+	return 0;
+}
+
+static int pmic_i2c_runtime_idle(struct device *dev)
+{
+	dev_info(dev, "%s\n", __func__);
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops pmic_i2c_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pmic_i2c_suspend,
+				pmic_i2c_resume)
+	SET_RUNTIME_PM_OPS(pmic_i2c_runtime_suspend,
+				pmic_i2c_runtime_resume,
+				pmic_i2c_runtime_idle)
+};
+
+struct platform_driver pmic_i2c_driver = {
+	.probe = pmic_i2c_probe,
+	.remove = pmic_i2c_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.pm = &pmic_i2c_pm_ops,
+	},
+};
+
+static int pmic_i2c_init(void)
+{
+	return platform_driver_register(&pmic_i2c_driver);
+}
+
+static void pmic_i2c_exit(void)
+{
+	platform_driver_unregister(&pmic_i2c_driver);
+}
+
+static int pmic_i2c_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed pmic_i2c rpmsg device\n");
+
+	ret = pmic_i2c_init();
+
+out:
+	return ret;
+}
+
+static void pmic_i2c_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	pmic_i2c_exit();
+	dev_info(&rpdev->dev, "Removed pmic_i2c rpmsg device\n");
+}
+
+static void pmic_i2c_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id pmic_i2c_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_i2c_pmic_adap" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, pmic_i2c_rpmsg_id_table);
+
+static struct rpmsg_driver pmic_i2c_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= pmic_i2c_rpmsg_id_table,
+	.probe		= pmic_i2c_rpmsg_probe,
+	.callback	= pmic_i2c_rpmsg_cb,
+	.remove		= pmic_i2c_rpmsg_remove,
+};
+
+static int __init pmic_i2c_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&pmic_i2c_rpmsg);
+}
+
+static void __exit pmic_i2c_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&pmic_i2c_rpmsg);
+}
+module_init(pmic_i2c_rpmsg_init);
+module_exit(pmic_i2c_rpmsg_exit);
+
+MODULE_AUTHOR("Yegnesh Iyer <yegnesh.s.iyer@intel.com");
+MODULE_DESCRIPTION("PMIC I2C Master driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index d47bb0f..725d94e 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -61,6 +61,7 @@
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/module.h>
+#include <linux/intel_mid_pm.h>
 #include <asm/cpu_device_id.h>
 #include <asm/mwait.h>
 #include <asm/msr.h>
@@ -330,6 +331,321 @@
 		.enter = NULL }
 };
 
+static struct cpuidle_state vlv_cstates[CPUIDLE_STATE_MAX] = {
+	{ /* MWAIT C1 */
+		.name = "C1-ATM",
+		.desc = "MWAIT 0x00",
+		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+		.exit_latency = 1,
+		.target_residency = 4,
+		.enter = &intel_idle },
+	{ /* MWAIT C4 */
+		.name = "C4-ATM",
+		.desc = "MWAIT 0x30",
+		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 100,
+		.target_residency = 400,
+		.enter = &intel_idle },
+	{ /* MWAIT C6 */
+		.name = "C6-ATM",
+		.desc = "MWAIT 0x52",
+		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 140,
+		.target_residency = 560,
+		.enter = &intel_idle },
+	{ /* MWAIT C7-S0i1 */
+		.name = "S0i1-ATM",
+		.desc = "MWAIT 0x60",
+		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 1200,
+		.target_residency = 4000,
+		.enter = &intel_idle },
+	{ /* MWAIT C8-S0i2 */
+		.name = "S0i2-ATM",
+		.desc = "MWAIT 0x62",
+		.flags = MWAIT2flg(0x62) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 2000,
+		.target_residency = 8000,
+		.enter = &intel_idle },
+	{ /* MWAIT C9-S0i3 */
+		.name = "S0i3-ATM",
+		.desc = "MWAIT 0x64",
+		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 10000,
+		.target_residency = 20000,
+		.enter = &intel_idle },
+	{
+		.enter = NULL }
+};
+
+#if defined(CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER)
+static struct cpuidle_state mrfld_cstates[CPUIDLE_STATE_MAX] = {
+	{ /* MWAIT C1 */
+		.name = "C1-ATM",
+		.desc = "MWAIT 0x00",
+		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+		.exit_latency = 1,
+		.target_residency = 4,
+		.enter = &intel_idle },
+	{ /* MWAIT C4 */
+		.name = "C4-ATM",
+		.desc = "MWAIT 0x30",
+		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 100,
+		.target_residency = 400,
+		.enter = &intel_idle },
+	{ /* MWAIT C6 */
+		.name = "C6-ATM",
+		.desc = "MWAIT 0x52",
+		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 140,
+		.target_residency = 560,
+		.enter = &intel_idle },
+	{ /* MWAIT C7-S0i1 */
+		.name = "S0i1-ATM",
+		.desc = "MWAIT 0x60",
+		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 1200,
+		.target_residency = 4000,
+		.enter = &intel_idle },
+	{ /* MWAIT C9-S0i3 */
+		.name = "S0i3-ATM",
+		.desc = "MWAIT 0x64",
+		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 10000,
+		.target_residency = 20000,
+		.enter = &intel_idle },
+	{
+		.enter = NULL }
+};
+#else
+#define mrfld_cstates atom_cstates
+#endif
+
+#if defined(CONFIG_REMOVEME_INTEL_ATOM_MDFLD_POWER) || \
+	defined(CONFIG_REMOVEME_INTEL_ATOM_CLV_POWER)
+
+static struct cpuidle_state mfld_cstates[CPUIDLE_STATE_MAX] = {
+	{ /* MWAIT C1 */
+		.name = "ATM-C1",
+		.desc = "MWAIT 0x00",
+		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+		.exit_latency = CSTATE_EXIT_LATENCY_C1,
+		.target_residency = 4,
+		.enter = &intel_idle },
+	{ /* MWAIT C2 */
+		.name = "ATM-C2",
+		.desc = "MWAIT 0x10",
+		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID,
+		.exit_latency = CSTATE_EXIT_LATENCY_C2,
+		.target_residency = 80,
+		.enter = &intel_idle },
+	{ /* MWAIT C4 */
+		.name = "ATM-C4",
+		.desc = "MWAIT 0x30",
+		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = CSTATE_EXIT_LATENCY_C4,
+		.target_residency = 400,
+		.enter = &intel_idle },
+	{ /* MWAIT C6 */
+		.name = "ATM-C6",
+		.desc = "MWAIT 0x52",
+		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = CSTATE_EXIT_LATENCY_C6,
+		.power_usage  = C6_POWER_USAGE,
+		.target_residency = 560,
+		.enter = &soc_s0ix_idle },
+	{
+		.name = "ATM-S0i1",
+		.desc = "MWAIT 0x52",
+		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = CSTATE_EXIT_LATENCY_S0i1,
+		.power_usage  = S0I1_POWER_USAGE,
+		.enter = &soc_s0ix_idle },
+	{
+		.name = "ATM-LpAudio",
+		.desc = "MWAIT 0x52",
+		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = CSTATE_EXIT_LATENCY_LPMP3,
+		.power_usage  = LPMP3_POWER_USAGE,
+		.enter = &soc_s0ix_idle },
+	{
+		.name = "ATM-S0i3",
+		.desc = "MWAIT 0x52",
+		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = CSTATE_EXIT_LATENCY_S0i3,
+		.power_usage  = S0I3_POWER_USAGE,
+		.enter = &soc_s0ix_idle },
+	{
+		.enter = NULL }
+};
+
+static int enter_s0ix_state(u32 eax, int gov_req_state, int s0ix_state,
+		  struct cpuidle_device *dev, int index)
+{
+	int s0ix_entered = 0;
+	int selected_state = C6_STATE_IDX;
+
+	if (atomic_add_return(1, &nr_cpus_in_c6) == num_online_cpus() &&
+		 s0ix_state) {
+		s0ix_entered = mid_s0ix_enter(s0ix_state);
+		if (!s0ix_entered) {
+			if (pmu_is_s0ix_in_progress()) {
+				atomic_dec(&nr_cpus_in_c6);
+				eax = C4_HINT;
+			}
+			pmu_set_s0ix_complete();
+		}
+	}
+	switch (s0ix_state) {
+	case MID_S0I1_STATE:
+		trace_cpu_idle(S0I1_STATE_IDX, dev->cpu);
+		break;
+	case MID_LPMP3_STATE:
+		trace_cpu_idle(LPMP3_STATE_IDX, dev->cpu);
+		break;
+	case MID_S0I3_STATE:
+		trace_cpu_idle(S0I3_STATE_IDX, dev->cpu);
+		break;
+	case MID_S3_STATE:
+		trace_cpu_idle(S0I3_STATE_IDX, dev->cpu);
+		break;
+	default:
+		trace_cpu_idle((eax >> 4) + 1, dev->cpu);
+	}
+	__monitor((void *)&current_thread_info()->flags, 0, 0);
+	smp_mb();
+	if (!need_resched())
+		__mwait(eax, 1);
+
+	if (likely(eax == C6_HINT))
+		atomic_dec(&nr_cpus_in_c6);
+
+	/*
+	 * During S0ix exit, inform the SCU that the OS has exited. If
+	 * the SCU is still waiting for the C6 ack trigger, this lets
+	 * it break out of the ack-C6 timeout loop.
+	 */
+	pmu_set_s0ix_complete();
+
+	/* In case of demotion to S0i1/lpmp3 update last_state */
+	if (s0ix_entered) {
+		selected_state = S0I3_STATE_IDX;
+
+		if (s0ix_state == MID_S0I1_STATE) {
+			index = S0I1_STATE_IDX;
+			selected_state = S0I1_STATE_IDX;
+		} else if (s0ix_state == MID_LPMP3_STATE) {
+			index = LPMP3_STATE_IDX;
+			selected_state = LPMP3_STATE_IDX;
+		}
+	} else if (eax == C4_HINT) {
+		index = C4_STATE_IDX;
+		selected_state = C4_STATE_IDX;
+	} else
+		index = C6_STATE_IDX;
+
+	pmu_s0ix_demotion_stat(gov_req_state, selected_state);
+
+	return index;
+}
+
+static int soc_s0ix_idle(struct cpuidle_device *dev,
+		struct cpuidle_driver *drv, int index)
+{
+	struct cpuidle_state *state = &drv->states[index];
+	unsigned long eax = flg2MWAIT(state->flags);
+	int cpu = smp_processor_id();
+	int s0ix_state   = 0;
+	unsigned int cstate;
+	int gov_req_state = (int) eax;
+
+	/*
+	 * Check whether S0ix is already in progress; if so, demote the
+	 * C6 request to C4 while S0ix completes.
+	 */
+	if (unlikely(pmu_is_s0ix_in_progress()))
+		return intel_idle(dev, drv, C4_STATE_IDX);
+
+	/* Check whether an S0ix state is needed and possible */
+	if (eax != C6_HINT)
+		s0ix_state = get_target_platform_state(&eax);
+
+	/*
+	 * leave_mm() to avoid costly and often unnecessary wakeups
+	 * for flushing the user TLB's associated with the active mm.
+	 */
+	if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
+		leave_mm(cpu);
+
+	cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
+
+	if (!(lapic_timer_reliable_states & (1 << (cstate))))
+		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
+
+	stop_critical_timings();
+
+	if (!need_resched())
+		index = enter_s0ix_state(eax, gov_req_state,
+					s0ix_state, dev, index);
+
+	start_critical_timings();
+
+	if (!(lapic_timer_reliable_states & (1 << (cstate))))
+		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+
+	return index;
+}
+#else
+#define mfld_cstates atom_cstates
+#endif
+
+#ifdef CONFIG_ATOM_SOC_POWER
+static unsigned int get_target_residency(unsigned int cstate)
+{
+	unsigned int t_sleep = cpuidle_state_table[cstate].target_residency;
+	unsigned int prev_idx;
+
+	/* get the previous lower sleep state */
+	if ((cstate == 8) || (cstate == 9))
+		prev_idx = cstate - 2;
+	else
+		prev_idx = cstate - 1;
+
+	/* calculate target_residency only if not defined already */
+	if (!t_sleep) {
+		unsigned int p_active = cpuidle_state_table[0].power_usage;
+		unsigned int prev_state_power = cpuidle_state_table
+							[prev_idx].power_usage;
+		unsigned int curr_state_power = cpuidle_state_table
+							[cstate].power_usage;
+		unsigned int prev_state_lat = cpuidle_state_table
+							[prev_idx].exit_latency;
+		unsigned int curr_state_lat = cpuidle_state_table
+							[cstate].exit_latency;
+
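+		/*
+		 * Break-even residency: the deeper state pays off once
+		 * the power saved while resident outweighs the energy
+		 * spent at p_active during the extra exit latency. With
+		 * hypothetical numbers p_active = 1000, prev = (lat 100,
+		 * power 400) and curr = (lat 140, power 150):
+		 * t_sleep = (1000*40 + 100*400 - 140*150) / (400 - 150)
+		 *	   = 236, then rounded up below.
+		 */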
+		if (curr_state_power && prev_state_power && p_active &&
+		    prev_state_lat && curr_state_lat &&
+		    (curr_state_lat > prev_state_lat) &&
+		    (prev_state_power > curr_state_power)) {
+			t_sleep = (p_active * (curr_state_lat - prev_state_lat)
+					+ (prev_state_lat * prev_state_power)
+					- (curr_state_lat * curr_state_power)) /
+				  (prev_state_power - curr_state_power);
+
+			/* round-up target_residency */
+			t_sleep++;
+		}
+	}
+
+	WARN_ON(!t_sleep);
+
+	pr_debug(PREFIX "cpuidle: target_residency[%d]= %d\n", cstate, t_sleep);
+
+	return t_sleep;
+}
+#endif
+
 /**
  * intel_idle
  * @dev: cpuidle_device
@@ -347,6 +663,17 @@
 	unsigned int cstate;
 	int cpu = smp_processor_id();
 
+#if (defined(CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER) && \
+	defined(CONFIG_PM_DEBUG))
+	{
+		/* Get Cstate based on ignore table from PMU driver */
+		unsigned int ncstate;
+		cstate =
+		(((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
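+		/* e.g. hint 0x52: ((0x52 >> 4) & 0xf) + 1 = 6, i.e. C6 */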
+		ncstate = pmu_get_new_cstate(cstate, &index);
+		eax	= flg2MWAIT(drv->states[index].flags);
+	}
+#endif
 	cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
 
 	/*
@@ -465,6 +792,10 @@
 	.disable_promotion_to_c1e = true,
 };
 
+static const struct idle_cpu idle_cpu_mrfld = {
+	.state_table = mrfld_cstates,
+};
+
 #define ICPU(model, cpu) \
 	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
 
@@ -486,6 +817,7 @@
 	ICPU(0x3f, idle_cpu_hsw),
 	ICPU(0x45, idle_cpu_hsw),
 	ICPU(0x46, idle_cpu_hsw),
+	ICPU(0x4a, idle_cpu_mrfld),	/* Tangier SoC */
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
@@ -585,13 +917,25 @@
 		mwait_substate = MWAIT_HINT2SUBSTATE(mwait_hint);
 
 		/* does the state exist in CPUID.MWAIT? */
-		num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
+
+		/*
+		 * FIXME: Do not check the number of substates for states
+		 * above C6; these are not real C-states supported by the
+		 * CPU but C-states emulated for S0ix support.
+		 */
+		if ((cstate + 1) < 6) {
+			num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
 					& MWAIT_SUBSTATE_MASK;
+			if (num_substates == 0)
+				continue;
+		}
 
-		/* if sub-state in table is not enumerated by CPUID */
-		if ((mwait_substate + 1) > num_substates)
-			continue;
-
+#if !defined(CONFIG_ATOM_SOC_POWER)
+		if (boot_cpu_data.x86_model != 0x37) {
+			/* if sub-state in table is not enumerated by CPUID */
+			if ((mwait_substate + 1) > num_substates)
+				continue;
+		}
+#endif
 		if (((mwait_cstate + 1) > 2) &&
 			!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
 			mark_tsc_unstable("TSC halts in idle"
@@ -643,13 +987,25 @@
 		mwait_substate = MWAIT_HINT2SUBSTATE(mwait_hint);
 
 		/* does the state exist in CPUID.MWAIT? */
-		num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
+
+		/*
+		 * FIXME: Do not check the number of substates for states
+		 * above C6; these are not real C-states supported by the
+		 * CPU but C-states emulated for S0ix support.
+		 */
+		if ((cstate + 1) < 6) {
+			num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
 					& MWAIT_SUBSTATE_MASK;
+			if (num_substates == 0)
+				continue;
+		}
 
-		/* if sub-state in table is not enumerated by CPUID */
-		if ((mwait_substate + 1) > num_substates)
-			continue;
-
+#if !defined(CONFIG_ATOM_SOC_POWER)
+		if (boot_cpu_data.x86_model != 0x37) {
+			/* if sub-state in table is not enumerated by CPUID */
+			if ((mwait_substate + 1) > num_substates)
+				continue;
+		}
+#endif
 		dev->state_count += 1;
 	}
 
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index ab0767e6..a6e0341 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -143,6 +143,18 @@
 	  This driver can also be built as a module. If so, the module will be
 	  called ti-adc081c.
 
+config TI_ADS7955_ADC
+	tristate "Texas Instruments ADS7955 ADC driver"
+	depends on SPI
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
+	help
+	  Say yes here to build support for Texas Instruments ADS7955
+	  8-channel ADC.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ti-ads7955.
+
 config TI_AM335X_ADC
 	tristate "TI's ADC driver"
 	depends on MFD_TI_AM335X_TSCADC
@@ -157,4 +169,10 @@
 	  Say yes here to access the ADC part of the Nano River
 	  Technologies Viperboard.
 
+config IIO_BASINCOVE_GPADC
+	tristate "IIO Basincove GPADC driver"
+	depends on IIO
+	help
+	  Say yes here to build support for the IIO Basin Cove GPADC driver.
+
 endmenu
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 0a825be..48f8fef0 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -15,5 +15,7 @@
 obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
 obj-$(CONFIG_MAX1363) += max1363.o
 obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
+obj-$(CONFIG_TI_ADS7955_ADC) += ti-ads7955.o
 obj-$(CONFIG_TI_AM335X_ADC) += ti_am335x_adc.o
 obj-$(CONFIG_VIPERBOARD_ADC) += viperboard_adc.o
+obj-$(CONFIG_IIO_BASINCOVE_GPADC) += iio_basincove_gpadc.o
diff --git a/drivers/iio/adc/iio_basincove_gpadc.c b/drivers/iio/adc/iio_basincove_gpadc.c
new file mode 100644
index 0000000..c20b61b
--- /dev/null
+++ b/drivers/iio/adc/iio_basincove_gpadc.c
@@ -0,0 +1,596 @@
+/*
+ * iio_basincove_gpadc.c - Intel Merrifield Basin Cove GPADC Driver
+ *
+ * Copyright (C) 2012 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Bin Yang <bin.yang@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/rpmsg.h>
+
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_mid_remoteproc.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_basincove_gpadc.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/machine.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/driver.h>
+#include <linux/iio/types.h>
+#include <linux/iio/consumer.h>
+
+struct gpadc_info {
+	int initialized;
+	/*
+	 * This mutex protects GPADC sampling and configuration from
+	 * concurrent access. Any function that samples or configures
+	 * the GPADC must hold it; while it is held, the GPADC is in
+	 * active mode.
+	 */
+	struct mutex lock;
+	struct device *dev;
+	int irq;
+	u8 irq_status;
+	wait_queue_head_t wait;
+	int sample_done;
+	void __iomem *intr;
+	int channel_num;
+	struct gpadc_regmap_t *gpadc_regmaps;
+	struct gpadc_regs_t *gpadc_regs;
+};
+
+static inline int gpadc_clear_bits(u16 addr, u8 mask)
+{
+	return intel_scu_ipc_update_register(addr, 0, mask);
+}
+
+static inline int gpadc_set_bits(u16 addr, u8 mask)
+{
+	return intel_scu_ipc_update_register(addr, 0xff, mask);
+}
+
+static inline int gpadc_write(u16 addr, u8 data)
+{
+	return intel_scu_ipc_iowrite8(addr, data);
+}
+
+static inline int gpadc_read(u16 addr, u8 *data)
+{
+	return intel_scu_ipc_ioread8(addr, data);
+}
+
+static int gpadc_busy_wait(struct gpadc_regs_t *regs)
+{
+	u8 tmp;
+	int timeout = 0;
+
+	gpadc_read(regs->gpadcreq, &tmp);
+	while (tmp & regs->gpadcreq_busy && timeout < 500) {
+		gpadc_read(regs->gpadcreq, &tmp);
+		usleep_range(1800, 2000);
+		timeout++;
+	}
+
+	if (tmp & regs->gpadcreq_busy)
+		return -EBUSY;
+	else
+		return 0;
+}
+
+static void gpadc_dump(struct gpadc_info *info)
+{
+	u8 tmp;
+	struct gpadc_regs_t *regs = info->gpadc_regs;
+
+	dev_err(info->dev, "GPADC registers dump:\n");
+	gpadc_read(regs->adcirq, &tmp);
+	dev_err(info->dev, "ADCIRQ: 0x%x\n", tmp);
+	gpadc_read(regs->madcirq, &tmp);
+	dev_err(info->dev, "MADCIRQ: 0x%x\n", tmp);
+	gpadc_read(regs->gpadcreq, &tmp);
+	dev_err(info->dev, "GPADCREQ: 0x%x\n", tmp);
+	gpadc_read(regs->adc1cntl, &tmp);
+	dev_err(info->dev, "ADC1CNTL: 0x%x\n", tmp);
+}
+
+static irqreturn_t gpadc_isr(int irq, void *data)
+{
+	struct gpadc_info *info = iio_priv(data);
+
+	info->irq_status = ioread8(info->intr);
+	info->sample_done = 1;
+	wake_up(&info->wait);
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t gpadc_threaded_isr(int irq, void *data)
+{
+	struct gpadc_info *info = iio_priv(data);
+	struct gpadc_regs_t *regs = info->gpadc_regs;
+
+	/* Clear IRQLVL1MASK */
+	gpadc_clear_bits(regs->mirqlvl1, regs->mirqlvl1_adc);
+
+	return IRQ_HANDLED;
+}
+
+
+/**
+ * iio_basincove_gpadc_sample - do gpadc sample.
+ * @indio_dev: industrial IO GPADC device handle
+ * @ch: bit mask of channels to sample; for example, ch = (1<<0)|(1<<2)
+ *	samples channels 0 and 2 at the same time.
+ * @res: gpadc sampling result
+ *
+ * Returns 0 on success or an error code.
+ *
+ * This function may sleep.
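+ *
+ * Example (hypothetical caller):
+ *
+ *	struct gpadc_result res;
+ *	int ret;
+ *
+ *	ret = iio_basincove_gpadc_sample(indio_dev, (1 << 0) | (1 << 2), &res);
+ *	if (!ret)
+ *		pr_info("ch0 = %d, ch2 = %d\n", res.data[0], res.data[2]);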
+ */
+
+int iio_basincove_gpadc_sample(struct iio_dev *indio_dev,
+				int ch, struct gpadc_result *res)
+{
+	struct gpadc_info *info = iio_priv(indio_dev);
+	int i, ret;
+	u8 tmp, th, tl;
+	u8 mask;
+	struct gpadc_regs_t *regs = info->gpadc_regs;
+
+	if (!info->initialized)
+		return -ENODEV;
+
+	mutex_lock(&info->lock);
+
+	mask = MBATTEMP | MSYSTEMP | MBATT | MVIBATT | MCCTICK;
+	gpadc_clear_bits(regs->madcirq, mask);
+	gpadc_clear_bits(regs->mirqlvl1, regs->mirqlvl1_adc);
+
+	tmp = regs->gpadcreq_irqen;
+
+	for (i = 0; i < info->channel_num; i++) {
+		if (ch & (1 << i))
+			tmp |= (1 << info->gpadc_regmaps[i].cntl);
+	}
+
+	info->sample_done = 0;
+
+	ret = gpadc_busy_wait(regs);
+	if (ret) {
+		dev_err(info->dev, "GPADC is busy\n");
+		goto done;
+	}
+
+	gpadc_write(regs->gpadcreq, tmp);
+
+	ret = wait_event_timeout(info->wait, info->sample_done, HZ);
+	if (ret == 0) {
+		gpadc_dump(info);
+		ret = -ETIMEDOUT;
+		dev_err(info->dev, "sample timeout, return %d\n", ret);
+		goto done;
+	} else {
+		ret = 0;
+	}
+
+	for (i = 0; i < info->channel_num; i++) {
+		if (ch & (1 << i)) {
+			gpadc_read(info->gpadc_regmaps[i].rslth, &th);
+			gpadc_read(info->gpadc_regmaps[i].rsltl, &tl);
+			res->data[i] = ((th & 0x3) << 8) + tl;
+		}
+	}
+
+done:
+	gpadc_set_bits(regs->mirqlvl1, regs->mirqlvl1_adc);
+	gpadc_set_bits(regs->madcirq, mask);
+	mutex_unlock(&info->lock);
+	return ret;
+}
+EXPORT_SYMBOL(iio_basincove_gpadc_sample);
+
+static struct gpadc_result sample_result;
+static int chs;
+
+static ssize_t intel_basincove_gpadc_store_channel(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t size)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct gpadc_info *info = iio_priv(indio_dev);
+
+	if (sscanf(buf, "%x", &chs) != 1) {
+		dev_err(dev, "one channel argument is needed\n");
+		return -EINVAL;
+	}
+
+	if (chs < (1 << 0) || chs >= (1 << info->channel_num)) {
+		dev_err(dev, "invalid channel, should be in [0x1 - 0x1FF]\n");
+		return -EINVAL;
+	}
+
+	return size;
+}
+
+static ssize_t intel_basincove_gpadc_show_channel(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n", chs);
+}
+
+static ssize_t intel_basincove_gpadc_store_sample(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t size)
+{
+	int value, ret;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+
+	memset(sample_result.data, 0, sizeof(sample_result.data));
+
+	if (sscanf(buf, "%d", &value) != 1) {
+		dev_err(dev, "one argument is needed\n");
+		return -EINVAL;
+	}
+
+	if (value == 1) {
+		ret = iio_basincove_gpadc_sample(indio_dev, chs,
+						&sample_result);
+		if (ret) {
+			dev_err(dev, "sample failed\n");
+			return ret;
+		}
+	} else {
+		dev_err(dev, "input '1' to sample\n");
+		return -EINVAL;
+	}
+
+	return size;
+}
+
+static ssize_t intel_basincove_gpadc_show_result(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int i;
+	int used = 0;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct gpadc_info *info = iio_priv(indio_dev);
+
+	for (i = 0; i < info->channel_num; i++) {
+		used += snprintf(buf + used, PAGE_SIZE - used,
+			 "sample_result[%d] = %d\n", i, sample_result.data[i]);
+	}
+
+	return used;
+}
+
+
+static DEVICE_ATTR(channel, S_IWUSR | S_IRUGO,
+		intel_basincove_gpadc_show_channel,
+		intel_basincove_gpadc_store_channel);
+static DEVICE_ATTR(sample, S_IWUSR, NULL, intel_basincove_gpadc_store_sample);
+static DEVICE_ATTR(result, S_IRUGO, intel_basincove_gpadc_show_result, NULL);
+
+static struct attribute *intel_basincove_gpadc_attrs[] = {
+	&dev_attr_channel.attr,
+	&dev_attr_sample.attr,
+	&dev_attr_result.attr,
+	NULL,
+};
+static struct attribute_group intel_basincove_gpadc_attr_group = {
+	.name = "basincove_gpadc",
+	.attrs = intel_basincove_gpadc_attrs,
+};
+
+static int basincove_adc_read_raw(struct iio_dev *indio_dev,
+			struct iio_chan_spec const *chan,
+			int *val, int *val2, long m)
+{
+	int ret;
+	int ch = chan->channel;
+	struct gpadc_info *info = iio_priv(indio_dev);
+	struct gpadc_result res;
+
+	ret = iio_basincove_gpadc_sample(indio_dev, (1 << ch), &res);
+	if (ret) {
+		dev_err(info->dev, "sample failed\n");
+		return -EINVAL;
+	}
+
+	*val = res.data[ch];
+
+	return ret;
+}
+
+static int basincove_adc_read_all_raw(struct iio_channel *chan,
+					int *val)
+{
+	int ret;
+	int i, num = 0;
+	int ch = 0;
+	int *channels;
+	struct gpadc_info *info = iio_priv(chan->indio_dev);
+	struct gpadc_result res;
+
+	while (chan[num].indio_dev)
+		num++;
+
+	channels = kzalloc(sizeof(int) * num, GFP_KERNEL);
+	if (channels == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < num; i++) {
+		channels[i] = chan[i].channel->channel;
+		ch |= (1 << channels[i]);
+	}
+
+	ret = iio_basincove_gpadc_sample(chan->indio_dev, ch, &res);
+	if (ret) {
+		dev_err(info->dev, "sample failed\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	for (i = 0; i < num; i++)
+		val[i] = res.data[channels[i]];
+
+end:
+	kfree(channels);
+	return ret;
+}
+
+static const struct iio_info basincove_adc_info = {
+	.read_raw = &basincove_adc_read_raw,
+	.read_all_raw = &basincove_adc_read_all_raw,
+	.driver_module = THIS_MODULE,
+};
+
+static int bcove_gpadc_probe(struct platform_device *pdev)
+{
+	int err;
+	struct gpadc_info *info;
+	struct iio_dev *indio_dev;
+	struct intel_basincove_gpadc_platform_data *pdata =
+			pdev->dev.platform_data;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "no platform data supplied\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	indio_dev = iio_device_alloc(sizeof(struct gpadc_info));
+	if (indio_dev == NULL) {
+		dev_err(&pdev->dev, "allocating iio device failed\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	info = iio_priv(indio_dev);
+
+	mutex_init(&info->lock);
+	init_waitqueue_head(&info->wait);
+	info->dev = &pdev->dev;
+	info->irq = platform_get_irq(pdev, 0);
+	info->intr = ioremap_nocache(pdata->intr, 1);
+	if (!info->intr) {
+		dev_err(&pdev->dev, "ioremap of ADCIRQ failed\n");
+		err = -ENOMEM;
+		goto err_free;
+	}
+	info->channel_num = pdata->channel_num;
+	info->gpadc_regmaps = pdata->gpadc_regmaps;
+	info->gpadc_regs = pdata->gpadc_regs;
+
+	err = request_threaded_irq(info->irq, gpadc_isr, gpadc_threaded_isr,
+			IRQF_ONESHOT, "adc", indio_dev);
+	if (err) {
+		gpadc_dump(info);
+		dev_err(&pdev->dev, "unable to register irq %d\n", info->irq);
+		goto err_iounmap;
+	}
+
+	platform_set_drvdata(pdev, indio_dev);
+
+	indio_dev->dev.parent = &pdev->dev;
+	indio_dev->name = pdev->name;
+
+	indio_dev->channels = pdata->gpadc_channels;
+	indio_dev->num_channels = pdata->channel_num;
+	indio_dev->info = &basincove_adc_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	err = iio_map_array_register(indio_dev, pdata->gpadc_iio_maps);
+	if (err)
+		goto err_release_irq;
+
+	err = iio_device_register(indio_dev);
+	if (err < 0)
+		goto err_array_unregister;
+
+	err = sysfs_create_group(&pdev->dev.kobj,
+			&intel_basincove_gpadc_attr_group);
+	if (err) {
+		dev_err(&pdev->dev, "Unable to export sysfs interface, error: %d\n",
+			err);
+		goto err_iio_device_unregister;
+	}
+
+	info->initialized = 1;
+
+	dev_info(&pdev->dev, "bcove adc probed\n");
+
+	return 0;
+
+err_iio_device_unregister:
+	iio_device_unregister(indio_dev);
+err_array_unregister:
+	iio_map_array_unregister(indio_dev);
+err_release_irq:
+	free_irq(info->irq, info);
+err_iounmap:
+	iounmap(info->intr);
+err_free:
+	iio_device_free(indio_dev);
+out:
+	return err;
+}
+
+static int bcove_gpadc_remove(struct platform_device *pdev)
+{
+	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+	struct gpadc_info *info = iio_priv(indio_dev);
+
+	sysfs_remove_group(&pdev->dev.kobj,
+			&intel_basincove_gpadc_attr_group);
+
+	iio_device_unregister(indio_dev);
+	iio_map_array_unregister(indio_dev);
+	free_irq(info->irq, info);
+	iounmap(info->intr);
+	iio_device_free(indio_dev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int bcove_gpadc_suspend(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct gpadc_info *info = iio_priv(indio_dev);
+
+	if (!mutex_trylock(&info->lock))
+		return -EBUSY;
+
+	return 0;
+}
+
+static int bcove_gpadc_resume(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct gpadc_info *info = iio_priv(indio_dev);
+
+	mutex_unlock(&info->lock);
+	return 0;
+}
+#else
+#define bcove_gpadc_suspend		NULL
+#define bcove_gpadc_resume		NULL
+#endif
+
+static const struct dev_pm_ops bcove_gpadc_driver_pm_ops = {
+	.suspend	= bcove_gpadc_suspend,
+	.resume		= bcove_gpadc_resume,
+};
+
+static struct platform_driver bcove_gpadc_driver = {
+	.driver = {
+		   .name = "bcove_adc",
+		   .owner = THIS_MODULE,
+		   .pm = &bcove_gpadc_driver_pm_ops,
+		   },
+	.probe = bcove_gpadc_probe,
+	.remove = bcove_gpadc_remove,
+};
+
+static int bcove_gpadc_module_init(void)
+{
+	return platform_driver_register(&bcove_gpadc_driver);
+}
+
+static void bcove_gpadc_module_exit(void)
+{
+	platform_driver_unregister(&bcove_gpadc_driver);
+}
+
+static int bcove_adc_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed bcove_gpadc rpmsg device\n");
+
+	ret = bcove_gpadc_module_init();
+
+out:
+	return ret;
+}
+
+static void bcove_adc_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	bcove_gpadc_module_exit();
+	dev_info(&rpdev->dev, "Removed bcove_gpadc rpmsg device\n");
+}
+
+static void bcove_adc_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id bcove_adc_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_bcove_adc" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, bcove_adc_rpmsg_id_table);
+
+static struct rpmsg_driver bcove_adc_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= bcove_adc_rpmsg_id_table,
+	.probe		= bcove_adc_rpmsg_probe,
+	.callback	= bcove_adc_rpmsg_cb,
+	.remove		= bcove_adc_rpmsg_remove,
+};
+
+static int __init bcove_adc_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&bcove_adc_rpmsg);
+}
+
+#ifdef MODULE
+module_init(bcove_adc_rpmsg_init);
+#else
+rootfs_initcall(bcove_adc_rpmsg_init);
+#endif
+
+static void __exit bcove_adc_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&bcove_adc_rpmsg);
+}
+module_exit(bcove_adc_rpmsg_exit);
+
+MODULE_AUTHOR("Yang Bin<bin.yang@intel.com>");
+MODULE_DESCRIPTION("Intel Merrifield Basin Cove GPADC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/ti-ads7955.c b/drivers/iio/adc/ti-ads7955.c
new file mode 100644
index 0000000..fde230b
--- /dev/null
+++ b/drivers/iio/adc/ti-ads7955.c
@@ -0,0 +1,421 @@
+/*
+ * ADS7955 SPI ADC driver
+ *
+ * (C) Copyright 2014 Intel Corporation
+ * Author: Dave Hunt <dave.hunt@emutex.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+/*
+ * [FIXME]
+ * Notes: this version of the ti-ads7955 driver is written with a couple
+ * of workarounds for limitations of the SPI driver on Edison at the time
+ * of writing.
+ * Issue 1: CS is pushed low between every frame.
+ * Issue 2: spi_message_add_tail() can only be called once in the driver;
+ *	    subsequent messages are ignored.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include <linux/platform_data/ti-ads7955.h>
+
+#define ADS7955_EXTREF		true
+#define SPI_MAX_SPEED_HZ	20000000
+#define SPI_BITS_PER_WORD	16
+
+#define ADS7955_MANUAL_MODE	(0x1 << 12)	/* Selects Manual Mode */
+#define ADS7955_AUTO1_MODE	(0x2 << 12)	/* Selects Auto Mode 1 */
+#define ADS7955_AUTO2_MODE	(0x3 << 12)	/* Selects Auto Mode 2 */
+#define ADS7955_AUTO1_PROGRAM	(0x8 << 12)	/* Programming Auto Mode 1 */
+#define ADS7955_AUTO2_PROGRAM	(0x9 << 12)	/* Programming Auto Mode 2 */
+
+#define ADS7955_CONFIG		BIT(11)		/* program bits DI06-00 */
+#define ADS7955_AUTO1_RESET	BIT(10)		/* Reset to first channel  */
+#define ADS7955_CHANNEL(x)	((x & 0xf) << 7)/* Channel select (DI10-07) */
+
+#define ADS7955_RANGE_1		0		/* Selects 2.5V input range */
+#define ADS7955_RANGE_2		BIT(6)		/* Selects 5.0V input range */
+
+#define ADS7955_POWER_NORMAL	0		/* No Powerdown */
+#define ADS7955_POWER_DOWN	BIT(5)		/* Powerdown on last edge */
+
+#define ADS7955_GET_CONVERSION	0		/* High bits have ch index*/
+#define ADS7955_GET_GPIO	BIT(4)		/* High bits have GPIO bits */
+
+#define ADS7955_SET_READ	(ADS7955_MANUAL_MODE | ADS7955_CONFIG | \
+				ADS7955_RANGE_2 | ADS7955_POWER_NORMAL | \
+				ADS7955_GET_CONVERSION)
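+/* e.g. ADS7955_SET_READ | ADS7955_CHANNEL(3) == 0x1840 | 0x0180 == 0x19c0 */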
+
+#define ADS7955_READ_AUTO1	(ADS7955_AUTO1_MODE | ADS7955_CONFIG | \
+				ADS7955_RANGE_2 | ADS7955_POWER_NORMAL | \
+				ADS7955_GET_CONVERSION)
+
+#define ADS7955_MAX_CHAN	8
+#define ADS7955_BITS		12
+#define ADS7955_STORAGE_BITS	16
+/*
+ * Define the Reference Voltage for the board on which this ADC is used.
+ * May change depending on jumper settings or wiring configuration.
+ */
+#define ADS7955_INTREF_mV	5000
+#define SPI_MSG_MAX_LEN		20		/* 8 channels plus timestamp */
+
+#define RES_MASK(bits)	((1 << (bits)) - 1)
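+/* e.g. RES_MASK(ADS7955_BITS) == RES_MASK(12) == 0x0fff */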
+
+struct ads7955_state {
+	struct spi_device	*spi;
+	struct regulator	*reg;
+	unsigned		ext_ref;
+	struct spi_transfer	ring_xfer[10];
+	struct spi_transfer	scan_single_xfer[3];
+	struct spi_message	ring_msg;
+	struct spi_message	scan_single_msg;
+	/*
+	 * DMA (thus cache coherency maintenance) requires the
+	 * transfer buffers to live in their own cache lines.
+	 */
+	__u16			rx_buf[SPI_MSG_MAX_LEN] ____cacheline_aligned;
+	__u16			tx_buf[SPI_MSG_MAX_LEN];
+};
+
+#define ADS7955_V_CHAN(index)						\
+	{								\
+		.type = IIO_VOLTAGE,					\
+		.indexed = 1,						\
+		.channel = index,					\
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),		\
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),	\
+		.address = index,					\
+		.scan_index = index,					\
+		.scan_type = {						\
+			.sign = 'u',					\
+			.realbits = ADS7955_BITS,			\
+			.storagebits = ADS7955_STORAGE_BITS,		\
+			.endianness = IIO_CPU,				\
+		},							\
+	}
+
+static const struct iio_chan_spec ads7955_channels[] = {
+	ADS7955_V_CHAN(0),
+	ADS7955_V_CHAN(1),
+	ADS7955_V_CHAN(2),
+	ADS7955_V_CHAN(3),
+	ADS7955_V_CHAN(4),
+	ADS7955_V_CHAN(5),
+	ADS7955_V_CHAN(6),
+	ADS7955_V_CHAN(7),
+	IIO_CHAN_SOFT_TIMESTAMP(8),
+};
+
+/**
+ * ads7955_update_scan_mode() setup the spi transfer buffer for the scan mask
+ **/
+static int ads7955_update_scan_mode(struct iio_dev *indio_dev,
+	const unsigned long *active_scan_mask)
+{
+	struct ads7955_state *st = iio_priv(indio_dev);
+	int i, ret;
+	unsigned short channel_count;
+
+	/*
+	 * For programming the auto1 mode, we need to send two words, one to
+	 * specify program mode, and the other to give a bitmask of channels
+	 * to be read when reading the auto sequence.
+	 */
+	/*
+	 * [FIXME]
+	 * Workaround: Build up a custom SPI message containing all required
+	 * frames (including space for expected responses), and send as one
+	 * SPI message. This works around the issue that the current SPI
+	 * driver only supports the first 'spi_message_add_tail' call.
+	 */
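+	/*
+	 * Example: an active_scan_mask of 0x05 (channels 0 and 2) is
+	 * sent verbatim as the second word, i.e. tx_buf[1] = 0x0005.
+	 */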
+	st->tx_buf[0] = ADS7955_AUTO1_PROGRAM;
+	st->tx_buf[1] = (unsigned short)*active_scan_mask;
+	st->tx_buf[2] = (ADS7955_SET_READ | ADS7955_POWER_DOWN);
+
+	ret = spi_sync(st->spi, &st->scan_single_msg);
+	if (ret)
+		return ret;
+
+	/*
+	 * Now that the hardware knows which channels to sample, set up
+	 * the message sequence used when the device is triggered.
+	 */
+	/*
+	 * [FIXME]
+	 * Workaround: Build up a custom SPI message containing all required
+	 * frames (including space for expected responses), and send as one
+	 * SPI message. This works around the issue that the current SPI
+	 * driver only supports the first 'spi_message_add_tail' call.
+	 */
+	channel_count = 0;
+	for (i = 0; i < ADS7955_MAX_CHAN; i++) {
+		if (test_bit(i, active_scan_mask)) {
+			if (channel_count == 0)
+				st->tx_buf[channel_count] = (ADS7955_READ_AUTO1
+							| ADS7955_AUTO1_RESET);
+			else
+				st->tx_buf[channel_count] =
+							(ADS7955_READ_AUTO1);
+			channel_count++;
+		}
+	}
+
+	/*
+	 * Add two extra TX frames so the remaining RX frames can be
+	 * clocked in (RX lags TX by two frames).
+	 */
+	st->tx_buf[channel_count++] = (ADS7955_READ_AUTO1);
+	st->tx_buf[channel_count++] = (ADS7955_READ_AUTO1 |
+					ADS7955_POWER_DOWN);
+
+	st->ring_xfer[0].tx_buf = &st->tx_buf[0];
+	st->ring_xfer[0].rx_buf = &st->rx_buf[0];
+	st->ring_xfer[0].len = channel_count * 2;
+	spi_message_init(&st->ring_msg);
+	spi_message_add_tail(&st->ring_xfer[0], &st->ring_msg);
+	return 0;
+}
+
+/**
+ * ads7955_trigger_handler() - bottom half of trigger-launched polling to ring buffer
+ **/
+static irqreturn_t ads7955_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct ads7955_state *st = iio_priv(indio_dev);
+	u8 *return_data = (u8 *)&(st->rx_buf[2]);
+	s64 time_ns = 0;
+	int ret;
+
+	ret = spi_sync(st->spi, &st->ring_msg);
+	if (ret)
+		return IRQ_HANDLED;
+
+	if (indio_dev->scan_timestamp) {
+		time_ns = iio_get_time_ns();
+		memcpy(return_data +
+			indio_dev->scan_bytes - sizeof(s64),
+			&time_ns, sizeof(time_ns));
+	}
+
+	iio_push_to_buffers(indio_dev, return_data);
+
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
+static int ads7955_scan_direct(struct ads7955_state *st, unsigned ch)
+{
+	int ret;
+
+	/*
+	 * [FIXME]
+	 * Workaround: Build up a custom SPI message containing all required
+	 * frames (including space for expected responses), and send as one
+	 * SPI message. This works around the issue that the current SPI
+	 * driver only supports the first 'spi_message_add_tail' call.
+	 */
+	st->tx_buf[0] = (ADS7955_SET_READ | ADS7955_CHANNEL(ch));
+	st->tx_buf[1] = (ADS7955_SET_READ | ADS7955_CHANNEL(ch));
+	st->tx_buf[2] = (ADS7955_SET_READ | ADS7955_CHANNEL(ch) |
+						ADS7955_POWER_DOWN);
+
+	ret = spi_sync(st->spi, &st->scan_single_msg);
+	if (ret)
+		return ret;
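+	/* RX lags TX by two frames, so the conversion result for this
+	 * channel arrives in rx_buf[2].
+	 */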
+	return st->rx_buf[2];
+}
+
+
+static int ads7955_get_ref_voltage(struct ads7955_state *st)
+{
+	int vref;
+
+	if (st->ext_ref) {
+		vref = regulator_get_voltage(st->reg);
+		if (vref < 0)
+			return vref;
+
+		return vref / 1000;
+	} else {
+		return ADS7955_INTREF_mV;
+	}
+}
+
+static int ads7955_read_raw(struct iio_dev *indio_dev,
+			   struct iio_chan_spec const *chan,
+			   int *val,
+			   int *val2,
+			   long m)
+{
+	int ret;
+	struct ads7955_state *st = iio_priv(indio_dev);
+
+	switch (m) {
+	case IIO_CHAN_INFO_RAW:
+		mutex_lock(&indio_dev->mlock);
+		if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED)
+			ret = -EBUSY;
+		else
+			ret = ads7955_scan_direct(st, chan->address);
+		mutex_unlock(&indio_dev->mlock);
+		if (ret < 0)
+			return ret;
+
+		*val = ret & RES_MASK(ADS7955_BITS);
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		switch (chan->type) {
+		case IIO_VOLTAGE:
+			*val = ads7955_get_ref_voltage(st);
+			*val2 = chan->scan_type.realbits;
+			return IIO_VAL_FRACTIONAL_LOG2;
+		default:
+			return -EINVAL;
+		}
+	}
+	return -EINVAL;
+}
+
+static const struct iio_info ads7955_info = {
+	.read_raw = &ads7955_read_raw,
+	.update_scan_mode = ads7955_update_scan_mode,
+	.driver_module = THIS_MODULE,
+};
+
+static int ads7955_probe(struct spi_device *spi)
+{
+	struct ads7955_platform_data *pdata = spi->dev.platform_data;
+	struct ads7955_state *st;
+	struct iio_dev *indio_dev = iio_device_alloc(sizeof(*st));
+	int ret;
+
+	if (indio_dev == NULL)
+		return -ENOMEM;
+
+	st = iio_priv(indio_dev);
+
+	if (pdata && pdata->ext_ref)
+		st->ext_ref = ADS7955_EXTREF;
+
+	if (st->ext_ref) {
+		st->reg = regulator_get(&spi->dev, "vref");
+		if (IS_ERR(st->reg)) {
+			ret = PTR_ERR(st->reg);
+			goto error_free;
+		}
+		ret = regulator_enable(st->reg);
+		if (ret)
+			goto error_put_reg;
+	}
+
+	spi_set_drvdata(spi, indio_dev);
+
+	st->spi = spi;
+
+	indio_dev->name = spi_get_device_id(spi)->name;
+	indio_dev->dev.parent = &spi->dev;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = ads7955_channels;
+	indio_dev->num_channels = ARRAY_SIZE(ads7955_channels);
+	indio_dev->info = &ads7955_info;
+
+	/*
+	 * Set up the default message.
+	 * [FIXME]
+	 * Workaround: send each frame as 16 bits to work around the fact
+	 * that the current SPI hardware pulls CS low between every frame.
+	 */
+	spi->bits_per_word = SPI_BITS_PER_WORD;
+	spi->max_speed_hz = SPI_MAX_SPEED_HZ;
+	spi_setup(spi);
+
+	st->scan_single_xfer[0].tx_buf = &st->tx_buf[0];
+	st->scan_single_xfer[0].rx_buf = &st->rx_buf[0];
+	st->scan_single_xfer[0].len = 6;
+
+	spi_message_init(&st->scan_single_msg);
+	spi_message_add_tail(&st->scan_single_xfer[0], &st->scan_single_msg);
+
+	ret = iio_triggered_buffer_setup(indio_dev, NULL,
+			&ads7955_trigger_handler, NULL);
+	if (ret) {
+		dev_warn(&indio_dev->dev,
+			"Failed to set up iio_triggered_buffer_setup\n");
+		goto error_disable_reg;
+	}
+
+	ret = iio_device_register(indio_dev);
+	if (ret)
+		goto error_cleanup_ring;
+
+	return 0;
+
+error_cleanup_ring:
+	iio_triggered_buffer_cleanup(indio_dev);
+error_disable_reg:
+	if (st->ext_ref)
+		regulator_disable(st->reg);
+error_put_reg:
+	if (st->ext_ref)
+		regulator_put(st->reg);
+error_free:
+	iio_device_free(indio_dev);
+
+	return ret;
+}
+
+static int ads7955_remove(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev = spi_get_drvdata(spi);
+	struct ads7955_state *st = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+	iio_triggered_buffer_cleanup(indio_dev);
+	if (st->ext_ref) {
+		regulator_disable(st->reg);
+		regulator_put(st->reg);
+	}
+	iio_device_free(indio_dev);
+
+	return 0;
+}
+
+static const struct spi_device_id ads7955_id[] = {
+	{"ads7955", 0},
+	{}
+};
+MODULE_DEVICE_TABLE(spi, ads7955_id);
+
+static struct spi_driver ads7955_driver = {
+	.driver = {
+		.name	= "ads7955",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= ads7955_probe,
+	.remove		= ads7955_remove,
+	.id_table	= ads7955_id,
+};
+module_spi_driver(ads7955_driver);
+
+MODULE_AUTHOR("Dave Hunt <dave.hunt@emutex.com>");
+MODULE_DESCRIPTION("Texas Instruments ADS7955 ADC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index e145931..424dea1 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -66,6 +66,7 @@
 	[IIO_ALTVOLTAGE] = "altvoltage",
 	[IIO_CCT] = "cct",
 	[IIO_PRESSURE] = "pressure",
+	[IIO_RESISTANCE] = "resistance",
 };
 
 static const char * const iio_modifier_names[] = {
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 10aa9ef..145c986 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -35,6 +35,7 @@
  */
 struct iio_event_interface {
 	wait_queue_head_t	wait;
+	struct mutex		read_lock;
 	DECLARE_KFIFO(det_events, struct iio_event_data, 16);
 
 	struct list_head	dev_attr_list;
@@ -97,14 +98,16 @@
 	if (count < sizeof(struct iio_event_data))
 		return -EINVAL;
 
-	spin_lock_irq(&ev_int->wait.lock);
+	if (mutex_lock_interruptible(&ev_int->read_lock))
+		return -ERESTARTSYS;
+
 	if (kfifo_is_empty(&ev_int->det_events)) {
 		if (filep->f_flags & O_NONBLOCK) {
 			ret = -EAGAIN;
 			goto error_unlock;
 		}
 		/* Blocking on device; waiting for something to be there */
-		ret = wait_event_interruptible_locked_irq(ev_int->wait,
+		ret = wait_event_interruptible(ev_int->wait,
 					!kfifo_is_empty(&ev_int->det_events));
 		if (ret)
 			goto error_unlock;
@@ -114,7 +117,7 @@
 	ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
 
 error_unlock:
-	spin_unlock_irq(&ev_int->wait.lock);
+	mutex_unlock(&ev_int->read_lock);
 
 	return ret ? ret : copied;
 }
@@ -371,6 +374,7 @@
 {
 	INIT_KFIFO(ev_int->det_events);
 	init_waitqueue_head(&ev_int->wait);
+	mutex_init(&ev_int->read_lock);
 }
 
 static const char *iio_event_group_name = "events";
@@ -434,6 +438,7 @@
 
 error_free_setup_event_lines:
 	__iio_remove_event_config_attrs(indio_dev);
+	mutex_destroy(&indio_dev->event_interface->read_lock);
 	kfree(indio_dev->event_interface);
 error_ret:
 
@@ -446,5 +451,6 @@
 		return;
 	__iio_remove_event_config_attrs(indio_dev);
 	kfree(indio_dev->event_interface->group.attrs);
+	mutex_destroy(&indio_dev->event_interface->read_lock);
 	kfree(indio_dev->event_interface);
 }
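The switch from the waitqueue spinlock to read_lock serializes readers of the
event kfifo without holding interrupts off across the user copy. A userspace
sketch of the reader side (IIO_GET_EVENT_FD_IOCTL and struct iio_event_data
are assumed to come from the matching <linux/iio/events.h>, which this hunk
does not show):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/iio/events.h>

int main(void)
{
	int dev_fd = open("/dev/iio:device0", O_RDONLY);
	int ev_fd;
	struct iio_event_data ev;

	if (dev_fd < 0 || ioctl(dev_fd, IIO_GET_EVENT_FD_IOCTL, &ev_fd) < 0)
		return 1;
	/* Blocks in wait_event_interruptible() until an event arrives. */
	while (read(ev_fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("event id %llx at %lld\n",
		       (unsigned long long)ev.id, (long long)ev.timestamp);
	return 0;
}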
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 4fc88e6..5584188 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -415,6 +415,48 @@
 }
 EXPORT_SYMBOL_GPL(iio_channel_release_all);
 
+int iio_channel_get_num(const struct iio_channel *chan)
+{
+	int num = 0;
+
+	if (chan == NULL)
+		return -ENODEV;
+
+	while (chan[num].indio_dev)
+		num++;
+
+	return num;
+}
+EXPORT_SYMBOL_GPL(iio_channel_get_num);
+
+int iio_channel_get_name(const struct iio_channel *chan, char **chan_name)
+{
+	int i = 0;
+	struct iio_map_internal *c = NULL;
+
+	if (chan == NULL)
+		return -ENODEV;
+
+	if (chan_name == NULL)
+		return -EINVAL;
+
+	while (chan[i].indio_dev) {
+		mutex_lock(&iio_map_list_lock);
+		list_for_each_entry(c, &iio_map_list, l) {
+			if (strcmp(chan[i].channel->datasheet_name,
+				c->map->adc_channel_label) != 0)
+				continue;
+			strcpy(chan_name[i], c->map->consumer_channel);
+			break;
+		}
+		mutex_unlock(&iio_map_list_lock);
+		i++;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iio_channel_get_name);
+
 static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
 	enum iio_chan_info_enum info)
 {
@@ -445,6 +487,24 @@
 }
 EXPORT_SYMBOL_GPL(iio_read_channel_raw);
 
+int iio_read_channel_all_raw(struct iio_channel *chan, int *val)
+{
+	int ret;
+
+	mutex_lock(&chan->indio_dev->info_exist_lock);
+	if (chan->indio_dev->info == NULL) {
+		ret = -ENODEV;
+		goto err_unlock;
+	}
+
+	if (chan->indio_dev->info->read_all_raw == NULL) {
+		ret = -EINVAL;
+		goto err_unlock;
+	}
+
+	ret = chan->indio_dev->info->read_all_raw(chan, val);
+err_unlock:
+	mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iio_read_channel_all_raw);
+
 static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
 	int raw, int *processed, unsigned int scale)
 {
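A sketch of how an in-kernel consumer might use the two new helpers together
with the existing iio_channel_get_all()/iio_channel_release_all() API. The
eight-channel bound and 32-byte name buffers are arbitrary choices for the
example, and the helpers' declarations are assumed to be added to
<linux/iio/consumer.h> elsewhere in the patch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iio/consumer.h>

static int example_list_channels(struct device *dev)
{
	struct iio_channel *chans = iio_channel_get_all(dev);
	char buf[8][32];
	char *names[8];
	int i, num;

	if (IS_ERR(chans))
		return PTR_ERR(chans);

	num = iio_channel_get_num(chans);
	if (num > 0 && num <= 8) {
		/* iio_channel_get_name() copies into caller buffers. */
		for (i = 0; i < num; i++)
			names[i] = buf[i];
		if (!iio_channel_get_name(chans, names))
			for (i = 0; i < num; i++)
				dev_info(dev, "channel %d: %s\n",
					 i, names[i]);
	}
	iio_channel_release_all(chans);
	return 0;
}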
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index a11ff74..518efa2 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -174,6 +174,25 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called apm-power.
 
+config INPUT_KEYRESET
+	tristate "Reset key"
+	depends on INPUT
+	select INPUT_KEYCOMBO
+	---help---
+	  Say Y here if you want to reboot when certain keys are pressed.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called keyreset.
+
+config INPUT_KEYCOMBO
+	tristate "Key combo"
+	depends on INPUT
+	---help---
+	  Say Y here if you want to take action when certain keys are pressed.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called keycombo.
+
 comment "Input Device Drivers"
 
 source "drivers/input/keyboard/Kconfig"
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index 5ca3f63..ee4c0652 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -25,3 +25,6 @@
 obj-$(CONFIG_INPUT_MISC)	+= misc/
 
 obj-$(CONFIG_INPUT_APMPOWER)	+= apm-power.o
+obj-$(CONFIG_INPUT_KEYRESET)	+= keyreset.o
+obj-$(CONFIG_INPUT_KEYCOMBO)	+= keycombo.o
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index c122dd2..3b772d5 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -26,6 +26,7 @@
 #include <linux/major.h>
 #include <linux/device.h>
 #include <linux/cdev.h>
+#include <linux/wakelock.h>
 #include "input-compat.h"
 
 struct evdev {
@@ -46,6 +47,9 @@
 	unsigned int tail;
 	unsigned int packet_head; /* [future] position of the first element of next packet */
 	spinlock_t buffer_lock; /* protects access to buffer, head and tail */
+	struct wake_lock wake_lock;
+	bool use_wake_lock;
+	char name[28];
 	struct fasync_struct *fasync;
 	struct evdev *evdev;
 	struct list_head node;
@@ -73,10 +77,14 @@
 		client->buffer[client->tail].value = 0;
 
 		client->packet_head = client->tail;
+		if (client->use_wake_lock)
+			wake_unlock(&client->wake_lock);
 	}
 
 	if (event->type == EV_SYN && event->code == SYN_REPORT) {
 		client->packet_head = client->head;
+		if (client->use_wake_lock)
+			wake_lock(&client->wake_lock);
 		kill_fasync(&client->fasync, SIGIO, POLL_IN);
 	}
 }
@@ -328,6 +336,8 @@
 
 	client->bufsize = bufsize;
 	spin_lock_init(&client->buffer_lock);
+	snprintf(client->name, sizeof(client->name), "%s-%d",
+			dev_name(&evdev->dev), task_tgid_vnr(current));
 	client->evdev = evdev;
 	evdev_attach_client(evdev, client);
 
@@ -394,6 +404,9 @@
 	if (have_event) {
 		*event = client->buffer[client->tail++];
 		client->tail &= client->bufsize - 1;
+		if (client->use_wake_lock &&
+		    client->packet_head == client->tail)
+			wake_unlock(&client->wake_lock);
 	}
 
 	spin_unlock_irq(&client->buffer_lock);
@@ -682,6 +695,35 @@
 	return 0;
 }
 
+static int evdev_enable_suspend_block(struct evdev *evdev,
+				      struct evdev_client *client)
+{
+	if (client->use_wake_lock)
+		return 0;
+
+	spin_lock_irq(&client->buffer_lock);
+	wake_lock_init(&client->wake_lock, WAKE_LOCK_SUSPEND, client->name);
+	client->use_wake_lock = true;
+	if (client->packet_head != client->tail)
+		wake_lock(&client->wake_lock);
+	spin_unlock_irq(&client->buffer_lock);
+	return 0;
+}
+
+static int evdev_disable_suspend_block(struct evdev *evdev,
+				       struct evdev_client *client)
+{
+	if (!client->use_wake_lock)
+		return 0;
+
+	spin_lock_irq(&client->buffer_lock);
+	client->use_wake_lock = false;
+	wake_lock_destroy(&client->wake_lock);
+	spin_unlock_irq(&client->buffer_lock);
+
+	return 0;
+}
+
 static long evdev_do_ioctl(struct file *file, unsigned int cmd,
 			   void __user *p, int compat_mode)
 {
@@ -763,6 +805,15 @@
 
 	case EVIOCSKEYCODE_V2:
 		return evdev_handle_set_keycode_v2(dev, p);
+
+	case EVIOCGSUSPENDBLOCK:
+		return put_user(client->use_wake_lock, ip);
+
+	case EVIOCSSUSPENDBLOCK:
+		if (p)
+			return evdev_enable_suspend_block(evdev, client);
+		else
+			return evdev_disable_suspend_block(evdev, client);
 	}
 
 	size = _IOC_SIZE(cmd);
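Userspace opts in to the wakelock behaviour per client. A sketch
(EVIOCGSUSPENDBLOCK and EVIOCSSUSPENDBLOCK are assumed to be defined in the
matching <linux/input.h>, which this hunk does not show):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/input.h>

int enable_suspend_block(const char *path)
{
	int fd = open(path, O_RDONLY);
	int enabled = 0;

	if (fd < 0)
		return -1;
	/* Hold a wakelock while this client has unread events buffered. */
	if (ioctl(fd, EVIOCSSUSPENDBLOCK, 1) < 0 ||
	    ioctl(fd, EVIOCGSUSPENDBLOCK, &enabled) < 0) {
		close(fd);
		return -1;
	}
	return enabled;	/* caller keeps fd open for the lifetime */
}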
diff --git a/drivers/input/keycombo.c b/drivers/input/keycombo.c
new file mode 100644
index 0000000..2fba451
--- /dev/null
+++ b/drivers/input/keycombo.c
@@ -0,0 +1,261 @@
+/* drivers/input/keycombo.c
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/input.h>
+#include <linux/keycombo.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+struct keycombo_state {
+	struct input_handler input_handler;
+	unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];
+	unsigned long upbit[BITS_TO_LONGS(KEY_CNT)];
+	unsigned long key[BITS_TO_LONGS(KEY_CNT)];
+	spinlock_t lock;
+	struct  workqueue_struct *wq;
+	int key_down_target;
+	int key_down;
+	int key_up;
+	struct delayed_work key_down_work;
+	int delay;
+	struct work_struct key_up_work;
+	void (*key_up_fn)(void *);
+	void (*key_down_fn)(void *);
+	void *priv;
+	int key_is_down;
+	struct wakeup_source combo_held_wake_source;
+	struct wakeup_source combo_up_wake_source;
+};
+
+static void do_key_down(struct work_struct *work)
+{
+	struct delayed_work *dwork = container_of(work, struct delayed_work,
+									work);
+	struct keycombo_state *state = container_of(dwork,
+					struct keycombo_state, key_down_work);
+	if (state->key_down_fn)
+		state->key_down_fn(state->priv);
+}
+
+static void do_key_up(struct work_struct *work)
+{
+	struct keycombo_state *state = container_of(work, struct keycombo_state,
+								key_up_work);
+	if (state->key_up_fn)
+		state->key_up_fn(state->priv);
+	__pm_relax(&state->combo_up_wake_source);
+}
+
+static void keycombo_event(struct input_handle *handle, unsigned int type,
+		unsigned int code, int value)
+{
+	unsigned long flags;
+	struct keycombo_state *state = handle->private;
+
+	if (type != EV_KEY)
+		return;
+
+	if (code >= KEY_MAX)
+		return;
+
+	if (!test_bit(code, state->keybit))
+		return;
+
+	spin_lock_irqsave(&state->lock, flags);
+	if (!test_bit(code, state->key) == !value)
+		goto done;
+	__change_bit(code, state->key);
+	if (test_bit(code, state->upbit)) {
+		if (value)
+			state->key_up++;
+		else
+			state->key_up--;
+	} else {
+		if (value)
+			state->key_down++;
+		else
+			state->key_down--;
+	}
+	if (state->key_down == state->key_down_target && state->key_up == 0) {
+		__pm_stay_awake(&state->combo_held_wake_source);
+		state->key_is_down = 1;
+		if (!queue_delayed_work(state->wq, &state->key_down_work,
+								state->delay))
+			pr_debug("Key down work already queued!\n");
+	} else if (state->key_is_down) {
+		if (!cancel_delayed_work(&state->key_down_work)) {
+			__pm_stay_awake(&state->combo_up_wake_source);
+			queue_work(state->wq, &state->key_up_work);
+		}
+		__pm_relax(&state->combo_held_wake_source);
+		state->key_is_down = 0;
+	}
+done:
+	spin_unlock_irqrestore(&state->lock, flags);
+}
+
+static int keycombo_connect(struct input_handler *handler,
+		struct input_dev *dev,
+		const struct input_device_id *id)
+{
+	int i;
+	int ret;
+	struct input_handle *handle;
+	struct keycombo_state *state =
+		container_of(handler, struct keycombo_state, input_handler);
+	for (i = 0; i < KEY_MAX; i++) {
+		if (test_bit(i, state->keybit) && test_bit(i, dev->keybit))
+			break;
+	}
+	if (i == KEY_MAX)
+		return -ENODEV;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->dev = dev;
+	handle->handler = handler;
+	handle->name = KEYCOMBO_NAME;
+	handle->private = state;
+
+	ret = input_register_handle(handle);
+	if (ret)
+		goto err_input_register_handle;
+
+	ret = input_open_device(handle);
+	if (ret)
+		goto err_input_open_device;
+
+	return 0;
+
+err_input_open_device:
+	input_unregister_handle(handle);
+err_input_register_handle:
+	kfree(handle);
+	return ret;
+}
+
+static void keycombo_disconnect(struct input_handle *handle)
+{
+	input_close_device(handle);
+	input_unregister_handle(handle);
+	kfree(handle);
+}
+
+static const struct input_device_id keycombo_ids[] = {
+		{
+				.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+				.evbit = { BIT_MASK(EV_KEY) },
+		},
+		{ },
+};
+MODULE_DEVICE_TABLE(input, keycombo_ids);
+
+static int keycombo_probe(struct platform_device *pdev)
+{
+	int ret;
+	int key, *keyp;
+	struct keycombo_state *state;
+	struct keycombo_platform_data *pdata = pdev->dev.platform_data;
+
+	if (!pdata)
+		return -EINVAL;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	spin_lock_init(&state->lock);
+	keyp = pdata->keys_down;
+	while ((key = *keyp++)) {
+		if (key >= KEY_MAX)
+			continue;
+		state->key_down_target++;
+		__set_bit(key, state->keybit);
+	}
+	if (pdata->keys_up) {
+		keyp = pdata->keys_up;
+		while ((key = *keyp++)) {
+			if (key >= KEY_MAX)
+				continue;
+			__set_bit(key, state->keybit);
+			__set_bit(key, state->upbit);
+		}
+	}
+
+	state->wq = alloc_ordered_workqueue("keycombo", 0);
+	if (!state->wq) {
+		kfree(state);
+		return -ENOMEM;
+	}
+
+	state->priv = pdata->priv;
+
+	if (pdata->key_down_fn)
+		state->key_down_fn = pdata->key_down_fn;
+	INIT_DELAYED_WORK(&state->key_down_work, do_key_down);
+
+	if (pdata->key_up_fn)
+		state->key_up_fn = pdata->key_up_fn;
+	INIT_WORK(&state->key_up_work, do_key_up);
+
+	wakeup_source_init(&state->combo_held_wake_source, "key combo");
+	wakeup_source_init(&state->combo_up_wake_source, "key combo up");
+	state->delay = msecs_to_jiffies(pdata->key_down_delay);
+
+	state->input_handler.event = keycombo_event;
+	state->input_handler.connect = keycombo_connect;
+	state->input_handler.disconnect = keycombo_disconnect;
+	state->input_handler.name = KEYCOMBO_NAME;
+	state->input_handler.id_table = keycombo_ids;
+	ret = input_register_handler(&state->input_handler);
+	if (ret) {
+		destroy_workqueue(state->wq);
+		kfree(state);
+		return ret;
+	}
+	platform_set_drvdata(pdev, state);
+	return 0;
+}
+
+static int keycombo_remove(struct platform_device *pdev)
+{
+	struct keycombo_state *state = platform_get_drvdata(pdev);
+	input_unregister_handler(&state->input_handler);
+	destroy_workqueue(state->wq);
+	kfree(state);
+	return 0;
+}
+
+static struct platform_driver keycombo_driver = {
+		.driver.name = KEYCOMBO_NAME,
+		.probe = keycombo_probe,
+		.remove = keycombo_remove,
+};
+
+static int __init keycombo_init(void)
+{
+	return platform_driver_register(&keycombo_driver);
+}
+
+static void __exit keycombo_exit(void)
+{
+	platform_driver_unregister(&keycombo_driver);
+}
+
+module_init(keycombo_init);
+module_exit(keycombo_exit);
+
+MODULE_DESCRIPTION("Key combo input driver");
+MODULE_LICENSE("GPL v2");
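A board-file sketch wiring the driver up (the key codes, callback, and
two-second delay are illustrative; the field names follow the platform-data
usage in keycombo_probe() above):

#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/keycombo.h>
#include <linux/platform_device.h>

static void board_combo_down(void *priv)
{
	pr_info("combo held\n");
}

/* Zero-terminated: keycombo_probe() walks it with while ((key = *keyp++)). */
static int combo_keys_down[] = { KEY_VOLUMEDOWN, KEY_VOLUMEUP, 0 };

static struct keycombo_platform_data board_combo_pdata = {
	.keys_down	= combo_keys_down,
	.key_down_fn	= board_combo_down,
	.key_down_delay	= 2000,	/* ms, converted with msecs_to_jiffies() */
};

static struct platform_device board_combo_device = {
	.name			= KEYCOMBO_NAME,
	.id			= -1,
	.dev.platform_data	= &board_combo_pdata,
};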
diff --git a/drivers/input/keyreset.c b/drivers/input/keyreset.c
new file mode 100644
index 0000000..7fbf724
--- /dev/null
+++ b/drivers/input/keyreset.c
@@ -0,0 +1,145 @@
+/* drivers/input/keyreset.c
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/input.h>
+#include <linux/keyreset.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/keycombo.h>
+
+struct keyreset_state {
+	int restart_requested;
+	int (*reset_fn)(void);
+	struct platform_device *pdev_child;
+	struct work_struct restart_work;
+};
+
+static void do_restart(struct work_struct *unused)
+{
+	sys_sync();
+	kernel_restart(NULL);
+}
+
+static void do_reset_fn(void *priv)
+{
+	struct keyreset_state *state = priv;
+	if (state->restart_requested)
+		panic("keyboard reset failed, %d", state->restart_requested);
+	if (state->reset_fn) {
+		state->restart_requested = state->reset_fn();
+	} else {
+		pr_info("keyboard reset\n");
+		schedule_work(&state->restart_work);
+		state->restart_requested = 1;
+	}
+}
+
+static int keyreset_probe(struct platform_device *pdev)
+{
+	int ret = -ENOMEM;
+	struct keycombo_platform_data *pdata_child;
+	struct keyreset_platform_data *pdata = pdev->dev.platform_data;
+	int up_size = 0, down_size = 0, size;
+	int key, *keyp;
+	struct keyreset_state *state;
+
+	if (!pdata)
+		return -EINVAL;
+	state = devm_kzalloc(&pdev->dev, sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	state->pdev_child = platform_device_alloc(KEYCOMBO_NAME,
+							PLATFORM_DEVID_AUTO);
+	if (!state->pdev_child)
+		return -ENOMEM;
+	state->pdev_child->dev.parent = &pdev->dev;
+	INIT_WORK(&state->restart_work, do_restart);
+
+	keyp = pdata->keys_down;
+	while ((key = *keyp++)) {
+		if (key >= KEY_MAX)
+			continue;
+		down_size++;
+	}
+	if (pdata->keys_up) {
+		keyp = pdata->keys_up;
+		while ((key = *keyp++)) {
+			if (key >= KEY_MAX)
+				continue;
+			up_size++;
+		}
+	}
+	size = sizeof(struct keycombo_platform_data)
+			+ sizeof(int) * (down_size + 1);
+	pdata_child = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	if (!pdata_child)
+		goto error;
+	memcpy(pdata_child->keys_down, pdata->keys_down,
+						sizeof(int) * down_size);
+	if (up_size > 0) {
+		pdata_child->keys_up = devm_kzalloc(&pdev->dev,
+				sizeof(int) * (up_size + 1), GFP_KERNEL);
+		if (!pdata_child->keys_up)
+			goto error;
+		memcpy(pdata_child->keys_up, pdata->keys_up,
+							sizeof(int) * up_size);
+	}
+	state->reset_fn = pdata->reset_fn;
+	pdata_child->key_down_fn = do_reset_fn;
+	pdata_child->priv = state;
+	pdata_child->key_down_delay = pdata->key_down_delay;
+	ret = platform_device_add_data(state->pdev_child, pdata_child, size);
+	if (ret)
+		goto error;
+	platform_set_drvdata(pdev, state);
+	ret = platform_device_add(state->pdev_child);
+	if (ret)
+		goto error;
+	return 0;
+error:
+	platform_device_put(state->pdev_child);
+	return ret;
+}
+
+static int keyreset_remove(struct platform_device *pdev)
+{
+	struct keyreset_state *state = platform_get_drvdata(pdev);
+	platform_device_unregister(state->pdev_child);
+	return 0;
+}
+
+static struct platform_driver keyreset_driver = {
+	.driver.name = KEYRESET_NAME,
+	.probe = keyreset_probe,
+	.remove = keyreset_remove,
+};
+
+static int __init keyreset_init(void)
+{
+	return platform_driver_register(&keyreset_driver);
+}
+
+static void __exit keyreset_exit(void)
+{
+	platform_driver_unregister(&keyreset_driver);
+}
+
+module_init(keyreset_init);
+module_exit(keyreset_exit);
+
+MODULE_DESCRIPTION("Keyboard reset driver");
+MODULE_LICENSE("GPL v2");
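The matching board-file sketch (keys and delay are illustrative; the fields
follow keyreset_probe() above, which forwards them to a keycombo child
device):

#include <linux/input.h>
#include <linux/keyreset.h>
#include <linux/platform_device.h>

/* Zero-terminated, as in the keycombo example above. */
static int reset_keys_down[] = { KEY_POWER, KEY_VOLUMEDOWN, 0 };

static struct keyreset_platform_data board_reset_pdata = {
	.keys_down	= reset_keys_down,
	.key_down_delay	= 1000,	/* ms before do_reset_fn() fires */
};

static struct platform_device board_reset_device = {
	.name			= KEYRESET_NAME,
	.id			= -1,
	.dev.platform_data	= &board_reset_pdata,
};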
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index bb698e1..4abf046 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -299,6 +299,17 @@
 	  To compile this driver as a module, choose M here: the module will be
 	  called ati_remote2.
 
+config INPUT_KEYCHORD
+	tristate "Key chord input driver support"
+	help
+	  Say Y here if you want to enable the key chord driver
+	  accessible at /dev/keychord.  This driver can be used
+	  for receiving notifications when client-specified key
+	  combinations are pressed.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called keychord.
+
 config INPUT_KEYSPAN_REMOTE
 	tristate "Keyspan DMR USB remote control"
 	depends on USB_ARCH_HAS_HCD
@@ -434,6 +445,11 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called sgi_btns.
 
+config INPUT_GPIO
+	tristate "GPIO driver support"
+	help
+	  Say Y here if you want to support GPIO-based keys, wheels, etc.
+
 config HP_SDC_RTC
 	tristate "HP SDC Real Time Clock"
 	depends on (GSC || HP300) && SERIO
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index d7fc17f..6b0e8a6 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -28,9 +28,11 @@
 obj-$(CONFIG_INPUT_DM355EVM)		+= dm355evm_keys.o
 obj-$(CONFIG_INPUT_GP2A)		+= gp2ap002a00f.o
 obj-$(CONFIG_INPUT_GPIO_TILT_POLLED)	+= gpio_tilt_polled.o
+obj-$(CONFIG_INPUT_GPIO)		+= gpio_event.o gpio_matrix.o gpio_input.o gpio_output.o gpio_axis.o
 obj-$(CONFIG_HP_SDC_RTC)		+= hp_sdc_rtc.o
 obj-$(CONFIG_INPUT_IMS_PCU)		+= ims-pcu.o
 obj-$(CONFIG_INPUT_IXP4XX_BEEPER)	+= ixp4xx-beeper.o
+obj-$(CONFIG_INPUT_KEYCHORD)		+= keychord.o
 obj-$(CONFIG_INPUT_KEYSPAN_REMOTE)	+= keyspan_remote.o
 obj-$(CONFIG_INPUT_KXTJ9)		+= kxtj9.o
 obj-$(CONFIG_INPUT_M68K_BEEP)		+= m68kspkr.o
diff --git a/drivers/input/misc/gpio_axis.c b/drivers/input/misc/gpio_axis.c
new file mode 100644
index 0000000..0acf4a5
--- /dev/null
+++ b/drivers/input/misc/gpio_axis.c
@@ -0,0 +1,192 @@
+/* drivers/input/misc/gpio_axis.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+struct gpio_axis_state {
+	struct gpio_event_input_devs *input_devs;
+	struct gpio_event_axis_info *info;
+	uint32_t pos;
+};
+
+uint16_t gpio_axis_4bit_gray_map_table[] = {
+	[0x0] = 0x0, [0x1] = 0x1, /* 0000 0001 */
+	[0x3] = 0x2, [0x2] = 0x3, /* 0011 0010 */
+	[0x6] = 0x4, [0x7] = 0x5, /* 0110 0111 */
+	[0x5] = 0x6, [0x4] = 0x7, /* 0101 0100 */
+	[0xc] = 0x8, [0xd] = 0x9, /* 1100 1101 */
+	[0xf] = 0xa, [0xe] = 0xb, /* 1111 1110 */
+	[0xa] = 0xc, [0xb] = 0xd, /* 1010 1011 */
+	[0x9] = 0xe, [0x8] = 0xf, /* 1001 1000 */
+};
+uint16_t gpio_axis_4bit_gray_map(struct gpio_event_axis_info *info, uint16_t in)
+{
+	return gpio_axis_4bit_gray_map_table[in];
+}
+
+uint16_t gpio_axis_5bit_singletrack_map_table[] = {
+	[0x10] = 0x00, [0x14] = 0x01, [0x1c] = 0x02, /*     10000 10100 11100 */
+	[0x1e] = 0x03, [0x1a] = 0x04, [0x18] = 0x05, /*     11110 11010 11000 */
+	[0x08] = 0x06, [0x0a] = 0x07, [0x0e] = 0x08, /*    01000 01010 01110  */
+	[0x0f] = 0x09, [0x0d] = 0x0a, [0x0c] = 0x0b, /*    01111 01101 01100  */
+	[0x04] = 0x0c, [0x05] = 0x0d, [0x07] = 0x0e, /*   00100 00101 00111   */
+	[0x17] = 0x0f, [0x16] = 0x10, [0x06] = 0x11, /*   10111 10110 00110   */
+	[0x02] = 0x12, [0x12] = 0x13, [0x13] = 0x14, /*  00010 10010 10011    */
+	[0x1b] = 0x15, [0x0b] = 0x16, [0x03] = 0x17, /*  11011 01011 00011    */
+	[0x01] = 0x18, [0x09] = 0x19, [0x19] = 0x1a, /* 00001 01001 11001     */
+	[0x1d] = 0x1b, [0x15] = 0x1c, [0x11] = 0x1d, /* 11101 10101 10001     */
+};
+uint16_t gpio_axis_5bit_singletrack_map(
+	struct gpio_event_axis_info *info, uint16_t in)
+{
+	return gpio_axis_5bit_singletrack_map_table[in];
+}
+
+static void gpio_event_update_axis(struct gpio_axis_state *as, int report)
+{
+	struct gpio_event_axis_info *ai = as->info;
+	int i;
+	int change;
+	uint16_t state = 0;
+	uint16_t pos;
+	uint16_t old_pos = as->pos;
+	for (i = ai->count - 1; i >= 0; i--)
+		state = (state << 1) | gpio_get_value(ai->gpio[i]);
+	pos = ai->map(ai, state);
+	if (ai->flags & GPIOEAF_PRINT_RAW)
+		pr_info("axis %d-%d raw %x, pos %d -> %d\n",
+			ai->type, ai->code, state, old_pos, pos);
+	if (report && pos != old_pos) {
+		if (ai->type == EV_REL) {
+			change = (ai->decoded_size + pos - old_pos) %
+				  ai->decoded_size;
+			if (change > ai->decoded_size / 2)
+				change -= ai->decoded_size;
+			if (change == ai->decoded_size / 2) {
+				if (ai->flags & GPIOEAF_PRINT_EVENT)
+					pr_info("axis %d-%d unknown direction, "
+						"pos %d -> %d\n", ai->type,
+						ai->code, old_pos, pos);
+				change = 0; /* no closest direction */
+			}
+			if (ai->flags & GPIOEAF_PRINT_EVENT)
+				pr_info("axis %d-%d change %d\n",
+					ai->type, ai->code, change);
+			input_report_rel(as->input_devs->dev[ai->dev],
+						ai->code, change);
+		} else {
+			if (ai->flags & GPIOEAF_PRINT_EVENT)
+				pr_info("axis %d-%d now %d\n",
+					ai->type, ai->code, pos);
+			input_event(as->input_devs->dev[ai->dev],
+					ai->type, ai->code, pos);
+		}
+		input_sync(as->input_devs->dev[ai->dev]);
+	}
+	as->pos = pos;
+}
+
+static irqreturn_t gpio_axis_irq_handler(int irq, void *dev_id)
+{
+	struct gpio_axis_state *as = dev_id;
+	gpio_event_update_axis(as, 1);
+	return IRQ_HANDLED;
+}
+
+int gpio_event_axis_func(struct gpio_event_input_devs *input_devs,
+			 struct gpio_event_info *info, void **data, int func)
+{
+	int ret;
+	int i;
+	int irq;
+	struct gpio_event_axis_info *ai;
+	struct gpio_axis_state *as;
+
+	ai = container_of(info, struct gpio_event_axis_info, info);
+	if (func == GPIO_EVENT_FUNC_SUSPEND) {
+		for (i = 0; i < ai->count; i++)
+			disable_irq(gpio_to_irq(ai->gpio[i]));
+		return 0;
+	}
+	if (func == GPIO_EVENT_FUNC_RESUME) {
+		for (i = 0; i < ai->count; i++)
+			enable_irq(gpio_to_irq(ai->gpio[i]));
+		return 0;
+	}
+
+	if (func == GPIO_EVENT_FUNC_INIT) {
+		*data = as = kmalloc(sizeof(*as), GFP_KERNEL);
+		if (as == NULL) {
+			ret = -ENOMEM;
+			goto err_alloc_axis_state_failed;
+		}
+		as->input_devs = input_devs;
+		as->info = ai;
+		if (ai->dev >= input_devs->count) {
+			pr_err("gpio_event_axis: bad device index %d >= %d "
+				"for %d:%d\n", ai->dev, input_devs->count,
+				ai->type, ai->code);
+			ret = -EINVAL;
+			goto err_bad_device_index;
+		}
+
+		input_set_capability(input_devs->dev[ai->dev],
+				     ai->type, ai->code);
+		if (ai->type == EV_ABS) {
+			input_set_abs_params(input_devs->dev[ai->dev], ai->code,
+					     0, ai->decoded_size - 1, 0, 0);
+		}
+		for (i = 0; i < ai->count; i++) {
+			ret = gpio_request(ai->gpio[i], "gpio_event_axis");
+			if (ret < 0)
+				goto err_request_gpio_failed;
+			ret = gpio_direction_input(ai->gpio[i]);
+			if (ret < 0)
+				goto err_gpio_direction_input_failed;
+			ret = irq = gpio_to_irq(ai->gpio[i]);
+			if (ret < 0)
+				goto err_get_irq_num_failed;
+			ret = request_irq(irq, gpio_axis_irq_handler,
+					  IRQF_TRIGGER_RISING |
+					  IRQF_TRIGGER_FALLING,
+					  "gpio_event_axis", as);
+			if (ret < 0)
+				goto err_request_irq_failed;
+		}
+		gpio_event_update_axis(as, 0);
+		return 0;
+	}
+
+	ret = 0;
+	as = *data;
+	for (i = ai->count - 1; i >= 0; i--) {
+		free_irq(gpio_to_irq(ai->gpio[i]), as);
+err_request_irq_failed:
+err_get_irq_num_failed:
+err_gpio_direction_input_failed:
+		gpio_free(ai->gpio[i]);
+err_request_gpio_failed:
+		;
+	}
+err_bad_device_index:
+	kfree(as);
+	*data = NULL;
+err_alloc_axis_state_failed:
+	return ret;
+}
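A board-file sketch for a 16-position encoder on four GPIOs, decoded with the
4-bit gray-code table above and reported as a relative wheel. The GPIO
numbers are placeholders, and the member types are assumed from
<linux/gpio_event.h>, which is not part of this hunk:

#include <linux/gpio_event.h>
#include <linux/input.h>
#include <linux/kernel.h>

static uint32_t wheel_gpios[] = { 40, 41, 42, 43 };

static struct gpio_event_axis_info wheel_axis = {
	.info.func	= gpio_event_axis_func,
	.count		= ARRAY_SIZE(wheel_gpios),
	.type		= EV_REL,
	.code		= REL_WHEEL,
	.decoded_size	= 16,	/* positions per revolution of the map */
	.map		= gpio_axis_4bit_gray_map,
	.gpio		= wheel_gpios,
};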
diff --git a/drivers/input/misc/gpio_event.c b/drivers/input/misc/gpio_event.c
new file mode 100644
index 0000000..90f07eb
--- /dev/null
+++ b/drivers/input/misc/gpio_event.c
@@ -0,0 +1,228 @@
+/* drivers/input/misc/gpio_event.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct gpio_event {
+	struct gpio_event_input_devs *input_devs;
+	const struct gpio_event_platform_data *info;
+	void *state[0];
+};
+
+static int gpio_input_event(
+	struct input_dev *dev, unsigned int type, unsigned int code, int value)
+{
+	int i;
+	int devnr;
+	int ret = 0;
+	int tmp_ret;
+	struct gpio_event_info **ii;
+	struct gpio_event *ip = input_get_drvdata(dev);
+
+	for (devnr = 0; devnr < ip->input_devs->count; devnr++)
+		if (ip->input_devs->dev[devnr] == dev)
+			break;
+	if (devnr == ip->input_devs->count) {
+		pr_err("gpio_input_event: unknown device %p\n", dev);
+		return -EIO;
+	}
+
+	for (i = 0, ii = ip->info->info; i < ip->info->info_count; i++, ii++) {
+		if ((*ii)->event) {
+			tmp_ret = (*ii)->event(ip->input_devs, *ii,
+						&ip->state[i],
+						devnr, type, code, value);
+			if (tmp_ret)
+				ret = tmp_ret;
+		}
+	}
+	return ret;
+}
+
+static int gpio_event_call_all_func(struct gpio_event *ip, int func)
+{
+	int i;
+	int ret;
+	struct gpio_event_info **ii;
+
+	if (func == GPIO_EVENT_FUNC_INIT || func == GPIO_EVENT_FUNC_RESUME) {
+		ii = ip->info->info;
+		for (i = 0; i < ip->info->info_count; i++, ii++) {
+			if ((*ii)->func == NULL) {
+				ret = -ENODEV;
+				pr_err("gpio_event_probe: Incomplete pdata, "
+					"no function\n");
+				goto err_no_func;
+			}
+			if (func == GPIO_EVENT_FUNC_RESUME && (*ii)->no_suspend)
+				continue;
+			ret = (*ii)->func(ip->input_devs, *ii, &ip->state[i],
+					  func);
+			if (ret) {
+				pr_err("gpio_event_probe: function failed\n");
+				goto err_func_failed;
+			}
+		}
+		return 0;
+	}
+
+	ret = 0;
+	i = ip->info->info_count;
+	ii = ip->info->info + i;
+	while (i > 0) {
+		i--;
+		ii--;
+		if ((func & ~1) == GPIO_EVENT_FUNC_SUSPEND && (*ii)->no_suspend)
+			continue;
+		(*ii)->func(ip->input_devs, *ii, &ip->state[i], func & ~1);
+err_func_failed:
+err_no_func:
+		;
+	}
+	return ret;
+}
+
+static void __maybe_unused gpio_event_suspend(struct gpio_event *ip)
+{
+	gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_SUSPEND);
+	if (ip->info->power)
+		ip->info->power(ip->info, 0);
+}
+
+static void __maybe_unused gpio_event_resume(struct gpio_event *ip)
+{
+	if (ip->info->power)
+		ip->info->power(ip->info, 1);
+	gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_RESUME);
+}
+
+static int gpio_event_probe(struct platform_device *pdev)
+{
+	int err;
+	struct gpio_event *ip;
+	struct gpio_event_platform_data *event_info;
+	int dev_count = 1;
+	int i;
+	int registered = 0;
+
+	event_info = pdev->dev.platform_data;
+	if (event_info == NULL) {
+		pr_err("gpio_event_probe: No pdata\n");
+		return -ENODEV;
+	}
+	if ((!event_info->name && !event_info->names[0]) ||
+	    !event_info->info || !event_info->info_count) {
+		pr_err("gpio_event_probe: Incomplete pdata\n");
+		return -ENODEV;
+	}
+	if (!event_info->name)
+		while (event_info->names[dev_count])
+			dev_count++;
+	ip = kzalloc(sizeof(*ip) +
+		     sizeof(ip->state[0]) * event_info->info_count +
+		     sizeof(*ip->input_devs) +
+		     sizeof(ip->input_devs->dev[0]) * dev_count, GFP_KERNEL);
+	if (ip == NULL) {
+		err = -ENOMEM;
+		pr_err("gpio_event_probe: Failed to allocate private data\n");
+		goto err_kp_alloc_failed;
+	}
+	ip->input_devs = (void *)&ip->state[event_info->info_count];
+	platform_set_drvdata(pdev, ip);
+
+	for (i = 0; i < dev_count; i++) {
+		struct input_dev *input_dev = input_allocate_device();
+		if (input_dev == NULL) {
+			err = -ENOMEM;
+			pr_err("gpio_event_probe: "
+				"Failed to allocate input device\n");
+			goto err_input_dev_alloc_failed;
+		}
+		input_set_drvdata(input_dev, ip);
+		input_dev->name = event_info->name ?
+					event_info->name : event_info->names[i];
+		input_dev->event = gpio_input_event;
+		ip->input_devs->dev[i] = input_dev;
+	}
+	ip->input_devs->count = dev_count;
+	ip->info = event_info;
+	if (event_info->power)
+		ip->info->power(ip->info, 1);
+
+	err = gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_INIT);
+	if (err)
+		goto err_call_all_func_failed;
+
+	for (i = 0; i < dev_count; i++) {
+		err = input_register_device(ip->input_devs->dev[i]);
+		if (err) {
+			pr_err("gpio_event_probe: Unable to register %s "
+				"input device\n", ip->input_devs->dev[i]->name);
+			goto err_input_register_device_failed;
+		}
+		registered++;
+	}
+
+	return 0;
+
+err_input_register_device_failed:
+	gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT);
+err_call_all_func_failed:
+	if (event_info->power)
+		ip->info->power(ip->info, 0);
+	for (i = 0; i < registered; i++)
+		input_unregister_device(ip->input_devs->dev[i]);
+	for (i = dev_count - 1; i >= registered; i--) {
+		input_free_device(ip->input_devs->dev[i]);
+err_input_dev_alloc_failed:
+		;
+	}
+	kfree(ip);
+err_kp_alloc_failed:
+	return err;
+}
+
+static int gpio_event_remove(struct platform_device *pdev)
+{
+	struct gpio_event *ip = platform_get_drvdata(pdev);
+	int i;
+
+	gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT);
+	if (ip->info->power)
+		ip->info->power(ip->info, 0);
+	for (i = 0; i < ip->input_devs->count; i++)
+		input_unregister_device(ip->input_devs->dev[i]);
+	kfree(ip);
+	return 0;
+}
+
+static struct platform_driver gpio_event_driver = {
+	.probe		= gpio_event_probe,
+	.remove		= gpio_event_remove,
+	.driver		= {
+		.name	= GPIO_EVENT_DEV_NAME,
+	},
+};
+
+module_platform_driver(gpio_event_driver);
+
+MODULE_DESCRIPTION("GPIO Event Driver");
+MODULE_LICENSE("GPL");
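A board-file sketch tying one or more gpio_event_info entries (such as the
wheel_axis example after gpio_axis.c above, assumed visible here) to a single
input device; the field names follow gpio_event_probe():

#include <linux/gpio_event.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static struct gpio_event_info *board_event_info[] = {
	&wheel_axis.info,
};

static struct gpio_event_platform_data board_event_pdata = {
	.name		= "board-keys",
	.info		= board_event_info,
	.info_count	= ARRAY_SIZE(board_event_info),
};

static struct platform_device board_event_device = {
	.name			= GPIO_EVENT_DEV_NAME,
	.id			= -1,
	.dev.platform_data	= &board_event_pdata,
};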
diff --git a/drivers/input/misc/gpio_input.c b/drivers/input/misc/gpio_input.c
new file mode 100644
index 0000000..eefd027
--- /dev/null
+++ b/drivers/input/misc/gpio_input.c
@@ -0,0 +1,390 @@
+/* drivers/input/misc/gpio_input.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/pm_wakeup.h>
+
+enum {
+	DEBOUNCE_UNSTABLE     = BIT(0),	/* Got irq, while debouncing */
+	DEBOUNCE_PRESSED      = BIT(1),
+	DEBOUNCE_NOTPRESSED   = BIT(2),
+	DEBOUNCE_WAIT_IRQ     = BIT(3),	/* Stable irq state */
+	DEBOUNCE_POLL         = BIT(4),	/* Stable polling state */
+
+	DEBOUNCE_UNKNOWN =
+		DEBOUNCE_PRESSED | DEBOUNCE_NOTPRESSED,
+};
+
+struct gpio_key_state {
+	struct gpio_input_state *ds;
+	uint8_t debounce;
+};
+
+struct gpio_input_state {
+	struct gpio_event_input_devs *input_devs;
+	const struct gpio_event_input_info *info;
+	struct hrtimer timer;
+	int use_irq;
+	int debounce_count;
+	spinlock_t irq_lock;
+	struct wakeup_source *ws;
+	struct gpio_key_state key_state[0];
+};
+
+static enum hrtimer_restart gpio_event_input_timer_func(struct hrtimer *timer)
+{
+	int i;
+	int pressed;
+	struct gpio_input_state *ds =
+		container_of(timer, struct gpio_input_state, timer);
+	unsigned gpio_flags = ds->info->flags;
+	unsigned npolarity;
+	int nkeys = ds->info->keymap_size;
+	const struct gpio_event_direct_entry *key_entry;
+	struct gpio_key_state *key_state;
+	unsigned long irqflags;
+	uint8_t debounce;
+	bool sync_needed;
+
+
+	key_entry = ds->info->keymap;
+	key_state = ds->key_state;
+	sync_needed = false;
+	spin_lock_irqsave(&ds->irq_lock, irqflags);
+	for (i = 0; i < nkeys; i++, key_entry++, key_state++) {
+		debounce = key_state->debounce;
+		if (debounce & DEBOUNCE_WAIT_IRQ)
+			continue;
+		if (key_state->debounce & DEBOUNCE_UNSTABLE) {
+			debounce = key_state->debounce = DEBOUNCE_UNKNOWN;
+			enable_irq(gpio_to_irq(key_entry->gpio));
+			if (gpio_flags & GPIOEDF_PRINT_KEY_UNSTABLE)
+				pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+					"(%d) continue debounce\n",
+					ds->info->type, key_entry->code,
+					i, key_entry->gpio);
+		}
+		npolarity = !(gpio_flags & GPIOEDF_ACTIVE_HIGH);
+		pressed = gpio_get_value(key_entry->gpio) ^ npolarity;
+		if (debounce & DEBOUNCE_POLL) {
+			if (pressed == !(debounce & DEBOUNCE_PRESSED)) {
+				ds->debounce_count++;
+				key_state->debounce = DEBOUNCE_UNKNOWN;
+				if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+					pr_info("gpio_keys_scan_keys: key %x-"
+						"%x, %d (%d) start debounce\n",
+						ds->info->type, key_entry->code,
+						i, key_entry->gpio);
+			}
+			continue;
+		}
+		if (pressed && (debounce & DEBOUNCE_NOTPRESSED)) {
+			if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+				pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+					"(%d) debounce pressed 1\n",
+					ds->info->type, key_entry->code,
+					i, key_entry->gpio);
+			key_state->debounce = DEBOUNCE_PRESSED;
+			continue;
+		}
+		if (!pressed && (debounce & DEBOUNCE_PRESSED)) {
+			if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+				pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+					"(%d) debounce pressed 0\n",
+					ds->info->type, key_entry->code,
+					i, key_entry->gpio);
+			key_state->debounce = DEBOUNCE_NOTPRESSED;
+			continue;
+		}
+		/* key is stable */
+		ds->debounce_count--;
+		if (ds->use_irq)
+			key_state->debounce |= DEBOUNCE_WAIT_IRQ;
+		else
+			key_state->debounce |= DEBOUNCE_POLL;
+		if (gpio_flags & GPIOEDF_PRINT_KEYS)
+			pr_info("gpio_keys_scan_keys: key %x-%x, %d (%d) "
+				"changed to %d\n", ds->info->type,
+				key_entry->code, i, key_entry->gpio, pressed);
+		input_event(ds->input_devs->dev[key_entry->dev], ds->info->type,
+			    key_entry->code, pressed);
+		sync_needed = true;
+	}
+	if (sync_needed) {
+		for (i = 0; i < ds->input_devs->count; i++)
+			input_sync(ds->input_devs->dev[i]);
+	}
+
+
+	if (ds->debounce_count)
+		hrtimer_start(timer, ds->info->debounce_time, HRTIMER_MODE_REL);
+	else if (!ds->use_irq)
+		hrtimer_start(timer, ds->info->poll_time, HRTIMER_MODE_REL);
+	else
+		__pm_relax(ds->ws);
+
+	spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+
+	return HRTIMER_NORESTART;
+}
+
+static irqreturn_t gpio_event_input_irq_handler(int irq, void *dev_id)
+{
+	struct gpio_key_state *ks = dev_id;
+	struct gpio_input_state *ds = ks->ds;
+	int keymap_index = ks - ds->key_state;
+	const struct gpio_event_direct_entry *key_entry;
+	unsigned long irqflags;
+	int pressed;
+
+	if (!ds->use_irq)
+		return IRQ_HANDLED;
+
+	key_entry = &ds->info->keymap[keymap_index];
+
+	if (ds->info->debounce_time.tv64) {
+		spin_lock_irqsave(&ds->irq_lock, irqflags);
+		if (ks->debounce & DEBOUNCE_WAIT_IRQ) {
+			ks->debounce = DEBOUNCE_UNKNOWN;
+			if (ds->debounce_count++ == 0) {
+				__pm_stay_awake(ds->ws);
+				hrtimer_start(
+					&ds->timer, ds->info->debounce_time,
+					HRTIMER_MODE_REL);
+			}
+			if (ds->info->flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+				pr_info("gpio_event_input_irq_handler: "
+					"key %x-%x, %d (%d) start debounce\n",
+					ds->info->type, key_entry->code,
+					keymap_index, key_entry->gpio);
+		} else {
+			disable_irq_nosync(irq);
+			ks->debounce = DEBOUNCE_UNSTABLE;
+		}
+		spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+	} else {
+		pressed = gpio_get_value(key_entry->gpio) ^
+			!(ds->info->flags & GPIOEDF_ACTIVE_HIGH);
+		if (ds->info->flags & GPIOEDF_PRINT_KEYS)
+			pr_info("gpio_event_input_irq_handler: key %x-%x, %d "
+				"(%d) changed to %d\n",
+				ds->info->type, key_entry->code, keymap_index,
+				key_entry->gpio, pressed);
+		input_event(ds->input_devs->dev[key_entry->dev], ds->info->type,
+			    key_entry->code, pressed);
+		input_sync(ds->input_devs->dev[key_entry->dev]);
+	}
+	return IRQ_HANDLED;
+}
+
+static int gpio_event_input_request_irqs(struct gpio_input_state *ds)
+{
+	int i;
+	int err;
+	unsigned int irq;
+	unsigned long req_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+
+	for (i = 0; i < ds->info->keymap_size; i++) {
+		err = irq = gpio_to_irq(ds->info->keymap[i].gpio);
+		if (err < 0)
+			goto err_gpio_get_irq_num_failed;
+		err = request_irq(irq, gpio_event_input_irq_handler,
+				  req_flags, "gpio_keys", &ds->key_state[i]);
+		if (err) {
+			pr_err("gpio_event_input_request_irqs: request_irq "
+				"failed for input %d, irq %d\n",
+				ds->info->keymap[i].gpio, irq);
+			goto err_request_irq_failed;
+		}
+		if (ds->info->info.no_suspend) {
+			err = enable_irq_wake(irq);
+			if (err) {
+				pr_err("gpio_event_input_request_irqs: "
+					"enable_irq_wake failed for input %d, "
+					"irq %d\n",
+					ds->info->keymap[i].gpio, irq);
+				goto err_enable_irq_wake_failed;
+			}
+		}
+	}
+	return 0;
+
+	for (i = ds->info->keymap_size - 1; i >= 0; i--) {
+		irq = gpio_to_irq(ds->info->keymap[i].gpio);
+		if (ds->info->info.no_suspend)
+			disable_irq_wake(irq);
+err_enable_irq_wake_failed:
+		free_irq(irq, &ds->key_state[i]);
+err_request_irq_failed:
+err_gpio_get_irq_num_failed:
+		;
+	}
+	return err;
+}
+
+int gpio_event_input_func(struct gpio_event_input_devs *input_devs,
+			struct gpio_event_info *info, void **data, int func)
+{
+	int ret;
+	int i;
+	unsigned long irqflags;
+	struct gpio_event_input_info *di;
+	struct gpio_input_state *ds = *data;
+	char *wlname;
+
+	di = container_of(info, struct gpio_event_input_info, info);
+
+	if (func == GPIO_EVENT_FUNC_SUSPEND) {
+		if (ds->use_irq)
+			for (i = 0; i < di->keymap_size; i++)
+				disable_irq(gpio_to_irq(di->keymap[i].gpio));
+		hrtimer_cancel(&ds->timer);
+		return 0;
+	}
+	if (func == GPIO_EVENT_FUNC_RESUME) {
+		spin_lock_irqsave(&ds->irq_lock, irqflags);
+		if (ds->use_irq)
+			for (i = 0; i < di->keymap_size; i++)
+				enable_irq(gpio_to_irq(di->keymap[i].gpio));
+		hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+		spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+		return 0;
+	}
+
+	if (func == GPIO_EVENT_FUNC_INIT) {
+		if (ktime_to_ns(di->poll_time) <= 0)
+			di->poll_time = ktime_set(0, 20 * NSEC_PER_MSEC);
+
+		*data = ds = kzalloc(sizeof(*ds) + sizeof(ds->key_state[0]) *
+					di->keymap_size, GFP_KERNEL);
+		if (ds == NULL) {
+			ret = -ENOMEM;
+			pr_err("gpio_event_input_func: "
+				"Failed to allocate private data\n");
+			goto err_ds_alloc_failed;
+		}
+		ds->debounce_count = di->keymap_size;
+		ds->input_devs = input_devs;
+		ds->info = di;
+		wlname = kasprintf(GFP_KERNEL, "gpio_input:%s%s",
+				   input_devs->dev[0]->name,
+				   (input_devs->count > 1) ? "..." : "");
+
+		ds->ws = wakeup_source_register(wlname);
+		kfree(wlname);
+		if (!ds->ws) {
+			ret = -ENOMEM;
+			pr_err("gpio_event_input_func: "
+				"Failed to allocate wakeup source\n");
+			goto err_ws_failed;
+		}
+
+		spin_lock_init(&ds->irq_lock);
+
+		for (i = 0; i < di->keymap_size; i++) {
+			int dev = di->keymap[i].dev;
+			if (dev >= input_devs->count) {
+				pr_err("gpio_event_input_func: bad device "
+					"index %d >= %d for key code %d\n",
+					dev, input_devs->count,
+					di->keymap[i].code);
+				ret = -EINVAL;
+				goto err_bad_keymap;
+			}
+			input_set_capability(input_devs->dev[dev], di->type,
+					     di->keymap[i].code);
+			ds->key_state[i].ds = ds;
+			ds->key_state[i].debounce = DEBOUNCE_UNKNOWN;
+		}
+
+		for (i = 0; i < di->keymap_size; i++) {
+			ret = gpio_request(di->keymap[i].gpio, "gpio_kp_in");
+			if (ret) {
+				pr_err("gpio_event_input_func: gpio_request "
+					"failed for %d\n", di->keymap[i].gpio);
+				goto err_gpio_request_failed;
+			}
+			ret = gpio_direction_input(di->keymap[i].gpio);
+			if (ret) {
+				pr_err("gpio_event_input_func: "
+					"gpio_direction_input failed for %d\n",
+					di->keymap[i].gpio);
+				goto err_gpio_configure_failed;
+			}
+		}
+
+		ret = gpio_event_input_request_irqs(ds);
+
+		spin_lock_irqsave(&ds->irq_lock, irqflags);
+		ds->use_irq = ret == 0;
+
+		pr_info("GPIO Input Driver: Start gpio inputs for %s%s in %s "
+			"mode\n", input_devs->dev[0]->name,
+			(input_devs->count > 1) ? "..." : "",
+			ret == 0 ? "interrupt" : "polling");
+
+		hrtimer_init(&ds->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		ds->timer.function = gpio_event_input_timer_func;
+		hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+		spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+		return 0;
+	}
+
+	ret = 0;
+	spin_lock_irqsave(&ds->irq_lock, irqflags);
+	hrtimer_cancel(&ds->timer);
+	if (ds->use_irq) {
+		for (i = di->keymap_size - 1; i >= 0; i--) {
+			int irq = gpio_to_irq(di->keymap[i].gpio);
+			if (ds->info->info.no_suspend)
+				disable_irq_wake(irq);
+			free_irq(irq, &ds->key_state[i]);
+		}
+	}
+	spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+
+	for (i = di->keymap_size - 1; i >= 0; i--) {
+err_gpio_configure_failed:
+		gpio_free(di->keymap[i].gpio);
+err_gpio_request_failed:
+		;
+	}
+err_bad_keymap:
+	wakeup_source_unregister(ds->ws);
+err_ws_failed:
+	kfree(ds);
+err_ds_alloc_failed:
+	return ret;
+}
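A board-file sketch for two directly wired, active-low keys with a 20 ms
debounce. The GPIO numbers are placeholders, the member layout is assumed
from <linux/gpio_event.h>, and the .tv64 initializer matches how this file
reads debounce_time:

#include <linux/gpio_event.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/ktime.h>

static struct gpio_event_direct_entry board_keymap[] = {
	{ .gpio = 10, .code = KEY_HOME },
	{ .gpio = 11, .code = KEY_BACK },
};

static struct gpio_event_input_info board_key_info = {
	.info.func		= gpio_event_input_func,
	.info.no_suspend	= true,	/* irqs stay armed across suspend */
	.type			= EV_KEY,
	.keymap			= board_keymap,
	.keymap_size		= ARRAY_SIZE(board_keymap),
	.debounce_time.tv64	= 20 * NSEC_PER_MSEC,
};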
diff --git a/drivers/input/misc/gpio_matrix.c b/drivers/input/misc/gpio_matrix.c
new file mode 100644
index 0000000..eaa9e89
--- /dev/null
+++ b/drivers/input/misc/gpio_matrix.c
@@ -0,0 +1,441 @@
+/* drivers/input/misc/gpio_matrix.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/wakelock.h>
+
+struct gpio_kp {
+	struct gpio_event_input_devs *input_devs;
+	struct gpio_event_matrix_info *keypad_info;
+	struct hrtimer timer;
+	struct wake_lock wake_lock;
+	int current_output;
+	unsigned int use_irq:1;
+	unsigned int key_state_changed:1;
+	unsigned int last_key_state_changed:1;
+	unsigned int some_keys_pressed:2;
+	unsigned int disabled_irq:1;
+	unsigned long keys_pressed[0];
+};
+
+static void clear_phantom_key(struct gpio_kp *kp, int out, int in)
+{
+	struct gpio_event_matrix_info *mi = kp->keypad_info;
+	int key_index = out * mi->ninputs + in;
+	unsigned short keyentry = mi->keymap[key_index];
+	unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+	unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+
+	if (!test_bit(keycode, kp->input_devs->dev[dev]->key)) {
+		if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS)
+			pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) "
+				"cleared\n", keycode, out, in,
+				mi->output_gpios[out], mi->input_gpios[in]);
+		__clear_bit(key_index, kp->keys_pressed);
+	} else {
+		if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS)
+			pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) "
+				"not cleared\n", keycode, out, in,
+				mi->output_gpios[out], mi->input_gpios[in]);
+	}
+}
+
+static int restore_keys_for_input(struct gpio_kp *kp, int out, int in)
+{
+	int rv = 0;
+	int key_index;
+
+	key_index = out * kp->keypad_info->ninputs + in;
+	while (out < kp->keypad_info->noutputs) {
+		if (test_bit(key_index, kp->keys_pressed)) {
+			rv = 1;
+			clear_phantom_key(kp, out, in);
+		}
+		key_index += kp->keypad_info->ninputs;
+		out++;
+	}
+	return rv;
+}
+
+static void remove_phantom_keys(struct gpio_kp *kp)
+{
+	int out, in, inp;
+	int key_index;
+
+	if (kp->some_keys_pressed < 3)
+		return;
+
+	for (out = 0; out < kp->keypad_info->noutputs; out++) {
+		inp = -1;
+		key_index = out * kp->keypad_info->ninputs;
+		for (in = 0; in < kp->keypad_info->ninputs; in++, key_index++) {
+			if (test_bit(key_index, kp->keys_pressed)) {
+				if (inp == -1) {
+					inp = in;
+					continue;
+				}
+				if (inp >= 0) {
+					if (!restore_keys_for_input(kp, out + 1,
+									inp))
+						break;
+					clear_phantom_key(kp, out, inp);
+					inp = -2;
+				}
+				restore_keys_for_input(kp, out, in);
+			}
+		}
+	}
+}
+
+static void report_key(struct gpio_kp *kp, int key_index, int out, int in)
+{
+	struct gpio_event_matrix_info *mi = kp->keypad_info;
+	int pressed = test_bit(key_index, kp->keys_pressed);
+	unsigned short keyentry = mi->keymap[key_index];
+	unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+	unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+
+	if (pressed != test_bit(keycode, kp->input_devs->dev[dev]->key)) {
+		if (keycode == KEY_RESERVED) {
+			if (mi->flags & GPIOKPF_PRINT_UNMAPPED_KEYS)
+				pr_info("gpiomatrix: unmapped key, %d-%d "
+					"(%d-%d) changed to %d\n",
+					out, in, mi->output_gpios[out],
+					mi->input_gpios[in], pressed);
+		} else {
+			if (mi->flags & GPIOKPF_PRINT_MAPPED_KEYS)
+				pr_info("gpiomatrix: key %x, %d-%d (%d-%d) "
+					"changed to %d\n", keycode,
+					out, in, mi->output_gpios[out],
+					mi->input_gpios[in], pressed);
+			input_report_key(kp->input_devs->dev[dev], keycode, pressed);
+		}
+	}
+}
+
+static void report_sync(struct gpio_kp *kp)
+{
+	int i;
+
+	for (i = 0; i < kp->input_devs->count; i++)
+		input_sync(kp->input_devs->dev[i]);
+}
+
+static enum hrtimer_restart gpio_keypad_timer_func(struct hrtimer *timer)
+{
+	int out, in;
+	int key_index;
+	int gpio;
+	struct gpio_kp *kp = container_of(timer, struct gpio_kp, timer);
+	struct gpio_event_matrix_info *mi = kp->keypad_info;
+	unsigned gpio_keypad_flags = mi->flags;
+	unsigned polarity = !!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH);
+
+	out = kp->current_output;
+	if (out == mi->noutputs) {
+		out = 0;
+		kp->last_key_state_changed = kp->key_state_changed;
+		kp->key_state_changed = 0;
+		kp->some_keys_pressed = 0;
+	} else {
+		key_index = out * mi->ninputs;
+		for (in = 0; in < mi->ninputs; in++, key_index++) {
+			gpio = mi->input_gpios[in];
+			if (gpio_get_value(gpio) ^ !polarity) {
+				if (kp->some_keys_pressed < 3)
+					kp->some_keys_pressed++;
+				kp->key_state_changed |= !__test_and_set_bit(
+						key_index, kp->keys_pressed);
+			} else
+				kp->key_state_changed |= __test_and_clear_bit(
+						key_index, kp->keys_pressed);
+		}
+		gpio = mi->output_gpios[out];
+		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+			gpio_set_value(gpio, !polarity);
+		else
+			gpio_direction_input(gpio);
+		out++;
+	}
+	kp->current_output = out;
+	if (out < mi->noutputs) {
+		gpio = mi->output_gpios[out];
+		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+			gpio_set_value(gpio, polarity);
+		else
+			gpio_direction_output(gpio, polarity);
+		hrtimer_start(timer, mi->settle_time, HRTIMER_MODE_REL);
+		return HRTIMER_NORESTART;
+	}
+	if (gpio_keypad_flags & GPIOKPF_DEBOUNCE) {
+		if (kp->key_state_changed) {
+			hrtimer_start(&kp->timer, mi->debounce_delay,
+				      HRTIMER_MODE_REL);
+			return HRTIMER_NORESTART;
+		}
+		kp->key_state_changed = kp->last_key_state_changed;
+	}
+	if (kp->key_state_changed) {
+		if (gpio_keypad_flags & GPIOKPF_REMOVE_SOME_PHANTOM_KEYS)
+			remove_phantom_keys(kp);
+		key_index = 0;
+		for (out = 0; out < mi->noutputs; out++)
+			for (in = 0; in < mi->ninputs; in++, key_index++)
+				report_key(kp, key_index, out, in);
+		report_sync(kp);
+	}
+	if (!kp->use_irq || kp->some_keys_pressed) {
+		hrtimer_start(timer, mi->poll_time, HRTIMER_MODE_REL);
+		return HRTIMER_NORESTART;
+	}
+
+	/* No keys are pressed, reenable interrupt */
+	for (out = 0; out < mi->noutputs; out++) {
+		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+			gpio_set_value(mi->output_gpios[out], polarity);
+		else
+			gpio_direction_output(mi->output_gpios[out], polarity);
+	}
+	for (in = 0; in < mi->ninputs; in++)
+		enable_irq(gpio_to_irq(mi->input_gpios[in]));
+	wake_unlock(&kp->wake_lock);
+	return HRTIMER_NORESTART;
+}
+
+static irqreturn_t gpio_keypad_irq_handler(int irq_in, void *dev_id)
+{
+	int i;
+	struct gpio_kp *kp = dev_id;
+	struct gpio_event_matrix_info *mi = kp->keypad_info;
+	unsigned gpio_keypad_flags = mi->flags;
+
+	if (!kp->use_irq) {
+		/* ignore interrupt while registering the handler */
+		kp->disabled_irq = 1;
+		disable_irq_nosync(irq_in);
+		return IRQ_HANDLED;
+	}
+
+	for (i = 0; i < mi->ninputs; i++)
+		disable_irq_nosync(gpio_to_irq(mi->input_gpios[i]));
+	for (i = 0; i < mi->noutputs; i++) {
+		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+			gpio_set_value(mi->output_gpios[i],
+				!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH));
+		else
+			gpio_direction_input(mi->output_gpios[i]);
+	}
+	wake_lock(&kp->wake_lock);
+	hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+	return IRQ_HANDLED;
+}
+
+static int gpio_keypad_request_irqs(struct gpio_kp *kp)
+{
+	int i;
+	int err;
+	unsigned int irq;
+	unsigned long request_flags;
+	struct gpio_event_matrix_info *mi = kp->keypad_info;
+
+	switch (mi->flags & (GPIOKPF_ACTIVE_HIGH|GPIOKPF_LEVEL_TRIGGERED_IRQ)) {
+	default:
+		request_flags = IRQF_TRIGGER_FALLING;
+		break;
+	case GPIOKPF_ACTIVE_HIGH:
+		request_flags = IRQF_TRIGGER_RISING;
+		break;
+	case GPIOKPF_LEVEL_TRIGGERED_IRQ:
+		request_flags = IRQF_TRIGGER_LOW;
+		break;
+	case GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_ACTIVE_HIGH:
+		request_flags = IRQF_TRIGGER_HIGH;
+		break;
+	}
+
+	for (i = 0; i < mi->ninputs; i++) {
+		err = irq = gpio_to_irq(mi->input_gpios[i]);
+		if (err < 0)
+			goto err_gpio_get_irq_num_failed;
+		err = request_irq(irq, gpio_keypad_irq_handler, request_flags,
+				  "gpio_kp", kp);
+		if (err) {
+			pr_err("gpiomatrix: request_irq failed for input %d, "
+				"irq %d\n", mi->input_gpios[i], irq);
+			goto err_request_irq_failed;
+		}
+		err = enable_irq_wake(irq);
+		if (err) {
+			pr_err("gpiomatrix: set_irq_wake failed for input %d, "
+				"irq %d\n", mi->input_gpios[i], irq);
+		}
+		disable_irq(irq);
+		if (kp->disabled_irq) {
+			kp->disabled_irq = 0;
+			enable_irq(irq);
+		}
+	}
+	return 0;
+
+	for (i = mi->ninputs - 1; i >= 0; i--) {
+		free_irq(gpio_to_irq(mi->input_gpios[i]), kp);
+err_request_irq_failed:
+err_gpio_get_irq_num_failed:
+		;
+	}
+	return err;
+}
+
+int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs,
+	struct gpio_event_info *info, void **data, int func)
+{
+	int i;
+	int err;
+	int key_count;
+	struct gpio_kp *kp;
+	struct gpio_event_matrix_info *mi;
+
+	mi = container_of(info, struct gpio_event_matrix_info, info);
+	if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME) {
+		/* TODO: disable scanning */
+		return 0;
+	}
+
+	if (func == GPIO_EVENT_FUNC_INIT) {
+		if (mi->keymap == NULL ||
+		   mi->input_gpios == NULL ||
+		   mi->output_gpios == NULL) {
+			err = -ENODEV;
+			pr_err("gpiomatrix: Incomplete pdata\n");
+			goto err_invalid_platform_data;
+		}
+		key_count = mi->ninputs * mi->noutputs;
+
+		*data = kp = kzalloc(sizeof(*kp) + sizeof(kp->keys_pressed[0]) *
+				     BITS_TO_LONGS(key_count), GFP_KERNEL);
+		if (kp == NULL) {
+			err = -ENOMEM;
+			pr_err("gpiomatrix: Failed to allocate private data\n");
+			goto err_kp_alloc_failed;
+		}
+		kp->input_devs = input_devs;
+		kp->keypad_info = mi;
+		for (i = 0; i < key_count; i++) {
+			unsigned short keyentry = mi->keymap[i];
+			unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+			unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+			if (dev >= input_devs->count) {
+				pr_err("gpiomatrix: bad device index %d >= "
+					"%d for key code %d\n",
+					dev, input_devs->count, keycode);
+				err = -EINVAL;
+				goto err_bad_keymap;
+			}
+			if (keycode && keycode <= KEY_MAX)
+				input_set_capability(input_devs->dev[dev],
+							EV_KEY, keycode);
+		}
+
+		for (i = 0; i < mi->noutputs; i++) {
+			err = gpio_request(mi->output_gpios[i], "gpio_kp_out");
+			if (err) {
+				pr_err("gpiomatrix: gpio_request failed for "
+					"output %d\n", mi->output_gpios[i]);
+				goto err_request_output_gpio_failed;
+			}
+			if (gpio_cansleep(mi->output_gpios[i])) {
+				pr_err("gpiomatrix: unsupported output gpio %d,"
+					" can sleep\n", mi->output_gpios[i]);
+				err = -EINVAL;
+				goto err_output_gpio_configure_failed;
+			}
+			if (mi->flags & GPIOKPF_DRIVE_INACTIVE)
+				err = gpio_direction_output(mi->output_gpios[i],
+					!(mi->flags & GPIOKPF_ACTIVE_HIGH));
+			else
+				err = gpio_direction_input(mi->output_gpios[i]);
+			if (err) {
+				pr_err("gpiomatrix: gpio_configure failed for "
+					"output %d\n", mi->output_gpios[i]);
+				goto err_output_gpio_configure_failed;
+			}
+		}
+		for (i = 0; i < mi->ninputs; i++) {
+			err = gpio_request(mi->input_gpios[i], "gpio_kp_in");
+			if (err) {
+				pr_err("gpiomatrix: gpio_request failed for "
+					"input %d\n", mi->input_gpios[i]);
+				goto err_request_input_gpio_failed;
+			}
+			err = gpio_direction_input(mi->input_gpios[i]);
+			if (err) {
+				pr_err("gpiomatrix: gpio_direction_input failed"
+					" for input %d\n", mi->input_gpios[i]);
+				goto err_gpio_direction_input_failed;
+			}
+		}
+		kp->current_output = mi->noutputs;
+		kp->key_state_changed = 1;
+
+		hrtimer_init(&kp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		kp->timer.function = gpio_keypad_timer_func;
+		wake_lock_init(&kp->wake_lock, WAKE_LOCK_SUSPEND, "gpio_kp");
+		err = gpio_keypad_request_irqs(kp);
+		kp->use_irq = err == 0;
+
+		pr_info("GPIO Matrix Keypad Driver: Start keypad matrix for "
+			"%s%s in %s mode\n", input_devs->dev[0]->name,
+			(input_devs->count > 1) ? "..." : "",
+			kp->use_irq ? "interrupt" : "polling");
+
+		if (kp->use_irq)
+			wake_lock(&kp->wake_lock);
+		hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+
+		return 0;
+	}
+
+	err = 0;
+	kp = *data;
+
+	if (kp->use_irq)
+		for (i = mi->ninputs - 1; i >= 0; i--)
+			free_irq(gpio_to_irq(mi->input_gpios[i]), kp);
+
+	hrtimer_cancel(&kp->timer);
+	wake_lock_destroy(&kp->wake_lock);
+	for (i = mi->ninputs - 1; i >= 0; i--) {
+err_gpio_direction_input_failed:
+		gpio_free(mi->input_gpios[i]);
+err_request_input_gpio_failed:
+		;
+	}
+	for (i = mi->noutputs - 1; i >= 0; i--) {
+err_output_gpio_configure_failed:
+		gpio_free(mi->output_gpios[i]);
+err_request_output_gpio_failed:
+		;
+	}
+err_bad_keymap:
+	kfree(kp);
+err_kp_alloc_failed:
+err_invalid_platform_data:
+	return err;
+}
diff --git a/drivers/input/misc/gpio_output.c b/drivers/input/misc/gpio_output.c
new file mode 100644
index 0000000..2aac2fa
--- /dev/null
+++ b/drivers/input/misc/gpio_output.c
@@ -0,0 +1,97 @@
+/* drivers/input/misc/gpio_output.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+
+int gpio_event_output_event(
+	struct gpio_event_input_devs *input_devs, struct gpio_event_info *info,
+	void **data, unsigned int dev, unsigned int type,
+	unsigned int code, int value)
+{
+	int i;
+	struct gpio_event_output_info *oi;
+	oi = container_of(info, struct gpio_event_output_info, info);
+	if (type != oi->type)
+		return 0;
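+	/* Invert the level for active-low configured output lines */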
+	if (!(oi->flags & GPIOEDF_ACTIVE_HIGH))
+		value = !value;
+	for (i = 0; i < oi->keymap_size; i++)
+		if (dev == oi->keymap[i].dev && code == oi->keymap[i].code)
+			gpio_set_value(oi->keymap[i].gpio, value);
+	return 0;
+}
+
+int gpio_event_output_func(
+	struct gpio_event_input_devs *input_devs, struct gpio_event_info *info,
+	void **data, int func)
+{
+	int ret;
+	int i;
+	struct gpio_event_output_info *oi;
+	oi = container_of(info, struct gpio_event_output_info, info);
+
+	if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME)
+		return 0;
+
+	if (func == GPIO_EVENT_FUNC_INIT) {
+		int output_level = !(oi->flags & GPIOEDF_ACTIVE_HIGH);
+
+		for (i = 0; i < oi->keymap_size; i++) {
+			int dev = oi->keymap[i].dev;
+			if (dev >= input_devs->count) {
+				pr_err("gpio_event_output_func: bad device "
+					"index %d >= %d for key code %d\n",
+					dev, input_devs->count,
+					oi->keymap[i].code);
+				ret = -EINVAL;
+				goto err_bad_keymap;
+			}
+			input_set_capability(input_devs->dev[dev], oi->type,
+					     oi->keymap[i].code);
+		}
+
+		for (i = 0; i < oi->keymap_size; i++) {
+			ret = gpio_request(oi->keymap[i].gpio,
+					   "gpio_event_output");
+			if (ret) {
+				pr_err("gpio_event_output_func: gpio_request "
+					"failed for %d\n", oi->keymap[i].gpio);
+				goto err_gpio_request_failed;
+			}
+			ret = gpio_direction_output(oi->keymap[i].gpio,
+						    output_level);
+			if (ret) {
+				pr_err("gpio_event_output_func: "
+					"gpio_direction_output failed for %d\n",
+					oi->keymap[i].gpio);
+				goto err_gpio_direction_output_failed;
+			}
+		}
+		return 0;
+	}
+
+	ret = 0;
+	for (i = oi->keymap_size - 1; i >= 0; i--) {
+err_gpio_direction_output_failed:
+		gpio_free(oi->keymap[i].gpio);
+err_gpio_request_failed:
+		;
+	}
+err_bad_keymap:
+	return ret;
+}
+
diff --git a/drivers/input/misc/keychord.c b/drivers/input/misc/keychord.c
new file mode 100644
index 0000000..a5ea27a
--- /dev/null
+++ b/drivers/input/misc/keychord.c
@@ -0,0 +1,391 @@
+/*
+ *  drivers/input/misc/keychord.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/keychord.h>
+#include <linux/sched.h>
+
+#define KEYCHORD_NAME		"keychord"
+#define BUFFER_SIZE			16
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("Key chord input driver");
+MODULE_SUPPORTED_DEVICE("keychord");
+MODULE_LICENSE("GPL");
+
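+/*
+ * Keychords are stored back to back as variable-length records: a
+ * struct input_keychord header followed by 'count' keycodes.
+ * NEXT_KEYCHORD advances a pointer past one such record.
+ */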
+#define NEXT_KEYCHORD(kc) ((struct input_keychord *) \
+		((char *)kc + sizeof(struct input_keychord) + \
+		kc->count * sizeof(kc->keycodes[0])))
+
+struct keychord_device {
+	struct input_handler	input_handler;
+	int			registered;
+
+	/* list of keychords to monitor */
+	struct input_keychord	*keychords;
+	int			keychord_count;
+
+	/* bitmask of keys contained in our keychords */
+	unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];
+	/* current state of the keys */
+	unsigned long keystate[BITS_TO_LONGS(KEY_CNT)];
+	/* number of keys that are currently pressed */
+	int key_down;
+
+	/* second input_device_id is needed for null termination */
+	struct input_device_id  device_ids[2];
+
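+	/*
+	 * Ring buffer of matched keychord ids: keychord_event() pushes at
+	 * 'head' under 'lock', keychord_read() pops at 'tail' and sleeps
+	 * on 'waitq' while the buffer is empty.
+	 */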
+	spinlock_t		lock;
+	wait_queue_head_t	waitq;
+	unsigned char		head;
+	unsigned char		tail;
+	__u16			buff[BUFFER_SIZE];
+};
+
+static int check_keychord(struct keychord_device *kdev,
+		struct input_keychord *keychord)
+{
+	int i;
+
+	if (keychord->count != kdev->key_down)
+		return 0;
+
+	for (i = 0; i < keychord->count; i++) {
+		if (!test_bit(keychord->keycodes[i], kdev->keystate))
+			return 0;
+	}
+
+	/* we have a match */
+	return 1;
+}
+
+static void keychord_event(struct input_handle *handle, unsigned int type,
+			   unsigned int code, int value)
+{
+	struct keychord_device *kdev = handle->private;
+	struct input_keychord *keychord;
+	unsigned long flags;
+	int i, got_chord = 0;
+
+	if (type != EV_KEY || code >= KEY_MAX)
+		return;
+
+	spin_lock_irqsave(&kdev->lock, flags);
+	/* do nothing if key state did not change */
+	if (!test_bit(code, kdev->keystate) == !value)
+		goto done;
+	__change_bit(code, kdev->keystate);
+	if (value)
+		kdev->key_down++;
+	else
+		kdev->key_down--;
+
+	/* don't notify on key up */
+	if (!value)
+		goto done;
+	/* ignore this event if it is not one of the keys we are monitoring */
+	if (!test_bit(code, kdev->keybit))
+		goto done;
+
+	keychord = kdev->keychords;
+	if (!keychord)
+		goto done;
+
+	/* check to see if the keyboard state matches any keychords */
+	for (i = 0; i < kdev->keychord_count; i++) {
+		if (check_keychord(kdev, keychord)) {
+			kdev->buff[kdev->head] = keychord->id;
+			kdev->head = (kdev->head + 1) % BUFFER_SIZE;
+			got_chord = 1;
+			break;
+		}
+		/* skip to next keychord */
+		keychord = NEXT_KEYCHORD(keychord);
+	}
+
+done:
+	spin_unlock_irqrestore(&kdev->lock, flags);
+
+	if (got_chord) {
+		pr_info("keychord: got keychord id %d. Any tasks: %d\n",
+			keychord->id,
+			!list_empty_careful(&kdev->waitq.task_list));
+		wake_up_interruptible(&kdev->waitq);
+	}
+}
+
+static int keychord_connect(struct input_handler *handler,
+					  struct input_dev *dev,
+					  const struct input_device_id *id)
+{
+	int i, ret;
+	struct input_handle *handle;
+	struct keychord_device *kdev =
+		container_of(handler, struct keychord_device, input_handler);
+
+	/*
+	 * ignore this input device if it does not contain any keycodes
+	 * that we are monitoring
+	 */
+	for (i = 0; i < KEY_MAX; i++) {
+		if (test_bit(i, kdev->keybit) && test_bit(i, dev->keybit))
+			break;
+	}
+	if (i == KEY_MAX)
+		return -ENODEV;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->dev = dev;
+	handle->handler = handler;
+	handle->name = KEYCHORD_NAME;
+	handle->private = kdev;
+
+	ret = input_register_handle(handle);
+	if (ret)
+		goto err_input_register_handle;
+
+	ret = input_open_device(handle);
+	if (ret)
+		goto err_input_open_device;
+
+	pr_info("keychord: using input dev %s for fevent\n", dev->name);
+
+	return 0;
+
+err_input_open_device:
+	input_unregister_handle(handle);
+err_input_register_handle:
+	kfree(handle);
+	return ret;
+}
+
+static void keychord_disconnect(struct input_handle *handle)
+{
+	input_close_device(handle);
+	input_unregister_handle(handle);
+	kfree(handle);
+}
+
+/*
+ * keychord_read is used to read keychord events from the driver
+ */
+static ssize_t keychord_read(struct file *file, char __user *buffer,
+		size_t count, loff_t *ppos)
+{
+	struct keychord_device *kdev = file->private_data;
+	__u16   id;
+	int retval;
+	unsigned long flags;
+
+	if (count < sizeof(id))
+		return -EINVAL;
+	count = sizeof(id);
+
+	if (kdev->head == kdev->tail && (file->f_flags & O_NONBLOCK))
+		return -EAGAIN;
+
+	retval = wait_event_interruptible(kdev->waitq,
+			kdev->head != kdev->tail);
+	if (retval)
+		return retval;
+
+	spin_lock_irqsave(&kdev->lock, flags);
+	/* pop a keychord ID off the queue */
+	id = kdev->buff[kdev->tail];
+	kdev->tail = (kdev->tail + 1) % BUFFER_SIZE;
+	spin_unlock_irqrestore(&kdev->lock, flags);
+
+	if (copy_to_user(buffer, &id, count))
+		return -EFAULT;
+
+	return count;
+}
+
+/*
+ * keychord_write is used to configure the driver
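+ *
+ * The payload is a packed sequence of struct input_keychord records
+ * (version, id, count, then 'count' keycodes) laid end to end.  A
+ * one-chord sketch (keycodes illustrative):
+ *   { .version = KEYCHORD_VERSION, .id = 1, .count = 2,
+ *     .keycodes = { KEY_VOLUMEDOWN, KEY_POWER } }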
+ */
+static ssize_t keychord_write(struct file *file, const char __user *buffer,
+		size_t count, loff_t *ppos)
+{
+	struct keychord_device *kdev = file->private_data;
+	struct input_keychord *keychords = NULL;
+	struct input_keychord *keychord, *next, *end;
+	int ret, i, key;
+	unsigned long flags;
+
+	if (count < sizeof(struct input_keychord))
+		return -EINVAL;
+	keychords = kzalloc(count, GFP_KERNEL);
+	if (!keychords)
+		return -ENOMEM;
+
+	/* read list of keychords from userspace */
+	if (copy_from_user(keychords, buffer, count)) {
+		kfree(keychords);
+		return -EFAULT;
+	}
+
+	/* unregister handler before changing configuration */
+	if (kdev->registered) {
+		input_unregister_handler(&kdev->input_handler);
+		kdev->registered = 0;
+	}
+
+	spin_lock_irqsave(&kdev->lock, flags);
+	/* clear any existing configuration */
+	kfree(kdev->keychords);
+	kdev->keychords = NULL;
+	kdev->keychord_count = 0;
+	kdev->key_down = 0;
+	memset(kdev->keybit, 0, sizeof(kdev->keybit));
+	memset(kdev->keystate, 0, sizeof(kdev->keystate));
+	kdev->head = kdev->tail = 0;
+
+	keychord = keychords;
+	end = (struct input_keychord *)((char *)keychord + count);
+
+	while (keychord < end) {
+		next = NEXT_KEYCHORD(keychord);
+		if (keychord->count <= 0 || next > end) {
+			pr_err("keychord: invalid keycode count %d\n",
+				keychord->count);
+			goto err_unlock_return;
+		}
+		if (keychord->version != KEYCHORD_VERSION) {
+			pr_err("keychord: unsupported version %d\n",
+				keychord->version);
+			goto err_unlock_return;
+		}
+
+		/* keep track of the keys we are monitoring in keybit */
+		for (i = 0; i < keychord->count; i++) {
+			key = keychord->keycodes[i];
+			if (key < 0 || key >= KEY_CNT) {
+				pr_err("keychord: keycode %d out of range\n",
+					key);
+				goto err_unlock_return;
+			}
+			__set_bit(key, kdev->keybit);
+		}
+
+		kdev->keychord_count++;
+		keychord = next;
+	}
+
+	kdev->keychords = keychords;
+	spin_unlock_irqrestore(&kdev->lock, flags);
+
+	ret = input_register_handler(&kdev->input_handler);
+	if (ret) {
+		kfree(keychords);
+		kdev->keychords = NULL;
+		return ret;
+	}
+	kdev->registered = 1;
+
+	return count;
+
+err_unlock_return:
+	spin_unlock_irqrestore(&kdev->lock, flags);
+	kfree(keychords);
+	return -EINVAL;
+}
+
+static unsigned int keychord_poll(struct file *file, poll_table *wait)
+{
+	struct keychord_device *kdev = file->private_data;
+
+	poll_wait(file, &kdev->waitq, wait);
+
+	if (kdev->head != kdev->tail)
+		return POLLIN | POLLRDNORM;
+
+	return 0;
+}
+
+static int keychord_open(struct inode *inode, struct file *file)
+{
+	struct keychord_device *kdev;
+
+	kdev = kzalloc(sizeof(struct keychord_device), GFP_KERNEL);
+	if (!kdev)
+		return -ENOMEM;
+
+	spin_lock_init(&kdev->lock);
+	init_waitqueue_head(&kdev->waitq);
+
+	kdev->input_handler.event = keychord_event;
+	kdev->input_handler.connect = keychord_connect;
+	kdev->input_handler.disconnect = keychord_disconnect;
+	kdev->input_handler.name = KEYCHORD_NAME;
+	kdev->input_handler.id_table = kdev->device_ids;
+
+	kdev->device_ids[0].flags = INPUT_DEVICE_ID_MATCH_EVBIT;
+	__set_bit(EV_KEY, kdev->device_ids[0].evbit);
+
+	file->private_data = kdev;
+
+	return 0;
+}
+
+static int keychord_release(struct inode *inode, struct file *file)
+{
+	struct keychord_device *kdev = file->private_data;
+
+	if (kdev->registered)
+		input_unregister_handler(&kdev->input_handler);
+	kfree(kdev);
+
+	return 0;
+}
+
+static const struct file_operations keychord_fops = {
+	.owner		= THIS_MODULE,
+	.open		= keychord_open,
+	.release	= keychord_release,
+	.read		= keychord_read,
+	.write		= keychord_write,
+	.poll		= keychord_poll,
+};
+
+static struct miscdevice keychord_misc = {
+	.fops		= &keychord_fops,
+	.name		= KEYCHORD_NAME,
+	.minor		= MISC_DYNAMIC_MINOR,
+};
+
+static int __init keychord_init(void)
+{
+	return misc_register(&keychord_misc);
+}
+
+static void __exit keychord_exit(void)
+{
+	misc_deregister(&keychord_misc);
+}
+
+module_init(keychord_init);
+module_exit(keychord_exit);
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 0d2e812..fe07d91 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -18,20 +18,36 @@
 
 #include <linux/module.h>
 #include <linux/device-mapper.h>
+#include <linux/reboot.h>
 #include <crypto/hash.h>
 
 #define DM_MSG_PREFIX			"verity"
 
+#define DM_VERITY_ENV_LENGTH		42
+#define DM_VERITY_ENV_VAR_NAME		"VERITY_ERR_BLOCK_NR"
+
 #define DM_VERITY_IO_VEC_INLINE		16
 #define DM_VERITY_MEMPOOL_SIZE		4
 #define DM_VERITY_DEFAULT_PREFETCH_SIZE	262144
 
 #define DM_VERITY_MAX_LEVELS		63
+#define DM_VERITY_MAX_CORRUPTED_ERRS	100
 
 static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
 
 module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, S_IRUGO | S_IWUSR);
 
+enum verity_mode {
+	DM_VERITY_MODE_EIO = 0,
+	DM_VERITY_MODE_LOGGING = 1,
+	DM_VERITY_MODE_RESTART = 2
+};
+
+enum verity_block_type {
+	DM_VERITY_BLOCK_TYPE_DATA,
+	DM_VERITY_BLOCK_TYPE_METADATA
+};
+
 struct dm_verity {
 	struct dm_dev *data_dev;
 	struct dm_dev *hash_dev;
@@ -54,6 +70,8 @@
 	unsigned digest_size;	/* digest size for the current hash algorithm */
 	unsigned shash_descsize;/* the size of temporary space for crypto */
 	int hash_failed;	/* set to 1 if hash of any block failed */
+	enum verity_mode mode;	/* mode for handling verification errors */
+	unsigned corrupted_errs;/* Number of errors for corrupted blocks */
 
 	mempool_t *vec_mempool;	/* mempool of bio vector */
 
@@ -180,6 +198,54 @@
 }
 
 /*
+ * Handle verification errors.
+ */
+static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
+				 unsigned long long block)
+{
+	char verity_env[DM_VERITY_ENV_LENGTH];
+	char *envp[] = { verity_env, NULL };
+	const char *type_str = "";
+	struct mapped_device *md = dm_table_get_md(v->ti->table);
+
+	if (v->corrupted_errs >= DM_VERITY_MAX_CORRUPTED_ERRS)
+		goto out;
+
+	++v->corrupted_errs;
+
+	switch (type) {
+	case DM_VERITY_BLOCK_TYPE_DATA:
+		type_str = "data";
+		break;
+	case DM_VERITY_BLOCK_TYPE_METADATA:
+		type_str = "metadata";
+		break;
+	default:
+		BUG();
+	}
+
+	DMERR_LIMIT("%s: %s block %llu is corrupted", v->data_dev->name,
+		    type_str, block);
+
+	if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS)
+		DMERR("%s: reached maximum errors", v->data_dev->name);
+
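+	/*
+	 * Notify userspace: the uevent environment carries
+	 * VERITY_ERR_BLOCK_NR=<type>,<block> (type 0 = data, 1 = metadata).
+	 */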
+	snprintf(verity_env, DM_VERITY_ENV_LENGTH, "%s=%d,%llu",
+		DM_VERITY_ENV_VAR_NAME, type, block);
+
+	kobject_uevent_env(&disk_to_dev(dm_disk(md))->kobj, KOBJ_CHANGE, envp);
+
+out:
+	if (v->mode == DM_VERITY_MODE_LOGGING)
+		return 0;
+
+	if (v->mode == DM_VERITY_MODE_RESTART)
+		kernel_restart("dm-verity device corrupted");
+
+	return 1;
+}
+
+/*
  * Verify hash of a metadata block pertaining to the specified data block
  * ("block" argument) at a specified level ("level" argument).
  *
@@ -256,11 +322,13 @@
 			goto release_ret_r;
 		}
 		if (unlikely(memcmp(result, io_want_digest(v, io), v->digest_size))) {
-			DMERR_LIMIT("metadata block %llu is corrupted",
-				(unsigned long long)hash_block);
 			v->hash_failed = 1;
-			r = -EIO;
-			goto release_ret_r;
+
+			if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_METADATA,
+					      hash_block)) {
+				r = -EIO;
+				goto release_ret_r;
+			}
 		} else
 			aux->hash_verified = 1;
 	}
@@ -377,10 +445,11 @@
 			return r;
 		}
 		if (unlikely(memcmp(result, io_want_digest(v, io), v->digest_size))) {
-			DMERR_LIMIT("data block %llu is corrupted",
-				(unsigned long long)(io->block + b));
 			v->hash_failed = 1;
-			return -EIO;
+
+			if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
+					      io->block + b))
+				return -EIO;
 		}
 	}
 	BUG_ON(vector != io->io_vec_size);
@@ -689,8 +758,8 @@
 		goto bad;
 	}
 
-	if (argc != 10) {
-		ti->error = "Invalid argument count: exactly 10 arguments required";
+	if (argc < 10 || argc > 11) {
+		ti->error = "Invalid argument count: 10-11 arguments required";
 		r = -EINVAL;
 		goto bad;
 	}
@@ -811,6 +880,17 @@
 		}
 	}
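+	/*
+	 * Optional 11th table argument selects the error handling mode:
+	 *   0 = DM_VERITY_MODE_EIO (fail reads with -EIO, default)
+	 *   1 = DM_VERITY_MODE_LOGGING (log and continue)
+	 *   2 = DM_VERITY_MODE_RESTART (restart the system)
+	 * Sketch of a table line (the fields before the mode are the
+	 * standard ten verity args; device names and digests hypothetical):
+	 *   0 <sectors> verity 1 /dev/sda1 /dev/sda2 4096 4096 <blocks>
+	 *     <hash_start> sha256 <root_digest> <salt> 1
+	 */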
 
+	if (argc > 10) {
+		if (sscanf(argv[10], "%d%c", &num, &dummy) != 1 ||
+			num < DM_VERITY_MODE_EIO ||
+			num > DM_VERITY_MODE_RESTART) {
+			ti->error = "Invalid mode";
+			r = -EINVAL;
+			goto bad;
+		}
+		v->mode = num;
+	}
+
 	v->hash_per_block_bits =
 		fls((1 << v->hash_dev_block_bits) / v->digest_size) - 1;
 
diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c
index d8d5137..44b60c9 100644
--- a/drivers/mfd/intel_msic.c
+++ b/drivers/mfd/intel_msic.c
@@ -18,7 +18,7 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
-#include <asm/intel_scu_ipc.h>
+#include <asm/intel_scu_pmic.h>
 
 #define MSIC_VENDOR(id)		((id >> 6) & 3)
 #define MSIC_VERSION(id)	(id & 0x3f)
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 7a68184..8aa6a49 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -147,6 +147,18 @@
 	  an Intel Atom (non-netbook) mobile device containing a MIPI
 	  P1149.7 standard implementation.
 
+config INTEL_PTI_STM
+	tristate "MIPI Sytem Trace Macro (STM) for Intel"
+	default n
+	depends on INTEL_MID_PTI
+	help
+	  The STM (Sytem Trace Monitor) driver control trace data
+	  route through an Intel Tangier PTI port or through USB xDCI
+	  interface with Debug-Class DvC.Trace support.
+
+	  It provide the ability to PTI driver to setup the output and
+	  to user to change the output with sysfs and exported header.
+
 config SGI_IOC4
 	tristate "SGI IOC4 Base IO support"
 	depends on PCI
@@ -424,6 +436,10 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called ti_dac7512.
 
+config UID_STAT
+	bool "UID based statistics tracking exported to /proc/uid_stat"
+	default n
+
 config VMWARE_BALLOON
 	tristate "VMware Balloon Driver"
 	depends on X86 && HYPERVISOR_GUEST
@@ -527,6 +543,25 @@
 	  the genalloc API. It is supposed to be used for small on-chip SRAM
 	  areas found on many SoCs.
 
+config EMMC_IPANIC
+	bool "Intel kernel panic diagnostics driver FOR EMMC"
+	default n
+	---help---
+	  Driver which handles kernel panics and attempts to write
+	  critical debugging data to EMMC.
+
+config EMMC_IPANIC_PLABEL
+	string "Intel kernel panic driver (EMMC_IPANIC) partition label"
+	depends on EMMC_IPANIC
+	default "panic"
+	---help---
+	  Set the default mmc partition label for EMMC_IPANIC driver.
+
+config UID_CPUTIME
+	tristate "Per-UID cpu time statistics"
+	depends on PROFILING
+	help
+	  Per-UID CPU time statistics exported to /proc/uid_cputime.
+
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
@@ -536,4 +571,5 @@
 source "drivers/misc/altera-stapl/Kconfig"
 source "drivers/misc/mei/Kconfig"
 source "drivers/misc/vmw_vmci/Kconfig"
+source "drivers/misc/bcm-lpm/Kconfig"
 endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index c235d5b..8efdf0e 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -7,6 +7,7 @@
 obj-$(CONFIG_AD525X_DPOT_I2C)	+= ad525x_dpot-i2c.o
 obj-$(CONFIG_AD525X_DPOT_SPI)	+= ad525x_dpot-spi.o
 obj-$(CONFIG_INTEL_MID_PTI)	+= pti.o
+obj-$(CONFIG_INTEL_PTI_STM)	+= stm.o
 obj-$(CONFIG_ATMEL_PWM)		+= atmel_pwm.o
 obj-$(CONFIG_ATMEL_SSC)		+= atmel-ssc.o
 obj-$(CONFIG_ATMEL_TCLIB)	+= atmel_tclib.o
@@ -36,6 +37,7 @@
 obj-$(CONFIG_EP93XX_PWM)	+= ep93xx_pwm.o
 obj-$(CONFIG_DS1682)		+= ds1682.o
 obj-$(CONFIG_TI_DAC7512)	+= ti_dac7512.o
+obj-$(CONFIG_UID_STAT)		+= uid_stat.o
 obj-$(CONFIG_C2PORT)		+= c2port/
 obj-$(CONFIG_HMC6352)		+= hmc6352.o
 obj-y				+= eeprom/
@@ -53,3 +55,6 @@
 obj-$(CONFIG_VMWARE_VMCI)	+= vmw_vmci/
 obj-$(CONFIG_LATTICE_ECP3_CONFIG)	+= lattice-ecp3-config.o
 obj-$(CONFIG_SRAM)		+= sram.o
+obj-$(CONFIG_BCM_BT_LPM)	+= bcm-lpm/
+obj-$(CONFIG_EMMC_IPANIC)	+= emmc_ipanic.o
+obj-$(CONFIG_UID_CPUTIME)	+= uid_cputime.o
diff --git a/drivers/misc/bcm-lpm/Kconfig b/drivers/misc/bcm-lpm/Kconfig
new file mode 100644
index 0000000..cb8443a
--- /dev/null
+++ b/drivers/misc/bcm-lpm/Kconfig
@@ -0,0 +1,6 @@
+config BCM_BT_LPM
+	tristate "Broadcom Bluetooth Low Power Mode"
+	depends on SERIAL_MFD_HSU
+	default m
+	help
+	  Select this module for Broadcom Bluetooth low power management.
diff --git a/drivers/misc/bcm-lpm/Makefile b/drivers/misc/bcm-lpm/Makefile
new file mode 100644
index 0000000..6dd43fd
--- /dev/null
+++ b/drivers/misc/bcm-lpm/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_BCM_BT_LPM) += bcm_bt_lpm.o
diff --git a/drivers/misc/bcm-lpm/bcm_bt_lpm.c b/drivers/misc/bcm-lpm/bcm_bt_lpm.c
new file mode 100644
index 0000000..335a925
--- /dev/null
+++ b/drivers/misc/bcm-lpm/bcm_bt_lpm.c
@@ -0,0 +1,581 @@
+/*
+ * Broadcom Bluetooth and low power control via GPIO
+ *
+ *  Copyright (C) 2011 Google, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/hrtimer.h>
+#include <linux/irq.h>
+#include <linux/rfkill.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_hsu.h>
+
+#ifndef CONFIG_ACPI
+#include <asm/bcm_bt_lpm.h>
+#else
+#include <linux/acpi.h>
+#include <linux/acpi_gpio.h>
+
+enum {
+	gpio_wake_acpi_idx,
+	gpio_enable_bt_acpi_idx,
+	host_wake_acpi_idx
+};
+#endif
+
+static struct rfkill *bt_rfkill;
+static bool bt_enabled;
+static bool host_wake_uart_enabled;
+static bool wake_uart_enabled;
+static bool int_handler_enabled;
+
+#define LPM_ON
+
+static void activate_irq_handler(void);
+
+struct bcm_bt_lpm {
+	unsigned int gpio_wake;
+	unsigned int gpio_host_wake;
+	unsigned int int_host_wake;
+	unsigned int gpio_enable_bt;
+
+	int wake;
+	int host_wake;
+
+	struct hrtimer enter_lpm_timer;
+	ktime_t enter_lpm_delay;
+
+	struct device *tty_dev;
+
+	int port;
+} bt_lpm;
+
+#ifdef LPM_ON
+static void uart_enable(struct device *tty)
+{
+	pr_debug("%s: runtime get\n", __func__);
+	/* Tell PM runtime to power on the tty device and block s0i3 */
+	pm_runtime_get(tty);
+}
+
+static void uart_disable(struct device *tty)
+{
+	pr_debug("%s: runtime put\n", __func__);
+	/* Tell PM runtime to release tty device and allow s0i3 */
+	pm_runtime_put(tty);
+}
+#endif
+
+#ifdef CONFIG_ACPI
+static int bcm_bt_lpm_acpi_probe(struct platform_device *pdev)
+{
+	struct acpi_gpio_info info;
+	acpi_handle handle;
+	acpi_integer port;
+
+	/*
+	 * Handle ACPI specific initializations.
+	 */
+	dev_dbg(&pdev->dev, "BCM2E1A ACPI specific probe\n");
+
+	bt_lpm.gpio_enable_bt = acpi_get_gpio_by_index(&pdev->dev,
+						gpio_enable_bt_acpi_idx, &info);
+	if (!gpio_is_valid(bt_lpm.gpio_enable_bt)) {
+		pr_err("%s: gpio %d for gpio_enable_bt not valid\n", __func__,
+							bt_lpm.gpio_enable_bt);
+		return -EINVAL;
+	}
+
+#ifdef LPM_ON
+	bt_lpm.gpio_wake = acpi_get_gpio_by_index(&pdev->dev,
+						gpio_wake_acpi_idx, &info);
+	if (!gpio_is_valid(bt_lpm.gpio_wake)) {
+		pr_err("%s: gpio %d for gpio_wake not valid\n", __func__,
+							bt_lpm.gpio_wake);
+		return -EINVAL;
+	}
+
+	bt_lpm.gpio_host_wake = acpi_get_gpio_by_index(&pdev->dev,
+						host_wake_acpi_idx, &info);
+	if (!gpio_is_valid(bt_lpm.gpio_host_wake)) {
+		pr_err("%s: gpio %d for gpio_host_wake not valid\n", __func__,
+							bt_lpm.gpio_host_wake);
+		return -EINVAL;
+	}
+
+	bt_lpm.int_host_wake = gpio_to_irq(bt_lpm.gpio_host_wake);
+
+	pr_debug("%s: gpio_wake %d, gpio_host_wake %d, int_host_wake %d\n",
+							__func__,
+							bt_lpm.gpio_wake,
+							bt_lpm.gpio_host_wake,
+							bt_lpm.int_host_wake);
+#endif
+
+	handle = DEVICE_ACPI_HANDLE(&pdev->dev);
+
+	if (ACPI_FAILURE(acpi_evaluate_integer(handle, "UART", NULL, &port))) {
+		dev_err(&pdev->dev, "Error evaluating UART port number\n");
+
+		/* FIXME - Force port 0 if the information is missing from the
+		 * ACPI table.
+		 * That will be removed once the ACPI tables will all have been
+		 * updated.
+		 */
+		port = 0;
+	}
+
+	bt_lpm.port = port;
+	pr_debug("%s: UART port %d\n", __func__, bt_lpm.port);
+
+	return 0;
+}
+#endif /* CONFIG_ACPI */
+
+static int bcm43xx_bt_rfkill_set_power(void *data, bool blocked)
+{
+	/* rfkill_ops callback. Turn transmitter on when blocked is false */
+
+	if (!blocked) {
+		gpio_set_value(bt_lpm.gpio_wake, 1);
+		/*
+		 * Delay advised by BRCM is min 2.5 ns;
+		 * set it between 10 and 50 us for extra margin.
+		 */
+		usleep_range(10, 50);
+
+		gpio_set_value(bt_lpm.gpio_enable_bt, 1);
+		pr_debug("%s: turn BT on\n", __func__);
+	} else {
+		gpio_set_value(bt_lpm.gpio_enable_bt, 0);
+		pr_debug("%s: turn BT off\n", __func__);
+	}
+
+	bt_enabled = !blocked;
+
+	return 0;
+}
+
+static const struct rfkill_ops bcm43xx_bt_rfkill_ops = {
+	.set_block = bcm43xx_bt_rfkill_set_power,
+};
+
+#ifdef LPM_ON
+static void set_wake_locked(int wake)
+{
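+	/*
+	 * Keep the UART runtime-active while BT_WAKE is asserted: power it
+	 * up before raising the wake line and release it only after the
+	 * line has been dropped.
+	 */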
+	bt_lpm.wake = wake;
+
+	if (!wake_uart_enabled && wake) {
+		WARN_ON(!bt_lpm.tty_dev);
+		uart_enable(bt_lpm.tty_dev);
+	}
+
+	gpio_set_value(bt_lpm.gpio_wake, wake);
+
+	if (wake_uart_enabled && !wake) {
+		WARN_ON(!bt_lpm.tty_dev);
+		uart_disable(bt_lpm.tty_dev);
+	}
+	wake_uart_enabled = wake;
+}
+
+static enum hrtimer_restart enter_lpm(struct hrtimer *timer)
+{
+	pr_debug("%s\n", __func__);
+
+	set_wake_locked(0);
+
+	return HRTIMER_NORESTART;
+}
+
+
+static void update_host_wake_locked(int host_wake)
+{
+	if (host_wake == bt_lpm.host_wake)
+		return;
+
+	bt_lpm.host_wake = host_wake;
+
+	if (host_wake) {
+		if (!host_wake_uart_enabled) {
+			WARN_ON(!bt_lpm.tty_dev);
+			uart_enable(bt_lpm.tty_dev);
+		}
+	} else  {
+		if (host_wake_uart_enabled) {
+			WARN_ON(!bt_lpm.tty_dev);
+			uart_disable(bt_lpm.tty_dev);
+		}
+	}
+
+	host_wake_uart_enabled = host_wake;
+
+}
+
+static irqreturn_t host_wake_isr(int irq, void *dev)
+{
+	int host_wake;
+
+	host_wake = gpio_get_value(bt_lpm.gpio_host_wake);
+
+	pr_debug("%s: lpm %s\n", __func__, host_wake ? "off" : "on");
+
+	irq_set_irq_type(irq, host_wake ? IRQF_TRIGGER_FALLING :
+							IRQF_TRIGGER_RISING);
+
+	if (!bt_lpm.tty_dev) {
+		bt_lpm.host_wake = host_wake;
+		return IRQ_HANDLED;
+	}
+
+	update_host_wake_locked(host_wake);
+
+	return IRQ_HANDLED;
+}
+
+static void activate_irq_handler(void)
+{
+	int ret;
+
+	pr_debug("%s\n", __func__);
+
+	ret = request_irq(bt_lpm.int_host_wake, host_wake_isr,
+				IRQF_TRIGGER_RISING, "bt_host_wake", NULL);
+
+	if (ret < 0) {
+		pr_err("Error lpm request IRQ");
+		gpio_free(bt_lpm.gpio_wake);
+		gpio_free(bt_lpm.gpio_host_wake);
+	}
+}
+
+
+static void bcm_bt_lpm_wake_peer(struct device *dev)
+{
+	bt_lpm.tty_dev = dev;
+
+	/*
+	 * The IRQ is enabled after the first host wake-up signal.
+	 * In the original code the IRQ was level-triggered but, since mfld
+	 * does not support level triggers, it is edge-triggered here.
+	 */
+
+	if (!int_handler_enabled) {
+		int_handler_enabled = true;
+		activate_irq_handler();
+	}
+
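+	/*
+	 * Assert BT_WAKE for this transfer and (re)arm the 1 second idle
+	 * timer; enter_lpm() drops the wake line once the link has been
+	 * idle that long.
+	 */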
+	hrtimer_try_to_cancel(&bt_lpm.enter_lpm_timer);
+
+	set_wake_locked(1);
+
+	hrtimer_start(&bt_lpm.enter_lpm_timer, bt_lpm.enter_lpm_delay,
+		HRTIMER_MODE_REL);
+
+}
+
+static int bcm_bt_lpm_init(struct platform_device *pdev)
+{
+	int ret;
+	struct device *tty_dev;
+
+	hrtimer_init(&bt_lpm.enter_lpm_timer, CLOCK_MONOTONIC,
+							HRTIMER_MODE_REL);
+	bt_lpm.enter_lpm_delay = ktime_set(1, 0);  /* 1 sec */
+	bt_lpm.enter_lpm_timer.function = enter_lpm;
+
+	bt_lpm.host_wake = 0;
+
+	if (bt_lpm.gpio_host_wake < 0) {
+		pr_err("Error bt_lpm.gpio_host_wake\n");
+		return -ENODEV;
+	}
+
+	ret = irq_set_irq_wake(bt_lpm.int_host_wake, 1);
+	if (ret < 0) {
+		pr_err("Error lpm set irq IRQ");
+		gpio_free(bt_lpm.gpio_wake);
+		gpio_free(bt_lpm.gpio_host_wake);
+		return ret;
+	}
+
+	tty_dev = intel_mid_hsu_set_wake_peer(bt_lpm.port,
+			bcm_bt_lpm_wake_peer);
+	if (!tty_dev) {
+		pr_err("Error no tty dev");
+		gpio_free(bt_lpm.gpio_wake);
+		gpio_free(bt_lpm.gpio_host_wake);
+		return -ENODEV;
+	}
+
+	bcm_bt_lpm_wake_peer(tty_dev);
+	return 0;
+}
+#endif
+
+#ifndef CONFIG_ACPI
+static int bcm43xx_bluetooth_pdata_probe(struct platform_device *pdev)
+{
+	struct bcm_bt_lpm_platform_data *pdata = pdev->dev.platform_data;
+
+	if (pdata == NULL) {
+		pr_err("Cannot register bcm_bt_lpm drivers, pdata is NULL\n");
+		return -EINVAL;
+	}
+
+	if (!gpio_is_valid(pdata->gpio_enable)) {
+		pr_err("%s: gpio not valid\n", __func__);
+		return -EINVAL;
+	}
+
+#ifdef LPM_ON
+	if (!gpio_is_valid(pdata->gpio_wake) ||
+		!gpio_is_valid(pdata->gpio_host_wake)) {
+		pr_err("%s: gpio not valid\n", __func__);
+		return -EINVAL;
+	}
+#endif
+
+	bt_lpm.gpio_wake = pdata->gpio_wake;
+	bt_lpm.gpio_host_wake = pdata->gpio_host_wake;
+	bt_lpm.int_host_wake = pdata->int_host_wake;
+	bt_lpm.gpio_enable_bt = pdata->gpio_enable;
+
+	bt_lpm.port = pdata->port;
+
+	return 0;
+}
+#endif /* !CONFIG_ACPI */
+
+static int bcm43xx_bluetooth_probe(struct platform_device *pdev)
+{
+	bool default_state = true;	/* off */
+	int ret = 0;
+
+	int_handler_enabled = false;
+
+#ifdef CONFIG_ACPI
+	if (ACPI_HANDLE(&pdev->dev)) {
+		/*
+		 * acpi specific probe
+		 */
+		pr_debug("%s for ACPI device %s\n", __func__,
+							dev_name(&pdev->dev));
+		if (bcm_bt_lpm_acpi_probe(pdev) < 0)
+			ret = -EINVAL;
+	} else
+		ret = -ENODEV;
+#else
+	ret = bcm43xx_bluetooth_pdata_probe(pdev);
+#endif
+
+	if (ret < 0) {
+		pr_err("%s: Cannot register platform data\n", __func__);
+		goto err_data_probe;
+	}
+
+	ret = gpio_request(bt_lpm.gpio_enable_bt, pdev->name);
+	if (ret < 0) {
+		pr_err("%s: Unable to request gpio %d\n", __func__,
+							bt_lpm.gpio_enable_bt);
+		goto err_gpio_enable_req;
+	}
+
+	ret = gpio_direction_output(bt_lpm.gpio_enable_bt, 0);
+	if (ret < 0) {
+		pr_err("%s: Unable to set int direction for gpio %d\n",
+					__func__, bt_lpm.gpio_enable_bt);
+		goto err_gpio_enable_dir;
+	}
+
+#ifdef LPM_ON
+	ret = gpio_request(bt_lpm.gpio_host_wake, pdev->name);
+	if (ret < 0) {
+		pr_err("%s: Unable to request gpio %d\n",
+					__func__, bt_lpm.gpio_host_wake);
+		goto err_gpio_host_wake_req;
+	}
+
+	ret = gpio_direction_input(bt_lpm.gpio_host_wake);
+	if (ret < 0) {
+		pr_err("%s: Unable to set direction for gpio %d\n", __func__,
+							bt_lpm.gpio_host_wake);
+		goto err_gpio_host_wake_dir;
+	}
+
+	ret = gpio_request(bt_lpm.gpio_wake, pdev->name);
+	if (ret < 0) {
+		pr_err("%s: Unable to request gpio %d\n", __func__,
+							bt_lpm.gpio_wake);
+		goto err_gpio_wake_req;
+	}
+
+	ret =  gpio_direction_output(bt_lpm.gpio_wake, 0);
+	if (ret < 0) {
+		pr_err("%s: Unable to set direction for gpio %d\n", __func__,
+							bt_lpm.gpio_wake);
+		goto err_gpio_wake_dir;
+	}
+
+	pr_debug("%s: gpio_enable=%d, gpio_wake=%d, gpio_host_wake=%d\n",
+							__func__,
+							bt_lpm.gpio_enable_bt,
+							bt_lpm.gpio_wake,
+							bt_lpm.gpio_host_wake);
+#endif
+
+	bt_rfkill = rfkill_alloc("bcm43xx Bluetooth", &pdev->dev,
+				RFKILL_TYPE_BLUETOOTH, &bcm43xx_bt_rfkill_ops,
+				NULL);
+	if (unlikely(!bt_rfkill)) {
+		ret = -ENOMEM;
+		goto err_rfkill_alloc;
+	}
+
+	bcm43xx_bt_rfkill_set_power(NULL, default_state);
+	rfkill_init_sw_state(bt_rfkill, default_state);
+
+	ret = rfkill_register(bt_rfkill);
+	if (unlikely(ret))
+		goto err_rfkill_register;
+
+#ifdef LPM_ON
+	ret = bcm_bt_lpm_init(pdev);
+	if (ret)
+		goto err_lpm_init;
+#endif
+
+	return ret;
+
+err_lpm_init:
+	rfkill_unregister(bt_rfkill);
+err_rfkill_register:
+	rfkill_destroy(bt_rfkill);
+err_rfkill_alloc:
+#ifdef LPM_ON
+err_gpio_wake_dir:
+	gpio_free(bt_lpm.gpio_wake);
+err_gpio_wake_req:
+err_gpio_host_wake_dir:
+	gpio_free(bt_lpm.gpio_host_wake);
+err_gpio_host_wake_req:
+#endif
+err_gpio_enable_dir:
+	gpio_free(bt_lpm.gpio_enable_bt);
+err_gpio_enable_req:
+err_data_probe:
+	return ret;
+}
+
+static int bcm43xx_bluetooth_remove(struct platform_device *pdev)
+{
+	rfkill_unregister(bt_rfkill);
+	rfkill_destroy(bt_rfkill);
+
+	gpio_free(bt_lpm.gpio_enable_bt);
+#ifdef LPM_ON
+	gpio_free(bt_lpm.gpio_wake);
+	gpio_free(bt_lpm.gpio_host_wake);
+#endif
+	return 0;
+}
+#ifdef LPM_ON
+int bcm43xx_bluetooth_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	int host_wake;
+
+	pr_debug("%s\n", __func__);
+
+	if (!bt_enabled)
+		return 0;
+
+	disable_irq(bt_lpm.int_host_wake);
+	host_wake = gpio_get_value(bt_lpm.gpio_host_wake);
+	if (host_wake) {
+		enable_irq(bt_lpm.int_host_wake);
+		pr_err("%s suspend error, gpio %d set\n", __func__,
+							bt_lpm.gpio_host_wake);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+int bcm43xx_bluetooth_resume(struct platform_device *pdev)
+{
+	pr_debug("%s\n", __func__);
+
+	if (bt_enabled)
+		enable_irq(bt_lpm.int_host_wake);
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_ACPI
+static struct acpi_device_id bcm_id_table[] = {
+	/* ACPI IDs here */
+	{ "BCM2E1A", 0 },
+	{ "BCM2E3A", 0 },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(acpi, bcm_id_table);
+#endif
+
+static struct platform_driver bcm43xx_bluetooth_platform_driver = {
+	.probe = bcm43xx_bluetooth_probe,
+	.remove = bcm43xx_bluetooth_remove,
+#ifdef LPM_ON
+	.suspend = bcm43xx_bluetooth_suspend,
+	.resume = bcm43xx_bluetooth_resume,
+#endif
+	.driver = {
+		   .name = "bcm_bt_lpm",
+		   .owner = THIS_MODULE,
+#ifdef CONFIG_ACPI
+		.acpi_match_table = ACPI_PTR(bcm_id_table),
+#endif
+		   },
+};
+
+static int __init bcm43xx_bluetooth_init(void)
+{
+	bt_enabled = false;
+	return platform_driver_register(&bcm43xx_bluetooth_platform_driver);
+}
+
+static void __exit bcm43xx_bluetooth_exit(void)
+{
+	platform_driver_unregister(&bcm43xx_bluetooth_platform_driver);
+}
+
+
+module_init(bcm43xx_bluetooth_init);
+module_exit(bcm43xx_bluetooth_exit);
+
+MODULE_ALIAS("platform:bcm43xx");
+MODULE_DESCRIPTION("bcm43xx_bluetooth");
+MODULE_AUTHOR("Jaikumar Ganesh <jaikumar@google.com>");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/misc/emmc_ipanic.c b/drivers/misc/emmc_ipanic.c
new file mode 100644
index 0000000..d1bdfdd
--- /dev/null
+++ b/drivers/misc/emmc_ipanic.c
@@ -0,0 +1,1158 @@
+/*
+ * drivers/misc/emmc_ipanic.c
+ *
+ * Copyright (C) 2011 Intel Corp
+ * Author: dongxing.zhang@intel.com
+ * Author: jun.zhang@intel.com
+ * Author: chuansheng.liu@intel.com
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/notifier.h>
+#include <linux/mmc/host.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/preempt.h>
+#include <linux/pci.h>
+#include <linux/nmi.h>
+#include <linux/blkdev.h>
+#include <linux/genhd.h>
+#include <linux/panic_gbuffer.h>
+#include "emmc_ipanic.h"
+
+#include <linux/kmsg_dump.h>
+
+static char *part_label = "";
+module_param(part_label, charp, 0);
+MODULE_PARM_DESC(part_label, "IPanic mmc partition device label (panic)");
+
+static u32 disable_emmc_ipanic;
+core_param(disable_emmc_ipanic, disable_emmc_ipanic, uint, 0644);
+
+static struct mmc_emergency_info emmc_info = {
+	.init = mmc_emergency_init,
+	.write = mmc_emergency_write,
+	.part_label = CONFIG_EMMC_IPANIC_PLABEL,
+};
+
+static unsigned char *ipanic_proc_entry_name[PROC_MAX_ENTRIES] = {
+	"emmc_ipanic_header",
+	"emmc_ipanic_console",
+	"emmc_ipanic_threads",
+	"emmc_ipanic_gbuffer"
+};
+
+static int in_panic;
+static struct emmc_ipanic_data drv_ctx;
+static struct work_struct proc_removal_work;
+static int log_offset[IPANIC_LOG_MAX];
+static int log_len[IPANIC_LOG_MAX];	/* sector count */
+static int log_size[IPANIC_LOG_MAX];	/* byte count */
+static size_t log_head[IPANIC_LOG_MAX];
+static size_t log_woff[IPANIC_LOG_MAX];
+static unsigned char last_chunk_buf[SECTOR_SIZE];
+static int last_chunk_buf_len;
+static DEFINE_MUTEX(drv_mutex);
+static void (*func_stream_emmc) (void);
+
+static struct kmsg_dumper ipanic_dumper;
+
+static void emmc_panic_erase(unsigned char *buffer, Sector *sect)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	struct mmc_emergency_info *emmc = ctx->emmc;
+	unsigned char *read_buf_ptr = buffer;
+	Sector new_sect;
+	int rc;
+
+	if (!emmc) {
+		pr_err("%s:invalid emmc infomation\n", __func__);
+		return;
+	}
+
+	if (!read_buf_ptr || !sect) {
+		sect = &new_sect;
+		if (!emmc->bdev) {
+			pr_err("%s:invalid emmc block device\n", __func__);
+			goto out;
+		}
+		/* make sure the block device is open rw */
+		rc = blkdev_get(emmc->bdev, FMODE_READ | FMODE_WRITE,
+				emmc_panic_erase);
+		if (rc < 0) {
+			pr_err("%s: blk_dev_get failed!\n", __func__);
+			goto out;
+		}
+
+		/* read panic header */
+		read_buf_ptr =
+		    read_dev_sector(emmc->bdev, emmc->start_block, sect);
+		if (!read_buf_ptr) {
+			pr_err("%s: read sector error(%llu)!\n",
+			       __func__, (u64) emmc->start_block);
+			goto out;
+		}
+	}
+
+	/* write all zeros to panic header */
+	lock_page(sect->v);
+	memset(read_buf_ptr, 0, SECTOR_SIZE);
+	set_page_dirty(sect->v);
+	unlock_page(sect->v);
+	sync_blockdev(emmc->bdev);
+
+	if (!read_buf_ptr)
+		put_dev_sector(*sect);
+out:
+	memset(&ctx->hdr, 0, SECTOR_SIZE);
+	return;
+}
+
+static int emmc_read(struct mmc_emergency_info *emmc, void *holder,
+		     char *buffer, off_t offset, int count, bool to_user)
+{
+	unsigned char *read_ptr;
+	unsigned int sector_no;
+	off_t sector_offset;
+	Sector sect;
+	int rc;
+
+	if (!emmc) {
+		pr_err("%s:invalid emmc infomation\n", __func__);
+		return 0;
+	}
+	if (!emmc->bdev) {
+		pr_err("%s:invalid emmc block device\n", __func__);
+		return 0;
+	}
+
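+	/* Split the byte offset into a sector number and an offset within
+	 * that sector.
+	 */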
+	sector_no = offset >> SECTOR_SIZE_SHIFT;
+	sector_offset = offset & (SECTOR_SIZE - 1);
+	if (sector_no >= emmc->block_count) {
+		pr_err("%s: reading an invalid address\n", __func__);
+		return -EINVAL;
+	}
+
+	/* make sure the block device is open rw */
+	rc = blkdev_get(emmc->bdev, FMODE_READ | FMODE_WRITE, holder);
+	if (rc < 0) {
+		pr_err("%s: blk_dev_get failed!\n", __func__);
+		return 0;
+	}
+
+	read_ptr = read_dev_sector(emmc->bdev, sector_no + emmc->start_block,
+				   &sect);
+	if (!read_ptr) {
+		put_dev_sector(sect);
+		return -EINVAL;
+	}
+	/* count and read_ptr are updated to match flash page size */
+	if (count + sector_offset > SECTOR_SIZE)
+		count = SECTOR_SIZE - sector_offset;
+
+	if (sector_offset)
+		read_ptr += sector_offset;
+
+	if (to_user) {
+		if (copy_to_user(buffer, read_ptr, count)) {
+			pr_err("%s: Failed to copy buffer to User\n", __func__);
+			return 0;
+		}
+	} else
+		memcpy(buffer, read_ptr, count);
+
+	put_dev_sector(sect);
+
+	return count;
+}
+
+static ssize_t emmc_ipanic_gbuffer_proc_read(struct file *file,
+					     char __user *buffer, size_t count,
+					     loff_t *ppos)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	size_t log_len, log_head;
+	off_t log_off;
+	int rc;
+
+	if (!ctx) {
+		pr_err("%s:invalid panic handler\n", __func__);
+		return 0;
+	}
+
+	if (!count)
+		return 0;
+
+	mutex_lock(&drv_mutex);
+
+	log_off = ctx->curr.log_offset[IPANIC_LOG_GBUFFER];
+	log_len = ctx->curr.log_length[IPANIC_LOG_GBUFFER];
+	log_head = ctx->curr.log_head[IPANIC_LOG_GBUFFER];
+
+	if (*ppos >= log_len) {
+		mutex_unlock(&drv_mutex);
+		return 0;
+	}
+
+	if (*ppos < log_len - log_head) {
+		/* No overflow (log_head == 0)
+		 * or
+		 * overflow 2nd part buf (log_head = log_woff)
+		 * |-------w--------|
+		 *           off^
+		 *         |--------|
+		 */
+		log_off += log_head;
+		log_len -= log_head;
+	} else {
+		/* 1st part buf
+		 * |-------w--------|
+		 *   off^
+		 * |-------|
+		 */
+		*ppos -= (log_len - log_head);
+		log_len = log_head;
+	}
+
+	if ((*ppos + count) > log_len)
+		count = log_len - *ppos;
+
+	rc = emmc_read(ctx->emmc, emmc_ipanic_gbuffer_proc_read,
+		       buffer, log_off + *ppos, count, true);
+	if (rc <= 0) {
+		mutex_unlock(&drv_mutex);
+		pr_err("%s: emmc_read: invalid args: offset:0x%08llx, count:%zd\n",
+		       __func__, (u64)(log_off + *ppos), count);
+		return rc;
+	}
+
+	*ppos += rc;
+
+	mutex_unlock(&drv_mutex);
+
+	return rc;
+}
+
+static ssize_t emmc_ipanic_proc_read_by_log(struct file *file,
+					    char __user *buffer, size_t count,
+					    loff_t *ppos, int log)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	size_t file_length;
+	off_t file_offset;
+	int rc;
+
+	if (!ctx) {
+		pr_err("%s:invalid panic handler\n", __func__);
+		return 0;
+	}
+
+	if (!count)
+		return 0;
+
+	if (log < 0 || log > IPANIC_LOG_MAX) {
+		pr_err("%s: Bad log number (%d)\n", __func__, log);
+		return -EINVAL;
+	}
+
+	mutex_lock(&drv_mutex);
+
+	if (log == IPANIC_LOG_HEADER) {
+		file_length = ctx->hdr.log_size;
+		file_offset = offsetof(struct panic_header, panic);
+	} else {
+		file_length = ctx->curr.log_length[log];
+		file_offset = ctx->curr.log_offset[log];
+	}
+
+	if (*ppos >= file_length) {
+		mutex_unlock(&drv_mutex);
+		return 0;
+	}
+
+	if ((*ppos + count) > file_length)
+		count = file_length - *ppos;
+
+	rc = emmc_read(ctx->emmc, emmc_ipanic_proc_read_by_log,
+		       buffer, file_offset + *ppos, count, true);
+	if (rc <= 0) {
+		mutex_unlock(&drv_mutex);
+		pr_err("%s: emmc_read: invalid args: offset:0x%08llx, count:%zd\n",
+		       __func__, (u64)(file_offset + *ppos), count);
+		return rc;
+	}
+
+	*ppos += rc;
+
+	mutex_unlock(&drv_mutex);
+
+	return rc;
+}
+
+static ssize_t emmc_ipanic_proc_read_hdr(struct file *file,
+					 char __user *buffer, size_t count,
+					 loff_t *ppos)
+{
+	return emmc_ipanic_proc_read_by_log(file, buffer, count, ppos,
+					    IPANIC_LOG_HEADER);
+}
+
+static ssize_t emmc_ipanic_proc_read0(struct file *file, char __user *buffer,
+				      size_t count, loff_t *ppos)
+{
+	return emmc_ipanic_proc_read_by_log(file, buffer, count, ppos,
+					    IPANIC_LOG_CONSOLE);
+}
+
+static ssize_t emmc_ipanic_proc_read1(struct file *file, char __user *buffer,
+				      size_t count, loff_t *ppos)
+{
+	return emmc_ipanic_proc_read_by_log(file, buffer, count, ppos,
+					    IPANIC_LOG_THREADS);
+}
+
+static void emmc_ipanic_remove_proc_work(struct work_struct *work)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	int log;
+
+	mutex_lock(&drv_mutex);
+	emmc_panic_erase(NULL, NULL);
+
+	for (log = 0; log < PROC_MAX_ENTRIES; log++) {
+		if (ctx->ipanic_proc_entry[log]) {
+			remove_proc_entry(ctx->ipanic_proc_entry_name
+					  [log], NULL);
+			ctx->ipanic_proc_entry[log] = NULL;
+		}
+	}
+	mutex_unlock(&drv_mutex);
+}
+
+static ssize_t emmc_ipanic_proc_write(struct file *file,
+				      const char __user *buffer,
+				      size_t count, loff_t *ppos)
+{
+	schedule_work(&proc_removal_work);
+	return count;
+}
+
+/* In section order inside panic partition : */
+static const struct file_operations ipanic_emmc_read_header_fops = {
+	.read = emmc_ipanic_proc_read_hdr,
+	.write = emmc_ipanic_proc_write,
+};
+
+static const struct file_operations ipanic_emmc0_fops = {
+	.read = emmc_ipanic_proc_read0,
+	.write = emmc_ipanic_proc_write,
+};
+
+static const struct file_operations ipanic_emmc1_fops = {
+	.read = emmc_ipanic_proc_read1,
+	.write = emmc_ipanic_proc_write,
+};
+
+static const struct file_operations ipanic_emmc_gbuffer_fops = {
+	.read = emmc_ipanic_gbuffer_proc_read,
+	.write = emmc_ipanic_proc_write
+};
+
+static void emmc_panic_notify_add(void)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	struct mmc_emergency_info *emmc;
+	unsigned char *read_buf_ptr;
+	Sector sect;
+	int rc, idx_log, idx_proc;
+	int proc_entry_created = 0;
+
+	if (!ctx) {
+		pr_err("%s:invalid panic handler\n", __func__);
+		return;
+	}
+
+	emmc = ctx->emmc;
+	if (!emmc) {
+		pr_err("%s:invalid emmc infomation\n", __func__);
+		goto out_err;
+	}
+
+	if (!emmc->bdev) {
+		pr_err("%s:invalid emmc block device\n", __func__);
+		goto out_err;
+	}
+
+	/* make sure the block device is open rw */
+	rc = blkdev_get(emmc->bdev, FMODE_READ | FMODE_WRITE,
+			emmc_panic_notify_add);
+	if (rc < 0) {
+		pr_err("%s: blk_dev_get failed!\n", __func__);
+		goto out_err;
+	}
+
+	/* read panic header */
+	read_buf_ptr = read_dev_sector(emmc->bdev, emmc->start_block, &sect);
+	if (!read_buf_ptr) {
+		pr_err("%s: read sector error(%llu)!\n", __func__,
+		       (u64) emmc->start_block);
+		return;
+	}
+
+	memcpy(&ctx->hdr, read_buf_ptr, sizeof(struct panic_header));
+
+	if (ctx->hdr.magic != PANIC_MAGIC) {
+		pr_info("%s: bad magic %x, no data available\n",
+			__func__, ctx->hdr.magic);
+		emmc_panic_erase(read_buf_ptr, &sect);
+		goto put_sector;
+	}
+
+	pr_info("%s: Data available in panic partition\n", __func__);
+
+	if (ctx->hdr.version != PHDR_VERSION) {
+		pr_err("%s: Version mismatch (%d != %d)\n",
+		       __func__, ctx->hdr.version, PHDR_VERSION);
+		emmc_panic_erase(read_buf_ptr, &sect);
+		goto put_sector;
+	}
+
+	/* Create proc entry for the panic header */
+	ctx->ipanic_proc_entry[PROC_HEADER_INDEX] =
+	    proc_create(ctx->ipanic_proc_entry_name
+			[PROC_HEADER_INDEX], S_IFREG | S_IRUGO, NULL,
+			&ipanic_emmc_read_header_fops);
+
+	if (!ctx->ipanic_proc_entry[PROC_HEADER_INDEX])
+		pr_err("%s: failed creating proc file\n", __func__);
+	else {
+		proc_entry_created = 1;
+		pr_info("%s: proc entry created: %s\n", __func__,
+			ctx->ipanic_proc_entry_name[PROC_HEADER_INDEX]);
+	}
+
+	/* read log_info to retrieve block numbers and offsets */
+	read_buf_ptr =
+	    read_dev_sector(emmc->bdev, emmc->start_block + 1, &sect);
+	if (!read_buf_ptr) {
+		pr_err("%s: read sector error(%llu)!\n", __func__,
+		       (u64) emmc->start_block + 1);
+		return;
+	}
+
+	memcpy(&ctx->curr, read_buf_ptr, sizeof(struct log_info));
+
+	/* Log files other than header */
+	for (idx_log = 0; idx_log < IPANIC_LOG_MAX; idx_log++) {
+
+		pr_info("%s: log file %u(%u, %u)\n", __func__, idx_log,
+			ctx->curr.log_offset[idx_log],
+			ctx->curr.log_length[idx_log]);
+
+		/* Skip empty file. */
+		if (ctx->curr.log_length[idx_log] == 0) {
+			pr_info("%s: empty log file %u\n", __func__, idx_log);
+			continue;
+		}
+
+		/* Create proc entry for console, threads and gbuffer log. */
+		if (idx_log == IPANIC_LOG_CONSOLE) {
+			idx_proc = PROC_CONSOLE_INDEX;
+			ctx->ipanic_proc_entry[PROC_CONSOLE_INDEX] =
+			    proc_create(ctx->ipanic_proc_entry_name
+					[PROC_CONSOLE_INDEX], S_IFREG | S_IRUGO,
+					NULL, &ipanic_emmc0_fops);
+		} else if (idx_log == IPANIC_LOG_THREADS) {
+			idx_proc = PROC_THREADS_INDEX;
+			ctx->ipanic_proc_entry[PROC_THREADS_INDEX] =
+			    proc_create(ctx->ipanic_proc_entry_name
+					[PROC_THREADS_INDEX], S_IFREG | S_IRUGO,
+					NULL, &ipanic_emmc1_fops);
+		} else if (idx_log == IPANIC_LOG_GBUFFER) {
+			idx_proc = PROC_GBUFFER_INDEX;
+			ctx->ipanic_proc_entry[PROC_GBUFFER_INDEX] =
+			    proc_create(ctx->ipanic_proc_entry_name
+					[PROC_GBUFFER_INDEX], S_IFREG | S_IRUGO,
+					NULL, &ipanic_emmc_gbuffer_fops);
+		} else {
+			/* No proc entry for this index */
+			idx_proc = 0;
+			continue;
+		}
+		if (!ctx->ipanic_proc_entry[idx_proc])
+			pr_err("%s: failed creating proc file\n", __func__);
+		else {
+			proc_entry_created = 1;
+			pr_info("%s: proc entry created: %s\n",
+				__func__,
+				ctx->ipanic_proc_entry_name[idx_proc]);
+		}
+	}
+
+	if (!proc_entry_created)
+		emmc_panic_erase(read_buf_ptr, &sect);
+
+put_sector:
+	put_dev_sector(sect);
+	return;
+out_err:
+	ctx->emmc = NULL;
+}
+
+static void emmc_panic_notify_remove(void)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+
+	if (ctx->emmc && ctx->emmc->part_dev) {
+		put_device(ctx->emmc->part_dev);
+		ctx->emmc->bdev = NULL;
+	}
+
+	ctx->emmc = NULL;
+}
+
+static int emmc_ipanic_writeflashpage(struct mmc_emergency_info *emmc,
+				      loff_t to, const u_char *buf)
+{
+	int rc;
+	size_t wlen = SECTOR_SIZE;
+
+	if (to >= emmc->start_block + emmc->block_count) {
+		pr_emerg("%s: panic partition is full.\n", __func__);
+		return 0;
+	}
+
+	rc = emmc->write((char *)buf, (unsigned int)to);
+	if (rc) {
+		pr_emerg("%s: Error writing data to flash (%d)\n",
+			 __func__, rc);
+		return rc;
+	}
+
+	return wlen;
+}
+
+/*
+ * Writes the contents of the console to the specified offset in flash.
+ * Returns number of bytes written
+ */
+static int emmc_ipanic_write_console(struct mmc_emergency_info *emmc,
+				     unsigned int off, int *actual_size)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	int saved_oip, rc, block_shift = 0, bounce_idx = 0;
+	size_t line_len = 0;
+	bool ret;
+
+	static unsigned char line[SECTOR_SIZE];
+
+	*actual_size = 0;
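+	/*
+	 * Assemble kmsg lines into SECTOR_SIZE chunks; a line that does
+	 * not fit in the current sector is split and its tail carried in
+	 * last_chunk_buf for the next pass.
+	 */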
+	while (1) {
+		saved_oip = oops_in_progress;
+		oops_in_progress = 1;
+		bounce_idx = 0;
+
+		if (last_chunk_buf_len) {
+			memcpy(ctx->bounce, last_chunk_buf, last_chunk_buf_len);
+			bounce_idx += last_chunk_buf_len;
+			last_chunk_buf_len = 0;
+		}
+
+		do {
+			ret = kmsg_dump_get_line(&ipanic_dumper, false,
+						 line, SECTOR_SIZE, &line_len);
+
+			if (ret) {
+				if (bounce_idx + line_len < SECTOR_SIZE) {
+					memcpy(ctx->bounce + bounce_idx,
+					       line, line_len);
+					bounce_idx += line_len;
+				} else {
+					int len = SECTOR_SIZE - bounce_idx;
+					memcpy(ctx->bounce + bounce_idx,
+					       line, len);
+					bounce_idx = SECTOR_SIZE;
+					memcpy(last_chunk_buf,
+					       line + len, line_len - len);
+					last_chunk_buf_len = line_len - len;
+				}
+			}
+		} while (ret && (bounce_idx != SECTOR_SIZE));
+
+		oops_in_progress = saved_oip;
+
+		/* If it is the last chunk, just copy it to last chunk
+		 * buffer and exit loop.
+		 */
+		if (!ret) {
+			/* Leave the last chunk for next writing */
+			memcpy(last_chunk_buf, ctx->bounce, bounce_idx);
+			last_chunk_buf_len = bounce_idx;
+			break;
+		}
+
+		rc = emmc_ipanic_writeflashpage(emmc, off + block_shift,
+						ctx->bounce);
+		if (rc <= 0) {
+			pr_emerg("%s: Flash write failed (%d)\n", __func__, rc);
+			return block_shift;
+		}
+
+		block_shift++;
+		*actual_size += SECTOR_SIZE;
+	}
+
+	return block_shift;
+}
+
+static void emmc_ipanic_flush_lastchunk_emmc(loff_t to,
+					     int *size_written,
+					     int *sector_written)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	struct mmc_emergency_info *emmc = ctx->emmc;
+	int rc = 0;
+
+	if (last_chunk_buf_len) {
+		memset(last_chunk_buf + last_chunk_buf_len, 0,
+		       SECTOR_SIZE - last_chunk_buf_len);
+
+		rc = emmc_ipanic_writeflashpage(emmc, to, last_chunk_buf);
+		if (rc <= 0) {
+			pr_emerg("emmc_ipanic: write last chunk failed (%d)\n",
+				 rc);
+			return;
+		}
+
+		*size_written += last_chunk_buf_len;
+		(*sector_written)++;
+		last_chunk_buf_len = 0;
+	}
+	return;
+}
+
+static void emmc_ipanic_write_thread_func(void)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	struct mmc_emergency_info *emmc = ctx->emmc;
+	int size_written;
+	int thread_sector_count;
+
+	thread_sector_count =
+	    emmc_ipanic_write_console(emmc,
+				      log_offset[IPANIC_LOG_THREADS] +
+				      log_len[IPANIC_LOG_THREADS],
+				      &size_written);
+	if (thread_sector_count < 0) {
+		pr_emerg("Error writing threads to panic log! (%d)\n",
+			 log_len[IPANIC_LOG_THREADS]);
+		return;
+	}
+	log_size[IPANIC_LOG_THREADS] += size_written;
+	log_len[IPANIC_LOG_THREADS] += thread_sector_count;
+
+	/* reset the log buffer */
+	log_buf_clear();
+	kmsg_dump_rewind(&ipanic_dumper);
+}
+
+static void emmc_ipanic_write_logbuf(struct mmc_emergency_info *emmc, int log)
+{
+	/*
+	 * Write the log data from the third block :
+	 * - the first block is reserved for panic header
+	 * - the second one is reserved for offset information
+	 */
+	log_offset[log] = emmc->start_block + 2;
+	log_len[log] = emmc_ipanic_write_console(emmc, log_offset[log],
+						 &log_size[log]);
+	if (log_size[log] < 0) {
+		pr_emerg("Error writing console to panic log! (%d)\n",
+			 log_len[log]);
+		log_size[log] = 0;
+		log_len[log] = 0;
+	}
+	/* flush last chunk buffer for console */
+	emmc_ipanic_flush_lastchunk_emmc(log_offset[log] +
+					 log_len[log],
+					 &log_size[log], &log_len[log]);
+}
+
+static void emmc_ipanic_write_calltrace(struct mmc_emergency_info *emmc,
+					int log)
+{
+	log_offset[log] = log_offset[log - 1] + log_len[log - 1];
+	/*
+	 * config func_stream_emmc to emmc_ipanic_write_thread_func to
+	 * stream thread call trace.
+	 */
+	log_buf_clear();
+	kmsg_dump_rewind(&ipanic_dumper);
+	func_stream_emmc = emmc_ipanic_write_thread_func;
+	show_state_filter(0);
+
+	/* flush last chunk buffer */
+	emmc_ipanic_flush_lastchunk_emmc(log_offset[log] +
+					 log_len[log],
+					 &log_size[log], &log_len[log]);
+}
+
+static int emmc_ipanic_write_gbuffer_data(struct mmc_emergency_info *emmc,
+					  struct g_buffer_header *gbuffer,
+					  unsigned int off, int *actual_size)
+{
+	int rc, block_shift = 0;
+	size_t log_off = 0;
+	size_t log_size;
+	unsigned char *buf = gbuffer->base;
+
+	if (gbuffer->head)
+		/* has overflow */
+		log_size = gbuffer->size;
+	else
+		/* no overflow */
+		log_size = gbuffer->woff;
+
+	while (log_off < log_size) {
+		size_t size_copy = log_size - log_off;
+		if (size_copy < SECTOR_SIZE) {
+			/*
+			 * flash page not complete, flushed with
+			 * emmc_ipanic_flush_lastchunk_emmc
+			 */
+			memcpy(last_chunk_buf, buf + log_off, size_copy);
+			last_chunk_buf_len = size_copy;
+			break;
+		}
+		rc = emmc_ipanic_writeflashpage(emmc, off + block_shift,
+						buf + log_off);
+		if (rc <= 0) {
+			pr_emerg("%s: Flash write failed (%d)\n", __func__, rc);
+			return 0;
+		}
+		log_off += rc;
+		block_shift++;
+	}
+	*actual_size = log_off;
+
+	return block_shift;
+}
+
+static struct g_buffer_header gbuffer = {
+	.base = NULL,
+};
+
+static void emmc_ipanic_write_gbuffer(struct mmc_emergency_info *emmc, int log)
+{
+	struct g_buffer_header *m_gbuffer = &gbuffer;
+
+	log_offset[log] = log_offset[log - 1] + log_len[log - 1];
+
+	pr_info("write gbuffer data\n");
+	if (!m_gbuffer->base) {
+		pr_err("Ipanic error, no gbuffer data\n");
+		return;
+	}
+
+	log_len[log] = emmc_ipanic_write_gbuffer_data(emmc, m_gbuffer,
+						      log_offset[log],
+						      &log_size[log]);
+	if (log_len[log] < 0) {
+		pr_emerg("Error writing gbuffer to panic log! (%d)\n",
+			 log_len[log]);
+		log_size[log] = 0;
+		log_len[log] = 0;
+	}
+	/* flush last chunk buffer */
+	emmc_ipanic_flush_lastchunk_emmc(log_offset[log] + log_len[log],
+					 &log_size[log], &log_len[log]);
+	log_head[log] = m_gbuffer->head;
+	log_woff[log] = m_gbuffer->woff;
+	pr_info("write gbuffer data END\n");
+}
+
+/*
+ * Declared in <linux/panic_gbuffer.h>
+ */
+void panic_set_gbuffer(struct g_buffer_header *buf)
+{
+	if (gbuffer.base) {
+		pr_err("%s: gbuffer already set to 0x%p, cannot set again\n",
+		       __func__, gbuffer.base);
+		return;
+	}
+
+	gbuffer.base = buf->base;
+	gbuffer.size = buf->size;
+	gbuffer.woff = buf->woff;
+	gbuffer.head = buf->head;
+}
+EXPORT_SYMBOL(panic_set_gbuffer);
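+
+/*
+ * Illustrative caller (hypothetical module): a client that owns a ring
+ * buffer hands it over once at init time; the field names follow struct
+ * g_buffer_header as consumed above.
+ *
+ *	static unsigned char my_ring[4096];
+ *	static struct g_buffer_header my_gbuf = {
+ *		.base = my_ring,
+ *		.size = sizeof(my_ring),
+ *		.woff = 0,
+ *		.head = 0,
+ *	};
+ *
+ *	panic_set_gbuffer(&my_gbuf);
+ */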
+
+static void emmc_ipanic_write_pageheader(struct mmc_emergency_info *emmc)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	struct panic_header *hdr = (struct panic_header *)ctx->bounce;
+	int wc;
+	size_t len, total, max;
+
+	memset(ctx->bounce, 0, SECTOR_SIZE);
+	hdr->magic = PANIC_MAGIC;
+	hdr->version = PHDR_VERSION;
+
+	total = snprintf(hdr->panic, SECTOR_SIZE, "###Kernel panic###\n");
+
+	max = SECTOR_SIZE - offsetof(struct panic_header, panic) - total;
+	kmsg_dump_get_buffer(&ipanic_dumper, false, last_chunk_buf, max, &len);
+	kmsg_dump_rewind(&ipanic_dumper);
+
+	memcpy(hdr->panic + total, last_chunk_buf, len);
+	hdr->log_size = len + total;
+
+	/* Write header block */
+	wc = emmc_ipanic_writeflashpage(emmc, emmc->start_block, ctx->bounce);
+	if (wc <= 0) {
+		pr_emerg("emmc_ipanic: Info write failed (%d)\n", wc);
+		return;
+	}
+}
+
+static void emmc_ipanic_clean_loginfo(struct mmc_emergency_info *emmc)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	int rc;
+
+	memset(log_offset, 0, IPANIC_LOG_MAX * sizeof(int));
+	memset(log_len, 0, IPANIC_LOG_MAX * sizeof(int));
+	memset(log_size, 0, IPANIC_LOG_MAX * sizeof(int));
+
+	memset(ctx->bounce, 0, SECTOR_SIZE);
+
+	rc = emmc_ipanic_writeflashpage(emmc, emmc->start_block + 1,
+					ctx->bounce);
+	if (rc <= 0) {
+		pr_emerg("emmc_ipanic: Header write failed (%d)\n", rc);
+		return;
+	}
+}
+
+static void emmc_ipanic_write_loginfo(struct mmc_emergency_info *emmc,
+				      int newlog)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	struct log_info *info = (struct log_info *)ctx->bounce;
+	int log = IPANIC_LOG_CONSOLE;
+	int rc;
+
+	if ((newlog < 0) || (newlog >= IPANIC_LOG_MAX))
+		return;
+
+	if (log_size[newlog] == 0)
+		return;
+
+	memset(ctx->bounce, 0, SECTOR_SIZE);
+	/* Fill in the log offsets and sizes */
+	while (log < IPANIC_LOG_MAX) {
+		/* Configure log offset and log size */
+		info->log_offset[log] = (log_offset[log] - emmc->start_block)
+		    << SECTOR_SIZE_SHIFT;
+		info->log_length[log] = log_size[log];
+		info->log_head[log] = log_head[log];
+		info->log_woff[log] = log_woff[log];
+		log++;
+	}
+	rc = emmc_ipanic_writeflashpage(emmc, emmc->start_block + 1,
+					ctx->bounce);
+	if (rc <= 0) {
+		pr_emerg("emmc_ipanic: Header write failed (%d)\n", rc);
+		return;
+	}
+}
+
+static int emmc_ipanic(struct notifier_block *this, unsigned long event,
+		       void *ptr)
+{
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	struct mmc_emergency_info *emmc;
+	int rc, log;
+
+	pr_emerg("panic notified\n");
+
+	if (in_panic || disable_emmc_ipanic)
+		return NOTIFY_DONE;
+
+	in_panic = 1;
+
+#ifdef CONFIG_PREEMPT
+	/* Ensure that cond_resched() won't try to preempt anybody */
+	add_preempt_count(PREEMPT_ACTIVE);
+#endif
+	touch_nmi_watchdog();
+
+	if (!ctx)
+		goto out;
+	emmc = ctx->emmc;
+	if (!emmc)
+		goto out;
+	if (ctx->hdr.magic) {
+		pr_emerg("Crash partition in use!\n");
+		goto out;
+	}
+
+	rc = emmc->init();
+	if (rc) {
+		pr_emerg("eMMC emergency driver was not initialized successfully, rc=%d\n",
+			 rc);
+		goto out;
+	}
+
+	/* Prepare kmsg dumper */
+	ipanic_dumper.active = 1;
+	/* Rewind kmsg dumper */
+	kmsg_dump_rewind(&ipanic_dumper);
+
+	/* Write emmc ipanic partition header */
+	emmc_ipanic_write_pageheader(emmc);
+	/* Clean emmc ipanic sections offsets */
+	emmc_ipanic_clean_loginfo(emmc);
+
+	/* Write all buffers into emmc */
+	log = IPANIC_LOG_CONSOLE;
+	while (log < IPANIC_LOG_MAX) {
+		/* Clear temporary buffer */
+		memset(ctx->bounce, 0, SECTOR_SIZE);
+		/* Log every buffer into emmc */
+		switch (log) {
+		case IPANIC_LOG_CONSOLE:
+			emmc_ipanic_write_logbuf(emmc, log);
+			break;
+		case IPANIC_LOG_THREADS:
+			emmc_ipanic_write_calltrace(emmc, log);
+			break;
+		case IPANIC_LOG_GBUFFER:
+			emmc_ipanic_write_gbuffer(emmc, log);
+			break;
+		default:
+			break;
+		}
+		/* Update emmc ipanic sections offsets */
+		emmc_ipanic_write_loginfo(emmc, log);
+		log++;
+	}
+	pr_info("Panic log data written done!\n");
+
+	ipanic_dumper.active = 0;
+
+out:
+#ifdef CONFIG_PREEMPT
+	sub_preempt_count(PREEMPT_ACTIVE);
+#endif
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block panic_blk = {
+	.notifier_call = emmc_ipanic,
+	.priority = 100,
+};
+
+static int panic_dbg_get(void *data, u64 *val)
+{
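+	/* Reading the debugfs file triggers the panic dump path on demand. */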
+	emmc_ipanic(NULL, 0, NULL);
+	return 0;
+}
+
+static int panic_dbg_set(void *data, u64 val)
+{
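+	/* Writing any value deliberately BUG()s to exercise a real panic. */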
+	BUG();
+	return -1;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(panic_dbg_fops, panic_dbg_get, panic_dbg_set, "%llu\n");
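+
+/*
+ * Debugfs usage (a sketch, assuming debugfs is mounted at the usual
+ * /sys/kernel/debug):
+ *
+ *	cat /sys/kernel/debug/emmc_ipanic	# dump now (panic_dbg_get)
+ *	echo 1 > /sys/kernel/debug/emmc_ipanic	# BUG() (panic_dbg_set)
+ */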
+
+static int match_dev_panic_part(struct device *dev, const void *data)
+{
+	struct hd_struct *part = dev_to_part(dev);
+	const char *name = (char *)data;
+
+	return part->info && !strcmp(name, part->info->volname);
+}
+
+static int emmc_panic_partition_notify(struct notifier_block *nb,
+				       unsigned long action, void *data)
+{
+	struct device *dev = data;
+	struct emmc_ipanic_data *ctx = &drv_ctx;
+	struct mmc_emergency_info *emmc;
+	struct gendisk *disk;
+
+	if (!ctx) {
+		pr_err("%s: invalid panic handler\n", __func__);
+		return 0;
+	}
+
+	emmc = ctx->emmc;
+	if (!emmc) {
+		pr_err("%s: invalid emmc information\n", __func__);
+		return 0;
+	}
+
+	switch (action) {
+	case BUS_NOTIFY_ADD_DEVICE:
+	case BUS_NOTIFY_BOUND_DRIVER:
+		/* if emmc already found, exit the function */
+		if (emmc->bdev)
+			return 0;
+
+		emmc->part_dev = class_find_device(&block_class, NULL,
+						   emmc->part_label,
+						   &match_dev_panic_part);
+		if (emmc->part_dev) {
+			emmc->part = dev_to_part(emmc->part_dev);
+			if (!emmc->part) {
+				pr_err("unable to get partition\n");
+				goto put_dev;
+			}
+
+			disk = part_to_disk(emmc->part);
+			if (!disk) {
+				pr_err("unable to get disk\n");
+				goto put_dev;
+			}
+
+			/* get whole disk */
+			emmc->bdev = bdget_disk(disk, 0);
+			if (!emmc->bdev) {
+				pr_err("unable to get emmc block device\n");
+				goto put_dev;
+			}
+
+			emmc->start_block = emmc->part->start_sect;
+			emmc->block_count = emmc->part->nr_sects;
+
+			pr_info("panic partition found, label:%s, device:%s\n",
+				emmc->part_label, dev_name(emmc->part_dev));
+
+			/* notify to add the panic device */
+			emmc_panic_notify_add();
+
+			atomic_notifier_chain_register(&panic_notifier_list,
+						       &panic_blk);
+
+			INIT_WORK(&proc_removal_work,
+				  emmc_ipanic_remove_proc_work);
+		}
+		break;
+	case BUS_NOTIFY_DEL_DEVICE:
+	case BUS_NOTIFY_UNBIND_DRIVER:
+		if (match_dev_panic_part(dev, emmc->part_label)) {
+			pr_info("bus notify removed device '%s', cleaning.\n",
+				dev_name(dev));
+			flush_scheduled_work();
+			atomic_notifier_chain_unregister(&panic_notifier_list,
+							 &panic_blk);
+			emmc_panic_notify_remove();
+		}
+		break;
+	case BUS_NOTIFY_BIND_DRIVER:
+	case BUS_NOTIFY_UNBOUND_DRIVER:
+		/* Nothing to do here, but we don't want
+		 * these actions to generate error messages,
+		 * so we need to catch them
+		 */
+		break;
+	default:
+		pr_err("Unknown action (%lu) on %s\n", action, dev_name(dev));
+		return 0;
+	}
+	return 1;
+
+put_dev:
+	put_device(emmc->part_dev);
+	return 0;
+}
+
+static struct notifier_block panic_partition_notifier = {
+	.notifier_call = emmc_panic_partition_notify,
+};
+
+void emmc_ipanic_stream_emmc(void)
+{
+	if (func_stream_emmc)
+		(*func_stream_emmc) ();
+}
+EXPORT_SYMBOL(emmc_ipanic_stream_emmc);
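+
+/*
+ * Streaming hook: emmc_ipanic_write_calltrace() points func_stream_emmc
+ * at emmc_ipanic_write_thread_func() before dumping task states, and an
+ * external caller (assumption: the patched log-buffer writer) invokes
+ * this export whenever the buffer needs draining:
+ *
+ *	emmc_ipanic_stream_emmc();	// flush pending log data to eMMC
+ */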
+
+static struct dentry *emmc_ipanic_d;
+static struct dentry *emmc_ipanic_disable_d;
+
+static int __init emmc_ipanic_init(void)
+{
+	/* initialization of drv_ctx */
+	memset(&drv_ctx, 0, sizeof(drv_ctx));
+	drv_ctx.emmc = &emmc_info;
+
+	if (*part_label)
+		strcpy(emmc_info.part_label, part_label);
+
+	drv_ctx.ipanic_proc_entry_name = ipanic_proc_entry_name;
+	drv_ctx.bounce = (void *)__get_free_page(GFP_KERNEL);
+
+	bus_register_notifier(&pci_bus_type, &panic_partition_notifier);
+
+	emmc_ipanic_d = debugfs_create_file("emmc_ipanic", 0644, NULL, NULL,
+					    &panic_dbg_fops);
+	emmc_ipanic_disable_d = debugfs_create_u32("disable_emmc_ipanic", 0644,
+						   NULL, &disable_emmc_ipanic);
+
+	pr_info("init success\n");
+
+	return 0;
+}
+
+static void __exit emmc_ipanic_exit(void)
+{
+	debugfs_remove(emmc_ipanic_d);
+	debugfs_remove(emmc_ipanic_disable_d);
+	bus_unregister_notifier(&pci_bus_type, &panic_partition_notifier);
+	flush_scheduled_work();
+	atomic_notifier_chain_unregister(&panic_notifier_list, &panic_blk);
+	emmc_panic_notify_remove();
+}
+
+module_init(emmc_ipanic_init);
+module_exit(emmc_ipanic_exit);
diff --git a/drivers/misc/emmc_ipanic.h b/drivers/misc/emmc_ipanic.h
new file mode 100644
index 0000000..541f613
--- /dev/null
+++ b/drivers/misc/emmc_ipanic.h
@@ -0,0 +1,96 @@
+/*
+ * drivers/misc/emmc_ipanic.h
+ *
+ * Copyright (C) 2011 Intel Corp
+ * Author: dongxing.zhang@intel.com
+ * Author: jun.zhang@intel.com
+ * Author: chuansheng.liu@intel.com
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#ifndef _LINUX_EMMC_IPANIC_H
+#define _LINUX_EMMC_IPANIC_H
+
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/blkdev.h>
+#include <linux/genhd.h>
+#include <linux/version.h>
+
+extern void log_buf_clear(void);
+
+#define SECTOR_SIZE_SHIFT (9)
+
+#define PROC_HEADER_INDEX        0
+#define PROC_CONSOLE_INDEX       1
+#define PROC_THREADS_INDEX       2
+#define PROC_GBUFFER_INDEX       3
+#define PROC_MAX_ENTRIES         4
+
+#define IPANIC_LOG_CONSOLE       0
+#define IPANIC_LOG_THREADS       1
+#define IPANIC_LOG_GBUFFER       2
+#define IPANIC_LOG_MAX           3
+#define IPANIC_LOG_HEADER        IPANIC_LOG_MAX
+
+struct mmc_emergency_info {
+#define DISK_NAME_LENGTH 20
+	/* emmc panic partition label */
+	char part_label[PARTITION_META_INFO_VOLNAMELTH];
+
+	struct block_device *bdev;
+	struct device *part_dev;
+	struct hd_struct *part;
+
+	/*panic partition start block */
+	sector_t start_block;
+	/*panic partition block count */
+	sector_t block_count;
+
+	int (*init) (void);
+	int (*write) (char *, unsigned int);
+	int (*read) (char *, unsigned int);
+};
+
+struct panic_header {
+	u32 magic;
+#define PANIC_MAGIC 0xdeadf00d
+
+	u32 version;
+#define PHDR_VERSION   0x01
+	u32 log_size;
+
+	char panic[SECTOR_SIZE];
+};
+
+struct log_info {
+	u32 log_offset[IPANIC_LOG_MAX];
+	u32 log_length[IPANIC_LOG_MAX];
+
+	/* For logcat and generic buffer log status */
+	size_t log_head[IPANIC_LOG_MAX];
+	size_t log_woff[IPANIC_LOG_MAX];
+};
+
+struct emmc_ipanic_data {
+	struct mmc_emergency_info *emmc;
+	struct panic_header hdr;
+	struct log_info curr;
+	void *bounce;
+	struct proc_dir_entry *ipanic_proc_entry[PROC_MAX_ENTRIES];
+	unsigned char **ipanic_proc_entry_name;
+};
+
+#endif /* _LINUX_EMMC_IPANIC_H */
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
index f84ff0c..6ece8b6 100644
--- a/drivers/misc/pti.c
+++ b/drivers/misc/pti.c
@@ -21,6 +21,8 @@
  * compact JTAG, standard.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/interrupt.h>
@@ -36,6 +38,12 @@
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 
+#include <asm/intel_scu_ipc.h>
+
+#ifdef CONFIG_INTEL_PTI_STM
+#include "stm.h"
+#endif
+
 #define DRIVERNAME		"pti"
 #define PCINAME			"pciPTI"
 #define TTYNAME			"ttyPTI"
@@ -55,6 +63,55 @@
 #define APERTURE_14		0x3800000 /* offset to first OS write addr */
 #define APERTURE_LEN		0x400000  /* address length */
 
+#define SMIP_PTI_OFFSET	0x30C  /* offset to PTI config in MIP header */
+#define SMIP_PTI_EN	(1<<7) /* PTI enable bit in PTI configuration */
+
+#define PTI_PNW_PCI_ID			0x082B
+#define PTI_CLV_PCI_ID			0x0900
+#define PTI_TNG_PCI_ID			0x119F
+
+#define INTEL_PTI_PCI_DEVICE(dev, info) {	\
+	.vendor = PCI_VENDOR_ID_INTEL,		\
+	.device = dev,				\
+	.subvendor = PCI_ANY_ID,		\
+	.subdevice = PCI_ANY_ID,		\
+	.driver_data = (unsigned long) info }
+
+struct pti_device_info {
+	u8 pci_bar;
+	u8 scu_secure_mode:1;
+	u8 has_d8_d16_support:1;
+};
+
+static const struct pti_device_info intel_pti_pnw_info = {
+	.pci_bar = 1,
+	.scu_secure_mode = 0,
+	.has_d8_d16_support = 0,
+};
+
+static const struct pti_device_info intel_pti_clv_info = {
+	.pci_bar = 1,
+	.scu_secure_mode = 1,
+	.has_d8_d16_support = 0,
+};
+
+static const struct pti_device_info intel_pti_tng_info = {
+	.pci_bar = 2,
+	.scu_secure_mode = 0,
+	.has_d8_d16_support = 1,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
+	INTEL_PTI_PCI_DEVICE(PTI_PNW_PCI_ID, &intel_pti_pnw_info),
+	INTEL_PTI_PCI_DEVICE(PTI_CLV_PCI_ID, &intel_pti_clv_info),
+	INTEL_PTI_PCI_DEVICE(PTI_TNG_PCI_ID, &intel_pti_tng_info),
+	{0}
+};
+
+#define GET_PCI_BAR(pti_dev) (pti_dev->pti_dev_info->pci_bar)
+#define HAS_SCU_SECURE_MODE(pti_dev) (pti_dev->pti_dev_info->scu_secure_mode)
+#define HAS_D8_D16_SUPPORT(pti_dev) (pti_dev->pti_dev_info->has_d8_d16_support)
+
 struct pti_tty {
 	struct pti_masterchannel *mc;
 };
@@ -67,8 +124,16 @@
 	u8 ia_app[MAX_APP_IDS];
 	u8 ia_os[MAX_OS_IDS];
 	u8 ia_modem[MAX_MODEM_IDS];
+	struct pti_device_info *pti_dev_info;
+#ifdef CONFIG_INTEL_PTI_STM
+	struct stm_dev stm;
+#endif
 };
 
+static unsigned int stm_enabled;
+module_param(stm_enabled, uint, 0600);
+MODULE_PARM_DESC(stm_enabled, "set to 1 to enable stm");
+
 /*
  * This protects access to ia_app, ia_os, and ia_modem,
  * which keeps track of channels allocated in
@@ -76,11 +141,6 @@
  */
 static DEFINE_MUTEX(alloclock);
 
-static const struct pci_device_id pci_ids[] = {
-		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x82B)},
-		{0}
-};
-
 static struct tty_driver *pti_tty_driver;
 static struct pti_dev *drv_data;
 
@@ -801,7 +861,6 @@
 {
 	unsigned int a;
 	int retval = -EINVAL;
-	int pci_bar = 1;
 
 	dev_dbg(&pdev->dev, "%s %s(%d): PTI PCI ID %04x:%04x\n", __FILE__,
 			__func__, __LINE__, pdev->vendor, pdev->device);
@@ -831,9 +890,13 @@
 			__func__, __LINE__);
 		goto err_disable_pci;
 	}
-	drv_data->pti_addr = pci_resource_start(pdev, pci_bar);
 
-	retval = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
+	drv_data->pti_dev_info = (struct pti_device_info *)ent->driver_data;
+
+	drv_data->pti_addr = pci_resource_start(pdev, GET_PCI_BAR(drv_data));
+
+	retval = pci_request_region(pdev, GET_PCI_BAR(drv_data),
+				    dev_name(&pdev->dev));
 	if (retval != 0) {
 		dev_err(&pdev->dev,
 			"%s(%d): pci_request_region() returned error %d\n",
@@ -849,6 +912,14 @@
 		goto err_rel_reg;
 	}
 
+#ifdef CONFIG_INTEL_PTI_STM
+	/* Initialize STM resources */
+	if (stm_enabled && stm_dev_init(pdev, &drv_data->stm) != 0) {
+		retval = -ENOMEM;
+		goto err_rel_reg;
+	}
+#endif
+
 	pci_set_drvdata(pdev, drv_data);
 
 	for (a = 0; a < PTITTY_MINOR_NUM; a++) {
@@ -863,7 +934,7 @@
 
 	return 0;
 err_rel_reg:
-	pci_release_region(pdev, pci_bar);
+	pci_release_region(pdev, GET_PCI_BAR(drv_data));
 err_free_dd:
 	kfree(drv_data);
 err_disable_pci:
@@ -891,10 +962,14 @@
 		tty_port_destroy(&drv_data->port[a]);
 	}
 
+#ifdef CONFIG_INTEL_PTI_STM
+	if (stm_enabled)
+		stm_dev_clean(pdev, &drv_data->stm);
+#endif
 	iounmap(drv_data->pti_ioaddr);
+	pci_release_region(pdev, GET_PCI_BAR(drv_data));
 	pci_set_drvdata(pdev, NULL);
 	kfree(drv_data);
-	pci_release_region(pdev, 1);
 	pci_disable_device(pdev);
 
 	misc_deregister(&pti_char_driver);
diff --git a/drivers/misc/stm.c b/drivers/misc/stm.c
new file mode 100644
index 0000000..be96b32
--- /dev/null
+++ b/drivers/misc/stm.c
@@ -0,0 +1,470 @@
+/*
+ *  stm.c - MIPI STM Debug Unit driver
+ *
+ *  Copyright (C) Intel 2013
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The STM (System Trace Macro) unit driver configures trace output
+ * to the Intel Tangier PTI port and DWC3 USB xHCI controller
+ * out of the mobile device for analysis with a debugging tool
+ * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
+ * compact JTAG standard, and USB Debug-Class.
+ *
+ * This driver allows other parts of the OS to use the
+ * interface to write out its contents for debugging a mobile system.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sdm.h>
+
+#include "stm.h"
+#include <asm/intel_soc_debug.h>
+#include "../usb/dwc3/core.h"
+
+/* STM Registers */
+#define STM_CTRL		0x0000
+#define STM_USB3DBGGTHR		0x0008
+#define STM_MASMSK		0x0010
+#define STM_CHMSK		0x0080
+#define STM_AGTBAR0		0x00C0
+#define STM_AGTBAR1		0x0140
+#define STM_AGTBAR2		0x01C0
+#define STM_AGTBAR3		0x0240
+#define STM_AGTBAR4		0x02C0
+#define STM_AGTBAR5		0x0340
+#define STM_AGTBAR6		0x03C0
+#define STM_AGTBAR7		0x0440
+#define STM_AGTBAR8		0x04C0
+#define STM_AGTBAR9		0x0540
+#define STM_AGTBAR10		0x05C0
+#define STM_AGTBAR11		0x0640
+
+/*
+ * STM registers
+ */
+#define STM_REG_BASE		0x0        /* registers base offset */
+#define STM_REG_LEN		0x20       /* address length */
+/*
+ * TRB buffers
+ */
+#define STM_TRB_BASE		0x400      /* TRB base offset */
+#define STM_TRB_LEN		0x100	   /* address length */
+#define STM_TRB_NUM		16         /* number of TRBs */
+
+/*
+ * This protects R/W to stm registers
+ */
+static DEFINE_MUTEX(stmlock);
+
+static struct stm_dev *_dev_stm;
+
+static inline u32 stm_readl(void __iomem *base, u32 offset)
+{
+	return readl(base + offset);
+}
+
+static inline void stm_writel(void __iomem *base, u32 offset, u32 value)
+{
+	writel(value, base + offset);
+}
+
+/**
+ * stm_kernel_set_out() - kernel API used to set the STM output
+ * configuration to PTI or USB.
+ *
+ * @bus_type:
+ *	0 = PTI 4-bits legacy end user
+ *	1 = PTI 4-bits NiDnT
+ *	2 = PTI 16-bits
+ *	3 = PTI 12-bits
+ *	4 = PTI 8-bits
+ *	15 = USB Debug-Class (DvC.Trace)
+ *
+ */
+int stm_kernel_set_out(int bus_type)
+{
+	struct stm_dev *drv_stm = _dev_stm;
+
+	/*
+	 * Since this function is exported it is treated like an API
+	 * function, so all parameters are checked for validity.
+	 */
+	if (drv_stm == NULL)
+		return 0;
+
+	mutex_lock(&stmlock);
+
+	drv_stm->stm_ctrl_hwreg.reg_word =
+		stm_readl(drv_stm->stm_ioaddr, (u32)STM_CTRL);
+
+	switch (bus_type) {
+	case STM_PTI_4BIT_LEGACY:
+	case STM_PTI_4BIT_NIDNT:
+	case STM_PTI_16BIT:
+	case STM_PTI_12BIT:
+	case STM_PTI_8BIT:
+		drv_stm->stm_ctrl_hwreg.pti_out_en = true;
+		drv_stm->stm_ctrl_hwreg.usb_debug_en = false;
+		drv_stm->stm_ctrl_hwreg.pti_out_mode_sel = bus_type;
+		stm_writel(drv_stm->stm_ioaddr, (u32)STM_CTRL,
+			   drv_stm->stm_ctrl_hwreg.reg_word);
+		break;
+	case STM_USB:
+		drv_stm->stm_ctrl_hwreg.pti_out_en = false;
+		drv_stm->stm_ctrl_hwreg.usb_debug_en = true;
+		stm_writel(drv_stm->stm_ioaddr, (u32)STM_CTRL,
+			   drv_stm->stm_ctrl_hwreg.reg_word);
+		break;
+	default:
+		/* N/A */
+		break;
+	}
+	mutex_unlock(&stmlock);
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(stm_kernel_set_out);
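+
+/*
+ * Illustrative kernel caller (hypothetical). Note the return convention
+ * above: 0 means no STM device was initialized, 1 means the request was
+ * handled.
+ *
+ *	if (stm_kernel_set_out(STM_PTI_16BIT) != 1)
+ *		pr_warn("STM not available\n");
+ */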
+
+/**
+ * stm_kernel_get_out() - kernel API used to get the current STM
+ * output configuration (PTI or USB).
+ *
+ */
+int stm_kernel_get_out(void)
+{
+	struct stm_dev *drv_stm = _dev_stm;
+	int ret = -EOPNOTSUPP;
+
+	if (drv_stm == NULL)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&stmlock);
+
+	drv_stm->stm_ctrl_hwreg.reg_word =
+		stm_readl(drv_stm->stm_ioaddr, (u32)STM_CTRL);
+
+	if (!drv_stm->stm_ctrl_hwreg.usb_debug_en) {
+		if (drv_stm->stm_ctrl_hwreg.pti_out_en)
+			ret = (int)drv_stm->stm_ctrl_hwreg.pti_out_mode_sel;
+	} else {
+		ret = (int)STM_USB;
+	}
+	mutex_unlock(&stmlock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(stm_kernel_get_out);
+
+/**
+ * stm_set_out() - 'out' parameter set function from 'STM' module
+ *
+ * called when writing to 'out' parameter from 'STM' module in sysfs
+ */
+static int stm_set_out(const char *val, struct kernel_param *kp)
+{
+	int bus_type_value;
+	int ret = -EINVAL;
+
+	if (sscanf(val, "%2d", &bus_type_value) != 1)
+		return ret;
+
+	return stm_kernel_set_out(bus_type_value);
+}
+
+/**
+ * stm_get_out() - 'out' parameter get function from 'STM' module
+ *
+ * called when reading 'out' parameter from 'STM' module in sysfs
+ */
+static int stm_get_out(char *buffer, struct kernel_param *kp)
+{
+	int i;
+
+	i = stm_kernel_get_out();
+	if (i == -EOPNOTSUPP) {
+		buffer[0] = '\0';
+		return 0;
+	}
+
+	return sprintf(buffer, "%2d", i);
+}
+
+/**
+ * stm_init() - initialize stmsub3dbgthr register
+ *
+ * @return - 0 on Success
+ */
+static int stm_init(void)
+{
+	struct stm_dev *stm = _dev_stm;
+	struct stm_usb3_ctrl *usb3dbg;
+
+	if (!stm)
+		return -ENODEV;
+
+	usb3dbg = &stm->stm_usb3_hwreg;
+	usb3dbg->reg_word = stm_readl(stm->stm_ioaddr, (u32)STM_USB3DBGGTHR);
+
+	usb3dbg->reg_word = 0xFF;
+
+	stm_writel(stm->stm_ioaddr, (u32)STM_USB3DBGGTHR, usb3dbg->reg_word);
+
+	return 0;
+}
+
+/**
+ * stm_alloc_static_trb_pool() - set stm trb pool dma_addr and return
+ * trb_pool
+ *
+ * @dma_addr - trb pool dma physical address to set
+ * @return - trb pool address ioremaped pointer
+ */
+static void *stm_alloc_static_trb_pool(dma_addr_t *dma_addr)
+{
+	struct stm_dev *stm = _dev_stm;
+	if (!stm)
+		return NULL;
+
+	*dma_addr = stm->stm_trb_base;
+	return stm->trb_ioaddr;
+}
+
+static void ebc_io_free_static_trb_pool(void)
+{
+	/* Nothing to do, HW TRB */
+}
+
+static int stm_xfer_start(void)
+{
+	struct stm_dev *stm = _dev_stm;
+	struct stm_ctrl *stm_ctrl;
+
+	if (!stm)
+		return -ENODEV;
+
+	stm_ctrl = &stm->stm_ctrl_hwreg;
+	stm_ctrl->reg_word = stm_readl(stm->stm_ioaddr, (u32)STM_CTRL);
+
+	stm_ctrl->usb_debug_en = true;
+	stm_ctrl->pti_out_en = false;
+
+	stm_writel(stm->stm_ioaddr, (u32)STM_CTRL, stm_ctrl->reg_word);
+	pr_info("%s: switch STM output to DvC.Trace\n", __func__);
+
+	return 0;
+}
+
+	 * Point func_stream_emmc at emmc_ipanic_write_thread_func to
+	 * stream the thread call traces.
+	struct stm_dev *stm = _dev_stm;
+	struct stm_ctrl *stm_ctrl;
+
+	if (!stm)
+		return -ENODEV;
+
+	stm_ctrl = &stm->stm_ctrl_hwreg;
+	stm_ctrl->reg_word = stm_readl(stm->stm_ioaddr, (u32)STM_CTRL);
+
+	stm_ctrl->usb_debug_en = false;
+	stm_ctrl->pti_out_en = true;
+
+	stm_writel(stm->stm_ioaddr, (u32)STM_CTRL, stm_ctrl->reg_word);
+	pr_info("%s: switch STM output to 4-bit MIPI PTI (default)\n", __func__);
+
+	return 0;
+}
+
+static struct ebc_io stm_ebc_io_ops = {
+	.name = "stmbuf4kB",
+	.epname = "ep1in",
+	.epnum = 3,
+	.is_ondemand = 1,
+	.static_trb_pool_size = 4,
+	.init = stm_init,
+	.alloc_static_trb_pool = stm_alloc_static_trb_pool,
+	.free_static_trb_pool = ebc_io_free_static_trb_pool,
+	.xfer_start = stm_xfer_start,
+	.xfer_stop = stm_xfer_stop,
+};
+
+#define EXI_IN_TRB_POOL_OFFSET (4*16)
+static void *exi_inbound_alloc_static_trb_pool(dma_addr_t *dma_addr)
+{
+	struct stm_dev *stm = _dev_stm;
+	if (!stm)
+		return NULL;
+
+	*dma_addr = stm->stm_trb_base + EXI_IN_TRB_POOL_OFFSET;
+	return stm->trb_ioaddr + EXI_IN_TRB_POOL_OFFSET;
+}
+
+static struct ebc_io exi_in_ebc_io_ops = {
+	.name = "exi-inbound",
+	.epname = "ep8in",
+	.epnum = 17,
+	.is_ondemand = 0,
+	.static_trb_pool_size = 4,
+	.alloc_static_trb_pool = exi_inbound_alloc_static_trb_pool,
+	.free_static_trb_pool = ebc_io_free_static_trb_pool,
+};
+
+#define EXI_OUT_TRB_POOL_OFFSET (8*16)
+static void *exi_outbound_alloc_static_trb_pool(dma_addr_t *dma_addr)
+{
+	struct stm_dev *stm = _dev_stm;
+	if (!stm)
+		return NULL;
+
+	*dma_addr = stm->stm_trb_base + EXI_OUT_TRB_POOL_OFFSET;
+	return stm->trb_ioaddr + EXI_OUT_TRB_POOL_OFFSET;
+}
+
+static struct ebc_io exi_out_ebc_io_ops = {
+	.name = "exi-outbound",
+	.epname = "ep8out",
+	.epnum = 16,
+	.is_ondemand = 0,
+	.static_trb_pool_size = 2,
+	.alloc_static_trb_pool = exi_outbound_alloc_static_trb_pool,
+	.free_static_trb_pool = ebc_io_free_static_trb_pool,
+};
+
+int stm_is_enabled(void)
+{
+	return (_dev_stm != NULL);
+}
+EXPORT_SYMBOL_GPL(stm_is_enabled);
+
+/**
+ * stm_dev_init()- Used to setup STM resources on the pci bus.
+ *
+ * @pdev- pci_dev struct values for pti device.
+ * @stm- stm_dev struct managing stm resources
+ *
+ * Returns:
+ *	0 for success
+ *	otherwise, error
+ */
+int stm_dev_init(struct pci_dev *pdev,
+		 struct stm_dev *stm)
+{
+	int retval = 0;
+	int pci_bar = 0;
+
+	if (!cpu_has_debug_feature(DEBUG_FEATURE_PTI))
+		return -ENODEV;
+
+	dev_dbg(&pdev->dev, "%s %s(%d): STM PCI ID %04x:%04x\n", __FILE__,
+		__func__, __LINE__, pdev->vendor, pdev->device);
+
+	stm->stm_addr = pci_resource_start(pdev, pci_bar);
+
+	retval = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
+	if (retval != 0) {
+		dev_err(&pdev->dev,
+			"%s(%d): pci_request_region() returned error %d\n",
+			__func__, __LINE__, retval);
+		return retval;
+	}
+	pr_info("stm addr 0x%lx\n", stm->stm_addr);
+
+	stm->stm_reg_base = stm->stm_addr+STM_REG_BASE;
+	stm->stm_ioaddr = ioremap_nocache((u32)stm->stm_reg_base,
+					  STM_REG_LEN);
+	if (!stm->stm_ioaddr) {
+		retval = -ENOMEM;
+		goto out_release_region;
+	}
+
+	stm->stm_trb_base = stm->stm_addr+STM_TRB_BASE;
+	stm->trb_ioaddr = ioremap_nocache((u32)stm->stm_trb_base,
+					  STM_TRB_LEN);
+	if (!stm->trb_ioaddr) {
+		retval = -ENOMEM;
+		goto out_iounmap_stm_ioaddr;
+	}
+
+	stm->stm_ctrl_hwreg.reg_word = stm_readl(stm->stm_ioaddr,
+						 (u32)STM_CTRL);
+	stm->stm_usb3_hwreg.reg_word = stm_readl(stm->stm_ioaddr,
+						 (u32)STM_USB3DBGGTHR);
+
+	_dev_stm = stm;
+
+	dwc3_register_io_ebc(&stm_ebc_io_ops);
+	dwc3_register_io_ebc(&exi_in_ebc_io_ops);
+	dwc3_register_io_ebc(&exi_out_ebc_io_ops);
+
+	pr_info("successfully registered ebc io ops\n");
+
+	return retval;
+
+out_iounmap_stm_ioaddr:
+	pci_iounmap(pdev, stm->stm_ioaddr);
+
+out_release_region:
+	pci_release_region(pdev, pci_bar);
+
+	_dev_stm = NULL;
+	return retval;
+}
+EXPORT_SYMBOL_GPL(stm_dev_init);
+
+/**
+ * stm_dev_clean()- Driver exit method to free STM resources from
+ *		   PCI bus.
+ * @pdev: variable containing pci info of STM.
+ * @dev_stm: stm_dev resources to clean.
+ */
+void stm_dev_clean(struct pci_dev *pdev,
+		   struct stm_dev *dev_stm)
+{
+	int pci_bar = 0;
+
+	/* If STM driver was not initialized properly,
+	 * there is nothing to do.
+	 */
+	if (_dev_stm == NULL)
+		return;
+
+	dwc3_unregister_io_ebc(&stm_ebc_io_ops);
+	dwc3_unregister_io_ebc(&exi_in_ebc_io_ops);
+	dwc3_unregister_io_ebc(&exi_out_ebc_io_ops);
+
+	if (dev_stm != NULL) {
+		pci_iounmap(pdev, dev_stm->stm_ioaddr);
+		pci_iounmap(pdev, dev_stm->trb_ioaddr);
+	}
+
+	pci_release_region(pdev, pci_bar);
+
+	_dev_stm = NULL;
+}
+EXPORT_SYMBOL_GPL(stm_dev_clean);
+
+module_param_call(stm_out, stm_set_out, stm_get_out, NULL, 0644);
+MODULE_PARM_DESC(stm_out, "configure System Trace Macro output");
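+
+/*
+ * Sysfs usage (a sketch; the parameter path assumes this file is built
+ * into a module named "stm"):
+ *
+ *	echo 15 > /sys/module/stm/parameters/stm_out	# DvC.Trace (USB)
+ *	cat /sys/module/stm/parameters/stm_out		# current output
+ */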
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Florent Pirou");
+MODULE_DESCRIPTION("STM Driver");
diff --git a/drivers/misc/stm.h b/drivers/misc/stm.h
new file mode 100644
index 0000000..1fb2d2e
--- /dev/null
+++ b/drivers/misc/stm.h
@@ -0,0 +1,114 @@
+/*
+ * stm.h
+ *
+ *  Copyright (C) Intel 2011
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The STM (System Trace Macro) unit driver configures trace output
+ * to the Intel Tangier PTI port and DWC3 USB xHCI controller
+ * out of the mobile device for analysis with a debugging tool
+ * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
+ * compact JTAG standard, and USB Debug-Class.
+ *
+ * This header file allows other parts of the OS to use the
+ * interface to write out its contents for debugging a mobile system.
+ */
+
+#ifndef _STM_H
+#define _STM_H
+
+#include <linux/pci.h>
+
+/* STM_CTRL register bitmap */
+/**
+ * struct stm_ctrl - STM control block
+ * @usb_debug_en : STM needs to redirect the trace packet to the USB3
+ * @pti_io_idle_threshold : threshold for disabling the IO clock.
+ * @pkt_transfer_size : asserts the *buff_avail signal after it has
+ * 1 or 2 KB of data in buffer
+ * @dis_dcu7_use : disables the usage of DCU7 instead of PTI_Disable
+ * @en_sw_ms : enables software master usage
+ * @mst_id_en : enables the PTI unit to suppress sending the Master Command
+ * @d64_cmd_en : PTI unit to use the D64 commands
+ * @pti_out_mode_sel
+ *	0 = PTI 4-bits legacy end user
+ *	1 = PTI 4-bits NiDnT
+ *	2 = PTI 16-bits
+ *	3 = PTI 12-bits
+ *	4 = PTI 8-bits
+ * @pti_out_en : PTI output enable muxselects that propagate
+ * to the FLIS to be enabled
+ * @lossy_mode_enable : Output Agent will continue to accept writes,
+ * even if the queues are full. The data will be dropped and the
+ * dropped packet indicator will be incremented.
+ * @time_stamp_enable : Enable time stamping the final packet in trace record.
+ */
+struct stm_ctrl {
+	union {
+		struct {
+			u32             time_stamp_enable:1;
+			u32             lossy_mode_enable:1;
+			u32             pti_out_en:1;
+			u32             reserved:1;
+			u32             pti_out_mode_sel:4;
+			u32             d64_cmd_en:1;
+			u32             mst_id_en:1;
+			u32             en_sw_ms:1;
+			u32             dis_dcu7_use:1;
+			u32             pkt_transfer_size:1;
+			u32             pti_io_idle_threshold:5;
+			u32             usb_debug_en:1;
+			u32             reserved31_19:13;
+		};
+		u32 reg_word;
+	};
+} __packed;
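+
+/*
+ * The union lets the driver do register-width I/O while still naming
+ * individual bits; a typical read-modify-write (as in
+ * stm_kernel_set_out()) looks like:
+ *
+ *	struct stm_ctrl ctrl;
+ *
+ *	ctrl.reg_word = readl(stm_ioaddr + STM_CTRL);
+ *	ctrl.pti_out_en = 1;
+ *	writel(ctrl.reg_word, stm_ioaddr + STM_CTRL);
+ */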
+
+/**
+ * struct stm_usb3_ctrl - STM buffer USB3 hardware EBC
+ * @region_closure_threshold : This is the threshold for closing
+ * the 1KB region in the debug trace buffer. STM will wait for the
+ * configured time as specified in this field and then closes the region.
+ * The unit of this field is 64 us; e.g. when this field is set
+ * to 0xffff, it indicates 2 ms
+ * @empty_packets_threshold : When STM does not have data to send,
+ * it can send empty packets to keep the USB3 alive. This is useful
+ * in case of ISOC traffic, because in this mode the wake up latency
+ * is high. STM will send the configured number of empty packets as
+ * specified in this field.
+ */
+struct stm_usb3_ctrl {
+	union {
+		struct {
+			u32             region_closure_threshold:15;
+			u32             empty_packets_threshold:6;
+			u32             reserved31_21:11;
+		};
+		u32 reg_word;
+	};
+} __packed;
+
+struct stm_dev {
+	unsigned long stm_addr;
+	unsigned long stm_reg_base;
+	unsigned long stm_trb_base;
+	void __iomem *stm_ioaddr;
+	void __iomem *trb_ioaddr;
+	struct stm_ctrl stm_ctrl_hwreg;
+	struct stm_usb3_ctrl stm_usb3_hwreg;
+};
+
+int stm_dev_init(struct pci_dev *pdev, struct stm_dev *dev_stm);
+void stm_dev_clean(struct pci_dev *pdev, struct stm_dev *dev_stm);
+
+#endif /* _STM_H */
diff --git a/drivers/misc/uid_cputime.c b/drivers/misc/uid_cputime.c
new file mode 100644
index 0000000..43298a4
--- /dev/null
+++ b/drivers/misc/uid_cputime.c
@@ -0,0 +1,253 @@
+/* drivers/misc/uid_cputime.c
+ *
+ * Copyright (C) 2014 - 2015 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/err.h>
+#include <linux/hashtable.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/profile.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#define UID_HASH_BITS	10
+DECLARE_HASHTABLE(hash_table, UID_HASH_BITS);
+
+static DEFINE_MUTEX(uid_lock);
+static struct proc_dir_entry *parent;
+
+struct uid_entry {
+	uid_t uid;
+	cputime_t utime;
+	cputime_t stime;
+	cputime_t active_utime;
+	cputime_t active_stime;
+	unsigned long long active_power;
+	unsigned long long power;
+	struct hlist_node hash;
+};
+
+static struct uid_entry *find_uid_entry(uid_t uid)
+{
+	struct uid_entry *uid_entry;
+	hash_for_each_possible(hash_table, uid_entry, hash, uid) {
+		if (uid_entry->uid == uid)
+			return uid_entry;
+	}
+	return NULL;
+}
+
+static struct uid_entry *find_or_register_uid(uid_t uid)
+{
+	struct uid_entry *uid_entry;
+
+	uid_entry = find_uid_entry(uid);
+	if (uid_entry)
+		return uid_entry;
+
+	uid_entry = kzalloc(sizeof(struct uid_entry), GFP_ATOMIC);
+	if (!uid_entry)
+		return NULL;
+
+	uid_entry->uid = uid;
+
+	hash_add(hash_table, &uid_entry->hash, uid);
+
+	return uid_entry;
+}
+
+static int uid_stat_show(struct seq_file *m, void *v)
+{
+	struct uid_entry *uid_entry;
+	struct task_struct *task, *temp;
+	cputime_t utime;
+	cputime_t stime;
+	unsigned long bkt;
+
+	mutex_lock(&uid_lock);
+
+	hash_for_each(hash_table, bkt, uid_entry, hash) {
+		uid_entry->active_stime = 0;
+		uid_entry->active_utime = 0;
+		uid_entry->active_power = 0;
+	}
+
+	read_lock(&tasklist_lock);
+	do_each_thread(temp, task) {
+		uid_entry = find_or_register_uid(from_kuid_munged(
+			current_user_ns(), task_uid(task)));
+		if (!uid_entry) {
+			read_unlock(&tasklist_lock);
+			mutex_unlock(&uid_lock);
+			pr_err("%s: failed to find the uid_entry for uid %d\n",
+				__func__, from_kuid_munged(current_user_ns(),
+				task_uid(task)));
+			return -ENOMEM;
+		}
+		/* if this task is exiting, we have already accounted for the
+		 * time and power.
+		 */
+		if (task->cpu_power == ULLONG_MAX)
+			continue;
+		task_cputime_adjusted(task, &utime, &stime);
+		uid_entry->active_utime += utime;
+		uid_entry->active_stime += stime;
+		uid_entry->active_power += task->cpu_power;
+	} while_each_thread(temp, task);
+	read_unlock(&tasklist_lock);
+
+	hash_for_each(hash_table, bkt, uid_entry, hash) {
+		cputime_t total_utime = uid_entry->utime +
+							uid_entry->active_utime;
+		cputime_t total_stime = uid_entry->stime +
+							uid_entry->active_stime;
+		unsigned long long total_power = uid_entry->power +
+							uid_entry->active_power;
+		seq_printf(m, "%d: %llu %llu %llu\n", uid_entry->uid,
+			(unsigned long long)jiffies_to_msecs(
+				cputime_to_jiffies(total_utime)) * USEC_PER_MSEC,
+			(unsigned long long)jiffies_to_msecs(
+				cputime_to_jiffies(total_stime)) * USEC_PER_MSEC,
+			total_power);
+	}
+
+	mutex_unlock(&uid_lock);
+	return 0;
+}
+
+static int uid_stat_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, uid_stat_show, PDE_DATA(inode));
+}
+
+static const struct file_operations uid_stat_fops = {
+	.open		= uid_stat_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int uid_remove_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, NULL, NULL);
+}
+
+static ssize_t uid_remove_write(struct file *file,
+			const char __user *buffer, size_t count, loff_t *ppos)
+{
+	struct uid_entry *uid_entry;
+	struct hlist_node *tmp;
+	char uids[128];
+	char *start_uid, *end_uid = NULL;
+	long int uid_start = 0, uid_end = 0;
+
+	if (count >= sizeof(uids))
+		count = sizeof(uids) - 1;
+
+	if (copy_from_user(uids, buffer, count))
+		return -EFAULT;
+
+	uids[count] = '\0';
+	end_uid = uids;
+	start_uid = strsep(&end_uid, "-");
+
+	if (!start_uid || !end_uid)
+		return -EINVAL;
+
+	if (kstrtol(start_uid, 10, &uid_start) != 0 ||
+		kstrtol(end_uid, 10, &uid_end) != 0) {
+		return -EINVAL;
+	}
+
+	mutex_lock(&uid_lock);
+
+	for (; uid_start <= uid_end; uid_start++) {
+		hash_for_each_possible_safe(hash_table, uid_entry, tmp,
+							hash, uid_start) {
+			hash_del(&uid_entry->hash);
+			kfree(uid_entry);
+		}
+	}
+
+	mutex_unlock(&uid_lock);
+	return count;
+}
+
+static const struct file_operations uid_remove_fops = {
+	.open		= uid_remove_open,
+	.release	= single_release,
+	.write		= uid_remove_write,
+};
+
+static int process_notifier(struct notifier_block *self,
+			unsigned long cmd, void *v)
+{
+	struct task_struct *task = v;
+	struct uid_entry *uid_entry;
+	cputime_t utime, stime;
+	uid_t uid;
+
+	if (!task)
+		return NOTIFY_OK;
+
+	mutex_lock(&uid_lock);
+	uid = from_kuid_munged(current_user_ns(), task_uid(task));
+	uid_entry = find_or_register_uid(uid);
+	if (!uid_entry) {
+		pr_err("%s: failed to find uid %d\n", __func__, uid);
+		goto exit;
+	}
+
+	task_cputime_adjusted(task, &utime, &stime);
+	uid_entry->utime += utime;
+	uid_entry->stime += stime;
+	uid_entry->power += task->cpu_power;
+	task->cpu_power = ULLONG_MAX;
+
+exit:
+	mutex_unlock(&uid_lock);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block process_notifier_block = {
+	.notifier_call	= process_notifier,
+};
+
+static int __init proc_uid_cputime_init(void)
+{
+	hash_init(hash_table);
+
+	parent = proc_mkdir("uid_cputime", NULL);
+	if (!parent) {
+		pr_err("%s: failed to create proc entry\n", __func__);
+		return -ENOMEM;
+	}
+
+	proc_create_data("remove_uid_range", S_IWUGO, parent, &uid_remove_fops,
+					NULL);
+
+	proc_create_data("show_uid_stat", S_IRUGO, parent, &uid_stat_fops,
+					NULL);
+
+	profile_event_register(PROFILE_TASK_EXIT, &process_notifier_block);
+
+	return 0;
+}
+
+early_initcall(proc_uid_cputime_init);
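+
+/*
+ * Userspace usage (a sketch):
+ *
+ *	cat /proc/uid_cputime/show_uid_stat
+ *		# one "<uid>: <utime_us> <stime_us> <power>" line per uid
+ *	echo "1000-1999" > /proc/uid_cputime/remove_uid_range
+ *		# drop the accounting entries for uids 1000..1999
+ */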
diff --git a/drivers/misc/uid_stat.c b/drivers/misc/uid_stat.c
new file mode 100644
index 0000000..4766c1f
--- /dev/null
+++ b/drivers/misc/uid_stat.c
@@ -0,0 +1,152 @@
+/* drivers/misc/uid_stat.c
+ *
+ * Copyright (C) 2008 - 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/atomic.h>
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/uid_stat.h>
+#include <net/activity_stats.h>
+
+static DEFINE_SPINLOCK(uid_lock);
+static LIST_HEAD(uid_list);
+static struct proc_dir_entry *parent;
+
+struct uid_stat {
+	struct list_head link;
+	uid_t uid;
+	atomic_t tcp_rcv;
+	atomic_t tcp_snd;
+};
+
+static struct uid_stat *find_uid_stat(uid_t uid)
+{
+	struct uid_stat *entry;
+
+	list_for_each_entry(entry, &uid_list, link) {
+		if (entry->uid == uid) {
+			return entry;
+		}
+	}
+	return NULL;
+}
+
+static int uid_stat_atomic_int_show(struct seq_file *m, void *v)
+{
+	unsigned int bytes;
+	atomic_t *counter = m->private;
+
+	bytes = (unsigned int) (atomic_read(counter) + INT_MIN);
+	return seq_printf(m, "%u\n", bytes);
+}
+
+static int uid_stat_read_atomic_int_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, uid_stat_atomic_int_show, PDE_DATA(inode));
+}
+
+static const struct file_operations uid_stat_read_atomic_int_fops = {
+	.open		= uid_stat_read_atomic_int_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+/* Create a new entry for tracking the specified uid. */
+static struct uid_stat *create_stat(uid_t uid)
+{
+	struct uid_stat *new_uid;
+	/* Create the uid stat struct and append it to the list. */
+	new_uid = kmalloc(sizeof(struct uid_stat), GFP_ATOMIC);
+	if (!new_uid)
+		return NULL;
+
+	new_uid->uid = uid;
+	/* Counters start at INT_MIN, so we can track 4GB of network traffic. */
+	atomic_set(&new_uid->tcp_rcv, INT_MIN);
+	atomic_set(&new_uid->tcp_snd, INT_MIN);
+
+	list_add_tail(&new_uid->link, &uid_list);
+	return new_uid;
+}
+
+static void create_stat_proc(struct uid_stat *new_uid)
+{
+	char uid_s[32];
+	struct proc_dir_entry *entry;
+	sprintf(uid_s, "%d", new_uid->uid);
+	entry = proc_mkdir(uid_s, parent);
+
+	/* Keep reference to uid_stat so we know what uid to read stats from. */
+	proc_create_data("tcp_snd", S_IRUGO, entry,
+			 &uid_stat_read_atomic_int_fops, &new_uid->tcp_snd);
+
+	proc_create_data("tcp_rcv", S_IRUGO, entry,
+			 &uid_stat_read_atomic_int_fops, &new_uid->tcp_rcv);
+}
+
+static struct uid_stat *find_or_create_uid_stat(uid_t uid)
+{
+	struct uid_stat *entry;
+	unsigned long flags;
+	spin_lock_irqsave(&uid_lock, flags);
+	entry = find_uid_stat(uid);
+	if (entry) {
+		spin_unlock_irqrestore(&uid_lock, flags);
+		return entry;
+	}
+	entry = create_stat(uid);
+	spin_unlock_irqrestore(&uid_lock, flags);
+	if (entry)
+		create_stat_proc(entry);
+	return entry;
+}
+
+int uid_stat_tcp_snd(uid_t uid, int size)
+{
+	struct uid_stat *entry;
+	activity_stats_update();
+	entry = find_or_create_uid_stat(uid);
+	if (!entry)
+		return -1;
+	atomic_add(size, &entry->tcp_snd);
+	return 0;
+}
+
+int uid_stat_tcp_rcv(uid_t uid, int size)
+{
+	struct uid_stat *entry;
+	activity_stats_update();
+	entry = find_or_create_uid_stat(uid);
+	if (!entry)
+		return -1;
+	atomic_add(size, &entry->tcp_rcv);
+	return 0;
+}
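+
+/*
+ * Illustrative caller (hypothetical; the real call sites live in the
+ * networking stack around the TCP send/receive paths):
+ *
+ *	uid_t uid = from_kuid_munged(current_user_ns(), sock_i_uid(sk));
+ *
+ *	uid_stat_tcp_snd(uid, len);	// bytes handed to transmit
+ */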
+
+static int __init uid_stat_init(void)
+{
+	parent = proc_mkdir("uid_stat", NULL);
+	if (!parent) {
+		pr_err("uid_stat: failed to create proc entry\n");
+		return -1;
+	}
+	return 0;
+}
+
+__initcall(uid_stat_init);
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 5562308..79d8212 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -50,6 +50,15 @@
 
 	  If unsure, say Y here.
 
+config MMC_BLOCK_DEFERRED_RESUME
+	bool "Defer MMC layer resume until I/O is requested"
+	depends on MMC_BLOCK
+	default n
+	help
+	  Say Y here to enable deferred MMC resume until I/O
+	  is requested. This will reduce overall resume latency and
+	  save power when there's an SD card inserted but not being used.
+
 config SDIO_UART
 	tristate "SDIO UART/GPS class support"
 	depends on TTY
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index ce34c49..5b9aff2 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -35,6 +35,9 @@
 #include <linux/capability.h>
 #include <linux/compat.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/mmc.h>
+
 #include <linux/mmc/ioctl.h>
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -95,6 +98,7 @@
 #define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
 #define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
 #define MMC_BLK_PACKED_CMD	(1 << 2)	/* MMC packed command support */
+#define MMC_BLK_SUSPENDED	(1 << 3)	/* MMC block device suspended */
 
 	unsigned int	usage;
 	unsigned int	read_only;
@@ -105,7 +109,8 @@
 #define MMC_BLK_WRITE		BIT(1)
 #define MMC_BLK_DISCARD		BIT(2)
 #define MMC_BLK_SECDISCARD	BIT(3)
-
+#define MMC_BLK_RPMB		BIT(4)
+#define MMC_BLK_USER		BIT(5)
 	/*
 	 * Only set in main mmc_blk_data associated
 	 * with mmc_card with mmc_set_drvdata, and keeps
@@ -145,6 +150,9 @@
 	packed->blocks = 0;
 }
 
+static int mmc_rpmb_req_process(struct mmc_blk_data *,
+		struct mmc_ioc_rpmb_req *);
+
 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 {
 	struct mmc_blk_data *md;
@@ -162,11 +170,7 @@
 
 static inline int mmc_get_devidx(struct gendisk *disk)
 {
-	int devmaj = MAJOR(disk_devt(disk));
-	int devidx = MINOR(disk_devt(disk)) / perdev_minors;
-
-	if (!devmaj)
-		devidx = disk->first_minor / perdev_minors;
+	int devidx = disk->first_minor / perdev_minors;
 	return devidx;
 }
 
@@ -201,8 +205,6 @@
 
 	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
 
-	mmc_blk_put(md);
-
 	return ret;
 }
 
@@ -258,7 +260,7 @@
 	int ret;
 	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
 
-	ret = snprintf(buf, PAGE_SIZE, "%d\n",
+	ret = snprintf(buf, PAGE_SIZE, "%d",
 		       get_disk_ro(dev_to_disk(dev)) ^
 		       md->read_only);
 	mmc_blk_put(md);
@@ -569,12 +571,53 @@
 	return err;
 }
 
+static int mmc_blk_ioctl_rpmb_req(struct block_device *bdev,
+		struct mmc_ioc_rpmb_req __user *ptr)
+{
+	struct mmc_ioc_rpmb_req req;
+	struct mmc_blk_data *md = NULL;
+	int err = 0;
+
+	/* The caller must have CAP_SYS_RAWIO */
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
+
+	memset(&req, 0, sizeof(req));
+
+	if (copy_from_user(&req, ptr, sizeof(req)))
+		return -EFAULT;
+
+	md = mmc_blk_get(bdev->bd_disk);
+	if (!md) {
+		pr_err("%s: no eMMC block data, try again later\n",
+				__func__);
+		return -ENODEV;
+	}
+	/* handle RPMB request event */
+	err = mmc_rpmb_req_process(md, &req);
+	if (err) {
+		mmc_blk_put(md);
+		return err;
+	}
+	/*
+	 * feedback to user space
+	 */
+	if (copy_to_user(ptr, &req, sizeof(req)))
+		return -EFAULT;
+
+	mmc_blk_put(md);
+	return 0;
+}
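+
+/*
+ * Illustrative userspace caller (hypothetical; needs CAP_SYS_RAWIO and
+ * the RPMB block device node):
+ *
+ *	struct mmc_ioc_rpmb_req req = { 0 };
+ *
+ *	req.type = ...;			// fill in the RPMB request
+ *	fd = open("/dev/block/mmcblk0rpmb", O_RDWR);
+ *	if (ioctl(fd, MMC_IOC_RPMB_REQ, &req) < 0)
+ *		perror("MMC_IOC_RPMB_REQ");
+ */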
+
 static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
 	unsigned int cmd, unsigned long arg)
 {
 	int ret = -EINVAL;
 	if (cmd == MMC_IOC_CMD)
 		ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
+	else if (cmd == MMC_IOC_RPMB_REQ)
+		ret = mmc_blk_ioctl_rpmb_req(bdev,
+				(struct mmc_ioc_rpmb_req __user *)arg);
 	return ret;
 }
 
@@ -729,18 +772,22 @@
 			req->rq_disk->disk_name, "timed out", name, status);
 
 		/* If the status cmd initially failed, retry the r/w cmd */
-		if (!status_valid)
+		if (!status_valid) {
+			pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name);
 			return ERR_RETRY;
-
+		}
 		/*
 		 * If it was a r/w cmd crc error, or illegal command
 		 * (eg, issued in wrong state) then retry - we should
 		 * have corrected the state problem above.
 		 */
-		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
+		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
+			pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name);
 			return ERR_RETRY;
+		}
 
 		/* Otherwise abort the command */
+		pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name);
 		return ERR_ABORT;
 
 	default:
@@ -909,6 +956,72 @@
 	md->reset_done &= ~type;
 }
 
+static int mmc_rpmb_req_process(struct mmc_blk_data *md,
+		struct mmc_ioc_rpmb_req *req)
+{
+	struct mmc_core_rpmb_req rpmb_req;
+	struct mmc_card *card = NULL;
+	int ret;
+
+	if (!md || !req)
+		return -EINVAL;
+
+	if (!(md->flags & MMC_BLK_CMD23) ||
+			(md->part_type != EXT_CSD_PART_CONFIG_ACC_RPMB))
+		return -EOPNOTSUPP;
+
+	card = md->queue.card;
+	if (!card || !mmc_card_mmc(card) || !card->ext_csd.rpmb_size)
+		return -ENODEV;
+
+	memset(&rpmb_req, 0, sizeof(struct mmc_core_rpmb_req));
+	rpmb_req.req = req;
+	/* check request */
+	ret = mmc_rpmb_pre_frame(&rpmb_req, card);
+	if (ret) {
+		pr_err("%s: prepare frame failed\n", mmc_hostname(card->host));
+		return ret;
+	}
+
+	mmc_claim_host(card->host);
+
+	if (md->flags & MMC_BLK_SUSPENDED) {
+		pr_warn("%s: MMC block device is already suspended\n",
+				mmc_hostname(card->host));
+		ret = -EPERM;
+		goto out;
+	}
+	/*
+	 * before start, let's change to RPMB partition first
+	 */
+	ret = mmc_blk_part_switch(card, md);
+	if (ret) {
+		pr_err("%s: Invalid RPMB partition switch (%d)!\n",
+				mmc_hostname(card->host), ret);
+		/*
+		 * In case partition is not in user data area, make
+		 * a force partition switch.
+		 * we need reset eMMC card at here
+		 */
+		ret = mmc_blk_reset(md, card->host, MMC_BLK_RPMB);
+		if (!ret)
+			mmc_blk_reset_success(md, MMC_BLK_RPMB);
+		else
+			pr_err("%s: eMMC card reset failed (%d)\n",
+					mmc_hostname(card->host), ret);
+		goto out;
+	}
+
+	ret = mmc_rpmb_partition_ops(&rpmb_req, card);
+	if (ret)
+		pr_err("%s: failed (%d) to handle RPMB request type (%d)!\n",
+				mmc_hostname(card->host), ret, req->type);
+out:
+	mmc_release_host(card->host);
+	mmc_rpmb_post_frame(&rpmb_req);
+	return ret;
+}
+
 int mmc_access_rpmb(struct mmc_queue *mq)
 {
 	struct mmc_blk_data *md = mq->data;
@@ -920,6 +1033,36 @@
 
 	return false;
 }
+EXPORT_SYMBOL_GPL(mmc_access_rpmb);
+
+int mmc_rpmb_req_handle(struct device *emmc, struct mmc_ioc_rpmb_req *req)
+{
+	int ret = 0;
+	struct gendisk *disk = NULL;
+	struct mmc_blk_data *md = NULL;
+
+	if (!emmc || !req)
+		return -EINVAL;
+
+	disk = dev_to_disk(emmc);
+	if (!disk) {
+		pr_err("%s: no eMMC disk found, try again later\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	md = mmc_blk_get(disk);
+	if (!md) {
+		pr_err("%s: no eMMC block data, try again later\n",
+				__func__);
+		return -ENODEV;
+	}
+	ret = mmc_rpmb_req_process(md, req);
+	mmc_blk_put(md);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mmc_rpmb_req_handle);
 
 static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 {
@@ -1032,9 +1175,12 @@
 			goto out;
 	}
 
-	if (mmc_can_sanitize(card))
+	if (mmc_can_sanitize(card)) {
+		trace_mmc_blk_erase_start(EXT_CSD_SANITIZE_START, 0, 0);
 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 				 EXT_CSD_SANITIZE_START, 1, 0);
+		trace_mmc_blk_erase_end(EXT_CSD_SANITIZE_START, 0, 0);
+	}
 out_retry:
 	if (err && !mmc_blk_reset(md, card->host, type))
 		goto retry;
@@ -1830,11 +1976,9 @@
 			break;
 		case MMC_BLK_CMD_ERR:
 			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
-			if (mmc_blk_reset(md, card->host, type))
-				goto cmd_abort;
-			if (!ret)
-				goto start_new_req;
-			break;
+			if (!mmc_blk_reset(md, card->host, type))
+				break;
+			goto cmd_abort;
 		case MMC_BLK_RETRY:
 			if (retry++ < 5)
 				break;
@@ -1942,7 +2086,11 @@
 	struct mmc_card *card = md->queue.card;
 	struct mmc_host *host = card->host;
 	unsigned long flags;
-	unsigned int cmd_flags = req ? req->cmd_flags : 0;
+
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+	if (mmc_bus_needs_resume(card->host))
+		mmc_resume_bus(card->host);
+#endif
 
 	if (req && !mq->mqrq_prev->req)
 		/* claim host only for the first request */
@@ -1950,15 +2098,20 @@
 
 	ret = mmc_blk_part_switch(card, md);
 	if (ret) {
-		if (req) {
-			blk_end_request_all(req, -EIO);
+		pr_err("%s: switch part failed. Try to reset eMMC\n",
+				mmc_hostname(card->host));
+		if (mmc_blk_reset(md, card->host, MMC_BLK_USER)) {
+			if (req)
+				blk_end_request_all(req, -EIO);
+			ret = 0;
+			goto out;
 		}
-		ret = 0;
-		goto out;
+		pr_info("%s: Reset eMMC success\n", mmc_hostname(card->host));
+		mmc_blk_reset_success(md, MMC_BLK_USER);
 	}
 
 	mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
-	if (cmd_flags & REQ_DISCARD) {
+	if (req && req->cmd_flags & REQ_DISCARD) {
 		/* complete ongoing async transfer before issuing discard */
 		if (card->host->areq)
 			mmc_blk_issue_rw_rq(mq, NULL);
@@ -1967,7 +2120,7 @@
 			ret = mmc_blk_issue_secdiscard_rq(mq, req);
 		else
 			ret = mmc_blk_issue_discard_rq(mq, req);
-	} else if (cmd_flags & REQ_FLUSH) {
+	} else if (req && req->cmd_flags & REQ_FLUSH) {
 		/* complete ongoing async transfer before issuing flush */
 		if (card->host->areq)
 			mmc_blk_issue_rw_rq(mq, NULL);
@@ -1983,7 +2136,7 @@
 
 out:
 	if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
-	     (cmd_flags & MMC_REQ_SPECIAL_MASK))
+	     (req && (req->cmd_flags & MMC_REQ_SPECIAL_MASK)))
 		/*
 		 * Release host when there are no more requests
 		 * and after special request(discard, flush) is done.
@@ -2066,6 +2219,7 @@
 	md->disk->queue = md->queue.queue;
 	md->disk->driverfs_dev = parent;
 	set_disk_ro(md->disk, md->read_only || default_ro);
+	md->disk->flags = GENHD_FL_EXT_DEVT;
 	if (area_type & MMC_BLK_DATA_AREA_RPMB)
 		md->disk->flags |= GENHD_FL_NO_PART_SCAN;
 
@@ -2382,6 +2536,9 @@
 	mmc_set_drvdata(card, md);
 	mmc_fixup_device(card, blk_fixups);
 
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+	mmc_set_bus_resume_policy(card->host, 1);
+#endif
 	if (mmc_add_disk(md))
 		goto out;
 
@@ -2407,14 +2564,15 @@
 	mmc_release_host(card->host);
 	mmc_blk_remove_req(md);
 	mmc_set_drvdata(card, NULL);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+	mmc_set_bus_resume_policy(card->host, 0);
+#endif
 }
 
-#ifdef CONFIG_PM
-static int mmc_blk_suspend(struct mmc_card *card)
+static int _mmc_blk_suspend(struct mmc_card *card)
 {
 	struct mmc_blk_data *part_md;
 	struct mmc_blk_data *md = mmc_get_drvdata(card);
-
 	if (md) {
 		mmc_queue_suspend(&md->queue);
 		list_for_each_entry(part_md, &md->part, part) {
@@ -2424,6 +2582,17 @@
 	return 0;
 }
 
+static void mmc_blk_shutdown(struct mmc_card *card)
+{
+	_mmc_blk_suspend(card);
+}
+
+#ifdef CONFIG_PM
+static int mmc_blk_suspend(struct mmc_card *card)
+{
+	return _mmc_blk_suspend(card);
+}
+
 static int mmc_blk_resume(struct mmc_card *card)
 {
 	struct mmc_blk_data *part_md;
@@ -2438,6 +2607,18 @@
 		mmc_queue_resume(&md->queue);
 		list_for_each_entry(part_md, &md->part, part) {
 			mmc_queue_resume(&part_md->queue);
+			if (part_md->part_type ==
+					EXT_CSD_PART_CONFIG_ACC_RPMB) {
+				/*
+				 * RPMB partition is accessed by API directly.
+				 * Driver need to clear MMC_BLK_SUSPENDED flag
+				 * to make sure the next RPMB partition access
+				 * request won't be blocked
+				 */
+				mmc_claim_host(card->host);
+				part_md->flags &= ~MMC_BLK_SUSPENDED;
+				mmc_release_host(card->host);
+			}
 		}
 	}
 	return 0;
@@ -2455,6 +2636,7 @@
 	.remove		= mmc_blk_remove,
 	.suspend	= mmc_blk_suspend,
 	.resume		= mmc_blk_resume,
+	.shutdown	= mmc_blk_shutdown,
 };
 
 static int __init mmc_blk_init(void)
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 759714e..a69df52 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -3025,12 +3025,17 @@
 	mmc_test_free_dbgfs_file(card);
 }
 
+static void mmc_test_shutdown(struct mmc_card *card)
+{
+}
+
 static struct mmc_driver mmc_driver = {
 	.drv		= {
 		.name	= "mmc_test",
 	},
 	.probe		= mmc_test_probe,
 	.remove		= mmc_test_remove,
+	.shutdown	= mmc_test_shutdown,
 };
 
 static int __init mmc_test_init(void)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 645519f..88327f7 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -18,6 +18,7 @@
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
 #include "queue.h"
 
 #define MMC_QUEUE_BOUNCESZ	65536
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 99e6521..3bbd4e6 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -74,5 +74,4 @@
 extern void mmc_packed_clean(struct mmc_queue *);
 
 extern int mmc_access_rpmb(struct mmc_queue *);
-
 #endif
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index 269d072..ae10a37 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -26,3 +26,18 @@
 	  support handling this in order for it to be of any use.
 
 	  If unsure, say N.
+
+config MMC_EMBEDDED_SDIO
+	bool "MMC embedded SDIO device support (EXPERIMENTAL)"
+	help
+	  If you say Y here, support will be added for embedded SDIO
+	  devices which do not contain the necessary enumeration
+	  support in hardware to be properly detected.
+
+config MMC_PARANOID_SD_INIT
+	bool "Enable paranoid SD card initialization (EXPERIMENTAL)"
+	help
+	  If you say Y here, the MMC layer will be extra paranoid
+	  about re-trying SD init requests. This can be a useful
+	  work-around for buggy controllers and hardware. Enable
+	  if you are experiencing issues with SD detection.
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 38ed210..d55cc5d 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -7,6 +7,6 @@
 				   mmc.o mmc_ops.o sd.o sd_ops.o \
 				   sdio.o sdio_ops.o sdio_bus.o \
 				   sdio_cis.o sdio_io.o sdio_irq.o \
-				   quirks.o slot-gpio.o
+				   quirks.o slot-gpio.o mmc_panic_ops.o
 
 mmc_core-$(CONFIG_DEBUG_FS)	+= debugfs.o
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index e219c97..1921e37 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -122,6 +122,24 @@
 	return 0;
 }
 
+static void mmc_bus_shutdown(struct device *dev)
+{
+	struct mmc_driver *drv = to_mmc_driver(dev->driver);
+	struct mmc_card *card = mmc_dev_to_card(dev);
+	struct mmc_host *host = card->host;
+	int ret;
+
+	if (dev->driver && drv->shutdown)
+		drv->shutdown(card);
+
+	if (host->bus_ops->shutdown) {
+		ret = host->bus_ops->shutdown(host);
+		if (ret)
+			pr_warn("%s: error %d during shutdown\n",
+				mmc_hostname(host), ret);
+	}
+}
+
 #ifdef CONFIG_PM_SLEEP
 static int mmc_bus_suspend(struct device *dev)
 {
@@ -182,6 +200,7 @@
 	.uevent		= mmc_bus_uevent,
 	.probe		= mmc_bus_probe,
 	.remove		= mmc_bus_remove,
+	.shutdown	= mmc_bus_shutdown,
 	.pm		= &mmc_bus_pm_ops,
 };
 
@@ -309,11 +328,12 @@
 			mmc_card_ddr_mode(card) ? "DDR " : "",
 			type);
 	} else {
-		pr_info("%s: new %s%s%s%s%s card at address %04x\n",
+		pr_info("%s: new %s%s%s%s%s%s card at address %04x\n",
 			mmc_hostname(card->host),
 			mmc_card_uhs(card) ? "ultra high speed " :
 			(mmc_card_highspeed(card) ? "high speed " : ""),
 			(mmc_card_hs200(card) ? "HS200 " : ""),
+			(mmc_card_hs400(card) ? "HS400 " : ""),
 			mmc_card_ddr_mode(card) ? "DDR " : "",
 			uhs_bus_speed_mode, type, card->rca);
 	}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 3ae6f13..99e0a9c 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -27,6 +27,9 @@
 #include <linux/fault-inject.h>
 #include <linux/random.h>
 #include <linux/slab.h>
+#include <linux/wakelock.h>
+
+#include <trace/events/mmc.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -172,6 +175,7 @@
 			pr_debug("%s:     %d bytes transferred: %d\n",
 				mmc_hostname(host),
 				mrq->data->bytes_xfered, mrq->data->error);
+			trace_mmc_blk_rw_end(cmd->opcode, cmd->arg, mrq->data);
 		}
 
 		if (mrq->stop) {
@@ -538,8 +542,12 @@
 			mmc_start_bkops(host->card, true);
 	}
 
-	if (!err && areq)
+	if (!err && areq) {
+		trace_mmc_blk_rw_start(areq->mrq->cmd->opcode,
+				       areq->mrq->cmd->arg,
+				       areq->mrq->data);
 		start_err = __mmc_start_data_req(host, areq->mrq);
+	}
 
 	if (host->areq)
 		mmc_post_req(host, host->areq->mrq, 0);
@@ -1407,7 +1415,10 @@
 	mmc_set_ios(host);
 
 	/* Wait for at least 1 ms according to spec */
-	mmc_delay(1);
+	if (host->ops->busy_wait)
+		host->ops->busy_wait(host, 1000);
+	else
+		mmc_delay(1);
 
 	/*
 	 * Failure to switch is indicated by the card holding
@@ -1470,6 +1481,9 @@
 
 	mmc_host_clk_hold(host);
 
+	if (host->ops->set_dev_power)
+		host->ops->set_dev_power(host, true);
+
 	/* If ocr is set, we use it */
 	if (host->ocr)
 		bit = ffs(host->ocr) - 1;
@@ -1494,7 +1508,7 @@
 	 * This delay should be sufficient to allow the power supply
 	 * to reach the minimum voltage.
 	 */
-	mmc_delay(10);
+	usleep_range(10000, 11000);
 
 	host->ios.clock = host->f_init;
 
@@ -1505,7 +1519,7 @@
 	 * This delay must be at least 74 clock sizes, or 1 ms, or the
 	 * time required to reach a stable voltage.
 	 */
-	mmc_delay(10);
+	usleep_range(5000, 6000);
 
 	mmc_host_clk_release(host);
 }
@@ -1536,6 +1550,9 @@
 	host->ios.timing = MMC_TIMING_LEGACY;
 	mmc_set_ios(host);
 
+	if (host->ops->set_dev_power)
+		host->ops->set_dev_power(host, false);
+
 	/*
 	 * Some configurations, such as the 802.11 SDIO card in the OLPC
 	 * XO-1.5, require a short delay after poweroff before the card
@@ -1593,6 +1610,36 @@
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
+int mmc_resume_bus(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	if (!mmc_bus_needs_resume(host))
+		return -EINVAL;
+
+	printk("%s: Starting deferred resume\n", mmc_hostname(host));
+	spin_lock_irqsave(&host->lock, flags);
+	host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
+	host->rescan_disable = 0;
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	mmc_bus_get(host);
+	if (host->bus_ops && !host->bus_dead) {
+		mmc_power_up(host);
+		BUG_ON(!host->bus_ops->resume);
+		host->bus_ops->resume(host);
+	}
+
+	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead)
+		host->bus_ops->detect(host);
+
+	mmc_bus_put(host);
+	printk("%s: Deferred resume completed\n", mmc_hostname(host));
+	return 0;
+}
+EXPORT_SYMBOL(mmc_resume_bus);
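+
+/*
+ * Usage sketch (assuming CONFIG_MMC_BLOCK_DEFERRED_RESUME=y): the block
+ * driver marks the host with mmc_set_bus_resume_policy(host, 1) at probe
+ * time; a system resume then only sets MMC_BUSRESUME_NEEDS_RESUME, and
+ * the first request issued afterwards calls mmc_resume_bus() to perform
+ * the real (slow) card re-initialization on demand.
+ */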
+
 /*
  * Assign a mmc bus handler to a host. Only one bus handler may control a
  * host at any given time.
@@ -1658,6 +1705,8 @@
 	spin_unlock_irqrestore(&host->lock, flags);
 #endif
 	host->detect_change = 1;
+
+	wake_lock(&host->detect_wake_lock);
 	mmc_schedule_delayed_work(&host->detect, delay);
 }
 
@@ -1692,7 +1741,7 @@
 		card->erase_shift = ffs(card->ssr.au) - 1;
 	} else if (card->ext_csd.hc_erase_size) {
 		card->pref_erase = card->ext_csd.hc_erase_size;
-	} else {
+	} else if (card->erase_size) {
 		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
 		if (sz < 128)
 			card->pref_erase = 512 * 1024 / 512;
@@ -1709,7 +1758,8 @@
 			if (sz)
 				card->pref_erase += card->erase_size - sz;
 		}
-	}
+	} else {
+		card->pref_erase = 0;
+	}
 }
 
 static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
@@ -1817,8 +1867,13 @@
 	struct mmc_command cmd = {0};
 	unsigned int qty = 0;
 	unsigned long timeout;
+	unsigned int fr, nr;
 	int err;
 
+	fr = from;
+	nr = to - from + 1;
+	trace_mmc_blk_erase_start(arg, fr, nr);
+
 	/*
 	 * qty is used to calculate the erase timeout which depends on how many
 	 * erase groups (or allocation units in SD terminology) are affected.
@@ -1922,6 +1977,8 @@
 	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
 		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
 out:
+
+	trace_mmc_blk_erase_end(arg, fr, nr);
 	return err;
 }
 
@@ -2353,6 +2410,7 @@
 	struct mmc_host *host =
 		container_of(work, struct mmc_host, detect.work);
 	int i;
+	bool extend_wakelock = false;
 
 	if (host->rescan_disable)
 		return;
@@ -2374,6 +2432,12 @@
 
 	host->detect_change = 0;
 
+	/*
+	 * If the card was removed, the bus will be marked as dead;
+	 * extend the wakelock so userspace can respond.
+	 */
+	if (host->bus_dead)
+		extend_wakelock = true;
+
 	/*
 	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
 	 * the card is no longer present.
@@ -2402,16 +2466,25 @@
 
 	mmc_claim_host(host);
 	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
-		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
+		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) {
+			extend_wakelock = true;
 			break;
+		}
 		if (freqs[i] <= host->f_min)
 			break;
 	}
 	mmc_release_host(host);
 
  out:
-	if (host->caps & MMC_CAP_NEEDS_POLL)
+	if (extend_wakelock)
+		wake_lock_timeout(&host->detect_wake_lock, HZ / 2);
+	else
+		wake_unlock(&host->detect_wake_lock);
+	mmc_emergency_setup(host);
+	if (host->caps & MMC_CAP_NEEDS_POLL) {
+		wake_lock(&host->detect_wake_lock);
 		mmc_schedule_delayed_work(&host->detect, HZ);
+	}
 }
 
 void mmc_start_host(struct mmc_host *host)
@@ -2423,6 +2496,8 @@
 	else
 		mmc_power_up(host);
 	mmc_detect_change(host, 0);
+	if (host->caps2 & MMC_CAP2_INIT_CARD_SYNC)
+		flush_delayed_work(&host->detect);
 }
 
 void mmc_stop_host(struct mmc_host *host)
@@ -2435,7 +2510,8 @@
 #endif
 
 	host->rescan_disable = 1;
-	cancel_delayed_work_sync(&host->detect);
+	if (cancel_delayed_work_sync(&host->detect))
+		wake_unlock(&host->detect_wake_lock);
 	mmc_flush_scheduled_work();
 
 	/* clear pm flags now and let card drivers set them as needed */
@@ -2582,44 +2658,6 @@
 }
 EXPORT_SYMBOL(mmc_flush_cache);
 
-/*
- * Turn the cache ON/OFF.
- * Turning the cache OFF shall trigger flushing of the data
- * to the non-volatile storage.
- * This function should be called with host claimed
- */
-int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
-{
-	struct mmc_card *card = host->card;
-	unsigned int timeout;
-	int err = 0;
-
-	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
-			mmc_card_is_removable(host))
-		return err;
-
-	if (card && mmc_card_mmc(card) &&
-			(card->ext_csd.cache_size > 0)) {
-		enable = !!enable;
-
-		if (card->ext_csd.cache_ctrl ^ enable) {
-			timeout = enable ? card->ext_csd.generic_cmd6_time : 0;
-			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-					EXT_CSD_CACHE_CTRL, enable, timeout);
-			if (err)
-				pr_err("%s: cache %s error %d\n",
-						mmc_hostname(card->host),
-						enable ? "on" : "off",
-						err);
-			else
-				card->ext_csd.cache_ctrl = enable;
-		}
-	}
-
-	return err;
-}
-EXPORT_SYMBOL(mmc_cache_ctrl);
-
 #ifdef CONFIG_PM
 
 /**
@@ -2630,7 +2668,11 @@
 {
 	int err = 0;
 
-	cancel_delayed_work(&host->detect);
+	if (mmc_bus_needs_resume(host))
+		return 0;
+
+	if (cancel_delayed_work(&host->detect))
+		wake_unlock(&host->detect_wake_lock);
 	mmc_flush_scheduled_work();
 
 	mmc_bus_get(host);
@@ -2681,6 +2723,12 @@
 	int err = 0;
 
 	mmc_bus_get(host);
+	if (mmc_bus_manual_resume(host)) {
+		host->bus_resume_flags |= MMC_BUSRESUME_NEEDS_RESUME;
+		mmc_bus_put(host);
+		return 0;
+	}
+
 	if (host->bus_ops && !host->bus_dead) {
 		if (!mmc_card_keep_power(host)) {
 			mmc_power_up(host);
@@ -2741,9 +2789,14 @@
 		}
 
 		spin_lock_irqsave(&host->lock, flags);
+		if (mmc_bus_needs_resume(host)) {
+			spin_unlock_irqrestore(&host->lock, flags);
+			break;
+		}
 		host->rescan_disable = 1;
 		spin_unlock_irqrestore(&host->lock, flags);
-		cancel_delayed_work_sync(&host->detect);
+		if (cancel_delayed_work_sync(&host->detect))
+			wake_unlock(&host->detect_wake_lock);
 
 		if (!host->bus_ops || host->bus_ops->suspend)
 			break;
@@ -2764,6 +2817,10 @@
 	case PM_POST_RESTORE:
 
 		spin_lock_irqsave(&host->lock, flags);
+		if (mmc_bus_manual_resume(host)) {
+			spin_unlock_irqrestore(&host->lock, flags);
+			break;
+		}
 		host->rescan_disable = 0;
 		spin_unlock_irqrestore(&host->lock, flags);
 		mmc_detect_change(host, 0);
@@ -2791,6 +2848,22 @@
 	init_waitqueue_head(&host->context_info.wait);
 }
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+void mmc_set_embedded_sdio_data(struct mmc_host *host,
+				struct sdio_cis *cis,
+				struct sdio_cccr *cccr,
+				struct sdio_embedded_func *funcs,
+				int num_funcs)
+{
+	host->embedded_sdio_data.cis = cis;
+	host->embedded_sdio_data.cccr = cccr;
+	host->embedded_sdio_data.funcs = funcs;
+	host->embedded_sdio_data.num_funcs = num_funcs;
+}
+EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
+#endif
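+
+/*
+ * Usage sketch (hypothetical board code): a platform with an embedded,
+ * non-enumerable SDIO device can pre-load its CIS/CCCR before the core
+ * scans the bus, e.g.:
+ *
+ *	mmc_set_embedded_sdio_data(host, &wifi_cis, &wifi_cccr,
+ *				   wifi_funcs, ARRAY_SIZE(wifi_funcs));
+ *
+ * where wifi_cis, wifi_cccr and wifi_funcs are board-specific tables.
+ */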
+
 static int __init mmc_init(void)
 {
 	int ret;
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index b9f18a2..c9c2c2f 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -25,6 +25,7 @@
 	int (*power_save)(struct mmc_host *);
 	int (*power_restore)(struct mmc_host *);
 	int (*alive)(struct mmc_host *);
+	int (*shutdown)(struct mmc_host *);
 };
 
 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 35c2f85..a0cb347 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -138,6 +138,9 @@
 	case MMC_TIMING_MMC_HS200:
 		str = "mmc high-speed SDR200";
 		break;
+	case MMC_TIMING_MMC_HS400:
+		str = "mmc high-speed DDR200";
+		break;
 	default:
 		str = "invalid";
 		break;
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 2a3593d..b3dca19 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -409,6 +409,8 @@
 		host->caps |= MMC_CAP_POWER_OFF_CARD;
 	if (of_find_property(np, "cap-sdio-irq", &len))
 		host->caps |= MMC_CAP_SDIO_IRQ;
+	if (of_find_property(np, "full-pwr-cycle", &len))
+		host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
 	if (of_find_property(np, "keep-power-in-suspend", &len))
 		host->pm_caps |= MMC_PM_KEEP_POWER;
 	if (of_find_property(np, "enable-sdio-wakeup", &len))
@@ -459,6 +461,8 @@
 
 	spin_lock_init(&host->lock);
 	init_waitqueue_head(&host->wq);
+	wake_lock_init(&host->detect_wake_lock, WAKE_LOCK_SUSPEND,
+		kasprintf(GFP_KERNEL, "%s_detect", mmc_hostname(host)));
 	INIT_DELAYED_WORK(&host->detect, mmc_rescan);
 #ifdef CONFIG_PM
 	host->pm_notify.notifier_call = mmc_pm_notify;
@@ -511,7 +515,8 @@
 	mmc_host_clk_sysfs_init(host);
 
 	mmc_start_host(host);
-	register_pm_notifier(&host->pm_notify);
+	if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
+		register_pm_notifier(&host->pm_notify);
 
 	return 0;
 }
@@ -528,7 +533,9 @@
  */
 void mmc_remove_host(struct mmc_host *host)
 {
-	unregister_pm_notifier(&host->pm_notify);
+	if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
+		unregister_pm_notifier(&host->pm_notify);
+
 	mmc_stop_host(host);
 
 #ifdef CONFIG_DEBUG_FS
@@ -555,6 +562,7 @@
 	spin_lock(&mmc_host_lock);
 	idr_remove(&mmc_host_idr, host->index);
 	spin_unlock(&mmc_host_lock);
+	wake_lock_destroy(&host->detect_wake_lock);
 
 	put_device(&host->class_dev);
 }
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index dda1a42..eacc979 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -239,7 +239,8 @@
 static void mmc_select_card_type(struct mmc_card *card)
 {
 	struct mmc_host *host = card->host;
-	u8 card_type = card->ext_csd.raw_card_type & EXT_CSD_CARD_TYPE_MASK;
+	u8 card_type = card->ext_csd.raw_card_type &
+			EXT_CSD_CARD_TYPE_MASK_FULL;
 	u32 caps = host->caps, caps2 = host->caps2;
 	unsigned int hs_max_dtr = 0;
 
@@ -262,6 +263,12 @@
 			card_type & EXT_CSD_CARD_TYPE_SDR_1_2V))
 		hs_max_dtr = MMC_HS200_MAX_DTR;
 
+	if ((caps2 & MMC_CAP2_HS400_1_8V_DDR &&
+			card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) ||
+	    (caps2 & MMC_CAP2_HS400_1_2V_DDR &&
+			card_type & EXT_CSD_CARD_TYPE_HS400_1_2V))
+		hs_max_dtr = MMC_HS400_MAX_DTR;
+
 	card->ext_csd.hs_max_dtr = hs_max_dtr;
 	card->ext_csd.card_type = card_type;
 }
@@ -504,6 +511,9 @@
 		 * RPMB regions are defined in multiples of 128K.
 		 */
 		card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
+		card->ext_csd.rpmb_size = 128 *
+			card->ext_csd.raw_rpmb_size_mult;
+		card->ext_csd.rpmb_size <<= 2; /* unit: 256-byte half-sectors */
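+		/*
+		 * Worked example (illustrative): raw_rpmb_size_mult == 2
+		 * gives 128 * 2 == 256 (KiB), i.e. rpmb_size == 1024
+		 * half-sectors of 256 bytes each.
+		 */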
 		if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) {
 			mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
 				EXT_CSD_PART_CONFIG_ACC_RPMB,
@@ -555,6 +565,17 @@
 		card->ext_csd.data_sector_size = 512;
 	}
 
+	/*
+	 * If legacy reliable write is in use, the block count must not
+	 * exceed the reliable write sectors.
+	 */
+	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
+		if (card->ext_csd.rel_sectors < RPMB_AVALIABLE_SECTORS)
+			card->rpmb_max_req = card->ext_csd.rel_sectors;
+		else
+			card->rpmb_max_req = RPMB_AVALIABLE_SECTORS;
+	} else {
+		card->rpmb_max_req = RPMB_AVALIABLE_SECTORS;
+	}
 out:
 	return err;
 }
@@ -642,6 +663,7 @@
 MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
 MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
 MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
+MMC_DEV_ATTR(rpmb_size, "%d\n", card->ext_csd.rpmb_size);
 
 static struct attribute *mmc_std_attrs[] = {
 	&dev_attr_cid.attr,
@@ -660,6 +682,7 @@
 	&dev_attr_enhanced_area_size.attr,
 	&dev_attr_raw_rpmb_size_mult.attr,
 	&dev_attr_rel_sectors.attr,
+	&dev_attr_rpmb_size.attr,
 	NULL,
 };
 
@@ -714,8 +737,12 @@
 			index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
 				EXT_CSD_PWR_CL_52_195 :
 				EXT_CSD_PWR_CL_DDR_52_195;
-		else if (host->ios.clock <= 200000000)
-			index = EXT_CSD_PWR_CL_200_195;
+		else if (host->ios.clock <= 200000000) {
+			if (mmc_card_hs400(card))
+				index = EXT_CSD_PWR_CL_200_DDR_195;
+			else
+				index = EXT_CSD_PWR_CL_200_195;
+		}
 		break;
 	case MMC_VDD_27_28:
 	case MMC_VDD_28_29:
@@ -762,6 +789,81 @@
 }
 
 /*
+ * Support HS400:
+ * This function should be called after HS200 tuning.
+ */
+static int mmc_select_hs400_start(struct mmc_card *card)
+{
+	int err = -EINVAL;
+	struct mmc_host *host;
+	static unsigned ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_8;
+	static unsigned bus_width = MMC_BUS_WIDTH_8;
+
+	BUG_ON(!card);
+
+	host = card->host;
+	/* HS400 mode only supports an 8-bit bus. */
+	if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
+		pr_err("HS400: MMC host does not support 8bit bus, error!\n");
+		goto err;
+	}
+
+	/* Must set HS_TIMING to 1 after tuning completion. */
+	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+			EXT_CSD_HS_TIMING, 1, 0);
+	if (!err) {
+		/* Set timing to DDR50 first */
+		mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50);
+		/* Then, set clock to 50MHz */
+		mmc_set_clock(host, MMC_HIGH_DDR_MAX_DTR);
+	} else {
+		goto err;
+	}
+
+	/*
+	 * Host is capable of 8bit transfer, switch
+	 * the device to work in 8bit transfer mode.
+	 * On success set 8bit bus width on the host.
+	 */
+	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+			EXT_CSD_BUS_WIDTH,
+			ext_csd_bit,
+			card->ext_csd.generic_cmd6_time);
+	if (err)
+		goto err;
+
+	/* Bus test */
+	mmc_set_bus_width(card->host, bus_width);
+	if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
+		err = mmc_compare_ext_csds(card, bus_width);
+	else
+		err = mmc_bus_test(card, bus_width);
+	if (err)
+		goto err;
+
+err:
+	return err;
+}
+
+static int mmc_select_hs400_end(struct mmc_card *card, unsigned int max_dtr)
+{
+	int err = -EINVAL;
+
+	/* Switch timing to HS400 now. */
+	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+			EXT_CSD_HS_TIMING, 3, 0);
+	if (!err) {
+		mmc_set_timing(card->host, MMC_TIMING_MMC_HS400);
+		/*
+		 * After enabling HS400 mode, restore the
+		 * frequency to 200MHz.
+		 */
+		mmc_set_clock(card->host, max_dtr);
+	}
+	return err;
+}
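+
+/*
+ * Worked example (illustrative): mmc_switch() encodes the CMD6 argument
+ * as (MMC_SWITCH_MODE_WRITE_BYTE << 24) | (index << 16) | (value << 8) |
+ * set, so the "HS_TIMING = 3" switch above becomes 0x03B90301, assuming
+ * EXT_CSD_HS_TIMING == 185 and EXT_CSD_CMD_SET_NORMAL == 1.
+ */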
+
+/*
  * Selects the desired buswidth and switch to the HS200 mode
  * if bus width set without error
  */
@@ -782,12 +884,16 @@
 
 	host = card->host;
 
-	if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V &&
-			host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
+	if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V &&
+			host->caps2 & MMC_CAP2_HS200_1_2V_SDR) ||
+	    (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_HS400_1_2V &&
+			host->caps2 & MMC_CAP2_HS400_1_2V_DDR))
 		err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
 
-	if (err && card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V &&
-			host->caps2 & MMC_CAP2_HS200_1_8V_SDR)
+	if (err && ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V &&
+			host->caps2 & MMC_CAP2_HS200_1_8V_SDR) ||
+	    (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_HS400_1_8V &&
+			host->caps2 & MMC_CAP2_HS400_1_8V_DDR)))
 		err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
 
 	/* If fails try again during next card power cycle */
@@ -1020,11 +1126,9 @@
 	}
 
 	/*
-	 * If the host supports the power_off_notify capability then
-	 * set the notification byte in the ext_csd register of device
+	 * Enable power_off_notification byte in the ext_csd register
 	 */
-	if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) &&
-	    (card->ext_csd.rev >= 6)) {
+	if (card->ext_csd.rev >= 6) {
 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 				 EXT_CSD_POWER_OFF_NOTIFICATION,
 				 EXT_CSD_POWER_ON,
@@ -1045,8 +1149,10 @@
 	 */
 	if (card->ext_csd.hs_max_dtr != 0) {
 		err = 0;
+		/* Support HS400: set to HS200 before tuning complete. */
 		if (card->ext_csd.hs_max_dtr > 52000000 &&
-		    host->caps2 & MMC_CAP2_HS200)
+		    (host->caps2 & MMC_CAP2_HS200 ||
+		    host->caps2 & MMC_CAP2_HS400))
 			err = mmc_select_hs200(card);
 		else if	(host->caps & MMC_CAP_MMC_HIGHSPEED)
 			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1062,6 +1168,15 @@
 			err = 0;
 		} else {
 			if (card->ext_csd.hs_max_dtr > 52000000 &&
+			    host->caps2 & MMC_CAP2_HS400 &&
+			    (card->ext_csd.card_type &
+				EXT_CSD_CARD_TYPE_HS400_1_8V ||
+			    card->ext_csd.card_type &
+				EXT_CSD_CARD_TYPE_HS400_1_2V)) {
+				mmc_card_set_hs400(card);
+				mmc_set_timing(card->host,
+						MMC_TIMING_MMC_HS200);
+			} else if (card->ext_csd.hs_max_dtr > 52000000 &&
 			    host->caps2 & MMC_CAP2_HS200) {
 				mmc_card_set_hs200(card);
 				mmc_set_timing(card->host,
@@ -1078,7 +1193,9 @@
 	 */
 	max_dtr = (unsigned int)-1;
 
-	if (mmc_card_highspeed(card) || mmc_card_hs200(card)) {
+	if (mmc_card_highspeed(card) ||
+	    mmc_card_hs200(card) ||
+	    mmc_card_hs400(card)) {
 		if (max_dtr > card->ext_csd.hs_max_dtr)
 			max_dtr = card->ext_csd.hs_max_dtr;
 		if (mmc_card_highspeed(card) && (max_dtr > 52000000))
@@ -1106,9 +1223,9 @@
 	}
 
 	/*
-	 * Indicate HS200 SDR mode (if supported).
+	 * Indicate HS200 SDR mode or HS400 DDR mode (if supported).
 	 */
-	if (mmc_card_hs200(card)) {
+	if (mmc_card_hs200(card) || mmc_card_hs400(card)) {
 		u32 ext_csd_bits;
 		u32 bus_width = card->host->ios.bus_width;
 
@@ -1123,7 +1240,9 @@
 		 * 3. set the clock to > 52Mhz <=200MHz and
 		 * 4. execute tuning for HS200
 		 */
-		if ((host->caps2 & MMC_CAP2_HS200) &&
+		/* Support HS400: tuning under HS200 mode. */
+		if ((host->caps2 & MMC_CAP2_HS200 ||
+		    host->caps2 & MMC_CAP2_HS400) &&
 		    card->host->ops->execute_tuning) {
 			mmc_host_clk_hold(card->host);
 			err = card->host->ops->execute_tuning(card->host,
@@ -1136,19 +1255,45 @@
 			goto err;
 		}
 
-		ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
+		/* Support HS400 */
+		if (mmc_card_hs400(card)) {
+			/*
+			 * Per the eMMC 5.0 spec, follow this sequence to enable HS400:
+			 * 1. Set HS_TIMING to 1 after HS200 tuning.
+			 * 2. Set frequency below 52MHz.
+			 * 3. Set bus width to DDR 8bit.
+			 * 4. Set HS_TIMING to 3 as HS400.
+			 */
+			err = mmc_select_hs400_start(card);
+			if (err) {
+				pr_warn("%s: hs400_start err=0x%x.\n",
+					mmc_hostname(card->host), err);
+				goto free_card;
+			}
+			ext_csd_bits = EXT_CSD_DDR_BUS_WIDTH_8;
+		} else {
+			ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
 				EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
+		}
 		err = mmc_select_powerclass(card, ext_csd_bits, ext_csd);
 		if (err)
-			pr_warning("%s: power class selection to bus width %d"
+			pr_warn("%s: power class selection to bus width %d"
 				   " failed\n", mmc_hostname(card->host),
 				   1 << bus_width);
+		if (mmc_card_hs400(card)) {
+			err = mmc_select_hs400_end(card, max_dtr);
+			if (err) {
+				pr_warn("%s: hs400_end err=0x%x.\n",
+					mmc_hostname(card->host), err);
+				goto free_card;
+			}
+		}
 	}
 
 	/*
 	 * Activate wide bus and DDR (if supported).
 	 */
-	if (!mmc_card_hs200(card) &&
+	if ((!mmc_card_hs200(card) && !mmc_card_hs400(card)) &&
 	    (card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
 	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
 		static unsigned ext_csd_bits[][2] = {
@@ -1406,24 +1551,24 @@
 	}
 }
 
-/*
- * Suspend callback from host.
- */
-static int mmc_suspend(struct mmc_host *host)
+static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
 {
 	int err = 0;
+	unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
+					EXT_CSD_POWER_OFF_LONG;
 
 	BUG_ON(!host);
 	BUG_ON(!host->card);
 
 	mmc_claim_host(host);
 
-	err = mmc_cache_ctrl(host, 0);
+	err = mmc_flush_cache(host->card);
 	if (err)
 		goto out;
 
-	if (mmc_can_poweroff_notify(host->card))
-		err = mmc_poweroff_notify(host->card, EXT_CSD_POWER_OFF_SHORT);
+	if (mmc_can_poweroff_notify(host->card) &&
+		((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend))
+		err = mmc_poweroff_notify(host->card, notify_type);
 	else if (mmc_card_can_sleep(host))
 		err = mmc_card_sleep(host);
 	else if (!mmc_host_is_spi(host))
@@ -1436,6 +1581,14 @@
 }
 
 /*
+ * Suspend callback from host.
+ */
+static int mmc_suspend(struct mmc_host *host)
+{
+	return _mmc_suspend(host, true);
+}
+
+/*
  * Resume callback from host.
  *
  * This function tries to determine if the same card is still present
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 49f04bc..432af79 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -141,7 +141,7 @@
 	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
 
-	for (i = 100; i; i--) {
+	for (i = 200; i; i--) {
 		err = mmc_wait_for_cmd(host, &cmd, 0);
 		if (err)
 			break;
@@ -161,7 +161,7 @@
 
 		err = -ETIMEDOUT;
 
-		mmc_delay(10);
+		usleep_range(5000, 5500);
 	}
 
 	if (rocr && !mmc_host_is_spi(host))
@@ -638,3 +638,415 @@
 
 	return 0;
 }
+
+static int mmc_rpmb_send_command(struct mmc_card *card, u8 *buf, __u16 blks,
+		__u16 type, u8 req_type)
+{
+	struct mmc_request mrq = {NULL};
+	struct mmc_command cmd = {0};
+	struct mmc_command sbc = {0};
+	struct mmc_data data = {0};
+	struct scatterlist sg;
+	u8 *transfer_buf = NULL;
+
+	mrq.sbc = &sbc;
+	mrq.cmd = &cmd;
+	mrq.data = &data;
+	mrq.stop = NULL;
+	transfer_buf = kzalloc(512 * blks, GFP_KERNEL);
+	if (!transfer_buf)
+		return -ENOMEM;
+
+	/*
+	 * set CMD23
+	 */
+	sbc.opcode = MMC_SET_BLOCK_COUNT;
+	sbc.arg = blks;
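+	/*
+	 * Bit 31 of the CMD23 argument flags a reliable write; the eMMC
+	 * spec requires it for authenticated RPMB data writes.
+	 */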
+	if ((req_type == RPMB_REQ) && (type == RPMB_WRITE_DATA ||
+				type == RPMB_PROGRAM_KEY))
+		sbc.arg |= 1 << 31;
+	sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+	/*
+	 * set CMD25/18
+	 */
+	sg_init_one(&sg, transfer_buf, 512 * blks);
+	if (req_type == RPMB_REQ) {
+		cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
+		sg_copy_from_buffer(&sg, 1, buf, 512 * blks);
+		data.flags |= MMC_DATA_WRITE;
+	} else {
+		cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
+		data.flags |= MMC_DATA_READ;
+	}
+	cmd.arg = 0;
+	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+	data.blksz = 512;
+	data.blocks = blks;
+	data.sg = &sg;
+	data.sg_len = 1;
+
+	mmc_set_data_timeout(&data, card);
+
+	mmc_wait_for_req(card->host, &mrq);
+
+	if (req_type != RPMB_REQ)
+		sg_copy_to_buffer(&sg, 1, buf, 512 * blks);
+
+	kfree(transfer_buf);
+
+	if (cmd.error)
+		return cmd.error;
+	if (data.error)
+		return data.error;
+	return 0;
+}
+
+void mmc_rpmb_post_frame(struct mmc_core_rpmb_req *rpmb_req)
+{
+	int i;
+	struct mmc_ioc_rpmb_req *p_req;
+	__u8 *buf_frame;
+
+	if (!rpmb_req || !rpmb_req->ready)
+		return;
+
+	p_req = rpmb_req->req;
+	buf_frame = rpmb_req->frame;
+
+	if (!p_req || !buf_frame)
+		return;
+	/*
+	 * Mirroring the rules checked in mmc_rpmb_request_check(),
+	 * these are the post-processing rules.
+	 * All operations return a result.
+	 * GET_WRITE_COUNTER:
+	 *              must: write counter, nonce
+	 *              optional: MAC
+	 * WRITE_DATA:
+	 *              must: MAC, write counter
+	 * READ_DATA:
+	 *              must: nonce, data
+	 *              optional: MAC
+	 * PROGRAM_KEY:
+	 *              must: Nothing
+	 *
+	 * Except for READ_DATA, all of these operations only need to
+	 * parse one frame; READ_DATA needs blk_cnt frames to get the data.
+	 */
+
+	memcpy(p_req->result, buf_frame + RPMB_RES_BEG, 2);
+	*p_req->result = be16_to_cpup(p_req->result);
+
+	if (p_req->type == RPMB_PROGRAM_KEY)
+		goto out;
+
+	if (p_req->type == RPMB_GET_WRITE_COUNTER ||
+			p_req->type == RPMB_WRITE_DATA) {
+		memcpy(p_req->wc, buf_frame + RPMB_WCOUNTER_BEG, 4);
+		*p_req->wc = be32_to_cpup(p_req->wc);
+	}
+
+	if (p_req->type == RPMB_GET_WRITE_COUNTER ||
+			p_req->type == RPMB_READ_DATA) {
+		/* nonce copy */
+		memcpy(p_req->nonce, buf_frame + RPMB_NONCE_BEG, 16);
+	}
+	/*
+	 * The MAC is carried in the last frame
+	 */
+	if (p_req->type == RPMB_READ_DATA) {
+		__u8 *data = p_req->data;
+		for (i = 0; i < p_req->blk_cnt; i++) {
+			memcpy(data, buf_frame + i * 512 + RPMB_DATA_BEG, 256);
+			data += 256;
+		}
+		/*
+		 * The MAC is stored in the last frame
+		 */
+		if (p_req->mac)
+			memcpy(p_req->mac, buf_frame + i * 512 + RPMB_MAC_BEG,
+					32);
+	} else if (p_req->mac) {
+		memcpy(p_req->mac, buf_frame + RPMB_MAC_BEG, 32);
+	}
+out:
+	kfree(buf_frame);
+	rpmb_req->frame = NULL;
+}
+EXPORT_SYMBOL_GPL(mmc_rpmb_post_frame);
+
+static int mmc_rpmb_request_check(struct mmc_card *card,
+		struct mmc_ioc_rpmb_req *p_req)
+{
+	/*
+	 * Some parameters are mandatory for the operation, and different
+	 * operations expect different parameters. The code below checks
+	 * for them.
+	 *
+	 * All operations will need result.
+	 * GET_WRITE_COUNTER:
+	 *              must: write counter, nonce
+	 *              optional: MAC
+	 * WRITE_DATA:
+	 *              must: MAC, data, write counter
+	 * READ_DATA:
+	 *              must: nonce, data
+	 *              optional: MAC
+	 * PROGRAM_KEY:
+	 *              must: MAC
+	 *
+	 * So here, we only check the 'must' parameters.
+	 */
+	if (!p_req->result) {
+		pr_err("%s: Type %d has NULL pointer for result\n",
+				mmc_hostname(card->host), p_req->type);
+		return -EINVAL;
+	}
+
+	if (p_req->type == RPMB_GET_WRITE_COUNTER) {
+		if (!p_req->nonce || !p_req->wc) {
+			pr_err("%s: Type %d has NULL pointer for nonce/wc\n",
+					mmc_hostname(card->host), p_req->type);
+			return -EINVAL;
+		}
+		/*
+		 * used to allocate frame
+		 */
+		p_req->blk_cnt = 1;
+	} else if (p_req->type == RPMB_WRITE_DATA ||
+			p_req->type == RPMB_READ_DATA) {
+		if ((__u32)(p_req->addr + p_req->blk_cnt) >
+				card->ext_csd.rpmb_size) {
+			pr_err("%s Type %d: beyond the RPMB partition rang addr %d, blk_cnt %d, rpmb_size %d\n",
+					mmc_hostname(card->host),
+					p_req->type,
+					p_req->addr,
+					p_req->blk_cnt,
+					card->ext_csd.rpmb_size);
+			return -EINVAL;
+		}
+		if (p_req->blk_cnt == 0) {
+			pr_err("%s: Type %d has zero block count\n",
+					mmc_hostname(card->host),
+					p_req->type);
+			return -EINVAL;
+		} else if (p_req->blk_cnt > card->rpmb_max_req) {
+			pr_err("%s: Type %d has invalid block count, cannot large than %d\n",
+					mmc_hostname(card->host),
+					p_req->blk_cnt,
+					card->rpmb_max_req);
+			return -EINVAL;
+		}
+		if (!p_req->data) {
+			pr_err("%s: Type %d has NULL pointer for data\n",
+					mmc_hostname(card->host), p_req->type);
+			return -EINVAL;
+		}
+		if (p_req->type == RPMB_WRITE_DATA) {
+			if (!p_req->wc || !p_req->mac) {
+				pr_err("%s: Type %d has NULL pointer for write counter/MAC\n",
+						mmc_hostname(card->host),
+						p_req->type);
+				return -EINVAL;
+			}
+		} else {
+			if (!p_req->nonce) {
+				pr_err("%s: Type %d has NULL pointer for nonce\n",
+						mmc_hostname(card->host),
+						p_req->type);
+				return -EINVAL;
+			}
+		}
+	} else if (p_req->type == RPMB_PROGRAM_KEY) {
+		if (!p_req->mac) {
+			pr_err("%s: Type %d has NULL pointer for MAC\n",
+					mmc_hostname(card->host), p_req->type);
+			return -EINVAL;
+		}
+		/*
+		 * used to allocate frame
+		 */
+		p_req->blk_cnt = 1;
+	} else {
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
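+/*
+ * For reference, the 512-byte RPMB frame layout defined by the eMMC spec
+ * (the RPMB_*_BEG offsets used below are assumed to match this; they are
+ * defined in the core headers):
+ *
+ *	offset	size	field
+ *	0	196	stuff bytes
+ *	196	32	key/MAC
+ *	228	256	data
+ *	484	16	nonce
+ *	500	4	write counter
+ *	504	2	address
+ *	506	2	block count
+ *	508	2	result
+ *	510	2	request/response type
+ *
+ * All multi-byte fields are big-endian, hence the cpu_to_be*() helpers.
+ */
+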
+/*
+ * Prepare the RPMB frame(s) for a request.
+ * RPMB frames are MSB-first (big-endian), so convert the needed fields.
+ * Returns 0 on success or a negative error code.
+ */
+int mmc_rpmb_pre_frame(struct mmc_core_rpmb_req *rpmb_req,
+		struct mmc_card *card)
+{
+	int i, ret;
+	struct mmc_ioc_rpmb_req *p_req;
+	__u8 *buf_frame;
+	__u16 blk_cnt, addr, type;
+	__u32 w_counter;
+
+	if (!rpmb_req || !card)
+		return -EINVAL;
+
+	p_req = rpmb_req->req;
+	if (!p_req) {
+		pr_err("%s: mmc_ioc_rpmb_req is NULL. Wrong parameter\n",
+				mmc_hostname(card->host));
+		return -EINVAL;
+	}
+
+	/*
+	 * make sure these two items are clear
+	 */
+	rpmb_req->ready = 0;
+	rpmb_req->frame = NULL;
+
+	ret = mmc_rpmb_request_check(card, p_req);
+	if (ret)
+		return ret;
+
+	buf_frame = kzalloc(512 * p_req->blk_cnt, GFP_KERNEL);
+	if (!buf_frame) {
+		pr_err("%s: cannot allocate frame for type %d\n",
+				mmc_hostname(card->host), p_req->type);
+		return -ENOMEM;
+	}
+
+	type = cpu_to_be16p(&p_req->type);
+	if (p_req->type == RPMB_GET_WRITE_COUNTER ||
+			p_req->type == RPMB_READ_DATA) {
+		/*
+		 * One frame prepared.
+		 * This request needs the nonce and type;
+		 * a data read also needs the address.
+		 */
+		memcpy(buf_frame + RPMB_TYPE_BEG, &type, 2);
+		if (p_req->type == RPMB_READ_DATA) {
+			addr = cpu_to_be16p(&p_req->addr);
+			memcpy(buf_frame + RPMB_ADDR_BEG, &addr, 2);
+		}
+		/* convert Nonce code */
+		memcpy(buf_frame + RPMB_NONCE_BEG, p_req->nonce, 16);
+	} else if (p_req->type == RPMB_WRITE_DATA) {
+		__u8 *data = p_req->data;
+		/*
+		 * Multiple frames prepared.
+		 * This request needs blk_cnt, addr, write_counter,
+		 * data and mac.
+		 */
+		blk_cnt = cpu_to_be16p(&p_req->blk_cnt);
+		addr = cpu_to_be16p(&p_req->addr);
+		w_counter = cpu_to_be32p(p_req->wc);
+		for (i = 0; i < p_req->blk_cnt; i++) {
+			memcpy(buf_frame + i * 512 + RPMB_TYPE_BEG,
+					&type, 2);
+			memcpy(buf_frame + i * 512 + RPMB_BLKS_BEG,
+					&blk_cnt, 2);
+			memcpy(buf_frame + i * 512 + RPMB_ADDR_BEG,
+					&addr, 2);
+			memcpy(buf_frame + i * 512 + RPMB_WCOUNTER_BEG,
+					&w_counter, 4);
+			memcpy(buf_frame + i * 512 + RPMB_DATA_BEG,
+					data, 256);
+			data += 256;
+		}
+		/* convert MAC code */
+		memcpy(buf_frame + 512 * (i - 1) + RPMB_MAC_BEG,
+				p_req->mac, 32);
+	} else if (p_req->type == RPMB_PROGRAM_KEY) {
+		/*
+		 * One frame prepared.
+		 * This request only needs the MAC.
+		 */
+		memcpy(buf_frame + RPMB_TYPE_BEG, &type, 2);
+		/* convert MAC code */
+		memcpy(buf_frame + RPMB_MAC_BEG,
+				p_req->mac, 32);
+	} else {
+		pr_err("%s: We shouldn't be here\n", mmc_hostname(card->host));
+		kfree(buf_frame);
+		return -EINVAL;
+	}
+	rpmb_req->ready = 1;
+	rpmb_req->frame = buf_frame;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mmc_rpmb_pre_frame);
+
+int mmc_rpmb_partition_ops(struct mmc_core_rpmb_req *rpmb_req,
+		struct mmc_card *card)
+{
+	int err = 0;
+	struct mmc_ioc_rpmb_req *p_req;
+	__u16 type, blks;
+	__u8 *buf_frame;
+
+	if (!rpmb_req || !card)
+		return -EINVAL;
+
+	p_req = rpmb_req->req;
+	buf_frame = rpmb_req->frame;
+
+	if (!p_req || !rpmb_req->ready || !buf_frame) {
+		pr_err("%s: mmc_ioc_rpmb_req is not prepared\n",
+				mmc_hostname(card->host));
+		return -EINVAL;
+	}
+
+	type = p_req->type;
+	blks = p_req->blk_cnt;
+
+	/*
+	 * STEP 1: send request to RPMB partition
+	 */
+	if (type == RPMB_WRITE_DATA)
+		err = mmc_rpmb_send_command(card, buf_frame, blks,
+				type, RPMB_REQ);
+	else
+		err = mmc_rpmb_send_command(card, buf_frame, 1, type, RPMB_REQ);
+
+	if (err) {
+		pr_err("%s: request write counter failed (%d)\n",
+				mmc_hostname(card->host), err);
+		goto out;
+	}
+
+	memset(buf_frame, 0, 512 * blks);
+	/*
+	 * STEP 2: check write result
+	 * Only for WRITE_DATA or Program key
+	 */
+	if (type == RPMB_WRITE_DATA ||
+			type == RPMB_PROGRAM_KEY) {
+		buf_frame[RPMB_TYPE_BEG + 1] = RPMB_RESULT_READ;
+		err = mmc_rpmb_send_command(card, buf_frame, 1,
+				RPMB_RESULT_READ, RPMB_REQ);
+		if (err) {
+			pr_err("%s: request write counter failed (%d)\n",
+					mmc_hostname(card->host), err);
+			goto out;
+		}
+	}
+
+	/*
+	 * STEP 3: get response from RPMB partition
+	 */
+
+	if (type == RPMB_READ_DATA)
+		err = mmc_rpmb_send_command(card, buf_frame,
+				blks, type, RPMB_RESP);
+	else
+		err = mmc_rpmb_send_command(card, buf_frame,
+				1, type, RPMB_RESP);
+	if (err) {
+		pr_err("%s: response write counter failed (%d)\n",
+				mmc_hostname(card->host), err);
+	}
+out:
+	return err;
+}
+EXPORT_SYMBOL_GPL(mmc_rpmb_partition_ops);
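+
+/*
+ * Illustrative flow for an authenticated write: the caller runs
+ * mmc_rpmb_pre_frame(), then mmc_rpmb_partition_ops() issues the write
+ * request (CMD23 + CMD25), a RESULT_READ request, and a response read
+ * (CMD23 + CMD18), and finally mmc_rpmb_post_frame() extracts the
+ * result, write counter and MAC from the returned frame.
+ */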
diff --git a/drivers/mmc/core/mmc_panic_ops.c b/drivers/mmc/core/mmc_panic_ops.c
new file mode 100644
index 0000000..2ed0204
--- /dev/null
+++ b/drivers/mmc/core/mmc_panic_ops.c
@@ -0,0 +1,837 @@
+/*
+ * linux/drivers/mmc/core/mmc_panic_ops.c
+ *
+ * Copyright (C) 2011 Intel Corp
+ * Author: dongxing.zhang@intel.com
+ * Author: jun.zhang@intel.com
+ * Author: chuansheng.liu@intel.com
+ * Author: chuanxiao.dong@intel.com
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+
+#include "core.h"
+#include "bus.h"
+#include "host.h"
+
+#include "mmc_ops.h"
+
+
+static struct mmc_panic_host *panic_host;
+
+static int mmc_emergency_prepare(void)
+{
+	struct mmc_host *mmc = panic_host->mmc;
+
+	if (mmc == NULL) {
+		pr_err("%s: panic host was not setup\n", __func__);
+		return -ENODEV;
+	}
+
+	/*
+	 * Once a panic has happened, we monopolize the host controller,
+	 * so claim the host and never release it.
+	 */
+	mmc->claimed = 1;
+	mmc->claimer = current;
+	mmc->claim_cnt += 1;
+#ifdef CONFIG_MMC_CLKGATE
+	/*
+	 * disable the clock gating
+	 */
+	mmc->clk_gated = false;
+	mmc->clk_requests++;
+	mmc->ios.clock = mmc->clk_old;
+#endif
+	return 0;
+}
+
+static void mmc_emergency_ready(void)
+{
+	panic_host->panic_ready = 1;
+}
+
+/*
+ * Return the card size in sectors.
+ *
+ * return value:
+ * the sector number
+ */
+static unsigned int mmc_get_capacity(struct mmc_card *card)
+{
+	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
+		return card->ext_csd.sectors;
+	else
+		return card->csd.capacity << (card->csd.read_blkbits - 9);
+}
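+
+/*
+ * Worked example (illustrative): a byte-addressed card with
+ * csd.read_blkbits == 10 (1KiB blocks) and csd.capacity == 0x40000
+ * yields 0x40000 << 1 == 524288 sectors of 512B, i.e. 256MiB.
+ */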
+
+static void mmc_emergency_send_req(struct mmc_request *mrq)
+{
+	struct mmc_panic_host *host = panic_host;
+
+	mrq->cmd->error = 0;
+	mrq->cmd->mrq = mrq;
+
+	if (mrq->data) {
+		BUG_ON(mrq->data->blksz > host->max_blk_size);
+		BUG_ON(mrq->data->blocks > host->max_blk_count);
+		BUG_ON(mrq->data->blocks * mrq->data->blksz >
+				host->max_req_size);
+
+		mrq->cmd->data = mrq->data;
+		mrq->data->error = 0;
+		mrq->data->mrq = mrq;
+		if (mrq->stop) {
+			mrq->data->stop = mrq->stop;
+			mrq->stop->error = 0;
+			mrq->stop->mrq = mrq;
+		}
+	}
+
+	/*
+	 * Send the request to host
+	 *
+	 * If request handling succeeds, return.
+	 * If it fails and retries remain, resend the request; the core
+	 * layer keeps retrying until cmd->retries reaches 0.
+	 *
+	 * This keeps the retry logic invisible to the host driver; it is
+	 * controlled entirely by the core driver.
+	 */
+	host->panic_ops->request(host, mrq);
+
+	while ((mrq->cmd->error || (mrq->data && (mrq->data->error ||
+			(mrq->data->stop && mrq->data->stop->error)))) &&
+			mrq->cmd->retries > 0) {
+		/* clear errors */
+		mrq->cmd->error = 0;
+		if (mrq->data) {
+			mrq->data->error = 0;
+			if (mrq->stop)
+				mrq->stop->error = 0;
+		}
+		host->panic_ops->request(host, mrq);
+		mrq->cmd->retries--;
+	}
+}
+
+static int mmc_emergency_send_cmd(struct mmc_command *cmd, int retries)
+{
+	struct mmc_request mrq;
+
+	memset(&mrq, 0, sizeof(struct mmc_request));
+
+	memset(cmd->resp, 0, sizeof(cmd->resp));
+	cmd->retries = retries;
+
+	mrq.cmd = cmd;
+	cmd->data = NULL;
+
+	mmc_emergency_send_req(&mrq);
+
+	return cmd->error;
+}
+
+static int __mmc_emergency_write(unsigned int blk_id)
+{
+	struct mmc_request mrq;
+	struct mmc_command cmd;
+	struct mmc_data data;
+
+	memset(&mrq, 0, sizeof(struct mmc_request));
+	memset(&cmd, 0, sizeof(struct mmc_command));
+	memset(&data, 0, sizeof(struct mmc_data));
+
+	mrq.cmd = &cmd;
+	mrq.data = &data;
+	cmd.opcode = MMC_WRITE_BLOCK;
+	cmd.arg = blk_id;
+	if (!panic_host->blkaddr)
+		cmd.arg <<= 9;
+	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+	/*
+	 * These values are fixed for a single-sector panic write.
+	 */
+	data.blksz = 512;
+	data.blocks = 1;
+	data.dmabuf = panic_host->dmabuf;
+
+	mmc_emergency_send_req(&mrq);
+
+	return cmd.error;
+}
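+
+/*
+ * Note: byte-addressed (non-high-capacity) cards take a byte offset in
+ * the CMD24 argument, hence the << 9 conversion above; block-addressed
+ * cards take the sector number directly.
+ */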
+
+
+static int mmc_emergency_go_idle(struct mmc_panic_host *host)
+{
+	int err;
+	struct mmc_command cmd;
+
+	/*
+	 * Non-SPI hosts need to prevent chipselect going active during
+	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
+	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
+	 *
+	 * SPI hosts ignore ios.chip_select; it's managed according to
+	 * rules that must accommodate non-MMC slaves which this layer
+	 * won't even know about.
+	 */
+	if (!mmc_host_is_spi(host)) {
+		host->ios.chip_select = MMC_CS_HIGH;
+		host->panic_ops->set_ios(host);
+		mdelay(1);
+	}
+
+	memset(&cmd, 0, sizeof(struct mmc_command));
+
+	cmd.opcode = MMC_GO_IDLE_STATE;
+	cmd.arg = 0;
+	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
+
+	err = mmc_emergency_send_cmd(&cmd, 0);
+
+	mdelay(1);
+
+	if (!mmc_host_is_spi(host)) {
+		host->ios.chip_select = MMC_CS_DONTCARE;
+		host->panic_ops->set_ios(host);
+		mdelay(1);
+	}
+
+	return err;
+}
+
+static int mmc_emergency_send_op_cond(struct mmc_panic_host *host,
+		u32 ocr, u32 *rocr)
+{
+	struct mmc_command cmd;
+	int i, err = 0;
+
+	memset(&cmd, 0, sizeof(struct mmc_command));
+
+	cmd.opcode = MMC_SEND_OP_COND;
+	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
+	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
+
+	for (i = 100; i; i--) {
+		err = mmc_emergency_send_cmd(&cmd, 0);
+		if (err)
+			break;
+
+		/* if we're just probing, do a single pass */
+		if (ocr == 0)
+			break;
+
+		/* otherwise wait until reset completes */
+		if (mmc_host_is_spi(host)) {
+			if (!(cmd.resp[0] & R1_SPI_IDLE))
+				break;
+		} else {
+			if (cmd.resp[0] & MMC_CARD_BUSY)
+				break;
+		}
+
+		err = -ETIMEDOUT;
+
+		/*
+		 * If CMD1 failed, wait 10ms and then retry; the card
+		 * may need time to prepare for the next CMD1.
+		 */
+		mdelay(10);
+	}
+
+	if (rocr && !mmc_host_is_spi(host))
+		*rocr = cmd.resp[0];
+
+	return err;
+}
+
+static int mmc_emergency_all_send_cid(u32 *cid)
+{
+	int err;
+	struct mmc_command cmd;
+
+	memset(&cmd, 0, sizeof(struct mmc_command));
+
+	cmd.opcode = MMC_ALL_SEND_CID;
+	cmd.arg = 0;
+	cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
+
+	err = mmc_emergency_send_cmd(&cmd, MMC_CMD_RETRIES);
+	if (err)
+		return err;
+
+	memcpy(cid, cmd.resp, sizeof(u32) * 4);
+
+	return 0;
+}
+
+static int mmc_emergency_set_relative_addr(struct mmc_card *card)
+{
+	int err;
+	struct mmc_command cmd;
+
+	memset(&cmd, 0, sizeof(struct mmc_command));
+
+	cmd.opcode = MMC_SET_RELATIVE_ADDR;
+	cmd.arg = card->rca << 16;
+	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+	err = mmc_emergency_send_cmd(&cmd, MMC_CMD_RETRIES);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int mmc_emergency_select_card(struct mmc_card *card)
+{
+	int err;
+	struct mmc_command cmd;
+
+	memset(&cmd, 0, sizeof(struct mmc_command));
+
+	cmd.opcode = MMC_SELECT_CARD;
+
+	if (card) {
+		cmd.arg = card->rca << 16;
+		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+	} else {
+		cmd.arg = 0;
+		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
+	}
+
+	err = mmc_emergency_send_cmd(&cmd, MMC_CMD_RETRIES);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int mmc_emergency_send_status(struct mmc_panic_host *host, u32 *status)
+{
+	struct mmc_card *card = host->card;
+	int err;
+	struct mmc_command cmd;
+
+	memset(&cmd, 0, sizeof(struct mmc_command));
+
+	cmd.opcode = MMC_SEND_STATUS;
+	if (!mmc_host_is_spi(host))
+		cmd.arg = card->rca << 16;
+	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
+
+	err = mmc_emergency_send_cmd(&cmd, MMC_CMD_RETRIES);
+	if (err)
+		return err;
+
+	/* NOTE: callers are required to understand the difference
+	 * between "native" and SPI format status words!
+	 */
+	if (status)
+		*status = cmd.resp[0];
+
+	return 0;
+}
+
+static int mmc_emergency_switch(struct mmc_panic_host *host,
+		u8 set, u8 index, u8 value)
+{
+	struct mmc_card *card = host->card;
+	int err;
+	struct mmc_command cmd;
+	u32 status;
+
+	memset(&cmd, 0, sizeof(struct mmc_command));
+
+	cmd.opcode = MMC_SWITCH;
+	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+		(index << 16) |
+		(value << 8) |
+		set;
+	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+
+	err = mmc_emergency_send_cmd(&cmd, MMC_CMD_RETRIES);
+	if (err)
+		return err;
+
+	/* Must check status to be sure of no errors */
+	do {
+		err = mmc_emergency_send_status(host, &status);
+		if (err)
+			return err;
+		if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+			break;
+		if (mmc_host_is_spi(host))
+			break;
+	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
+
+	if (mmc_host_is_spi(host)) {
+		if (status & R1_SPI_ILLEGAL_COMMAND)
+			return -EBADMSG;
+	} else {
+		if (status & 0xFDFFA000)
+			pr_warn("%s: unexpected status %#x after switch",
+				mmc_hostname(card->host), status);
+		if (status & R1_SWITCH_ERROR)
+			return -EBADMSG;
+	}
+
+	return 0;
+}
+
+static int mmc_emergency_spi_set_crc(struct mmc_panic_host *host, int use)
+{
+	return -1;
+}
+
+static int mmc_emergency_send_cid(struct mmc_panic_host *host, u32 *cid)
+{
+	return -1;
+}
+
+/*
+ * reinit card:
+ * should also consider about the SPI host
+ */
+static int mmc_emergency_reinit_card(void)
+{
+	struct mmc_panic_host *host = panic_host;
+	struct mmc_card *card = host->card;
+	u32 ocr = host->ocr;
+	int err, ddr = 0;
+	u32 cid[4];
+	unsigned int max_dtr;
+
+	/*
+	 * Lower the clock to the initialization frequency.
+	 */
+	if (mmc_host_is_spi(host)) {
+		host->ios.chip_select = MMC_CS_HIGH;
+		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
+	} else {
+		host->ios.chip_select = MMC_CS_DONTCARE;
+		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
+	}
+	host->ios.bus_width = MMC_BUS_WIDTH_1;
+	host->ios.timing = MMC_TIMING_LEGACY;
+	/*
+	 * As the eMMC spec says, the card init frequency cannot be
+	 * higher than 400kHz, and a well-behaved card should support
+	 * 400kHz during initialization.
+	 */
+	host->ios.clock = 400000;
+	host->panic_ops->set_ios(host);
+
+	/*
+	 * Since we're changing the OCR value, we seem to
+	 * need to tell some cards to go back to the idle
+	 * state.  We wait 1ms to give cards time to
+	 * respond.
+	 */
+	mmc_emergency_go_idle(host);
+
+	/* The extra bit indicates that we support high capacity */
+	err = mmc_emergency_send_op_cond(host, ocr | (1 << 30), NULL);
+	if (err)
+		goto err;
+
+	/*
+	 * For SPI, enable CRC as appropriate.
+	 */
+	if (mmc_host_is_spi(host)) {
+		err = mmc_emergency_spi_set_crc(host, 1);
+		if (err)
+			goto err;
+	}
+
+	/*
+	 * Fetch CID from card.
+	 */
+	if (mmc_host_is_spi(host))
+		err = mmc_emergency_send_cid(host, cid);
+	else
+		err = mmc_emergency_all_send_cid(cid);
+	if (err)
+		goto err;
+
+	if (memcmp(cid, card->raw_cid, sizeof(cid)) != 0) {
+		err = -ENOENT;
+		goto err;
+	}
+
+	/*
+	 * For native busses:  set card RCA and quit open drain mode.
+	 */
+	if (!mmc_host_is_spi(host)) {
+		err = mmc_emergency_set_relative_addr(card);
+		if (err)
+			goto err;
+
+		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
+		host->panic_ops->set_ios(host);
+	}
+	/*
+	 * Select card, as all following commands rely on that.
+	 */
+	if (!mmc_host_is_spi(host)) {
+		err = mmc_emergency_select_card(card);
+		if (err)
+			goto err;
+	}
+
+	/*
+	 * Activate high speed (if supported)
+	 */
+	if ((card->ext_csd.hs_max_dtr != 0) &&
+			(host->caps & MMC_CAP_MMC_HIGHSPEED)) {
+		err = mmc_emergency_switch(host, EXT_CSD_CMD_SET_NORMAL,
+				EXT_CSD_HS_TIMING, 1);
+		if (err && err != -EBADMSG)
+			goto err;
+
+		if (err) {
+			pr_warn("%s: switch to highspeed failed\n",
+					__func__);
+			err = 0;
+		} else {
+			mmc_card_set_highspeed(card);
+			host->ios.timing = MMC_TIMING_MMC_HS;
+			host->panic_ops->set_ios(host);
+		}
+	}
+
+	/*
+	 * Compute bus speed.
+	 */
+	max_dtr = (unsigned int)-1;
+
+	if (mmc_card_highspeed(card)) {
+		if (max_dtr > card->ext_csd.hs_max_dtr)
+			max_dtr = card->ext_csd.hs_max_dtr;
+	} else if (max_dtr > card->csd.max_dtr) {
+		max_dtr = card->csd.max_dtr;
+	}
+
+	host->ios.clock = max_dtr;
+	host->panic_ops->set_ios(host);
+
+	/*
+	 * Activate wide bus.
+	 * By default use SDR mode for panic write
+	 */
+	if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
+		(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
+		unsigned ext_csd_bit, bus_width;
+
+		if (host->caps & MMC_CAP_8_BIT_DATA) {
+			ext_csd_bit = EXT_CSD_BUS_WIDTH_8;
+			bus_width = MMC_BUS_WIDTH_8;
+		} else {
+			ext_csd_bit = EXT_CSD_BUS_WIDTH_4;
+			bus_width = MMC_BUS_WIDTH_4;
+		}
+
+		err = mmc_emergency_switch(host, EXT_CSD_CMD_SET_NORMAL,
+				EXT_CSD_BUS_WIDTH, ext_csd_bit);
+
+		if (err && err != -EBADMSG)
+			goto err;
+
+		if (err) {
+			pr_warn("%s: switch to bus %dbit failed\n",
+				__func__, 1 << bus_width);
+			err = 0;
+		} else {
+			ddr = MMC_SDR_MODE;
+			host->ios.bus_width = bus_width;
+			host->panic_ops->set_ios(host);
+		}
+	}
+
+	return 0;
+err:
+	return err;
+}
+
+/*
+ * mmc_emergency_write - write 512 bytes to the card in panic mode
+ * @data: pointer to a buffer holding no more than 512 bytes
+ * @blk_id: the block id to write the 512B of data to
+ *
+ * This function is supplied to the ipanic driver to write 512B of data
+ * in panic mode. Make sure the data is not larger than 512B, otherwise
+ * data will be lost.
+ */
+int mmc_emergency_write(char *data, unsigned int blk_id)
+{
+	struct mmc_panic_host *host = panic_host;
+	int ret;
+	if (host == NULL) {
+		pr_err("%s: no device for panic record\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!host->panic_ready) {
+		pr_err("%s: device is not ready for panic record\n", __func__);
+		return -EPERM;
+	}
+
+	if (!data) {
+		pr_err("%s: invalided writing data\n", __func__);
+		return -EINVAL;
+	}
+
+	if (blk_id >= host->totalsecs) {
+		pr_err("%s: invalid write blk_id\n", __func__);
+		return -EINVAL;
+	}
+	/*
+	 * everything is OK. So, let's start panic record.
+	 *
+	 * Copy the message data to logbuf
+	 */
+	memcpy(host->logbuf, data, SECTOR_SIZE);
+
+	/* hold Dekker mutex first */
+	if (host->panic_ops->hold_mutex && host->panic_ops->release_mutex) {
+		ret = host->panic_ops->hold_mutex(host);
+		if (ret) {
+			pr_err("%s: hold Dekker mutex failed\n", __func__);
+			return ret;
+		}
+	}
+
+	ret = __mmc_emergency_write(blk_id);
+
+	/* release Dekker mutex */
+	if (host->panic_ops->hold_mutex && host->panic_ops->release_mutex)
+		host->panic_ops->release_mutex(host);
+
+	return ret;
+}
+EXPORT_SYMBOL(mmc_emergency_write);
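+
+/*
+ * Usage sketch (assuming a panic-log consumer such as the ipanic
+ * driver): call mmc_emergency_init() once after the panic, then write
+ * sectors one at a time:
+ *
+ *	if (!mmc_emergency_init())
+ *		mmc_emergency_write(buf512, blk_id);
+ */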
+
+/*
+ * mmc_emergency_init: init the host controller and eMMC card
+ * when a kernel panic occurs
+ *
+ * return value:
+ * 0 - init successfully
+ * negative value - Failed during init
+ * -ENODEV - emmc card was removed by driver
+ */
+int mmc_emergency_init(void)
+{
+	struct mmc_panic_host *host = panic_host;
+	int ret;
+	if (host == NULL) {
+		pr_err("%s: no device for panic record\n", __func__);
+		return -ENODEV;
+	}
+
+	ret = mmc_emergency_prepare();
+	if (ret) {
+		pr_err("%s: prepare host controller failed\n", __func__);
+		return ret;
+	}
+
+	if (!host->panic_ops) {
+		pr_err("%s: no panic_ops for panic host\n", __func__);
+		return -EPERM;
+	}
+
+	/*
+	 * prepare host controller
+	 */
+	if (host->panic_ops->prepare)
+		host->panic_ops->prepare(host);
+
+	/*
+	 * While initializing the eMMC card, we don't want to be
+	 * interrupted by the SCU FW.
+	 */
+	if (host->panic_ops->hold_mutex && host->panic_ops->release_mutex) {
+		ret = host->panic_ops->hold_mutex(host);
+		if (ret) {
+			pr_err("%s: hold Dekker mutex failed\n", __func__);
+			return ret;
+		}
+	} else if (host->panic_ops->power_on)
+		/* don't have Dekker mutex, just power on host controller */
+		host->panic_ops->power_on(host);
+
+	/*
+	 * Reset the card since we are not sure it is in a good state.
+	 *
+	 * In panic mode we re-init an already known card, so none of the
+	 * commands used carry data, and we can reuse the sdhci ops.
+	 */
+	ret = mmc_emergency_reinit_card();
+	if (ret) {
+		pr_info("%s: reinit card failed\n", __func__);
+		goto out;
+	}
+
+	/*
+	 * OK, we are ready.
+	 */
+	mmc_emergency_ready();
+out:
+	/* release Dekker mutex */
+	if (host->panic_ops->hold_mutex && host->panic_ops->release_mutex)
+		host->panic_ops->release_mutex(host);
+
+	return ret;
+}
+EXPORT_SYMBOL(mmc_emergency_init);
+
+/*
+ * mmc_emergency_setup - init panic_host which is used for panic writing
+ * @host: mmc host
+ *
+ * This function samples the values the panic_host needs to re-init the
+ * host controller and card. It only works for drivers which have already
+ * called mmc_alloc_panic_host() during probing.
+ */
+void mmc_emergency_setup(struct mmc_host *mmc)
+{
+	struct mmc_panic_host *host = panic_host;
+
+	/*
+	 * mmc host has no panic host
+	 */
+	if (!mmc->phost)
+		return;
+
+	/*
+	 * Before setting up the panic host, make sure it has
+	 * been allocated.
+	 */
+	if (host == NULL)
+		return;
+
+	/*
+	 * panic host has already been setup
+	 */
+	if (host->mmc)
+		return;
+
+	/*
+	 * mmc host didn't init card
+	 */
+	if (!mmc->card)
+		return;
+	/*
+	 * If it is an SDIO or SD card, bypass it.
+	 */
+	if (mmc_card_sdio(mmc->card) ||
+			mmc_card_sd(mmc->card))
+		return;
+
+	host->card = kzalloc(sizeof(struct mmc_card), GFP_KERNEL);
+	if (!host->card) {
+		pr_err("%s: cannot alloc mmc_card for panic host\n",
+				__func__);
+		return;
+	}
+
+	memcpy(host->card, mmc->card, sizeof(struct mmc_card));
+	host->caps = mmc->caps;
+	host->mmc = mmc;
+	host->ocr = mmc->ocr;
+	host->totalsecs = mmc_get_capacity(mmc->card);
+	host->max_blk_size = mmc->max_blk_size;
+	host->max_blk_count = mmc->max_blk_count;
+	host->max_req_size = mmc->max_req_size;
+	if (mmc_card_blockaddr(mmc->card))
+		host->blkaddr = 1;
+	/*
+	 * sample ios values
+	 */
+	memcpy(&host->ios, &mmc->ios, sizeof(struct mmc_ios));
+#ifdef CONFIG_MMC_CLKGATE
+	if (mmc->ios.clock == 0)
+		host->ios.clock = mmc->clk_old;
+#endif
+	if (host->panic_ops && host->panic_ops->setup)
+		host->panic_ops->setup(host);
+}
+EXPORT_SYMBOL(mmc_emergency_setup);
+
+/*
+ * mmc_alloc_panic_host - used for host layer driver to alloc mmc_panic_host.
+ * @host: mmc host
+ * @ops: this is a pointer which points to mmc_host_panic_ops. This ops should
+ * be defined in host layer driver
+ *
+ * This function need to know the mmc_host_panic_ops, host layer driver should
+ * call this function during probing.
+ *
+ */
+void mmc_alloc_panic_host(struct mmc_host *host,
+		const struct mmc_host_panic_ops *ops)
+{
+	if (panic_host) {
+		pr_info("%s: already allocate panic host\n", __func__);
+		return;
+	}
+
+	panic_host = kzalloc(sizeof(struct mmc_panic_host), GFP_KERNEL);
+	if (!panic_host) {
+		pr_err("%s %s: panic structure allocate error\n",
+				__func__, mmc_hostname(host));
+		return;
+	}
+	/*
+	 * allocate log buffer and DMA buffer
+	 * log buffer size is 512
+	 */
+	panic_host->logbuf = kzalloc(SECTOR_SIZE, GFP_KERNEL);
+	if (!panic_host->logbuf) {
+		pr_err("%s %s: log buf allocate error\n",
+				__func__, mmc_hostname(host));
+		goto free_panic_host;
+	}
+
+	panic_host->dmabuf = dma_map_single(host->parent, panic_host->logbuf,
+			SECTOR_SIZE, DMA_TO_DEVICE);
+	if (dma_mapping_error(host->parent, panic_host->dmabuf)) {
+		pr_err("%s %s: DMA buf allocate error\n",
+				__func__, mmc_hostname(host));
+		goto free_logbuf;
+	}
+
+	panic_host->panic_ops = ops;
+	panic_host->mmc = NULL;
+	host->phost = panic_host;
+
+	return;
+
+free_logbuf:
+	kfree(panic_host->logbuf);
+free_panic_host:
+	kfree(panic_host);
+}
+EXPORT_SYMBOL(mmc_alloc_panic_host);
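+
+/*
+ * Usage sketch (hypothetical host driver probe): the host layer driver
+ * supplies its panic ops once at probe time, e.g.:
+ *
+ *	static const struct mmc_host_panic_ops my_panic_ops = {
+ *		.request	= my_panic_request,
+ *		.set_ios	= my_panic_set_ios,
+ *	};
+ *	mmc_alloc_panic_host(mmc, &my_panic_ops);
+ *
+ * my_panic_request/my_panic_set_ios are illustrative names.
+ */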
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 9e645e1..6d945e0 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -805,6 +805,9 @@
 	bool reinit)
 {
 	int err;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	int retries;
+#endif
 
 	if (!reinit) {
 		/*
@@ -831,7 +834,26 @@
 		/*
 		 * Fetch switch information from card.
 		 */
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+		for (retries = 1; retries <= 3; retries++) {
+			err = mmc_read_switch(card);
+			if (!err) {
+				if (retries > 1) {
+					printk(KERN_WARNING
+					       "%s: recovered\n",
+					       mmc_hostname(host));
+				}
+				break;
+			} else {
+				printk(KERN_WARNING
+				       "%s: read switch failed (attempt %d)\n",
+				       mmc_hostname(host), retries);
+			}
+		}
+#else
 		err = mmc_read_switch(card);
+#endif
+
 		if (err)
 			return err;
 	}
@@ -1032,7 +1054,10 @@
  */
 static void mmc_sd_detect(struct mmc_host *host)
 {
-	int err;
+	int err = 0;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	int retries = 5;
+#endif
 
 	BUG_ON(!host);
 	BUG_ON(!host->card);
@@ -1042,7 +1067,23 @@
 	/*
 	 * Just check if our card has been removed.
 	 */
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	while (retries) {
+		err = mmc_send_status(host->card, NULL);
+		if (err) {
+			retries--;
+			udelay(5);
+			continue;
+		}
+		break;
+	}
+	if (!retries) {
+		printk(KERN_ERR "%s(%s): Unable to re-detect card (%d)\n",
+		       __func__, mmc_hostname(host), err);
+	}
+#else
 	err = _mmc_detect_card_removed(host);
+#endif
 
 	mmc_release_host(host);
 
@@ -1084,12 +1125,31 @@
 static int mmc_sd_resume(struct mmc_host *host)
 {
 	int err;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	int retries;
+#endif
 
 	BUG_ON(!host);
 	BUG_ON(!host->card);
 
 	mmc_claim_host(host);
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	retries = 5;
+	while (retries) {
+		err = mmc_sd_init_card(host, host->ocr, host->card);
+
+		if (err) {
+			printk(KERN_ERR "%s: Re-init card rc = %d (retries = %d)\n",
+			       mmc_hostname(host), err, retries);
+			mdelay(5);
+			retries--;
+			continue;
+		}
+		break;
+	}
+#else
 	err = mmc_sd_init_card(host, host->ocr, host->card);
+#endif
 	mmc_release_host(host);
 
 	return err;
@@ -1114,6 +1174,7 @@
 	.resume = NULL,
 	.power_restore = mmc_sd_power_restore,
 	.alive = mmc_sd_alive,
+	.shutdown = mmc_sd_suspend,
 };
 
 static const struct mmc_bus_ops mmc_sd_ops_unsafe = {
@@ -1123,6 +1184,7 @@
 	.resume = mmc_sd_resume,
 	.power_restore = mmc_sd_power_restore,
 	.alive = mmc_sd_alive,
+	.shutdown = mmc_sd_suspend,
 };
 
 static void mmc_sd_attach_bus_ops(struct mmc_host *host)
@@ -1143,6 +1205,9 @@
 {
 	int err;
 	u32 ocr;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	int retries;
+#endif
 
 	BUG_ON(!host);
 	WARN_ON(!host->claimed);
@@ -1198,9 +1263,27 @@
 	/*
 	 * Detect and init the card.
 	 */
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	retries = 5;
+	while (retries) {
+		err = mmc_sd_init_card(host, host->ocr, NULL);
+		if (err) {
+			retries--;
+			continue;
+		}
+		break;
+	}
+
+	if (!retries) {
+		printk(KERN_ERR "%s: mmc_sd_init_card() failure (err = %d)\n",
+		       mmc_hostname(host), err);
+		goto err;
+	}
+#else
 	err = mmc_sd_init_card(host, host->ocr, NULL);
 	if (err)
 		goto err;
+#endif
 
 	mmc_release_host(host);
 	err = mmc_add_card(host->card);
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 6889a82..46e68f1 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -10,6 +10,7 @@
  */
 
 #include <linux/err.h>
+#include <linux/module.h>
 #include <linux/pm_runtime.h>
 
 #include <linux/mmc/host.h>
@@ -28,6 +29,10 @@
 #include "sdio_ops.h"
 #include "sdio_cis.h"
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+#include <linux/mmc/sdio_ids.h>
+#endif
+
 static int sdio_read_fbr(struct sdio_func *func)
 {
 	int ret;
@@ -728,19 +733,35 @@
 		goto finish;
 	}
 
-	/*
-	 * Read the common registers.
-	 */
-	err = sdio_read_cccr(card, ocr);
-	if (err)
-		goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	if (host->embedded_sdio_data.cccr)
+		memcpy(&card->cccr, host->embedded_sdio_data.cccr, sizeof(struct sdio_cccr));
+	else {
+#endif
+		/*
+		 * Read the common registers.
+		 */
+		err = sdio_read_cccr(card,  ocr);
+		if (err)
+			goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	}
+#endif
 
-	/*
-	 * Read the common CIS tuples.
-	 */
-	err = sdio_read_common_cis(card);
-	if (err)
-		goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	if (host->embedded_sdio_data.cis)
+		memcpy(&card->cis, host->embedded_sdio_data.cis, sizeof(struct sdio_cis));
+	else {
+#endif
+		/*
+		 * Read the common CIS tuples.
+		 */
+		err = sdio_read_common_cis(card);
+		if (err)
+			goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	}
+#endif
 
 	if (oldcard) {
 		int same = (card->cis.vendor == oldcard->cis.vendor &&
@@ -1147,14 +1168,36 @@
 	funcs = (ocr & 0x70000000) >> 28;
 	card->sdio_funcs = 0;
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	if (host->embedded_sdio_data.funcs)
+		card->sdio_funcs = funcs = host->embedded_sdio_data.num_funcs;
+#endif
+
 	/*
 	 * Initialize (but don't add) all present functions.
 	 */
 	for (i = 0; i < funcs; i++, card->sdio_funcs++) {
-		err = sdio_init_func(host->card, i + 1);
-		if (err)
-			goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+		if (host->embedded_sdio_data.funcs) {
+			struct sdio_func *tmp;
 
+			tmp = sdio_alloc_func(host->card);
+			if (IS_ERR(tmp))
+				goto remove;
+			tmp->num = (i + 1);
+			card->sdio_func[i] = tmp;
+			tmp->class = host->embedded_sdio_data.funcs[i].f_class;
+			tmp->max_blksize = host->embedded_sdio_data.funcs[i].f_maxblksize;
+			tmp->vendor = card->cis.vendor;
+			tmp->device = card->cis.device;
+		} else {
+#endif
+			err = sdio_init_func(host->card, i + 1);
+			if (err)
+				goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+		}
+#endif
 		/*
 		 * Enable Runtime PM for this func (if supported)
 		 */
@@ -1202,3 +1245,39 @@
 	return err;
 }
 
+int sdio_reset_comm(struct mmc_card *card)
+{
+	struct mmc_host *host = card->host;
+	u32 ocr;
+	int err;
+
+	printk("%s():\n", __func__);
+	mmc_claim_host(host);
+
+	mmc_go_idle(host);
+
+	mmc_set_clock(host, host->f_min);
+
+	err = mmc_send_io_op_cond(host, 0, &ocr);
+	if (err)
+		goto err;
+
+	host->ocr = mmc_select_voltage(host, ocr);
+	if (!host->ocr) {
+		err = -EINVAL;
+		goto err;
+	}
+
+	err = mmc_sdio_init_card(host, host->ocr, card, 0);
+	if (err)
+		goto err;
+
+	mmc_release_host(host);
+	return 0;
+err:
+	printk("%s: Error resetting SDIO communications (%d)\n",
+	       mmc_hostname(host), err);
+	mmc_release_host(host);
+	return err;
+}
+EXPORT_SYMBOL(sdio_reset_comm);
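+/*
+ * A hedged usage sketch (not part of this patch): an SDIO function
+ * driver, e.g. a WLAN driver, can call sdio_reset_comm() to re-run the
+ * idle/IO-init sequence after resetting the card's firmware.
+ * wlan_recover() and priv->func are hypothetical names.
+ *
+ *	static int wlan_recover(struct wlan_priv *priv)
+ *	{
+ *		int err = sdio_reset_comm(priv->func->card);
+ *
+ *		if (err)
+ *			dev_err(&priv->func->dev,
+ *				"SDIO reset failed: %d\n", err);
+ *		return err;
+ *	}
+ */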
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 546c67c..c012cf5 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -25,6 +25,10 @@
 #include "sdio_cis.h"
 #include "sdio_bus.h"
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+#include <linux/mmc/host.h>
+#endif
+
 /* show configuration fields */
 #define sdio_config_attr(field, format_string)				\
 static ssize_t								\
@@ -270,7 +274,14 @@
 {
 	struct sdio_func *func = dev_to_sdio_func(dev);
 
-	sdio_free_func_cis(func);
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	/*
+	 * If this device is embedded then we never allocated
+	 * cis tables for this func
+	 */
+	if (!func->card->host->embedded_sdio_data.funcs)
+#endif
+		sdio_free_func_cis(func);
 
 	kfree(func->info);
 
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index 8e94e55..8b7539f 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -97,8 +97,6 @@
 	{ 0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80 };
 static const unsigned int speed_unit[8] =
 	{ 10000, 100000, 1000000, 10000000, 0, 0, 0, 0 };
-
-
 typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *,
 			   const unsigned char *, unsigned);
 
@@ -225,12 +223,77 @@
 	{	0x22,	0,	cistpl_funce		},
 };
 
+/***************************** WP B0 WA *******************************/
+
+unsigned char wp_tpl_codes[] = {
+	0x21, 0x22, 0x20, 0x21, 0x22, 0x91, 0x15,
+};
+
+unsigned char wp_tpl_links[] = {
+	0x2, 0x4, 0x4, 0x2, 0x2a, 0x2, 0x19,
+};
+
+unsigned char wp_tuple_data[7][42] = {
+	{
+	  12, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0
+	},
+	{
+	  0, 0, 2, 11, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0
+	},
+	{
+	  137, 0, 96, 114, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0
+	},
+	{
+	  12, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0
+	},
+	{
+	  1, 1, 48, 0, 0, 3, 0, 2, 0, 128,
+	  255, 0, 7, 0, 0, 7, 7, 255, 0, 16,
+	  0, 200, 100, 0, 0, 0, 0, 0, 16, 1,
+	  33, 2, 0, 0, 0, 0, 32, 4, 137, 0,
+	  96, 114
+	},
+	{
+	  7, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0
+	},
+	{
+	  8, 0, 73, 110, 116, 101, 108, 40, 82, 41,
+	  32, 87, 105, 114, 101, 108, 101, 115, 115, 32,
+	  67, 111, 114, 101, 0, 0, 0, 0, 0, 0,
+	  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	  0, 0
+	},
+};
+
+/**********************************************************************/
+
 static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
 {
 	int ret;
 	struct sdio_func_tuple *this, **prev;
 	unsigned i, ptr = 0;
-
+	int count = 0;
+	bool replace = false;
 	/*
 	 * Note that this works for the common CIS (function number 0) as
 	 * well as a function's CIS * since SDIO_CCCR_CIS and SDIO_FBR_CIS
@@ -245,7 +308,8 @@
 			fn = 0;
 
 		ret = mmc_io_rw_direct(card, 0, 0,
-			SDIO_FBR_BASE(fn) + SDIO_FBR_CIS + i, 0, &x);
+				       SDIO_FBR_BASE(fn) + SDIO_FBR_CIS + i,
+				       0, &x);
 		if (ret)
 			return ret;
 		ptr |= x << (i * 8);
@@ -258,20 +322,45 @@
 
 	BUG_ON(*prev);
 
+	if (card->quirks & MMC_QUIRK_NON_STD_CIS)
+		count = (func) ? 2 : -1;
+
 	do {
 		unsigned char tpl_code, tpl_link;
+		if (card->quirks & MMC_QUIRK_NON_STD_CIS) {
+			count++;
+			if ((func && (count > 6)) || (!func && (count > 2))) {
+				pr_debug("%s: break: count %d\n",
+					 __func__, count);
+				break;
+			}
+		}
 
 		ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code);
 		if (ret)
 			break;
-
+		if (card->quirks & MMC_QUIRK_NON_STD_CIS) {
+			/* if the first tuple is 0 - then it's b0, so replace */
+			if ((count < 4) && (tpl_code == 0)) {
+				pr_info("%s card with non std CIS",
+					mmc_hostname(card->host));
+				/* disable UHS on buggy cards */
+				card->sw_caps.sd3_bus_mode = 0;
+				replace = true;
+			}
+		}
 		/* 0xff means we're done */
 		if (tpl_code == 0xff)
 			break;
 
 		/* null entries have no link field or data */
-		if (tpl_code == 0x00)
-			continue;
+		if (card->quirks & MMC_QUIRK_NON_STD_CIS) {
+			if ((tpl_code == 0x00) && (!replace))
+				continue;
+		} else {
+			if (tpl_code == 0x00)
+				continue;
+		}
 
 		ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_link);
 		if (ret)
@@ -290,6 +379,7 @@
 					       ptr + i, 0, &this->data[i]);
 			if (ret)
 				break;
+			pr_debug("%d, ", this->data[i]);
 		}
 		if (ret) {
 			kfree(this);
@@ -297,9 +387,28 @@
 		}
 
 		/* Try to parse the CIS tuple */
-		ret = cis_tpl_parse(card, func, "CIS",
-				    cis_tpl_list, ARRAY_SIZE(cis_tpl_list),
-				    tpl_code, this->data, tpl_link);
+		if (card->quirks & MMC_QUIRK_NON_STD_CIS) {
+			if (!replace)
+				ret = cis_tpl_parse(card, func, "CIS",
+						    cis_tpl_list,
+						    ARRAY_SIZE(cis_tpl_list),
+						    tpl_code, this->data,
+						    tpl_link);
+			else
+				ret = cis_tpl_parse(card, func, "CIS",
+						    cis_tpl_list,
+						    ARRAY_SIZE(cis_tpl_list),
+						    wp_tpl_codes[count],
+						    wp_tuple_data[count],
+						    wp_tpl_links[count]);
+		} else {
+			ret = cis_tpl_parse(card, func, "CIS",
+					    cis_tpl_list,
+					    ARRAY_SIZE(cis_tpl_list),
+					    tpl_code, this->data,
+					    tpl_link);
+		}
+
 		if (ret == -EILSEQ || ret == -ENOENT) {
 			/*
 			 * The tuple is unknown or known but not parsed.
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
old mode 100644
new mode 100755
index 78cb4d5..8fdeb07
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -384,6 +384,39 @@
 EXPORT_SYMBOL_GPL(sdio_readb);
 
 /**
+ *	sdio_readb_ext - read a single byte from a SDIO function
+ *	@func: SDIO function to access
+ *	@addr: address to read
+ *	@err_ret: optional status value from transfer
+ *	@in: value to add to argument
+ *
+ *	Reads a single byte from the address space of a given SDIO
+ *	function. If there is a problem reading the address, 0xff
+ *	is returned and @err_ret will contain the error code.
+ */
+unsigned char sdio_readb_ext(struct sdio_func *func, unsigned int addr,
+	int *err_ret, unsigned in)
+{
+	int ret;
+	unsigned char val;
+
+	BUG_ON(!func);
+
+	if (err_ret)
+		*err_ret = 0;
+
+	ret = mmc_io_rw_direct(func->card, 0, func->num, addr, (u8)in, &val);
+	if (ret) {
+		if (err_ret)
+			*err_ret = ret;
+		return 0xFF;
+	}
+
+	return val;
+}
+EXPORT_SYMBOL_GPL(sdio_readb_ext);
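+/*
+ * A hedged usage sketch (not part of this patch): the extra 'in' value
+ * is placed in the data byte of the CMD52 argument, which some vendor
+ * drivers use for special read modes. func and err are a hypothetical
+ * SDIO function pointer and status variable.
+ *
+ *	u8 val = sdio_readb_ext(func, 0x10, &err, 0x01);
+ *	if (err)
+ *		pr_warn("extended read failed: %d\n", err);
+ */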
+
+/**
  *	sdio_writeb - write a single byte to a SDIO function
  *	@func: SDIO function to access
  *	@b: byte to write
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index e742761..aca59d9 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -584,13 +584,6 @@
 	if (host->mrq->cmd->data) {
 		host->mrq->cmd->data->error = -ETIMEDOUT;
 		host->data = NULL;
-		/*
-		 * With some SDIO modules, sometimes DMA transfer hangs. If
-		 * stop_transfer() is not called then the DMA request is not
-		 * removed, following ones are queued and never computed.
-		 */
-		if (host->state == STATE_DATA_XFER)
-			host->stop_transfer(host);
 	} else {
 		host->mrq->cmd->error = -ETIMEDOUT;
 		host->cmd = NULL;
@@ -1188,22 +1181,11 @@
 	iflags |= ATMCI_CMDRDY;
 	cmd = mrq->cmd;
 	cmdflags = atmci_prepare_command(slot->mmc, cmd);
-
-	/*
-	 * DMA transfer should be started before sending the command to avoid
-	 * unexpected errors especially for read operations in SDIO mode.
-	 * Unfortunately, in PDC mode, command has to be sent before starting
-	 * the transfer.
-	 */
-	if (host->submit_data != &atmci_submit_data_dma)
-		atmci_send_command(host, cmd, cmdflags);
+	atmci_send_command(host, cmd, cmdflags);
 
 	if (data)
 		host->submit_data(host, data);
 
-	if (host->submit_data == &atmci_submit_data_dma)
-		atmci_send_command(host, cmd, cmdflags);
-
 	if (mrq->stop) {
 		host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
 		host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
@@ -1295,7 +1277,7 @@
 
 	if (ios->clock) {
 		unsigned int clock_min = ~0U;
-		int clkdiv;
+		u32 clkdiv;
 
 		spin_lock_bh(&host->lock);
 		if (!host->mode_reg) {
@@ -1320,12 +1302,7 @@
 		/* Calculate clock divider */
 		if (host->caps.has_odd_clk_div) {
 			clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
-			if (clkdiv < 0) {
-				dev_warn(&mmc->class_dev,
-					 "clock %u too fast; using %lu\n",
-					 clock_min, host->bus_hz / 2);
-				clkdiv = 0;
-			} else if (clkdiv > 511) {
+			if (clkdiv > 511) {
 				dev_warn(&mmc->class_dev,
 				         "clock %u too slow; using %lu\n",
 				         clock_min, host->bus_hz / (511 + 2));
@@ -1810,14 +1787,12 @@
 			if (unlikely(status)) {
 				host->stop_transfer(host);
 				host->data = NULL;
-				if (data) {
-					if (status & ATMCI_DTOE) {
-						data->error = -ETIMEDOUT;
-					} else if (status & ATMCI_DCRCE) {
-						data->error = -EILSEQ;
-					} else {
-						data->error = -EIO;
-					}
+				if (status & ATMCI_DTOE) {
+					data->error = -ETIMEDOUT;
+				} else if (status & ATMCI_DCRCE) {
+					data->error = -EILSEQ;
+				} else {
+					data->error = -EIO;
 				}
 			}
 
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 4c65a5a..ad13f42 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -247,9 +247,6 @@
 	case MMC_RSP_R1:
 		rsp_type = SD_RSP_TYPE_R1;
 		break;
-	case MMC_RSP_R1 & ~MMC_RSP_CRC:
-		rsp_type = SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7;
-		break;
 	case MMC_RSP_R1B:
 		rsp_type = SD_RSP_TYPE_R1b;
 		break;
@@ -341,13 +338,6 @@
 	}
 
 	if (rsp_type == SD_RSP_TYPE_R2) {
-		/*
-		 * The controller offloads the last byte {CRC-7, end bit 1'b1}
-		 * of response type R2. Assign dummy CRC, 0, and end bit to the
-		 * byte(ptr[16], goes into the LSB of resp[3] later).
-		 */
-		ptr[16] = 1;
-
 		for (i = 0; i < 4; i++) {
 			cmd->resp[i] = get_unaligned_be32(ptr + 1 + i * 4);
 			dev_dbg(sdmmc_dev(host), "cmd->resp[%d] = 0x%08x\n",
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index d33bb95..d25f9ab 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -40,7 +40,7 @@
 #define ESDHC_DMA_SYSCTL	0x40c
 #define ESDHC_DMA_SNOOP		0x00000040
 
-#define ESDHC_HOST_CONTROL_RES	0x01
+#define ESDHC_HOST_CONTROL_RES	0x05
 
 static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
 {
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 701d06d..39696af 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -26,16 +26,21 @@
 #include <linux/pm_runtime.h>
 #include <linux/mmc/sdhci-pci-data.h>
 
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_scu_flis.h>
+#include <asm/intel_scu_pmic.h>
+
 #include "sdhci.h"
 
+/* Settle down values copied from broadcom reference design. */
+#define DELAY_CARD_INSERTED	200
+#define DELAY_CARD_REMOVED	50
+
 /*
  * PCI device IDs
  */
 #define PCI_DEVICE_ID_INTEL_PCH_SDIO0	0x8809
 #define PCI_DEVICE_ID_INTEL_PCH_SDIO1	0x880a
-#define PCI_DEVICE_ID_INTEL_BYT_EMMC	0x0f14
-#define PCI_DEVICE_ID_INTEL_BYT_SDIO	0x0f15
-#define PCI_DEVICE_ID_INTEL_BYT_SD	0x0f16
 
 /*
  * PCI registers
@@ -50,6 +55,18 @@
 #define  PCI_SLOT_INFO_FIRST_BAR_MASK	0x07
 
 #define MAX_SLOTS			8
+#define IPC_EMMC_MUTEX_CMD             0xEE
+
+/* CLV SD card power resource */
+
+#define VCCSDIO_ADDR		0xd5
+#define VCCSDIO_OFF		0x4
+#define VCCSDIO_NORMAL		0x7
+#define ENCTRL0_ISOLATE		0x55555557
+#define ENCTRL1_ISOLATE		0x5555
+#define STORAGESTIO_FLISNUM	0x8
+#define ENCTRL0_OFF		0x10
+#define ENCTRL1_OFF		0x11
 
 struct sdhci_pci_chip;
 struct sdhci_pci_slot;
@@ -77,6 +94,9 @@
 	int			rst_n_gpio;
 	int			cd_gpio;
 	int			cd_irq;
+	bool			dev_power;
+	struct mutex		power_lock;
+	bool			dma_enabled;
 };
 
 struct sdhci_pci_chip {
@@ -85,10 +105,14 @@
 	unsigned int		quirks;
 	unsigned int		quirks2;
 	bool			allow_runtime_pm;
+	unsigned int		autosuspend_delay;
 	const struct sdhci_pci_fixes *fixes;
 
 	int			num_slots;	/* Slots on controller */
 	struct sdhci_pci_slot	*slots[MAX_SLOTS]; /* Pointers to host slots */
+
+	unsigned int		enctrl0_orig;
+	unsigned int		enctrl1_orig;
 };
 
 
@@ -260,20 +284,240 @@
 
 #endif
 
+#define MFD_SDHCI_DEKKER_BASE  0xffff7fb0
+static void mfd_emmc_mutex_register(struct sdhci_pci_slot *slot)
+{
+	u32 mutex_var_addr;
+#ifdef CONFIG_INTEL_SCU_IPC
+	int err;
+
+	err = rpmsg_send_generic_command(IPC_EMMC_MUTEX_CMD, 0,
+			NULL, 0, &mutex_var_addr, 1);
+	if (err) {
+		dev_err(&slot->chip->pdev->dev, "IPC error: %d\n", err);
+		dev_info(&slot->chip->pdev->dev, "Specify mutex address\n");
+		/*
+		 * Since we failed to get mutex sram address, specify it
+		 */
+		mutex_var_addr = MFD_SDHCI_DEKKER_BASE;
+	}
+#else
+	mutex_var_addr = MFD_SDHCI_DEKKER_BASE;
+#endif
+
+	/* 3 housekeeping mutex variables, 12 bytes length */
+	slot->host->sram_addr = ioremap_nocache(mutex_var_addr,
+			3 * sizeof(u32));
+	if (!slot->host->sram_addr)
+		dev_err(&slot->chip->pdev->dev, "ioremap failed!\n");
+	else {
+		dev_info(&slot->chip->pdev->dev, "mapped addr: %p\n",
+				slot->host->sram_addr);
+		dev_info(&slot->chip->pdev->dev,
+		"current eMMC owner: %d, IA req: %d, SCU req: %d\n",
+				readl(slot->host->sram_addr +
+					DEKKER_EMMC_OWNER_OFFSET),
+				readl(slot->host->sram_addr +
+					DEKKER_IA_REQ_OFFSET),
+				readl(slot->host->sram_addr +
+					DEKKER_SCU_REQ_OFFSET));
+	}
+	spin_lock_init(&slot->host->dekker_lock);
+}
+
 static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot)
 {
+	switch (slot->chip->pdev->device) {
+	case PCI_DEVICE_ID_INTEL_MFD_EMMC0:
+		mfd_emmc_mutex_register(slot);
+		sdhci_alloc_panic_host(slot->host);
+		slot->host->mmc->caps2 |= MMC_CAP2_INIT_CARD_SYNC;
+		break;
+	case PCI_DEVICE_ID_INTEL_MFD_EMMC1:
+		break;
+	}
 	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
 	slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC |
 				  MMC_CAP2_HC_ERASE_SZ;
 	return 0;
 }
 
+static void mfd_emmc_remove_slot(struct sdhci_pci_slot *slot, int dead)
+{
+	switch (slot->chip->pdev->device) {
+	case PCI_DEVICE_ID_INTEL_MFD_EMMC0:
+		if (slot->host->sram_addr)
+			iounmap(slot->host->sram_addr);
+		break;
+	case PCI_DEVICE_ID_INTEL_MFD_EMMC1:
+		break;
+	}
+}
+
 static int mfd_sdio_probe_slot(struct sdhci_pci_slot *slot)
 {
 	slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE;
 	return 0;
 }
 
+#ifdef CONFIG_INTEL_SCU_FLIS
+/*
+ * Save the current power and shim status, if they are on, turn them off.
+ */
+static int ctp_sd_card_power_save(struct sdhci_pci_slot *slot)
+{
+	int err;
+	u16 addr;
+	u8 data;
+	struct sdhci_pci_chip *chip;
+
+	if (!slot->dev_power)
+		return 0;
+
+	chip = slot->chip;
+	err = intel_scu_ipc_read_shim(&chip->enctrl0_orig,
+			STORAGESTIO_FLISNUM, ENCTRL0_OFF);
+	if (err) {
+		pr_err("SDHCI device %04X: ENCTRL0 read failed, err %d\n",
+				chip->pdev->device, err);
+		chip->enctrl0_orig = ENCTRL0_ISOLATE;
+		chip->enctrl1_orig = ENCTRL1_ISOLATE;
+		/*
+		 * stop to shut down VCCSDIO, since we cannot recover
+		 * it.
+		 * this should not block system entering S3
+		 */
+		return 0;
+	}
+	err = intel_scu_ipc_read_shim(&chip->enctrl1_orig,
+			STORAGESTIO_FLISNUM, ENCTRL1_OFF);
+	if (err) {
+		pr_err("SDHCI device %04X: ENCTRL1 read failed, err %d\n",
+				chip->pdev->device, err);
+		chip->enctrl0_orig = ENCTRL0_ISOLATE;
+		chip->enctrl1_orig = ENCTRL1_ISOLATE;
+		/*
+		 * stop to shut down VCCSDIO, since we cannot recover
+		 * it.
+		 * this should not block system entering S3
+		 */
+		return 0;
+	}
+
+	/* isolate shim */
+	err = intel_scu_ipc_write_shim(ENCTRL0_ISOLATE,
+			STORAGESTIO_FLISNUM, ENCTRL0_OFF);
+	if (err) {
+		pr_err("SDHCI device %04X: ENCTRL0 ISOLATE failed, err %d\n",
+				chip->pdev->device, err);
+		/*
+		 * stop to shut down VCCSDIO. Without isolate shim, the power
+		 * may have leak if turn off VCCSDIO.
+		 * during S3 resuming, shim and VCCSDIO will be recofigured
+		 * this should not block system entering S3
+		 */
+		return 0;
+	}
+
+	err = intel_scu_ipc_write_shim(ENCTRL1_ISOLATE,
+			STORAGESTIO_FLISNUM, ENCTRL1_OFF);
+	if (err) {
+		pr_err("SDHCI device %04X: ENCTRL1 ISOLATE failed, err %d\n",
+				chip->pdev->device, err);
+		/*
+		 * stop to shut down VCCSDIO. Without isolate shim, the power
+		 * may have leak if turn off VCCSDIO.
+		 * during S3 resuming, shim and VCCSDIO will be recofigured
+		 * this should not block system entering S3
+		 */
+		return 0;
+	}
+
+	addr = VCCSDIO_ADDR;
+	data = VCCSDIO_OFF;
+	err = intel_scu_ipc_writev(&addr, &data, 1);
+	if (err) {
+		pr_err("SDHCI device %04X: VCCSDIO turn off failed, err %d\n",
+				chip->pdev->device, err);
+		/*
+		 * during S3 resuming, VCCSDIO will be recofigured
+		 * this should not block system entering S3.
+		 */
+	}
+
+	slot->dev_power = false;
+	return 0;
+}
+
+/*
+ * Restore the power and shim if they are original on.
+ */
+static int ctp_sd_card_power_restore(struct sdhci_pci_slot *slot)
+{
+	int err;
+	u16 addr;
+	u8 data;
+	struct sdhci_pci_chip *chip;
+
+	if (slot->dev_power)
+		return 0;
+
+	chip = slot->chip;
+
+	addr = VCCSDIO_ADDR;
+	data = VCCSDIO_NORMAL;
+	err = intel_scu_ipc_writev(&addr, &data, 1);
+	if (err) {
+		pr_err("SDHCI device %04X: VCCSDIO turn on failed, err %d\n",
+				chip->pdev->device, err);
+		/*
+		 * VCCSDIO trun on failed. This may impact the SD card
+		 * init and the read/write functions. We just report a
+		 * warning, and go on to have a try. Anyway, SD driver
+		 * can encounter error if powering up is failed
+		 */
+		WARN_ON(err);
+	}
+
+	if (chip->enctrl0_orig == ENCTRL0_ISOLATE &&
+			chip->enctrl1_orig == ENCTRL1_ISOLATE)
+		/* no need to reconfigure the shim */
+		return 0;
+
+	/* reconnect shim */
+	err = intel_scu_ipc_write_shim(chip->enctrl0_orig,
+			STORAGESTIO_FLISNUM, ENCTRL0_OFF);
+
+	if (err) {
+		pr_err("SDHCI device %04X: ENCTRL0 CONNECT shim failed, err %d\n",
+				chip->pdev->device, err);
+		/* keep on setting ENCTRL1, but report a warning */
+		WARN_ON(err);
+	}
+
+	err = intel_scu_ipc_write_shim(chip->enctrl1_orig,
+			STORAGESTIO_FLISNUM, ENCTRL1_OFF);
+	if (err) {
+		pr_err("SDHCI device %04X: ENCTRL1 CONNECT shim failed, err %d\n",
+				chip->pdev->device, err);
+		/* leave this error to driver, but report a warning */
+		WARN_ON(err);
+	}
+
+	slot->dev_power = true;
+	return 0;
+}
+#else
+static int ctp_sd_card_power_save(struct sdhci_pci_slot *slot)
+{
+	return 0;
+}
+static int ctp_sd_card_power_restore(struct sdhci_pci_slot *slot)
+{
+	return 0;
+}
+#endif
+
 static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = {
 	.quirks		= SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
 	.probe_slot	= mrst_hc_probe_slot,
@@ -284,9 +528,45 @@
 	.probe		= mrst_hc_probe,
 };
 
+static int ctp_sd_probe_slot(struct sdhci_pci_slot *slot)
+{
+#ifdef CONFIG_INTEL_SCU_IPC
+	int err;
+	u16 addr;
+#endif
+	u8 data = VCCSDIO_OFF;
+
+	if (!slot || !slot->chip || !slot->chip->pdev)
+		return -ENODEV;
+
+	if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_CLV_SDIO0)
+		return 0;
+
+	mutex_init(&slot->power_lock);
+
+	slot->host->flags |= SDHCI_POWER_CTRL_DEV;
+
+#ifdef CONFIG_INTEL_SCU_IPC
+	addr = VCCSDIO_ADDR;
+	err = intel_scu_ipc_readv(&addr, &data, 1);
+	if (err) {
+		/* suppose dev_power is true */
+		slot->dev_power = true;
+		return 0;
+	}
+#endif
+	if (data == VCCSDIO_NORMAL)
+		slot->dev_power = true;
+	else
+		slot->dev_power = false;
+
+	return 0;
+}
+
 static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
 	.allow_runtime_pm = true,
+	.probe_slot	= ctp_sd_probe_slot,
 };
 
 static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = {
@@ -300,6 +580,7 @@
 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
 	.allow_runtime_pm = true,
 	.probe_slot	= mfd_emmc_probe_slot,
+	.remove_slot	= mfd_emmc_remove_slot,
 };
 
 static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = {
@@ -311,27 +592,206 @@
 {
 	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
 	slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
+
 	return 0;
 }
 
 static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
 {
 	slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE;
+	switch (slot->chip->pdev->device) {
+	case PCI_DEVICE_ID_INTEL_BYT_SDIO:
+		/* add a delay after runtime resuming back from D0i3 */
+		slot->chip->pdev->d3_delay = 10;
+		/* reduce the auto suspend delay for SDIO to be 500ms */
+		slot->chip->autosuspend_delay = 500;
+		break;
+	}
+
 	return 0;
 }
 
-static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
+#define TNG_IOAPIC_IDX	0xfec00000
+static void mrfl_ioapic_rte_reg_addr_map(struct sdhci_pci_slot *slot)
+{
+	slot->host->rte_addr = ioremap_nocache(TNG_IOAPIC_IDX, 256);
+	if (!slot->host->rte_addr)
+		dev_err(&slot->chip->pdev->dev, "rte_addr ioremap fail!\n");
+	else
+		dev_info(&slot->chip->pdev->dev, "rte_addr mapped addr: %p\n",
+			slot->host->rte_addr);
+}
+
+/* Define Host controllers for Intel Merrifield platform */
+#define INTEL_MRFL_EMMC_0	0
+#define INTEL_MRFL_EMMC_1	1
+#define INTEL_MRFL_SD		2
+#define INTEL_MRFL_SDIO		3
+
+static int intel_mrfl_mmc_probe_slot(struct sdhci_pci_slot *slot)
+{
+	int ret = 0;
+
+	switch (PCI_FUNC(slot->chip->pdev->devfn)) {
+	case INTEL_MRFL_EMMC_0:
+		sdhci_alloc_panic_host(slot->host);
+		slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA |
+					MMC_CAP_NONREMOVABLE |
+					MMC_CAP_1_8V_DDR;
+		slot->host->mmc->caps2 |= MMC_CAP2_POLL_R1B_BUSY |
+					MMC_CAP2_INIT_CARD_SYNC |
+					MMC_CAP2_CACHE_CTRL;
+		if (slot->chip->pdev->revision == 0x1) { /* B0 stepping */
+			slot->host->mmc->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
+			/* WA for async abort silicon issue */
+			slot->host->quirks2 |= SDHCI_QUIRK2_2MS_DELAY |
+					SDHCI_QUIRK2_WAIT_FOR_IDLE |
+					SDHCI_QUIRK2_TUNING_POLL;
+		}
+		mrfl_ioapic_rte_reg_addr_map(slot);
+		break;
+	case INTEL_MRFL_SD:
+		slot->host->quirks2 |= SDHCI_QUIRK2_WAIT_FOR_IDLE;
+		/* Force 3.3V signal voltage */
+		slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+		slot->host->mmc->caps2 |= MMC_CAP2_FIXED_NCRC;
+		break;
+	case INTEL_MRFL_SDIO:
+		slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+		break;
+	}
+
+	if (slot->data->platform_quirks & PLFM_QUIRK_NO_HIGH_SPEED) {
+		slot->host->quirks2 |= SDHCI_QUIRK2_DISABLE_HIGH_SPEED;
+		slot->host->mmc->caps &= ~MMC_CAP_1_8V_DDR;
+	}
+
+	if (slot->data->platform_quirks & PLFM_QUIRK_NO_EMMC_BOOT_PART)
+		slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
+
+	if (slot->data->platform_quirks & PLFM_QUIRK_NO_HOST_CTRL_HW) {
+		dev_info(&slot->chip->pdev->dev, "Disable MMC Func %d.\n",
+			PCI_FUNC(slot->chip->pdev->devfn));
+		ret = -ENODEV;
+	}
+
+	return ret;
+}
+
+static void intel_mrfl_mmc_remove_slot(struct sdhci_pci_slot *slot, int dead)
+{
+	if (PCI_FUNC(slot->chip->pdev->devfn) == INTEL_MRFL_EMMC_0)
+		if (slot->host->rte_addr)
+			iounmap(slot->host->rte_addr);
+}
+
+static const struct sdhci_pci_fixes sdhci_intel_mrfl_mmc = {
+	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+	.quirks2	= SDHCI_QUIRK2_BROKEN_AUTO_CMD23 |
+				SDHCI_QUIRK2_HIGH_SPEED_SET_LATE |
+				SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
 	.allow_runtime_pm = true,
-	.probe_slot	= byt_emmc_probe_slot,
+	.probe_slot	= intel_mrfl_mmc_probe_slot,
+	.remove_slot	= intel_mrfl_mmc_remove_slot,
 };
 
-static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
-	.quirks2	= SDHCI_QUIRK2_HOST_OFF_CARD_ON,
+static int intel_moor_emmc_probe_slot(struct sdhci_pci_slot *slot)
+{
+	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA |
+				MMC_CAP_NONREMOVABLE |
+				MMC_CAP_1_8V_DDR;
+
+	sdhci_alloc_panic_host(slot->host);
+
+	slot->host->mmc->caps2 |= MMC_CAP2_POLL_R1B_BUSY |
+				MMC_CAP2_INIT_CARD_SYNC;
+
+	/* Enable HS200 and HS400 */
+	slot->host->mmc->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
+
+	if (slot->chip->pdev->revision == 0x1) { /* B0 stepping */
+		slot->host->mmc->caps2 |= MMC_CAP2_HS400_1_8V_DDR;
+	}
+
+	if (slot->data)
+		if (slot->data->platform_quirks & PLFM_QUIRK_NO_HIGH_SPEED) {
+			slot->host->quirks2 |= SDHCI_QUIRK2_DISABLE_HIGH_SPEED;
+			slot->host->mmc->caps &= ~MMC_CAP_1_8V_DDR;
+			slot->host->mmc->caps2 &= ~MMC_CAP2_HS200_1_8V_SDR;
+			if (slot->chip->pdev->revision == 0x1) {
+				slot->host->mmc->caps2 &=
+					~MMC_CAP2_HS400_1_8V_DDR;
+			}
+		}
+
+	if (slot->data)
+		if (slot->data->platform_quirks & PLFM_QUIRK_NO_EMMC_BOOT_PART)
+			slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
+
+	return 0;
+}
+
+static void intel_moor_emmc_remove_slot(struct sdhci_pci_slot *slot, int dead)
+{
+}
+
+static int intel_moor_sd_probe_slot(struct sdhci_pci_slot *slot)
+{
+	int ret = 0;
+
+	if (slot->data)
+		if (slot->data->platform_quirks & PLFM_QUIRK_NO_HOST_CTRL_HW)
+			ret = -ENODEV;
+
+	return ret;
+}
+
+static void intel_moor_sd_remove_slot(struct sdhci_pci_slot *slot, int dead)
+{
+}
+
+static int intel_moor_sdio_probe_slot(struct sdhci_pci_slot *slot)
+{
+	int ret = 0;
+
+	slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+
+	if (slot->data)
+		if (slot->data->platform_quirks & PLFM_QUIRK_NO_HOST_CTRL_HW)
+			ret = -ENODEV;
+
+	return ret;
+}
+
+static void intel_moor_sdio_remove_slot(struct sdhci_pci_slot *slot, int dead)
+{
+}
+
+static const struct sdhci_pci_fixes sdhci_intel_moor_emmc = {
+	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+	.quirks2	= SDHCI_QUIRK2_BROKEN_AUTO_CMD23 |
+				SDHCI_QUIRK2_HIGH_SPEED_SET_LATE,
 	.allow_runtime_pm = true,
-	.probe_slot	= byt_sdio_probe_slot,
+	.probe_slot	= intel_moor_emmc_probe_slot,
+	.remove_slot	= intel_moor_emmc_remove_slot,
 };
 
-static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
+static const struct sdhci_pci_fixes sdhci_intel_moor_sd = {
+	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+	.quirks2	= SDHCI_QUIRK2_BROKEN_AUTO_CMD23 |
+				SDHCI_QUIRK2_HIGH_SPEED_SET_LATE,
+	.allow_runtime_pm = true,
+	.probe_slot	= intel_moor_sd_probe_slot,
+	.remove_slot	= intel_moor_sd_remove_slot,
+};
+
+static const struct sdhci_pci_fixes sdhci_intel_moor_sdio = {
+	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+	.quirks2	= SDHCI_QUIRK2_BROKEN_AUTO_CMD23 |
+				SDHCI_QUIRK2_HIGH_SPEED_SET_LATE,
+	.allow_runtime_pm = true,
+	.probe_slot	= intel_moor_sdio_probe_slot,
+	.remove_slot	= intel_moor_sdio_remove_slot,
 };
 
 /* O2Micro extra registers */
@@ -887,26 +1347,50 @@
 
 	{
 		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_BYT_EMMC,
+		.device		= PCI_DEVICE_ID_INTEL_CLV_SDIO0,
 		.subvendor	= PCI_ANY_ID,
 		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_emmc,
+		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_sd,
 	},
 
 	{
 		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_BYT_SDIO,
+		.device		= PCI_DEVICE_ID_INTEL_CLV_SDIO1,
 		.subvendor	= PCI_ANY_ID,
 		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sdio,
+		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_sdio,
 	},
 
 	{
 		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_BYT_SD,
+		.device		= PCI_DEVICE_ID_INTEL_CLV_SDIO2,
 		.subvendor	= PCI_ANY_ID,
 		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sd,
+		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_sdio,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_INTEL,
+		.device		= PCI_DEVICE_ID_INTEL_CLV_EMMC0,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_emmc,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_INTEL,
+		.device		= PCI_DEVICE_ID_INTEL_CLV_EMMC1,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_emmc,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_INTEL,
+		.device		= PCI_DEVICE_ID_INTEL_MRFL_MMC,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_intel_mrfl_mmc,
 	},
 
 	{
@@ -964,6 +1448,53 @@
  *                                                                           *
 \*****************************************************************************/
 
+static int try_request_regulator(struct device *dev, void *data)
+{
+	struct pci_dev        *pdev = container_of(dev, struct pci_dev, dev);
+	struct sdhci_pci_chip *chip;
+	struct sdhci_pci_slot *slot;
+	struct sdhci_host     *host;
+	int i;
+
+	chip = pci_get_drvdata(pdev);
+	if (!chip)
+		return 0;
+
+	for (i = 0; i < chip->num_slots; i++) {
+		slot = chip->slots[i];
+		if (!slot)
+			continue;
+		host = slot->host;
+		if (!host)
+			continue;
+		if (sdhci_try_get_regulator(host) == 0)
+			mmc_detect_change(host->mmc, 0);
+	}
+	return 0;
+}
+
+static struct pci_driver sdhci_driver;
+
+/**
+ *      sdhci_pci_request_regulators - retry requesting regulators of
+ *                                     all sdhci-pci devices
+ *
+ *      On some platforms, the regulators associated with the MMC become
+ *      available late in boot. Platform code calls
+ *      sdhci_pci_request_regulators() to retry getting the regulators
+ *      associated with the PCI SDHCI hosts.
+ */
+
+int sdhci_pci_request_regulators(void)
+{
+	/* driver not yet registered */
+	if (!sdhci_driver.driver.p)
+		return 0;
+	return driver_for_each_device(&sdhci_driver.driver,
+				      NULL, NULL, try_request_regulator);
+}
+EXPORT_SYMBOL_GPL(sdhci_pci_request_regulators);
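+/*
+ * A hedged sketch of the intended caller (not part of this patch):
+ * platform code re-triggers the regulator lookup once its regulators
+ * have been registered late in boot. my_regulators_ready() is a
+ * hypothetical hook.
+ *
+ *	static int my_regulators_ready(void)
+ *	{
+ *		return sdhci_pci_request_regulators();
+ *	}
+ */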
+
 static int sdhci_pci_enable_dma(struct sdhci_host *host)
 {
 	struct sdhci_pci_slot *slot;
@@ -971,6 +1502,9 @@
 	int ret;
 
 	slot = sdhci_priv(host);
+	if (slot->dma_enabled)
+		return 0;
+
 	pdev = slot->chip->pdev;
 
 	if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) &&
@@ -986,6 +1520,8 @@
 
 	pci_set_master(pdev);
 
+	slot->dma_enabled = true;
+
 	return 0;
 }
 
@@ -1018,21 +1554,150 @@
 {
 	struct sdhci_pci_slot *slot = sdhci_priv(host);
 	int rst_n_gpio = slot->rst_n_gpio;
+	u8 pwr;
 
-	if (!gpio_is_valid(rst_n_gpio))
+	if (gpio_is_valid(rst_n_gpio)) {
+		gpio_set_value_cansleep(rst_n_gpio, 0);
+		/* For eMMC, minimum is 1us but give it 10us for good measure */
+		udelay(10);
+		gpio_set_value_cansleep(rst_n_gpio, 1);
+		/*
+		 * For eMMC, minimum is 200us,
+		 * but give it 300us for good measure
+		 */
+		usleep_range(300, 1000);
+	} else if (slot->host->mmc->caps & MMC_CAP_HW_RESET) {
+		/* first set bit4 of power control register */
+		pwr = sdhci_readb(host, SDHCI_POWER_CONTROL);
+		pwr |= SDHCI_HW_RESET;
+		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+		/* keep the same delay for safe */
+		usleep_range(300, 1000);
+		/* then clear bit4 of power control register */
+		pwr &= ~SDHCI_HW_RESET;
+		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+		/* keep the same delay for safe */
+		usleep_range(300, 1000);
+	}
+}
+
+static int sdhci_pci_power_up_host(struct sdhci_host *host)
+{
+	int ret = -ENOSYS;
+	struct sdhci_pci_slot *slot = sdhci_priv(host);
+
+	if (slot->data && slot->data->power_up)
+		ret = slot->data->power_up(host);
+	else {
+		/*
+		 * use standard PCI power up function
+		 */
+		ret = pci_set_power_state(slot->chip->pdev, PCI_D0);
+		mdelay(50);
+	}
+	/*
+	 * Bail out if either the platform power_up callback or the
+	 * standard PCI power-up path reported an error.
+	 */
+	if (ret)
+		return ret;
+
+	/*
+	 * After powering up the host, sanity-check that its registers
+	 * are readable.
+	 */
+
+	if (sdhci_readl(host, SDHCI_HOST_VERSION) ==
+			0xffffffff) {
+		pr_err("%s: power up sdhci host failed\n",
+				__func__);
+		return -EPERM;
+	}
+
+	pr_info("%s: host controller power up is done\n", __func__);
+
+	return 0;
+}
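+/*
+ * A hedged sketch of a platform power_up callback wired through
+ * sdhci_pci_data (my_power_up and my_enable_power_rail are hypothetical
+ * names):
+ *
+ *	static int my_power_up(struct sdhci_host *host)
+ *	{
+ *		my_enable_power_rail();
+ *		mdelay(50);	/* let the rail settle */
+ *		return 0;
+ *	}
+ */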
+
+static void sdhci_pci_set_dev_power(struct sdhci_host *host, bool poweron)
+{
+	struct sdhci_pci_slot *slot;
+	struct sdhci_pci_chip *chip;
+
+	slot = sdhci_priv(host);
+	if (slot)
+		chip = slot->chip;
+	else
 		return;
-	gpio_set_value_cansleep(rst_n_gpio, 0);
-	/* For eMMC, minimum is 1us but give it 10us for good measure */
-	udelay(10);
-	gpio_set_value_cansleep(rst_n_gpio, 1);
-	/* For eMMC, minimum is 200us but give it 300us for good measure */
-	usleep_range(300, 1000);
+
+	/* only available for Intel CTP platform */
+	if (chip && chip->pdev &&
+		chip->pdev->device == PCI_DEVICE_ID_INTEL_CLV_SDIO0) {
+		mutex_lock(&slot->power_lock);
+		if (poweron)
+			ctp_sd_card_power_restore(slot);
+		else
+			ctp_sd_card_power_save(slot);
+		mutex_unlock(&slot->power_lock);
+	}
+}
+
+static int sdhci_pci_get_cd(struct sdhci_host *host)
+{
+	bool present;
+	struct sdhci_pci_slot *slot = sdhci_priv(host);
+
+	if (gpio_is_valid(slot->cd_gpio))
+		return gpio_get_value(slot->cd_gpio) ? 0 : 1;
+
+	/* If nonremovable or polling, assume that the card is always present */
+	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+		present = true;
+	else
+		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+			SDHCI_CARD_PRESENT;
+
+	return present;
+}
+
+static int sdhci_pci_get_tuning_count(struct sdhci_host *host)
+{
+	struct sdhci_pci_slot *slot = sdhci_priv(host);
+	int tuning_count = 0;
+
+	switch (slot->chip->pdev->device) {
+	case PCI_DEVICE_ID_INTEL_BYT_EMMC45:
+		tuning_count = 4; /* re-tune every 2^(4-1) = 8 seconds */
+		break;
+	case PCI_DEVICE_ID_INTEL_MRFL_MMC:
+		tuning_count = 8; /* re-tune every 2^(8-1) = 128 seconds */
+		break;
+	default:
+		break;
+	}
+
+	return tuning_count;
+}
+
+static int sdhci_gpio_buf_check(struct sdhci_host *host, unsigned int clk)
+{
+	int ret = -ENOSYS;
+	struct sdhci_pci_slot *slot = sdhci_priv(host);
+
+	if (slot->data && slot->data->flis_check)
+		ret = slot->data->flis_check(host, clk);
+
+	return ret;
 }
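+/*
+ * A hedged sketch of a platform flis_check callback (not part of this
+ * patch; my_flis_check and the helpers are hypothetical): adjust pin
+ * driving strength when switching to a fast SD bus clock.
+ *
+ *	static int my_flis_check(struct sdhci_host *host, unsigned int clk)
+ *	{
+ *		if (clk >= 200000000)
+ *			return my_set_fast_pin_settings();
+ *		return my_set_default_pin_settings();
+ *	}
+ */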
 
 static const struct sdhci_ops sdhci_pci_ops = {
 	.enable_dma	= sdhci_pci_enable_dma,
 	.platform_bus_width	= sdhci_pci_bus_width,
 	.hw_reset		= sdhci_pci_hw_reset,
+	.power_up_host		= sdhci_pci_power_up_host,
+	.set_dev_power		= sdhci_pci_set_dev_power,
+	.get_cd		= sdhci_pci_get_cd,
+	.get_tuning_count = sdhci_pci_get_tuning_count,
+	.gpio_buf_check = sdhci_gpio_buf_check,
 };
 
 /*****************************************************************************\
@@ -1071,6 +1736,7 @@
 			sdhci_enable_irq_wakeups(slot->host);
 
 		pm_flags |= slot_pm_flags;
+		slot->dma_enabled = false;
 	}
 
 	if (chip->fixes && chip->fixes->suspend) {
@@ -1079,19 +1745,6 @@
 			goto err_pci_suspend;
 	}
 
-	pci_save_state(pdev);
-	if (pm_flags & MMC_PM_KEEP_POWER) {
-		if (pm_flags & MMC_PM_WAKE_SDIO_IRQ) {
-			pci_pme_active(pdev, true);
-			pci_enable_wake(pdev, PCI_D3hot, 1);
-		}
-		pci_set_power_state(pdev, PCI_D3hot);
-	} else {
-		pci_enable_wake(pdev, PCI_D3hot, 0);
-		pci_disable_device(pdev);
-		pci_set_power_state(pdev, PCI_D3hot);
-	}
-
 	return 0;
 
 err_pci_suspend:
@@ -1111,12 +1764,6 @@
 	if (!chip)
 		return 0;
 
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
-	ret = pci_enable_device(pdev);
-	if (ret)
-		return ret;
-
 	if (chip->fixes && chip->fixes->resume) {
 		ret = chip->fixes->resume(chip);
 		if (ret)
@@ -1232,6 +1879,19 @@
 	.runtime_idle = sdhci_pci_runtime_idle,
 };
 
+static void sdhci_hsmmc_virtual_detect(void *dev_id, int carddetect)
+{
+	struct sdhci_host *host = dev_id;
+
+	if (carddetect)
+		mmc_detect_change(host->mmc,
+			msecs_to_jiffies(DELAY_CARD_INSERTED));
+	else
+		mmc_detect_change(host->mmc,
+			msecs_to_jiffies(DELAY_CARD_REMOVED));
+}
+
+
 /*****************************************************************************\
  *                                                                           *
  * Device probing/removal                                                    *
@@ -1280,11 +1940,17 @@
 	slot->rst_n_gpio = -EINVAL;
 	slot->cd_gpio = -EINVAL;
 
+	host->hw_name = "PCI";
+	host->ops = &sdhci_pci_ops;
+	host->quirks = chip->quirks;
+	host->quirks2 = chip->quirks2;
+
 	/* Retrieve platform data if there is any */
 	if (*sdhci_pci_get_data)
 		slot->data = sdhci_pci_get_data(pdev, slotno);
 
 	if (slot->data) {
+		slot->data->pdev = pdev;
 		if (slot->data->setup) {
 			ret = slot->data->setup(slot->data);
 			if (ret) {
@@ -1294,12 +1960,15 @@
 		}
 		slot->rst_n_gpio = slot->data->rst_n_gpio;
 		slot->cd_gpio = slot->data->cd_gpio;
+
+		if (slot->data->quirks)
+			host->quirks2 |= slot->data->quirks;
+
+		if (slot->data->register_embedded_control)
+			slot->data->register_embedded_control(host,
+					sdhci_hsmmc_virtual_detect);
 	}
 
-	host->hw_name = "PCI";
-	host->ops = &sdhci_pci_ops;
-	host->quirks = chip->quirks;
-	host->quirks2 = chip->quirks2;
 
 	host->irq = pdev->irq;
 
@@ -1334,6 +2003,9 @@
 
 	host->mmc->pm_caps = MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
 	host->mmc->slotno = slotno;
+
+	if (host->quirks2 & SDHCI_QUIRK2_DISABLE_MMC_CAP_NONREMOVABLE)
+		host->mmc->caps &= ~MMC_CAP_NONREMOVABLE;
 	host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
 
 	ret = sdhci_add_host(host);
@@ -1395,11 +2067,20 @@
 	sdhci_free_host(slot->host);
 }
 
-static void sdhci_pci_runtime_pm_allow(struct device *dev)
+static void sdhci_pci_runtime_pm_allow(struct sdhci_pci_chip *chip)
 {
+	struct device *dev;
+
+	if (!chip || !chip->pdev)
+		return;
+
+	dev = &chip->pdev->dev;
 	pm_runtime_put_noidle(dev);
 	pm_runtime_allow(dev);
-	pm_runtime_set_autosuspend_delay(dev, 50);
+	if (chip->autosuspend_delay)
+		pm_runtime_set_autosuspend_delay(dev, chip->autosuspend_delay);
+	else
+		pm_runtime_set_autosuspend_delay(dev, 50);
 	pm_runtime_use_autosuspend(dev);
 	pm_suspend_ignore_children(dev, 1);
 }
@@ -1434,7 +2115,10 @@
 	if (slots == 0)
 		return -ENODEV;
 
-	BUG_ON(slots > MAX_SLOTS);
+	if (slots > MAX_SLOTS) {
+		dev_err(&pdev->dev, "Invalid number of the slots. Aborting.\n");
+		return -ENODEV;
+	}
 
 	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
 	if (ret)
@@ -1442,7 +2126,7 @@
 
 	first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;
 
-	if (first_bar > 5) {
+	if (first_bar > 4) {
 		dev_err(&pdev->dev, "Invalid first BAR. Aborting.\n");
 		return -ENODEV;
 	}
@@ -1475,6 +2159,11 @@
 	}
 
 	slots = chip->num_slots;	/* Quirk may have changed this */
+	/* num_slots may have been changed by a quirk, so re-check */
+	if (slots > MAX_SLOTS) {
+		dev_err(&pdev->dev, "Invalid number of slots. Aborting.\n");
+		goto free;
+	}
 
 	for (i = 0; i < slots; i++) {
 		slot = sdhci_pci_probe_slot(pdev, chip, first_bar, i);
@@ -1489,7 +2178,7 @@
 	}
 
 	if (chip->allow_runtime_pm)
-		sdhci_pci_runtime_pm_allow(&pdev->dev);
+		sdhci_pci_runtime_pm_allow(chip);
 
 	return 0;
 
@@ -1523,11 +2212,41 @@
 	pci_disable_device(pdev);
 }
 
+static void sdhci_pci_shutdown(struct pci_dev *pdev)
+{
+	struct sdhci_pci_chip *chip;
+	int i;
+
+	chip = pci_get_drvdata(pdev);
+
+	if (!chip || !chip->pdev)
+		return;
+
+	switch (chip->pdev->device) {
+	case PCI_DEVICE_ID_INTEL_CLV_SDIO0:
+		for (i = 0; i < chip->num_slots; i++) {
+			if (chip->slots[i]->host->flags & SDHCI_POWER_CTRL_DEV)
+				ctp_sd_card_power_save(chip->slots[i]);
+		}
+		break;
+	case PCI_DEVICE_ID_INTEL_MRFL_MMC:
+		if (chip->allow_runtime_pm) {
+			pm_runtime_get_sync(&pdev->dev);
+			pm_runtime_disable(&pdev->dev);
+			pm_runtime_put_noidle(&pdev->dev);
+		}
+		break;
+	default:
+		break;
+	}
+}
+
 static struct pci_driver sdhci_driver = {
 	.name =		"sdhci-pci",
 	.id_table =	pci_ids,
 	.probe =	sdhci_pci_probe,
 	.remove =	sdhci_pci_remove,
+	.shutdown =	sdhci_pci_shutdown,
 	.driver =	{
 		.pm =   &sdhci_pci_pm_ops
 	},
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index c24fbc5..1ae358e 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -201,8 +201,8 @@
 	if (!pdata)
 		return NULL;
 
-	if (!of_property_read_u32(np, "mrvl,clk-delay-cycles",
-				  &clk_delay_cycles))
+	of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles);
+	if (clk_delay_cycles > 0)
 		pdata->clk_delay_cycles = clk_delay_cycles;
 
 	return pdata;
@@ -255,7 +255,6 @@
 		mmc_of_parse(host->mmc);
 		sdhci_get_of_property(pdev);
 		pdata = pxav3_get_mmc_pdata(dev);
-		pdev->dev.platform_data = pdata;
 	} else if (pdata) {
 		/* on-chip device */
 		if (pdata->flags & PXA_FLAG_CARD_PERMANENT)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 836e2ac..1f7c796 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -54,6 +54,7 @@
 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
 static void sdhci_tuning_timer(unsigned long data);
 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
+static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios);
 
 #ifdef CONFIG_PM_RUNTIME
 static int sdhci_runtime_pm_get(struct sdhci_host *host);
@@ -173,6 +174,21 @@
 	sdhci_set_card_detection(host, false);
 }
 
+static void sdhci_busy_wait(struct mmc_host *mmc, u32 delay)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	/* busy-wait for 'delay' us in total, 4 us per loop */
+	u32 loop = delay / 4;
+	while (loop) {
+		/* have a delay here */
+		udelay(4);
+		/* read register to make sure host won't be clock gated */
+		sdhci_readw(host, SDHCI_HOST_VERSION);
+		loop--;
+	}
+}
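+/*
+ * Usage note: this pairs with SDHCI_QUIRK2_WAIT_FOR_IDLE in the data
+ * error handling below, e.g. sdhci_busy_wait(host->mmc, 1000) spins for
+ * roughly 1 ms while the register reads keep the host from being clock
+ * gated.
+ */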
+
 static void sdhci_reset(struct sdhci_host *host, u8 mask)
 {
 	unsigned long timeout;
@@ -196,7 +212,7 @@
 		host->clock = 0;
 
 	/* Wait max 100 ms */
-	timeout = 100;
+	timeout = 10000;
 
 	/* hw clears the bit when it's done */
 	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
@@ -207,7 +223,7 @@
 			return;
 		}
 		timeout--;
-		mdelay(1);
+		udelay(10);
 	}
 
 	if (host->ops->platform_reset_exit)
@@ -267,6 +283,9 @@
 {
 	u8 ctrl;
 
+	if (!(host->mmc->caps2 & MMC_CAP2_LED_SUPPORT))
+		return;
+
 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
 	ctrl |= SDHCI_CTRL_LED;
 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
@@ -276,6 +295,9 @@
 {
 	u8 ctrl;
 
+	if (!(host->mmc->caps2 & MMC_CAP2_LED_SUPPORT))
+		return;
+
 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
 	ctrl &= ~SDHCI_CTRL_LED;
 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
@@ -288,6 +310,9 @@
 	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
 	unsigned long flags;
 
+	if (!(host->mmc->caps2 & MMC_CAP2_LED_SUPPORT))
+		return;
+
 	spin_lock_irqsave(&host->lock, flags);
 
 	if (host->runtime_suspended)
@@ -960,6 +985,8 @@
 		 * upon error conditions.
 		 */
 		if (data->error) {
+			if (host->quirks2 & SDHCI_QUIRK2_WAIT_FOR_IDLE)
+				sdhci_busy_wait(host->mmc, 1000);
 			sdhci_reset(host, SDHCI_RESET_CMD);
 			sdhci_reset(host, SDHCI_RESET_DATA);
 		}
@@ -978,7 +1005,7 @@
 	WARN_ON(host->cmd);
 
 	/* Wait max 10 ms */
-	timeout = 10;
+	timeout = 1000;
 
 	mask = SDHCI_CMD_INHIBIT;
 	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
@@ -999,12 +1026,13 @@
 			return;
 		}
 		timeout--;
-		mdelay(1);
+		udelay(10);
 	}
 
 	mod_timer(&host->timer, jiffies + 10 * HZ);
 
 	host->cmd = cmd;
+	host->r1b_busy_end = 0;
 
 	sdhci_prepare_data(host, cmd);
 
@@ -1105,6 +1133,9 @@
 	case SDHCI_CTRL_UHS_DDR50:
 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
 		break;
+	case SDHCI_CTRL_HS_DDR200:
+		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
+		break;
 	default:
 		pr_warn("%s: Invalid UHS-I mode selected\n",
 			mmc_hostname(host->mmc));
@@ -1137,6 +1168,23 @@
 	if (clock == 0)
 		goto out;
 
+	/*
+	 * Check and, if needed, change the host controller pin GPIO
+	 * buffer settings according to the clock about to be used.
+	 * For example, at SD bus frequencies of 50MHz or 200MHz the
+	 * controller CLK/CMD/DAT pins may need different driving
+	 * strength and slew settings; the details are platform
+	 * specific. To keep the clock signal stable while the GPIO
+	 * buffer settings are being modified, the SD bus clock should
+	 * be disabled before the change and re-enabled afterwards.
+	 */
+	if (host->ops->gpio_buf_check)
+		host->ops->gpio_buf_check(host, clock);
+
 	if (host->version >= SDHCI_SPEC_300) {
 		if (sdhci_readw(host, SDHCI_HOST_CONTROL2) &
 			SDHCI_CTRL_PRESET_VAL_ENABLE) {
@@ -1210,7 +1258,7 @@
 	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
 
 	/* Wait max 20 ms */
-	timeout = 20;
+	timeout = 2000;
 	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
 		& SDHCI_CLOCK_INT_STABLE)) {
 		if (timeout == 0) {
@@ -1220,7 +1268,7 @@
 			return;
 		}
 		timeout--;
-		mdelay(1);
+		udelay(10);
 	}
 
 	clk |= SDHCI_CLOCK_CARD_EN;
@@ -1299,6 +1347,216 @@
 	return power;
 }
 
+/*
+ * One of the Medfield eMMC controllers (PCI device id 0x0823, SDIO3) is
+ * a shared resource used by the SCU and the IA processors. SCU primarily
+ * uses the eMMC host controller to access the eMMC device's Boot Partition,
+ * while the IA CPU uses the eMMC host controller to access the eMMC device's
+ * User Partition.
+ *
+ * After the SCU hands off the system to the IA processor, the IA processor
+ * assumes ownership of the eMMC host controller. Due to the absence of any
+ * arbitration at the eMMC host controller, this could result in concurrent
+ * eMMC host accesses, bus contention, and garbage data ending up in either
+ * of the partitions.
+ * To prevent this, an eMMC host controller locking mechanism is employed,
+ * where at any given time only one agent, SCU or IA, is allowed to access
+ * the host. This is achieved by implementing Dekker's
+ * Algorithm (http://en.wikipedia.org/wiki/Dekker's_algorithm) between the
+ * two processors.
+ *
+ * Before handing off the system to the IA processor, SCU must set up three
+ * housekeeping mutex variables allocated in the shared SRAM as follows:
+ *
+ * eMMC_Owner = IA (SCU and IA processors - RW, 32bit)
+ * IA_Req = FALSE (IA -RW, SCU - RO, 32bit)
+ * SCU_Req = FALSE (IA - RO, SCU - R/W, 32bit)
+ *
+ * There is no hardware based access control to these variables and so code
+ * executing on SCU and IA processors must follow below access rules
+ * (Dekker's algorithm):
+ *
+ * -----------------------------------------
+ * SCU Processor Implementation
+ * -----------------------------------------
+ * SCU_Req = TRUE;
+ * while (IA_Req == TRUE) {
+ *     if (eMMC_Owner != SCU){
+ *         SCU_Req = FALSE;
+ *         while (eMMC_Owner != SCU);
+ *         SCU_Req = TRUE;
+ *     }
+ * }
+ * // SCU now performs eMMC transactions here
+ * ...
+ * // When done, relinquish control to IA
+ * eMMC_Owner = IA;
+ * SCU_Req = FALSE;
+ *
+ * -----------------------------------------
+ * IA Processor Implementation
+ * -----------------------------------------
+ * IA_Req = TRUE;
+ * while (SCU_Req == TRUE) {
+ *     if (eMMC_Owner != IA){
+ *         IA_Req = FALSE;
+ *         while (eMMC_Owner != IA);
+ *         IA_Req = TRUE;
+ *     }
+ * }
+ * //IA now performs eMMC transactions here
+ * ...
+ * //When done, relinquish control to SCU
+ * eMMC_Owner = SCU;
+ * IA_Req = FALSE;
+ *
+ * ----------------------------------------
+ *
+ * sdhci_do_acquire_ownership - implement Dekker's algorithm on the IA side.
+ * This function only acquires ownership; it does not re-configure the host
+ * controller, since in some scenarios re-configuration is not needed and
+ * skipping it saves some overhead.
+ * @mmc: mmc host
+ *
+ * @return return value:
+ * 0 - Acquired the ownership successfully. The last owner was IA
+ * 1 - Acquired the ownership successfully. The last owner was SCU
+ * -EBUSY - failed to acquire ownership within the timeout period
+ */
+static int sdhci_do_acquire_ownership(struct mmc_host *mmc)
+{
+	struct sdhci_host *host;
+	unsigned long t1, t2;
+	unsigned long flags;
+
+	host = mmc_priv(mmc);
+
+	if (!host->sram_addr)
+		return 0;
+
+	/* if host has sram_addr, dekker_lock is initialized */
+	spin_lock_irqsave(&host->dekker_lock, flags);
+
+	host->usage_cnt++;
+
+	/* If IA has already hold the eMMC mutex, then just exit */
+	if (readl(host->sram_addr + DEKKER_IA_REQ_OFFSET)) {
+		spin_unlock_irqrestore(&host->dekker_lock, flags);
+		return 0;
+	}
+
+	DBG("Acquire ownership - eMMC owner: %d, IA req: %d, SCU req: %d\n",
+			readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET),
+			readl(host->sram_addr + DEKKER_IA_REQ_OFFSET),
+			readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET));
+
+	writel(1, host->sram_addr + DEKKER_IA_REQ_OFFSET);
+
+	t1 = jiffies + 10 * HZ;
+	t2 = 500;
+
+	while (readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET)) {
+		if (readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET) !=
+				DEKKER_OWNER_IA) {
+			writel(0, host->sram_addr + DEKKER_IA_REQ_OFFSET);
+			while (t2) {
+				if (readl(host->sram_addr +
+					DEKKER_EMMC_OWNER_OFFSET) ==
+						DEKKER_OWNER_IA)
+					break;
+				spin_unlock_irqrestore(&host->dekker_lock,
+						flags);
+				usleep_range(8000, 12000);
+				spin_lock_irqsave(&host->dekker_lock, flags);
+				t2--;
+			}
+			if (t2)
+				writel(1, host->sram_addr +
+						DEKKER_IA_REQ_OFFSET);
+			else
+				goto timeout;
+		}
+		if (time_after(jiffies, t1))
+			goto timeout;
+
+		cpu_relax();
+	}
+
+	spin_unlock_irqrestore(&host->dekker_lock, flags);
+	/*
+	 * If the last owner was SCU, the caller re-configures the host
+	 * controller next.
+	 */
+	return (readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET) ==
+			DEKKER_OWNER_IA) ? 1 : 0;
+
+timeout:
+	pr_err("eMMC mutex timeout!\n"
+			"Dump Dekker's house keeping variables -"
+			"eMMC owner: %d, IA req: %d, SCU req: %d\n",
+			readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET),
+			readl(host->sram_addr + DEKKER_IA_REQ_OFFSET),
+			readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET));
+
+	/* Release eMMC mutex anyway */
+	writel(DEKKER_OWNER_SCU, host->sram_addr + DEKKER_EMMC_OWNER_OFFSET);
+	writel(0, host->sram_addr + DEKKER_IA_REQ_OFFSET);
+
+	spin_unlock_irqrestore(&host->dekker_lock, flags);
+
+	return -EBUSY;
+}
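+/*
+ * For illustration only (not part of this patch): a minimal user-space
+ * model of the IA-side loop above. shared[] stands in for the three SRAM
+ * words and ia_acquire() is a hypothetical name.
+ *
+ *	enum { OWNER, IA_REQ, SCU_REQ };
+ *	extern volatile unsigned int shared[3];
+ *
+ *	static void ia_acquire(void)
+ *	{
+ *		shared[IA_REQ] = 1;
+ *		while (shared[SCU_REQ]) {
+ *			if (shared[OWNER] != DEKKER_OWNER_IA) {
+ *				shared[IA_REQ] = 0;
+ *				while (shared[OWNER] != DEKKER_OWNER_IA)
+ *					;	/* wait for handover */
+ *				shared[IA_REQ] = 1;
+ *			}
+ *		}
+ *	}
+ */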
+
+static int sdhci_acquire_ownership(struct mmc_host *mmc)
+{
+	int ret;
+
+	ret = sdhci_do_acquire_ownership(mmc);
+	if (ret) {
+		struct sdhci_host *host;
+		host = mmc_priv(mmc);
+		/* Re-config HC in case SCU has changed HC reg already */
+		pm_runtime_get_sync(mmc->parent);
+		/*
+		 * reinit host registers.
+		 * include reset host controller all,
+		 * reconfigure clock, pwr and other registers.
+		 */
+		sdhci_init(host, 0);
+		host->clock = 0;
+		host->pwr = 0;
+		sdhci_do_set_ios(host, &host->mmc->ios);
+		pm_runtime_put(mmc->parent);
+	}
+
+	return ret;
+}
+
+static void sdhci_release_ownership(struct mmc_host *mmc)
+{
+	struct sdhci_host *host;
+	unsigned long flags;
+
+	host = mmc_priv(mmc);
+
+	if (!host->sram_addr)
+		return;
+
+	spin_lock_irqsave(&host->dekker_lock, flags);
+	BUG_ON(host->usage_cnt == 0);
+	host->usage_cnt--;
+	if (host->usage_cnt == 0) {
+		writel(DEKKER_OWNER_SCU,
+				host->sram_addr + DEKKER_EMMC_OWNER_OFFSET);
+		writel(0, host->sram_addr + DEKKER_IA_REQ_OFFSET);
+		DBG("Exit ownership-eMMC owner: %d,IA req: %d,SCU req: %d\n",
+			readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET),
+			readl(host->sram_addr + DEKKER_IA_REQ_OFFSET),
+			readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET));
+	}
+	spin_unlock_irqrestore(&host->dekker_lock, flags);
+}
+
 /*****************************************************************************\
  *                                                                           *
  * MMC callbacks                                                             *
@@ -1316,10 +1574,16 @@
 
 	sdhci_runtime_pm_get(host);
 
-	present = mmc_gpio_get_cd(host->mmc);
+	sdhci_acquire_ownership(host->mmc);
 
 	spin_lock_irqsave(&host->lock, flags);
 
+	if (host->suspended) {
+		pr_err("%s: %s: host is in suspend state\n",
+				__func__, mmc_hostname(mmc));
+		BUG_ON(1);
+	}
+
 	WARN_ON(host->mrq != NULL);
 
 #ifndef SDHCI_USE_LEDS_CLASS
@@ -1346,6 +1610,7 @@
 	 *     zero: cd-gpio is used, and card is removed
 	 *     one: cd-gpio is used, and card is present
 	 */
+	present = mmc_gpio_get_cd(host->mmc);
 	if (present < 0) {
 		/* If polling, assume that the card is always present. */
 		if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
@@ -1368,28 +1633,51 @@
 		 * tuning procedure before sending command.
 		 */
 		if ((host->flags & SDHCI_NEEDS_RETUNING) &&
-		    !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
+		    !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ)) &&
+			mrq->cmd->opcode != MMC_SEND_STATUS) {
 			if (mmc->card) {
+				if ((mmc->card->ext_csd.part_config & 0x07) ==
+					EXT_CSD_PART_CONFIG_ACC_RPMB)
+					goto end_tuning;
 				/* eMMC uses cmd21 but sd and sdio use cmd19 */
 				tuning_opcode =
 					mmc->card->type == MMC_TYPE_MMC ?
 					MMC_SEND_TUNING_BLOCK_HS200 :
 					MMC_SEND_TUNING_BLOCK;
+				host->mrq = NULL;
 				spin_unlock_irqrestore(&host->lock, flags);
 				sdhci_execute_tuning(mmc, tuning_opcode);
 				spin_lock_irqsave(&host->lock, flags);
-
+end_tuning:
 				/* Restore original mmc_request structure */
 				host->mrq = mrq;
 			}
 		}
 
+		if (!(sdhci_readw(host, SDHCI_CLOCK_CONTROL) &
+					SDHCI_CLOCK_CARD_EN)) {
+			/*
+			 * The SD bus clock is stopped, so no interrupts
+			 * will be generated in this case.
+			 */
+			pr_warn("%s:%s: SD bus clock not enabled\n",
+					__func__, mmc_hostname(mmc));
+			pr_warn("%s:%s: host->pwr 0x%x, host->clock 0x%x\n",
+					__func__, mmc_hostname(mmc),
+					host->pwr, host->clock);
+			sdhci_dumpregs(host);
+			host->mrq->cmd->error = -EIO;
+			tasklet_schedule(&host->finish_tasklet);
+			goto out;
+		}
+
 		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
 			sdhci_send_command(host, mrq->sbc);
 		else
 			sdhci_send_command(host, mrq->cmd);
 	}
 
+out:
 	mmiowb();
 	spin_unlock_irqrestore(&host->lock, flags);
 }
@@ -1402,6 +1690,9 @@
 
 	spin_lock_irqsave(&host->lock, flags);
 
+	if (host->quirks2 & SDHCI_QUIRK2_ADVERTISE_2V0_FORCE_1V8)
+		ios->vdd = 7;
+
 	if (host->flags & SDHCI_DEVICE_DEAD) {
 		spin_unlock_irqrestore(&host->lock, flags);
 		if (host->vmmc && ios->power_mode == MMC_POWER_OFF)
@@ -1476,6 +1767,7 @@
 
 		/* In case of UHS-I modes, set High Speed Enable */
 		if ((ios->timing == MMC_TIMING_MMC_HS200) ||
+		    (ios->timing == MMC_TIMING_MMC_HS400) ||
 		    (ios->timing == MMC_TIMING_UHS_SDR50) ||
 		    (ios->timing == MMC_TIMING_UHS_SDR104) ||
 		    (ios->timing == MMC_TIMING_UHS_DDR50) ||
@@ -1527,7 +1819,9 @@
 			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
 			/* Select Bus Speed Mode for host */
 			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
-			if (ios->timing == MMC_TIMING_MMC_HS200)
+			if (ios->timing == MMC_TIMING_MMC_HS400)
+				ctrl_2 |= SDHCI_CTRL_HS_DDR200;
+			else if (ios->timing == MMC_TIMING_MMC_HS200)
 				ctrl_2 |= SDHCI_CTRL_HS_SDR200;
 			else if (ios->timing == MMC_TIMING_UHS_SDR12)
 				ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
@@ -1578,7 +1872,9 @@
 	struct sdhci_host *host = mmc_priv(mmc);
 
 	sdhci_runtime_pm_get(host);
+	sdhci_acquire_ownership(mmc);
 	sdhci_do_set_ios(host, ios);
+	sdhci_release_ownership(mmc);
 	sdhci_runtime_pm_put(host);
 }
 
@@ -1659,8 +1955,13 @@
 {
 	struct sdhci_host *host = mmc_priv(mmc);
 
-	if (host->ops && host->ops->hw_reset)
+	if (host->ops && host->ops->hw_reset) {
+		sdhci_runtime_pm_get(host);
+		sdhci_acquire_ownership(mmc);
 		host->ops->hw_reset(host);
+		sdhci_release_ownership(mmc);
+		sdhci_runtime_pm_put(host);
+	}
 }
 
 static int sdhci_get_ro(struct mmc_host *mmc)
@@ -1876,6 +2177,11 @@
 	sdhci_clear_set_irqs(host, ier, SDHCI_INT_DATA_AVAIL);
 
 	/*
+	 * Set the data timeout register to its maximum value.
+	 */
+	sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
+
+	/*
 	 * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
 	 * of loops reaches 40 times or a timeout of 150ms occurs.
 	 */
@@ -1883,6 +2189,8 @@
 	do {
 		struct mmc_command cmd = {0};
 		struct mmc_request mrq = {NULL};
+		unsigned int intmask;
+		unsigned long t = jiffies + msecs_to_jiffies(150);
 
 		if (!tuning_loop_counter && !timeout)
 			break;
@@ -1923,19 +2231,53 @@
 		sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
 
 		sdhci_send_command(host, &cmd);
+		mmiowb();
 
 		host->cmd = NULL;
 		host->mrq = NULL;
 
-		spin_unlock(&host->lock);
-		enable_irq(host->irq);
+		/* delete the timer created by send command */
+		del_timer(&host->timer);
 
-		/* Wait for Buffer Read Ready interrupt */
-		wait_event_interruptible_timeout(host->buf_ready_int,
-					(host->tuning_done == 1),
-					msecs_to_jiffies(50));
-		disable_irq(host->irq);
-		spin_lock(&host->lock);
+		if (host->quirks2 & SDHCI_QUIRK2_TUNING_POLL) {
+			while (!time_after(jiffies, t)) {
+				intmask = sdhci_readl(host, SDHCI_INT_STATUS);
+				if (intmask & SDHCI_INT_DATA_AVAIL) {
+					host->tuning_done = 1;
+					sdhci_writel(host,
+						intmask & SDHCI_INT_DATA_AVAIL,
+						SDHCI_INT_STATUS);
+					break;
+				}
+			}
+		} else {
+			intmask = sdhci_readl(host, SDHCI_INT_STATUS);
+			if (intmask & SDHCI_INT_DATA_AVAIL) {
+				host->tuning_done = 1;
+				sdhci_writel(host,
+					intmask & SDHCI_INT_DATA_AVAIL,
+					SDHCI_INT_STATUS);
+			}
+			spin_unlock(&host->lock);
+			enable_irq(host->irq);
+
+			if (!host->tuning_done)
+				/* Wait for Buffer Read Ready interrupt */
+				wait_event_interruptible_timeout(
+						host->buf_ready_int,
+						(host->tuning_done == 1),
+						msecs_to_jiffies(50));
+			disable_irq(host->irq);
+			spin_lock(&host->lock);
+
+			intmask = sdhci_readl(host, SDHCI_INT_STATUS);
+			if (intmask & SDHCI_INT_DATA_AVAIL) {
+				host->tuning_done = 1;
+				sdhci_writel(host,
+					intmask & SDHCI_INT_DATA_AVAIL,
+					SDHCI_INT_STATUS);
+			}
+		}
 
 		if (!host->tuning_done) {
 			pr_info(DRIVER_NAME ": Timeout waiting for "
@@ -1954,9 +2296,13 @@
 		host->tuning_done = 0;
 
 		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
-		tuning_loop_counter--;
-		timeout--;
-		mdelay(1);
+		if (tuning_loop_counter)
+			tuning_loop_counter--;
+		if (timeout)
+			timeout--;
+		spin_unlock(&host->lock);
+		usleep_range(900, 1100);
+		spin_lock(&host->lock);
 	} while (ctrl & SDHCI_CTRL_EXEC_TUNING);
 
 	/*
@@ -2057,6 +2403,8 @@
 		pr_err("%s: Resetting controller.\n",
 			mmc_hostname(host->mmc));
 
+		if (host->quirks2 & SDHCI_QUIRK2_WAIT_FOR_IDLE)
+			sdhci_busy_wait(mmc, 1000);
 		sdhci_reset(host, SDHCI_RESET_CMD);
 		sdhci_reset(host, SDHCI_RESET_DATA);
 
@@ -2067,6 +2415,23 @@
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
+static void sdhci_set_dev_power(struct mmc_host *mmc, bool poweron)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	if (host->ops->set_dev_power)
+		host->ops->set_dev_power(host, poweron);
+}
+
+static void sdhci_init_card(struct mmc_host *mmc, struct mmc_card *card)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	if (host->quirks2 & SDHCI_QUIRK2_NON_STD_CIS)
+		card->quirks |= MMC_QUIRK_NON_STD_CIS;
+}
+
 static const struct mmc_host_ops sdhci_ops = {
 	.request	= sdhci_request,
 	.set_ios	= sdhci_set_ios,
@@ -2078,6 +2443,9 @@
 	.execute_tuning			= sdhci_execute_tuning,
 	.card_event			= sdhci_card_event,
 	.card_busy	= sdhci_card_busy,
+	.set_dev_power = sdhci_set_dev_power,
+	.init_card = sdhci_init_card,
+	.busy_wait	= sdhci_busy_wait,
 };
 
 /*****************************************************************************\
@@ -2090,9 +2458,11 @@
 {
 	struct sdhci_host *host = (struct sdhci_host*)param;
 
+	cancel_delayed_work(&host->mmc->detect);
+
 	sdhci_card_event(host->mmc);
 
-	mmc_detect_change(host->mmc, msecs_to_jiffies(200));
+	mmc_detect_change(host->mmc, msecs_to_jiffies(500));
 }
 
 static void sdhci_tasklet_finish(unsigned long param)
@@ -2123,7 +2493,8 @@
 	 * upon error conditions.
 	 */
 	if (!(host->flags & SDHCI_DEVICE_DEAD) &&
-	    ((mrq->cmd && mrq->cmd->error) ||
+		((mrq->cmd && mrq->cmd->error &&
+		mrq->cmd->error != -ENOMEDIUM) ||
 		 (mrq->data && (mrq->data->error ||
 		  (mrq->data->stop && mrq->data->stop->error))) ||
 		   (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
@@ -2135,6 +2506,8 @@
 
 		/* Spec says we should do both at the same time, but Ricoh
 		   controllers do not like that. */
+		if (host->quirks2 & SDHCI_QUIRK2_WAIT_FOR_IDLE)
+			sdhci_busy_wait(host->mmc, 1000);
 		sdhci_reset(host, SDHCI_RESET_CMD);
 		sdhci_reset(host, SDHCI_RESET_DATA);
 	}
@@ -2150,10 +2523,25 @@
 	mmiowb();
 	spin_unlock_irqrestore(&host->lock, flags);
 
+	sdhci_release_ownership(host->mmc);
 	mmc_request_done(host->mmc, mrq);
 	sdhci_runtime_pm_put(host);
 }
 
+static void dump_rte_apic_reg(struct sdhci_host *host, void __iomem *idx_addr)
+{
+	unsigned int rte_lo, rte_hi;
+
+	writeb(0x10 + 2 * host->irq, idx_addr);
+	rte_lo = readl(host->rte_addr + 0x10);
+
+	writeb(0x10 + 2 * host->irq + 1, idx_addr);
+	rte_hi = readl(host->rte_addr + 0x10);
+
+	pr_err("%s: dump APIC RTE reg - L32: 0x%08x, H32: 0x%08x\n",
+		mmc_hostname(host->mmc), rte_lo, rte_hi);
+}
+
 static void sdhci_timeout_timer(unsigned long data)
 {
 	struct sdhci_host *host;
@@ -2168,6 +2556,9 @@
 			"interrupt.\n", mmc_hostname(host->mmc));
 		sdhci_dumpregs(host);
 
+		if (host->rte_addr)
+			dump_rte_apic_reg(host, host->rte_addr);
+
 		if (host->data) {
 			host->data->error = -ETIMEDOUT;
 			sdhci_finish_data(host);
@@ -2244,7 +2635,10 @@
 			DBG("Cannot wait for busy signal when also "
 				"doing a data transfer");
 		else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))
-			return;
+			if (!host->r1b_busy_end) {
+				host->r1b_busy_end = 1;
+				return;
+			}
 
 		/* The controller does not support the end-of-busy IRQ,
 		 * fall through and take the SDHCI_INT_RESPONSE */
@@ -2304,10 +2698,23 @@
 		 * The "data complete" interrupt is also used to
 		 * indicate that a busy state has ended. See comment
 		 * above in sdhci_cmd_irq().
+		 *
+		 * "data timeout" interrupt may also happen
 		 */
 		if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
 			if (intmask & SDHCI_INT_DATA_END) {
-				sdhci_finish_command(host);
+				if (host->r1b_busy_end)
+					sdhci_finish_command(host);
+				else
+					host->r1b_busy_end = 1;
+				return;
+			} else if (intmask & SDHCI_INT_DATA_TIMEOUT) {
+				pr_err("%s: Got data interrupt 0x%08x for busy cmd %d\n",
+						mmc_hostname(host->mmc),
+						(unsigned)intmask,
+						host->cmd->opcode);
+				host->cmd->error = -ETIMEDOUT;
+				tasklet_schedule(&host->finish_tasklet);
 				return;
 			}
 		}
@@ -2437,6 +2844,21 @@
 	}
 
 	if (intmask & SDHCI_INT_CMD_MASK) {
+		/*
+		 * If a command conflict interrupt is raised, delay 64 SD
+		 * clocks before clearing it; otherwise the interrupt is
+		 * raised again. This is empirical: the SDHC spec does not
+		 * say the conflict interrupt recurs when it is cleared
+		 * without a delay.
+		 */
+		if ((intmask & SDHCI_INT_CMD_CONFLICT) ==
+				SDHCI_INT_CMD_CONFLICT) {
+			if (host->clock)
+				udelay(64 * 1000000 / host->clock);
+			else
+				udelay(500);
+		}
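+		/*
+		 * Worked example (editorial, illustrative only): at the
+		 * 400 kHz identification clock the delay is
+		 * 64 * 1000000 / 400000 = 160 us; at 50 MHz the integer
+		 * division rounds it down to 1 us.
+		 */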
 		sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
 			SDHCI_INT_STATUS);
 		sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
@@ -2492,12 +2914,744 @@
 	return result;
 }
 
+/************************************************************************\
+ *                                                                      *
+ * APIs for panic record use                                            *
+ *                                                                      *
+ * Note: for panic use, take care with sdhci_read/write.                *
+ *                                                                      *
+ * The sdhci_read/write helpers are defined by the SDHCI host layer     *
+ * and wrap the kernel read/write functions. Before touching the        *
+ * hardware they check whether the host driver supplies its own         *
+ * register accessors; if it does, the kernel-defined read/write        *
+ * cannot be used and the special accessors must be called instead.     *
+ * So if a host driver uses special accessors, make sure they are       *
+ * still safe to use in panic mode; if it does not, the kernel          *
+ * read/write functions are safe for panic use.                         *
+ *                                                                      *
+ * The MFLD sdhci host controller driver uses no special accessors.     *
+ *                                                                      *
+\************************************************************************/
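+
+/*
+ * Illustrative sketch of the dispatch described above (assumed to
+ * match the CONFIG_MMC_SDHCI_IO_ACCESSORS helpers in sdhci.h; not new
+ * code in this patch):
+ *
+ *	static inline u32 sdhci_readl(struct sdhci_host *host, int reg)
+ *	{
+ *		if (unlikely(host->ops->read_l))
+ *			return host->ops->read_l(host, reg);
+ *		else
+ *			return readl(host->ioaddr + reg);
+ *	}
+ */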
+
+static int panic_irq_done;
+
+static void sdhci_panic_irq_wait(struct sdhci_host *host);
+
+static inline void sdhci_panic_finish_req(struct sdhci_host *host)
+{
+	host->mrq = NULL;
+	host->cmd = NULL;
+	host->data = NULL;
+	panic_irq_done = 1;
+}
+
+/*
+ * Assumes only SDMA writes are used and the data length is 512 bytes.
+ */
+static void sdhci_panic_send_cmd(struct sdhci_host *host,
+		struct mmc_command *cmd)
+{
+	unsigned long timeout;
+	u32 mask;
+	int flags;
+
+	WARN_ON(host->cmd);
+	/* Wait max 10 ms */
+	timeout = 10;
+	mask = SDHCI_CMD_INHIBIT;
+	if ((cmd->data != 0) || (cmd->flags & MMC_RSP_BUSY))
+		mask |= SDHCI_DATA_INHIBIT;
+
+	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
+		if (timeout == 0) {
+			pr_err("%s %s: Controller never released inhibit bit(s).\n",
+					__func__, mmc_hostname(host->mmc));
+			sdhci_dumpregs(host);
+			sdhci_panic_finish_req(host);
+			return;
+		}
+		timeout--;
+		/*
+		 * The card does not seem ready for the next command;
+		 * wait 1 ms and retry.
+		 */
+		mdelay(1);
+	}
+
+	host->cmd = cmd;
+	host->r1b_busy_end = 0;
+
+	/*
+	 * Set the data timeout register to its maximum value.
+	 */
+	sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
+	/*
+	 * prepare data
+	 */
+	if (cmd->data) {
+		unsigned int mode;
+		struct mmc_data *data = cmd->data;
+		u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
+		u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
+
+		host->data = data;
+		host->data_early = 0;
+		/*
+		 * update DMA address
+		 */
+		sdhci_writel(host, data->dmabuf, SDHCI_DMA_ADDRESS);
+
+		if (host->version >= SDHCI_SPEC_200) {
+			u8 ctrl;
+			ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+			ctrl &= ~SDHCI_CTRL_DMA_MASK;
+			if ((host->flags & SDHCI_REQ_USE_DMA) &&
+					(host->flags & SDHCI_USE_ADMA))
+				ctrl |= SDHCI_CTRL_ADMA32;
+			else
+				ctrl |= SDHCI_CTRL_SDMA;
+			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+		}
+
+		if (host->flags & SDHCI_REQ_USE_DMA)
+			sdhci_clear_set_irqs(host, pio_irqs, dma_irqs);
+		else
+			sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
+
+		/*
+		 * We do not handle DMA boundaries,
+		 * so set it to max (512 KiB)
+		 */
+		sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, data->blksz),
+				SDHCI_BLOCK_SIZE);
+		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
+
+		/*
+		 * set transfer mode
+		 */
+		mode = SDHCI_TRNS_BLK_CNT_EN;
+		if (data->blocks > 1) {
+			if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
+				mode |= SDHCI_TRNS_MULTI |
+					SDHCI_TRNS_AUTO_CMD12;
+			else
+				mode |= SDHCI_TRNS_MULTI;
+		}
+		if (host->flags & SDHCI_REQ_USE_DMA)
+			mode |= SDHCI_TRNS_DMA;
+
+		sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
+	}
+
+	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
+
+	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
+		pr_err("%s %s: Unsupported response type!\n",
+				__func__, mmc_hostname(host->mmc));
+		sdhci_panic_finish_req(host);
+		return;
+	}
+
+	if (!(cmd->flags & MMC_RSP_PRESENT))
+		flags = SDHCI_CMD_RESP_NONE;
+	else if (cmd->flags & MMC_RSP_136)
+		flags = SDHCI_CMD_RESP_LONG;
+	else if (cmd->flags & MMC_RSP_BUSY)
+		flags = SDHCI_CMD_RESP_SHORT_BUSY;
+	else
+		flags = SDHCI_CMD_RESP_SHORT;
+
+	if (cmd->flags & MMC_RSP_CRC)
+		flags |= SDHCI_CMD_CRC;
+	if (cmd->flags & MMC_RSP_OPCODE)
+		flags |= SDHCI_CMD_INDEX;
+	if (cmd->data)
+		flags |= SDHCI_CMD_DATA;
+
+	/*
+	 * send command
+	 */
+	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
+
+	mmiowb();
+
+	/*
+	 * poll for the interrupt
+	 */
+	sdhci_panic_irq_wait(host);
+}
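+
+/*
+ * Design note (editorial): in panic context interrupts may be masked
+ * or unreliable, so completion is detected by polling SDHCI_INT_STATUS
+ * in sdhci_panic_irq_wait() below instead of waiting on the IRQ
+ * handler and tasklets used in the normal request path.
+ */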
+
+static void sdhci_panic_finish_data(struct sdhci_host *host)
+{
+	struct mmc_data *data;
+
+	BUG_ON(!host->data);
+
+	data = host->data;
+	host->data = NULL;
+
+	/*
+	 * Panic use: nothing is unmapped here.
+	 */
+
+	/*
+	 * The specification states that the block count register must
+	 * be updated, but it does not specify at what point in the
+	 * data flow. That makes the register entirely useless to read
+	 * back so we have to assume that nothing made it to the card
+	 * in the event of an error.
+	 */
+	if (data->error)
+		data->bytes_xfered = 0;
+	else
+		data->bytes_xfered = data->blksz * data->blocks;
+
+	if (data->stop) {
+		/*
+		 * We will not get here, since a single-block transfer
+		 * is used when a panic occurs.
+		 */
+		sdhci_panic_send_cmd(host, data->stop);
+	} else
+		sdhci_panic_finish_req(host);
+}
+
+static void sdhci_panic_finish_command(struct sdhci_host *host)
+{
+	int i;
+
+	BUG_ON(host->cmd == NULL);
+
+	if (host->cmd->flags & MMC_RSP_PRESENT) {
+		if (host->cmd->flags & MMC_RSP_136) {
+			/* CRC is stripped so we need to do some shifting. */
+			for (i = 0; i < 4; i++) {
+				host->cmd->resp[i] = sdhci_readl(host,
+						SDHCI_RESPONSE + (3-i)*4) << 8;
+				if (i != 3)
+					host->cmd->resp[i] |=
+						sdhci_readb(host,
+						SDHCI_RESPONSE + (3-i)*4-1);
+			}
+		} else {
+			host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
+		}
+	}
+
+	host->cmd->error = 0;
+
+	if (host->data && host->data_early)
+		sdhci_panic_finish_data(host);
+
+	if (!host->cmd->data)
+		sdhci_panic_finish_req(host);
+
+	host->cmd = NULL;
+}
+
+/*
+ * sdhci_panic_cmd_irq: handle command irqs in panic mode
+ *
+ * When the host is in panic mode, the host driver must poll the
+ * interrupt status register. Once command irqs are seen, call this
+ * function to handle them.
+ */
+static void sdhci_panic_cmd_irq(struct sdhci_host *host, u32 intmask)
+{
+	BUG_ON(intmask == 0);
+
+	if (intmask & SDHCI_INT_TIMEOUT)
+		host->cmd->error = -ETIMEDOUT;
+	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
+				SDHCI_INT_INDEX))
+		host->cmd->error = -EILSEQ;
+
+	if (host->cmd->error) {
+		sdhci_panic_finish_req(host);
+		return;
+	}
+
+	if (host->cmd->flags & MMC_RSP_BUSY) {
+		if (host->cmd->data)
+			pr_debug("Cannot wait for busy signal when also doing a data transfer\n");
+		else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))
+			if (!host->r1b_busy_end) {
+				host->r1b_busy_end = 1;
+				return;
+			}
+	}
+
+	if (intmask & SDHCI_INT_RESPONSE)
+		sdhci_panic_finish_command(host);
+}
+
+/*
+ * sdhci_panic_data_irq: handle data irqs in panic mode
+ *
+ * When the host is in panic mode, the host driver must poll the
+ * interrupt status register. Once data irqs are seen, call this
+ * function to handle them.
+ */
+static void sdhci_panic_data_irq(struct sdhci_host *host, u32 intmask)
+{
+	BUG_ON(intmask == 0);
+
+	if (!host->data) {
+		/*
+		 * The "data complete" interrupt is also used to
+		 * indicate that a busy state has ended. See comment
+		 * above in sdhci_cmd_irq().
+		 */
+		if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
+			if (intmask & SDHCI_INT_DATA_END) {
+				if (host->r1b_busy_end)
+					sdhci_panic_finish_command(host);
+				else
+					host->r1b_busy_end = 1;
+				return;
+			}
+		}
+
+		pr_err("%s %s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
+			__func__, mmc_hostname(host->mmc), (unsigned)intmask);
+		sdhci_dumpregs(host);
+
+		return;
+	}
+
+	if (intmask & SDHCI_INT_DATA_TIMEOUT)
+		host->data->error = -ETIMEDOUT;
+	else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
+		host->data->error = -EILSEQ;
+	else if (intmask & SDHCI_INT_ADMA_ERROR) {
+		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
+		host->data->error = -EIO;
+	}
+
+	if (host->data->error)
+		sdhci_panic_finish_data(host);
+	else {
+		if (intmask & SDHCI_INT_DMA_END)
+			sdhci_writel(host, sdhci_readl(host, SDHCI_DMA_ADDRESS),
+					SDHCI_DMA_ADDRESS);
+
+		if (intmask & SDHCI_INT_DATA_END) {
+			if (host->cmd)
+				host->data_early = 1;
+			else
+				sdhci_panic_finish_data(host);
+		}
+	}
+}
+
+/*
+ * sdhci_panic_irq_wait: irq handler for panic record
+ */
+static void sdhci_panic_irq_wait(struct sdhci_host *host)
+{
+	u32 intmask;
+	panic_irq_done = 0;
+retry:
+	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
+
+	if (!intmask || intmask == 0xffffffff)
+		goto retry;
+
+	DBG("***%s got interrupt: 0x%08x\n",
+			__func__, intmask);
+
+	if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
+		sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
+				SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
+		/*
+		 * do nothing for card detect
+		 */
+	}
+
+	intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
+
+	if (intmask & SDHCI_INT_CMD_MASK) {
+		sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
+				SDHCI_INT_STATUS);
+		sdhci_panic_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
+	}
+
+	if (intmask & SDHCI_INT_DATA_MASK) {
+		sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK,
+				SDHCI_INT_STATUS);
+		sdhci_panic_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
+	}
+
+	intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
+
+	intmask &= ~SDHCI_INT_ERROR;
+
+	if (intmask & SDHCI_INT_BUS_POWER) {
+		pr_err("%s %s: Card is consuming too much power!\n",
+				__func__, mmc_hostname(host->mmc));
+		sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS);
+	}
+
+	intmask &= ~SDHCI_INT_BUS_POWER;
+
+	if (intmask & SDHCI_INT_CARD_INT) {
+		sdhci_writel(host, intmask & SDHCI_INT_CARD_INT,
+				SDHCI_INT_STATUS);
+		/*
+		 * do nothing for this irq
+		 */
+		intmask &= ~SDHCI_INT_CARD_INT;
+	}
+
+	if (intmask) {
+		pr_err("%s %s: Unexpected interrupt 0x%08x.\n",
+				__func__, mmc_hostname(host->mmc), intmask);
+		sdhci_dumpregs(host);
+
+		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
+	}
+
+	mmiowb();
+	if (!panic_irq_done)
+		goto retry;
+}
+
+static void sdhci_mfld_panic_set_ios(struct mmc_panic_host *mmc)
+{
+	struct sdhci_host *host;
+	struct mmc_ios *ios;
+	u8 ctrl;
+
+	if (!mmc)
+		return;
+	ios = &mmc->ios;
+	host = (struct sdhci_host *)mmc->priv;
+
+	/*
+	 * Reset the chip on each power off.
+	 * Should clear out any weird states.
+	 */
+	if (ios->power_mode == MMC_POWER_OFF)
+		pr_info("%s: we are in panic, why need power off?\n", __func__);
+
+	sdhci_set_clock(host, ios->clock);
+
+	if (ios->power_mode == MMC_POWER_OFF)
+		sdhci_set_power(host, -1);
+	else
+		sdhci_set_power(host, ios->vdd);
+
+	if (host->ops->platform_send_init_74_clocks)
+		host->ops->platform_send_init_74_clocks(host, ios->power_mode);
+
+	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+
+	if (ios->bus_width == MMC_BUS_WIDTH_8)
+		ctrl |= SDHCI_CTRL_8BITBUS;
+	else
+		ctrl &= ~SDHCI_CTRL_8BITBUS;
+
+	if (ios->bus_width == MMC_BUS_WIDTH_4)
+		ctrl |= SDHCI_CTRL_4BITBUS;
+	else
+		ctrl &= ~SDHCI_CTRL_4BITBUS;
+
+	if ((ios->timing == MMC_TIMING_SD_HS ||
+				ios->timing == MMC_TIMING_MMC_HS)
+			&& !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
+		ctrl |= SDHCI_CTRL_HISPD;
+	else
+		ctrl &= ~SDHCI_CTRL_HISPD;
+
+	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+
+	/*
+	 * Some (ENE) controllers go apeshit on some ios operation,
+	 * signalling timeout and CRC errors even on CMD0. Resetting
+	 * it on each ios seems to solve the problem.
+	 */
+	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
+		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+
+	mmiowb();
+}
+
+static void sdhci_panic_reinit_host(struct mmc_panic_host *mmc)
+{
+	struct sdhci_host *host = mmc->priv;
+	sdhci_init(host, 0);
+	host->pwr = 0; /* force power reprogram */
+	host->clock = 0; /* force clock reprogram */
+	sdhci_mfld_panic_set_ios(mmc);
+	mmiowb();
+}
+
+static void sdhci_mfld_panic_request(struct mmc_panic_host *panic_mmc,
+		struct mmc_request *mrq)
+{
+	struct sdhci_host *host;
+	bool present;
+
+	if (!panic_mmc || !mrq)
+		return;
+
+	host = (struct sdhci_host *)panic_mmc->priv;
+
+	/*
+	 * Only single-block DMA writes are supported.
+	 */
+	if (mrq->cmd->data) {
+		if (mrq->cmd->data->blocks != 1 ||
+				mrq->cmd->data->flags & MMC_DATA_READ)
+			mrq->cmd->error = -EINVAL;
+	}
+
+	if (host->flags & SDHCI_USE_ADMA)
+		host->flags &= ~SDHCI_USE_ADMA;
+
+	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) {
+		if (mrq->stop) {
+			mrq->data->stop = NULL;
+			mrq->stop = NULL;
+		}
+	}
+
+	host->mrq = mrq;
+
+	/* If polling, assume that the card is always present. */
+	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+		present = true;
+	else
+		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+			SDHCI_CARD_PRESENT;
+
+	if (!present) {
+		host->mrq->cmd->error = -ENOMEDIUM;
+		sdhci_panic_finish_req(host);
+	} else
+		sdhci_panic_send_cmd(host, mrq->cmd);
+
+	/*
+	 * The controller needs a reset of internal state machines
+	 * upon error conditions.
+	 */
+	if (mrq->cmd->error || (mrq->data && (mrq->data->error ||
+			(mrq->data->stop && mrq->data->stop->error))) ||
+			(host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)) {
+		pr_err("%s: request handle failed\n", __func__);
+		sdhci_dumpregs(host);
+		sdhci_panic_reinit_host(panic_mmc);
+	}
+}
+
+/*
+ * Like sdhci_acquire_ownership(), used by the IA to take ownership
+ * before using the host controller. Since this function is called in
+ * panic mode, we cannot use msleep() as sdhci_acquire_ownership()
+ * does; use mdelay() instead.
+ */
+static int sdhci_mfld_panic_acquire_ownership(struct sdhci_host *host)
+{
+	unsigned long t1, t2;
+
+	if (!host->sram_addr)
+		return DEKKER_OWNER_IA;
+
+	/* If the IA already holds the eMMC mutex, just exit */
+	if (readl(host->sram_addr + DEKKER_IA_REQ_OFFSET))
+		return 0;
+
+	writel(1, host->sram_addr + DEKKER_IA_REQ_OFFSET);
+
+	t1 = 100;
+	t2 = 500;
+
+	while (readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET)) {
+		if (readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET) !=
+				DEKKER_OWNER_IA) {
+			writel(0, host->sram_addr + DEKKER_IA_REQ_OFFSET);
+			while (t2) {
+				if (readl(host->sram_addr +
+						DEKKER_EMMC_OWNER_OFFSET) ==
+						DEKKER_OWNER_IA)
+					break;
+				mdelay(10);
+				t2--;
+			}
+			if (t2)
+				writel(1, host->sram_addr +
+						DEKKER_IA_REQ_OFFSET);
+			else
+				goto timeout;
+		}
+		/*
+		 * If we get here, the SCU FW is releasing ownership,
+		 * so just wait a short time.
+		 */
+		if (t1) {
+			mdelay(10);
+			t1--;
+		} else
+			goto timeout;
+	}
+
+	pr_debug("Acquire ownership - eMMC owner: %d, IA req: %d, SCU req: %d\n",
+			readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET),
+			readl(host->sram_addr + DEKKER_IA_REQ_OFFSET),
+			readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET));
+
+	return (readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET) ==
+		DEKKER_OWNER_IA) ? DEKKER_OWNER_SCU : DEKKER_OWNER_IA;
+timeout:
+
+	pr_warn("%s: Timeout to hold eMMC mutex\n", __func__);
+	return -EBUSY;
+}
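+
+/*
+ * For reference (editorial, illustrative only): the IA/SCU protocol
+ * above follows the classic two-party Dekker handshake, with
+ * DEKKER_IA_REQ/DEKKER_SCU_REQ as the intent flags and
+ * DEKKER_EMMC_OWNER as the turn variable:
+ *
+ *	flag[me] = 1;
+ *	while (flag[other]) {
+ *		if (turn != me) {
+ *			flag[me] = 0;
+ *			while (turn != me)
+ *				;	// back off
+ *			flag[me] = 1;
+ *		}
+ *	}
+ *	// critical section
+ *	turn = other;
+ *	flag[me] = 0;
+ */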
+
+static int sdhci_mfld_panic_power_on(struct mmc_panic_host *panic_host)
+{
+	int ret;
+	struct mmc_host *mmc;
+	struct sdhci_host *host;
+
+	if (!panic_host)
+		return -ENODEV;
+	mmc = panic_host->mmc;
+	host = panic_host->priv;
+
+	if (host->runtime_suspended) {
+		/*
+		 * power up the host controller
+		 */
+		pm_runtime_get_noresume(mmc->parent);
+
+		if (host->ops->power_up_host) {
+			ret = host->ops->power_up_host(host);
+			if (ret)
+				return ret;
+		}
+		sdhci_panic_reinit_host(panic_host);
+		host->runtime_suspended = false;
+	}
+
+	return 0;
+}
+
+static int sdhci_mfld_panic_hold_mutex(struct mmc_panic_host *panic_host)
+{
+	struct sdhci_host *host;
+	int ret;
+
+	if (!panic_host)
+		return -ENODEV;
+
+	host = panic_host->priv;
+
+	ret = sdhci_mfld_panic_acquire_ownership(host);
+
+	if (ret == DEKKER_OWNER_SCU) {
+		if (host->ops->power_up_host) {
+			ret = host->ops->power_up_host(host);
+			if (ret)
+				return ret;
+		}
+		sdhci_panic_reinit_host(panic_host);
+		return 0;
+	} else if (ret == DEKKER_OWNER_IA)
+		return sdhci_mfld_panic_power_on(panic_host);
+
+	return ret;
+}
+
+static void sdhci_mfld_panic_release_mutex(struct mmc_panic_host *panic_host)
+{
+	struct sdhci_host *host;
+
+	if (!panic_host)
+		return;
+	host = panic_host->priv;
+
+	if (!host->sram_addr)
+		return;
+
+	writel(DEKKER_OWNER_SCU,
+			host->sram_addr + DEKKER_EMMC_OWNER_OFFSET);
+	writel(0, host->sram_addr + DEKKER_IA_REQ_OFFSET);
+	DBG("Exit ownership - eMMC owner: %d, IA req: %d, SCU req: %d\n",
+			readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET),
+			readl(host->sram_addr + DEKKER_IA_REQ_OFFSET),
+			readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET));
+}
+
+static void sdhci_mfld_panic_prepare(struct mmc_panic_host *panic_host)
+{
+	struct sdhci_host *host;
+
+	if (!panic_host)
+		return;
+	host = panic_host->priv;
+
+	/*
+	 * Assume the host is powered off.
+	 */
+	host->runtime_suspended = true;
+
+#ifdef CONFIG_PM_RUNTIME
+	/*
+	 * disable runtime pm directly
+	 */
+	panic_host->mmc->parent->power.disable_depth = 1;
+#endif
+}
+
+static int sdhci_mfld_panic_setup(struct mmc_panic_host *panic_host)
+{
+	struct sdhci_host *host;
+
+	if (!panic_host)
+		return 0;
+
+	host = mmc_priv(panic_host->mmc);
+	panic_host->priv = (void *)host;
+
+	return 0;
+}
+
+const struct mmc_host_panic_ops sdhci_panic_ops = {
+	.request        = sdhci_mfld_panic_request,
+	.prepare        = sdhci_mfld_panic_prepare,
+	.setup          = sdhci_mfld_panic_setup,
+	.set_ios        = sdhci_mfld_panic_set_ios,
+	.power_on       = sdhci_mfld_panic_power_on,
+	.hold_mutex     = sdhci_mfld_panic_hold_mutex,
+	.release_mutex  = sdhci_mfld_panic_release_mutex,
+};
+
+void sdhci_alloc_panic_host(struct sdhci_host *host)
+{
+	if (!host->mmc)
+		return;
+	mmc_alloc_panic_host(host->mmc, &sdhci_panic_ops);
+}
+EXPORT_SYMBOL_GPL(sdhci_alloc_panic_host);
+
 /*****************************************************************************\
  *                                                                           *
  * Suspend/resume                                                            *
  *                                                                           *
 \*****************************************************************************/
 
+static void sdhci_set_emmc_state(struct sdhci_host *host, uint32_t state)
+{
+	/* Only if there is dekker mutex available */
+	if (!host->sram_addr)
+		return;
+	writel(state, host->sram_addr + DEKKER_EMMC_STATE);
+}
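+
+/*
+ * Editorial note: DEKKER_EMMC_STATE lives in the same shared SRAM as
+ * the Dekker mutex words, so the SCU firmware can see whether the IA
+ * side left the eMMC suspended or active across the PM transitions
+ * below.
+ */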
+
 #ifdef CONFIG_PM
 void sdhci_enable_irq_wakeups(struct sdhci_host *host)
 {
@@ -2529,10 +3683,13 @@
 int sdhci_suspend_host(struct sdhci_host *host)
 {
 	int ret;
+	unsigned long flags;
 
 	if (host->ops->platform_suspend)
 		host->ops->platform_suspend(host);
 
+	sdhci_acquire_ownership(host->mmc);
+
 	sdhci_disable_card_detection(host);
 
 	/* Disable tuning since we are suspending */
@@ -2551,7 +3708,7 @@
 
 		sdhci_enable_card_detection(host);
 
-		return ret;
+		goto out;
 	}
 
 	if (!device_may_wakeup(mmc_dev(host->mmc))) {
@@ -2561,6 +3718,15 @@
 		sdhci_enable_irq_wakeups(host);
 		enable_irq_wake(host->irq);
 	}
+
+	/* Card successfully suspended. Pass the information to the SCU */
+	sdhci_set_emmc_state(host, DEKKER_EMMC_CHIP_SUSPENDED);
+
+	spin_lock_irqsave(&host->lock, flags);
+	host->suspended = true;
+	spin_unlock_irqrestore(&host->lock, flags);
+out:
+	sdhci_release_ownership(host->mmc);
 	return ret;
 }
 
@@ -2569,6 +3735,9 @@
 int sdhci_resume_host(struct sdhci_host *host)
 {
 	int ret;
+	unsigned long flags;
+
+	sdhci_acquire_ownership(host->mmc);
 
 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
 		if (host->ops->enable_dma)
@@ -2579,7 +3748,7 @@
 		ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
 				  mmc_hostname(host->mmc), host);
 		if (ret)
-			return ret;
+			goto out;
 	} else {
 		sdhci_disable_irq_wakeups(host);
 		disable_irq_wake(host->irq);
@@ -2597,6 +3766,10 @@
 		mmiowb();
 	}
 
+	spin_lock_irqsave(&host->lock, flags);
+	host->suspended = false;
+	spin_unlock_irqrestore(&host->lock, flags);
+
 	ret = mmc_resume_host(host->mmc);
 	sdhci_enable_card_detection(host);
 
@@ -2607,6 +3780,10 @@
 	if (host->flags & SDHCI_USING_RETUNING_TIMER)
 		host->flags |= SDHCI_NEEDS_RETUNING;
 
+	/* Card back in active state */
+	sdhci_set_emmc_state(host, DEKKER_EMMC_CHIP_ACTIVE);
+out:
+	sdhci_release_ownership(host->mmc);
 	return ret;
 }
 
@@ -2631,6 +3808,7 @@
 	unsigned long flags;
 	int ret = 0;
 
+	sdhci_do_acquire_ownership(host->mmc);
 	/* Disable tuning since we are suspending */
 	if (host->flags & SDHCI_USING_RETUNING_TIMER) {
 		del_timer_sync(&host->tuning_timer);
@@ -2647,6 +3825,7 @@
 	host->runtime_suspended = true;
 	spin_unlock_irqrestore(&host->lock, flags);
 
+	sdhci_release_ownership(host->mmc);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
@@ -2656,6 +3835,8 @@
 	unsigned long flags;
 	int ret = 0, host_flags = host->flags;
 
+	sdhci_do_acquire_ownership(host->mmc);
+
 	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
 		if (host->ops->enable_dma)
 			host->ops->enable_dma(host);
@@ -2693,6 +3874,7 @@
 
 	spin_unlock_irqrestore(&host->lock, flags);
 
+	sdhci_release_ownership(host->mmc);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
@@ -2725,6 +3907,36 @@
 
 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
 
+/**
+ *      sdhci_try_get_regulator - try requesting the vmmc regulator of
+ *                                an SDHCI device
+ *
+ *      We take care of race conditions here between sdhci_add_host() (probe)
+ *      and platform code that may kick a retry at any time during boot.
+ */
+int sdhci_try_get_regulator(struct sdhci_host *host)
+{
+	struct regulator *vmmc;
+	unsigned long flags;
+
+	if (!host->vmmc) {
+		vmmc = regulator_get(mmc_dev(host->mmc), "vmmc");
+		if (!IS_ERR(vmmc)) {
+			spin_lock_irqsave(&host->lock, flags);
+			if (!host->vmmc) {
+				host->vmmc = vmmc;
+				spin_unlock_irqrestore(&host->lock, flags);
+				return 0;
+			} else {
+				/* race! we got the regulator twice */
+				spin_unlock_irqrestore(&host->lock, flags);
+				regulator_put(vmmc);
+			}
+		}
+	}
+	return -EAGAIN;
+}
+EXPORT_SYMBOL_GPL(sdhci_try_get_regulator);
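+
+/*
+ * Hypothetical caller sketch (editorial; names are illustrative, not
+ * from this patch): platform code can keep retrying until the supply
+ * is registered with the regulator framework:
+ *
+ *	if (sdhci_try_get_regulator(host) == -EAGAIN)
+ *		schedule_delayed_work(&vmmc_retry_work, HZ);
+ */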
+
 int sdhci_add_host(struct sdhci_host *host)
 {
 	struct mmc_host *mmc;
@@ -2898,6 +4110,8 @@
 
 	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
 
+	mmc->caps |= MMC_CAP_POWER_OFF_CARD;
+
 	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
 		host->flags |= SDHCI_AUTO_CMD12;
 
@@ -2990,6 +4204,8 @@
 	/* Initial value for re-tuning timer count */
 	host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
 			      SDHCI_RETUNING_TIMER_COUNT_SHIFT;
+	if (host->tuning_count == 0 && host->ops->get_tuning_count)
+		host->tuning_count = host->ops->get_tuning_count(host);
 
 	/*
 	 * In case Re-tuning Timer is not disabled, the actual value of
@@ -3003,17 +4219,10 @@
 			     SDHCI_RETUNING_MODE_SHIFT;
 
 	ocr_avail = 0;
-
-	host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
-	if (IS_ERR_OR_NULL(host->vmmc)) {
-		if (PTR_ERR(host->vmmc) < 0) {
-			pr_info("%s: no vmmc regulator found\n",
-				mmc_hostname(mmc));
-			host->vmmc = NULL;
-		}
-	}
+	spin_lock_init(&host->lock);
 
 #ifdef CONFIG_REGULATOR
+	sdhci_try_get_regulator(host);
 	/*
 	 * Voltage range check makes sense only if regulator reports
 	 * any voltage value.
@@ -3081,6 +4290,11 @@
 				   SDHCI_MAX_CURRENT_MULTIPLIER;
 	}
 
+	if (host->quirks2 & SDHCI_QUIRK2_ADVERTISE_2V0_FORCE_1V8)
+		ocr_avail |= MMC_VDD_20_21;
+	if (host->quirks2 & SDHCI_QUIRK2_ADVERTISE_3V0_FORCE_1V8)
+		ocr_avail |= MMC_VDD_32_33;
+
 	mmc->ocr_avail = ocr_avail;
 	mmc->ocr_avail_sdio = ocr_avail;
 	if (host->ocr_avail_sdio)
@@ -3100,8 +4314,6 @@
 		return -ENODEV;
 	}
 
-	spin_lock_init(&host->lock);
-
 	/*
 	 * Maximum number of segments. Depends on if the hardware
 	 * can do scatter/gather or not.
@@ -3182,6 +4394,7 @@
 		       mmc_hostname(mmc), host->irq, ret);
 		goto untasklet;
 	}
+	sdhci_do_acquire_ownership(mmc);
 
 	sdhci_init(host, 0);
 
@@ -3190,18 +4403,20 @@
 #endif
 
 #ifdef SDHCI_USE_LEDS_CLASS
-	snprintf(host->led_name, sizeof(host->led_name),
-		"%s::", mmc_hostname(mmc));
-	host->led.name = host->led_name;
-	host->led.brightness = LED_OFF;
-	host->led.default_trigger = mmc_hostname(mmc);
-	host->led.brightness_set = sdhci_led_control;
+	if (mmc->caps2 & MMC_CAP2_LED_SUPPORT) {
+		snprintf(host->led_name, sizeof(host->led_name),
+				"%s::", mmc_hostname(mmc));
+		host->led.name = host->led_name;
+		host->led.brightness = LED_OFF;
+		host->led.default_trigger = mmc_hostname(mmc);
+		host->led.brightness_set = sdhci_led_control;
 
-	ret = led_classdev_register(mmc_dev(mmc), &host->led);
-	if (ret) {
-		pr_err("%s: Failed to register LED device: %d\n",
-		       mmc_hostname(mmc), ret);
-		goto reset;
+		ret = led_classdev_register(mmc_dev(mmc), &host->led);
+		if (ret) {
+			pr_err("%s: Failed to register LED device: %d\n",
+					mmc_hostname(mmc), ret);
+			goto reset;
+		}
 	}
 #endif
 
@@ -3216,13 +4431,18 @@
 
 	sdhci_enable_card_detection(host);
 
+	sdhci_release_ownership(mmc);
+
 	return 0;
 
 #ifdef SDHCI_USE_LEDS_CLASS
 reset:
-	sdhci_reset(host, SDHCI_RESET_ALL);
-	sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
-	free_irq(host->irq, host);
+	if (mmc->caps2 & MMC_CAP2_LED_SUPPORT) {
+		sdhci_reset(host, SDHCI_RESET_ALL);
+		sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
+		free_irq(host->irq, host);
+	}
+	sdhci_release_ownership(mmc);
 #endif
 untasklet:
 	tasklet_kill(&host->card_tasklet);
@@ -3258,7 +4478,8 @@
 	mmc_remove_host(host->mmc);
 
 #ifdef SDHCI_USE_LEDS_CLASS
-	led_classdev_unregister(&host->led);
+	if (host->mmc->caps2 & MMC_CAP2_LED_SUPPORT)
+		led_classdev_unregister(&host->led);
 #endif
 
 	if (!dead)
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 379e09d..b7c49ad 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -89,6 +89,7 @@
 #define  SDHCI_POWER_180	0x0A
 #define  SDHCI_POWER_300	0x0C
 #define  SDHCI_POWER_330	0x0E
+#define	 SDHCI_HW_RESET		0x10
 
 #define SDHCI_BLOCK_GAP_CONTROL	0x2A
 
@@ -130,6 +131,7 @@
 #define  SDHCI_INT_ERROR	0x00008000
 #define  SDHCI_INT_TIMEOUT	0x00010000
 #define  SDHCI_INT_CRC		0x00020000
+#define  SDHCI_INT_CMD_CONFLICT	0x00030000
 #define  SDHCI_INT_END_BIT	0x00040000
 #define  SDHCI_INT_INDEX	0x00080000
 #define  SDHCI_INT_DATA_TIMEOUT	0x00100000
@@ -160,7 +162,8 @@
 #define   SDHCI_CTRL_UHS_SDR50		0x0002
 #define   SDHCI_CTRL_UHS_SDR104		0x0003
 #define   SDHCI_CTRL_UHS_DDR50		0x0004
-#define   SDHCI_CTRL_HS_SDR200		0x0005 /* reserved value in SDIO spec */
+#define   SDHCI_CTRL_HS_SDR200		SDHCI_CTRL_UHS_SDR104
+#define   SDHCI_CTRL_HS_DDR200		0x0005
 #define  SDHCI_CTRL_VDD_180		0x0008
 #define  SDHCI_CTRL_DRV_TYPE_MASK	0x0030
 #define   SDHCI_CTRL_DRV_TYPE_B		0x0000
@@ -234,6 +237,7 @@
 #define SDHCI_PRESET_FOR_SDR50 0x6A
 #define SDHCI_PRESET_FOR_SDR104        0x6C
 #define SDHCI_PRESET_FOR_DDR50 0x6E
+#define SDHCI_PRESET_FOR_HS400 0x74
 #define SDHCI_PRESET_DRV_MASK  0xC000
 #define SDHCI_PRESET_DRV_SHIFT  14
 #define SDHCI_PRESET_CLKGEN_SEL_MASK   0x400
@@ -294,6 +298,11 @@
 	void	(*platform_resume)(struct sdhci_host *host);
 	void    (*adma_workaround)(struct sdhci_host *host, u32 intmask);
 	void	(*platform_init)(struct sdhci_host *host);
+	int	(*power_up_host)(struct sdhci_host *host);
+	void	(*set_dev_power)(struct sdhci_host *, bool);
+	int	(*get_cd)(struct sdhci_host *host);
+	int	(*get_tuning_count)(struct sdhci_host *host);
+	int	(*gpio_buf_check)(struct sdhci_host *host, unsigned int clk);
 };
 
 #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
@@ -392,6 +401,7 @@
 extern void sdhci_card_detect(struct sdhci_host *host);
 extern int sdhci_add_host(struct sdhci_host *host);
 extern void sdhci_remove_host(struct sdhci_host *host, int dead);
+extern int sdhci_try_get_regulator(struct sdhci_host *host);
 
 #ifdef CONFIG_PM
 extern int sdhci_suspend_host(struct sdhci_host *host);
@@ -404,4 +414,5 @@
 extern int sdhci_runtime_resume_host(struct sdhci_host *host);
 #endif
 
+extern void sdhci_alloc_panic_host(struct sdhci_host *host);
 #endif /* __SDHCI_HW_H */
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 50543f1..f2ab08c 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -1,3 +1,10 @@
+config MTD_NAND_IDS
+	tristate "Include chip ids for known NAND devices."
+	depends on MTD
+	help
+	  Useful for NAND drivers that do not use the NAND subsystem but
+	  would still like to take advantage of the known chip information.
+
 config MTD_NAND_ECC
 	tristate
 
@@ -133,9 +140,6 @@
 	default 8 if MTD_NAND_OMAP_BCH8
 endif
 
-config MTD_NAND_IDS
-	tristate
-
 config MTD_NAND_RICOH
 	tristate "Ricoh xD card reader"
 	default n
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 7fb7e17..6797b10 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -582,6 +582,19 @@
 };
 
 /**
+ * struct pch_gbe_privdata - PCI Device ID driver data
+ * @phy_tx_clk_delay:		Bool, configure the PHY TX delay in software
+ * @phy_disable_hibernate:	Bool, disable PHY hibernation
+ * @platform_init:		Platform initialization callback, called from
+ *				probe, prior to PHY initialization.
+ */
+struct pch_gbe_privdata {
+	bool phy_tx_clk_delay;
+	bool phy_disable_hibernate;
+	int (*platform_init)(struct pci_dev *pdev);
+};
+
+/**
  * struct pch_gbe_adapter - board specific private data structure
  * @stats_lock:	Spinlock structure for status
  * @ethtool_lock:	Spinlock structure for ethtool
@@ -604,6 +617,7 @@
  * @rx_buffer_len:	Receive buffer length
  * @tx_queue_len:	Transmit queue length
  * @have_msi:		PCI MSI mode flag
+ * @pdata:		PCI Device ID driver data
  */
 
 struct pch_gbe_adapter {
@@ -631,8 +645,11 @@
 	int hwts_tx_en;
 	int hwts_rx_en;
 	struct pci_dev *ptp_pdev;
+	struct pch_gbe_privdata *pdata;
 };
 
+#define pch_gbe_hw_to_adapter(hw)	container_of(hw, struct pch_gbe_adapter, hw)
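+
+/*
+ * Editorial example: hardware-layer helpers that only receive a
+ * struct pch_gbe_hw * can recover the owning adapter for netdev
+ * logging, as the converted call sites below do:
+ *
+ *	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
+ *	netdev_err(adapter->netdev, "...");
+ */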
+
 extern const char pch_driver_version[];
 
 /* pch_gbe_main.c */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
index 5ae03e8..001821f 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
@@ -70,7 +70,9 @@
 
 	ret_val = pch_gbe_phy_get_id(hw);
 	if (ret_val) {
-		pr_err("pch_gbe_phy_get_id error\n");
+		struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
+
+		netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n");
 		return ret_val;
 	}
 	pch_gbe_phy_init_setting(hw);
@@ -115,7 +117,9 @@
 inline s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw)
 {
 	if (!hw->reg) {
-		pr_err("ERROR: Registers not mapped\n");
+		struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
+
+		netdev_err(adapter->netdev, "ERROR: Registers not mapped\n");
 		return -ENOSYS;
 	}
 	pch_gbe_plat_init_function_pointers(hw);
@@ -128,10 +132,13 @@
  */
 inline void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw)
 {
-	if (!hw->func->get_bus_info)
-		pr_err("ERROR: configuration\n");
-	else
-		hw->func->get_bus_info(hw);
+	if (!hw->func->get_bus_info) {
+		struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
+
+		netdev_err(adapter->netdev, "ERROR: configuration\n");
+		return;
+	}
+	hw->func->get_bus_info(hw);
 }
 
 /**
@@ -144,7 +151,9 @@
 inline s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw)
 {
 	if (!hw->func->init_hw) {
-		pr_err("ERROR: configuration\n");
+		struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
+
+		netdev_err(adapter->netdev, "ERROR: configuration\n");
 		return -ENOSYS;
 	}
 	return hw->func->init_hw(hw);
@@ -190,10 +199,13 @@
  */
 inline void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw)
 {
-	if (!hw->func->reset_phy)
-		pr_err("ERROR: configuration\n");
-	else
-		hw->func->reset_phy(hw);
+	if (!hw->func->reset_phy) {
+		struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
+
+		netdev_err(adapter->netdev, "ERROR: configuration\n");
+		return;
+	}
+	hw->func->reset_phy(hw);
 }
 
 /**
@@ -202,10 +214,13 @@
  */
 inline void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw)
 {
-	if (!hw->func->sw_reset_phy)
-		pr_err("ERROR: configuration\n");
-	else
-		hw->func->sw_reset_phy(hw);
+	if (!hw->func->sw_reset_phy) {
+		struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
+
+		netdev_err(adapter->netdev, "ERROR: configuration\n");
+		return;
+	}
+	hw->func->sw_reset_phy(hw);
 }
 
 /**
@@ -218,7 +233,9 @@
 inline s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw)
 {
 	if (!hw->func->read_mac_addr) {
-		pr_err("ERROR: configuration\n");
+		struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
+
+		netdev_err(adapter->netdev, "ERROR: configuration\n");
 		return -ENOSYS;
 	}
 	return hw->func->read_mac_addr(hw);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 24b787b..1129db0 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -122,7 +122,7 @@
 	}
 	ret = mii_ethtool_sset(&adapter->mii, ecmd);
 	if (ret) {
-		pr_err("Error: mii_ethtool_sset\n");
+		netdev_err(netdev, "Error: mii_ethtool_sset\n");
 		return ret;
 	}
 	hw->mac.link_speed = speed;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 0c1c65a..665bfef 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -23,6 +23,7 @@
 #include <linux/module.h>
 #include <linux/net_tstamp.h>
 #include <linux/ptp_classify.h>
+#include <linux/gpio.h>
 
 #define DRV_VERSION     "1.01"
 const char pch_driver_version[] = DRV_VERSION;
@@ -111,6 +112,8 @@
 #define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
 #define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
 
+#define MINNOW_PHY_RESET_GPIO		13
+
 static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
 
 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
@@ -300,6 +303,7 @@
  */
 s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
 {
+	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
 	u32  adr1a, adr1b;
 
 	adr1a = ioread32(&hw->reg->mac_adr[0].high);
@@ -312,7 +316,7 @@
 	hw->mac.addr[4] = (u8)(adr1b & 0xFF);
 	hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);
 
-	pr_debug("hw->mac.addr : %pM\n", hw->mac.addr);
+	netdev_dbg(adapter->netdev, "hw->mac.addr : %pM\n", hw->mac.addr);
 	return 0;
 }
 
@@ -324,6 +328,7 @@
 static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
 {
 	u32 tmp;
+
 	/* wait busy */
 	tmp = 1000;
 	while ((ioread32(reg) & bit) && --tmp)
@@ -340,9 +345,10 @@
  */
 static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 * addr, u32 index)
 {
+	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
 	u32 mar_low, mar_high, adrmask;
 
-	pr_debug("index : 0x%x\n", index);
+	netdev_dbg(adapter->netdev, "index : 0x%x\n", index);
 
 	/*
 	 * HW expects these in little endian so we reverse the byte order
@@ -468,10 +474,11 @@
  */
 s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
 {
+	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
 	struct pch_gbe_mac_info *mac = &hw->mac;
 	u32 rx_fctrl;
 
-	pr_debug("mac->fc = %u\n", mac->fc);
+	netdev_dbg(adapter->netdev, "mac->fc = %u\n", mac->fc);
 
 	rx_fctrl = ioread32(&hw->reg->RX_FCTRL);
 
@@ -493,14 +500,16 @@
 		mac->tx_fc_enable = true;
 		break;
 	default:
-		pr_err("Flow control param set incorrectly\n");
+		netdev_err(adapter->netdev,
+			   "Flow control param set incorrectly\n");
 		return -EINVAL;
 	}
 	if (mac->link_duplex == DUPLEX_HALF)
 		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
 	iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
-	pr_debug("RX_FCTRL reg : 0x%08x  mac->tx_fc_enable : %d\n",
-		 ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
+	netdev_dbg(adapter->netdev,
+		   "RX_FCTRL reg : 0x%08x  mac->tx_fc_enable : %d\n",
+		   ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
 	return 0;
 }
 
@@ -511,10 +520,11 @@
  */
 static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
 {
+	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
 	u32 addr_mask;
 
-	pr_debug("wu_evt : 0x%08x  ADDR_MASK reg : 0x%08x\n",
-		 wu_evt, ioread32(&hw->reg->ADDR_MASK));
+	netdev_dbg(adapter->netdev, "wu_evt : 0x%08x  ADDR_MASK reg : 0x%08x\n",
+		   wu_evt, ioread32(&hw->reg->ADDR_MASK));
 
 	if (wu_evt) {
 		/* Set Wake-On-Lan address mask */
@@ -546,6 +556,7 @@
 u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
 			u16 data)
 {
+	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
 	u32 data_out = 0;
 	unsigned int i;
 	unsigned long flags;
@@ -558,7 +569,7 @@
 		udelay(20);
 	}
 	if (i == 0) {
-		pr_err("pch-gbe.miim won't go Ready\n");
+		netdev_err(adapter->netdev, "pch-gbe.miim won't go Ready\n");
 		spin_unlock_irqrestore(&hw->miim_lock, flags);
 		return 0;	/* No way to indicate timeout error */
 	}
@@ -573,9 +584,9 @@
 	}
 	spin_unlock_irqrestore(&hw->miim_lock, flags);
 
-	pr_debug("PHY %s: reg=%d, data=0x%04X\n",
-		 dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
-		 dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
+	netdev_dbg(adapter->netdev, "PHY %s: reg=%d, data=0x%04X\n",
+		   dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
+		   dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
 	return (u16) data_out;
 }
 
@@ -585,6 +596,7 @@
  */
 static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
 {
+	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
 	unsigned long tmp2, tmp3;
 
 	/* Set Pause packet */
@@ -606,10 +618,13 @@
 	/* Transmit Pause Packet */
 	iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);
 
-	pr_debug("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
-		 ioread32(&hw->reg->PAUSE_PKT1), ioread32(&hw->reg->PAUSE_PKT2),
-		 ioread32(&hw->reg->PAUSE_PKT3), ioread32(&hw->reg->PAUSE_PKT4),
-		 ioread32(&hw->reg->PAUSE_PKT5));
+	netdev_dbg(adapter->netdev,
+		   "PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+		   ioread32(&hw->reg->PAUSE_PKT1),
+		   ioread32(&hw->reg->PAUSE_PKT2),
+		   ioread32(&hw->reg->PAUSE_PKT3),
+		   ioread32(&hw->reg->PAUSE_PKT4),
+		   ioread32(&hw->reg->PAUSE_PKT5));
 
 	return;
 }
@@ -624,15 +639,15 @@
  */
 static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
 {
-	adapter->tx_ring = kzalloc(sizeof(*adapter->tx_ring), GFP_KERNEL);
+	adapter->tx_ring = devm_kzalloc(&adapter->pdev->dev,
+					sizeof(*adapter->tx_ring), GFP_KERNEL);
 	if (!adapter->tx_ring)
 		return -ENOMEM;
 
-	adapter->rx_ring = kzalloc(sizeof(*adapter->rx_ring), GFP_KERNEL);
-	if (!adapter->rx_ring) {
-		kfree(adapter->tx_ring);
+	adapter->rx_ring = devm_kzalloc(&adapter->pdev->dev,
+					sizeof(*adapter->rx_ring), GFP_KERNEL);
+	if (!adapter->rx_ring)
 		return -ENOMEM;
-	}
 	return 0;
 }
 
@@ -669,8 +684,8 @@
 			break;
 	}
 	adapter->hw.phy.addr = adapter->mii.phy_id;
-	pr_debug("phy_addr = %d\n", adapter->mii.phy_id);
-	if (addr == 32)
+	netdev_dbg(netdev, "phy_addr = %d\n", adapter->mii.phy_id);
+	if (addr == PCH_GBE_PHY_REGS_LEN)
 		return -EAGAIN;
 	/* Selected the phy and isolate the rest */
 	for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
@@ -758,13 +773,15 @@
  */
 void pch_gbe_reset(struct pch_gbe_adapter *adapter)
 {
+	struct net_device *netdev = adapter->netdev;
+
 	pch_gbe_mac_reset_hw(&adapter->hw);
 	/* reprogram multicast address register after reset */
-	pch_gbe_set_multi(adapter->netdev);
+	pch_gbe_set_multi(netdev);
 	/* Setup the receive address. */
 	pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
 	if (pch_gbe_hal_init_hw(&adapter->hw))
-		pr_err("Hardware Error\n");
+		netdev_err(netdev, "Hardware Error\n");
 }
 
 /**
@@ -778,7 +795,7 @@
 	free_irq(adapter->pdev->irq, netdev);
 	if (adapter->have_msi) {
 		pci_disable_msi(adapter->pdev);
-		pr_debug("call pci_disable_msi\n");
+		netdev_dbg(netdev, "call pci_disable_msi\n");
 	}
 }
 
@@ -795,7 +812,8 @@
 	ioread32(&hw->reg->INT_ST);
 	synchronize_irq(adapter->pdev->irq);
 
-	pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
+	netdev_dbg(adapter->netdev, "INT_EN reg : 0x%08x\n",
+		   ioread32(&hw->reg->INT_EN));
 }
 
 /**
@@ -809,7 +827,8 @@
 	if (likely(atomic_dec_and_test(&adapter->irq_sem)))
 		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
 	ioread32(&hw->reg->INT_ST);
-	pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
+	netdev_dbg(adapter->netdev, "INT_EN reg : 0x%08x\n",
+		   ioread32(&hw->reg->INT_EN));
 }
 
 
@@ -846,9 +865,9 @@
 	struct pch_gbe_hw *hw = &adapter->hw;
 	u32 tdba, tdlen, dctrl;
 
-	pr_debug("dma addr = 0x%08llx  size = 0x%08x\n",
-		 (unsigned long long)adapter->tx_ring->dma,
-		 adapter->tx_ring->size);
+	netdev_dbg(adapter->netdev, "dma addr = 0x%08llx  size = 0x%08x\n",
+		   (unsigned long long)adapter->tx_ring->dma,
+		   adapter->tx_ring->size);
 
 	/* Setup the HW Tx Head and Tail descriptor pointers */
 	tdba = adapter->tx_ring->dma;
@@ -894,9 +913,9 @@
 	struct pch_gbe_hw *hw = &adapter->hw;
 	u32 rdba, rdlen, rxdma;
 
-	pr_debug("dma adr = 0x%08llx  size = 0x%08x\n",
-		 (unsigned long long)adapter->rx_ring->dma,
-		 adapter->rx_ring->size);
+	netdev_dbg(adapter->netdev, "dma adr = 0x%08llx  size = 0x%08x\n",
+		   (unsigned long long)adapter->rx_ring->dma,
+		   adapter->rx_ring->size);
 
 	pch_gbe_mac_force_mac_fc(hw);
 
@@ -907,9 +926,10 @@
 	rxdma &= ~PCH_GBE_RX_DMA_EN;
 	iowrite32(rxdma, &hw->reg->DMA_CTRL);
 
-	pr_debug("MAC_RX_EN reg = 0x%08x  DMA_CTRL reg = 0x%08x\n",
-		 ioread32(&hw->reg->MAC_RX_EN),
-		 ioread32(&hw->reg->DMA_CTRL));
+	netdev_dbg(adapter->netdev,
+		   "MAC_RX_EN reg = 0x%08x  DMA_CTRL reg = 0x%08x\n",
+		   ioread32(&hw->reg->MAC_RX_EN),
+		   ioread32(&hw->reg->DMA_CTRL));
 
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 	 * the Base and Length of the Rx Descriptor Ring */
@@ -977,7 +997,8 @@
 		buffer_info = &tx_ring->buffer_info[i];
 		pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
 	}
-	pr_debug("call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);
+	netdev_dbg(adapter->netdev,
+		   "call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);
 
 	size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
 	memset(tx_ring->buffer_info, 0, size);
@@ -1009,7 +1030,8 @@
 		buffer_info = &rx_ring->buffer_info[i];
 		pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
 	}
-	pr_debug("call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
+	netdev_dbg(adapter->netdev,
+		   "call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
 	size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
 	memset(rx_ring->buffer_info, 0, size);
 
@@ -1087,7 +1109,7 @@
 	struct net_device *netdev = adapter->netdev;
 	struct pch_gbe_hw *hw = &adapter->hw;
 
-	pr_debug("right now = %ld\n", jiffies);
+	netdev_dbg(netdev, "right now = %ld\n", jiffies);
 
 	pch_gbe_update_stats(adapter);
 	if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
@@ -1095,7 +1117,7 @@
 		netdev->tx_queue_len = adapter->tx_queue_len;
 		/* mii library handles link maintenance tasks */
 		if (mii_ethtool_gset(&adapter->mii, &cmd)) {
-			pr_err("ethtool get setting Error\n");
+			netdev_err(netdev, "ethtool get setting Error\n");
 			mod_timer(&adapter->watchdog_timer,
 				  round_jiffies(jiffies +
 						PCH_GBE_WATCHDOG_PERIOD));
@@ -1213,7 +1235,7 @@
 					  buffer_info->length,
 					  DMA_TO_DEVICE);
 	if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
-		pr_err("TX DMA map failed\n");
+		netdev_err(adapter->netdev, "TX DMA map failed\n");
 		buffer_info->dma = 0;
 		buffer_info->time_stamp = 0;
 		tx_ring->next_to_use = ring_num;
@@ -1333,13 +1355,13 @@
 	/* When request status is no interruption factor */
 	if (unlikely(!int_st))
 		return IRQ_NONE;	/* Not our interrupt. End processing. */
-	pr_debug("%s occur int_st = 0x%08x\n", __func__, int_st);
+	netdev_dbg(netdev, "%s occur int_st = 0x%08x\n", __func__, int_st);
 	if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
 		adapter->stats.intr_rx_frame_err_count++;
 	if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
 		if (!adapter->rx_stop_flag) {
 			adapter->stats.intr_rx_fifo_err_count++;
-			pr_debug("Rx fifo over run\n");
+			netdev_dbg(netdev, "Rx fifo over run\n");
 			adapter->rx_stop_flag = true;
 			int_en = ioread32(&hw->reg->INT_EN);
 			iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
@@ -1359,7 +1381,7 @@
 	/* When Rx descriptor is empty  */
 	if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
 		adapter->stats.intr_rx_dsc_empty_count++;
-		pr_debug("Rx descriptor is empty\n");
+		netdev_dbg(netdev, "Rx descriptor is empty\n");
 		int_en = ioread32(&hw->reg->INT_EN);
 		iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
 		if (hw->mac.tx_fc_enable) {
@@ -1382,8 +1404,8 @@
 			__napi_schedule(&adapter->napi);
 		}
 	}
-	pr_debug("return = 0x%08x  INT_EN reg = 0x%08x\n",
-		 IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
+	netdev_dbg(netdev, "return = 0x%08x  INT_EN reg = 0x%08x\n",
+		   IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
 	return IRQ_HANDLED;
 }
 
@@ -1437,9 +1459,10 @@
 		rx_desc->buffer_addr = (buffer_info->dma);
 		rx_desc->gbec_status = DSC_INIT16;
 
-		pr_debug("i = %d  buffer_info->dma = 0x08%llx  buffer_info->length = 0x%x\n",
-			 i, (unsigned long long)buffer_info->dma,
-			 buffer_info->length);
+		netdev_dbg(netdev,
+			   "i = %d  buffer_info->dma = 0x%08llx  buffer_info->length = 0x%x\n",
+			   i, (unsigned long long)buffer_info->dma,
+			   buffer_info->length);
 
 		if (unlikely(++i == rx_ring->count))
 			i = 0;
@@ -1531,12 +1554,13 @@
 	bool cleaned = false;
 	int unused, thresh;
 
-	pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
+	netdev_dbg(adapter->netdev, "next_to_clean : %d\n",
+		   tx_ring->next_to_clean);
 
 	i = tx_ring->next_to_clean;
 	tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
-	pr_debug("gbec_status:0x%04x  dma_status:0x%04x\n",
-		 tx_desc->gbec_status, tx_desc->dma_status);
+	netdev_dbg(adapter->netdev, "gbec_status:0x%04x  dma_status:0x%04x\n",
+		   tx_desc->gbec_status, tx_desc->dma_status);
 
 	unused = PCH_GBE_DESC_UNUSED(tx_ring);
 	thresh = tx_ring->count - PCH_GBE_TX_WEIGHT;
@@ -1544,8 +1568,10 @@
 	{  /* current marked clean, tx queue filling up, do extra clean */
 		int j, k;
 		if (unused < 8) {  /* tx queue nearly full */
-			pr_debug("clean_tx: transmit queue warning (%x,%x) unused=%d\n",
-				tx_ring->next_to_clean,tx_ring->next_to_use,unused);
+			netdev_dbg(adapter->netdev,
+				   "clean_tx: transmit queue warning (%x,%x) unused=%d\n",
+				   tx_ring->next_to_clean, tx_ring->next_to_use,
+				   unused);
 		}
 
 		/* current marked clean, scan for more that need cleaning. */
@@ -1557,49 +1583,56 @@
 			if (++k >= tx_ring->count) k = 0;  /*increment, wrap*/
 		}
 		if (j < PCH_GBE_TX_WEIGHT) {
-			pr_debug("clean_tx: unused=%d loops=%d found tx_desc[%x,%x:%x].gbec_status=%04x\n",
-				unused,j, i,k, tx_ring->next_to_use, tx_desc->gbec_status);
+			netdev_dbg(adapter->netdev,
+				   "clean_tx: unused=%d loops=%d found tx_desc[%x,%x:%x].gbec_status=%04x\n",
+				   unused, j, i, k, tx_ring->next_to_use,
+				   tx_desc->gbec_status);
 			i = k;  /*found one to clean, usu gbec_status==2000.*/
 		}
 	}
 
 	while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
-		pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
+		netdev_dbg(adapter->netdev, "gbec_status:0x%04x\n",
+			   tx_desc->gbec_status);
 		buffer_info = &tx_ring->buffer_info[i];
 		skb = buffer_info->skb;
 		cleaned = true;
 
 		if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
 			adapter->stats.tx_aborted_errors++;
-			pr_err("Transfer Abort Error\n");
+			netdev_err(adapter->netdev, "Transfer Abort Error\n");
 		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)
 			  ) {
 			adapter->stats.tx_carrier_errors++;
-			pr_err("Transfer Carrier Sense Error\n");
+			netdev_err(adapter->netdev,
+				   "Transfer Carrier Sense Error\n");
 		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)
 			  ) {
 			adapter->stats.tx_aborted_errors++;
-			pr_err("Transfer Collision Abort Error\n");
+			netdev_err(adapter->netdev,
+				   "Transfer Collision Abort Error\n");
 		} else if ((tx_desc->gbec_status &
 			    (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
 			     PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
 			adapter->stats.collisions++;
 			adapter->stats.tx_packets++;
 			adapter->stats.tx_bytes += skb->len;
-			pr_debug("Transfer Collision\n");
+			netdev_dbg(adapter->netdev, "Transfer Collision\n");
 		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)
 			  ) {
 			adapter->stats.tx_packets++;
 			adapter->stats.tx_bytes += skb->len;
 		}
 		if (buffer_info->mapped) {
-			pr_debug("unmap buffer_info->dma : %d\n", i);
+			netdev_dbg(adapter->netdev,
+				   "unmap buffer_info->dma : %d\n", i);
 			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
 					 buffer_info->length, DMA_TO_DEVICE);
 			buffer_info->mapped = false;
 		}
 		if (buffer_info->skb) {
-			pr_debug("trim buffer_info->skb : %d\n", i);
+			netdev_dbg(adapter->netdev,
+				   "trim buffer_info->skb : %d\n", i);
 			skb_trim(buffer_info->skb, 0);
 		}
 		tx_desc->gbec_status = DSC_INIT16;
@@ -1613,8 +1646,9 @@
 			break;
 		}
 	}
-	pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
-		 cleaned_count);
+	netdev_dbg(adapter->netdev,
+		   "called pch_gbe_unmap_and_free_tx_resource() %d count\n",
+		   cleaned_count);
 	if (cleaned_count > 0)  { /*skip this if nothing cleaned*/
 		/* Recover from running out of Tx resources in xmit_frame */
 		spin_lock(&tx_ring->tx_lock);
@@ -1622,12 +1656,13 @@
 		{
 			netif_wake_queue(adapter->netdev);
 			adapter->stats.tx_restart_count++;
-			pr_debug("Tx wake queue\n");
+			netdev_dbg(adapter->netdev, "Tx wake queue\n");
 		}
 
 		tx_ring->next_to_clean = i;
 
-		pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
+		netdev_dbg(adapter->netdev, "next_to_clean : %d\n",
+			   tx_ring->next_to_clean);
 		spin_unlock(&tx_ring->tx_lock);
 	}
 	return cleaned;
@@ -1684,22 +1719,22 @@
 				   buffer_info->length, DMA_FROM_DEVICE);
 		buffer_info->mapped = false;
 
-		pr_debug("RxDecNo = 0x%04x  Status[DMA:0x%02x GBE:0x%04x "
-			 "TCP:0x%08x]  BufInf = 0x%p\n",
-			 i, dma_status, gbec_status, tcp_ip_status,
-			 buffer_info);
+		netdev_dbg(netdev,
+			   "RxDecNo = 0x%04x  Status[DMA:0x%02x GBE:0x%04x TCP:0x%08x]  BufInf = 0x%p\n",
+			   i, dma_status, gbec_status, tcp_ip_status,
+			   buffer_info);
 		/* Error check */
 		if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
 			adapter->stats.rx_frame_errors++;
-			pr_err("Receive Not Octal Error\n");
+			netdev_err(netdev, "Receive Not Octal Error\n");
 		} else if (unlikely(gbec_status &
 				PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
 			adapter->stats.rx_frame_errors++;
-			pr_err("Receive Nibble Error\n");
+			netdev_err(netdev, "Receive Nibble Error\n");
 		} else if (unlikely(gbec_status &
 				PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
 			adapter->stats.rx_crc_errors++;
-			pr_err("Receive CRC Error\n");
+			netdev_err(netdev, "Receive CRC Error\n");
 		} else {
 			/* get receive length */
 			/* length convert[-3], length includes FCS length */
@@ -1730,8 +1765,9 @@
 
 			napi_gro_receive(&adapter->napi, skb);
 			(*work_done)++;
-			pr_debug("Receive skb->ip_summed: %d length: %d\n",
-				 skb->ip_summed, length);
+			netdev_dbg(netdev,
+				   "Receive skb->ip_summed: %d length: %d\n",
+				   skb->ip_summed, length);
 		}
 		/* return some buffers to hardware, one at a time is too slow */
 		if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
@@ -1787,10 +1823,10 @@
 		tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
 		tx_desc->gbec_status = DSC_INIT16;
 	}
-	pr_debug("tx_ring->desc = 0x%p  tx_ring->dma = 0x%08llx\n"
-		 "next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
-		 tx_ring->desc, (unsigned long long)tx_ring->dma,
-		 tx_ring->next_to_clean, tx_ring->next_to_use);
+	netdev_dbg(adapter->netdev,
+		   "tx_ring->desc = 0x%p  tx_ring->dma = 0x%08llx next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
+		   tx_ring->desc, (unsigned long long)tx_ring->dma,
+		   tx_ring->next_to_clean, tx_ring->next_to_use);
 	return 0;
 }
 
@@ -1829,10 +1865,10 @@
 		rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
 		rx_desc->gbec_status = DSC_INIT16;
 	}
-	pr_debug("rx_ring->desc = 0x%p  rx_ring->dma = 0x%08llx "
-		 "next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
-		 rx_ring->desc, (unsigned long long)rx_ring->dma,
-		 rx_ring->next_to_clean, rx_ring->next_to_use);
+	netdev_dbg(adapter->netdev,
+		   "rx_ring->desc = 0x%p  rx_ring->dma = 0x%08llx next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
+		   rx_ring->desc, (unsigned long long)rx_ring->dma,
+		   rx_ring->next_to_clean, rx_ring->next_to_use);
 	return 0;
 }
 
@@ -1886,9 +1922,9 @@
 	flags = IRQF_SHARED;
 	adapter->have_msi = false;
 	err = pci_enable_msi(adapter->pdev);
-	pr_debug("call pci_enable_msi\n");
+	netdev_dbg(netdev, "call pci_enable_msi\n");
 	if (err) {
-		pr_debug("call pci_enable_msi - Error: %d\n", err);
+		netdev_dbg(netdev, "call pci_enable_msi - Error: %d\n", err);
 	} else {
 		flags = 0;
 		adapter->have_msi = true;
@@ -1896,9 +1932,11 @@
 	err = request_irq(adapter->pdev->irq, &pch_gbe_intr,
 			  flags, netdev->name, netdev);
 	if (err)
-		pr_err("Unable to allocate interrupt Error: %d\n", err);
-	pr_debug("adapter->have_msi : %d  flags : 0x%04x  return : 0x%04x\n",
-		 adapter->have_msi, flags, err);
+		netdev_err(netdev, "Unable to allocate interrupt Error: %d\n",
+			   err);
+	netdev_dbg(netdev,
+		   "adapter->have_msi : %d  flags : 0x%04x  return : 0x%04x\n",
+		   adapter->have_msi, flags, err);
 	return err;
 }
 
@@ -1919,7 +1957,7 @@
 
 	/* Ensure we have a valid MAC */
 	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
-		pr_err("Error: Invalid MAC address\n");
+		netdev_err(netdev, "Error: Invalid MAC address\n");
 		goto out;
 	}
 
@@ -1933,12 +1971,14 @@
 
 	err = pch_gbe_request_irq(adapter);
 	if (err) {
-		pr_err("Error: can't bring device up - irq request failed\n");
+		netdev_err(netdev,
+			   "Error: can't bring device up - irq request failed\n");
 		goto out;
 	}
 	err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
 	if (err) {
-		pr_err("Error: can't bring device up - alloc rx buffers pool failed\n");
+		netdev_err(netdev,
+			   "Error: can't bring device up - alloc rx buffers pool failed\n");
 		goto freeirq;
 	}
 	pch_gbe_alloc_tx_buffers(adapter, tx_ring);
@@ -2015,11 +2055,11 @@
 
 	/* Initialize the hardware-specific values */
 	if (pch_gbe_hal_setup_init_funcs(hw)) {
-		pr_err("Hardware Initialization Failure\n");
+		netdev_err(netdev, "Hardware Initialization Failure\n");
 		return -EIO;
 	}
 	if (pch_gbe_alloc_queues(adapter)) {
-		pr_err("Unable to allocate memory for queues\n");
+		netdev_err(netdev, "Unable to allocate memory for queues\n");
 		return -ENOMEM;
 	}
 	spin_lock_init(&adapter->hw.miim_lock);
@@ -2030,9 +2070,10 @@
 
 	pch_gbe_init_stats(adapter);
 
-	pr_debug("rx_buffer_len : %d  mac.min_frame_size : %d  mac.max_frame_size : %d\n",
-		 (u32) adapter->rx_buffer_len,
-		 hw->mac.min_frame_size, hw->mac.max_frame_size);
+	netdev_dbg(netdev,
+		   "rx_buffer_len : %d  mac.min_frame_size : %d  mac.max_frame_size : %d\n",
+		   (u32) adapter->rx_buffer_len,
+		   hw->mac.min_frame_size, hw->mac.max_frame_size);
 	return 0;
 }
 
@@ -2061,7 +2102,7 @@
 	err = pch_gbe_up(adapter);
 	if (err)
 		goto err_up;
-	pr_debug("Success End\n");
+	netdev_dbg(netdev, "Success End\n");
 	return 0;
 
 err_up:
@@ -2072,7 +2113,7 @@
 	pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
 err_setup_tx:
 	pch_gbe_reset(adapter);
-	pr_err("Error End\n");
+	netdev_err(netdev, "Error End\n");
 	return err;
 }
 
@@ -2116,8 +2157,9 @@
 	if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
 		netif_stop_queue(netdev);
 		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
-		pr_debug("Return : BUSY  next_to use : 0x%08x  next_to clean : 0x%08x\n",
-			 tx_ring->next_to_use, tx_ring->next_to_clean);
+		netdev_dbg(netdev,
+			   "Return : BUSY  next_to use : 0x%08x  next_to clean : 0x%08x\n",
+			   tx_ring->next_to_use, tx_ring->next_to_clean);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -2152,7 +2194,7 @@
 	int i;
 	int mc_count;
 
-	pr_debug("netdev->flags : 0x%08x\n", netdev->flags);
+	netdev_dbg(netdev, "netdev->flags : 0x%08x\n", netdev->flags);
 
 	/* Check for Promiscuous and All Multicast modes */
 	rctl = ioread32(&hw->reg->RX_MODE);
@@ -2192,7 +2234,8 @@
 					PCH_GBE_MAR_ENTRIES);
 	kfree(mta_list);
 
-	pr_debug("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x  netdev->mc_count : 0x%08x\n",
+	netdev_dbg(netdev,
+		 "RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x  netdev->mc_count : 0x%08x\n",
 		 ioread32(&hw->reg->RX_MODE), mc_count);
 }
 
@@ -2218,12 +2261,12 @@
 		pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
 		ret_val = 0;
 	}
-	pr_debug("ret_val : 0x%08x\n", ret_val);
-	pr_debug("dev_addr : %pM\n", netdev->dev_addr);
-	pr_debug("mac_addr : %pM\n", adapter->hw.mac.addr);
-	pr_debug("MAC_ADR1AB reg : 0x%08x 0x%08x\n",
-		 ioread32(&adapter->hw.reg->mac_adr[0].high),
-		 ioread32(&adapter->hw.reg->mac_adr[0].low));
+	netdev_dbg(netdev, "ret_val : 0x%08x\n", ret_val);
+	netdev_dbg(netdev, "dev_addr : %pM\n", netdev->dev_addr);
+	netdev_dbg(netdev, "mac_addr : %pM\n", adapter->hw.mac.addr);
+	netdev_dbg(netdev, "MAC_ADR1AB reg : 0x%08x 0x%08x\n",
+		   ioread32(&adapter->hw.reg->mac_adr[0].high),
+		   ioread32(&adapter->hw.reg->mac_adr[0].low));
 	return ret_val;
 }
 
@@ -2245,7 +2288,7 @@
 	max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
 		(max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) {
-		pr_err("Invalid MTU setting\n");
+		netdev_err(netdev, "Invalid MTU setting\n");
 		return -EINVAL;
 	}
 	if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
@@ -2274,9 +2317,10 @@
 		adapter->hw.mac.max_frame_size = max_frame;
 	}
 
-	pr_debug("max_frame : %d  rx_buffer_len : %d  mtu : %d  max_frame_size : %d\n",
-		 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
-		 adapter->hw.mac.max_frame_size);
+	netdev_dbg(netdev,
+		   "max_frame : %d  rx_buffer_len : %d  mtu : %d  max_frame_size : %d\n",
+		   max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
+		   adapter->hw.mac.max_frame_size);
 	return 0;
 }
 
@@ -2317,7 +2361,7 @@
 {
 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 
-	pr_debug("cmd : 0x%04x\n", cmd);
+	netdev_dbg(netdev, "cmd : 0x%04x\n", cmd);
 
 	if (cmd == SIOCSHWTSTAMP)
 		return hwtstamp_ioctl(netdev, ifr, cmd);
@@ -2354,7 +2398,7 @@
 	bool poll_end_flag = false;
 	bool cleaned = false;
 
-	pr_debug("budget : %d\n", budget);
+	netdev_dbg(adapter->netdev, "budget : %d\n", budget);
 
 	pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
 	cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
@@ -2377,8 +2421,9 @@
 		pch_gbe_enable_dma_rx(&adapter->hw);
 	}
 
-	pr_debug("poll_end_flag : %d  work_done : %d  budget : %d\n",
-		 poll_end_flag, work_done, budget);
+	netdev_dbg(adapter->netdev,
+		   "poll_end_flag : %d  work_done : %d  budget : %d\n",
+		   poll_end_flag, work_done, budget);
 
 	return work_done;
 }
@@ -2435,7 +2480,7 @@
 	struct pch_gbe_hw *hw = &adapter->hw;
 
 	if (pci_enable_device(pdev)) {
-		pr_err("Cannot re-enable PCI device after reset\n");
+		netdev_err(netdev, "Cannot re-enable PCI device after reset\n");
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 	pci_set_master(pdev);
@@ -2455,7 +2500,8 @@
 
 	if (netif_running(netdev)) {
 		if (pch_gbe_up(adapter)) {
-			pr_debug("can't bring device back up after reset\n");
+			netdev_dbg(netdev,
+				   "can't bring device back up after reset\n");
 			return;
 		}
 	}
@@ -2509,7 +2555,7 @@
 
 	err = pci_enable_device(pdev);
 	if (err) {
-		pr_err("Cannot enable PCI device from suspend\n");
+		netdev_err(netdev, "Cannot enable PCI device from suspend\n");
 		return err;
 	}
 	pci_set_master(pdev);
@@ -2545,13 +2591,7 @@
 
 	pch_gbe_hal_phy_hw_reset(&adapter->hw);
 
-	kfree(adapter->tx_ring);
-	kfree(adapter->rx_ring);
-
-	iounmap(adapter->hw.reg);
-	pci_release_regions(pdev);
 	free_netdev(netdev);
-	pci_disable_device(pdev);
 }
 
 static int pch_gbe_probe(struct pci_dev *pdev,
@@ -2561,7 +2601,7 @@
 	struct pch_gbe_adapter *adapter;
 	int ret;
 
-	ret = pci_enable_device(pdev);
+	ret = pcim_enable_device(pdev);
 	if (ret)
 		return ret;
 
@@ -2574,24 +2614,22 @@
 			if (ret) {
 				dev_err(&pdev->dev, "ERR: No usable DMA "
 					"configuration, aborting\n");
-				goto err_disable_device;
+				return ret;
 			}
 		}
 	}
 
-	ret = pci_request_regions(pdev, KBUILD_MODNAME);
+	ret = pcim_iomap_regions(pdev, 1 << PCH_GBE_PCI_BAR, pci_name(pdev));
 	if (ret) {
 		dev_err(&pdev->dev,
 			"ERR: Can't reserve PCI I/O and memory resources\n");
-		goto err_disable_device;
+		return ret;
 	}
 	pci_set_master(pdev);
 
 	netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
-	if (!netdev) {
-		ret = -ENOMEM;
-		goto err_release_pci;
-	}
+	if (!netdev)
+		return -ENOMEM;
 	SET_NETDEV_DEV(netdev, &pdev->dev);
 
 	pci_set_drvdata(pdev, netdev);
@@ -2599,18 +2637,17 @@
 	adapter->netdev = netdev;
 	adapter->pdev = pdev;
 	adapter->hw.back = adapter;
-	adapter->hw.reg = pci_iomap(pdev, PCH_GBE_PCI_BAR, 0);
-	if (!adapter->hw.reg) {
-		ret = -EIO;
-		dev_err(&pdev->dev, "Can't ioremap\n");
-		goto err_free_netdev;
-	}
+	adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR];
+	adapter->pdata = (struct pch_gbe_privdata *)pci_id->driver_data;
+	if (adapter->pdata && adapter->pdata->platform_init)
+		adapter->pdata->platform_init(pdev);
 
 	adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
 					       PCI_DEVFN(12, 4));
 	if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
-		pr_err("Bad ptp filter\n");
-		return -EINVAL;
+		dev_err(&pdev->dev, "Bad ptp filter\n");
+		ret = -EINVAL;
+		goto err_free_netdev;
 	}
 
 	netdev->netdev_ops = &pch_gbe_netdev_ops;
@@ -2628,7 +2665,7 @@
 	/* setup the private structure */
 	ret = pch_gbe_sw_init(adapter);
 	if (ret)
-		goto err_iounmap;
+		goto err_free_netdev;
 
 	/* Initialize PHY */
 	ret = pch_gbe_init_phy(adapter);
@@ -2679,27 +2716,62 @@
 
 	dev_dbg(&pdev->dev, "PCH Network Connection\n");
 
+	/* Disable hibernation on certain platforms */
+	if (adapter->pdata && adapter->pdata->phy_disable_hibernate)
+		pch_gbe_phy_disable_hibernate(&adapter->hw);
+
 	device_set_wakeup_enable(&pdev->dev, 1);
 	return 0;
 
 err_free_adapter:
 	pch_gbe_hal_phy_hw_reset(&adapter->hw);
-	kfree(adapter->tx_ring);
-	kfree(adapter->rx_ring);
-err_iounmap:
-	iounmap(adapter->hw.reg);
 err_free_netdev:
 	free_netdev(netdev);
-err_release_pci:
-	pci_release_regions(pdev);
-err_disable_device:
-	pci_disable_device(pdev);
 	return ret;
 }
 
+/* The AR803X PHY on the MinnowBoard requires a physical pin to be toggled to
+ * ensure it is awake for probe and init. Request the line and reset the PHY.
+ */
+static int pch_gbe_minnow_platform_init(struct pci_dev *pdev)
+{
+	unsigned long flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH | GPIOF_EXPORT;
+	unsigned gpio = MINNOW_PHY_RESET_GPIO;
+	int ret;
+
+	ret = devm_gpio_request_one(&pdev->dev, gpio, flags,
+				    "minnow_phy_reset");
+	if (ret) {
+		dev_err(&pdev->dev,
+			"ERR: Can't request PHY reset GPIO line '%d'\n", gpio);
+		return ret;
+	}
+
+	gpio_set_value(gpio, 0);
+	usleep_range(1250, 1500);
+	gpio_set_value(gpio, 1);
+	usleep_range(1250, 1500);
+
+	return ret;
+}
+
+static struct pch_gbe_privdata pch_gbe_minnow_privdata = {
+	.phy_tx_clk_delay = true,
+	.phy_disable_hibernate = true,
+	.platform_init = pch_gbe_minnow_platform_init,
+};
+
 static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
 	{.vendor = PCI_VENDOR_ID_INTEL,
 	 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
+	 .subvendor = PCI_VENDOR_ID_CIRCUITCO,
+	 .subdevice = PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD,
+	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
+	 .class_mask = (0xFFFF00),
+	 .driver_data = (kernel_ulong_t)&pch_gbe_minnow_privdata
+	 },
+	{.vendor = PCI_VENDOR_ID_INTEL,
+	 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
 	 .subvendor = PCI_ANY_ID,
 	 .subdevice = PCI_ANY_ID,
 	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
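
Aside on the ID table just above: the PCI core walks pci_device_id entries in
order and returns the first match, so the MinnowBoard entry (matched by its
subsystem vendor/device) must precede the PCI_ANY_ID catch-all, or its
driver_data would never be selected. A simplified sketch of the core's
per-entry test, modeled on pci_match_one_device() and not part of this patch:

	/* Simplified per-entry match; PCI_ANY_ID acts as a wildcard, which
	 * is why the more specific MinnowBoard entry must be listed first.
	 */
	static const struct pci_device_id *
	pci_match_one(const struct pci_device_id *id, const struct pci_dev *dev)
	{
		if ((id->vendor == PCI_ANY_ID || id->vendor == dev->vendor) &&
		    (id->device == PCI_ANY_ID || id->device == dev->device) &&
		    (id->subvendor == PCI_ANY_ID ||
		     id->subvendor == dev->subsystem_vendor) &&
		    (id->subdevice == PCI_ANY_ID ||
		     id->subdevice == dev->subsystem_device) &&
		    !((id->class ^ dev->class) & id->class_mask))
			return id;
		return NULL;
	}
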
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
index 8653c3b..cf7c9b3 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
@@ -237,16 +237,17 @@
 	case enable_option:
 		switch (*value) {
 		case OPTION_ENABLED:
-			pr_debug("%s Enabled\n", opt->name);
+			netdev_dbg(adapter->netdev, "%s Enabled\n", opt->name);
 			return 0;
 		case OPTION_DISABLED:
-			pr_debug("%s Disabled\n", opt->name);
+			netdev_dbg(adapter->netdev, "%s Disabled\n", opt->name);
 			return 0;
 		}
 		break;
 	case range_option:
 		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
-			pr_debug("%s set to %i\n", opt->name, *value);
+			netdev_dbg(adapter->netdev, "%s set to %i\n",
+				   opt->name, *value);
 			return 0;
 		}
 		break;
@@ -258,7 +259,8 @@
 			ent = &opt->arg.l.p[i];
 			if (*value == ent->i) {
 				if (ent->str[0] != '\0')
-					pr_debug("%s\n", ent->str);
+					netdev_dbg(adapter->netdev, "%s\n",
+						   ent->str);
 				return 0;
 			}
 		}
@@ -268,8 +270,8 @@
 		BUG();
 	}
 
-	pr_debug("Invalid %s value specified (%i) %s\n",
-		 opt->name, *value, opt->err);
+	netdev_dbg(adapter->netdev, "Invalid %s value specified (%i) %s\n",
+		   opt->name, *value, opt->err);
 	*value = opt->def;
 	return -1;
 }
@@ -318,7 +320,8 @@
 					 .p = an_list} }
 		};
 		if (speed || dplx) {
-			pr_debug("AutoNeg specified along with Speed or Duplex, AutoNeg parameter ignored\n");
+			netdev_dbg(adapter->netdev,
+				   "AutoNeg specified along with Speed or Duplex, AutoNeg parameter ignored\n");
 			hw->phy.autoneg_advertised = opt.def;
 		} else {
 			int tmp = AutoNeg;
@@ -332,13 +335,16 @@
 	case 0:
 		hw->mac.autoneg = hw->mac.fc_autoneg = 1;
 		if ((speed || dplx))
-			pr_debug("Speed and duplex autonegotiation enabled\n");
+			netdev_dbg(adapter->netdev,
+				   "Speed and duplex autonegotiation enabled\n");
 		hw->mac.link_speed = SPEED_10;
 		hw->mac.link_duplex = DUPLEX_HALF;
 		break;
 	case HALF_DUPLEX:
-		pr_debug("Half Duplex specified without Speed\n");
-		pr_debug("Using Autonegotiation at Half Duplex only\n");
+		netdev_dbg(adapter->netdev,
+			   "Half Duplex specified without Speed\n");
+		netdev_dbg(adapter->netdev,
+			   "Using Autonegotiation at Half Duplex only\n");
 		hw->mac.autoneg = hw->mac.fc_autoneg = 1;
 		hw->phy.autoneg_advertised = PHY_ADVERTISE_10_HALF |
 						PHY_ADVERTISE_100_HALF;
@@ -346,8 +352,10 @@
 		hw->mac.link_duplex = DUPLEX_HALF;
 		break;
 	case FULL_DUPLEX:
-		pr_debug("Full Duplex specified without Speed\n");
-		pr_debug("Using Autonegotiation at Full Duplex only\n");
+		netdev_dbg(adapter->netdev,
+			   "Full Duplex specified without Speed\n");
+		netdev_dbg(adapter->netdev,
+			   "Using Autonegotiation at Full Duplex only\n");
 		hw->mac.autoneg = hw->mac.fc_autoneg = 1;
 		hw->phy.autoneg_advertised = PHY_ADVERTISE_10_FULL |
 						PHY_ADVERTISE_100_FULL |
@@ -356,8 +364,10 @@
 		hw->mac.link_duplex = DUPLEX_FULL;
 		break;
 	case SPEED_10:
-		pr_debug("10 Mbps Speed specified without Duplex\n");
-		pr_debug("Using Autonegotiation at 10 Mbps only\n");
+		netdev_dbg(adapter->netdev,
+			   "10 Mbps Speed specified without Duplex\n");
+		netdev_dbg(adapter->netdev,
+			   "Using Autonegotiation at 10 Mbps only\n");
 		hw->mac.autoneg = hw->mac.fc_autoneg = 1;
 		hw->phy.autoneg_advertised = PHY_ADVERTISE_10_HALF |
 						PHY_ADVERTISE_10_FULL;
@@ -365,22 +375,24 @@
 		hw->mac.link_duplex = DUPLEX_HALF;
 		break;
 	case SPEED_10 + HALF_DUPLEX:
-		pr_debug("Forcing to 10 Mbps Half Duplex\n");
+		netdev_dbg(adapter->netdev, "Forcing to 10 Mbps Half Duplex\n");
 		hw->mac.autoneg = hw->mac.fc_autoneg = 0;
 		hw->phy.autoneg_advertised = 0;
 		hw->mac.link_speed = SPEED_10;
 		hw->mac.link_duplex = DUPLEX_HALF;
 		break;
 	case SPEED_10 + FULL_DUPLEX:
-		pr_debug("Forcing to 10 Mbps Full Duplex\n");
+		netdev_dbg(adapter->netdev, "Forcing to 10 Mbps Full Duplex\n");
 		hw->mac.autoneg = hw->mac.fc_autoneg = 0;
 		hw->phy.autoneg_advertised = 0;
 		hw->mac.link_speed = SPEED_10;
 		hw->mac.link_duplex = DUPLEX_FULL;
 		break;
 	case SPEED_100:
-		pr_debug("100 Mbps Speed specified without Duplex\n");
-		pr_debug("Using Autonegotiation at 100 Mbps only\n");
+		netdev_dbg(adapter->netdev,
+			   "100 Mbps Speed specified without Duplex\n");
+		netdev_dbg(adapter->netdev,
+			   "Using Autonegotiation at 100 Mbps only\n");
 		hw->mac.autoneg = hw->mac.fc_autoneg = 1;
 		hw->phy.autoneg_advertised = PHY_ADVERTISE_100_HALF |
 						PHY_ADVERTISE_100_FULL;
@@ -388,28 +400,33 @@
 		hw->mac.link_duplex = DUPLEX_HALF;
 		break;
 	case SPEED_100 + HALF_DUPLEX:
-		pr_debug("Forcing to 100 Mbps Half Duplex\n");
+		netdev_dbg(adapter->netdev,
+			   "Forcing to 100 Mbps Half Duplex\n");
 		hw->mac.autoneg = hw->mac.fc_autoneg = 0;
 		hw->phy.autoneg_advertised = 0;
 		hw->mac.link_speed = SPEED_100;
 		hw->mac.link_duplex = DUPLEX_HALF;
 		break;
 	case SPEED_100 + FULL_DUPLEX:
-		pr_debug("Forcing to 100 Mbps Full Duplex\n");
+		netdev_dbg(adapter->netdev,
+			   "Forcing to 100 Mbps Full Duplex\n");
 		hw->mac.autoneg = hw->mac.fc_autoneg = 0;
 		hw->phy.autoneg_advertised = 0;
 		hw->mac.link_speed = SPEED_100;
 		hw->mac.link_duplex = DUPLEX_FULL;
 		break;
 	case SPEED_1000:
-		pr_debug("1000 Mbps Speed specified without Duplex\n");
+		netdev_dbg(adapter->netdev,
+			   "1000 Mbps Speed specified without Duplex\n");
 		goto full_duplex_only;
 	case SPEED_1000 + HALF_DUPLEX:
-		pr_debug("Half Duplex is not supported at 1000 Mbps\n");
+		netdev_dbg(adapter->netdev,
+			   "Half Duplex is not supported at 1000 Mbps\n");
 		/* fall through */
 	case SPEED_1000 + FULL_DUPLEX:
 full_duplex_only:
-		pr_debug("Using Autonegotiation at 1000 Mbps Full Duplex only\n");
+		netdev_dbg(adapter->netdev,
+			   "Using Autonegotiation at 1000 Mbps Full Duplex only\n");
 		hw->mac.autoneg = hw->mac.fc_autoneg = 1;
 		hw->phy.autoneg_advertised = PHY_ADVERTISE_1000_FULL;
 		hw->mac.link_speed = SPEED_1000;
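
A worked example of the decoding above, assuming the file's usual option
values (HALF_DUPLEX being 1 and FULL_DUPLEX being 2, inferred from the case
arithmetic): loading with "modprobe pch_gbe Speed=100 Duplex=2" selects the
SPEED_100 + FULL_DUPLEX case, which turns autonegotiation off and forces
100 Mbps full duplex; specifying AutoNeg as well would be ignored with the
"AutoNeg specified along with Speed or Duplex" message shown earlier.
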
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
index 28bb960..23e7ee8 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
@@ -74,6 +74,15 @@
 #define MII_SR_100X_FD_CAPS      0x4000	/* 100X  Full Duplex Capable */
 #define MII_SR_100T4_CAPS        0x8000	/* 100T4 Capable */
 
+/* AR8031 PHY Debug Registers */
+#define PHY_AR803X_ID           0x00001374
+#define PHY_AR8031_DBG_OFF      0x1D
+#define PHY_AR8031_DBG_DAT      0x1E
+#define PHY_AR8031_SERDES       0x05
+#define PHY_AR8031_HIBERNATE    0x0B
+#define PHY_AR8031_SERDES_TX_CLK_DLY   0x0100 /* TX clock delay of 2.0ns */
+#define PHY_AR8031_PS_HIB_EN           0x8000 /* Hibernate enable */
+
 /* Phy Id Register (word 2) */
 #define PHY_REVISION_MASK        0x000F
 
@@ -97,6 +106,7 @@
  */
 s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw)
 {
+	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
 	struct pch_gbe_phy_info *phy = &hw->phy;
 	s32 ret;
 	u16 phy_id1;
@@ -115,8 +125,9 @@
 	phy->id = (u32)phy_id1;
 	phy->id = ((phy->id << 6) | ((phy_id2 & 0xFC00) >> 10));
 	phy->revision = (u32) (phy_id2 & 0x000F);
-	pr_debug("phy->id : 0x%08x  phy->revision : 0x%08x\n",
-		 phy->id, phy->revision);
+	netdev_dbg(adapter->netdev,
+		   "phy->id : 0x%08x  phy->revision : 0x%08x\n",
+		   phy->id, phy->revision);
 	return 0;
 }
 
@@ -134,7 +145,10 @@
 	struct pch_gbe_phy_info *phy = &hw->phy;
 
 	if (offset > PHY_MAX_REG_ADDRESS) {
-		pr_err("PHY Address %d is out of range\n", offset);
+		struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
+
+		netdev_err(adapter->netdev, "PHY Address %d is out of range\n",
+			   offset);
 		return -EINVAL;
 	}
 	*data = pch_gbe_mac_ctrl_miim(hw, phy->addr, PCH_GBE_HAL_MIIM_READ,
@@ -156,7 +170,10 @@
 	struct pch_gbe_phy_info *phy = &hw->phy;
 
 	if (offset > PHY_MAX_REG_ADDRESS) {
-		pr_err("PHY Address %d is out of range\n", offset);
+		struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
+
+		netdev_err(adapter->netdev, "PHY Address %d is out of range\n",
+			   offset);
 		return -EINVAL;
 	}
 	pch_gbe_mac_ctrl_miim(hw, phy->addr, PCH_GBE_HAL_MIIM_WRITE,
@@ -241,20 +258,64 @@
 }
 
 /**
+ * pch_gbe_phy_tx_clk_delay - Setup TX clock delay via the PHY
+ * @hw:	            Pointer to the HW structure
+ * Returns
+ *	0:		Successful.
+ *	-EINVAL:	Invalid argument.
+ */
+static int pch_gbe_phy_tx_clk_delay(struct pch_gbe_hw *hw)
+{
+	/* The RGMII interface requires a ~2ns TX clock delay. This is typically
+	 * done in layout with a longer trace or via PHY strapping, but can also
+	 * be done via PHY configuration registers.
+	 */
+	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
+	u16 mii_reg;
+	int ret = 0;
+
+	switch (hw->phy.id) {
+	case PHY_AR803X_ID:
+		netdev_dbg(adapter->netdev,
+			   "Configuring AR803X PHY for 2ns TX clock delay\n");
+		pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_OFF, &mii_reg);
+		ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_OFF,
+						 PHY_AR8031_SERDES);
+		if (ret)
+			break;
+
+		pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_DAT, &mii_reg);
+		mii_reg |= PHY_AR8031_SERDES_TX_CLK_DLY;
+		ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_DAT,
+						 mii_reg);
+		break;
+	default:
+		netdev_err(adapter->netdev,
+			   "Unknown PHY (%x), could not set TX clock delay\n",
+			   hw->phy.id);
+		return -EINVAL;
+	}
+
+	if (ret)
+		netdev_err(adapter->netdev,
+			   "Could not configure tx clock delay for PHY\n");
+	return ret;
+}
+
+/**
  * pch_gbe_phy_init_setting - PHY initial setting
  * @hw:	            Pointer to the HW structure
  */
 void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw)
 {
-	struct pch_gbe_adapter *adapter;
+	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
 	struct ethtool_cmd     cmd = { .cmd = ETHTOOL_GSET };
 	int ret;
 	u16 mii_reg;
 
-	adapter = container_of(hw, struct pch_gbe_adapter, hw);
 	ret = mii_ethtool_gset(&adapter->mii, &cmd);
 	if (ret)
-		pr_err("Error: mii_ethtool_gset\n");
+		netdev_err(adapter->netdev, "Error: mii_ethtool_gset\n");
 
 	ethtool_cmd_speed_set(&cmd, hw->mac.link_speed);
 	cmd.duplex = hw->mac.link_duplex;
@@ -263,7 +324,7 @@
 	pch_gbe_phy_write_reg_miic(hw, MII_BMCR, BMCR_RESET);
 	ret = mii_ethtool_sset(&adapter->mii, &cmd);
 	if (ret)
-		pr_err("Error: mii_ethtool_sset\n");
+		netdev_err(adapter->netdev, "Error: mii_ethtool_sset\n");
 
 	pch_gbe_phy_sw_reset(hw);
 
@@ -271,4 +332,47 @@
 	mii_reg |= PHYSP_CTRL_ASSERT_CRS_TX;
 	pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL, mii_reg);
 
+	/* Setup a TX clock delay on certain platforms */
+	if (adapter->pdata && adapter->pdata->phy_tx_clk_delay)
+		pch_gbe_phy_tx_clk_delay(hw);
+}
+
+/**
+ * pch_gbe_phy_disable_hibernate - Disable the PHY low power state
+ * @hw:	            Pointer to the HW structure
+ * Returns
+ *	0:		Successful.
+ *	-EINVAL:	Invalid argument.
+ */
+int pch_gbe_phy_disable_hibernate(struct pch_gbe_hw *hw)
+{
+	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
+	u16 mii_reg;
+	int ret = 0;
+
+	switch (hw->phy.id) {
+	case PHY_AR803X_ID:
+		netdev_dbg(adapter->netdev,
+			   "Disabling hibernation for AR803X PHY\n");
+		ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_OFF,
+						 PHY_AR8031_HIBERNATE);
+		if (ret)
+			break;
+
+		pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_DAT, &mii_reg);
+		mii_reg &= ~PHY_AR8031_PS_HIB_EN;
+		ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_DAT,
+						 mii_reg);
+		break;
+	default:
+		netdev_err(adapter->netdev,
+			   "Unknown PHY (%x), could not disable hibernation\n",
+			   hw->phy.id);
+		return -EINVAL;
+	}
+
+	if (ret)
+		netdev_err(adapter->netdev,
+			   "Could not disable PHY hibernation\n");
+	return ret;
 }
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
index 03264dc..0cbe692 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
@@ -33,5 +33,6 @@
 void pch_gbe_phy_power_down(struct pch_gbe_hw *hw);
 void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw);
 void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw);
+int pch_gbe_phy_disable_hibernate(struct pch_gbe_hw *hw);
 
 #endif /* _PCH_GBE_PHY_H_ */
diff --git a/drivers/net/ppp/Kconfig b/drivers/net/ppp/Kconfig
index 1373c6d..282aec4 100644
--- a/drivers/net/ppp/Kconfig
+++ b/drivers/net/ppp/Kconfig
@@ -149,6 +149,23 @@
 	  tunnels. L2TP is replacing PPTP for VPN uses.
 if TTY
 
+config PPPOLAC
+	tristate "PPP on L2TP Access Concentrator"
+	depends on PPP && INET
+	help
+	  L2TP (RFC 2661) is a tunneling protocol widely used in virtual private
+	  networks. This driver handles L2TP data packets between a UDP socket
+	  and a PPP channel, but only permits one session per socket. Thus it is
+	  fairly simple and suited for clients.
+
+config PPPOPNS
+	tristate "PPP on PPTP Network Server"
+	depends on PPP && INET
+	help
+	  PPTP (RFC 2637) is a tunneling protocol widely used in virtual private
+	  networks. This driver handles PPTP data packets between a RAW socket
+	  and a PPP channel. It is fairly simple and easy to use.
+
 config PPP_ASYNC
 	tristate "PPP support for async serial ports"
 	depends on PPP
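
Since both new entries sit inside the "if TTY" block and depend on PPP && INET,
a minimal configuration fragment that builds them as modules might look like
(assuming the usual PPP core options are already set):

	CONFIG_TTY=y
	CONFIG_INET=y
	CONFIG_PPP=m
	CONFIG_PPPOLAC=m
	CONFIG_PPPOPNS=m
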
diff --git a/drivers/net/ppp/Makefile b/drivers/net/ppp/Makefile
index a6b6297..d283d03c 100644
--- a/drivers/net/ppp/Makefile
+++ b/drivers/net/ppp/Makefile
@@ -11,3 +11,5 @@
 obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
 obj-$(CONFIG_PPPOL2TP) += pppox.o
 obj-$(CONFIG_PPTP) += pppox.o pptp.o
+obj-$(CONFIG_PPPOLAC) += pppox.o pppolac.o
+obj-$(CONFIG_PPPOPNS) += pppox.o pppopns.o
diff --git a/drivers/net/ppp/pppolac.c b/drivers/net/ppp/pppolac.c
new file mode 100644
index 0000000..a5d3d63
--- /dev/null
+++ b/drivers/net/ppp/pppolac.c
@@ -0,0 +1,449 @@
+/* drivers/net/pppolac.c
+ *
+ * Driver for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* This driver handles L2TP data packets between a UDP socket and a PPP channel.
+ * The socket must stay connected, and only one session per socket is permitted.
+ * Sequencing of outgoing packets is controlled by the LNS. Incoming packets with
+ * sequences are reordered within a sliding window of one second. Currently
+ * reordering only happens when a packet is received. It is done for simplicity
+ * since no additional locks or threads are required. This driver only works on
+ * IPv4 due to the lack of UDP encapsulation support in IPv6. */
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/file.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/udp.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_ppp.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_channel.h>
+#include <net/tcp_states.h>
+#include <asm/uaccess.h>
+
+#define L2TP_CONTROL_BIT	0x80
+#define L2TP_LENGTH_BIT		0x40
+#define L2TP_SEQUENCE_BIT	0x08
+#define L2TP_OFFSET_BIT		0x02
+#define L2TP_VERSION		0x02
+#define L2TP_VERSION_MASK	0x0F
+
+#define PPP_ADDR	0xFF
+#define PPP_CTRL	0x03
+
+union unaligned {
+	__u32 u32;
+} __attribute__((packed));
+
+static inline union unaligned *unaligned(void *ptr)
+{
+	return (union unaligned *)ptr;
+}
+
+struct meta {
+	__u32 sequence;
+	__u32 timestamp;
+};
+
+static inline struct meta *skb_meta(struct sk_buff *skb)
+{
+	return (struct meta *)skb->cb;
+}
+
+/******************************************************************************/
+
+static int pppolac_recv_core(struct sock *sk_udp, struct sk_buff *skb)
+{
+	struct sock *sk = (struct sock *)sk_udp->sk_user_data;
+	struct pppolac_opt *opt = &pppox_sk(sk)->proto.lac;
+	struct meta *meta = skb_meta(skb);
+	__u32 now = jiffies;
+	__u8 bits;
+	__u8 *ptr;
+
+	/* Drop the packet if L2TP header is missing. */
+	if (skb->len < sizeof(struct udphdr) + 6)
+		goto drop;
+
+	/* Put it back if it is a control packet. */
+	if (skb->data[sizeof(struct udphdr)] & L2TP_CONTROL_BIT)
+		return opt->backlog_rcv(sk_udp, skb);
+
+	/* Skip UDP header. */
+	skb_pull(skb, sizeof(struct udphdr));
+
+	/* Check the version. */
+	if ((skb->data[1] & L2TP_VERSION_MASK) != L2TP_VERSION)
+		goto drop;
+	bits = skb->data[0];
+	ptr = &skb->data[2];
+
+	/* Check the length if it is present. */
+	if (bits & L2TP_LENGTH_BIT) {
+		if ((ptr[0] << 8 | ptr[1]) != skb->len)
+			goto drop;
+		ptr += 2;
+	}
+
+	/* Skip all fields including optional ones. */
+	if (!skb_pull(skb, 6 + (bits & L2TP_SEQUENCE_BIT ? 4 : 0) +
+			(bits & L2TP_LENGTH_BIT ? 2 : 0) +
+			(bits & L2TP_OFFSET_BIT ? 2 : 0)))
+		goto drop;
+
+	/* Skip the offset padding if it is present. */
+	if (bits & L2TP_OFFSET_BIT &&
+			!skb_pull(skb, skb->data[-2] << 8 | skb->data[-1]))
+		goto drop;
+
+	/* Check the tunnel and the session. */
+	if (unaligned(ptr)->u32 != opt->local)
+		goto drop;
+
+	/* Check the sequence if it is present. */
+	if (bits & L2TP_SEQUENCE_BIT) {
+		meta->sequence = ptr[4] << 8 | ptr[5];
+		if ((__s16)(meta->sequence - opt->recv_sequence) < 0)
+			goto drop;
+	}
+
+	/* Skip PPP address and control if they are present. */
+	if (skb->len >= 2 && skb->data[0] == PPP_ADDR &&
+			skb->data[1] == PPP_CTRL)
+		skb_pull(skb, 2);
+
+	/* Fix PPP protocol if it is compressed. */
+	if (skb->len >= 1 && skb->data[0] & 1)
+		skb_push(skb, 1)[0] = 0;
+
+	/* Drop the packet if PPP protocol is missing. */
+	if (skb->len < 2)
+		goto drop;
+
+	/* Perform reordering if sequencing is enabled. */
+	atomic_set(&opt->sequencing, bits & L2TP_SEQUENCE_BIT);
+	if (bits & L2TP_SEQUENCE_BIT) {
+		struct sk_buff *skb1;
+
+		/* Insert the packet into receive queue in order. */
+		skb_set_owner_r(skb, sk);
+		skb_queue_walk(&sk->sk_receive_queue, skb1) {
+			struct meta *meta1 = skb_meta(skb1);
+			__s16 order = meta->sequence - meta1->sequence;
+			if (order == 0)
+				goto drop;
+			if (order < 0) {
+				meta->timestamp = meta1->timestamp;
+				skb_insert(skb1, skb, &sk->sk_receive_queue);
+				skb = NULL;
+				break;
+			}
+		}
+		if (skb) {
+			meta->timestamp = now;
+			skb_queue_tail(&sk->sk_receive_queue, skb);
+		}
+
+		/* Remove packets from receive queue as long as
+		 * 1. the receive buffer is full,
+		 * 2. they are queued longer than one second, or
+		 * 3. there are no missing packets before them. */
+		skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {
+			meta = skb_meta(skb);
+			if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
+					now - meta->timestamp < HZ &&
+					meta->sequence != opt->recv_sequence)
+				break;
+			skb_unlink(skb, &sk->sk_receive_queue);
+			opt->recv_sequence = (__u16)(meta->sequence + 1);
+			skb_orphan(skb);
+			ppp_input(&pppox_sk(sk)->chan, skb);
+		}
+		return NET_RX_SUCCESS;
+	}
+
+	/* Flush receive queue if sequencing is disabled. */
+	skb_queue_purge(&sk->sk_receive_queue);
+	skb_orphan(skb);
+	ppp_input(&pppox_sk(sk)->chan, skb);
+	return NET_RX_SUCCESS;
+drop:
+	kfree_skb(skb);
+	return NET_RX_DROP;
+}
+
+static int pppolac_recv(struct sock *sk_udp, struct sk_buff *skb)
+{
+	sock_hold(sk_udp);
+	sk_receive_skb(sk_udp, skb, 0);
+	return 0;
+}
+
+static struct sk_buff_head delivery_queue;
+
+static void pppolac_xmit_core(struct work_struct *delivery_work)
+{
+	mm_segment_t old_fs = get_fs();
+	struct sk_buff *skb;
+
+	set_fs(KERNEL_DS);
+	while ((skb = skb_dequeue(&delivery_queue))) {
+		struct sock *sk_udp = skb->sk;
+		struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
+		struct msghdr msg = {
+			.msg_iov = (struct iovec *)&iov,
+			.msg_iovlen = 1,
+			.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
+		};
+		sk_udp->sk_prot->sendmsg(NULL, sk_udp, &msg, skb->len);
+		kfree_skb(skb);
+	}
+	set_fs(old_fs);
+}
+
+static DECLARE_WORK(delivery_work, pppolac_xmit_core);
+
+static int pppolac_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+	struct sock *sk_udp = (struct sock *)chan->private;
+	struct pppolac_opt *opt = &pppox_sk(sk_udp->sk_user_data)->proto.lac;
+
+	/* Install PPP address and control. */
+	skb_push(skb, 2);
+	skb->data[0] = PPP_ADDR;
+	skb->data[1] = PPP_CTRL;
+
+	/* Install L2TP header. */
+	if (atomic_read(&opt->sequencing)) {
+		skb_push(skb, 10);
+		skb->data[0] = L2TP_SEQUENCE_BIT;
+		skb->data[6] = opt->xmit_sequence >> 8;
+		skb->data[7] = opt->xmit_sequence;
+		skb->data[8] = 0;
+		skb->data[9] = 0;
+		opt->xmit_sequence++;
+	} else {
+		skb_push(skb, 6);
+		skb->data[0] = 0;
+	}
+	skb->data[1] = L2TP_VERSION;
+	unaligned(&skb->data[2])->u32 = opt->remote;
+
+	/* Now send the packet via the delivery queue. */
+	skb_set_owner_w(skb, sk_udp);
+	skb_queue_tail(&delivery_queue, skb);
+	schedule_work(&delivery_work);
+	return 1;
+}
+
+/******************************************************************************/
+
+static struct ppp_channel_ops pppolac_channel_ops = {
+	.start_xmit = pppolac_xmit,
+};
+
+static int pppolac_connect(struct socket *sock, struct sockaddr *useraddr,
+	int addrlen, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct pppox_sock *po = pppox_sk(sk);
+	struct sockaddr_pppolac *addr = (struct sockaddr_pppolac *)useraddr;
+	struct socket *sock_udp = NULL;
+	struct sock *sk_udp;
+	int error;
+
+	if (addrlen != sizeof(struct sockaddr_pppolac) ||
+			!addr->local.tunnel || !addr->local.session ||
+			!addr->remote.tunnel || !addr->remote.session) {
+		return -EINVAL;
+	}
+
+	lock_sock(sk);
+	error = -EALREADY;
+	if (sk->sk_state != PPPOX_NONE)
+		goto out;
+
+	sock_udp = sockfd_lookup(addr->udp_socket, &error);
+	if (!sock_udp)
+		goto out;
+	sk_udp = sock_udp->sk;
+	lock_sock(sk_udp);
+
+	/* Remove this check when IPv6 supports UDP encapsulation. */
+	error = -EAFNOSUPPORT;
+	if (sk_udp->sk_family != AF_INET)
+		goto out;
+	error = -EPROTONOSUPPORT;
+	if (sk_udp->sk_protocol != IPPROTO_UDP)
+		goto out;
+	error = -EDESTADDRREQ;
+	if (sk_udp->sk_state != TCP_ESTABLISHED)
+		goto out;
+	error = -EBUSY;
+	if (udp_sk(sk_udp)->encap_type || sk_udp->sk_user_data)
+		goto out;
+	if (!sk_udp->sk_bound_dev_if) {
+		struct dst_entry *dst = sk_dst_get(sk_udp);
+		error = -ENODEV;
+		if (!dst)
+			goto out;
+		sk_udp->sk_bound_dev_if = dst->dev->ifindex;
+		dst_release(dst);
+	}
+
+	po->chan.hdrlen = 12;
+	po->chan.private = sk_udp;
+	po->chan.ops = &pppolac_channel_ops;
+	po->chan.mtu = PPP_MRU - 80;
+	po->proto.lac.local = unaligned(&addr->local)->u32;
+	po->proto.lac.remote = unaligned(&addr->remote)->u32;
+	atomic_set(&po->proto.lac.sequencing, 1);
+	po->proto.lac.backlog_rcv = sk_udp->sk_backlog_rcv;
+
+	error = ppp_register_channel(&po->chan);
+	if (error)
+		goto out;
+
+	sk->sk_state = PPPOX_CONNECTED;
+	udp_sk(sk_udp)->encap_type = UDP_ENCAP_L2TPINUDP;
+	udp_sk(sk_udp)->encap_rcv = pppolac_recv;
+	sk_udp->sk_backlog_rcv = pppolac_recv_core;
+	sk_udp->sk_user_data = sk;
+out:
+	if (sock_udp) {
+		release_sock(sk_udp);
+		if (error)
+			sockfd_put(sock_udp);
+	}
+	release_sock(sk);
+	return error;
+}
+
+static int pppolac_release(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+
+	if (!sk)
+		return 0;
+
+	lock_sock(sk);
+	if (sock_flag(sk, SOCK_DEAD)) {
+		release_sock(sk);
+		return -EBADF;
+	}
+
+	if (sk->sk_state != PPPOX_NONE) {
+		struct sock *sk_udp = (struct sock *)pppox_sk(sk)->chan.private;
+		lock_sock(sk_udp);
+		skb_queue_purge(&sk->sk_receive_queue);
+		pppox_unbind_sock(sk);
+		udp_sk(sk_udp)->encap_type = 0;
+		udp_sk(sk_udp)->encap_rcv = NULL;
+		sk_udp->sk_backlog_rcv = pppox_sk(sk)->proto.lac.backlog_rcv;
+		sk_udp->sk_user_data = NULL;
+		release_sock(sk_udp);
+		sockfd_put(sk_udp->sk_socket);
+	}
+
+	sock_orphan(sk);
+	sock->sk = NULL;
+	release_sock(sk);
+	sock_put(sk);
+	return 0;
+}
+
+/******************************************************************************/
+
+static struct proto pppolac_proto = {
+	.name = "PPPOLAC",
+	.owner = THIS_MODULE,
+	.obj_size = sizeof(struct pppox_sock),
+};
+
+static struct proto_ops pppolac_proto_ops = {
+	.family = PF_PPPOX,
+	.owner = THIS_MODULE,
+	.release = pppolac_release,
+	.bind = sock_no_bind,
+	.connect = pppolac_connect,
+	.socketpair = sock_no_socketpair,
+	.accept = sock_no_accept,
+	.getname = sock_no_getname,
+	.poll = sock_no_poll,
+	.ioctl = pppox_ioctl,
+	.listen = sock_no_listen,
+	.shutdown = sock_no_shutdown,
+	.setsockopt = sock_no_setsockopt,
+	.getsockopt = sock_no_getsockopt,
+	.sendmsg = sock_no_sendmsg,
+	.recvmsg = sock_no_recvmsg,
+	.mmap = sock_no_mmap,
+};
+
+static int pppolac_create(struct net *net, struct socket *sock)
+{
+	struct sock *sk;
+
+	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppolac_proto);
+	if (!sk)
+		return -ENOMEM;
+
+	sock_init_data(sock, sk);
+	sock->state = SS_UNCONNECTED;
+	sock->ops = &pppolac_proto_ops;
+	sk->sk_protocol = PX_PROTO_OLAC;
+	sk->sk_state = PPPOX_NONE;
+	return 0;
+}
+
+/******************************************************************************/
+
+static struct pppox_proto pppolac_pppox_proto = {
+	.create = pppolac_create,
+	.owner = THIS_MODULE,
+};
+
+static int __init pppolac_init(void)
+{
+	int error;
+
+	error = proto_register(&pppolac_proto, 0);
+	if (error)
+		return error;
+
+	error = register_pppox_proto(PX_PROTO_OLAC, &pppolac_pppox_proto);
+	if (error)
+		proto_unregister(&pppolac_proto);
+	else
+		skb_queue_head_init(&delivery_queue);
+	return error;
+}
+
+static void __exit pppolac_exit(void)
+{
+	unregister_pppox_proto(PX_PROTO_OLAC);
+	proto_unregister(&pppolac_proto);
+}
+
+module_init(pppolac_init);
+module_exit(pppolac_exit);
+
+MODULE_DESCRIPTION("PPP on L2TP Access Concentrator (PPPoLAC)");
+MODULE_AUTHOR("Chia-chi Yeh <chiachi@android.com>");
+MODULE_LICENSE("GPL");
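
For a sense of how this driver is driven from userspace, a rough sketch
follows. It assumes the Android sockaddr_pppolac layout from the matching
if_pppox.h (a UDP socket fd plus network-order tunnel/session IDs) and omits
error handling; Android's mtpd is the canonical user.

	#include <stdint.h>
	#include <arpa/inet.h>
	#include <sys/socket.h>
	#include <linux/if_pppox.h>	/* assumed to carry PX_PROTO_OLAC */

	/* Hypothetical helper: attach an established, connected UDP socket as
	 * a PPPoLAC channel. pppolac_connect() above enforces AF_INET,
	 * IPPROTO_UDP and an established state, and rejects a socket already
	 * claimed by another encapsulation.
	 */
	static int pppolac_attach(int udp_fd,
				  uint16_t local_tunnel, uint16_t local_session,
				  uint16_t remote_tunnel, uint16_t remote_session)
	{
		struct sockaddr_pppolac addr = {
			.sa_family = AF_PPPOX,
			.sa_protocol = PX_PROTO_OLAC,
			.udp_socket = udp_fd,
			.local = { .tunnel = htons(local_tunnel),
				   .session = htons(local_session) },
			.remote = { .tunnel = htons(remote_tunnel),
				    .session = htons(remote_session) },
		};
		int fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OLAC);

		if (fd < 0 || connect(fd, (struct sockaddr *)&addr,
				      sizeof(addr)) < 0)
			return -1;
		return fd;	/* now a PPP channel; see PPPIOCGCHAN */
	}
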
diff --git a/drivers/net/ppp/pppopns.c b/drivers/net/ppp/pppopns.c
new file mode 100644
index 0000000..6016d29
--- /dev/null
+++ b/drivers/net/ppp/pppopns.c
@@ -0,0 +1,428 @@
+/* drivers/net/pppopns.c
+ *
+ * Driver for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* This driver handles PPTP data packets between a RAW socket and a PPP channel.
+ * The socket is created in kernel space and connected to the same address as
+ * the control socket. Outgoing packets are always sent with sequences but
+ * without acknowledgements. Incoming packets with sequences are reordered
+ * within a sliding window of one second. Currently reordering only happens when
+ * a packet is received. It is done for simplicity since no additional locks or
+ * threads are required. This driver should work on both IPv4 and IPv6. */
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/file.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/ppp_defs.h>
+#include <linux/if.h>
+#include <linux/if_ppp.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_channel.h>
+#include <asm/uaccess.h>
+
+#define GRE_HEADER_SIZE		8
+
+#define PPTP_GRE_BITS		htons(0x2001)
+#define PPTP_GRE_BITS_MASK	htons(0xEF7F)
+#define PPTP_GRE_SEQ_BIT	htons(0x1000)
+#define PPTP_GRE_ACK_BIT	htons(0x0080)
+#define PPTP_GRE_TYPE		htons(0x880B)
+
+#define PPP_ADDR	0xFF
+#define PPP_CTRL	0x03
+
+struct header {
+	__u16	bits;
+	__u16	type;
+	__u16	length;
+	__u16	call;
+	__u32	sequence;
+} __attribute__((packed));
+
+struct meta {
+	__u32 sequence;
+	__u32 timestamp;
+};
+
+static inline struct meta *skb_meta(struct sk_buff *skb)
+{
+	return (struct meta *)skb->cb;
+}
+
+/******************************************************************************/
+
+static int pppopns_recv_core(struct sock *sk_raw, struct sk_buff *skb)
+{
+	struct sock *sk = (struct sock *)sk_raw->sk_user_data;
+	struct pppopns_opt *opt = &pppox_sk(sk)->proto.pns;
+	struct meta *meta = skb_meta(skb);
+	__u32 now = jiffies;
+	struct header *hdr;
+
+	/* Skip transport header */
+	skb_pull(skb, skb_transport_header(skb) - skb->data);
+
+	/* Drop the packet if GRE header is missing. */
+	if (skb->len < GRE_HEADER_SIZE)
+		goto drop;
+	hdr = (struct header *)skb->data;
+
+	/* Check the header. */
+	if (hdr->type != PPTP_GRE_TYPE || hdr->call != opt->local ||
+			(hdr->bits & PPTP_GRE_BITS_MASK) != PPTP_GRE_BITS)
+		goto drop;
+
+	/* Skip all fields including optional ones. */
+	if (!skb_pull(skb, GRE_HEADER_SIZE +
+			(hdr->bits & PPTP_GRE_SEQ_BIT ? 4 : 0) +
+			(hdr->bits & PPTP_GRE_ACK_BIT ? 4 : 0)))
+		goto drop;
+
+	/* Check the length. */
+	if (skb->len != ntohs(hdr->length))
+		goto drop;
+
+	/* Check the sequence if it is present. */
+	if (hdr->bits & PPTP_GRE_SEQ_BIT) {
+		meta->sequence = ntohl(hdr->sequence);
+		if ((__s32)(meta->sequence - opt->recv_sequence) < 0)
+			goto drop;
+	}
+
+	/* Skip PPP address and control if they are present. */
+	if (skb->len >= 2 && skb->data[0] == PPP_ADDR &&
+			skb->data[1] == PPP_CTRL)
+		skb_pull(skb, 2);
+
+	/* Fix PPP protocol if it is compressed. */
+	if (skb->len >= 1 && skb->data[0] & 1)
+		skb_push(skb, 1)[0] = 0;
+
+	/* Drop the packet if PPP protocol is missing. */
+	if (skb->len < 2)
+		goto drop;
+
+	/* Perform reordering if sequencing is enabled. */
+	if (hdr->bits & PPTP_GRE_SEQ_BIT) {
+		struct sk_buff *skb1;
+
+		/* Insert the packet into receive queue in order. */
+		skb_set_owner_r(skb, sk);
+		skb_queue_walk(&sk->sk_receive_queue, skb1) {
+			struct meta *meta1 = skb_meta(skb1);
+			__s32 order = meta->sequence - meta1->sequence;
+			if (order == 0)
+				goto drop;
+			if (order < 0) {
+				meta->timestamp = meta1->timestamp;
+				skb_insert(skb1, skb, &sk->sk_receive_queue);
+				skb = NULL;
+				break;
+			}
+		}
+		if (skb) {
+			meta->timestamp = now;
+			skb_queue_tail(&sk->sk_receive_queue, skb);
+		}
+
+		/* Remove packets from receive queue as long as
+		 * 1. the receive buffer is full,
+		 * 2. they are queued longer than one second, or
+		 * 3. there are no missing packets before them. */
+		skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {
+			meta = skb_meta(skb);
+			if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
+					now - meta->timestamp < HZ &&
+					meta->sequence != opt->recv_sequence)
+				break;
+			skb_unlink(skb, &sk->sk_receive_queue);
+			opt->recv_sequence = meta->sequence + 1;
+			skb_orphan(skb);
+			ppp_input(&pppox_sk(sk)->chan, skb);
+		}
+		return NET_RX_SUCCESS;
+	}
+
+	/* Flush receive queue if sequencing is disabled. */
+	skb_queue_purge(&sk->sk_receive_queue);
+	skb_orphan(skb);
+	ppp_input(&pppox_sk(sk)->chan, skb);
+	return NET_RX_SUCCESS;
+drop:
+	kfree_skb(skb);
+	return NET_RX_DROP;
+}
+
+static void pppopns_recv(struct sock *sk_raw, int length)
+{
+	struct sk_buff *skb;
+	while ((skb = skb_dequeue(&sk_raw->sk_receive_queue))) {
+		sock_hold(sk_raw);
+		sk_receive_skb(sk_raw, skb, 0);
+	}
+}
+
+static struct sk_buff_head delivery_queue;
+
+static void pppopns_xmit_core(struct work_struct *delivery_work)
+{
+	mm_segment_t old_fs = get_fs();
+	struct sk_buff *skb;
+
+	set_fs(KERNEL_DS);
+	while ((skb = skb_dequeue(&delivery_queue))) {
+		struct sock *sk_raw = skb->sk;
+		struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
+		struct msghdr msg = {
+			.msg_iov = (struct iovec *)&iov,
+			.msg_iovlen = 1,
+			.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
+		};
+		sk_raw->sk_prot->sendmsg(NULL, sk_raw, &msg, skb->len);
+		kfree_skb(skb);
+	}
+	set_fs(old_fs);
+}
+
+static DECLARE_WORK(delivery_work, pppopns_xmit_core);
+
+static int pppopns_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+	struct sock *sk_raw = (struct sock *)chan->private;
+	struct pppopns_opt *opt = &pppox_sk(sk_raw->sk_user_data)->proto.pns;
+	struct header *hdr;
+	__u16 length;
+
+	/* Install PPP address and control. */
+	skb_push(skb, 2);
+	skb->data[0] = PPP_ADDR;
+	skb->data[1] = PPP_CTRL;
+	length = skb->len;
+
+	/* Install PPTP GRE header. */
+	hdr = (struct header *)skb_push(skb, 12);
+	hdr->bits = PPTP_GRE_BITS | PPTP_GRE_SEQ_BIT;
+	hdr->type = PPTP_GRE_TYPE;
+	hdr->length = htons(length);
+	hdr->call = opt->remote;
+	hdr->sequence = htonl(opt->xmit_sequence);
+	opt->xmit_sequence++;
+
+	/* Now send the packet via the delivery queue. */
+	skb_set_owner_w(skb, sk_raw);
+	skb_queue_tail(&delivery_queue, skb);
+	schedule_work(&delivery_work);
+	return 1;
+}
+
+/******************************************************************************/
+
+static struct ppp_channel_ops pppopns_channel_ops = {
+	.start_xmit = pppopns_xmit,
+};
+
+static int pppopns_connect(struct socket *sock, struct sockaddr *useraddr,
+	int addrlen, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct pppox_sock *po = pppox_sk(sk);
+	struct sockaddr_pppopns *addr = (struct sockaddr_pppopns *)useraddr;
+	struct sockaddr_storage ss;
+	struct socket *sock_tcp = NULL;
+	struct socket *sock_raw = NULL;
+	struct sock *sk_tcp;
+	struct sock *sk_raw;
+	int error;
+
+	if (addrlen != sizeof(struct sockaddr_pppopns))
+		return -EINVAL;
+
+	lock_sock(sk);
+	error = -EALREADY;
+	if (sk->sk_state != PPPOX_NONE)
+		goto out;
+
+	sock_tcp = sockfd_lookup(addr->tcp_socket, &error);
+	if (!sock_tcp)
+		goto out;
+	sk_tcp = sock_tcp->sk;
+	error = -EPROTONOSUPPORT;
+	if (sk_tcp->sk_protocol != IPPROTO_TCP)
+		goto out;
+	addrlen = sizeof(struct sockaddr_storage);
+	error = kernel_getpeername(sock_tcp, (struct sockaddr *)&ss, &addrlen);
+	if (error)
+		goto out;
+	if (!sk_tcp->sk_bound_dev_if) {
+		struct dst_entry *dst = sk_dst_get(sk_tcp);
+		error = -ENODEV;
+		if (!dst)
+			goto out;
+		sk_tcp->sk_bound_dev_if = dst->dev->ifindex;
+		dst_release(dst);
+	}
+
+	error = sock_create(ss.ss_family, SOCK_RAW, IPPROTO_GRE, &sock_raw);
+	if (error)
+		goto out;
+	sk_raw = sock_raw->sk;
+	sk_raw->sk_bound_dev_if = sk_tcp->sk_bound_dev_if;
+	error = kernel_connect(sock_raw, (struct sockaddr *)&ss, addrlen, 0);
+	if (error)
+		goto out;
+
+	po->chan.hdrlen = 14;
+	po->chan.private = sk_raw;
+	po->chan.ops = &pppopns_channel_ops;
+	po->chan.mtu = PPP_MRU - 80;
+	po->proto.pns.local = addr->local;
+	po->proto.pns.remote = addr->remote;
+	po->proto.pns.data_ready = sk_raw->sk_data_ready;
+	po->proto.pns.backlog_rcv = sk_raw->sk_backlog_rcv;
+
+	error = ppp_register_channel(&po->chan);
+	if (error)
+		goto out;
+
+	sk->sk_state = PPPOX_CONNECTED;
+	lock_sock(sk_raw);
+	sk_raw->sk_data_ready = pppopns_recv;
+	sk_raw->sk_backlog_rcv = pppopns_recv_core;
+	sk_raw->sk_user_data = sk;
+	release_sock(sk_raw);
+out:
+	if (sock_tcp)
+		sockfd_put(sock_tcp);
+	if (error && sock_raw)
+		sock_release(sock_raw);
+	release_sock(sk);
+	return error;
+}
+
+static int pppopns_release(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+
+	if (!sk)
+		return 0;
+
+	lock_sock(sk);
+	if (sock_flag(sk, SOCK_DEAD)) {
+		release_sock(sk);
+		return -EBADF;
+	}
+
+	if (sk->sk_state != PPPOX_NONE) {
+		struct sock *sk_raw = (struct sock *)pppox_sk(sk)->chan.private;
+		lock_sock(sk_raw);
+		skb_queue_purge(&sk->sk_receive_queue);
+		pppox_unbind_sock(sk);
+		sk_raw->sk_data_ready = pppox_sk(sk)->proto.pns.data_ready;
+		sk_raw->sk_backlog_rcv = pppox_sk(sk)->proto.pns.backlog_rcv;
+		sk_raw->sk_user_data = NULL;
+		release_sock(sk_raw);
+		sock_release(sk_raw->sk_socket);
+	}
+
+	sock_orphan(sk);
+	sock->sk = NULL;
+	release_sock(sk);
+	sock_put(sk);
+	return 0;
+}
+
+/******************************************************************************/
+
+static struct proto pppopns_proto = {
+	.name = "PPPOPNS",
+	.owner = THIS_MODULE,
+	.obj_size = sizeof(struct pppox_sock),
+};
+
+static struct proto_ops pppopns_proto_ops = {
+	.family = PF_PPPOX,
+	.owner = THIS_MODULE,
+	.release = pppopns_release,
+	.bind = sock_no_bind,
+	.connect = pppopns_connect,
+	.socketpair = sock_no_socketpair,
+	.accept = sock_no_accept,
+	.getname = sock_no_getname,
+	.poll = sock_no_poll,
+	.ioctl = pppox_ioctl,
+	.listen = sock_no_listen,
+	.shutdown = sock_no_shutdown,
+	.setsockopt = sock_no_setsockopt,
+	.getsockopt = sock_no_getsockopt,
+	.sendmsg = sock_no_sendmsg,
+	.recvmsg = sock_no_recvmsg,
+	.mmap = sock_no_mmap,
+};
+
+static int pppopns_create(struct net *net, struct socket *sock)
+{
+	struct sock *sk;
+
+	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppopns_proto);
+	if (!sk)
+		return -ENOMEM;
+
+	sock_init_data(sock, sk);
+	sock->state = SS_UNCONNECTED;
+	sock->ops = &pppopns_proto_ops;
+	sk->sk_protocol = PX_PROTO_OPNS;
+	sk->sk_state = PPPOX_NONE;
+	return 0;
+}
+
+/******************************************************************************/
+
+static struct pppox_proto pppopns_pppox_proto = {
+	.create = pppopns_create,
+	.owner = THIS_MODULE,
+};
+
+static int __init pppopns_init(void)
+{
+	int error;
+
+	error = proto_register(&pppopns_proto, 0);
+	if (error)
+		return error;
+
+	error = register_pppox_proto(PX_PROTO_OPNS, &pppopns_pppox_proto);
+	if (error)
+		proto_unregister(&pppopns_proto);
+	else
+		skb_queue_head_init(&delivery_queue);
+	return error;
+}
+
+static void __exit pppopns_exit(void)
+{
+	unregister_pppox_proto(PX_PROTO_OPNS);
+	proto_unregister(&pppopns_proto);
+}
+
+module_init(pppopns_init);
+module_exit(pppopns_exit);
+
+MODULE_DESCRIPTION("PPP on PPTP Network Server (PPPoPNS)");
+MODULE_AUTHOR("Chia-chi Yeh <chiachi@android.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 5824971..aeff706 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1898,6 +1898,12 @@
 	int vnet_hdr_sz;
 	int ret;
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
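+	/* On Android, only CAP_NET_ADMIN may issue TUN ioctls other
+	 * than TUNGETIFF. */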
+	if (cmd != TUNGETIFF && !capable(CAP_NET_ADMIN))
+		return -EPERM;
+#endif
+
 	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
 		if (copy_from_user(&ifr, argp, ifreq_len))
 			return -EFAULT;
diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c
index 0d1fe89..49fe1fc 100644
--- a/drivers/net/usb/cdc_subset.c
+++ b/drivers/net/usb/cdc_subset.c
@@ -222,10 +222,9 @@
 
 #endif	/* CONFIG_USB_ARMLINUX */
 
-
 /*-------------------------------------------------------------------------*/
 
-#ifndef	HAVE_HARDWARE
+#ifndef HAVE_HARDWARE
 #warning You need to configure some hardware for this driver
 #endif
 
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index f8f0156..83b6a7a 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -264,6 +264,16 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called mwl8k.  If unsure, say N.
 
+config WIFI_CONTROL_FUNC
+	bool "Enable WiFi control function abstraction"
+	help
+	  Enables power/reset/card-detect function abstraction for
+	  platform WiFi modules.
+
+config WIFI_PLATFORM_DATA
+	bool "Enable WiFi platform data"
+	help
+	  Enables platform data support (platform_wifi) for WiFi
+	  drivers.
+
 source "drivers/net/wireless/ath/Kconfig"
 source "drivers/net/wireless/b43/Kconfig"
 source "drivers/net/wireless/b43legacy/Kconfig"
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 5c9736a..008b4d9 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -3244,6 +3244,14 @@
 	struct ath6kl_vif *vif = netdev_priv(dev);
 	u16 interval;
 	int ret, rssi_thold;
+	int n_match_sets = request->n_match_sets;
+
+	/* If there's a matchset w/o an SSID, then assume it's just for
+	 * the RSSI (nothing else is currently supported) and ignore it.
+	 * The device only supports a global RSSI filter that we set below.
+	 */
+	if (n_match_sets == 1 && !request->match_sets[0].ssid.ssid_len)
+		n_match_sets = 0;
 
 	if (ar->state != ATH6KL_STATE_ON)
 		return -EIO;
@@ -3256,11 +3264,11 @@
 	ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
 				      request->n_ssids,
 				      request->match_sets,
-				      request->n_match_sets);
+				      n_match_sets);
 	if (ret < 0)
 		return ret;
 
-	if (!request->n_match_sets) {
+	if (!n_match_sets) {
 		ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
 					       ALL_BSS_FILTER, 0);
 		if (ret < 0)
@@ -3274,12 +3282,12 @@
 
 	if (test_bit(ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD,
 		     ar->fw_capabilities)) {
-		if (request->rssi_thold <= NL80211_SCAN_RSSI_THOLD_OFF)
+		if (request->min_rssi_thold <= NL80211_SCAN_RSSI_THOLD_OFF)
 			rssi_thold = 0;
-		else if (request->rssi_thold < -127)
+		else if (request->min_rssi_thold < -127)
 			rssi_thold = -127;
 		else
-			rssi_thold = request->rssi_thold;
+			rssi_thold = request->min_rssi_thold;
 
 		ret = ath6kl_wmi_set_rssi_filter_cmd(ar->wmi, vif->fw_vif_idx,
 						     rssi_thold);
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 1187737..8c5b334 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -661,36 +661,66 @@
 	return 0;
 }
 
+/*
+ * Convert configs to something easy to use in C code
+ */
+#if defined(CONFIG_CMDLINE_FORCE)
+static const int overwrite_incoming_cmdline = 1;
+static const int read_dt_cmdline;
+static const int concat_cmdline;
+#elif defined(CONFIG_CMDLINE_EXTEND)
+static const int overwrite_incoming_cmdline;
+static const int read_dt_cmdline = 1;
+static const int concat_cmdline = 1;
+#else /* CMDLINE_FROM_BOOTLOADER */
+static const int overwrite_incoming_cmdline;
+static const int read_dt_cmdline = 1;
+static const int concat_cmdline;
+#endif
+
+#ifdef CONFIG_CMDLINE
+static const char *config_cmdline = CONFIG_CMDLINE;
+#else
+static const char *config_cmdline = "";
+#endif
+
 int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
 				     int depth, void *data)
 {
-	unsigned long l;
-	char *p;
+	unsigned long l = 0;
+	char *p = NULL;
+	char *cmdline = data;
 
 	pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
 
-	if (depth != 1 || !data ||
+	if (depth != 1 || !cmdline ||
 	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
 		return 0;
 
 	early_init_dt_check_for_initrd(node);
 
-	/* Retrieve command line */
-	p = of_get_flat_dt_prop(node, "bootargs", &l);
-	if (p != NULL && l > 0)
-		strlcpy(data, p, min((int)l, COMMAND_LINE_SIZE));
+	/* Put CONFIG_CMDLINE in if forced, or if cmdline was empty to begin with */
+	if (overwrite_incoming_cmdline || !cmdline[0])
+		strlcpy(cmdline, config_cmdline, COMMAND_LINE_SIZE);
 
-	/*
-	 * CONFIG_CMDLINE is meant to be a default in case nothing else
-	 * managed to set the command line, unless CONFIG_CMDLINE_FORCE
-	 * is set in which case we override whatever was found earlier.
-	 */
-#ifdef CONFIG_CMDLINE
-#ifndef CONFIG_CMDLINE_FORCE
-	if (!((char *)data)[0])
-#endif
-		strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#endif /* CONFIG_CMDLINE */
+	/* Retrieve command line unless forcing */
+	if (read_dt_cmdline)
+		p = of_get_flat_dt_prop(node, "bootargs", &l);
+
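+	/* Either append the DT bootargs to CONFIG_CMDLINE or replace the
+	 * default with them, depending on the policy selected above. */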
+	if (p != NULL && l > 0) {
+		if (concat_cmdline) {
+			int cmdline_len;
+			int copy_len;
+			strlcat(cmdline, " ", COMMAND_LINE_SIZE);
+			cmdline_len = strlen(cmdline);
+			copy_len = COMMAND_LINE_SIZE - cmdline_len - 1;
+			copy_len = min((int)l, copy_len);
+			strncpy(cmdline + cmdline_len, p, copy_len);
+			cmdline[cmdline_len + copy_len] = '\0';
+		} else {
+			strlcpy(cmdline, p, min((int)l, COMMAND_LINE_SIZE));
+		}
+	}
 
 	pr_debug("Command line is: %s\n", (char*)data);
 
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 0c3efcf..5651527 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -34,6 +34,7 @@
 # Some architectures use the generic PCI setup functions
 #
 obj-$(CONFIG_X86) += setup-bus.o
+obj-$(CONFIG_ATOM_SOC_POWER) += pci-atom_soc.o
 obj-$(CONFIG_ALPHA) += setup-bus.o setup-irq.o
 obj-$(CONFIG_ARM) += setup-bus.o setup-irq.o
 obj-$(CONFIG_UNICORE32) += setup-bus.o setup-irq.o
diff --git a/drivers/pci/pci-atom_soc.c b/drivers/pci/pci-atom_soc.c
new file mode 100644
index 0000000..edc3e23
--- /dev/null
+++ b/drivers/pci/pci-atom_soc.c
@@ -0,0 +1,78 @@
+/*
+ * pci-atom_soc.c - register Intel MID PCI platform ops
+ *
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/init.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/kernel.h>
+
+#include "pci.h"
+
+static bool mid_pci_power_manageable(struct pci_dev *dev)
+{
+	return true;
+}
+
+static int mid_pci_sleep_wake(struct pci_dev *dev, bool enable)
+{
+	return 0;
+}
+
+static int mid_pci_run_wake(struct pci_dev *dev, bool enable)
+{
+	return 0;
+}
+
+static struct pci_platform_pm_ops mid_pci_platform_pm = {
+	.is_manageable = mid_pci_power_manageable,
+	.sleep_wake = mid_pci_sleep_wake,
+	.run_wake = mid_pci_run_wake,
+	.set_state = pmu_pci_set_power_state,
+	/* The PMU driver supplies the D-state choice on these platforms */
+	.choose_state = pmu_pci_choose_state,
+};
+
+/**
+ * mid_pci_init - register platform PM callbacks for all PCI devices
+ *
+ * Used for platform-specific device power on/shutdown activities.
+ */
+static int __init mid_pci_init(void)
+{
+	if (boot_cpu_data.x86 != 6)
+		return 0;
+
+	/*
+	 * n.b. this model check does not uniquely identify the platform,
+	 * and additional checks are necessary inside the pmu driver
+	 */
+	switch (boot_cpu_data.x86_model) {
+	case INTEL_ATOM_MFLD:
+	case INTEL_ATOM_CLV:
+	case INTEL_ATOM_MRFLD:
+		pci_set_platform_pm(&mid_pci_platform_pm);
+		break;
+	}
+
+	return 0;
+}
+arch_initcall(mid_pci_init);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index d6ceb2e..6904a06 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -562,8 +562,11 @@
 	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
 		pci_dev_d3_sleep(dev);
 	else if (state == PCI_D2 || dev->current_state == PCI_D2)
+#ifdef CONFIG_ATOM_SOC_POWER
+		; /* On Intel MID platforms PCI delays are handled by the SCU */
+#else
 		udelay(PCI_PM_D2_DELAY);
-
+#endif
 	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
 	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
 	if (dev->current_state != state && printk_ratelimit())
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index b6625e5..c081f11 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3112,7 +3112,11 @@
 
 	pci_apply_fixup_final_quirks = true;
 	for_each_pci_dev(dev) {
-		pci_fixup_device(pci_fixup_final, dev);
+		if (dev->device == 0x119d)
+			printk(KERN_INFO "Skipping final fixup for USB EHCI Tangier (HSIC): no driver in this kernel\n");
+		else
+			pci_fixup_device(pci_fixup_final, dev);
+
 		/*
 		 * If arch hasn't set it explicitly yet, use the CLS
 		 * value shared by all PCI devices.  If there's a
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 8577261..d42f5ef 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -651,6 +651,20 @@
 	  IPC is used to bridge the communications between kernel and SCU on
 	  some embedded Intel x86 platforms. This is not needed for PC-type
 	  machines.
+choice
+	prompt "IPC access mode"
+	depends on INTEL_SCU_IPC
+	default INTEL_SCU_IPC_INTR_MODE
+	---help---
+	  Select the desired access mode for SCU IPC calls.
+
+config INTEL_SCU_IPC_INTR_MODE
+	bool "Intel SCU IPC interrupt mode"
+
+config INTEL_SCU_IPC_POLL_MODE
+	bool "Intel SCU IPC polling mode"
+
+endchoice
 
 config INTEL_SCU_IPC_UTIL
 	tristate "Intel SCU IPC utility driver"
@@ -789,4 +803,30 @@
 	  a paravirtualized device provided by QEMU; it lets a virtual machine
 	  (guest) communicate panic events to the host.
 
+config INTEL_SCU_FLIS
+	bool "scu flis driver config"
+	depends on INTEL_SCU_IPC
+	default y
+	help
+	  This driver provides sysfs interfaces for SCU FLIS access.
+	  They can be used to read and write FLIS registers and to
+	  configure pin pull-up/down.
+
+config INTEL_PSH_IPC
+	bool "Intel PSH IPC Support"
+	depends on X86_INTEL_MID
+	---help---
+	  PSH (Platform Services Hub) is a low-frequency, low-power IA core
+	  on the Tangier platform. It runs RTOS software which independently
+	  controls and collects sensor data, pre-processes it, and
+	  communicates with the Atom core. This lets the Atom side stay in
+	  low-power modes longer, while all sensor data is still collected
+	  without loss.
+
+	  PSH IPC is the bridge that lets the OS sensor service control and
+	  access the PSH sensors; it carries the communication between the
+	  kernel and PSH. This is not needed for PC-type machines.
+
+	  Say Y here to get Intel PSH IPC support.
+
 endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index ef0ec74..7d82ebf 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -36,9 +36,9 @@
 obj-$(CONFIG_ACPI_TOSHIBA)	+= toshiba_acpi.o
 
 obj-$(CONFIG_TOSHIBA_BT_RFKILL)	+= toshiba_bluetooth.o
-obj-$(CONFIG_INTEL_SCU_IPC)	+= intel_scu_ipc.o
+obj-$(CONFIG_INTEL_SCU_IPC)	+= intel_scu_ipc.o intel_scu_pmic.o intel_scu_mip.o intel_scu_fw_update.o
 obj-$(CONFIG_INTEL_SCU_IPC_UTIL) += intel_scu_ipcutil.o
-obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o
+obj-$(CONFIG_INTEL_SCU_FLIS)	+= intel_scu_flis.o
 obj-$(CONFIG_INTEL_IPS)		+= intel_ips.o
 obj-$(CONFIG_GPIO_INTEL_PMIC)	+= intel_pmic_gpio.o
 obj-$(CONFIG_XO1_RFKILL)	+= xo1-rfkill.o
@@ -53,3 +53,5 @@
 obj-$(CONFIG_CHROMEOS_LAPTOP)	+= chromeos_laptop.o
 
 obj-$(CONFIG_PVPANIC)           += pvpanic.o
+
+obj-$(CONFIG_INTEL_PSH_IPC)     += intel_psh_ipc.o
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index f59683a..10eaafe 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -21,56 +21,100 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/input.h>
-#include <linux/mfd/intel_msic.h>
+#include <linux/io.h>
+#include <linux/rpmsg.h>
+#include <linux/async.h>
+#include <asm/intel_mid_powerbtn.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <linux/suspend.h>
 
 #define DRIVER_NAME "msic_power_btn"
 
-#define MSIC_PB_LEVEL	(1 << 3) /* 1 - release, 0 - press */
+struct mid_pb_priv {
+	struct input_dev *input;
+	int irq;
+	void __iomem *pb_stat;
+	u16 pb_level;
+	u16 irq_lvl1_mask;
+	bool irq_ack;
+};
 
-/*
- * MSIC document ti_datasheet defines the 1st bit reg 0x21 is used to mask
- * power button interrupt
- */
-#define MSIC_PWRBTNM    (1 << 0)
-
-static irqreturn_t mfld_pb_isr(int irq, void *dev_id)
+static inline int pb_clear_bits(u16 addr, u8 mask)
 {
-	struct input_dev *input = dev_id;
-	int ret;
+	return intel_scu_ipc_update_register(addr, 0, mask);
+}
+
+static irqreturn_t mid_pb_isr(int irq, void *dev_id)
+{
+	struct mid_pb_priv *priv = dev_id;
 	u8 pbstat;
 
-	ret = intel_msic_reg_read(INTEL_MSIC_PBSTATUS, &pbstat);
-	dev_dbg(input->dev.parent, "PB_INT status= %d\n", pbstat);
+	pbstat = readb(priv->pb_stat);
+	dev_dbg(&priv->input->dev, "pbstat: 0x%x\n", pbstat);
 
-	if (ret < 0) {
-		dev_err(input->dev.parent, "Read error %d while reading"
-			       " MSIC_PB_STATUS\n", ret);
-	} else {
-		input_event(input, EV_KEY, KEY_POWER,
-			       !(pbstat & MSIC_PB_LEVEL));
-		input_sync(input);
-	}
+	input_event(priv->input, EV_KEY, KEY_POWER, !(pbstat & priv->pb_level));
+	input_sync(priv->input);
+
+	if (pbstat & priv->pb_level)
+		pr_info("[%s] power button released\n", priv->input->name);
+	else
+		pr_info("[%s] power button pressed\n", priv->input->name);
+	freeze_wake();
+
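+	/* Wake the threaded handler, which re-arms the level-1 interrupt
+	 * mask if this platform needs the explicit ack. */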
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t mid_pb_threaded_isr(int irq, void *dev_id)
+{
+	struct mid_pb_priv *priv = dev_id;
+
+	if (priv->irq_ack)
+		pb_clear_bits(priv->irq_lvl1_mask, MSIC_PWRBTNM);
 
 	return IRQ_HANDLED;
 }
 
-static int mfld_pb_probe(struct platform_device *pdev)
+static int mid_pb_probe(struct platform_device *pdev)
 {
 	struct input_dev *input;
-	int irq = platform_get_irq(pdev, 0);
-	int error;
+	struct mid_pb_priv *priv;
+	int irq;
+	int ret;
+	struct intel_msic_power_btn_platform_data *pdata;
 
+	if (pdev == NULL)
+		return -ENODEV;
+
+	pdata = pdev->dev.platform_data;
+	if (pdata == NULL) {
+		dev_err(&pdev->dev, "No power button platform data\n");
+		return -EINVAL;
+	}
+
+	dev_info(&pdev->dev, "Probed mid powerbutton devivce\n");
+
+	irq = platform_get_irq(pdev, 0);
 	if (irq < 0)
 		return -EINVAL;
 
+	priv = kzalloc(sizeof(struct mid_pb_priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
 	input = input_allocate_device();
 	if (!input) {
-		dev_err(&pdev->dev, "Input device allocation error\n");
+		kfree(priv);
 		return -ENOMEM;
 	}
 
+	priv->input = input;
+	priv->irq = irq;
+	platform_set_drvdata(pdev, priv);
+
 	input->name = pdev->name;
 	input->phys = "power-button/input0";
 	input->id.bustype = BUS_HOST;
@@ -78,71 +122,159 @@
 
 	input_set_capability(input, EV_KEY, KEY_POWER);
 
-	error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_NO_SUSPEND,
-			DRIVER_NAME, input);
-	if (error) {
-		dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
-				"button\n", irq);
-		goto err_free_input;
+	priv->pb_stat = ioremap(pdata->pbstat, MSIC_PB_LEN);
+	if (!priv->pb_stat) {
+		ret = -ENOMEM;
+		goto fail;
 	}
 
-	error = input_register_device(input);
-	if (error) {
-		dev_err(&pdev->dev, "Unable to register input dev, error "
-				"%d\n", error);
-		goto err_free_irq;
+	ret = input_register_device(input);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"unable to register input dev, error %d\n", ret);
+		goto out_iounmap;
 	}
 
-	platform_set_drvdata(pdev, input);
+	priv->pb_level = pdata->pb_level;
+	priv->irq_lvl1_mask = pdata->irq_lvl1_mask;
 
-	/*
-	 * SCU firmware might send power button interrupts to IA core before
+	/* Unmask the PBIRQ and MPBIRQ on Tangier */
+	if (pdata->irq_ack) {
+		pdata->irq_ack(pdata);
+		priv->irq_ack = true;
+	}
+
+	ret = request_threaded_irq(priv->irq, mid_pb_isr, mid_pb_threaded_isr,
+		IRQF_NO_SUSPEND, DRIVER_NAME, priv);
+
+	if (ret) {
+		dev_err(&pdev->dev,
+			"unable to request irq %d for power button\n", irq);
+		goto out_unregister_input;
+	}
+
+	/* SCU firmware might send power button interrupts to IA core before
 	 * kernel boots and doesn't get EOI from IA core. The first bit of
-	 * MSIC reg 0x21 is kept masked, and SCU firmware doesn't send new
+	 * MSIC lvl1 mask reg is kept masked, and SCU firmware doesn't send new
 	 * power interrupt to Android kernel. Unmask the bit when probing
 	 * power button in kernel.
-	 * There is a very narrow race between irq handler and power button
-	 * initialization. The race happens rarely. So we needn't worry
-	 * about it.
 	 */
-	error = intel_msic_reg_update(INTEL_MSIC_IRQLVL1MSK, 0, MSIC_PWRBTNM);
-	if (error) {
-		dev_err(&pdev->dev, "Unable to clear power button interrupt, "
-				"error: %d\n", error);
-		goto err_free_irq;
-	}
+	pb_clear_bits(priv->irq_lvl1_mask, MSIC_PWRBTNM);
 
 	return 0;
 
-err_free_irq:
-	free_irq(irq, input);
-err_free_input:
-	input_free_device(input);
-	return error;
-}
-
-static int mfld_pb_remove(struct platform_device *pdev)
-{
-	struct input_dev *input = platform_get_drvdata(pdev);
-	int irq = platform_get_irq(pdev, 0);
-
-	free_irq(irq, input);
+out_unregister_input:
 	input_unregister_device(input);
+	input = NULL;
+out_iounmap:
+	iounmap(priv->pb_stat);
+fail:
 	platform_set_drvdata(pdev, NULL);
+	input_free_device(input);
+	kfree(priv);
+	return ret;
+}
+
+static int mid_pb_remove(struct platform_device *pdev)
+{
+	struct mid_pb_priv *priv = platform_get_drvdata(pdev);
+
+	iounmap(priv->pb_stat);
+	free_irq(priv->irq, priv);
+	platform_set_drvdata(pdev, NULL);
+	input_unregister_device(priv->input);
+	kfree(priv);
 
 	return 0;
 }
 
-static struct platform_driver mfld_pb_driver = {
+static const struct platform_device_id mid_pb_table[] = {
+	{"mid_powerbtn", 1},
+	{ },	/* table must be zero-terminated */
+};
+
+static struct platform_driver mid_pb_driver = {
 	.driver = {
 		.name = DRIVER_NAME,
 		.owner = THIS_MODULE,
 	},
-	.probe	= mfld_pb_probe,
-	.remove	= mfld_pb_remove,
+	.probe	= mid_pb_probe,
+	.remove	= mid_pb_remove,
+	.id_table = mid_pb_table,
 };
 
-module_platform_driver(mfld_pb_driver);
+static int __init mid_pb_module_init(void)
+{
+	return platform_driver_register(&mid_pb_driver);
+}
+
+static void mid_pb_module_exit(void)
+{
+	platform_driver_unregister(&mid_pb_driver);
+}
+
+/* RPMSG related functionality */
+
+static int mid_pb_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed mid_pb rpmsg device\n");
+
+	ret = mid_pb_module_init();
+out:
+	return ret;
+}
+
+static void mid_pb_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	mid_pb_module_exit();
+	dev_info(&rpdev->dev, "Removed mid_pb rpmsg device\n");
+}
+
+static void mid_pb_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len, true);
+}
+
+static struct rpmsg_device_id mid_pb_id_table[] = {
+	{ .name	= "rpmsg_mid_powerbtn" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, mid_pb_id_table);
+
+static struct rpmsg_driver mid_pb_rpmsg_driver = {
+	.drv.name	= DRIVER_NAME,
+	.drv.owner	= THIS_MODULE,
+	.probe		= mid_pb_rpmsg_probe,
+	.callback	= mid_pb_rpmsg_cb,
+	.remove		= mid_pb_rpmsg_remove,
+	.id_table	= mid_pb_id_table,
+};
+
+static int __init mid_pb_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&mid_pb_rpmsg_driver);
+}
+
+static void __exit mid_pb_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&mid_pb_rpmsg_driver);
+}
+
+late_initcall(mid_pb_rpmsg_init);
+
+module_exit(mid_pb_rpmsg_exit);
 
 MODULE_AUTHOR("Hong Liu <hong.liu@intel.com>");
 MODULE_DESCRIPTION("Intel Medfield Power Button Driver");
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
deleted file mode 100644
index 81c491e..0000000
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ /dev/null
@@ -1,574 +0,0 @@
-/*
- * intel_mid_thermal.c - Intel MID platform thermal driver
- *
- * Copyright (C) 2011 Intel Corporation
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.        See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * Author: Durgadoss R <durgadoss.r@intel.com>
- */
-
-#define pr_fmt(fmt) "intel_mid_thermal: " fmt
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/param.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/pm.h>
-#include <linux/thermal.h>
-#include <linux/mfd/intel_msic.h>
-
-/* Number of thermal sensors */
-#define MSIC_THERMAL_SENSORS	4
-
-/* ADC1 - thermal registers */
-#define MSIC_ADC_ENBL		0x10
-#define MSIC_ADC_START		0x08
-
-#define MSIC_ADCTHERM_ENBL	0x04
-#define MSIC_ADCRRDATA_ENBL	0x05
-#define MSIC_CHANL_MASK_VAL	0x0F
-
-#define MSIC_STOPBIT_MASK	16
-#define MSIC_ADCTHERM_MASK	4
-/* Number of ADC channels */
-#define ADC_CHANLS_MAX		15
-#define ADC_LOOP_MAX		(ADC_CHANLS_MAX - MSIC_THERMAL_SENSORS)
-
-/* ADC channel code values */
-#define SKIN_SENSOR0_CODE	0x08
-#define SKIN_SENSOR1_CODE	0x09
-#define SYS_SENSOR_CODE		0x0A
-#define MSIC_DIE_SENSOR_CODE	0x03
-
-#define SKIN_THERM_SENSOR0	0
-#define SKIN_THERM_SENSOR1	1
-#define SYS_THERM_SENSOR2	2
-#define MSIC_DIE_THERM_SENSOR3	3
-
-/* ADC code range */
-#define ADC_MAX			977
-#define ADC_MIN			162
-#define ADC_VAL0C		887
-#define ADC_VAL20C		720
-#define ADC_VAL40C		508
-#define ADC_VAL60C		315
-
-/* ADC base addresses */
-#define ADC_CHNL_START_ADDR	INTEL_MSIC_ADC1ADDR0	/* increments by 1 */
-#define ADC_DATA_START_ADDR	INTEL_MSIC_ADC1SNS0H	/* increments by 2 */
-
-/* MSIC die attributes */
-#define MSIC_DIE_ADC_MIN	488
-#define MSIC_DIE_ADC_MAX	1004
-
-/* This holds the address of the first free ADC channel,
- * among the 15 channels
- */
-static int channel_index;
-
-struct platform_info {
-	struct platform_device *pdev;
-	struct thermal_zone_device *tzd[MSIC_THERMAL_SENSORS];
-};
-
-struct thermal_device_info {
-	unsigned int chnl_addr;
-	int direct;
-	/* This holds the current temperature in millidegree celsius */
-	long curr_temp;
-};
-
-/**
- * to_msic_die_temp - converts adc_val to msic_die temperature
- * @adc_val: ADC value to be converted
- *
- * Can sleep
- */
-static int to_msic_die_temp(uint16_t adc_val)
-{
-	return (368 * (adc_val) / 1000) - 220;
-}
-
-/**
- * is_valid_adc - checks whether the adc code is within the defined range
- * @min: minimum value for the sensor
- * @max: maximum value for the sensor
- *
- * Can sleep
- */
-static int is_valid_adc(uint16_t adc_val, uint16_t min, uint16_t max)
-{
-	return (adc_val >= min) && (adc_val <= max);
-}
-
-/**
- * adc_to_temp - converts the ADC code to temperature in C
- * @direct: true if ths channel is direct index
- * @adc_val: the adc_val that needs to be converted
- * @tp: temperature return value
- *
- * Linear approximation is used to covert the skin adc value into temperature.
- * This technique is used to avoid very long look-up table to get
- * the appropriate temp value from ADC value.
- * The adc code vs sensor temp curve is split into five parts
- * to achieve very close approximate temp value with less than
- * 0.5C error
- */
-static int adc_to_temp(int direct, uint16_t adc_val, unsigned long *tp)
-{
-	int temp;
-
-	/* Direct conversion for die temperature */
-	if (direct) {
-		if (is_valid_adc(adc_val, MSIC_DIE_ADC_MIN, MSIC_DIE_ADC_MAX)) {
-			*tp = to_msic_die_temp(adc_val) * 1000;
-			return 0;
-		}
-		return -ERANGE;
-	}
-
-	if (!is_valid_adc(adc_val, ADC_MIN, ADC_MAX))
-		return -ERANGE;
-
-	/* Linear approximation for skin temperature */
-	if (adc_val > ADC_VAL0C)
-		temp = 177 - (adc_val/5);
-	else if ((adc_val <= ADC_VAL0C) && (adc_val > ADC_VAL20C))
-		temp = 111 - (adc_val/8);
-	else if ((adc_val <= ADC_VAL20C) && (adc_val > ADC_VAL40C))
-		temp = 92 - (adc_val/10);
-	else if ((adc_val <= ADC_VAL40C) && (adc_val > ADC_VAL60C))
-		temp = 91 - (adc_val/10);
-	else
-		temp = 112 - (adc_val/6);
-
-	/* Convert temperature in celsius to milli degree celsius */
-	*tp = temp * 1000;
-	return 0;
-}
-
-/**
- * mid_read_temp - read sensors for temperature
- * @temp: holds the current temperature for the sensor after reading
- *
- * reads the adc_code from the channel and converts it to real
- * temperature. The converted value is stored in temp.
- *
- * Can sleep
- */
-static int mid_read_temp(struct thermal_zone_device *tzd, unsigned long *temp)
-{
-	struct thermal_device_info *td_info = tzd->devdata;
-	uint16_t adc_val, addr;
-	uint8_t data = 0;
-	int ret;
-	unsigned long curr_temp;
-
-
-	addr = td_info->chnl_addr;
-
-	/* Enable the msic for conversion before reading */
-	ret = intel_msic_reg_write(INTEL_MSIC_ADC1CNTL3, MSIC_ADCRRDATA_ENBL);
-	if (ret)
-		return ret;
-
-	/* Re-toggle the RRDATARD bit (temporary workaround) */
-	ret = intel_msic_reg_write(INTEL_MSIC_ADC1CNTL3, MSIC_ADCTHERM_ENBL);
-	if (ret)
-		return ret;
-
-	/* Read the higher bits of data */
-	ret = intel_msic_reg_read(addr, &data);
-	if (ret)
-		return ret;
-
-	/* Shift bits to accommodate the lower two data bits */
-	adc_val = (data << 2);
-	addr++;
-
-	ret = intel_msic_reg_read(addr, &data);/* Read lower bits */
-	if (ret)
-		return ret;
-
-	/* Adding lower two bits to the higher bits */
-	data &= 03;
-	adc_val += data;
-
-	/* Convert ADC value to temperature */
-	ret = adc_to_temp(td_info->direct, adc_val, &curr_temp);
-	if (ret == 0)
-		*temp = td_info->curr_temp = curr_temp;
-	return ret;
-}
-
-/**
- * configure_adc - enables/disables the ADC for conversion
- * @val: zero: disables the ADC non-zero:enables the ADC
- *
- * Enable/Disable the ADC depending on the argument
- *
- * Can sleep
- */
-static int configure_adc(int val)
-{
-	int ret;
-	uint8_t data;
-
-	ret = intel_msic_reg_read(INTEL_MSIC_ADC1CNTL1, &data);
-	if (ret)
-		return ret;
-
-	if (val) {
-		/* Enable and start the ADC */
-		data |= (MSIC_ADC_ENBL | MSIC_ADC_START);
-	} else {
-		/* Just stop the ADC */
-		data &= (~MSIC_ADC_START);
-	}
-	return intel_msic_reg_write(INTEL_MSIC_ADC1CNTL1, data);
-}
-
-/**
- * set_up_therm_channel - enable thermal channel for conversion
- * @base_addr: index of free msic ADC channel
- *
- * Enable all the three channels for conversion
- *
- * Can sleep
- */
-static int set_up_therm_channel(u16 base_addr)
-{
-	int ret;
-
-	/* Enable all the sensor channels */
-	ret = intel_msic_reg_write(base_addr, SKIN_SENSOR0_CODE);
-	if (ret)
-		return ret;
-
-	ret = intel_msic_reg_write(base_addr + 1, SKIN_SENSOR1_CODE);
-	if (ret)
-		return ret;
-
-	ret = intel_msic_reg_write(base_addr + 2, SYS_SENSOR_CODE);
-	if (ret)
-		return ret;
-
-	/* Since this is the last channel, set the stop bit
-	 * to 1 by ORing the DIE_SENSOR_CODE with 0x10 */
-	ret = intel_msic_reg_write(base_addr + 3,
-			(MSIC_DIE_SENSOR_CODE | 0x10));
-	if (ret)
-		return ret;
-
-	/* Enable ADC and start it */
-	return configure_adc(1);
-}
-
-/**
- * reset_stopbit - sets the stop bit to 0 on the given channel
- * @addr: address of the channel
- *
- * Can sleep
- */
-static int reset_stopbit(uint16_t addr)
-{
-	int ret;
-	uint8_t data;
-	ret = intel_msic_reg_read(addr, &data);
-	if (ret)
-		return ret;
-	/* Set the stop bit to zero */
-	return intel_msic_reg_write(addr, (data & 0xEF));
-}
-
-/**
- * find_free_channel - finds an empty channel for conversion
- *
- * If the ADC is not enabled then start using 0th channel
- * itself. Otherwise find an empty channel by looking for a
- * channel in which the stopbit is set to 1. returns the index
- * of the first free channel if succeeds or an error code.
- *
- * Context: can sleep
- *
- * FIXME: Ultimately the channel allocator will move into the intel_scu_ipc
- * code.
- */
-static int find_free_channel(void)
-{
-	int ret;
-	int i;
-	uint8_t data;
-
-	/* check whether ADC is enabled */
-	ret = intel_msic_reg_read(INTEL_MSIC_ADC1CNTL1, &data);
-	if (ret)
-		return ret;
-
-	if ((data & MSIC_ADC_ENBL) == 0)
-		return 0;
-
-	/* ADC is already enabled; Looking for an empty channel */
-	for (i = 0; i < ADC_CHANLS_MAX; i++) {
-		ret = intel_msic_reg_read(ADC_CHNL_START_ADDR + i, &data);
-		if (ret)
-			return ret;
-
-		if (data & MSIC_STOPBIT_MASK) {
-			ret = i;
-			break;
-		}
-	}
-	return (ret > ADC_LOOP_MAX) ? (-EINVAL) : ret;
-}
-
-/**
- * mid_initialize_adc - initializing the ADC
- * @dev: our device structure
- *
- * Initialize the ADC for reading thermistor values. Can sleep.
- */
-static int mid_initialize_adc(struct device *dev)
-{
-	u8  data;
-	u16 base_addr;
-	int ret;
-
-	/*
-	 * Ensure that adctherm is disabled before we
-	 * initialize the ADC
-	 */
-	ret = intel_msic_reg_read(INTEL_MSIC_ADC1CNTL3, &data);
-	if (ret)
-		return ret;
-
-	data &= ~MSIC_ADCTHERM_MASK;
-	ret = intel_msic_reg_write(INTEL_MSIC_ADC1CNTL3, data);
-	if (ret)
-		return ret;
-
-	/* Index of the first channel in which the stop bit is set */
-	channel_index = find_free_channel();
-	if (channel_index < 0) {
-		dev_err(dev, "No free ADC channels");
-		return channel_index;
-	}
-
-	base_addr = ADC_CHNL_START_ADDR + channel_index;
-
-	if (!(channel_index == 0 || channel_index == ADC_LOOP_MAX)) {
-		/* Reset stop bit for channels other than 0 and 12 */
-		ret = reset_stopbit(base_addr);
-		if (ret)
-			return ret;
-
-		/* Index of the first free channel */
-		base_addr++;
-		channel_index++;
-	}
-
-	ret = set_up_therm_channel(base_addr);
-	if (ret) {
-		dev_err(dev, "unable to enable ADC");
-		return ret;
-	}
-	dev_dbg(dev, "ADC initialization successful");
-	return ret;
-}
-
-/**
- * initialize_sensor - sets default temp and timer ranges
- * @index: index of the sensor
- *
- * Context: can sleep
- */
-static struct thermal_device_info *initialize_sensor(int index)
-{
-	struct thermal_device_info *td_info =
-		kzalloc(sizeof(struct thermal_device_info), GFP_KERNEL);
-
-	if (!td_info)
-		return NULL;
-
-	/* Set the base addr of the channel for this sensor */
-	td_info->chnl_addr = ADC_DATA_START_ADDR + 2 * (channel_index + index);
-	/* Sensor 3 is direct conversion */
-	if (index == 3)
-		td_info->direct = 1;
-	return td_info;
-}
-
-/**
- * mid_thermal_resume - resume routine
- * @dev: device structure
- *
- * mid thermal resume: re-initializes the adc. Can sleep.
- */
-static int mid_thermal_resume(struct device *dev)
-{
-	return mid_initialize_adc(dev);
-}
-
-/**
- * mid_thermal_suspend - suspend routine
- * @dev: device structure
- *
- * mid thermal suspend implements the suspend functionality
- * by stopping the ADC. Can sleep.
- */
-static int mid_thermal_suspend(struct device *dev)
-{
-	/*
-	 * This just stops the ADC and does not disable it.
-	 * temporary workaround until we have a generic ADC driver.
-	 * If 0 is passed, it disables the ADC.
-	 */
-	return configure_adc(0);
-}
-
-static SIMPLE_DEV_PM_OPS(mid_thermal_pm,
-			 mid_thermal_suspend, mid_thermal_resume);
-
-/**
- * read_curr_temp - reads the current temperature and stores in temp
- * @temp: holds the current temperature value after reading
- *
- * Can sleep
- */
-static int read_curr_temp(struct thermal_zone_device *tzd, unsigned long *temp)
-{
-	WARN_ON(tzd == NULL);
-	return mid_read_temp(tzd, temp);
-}
-
-/* Can't be const */
-static struct thermal_zone_device_ops tzd_ops = {
-	.get_temp = read_curr_temp,
-};
-
-/**
- * mid_thermal_probe - mfld thermal initialize
- * @pdev: platform device structure
- *
- * mid thermal probe initializes the hardware and registers
- * all the sensors with the generic thermal framework. Can sleep.
- */
-static int mid_thermal_probe(struct platform_device *pdev)
-{
-	static char *name[MSIC_THERMAL_SENSORS] = {
-		"skin0", "skin1", "sys", "msicdie"
-	};
-
-	int ret;
-	int i;
-	struct platform_info *pinfo;
-
-	pinfo = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
-	if (!pinfo)
-		return -ENOMEM;
-
-	/* Initializing the hardware */
-	ret = mid_initialize_adc(&pdev->dev);
-	if (ret) {
-		dev_err(&pdev->dev, "ADC init failed");
-		kfree(pinfo);
-		return ret;
-	}
-
-	/* Register each sensor with the generic thermal framework*/
-	for (i = 0; i < MSIC_THERMAL_SENSORS; i++) {
-		struct thermal_device_info *td_info = initialize_sensor(i);
-
-		if (!td_info) {
-			ret = -ENOMEM;
-			goto err;
-		}
-		pinfo->tzd[i] = thermal_zone_device_register(name[i],
-				0, 0, td_info, &tzd_ops, NULL, 0, 0);
-		if (IS_ERR(pinfo->tzd[i])) {
-			kfree(td_info);
-			ret = PTR_ERR(pinfo->tzd[i]);
-			goto err;
-		}
-	}
-
-	pinfo->pdev = pdev;
-	platform_set_drvdata(pdev, pinfo);
-	return 0;
-
-err:
-	while (--i >= 0) {
-		kfree(pinfo->tzd[i]->devdata);
-		thermal_zone_device_unregister(pinfo->tzd[i]);
-	}
-	configure_adc(0);
-	kfree(pinfo);
-	return ret;
-}
-
-/**
- * mid_thermal_remove - mfld thermal finalize
- * @dev: platform device structure
- *
- * MLFD thermal remove unregisters all the sensors from the generic
- * thermal framework. Can sleep.
- */
-static int mid_thermal_remove(struct platform_device *pdev)
-{
-	int i;
-	struct platform_info *pinfo = platform_get_drvdata(pdev);
-
-	for (i = 0; i < MSIC_THERMAL_SENSORS; i++) {
-		kfree(pinfo->tzd[i]->devdata);
-		thermal_zone_device_unregister(pinfo->tzd[i]);
-	}
-
-	kfree(pinfo);
-	platform_set_drvdata(pdev, NULL);
-
-	/* Stop the ADC */
-	return configure_adc(0);
-}
-
-#define DRIVER_NAME "msic_thermal"
-
-static const struct platform_device_id therm_id_table[] = {
-	{ DRIVER_NAME, 1 },
-	{ "msic_thermal", 1 },
-	{ }
-};
-
-static struct platform_driver mid_thermal_driver = {
-	.driver = {
-		.name = DRIVER_NAME,
-		.owner = THIS_MODULE,
-		.pm = &mid_thermal_pm,
-	},
-	.probe = mid_thermal_probe,
-	.remove = mid_thermal_remove,
-	.id_table = therm_id_table,
-};
-
-module_platform_driver(mid_thermal_driver);
-
-MODULE_AUTHOR("Durgadoss R <durgadoss.r@intel.com>");
-MODULE_DESCRIPTION("Intel Medfield Platform Thermal Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index 6f4b728..c2d271a 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -31,7 +31,7 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
-#include <asm/intel_scu_ipc.h>
+#include <asm/intel_scu_pmic.h>
 #include <linux/device.h>
 #include <linux/intel_pmic_gpio.h>
 #include <linux/platform_device.h>
diff --git a/drivers/platform/x86/intel_psh_ipc.c b/drivers/platform/x86/intel_psh_ipc.c
new file mode 100644
index 0000000..12e8bf1
--- /dev/null
+++ b/drivers/platform/x86/intel_psh_ipc.c
@@ -0,0 +1,637 @@
+/*
+ * intel_psh_ipc.c: Driver for the Intel PSH IPC mechanism
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author: Yang Bin (bin.yang@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/semaphore.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <asm/intel_psh_ipc.h>
+#include <asm/intel-mid.h>
+#include <linux/fs.h>
+#include <linux/intel_mid_pm.h>
+
+#define PSH_ERR(fmt, arg...)	dev_err(&ipc_ctrl.pdev->dev, fmt, ##arg)
+#define PSH_DBG(fmt, arg...)	dev_dbg(&ipc_ctrl.pdev->dev, fmt, ##arg)
+
+#define STATUS_PSH2IA(x)	(1 << ((x) + 6))
+#define FLAG_BIND		(1 << 0)
+
+#define PIMR_ADDR(x)		(&ipc_ctrl.psh_regs->psh_regs_b_step.pimr##x)
+
+#define PSH_REG_ADDR(x)		(&ipc_ctrl.psh_regs->psh_regs_b_step.x)
+
+#define PSH_CH_HANDLE(x)	(ipc_ctrl.channel_handle[x])
+#define PSH_CH_DATA(x)		(ipc_ctrl.channel_data[x])
+#define PSH_CH_FLAG(x)		(ipc_ctrl.flags[x])
+
+/* PSH registers */
+union psh_registers {
+	/* reg mem map A */
+	struct {
+		u32		csr;	/* 00h */
+		u32		res1;	/* padding */
+		u32		pisr;	/* 08h */
+		u32		pimr0;	/* 0Ch */
+		u32		pimr1;	/* 10h */
+		u32		pimr2;	/* 14h */
+		u32		pimr3;	/* 18h */
+		u32		pmctl;	/* 1Ch */
+		u32		pmstat;	/* 20h */
+		u32		res2;	/* padding */
+		struct psh_msg	ia2psh[NUM_IA2PSH_IPC];/* 28h ~ 44h + 3 */
+		struct psh_msg	cry2psh;/* 48h ~ 4Ch + 3 */
+		struct psh_msg	scu2psh;/* 50h ~ 54h + 3 */
+		u32		res3[2];/* padding */
+		struct psh_msg	psh2ia[NUM_PSH2IA_IPC];/* 60h ~ 7Ch + 3 */
+		struct psh_msg	psh2cry;/* 80h ~ 84h + 3 */
+		struct psh_msg  psh2scu;/* 88h */
+		u32		msi_dir;/* 90h */
+		u32		res4[3];
+		u32		scratchpad[2];/* A0 */
+	} __packed psh_regs_a_step;
+	/* reg mem map B */
+	struct {
+		u32		pimr0;		/* 00h */
+		u32		csr;		/* 04h */
+		u32		pmctl;		/* 08h */
+		u32		pmstat;		/* 0Ch */
+		u32		psh_msi_direct;	/* 10h */
+		u32		res1[59];	/* 14h ~ FCh + 3, padding */
+		u32		pimr3;		/* 100h */
+		struct psh_msg	scu2psh;	/* 104h ~ 108h + 3 */
+		struct psh_msg	psh2scu;	/* 10Ch ~ 110h + 3 */
+		u32		res2[187];	/* 114h ~ 3FCh + 3, padding */
+		u32		pisr;		/* 400h */
+		u32		scratchpad[2];	/* 404h ~ 407h */
+		u32		res3[61];	/* 40Ch ~ 4FCh + 3, padding */
+		u32		pimr1;		/* 500h */
+		struct psh_msg	ia2psh[NUM_IA2PSH_IPC];	/* 504h ~ 520h + 3 */
+		struct psh_msg	psh2ia[NUM_PSH2IA_IPC];	/* 524h ~ 540h + 3 */
+		u32		res4[175];	/* 544h ~ 7FCh + 3, padding */
+		u32		pimr2;		/* 800h */
+		struct psh_msg	cry2psh;	/* 804h ~ 808h + 3 */
+		struct psh_msg	psh2cry;	/* 80Ch ~ 810h + 3 */
+	} __packed psh_regs_b_step;
+} __packed;
+
+static struct ipc_controller_t {
+	int			reg_map;
+	int			initialized;
+	struct pci_dev		*pdev;
+	spinlock_t		lock;
+	int			flags[NUM_ALL_CH];
+	union psh_registers	*psh_regs;
+	struct semaphore	ch_lock[NUM_ALL_CH];
+	struct mutex		psh_mutex;
+	psh_channel_handle_t	channel_handle[NUM_PSH2IA_IPC];
+	void			*channel_data[NUM_PSH2IA_IPC];
+} ipc_ctrl;
+
+
+/**
+ * intel_ia2psh_command - send an IA to PSH command
+ *
+ * @in: input psh message
+ * @out: output psh message
+ * @ch: psh channel
+ * @timeout: timeout for polling busy bit, in us
+ *
+ * Sends an ia2psh command and returns the psh message and status.
+ */
+int intel_ia2psh_command(struct psh_msg *in, struct psh_msg *out,
+			 int ch, int timeout)
+{
+	int ret = 0;
+	u32 status;
+
+	might_sleep();
+
+	if (!ipc_ctrl.initialized)
+		return -ENODEV;
+
+	if (!in || ch < PSH_SEND_CH0 || ch > PSH_SEND_CH0 + NUM_IA2PSH_IPC - 1)
+		return -EINVAL;
+
+	if (in->msg & CHANNEL_BUSY)
+		return -EINVAL;
+
+	pm_runtime_get_sync(&ipc_ctrl.pdev->dev);
+	down(&ipc_ctrl.ch_lock[ch]);
+
+	in->msg |= CHANNEL_BUSY;
+	/* Check if channel is ready for IA sending command */
+
+	if (readl(PSH_REG_ADDR(ia2psh[ch].msg)) & CHANNEL_BUSY) {
+		ret = -EBUSY;
+		goto end;
+	}
+
+	writel(in->param, PSH_REG_ADDR(ia2psh[ch].param));
+	writel(in->msg, PSH_REG_ADDR(ia2psh[ch].msg));
+
+	/* Input timeout is zero, do not check channel status */
+	if (timeout == 0)
+		goto end;
+
+	/* Input timeout is nonzero, check channel status */
+	while (((status = readl(PSH_REG_ADDR(ia2psh[ch].msg))) & CHANNEL_BUSY)
+		&& timeout) {
+		usleep_range(100, 101);
+		timeout -= 100;
+	}
+
+	if (timeout <= 0) {
+		ret = -ETIMEDOUT;
+		PSH_ERR("ia2psh channel %d is always busy!\n", ch);
+		goto end;
+	} else {
+		if (out == NULL)
+			goto end;
+
+		out->param = readl(PSH_REG_ADDR(ia2psh[ch].param));
+		out->msg = status;
+	}
+
+end:
+	up(&ipc_ctrl.ch_lock[ch]);
+	pm_runtime_put(&ipc_ctrl.pdev->dev);
+
+	return ret;
+}
+EXPORT_SYMBOL(intel_ia2psh_command);
+
+/**
+ * intel_psh_ipc_bind - bind a handler to a psh channel
+ *
+ * @ch: psh channel
+ * @handle: handle function called when IA received psh interrupt
+ * @data: data passed to handle
+ */
+int intel_psh_ipc_bind(int ch, psh_channel_handle_t handle, void *data)
+{
+	unsigned long flags;
+
+	if (!ipc_ctrl.initialized)
+		return -ENODEV;
+
+	if (!handle || ch < PSH_RECV_CH0
+			|| ch > PSH_RECV_CH0 + NUM_PSH2IA_IPC - 1)
+		return -EINVAL;
+
+	mutex_lock(&ipc_ctrl.psh_mutex);
+	down(&ipc_ctrl.ch_lock[ch]);
+	if (PSH_CH_HANDLE(ch - PSH_RECV_CH0) != NULL) {
+		up(&ipc_ctrl.ch_lock[ch]);
+		mutex_unlock(&ipc_ctrl.psh_mutex);
+		return -EBUSY;
+	} else {
+		PSH_CH_DATA(ch - PSH_RECV_CH0) = data;
+		PSH_CH_HANDLE(ch - PSH_RECV_CH0) = handle;
+	}
+	up(&ipc_ctrl.ch_lock[ch]);
+
+	pm_runtime_get_sync(&ipc_ctrl.pdev->dev);
+	spin_lock_irqsave(&ipc_ctrl.lock, flags);
+	PSH_CH_FLAG(ch) |= FLAG_BIND;
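+	/* Set this channel's bit in PIMR1 so PSH->IA interrupts are delivered */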
+	writel(readl(PIMR_ADDR(1)) | (1 << (ch - PSH_RECV_CH0)), PIMR_ADDR(1));
+	spin_unlock_irqrestore(&ipc_ctrl.lock, flags);
+	pm_runtime_put(&ipc_ctrl.pdev->dev);
+	mutex_unlock(&ipc_ctrl.psh_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(intel_psh_ipc_bind);
+
+/**
+ * intel_psh_ipc_unbind - unbind a handler from a psh channel
+ *
+ * @ch: psh channel
+ */
+void intel_psh_ipc_unbind(int ch)
+{
+	unsigned long flags;
+
+	if (!ipc_ctrl.initialized)
+		return;
+
+	if (ch < PSH_RECV_CH0 || ch > PSH_RECV_CH0 + NUM_PSH2IA_IPC - 1)
+		return;
+
+	if (!(PSH_CH_FLAG(ch) & FLAG_BIND))
+		return;
+
+	mutex_lock(&ipc_ctrl.psh_mutex);
+	pm_runtime_get_sync(&ipc_ctrl.pdev->dev);
+	spin_lock_irqsave(&ipc_ctrl.lock, flags);
+	PSH_CH_FLAG(ch) &= ~FLAG_BIND;
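+	/* Clear this channel's bit in PIMR1 to stop PSH->IA interrupt delivery */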
+	writel(readl(PIMR_ADDR(1)) & (~(1 << (ch - PSH_RECV_CH0))),
+						PIMR_ADDR(1));
+	spin_unlock_irqrestore(&ipc_ctrl.lock, flags);
+	pm_runtime_put(&ipc_ctrl.pdev->dev);
+
+	down(&ipc_ctrl.ch_lock[ch]);
+	PSH_CH_HANDLE(ch - PSH_RECV_CH0) = NULL;
+	up(&ipc_ctrl.ch_lock[ch]);
+	mutex_unlock(&ipc_ctrl.psh_mutex);
+}
+EXPORT_SYMBOL(intel_psh_ipc_unbind);
+
+void intel_psh_ipc_disable_irq(void)
+{
+	disable_irq(ipc_ctrl.pdev->irq);
+}
+EXPORT_SYMBOL(intel_psh_ipc_disable_irq);
+
+void intel_psh_ipc_enable_irq(void)
+{
+	enable_irq(ipc_ctrl.pdev->irq);
+}
+EXPORT_SYMBOL(intel_psh_ipc_enable_irq);
+
+static void psh_recv_handle(int i)
+{
+	int msg, param;
+
+	down(&ipc_ctrl.ch_lock[i + PSH_RECV_CH0]);
+
+	msg = readl(PSH_REG_ADDR(psh2ia[i].msg)) & (~CHANNEL_BUSY);
+	param = readl(PSH_REG_ADDR(psh2ia[i].param));
+
+	if (PSH_CH_HANDLE(i) == NULL) {
+		PSH_ERR("Ignoring message from unbound channel %d\n",
+				i + PSH_RECV_CH0);
+		goto end;
+	}
+
+	/* write back to clear the busy bit */
+	writel(msg, PSH_REG_ADDR(psh2ia[i].msg));
+	PSH_CH_HANDLE(i)(msg, param, PSH_CH_DATA(i));
+end:
+	up(&ipc_ctrl.ch_lock[i + PSH_RECV_CH0]);
+}
+
+static irqreturn_t psh_ipc_irq(int irq, void *data)
+{
+	int i;
+	u32 status;
+
+	pm_runtime_get_sync(&ipc_ctrl.pdev->dev);
+	status = readl(PSH_REG_ADDR(pisr));
+
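+	/* Dispatch every PSH->IA channel flagged as pending in PISR */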
+	for (i = 0; i < NUM_PSH2IA_IPC; i++) {
+		if (status & STATUS_PSH2IA(i))
+			psh_recv_handle(i);
+	}
+
+	pm_runtime_put(&ipc_ctrl.pdev->dev);
+	return IRQ_HANDLED;
+}
+
+static void psh_regs_dump(void)
+{
+	int i;
+
+	pm_runtime_get_sync(&ipc_ctrl.pdev->dev);
+	PSH_ERR("\n<-------------start------------>\n");
+
+	PSH_ERR("csr:\t%#x\n", readl(PSH_REG_ADDR(csr)));
+	PSH_ERR("pisr:\t%#x\n", readl(PSH_REG_ADDR(pisr)));
+
+	PSH_ERR("pimr0:\t%#x\n", readl(PIMR_ADDR(0)));
+	PSH_ERR("pimr1:\t%#x\n", readl(PIMR_ADDR(1)));
+	PSH_ERR("pimr2:\t%#x\n", readl(PIMR_ADDR(2)));
+	PSH_ERR("pimr3:\t%#x\n", readl(PIMR_ADDR(3)));
+
+	PSH_ERR("pmctl:\t%#x\n", readl(PSH_REG_ADDR(pmctl)));
+	PSH_ERR("pmstat:\t%#x\n", readl(PSH_REG_ADDR(pmstat)));
+	PSH_ERR("scratchpad0:\t%#x\n", readl(PSH_REG_ADDR(scratchpad[0])));
+	PSH_ERR("scratchpad1:\t%#x\n", readl(PSH_REG_ADDR(scratchpad[1])));
+
+	for (i = 0; i < NUM_IA2PSH_IPC; i++) {
+		PSH_ERR("ia2psh[%d].msg:\t%#x\n", i,
+				readl(PSH_REG_ADDR(ia2psh[i].msg)));
+		PSH_ERR("ia2psh[%d].param:\t%#x\n", i,
+				readl(PSH_REG_ADDR(ia2psh[i].param)));
+	}
+
+	PSH_ERR("cry2psh.msg:\t%#x\n", readl(PSH_REG_ADDR(cry2psh.msg)));
+	PSH_ERR("cry2psh.param:\t%#x\n", readl(PSH_REG_ADDR(cry2psh.param)));
+	PSH_ERR("scu2psh.msg:\t%#x\n", readl(PSH_REG_ADDR(scu2psh.msg)));
+	PSH_ERR("scu2psh.param:\t%#x\n", readl(PSH_REG_ADDR(scu2psh.param)));
+
+	for (i = 0; i < NUM_PSH2IA_IPC; i++) {
+		PSH_ERR("psh2ia[%d].msg:\t%#x\n", i,
+				readl(PSH_REG_ADDR(psh2ia[i].msg)));
+		PSH_ERR("psh2ia[%d].param:\t%#x\n", i,
+				readl(PSH_REG_ADDR(psh2ia[i].param)));
+	}
+
+	PSH_ERR("psh2cry.msg:\t%#x\n", readl(PSH_REG_ADDR(psh2cry.msg)));
+	PSH_ERR("psh2cry.param:\t%#x\n", readl(PSH_REG_ADDR(psh2cry.param)));
+
+	PSH_ERR("\n<-------------end------------>\n");
+	pm_runtime_put(&ipc_ctrl.pdev->dev);
+}
+
+static struct psh_msg psh_dbg_msg;
+static int psh_ch;
+
+static ssize_t psh_msg_show(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	return snprintf(buf, PAGE_SIZE,
+			"\nLast ia2psh command with msg: %#x\nparam: %#x\n",
+			psh_dbg_msg.msg, psh_dbg_msg.param);
+}
+
+static ssize_t psh_msg_store(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t size)
+{
+	int ret;
+	u32 msg, param;
+
+	memset(&psh_dbg_msg, 0, sizeof(psh_dbg_msg));
+
+	ret = sscanf(buf, "%x %x", &msg, &param);
+	if (ret != 2) {
+		PSH_ERR("Input two arguments as psh msg and param\n");
+		return -EINVAL;
+	}
+
+	psh_dbg_msg.msg = msg;
+	psh_dbg_msg.param = param;
+
+	return size;
+}
+
+static ssize_t psh_ch_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	return snprintf(buf, PAGE_SIZE,
+			"\nLast psh channel: %d\n", psh_ch);
+}
+
+static ssize_t psh_ch_store(struct device *dev,
+			    struct device_attribute *attr,
+			    const char *buf, size_t size)
+{
+	int ret;
+
+	ret = sscanf(buf, "%d", &psh_ch);
+	if (ret != 1) {
+		PSH_ERR("Input one argument as psh channel\n");
+		return -EINVAL;
+	}
+
+	return size;
+}
+
+static ssize_t psh_send_cmd_store(struct device *dev,
+			    struct device_attribute *attr,
+			    const char *buf, size_t size)
+{
+	int psh_dbg_err;
+	struct psh_msg out_msg;
+
+	memset(&out_msg, 0, sizeof(out_msg));
+
+	psh_dbg_err = intel_ia2psh_command(&psh_dbg_msg, &out_msg,
+					psh_ch, 3000000);
+	if (psh_dbg_err) {
+		PSH_ERR("Send ia2psh command failed, err %d\n", psh_dbg_err);
+		psh_regs_dump();
+		return psh_dbg_err;
+	}
+
+	return size;
+}
+
+static DEVICE_ATTR(psh_msg, S_IRUGO | S_IWUSR, psh_msg_show, psh_msg_store);
+static DEVICE_ATTR(psh_ch, S_IRUGO | S_IWUSR, psh_ch_show, psh_ch_store);
+static DEVICE_ATTR(ia2psh_cmd, S_IWUSR, NULL, psh_send_cmd_store);
+
+static struct attribute *psh_attrs[] = {
+	&dev_attr_psh_msg.attr,
+	&dev_attr_psh_ch.attr,
+	&dev_attr_ia2psh_cmd.attr,
+	NULL,
+};
+
+static struct attribute_group psh_attr_group = {
+	.name = "psh_debug",
+	.attrs = psh_attrs,
+};
+
+static int intel_psh_debug_sysfs_create(struct pci_dev *pdev)
+{
+	return sysfs_create_group(&pdev->dev.kobj, &psh_attr_group);
+}
+
+static void intel_psh_debug_sysfs_remove(struct pci_dev *pdev)
+{
+	sysfs_remove_group(&pdev->dev.kobj, &psh_attr_group);
+}
+
+#ifdef CONFIG_PM
+static int psh_ipc_suspend_noirq(struct device *dev)
+{
+	int i;
+	int ret = 0;
+
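+	/* Try-lock every channel; if any is busy, back out and abort suspend */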
+	for (i = 0; i < NUM_ALL_CH; i++) {
+		if (down_trylock(&ipc_ctrl.ch_lock[i])) {
+			ret = -EBUSY;
+			break;
+		}
+	}
+
+	if (ret) {
+		for (; i > 0; i--)
+			up(&ipc_ctrl.ch_lock[i - 1]);
+	}
+
+	return ret;
+}
+
+static int psh_ipc_resume_noirq(struct device *dev)
+{
+	int i;
+
+	for (i = 0; i < NUM_ALL_CH; i++)
+		up(&ipc_ctrl.ch_lock[i]);
+
+	return 0;
+}
+
+#else
+
+#define psh_ipc_suspend_noirq	NULL
+#define psh_ipc_resume_noirq	NULL
+
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int psh_ipc_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "runtime suspend called\n");
+	return 0;
+}
+
+static int psh_ipc_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "runtime resume called\n");
+	return 0;
+}
+
+#else
+
+#define psh_ipc_runtime_suspend	NULL
+#define psh_ipc_runtime_resume	NULL
+
+#endif
+
+static int psh_ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	int i, ret;
+	unsigned long start, len;
+
+	ipc_ctrl.pdev = pci_dev_get(pdev);
+	ret = pci_enable_device(pdev);
+	if (ret)
+		goto err1;
+
+	start = pci_resource_start(pdev, 0);
+	len = pci_resource_len(pdev, 0);
+	if (!start || !len) {
+		ret = -ENODEV;
+		goto err1;
+	}
+
+	ret = pci_request_regions(pdev, "intel_psh_ipc");
+	if (ret)
+		goto err1;
+
+	switch (intel_mid_identify_cpu()) {
+	case INTEL_MID_CPU_CHIP_TANGIER:
+	case INTEL_MID_CPU_CHIP_ANNIEDALE:
+		ipc_ctrl.reg_map = 1;
+		break;
+	default:
+		dev_err(&pdev->dev, "unknown register map for this CPU\n");
+		ret = -EINVAL;
+		goto err2;
+	}
+
+	ipc_ctrl.psh_regs = (union psh_registers *)ioremap_nocache(start, len);
+	if (!ipc_ctrl.psh_regs) {
+		ret = -ENOMEM;
+		goto err2;
+	}
+
+	ret = request_threaded_irq(pdev->irq, NULL, psh_ipc_irq, IRQF_ONESHOT,
+			"intel_psh_ipc", NULL);
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to register irq %d\n", pdev->irq);
+		goto err3;
+	}
+
+	irq_set_irq_wake(pdev->irq, 1);
+
+	spin_lock_init(&ipc_ctrl.lock);
+	mutex_init(&ipc_ctrl.psh_mutex);
+
+	for (i = 0; i < NUM_ALL_CH; i++)
+		sema_init(&ipc_ctrl.ch_lock[i], 1);
+
+	intel_psh_devices_create();
+
+	intel_psh_debug_sysfs_create(pdev);
+
+	ipc_ctrl.initialized = 1;
+
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
+
+	return 0;
+
+err3:
+	iounmap(ipc_ctrl.psh_regs);
+err2:
+	pci_release_regions(pdev);
+err1:
+	pci_dev_put(pdev);
+
+	return ret;
+}
+
+static void psh_ipc_remove(struct pci_dev *pdev)
+{
+	pm_runtime_forbid(&pdev->dev);
+	pm_runtime_get_noresume(&pdev->dev);
+	free_irq(pdev->irq, NULL);
+	iounmap(ipc_ctrl.psh_regs);
+	pci_release_regions(pdev);
+	pci_dev_put(pdev);
+	intel_psh_devices_destroy();
+	intel_psh_debug_sysfs_remove(pdev);
+	ipc_ctrl.initialized = 0;
+}
+
+static const struct dev_pm_ops psh_ipc_drv_pm_ops = {
+	.suspend_noirq		= psh_ipc_suspend_noirq,
+	.resume_noirq		= psh_ipc_resume_noirq,
+	.runtime_suspend	= psh_ipc_runtime_suspend,
+	.runtime_resume		= psh_ipc_runtime_resume,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x11a3)},
+	{ 0,}
+};
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+static struct pci_driver psh_ipc_driver = {
+	.name = "intel_psh_ipc",
+	.driver = {
+		.pm = &psh_ipc_drv_pm_ops,
+	},
+	.id_table = pci_ids,
+	.probe = psh_ipc_probe,
+	.remove = psh_ipc_remove,
+};
+
+static int __init psh_ipc_init(void)
+{
+	return  pci_register_driver(&psh_ipc_driver);
+}
+
+static void __exit psh_ipc_exit(void)
+{
+	pci_unregister_driver(&psh_ipc_driver);
+}
+
+MODULE_AUTHOR("bin.yang@intel.com");
+MODULE_DESCRIPTION("Intel PSH IPC driver");
+MODULE_LICENSE("GPL v2");
+
+fs_initcall(psh_ipc_init);
+module_exit(psh_ipc_exit);
diff --git a/drivers/platform/x86/intel_scu_flis.c b/drivers/platform/x86/intel_scu_flis.c
new file mode 100644
index 0000000..4dc3598
--- /dev/null
+++ b/drivers/platform/x86/intel_scu_flis.c
@@ -0,0 +1,747 @@
+/* intel_scu_flis.c - SCU FLIS interfaces
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/rpmsg.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_scu_flis.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_mid_remoteproc.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+
+static struct rpmsg_instance *flis_instance;
+
+static u32 shim_flis_addr;
+static u32 shim_offset;
+static u32 shim_data;
+static char shim_ops[OPS_STR_LEN];
+
+static u32 param_type;	/* flis param type: PULL/PIN DIRECTION/OPEN_DRAIN */
+static u32 param_value;	/* value of certain flis param */
+static unsigned int pin_name;
+static char ops[OPS_STR_LEN];
+
+struct intel_scu_flis_info {
+	struct pinstruct_t *pin_t;
+	struct pin_mmio_flis_t *mmio_flis_t;
+	int pin_num;
+	int initialized;
+	void *flis_base;
+	u32 flis_paddr;
+};
+
+static struct intel_scu_flis_info flis_info;
+
+static DEFINE_SPINLOCK(mmio_flis_lock);
+
+u32 get_flis_value(u32 offset)
+{
+	struct intel_scu_flis_info *isfi = &flis_info;
+	u32 __iomem *mem;
+
+	if (!isfi->initialized || !isfi->flis_base)
+		return -ENODEV;
+
+	mem = (void __iomem *)(isfi->flis_base + offset);
+
+	return readl(mem);
+}
+EXPORT_SYMBOL(get_flis_value);
+
+void set_flis_value(u32 value, u32 offset)
+{
+	struct intel_scu_flis_info *isfi = &flis_info;
+	u32 __iomem *mem;
+	unsigned long flags;
+
+	if (!isfi->initialized || !isfi->flis_base)
+		return;
+
+	/*
+	 * There is one secure region in the Merrifield FLIS which is
+	 * read-only from the OS side. Use IPC when write access is needed.
+	 */
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER
+			&& offset >= 0x1d00
+			&& offset <= 0x1d34) {
+		/* IPC call should not be called in atomic context */
+		might_sleep();
+		rpmsg_send_generic_raw_command(RP_INDIRECT_WRITE, 0,
+					(u8 *)&value, 4,
+					NULL, 0,
+					isfi->flis_paddr + offset, 0);
+
+	} else {
+		mem = (void __iomem *)(isfi->flis_base + offset);
+		spin_lock_irqsave(&mmio_flis_lock, flags);
+		writel(value, mem);
+		spin_unlock_irqrestore(&mmio_flis_lock, flags);
+	}
+}
+EXPORT_SYMBOL(set_flis_value);
+
+/* directly write to flis address */
+int intel_scu_ipc_write_shim(u32 data, u32 flis_addr, u32 offset)
+{
+	int ret;
+	u32 ipc_wbuf[3];
+
+	/* offset 0xff means the flis is reserved, just return 0 */
+	if (offset == 0xFF)
+		return 0;
+
+	ipc_wbuf[0] = flis_addr; /* wbuf[0]: flis address */
+	ipc_wbuf[1] = offset;	/* wbuf[1]: register offset */
+	ipc_wbuf[2] = data;	/* wbuf[2]: data */
+
+	ret = rpmsg_send_command(flis_instance,	IPCMSG_SHIM_CONFIG,
+				IPC_CMD_SHIM_WR, (u8 *)ipc_wbuf, NULL, 12, 0);
+	if (ret)
+		pr_err("%s: failed to write shim, flis addr: 0x%x, offset: 0x%x\n",
+			__func__, flis_addr, offset);
+
+	return ret;
+}
+EXPORT_SYMBOL(intel_scu_ipc_write_shim);
+
+/* directly read from flis address */
+int intel_scu_ipc_read_shim(u32 *data, u32 flis_addr, u32 offset)
+{
+	int ret;
+	u32 ipc_wbuf[2];
+
+	/* offset 0xff means the flis is reserved, just return 0 */
+	if (offset == 0xFF)
+		return 0;
+
+	ipc_wbuf[0] = flis_addr;
+	ipc_wbuf[1] = offset;
+
+	ret = rpmsg_send_command(flis_instance, IPCMSG_SHIM_CONFIG,
+				IPC_CMD_SHIM_RD, (u8 *)ipc_wbuf, data, 8, 1);
+	if (ret)
+		pr_err("%s: failed to read shim, flis addr: 0x%x, offset: 0x%x\n",
+			__func__, flis_addr, offset);
+
+	return ret;
+}
+EXPORT_SYMBOL(intel_scu_ipc_read_shim);
+
+int intel_scu_ipc_update_shim(u32 data, u32 mask, u32 flis_addr, u32 offset)
+{
+	u32 tmp = 0;
+	int ret;
+
+	ret = intel_scu_ipc_read_shim(&tmp, flis_addr, offset);
+	if (ret) {
+		pr_err("read shim failed, addr = 0x%x, off = 0x%x\n",
+			flis_addr, offset);
+		return ret;
+	}
+
+	tmp &= ~mask;
+	tmp |= (data & mask);
+
+	ret = intel_scu_ipc_write_shim(tmp, flis_addr, offset);
+	if (ret) {
+		pr_err("write shim failed, addr = 0x%x, off = 0x%x\n",
+			flis_addr, offset);
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(intel_scu_ipc_update_shim);
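+
+/*
+ * Illustrative use of the read-modify-write helper above (a sketch only;
+ * the FLIS address and offset are made-up values, not taken from any
+ * platform table): set bits [1:0] of the register at offset 0x10 in a
+ * FLIS bank at bus address 0xff0c0000, leaving the other bits intact:
+ *
+ *	ret = intel_scu_ipc_update_shim(0x3, 0x3, 0xff0c0000, 0x10);
+ */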
+
+/**
+ * config_pin_flis -- configure pin mux,
+ *		      pull direction and strength and open-drain enable.
+ *
+ * @name: pin name
+ * @param: flis param
+ * @val: value to be set
+ *
+ * example:
+ * config pull up/down:
+ *	config_pin_flis(i2s_2_clk, PULL, UP_20K);
+ *	config_pin_flis(i2s_2_clk, PULL, DOWN_20K);
+ *
+ * config pin mux:
+ *	config_pin_flis(i2s_2_clk, MUX, MUX_EN_INPUT_EN);
+ *	config_pin_flis(i2s_2_clk, MUX, INPUT_EN);
+ *	config_pin_flis(i2s_2_clk, MUX, MUX_EN_OUTPUT_EN);
+ *	config_pin_flis(i2s_2_clk, MUX, OUTPUT_EN);
+ *
+ * config pin open-drain:
+ *	config_pin_flis(i2s_2_clk, OPEN_DRAIN, OD_ENABLE);
+ *	config_pin_flis(i2s_2_clk, OPEN_DRAIN, OD_DISABLE);
+ *
+ */
+int config_pin_flis(unsigned int name, enum flis_param_t param, u32 val)
+{
+	u32 flis_addr, off, data, mask;
+	int ret;
+	int pos;
+	struct intel_scu_flis_info *isfi = &flis_info;
+	struct pin_mmio_flis_t *mmft;
+	u32 old_val;
+
+	if (!isfi->initialized)
+		return -ENODEV;
+
+	if (name >= isfi->pin_num)
+		return -EINVAL;
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW) {
+		/* Check if the pin is configurable */
+		if (isfi->pin_t[name].valid == false)
+			return -EINVAL;
+
+		flis_addr = isfi->pin_t[name].bus_address;
+
+		switch (param) {
+		case PULL:
+			off = isfi->pin_t[name].pullup_offset;
+			pos = isfi->pin_t[name].pullup_lsb_pos;
+			mask = (PULL_MASK << pos);
+			break;
+		case MUX:
+			off = isfi->pin_t[name].direction_offset;
+			pos = isfi->pin_t[name].direction_lsb_pos;
+			mask = (MUX_MASK << pos);
+			break;
+		case OPEN_DRAIN:
+			off = isfi->pin_t[name].open_drain_offset;
+			pos = isfi->pin_t[name].open_drain_bit;
+			mask = (OPEN_DRAIN_MASK << pos);
+			break;
+		default:
+			pr_err("Please specify valid flis param\n");
+			return -EINVAL;
+		}
+
+		data = (val << pos);
+		pr_debug("addr = 0x%x, off = 0x%x, pos = %d, mask = 0x%x, data = 0x%x\n",
+				flis_addr, off, pos, mask, data);
+
+		ret = intel_scu_ipc_update_shim(data, mask, flis_addr, off);
+		if (ret) {
+			pr_err("update shim failed\n");
+			return ret;
+		}
+	} else if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) {
+		mmft = isfi->mmio_flis_t;
+		off = mmft[name].offset;
+
+		/* Check if the FLIS is writable by mmio access */
+		if (!(mmft[name].access_ctrl & writable))
+			return -EINVAL;
+
+		old_val = get_flis_value(off);
+
+		switch (param) {
+		case PULL:
+			mask = PULL_MASK;
+			break;
+		case MUX:
+			mask = MUX_MASK;
+			break;
+		case OPEN_DRAIN:
+			mask = OPEN_DRAIN_MASK;
+			break;
+		default:
+			pr_err("Please specify valid flis param\n");
+			return -EINVAL;
+		}
+
+		set_flis_value((old_val & ~mask) | val, off);
+
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(config_pin_flis);
+
+int get_pin_flis(unsigned int name, enum flis_param_t param, u32 *val)
+{
+	u32 flis_addr, off;
+	u32 data = 0;
+	int ret;
+	int pos;
+	u32 mask;
+	struct intel_scu_flis_info *isfi = &flis_info;
+	struct pin_mmio_flis_t *mmft;
+	u32 old_val;
+
+	if (!isfi->initialized)
+		return -ENODEV;
+
+	if (name >= isfi->pin_num)
+		return -EINVAL;
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW) {
+		if (isfi->pin_t[name].valid == false)
+			return -EINVAL;
+
+		flis_addr = isfi->pin_t[name].bus_address;
+
+		switch (param) {
+		case PULL:
+			off = isfi->pin_t[name].pullup_offset;
+			pos = isfi->pin_t[name].pullup_lsb_pos;
+			mask = PULL_MASK;
+			break;
+		case MUX:
+			off = isfi->pin_t[name].direction_offset;
+			pos = isfi->pin_t[name].direction_lsb_pos;
+			mask = MUX_MASK;
+			break;
+		case OPEN_DRAIN:
+			off = isfi->pin_t[name].open_drain_offset;
+			pos = isfi->pin_t[name].open_drain_bit;
+			mask = OPEN_DRAIN_MASK;
+			break;
+		default:
+			pr_err("Please specify valid flis param\n");
+			return -EINVAL;
+		}
+
+		ret = intel_scu_ipc_read_shim(&data, flis_addr, off);
+		if (ret) {
+			pr_err("read shim failed, addr = 0x%x, off = 0x%x\n",
+				flis_addr, off);
+			return ret;
+		}
+
+		*val = (data >> pos) & mask;
+
+		pr_debug("read: data = 0x%x, val = 0x%x\n", data, *val);
+	} else if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) {
+		mmft = isfi->mmio_flis_t;
+		off = mmft[name].offset;
+
+		old_val = get_flis_value(off);
+
+		switch (param) {
+		case PULL:
+			pos = 4;
+			mask = PULL_MASK;
+			break;
+		case MUX:
+			pos = 12;
+			mask = MUX_MASK;
+			break;
+		case OPEN_DRAIN:
+			pos = 21;
+			mask = OPEN_DRAIN_MASK;
+			break;
+		default:
+			pr_err("Please specify valid flis param\n");
+			return -EINVAL;
+		}
+
+		*val = (old_val & mask) >> pos;
+
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(get_pin_flis);
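+
+/*
+ * Example use of the two accessors above (illustrative only; the pin
+ * index 10 is hypothetical and depends on the platform pin table):
+ *
+ *	u32 pull;
+ *	int ret;
+ *
+ *	ret = get_pin_flis(10, PULL, &pull);
+ *	if (!ret)
+ *		ret = config_pin_flis(10, OPEN_DRAIN, OD_ENABLE);
+ */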
+
+static void flis_generic_store(const char *buf, int type)
+{
+	u32 tmp;
+	int ret;
+
+	/* use decimal for pin number */
+	if (type == DBG_PIN_NAME)
+		ret = sscanf(buf, "%d", &tmp);
+	else
+		ret = sscanf(buf, "%x", &tmp);
+
+	if (ret != 1)
+		return;
+
+	switch (type) {
+	case DBG_SHIM_FLIS_ADDR:
+		shim_flis_addr = tmp;
+		break;
+	case DBG_SHIM_OFFSET:
+		shim_offset = tmp;
+		break;
+	case DBG_SHIM_DATA:
+		shim_data = tmp;
+		break;
+	case DBG_PARAM_VAL:
+		param_value = tmp;
+		break;
+	case DBG_PARAM_TYPE:
+		param_type = tmp;
+		break;
+	case DBG_PIN_NAME:
+		pin_name = tmp;
+		break;
+	default:
+		break;
+	}
+}
+
+static ssize_t shim_flis_addr_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	flis_generic_store(buf, DBG_SHIM_FLIS_ADDR);
+	return size;
+}
+
+static ssize_t shim_flis_addr_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n", shim_flis_addr);
+}
+
+static ssize_t shim_offset_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	flis_generic_store(buf, DBG_SHIM_OFFSET);
+	return size;
+}
+
+static ssize_t shim_offset_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n", shim_offset);
+}
+
+static ssize_t shim_data_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	flis_generic_store(buf, DBG_SHIM_DATA);
+	return size;
+}
+
+static ssize_t shim_data_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n", shim_data);
+}
+
+static ssize_t shim_ops_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	int ret;
+
+	memset(shim_ops, 0, sizeof(shim_ops));
+
+	ret = sscanf(buf, "%9s", shim_ops);
+	if (ret != 1)
+		return -EINVAL;
+
+	if (!strncmp("read", shim_ops, OPS_STR_LEN)) {
+		ret = intel_scu_ipc_read_shim(&shim_data, shim_flis_addr,
+				shim_offset);
+	} else if (!strncmp("write", shim_ops, OPS_STR_LEN)) {
+		ret = intel_scu_ipc_write_shim(shim_data, shim_flis_addr,
+				shim_offset);
+	} else {
+		dev_err(dev, "Not supported ops\n");
+		ret = -EINVAL;
+	}
+
+	if (ret) {
+		dev_err(dev, "shim config met error\n");
+		return ret;
+	}
+
+	return size;
+}
+
+static ssize_t param_val_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n", param_value);
+}
+
+static ssize_t param_val_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	flis_generic_store(buf, DBG_PARAM_VAL);
+	return size;
+}
+
+static ssize_t flis_param_type_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", param_type);
+}
+
+static ssize_t flis_param_type_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	flis_generic_store(buf, DBG_PARAM_TYPE);
+	return size;
+}
+
+static ssize_t pinname_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", pin_name);
+}
+
+static ssize_t pinname_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	flis_generic_store(buf, DBG_PIN_NAME);
+	return size;
+}
+
+static ssize_t ops_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	int ret;
+
+	memset(ops, 0, sizeof(ops));
+
+	ret = sscanf(buf, "%9s", ops);
+	if (ret != 1) {
+		dev_err(dev, "input error\n");
+		return -EINVAL;
+	}
+
+	if (!strncmp("get", ops, OPS_STR_LEN))
+		ret = get_pin_flis(pin_name, param_type, &param_value);
+	else if (!strncmp("set", ops, OPS_STR_LEN))
+		ret = config_pin_flis(pin_name, param_type, param_value);
+	else {
+		dev_err(dev, "wrong ops\n");
+		ret = -EINVAL;
+	}
+
+	if (ret) {
+		dev_err(dev, "Access flis error, ret = %d\n", ret);
+		return ret;
+	}
+
+	return size;
+}
+
+static DEVICE_ATTR(flis_addr, S_IRUGO|S_IWUSR,
+		shim_flis_addr_show, shim_flis_addr_store);
+static DEVICE_ATTR(offset, S_IRUGO|S_IWUSR,
+		shim_offset_show, shim_offset_store);
+static DEVICE_ATTR(data, S_IRUGO|S_IWUSR, shim_data_show, shim_data_store);
+static DEVICE_ATTR(flis_ops, S_IWUSR, NULL, shim_ops_store);
+
+static struct attribute *flis_attrs[] = {
+	&dev_attr_flis_addr.attr,
+	&dev_attr_offset.attr,
+	&dev_attr_data.attr,
+	&dev_attr_flis_ops.attr,
+	NULL,
+};
+
+static struct attribute_group flis_attr_group = {
+	.name = "flis_debug",
+	.attrs = flis_attrs,
+};
+
+static DEVICE_ATTR(pin_name, S_IRUGO|S_IWUSR, pinname_show, pinname_store);
+static DEVICE_ATTR(flis_param, S_IRUGO|S_IWUSR, flis_param_type_show,
+						flis_param_type_store);
+static DEVICE_ATTR(val, S_IRUGO|S_IWUSR, param_val_show, param_val_store);
+static DEVICE_ATTR(ops, S_IWUSR, NULL, ops_store);
+
+static struct attribute *pin_config_attrs[] = {
+	&dev_attr_pin_name.attr,
+	&dev_attr_flis_param.attr,
+	&dev_attr_val.attr,
+	&dev_attr_ops.attr,
+	NULL,
+};
+
+static struct attribute_group pin_config_attr_group = {
+	.name = "pin_config_debug",
+	.attrs = pin_config_attrs,
+};
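+
+/*
+ * The two attribute groups above form a small debug interface under the
+ * platform device's sysfs directory. A possible shell session (the device
+ * path is an assumption that depends on how the platform device is
+ * registered, and flis_param 0 is assumed to select PULL):
+ *
+ *	cd /sys/devices/platform/intel_scu_flis
+ *	echo 10 > pin_config_debug/pin_name
+ *	echo 0 > pin_config_debug/flis_param
+ *	echo get > pin_config_debug/ops
+ *	cat pin_config_debug/val
+ */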
+
+static int scu_flis_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct intel_scu_flis_info *isfi = &flis_info;
+	struct intel_scu_flis_platform_data *pdata = pdev->dev.platform_data;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "No platform data\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	isfi->pin_t = pdata->pin_t;
+	isfi->pin_num = pdata->pin_num;
+	isfi->mmio_flis_t = pdata->mmio_flis_t;
+	if (pdata->mmio_flis_t && pdata->flis_base) {
+		isfi->flis_paddr = pdata->flis_base;
+		isfi->flis_base = ioremap_nocache(pdata->flis_base,
+					pdata->flis_len);
+		if (!isfi->flis_base) {
+			dev_err(&pdev->dev, "error mapping flis base\n");
+			ret = -EFAULT;
+			goto out;
+		}
+	}
+
+	if ((isfi->pin_t || isfi->mmio_flis_t) && isfi->pin_num)
+		isfi->initialized = 1;
+
+	ret = sysfs_create_group(&pdev->dev.kobj, &flis_attr_group);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to create flis sysfs interface\n");
+		goto err1;
+	}
+
+	ret = sysfs_create_group(&pdev->dev.kobj, &pin_config_attr_group);
+	if (ret) {
+		dev_err(&pdev->dev,
+				"Failed to create pin config sysfs interface\n");
+		goto err2;
+	}
+
+	dev_info(&pdev->dev, "scu flis probed\n");
+	return 0;
+
+err2:
+	sysfs_remove_group(&pdev->dev.kobj, &flis_attr_group);
+err1:
+	if (pdata->flis_base)
+		iounmap(isfi->flis_base);
+out:
+	isfi->initialized = 0;
+	return ret;
+}
+
+static int scu_flis_remove(struct platform_device *pdev)
+{
+	sysfs_remove_group(&pdev->dev.kobj, &pin_config_attr_group);
+	sysfs_remove_group(&pdev->dev.kobj, &flis_attr_group);
+
+	return 0;
+}
+
+static struct platform_driver scu_flis_driver = {
+	.driver = {
+		   .name = "intel_scu_flis",
+		   .owner = THIS_MODULE,
+		   },
+	.probe = scu_flis_probe,
+	.remove = scu_flis_remove,
+};
+
+static int scu_flis_module_init(void)
+{
+	return platform_driver_register(&scu_flis_driver);
+}
+
+static void scu_flis_module_exit(void)
+{
+	platform_driver_unregister(&scu_flis_driver);
+}
+
+static int flis_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed flis rpmsg device\n");
+
+	/* Allocate rpmsg instance for flis */
+	ret = alloc_rpmsg_instance(rpdev, &flis_instance);
+	if (!flis_instance) {
+		dev_err(&rpdev->dev, "kzalloc flis instance failed\n");
+		goto out;
+	}
+
+	/* Initialize rpmsg instance */
+	init_rpmsg_instance(flis_instance);
+
+	ret = scu_flis_module_init();
+	if (ret)
+		free_rpmsg_instance(rpdev, &flis_instance);
+
+out:
+	return ret;
+}
+
+static void flis_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	scu_flis_module_exit();
+	free_rpmsg_instance(rpdev, &flis_instance);
+	dev_info(&rpdev->dev, "Removed flis rpmsg device\n");
+}
+
+static void flis_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id flis_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_flis" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, flis_rpmsg_id_table);
+
+static struct rpmsg_driver flis_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= flis_rpmsg_id_table,
+	.probe		= flis_rpmsg_probe,
+	.callback	= flis_rpmsg_cb,
+	.remove		= flis_rpmsg_remove,
+};
+
+static int __init flis_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&flis_rpmsg);
+}
+
+static void __exit flis_rpmsg_exit(void)
+{
+	unregister_rpmsg_driver(&flis_rpmsg);
+}
+
+fs_initcall(flis_rpmsg_init);
+module_exit(flis_rpmsg_exit);
+
+MODULE_AUTHOR("Ning Li <ning.li@intel.com>");
+MODULE_DESCRIPTION("Intel FLIS Access Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_scu_fw_update.c b/drivers/platform/x86/intel_scu_fw_update.c
new file mode 100644
index 0000000..59f8c7f
--- /dev/null
+++ b/drivers/platform/x86/intel_scu_fw_update.c
@@ -0,0 +1,1087 @@
+/*
+ * fw_update.c - Intel SCU Firmware Update Driver
+ *
+ * Copyright (C) 2012 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
+#include <linux/rpmsg.h>
+#include <linux/intel_mid_pm.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel-mid.h>
+
+/*
+ * Medfield & Cloverview firmware update.
+ * The flow and communication between IA and SCU have changed for
+ * Medfield firmware update. For more details, please refer to the
+ * Firmware Arch Spec. The macros and structs below apply to Medfield
+ * firmware update.
+ */
+
+#define IPC_CMD_FW_UPDATE_GO	0x02
+
+#define MAX_FW_CHUNK		(128*1024)
+#define IFWX_CHUNK_SIZE		(96*1024)
+
+#define SRAM_ADDR		0xFFFC0000
+#define MAILBOX_ADDR		0xFFFE0000
+
+#define SCU_FLAG_OFFSET		8
+#define IA_FLAG_OFFSET		12
+
+#define MIP_HEADER_OFFSET	0
+#define SUCP_OFFSET		0x1D8000
+#define VEDFW_OFFSET		0x1A6000
+
+#define DNX_HDR_LEN		24
+#define FUPH_HDR_LEN		36
+
+#define DNX_IMAGE	"DXBL"
+#define FUPH_HDR_SIZE	"RUPHS"
+#define FUPH		"RUPH"
+#define MIP		"DMIP"
+#define IFWI		"IFW"
+#define LOWER_128K	"LOFW"
+#define UPPER_128K	"HIFW"
+#define PSFW1		"PSFW1"
+#define PSFW2		"PSFW2"
+#define SSFW		"SSFW"
+#define SUCP		"SuCP"
+#define VEDFW		"VEDFW"
+#define UPDATE_DONE	"HLT$"
+#define UPDATE_ABORT	"HLT0"
+#define UPDATE_ERROR	"ER"
+
+#define MAX_LEN_IFW	4
+#define MAX_LEN_PSFW	7
+#define MAX_LEN_SSFW	6
+#define MAX_LEN_SUCP	6
+#define MAX_LEN_VEDFW	7
+
+#define FUPH_MIP_OFFSET		0x04
+#define FUPH_IFWI_OFFSET	0x08
+#define FUPH_PSFW1_OFFSET	0x0c
+#define FUPH_PSFW2_OFFSET	0x10
+#define FUPH_SSFW_OFFSET	0x14
+#define FUPH_SUCP_OFFSET	0x18
+#define FUPH_VEDFW_OFFSET	0x1c
+
+#define DNX_MAX_SIZE	(128*1024)
+#define IFWI_MAX_SIZE	(3*1024*1024)
+#define FOTA_MEM_SIZE	(4*1024*1024)
+
+#define DNX_SIZE_OFFSET	0
+#define GP_FLAG_OFFSET	4
+#define XOR_CHK_OFFSET	20
+
+#define GPF_BIT32	1
+#define FUPH_STR	"UPH$"
+#define FUPH_MAX_LEN	36
+#define SKIP_BYTES	8
+
+static struct kobject *scu_fw_update_kobj;
+static struct rpmsg_instance *fw_update_instance;
+
+/* Modified IA-SCU mailbox for Medfield firmware update. */
+struct ia_scu_mailbox {
+	char mail[8];
+	u32 scu_flag;
+	u32 ia_flag;
+};
+
+/* Structure to parse input from firmware-update application. */
+struct fw_ud {
+	u8 *fw_file_data;
+	u32 fsize;
+	u8 *dnx_hdr;
+	u8 *dnx_file_data;
+	u32 dnx_size;
+	u32 fuph_hdr_len;
+};
+
+struct mfld_fw_update {
+	void __iomem *sram;
+	void __iomem *mailbox;
+	u32 wscu;
+	u32 wia;
+	char mb_status[8];
+};
+
+/* Holds size parameters read from fuph header */
+struct fuph_hdr_attrs {
+	u32 mip_size;
+	u32 ifwi_size;
+	u32 psfw1_size;
+	u32 psfw2_size;
+	u32 ssfw_size;
+	u32 sucp_size;
+	u32 vedfw_size;
+};
+
+enum mailbox_status {
+	MB_DONE,
+	MB_CONTINUE,
+	MB_ERROR
+};
+
+/* Misc. firmware components that are part of integrated firmware */
+struct misc_fw {
+	const char *fw_type;
+	u8 str_len;
+};
+
+/* lock used to prevent multiple calls to fw update sysfs interface */
+static DEFINE_MUTEX(fwud_lock);
+
+static char err_buf[50];
+static u8 *pending_data;
+
+struct fw_update_info {
+	struct device *dev;
+	struct fw_ud *fwud_pending;
+};
+
+static struct fw_update_info fui;
+
+static struct misc_fw misc_fw_table[] = {
+	{ .fw_type = IFWI, .str_len  = MAX_LEN_IFW },
+	{ .fw_type = PSFW1, .str_len  = MAX_LEN_PSFW },
+	{ .fw_type = SSFW, .str_len  = MAX_LEN_SSFW },
+	{ .fw_type = PSFW2, .str_len  = MAX_LEN_PSFW },
+	{ .fw_type = SUCP, .str_len  = MAX_LEN_SUCP },
+	{ .fw_type = VEDFW, .str_len  = MAX_LEN_VEDFW }
+};
+
+static int alloc_fota_mem_early;
+
+static int __init alloc_mem_fota_early_flag(char *p)
+{
+	alloc_fota_mem_early = 1;
+	return 0;
+}
+early_param("alloc_fota_mem_early", alloc_mem_fota_early_flag);
+
+/*
+ * IA busy-waits, polling the mailbox, to check whether SCU is done
+ * processing. If it has to wait for more than about five seconds
+ * (500 polls at 10 ms each), it exits with an error code.
+ */
+static int busy_wait(struct mfld_fw_update *mfld_fw_upd)
+{
+	u32 count = 0;
+	u32 flag;
+
+	flag = mfld_fw_upd->wscu;
+
+	while (ioread32(mfld_fw_upd->mailbox + SCU_FLAG_OFFSET) != flag
+		&& count < 500) {
+		/* There are synchronization issues between IA and SCU */
+		mb();
+		/* FIXME: we must use mdelay currently */
+		mdelay(10);
+		count++;
+	}
+
+	if (ioread32(mfld_fw_upd->mailbox + SCU_FLAG_OFFSET) != flag) {
+		dev_err(fui.dev, "IA-waited and quitting\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/* This function will:
+ * 1) Copy a firmware chunk from the staging buffer into a bounce buffer.
+ * 2) Copy from the bounce buffer to shared SRAM.
+ * 3) Write to the mailbox.
+ * 4) Wait for SCU to process that firmware chunk.
+ * Returns 0 on success, and < 0 for failure.
+ */
+static int process_fw_chunk(u8 *fws, u8 *userptr, u32 chunklen,
+					struct mfld_fw_update *mfld_fw_upd)
+{
+	memcpy(fws, userptr, chunklen);
+
+	/* IA copy to sram */
+	memcpy_toio(mfld_fw_upd->sram, fws, chunklen);
+
+	/* There are synchronization issues between IA and SCU */
+	mb();
+	mfld_fw_upd->wia = !(mfld_fw_upd->wia);
+	iowrite32(mfld_fw_upd->wia, mfld_fw_upd->mailbox + IA_FLAG_OFFSET);
+
+	mb();
+	dev_dbg(fui.dev, "wrote ia_flag=%d\n",
+		 ioread32(mfld_fw_upd->mailbox + IA_FLAG_OFFSET));
+
+	mfld_fw_upd->wscu = !mfld_fw_upd->wscu;
+	return busy_wait(mfld_fw_upd);
+}
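+
+/*
+ * A sketch of the handshake implemented by process_fw_chunk() and
+ * busy_wait(): IA and SCU each own one flag word in the shared mailbox.
+ * After copying a chunk to SRAM, IA toggles its flag and then polls until
+ * SCU's flag reaches the expected toggled value, so the two sides advance
+ * in lock-step:
+ *
+ *	IA:  copy chunk -> toggle IA flag -> poll for SCU flag
+ *	SCU: consume chunk -> post next request -> toggle SCU flag
+ */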
+
+/*
+ * This function will check mailbox status flag, and return state of mailbox.
+ */
+static enum mailbox_status check_mb_status(struct mfld_fw_update *mfld_fw_upd)
+{
+	enum mailbox_status mb_state;
+
+	/* There are synchronization issues between IA and SCU */
+	mb();
+
+	memcpy_fromio(mfld_fw_upd->mb_status, mfld_fw_upd->mailbox, 8);
+
+	if (!strncmp(mfld_fw_upd->mb_status, UPDATE_ERROR,
+					sizeof(UPDATE_ERROR) - 1) ||
+		!strncmp(mfld_fw_upd->mb_status, UPDATE_ABORT,
+					sizeof(UPDATE_ABORT) - 1)) {
+		dev_dbg(fui.dev,
+			"mailbox error=%s\n", mfld_fw_upd->mb_status);
+		return MB_ERROR;
+	} else {
+		mb_state = (!strncmp(mfld_fw_upd->mb_status, UPDATE_DONE,
+			sizeof(UPDATE_DONE) - 1)) ? MB_DONE : MB_CONTINUE;
+		dev_dbg(fui.dev,
+			"mailbox pass=%s, mb_state=%d\n",
+			mfld_fw_upd->mb_status, mb_state);
+	}
+
+	return mb_state;
+}
+
+/* Helper function used to calculate length and offset. */
+int helper_for_calc_offset_length(struct fw_ud *fw_ud_ptr, char *scu_req,
+			void **offset, u32 *len, struct fuph_hdr_attrs *fuph,
+			const char *fw_type)
+{
+	unsigned long chunk_no;
+	u32 chunk_rem;
+	u32 max_chunk_cnt;
+	u32 fw_size;
+	u32 fw_offset;
+	u32 max_fw_chunk_size = MAX_FW_CHUNK;
+
+	if (!strncmp(fw_type, IFWI, strlen(IFWI))) {
+
+		if (kstrtoul(scu_req + strlen(IFWI), 10, &chunk_no) < 0)
+			return -EINVAL;
+
+		/* On CTP, IFWx starts from IFW1, not IFW0, thus adjust the
+		 * chunk_no to make '*offset' point to the correct address.
+		 * Besides, the size of each IFWx chunk is 96k, not 128k
+		 */
+		chunk_no = chunk_no - 1;
+		fw_size = fuph->ifwi_size;
+		fw_offset = fuph->mip_size;
+		max_fw_chunk_size = IFWX_CHUNK_SIZE;
+	} else if (!strncmp(fw_type, PSFW1, strlen(PSFW1))) {
+
+		if (kstrtoul(scu_req + strlen(PSFW1), 10, &chunk_no) < 0)
+			return -EINVAL;
+
+		fw_size = fuph->psfw1_size;
+		fw_offset = fuph->mip_size + fuph->ifwi_size;
+	} else if (!strncmp(fw_type, PSFW2, strlen(PSFW2))) {
+
+		if (kstrtoul(scu_req + strlen(PSFW2), 10, &chunk_no) < 0)
+			return -EINVAL;
+
+		fw_size = fuph->psfw2_size;
+		fw_offset = fuph->mip_size + fuph->ifwi_size +
+				fuph->psfw1_size + fuph->ssfw_size;
+	} else if (!strncmp(fw_type, SSFW, strlen(SSFW))) {
+
+		if (kstrtoul(scu_req + strlen(SSFW), 10, &chunk_no) < 0)
+			return -EINVAL;
+
+		fw_size = fuph->ssfw_size;
+		fw_offset = fuph->mip_size + fuph->ifwi_size +
+				fuph->psfw1_size;
+	} else if (!strncmp(fw_type, SUCP, strlen(SUCP))) {
+
+		if (kstrtoul(scu_req + strlen(SUCP), 10, &chunk_no) < 0)
+			return -EINVAL;
+
+		fw_size = fuph->sucp_size;
+		fw_offset = SUCP_OFFSET;
+	} else if (!strncmp(fw_type, VEDFW, strlen(VEDFW))) {
+
+		if (kstrtoul(scu_req + strlen(VEDFW), 10, &chunk_no) < 0)
+			return -EINVAL;
+
+		fw_size = fuph->vedfw_size;
+		fw_offset = VEDFW_OFFSET;
+	} else
+		return -EINVAL;
+
+	chunk_rem = fw_size % max_fw_chunk_size;
+	max_chunk_cnt = (fw_size/max_fw_chunk_size) + (chunk_rem ? 1 : 0);
+
+	dev_dbg(fui.dev,
+		"str=%s,chunk_no=%lx, chunk_rem=%d,max_chunk_cnt=%d\n",
+		fw_type, chunk_no, chunk_rem, max_chunk_cnt);
+
+	if ((chunk_no + 1) > max_chunk_cnt)
+		return -EINVAL;
+
+	/*
+	 * Note: the logic below ensures we get the right length even when
+	 * the input is exactly 128K or a multiple thereof.
+	 */
+	*len = (chunk_no == (max_chunk_cnt - 1)) ?
+		(chunk_rem ? chunk_rem : max_fw_chunk_size) : max_fw_chunk_size;
+
+	*offset = fw_ud_ptr->fw_file_data + fw_offset +
+		chunk_no * max_fw_chunk_size;
+
+	return 0;
+}
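+
+/*
+ * Worked example for the length/offset math above (the sizes are made-up
+ * values): with fw_size = 320 KiB and max_fw_chunk_size = 128 KiB,
+ * chunk_rem = 64 KiB and max_chunk_cnt = 3. Chunks 0 and 1 are then
+ * 128 KiB each, and the final chunk 2 is the 64 KiB remainder, starting
+ * at fw_offset + 2 * 128 KiB.
+ */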
+
+/*
+ * This API calculates the offset and length depending on the type of
+ * firmware chunk requested by SCU. Note: the intent is to follow the
+ * architecture such that SCU controls the flow, and IA simply hands out
+ * what is requested. IA follows SCU's commands, unless SCU requests
+ * something IA cannot give. TODO: that will be a special error case;
+ * we need to figure out how to handle it.
+ */
+int calc_offset_and_length(struct fw_ud *fw_ud_ptr, char *scu_req,
+			void **offset, u32 *len, struct fuph_hdr_attrs *fuph)
+{
+	u8 cnt;
+
+	if (!strncmp(DNX_IMAGE, scu_req, strlen(scu_req))) {
+		*offset = fw_ud_ptr->dnx_file_data;
+		*len = fw_ud_ptr->dnx_size;
+		return 0;
+	} else if (!strncmp(FUPH, scu_req, strlen(scu_req))) {
+		*offset = fw_ud_ptr->fw_file_data + fw_ud_ptr->fsize
+				- fw_ud_ptr->fuph_hdr_len;
+		*len = fw_ud_ptr->fuph_hdr_len;
+		return 0;
+	} else if (!strncmp(MIP, scu_req, strlen(scu_req))) {
+		*offset = fw_ud_ptr->fw_file_data + MIP_HEADER_OFFSET;
+		*len = fuph->mip_size;
+		return 0;
+	} else if (!strncmp(LOWER_128K, scu_req, strlen(scu_req))) {
+		*offset = fw_ud_ptr->fw_file_data + fuph->mip_size;
+		*len = MAX_FW_CHUNK;
+		return 0;
+	} else if (!strncmp(UPPER_128K, scu_req, strlen(scu_req))) {
+		*offset = fw_ud_ptr->fw_file_data
+				+ fuph->mip_size + MAX_FW_CHUNK;
+		*len = MAX_FW_CHUNK;
+		return 0;
+	} else {
+		for (cnt = 0; cnt < ARRAY_SIZE(misc_fw_table); cnt++) {
+
+			if (!strncmp(misc_fw_table[cnt].fw_type, scu_req,
+					strlen(misc_fw_table[cnt].fw_type))) {
+
+				if (strlen(scu_req) ==
+						misc_fw_table[cnt].str_len) {
+
+					if (helper_for_calc_offset_length
+						(fw_ud_ptr, scu_req,
+						offset, len, fuph,
+						misc_fw_table[cnt].fw_type) < 0)
+						goto error_case;
+
+					dev_dbg(fui.dev,
+					"\nmisc fw type=%s, len=%d,offset=%d",
+					misc_fw_table[cnt].fw_type, *len,
+					(int)*offset);
+
+					return 0;
+
+				} else
+					goto error_case;
+			}
+		}
+
+	}
+
+	dev_dbg(fui.dev, "Unexpected mailbox request from scu\n");
+
+error_case:
+	/* TODO::Need to test this error case..and see how SCU reacts
+	* and how IA handles
+	* subsequent error response and whether exit is graceful...
+	*/
+
+	dev_dbg(fui.dev, "error case,respond back to SCU..\n");
+	dev_dbg(fui.dev, "scu_req=%s\n", scu_req);
+	*len = 0;
+	*offset = 0;
+
+	return -EINVAL;
+}
+
+/**
+ * intel_scu_ipc_medfw_upgrade - Medfield Firmware update utility
+ *
+ * The flow and communication between IA and SCU has changed for
+ * Medfield firmware update. So we have a different api below
+ * to support Medfield firmware update.
+ *
+ * On success returns 0, for failure , returns < 0.
+ */
+static int intel_scu_ipc_medfw_upgrade(void)
+{
+	struct fw_ud *fw_ud_param = fui.fwud_pending;
+	struct mfld_fw_update	mfld_fw_upd;
+	u8 *fw_file_data = NULL;
+	u8 *fws = NULL;
+	u8 *fuph_start = NULL;
+	int ret_val = 0;
+
+	struct fuph_hdr_attrs fuph;
+	u32 length = 0;
+	void *offset;
+	enum mailbox_status mb_state;
+
+	/* set all devices in d0i0 before IFWI upgrade */
+	if (unlikely(pmu_set_devices_in_d0i0())) {
+		pr_debug("pmu: failed to set all devices in d0i0...\n");
+		BUG();
+	}
+
+	rpmsg_global_lock();
+	mfld_fw_upd.wscu = 0;
+	mfld_fw_upd.wia = 0;
+	memset(mfld_fw_upd.mb_status, 0, sizeof(char) * 8);
+
+	fw_file_data = fw_ud_param->fw_file_data;
+	mfld_fw_upd.sram = ioremap_nocache(SRAM_ADDR, MAX_FW_CHUNK);
+	if (mfld_fw_upd.sram == NULL) {
+		dev_err(fui.dev, "unable to map sram\n");
+		ret_val = -ENOMEM;
+		goto out_unlock;
+	}
+
+	mfld_fw_upd.mailbox = ioremap_nocache(MAILBOX_ADDR,
+					sizeof(struct ia_scu_mailbox));
+
+	if (mfld_fw_upd.mailbox == NULL) {
+		dev_err(fui.dev, "unable to map the mailbox\n");
+		ret_val = -ENOMEM;
+		goto unmap_sram;
+	}
+
+	/* IA initializes both IAFlag and SCUFlag to zero */
+	iowrite32(0, mfld_fw_upd.mailbox + SCU_FLAG_OFFSET);
+	iowrite32(0, mfld_fw_upd.mailbox + IA_FLAG_OFFSET);
+	memset_io(mfld_fw_upd.mailbox, 0, 8);
+
+	fws = kmalloc(MAX_FW_CHUNK, GFP_KERNEL);
+	if (fws == NULL) {
+		ret_val = -ENOMEM;
+		goto unmap_mb;
+	}
+
+	/* fuph header start */
+	fuph_start = fw_ud_param->fw_file_data + (fw_ud_param->fsize - 1)
+					- (fw_ud_param->fuph_hdr_len - 1);
+
+	/* Convert sizes in DWORDS to number of bytes. */
+	fuph.mip_size = (*((u32 *)(fuph_start + FUPH_MIP_OFFSET)))*4;
+	fuph.ifwi_size = (*((u32 *)(fuph_start + FUPH_IFWI_OFFSET)))*4;
+	fuph.psfw1_size = (*((u32 *)(fuph_start + FUPH_PSFW1_OFFSET)))*4;
+	fuph.psfw2_size = (*((u32 *)(fuph_start + FUPH_PSFW2_OFFSET)))*4;
+	fuph.ssfw_size = (*((u32 *)(fuph_start + FUPH_SSFW_OFFSET)))*4;
+	fuph.sucp_size = (*((u32 *)(fuph_start + FUPH_SUCP_OFFSET)))*4;
+
+	if (fw_ud_param->fuph_hdr_len == FUPH_HDR_LEN) {
+		fuph.vedfw_size =
+				(*((u32 *)(fuph_start + FUPH_VEDFW_OFFSET)))*4;
+	} else {
+		fuph.vedfw_size = 0;
+	}
+
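+	/*
+	 * Example of the DWORD-to-byte conversion above (the field value
+	 * is made up): a raw FUPH field of 0x6000 DWORDs corresponds to
+	 * 0x18000 bytes (96 KiB) of firmware data.
+	 */
+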
+	dev_dbg(fui.dev,
+		"ln=%d, mi=%d, if=%d, ps1=%d, ps2=%d, sfw=%d, sucp=%d, vd=%d\n",
+		fw_ud_param->fuph_hdr_len, fuph.mip_size, fuph.ifwi_size,
+		fuph.psfw1_size, fuph.psfw2_size, fuph.ssfw_size,
+		fuph.sucp_size,	fuph.vedfw_size);
+
+	/*
+	 * TODO_SK: There is just one write required from the IA side for
+	 * DFU, so this is commented out until that gets confirmed.
+	 */
+	/* ipc_command(IPC_CMD_FW_UPDATE_READY); */
+
+	/* 1. DNX size header */
+	memcpy(fws, fw_ud_param->dnx_hdr, DNX_HDR_LEN);
+
+	memcpy_toio(mfld_fw_upd.sram, fws, DNX_HDR_LEN);
+
+	/* There are synchronization issues between IA and SCU */
+	mb();
+
+	/* Write cmd to trigger an interrupt to SCU for firmware update */
+	ret_val = rpmsg_send_simple_command(fw_update_instance,
+					    IPCMSG_FW_UPDATE,
+					    IPC_CMD_FW_UPDATE_GO);
+	if (ret_val) {
+		dev_err(fui.dev, "IPC_CMD_FW_UPDATE_GO failed\n");
+		goto term;
+	}
+
+	mfld_fw_upd.wscu = !mfld_fw_upd.wscu;
+
+	if (busy_wait(&mfld_fw_upd) < 0) {
+		ret_val = -1;
+		goto term;
+	}
+
+	/*
+	 * TODO: Add an iteration count based on the sizes of the security
+	 * firmware, so that we loop through a finite number of iterations.
+	 * That way we can at least bound the number of iterations and
+	 * prevent infinite looping if there are any bugs. The only catch:
+	 * on B0, SCU will request each firmware chunk twice, since it
+	 * writes to two partitions.
+	 * TODO: Investigate whether the busy_wait timeout needs to be
+	 * increased, since SCU is now writing to two partitions.
+	 */
+
+	while ((mb_state = check_mb_status(&mfld_fw_upd)) != MB_DONE) {
+
+		if (mb_state == MB_ERROR) {
+			dev_dbg(fui.dev, "check_mb_status,error\n");
+			ret_val = -1;
+			goto term;
+		}
+
+		if (!strncmp(mfld_fw_upd.mb_status, FUPH_HDR_SIZE,
+				strlen(FUPH_HDR_SIZE))) {
+			iowrite32(fw_ud_param->fuph_hdr_len, mfld_fw_upd.sram);
+			/* There are synchronization issues between IA-SCU */
+			mb();
+			dev_dbg(fui.dev,
+				"copied fuph hdr size=%d\n",
+				ioread32(mfld_fw_upd.sram));
+			mfld_fw_upd.wia = !mfld_fw_upd.wia;
+			iowrite32(mfld_fw_upd.wia, mfld_fw_upd.mailbox +
+				IA_FLAG_OFFSET);
+			dev_dbg(fui.dev, "ia_flag=%d\n",
+				ioread32(mfld_fw_upd.mailbox + IA_FLAG_OFFSET));
+			mb();
+			mfld_fw_upd.wscu = !mfld_fw_upd.wscu;
+
+			if (busy_wait(&mfld_fw_upd) < 0) {
+				ret_val = -1;
+				goto term;
+			}
+
+			continue;
+		}
+
+		if (calc_offset_and_length(fw_ud_param, mfld_fw_upd.mb_status,
+					&offset, &length, &fuph) < 0) {
+			dev_err(fui.dev,
+				"calc_offset_and_length failed\n");
+			ret_val = -1;
+			goto term;
+		}
+
+		if ((process_fw_chunk(fws, offset, length,
+				      &mfld_fw_upd)) != 0) {
+			dev_err(fui.dev,
+			"Error processing fw chunk=%s\n",
+			mfld_fw_upd.mb_status);
+			ret_val = -1;
+			goto term;
+		} else
+			dev_dbg(fui.dev,
+				"PASS processing fw chunk=%s\n",
+				mfld_fw_upd.mb_status);
+	}
+	ret_val = intel_scu_ipc_check_status();
+
+term:
+	kfree(fws);
+unmap_mb:
+	iounmap(mfld_fw_upd.mailbox);
+unmap_sram:
+	iounmap(mfld_fw_upd.sram);
+out_unlock:
+	rpmsg_global_unlock();
+	return ret_val;
+}
+
+static void cur_err(const char *err_info)
+{
+	strncpy(err_buf, err_info, sizeof(err_buf) - 1);
+}
+
+static ssize_t write_dnx(struct file *file, struct kobject *kobj,
+	struct bin_attribute *attr, char *buf, loff_t off, size_t count)
+{
+	int ret;
+
+	mutex_lock(&fwud_lock);
+
+	if (!pending_data) {
+		pending_data = vmalloc(FOTA_MEM_SIZE);
+		if (NULL == pending_data) {
+			cur_err("alloc fota memory by sysfs failed\n");
+			ret = -ENOMEM;
+			goto end;
+		}
+	}
+
+	fui.fwud_pending->dnx_file_data = pending_data + IFWI_MAX_SIZE;
+
+	if (unlikely(off >= DNX_MAX_SIZE)) {
+		fui.fwud_pending->dnx_file_data = NULL;
+		cur_err("too large dnx binary stream!");
+		ret = -EFBIG;
+		goto end;
+	}
+
+	memcpy(fui.fwud_pending->dnx_file_data + off, buf, count);
+
+	if (!off)
+		fui.fwud_pending->dnx_size = count;
+	else
+		fui.fwud_pending->dnx_size += count;
+
+	mutex_unlock(&fwud_lock);
+	return count;
+
+end:
+	mutex_unlock(&fwud_lock);
+	return ret;
+}
+
+/* Parses from the end of the IFWI, looking for UPH$,
+ * to determine the length of the FUPH header.
+ */
+static int find_fuph_header_len(unsigned int *len,
+		unsigned char *file_data, unsigned int file_size)
+{
+	int ret = -EINVAL;
+	unsigned char *temp;
+	unsigned int cnt = 0;
+
+	if (!len || !file_data || !file_size) {
+		dev_err(fui.dev, "find_fuph_header_len: Invalid inputs\n");
+		return ret;
+	}
+
+	/* Skipping the checksum at the end, and moving to the
+	 * start of the last add-on firmware size in fuph.
+	 */
+	temp = file_data + file_size - SKIP_BYTES;
+
+	while (cnt <= FUPH_MAX_LEN) {
+		if (!strncmp(temp, FUPH_STR, sizeof(FUPH_STR) - 1)) {
+			pr_info("Fuph_hdr_len=%d\n", cnt + SKIP_BYTES);
+			*len = cnt + SKIP_BYTES;
+			ret = 0;
+			break;
+		}
+		temp -= 4;
+		cnt += 4;
+	}
+
+	return ret;
+}
+
+static ssize_t write_ifwi(struct file *file, struct kobject *kobj,
+	struct bin_attribute *attr, char *buf, loff_t off, size_t count)
+{
+	int ret;
+
+	mutex_lock(&fwud_lock);
+
+	if (!pending_data) {
+		pending_data = vmalloc(FOTA_MEM_SIZE);
+		if (NULL == pending_data) {
+			cur_err("alloc fota memory by sysfs failed\n");
+			ret = -ENOMEM;
+			goto end;
+		}
+	}
+
+	fui.fwud_pending->fw_file_data = pending_data;
+
+	if (unlikely(off >= IFWI_MAX_SIZE)) {
+		fui.fwud_pending->fw_file_data = NULL;
+		cur_err("too large ifwi binary stream!\n");
+		ret = -EFBIG;
+		goto end;
+	}
+
+	memcpy(fui.fwud_pending->fw_file_data + off, buf, count);
+
+	if (!off)
+		fui.fwud_pending->fsize = count;
+	else
+		fui.fwud_pending->fsize += count;
+
+	mutex_unlock(&fwud_lock);
+	return count;
+
+end:
+	mutex_unlock(&fwud_lock);
+	return ret;
+}
+
+/*
+ * intel_scu_fw_prepare - prepare dnx_hdr and fuph
+ *
+ * This function will be invoked at reboot, when DNX and IFWI data are ready.
+ */
+static int intel_scu_fw_prepare(struct fw_ud *fwud_pending)
+{
+	unsigned int size;
+	unsigned int gpFlags = 0;
+	unsigned int xorcs;
+	unsigned char dnxSH[DNX_HDR_LEN] = { 0 };
+
+	mutex_lock(&fwud_lock);
+
+	size = fui.fwud_pending->dnx_size;
+
+	/* Set GPFlags parameter */
+	gpFlags = gpFlags | (GPF_BIT32 << 31);
+	xorcs = (size ^ gpFlags);
+
+	memcpy((dnxSH + DNX_SIZE_OFFSET), (unsigned char *)(&size), 4);
+	memcpy((dnxSH + GP_FLAG_OFFSET), (unsigned char *)(&gpFlags), 4);
+	memcpy((dnxSH + XOR_CHK_OFFSET), (unsigned char *)(&xorcs), 4);
+
+	/* assign the last DNX_HDR_LEN bytes memory to dnx header */
+	fui.fwud_pending->dnx_hdr = pending_data + FOTA_MEM_SIZE - DNX_HDR_LEN;
+
+	/* directly memcpy to dnx_hdr */
+	memcpy(fui.fwud_pending->dnx_hdr, dnxSH, DNX_HDR_LEN);
+
+	if (find_fuph_header_len(&(fui.fwud_pending->fuph_hdr_len),
+			fui.fwud_pending->fw_file_data,
+			fui.fwud_pending->fsize) < 0) {
+		dev_err(fui.dev, "Error with FUPH header\n");
+		mutex_unlock(&fwud_lock);
+		return -EINVAL;
+	}
+
+	dev_dbg(fui.dev, "fupd_hdr_len=%d, fsize=%d, dnx_size=%d",
+		fui.fwud_pending->fuph_hdr_len,	fui.fwud_pending->fsize,
+		fui.fwud_pending->dnx_size);
+
+	mutex_unlock(&fwud_lock);
+	return 0;
+}
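+
+/*
+ * Resulting 24-byte DNX header layout produced above (offsets from the
+ * defines at the top of this file):
+ *
+ *	byte  0: dnx_size                (DNX_SIZE_OFFSET)
+ *	byte  4: gpFlags with bit 31 set (GP_FLAG_OFFSET)
+ *	byte 20: dnx_size ^ gpFlags      (XOR_CHK_OFFSET)
+ *
+ * e.g. (made-up size) for a 0x20000-byte DNX: size = 0x00020000,
+ * gpFlags = 0x80000000, checksum = 0x80020000.
+ */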
+
+int intel_scu_ipc_fw_update(void)
+{
+	int ret = 0;
+
+	/* Skip the fw upgrade process when fota memory was not allocated,
+	 * when the user cancelled the update, when one of dnx and ifwi
+	 * was not written, or when writing one of dnx and ifwi failed.
+	 */
+	if (!pending_data || !fui.fwud_pending ||
+		!fui.fwud_pending->dnx_file_data ||
+		!fui.fwud_pending->fw_file_data) {
+		pr_info("Jump FW upgrade process\n");
+		goto end;
+	}
+
+	ret = intel_scu_fw_prepare(fui.fwud_pending);
+	if (ret) {
+		dev_err(fui.dev, "intel_scu_fw_prepare failed\n");
+		goto end;
+	}
+
+	ret = intel_scu_ipc_medfw_upgrade();
+	if (ret)
+		dev_err(fui.dev, "intel_scu_ipc_medfw_upgrade failed\n");
+
+end:
+	return ret;
+}
+EXPORT_SYMBOL(intel_scu_ipc_fw_update);
+
+static ssize_t fw_version_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	u8 data[16] = { 0 };
+	int ret;
+	int i;
+	int used = 0;
+
+	ret = rpmsg_send_command(fw_update_instance, IPCMSG_FW_REVISION, 0,
+					NULL, (u32 *)data, 0, 4);
+	if (ret < 0) {
+		cur_err("Error getting fw version");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < 16; i++)
+		used += snprintf(buf + used, PAGE_SIZE - used, "%x ", data[i]);
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) {
+		ret = rpmsg_send_command(fw_update_instance,
+			IPCMSG_FW_REVISION, 1, NULL, (u32 *)data, 0, 4);
+		if (ret < 0) {
+			cur_err("Error getting fw version");
+			return -EINVAL;
+		}
+		for (i = 0; i < 16; i++)
+			used += snprintf(buf + used, PAGE_SIZE - used,
+				"%x ", data[i]);
+	}
+
+	return used;
+}
+
+static ssize_t last_error_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", err_buf);
+}
+
+static ssize_t cancel_update_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	int value;
+
+	if (sscanf(buf, "%d", &value) != 1) {
+		cur_err("One argument is needed\n");
+		return -EINVAL;
+	}
+
+	if (value == 1) {
+		mutex_lock(&fwud_lock);
+		fui.fwud_pending->fw_file_data = NULL;
+		fui.fwud_pending->dnx_file_data = NULL;
+		mutex_unlock(&fwud_lock);
+	} else {
+		cur_err("input '1' to cancel fw upgrade\n");
+		return -EINVAL;
+	}
+
+	return size;
+}
+
+#define __BIN_ATTR(_name, _mode, _size, _read, _write) { \
+	.attr = {.name = __stringify(_name), .mode = _mode },	\
+	.size	= _size,					\
+	.read	= _read,					\
+	.write	= _write,					\
+}
+
+#define BIN_ATTR(_name, _mode, _size, _read, _write) \
+struct bin_attribute bin_attr_##_name =	\
+	__BIN_ATTR(_name, _mode, _size, _read, _write)
+
+#define KOBJ_FW_UPDATE_ATTR(_name, _mode, _show, _store) \
+	struct kobj_attribute _name##_attr = __ATTR(_name, _mode, _show, _store)
+
+static KOBJ_FW_UPDATE_ATTR(cancel_update, S_IWUSR, NULL, cancel_update_store);
+static KOBJ_FW_UPDATE_ATTR(fw_version, S_IRUGO, fw_version_show, NULL);
+static KOBJ_FW_UPDATE_ATTR(last_error, S_IRUGO, last_error_show, NULL);
+static BIN_ATTR(dnx, S_IWUSR, DNX_MAX_SIZE, NULL, write_dnx);
+static BIN_ATTR(ifwi, S_IWUSR, IFWI_MAX_SIZE, NULL, write_ifwi);
+
+static struct attribute *fw_update_attrs[] = {
+	&cancel_update_attr.attr,
+	&fw_version_attr.attr,
+	&last_error_attr.attr,
+	NULL,
+};
+
+static struct attribute_group fw_update_attr_group = {
+	.name = "fw_info",
+	.attrs = fw_update_attrs,
+};
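+
+/*
+ * A possible update sequence from user space (a sketch; the paths follow
+ * from the "fw_update" kobject created under /sys/kernel in the probe
+ * routine below and the attribute names above):
+ *
+ *	cat ifwi.bin > /sys/kernel/fw_update/ifwi
+ *	cat dnx.bin > /sys/kernel/fw_update/dnx
+ *	reboot    # intel_scu_ipc_fw_update() runs on the reboot path
+ *
+ * Writing 1 to /sys/kernel/fw_update/fw_info/cancel_update aborts a
+ * staged update.
+ */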
+
+static int intel_fw_update_sysfs_create(struct kobject *kobj)
+{
+	int ret;
+
+	ret = sysfs_create_group(kobj, &fw_update_attr_group);
+	if (ret) {
+		dev_err(fui.dev, "Unable to export sysfs interface\n");
+		goto out;
+	}
+
+	ret = sysfs_create_bin_file(kobj, &bin_attr_dnx);
+	if (ret) {
+		dev_err(fui.dev, "Unable to create dnx bin file\n");
+		goto err_dnx_bin;
+	}
+
+	ret = sysfs_create_bin_file(kobj, &bin_attr_ifwi);
+	if (ret) {
+		dev_err(fui.dev, "Unable to create ifwi bin file\n");
+		goto err_ifwi_bin;
+	}
+
+	return 0;
+
+err_ifwi_bin:
+	sysfs_remove_bin_file(kobj, &bin_attr_dnx);
+err_dnx_bin:
+	sysfs_remove_group(kobj, &fw_update_attr_group);
+out:
+	return ret;
+}
+
+static void intel_fw_update_sysfs_remove(struct kobject *kobj)
+{
+	sysfs_remove_bin_file(kobj, &bin_attr_ifwi);
+	sysfs_remove_bin_file(kobj, &bin_attr_dnx);
+	sysfs_remove_group(kobj, &fw_update_attr_group);
+}
+
+static int fw_update_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret;
+	struct fw_update_info *fu_info = &fui;
+
+	if (rpdev == NULL) {
+		pr_err("fw_update rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed fw_update rpmsg device\n");
+
+	/* Allocate rpmsg instance for fw_update*/
+	ret = alloc_rpmsg_instance(rpdev, &fw_update_instance);
+	if (!fw_update_instance) {
+		dev_err(&rpdev->dev, "kzalloc fw_update instance failed\n");
+		goto out;
+	}
+	/* Initialize rpmsg instance */
+	init_rpmsg_instance(fw_update_instance);
+
+	fu_info->dev = &rpdev->dev;
+
+	fui.fwud_pending = kzalloc(sizeof(struct fw_ud), GFP_KERNEL);
+	if (NULL == fui.fwud_pending) {
+		ret = -ENOMEM;
+		dev_err(fui.dev, "alloc fwud_pending memory failed\n");
+		goto err_fwud_pending;
+	}
+
+	scu_fw_update_kobj = kobject_create_and_add("fw_update", kernel_kobj);
+	if (!scu_fw_update_kobj) {
+		ret = -ENOMEM;
+		dev_err(fui.dev, "create kobject failed\n");
+		goto err_kobj;
+	}
+
+	ret = intel_fw_update_sysfs_create(scu_fw_update_kobj);
+	if (ret) {
+		dev_err(fui.dev, "creating fw update sysfs failed\n");
+		goto err_free_fwud;
+	}
+
+	/* If the alloc_fota_mem_early flag is set, allocate FOTA_MEM_SIZE
+	 * bytes of memory: reserve the first contiguous IFWI_MAX_SIZE bytes
+	 * for IFWI, the next contiguous DNX_MAX_SIZE bytes for DNX, and the
+	 * last DNX_HDR_LEN bytes for the DNX header.
+	 */
+	if (alloc_fota_mem_early) {
+		pending_data = vmalloc(FOTA_MEM_SIZE);
+		if (NULL == pending_data) {
+			ret = -ENOMEM;
+			dev_err(fui.dev, "early alloc fota memory failed\n");
+			goto err_sysfs;
+		}
+	}
+
+	return 0;
+
+err_sysfs:
+	intel_fw_update_sysfs_remove(scu_fw_update_kobj);
+err_free_fwud:
+	kobject_put(scu_fw_update_kobj);
+err_kobj:
+	kfree(fui.fwud_pending);
+	fui.fwud_pending = NULL;
+err_fwud_pending:
+	free_rpmsg_instance(rpdev, &fw_update_instance);
+out:
+	return ret;
+}
+
+static void fw_update_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	free_rpmsg_instance(rpdev, &fw_update_instance);
+	intel_fw_update_sysfs_remove(scu_fw_update_kobj);
+	kobject_put(scu_fw_update_kobj);
+
+	vfree(pending_data);
+	pending_data = NULL;
+	kfree(fui.fwud_pending);
+	fui.fwud_pending = NULL;
+}
+
+static void fw_update_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id fw_update_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_fw_update" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, fw_update_rpmsg_id_table);
+
+static struct rpmsg_driver fw_update_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= fw_update_rpmsg_id_table,
+	.probe		= fw_update_rpmsg_probe,
+	.callback	= fw_update_rpmsg_cb,
+	.remove		= fw_update_rpmsg_remove,
+};
+
+static int __init fw_update_module_init(void)
+{
+	return register_rpmsg_driver(&fw_update_rpmsg);
+}
+
+static void __exit fw_update_module_exit(void)
+{
+	unregister_rpmsg_driver(&fw_update_rpmsg);
+}
+
+module_init(fw_update_module_init);
+module_exit(fw_update_module_exit);
+
+MODULE_AUTHOR("Sreedhara DS <sreedhara.ds@intel.com>");
+MODULE_AUTHOR("Ning Li <ning.li@intel.com>");
+MODULE_DESCRIPTION("Intel SCU Firmware Update Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 9215ed7..d8d251f05 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -23,22 +23,67 @@
 #include <linux/pm.h>
 #include <linux/pci.h>
 #include <linux/interrupt.h>
-#include <linux/sfi.h>
 #include <linux/module.h>
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
 #include <asm/intel_scu_ipc.h>
+#include <linux/pm_qos.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <linux/notifier.h>
+#include <linux/suspend.h>
 
-/* IPC defines the following message types */
-#define IPCMSG_WATCHDOG_TIMER 0xF8 /* Set Kernel Watchdog Threshold */
-#define IPCMSG_BATTERY        0xEF /* Coulomb Counter Accumulator */
-#define IPCMSG_FW_UPDATE      0xFE /* Firmware update */
-#define IPCMSG_PCNTRL         0xFF /* Power controller unit read/write */
-#define IPCMSG_FW_REVISION    0xF4 /* Get firmware revision */
+enum {
+	SCU_IPC_LINCROFT,
+	SCU_IPC_PENWELL,
+	SCU_IPC_CLOVERVIEW,
+	SCU_IPC_TANGIER,
+};
 
-/* Command id associated with message IPCMSG_PCNTRL */
-#define IPC_CMD_PCNTRL_W      0 /* Register write */
-#define IPC_CMD_PCNTRL_R      1 /* Register read */
-#define IPC_CMD_PCNTRL_M      2 /* Register read-modify-write */
+/* intel scu ipc driver data */
+struct intel_scu_ipc_pdata_t {
+	u32 ipc_base;
+	u32 i2c_base;
+	u32 ipc_len;
+	u32 i2c_len;
+};
+
+static struct intel_scu_ipc_pdata_t intel_scu_ipc_pdata[] = {
+	[SCU_IPC_LINCROFT] = {
+		.ipc_base = 0xff11c000,
+		.i2c_base = 0xff12b000,
+		.ipc_len = 0x100,
+		.i2c_len = 0x10,
+	},
+	[SCU_IPC_PENWELL] = {
+		.ipc_base = 0xff11c000,
+		.i2c_base = 0xff12b000,
+		.ipc_len = 0x100,
+		.i2c_len = 0x10,
+	},
+	[SCU_IPC_CLOVERVIEW] = {
+		.ipc_base = 0xff11c000,
+		.i2c_base = 0xff12b000,
+		.ipc_len = 0x100,
+		.i2c_len = 0x10,
+	},
+	[SCU_IPC_TANGIER] = {
+		.ipc_base = 0xff009000,
+		.i2c_base = 0xff00d000,
+		.ipc_len = 0x100,
+		.i2c_len = 0x10,
+	},
+};
+static int scu_ipc_pm_callback(struct notifier_block *nb,
+					unsigned long action,
+					void *ignored);
+
+static struct notifier_block scu_ipc_pm_notifier = {
+	.notifier_call = scu_ipc_pm_callback,
+	.priority = 1,
+};
 
 /*
  * IPC register summary
@@ -58,37 +103,82 @@
  *    message handler is called within firmware.
  */
 
-#define IPC_BASE_ADDR     0xFF11C000	/* IPC1 base register address */
-#define IPC_MAX_ADDR      0x100		/* Maximum IPC regisers */
-#define IPC_WWBUF_SIZE    20		/* IPC Write buffer Size */
-#define IPC_RWBUF_SIZE    20		/* IPC Read buffer Size */
-#define IPC_I2C_BASE      0xFF12B000	/* I2C control register base address */
-#define IPC_I2C_MAX_ADDR  0x10		/* Maximum I2C regisers */
+#define IPC_STATUS_ADDR		0x04
+#define IPC_SPTR_ADDR		0x08
+#define IPC_DPTR_ADDR		0x0C
+#define IPC_READ_BUFFER		0x90
+#define IPC_WRITE_BUFFER	0x80
+#define IPC_IOC			0x100
 
-static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id);
-static void ipc_remove(struct pci_dev *pdev);
-
-struct intel_scu_ipc_dev {
+struct intel_ipc_controller {
 	struct pci_dev *pdev;
 	void __iomem *ipc_base;
 	void __iomem *i2c_base;
+	int ioc;
+	int cmd;
+	struct completion cmd_complete;
 };
 
-static struct intel_scu_ipc_dev  ipcdev; /* Only one for now */
+static struct intel_ipc_controller  ipcdev; /* Only one for now */
 
-static int platform;		/* Platform type */
+static int platform; /* Platform type */
 
-/*
- * IPC Read Buffer (Read Only):
- * 16 byte buffer for receiving data from SCU, if IPC command
- * processing results in response data
- */
-#define IPC_READ_BUFFER		0x90
+static char *ipc_err_sources[] = {
+	[IPC_ERR_NONE] =
+		"no error",
+	[IPC_ERR_CMD_NOT_SUPPORTED] =
+		"command not supported",
+	[IPC_ERR_CMD_NOT_SERVICED] =
+		"command not serviced",
+	[IPC_ERR_UNABLE_TO_SERVICE] =
+		"unable to service",
+	[IPC_ERR_CMD_INVALID] =
+		"command invalid",
+	[IPC_ERR_CMD_FAILED] =
+		"command failed",
+	[IPC_ERR_EMSECURITY] =
+		"Invalid Battery",
+	[IPC_ERR_UNSIGNEDKERNEL] =
+		"Unsigned kernel",
+};
 
-#define IPC_I2C_CNTRL_ADDR	0
-#define I2C_DATA_ADDR		0x04
+/* PM Qos struct */
+static struct pm_qos_request *qos;
 
-static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
+/* Suspend status*/
+static bool suspend_status;
+static DEFINE_MUTEX(scu_suspend_lock);
+
+/* Suspend status get */
+bool suspend_in_progress(void)
+{
+	return suspend_status;
+}
+
+/* Suspend status set */
+void set_suspend_status(bool status)
+{
+	mutex_lock(&scu_suspend_lock);
+	suspend_status = status;
+	mutex_unlock(&scu_suspend_lock);
+}
+
+/* IPC PM notifier callback */
+static int scu_ipc_pm_callback(struct notifier_block *nb,
+					unsigned long action,
+					void *ignored)
+{
+	switch (action) {
+	case PM_SUSPEND_PREPARE:
+		set_suspend_status(true);
+		return NOTIFY_OK;
+	case PM_POST_SUSPEND:
+		set_suspend_status(false);
+		return NOTIFY_OK;
+	}
+
+	return NOTIFY_DONE;
+}
 
 /*
  * Command Register (Write Only):
@@ -96,9 +186,18 @@
  * Format:
  * |rfu2(8) | size(8) | command id(4) | rfu1(3) | ioc(1) | command(8)|
  */
-static inline void ipc_command(u32 cmd) /* Send ipc command */
+void intel_scu_ipc_send_command(u32 cmd) /* Send ipc command */
 {
-	writel(cmd, ipcdev.ipc_base);
+	ipcdev.cmd = cmd;
+	INIT_COMPLETION(ipcdev.cmd_complete);
+
+	if (system_state == SYSTEM_RUNNING && !suspend_in_progress()) {
+		ipcdev.ioc = 1;
+		writel(cmd | IPC_IOC, ipcdev.ipc_base);
+	} else {
+		ipcdev.ioc = 0;
+		writel(cmd, ipcdev.ipc_base);
+	}
 }
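+
+/*
+ * Illustrative encoding, following the format comment above: for command
+ * 0xFE with sub-command 0x02 and no data, callers build the word as
+ * (sub << 12 | cmd) = 0x20FE; when this function sets IPC_IOC (bit 8) to
+ * request a completion interrupt, the value written becomes 0x21FE.
+ */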
 
 /*
@@ -108,7 +207,7 @@
  */
 static inline void ipc_data_writel(u32 data, u32 offset) /* Write ipc data */
 {
-	writel(data, ipcdev.ipc_base + 0x80 + offset);
+	writel(data, ipcdev.ipc_base + IPC_WRITE_BUFFER + offset);
 }
 
 /*
@@ -119,9 +218,9 @@
  * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)|
  */
 
-static inline u8 ipc_read_status(void)
+static inline u32 ipc_read_status(void)
 {
-	return __raw_readl(ipcdev.ipc_base + 0x04);
+	return __raw_readl(ipcdev.ipc_base + IPC_STATUS_ADDR);
 }
 
 static inline u8 ipc_data_readb(u32 offset) /* Read ipc byte data */
@@ -134,243 +233,72 @@
 	return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
 }
 
-static inline int busy_loop(void) /* Wait till scu status is busy */
+int intel_scu_ipc_check_status(void)
 {
-	u32 status = 0;
-	u32 loop_count = 0;
+	int i;
+	int ret = 0;
+	int status;
+	int loop_count = 3000000;
+
+	if (ipcdev.ioc && (system_state == SYSTEM_RUNNING) &&
+			(!suspend_in_progress())) {
+		if (0 == wait_for_completion_timeout(
+				&ipcdev.cmd_complete, 3 * HZ))
+			ret = -ETIMEDOUT;
+	} else {
+		while ((ipc_read_status() & 1) && --loop_count)
+			udelay(1);
+		if (loop_count == 0)
+			ret = -ETIMEDOUT;
+	}
 
 	status = ipc_read_status();
-	while (status & 1) {
-		udelay(1); /* scu processing time is in few u secods */
-		status = ipc_read_status();
-		loop_count++;
-		/* break if scu doesn't reset busy bit after huge retry */
-		if (loop_count > 100000) {
-			dev_err(&ipcdev.pdev->dev, "IPC timed out");
-			return -ETIMEDOUT;
-		}
-	}
-	if ((status >> 1) & 1)
-		return -EIO;
+	if (ret == -ETIMEDOUT)
+		dev_err(&ipcdev.pdev->dev,
+			"IPC timed out, IPC_STS=0x%x, IPC_CMD=0x%x\n",
+			status, ipcdev.cmd);
 
-	return 0;
-}
-
-/* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */
-static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
-{
-	int nc;
-	u32 offset = 0;
-	int err;
-	u8 cbuf[IPC_WWBUF_SIZE] = { };
-	u32 *wbuf = (u32 *)&cbuf;
-
-	mutex_lock(&ipclock);
-
-	memset(cbuf, 0, sizeof(cbuf));
-
-	if (ipcdev.pdev == NULL) {
-		mutex_unlock(&ipclock);
-		return -ENODEV;
+	if (status & 0x2) {
+		ret = -EIO;
+		i = (status >> 16) & 0xFF;
+		if (i < ARRAY_SIZE(ipc_err_sources))
+			dev_err(&ipcdev.pdev->dev,
+				"IPC failed: %s, IPC_STS=0x%x, IPC_CMD=0x%x\n",
+				ipc_err_sources[i], status, ipcdev.cmd);
+		else
+			dev_err(&ipcdev.pdev->dev,
+				"IPC failed: unknown error, IPC_STS=0x%x, "
+				"IPC_CMD=0x%x\n", status, ipcdev.cmd);
+		if ((i == IPC_ERR_UNSIGNEDKERNEL) || (i == IPC_ERR_EMSECURITY))
+			ret = -EACCES;
 	}
 
-	for (nc = 0; nc < count; nc++, offset += 2) {
-		cbuf[offset] = addr[nc];
-		cbuf[offset + 1] = addr[nc] >> 8;
-	}
-
-	if (id == IPC_CMD_PCNTRL_R) {
-		for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
-			ipc_data_writel(wbuf[nc], offset);
-		ipc_command((count*2) << 16 |  id << 12 | 0 << 8 | op);
-	} else if (id == IPC_CMD_PCNTRL_W) {
-		for (nc = 0; nc < count; nc++, offset += 1)
-			cbuf[offset] = data[nc];
-		for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
-			ipc_data_writel(wbuf[nc], offset);
-		ipc_command((count*3) << 16 |  id << 12 | 0 << 8 | op);
-	} else if (id == IPC_CMD_PCNTRL_M) {
-		cbuf[offset] = data[0];
-		cbuf[offset + 1] = data[1];
-		ipc_data_writel(wbuf[0], 0); /* Write wbuff */
-		ipc_command(4 << 16 |  id << 12 | 0 << 8 | op);
-	}
-
-	err = busy_loop();
-	if (id == IPC_CMD_PCNTRL_R) { /* Read rbuf */
-		/* Workaround: values are read as 0 without memcpy_fromio */
-		memcpy_fromio(cbuf, ipcdev.ipc_base + 0x90, 16);
-		for (nc = 0; nc < count; nc++)
-			data[nc] = ipc_data_readb(nc);
-	}
-	mutex_unlock(&ipclock);
-	return err;
+	return ret;
 }
 
-/**
- *	intel_scu_ipc_ioread8		-	read a word via the SCU
- *	@addr: register on SCU
- *	@data: return pointer for read byte
- *
- *	Read a single register. Returns 0 on success or an error code. All
- *	locking between SCU accesses is handled for the caller.
- *
- *	This function may sleep.
- */
-int intel_scu_ipc_ioread8(u16 addr, u8 *data)
+void intel_scu_ipc_lock(void)
 {
-	return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
-}
-EXPORT_SYMBOL(intel_scu_ipc_ioread8);
+	/* Prevent C-states beyond C6 */
+	pm_qos_update_request(qos, CSTATE_EXIT_LATENCY_S0i1 - 1);
 
-/**
- *	intel_scu_ipc_ioread16		-	read a word via the SCU
- *	@addr: register on SCU
- *	@data: return pointer for read word
- *
- *	Read a register pair. Returns 0 on success or an error code. All
- *	locking between SCU accesses is handled for the caller.
- *
- *	This function may sleep.
- */
-int intel_scu_ipc_ioread16(u16 addr, u16 *data)
+	/* Prevent S3 */
+	mutex_lock(&scu_suspend_lock);
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_lock);
+
+void intel_scu_ipc_unlock(void)
 {
-	u16 x[2] = {addr, addr + 1 };
-	return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
+	/* Re-enable S3 */
+	mutex_unlock(&scu_suspend_lock);
+
+	/* Re-enable Deeper C-states beyond C6 */
+	pm_qos_update_request(qos, PM_QOS_DEFAULT_VALUE);
 }
-EXPORT_SYMBOL(intel_scu_ipc_ioread16);
+EXPORT_SYMBOL_GPL(intel_scu_ipc_unlock);
 
 /**
- *	intel_scu_ipc_ioread32		-	read a dword via the SCU
- *	@addr: register on SCU
- *	@data: return pointer for read dword
- *
- *	Read four registers. Returns 0 on success or an error code. All
- *	locking between SCU accesses is handled for the caller.
- *
- *	This function may sleep.
- */
-int intel_scu_ipc_ioread32(u16 addr, u32 *data)
-{
-	u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
-	return pwr_reg_rdwr(x, (u8 *)data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
-}
-EXPORT_SYMBOL(intel_scu_ipc_ioread32);
-
-/**
- *	intel_scu_ipc_iowrite8		-	write a byte via the SCU
- *	@addr: register on SCU
- *	@data: byte to write
- *
- *	Write a single register. Returns 0 on success or an error code. All
- *	locking between SCU accesses is handled for the caller.
- *
- *	This function may sleep.
- */
-int intel_scu_ipc_iowrite8(u16 addr, u8 data)
-{
-	return pwr_reg_rdwr(&addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
-}
-EXPORT_SYMBOL(intel_scu_ipc_iowrite8);
-
-/**
- *	intel_scu_ipc_iowrite16		-	write a word via the SCU
- *	@addr: register on SCU
- *	@data: word to write
- *
- *	Write two registers. Returns 0 on success or an error code. All
- *	locking between SCU accesses is handled for the caller.
- *
- *	This function may sleep.
- */
-int intel_scu_ipc_iowrite16(u16 addr, u16 data)
-{
-	u16 x[2] = {addr, addr + 1 };
-	return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
-}
-EXPORT_SYMBOL(intel_scu_ipc_iowrite16);
-
-/**
- *	intel_scu_ipc_iowrite32		-	write a dword via the SCU
- *	@addr: register on SCU
- *	@data: dword to write
- *
- *	Write four registers. Returns 0 on success or an error code. All
- *	locking between SCU accesses is handled for the caller.
- *
- *	This function may sleep.
- */
-int intel_scu_ipc_iowrite32(u16 addr, u32 data)
-{
-	u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
-	return pwr_reg_rdwr(x, (u8 *)&data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
-}
-EXPORT_SYMBOL(intel_scu_ipc_iowrite32);
-
-/**
- *	intel_scu_ipc_readvv		-	read a set of registers
- *	@addr: register list
- *	@data: bytes to return
- *	@len: length of array
- *
- *	Read registers. Returns 0 on success or an error code. All
- *	locking between SCU accesses is handled for the caller.
- *
- *	The largest array length permitted by the hardware is 5 items.
- *
- *	This function may sleep.
- */
-int intel_scu_ipc_readv(u16 *addr, u8 *data, int len)
-{
-	return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
-}
-EXPORT_SYMBOL(intel_scu_ipc_readv);
-
-/**
- *	intel_scu_ipc_writev		-	write a set of registers
- *	@addr: register list
- *	@data: bytes to write
- *	@len: length of array
- *
- *	Write registers. Returns 0 on success or an error code. All
- *	locking between SCU accesses is handled for the caller.
- *
- *	The largest array length permitted by the hardware is 5 items.
- *
- *	This function may sleep.
- *
- */
-int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
-{
-	return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
-}
-EXPORT_SYMBOL(intel_scu_ipc_writev);
-
-
-/**
- *	intel_scu_ipc_update_register	-	r/m/w a register
- *	@addr: register address
- *	@bits: bits to update
- *	@mask: mask of bits to update
- *
- *	Read-modify-write power control unit register. The first data argument
- *	must be register value and second is mask value
- *	mask is a bitmap that indicates which bits to update.
- *	0 = masked. Don't modify this bit, 1 = modify this bit.
- *	returns 0 on success or an error code.
- *
- *	This function may sleep. Locking between SCU accesses is handled
- *	for the caller.
- */
-int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask)
-{
-	u8 data[2] = { bits, mask };
-	return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
-}
-EXPORT_SYMBOL(intel_scu_ipc_update_register);
-
-/**
- *	intel_scu_ipc_simple_command	-	send a simple command
+ *	intel_scu_ipc_simple_command - send a simple command
  *	@cmd: command
  *	@sub: sub type
  *
@@ -385,117 +313,115 @@
 {
 	int err;
 
-	mutex_lock(&ipclock);
-	if (ipcdev.pdev == NULL) {
-		mutex_unlock(&ipclock);
+	if (ipcdev.pdev == NULL)
 		return -ENODEV;
-	}
-	ipc_command(sub << 12 | cmd);
-	err = busy_loop();
-	mutex_unlock(&ipclock);
+
+	intel_scu_ipc_lock();
+	intel_scu_ipc_send_command(sub << 12 | cmd);
+	err = intel_scu_ipc_check_status();
+	intel_scu_ipc_unlock();
 	return err;
 }
 EXPORT_SYMBOL(intel_scu_ipc_simple_command);
 
 /**
- *	intel_scu_ipc_command	-	command with data
- *	@cmd: command
- *	@sub: sub type
- *	@in: input data
- *	@inlen: input length in dwords
- *	@out: output data
- *	@outlein: output length in dwords
+ * intel_scu_ipc_raw_cmd - raw ipc command with data
+ * @cmd: command
+ * @sub: sub type
+ * @in: input data
+ * @inlen: input length in bytes
+ * @out: output data
+ * @outlen: output length in dwords
+ * @sptr: data written to the SPTR register
+ * @dptr: data written to the DPTR register
  *
- *	Issue a command to the SCU which involves data transfers. Do the
- *	data copies under the lock but leave it for the caller to interpret
+ * Issue a command to the SCU which involves data transfers. Do the
+ * data copies under the lock but leave it for the caller to interpret.
+ * Note: this function must be called while holding ipclock.
  */
-
-int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
-							u32 *out, int outlen)
+int intel_scu_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
+		u32 outlen, u32 dptr, u32 sptr)
 {
 	int i, err;
+	u32 wbuf[4] = { 0 };
 
-	mutex_lock(&ipclock);
-	if (ipcdev.pdev == NULL) {
-		mutex_unlock(&ipclock);
+	if (ipcdev.pdev == NULL)
 		return -ENODEV;
+
+	if (inlen > 16)
+		return -EINVAL;
+
+	memcpy(wbuf, in, inlen);
+
+	writel(dptr, ipcdev.ipc_base + IPC_DPTR_ADDR);
+	writel(sptr, ipcdev.ipc_base + IPC_SPTR_ADDR);
+
+	/*
+	 * The SRAM controller doesn't support 8-bit writes, only 32-bit
+	 * writes, so we write into the WBUF in 32-bit units; the SCU FW
+	 * uses inlen to determine the actual input data length in the
+	 * WBUF.
+	 */
+	for (i = 0; i < ((inlen + 3) / 4); i++)
+		ipc_data_writel(wbuf[i], 4 * i);
+
+	/*
+	 * The watchdog IPC command is an exception: for historical
+	 * reasons it uses double words as the unit of input data size,
+	 * and the SCU FW expects that.
+	 */
+	if ((cmd & 0xFF) == IPCMSG_WATCHDOG_TIMER)
+		inlen = (inlen + 3) / 4;
+	/*
+	 * In case of 3 pmic writes or read-modify-writes there are
+	 * holes in the middle of the buffer which are ignored by the
+	 * SCU. These bytes should not be included in the size of the
+	 * ipc msg. The holes are as follows:
+	 *  write: wbuf[6 & 7]
+	 *  read-modify-write: wbuf[6 & 7 & 11]
+	 */
+	else if ((cmd & 0xFF) == IPCMSG_PCNTRL) {
+		if (sub == IPC_CMD_PCNTRL_W && inlen == 11)
+			inlen -= 2;
+		else if (sub == IPC_CMD_PCNTRL_M && inlen == 15)
+			inlen -= 3;
 	}
-
-	for (i = 0; i < inlen; i++)
-		ipc_data_writel(*in++, 4 * i);
-
-	ipc_command((inlen << 16) | (sub << 12) | cmd);
-	err = busy_loop();
+	intel_scu_ipc_send_command((inlen << 16) | (sub << 12) | cmd);
+	err = intel_scu_ipc_check_status();
 
 	for (i = 0; i < outlen; i++)
 		*out++ = ipc_data_readl(4 * i);
 
-	mutex_unlock(&ipclock);
 	return err;
 }
-EXPORT_SYMBOL(intel_scu_ipc_command);
+EXPORT_SYMBOL_GPL(intel_scu_ipc_raw_cmd);
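As a worked illustration of the command encoding used above (not part of the
patch): the command register packs length, sub type and command as
(inlen << 16) | (sub << 12) | cmd, and a 5-byte input rounds up to
(5 + 3) / 4 = 2 WBUF dwords, so only wbuf[0] and wbuf[1] are written.

	/* Sketch of the packing, assuming the layout described above. */
	static u32 pack_ipc_command(u32 cmd, u32 sub, u32 inlen)
	{
		return (inlen << 16) | (sub << 12) | cmd;
	}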
 
-/*I2C commands */
-#define IPC_I2C_WRITE 1 /* I2C Write command */
-#define IPC_I2C_READ  2 /* I2C Read command */
-
-/**
- *	intel_scu_ipc_i2c_cntrl		-	I2C read/write operations
- *	@addr: I2C address + command bits
- *	@data: data to read/write
- *
- *	Perform an an I2C read/write operation via the SCU. All locking is
- *	handled for the caller. This function may sleep.
- *
- *	Returns an error code or 0 on success.
- *
- *	This has to be in the IPC driver for the locking.
- */
-int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data)
+int intel_scu_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
+		u32 *out, u32 outlen)
 {
-	u32 cmd = 0;
-
-	mutex_lock(&ipclock);
-	if (ipcdev.pdev == NULL) {
-		mutex_unlock(&ipclock);
-		return -ENODEV;
-	}
-	cmd = (addr >> 24) & 0xFF;
-	if (cmd == IPC_I2C_READ) {
-		writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR);
-		/* Write not getting updated without delay */
-		mdelay(1);
-		*data = readl(ipcdev.i2c_base + I2C_DATA_ADDR);
-	} else if (cmd == IPC_I2C_WRITE) {
-		writel(*data, ipcdev.i2c_base + I2C_DATA_ADDR);
-		mdelay(1);
-		writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR);
-	} else {
-		dev_err(&ipcdev.pdev->dev,
-			"intel_scu_ipc: I2C INVALID_CMD = 0x%x\n", cmd);
-
-		mutex_unlock(&ipclock);
-		return -EIO;
-	}
-	mutex_unlock(&ipclock);
-	return 0;
+	int ret;
+
+	intel_scu_ipc_lock();
+	ret = intel_scu_ipc_raw_cmd(cmd, sub, in, inlen, out, outlen, 0, 0);
+	intel_scu_ipc_unlock();
+	return ret;
 }
-EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl);
+EXPORT_SYMBOL_GPL(intel_scu_ipc_command);
 
 /*
  * Interrupt handler gets called when ioc bit of IPC_COMMAND_REG set to 1
  * When ioc bit is set to 1, caller api must wait for interrupt handler called
- * which in turn unlocks the caller api. Currently this is not used
+ * which in turn unlocks the caller api.
  *
  * This is edge triggered so we need take no action to clear anything
  */
 static irqreturn_t ioc(int irq, void *dev_id)
 {
+	complete(&ipcdev.cmd_complete);
 	return IRQ_HANDLED;
 }
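/*
 * Illustrative sketch (not part of this patch): a caller issuing a
 * command with the ioc bit set is expected to block on the completion
 * signalled above, along these lines:
 *
 *	init_completion(&ipcdev.cmd_complete);
 *	... write the command with the ioc bit set ...
 *	wait_for_completion(&ipcdev.cmd_complete);
 */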
 
 /**
- *	ipc_probe	-	probe an Intel SCU IPC
+ *	ipc_probe - probe an Intel SCU IPC
  *	@dev: the PCI device matching
  *	@id: entry in the match table
  *
@@ -504,12 +430,16 @@
  */
 static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
-	int err;
+	int err, pid;
+	struct intel_scu_ipc_pdata_t *pdata;
 	resource_size_t pci_resource;
 
 	if (ipcdev.pdev)		/* We support only one SCU */
 		return -EBUSY;
 
+	pid = id->driver_data;
+	pdata = &intel_scu_ipc_pdata[pid];
+
 	ipcdev.pdev = pci_dev_get(dev);
 
 	err = pci_enable_device(dev);
@@ -524,14 +454,17 @@
 	if (!pci_resource)
 		return -ENOMEM;
 
-	if (request_irq(dev->irq, ioc, 0, "intel_scu_ipc", &ipcdev))
+	init_completion(&ipcdev.cmd_complete);
+
+	if (request_irq(dev->irq, ioc, IRQF_NO_SUSPEND, "intel_scu_ipc",
+		&ipcdev))
 		return -EBUSY;
 
-	ipcdev.ipc_base = ioremap_nocache(IPC_BASE_ADDR, IPC_MAX_ADDR);
+	ipcdev.ipc_base = ioremap_nocache(pdata->ipc_base, pdata->ipc_len);
 	if (!ipcdev.ipc_base)
 		return -ENOMEM;
 
-	ipcdev.i2c_base = ioremap_nocache(IPC_I2C_BASE, IPC_I2C_MAX_ADDR);
+	ipcdev.i2c_base = ioremap_nocache(pdata->i2c_base, pdata->i2c_len);
 	if (!ipcdev.i2c_base) {
 		iounmap(ipcdev.ipc_base);
 		return -ENOMEM;
@@ -543,7 +476,7 @@
 }
 
 /**
- *	ipc_remove	-	remove a bound IPC device
+ *	ipc_remove - remove a bound IPC device
  *	@pdev: PCI device
  *
  *	In practice the SCU is not removable but this function is also
@@ -564,7 +497,10 @@
 }
 
 static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x082a)},
+	{PCI_VDEVICE(INTEL, 0x080e), SCU_IPC_PENWELL},
+	{PCI_VDEVICE(INTEL, 0x082a), SCU_IPC_LINCROFT},
+	{PCI_VDEVICE(INTEL, 0x08ea), SCU_IPC_CLOVERVIEW},
+	{PCI_VDEVICE(INTEL, 0x11a0), SCU_IPC_TANGIER},
 	{ 0,}
 };
 MODULE_DEVICE_TABLE(pci, pci_ids);
@@ -576,17 +512,27 @@
 	.remove = ipc_remove,
 };
 
-
-static int __init intel_scu_ipc_init(void)
+static int intel_scu_ipc_init(void)
 {
-	platform = mrst_identify_cpu();
+	platform = intel_mid_identify_cpu();
 	if (platform == 0)
 		return -ENODEV;
+
+	qos = kzalloc(sizeof(struct pm_qos_request), GFP_KERNEL);
+	if (!qos)
+		return -ENOMEM;
+
+	pm_qos_add_request(qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+
+	register_pm_notifier(&scu_ipc_pm_notifier);
+
 	return  pci_register_driver(&ipc_driver);
 }
 
 static void __exit intel_scu_ipc_exit(void)
 {
+	pm_qos_remove_request(qos);
+
 	pci_unregister_driver(&ipc_driver);
 }
 
@@ -594,5 +540,5 @@
 MODULE_DESCRIPTION("Intel SCU IPC driver");
 MODULE_LICENSE("GPL");
 
-module_init(intel_scu_ipc_init);
+fs_initcall(intel_scu_ipc_init);
 module_exit(intel_scu_ipc_exit);
diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
index aa45424..9276bce 100644
--- a/drivers/platform/x86/intel_scu_ipcutil.c
+++ b/drivers/platform/x86/intel_scu_ipcutil.c
@@ -3,6 +3,8 @@
  *
  * (C) Copyright 2008-2010 Intel Corporation
  * Author: Sreedhara DS (sreedhara.ds@intel.com)
+ * (C) Copyright 2010 Intel Corporation
+ * Author: Sudha Krishnakumar (sudha.krishnakumar@intel.com)
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -22,22 +24,420 @@
 #include <linux/uaccess.h>
 #include <linux/slab.h>
 #include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/io.h>
+#include <linux/rpmsg.h>
 #include <asm/intel_scu_ipc.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_scu_ipcutil.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+#include <linux/pm_runtime.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
 
-static int major;
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
 
-/* ioctl commnds */
-#define	INTE_SCU_IPC_REGISTER_READ	0
-#define INTE_SCU_IPC_REGISTER_WRITE	1
-#define INTE_SCU_IPC_REGISTER_UPDATE	2
+#define MAX_FW_SIZE 264192
 
-struct scu_ipc_data {
-	u32     count;  /* No. of registers */
-	u16     addr[5]; /* Register addresses */
-	u8      data[5]; /* Register data */
-	u8      mask; /* Valid for read-modify-write */
+#define PMIT_RESET1_OFFSET		14
+#define PMIT_RESET2_OFFSET		15
+
+#define IPC_RESIDENCY_CMD_ID_START	0
+#define IPC_RESIDENCY_CMD_ID_DUMP	2
+
+#define SRAM_ADDR_S0IX_RESIDENCY	0xFFFF71E0
+#define ALL_RESIDENCY_DATA_SIZE		12
+
+#define DUMP_OSNIB
+
+#define OSHOB_EXTEND_DESC_SIZE	52  /* OSHOB header+osnib+oem info: 52 bytes.*/
+
+#define OSHOB_HEADER_MAGIC_SIZE	4   /* Size (bytes) of magic number in OSHOB */
+				    /* header.                               */
+
+#define OSHOB_MAGIC_NUMBER	"$OH$"	/* If found when reading the first   */
+					/* 4 bytes of the OSHOB zone, it     */
+					/* means that the new extended OSHOB */
+					/* is going to be used.              */
+
+#define OSHOB_REV_MAJ_DEFAULT	0	/* Default revision number of OSHOB. */
+#define OSHOB_REV_MIN_DEFAULT	1	/* If 0.1 the default OSHOB is used  */
+					/* instead of the extended one.      */
+
+/* Defines for the SCU buffer included in OSHOB structure. */
+#define OSHOB_SCU_BUF_BASE_DW_SIZE	1   /* In dwords. By default SCU     */
+					    /* buffer size is 1 dword.       */
+
+#define OSHOB_SCU_BUF_MRFLD_DW_SIZE (4*OSHOB_SCU_BUF_BASE_DW_SIZE)
+					    /* In dwords. On Merrifield the  */
+					    /* SCU trace buffer size is      */
+					    /* 4 dwords.                     */
+#define OSHOB_DEF_FABRIC_ERR_MRFLD_SIZE   50	/* In DWORDS. For Merrifield.*/
+					/* Fabric error log size (in DWORDS).*/
+					/* From offs 0x44 to 0x10C.          */
+					/* Used in default OSHOB.            */
+
+#define OSNIB_SIZE		32	/* Size (bytes) of the default OSNIB.*/
+
+#define OSNIB_INTEL_RSVD_SIZE	24	/* Size (bytes) of Intel RESERVED in */
+					/* OSNIB.                            */
+#define OSNIB_OEM_RSVD_SIZE	96	/* Size (bytes) of OEM RESERVED      */
+					/* in OSNIB.                         */
+
+#define OSNIB_NVRAM_SIZE	128	/* Size (bytes) of NVRAM             */
+					/* in OSNIB.                         */
+
+#define OSHOB_DEF_FABRIC_ERR_SIZE   50	/* In DWORDS.                        */
+					/* Fabric error log size (in DWORDS).*/
+					/* From offs 0x44 to 0x10C.          */
+					/* Used in default OSHOB.            */
+
+#define OSHOB_FABRIC_ERROR1_SIZE  12    /* 1st part of Fabric error dump.    */
+					/* Used in extended OSHOB.           */
+
+#define OSHOB_FABRIC_ERROR2_SIZE  9     /* 2nd part of Fabric error dump.    */
+					/* Used in extended OSHOB.           */
+
+#define OSHOB_RESERVED_DEBUG_SIZE 5     /* Reserved for debug                */
+
+/* Size (bytes) of the default OSHOB structure. Includes the default OSNIB   */
+/* size.                                                                     */
+#define OSHOB_SIZE	(68 + (4*OSHOB_SCU_BUF_BASE_DW_SIZE) + \
+			    (4*OSHOB_DEF_FABRIC_ERR_SIZE))	/* In bytes. */
+
+#define OSHOB_MRFLD_SIZE (68 + (4*OSHOB_SCU_BUF_MRFLD_DW_SIZE) + \
+			    (4*OSHOB_DEF_FABRIC_ERR_MRFLD_SIZE))/* In bytes. */
+
+/* SCU buffer size is given in dwords. So it is x4 here to get the total     */
+/* number of bytes.                                                          */
+
+#define SCU_TRACE_HEADER_SIZE    16     /* SCU trace header                  */
+
+#define CHAABI_DEBUG_DATA_SIZE   5      /* Reserved for chaabi debug         */
+
+#define OSHOB_RESERVED_SIZE      184    /* Reserved                          */
+
+
+struct chip_reset_event {
+	int id;
+	const char *reset_ev1_name;
+	const char *reset_ev2_name;
 };
 
+static struct chip_reset_event chip_reset_events[] = {
+	{ INTEL_MID_CPU_CHIP_TANGIER, "RESETSRC0", "RESETSRC1" },
+	{ INTEL_MID_CPU_CHIP_CLOVERVIEW, "RESETIRQ1", "RESETIRQ2" },
+	{ INTEL_MID_CPU_CHIP_PENWELL, "RESETIRQ1", "RESETIRQ2" },
+};
+
+struct osnib_target_os {
+	const char *target_os_name;
+	int id;
+};
+
+static struct osnib_target_os osnib_target_oses[] = {
+	{ "main", SIGNED_MOS_ATTR },
+	{ "charging", SIGNED_COS_ATTR  },
+	{ "recovery", SIGNED_RECOVERY_ATTR },
+	{ "fastboot", SIGNED_POS_ATTR },
+	{ "factory", SIGNED_FACTORY_ATTR },
+};
+
+
+struct osnib_wake_src {
+	u8 id;
+	const char *wakesrc_name;
+};
+
+static struct osnib_wake_src osnib_wake_srcs[] = {
+	{ WAKE_BATT_INSERT, "battery inserted" },
+	{ WAKE_PWR_BUTTON_PRESS, "power button pressed" },
+	{ WAKE_RTC_TIMER, "rtc timer" },
+	{ WAKE_USB_CHRG_INSERT, "usb charger inserted" },
+	{ WAKE_RESERVED, "reserved" },
+	{ WAKE_REAL_RESET, "real reset" },
+	{ WAKE_COLD_BOOT, "cold boot" },
+	{ WAKE_UNKNOWN, "unknown" },
+	{ WAKE_KERNEL_WATCHDOG_RESET, "kernel watchdog reset" },
+	{ WAKE_SECURITY_WATCHDOG_RESET, "security watchdog reset" },
+	{ WAKE_WATCHDOG_COUNTER_EXCEEDED, "watchdog counter exceeded" },
+	{ WAKE_POWER_SUPPLY_DETECTED, "power supply detected" },
+	{ WAKE_FASTBOOT_BUTTONS_COMBO, "fastboot combo" },
+	{ WAKE_NO_MATCHING_OSIP_ENTRY, "no matching osip entry" },
+	{ WAKE_CRITICAL_BATTERY, "critical battery" },
+	{ WAKE_INVALID_CHECKSUM, "invalid checksum" },
+	{ WAKE_FORCED_RESET, "forced reset"},
+	{ WAKE_ACDC_CHRG_INSERT, "ac charger inserted" },
+	{ WAKE_PMIC_WATCHDOG_RESET, "pmic watchdog reset" },
+	{ WAKE_PLATFORM_WATCHDOG_RESET, "HWWDT reset platform" },
+	{ WAKE_SC_WATCHDOG_RESET, "HWWDT reset SC" },
+};
+
+
+/* OSNIB allocation. */
+struct scu_ipc_osnib {
+	u8 target_mode;        /* Target mode.                      */
+	u8 wd_count;           /* Software watchdog.                */
+	u8 alarm;              /* RTC alarm.                        */
+	u8 wakesrc;            /* WAKESRC.                          */
+	u8 reset_ev1;          /* RESETIRQ1 or RESETSRC0.           */
+	u8 reset_ev2;          /* RESETIRQ2 or RESETSRC1.           */
+	u8 spare;              /* Spare.                            */
+	u8 intel_reserved[OSNIB_INTEL_RSVD_SIZE]; /* INTEL RESERVED */
+			       /* (offsets 7 to 30).                */
+	u8 checksum;           /* CHECKSUM.                         */
+	u8 oem_reserved[OSNIB_OEM_RSVD_SIZE];     /* OEM RESERVED   */
+			       /* (offsets 32 to 127).              */
+	u8 nvram[OSNIB_NVRAM_SIZE];               /* NVRAM          */
+			       /* (offsets 128 to 255).             */
+};
+
+/* Default OSHOB allocation. */
+struct scu_ipc_oshob {
+	u32 scutxl;             /* SCUTxl offset position.      */
+	u32 iatxl;              /* IATxl offset.                */
+	u32 bocv;               /* BOCV offset.                 */
+	u8 osnibr[OSNIB_SIZE];  /* OSNIB area offset.           */
+	u32 pmit;               /* PMIT offset.                 */
+	u32 pemmcmhki;          /* PeMMCMHKI offset.            */
+	u32 osnibw_ptr;         /* OSNIB Write at offset 0x34.  */
+	u32 fab_err_log[OSHOB_DEF_FABRIC_ERR_SIZE]; /* Fabric   */
+				/* error log buffer.            */
+};
+
+/* Extended OSHOB allocation. version 1.3 */
+struct scu_ipc_oshob_extend {
+	u32 magic;              /* MAGIC number.                           */
+	u8  rev_major;          /* Revision major.                         */
+	u8  rev_minor;          /* Revision minor.                         */
+	u16 oshob_size;         /* OSHOB size.                             */
+	u32 nvram_addr;         /* NVRAM phys address                      */
+	u32 scutxl;             /* SCUTxl offset position.                 */
+				/* If on MRFLD platform, next param may be */
+				/* shifted by                              */
+				/* (OSHOB_SCU_BUF_MRFLD_DW_SIZE - 1) dwords.*/
+	u32 iatxl;              /* IATxl.                                  */
+	u32 bocv;               /* BOCV.                                   */
+
+	u16 intel_size;         /* Intel size (in OSNIB area).             */
+	u16 oem_size;           /* OEM size (of OEM area).                 */
+	u32 r_intel_ptr;        /* Read Intel pointer.                     */
+	u32 w_intel_ptr;        /* Write Intel pointer.                    */
+	u32 r_oem_ptr;          /* Read OEM pointer.                       */
+	u32 w_oem_ptr;          /* Write OEM pointer.                      */
+
+	u32 pmit;               /* PMIT.                       */
+	u32 pemmcmhki;          /* PeMMCMHKI.                  */
+
+	/* OSHOB as defined for CLOVERVIEW */
+	u32 nvram_size;         /* NVRAM max size in bytes     */
+	u32 fabricerrlog1[OSHOB_FABRIC_ERROR1_SIZE]; /* fabric error data */
+	u8  vrtc_alarm_dow;     /* Alarm sync                  */
+	u8  vrtc_alarm_dom;     /* Alarm sync                  */
+	u8  vrtc_alarm_month;   /* Alarm sync                  */
+	u8  vrtc_alarm_year;    /* Alarm sync                  */
+	u32 reserved_debug[OSHOB_RESERVED_DEBUG_SIZE];/* Reserved Debug data */
+	u32 reserved2;          /* Reserved                    */
+	u32 fabricerrlog2[OSHOB_FABRIC_ERROR2_SIZE]; /* fabric error data2 */
+	u32 sculogbufferaddr;   /* phys addr of scu log buffer   */
+	u32 sculogbuffersize;   /* size of scu log buffer      */
+};
+
+/* Extended OSHOB allocation. version 1.4. */
+struct scu_ipc_oshob_extend_v14 {
+	u32 magic;              /* MAGIC number.                           */
+	u8  rev_major;          /* Revision major.                         */
+	u8  rev_minor;          /* Revision minor.                         */
+	u16 oshob_size;         /* OSHOB size.                             */
+
+	u32 scutxl;             /* SCUTxl offset position.                 */
+				/* If on MRFLD platform, next param may be */
+				/* shifted by                              */
+				/* (OSHOB_SCU_BUF_MRFLD_DW_SIZE - 1) dwords.*/
+	u32 iatxl;              /* IATxl.                                  */
+	u32 bocv;               /* BOCV.                                   */
+
+	u32 osnib_ptr;          /* The unique OSNIB pointer.               */
+
+	u32 pmit;               /* PMIT.                                   */
+	u8  scutraceheader[SCU_TRACE_HEADER_SIZE];   /* SCU trace header   */
+	u32 fabricerrlog[OSHOB_DEF_FABRIC_ERR_SIZE]; /* fabric error data  */
+	u32 chaabidebugdata[CHAABI_DEBUG_DATA_SIZE]; /* fabric error data  */
+	u32 pmuemergency;       /* pmu emergency                           */
+	u32 sculogbufferaddr;   /* scu log buffer address                  */
+	u32 sculogbuffersize;   /* size of scu log buffer                  */
+	u32 oshob_reserved[OSHOB_RESERVED_SIZE];     /* oshob reserved     */
+};
+
+struct scu_ipc_oshob_info {
+	__u32	oshob_base;     /* Base address of OSHOB. Use ioremap to     */
+				/* remap for access.                         */
+	__u8	oshob_majrev;   /* Major revision number of OSHOB structure. */
+	__u8	oshob_minrev;   /* Minor revision number of OSHOB structure. */
+	__u16	oshob_size;     /* Total size (bytes) of OSHOB structure.    */
+	__u32   scu_trace[OSHOB_SCU_BUF_BASE_DW_SIZE*4]; /* SCU trace buffer.*/
+				/* Set to max SCU buffer size (dwords) to    */
+				/* adapt to MRFLD. On other platforms, only  */
+				/* the first dword is stored and read.       */
+	__u32   ia_trace;       /* IA trace buffer.                          */
+	__u16	osnib_size;     /* Total size (bytes) of OSNIB structure.    */
+	__u16	oemnib_size;    /* Total size (bytes) of OEMNIB area.        */
+	__u32	osnibr_ptr;     /* Pointer to Intel read zone.               */
+	__u32	osnibw_ptr;     /* Pointer to Intel write zone.              */
+	__u32	oemnibr_ptr;    /* Pointer to OEM read zone.                 */
+	__u32	oemnibw_ptr;    /* Pointer to OEM write zone.                */
+	__u32   scu_trace_buf;  /* SCU extended trace buffer                 */
+	__u32   scu_trace_size; /* SCU extended trace buffer size            */
+	__u32   nvram_addr;     /* NV ram phys addr                          */
+	__u32   nvram_size;     /* NV ram size in bytes                      */
+
+	int (*scu_ipc_write_osnib)(u8 *data, int len, int offset);
+	int (*scu_ipc_read_osnib)(u8 *data, int len, int offset);
+
+	int platform_type;     /* Identifies the platform (list of supported */
+			       /* platforms is given in intel-mid.h).        */
+
+	u16 offs_add;          /* The additional shift in bytes to apply to  */
+			       /* the offset at which the OSHOB params       */
+			       /* will be read. On MRFLD it must be set to   */
+			       /* account for the extra SCU dwords.          */
+
+};
+
+/* Structure for OSHOB info */
+struct scu_ipc_oshob_info *oshob_info;
+
+static struct rpmsg_instance *ipcutil_instance;
+
+/* Mode for Audio clock */
+static DEFINE_MUTEX(osc_clk0_lock);
+static unsigned int osc_clk0_mode;
+
+int intel_scu_ipc_osc_clk(u8 clk, unsigned int khz)
+{
+	/* SCU IPC COMMAND(osc clk on/off) definition:
+	 * ipc_wbuf[0] = clock to act on {0, 1, 2, 3}
+	 * ipc_wbuf[1] =
+	 * bit 0 - 1:on  0:off
+	 * bit 1 - if 1, read divider setting from bits 3:2 as follows:
+	 * bit [3:2] - 00: clk/1, 01: clk/2, 10: clk/4, 11: reserved
+	 */
+	unsigned int base_freq;
+	unsigned int div;
+	u8 ipc_wbuf[2];
+	int ipc_ret;
+
+	if (clk > 3)
+		return -EINVAL;
+
+	ipc_wbuf[0] = clk;
+	ipc_wbuf[1] = 0;
+	if (khz) {
+#ifdef CONFIG_CTP_CRYSTAL_38M4
+		base_freq = 38400;
+#else
+		base_freq = 19200;
+#endif
+		div = fls(base_freq / khz) - 1;
+		if (div >= 3 || (1 << div) * khz != base_freq)
+			return -EINVAL;	/* Allow only exact frequencies */
+		ipc_wbuf[1] = 0x03 | (div << 2);
+	}
+
+	ipc_ret = rpmsg_send_command(ipcutil_instance,
+		RP_OSC_CLK_CTRL, 0, ipc_wbuf, NULL, 2, 0);
+	if (ipc_ret != 0)
+		pr_err("%s: failed to set osc clk(%d) output\n", __func__, clk);
+
+	return ipc_ret;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_osc_clk);
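A worked example of the encoding above (illustrative, not part of the patch):
with a 19200 kHz crystal and khz = 9600, div = fls(19200 / 9600) - 1 =
fls(2) - 1 = 1, (1 << 1) * 9600 == 19200 so the frequency is accepted, and
ipc_wbuf[1] = 0x03 | (1 << 2) = 0x07 (clock on, divider clk/2). A khz value
that does not divide the base frequency exactly is rejected with -EINVAL.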
+
+/*
+ * OSC_CLK_AUDIO is connected to the MSIC as well as Audience, so it should be
+ * turned on if any one of them requests it to be on and it should be turned off
+ * only if no one needs it on.
+ */
+int intel_scu_ipc_set_osc_clk0(unsigned int enable, enum clk0_mode mode)
+{
+	int ret = 0, clk_enable;
+	static const unsigned int clk_khz = 19200;
+
+	pr_info("set_clk0 request %s for Mode 0x%x\n",
+				enable ? "ON" : "OFF", mode);
+	mutex_lock(&osc_clk0_lock);
+	if (mode == CLK0_QUERY) {
+		ret = osc_clk0_mode;
+		goto out;
+	}
+	if (enable) {
+		/* if clock is already on, just add new user */
+		if (osc_clk0_mode) {
+			osc_clk0_mode |= mode;
+			goto out;
+		}
+		osc_clk0_mode |= mode;
+		pr_info("set_clk0: enabling clk, mode 0x%x\n", osc_clk0_mode);
+		clk_enable = 1;
+	} else {
+		osc_clk0_mode &= ~mode;
+		pr_info("set_clk0: disabling clk, mode 0x%x\n", osc_clk0_mode);
+		/* others using the clock, cannot turn it off */
+		if (osc_clk0_mode)
+			goto out;
+		clk_enable = 0;
+	}
+	pr_info("configuring OSC_CLK_AUDIO now\n");
+	ret = intel_scu_ipc_osc_clk(OSC_CLK_AUDIO, clk_enable ? clk_khz : 0);
+out:
+	mutex_unlock(&osc_clk0_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_set_osc_clk0);
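For example (illustrative): if one user requests the clock ON with mode bit
0x1 and a second with 0x2, osc_clk0_mode becomes 0x3 and the SCU is asked to
enable the clock only once; when the first user releases 0x1 the mode drops
to 0x2 and the clock stays on, and the SCU is only asked to switch it off
once the last mode bit is cleared.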
+
+#define MSIC_VPROG1_CTRL        0xD6
+#define MSIC_VPROG2_CTRL        0xD7
+
+#define MSIC_VPROG2_ON          0x36 /*1.200V and Auto mode*/
+#define MSIC_VPROG1_ON          0xF6 /*2.800V and Auto mode*/
+#define MSIC_VPROG_OFF          0x24 /*1.200V and OFF*/
+
+/* Defines specific of MRFLD platform (CONFIG_X86_MRFLD). */
+#define MSIC_VPROG1_MRFLD_CTRL	0xAC
+#define MSIC_VPROG2_MRFLD_CTRL	0xAD
+
+#define MSIC_VPROG1_MRFLD_ON	0xC1	/* 2.80V */
+#define MSIC_VPROG2_MRFLD_ON	0xC1	/* 2.80V */
+#define MSIC_VPROG_MRFLD_OFF	0	/* OFF */
+/* End of MRFLD specific.*/
+
+/* Helpers to turn on/off msic vprog1 and vprog2 */
+int intel_scu_ipc_msic_vprog1(int on)
+{
+	if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER)
+		return intel_scu_ipc_iowrite8(MSIC_VPROG1_MRFLD_CTRL,
+			on ? MSIC_VPROG1_MRFLD_ON : MSIC_VPROG_MRFLD_OFF);
+	else
+		return intel_scu_ipc_iowrite8(MSIC_VPROG1_CTRL,
+			on ? MSIC_VPROG1_ON : MSIC_VPROG_OFF);
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_msic_vprog1);
+
+int intel_scu_ipc_msic_vprog2(int on)
+{
+	if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER)
+		return intel_scu_ipc_iowrite8(MSIC_VPROG2_MRFLD_CTRL,
+			on ? MSIC_VPROG2_MRFLD_ON : MSIC_VPROG_MRFLD_OFF);
+	else
+		return intel_scu_ipc_iowrite8(MSIC_VPROG2_CTRL,
+			on ? MSIC_VPROG2_ON : MSIC_VPROG_OFF);
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_msic_vprog2);
+
 /**
  *	scu_reg_access		-	implement register access ioctls
  *	@cmd: command we are doing (read/write/update)
@@ -49,22 +449,170 @@
 
 static int scu_reg_access(u32 cmd, struct scu_ipc_data  *data)
 {
+	int ret;
 	unsigned int count = data->count;
 
-	if (count == 0 || count == 3 || count > 4)
+	if (count == 0 || count > 5)
 		return -EINVAL;
 
 	switch (cmd) {
-	case INTE_SCU_IPC_REGISTER_READ:
-		return intel_scu_ipc_readv(data->addr, data->data, count);
-	case INTE_SCU_IPC_REGISTER_WRITE:
-		return intel_scu_ipc_writev(data->addr, data->data, count);
-	case INTE_SCU_IPC_REGISTER_UPDATE:
-		return intel_scu_ipc_update_register(data->addr[0],
-						    data->data[0], data->mask);
+	case INTEL_SCU_IPC_REGISTER_READ:
+		ret = intel_scu_ipc_readv(data->addr, data->data, data->count);
+		break;
+	case INTEL_SCU_IPC_REGISTER_WRITE:
+		ret = intel_scu_ipc_writev(data->addr, data->data, data->count);
+		break;
+	case INTEL_SCU_IPC_REGISTER_UPDATE:
+		ret = intel_scu_ipc_update_register(data->addr[0],
+							data->data[0],
+							data->mask);
+		break;
 	default:
 		return -ENOTTY;
 	}
+	return ret;
+}
+
+#define check_pmdb_sub_cmd(x)	(x == PMDB_SUB_CMD_R_OTPCTL || \
+		x == PMDB_SUB_CMD_R_WMDB || x == PMDB_SUB_CMD_W_WMDB || \
+		x == PMDB_SUB_CMD_R_OTPDB || x == PMDB_SUB_CMD_W_OTPDB)
+#define pmdb_sub_cmd_is_read(x)	(x == PMDB_SUB_CMD_R_OTPCTL || \
+		x == PMDB_SUB_CMD_R_WMDB || x == PMDB_SUB_CMD_R_OTPDB)
+
+static int check_pmdb_buffer(struct scu_ipc_pmdb_buffer *p_buf)
+{
+	int size;
+
+	switch (p_buf->sub) {
+	case PMDB_SUB_CMD_R_WMDB:
+	case PMDB_SUB_CMD_W_WMDB:
+		size = PMDB_WMDB_SIZE;
+		break;
+	case PMDB_SUB_CMD_R_OTPDB:
+	case PMDB_SUB_CMD_W_OTPDB:
+		size = PMDB_OTPDB_SIZE;
+		break;
+	case PMDB_SUB_CMD_R_OTPCTL:
+		size = PMDB_OTPCTL_SIZE;
+		break;
+	default:
+		size = 0;
+	}
+
+	return check_pmdb_sub_cmd(p_buf->sub) &&
+		(p_buf->count + p_buf->offset < size) &&
+		(p_buf->count % 4 == 0);
+}
+
+/**
+ *	scu_pmdb_access	-	access PMDB data through SCU IPC cmds
+ *	@p_buf: PMDB access buffer, it describes the data to write/read.
+ *		p_buf->sub - SCU IPC sub cmd of PMDB access; this sub cmd
+ *			distinguishes the different components in PMDB
+ *			to be accessed (WMDB, OTPDB, OTPCTL);
+ *		p_buf->count - access data's count;
+ *		p_buf->offset - access data's offset for each component in PMDB;
+ *		p_buf->data - data to write/read.
+ *
+ *	Write/read data to/from PMDB.
+ *
+ */
+static int scu_pmdb_access(struct scu_ipc_pmdb_buffer *p_buf)
+{
+	int i, offset, ret = -EINVAL;
+	u8 *p_data;
+
+	if (!check_pmdb_buffer(p_buf)) {
+		pr_err("Invalid PMDB buffer!\n");
+		return -EINVAL;
+	}
+
+	/* 1. We use the rpmsg_send_raw_command() IPC cmd interface
+	 *    to access PMDB data. Each call of rpmsg_send_raw_command()
+	 *    can access at most PMDB_ACCESS_SIZE bytes of data.
+	 * 2. There are two kinds of pmdb sub commands, read and write.
+	 *    For a read command we must pass both the in and out buffers
+	 *    to rpmsg_send_raw_command(), because the in buffer length is
+	 *    passed as the access length, which must be transported to
+	 *    the SCU.
+	 */
+	p_data = p_buf->data;
+	offset = p_buf->offset;
+	for (i = 0; i < p_buf->count/PMDB_ACCESS_SIZE; i++) {
+		if (pmdb_sub_cmd_is_read(p_buf->sub))
+			ret = rpmsg_send_raw_command(ipcutil_instance,
+					RP_PMDB, p_buf->sub,
+					p_data, (u32 *)p_data,
+					PMDB_ACCESS_SIZE, PMDB_ACCESS_SIZE / 4,
+					0, offset);
+		else
+			ret = rpmsg_send_raw_command(ipcutil_instance,
+					RP_PMDB, p_buf->sub,
+					p_data, NULL, PMDB_ACCESS_SIZE,
+					0, 0, offset);
+		if (ret < 0) {
+			pr_err("intel_scu_ipc_raw_cmd failed!\n");
+			return ret;
+		}
+		offset += PMDB_ACCESS_SIZE;
+		p_data += PMDB_ACCESS_SIZE;
+	}
+	if (p_buf->count % PMDB_ACCESS_SIZE > 0) {
+		if (pmdb_sub_cmd_is_read(p_buf->sub))
+			ret = rpmsg_send_raw_command(ipcutil_instance,
+					RP_PMDB, p_buf->sub,
+					p_data, (u32 *)p_data,
+					p_buf->count % PMDB_ACCESS_SIZE,
+					(p_buf->count % PMDB_ACCESS_SIZE) / 4,
+					0, offset);
+		else
+			ret = rpmsg_send_raw_command(ipcutil_instance,
+					RP_PMDB, p_buf->sub,
+					p_data, NULL,
+					p_buf->count % PMDB_ACCESS_SIZE,
+					0, 0, offset);
+		if (ret < 0) {
+			pr_err("intel_scu_ipc_raw_cmd failed!\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
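To illustrate the chunking above (not part of the patch): a request of
count = 2 * PMDB_ACCESS_SIZE + r bytes (with r = count % PMDB_ACCESS_SIZE,
a multiple of 4) is issued as two full PMDB_ACCESS_SIZE transfers followed
by one r-byte tail transfer, with both the SCU-side offset and the data
pointer advanced by PMDB_ACCESS_SIZE after each full chunk.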
+
+static int do_pmdb_user_buf_access(void __user *argp)
+{
+	int ret;
+	struct scu_ipc_pmdb_buffer *p_buf;
+
+	p_buf = kzalloc(sizeof(struct scu_ipc_pmdb_buffer), GFP_KERNEL);
+	if (p_buf == NULL) {
+		pr_err("failed to allocate memory for pmdb buffer!\n");
+		return -ENOMEM;
+	}
+
+	ret = copy_from_user(p_buf, argp, sizeof(struct scu_ipc_pmdb_buffer));
+	if (ret < 0) {
+		pr_err("copy from user failed!!\n");
+		goto err;
+	}
+
+	ret = scu_pmdb_access(p_buf);
+	if (ret < 0) {
+		pr_err("scu_pmdb_access error!\n");
+		goto err;
+	}
+
+	if (pmdb_sub_cmd_is_read(p_buf->sub)) {
+		ret = copy_to_user(argp + 3 * sizeof(u32),
+					p_buf->data, p_buf->count);
+		if (ret < 0)
+			pr_err("copy to user failed!!\n");
+	}
+
+err:
+	kfree(p_buf);
+	return ret;
 }
 
 /**
@@ -78,43 +626,2003 @@
 static long scu_ipc_ioctl(struct file *fp, unsigned int cmd,
 							unsigned long arg)
 {
-	int ret;
+	int ret = -EINVAL;
 	struct scu_ipc_data  data;
 	void __user *argp = (void __user *)arg;
 
-	if (!capable(CAP_SYS_RAWIO))
+	/* The only IOCTL cmds allowed through without a capability check  */
+	/* are the ones getting fw version info; all others must be        */
+	/* checked to prevent arbitrary access to all sorts of bits of the */
+	/* hardware exposed here.                                          */
+
+	if ((cmd != INTEL_SCU_IPC_FW_REVISION_GET &&
+		cmd != INTEL_SCU_IPC_FW_REVISION_EXT_GET &&
+		cmd != INTEL_SCU_IPC_S0IX_RESIDENCY) &&
+		!capable(CAP_SYS_RAWIO))
 		return -EPERM;
 
-	if (copy_from_user(&data, argp, sizeof(struct scu_ipc_data)))
-		return -EFAULT;
-	ret = scu_reg_access(cmd, &data);
+	switch (cmd) {
+	case INTEL_SCU_IPC_S0IX_RESIDENCY:
+	{
+		void __iomem *s0ix_residencies_addr;
+		u8 dump_results[ALL_RESIDENCY_DATA_SIZE] = {0};
+		u32 cmd_id;
+
+		if (copy_from_user(&cmd_id, argp, sizeof(u32))) {
+			pr_err("copy from user failed!!\n");
+			return -EFAULT;
+		}
+
+		/* Check that the residency counter cmd id is in range */
+
+		if (cmd_id > IPC_RESIDENCY_CMD_ID_DUMP) {
+			pr_err("invalid s0ix residency sub-cmd id!\n");
+			return -EINVAL;
+		}
+
+		ret = rpmsg_send_simple_command(ipcutil_instance,
+					RP_S0IX_COUNTER, cmd_id);
+
+		if (ret < 0) {
+			pr_err("ipc_get_s0ix_counter failed!\n");
+			return ret;
+		}
+
+		if (cmd_id == IPC_RESIDENCY_CMD_ID_DUMP) {
+			s0ix_residencies_addr = ioremap_nocache(
+				SRAM_ADDR_S0IX_RESIDENCY,
+				ALL_RESIDENCY_DATA_SIZE);
+
+			if (!s0ix_residencies_addr) {
+				pr_err("ioremap SRAM address failed!!\n");
+				return -EFAULT;
+			}
+
+			memcpy(&dump_results[0], s0ix_residencies_addr,
+				ALL_RESIDENCY_DATA_SIZE);
+
+			iounmap(s0ix_residencies_addr);
+			ret = copy_to_user(argp, &dump_results[0],
+					ALL_RESIDENCY_DATA_SIZE);
+		}
+
+		break;
+	}
+	case INTEL_SCU_IPC_READ_RR_FROM_OSNIB:
+	{
+		u8 reboot_reason;
+		ret = intel_scu_ipc_read_osnib_rr(&reboot_reason);
+		if (ret < 0)
+			return ret;
+		ret = copy_to_user(argp, &reboot_reason, 1);
+		break;
+	}
+	case INTEL_SCU_IPC_WRITE_RR_TO_OSNIB:
+	{
+		u8 data;
+
+		ret = copy_from_user(&data, (u8 *)arg, 1);
+		if (ret < 0) {
+			pr_err("copy from user failed!!\n");
+			return ret;
+		}
+		ret = intel_scu_ipc_write_osnib_rr(data);
+		break;
+	}
+	case INTEL_SCU_IPC_WRITE_ALARM_FLAG_TO_OSNIB:
+	{
+		u8 flag, data;
+		ret = copy_from_user(&flag, (u8 *)arg, 1);
+		if (ret < 0) {
+			pr_err("copy from user failed!!\n");
+			return ret;
+		}
+
+		ret = oshob_info->scu_ipc_read_osnib(
+				&data,
+				1,
+				offsetof(struct scu_ipc_osnib, alarm));
+
+		if (ret < 0)
+			return ret;
+		if (flag) {
+			data = data | 0x1; /* set alarm flag */
+			pr_info("scu_ipc_ioctl: set alarm flag\n");
+		} else {
+			data = data & 0xFE; /* clear alarm flag */
+			pr_info("scu_ipc_ioctl: clear alarm flag\n");
+		}
+
+		ret = oshob_info->scu_ipc_write_osnib(
+				&data,
+				1,
+				offsetof(struct scu_ipc_osnib, alarm));
+
+		break;
+	}
+	case INTEL_SCU_IPC_READ_VBATTCRIT:
+	{
+		u32 value = 0;
+
+		pr_info("cmd = INTEL_SCU_IPC_READ_VBATTCRIT");
+		ret = intel_scu_ipc_read_mip((u8 *)&value, 4, 0x318, 1);
+		if (ret < 0)
+			return ret;
+		pr_info("VBATTCRIT VALUE = %x\n", value);
+		ret = copy_to_user(argp, &value, 4);
+		break;
+	}
+	case INTEL_SCU_IPC_FW_REVISION_GET:
+	case INTEL_SCU_IPC_FW_REVISION_EXT_GET:
+	{
+		struct scu_ipc_version version;
+
+		if (copy_from_user(&version, argp, sizeof(u32)))
+			return -EFAULT;
+
+		if (version.count > 16)
+			return -EINVAL;
+
+		ret = rpmsg_send_command(ipcutil_instance, RP_GET_FW_REVISION,
+			cmd & 0x1, NULL, (u32 *)version.data, 0, 4);
+		if (ret < 0)
+			return ret;
+
+		if (copy_to_user(argp + sizeof(u32),
+					version.data, version.count))
+			ret = -EFAULT;
+		break;
+	}
+	case INTEL_SCU_IPC_OSC_CLK_CNTL:
+	{
+		struct osc_clk_t osc_clk;
+
+		if (copy_from_user(&osc_clk, argp, sizeof(struct osc_clk_t)))
+			return -EFAULT;
+
+		ret = intel_scu_ipc_osc_clk(osc_clk.id, osc_clk.khz);
+		if (ret)
+			pr_err("%s: failed to set osc clk\n", __func__);
+
+		break;
+	}
+	case INTEL_SCU_IPC_PMDB_ACCESS:
+	{
+		ret = do_pmdb_user_buf_access(argp);
+
+		break;
+	}
+	default:
+		if (copy_from_user(&data, argp, sizeof(struct scu_ipc_data)))
+			return -EFAULT;
+		ret = scu_reg_access(cmd, &data);
+		if (ret < 0)
+			return ret;
+		if (copy_to_user(argp, &data, sizeof(struct scu_ipc_data)))
+			return -EFAULT;
+		return 0;
+	}
+
+	return ret;
+}
+
+int intel_scu_ipc_get_oshob_base(void)
+{
+	if (oshob_info == NULL)
+		return 0;
+
+	return oshob_info->oshob_base;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_get_oshob_base);
+
+int intel_scu_ipc_get_oshob_size(void)
+{
+	if (oshob_info == NULL)
+		return 0;
+
+	return oshob_info->oshob_size;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_get_oshob_size);
+
+int intel_scu_ipc_read_oshob(u8 *data, int len, int offset)
+{
+	int ret = 0, i;
+	void __iomem *oshob_addr;
+	u8 *ptr = data;
+
+	oshob_addr = ioremap_nocache(
+				    oshob_info->oshob_base,
+				    oshob_info->oshob_size);
+
+	if (!oshob_addr) {
+		pr_err("ipc_read_oshob: addr ioremap failed!\n");
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	for (i = 0; i < len; i = i+1) {
+		*ptr = readb(oshob_addr + offset + i);
+		pr_debug("addr(remapped)=%8x, offset=%2x, value=%2x\n",
+			(u32)(oshob_addr + i),
+			offset + i, *ptr);
+		ptr++;
+	}
+
+	iounmap(oshob_addr);
+exit:
+	return ret;
+}
+
+EXPORT_SYMBOL_GPL(intel_scu_ipc_read_oshob);
+
+/* This function is used for the default OSNIB. */
+int intel_scu_ipc_read_osnib(u8 *data, int len, int offset)
+{
+	int i, ret = 0;
+	u32 osnibw_ptr;
+	u8 *ptr, check = 0;
+	u16 struct_offs;
+	void __iomem *oshob_addr, *osnibr_addr, *osnibw_addr;
+
+	pr_debug("OSHOB base addr value is %x\n", oshob_info->oshob_base);
+	oshob_addr = ioremap_nocache(oshob_info->oshob_base,
+				     oshob_info->oshob_size);
+	if (!oshob_addr) {
+		pr_err("ioremap failed!\n");
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	struct_offs = offsetof(struct scu_ipc_oshob, osnibr) +
+			    oshob_info->offs_add;
+	osnibr_addr = oshob_addr + struct_offs;
+
+	if (!osnibr_addr) {
+		pr_err("Bad osnib address!\n");
+		ret = -EFAULT;
+		iounmap(oshob_addr);
+		goto exit;
+	}
+
+	pr_debug("OSNIB read addr (remapped) is %x\n",
+						(unsigned int)osnibr_addr);
+
+	/* Make a chksum verification for osnib */
+	for (i = 0; i < oshob_info->osnib_size; i++)
+		check += readb(osnibr_addr + i);
+	if (check) {
+		pr_err("WARNING!!! osnib chksum verification faild, reset all osnib data!\n");
+		struct_offs = offsetof(struct scu_ipc_oshob, osnibw_ptr) +
+				    oshob_info->offs_add;
+		osnibw_ptr = readl(oshob_addr + struct_offs);
+		osnibw_addr = ioremap_nocache(
+					osnibw_ptr, oshob_info->osnib_size);
+		if (osnibw_addr) {
+			for (i = 0; i < oshob_info->osnib_size; i++)
+				writeb(0, osnibw_addr + i);
+			rpmsg_send_raw_command(ipcutil_instance,
+				RP_WRITE_OSNIB, 0,
+				NULL, NULL, 0, 0,
+				0xFFFFFFFF, 0);
+			iounmap(osnibw_addr);
+		}
+	}
+
+	ptr = data;
+	for (i = 0; i < len; i++) {
+		*ptr = readb(osnibr_addr + offset + i);
+		pr_debug("addr(remapped)=%8x, offset=%2x, value=%2x\n",
+			(u32)(osnibr_addr+offset+i), offset+i, *ptr);
+		ptr++;
+	}
+
+	iounmap(oshob_addr);
+exit:
+	return ret;
+}
+
+/* This function is used for the default OSNIB. */
+int intel_scu_ipc_write_osnib(u8 *data, int len, int offset)
+{
+	int i;
+	int ret = 0;
+	u32 osnibw_ptr;
+	u8 osnib_data[oshob_info->osnib_size];
+	u8 check = 0, chksum = 0;
+	u16 struct_offs;
+	void __iomem *oshob_addr, *osnibw_addr, *osnibr_addr;
+
+	pr_debug("OSHOB base addr value is 0x%8x\n", oshob_info->oshob_base);
+
+	rpmsg_global_lock();
+
+	oshob_addr = ioremap_nocache(oshob_info->oshob_base,
+				     oshob_info->oshob_size);
+	if (!oshob_addr) {
+		pr_err("ioremap failed!\n");
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	/* Dump osnib data to generate chksum */
+	struct_offs = offsetof(struct scu_ipc_oshob, osnibr) +
+			    oshob_info->offs_add;
+	osnibr_addr = oshob_addr + struct_offs;
+
+	pr_debug("OSNIB read addr (remapped) in OSHOB at %x\n",
+						(unsigned int)osnibr_addr);
+
+	for (i = 0; i < oshob_info->osnib_size; i++) {
+		osnib_data[i] = readb(osnibr_addr + i);
+		check += osnib_data[i];
+	}
+	memcpy(osnib_data + offset, data, len);
+
+	if (check) {
+		pr_err("WARNING!!! OSNIB data chksum verification FAILED!\n");
+	} else {
+		/* generate chksum */
+		for (i = 0; i < oshob_info->osnib_size - 1; i++)
+			chksum += osnib_data[i];
+		osnib_data[oshob_info->osnib_size - 1] = ~chksum + 1;
+	}
+
+	struct_offs = offsetof(struct scu_ipc_oshob, osnibw_ptr) +
+			    oshob_info->offs_add;
+	osnibw_ptr = readl(oshob_addr + struct_offs);
+	if (osnibw_ptr == 0) { /* workaround here for BZ 2914 */
+		osnibw_ptr = 0xFFFF3400;
+		pr_err("ERR: osnibw ptr from oshob is 0, manually set it here\n");
+	}
+
+	pr_debug("POSNIB write address: %x\n", osnibw_ptr);
+
+	osnibw_addr = ioremap_nocache(osnibw_ptr, oshob_info->osnib_size);
+	if (!osnibw_addr) {
+		pr_err("ioremap failed!\n");
+		ret = -ENOMEM;
+		goto unmap_oshob_addr;
+	}
+
+	for (i = 0; i < oshob_info->osnib_size; i++)
+		writeb(*(osnib_data + i), (osnibw_addr + i));
+
+	ret = rpmsg_send_raw_command(ipcutil_instance,
+			RP_WRITE_OSNIB, 0,
+			NULL, NULL, 0, 0,
+			0xFFFFFFFF, 0);
 	if (ret < 0)
-		return ret;
-	if (copy_to_user(argp, &data, sizeof(struct scu_ipc_data)))
+		pr_err("ipc_write_osnib failed!!\n");
+
+	iounmap(osnibw_addr);
+
+unmap_oshob_addr:
+	iounmap(oshob_addr);
+exit:
+	rpmsg_global_unlock();
+
+	return ret;
+}
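The checksum scheme above is a two's-complement byte sum; a sketch of the
invariant it maintains (illustrative, not part of the patch):

	/* A valid OSNIB sums to 0 mod 256, the check used on the read side. */
	static int osnib_checksum_ok(const u8 *osnib, int size)
	{
		u8 sum = 0;
		int i;

		for (i = 0; i < size; i++)
			sum += osnib[i];
		return sum == 0;
	}

Writing ~chksum + 1 (i.e. (256 - chksum) mod 256) into the last byte makes
the whole osnib_size-byte block satisfy this property.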
+
+/* This function is used for the extended OSHOB/OSNIB. */
+int intel_scu_ipc_read_osnib_extend(u8 *data, int len, int offset)
+{
+	int i, ret = 0;
+	u8 *ptr, check = 0;
+	void __iomem *oshob_addr, *osnibr_addr, *osnibw_addr;
+	u32 sptr_dw_mask;
+
+	oshob_addr = ioremap_nocache(oshob_info->oshob_base,
+				     oshob_info->oshob_size);
+	if (!oshob_addr) {
+		pr_err("ipc_read_osnib_extend: ioremap failed!\n");
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	pr_debug(
+		"ipc_read_osnib_extend: remap OSNIB addr=0x%x size %d\n",
+		oshob_info->osnibr_ptr, oshob_info->osnib_size);
+
+	osnibr_addr = ioremap_nocache(oshob_info->osnibr_ptr,
+				      oshob_info->osnib_size);
+
+	if (!osnibr_addr) {
+		pr_err("ipc_read_osnib_extend: ioremap of osnib failed!\n");
+		ret = -ENOMEM;
+		goto unmap_oshob_addr;
+	}
+
+	/* Make a chksum verification for osnib */
+	for (i = 0; i < oshob_info->osnib_size; i++)
+		check += readb(osnibr_addr + i);
+
+	if (check) {
+		pr_err("ipc_read_osnib_extend: WARNING!!! osnib chksum verification faild, reset all osnib data!\n");
+		pr_debug(
+			"ipc_read_osnib_extend: remap osnibw ptr addr=0x%x size %d\n",
+			oshob_info->osnibw_ptr, oshob_info->osnib_size);
+
+		osnibw_addr = ioremap_nocache(oshob_info->osnibw_ptr,
+					      oshob_info->osnib_size);
+		if (!osnibw_addr) {
+			pr_err("ipc_read_osnib_extend: cannot remap osnib write ptr\n");
+			goto unmap_oshob_addr;
+		}
+
+		for (i = 0; i < oshob_info->osnib_size; i++)
+			writeb(0, osnibw_addr + i);
+
+		/* Send command. The mask to be written identifies which      */
+		/* double words of the OSNIB osnib_size bytes will be written.*/
+		/* So the mask is coded on 4 bytes.                           */
+		sptr_dw_mask = 0xFFFFFFFF;
+		rpmsg_send_raw_command(ipcutil_instance,
+			RP_WRITE_OSNIB,
+			0, NULL, NULL, 0, 0, sptr_dw_mask, 0);
+		iounmap(osnibw_addr);
+	}
+
+	ptr = data;
+	pr_debug("ipc_read_osnib_extend: OSNIB content:\n");
+	for (i = 0; i < len; i++) {
+		*ptr = readb(osnibr_addr + offset + i);
+		pr_debug("addr(remapped)=%8x, offset=%2x, value=%2x\n",
+			(u32)(osnibr_addr+offset+i), offset+i, *ptr);
+		ptr++;
+	}
+
+	iounmap(osnibr_addr);
+
+unmap_oshob_addr:
+	iounmap(oshob_addr);
+exit:
+	return ret;
+}
+
+/* This function is used for the extended OSHOB/OSNIB. */
+int intel_scu_ipc_write_osnib_extend(u8 *data, int len, int offset)
+{
+	int i;
+	int ret = 0;
+	u8 *posnib_data, *ptr;
+	u8 check = 0, chksum = 0;
+	void __iomem *oshob_addr, *osnibw_addr, *osnibr_addr;
+	u32 sptr_dw_mask;
+
+	rpmsg_global_lock();
+
+	pr_debug(
+		"ipc_write_osnib_extend: remap OSHOB addr 0x%8x size %d\n",
+		oshob_info->oshob_base, oshob_info->oshob_size);
+
+	oshob_addr = ioremap_nocache(oshob_info->oshob_base,
+				     oshob_info->oshob_size);
+	if (!oshob_addr) {
+		pr_err("ipc_write_osnib_extend: ioremap failed!\n");
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	osnibr_addr = ioremap_nocache(oshob_info->osnibr_ptr,
+				      oshob_info->osnib_size);
+
+	if (!osnibr_addr) {
+		pr_err("ipc_write_osnib_extend: ioremap of osnib failed!\n");
+		ret = -ENOMEM;
+		goto unmap_oshob_addr;
+	}
+
+	/* Dump osnib data to generate chksum */
+	posnib_data = kzalloc(oshob_info->osnib_size, GFP_KERNEL);
+
+	if (posnib_data == NULL) {
+		pr_err("ipc_write_osnib_extend: The buffer for getting OSNIB is NULL\n");
+		ret = -EFAULT;
+		iounmap(osnibr_addr);
+		goto unmap_oshob_addr;
+	}
+
+	ptr = posnib_data;
+	for (i = 0; i < oshob_info->osnib_size; i++) {
+		*ptr = readb(osnibr_addr + i);
+		check += *ptr;
+		ptr++;
+	}
+
+	memcpy(posnib_data + offset, data, len);
+
+	if (check) {
+		pr_err("ipc_write_osnib_extend: WARNING!!! OSNIB data chksum verification FAILED!\n");
+	} else {
+		/* generate chksum.  */
+		pr_debug("ipc_write_osnib_extend: generating checksum\n");
+		for (i = 0; i < oshob_info->osnib_size - 1; i++)
+			chksum += *(posnib_data + i);
+		/* Fill checksum at the CHECKSUM offset place in OSNIB. */
+		*(posnib_data +
+		    offsetof(struct scu_ipc_osnib, checksum)) = ~chksum + 1;
+	}
+
+	pr_debug(
+		"ipc_write_osnib_extend: remap osnibw ptr addr=0x%x size %d\n",
+		oshob_info->osnibw_ptr, oshob_info->osnib_size);
+
+	osnibw_addr = ioremap_nocache(oshob_info->osnibw_ptr,
+				      oshob_info->osnib_size);
+	if (!osnibw_addr) {
+		pr_err("scu_ipc_write_osnib_extend: ioremap failed!\n");
+		ret = -ENOMEM;
+		goto exit_osnib;
+	}
+
+	for (i = 0; i < oshob_info->osnib_size; i++)
+		writeb(*(posnib_data + i), (osnibw_addr + i));
+
+	/* Send command. The mask to be written identifies which       */
+	/* double words of the OSNIB osnib_size bytes will be written. */
+	/* So the mask is coded on 4 bytes.                            */
+	sptr_dw_mask = 0xFFFFFFFF;
+	ret = rpmsg_send_raw_command(ipcutil_instance,
+			RP_WRITE_OSNIB, 0, NULL, NULL,
+			0, 0, sptr_dw_mask, 0);
+	if (ret < 0)
+		pr_err("scu_ipc_write_osnib_extend: ipc_write_osnib failed!!\n");
+
+	iounmap(osnibw_addr);
+
+exit_osnib:
+	iounmap(osnibr_addr);
+
+	kfree(posnib_data);
+
+unmap_oshob_addr:
+	iounmap(oshob_addr);
+exit:
+	rpmsg_global_unlock();
+
+	return ret;
+}
+
+/*
+ * This writes the reboot reason in the OSNIB (factored here to avoid any
+ * overlap).
+ */
+int intel_scu_ipc_write_osnib_rr(u8 rr)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(osnib_target_oses); i++) {
+		if (osnib_target_oses[i].id == rr) {
+			pr_info("intel_scu_ipc_write_osnib_rr: reboot reason: %s\n",
+				osnib_target_oses[i].target_os_name);
+			return oshob_info->scu_ipc_write_osnib(
+				&rr,
+				1,
+				offsetof(struct scu_ipc_osnib, intel_reserved));
+		}
+	}
+
+	pr_warn("intel_scu_ipc_write_osnib_rr: reboot reason [0x%x] not found\n",
+			rr);
+	return -1;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_write_osnib_rr);
+
+/*
+ * This reads the reboot reason from the OSNIB (factored here).
+ */
+int intel_scu_ipc_read_osnib_rr(u8 *rr)
+{
+	pr_debug("intel_scu_ipc_read_osnib_rr: read reboot reason\n");
+	return oshob_info->scu_ipc_read_osnib(
+			rr,
+			1,
+			offsetof(struct scu_ipc_osnib, target_mode));
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_read_osnib_rr);
+
+
+int intel_scu_ipc_read_oshob_extend_param(void __iomem *poshob_addr)
+{
+	u16 struct_offs;
+	int buff_size;
+
+	/* Get defined OSNIB space size. */
+	oshob_info->osnib_size = readw(
+			    poshob_addr +
+			    offsetof(struct scu_ipc_oshob_extend, intel_size));
+
+	if (oshob_info->osnib_size == 0) {
+		pr_err("ipc_read_oshob_extend_param: OSNIB size is null!\n");
 		return -EFAULT;
+	}
+
+	/* Get defined OEM space size. */
+	oshob_info->oemnib_size = readw(
+			    poshob_addr +
+			    offsetof(struct scu_ipc_oshob_extend, oem_size));
+
+	if (oshob_info->oemnib_size == 0) {
+		pr_err("ipc_read_oshob_extend_param: OEMNIB size is null!\n");
+		return -EFAULT;
+	}
+
+	/* Set SCU and IA trace buffers. Size calculated in bytes here. */
+	if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER)
+		buff_size = OSHOB_SCU_BUF_MRFLD_DW_SIZE*4;
+	else
+		buff_size = OSHOB_SCU_BUF_BASE_DW_SIZE*4;
+
+	intel_scu_ipc_read_oshob(
+		(u8 *)(oshob_info->scu_trace),
+		buff_size,
+		offsetof(struct scu_ipc_oshob_extend, scutxl));
+
+	struct_offs = offsetof(struct scu_ipc_oshob_extend, iatxl) +
+			    oshob_info->offs_add;
+	oshob_info->ia_trace = readl(poshob_addr + struct_offs);
+
+	/* Set pointers */
+	struct_offs = offsetof(struct scu_ipc_oshob_extend, r_intel_ptr) +
+			    oshob_info->offs_add;
+	oshob_info->osnibr_ptr = readl(poshob_addr + struct_offs);
+
+	if (!oshob_info->osnibr_ptr) {
+		pr_err("ipc_read_oshob_extend_param: R_INTEL_POINTER is NULL!\n");
+		return -ENOMEM;
+	}
+
+	struct_offs = offsetof(struct scu_ipc_oshob_extend, w_intel_ptr) +
+			    oshob_info->offs_add;
+	oshob_info->osnibw_ptr = readl(poshob_addr + struct_offs);
+
+	if (oshob_info->osnibw_ptr == 0) {
+		/* workaround here for BZ 2914 */
+		oshob_info->osnibw_ptr = 0xFFFF3400;
+		pr_err(
+		    "ipc_read_oshob_extend_param: ERR: osnibw from oshob is 0, manually set it here\n");
+	}
+
+	pr_info("(extend oshob) osnib read ptr = 0x%8x\n",
+			oshob_info->osnibr_ptr);
+	pr_info("(extend oshob) osnib write ptr = 0x%8x\n",
+			oshob_info->osnibw_ptr);
+
+	struct_offs = offsetof(struct scu_ipc_oshob_extend, r_oem_ptr) +
+			    oshob_info->offs_add;
+	oshob_info->oemnibr_ptr = readl(poshob_addr + struct_offs);
+
+	if (!oshob_info->oemnibr_ptr) {
+		pr_err("ipc_read_oshob_extend_param: R_OEM_POINTER is NULL!\n");
+		return -ENOMEM;
+	}
+
+	struct_offs = offsetof(struct scu_ipc_oshob_extend, w_oem_ptr) +
+			    oshob_info->offs_add;
+	oshob_info->oemnibw_ptr = readl(poshob_addr + struct_offs);
+
+	if (!oshob_info->oemnibw_ptr) {
+		pr_err("ipc_read_oshob_extend_param: W_OEM_POINTER is NULL!\n");
+		return -ENOMEM;
+	}
+
+	oshob_info->scu_ipc_write_osnib =
+					&intel_scu_ipc_write_osnib_extend;
+	oshob_info->scu_ipc_read_osnib =
+					&intel_scu_ipc_read_osnib_extend;
+
+	pr_info(
+		"Using extended oshob structure size = %d bytes\n",
+		oshob_info->oshob_size);
+	pr_info(
+		"OSNIB Intel size = %d bytes OEMNIB size = %d bytes\n",
+		oshob_info->osnib_size, oshob_info->oemnib_size);
+
+	if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_CLOVERVIEW) {
+		if ((oshob_info->oshob_majrev >= 1) &&
+		    (oshob_info->oshob_minrev >= 1)) {
+			/* CLVP and correct version of the oshob. */
+			oshob_info->scu_trace_buf =
+				readl(poshob_addr +
+				      offsetof(struct scu_ipc_oshob_extend,
+					       sculogbufferaddr));
+			oshob_info->scu_trace_size =
+				readl(poshob_addr +
+				      offsetof(struct scu_ipc_oshob_extend,
+					       sculogbuffersize));
+		}
+		if ((oshob_info->oshob_majrev >= 1) &&
+		    (oshob_info->oshob_minrev >= 3)) {
+			/* CLVP and correct version of the oshob. */
+			oshob_info->nvram_addr =
+				readl(poshob_addr +
+				      offsetof(struct scu_ipc_oshob_extend,
+					       nvram_addr));
+			oshob_info->nvram_size =
+				readl(poshob_addr +
+				      offsetof(struct scu_ipc_oshob_extend,
+					       nvram_size));
+		}
+	}
 	return 0;
 }
 
+int intel_scu_ipc_read_oshob_extend_param_v14(void __iomem *poshob_addr)
+{
+	u16 struct_offs;
+	int buff_size;
+
+	/* set intel OSNIB space size. */
+	oshob_info->osnib_size = OSNIB_SIZE;
+
+	/* set OEM OSNIB space size. */
+	oshob_info->oemnib_size = OSNIB_OEM_RSVD_SIZE;
+
+	/* Set SCU and IA trace buffers. Size calculated in bytes here. */
+	if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER)
+		buff_size = OSHOB_SCU_BUF_MRFLD_DW_SIZE*4;
+	else
+		buff_size = OSHOB_SCU_BUF_BASE_DW_SIZE*4;
+
+	intel_scu_ipc_read_oshob(
+		(u8 *)(oshob_info->scu_trace),
+		buff_size,
+		offsetof(struct scu_ipc_oshob_extend_v14, scutxl));
+
+	struct_offs = offsetof(struct scu_ipc_oshob_extend_v14, iatxl) +
+			    oshob_info->offs_add;
+	oshob_info->ia_trace = readl(poshob_addr + struct_offs);
+
+	/* Set pointers */
+	struct_offs = offsetof(struct scu_ipc_oshob_extend_v14, osnib_ptr) +
+			    oshob_info->offs_add;
+	oshob_info->osnibr_ptr = readl(poshob_addr + struct_offs);
+
+	if (!oshob_info->osnibr_ptr) {
+		pr_err("ipc_read_oshob_extend_param_v14: R_INTEL_POINTER is NULL!\n");
+		return -ENOMEM;
+	}
+
+	/* write and read pointers are the same */
+	oshob_info->osnibw_ptr = oshob_info->osnibr_ptr;
+
+	pr_info("(latest extend oshob) osnib ptr = 0x%8x\n",
+		oshob_info->osnibr_ptr);
+
+	/* OEM NIB point at offset OSNIB_SIZE */
+	oshob_info->oemnibr_ptr = oshob_info->osnibr_ptr + OSNIB_SIZE;
+
+	/* write and read pointers are the same */
+	oshob_info->oemnibw_ptr = oshob_info->oemnibr_ptr;
+
+	/* we use the same function for all extended OSHOB structures */
+	oshob_info->scu_ipc_write_osnib =
+					&intel_scu_ipc_write_osnib_extend;
+	oshob_info->scu_ipc_read_osnib =
+					&intel_scu_ipc_read_osnib_extend;
+
+	pr_info(
+		"Using latest extended oshob structure size = %d bytes\n",
+		oshob_info->oshob_size);
+	pr_info(
+		"OSNIB Intel size = %d bytes OEMNIB size = %d bytes\n",
+		oshob_info->osnib_size, oshob_info->oemnib_size);
+
+	struct_offs = offsetof(struct scu_ipc_oshob_extend_v14,
+			    sculogbufferaddr) + oshob_info->offs_add;
+	oshob_info->scu_trace_buf = readl(poshob_addr + struct_offs);
+
+	struct_offs = offsetof(struct scu_ipc_oshob_extend_v14,
+			    sculogbuffersize) + oshob_info->offs_add;
+	oshob_info->scu_trace_size = readl(poshob_addr + struct_offs);
+
+	/* NVRAM after Intel and OEM OSNIB */
+	oshob_info->nvram_addr = oshob_info->oemnibr_ptr + OSNIB_OEM_RSVD_SIZE;
+	oshob_info->nvram_size = OSNIB_NVRAM_SIZE;
+
+	return 0;
+}
+
+int intel_scu_ipc_read_oshob_def_param(void __iomem *poshob_addr)
+{
+	u16 struct_offs;
+	int ret = 0;
+	int buff_size;
+
+	oshob_info->oshob_majrev = OSHOB_REV_MAJ_DEFAULT;
+	oshob_info->oshob_minrev = OSHOB_REV_MIN_DEFAULT;
+	oshob_info->osnib_size = OSNIB_SIZE;
+	oshob_info->oemnib_size = 0;
+
+	/* Set OSHOB total size */
+	if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER)
+		oshob_info->oshob_size = OSHOB_MRFLD_SIZE;
+	else
+		oshob_info->oshob_size = OSHOB_SIZE;
+
+	/* Set SCU and IA trace buffers. Size calculated in bytes here. */
+	if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER)
+		buff_size = OSHOB_SCU_BUF_MRFLD_DW_SIZE*4;
+	else
+		buff_size = OSHOB_SCU_BUF_BASE_DW_SIZE*4;
+
+	ret = intel_scu_ipc_read_oshob(
+		(u8 *)(oshob_info->scu_trace),
+		buff_size,
+		offsetof(struct scu_ipc_oshob, scutxl));
+
+	if (ret != 0) {
+		pr_err("Cannot get scutxl data from OSHOB\n");
+		return ret;
+	}
+
+	struct_offs = offsetof(struct scu_ipc_oshob, iatxl) +
+			    oshob_info->offs_add;
+	oshob_info->ia_trace = readl(poshob_addr + struct_offs);
+
+	oshob_info->scu_ipc_write_osnib =
+					&intel_scu_ipc_write_osnib;
+	oshob_info->scu_ipc_read_osnib =
+					&intel_scu_ipc_read_osnib;
+
+	struct_offs = offsetof(struct scu_ipc_oshob, osnibr) +
+			    oshob_info->offs_add;
+	oshob_info->osnibr_ptr = (unsigned long)(poshob_addr + struct_offs);
+
+	pr_info("Using default oshob structure size = %d bytes\n",
+					oshob_info->oshob_size);
+
+	pr_debug("Using default oshob structure OSNIB read ptr %x\n",
+		oshob_info->osnibr_ptr);
+
+	return ret;
+}
+
+int intel_scu_ipc_read_oshob_info(void)
+{
+	int i, ret = 0;
+	u32 oshob_base = 0;
+	void __iomem *oshob_addr;
+	unsigned char oshob_magic[4];
+
+	ret = rpmsg_send_command(ipcutil_instance,
+		RP_GET_HOBADDR, 0, NULL, &oshob_base, 0, 1);
+
+	if (ret < 0) {
+		pr_err("ipc_read_oshob cmd failed!!\n");
+		goto exit;
+	}
+
+	/* At this stage, we still don't know which OSHOB type (default or  */
+	/* extended) can be used, and the size of resource to be remapped   */
+	/* depends on the type of OSHOB structure to be used.               */
+	/* So just remap the minimum size to get the needed bytes of the    */
+	/* OSHOB zone.                                                      */
+	oshob_addr = ioremap_nocache(oshob_base, OSHOB_EXTEND_DESC_SIZE);
+
+	if (!oshob_addr) {
+		pr_err("oshob addr ioremap failed!\n");
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	pr_info("(oshob) base addr = 0x%8x\n", oshob_base);
+
+	/* Store base address. */
+	oshob_info->oshob_base = oshob_base;
+
+	oshob_info->platform_type = intel_mid_identify_cpu();
+
+	/*
+	 * oshob_info was allocated with kmalloc, so its memory is not
+	 * initialized, and these fields are not set on every code path:
+	 * clear them here.
+	 */
+	oshob_info->scu_trace_buf = 0;
+	oshob_info->scu_trace_size = 0;
+	oshob_info->nvram_addr = 0;
+	oshob_info->nvram_size = 0;
+
+	if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER) {
+		pr_info("(oshob) identified platform = INTEL_MID_CPU_CHIP_TANGIER\n");
+
+		/* By default we already have 1 dword reserved in the OSHOB */
+		/* structures for SCU buffer. For Merrifield, SCU size to   */
+		/* consider is OSHOB_SCU_BUF_MRFLD_DW_SIZE dwords. So with  */
+		/* Merrifield, when calculating structures offsets, we have */
+		/* to add (OSHOB_SCU_BUF_MRFLD_DW_SIZE - 1) dwords, with    */
+		/* the offsets calculated in bytes.                         */
+		oshob_info->offs_add = (OSHOB_SCU_BUF_MRFLD_DW_SIZE - 1)*4;
+	} else
+		oshob_info->offs_add = 0;
+
+	pr_debug("(oshob) additional offset = 0x%x\n", oshob_info->offs_add);
+
+	/* Extract magic number that will help identifying the good OSHOB  */
+	/* that is going to be used.                                       */
+	for (i = 0; i < OSHOB_HEADER_MAGIC_SIZE; i = i+1)
+		oshob_magic[i] = readb(oshob_addr + i);
+
+	pr_debug("(oshob) OSHOB magic = %x %x %x %x\n",
+		oshob_magic[0], oshob_magic[1], oshob_magic[2], oshob_magic[3]);
+
+	if (strncmp((char *)oshob_magic, OSHOB_MAGIC_NUMBER,
+		    OSHOB_HEADER_MAGIC_SIZE) == 0) {
+		/* Get OSHOB version and size, which are common to all */
+		/* extended OSHOB structures.                          */
+		oshob_info->oshob_majrev = readb(oshob_addr +
+			offsetof(struct scu_ipc_oshob_extend, rev_major));
+		oshob_info->oshob_minrev = readb(oshob_addr +
+			offsetof(struct scu_ipc_oshob_extend, rev_minor));
+		oshob_info->oshob_size = readw(oshob_addr +
+			offsetof(struct scu_ipc_oshob_extend, oshob_size));
+
+		pr_info("(oshob) oshob version = %x.%x\n",
+			oshob_info->oshob_majrev, oshob_info->oshob_minrev);
+
+		if ((oshob_info->oshob_majrev >= 1) &&
+		    (oshob_info->oshob_minrev >= 4)) {
+			if (intel_scu_ipc_read_oshob_extend_param_v14(
+					oshob_addr) != 0) {
+				ret = -EFAULT;
+				goto unmap_oshob;
+			}
+		} else {
+			if (intel_scu_ipc_read_oshob_extend_param(
+					oshob_addr) != 0) {
+				ret = -EFAULT;
+				goto unmap_oshob;
+			}
+		}
+
+		if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER) {
+			pr_info("(extend oshob) SCU buffer size is %d bytes\n",
+				OSHOB_SCU_BUF_MRFLD_DW_SIZE*4);
+		} else {
+			pr_debug("(extend oshob) SCU buffer size is %d bytes\n",
+				OSHOB_SCU_BUF_BASE_DW_SIZE*4);
+		}
+	} else {
+		ret = intel_scu_ipc_read_oshob_def_param(oshob_addr);
+
+		if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER) {
+			pr_debug("(default oshob) SCU buffer size is %d bytes\n",
+				OSHOB_SCU_BUF_MRFLD_DW_SIZE*4);
+		} else {
+			pr_debug("(default oshob) SCU buffer size is %d bytes\n",
+				OSHOB_SCU_BUF_BASE_DW_SIZE*4);
+		}
+	}
+
+unmap_oshob:
+	iounmap(oshob_addr);
+
+exit:
+	return ret;
+}
+
+/*
+ * This writes the OEMNIB buffer in the internal RAM of the SCU.
+ */
+int intel_scu_ipc_write_oemnib(u8 *oemnib, int len, int offset)
+{
+	int i;
+	int ret = 0;
+	void __iomem *oshob_addr, *oemnibw_addr;
+	u32 sptr_dw_mask;
+
+	if (oemnib == NULL) {
+		pr_err("ipc_write_oemnib: passed buffer for writting OEMNIB is NULL\n");
+		return -EINVAL;
+	}
+
+	rpmsg_global_lock();
+
+	pr_debug("ipc_write_oemnib: remap OSHOB addr 0x%8x size %d\n",
+		oshob_info->oshob_base, oshob_info->oshob_size);
+
+	oshob_addr = ioremap_nocache(oshob_info->oshob_base,
+				     oshob_info->oshob_size);
+	if (!oshob_addr) {
+		pr_err("ipc_write_oemnib: ioremap failed!\n");
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	if ((len == 0) || (len > oshob_info->oemnib_size)) {
+		pr_err(
+			"ipc_write_oemnib: bad OEMNIB data length (%d) to write (max=%d bytes)\n",
+			    len, oshob_info->oemnib_size);
+		ret = -EINVAL;
+		goto unmap_oshob_addr;
+	}
+
+	/* offset shall start at 0 from the OEMNIB base address and shall */
+	/* not exceed the OEMNIB allowed size.                            */
+	if ((offset < 0) || (offset >= oshob_info->oemnib_size) ||
+	    (len + offset > oshob_info->oemnib_size)) {
+		pr_err(
+			"ipc_write_oemnib: Bad OEMNIB data offset/len for writing (offset=%d , len=%d)\n",
+			offset, len);
+		ret = -EINVAL;
+		goto unmap_oshob_addr;
+	}
+
+	pr_debug("ipc_write_oemnib: POEMNIB remap oemnibw ptr 0x%x size %d\n",
+		oshob_info->oemnibw_ptr, oshob_info->oemnib_size);
+
+	oemnibw_addr = ioremap_nocache(oshob_info->oemnibw_ptr,
+				       oshob_info->oemnib_size);
+	if (!oemnibw_addr) {
+		pr_err("ipc_write_oemnib: ioremap failed!\n");
+		ret = -ENOMEM;
+		goto unmap_oshob_addr;
+	}
+
+	for (i = 0; i < len; i++)
+		writeb(*(oemnib + i), (oemnibw_addr + offset + i));
+
+	/* Send command. The mask to be written identifies which double */
+	/* words of the OEMNIB (oemnib_size bytes) will be written.     */
+	/* So the mask is coded on 4 bytes.                             */
+	sptr_dw_mask = 0xFFFFFFFF;
+	if ((oshob_info->oshob_majrev >= 1) &&
+	    (oshob_info->oshob_minrev >= 4)) {
+		/* OEM NIB lies on region 1, 2, and 3 */
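+		/* so issue one RP_WRITE_OSNIB command per region */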
+		ret = rpmsg_send_raw_command(ipcutil_instance,
+			RP_WRITE_OSNIB, 0, NULL, NULL,
+			0, 0, sptr_dw_mask, 1);
+		if (ret < 0) {
+			pr_err("ipc_write_oemnib: ipc_write_osnib failed!!\n");
+			goto unmap_oemnibw_addr;
+		}
+		ret = rpmsg_send_raw_command(ipcutil_instance,
+			RP_WRITE_OSNIB, 0, NULL, NULL,
+			0, 0, sptr_dw_mask, 2);
+		if (ret < 0) {
+			pr_err("ipc_write_oemnib: ipc_write_osnib failed!!\n");
+			goto unmap_oemnibw_addr;
+		}
+		ret = rpmsg_send_raw_command(ipcutil_instance,
+			RP_WRITE_OSNIB, 0, NULL, NULL,
+			0, 0, sptr_dw_mask, 3);
+		if (ret < 0) {
+			pr_err("ipc_write_oemnib: ipc_write_osnib failed!!\n");
+			goto unmap_oemnibw_addr;
+		}
+	} else {
+		ret = rpmsg_send_raw_command(ipcutil_instance,
+			RP_WRITE_OEMNIB, 0, NULL, NULL,
+			0, 0, sptr_dw_mask, 0);
+		if (ret < 0) {
+			pr_err("ipc_write_oemnib: ipc_write_osnib failed!!\n");
+			goto unmap_oemnibw_addr;
+		}
+	}
+
+unmap_oemnibw_addr:
+	iounmap(oemnibw_addr);
+
+unmap_oshob_addr:
+	iounmap(oshob_addr);
+exit:
+	rpmsg_global_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_write_oemnib);
+
+/*
+ * This reads the OEMNIB from the internal RAM of the SCU.
+ */
+static int intel_scu_ipc_read_oemnib(u8 *oemnib, int len, int offset)
+{
+	int i, ret = 0;
+	u8 *ptr;
+	void __iomem *oshob_addr, *oemnibr_addr;
+
+	if (oemnib == NULL) {
+		pr_err("ipc_read_oemnib: passed buffer for reading OEMNIB is NULL\n");
+		return -EINVAL;
+	}
+
+	pr_debug("ipc_read_oemnib: remap OSHOB base addr 0x%x size %d\n",
+		oshob_info->oshob_base, oshob_info->oshob_size);
+
+	oshob_addr = ioremap_nocache(oshob_info->oshob_base,
+				     oshob_info->oshob_size);
+	if (!oshob_addr) {
+		pr_err("ipc_read_oemnib: ioremap failed!\n");
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	if ((len == 0) || (len > oshob_info->oemnib_size)) {
+		pr_err("ipc_read_oemnib: Bad OEMNIB data length (%d) to be read (max=%d bytes)\n",
+			    len, oshob_info->oemnib_size);
+		ret = -EINVAL;
+		goto unmap_oshob_addr;
+	}
+
+	/* offset shall start at 0 from the OEMNIB base address and shall */
+	/* not exceed the OEMNIB allowed size.                            */
+	if ((offset < 0) || (offset >= oshob_info->oemnib_size) ||
+	    (len + offset > oshob_info->oemnib_size)) {
+		pr_err(
+		"ipc_read_oemnib: Bad OEMNIB data offset/len to read (offset=%d ,len=%d)\n",
+		offset, len);
+		ret = -EINVAL;
+		goto unmap_oshob_addr;
+	}
+
+	pr_debug("ipc_read_oemnib: POEMNIB remap oemnibr ptr 0x%x size %d\n",
+		oshob_info->oemnibr_ptr, oshob_info->oemnib_size);
+
+	oemnibr_addr = ioremap_nocache(oshob_info->oemnibr_ptr,
+				       oshob_info->oemnib_size);
+
+	if (!oemnibr_addr) {
+		pr_err("ipc_read_oemnib: ioremap of oemnib failed!\n");
+		ret = -ENOMEM;
+		goto unmap_oshob_addr;
+	}
+
+	ptr = oemnib;
+	pr_debug("ipc_read_oemnib: OEMNIB content:\n");
+	for (i = 0; i < len; i++) {
+		*ptr = readb(oemnibr_addr + offset + i);
+		pr_debug("addr(remapped)=%8x, offset=%2x, value=%2x\n",
+			(u32)(oemnibr_addr+offset+i), offset+i, *ptr);
+		ptr++;
+	}
+
+	iounmap(oemnibr_addr);
+
+unmap_oshob_addr:
+	iounmap(oshob_addr);
+exit:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_read_oemnib);
+
+#ifdef DUMP_OSNIB
+/*
+ * This reads the PMIT from the OSHOB (pointer to interrupt tree)
+ */
+static int intel_scu_ipc_read_oshob_it_tree(u32 *ptr)
+{
+	u16 struct_offs;
+
+	pr_debug("intel_scu_ipc_read_oshob_it_tree: read IT tree\n");
+
+	if ((oshob_info->oshob_majrev == OSHOB_REV_MAJ_DEFAULT) &&
+	    (oshob_info->oshob_minrev == OSHOB_REV_MIN_DEFAULT)) {
+		struct_offs = offsetof(struct scu_ipc_oshob, pmit) +
+				    oshob_info->offs_add;
+	} else if ((oshob_info->oshob_majrev >= 1) &&
+		   (oshob_info->oshob_minrev >= 4)) {
+		struct_offs = offsetof(struct scu_ipc_oshob_extend_v14, pmit) +
+				    oshob_info->offs_add;
+	} else {
+		struct_offs = offsetof(struct scu_ipc_oshob_extend, pmit) +
+				    oshob_info->offs_add;
+	}
+	return intel_scu_ipc_read_oshob(
+			(u8 *) ptr,
+			4,
+			struct_offs);
+}
+#endif
+
+/*
+ * This reads the RESETIRQ1 or RESETSRC0 from the OSNIB
+ */
+#ifdef DUMP_OSNIB
+static int intel_scu_ipc_read_osnib_reset_ev1(u8 *rev1)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(chip_reset_events); i++) {
+		if (chip_reset_events[i].id == oshob_info->platform_type) {
+			pr_debug(
+				"intel_scu_ipc_read_osnib_rst_ev1: read %s\n",
+				chip_reset_events[i].reset_ev1_name);
+
+			return oshob_info->scu_ipc_read_osnib(
+				    rev1,
+				    1,
+				    offsetof(struct scu_ipc_osnib, reset_ev1));
+		}
+	}
+
+	pr_err("intel_scu_ipc_read_osnib_reset_ev1: param not found\n");
+	return -EFAULT;
+}
+#endif
+
+/*
+ * This reads the RESETIRQ2 or RESETSRC1 from the OSNIB
+ */
+#ifdef DUMP_OSNIB
+static int intel_scu_ipc_read_osnib_reset_ev2(u8 *rev2)
+{
+	int i;
+
+	for (i = 0 ; i < ARRAY_SIZE(chip_reset_events); i++) {
+		if (chip_reset_events[i].id == oshob_info->platform_type) {
+			pr_debug(
+				"intel_scu_ipc_read_osnib_rst_ev2: read %s\n",
+				chip_reset_events[i].reset_ev2_name);
+
+			return oshob_info->scu_ipc_read_osnib(
+				rev2,
+				1,
+				offsetof(struct scu_ipc_osnib, reset_ev2));
+		}
+	}
+
+	pr_err("intel_scu_ipc_read_osnib_reset_ev2: param not found\n");
+	return -EFAULT;
+}
+#endif
+
+/*
+ * This reads the WD from the OSNIB
+ */
+int intel_scu_ipc_read_osnib_wd(u8 *wd)
+{
+	pr_debug("intel_scu_ipc_read_osnib_wd: read WATCHDOG\n");
+
+	return oshob_info->scu_ipc_read_osnib(
+			wd,
+			1,
+			offsetof(struct scu_ipc_osnib, wd_count));
+}
+
+/*
+ * This writes the WD in the OSNIB
+ */
+int intel_scu_ipc_write_osnib_wd(u8 *wd)
+{
+	pr_info("intel_scu_ipc_write_osnib_wd: write WATCHDOG %x\n", *wd);
+
+	return oshob_info->scu_ipc_write_osnib(
+			wd,
+			1,
+			offsetof(struct scu_ipc_osnib, wd_count));
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_write_osnib_wd);
+
+/*
+ * Get SCU trace buffer physical address if available
+ */
+u32 intel_scu_ipc_get_scu_trace_buffer(void)
+{
+	if (oshob_info == NULL)
+		return 0;
+	return oshob_info->scu_trace_buf;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_get_scu_trace_buffer);
+
+/*
+ * Get SCU trace buffer size
+ */
+u32 intel_scu_ipc_get_scu_trace_buffer_size(void)
+{
+	if (oshob_info == NULL)
+		return 0;
+	return oshob_info->scu_trace_size;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_get_scu_trace_buffer_size);
+
+/*
+ * Get nvram size
+ */
+u32 intel_scu_ipc_get_nvram_size(void)
+{
+	if (oshob_info == NULL)
+		return 0;
+	return oshob_info->nvram_size;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_get_nvram_size);
+
+/*
+ * Get nvram addr
+ */
+u32 intel_scu_ipc_get_nvram_addr(void)
+{
+	if (oshob_info == NULL)
+		return 0;
+	return oshob_info->nvram_addr;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_get_nvram_addr);
+
+/*
+ * Get SCU fabric error buffer1 offset
+ */
+u32 intel_scu_ipc_get_fabricerror_buf1_offset(void)
+{
+	if (oshob_info == NULL)
+		return 0;
+
+	if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_CLOVERVIEW)
+		return offsetof(struct scu_ipc_oshob_extend, fabricerrlog1);
+	else if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER)
+		if ((oshob_info->oshob_majrev >= 1) &&
+		    (oshob_info->oshob_minrev >= 4)) {
+			return offsetof(struct scu_ipc_oshob_extend_v14,
+					fabricerrlog) + oshob_info->offs_add;
+		} else {
+			return offsetof(struct scu_ipc_oshob,
+					fab_err_log) + oshob_info->offs_add;
+		}
+	else {
+		pr_err("scu_ipc_get_fabricerror_buf_offset: platform not recognized!\n");
+		return 0;
+	}
+}
+
+/*
+ * Get SCU fabric error buffer2 offset
+ */
+u32 intel_scu_ipc_get_fabricerror_buf2_offset(void)
+{
+	if (oshob_info == NULL)
+		return 0;
+
+	if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_CLOVERVIEW)
+		return offsetof(struct scu_ipc_oshob_extend, fabricerrlog2);
+	else {
+		pr_warn("scu_ipc_get_fabricerror_buf2_offset: not supported for this platform!\n");
+		return 0;
+	}
+}
+
+
+/*
+ * This reads the ALARM from the OSNIB
+ */
+#ifdef DUMP_OSNIB
+static int intel_scu_ipc_read_osnib_alarm(u8 *alarm)
+{
+	pr_debug("intel_scu_ipc_read_osnib_alarm: read ALARM\n");
+
+	return oshob_info->scu_ipc_read_osnib(
+			alarm,
+			1,
+			offsetof(struct scu_ipc_osnib, alarm));
+}
+#endif
+
+/*
+ * This reads the WAKESRC from the OSNIB
+ */
+#ifdef DUMP_OSNIB
+static int intel_scu_ipc_read_osnib_wakesrc(u8 *wksrc)
+{
+	pr_debug("intel_scu_ipc_read_osnib_wakesrc: read WAKESRC\n");
+
+	return oshob_info->scu_ipc_read_osnib(
+			wksrc,
+			1,
+			offsetof(struct scu_ipc_osnib, wakesrc));
+}
+#endif
+
+
+#define OEMNIB_BUF_DESC_LEN	4096
+
+#ifdef CONFIG_DEBUG_FS
+static int intel_scu_ipc_oshob_stat(struct seq_file *m, void *unused)
+{
+	void __iomem *osnib;
+	int i, count;
+	int ret = 0;
+
+	u32 value;
+	if ((oshob_info->oshob_majrev == OSHOB_REV_MAJ_DEFAULT) &&
+	     (oshob_info->oshob_minrev == OSHOB_REV_MIN_DEFAULT)) {
+		seq_printf(m, "DEFAULT OSHOB\n");
+		seq_printf(m, "OSHOB size : %d\n", oshob_info->oshob_size);
+		if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER) {
+			seq_printf(m, "SCU trace : ");
+
+			for (i = 0; i < OSHOB_SCU_BUF_MRFLD_DW_SIZE; i++)
+				seq_printf(m, "%x ", oshob_info->scu_trace[i]);
+
+			seq_printf(m, "\n");
+		} else
+			seq_printf(m, "SCU trace : %x\n",
+					oshob_info->scu_trace[0]);
+
+		seq_printf(m, "IA trace  : %x\n", oshob_info->ia_trace);
+	} else {
+		seq_printf(m, "EXTENDED OSHOB v%d.%d\n",
+						oshob_info->oshob_majrev,
+						oshob_info->oshob_minrev);
+		seq_printf(m, "OSHOB size : %d\n\n", oshob_info->oshob_size);
+		if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER) {
+			seq_printf(m, "SCU trace : ");
+
+			for (i = 0; i < OSHOB_SCU_BUF_MRFLD_DW_SIZE; i++)
+				seq_printf(m, "%x ", oshob_info->scu_trace[i]);
+
+			seq_printf(m, "\n");
+		} else
+			seq_printf(m, "SCU trace : %x\n",
+					oshob_info->scu_trace[0]);
+
+		seq_printf(m, "IA trace  : %x\n\n", oshob_info->ia_trace);
+
+		seq_printf(m, "OSNIB size : %d\n", oshob_info->osnib_size);
+		seq_printf(m, "OSNIB  read address  : %x\n",
+						    oshob_info->osnibr_ptr);
+		seq_printf(m, "OSNIB  write address : %x\n",
+						oshob_info->osnibw_ptr);
+		/* Dump OSNIB */
+		osnib = ioremap_nocache(oshob_info->osnibr_ptr,
+						oshob_info->osnib_size);
+		if (!osnib) {
+			pr_err("Cannot remap OSNIB\n");
+			ret = -ENOMEM;
+			return ret;
+		}
+
+		i = 0;
+		count = 0; /* used for fancy presentation */
+		while (i < oshob_info->osnib_size) {
+			if (count%4 == 0)
+				seq_printf(m, "\nOSNIB[%08x] ",
+						oshob_info->osnibr_ptr+i);
+
+			value = readl(osnib+i);
+			seq_printf(m, "%08x ", value);
+			i += 4;
+			count++;
+		}
+		seq_printf(m, "\n\n");
+		iounmap(osnib);
+
+		seq_printf(m, "OEMNIB size : %d\n",
+						oshob_info->oemnib_size);
+		seq_printf(m, "OEMNIB read address  : %x\n",
+						oshob_info->oemnibr_ptr);
+		seq_printf(m, "OEMNIB write address : %x\n",
+						oshob_info->oemnibw_ptr);
+		seq_printf(m, "\n\n");
+	}
+	return 0;
+}
+
+static int intel_scu_ipc_oemnib_stat(struct seq_file *m, void *unused)
+{
+	void __iomem *oemnib;
+	int i, count;
+	u32 value;
+
+	/* Dump OEMNIB */
+	oemnib = ioremap_nocache(oshob_info->oemnibr_ptr,
+				oshob_info->oemnib_size);
+
+	if (!oemnib) {
+		pr_err("Cannot remap OEMNIB\n");
+		return -ENOMEM;
+	}
+
+	i = 0;
+	count = 0; /* used for fancy presentation */
+	while (i < oshob_info->oemnib_size) {
+		if (count%4 == 0)
+			seq_printf(m, "\nOEMNIB[%08x] ",
+				    oshob_info->oemnibr_ptr+i);
+
+		value = readl(oemnib+i);
+		seq_printf(m, "%08x ", value);
+		i += 4;
+		count++;
+	}
+	seq_printf(m, "\n\n");
+	iounmap(oemnib);
+
+	return 0;
+}
+
+static int intel_scu_ipc_oshob_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, intel_scu_ipc_oshob_stat, NULL);
+}
+
+static int intel_scu_ipc_oemnib_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, intel_scu_ipc_oemnib_stat, NULL);
+}
+
+
+/*
+ *	debugfs interface: "oemnib_write" stores the OEMNIB part of the
+ *	OSHOB, starting at offset ppos.
+ */
+static ssize_t intel_scu_ipc_oemnib_write(struct file *file,
+					  const char __user *buf,
+					    size_t count, loff_t *ppos)
+{
+	int ret, i;
+	u8 *posnib_data, *ptr;
+	char *ptrchar, *temp;
+
+	if ((oshob_info->oshob_majrev == OSHOB_REV_MAJ_DEFAULT) &&
+	    (oshob_info->oshob_minrev == OSHOB_REV_MIN_DEFAULT)) {
+		/* OEMNIB only usable with extended OSHOB structure. */
+		pr_err(
+		"Write OEMNIB: OEMNIB only usable with extended OSHOB structure.\n");
+		return -EFAULT;
+	}
+
+	pr_info("Write OEMNIB: number bytes = %d\n", count);
+
+	/* Note: when the string is passed through debugfs interface, the  */
+	/* real count value includes the end of line \n. So we must take   */
+	/* care to consider count - 1 as the real number of OEM bytes.     */
+
+	if (buf == NULL) {
+		pr_err("Write OEMNIB: The passed OEMNIB buffer is NULL\n");
+		return -EINVAL;
+	}
+
+	if (count == 0) {
+		pr_err("Write OEMNIB: The OEMNIB data length to write is NULL\n");
+		return -EINVAL;
+	}
+
+	posnib_data = kzalloc(count - 1, GFP_KERNEL);
+
+	if (posnib_data == NULL) {
+		pr_err("Write OEMNIB: Cannot allocate buffer for writing OEMNIB\n");
+		return -ENOMEM;
+	}
+
+	temp = kzalloc(count - 1, GFP_KERNEL);
+
+	if (temp == NULL) {
+		pr_err(
+		"Write OEMNIB: Cannot allocate temp buffer for writing OEMNIB\n");
+		kfree(posnib_data);
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(temp, buf, count - 1)) {
+		pr_err(
+		"Write OEMNIB: Cannot transfer from user buf to OEMNIB buf\n");
+		kfree(posnib_data);
+		kfree(temp);
+		return -EFAULT;
+	}
+
+	ptrchar = temp;
+	ptr = posnib_data;
+
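+	/* Each input character is one hex digit; it is decoded to its */
+	/* 4-bit value and stored in its own output byte.              */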
+	for (i = 0; i <= count - 1; i++) {
+		if (*ptrchar >= '0' && *ptrchar <= '9')
+			*ptr = *ptrchar - '0';
+		if (*ptrchar >= 'A' && *ptrchar <= 'F')
+			*ptr = *ptrchar - 'A' + 10;
+		if (*ptrchar >= 'a' && *ptrchar <= 'f')
+			*ptr = *ptrchar - 'a' + 10;
+
+		ptrchar++;
+		ptr++;
+	}
+
+	ret = intel_scu_ipc_write_oemnib(posnib_data, count - 1, *ppos);
+
+	if (ret < 0) {
+		pr_err("Write OEMNIB: ipc write of OEMNIB failed!!\n");
+		kfree(posnib_data);
+		return ret;
+	}
+
+	kfree(posnib_data);
+	kfree(temp);
+
+	pr_info("Write OEMNIB: OEMNIB updated: count=%d bytes\n", count);
+
+	return count;
+}
+
+/* Attach the debugfs operations methods */
+static const struct file_operations scu_ipc_oemnib_fops = {
+	.owner = THIS_MODULE,
+	.open = intel_scu_ipc_oemnib_open,
+	.read = seq_read,
+	.write = intel_scu_ipc_oemnib_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static const struct file_operations scu_ipc_oshob_fops = {
+	.owner = THIS_MODULE,
+	.open = intel_scu_ipc_oshob_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct dentry *scu_ipc_oemnib_dir;
+static struct dentry *scu_ipc_oemnib_file;
+static struct dentry *scu_ipc_oshob_file;
+
+/*
+ *	debugfs interface: init interface.
+ */
+static int intel_mid_scu_ipc_oemnib_debugfs_init(void)
+{
+	/* Create debugfs directory /sys/kernel/debug/intel_scu_oshob */
+	scu_ipc_oemnib_dir = debugfs_create_dir("intel_scu_oshob", NULL);
+
+	if (!scu_ipc_oemnib_dir) {
+		pr_err("cannot create OSHOB debugfs directory\n");
+		return -1;
+	}
+
+	/* Add the oemnib_debug file under /sys/kernel/debug/intel_scu_oshob */
+	/* to control the OEMNIB.                                            */
+	scu_ipc_oemnib_file = debugfs_create_file("oemnib_debug",
+				S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP,
+				scu_ipc_oemnib_dir,
+				NULL, &scu_ipc_oemnib_fops);
+
+	if (!scu_ipc_oemnib_file) {
+		pr_err("cannot create OEMNIB debugfs file\n");
+		debugfs_remove(scu_ipc_oemnib_dir);
+		return -1;
+	}
+
+	/* Add the oshob_dump file under /sys/kernel/debug/intel_scu_oshob */
+	/* to dump the OSHOB content.                                       */
+	scu_ipc_oshob_file = debugfs_create_file("oshob_dump",
+				S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP,
+				scu_ipc_oemnib_dir, NULL, &scu_ipc_oshob_fops);
+
+	if (!scu_ipc_oshob_file) {
+		pr_err("cannot create OSHOB debugfs file\n");
+		debugfs_remove_recursive(scu_ipc_oemnib_dir);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ *	debugfs interface: exit interface.
+ */
+static void intel_mid_scu_ipc_oemnib_debugfs_exit(void)
+{
+	debugfs_remove_recursive(scu_ipc_oemnib_dir);
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
 static const struct file_operations scu_ipc_fops = {
 	.unlocked_ioctl = scu_ipc_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl   = scu_ipc_ioctl,
+#endif
 };
 
-static int __init ipc_module_init(void)
-{
-	major = register_chrdev(0, "intel_mid_scu", &scu_ipc_fops);
-	if (major < 0)
-		return major;
+static struct miscdevice scu_ipcutil = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "mid_ipc",
+	.fops = &scu_ipc_fops,
+};
 
-	return 0;
+static int oshob_init(void)
+{
+	int ret, i;
+	u16 struct_offs;
+
+#ifdef DUMP_OSNIB
+	u8 rr, reset_ev1, reset_ev2, wd, alarm, wakesrc, *ptr;
+	int rr_found = 0, wksrc_found = 0;
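+	/* scu_trace is sized with a 4x margin over the base dword count, */
+	/* presumably so it can also hold the larger Merrifield buffer.   */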
+	u32 pmit, scu_trace[OSHOB_SCU_BUF_BASE_DW_SIZE*4], ia_trace;
+	int buff_size;
+#endif
+
+	/* Identify the type and size of OSHOB to be used. */
+	ret = intel_scu_ipc_read_oshob_info();
+
+	if (ret != 0) {
+		pr_err("Cannot init ipc module: oshob info not read\n");
+		goto exit;
+	}
+
+#ifdef DUMP_OSNIB
+	/* Dumping reset events from the interrupt tree */
+	ret = intel_scu_ipc_read_oshob_it_tree(&pmit);
+
+	if (ret != 0) {
+		pr_err("Cannot read interrupt tree\n");
+		goto exit;
+	}
+
+	ptr = ioremap_nocache(pmit + PMIT_RESET1_OFFSET, 2);
+
+	if (!ptr) {
+		pr_err("Cannot remap PMIT\n");
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	pr_debug("PMIT addr 0x%8x remapped to 0x%8x\n", pmit, (u32)ptr);
+
+	reset_ev1 = readb(ptr);
+	reset_ev2 = readb(ptr+1);
+	for (i = 0; i < ARRAY_SIZE(chip_reset_events); i++) {
+		if (chip_reset_events[i].id == oshob_info->platform_type) {
+			pr_warn("[BOOT] %s=0x%02x %s=0x%02x (PMIT interrupt tree)\n",
+				chip_reset_events[i].reset_ev1_name,
+				reset_ev1,
+				chip_reset_events[i].reset_ev2_name,
+				reset_ev2);
+		}
+	}
+	iounmap(ptr);
+
+	/* Dumping OSHOB content */
+	if ((oshob_info->oshob_majrev == OSHOB_REV_MAJ_DEFAULT) &&
+	    (oshob_info->oshob_minrev == OSHOB_REV_MIN_DEFAULT)) {
+		/* Use default OSHOB here. Calculate in bytes here. */
+		if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER)
+			buff_size = OSHOB_SCU_BUF_MRFLD_DW_SIZE*4;
+		else
+			buff_size = OSHOB_SCU_BUF_BASE_DW_SIZE*4;
+
+		ret = intel_scu_ipc_read_oshob(
+			(u8 *)(scu_trace),
+			buff_size,
+			offsetof(struct scu_ipc_oshob, scutxl));
+
+		if (ret != 0) {
+			pr_err("Cannot read SCU data\n");
+			goto exit;
+		}
+
+		struct_offs = offsetof(struct scu_ipc_oshob, iatxl) +
+				oshob_info->offs_add;
+		ret = intel_scu_ipc_read_oshob(
+			    (u8 *)(&ia_trace),
+			    4,
+			    struct_offs);
+
+		if (ret != 0) {
+			pr_err("Cannot read IA data\n");
+			goto exit;
+		}
+	} else {
+		/* Use extended OSHOB here. Calculate in bytes here. */
+		if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER)
+			buff_size = OSHOB_SCU_BUF_MRFLD_DW_SIZE*4;
+		else
+			buff_size = OSHOB_SCU_BUF_BASE_DW_SIZE*4;
+
+		if ((oshob_info->oshob_majrev >= 1) &&
+		    (oshob_info->oshob_minrev >= 4)) {
+			ret = intel_scu_ipc_read_oshob(
+				(u8 *)(scu_trace),
+				buff_size,
+				offsetof(struct scu_ipc_oshob_extend_v14,
+								scutxl));
+		} else {
+			ret = intel_scu_ipc_read_oshob(
+				(u8 *)(scu_trace),
+				buff_size,
+				offsetof(struct scu_ipc_oshob_extend, scutxl));
+		}
+
+		if (ret != 0) {
+			pr_err("Cannot read SCU data\n");
+			goto exit;
+		}
+
+		if ((oshob_info->oshob_majrev >= 1) &&
+		    (oshob_info->oshob_minrev >= 4)) {
+			struct_offs = offsetof(struct scu_ipc_oshob_extend_v14,
+						iatxl) + oshob_info->offs_add;
+		} else {
+			struct_offs = offsetof(struct scu_ipc_oshob_extend,
+						iatxl) + oshob_info->offs_add;
+		}
+
+		ret = intel_scu_ipc_read_oshob(
+				(u8 *)(&ia_trace),
+				4,
+				struct_offs);
+
+		if (ret != 0) {
+			pr_err("Cannot read IA data\n");
+			goto exit;
+		}
+	}
+
+	if (oshob_info->platform_type == INTEL_MID_CPU_CHIP_TANGIER) {
+		for (i = 0; i < OSHOB_SCU_BUF_MRFLD_DW_SIZE; i++)
+			pr_warn("[BOOT] SCU_TR[%d]=0x%08x\n", i, scu_trace[i]);
+	} else
+		pr_warn("[BOOT] SCU_TR=0x%08x (oshob)\n", scu_trace[0]);
+
+	pr_warn("[BOOT] IA_TR=0x%08x (oshob)\n", ia_trace);
+
+	/* Dumping OSNIB content */
+	ret = 0;
+	ret |= intel_scu_ipc_read_osnib_rr(&rr);
+	ret |= intel_scu_ipc_read_osnib_reset_ev1(&reset_ev1);
+	ret |= intel_scu_ipc_read_osnib_reset_ev2(&reset_ev2);
+	ret |= intel_scu_ipc_read_osnib_wd(&wd);
+	ret |= intel_scu_ipc_read_osnib_alarm(&alarm);
+	ret |= intel_scu_ipc_read_osnib_wakesrc(&wakesrc);
+
+	if (ret) {
+		pr_err("Cannot read OSNIB content\n");
+		goto exit;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(osnib_target_oses); i++) {
+		if (osnib_target_oses[i].id == rr) {
+			pr_warn("[BOOT] RR=[%s] WD=0x%02x ALARM=0x%02x (osnib)\n",
+				osnib_target_oses[i].target_os_name, wd, alarm);
+			rr_found++;
+			break;
+		}
+	}
+
+	if (!rr_found)
+		pr_warn("[BOOT] RR=[UNKNOWN 0x%02x] WD=0x%02x ALARM=0x%02x (osnib)\n",
+			rr, wd, alarm);
+
+	for (i = 0; i < ARRAY_SIZE(osnib_wake_srcs); i++) {
+		if (osnib_wake_srcs[i].id == wakesrc) {
+			pr_warn("[BOOT] WAKESRC=[%s] (osnib)\n",
+				osnib_wake_srcs[i].wakesrc_name);
+			wksrc_found++;
+			break;
+		}
+	}
+
+	if (!wksrc_found)
+		pr_warn("[BOOT] WAKESRC=[UNKNOWN 0x%02x] (osnib)\n", wakesrc);
+
+	for (i = 0; i < ARRAY_SIZE(chip_reset_events); i++) {
+		if (chip_reset_events[i].id == oshob_info->platform_type) {
+			pr_warn("[BOOT] %s=0x%02x %s=0x%02x (osnib)\n",
+				chip_reset_events[i].reset_ev1_name,
+				reset_ev1,
+				chip_reset_events[i].reset_ev2_name,
+				reset_ev2);
+			break;
+		}
+	}
+
+#endif /* DUMP_OSNIB */
+
+#ifdef CONFIG_DEBUG_FS
+	if (oshob_info->oshob_majrev != OSHOB_REV_MAJ_DEFAULT) {
+		/* OEMNIB only usable with extended OSHOB structure. */
+		ret = intel_mid_scu_ipc_oemnib_debugfs_init();
+
+		if (ret != 0) {
+			pr_err("Cannot register OEMNIB interface to debugfs\n");
+			goto exit;
+		} else {
+			pr_info("OEMNIB interface registered to debugfs\n");
+		}
+	}
+#endif /* CONFIG_DEBUG_FS */
+
+exit:
+	return ret;
 }
 
-static void __exit ipc_module_exit(void)
+static int ipcutil_rpmsg_probe(struct rpmsg_channel *rpdev)
 {
-	unregister_chrdev(major, "intel_mid_scu");
+	int ret = 0;
+
+	oshob_info = kmalloc(sizeof(struct scu_ipc_oshob_info), GFP_KERNEL);
+	if (oshob_info == NULL) {
+		pr_err(
+		"Cannot init ipc module: oshob info struct not allocated\n");
+		return -ENOMEM;
+	}
+
+	if (rpdev == NULL) {
+		pr_err("ipcutil rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed ipcutil rpmsg device\n");
+
+	/* Allocate rpmsg instance for ipcutil */
+	ret = alloc_rpmsg_instance(rpdev, &ipcutil_instance);
+	if (!ipcutil_instance) {
+		dev_err(&rpdev->dev, "kzalloc ipcutil instance failed\n");
+		goto out;
+	}
+
+	/* Initialize rpmsg instance */
+	init_rpmsg_instance(ipcutil_instance);
+
+	ret = oshob_init();
+	if (ret)
+		goto misc_err;
+
+	ret = misc_register(&scu_ipcutil);
+	if (ret) {
+		pr_err("misc register failed\n");
+		goto misc_err;
+	}
+
+	return ret;
+
+misc_err:
+	free_rpmsg_instance(rpdev, &ipcutil_instance);
+out:
+	kfree(oshob_info);
+	return ret;
 }
 
-module_init(ipc_module_init);
-module_exit(ipc_module_exit);
+static void ipcutil_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+#ifdef CONFIG_DEBUG_FS
+	if (oshob_info->oshob_majrev != OSHOB_REV_MAJ_DEFAULT) {
+		/* OEMNIB only usable with extended OSHOB structure; */
+		/* unregister it from debugfs.                       */
+		intel_mid_scu_ipc_oemnib_debugfs_exit();
+	}
+#endif /* CONFIG_DEBUG_FS */
+
+	kfree(oshob_info);
+
+	/* unregister the mid_ipc misc device. */
+	misc_deregister(&scu_ipcutil);
+	free_rpmsg_instance(rpdev, &ipcutil_instance);
+}
+
+static void ipcutil_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id ipcutil_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_ipc_util" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, ipcutil_rpmsg_id_table);
+
+static struct rpmsg_driver ipcutil_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= ipcutil_rpmsg_id_table,
+	.probe		= ipcutil_rpmsg_probe,
+	.callback	= ipcutil_rpmsg_cb,
+	.remove		= ipcutil_rpmsg_remove,
+};
+
+static int __init ipcutil_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&ipcutil_rpmsg);
+}
+
+static void __exit ipcutil_rpmsg_exit(void)
+{
+	unregister_rpmsg_driver(&ipcutil_rpmsg);
+}
+
+rootfs_initcall(ipcutil_rpmsg_init);
+module_exit(ipcutil_rpmsg_exit);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Utility driver for intel scu ipc");
diff --git a/drivers/platform/x86/intel_scu_mip.c b/drivers/platform/x86/intel_scu_mip.c
new file mode 100644
index 0000000..ec06140
--- /dev/null
+++ b/drivers/platform/x86/intel_scu_mip.c
@@ -0,0 +1,776 @@
+/*
+ * intel_scu_mip.c: Driver for the Intel scu mip and umip access
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author: Shijie Zhang (shijie.zhang@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/rpmsg.h>
+#include <linux/blkdev.h>
+#include <linux/pagemap.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_mip.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+
+#define DRIVER_NAME "intel_scu_mip"
+
+#define IPC_MIP_BASE     0xFFFD8000	/* sram base address for mip accessing*/
+#define IPC_MIP_MAX_ADDR 0x1000
+
+#define KOBJ_MIP_ATTR(_name, _mode, _show, _store) \
+	struct kobj_attribute _name##_attr = __ATTR(_name, _mode, _show, _store)
+
+static struct kobject *scu_mip_kobj;
+static struct rpmsg_instance *mip_instance;
+static struct scu_mip_platform_data *pdata;
+
+static void __iomem *intel_mip_base;
+#define SECTOR_SIZE			512
+#define UMIP_TOTAL_CHKSUM_ENTRY		126
+#define UMIP_HEADER_HEADROOM_SECTOR	1
+#define UMIP_HEADER_SECTOR		0
+#define UMIP_HEADER_CHKSUM_ADDR		7
+#define UMIP_START_CHKSUM_ADDR		8
+#define UMIP_TOTAL_HEADER_SECTOR_NO	2
+
+#define UMIP_BLKDEVICE			"mmcblk0boot0"
+
+static int xorblock(u32 *buf, u32 size)
+{
+	u32 cs = 0;
+
+	size >>= 2;
+	while (size--)
+		cs ^= *buf++;
+
+	return cs;
+}
+
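+/* Fold the four bytes of a dword into a single XOR checksum byte. */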
+static u8 dword_to_byte_chksum(u32 dw)
+{
+	int n = 0;
+	u32 cs = dw;
+	for (n = 0; n < 3; n++) {
+		dw >>= 8;
+		cs ^= dw;
+	}
+
+	return (u8)cs;
+}
+
+static u8 calc_checksum(void *_buf, int size)
+{
+	int i;
+	u8 checksum = 0, *buf = (u8 *)_buf;
+
+	for (i = 0; i < size; i++)
+		checksum = checksum ^ (buf[i]);
+
+	return checksum;
+}
+
+static int mmcblk0boot0_match(struct device *dev, const void *data)
+{
+	if (strcmp(dev_name(dev), UMIP_BLKDEVICE) == 0)
+		return 1;
+
+	return 0;
+}
+
+static struct block_device *get_emmc_bdev(void)
+{
+	struct block_device *bdev;
+	struct device *emmc_disk;
+
+	emmc_disk = class_find_device(&block_class, NULL, NULL,
+					mmcblk0boot0_match);
+	if (emmc_disk == NULL) {
+		pr_err("emmc not found!\n");
+		return NULL;
+	}
+
+	/* partition 0 means raw disk */
+	bdev = bdget_disk(dev_to_disk(emmc_disk), 0);
+	if (bdev == NULL) {
+		dev_err(emmc_disk, "unable to get disk\n");
+		return NULL;
+	}
+
+	/* Note: this bdev ref will be freed after first
+	 * bdev_get/bdev_put cycle
+	 */
+
+	return bdev;
+}
+
+
+static int read_mip(u8 *data, int len, int offset, int issigned)
+{
+	int ret;
+	u32 sptr, dptr, cmd, cmdid, data_off;
+
+	dptr = offset;
+	sptr = (len + 3) / 4;
+
+	cmdid = issigned ? IPC_CMD_SMIP_RD : IPC_CMD_UMIP_RD;
+	cmd = 4 << 16 | cmdid << 12 | IPCMSG_MIP_ACCESS;
+
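+	/* -EIO is treated as transient (e.g. MIP interface busy): */
+	/* retry with a short delay until another status is returned. */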
+	do {
+		ret = rpmsg_send_raw_command(mip_instance, cmd, 0, NULL,
+			(u32 *)&data_off, 0, 1, sptr, dptr);
+
+		if (ret == -EIO)
+			msleep(20);
+	} while (ret == -EIO);
+
+	if (!ret)
+		memcpy(data, intel_mip_base + data_off, len);
+
+	return ret;
+}
+
+int intel_scu_ipc_read_mip(u8 *data, int len, int offset, int issigned)
+{
+	int ret = 0;
+	Sector sect;
+	struct block_device *bdev;
+	char *buffer = NULL;
+	int *holderId = NULL;
+	int sect_no, remainder;
+
+	/* Only SMIP read for Cloverview is supported */
+	if ((intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW)
+			&& (issigned != 1)) { /* CTP read UMIP from eMMC */
+
+		/* Opening the mmcblk0boot0 */
+		bdev = get_emmc_bdev();
+		if (bdev == NULL) {
+			pr_err("%s: get_emmc failed!\n", __func__);
+			return -ENODEV;
+		}
+
+		/* make sure the block device is open read only */
+		ret = blkdev_get(bdev, FMODE_READ, holderId);
+		if (ret < 0) {
+			pr_err("%s: blk_dev_get failed!\n", __func__);
+			return ret;
+		}
+
+		/* Get sector number of where data located */
+		sect_no = offset / SECTOR_SIZE;
+		remainder = offset % SECTOR_SIZE;
+		buffer = read_dev_sector(bdev, sect_no +
+					UMIP_HEADER_HEADROOM_SECTOR, &sect);
+
+		/* Shouldn't need to access UMIP sector 0/1 */
+		if (sect_no < UMIP_TOTAL_HEADER_SECTOR_NO) {
+			pr_err("invalid umip offset\n");
+			ret = -EINVAL;
+			goto bd_put;
+		} else if (data == NULL || buffer == NULL) {
+			pr_err("buffer is empty\n");
+			ret = -ENODEV;
+			goto bd_put;
+		} else if (len > (SECTOR_SIZE - remainder)) {
+			pr_err("not enough data to read\n");
+			ret = -EINVAL;
+			goto bd_put;
+		}
+
+		memcpy(data, buffer + remainder, len);
+bd_put:
+		if (buffer)
+			put_dev_sector(sect);
+
+		blkdev_put(bdev, FMODE_READ);
+		return ret;
+	} else {
+
+		if (!intel_mip_base)
+			return -ENODEV;
+
+		if (offset + len > IPC_MIP_MAX_ADDR)
+			return -EINVAL;
+
+		rpmsg_global_lock();
+		ret = read_mip(data, len, offset, issigned);
+		rpmsg_global_unlock();
+
+		return ret;
+	}
+}
+EXPORT_SYMBOL(intel_scu_ipc_read_mip);
+
+int get_smip_property_by_name(enum platform_prop pp)
+{
+	u8 data[SMIP_MAX_PROP_LEN];
+	int i, val, ret;
+	struct smip_platform_prop prop[SMIP_NUM_CONFIG_PROPS];
+
+	if (!pdata->smip_prop)
+		return -EINVAL;
+
+	for (i = 0; i < SMIP_NUM_CONFIG_PROPS; i++)
+		prop[i] = pdata->smip_prop[i];
+
+	/* Read the property requested by the caller */
+	ret = intel_scu_ipc_read_mip(data, prop[pp].len, prop[pp].offset, 1);
+	if (ret)
+		return ret;
+
+	/* Adjust the bytes according to the length and return the int */
+	val = data[0];
+	for (i = 1; i < prop[pp].len; i++)
+		val = val << 8 | data[i];
+
+	/* If the requested property is a bit field, return that bit value */
+	if (prop[pp].is_bit_field)
+		val &= prop[pp].mask;
+
+	return val;
+}
+EXPORT_SYMBOL(get_smip_property_by_name);
+
+int intel_scu_ipc_write_umip(u8 *data, int len, int offset)
+{
+	int i, ret = 0, offset_align;
+	int remainder, len_align = 0;
+	u32 dptr, sptr, cmd;
+	u8 cs, tbl_cs = 0, *buf = NULL;
+	Sector sect;
+	struct block_device *bdev;
+	char *buffer = NULL;
+	int *holderId = NULL;
+	int sect_no;
+	u8 checksum;
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_CLOVERVIEW) {
+
+		/* Opening the mmcblk0boot0 */
+		bdev = get_emmc_bdev();
+		if (bdev == NULL) {
+			pr_err("%s: get_emmc failed!\n", __func__);
+			return -ENODEV;
+		}
+
+		/* make sure the block device is open rw */
+		ret = blkdev_get(bdev, FMODE_READ|FMODE_WRITE, holderId);
+		if (ret < 0) {
+			pr_err("%s: blk_dev_get failed!\n", __func__);
+			return ret;
+		}
+
+		/* get memmap of the UMIP header */
+		sect_no = offset / SECTOR_SIZE;
+		remainder = offset % SECTOR_SIZE;
+		buffer = read_dev_sector(bdev, sect_no +
+					UMIP_HEADER_HEADROOM_SECTOR, &sect);
+
+		/* Shouldn't need to access UMIP sector 0/1 */
+		if (sect_no < UMIP_TOTAL_HEADER_SECTOR_NO) {
+			pr_err("invalid umip offset\n");
+			ret = -EINVAL;
+			goto bd_put;
+		} else if (data == NULL || buffer == NULL) {
+			pr_err("buffer is empty\n");
+			ret = -ENODEV;
+			goto bd_put;
+		} else if (len > (SECTOR_SIZE - remainder)) {
+			pr_err("too much data to write\n");
+			ret = -EINVAL;
+			goto bd_put;
+		}
+
+		lock_page(sect.v);
+		memcpy(buffer + remainder, data, len);
+		checksum = calc_checksum(buffer, SECTOR_SIZE);
+
+		set_page_dirty(sect.v);
+		unlock_page(sect.v);
+		sync_blockdev(bdev);
+		put_dev_sector(sect);
+
+		/*
+		 * Update the checksum. In sector 0 (starting from UMIP
+		 * offset 0x08) we maintain 4 bytes for tracking each
+		 * sector's changes individually. For example, the dword at
+		 * offset 0x08 is used to checksum the data integrity of
+		 * sector number 2, and so forth. It's worth noting that
+		 * only the first byte of each 4-byte entry stores the
+		 * checksum. For details, see the CTP FAS UMIP header
+		 * definition.
+		 */
+
+		buffer = read_dev_sector(bdev, UMIP_HEADER_SECTOR +
+					UMIP_HEADER_HEADROOM_SECTOR, &sect);
+
+		if (buffer == NULL) {
+			pr_err("buffer is empty\n");
+			ret = -ENODEV;
+			goto bd_put;
+		}
+
+		lock_page(sect.v);
+		memcpy(buffer + 4 * (sect_no - UMIP_TOTAL_HEADER_SECTOR_NO) +
+			UMIP_START_CHKSUM_ADDR, &checksum, 1/* one byte */);
+
+		/* Change UMIP prologue chksum to zero */
+		*(buffer + UMIP_HEADER_CHKSUM_ADDR) = 0;
+
+		for (i = 0; i < UMIP_TOTAL_CHKSUM_ENTRY; i++) {
+			tbl_cs ^= *(u8 *)(buffer + 4 * i +
+					UMIP_START_CHKSUM_ADDR);
+		}
+
+		/* Finish up with re-calculating the UMIP prologue checksum */
+		cs = dword_to_byte_chksum(xorblock((u32 *)buffer,
+							SECTOR_SIZE));
+
+		*(buffer + UMIP_HEADER_CHKSUM_ADDR) = tbl_cs ^ cs;
+
+		set_page_dirty(sect.v);
+		unlock_page(sect.v);
+		sync_blockdev(bdev);
+bd_put:
+		if (buffer)
+			put_dev_sector(sect);
+
+		blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
+		return ret;
+	} else {
+
+		if (!intel_mip_base)
+			return -ENODEV;
+
+		if (offset + len > IPC_MIP_MAX_ADDR)
+			return -EINVAL;
+
+		rpmsg_global_lock();
+
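+		/* The SCU transfers whole dwords, so align the write window */
+		/* to dword boundaries; for an unaligned range, read the     */
+		/* full window first and merge the caller's data into it.    */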
+		offset_align = offset & (~0x3);
+		len_align = (len + (offset - offset_align) + 3) & (~0x3);
+
+		if (len != len_align) {
+			buf = kzalloc(len_align, GFP_KERNEL);
+			if (!buf) {
+				pr_err("Alloc memory failed\n");
+				ret = -ENOMEM;
+				goto fail;
+			}
+			ret = read_mip(buf, len_align, offset_align, 0);
+			if (ret)
+				goto fail;
+			memcpy(buf + offset - offset_align, data, len);
+		} else {
+			buf = data;
+		}
+
+		dptr = offset_align;
+		sptr = len_align / 4;
+		cmd = IPC_CMD_UMIP_WR << 12 | IPCMSG_MIP_ACCESS;
+
+		memcpy(intel_mip_base, buf, len_align);
+
+		do {
+			ret = rpmsg_send_raw_command(mip_instance, cmd, 0, NULL,
+					NULL, 0, 0, sptr, dptr);
+			if (ret == -EIO)
+				msleep(20);
+		} while (ret == -EIO);
+
+fail:
+		if (buf && len_align != len)
+			kfree(buf);
+
+		rpmsg_global_unlock();
+
+		return ret;
+	}
+}
+EXPORT_SYMBOL(intel_scu_ipc_write_umip);
+
+
+#define MAX_DATA_NR 8
+#define MIP_CMD_LEN 11
+
+enum {
+	MIP_DBG_DATA,
+	MIP_DBG_LEN,
+	MIP_DBG_OFFSET,
+	MIP_DBG_ISSIGNED,
+	MIP_DBG_ERROR,
+};
+
+static u8 mip_data[MAX_DATA_NR];
+static int valid_data_nr;
+static int mip_len;
+static int mip_offset;
+static int mip_issigned;
+static int mip_dbg_error;
+static char mip_cmd[MIP_CMD_LEN];
+
+static ssize_t mip_generic_show(char *buf, int type, int *data)
+{
+	int i;
+	ssize_t ret = 0;
+
+	switch (type) {
+	case MIP_DBG_DATA:
+		for (i = 0; i < valid_data_nr; i++) {
+			ret += snprintf(buf + ret, PAGE_SIZE - ret,
+					"data[%d]: %#x\n",
+					i, mip_data[i]);
+		}
+		break;
+	case MIP_DBG_LEN:
+		ret = snprintf(buf, PAGE_SIZE, "len: %d\n", *data);
+		break;
+	case MIP_DBG_OFFSET:
+		ret = snprintf(buf, PAGE_SIZE, "offset: %#x\n", *data);
+		break;
+	case MIP_DBG_ISSIGNED:
+		ret = snprintf(buf, PAGE_SIZE, "issigned: %d\n", *data);
+		break;
+	case MIP_DBG_ERROR:
+		ret = snprintf(buf, PAGE_SIZE, "error: %d\n", *data);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static void mip_generic_store(const char *buf, int type, int *data)
+{
+	int i, ret;
+
+	if (type == MIP_DBG_DATA) {
+		u32 t[MAX_DATA_NR];
+
+		valid_data_nr = 0;
+		memset(mip_data, 0, sizeof(mip_data));
+
+		ret = sscanf(buf, "%x %x %x %x %x %x %x %x", &t[0], &t[1],
+				&t[2], &t[3], &t[4], &t[5], &t[6], &t[7]);
+		if (ret == 0 || ret > MAX_DATA_NR) {
+			mip_dbg_error = -EINVAL;
+			return;
+		} else {
+			for (i = 0; i < ret; i++)
+				mip_data[i] = (u8)t[i];
+			valid_data_nr = ret;
+		}
+	} else {
+		*data = 0;
+		switch (type) {
+		case MIP_DBG_OFFSET:
+			ret = sscanf(buf, "%x", data);
+			break;
+		case MIP_DBG_LEN:
+		case MIP_DBG_ISSIGNED:
+			ret = sscanf(buf, "%d", data);
+			break;
+		default:
+			ret = 0; /* unsupported type: flagged as -EINVAL below */
+			break;
+		}
+	}
+
+	if (ret)
+		mip_dbg_error = 0;
+	else
+		mip_dbg_error = -EINVAL;
+
+	return;
+}
+
+static ssize_t mip_data_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return mip_generic_show(buf, MIP_DBG_DATA, NULL);
+}
+
+static ssize_t mip_data_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	mip_generic_store(buf, MIP_DBG_DATA, NULL);
+	return size;
+}
+
+static ssize_t mip_len_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return mip_generic_show(buf, MIP_DBG_LEN, &mip_len);
+}
+
+static ssize_t mip_len_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	mip_generic_store(buf, MIP_DBG_LEN, &mip_len);
+	return size;
+}
+
+static ssize_t mip_offset_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return mip_generic_show(buf, MIP_DBG_OFFSET, &mip_offset);
+}
+
+static ssize_t mip_offset_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	mip_generic_store(buf, MIP_DBG_OFFSET, &mip_offset);
+	return size;
+}
+
+static ssize_t mip_issigned_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return mip_generic_show(buf, MIP_DBG_ISSIGNED, &mip_issigned);
+}
+
+static ssize_t mip_issigned_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	mip_generic_store(buf, MIP_DBG_ISSIGNED, &mip_issigned);
+	return size;
+}
+
+static ssize_t mip_error_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return mip_generic_show(buf, MIP_DBG_ERROR, &mip_dbg_error);
+}
+
+static ssize_t mip_cmd_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	int ret;
+
+	memset(mip_cmd, 0, sizeof(mip_cmd));
+
+	ret = sscanf(buf, "%10s", mip_cmd);
+	if (ret == 0) {
+		mip_dbg_error = -EINVAL;
+		goto end;
+	}
+
+	if (!strncmp("read_mip", mip_cmd, MIP_CMD_LEN)) {
+		memset(mip_data, 0, sizeof(mip_data));
+		ret = intel_scu_ipc_read_mip(mip_data, mip_len, mip_offset,
+				mip_issigned);
+		if (!ret)
+			valid_data_nr = mip_len;
+
+	} else if (!strncmp("write_umip", mip_cmd, MIP_CMD_LEN)) {
+		if (mip_len == valid_data_nr) {
+			ret = intel_scu_ipc_write_umip(mip_data, mip_len,
+					mip_offset);
+		} else
+			goto error;
+	} else
+		goto error;
+
+	if (ret)
+		goto error;
+	else
+		goto end;
+
+error:
+	mip_dbg_error = -EINVAL;
+
+end:
+	return size;
+}
+
+static KOBJ_MIP_ATTR(data, S_IRUGO|S_IWUSR, mip_data_show, mip_data_store);
+static KOBJ_MIP_ATTR(len, S_IRUGO|S_IWUSR, mip_len_show, mip_len_store);
+static KOBJ_MIP_ATTR(offset, S_IRUGO|S_IWUSR, mip_offset_show,
+		mip_offset_store);
+static KOBJ_MIP_ATTR(issigned, S_IRUGO|S_IWUSR, mip_issigned_show,
+		mip_issigned_store);
+static KOBJ_MIP_ATTR(cmd, S_IWUSR, NULL, mip_cmd_store);
+static KOBJ_MIP_ATTR(error, S_IRUGO, mip_error_show, NULL);
+
+static struct attribute *mip_attrs[] = {
+	&data_attr.attr,
+	&len_attr.attr,
+	&offset_attr.attr,
+	&issigned_attr.attr,
+	&cmd_attr.attr,
+	&error_attr.attr,
+	NULL,
+};
+
+static struct attribute_group mip_attr_group = {
+	.name = "mip_debug",
+	.attrs = mip_attrs,
+};
+
+static int scu_mip_probe(struct platform_device *pdev)
+{
+	if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_PENWELL) {
+		if (!pdev->dev.platform_data)
+			return -EINVAL;
+		pdata =
+		(struct scu_mip_platform_data *)pdev->dev.platform_data;
+	}
+	return 0;
+}
+
+static int scu_mip_remove(struct platform_device *pdev)
+{
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static const struct platform_device_id scu_mip_table[] = {
+		{DRIVER_NAME, 1 },
+};
+
+static struct platform_driver scu_mip_driver = {
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = scu_mip_probe,
+	.remove = scu_mip_remove,
+	.id_table = scu_mip_table,
+};
+
+static int __init scu_mip_init(void)
+{
+	return platform_driver_register(&scu_mip_driver);
+}
+
+static void scu_mip_exit(void)
+{
+	platform_driver_unregister(&scu_mip_driver);
+}
+
+static int mip_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed mip rpmsg device\n");
+
+	/* Allocate rpmsg instance for mip*/
+	ret = alloc_rpmsg_instance(rpdev, &mip_instance);
+	if (!mip_instance) {
+		dev_err(&rpdev->dev, "kzalloc mip instance failed\n");
+		goto out;
+	}
+	/* Initialize rpmsg instance */
+	init_rpmsg_instance(mip_instance);
+
+	/* Init mip base */
+	intel_mip_base = ioremap_nocache(IPC_MIP_BASE, IPC_MIP_MAX_ADDR);
+	if (!intel_mip_base) {
+		ret = -ENOMEM;
+		goto rpmsg_err;
+	}
+
+	/* Create sysfs entries for mip debug */
+	scu_mip_kobj = kobject_create_and_add(mip_attr_group.name,
+						kernel_kobj);
+
+	if (!scu_mip_kobj) {
+		ret = -ENOMEM;
+		goto mip_base_err;
+	}
+
+	ret = sysfs_create_group(scu_mip_kobj, &mip_attr_group);
+
+	if (ret) {
+		kobject_put(scu_mip_kobj);
+		goto mip_base_err;
+	}
+
+	ret = scu_mip_init();
+	goto out;
+mip_base_err:
+	iounmap(intel_mip_base);
+rpmsg_err:
+	free_rpmsg_instance(rpdev, &mip_instance);
+out:
+	return ret;
+}
+
+static void mip_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	scu_mip_exit();
+	iounmap(intel_mip_base);
+	free_rpmsg_instance(rpdev, &mip_instance);
+	sysfs_remove_group(scu_mip_kobj, &mip_attr_group);
+	kobject_put(scu_mip_kobj);
+	dev_info(&rpdev->dev, "Removed mip rpmsg device\n");
+}
+
+static void mip_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected, message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id mip_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_mip" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, mip_rpmsg_id_table);
+
+static struct rpmsg_driver mip_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= mip_rpmsg_id_table,
+	.probe		= mip_rpmsg_probe,
+	.callback	= mip_rpmsg_cb,
+	.remove		= mip_rpmsg_remove,
+};
+
+static int __init mip_rpmsg_init(void)
+{
+	if ((intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_PENWELL)
+		&& (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_CLOVERVIEW))
+		return -EINVAL;
+
+	return register_rpmsg_driver(&mip_rpmsg);
+}
+
+#ifdef MODULE
+module_init(mip_rpmsg_init);
+#else
+fs_initcall_sync(mip_rpmsg_init);
+#endif
+
+static void __exit mip_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&mip_rpmsg);
+}
+module_exit(mip_rpmsg_exit);
+
+MODULE_AUTHOR("Shijie Zhang <shijie.zhang@intel.com>");
+MODULE_DESCRIPTION("Intel SCU MIP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_scu_pmic.c b/drivers/platform/x86/intel_scu_pmic.c
new file mode 100644
index 0000000..d92e1dc
--- /dev/null
+++ b/drivers/platform/x86/intel_scu_pmic.c
@@ -0,0 +1,477 @@
+/*
+ * intel_scu_pmic.c - Intel MSIC Driver
+ *
+ * Copyright (C) 2012 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Bin Yang <bin.yang@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/rpmsg.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+
+#define IPC_WWBUF_SIZE    20
+#define IPC_RWBUF_SIZE    20
+
+static struct kobject *scu_pmic_kobj;
+static struct rpmsg_instance *pmic_instance;
+
+static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 cmd, u32 sub)
+{
+	int i, err, inlen = 0, outlen = 0;
+
+	u8 wbuf[IPC_WWBUF_SIZE] = {};
+	u8 rbuf[IPC_RWBUF_SIZE] = {};
+
+	for (i = 0; i < count; i++) {
+		wbuf[inlen++] = addr[i] & 0xff;
+		wbuf[inlen++] = (addr[i] >> 8) & 0xff;
+	}
+
+	if (sub == IPC_CMD_PCNTRL_R) {
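+		/* outlen = number of 32-bit words needed to return count bytes */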
+		outlen = count > 0 ? ((count - 1) / 4) + 1 : 0;
+	} else if (sub == IPC_CMD_PCNTRL_W) {
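+		/* For a 3-register write, the data bytes are stored after a */
+		/* two-byte gap so that they start on a dword boundary;      */
+		/* inlen is restored to the unpadded length before sending.  */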
+		if (count == 3)
+			inlen += 2;
+
+		for (i = 0; i < count; i++)
+			wbuf[inlen++] = data[i] & 0xff;
+
+		if (count == 3)
+			inlen -= 2;
+
+		outlen = 0;
+	} else if (sub == IPC_CMD_PCNTRL_M) {
+		wbuf[inlen++] = data[0] & 0xff;
+		wbuf[inlen++] = data[1] & 0xff;
+		outlen = 0;
+	} else
+		pr_err("IPC command not supported\n");
+
+	err = rpmsg_send_command(pmic_instance, cmd, sub, wbuf,
+			(u32 *)rbuf, inlen, outlen);
+
+	if (sub == IPC_CMD_PCNTRL_R) {
+		for (i = 0; i < count; i++)
+			data[i] = rbuf[i];
+	}
+
+	return err;
+}
+
+int intel_scu_ipc_ioread8(u16 addr, u8 *data)
+{
+	return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
+}
+EXPORT_SYMBOL(intel_scu_ipc_ioread8);
+
+int intel_scu_ipc_iowrite8(u16 addr, u8 data)
+{
+	return pwr_reg_rdwr(&addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
+}
+EXPORT_SYMBOL(intel_scu_ipc_iowrite8);
+
+int intel_scu_ipc_iowrite32(u16 addr, u32 data)
+{
+	u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
+	return pwr_reg_rdwr(x, (u8 *)&data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
+}
+EXPORT_SYMBOL(intel_scu_ipc_iowrite32);
+
+int intel_scu_ipc_readv(u16 *addr, u8 *data, int len)
+{
+	if (len < 1 || len > 8)
+		return -EINVAL;
+
+	return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
+}
+EXPORT_SYMBOL(intel_scu_ipc_readv);
+
+int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
+{
+	if (len < 1 || len > 4)
+		return -EINVAL;
+
+	return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
+}
+EXPORT_SYMBOL(intel_scu_ipc_writev);
+
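+/* Single-register update: the payload carries the new bit values and */
+/* the mask consumed by the PCNTRL_M subcommand on the SCU side.      */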
+int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask)
+{
+	u8 data[2] = { bits, mask };
+	return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
+}
+EXPORT_SYMBOL(intel_scu_ipc_update_register);
+
+/* pmic sysfs for debug */
+
+#define MAX_PMIC_REG_NR 4
+#define PMIC_OPS_LEN 10
+
+enum {
+	PMIC_DBG_ADDR,
+	PMIC_DBG_BITS,
+	PMIC_DBG_DATA,
+	PMIC_DBG_MASK,
+};
+
+static char *pmic_msg_format[] = {
+	"addr[%d]: %#x\n",
+	"bits[%d]: %#x\n",
+	"data[%d]: %#x\n",
+	"mask[%d]: %#x\n",
+};
+
+static u16 pmic_reg_addr[MAX_PMIC_REG_NR];
+static u8 pmic_reg_bits[MAX_PMIC_REG_NR];
+static u8 pmic_reg_data[MAX_PMIC_REG_NR];
+static u8 pmic_reg_mask[MAX_PMIC_REG_NR];
+static int valid_addr_nr;
+static int valid_bits_nr;
+static int valid_data_nr;
+static int valid_mask_nr;
+static char pmic_ops[PMIC_OPS_LEN];
+
+static int pmic_dbg_error;
+
+static ssize_t pmic_generic_show(char *buf, int valid, u8 *array, int type)
+{
+	int i, buf_size;
+	ssize_t ret = 0;
+
+	switch (type) {
+	case PMIC_DBG_ADDR:
+		for (i = 0; i < valid; i++) {
+			buf_size = PAGE_SIZE - ret;
+			ret += snprintf(buf + ret, buf_size,
+					pmic_msg_format[type],
+					i, pmic_reg_addr[i]);
+		}
+		break;
+	case PMIC_DBG_BITS:
+	case PMIC_DBG_DATA:
+	case PMIC_DBG_MASK:
+		for (i = 0; i < valid; i++) {
+			buf_size = PAGE_SIZE - ret;
+			ret += snprintf(buf + ret, buf_size,
+					pmic_msg_format[type],
+					i, array[i]);
+		}
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static void pmic_generic_store(const char *buf, int *valid, u8 *array, int type)
+{
+	u32 tmp[MAX_PMIC_REG_NR];
+	int i, ret;
+
+	ret = sscanf(buf, "%x %x %x %x", &tmp[0], &tmp[1], &tmp[2], &tmp[3]);
+	if (ret == 0 || ret > MAX_PMIC_REG_NR) {
+		*valid = 0;
+		pmic_dbg_error = -EINVAL;
+		return;
+	}
+
+	*valid = ret;
+
+	switch (type) {
+	case PMIC_DBG_ADDR:
+		memset(pmic_reg_addr, 0, sizeof(pmic_reg_addr));
+		for (i = 0; i < ret; i++)
+			pmic_reg_addr[i] = (u16)tmp[i];
+		break;
+	case PMIC_DBG_BITS:
+	case PMIC_DBG_DATA:
+	case PMIC_DBG_MASK:
+		memset(array, 0, sizeof(*array) * MAX_PMIC_REG_NR);
+		for (i = 0; i < ret; i++)
+			array[i] = (u8)tmp[i];
+		break;
+	default:
+		break;
+	}
+}
+
+static ssize_t pmic_addr_show(struct kobject *kobj, struct kobj_attribute *attr,
+				char *buf)
+{
+	return pmic_generic_show(buf, valid_addr_nr, NULL, PMIC_DBG_ADDR);
+}
+
+static ssize_t pmic_addr_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	pmic_generic_store(buf, &valid_addr_nr, NULL, PMIC_DBG_ADDR);
+	return size;
+}
+
+static ssize_t pmic_bits_show(struct kobject *kobj, struct kobj_attribute *attr,
+				char *buf)
+{
+	return pmic_generic_show(buf, valid_bits_nr, pmic_reg_bits,
+			PMIC_DBG_BITS);
+}
+static ssize_t pmic_bits_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	pmic_generic_store(buf, &valid_bits_nr, pmic_reg_bits, PMIC_DBG_BITS);
+	return size;
+}
+
+static ssize_t pmic_data_show(struct kobject *kobj, struct kobj_attribute *attr,
+				char *buf)
+{
+	return pmic_generic_show(buf, valid_data_nr, pmic_reg_data,
+			PMIC_DBG_DATA);
+}
+
+static ssize_t pmic_data_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	pmic_generic_store(buf, &valid_data_nr, pmic_reg_data, PMIC_DBG_DATA);
+	return size;
+}
+
+static ssize_t pmic_mask_show(struct kobject *kobj, struct kobj_attribute *attr,
+				char *buf)
+{
+	return pmic_generic_show(buf, valid_mask_nr, pmic_reg_mask,
+			PMIC_DBG_MASK);
+}
+
+static ssize_t pmic_mask_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	pmic_generic_store(buf, &valid_mask_nr, pmic_reg_mask, PMIC_DBG_MASK);
+	return size;
+}
+
+static ssize_t pmic_ops_store(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t size)
+{
+	int i, ret;
+
+	memset(pmic_ops, 0, sizeof(pmic_ops));
+
+	ret = sscanf(buf, "%9s", pmic_ops);
+	if (ret == 0) {
+		pmic_dbg_error = -EINVAL;
+		goto end;
+	}
+
+	if (valid_addr_nr <= 0) {
+		pmic_dbg_error = -EINVAL;
+		goto end;
+	}
+
+	if (!strncmp("read", pmic_ops, PMIC_OPS_LEN)) {
+		valid_data_nr = valid_addr_nr;
+		for (i = 0; i < valid_addr_nr; i++) {
+			ret = intel_scu_ipc_ioread8(pmic_reg_addr[i],
+					&pmic_reg_data[i]);
+			if (ret) {
+				pmic_dbg_error = ret;
+				goto end;
+			}
+		}
+	} else if (!strncmp("write", pmic_ops, PMIC_OPS_LEN)) {
+		if (valid_addr_nr == valid_data_nr) {
+			for (i = 0; i < valid_addr_nr; i++) {
+				ret = intel_scu_ipc_iowrite8(pmic_reg_addr[i],
+						pmic_reg_data[i]);
+				if (ret) {
+					pmic_dbg_error = ret;
+					goto end;
+				}
+			}
+		} else {
+			pmic_dbg_error = -EINVAL;
+			goto end;
+		}
+	} else if (!strncmp("update", pmic_ops, PMIC_OPS_LEN)) {
+		if (valid_addr_nr == valid_mask_nr &&
+				valid_mask_nr == valid_bits_nr) {
+			for (i = 0; i < valid_addr_nr; i++) {
+				ret = intel_scu_ipc_update_register(
+						pmic_reg_addr[i],
+						pmic_reg_bits[i],
+						pmic_reg_mask[i]);
+				if (ret) {
+					pmic_dbg_error = ret;
+					goto end;
+				}
+			}
+		} else {
+			pmic_dbg_error = -EINVAL;
+			goto end;
+		}
+	} else {
+		pmic_dbg_error = -EINVAL;
+		goto end;
+	}
+
+	pmic_dbg_error = 0;
+
+end:
+	return size;
+}
+
+static ssize_t pmic_show_error(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", pmic_dbg_error);
+}
+
+static KOBJ_PMIC_ATTR(addr, S_IRUGO|S_IWUSR, pmic_addr_show, pmic_addr_store);
+static KOBJ_PMIC_ATTR(bits, S_IRUGO|S_IWUSR, pmic_bits_show, pmic_bits_store);
+static KOBJ_PMIC_ATTR(data, S_IRUGO|S_IWUSR, pmic_data_show, pmic_data_store);
+static KOBJ_PMIC_ATTR(mask, S_IRUGO|S_IWUSR, pmic_mask_show, pmic_mask_store);
+static KOBJ_PMIC_ATTR(ops, S_IWUSR, NULL, pmic_ops_store);
+static KOBJ_PMIC_ATTR(error, S_IRUGO, pmic_show_error, NULL);
+
+static struct attribute *pmic_attrs[] = {
+	&addr_attr.attr,
+	&bits_attr.attr,
+	&data_attr.attr,
+	&mask_attr.attr,
+	&ops_attr.attr,
+	&error_attr.attr,
+	NULL,
+};
+
+static struct attribute_group pmic_attr_group = {
+	.name = "pmic_debug",
+	.attrs = pmic_attrs,
+};
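+
+/*
+ * Usage sketch for the interface above (paths assume the "pmic_debug"
+ * kobject created in probe below under kernel_kobj, i.e.
+ * /sys/kernel/pmic_debug; the register addresses are illustrative only):
+ *
+ *	echo "0x5e 0x5f" > /sys/kernel/pmic_debug/addr
+ *	echo "read" > /sys/kernel/pmic_debug/ops
+ *	cat /sys/kernel/pmic_debug/data
+ *	cat /sys/kernel/pmic_debug/error	(0 on success)
+ */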
+
+static int pmic_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed pmic rpmsg device\n");
+
+	/* Allocate rpmsg instance for pmic*/
+	ret = alloc_rpmsg_instance(rpdev, &pmic_instance);
+	if (!pmic_instance) {
+		dev_err(&rpdev->dev, "kzalloc pmic instance failed\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+	/* Initialize rpmsg instance */
+	init_rpmsg_instance(pmic_instance);
+
+	/* Create debugfs for pmic regs */
+	scu_pmic_kobj = kobject_create_and_add(pmic_attr_group.name,
+						kernel_kobj);
+
+	if (!scu_pmic_kobj) {
+		ret = -ENOMEM;
+		goto rpmsg_err;
+	}
+
+	ret = sysfs_create_group(scu_pmic_kobj, &pmic_attr_group);
+
+	if (ret) {
+		kobject_put(scu_pmic_kobj);
+		goto rpmsg_err;
+	}
+
+	goto out;
+
+rpmsg_err:
+	free_rpmsg_instance(rpdev, &pmic_instance);
+out:
+	return ret;
+}
+
+static void pmic_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	free_rpmsg_instance(rpdev, &pmic_instance);
+	sysfs_remove_group(scu_pmic_kobj, &pmic_attr_group);
+	kobject_put(scu_pmic_kobj);
+	dev_info(&rpdev->dev, "Removed pmic rpmsg device\n");
+}
+
+static void pmic_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id pmic_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_pmic" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, pmic_rpmsg_id_table);
+
+static struct rpmsg_driver pmic_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= pmic_rpmsg_id_table,
+	.probe		= pmic_rpmsg_probe,
+	.callback	= pmic_rpmsg_cb,
+	.remove		= pmic_rpmsg_remove,
+};
+
+static int __init pmic_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&pmic_rpmsg);
+}
+
+#ifdef MODULE
+module_init(pmic_rpmsg_init);
+#else
+fs_initcall_sync(pmic_rpmsg_init);
+#endif
+
+static void __exit pmic_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&pmic_rpmsg);
+}
+module_exit(pmic_rpmsg_exit);
+
+MODULE_AUTHOR("Bin Yang<bin.yang@intel.com>");
+MODULE_DESCRIPTION("Intel PMIC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 7b8979c..1c83042 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -14,6 +14,26 @@
 	  Say Y here to enable debugging messages for power supply class
 	  and drivers.
 
+config PMIC_CCSM
+	tristate "PMIC CCSM driver"
+	select POWER_SUPPLY_BATTID
+	depends on INTEL_SCU_IPC && IIO
+	help
+	  Say Y to include support for the PMIC Charger Control State
+	  Machine (CCSM) driver, which initializes and monitors the CCSM
+	  in the PMIC. This driver sets the CCSM registers and handles
+	  the PMIC charger interrupts.
+
+config BQ24261_CHARGER
+	tristate "BQ24261 charger driver"
+	select POWER_SUPPLY_CHARGER
+	depends on I2C
+	help
+	  Say Y to include support for the BQ24261 charger driver. This
+	  driver makes use of the power supply charging framework, so it
+	  provides only the charger hardware abstraction; the charging
+	  logic is handled in the charging framework.
+
 config PDA_POWER
 	tristate "Generic PDA/phone power driver"
 	depends on !S390
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 653bf6c..ad734a2 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -1,12 +1,15 @@
 ccflags-$(CONFIG_POWER_SUPPLY_DEBUG) := -DDEBUG
 
-power_supply-y				:= power_supply_core.o
-power_supply-$(CONFIG_SYSFS)		+= power_supply_sysfs.o
-power_supply-$(CONFIG_LEDS_TRIGGERS)	+= power_supply_leds.o
+power_supply-y					:= power_supply_core.o
+power_supply-$(CONFIG_SYSFS)			+= power_supply_sysfs.o
+power_supply-$(CONFIG_LEDS_TRIGGERS)		+= power_supply_leds.o
+power_supply-$(CONFIG_POWER_SUPPLY_CHARGER)	+= power_supply_charger.o
+power_supply-$(CONFIG_POWER_SUPPLY_BATTID)	+= battery_id.o
 
 obj-$(CONFIG_POWER_SUPPLY)	+= power_supply.o
 obj-$(CONFIG_GENERIC_ADC_BATTERY)	+= generic-adc-battery.o
 
+obj-$(CONFIG_POWER_SUPPLY_CHARGING_ALGO_PSE)	+= charging_algo_pse.o
 obj-$(CONFIG_PDA_POWER)		+= pda_power.o
 obj-$(CONFIG_APM_POWER)		+= apm_power.o
 obj-$(CONFIG_MAX8925_POWER)	+= max8925_power.o
@@ -54,3 +57,6 @@
 obj-$(CONFIG_CHARGER_SMB347)	+= smb347-charger.o
 obj-$(CONFIG_CHARGER_TPS65090)	+= tps65090-charger.o
 obj-$(CONFIG_POWER_RESET)	+= reset/
+
+obj-$(CONFIG_BQ24261_CHARGER)	+= bq24261_charger.o
+obj-$(CONFIG_PMIC_CCSM) += pmic_ccsm.o
diff --git a/drivers/power/battery_id.c b/drivers/power/battery_id.c
new file mode 100644
index 0000000..13425b6
--- /dev/null
+++ b/drivers/power/battery_id.c
@@ -0,0 +1,68 @@
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/notifier.h>
+#include <linux/power/battery_id.h>
+
+ATOMIC_NOTIFIER_HEAD(batt_id_notifier);
+
+static struct ps_batt_chg_prof *batt_property;
+static int batt_status;
+
+int batt_id_reg_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&batt_id_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(batt_id_reg_notifier);
+
+void batt_id_unreg_notifier(struct notifier_block *nb)
+{
+	atomic_notifier_chain_unregister(&batt_id_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(batt_id_unreg_notifier);
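+
+/*
+ * A hypothetical client of this notifier chain (the names below are
+ * illustrative, not part of this file). Note that battery_prop_changed()
+ * passes the address of the batt_property pointer, so the callback data
+ * is dereferenced twice:
+ *
+ *	static int my_batt_cb(struct notifier_block *nb,
+ *			unsigned long event, void *data)
+ *	{
+ *		struct ps_batt_chg_prof *prof =
+ *			*(struct ps_batt_chg_prof **)data;
+ *		return prof ? NOTIFY_OK : NOTIFY_DONE;
+ *	}
+ *	static struct notifier_block my_nb = {
+ *		.notifier_call = my_batt_cb,
+ *	};
+ *	batt_id_reg_notifier(&my_nb);
+ */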
+
+
+/**
+ * battery_prop_changed - Update properties when battery connection status
+ *                        changes
+ * @battery_conn_stat : The current connection status of battery
+ * @batt_prop : Address of the ps_batt_chg_prof structure with the updated
+ *              values passed from the calling function
+ *
+ * Whenever the battery connection status changes this function will be called
+ * to indicate a change in the status and to update the status and value of
+ * properties
+ */
+void battery_prop_changed(int battery_conn_stat,
+			struct ps_batt_chg_prof *batt_prop)
+{
+	if (batt_status != battery_conn_stat) {
+		if (battery_conn_stat == POWER_SUPPLY_BATTERY_INSERTED)
+			batt_property = batt_prop;
+		else
+			batt_property = NULL;
+
+		batt_status = battery_conn_stat;
+	}
+
+	atomic_notifier_call_chain(&batt_id_notifier,
+			0, &(batt_property));
+
+}
+EXPORT_SYMBOL_GPL(battery_prop_changed);
+
+/**
+ * get_batt_prop - Get the battery connection status and updated properties
+ * @batt_prop : battery properties structure copied to this address
+ */
+int get_batt_prop(struct ps_batt_chg_prof *batt_prop)
+{
+	if (batt_property)
+		memcpy(batt_prop, batt_property,
+			sizeof(struct ps_batt_chg_prof));
+	else
+		return -ENODEV;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(get_batt_prop);
diff --git a/drivers/power/bq24261_charger.c b/drivers/power/bq24261_charger.c
new file mode 100644
index 0000000..373ad8f
--- /dev/null
+++ b/drivers/power/bq24261_charger.c
@@ -0,0 +1,1887 @@
+/*
+ * bq24261_charger.c - BQ24261 Charger I2C client driver
+ *
+ * Copyright (C) 2011 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Jenny TC <jenny.tc@intel.com>
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/power_supply.h>
+#include <linux/pm_runtime.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/usb/otg.h>
+#include <linux/power/bq24261_charger.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+
+#include <asm/intel_scu_ipc.h>
+
+#define DEV_NAME "bq24261_charger"
+#define DEV_MANUFACTURER "TI"
+#define MODEL_NAME_SIZE 8
+#define DEV_MANUFACTURER_NAME_SIZE 4
+
+#define CHRG_TERM_WORKER_DELAY (30 * HZ)
+#define EXCEPTION_MONITOR_DELAY (60 * HZ)
+#define WDT_RESET_DELAY (15 * HZ)
+
+/* BQ24261 registers */
+#define BQ24261_STAT_CTRL0_ADDR		0x00
+#define BQ24261_CTRL_ADDR		0x01
+#define BQ24261_BATT_VOL_CTRL_ADDR	0x02
+#define BQ24261_VENDOR_REV_ADDR		0x03
+#define BQ24261_TERM_FCC_ADDR		0x04
+#define BQ24261_VINDPM_STAT_ADDR	0x05
+#define BQ24261_ST_NTC_MON_ADDR		0x06
+
+#define BQ24261_RESET_MASK		(0x01 << 7)
+#define BQ24261_RESET_ENABLE		(0x01 << 7)
+
+#define BQ24261_FAULT_MASK		0x07
+#define BQ24261_STAT_MASK		(0x03 << 4)
+#define BQ24261_BOOST_MASK		(0x01 << 6)
+#define BQ24261_TMR_RST_MASK		(0x01 << 7)
+#define BQ24261_TMR_RST			(0x01 << 7)
+
+#define BQ24261_ENABLE_BOOST		(0x01 << 6)
+
+#define BQ24261_VOVP			0x01
+#define BQ24261_LOW_SUPPLY		0x02
+#define BQ24261_THERMAL_SHUTDOWN	0x03
+#define BQ24261_BATT_TEMP_FAULT		0x04
+#define BQ24261_TIMER_FAULT		0x05
+#define BQ24261_BATT_OVP		0x06
+#define BQ24261_NO_BATTERY		0x07
+#define BQ24261_STAT_READY		0x00
+
+#define BQ24261_STAT_CHRG_PRGRSS	(0x01 << 4)
+#define BQ24261_STAT_CHRG_DONE		(0x02 << 4)
+#define BQ24261_STAT_FAULT		(0x03 << 4)
+
+#define BQ24261_CE_MASK			(0x01 << 1)
+#define BQ24261_CE_DISABLE		(0x01 << 1)
+
+#define BQ24261_HZ_MASK			(0x01)
+#define BQ24261_HZ_ENABLE		(0x01)
+
+#define BQ24261_ICHRG_MASK		(0x1F << 3)
+#define BQ24261_ICHRG_100ma		(0x01 << 3)
+#define BQ24261_ICHRG_200ma		(0x01 << 4)
+#define BQ24261_ICHRG_400ma		(0x01 << 5)
+#define BQ24261_ICHRG_800ma		(0x01 << 6)
+#define BQ24261_ICHRG_1600ma		(0x01 << 7)
+
+#define BQ24261_ITERM_MASK		(0x03)
+#define BQ24261_ITERM_50ma		(0x01 << 0)
+#define BQ24261_ITERM_100ma		(0x01 << 1)
+#define BQ24261_ITERM_200ma		(0x01 << 2)
+
+#define BQ24261_VBREG_MASK		(0x3F << 2)
+
+#define BQ24261_INLMT_MASK		(0x03 << 4)
+#define BQ24261_INLMT_100		0x00
+#define BQ24261_INLMT_150		(0x01 << 4)
+#define BQ24261_INLMT_500		(0x02 << 4)
+#define BQ24261_INLMT_900		(0x03 << 4)
+#define BQ24261_INLMT_1500		(0x04 << 4)
+#define BQ24261_INLMT_2500		(0x06 << 4)
+
+#define BQ24261_TE_MASK			(0x01 << 2)
+#define BQ24261_TE_ENABLE		(0x01 << 2)
+#define BQ24261_STAT_ENABLE_MASK	(0x01 << 3)
+#define BQ24261_STAT_ENABLE		(0x01 << 3)
+
+#define BQ24261_VENDOR_MASK		(0x07 << 5)
+#define BQ24261_VENDOR			(0x02 << 5)
+#define BQ24261_REV_MASK		(0x07)
+#define BQ24261_2_3_REV			(0x06)
+#define BQ24261_REV			(0x02)
+#define BQ24260_REV			(0x01)
+
+#define BQ24261_TS_MASK			(0x01 << 3)
+#define BQ24261_TS_ENABLED		(0x01 << 3)
+#define BQ24261_BOOST_ILIM_MASK		(0x01 << 4)
+#define BQ24261_BOOST_ILIM_500ma	(0x0)
+#define BQ24261_BOOST_ILIM_1A		(0x01 << 4)
+
+#define BQ24261_SAFETY_TIMER_MASK	(0x03 << 5)
+#define BQ24261_SAFETY_TIMER_40MIN	0x00
+#define BQ24261_SAFETY_TIMER_6HR	(0x01 << 5)
+#define BQ24261_SAFETY_TIMER_9HR	(0x02 << 5)
+#define BQ24261_SAFETY_TIMER_DISABLED	(0x03 << 5)
+
+/* 1% above voltage max design to report over voltage */
+#define BQ24261_OVP_MULTIPLIER			1010
+#define BQ24261_OVP_RECOVER_MULTIPLIER		990
+#define BQ24261_DEF_BAT_VOLT_MAX_DESIGN		4200000
+
+/* Settings for Voltage / DPPM Register (05) */
+#define BQ24261_VBATT_LEVEL1		3700000
+#define BQ24261_VBATT_LEVEL2		3960000
+#define BQ24261_VINDPM_MASK		(0x07)
+#define BQ24261_VINDPM_320MV		(0x01 << 2)
+#define BQ24261_VINDPM_160MV		(0x01 << 1)
+#define BQ24261_VINDPM_80MV		(0x01 << 0)
+#define BQ24261_CD_STATUS_MASK		(0x01 << 3)
+#define BQ24261_DPM_EN_MASK		(0x01 << 4)
+#define BQ24261_DPM_EN_FORCE		(0x01 << 4)
+#define BQ24261_LOW_CHG_MASK		(0x01 << 5)
+#define BQ24261_LOW_CHG_EN		(0x01 << 5)
+#define BQ24261_LOW_CHG_DIS		(~BQ24261_LOW_CHG_EN)
+#define BQ24261_DPM_STAT_MASK		(0x01 << 6)
+#define BQ24261_MINSYS_STAT_MASK	(0x01 << 7)
+
+#define BQ24261_MIN_CC			500
+
+u16 bq24261_sfty_tmr[][2] = {
+	{0, BQ24261_SAFETY_TIMER_DISABLED},
+	{40, BQ24261_SAFETY_TIMER_40MIN},
+	{360, BQ24261_SAFETY_TIMER_6HR},
+	{540, BQ24261_SAFETY_TIMER_9HR},
+};
+
+
+u16 bq24261_inlmt[][2] = {
+	{100, BQ24261_INLMT_100},
+	{150, BQ24261_INLMT_150},
+	{500, BQ24261_INLMT_500},
+	{900, BQ24261_INLMT_900},
+	{1500, BQ24261_INLMT_1500},
+	{2500, BQ24261_INLMT_2500},
+};
+
+u16 bq24261_iterm[][2] = {
+	{0, 0x00},
+	{50, BQ24261_ITERM_50ma},
+	{100, BQ24261_ITERM_100ma},
+	{150, BQ24261_ITERM_100ma | BQ24261_ITERM_50ma},
+	{200, BQ24261_ITERM_200ma},
+	{250, BQ24261_ITERM_200ma | BQ24261_ITERM_50ma},
+	{300, BQ24261_ITERM_200ma | BQ24261_ITERM_100ma},
+	{350, BQ24261_ITERM_200ma | BQ24261_ITERM_100ma | BQ24261_ITERM_50ma},
+};
+
+u16 bq24261_cc[][2] = {
+	{500, 0x00},
+	{600, BQ24261_ICHRG_100ma},
+	{700, BQ24261_ICHRG_200ma},
+	{800, BQ24261_ICHRG_100ma | BQ24261_ICHRG_200ma},
+	{900, BQ24261_ICHRG_400ma},
+	{1000, BQ24261_ICHRG_400ma | BQ24261_ICHRG_100ma},
+	{1100, BQ24261_ICHRG_400ma | BQ24261_ICHRG_200ma},
+	{1200, BQ24261_ICHRG_400ma | BQ24261_ICHRG_200ma | BQ24261_ICHRG_100ma},
+	{1300, BQ24261_ICHRG_800ma},
+	{1400, BQ24261_ICHRG_800ma | BQ24261_ICHRG_100ma},
+	{1500, BQ24261_ICHRG_800ma | BQ24261_ICHRG_200ma},
+};
+
+#define BQ24261_MIN_CV 3500
+#define BQ24261_MAX_CV 4440
+#define BQ24261_CV_DIV 20
+#define BQ24261_CV_BIT_POS 2
+
+static enum power_supply_property bq24261_usb_props[] = {
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_TYPE,
+	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_MAX_CHARGE_CURRENT,
+	POWER_SUPPLY_PROP_MAX_CHARGE_VOLTAGE,
+	POWER_SUPPLY_PROP_CHARGE_CURRENT,
+	POWER_SUPPLY_PROP_CHARGE_VOLTAGE,
+	POWER_SUPPLY_PROP_INLMT,
+	POWER_SUPPLY_PROP_ENABLE_CHARGING,
+	POWER_SUPPLY_PROP_ENABLE_CHARGER,
+	POWER_SUPPLY_PROP_CHARGE_TERM_CUR,
+	POWER_SUPPLY_PROP_CABLE_TYPE,
+	POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
+	POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
+	POWER_SUPPLY_PROP_MODEL_NAME,
+	POWER_SUPPLY_PROP_MANUFACTURER,
+	POWER_SUPPLY_PROP_MAX_TEMP,
+	POWER_SUPPLY_PROP_MIN_TEMP,
+};
+
+enum bq24261_chrgr_stat {
+	BQ24261_CHRGR_STAT_UNKNOWN,
+	BQ24261_CHRGR_STAT_READY,
+	BQ24261_CHRGR_STAT_CHARGING,
+	BQ24261_CHRGR_STAT_BAT_FULL,
+	BQ24261_CHRGR_STAT_FAULT,
+};
+
+struct bq24261_otg_event {
+	struct list_head node;
+	bool is_enable;
+};
+
+struct bq24261_charger {
+
+	struct mutex lock;
+	struct i2c_client *client;
+	struct bq24261_plat_data *pdata;
+	struct power_supply psy_usb;
+	struct delayed_work sw_term_work;
+	struct delayed_work wdt_work;
+	struct delayed_work low_supply_fault_work;
+	struct delayed_work exception_mon_work;
+	struct notifier_block otg_nb;
+	struct usb_phy *transceiver;
+	struct work_struct otg_work;
+	struct work_struct irq_work;
+	struct list_head otg_queue;
+	struct list_head irq_queue;
+	wait_queue_head_t wait_ready;
+	spinlock_t otg_queue_lock;
+	void __iomem *irq_iomap;
+
+	int chrgr_health;
+	int bat_health;
+	int cc;
+	int cv;
+	int inlmt;
+	int max_cc;
+	int max_cv;
+	int iterm;
+	int cable_type;
+	int cntl_state;
+	int max_temp;
+	int min_temp;
+	int revision;
+	enum bq24261_chrgr_stat chrgr_stat;
+	bool online;
+	bool present;
+	bool is_charging_enabled;
+	bool is_charger_enabled;
+	bool is_vsys_on;
+	bool boost_mode;
+	bool is_hw_chrg_term;
+	char model_name[MODEL_NAME_SIZE];
+	char manufacturer[DEV_MANUFACTURER_NAME_SIZE];
+};
+
+enum bq2426x_model_num {
+	BQ2426X = 0,
+	BQ24260,
+	BQ24261,
+};
+
+struct bq2426x_model {
+	char model_name[MODEL_NAME_SIZE];
+	enum bq2426x_model_num model;
+};
+
+static struct bq2426x_model bq24261_model_name[] = {
+	{ "bq2426x", BQ2426X },
+	{ "bq24260", BQ24260 },
+	{ "bq24261", BQ24261 },
+};
+
+struct i2c_client *bq24261_client;
+static inline int get_battery_voltage(int *volt);
+static inline int get_battery_current(int *cur);
+static int bq24261_handle_irq(struct bq24261_charger *chip, u8 stat_reg);
+static inline int bq24261_set_iterm(struct bq24261_charger *chip, int iterm);
+
+enum power_supply_type get_power_supply_type(
+		enum power_supply_charger_cable_type cable)
+{
+
+	switch (cable) {
+
+	case POWER_SUPPLY_CHARGER_TYPE_USB_DCP:
+		return POWER_SUPPLY_TYPE_USB_DCP;
+	case POWER_SUPPLY_CHARGER_TYPE_USB_CDP:
+		return POWER_SUPPLY_TYPE_USB_CDP;
+	case POWER_SUPPLY_CHARGER_TYPE_USB_ACA:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK:
+		return POWER_SUPPLY_TYPE_USB_ACA;
+	case POWER_SUPPLY_CHARGER_TYPE_AC:
+		return POWER_SUPPLY_TYPE_MAINS;
+	case POWER_SUPPLY_CHARGER_TYPE_NONE:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_SDP:
+	default:
+		return POWER_SUPPLY_TYPE_USB;
+	}
+}
+
+static void lookup_regval(u16 tbl[][2], size_t size, u16 in_val, u8 *out_val)
+{
+	int i;
+	for (i = 1; i < size; ++i)
+		if (in_val < tbl[i][0])
+			break;
+
+	*out_val = (u8) tbl[i - 1][1];
+}
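+
+/*
+ * Example: a lookup of 950 mA in bq24261_cc[] above stops at the first
+ * row whose threshold exceeds it (the 1000 mA row) and returns the bits
+ * of the previous row, i.e. the 900 mA setting, so requested values are
+ * always rounded down to a supported step.
+ */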
+
+void bq24261_cc_to_reg(int cc, u8 *reg_val)
+{
+	return lookup_regval(bq24261_cc, ARRAY_SIZE(bq24261_cc), cc, reg_val);
+
+}
+
+void bq24261_cv_to_reg(int cv, u8 *reg_val)
+{
+	int val;
+
+	val = clamp_t(int, cv, BQ24261_MIN_CV, BQ24261_MAX_CV);
+	*reg_val =
+		(((val - BQ24261_MIN_CV) / BQ24261_CV_DIV)
+			<< BQ24261_CV_BIT_POS);
+}
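+
+/*
+ * Worked example for the conversion above: cv = 4200 mV gives
+ * (4200 - 3500) / 20 = 35, shifted left by BQ24261_CV_BIT_POS (2)
+ * into the VBREG field, i.e. reg_val = 0x8C.
+ */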
+
+void bq24261_inlmt_to_reg(int inlmt, u8 *regval)
+{
+	return lookup_regval(bq24261_inlmt, ARRAY_SIZE(bq24261_inlmt),
+			     inlmt, regval);
+}
+
+static inline void bq24261_iterm_to_reg(int iterm, u8 *regval)
+{
+	return lookup_regval(bq24261_iterm, ARRAY_SIZE(bq24261_iterm),
+			     iterm, regval);
+}
+
+static inline void bq24261_sfty_tmr_to_reg(int tmr, u8 *regval)
+{
+	return lookup_regval(bq24261_sfty_tmr, ARRAY_SIZE(bq24261_sfty_tmr),
+			     tmr, regval);
+}
+
+static inline int bq24261_read_reg(struct i2c_client *client, u8 reg)
+{
+	int ret;
+
+	ret = i2c_smbus_read_byte_data(client, reg);
+	if (ret < 0)
+		dev_err(&client->dev, "Error(%d) in reading reg %d\n", ret,
+			reg);
+
+	return ret;
+}
+
+
+static inline void bq24261_dump_regs(bool dump_master)
+{
+	int i;
+	int ret;
+	int bat_cur, bat_volt;
+	struct bq24261_charger *chip;
+
+	if (!bq24261_client)
+		return;
+
+	chip = i2c_get_clientdata(bq24261_client);
+
+	ret = get_battery_current(&bat_cur);
+	if (ret)
+		dev_err(&bq24261_client->dev,
+			"%s: Error in getting battery current", __func__);
+	else
+		dev_info(&bq24261_client->dev, "Battery Current=%dma\n",
+				(bat_cur/1000));
+
+	ret = get_battery_voltage(&bat_volt);
+	if (ret)
+		dev_err(&bq24261_client->dev,
+			"%s: Error in getting battery voltage", __func__);
+	else
+		dev_info(&bq24261_client->dev, "Battery Voltage=%dmV\n",
+			(bat_volt/1000));
+
+
+	dev_info(&bq24261_client->dev, "BQ24261 Register dump\n");
+
+	dev_info(&bq24261_client->dev, "*======================*\n");
+	for (i = 0; i < 7; ++i) {
+		ret = bq24261_read_reg(bq24261_client, i);
+		if (ret < 0)
+			dev_err(&bq24261_client->dev,
+				"Error in reading REG 0x%X\n", i);
+		else
+			dev_info(&bq24261_client->dev,
+				"0x%X=0x%X ", i, ret);
+	}
+	dev_info(&bq24261_client->dev, "*======================*\n");
+
+	if (chip->pdata->dump_master_regs && dump_master)
+			chip->pdata->dump_master_regs();
+
+}
+
+
+#ifdef CONFIG_DEBUG_FS
+static int bq24261_reg_show(struct seq_file *seq, void *unused)
+{
+	int val;
+	u8 reg;
+
+	reg = *((u8 *)seq->private);
+	val = bq24261_read_reg(bq24261_client, reg);
+
+	seq_printf(seq, "0x%02x\n", val);
+	return 0;
+}
+
+static int bq24261_dbgfs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, bq24261_reg_show, inode->i_private);
+}
+
+static u32 bq24261_register_set[] = {
+	BQ24261_STAT_CTRL0_ADDR,
+	BQ24261_CTRL_ADDR,
+	BQ24261_BATT_VOL_CTRL_ADDR,
+	BQ24261_VENDOR_REV_ADDR,
+	BQ24261_TERM_FCC_ADDR,
+	BQ24261_VINDPM_STAT_ADDR,
+	BQ24261_ST_NTC_MON_ADDR,
+};
+
+static struct dentry *bq24261_dbgfs_dir;
+
+static const struct file_operations bq24261_dbg_fops = {
+	.open = bq24261_dbgfs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+static void bq24261_debugfs_init(void)
+{
+	struct dentry *fentry;
+	u32 count = ARRAY_SIZE(bq24261_register_set);
+	u32 i;
+	char name[6] = {0};
+
+	bq24261_dbgfs_dir = debugfs_create_dir(DEV_NAME, NULL);
+	if (bq24261_dbgfs_dir == NULL)
+		goto debugfs_root_exit;
+
+	for (i = 0; i < count; i++) {
+		snprintf(name, 6, "%02x", bq24261_register_set[i]);
+		fentry = debugfs_create_file(name, S_IRUGO,
+						bq24261_dbgfs_dir,
+						&bq24261_register_set[i],
+						&bq24261_dbg_fops);
+		if (fentry == NULL)
+			goto debugfs_err_exit;
+	}
+	dev_info(&bq24261_client->dev, "Debugfs created successfully!!\n");
+	return;
+
+debugfs_err_exit:
+	debugfs_remove_recursive(bq24261_dbgfs_dir);
+debugfs_root_exit:
+	dev_err(&bq24261_client->dev, "Error Creating debugfs!!\n");
+	return;
+}
+
+static void bq24261_debugfs_exit(void)
+{
+	if (bq24261_dbgfs_dir)
+		debugfs_remove_recursive(bq24261_dbgfs_dir);
+
+	return;
+}
+
+#else
+static void bq24261_debugfs_init(void)
+{
+	return;
+}
+
+static void bq24261_debugfs_exit(void)
+{
+	return;
+}
+#endif
+
+static inline int bq24261_write_reg(struct i2c_client *client, u8 reg, u8 data)
+{
+	int ret;
+
+	ret = i2c_smbus_write_byte_data(client, reg, data);
+	if (ret < 0)
+		dev_err(&client->dev, "Error(%d) in writing %d to reg %d\n",
+			ret, data, reg);
+
+	return ret;
+}
+
+static inline int bq24261_read_modify_reg(struct i2c_client *client, u8 reg,
+					  u8 mask, u8 val)
+{
+	int ret;
+
+	ret = bq24261_read_reg(client, reg);
+	if (ret < 0)
+		return ret;
+	ret = (ret & ~mask) | (mask & val);
+	return bq24261_write_reg(client, reg, ret);
+}
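+
+/*
+ * For instance, bq24261_read_modify_reg(client, BQ24261_CTRL_ADDR,
+ * BQ24261_HZ_MASK, BQ24261_HZ_ENABLE) sets only the HZ bit and leaves
+ * the remaining CTRL bits untouched.
+ */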
+
+static inline int bq24261_tmr_ntc_init(struct bq24261_charger *chip)
+{
+	u8 reg_val;
+	int ret;
+
+	bq24261_sfty_tmr_to_reg(chip->pdata->safety_timer, &reg_val);
+
+	if (chip->pdata->is_ts_enabled)
+		reg_val |= BQ24261_TS_ENABLED;
+
+	/* Check if boost mode current configuration is above 1A*/
+	if (chip->pdata->boost_mode_ma >= 1000)
+		reg_val |= BQ24261_BOOST_ILIM_1A;
+
+	ret = bq24261_read_modify_reg(chip->client, BQ24261_ST_NTC_MON_ADDR,
+			BQ24261_TS_MASK|BQ24261_SAFETY_TIMER_MASK|
+			BQ24261_BOOST_ILIM_MASK, reg_val);
+
+	return ret;
+}
+
+static inline int bq24261_enable_charging(
+	struct bq24261_charger *chip, bool val)
+{
+	int ret;
+	u8 reg_val;
+	bool is_ready;
+
+	ret = bq24261_read_reg(chip->client,
+					BQ24261_STAT_CTRL0_ADDR);
+	if (ret < 0) {
+		dev_err(&chip->client->dev,
+			"Error(%d) in reading BQ24261_STAT_CTRL0_ADDR\n", ret);
+	}
+
+	is_ready = (ret & BQ24261_STAT_MASK) != BQ24261_STAT_FAULT;
+
+	/* If status is fault, wait for READY before enabling the charging */
+
+	if (!is_ready) {
+		ret = wait_event_timeout(chip->wait_ready,
+			(chip->chrgr_stat == BQ24261_CHRGR_STAT_READY),
+				HZ);
+		dev_info(&chip->client->dev,
+			"chrgr_stat=%x\n", chip->chrgr_stat);
+		if (ret == 0) {
+			dev_err(&chip->client->dev,
+				"Waiting for Charger Ready failed. Enabling charging anyway\n");
+		}
+	}
+
+	if (chip->pdata->enable_charging)
+		chip->pdata->enable_charging(val);
+
+	if (val) {
+		reg_val = (~BQ24261_CE_DISABLE & BQ24261_CE_MASK);
+		if (chip->is_hw_chrg_term)
+			reg_val |= BQ24261_TE_ENABLE;
+	} else {
+		reg_val = BQ24261_CE_DISABLE;
+	}
+
+	reg_val |=  BQ24261_STAT_ENABLE;
+
+	ret = bq24261_read_modify_reg(chip->client, BQ24261_CTRL_ADDR,
+		       BQ24261_STAT_ENABLE_MASK|BQ24261_RESET_MASK|
+				BQ24261_CE_MASK|BQ24261_TE_MASK,
+					reg_val);
+	if (ret || !val)
+		return ret;
+
+	bq24261_set_iterm(chip, chip->iterm);
+	return bq24261_tmr_ntc_init(chip);
+}
+
+static inline int bq24261_reset_timer(struct bq24261_charger *chip)
+{
+	return bq24261_read_modify_reg(chip->client, BQ24261_STAT_CTRL0_ADDR,
+			BQ24261_TMR_RST_MASK, BQ24261_TMR_RST);
+}
+
+static inline int bq24261_enable_charger(
+	struct bq24261_charger *chip, int val)
+{
+
+	/* TODO: Implement enable/disable HiZ mode to enable/
+	*  disable charger
+	*/
+	u8 reg_val;
+	int ret;
+
+	reg_val = val ? (~BQ24261_HZ_ENABLE & BQ24261_HZ_MASK)  :
+			BQ24261_HZ_ENABLE;
+
+	ret = bq24261_read_modify_reg(chip->client, BQ24261_CTRL_ADDR,
+		       BQ24261_HZ_MASK|BQ24261_RESET_MASK, reg_val);
+	if (ret)
+		return ret;
+
+	return bq24261_reset_timer(chip);
+}
+
+static inline int bq24261_set_cc(struct bq24261_charger *chip, int cc)
+{
+	u8 reg_val;
+	int ret;
+
+	dev_dbg(&chip->client->dev, "cc=%d\n", cc);
+	if (chip->pdata->set_cc) {
+		ret = chip->pdata->set_cc(cc);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	if (cc && (cc < BQ24261_MIN_CC)) {
+		dev_dbg(&chip->client->dev, "Set LOW_CHG bit\n");
+		reg_val = BQ24261_LOW_CHG_EN;
+		ret = bq24261_read_modify_reg(chip->client,
+				BQ24261_VINDPM_STAT_ADDR,
+				BQ24261_LOW_CHG_MASK, reg_val);
+	} else {
+		dev_dbg(&chip->client->dev, "Clear LOW_CHG bit\n");
+		reg_val = BQ24261_LOW_CHG_DIS;
+		ret = bq24261_read_modify_reg(chip->client,
+				BQ24261_VINDPM_STAT_ADDR,
+				BQ24261_LOW_CHG_MASK, reg_val);
+	}
+
+	/* Return from here since the cc setting will be done
+	   by platform specific hardware */
+	if (chip->pdata->set_cc)
+		return ret;
+
+	bq24261_cc_to_reg(cc, &reg_val);
+
+	return bq24261_read_modify_reg(chip->client, BQ24261_TERM_FCC_ADDR,
+			BQ24261_ICHRG_MASK, reg_val);
+}
+
+static inline int bq24261_set_cv(struct bq24261_charger *chip, int cv)
+{
+	int bat_volt;
+	int ret;
+	u8 reg_val;
+	u8 vindpm_val = 0x0;
+
+	/*
+	* Setting VINDPM value as per the battery voltage
+	*  VBatt           Vindpm     Register Setting
+	*  < 3.7v           4.2v       0x0 (default)
+	*  3.71v - 3.96v    4.36v      0x2
+	*  > 3.96v          4.6v       0x5
+	*/
+	ret = get_battery_voltage(&bat_volt);
+	if (ret) {
+		dev_err(&chip->client->dev,
+			"Error getting battery voltage!!\n");
+	} else {
+		if (bat_volt > BQ24261_VBATT_LEVEL2)
+			vindpm_val =
+				(BQ24261_VINDPM_320MV | BQ24261_VINDPM_80MV);
+		else if (bat_volt > BQ24261_VBATT_LEVEL1)
+			vindpm_val = BQ24261_VINDPM_160MV;
+	}
+
+	ret = bq24261_read_modify_reg(chip->client,
+			BQ24261_VINDPM_STAT_ADDR,
+			BQ24261_VINDPM_MASK,
+			vindpm_val);
+	if (ret) {
+		dev_err(&chip->client->dev,
+			"Error setting VINDPM setting!!\n");
+		return ret;
+	}
+
+	if (chip->pdata->set_cv)
+		return chip->pdata->set_cv(cv);
+
+	bq24261_cv_to_reg(cv, &reg_val);
+
+	return bq24261_read_modify_reg(chip->client, BQ24261_BATT_VOL_CTRL_ADDR,
+				       BQ24261_VBREG_MASK, reg_val);
+}
+
+static inline int bq24261_set_inlmt(struct bq24261_charger *chip, int inlmt)
+{
+	u8 reg_val;
+
+	if (chip->pdata->set_inlmt)
+		return chip->pdata->set_inlmt(inlmt);
+
+	bq24261_inlmt_to_reg(inlmt, &reg_val);
+
+	return bq24261_read_modify_reg(chip->client, BQ24261_CTRL_ADDR,
+		       BQ24261_RESET_MASK|BQ24261_INLMT_MASK, reg_val);
+
+}
+
+static inline void resume_charging(struct bq24261_charger *chip)
+{
+
+	if (chip->is_charger_enabled)
+		bq24261_enable_charger(chip, true);
+	if (chip->inlmt)
+		bq24261_set_inlmt(chip, chip->inlmt);
+	if (chip->cc)
+		bq24261_set_cc(chip, chip->cc);
+	if (chip->cv)
+		bq24261_set_cv(chip, chip->cv);
+	if (chip->is_charging_enabled)
+		bq24261_enable_charging(chip, true);
+}
+
+static inline int bq24261_set_iterm(struct bq24261_charger *chip, int iterm)
+{
+	u8 reg_val;
+
+	if (chip->pdata->set_iterm)
+		return chip->pdata->set_iterm(iterm);
+
+	bq24261_iterm_to_reg(iterm, &reg_val);
+
+	return bq24261_read_modify_reg(chip->client, BQ24261_TERM_FCC_ADDR,
+				       BQ24261_ITERM_MASK, reg_val);
+}
+
+static inline int bq24261_enable_hw_charge_term(
+	struct bq24261_charger *chip, bool val)
+{
+	u8 data;
+	int ret;
+
+	data = val ? BQ24261_TE_ENABLE : (~BQ24261_TE_ENABLE & BQ24261_TE_MASK);
+
+
+	ret = bq24261_read_modify_reg(chip->client, BQ24261_CTRL_ADDR,
+			       BQ24261_RESET_MASK|BQ24261_TE_MASK, data);
+
+	if (ret)
+		return ret;
+
+	chip->is_hw_chrg_term = val ? true : false;
+
+	return ret;
+}
+
+static inline int bq24261_enable_boost_mode(
+	struct bq24261_charger *chip, int val)
+{
+	int ret = 0;
+
+
+	if (val) {
+
+		if ((chip->revision & BQ24261_REV_MASK) == BQ24261_REV) {
+			if (chip->pdata->enable_vbus)
+				chip->pdata->enable_vbus(true);
+		}
+
+		/* TODO: Support different Host Mode Current limits */
+
+		bq24261_enable_charger(chip, true);
+		ret =
+		    bq24261_read_modify_reg(chip->client,
+					    BQ24261_STAT_CTRL0_ADDR,
+					    BQ24261_BOOST_MASK,
+					    BQ24261_ENABLE_BOOST);
+		if (unlikely(ret))
+			return ret;
+
+		ret = bq24261_tmr_ntc_init(chip);
+		if (unlikely(ret))
+			return ret;
+		chip->boost_mode = true;
+
+		if ((chip->revision & BQ24261_REV_MASK) == BQ24261_REV)
+			schedule_delayed_work(&chip->wdt_work, 0);
+
+		dev_info(&chip->client->dev, "Boost Mode enabled\n");
+	} else {
+
+		ret =
+		    bq24261_read_modify_reg(chip->client,
+					    BQ24261_STAT_CTRL0_ADDR,
+					    BQ24261_BOOST_MASK,
+					    ~BQ24261_ENABLE_BOOST);
+
+		if (unlikely(ret))
+			return ret;
+		/* if charging need not to be enabled, disable
+		* the charger else keep the charger on
+		*/
+		if (!chip->is_charging_enabled)
+			bq24261_enable_charger(chip, false);
+		chip->boost_mode = false;
+		dev_info(&chip->client->dev, "Boost Mode disabled\n");
+
+		if ((chip->revision & BQ24261_REV_MASK) == BQ24261_REV) {
+			cancel_delayed_work_sync(&chip->wdt_work);
+
+			if (chip->pdata->enable_vbus)
+				chip->pdata->enable_vbus(false);
+		}
+
+		/* Notify power supply subsystem to enable charging
+		 * if needed. Eg. if DC adapter is connected
+		 */
+		power_supply_changed(&chip->psy_usb);
+	}
+
+	return ret;
+}
+
+static inline bool bq24261_is_vsys_on(struct bq24261_charger *chip)
+{
+	int ret;
+	struct i2c_client *client = chip->client;
+
+	ret = bq24261_read_reg(client, BQ24261_CTRL_ADDR);
+	if (ret < 0) {
+		dev_err(&client->dev,
+			"Error(%d) in reading BQ24261_CTRL_ADDR\n", ret);
+		return false;
+	}
+
+	if (((ret & BQ24261_HZ_MASK) == BQ24261_HZ_ENABLE) &&
+			chip->is_charger_enabled) {
+		dev_err(&client->dev, "Charger in Hi Z Mode\n");
+		bq24261_dump_regs(true);
+		return false;
+	}
+
+	ret = bq24261_read_reg(client, BQ24261_VINDPM_STAT_ADDR);
+	if (ret < 0) {
+		dev_err(&client->dev,
+			"Error(%d) in reading BQ24261_VINDPM_STAT_ADDR\n", ret);
+		return false;
+	}
+
+	if (ret & BQ24261_CD_STATUS_MASK) {
+		dev_err(&client->dev, "CD line asserted\n");
+		bq24261_dump_regs(true);
+		return false;
+	}
+
+	return true;
+}
+
+
+static inline bool bq24261_is_online(struct bq24261_charger *chip)
+{
+	if (chip->cable_type == POWER_SUPPLY_CHARGER_TYPE_NONE)
+		return false;
+	else if (!chip->is_charger_enabled)
+		return false;
+	/* BQ24261 gives an interrupt only on stop/resume charging.
+	 * If charging is already stopped, we need to query the hardware
+	 * to see whether the charger is still active and can supply vsys.
+	 */
+	else if ((chip->chrgr_stat == BQ24261_CHRGR_STAT_FAULT) ||
+		 (!chip->is_charging_enabled))
+		return bq24261_is_vsys_on(chip);
+	else
+		return chip->is_vsys_on;
+}
+
+static int bq24261_usb_set_property(struct power_supply *psy,
+				    enum power_supply_property psp,
+				    const union power_supply_propval *val)
+{
+	struct bq24261_charger *chip = container_of(psy,
+						    struct bq24261_charger,
+						    psy_usb);
+	int ret = 0;
+
+
+	mutex_lock(&chip->lock);
+
+
+	switch (psp) {
+
+	case POWER_SUPPLY_PROP_PRESENT:
+		chip->present = val->intval;
+		/* If a charging-capable cable is present, hold the
+		charger wakelock so that the target does not enter
+		suspend while charging is in progress. If the charging
+		cable has been removed, release the wakelock to allow
+		the target to enter sleep mode */
+/*		if (!wake_lock_active(&chip->chrgr_en_wakelock) &&
+					val->intval)
+			wake_lock(&chip->chrgr_en_wakelock);
+		else if (wake_lock_active(&chip->chrgr_en_wakelock) &&
+					!val->intval)
+			wake_unlock(&chip->chrgr_en_wakelock);
+*/
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		chip->online = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_ENABLE_CHARGING:
+
+		ret = bq24261_enable_charging(chip, val->intval);
+
+		if (ret)
+			dev_err(&chip->client->dev,
+				"Error(%d) in %s charging", ret,
+				(val->intval ? "enable" : "disable"));
+		else
+			chip->is_charging_enabled = val->intval;
+
+		if (val->intval)
+			bq24261_enable_hw_charge_term(chip, true);
+		else
+			cancel_delayed_work_sync(&chip->sw_term_work);
+
+		break;
+	case POWER_SUPPLY_PROP_ENABLE_CHARGER:
+
+		/* Don't enable the charger unless overvoltage is recovered */
+
+		if (chip->bat_health != POWER_SUPPLY_HEALTH_OVERVOLTAGE) {
+			ret = bq24261_enable_charger(chip, val->intval);
+
+			if (ret)
+				dev_err(&chip->client->dev,
+					"Error(%d) in %s charger", ret,
+					(val->intval ? "enable" : "disable"));
+			else
+				chip->is_charger_enabled = val->intval;
+		} else {
+			dev_info(&chip->client->dev, "Battery Over Voltage. Charger will be disabled\n");
+		}
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_CURRENT:
+		ret = bq24261_set_cc(chip, val->intval);
+		if (!ret)
+			chip->cc = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_VOLTAGE:
+		ret = bq24261_set_cv(chip, val->intval);
+		if (!ret)
+			chip->cv = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_MAX_CHARGE_CURRENT:
+		chip->max_cc = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_MAX_CHARGE_VOLTAGE:
+		chip->max_cv = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_TERM_CUR:
+		ret = bq24261_set_iterm(chip, val->intval);
+		if (!ret)
+			chip->iterm = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_CABLE_TYPE:
+
+		chip->cable_type = val->intval;
+		chip->psy_usb.type = get_power_supply_type(chip->cable_type);
+		if (chip->cable_type != POWER_SUPPLY_CHARGER_TYPE_NONE) {
+			chip->chrgr_health = POWER_SUPPLY_HEALTH_GOOD;
+			chip->chrgr_stat = BQ24261_CHRGR_STAT_UNKNOWN;
+
+			/* Adding this processing in order to check
+			for any faults during connect */
+
+			ret = bq24261_read_reg(chip->client,
+						BQ24261_STAT_CTRL0_ADDR);
+			if (ret < 0)
+				dev_err(&chip->client->dev,
+				"Error (%d) in reading status register(0x00)\n",
+				ret);
+			else
+				bq24261_handle_irq(chip, ret);
+		} else {
+			chip->chrgr_stat = BQ24261_CHRGR_STAT_UNKNOWN;
+			chip->chrgr_health = POWER_SUPPLY_HEALTH_UNKNOWN;
+			cancel_delayed_work_sync(&chip->low_supply_fault_work);
+		}
+
+
+		break;
+	case POWER_SUPPLY_PROP_INLMT:
+		ret = bq24261_set_inlmt(chip, val->intval);
+		if (!ret)
+			chip->inlmt = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+		chip->cntl_state = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_MAX_TEMP:
+		chip->max_temp = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_MIN_TEMP:
+		chip->min_temp = val->intval;
+		break;
+	default:
+		ret = -ENODATA;
+	}
+
+	mutex_unlock(&chip->lock);
+	return ret;
+}
+
+static int bq24261_usb_get_property(struct power_supply *psy,
+				    enum power_supply_property psp,
+				    union power_supply_propval *val)
+{
+	struct bq24261_charger *chip = container_of(psy,
+						    struct bq24261_charger,
+						    psy_usb);
+
+	mutex_lock(&chip->lock);
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_PRESENT:
+		val->intval = chip->present;
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		val->intval = chip->online;
+		break;
+	case POWER_SUPPLY_PROP_HEALTH:
+		val->intval = chip->chrgr_health;
+		break;
+	case POWER_SUPPLY_PROP_MAX_CHARGE_CURRENT:
+		val->intval = chip->max_cc;
+		break;
+	case POWER_SUPPLY_PROP_MAX_CHARGE_VOLTAGE:
+		val->intval = chip->max_cv;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_CURRENT:
+		val->intval = chip->cc;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_VOLTAGE:
+		val->intval = chip->cv;
+		break;
+	case POWER_SUPPLY_PROP_INLMT:
+		val->intval = chip->inlmt;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_TERM_CUR:
+		val->intval = chip->iterm;
+		break;
+	case POWER_SUPPLY_PROP_CABLE_TYPE:
+		val->intval = chip->cable_type;
+		break;
+	case POWER_SUPPLY_PROP_ENABLE_CHARGING:
+		if (chip->boost_mode)
+			val->intval = false;
+		else
+			val->intval = (chip->is_charging_enabled &&
+			(chip->chrgr_stat == BQ24261_CHRGR_STAT_CHARGING));
+
+		break;
+	case POWER_SUPPLY_PROP_ENABLE_CHARGER:
+		val->intval = bq24261_is_online(chip);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+		val->intval = chip->cntl_state;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX:
+		val->intval = chip->pdata->num_throttle_states;
+		break;
+	case POWER_SUPPLY_PROP_MODEL_NAME:
+		val->strval = chip->model_name;
+		break;
+	case POWER_SUPPLY_PROP_MANUFACTURER:
+		val->strval = chip->manufacturer;
+		break;
+	case POWER_SUPPLY_PROP_MAX_TEMP:
+		val->intval = chip->max_temp;
+		break;
+	case POWER_SUPPLY_PROP_MIN_TEMP:
+		val->intval = chip->min_temp;
+		break;
+	default:
+		mutex_unlock(&chip->lock);
+		return -EINVAL;
+	}
+
+	mutex_unlock(&chip->lock);
+	return 0;
+}
+
+static inline struct power_supply *get_psy_battery(void)
+{
+	struct class_dev_iter iter;
+	struct device *dev;
+	struct power_supply *pst;
+
+	class_dev_iter_init(&iter, power_supply_class, NULL, NULL);
+	while ((dev = class_dev_iter_next(&iter))) {
+		pst = (struct power_supply *)dev_get_drvdata(dev);
+		if (pst->type == POWER_SUPPLY_TYPE_BATTERY) {
+			class_dev_iter_exit(&iter);
+			return pst;
+		}
+	}
+	class_dev_iter_exit(&iter);
+
+	return NULL;
+}
+
+static inline int get_battery_voltage(int *volt)
+{
+	struct power_supply *psy;
+	union power_supply_propval val;
+	int ret;
+
+	psy = get_psy_battery();
+	if (!psy)
+		return -EINVAL;
+
+	ret = psy->get_property(psy, POWER_SUPPLY_PROP_VOLTAGE_NOW, &val);
+	if (!ret)
+		*volt = (val.intval);
+
+	return ret;
+}
+
+static inline int get_battery_volt_max_design(int *volt)
+{
+	struct power_supply *psy;
+	union power_supply_propval val;
+	int ret;
+
+	psy = get_psy_battery();
+	if (!psy)
+		return -EINVAL;
+
+	ret = psy->get_property(psy,
+		POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, &val);
+	if (!ret)
+		*volt = val.intval;
+	return ret;
+}
+
+static inline int get_battery_current(int *cur)
+{
+	struct power_supply *psy;
+	union power_supply_propval val;
+	int ret;
+
+	psy = get_psy_battery();
+	if (!psy)
+		return -EINVAL;
+
+	ret = psy->get_property(psy, POWER_SUPPLY_PROP_CURRENT_NOW, &val);
+	if (!ret)
+		*cur = val.intval;
+
+	return ret;
+}
+
+static void bq24261_wdt_reset_worker(struct work_struct *work)
+{
+
+	struct bq24261_charger *chip = container_of(work,
+			    struct bq24261_charger, wdt_work.work);
+	int ret;
+	ret = bq24261_reset_timer(chip);
+
+	if (ret)
+		dev_err(&chip->client->dev, "Error (%d) in WDT reset\n", ret);
+	else
+		dev_info(&chip->client->dev, "WDT reset\n");
+
+	schedule_delayed_work(&chip->wdt_work, WDT_RESET_DELAY);
+}
+
+static void bq24261_sw_charge_term_worker(struct work_struct *work)
+{
+
+	struct bq24261_charger *chip = container_of(work,
+						    struct bq24261_charger,
+						    sw_term_work.work);
+
+	power_supply_changed(NULL);
+
+	schedule_delayed_work(&chip->sw_term_work,
+			      CHRG_TERM_WORKER_DELAY);
+
+}
+
+int bq24261_get_bat_health(void)
+{
+
+	struct bq24261_charger *chip;
+
+	if (!bq24261_client)
+		return -ENODEV;
+
+	chip = i2c_get_clientdata(bq24261_client);
+
+	return chip->bat_health;
+}
+
+
+static void bq24261_low_supply_fault_work(struct work_struct *work)
+{
+	struct bq24261_charger *chip = container_of(work,
+						    struct bq24261_charger,
+						    low_supply_fault_work.work);
+
+	if (chip->chrgr_stat == BQ24261_CHRGR_STAT_FAULT) {
+		dev_err(&chip->client->dev, "Low Supply Fault detected!!\n");
+		chip->chrgr_health = POWER_SUPPLY_HEALTH_DEAD;
+		power_supply_changed(&chip->psy_usb);
+		bq24261_dump_regs(true);
+	}
+	return;
+}
+
+
+/* is_bat_over_voltage: check battery is over voltage or not
+*  @chip: bq24261_charger context
+*
+*  This function is used to verify the over voltage condition.
+*  In some scenarios, HW generates Over Voltage exceptions when
+*  battery voltage is normal. This function uses the over voltage
+*  condition (voltage_max_design * 1.01) to verify battery is really
+*  over charged or not.
+*/
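+/* For the default 4200000 uV max design, the fault threshold above
+*  works out to (4200000 / 1000) * 1010 = 4242000 uV and the recovery
+*  threshold to (4200000 / 1000) * 990 = 4158000 uV.
+*/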
+
+static bool is_bat_over_voltage(struct bq24261_charger *chip,
+		bool verify_recovery)
+{
+
+	int bat_volt, bat_volt_max_des, ret;
+
+	ret = get_battery_voltage(&bat_volt);
+	if (ret)
+		return verify_recovery ? false : true;
+
+	ret = get_battery_volt_max_design(&bat_volt_max_des);
+
+	if (ret)
+		bat_volt_max_des = BQ24261_DEF_BAT_VOLT_MAX_DESIGN;
+
+	dev_info(&chip->client->dev, "bat_volt=%d Voltage Max Design=%d OVP_VOLT=%d OVP recover volt=%d\n",
+			bat_volt, bat_volt_max_des,
+			(bat_volt_max_des/1000 * BQ24261_OVP_MULTIPLIER),
+			(bat_volt_max_des/1000 *
+				BQ24261_OVP_RECOVER_MULTIPLIER));
+	if (verify_recovery) {
+		if ((bat_volt) <= (bat_volt_max_des / 1000 *
+				BQ24261_OVP_RECOVER_MULTIPLIER))
+			return true;
+		else
+			return false;
+	} else {
+		if ((bat_volt) >= (bat_volt_max_des / 1000 *
+					BQ24261_OVP_MULTIPLIER))
+			return true;
+		else
+			return false;
+	}
+
+	return false;
+}
+
+#define IS_BATTERY_OVER_VOLTAGE(chip) \
+	is_bat_over_voltage(chip, false)
+
+#define IS_BATTERY_OVER_VOLTAGE_RECOVERED(chip) \
+	is_bat_over_voltage(chip, true)
+
+static void handle_battery_over_voltage(struct bq24261_charger *chip)
+{
+	/* Set Health to Over Voltage. Disable charger to discharge
+	*  battery to reduce the battery voltage.
+	*/
+	chip->bat_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+	bq24261_enable_charger(chip, false);
+	chip->is_charger_enabled = false;
+	cancel_delayed_work_sync(&chip->exception_mon_work);
+	schedule_delayed_work(&chip->exception_mon_work,
+			EXCEPTION_MONITOR_DELAY);
+}
+
+static void bq24261_exception_mon_work(struct work_struct *work)
+{
+	struct bq24261_charger *chip = container_of(work,
+						    struct bq24261_charger,
+						    exception_mon_work.work);
+	/* Only the over-voltage exception needs to be monitored. */
+	if (IS_BATTERY_OVER_VOLTAGE_RECOVERED(chip)) {
+		dev_info(&chip->client->dev, "Over Voltage Exception Recovered\n");
+		chip->bat_health = POWER_SUPPLY_HEALTH_GOOD;
+		bq24261_enable_charger(chip, true);
+		chip->is_charger_enabled = true;
+		resume_charging(chip);
+	} else {
+		schedule_delayed_work(&chip->exception_mon_work,
+			      EXCEPTION_MONITOR_DELAY);
+	}
+}
+
+static int bq24261_handle_irq(struct bq24261_charger *chip, u8 stat_reg)
+{
+	struct i2c_client *client = chip->client;
+	bool notify = true;
+
+	dev_info(&client->dev, "%s:%d stat=0x%x\n",
+			__func__, __LINE__, stat_reg);
+
+	switch (stat_reg & BQ24261_STAT_MASK) {
+	case BQ24261_STAT_READY:
+		chip->chrgr_stat = BQ24261_CHRGR_STAT_READY;
+		chip->chrgr_health = POWER_SUPPLY_HEALTH_GOOD;
+		chip->bat_health = POWER_SUPPLY_HEALTH_GOOD;
+		dev_info(&client->dev, "Charger Status: Ready\n");
+		notify = false;
+		break;
+	case BQ24261_STAT_CHRG_PRGRSS:
+		chip->chrgr_stat = BQ24261_CHRGR_STAT_CHARGING;
+		chip->chrgr_health = POWER_SUPPLY_HEALTH_GOOD;
+		chip->bat_health = POWER_SUPPLY_HEALTH_GOOD;
+		dev_info(&client->dev, "Charger Status: Charge Progress\n");
+		bq24261_dump_regs(false);
+		break;
+	case BQ24261_STAT_CHRG_DONE:
+		chip->chrgr_health = POWER_SUPPLY_HEALTH_GOOD;
+		chip->bat_health = POWER_SUPPLY_HEALTH_GOOD;
+		dev_info(&client->dev, "Charger Status: Charge Done\n");
+
+		bq24261_enable_hw_charge_term(chip, false);
+		resume_charging(chip);
+		schedule_delayed_work(&chip->sw_term_work, 0);
+		break;
+
+	case BQ24261_STAT_FAULT:
+		break;
+	}
+
+	if (stat_reg & BQ24261_BOOST_MASK)
+		dev_info(&client->dev, "Boost Mode\n");
+
+	if ((stat_reg & BQ24261_STAT_MASK) == BQ24261_STAT_FAULT) {
+		bool dump_master = true;
+		chip->chrgr_stat = BQ24261_CHRGR_STAT_FAULT;
+
+		switch (stat_reg & BQ24261_FAULT_MASK) {
+		case BQ24261_VOVP:
+			chip->chrgr_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+			dev_err(&client->dev, "Charger OVP Fault\n");
+			break;
+
+		case BQ24261_LOW_SUPPLY:
+			notify = false;
+
+			if (chip->pdata->handle_low_supply)
+				chip->pdata->handle_low_supply();
+
+			if (chip->cable_type !=
+					POWER_SUPPLY_CHARGER_TYPE_NONE) {
+				schedule_delayed_work
+					(&chip->low_supply_fault_work,
+					5*HZ);
+				dev_dbg(&client->dev,
+					"Schedule Low Supply Fault work!!\n");
+			}
+			break;
+
+		case BQ24261_THERMAL_SHUTDOWN:
+			chip->chrgr_health = POWER_SUPPLY_HEALTH_OVERHEAT;
+			dev_err(&client->dev, "Charger Thermal Fault\n");
+			break;
+
+		case BQ24261_BATT_TEMP_FAULT:
+			chip->bat_health = POWER_SUPPLY_HEALTH_OVERHEAT;
+			dev_err(&client->dev, "Battery Temperature Fault\n");
+			break;
+
+		case BQ24261_TIMER_FAULT:
+			chip->bat_health = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+			chip->chrgr_health = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+			dev_err(&client->dev, "Charger Timer Fault\n");
+			break;
+
+		case BQ24261_BATT_OVP:
+			notify = false;
+			if (chip->bat_health !=
+					POWER_SUPPLY_HEALTH_OVERVOLTAGE) {
+				if (!IS_BATTERY_OVER_VOLTAGE(chip)) {
+					chip->chrgr_stat =
+						BQ24261_CHRGR_STAT_UNKNOWN;
+					resume_charging(chip);
+				} else {
+					dev_err(&client->dev, "Battery Over Voltage Fault\n");
+					handle_battery_over_voltage(chip);
+					notify = true;
+				}
+			}
+			break;
+		case BQ24261_NO_BATTERY:
+			dev_err(&client->dev, "No Battery Connected\n");
+			break;
+
+		}
+
+		if (chip->chrgr_stat == BQ24261_CHRGR_STAT_FAULT && notify)
+			bq24261_dump_regs(dump_master);
+	}
+
+	wake_up(&chip->wait_ready);
+
+	chip->is_vsys_on = bq24261_is_vsys_on(chip);
+	if (notify)
+		power_supply_changed(&chip->psy_usb);
+
+	return 0;
+}
+
+static void bq24261_irq_worker(struct work_struct *work)
+{
+	struct bq24261_charger *chip =
+	    container_of(work, struct bq24261_charger, irq_work);
+	int ret;
+
+	/* Lock to ensure that interrupt register reads are done and
+	 * processed sequentially. The interrupt fault registers are
+	 * clear-on-read, so without sequential processing, double
+	 * fault interrupts or fault recovery cannot be handled properly.
+	 */
+
+	mutex_lock(&chip->lock);
+
+	dev_dbg(&chip->client->dev, "%s\n", __func__);
+
+	ret = bq24261_read_reg(chip->client, BQ24261_STAT_CTRL0_ADDR);
+	if (ret < 0) {
+		dev_err(&chip->client->dev,
+			"Error (%d) in reading BQ24261_STAT_CTRL0_ADDR\n", ret);
+	} else {
+		bq24261_handle_irq(chip, ret);
+	}
+	mutex_unlock(&chip->lock);
+}
+
+static irqreturn_t bq24261_thread_handler(int id, void *data)
+{
+	struct bq24261_charger *chip = (struct bq24261_charger *)data;
+
+	queue_work(system_nrt_wq, &chip->irq_work);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bq24261_irq_handler(int irq, void *data)
+{
+	struct bq24261_charger *chip = (struct bq24261_charger *)data;
+	u8 intr_stat;
+
+	if (chip->irq_iomap) {
+		intr_stat = ioread8(chip->irq_iomap);
+		if ((intr_stat & chip->pdata->irq_mask)) {
+			dev_dbg(&chip->client->dev, "%s\n", __func__);
+			return IRQ_WAKE_THREAD;
+		}
+	}
+
+	return IRQ_NONE;
+}
+
+static void bq24261_boostmode_worker(struct work_struct *work)
+{
+	struct bq24261_charger *chip =
+	    container_of(work, struct bq24261_charger, otg_work);
+	struct bq24261_otg_event *evt, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&chip->otg_queue_lock, flags);
+	list_for_each_entry_safe(evt, tmp, &chip->otg_queue, node) {
+		list_del(&evt->node);
+		spin_unlock_irqrestore(&chip->otg_queue_lock, flags);
+
+		dev_info(&chip->client->dev,
+			"%s:%d state=%d\n", __FILE__, __LINE__,
+				evt->is_enable);
+		mutex_lock(&chip->lock);
+		if (evt->is_enable)
+			bq24261_enable_boost_mode(chip, 1);
+		else
+			bq24261_enable_boost_mode(chip, 0);
+
+		mutex_unlock(&chip->lock);
+		spin_lock_irqsave(&chip->otg_queue_lock, flags);
+		kfree(evt);
+
+	}
+	spin_unlock_irqrestore(&chip->otg_queue_lock, flags);
+}
+
+static int otg_handle_notification(struct notifier_block *nb,
+				   unsigned long event, void *param)
+{
+
+	struct bq24261_charger *chip =
+	    container_of(nb, struct bq24261_charger, otg_nb);
+	struct bq24261_otg_event *evt;
+
+	dev_dbg(&chip->client->dev, "OTG notification: %lu\n", event);
+	if (!param || event != USB_EVENT_DRIVE_VBUS)
+		return NOTIFY_DONE;
+
+	evt = kzalloc(sizeof(*evt), GFP_ATOMIC);
+	if (!evt) {
+		dev_err(&chip->client->dev,
+			"failed to allocate memory for OTG event\n");
+		return NOTIFY_DONE;
+	}
+
+	evt->is_enable = *(int *)param;
+	INIT_LIST_HEAD(&evt->node);
+
+	spin_lock(&chip->otg_queue_lock);
+	list_add_tail(&evt->node, &chip->otg_queue);
+	spin_unlock(&chip->otg_queue_lock);
+
+	queue_work(system_nrt_wq, &chip->otg_work);
+	return NOTIFY_OK;
+}
+
+static inline int register_otg_notifications(struct bq24261_charger *chip)
+{
+
+	int retval;
+
+	INIT_LIST_HEAD(&chip->otg_queue);
+	INIT_WORK(&chip->otg_work, bq24261_boostmode_worker);
+	spin_lock_init(&chip->otg_queue_lock);
+
+	chip->otg_nb.notifier_call = otg_handle_notification;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+	chip->transceiver = usb_get_transceiver();
+#else
+	chip->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
+#endif
+	if (!chip->transceiver || IS_ERR(chip->transceiver)) {
+		dev_err(&chip->client->dev, "failed to get otg transceiver\n");
+		return -EINVAL;
+	}
+	retval = usb_register_notifier(chip->transceiver, &chip->otg_nb);
+	if (retval) {
+		dev_err(&chip->client->dev,
+			"failed to register otg notifier\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static enum bq2426x_model_num bq24261_get_model(int bq24261_rev_reg)
+{
+	switch (bq24261_rev_reg & BQ24261_REV_MASK) {
+	case BQ24260_REV:
+		return BQ24260;
+	case BQ24261_REV:
+	case BQ24261_2_3_REV:
+		return BQ24261;
+	default:
+		return BQ2426X;
+	}
+}
+
+static int bq24261_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	struct i2c_adapter *adapter;
+	struct bq24261_charger *chip;
+	int ret;
+	int bq2426x_rev;
+	enum bq2426x_model_num bq24261_rev_index;
+
+	adapter = to_i2c_adapter(client->dev.parent);
+
+	if (!client->dev.platform_data) {
+		dev_err(&client->dev, "platform data is null");
+		return -EFAULT;
+	}
+
+	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+		dev_err(&client->dev,
+			"I2C adapter %s doesn't support BYTE DATA transfer\n",
+			adapter->name);
+		return -EIO;
+	}
+
+	bq2426x_rev = bq24261_read_reg(client, BQ24261_VENDOR_REV_ADDR);
+	if (bq2426x_rev < 0) {
+		dev_err(&client->dev,
+			"Error (%d) in reading BQ24261_VENDOR_REV_ADDR\n", bq2426x_rev);
+		return bq2426x_rev;
+	}
+	dev_info(&client->dev, "bq2426x revision: 0x%x found!!\n", bq2426x_rev);
+
+	bq24261_rev_index = bq24261_get_model(bq2426x_rev);
+	if ((bq2426x_rev & BQ24261_VENDOR_MASK) != BQ24261_VENDOR) {
+		dev_err(&client->dev,
+			"Invalid Vendor/Revision number in BQ24261_VENDOR_REV_ADDR: %d",
+			bq2426x_rev);
+		return -ENODEV;
+	}
+
+	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip) {
+		dev_err(&client->dev, "mem alloc failed\n");
+		return -ENOMEM;
+	}
+
+	init_waitqueue_head(&chip->wait_ready);
+	i2c_set_clientdata(client, chip);
+	chip->pdata = client->dev.platform_data;
+
+	/* Remap IRQ map address to read the IRQ status */
+	if ((chip->pdata->irq_map) && (chip->pdata->irq_mask)) {
+		chip->irq_iomap = ioremap_nocache(chip->pdata->irq_map, 8);
+		if (!chip->irq_iomap) {
+			dev_err(&client->dev, "Failed: ioremap_nocache\n");
+			return -EFAULT;
+		}
+	}
+
+	chip->client = client;
+	chip->pdata = client->dev.platform_data;
+
+	chip->psy_usb.name = DEV_NAME;
+	chip->psy_usb.type = POWER_SUPPLY_TYPE_USB;
+	chip->psy_usb.properties = bq24261_usb_props;
+	chip->psy_usb.num_properties = ARRAY_SIZE(bq24261_usb_props);
+	chip->psy_usb.get_property = bq24261_usb_get_property;
+	chip->psy_usb.set_property = bq24261_usb_set_property;
+	chip->psy_usb.supplied_to = chip->pdata->supplied_to;
+	chip->psy_usb.num_supplicants = chip->pdata->num_supplicants;
+	chip->psy_usb.throttle_states = chip->pdata->throttle_states;
+	chip->psy_usb.num_throttle_states = chip->pdata->num_throttle_states;
+	chip->psy_usb.supported_cables = POWER_SUPPLY_CHARGER_TYPE_USB;
+	chip->max_cc = 1500;
+	chip->chrgr_stat = BQ24261_CHRGR_STAT_UNKNOWN;
+	chip->chrgr_health = POWER_SUPPLY_HEALTH_UNKNOWN;
+	chip->revision = bq2426x_rev;
+
+	strncpy(chip->model_name,
+		bq24261_model_name[bq24261_rev_index].model_name,
+		MODEL_NAME_SIZE);
+	strncpy(chip->manufacturer, DEV_MANUFACTURER,
+		DEV_MANUFACTURER_NAME_SIZE);
+
+	mutex_init(&chip->lock);
+	ret = power_supply_register(&client->dev, &chip->psy_usb);
+	if (ret) {
+		dev_err(&client->dev, "Failed: power supply register (%d)\n",
+			ret);
+		iounmap(chip->irq_iomap);
+		return ret;
+	}
+
+	INIT_DELAYED_WORK(&chip->sw_term_work, bq24261_sw_charge_term_worker);
+	INIT_DELAYED_WORK(&chip->low_supply_fault_work,
+				bq24261_low_supply_fault_work);
+	INIT_DELAYED_WORK(&chip->exception_mon_work,
+				bq24261_exception_mon_work);
+	if ((chip->revision & BQ24261_REV_MASK) == BQ24261_REV) {
+		INIT_DELAYED_WORK(&chip->wdt_work,
+					bq24261_wdt_reset_worker);
+	}
+
+	INIT_WORK(&chip->irq_work, bq24261_irq_worker);
+	if (chip->client->irq) {
+		ret = request_threaded_irq(chip->client->irq,
+					   bq24261_irq_handler,
+					   bq24261_thread_handler,
+					   IRQF_SHARED|IRQF_NO_SUSPEND,
+					   DEV_NAME, chip);
+		if (ret) {
+			dev_err(&client->dev, "Failed: request_irq (%d)\n",
+				ret);
+			iounmap(chip->irq_iomap);
+			power_supply_unregister(&chip->psy_usb);
+			return ret;
+		}
+	}
+
+	if (IS_BATTERY_OVER_VOLTAGE(chip))
+		handle_battery_over_voltage(chip);
+	else
+		chip->bat_health = POWER_SUPPLY_HEALTH_GOOD;
+
+	if (register_otg_notifications(chip))
+		dev_err(&client->dev, "Error in registering OTG notifications. Unable to supply power to Host\n");
+
+	bq24261_client = client;
+	power_supply_changed(&chip->psy_usb);
+	bq24261_debugfs_init();
+
+	return 0;
+}
+
+static int bq24261_remove(struct i2c_client *client)
+{
+	struct bq24261_charger *chip = i2c_get_clientdata(client);
+
+	if (client->irq)
+		free_irq(client->irq, chip);
+
+	flush_scheduled_work();
+	if (chip->irq_iomap)
+		iounmap(chip->irq_iomap);
+	if (chip->transceiver)
+		usb_unregister_notifier(chip->transceiver, &chip->otg_nb);
+
+	power_supply_unregister(&chip->psy_usb);
+	bq24261_debugfs_exit();
+	return 0;
+}
+
+static int bq24261_suspend(struct device *dev)
+{
+	struct bq24261_charger *chip = dev_get_drvdata(dev);
+
+	if ((chip->revision & BQ24261_REV_MASK) == BQ24261_REV) {
+		if (chip->boost_mode)
+			cancel_delayed_work_sync(&chip->wdt_work);
+	}
+	dev_dbg(&chip->client->dev, "bq24261 suspend\n");
+	return 0;
+}
+
+static int bq24261_resume(struct device *dev)
+{
+	struct bq24261_charger *chip = dev_get_drvdata(dev);
+
+	if ((chip->revision & BQ24261_REV_MASK) == BQ24261_REV) {
+		if (chip->boost_mode)
+			bq24261_enable_boost_mode(chip, 1);
+	}
+
+	dev_dbg(&chip->client->dev, "bq24261 resume\n");
+	return 0;
+}
+
+static int bq24261_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+
+static int bq24261_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+
+static int bq24261_runtime_idle(struct device *dev)
+{
+
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+
+static const struct dev_pm_ops bq24261_pm_ops = {
+	.suspend = bq24261_suspend,
+	.resume = bq24261_resume,
+	.runtime_suspend = bq24261_runtime_suspend,
+	.runtime_resume = bq24261_runtime_resume,
+	.runtime_idle = bq24261_runtime_idle,
+};
+
+static const struct i2c_device_id bq24261_id[] = {
+	{DEV_NAME, 0},
+	{},
+};
+
+MODULE_DEVICE_TABLE(i2c, bq24261_id);
+
+static struct i2c_driver bq24261_driver = {
+	.driver = {
+		   .name = DEV_NAME,
+		   .pm = &bq24261_pm_ops,
+		   },
+	.probe = bq24261_probe,
+	.remove = bq24261_remove,
+	.id_table = bq24261_id,
+};
+
+static int __init bq24261_init(void)
+{
+	return i2c_add_driver(&bq24261_driver);
+}
+
+module_init(bq24261_init);
+
+static void __exit bq24261_exit(void)
+{
+	i2c_del_driver(&bq24261_driver);
+}
+
+module_exit(bq24261_exit);
+
+MODULE_AUTHOR("Jenny TC <jenny.tc@intel.com>");
+MODULE_DESCRIPTION("BQ24261 Charger Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/charging_algo_pse.c b/drivers/power/charging_algo_pse.c
new file mode 100644
index 0000000..2088594
--- /dev/null
+++ b/drivers/power/charging_algo_pse.c
@@ -0,0 +1,180 @@
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/power_supply.h>
+#include <linux/thermal.h>
+#include <linux/power/battery_id.h>
+#include "power_supply.h"
+#include "power_supply_charger.h"
+
+/* 98% of CV is considered as voltage to detect Full */
+#define FULL_CV_MIN 98
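+
+/* Worked example (illustrative numbers, not from a real profile): with a
+ * CV setpoint of 4350 mV, the full condition (volt * 100) >= (FULL_CV_MIN
+ * * cv) used below reduces to volt >= 4263 mV, i.e. the measured voltage
+ * must be within 2% of the constant-voltage setpoint.
+ */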
+
+/* Offset to exit from maintenance charging. In maintenance charging,
+ * if the voltage is less than (maintenance_lower_threshold -
+ * MAINT_EXIT_OFFSET) then the system can switch to normal charging.
+ */
+#define MAINT_EXIT_OFFSET 50  /* mV */
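+
+/* Example (illustrative): with maint_chrg_vol_ll = 4200 mV, maintenance
+ * charging is exited once the voltage drops below 4200 - 50 = 4150 mV.
+ */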
+
+static int get_tempzone(struct ps_pse_mod_prof *pse_mod_bprof,
+		int temp)
+{
+
+	int i = 0;
+	int temp_range_cnt = min_t(u16, pse_mod_bprof->temp_mon_ranges,
+					BATT_TEMP_NR_RNG);
+
+	if ((temp < pse_mod_bprof->temp_low_lim) ||
+		(temp > pse_mod_bprof->temp_mon_range[0].temp_up_lim))
+		return -EINVAL;
+
+	for (i = 0; i < temp_range_cnt; ++i)
+		if (temp > pse_mod_bprof->temp_mon_range[i].temp_up_lim)
+			break;
+	return i-1;
+}
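+
+/* Worked example (illustrative table): temp_up_lim entries are ordered
+ * high-to-low, e.g. {60, 45, 23, 10} with temp_low_lim = 0. For temp = 30
+ * the scan above breaks at i = 2 (30 > 23) and returns zone 1, which
+ * covers the interval (23, 45].
+ */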
+
+static inline bool __is_battery_full
+	(long volt, long cur, long iterm, unsigned long cv)
+{
+	pr_devel("%s: current=%ld iterm=%ld voltage_now*100=%ld full_cond=%lu\n",
+			__func__, cur, iterm, volt * 100, FULL_CV_MIN * cv);
+
+	return (cur > 0) && (cur <= iterm) &&
+		((volt * 100) >= (FULL_CV_MIN * cv));
+}
+
+static inline bool is_battery_full(struct batt_props bat_prop,
+		struct ps_pse_mod_prof *pse_mod_bprof, unsigned long cv)
+{
+
+	int i;
+	/* Software full detection. Check the battery charge current to detect
+	 * battery Full. The voltage is also verified to avoid a false charge
+	 * full detection.
+	 */
+	pr_devel("%s: current=%d chrg_term_mA=%d voltage_now*100=%d full_cond=%lu\n",
+		__func__, bat_prop.current_now, pse_mod_bprof->chrg_term_mA,
+		bat_prop.voltage_now * 100, FULL_CV_MIN * cv);
+
+	for (i = (MAX_CUR_VOLT_SAMPLES - 1); i >= 0; --i) {
+
+		if (!(__is_battery_full(bat_prop.voltage_now_cache[i],
+				bat_prop.current_now_cache[i],
+				pse_mod_bprof->chrg_term_mA, cv)))
+			return false;
+	}
+
+	return true;
+}
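+
+/* Note: Full is only declared when every one of the last
+ * MAX_CUR_VOLT_SAMPLES cached (voltage, current) pairs satisfies
+ * __is_battery_full(), which debounces transient current dips near the
+ * end of charge.
+ */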
+
+static int pse_get_bat_thresholds(struct ps_batt_chg_prof bprof,
+			struct psy_batt_thresholds *bat_thresh)
+{
+	struct ps_pse_mod_prof *pse_mod_bprof =
+			(struct ps_pse_mod_prof *) bprof.batt_prof;
+
+	if ((bprof.chrg_prof_type != PSE_MOD_CHRG_PROF) || (!pse_mod_bprof))
+		return -EINVAL;
+
+	bat_thresh->iterm = pse_mod_bprof->chrg_term_mA;
+	bat_thresh->temp_min = pse_mod_bprof->temp_low_lim;
+	bat_thresh->temp_max = pse_mod_bprof->temp_mon_range[0].temp_up_lim;
+
+	return 0;
+}
+
+static enum psy_algo_stat pse_get_next_cc_cv(struct batt_props bat_prop,
+	struct ps_batt_chg_prof bprof, unsigned long *cc, unsigned long *cv)
+{
+	int tzone;
+	struct ps_pse_mod_prof *pse_mod_bprof =
+			(struct ps_pse_mod_prof *) bprof.batt_prof;
+	enum psy_algo_stat algo_stat = bat_prop.algo_stat;
+	int maint_exit_volt;
+
+	*cc = *cv = 0;
+
+	/* If STATUS is discharging, assume that charger is not connected.
+	*  If charger is not connected, no need to take any action.
+	*  If charge profile type is not PSE_MOD_CHRG_PROF or the charge profile
+	*  is not present, no need to take any action.
+	*/
+
+	pr_devel("%s:battery status = %d algo_status=%d\n",
+			__func__, bat_prop.status, algo_stat);
+
+	if ((bprof.chrg_prof_type != PSE_MOD_CHRG_PROF) || (!pse_mod_bprof))
+		return PSY_ALGO_STAT_NOT_CHARGE;
+
+	tzone = get_tempzone(pse_mod_bprof, bat_prop.temperature);
+
+	if (tzone < 0)
+		return PSY_ALGO_STAT_NOT_CHARGE;
+
+	/* Change the algo status to not charging if the battery is
+	 * not really charging or is below the maintenance exit threshold.
+	 * This way the algorithm can switch back to normal charging
+	 * if the current status is full/maintenance.
+	 */
+	maint_exit_volt =
+		pse_mod_bprof->temp_mon_range[tzone].maint_chrg_vol_ll -
+		MAINT_EXIT_OFFSET;
+
+	if ((bat_prop.status == POWER_SUPPLY_STATUS_DISCHARGING) ||
+		(bat_prop.status == POWER_SUPPLY_STATUS_NOT_CHARGING) ||
+			bat_prop.voltage_now < maint_exit_volt) {
+
+		algo_stat = PSY_ALGO_STAT_NOT_CHARGE;
+
+	}
+
+	/* read cc and cv based on temperature and algorithm status*/
+	if (algo_stat == PSY_ALGO_STAT_FULL ||
+			algo_stat == PSY_ALGO_STAT_MAINT) {
+
+		/* If status is full and voltage is lower than the maintenance
+		 * lower threshold, change status to maintenance.
+		 */
+
+		if (algo_stat == PSY_ALGO_STAT_FULL && (bat_prop.voltage_now <=
+			pse_mod_bprof->temp_mon_range[tzone].maint_chrg_vol_ll))
+				algo_stat = PSY_ALGO_STAT_MAINT;
+
+		/* Read maintenance CC and CV */
+		if (algo_stat == PSY_ALGO_STAT_MAINT) {
+			*cv = pse_mod_bprof->temp_mon_range
+					[tzone].maint_chrg_vol_ul;
+			*cc = pse_mod_bprof->temp_mon_range
+					[tzone].maint_chrg_cur;
+		}
+	} else {
+		*cv = pse_mod_bprof->temp_mon_range[tzone].full_chrg_vol;
+		*cc = pse_mod_bprof->temp_mon_range[tzone].full_chrg_cur;
+		algo_stat = PSY_ALGO_STAT_CHARGE;
+	}
+
+	if (is_battery_full(bat_prop, pse_mod_bprof, *cv)) {
+		*cc = *cv = 0;
+		algo_stat = PSY_ALGO_STAT_FULL;
+	}
+
+	return algo_stat;
+}
+
+static int __init pse_algo_init(void)
+{
+	/* Keep the descriptor static so it remains valid after init returns;
+	 * whether the registration below stores a reference is defined
+	 * elsewhere, so the static lifetime is the safe choice here.
+	 */
+	static struct charging_algo pse_algo;
+
+	pse_algo.chrg_prof_type = PSE_MOD_CHRG_PROF;
+	pse_algo.name = "pse_algo";
+	pse_algo.get_next_cc_cv = pse_get_next_cc_cv;
+	pse_algo.get_batt_thresholds = pse_get_bat_thresholds;
+	power_supply_register_charging_algo(&pse_algo);
+	return 0;
+}
+
+module_init(pse_algo_init);
diff --git a/drivers/power/pmic_ccsm.c b/drivers/power/pmic_ccsm.c
new file mode 100644
index 0000000..c7c9954
--- /dev/null
+++ b/drivers/power/pmic_ccsm.c
@@ -0,0 +1,2001 @@
+/*
+ * pmic_ccsm.c - Intel MID PMIC Charger Driver
+ *
+ * Copyright (C) 2011 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Jenny TC <jenny.tc@intel.com>
+ * Author: Yegnesh Iyer <yegnesh.s.iyer@intel.com>
+ */
+
+/* Includes */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/kfifo.h>
+#include <linux/param.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/usb/otg.h>
+#include <linux/power_supply.h>
+#include <linux/rpmsg.h>
+#include <linux/version.h>
+#include <asm/intel_basincove_gpadc.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
+#include <linux/iio/consumer.h>
+#else
+#include "../../../kernel/drivers/staging/iio/consumer.h"
+#endif
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_mid_remoteproc.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/pm_runtime.h>
+#include <linux/sfi.h>
+#include <linux/async.h>
+#include <linux/reboot.h>
+#include <linux/notifier.h>
+#include <linux/power/battery_id.h>
+#include "pmic_ccsm.h"
+
+/* Macros */
+#define DRIVER_NAME "pmic_ccsm"
+#define PMIC_SRAM_INTR_ADDR 0xFFFFF616
+#define ADC_TO_TEMP 1
+#define TEMP_TO_ADC 0
+#define is_valid_temp(tmp)\
+	(!(tmp > chc.pdata->adc_tbl[0].temp ||\
+	tmp < chc.pdata->adc_tbl[chc.pdata->max_tbl_row_cnt - 1].temp))
+#define is_valid_adc_code(val)\
+	(!(val < chc.pdata->adc_tbl[0].adc_val ||\
+	val > chc.pdata->adc_tbl[chc.pdata->max_tbl_row_cnt - 1].adc_val))
+#define CONVERT_ADC_TO_TEMP(adc_val, temp)\
+	adc_temp_conv(adc_val, temp, ADC_TO_TEMP)
+#define CONVERT_TEMP_TO_ADC(temp, adc_val)\
+	adc_temp_conv(temp, adc_val, TEMP_TO_ADC)
+#define NEED_ZONE_SPLIT(bprof)\
+	 ((bprof->temp_mon_ranges < MIN_BATT_PROF))
+
+#define USB_WAKE_LOCK_TIMEOUT	(5 * HZ)
+
+/* 100mA value definition for setting the inlimit in bq24261 */
+#define USBINPUTICC100VAL	100
+
+/* Forward declarations */
+static void pmic_bat_zone_changed(void);
+static void pmic_battery_overheat_handler(bool);
+
+/* Extern definitions */
+
+/* Global declarations */
+static DEFINE_MUTEX(pmic_lock);
+static struct pmic_chrgr_drv_context chc;
+static struct interrupt_info chgrirq0_info[] = {
+	{
+		CHGIRQ0_BZIRQ_MASK,
+		0,
+		"Battery temperature zone changed",
+		NULL,
+		NULL,
+		pmic_bat_zone_changed,
+		NULL,
+	},
+	{
+		CHGIRQ0_BAT_CRIT_MASK,
+		SCHGIRQ0_SBAT_CRIT_MASK,
+		NULL,
+		"Battery Over heat exception",
+		"Battery Over heat exception Recovered",
+		NULL,
+		pmic_battery_overheat_handler
+	},
+	{
+		CHGIRQ0_BAT0_ALRT_MASK,
+		SCHGIRQ0_SBAT0_ALRT_MASK,
+		NULL,
+		"Battery0 temperature inside boundary",
+		"Battery0 temperature outside boundary",
+		NULL,
+		pmic_battery_overheat_handler
+	},
+	{
+		CHGIRQ0_BAT1_ALRT_MASK,
+		SCHGIRQ0_SBAT1_ALRT_MASK,
+		NULL,
+		"Battery1 temperature inside boundary",
+		"Battery1 temperature outside boundary",
+		NULL,
+		NULL
+	},
+};
+
+u16 pmic_inlmt[][2] = {
+	{ 100, CHGRCTRL1_FUSB_INLMT_100},
+	{ 150, CHGRCTRL1_FUSB_INLMT_150},
+	{ 500, CHGRCTRL1_FUSB_INLMT_500},
+	{ 900, CHGRCTRL1_FUSB_INLMT_900},
+	{ 1500, CHGRCTRL1_FUSB_INLMT_1500},
+};
+
+static inline struct power_supply *get_psy_battery(void)
+{
+	struct class_dev_iter iter;
+	struct device *dev;
+	struct power_supply *pst;
+
+	class_dev_iter_init(&iter, power_supply_class, NULL, NULL);
+	while ((dev = class_dev_iter_next(&iter))) {
+		pst = (struct power_supply *)dev_get_drvdata(dev);
+		if (pst->type == POWER_SUPPLY_TYPE_BATTERY) {
+			class_dev_iter_exit(&iter);
+			return pst;
+		}
+	}
+	class_dev_iter_exit(&iter);
+
+	return NULL;
+}
+
+
+/* Function definitions */
+static void lookup_regval(u16 tbl[][2], size_t size, u16 in_val, u8 *out_val)
+{
+	int i;
+	for (i = 1; i < size; ++i)
+		if (in_val < tbl[i][0])
+			break;
+
+	*out_val = (u8)tbl[i-1][1];
+}
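+
+/* Worked example: with the pmic_inlmt table above, lookup_regval() scans
+ * for the first row whose current exceeds in_val and returns the previous
+ * row's register value, so requested limits round down to a supported
+ * value: in_val = 1200 yields CHGRCTRL1_FUSB_INLMT_900, and anything
+ * below 150 yields the 100 mA row.
+ */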
+
+static int interpolate_y(int dx1x0, int dy1y0, int dxx0, int y0)
+{
+	return y0 + DIV_ROUND_CLOSEST((dxx0 * dy1y0), dx1x0);
+}
+
+static int interpolate_x(int dy1y0, int dx1x0, int dyy0, int x0)
+{
+	return x0 + DIV_ROUND_CLOSEST((dyy0 * dx1x0), dy1y0);
+}
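+
+/* Both helpers are plain linear interpolation between two table points:
+ * interpolate_y() computes y = y0 + (x - x0) * (y1 - y0) / (x1 - x0) with
+ * rounding, and interpolate_x() solves the same line for x given y.
+ */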
+
+static int adc_temp_conv(int in_val, int *out_val, int conv)
+{
+	int tbl_row_cnt, i;
+	struct temp_lookup *adc_temp_tbl;
+
+	if (!chc.pdata) {
+		dev_err(chc.dev, "ADC-lookup table not yet available\n");
+		return -ERANGE;
+	}
+
+	tbl_row_cnt = chc.pdata->max_tbl_row_cnt;
+	adc_temp_tbl = chc.pdata->adc_tbl;
+
+	if (conv == ADC_TO_TEMP) {
+		if (!is_valid_adc_code(in_val))
+			return -ERANGE;
+
+		if (in_val == adc_temp_tbl[tbl_row_cnt-1].adc_val)
+			i = tbl_row_cnt - 1;
+		else {
+			for (i = 0; i < tbl_row_cnt; ++i)
+				if (in_val < adc_temp_tbl[i].adc_val)
+					break;
+		}
+
+		*out_val =
+		    interpolate_y((adc_temp_tbl[i].adc_val
+					- adc_temp_tbl[i - 1].adc_val),
+				  (adc_temp_tbl[i].temp
+				   - adc_temp_tbl[i - 1].temp),
+				  (in_val - adc_temp_tbl[i - 1].adc_val),
+				  adc_temp_tbl[i - 1].temp);
+	} else {
+		if (!is_valid_temp(in_val))
+			return -ERANGE;
+
+		if (in_val == adc_temp_tbl[tbl_row_cnt-1].temp)
+			i = tbl_row_cnt - 1;
+		else {
+			for (i = 0; i < tbl_row_cnt; ++i)
+				if (in_val > adc_temp_tbl[i].temp)
+					break;
+		}
+
+		*((short int *)out_val) =
+		    interpolate_x((adc_temp_tbl[i].temp
+					- adc_temp_tbl[i - 1].temp),
+				  (adc_temp_tbl[i].adc_val
+				   - adc_temp_tbl[i - 1].adc_val),
+				  (in_val - adc_temp_tbl[i - 1].temp),
+				  adc_temp_tbl[i - 1].adc_val);
+	}
+	return 0;
+}
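+
+/* Worked example (illustrative table): with adc_tbl entries (adc_val, temp)
+ * of (100, 60) and (200, 40), converting an ADC reading of 150 picks the
+ * pair straddling it and yields 60 + (150 - 100) * (40 - 60) / (200 - 100)
+ * = 50 degrees.
+ */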
+
+static int pmic_write_reg(u16 addr, u8 *val)
+{
+	int ret;
+
+	ret = intel_scu_ipc_iowrite8(addr, *val);
+	if (ret) {
+		dev_err(chc.dev,
+			"Error in intel_scu_ipc_iowrite8 0x%.4x\n", addr);
+		return -EIO;
+	}
+	return 0;
+}
+
+static int pmic_read_reg(u16 addr, u8 *val)
+{
+	int ret;
+
+	ret = intel_scu_ipc_ioread8(addr, val);
+	if (ret) {
+		dev_err(chc.dev,
+			"Error in intel_scu_ipc_ioread8 0x%.4x\n", addr);
+		return -EIO;
+	}
+	return 0;
+}
+
+
+static int __pmic_write_tt(u8 addr, u8 data)
+{
+	int ret;
+
+	ret = intel_scu_ipc_iowrite8(CHRTTADDR_ADDR, addr);
+	if (unlikely(ret))
+		return ret;
+
+	return intel_scu_ipc_iowrite8(CHRTTDATA_ADDR, data);
+}
+
+static inline int pmic_write_tt(u8 addr, u8 data)
+{
+	int ret;
+
+	mutex_lock(&pmic_lock);
+	ret = __pmic_write_tt(addr, data);
+	mutex_unlock(&pmic_lock);
+
+	/* If access is blocked return success to avoid additional
+	*  error handling at client side
+	*/
+	if (ret == -EACCES) {
+		dev_warn(chc.dev, "IPC write blocked due to unsigned kernel/invalid battery\n");
+		ret = 0;
+	}
+	return ret;
+}
+
+static int __pmic_read_tt(u8 addr, u8 *data)
+{
+	int ret;
+
+	ret = intel_scu_ipc_iowrite8(CHRTTADDR_ADDR, addr);
+	if (ret)
+		return ret;
+
+	usleep_range(2000, 3000);
+
+	return intel_scu_ipc_ioread8(CHRTTDATA_ADDR, data);
+}
+
+static inline int pmic_read_tt(u8 addr, u8 *data)
+{
+	int ret;
+
+	mutex_lock(&pmic_lock);
+	ret = __pmic_read_tt(addr, data);
+	mutex_unlock(&pmic_lock);
+
+	return ret;
+}
+
+static int pmic_update_tt(u8 addr, u8 mask, u8 data)
+{
+	u8 tdata;
+	int ret;
+
+	mutex_lock(&pmic_lock);
+	ret = __pmic_read_tt(addr, &tdata);
+	if (unlikely(ret))
+		goto exit;
+
+	tdata = (tdata & ~mask) | (data & mask);
+	ret = __pmic_write_tt(addr, tdata);
+exit:
+	mutex_unlock(&pmic_lock);
+	return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int pmic_chrgr_reg_show(struct seq_file *seq, void *unused)
+{
+	int ret;
+	u16 addr;
+	u16 val1;
+	u8 val;
+
+	addr = *((u8 *)seq->private);
+
+	if (addr == CHRGRIRQ1_ADDR) {
+		val1 = ioread16(chc.pmic_intr_iomap);
+		val = (u8)(val1 >> 8);
+	} else if (addr == CHGRIRQ0_ADDR) {
+		val1 = ioread16(chc.pmic_intr_iomap);
+		val = (u8)val1;
+	} else {
+		ret = pmic_read_reg(addr, &val);
+		if (ret != 0) {
+			dev_err(chc.dev,
+				"Error reading tt register 0x%2x\n",
+				addr);
+			return -EIO;
+		}
+	}
+
+	seq_printf(seq, "0x%x\n", val);
+	return 0;
+}
+
+static int pmic_chrgr_tt_reg_show(struct seq_file *seq, void *unused)
+{
+	int ret;
+	u8 addr;
+	u8 val;
+
+	addr = *((u8 *)seq->private);
+
+	ret = pmic_read_tt(addr, &val);
+	if (ret != 0) {
+		dev_err(chc.dev,
+			"Error reading tt register 0x%2x\n",
+			addr);
+		return -EIO;
+	}
+
+	seq_printf(seq, "0x%x\n", val);
+	return 0;
+}
+
+static int pmic_chrgr_tt_reg_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmic_chrgr_tt_reg_show, inode->i_private);
+}
+
+static int pmic_chrgr_reg_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmic_chrgr_reg_show, inode->i_private);
+}
+
+static struct dentry *charger_debug_dir;
+static struct pmic_regs_def pmic_regs_bc[] = {
+	PMIC_REG_DEF(PMIC_ID_ADDR),
+	PMIC_REG_DEF(IRQLVL1_ADDR),
+	PMIC_REG_DEF(IRQLVL1_MASK_ADDR),
+	PMIC_REG_DEF(CHGRIRQ0_ADDR),
+	PMIC_REG_DEF(SCHGRIRQ0_ADDR),
+	PMIC_REG_DEF(MCHGRIRQ0_ADDR),
+	PMIC_REG_DEF(LOWBATTDET0_ADDR),
+	PMIC_REG_DEF(LOWBATTDET1_ADDR),
+	PMIC_REG_DEF(BATTDETCTRL_ADDR),
+	PMIC_REG_DEF(VBUSDETCTRL_ADDR),
+	PMIC_REG_DEF(VDCINDETCTRL_ADDR),
+	PMIC_REG_DEF(CHRGRIRQ1_ADDR),
+	PMIC_REG_DEF(SCHGRIRQ1_ADDR),
+	PMIC_REG_DEF(MCHGRIRQ1_ADDR),
+	PMIC_REG_DEF(CHGRCTRL0_ADDR),
+	PMIC_REG_DEF(CHGRCTRL1_ADDR),
+	PMIC_REG_DEF(CHGRSTATUS_ADDR),
+	PMIC_REG_DEF(USBIDCTRL_ADDR),
+	PMIC_REG_DEF(USBIDSTAT_ADDR),
+	PMIC_REG_DEF(WAKESRC_ADDR),
+	PMIC_REG_DEF(THRMBATZONE_ADDR_BC),
+	PMIC_REG_DEF(THRMZN0L_ADDR_BC),
+	PMIC_REG_DEF(THRMZN0H_ADDR_BC),
+	PMIC_REG_DEF(THRMZN1L_ADDR_BC),
+	PMIC_REG_DEF(THRMZN1H_ADDR_BC),
+	PMIC_REG_DEF(THRMZN2L_ADDR_BC),
+	PMIC_REG_DEF(THRMZN2H_ADDR_BC),
+	PMIC_REG_DEF(THRMZN3L_ADDR_BC),
+	PMIC_REG_DEF(THRMZN3H_ADDR_BC),
+	PMIC_REG_DEF(THRMZN4L_ADDR_BC),
+	PMIC_REG_DEF(THRMZN4H_ADDR_BC),
+	PMIC_REG_DEF(VBATRSLTH_ADDR_BC),
+	PMIC_REG_DEF(VBATRSLTL_ADDR_BC),
+};
+
+static struct pmic_regs_def pmic_regs_sc[] = {
+	PMIC_REG_DEF(PMIC_ID_ADDR),
+	PMIC_REG_DEF(IRQLVL1_ADDR),
+	PMIC_REG_DEF(IRQLVL1_MASK_ADDR),
+	PMIC_REG_DEF(CHGRIRQ0_ADDR),
+	PMIC_REG_DEF(SCHGRIRQ0_ADDR),
+	PMIC_REG_DEF(MCHGRIRQ0_ADDR),
+	PMIC_REG_DEF(LOWBATTDET0_ADDR),
+	PMIC_REG_DEF(LOWBATTDET1_ADDR),
+	PMIC_REG_DEF(BATTDETCTRL_ADDR),
+	PMIC_REG_DEF(VBUSDETCTRL_ADDR),
+	PMIC_REG_DEF(VDCINDETCTRL_ADDR),
+	PMIC_REG_DEF(CHRGRIRQ1_ADDR),
+	PMIC_REG_DEF(SCHGRIRQ1_ADDR),
+	PMIC_REG_DEF(MCHGRIRQ1_ADDR),
+	PMIC_REG_DEF(CHGRCTRL0_ADDR),
+	PMIC_REG_DEF(CHGRCTRL1_ADDR),
+	PMIC_REG_DEF(CHGRSTATUS_ADDR),
+	PMIC_REG_DEF(USBIDCTRL_ADDR),
+	PMIC_REG_DEF(USBIDSTAT_ADDR),
+	PMIC_REG_DEF(WAKESRC_ADDR),
+	PMIC_REG_DEF(USBPATH_ADDR),
+	PMIC_REG_DEF(USBSRCDETSTATUS_ADDR),
+	PMIC_REG_DEF(THRMBATZONE_ADDR_SC),
+	PMIC_REG_DEF(THRMZN0L_ADDR_SC),
+	PMIC_REG_DEF(THRMZN0H_ADDR_SC),
+	PMIC_REG_DEF(THRMZN1L_ADDR_SC),
+	PMIC_REG_DEF(THRMZN1H_ADDR_SC),
+	PMIC_REG_DEF(THRMZN2L_ADDR_SC),
+	PMIC_REG_DEF(THRMZN2H_ADDR_SC),
+	PMIC_REG_DEF(THRMZN3L_ADDR_SC),
+	PMIC_REG_DEF(THRMZN3H_ADDR_SC),
+	PMIC_REG_DEF(THRMZN4L_ADDR_SC),
+	PMIC_REG_DEF(THRMZN4H_ADDR_SC),
+};
+
+static struct pmic_regs_def pmic_tt_regs[] = {
+	PMIC_REG_DEF(TT_I2CDADDR_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT0OS_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT1OS_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT2OS_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT3OS_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT4OS_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT5OS_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT6OS_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT7OS_ADDR),
+	PMIC_REG_DEF(TT_USBINPUTICCOS_ADDR),
+	PMIC_REG_DEF(TT_USBINPUTICCMASK_ADDR),
+	PMIC_REG_DEF(TT_CHRCVOS_ADDR),
+	PMIC_REG_DEF(TT_CHRCVMASK_ADDR),
+	PMIC_REG_DEF(TT_CHRCCOS_ADDR),
+	PMIC_REG_DEF(TT_CHRCCMASK_ADDR),
+	PMIC_REG_DEF(TT_LOWCHROS_ADDR),
+	PMIC_REG_DEF(TT_LOWCHRMASK_ADDR),
+	PMIC_REG_DEF(TT_WDOGRSTOS_ADDR),
+	PMIC_REG_DEF(TT_WDOGRSTMASK_ADDR),
+	PMIC_REG_DEF(TT_CHGRENOS_ADDR),
+	PMIC_REG_DEF(TT_CHGRENMASK_ADDR),
+	PMIC_REG_DEF(TT_CUSTOMFIELDEN_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT0VAL_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT1VAL_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT2VAL_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT3VAL_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT4VAL_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT5VAL_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT6VAL_ADDR),
+	PMIC_REG_DEF(TT_CHGRINIT7VAL_ADDR),
+	PMIC_REG_DEF(TT_USBINPUTICC100VAL_ADDR),
+	PMIC_REG_DEF(TT_USBINPUTICC150VAL_ADDR),
+	PMIC_REG_DEF(TT_USBINPUTICC500VAL_ADDR),
+	PMIC_REG_DEF(TT_USBINPUTICC900VAL_ADDR),
+	PMIC_REG_DEF(TT_USBINPUTICC1500VAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCVEMRGLOWVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCVCOLDVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCVCOOLVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCVWARMVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCVHOTVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCVEMRGHIVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCCEMRGLOWVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCCCOLDVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCCCOOLVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCCWARMVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCCHOTVAL_ADDR),
+	PMIC_REG_DEF(TT_CHRCCEMRGHIVAL_ADDR),
+	PMIC_REG_DEF(TT_LOWCHRENVAL_ADDR),
+	PMIC_REG_DEF(TT_LOWCHRDISVAL_ADDR),
+};
+
+void dump_pmic_regs(void)
+{
+	int vendor_id = chc.pmic_id & PMIC_VENDOR_ID_MASK;
+	u32 pmic_reg_cnt = 0;
+	u32 reg_index;
+	u8 data;
+	int retval;
+	struct pmic_regs_def *pmic_regs = NULL;
+
+	if (vendor_id == BASINCOVE_VENDORID) {
+		pmic_reg_cnt = ARRAY_SIZE(pmic_regs_bc);
+		pmic_regs = pmic_regs_bc;
+	} else if (vendor_id == SHADYCOVE_VENDORID) {
+		pmic_reg_cnt = ARRAY_SIZE(pmic_regs_sc);
+		pmic_regs = pmic_regs_sc;
+	}
+
+	dev_info(chc.dev, "PMIC Register dump\n");
+	dev_info(chc.dev, "====================\n");
+
+	for (reg_index = 0; reg_index < pmic_reg_cnt; reg_index++) {
+
+		retval = intel_scu_ipc_ioread8(pmic_regs[reg_index].addr,
+				&data);
+		if (retval)
+			dev_err(chc.dev, "Error in reading %x\n",
+				pmic_regs[reg_index].addr);
+		else
+			dev_info(chc.dev, "0x%x=0x%x\n",
+				pmic_regs[reg_index].addr, data);
+	}
+	dev_info(chc.dev, "====================\n");
+}
+
+void dump_pmic_tt_regs(void)
+{
+	u32 pmic_tt_reg_cnt = ARRAY_SIZE(pmic_tt_regs);
+	u32 reg_index;
+	u8 data;
+	int retval;
+
+	dev_info(chc.dev, "PMIC CHRGR TT dump\n");
+	dev_info(chc.dev, "====================\n");
+
+	for (reg_index = 0; reg_index < pmic_tt_reg_cnt; reg_index++) {
+
+		retval = pmic_read_tt(pmic_tt_regs[reg_index].addr, &data);
+		if (retval)
+			dev_err(chc.dev, "Error in reading %x\n",
+				pmic_tt_regs[reg_index].addr);
+		else
+			dev_info(chc.dev, "0x%x=0x%x\n",
+				pmic_tt_regs[reg_index].addr, data);
+	}
+
+	dev_info(chc.dev, "====================\n");
+}
+
+static const struct file_operations pmic_chrgr_reg_fops = {
+	.open = pmic_chrgr_reg_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+static const struct file_operations pmic_chrgr_tt_reg_fops = {
+	.open = pmic_chrgr_tt_reg_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release
+};
+
+static void pmic_debugfs_init(void)
+{
+	struct dentry *fentry;
+	struct dentry *pmic_regs_dir;
+	struct dentry *pmic_tt_regs_dir;
+
+	u32 reg_index;
+	int vendor_id = chc.pmic_id & PMIC_VENDOR_ID_MASK;
+	u32 pmic_reg_cnt = 0;
+	u32 pmic_tt_reg_cnt = ARRAY_SIZE(pmic_tt_regs);
+	char name[PMIC_REG_NAME_LEN] = {0};
+	struct pmic_regs_def *pmic_regs = NULL;
+
+	if (vendor_id == BASINCOVE_VENDORID) {
+		pmic_reg_cnt = ARRAY_SIZE(pmic_regs_bc);
+		pmic_regs = pmic_regs_bc;
+	} else if (vendor_id == SHADYCOVE_VENDORID) {
+		pmic_reg_cnt = ARRAY_SIZE(pmic_regs_sc);
+		pmic_regs = pmic_regs_sc;
+	}
+
+	/* Creating a directory under debug fs for charger */
+	charger_debug_dir = debugfs_create_dir(DRIVER_NAME, NULL);
+	if (charger_debug_dir == NULL)
+		goto debugfs_root_exit;
+
+	/* Create a directory for pmic charger registers */
+	pmic_regs_dir = debugfs_create_dir("pmic_ccsm_regs",
+			charger_debug_dir);
+
+	if (pmic_regs_dir == NULL)
+		goto debugfs_err_exit;
+
+	for (reg_index = 0; reg_index < pmic_reg_cnt; reg_index++) {
+
+		snprintf(name, sizeof(name), "%s",
+			pmic_regs[reg_index].reg_name);
+
+		fentry = debugfs_create_file(name,
+				S_IRUGO,
+				pmic_regs_dir,
+				&pmic_regs[reg_index].addr,
+				&pmic_chrgr_reg_fops);
+
+		if (fentry == NULL)
+			goto debugfs_err_exit;
+	}
+
+	/* Create a directory for pmic tt charger registers */
+	pmic_tt_regs_dir = debugfs_create_dir("pmic_ccsm_tt_regs",
+			charger_debug_dir);
+
+	if (pmic_tt_regs_dir == NULL)
+		goto debugfs_err_exit;
+
+	for (reg_index = 0; reg_index < pmic_tt_reg_cnt; reg_index++) {
+
+		snprintf(name, sizeof(name), "%s",
+			pmic_tt_regs[reg_index].reg_name);
+
+		fentry = debugfs_create_file(name,
+				S_IRUGO,
+				pmic_tt_regs_dir,
+				&pmic_tt_regs[reg_index].addr,
+				&pmic_chrgr_tt_reg_fops);
+
+		if (fentry == NULL)
+			goto debugfs_err_exit;
+	}
+
+	dev_dbg(chc.dev, "Debugfs created successfully\n");
+	return;
+
+debugfs_err_exit:
+	debugfs_remove_recursive(charger_debug_dir);
+debugfs_root_exit:
+	dev_err(chc.dev, "Error creating debugfs entry\n");
+	return;
+}
+
+static void pmic_debugfs_exit(void)
+{
+	if (charger_debug_dir != NULL)
+		debugfs_remove_recursive(charger_debug_dir);
+}
+#endif
+
+static void pmic_bat_zone_changed(void)
+{
+	int retval;
+	int cur_zone;
+	u16 addr = 0;
+	u8 data = 0;
+	struct power_supply *psy_bat;
+	int vendor_id;
+
+	vendor_id = chc.pmic_id & PMIC_VENDOR_ID_MASK;
+	if (vendor_id == BASINCOVE_VENDORID)
+		addr = THRMBATZONE_ADDR_BC;
+	else if (vendor_id == SHADYCOVE_VENDORID)
+		addr = THRMBATZONE_ADDR_SC;
+
+	retval = intel_scu_ipc_ioread8(addr, &data);
+	if (retval) {
+		dev_err(chc.dev, "Error in reading battery zone\n");
+		return;
+	}
+
+	cur_zone = data & THRMBATZONE_MASK;
+	dev_info(chc.dev, "Battery Zone changed. Current zone is %d\n",
+			cur_zone);
+
+	/* If the current zone is the top or the bottom zone, report OVERHEAT */
+	if ((cur_zone == PMIC_BZONE_LOW) || (cur_zone == PMIC_BZONE_HIGH))
+		chc.health = POWER_SUPPLY_HEALTH_OVERHEAT;
+	else
+		chc.health = POWER_SUPPLY_HEALTH_GOOD;
+
+	psy_bat = get_psy_battery();
+
+	if (psy_bat && psy_bat->external_power_changed)
+		psy_bat->external_power_changed(psy_bat);
+}
+
+static void pmic_battery_overheat_handler(bool stat)
+{
+	if (stat)
+		chc.health = POWER_SUPPLY_HEALTH_OVERHEAT;
+	else
+		chc.health = POWER_SUPPLY_HEALTH_GOOD;
+}
+
+int pmic_get_health(void)
+{
+	return chc.health;
+}
+
+int pmic_enable_vbus(bool enable)
+{
+	int ret;
+	int vendor_id;
+
+	vendor_id = chc.pmic_id & PMIC_VENDOR_ID_MASK;
+
+	if (enable) {
+		ret = intel_scu_ipc_update_register(CHGRCTRL0_ADDR,
+				WDT_NOKICK_ENABLE, CHGRCTRL0_WDT_NOKICK_MASK);
+		if (ret)
+			return ret;
+
+		if (vendor_id == SHADYCOVE_VENDORID)
+			ret = intel_scu_ipc_update_register(CHGRCTRL1_ADDR,
+					CHGRCTRL1_OTGMODE_MASK,
+					CHGRCTRL1_OTGMODE_MASK);
+	} else {
+		ret = intel_scu_ipc_update_register(CHGRCTRL0_ADDR,
+				WDT_NOKICK_DISABLE, CHGRCTRL0_WDT_NOKICK_MASK);
+		if (ret)
+			return ret;
+
+		if (vendor_id == SHADYCOVE_VENDORID)
+			ret = intel_scu_ipc_update_register(CHGRCTRL1_ADDR,
+					0x0, CHGRCTRL1_OTGMODE_MASK);
+	}
+
+	/* If access is blocked return success to avoid additional
+	*  error handling at client side
+	*/
+	if (ret == -EACCES) {
+		dev_warn(chc.dev, "IPC blocked due to unsigned kernel/invalid battery\n");
+		ret = 0;
+	}
+
+	return ret;
+}
+
+int pmic_enable_charging(bool enable)
+{
+	int ret;
+	u8 val;
+
+	if (enable) {
+		ret = intel_scu_ipc_update_register(CHGRCTRL1_ADDR,
+			CHGRCTRL1_FTEMP_EVENT_MASK, CHGRCTRL1_FTEMP_EVENT_MASK);
+		if (ret)
+			return ret;
+	}
+
+	val = (enable) ? 0 : EXTCHRDIS_ENABLE;
+
+	ret = intel_scu_ipc_update_register(CHGRCTRL0_ADDR,
+			val, CHGRCTRL0_EXTCHRDIS_MASK);
+	/* If access is blocked return success to avoid additional
+	*  error handling at client side
+	*/
+	if (ret == -EACCES) {
+		dev_warn(chc.dev, "IPC blocked due to unsigned kernel/invalid battery\n");
+		ret = 0;
+	}
+
+	return ret;
+}
+
+static inline int update_zone_cc(int zone, u8 reg_val)
+{
+	u8 addr_cc = TT_CHRCCHOTVAL_ADDR - zone;
+	dev_dbg(chc.dev, "%s:%X=%X\n", __func__, addr_cc, reg_val);
+	return pmic_write_tt(addr_cc, reg_val);
+}
+
+static inline int update_zone_cv(int zone, u8 reg_val)
+{
+	u8 addr_cv = TT_CHRCVHOTVAL_ADDR - zone;
+	dev_dbg(chc.dev, "%s:%X=%X\n", __func__, addr_cv, reg_val);
+	return pmic_write_tt(addr_cv, reg_val);
+}
+
+static inline int update_zone_temp(int zone, u16 adc_val)
+{
+	int ret;
+	u16 addr_tzone;
+	int vendor_id = chc.pmic_id & PMIC_VENDOR_ID_MASK;
+
+	if (vendor_id == BASINCOVE_VENDORID)
+		addr_tzone = THRMZN4H_ADDR_BC - (2 * zone);
+	else if (vendor_id == SHADYCOVE_VENDORID) {
+		/* to take care of address-discontinuity of zone-registers */
+		int offset_zone = zone;
+		if (zone >= 3)
+			offset_zone += 1;
+
+		addr_tzone = THRMZN4H_ADDR_SC - (2 * offset_zone);
+	} else {
+		dev_err(chc.dev, "%s: invalid vendor id %X\n", __func__, vendor_id);
+		return -EINVAL;
+	}
+
+	ret = intel_scu_ipc_iowrite8(addr_tzone, (u8)(adc_val >> 8));
+	if (unlikely(ret))
+		return ret;
+	dev_dbg(chc.dev, "%s:%X:%X=%X\n", __func__, addr_tzone,
+				(addr_tzone+1), adc_val);
+
+	return intel_scu_ipc_iowrite8(addr_tzone+1, (u8)(adc_val & 0xFF));
+}
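+
+/* Example: on ShadyCove the per-zone register pairs are contiguous except
+ * for a gap after zone 2, so zones 0..2 map to THRMZN4H_ADDR_SC minus
+ * 0/2/4 while zones 3..4 use offset_zone = zone + 1 and land at
+ * THRMZN4H_ADDR_SC minus 8/10.
+ */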
+
+int pmic_set_cc(int new_cc)
+{
+	struct ps_pse_mod_prof *bcprof = chc.actual_bcprof;
+	struct ps_pse_mod_prof *r_bcprof = chc.runtime_bcprof;
+	int temp_mon_ranges;
+	int new_cc1;
+	int ret;
+	int i;
+	u8 reg_val = 0;
+
+	/* No need to write PMIC if CC = 0 */
+	if (!new_cc)
+		return 0;
+
+	temp_mon_ranges = min_t(u16, bcprof->temp_mon_ranges,
+			BATT_TEMP_NR_RNG);
+
+	for (i = 0; i < temp_mon_ranges; ++i) {
+		new_cc1 = min_t(int, new_cc,
+				bcprof->temp_mon_range[i].full_chrg_cur);
+
+		if (new_cc1 != r_bcprof->temp_mon_range[i].full_chrg_cur) {
+			if (chc.pdata->cc_to_reg) {
+				chc.pdata->cc_to_reg(new_cc1, &reg_val);
+				ret = update_zone_cc(i, reg_val);
+				if (unlikely(ret))
+					return ret;
+			}
+			r_bcprof->temp_mon_range[i].full_chrg_cur = new_cc1;
+		}
+	}
+
+	/* send the new CC and CV */
+	intel_scu_ipc_update_register(CHGRCTRL1_ADDR,
+		CHGRCTRL1_FTEMP_EVENT_MASK, CHGRCTRL1_FTEMP_EVENT_MASK);
+
+	return 0;
+}
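+
+/* Usage note: pmic_set_cc(1000) clamps each zone's charge current to
+ * min(1000, profile full_chrg_cur), rewrites only the zones whose runtime
+ * value actually changed, and finally pulses CHGRCTRL1_FTEMP_EVENT so the
+ * CCSM picks up the new settings.
+ */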
+
+int pmic_set_cv(int new_cv)
+{
+	struct ps_pse_mod_prof *bcprof = chc.actual_bcprof;
+	struct ps_pse_mod_prof *r_bcprof = chc.runtime_bcprof;
+	int temp_mon_ranges;
+	int new_cv1;
+	int ret;
+	int i;
+	u8 reg_val = 0;
+
+	/* No need to write PMIC if CV = 0 */
+	if (!new_cv)
+		return 0;
+
+	temp_mon_ranges = min_t(u16, bcprof->temp_mon_ranges,
+			BATT_TEMP_NR_RNG);
+
+	for (i = 0; i < temp_mon_ranges; ++i) {
+		new_cv1 = min_t(int, new_cv,
+				bcprof->temp_mon_range[i].full_chrg_vol);
+
+		if (new_cv1 != r_bcprof->temp_mon_range[i].full_chrg_vol) {
+			if (chc.pdata->cv_to_reg) {
+				chc.pdata->cv_to_reg(new_cv1, &reg_val);
+				ret = update_zone_cv(i, reg_val);
+				if (unlikely(ret))
+					return ret;
+			}
+			r_bcprof->temp_mon_range[i].full_chrg_vol = new_cv1;
+		}
+	}
+
+	/* send the new CC and CV */
+	intel_scu_ipc_update_register(CHGRCTRL1_ADDR,
+		CHGRCTRL1_FTEMP_EVENT_MASK, CHGRCTRL1_FTEMP_EVENT_MASK);
+
+	return 0;
+}
+
+int pmic_set_ilimma(int ilim_ma)
+{
+	u8 reg_val;
+	int ret;
+
+	lookup_regval(pmic_inlmt, ARRAY_SIZE(pmic_inlmt),
+			ilim_ma, &reg_val);
+	dev_dbg(chc.dev, "Setting inlmt %d in register %x=%x\n", ilim_ma,
+		CHGRCTRL1_ADDR, reg_val);
+	ret = intel_scu_ipc_iowrite8(CHGRCTRL1_ADDR, reg_val);
+
+	/* If access is blocked return success to avoid additional
+	*  error handling at client side
+	*/
+	if (ret == -EACCES) {
+		dev_warn(chc.dev, "IPC blocked due to unsigned kernel/invalid battery\n");
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/**
+ * pmic_read_adc_val - read ADC value of specified sensors
+ * @channel: channel of the sensor to be sampled
+ * @sensor_val: pointer to the charger property to hold sampled value
+ * @chc :  battery info pointer
+ *
+ * Returns 0 if success
+ */
+static int pmic_read_adc_val(int channel, int *sensor_val,
+			      struct pmic_chrgr_drv_context *chc)
+{
+	int val;
+	int ret;
+	struct iio_channel *indio_chan;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+	indio_chan = iio_st_channel_get("BATTEMP", "BATTEMP0");
+#else
+	indio_chan = iio_channel_get(NULL, "BATTEMP0");
+#endif
+	if (IS_ERR_OR_NULL(indio_chan)) {
+		/* PTR_ERR(NULL) would be 0, so map NULL to a real error */
+		ret = indio_chan ? PTR_ERR(indio_chan) : -ENODEV;
+		goto exit;
+	}
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+	ret = iio_st_read_channel_raw(indio_chan, &val);
+#else
+	ret = iio_read_channel_raw(indio_chan, &val);
+#endif
+	if (ret) {
+		dev_err(chc->dev, "IIO channel read error\n");
+		goto err_exit;
+	}
+
+	switch (channel) {
+	case GPADC_BATTEMP0:
+		ret = CONVERT_ADC_TO_TEMP(val, sensor_val);
+		break;
+	default:
+		dev_err(chc->dev, "invalid sensor%d", channel);
+		ret = -EINVAL;
+	}
+	dev_dbg(chc->dev, "pmic_ccsm pmic_ccsm.0: %s adc val=%x, %d temp=%d\n",
+		__func__, val, val, *sensor_val);
+
+err_exit:
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+	iio_st_channel_release(indio_chan);
+#else
+	iio_channel_release(indio_chan);
+#endif
+exit:
+	return ret;
+}
+
+int pmic_get_battery_pack_temp(int *temp)
+{
+	if (chc.invalid_batt)
+		return -ENODEV;
+	return pmic_read_adc_val(GPADC_BATTEMP0, temp, &chc);
+}
+
+int pmic_get_battery_voltage(int *vol)
+{
+	long tmp;
+	int ret;
+	u8 val = GPADCREG_VIBATT_REQ;
+
+	ret = pmic_write_reg(GPADCREQ_ADDR_BC, &val);
+	if (ret != 0) {
+		dev_err(chc.dev,
+			"Error writing GPADCREQ_ADDR_BC-register\n");
+		return -EINVAL;
+	}
+	msleep(200);
+
+	ret = pmic_read_reg(VBATRSLTH_ADDR_BC, &val);
+	if (ret != 0) {
+		dev_err(chc.dev,
+			"Error reading VBATRSLTH_ADDR_BC-register\n");
+		return -EINVAL;
+	}
+	tmp = val * 0x100;
+
+	ret = pmic_read_reg(VBATRSLTL_ADDR_BC, &val);
+	if (ret != 0) {
+		dev_err(chc.dev,
+			"Error reading VBATRSLTL_ADDR_BC-register\n");
+		return -EINVAL;
+	}
+
+	tmp += val;
+	tmp *= GPADC_MAX_VOLTAGE;
+	tmp /= GPADC_MAX_RANGE;
+	*vol = tmp;
+
+	return 0;
+}
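+
+/* The raw code is assembled as (VBATRSLTH << 8) | VBATRSLTL and scaled to
+ * millivolts as raw * GPADC_MAX_VOLTAGE / GPADC_MAX_RANGE. Illustrative
+ * constants only: with GPADC_MAX_VOLTAGE = 4800 and GPADC_MAX_RANGE =
+ * 1024, a raw code of 512 would read as 2400 mV.
+ */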
+
+static int get_charger_type(void)
+{
+	int ret, i = 0;
+	u8 val;
+	int chgr_type;
+
+	do {
+		ret = pmic_read_reg(USBSRCDETSTATUS_ADDR, &val);
+		if (ret != 0) {
+			dev_err(chc.dev,
+				"Error reading USBSRCDETSTAT-register 0x%2x\n",
+				USBSRCDETSTATUS_ADDR);
+			return 0;
+		}
+
+		i++;
+		dev_info(chc.dev, "Read USBSRCDETSTATUS val: %x\n", val);
+
+		if (val & USBSRCDET_SUSBHWDET_DETSUCC)
+			break;
+		else
+			msleep(USBSRCDET_SLEEP_TIME);
+	} while (i < USBSRCDET_RETRY_CNT);
+
+	if (!(val & USBSRCDET_SUSBHWDET_DETSUCC)) {
+		dev_err(chc.dev, "Charger detection unsuccessful after %dms\n",
+			i * USBSRCDET_SLEEP_TIME);
+		return 0;
+	}
+
+	chgr_type = (val & USBSRCDET_USBSRCRSLT_MASK) >> 2;
+	dev_info(chc.dev, "Charger type after detection complete: %d\n",
+			chgr_type);
+
+	switch (chgr_type) {
+	case PMIC_CHARGER_TYPE_SDP:
+		return POWER_SUPPLY_CHARGER_TYPE_USB_SDP;
+	case PMIC_CHARGER_TYPE_DCP:
+		return POWER_SUPPLY_CHARGER_TYPE_USB_DCP;
+	case PMIC_CHARGER_TYPE_CDP:
+		return POWER_SUPPLY_CHARGER_TYPE_USB_CDP;
+	case PMIC_CHARGER_TYPE_ACA:
+		return POWER_SUPPLY_CHARGER_TYPE_USB_ACA;
+	case PMIC_CHARGER_TYPE_SE1:
+		return POWER_SUPPLY_CHARGER_TYPE_SE1;
+	case PMIC_CHARGER_TYPE_MHL:
+		return POWER_SUPPLY_CHARGER_TYPE_MHL;
+	default:
+		return POWER_SUPPLY_CHARGER_TYPE_NONE;
+	}
+}
+
+static void handle_internal_usbphy_notifications(int mask)
+{
+	struct power_supply_cable_props cap;
+
+	if (mask) {
+		cap.chrg_evt = POWER_SUPPLY_CHARGER_EVENT_CONNECT;
+		cap.chrg_type = get_charger_type();
+		chc.charger_type = cap.chrg_type;
+	} else {
+		cap.chrg_evt = POWER_SUPPLY_CHARGER_EVENT_DISCONNECT;
+		cap.chrg_type = chc.charger_type;
+	}
+
+	if (cap.chrg_type == POWER_SUPPLY_CHARGER_TYPE_USB_SDP)
+		cap.ma = 0;
+	else if ((cap.chrg_type == POWER_SUPPLY_CHARGER_TYPE_USB_DCP)
+			|| (cap.chrg_type == POWER_SUPPLY_CHARGER_TYPE_USB_CDP)
+			|| (cap.chrg_type == POWER_SUPPLY_CHARGER_TYPE_SE1))
+		cap.ma = 1500;
+	else
+		/* no current budget known for other charger types */
+		cap.ma = 0;
+
+	dev_info(chc.dev, "Notifying OTG ev:%d, evt:%d, chrg_type:%d, mA:%d\n",
+			USB_EVENT_CHARGER, cap.chrg_evt, cap.chrg_type,
+			cap.ma);
+	atomic_notifier_call_chain(&chc.otg->notifier,
+			USB_EVENT_CHARGER, &cap);
+}
+
+/* ShadyCove-WA for VBUS removal detect issue */
+int pmic_handle_low_supply(void)
+{
+	int ret;
+	u8 val;
+	int vendor_id = chc.pmic_id & PMIC_VENDOR_ID_MASK;
+
+	dev_info(chc.dev, "Low-supply event received from external-charger\n");
+	if (vendor_id == BASINCOVE_VENDORID || !chc.vbus_connect_status) {
+		dev_err(chc.dev, "Ignore Low-supply event received\n");
+		return 0;
+	}
+
+	msleep(50);
+	ret = pmic_read_reg(SCHGRIRQ1_ADDR, &val);
+	if (ret) {
+		dev_err(chc.dev,
+			"Error reading SCHGRIRQ1-register 0x%2x\n",
+			SCHGRIRQ1_ADDR);
+		return ret;
+	}
+
+	if (!(val & SCHRGRIRQ1_SVBUSDET_MASK)) {
+		int mask = 0;
+
+		dev_info(chc.dev, "USB VBUS Removed. Notifying OTG driver\n");
+		chc.vbus_connect_status = false;
+
+		if (chc.is_internal_usb_phy)
+			handle_internal_usbphy_notifications(mask);
+		else
+			atomic_notifier_call_chain(&chc.otg->notifier,
+					USB_EVENT_VBUS, &mask);
+	}
+
+	return ret;
+}
+
+static void handle_level0_interrupt(u8 int_reg, u8 stat_reg,
+				struct interrupt_info int_info[],
+				int int_info_size)
+{
+	int i;
+	bool int_stat;
+	char *log_msg;
+
+	for (i = 0; i < int_info_size; ++i) {
+
+		/*continue if interrupt register bit is not set */
+		if (!(int_reg & int_info[i].int_reg_mask))
+			continue;
+
+		/*log message if interrupt bit is set */
+		if (int_info[i].log_msg_int_reg_true)
+			dev_err(chc.dev, "%s",
+					int_info[i].log_msg_int_reg_true);
+
+		/* interrupt bit is set.call int handler. */
+		if (int_info[i].int_handle)
+			int_info[i].int_handle();
+
+		/* continue if stat_reg_mask is zero which
+		 *  means ignore status register
+		 */
+		if (!(int_info[i].stat_reg_mask))
+			continue;
+
+		dev_dbg(chc.dev,
+				"stat_reg=%X int_info[i].stat_reg_mask=%X",
+				stat_reg, int_info[i].stat_reg_mask);
+
+		/* check if the interrupt status is true */
+		int_stat = (stat_reg & int_info[i].stat_reg_mask);
+
+		/* log message */
+		log_msg = int_stat ? int_info[i].log_msg_stat_true :
+			int_info[i].log_msg_stat_false;
+
+		if (log_msg)
+			dev_err(chc.dev, "%s", log_msg);
+
+		/* call status handler function */
+		if (int_info[i].stat_handle)
+			int_info[i].stat_handle(int_stat);
+
+	}
+}
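+
+/* Example walk-through using the chgrirq0_info table above: a BZIRQ bit in
+ * CHGRIRQ0 matches the first entry, whose stat_reg_mask is 0, so only its
+ * int_handle (pmic_bat_zone_changed) runs. A BAT_CRIT bit matches the
+ * second entry, and SCHGIRQ0_SBAT_CRIT_MASK in the status register then
+ * selects the log message and the stat_handle(true/false) argument.
+ */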
+
+static void handle_level1_interrupt(u8 int_reg, u8 stat_reg)
+{
+	int mask;
+	u8 usb_id_sts;
+	int ret;
+
+	if (!int_reg)
+		return;
+
+	mask = !!(int_reg & stat_reg);
+	if (int_reg & CHRGRIRQ1_SUSBIDDET_MASK) {
+		if (mask)
+			dev_info(chc.dev,
+				"USB ID Detected. Notifying OTG driver\n");
+		else
+			dev_info(chc.dev,
+				"USB ID Removed. Notifying OTG driver\n");
+		atomic_notifier_call_chain(&chc.otg->notifier,
+				USB_EVENT_ID, &mask);
+	}
+
+	if (int_reg & CHRGRIRQ1_SVBUSDET_MASK) {
+		if (mask) {
+			dev_info(chc.dev,
+				"USB VBUS Detected. Notifying OTG driver\n");
+			chc.vbus_connect_status = true;
+		} else {
+			dev_info(chc.dev, "USB VBUS Removed. Notifying OTG driver\n");
+			chc.vbus_connect_status = false;
+		}
+
+		if (chc.is_internal_usb_phy)
+			handle_internal_usbphy_notifications(mask);
+		else
+			atomic_notifier_call_chain(&chc.otg->notifier,
+					USB_EVENT_VBUS, &mask);
+	}
+}
+
+static void pmic_event_worker(struct work_struct *work)
+{
+	struct pmic_event *evt, *tmp;
+
+	dev_dbg(chc.dev, "%s\n", __func__);
+
+	mutex_lock(&chc.evt_queue_lock);
+	list_for_each_entry_safe(evt, tmp, &chc.evt_queue, node) {
+		list_del(&evt->node);
+
+		dev_dbg(chc.dev, "CHGRIRQ0=%X SCHGRIRQ0=%X CHGRIRQ1=%x SCHGRIRQ1=%X\n",
+				evt->chgrirq0_int, evt->chgrirq0_stat,
+				evt->chgrirq1_int, evt->chgrirq1_stat);
+		if (evt->chgrirq0_int)
+			handle_level0_interrupt(evt->chgrirq0_int,
+				evt->chgrirq0_stat, chgrirq0_info,
+				ARRAY_SIZE(chgrirq0_info));
+
+		if (evt->chgrirq1_stat)
+			handle_level1_interrupt(evt->chgrirq1_int,
+							evt->chgrirq1_stat);
+		kfree(evt);
+	}
+
+	mutex_unlock(&chc.evt_queue_lock);
+}
+
+static irqreturn_t pmic_isr(int irq, void *data)
+{
+	u16 pmic_intr;
+	u8 chgrirq0_int;
+	u8 chgrirq1_int;
+
+	pmic_intr = ioread16(chc.pmic_intr_iomap);
+	chgrirq0_int = (u8)pmic_intr;
+	chgrirq1_int = (u8)(pmic_intr >> 8);
+
+	if (!chgrirq1_int && !(chgrirq0_int & PMIC_CHRGR_INT0_MASK))
+		return IRQ_NONE;
+
+	dev_dbg(chc.dev, "%s", __func__);
+
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t pmic_thread_handler(int id, void *data)
+{
+	u16 pmic_intr;
+	struct pmic_event *evt;
+	int ret;
+
+	evt = kzalloc(sizeof(*evt), GFP_ATOMIC);
+	if (evt == NULL) {
+		dev_dbg(chc.dev, "Error allocating evt structure in fn:%s\n",
+			__func__);
+		return IRQ_NONE;
+	}
+
+	pmic_intr = ioread16(chc.pmic_intr_iomap);
+	evt->chgrirq0_int = (u8)pmic_intr;
+	evt->chgrirq1_int = (u8)(pmic_intr >> 8);
+	dev_dbg(chc.dev, "irq0=%x irq1=%x\n",
+		evt->chgrirq0_int, evt->chgrirq1_int);
+
+	/*
+	 * In case this is an external charger interrupt, clear the
+	 * level 1 IRQ register and let the external charger driver
+	 * handle the interrupt.
+	 */
+
+	if (!(evt->chgrirq1_int) &&
+		!(evt->chgrirq0_int & PMIC_CHRGR_CCSM_INT0_MASK)) {
+		intel_scu_ipc_update_register(IRQLVL1_MASK_ADDR, 0x00,
+				IRQLVL1_CHRGR_MASK);
+		if ((chc.invalid_batt) &&
+			(evt->chgrirq0_int & PMIC_CHRGR_EXT_CHRGR_INT_MASK)) {
+			dev_dbg(chc.dev, "Handling external charger interrupt!!\n");
+			kfree(evt);
+			return IRQ_HANDLED;
+		}
+		kfree(evt);
+		dev_dbg(chc.dev, "Unhandled interrupt!!\n");
+		return IRQ_NONE;
+	}
+
+	if (evt->chgrirq0_int & PMIC_CHRGR_CCSM_INT0_MASK) {
+		ret = intel_scu_ipc_ioread8(SCHGRIRQ0_ADDR,
+				&evt->chgrirq0_stat);
+		if (ret) {
+			dev_err(chc.dev,
+				"%s: Error(%d) in intel_scu_ipc_ioread8. Failed to read SCHGRIRQ0_ADDR\n",
+					__func__, ret);
+			kfree(evt);
+			goto end;
+		}
+	}
+	if (evt->chgrirq1_int) {
+		ret = intel_scu_ipc_ioread8(SCHGRIRQ1_ADDR,
+				&evt->chgrirq1_stat);
+		if (ret) {
+			dev_err(chc.dev,
+				"%s: Error(%d) in intel_scu_ipc_ioread8. Failed to read SCHGRIRQ1_ADDR\n",
+					__func__, ret);
+			kfree(evt);
+			goto end;
+		}
+	}
+
+	INIT_LIST_HEAD(&evt->node);
+
+	mutex_lock(&chc.evt_queue_lock);
+	list_add_tail(&evt->node, &chc.evt_queue);
+	mutex_unlock(&chc.evt_queue_lock);
+
+	queue_work(system_nrt_wq, &chc.evt_work);
+
+end:
+	/*clear first level IRQ */
+	dev_dbg(chc.dev, "Clearing IRQLVL1_MASK_ADDR\n");
+	intel_scu_ipc_update_register(IRQLVL1_MASK_ADDR, 0x00,
+			IRQLVL1_CHRGR_MASK);
+
+	return IRQ_HANDLED;
+}
+
+static int pmic_init(void)
+{
+	int ret = 0, i, temp_mon_ranges;
+	u16 adc_val;
+	u8 reg_val = 0;
+	struct ps_pse_mod_prof *bcprof = chc.actual_bcprof;
+
+	temp_mon_ranges = min_t(u16, bcprof->temp_mon_ranges,
+			BATT_TEMP_NR_RNG);
+	for (i = 0; i < temp_mon_ranges; ++i) {
+		ret =
+		CONVERT_TEMP_TO_ADC(bcprof->temp_mon_range[i].temp_up_lim,
+				(int *)&adc_val);
+		if (unlikely(ret)) {
+			dev_err(chc.dev,
+				"Error converting temperature for zone %d!!\n",
+				i);
+			return ret;
+		}
+		ret = update_zone_temp(i, adc_val);
+		if (unlikely(ret)) {
+			dev_err(chc.dev,
+				"Error updating zone temp for zone %d\n",
+				i);
+			return ret;
+		}
+
+		if (chc.pdata->cc_to_reg)
+			chc.pdata->cc_to_reg(bcprof->temp_mon_range[i].
+					full_chrg_cur, &reg_val);
+
+		ret = update_zone_cc(i, reg_val);
+		if (unlikely(ret)) {
+			dev_err(chc.dev,
+				"Error updating zone cc for zone %d\n",
+				i);
+			return ret;
+		}
+
+		if (chc.pdata->cv_to_reg)
+			chc.pdata->cv_to_reg(bcprof->temp_mon_range[i].
+					full_chrg_vol, &reg_val);
+
+		ret = update_zone_cv(i, reg_val);
+		if (unlikely(ret)) {
+			dev_err(chc.dev,
+				"Error updating zone cv for zone %d\n",
+				i);
+			return ret;
+		}
+
+		/* Write lowest temp limit */
+		if (i == (temp_mon_ranges - 1)) {
+			ret = CONVERT_TEMP_TO_ADC(bcprof->temp_low_lim,
+							(int *)&adc_val);
+			if (unlikely(ret)) {
+				dev_err(chc.dev,
+					"Error converting low lim temp!!\n");
+				return ret;
+			}
+
+			ret = update_zone_temp(i+1, adc_val);
+
+			if (unlikely(ret)) {
+				dev_err(chc.dev,
+					"Error updating last temp for zone %d\n",
+					i+1);
+				return ret;
+			}
+		}
+	}
+	ret = pmic_update_tt(TT_CUSTOMFIELDEN_ADDR,
+				TT_HOT_COLD_LC_MASK,
+				TT_HOT_COLD_LC_DIS);
+
+	if (unlikely(ret)) {
+		dev_err(chc.dev, "Error updating TT_CUSTOMFIELD_EN reg\n");
+		return ret;
+	}
+
+	if (chc.pdata->inlmt_to_reg)
+		chc.pdata->inlmt_to_reg(USBINPUTICC100VAL, &reg_val);
+
+	ret = pmic_write_tt(TT_USBINPUTICC100VAL_ADDR, reg_val);
+	return ret;
+}
+
+static inline void print_ps_pse_mod_prof(struct ps_pse_mod_prof *bcprof)
+{
+	int i, temp_mon_ranges;
+
+	dev_info(chc.dev, "ChrgProf: batt_id:%s\n", bcprof->batt_id);
+	dev_info(chc.dev, "ChrgProf: battery_type:%u\n", bcprof->battery_type);
+	dev_info(chc.dev, "ChrgProf: capacity:%u\n", bcprof->capacity);
+	dev_info(chc.dev, "ChrgProf: voltage_max:%u\n", bcprof->voltage_max);
+	dev_info(chc.dev, "ChrgProf: chrg_term_ma:%u\n", bcprof->chrg_term_ma);
+	dev_info(chc.dev, "ChrgProf: low_batt_mV:%u\n", bcprof->low_batt_mV);
+	dev_info(chc.dev, "ChrgProf: disch_tmp_ul:%d\n", bcprof->disch_tmp_ul);
+	dev_info(chc.dev, "ChrgProf: disch_tmp_ll:%d\n", bcprof->disch_tmp_ll);
+	dev_info(chc.dev, "ChrgProf: temp_mon_ranges:%u\n",
+			bcprof->temp_mon_ranges);
+	temp_mon_ranges = min_t(u16, bcprof->temp_mon_ranges,
+			BATT_TEMP_NR_RNG);
+
+	for (i = 0; i < temp_mon_ranges; ++i) {
+		dev_info(chc.dev, "ChrgProf: temp_up_lim[%d]:%d\n",
+				i, bcprof->temp_mon_range[i].temp_up_lim);
+		dev_info(chc.dev, "ChrgProf: full_chrg_vol[%d]:%d\n",
+				i, bcprof->temp_mon_range[i].full_chrg_vol);
+		dev_info(chc.dev, "ChrgProf: full_chrg_cur[%d]:%d\n",
+				i, bcprof->temp_mon_range[i].full_chrg_cur);
+		dev_info(chc.dev, "ChrgProf: maint_chrg_vol_ll[%d]:%d\n",
+				i, bcprof->temp_mon_range[i].maint_chrg_vol_ll);
+		dev_info(chc.dev, "ChrgProf: maint_chrg_vol_ul[%d]:%d\n",
+				i, bcprof->temp_mon_range[i].maint_chrg_vol_ul);
+		dev_info(chc.dev, "ChrgProf: maint_chrg_cur[%d]:%d\n",
+				i, bcprof->temp_mon_range[i].maint_chrg_cur);
+	}
+	dev_info(chc.dev, "ChrgProf: temp_low_lim:%d\n", bcprof->temp_low_lim);
+}
+
+static int find_tempzone_index(short int *interval,
+				int *num_zones,
+				short int *temp_up_lim)
+{
+	struct ps_pse_mod_prof *bprof = chc.sfi_bcprof->batt_prof;
+	int up_lim_index = 0, low_lim_index = -1;
+	int diff = 0;
+	int i;
+
+	*num_zones = MIN_BATT_PROF - bprof->temp_mon_ranges + 1;
+	if ((*num_zones) <= 0)
+		return 0;
+
+	for (i = 0 ; i < bprof->temp_mon_ranges ; i++) {
+		if (bprof->temp_mon_range[i].temp_up_lim == BATT_TEMP_WARM)
+			up_lim_index = i;
+	}
+
+	low_lim_index = up_lim_index + 1;
+
+	if (low_lim_index == bprof->temp_mon_ranges)
+		diff = bprof->temp_low_lim -
+			bprof->temp_mon_range[up_lim_index].temp_up_lim;
+	else
+		diff = bprof->temp_mon_range[low_lim_index].temp_up_lim -
+			bprof->temp_mon_range[up_lim_index].temp_up_lim;
+
+	*interval = diff / (*num_zones);
+	*temp_up_lim = bprof->temp_mon_range[up_lim_index].temp_up_lim;
+
+	return up_lim_index;
+}
+
+
+static void set_pmic_batt_prof(struct ps_pse_mod_prof *new_prof,
+				struct ps_pse_mod_prof *bprof)
+{
+	int num_zones;
+	int split_index;
+	int i, j = 0;
+	short int temp_up_lim;
+	short int interval;
+
+	if ((new_prof == NULL) || (bprof == NULL))
+		return;
+
+	if (!NEED_ZONE_SPLIT(bprof)) {
+		dev_info(chc.dev, "No need to split the zones!!\n");
+		memcpy(new_prof, bprof, sizeof(struct ps_pse_mod_prof));
+		return;
+	}
+
+	strcpy(&(new_prof->batt_id[0]), &(bprof->batt_id[0]));
+	new_prof->battery_type = bprof->battery_type;
+	new_prof->capacity = bprof->capacity;
+	new_prof->voltage_max =  bprof->voltage_max;
+	new_prof->chrg_term_ma = bprof->chrg_term_ma;
+	new_prof->low_batt_mV =  bprof->low_batt_mV;
+	new_prof->disch_tmp_ul = bprof->disch_tmp_ul;
+	new_prof->disch_tmp_ll = bprof->disch_tmp_ll;
+
+	split_index = find_tempzone_index(&interval, &num_zones, &temp_up_lim);
+
+	for (i = 0 ; i < bprof->temp_mon_ranges; i++) {
+		if ((i == split_index) && (num_zones > 0)) {
+			for (j = 0; j < num_zones; j++,
+					temp_up_lim += interval) {
+				memcpy(&new_prof->temp_mon_range[i+j],
+					&bprof->temp_mon_range[i],
+					sizeof(bprof->temp_mon_range[i]));
+				new_prof->temp_mon_range[i+j].temp_up_lim =
+					temp_up_lim;
+			}
+			j--;
+		} else {
+			memcpy(&new_prof->temp_mon_range[i+j],
+				&bprof->temp_mon_range[i],
+				sizeof(bprof->temp_mon_range[i]));
+		}
+	}
+
+	new_prof->temp_mon_ranges = i+j;
+	new_prof->temp_low_lim = bprof->temp_low_lim;
+
+	return;
+}
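+
+/* Worked example (illustrative): if the SFI profile carries 3 monitoring
+ * ranges and MIN_BATT_PROF is 4, find_tempzone_index() reports
+ * num_zones = 2 and the range whose temp_up_lim equals BATT_TEMP_WARM is
+ * duplicated into two sub-zones of equal temperature interval, so the
+ * CCSM sees the minimum number of zones it expects.
+ */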
+
+
+static int pmic_check_initial_events(void)
+{
+	struct pmic_event *evt;
+	int ret;
+
+	evt = kzalloc(sizeof(struct pmic_event), GFP_KERNEL);
+	if (evt == NULL) {
+		dev_dbg(chc.dev, "Error allocating evt structure in fn:%s\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	ret = intel_scu_ipc_ioread8(SCHGRIRQ0_ADDR, &evt->chgrirq0_stat);
+	evt->chgrirq0_int = evt->chgrirq0_stat;
+	ret = intel_scu_ipc_ioread8(SCHGRIRQ1_ADDR, &evt->chgrirq1_stat);
+	evt->chgrirq1_int = evt->chgrirq1_stat;
+
+	if (evt->chgrirq1_stat || evt->chgrirq0_int) {
+		INIT_LIST_HEAD(&evt->node);
+		mutex_lock(&chc.evt_queue_lock);
+		list_add_tail(&evt->node, &chc.evt_queue);
+		mutex_unlock(&chc.evt_queue_lock);
+		schedule_work(&chc.evt_work);
+	}
+
+	pmic_bat_zone_changed();
+
+	return ret;
+}
+
+static ssize_t
+pmic_show_battery_level(struct device *d, struct device_attribute *attr,
+			char *buf)
+{
+	int ret = 0;
+
+	ret = pmic_get_battery_voltage(&chc.battry_voltage);
+	if (ret != 0) {
+		dev_err(chc.dev, "Error reading battery voltage\n");
+		return -EIO;
+	}
+
+	return sprintf(buf, "0x%x\n", chc.battry_voltage);
+}
+
+static DEVICE_ATTR(battery_level, S_IRUGO, pmic_show_battery_level, NULL);
+
+static struct attribute *pmic_sysfs_entries[] = {
+	&dev_attr_battery_level.attr,
+	NULL
+};
+
+static struct attribute_group pmic_attribute_group = {
+	.name  = NULL,		/* put in device directory */
+	.attrs = pmic_sysfs_entries,
+};
+
+/**
+ * pmic_charger_probe - PMIC charger probe function
+ * @pdev: pmic platform device structure
+ * Context: can sleep
+ *
+ * pmic charger driver initializes its internal data
+ * structure and other  infrastructure components for it
+ * to work as expected.
+ */
+static int pmic_chrgr_probe(struct platform_device *pdev)
+{
+	int retval = 0;
+	u8 val;
+
+	if (!pdev)
+		return -ENODEV;
+
+	chc.health = POWER_SUPPLY_HEALTH_UNKNOWN;
+	chc.dev = &pdev->dev;
+	chc.irq = platform_get_irq(pdev, 0);
+	chc.pdata = pdev->dev.platform_data;
+	platform_set_drvdata(pdev, &chc);
+
+	if (chc.pdata == NULL) {
+		dev_err(chc.dev, "Platform data not initialized\n");
+		return -EFAULT;
+	}
+
+	retval = intel_scu_ipc_ioread8(PMIC_ID_ADDR, &chc.pmic_id);
+	if (retval) {
+		dev_err(chc.dev,
+			"Error reading PMIC ID register\n");
+		return retval;
+	}
+
+	dev_info(chc.dev, "PMIC-ID: %x\n", chc.pmic_id);
+	if ((chc.pmic_id & PMIC_VENDOR_ID_MASK) == SHADYCOVE_VENDORID) {
+		retval = pmic_read_reg(USBPATH_ADDR, &val);
+		if (retval) {
+			dev_err(chc.dev,
+				"Error reading USBPATH-register 0x%2x\n",
+				USBPATH_ADDR);
+			return retval;
+		}
+
+		if (val & USBPATH_USBSEL_MASK) {
+			dev_info(chc.dev, "SOC-Internal-USBPHY used\n");
+			chc.is_internal_usb_phy = true;
+		} else
+			dev_info(chc.dev, "External-USBPHY used\n");
+	}
+
+	chc.sfi_bcprof = kzalloc(sizeof(struct ps_batt_chg_prof),
+				GFP_KERNEL);
+	if (chc.sfi_bcprof == NULL) {
+		dev_err(chc.dev,
+			"Error allocating memory for SFI battery profile\n");
+		return -ENOMEM;
+	}
+
+	retval = get_batt_prop(chc.sfi_bcprof);
+	if (retval) {
+		dev_err(chc.dev,
+			"Error reading battery profile from battid framework\n");
+		kfree(chc.sfi_bcprof);
+		chc.invalid_batt = true;
+		chc.sfi_bcprof = NULL;
+	}
+
+	retval = intel_scu_ipc_update_register(CHGRCTRL0_ADDR, SWCONTROL_ENABLE,
+			CHGRCTRL0_SWCONTROL_MASK);
+	if (retval)
+		dev_err(chc.dev, "Error enabling sw control. Charging may continue in h/w control mode\n");
+
+	if (!chc.invalid_batt) {
+		chc.actual_bcprof = kzalloc(sizeof(struct ps_pse_mod_prof),
+					GFP_KERNEL);
+		if (chc.actual_bcprof == NULL) {
+			dev_err(chc.dev,
+				"Error allocating mem for local battery profile\n");
+			kfree(chc.sfi_bcprof);
+			return -ENOMEM;
+		}
+
+		chc.runtime_bcprof = kzalloc(sizeof(struct ps_pse_mod_prof),
+					GFP_KERNEL);
+		if (chc.runtime_bcprof == NULL) {
+			dev_err(chc.dev,
+			"Error allocating mem for runtime batt profile\n");
+			kfree(chc.sfi_bcprof);
+			kfree(chc.actual_bcprof);
+			return -ENOMEM;
+		}
+
+		set_pmic_batt_prof(chc.actual_bcprof,
+				chc.sfi_bcprof->batt_prof);
+		print_ps_pse_mod_prof(chc.actual_bcprof);
+		retval = pmic_init();
+		if (retval)
+			dev_err(chc.dev, "Error in Initializing PMIC. Continue in h/w charging mode\n");
+
+		memcpy(chc.runtime_bcprof, chc.actual_bcprof,
+			sizeof(struct ps_pse_mod_prof));
+	}
+
+	chc.pmic_intr_iomap = ioremap_nocache(PMIC_SRAM_INTR_ADDR, 8);
+	if (!chc.pmic_intr_iomap) {
+		dev_err(&pdev->dev, "ioremap Failed\n");
+		retval = -ENOMEM;
+		goto ioremap_failed;
+	}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+	chc.otg = usb_get_transceiver();
+#else
+	chc.otg = usb_get_phy(USB_PHY_TYPE_USB2);
+#endif
+	if (!chc.otg || IS_ERR(chc.otg)) {
+		dev_err(&pdev->dev, "Failed to get otg transceiver!!\n");
+		retval = -ENOMEM;
+		goto otg_req_failed;
+	}
+
+	INIT_WORK(&chc.evt_work, pmic_event_worker);
+	INIT_LIST_HEAD(&chc.evt_queue);
+	mutex_init(&chc.evt_queue_lock);
+
+	/* register interrupt */
+	retval = request_threaded_irq(chc.irq, pmic_isr,
+			pmic_thread_handler,
+			IRQF_SHARED | IRQF_NO_SUSPEND,
+			DRIVER_NAME, &chc);
+	if (retval) {
+		dev_err(&pdev->dev,
+			"Error in request_threaded_irq (irq %d)\n",
+			chc.irq);
+		goto otg_req_failed;
+	}
+
+	retval = pmic_check_initial_events();
+	if (unlikely(retval)) {
+		dev_err(&pdev->dev,
+			"Error posting initial events\n");
+		goto req_irq_failed;
+	}
+
+	/* unmask charger interrupts in the second level IRQ registers */
+	retval = intel_scu_ipc_update_register(MCHGRIRQ0_ADDR, 0x00,
+			PMIC_CHRGR_INT0_MASK);
+	if (unlikely(retval))
+		goto unmask_irq_failed;
+
+	retval = intel_scu_ipc_iowrite8(MCHGRIRQ1_ADDR, 0x00);
+	if (unlikely(retval))
+		goto unmask_irq_failed;
+
+
+	/* unmask IRQLVL1 register */
+	retval = intel_scu_ipc_update_register(IRQLVL1_MASK_ADDR, 0x00,
+			IRQLVL1_CHRGR_MASK);
+	if (unlikely(retval))
+		goto unmask_irq_failed;
+
+	retval = intel_scu_ipc_update_register(USBIDCTRL_ADDR,
+			 ACADETEN_MASK | USBIDEN_MASK,
+			ACADETEN_MASK | USBIDEN_MASK);
+	if (unlikely(retval))
+		goto unmask_irq_failed;
+
+	retval = sysfs_create_group(&pdev->dev.kobj, &pmic_attribute_group);
+	if (retval) {
+		dev_err(&pdev->dev, "failed to create sysfs device attributes\n");
+		goto unmask_irq_failed;
+	}
+
+	chc.health = POWER_SUPPLY_HEALTH_GOOD;
+#ifdef CONFIG_DEBUG_FS
+	pmic_debugfs_init();
+#endif
+	return 0;
+
+unmask_irq_failed:
+req_irq_failed:
+	free_irq(chc.irq, &chc);
+otg_req_failed:
+	iounmap(chc.pmic_intr_iomap);
+ioremap_failed:
+	kfree(chc.sfi_bcprof);
+	kfree(chc.actual_bcprof);
+	kfree(chc.runtime_bcprof);
+	return retval;
+}
+
+static void pmic_chrgr_do_exit_ops(struct pmic_chrgr_drv_context *chc)
+{
+	/*TODO:
+	 * If charger is connected send IPC message to SCU to continue charging
+	 */
+#ifdef CONFIG_DEBUG_FS
+	pmic_debugfs_exit();
+#endif
+}
+
+/**
+ * pmic_charger_remove - PMIC Charger driver remove
+ * @pdev: PMIC charger platform device structure
+ * Context: can sleep
+ *
+ * PMIC charger finalizes its internal data structure and other
+ * infrastructure components that it initialized in
+ * pmic_chrgr_probe.
+ */
+static int pmic_chrgr_remove(struct platform_device *pdev)
+{
+	struct pmic_chrgr_drv_context *chc = platform_get_drvdata(pdev);
+
+	if (chc) {
+		pmic_chrgr_do_exit_ops(chc);
+		free_irq(chc->irq, chc);
+		iounmap(chc->pmic_intr_iomap);
+		kfree(chc->sfi_bcprof);
+		kfree(chc->actual_bcprof);
+		kfree(chc->runtime_bcprof);
+		sysfs_remove_group(&pdev->dev.kobj, &pmic_attribute_group);
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int pmic_chrgr_suspend(struct device *dev)
+{
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+
+static int pmic_chrgr_resume(struct device *dev)
+{
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int pmic_chrgr_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+
+static int pmic_chrgr_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+
+static int pmic_chrgr_runtime_idle(struct device *dev)
+{
+	dev_dbg(dev, "%s called\n", __func__);
+	return 0;
+}
+#endif
+
+/*********************************************************************
+ *		Driver initialisation and finalization
+ *********************************************************************/
+
+static const struct dev_pm_ops pmic_chrgr_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pmic_chrgr_suspend,
+				pmic_chrgr_resume)
+	SET_RUNTIME_PM_OPS(pmic_chrgr_runtime_suspend,
+				pmic_chrgr_runtime_resume,
+				pmic_chrgr_runtime_idle)
+};
+
+static struct platform_driver pmic_chrgr_driver = {
+	.driver = {
+		   .name = DRIVER_NAME,
+		   .owner = THIS_MODULE,
+		   .pm = &pmic_chrgr_pm_ops,
+		   },
+	.probe = pmic_chrgr_probe,
+	.remove = pmic_chrgr_remove,
+};
+
+static int pmic_chrgr_init(void)
+{
+	return platform_driver_register(&pmic_chrgr_driver);
+}
+
+static void pmic_chrgr_exit(void)
+{
+	platform_driver_unregister(&pmic_chrgr_driver);
+}
+
+static int pmic_ccsm_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed pmic_ccsm rpmsg device\n");
+
+	ret = pmic_chrgr_init();
+
+out:
+	return ret;
+}
+
+static void pmic_ccsm_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	pmic_chrgr_exit();
+	dev_info(&rpdev->dev, "Removed pmic_ccsm rpmsg device\n");
+}
+
+static void pmic_ccsm_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id pmic_ccsm_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_pmic_ccsm" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, pmic_ccsm_rpmsg_id_table);
+
+static struct rpmsg_driver pmic_ccsm_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= pmic_ccsm_rpmsg_id_table,
+	.probe		= pmic_ccsm_rpmsg_probe,
+	.callback	= pmic_ccsm_rpmsg_cb,
+	.remove		= pmic_ccsm_rpmsg_remove,
+};
+
+static int __init pmic_ccsm_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&pmic_ccsm_rpmsg);
+}
+
+static void __exit pmic_ccsm_rpmsg_exit(void)
+{
+	unregister_rpmsg_driver(&pmic_ccsm_rpmsg);
+}
+/* Defer the init call so that dependent drivers will be loaded. Using
+ * async allows parallel driver initialization. */
+late_initcall(pmic_ccsm_rpmsg_init);
+module_exit(pmic_ccsm_rpmsg_exit);
+
+MODULE_AUTHOR("Jenny TC <jenny.tc@intel.com>");
+MODULE_DESCRIPTION("PMIC Charger Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/pmic_ccsm.h b/drivers/power/pmic_ccsm.h
new file mode 100644
index 0000000..77b6b2e
--- /dev/null
+++ b/drivers/power/pmic_ccsm.h
@@ -0,0 +1,366 @@
+/*
+ * pmic_ccsm.h - Intel MID PMIC CCSM Driver header file
+ *
+ * Copyright (C) 2011 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Jenny TC <jenny.tc@intel.com>
+ */
+
+#ifndef __PMIC_CCSM_H__
+#define __PMIC_CCSM_H__
+
+#include <asm/pmic_pdata.h>
+/*********************************************************************
+ *		Generic defines
+ *********************************************************************/
+
+#define D7 (1 << 7)
+#define D6 (1 << 6)
+#define D5 (1 << 5)
+#define D4 (1 << 4)
+#define D3 (1 << 3)
+#define D2 (1 << 2)
+#define D1 (1 << 1)
+#define D0 (1 << 0)
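+/* Dn selects bit n of an 8-bit PMIC register, e.g. D5 == 0x20 */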
+
+#define PMIC_ID_ADDR	0x00
+
+#define PMIC_VENDOR_ID_MASK	(0x03 << 6)
+#define PMIC_MINOR_REV_MASK	0x07
+#define PMIC_MAJOR_REV_MASK	(0x07 << 3)
+
+#define BASINCOVE_VENDORID	(0x03 << 6)
+#define SHADYCOVE_VENDORID	0x00
+
+#define BC_PMIC_MAJOR_REV_A0	0x00
+#define BC_PMIC_MAJOR_REV_B0	(0x01 << 3)
+
+#define PMIC_BZONE_LOW 0
+#define PMIC_BZONE_HIGH 5
+
+#define IRQLVL1_ADDR			0x01
+#define IRQLVL1_MASK_ADDR		0x0c
+#define IRQLVL1_CHRGR_MASK		D5
+
+#define THRMZN0H_ADDR_BC		0xCE
+#define THRMZN0L_ADDR_BC		0xCF
+#define THRMZN1H_ADDR_BC		0xD0
+#define THRMZN1L_ADDR_BC		0xD1
+#define THRMZN2H_ADDR_BC		0xD2
+#define THRMZN2L_ADDR_BC		0xD3
+#define THRMZN3H_ADDR_BC		0xD4
+#define THRMZN3L_ADDR_BC		0xD5
+#define THRMZN4H_ADDR_BC		0xD6
+#define THRMZN4L_ADDR_BC		0xD7
+
+#define GPADCREQ_ADDR_BC		0xDC
+#define GPADCREG_VIBATT_REQ		(0x01 << 5)
+
+#define VBATRSLTH_ADDR_BC		0xE9
+#define VBATRSLTL_ADDR_BC		0xEA
+
+#define THRMZN0H_ADDR_SC		0xD7
+#define THRMZN0L_ADDR_SC		0xD8
+#define THRMZN1H_ADDR_SC		0xD9
+#define THRMZN1L_ADDR_SC		0xDA
+#define THRMZN2H_ADDR_SC		0xDD
+#define THRMZN2L_ADDR_SC		0xDE
+#define THRMZN3H_ADDR_SC		0xDF
+#define THRMZN3L_ADDR_SC		0xE0
+#define THRMZN4H_ADDR_SC		0xE1
+#define THRMZN4L_ADDR_SC		0xE2
+
+#define CHGRIRQ0_ADDR			0x07
+#define CHGIRQ0_BZIRQ_MASK		D7
+#define CHGIRQ0_BAT_CRIT_MASK		D6
+#define CHGIRQ0_BAT1_ALRT_MASK		D5
+#define CHGIRQ0_BAT0_ALRT_MASK		D4
+
+#define MCHGRIRQ0_ADDR			0x12
+#define MCHGIRQ0_RSVD_MASK		D7
+#define MCHGIRQ0_MBAT_CRIT_MASK		D6
+#define MCHGIRQ0_MBAT1_ALRT_MASK	D5
+#define MCHGIRQ0_MBAT0_ALRT_MASK	D4
+
+#define SCHGRIRQ0_ADDR			0x4E
+#define SCHGIRQ0_RSVD_MASK		D7
+#define SCHGIRQ0_SBAT_CRIT_MASK		D6
+#define SCHGIRQ0_SBAT1_ALRT_MASK	D5
+#define SCHGIRQ0_SBAT0_ALRT_MASK	D4
+
+#define LOWBATTDET0_ADDR		0x2C
+#define LOWBATTDET1_ADDR		0x2D
+#define BATTDETCTRL_ADDR		0x2E
+#define VBUSDETCTRL_ADDR		0x50
+#define VDCINDETCTRL_ADDR		0x51
+
+#define CHRGRIRQ1_ADDR			0x08
+#define CHRGRIRQ1_SUSBIDDET_MASK	D3
+#define CHRGRIRQ1_SBATTDET_MASK		D2
+#define CHRGRIRQ1_SDCDET_MASK		D1
+#define CHRGRIRQ1_SVBUSDET_MASK		D0
+#define MCHGRIRQ1_ADDR			0x13
+#define MCHRGRIRQ1_SUSBIDDET_MASK	D3
+#define MCHRGRIRQ1_SBATTDET_MASK	D2
+#define MCHRGRIRQ1_SDCDET_MASK		D1
+#define MCHRGRIRQ1_SVBUSDET_MASK	D0
+#define SCHGRIRQ1_ADDR			0x4F
+#define SCHRGRIRQ1_SUSBIDDET_MASK	D3
+#define SCHRGRIRQ1_SBATTDET_MASK	D2
+#define SCHRGRIRQ1_SDCDET_MASK		D1
+#define SCHRGRIRQ1_SVBUSDET_MASK	D0
+
+#define PMIC_CHRGR_INT0_MASK		0xB1
+#define PMIC_CHRGR_CCSM_INT0_MASK	0xB0
+#define PMIC_CHRGR_EXT_CHRGR_INT_MASK	0x01
+
+#define CHGRCTRL0_ADDR			0x4B
+#define CHGRCTRL0_WDT_NOKICK_MASK	D7
+#define CHGRCTRL0_DBPOFF_MASK		D6
+#define CHGRCTRL0_CCSM_OFF_MASK		D5
+#define CHGRCTRL0_TTLCK_MASK		D4
+#define CHGRCTRL0_SWCONTROL_MASK	D3
+#define CHGRCTRL0_EXTCHRDIS_MASK	D2
+#define CHRCTRL0_EMRGCHREN_MASK		D1
+#define CHRCTRL0_CHGRRESET_MASK		D0
+
+#define WDT_NOKICK_ENABLE		(0x01 << 7)
+#define WDT_NOKICK_DISABLE		(~WDT_NOKICK_ENABLE & 0xFF)
+
+#define EXTCHRDIS_ENABLE		(0x01 << 2)
+#define EXTCHRDIS_DISABLE		(~EXTCHRDIS_ENABLE & 0xFF)
+#define SWCONTROL_ENABLE		(0x01 << 3)
+#define EMRGCHREN_ENABLE		(0x01 << 1)
+
+#define CHGRCTRL1_ADDR			0x4C
+#define CHGRCTRL1_DBPEN_MASK		D7
+#define CHGRCTRL1_OTGMODE_MASK		D6
+#define CHGRCTRL1_FTEMP_EVENT_MASK	D5
+#define CHGRCTRL1_FUSB_INLMT_1500	D4
+#define CHGRCTRL1_FUSB_INLMT_900	D3
+#define CHGRCTRL1_FUSB_INLMT_500	D2
+#define CHGRCTRL1_FUSB_INLMT_150	D1
+#define CHGRCTRL1_FUSB_INLMT_100	D0
+
+#define CHGRSTATUS_ADDR			0x4D
+#define CHGRSTATUS_RSVD_MASK		(D7|D6|D5|D3)
+#define CHGRSTATUS_SDPB_MASK		D4
+#define CHGRSTATUS_CHGDISLVL_MASK	D2
+#define CHGRSTATUS_CHGDETB_LATCH_MASK	D1
+#define CHGDETB_MASK			D0
+
+#define THRMBATZONE_ADDR_BC		0xB5
+#define THRMBATZONE_ADDR_SC		0xB6
+#define THRMBATZONE_MASK		(D0|D1|D2)
+
+#define USBIDCTRL_ADDR		0x19
+#define USBIDEN_MASK		0x01
+#define ACADETEN_MASK		(0x01 << 1)
+
+#define USBIDSTAT_ADDR		0x1A
+#define ID_SHORT		D4
+#define ID_SHORT_VBUS		(1 << 4)
+#define ID_NOT_SHORT_VBUS	0
+#define ID_FLOAT_STS		D3
+#define R_ID_FLOAT_DETECT	(1 << 3)
+#define R_ID_FLOAT_NOT_DETECT	0
+#define ID_RAR_BRC_STS		((D2 | D1))
+#define ID_ACA_NOT_DETECTED	0
+#define R_ID_A			(1 << 1)
+#define R_ID_B			(2 << 1)
+#define R_ID_C			(3 << 1)
+#define ID_GND			D0
+#define ID_TYPE_A		0
+#define ID_TYPE_B		1
+#define is_aca(x) ((x & R_ID_A) || (x & R_ID_B) || (x & R_ID_C))
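+/* is_aca(): true when the USBIDSTAT ID bits decode to ACA RID A, B or C */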
+
+#define WAKESRC_ADDR		0x24
+
+#define CHRTTADDR_ADDR		0x56
+#define CHRTTDATA_ADDR		0x57
+
+#define USBSRCDET_RETRY_CNT		4
+#define USBSRCDET_SLEEP_TIME		200
+#define USBSRCDETSTATUS_ADDR		0x5D
+#define USBSRCDET_SUSBHWDET_MASK	(D0|D1)
+#define USBSRCDET_USBSRCRSLT_MASK	(D2|D3|D4|D5)
+#define USBSRCDET_SDCD_MASK		(D6|D7)
+#define USBSRCDET_SUSBHWDET_DETON	(0x01 << 0)
+#define USBSRCDET_SUSBHWDET_DETSUCC	(0x01 << 1)
+#define USBSRCDET_SUSBHWDET_DETFAIL	(0x03 << 0)
+
+/* Register on I2C-dev2-0x6E */
+#define USBPATH_ADDR		0x011C
+#define USBPATH_USBSEL_MASK	D3
+
+#define TT_I2CDADDR_ADDR		0x00
+#define TT_CHGRINIT0OS_ADDR		0x01
+#define TT_CHGRINIT1OS_ADDR		0x02
+#define TT_CHGRINIT2OS_ADDR		0x03
+#define TT_CHGRINIT3OS_ADDR		0x04
+#define TT_CHGRINIT4OS_ADDR		0x05
+#define TT_CHGRINIT5OS_ADDR		0x06
+#define TT_CHGRINIT6OS_ADDR		0x07
+#define TT_CHGRINIT7OS_ADDR		0x08
+#define TT_USBINPUTICCOS_ADDR		0x09
+#define TT_USBINPUTICCMASK_ADDR		0x0A
+#define TT_CHRCVOS_ADDR			0x0B
+#define TT_CHRCVMASK_ADDR		0x0C
+#define TT_CHRCCOS_ADDR			0x0D
+#define TT_CHRCCMASK_ADDR		0x0E
+#define TT_LOWCHROS_ADDR		0x0F
+#define TT_LOWCHRMASK_ADDR		0x10
+#define TT_WDOGRSTOS_ADDR		0x11
+#define TT_WDOGRSTMASK_ADDR		0x12
+#define TT_CHGRENOS_ADDR		0x13
+#define TT_CHGRENMASK_ADDR		0x14
+
+#define TT_CUSTOMFIELDEN_ADDR		0x15
+#define TT_HOT_LC_EN			D1
+#define TT_COLD_LC_EN			D0
+#define TT_HOT_COLD_LC_MASK		(TT_HOT_LC_EN | TT_COLD_LC_EN)
+#define TT_HOT_COLD_LC_EN		(TT_HOT_LC_EN | TT_COLD_LC_EN)
+#define TT_HOT_COLD_LC_DIS		0
+
+#define TT_CHGRINIT0VAL_ADDR		0x20
+#define TT_CHGRINIT1VAL_ADDR		0x21
+#define TT_CHGRINIT2VAL_ADDR		0x22
+#define TT_CHGRINIT3VAL_ADDR		0x23
+#define TT_CHGRINIT4VAL_ADDR		0x24
+#define TT_CHGRINIT5VAL_ADDR		0x25
+#define TT_CHGRINIT6VAL_ADDR		0x26
+#define TT_CHGRINIT7VAL_ADDR		0x27
+#define TT_USBINPUTICC100VAL_ADDR	0x28
+#define TT_USBINPUTICC150VAL_ADDR	0x29
+#define TT_USBINPUTICC500VAL_ADDR	0x2A
+#define TT_USBINPUTICC900VAL_ADDR	0x2B
+#define TT_USBINPUTICC1500VAL_ADDR	0x2C
+#define TT_CHRCVEMRGLOWVAL_ADDR		0x2D
+#define TT_CHRCVCOLDVAL_ADDR		0x2E
+#define TT_CHRCVCOOLVAL_ADDR		0x2F
+#define TT_CHRCVWARMVAL_ADDR		0x30
+#define TT_CHRCVHOTVAL_ADDR		0x31
+#define TT_CHRCVEMRGHIVAL_ADDR		0x32
+#define TT_CHRCCEMRGLOWVAL_ADDR		0x33
+#define TT_CHRCCCOLDVAL_ADDR		0x34
+#define TT_CHRCCCOOLVAL_ADDR		0x35
+#define TT_CHRCCWARMVAL_ADDR		0x36
+#define TT_CHRCCHOTVAL_ADDR		0x37
+#define TT_CHRCCEMRGHIVAL_ADDR		0x38
+#define TT_LOWCHRENVAL_ADDR		0x39
+#define TT_LOWCHRDISVAL_ADDR		0x3A
+#define TT_WDOGRSTVAL_ADDR		0x3B
+#define TT_CHGRENVAL_ADDR		0x3C
+#define TT_CHGRDISVAL_ADDR		0x3D
+
+/* Interrupt registers */
+#define BATT_CHR_BATTDET_MASK	D2
+/* Status registers */
+#define BATT_PRESENT		1
+#define BATT_NOT_PRESENT	0
+
+#define BATT_STRING_MAX		8
+#define BATTID_STR_LEN		8
+
+#define CHARGER_PRESENT		1
+#define CHARGER_NOT_PRESENT	0
+
+/* FIXME: modify default values */
+#define BATT_DEAD_CUTOFF_VOLT		3400	/* 3400 mV */
+#define BATT_CRIT_CUTOFF_VOLT		3700	/* 3700 mV */
+
+#define MSIC_BATT_TEMP_MAX		60	/* 60 degrees */
+#define MSIC_BATT_TEMP_MIN		0
+
+#define BATT_TEMP_WARM			45	/* 45 degrees */
+#define MIN_BATT_PROF			4
+
+#define PMIC_REG_NAME_LEN		28
+#define PMIC_REG_DEF(x) { .reg_name = #x, .addr = x }
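+/*
+ * Usage example: PMIC_REG_DEF(CHGRCTRL0_ADDR) expands (via # token
+ * stringification) to { .reg_name = "CHGRCTRL0_ADDR", .addr = 0x4B }
+ */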
+
+#define GPADC_MAX_VOLTAGE		450
+#define GPADC_MAX_RANGE			1024
+
+struct interrupt_info {
+	/* interrupt register mask */
+	u8 int_reg_mask;
+	/* interrupt status register mask */
+	u8 stat_reg_mask;
+	/* log message if interrupt is set */
+	char *log_msg_int_reg_true;
+	/* log message if stat is true or false */
+	char *log_msg_stat_true;
+	char *log_msg_stat_false;
+	/* handle if interrupt bit is set */
+	void (*int_handle) (void);
+	/* interrupt status handler */
+	void (*stat_handle) (bool);
+};
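+/*
+ * Hypothetical example entry:
+ *	{ CHGIRQ0_BZIRQ_MASK, 0, "battery zone irq", NULL, NULL,
+ *	  bzirq_handler, NULL }
+ * where bzirq_handler is a driver-supplied void (*)(void) callback.
+ */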
+
+enum pmic_charger_cable_type {
+	PMIC_CHARGER_TYPE_NONE = 0,
+	PMIC_CHARGER_TYPE_SDP,
+	PMIC_CHARGER_TYPE_DCP,
+	PMIC_CHARGER_TYPE_CDP,
+	PMIC_CHARGER_TYPE_ACA,
+	PMIC_CHARGER_TYPE_SE1,
+	PMIC_CHARGER_TYPE_MHL,
+	PMIC_CHARGER_TYPE_FLOAT_DP_DN,
+	PMIC_CHARGER_TYPE_OTHER,
+	PMIC_CHARGER_TYPE_DCP_EXTPHY,
+};
+
+struct pmic_chrgr_drv_context {
+	bool invalid_batt;
+	bool is_batt_present;
+	bool current_sense_enabled;
+	unsigned int irq;		/* GPE_ID or IRQ# */
+	void __iomem *pmic_intr_iomap;
+	struct device *dev;
+	int health;
+	int battry_voltage;
+	u8 pmic_id;
+	bool is_internal_usb_phy;
+	enum pmic_charger_cable_type charger_type;
+	/* ShadyCove-WA for VBUS removal detect issue */
+	bool vbus_connect_status;
+	struct ps_batt_chg_prof *sfi_bcprof;
+	struct ps_pse_mod_prof *actual_bcprof;
+	struct ps_pse_mod_prof *runtime_bcprof;
+	struct pmic_platform_data *pdata;
+	struct usb_phy *otg;
+	struct list_head evt_queue;
+	struct work_struct evt_work;
+	struct mutex evt_queue_lock;
+};
+
+struct pmic_event {
+	struct list_head node;
+	u8 chgrirq0_int;
+	u8 chgrirq1_int;
+	u8 chgrirq0_stat;
+	u8 chgrirq1_stat;
+};
+
+struct pmic_regs_def {
+	char reg_name[PMIC_REG_NAME_LEN];
+	u16 addr;
+};
+
+#endif
diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
index cc439fd..d2b3a53 100644
--- a/drivers/power/power_supply.h
+++ b/drivers/power/power_supply.h
@@ -40,3 +40,24 @@
 static inline void power_supply_remove_triggers(struct power_supply *psy) {}
 
 #endif /* CONFIG_LEDS_TRIGGERS */
+#ifdef CONFIG_POWER_SUPPLY_CHARGER
+
+extern void power_supply_trigger_charging_handler(struct power_supply *psy);
+extern int power_supply_register_charger(struct power_supply *psy);
+extern int power_supply_unregister_charger(struct power_supply *psy);
+extern int psy_charger_throttle_charger(struct power_supply *psy,
+					unsigned long state);
+
+#else
+
+static inline void
+	power_supply_trigger_charging_handler(struct power_supply *psy) { }
+static inline int power_supply_register_charger(struct power_supply *psy)
+{ return 0; }
+static inline int power_supply_unregister_charger(struct power_supply *psy)
+{ return 0; }
+static inline int psy_charger_throttle_charger(struct power_supply *psy,
+					unsigned long state)
+{ return 0; }
+
+#endif
diff --git a/drivers/power/power_supply_charger.c b/drivers/power/power_supply_charger.c
new file mode 100644
index 0000000..9ed83bd
--- /dev/null
+++ b/drivers/power/power_supply_charger.c
@@ -0,0 +1,1151 @@
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/power_supply.h>
+#include <linux/thermal.h>
+#include <linux/extcon.h>
+#include <linux/power/battery_id.h>
+#include <linux/notifier.h>
+#include <linux/usb/otg.h>
+#include "power_supply.h"
+#include "power_supply_charger.h"
+
+static struct work_struct otg_work;
+#define MAX_CHARGER_COUNT 5
+
+static LIST_HEAD(algo_list);
+
+struct power_supply_charger {
+	bool is_cable_evt_reg;
+	/*cache battery and charger properties */
+	struct list_head chrgr_cache_lst;
+	struct list_head batt_cache_lst;
+	struct list_head evt_queue;
+	struct work_struct algo_trigger_work;
+	struct mutex evt_lock;
+	wait_queue_head_t wait_chrg_enable;
+};
+
+struct charger_cable {
+	struct work_struct work;
+	struct notifier_block nb;
+	struct extcon_chrgr_cbl_props cable_props;
+	enum extcon_cable_name extcon_cable_type;
+	enum power_supply_charger_cable_type psy_cable_type;
+	struct extcon_specific_cable_nb extcon_dev;
+	struct extcon_dev *edev;
+};
+
+static struct power_supply_charger psy_chrgr;
+
+static struct charger_cable cable_list[] = {
+	{
+	 .psy_cable_type = POWER_SUPPLY_CHARGER_TYPE_USB_SDP,
+	 .extcon_cable_type = EXTCON_SDP,
+	 },
+	{
+	 .psy_cable_type = POWER_SUPPLY_CHARGER_TYPE_USB_CDP,
+	 .extcon_cable_type = EXTCON_CDP,
+	 },
+	{
+	 .psy_cable_type = POWER_SUPPLY_CHARGER_TYPE_USB_DCP,
+	 .extcon_cable_type = EXTCON_DCP,
+	 },
+	{
+	 .psy_cable_type = POWER_SUPPLY_CHARGER_TYPE_USB_ACA,
+	 .extcon_cable_type = EXTCON_ACA,
+	 },
+	{
+	 .psy_cable_type = POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK,
+	 .extcon_cable_type = EXTCON_ACA,
+	 },
+	{
+	 .psy_cable_type = POWER_SUPPLY_CHARGER_TYPE_SE1,
+	 .extcon_cable_type = EXTCON_TA,
+	 },
+	{
+	 .psy_cable_type = POWER_SUPPLY_CHARGER_TYPE_AC,
+	 .extcon_cable_type = EXTCON_AC,
+	 },
+};
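+/* NOTE: get_cable() below indexes cable_list by fixed position, so the
+ * array ordering above must stay in sync with that mapping. */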
+
+static int get_supplied_by_list(struct power_supply *psy,
+				struct power_supply *psy_lst[]);
+
+static int otg_handle_notification(struct notifier_block *nb,
+				   unsigned long event, void *data);
+static struct usb_phy *otg_xceiver;
+static struct notifier_block otg_nb = {
+	.notifier_call = otg_handle_notification,
+};
+static void configure_chrgr_source(struct charger_cable *cable_lst);
+
+static struct charger_cable *get_cable(unsigned long usb_chrgr_type)
+{
+	switch (usb_chrgr_type) {
+	case POWER_SUPPLY_CHARGER_TYPE_USB_SDP:
+		pr_info("%s:%d SDP\n", __FILE__, __LINE__);
+		return &cable_list[0];
+	case POWER_SUPPLY_CHARGER_TYPE_USB_CDP:
+		pr_info("%s:%d CDP\n", __FILE__, __LINE__);
+		return &cable_list[1];
+	case POWER_SUPPLY_CHARGER_TYPE_USB_DCP:
+		pr_info("%s:%d DCP\n", __FILE__, __LINE__);
+		return &cable_list[2];
+	case POWER_SUPPLY_CHARGER_TYPE_USB_ACA:
+		pr_info("%s:%d ACA\n", __FILE__, __LINE__);
+		return &cable_list[3];
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK:
+		pr_info("%s:%d ACA DOCK\n", __FILE__, __LINE__);
+		return &cable_list[4];
+	case POWER_SUPPLY_CHARGER_TYPE_AC:
+		pr_info("%s:%d AC\n", __FILE__, __LINE__);
+		return &cable_list[6];
+	case POWER_SUPPLY_CHARGER_TYPE_SE1:
+		pr_info("%s:%d SE1\n", __FILE__, __LINE__);
+		return &cable_list[5];
+	}
+
+	return NULL;
+}
+
+static void otg_event_worker(struct work_struct *work)
+{
+	configure_chrgr_source(cable_list);
+}
+
+static int process_cable_props(struct power_supply_cable_props *cap)
+{
+	struct charger_cable *cable;
+
+	cable = get_cable(cap->chrg_type);
+	if (!cable) {
+		pr_err("%s:%d Error in getting charger cable from get_cable\n",
+				__FILE__, __LINE__);
+		return -EINVAL;
+	}
+
+	switch (cap->chrg_evt) {
+	case POWER_SUPPLY_CHARGER_EVENT_CONNECT:
+		printk(KERN_ERR "%s:%d Connected inlmt=%d\n",
+				__FILE__, __LINE__, cap->mA);
+		cable->cable_props.cable_stat = EXTCON_CHRGR_CABLE_CONNECTED;
+		break;
+	case POWER_SUPPLY_CHARGER_EVENT_UPDATE:
+		printk(KERN_ERR "%s:%d Updated inlmt=%d\n",
+				__FILE__, __LINE__, cap->mA);
+		cable->cable_props.cable_stat = EXTCON_CHRGR_CABLE_UPDATED;
+		break;
+	case POWER_SUPPLY_CHARGER_EVENT_DISCONNECT:
+		printk(KERN_ERR "%s:%d Disconnected inlmt=%d\n",
+			__FILE__, __LINE__, cap->mA);
+		cable->cable_props.cable_stat = EXTCON_CHRGR_CABLE_DISCONNECTED;
+		break;
+	case POWER_SUPPLY_CHARGER_EVENT_SUSPEND:
+		printk(KERN_ERR "%s:%d Suspended inlmt=%d\n",
+			__FILE__, __LINE__, cap->mA);
+		cable->cable_props.cable_stat = EXTCON_CHRGR_CABLE_SUSPENDED;
+		break;
+	default:
+		printk(KERN_ERR "%s:%d Invalid event\n", __FILE__, __LINE__);
+		break;
+	}
+
+	cable->cable_props.mA = cap->mA;
+	schedule_work(&otg_work);
+
+	return 0;
+}
+
+static int otg_handle_notification(struct notifier_block *nb,
+				   unsigned long event, void *data)
+{
+	struct power_supply_cable_props *cap = data;
+
+	if (event != USB_EVENT_CHARGER)
+		return NOTIFY_DONE;
+
+	process_cable_props(cap);
+
+	return NOTIFY_OK;
+}
+
+static int otg_register(void)
+{
+	int retval;
+
+	otg_xceiver = usb_get_transceiver();
+	if (!otg_xceiver) {
+		pr_err("%s:%d failure to get otg transceiver\n",
+					__FILE__, __LINE__);
+		goto otg_reg_failed;
+	}
+	retval = usb_register_notifier(otg_xceiver, &otg_nb);
+	if (retval) {
+		pr_err("%s:%d failure to register otg notifier\n",
+			__FILE__, __LINE__);
+		goto otg_reg_failed;
+	}
+
+	INIT_WORK(&otg_work, otg_event_worker);
+
+	return 0;
+
+otg_reg_failed:
+	return -EIO;
+}
+
+static int charger_cable_notifier(struct notifier_block *nb,
+				  unsigned long event, void *ptr);
+static void charger_cable_event_worker(struct work_struct *work);
+struct charging_algo *power_supply_get_charging_algo
+		(struct power_supply *, struct ps_batt_chg_prof *);
+
+static void init_charger_cables(struct charger_cable *cable_lst, int count)
+{
+	struct charger_cable *cable;
+	struct extcon_chrgr_cbl_props cable_props;
+	const char *cable_name;
+	struct power_supply_cable_props cap;
+
+	otg_register();
+
+	while (count--) {
+		cable = cable_lst++;
+		/* initialize cable instance */
+		INIT_WORK(&cable->work, charger_cable_event_worker);
+		cable->nb.notifier_call = charger_cable_notifier;
+		cable->cable_props.cable_stat = EXTCON_CHRGR_CABLE_DISCONNECTED;
+		cable->cable_props.mA = 0;
+		cable_name = extcon_cable_name[cable->extcon_cable_type];
+
+		if (extcon_register_interest(&cable->extcon_dev,
+				NULL, cable_name, &cable->nb))
+				continue;
+
+		cable->edev = cable->extcon_dev.edev;
+
+		if (!cable->edev)
+			continue;
+
+		if (cable->edev->get_cable_properties(cable_name,
+						      (void *)&cable_props)) {
+			continue;
+
+		} else if (cable_props.cable_stat !=
+			   cable->cable_props.cable_stat) {
+			cable->cable_props.cable_stat = cable_props.cable_stat;
+			cable->cable_props.mA = cable_props.mA;
+		}
+	}
+
+	if (!otg_get_chrg_status(otg_xceiver, &cap))
+		process_cable_props(&cap);
+
+}
+
+static inline void get_cur_chrgr_prop(struct power_supply *psy,
+				      struct charger_props *chrgr_prop)
+{
+	chrgr_prop->is_charging = IS_CHARGING_ENABLED(psy);
+	chrgr_prop->name = psy->name;
+	chrgr_prop->online = IS_ONLINE(psy);
+	chrgr_prop->present = IS_PRESENT(psy);
+	chrgr_prop->cable = CABLE_TYPE(psy);
+	chrgr_prop->health = HEALTH(psy);
+	chrgr_prop->tstamp = get_jiffies_64();
+
+}
+
+static inline int get_chrgr_prop_cache(struct power_supply *psy,
+				       struct charger_props *chrgr_cache)
+{
+
+	struct charger_props *chrgr_prop;
+	int ret = -ENODEV;
+
+	list_for_each_entry(chrgr_prop, &psy_chrgr.chrgr_cache_lst, node) {
+		if (!strcmp(chrgr_prop->name, psy->name)) {
+			memcpy(chrgr_cache, chrgr_prop, sizeof(*chrgr_cache));
+			ret = 0;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static void dump_charger_props(struct charger_props *props)
+{
+	pr_devel("%s:name=%s present=%d is_charging=%d health=%d online=%d cable=%lu tstamp=%lu\n",
+		__func__, props->name, props->present, props->is_charging,
+		props->health, props->online, props->cable, props->tstamp);
+}
+
+static void dump_battery_props(struct batt_props *props)
+{
+	pr_devel("%s:name=%s voltage_now=%ld current_now=%ld temperature=%d status=%ld health=%d tstamp=%lu algo_stat=%d\n",
+		__func__, props->name, props->voltage_now, props->current_now,
+		props->temperature, props->status, props->health,
+		props->tstamp, props->algo_stat);
+}
+
+static inline void cache_chrgr_prop(struct charger_props *chrgr_prop_new)
+{
+
+	struct charger_props *chrgr_cache;
+
+	list_for_each_entry(chrgr_cache, &psy_chrgr.chrgr_cache_lst, node) {
+		if (!strcmp(chrgr_cache->name, chrgr_prop_new->name))
+			goto update_props;
+	}
+
+	chrgr_cache = kzalloc(sizeof(*chrgr_cache), GFP_KERNEL);
+	if (chrgr_cache == NULL) {
+		pr_err("%s:%d Error in allocating memory\n", __FILE__, __LINE__);
+		return;
+	}
+
+	INIT_LIST_HEAD(&chrgr_cache->node);
+	list_add_tail(&chrgr_cache->node, &psy_chrgr.chrgr_cache_lst);
+
+	chrgr_cache->name = chrgr_prop_new->name;
+
+update_props:
+	chrgr_cache->is_charging = chrgr_prop_new->is_charging;
+	chrgr_cache->online = chrgr_prop_new->online;
+	chrgr_cache->health = chrgr_prop_new->health;
+	chrgr_cache->present = chrgr_prop_new->present;
+	chrgr_cache->cable = chrgr_prop_new->cable;
+	chrgr_cache->tstamp = chrgr_prop_new->tstamp;
+}
+
+static inline bool is_chrgr_prop_changed(struct power_supply *psy)
+{
+
+	struct charger_props chrgr_prop_cache, chrgr_prop;
+
+	get_cur_chrgr_prop(psy, &chrgr_prop);
+	/* Get the cached charger property. If no cached property is
+	 * available, then cache the new property and return true.
+	 */
+	if (get_chrgr_prop_cache(psy, &chrgr_prop_cache)) {
+		cache_chrgr_prop(&chrgr_prop);
+		return true;
+	}
+
+	pr_devel("%s\n", __func__);
+	dump_charger_props(&chrgr_prop);
+	dump_charger_props(&chrgr_prop_cache);
+
+	if (!IS_CHARGER_PROP_CHANGED(chrgr_prop, chrgr_prop_cache))
+		return false;
+
+	cache_chrgr_prop(&chrgr_prop);
+	return true;
+}
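+
+/* Shift the fixed-size sample window left by one slot and append the
+ * new sample, e.g. {s0, s1, s2} + s3 -> {s1, s2, s3}. */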
+static void cache_successive_samples(long *sample_array, long new_sample)
+{
+
+	int i;
+
+	for (i = 0; i < MAX_CUR_VOLT_SAMPLES - 1; ++i)
+		*(sample_array + i) = *(sample_array + i + 1);
+
+	*(sample_array + i) = new_sample;
+
+}
+
+static inline void cache_bat_prop(struct batt_props *bat_prop_new)
+{
+
+	struct batt_props *bat_cache;
+
+	/* Find the entry in the cache list. If an entry is located, update
+	 * the existing entry; else create a new entry in the list. */
+	list_for_each_entry(bat_cache, &psy_chrgr.batt_cache_lst, node) {
+		if (!strcmp(bat_cache->name, bat_prop_new->name))
+			goto update_props;
+	}
+
+	bat_cache = kzalloc(sizeof(*bat_cache), GFP_KERNEL);
+	if (bat_cache == NULL) {
+		pr_err("%s:%d Error in allocating memory\n", __FILE__, __LINE__);
+		return;
+	}
+	INIT_LIST_HEAD(&bat_cache->node);
+	list_add_tail(&bat_cache->node, &psy_chrgr.batt_cache_lst);
+
+	bat_cache->name = bat_prop_new->name;
+
+update_props:
+	if (time_after(bat_prop_new->tstamp,
+		(bat_cache->tstamp + DEF_CUR_VOLT_SAMPLE_JIFF)) ||
+						bat_cache->tstamp == 0) {
+		cache_successive_samples(bat_cache->voltage_now_cache,
+						bat_prop_new->voltage_now);
+		cache_successive_samples(bat_cache->current_now_cache,
+						bat_prop_new->current_now);
+		bat_cache->tstamp = bat_prop_new->tstamp;
+	}
+
+	bat_cache->voltage_now = bat_prop_new->voltage_now;
+	bat_cache->current_now = bat_prop_new->current_now;
+	bat_cache->health = bat_prop_new->health;
+
+	bat_cache->temperature = bat_prop_new->temperature;
+	bat_cache->status = bat_prop_new->status;
+	bat_cache->algo_stat = bat_prop_new->algo_stat;
+	bat_cache->throttle_state = bat_prop_new->throttle_state;
+}
+
+static inline int get_bat_prop_cache(struct power_supply *psy,
+				     struct batt_props *bat_cache)
+{
+
+	struct batt_props *bat_prop;
+	int ret = -ENODEV;
+
+	list_for_each_entry(bat_prop, &psy_chrgr.batt_cache_lst, node) {
+		if (!strcmp(bat_prop->name, psy->name)) {
+			memcpy(bat_cache, bat_prop, sizeof(*bat_cache));
+			ret = 0;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static inline void get_cur_bat_prop(struct power_supply *psy,
+				    struct batt_props *bat_prop)
+{
+	struct batt_props bat_prop_cache;
+	int ret;
+
+	bat_prop->name = psy->name;
+	bat_prop->voltage_now = VOLTAGE_OCV(psy) / 1000;
+	bat_prop->current_now = CURRENT_NOW(psy) / 1000;
+	bat_prop->temperature = TEMPERATURE(psy) / 10;
+	bat_prop->status = STATUS(psy);
+	bat_prop->health = HEALTH(psy);
+	bat_prop->tstamp = get_jiffies_64();
+	bat_prop->throttle_state = CURRENT_THROTTLE_STATE(psy);
+
+	/* Populate cached algo data to new profile */
+	ret = get_bat_prop_cache(psy, &bat_prop_cache);
+	if (!ret)
+		bat_prop->algo_stat = bat_prop_cache.algo_stat;
+}
+
+static inline bool is_batt_prop_changed(struct power_supply *psy)
+{
+
+	struct batt_props bat_prop_cache, bat_prop;
+
+	/* Get the cached battery property. If no cached property is
+	 * available, then cache the new property and return true.
+	 */
+	get_cur_bat_prop(psy, &bat_prop);
+	if (get_bat_prop_cache(psy, &bat_prop_cache)) {
+		cache_bat_prop(&bat_prop);
+		return true;
+	}
+
+	pr_devel("%s\n", __func__);
+	dump_battery_props(&bat_prop);
+	dump_battery_props(&bat_prop_cache);
+
+	if (!IS_BAT_PROP_CHANGED(bat_prop, bat_prop_cache))
+		return false;
+
+	cache_bat_prop(&bat_prop);
+	return true;
+}
+
+static inline bool is_supplied_to_has_ext_pwr_changed(struct power_supply *psy)
+{
+	int i;
+	struct power_supply *psb;
+	bool is_pwr_changed_defined = true;
+
+	for (i = 0; i < psy->num_supplicants; i++) {
+		psb = power_supply_get_by_name(psy->supplied_to[i]);
+		if (psb && !psb->external_power_changed)
+			is_pwr_changed_defined = false;
+	}
+
+	return is_pwr_changed_defined;
+}
+
+static inline bool is_supplied_by_changed(struct power_supply *psy)
+{
+
+	int cnt;
+	struct power_supply *chrgr_lst[MAX_CHARGER_COUNT];
+
+	cnt = get_supplied_by_list(psy, chrgr_lst);
+	while (cnt--) {
+		if ((IS_CHARGER(chrgr_lst[cnt])) &&
+			is_chrgr_prop_changed(chrgr_lst[cnt]))
+			return true;
+	}
+
+	return false;
+}
+
+static inline bool is_trigger_charging_algo(struct power_supply *psy)
+{
+
+	/* Trigger the charging algorithm if the battery or charger
+	 * properties have changed. There is also no need to invoke the
+	 * algorithm for a power_supply_changed event from a charger if
+	 * every supplied_to device defines external_power_changed; on
+	 * invoking external_power_changed, the supplied-to device can
+	 * send its own power_supply_changed event.
+	 */
+
+	if ((IS_CHARGER(psy) && !is_supplied_to_has_ext_pwr_changed(psy)) &&
+			is_chrgr_prop_changed(psy))
+		return true;
+
+	if ((IS_BATTERY(psy)) && (is_batt_prop_changed(psy) ||
+				is_supplied_by_changed(psy)))
+		return true;
+
+	return false;
+}
+
+static int get_supplied_by_list(struct power_supply *psy,
+				struct power_supply *psy_lst[])
+{
+	struct class_dev_iter iter;
+	struct device *dev;
+	struct power_supply *pst;
+	int cnt = 0, i, j;
+
+	if (!IS_BATTERY(psy))
+		return 0;
+
+	/* Identify chargers which are supplying power to the battery */
+	class_dev_iter_init(&iter, power_supply_class, NULL, NULL);
+	while ((dev = class_dev_iter_next(&iter))) {
+		pst = (struct power_supply *)dev_get_drvdata(dev);
+		if (!IS_CHARGER(pst))
+			continue;
+		for (i = 0; i < pst->num_supplicants; i++) {
+			if (!strcmp(pst->supplied_to[i], psy->name))
+				psy_lst[cnt++] = pst;
+		}
+	}
+	class_dev_iter_exit(&iter);
+
+	if (cnt <= 1)
+		return cnt;
+
+	/* Sort based on priority; 0 has the highest priority. */
+	for (i = 0; i < cnt; ++i)
+		for (j = 0; j < cnt; ++j)
+			if (PRIORITY(psy_lst[j]) > PRIORITY(psy_lst[i]))
+				swap(psy_lst[j], psy_lst[i]);
+
+	return cnt;
+}
+
+static int get_battery_status(struct power_supply *psy)
+{
+	int cnt, status, ret;
+	struct power_supply *chrgr_lst[MAX_CHARGER_COUNT];
+	struct batt_props bat_prop;
+
+	if (!IS_BATTERY(psy))
+		return -EINVAL;
+
+	ret = get_bat_prop_cache(psy, &bat_prop);
+	if (ret)
+		return ret;
+
+	status = POWER_SUPPLY_STATUS_DISCHARGING;
+	cnt = get_supplied_by_list(psy, chrgr_lst);
+
+	while (cnt--) {
+		if (IS_PRESENT(chrgr_lst[cnt]))
+			status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+		if (IS_CHARGING_CAN_BE_ENABLED(chrgr_lst[cnt]) &&
+			(IS_HEALTH_GOOD(psy)) &&
+				(IS_HEALTH_GOOD(chrgr_lst[cnt]))) {
+
+			if ((bat_prop.algo_stat == PSY_ALGO_STAT_FULL) ||
+				(bat_prop.algo_stat == PSY_ALGO_STAT_MAINT))
+				status = POWER_SUPPLY_STATUS_FULL;
+			else if (IS_CHARGING_ENABLED(chrgr_lst[cnt]))
+				status = POWER_SUPPLY_STATUS_CHARGING;
+		}
+	}
+	pr_devel("%s: Set status=%d for %s\n", __func__, status, psy->name);
+
+	return status;
+}
+
+static void update_charger_online(struct power_supply *psy)
+{
+	if (IS_CHARGER_ENABLED(psy))
+		set_charger_online(psy, 1);
+	else
+		set_charger_online(psy, 0);
+}
+
+static void update_sysfs(struct power_supply *psy)
+{
+	int i, cnt;
+	struct power_supply *psb;
+	struct power_supply *chrgr_lst[MAX_CHARGER_COUNT];
+
+	if (IS_BATTERY(psy)) {
+		/* set battery status */
+		set_battery_status(psy, get_battery_status(psy));
+
+		/* set charger online */
+		cnt = get_supplied_by_list(psy, chrgr_lst);
+		while (cnt--) {
+			if (!IS_PRESENT(chrgr_lst[cnt]))
+				continue;
+
+			update_charger_online(chrgr_lst[cnt]);
+		}
+	} else {
+		/*set battery status */
+		for (i = 0; i < psy->num_supplicants; i++) {
+			psb = power_supply_get_by_name(psy->supplied_to[i]);
+			if (psb && IS_BATTERY(psb) && IS_PRESENT(psb))
+				set_battery_status(psb,
+					get_battery_status(psb));
+		}
+
+		/*set charger online */
+		update_charger_online(psy);
+
+	}
+}
+
+static int trigger_algo(struct power_supply *psy)
+{
+	unsigned long cc = 0, cv = 0, cc_min;
+	struct power_supply *chrgr_lst[MAX_CHARGER_COUNT];
+	struct batt_props bat_prop;
+	struct charging_algo *algo;
+	struct ps_batt_chg_prof chrg_profile;
+	int cnt;
+
+	if (psy->type != POWER_SUPPLY_TYPE_BATTERY)
+		return 0;
+
+	if (get_batt_prop(&chrg_profile)) {
+		pr_err("Error in getting charge profile:%s:%d\n", __FILE__,
+		       __LINE__);
+		return -EINVAL;
+	}
+
+	get_bat_prop_cache(psy, &bat_prop);
+
+	algo = power_supply_get_charging_algo(psy, &chrg_profile);
+	if (!algo) {
+		pr_err("Error in getting charging algo!!\n");
+		return -EINVAL;
+	}
+
+	bat_prop.algo_stat = algo->get_next_cc_cv(bat_prop,
+						chrg_profile, &cc, &cv);
+
+	switch (bat_prop.algo_stat) {
+	case PSY_ALGO_STAT_CHARGE:
+		pr_devel("%s:Algo_status: Charging Enabled\n", __func__);
+		break;
+	case PSY_ALGO_STAT_FULL:
+		pr_devel("%s:Algo_status: Battery is Full\n", __func__);
+		break;
+	case PSY_ALGO_STAT_MAINT:
+		pr_devel("%s:Algo_status: Maintenance charging started\n",
+			__func__);
+		break;
+	case PSY_ALGO_STAT_UNKNOWN:
+		pr_devel("%s:Algo Status: unknown\n", __func__);
+		break;
+	case PSY_ALGO_STAT_NOT_CHARGE:
+		pr_devel("%s:Algo Status: charging not enabled\n",
+			__func__);
+		break;
+	}
+
+	cache_bat_prop(&bat_prop);
+
+	if (!cc || !cv)
+		return -ENODATA;
+
+	/* CC needs to be updated for all chargers which are supplying
+	 *  power to this battery to ensure that the sum of CCs of all
+	 * chargers are never more than the CC selected by the algo.
+	 * The CC is set based on the charger priority.
+	 */
+	cnt = get_supplied_by_list(psy, chrgr_lst);
+
+	while (cnt--) {
+		if (!IS_PRESENT(chrgr_lst[cnt]))
+			continue;
+
+		cc_min = min_t(unsigned long, MAX_CC(chrgr_lst[cnt]), cc);
+		cc -= cc_min;
+		set_cc(chrgr_lst[cnt], cc_min);
+		set_cv(chrgr_lst[cnt], cv);
+	}
+
+	return 0;
+}
+
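+/* Wait (up to one second) for the charger driver to report that
+ * charging is actually enabled. */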
+static inline void wait_for_charging_enabled(struct power_supply *psy)
+{
+	wait_event_timeout(psy_chrgr.wait_chrg_enable,
+			(IS_CHARGING_ENABLED(psy)), HZ);
+}
+
+static inline void enable_supplied_by_charging
+		(struct power_supply *psy, bool is_enable)
+{
+	struct power_supply *chrgr_lst[MAX_CHARGER_COUNT];
+	int cnt;
+
+	if (psy->type != POWER_SUPPLY_TYPE_BATTERY)
+		return;
+	/* Get the list of chargers supplying power to this battery and
+	 * enable or disable charging for each of them.
+	 */
+	cnt = get_supplied_by_list(psy, chrgr_lst);
+	if (cnt == 0)
+		return;
+	while (cnt--) {
+		if (!IS_PRESENT(chrgr_lst[cnt]))
+			continue;
+		if (is_enable && IS_CHARGING_CAN_BE_ENABLED(chrgr_lst[cnt])) {
+			enable_charging(chrgr_lst[cnt]);
+			wait_for_charging_enabled(chrgr_lst[cnt]);
+		} else
+			disable_charging(chrgr_lst[cnt]);
+	}
+}
+
+static void __power_supply_trigger_charging_handler(struct power_supply *psy)
+{
+	int i;
+	struct power_supply *psb = NULL;
+
+	mutex_lock(&psy_chrgr.evt_lock);
+
+	if (is_trigger_charging_algo(psy)) {
+
+		if (IS_BATTERY(psy)) {
+			if (trigger_algo(psy))
+				enable_supplied_by_charging(psy, false);
+			else
+				enable_supplied_by_charging(psy, true);
+
+		} else if (IS_CHARGER(psy)) {
+			for (i = 0; i < psy->num_supplicants; i++) {
+				psb = power_supply_get_by_name(
+						psy->supplied_to[i]);
+
+				if (psb && IS_BATTERY(psb) && IS_PRESENT(psb)) {
+					if (trigger_algo(psb)) {
+						disable_charging(psy);
+						break;
+					} else if (IS_CHARGING_CAN_BE_ENABLED
+								(psy)) {
+						enable_charging(psy);
+						wait_for_charging_enabled(psy);
+					}
+				}
+			}
+		}
+		update_sysfs(psy);
+		power_supply_changed(psy);
+	}
+	mutex_unlock(&psy_chrgr.evt_lock);
+
+}
+
+static int __trigger_charging_handler(struct device *dev, void *data)
+{
+	struct power_supply *psy = dev_get_drvdata(dev);
+
+	__power_supply_trigger_charging_handler(psy);
+
+	return 0;
+}
+
+static void trigger_algo_psy_class(struct work_struct *work)
+{
+
+	class_for_each_device(power_supply_class, NULL, NULL,
+			__trigger_charging_handler);
+
+}
+
+static bool is_cable_connected(void)
+{
+	int i;
+	struct charger_cable *cable;
+
+	for (i = 0; i < ARRAY_SIZE(cable_list); ++i) {
+		cable = cable_list + i;
+		if (IS_CABLE_ACTIVE(cable->cable_props.cable_stat))
+			return true;
+	}
+	return false;
+}
+
+void power_supply_trigger_charging_handler(struct power_supply *psy)
+{
+
+	if (!psy_chrgr.is_cable_evt_reg || !is_cable_connected())
+		return;
+
+	wake_up(&psy_chrgr.wait_chrg_enable);
+
+	if (psy)
+		__power_supply_trigger_charging_handler(psy);
+	else
+		schedule_work(&psy_chrgr.algo_trigger_work);
+
+}
+EXPORT_SYMBOL(power_supply_trigger_charging_handler);
+
+static inline int get_battery_thresholds(struct power_supply *psy,
+	struct psy_batt_thresholds *bat_thresh)
+{
+	struct charging_algo *algo;
+	struct ps_batt_chg_prof chrg_profile;
+
+	/* FIXME: Get iterm only for supplied_to arguments*/
+	if (get_batt_prop(&chrg_profile)) {
+		pr_err("Error in getting charge profile:%s:%d\n", __FILE__,
+		       __LINE__);
+		return -EINVAL;
+	}
+
+	algo = power_supply_get_charging_algo(psy, &chrg_profile);
+	if (!algo) {
+		pr_err("Error in getting charging algo!!\n");
+		return -EINVAL;
+	}
+
+	if (algo->get_batt_thresholds) {
+		algo->get_batt_thresholds(chrg_profile, bat_thresh);
+	} else {
+		pr_err("Error in getting battery thresholds from %s:%s\n",
+			algo->name, __func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int select_chrgr_cable(struct device *dev, void *data)
+{
+	struct power_supply *psy = dev_get_drvdata(dev);
+	struct charger_cable *cable, *max_mA_cable = NULL;
+	struct charger_cable *cable_lst = (struct charger_cable *)data;
+	unsigned int max_mA = 0;
+	int i;
+
+	if (!IS_CHARGER(psy))
+		return 0;
+
+	mutex_lock(&psy_chrgr.evt_lock);
+
+	/* get cable with maximum capability */
+	for (i = 0; i < ARRAY_SIZE(cable_list); ++i) {
+		cable = cable_lst + i;
+		if ((!IS_CABLE_ACTIVE(cable->cable_props.cable_stat)) ||
+		    (!IS_SUPPORTED_CABLE(psy, cable->psy_cable_type)))
+			continue;
+
+		if (cable->cable_props.mA > max_mA) {
+			max_mA_cable = cable;
+			max_mA = cable->cable_props.mA;
+		}
+	}
+
+	/* no cable connected. disable charging */
+	if (!max_mA_cable) {
+
+		if ((IS_CHARGER_ENABLED(psy) || IS_CHARGING_ENABLED(psy))) {
+			disable_charging(psy);
+			disable_charger(psy);
+		}
+		set_cc(psy, 0);
+		set_cv(psy, 0);
+		set_inlmt(psy, 0);
+
+		/* set present and online as 0 */
+		set_present(psy, 0);
+		update_charger_online(psy);
+
+		switch_cable(psy, POWER_SUPPLY_CHARGER_TYPE_NONE);
+
+		mutex_unlock(&psy_chrgr.evt_lock);
+		power_supply_changed(psy);
+		return 0;
+	}
+
+	/* Cable type changed: a new cable was connected or an existing
+	 * cable's capabilities changed. Switch the cable and enable the
+	 * charger and charging.
+	 */
+	set_present(psy, 1);
+
+	if (CABLE_TYPE(psy) != max_mA_cable->psy_cable_type)
+		switch_cable(psy, max_mA_cable->psy_cable_type);
+
+	if (IS_CHARGER_CAN_BE_ENABLED(psy)) {
+		struct psy_batt_thresholds bat_thresh;
+		memset(&bat_thresh, 0, sizeof(bat_thresh));
+		enable_charger(psy);
+
+		update_charger_online(psy);
+
+		set_inlmt(psy, max_mA_cable->cable_props.mA);
+		if (!get_battery_thresholds(psy, &bat_thresh)) {
+			SET_ITERM(psy, bat_thresh.iterm);
+			SET_MIN_TEMP(psy, bat_thresh.temp_min);
+			SET_MAX_TEMP(psy, bat_thresh.temp_max);
+		}
+
+	} else {
+
+		disable_charger(psy);
+		update_charger_online(psy);
+	}
+
+	mutex_unlock(&psy_chrgr.evt_lock);
+	power_supply_trigger_charging_handler(NULL);
+	return 0;
+
+}
+
+static void configure_chrgr_source(struct charger_cable *cable_lst)
+{
+
+	class_for_each_device(power_supply_class, NULL,
+			      cable_lst, select_chrgr_cable);
+
+}
+
+static void charger_cable_event_worker(struct work_struct *work)
+{
+	struct charger_cable *cable =
+	    container_of(work, struct charger_cable, work);
+	struct extcon_chrgr_cbl_props cable_props;
+
+	if (cable->edev->
+	    get_cable_properties(extcon_cable_name[cable->extcon_cable_type],
+				 (void *)&cable_props)) {
+		pr_err("Erron in getting cable(%s) properties from extcon device(%s):%s:%d",
+				extcon_cable_name[cable->extcon_cable_type],
+				cable->edev->name, __FILE__, __LINE__);
+		return;
+	} else {
+		if (cable_props.cable_stat != cable->cable_props.cable_stat) {
+			cable->cable_props.cable_stat = cable_props.cable_stat;
+			cable->cable_props.mA = cable_props.mA;
+			configure_chrgr_source(cable_list);
+		}
+	}
+
+}
+
+static int charger_cable_notifier(struct notifier_block *nb,
+				  unsigned long stat, void *ptr)
+{
+
+	struct charger_cable *cable =
+	    container_of(nb, struct charger_cable, nb);
+
+	schedule_work(&cable->work);
+
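+	/* Stop further notifier processing; the scheduled work handles
+	 * the cable event. */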
+	return NOTIFY_DONE | NOTIFY_STOP_MASK;
+}
+
+int psy_charger_throttle_charger(struct power_supply *psy,
+					unsigned long state)
+{
+	int ret = 0;
+
+	if (state < 0 || state > MAX_THROTTLE_STATE(psy))
+		return -EINVAL;
+
+	mutex_lock(&psy_chrgr.evt_lock);
+
+	switch (THROTTLE_ACTION(psy, state)) {
+	case PSY_THROTTLE_DISABLE_CHARGER:
+		SET_MAX_CC(psy, 0);
+		disable_charger(psy);
+		break;
+	case PSY_THROTTLE_DISABLE_CHARGING:
+		SET_MAX_CC(psy, 0);
+		disable_charging(psy);
+		break;
+	case PSY_THROTTLE_CC_LIMIT:
+		SET_MAX_CC(psy, THROTTLE_CC_VALUE(psy, state));
+		break;
+	case PSY_THROTTLE_INPUT_LIMIT:
+		set_inlmt(psy, THROTTLE_CC_VALUE(psy, state));
+		break;
+	default:
+		pr_err("Invalid throttle action for %s\n", psy->name);
+		ret = -EINVAL;
+		break;
+	}
+	mutex_unlock(&psy_chrgr.evt_lock);
+
+	/* Configure the driver based on new state */
+	if (!ret)
+		configure_chrgr_source(cable_list);
+	return ret;
+}
+EXPORT_SYMBOL(psy_charger_throttle_charger);
+
+int power_supply_register_charger(struct power_supply *psy)
+{
+	int ret = 0;
+
+	if (!psy_chrgr.is_cable_evt_reg) {
+		mutex_init(&psy_chrgr.evt_lock);
+		init_waitqueue_head(&psy_chrgr.wait_chrg_enable);
+		init_charger_cables(cable_list, ARRAY_SIZE(cable_list));
+		INIT_LIST_HEAD(&psy_chrgr.chrgr_cache_lst);
+		INIT_LIST_HEAD(&psy_chrgr.batt_cache_lst);
+		INIT_WORK(&psy_chrgr.algo_trigger_work, trigger_algo_psy_class);
+		psy_chrgr.is_cable_evt_reg = true;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(power_supply_register_charger);
+
+static inline void flush_charger_context(struct power_supply *psy)
+{
+	struct charger_props *chrgr_prop, *tmp;
+
+	list_for_each_entry_safe(chrgr_prop, tmp,
+				&psy_chrgr.chrgr_cache_lst, node) {
+		if (!strcmp(chrgr_prop->name, psy->name)) {
+			list_del(&chrgr_prop->node);
+			kfree(chrgr_prop);
+		}
+	}
+}
+
+int power_supply_unregister_charger(struct power_supply *psy)
+{
+	flush_charger_context(psy);
+	return 0;
+}
+EXPORT_SYMBOL(power_supply_unregister_charger);
+
+int power_supply_register_charging_algo(struct charging_algo *algo)
+{
+
+	struct charging_algo *algo_new;
+
+	algo_new = kzalloc(sizeof(*algo_new), GFP_KERNEL);
+	if (algo_new == NULL) {
+		pr_err("%s: Error allocating memory for algo!\n", __func__);
+		return -ENOMEM;
+	}
+	memcpy(algo_new, algo, sizeof(*algo_new));
+
+	list_add_tail(&algo_new->node, &algo_list);
+	return 0;
+}
+EXPORT_SYMBOL(power_supply_register_charging_algo);
+
+int power_supply_unregister_charging_algo(struct charging_algo *algo)
+{
+	struct charging_algo *algo_l, *tmp;
+
+	list_for_each_entry_safe(algo_l, tmp, &algo_list, node) {
+		if (!strcmp(algo_l->name, algo->name)) {
+			list_del(&algo_l->node);
+			kfree(algo_l);
+		}
+	}
+	return 0;
+}
+EXPORT_SYMBOL(power_supply_unregister_charging_algo);
+
+static struct charging_algo *get_charging_algo_byname(char *algo_name)
+{
+	struct charging_algo *algo;
+
+	list_for_each_entry(algo, &algo_list, node) {
+		if (!strcmp(algo->name, algo_name))
+			return algo;
+	}
+
+	return NULL;
+}
+
+static struct charging_algo *get_charging_algo_by_type
+		(enum batt_chrg_prof_type chrg_prof_type)
+{
+	struct charging_algo *algo;
+
+	list_for_each_entry(algo, &algo_list, node) {
+		if (algo->chrg_prof_type == chrg_prof_type)
+			return algo;
+	}
+
+	return NULL;
+}
+
+struct charging_algo *power_supply_get_charging_algo
+	(struct power_supply *psy, struct ps_batt_chg_prof *batt_prof)
+{
+	return get_charging_algo_by_type(batt_prof->chrg_prof_type);
+}
+EXPORT_SYMBOL_GPL(power_supply_get_charging_algo);
diff --git a/drivers/power/power_supply_charger.h b/drivers/power/power_supply_charger.h
new file mode 100644
index 0000000..81324a4
--- /dev/null
+++ b/drivers/power/power_supply_charger.h
@@ -0,0 +1,244 @@
+#ifndef __POWER_SUPPLY_CHARGER_H__
+#define __POWER_SUPPLY_CHARGER_H__
+
+#include <linux/power/battery_id.h>
+#include <linux/power_supply.h>
+
+#define MAX_CUR_VOLT_SAMPLES 3
+#define DEF_CUR_VOLT_SAMPLE_JIFF (30*HZ)
+
+enum psy_algo_stat {
+	PSY_ALGO_STAT_UNKNOWN,
+	PSY_ALGO_STAT_NOT_CHARGE,
+	PSY_ALGO_STAT_CHARGE,
+	PSY_ALGO_STAT_FULL,
+	PSY_ALGO_STAT_MAINT,
+};
+
+struct batt_props {
+	struct list_head node;
+	const char *name;
+	long voltage_now;
+	long voltage_now_cache[MAX_CUR_VOLT_SAMPLES];
+	long current_now;
+	long current_now_cache[MAX_CUR_VOLT_SAMPLES];
+	int temperature;
+	long status;
+	unsigned long tstamp;
+	enum psy_algo_stat algo_stat;
+	int health;
+	int throttle_state;
+};
+
+struct charger_props {
+	struct list_head node;
+	const char *name;
+	bool present;
+	bool is_charging;
+	int health;
+	bool online;
+	unsigned long cable;
+	unsigned long tstamp;
+};
+
+struct psy_batt_thresholds {
+	int temp_min;
+	int temp_max;
+	unsigned int iterm;
+};
+
+struct charging_algo {
+	struct list_head node;
+	unsigned int chrg_prof_type;
+	char *name;
+	enum psy_algo_stat (*get_next_cc_cv)(struct batt_props,
+			struct ps_batt_chg_prof, unsigned long *cc,
+			unsigned long *cv);
+	int (*get_batt_thresholds)(struct ps_batt_chg_prof,
+			struct psy_batt_thresholds *bat_thr);
+};
+
+extern int power_supply_register_charging_algo(struct charging_algo *);
+extern int power_supply_unregister_charging_algo(struct charging_algo *);
+
+static inline int set_ps_int_property(struct power_supply *psy,
+				      enum power_supply_property psp,
+				      int prop_val)
+{
+
+	union power_supply_propval val;
+
+	val.intval = prop_val;
+	return psy->set_property(psy, psp, &val);
+}
+
+static inline int get_ps_int_property(struct power_supply *psy,
+				      enum power_supply_property psp)
+{
+	union power_supply_propval val;
+
+	val.intval = 0;
+
+	psy->get_property(psy, psp, &val);
+	return val.intval;
+}
+/* Define a TTL for some properties to optimize the frequency of
+ * algorithm calls. This can be used by properties which change
+ * very frequently (e.g. current, voltage..).
+ */
+#define PROP_TTL (HZ*10)
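+/* The ({ ... }) constructs below are GNU statement expressions, so the
+ * helpers can be used like functions inside larger expressions. */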
+#define enable_charging(psy) \
+		({if ((CABLE_TYPE(psy) != POWER_SUPPLY_CHARGER_TYPE_NONE) &&\
+			!IS_CHARGING_ENABLED(psy)) { \
+		enable_charger(psy); \
+		set_ps_int_property(psy, POWER_SUPPLY_PROP_ENABLE_CHARGING,\
+					true); } })
+#define disable_charging(psy) \
+		set_ps_int_property(psy,\
+				POWER_SUPPLY_PROP_ENABLE_CHARGING, false);
+
+#define enable_charger(psy) \
+		set_ps_int_property(psy, POWER_SUPPLY_PROP_ENABLE_CHARGER, true)
+#define disable_charger(psy) \
+		({  disable_charging(psy); \
+			set_ps_int_property(psy,\
+				POWER_SUPPLY_PROP_ENABLE_CHARGER, false); })
+
+#define set_cc(psy, cc) \
+		set_ps_int_property(psy, POWER_SUPPLY_PROP_CHARGE_CURRENT, cc)
+
+#define set_cv(psy, cv) \
+		set_ps_int_property(psy, POWER_SUPPLY_PROP_CHARGE_VOLTAGE, cv)
+
+#define set_inlmt(psy, inlmt) \
+		set_ps_int_property(psy, POWER_SUPPLY_PROP_INLMT, inlmt)
+
+#define set_present(psy, present) \
+		set_ps_int_property(psy, POWER_SUPPLY_PROP_PRESENT, present)
+
+#define SET_MAX_CC(psy, max_cc) \
+		set_ps_int_property(psy,\
+				POWER_SUPPLY_PROP_MAX_CHARGE_CURRENT, max_cc)
+#define SET_ITERM(psy, iterm) \
+		set_ps_int_property(psy,\
+				POWER_SUPPLY_PROP_CHARGE_TERM_CUR, iterm)
+#define SET_MAX_TEMP(psy, temp) \
+		set_ps_int_property(psy,\
+				POWER_SUPPLY_PROP_MAX_TEMP, temp)
+#define SET_MIN_TEMP(psy, temp) \
+		set_ps_int_property(psy,\
+				POWER_SUPPLY_PROP_MIN_TEMP, temp)
+#define switch_cable(psy, new_cable) \
+		set_ps_int_property(psy,\
+				POWER_SUPPLY_PROP_CABLE_TYPE, new_cable)
+
+#define HEALTH(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_HEALTH)
+#define CV(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_CHARGE_VOLTAGE)
+#define CC(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_CHARGE_CURRENT)
+#define INLMT(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_INLMT)
+#define MAX_CC(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_MAX_CHARGE_CURRENT)
+#define MAX_CV(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_MAX_CHARGE_VOLTAGE)
+#define VOLTAGE_NOW(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_VOLTAGE_NOW)
+#define VOLTAGE_OCV(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_VOLTAGE_OCV)
+#define CURRENT_NOW(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_CURRENT_NOW)
+#define STATUS(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_STATUS)
+#define TEMPERATURE(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_TEMP)
+#define BATTERY_TYPE(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_TECHNOLOGY)
+#define PRIORITY(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_PRIORITY)
+#define CABLE_TYPE(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_CABLE_TYPE)
+#define ONLINE(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_ONLINE)
+
+#define IS_CHARGING_ENABLED(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_ENABLE_CHARGING)
+#define IS_CHARGER_ENABLED(psy) \
+		get_ps_int_property(psy, POWER_SUPPLY_PROP_ENABLE_CHARGER)
+#define IS_BATTERY(psy) (psy->type == POWER_SUPPLY_TYPE_BATTERY)
+#define IS_CHARGER(psy) (psy->type == POWER_SUPPLY_TYPE_USB ||\
+				psy->type == POWER_SUPPLY_TYPE_USB_CDP || \
+			psy->type == POWER_SUPPLY_TYPE_USB_DCP ||\
+			psy->type == POWER_SUPPLY_TYPE_USB_ACA)
+#define IS_ONLINE(psy) \
+		(get_ps_int_property(psy, POWER_SUPPLY_PROP_ONLINE) == 1)
+#define IS_PRESENT(psy) \
+		(get_ps_int_property(psy, POWER_SUPPLY_PROP_PRESENT) == 1)
+#define IS_SUPPORTED_CABLE(psy, cable_type) \
+		(psy->supported_cables & cable_type)
+#define IS_CABLE_ACTIVE(status) \
+	(!((status == EXTCON_CHRGR_CABLE_DISCONNECTED) ||\
+			(status == EXTCON_CHRGR_CABLE_SUSPENDED)))
+
+#define IS_CHARGER_PROP_CHANGED(prop, cache_prop)\
+	((cache_prop.online != prop.online) || \
+	(cache_prop.present != prop.present) || \
+	(cache_prop.is_charging != prop.is_charging) || \
+	(cache_prop.health != prop.health))
+
+#define IS_BAT_PROP_CHANGED(bat_prop, bat_cache)\
+	((bat_cache.voltage_now != bat_prop.voltage_now) || \
+	(time_after64(bat_prop.tstamp, (bat_cache.tstamp + PROP_TTL)) &&\
+	((bat_cache.current_now != bat_prop.current_now) || \
+	(bat_cache.voltage_now != bat_prop.voltage_now))) || \
+	(bat_cache.temperature != bat_prop.temperature) || \
+	(bat_cache.health != bat_prop.health) || \
+	(bat_cache.throttle_state != bat_prop.throttle_state))
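+/* Voltage, temperature, health and throttle-state deltas are reported
+ * immediately; a current_now delta alone is rate-limited by PROP_TTL. */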
+
+#define THROTTLE_ACTION(psy, state)\
+		(((psy->throttle_states)+state)->throttle_action)
+
+#define MAX_THROTTLE_STATE(psy)\
+		((psy->num_throttle_states))
+
+#define CURRENT_THROTTLE_STATE(psy)\
+		(get_ps_int_property(psy,\
+			POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT))
+
+#define CURRENT_THROTTLE_ACTION(psy)\
+		THROTTLE_ACTION(psy, CURRENT_THROTTLE_STATE(psy))
+
+#define THROTTLE_CC_VALUE(psy, state)\
+		(((psy->throttle_states)+state)->throttle_val)
+
+#define IS_CHARGING_CAN_BE_ENABLED(psy) \
+	((CURRENT_THROTTLE_ACTION(psy) != PSY_THROTTLE_DISABLE_CHARGER)  &&\
+	(CURRENT_THROTTLE_ACTION(psy) != PSY_THROTTLE_DISABLE_CHARGING))
+#define IS_CHARGER_CAN_BE_ENABLED(psy) \
+	(CURRENT_THROTTLE_ACTION(psy) != PSY_THROTTLE_DISABLE_CHARGER)
+
+#define IS_HEALTH_GOOD(psy)\
+	(HEALTH(psy) == POWER_SUPPLY_HEALTH_GOOD)
+
+static inline void set_battery_status(struct power_supply *psy, int status)
+{
+	if (STATUS(psy) != status)
+		set_ps_int_property(psy, POWER_SUPPLY_PROP_STATUS, status);
+}
+
+static inline void set_charger_online(struct power_supply *psy, int online)
+{
+	if (ONLINE(psy) != online)
+		set_ps_int_property(psy, POWER_SUPPLY_PROP_ONLINE, online);
+}
+
+#endif
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 1c517c3..26fbc84 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -19,6 +19,7 @@
 #include <linux/power_supply.h>
 #include <linux/thermal.h>
 #include "power_supply.h"
+#include "power_supply_charger.h"
 
 /* exported for the APM Power driver, APM emulation */
 struct class *power_supply_class;
@@ -26,6 +27,13 @@
 
 static struct device_type power_supply_dev_type;
 
+static struct mutex ps_chrg_evt_lock;
+
+static struct power_supply_charger_cap power_supply_chrg_cap = {
+		.chrg_evt	= POWER_SUPPLY_CHARGER_EVENT_DISCONNECT,
+		.chrg_type	= POWER_SUPPLY_TYPE_USB,
+		.mA		= 0	/* 0 mA */
+};
 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
 					 struct power_supply *supply)
 {
@@ -67,27 +75,84 @@
 
 static void power_supply_changed_work(struct work_struct *work)
 {
+	unsigned long flags;
 	struct power_supply *psy = container_of(work, struct power_supply,
 						changed_work);
 
 	dev_dbg(psy->dev, "%s\n", __func__);
 
-	class_for_each_device(power_supply_class, NULL, psy,
-			      __power_supply_changed_work);
+	spin_lock_irqsave(&psy->changed_lock, flags);
+	if (psy->changed) {
+		psy->changed = false;
+		spin_unlock_irqrestore(&psy->changed_lock, flags);
 
-	power_supply_update_leds(psy);
+		class_for_each_device(power_supply_class, NULL, psy,
+				      __power_supply_changed_work);
 
-	kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE);
+		power_supply_trigger_charging_handler(psy);
+
+		power_supply_update_leds(psy);
+
+		kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE);
+		spin_lock_irqsave(&psy->changed_lock, flags);
+	}
+	if (!psy->changed)
+		pm_relax(psy->dev);
+	spin_unlock_irqrestore(&psy->changed_lock, flags);
 }
 
 void power_supply_changed(struct power_supply *psy)
 {
+	unsigned long flags;
+
+	if (psy == NULL) {
+		power_supply_trigger_charging_handler(psy);
+		return;
+	}
+
 	dev_dbg(psy->dev, "%s\n", __func__);
 
+	spin_lock_irqsave(&psy->changed_lock, flags);
+	psy->changed = true;
+	pm_stay_awake(psy->dev);
+	spin_unlock_irqrestore(&psy->changed_lock, flags);
 	schedule_work(&psy->changed_work);
 }
 EXPORT_SYMBOL_GPL(power_supply_changed);
 
+static int __power_supply_charger_event(struct device *dev, void *data)
+{
+	struct power_supply_charger_cap *cap =
+				(struct power_supply_charger_cap *)data;
+	struct power_supply *psy = dev_get_drvdata(dev);
+
+	if (psy->charging_port_changed)
+		psy->charging_port_changed(psy, cap);
+
+	return 0;
+}
+
+void power_supply_charger_event(struct power_supply_charger_cap cap)
+{
+	class_for_each_device(power_supply_class, NULL, &cap,
+				      __power_supply_charger_event);
+
+	mutex_lock(&ps_chrg_evt_lock);
+	memcpy(&power_supply_chrg_cap, &cap, sizeof(power_supply_chrg_cap));
+	mutex_unlock(&ps_chrg_evt_lock);
+}
+EXPORT_SYMBOL_GPL(power_supply_charger_event);
+
+void power_supply_query_charger_caps(struct power_supply_charger_cap *cap)
+{
+	mutex_lock(&ps_chrg_evt_lock);
+	memcpy(cap, &power_supply_chrg_cap, sizeof(power_supply_chrg_cap));
+	mutex_unlock(&ps_chrg_evt_lock);
+}
+EXPORT_SYMBOL_GPL(power_supply_query_charger_caps);
+
 #ifdef CONFIG_OF
 #include <linux/of.h>
 
@@ -271,12 +336,12 @@
 	unsigned int count = 0;
 
 	error = class_for_each_device(power_supply_class, NULL, &count,
-				      __power_supply_is_system_supplied);
+					__power_supply_is_system_supplied);
 
 	/*
-	 * If no power class device was found at all, most probably we are
-	 * running on a desktop system, so assume we are on mains power.
-	 */
+	* If no power class device was found at all, most probably we are
+	* running on a desktop system, so assume we are on mains power.
+	*/
 	if (count == 0)
 		return 1;
 
@@ -284,6 +349,30 @@
 }
 EXPORT_SYMBOL_GPL(power_supply_is_system_supplied);
 
+static int __power_supply_is_battery_connected(struct device *dev, void *data)
+{
+	union power_supply_propval ret = {0,};
+	struct power_supply *psy = dev_get_drvdata(dev);
+
+	if (psy->type == POWER_SUPPLY_TYPE_BATTERY) {
+		if (psy->get_property(psy, POWER_SUPPLY_PROP_PRESENT, &ret))
+			return 0;
+		if (ret.intval)
+			return ret.intval;
+	}
+	return 0;
+}
+
+int power_supply_is_battery_connected(void)
+{
+	int error;
+
+	error = class_for_each_device(power_supply_class, NULL, NULL,
+					__power_supply_is_battery_connected);
+	return error;
+}
+EXPORT_SYMBOL_GPL(power_supply_is_battery_connected);
+
 int power_supply_set_battery_charged(struct power_supply *psy)
 {
 	if (psy->type == POWER_SUPPLY_TYPE_BATTERY && psy->set_charged) {
@@ -326,13 +415,14 @@
 
 #ifdef CONFIG_THERMAL
 static int power_supply_read_temp(struct thermal_zone_device *tzd,
-		unsigned long *temp)
+		long *temp)
 {
 	struct power_supply *psy;
 	union power_supply_propval val;
 	int ret;
 
-	WARN_ON(tzd == NULL);
+	if (WARN_ON(tzd == NULL))
+		return -EINVAL;
 	psy = tzd->devdata;
 	ret = psy->get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
 
@@ -379,6 +469,8 @@
 	union power_supply_propval val;
 	int ret;
 
+	if (WARN_ON(tcd == NULL))
+		return -EINVAL;
 	psy = tcd->devdata;
 	ret = psy->get_property(psy,
 		POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
@@ -395,6 +487,8 @@
 	union power_supply_propval val;
 	int ret;
 
+	if (WARN_ON(tcd == NULL))
+		return -EINVAL;
 	psy = tcd->devdata;
 	ret = psy->get_property(psy,
 		POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
@@ -411,11 +505,15 @@
 	union power_supply_propval val;
 	int ret;
 
+	if (WARN_ON(tcd == NULL))
+		return -EINVAL;
 	psy = tcd->devdata;
 	val.intval = state;
 	ret = psy->set_property(psy,
 		POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
 
+	psy_charger_throttle_charger(psy, state);
+
 	return ret;
 }
 
@@ -504,6 +602,11 @@
 	if (rc)
 		goto device_add_failed;
 
+	spin_lock_init(&psy->changed_lock);
+	rc = device_init_wakeup(dev, true);
+	if (rc)
+		goto wakeup_init_failed;
+
 	rc = psy_register_thermal(psy);
 	if (rc)
 		goto register_thermal_failed;
@@ -516,15 +619,22 @@
 	if (rc)
 		goto create_triggers_failed;
 
+	if (IS_CHARGER(psy)) {
+		rc = power_supply_register_charger(psy);
+		if (rc)
+			goto charger_register_failed;
+	}
+
 	power_supply_changed(psy);
 
 	goto success;
 
+charger_register_failed:
 create_triggers_failed:
 	psy_unregister_cooler(psy);
 register_cooler_failed:
 	psy_unregister_thermal(psy);
 register_thermal_failed:
+wakeup_init_failed:
 	device_del(dev);
 kobject_set_name_failed:
 device_add_failed:
@@ -539,9 +649,12 @@
 {
 	cancel_work_sync(&psy->changed_work);
 	sysfs_remove_link(&psy->dev->kobj, "powers");
+	if (IS_CHARGER(psy))
+		power_supply_unregister_charger(psy);
 	power_supply_remove_triggers(psy);
 	psy_unregister_cooler(psy);
 	psy_unregister_thermal(psy);
+	device_init_wakeup(psy->dev, false);
 	device_unregister(psy->dev);
 }
 EXPORT_SYMBOL_GPL(power_supply_unregister);
@@ -555,6 +668,7 @@
 
 	power_supply_class->dev_uevent = power_supply_uevent;
 	power_supply_init_attrs(&power_supply_dev_type);
+	mutex_init(&ps_chrg_evt_lock);
 
 	return 0;
 }
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 29178f7..5236c35 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -44,7 +44,7 @@
 					  struct device_attribute *attr,
 					  char *buf) {
 	static char *type_text[] = {
-		"Unknown", "Battery", "UPS", "Mains", "USB",
+		"Unknown", "Battery", "UPS", "Mains", "USB", "USB",
 		"USB_DCP", "USB_CDP", "USB_ACA"
 	};
 	static char *status_text[] = {
@@ -105,7 +105,10 @@
 	else if (off >= POWER_SUPPLY_PROP_MODEL_NAME)
 		return sprintf(buf, "%s\n", value.strval);
 
-	return sprintf(buf, "%d\n", value.intval);
+	if (off == POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT)
+		return sprintf(buf, "%lld\n", value.int64val);
+	else
+		return sprintf(buf, "%d\n", value.intval);
 }
 
 static ssize_t power_supply_store_property(struct device *dev,
@@ -165,8 +168,14 @@
 	POWER_SUPPLY_ATTR(constant_charge_current_max),
 	POWER_SUPPLY_ATTR(constant_charge_voltage),
 	POWER_SUPPLY_ATTR(constant_charge_voltage_max),
+	POWER_SUPPLY_ATTR(charge_current_limit),
 	POWER_SUPPLY_ATTR(charge_control_limit),
 	POWER_SUPPLY_ATTR(charge_control_limit_max),
+	POWER_SUPPLY_ATTR(charge_current),
+	POWER_SUPPLY_ATTR(max_charge_current),
+	POWER_SUPPLY_ATTR(charge_voltage),
+	POWER_SUPPLY_ATTR(max_charge_voltage),
+	POWER_SUPPLY_ATTR(input_cur_limit),
 	POWER_SUPPLY_ATTR(energy_full_design),
 	POWER_SUPPLY_ATTR(energy_empty_design),
 	POWER_SUPPLY_ATTR(energy_full),
@@ -180,6 +189,8 @@
 	POWER_SUPPLY_ATTR(temp),
 	POWER_SUPPLY_ATTR(temp_alert_min),
 	POWER_SUPPLY_ATTR(temp_alert_max),
+	POWER_SUPPLY_ATTR(max_temp),
+	POWER_SUPPLY_ATTR(min_temp),
 	POWER_SUPPLY_ATTR(temp_ambient),
 	POWER_SUPPLY_ATTR(temp_ambient_alert_min),
 	POWER_SUPPLY_ATTR(temp_ambient_alert_max),
@@ -188,7 +199,18 @@
 	POWER_SUPPLY_ATTR(time_to_full_now),
 	POWER_SUPPLY_ATTR(time_to_full_avg),
 	POWER_SUPPLY_ATTR(type),
+	POWER_SUPPLY_ATTR(charge_term_cur),
+	POWER_SUPPLY_ATTR(enable_charging),
+	POWER_SUPPLY_ATTR(enable_charger),
+	POWER_SUPPLY_ATTR(cable_type),
+	POWER_SUPPLY_ATTR(priority),
 	POWER_SUPPLY_ATTR(scope),
+	/* Local extensions */
+	POWER_SUPPLY_ATTR(usb_hc),
+	POWER_SUPPLY_ATTR(usb_otg),
+	POWER_SUPPLY_ATTR(charge_enabled),
+	/* Local extensions of type int64_t */
+	POWER_SUPPLY_ATTR(charge_counter_ext),
 	/* Properties of type `const char *' */
 	POWER_SUPPLY_ATTR(model_name),
 	POWER_SUPPLY_ATTR(manufacturer),
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 115b644..28c0b10 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -28,6 +28,10 @@
 
 if PWM
 
+config PWM_SYSFS
+	bool
+	default y if SYSFS
+
 config PWM_AB8500
 	tristate "AB8500 PWM support"
 	depends on AB8500_CORE && ARCH_U8500
@@ -201,4 +205,13 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called pwm-vt8500.
 
+config PWM_INTEL_MID
+	tristate "Support for Intel MID PWM"
+	help
+	  This option enables support for the Intel MID PWM driver. Say Y
+	  here if you want to enable the PWM functionality.
+
+	  To compile this driver as a module, choose M here. The module will
+	  be called pwm-intel-mid.
+
 endif
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 94ba21e..b802906 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -1,4 +1,5 @@
 obj-$(CONFIG_PWM)		+= core.o
+obj-$(CONFIG_PWM_SYSFS)		+= sysfs.o
 obj-$(CONFIG_PWM_AB8500)	+= pwm-ab8500.o
 obj-$(CONFIG_PWM_ATMEL_TCB)	+= pwm-atmel-tcb.o
 obj-$(CONFIG_PWM_BFIN)		+= pwm-bfin.o
@@ -17,3 +18,4 @@
 obj-$(CONFIG_PWM_TWL)		+= pwm-twl.o
 obj-$(CONFIG_PWM_TWL_LED)	+= pwm-twl-led.o
 obj-$(CONFIG_PWM_VT8500)	+= pwm-vt8500.o
+obj-$(CONFIG_PWM_INTEL_MID)	+= pwm-intel-mid.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 32221cb..9e9f5b8 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -274,6 +274,8 @@
 	if (IS_ENABLED(CONFIG_OF))
 		of_pwmchip_add(chip);
 
+	pwmchip_sysfs_export(chip);
+
 out:
 	mutex_unlock(&pwm_lock);
 	return ret;
@@ -310,6 +312,8 @@
 
 	free_pwms(chip);
 
+	pwmchip_sysfs_unexport(chip);
+
 out:
 	mutex_unlock(&pwm_lock);
 	return ret;
@@ -402,10 +406,19 @@
  */
 int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
 {
+	int err;
+
 	if (!pwm || duty_ns < 0 || period_ns <= 0 || duty_ns > period_ns)
 		return -EINVAL;
 
-	return pwm->chip->ops->config(pwm->chip, pwm, duty_ns, period_ns);
+	err = pwm->chip->ops->config(pwm->chip, pwm, duty_ns, period_ns);
+	if (err)
+		return err;
+
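+	/*
+	 * Cache the programmed values so pwm_get_period() and
+	 * pwm_get_duty_cycle() report what was last written.
+	 */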
+	pwm->duty_cycle = duty_ns;
+	pwm->period = period_ns;
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(pwm_config);
 
@@ -418,6 +431,8 @@
  */
 int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity)
 {
+	int err;
+
 	if (!pwm || !pwm->chip->ops)
 		return -EINVAL;
 
@@ -427,7 +442,13 @@
 	if (test_bit(PWMF_ENABLED, &pwm->flags))
 		return -EBUSY;
 
-	return pwm->chip->ops->set_polarity(pwm->chip, pwm, polarity);
+	err = pwm->chip->ops->set_polarity(pwm->chip, pwm, polarity);
+	if (err)
+		return err;
+
+	pwm->polarity = polarity;
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(pwm_set_polarity);
 
diff --git a/drivers/pwm/pwm-intel-mid.c b/drivers/pwm/pwm-intel-mid.c
new file mode 100644
index 0000000..7897607
--- /dev/null
+++ b/drivers/pwm/pwm-intel-mid.c
@@ -0,0 +1,404 @@
+/*
+ * pwm-intel-mid.c: Driver for PWM on Intel MID platform
+ *
+ * (C) Copyright 2014 Intel Corporation
+ * Author: Nicolas Pernas Maradei <nicolas.pernas.maradei@emutex.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pwm.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+
+#define PWM_INTEL_MID_DRIVER_NAME     "pwm-intel-mid"
+#define PCI_DEVICE_ID_INTEL_MID_MRFLD 0x11a5
+#define CLOCK_RATE                    19200000
+#define BASE_10                       10
+#define NSECS_PER_SEC                 1000000000UL
+#define PWM_PERIOD_NS_MAX             218453000 /* about 4.6 Hz */
+#define PWM_PERIOD_NS_MIN             104 /* about 9.6 MHz */
+#define PWM_ON_TIME_DIVISOR_BITS      8
+#define PWM_BASE_UNIT_FRAC_BITS       14
+#define PWM_BASE_UNIT_INT_BITS        8
+#define PWM_COMPARE_UNIT_SIZE         256UL
+#define PWM_DYNAMIC_RANGE_DEFAULT     100UL
+#define PWM_DYNAMIC_RANGE_THRESHOLD   13333
+#define PWM_DEFAULT_PERIOD            4950495 /* about 200 Hz */
+#define PWM_CONTROL_REGISTER_SIZE     0x400
+
+#define get_control_register(pwm, pwm_id) \
+	((u8 *)(pwm)->regs + ((pwm_id) * PWM_CONTROL_REGISTER_SIZE))
+
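+/*
+ * Layout of the 32-bit PWMCTRL register, low bits first as gcc lays out
+ * bit-fields on little-endian x86: [7:0] on-time divisor, [21:8]
+ * base-unit fraction, [29:22] base-unit integer, [30] software update,
+ * [31] enable.
+ */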
+union pwmctrl_reg {
+	struct {
+		u32 on_time_divisor:PWM_ON_TIME_DIVISOR_BITS;
+		u32 base_unit_frac:PWM_BASE_UNIT_FRAC_BITS;
+		u32 base_unit_int:PWM_BASE_UNIT_INT_BITS;
+		u32 sw_update:1;
+		u32 enable:1;
+	} part;
+	u32 full;
+};
+
+struct intel_mid_pwm_chip {
+	struct pwm_chip chip;
+	void __iomem *regs;
+	union pwmctrl_reg *pwmctrls;
+	int num_of_pwms;
+};
+
+static inline struct intel_mid_pwm_chip *
+to_pwm(struct pwm_chip *chip)
+{
+	return container_of(chip, struct intel_mid_pwm_chip, chip);
+}
+
+static inline void
+pwm_set_enable_bit(void __iomem *reg, union pwmctrl_reg *pwmctrl, u8 value)
+{
+	pwmctrl->full = readl(reg);
+	pwmctrl->part.enable = value;
+	writel(pwmctrl->full, reg);
+}
+
+static void
+intel_mid_pwm_on_time_divisor(void __iomem *reg, union pwmctrl_reg *pwmctrl,
+	const u32 period, const u32 duty_cycle)
+{
+	u64 on_time_divisor;
+
+	/* Calculate and set on time divisor */
+	on_time_divisor = duty_cycle * (u64)(PWM_COMPARE_UNIT_SIZE - 1UL);
+	do_div(on_time_divisor, period);
+	on_time_divisor = PWM_COMPARE_UNIT_SIZE - on_time_divisor - 1UL;
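+	/*
+	 * Illustrative example: a 50% duty cycle gives 255 / 2 = 127
+	 * above, so the value programmed below is 256 - 127 - 1 = 128;
+	 * the field effectively encodes the "off" portion of the cycle.
+	 */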
+
+	pwmctrl->full = readl(reg);
+	pwmctrl->part.on_time_divisor = on_time_divisor;
+	writel(pwmctrl->full, reg);
+}
+
+static int
+intel_mid_pwm_base_unit(void __iomem *reg, union pwmctrl_reg *pwmctrl,
+	const u32 period, const u32 clock_rate)
+{
+	u32 dynamic_range = PWM_DYNAMIC_RANGE_DEFAULT;
+	u64 rest;
+	u64 tmp;
+	u64 fraction = 0;
+	u64 numerator = 1;
+	u64 base_unit_integer;
+	u64 frequency = NSECS_PER_SEC;
+	u32 base_unit_fraction = 0;
+	int i;
+
+	/*
+	 * The dynamic range multiplier is used to get more accurate
+	 * calculations in the fractional part of the base unit when the
+	 * frequency is low (less than 75 kHz). In effect, for low
+	 * frequencies all calculations are done using the period in
+	 * nanoseconds e-2; for high frequencies plain nanoseconds are used.
+	 */
+	if (period < PWM_DYNAMIC_RANGE_THRESHOLD)
+		dynamic_range = 1UL;
+	frequency *= dynamic_range;
+
+	/*
+	 * Calculate frequency: f (Hz) = 1e9 (ns) / p (ns). The result is
+	 * in Hz when dynamic_range is 1 and in centi-Hz when it is 100.
+	 */
+	do_div(frequency, period);
+
+	/*
+	 * base_unit is a 22-bit register field composed of a fractional
+	 * part (lower 14 bits) and an integer part (upper 8 bits). The
+	 * integer part calculation is trivial, done in place by do_div()
+	 * below.
+	 */
+	base_unit_integer = frequency * PWM_COMPARE_UNIT_SIZE;
+
+	rest = do_div(base_unit_integer, clock_rate * dynamic_range);
+
+	/*
+	 * The fractional part of base_unit needs to be calculated and
+	 * converted to binary fixed-point notation. Two steps are needed:
+	 * first calculate the fractional part in decimal, then convert it
+	 * to binary fixed point. Due to the lack of float support in the
+	 * kernel we use the remainder of the (frequency *
+	 * PWM_COMPARE_UNIT_SIZE / clock_rate) division to calculate it and
+	 * then, following the standard division algorithm, the remainder
+	 * is multiplied by 10 and divided by clock_rate several times
+	 * until the desired level of precision is reached. At the end it
+	 * looks like this: base_unit_fraction = fraction / numerator,
+	 * where numerator is a power of 10.
+	 *
+	 * E.g.: base_unit = 1.123, base_unit_fraction = 0.123,
+	 *       fraction = 123, numerator = 1000.
+	 */
+	for (i = 0; i < PWM_BASE_UNIT_FRAC_BITS; i++) {
+		tmp = rest * BASE_10;
+		rest = do_div(tmp, clock_rate * dynamic_range);
+		fraction += tmp;
+		fraction *= BASE_10;
+		numerator *= BASE_10;
+	}
+	do_div(fraction, BASE_10);
+
+	/*
+	 * At this point the fraction and numerator are known, as described
+	 * above. The binary fixed-point conversion is done by repeated
+	 * multiplication of the fraction (but using rationals (frac/num)
+	 * instead of floats). When the fraction is multiplied by 2 and
+	 * gets greater than or equal to 1 (in our case frac/num >= 1,
+	 * i.e. frac >= num), the next digit of the binary fixed-point
+	 * number is a '1' and the excess must be removed. In the original
+	 * algorithm the overflow digit is subtracted; here we can subtract
+	 * the numerator.
+	 */
+	for (i = 0; i < PWM_BASE_UNIT_FRAC_BITS; i++) {
+		/* Multiply fraction by 2 */
+		fraction <<= 1;
+		base_unit_fraction <<= 1;
+
+		/*
+		 * frac / num >= 1 -> set the next bit to '1' and remove
+		 * the "overflow digit"
+		 */
+		if (fraction >= numerator) {
+			base_unit_fraction |= 1;
+			fraction -= numerator;
+		}
+	}
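+
+	/*
+	 * Worked example (illustrative): for the default 4950495 ns period
+	 * (~202 Hz) with the 19.2 MHz clock, base_unit = 202 * 256 / 19.2e6
+	 * ~= 0.00269, so the integer part is 0 and the 14-bit fraction is
+	 * about 0.00269 * 2^14 ~= 44.
+	 */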
+	/*
+	 * A base-unit value of zero would produce no PWM output, so if
+	 * both parts come out as 0, force the fraction to 1.
+	 */
+	if (!base_unit_fraction && !base_unit_integer)
+		base_unit_fraction = 1UL;
+
+	pwmctrl->full = readl(reg);
+	pwmctrl->part.base_unit_int = (u32)base_unit_integer;
+	pwmctrl->part.base_unit_frac = base_unit_fraction;
+	writel(pwmctrl->full, reg);
+
+	return 0;
+}
+
+static int
+intel_mid_pwm_setup(void __iomem *reg, union pwmctrl_reg *pwmctrl,
+	int duty_ns, int period_ns)
+{
+	int ret;
+
+	/* Calculate and set base_unit */
+	ret = intel_mid_pwm_base_unit(reg, pwmctrl, period_ns, CLOCK_RATE);
+	if (ret)
+		return ret;
+
+	/* Calculate and set on time divisor */
+	intel_mid_pwm_on_time_divisor(reg, pwmctrl, period_ns, duty_ns);
+
+	/* Set software update bit */
+	pwmctrl->part.sw_update = 1UL;
+	writel(pwmctrl->full, reg);
+
+	return 0;
+}
+
+static int
+intel_mid_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm_dev,
+	int duty_ns, int period_ns)
+{
+	struct intel_mid_pwm_chip *pwm = to_pwm(chip);
+	union pwmctrl_reg *pwmctrl = &pwm->pwmctrls[pwm_dev->hwpwm];
+	void __iomem *reg = get_control_register(pwm, pwm_dev->hwpwm);
+
+	dev_dbg(chip->dev, "%s: period_ns %d, duty_ns %d\n", __func__,
+		period_ns, duty_ns);
+
+	/* Check the period is valid within HW capabilities */
+	if (period_ns < PWM_PERIOD_NS_MIN || period_ns > PWM_PERIOD_NS_MAX) {
+		dev_err(chip->dev, "Period (ns) must be in range %u:%u\n",
+			PWM_PERIOD_NS_MIN, PWM_PERIOD_NS_MAX);
+		return -EINVAL;
+	}
+
+	return intel_mid_pwm_setup(reg, pwmctrl, duty_ns, period_ns);
+}
+
+static int
+intel_mid_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm_dev)
+{
+	struct intel_mid_pwm_chip *pwm = to_pwm(chip);
+	union pwmctrl_reg *pwmctrl = &pwm->pwmctrls[pwm_dev->hwpwm];
+	int ret;
+	void __iomem *reg = get_control_register(pwm, pwm_dev->hwpwm);
+
+	pm_runtime_get_sync(chip->dev);
+
+	ret = intel_mid_pwm_setup(reg, pwmctrl, pwm_dev->duty_cycle,
+		pwm_dev->period);
+	if (ret) {
+		/* balance the pm_runtime_get_sync() above on failure */
+		pm_runtime_put(chip->dev);
+		return ret;
+	}
+
+	pwm_set_enable_bit(reg, pwmctrl, 1U);
+
+	dev_dbg(chip->dev, "%s: pwmctrl %#x\n", __func__, pwmctrl->full);
+
+	return 0;
+}
+
+static void
+intel_mid_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm_dev)
+{
+	struct intel_mid_pwm_chip *pwm = to_pwm(chip);
+	union pwmctrl_reg *pwmctrl = &pwm->pwmctrls[pwm_dev->hwpwm];
+	void __iomem *reg = get_control_register(pwm, pwm_dev->hwpwm);
+
+	pwm_set_enable_bit(reg, pwmctrl, 0);
+	pm_runtime_put(chip->dev);
+	dev_dbg(chip->dev, "%s: pwmctrl %#x\n", __func__, pwmctrl->full);
+}
+
+static const struct pwm_ops intel_mid_pwm_ops = {
+	.config = intel_mid_pwm_config,
+	.enable = intel_mid_pwm_enable,
+	.disable = intel_mid_pwm_disable,
+	.owner = THIS_MODULE,
+};
+
+static int
+intel_mid_pwm_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+	struct intel_mid_pwm_chip *pwm;
+	int ret, i;
+	resource_size_t resource_len;
+
+	pwm = devm_kzalloc(&pci->dev, sizeof(*pwm), GFP_KERNEL);
+	if (!pwm) {
+		dev_err(&pci->dev, "Can't allocate memory for pwm\n");
+		return -ENOMEM;
+	}
+
+	/* Init the device */
+	ret = pci_enable_device(pci);
+	if (ret) {
+		dev_err(&pci->dev, "Can't enable pci device\n");
+		return ret;
+	}
+
+	ret = pci_request_regions(pci, PWM_INTEL_MID_DRIVER_NAME);
+	if (ret) {
+		dev_err(&pci->dev, "Can't request regions\n");
+		goto do_disable_device;
+	}
+	pci_dev_get(pci);
+
+	pwm->regs = pci_ioremap_bar(pci, 0);
+	if (!pwm->regs) {
+		dev_err(&pci->dev, "ioremap failed\n");
+		ret = -EIO;
+		goto do_release_regions;
+	}
+
+	/* Calculate number of available pwm modules */
+	resource_len = pci_resource_len(pci, 0);
+	do_div(resource_len, PWM_CONTROL_REGISTER_SIZE);
+	pwm->num_of_pwms = resource_len;
+
+	/* allocate memory for PWM control register images */
+	pwm->pwmctrls = devm_kzalloc(&pci->dev,
+		sizeof(*pwm->pwmctrls) * pwm->num_of_pwms, GFP_KERNEL);
+	if (!pwm->pwmctrls) {
+		dev_err(&pci->dev, "Can't allocate memory for pwm pwmctrls\n");
+		ret = -ENOMEM;
+		goto do_unmap_regs;
+	}
+
+	/* register the driver with PWM framework */
+	pwm->chip.dev = &pci->dev;
+	pwm->chip.ops = &intel_mid_pwm_ops;
+	pwm->chip.base = -1;
+	pwm->chip.npwm = pwm->num_of_pwms;
+
+	ret = pwmchip_add(&pwm->chip);
+	if (ret) {
+		dev_err(&pci->dev, "Failed to add PWM chip: %d\n", ret);
+		goto do_unmap_regs;
+	}
+
+	/* Set default frequency/period (about 200Hz) on all pwm modules. */
+	for (i = 0; i < pwm->num_of_pwms; i++)
+		pwm->chip.pwms[i].period = PWM_DEFAULT_PERIOD;
+
+	pci_set_drvdata(pci, pwm);
+	pm_runtime_allow(&pci->dev);
+	pm_runtime_put_noidle(&pci->dev);
+
+	return ret;
+
+do_unmap_regs:
+	iounmap(pwm->regs);
+do_release_regions:
+	pci_release_regions(pci);
+do_disable_device:
+	pci_disable_device(pci);
+
+	return ret;
+}
+
+static void
+intel_mid_pwm_remove(struct pci_dev *pci)
+{
+	struct intel_mid_pwm_chip *pwm = pci_get_drvdata(pci);
+
+	pm_runtime_get_noresume(&pci->dev);
+	pm_runtime_forbid(&pci->dev);
+	pwmchip_remove(&pwm->chip);
+	iounmap(pwm->regs);
+	pci_release_regions(pci);
+	pci_disable_device(pci);
+	pci_set_drvdata(pci, NULL);
+}
+
+#ifdef CONFIG_PM
+static int
+intel_mid_pwm_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int
+intel_mid_pwm_runtime_resume(struct device *dev)
+{
+	return 0;
+}
+
+static void
+intel_mid_pwm_runtime_complete(struct device *dev) {
+}
+
+static const struct dev_pm_ops intel_mid_pm_ops = {
+	.prepare = intel_mid_pwm_runtime_suspend,
+	.complete = intel_mid_pwm_runtime_complete,
+	.runtime_suspend = intel_mid_pwm_runtime_suspend,
+	.runtime_resume = intel_mid_pwm_runtime_resume,
+};
+#endif
+
+/* PCI Routines */
+static DEFINE_PCI_DEVICE_TABLE(intel_mid_pwm_pci_ids) = {
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MID_MRFLD), 0},
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, intel_mid_pwm_pci_ids);
+
+static struct pci_driver intel_mid_pci_driver = {
+	.name = PWM_INTEL_MID_DRIVER_NAME,
+	.id_table = intel_mid_pwm_pci_ids,
+	.probe = intel_mid_pwm_probe,
+	.remove = intel_mid_pwm_remove,
+#ifdef CONFIG_PM
+	.driver = {
+		.pm = &intel_mid_pm_ops,
+	},
+#endif
+};
+
+module_pci_driver(intel_mid_pci_driver);
+
+MODULE_ALIAS("pci:" PWM_INTEL_MID_DRIVER_NAME);
+MODULE_DESCRIPTION("Intel(R) MID PWM driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Nicolas Pernas Maradei <nicolas.pernas.maradei@emutex.com>");
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
new file mode 100644
index 0000000..f98b4a0
--- /dev/null
+++ b/drivers/pwm/sysfs.c
@@ -0,0 +1,353 @@
+/*
+ * A simple sysfs interface for the generic PWM framework
+ *
+ * Copyright (C) 2013 H Hartley Sweeten <hsweeten@visionengravers.com>
+ *
+ * Based on previous work by Lars Poeschel <poeschel@lemonage.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
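+/*
+ * Example usage (paths follow the class and attribute names registered
+ * below; the chip number comes from the underlying pwm_chip):
+ *
+ *   echo 0 > /sys/class/pwm/pwmchip0/export
+ *   echo 1000000 > /sys/class/pwm/pwmchip0/pwm0/period
+ *   echo 500000 > /sys/class/pwm/pwmchip0/pwm0/duty_cycle
+ *   echo 1 > /sys/class/pwm/pwmchip0/pwm0/enable
+ */
+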
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/kdev_t.h>
+#include <linux/pwm.h>
+
+struct pwm_export {
+	struct device child;
+	struct pwm_device *pwm;
+};
+
+static struct pwm_export *child_to_pwm_export(struct device *child)
+{
+	return container_of(child, struct pwm_export, child);
+}
+
+static struct pwm_device *child_to_pwm_device(struct device *child)
+{
+	struct pwm_export *export = child_to_pwm_export(child);
+
+	return export->pwm;
+}
+
+static ssize_t pwm_period_show(struct device *child,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	const struct pwm_device *pwm = child_to_pwm_device(child);
+
+	return sprintf(buf, "%u\n", pwm->period);
+}
+
+static ssize_t pwm_period_store(struct device *child,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	struct pwm_device *pwm = child_to_pwm_device(child);
+	unsigned int val;
+	int ret;
+
+	ret = kstrtouint(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	ret = pwm_config(pwm, pwm->duty_cycle, val);
+
+	return ret ? : size;
+}
+
+static ssize_t pwm_duty_cycle_show(struct device *child,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	const struct pwm_device *pwm = child_to_pwm_device(child);
+
+	return sprintf(buf, "%u\n", pwm->duty_cycle);
+}
+
+static ssize_t pwm_duty_cycle_store(struct device *child,
+					struct device_attribute *attr,
+					const char *buf, size_t size)
+{
+	struct pwm_device *pwm = child_to_pwm_device(child);
+	unsigned int val;
+	int ret;
+
+	ret = kstrtouint(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	ret = pwm_config(pwm, val, pwm->period);
+
+	return ret ? : size;
+}
+
+static ssize_t pwm_enable_show(struct device *child,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	const struct pwm_device *pwm = child_to_pwm_device(child);
+	int enabled = test_bit(PWMF_ENABLED, &pwm->flags);
+
+	return sprintf(buf, "%d\n", enabled);
+}
+
+static ssize_t pwm_enable_store(struct device *child,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	struct pwm_device *pwm = child_to_pwm_device(child);
+	int val, ret;
+
+	ret = kstrtoint(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	switch (val) {
+	case 0:
+		pwm_disable(pwm);
+		break;
+	case 1:
+		ret = pwm_enable(pwm);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret ? : size;
+}
+
+static ssize_t pwm_polarity_show(struct device *child,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	const struct pwm_device *pwm = child_to_pwm_device(child);
+
+	return sprintf(buf, "%s\n", pwm->polarity ? "inversed" : "normal");
+}
+
+static ssize_t pwm_polarity_store(struct device *child,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	struct pwm_device *pwm = child_to_pwm_device(child);
+	enum pwm_polarity polarity;
+	int ret;
+
+	if (sysfs_streq(buf, "normal"))
+		polarity = PWM_POLARITY_NORMAL;
+	else if (sysfs_streq(buf, "inversed"))
+		polarity = PWM_POLARITY_INVERSED;
+	else
+		return -EINVAL;
+
+	ret = pwm_set_polarity(pwm, polarity);
+
+	return ret ? : size;
+}
+
+static DEVICE_ATTR(period, 0644, pwm_period_show, pwm_period_store);
+static DEVICE_ATTR(duty_cycle, 0644, pwm_duty_cycle_show, pwm_duty_cycle_store);
+static DEVICE_ATTR(enable, 0644, pwm_enable_show, pwm_enable_store);
+static DEVICE_ATTR(polarity, 0644, pwm_polarity_show, pwm_polarity_store);
+
+static struct attribute *pwm_attrs[] = {
+	&dev_attr_period.attr,
+	&dev_attr_duty_cycle.attr,
+	&dev_attr_enable.attr,
+	&dev_attr_polarity.attr,
+	NULL
+};
+
+static const struct attribute_group pwm_attr_group = {
+	.attrs	  = pwm_attrs,
+};
+
+static const struct attribute_group *pwm_attr_groups[] = {
+	&pwm_attr_group,
+	NULL,
+};
+
+static void pwm_export_release(struct device *child)
+{
+	struct pwm_export *export = child_to_pwm_export(child);
+
+	kfree(export);
+}
+
+static int pwm_export_child(struct device *parent, struct pwm_device *pwm)
+{
+	struct pwm_export *export;
+	int ret;
+
+	if (test_and_set_bit(PWMF_EXPORTED, &pwm->flags))
+		return -EBUSY;
+
+	export = kzalloc(sizeof(*export), GFP_KERNEL);
+	if (!export) {
+		clear_bit(PWMF_EXPORTED, &pwm->flags);
+		return -ENOMEM;
+	}
+
+	export->pwm = pwm;
+
+	export->child.release = pwm_export_release;
+	export->child.parent = parent;
+	export->child.devt = MKDEV(0, 0);
+	export->child.groups = pwm_attr_groups;
+	dev_set_name(&export->child, "pwm%u", pwm->hwpwm);
+
+	ret = device_register(&export->child);
+	if (ret) {
+		clear_bit(PWMF_EXPORTED, &pwm->flags);
+		kfree(export);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int pwm_unexport_match(struct device *child, void *data)
+{
+	return child_to_pwm_device(child) == data;
+}
+
+static int pwm_unexport_child(struct device *parent, struct pwm_device *pwm)
+{
+	struct device *child;
+
+	if (!test_and_clear_bit(PWMF_EXPORTED, &pwm->flags))
+		return -ENODEV;
+
+	child = device_find_child(parent, pwm, pwm_unexport_match);
+	if (!child)
+		return -ENODEV;
+
+	/* for device_find_child() */
+	put_device(child);
+	device_unregister(child);
+	pwm_put(pwm);
+
+	return 0;
+}
+
+static ssize_t pwm_export_store(struct device *parent,
+				struct device_attribute *attr,
+				const char *buf, size_t len)
+{
+	struct pwm_chip *chip = dev_get_drvdata(parent);
+	struct pwm_device *pwm;
+	unsigned int hwpwm;
+	int ret;
+
+	ret = kstrtouint(buf, 0, &hwpwm);
+	if (ret < 0)
+		return ret;
+
+	if (hwpwm >= chip->npwm)
+		return -ENODEV;
+
+	pwm = pwm_request_from_chip(chip, hwpwm, "sysfs");
+	if (IS_ERR(pwm))
+		return PTR_ERR(pwm);
+
+	ret = pwm_export_child(parent, pwm);
+	if (ret < 0)
+		pwm_put(pwm);
+
+	return ret ? : len;
+}
+
+static ssize_t pwm_unexport_store(struct device *parent,
+				  struct device_attribute *attr,
+				  const char *buf, size_t len)
+{
+	struct pwm_chip *chip = dev_get_drvdata(parent);
+	unsigned int hwpwm;
+	int ret;
+
+	ret = kstrtouint(buf, 0, &hwpwm);
+	if (ret < 0)
+		return ret;
+
+	if (hwpwm >= chip->npwm)
+		return -ENODEV;
+
+	ret = pwm_unexport_child(parent, &chip->pwms[hwpwm]);
+
+	return ret ? : len;
+}
+
+static ssize_t pwm_npwm_show(struct device *parent,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	const struct pwm_chip *chip = dev_get_drvdata(parent);
+
+	return sprintf(buf, "%u\n", chip->npwm);
+}
+
+static struct device_attribute pwm_chip_attrs[] = {
+	__ATTR(export, 0200, NULL, pwm_export_store),
+	__ATTR(unexport, 0200, NULL, pwm_unexport_store),
+	__ATTR(npwm, 0444, pwm_npwm_show, NULL),
+	__ATTR_NULL,
+};
+
+static struct class pwm_class = {
+	.name	   = "pwm",
+	.owner	  = THIS_MODULE,
+	.dev_attrs  = pwm_chip_attrs,
+};
+
+static int pwmchip_sysfs_match(struct device *parent, const void *data)
+{
+	return dev_get_drvdata(parent) == data;
+}
+
+void pwmchip_sysfs_export(struct pwm_chip *chip)
+{
+	struct device *parent;
+
+	/*
+	 * If device_create() fails, the pwm_chip is still usable by
+	 * the kernel; it's just not exported.
+	 */
+	parent = device_create(&pwm_class, chip->dev, MKDEV(0, 0), chip,
+				   "pwmchip%d", chip->base);
+	if (IS_ERR(parent)) {
+		dev_warn(chip->dev,
+			 "device_create failed for pwm_chip sysfs export\n");
+	}
+}
+
+void pwmchip_sysfs_unexport(struct pwm_chip *chip)
+{
+	struct device *parent;
+
+	parent = class_find_device(&pwm_class, NULL, chip,
+				   pwmchip_sysfs_match);
+	if (parent) {
+		/* for class_find_device() */
+		put_device(parent);
+		device_unregister(parent);
+	}
+}
+
+static int __init pwm_sysfs_init(void)
+{
+	return class_register(&pwm_class);
+}
+subsys_initcall(pwm_sysfs_init);
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 8bb2644..27b22de 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -514,5 +514,9 @@
 	  This driver provides support for the voltage regulators on the
 	  AS3711 PMIC
 
-endif
+config REGULATOR_PMIC_BASIN_COVE
+	tristate "PMIC Basin Cove voltage regulator"
+	help
+	  This driver controls the Intel Basin Cove PMIC voltage output
+	  regulators.
 
+endif
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 47a34ff..8b751e7 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -2,6 +2,7 @@
 # Makefile for regulator drivers.
 #
 
+CFLAGS_pmic_basin_cove.o		:= -Werror
 
 obj-$(CONFIG_REGULATOR) += core.o dummy.o fixed-helper.o
 obj-$(CONFIG_OF) += of_regulator.o
@@ -48,6 +49,7 @@
 obj-$(CONFIG_REGULATOR_TPS51632) += tps51632-regulator.o
 obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
 obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
+obj-$(CONFIG_REGULATOR_PMIC_BASIN_COVE) += pmic_basin_cove.o
 obj-$(CONFIG_REGULATOR_RC5T583)  += rc5t583-regulator.o
 obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
 obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
diff --git a/drivers/regulator/pmic_basin_cove.c b/drivers/regulator/pmic_basin_cove.c
new file mode 100644
index 0000000..6f23578
--- /dev/null
+++ b/drivers/regulator/pmic_basin_cove.c
@@ -0,0 +1,302 @@
+/*
+ * pmic_basin_cove.c - Merrifield regulator driver
+ * Copyright (c) 2013, Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/intel_basin_cove_pmic.h>
+#include <linux/regulator/machine.h>
+
+#include <asm/intel_scu_pmic.h>
+
+/* Intel Voltage cntrl register parameters*/
+#define REG_ENA_STATUS_MASK	0x01
+#define REG_VSEL_MASK		0xc0
+#define VSEL_SHIFT		6
+
+#define REG_ON			0x01
+#define REG_OFF			0xfe
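+
+/*
+ * Bit 0 of each VPROGxCNT register gates the rail: OR in REG_ON (0x01)
+ * to enable, AND with REG_OFF (0xfe) to disable. REG_VSEL_MASK
+ * (bits 7:6) selects one of up to four voltages from the platform
+ * table.
+ */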
+
+static const u16 reg_addr_offset[] = { VPROG1CNT_ADDR, VPROG2CNT_ADDR,
+					VPROG3CNT_ADDR };
+
+/**
+ * intel_pmic_reg_is_enabled - check whether the regulator is enabled
+ * @rdev: regulator_dev structure
+ *
+ * Returns 1 if the regulator is on, 0 if it is off, or a negative
+ * errno if the PMIC register cannot be read.
+ */
+static int intel_pmic_reg_is_enabled(struct regulator_dev *rdev)
+{
+	struct intel_pmic_info *pmic_info = rdev_get_drvdata(rdev);
+	u8 reg;
+	int ret;
+
+	/*FIXME: Is it ok to use the following IPC API*/
+	ret = intel_scu_ipc_ioread8(pmic_info->pmic_reg, &reg);
+	if (ret) {
+		dev_err(&rdev->dev,
+			"intel_scu_ipc_ioread8 returns error %08x\n", ret);
+		return ret;
+	}
+
+	return reg & REG_ENA_STATUS_MASK;
+}
+
+/**
+ * intel_pmic_reg_enable - enable the regulator
+ * @rdev: regulator_dev structure
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int intel_pmic_reg_enable(struct regulator_dev *rdev)
+{
+	struct intel_pmic_info *pmic_info = rdev_get_drvdata(rdev);
+	u8  reg;
+	int ret;
+
+	ret = intel_scu_ipc_ioread8(pmic_info->pmic_reg, &reg);
+	if (ret) {
+		dev_err(&rdev->dev,
+			"intel_scu_ipc_ioread8 returns error %08x\n", ret);
+		return ret;
+	}
+	return intel_scu_ipc_iowrite8(pmic_info->pmic_reg, (reg | REG_ON));
+}
+
+/**
+ * intel_pmic_reg_disable - disable the regulator
+ * @rdev: regulator_dev structure
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int intel_pmic_reg_disable(struct regulator_dev *rdev)
+{
+	struct intel_pmic_info *pmic_info = rdev_get_drvdata(rdev);
+	u8  reg;
+	int ret;
+
+	ret = intel_scu_ipc_ioread8(pmic_info->pmic_reg, &reg);
+	if (ret) {
+		dev_err(&rdev->dev,
+			"intel_scu_ipc_ioread8 returns error %08x\n", ret);
+		return ret;
+	}
+	return intel_scu_ipc_iowrite8(pmic_info->pmic_reg,
+		(reg & REG_OFF));
+}
+
+/**
+ * intel_pmic_reg_listvoltage - return the voltage for a selector;
+ *                              called from the core framework
+ * @rdev: regulator source
+ * @index: selector passed in from the core
+ *
+ * Returns the voltage in microvolts, or -EINVAL if the index is out
+ * of range.
+ */
+static int intel_pmic_reg_listvoltage(struct regulator_dev *rdev,
+								unsigned index)
+{
+	struct intel_pmic_info *pmic_info = rdev_get_drvdata(rdev);
+
+	if (index >= pmic_info->table_len) {
+		dev_err(&rdev->dev, "Index out of range in listvoltage\n");
+		return -EINVAL;
+	}
+	return pmic_info->table[index] * 1000;
+}
+
+/**
+ * intel_pmic_reg_getvoltage - return the current voltage in uV
+ * @rdev: regulator_dev structure
+ *
+ * Returns the voltage in microvolts, or a negative errno on failure.
+ */
+static int intel_pmic_reg_getvoltage(struct regulator_dev *rdev)
+{
+	struct intel_pmic_info *pmic_info = rdev_get_drvdata(rdev);
+	u8  reg, vsel;
+	int ret;
+
+	ret = intel_scu_ipc_ioread8(pmic_info->pmic_reg, &reg);
+	if (ret) {
+		dev_err(&rdev->dev,
+			"intel_scu_ipc_ioread8 returns error %08x\n", ret);
+		return ret;
+	}
+	vsel = (reg & REG_VSEL_MASK) >> VSEL_SHIFT;
+	if (vsel >= pmic_info->table_len) {
+		dev_err(&rdev->dev, "vsel value is out of range\n");
+		return -EINVAL;
+	}
+	dev_dbg(&rdev->dev, "Voltage value is %d mV\n",
+		pmic_info->table[vsel]);
+	return pmic_info->table[vsel] * 1000;
+}
+
+/**
+ * intel_pmic_reg_setvoltage - set the regulator output voltage
+ * @rdev: regulator_dev structure
+ * @min_uV: minimum required voltage in uV
+ * @max_uV: maximum acceptable voltage in uV
+ * @selector: voltage selector passed back to the core layer
+ *
+ * Sets the regulator to a supported voltage within the requested range.
+ * Returns 0 on success or a negative error value on failure.
+ */
+static int intel_pmic_reg_setvoltage(struct regulator_dev *rdev, int min_uV,
+					int max_uV, unsigned *selector)
+{
+	struct intel_pmic_info *pmic_info = rdev_get_drvdata(rdev);
+	int ret;
+	u8 reg, vsel;
+
+	for (vsel = 0; vsel < pmic_info->table_len; vsel++) {
+		int mV = pmic_info->table[vsel];
+		int uV = mV * 1000;
+		if (min_uV > uV || uV > max_uV)
+			continue;
+
+		*selector = vsel;
+		ret = intel_scu_ipc_ioread8(pmic_info->pmic_reg, &reg);
+		if (ret) {
+			dev_err(&rdev->dev,
+				"intel_scu_ipc_ioread8 error %08x\n", ret);
+			return ret;
+		}
+		reg &= ~REG_VSEL_MASK;
+		reg |= vsel << VSEL_SHIFT;
+		dev_dbg(&rdev->dev,
+			"intel_pmic_reg_setvoltage voltage: %u uV\n", uV);
+		return intel_scu_ipc_iowrite8(pmic_info->pmic_reg, reg);
+	}
+	return -EINVAL;
+}
+
+/* regulator_ops registration */
+static struct regulator_ops intel_pmic_ops = {
+	.is_enabled = intel_pmic_reg_is_enabled,
+	.enable = intel_pmic_reg_enable,
+	.disable = intel_pmic_reg_disable,
+	.get_voltage = intel_pmic_reg_getvoltage,
+	.set_voltage = intel_pmic_reg_setvoltage,
+	.list_voltage = intel_pmic_reg_listvoltage,
+};
+/**
+* struct regulator_desc - Regulator descriptor
+* Each regulator registered with the core is described with a structure of
+* this type.
+* @name: Identifying name for the regulator.
+* @id: Numerical identifier for the regulator.
+* @n_voltages: Number of selectors available for ops.list_voltage().
+* @ops: Regulator operations table.
+* @irq: Interrupt number for the regulator.
+* @type: Indicates if the regulator is a voltage or current regulator.
+* @owner: Module providing the regulator, used for refcounting.
+*/
+static struct regulator_desc intel_pmic_desc[] = {
+	{
+		.name = "vprog1",
+		.id = VPROG1,
+		.ops = &intel_pmic_ops,
+		.n_voltages = ARRAY_SIZE(VPROG1_VSEL_table),
+		.type = REGULATOR_VOLTAGE,
+		.owner = THIS_MODULE,
+	},
+	{
+		.name = "vprog2",
+		.id = VPROG2,
+		.ops = &intel_pmic_ops,
+		.n_voltages = ARRAY_SIZE(VPROG2_VSEL_table),
+		.type = REGULATOR_VOLTAGE,
+		.owner = THIS_MODULE,
+	},
+	{
+		.name = "vprog3",
+		.id = VPROG3,
+		.ops = &intel_pmic_ops,
+		.n_voltages = ARRAY_SIZE(VPROG3_VSEL_table),
+		.type = REGULATOR_VOLTAGE,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int basin_cove_pmic_probe(struct platform_device *pdev)
+{
+	struct intel_pmic_info *pdata = dev_get_platdata(&pdev->dev);
+	struct regulator_config config = { };
+	unsigned int i;
+
+	if (!pdata || !pdata->pmic_reg)
+		return -EINVAL;
+
+	config.dev = &pdev->dev;
+	config.init_data = pdata->init_data;
+	config.driver_data = pdata;
+
+	for (i = 0; i < ARRAY_SIZE(reg_addr_offset); i++) {
+		if (reg_addr_offset[i] == pdata->pmic_reg)
+			break;
+	}
+	if (i == (ARRAY_SIZE(reg_addr_offset)))
+		return -EINVAL;
+
+	pdata->intel_pmic_rdev =
+		regulator_register(&intel_pmic_desc[i], &config);
+	if (IS_ERR(pdata->intel_pmic_rdev)) {
+		dev_err(&pdev->dev, "can't register regulator: error %ld\n",
+				PTR_ERR(pdata->intel_pmic_rdev));
+		return PTR_ERR(pdata->intel_pmic_rdev);
+	}
+	platform_set_drvdata(pdev, pdata->intel_pmic_rdev);
+	dev_dbg(&pdev->dev, "registered regulator\n");
+	return 0;
+}
+
+static int basin_cove_pmic_remove(struct platform_device *pdev)
+{
+	regulator_unregister(platform_get_drvdata(pdev));
+	return 0;
+}
+
+static const struct platform_device_id basin_cove_id_table[] = {
+	{ "intel_regulator", 0 },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(platform, basin_cove_id_table);
+
+static struct platform_driver basin_cove_pmic_driver = {
+	.driver		= {
+		.name = "intel_regulator",
+		.owner = THIS_MODULE,
+	},
+	.probe = basin_cove_pmic_probe,
+	.remove = basin_cove_pmic_remove,
+	.id_table = basin_cove_id_table,
+};
+static int __init basin_cove_pmic_init(void)
+{
+	return platform_driver_register(&basin_cove_pmic_driver);
+}
+subsys_initcall(basin_cove_pmic_init);
+
+static void __exit basin_cove_pmic_exit(void)
+{
+	platform_driver_unregister(&basin_cove_pmic_driver);
+}
+module_exit(basin_cove_pmic_exit);
+
+MODULE_DESCRIPTION("Basin Cove voltage regulator driver");
+MODULE_AUTHOR("Vishwesh/Mahesh/Sudarshan");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index d4d377c..97d3774 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -63,4 +63,15 @@
 	  It's safe to say n here if you're not interested in multimedia
 	  offloading.
 
+config INTEL_MID_REMOTEPROC
+	tristate "Intel MID remoteproc support"
+	depends on X86
+	select REMOTEPROC
+	select RPMSG
+	help
+	  Say y here to enable the Intel MID remote processor core
+	  driver and the SCU driver, which provide x86-side remoteproc
+	  support for the platform's remote processors.
+
 endmenu
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index ac2ff75..efb0eb8 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -10,3 +10,4 @@
 obj-$(CONFIG_OMAP_REMOTEPROC)		+= omap_remoteproc.o
 obj-$(CONFIG_STE_MODEM_RPROC)	 	+= ste_modem_rproc.o
 obj-$(CONFIG_DA8XX_REMOTEPROC)		+= da8xx_remoteproc.o
+obj-$(CONFIG_INTEL_MID_REMOTEPROC)	+= intel_mid_rproc_scu.o intel_mid_rproc_core.o
diff --git a/drivers/remoteproc/intel_mid_rproc_core.c b/drivers/remoteproc/intel_mid_rproc_core.c
new file mode 100644
index 0000000..dc30ba1f
--- /dev/null
+++ b/drivers/remoteproc/intel_mid_rproc_core.c
@@ -0,0 +1,269 @@
+/*
+ * INTEL MID Remote Processor Core driver
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/remoteproc.h>
+#include <linux/rpmsg.h>
+#include <linux/slab.h>
+#include <linux/virtio_ring.h>
+#include <linux/virtio_ids.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+
+#include "intel_mid_rproc_core.h"
+#include "remoteproc_internal.h"
+
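+/*
+ * 53 is the fixed name-service endpoint address used by the virtio
+ * rpmsg bus; name-service announcements are always delivered there.
+ */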
+#define RPMSG_NS_ADDR	53
+
+/**
+ * rpmsg_ns_alloc() - allocate a name service announcement structure
+ * @name: name of remote service
+ * @id: rproc type
+ * @addr: address of remote service
+ */
+struct rpmsg_ns_info *rpmsg_ns_alloc(const char *name, int id, u32 addr)
+{
+	struct rpmsg_ns_info *ns_info;
+
+	ns_info = kzalloc(sizeof(struct rpmsg_ns_info), GFP_KERNEL);
+	if (ns_info) {
+		/* name is a fixed-size field; bound the copy */
+		strlcpy(ns_info->name, name, RPMSG_NAME_SIZE);
+		ns_info->type = id;
+		ns_info->addr = addr;
+		ns_info->flags = RPMSG_NS_CREATE;
+	}
+
+	return ns_info;
+};
+EXPORT_SYMBOL_GPL(rpmsg_ns_alloc);
+
+/**
+ * rpmsg_ns_add_to_list() - add a name service node to a list
+ * @info: name service node
+ * @nslist: list to add the node to
+ */
+void rpmsg_ns_add_to_list(struct rpmsg_ns_info *info,
+				struct rpmsg_ns_list *nslist)
+{
+	mutex_lock(&nslist->lock);
+	list_add_tail(&info->node, &nslist->list);
+	mutex_unlock(&nslist->lock);
+}
+EXPORT_SYMBOL_GPL(rpmsg_ns_add_to_list);
+
+/**
+ * free_rpmsg_ns() - free an rpmsg name service node
+ * @info: name service node
+ */
+void free_rpmsg_ns(struct rpmsg_ns_info *info)
+{
+	kfree(info);
+}
+
+/**
+ * rpmsg_ns_del_list() - free an rpmsg name service list
+ * @nslist: list to free
+ */
+void rpmsg_ns_del_list(struct rpmsg_ns_list *nslist)
+{
+	struct rpmsg_ns_info *info, *next;
+
+	mutex_lock(&nslist->lock);
+	list_for_each_entry_safe(info, next, &nslist->list, node) {
+		list_del(&info->node);
+		free_rpmsg_ns(info);
+	}
+	mutex_unlock(&nslist->lock);
+}
+EXPORT_SYMBOL_GPL(rpmsg_ns_del_list);
+
+/**
+ * find_rvdev() - find the rproc state of a supported virtio device
+ * @rproc: rproc handle
+ * @id: virtio device id
+ */
+struct rproc_vdev *find_rvdev(struct rproc *rproc, int id)
+{
+	struct rproc_vdev *rvdev;
+
+	list_for_each_entry(rvdev, &rproc->rvdevs, node)
+		if (rvdev->vdev.id.device == id)
+			return rvdev;
+
+	return NULL;
+}
+
+/*
+ * Since the vring structure cannot be obtained directly from struct
+ * rproc_vring, two local vrings are created and identified by matching
+ * them against the rproc vrings.
+ * @id: virtio device id.
+ * Currently a single rproc_vdev is supported by the firmware, and its
+ * id is VIRTIO_ID_RPMSG (declared in linux/virtio_ids.h).
+ */
+int find_vring_index(struct rproc *rproc, int vqid, int id)
+{
+	struct rproc_vdev *rvdev;
+	struct device *dev = rproc->dev.parent;
+	int vring_idx = 0;
+
+	rvdev = find_rvdev(rproc, id);
+	if (rvdev == NULL) {
+		dev_err(dev, "virtio device not found\n");
+		return -EINVAL;
+	}
+
+	while (vring_idx < RVDEV_NUM_VRINGS) {
+		if (rvdev->vring[vring_idx].notifyid == vqid)
+			break;
+		vring_idx++;
+	}
+
+	/* no match found? there's a problem */
+	if (vring_idx == RVDEV_NUM_VRINGS) {
+		dev_err(dev, "Can not find vring\n");
+		return -EINVAL;
+	}
+
+	return vring_idx;
+}
+
+void intel_mid_rproc_vring_init(struct rproc *rproc,
+			struct vring *vring, enum local_vring_idx id)
+{
+	int align, len;
+	void *addr;
+	struct rproc_vdev *rvdev;
+	struct device *dev = rproc->dev.parent;
+
+	rvdev = find_rvdev(rproc, VIRTIO_ID_RPMSG);
+	if (rvdev == NULL) {
+		dev_err(dev, "virtio device not found\n");
+		return;
+	}
+
+	addr = rvdev->vring[id].va;
+	align = rvdev->vring[id].align;
+	len = rvdev->vring[id].len;
+	vring_init(vring, len, addr, align);
+}
+
+/**
+ * intel_mid_rproc_vq_interrupt() - inform a vq interrupt to rproc
+ *				    after vq buffers are handled
+ * @rproc: rproc handle
+ * @msg: vq notify id
+ */
+void intel_mid_rproc_vq_interrupt(struct rproc *rproc, int msg)
+{
+	struct device *dev = rproc->dev.parent;
+
+	if (rproc_vq_interrupt(rproc, msg) == IRQ_NONE)
+		dev_err(dev, "no message was found in vqid %d\n", msg);
+}
+
+/**
+ * intel_mid_rproc_msg_handle() - generic interface as a vq buffer handle
+ *				  during rpmsg transaction
+ * @iproc: intel mid rproc data
+ */
+int intel_mid_rproc_msg_handle(struct intel_mid_rproc *iproc)
+{
+	int ret;
+	struct vring *r_vring, *s_vring;
+	void *r_virt_addr, *s_virt_addr;
+	u16 r_idx, s_idx;
+	u64 r_dma_addr, s_dma_addr;
+	u32 r_len, s_len;
+
+	r_vring = &iproc->rx_vring;
+	s_vring = &iproc->tx_vring;
+
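+	/*
+	 * Vring sizes are powers of two, so masking with (num - 1) is a
+	 * cheap modulo on the ring indices.
+	 */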
+	r_idx = iproc->r_vring_last_used & (r_vring->num - 1);
+	s_idx = iproc->s_vring_last_used & (s_vring->num - 1);
+
+	r_dma_addr = r_vring->desc[r_idx].addr;
+	s_dma_addr = s_vring->desc[s_idx].addr;
+
+	r_virt_addr = phys_to_virt(r_dma_addr);
+	s_virt_addr = phys_to_virt(s_dma_addr);
+
+	ret = iproc->rproc_rpmsg_handle(r_virt_addr, s_virt_addr,
+						&r_len, &s_len);
+
+	r_vring->used->ring[r_idx].id = r_idx;
+	r_vring->used->ring[r_idx].len = r_len;
+	r_vring->used->idx++;
+
+	s_vring->used->ring[s_idx].id = s_idx;
+	s_vring->used->ring[s_idx].len = s_len;
+	s_vring->used->idx++;
+
+	iproc->r_vring_last_used++;
+	iproc->s_vring_last_used++;
+
+	return ret;
+}
+
+/**
+ * Remoteproc side rx buffer handler during name service creation.
+ * @iproc: intel mid rproc data
+ * @ns_info: name service info
+ *
+ * After remote processor receives name service messages, it needs to
+ * update the elements of its virtio device's rx virtqueue buffer
+ * before next rpmsg transaction.
+ * Here we have this function simulating the above effect.
+ */
+int intel_mid_rproc_ns_handle(struct intel_mid_rproc *iproc,
+				struct rpmsg_ns_info *ns_info)
+{
+	u16 index;
+	u32 len;
+	u64 dma_addr;
+	void *virt_addr;
+
+	struct vring *r_vring;
+	struct rpmsg_hdr *msg;
+	struct rpmsg_ns_msg *nsm;
+
+	if (ns_info == NULL) {
+		pr_err("ns_info = NULL\n");
+		return -ENODEV;
+	}
+
+	r_vring = &iproc->rx_vring;
+
+	index = iproc->r_vring_last_used & (r_vring->num - 1);
+
+	len = sizeof(*msg) + sizeof(*nsm);
+
+	dma_addr = r_vring->desc[index].addr;
+	virt_addr = phys_to_virt(dma_addr);
+
+	msg = (struct rpmsg_hdr *)virt_addr;
+	nsm = (struct rpmsg_ns_msg *)(virt_addr + sizeof(*msg));
+
+	nsm->addr = ns_info->addr;
+	nsm->flags = ns_info->flags;
+	strncpy(nsm->name, ns_info->name, RPMSG_NAME_SIZE);
+
+	msg->len = sizeof(*nsm);
+	msg->src = nsm->addr;
+	msg->dst = RPMSG_NS_ADDR;
+
+	r_vring->used->ring[index].id = index;
+	r_vring->used->ring[index].len = len;
+	r_vring->used->idx++;
+
+	iproc->r_vring_last_used++;
+
+	return 0;
+}
diff --git a/drivers/remoteproc/intel_mid_rproc_core.h b/drivers/remoteproc/intel_mid_rproc_core.h
new file mode 100644
index 0000000..8ce5721
--- /dev/null
+++ b/drivers/remoteproc/intel_mid_rproc_core.h
@@ -0,0 +1,79 @@
+/*
+ * INTEL MID Remote Processor Core Header File
+ *
+ * Copyright (C) 2012 Intel, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef INTEL_MID_RPROC_CORE_H
+#define INTEL_MID_RPROC_CORE_H
+
+typedef int (*rpmsg_handle_t)(void *rx_buf, void *tx_buf,
+				u32 *r_len, u32 *s_len);
+
+int scu_ipc_rpmsg_handle(void *rx_buf, void *tx_buf, u32 *r_len, u32 *s_len);
+int psh_ipc_rpmsg_handle(void *rx_buf, void *tx_buf, u32 *len);
+
+#define RPROC_FW_LOADING_TIMEOUT	(3 * HZ)
+#define IPROC_NAME_SIZE 20
+
+/**
+ * struct intel_mid_rproc - intel mid remote processor
+ * @ns_enabled: name service enabled flag
+ * @name: rproc name
+ * @type: rproc type
+ * @r_vring_last_used: last used index of rx vring
+ * @s_vring_last_used: last used index of tx vring
+ * @rproc: rproc handle
+ * @rx_vring: rproc rx vring
+ * @tx_vring: rproc tx vring
+ * @ns_info: loop cursor when creating ns channels
+ * @rproc_rpmsg_handle: rproc private rpmsg handle
+ */
+struct intel_mid_rproc {
+	bool ns_enabled;
+	char name[IPROC_NAME_SIZE];
+	u32 type;
+	u32 r_vring_last_used;
+	u32 s_vring_last_used;
+	struct rproc *rproc;
+	struct vring rx_vring;
+	struct vring tx_vring;
+	struct rpmsg_ns_info *ns_info;
+	rpmsg_handle_t rproc_rpmsg_handle;
+};
+
+enum local_vring_idx {
+	RX_VRING,
+	TX_VRING,
+};
+
+extern void intel_mid_rproc_vq_interrupt(struct rproc *rproc, int msg);
+extern int intel_mid_rproc_msg_handle(struct intel_mid_rproc *iproc);
+extern int intel_mid_rproc_ns_handle(struct intel_mid_rproc *iproc,
+					struct rpmsg_ns_info *ns_info);
+
+extern struct rproc_vdev *find_rvdev(struct rproc *rproc, int id);
+extern int find_vring_index(struct rproc *rproc, int vqid, int id);
+extern void intel_mid_rproc_vring_init(struct rproc *rproc,
+			struct vring *vring, enum local_vring_idx id);
+
+extern void rpmsg_ns_del_list(struct rpmsg_ns_list *nslist);
+
+/*
+ * Please do NOT use these APIs to send IPC commands; use the rpmsg
+ * commands defined in <asm/intel_mid_rpmsg.h> instead.
+ */
+extern void intel_scu_ipc_send_command(u32 cmd);
+
+/* Issue commands to the SCU with or without data */
+extern int intel_scu_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen,
+		u32 *out, u32 outlen, u32 dptr, u32 sptr);
+
+/* IPC locking */
+extern void intel_scu_ipc_lock(void);
+extern void intel_scu_ipc_unlock(void);
+
+#endif /* INTEL_MID_RPROC_CORE_H */
diff --git a/drivers/remoteproc/intel_mid_rproc_scu.c b/drivers/remoteproc/intel_mid_rproc_scu.c
new file mode 100644
index 0000000..53a1cdc
--- /dev/null
+++ b/drivers/remoteproc/intel_mid_rproc_scu.c
@@ -0,0 +1,441 @@
+/*
+ * INTEL MID Remote Processor - SCU driver
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/remoteproc.h>
+#include <linux/delay.h>
+#include <linux/rpmsg.h>
+#include <linux/slab.h>
+#include <linux/virtio_ring.h>
+#include <linux/virtio_ids.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+
+#include <asm/intel_scu_ipc.h>
+#include <asm/scu_ipc_rpmsg.h>
+#include <asm/intel-mid.h>
+
+#include "intel_mid_rproc_core.h"
+#include "remoteproc_internal.h"
+
+static struct rpmsg_ns_list *nslist;
+
+
+static int scu_ipc_command(void *tx_buf)
+{
+	struct tx_ipc_msg *tx_msg;
+	int ret = 0;
+
+	tx_msg = (struct tx_ipc_msg *)tx_buf;
+
+	ret = intel_scu_ipc_command(tx_msg->cmd, tx_msg->sub,
+				tx_msg->in, tx_msg->inlen,
+				tx_msg->out, tx_msg->outlen);
+	return ret;
+}
+
+static int scu_ipc_raw_command(void *tx_buf)
+{
+	struct tx_ipc_msg *tx_msg;
+	int ret = 0;
+
+	tx_msg = (struct tx_ipc_msg *)tx_buf;
+
+	intel_scu_ipc_lock();
+	ret = intel_scu_ipc_raw_cmd(tx_msg->cmd, tx_msg->sub,
+				tx_msg->in, tx_msg->inlen,
+				tx_msg->out, tx_msg->outlen,
+				tx_msg->dptr, tx_msg->sptr);
+	intel_scu_ipc_unlock();
+
+	return ret;
+}
+
+static int scu_ipc_simple_command(void *tx_buf)
+{
+	struct tx_ipc_msg *tx_msg;
+	int ret = 0;
+
+	tx_msg = (struct tx_ipc_msg *)tx_buf;
+
+	ret = intel_scu_ipc_simple_command(tx_msg->cmd, tx_msg->sub);
+
+	return ret;
+}
+
+static void scu_ipc_send_command(void *tx_buf)
+{
+	struct tx_ipc_msg *tx_msg;
+
+	tx_msg = (struct tx_ipc_msg *)tx_buf;
+	intel_scu_ipc_send_command(tx_msg->sub << 12 | tx_msg->cmd);
+}
+
+static int scu_ipc_fw_command(void *tx_buf)
+{
+	struct tx_ipc_msg *tx_msg;
+	int ret = 0;
+
+	tx_msg = (struct tx_ipc_msg *)tx_buf;
+
+	switch (tx_msg->cmd) {
+	case RP_GET_FW_REVISION:
+		ret = scu_ipc_command(tx_buf);
+		break;
+	case RP_FW_UPDATE:
+		/* Only scu_ipc_send_command works for fw update */
+		scu_ipc_send_command(tx_buf);
+		break;
+	default:
+		pr_info("Command %x not supported\n", tx_msg->cmd);
+		break;
+	}
+
+	return ret;
+}
+
+static int scu_ipc_util_command(void *tx_buf)
+{
+	struct tx_ipc_msg *tx_msg;
+	int ret = 0;
+
+	tx_msg = (struct tx_ipc_msg *)tx_buf;
+
+	switch (tx_msg->cmd) {
+	case RP_GET_FW_REVISION:
+	case RP_GET_HOBADDR:
+	case RP_OSC_CLK_CTRL:
+		ret = scu_ipc_command(tx_buf);
+		break;
+	case RP_S0IX_COUNTER:
+		ret = scu_ipc_simple_command(tx_buf);
+		break;
+	case RP_WRITE_OSNIB:
+		ret = scu_ipc_raw_command(tx_buf);
+		break;
+	default:
+		pr_info("Command %x not supported\n", tx_msg->cmd);
+		break;
+	}
+
+	return ret;
+}
+
+static int scu_ipc_vrtc_command(void *tx_buf)
+{
+	struct tx_ipc_msg *tx_msg;
+	int ret = 0;
+
+	tx_msg = (struct tx_ipc_msg *)tx_buf;
+
+	switch (tx_msg->cmd) {
+	case RP_GET_HOBADDR:
+		ret = scu_ipc_command(tx_buf);
+		break;
+	case RP_VRTC:
+		ret = scu_ipc_simple_command(tx_buf);
+		break;
+	default:
+		pr_info("Command %x not supported\n", tx_msg->cmd);
+		break;
+	}
+
+	return ret;
+}
+
+static int scu_ipc_fw_logging_command(void *tx_buf)
+{
+	struct tx_ipc_msg *tx_msg;
+	int ret = 0;
+
+	tx_msg = (struct tx_ipc_msg *)tx_buf;
+
+	switch (tx_msg->cmd) {
+	case RP_GET_HOBADDR:
+		ret = scu_ipc_command(tx_buf);
+		break;
+	case RP_CLEAR_FABERROR:
+		ret = scu_ipc_simple_command(tx_buf);
+		break;
+	default:
+		pr_info("Command %x not supported\n", tx_msg->cmd);
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * scu_ipc_rpmsg_handle() - SCU rproc specific IPC rpmsg handler
+ * @rx_buf: rx buffer to be filled in
+ * @tx_buf: tx buffer to be consumed
+ * @r_len: rx buffer length
+ * @s_len: tx buffer length
+ */
+int scu_ipc_rpmsg_handle(void *rx_buf, void *tx_buf, u32 *r_len, u32 *s_len)
+{
+	struct rpmsg_hdr *tx_hdr, *tmp_hdr;
+	struct tx_ipc_msg *tx_msg;
+	struct rx_ipc_msg *tmp_msg;
+	int ret = 0;
+
+	*r_len = sizeof(struct rpmsg_hdr) + sizeof(struct rx_ipc_msg);
+	*s_len = sizeof(struct rpmsg_hdr) + sizeof(struct tx_ipc_msg);
+
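+	/*
+	 * Each buffer is an rpmsg header immediately followed by the IPC
+	 * payload: tx carries a struct tx_ipc_msg, rx a struct rx_ipc_msg.
+	 */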
+	/* get tx_msg and send scu ipc command */
+	tx_hdr = (struct rpmsg_hdr *)tx_buf;
+	tx_msg = (struct tx_ipc_msg *)(tx_buf + sizeof(*tx_hdr));
+
+	tmp_hdr = (struct rpmsg_hdr *)rx_buf;
+	tmp_msg = (struct rx_ipc_msg *)tmp_hdr->data;
+
+	switch (tx_hdr->dst) {
+	case RP_PMIC_ACCESS:
+	case RP_FLIS_ACCESS:
+	case RP_IPC_COMMAND:
+		tmp_msg->status = scu_ipc_command(tx_msg);
+		break;
+	case RP_SET_WATCHDOG:
+		if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER)
+			tmp_msg->status = scu_ipc_raw_command(tx_msg);
+		else
+			tmp_msg->status = scu_ipc_command(tx_msg);
+		break;
+	case RP_MIP_ACCESS:
+	case RP_IPC_RAW_COMMAND:
+		tmp_msg->status = scu_ipc_raw_command(tx_msg);
+		break;
+	case RP_IPC_SIMPLE_COMMAND:
+		tmp_msg->status = scu_ipc_simple_command(tx_msg);
+		break;
+	case RP_IPC_UTIL:
+		tmp_msg->status = scu_ipc_util_command(tx_msg);
+		break;
+	case RP_FW_ACCESS:
+		tmp_msg->status = scu_ipc_fw_command(tx_msg);
+		break;
+	case RP_VRTC:
+		tmp_msg->status = scu_ipc_vrtc_command(tx_msg);
+		break;
+	case RP_FW_LOGGING:
+		tmp_msg->status = scu_ipc_fw_logging_command(tx_msg);
+		break;
+	default:
+		tmp_msg->status = 0;
+		pr_info("Command %x not supported yet\n", tx_hdr->dst);
+		break;
+	}
+
+	/* prepare rx buffer, switch src and dst */
+	tmp_hdr->src = tx_hdr->dst;
+	tmp_hdr->dst = tx_hdr->src;
+
+	tmp_hdr->flags = tx_hdr->flags;
+	tmp_hdr->len = sizeof(struct rx_ipc_msg);
+
+	return ret;
+}
+
+/* kick a virtqueue */
+static void intel_rproc_scu_kick(struct rproc *rproc, int vqid)
+{
+	int idx;
+	int ret;
+	struct intel_mid_rproc *iproc;
+	struct rproc_vdev *rvdev;
+	struct device *dev = rproc->dev.parent;
+	static unsigned long ns_info_all_received;
+
+	iproc = (struct intel_mid_rproc *)rproc->priv;
+
+	/*
+	 * Remote processor virtqueue being kicked.
+	 * This part simulates remote processor handling messages.
+	 */
+	idx = find_vring_index(rproc, vqid, VIRTIO_ID_RPMSG);
+
+	switch (idx) {
+	case RX_VRING:
+		if (iproc->ns_enabled && !ns_info_all_received) {
+			/*
+			 * Push messages with ns_info for ALL available
+			 * name services in the list (nslist) into the
+			 * rx buffers.
+			 */
+			list_for_each_entry_continue(iproc->ns_info,
+				&nslist->list, node) {
+				ret = intel_mid_rproc_ns_handle(iproc,
+					iproc->ns_info);
+				if (ret) {
+					dev_err(dev, "ns handle error\n");
+					return;
+				}
+			}
+
+			ns_info_all_received = 1;
+			intel_mid_rproc_vq_interrupt(rproc, vqid);
+		}
+		break;
+
+	case TX_VRING:
+
+		dev_dbg(dev, "remote processor got the message ...\n");
+		intel_mid_rproc_msg_handle(iproc);
+		intel_mid_rproc_vq_interrupt(rproc, vqid);
+
+		/*
+		 * After remoteproc handles the message, it calls
+		 * the receive callback.
+		 * TODO: replace this part with real remote processor
+		 * operation.
+		 */
+		rvdev = find_rvdev(rproc, VIRTIO_ID_RPMSG);
+		if (rvdev)
+			intel_mid_rproc_vq_interrupt(rproc,
+				rvdev->vring[RX_VRING].notifyid);
+		else
+			WARN(1, "%s: can't find given rproc state\n", __func__);
+		break;
+
+	default:
+		dev_err(dev, "invalid vring index\n");
+		break;
+	}
+}
+
+/* power up the remote processor */
+static int intel_rproc_scu_start(struct rproc *rproc)
+{
+	struct intel_mid_rproc *iproc;
+
+	pr_info("Started intel scu remote processor\n");
+	iproc = (struct intel_mid_rproc *)rproc->priv;
+	intel_mid_rproc_vring_init(rproc, &iproc->rx_vring, RX_VRING);
+	intel_mid_rproc_vring_init(rproc, &iproc->tx_vring, TX_VRING);
+
+	return 0;
+}
+
+/* power off the remote processor */
+static int intel_rproc_scu_stop(struct rproc *rproc)
+{
+	pr_info("Stopped intel scu remote processor\n");
+	return 0;
+}
+
+static struct rproc_ops intel_rproc_scu_ops = {
+	.start		= intel_rproc_scu_start,
+	.stop		= intel_rproc_scu_stop,
+	.kick		= intel_rproc_scu_kick,
+};
+
+static int intel_rproc_scu_probe(struct platform_device *pdev)
+{
+	struct intel_mid_rproc_pdata *pdata = pdev->dev.platform_data;
+	struct intel_mid_rproc *iproc;
+	struct rproc *rproc;
+	int ret;
+
+	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret) {
+		dev_err(pdev->dev.parent, "dma_set_coherent_mask: %d\n", ret);
+		return ret;
+	}
+
+	rproc = rproc_alloc(&pdev->dev, pdata->name, &intel_rproc_scu_ops,
+				pdata->firmware, sizeof(*iproc));
+	if (!rproc)
+		return -ENOMEM;
+
+	iproc = rproc->priv;
+	iproc->rproc = rproc;
+	nslist = pdata->nslist;
+
+	platform_set_drvdata(pdev, rproc);
+
+	ret = rproc_add(rproc);
+	if (ret)
+		goto free_rproc;
+
+	/*
+	 * Temporarily follow the rproc framework to load firmware
+	 * TODO: modify remoteproc code according to X86 architecture
+	 */
+	if (0 == wait_for_completion_timeout(&rproc->firmware_loading_complete,
+		RPROC_FW_LOADING_TIMEOUT)) {
+		dev_err(pdev->dev.parent, "fw loading not complete\n");
+		ret = -ETIMEDOUT;
+		goto free_rproc;
+	}
+
+	/* Initialize intel_rproc_scu private data */
+	strncpy(iproc->name, pdev->id_entry->name, sizeof(iproc->name) - 1);
+	iproc->type = pdev->id_entry->driver_data;
+	iproc->r_vring_last_used = 0;
+	iproc->s_vring_last_used = 0;
+	iproc->ns_enabled = true;
+	iproc->rproc_rpmsg_handle = scu_ipc_rpmsg_handle;
+	iproc->ns_info = list_entry(&nslist->list,
+			struct rpmsg_ns_info, node);
+
+	return 0;
+
+free_rproc:
+	rproc_put(rproc);
+	return ret;
+}
+
+static int intel_rproc_scu_remove(struct platform_device *pdev)
+{
+	struct rproc *rproc = platform_get_drvdata(pdev);
+
+	if (nslist)
+		rpmsg_ns_del_list(nslist);
+
+	rproc_del(rproc);
+	rproc_put(rproc);
+
+	return 0;
+}
+
+static const struct platform_device_id intel_rproc_scu_id_table[] = {
+	{ "intel_rproc_scu", RPROC_SCU },
+	{ },
+};
+
+static struct platform_driver intel_rproc_scu_driver = {
+	.probe = intel_rproc_scu_probe,
+	.remove = intel_rproc_scu_remove,
+	.driver = {
+		.name = "intel_rproc_scu",
+		.owner = THIS_MODULE,
+	},
+	.id_table = intel_rproc_scu_id_table,
+};
+
+static int __init intel_rproc_scu_init(void)
+{
+	return platform_driver_register(&intel_rproc_scu_driver);
+}
+
+static void __exit intel_rproc_scu_exit(void)
+{
+	platform_driver_unregister(&intel_rproc_scu_driver);
+}
+
+subsys_initcall(intel_rproc_scu_init);
+module_exit(intel_rproc_scu_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Ning Li<ning.li@intel.com>");
+MODULE_DESCRIPTION("INTEL MID Remoteproc Core driver");
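
The probe above expects platform data carrying the rproc name, firmware image and name-service list (pdata->name, pdata->firmware, pdata->nslist). A hedged sketch of the board-side registration follows; the pdata type is assumed to come from intel_mid_remoteproc.h, the nslist type is inferred from the rpmsg_ns_* helpers used above, and the firmware path is hypothetical:

static struct rpmsg_ns_list example_nslist;	/* hypothetical; filled by platform code */

static struct intel_mid_rproc_pdata example_pdata = {
	.name		= "intel_rproc_scu",
	.firmware	= "intel_mid/example_scu.fw",	/* hypothetical path */
	.nslist		= &example_nslist,
};

static struct platform_device example_rproc_dev = {
	.name			= "intel_rproc_scu",
	.id			= -1,
	.dev.platform_data	= &example_pdata,
};

Registering example_rproc_dev with platform_device_register() then binds it to intel_rproc_scu_driver through the id table above.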
diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig
index 69a2193..620fef5 100644
--- a/drivers/rpmsg/Kconfig
+++ b/drivers/rpmsg/Kconfig
@@ -6,4 +6,13 @@
 	select VIRTIO
 	select VIRTUALIZATION
 
+config RPMSG_IPC
+	tristate "Build rpmsg ipc driver"
+	depends on RPMSG
+	help
+	  Build an rpmsg ipc driver, which demonstrates how the IA
+	  communicates with the remote processor through IPC rpmsg
+	  over the rpmsg bus. It registers an rpmsg driver matched
+	  with the rpmsg device created by the remoteproc framework.
+
 endmenu
diff --git a/drivers/rpmsg/Makefile b/drivers/rpmsg/Makefile
index 7617fcb..d8f5030 100644
--- a/drivers/rpmsg/Makefile
+++ b/drivers/rpmsg/Makefile
@@ -1 +1,2 @@
 obj-$(CONFIG_RPMSG)	+= virtio_rpmsg_bus.o
+obj-$(CONFIG_RPMSG_IPC)	+= intel_mid_rpmsg.o
diff --git a/drivers/rpmsg/intel_mid_rpmsg.c b/drivers/rpmsg/intel_mid_rpmsg.c
new file mode 100644
index 0000000..b2660e5
--- /dev/null
+++ b/drivers/rpmsg/intel_mid_rpmsg.c
@@ -0,0 +1,446 @@
+/*
+ * intel_mid_rpmsg.c - Intel RPMSG Driver
+ *
+ * Copyright (C) 2012 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/rpmsg.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/platform_data/intel_mid_remoteproc.h>
+
+#include <asm/intel_mid_rpmsg.h>
+
+/* Instance for generic kernel IPC calls */
+static struct rpmsg_device_data rpmsg_ddata[RPMSG_IPC_COMMAND_TYPE_NUM] = {
+	[RPMSG_IPC_COMMAND] = {
+		.name = "rpmsg_ipc_command",
+		.rpdev = NULL,	/* initialized during driver probe */
+		.rpmsg_instance = NULL, /* initialized during driver probe */
+	},
+	[RPMSG_IPC_SIMPLE_COMMAND] = {
+		.name = "rpmsg_ipc_simple_command",
+		.rpdev = NULL,
+		.rpmsg_instance = NULL,
+	},
+	[RPMSG_IPC_RAW_COMMAND] = {
+		.name = "rpmsg_ipc_raw_command",
+		.rpdev = NULL,
+		.rpmsg_instance = NULL,
+	},
+};
+
+/* Generic rpmsg ipc interfaces.
+ * Modules can call these APIs directly without registering an rpmsg driver.
+ *
+ * The argument list is the same as intel_scu_ipc_command(), so simply
+ * change intel_scu_ipc_command() to rpmsg_send_generic_command().
+ */
+int rpmsg_send_generic_command(u32 cmd, u32 sub,
+				u8 *in, u32 inlen,
+				u32 *out, u32 outlen)
+{
+	struct rpmsg_instance *rpmsg_ipc_instance =
+		rpmsg_ddata[RPMSG_IPC_COMMAND].rpmsg_instance;
+
+	return rpmsg_send_command(rpmsg_ipc_instance, cmd, sub,
+					in, out, inlen, outlen);
+}
+EXPORT_SYMBOL(rpmsg_send_generic_command);
+
+int rpmsg_send_generic_simple_command(u32 cmd, u32 sub)
+{
+	struct rpmsg_instance *rpmsg_ipc_instance =
+		rpmsg_ddata[RPMSG_IPC_SIMPLE_COMMAND].rpmsg_instance;
+
+	return rpmsg_send_simple_command(rpmsg_ipc_instance, cmd, sub);
+}
+EXPORT_SYMBOL(rpmsg_send_generic_simple_command);
+
+int rpmsg_send_generic_raw_command(u32 cmd, u32 sub,
+				   u8 *in, u32 inlen,
+				   u32 *out, u32 outlen,
+				   u32 dptr, u32 sptr)
+{
+	struct rpmsg_instance *rpmsg_ipc_instance =
+		rpmsg_ddata[RPMSG_IPC_RAW_COMMAND].rpmsg_instance;
+
+	return rpmsg_send_raw_command(rpmsg_ipc_instance, cmd, sub,
+					in, out, inlen, outlen, sptr, dptr);
+}
+EXPORT_SYMBOL(rpmsg_send_generic_raw_command);
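
Per the comment above, existing kernel callers migrate by renaming the call; the argument order is unchanged. A minimal before/after sketch, using the IPCMSG_VRTC command that appears in the rtc-mrst changes later in this patch:

/* before: direct SCU IPC call */
ret = intel_scu_ipc_command(IPCMSG_VRTC, IPC_CMD_VRTC_SETTIME,
			NULL, 0, NULL, 0);

/* after: same arguments, routed over the rpmsg bus */
ret = rpmsg_send_generic_command(IPCMSG_VRTC, IPC_CMD_VRTC_SETTIME,
			NULL, 0, NULL, 0);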
+
+/* Global lock for rpmsg framework */
+static struct rpmsg_lock global_lock = {
+	.lock = __MUTEX_INITIALIZER(global_lock.lock),
+	.locked_prev = 0,
+	.pending = ATOMIC_INIT(0),
+};
+
+#define is_global_locked_prev		(global_lock.locked_prev)
+#define set_global_locked_prev(lock)	(global_lock.locked_prev = lock)
+#define global_locked_by_current	(global_lock.lock.owner == current)
+
+void rpmsg_global_lock(void)
+{
+	atomic_inc(&global_lock.pending);
+	mutex_lock(&global_lock.lock);
+}
+EXPORT_SYMBOL(rpmsg_global_lock);
+
+void rpmsg_global_unlock(void)
+{
+	mutex_unlock(&global_lock.lock);
+	if (!atomic_dec_and_test(&global_lock.pending))
+		schedule();
+}
+EXPORT_SYMBOL(rpmsg_global_unlock);
+
+static void rpmsg_lock(void)
+{
+	if (!mutex_trylock(&global_lock.lock)) {
+		if (global_locked_by_current)
+			set_global_locked_prev(1);
+		else
+			rpmsg_global_lock();
+	}
+}
+
+static void rpmsg_unlock(void)
+{
+	if (!is_global_locked_prev)
+		rpmsg_global_unlock();
+	else
+		set_global_locked_prev(0);
+}
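
rpmsg_lock()/rpmsg_unlock() make the global lock re-entrant for the thread that already owns it: the inner acquisition only sets locked_prev, and the inner release only clears it, so the mutex stays held until the outer rpmsg_global_unlock(). A sketch of the intended nesting (CMD_A and CMD_B are hypothetical command ids):

rpmsg_global_lock();			/* outer critical section */
ret = rpmsg_send_command(instance, CMD_A, 0, NULL, NULL, 0, 0);
if (!ret)				/* inner rpmsg_lock() just sets locked_prev */
	ret = rpmsg_send_command(instance, CMD_B, 0, NULL, NULL, 0, 0);
rpmsg_global_unlock();			/* mutex actually released here */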
+
+int rpmsg_send_command(struct rpmsg_instance *instance, u32 cmd,
+						u32 sub, u8 *in,
+						u32 *out, u32 inlen,
+						u32 outlen)
+{
+	int ret = 0;
+
+	if (!instance) {
+		pr_err("%s: Instance is NULL\n", __func__);
+		return -EFAULT;
+	}
+
+	/* Hold global rpmsg lock */
+	rpmsg_lock();
+
+	mutex_lock(&instance->instance_lock);
+
+	/* Prepare Tx buffer */
+	instance->tx_msg->cmd = cmd;
+	instance->tx_msg->sub = sub;
+	instance->tx_msg->in = in;
+	instance->tx_msg->out = out;
+	instance->tx_msg->inlen = inlen;
+	instance->tx_msg->outlen = outlen;
+
+	/* Prepare Rx buffer */
+	mutex_lock(&instance->rx_lock);
+	instance->rx_msg->status = -1;
+	mutex_unlock(&instance->rx_lock);
+	INIT_COMPLETION(instance->reply_arrived);
+
+	/* Send message to remote processor(SCU) using rpdev channel */
+	ret = rpmsg_send_offchannel(
+					instance->rpdev,
+					instance->endpoint->addr,
+					instance->rpdev->dst,
+					instance->tx_msg,
+					sizeof(*instance->tx_msg)
+					);
+	if (ret) {
+		dev_err(&instance->rpdev->dev, "%s failed: %d\n",
+						 __func__, ret);
+		goto end;
+	}
+
+	if (0 == wait_for_completion_timeout(&instance->reply_arrived,
+						RPMSG_TX_TIMEOUT)) {
+		dev_err(&instance->rpdev->dev,
+				"rpmsg command timed out\n");
+		ret = -ETIMEDOUT;
+		goto end;
+	}
+
+	mutex_lock(&instance->rx_lock);
+	ret = instance->rx_msg->status;
+	mutex_unlock(&instance->rx_lock);
+end:
+	mutex_unlock(&instance->instance_lock);
+	rpmsg_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL(rpmsg_send_command);
+
+int rpmsg_send_raw_command(struct rpmsg_instance *instance, u32 cmd,
+						u32 sub, u8 *in,
+						u32 *out, u32 inlen,
+						u32 outlen, u32 sptr,
+						u32 dptr)
+{
+	int ret = 0;
+
+	if (!instance) {
+		pr_err("%s: Instance is NULL\n", __func__);
+		return -EFAULT;
+	}
+
+	mutex_lock(&instance->instance_lock);
+	instance->tx_msg->sptr = sptr;
+	instance->tx_msg->dptr = dptr;
+	mutex_unlock(&instance->instance_lock);
+
+	ret = rpmsg_send_command(instance, cmd, sub, in, out, inlen, outlen);
+
+	return ret;
+}
+EXPORT_SYMBOL(rpmsg_send_raw_command);
+
+int rpmsg_send_simple_command(struct rpmsg_instance *instance, u32 cmd,
+						u32 sub)
+{
+	int ret;
+
+	ret = rpmsg_send_command(instance, cmd, sub, NULL, NULL, 0, 0);
+
+	return ret;
+}
+EXPORT_SYMBOL(rpmsg_send_simple_command);
+
+static void rpmsg_recv_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+#ifdef DEBUG_RPMSG_MSG
+	static int rx_count;
+#endif
+	struct rpmsg_instance *instance = priv;
+
+	if (len != sizeof(struct rx_ipc_msg)) {
+		dev_warn(&rpdev->dev, "%s, incorrect msg length\n", __func__);
+		return;
+	}
+
+#ifdef DEBUG_RPMSG_MSG
+	dev_info(&rpdev->dev, "incoming msg %d (src: 0x%x)\n", ++rx_count, src);
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+#endif
+
+	mutex_lock(&instance->rx_lock);
+
+	memcpy(instance->rx_msg, data, len);
+
+	mutex_unlock(&instance->rx_lock);
+
+	complete(&instance->reply_arrived);
+
+}
+
+int alloc_rpmsg_instance(struct rpmsg_channel *rpdev,
+				struct rpmsg_instance **pInstance)
+{
+	int ret = 0;
+	struct rpmsg_instance *instance;
+
+	dev_info(&rpdev->dev, "Allocating rpmsg_instance\n");
+
+	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
+	if (!instance) {
+		ret = -ENOMEM;
+		dev_err(&rpdev->dev, "kzalloc rpmsg_instance failed\n");
+		goto alloc_out;
+	}
+
+	instance->rpdev = rpdev;
+
+	instance->tx_msg = kzalloc(sizeof(struct tx_ipc_msg), GFP_KERNEL);
+	if (!instance->tx_msg) {
+		ret = -ENOMEM;
+		dev_err(&rpdev->dev, "kzalloc instance tx_msg failed\n");
+		goto error_tx_msg_create;
+	}
+
+	instance->rx_msg = kzalloc(sizeof(struct rx_ipc_msg), GFP_KERNEL);
+	if (!instance->rx_msg) {
+		ret = -ENOMEM;
+		dev_err(&rpdev->dev, "kzalloc instance rx_msg failed\n");
+		goto error_rx_msg_create;
+	}
+
+	instance->endpoint = rpmsg_create_ept(rpdev, rpmsg_recv_cb,
+							instance,
+							RPMSG_ADDR_ANY);
+	if (!instance->endpoint) {
+		dev_err(&rpdev->dev, "create instance endpoint failed\n");
+		ret = -ENOMEM;
+		goto error_endpoint_create;
+	}
+
+	goto alloc_out;
+
+error_endpoint_create:
+	kfree(instance->rx_msg);
+	instance->rx_msg = NULL;
+error_rx_msg_create:
+	kfree(instance->tx_msg);
+	instance->tx_msg = NULL;
+error_tx_msg_create:
+	kfree(instance);
+	instance = NULL;
+alloc_out:
+	*pInstance = instance;
+	return ret;
+
+}
+EXPORT_SYMBOL(alloc_rpmsg_instance);
+
+void free_rpmsg_instance(struct rpmsg_channel *rpdev,
+				struct rpmsg_instance **pInstance)
+{
+	struct rpmsg_instance *instance = *pInstance;
+
+	mutex_lock(&instance->instance_lock);
+	rpmsg_destroy_ept(instance->endpoint);
+	kfree(instance->tx_msg);
+	instance->tx_msg = NULL;
+	kfree(instance->rx_msg);
+	instance->rx_msg = NULL;
+	mutex_unlock(&instance->instance_lock);
+	kfree(instance);
+	*pInstance = NULL;
+	dev_info(&rpdev->dev, "Freeing rpmsg device\n");
+}
+EXPORT_SYMBOL(free_rpmsg_instance);
+
+void init_rpmsg_instance(struct rpmsg_instance *instance)
+{
+	init_completion(&instance->reply_arrived);
+	mutex_init(&instance->instance_lock);
+	mutex_init(&instance->rx_lock);
+}
+EXPORT_SYMBOL(init_rpmsg_instance);
+
+static int rpmsg_ipc_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+	int i;
+	struct rpmsg_device_data *ddata = rpmsg_ddata;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed rpmsg_ipc device %s\n", rpdev->id.name);
+
+	for (i = RPMSG_IPC_COMMAND; i < RPMSG_IPC_COMMAND_TYPE_NUM; i++) {
+		if (!strncmp(rpdev->id.name, ddata[i].name, RPMSG_NAME_SIZE)) {
+
+			/* Allocate rpmsg instance for kernel IPC calls */
+			ret = alloc_rpmsg_instance(rpdev,
+					&ddata[i].rpmsg_instance);
+			if (!ddata[i].rpmsg_instance) {
+				dev_err(&rpdev->dev,
+					"alloc rpmsg instance failed\n");
+				goto out;
+			}
+
+			/* Initialize rpmsg instance */
+			init_rpmsg_instance(ddata[i].rpmsg_instance);
+
+			ddata[i].rpdev = rpdev;
+			break;
+		}
+	}
+
+out:
+	return ret;
+}
+
+static void rpmsg_ipc_remove(struct rpmsg_channel *rpdev)
+{
+	int i;
+	struct rpmsg_device_data *ddata = rpmsg_ddata;
+
+	for (i = RPMSG_IPC_COMMAND; i < RPMSG_IPC_COMMAND_TYPE_NUM; i++) {
+		if (!strncmp(rpdev->id.name, ddata[i].name, RPMSG_NAME_SIZE)) {
+			free_rpmsg_instance(rpdev, &ddata[i].rpmsg_instance);
+			break;
+		}
+	}
+	dev_info(&rpdev->dev, "Removed rpmsg_ipc device\n");
+}
+
+static void rpmsg_ipc_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id rpmsg_ipc_id_table[] = {
+	{ .name	= "rpmsg_ipc_command" },
+	{ .name	= "rpmsg_ipc_simple_command" },
+	{ .name	= "rpmsg_ipc_raw_command" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, rpmsg_ipc_id_table);
+
+static struct rpmsg_driver rpmsg_ipc = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= rpmsg_ipc_id_table,
+	.probe		= rpmsg_ipc_probe,
+	.callback	= rpmsg_ipc_cb,
+	.remove		= rpmsg_ipc_remove,
+};
+
+static int __init rpmsg_ipc_init(void)
+{
+	return register_rpmsg_driver(&rpmsg_ipc);
+}
+subsys_initcall(rpmsg_ipc_init);
+
+static void __exit rpmsg_ipc_exit(void)
+{
+	return unregister_rpmsg_driver(&rpmsg_ipc);
+}
+module_exit(rpmsg_ipc_exit);
+
+MODULE_AUTHOR("Ning Li<ning.li@intel.com>");
+MODULE_DESCRIPTION("Intel IPC RPMSG Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index b6135d4..a9ad4a7 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -749,9 +749,9 @@
 	dev_dbg(dev, "TX From 0x%x, To 0x%x, Len %d, Flags %d, Reserved %d\n",
 					msg->src, msg->dst, msg->len,
 					msg->flags, msg->reserved);
-	print_hex_dump(KERN_DEBUG, "rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1,
+/*	print_hex_dump(KERN_DEBUG, "rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1,
 					msg, sizeof(*msg) + msg->len, true);
-
+*/
 	sg_init_one(&sg, msg, sizeof(*msg) + len);
 
 	mutex_lock(&vrp->tx_lock);
@@ -786,9 +786,9 @@
 	dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Reserved: %d\n",
 					msg->src, msg->dst, msg->len,
 					msg->flags, msg->reserved);
-	print_hex_dump(KERN_DEBUG, "rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
+/*	print_hex_dump(KERN_DEBUG, "rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
 					msg, sizeof(*msg) + msg->len, true);
-
+*/
 	/*
 	 * We currently use fixed-sized buffers, so trivially sanitize
 	 * the reported payload length.
@@ -898,10 +898,10 @@
 	struct device *dev = &vrp->vdev->dev;
 	int ret;
 
-	print_hex_dump(KERN_DEBUG, "NS announcement: ",
+/*	print_hex_dump(KERN_DEBUG, "NS announcement: ",
 			DUMP_PREFIX_NONE, 16, 1,
 			data, len, true);
-
+*/
 	if (len != sizeof(*msg)) {
 		dev_err(dev, "malformed ns msg (%d)\n", len);
 		return;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index b983813..fe9640c 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -614,6 +614,20 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-cmos.
 
+config RTC_DRV_CMOS_WAKEUP_FROM_LPSTATES
+	tristate "PC-style 'CMOS' can wakeup from low power states"
+	depends on RTC_DRV_CMOS
+	help
+	  Say "yes" to give the rtc-cmos driver the capability to wake up
+	  from low power states (S3 and S4/5).
+
+config RTC_DRV_CMOS_DAYOFMONTH_ALARM
+	tristate "PC-style 'CMOS' supports day of month for alarms"
+	depends on RTC_DRV_CMOS_WAKEUP_FROM_LPSTATES
+	help
+	  Say "yes" to have the rtc-cmos driver support day-of-month
+	  alarms through the REG_D register.
+
 config RTC_DRV_VRTC
 	tristate "Virtual RTC for Intel MID platforms"
 	depends on X86_INTEL_MID
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 6ae046b..459bdad 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -729,8 +729,10 @@
 	hpet_set_periodic_freq(cmos_rtc.rtc->irq_freq);
 	CMOS_WRITE(RTC_REF_CLCK_32KHZ | 0x06, RTC_FREQ_SELECT);
 
+#ifndef CONFIG_RTC_DRV_CMOS_WAKEUP_FROM_LPSTATES
 	/* disable irqs */
 	cmos_irq_disable(&cmos_rtc, RTC_PIE | RTC_AIE | RTC_UIE);
+#endif
 
 	rtc_control = CMOS_READ(RTC_CONTROL);
 
@@ -1023,6 +1025,30 @@
 	device_init_wakeup(dev, 1);
 }
 
+#elif defined(CONFIG_RTC_DRV_CMOS_WAKEUP_FROM_LPSTATES)
+
+#ifdef	CONFIG_RTC_DRV_CMOS_DAYOFMONTH_ALARM
+static struct cmos_rtc_board_info cmos_wakeup_rtc_info;
+#endif
+
+static void cmos_wake_setup(struct device *dev)
+{
+#ifdef	CONFIG_RTC_DRV_CMOS_DAYOFMONTH_ALARM
+	/* add day of month capability for alarms */
+	cmos_wakeup_rtc_info.rtc_day_alarm = RTC_REG_D;
+	cmos_wakeup_rtc_info.rtc_mon_alarm = 0;
+	cmos_wakeup_rtc_info.rtc_century = 0;
+
+	cmos_wakeup_rtc_info.wake_on = NULL;
+	cmos_wakeup_rtc_info.wake_off = NULL;
+
+	dev->platform_data = &cmos_wakeup_rtc_info;
+#endif
+
+	/* RTC always wakes from S1/S2/S3, and often S4/STD */
+	device_init_wakeup(dev, 1);
+}
+
 #else
 
 static void cmos_wake_setup(struct device *dev)
@@ -1168,10 +1194,12 @@
 
 static void cmos_platform_shutdown(struct platform_device *pdev)
 {
+#ifndef CONFIG_RTC_DRV_CMOS_WAKEUP_FROM_LPSTATES
 	if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(&pdev->dev))
 		return;
 
 	cmos_do_shutdown();
+#endif
 }
 
 /* work with hotplug and coldplug */
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index 578baf9..b26ff5a 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -35,11 +35,14 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/sfi.h>
+#include <linux/io.h>
 
 #include <asm-generic/rtc.h>
 #include <asm/intel_scu_ipc.h>
-#include <asm/mrst.h>
+#include <asm/intel-mid.h>
 #include <asm/mrst-vrtc.h>
+#include <linux/rpmsg.h>
+#include <asm/intel_mid_rpmsg.h>
 
 struct mrst_rtc {
 	struct rtc_device	*rtc;
@@ -51,10 +54,24 @@
 	u8			suspend_ctrl;
 };
 
+/* both platform and pnp busses use negative numbers for invalid irqs */
+#define is_valid_irq(n)		((n) >= 0)
+
 static const char driver_name[] = "rtc_mrst";
 
 #define	RTC_IRQMASK	(RTC_PF | RTC_AF)
 
+#define OSHOB_ALARM_OFFSET 0x68
+#define OSHOB_DAYW_OFFSET  0x00
+#define OSHOB_DAYM_OFFSET  0x01
+#define OSHOB_MON_OFFSET   0x02
+#define OSHOB_YEAR_OFFSET  0x03
+
+static u32 oshob_base;
+static void __iomem *oshob_addr;
+
+static struct rpmsg_instance *vrtc_mrst_instance;
+
 static inline int is_intr(u8 rtc_intr)
 {
 	if (!(rtc_intr & RTC_IRQF))
@@ -73,6 +90,34 @@
 	return uip;
 }
 
+/* If the interrupt is an alarm interrupt (RTC_AF), check that it is
+ * for the correct day. With support for alarms more than 24 hours
+ * away, the alarm date is compared with the date fields in OSHOB,
+ * as the vRTC has no date fields for the alarm.
+ */
+static int is_valid_af(u8 rtc_intr)
+{
+	char *p;
+	unsigned long vrtc_date, oshob_date;
+
+	if ((__intel_mid_cpu_chip == INTEL_MID_CPU_CHIP_PENWELL) ||
+	    (__intel_mid_cpu_chip == INTEL_MID_CPU_CHIP_CLOVERVIEW)) {
+		if (rtc_intr & RTC_AF) {
+			p = (char *) &vrtc_date;
+			*(p+1) = vrtc_cmos_read(RTC_DAY_OF_MONTH);
+			*(p+2) = vrtc_cmos_read(RTC_MONTH);
+			*(p+3) = vrtc_cmos_read(RTC_YEAR);
+
+			oshob_date = readl(oshob_addr);
+			if ((oshob_date & 0xFFFFFF00)
+					!= (vrtc_date & 0xFFFFFF00))
+				return false;
+		}
+	}
+
+	return true;
+}
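
The masked comparison works because vrtc_date and the OSHOB word share a layout on a little-endian u32: byte 1 is the day of month, byte 2 the month, byte 3 the year, and byte 0 is masked off. A worked example, assuming binary-mode (non-BCD) registers:

/*
 * Alarm on 15 July 2013: mrst_set_alarm() below stores
 * mday = 15 = 0x0f, mon + 1 = 8, tm_year - 72 = 41 = 0x29, so
 *
 *	oshob_date = 0x29080f00 | <byte 0, don't care>
 *	vrtc_date  = 0x29080f00 | <byte 0, never written>
 *
 * and (oshob_date & 0xFFFFFF00) == (vrtc_date & 0xFFFFFF00) holds
 * only on the matching date.
 */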
+
 /*
  * rtc_time's year contains the increment over 1900, but vRTC's YEAR
  * register can't be programmed to value larger than 0x64, so vRTC
@@ -137,7 +182,8 @@
 
 	spin_unlock_irqrestore(&rtc_lock, flags);
 
-	ret = intel_scu_ipc_simple_command(IPCMSG_VRTC, IPC_CMD_VRTC_SETTIME);
+	ret = rpmsg_send_simple_command(vrtc_mrst_instance,
+				IPCMSG_VRTC, IPC_CMD_VRTC_SETTIME);
 	return ret;
 }
 
@@ -146,7 +192,7 @@
 	struct mrst_rtc	*mrst = dev_get_drvdata(dev);
 	unsigned char rtc_control;
 
-	if (mrst->irq <= 0)
+	if (!is_valid_irq(mrst->irq))
 		return -EIO;
 
 	/* Basic alarms only support hour, minute, and seconds fields.
@@ -182,7 +228,7 @@
 	 */
 	rtc_intr = vrtc_cmos_read(RTC_INTR_FLAGS);
 	rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
-	if (is_intr(rtc_intr))
+	if (is_intr(rtc_intr) && is_valid_af(rtc_intr))
 		rtc_update_irq(mrst->rtc, 1, rtc_intr);
 }
 
@@ -217,15 +263,21 @@
 {
 	struct mrst_rtc	*mrst = dev_get_drvdata(dev);
 	unsigned char hrs, min, sec;
+	unsigned char wday, mday, mon, year;
 	int ret = 0;
 
-	if (!mrst->irq)
+	if (!is_valid_irq(mrst->irq))
 		return -EIO;
 
 	hrs = t->time.tm_hour;
 	min = t->time.tm_min;
 	sec = t->time.tm_sec;
 
+	wday = t->time.tm_wday;
+	mday = t->time.tm_mday;
+	mon = t->time.tm_mon;
+	year = t->time.tm_year;
+
 	spin_lock_irq(&rtc_lock);
 	/* Next rtc irq must not be from previous alarm setting */
 	mrst_irq_disable(mrst, RTC_AIE);
@@ -235,13 +287,18 @@
 	vrtc_cmos_write(min, RTC_MINUTES_ALARM);
 	vrtc_cmos_write(sec, RTC_SECONDS_ALARM);
 
-	spin_unlock_irq(&rtc_lock);
+	if ((__intel_mid_cpu_chip == INTEL_MID_CPU_CHIP_PENWELL) ||
+	    (__intel_mid_cpu_chip == INTEL_MID_CPU_CHIP_CLOVERVIEW)) {
+		/* Support for date fields in the alarm using OSHOB. Since
+		 * the vRTC has no alarm registers for date fields, write
+		 * them into OSHOB for the SCU to sync to the MSIC RTC. */
+		writeb(wday, oshob_addr+OSHOB_DAYW_OFFSET);
+		writeb(mday, oshob_addr+OSHOB_DAYM_OFFSET);
+		writeb(mon+1, oshob_addr+OSHOB_MON_OFFSET);
+		/* Adjust for the 1972/1900 */
+		writeb(year-72, oshob_addr+OSHOB_YEAR_OFFSET);
+	}
 
-	ret = intel_scu_ipc_simple_command(IPCMSG_VRTC, IPC_CMD_VRTC_SETALARM);
-	if (ret)
-		return ret;
-
-	spin_lock_irq(&rtc_lock);
 	if (t->enabled)
 		mrst_irq_enable(mrst, RTC_AIE);
 
@@ -250,21 +307,42 @@
 	return 0;
 }
 
+#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
+
 /* Currently, the vRTC doesn't support UIE ON/OFF */
-static int mrst_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+static int
+mrst_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 {
 	struct mrst_rtc	*mrst = dev_get_drvdata(dev);
 	unsigned long	flags;
 
+	switch (cmd) {
+	case RTC_AIE_OFF:
+	case RTC_AIE_ON:
+		if (!is_valid_irq(mrst->irq))
+			return -EINVAL;
+		break;
+	default:
+		/* PIE ON/OFF is handled by mrst_irq_set_state() */
+		return -ENOIOCTLCMD;
+	}
+
 	spin_lock_irqsave(&rtc_lock, flags);
-	if (enabled)
-		mrst_irq_enable(mrst, RTC_AIE);
-	else
+	switch (cmd) {
+	case RTC_AIE_OFF:	/* alarm off */
 		mrst_irq_disable(mrst, RTC_AIE);
+		break;
+	case RTC_AIE_ON:	/* alarm on */
+		mrst_irq_enable(mrst, RTC_AIE);
+		break;
+	}
 	spin_unlock_irqrestore(&rtc_lock, flags);
 	return 0;
 }
 
+#else
+#define	mrst_rtc_ioctl	NULL
+#endif
 
 #if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
 
@@ -290,13 +368,26 @@
 #define	mrst_procfs	NULL
 #endif
 
+static int mrst_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	struct mrst_rtc *mrst = dev_get_drvdata(dev);
+
+	if (enabled)
+		mrst_irq_enable(mrst, RTC_AIE);
+	else
+		mrst_irq_disable(mrst, RTC_AIE);
+
+	return 0;
+}
+
 static const struct rtc_class_ops mrst_rtc_ops = {
-	.read_time	= mrst_read_time,
-	.set_time	= mrst_set_time,
-	.read_alarm	= mrst_read_alarm,
-	.set_alarm	= mrst_set_alarm,
-	.proc		= mrst_procfs,
-	.alarm_irq_enable = mrst_rtc_alarm_irq_enable,
+	.ioctl		  = mrst_rtc_ioctl,
+	.read_time	  = mrst_read_time,
+	.set_time	  = mrst_set_time,
+	.read_alarm	  = mrst_read_alarm,
+	.set_alarm	  = mrst_set_alarm,
+	.proc		  = mrst_procfs,
+	.alarm_irq_enable = mrst_alarm_irq_enable,
 };
 
 static struct mrst_rtc	mrst_rtc;
@@ -308,22 +399,33 @@
 static irqreturn_t mrst_rtc_irq(int irq, void *p)
 {
 	u8 irqstat;
+	int ret = 0;
 
 	spin_lock(&rtc_lock);
 	/* This read will clear all IRQ flags inside Reg C */
 	irqstat = vrtc_cmos_read(RTC_INTR_FLAGS);
+	irqstat &= RTC_IRQMASK | RTC_IRQF;
+	ret = is_valid_af(irqstat);
 	spin_unlock(&rtc_lock);
 
-	irqstat &= RTC_IRQMASK | RTC_IRQF;
 	if (is_intr(irqstat)) {
-		rtc_update_irq(p, 1, irqstat);
+		/* If it's an alarm interrupt, update the RTC IRQ only if
+		 * it's for the current day. Alarms beyond 24 hours fire at
+		 * the given time every day until the actual alarm date.
+		 * From the hardware perspective it's still a valid
+		 * interrupt, so we must return IRQ_HANDLED. */
+		if (ret)
+			rtc_update_irq(p, 1, irqstat);
+
 		return IRQ_HANDLED;
+	} else {
+		pr_err("vRTC: error in IRQ handler\n");
+		return IRQ_NONE;
 	}
-	return IRQ_NONE;
 }
 
-static int vrtc_mrst_do_probe(struct device *dev, struct resource *iomem,
-			      int rtc_irq)
+static int
+vrtc_mrst_do_probe(struct device *dev, struct resource *iomem, int rtc_irq)
 {
 	int retval = 0;
 	unsigned char rtc_control;
@@ -335,8 +437,9 @@
 	if (!iomem)
 		return -ENODEV;
 
-	iomem = request_mem_region(iomem->start, resource_size(iomem),
-				   driver_name);
+	iomem = request_mem_region(iomem->start,
+			iomem->end + 1 - iomem->start,
+			driver_name);
 	if (!iomem) {
 		dev_dbg(dev, "i/o mem already in use.\n");
 		return -EBUSY;
@@ -364,9 +467,9 @@
 	if (!(rtc_control & RTC_24H) || (rtc_control & (RTC_DM_BINARY)))
 		dev_dbg(dev, "TODO: support more than 24-hr BCD mode\n");
 
-	if (rtc_irq) {
+	if (is_valid_irq(rtc_irq)) {
 		retval = request_irq(rtc_irq, mrst_rtc_irq,
-				0, dev_name(&mrst_rtc.rtc->dev),
+				IRQF_NO_SUSPEND, dev_name(&mrst_rtc.rtc->dev),
 				mrst_rtc.rtc);
 		if (retval < 0) {
 			dev_dbg(dev, "IRQ %d is already in use, err %d\n",
@@ -374,7 +477,30 @@
 			goto cleanup1;
 		}
 	}
-	dev_dbg(dev, "initialised\n");
+
+	/* make RTC device wake capable from sleep */
+	device_init_wakeup(dev, true);
+
+	if ((__intel_mid_cpu_chip == INTEL_MID_CPU_CHIP_PENWELL) ||
+	    (__intel_mid_cpu_chip == INTEL_MID_CPU_CHIP_CLOVERVIEW)) {
+		retval = rpmsg_send_command(vrtc_mrst_instance,
+				IPCMSG_GET_HOBADDR, 0, NULL, &oshob_base, 0, 1);
+		if (retval < 0) {
+			dev_dbg(dev,
+				"Unable to get OSHOB base address, err %d\n",
+				retval);
+			goto cleanup1;
+		}
+
+		oshob_addr = ioremap_nocache(oshob_base+OSHOB_ALARM_OFFSET, 4);
+		if (!oshob_addr) {
+			dev_dbg(dev, "Unable to do ioremap for OSHOB\n");
+			retval = -ENOMEM;
+			goto cleanup1;
+		}
+	}
+
+	dev_info(dev, "vRTC driver initialised\n");
 	return 0;
 
 cleanup1:
@@ -401,9 +527,15 @@
 
 	rtc_mrst_do_shutdown();
 
-	if (mrst->irq)
+	if (is_valid_irq(mrst->irq))
 		free_irq(mrst->irq, mrst->rtc);
 
+	if ((__intel_mid_cpu_chip == INTEL_MID_CPU_CHIP_PENWELL) ||
+	    (__intel_mid_cpu_chip == INTEL_MID_CPU_CHIP_CLOVERVIEW)) {
+		if (oshob_addr != NULL)
+			iounmap(oshob_addr);
+	}
+
 	rtc_device_unregister(mrst->rtc);
 	mrst->rtc = NULL;
 
@@ -416,7 +548,7 @@
 }
 
 #ifdef	CONFIG_PM
-static int mrst_suspend(struct device *dev, pm_message_t mesg)
+static int mrst_suspend(struct device *dev)
 {
 	struct mrst_rtc	*mrst = dev_get_drvdata(dev);
 	unsigned char	tmp;
@@ -455,7 +587,7 @@
  */
 static inline int mrst_poweroff(struct device *dev)
 {
-	return mrst_suspend(dev, PMSG_HIBERNATE);
+	return mrst_suspend(dev);
 }
 
 static int mrst_resume(struct device *dev)
@@ -478,7 +610,7 @@
 
 			mask = vrtc_cmos_read(RTC_INTR_FLAGS);
 			mask &= (tmp & RTC_IRQMASK) | RTC_IRQF;
-			if (!is_intr(mask))
+			if (!(is_intr(mask) && is_valid_af(mask)))
 				break;
 
 			rtc_update_irq(mrst->rtc, 1, mask);
@@ -526,18 +658,102 @@
 
 MODULE_ALIAS("platform:vrtc_mrst");
 
+static const struct dev_pm_ops vrtc_mrst_platform_driver_pm_ops = {
+	.suspend	= mrst_suspend,
+	.resume		= mrst_resume,
+};
+
 static struct platform_driver vrtc_mrst_platform_driver = {
 	.probe		= vrtc_mrst_platform_probe,
 	.remove		= vrtc_mrst_platform_remove,
 	.shutdown	= vrtc_mrst_platform_shutdown,
-	.driver = {
-		.name		= (char *) driver_name,
-		.suspend	= mrst_suspend,
-		.resume		= mrst_resume,
-	}
+	.driver.name	= (char *) driver_name,
+	.driver.pm	= &vrtc_mrst_platform_driver_pm_ops,
 };
 
-module_platform_driver(vrtc_mrst_platform_driver);
+static int vrtc_mrst_init(void)
+{
+	return platform_driver_register(&vrtc_mrst_platform_driver);
+}
+
+static void vrtc_mrst_exit(void)
+{
+	platform_driver_unregister(&vrtc_mrst_platform_driver);
+}
+
+static int vrtc_mrst_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret;
+
+	if (rpdev == NULL) {
+		pr_err("vrtc_mrst rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed vrtc_mrst rpmsg device\n");
+
+	/* Allocate rpmsg instance for vrtc_mrst */
+	ret = alloc_rpmsg_instance(rpdev, &vrtc_mrst_instance);
+	if (!vrtc_mrst_instance) {
+		dev_err(&rpdev->dev, "kzalloc vrtc_mrst instance failed\n");
+		goto out;
+	}
+
+	/* Initialize rpmsg instance */
+	init_rpmsg_instance(vrtc_mrst_instance);
+
+	ret = vrtc_mrst_init();
+	if (ret)
+		free_rpmsg_instance(rpdev, &vrtc_mrst_instance);
+
+out:
+	return ret;
+}
+
+static void vrtc_mrst_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	vrtc_mrst_exit();
+	free_rpmsg_instance(rpdev, &vrtc_mrst_instance);
+	dev_info(&rpdev->dev, "Removed vrtc_mrst rpmsg device\n");
+}
+
+static void vrtc_mrst_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len,  true);
+}
+
+static struct rpmsg_device_id vrtc_mrst_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_vrtc" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, vrtc_mrst_rpmsg_id_table);
+
+static struct rpmsg_driver vrtc_mrst_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= vrtc_mrst_rpmsg_id_table,
+	.probe		= vrtc_mrst_rpmsg_probe,
+	.callback	= vrtc_mrst_rpmsg_cb,
+	.remove		= vrtc_mrst_rpmsg_remove,
+};
+
+static int __init vrtc_mrst_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&vrtc_mrst_rpmsg);
+}
+
+static void __exit vrtc_mrst_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&vrtc_mrst_rpmsg);
+}
+
+module_init(vrtc_mrst_rpmsg_init);
+module_exit(vrtc_mrst_rpmsg_exit);
 
 MODULE_AUTHOR("Jacob Pan; Feng Tang");
 MODULE_DESCRIPTION("Driver for Moorestown virtual RTC");
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 92a9345..1c0ce9e 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -198,6 +198,15 @@
 	  This enables using the Freescale i.MX SPI controllers in master
 	  mode.
 
+config SPI_INTEL_MID_SSP
+	tristate "SSP SPI controller driver for Intel MID platforms (EXPERIMENTAL)"
+	depends on SPI_MASTER && INTEL_MID_DMAC
+	help
+	  This is the unified SSP SPI controller driver for the Intel
+	  MID platforms (Moorestown, Medfield, Clovertrail and
+	  Merrifield), primarily used to implement an SPI host controller
+	  driver on top of an SSP controller.
+
 config SPI_LM70_LLP
 	tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
 	depends on PARPORT
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 33f9c09..1ab0e90 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -35,6 +35,7 @@
 obj-$(CONFIG_SPI_FSL_SPI)		+= spi-fsl-spi.o
 obj-$(CONFIG_SPI_GPIO)			+= spi-gpio.o
 obj-$(CONFIG_SPI_IMX)			+= spi-imx.o
+obj-$(CONFIG_SPI_INTEL_MID_SSP)		+= intel_mid_ssp_spi.o
 obj-$(CONFIG_SPI_LM70_LLP)		+= spi-lm70llp.o
 obj-$(CONFIG_SPI_MPC512x_PSC)		+= spi-mpc512x-psc.o
 obj-$(CONFIG_SPI_MPC52xx_PSC)		+= spi-mpc52xx-psc.o
diff --git a/drivers/spi/intel_mid_ssp_spi.c b/drivers/spi/intel_mid_ssp_spi.c
new file mode 100644
index 0000000..f686910
--- /dev/null
+++ b/drivers/spi/intel_mid_ssp_spi.c
@@ -0,0 +1,1809 @@
+/*
+ * intel_mid_ssp_spi.c
+ * This driver supports Bulverde SSP core used on Intel MID platforms
+ * It supports SSP of Moorestown & Medfield platforms and handles clock
+ * slave & master modes.
+ *
+ * Copyright (c) 2010, Intel Corporation.
+ *  Ken Mills <ken.k.mills@intel.com>
+ *  Sylvain Centelles <sylvain.centelles@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+/*
+ * Note:
+ *
+ * Supports DMA and non-interrupt polled transfers.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/intel_mid_dma.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_runtime.h>
+#include <linux/completion.h>
+#include <asm/intel-mid.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/intel_mid_ssp_spi.h>
+
+#define DRIVER_NAME "intel_mid_ssp_spi_unified"
+
+MODULE_AUTHOR("Ken Mills");
+MODULE_DESCRIPTION("Bulverde SSP core SPI controller");
+MODULE_LICENSE("GPL");
+
+static int ssp_timing_wr;
+
+#ifdef DUMP_RX
+static void dump_trailer(const struct device *dev, char *buf, int len, int sz)
+{
+	int tlen1 = (len < sz ? len : sz);
+	int tlen2 =  ((len - sz) > sz) ? sz : (len - sz);
+	unsigned char *p;
+	static char msg[MAX_SPI_TRANSFER_SIZE];
+
+	memset(msg, '\0', sizeof(msg));
+	p = buf;
+	while (p < buf + tlen1)
+		sprintf(msg + strlen(msg), "%02x", (unsigned int)*p++);
+
+	if (tlen2 > 0) {
+		sprintf(msg + strlen(msg), " .....");
+		p = (buf + len) - tlen2;
+		while (p < buf + len)
+			sprintf(msg + strlen(msg), "%02x", (unsigned int)*p++);
+	}
+
+	dev_info(dev, "DUMP: %p[0:%d ... %d:%d]:%s", buf, tlen1 - 1,
+		   len-tlen2, len - 1, msg);
+}
+#endif
+
+static inline u8 ssp_cfg_get_mode(u8 ssp_cfg)
+{
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER ||
+	    intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE)
+		return (ssp_cfg) & 0x03;
+	else
+		return (ssp_cfg) & 0x07;
+}
+
+static inline u8 ssp_cfg_get_spi_bus_nb(u8 ssp_cfg)
+{
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER ||
+	    intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE)
+		return ((ssp_cfg) >> 2) & 0x07;
+	else
+		return ((ssp_cfg) >> 3) & 0x07;
+}
+
+static inline u8 ssp_cfg_is_spi_slave(u8 ssp_cfg)
+{
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER ||
+	    intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE)
+		return (ssp_cfg) & 0x20;
+	else
+		return (ssp_cfg) & 0x40;
+}
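
These helpers unpack a single configuration byte whose field positions differ between Tangier/Anniedale and the earlier platforms. A worked example for the older layout (mode in bits 0-2, bus number in bits 3-5, slave flag in bit 6), with a hypothetical value:

/*
 * ssp_cfg = 0x49 (0b1001001):
 *	ssp_cfg_get_mode(0x49)       = 0x49 & 0x07        = 1
 *	ssp_cfg_get_spi_bus_nb(0x49) = (0x49 >> 3) & 0x07 = 1
 *	ssp_cfg_is_spi_slave(0x49)   = 0x49 & 0x40        = 0x40 (slave)
 */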
+
+static inline u32 is_tx_fifo_empty(struct ssp_drv_context *sspc)
+{
+	u32 sssr;
+	sssr = read_SSSR(sspc->ioaddr);
+	if ((sssr & SSSR_TFL_MASK) || (sssr & SSSR_TNF) == 0)
+		return 0;
+	else
+		return 1;
+}
+
+static inline u32 is_rx_fifo_empty(struct ssp_drv_context *sspc)
+{
+	return ((read_SSSR(sspc->ioaddr) & SSSR_RNE) == 0);
+}
+
+static inline void disable_interface(struct ssp_drv_context *sspc)
+{
+	void *reg = sspc->ioaddr;
+	write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
+}
+
+static inline void disable_triggers(struct ssp_drv_context *sspc)
+{
+	void *reg = sspc->ioaddr;
+	write_SSCR1(read_SSCR1(reg) & ~sspc->cr1_sig, reg);
+}
+
+
+static void flush(struct ssp_drv_context *sspc)
+{
+	void *reg = sspc->ioaddr;
+	u32 i = 0;
+
+	/* If the transmit fifo is not empty, reset the interface. */
+	if (!is_tx_fifo_empty(sspc)) {
+		dev_err(&sspc->pdev->dev, "TX FIFO not empty. Reset of SPI IF\n");
+		disable_interface(sspc);
+		return;
+	}
+
+	dev_dbg(&sspc->pdev->dev, "SSSR=%x\n", read_SSSR(reg));
+	while (!is_rx_fifo_empty(sspc) && (i < SPI_FIFO_SIZE + 1)) {
+		read_SSDR(reg);
+		i++;
+	}
+	WARN(i > 0, "%d-word flush occurred\n", i);
+
+	return;
+}
+
+static int null_writer(struct ssp_drv_context *sspc)
+{
+	void *reg = sspc->ioaddr;
+	u8 n_bytes = sspc->n_bytes;
+
+	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+		|| (sspc->tx == sspc->tx_end))
+		return 0;
+
+	write_SSDR(0, reg);
+	sspc->tx += n_bytes;
+
+	return n_bytes;
+}
+
+static int null_reader(struct ssp_drv_context *sspc)
+{
+	void *reg = sspc->ioaddr;
+	u8 n_bytes = sspc->n_bytes;
+	size_t pkg_len = sspc->len;
+
+	while ((read_SSSR(reg) & SSSR_RNE)
+		&& (pkg_len > 0)) {
+		read_SSDR(reg);
+		sspc->rx += n_bytes;
+		pkg_len -= n_bytes;
+	}
+
+	return sspc->rx == sspc->rx_end;
+}
+
+static int u8_writer(struct ssp_drv_context *sspc)
+{
+	void *reg = sspc->ioaddr;
+	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+		|| (sspc->tx == sspc->tx_end))
+		return 0;
+
+	write_SSDR(*(u8 *)(sspc->tx), reg);
+	++sspc->tx;
+
+	return 1;
+}
+
+static int u8_reader(struct ssp_drv_context *sspc)
+{
+	void *reg = sspc->ioaddr;
+	size_t pkg_len = sspc->len;
+
+	while ((read_SSSR(reg) & SSSR_RNE)
+		&& (pkg_len > 0)) {
+		*(u8 *)(sspc->rx) = read_SSDR(reg);
+		++sspc->rx;
+		--pkg_len;
+	}
+
+	return sspc->rx == sspc->rx_end;
+}
+
+static int u16_writer(struct ssp_drv_context *sspc)
+{
+	void *reg = sspc->ioaddr;
+	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+		|| (sspc->tx == sspc->tx_end))
+		return 0;
+
+	write_SSDR(*(u16 *)(sspc->tx), reg);
+	sspc->tx += 2;
+
+	return 2;
+}
+
+static int u16_reader(struct ssp_drv_context *sspc)
+{
+	void *reg = sspc->ioaddr;
+	size_t pkg_len = sspc->len;
+
+	while ((read_SSSR(reg) & SSSR_RNE)
+		&& (pkg_len > 0)) {
+		*(u16 *)(sspc->rx) = read_SSDR(reg);
+		sspc->rx += 2;
+		pkg_len -= 2;
+	}
+
+	return sspc->rx == sspc->rx_end;
+}
+
+static int u32_writer(struct ssp_drv_context *sspc)
+{
+	void *reg = sspc->ioaddr;
+	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
+		|| (sspc->tx == sspc->tx_end))
+		return 0;
+
+	write_SSDR(*(u32 *)(sspc->tx), reg);
+	sspc->tx += 4;
+
+	return 4;
+}
+
+static int u32_reader(struct ssp_drv_context *sspc)
+{
+	void *reg = sspc->ioaddr;
+	size_t pkg_len = sspc->len;
+
+	while ((read_SSSR(reg) & SSSR_RNE)
+		&& (pkg_len > 0)) {
+		*(u32 *)(sspc->rx) = read_SSDR(reg);
+		sspc->rx += 4;
+		pkg_len -= 4;
+	}
+
+	return sspc->rx == sspc->rx_end;
+}
+
+static bool chan_filter(struct dma_chan *chan, void *param)
+{
+	struct ssp_drv_context *sspc = param;
+	bool ret = false;
+
+	if (!sspc->dmac1)
+		return ret;
+
+	if (chan->device->dev == &sspc->dmac1->dev)
+		ret = true;
+
+	return ret;
+}
+
+/**
+ * unmap_dma_buffers() - Unmap the DMA buffers used during the last transfer.
+ * @sspc:	Pointer to the private driver context
+ */
+static void unmap_dma_buffers(struct ssp_drv_context *sspc)
+{
+	struct device *dev = &sspc->pdev->dev;
+
+	if (!sspc->dma_mapped)
+		return;
+	dma_unmap_single(dev, sspc->rx_dma, sspc->len, PCI_DMA_FROMDEVICE);
+	dma_unmap_single(dev, sspc->tx_dma, sspc->len, PCI_DMA_TODEVICE);
+	sspc->dma_mapped = 0;
+}
+
+/**
+ * intel_mid_ssp_spi_dma_done() - End of DMA transfer callback
+ * @arg:	Pointer to the data provided at callback registration
+ *
+ * This function is set as callback for both RX and TX DMA transfers. The
+ * RX or TX 'done' flag is set according to the direction of the ended
+ * transfer. Then, if both RX and TX flags are set, it means that the
+ * transfer job is completed.
+ */
+static void intel_mid_ssp_spi_dma_done(void *arg)
+{
+	struct callback_param *cb_param = (struct callback_param *)arg;
+	struct ssp_drv_context *sspc = cb_param->drv_context;
+	struct device *dev = &sspc->pdev->dev;
+	void *reg = sspc->ioaddr;
+
+	if (cb_param->direction == TX_DIRECTION) {
+		dma_sync_single_for_cpu(dev, sspc->tx_dma,
+			sspc->len, DMA_TO_DEVICE);
+		sspc->txdma_done = 1;
+	} else {
+		sspc->rxdma_done = 1;
+		dma_sync_single_for_cpu(dev, sspc->rx_dma,
+			sspc->len, DMA_FROM_DEVICE);
+	}
+
+	dev_dbg(dev, "DMA callback for direction %d [RX done:%d] [TX done:%d]\n",
+		cb_param->direction, sspc->rxdma_done,
+		sspc->txdma_done);
+
+	if (sspc->txdma_done && sspc->rxdma_done) {
+		/* Clear Status Register */
+		write_SSSR(sspc->clear_sr, reg);
+		dev_dbg(dev, "DMA done\n");
+		/* Disable Triggers to DMA or to CPU*/
+		disable_triggers(sspc);
+		unmap_dma_buffers(sspc);
+
+		queue_work(sspc->dma_wq, &sspc->complete_work);
+	}
+}
+
+/**
+ * intel_mid_ssp_spi_dma_init() - Initialize DMA
+ * @sspc:	Pointer to the private driver context
+ *
+ * This function is called at driver setup phase to allocate DMA
+ * resources.
+ */
+static void intel_mid_ssp_spi_dma_init(struct ssp_drv_context *sspc)
+{
+	struct intel_mid_dma_slave *rxs, *txs;
+	struct dma_slave_config *ds;
+	dma_cap_mask_t mask;
+	struct device *dev = &sspc->pdev->dev;
+	unsigned int device_id;
+
+	/* Configure RX channel parameters */
+	rxs = &sspc->dmas_rx;
+	ds = &rxs->dma_slave;
+
+	ds->direction = DMA_FROM_DEVICE;
+	rxs->hs_mode = LNW_DMA_HW_HS;
+	rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
+	ds->dst_addr_width = sspc->n_bytes;
+	ds->src_addr_width = sspc->n_bytes;
+
+	if (sspc->quirks & QUIRKS_PLATFORM_BYT) {
+		/*These are fixed HW info from Baytrail datasheet*/
+		rxs->device_instance = 1; /*DMA Req line*/
+	} else if (sspc->quirks & QUIRKS_PLATFORM_MRFL)
+		rxs->device_instance = sspc->master->bus_num;
+	else
+		rxs->device_instance = 0;
+
+	/* Use a DMA burst according to the FIFO thresholds */
+	if (sspc->rx_fifo_threshold == 8) {
+		ds->src_maxburst = LNW_DMA_MSIZE_8;
+		ds->dst_maxburst = LNW_DMA_MSIZE_8;
+	} else if (sspc->rx_fifo_threshold == 4) {
+		ds->src_maxburst = LNW_DMA_MSIZE_4;
+		ds->dst_maxburst = LNW_DMA_MSIZE_4;
+	} else {
+		ds->src_maxburst = LNW_DMA_MSIZE_1;
+		ds->dst_maxburst = LNW_DMA_MSIZE_1;
+	}
+
+	/* Configure TX channel parameters */
+	txs = &sspc->dmas_tx;
+	ds = &txs->dma_slave;
+
+	ds->direction = DMA_TO_DEVICE;
+	txs->hs_mode = LNW_DMA_HW_HS;
+	txs->cfg_mode = LNW_DMA_MEM_TO_PER;
+	ds->src_addr_width = sspc->n_bytes;
+	ds->dst_addr_width = sspc->n_bytes;
+
+	if (sspc->quirks & QUIRKS_PLATFORM_BYT) {
+		/*These are fixed HW info from Baytrail datasheet*/
+		txs->device_instance = 0;/*DMA Req Line*/
+	} else if (sspc->quirks & QUIRKS_PLATFORM_MRFL)
+		txs->device_instance = sspc->master->bus_num;
+	else
+		txs->device_instance = 0;
+
+	/* Use a DMA burst according to the FIFO thresholds */
+	if (sspc->rx_fifo_threshold == 8) {
+		ds->src_maxburst = LNW_DMA_MSIZE_8;
+		ds->dst_maxburst = LNW_DMA_MSIZE_8;
+	} else if (sspc->rx_fifo_threshold == 4) {
+		ds->src_maxburst = LNW_DMA_MSIZE_4;
+		ds->dst_maxburst = LNW_DMA_MSIZE_4;
+	} else {
+		ds->src_maxburst = LNW_DMA_MSIZE_1;
+		ds->dst_maxburst = LNW_DMA_MSIZE_1;
+	}
+
+	/* Nothing more to do if already initialized */
+	if (sspc->dma_initialized)
+		return;
+
+	/* Use DMAC1 */
+	if (sspc->quirks & QUIRKS_PLATFORM_MRST)
+		device_id = PCI_MRST_DMAC1_ID;
+	else if (sspc->quirks & QUIRKS_PLATFORM_BYT)
+		device_id = PCI_BYT_DMAC1_ID;
+	else if (sspc->quirks & QUIRKS_PLATFORM_MRFL)
+		device_id = PCI_MRFL_DMAC_ID;
+	else
+		device_id = PCI_MDFL_DMAC1_ID;
+
+	sspc->dmac1 = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
+	if (!sspc->dmac1) {
+		dev_err(dev, "Can't find DMAC1");
+		return;
+	}
+
+	if (sspc->quirks & QUIRKS_SRAM_ADDITIONAL_CPY) {
+		sspc->virt_addr_sram_rx = ioremap_nocache(SRAM_BASE_ADDR,
+				2 * MAX_SPI_TRANSFER_SIZE);
+		if (sspc->virt_addr_sram_rx)
+			sspc->virt_addr_sram_tx = sspc->virt_addr_sram_rx +
+							MAX_SPI_TRANSFER_SIZE;
+		else
+			dev_err(dev, "Virt_addr_sram_rx is null\n");
+	}
+
+	/* 1. Allocate rx channel */
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	sspc->rxchan = dma_request_channel(mask, chan_filter, sspc);
+	if (!sspc->rxchan)
+		goto err_exit;
+
+	sspc->rxchan->private = rxs;
+
+	/* 2. Allocate tx channel */
+	dma_cap_set(DMA_SLAVE, mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+
+	sspc->txchan = dma_request_channel(mask, chan_filter, sspc);
+	if (!sspc->txchan)
+		goto free_rxchan;
+	else
+		sspc->txchan->private = txs;
+
+	/* set the dma done bit to 1 */
+	sspc->txdma_done = 1;
+	sspc->rxdma_done = 1;
+
+	sspc->tx_param.drv_context  = sspc;
+	sspc->tx_param.direction = TX_DIRECTION;
+	sspc->rx_param.drv_context  = sspc;
+	sspc->rx_param.direction = RX_DIRECTION;
+
+	sspc->dma_initialized = 1;
+	return;
+
+free_rxchan:
+	dma_release_channel(sspc->rxchan);
+err_exit:
+	dev_err(dev, "Error : DMA Channel Not available\n");
+
+	if (sspc->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)
+		iounmap(sspc->virt_addr_sram_rx);
+
+	pci_dev_put(sspc->dmac1);
+	return;
+}
+
+/**
+ * intel_mid_ssp_spi_dma_exit() - Release DMA resources
+ * @sspc:	Pointer to the private driver context
+ */
+static void intel_mid_ssp_spi_dma_exit(struct ssp_drv_context *sspc)
+{
+	dma_release_channel(sspc->txchan);
+	dma_release_channel(sspc->rxchan);
+
+	if (sspc->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)
+		iounmap(sspc->virt_addr_sram_rx);
+
+	pci_dev_put(sspc->dmac1);
+}
+
+/**
+ * dma_transfer() - Initiate a DMA transfer
+ * @sspc:	Pointer to the private driver context
+ */
+static void dma_transfer(struct ssp_drv_context *sspc)
+{
+	dma_addr_t ssdr_addr;
+	struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
+	struct dma_chan *txchan, *rxchan;
+	enum dma_ctrl_flags flag;
+	struct device *dev = &sspc->pdev->dev;
+
+	/* get Data Read/Write address */
+	ssdr_addr = (dma_addr_t)(sspc->paddr + 0x10);
+
+	if (sspc->tx_dma)
+		sspc->txdma_done = 0;
+
+	if (sspc->rx_dma)
+		sspc->rxdma_done = 0;
+
+	/* 2. prepare the RX dma transfer */
+	txchan = sspc->txchan;
+	rxchan = sspc->rxchan;
+
+	flag = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+
+	if (likely(sspc->quirks & QUIRKS_DMA_USE_NO_TRAIL)) {
+		/*
+		 * Since the DMA is configured to do 32-bit accesses
+		 * to/from the DDR, the DMA transfer size must be
+		 * a multiple of 4 bytes.
+		 */
+		sspc->len_dma_rx = sspc->len & ~(4 - 1);
+		sspc->len_dma_tx = sspc->len_dma_rx;
+
+		/* In Rx direction, TRAIL bytes are handled by memcpy */
+		if (sspc->rx_dma &&
+			(sspc->len_dma_rx >=
+				sspc->rx_fifo_threshold * sspc->n_bytes)) {
+			sspc->len_dma_rx = TRUNCATE(sspc->len_dma_rx,
+				sspc->rx_fifo_threshold * sspc->n_bytes);
+			sspc->len_dma_tx = sspc->len_dma_rx;
+		} else if (!sspc->rx_dma)
+			dev_err(dev, "ERROR : rx_dma is null\n");
+	} else {
+		/* TRAIL Bytes are handled by DMA */
+		if (sspc->rx_dma) {
+			sspc->len_dma_rx = sspc->len;
+			sspc->len_dma_tx = sspc->len;
+		} else
+			dev_err(dev, "ERROR : sspc->rx_dma is null!\n");
+	}
+
+	sspc->dmas_rx.dma_slave.src_addr = ssdr_addr;
+	rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
+		(unsigned long)&(sspc->dmas_rx.dma_slave));
+	dma_sync_single_for_device(dev, sspc->rx_dma,
+		sspc->len, DMA_FROM_DEVICE);
+
+	rxdesc = rxchan->device->device_prep_dma_memcpy
+		(rxchan,			/* DMA Channel */
+		sspc->rx_dma,			/* DAR */
+		ssdr_addr,			/* SAR */
+		sspc->len_dma_rx,		/* Data Length */
+		flag);					/* Flag */
+
+	if (rxdesc) {
+		rxdesc->callback = intel_mid_ssp_spi_dma_done;
+		rxdesc->callback_param = &sspc->rx_param;
+	} else {
+		dev_dbg(dev, "rxdesc is null! (len_dma_rx:%d)\n",
+			sspc->len_dma_rx);
+		sspc->rxdma_done = 1;
+	}
+
+	/* 3. prepare the TX dma transfer */
+	sspc->dmas_tx.dma_slave.dst_addr = ssdr_addr;
+	txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
+		(unsigned long)&(sspc->dmas_tx.dma_slave));
+	dma_sync_single_for_device(dev, sspc->tx_dma,
+		sspc->len, DMA_TO_DEVICE);
+
+	if (sspc->tx_dma) {
+		txdesc = txchan->device->device_prep_dma_memcpy
+			(txchan,			/* DMA Channel */
+			ssdr_addr,			/* DAR */
+			sspc->tx_dma,			/* SAR */
+			sspc->len_dma_tx,		/* Data Length */
+			flag);				/* Flag */
+		if (txdesc) {
+			txdesc->callback = intel_mid_ssp_spi_dma_done;
+			txdesc->callback_param = &sspc->tx_param;
+		} else {
+			dev_dbg(dev, "txdesc is null! (len_dma_tx:%d)\n",
+				sspc->len_dma_tx);
+			sspc->txdma_done = 1;
+		}
+	} else {
+		dev_err(dev, "ERROR : sspc->tx_dma is null!\n");
+		return;
+	}
+
+	dev_dbg(dev, "DMA transfer len:%d len_dma_tx:%d len_dma_rx:%d\n",
+		sspc->len, sspc->len_dma_tx, sspc->len_dma_rx);
+
+	if (rxdesc || txdesc) {
+		if (rxdesc) {
+			dev_dbg(dev, "Firing DMA RX channel\n");
+			rxdesc->tx_submit(rxdesc);
+		}
+		if (txdesc) {
+			dev_dbg(dev, "Firing DMA TX channel\n");
+			txdesc->tx_submit(txdesc);
+		}
+	} else {
+		struct callback_param cb_param;
+		cb_param.drv_context = sspc;
+		dev_dbg(dev, "Bypassing DMA transfer\n");
+		intel_mid_ssp_spi_dma_done(&cb_param);
+	}
+}
+
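
A worked example of the length trimming above, assuming TRUNCATE() rounds its first argument down to a multiple of its second: with len = 70 bytes, n_bytes = 1 and rx_fifo_threshold = 8,

/*
 *	len_dma_rx = 70 & ~(4 - 1)     = 68	(32-bit DMA alignment)
 *	len_dma_rx = TRUNCATE(68, 8*1) = 64	(whole FIFO bursts only)
 *
 * The remaining 70 - 64 = 6 bytes are the trail that drain_trail()
 * below transfers by PIO once the DMA completes.
 */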
+/**
+ * map_dma_buffers() - Map DMA buffer before a transfer
+ * @sspc:	Pointer to the private driver context
+ */
+static int map_dma_buffers(struct ssp_drv_context *sspc)
+{
+	struct device *dev = &sspc->pdev->dev;
+
+	if (unlikely(sspc->dma_mapped)) {
+		dev_err(dev, "ERROR : DMA buffers already mapped\n");
+		return 0;
+	}
+	if (unlikely(sspc->quirks & QUIRKS_SRAM_ADDITIONAL_CPY)) {
+		/* Copy sspc->tx into sram_tx */
+		memcpy_toio(sspc->virt_addr_sram_tx, sspc->tx, sspc->len);
+#ifdef DUMP_RX
+		dump_trailer(&sspc->pdev->dev, sspc->tx, sspc->len, 16);
+#endif
+		sspc->rx_dma = SRAM_RX_ADDR;
+		sspc->tx_dma = SRAM_TX_ADDR;
+	} else {
+		/* no QUIRKS_SRAM_ADDITIONAL_CPY */
+		if (unlikely(sspc->dma_mapped))
+			return 1;
+
+		sspc->tx_dma = dma_map_single(dev, sspc->tx, sspc->len,
+						PCI_DMA_TODEVICE);
+		if (unlikely(dma_mapping_error(dev, sspc->tx_dma))) {
+			dev_err(dev, "ERROR : tx dma mapping failed\n");
+			return 0;
+		}
+
+		sspc->rx_dma = dma_map_single(dev, sspc->rx, sspc->len,
+						PCI_DMA_FROMDEVICE);
+		if (unlikely(dma_mapping_error(dev, sspc->rx_dma))) {
+			dma_unmap_single(dev, sspc->tx_dma,
+				sspc->len, DMA_TO_DEVICE);
+			dev_err(dev, "ERROR : rx dma mapping failed\n");
+			return 0;
+		}
+	}
+	return 1;
+}
+
+/**
+ * drain_trail() - Handle trailing bytes of a transfer
+ * @sspc:	Pointer to the private driver context
+ *
+ * This function handles the trailing bytes of a transfer for the case
+ * they are not handled by the DMA.
+ */
+void drain_trail(struct ssp_drv_context *sspc)
+{
+	struct device *dev = &sspc->pdev->dev;
+	void *reg = sspc->ioaddr;
+
+	if (sspc->len != sspc->len_dma_rx) {
+		dev_dbg(dev, "Handling trailing bytes. SSSR:%08x\n",
+			read_SSSR(reg));
+		sspc->rx += sspc->len_dma_rx;
+		sspc->tx += sspc->len_dma_tx;
+		sspc->len = sspc->len - sspc->len_dma_rx;
+		sspc->cur_msg->actual_length = sspc->len_dma_rx;
+
+		while ((sspc->tx < sspc->tx_end) ||
+			(sspc->rx < sspc->rx_end)) {
+			sspc->read(sspc);
+			sspc->write(sspc);
+		}
+	}
+}
+
+/**
+ * sram_to_ddr_cpy() - Copy data from Langwell SDRAM to DDR
+ * @sspc:	Pointer to the private driver context
+ */
+static void sram_to_ddr_cpy(struct ssp_drv_context *sspc)
+{
+	u32 length = sspc->len;
+
+	if ((sspc->quirks & QUIRKS_DMA_USE_NO_TRAIL)
+		&& (sspc->len > sspc->rx_fifo_threshold * sspc->n_bytes))
+		length = TRUNCATE(sspc->len,
+			sspc->rx_fifo_threshold * sspc->n_bytes);
+
+	memcpy_fromio(sspc->rx, sspc->virt_addr_sram_rx, length);
+}
+
+static void int_transfer_complete(struct ssp_drv_context *sspc)
+{
+	void *reg = sspc->ioaddr;
+	struct spi_message *msg;
+	struct device *dev = &sspc->pdev->dev;
+
+	if (unlikely(sspc->quirks & QUIRKS_USE_PM_QOS))
+		pm_qos_update_request(&sspc->pm_qos_req,
+					PM_QOS_DEFAULT_VALUE);
+
+	if (unlikely(sspc->quirks & QUIRKS_SRAM_ADDITIONAL_CPY))
+		sram_to_ddr_cpy(sspc);
+
+	if (likely(sspc->quirks & QUIRKS_DMA_USE_NO_TRAIL))
+		drain_trail(sspc);
+	else
+		/* Stop getting Time Outs */
+		write_SSTO(0, reg);
+
+	sspc->cur_msg->status = 0;
+	sspc->cur_msg->actual_length += sspc->len;
+
+#ifdef DUMP_RX
+	dump_trailer(dev, sspc->rx, sspc->len, 16);
+#endif
+
+	if (sspc->cs_control)
+		sspc->cs_control(!sspc->cs_assert);
+
+	dev_dbg(dev, "End of transfer. SSSR:%08X\n", read_SSSR(reg));
+	complete(&sspc->msg_done);
+}
+
+static void int_transfer_complete_work(struct work_struct *work)
+{
+	struct ssp_drv_context *sspc = container_of(work,
+				struct ssp_drv_context, complete_work);
+
+	int_transfer_complete(sspc);
+}
+
+static void poll_transfer_complete(struct ssp_drv_context *sspc)
+{
+	/* Update total bytes transferred; count reflects actual bytes read */
+	sspc->cur_msg->actual_length += sspc->len - (sspc->rx_end - sspc->rx);
+
+	sspc->cur_msg->status = 0;
+}
+
+/**
+ * ssp_int() - Interrupt handler
+ * @irq:	Interrupt number
+ * @dev_id:	Pointer to the private driver context
+ *
+ * The SSP interrupt is not used for transfers, which are handled by
+ * DMA or polling: only under/over runs are caught to detect
+ * broken transfers.
+ */
+static irqreturn_t ssp_int(int irq, void *dev_id)
+{
+	struct ssp_drv_context *sspc = dev_id;
+	void *reg = sspc->ioaddr;
+	struct device *dev = &sspc->pdev->dev;
+	u32 status = read_SSSR(reg);
+
+	/* It should never be our interrupt since the SSP will */
+	/* only trigger an interrupt for under/over run. */
+	if (likely(!(status & sspc->mask_sr)))
+		return IRQ_NONE;
+
+	if (status & SSSR_ROR || status & SSSR_TUR) {
+		dev_err(dev, "--- SPI ROR or TUR occurred : SSSR=%x\n",	status);
+		WARN_ON(1);
+		if (status & SSSR_ROR)
+			dev_err(dev, "we have Overrun\n");
+		if (status & SSSR_TUR)
+			dev_err(dev, "we have Underrun\n");
+	}
+
+	/* We can fall here when not using DMA mode */
+	if (!sspc->cur_msg) {
+		disable_interface(sspc);
+		disable_triggers(sspc);
+	}
+	/* clear status register */
+	write_SSSR(sspc->clear_sr, reg);
+	return IRQ_HANDLED;
+}
+
+static void poll_writer(struct work_struct *work)
+{
+	struct ssp_drv_context *sspc =
+		container_of(work, struct ssp_drv_context, poll_write);
+	struct device *dev = &sspc->pdev->dev;
+	size_t pkg_len = sspc->len;
+	int ret;
+
+	while (pkg_len > 0) {
+		ret = sspc->write(sspc);
+		pkg_len -= ret;
+	}
+}
+
+/*
+ * Perform a single transfer.
+ */
+static void poll_transfer(unsigned long data)
+{
+	struct ssp_drv_context *sspc = (void *)data;
+
+	while (!sspc->read(sspc))
+		cpu_relax();
+
+	poll_transfer_complete(sspc);
+}
+
+/**
+ * start_bitbanging() - Clock synchronization by bit banging
+ * @sspc:	Pointer to private driver context
+ *
+ * This clock synchronization will be removed as soon as it is
+ * handled by the SCU.
+ */
+static void start_bitbanging(struct ssp_drv_context *sspc)
+{
+	u32 sssr;
+	u32 count = 0;
+	u32 cr0;
+	void *i2c_reg = sspc->I2C_ioaddr;
+	struct device *dev = &sspc->pdev->dev;
+	void *reg = sspc->ioaddr;
+	struct chip_data *chip = spi_get_ctldata(sspc->cur_msg->spi);
+	cr0 = chip->cr0;
+
+	dev_warn(dev, "In %s : Starting bit banging\n",
+		__func__);
+	if (read_SSSR(reg) & SSP_NOT_SYNC)
+		dev_warn(dev, "SSP clock desynchronized.\n");
+	if (!(read_SSCR0(reg) & SSCR0_SSE))
+		dev_warn(dev, "in SSCR0, SSP disabled.\n");
+
+	dev_dbg(dev, "SSP not ready, start CLK sync\n");
+
+	write_SSCR0(cr0 & ~SSCR0_SSE, reg);
+	write_SSPSP(0x02010007, reg);
+
+	write_SSTO(chip->timeout, reg);
+	write_SSCR0(cr0, reg);
+
+	/*
+	*  This routine uses the DFx block to override the SSP inputs
+	*  and outputs allowing us to bit bang SSPSCLK. On Langwell,
+	*  we have to generate the clock to clear busy.
+	*/
+	write_I2CDATA(0x3, i2c_reg);
+	udelay(I2C_ACCESS_USDELAY);
+	write_I2CCTRL(0x01070034, i2c_reg);
+	udelay(I2C_ACCESS_USDELAY);
+	write_I2CDATA(0x00000099, i2c_reg);
+	udelay(I2C_ACCESS_USDELAY);
+	write_I2CCTRL(0x01070038, i2c_reg);
+	udelay(I2C_ACCESS_USDELAY);
+	sssr = read_SSSR(reg);
+
+	/* Bit bang the clock until CSS clears */
+	while ((sssr & 0x400000) && (count < MAX_BITBANGING_LOOP)) {
+		write_I2CDATA(0x2, i2c_reg);
+		udelay(I2C_ACCESS_USDELAY);
+		write_I2CCTRL(0x01070034, i2c_reg);
+		udelay(I2C_ACCESS_USDELAY);
+		write_I2CDATA(0x3, i2c_reg);
+		udelay(I2C_ACCESS_USDELAY);
+		write_I2CCTRL(0x01070034, i2c_reg);
+		udelay(I2C_ACCESS_USDELAY);
+		sssr = read_SSSR(reg);
+		count++;
+	}
+	if (count >= MAX_BITBANGING_LOOP)
+		dev_err(dev, "ERROR in %s: bit banging loop limit reached, aborting\n",
+								__func__);
+
+	dev_dbg(dev, "---Bit bang count=%d\n", count);
+
+	write_I2CDATA(0x0, i2c_reg);
+	udelay(I2C_ACCESS_USDELAY);
+	write_I2CCTRL(0x01070038, i2c_reg);
+}
+
+static unsigned int ssp_get_clk_div(struct ssp_drv_context *sspc, int speed)
+{
+	if (sspc->quirks & QUIRKS_PLATFORM_MRFL)
+		/* The clock divider shall stay between 0 and 4095. */
+		return clamp(25000000 / speed - 1, 0, 4095);
+	else
+		return clamp(100000000 / speed - 1, 3, 4095);
+}
+
+
+static int ssp_get_speed(struct ssp_drv_context *sspc, int clk_div)
+{
+	if (sspc->quirks & QUIRKS_PLATFORM_MRFL)
+		return 25000000 / (clk_div + 1);
+	else
+		return 100000000 / (clk_div + 1);
+}
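+/*
+ * For illustration: on MRFL (25 MHz base clock) a requested speed of
+ * 1 MHz gives clk_div = 25000000 / 1000000 - 1 = 24, and
+ * ssp_get_speed(sspc, 24) maps back to 25000000 / 25 = 1 MHz. Speeds
+ * that do not divide the base clock are quantized by the integer
+ * division to an achievable 25000000 / (clk_div + 1) rate.
+ */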
+
+/**
+ * transfer() - Start a SPI transfer
+ * @spi:	Pointer to the spi_device struct
+ * @msg:	Pointer to the spi_message struct
+ */
+static int transfer(struct spi_device *spi, struct spi_message *msg)
+{
+	struct ssp_drv_context *sspc = spi_master_get_devdata(spi->master);
+	unsigned long flags;
+
+	msg->actual_length = 0;
+	msg->status = -EINPROGRESS;
+	spin_lock_irqsave(&sspc->lock, flags);
+	list_add_tail(&msg->queue, &sspc->queue);
+	if (!sspc->suspended)
+		queue_work(sspc->workqueue, &sspc->pump_messages);
+	spin_unlock_irqrestore(&sspc->lock, flags);
+
+	return 0;
+}
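+/*
+ * Usage sketch (illustrative only, not part of the driver): a protocol
+ * driver reaches this hook through the SPI core, e.g.
+ *
+ *	struct spi_transfer t = { .tx_buf = buf, .len = 4, };
+ *	struct spi_message m;
+ *
+ *	spi_message_init(&m);
+ *	spi_message_add_tail(&t, &m);
+ *	spi_async(spi, &m);
+ *
+ * spi_async() ends up calling master->transfer, i.e. transfer() above,
+ * which only queues the message; pump_messages() below does the work.
+ */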
+
+static int handle_message(struct ssp_drv_context *sspc)
+{
+	struct chip_data *chip = NULL;
+	struct spi_transfer *transfer = NULL;
+	void *reg = sspc->ioaddr;
+	u32 cr0, saved_cr0, cr1, saved_cr1;
+	struct device *dev = &sspc->pdev->dev;
+	struct spi_message *msg = sspc->cur_msg;
+	u32 clk_div, saved_speed_hz, speed_hz;
+	u8 dma_enabled;
+	u32 timeout;
+	u8 chip_select;
+	u32 mask = 0;
+	int bits_per_word, saved_bits_per_word;
+	unsigned long flags;
+
+	chip = spi_get_ctldata(msg->spi);
+
+	/* get every chip data we need to handle atomically the full message */
+	spin_lock_irqsave(&sspc->lock, flags);
+	saved_cr0 = chip->cr0;
+	saved_cr1 = chip->cr1;
+	saved_bits_per_word = msg->spi->bits_per_word;
+	saved_speed_hz = chip->speed_hz;
+	sspc->cs_control = chip->cs_control;
+	timeout = chip->timeout;
+	chip_select = chip->chip_select;
+	dma_enabled = chip->dma_enabled;
+	spin_unlock_irqrestore(&sspc->lock, flags);
+
+	/*
+	 * Multiple transfers must be serialized; completing msg_done up
+	 * front lets the loop below wait on it uniformly before each
+	 * transfer.
+	 */
+	complete(&sspc->msg_done);
+
+	list_for_each_entry(transfer, &msg->transfers, transfer_list) {
+		wait_for_completion(&sspc->msg_done);
+		INIT_COMPLETION(sspc->msg_done);
+
+		/* Check transfer length */
+		if (unlikely((transfer->len > MAX_SPI_TRANSFER_SIZE) ||
+					(transfer->len == 0))) {
+			dev_warn(dev, "transfer length null or greater than %d\n",
+					MAX_SPI_TRANSFER_SIZE);
+			dev_warn(dev, "length = %d\n", transfer->len);
+			msg->status = -EINVAL;
+
+			if (msg->complete)
+				msg->complete(msg->context);
+			complete(&sspc->msg_done);
+			return 0;
+		}
+
+		/*
+		 * If the bits_per_word field is non-zero in the spi_transfer
+		 * provided by user space, use that value. Otherwise use the
+		 * default bits_per_word from the SPI device settings.
+		 */
+		if (transfer->bits_per_word) {
+			bits_per_word = transfer->bits_per_word;
+			cr0 = saved_cr0;
+			cr0 &= ~(SSCR0_EDSS | SSCR0_DSS);
+			cr0 |= SSCR0_DataSize(bits_per_word > 16 ?
+					bits_per_word - 16 : bits_per_word)
+				| (bits_per_word > 16 ? SSCR0_EDSS : 0);
+		} else {
+			/* Use default value. */
+			bits_per_word = saved_bits_per_word;
+			cr0 = saved_cr0;
+		}
+
+		if (bits_per_word < MIN_BITS_PER_WORD
+				|| bits_per_word > MAX_BITS_PER_WORD) {
+			dev_warn(dev, "invalid wordsize\n");
+			msg->status = -EINVAL;
+			if (msg->complete)
+				msg->complete(msg->context);
+			complete(&sspc->msg_done);
+			return 0;
+		}
+
+		/* Check message length and bits-per-word consistency */
+		if (bits_per_word <= 8)
+			mask = 0;
+		else if (bits_per_word <= 16)
+			mask = 1;
+		else if (bits_per_word <= 32)
+			mask = 3;
+
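+		/*
+		 * transfer->len is in bytes, so a 16-bit transfer must be a
+		 * multiple of 2 bytes (mask = 1) and a 32-bit transfer a
+		 * multiple of 4 bytes (mask = 3); 8-bit transfers (mask = 0)
+		 * can have any length.
+		 */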
+		if (transfer->len & mask) {
+			dev_warn(dev,
+					"message rejected : data length %d not multiple of %d "
+					"while in %d bits mode\n",
+					transfer->len,
+					mask + 1,
+					(mask == 1) ? 16 : 32);
+			msg->status = -EINVAL;
+			if (msg->complete)
+				msg->complete(msg->context);
+			complete(&sspc->msg_done);
+			return 0;
+		}
+
+		/* Flush any remaining data (in case of failed previous transfer) */
+		flush(sspc);
+
+		dev_dbg(dev, "%d bits/word, mode %d\n",
+				bits_per_word, msg->spi->mode & 0x3);
+		if (bits_per_word <= 8) {
+			sspc->n_bytes = 1;
+			sspc->read = u8_reader;
+			sspc->write = u8_writer;
+		} else if (bits_per_word <= 16) {
+			sspc->n_bytes = 2;
+			sspc->read = u16_reader;
+			sspc->write = u16_writer;
+		} else if (bits_per_word <= 32) {
+			if (!ssp_timing_wr)
+				cr0 |= SSCR0_EDSS;
+			sspc->n_bytes = 4;
+			sspc->read = u32_reader;
+			sspc->write = u32_writer;
+		}
+
+		sspc->tx  = (void *)transfer->tx_buf;
+		sspc->rx  = (void *)transfer->rx_buf;
+		sspc->len = transfer->len;
+		sspc->cs_control = chip->cs_control;
+		sspc->cs_change = transfer->cs_change;
+
+		if (likely(chip->dma_enabled)) {
+			sspc->dma_mapped = map_dma_buffers(sspc);
+			if (unlikely(!sspc->dma_mapped))
+				return 0;
+		}
+
+		sspc->write = sspc->tx ? sspc->write : null_writer;
+		sspc->read  = sspc->rx ? sspc->read : null_reader;
+
+		sspc->tx_end = sspc->tx + transfer->len;
+		sspc->rx_end = sspc->rx + transfer->len;
+
+		/* [REVERT ME] Bug in status register clear for Tangier simulation */
+		if ((intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) ||
+				(intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE)) {
+			if ((intel_mid_identify_sim() != INTEL_MID_CPU_SIMULATION_VP &&
+						(intel_mid_identify_sim() != INTEL_MID_CPU_SIMULATION_HVP)))
+				write_SSSR(sspc->clear_sr, reg);
+		} else /* Clear status  */
+			write_SSSR(sspc->clear_sr, reg);
+
+		/* setup the CR1 control register */
+		cr1 = saved_cr1 | sspc->cr1_sig;
+
+		if (likely(sspc->quirks & QUIRKS_DMA_USE_NO_TRAIL)) {
+			/*
+			 * In case len is smaller than the burst size, adjust
+			 * the RX threshold; all other cases use the default
+			 * threshold value. The RX FIFO threshold must be
+			 * aligned with the DMA RX transfer size, which may be
+			 * limited to a multiple of 4 bytes due to 32-bit DDR
+			 * access (for instance, a 7-byte transfer at one byte
+			 * per word rounds down to a threshold of 4).
+			 */
+			if (sspc->len / sspc->n_bytes < sspc->rx_fifo_threshold) {
+				u32 rx_fifo_threshold;
+
+				rx_fifo_threshold = (sspc->len & ~(4 - 1)) /
+					sspc->n_bytes;
+				cr1 &= ~(SSCR1_RFT);
+				cr1 |= SSCR1_RxTresh(rx_fifo_threshold) & SSCR1_RFT;
+			} else
+				write_SSTO(timeout, reg);
+		}
+		dev_dbg(dev, "transfer len:%d  n_bytes:%d  cr0:%x  cr1:%x",
+				sspc->len, sspc->n_bytes, cr0, cr1);
+
+		/* first set CR1 */
+		write_SSCR1(cr1, reg);
+
+		if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER)
+			write_SSFS((1 << chip_select), reg);
+
+		/* recalculate the frequency for each transfer */
+		if (transfer->speed_hz)
+			speed_hz = transfer->speed_hz;
+		else
+			speed_hz = saved_speed_hz;
+
+		clk_div = ssp_get_clk_div(sspc, speed_hz);
+
+		cr0 &= ~SSCR0_SCR;
+		cr0 |= (clk_div & 0xFFF) << 8;
+
+		/* Do bit banging only if the SSP is not enabled or not synchronized */
+		if (unlikely(((read_SSSR(reg) & SSP_NOT_SYNC) ||
+						(!(read_SSCR0(reg) & SSCR0_SSE))) &&
+					(sspc->quirks & QUIRKS_BIT_BANGING))) {
+			start_bitbanging(sspc);
+		} else {
+			/* if the speed is higher than 6.25 MHz, enable clock delay */
+			if (speed_hz > 6250000)
+				write_SSCR2((read_SSCR2(reg) | SSCR2_CLK_DEL_EN), reg);
+			else
+				write_SSCR2((read_SSCR2(reg) & ~SSCR2_CLK_DEL_EN), reg);
+
+			/* (re)start the SSP */
+			if (ssp_timing_wr) {
+				dev_dbg(dev, "original cr0 before reset:%x",
+						cr0);
+				/* we should not disable the TUM and RIM interrupts */
+				write_SSCR0(0x0000000F, reg);
+				cr0 &= ~(SSCR0_SSE);
+				dev_dbg(dev, "reset ssp:cr0:%x", cr0);
+				write_SSCR0(cr0, reg);
+				cr0 |= SSCR0_SSE;
+				dev_dbg(dev, "reset ssp:cr0:%x", cr0);
+				write_SSCR0(cr0, reg);
+			} else
+				write_SSCR0(cr0, reg);
+		}
+
+		if (sspc->cs_control)
+			sspc->cs_control(sspc->cs_assert);
+
+		if (likely(dma_enabled)) {
+			if (unlikely(sspc->quirks & QUIRKS_USE_PM_QOS))
+				pm_qos_update_request(&sspc->pm_qos_req,
+						MIN_EXIT_LATENCY);
+			dma_transfer(sspc);
+		} else {
+			/* Do the transfer synchronously */
+			queue_work(sspc->wq_poll_write, &sspc->poll_write);
+			poll_transfer((unsigned long)sspc);
+			unmap_dma_buffers(sspc);
+			complete(&sspc->msg_done);
+		}
+
+		if (list_is_last(&transfer->transfer_list, &msg->transfers)
+				|| sspc->cs_change) {
+			if (sspc->cs_control)
+				sspc->cs_control(!sspc->cs_assert);
+		}
+
+	} /* end of list_for_each_entry */
+
+	wait_for_completion(&sspc->msg_done);
+
+	/* Now we are done with this entire message */
+	if (likely(msg->complete))
+		msg->complete(msg->context);
+
+	return 0;
+}
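+/*
+ * Serialization sketch for handle_message(): msg_done starts completed
+ * (see the complete() call before the transfer loop); each iteration
+ * consumes it with wait_for_completion()/INIT_COMPLETION(), and either
+ * int_transfer_complete() (DMA) or the polled branch re-completes it,
+ * so only one transfer of the message is in flight at a time.
+ */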
+
+static void pump_messages(struct work_struct *work)
+{
+	struct ssp_drv_context *sspc =
+		container_of(work, struct ssp_drv_context, pump_messages);
+	struct device *dev = &sspc->pdev->dev;
+	unsigned long flags;
+	struct spi_message *msg;
+
+	pm_runtime_get_sync(dev);
+	spin_lock_irqsave(&sspc->lock, flags);
+	while (!list_empty(&sspc->queue)) {
+		if (sspc->suspended)
+			break;
+		msg = list_entry(sspc->queue.next, struct spi_message, queue);
+		list_del_init(&msg->queue);
+		sspc->cur_msg = msg;
+		spin_unlock_irqrestore(&sspc->lock, flags);
+		handle_message(sspc);
+		spin_lock_irqsave(&sspc->lock, flags);
+		sspc->cur_msg = NULL;
+	}
+	spin_unlock_irqrestore(&sspc->lock, flags);
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_autosuspend(dev);
+}
+
+/**
+ * setup() - Driver setup procedure
+ * @spi:	Pointer to the spi_device struct
+ */
+static int setup(struct spi_device *spi)
+{
+	struct intel_mid_ssp_spi_chip *chip_info = NULL;
+	struct chip_data *chip, *alloc_chip = NULL;
+	struct ssp_drv_context *sspc =
+		spi_master_get_devdata(spi->master);
+	u32 tx_fifo_threshold;
+	u32 burst_size;
+	u32 clk_div;
+	static u32 one_time_setup = 1;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&sspc->lock, flags);
+	if (!spi->bits_per_word)
+		spi->bits_per_word = DFLT_BITS_PER_WORD;
+
+	if ((spi->bits_per_word != 8) && (spi->bits_per_word != 16)
+		&& (spi->bits_per_word != 32)) {
+		spin_unlock_irqrestore(&sspc->lock, flags);
+		dev_warn(&spi->dev, "invalid wordsize, system only support 8,16,32 bits per word.\n");
+		return -EINVAL;
+	}
+
+	chip = spi_get_ctldata(spi);
+	if (!chip) {
+		spin_unlock_irqrestore(&sspc->lock, flags);
+		alloc_chip = chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+		spin_lock_irqsave(&sspc->lock, flags);
+		if (!chip) {
+			dev_err(&spi->dev,
+			"failed setup: can't allocate chip data\n");
+			ret = -ENOMEM;
+			goto exit_setup;
+		}
+
+		if (spi_get_ctldata(spi)) {
+			dev_err(&spi->dev, "failed setup: already executed\n");
+			ret = -EAGAIN;
+			goto exit_setup;
+		}
+	}
+
+	chip->cr0 = SSCR0_Motorola | SSCR0_DataSize(spi->bits_per_word > 16 ?
+		spi->bits_per_word - 16 : spi->bits_per_word)
+			| SSCR0_SSE
+			| (spi->bits_per_word > 16 ? SSCR0_EDSS : 0);
+
+	/* protocol drivers may change the chip settings, so...  */
+	/* if chip_info exists, use it                           */
+	chip_info = spi->controller_data;
+
+	/* chip_info isn't always needed */
+	chip->cr1 = 0;
+	if (chip_info) {
+		/*
+		 * If the user requested CS active high, verify that there is
+		 * no transfer pending. If there is, kindly fail.
+		 */
+		if ((spi->mode & SPI_CS_HIGH) != sspc->cs_assert) {
+			if (sspc->cur_msg) {
+				dev_err(&spi->dev, "message pending... Failing\n");
+				/* A message is currently in transfer. Do not toggle CS */
+				ret = -EAGAIN;
+				goto exit_setup;
+			}
+			if (!chip_info->cs_control) {
+				/* unable to control cs by hand */
+				dev_err(&spi->dev,
+						"This CS does not support SPI_CS_HIGH flag\n");
+				ret = -EINVAL;
+				goto exit_setup;
+			}
+			sspc->cs_assert = spi->mode & SPI_CS_HIGH;
+			chip_info->cs_control(!sspc->cs_assert);
+		}
+
+		burst_size = chip_info->burst_size;
+		if (burst_size > IMSS_FIFO_BURST_8)
+			burst_size = DFLT_FIFO_BURST_SIZE;
+
+		chip->timeout = chip_info->timeout;
+
+		if (chip_info->enable_loopback)
+			chip->cr1 |= SSCR1_LBM;
+
+		chip->dma_enabled = chip_info->dma_enabled;
+		chip->cs_control = chip_info->cs_control;
+
+		/* Request platform-specific gpio and pinmux here since
+		 * it is not possible to get the intel_mid_ssp_spi_chip
+		 * structure in probe */
+		if (one_time_setup && !chip_info->dma_enabled
+				&& chip_info->platform_pinmux) {
+			chip_info->platform_pinmux();
+			one_time_setup = 0;
+		}
+
+	} else {
+		/*
+		 * If no chip_info is provided by the protocol driver, set
+		 * default values.
+		 */
+		dev_info(&spi->dev, "setting default chip values\n");
+
+		burst_size = DFLT_FIFO_BURST_SIZE;
+		chip->dma_enabled = 1;
+		if (sspc->quirks & QUIRKS_DMA_USE_NO_TRAIL)
+			chip->timeout = 0;
+		else
+			chip->timeout = DFLT_TIMEOUT_VAL;
+	}
+	/* Set FIFO thresholds according to burst_size */
+	if (burst_size == IMSS_FIFO_BURST_8)
+		sspc->rx_fifo_threshold = 8;
+	else if (burst_size == IMSS_FIFO_BURST_4)
+		sspc->rx_fifo_threshold = 4;
+	else
+		sspc->rx_fifo_threshold = 1;
+	/*
+	 * FIXME: this is a workaround. When the speed is lower than 800 kHz,
+	 * data is transferred incorrectly on MRFL when using DMA.
+	 */
+	if (sspc->quirks & QUIRKS_PLATFORM_MRFL && chip->dma_enabled
+			&& (spi->max_speed_hz < 800000))
+		sspc->rx_fifo_threshold = 1;
+	tx_fifo_threshold = SPI_FIFO_SIZE - sspc->rx_fifo_threshold;
+	chip->cr1 |= (SSCR1_RxTresh(sspc->rx_fifo_threshold) &
+		SSCR1_RFT) | (SSCR1_TxTresh(tx_fifo_threshold) & SSCR1_TFT);
+
+	sspc->dma_mapped = 0;
+
+	/* setting phase and polarity. spi->mode comes from boardinfo */
+	if ((spi->mode & SPI_CPHA) != 0)
+		chip->cr1 |= SSCR1_SPH;
+	if ((spi->mode & SPI_CPOL) != 0)
+		chip->cr1 |= SSCR1_SPO;
+
+	if (sspc->quirks & QUIRKS_SPI_SLAVE_CLOCK_MODE)
+		/* set slave mode */
+		chip->cr1 |= SSCR1_SCLKDIR | SSCR1_SFRMDIR;
+	chip->cr1 |= SSCR1_SCFR;        /* clock is not free running */
+
+	if (spi->bits_per_word <= 8) {
+		chip->n_bytes = 1;
+	} else if (spi->bits_per_word <= 16) {
+		chip->n_bytes = 2;
+	} else if (spi->bits_per_word <= 32) {
+		chip->n_bytes = 4;
+	} else {
+		dev_err(&spi->dev, "invalid wordsize\n");
+		ret = -EINVAL;
+		goto exit_setup;
+	}
+
+	if ((sspc->quirks & QUIRKS_SPI_SLAVE_CLOCK_MODE) == 0) {
+		clk_div = ssp_get_clk_div(sspc, spi->max_speed_hz);
+		chip->cr0 |= (clk_div & 0xFFF) << 8;
+		spi->max_speed_hz = ssp_get_speed(sspc, clk_div);
+		chip->speed_hz = spi->max_speed_hz;
+		dev_dbg(&spi->dev, "spi->max_speed_hz:%d clk_div:%x cr0:%x",
+			spi->max_speed_hz, clk_div, chip->cr0);
+	}
+	chip->bits_per_word = spi->bits_per_word;
+	chip->chip_select = spi->chip_select;
+
+	alloc_chip = NULL;
+	spi_set_ctldata(spi, chip);
+
+	/* setup of sspc members that will not change across transfers */
+
+	if (chip->dma_enabled) {
+		sspc->n_bytes = chip->n_bytes;
+		spin_unlock_irqrestore(&sspc->lock, flags);
+		intel_mid_ssp_spi_dma_init(sspc);
+		spin_lock_irqsave(&sspc->lock, flags);
+		sspc->cr1_sig = SSCR1_TSRE | SSCR1_RSRE;
+		sspc->mask_sr = SSSR_ROR | SSSR_TUR;
+		if (sspc->quirks & QUIRKS_DMA_USE_NO_TRAIL)
+			sspc->cr1_sig |= SSCR1_TRAIL;
+	} else {
+		sspc->cr1_sig = SSCR1_TINTE;
+		sspc->mask_sr = SSSR_ROR | SSSR_TUR | SSSR_TINT;
+	}
+	sspc->clear_sr = SSSR_TUR | SSSR_ROR | SSSR_TINT;
+
+exit_setup:
+	kfree(alloc_chip);
+
+	spin_unlock_irqrestore(&sspc->lock, flags);
+	return ret;
+}
+
+/**
+ * cleanup() - Driver cleanup procedure
+ * @spi:	Pointer to the spi_device struct
+ */
+static void cleanup(struct spi_device *spi)
+{
+	struct chip_data *chip = spi_get_ctldata(spi);
+	struct ssp_drv_context *sspc =
+		spi_master_get_devdata(spi->master);
+
+	if (sspc->dma_initialized)
+		intel_mid_ssp_spi_dma_exit(sspc);
+
+	/* Remove the PM_QOS request */
+	if (sspc->quirks & QUIRKS_USE_PM_QOS)
+		pm_qos_remove_request(&sspc->pm_qos_req);
+
+	kfree(chip);
+	spi_set_ctldata(spi, NULL);
+}
+
+/**
+ * intel_mid_ssp_spi_probe() - Driver probe procedure
+ * @pdev:	Pointer to the pci_dev struct
+ * @ent:	Pointer to the pci_device_id struct
+ */
+static int intel_mid_ssp_spi_probe(struct pci_dev *pdev,
+	const struct pci_device_id *ent)
+{
+	struct device *dev = &pdev->dev;
+	struct spi_master *master;
+	struct ssp_drv_context *sspc = NULL;
+	int status;
+	u32 iolen = 0;
+	u8 ssp_cfg;
+	int pos;
+	void __iomem *syscfg_ioaddr;
+	unsigned long syscfg;
+
+	/*
+	 * Check whether the SSP we are probed for has been allocated to
+	 * operate as SPI. This information is retrieved from the adid
+	 * field of the Vendor-Specific PCI capability, which is used as
+	 * a configuration register.
+	 */
+	pos = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
+	if (pos > 0) {
+		pci_read_config_byte(pdev,
+			pos + VNDR_CAPABILITY_ADID_OFFSET,
+			&ssp_cfg);
+	} else {
+		dev_info(dev, "No Vendor Specific PCI capability\n");
+		goto err_abort_probe;
+	}
+
+	if (ssp_cfg_get_mode(ssp_cfg) != SSP_CFG_SPI_MODE_ID) {
+		dev_info(dev, "Unsupported SSP mode (%02xh)\n", ssp_cfg);
+		goto err_abort_probe;
+	}
+
+	dev_info(dev, "found PCI SSP controller (ID: %04xh:%04xh cfg: %02xh)\n",
+		pdev->vendor, pdev->device, ssp_cfg);
+
+	status = pci_enable_device(pdev);
+	if (status)
+		return status;
+
+	/* Allocate master with space for sspc and null dma buffer */
+	master = spi_alloc_master(dev, sizeof(struct ssp_drv_context));
+
+	if (!master) {
+		dev_err(dev, "cannot alloc spi_slave\n");
+		status = -ENOMEM;
+		goto err_free_0;
+	}
+
+	sspc = spi_master_get_devdata(master);
+	sspc->master = master;
+
+	sspc->pdev = pdev;
+	sspc->quirks = ent->driver_data;
+
+	/* Set platform & configuration quirks */
+	if (sspc->quirks & QUIRKS_PLATFORM_MRST) {
+		/* Apply bit banging workaround on MRST */
+		sspc->quirks |= QUIRKS_BIT_BANGING;
+		/* MRST slave mode workarounds */
+		if (ssp_cfg_is_spi_slave(ssp_cfg))
+			sspc->quirks |= QUIRKS_USE_PM_QOS |
+					QUIRKS_SRAM_ADDITIONAL_CPY;
+	}
+	sspc->quirks |= QUIRKS_DMA_USE_NO_TRAIL;
+	if (ssp_cfg_is_spi_slave(ssp_cfg))
+		sspc->quirks |= QUIRKS_SPI_SLAVE_CLOCK_MODE;
+
+	master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA;
+	master->bus_num = ssp_cfg_get_spi_bus_nb(ssp_cfg);
+	master->num_chipselect = 4;
+	master->cleanup = cleanup;
+	master->setup = setup;
+	master->transfer = transfer;
+	sspc->dma_wq = create_workqueue("intel_mid_ssp_spi");
+	INIT_WORK(&sspc->complete_work, int_transfer_complete_work);
+
+	sspc->dma_initialized = 0;
+	sspc->suspended = 0;
+	sspc->cur_msg = NULL;
+
+	/* get basic io resource and map it */
+	sspc->paddr = pci_resource_start(pdev, 0);
+	iolen = pci_resource_len(pdev, 0);
+
+	status = pci_request_region(pdev, 0, dev_name(&pdev->dev));
+	if (status)
+		goto err_free_1;
+
+	sspc->ioaddr = ioremap_nocache(sspc->paddr, iolen);
+	if (!sspc->ioaddr) {
+		status = -ENOMEM;
+		goto err_free_2;
+	}
+	dev_dbg(dev, "paddr = : %08lx", sspc->paddr);
+	dev_dbg(dev, "ioaddr = : %p\n", sspc->ioaddr);
+	dev_dbg(dev, "attaching to IRQ: %04x\n", pdev->irq);
+	dev_dbg(dev, "quirks = : %08lx\n", sspc->quirks);
+
+	if (sspc->quirks & QUIRKS_BIT_BANGING) {
+		/*
+		 * Bit banging on the clock is done through DFT, which is
+		 * available through I2C; get the base address of the
+		 * I2C_Serbus registers.
+		 */
+		sspc->I2C_paddr = 0xff12b000;
+		sspc->I2C_ioaddr = ioremap_nocache(sspc->I2C_paddr, 0x10);
+		if (!sspc->I2C_ioaddr) {
+			status = -ENOMEM;
+			goto err_free_3;
+		}
+	}
+
+	/* Attach to IRQ */
+	sspc->irq = pdev->irq;
+	status = request_irq(sspc->irq, ssp_int, IRQF_SHARED,
+		"intel_mid_ssp_spi", sspc);
+
+	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) {
+		if ((intel_mid_identify_sim() ==
+				INTEL_MID_CPU_SIMULATION_SLE) ||
+		    (intel_mid_identify_sim() ==
+				INTEL_MID_CPU_SIMULATION_NONE)) {
+			/* [REVERT ME] Tangier SLE not supported.
+			 * Requires debug before removal.  Assume
+			 * also required in Si. */
+			disable_irq_nosync(sspc->irq);
+		}
+		if (intel_mid_identify_sim() == INTEL_MID_CPU_SIMULATION_NONE)
+			ssp_timing_wr = 1;
+	}
+
+	if (status < 0) {
+		dev_err(&pdev->dev, "can not get IRQ\n");
+		goto err_free_4;
+	}
+
+	if (sspc->quirks & QUIRKS_PLATFORM_MDFL) {
+		/* get base address of DMA selector. */
+		syscfg = sspc->paddr - SYSCFG;
+		syscfg_ioaddr = ioremap_nocache(syscfg, 0x10);
+		if (!syscfg_ioaddr) {
+			status = -ENOMEM;
+			goto err_free_5;
+		}
+		iowrite32(ioread32(syscfg_ioaddr) | 2, syscfg_ioaddr);
+	}
+
+	INIT_LIST_HEAD(&sspc->queue);
+	init_completion(&sspc->msg_done);
+	spin_lock_init(&sspc->lock);
+	INIT_WORK(&sspc->pump_messages, pump_messages);
+	sspc->workqueue = create_singlethread_workqueue(dev_name(&pdev->dev));
+
+	INIT_WORK(&sspc->poll_write, poll_writer);
+	sspc->wq_poll_write = create_singlethread_workqueue("spi_poll_wr");
+
+	/* Register with the SPI framework */
+	dev_info(dev, "register with SPI framework (bus spi%d)\n",
+			master->bus_num);
+
+	status = spi_register_master(master);
+	if (status) {
+		dev_err(dev, "problem registering spi\n");
+		goto err_free_5;
+	}
+
+	pci_set_drvdata(pdev, sspc);
+
+	/* Create the PM_QOS request */
+	if (sspc->quirks & QUIRKS_USE_PM_QOS)
+		pm_qos_add_request(&sspc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+				PM_QOS_DEFAULT_VALUE);
+
+	pm_runtime_set_autosuspend_delay(&pdev->dev, 25);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_set_active(&pdev->dev);
+	if (!pm_runtime_enabled(&pdev->dev))
+		dev_err(&pdev->dev, "spi runtime pm not enabled!\n");
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
+
+	return status;
+
+err_free_5:
+	free_irq(sspc->irq, sspc);
+err_free_4:
+	iounmap(sspc->I2C_ioaddr);
+err_free_3:
+	iounmap(sspc->ioaddr);
+err_free_2:
+	pci_release_region(pdev, 0);
+err_free_1:
+	spi_master_put(master);
+err_free_0:
+	pci_disable_device(pdev);
+
+	return status;
+err_abort_probe:
+	dev_info(dev, "Abort probe for SSP %04xh:%04xh\n",
+		pdev->vendor, pdev->device);
+	return -ENODEV;
+}
+
+/**
+ * intel_mid_ssp_spi_remove() - driver remove procedure
+ * @pdev:	Pointer to the pci_dev struct
+ */
+static void intel_mid_ssp_spi_remove(struct pci_dev *pdev)
+{
+	struct ssp_drv_context *sspc = pci_get_drvdata(pdev);
+
+	if (!sspc)
+		return;
+
+	pm_runtime_forbid(&pdev->dev);
+	pm_runtime_get_noresume(&pdev->dev);
+
+	if (sspc->dma_wq)
+		destroy_workqueue(sspc->dma_wq);
+	if (sspc->workqueue)
+		destroy_workqueue(sspc->workqueue);
+
+	/* Release IRQ */
+	free_irq(sspc->irq, sspc);
+
+	if (sspc->ioaddr)
+		iounmap(sspc->ioaddr);
+	if (sspc->quirks & QUIRKS_BIT_BANGING && sspc->I2C_ioaddr)
+		iounmap(sspc->I2C_ioaddr);
+
+	/* disconnect from the SPI framework */
+	if (sspc->master)
+		spi_unregister_master(sspc->master);
+
+	pci_set_drvdata(pdev, NULL);
+	pci_release_region(pdev, 0);
+	pci_disable_device(pdev);
+
+	return;
+}
+
+#ifdef CONFIG_PM
+static int intel_mid_ssp_spi_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct ssp_drv_context *sspc = pci_get_drvdata(pdev);
+	unsigned long flags;
+	int loop = 26;
+
+	dev_dbg(dev, "suspend\n");
+
+	spin_lock_irqsave(&sspc->lock, flags);
+	sspc->suspended = 1;
+	/*
+	 * If a msg is being handled, wait at most 500ms (25 * 20ms);
+	 * if it is still not done, clear the suspend flag and return busy.
+	 */
+	while (sspc->cur_msg && --loop) {
+		spin_unlock_irqrestore(&sspc->lock, flags);
+		msleep(20);
+		spin_lock_irqsave(&sspc->lock, flags);
+	}
+	if (!loop)
+		sspc->suspended = 0;
+	spin_unlock_irqrestore(&sspc->lock, flags);
+
+	if (!loop)
+		return -EBUSY;
+
+	return 0;
+}
+
+static int intel_mid_ssp_spi_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct ssp_drv_context *sspc = pci_get_drvdata(pdev);
+
+	dev_dbg(dev, "resume\n");
+	spin_lock(&sspc->lock);
+	sspc->suspended = 0;
+	if (!list_empty(&sspc->queue))
+		queue_work(sspc->workqueue, &sspc->pump_messages);
+	spin_unlock(&sspc->lock);
+	return 0;
+}
+
+static int intel_mid_ssp_spi_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "runtime suspend called\n");
+	return 0;
+}
+
+static int intel_mid_ssp_spi_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "runtime resume called\n");
+	return 0;
+}
+
+static int intel_mid_ssp_spi_runtime_idle(struct device *dev)
+{
+	int err;
+
+	dev_dbg(dev, "runtime idle called\n");
+	if (system_state == SYSTEM_BOOTING)
+		/*
+		 * If the SSP SPI UART is set as the default console and
+		 * earlyprintk is enabled, the SSP controller cannot be shut
+		 * down during boot.
+		 */
+		err = pm_schedule_suspend(dev, 30000);
+	else
+		err = pm_schedule_suspend(dev, 500);
+
+	return err;
+}
+#else
+#define intel_mid_ssp_spi_suspend NULL
+#define intel_mid_ssp_spi_resume NULL
+#define intel_mid_ssp_spi_runtime_suspend NULL
+#define intel_mid_ssp_spi_runtime_resume NULL
+#define intel_mid_ssp_spi_runtime_idle NULL
+#endif /* CONFIG_PM */
+
+
+static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
+	/* MRST SSP0 */
+	{ PCI_VDEVICE(INTEL, 0x0815), QUIRKS_PLATFORM_MRST},
+	/* MDFL SSP0 */
+	{ PCI_VDEVICE(INTEL, 0x0832), QUIRKS_PLATFORM_MDFL},
+	/* MDFL SSP1 */
+	{ PCI_VDEVICE(INTEL, 0x0825), QUIRKS_PLATFORM_MDFL},
+	/* MDFL SSP3 */
+	{ PCI_VDEVICE(INTEL, 0x0816), QUIRKS_PLATFORM_MDFL},
+	/* MRFL SSP5 */
+	{ PCI_VDEVICE(INTEL, 0x1194), QUIRKS_PLATFORM_MRFL},
+	/* BYT SSP3 */
+	{ PCI_VDEVICE(INTEL, 0x0f0e), QUIRKS_PLATFORM_BYT},
+	{},
+};
+
+static const struct dev_pm_ops intel_mid_ssp_spi_pm_ops = {
+	.suspend = intel_mid_ssp_spi_suspend,
+	.resume = intel_mid_ssp_spi_resume,
+	.runtime_suspend = intel_mid_ssp_spi_runtime_suspend,
+	.runtime_resume = intel_mid_ssp_spi_runtime_resume,
+	.runtime_idle = intel_mid_ssp_spi_runtime_idle,
+};
+
+static struct pci_driver intel_mid_ssp_spi_driver = {
+	.name =		DRIVER_NAME,
+	.id_table =	pci_ids,
+	.probe =	intel_mid_ssp_spi_probe,
+	.remove =	intel_mid_ssp_spi_remove,
+	.driver =	{
+		.pm	= &intel_mid_ssp_spi_pm_ops,
+	},
+};
+
+static int __init intel_mid_ssp_spi_init(void)
+{
+	return pci_register_driver(&intel_mid_ssp_spi_driver);
+}
+
+late_initcall(intel_mid_ssp_spi_init);
+
+static void __exit intel_mid_ssp_spi_exit(void)
+{
+	pci_unregister_driver(&intel_mid_ssp_spi_driver);
+}
+
+module_exit(intel_mid_ssp_spi_exit);
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index 1389fef..631445d 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -22,7 +22,6 @@
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/spi/spi.h>
-#include <linux/types.h>
 
 #include "spi-dw.h"
 
@@ -55,6 +54,8 @@
 	dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0813, NULL);
 	if (!dws->dmac)
 		dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
+	if (!dws->dmac)
+		dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x08EF, NULL);
 
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);
@@ -117,8 +118,11 @@
 {
 	struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
 	struct dma_chan *txchan, *rxchan;
-	struct dma_slave_config txconf, rxconf;
+	struct dma_slave_config *txconf, *rxconf;
 	u16 dma_ctrl = 0;
+	enum dma_ctrl_flags flag;
+	struct device *dev = &dws->master->dev;
+	struct intel_mid_dma_slave *rxs, *txs;
 
 	/* 1. setup DMA related registers */
 	if (cs_change) {
@@ -137,51 +141,65 @@
 	txchan = dws->txchan;
 	rxchan = dws->rxchan;
 
+	txs = txchan->private;
+	rxs = rxchan->private;
+
+	txconf = &txs->dma_slave;
+	rxconf = &rxs->dma_slave;
+
+	flag = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP | DMA_CTRL_ACK;
+
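+	/*
+	 * Note: instead of dmaengine_prep_slave_sg(), the transfers below
+	 * are prepared through the channels' device_prep_dma_memcpy()
+	 * hook, with the slave parameters passed via the channels'
+	 * private intel_mid_dma_slave configuration set up above.
+	 */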
 	/* 2. Prepare the TX dma transfer */
-	txconf.direction = DMA_MEM_TO_DEV;
-	txconf.dst_addr = dws->dma_addr;
-	txconf.dst_maxburst = LNW_DMA_MSIZE_16;
-	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-	txconf.dst_addr_width = dws->dma_width;
-	txconf.device_fc = false;
+	txconf->direction = DMA_MEM_TO_DEV;
+	txconf->dst_addr = dws->dma_addr;
+	txconf->src_maxburst = LNW_DMA_MSIZE_16;
+	txconf->dst_maxburst = LNW_DMA_MSIZE_16;
+	txconf->src_addr_width = dws->dma_width;
+	txconf->dst_addr_width = dws->dma_width;
+	txconf->device_fc = false;
 
 	txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
-				       (unsigned long) &txconf);
+				       (unsigned long) txconf);
 
-	memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl));
-	dws->tx_sgl.dma_address = dws->tx_dma;
-	dws->tx_sgl.length = dws->len;
-
-	txdesc = dmaengine_prep_slave_sg(txchan,
-				&dws->tx_sgl,
-				1,
-				DMA_MEM_TO_DEV,
-				DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
-	txdesc->callback = dw_spi_dma_done;
-	txdesc->callback_param = dws;
+	txdesc = txchan->device->device_prep_dma_memcpy
+		(txchan,			/* DMA Channel */
+		dws->dma_addr,			/* DAR */
+		dws->tx_dma,			/* SAR */
+		dws->len,			/* Data Length */
+		flag);
+	if (txdesc) {
+		txdesc->callback = dw_spi_dma_done;
+		txdesc->callback_param = dws;
+	} else {
+		dev_err(dev, "ERROR: prepare txdesc failed\n");
+		return -EINVAL;
+	}
 
 	/* 3. Prepare the RX dma transfer */
-	rxconf.direction = DMA_DEV_TO_MEM;
-	rxconf.src_addr = dws->dma_addr;
-	rxconf.src_maxburst = LNW_DMA_MSIZE_16;
-	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-	rxconf.src_addr_width = dws->dma_width;
-	rxconf.device_fc = false;
+	rxconf->direction = DMA_DEV_TO_MEM;
+	rxconf->src_addr = dws->dma_addr;
+	rxconf->src_maxburst = LNW_DMA_MSIZE_16;
+	rxconf->dst_maxburst = LNW_DMA_MSIZE_16;
+	rxconf->dst_addr_width = dws->dma_width;
+	rxconf->src_addr_width = dws->dma_width;
+	rxconf->device_fc = false;
 
 	rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
-				       (unsigned long) &rxconf);
+				       (unsigned long) rxconf);
 
-	memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl));
-	dws->rx_sgl.dma_address = dws->rx_dma;
-	dws->rx_sgl.length = dws->len;
-
-	rxdesc = dmaengine_prep_slave_sg(rxchan,
-				&dws->rx_sgl,
-				1,
-				DMA_DEV_TO_MEM,
-				DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
-	rxdesc->callback = dw_spi_dma_done;
-	rxdesc->callback_param = dws;
+	rxdesc = rxchan->device->device_prep_dma_memcpy
+		(rxchan,			/* DMA Channel */
+		dws->rx_dma,			/* DAR */
+		dws->dma_addr,			/* SAR */
+		dws->len,			/* Data Length */
+		flag);
+	if (rxdesc) {
+		rxdesc->callback = dw_spi_dma_done;
+		rxdesc->callback_param = dws;
+	} else {
+		dev_err(dev, "ERROR: prepare rxdesc failed\n");
+		return -EINVAL;
+	}
 
 	/* rx must be started before tx due to spi instinct */
 	rxdesc->tx_submit(rxdesc);
@@ -189,10 +207,41 @@
 	return 0;
 }
 
+static int mid_spi_dma_suspend(struct dw_spi *dws)
+{
+	struct dma_chan *txchan, *rxchan;
+
+	txchan = dws->txchan;
+	rxchan = dws->rxchan;
+
+	txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0);
+	rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0);
+
+	txchan->device->device_control(txchan, DMA_PAUSE, 0);
+	rxchan->device->device_control(rxchan, DMA_PAUSE, 0);
+
+	return 0;
+}
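+/*
+ * Note: DMA_TERMINATE_ALL above discards any in-flight descriptors
+ * before the channels are paused, so a transfer cut short by suspend
+ * is dropped rather than resumed by mid_spi_dma_resume().
+ */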
+
+static int mid_spi_dma_resume(struct dw_spi *dws)
+{
+	struct dma_chan *txchan, *rxchan;
+
+	txchan = dws->txchan;
+	rxchan = dws->rxchan;
+
+	txchan->device->device_control(txchan, DMA_RESUME, 0);
+	rxchan->device->device_control(rxchan, DMA_RESUME, 0);
+
+	return 0;
+}
+
 static struct dw_spi_dma_ops mid_dma_ops = {
 	.dma_init	= mid_spi_dma_init,
 	.dma_exit	= mid_spi_dma_exit,
 	.dma_transfer	= mid_spi_dma_transfer,
+	.dma_suspend	= mid_spi_dma_suspend,
+	.dma_resume	= mid_spi_dma_resume,
 };
 #endif
 
@@ -207,12 +256,12 @@
 #define CLK_SPI_CDIV_MASK	0x00000e00
 #define CLK_SPI_DISABLE_OFFSET	8
 
-int dw_spi_mid_init(struct dw_spi *dws)
+int dw_spi_mid_init(struct dw_spi *dws, int bus_num)
 {
 	void __iomem *clk_reg;
 	u32 clk_cdiv;
 
-	clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16);
+	clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG + bus_num * 4, 16);
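+	/*
+	 * The clock control words for the SPI buses are assumed to sit
+	 * at consecutive 4-byte offsets from MRST_CLK_SPI0_REG, hence
+	 * the bus_num * 4 offset above.
+	 */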
 	if (!clk_reg)
 		return -ENOMEM;
 
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index 6055c8d..c2c0756 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -20,6 +20,7 @@
 #include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/slab.h>
+#include <linux/pm_runtime.h>
 #include <linux/spi/spi.h>
 #include <linux/module.h>
 
@@ -72,7 +73,7 @@
 	}
 
 	dws->parent_dev = &pdev->dev;
-	dws->bus_num = 0;
+	dws->bus_num = ent->driver_data;
 	dws->num_cs = 4;
 	dws->irq = pdev->irq;
 
@@ -80,11 +81,9 @@
 	 * Specific handling for Intel MID paltforms, like dma setup,
 	 * clock rate, FIFO depth.
 	 */
-	if (pdev->device == 0x0800) {
-		ret = dw_spi_mid_init(dws);
-		if (ret)
-			goto err_unmap;
-	}
+	ret = dw_spi_mid_init(dws, ent->driver_data);
+	if (ret)
+		goto err_unmap;
 
 	ret = dw_spi_add_host(dws);
 	if (ret)
@@ -92,6 +91,11 @@
 
 	/* PCI hook and SPI hook use the same drv data */
 	pci_set_drvdata(pdev, dwpci);
+
+	pm_suspend_ignore_children(&pdev->dev, true);
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
+
 	return 0;
 
 err_unmap:
@@ -111,6 +115,8 @@
 
 	pci_set_drvdata(pdev, NULL);
 	dw_spi_remove_host(&dwpci->dws);
+	pm_runtime_forbid(&pdev->dev);
+	pm_runtime_get_noresume(&pdev->dev);
 	iounmap(dwpci->dws.regs);
 	pci_release_region(pdev, 0);
 	kfree(dwpci);
@@ -118,8 +124,9 @@
 }
 
 #ifdef CONFIG_PM
-static int spi_suspend(struct pci_dev *pdev, pm_message_t state)
+static int spi_suspend(struct device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
 	struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
 	int ret;
 
@@ -128,12 +135,13 @@
 		return ret;
 	pci_save_state(pdev);
 	pci_disable_device(pdev);
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+	pci_set_power_state(pdev, PCI_D3hot);
 	return ret;
 }
 
-static int spi_resume(struct pci_dev *pdev)
+static int spi_resume(struct device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
 	struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
 	int ret;
 
@@ -144,27 +152,93 @@
 		return ret;
 	return dw_spi_resume_host(&dwpci->dws);
 }
+
+static int spi_dw_pci_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
+
+	dev_dbg(dev, "PCI runtime suspend called\n");
+	return dw_spi_suspend_host(&dwpci->dws);
+}
+
+static int spi_dw_pci_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
+
+	dev_dbg(dev, "pci_runtime_resume called\n");
+	return dw_spi_resume_host(&dwpci->dws);
+}
+
+static int spi_dw_pci_runtime_idle(struct device *dev)
+{
+	int err;
+
+	dev_dbg(dev, "pci_runtime_idle called\n");
+	if (system_state == SYSTEM_BOOTING)
+		/*
+		 * If the SPI UART is set as the default console and
+		 * earlyprintk is enabled, the SPI controller cannot be shut
+		 * down during boot.
+		 */
+		err = pm_schedule_suspend(dev, 30000);
+	else
+		err = pm_schedule_suspend(dev, 500);
+
+	if (err != 0)
+		return 0;
+
+	return -EBUSY;
+}
+
 #else
 #define spi_suspend	NULL
 #define spi_resume	NULL
+#define spi_dw_pci_runtime_suspend NULL
+#define spi_dw_pci_runtime_resume NULL
+#define spi_dw_pci_runtime_idle NULL
 #endif
 
 static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
-	/* Intel MID platform SPI controller 0 */
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
+	/* Intel Medfield platform SPI controller 1 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800), .driver_data = 0 },
+	/* Intel Cloverview platform SPI controller 1 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08E1), .driver_data = 0 },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08EE), .driver_data = 1 },
+	/* Intel EVx platform SPI controller 1 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0812), .driver_data = 2 },
 	{},
 };
 
+static const struct dev_pm_ops dw_spi_pm_ops = {
+	.suspend = spi_suspend,
+	.resume = spi_resume,
+	.runtime_suspend = spi_dw_pci_runtime_suspend,
+	.runtime_resume = spi_dw_pci_runtime_resume,
+	.runtime_idle = spi_dw_pci_runtime_idle,
+};
+
 static struct pci_driver dw_spi_driver = {
 	.name =		DRIVER_NAME,
 	.id_table =	pci_ids,
 	.probe =	spi_pci_probe,
 	.remove =	spi_pci_remove,
-	.suspend =	spi_suspend,
-	.resume	=	spi_resume,
+	.driver =	{
+		.pm	= &dw_spi_pm_ops,
+	},
 };
 
-module_pci_driver(dw_spi_driver);
+static int __init mrst_spi_init(void)
+{
+	return pci_register_driver(&dw_spi_driver);
+}
+
+static void __exit mrst_spi_exit(void)
+{
+	pci_unregister_driver(&dw_spi_driver);
+}
+
+module_init(mrst_spi_init);
+module_exit(mrst_spi_exit);
 
 MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
 MODULE_DESCRIPTION("PCI interface driver for DW SPI Core");
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 137a4de..1851f3b 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -23,6 +23,7 @@
 #include <linux/highmem.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/pm_runtime.h>
 #include <linux/spi/spi.h>
 
 #include "spi-dw.h"
@@ -63,6 +64,12 @@
 };
 
 #ifdef CONFIG_DEBUG_FS
+static int spi_show_regs_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
 #define SPI_REGS_BUFSIZE	1024
 static ssize_t  spi_show_regs(struct file *file, char __user *user_buf,
 				size_t count, loff_t *ppos)
@@ -122,7 +129,7 @@
 
 static const struct file_operations mrst_spi_regs_ops = {
 	.owner		= THIS_MODULE,
-	.open		= simple_open,
+	.open		= spi_show_regs_open,
 	.read		= spi_show_regs,
 	.llseek		= default_llseek,
 };
@@ -191,7 +198,7 @@
 	u16 txw = 0;
 
 	while (max--) {
-		/* Set the tx word if the transfer's original "tx" is not null */
+		/* Set the txw if the transfer's original "tx" is not null */
 		if (dws->tx_end - dws->len) {
 			if (dws->n_bytes == 1)
 				txw = *(u8 *)(dws->tx);
@@ -256,7 +263,40 @@
 	if (dws->cur_transfer->rx_dma)
 		dws->rx_dma = dws->cur_transfer->rx_dma;
 
+	/* map dma buffer if it's not mapped */
+	if (!dws->tx_dma) {
+		dws->tx_dma = dma_map_single(NULL, dws->tx,
+				dws->len, DMA_TO_DEVICE);
+		if (dma_mapping_error(NULL, dws->tx_dma)) {
+			pr_err("map tx dma buffer failed\n");
+			goto err1;
+		}
+	}
+
+	if (!dws->rx_dma) {
+		dws->rx_dma = dma_map_single(NULL, dws->rx,
+				dws->len, DMA_FROM_DEVICE);
+		if (dma_mapping_error(NULL, dws->rx_dma)) {
+			pr_err("map rx dma buffer failed\n");
+			goto err2;
+		}
+	}
+
 	return 1;
+
+err2:
+	dma_unmap_single(NULL, dws->tx_dma, dws->len, DMA_TO_DEVICE);
+err1:
+	dws->cur_msg->is_dma_mapped = 0;
+	return 0;
+}
+
+static void unmap_dma_buffers(struct dw_spi *dws)
+{
+	dma_unmap_single(NULL, dws->rx_dma,
+				dws->len, DMA_FROM_DEVICE);
+	dma_unmap_single(NULL, dws->tx_dma,
+				dws->len, DMA_TO_DEVICE);
 }
 
 /* Caller already set message->status; dma and pio irqs are blocked */
@@ -267,7 +307,12 @@
 	struct spi_message *msg;
 
 	spin_lock_irqsave(&dws->lock, flags);
+
+	if (dws->dma_mapped)
+		unmap_dma_buffers(dws);
+
 	msg = dws->cur_msg;
+	list_del_init(&dws->cur_msg->queue);
 	dws->cur_msg = NULL;
 	dws->cur_transfer = NULL;
 	dws->prev_chip = dws->cur_chip;
@@ -312,6 +357,7 @@
 		giveback(dws);
 	} else
 		tasklet_schedule(&dws->pump_transfers);
+
 }
 EXPORT_SYMBOL_GPL(dw_spi_xfer_done);
 
@@ -324,7 +370,7 @@
 		dw_readw(dws, DW_SPI_TXOICR);
 		dw_readw(dws, DW_SPI_RXOICR);
 		dw_readw(dws, DW_SPI_RXUICR);
-		int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
+		int_error_stop(dws, "interrupt_transfer: fifo over/underrun");
 		return IRQ_HANDLED;
 	}
 
@@ -502,7 +548,8 @@
 		txint_level = dws->fifo_len / 2;
 		txint_level = (templen > txint_level) ? txint_level : templen;
 
-		imask |= SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI;
+		imask |= SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI
+			| SPI_INT_RXOI;
 		dws->transfer_handler = interrupt_transfer;
 	}
 
@@ -512,7 +559,8 @@
 	 *	2. clk_div is changed
 	 *	3. control value changes
 	 */
-	if (dw_readw(dws, DW_SPI_CTRL0) != cr0 || cs_change || clk_div || imask) {
+	if (dw_readw(dws, DW_SPI_CTRL0) != cr0 || cs_change
+			|| clk_div || imask) {
 		spi_enable_chip(dws, 0);
 
 		if (dw_readw(dws, DW_SPI_CTRL0) != cr0)
@@ -552,23 +600,19 @@
 		container_of(work, struct dw_spi, pump_messages);
 	unsigned long flags;
 
+	pm_runtime_get_sync(dws->parent_dev);
+
 	/* Lock queue and check for queue work */
 	spin_lock_irqsave(&dws->lock, flags);
-	if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) {
-		dws->busy = 0;
-		spin_unlock_irqrestore(&dws->lock, flags);
-		return;
-	}
+	if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED)
+		goto exit;
 
 	/* Make sure we are not already running a message */
-	if (dws->cur_msg) {
-		spin_unlock_irqrestore(&dws->lock, flags);
-		return;
-	}
+	if (dws->cur_msg)
+		goto exit;
 
 	/* Extract head of queue */
 	dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue);
-	list_del_init(&dws->cur_msg->queue);
 
 	/* Initial message state*/
 	dws->cur_msg->state = START_STATE;
@@ -580,8 +624,9 @@
 	/* Mark as busy and launch transfers */
 	tasklet_schedule(&dws->pump_transfers);
 
-	dws->busy = 1;
+exit:
 	spin_unlock_irqrestore(&dws->lock, flags);
+	pm_runtime_put_sync(dws->parent_dev);
 }
 
 /* spi_device use this to queue in their spi_msg */
@@ -592,29 +637,13 @@
 
 	spin_lock_irqsave(&dws->lock, flags);
 
-	if (dws->run == QUEUE_STOPPED) {
-		spin_unlock_irqrestore(&dws->lock, flags);
-		return -ESHUTDOWN;
-	}
-
 	msg->actual_length = 0;
 	msg->status = -EINPROGRESS;
 	msg->state = START_STATE;
 
 	list_add_tail(&msg->queue, &dws->queue);
 
-	if (dws->run == QUEUE_RUNNING && !dws->busy) {
-
-		if (dws->cur_transfer || dws->cur_msg)
-			queue_work(dws->workqueue,
-					&dws->pump_messages);
-		else {
-			/* If no other data transaction in air, just go */
-			spin_unlock_irqrestore(&dws->lock, flags);
-			pump_messages(&dws->pump_messages);
-			return 0;
-		}
-	}
+	queue_work(dws->workqueue, &dws->pump_messages);
 
 	spin_unlock_irqrestore(&dws->lock, flags);
 	return 0;
@@ -692,13 +721,12 @@
 	kfree(chip);
 }
 
-static int init_queue(struct dw_spi *dws)
+static int dw_spi_init_queue(struct dw_spi *dws)
 {
 	INIT_LIST_HEAD(&dws->queue);
 	spin_lock_init(&dws->lock);
 
 	dws->run = QUEUE_STOPPED;
-	dws->busy = 0;
 
 	tasklet_init(&dws->pump_transfers,
 			pump_transfers,	(unsigned long)dws);
@@ -712,13 +740,13 @@
 	return 0;
 }
 
-static int start_queue(struct dw_spi *dws)
+static int dw_spi_start_queue(struct dw_spi *dws)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&dws->lock, flags);
 
-	if (dws->run == QUEUE_RUNNING || dws->busy) {
+	if (dws->run == QUEUE_RUNNING) {
 		spin_unlock_irqrestore(&dws->lock, flags);
 		return -EBUSY;
 	}
@@ -735,32 +763,27 @@
 	return 0;
 }
 
-static int stop_queue(struct dw_spi *dws)
+int dw_spi_stop_queue(struct dw_spi *dws)
 {
 	unsigned long flags;
-	unsigned limit = 50;
 	int status = 0;
 
 	spin_lock_irqsave(&dws->lock, flags);
-	dws->run = QUEUE_STOPPED;
-	while ((!list_empty(&dws->queue) || dws->busy) && limit--) {
-		spin_unlock_irqrestore(&dws->lock, flags);
-		msleep(10);
-		spin_lock_irqsave(&dws->lock, flags);
-	}
-
-	if (!list_empty(&dws->queue) || dws->busy)
+	if (!list_empty(&dws->queue))
 		status = -EBUSY;
+	else
+		dws->run = QUEUE_STOPPED;
 	spin_unlock_irqrestore(&dws->lock, flags);
 
 	return status;
 }
+EXPORT_SYMBOL_GPL(dw_spi_stop_queue);
 
-static int destroy_queue(struct dw_spi *dws)
+static int dw_spi_destroy_queue(struct dw_spi *dws)
 {
 	int status;
 
-	status = stop_queue(dws);
+	status = dw_spi_stop_queue(dws);
 	if (status != 0)
 		return status;
 	destroy_workqueue(dws->workqueue);
@@ -768,11 +791,10 @@
 }
 
 /* Restart the controller, disable all interrupts, clean rx fifo */
-static void spi_hw_init(struct dw_spi *dws)
+static void dw_spi_hw_init(struct dw_spi *dws)
 {
 	spi_enable_chip(dws, 0);
 	spi_mask_intr(dws, 0xff);
-	spi_enable_chip(dws, 1);
 
 	/*
 	 * Try to detect the FIFO depth if not set by interface driver,
@@ -789,6 +811,8 @@
 		dws->fifo_len = (fifo == 257) ? 0 : fifo;
 		dw_writew(dws, DW_SPI_TXFLTR, 0);
 	}
+
+	spi_enable_chip(dws, 1);
 }
 
 int dw_spi_add_host(struct dw_spi *dws)
@@ -827,7 +851,7 @@
 	master->transfer = dw_spi_transfer;
 
 	/* Basic HW init */
-	spi_hw_init(dws);
+	dw_spi_hw_init(dws);
 
 	if (dws->dma_ops && dws->dma_ops->dma_init) {
 		ret = dws->dma_ops->dma_init(dws);
@@ -838,12 +862,12 @@
 	}
 
 	/* Initial and start queue */
-	ret = init_queue(dws);
+	ret = dw_spi_init_queue(dws);
 	if (ret) {
 		dev_err(&master->dev, "problem initializing queue\n");
 		goto err_diable_hw;
 	}
-	ret = start_queue(dws);
+	ret = dw_spi_start_queue(dws);
 	if (ret) {
 		dev_err(&master->dev, "problem starting queue\n");
 		goto err_diable_hw;
@@ -860,7 +884,7 @@
 	return 0;
 
 err_queue_alloc:
-	destroy_queue(dws);
+	dw_spi_destroy_queue(dws);
 	if (dws->dma_ops && dws->dma_ops->dma_exit)
 		dws->dma_ops->dma_exit(dws);
 err_diable_hw:
@@ -882,7 +906,7 @@
 	mrst_spi_debugfs_remove(dws);
 
 	/* Remove the queue */
-	status = destroy_queue(dws);
+	status = dw_spi_destroy_queue(dws);
 	if (status != 0)
 		dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not "
 			"complete, message memory not freed\n");
@@ -903,11 +927,16 @@
 {
 	int ret = 0;
 
-	ret = stop_queue(dws);
+	ret = dw_spi_stop_queue(dws);
 	if (ret)
 		return ret;
+
 	spi_enable_chip(dws, 0);
 	spi_set_clk(dws, 0);
+
+	if (dws->dma_inited)
+		dws->dma_ops->dma_suspend(dws);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dw_spi_suspend_host);
@@ -916,10 +945,14 @@
 {
 	int ret;
 
-	spi_hw_init(dws);
-	ret = start_queue(dws);
+	if (dws->dma_inited)
+		dws->dma_ops->dma_resume(dws);
+
+	dw_spi_hw_init(dws);
+	ret = dw_spi_start_queue(dws);
 	if (ret)
 		dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dw_spi_resume_host);
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index 9c57c07..2aaccb7 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -87,6 +87,8 @@
 	int (*dma_init)(struct dw_spi *dws);
 	void (*dma_exit)(struct dw_spi *dws);
 	int (*dma_transfer)(struct dw_spi *dws, int cs_change);
+	int (*dma_suspend)(struct dw_spi *dws);
+	int (*dma_resume)(struct dw_spi *dws);
 };
 
 struct dw_spi {
@@ -111,7 +113,6 @@
 	struct work_struct	pump_messages;
 	spinlock_t		lock;
 	struct list_head	queue;
-	int			busy;
 	int			run;
 
 	/* Message Transfer pump */
@@ -236,7 +237,9 @@
 extern int dw_spi_suspend_host(struct dw_spi *dws);
 extern int dw_spi_resume_host(struct dw_spi *dws);
 extern void dw_spi_xfer_done(struct dw_spi *dws);
+extern int dw_spi_stop_queue(struct dw_spi *dws);
 
 /* platform related setup */
-extern int dw_spi_mid_init(struct dw_spi *dws); /* Intel MID platforms */
+/* Intel MID platforms */
+extern int dw_spi_mid_init(struct dw_spi *dws, int bus_num);
 #endif /* DW_SPI_HEADER_H */
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index a08f923..12112a1 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -438,7 +438,8 @@
 		break;
 
 	default:
-		/* segmented and/or full-duplex I/O request */
+		/* segmented and/or full-duplex I/O request.
+		 * Note: at most 511 spi_ioc_transfer entries are supported
+		 * per call. */
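+		/*
+		 * The 511 limit follows from the ioctl encoding: the size
+		 * field of SPI_IOC_MESSAGE() is 14 bits (16383 bytes max)
+		 * and each struct spi_ioc_transfer is 32 bytes, so at most
+		 * 511 transfers fit in a single call.
+		 */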
 		if (_IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
 				|| _IOC_DIR(cmd) != _IOC_WRITE) {
 			retval = -ENOTTY;
@@ -452,7 +453,10 @@
 		}
 		n_ioc = tmp / sizeof(struct spi_ioc_transfer);
 		if (n_ioc == 0)
+		{
+			dev_err(&spi->dev, "Value of n_ioc is out of range( 1 ~ 511 ).\n");
 			break;
+		}
 
 		/* copy into scratch area */
 		ioc = kmalloc(tmp, GFP_KERNEL);
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index aefe820..706b5cb 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -70,6 +70,8 @@
 
 source "drivers/staging/sep/Kconfig"
 
+source "drivers/staging/sep54/Kconfig"
+
 source "drivers/staging/iio/Kconfig"
 
 source "drivers/staging/zsmalloc/Kconfig"
@@ -140,4 +142,6 @@
 
 source "drivers/staging/dwc2/Kconfig"
 
+source "drivers/staging/edison-bcm43340/Kconfig"
+
 endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 415772e..a805eae 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -29,6 +29,7 @@
 obj-$(CONFIG_VT6656)		+= vt6656/
 obj-$(CONFIG_VME_BUS)		+= vme/
 obj-$(CONFIG_DX_SEP)            += sep/
+obj-$(CONFIG_DX_SEP54)		+= sep54/
 obj-$(CONFIG_IIO)		+= iio/
 obj-$(CONFIG_ZRAM)		+= zram/
 obj-$(CONFIG_ZSMALLOC)		+= zsmalloc/
@@ -62,3 +63,4 @@
 obj-$(CONFIG_ZCACHE)		+= zcache/
 obj-$(CONFIG_GOLDFISH)		+= goldfish/
 obj-$(CONFIG_USB_DWC2)		+= dwc2/
+obj-$(CONFIG_BCMDHD)		+= edison-bcm43340/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index c0c95be..ef10416 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -8,17 +8,6 @@
 
 if ANDROID
 
-config ANDROID_BINDER_IPC
-	bool "Android Binder IPC Driver"
-	default n
-	---help---
-	  Binder is used in Android for both communication between processes,
-	  and remote method invocation.
-
-	  This means one Android process can call a method/routine in another
-	  Android process, using Binder to identify, invoke and pass arguments
-	  between said processes.
-
 config ASHMEM
 	bool "Enable the Anonymous Shared Memory Subsystem"
 	default n
@@ -31,23 +20,6 @@
 	  It is, in theory, a good memory allocator for low-memory devices,
 	  because it can discard shared memory units when under memory pressure.
 
-config ANDROID_LOGGER
-	tristate "Android log driver"
-	default n
-	---help---
-	  This adds support for system-wide logging using four log buffers.
-
-	  These are:
-
-	      1: main
-	      2: events
-	      3: radio
-	      4: system
-
-	  Log reading and writing is performed via normal Linux reads and
-	  optimized writes. This optimization avoids logging having too
-	  much overhead in the system.
-
 config ANDROID_TIMED_OUTPUT
 	bool "Timed output class driver"
 	default y
@@ -63,14 +35,14 @@
 	---help---
 	  Registers processes to be killed when memory is low
 
-config ANDROID_INTF_ALARM_DEV
-	bool "Android alarm driver"
-	depends on RTC_CLASS
-	default n
+config ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
+	bool "Android Low Memory Killer: detect oom_adj values"
+	depends on ANDROID_LOW_MEMORY_KILLER
+	default y
 	---help---
-	  Provides non-wakeup and rtc backed wakeup alarms based on rtc or
-	  elapsed realtime, and a non-wakeup alarm on the monotonic clock.
-	  Also exports the alarm interface to user-space.
+	  Detect oom_adj values written to
+	  /sys/module/lowmemorykiller/parameters/adj and convert them
+	  to oom_score_adj values.
 
 config SYNC
 	bool "Synchronization framework"
@@ -99,6 +71,12 @@
 	  *WARNING* improper use of this can result in deadlocking kernel
 	  drivers from userspace.
 
+source "drivers/staging/android/ion/Kconfig"
+
+source "drivers/staging/android/fiq_debugger/Kconfig"
+
+source "drivers/staging/android/reboot/Kconfig"
+
 endif # if ANDROID
 
 endmenu
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index c136299..f09945e 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -1,11 +1,12 @@
 ccflags-y += -I$(src)			# needed for trace events
 
-obj-$(CONFIG_ANDROID_BINDER_IPC)	+= binder.o
+obj-y					+= ion/
+obj-$(CONFIG_FIQ_DEBUGGER)		+= fiq_debugger/
+obj-y					+= reboot/
+
 obj-$(CONFIG_ASHMEM)			+= ashmem.o
-obj-$(CONFIG_ANDROID_LOGGER)		+= logger.o
 obj-$(CONFIG_ANDROID_TIMED_OUTPUT)	+= timed_output.o
 obj-$(CONFIG_ANDROID_TIMED_GPIO)	+= timed_gpio.o
 obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER)	+= lowmemorykiller.o
-obj-$(CONFIG_ANDROID_INTF_ALARM_DEV)	+= alarm-dev.o
 obj-$(CONFIG_SYNC)			+= sync.o
 obj-$(CONFIG_SW_SYNC)			+= sw_sync.o
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
deleted file mode 100644
index b15fb0d..0000000
--- a/drivers/staging/android/TODO
+++ /dev/null
@@ -1,10 +0,0 @@
-TODO:
-	- checkpatch.pl cleanups
-	- sparse fixes
-	- rename files to be not so "generic"
-	- make sure things build as modules properly
-	- add proper arch dependencies as needed
-	- audit userspace interfaces to make sure they are sane
-
-Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
-Brian Swetland <swetland@google.com>
diff --git a/drivers/staging/android/alarm-dev.c b/drivers/staging/android/alarm-dev.c
deleted file mode 100644
index 6dc27da..0000000
--- a/drivers/staging/android/alarm-dev.c
+++ /dev/null
@@ -1,443 +0,0 @@
-/* drivers/rtc/alarm-dev.c
- *
- * Copyright (C) 2007-2009 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/time.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/miscdevice.h>
-#include <linux/fs.h>
-#include <linux/platform_device.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/uaccess.h>
-#include <linux/alarmtimer.h>
-#include "android_alarm.h"
-
-#define ANDROID_ALARM_PRINT_INFO (1U << 0)
-#define ANDROID_ALARM_PRINT_IO (1U << 1)
-#define ANDROID_ALARM_PRINT_INT (1U << 2)
-
-static int debug_mask = ANDROID_ALARM_PRINT_INFO;
-module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
-
-#define alarm_dbg(debug_level_mask, fmt, ...)				\
-do {									\
-	if (debug_mask & ANDROID_ALARM_PRINT_##debug_level_mask)	\
-		pr_info(fmt, ##__VA_ARGS__);				\
-} while (0)
-
-#define ANDROID_ALARM_WAKEUP_MASK ( \
-	ANDROID_ALARM_RTC_WAKEUP_MASK | \
-	ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK)
-
-static int alarm_opened;
-static DEFINE_SPINLOCK(alarm_slock);
-static struct wakeup_source alarm_wake_lock;
-static DECLARE_WAIT_QUEUE_HEAD(alarm_wait_queue);
-static uint32_t alarm_pending;
-static uint32_t alarm_enabled;
-static uint32_t wait_pending;
-
-struct devalarm {
-	union {
-		struct hrtimer hrt;
-		struct alarm alrm;
-	} u;
-	enum android_alarm_type type;
-};
-
-static struct devalarm alarms[ANDROID_ALARM_TYPE_COUNT];
-
-
-static int is_wakeup(enum android_alarm_type type)
-{
-	return (type == ANDROID_ALARM_RTC_WAKEUP ||
-		type == ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP);
-}
-
-
-static void devalarm_start(struct devalarm *alrm, ktime_t exp)
-{
-	if (is_wakeup(alrm->type))
-		alarm_start(&alrm->u.alrm, exp);
-	else
-		hrtimer_start(&alrm->u.hrt, exp, HRTIMER_MODE_ABS);
-}
-
-
-static int devalarm_try_to_cancel(struct devalarm *alrm)
-{
-	if (is_wakeup(alrm->type))
-		return alarm_try_to_cancel(&alrm->u.alrm);
-	return hrtimer_try_to_cancel(&alrm->u.hrt);
-}
-
-static void devalarm_cancel(struct devalarm *alrm)
-{
-	if (is_wakeup(alrm->type))
-		alarm_cancel(&alrm->u.alrm);
-	else
-		hrtimer_cancel(&alrm->u.hrt);
-}
-
-static void alarm_clear(enum android_alarm_type alarm_type)
-{
-	uint32_t alarm_type_mask = 1U << alarm_type;
-	unsigned long flags;
-
-	spin_lock_irqsave(&alarm_slock, flags);
-	alarm_dbg(IO, "alarm %d clear\n", alarm_type);
-	devalarm_try_to_cancel(&alarms[alarm_type]);
-	if (alarm_pending) {
-		alarm_pending &= ~alarm_type_mask;
-		if (!alarm_pending && !wait_pending)
-			__pm_relax(&alarm_wake_lock);
-	}
-	alarm_enabled &= ~alarm_type_mask;
-	spin_unlock_irqrestore(&alarm_slock, flags);
-
-}
-
-static void alarm_set(enum android_alarm_type alarm_type,
-							struct timespec *ts)
-{
-	uint32_t alarm_type_mask = 1U << alarm_type;
-	unsigned long flags;
-
-	spin_lock_irqsave(&alarm_slock, flags);
-	alarm_dbg(IO, "alarm %d set %ld.%09ld\n",
-			alarm_type, ts->tv_sec, ts->tv_nsec);
-	alarm_enabled |= alarm_type_mask;
-	devalarm_start(&alarms[alarm_type], timespec_to_ktime(*ts));
-	spin_unlock_irqrestore(&alarm_slock, flags);
-}
-
-static int alarm_wait(void)
-{
-	unsigned long flags;
-	int rv = 0;
-
-	spin_lock_irqsave(&alarm_slock, flags);
-	alarm_dbg(IO, "alarm wait\n");
-	if (!alarm_pending && wait_pending) {
-		__pm_relax(&alarm_wake_lock);
-		wait_pending = 0;
-	}
-	spin_unlock_irqrestore(&alarm_slock, flags);
-
-	rv = wait_event_interruptible(alarm_wait_queue, alarm_pending);
-	if (rv)
-		return rv;
-
-	spin_lock_irqsave(&alarm_slock, flags);
-	rv = alarm_pending;
-	wait_pending = 1;
-	alarm_pending = 0;
-	spin_unlock_irqrestore(&alarm_slock, flags);
-
-	return rv;
-}
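-
-/*
- * Pairing with devalarm_triggered(): a fired alarm grabs the wakeup source
- * (with a 5 second timeout) and sets its bit in alarm_pending; the source
- * is released early once userspace has consumed the result and waits again
- * with nothing pending (alarm_pending and wait_pending both clear).
- */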
-
-static int alarm_set_rtc(struct timespec *ts)
-{
-	struct rtc_time new_rtc_tm;
-	struct rtc_device *rtc_dev;
-	unsigned long flags;
-	int rv = 0;
-
-	rtc_time_to_tm(ts->tv_sec, &new_rtc_tm);
-	rtc_dev = alarmtimer_get_rtcdev();
-	rv = do_settimeofday(ts);
-	if (rv < 0)
-		return rv;
-	if (rtc_dev)
-		rv = rtc_set_time(rtc_dev, &new_rtc_tm);
-
-	spin_lock_irqsave(&alarm_slock, flags);
-	alarm_pending |= ANDROID_ALARM_TIME_CHANGE_MASK;
-	wake_up(&alarm_wait_queue);
-	spin_unlock_irqrestore(&alarm_slock, flags);
-
-	return rv;
-}
-
-static int alarm_get_time(enum android_alarm_type alarm_type,
-							struct timespec *ts)
-{
-	int rv = 0;
-
-	switch (alarm_type) {
-	case ANDROID_ALARM_RTC_WAKEUP:
-	case ANDROID_ALARM_RTC:
-		getnstimeofday(ts);
-		break;
-	case ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP:
-	case ANDROID_ALARM_ELAPSED_REALTIME:
-		get_monotonic_boottime(ts);
-		break;
-	case ANDROID_ALARM_SYSTEMTIME:
-		ktime_get_ts(ts);
-		break;
-	default:
-		rv = -EINVAL;
-	}
-	return rv;
-}
-
-static long alarm_do_ioctl(struct file *file, unsigned int cmd,
-							struct timespec *ts)
-{
-	int rv = 0;
-	unsigned long flags;
-	enum android_alarm_type alarm_type = ANDROID_ALARM_IOCTL_TO_TYPE(cmd);
-
-	if (alarm_type >= ANDROID_ALARM_TYPE_COUNT)
-		return -EINVAL;
-
-	if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_GET_TIME(0)) {
-		if ((file->f_flags & O_ACCMODE) == O_RDONLY)
-			return -EPERM;
-		if (file->private_data == NULL &&
-		    cmd != ANDROID_ALARM_SET_RTC) {
-			spin_lock_irqsave(&alarm_slock, flags);
-			if (alarm_opened) {
-				spin_unlock_irqrestore(&alarm_slock, flags);
-				return -EBUSY;
-			}
-			alarm_opened = 1;
-			file->private_data = (void *)1;
-			spin_unlock_irqrestore(&alarm_slock, flags);
-		}
-	}
-
-	switch (ANDROID_ALARM_BASE_CMD(cmd)) {
-	case ANDROID_ALARM_CLEAR(0):
-		alarm_clear(alarm_type);
-		break;
-	case ANDROID_ALARM_SET(0):
-		alarm_set(alarm_type, ts);
-		break;
-	case ANDROID_ALARM_SET_AND_WAIT(0):
-		alarm_set(alarm_type, ts);
-		/* fall through */
-	case ANDROID_ALARM_WAIT:
-		rv = alarm_wait();
-		break;
-	case ANDROID_ALARM_SET_RTC:
-		rv = alarm_set_rtc(ts);
-		break;
-	case ANDROID_ALARM_GET_TIME(0):
-		rv = alarm_get_time(alarm_type, ts);
-		break;
-
-	default:
-		rv = -EINVAL;
-	}
-	return rv;
-}
-
-static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-	struct timespec ts;
-	int rv;
-
-	switch (ANDROID_ALARM_BASE_CMD(cmd)) {
-	case ANDROID_ALARM_SET_AND_WAIT(0):
-	case ANDROID_ALARM_SET(0):
-	case ANDROID_ALARM_SET_RTC:
-		if (copy_from_user(&ts, (void __user *)arg, sizeof(ts)))
-			return -EFAULT;
-		break;
-	}
-
-	rv = alarm_do_ioctl(file, cmd, &ts);
-	if (rv)
-		return rv;
-
-	switch (ANDROID_ALARM_BASE_CMD(cmd)) {
-	case ANDROID_ALARM_GET_TIME(0):
-		if (copy_to_user((void __user *)arg, &ts, sizeof(ts)))
-			return -EFAULT;
-		break;
-	}
-
-	return 0;
-}
-#ifdef CONFIG_COMPAT
-static long alarm_compat_ioctl(struct file *file, unsigned int cmd,
-							unsigned long arg)
-{
-	struct timespec ts;
-	int rv;
-
-	switch (ANDROID_ALARM_BASE_CMD(cmd)) {
-	case ANDROID_ALARM_SET_AND_WAIT_COMPAT(0):
-	case ANDROID_ALARM_SET_COMPAT(0):
-	case ANDROID_ALARM_SET_RTC_COMPAT:
-		if (compat_get_timespec(&ts, (void __user *)arg))
-			return -EFAULT;
-		/* fall through */
-	case ANDROID_ALARM_GET_TIME_COMPAT(0):
-		cmd = ANDROID_ALARM_COMPAT_TO_NORM(cmd);
-		break;
-	}
-
-	rv = alarm_do_ioctl(file, cmd, &ts);
-	if (rv)
-		return rv;
-
-	switch (ANDROID_ALARM_BASE_CMD(cmd)) {
-	case ANDROID_ALARM_GET_TIME(0): /* NOTE: we modified cmd above */
-		if (compat_put_timespec(&ts, (void __user *)arg))
-			return -EFAULT;
-		break;
-	}
-
-	return 0;
-}
-#endif
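-
-/*
- * The compat path above only converts the timespec layout and rewrites the
- * command to its native encoding via ANDROID_ALARM_COMPAT_TO_NORM(), so
- * alarm_do_ioctl() only ever sees native commands.
- */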
-
-static int alarm_open(struct inode *inode, struct file *file)
-{
-	file->private_data = NULL;
-	return 0;
-}
-
-static int alarm_release(struct inode *inode, struct file *file)
-{
-	int i;
-	unsigned long flags;
-
-	spin_lock_irqsave(&alarm_slock, flags);
-	if (file->private_data) {
-		for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) {
-			uint32_t alarm_type_mask = 1U << i;
-			if (alarm_enabled & alarm_type_mask) {
-				alarm_dbg(INFO,
-					  "%s: clear alarm, pending %d\n",
-					  __func__,
-					  !!(alarm_pending & alarm_type_mask));
-				alarm_enabled &= ~alarm_type_mask;
-			}
-			spin_unlock_irqrestore(&alarm_slock, flags);
-			devalarm_cancel(&alarms[i]);
-			spin_lock_irqsave(&alarm_slock, flags);
-		}
-		if (alarm_pending | wait_pending) {
-			if (alarm_pending)
-				alarm_dbg(INFO, "%s: clear pending alarms %x\n",
-					  __func__, alarm_pending);
-			__pm_relax(&alarm_wake_lock);
-			wait_pending = 0;
-			alarm_pending = 0;
-		}
-		alarm_opened = 0;
-	}
-	spin_unlock_irqrestore(&alarm_slock, flags);
-	return 0;
-}
-
-static void devalarm_triggered(struct devalarm *alarm)
-{
-	unsigned long flags;
-	uint32_t alarm_type_mask = 1U << alarm->type;
-
-	alarm_dbg(INT, "%s: type %d\n", __func__, alarm->type);
-	spin_lock_irqsave(&alarm_slock, flags);
-	if (alarm_enabled & alarm_type_mask) {
-		__pm_wakeup_event(&alarm_wake_lock, 5000); /* 5secs */
-		alarm_enabled &= ~alarm_type_mask;
-		alarm_pending |= alarm_type_mask;
-		wake_up(&alarm_wait_queue);
-	}
-	spin_unlock_irqrestore(&alarm_slock, flags);
-}
-
-
-static enum hrtimer_restart devalarm_hrthandler(struct hrtimer *hrt)
-{
-	struct devalarm *devalrm = container_of(hrt, struct devalarm, u.hrt);
-
-	devalarm_triggered(devalrm);
-	return HRTIMER_NORESTART;
-}
-
-static enum alarmtimer_restart devalarm_alarmhandler(struct alarm *alrm,
-							ktime_t now)
-{
-	struct devalarm *devalrm = container_of(alrm, struct devalarm, u.alrm);
-
-	devalarm_triggered(devalrm);
-	return ALARMTIMER_NORESTART;
-}
-
-
-static const struct file_operations alarm_fops = {
-	.owner = THIS_MODULE,
-	.unlocked_ioctl = alarm_ioctl,
-	.open = alarm_open,
-	.release = alarm_release,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl = alarm_compat_ioctl,
-#endif
-};
-
-static struct miscdevice alarm_device = {
-	.minor = MISC_DYNAMIC_MINOR,
-	.name = "alarm",
-	.fops = &alarm_fops,
-};
-
-static int __init alarm_dev_init(void)
-{
-	int err;
-	int i;
-
-	err = misc_register(&alarm_device);
-	if (err)
-		return err;
-
-	alarm_init(&alarms[ANDROID_ALARM_RTC_WAKEUP].u.alrm,
-			ALARM_REALTIME, devalarm_alarmhandler);
-	hrtimer_init(&alarms[ANDROID_ALARM_RTC].u.hrt,
-			CLOCK_REALTIME, HRTIMER_MODE_ABS);
-	alarm_init(&alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].u.alrm,
-			ALARM_BOOTTIME, devalarm_alarmhandler);
-	hrtimer_init(&alarms[ANDROID_ALARM_ELAPSED_REALTIME].u.hrt,
-			CLOCK_BOOTTIME, HRTIMER_MODE_ABS);
-	hrtimer_init(&alarms[ANDROID_ALARM_SYSTEMTIME].u.hrt,
-			CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-
-	for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) {
-		alarms[i].type = i;
-		if (!is_wakeup(i))
-			alarms[i].u.hrt.function = devalarm_hrthandler;
-	}
-
-	wakeup_source_init(&alarm_wake_lock, "alarm");
-	return 0;
-}
-
-static void __exit alarm_dev_exit(void)
-{
-	misc_deregister(&alarm_device);
-	wakeup_source_trash(&alarm_wake_lock);
-}
-
-module_init(alarm_dev_init);
-module_exit(alarm_dev_exit);
-
diff --git a/drivers/staging/android/android_alarm.h b/drivers/staging/android/android_alarm.h
deleted file mode 100644
index 4fd32f3..0000000
--- a/drivers/staging/android/android_alarm.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/* include/linux/android_alarm.h
- *
- * Copyright (C) 2006-2007 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_ANDROID_ALARM_H
-#define _LINUX_ANDROID_ALARM_H
-
-#include <linux/ioctl.h>
-#include <linux/time.h>
-#include <linux/compat.h>
-
-enum android_alarm_type {
-	/* return code bit numbers or set alarm arg */
-	ANDROID_ALARM_RTC_WAKEUP,
-	ANDROID_ALARM_RTC,
-	ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
-	ANDROID_ALARM_ELAPSED_REALTIME,
-	ANDROID_ALARM_SYSTEMTIME,
-
-	ANDROID_ALARM_TYPE_COUNT,
-
-	/* return code bit numbers */
-	/* ANDROID_ALARM_TIME_CHANGE = 16 */
-};
-
-enum android_alarm_return_flags {
-	ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
-	ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
-	ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
-				1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
-	ANDROID_ALARM_ELAPSED_REALTIME_MASK =
-				1U << ANDROID_ALARM_ELAPSED_REALTIME,
-	ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
-	ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
-};
-
-/* Disable alarm */
-#define ANDROID_ALARM_CLEAR(type)           _IO('a', 0 | ((type) << 4))
-
-/* Ack last alarm and wait for next */
-#define ANDROID_ALARM_WAIT                  _IO('a', 1)
-
-#define ALARM_IOW(c, type, size)            _IOW('a', (c) | ((type) << 4), size)
-/* Set alarm */
-#define ANDROID_ALARM_SET(type)             ALARM_IOW(2, type, struct timespec)
-#define ANDROID_ALARM_SET_AND_WAIT(type)    ALARM_IOW(3, type, struct timespec)
-#define ANDROID_ALARM_GET_TIME(type)        ALARM_IOW(4, type, struct timespec)
-#define ANDROID_ALARM_SET_RTC               _IOW('a', 5, struct timespec)
-#define ANDROID_ALARM_BASE_CMD(cmd)         (cmd & ~(_IOC(0, 0, 0xf0, 0)))
-#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd)    (_IOC_NR(cmd) >> 4)
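-
-/*
- * Command encoding: the low 4 bits of the ioctl NR field select the
- * command and the high 4 bits carry the android_alarm_type; e.g.
- * ANDROID_ALARM_SET(ANDROID_ALARM_RTC) has NR 0x12 (command 2, type 1).
- * ANDROID_ALARM_BASE_CMD() masks the type bits back out and
- * ANDROID_ALARM_IOCTL_TO_TYPE() recovers the type.  Illustrative use
- * from userspace (ts is an absolute expiry on the chosen clock):
- *
- *	ioctl(fd, ANDROID_ALARM_SET(ANDROID_ALARM_RTC_WAKEUP), &ts);
- */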
-
-
-#ifdef CONFIG_COMPAT
-#define ANDROID_ALARM_SET_COMPAT(type)		ALARM_IOW(2, type, \
-							struct compat_timespec)
-#define ANDROID_ALARM_SET_AND_WAIT_COMPAT(type)	ALARM_IOW(3, type, \
-							struct compat_timespec)
-#define ANDROID_ALARM_GET_TIME_COMPAT(type)	ALARM_IOW(4, type, \
-							struct compat_timespec)
-#define ANDROID_ALARM_SET_RTC_COMPAT		_IOW('a', 5, \
-							struct compat_timespec)
-#define ANDROID_ALARM_IOCTL_NR(cmd)		(_IOC_NR(cmd) & ((1<<4)-1))
-#define ANDROID_ALARM_COMPAT_TO_NORM(cmd)  \
-				ALARM_IOW(ANDROID_ALARM_IOCTL_NR(cmd), \
-					ANDROID_ALARM_IOCTL_TO_TYPE(cmd), \
-					struct timespec)
-
-#endif
-
-#endif
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index e681bdd..9ab0320 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -224,21 +224,29 @@
 
 	/* If size is not set, or set to 0, always return EOF. */
 	if (asma->size == 0)
-		goto out;
+		goto out_unlock;
 
 	if (!asma->file) {
 		ret = -EBADF;
-		goto out;
+		goto out_unlock;
 	}
 
+	mutex_unlock(&ashmem_mutex);
+
+	/*
+	 * asma and asma->file are used outside the lock here.  We assume
+	 * once asma->file is set it will never be changed, and will not
+	 * be destroyed until all references to the file are dropped and
+	 * ashmem_release is called.
+	 */
 	ret = asma->file->f_op->read(asma->file, buf, len, pos);
-	if (ret < 0)
-		goto out;
+	if (ret >= 0) {
+		/* Update backing file pos, since f_ops->read() doesn't */
+		asma->file->f_pos = *pos;
+	}
+	return ret;
 
-	/** Update backing file pos, since f_ops->read() doesn't */
-	asma->file->f_pos = *pos;
-
-out:
+out_unlock:
 	mutex_unlock(&ashmem_mutex);
 	return ret;
 }
@@ -317,22 +325,14 @@
 	}
 	get_file(asma->file);
 
-	/*
-	 * XXX - Reworked to use shmem_zero_setup() instead of
-	 * shmem_set_file while we're in staging. -jstultz
-	 */
-	if (vma->vm_flags & VM_SHARED) {
-		ret = shmem_zero_setup(vma);
-		if (ret) {
-			fput(asma->file);
-			goto out;
-		}
+	if (vma->vm_flags & VM_SHARED)
+		shmem_set_file(vma, asma->file);
+	else {
+		if (vma->vm_file)
+			fput(vma->vm_file);
+		vma->vm_file = asma->file;
 	}
 
-	if (vma->vm_file)
-		fput(vma->vm_file);
-	vma->vm_file = asma->file;
-
 out:
 	mutex_unlock(&ashmem_mutex);
 	return ret;
@@ -363,12 +363,14 @@
 	if (!sc->nr_to_scan)
 		return lru_count;
 
-	mutex_lock(&ashmem_mutex);
+	if (!mutex_trylock(&ashmem_mutex))
+		return -1;
+
 	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
 		loff_t start = range->pgstart * PAGE_SIZE;
 		loff_t end = (range->pgend + 1) * PAGE_SIZE;
 
-		do_fallocate(range->asma->file,
+		range->asma->file->f_op->fallocate(range->asma->file,
 				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 				start, end - start);
 		range->purged = ASHMEM_WAS_PURGED;
@@ -413,6 +415,7 @@
 
 static int set_name(struct ashmem_area *asma, void __user *name)
 {
+	int len;
 	int ret = 0;
 	char local_name[ASHMEM_NAME_LEN];
 
@@ -425,21 +428,19 @@
 	 * variable that does not need protection and later copy the local
 	 * variable to the structure member with lock held.
 	 */
-	if (copy_from_user(local_name, name, ASHMEM_NAME_LEN))
-		return -EFAULT;
-
+	len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
+	if (len < 0)
+		return len;
+	if (len == ASHMEM_NAME_LEN)
+		local_name[ASHMEM_NAME_LEN - 1] = '\0';
 	mutex_lock(&ashmem_mutex);
 	/* cannot change an existing mapping's name */
-	if (unlikely(asma->file)) {
+	if (unlikely(asma->file))
 		ret = -EINVAL;
-		goto out;
-	}
-	memcpy(asma->name + ASHMEM_NAME_PREFIX_LEN,
-		local_name, ASHMEM_NAME_LEN);
-	asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0';
-out:
-	mutex_unlock(&ashmem_mutex);
+	else
+		strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);
 
+	mutex_unlock(&ashmem_mutex);
 	return ret;
 }
 
diff --git a/drivers/staging/android/ashmem.h b/drivers/staging/android/ashmem.h
index 8dc0f0d..5abcfd7 100644
--- a/drivers/staging/android/ashmem.h
+++ b/drivers/staging/android/ashmem.h
@@ -16,35 +16,7 @@
 #include <linux/ioctl.h>
 #include <linux/compat.h>
 
-#define ASHMEM_NAME_LEN		256
-
-#define ASHMEM_NAME_DEF		"dev/ashmem"
-
-/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
-#define ASHMEM_NOT_PURGED	0
-#define ASHMEM_WAS_PURGED	1
-
-/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
-#define ASHMEM_IS_UNPINNED	0
-#define ASHMEM_IS_PINNED	1
-
-struct ashmem_pin {
-	__u32 offset;	/* offset into region, in bytes, page-aligned */
-	__u32 len;	/* length forward from offset, in bytes, page-aligned */
-};
-
-#define __ASHMEMIOC		0x77
-
-#define ASHMEM_SET_NAME		_IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
-#define ASHMEM_GET_NAME		_IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
-#define ASHMEM_SET_SIZE		_IOW(__ASHMEMIOC, 3, size_t)
-#define ASHMEM_GET_SIZE		_IO(__ASHMEMIOC, 4)
-#define ASHMEM_SET_PROT_MASK	_IOW(__ASHMEMIOC, 5, unsigned long)
-#define ASHMEM_GET_PROT_MASK	_IO(__ASHMEMIOC, 6)
-#define ASHMEM_PIN		_IOW(__ASHMEMIOC, 7, struct ashmem_pin)
-#define ASHMEM_UNPIN		_IOW(__ASHMEMIOC, 8, struct ashmem_pin)
-#define ASHMEM_GET_PIN_STATUS	_IO(__ASHMEMIOC, 9)
-#define ASHMEM_PURGE_ALL_CACHES	_IO(__ASHMEMIOC, 10)
+#include "uapi/ashmem.h"
 
 /* support of 32bit userspace on 64bit platforms */
 #ifdef CONFIG_COMPAT
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
deleted file mode 100644
index 0fce5fc..0000000
--- a/drivers/staging/android/binder.c
+++ /dev/null
@@ -1,3562 +0,0 @@
-/* binder.c
- *
- * Android IPC Subsystem
- *
- * Copyright (C) 2007-2008 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <asm/cacheflush.h>
-#include <linux/fdtable.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/list.h>
-#include <linux/miscdevice.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/nsproxy.h>
-#include <linux/poll.h>
-#include <linux/debugfs.h>
-#include <linux/rbtree.h>
-#include <linux/sched.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/pid_namespace.h>
-
-#include "binder.h"
-#include "binder_trace.h"
-
-static DEFINE_MUTEX(binder_main_lock);
-static DEFINE_MUTEX(binder_deferred_lock);
-static DEFINE_MUTEX(binder_mmap_lock);
-
-static HLIST_HEAD(binder_procs);
-static HLIST_HEAD(binder_deferred_list);
-static HLIST_HEAD(binder_dead_nodes);
-
-static struct dentry *binder_debugfs_dir_entry_root;
-static struct dentry *binder_debugfs_dir_entry_proc;
-static struct binder_node *binder_context_mgr_node;
-static kuid_t binder_context_mgr_uid = INVALID_UID;
-static int binder_last_id;
-static struct workqueue_struct *binder_deferred_workqueue;
-
-#define BINDER_DEBUG_ENTRY(name) \
-static int binder_##name##_open(struct inode *inode, struct file *file) \
-{ \
-	return single_open(file, binder_##name##_show, inode->i_private); \
-} \
-\
-static const struct file_operations binder_##name##_fops = { \
-	.owner = THIS_MODULE, \
-	.open = binder_##name##_open, \
-	.read = seq_read, \
-	.llseek = seq_lseek, \
-	.release = single_release, \
-}
-
-static int binder_proc_show(struct seq_file *m, void *unused);
-BINDER_DEBUG_ENTRY(proc);
-
-/* This is only defined in include/asm-arm/sizes.h */
-#ifndef SZ_1K
-#define SZ_1K                               0x400
-#endif
-
-#ifndef SZ_4M
-#define SZ_4M                               0x400000
-#endif
-
-#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
-
-#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
-
-enum {
-	BINDER_DEBUG_USER_ERROR             = 1U << 0,
-	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
-	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
-	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
-	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
-	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
-	BINDER_DEBUG_READ_WRITE             = 1U << 6,
-	BINDER_DEBUG_USER_REFS              = 1U << 7,
-	BINDER_DEBUG_THREADS                = 1U << 8,
-	BINDER_DEBUG_TRANSACTION            = 1U << 9,
-	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
-	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
-	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
-	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
-	BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
-	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
-};
-static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
-	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
-module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
-
-static bool binder_debug_no_lock;
-module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
-
-static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
-static int binder_stop_on_user_error;
-
-static int binder_set_stop_on_user_error(const char *val,
-					 struct kernel_param *kp)
-{
-	int ret;
-	ret = param_set_int(val, kp);
-	if (binder_stop_on_user_error < 2)
-		wake_up(&binder_user_error_wait);
-	return ret;
-}
-module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
-	param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
-
-#define binder_debug(mask, x...) \
-	do { \
-		if (binder_debug_mask & mask) \
-			pr_info(x); \
-	} while (0)
-
-#define binder_user_error(x...) \
-	do { \
-		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
-			pr_info(x); \
-		if (binder_stop_on_user_error) \
-			binder_stop_on_user_error = 2; \
-	} while (0)
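-
-/*
- * Illustrative use: binder_debug(BINDER_DEBUG_THREADS, "%d\n", pid)
- * prints only while the corresponding bit is set in the debug_mask
- * module parameter.  binder_user_error() additionally bumps
- * binder_stop_on_user_error to 2 when stop_on_user_error is enabled,
- * so the rest of the driver can stall misbehaving callers for
- * inspection.
- */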
-
-enum binder_stat_types {
-	BINDER_STAT_PROC,
-	BINDER_STAT_THREAD,
-	BINDER_STAT_NODE,
-	BINDER_STAT_REF,
-	BINDER_STAT_DEATH,
-	BINDER_STAT_TRANSACTION,
-	BINDER_STAT_TRANSACTION_COMPLETE,
-	BINDER_STAT_COUNT
-};
-
-struct binder_stats {
-	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
-	int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
-	int obj_created[BINDER_STAT_COUNT];
-	int obj_deleted[BINDER_STAT_COUNT];
-};
-
-static struct binder_stats binder_stats;
-
-static inline void binder_stats_deleted(enum binder_stat_types type)
-{
-	binder_stats.obj_deleted[type]++;
-}
-
-static inline void binder_stats_created(enum binder_stat_types type)
-{
-	binder_stats.obj_created[type]++;
-}
-
-struct binder_transaction_log_entry {
-	int debug_id;
-	int call_type;
-	int from_proc;
-	int from_thread;
-	int target_handle;
-	int to_proc;
-	int to_thread;
-	int to_node;
-	int data_size;
-	int offsets_size;
-};
-struct binder_transaction_log {
-	int next;
-	int full;
-	struct binder_transaction_log_entry entry[32];
-};
-static struct binder_transaction_log binder_transaction_log;
-static struct binder_transaction_log binder_transaction_log_failed;
-
-static struct binder_transaction_log_entry *binder_transaction_log_add(
-	struct binder_transaction_log *log)
-{
-	struct binder_transaction_log_entry *e;
-	e = &log->entry[log->next];
-	memset(e, 0, sizeof(*e));
-	log->next++;
-	if (log->next == ARRAY_SIZE(log->entry)) {
-		log->next = 0;
-		log->full = 1;
-	}
-	return e;
-}
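-
-/*
- * The transaction log is a fixed 32-entry ring: once 'next' wraps, 'full'
- * stays set and new entries silently overwrite the oldest ones.
- */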
-
-struct binder_work {
-	struct list_head entry;
-	enum {
-		BINDER_WORK_TRANSACTION = 1,
-		BINDER_WORK_TRANSACTION_COMPLETE,
-		BINDER_WORK_NODE,
-		BINDER_WORK_DEAD_BINDER,
-		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
-		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
-	} type;
-};
-
-struct binder_node {
-	int debug_id;
-	struct binder_work work;
-	union {
-		struct rb_node rb_node;
-		struct hlist_node dead_node;
-	};
-	struct binder_proc *proc;
-	struct hlist_head refs;
-	int internal_strong_refs;
-	int local_weak_refs;
-	int local_strong_refs;
-	void __user *ptr;
-	void __user *cookie;
-	unsigned has_strong_ref:1;
-	unsigned pending_strong_ref:1;
-	unsigned has_weak_ref:1;
-	unsigned pending_weak_ref:1;
-	unsigned has_async_transaction:1;
-	unsigned accept_fds:1;
-	unsigned min_priority:8;
-	struct list_head async_todo;
-};
-
-struct binder_ref_death {
-	struct binder_work work;
-	void __user *cookie;
-};
-
-struct binder_ref {
-	/* Lookups needed: */
-	/*   node + proc => ref (transaction) */
-	/*   desc + proc => ref (transaction, inc/dec ref) */
-	/*   node => refs + procs (proc exit) */
-	int debug_id;
-	struct rb_node rb_node_desc;
-	struct rb_node rb_node_node;
-	struct hlist_node node_entry;
-	struct binder_proc *proc;
-	struct binder_node *node;
-	uint32_t desc;
-	int strong;
-	int weak;
-	struct binder_ref_death *death;
-};
-
-struct binder_buffer {
-	struct list_head entry; /* free and allocated entries by address */
-	struct rb_node rb_node; /* free entry by size or allocated entry */
-				/* by address */
-	unsigned free:1;
-	unsigned allow_user_free:1;
-	unsigned async_transaction:1;
-	unsigned debug_id:29;
-
-	struct binder_transaction *transaction;
-
-	struct binder_node *target_node;
-	size_t data_size;
-	size_t offsets_size;
-	uint8_t data[0];
-};
-
-enum binder_deferred_state {
-	BINDER_DEFERRED_PUT_FILES    = 0x01,
-	BINDER_DEFERRED_FLUSH        = 0x02,
-	BINDER_DEFERRED_RELEASE      = 0x04,
-};
-
-struct binder_proc {
-	struct hlist_node proc_node;
-	struct rb_root threads;
-	struct rb_root nodes;
-	struct rb_root refs_by_desc;
-	struct rb_root refs_by_node;
-	int pid;
-	struct vm_area_struct *vma;
-	struct mm_struct *vma_vm_mm;
-	struct task_struct *tsk;
-	struct files_struct *files;
-	struct hlist_node deferred_work_node;
-	int deferred_work;
-	void *buffer;
-	ptrdiff_t user_buffer_offset;
-
-	struct list_head buffers;
-	struct rb_root free_buffers;
-	struct rb_root allocated_buffers;
-	size_t free_async_space;
-
-	struct page **pages;
-	size_t buffer_size;
-	uint32_t buffer_free;
-	struct list_head todo;
-	wait_queue_head_t wait;
-	struct binder_stats stats;
-	struct list_head delivered_death;
-	int max_threads;
-	int requested_threads;
-	int requested_threads_started;
-	int ready_threads;
-	long default_priority;
-	struct dentry *debugfs_entry;
-};
-
-enum {
-	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
-	BINDER_LOOPER_STATE_ENTERED     = 0x02,
-	BINDER_LOOPER_STATE_EXITED      = 0x04,
-	BINDER_LOOPER_STATE_INVALID     = 0x08,
-	BINDER_LOOPER_STATE_WAITING     = 0x10,
-	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
-};
-
-struct binder_thread {
-	struct binder_proc *proc;
-	struct rb_node rb_node;
-	int pid;
-	int looper;
-	struct binder_transaction *transaction_stack;
-	struct list_head todo;
-	uint32_t return_error; /* Write failed, return error code in read buf */
-	uint32_t return_error2; /* Write failed, return error code in read */
-		/* buffer. Used when sending a reply to a dead process that */
-		/* we are also waiting on */
-	wait_queue_head_t wait;
-	struct binder_stats stats;
-};
-
-struct binder_transaction {
-	int debug_id;
-	struct binder_work work;
-	struct binder_thread *from;
-	struct binder_transaction *from_parent;
-	struct binder_proc *to_proc;
-	struct binder_thread *to_thread;
-	struct binder_transaction *to_parent;
-	unsigned need_reply:1;
-	/* unsigned is_dead:1; */	/* not used at the moment */
-
-	struct binder_buffer *buffer;
-	unsigned int	code;
-	unsigned int	flags;
-	long	priority;
-	long	saved_priority;
-	kuid_t	sender_euid;
-};
-
-static void
-binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
-
-static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
-{
-	struct files_struct *files = proc->files;
-	unsigned long rlim_cur;
-	unsigned long irqs;
-
-	if (files == NULL)
-		return -ESRCH;
-
-	if (!lock_task_sighand(proc->tsk, &irqs))
-		return -EMFILE;
-
-	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
-	unlock_task_sighand(proc->tsk, &irqs);
-
-	return __alloc_fd(files, 0, rlim_cur, flags);
-}
-
-/*
- * copied from fd_install
- */
-static void task_fd_install(
-	struct binder_proc *proc, unsigned int fd, struct file *file)
-{
-	if (proc->files)
-		__fd_install(proc->files, fd, file);
-}
-
-/*
- * copied from sys_close
- */
-static long task_close_fd(struct binder_proc *proc, unsigned int fd)
-{
-	int retval;
-
-	if (proc->files == NULL)
-		return -ESRCH;
-
-	retval = __close_fd(proc->files, fd);
-	/* can't restart close syscall because file table entry was cleared */
-	if (unlikely(retval == -ERESTARTSYS ||
-		     retval == -ERESTARTNOINTR ||
-		     retval == -ERESTARTNOHAND ||
-		     retval == -ERESTART_RESTARTBLOCK))
-		retval = -EINTR;
-
-	return retval;
-}
-
-static inline void binder_lock(const char *tag)
-{
-	trace_binder_lock(tag);
-	mutex_lock(&binder_main_lock);
-	trace_binder_locked(tag);
-}
-
-static inline void binder_unlock(const char *tag)
-{
-	trace_binder_unlock(tag);
-	mutex_unlock(&binder_main_lock);
-}
-
-static void binder_set_nice(long nice)
-{
-	long min_nice;
-	if (can_nice(current, nice)) {
-		set_user_nice(current, nice);
-		return;
-	}
-	min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
-	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
-		     "%d: nice value %ld not allowed use %ld instead\n",
-		      current->pid, nice, min_nice);
-	set_user_nice(current, min_nice);
-	if (min_nice < 20)
-		return;
-	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
-}
-
-static size_t binder_buffer_size(struct binder_proc *proc,
-				 struct binder_buffer *buffer)
-{
-	if (list_is_last(&buffer->entry, &proc->buffers))
-		return proc->buffer + proc->buffer_size - (void *)buffer->data;
-	else
-		return (size_t)list_entry(buffer->entry.next,
-			struct binder_buffer, entry) - (size_t)buffer->data;
-}
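-
-/*
- * proc->buffers is kept sorted by address, so a buffer's usable size is
- * simply the gap between its data[] payload and the next buffer header
- * (or the end of the mapped area for the last buffer).
- */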
-
-static void binder_insert_free_buffer(struct binder_proc *proc,
-				      struct binder_buffer *new_buffer)
-{
-	struct rb_node **p = &proc->free_buffers.rb_node;
-	struct rb_node *parent = NULL;
-	struct binder_buffer *buffer;
-	size_t buffer_size;
-	size_t new_buffer_size;
-
-	BUG_ON(!new_buffer->free);
-
-	new_buffer_size = binder_buffer_size(proc, new_buffer);
-
-	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: add free buffer, size %zd, at %p\n",
-		      proc->pid, new_buffer_size, new_buffer);
-
-	while (*p) {
-		parent = *p;
-		buffer = rb_entry(parent, struct binder_buffer, rb_node);
-		BUG_ON(!buffer->free);
-
-		buffer_size = binder_buffer_size(proc, buffer);
-
-		if (new_buffer_size < buffer_size)
-			p = &parent->rb_left;
-		else
-			p = &parent->rb_right;
-	}
-	rb_link_node(&new_buffer->rb_node, parent, p);
-	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
-}
-
-static void binder_insert_allocated_buffer(struct binder_proc *proc,
-					   struct binder_buffer *new_buffer)
-{
-	struct rb_node **p = &proc->allocated_buffers.rb_node;
-	struct rb_node *parent = NULL;
-	struct binder_buffer *buffer;
-
-	BUG_ON(new_buffer->free);
-
-	while (*p) {
-		parent = *p;
-		buffer = rb_entry(parent, struct binder_buffer, rb_node);
-		BUG_ON(buffer->free);
-
-		if (new_buffer < buffer)
-			p = &parent->rb_left;
-		else if (new_buffer > buffer)
-			p = &parent->rb_right;
-		else
-			BUG();
-	}
-	rb_link_node(&new_buffer->rb_node, parent, p);
-	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
-}
-
-static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
-						  void __user *user_ptr)
-{
-	struct rb_node *n = proc->allocated_buffers.rb_node;
-	struct binder_buffer *buffer;
-	struct binder_buffer *kern_ptr;
-
-	kern_ptr = user_ptr - proc->user_buffer_offset
-		- offsetof(struct binder_buffer, data);
-
-	while (n) {
-		buffer = rb_entry(n, struct binder_buffer, rb_node);
-		BUG_ON(buffer->free);
-
-		if (kern_ptr < buffer)
-			n = n->rb_left;
-		else if (kern_ptr > buffer)
-			n = n->rb_right;
-		else
-			return buffer;
-	}
-	return NULL;
-}
-
-static int binder_update_page_range(struct binder_proc *proc, int allocate,
-				    void *start, void *end,
-				    struct vm_area_struct *vma)
-{
-	void *page_addr;
-	unsigned long user_page_addr;
-	struct vm_struct tmp_area;
-	struct page **page;
-	struct mm_struct *mm;
-
-	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: %s pages %p-%p\n", proc->pid,
-		     allocate ? "allocate" : "free", start, end);
-
-	if (end <= start)
-		return 0;
-
-	trace_binder_update_page_range(proc, allocate, start, end);
-
-	if (vma)
-		mm = NULL;
-	else
-		mm = get_task_mm(proc->tsk);
-
-	if (mm) {
-		down_write(&mm->mmap_sem);
-		vma = proc->vma;
-		if (vma && mm != proc->vma_vm_mm) {
-			pr_err("%d: vma mm and task mm mismatch\n",
-				proc->pid);
-			vma = NULL;
-		}
-	}
-
-	if (allocate == 0)
-		goto free_range;
-
-	if (vma == NULL) {
-		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
-			proc->pid);
-		goto err_no_vma;
-	}
-
-	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
-		int ret;
-		struct page **page_array_ptr;
-		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
-
-		BUG_ON(*page);
-		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
-		if (*page == NULL) {
-			pr_err("%d: binder_alloc_buf failed for page at %p\n",
-				proc->pid, page_addr);
-			goto err_alloc_page_failed;
-		}
-		tmp_area.addr = page_addr;
-		tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
-		page_array_ptr = page;
-		ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
-		if (ret) {
-			pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
-			       proc->pid, page_addr);
-			goto err_map_kernel_failed;
-		}
-		user_page_addr =
-			(uintptr_t)page_addr + proc->user_buffer_offset;
-		ret = vm_insert_page(vma, user_page_addr, page[0]);
-		if (ret) {
-			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
-			       proc->pid, user_page_addr);
-			goto err_vm_insert_page_failed;
-		}
-		/* vm_insert_page does not seem to increment the refcount */
-	}
-	if (mm) {
-		up_write(&mm->mmap_sem);
-		mmput(mm);
-	}
-	return 0;
-
-free_range:
-	for (page_addr = end - PAGE_SIZE; page_addr >= start;
-	     page_addr -= PAGE_SIZE) {
-		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
-		if (vma)
-			zap_page_range(vma, (uintptr_t)page_addr +
-				proc->user_buffer_offset, PAGE_SIZE, NULL);
-err_vm_insert_page_failed:
-		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-err_map_kernel_failed:
-		__free_page(*page);
-		*page = NULL;
-err_alloc_page_failed:
-		;
-	}
-err_no_vma:
-	if (mm) {
-		up_write(&mm->mmap_sem);
-		mmput(mm);
-	}
-	return -ENOMEM;
-}
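-
-/*
- * Each page in the range is made visible twice: mapped into the kernel
- * with map_vm_area() and into the target process with vm_insert_page()
- * at the fixed user_buffer_offset.  On failure the error labels above
- * unwind the same steps in reverse, and allocate == 0 reuses them to
- * free a range.
- */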
-
-static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
-					      size_t data_size,
-					      size_t offsets_size, int is_async)
-{
-	struct rb_node *n = proc->free_buffers.rb_node;
-	struct binder_buffer *buffer;
-	size_t buffer_size;
-	struct rb_node *best_fit = NULL;
-	void *has_page_addr;
-	void *end_page_addr;
-	size_t size;
-
-	if (proc->vma == NULL) {
-		pr_err("%d: binder_alloc_buf, no vma\n",
-		       proc->pid);
-		return NULL;
-	}
-
-	size = ALIGN(data_size, sizeof(void *)) +
-		ALIGN(offsets_size, sizeof(void *));
-
-	if (size < data_size || size < offsets_size) {
-		binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
-				proc->pid, data_size, offsets_size);
-		return NULL;
-	}
-
-	if (is_async &&
-	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
-		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
-			      proc->pid, size);
-		return NULL;
-	}
-
-	while (n) {
-		buffer = rb_entry(n, struct binder_buffer, rb_node);
-		BUG_ON(!buffer->free);
-		buffer_size = binder_buffer_size(proc, buffer);
-
-		if (size < buffer_size) {
-			best_fit = n;
-			n = n->rb_left;
-		} else if (size > buffer_size)
-			n = n->rb_right;
-		else {
-			best_fit = n;
-			break;
-		}
-	}
-	if (best_fit == NULL) {
-		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
-			proc->pid, size);
-		return NULL;
-	}
-	if (n == NULL) {
-		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
-		buffer_size = binder_buffer_size(proc, buffer);
-	}
-
-	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
-		      proc->pid, size, buffer, buffer_size);
-
-	has_page_addr =
-		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
-	if (n == NULL) {
-		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
-			buffer_size = size; /* no room for other buffers */
-		else
-			buffer_size = size + sizeof(struct binder_buffer);
-	}
-	end_page_addr =
-		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
-	if (end_page_addr > has_page_addr)
-		end_page_addr = has_page_addr;
-	if (binder_update_page_range(proc, 1,
-	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
-		return NULL;
-
-	rb_erase(best_fit, &proc->free_buffers);
-	buffer->free = 0;
-	binder_insert_allocated_buffer(proc, buffer);
-	if (buffer_size != size) {
-		struct binder_buffer *new_buffer = (void *)buffer->data + size;
-		list_add(&new_buffer->entry, &buffer->entry);
-		new_buffer->free = 1;
-		binder_insert_free_buffer(proc, new_buffer);
-	}
-	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: binder_alloc_buf size %zd got %p\n",
-		      proc->pid, size, buffer);
-	buffer->data_size = data_size;
-	buffer->offsets_size = offsets_size;
-	buffer->async_transaction = is_async;
-	if (is_async) {
-		proc->free_async_space -= size + sizeof(struct binder_buffer);
-		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
-			     "%d: binder_alloc_buf size %zd async free %zd\n",
-			      proc->pid, size, proc->free_async_space);
-	}
-
-	return buffer;
-}
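-
-/*
- * Allocation is best fit over the free_buffers rbtree (keyed by size).
- * When the chosen buffer leaves room for another header plus a little
- * payload, its tail is split off and reinserted as a new free buffer.
- */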
-
-static void *buffer_start_page(struct binder_buffer *buffer)
-{
-	return (void *)((uintptr_t)buffer & PAGE_MASK);
-}
-
-static void *buffer_end_page(struct binder_buffer *buffer)
-{
-	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
-}
-
-static void binder_delete_free_buffer(struct binder_proc *proc,
-				      struct binder_buffer *buffer)
-{
-	struct binder_buffer *prev, *next = NULL;
-	int free_page_end = 1;
-	int free_page_start = 1;
-
-	BUG_ON(proc->buffers.next == &buffer->entry);
-	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
-	BUG_ON(!prev->free);
-	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
-		free_page_start = 0;
-		if (buffer_end_page(prev) == buffer_end_page(buffer))
-			free_page_end = 0;
-		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%d: merge free, buffer %p share page with %p\n",
-			      proc->pid, buffer, prev);
-	}
-
-	if (!list_is_last(&buffer->entry, &proc->buffers)) {
-		next = list_entry(buffer->entry.next,
-				  struct binder_buffer, entry);
-		if (buffer_start_page(next) == buffer_end_page(buffer)) {
-			free_page_end = 0;
-			if (buffer_start_page(next) ==
-			    buffer_start_page(buffer))
-				free_page_start = 0;
-			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-				     "%d: merge free, buffer %p share page with %p\n",
-				      proc->pid, buffer, prev);
-		}
-	}
-	list_del(&buffer->entry);
-	if (free_page_start || free_page_end) {
-		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%d: merge free, buffer %p do not share page%s%s with with %p or %p\n",
-			     proc->pid, buffer, free_page_start ? "" : " end",
-			     free_page_end ? "" : " start", prev, next);
-		binder_update_page_range(proc, 0, free_page_start ?
-			buffer_start_page(buffer) : buffer_end_page(buffer),
-			(free_page_end ? buffer_end_page(buffer) :
-			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
-	}
-}
-
-static void binder_free_buf(struct binder_proc *proc,
-			    struct binder_buffer *buffer)
-{
-	size_t size, buffer_size;
-
-	buffer_size = binder_buffer_size(proc, buffer);
-
-	size = ALIGN(buffer->data_size, sizeof(void *)) +
-		ALIGN(buffer->offsets_size, sizeof(void *));
-
-	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: binder_free_buf %p size %zd buffer_size %zd\n",
-		      proc->pid, buffer, size, buffer_size);
-
-	BUG_ON(buffer->free);
-	BUG_ON(size > buffer_size);
-	BUG_ON(buffer->transaction != NULL);
-	BUG_ON((void *)buffer < proc->buffer);
-	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
-
-	if (buffer->async_transaction) {
-		proc->free_async_space += size + sizeof(struct binder_buffer);
-
-		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
-			     "%d: binder_free_buf size %zd async free %zd\n",
-			      proc->pid, size, proc->free_async_space);
-	}
-
-	binder_update_page_range(proc, 0,
-		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
-		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
-		NULL);
-	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
-	buffer->free = 1;
-	if (!list_is_last(&buffer->entry, &proc->buffers)) {
-		struct binder_buffer *next = list_entry(buffer->entry.next,
-						struct binder_buffer, entry);
-		if (next->free) {
-			rb_erase(&next->rb_node, &proc->free_buffers);
-			binder_delete_free_buffer(proc, next);
-		}
-	}
-	if (proc->buffers.next != &buffer->entry) {
-		struct binder_buffer *prev = list_entry(buffer->entry.prev,
-						struct binder_buffer, entry);
-		if (prev->free) {
-			binder_delete_free_buffer(proc, buffer);
-			rb_erase(&prev->rb_node, &proc->free_buffers);
-			buffer = prev;
-		}
-	}
-	binder_insert_free_buffer(proc, buffer);
-}
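-
-/*
- * Freeing coalesces with free neighbours: a free 'next' buffer is removed
- * outright, and if 'prev' is free the freed buffer folds into it and
- * 'prev' is reinserted with the combined size.
- */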
-
-static struct binder_node *binder_get_node(struct binder_proc *proc,
-					   void __user *ptr)
-{
-	struct rb_node *n = proc->nodes.rb_node;
-	struct binder_node *node;
-
-	while (n) {
-		node = rb_entry(n, struct binder_node, rb_node);
-
-		if (ptr < node->ptr)
-			n = n->rb_left;
-		else if (ptr > node->ptr)
-			n = n->rb_right;
-		else
-			return node;
-	}
-	return NULL;
-}
-
-static struct binder_node *binder_new_node(struct binder_proc *proc,
-					   void __user *ptr,
-					   void __user *cookie)
-{
-	struct rb_node **p = &proc->nodes.rb_node;
-	struct rb_node *parent = NULL;
-	struct binder_node *node;
-
-	while (*p) {
-		parent = *p;
-		node = rb_entry(parent, struct binder_node, rb_node);
-
-		if (ptr < node->ptr)
-			p = &(*p)->rb_left;
-		else if (ptr > node->ptr)
-			p = &(*p)->rb_right;
-		else
-			return NULL;
-	}
-
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
-	if (node == NULL)
-		return NULL;
-	binder_stats_created(BINDER_STAT_NODE);
-	rb_link_node(&node->rb_node, parent, p);
-	rb_insert_color(&node->rb_node, &proc->nodes);
-	node->debug_id = ++binder_last_id;
-	node->proc = proc;
-	node->ptr = ptr;
-	node->cookie = cookie;
-	node->work.type = BINDER_WORK_NODE;
-	INIT_LIST_HEAD(&node->work.entry);
-	INIT_LIST_HEAD(&node->async_todo);
-	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
-		     "%d:%d node %d u%p c%p created\n",
-		     proc->pid, current->pid, node->debug_id,
-		     node->ptr, node->cookie);
-	return node;
-}
-
-static int binder_inc_node(struct binder_node *node, int strong, int internal,
-			   struct list_head *target_list)
-{
-	if (strong) {
-		if (internal) {
-			if (target_list == NULL &&
-			    node->internal_strong_refs == 0 &&
-			    !(node == binder_context_mgr_node &&
-			    node->has_strong_ref)) {
-				pr_err("invalid inc strong node for %d\n",
-					node->debug_id);
-				return -EINVAL;
-			}
-			node->internal_strong_refs++;
-		} else
-			node->local_strong_refs++;
-		if (!node->has_strong_ref && target_list) {
-			list_del_init(&node->work.entry);
-			list_add_tail(&node->work.entry, target_list);
-		}
-	} else {
-		if (!internal)
-			node->local_weak_refs++;
-		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
-			if (target_list == NULL) {
-				pr_err("invalid inc weak node for %d\n",
-					node->debug_id);
-				return -EINVAL;
-			}
-			list_add_tail(&node->work.entry, target_list);
-		}
-	}
-	return 0;
-}
-
-static int binder_dec_node(struct binder_node *node, int strong, int internal)
-{
-	if (strong) {
-		if (internal)
-			node->internal_strong_refs--;
-		else
-			node->local_strong_refs--;
-		if (node->local_strong_refs || node->internal_strong_refs)
-			return 0;
-	} else {
-		if (!internal)
-			node->local_weak_refs--;
-		if (node->local_weak_refs || !hlist_empty(&node->refs))
-			return 0;
-	}
-	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
-		if (list_empty(&node->work.entry)) {
-			list_add_tail(&node->work.entry, &node->proc->todo);
-			wake_up_interruptible(&node->proc->wait);
-		}
-	} else {
-		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
-		    !node->local_weak_refs) {
-			list_del_init(&node->work.entry);
-			if (node->proc) {
-				rb_erase(&node->rb_node, &node->proc->nodes);
-				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
-					     "refless node %d deleted\n",
-					     node->debug_id);
-			} else {
-				hlist_del(&node->dead_node);
-				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
-					     "dead node %d deleted\n",
-					     node->debug_id);
-			}
-			kfree(node);
-			binder_stats_deleted(BINDER_STAT_NODE);
-		}
-	}
-
-	return 0;
-}
-
-
-static struct binder_ref *binder_get_ref(struct binder_proc *proc,
-					 uint32_t desc)
-{
-	struct rb_node *n = proc->refs_by_desc.rb_node;
-	struct binder_ref *ref;
-
-	while (n) {
-		ref = rb_entry(n, struct binder_ref, rb_node_desc);
-
-		if (desc < ref->desc)
-			n = n->rb_left;
-		else if (desc > ref->desc)
-			n = n->rb_right;
-		else
-			return ref;
-	}
-	return NULL;
-}
-
-static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
-						  struct binder_node *node)
-{
-	struct rb_node *n;
-	struct rb_node **p = &proc->refs_by_node.rb_node;
-	struct rb_node *parent = NULL;
-	struct binder_ref *ref, *new_ref;
-
-	while (*p) {
-		parent = *p;
-		ref = rb_entry(parent, struct binder_ref, rb_node_node);
-
-		if (node < ref->node)
-			p = &(*p)->rb_left;
-		else if (node > ref->node)
-			p = &(*p)->rb_right;
-		else
-			return ref;
-	}
-	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
-	if (new_ref == NULL)
-		return NULL;
-	binder_stats_created(BINDER_STAT_REF);
-	new_ref->debug_id = ++binder_last_id;
-	new_ref->proc = proc;
-	new_ref->node = node;
-	rb_link_node(&new_ref->rb_node_node, parent, p);
-	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
-
-	new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
-	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
-		ref = rb_entry(n, struct binder_ref, rb_node_desc);
-		if (ref->desc > new_ref->desc)
-			break;
-		new_ref->desc = ref->desc + 1;
-	}
-
-	p = &proc->refs_by_desc.rb_node;
-	while (*p) {
-		parent = *p;
-		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
-
-		if (new_ref->desc < ref->desc)
-			p = &(*p)->rb_left;
-		else if (new_ref->desc > ref->desc)
-			p = &(*p)->rb_right;
-		else
-			BUG();
-	}
-	rb_link_node(&new_ref->rb_node_desc, parent, p);
-	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
-	if (node) {
-		hlist_add_head(&new_ref->node_entry, &node->refs);
-
-		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
-			     "%d new ref %d desc %d for node %d\n",
-			      proc->pid, new_ref->debug_id, new_ref->desc,
-			      node->debug_id);
-	} else {
-		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
-			     "%d new ref %d desc %d for dead node\n",
-			      proc->pid, new_ref->debug_id, new_ref->desc);
-	}
-	return new_ref;
-}
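-
-/*
- * New references receive the smallest unused descriptor: 0 is reserved
- * for the context manager, and the in-order walk of refs_by_desc above
- * finds the first gap at or after the starting value.
- */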
-
-static void binder_delete_ref(struct binder_ref *ref)
-{
-	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
-		     "%d delete ref %d desc %d for node %d\n",
-		      ref->proc->pid, ref->debug_id, ref->desc,
-		      ref->node->debug_id);
-
-	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
-	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
-	if (ref->strong)
-		binder_dec_node(ref->node, 1, 1);
-	hlist_del(&ref->node_entry);
-	binder_dec_node(ref->node, 0, 1);
-	if (ref->death) {
-		binder_debug(BINDER_DEBUG_DEAD_BINDER,
-			     "%d delete ref %d desc %d has death notification\n",
-			      ref->proc->pid, ref->debug_id, ref->desc);
-		list_del(&ref->death->work.entry);
-		kfree(ref->death);
-		binder_stats_deleted(BINDER_STAT_DEATH);
-	}
-	kfree(ref);
-	binder_stats_deleted(BINDER_STAT_REF);
-}
-
-static int binder_inc_ref(struct binder_ref *ref, int strong,
-			  struct list_head *target_list)
-{
-	int ret;
-	if (strong) {
-		if (ref->strong == 0) {
-			ret = binder_inc_node(ref->node, 1, 1, target_list);
-			if (ret)
-				return ret;
-		}
-		ref->strong++;
-	} else {
-		if (ref->weak == 0) {
-			ret = binder_inc_node(ref->node, 0, 1, target_list);
-			if (ret)
-				return ret;
-		}
-		ref->weak++;
-	}
-	return 0;
-}
-
-
-static int binder_dec_ref(struct binder_ref *ref, int strong)
-{
-	if (strong) {
-		if (ref->strong == 0) {
-			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
-					  ref->proc->pid, ref->debug_id,
-					  ref->desc, ref->strong, ref->weak);
-			return -EINVAL;
-		}
-		ref->strong--;
-		if (ref->strong == 0) {
-			int ret;
-			ret = binder_dec_node(ref->node, strong, 1);
-			if (ret)
-				return ret;
-		}
-	} else {
-		if (ref->weak == 0) {
-			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
-					  ref->proc->pid, ref->debug_id,
-					  ref->desc, ref->strong, ref->weak);
-			return -EINVAL;
-		}
-		ref->weak--;
-	}
-	if (ref->strong == 0 && ref->weak == 0)
-		binder_delete_ref(ref);
-	return 0;
-}
-
-static void binder_pop_transaction(struct binder_thread *target_thread,
-				   struct binder_transaction *t)
-{
-	if (target_thread) {
-		BUG_ON(target_thread->transaction_stack != t);
-		BUG_ON(target_thread->transaction_stack->from != target_thread);
-		target_thread->transaction_stack =
-			target_thread->transaction_stack->from_parent;
-		t->from = NULL;
-	}
-	t->need_reply = 0;
-	if (t->buffer)
-		t->buffer->transaction = NULL;
-	kfree(t);
-	binder_stats_deleted(BINDER_STAT_TRANSACTION);
-}
-
-static void binder_send_failed_reply(struct binder_transaction *t,
-				     uint32_t error_code)
-{
-	struct binder_thread *target_thread;
-	BUG_ON(t->flags & TF_ONE_WAY);
-	while (1) {
-		target_thread = t->from;
-		if (target_thread) {
-			if (target_thread->return_error != BR_OK &&
-			   target_thread->return_error2 == BR_OK) {
-				target_thread->return_error2 =
-					target_thread->return_error;
-				target_thread->return_error = BR_OK;
-			}
-			if (target_thread->return_error == BR_OK) {
-				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
-					     "send failed reply for transaction %d to %d:%d\n",
-					      t->debug_id, target_thread->proc->pid,
-					      target_thread->pid);
-
-				binder_pop_transaction(target_thread, t);
-				target_thread->return_error = error_code;
-				wake_up_interruptible(&target_thread->wait);
-			} else {
-				pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
-					target_thread->proc->pid,
-					target_thread->pid,
-					target_thread->return_error);
-			}
-			return;
-		} else {
-			struct binder_transaction *next = t->from_parent;
-
-			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
-				     "send failed reply for transaction %d, target dead\n",
-				     t->debug_id);
-
-			binder_pop_transaction(target_thread, t);
-			if (next == NULL) {
-				binder_debug(BINDER_DEBUG_DEAD_BINDER,
-					     "reply failed, no target thread at root\n");
-				return;
-			}
-			t = next;
-			binder_debug(BINDER_DEBUG_DEAD_BINDER,
-				     "reply failed, no target thread -- retry %d\n",
-				      t->debug_id);
-		}
-	}
-}
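-
-/*
- * If the original caller has died, the loop above pops each intermediate
- * transaction and retries the failed reply against its parent until a
- * live thread (or the root of the stack) is reached.
- */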
-
-static void binder_transaction_buffer_release(struct binder_proc *proc,
-					      struct binder_buffer *buffer,
-					      size_t *failed_at)
-{
-	size_t *offp, *off_end;
-	int debug_id = buffer->debug_id;
-
-	binder_debug(BINDER_DEBUG_TRANSACTION,
-		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
-		     proc->pid, buffer->debug_id,
-		     buffer->data_size, buffer->offsets_size, failed_at);
-
-	if (buffer->target_node)
-		binder_dec_node(buffer->target_node, 1, 0);
-
-	offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *)));
-	if (failed_at)
-		off_end = failed_at;
-	else
-		off_end = (void *)offp + buffer->offsets_size;
-	for (; offp < off_end; offp++) {
-		struct flat_binder_object *fp;
-		if (*offp > buffer->data_size - sizeof(*fp) ||
-		    buffer->data_size < sizeof(*fp) ||
-		    !IS_ALIGNED(*offp, sizeof(void *))) {
-			pr_err("transaction release %d bad offset %zd, size %zd\n",
-			 debug_id, *offp, buffer->data_size);
-			continue;
-		}
-		fp = (struct flat_binder_object *)(buffer->data + *offp);
-		switch (fp->type) {
-		case BINDER_TYPE_BINDER:
-		case BINDER_TYPE_WEAK_BINDER: {
-			struct binder_node *node = binder_get_node(proc, fp->binder);
-			if (node == NULL) {
-				pr_err("transaction release %d bad node %p\n",
-					debug_id, fp->binder);
-				break;
-			}
-			binder_debug(BINDER_DEBUG_TRANSACTION,
-				     "        node %d u%p\n",
-				     node->debug_id, node->ptr);
-			binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
-		} break;
-		case BINDER_TYPE_HANDLE:
-		case BINDER_TYPE_WEAK_HANDLE: {
-			struct binder_ref *ref = binder_get_ref(proc, fp->handle);
-			if (ref == NULL) {
-				pr_err("transaction release %d bad handle %ld\n",
-				 debug_id, fp->handle);
-				break;
-			}
-			binder_debug(BINDER_DEBUG_TRANSACTION,
-				     "        ref %d desc %d (node %d)\n",
-				     ref->debug_id, ref->desc, ref->node->debug_id);
-			binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
-		} break;
-
-		case BINDER_TYPE_FD:
-			binder_debug(BINDER_DEBUG_TRANSACTION,
-				     "        fd %ld\n", fp->handle);
-			if (failed_at)
-				task_close_fd(proc, fp->handle);
-			break;
-
-		default:
-			pr_err("transaction release %d bad object type %lx\n",
-				debug_id, fp->type);
-			break;
-		}
-	}
-}
-
-static void binder_transaction(struct binder_proc *proc,
-			       struct binder_thread *thread,
-			       struct binder_transaction_data *tr, int reply)
-{
-	struct binder_transaction *t;
-	struct binder_work *tcomplete;
-	size_t *offp, *off_end;
-	struct binder_proc *target_proc;
-	struct binder_thread *target_thread = NULL;
-	struct binder_node *target_node = NULL;
-	struct list_head *target_list;
-	wait_queue_head_t *target_wait;
-	struct binder_transaction *in_reply_to = NULL;
-	struct binder_transaction_log_entry *e;
-	uint32_t return_error;
-
-	e = binder_transaction_log_add(&binder_transaction_log);
-	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
-	e->from_proc = proc->pid;
-	e->from_thread = thread->pid;
-	e->target_handle = tr->target.handle;
-	e->data_size = tr->data_size;
-	e->offsets_size = tr->offsets_size;
-
-	if (reply) {
-		in_reply_to = thread->transaction_stack;
-		if (in_reply_to == NULL) {
-			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
-					  proc->pid, thread->pid);
-			return_error = BR_FAILED_REPLY;
-			goto err_empty_call_stack;
-		}
-		binder_set_nice(in_reply_to->saved_priority);
-		if (in_reply_to->to_thread != thread) {
-			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
-				proc->pid, thread->pid, in_reply_to->debug_id,
-				in_reply_to->to_proc ?
-				in_reply_to->to_proc->pid : 0,
-				in_reply_to->to_thread ?
-				in_reply_to->to_thread->pid : 0);
-			return_error = BR_FAILED_REPLY;
-			in_reply_to = NULL;
-			goto err_bad_call_stack;
-		}
-		thread->transaction_stack = in_reply_to->to_parent;
-		target_thread = in_reply_to->from;
-		if (target_thread == NULL) {
-			return_error = BR_DEAD_REPLY;
-			goto err_dead_binder;
-		}
-		if (target_thread->transaction_stack != in_reply_to) {
-			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
-				proc->pid, thread->pid,
-				target_thread->transaction_stack ?
-				target_thread->transaction_stack->debug_id : 0,
-				in_reply_to->debug_id);
-			return_error = BR_FAILED_REPLY;
-			in_reply_to = NULL;
-			target_thread = NULL;
-			goto err_dead_binder;
-		}
-		target_proc = target_thread->proc;
-	} else {
-		if (tr->target.handle) {
-			struct binder_ref *ref;
-			ref = binder_get_ref(proc, tr->target.handle);
-			if (ref == NULL) {
-				binder_user_error("%d:%d got transaction to invalid handle\n",
-					proc->pid, thread->pid);
-				return_error = BR_FAILED_REPLY;
-				goto err_invalid_target_handle;
-			}
-			target_node = ref->node;
-		} else {
-			target_node = binder_context_mgr_node;
-			if (target_node == NULL) {
-				return_error = BR_DEAD_REPLY;
-				goto err_no_context_mgr_node;
-			}
-		}
-		e->to_node = target_node->debug_id;
-		target_proc = target_node->proc;
-		if (target_proc == NULL) {
-			return_error = BR_DEAD_REPLY;
-			goto err_dead_binder;
-		}
-		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
-			struct binder_transaction *tmp;
-			tmp = thread->transaction_stack;
-			if (tmp->to_thread != thread) {
-				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
-					proc->pid, thread->pid, tmp->debug_id,
-					tmp->to_proc ? tmp->to_proc->pid : 0,
-					tmp->to_thread ?
-					tmp->to_thread->pid : 0);
-				return_error = BR_FAILED_REPLY;
-				goto err_bad_call_stack;
-			}
-			while (tmp) {
-				if (tmp->from && tmp->from->proc == target_proc)
-					target_thread = tmp->from;
-				tmp = tmp->from_parent;
-			}
-		}
-	}
-	if (target_thread) {
-		e->to_thread = target_thread->pid;
-		target_list = &target_thread->todo;
-		target_wait = &target_thread->wait;
-	} else {
-		target_list = &target_proc->todo;
-		target_wait = &target_proc->wait;
-	}
-	e->to_proc = target_proc->pid;
-
-	/* TODO: reuse incoming transaction for reply */
-	t = kzalloc(sizeof(*t), GFP_KERNEL);
-	if (t == NULL) {
-		return_error = BR_FAILED_REPLY;
-		goto err_alloc_t_failed;
-	}
-	binder_stats_created(BINDER_STAT_TRANSACTION);
-
-	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
-	if (tcomplete == NULL) {
-		return_error = BR_FAILED_REPLY;
-		goto err_alloc_tcomplete_failed;
-	}
-	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
-
-	t->debug_id = ++binder_last_id;
-	e->debug_id = t->debug_id;
-
-	if (reply)
-		binder_debug(BINDER_DEBUG_TRANSACTION,
-			     "%d:%d BC_REPLY %d -> %d:%d, data %p-%p size %zd-%zd\n",
-			     proc->pid, thread->pid, t->debug_id,
-			     target_proc->pid, target_thread->pid,
-			     tr->data.ptr.buffer, tr->data.ptr.offsets,
-			     tr->data_size, tr->offsets_size);
-	else
-		binder_debug(BINDER_DEBUG_TRANSACTION,
-			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %p-%p size %zd-%zd\n",
-			     proc->pid, thread->pid, t->debug_id,
-			     target_proc->pid, target_node->debug_id,
-			     tr->data.ptr.buffer, tr->data.ptr.offsets,
-			     tr->data_size, tr->offsets_size);
-
-	if (!reply && !(tr->flags & TF_ONE_WAY))
-		t->from = thread;
-	else
-		t->from = NULL;
-	t->sender_euid = proc->tsk->cred->euid;
-	t->to_proc = target_proc;
-	t->to_thread = target_thread;
-	t->code = tr->code;
-	t->flags = tr->flags;
-	t->priority = task_nice(current);
-
-	trace_binder_transaction(reply, t, target_node);
-
-	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
-		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
-	if (t->buffer == NULL) {
-		return_error = BR_FAILED_REPLY;
-		goto err_binder_alloc_buf_failed;
-	}
-	t->buffer->allow_user_free = 0;
-	t->buffer->debug_id = t->debug_id;
-	t->buffer->transaction = t;
-	t->buffer->target_node = target_node;
-	trace_binder_transaction_alloc_buf(t->buffer);
-	if (target_node)
-		binder_inc_node(target_node, 1, 0, NULL);
-
-	offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
-
-	if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
-		binder_user_error("%d:%d got transaction with invalid data ptr\n",
-				proc->pid, thread->pid);
-		return_error = BR_FAILED_REPLY;
-		goto err_copy_data_failed;
-	}
-	if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
-		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
-				proc->pid, thread->pid);
-		return_error = BR_FAILED_REPLY;
-		goto err_copy_data_failed;
-	}
-	if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
-		binder_user_error("%d:%d got transaction with invalid offsets size, %zd\n",
-				proc->pid, thread->pid, tr->offsets_size);
-		return_error = BR_FAILED_REPLY;
-		goto err_bad_offset;
-	}
-	off_end = (void *)offp + tr->offsets_size;
-	for (; offp < off_end; offp++) {
-		struct flat_binder_object *fp;
-		if (*offp > t->buffer->data_size - sizeof(*fp) ||
-		    t->buffer->data_size < sizeof(*fp) ||
-		    !IS_ALIGNED(*offp, sizeof(void *))) {
-			binder_user_error("%d:%d got transaction with invalid offset, %zd\n",
-					proc->pid, thread->pid, *offp);
-			return_error = BR_FAILED_REPLY;
-			goto err_bad_offset;
-		}
-		fp = (struct flat_binder_object *)(t->buffer->data + *offp);
-		switch (fp->type) {
-		case BINDER_TYPE_BINDER:
-		case BINDER_TYPE_WEAK_BINDER: {
-			struct binder_ref *ref;
-			struct binder_node *node = binder_get_node(proc, fp->binder);
-			if (node == NULL) {
-				node = binder_new_node(proc, fp->binder, fp->cookie);
-				if (node == NULL) {
-					return_error = BR_FAILED_REPLY;
-					goto err_binder_new_node_failed;
-				}
-				node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
-				node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
-			}
-			if (fp->cookie != node->cookie) {
-				binder_user_error("%d:%d sending u%p node %d, cookie mismatch %p != %p\n",
-					proc->pid, thread->pid,
-					fp->binder, node->debug_id,
-					fp->cookie, node->cookie);
-				return_error = BR_FAILED_REPLY;
-				goto err_binder_get_ref_for_node_failed;
-			}
-			ref = binder_get_ref_for_node(target_proc, node);
-			if (ref == NULL) {
-				return_error = BR_FAILED_REPLY;
-				goto err_binder_get_ref_for_node_failed;
-			}
-			if (fp->type == BINDER_TYPE_BINDER)
-				fp->type = BINDER_TYPE_HANDLE;
-			else
-				fp->type = BINDER_TYPE_WEAK_HANDLE;
-			fp->handle = ref->desc;
-			binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
-				       &thread->todo);
-
-			trace_binder_transaction_node_to_ref(t, node, ref);
-			binder_debug(BINDER_DEBUG_TRANSACTION,
-				     "        node %d u%p -> ref %d desc %d\n",
-				     node->debug_id, node->ptr, ref->debug_id,
-				     ref->desc);
-		} break;
-		case BINDER_TYPE_HANDLE:
-		case BINDER_TYPE_WEAK_HANDLE: {
-			struct binder_ref *ref = binder_get_ref(proc, fp->handle);
-			if (ref == NULL) {
-				binder_user_error("%d:%d got transaction with invalid handle, %ld\n",
-						proc->pid,
-						thread->pid, fp->handle);
-				return_error = BR_FAILED_REPLY;
-				goto err_binder_get_ref_failed;
-			}
-			if (ref->node->proc == target_proc) {
-				if (fp->type == BINDER_TYPE_HANDLE)
-					fp->type = BINDER_TYPE_BINDER;
-				else
-					fp->type = BINDER_TYPE_WEAK_BINDER;
-				fp->binder = ref->node->ptr;
-				fp->cookie = ref->node->cookie;
-				binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
-				trace_binder_transaction_ref_to_node(t, ref);
-				binder_debug(BINDER_DEBUG_TRANSACTION,
-					     "        ref %d desc %d -> node %d u%p\n",
-					     ref->debug_id, ref->desc, ref->node->debug_id,
-					     ref->node->ptr);
-			} else {
-				struct binder_ref *new_ref;
-				new_ref = binder_get_ref_for_node(target_proc, ref->node);
-				if (new_ref == NULL) {
-					return_error = BR_FAILED_REPLY;
-					goto err_binder_get_ref_for_node_failed;
-				}
-				fp->handle = new_ref->desc;
-				binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
-				trace_binder_transaction_ref_to_ref(t, ref,
-								    new_ref);
-				binder_debug(BINDER_DEBUG_TRANSACTION,
-					     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
-					     ref->debug_id, ref->desc, new_ref->debug_id,
-					     new_ref->desc, ref->node->debug_id);
-			}
-		} break;
-
-		case BINDER_TYPE_FD: {
-			int target_fd;
-			struct file *file;
-
-			if (reply) {
-				if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
-					binder_user_error("%d:%d got reply with fd, %ld, but target does not allow fds\n",
-						proc->pid, thread->pid, fp->handle);
-					return_error = BR_FAILED_REPLY;
-					goto err_fd_not_allowed;
-				}
-			} else if (!target_node->accept_fds) {
-				binder_user_error("%d:%d got transaction with fd, %ld, but target does not allow fds\n",
-					proc->pid, thread->pid, fp->handle);
-				return_error = BR_FAILED_REPLY;
-				goto err_fd_not_allowed;
-			}
-
-			file = fget(fp->handle);
-			if (file == NULL) {
-				binder_user_error("%d:%d got transaction with invalid fd, %ld\n",
-					proc->pid, thread->pid, fp->handle);
-				return_error = BR_FAILED_REPLY;
-				goto err_fget_failed;
-			}
-			target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
-			if (target_fd < 0) {
-				fput(file);
-				return_error = BR_FAILED_REPLY;
-				goto err_get_unused_fd_failed;
-			}
-			task_fd_install(target_proc, target_fd, file);
-			trace_binder_transaction_fd(t, fp->handle, target_fd);
-			binder_debug(BINDER_DEBUG_TRANSACTION,
-				     "        fd %ld -> %d\n", fp->handle, target_fd);
-			/* TODO: fput? */
-			fp->handle = target_fd;
-		} break;
-
-		default:
-			binder_user_error("%d:%d got transaction with invalid object type, %lx\n",
-				proc->pid, thread->pid, fp->type);
-			return_error = BR_FAILED_REPLY;
-			goto err_bad_object_type;
-		}
-	}
-	if (reply) {
-		BUG_ON(t->buffer->async_transaction != 0);
-		binder_pop_transaction(target_thread, in_reply_to);
-	} else if (!(t->flags & TF_ONE_WAY)) {
-		BUG_ON(t->buffer->async_transaction != 0);
-		t->need_reply = 1;
-		t->from_parent = thread->transaction_stack;
-		thread->transaction_stack = t;
-	} else {
-		BUG_ON(target_node == NULL);
-		BUG_ON(t->buffer->async_transaction != 1);
-		if (target_node->has_async_transaction) {
-			target_list = &target_node->async_todo;
-			target_wait = NULL;
-		} else
-			target_node->has_async_transaction = 1;
-	}
-	t->work.type = BINDER_WORK_TRANSACTION;
-	list_add_tail(&t->work.entry, target_list);
-	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
-	list_add_tail(&tcomplete->entry, &thread->todo);
-	if (target_wait)
-		wake_up_interruptible(target_wait);
-	return;
-
-err_get_unused_fd_failed:
-err_fget_failed:
-err_fd_not_allowed:
-err_binder_get_ref_for_node_failed:
-err_binder_get_ref_failed:
-err_binder_new_node_failed:
-err_bad_object_type:
-err_bad_offset:
-err_copy_data_failed:
-	trace_binder_transaction_failed_buffer_release(t->buffer);
-	binder_transaction_buffer_release(target_proc, t->buffer, offp);
-	t->buffer->transaction = NULL;
-	binder_free_buf(target_proc, t->buffer);
-err_binder_alloc_buf_failed:
-	kfree(tcomplete);
-	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
-err_alloc_tcomplete_failed:
-	kfree(t);
-	binder_stats_deleted(BINDER_STAT_TRANSACTION);
-err_alloc_t_failed:
-err_bad_call_stack:
-err_empty_call_stack:
-err_dead_binder:
-err_invalid_target_handle:
-err_no_context_mgr_node:
-	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
-		     "%d:%d transaction failed %d, size %zd-%zd\n",
-		     proc->pid, thread->pid, return_error,
-		     tr->data_size, tr->offsets_size);
-
-	{
-		struct binder_transaction_log_entry *fe;
-		fe = binder_transaction_log_add(&binder_transaction_log_failed);
-		*fe = *e;
-	}
-
-	BUG_ON(thread->return_error != BR_OK);
-	if (in_reply_to) {
-		thread->return_error = BR_TRANSACTION_COMPLETE;
-		binder_send_failed_reply(in_reply_to, return_error);
-	} else
-		thread->return_error = return_error;
-}
-
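-/*
- * binder_thread_write() - consume BC_* commands from the write buffer.
- * Processes one command at a time until the buffer is drained or a
- * pending return error stops the loop, bumping the global, per-process
- * and per-thread BC statistics as it goes.  *consumed is advanced past
- * each fully handled command so an interrupted write can be resumed.
- */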
-int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
-			void __user *buffer, int size, signed long *consumed)
-{
-	uint32_t cmd;
-	void __user *ptr = buffer + *consumed;
-	void __user *end = buffer + size;
-
-	while (ptr < end && thread->return_error == BR_OK) {
-		if (get_user(cmd, (uint32_t __user *)ptr))
-			return -EFAULT;
-		ptr += sizeof(uint32_t);
-		trace_binder_command(cmd);
-		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
-			binder_stats.bc[_IOC_NR(cmd)]++;
-			proc->stats.bc[_IOC_NR(cmd)]++;
-			thread->stats.bc[_IOC_NR(cmd)]++;
-		}
-		switch (cmd) {
-		case BC_INCREFS:
-		case BC_ACQUIRE:
-		case BC_RELEASE:
-		case BC_DECREFS: {
-			uint32_t target;
-			struct binder_ref *ref;
-			const char *debug_string;
-
-			if (get_user(target, (uint32_t __user *)ptr))
-				return -EFAULT;
-			ptr += sizeof(uint32_t);
-			if (target == 0 && binder_context_mgr_node &&
-			    (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
-				ref = binder_get_ref_for_node(proc,
-					       binder_context_mgr_node);
-				if (ref && ref->desc != target) {
-					binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
-						proc->pid, thread->pid,
-						ref->desc);
-				}
-			} else
-				ref = binder_get_ref(proc, target);
-			if (ref == NULL) {
-				binder_user_error("%d:%d refcount change on invalid ref %d\n",
-					proc->pid, thread->pid, target);
-				break;
-			}
-			switch (cmd) {
-			case BC_INCREFS:
-				debug_string = "IncRefs";
-				binder_inc_ref(ref, 0, NULL);
-				break;
-			case BC_ACQUIRE:
-				debug_string = "Acquire";
-				binder_inc_ref(ref, 1, NULL);
-				break;
-			case BC_RELEASE:
-				debug_string = "Release";
-				binder_dec_ref(ref, 1);
-				break;
-			case BC_DECREFS:
-			default:
-				debug_string = "DecRefs";
-				binder_dec_ref(ref, 0);
-				break;
-			}
-			binder_debug(BINDER_DEBUG_USER_REFS,
-				     "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
-				     proc->pid, thread->pid, debug_string, ref->debug_id,
-				     ref->desc, ref->strong, ref->weak, ref->node->debug_id);
-			break;
-		}
-		case BC_INCREFS_DONE:
-		case BC_ACQUIRE_DONE: {
-			void __user *node_ptr;
-			void *cookie;
-			struct binder_node *node;
-
-			if (get_user(node_ptr, (void * __user *)ptr))
-				return -EFAULT;
-			ptr += sizeof(void *);
-			if (get_user(cookie, (void * __user *)ptr))
-				return -EFAULT;
-			ptr += sizeof(void *);
-			node = binder_get_node(proc, node_ptr);
-			if (node == NULL) {
-				binder_user_error("%d:%d %s u%p no match\n",
-					proc->pid, thread->pid,
-					cmd == BC_INCREFS_DONE ?
-					"BC_INCREFS_DONE" :
-					"BC_ACQUIRE_DONE",
-					node_ptr);
-				break;
-			}
-			if (cookie != node->cookie) {
-				binder_user_error("%d:%d %s u%p node %d cookie mismatch %p != %p\n",
-					proc->pid, thread->pid,
-					cmd == BC_INCREFS_DONE ?
-					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
-					node_ptr, node->debug_id,
-					cookie, node->cookie);
-				break;
-			}
-			if (cmd == BC_ACQUIRE_DONE) {
-				if (node->pending_strong_ref == 0) {
-					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
-						proc->pid, thread->pid,
-						node->debug_id);
-					break;
-				}
-				node->pending_strong_ref = 0;
-			} else {
-				if (node->pending_weak_ref == 0) {
-					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
-						proc->pid, thread->pid,
-						node->debug_id);
-					break;
-				}
-				node->pending_weak_ref = 0;
-			}
-			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
-			binder_debug(BINDER_DEBUG_USER_REFS,
-				     "%d:%d %s node %d ls %d lw %d\n",
-				     proc->pid, thread->pid,
-				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
-				     node->debug_id, node->local_strong_refs, node->local_weak_refs);
-			break;
-		}
-		case BC_ATTEMPT_ACQUIRE:
-			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
-			return -EINVAL;
-		case BC_ACQUIRE_RESULT:
-			pr_err("BC_ACQUIRE_RESULT not supported\n");
-			return -EINVAL;
-
-		case BC_FREE_BUFFER: {
-			void __user *data_ptr;
-			struct binder_buffer *buffer;
-
-			if (get_user(data_ptr, (void * __user *)ptr))
-				return -EFAULT;
-			ptr += sizeof(void *);
-
-			buffer = binder_buffer_lookup(proc, data_ptr);
-			if (buffer == NULL) {
-				binder_user_error("%d:%d BC_FREE_BUFFER u%p no match\n",
-					proc->pid, thread->pid, data_ptr);
-				break;
-			}
-			if (!buffer->allow_user_free) {
-				binder_user_error("%d:%d BC_FREE_BUFFER u%p matched unreturned buffer\n",
-					proc->pid, thread->pid, data_ptr);
-				break;
-			}
-			binder_debug(BINDER_DEBUG_FREE_BUFFER,
-				     "%d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
-				     proc->pid, thread->pid, data_ptr, buffer->debug_id,
-				     buffer->transaction ? "active" : "finished");
-
-			if (buffer->transaction) {
-				buffer->transaction->buffer = NULL;
-				buffer->transaction = NULL;
-			}
-			if (buffer->async_transaction && buffer->target_node) {
-				BUG_ON(!buffer->target_node->has_async_transaction);
-				if (list_empty(&buffer->target_node->async_todo))
-					buffer->target_node->has_async_transaction = 0;
-				else
-					list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
-			}
-			trace_binder_transaction_buffer_release(buffer);
-			binder_transaction_buffer_release(proc, buffer, NULL);
-			binder_free_buf(proc, buffer);
-			break;
-		}
-
-		case BC_TRANSACTION:
-		case BC_REPLY: {
-			struct binder_transaction_data tr;
-
-			if (copy_from_user(&tr, ptr, sizeof(tr)))
-				return -EFAULT;
-			ptr += sizeof(tr);
-			binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
-			break;
-		}
-
-		case BC_REGISTER_LOOPER:
-			binder_debug(BINDER_DEBUG_THREADS,
-				     "%d:%d BC_REGISTER_LOOPER\n",
-				     proc->pid, thread->pid);
-			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
-				thread->looper |= BINDER_LOOPER_STATE_INVALID;
-				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
-					proc->pid, thread->pid);
-			} else if (proc->requested_threads == 0) {
-				thread->looper |= BINDER_LOOPER_STATE_INVALID;
-				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
-					proc->pid, thread->pid);
-			} else {
-				proc->requested_threads--;
-				proc->requested_threads_started++;
-			}
-			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
-			break;
-		case BC_ENTER_LOOPER:
-			binder_debug(BINDER_DEBUG_THREADS,
-				     "%d:%d BC_ENTER_LOOPER\n",
-				     proc->pid, thread->pid);
-			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
-				thread->looper |= BINDER_LOOPER_STATE_INVALID;
-				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
-					proc->pid, thread->pid);
-			}
-			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
-			break;
-		case BC_EXIT_LOOPER:
-			binder_debug(BINDER_DEBUG_THREADS,
-				     "%d:%d BC_EXIT_LOOPER\n",
-				     proc->pid, thread->pid);
-			thread->looper |= BINDER_LOOPER_STATE_EXITED;
-			break;
-
-		case BC_REQUEST_DEATH_NOTIFICATION:
-		case BC_CLEAR_DEATH_NOTIFICATION: {
-			uint32_t target;
-			void __user *cookie;
-			struct binder_ref *ref;
-			struct binder_ref_death *death;
-
-			if (get_user(target, (uint32_t __user *)ptr))
-				return -EFAULT;
-			ptr += sizeof(uint32_t);
-			if (get_user(cookie, (void __user * __user *)ptr))
-				return -EFAULT;
-			ptr += sizeof(void *);
-			ref = binder_get_ref(proc, target);
-			if (ref == NULL) {
-				binder_user_error("%d:%d %s invalid ref %d\n",
-					proc->pid, thread->pid,
-					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
-					"BC_REQUEST_DEATH_NOTIFICATION" :
-					"BC_CLEAR_DEATH_NOTIFICATION",
-					target);
-				break;
-			}
-
-			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
-				     "%d:%d %s %p ref %d desc %d s %d w %d for node %d\n",
-				     proc->pid, thread->pid,
-				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
-				     "BC_REQUEST_DEATH_NOTIFICATION" :
-				     "BC_CLEAR_DEATH_NOTIFICATION",
-				     cookie, ref->debug_id, ref->desc,
-				     ref->strong, ref->weak, ref->node->debug_id);
-
-			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
-				if (ref->death) {
-					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
-						proc->pid, thread->pid);
-					break;
-				}
-				death = kzalloc(sizeof(*death), GFP_KERNEL);
-				if (death == NULL) {
-					thread->return_error = BR_ERROR;
-					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
-						     "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
-						     proc->pid, thread->pid);
-					break;
-				}
-				binder_stats_created(BINDER_STAT_DEATH);
-				INIT_LIST_HEAD(&death->work.entry);
-				death->cookie = cookie;
-				ref->death = death;
-				if (ref->node->proc == NULL) {
-					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
-					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
-						list_add_tail(&ref->death->work.entry, &thread->todo);
-					} else {
-						list_add_tail(&ref->death->work.entry, &proc->todo);
-						wake_up_interruptible(&proc->wait);
-					}
-				}
-			} else {
-				if (ref->death == NULL) {
-					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
-						proc->pid, thread->pid);
-					break;
-				}
-				death = ref->death;
-				if (death->cookie != cookie) {
-					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %p != %p\n",
-						proc->pid, thread->pid,
-						death->cookie, cookie);
-					break;
-				}
-				ref->death = NULL;
-				if (list_empty(&death->work.entry)) {
-					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
-					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
-						list_add_tail(&death->work.entry, &thread->todo);
-					} else {
-						list_add_tail(&death->work.entry, &proc->todo);
-						wake_up_interruptible(&proc->wait);
-					}
-				} else {
-					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
-					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
-				}
-			}
-		} break;
-		case BC_DEAD_BINDER_DONE: {
-			struct binder_work *w;
-			void __user *cookie;
-			struct binder_ref_death *death = NULL;
-			if (get_user(cookie, (void __user * __user *)ptr))
-				return -EFAULT;
-
-			ptr += sizeof(void *);
-			list_for_each_entry(w, &proc->delivered_death, entry) {
-				struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
-				if (tmp_death->cookie == cookie) {
-					death = tmp_death;
-					break;
-				}
-			}
-			binder_debug(BINDER_DEBUG_DEAD_BINDER,
-				     "%d:%d BC_DEAD_BINDER_DONE %p found %p\n",
-				     proc->pid, thread->pid, cookie, death);
-			if (death == NULL) {
-				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %p not found\n",
-					proc->pid, thread->pid, cookie);
-				break;
-			}
-
-			list_del_init(&death->work.entry);
-			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
-				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
-				if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
-					list_add_tail(&death->work.entry, &thread->todo);
-				} else {
-					list_add_tail(&death->work.entry, &proc->todo);
-					wake_up_interruptible(&proc->wait);
-				}
-			}
-		} break;
-
-		default:
-			pr_err("%d:%d unknown command %d\n",
-			       proc->pid, thread->pid, cmd);
-			return -EINVAL;
-		}
-		*consumed = ptr - buffer;
-	}
-	return 0;
-}
-
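-/*
- * binder_stat_br() - account a BR_* return code in the global,
- * per-process and per-thread statistics (bounds-checked via _IOC_NR).
- */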
-void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread,
-		    uint32_t cmd)
-{
-	trace_binder_return(cmd);
-	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
-		binder_stats.br[_IOC_NR(cmd)]++;
-		proc->stats.br[_IOC_NR(cmd)]++;
-		thread->stats.br[_IOC_NR(cmd)]++;
-	}
-}
-
-static int binder_has_proc_work(struct binder_proc *proc,
-				struct binder_thread *thread)
-{
-	return !list_empty(&proc->todo) ||
-		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
-}
-
-static int binder_has_thread_work(struct binder_thread *thread)
-{
-	return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
-		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
-}
-
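-/*
- * binder_thread_read() - fill the read buffer with BR_* work.
- * Emits a leading BR_NOOP on a fresh read and flushes any pending
- * return errors, then blocks (or returns -EAGAIN when non-blocking)
- * until thread or process work arrives.  Each work item is translated
- * into at most one BR_* command; before returning, BR_SPAWN_LOOPER is
- * queued when no ready or requested looper threads remain below
- * max_threads.
- */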
-static int binder_thread_read(struct binder_proc *proc,
-			      struct binder_thread *thread,
-			      void  __user *buffer, int size,
-			      signed long *consumed, int non_block)
-{
-	void __user *ptr = buffer + *consumed;
-	void __user *end = buffer + size;
-
-	int ret = 0;
-	int wait_for_proc_work;
-
-	if (*consumed == 0) {
-		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
-			return -EFAULT;
-		ptr += sizeof(uint32_t);
-	}
-
-retry:
-	wait_for_proc_work = thread->transaction_stack == NULL &&
-				list_empty(&thread->todo);
-
-	if (thread->return_error != BR_OK && ptr < end) {
-		if (thread->return_error2 != BR_OK) {
-			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
-				return -EFAULT;
-			ptr += sizeof(uint32_t);
-			binder_stat_br(proc, thread, thread->return_error2);
-			if (ptr == end)
-				goto done;
-			thread->return_error2 = BR_OK;
-		}
-		if (put_user(thread->return_error, (uint32_t __user *)ptr))
-			return -EFAULT;
-		ptr += sizeof(uint32_t);
-		binder_stat_br(proc, thread, thread->return_error);
-		thread->return_error = BR_OK;
-		goto done;
-	}
-
-	thread->looper |= BINDER_LOOPER_STATE_WAITING;
-	if (wait_for_proc_work)
-		proc->ready_threads++;
-
-	binder_unlock(__func__);
-
-	trace_binder_wait_for_work(wait_for_proc_work,
-				   !!thread->transaction_stack,
-				   !list_empty(&thread->todo));
-	if (wait_for_proc_work) {
-		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
-					BINDER_LOOPER_STATE_ENTERED))) {
-			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
-				proc->pid, thread->pid, thread->looper);
-			wait_event_interruptible(binder_user_error_wait,
-						 binder_stop_on_user_error < 2);
-		}
-		binder_set_nice(proc->default_priority);
-		if (non_block) {
-			if (!binder_has_proc_work(proc, thread))
-				ret = -EAGAIN;
-		} else
-			ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
-	} else {
-		if (non_block) {
-			if (!binder_has_thread_work(thread))
-				ret = -EAGAIN;
-		} else
-			ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
-	}
-
-	binder_lock(__func__);
-
-	if (wait_for_proc_work)
-		proc->ready_threads--;
-	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
-
-	if (ret)
-		return ret;
-
-	while (1) {
-		uint32_t cmd;
-		struct binder_transaction_data tr;
-		struct binder_work *w;
-		struct binder_transaction *t = NULL;
-
-		if (!list_empty(&thread->todo))
-			w = list_first_entry(&thread->todo, struct binder_work, entry);
-		else if (!list_empty(&proc->todo) && wait_for_proc_work)
-			w = list_first_entry(&proc->todo, struct binder_work, entry);
-		else {
-			if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
-				goto retry;
-			break;
-		}
-
-		if (end - ptr < sizeof(tr) + 4)
-			break;
-
-		switch (w->type) {
-		case BINDER_WORK_TRANSACTION: {
-			t = container_of(w, struct binder_transaction, work);
-		} break;
-		case BINDER_WORK_TRANSACTION_COMPLETE: {
-			cmd = BR_TRANSACTION_COMPLETE;
-			if (put_user(cmd, (uint32_t __user *)ptr))
-				return -EFAULT;
-			ptr += sizeof(uint32_t);
-
-			binder_stat_br(proc, thread, cmd);
-			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
-				     "%d:%d BR_TRANSACTION_COMPLETE\n",
-				     proc->pid, thread->pid);
-
-			list_del(&w->entry);
-			kfree(w);
-			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
-		} break;
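-		/*
-		 * BINDER_WORK_NODE: bring user space's view of the node's
-		 * strong/weak refcounts in line with the kernel's, issuing
-		 * at most one BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS
-		 * per pass; a node with no refs left is freed here.
-		 */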
-		case BINDER_WORK_NODE: {
-			struct binder_node *node = container_of(w, struct binder_node, work);
-			uint32_t cmd = BR_NOOP;
-			const char *cmd_name;
-			int strong = node->internal_strong_refs || node->local_strong_refs;
-			int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
-			if (weak && !node->has_weak_ref) {
-				cmd = BR_INCREFS;
-				cmd_name = "BR_INCREFS";
-				node->has_weak_ref = 1;
-				node->pending_weak_ref = 1;
-				node->local_weak_refs++;
-			} else if (strong && !node->has_strong_ref) {
-				cmd = BR_ACQUIRE;
-				cmd_name = "BR_ACQUIRE";
-				node->has_strong_ref = 1;
-				node->pending_strong_ref = 1;
-				node->local_strong_refs++;
-			} else if (!strong && node->has_strong_ref) {
-				cmd = BR_RELEASE;
-				cmd_name = "BR_RELEASE";
-				node->has_strong_ref = 0;
-			} else if (!weak && node->has_weak_ref) {
-				cmd = BR_DECREFS;
-				cmd_name = "BR_DECREFS";
-				node->has_weak_ref = 0;
-			}
-			if (cmd != BR_NOOP) {
-				if (put_user(cmd, (uint32_t __user *)ptr))
-					return -EFAULT;
-				ptr += sizeof(uint32_t);
-				if (put_user(node->ptr, (void * __user *)ptr))
-					return -EFAULT;
-				ptr += sizeof(void *);
-				if (put_user(node->cookie, (void * __user *)ptr))
-					return -EFAULT;
-				ptr += sizeof(void *);
-
-				binder_stat_br(proc, thread, cmd);
-				binder_debug(BINDER_DEBUG_USER_REFS,
-					     "%d:%d %s %d u%p c%p\n",
-					     proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
-			} else {
-				list_del_init(&w->entry);
-				if (!weak && !strong) {
-					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
-						     "%d:%d node %d u%p c%p deleted\n",
-						     proc->pid, thread->pid, node->debug_id,
-						     node->ptr, node->cookie);
-					rb_erase(&node->rb_node, &proc->nodes);
-					kfree(node);
-					binder_stats_deleted(BINDER_STAT_NODE);
-				} else {
-					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
-						     "%d:%d node %d u%p c%p state unchanged\n",
-						     proc->pid, thread->pid, node->debug_id, node->ptr,
-						     node->cookie);
-				}
-			}
-		} break;
-		case BINDER_WORK_DEAD_BINDER:
-		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
-		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
-			struct binder_ref_death *death;
-			uint32_t cmd;
-
-			death = container_of(w, struct binder_ref_death, work);
-			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
-				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
-			else
-				cmd = BR_DEAD_BINDER;
-			if (put_user(cmd, (uint32_t __user *)ptr))
-				return -EFAULT;
-			ptr += sizeof(uint32_t);
-			if (put_user(death->cookie, (void * __user *)ptr))
-				return -EFAULT;
-			ptr += sizeof(void *);
-			binder_stat_br(proc, thread, cmd);
-			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
-				     "%d:%d %s %p\n",
-				      proc->pid, thread->pid,
-				      cmd == BR_DEAD_BINDER ?
-				      "BR_DEAD_BINDER" :
-				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
-				      death->cookie);
-
-			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
-				list_del(&w->entry);
-				kfree(death);
-				binder_stats_deleted(BINDER_STAT_DEATH);
-			} else
-				list_move(&w->entry, &proc->delivered_death);
-			if (cmd == BR_DEAD_BINDER)
-				goto done; /* DEAD_BINDER notifications can cause transactions */
-		} break;
-		}
-
-		if (!t)
-			continue;
-
-		BUG_ON(t->buffer == NULL);
-		if (t->buffer->target_node) {
-			struct binder_node *target_node = t->buffer->target_node;
-			tr.target.ptr = target_node->ptr;
-			tr.cookie = target_node->cookie;
-			t->saved_priority = task_nice(current);
-			if (t->priority < target_node->min_priority &&
-			    !(t->flags & TF_ONE_WAY))
-				binder_set_nice(t->priority);
-			else if (!(t->flags & TF_ONE_WAY) ||
-				 t->saved_priority > target_node->min_priority)
-				binder_set_nice(target_node->min_priority);
-			cmd = BR_TRANSACTION;
-		} else {
-			tr.target.ptr = NULL;
-			tr.cookie = NULL;
-			cmd = BR_REPLY;
-		}
-		tr.code = t->code;
-		tr.flags = t->flags;
-		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
-
-		if (t->from) {
-			struct task_struct *sender = t->from->proc->tsk;
-			tr.sender_pid = task_tgid_nr_ns(sender,
-							task_active_pid_ns(current));
-		} else {
-			tr.sender_pid = 0;
-		}
-
-		tr.data_size = t->buffer->data_size;
-		tr.offsets_size = t->buffer->offsets_size;
-		tr.data.ptr.buffer = (void *)t->buffer->data +
-					proc->user_buffer_offset;
-		tr.data.ptr.offsets = tr.data.ptr.buffer +
-					ALIGN(t->buffer->data_size,
-					    sizeof(void *));
-
-		if (put_user(cmd, (uint32_t __user *)ptr))
-			return -EFAULT;
-		ptr += sizeof(uint32_t);
-		if (copy_to_user(ptr, &tr, sizeof(tr)))
-			return -EFAULT;
-		ptr += sizeof(tr);
-
-		trace_binder_transaction_received(t);
-		binder_stat_br(proc, thread, cmd);
-		binder_debug(BINDER_DEBUG_TRANSACTION,
-			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %p-%p\n",
-			     proc->pid, thread->pid,
-			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
-			     "BR_REPLY",
-			     t->debug_id, t->from ? t->from->proc->pid : 0,
-			     t->from ? t->from->pid : 0, cmd,
-			     t->buffer->data_size, t->buffer->offsets_size,
-			     tr.data.ptr.buffer, tr.data.ptr.offsets);
-
-		list_del(&t->work.entry);
-		t->buffer->allow_user_free = 1;
-		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
-			t->to_parent = thread->transaction_stack;
-			t->to_thread = thread;
-			thread->transaction_stack = t;
-		} else {
-			t->buffer->transaction = NULL;
-			kfree(t);
-			binder_stats_deleted(BINDER_STAT_TRANSACTION);
-		}
-		break;
-	}
-
-done:
-
-	*consumed = ptr - buffer;
-	if (proc->requested_threads + proc->ready_threads == 0 &&
-	    proc->requested_threads_started < proc->max_threads &&
-	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
-	     BINDER_LOOPER_STATE_ENTERED))
-	     /* user space fails to spawn a new thread if we leave this out */) {
-		proc->requested_threads++;
-		binder_debug(BINDER_DEBUG_THREADS,
-			     "%d:%d BR_SPAWN_LOOPER\n",
-			     proc->pid, thread->pid);
-		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
-			return -EFAULT;
-		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
-	}
-	return 0;
-}
-
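-/*
- * binder_release_work() - free work items that will never be delivered.
- * Synchronous transactions that still expect a reply get BR_DEAD_REPLY;
- * everything else is simply unlinked, freed and accounted.
- */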
-static void binder_release_work(struct list_head *list)
-{
-	struct binder_work *w;
-	while (!list_empty(list)) {
-		w = list_first_entry(list, struct binder_work, entry);
-		list_del_init(&w->entry);
-		switch (w->type) {
-		case BINDER_WORK_TRANSACTION: {
-			struct binder_transaction *t;
-
-			t = container_of(w, struct binder_transaction, work);
-			if (t->buffer->target_node &&
-			    !(t->flags & TF_ONE_WAY)) {
-				binder_send_failed_reply(t, BR_DEAD_REPLY);
-			} else {
-				binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
-					"undelivered transaction %d\n",
-					t->debug_id);
-				t->buffer->transaction = NULL;
-				kfree(t);
-				binder_stats_deleted(BINDER_STAT_TRANSACTION);
-			}
-		} break;
-		case BINDER_WORK_TRANSACTION_COMPLETE: {
-			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
-				"undelivered TRANSACTION_COMPLETE\n");
-			kfree(w);
-			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
-		} break;
-		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
-		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
-			struct binder_ref_death *death;
-
-			death = container_of(w, struct binder_ref_death, work);
-			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
-				"undelivered death notification, %p\n",
-				death->cookie);
-			kfree(death);
-			binder_stats_deleted(BINDER_STAT_DEATH);
-		} break;
-		default:
-			pr_err("unexpected work type, %d, not freed\n",
-			       w->type);
-			break;
-		}
-	}
-}
-
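-/*
- * binder_get_thread() - look up the binder_thread for current in the
- * proc->threads rbtree (keyed by pid), creating and inserting a new
- * entry with NEED_RETURN set on first use.
- */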
-static struct binder_thread *binder_get_thread(struct binder_proc *proc)
-{
-	struct binder_thread *thread = NULL;
-	struct rb_node *parent = NULL;
-	struct rb_node **p = &proc->threads.rb_node;
-
-	while (*p) {
-		parent = *p;
-		thread = rb_entry(parent, struct binder_thread, rb_node);
-
-		if (current->pid < thread->pid)
-			p = &(*p)->rb_left;
-		else if (current->pid > thread->pid)
-			p = &(*p)->rb_right;
-		else
-			break;
-	}
-	if (*p == NULL) {
-		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
-		if (thread == NULL)
-			return NULL;
-		binder_stats_created(BINDER_STAT_THREAD);
-		thread->proc = proc;
-		thread->pid = current->pid;
-		init_waitqueue_head(&thread->wait);
-		INIT_LIST_HEAD(&thread->todo);
-		rb_link_node(&thread->rb_node, parent, p);
-		rb_insert_color(&thread->rb_node, &proc->threads);
-		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
-		thread->return_error = BR_OK;
-		thread->return_error2 = BR_OK;
-	}
-	return thread;
-}
-
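-/*
- * binder_free_thread() - tear down a binder_thread.
- * Unlinks it from proc->threads, detaches it from every transaction on
- * its stack (failing a reply the other side is still waiting for),
- * releases queued work and frees the thread.  Returns the number of
- * transactions that were still active.
- */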
-static int binder_free_thread(struct binder_proc *proc,
-			      struct binder_thread *thread)
-{
-	struct binder_transaction *t;
-	struct binder_transaction *send_reply = NULL;
-	int active_transactions = 0;
-
-	rb_erase(&thread->rb_node, &proc->threads);
-	t = thread->transaction_stack;
-	if (t && t->to_thread == thread)
-		send_reply = t;
-	while (t) {
-		active_transactions++;
-		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
-			     "release %d:%d transaction %d %s, still active\n",
-			      proc->pid, thread->pid,
-			     t->debug_id,
-			     (t->to_thread == thread) ? "in" : "out");
-
-		if (t->to_thread == thread) {
-			t->to_proc = NULL;
-			t->to_thread = NULL;
-			if (t->buffer) {
-				t->buffer->transaction = NULL;
-				t->buffer = NULL;
-			}
-			t = t->to_parent;
-		} else if (t->from == thread) {
-			t->from = NULL;
-			t = t->from_parent;
-		} else
-			BUG();
-	}
-	if (send_reply)
-		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
-	binder_release_work(&thread->todo);
-	kfree(thread);
-	binder_stats_deleted(BINDER_STAT_THREAD);
-	return active_transactions;
-}
-
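-/*
- * binder_poll() - report POLLIN when thread-local work (or, for an
- * idle thread, process-wide work) is pending, registering on the
- * matching wait queue otherwise.
- */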
-static unsigned int binder_poll(struct file *filp,
-				struct poll_table_struct *wait)
-{
-	struct binder_proc *proc = filp->private_data;
-	struct binder_thread *thread = NULL;
-	int wait_for_proc_work;
-
-	binder_lock(__func__);
-
-	thread = binder_get_thread(proc);
-
-	wait_for_proc_work = thread->transaction_stack == NULL &&
-		list_empty(&thread->todo) && thread->return_error == BR_OK;
-
-	binder_unlock(__func__);
-
-	if (wait_for_proc_work) {
-		if (binder_has_proc_work(proc, thread))
-			return POLLIN;
-		poll_wait(filp, &proc->wait, wait);
-		if (binder_has_proc_work(proc, thread))
-			return POLLIN;
-	} else {
-		if (binder_has_thread_work(thread))
-			return POLLIN;
-		poll_wait(filp, &thread->wait, wait);
-		if (binder_has_thread_work(thread))
-			return POLLIN;
-	}
-	return 0;
-}
-
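-/*
- * binder_ioctl() - dispatch BINDER_WRITE_READ, BINDER_SET_MAX_THREADS,
- * BINDER_SET_CONTEXT_MGR, BINDER_THREAD_EXIT and BINDER_VERSION.
- * All commands run under the global binder lock; BINDER_WRITE_READ
- * copies a struct binder_write_read in, runs the write buffer before
- * the read buffer, and copies the consumed counts back out.
- */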
-static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
-	int ret;
-	struct binder_proc *proc = filp->private_data;
-	struct binder_thread *thread;
-	unsigned int size = _IOC_SIZE(cmd);
-	void __user *ubuf = (void __user *)arg;
-
-	/*pr_info("binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/
-
-	trace_binder_ioctl(cmd, arg);
-
-	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
-	if (ret)
-		goto err_unlocked;
-
-	binder_lock(__func__);
-	thread = binder_get_thread(proc);
-	if (thread == NULL) {
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	switch (cmd) {
-	case BINDER_WRITE_READ: {
-		struct binder_write_read bwr;
-		if (size != sizeof(struct binder_write_read)) {
-			ret = -EINVAL;
-			goto err;
-		}
-		if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
-			ret = -EFAULT;
-			goto err;
-		}
-		binder_debug(BINDER_DEBUG_READ_WRITE,
-			     "%d:%d write %ld at %08lx, read %ld at %08lx\n",
-			     proc->pid, thread->pid, bwr.write_size,
-			     bwr.write_buffer, bwr.read_size, bwr.read_buffer);
-
-		if (bwr.write_size > 0) {
-			ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
-			trace_binder_write_done(ret);
-			if (ret < 0) {
-				bwr.read_consumed = 0;
-				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
-					ret = -EFAULT;
-				goto err;
-			}
-		}
-		if (bwr.read_size > 0) {
-			ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
-			trace_binder_read_done(ret);
-			if (!list_empty(&proc->todo))
-				wake_up_interruptible(&proc->wait);
-			if (ret < 0) {
-				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
-					ret = -EFAULT;
-				goto err;
-			}
-		}
-		binder_debug(BINDER_DEBUG_READ_WRITE,
-			     "%d:%d wrote %ld of %ld, read return %ld of %ld\n",
-			     proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
-			     bwr.read_consumed, bwr.read_size);
-		if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
-			ret = -EFAULT;
-			goto err;
-		}
-		break;
-	}
-	case BINDER_SET_MAX_THREADS:
-		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
-			ret = -EINVAL;
-			goto err;
-		}
-		break;
-	case BINDER_SET_CONTEXT_MGR:
-		if (binder_context_mgr_node != NULL) {
-			pr_err("BINDER_SET_CONTEXT_MGR already set\n");
-			ret = -EBUSY;
-			goto err;
-		}
-		if (uid_valid(binder_context_mgr_uid)) {
-			if (!uid_eq(binder_context_mgr_uid, current->cred->euid)) {
-				pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
-				       from_kuid(&init_user_ns, current->cred->euid),
-				       from_kuid(&init_user_ns, binder_context_mgr_uid));
-				ret = -EPERM;
-				goto err;
-			}
-		} else
-			binder_context_mgr_uid = current->cred->euid;
-		binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
-		if (binder_context_mgr_node == NULL) {
-			ret = -ENOMEM;
-			goto err;
-		}
-		binder_context_mgr_node->local_weak_refs++;
-		binder_context_mgr_node->local_strong_refs++;
-		binder_context_mgr_node->has_strong_ref = 1;
-		binder_context_mgr_node->has_weak_ref = 1;
-		break;
-	case BINDER_THREAD_EXIT:
-		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
-			     proc->pid, thread->pid);
-		binder_free_thread(proc, thread);
-		thread = NULL;
-		break;
-	case BINDER_VERSION:
-		if (size != sizeof(struct binder_version)) {
-			ret = -EINVAL;
-			goto err;
-		}
-		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) {
-			ret = -EINVAL;
-			goto err;
-		}
-		break;
-	default:
-		ret = -EINVAL;
-		goto err;
-	}
-	ret = 0;
-err:
-	if (thread)
-		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
-	binder_unlock(__func__);
-	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
-	if (ret && ret != -ERESTARTSYS)
-		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
-err_unlocked:
-	trace_binder_ioctl_done(ret);
-	return ret;
-}
-
-static void binder_vma_open(struct vm_area_struct *vma)
-{
-	struct binder_proc *proc = vma->vm_private_data;
-	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
-		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
-		     proc->pid, vma->vm_start, vma->vm_end,
-		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
-		     (unsigned long)pgprot_val(vma->vm_page_prot));
-}
-
-static void binder_vma_close(struct vm_area_struct *vma)
-{
-	struct binder_proc *proc = vma->vm_private_data;
-	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
-		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
-		     proc->pid, vma->vm_start, vma->vm_end,
-		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
-		     (unsigned long)pgprot_val(vma->vm_page_prot));
-	proc->vma = NULL;
-	proc->vma_vm_mm = NULL;
-	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
-}
-
-static const struct vm_operations_struct binder_vm_ops = {
-	.open = binder_vma_open,
-	.close = binder_vma_close,
-};
-
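-/*
- * binder_mmap() - set up the per-process transaction buffer.
- * Caps the mapping at 4MB, rejects mappings with FORBIDDEN_MMAP_FLAGS
- * set, reserves a matching kernel VM area, records the fixed
- * user/kernel address offset, allocates the page-pointer array and
- * seeds the free-buffer tree with one buffer spanning the mapping.
- */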
-static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
-{
-	int ret;
-	struct vm_struct *area;
-	struct binder_proc *proc = filp->private_data;
-	const char *failure_string;
-	struct binder_buffer *buffer;
-
-	if (proc->tsk != current)
-		return -EINVAL;
-
-	if ((vma->vm_end - vma->vm_start) > SZ_4M)
-		vma->vm_end = vma->vm_start + SZ_4M;
-
-	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
-		     "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
-		     proc->pid, vma->vm_start, vma->vm_end,
-		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
-		     (unsigned long)pgprot_val(vma->vm_page_prot));
-
-	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
-		ret = -EPERM;
-		failure_string = "bad vm_flags";
-		goto err_bad_arg;
-	}
-	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
-
-	mutex_lock(&binder_mmap_lock);
-	if (proc->buffer) {
-		ret = -EBUSY;
-		failure_string = "already mapped";
-		goto err_already_mapped;
-	}
-
-	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
-	if (area == NULL) {
-		ret = -ENOMEM;
-		failure_string = "get_vm_area";
-		goto err_get_vm_area_failed;
-	}
-	proc->buffer = area->addr;
-	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
-	mutex_unlock(&binder_mmap_lock);
-
-#ifdef CONFIG_CPU_CACHE_VIPT
-	if (cache_is_vipt_aliasing()) {
-		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
-			pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
-			vma->vm_start += PAGE_SIZE;
-		}
-	}
-#endif
-	proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
-	if (proc->pages == NULL) {
-		ret = -ENOMEM;
-		failure_string = "alloc page array";
-		goto err_alloc_pages_failed;
-	}
-	proc->buffer_size = vma->vm_end - vma->vm_start;
-
-	vma->vm_ops = &binder_vm_ops;
-	vma->vm_private_data = proc;
-
-	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
-		ret = -ENOMEM;
-		failure_string = "alloc small buf";
-		goto err_alloc_small_buf_failed;
-	}
-	buffer = proc->buffer;
-	INIT_LIST_HEAD(&proc->buffers);
-	list_add(&buffer->entry, &proc->buffers);
-	buffer->free = 1;
-	binder_insert_free_buffer(proc, buffer);
-	proc->free_async_space = proc->buffer_size / 2;
-	barrier();
-	proc->files = get_files_struct(current);
-	proc->vma = vma;
-	proc->vma_vm_mm = vma->vm_mm;
-
-	/*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
-		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
-	return 0;
-
-err_alloc_small_buf_failed:
-	kfree(proc->pages);
-	proc->pages = NULL;
-err_alloc_pages_failed:
-	mutex_lock(&binder_mmap_lock);
-	vfree(proc->buffer);
-	proc->buffer = NULL;
-err_get_vm_area_failed:
-err_already_mapped:
-	mutex_unlock(&binder_mmap_lock);
-err_bad_arg:
-	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
-	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
-	return ret;
-}
-
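-/*
- * binder_open() - allocate a binder_proc for the opening process, pin
- * its task_struct, link it into binder_procs and publish a debugfs
- * state file named after the pid.
- */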
-static int binder_open(struct inode *nodp, struct file *filp)
-{
-	struct binder_proc *proc;
-
-	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
-		     current->group_leader->pid, current->pid);
-
-	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
-	if (proc == NULL)
-		return -ENOMEM;
-	get_task_struct(current);
-	proc->tsk = current;
-	INIT_LIST_HEAD(&proc->todo);
-	init_waitqueue_head(&proc->wait);
-	proc->default_priority = task_nice(current);
-
-	binder_lock(__func__);
-
-	binder_stats_created(BINDER_STAT_PROC);
-	hlist_add_head(&proc->proc_node, &binder_procs);
-	proc->pid = current->group_leader->pid;
-	INIT_LIST_HEAD(&proc->delivered_death);
-	filp->private_data = proc;
-
-	binder_unlock(__func__);
-
-	if (binder_debugfs_dir_entry_proc) {
-		char strbuf[11];
-		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
-		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
-			binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
-	}
-
-	return 0;
-}
-
-static int binder_flush(struct file *filp, fl_owner_t id)
-{
-	struct binder_proc *proc = filp->private_data;
-
-	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
-
-	return 0;
-}
-
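-/*
- * binder_deferred_flush() - force every thread of @proc back to user
- * space by setting NEED_RETURN on each and waking all waiters.
- */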
-static void binder_deferred_flush(struct binder_proc *proc)
-{
-	struct rb_node *n;
-	int wake_count = 0;
-	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
-		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
-		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
-		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
-			wake_up_interruptible(&thread->wait);
-			wake_count++;
-		}
-	}
-	wake_up_interruptible_all(&proc->wait);
-
-	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
-		     "binder_flush: %d woke %d threads\n", proc->pid,
-		     wake_count);
-}
-
-static int binder_release(struct inode *nodp, struct file *filp)
-{
-	struct binder_proc *proc = filp->private_data;
-	debugfs_remove(proc->debugfs_entry);
-	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
-
-	return 0;
-}
-
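-/*
- * binder_node_release() - handle a node whose owning process is dying.
- * A node with no remaining refs is freed outright; otherwise it is
- * moved onto binder_dead_nodes and a BINDER_WORK_DEAD_BINDER item is
- * queued for every ref that registered a death notification.  Returns
- * the updated incoming-reference tally for the caller's accounting.
- */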
-static int binder_node_release(struct binder_node *node, int refs)
-{
-	struct binder_ref *ref;
-	int death = 0;
-
-	list_del_init(&node->work.entry);
-	binder_release_work(&node->async_todo);
-
-	if (hlist_empty(&node->refs)) {
-		kfree(node);
-		binder_stats_deleted(BINDER_STAT_NODE);
-
-		return refs;
-	}
-
-	node->proc = NULL;
-	node->local_strong_refs = 0;
-	node->local_weak_refs = 0;
-	hlist_add_head(&node->dead_node, &binder_dead_nodes);
-
-	hlist_for_each_entry(ref, &node->refs, node_entry) {
-		refs++;
-
-		if (!ref->death)
-			continue;
-
-		death++;
-
-		if (list_empty(&ref->death->work.entry)) {
-			ref->death->work.type = BINDER_WORK_DEAD_BINDER;
-			list_add_tail(&ref->death->work.entry,
-				      &ref->proc->todo);
-			wake_up_interruptible(&ref->proc->wait);
-		} else
-			BUG();
-	}
-
-	binder_debug(BINDER_DEBUG_DEAD_BINDER,
-		     "node %d now dead, refs %d, death %d\n",
-		     node->debug_id, refs, death);
-
-	return refs;
-}
-
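-/*
- * binder_deferred_release() - final teardown of a binder_proc: drop
- * the context manager node if this process owned it, then free all
- * threads, nodes, refs, undelivered work, leftover buffers and mapped
- * pages, log the totals and free the proc itself.
- */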
-static void binder_deferred_release(struct binder_proc *proc)
-{
-	struct binder_transaction *t;
-	struct rb_node *n;
-	int threads, nodes, incoming_refs, outgoing_refs, buffers,
-		active_transactions, page_count;
-
-	BUG_ON(proc->vma);
-	BUG_ON(proc->files);
-
-	hlist_del(&proc->proc_node);
-
-	if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
-		binder_debug(BINDER_DEBUG_DEAD_BINDER,
-			     "%s: %d context_mgr_node gone\n",
-			     __func__, proc->pid);
-		binder_context_mgr_node = NULL;
-	}
-
-	threads = 0;
-	active_transactions = 0;
-	while ((n = rb_first(&proc->threads))) {
-		struct binder_thread *thread;
-
-		thread = rb_entry(n, struct binder_thread, rb_node);
-		threads++;
-		active_transactions += binder_free_thread(proc, thread);
-	}
-
-	nodes = 0;
-	incoming_refs = 0;
-	while ((n = rb_first(&proc->nodes))) {
-		struct binder_node *node;
-
-		node = rb_entry(n, struct binder_node, rb_node);
-		nodes++;
-		rb_erase(&node->rb_node, &proc->nodes);
-		incoming_refs = binder_node_release(node, incoming_refs);
-	}
-
-	outgoing_refs = 0;
-	while ((n = rb_first(&proc->refs_by_desc))) {
-		struct binder_ref *ref;
-
-		ref = rb_entry(n, struct binder_ref, rb_node_desc);
-		outgoing_refs++;
-		binder_delete_ref(ref);
-	}
-
-	binder_release_work(&proc->todo);
-	binder_release_work(&proc->delivered_death);
-
-	buffers = 0;
-	while ((n = rb_first(&proc->allocated_buffers))) {
-		struct binder_buffer *buffer;
-
-		buffer = rb_entry(n, struct binder_buffer, rb_node);
-
-		t = buffer->transaction;
-		if (t) {
-			t->buffer = NULL;
-			buffer->transaction = NULL;
-			pr_err("release proc %d, transaction %d, not freed\n",
-			       proc->pid, t->debug_id);
-			/*BUG();*/
-		}
-
-		binder_free_buf(proc, buffer);
-		buffers++;
-	}
-
-	binder_stats_deleted(BINDER_STAT_PROC);
-
-	page_count = 0;
-	if (proc->pages) {
-		int i;
-
-		for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
-			void *page_addr;
-
-			if (!proc->pages[i])
-				continue;
-
-			page_addr = proc->buffer + i * PAGE_SIZE;
-			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-				     "%s: %d: page %d at %p not freed\n",
-				     __func__, proc->pid, i, page_addr);
-			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-			__free_page(proc->pages[i]);
-			page_count++;
-		}
-		kfree(proc->pages);
-		vfree(proc->buffer);
-	}
-
-	put_task_struct(proc->tsk);
-
-	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
-		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
-		     __func__, proc->pid, threads, nodes, incoming_refs,
-		     outgoing_refs, active_transactions, buffers, page_count);
-
-	kfree(proc);
-}
-
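-/*
- * binder_deferred_func() - workqueue handler that drains
- * binder_deferred_list, running the recorded PUT_FILES, FLUSH and
- * RELEASE actions for each queued process under the binder lock; the
- * files struct is put only after the lock is dropped.
- */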
-static void binder_deferred_func(struct work_struct *work)
-{
-	struct binder_proc *proc;
-	struct files_struct *files;
-
-	int defer;
-	do {
-		binder_lock(__func__);
-		mutex_lock(&binder_deferred_lock);
-		if (!hlist_empty(&binder_deferred_list)) {
-			proc = hlist_entry(binder_deferred_list.first,
-					struct binder_proc, deferred_work_node);
-			hlist_del_init(&proc->deferred_work_node);
-			defer = proc->deferred_work;
-			proc->deferred_work = 0;
-		} else {
-			proc = NULL;
-			defer = 0;
-		}
-		mutex_unlock(&binder_deferred_lock);
-
-		files = NULL;
-		if (defer & BINDER_DEFERRED_PUT_FILES) {
-			files = proc->files;
-			if (files)
-				proc->files = NULL;
-		}
-
-		if (defer & BINDER_DEFERRED_FLUSH)
-			binder_deferred_flush(proc);
-
-		if (defer & BINDER_DEFERRED_RELEASE)
-			binder_deferred_release(proc); /* frees proc */
-
-		binder_unlock(__func__);
-		if (files)
-			put_files_struct(files);
-	} while (proc);
-}
-
-static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
-
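-/*
- * binder_defer_work() - record deferred work bits for @proc and queue
- * it (at most once) for binder_deferred_func().
- */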
-static void
-binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
-{
-	mutex_lock(&binder_deferred_lock);
-	proc->deferred_work |= defer;
-	if (hlist_unhashed(&proc->deferred_work_node)) {
-		hlist_add_head(&proc->deferred_work_node,
-				&binder_deferred_list);
-		queue_work(binder_deferred_workqueue, &binder_deferred_work);
-	}
-	mutex_unlock(&binder_deferred_lock);
-}
-
-static void print_binder_transaction(struct seq_file *m, const char *prefix,
-				     struct binder_transaction *t)
-{
-	seq_printf(m,
-		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
-		   prefix, t->debug_id, t,
-		   t->from ? t->from->proc->pid : 0,
-		   t->from ? t->from->pid : 0,
-		   t->to_proc ? t->to_proc->pid : 0,
-		   t->to_thread ? t->to_thread->pid : 0,
-		   t->code, t->flags, t->priority, t->need_reply);
-	if (t->buffer == NULL) {
-		seq_puts(m, " buffer free\n");
-		return;
-	}
-	if (t->buffer->target_node)
-		seq_printf(m, " node %d",
-			   t->buffer->target_node->debug_id);
-	seq_printf(m, " size %zd:%zd data %p\n",
-		   t->buffer->data_size, t->buffer->offsets_size,
-		   t->buffer->data);
-}
-
-static void print_binder_buffer(struct seq_file *m, const char *prefix,
-				struct binder_buffer *buffer)
-{
-	seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
-		   prefix, buffer->debug_id, buffer->data,
-		   buffer->data_size, buffer->offsets_size,
-		   buffer->transaction ? "active" : "delivered");
-}
-
-static void print_binder_work(struct seq_file *m, const char *prefix,
-			      const char *transaction_prefix,
-			      struct binder_work *w)
-{
-	struct binder_node *node;
-	struct binder_transaction *t;
-
-	switch (w->type) {
-	case BINDER_WORK_TRANSACTION:
-		t = container_of(w, struct binder_transaction, work);
-		print_binder_transaction(m, transaction_prefix, t);
-		break;
-	case BINDER_WORK_TRANSACTION_COMPLETE:
-		seq_printf(m, "%stransaction complete\n", prefix);
-		break;
-	case BINDER_WORK_NODE:
-		node = container_of(w, struct binder_node, work);
-		seq_printf(m, "%snode work %d: u%p c%p\n",
-			   prefix, node->debug_id, node->ptr, node->cookie);
-		break;
-	case BINDER_WORK_DEAD_BINDER:
-		seq_printf(m, "%shas dead binder\n", prefix);
-		break;
-	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
-		seq_printf(m, "%shas cleared dead binder\n", prefix);
-		break;
-	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
-		seq_printf(m, "%shas cleared death notification\n", prefix);
-		break;
-	default:
-		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
-		break;
-	}
-}
-
-static void print_binder_thread(struct seq_file *m,
-				struct binder_thread *thread,
-				int print_always)
-{
-	struct binder_transaction *t;
-	struct binder_work *w;
-	size_t start_pos = m->count;
-	size_t header_pos;
-
-	seq_printf(m, "  thread %d: l %02x\n", thread->pid, thread->looper);
-	header_pos = m->count;
-	t = thread->transaction_stack;
-	while (t) {
-		if (t->from == thread) {
-			print_binder_transaction(m,
-						 "    outgoing transaction", t);
-			t = t->from_parent;
-		} else if (t->to_thread == thread) {
-			print_binder_transaction(m,
-						 "    incoming transaction", t);
-			t = t->to_parent;
-		} else {
-			print_binder_transaction(m, "    bad transaction", t);
-			t = NULL;
-		}
-	}
-	list_for_each_entry(w, &thread->todo, entry) {
-		print_binder_work(m, "    ", "    pending transaction", w);
-	}
-	if (!print_always && m->count == header_pos)
-		m->count = start_pos;
-}
-
-static void print_binder_node(struct seq_file *m, struct binder_node *node)
-{
-	struct binder_ref *ref;
-	struct binder_work *w;
-	int count;
-
-	count = 0;
-	hlist_for_each_entry(ref, &node->refs, node_entry)
-		count++;
-
-	seq_printf(m, "  node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d",
-		   node->debug_id, node->ptr, node->cookie,
-		   node->has_strong_ref, node->has_weak_ref,
-		   node->local_strong_refs, node->local_weak_refs,
-		   node->internal_strong_refs, count);
-	if (count) {
-		seq_puts(m, " proc");
-		hlist_for_each_entry(ref, &node->refs, node_entry)
-			seq_printf(m, " %d", ref->proc->pid);
-	}
-	seq_puts(m, "\n");
-	list_for_each_entry(w, &node->async_todo, entry)
-		print_binder_work(m, "    ",
-				  "    pending async transaction", w);
-}
-
-static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
-{
-	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %p\n",
-		   ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
-		   ref->node->debug_id, ref->strong, ref->weak, ref->death);
-}
-
-static void print_binder_proc(struct seq_file *m,
-			      struct binder_proc *proc, int print_all)
-{
-	struct binder_work *w;
-	struct rb_node *n;
-	size_t start_pos = m->count;
-	size_t header_pos;
-
-	seq_printf(m, "proc %d\n", proc->pid);
-	header_pos = m->count;
-
-	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
-		print_binder_thread(m, rb_entry(n, struct binder_thread,
-						rb_node), print_all);
-	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
-		struct binder_node *node = rb_entry(n, struct binder_node,
-						    rb_node);
-		if (print_all || node->has_async_transaction)
-			print_binder_node(m, node);
-	}
-	if (print_all) {
-		for (n = rb_first(&proc->refs_by_desc);
-		     n != NULL;
-		     n = rb_next(n))
-			print_binder_ref(m, rb_entry(n, struct binder_ref,
-						     rb_node_desc));
-	}
-	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
-		print_binder_buffer(m, "  buffer",
-				    rb_entry(n, struct binder_buffer, rb_node));
-	list_for_each_entry(w, &proc->todo, entry)
-		print_binder_work(m, "  ", "  pending transaction", w);
-	list_for_each_entry(w, &proc->delivered_death, entry) {
-		seq_puts(m, "  has delivered dead binder\n");
-		break;
-	}
-	if (!print_all && m->count == header_pos)
-		m->count = start_pos;
-}
-
-static const char * const binder_return_strings[] = {
-	"BR_ERROR",
-	"BR_OK",
-	"BR_TRANSACTION",
-	"BR_REPLY",
-	"BR_ACQUIRE_RESULT",
-	"BR_DEAD_REPLY",
-	"BR_TRANSACTION_COMPLETE",
-	"BR_INCREFS",
-	"BR_ACQUIRE",
-	"BR_RELEASE",
-	"BR_DECREFS",
-	"BR_ATTEMPT_ACQUIRE",
-	"BR_NOOP",
-	"BR_SPAWN_LOOPER",
-	"BR_FINISHED",
-	"BR_DEAD_BINDER",
-	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
-	"BR_FAILED_REPLY"
-};
-
-static const char * const binder_command_strings[] = {
-	"BC_TRANSACTION",
-	"BC_REPLY",
-	"BC_ACQUIRE_RESULT",
-	"BC_FREE_BUFFER",
-	"BC_INCREFS",
-	"BC_ACQUIRE",
-	"BC_RELEASE",
-	"BC_DECREFS",
-	"BC_INCREFS_DONE",
-	"BC_ACQUIRE_DONE",
-	"BC_ATTEMPT_ACQUIRE",
-	"BC_REGISTER_LOOPER",
-	"BC_ENTER_LOOPER",
-	"BC_EXIT_LOOPER",
-	"BC_REQUEST_DEATH_NOTIFICATION",
-	"BC_CLEAR_DEATH_NOTIFICATION",
-	"BC_DEAD_BINDER_DONE"
-};
-
-static const char * const binder_objstat_strings[] = {
-	"proc",
-	"thread",
-	"node",
-	"ref",
-	"death",
-	"transaction",
-	"transaction_complete"
-};
-
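-/*
- * print_binder_stats() - dump the nonzero BC/BR counters and the
- * active/total object counts; the BUILD_BUG_ONs keep the string
- * tables in step with the stat arrays.
- */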
-static void print_binder_stats(struct seq_file *m, const char *prefix,
-			       struct binder_stats *stats)
-{
-	int i;
-
-	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
-		     ARRAY_SIZE(binder_command_strings));
-	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
-		if (stats->bc[i])
-			seq_printf(m, "%s%s: %d\n", prefix,
-				   binder_command_strings[i], stats->bc[i]);
-	}
-
-	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
-		     ARRAY_SIZE(binder_return_strings));
-	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
-		if (stats->br[i])
-			seq_printf(m, "%s%s: %d\n", prefix,
-				   binder_return_strings[i], stats->br[i]);
-	}
-
-	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
-		     ARRAY_SIZE(binder_objstat_strings));
-	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
-		     ARRAY_SIZE(stats->obj_deleted));
-	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
-		if (stats->obj_created[i] || stats->obj_deleted[i])
-			seq_printf(m, "%s%s: active %d total %d\n", prefix,
-				binder_objstat_strings[i],
-				stats->obj_created[i] - stats->obj_deleted[i],
-				stats->obj_created[i]);
-	}
-}
-
-static void print_binder_proc_stats(struct seq_file *m,
-				    struct binder_proc *proc)
-{
-	struct binder_work *w;
-	struct rb_node *n;
-	int count, strong, weak;
-
-	seq_printf(m, "proc %d\n", proc->pid);
-	count = 0;
-	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
-		count++;
-	seq_printf(m, "  threads: %d\n", count);
-	seq_printf(m, "  requested threads: %d+%d/%d\n"
-			"  ready threads %d\n"
-			"  free async space %zd\n", proc->requested_threads,
-			proc->requested_threads_started, proc->max_threads,
-			proc->ready_threads, proc->free_async_space);
-	count = 0;
-	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
-		count++;
-	seq_printf(m, "  nodes: %d\n", count);
-	count = 0;
-	strong = 0;
-	weak = 0;
-	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
-		struct binder_ref *ref = rb_entry(n, struct binder_ref,
-						  rb_node_desc);
-		count++;
-		strong += ref->strong;
-		weak += ref->weak;
-	}
-	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
-
-	count = 0;
-	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
-		count++;
-	seq_printf(m, "  buffers: %d\n", count);
-
-	count = 0;
-	list_for_each_entry(w, &proc->todo, entry) {
-		switch (w->type) {
-		case BINDER_WORK_TRANSACTION:
-			count++;
-			break;
-		default:
-			break;
-		}
-	}
-	seq_printf(m, "  pending transactions: %d\n", count);
-
-	print_binder_stats(m, "  ", &proc->stats);
-}
-
-
-static int binder_state_show(struct seq_file *m, void *unused)
-{
-	struct binder_proc *proc;
-	struct binder_node *node;
-	int do_lock = !binder_debug_no_lock;
-
-	if (do_lock)
-		binder_lock(__func__);
-
-	seq_puts(m, "binder state:\n");
-
-	if (!hlist_empty(&binder_dead_nodes))
-		seq_puts(m, "dead nodes:\n");
-	hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
-		print_binder_node(m, node);
-
-	hlist_for_each_entry(proc, &binder_procs, proc_node)
-		print_binder_proc(m, proc, 1);
-	if (do_lock)
-		binder_unlock(__func__);
-	return 0;
-}
-
-static int binder_stats_show(struct seq_file *m, void *unused)
-{
-	struct binder_proc *proc;
-	int do_lock = !binder_debug_no_lock;
-
-	if (do_lock)
-		binder_lock(__func__);
-
-	seq_puts(m, "binder stats:\n");
-
-	print_binder_stats(m, "", &binder_stats);
-
-	hlist_for_each_entry(proc, &binder_procs, proc_node)
-		print_binder_proc_stats(m, proc);
-	if (do_lock)
-		binder_unlock(__func__);
-	return 0;
-}
-
-static int binder_transactions_show(struct seq_file *m, void *unused)
-{
-	struct binder_proc *proc;
-	int do_lock = !binder_debug_no_lock;
-
-	if (do_lock)
-		binder_lock(__func__);
-
-	seq_puts(m, "binder transactions:\n");
-	hlist_for_each_entry(proc, &binder_procs, proc_node)
-		print_binder_proc(m, proc, 0);
-	if (do_lock)
-		binder_unlock(__func__);
-	return 0;
-}
-
-static int binder_proc_show(struct seq_file *m, void *unused)
-{
-	struct binder_proc *proc = m->private;
-	int do_lock = !binder_debug_no_lock;
-
-	if (do_lock)
-		binder_lock(__func__);
-	seq_puts(m, "binder proc state:\n");
-	print_binder_proc(m, proc, 1);
-	if (do_lock)
-		binder_unlock(__func__);
-	return 0;
-}
-
-static void print_binder_transaction_log_entry(struct seq_file *m,
-					struct binder_transaction_log_entry *e)
-{
-	seq_printf(m,
-		   "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
-		   e->debug_id, (e->call_type == 2) ? "reply" :
-		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
-		   e->from_thread, e->to_proc, e->to_thread, e->to_node,
-		   e->target_handle, e->data_size, e->offsets_size);
-}
-
-static int binder_transaction_log_show(struct seq_file *m, void *unused)
-{
-	struct binder_transaction_log *log = m->private;
-	int i;
-
-	if (log->full) {
-		for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
-			print_binder_transaction_log_entry(m, &log->entry[i]);
-	}
-	for (i = 0; i < log->next; i++)
-		print_binder_transaction_log_entry(m, &log->entry[i]);
-	return 0;
-}
-
-static const struct file_operations binder_fops = {
-	.owner = THIS_MODULE,
-	.poll = binder_poll,
-	.unlocked_ioctl = binder_ioctl,
-	.mmap = binder_mmap,
-	.open = binder_open,
-	.flush = binder_flush,
-	.release = binder_release,
-};
-
-static struct miscdevice binder_miscdev = {
-	.minor = MISC_DYNAMIC_MINOR,
-	.name = "binder",
-	.fops = &binder_fops
-};
-
-BINDER_DEBUG_ENTRY(state);
-BINDER_DEBUG_ENTRY(stats);
-BINDER_DEBUG_ENTRY(transactions);
-BINDER_DEBUG_ENTRY(transaction_log);
-
-static int __init binder_init(void)
-{
-	int ret;
-
-	binder_deferred_workqueue = create_singlethread_workqueue("binder");
-	if (!binder_deferred_workqueue)
-		return -ENOMEM;
-
-	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
-	if (binder_debugfs_dir_entry_root)
-		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
-						 binder_debugfs_dir_entry_root);
-	ret = misc_register(&binder_miscdev);
-	if (binder_debugfs_dir_entry_root) {
-		debugfs_create_file("state",
-				    S_IRUGO,
-				    binder_debugfs_dir_entry_root,
-				    NULL,
-				    &binder_state_fops);
-		debugfs_create_file("stats",
-				    S_IRUGO,
-				    binder_debugfs_dir_entry_root,
-				    NULL,
-				    &binder_stats_fops);
-		debugfs_create_file("transactions",
-				    S_IRUGO,
-				    binder_debugfs_dir_entry_root,
-				    NULL,
-				    &binder_transactions_fops);
-		debugfs_create_file("transaction_log",
-				    S_IRUGO,
-				    binder_debugfs_dir_entry_root,
-				    &binder_transaction_log,
-				    &binder_transaction_log_fops);
-		debugfs_create_file("failed_transaction_log",
-				    S_IRUGO,
-				    binder_debugfs_dir_entry_root,
-				    &binder_transaction_log_failed,
-				    &binder_transaction_log_fops);
-	}
-	return ret;
-}
-
-device_initcall(binder_init);
-
-#define CREATE_TRACE_POINTS
-#include "binder_trace.h"
-
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/android/binder.h b/drivers/staging/android/binder.h
deleted file mode 100644
index dbe81ce..0000000
--- a/drivers/staging/android/binder.h
+++ /dev/null
@@ -1,330 +0,0 @@
-/*
- * Copyright (C) 2008 Google, Inc.
- *
- * Based on, but no longer compatible with, the original
- * OpenBinder.org binder driver interface, which is:
- *
- * Copyright (c) 2005 Palmsource, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_BINDER_H
-#define _LINUX_BINDER_H
-
-#include <linux/ioctl.h>
-
-#define B_PACK_CHARS(c1, c2, c3, c4) \
-	((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
-#define B_TYPE_LARGE 0x85
-
-enum {
-	BINDER_TYPE_BINDER	= B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
-	BINDER_TYPE_WEAK_BINDER	= B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
-	BINDER_TYPE_HANDLE	= B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
-	BINDER_TYPE_WEAK_HANDLE	= B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
-	BINDER_TYPE_FD		= B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
-};
-
-enum {
-	FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
-	FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
-};
-
-/*
- * This is the flattened representation of a Binder object for transfer
- * between processes.  The 'offsets' supplied as part of a binder transaction
- * contains offsets into the data where these structures occur.  The Binder
- * driver takes care of re-writing the structure type and data as it moves
- * between processes.
- */
-struct flat_binder_object {
-	/* 8 bytes for large_flat_header. */
-	unsigned long		type;
-	unsigned long		flags;
-
-	/* 8 bytes of data. */
-	union {
-		void __user	*binder;	/* local object */
-		signed long	handle;		/* remote object */
-	};
-
-	/* extra data associated with local object */
-	void __user		*cookie;
-};
-
-/*
- * On 64-bit platforms where user code may run in 32-bits the driver must
- * translate the buffer (and local binder) addresses appropriately.
- */
-
-struct binder_write_read {
-	signed long	write_size;	/* bytes to write */
-	signed long	write_consumed;	/* bytes consumed by driver */
-	unsigned long	write_buffer;
-	signed long	read_size;	/* bytes to read */
-	signed long	read_consumed;	/* bytes consumed by driver */
-	unsigned long	read_buffer;
-};
-
-/* Use with BINDER_VERSION, driver fills in fields. */
-struct binder_version {
-	/* driver protocol version -- increment with incompatible change */
-	signed long	protocol_version;
-};
-
-/* This is the current protocol version. */
-#define BINDER_CURRENT_PROTOCOL_VERSION 7
-
-#define BINDER_WRITE_READ		_IOWR('b', 1, struct binder_write_read)
-#define	BINDER_SET_IDLE_TIMEOUT		_IOW('b', 3, __s64)
-#define	BINDER_SET_MAX_THREADS		_IOW('b', 5, size_t)
-#define	BINDER_SET_IDLE_PRIORITY	_IOW('b', 6, __s32)
-#define	BINDER_SET_CONTEXT_MGR		_IOW('b', 7, __s32)
-#define	BINDER_THREAD_EXIT		_IOW('b', 8, __s32)
-#define BINDER_VERSION			_IOWR('b', 9, struct binder_version)
-
-/*
- * NOTE: Two special error codes you should check for when calling
- * in to the driver are:
- *
- * EINTR -- The operation has been interupted.  This should be
- * handled by retrying the ioctl() until a different error code
- * is returned.
- *
- * ECONNREFUSED -- The driver is no longer accepting operations
- * from your process.  That is, the process is being destroyed.
- * You should handle this by exiting from your process.  Note
- * that once this error code is returned, all further calls to
- * the driver from any thread will return this same code.
- */
-
-enum transaction_flags {
-	TF_ONE_WAY	= 0x01,	/* this is a one-way call: async, no return */
-	TF_ROOT_OBJECT	= 0x04,	/* contents are the component's root object */
-	TF_STATUS_CODE	= 0x08,	/* contents are a 32-bit status code */
-	TF_ACCEPT_FDS	= 0x10,	/* allow replies with file descriptors */
-};
-
-struct binder_transaction_data {
-	/* The first two are only used for bcTRANSACTION and brTRANSACTION,
-	 * identifying the target and contents of the transaction.
-	 */
-	union {
-		size_t	handle;	/* target descriptor of command transaction */
-		void	*ptr;	/* target descriptor of return transaction */
-	} target;
-	void		*cookie;	/* target object cookie */
-	unsigned int	code;		/* transaction command */
-
-	/* General information about the transaction. */
-	unsigned int	flags;
-	pid_t		sender_pid;
-	uid_t		sender_euid;
-	size_t		data_size;	/* number of bytes of data */
-	size_t		offsets_size;	/* number of bytes of offsets */
-
-	/* If this transaction is inline, the data immediately
-	 * follows here; otherwise, it ends with a pointer to
-	 * the data buffer.
-	 */
-	union {
-		struct {
-			/* transaction data */
-			const void __user	*buffer;
-			/* offsets from buffer to flat_binder_object structs */
-			const void __user	*offsets;
-		} ptr;
-		uint8_t	buf[8];
-	} data;
-};
-
-struct binder_ptr_cookie {
-	void *ptr;
-	void *cookie;
-};
-
-struct binder_pri_desc {
-	int priority;
-	int desc;
-};
-
-struct binder_pri_ptr_cookie {
-	int priority;
-	void *ptr;
-	void *cookie;
-};
-
-enum binder_driver_return_protocol {
-	BR_ERROR = _IOR('r', 0, int),
-	/*
-	 * int: error code
-	 */
-
-	BR_OK = _IO('r', 1),
-	/* No parameters! */
-
-	BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
-	BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
-	/*
-	 * binder_transaction_data: the received command.
-	 */
-
-	BR_ACQUIRE_RESULT = _IOR('r', 4, int),
-	/*
-	 * not currently supported
-	 * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
-	 * Else the remote object has acquired a primary reference.
-	 */
-
-	BR_DEAD_REPLY = _IO('r', 5),
-	/*
-	 * The target of the last transaction (either a bcTRANSACTION or
-	 * a bcATTEMPT_ACQUIRE) is no longer with us.  No parameters.
-	 */
-
-	BR_TRANSACTION_COMPLETE = _IO('r', 6),
-	/*
-	 * No parameters... always refers to the last transaction requested
-	 * (including replies).  Note that this will be sent even for
-	 * asynchronous transactions.
-	 */
-
-	BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
-	BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
-	BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
-	BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
-	/*
-	 * void *:	ptr to binder
-	 * void *: cookie for binder
-	 */
-
-	BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
-	/*
-	 * not currently supported
-	 * int:	priority
-	 * void *: ptr to binder
-	 * void *: cookie for binder
-	 */
-
-	BR_NOOP = _IO('r', 12),
-	/*
-	 * No parameters.  Do nothing and examine the next command.  It exists
-	 * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
-	 */
-
-	BR_SPAWN_LOOPER = _IO('r', 13),
-	/*
-	 * No parameters.  The driver has determined that a process has no
-	 * threads waiting to service incoming transactions.  When a process
-	 * receives this command, it must spawn a new service thread and
-	 * register it via bcENTER_LOOPER.
-	 */
-
-	BR_FINISHED = _IO('r', 14),
-	/*
-	 * not currently supported
-	 * stop threadpool thread
-	 */
-
-	BR_DEAD_BINDER = _IOR('r', 15, void *),
-	/*
-	 * void *: cookie
-	 */
-	BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
-	/*
-	 * void *: cookie
-	 */
-
-	BR_FAILED_REPLY = _IO('r', 17),
-	/*
-	 * The the last transaction (either a bcTRANSACTION or
-	 * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory).  No parameters.
-	 */
-};
-
-enum binder_driver_command_protocol {
-	BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
-	BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
-	/*
-	 * binder_transaction_data: the sent command.
-	 */
-
-	BC_ACQUIRE_RESULT = _IOW('c', 2, int),
-	/*
-	 * not currently supported
-	 * int:  0 if the last BR_ATTEMPT_ACQUIRE was not successful.
-	 * Else you have acquired a primary reference on the object.
-	 */
-
-	BC_FREE_BUFFER = _IOW('c', 3, int),
-	/*
-	 * void *: ptr to transaction data received on a read
-	 */
-
-	BC_INCREFS = _IOW('c', 4, int),
-	BC_ACQUIRE = _IOW('c', 5, int),
-	BC_RELEASE = _IOW('c', 6, int),
-	BC_DECREFS = _IOW('c', 7, int),
-	/*
-	 * int:	descriptor
-	 */
-
-	BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
-	BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
-	/*
-	 * void *: ptr to binder
-	 * void *: cookie for binder
-	 */
-
-	BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
-	/*
-	 * not currently supported
-	 * int: priority
-	 * int: descriptor
-	 */
-
-	BC_REGISTER_LOOPER = _IO('c', 11),
-	/*
-	 * No parameters.
-	 * Register a spawned looper thread with the device.
-	 */
-
-	BC_ENTER_LOOPER = _IO('c', 12),
-	BC_EXIT_LOOPER = _IO('c', 13),
-	/*
-	 * No parameters.
-	 * These two commands are sent as an application-level thread
-	 * enters and exits the binder loop, respectively.  They are
-	 * used so the binder can have an accurate count of the number
-	 * of looping threads it has available.
-	 */
-
-	BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie),
-	/*
-	 * void *: ptr to binder
-	 * void *: cookie
-	 */
-
-	BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie),
-	/*
-	 * void *: ptr to binder
-	 * void *: cookie
-	 */
-
-	BC_DEAD_BINDER_DONE = _IOW('c', 16, void *),
-	/*
-	 * void *: cookie
-	 */
-};
-
-#endif /* _LINUX_BINDER_H */
-
diff --git a/drivers/staging/android/binder_trace.h b/drivers/staging/android/binder_trace.h
deleted file mode 100644
index 82a567c..0000000
--- a/drivers/staging/android/binder_trace.h
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM binder
-
-#if !defined(_BINDER_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _BINDER_TRACE_H
-
-#include <linux/tracepoint.h>
-
-struct binder_buffer;
-struct binder_node;
-struct binder_proc;
-struct binder_ref;
-struct binder_thread;
-struct binder_transaction;
-
-TRACE_EVENT(binder_ioctl,
-	TP_PROTO(unsigned int cmd, unsigned long arg),
-	TP_ARGS(cmd, arg),
-
-	TP_STRUCT__entry(
-		__field(unsigned int, cmd)
-		__field(unsigned long, arg)
-	),
-	TP_fast_assign(
-		__entry->cmd = cmd;
-		__entry->arg = arg;
-	),
-	TP_printk("cmd=0x%x arg=0x%lx", __entry->cmd, __entry->arg)
-);
-
-DECLARE_EVENT_CLASS(binder_lock_class,
-	TP_PROTO(const char *tag),
-	TP_ARGS(tag),
-	TP_STRUCT__entry(
-		__field(const char *, tag)
-	),
-	TP_fast_assign(
-		__entry->tag = tag;
-	),
-	TP_printk("tag=%s", __entry->tag)
-);
-
-#define DEFINE_BINDER_LOCK_EVENT(name)	\
-DEFINE_EVENT(binder_lock_class, name,	\
-	TP_PROTO(const char *func), \
-	TP_ARGS(func))
-
-DEFINE_BINDER_LOCK_EVENT(binder_lock);
-DEFINE_BINDER_LOCK_EVENT(binder_locked);
-DEFINE_BINDER_LOCK_EVENT(binder_unlock);
-
-DECLARE_EVENT_CLASS(binder_function_return_class,
-	TP_PROTO(int ret),
-	TP_ARGS(ret),
-	TP_STRUCT__entry(
-		__field(int, ret)
-	),
-	TP_fast_assign(
-		__entry->ret = ret;
-	),
-	TP_printk("ret=%d", __entry->ret)
-);
-
-#define DEFINE_BINDER_FUNCTION_RETURN_EVENT(name)	\
-DEFINE_EVENT(binder_function_return_class, name,	\
-	TP_PROTO(int ret), \
-	TP_ARGS(ret))
-
-DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_ioctl_done);
-DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_write_done);
-DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_read_done);
-
-TRACE_EVENT(binder_wait_for_work,
-	TP_PROTO(bool proc_work, bool transaction_stack, bool thread_todo),
-	TP_ARGS(proc_work, transaction_stack, thread_todo),
-
-	TP_STRUCT__entry(
-		__field(bool, proc_work)
-		__field(bool, transaction_stack)
-		__field(bool, thread_todo)
-	),
-	TP_fast_assign(
-		__entry->proc_work = proc_work;
-		__entry->transaction_stack = transaction_stack;
-		__entry->thread_todo = thread_todo;
-	),
-	TP_printk("proc_work=%d transaction_stack=%d thread_todo=%d",
-		  __entry->proc_work, __entry->transaction_stack,
-		  __entry->thread_todo)
-);
-
-TRACE_EVENT(binder_transaction,
-	TP_PROTO(bool reply, struct binder_transaction *t,
-		 struct binder_node *target_node),
-	TP_ARGS(reply, t, target_node),
-	TP_STRUCT__entry(
-		__field(int, debug_id)
-		__field(int, target_node)
-		__field(int, to_proc)
-		__field(int, to_thread)
-		__field(int, reply)
-		__field(unsigned int, code)
-		__field(unsigned int, flags)
-	),
-	TP_fast_assign(
-		__entry->debug_id = t->debug_id;
-		__entry->target_node = target_node ? target_node->debug_id : 0;
-		__entry->to_proc = t->to_proc->pid;
-		__entry->to_thread = t->to_thread ? t->to_thread->pid : 0;
-		__entry->reply = reply;
-		__entry->code = t->code;
-		__entry->flags = t->flags;
-	),
-	TP_printk("transaction=%d dest_node=%d dest_proc=%d dest_thread=%d reply=%d flags=0x%x code=0x%x",
-		  __entry->debug_id, __entry->target_node,
-		  __entry->to_proc, __entry->to_thread,
-		  __entry->reply, __entry->flags, __entry->code)
-);
-
-TRACE_EVENT(binder_transaction_received,
-	TP_PROTO(struct binder_transaction *t),
-	TP_ARGS(t),
-
-	TP_STRUCT__entry(
-		__field(int, debug_id)
-	),
-	TP_fast_assign(
-		__entry->debug_id = t->debug_id;
-	),
-	TP_printk("transaction=%d", __entry->debug_id)
-);
-
-TRACE_EVENT(binder_transaction_node_to_ref,
-	TP_PROTO(struct binder_transaction *t, struct binder_node *node,
-		 struct binder_ref *ref),
-	TP_ARGS(t, node, ref),
-
-	TP_STRUCT__entry(
-		__field(int, debug_id)
-		__field(int, node_debug_id)
-		__field(void __user *, node_ptr)
-		__field(int, ref_debug_id)
-		__field(uint32_t, ref_desc)
-	),
-	TP_fast_assign(
-		__entry->debug_id = t->debug_id;
-		__entry->node_debug_id = node->debug_id;
-		__entry->node_ptr = node->ptr;
-		__entry->ref_debug_id = ref->debug_id;
-		__entry->ref_desc = ref->desc;
-	),
-	TP_printk("transaction=%d node=%d src_ptr=0x%p ==> dest_ref=%d dest_desc=%d",
-		  __entry->debug_id, __entry->node_debug_id, __entry->node_ptr,
-		  __entry->ref_debug_id, __entry->ref_desc)
-);
-
-TRACE_EVENT(binder_transaction_ref_to_node,
-	TP_PROTO(struct binder_transaction *t, struct binder_ref *ref),
-	TP_ARGS(t, ref),
-
-	TP_STRUCT__entry(
-		__field(int, debug_id)
-		__field(int, ref_debug_id)
-		__field(uint32_t, ref_desc)
-		__field(int, node_debug_id)
-		__field(void __user *, node_ptr)
-	),
-	TP_fast_assign(
-		__entry->debug_id = t->debug_id;
-		__entry->ref_debug_id = ref->debug_id;
-		__entry->ref_desc = ref->desc;
-		__entry->node_debug_id = ref->node->debug_id;
-		__entry->node_ptr = ref->node->ptr;
-	),
-	TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%p",
-		  __entry->debug_id, __entry->node_debug_id,
-		  __entry->ref_debug_id, __entry->ref_desc, __entry->node_ptr)
-);
-
-TRACE_EVENT(binder_transaction_ref_to_ref,
-	TP_PROTO(struct binder_transaction *t, struct binder_ref *src_ref,
-		 struct binder_ref *dest_ref),
-	TP_ARGS(t, src_ref, dest_ref),
-
-	TP_STRUCT__entry(
-		__field(int, debug_id)
-		__field(int, node_debug_id)
-		__field(int, src_ref_debug_id)
-		__field(uint32_t, src_ref_desc)
-		__field(int, dest_ref_debug_id)
-		__field(uint32_t, dest_ref_desc)
-	),
-	TP_fast_assign(
-		__entry->debug_id = t->debug_id;
-		__entry->node_debug_id = src_ref->node->debug_id;
-		__entry->src_ref_debug_id = src_ref->debug_id;
-		__entry->src_ref_desc = src_ref->desc;
-		__entry->dest_ref_debug_id = dest_ref->debug_id;
-		__entry->dest_ref_desc = dest_ref->desc;
-	),
-	TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ref=%d dest_desc=%d",
-		  __entry->debug_id, __entry->node_debug_id,
-		  __entry->src_ref_debug_id, __entry->src_ref_desc,
-		  __entry->dest_ref_debug_id, __entry->dest_ref_desc)
-);
-
-TRACE_EVENT(binder_transaction_fd,
-	TP_PROTO(struct binder_transaction *t, int src_fd, int dest_fd),
-	TP_ARGS(t, src_fd, dest_fd),
-
-	TP_STRUCT__entry(
-		__field(int, debug_id)
-		__field(int, src_fd)
-		__field(int, dest_fd)
-	),
-	TP_fast_assign(
-		__entry->debug_id = t->debug_id;
-		__entry->src_fd = src_fd;
-		__entry->dest_fd = dest_fd;
-	),
-	TP_printk("transaction=%d src_fd=%d ==> dest_fd=%d",
-		  __entry->debug_id, __entry->src_fd, __entry->dest_fd)
-);
-
-DECLARE_EVENT_CLASS(binder_buffer_class,
-	TP_PROTO(struct binder_buffer *buf),
-	TP_ARGS(buf),
-	TP_STRUCT__entry(
-		__field(int, debug_id)
-		__field(size_t, data_size)
-		__field(size_t, offsets_size)
-	),
-	TP_fast_assign(
-		__entry->debug_id = buf->debug_id;
-		__entry->data_size = buf->data_size;
-		__entry->offsets_size = buf->offsets_size;
-	),
-	TP_printk("transaction=%d data_size=%zd offsets_size=%zd",
-		  __entry->debug_id, __entry->data_size, __entry->offsets_size)
-);
-
-DEFINE_EVENT(binder_buffer_class, binder_transaction_alloc_buf,
-	TP_PROTO(struct binder_buffer *buffer),
-	TP_ARGS(buffer));
-
-DEFINE_EVENT(binder_buffer_class, binder_transaction_buffer_release,
-	TP_PROTO(struct binder_buffer *buffer),
-	TP_ARGS(buffer));
-
-DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release,
-	TP_PROTO(struct binder_buffer *buffer),
-	TP_ARGS(buffer));
-
-TRACE_EVENT(binder_update_page_range,
-	TP_PROTO(struct binder_proc *proc, bool allocate,
-		 void *start, void *end),
-	TP_ARGS(proc, allocate, start, end),
-	TP_STRUCT__entry(
-		__field(int, proc)
-		__field(bool, allocate)
-		__field(size_t, offset)
-		__field(size_t, size)
-	),
-	TP_fast_assign(
-		__entry->proc = proc->pid;
-		__entry->allocate = allocate;
-		__entry->offset = start - proc->buffer;
-		__entry->size = end - start;
-	),
-	TP_printk("proc=%d allocate=%d offset=%zu size=%zu",
-		  __entry->proc, __entry->allocate,
-		  __entry->offset, __entry->size)
-);
-
-TRACE_EVENT(binder_command,
-	TP_PROTO(uint32_t cmd),
-	TP_ARGS(cmd),
-	TP_STRUCT__entry(
-		__field(uint32_t, cmd)
-	),
-	TP_fast_assign(
-		__entry->cmd = cmd;
-	),
-	TP_printk("cmd=0x%x %s",
-		  __entry->cmd,
-		  _IOC_NR(__entry->cmd) < ARRAY_SIZE(binder_command_strings) ?
-			  binder_command_strings[_IOC_NR(__entry->cmd)] :
-			  "unknown")
-);
-
-TRACE_EVENT(binder_return,
-	TP_PROTO(uint32_t cmd),
-	TP_ARGS(cmd),
-	TP_STRUCT__entry(
-		__field(uint32_t, cmd)
-	),
-	TP_fast_assign(
-		__entry->cmd = cmd;
-	),
-	TP_printk("cmd=0x%x %s",
-		  __entry->cmd,
-		  _IOC_NR(__entry->cmd) < ARRAY_SIZE(binder_return_strings) ?
-			  binder_return_strings[_IOC_NR(__entry->cmd)] :
-			  "unknown")
-);
-
-#endif /* _BINDER_TRACE_H */
-
-#undef TRACE_INCLUDE_PATH
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE binder_trace
-#include <trace/define_trace.h>
diff --git a/drivers/staging/android/fiq_debugger/Kconfig b/drivers/staging/android/fiq_debugger/Kconfig
new file mode 100644
index 0000000..56f7f99
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/Kconfig
@@ -0,0 +1,49 @@
+config FIQ_DEBUGGER
+	bool "FIQ Mode Serial Debugger"
+	default n
+	depends on ARM || ARM64
+	help
+	  The FIQ serial debugger can accept commands even when the
+	  kernel is unresponsive due to being stuck with interrupts
+	  disabled.
+
+config FIQ_DEBUGGER_NO_SLEEP
+	bool "Keep serial debugger active"
+	depends on FIQ_DEBUGGER
+	default n
+	help
+	  Enables the serial debugger at boot. Passing
+	  fiq_debugger.no_sleep on the kernel command line will
+	  override this config option.
+
+config FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
+	bool "Don't disable wakeup IRQ when debugger is active"
+	depends on FIQ_DEBUGGER
+	default n
+	help
+	  Don't disable the wakeup irq when enabling the uart clock.  This will
+	  cause extra interrupts, but it makes the serial debugger usable on
+	  some MSM radio builds that ignore the uart clock request in power
+	  collapse.
+
+config FIQ_DEBUGGER_CONSOLE
+	bool "Console on FIQ Serial Debugger port"
+	depends on FIQ_DEBUGGER
+	default n
+	help
+	  Enables a console so that printk messages are displayed on
+	  the debugger serial port as they occur.
+
+config FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
+	bool "Put the FIQ debugger into console mode by default"
+	depends on FIQ_DEBUGGER_CONSOLE
+	default n
+	help
+	  If enabled, this puts the fiq debugger into console mode by default.
+	  Otherwise, the fiq debugger will start out in debug mode.
+
+config FIQ_WATCHDOG
+	bool
+	select FIQ_DEBUGGER
+	select PSTORE_RAM
+	default n
diff --git a/drivers/staging/android/fiq_debugger/Makefile b/drivers/staging/android/fiq_debugger/Makefile
new file mode 100644
index 0000000..a7ca487
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/Makefile
@@ -0,0 +1,4 @@
+obj-y			+= fiq_debugger.o
+obj-$(CONFIG_ARM)	+= fiq_debugger_arm.o
+obj-$(CONFIG_ARM64)	+= fiq_debugger_arm64.o
+obj-$(CONFIG_FIQ_WATCHDOG)	+= fiq_watchdog.o
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger.c b/drivers/staging/android/fiq_debugger/fiq_debugger.c
new file mode 100644
index 0000000..1d73362
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/fiq_debugger.c
@@ -0,0 +1,1213 @@
+/*
+ * drivers/staging/android/fiq_debugger/fiq_debugger.c
+ *
+ * Serial Debugger Interface accessed through an FIQ interrupt.
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <stdarg.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/console.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/kernel_stat.h>
+#include <linux/kmsg_dump.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/timer.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/wakelock.h>
+
+#ifdef CONFIG_FIQ_GLUE
+#include <asm/fiq_glue.h>
+#endif
+
+#include <linux/uaccess.h>
+
+#include "fiq_debugger.h"
+#include "fiq_debugger_priv.h"
+#include "fiq_debugger_ringbuf.h"
+
+#define DEBUG_MAX 64
+#define MAX_UNHANDLED_FIQ_COUNT 1000000
+
+#define MAX_FIQ_DEBUGGER_PORTS 4
+
+struct fiq_debugger_state {
+#ifdef CONFIG_FIQ_GLUE
+	struct fiq_glue_handler handler;
+#endif
+	struct fiq_debugger_output output;
+
+	int fiq;
+	int uart_irq;
+	int signal_irq;
+	int wakeup_irq;
+	bool wakeup_irq_no_set_wake;
+	struct clk *clk;
+	struct fiq_debugger_pdata *pdata;
+	struct platform_device *pdev;
+
+	char debug_cmd[DEBUG_MAX];
+	int debug_busy;
+	int debug_abort;
+
+	char debug_buf[DEBUG_MAX];
+	int debug_count;
+
+	bool no_sleep;
+	bool debug_enable;
+	bool ignore_next_wakeup_irq;
+	struct timer_list sleep_timer;
+	spinlock_t sleep_timer_lock;
+	bool uart_enabled;
+	struct wake_lock debugger_wake_lock;
+	bool console_enable;
+	int current_cpu;
+	atomic_t unhandled_fiq_count;
+	bool in_fiq;
+
+	struct work_struct work;
+	spinlock_t work_lock;
+	char work_cmd[DEBUG_MAX];
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+	spinlock_t console_lock;
+	struct console console;
+	struct tty_port tty_port;
+	struct fiq_debugger_ringbuf *tty_rbuf;
+	bool syslog_dumping;
+#endif
+
+	unsigned int last_irqs[NR_IRQS];
+	unsigned int last_local_timer_irqs[NR_CPUS];
+};
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+struct tty_driver *fiq_tty_driver;
+#endif
+
+#ifdef CONFIG_FIQ_DEBUGGER_NO_SLEEP
+static bool initial_no_sleep = true;
+#else
+static bool initial_no_sleep;
+#endif
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
+static bool initial_debug_enable = true;
+static bool initial_console_enable = true;
+#else
+static bool initial_debug_enable;
+static bool initial_console_enable;
+#endif
+
+static bool fiq_kgdb_enable;
+
+module_param_named(no_sleep, initial_no_sleep, bool, 0644);
+module_param_named(debug_enable, initial_debug_enable, bool, 0644);
+module_param_named(console_enable, initial_console_enable, bool, 0644);
+module_param_named(kgdb_enable, fiq_kgdb_enable, bool, 0644);
+
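+/*
+ * With CONFIG_FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON the wakeup IRQ is never
+ * gated, so these helpers are no-ops; otherwise they arm the wakeup IRQ
+ * only while the UART is powered down, so a character on the line can
+ * bring the debugger back.
+ */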
+#ifdef CONFIG_FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
+static inline
+void fiq_debugger_enable_wakeup_irq(struct fiq_debugger_state *state) {}
+static inline
+void fiq_debugger_disable_wakeup_irq(struct fiq_debugger_state *state) {}
+#else
+static inline
+void fiq_debugger_enable_wakeup_irq(struct fiq_debugger_state *state)
+{
+	if (state->wakeup_irq < 0)
+		return;
+	enable_irq(state->wakeup_irq);
+	if (!state->wakeup_irq_no_set_wake)
+		enable_irq_wake(state->wakeup_irq);
+}
+static inline
+void fiq_debugger_disable_wakeup_irq(struct fiq_debugger_state *state)
+{
+	if (state->wakeup_irq < 0)
+		return;
+	disable_irq_nosync(state->wakeup_irq);
+	if (!state->wakeup_irq_no_set_wake)
+		disable_irq_wake(state->wakeup_irq);
+}
+#endif
+
+static inline bool fiq_debugger_have_fiq(struct fiq_debugger_state *state)
+{
+	return (state->fiq >= 0);
+}
+
+#ifdef CONFIG_FIQ_GLUE
+static void fiq_debugger_force_irq(struct fiq_debugger_state *state)
+{
+	unsigned int irq = state->signal_irq;
+
+	if (WARN_ON(!fiq_debugger_have_fiq(state)))
+		return;
+	if (state->pdata->force_irq) {
+		state->pdata->force_irq(state->pdev, irq);
+	} else {
+		struct irq_chip *chip = irq_get_chip(irq);
+		if (chip && chip->irq_retrigger)
+			chip->irq_retrigger(irq_get_irq_data(irq));
+	}
+}
+#endif
+
+static void fiq_debugger_uart_enable(struct fiq_debugger_state *state)
+{
+	if (state->clk)
+		clk_enable(state->clk);
+	if (state->pdata->uart_enable)
+		state->pdata->uart_enable(state->pdev);
+}
+
+static void fiq_debugger_uart_disable(struct fiq_debugger_state *state)
+{
+	if (state->pdata->uart_disable)
+		state->pdata->uart_disable(state->pdev);
+	if (state->clk)
+		clk_disable(state->clk);
+}
+
+static void fiq_debugger_uart_flush(struct fiq_debugger_state *state)
+{
+	if (state->pdata->uart_flush)
+		state->pdata->uart_flush(state->pdev);
+}
+
+static void fiq_debugger_putc(struct fiq_debugger_state *state, char c)
+{
+	state->pdata->uart_putc(state->pdev, c);
+}
+
+static void fiq_debugger_puts(struct fiq_debugger_state *state, char *s)
+{
+	unsigned c;
+	while ((c = *s++)) {
+		if (c == '\n')
+			fiq_debugger_putc(state, '\r');
+		fiq_debugger_putc(state, c);
+	}
+}
+
+static void fiq_debugger_prompt(struct fiq_debugger_state *state)
+{
+	fiq_debugger_puts(state, "debug> ");
+}
+
+static void fiq_debugger_dump_kernel_log(struct fiq_debugger_state *state)
+{
+	char buf[512];
+	size_t len;
+	struct kmsg_dumper dumper = { .active = true };
+
+	kmsg_dump_rewind_nolock(&dumper);
+	while (kmsg_dump_get_line_nolock(&dumper, true, buf,
+					 sizeof(buf) - 1, &len)) {
+		buf[len] = 0;
+		fiq_debugger_puts(state, buf);
+	}
+}
+
+static void fiq_debugger_printf(struct fiq_debugger_output *output,
+			       const char *fmt, ...)
+{
+	struct fiq_debugger_state *state;
+	char buf[256];
+	va_list ap;
+
+	state = container_of(output, struct fiq_debugger_state, output);
+	va_start(ap, fmt);
+	vsnprintf(buf, sizeof(buf), fmt, ap);
+	va_end(ap);
+
+	fiq_debugger_puts(state, buf);
+}
+
+/* Safe outside fiq context */
+static int fiq_debugger_printf_nfiq(void *cookie, const char *fmt, ...)
+{
+	struct fiq_debugger_state *state = cookie;
+	char buf[256];
+	va_list ap;
+	unsigned long irq_flags;
+
+	va_start(ap, fmt);
+	vsnprintf(buf, sizeof(buf), fmt, ap);
+	va_end(ap);
+
+	local_irq_save(irq_flags);
+	fiq_debugger_puts(state, buf);
+	fiq_debugger_uart_flush(state);
+	local_irq_restore(irq_flags);
+	return state->debug_abort;
+}
+
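+/*
+ * "irqs" command: walk every irq_desc and print the total and
+ * since-last-dump counts, caching the current totals in last_irqs[].
+ */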
+static void fiq_debugger_dump_irqs(struct fiq_debugger_state *state)
+{
+	int n;
+	struct irq_desc *desc;
+
+	fiq_debugger_printf(&state->output,
+			"irqnr       total  since-last   status  name\n");
+	for_each_irq_desc(n, desc) {
+		struct irqaction *act = desc->action;
+		if (!act && !kstat_irqs(n))
+			continue;
+		fiq_debugger_printf(&state->output, "%5d: %10u %11u %8x  %s\n", n,
+			kstat_irqs(n),
+			kstat_irqs(n) - state->last_irqs[n],
+			desc->status_use_accessors,
+			(act && act->name) ? act->name : "???");
+		state->last_irqs[n] = kstat_irqs(n);
+	}
+}
+
+static void fiq_debugger_do_ps(struct fiq_debugger_state *state)
+{
+	struct task_struct *g;
+	struct task_struct *p;
+	unsigned task_state;
+	static const char stat_nam[] = "RSDTtZX";
+
+	fiq_debugger_printf(&state->output, "pid   ppid  prio task            pc\n");
+	read_lock(&tasklist_lock);
+	do_each_thread(g, p) {
+		task_state = p->state ? __ffs(p->state) + 1 : 0;
+		fiq_debugger_printf(&state->output,
+			     "%5d %5d %4d ", p->pid, p->parent->pid, p->prio);
+		fiq_debugger_printf(&state->output, "%-13.13s %c", p->comm,
+			     task_state >= sizeof(stat_nam) ? '?' : stat_nam[task_state]);
+		if (task_state == TASK_RUNNING)
+			fiq_debugger_printf(&state->output, " running\n");
+		else
+			fiq_debugger_printf(&state->output, " %08lx\n",
+					thread_saved_pc(p));
+	} while_each_thread(g, p);
+	read_unlock(&tasklist_lock);
+}
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+static void fiq_debugger_begin_syslog_dump(struct fiq_debugger_state *state)
+{
+	state->syslog_dumping = true;
+}
+
+static void fiq_debugger_end_syslog_dump(struct fiq_debugger_state *state)
+{
+	state->syslog_dumping = false;
+}
+#else
+extern int do_syslog(int type, char __user *buf, int count);
+static void fiq_debugger_begin_syslog_dump(struct fiq_debugger_state *state)
+{
+	do_syslog(5 /* clear */, NULL, 0);
+}
+
+static void fiq_debugger_end_syslog_dump(struct fiq_debugger_state *state)
+{
+	fiq_debugger_dump_kernel_log(state);
+}
+#endif
+
+static void fiq_debugger_do_sysrq(struct fiq_debugger_state *state, char rq)
+{
+	if ((rq == 'g' || rq == 'G') && !fiq_kgdb_enable) {
+		fiq_debugger_printf(&state->output, "sysrq-g blocked\n");
+		return;
+	}
+	fiq_debugger_begin_syslog_dump(state);
+	handle_sysrq(rq);
+	fiq_debugger_end_syslog_dump(state);
+}
+
+#ifdef CONFIG_KGDB
+static void fiq_debugger_do_kgdb(struct fiq_debugger_state *state)
+{
+	if (!fiq_kgdb_enable) {
+		fiq_debugger_printf(&state->output, "kgdb through fiq debugger not enabled\n");
+		return;
+	}
+
+	fiq_debugger_printf(&state->output, "enabling console and triggering kgdb\n");
+	state->console_enable = true;
+	handle_sysrq('g');
+}
+#endif
+
+static void fiq_debugger_schedule_work(struct fiq_debugger_state *state,
+		char *cmd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&state->work_lock, flags);
+	if (state->work_cmd[0] != '\0') {
+		fiq_debugger_printf(&state->output, "work command processor busy\n");
+		spin_unlock_irqrestore(&state->work_lock, flags);
+		return;
+	}
+
+	strlcpy(state->work_cmd, cmd, sizeof(state->work_cmd));
+	spin_unlock_irqrestore(&state->work_lock, flags);
+
+	schedule_work(&state->work);
+}
+
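+/*
+ * Work-queue context for commands that may sleep; currently only
+ * "reboot [<cmd>]", since kernel_restart() must not be called from the
+ * debugger's IRQ path.
+ */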
+static void fiq_debugger_work(struct work_struct *work)
+{
+	struct fiq_debugger_state *state;
+	char work_cmd[DEBUG_MAX];
+	char *cmd;
+	unsigned long flags;
+
+	state = container_of(work, struct fiq_debugger_state, work);
+
+	spin_lock_irqsave(&state->work_lock, flags);
+
+	strlcpy(work_cmd, state->work_cmd, sizeof(work_cmd));
+	state->work_cmd[0] = '\0';
+
+	spin_unlock_irqrestore(&state->work_lock, flags);
+
+	cmd = work_cmd;
+	if (!strncmp(cmd, "reboot", 6)) {
+		cmd += 6;
+		while (*cmd == ' ')
+			cmd++;
+		if (*cmd != '\0')
+			kernel_restart(cmd);
+		else
+			kernel_restart(NULL);
+	} else {
+		fiq_debugger_printf(&state->output, "unknown work command '%s'\n",
+				work_cmd);
+	}
+}
+
+/* This function CANNOT be called in FIQ context */
+static void fiq_debugger_irq_exec(struct fiq_debugger_state *state, char *cmd)
+{
+	if (!strcmp(cmd, "ps"))
+		fiq_debugger_do_ps(state);
+	if (!strcmp(cmd, "sysrq"))
+		fiq_debugger_do_sysrq(state, 'h');
+	if (!strncmp(cmd, "sysrq ", 6))
+		fiq_debugger_do_sysrq(state, cmd[6]);
+#ifdef CONFIG_KGDB
+	if (!strcmp(cmd, "kgdb"))
+		fiq_debugger_do_kgdb(state);
+#endif
+	if (!strncmp(cmd, "reboot", 6))
+		fiq_debugger_schedule_work(state, cmd);
+}
+
+static void fiq_debugger_help(struct fiq_debugger_state *state)
+{
+	fiq_debugger_printf(&state->output,
+				"FIQ Debugger commands:\n"
+				" pc            PC status\n"
+				" regs          Register dump\n"
+				" allregs       Extended Register dump\n"
+				" bt            Stack trace\n");
+	fiq_debugger_printf(&state->output,
+				" reboot [<c>]  Reboot with command <c>\n"
+				" reset [<c>]   Hard reset with command <c>\n"
+				" irqs          Interupt status\n"
+				" kmsg          Kernel log\n"
+				" version       Kernel version\n");
+	fiq_debugger_printf(&state->output,
+				" sleep         Allow sleep while in FIQ\n"
+				" nosleep       Disable sleep while in FIQ\n"
+				" console       Switch terminal to console\n"
+				" cpu           Current CPU\n"
+				" cpu <number>  Switch to CPU<number>\n");
+	fiq_debugger_printf(&state->output,
+				" ps            Process list\n"
+				" sysrq         sysrq options\n"
+				" sysrq <param> Execute sysrq with <param>\n");
+#ifdef CONFIG_KGDB
+	fiq_debugger_printf(&state->output,
+				" kgdb          Enter kernel debugger\n");
+#endif
+}
+
+static void fiq_debugger_take_affinity(void *info)
+{
+	struct fiq_debugger_state *state = info;
+	struct cpumask cpumask;
+
+	cpumask_clear(&cpumask);
+	cpumask_set_cpu(get_cpu(), &cpumask);
+
+	irq_set_affinity(state->uart_irq, &cpumask);
+}
+
+static void fiq_debugger_switch_cpu(struct fiq_debugger_state *state, int cpu)
+{
+	if (!fiq_debugger_have_fiq(state))
+		smp_call_function_single(cpu, fiq_debugger_take_affinity, state,
+				false);
+	state->current_cpu = cpu;
+}
+
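+/*
+ * Run a command while still in FIQ (or UART IRQ) context.  Commands that
+ * cannot execute here are stashed in debug_cmd and flagged through
+ * debug_busy so fiq_debugger_irq_exec() can finish them from IRQ
+ * context; returning true asks the caller to signal that helper.
+ */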
+static bool fiq_debugger_fiq_exec(struct fiq_debugger_state *state,
+			const char *cmd, const struct pt_regs *regs,
+			void *svc_sp)
+{
+	bool signal_helper = false;
+
+	if (!strcmp(cmd, "help") || !strcmp(cmd, "?")) {
+		fiq_debugger_help(state);
+	} else if (!strcmp(cmd, "pc")) {
+		fiq_debugger_dump_pc(&state->output, regs);
+	} else if (!strcmp(cmd, "regs")) {
+		fiq_debugger_dump_regs(&state->output, regs);
+	} else if (!strcmp(cmd, "allregs")) {
+		fiq_debugger_dump_allregs(&state->output, regs);
+	} else if (!strcmp(cmd, "bt")) {
+		fiq_debugger_dump_stacktrace(&state->output, regs, 100, svc_sp);
+	} else if (!strncmp(cmd, "reset", 5)) {
+		cmd += 5;
+		while (*cmd == ' ')
+			cmd++;
+		if (*cmd) {
+			char tmp_cmd[32];
+			strlcpy(tmp_cmd, cmd, sizeof(tmp_cmd));
+			machine_restart(tmp_cmd);
+		} else {
+			machine_restart(NULL);
+		}
+	} else if (!strcmp(cmd, "irqs")) {
+		fiq_debugger_dump_irqs(state);
+	} else if (!strcmp(cmd, "kmsg")) {
+		fiq_debugger_dump_kernel_log(state);
+	} else if (!strcmp(cmd, "version")) {
+		fiq_debugger_printf(&state->output, "%s\n", linux_banner);
+	} else if (!strcmp(cmd, "sleep")) {
+		state->no_sleep = false;
+		fiq_debugger_printf(&state->output, "enabling sleep\n");
+	} else if (!strcmp(cmd, "nosleep")) {
+		state->no_sleep = true;
+		fiq_debugger_printf(&state->output, "disabling sleep\n");
+	} else if (!strcmp(cmd, "console")) {
+		fiq_debugger_printf(&state->output, "console mode\n");
+		fiq_debugger_uart_flush(state);
+		state->console_enable = true;
+	} else if (!strcmp(cmd, "cpu")) {
+		fiq_debugger_printf(&state->output, "cpu %d\n", state->current_cpu);
+	} else if (!strncmp(cmd, "cpu ", 4)) {
+		unsigned long cpu = 0;
+		if (strict_strtoul(cmd + 4, 10, &cpu) == 0)
+			fiq_debugger_switch_cpu(state, cpu);
+		else
+			fiq_debugger_printf(&state->output, "invalid cpu\n");
+		fiq_debugger_printf(&state->output, "cpu %d\n", state->current_cpu);
+	} else {
+		if (state->debug_busy) {
+			fiq_debugger_printf(&state->output,
+				"command processor busy. trying to abort.\n");
+			state->debug_abort = -1;
+		} else {
+			strcpy(state->debug_cmd, cmd);
+			state->debug_busy = 1;
+		}
+
+		return true;
+	}
+	if (!state->console_enable)
+		fiq_debugger_prompt(state);
+
+	return signal_helper;
+}
+
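+/*
+ * Fires after a period of UART inactivity (armed in
+ * fiq_debugger_handle_wakeup() and the IRQ path): suspend the debugger,
+ * power the UART down, re-arm the wakeup IRQ and drop the wakelock so
+ * the system may sleep again.
+ */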
+static void fiq_debugger_sleep_timer_expired(unsigned long data)
+{
+	struct fiq_debugger_state *state = (struct fiq_debugger_state *)data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&state->sleep_timer_lock, flags);
+	if (state->uart_enabled && !state->no_sleep) {
+		if (state->debug_enable && !state->console_enable) {
+			state->debug_enable = false;
+			fiq_debugger_printf_nfiq(state,
+					"suspending fiq debugger\n");
+		}
+		state->ignore_next_wakeup_irq = true;
+		fiq_debugger_uart_disable(state);
+		state->uart_enabled = false;
+		fiq_debugger_enable_wakeup_irq(state);
+	}
+	wake_unlock(&state->debugger_wake_lock);
+	spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+}
+
+static void fiq_debugger_handle_wakeup(struct fiq_debugger_state *state)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&state->sleep_timer_lock, flags);
+	if (state->wakeup_irq >= 0 && state->ignore_next_wakeup_irq) {
+		state->ignore_next_wakeup_irq = false;
+	} else if (!state->uart_enabled) {
+		wake_lock(&state->debugger_wake_lock);
+		fiq_debugger_uart_enable(state);
+		state->uart_enabled = true;
+		fiq_debugger_disable_wakeup_irq(state);
+		mod_timer(&state->sleep_timer, jiffies + HZ / 2);
+	}
+	spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+}
+
+static irqreturn_t fiq_debugger_wakeup_irq_handler(int irq, void *dev)
+{
+	struct fiq_debugger_state *state = dev;
+
+	if (!state->no_sleep)
+		fiq_debugger_puts(state, "WAKEUP\n");
+	fiq_debugger_handle_wakeup(state);
+
+	return IRQ_HANDLED;
+}
+
+static
+void fiq_debugger_handle_console_irq_context(struct fiq_debugger_state *state)
+{
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+	if (state->tty_port.ops) {
+		int i;
+		int count = fiq_debugger_ringbuf_level(state->tty_rbuf);
+		for (i = 0; i < count; i++) {
+			int c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0);
+			tty_insert_flip_char(&state->tty_port, c, TTY_NORMAL);
+			if (!fiq_debugger_ringbuf_consume(state->tty_rbuf, 1))
+				pr_warn("fiq tty failed to consume byte\n");
+		}
+		tty_flip_buffer_push(&state->tty_port);
+	}
+#endif
+}
+
+static void fiq_debugger_handle_irq_context(struct fiq_debugger_state *state)
+{
+	if (!state->no_sleep) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&state->sleep_timer_lock, flags);
+		wake_lock(&state->debugger_wake_lock);
+		mod_timer(&state->sleep_timer, jiffies + HZ * 5);
+		spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+	}
+	fiq_debugger_handle_console_irq_context(state);
+	if (state->debug_busy) {
+		fiq_debugger_irq_exec(state, state->debug_cmd);
+		if (!state->console_enable)
+			fiq_debugger_prompt(state);
+		state->debug_busy = 0;
+	}
+}
+
+static int fiq_debugger_getc(struct fiq_debugger_state *state)
+{
+	return state->pdata->uart_getc(state->pdev);
+}
+
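+/*
+ * Receive loop shared by the FIQ and plain-IRQ entry points.  CR/LF
+ * activates the debugger, FIQ_DEBUGGER_BREAK drops out of console mode,
+ * printable characters are echoed into debug_buf, and a completed line
+ * is dispatched via fiq_debugger_fiq_exec().  Returns true when the
+ * signal IRQ must finish the work in IRQ context.
+ */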
+static bool fiq_debugger_handle_uart_interrupt(struct fiq_debugger_state *state,
+			int this_cpu, const struct pt_regs *regs, void *svc_sp)
+{
+	int c;
+	static int last_c;
+	int count = 0;
+	bool signal_helper = false;
+
+	if (this_cpu != state->current_cpu) {
+		if (state->in_fiq)
+			return false;
+
+		if (atomic_inc_return(&state->unhandled_fiq_count) !=
+					MAX_UNHANDLED_FIQ_COUNT)
+			return false;
+
+		fiq_debugger_printf(&state->output,
+			"fiq_debugger: cpu %d not responding, "
+			"reverting to cpu %d\n", state->current_cpu,
+			this_cpu);
+
+		atomic_set(&state->unhandled_fiq_count, 0);
+		fiq_debugger_switch_cpu(state, this_cpu);
+		return false;
+	}
+
+	state->in_fiq = true;
+
+	while ((c = fiq_debugger_getc(state)) != FIQ_DEBUGGER_NO_CHAR) {
+		count++;
+		if (!state->debug_enable) {
+			if ((c == 13) || (c == 10)) {
+				state->debug_enable = true;
+				state->debug_count = 0;
+				fiq_debugger_prompt(state);
+			}
+		} else if (c == FIQ_DEBUGGER_BREAK) {
+			state->console_enable = false;
+			fiq_debugger_puts(state, "fiq debugger mode\n");
+			state->debug_count = 0;
+			fiq_debugger_prompt(state);
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+		} else if (state->console_enable && state->tty_rbuf) {
+			fiq_debugger_ringbuf_push(state->tty_rbuf, c);
+			signal_helper = true;
+#endif
+		} else if ((c >= ' ') && (c < 127)) {
+			if (state->debug_count < (DEBUG_MAX - 1)) {
+				state->debug_buf[state->debug_count++] = c;
+				fiq_debugger_putc(state, c);
+			}
+		} else if ((c == 8) || (c == 127)) {
+			if (state->debug_count > 0) {
+				state->debug_count--;
+				fiq_debugger_putc(state, 8);
+				fiq_debugger_putc(state, ' ');
+				fiq_debugger_putc(state, 8);
+			}
+		} else if ((c == 13) || (c == 10)) {
+			if (c == '\r' || (c == '\n' && last_c != '\r')) {
+				fiq_debugger_putc(state, '\r');
+				fiq_debugger_putc(state, '\n');
+			}
+			if (state->debug_count) {
+				state->debug_buf[state->debug_count] = 0;
+				state->debug_count = 0;
+				signal_helper |=
+					fiq_debugger_fiq_exec(state,
+							state->debug_buf,
+							regs, svc_sp);
+			} else {
+				fiq_debugger_prompt(state);
+			}
+		}
+		last_c = c;
+	}
+	if (!state->console_enable)
+		fiq_debugger_uart_flush(state);
+	if (state->pdata->fiq_ack)
+		state->pdata->fiq_ack(state->pdev, state->fiq);
+
+	/* poke sleep timer if necessary */
+	if (state->debug_enable && !state->no_sleep)
+		signal_helper = true;
+
+	atomic_set(&state->unhandled_fiq_count, 0);
+	state->in_fiq = false;
+
+	return signal_helper;
+}
+
+#ifdef CONFIG_FIQ_GLUE
+static void fiq_debugger_fiq(struct fiq_glue_handler *h,
+		const struct pt_regs *regs, void *svc_sp)
+{
+	struct fiq_debugger_state *state =
+		container_of(h, struct fiq_debugger_state, handler);
+	unsigned int this_cpu = THREAD_INFO(svc_sp)->cpu;
+	bool need_irq;
+
+	need_irq = fiq_debugger_handle_uart_interrupt(state, this_cpu, regs,
+			svc_sp);
+	if (need_irq)
+		fiq_debugger_force_irq(state);
+}
+#endif
+
+/*
+ * When not using FIQs, we only use this single interrupt as an entry point.
+ * This just effectively takes over the UART interrupt and does all the work
+ * in this context.
+ */
+static irqreturn_t fiq_debugger_uart_irq(int irq, void *dev)
+{
+	struct fiq_debugger_state *state = dev;
+	bool not_done;
+
+	fiq_debugger_handle_wakeup(state);
+
+	/* handle the debugger irq in regular context */
+	not_done = fiq_debugger_handle_uart_interrupt(state, smp_processor_id(),
+					      get_irq_regs(),
+					      current_thread_info());
+	if (not_done)
+		fiq_debugger_handle_irq_context(state);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * If FIQs are used, not everything can happen in fiq context.
+ * FIQ handler does what it can and then signals this interrupt to finish the
+ * job in irq context.
+ */
+static irqreturn_t fiq_debugger_signal_irq(int irq, void *dev)
+{
+	struct fiq_debugger_state *state = dev;
+
+	if (state->pdata->force_irq_ack)
+		state->pdata->force_irq_ack(state->pdev, state->signal_irq);
+
+	fiq_debugger_handle_irq_context(state);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_FIQ_GLUE
+static void fiq_debugger_resume(struct fiq_glue_handler *h)
+{
+	struct fiq_debugger_state *state =
+		container_of(h, struct fiq_debugger_state, handler);
+	if (state->pdata->uart_resume)
+		state->pdata->uart_resume(state->pdev);
+}
+#endif
+
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+struct tty_driver *fiq_debugger_console_device(struct console *co, int *index)
+{
+	*index = co->index;
+	return fiq_tty_driver;
+}
+
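+/*
+ * Console output path: emits only while console mode is active or a
+ * syslog dump is in progress, powering the UART up just long enough to
+ * push the characters out (with '\n' to '\r\n' translation).
+ */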
+static void fiq_debugger_console_write(struct console *co,
+				const char *s, unsigned int count)
+{
+	struct fiq_debugger_state *state;
+	unsigned long flags;
+
+	state = container_of(co, struct fiq_debugger_state, console);
+
+	if (!state->console_enable && !state->syslog_dumping)
+		return;
+
+	fiq_debugger_uart_enable(state);
+	spin_lock_irqsave(&state->console_lock, flags);
+	while (count--) {
+		if (*s == '\n')
+			fiq_debugger_putc(state, '\r');
+		fiq_debugger_putc(state, *s++);
+	}
+	fiq_debugger_uart_flush(state);
+	spin_unlock_irqrestore(&state->console_lock, flags);
+	fiq_debugger_uart_disable(state);
+}
+
+static struct console fiq_debugger_console = {
+	.name = "ttyFIQ",
+	.device = fiq_debugger_console_device,
+	.write = fiq_debugger_console_write,
+	.flags = CON_PRINTBUFFER | CON_ANYTIME | CON_ENABLED,
+};
+
+int fiq_tty_open(struct tty_struct *tty, struct file *filp)
+{
+	int line = tty->index;
+	struct fiq_debugger_state **states = tty->driver->driver_state;
+	struct fiq_debugger_state *state = states[line];
+
+	return tty_port_open(&state->tty_port, tty, filp);
+}
+
+void fiq_tty_close(struct tty_struct *tty, struct file *filp)
+{
+	tty_port_close(tty->port, tty, filp);
+}
+
+int fiq_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+	int i;
+	int line = tty->index;
+	struct fiq_debugger_state **states = tty->driver->driver_state;
+	struct fiq_debugger_state *state = states[line];
+
+	if (!state->console_enable)
+		return count;
+
+	fiq_debugger_uart_enable(state);
+	spin_lock_irq(&state->console_lock);
+	for (i = 0; i < count; i++)
+		fiq_debugger_putc(state, *buf++);
+	spin_unlock_irq(&state->console_lock);
+	fiq_debugger_uart_disable(state);
+
+	return count;
+}
+
+int fiq_tty_write_room(struct tty_struct *tty)
+{
+	return 16;
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static int fiq_tty_poll_init(struct tty_driver *driver, int line, char *options)
+{
+	return 0;
+}
+
+static int fiq_tty_poll_get_char(struct tty_driver *driver, int line)
+{
+	struct fiq_debugger_state **states = driver->driver_state;
+	struct fiq_debugger_state *state = states[line];
+	int c = NO_POLL_CHAR;
+
+	fiq_debugger_uart_enable(state);
+	if (fiq_debugger_have_fiq(state)) {
+		int count = fiq_debugger_ringbuf_level(state->tty_rbuf);
+		if (count > 0) {
+			c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0);
+			fiq_debugger_ringbuf_consume(state->tty_rbuf, 1);
+		}
+	} else {
+		c = fiq_debugger_getc(state);
+		if (c == FIQ_DEBUGGER_NO_CHAR)
+			c = NO_POLL_CHAR;
+	}
+	fiq_debugger_uart_disable(state);
+
+	return c;
+}
+
+static void fiq_tty_poll_put_char(struct tty_driver *driver, int line, char ch)
+{
+	struct fiq_debugger_state **states = driver->driver_state;
+	struct fiq_debugger_state *state = states[line];
+	fiq_debugger_uart_enable(state);
+	fiq_debugger_putc(state, ch);
+	fiq_debugger_uart_disable(state);
+}
+#endif
+
+static const struct tty_port_operations fiq_tty_port_ops;
+
+static const struct tty_operations fiq_tty_driver_ops = {
+	.write = fiq_tty_write,
+	.write_room = fiq_tty_write_room,
+	.open = fiq_tty_open,
+	.close = fiq_tty_close,
+#ifdef CONFIG_CONSOLE_POLL
+	.poll_init = fiq_tty_poll_init,
+	.poll_get_char = fiq_tty_poll_get_char,
+	.poll_put_char = fiq_tty_poll_put_char,
+#endif
+};
+
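+/*
+ * Register one tty driver with MAX_FIQ_DEBUGGER_PORTS lines; each probed
+ * debugger instance later claims its line in fiq_debugger_tty_init_one().
+ */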
+static int fiq_debugger_tty_init(void)
+{
+	int ret;
+	struct fiq_debugger_state **states = NULL;
+
+	states = kzalloc(sizeof(*states) * MAX_FIQ_DEBUGGER_PORTS, GFP_KERNEL);
+	if (!states) {
+		pr_err("Failed to allocate fiq debugger state structres\n");
+		return -ENOMEM;
+	}
+
+	fiq_tty_driver = alloc_tty_driver(MAX_FIQ_DEBUGGER_PORTS);
+	if (!fiq_tty_driver) {
+		pr_err("Failed to allocate fiq debugger tty\n");
+		ret = -ENOMEM;
+		goto err_free_state;
+	}
+
+	fiq_tty_driver->owner		= THIS_MODULE;
+	fiq_tty_driver->driver_name	= "fiq-debugger";
+	fiq_tty_driver->name		= "ttyFIQ";
+	fiq_tty_driver->type		= TTY_DRIVER_TYPE_SERIAL;
+	fiq_tty_driver->subtype		= SERIAL_TYPE_NORMAL;
+	fiq_tty_driver->init_termios	= tty_std_termios;
+	fiq_tty_driver->flags		= TTY_DRIVER_REAL_RAW |
+					  TTY_DRIVER_DYNAMIC_DEV;
+	fiq_tty_driver->driver_state	= states;
+
+	fiq_tty_driver->init_termios.c_cflag =
+					B115200 | CS8 | CREAD | HUPCL | CLOCAL;
+	fiq_tty_driver->init_termios.c_ispeed = 115200;
+	fiq_tty_driver->init_termios.c_ospeed = 115200;
+
+	tty_set_operations(fiq_tty_driver, &fiq_tty_driver_ops);
+
+	ret = tty_register_driver(fiq_tty_driver);
+	if (ret) {
+		pr_err("Failed to register fiq tty: %d\n", ret);
+		goto err_free_tty;
+	}
+
+	pr_info("Registered FIQ tty driver\n");
+	return 0;
+
+err_free_tty:
+	put_tty_driver(fiq_tty_driver);
+	fiq_tty_driver = NULL;
+err_free_state:
+	kfree(states);
+	return ret;
+}
+
+static int fiq_debugger_tty_init_one(struct fiq_debugger_state *state)
+{
+	int ret;
+	struct device *tty_dev;
+	struct fiq_debugger_state **states = fiq_tty_driver->driver_state;
+
+	states[state->pdev->id] = state;
+
+	state->tty_rbuf = fiq_debugger_ringbuf_alloc(1024);
+	if (!state->tty_rbuf) {
+		pr_err("Failed to allocate fiq debugger ringbuf\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	tty_port_init(&state->tty_port);
+	state->tty_port.ops = &fiq_tty_port_ops;
+
+	tty_dev = tty_port_register_device(&state->tty_port, fiq_tty_driver,
+					   state->pdev->id, &state->pdev->dev);
+	if (IS_ERR(tty_dev)) {
+		pr_err("Failed to register fiq debugger tty device\n");
+		ret = PTR_ERR(tty_dev);
+		goto err;
+	}
+
+	device_set_wakeup_capable(tty_dev, 1);
+
+	pr_info("Registered fiq debugger ttyFIQ%d\n", state->pdev->id);
+
+	return 0;
+
+err:
+	fiq_debugger_ringbuf_free(state->tty_rbuf);
+	state->tty_rbuf = NULL;
+	return ret;
+}
+#endif
+
+static int fiq_debugger_dev_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct fiq_debugger_state *state = platform_get_drvdata(pdev);
+
+	if (state->pdata->uart_dev_suspend)
+		return state->pdata->uart_dev_suspend(pdev);
+	return 0;
+}
+
+static int fiq_debugger_dev_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct fiq_debugger_state *state = platform_get_drvdata(pdev);
+
+	if (state->pdata->uart_dev_resume)
+		return state->pdata->uart_dev_resume(pdev);
+	return 0;
+}
+
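+/*
+ * One platform device per debugger port.  Exactly one of the "fiq" and
+ * "uart_irq" resources must be supplied: with a FIQ we register a
+ * fiq_glue handler, otherwise the UART interrupt itself drives the
+ * debugger.  A minimal pdata sketch (illustrative only; field names
+ * taken from the checks below):
+ *
+ *	static struct fiq_debugger_pdata my_pdata = {
+ *		.uart_getc	= my_uart_getc,
+ *		.uart_putc	= my_uart_putc,
+ *	};
+ */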
+static int fiq_debugger_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct fiq_debugger_pdata *pdata = dev_get_platdata(&pdev->dev);
+	struct fiq_debugger_state *state;
+	int fiq;
+	int uart_irq;
+
+	if (pdev->id >= MAX_FIQ_DEBUGGER_PORTS)
+		return -EINVAL;
+
+	if (!pdata->uart_getc || !pdata->uart_putc)
+		return -EINVAL;
+	if ((pdata->uart_enable && !pdata->uart_disable) ||
+	    (!pdata->uart_enable && pdata->uart_disable))
+		return -EINVAL;
+
+	fiq = platform_get_irq_byname(pdev, "fiq");
+	uart_irq = platform_get_irq_byname(pdev, "uart_irq");
+
+	/* uart_irq mode and fiq mode are mutually exclusive, but one of them
+	 * is required */
+	if ((uart_irq < 0 && fiq < 0) || (uart_irq >= 0 && fiq >= 0))
+		return -EINVAL;
+	if (fiq >= 0 && !pdata->fiq_enable)
+		return -EINVAL;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+	state->output.printf = fiq_debugger_printf;
+	setup_timer(&state->sleep_timer, fiq_debugger_sleep_timer_expired,
+		    (unsigned long)state);
+	state->pdata = pdata;
+	state->pdev = pdev;
+	state->no_sleep = initial_no_sleep;
+	state->debug_enable = initial_debug_enable;
+	state->console_enable = initial_console_enable;
+
+	state->fiq = fiq;
+	state->uart_irq = uart_irq;
+	state->signal_irq = platform_get_irq_byname(pdev, "signal");
+	state->wakeup_irq = platform_get_irq_byname(pdev, "wakeup");
+
+	INIT_WORK(&state->work, fiq_debugger_work);
+	spin_lock_init(&state->work_lock);
+
+	platform_set_drvdata(pdev, state);
+
+	spin_lock_init(&state->sleep_timer_lock);
+
+	if (state->wakeup_irq < 0 && fiq_debugger_have_fiq(state))
+		state->no_sleep = true;
+	state->ignore_next_wakeup_irq = !state->no_sleep;
+
+	wake_lock_init(&state->debugger_wake_lock,
+			WAKE_LOCK_SUSPEND, "serial-debug");
+
+	state->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(state->clk))
+		state->clk = NULL;
+
+	/* do not call pdata->uart_enable here since uart_init may still
+	 * need to do some initialization before uart_enable can work.
+	 * So, only try to manage the clock during init.
+	 */
+	if (state->clk)
+		clk_enable(state->clk);
+
+	if (pdata->uart_init) {
+		ret = pdata->uart_init(pdev);
+		if (ret)
+			goto err_uart_init;
+	}
+
+	fiq_debugger_printf_nfiq(state,
+				"<hit enter %sto activate fiq debugger>\n",
+				state->no_sleep ? "" : "twice ");
+
+#ifdef CONFIG_FIQ_GLUE
+	if (fiq_debugger_have_fiq(state)) {
+		state->handler.fiq = fiq_debugger_fiq;
+		state->handler.resume = fiq_debugger_resume;
+		ret = fiq_glue_register_handler(&state->handler);
+		if (ret) {
+			pr_err("%s: could not install fiq handler\n", __func__);
+			goto err_register_irq;
+		}
+
+		pdata->fiq_enable(pdev, state->fiq, 1);
+	} else
+#endif
+	{
+		ret = request_irq(state->uart_irq, fiq_debugger_uart_irq,
+				  IRQF_NO_SUSPEND, "debug", state);
+		if (ret) {
+			pr_err("%s: could not install irq handler\n", __func__);
+			goto err_register_irq;
+		}
+
+		/* for irq-only mode, we want this irq to wake us up, if it
+		 * can.
+		 */
+		enable_irq_wake(state->uart_irq);
+	}
+
+	if (state->clk)
+		clk_disable(state->clk);
+
+	if (state->signal_irq >= 0) {
+		ret = request_irq(state->signal_irq, fiq_debugger_signal_irq,
+			  IRQF_TRIGGER_RISING, "debug-signal", state);
+		if (ret)
+			pr_err("serial_debugger: could not install signal_irq");
+	}
+
+	if (state->wakeup_irq >= 0) {
+		ret = request_irq(state->wakeup_irq,
+				  fiq_debugger_wakeup_irq_handler,
+				  IRQF_TRIGGER_FALLING | IRQF_DISABLED,
+				  "debug-wakeup", state);
+		if (ret) {
+			pr_err("serial_debugger: "
+				"could not install wakeup irq\n");
+			state->wakeup_irq = -1;
+		} else {
+			ret = enable_irq_wake(state->wakeup_irq);
+			if (ret) {
+				pr_err("serial_debugger: "
+					"could not enable wakeup\n");
+				state->wakeup_irq_no_set_wake = true;
+			}
+		}
+	}
+	if (state->no_sleep)
+		fiq_debugger_handle_wakeup(state);
+
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+	spin_lock_init(&state->console_lock);
+	state->console = fiq_debugger_console;
+	state->console.index = pdev->id;
+	if (!console_set_on_cmdline)
+		add_preferred_console(state->console.name,
+			state->console.index, NULL);
+	register_console(&state->console);
+	fiq_debugger_tty_init_one(state);
+#endif
+	return 0;
+
+err_register_irq:
+	if (pdata->uart_free)
+		pdata->uart_free(pdev);
+err_uart_init:
+	if (state->clk) {
+		clk_disable(state->clk);
+		clk_put(state->clk);
+	}
+	wake_lock_destroy(&state->debugger_wake_lock);
+	platform_set_drvdata(pdev, NULL);
+	kfree(state);
+	return ret;
+}
+
+static const struct dev_pm_ops fiq_debugger_dev_pm_ops = {
+	.suspend	= fiq_debugger_dev_suspend,
+	.resume		= fiq_debugger_dev_resume,
+};
+
+static struct platform_driver fiq_debugger_driver = {
+	.probe	= fiq_debugger_probe,
+	.driver	= {
+		.name	= "fiq_debugger",
+		.pm	= &fiq_debugger_dev_pm_ops,
+	},
+};
+
+static int __init fiq_debugger_init(void)
+{
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+	fiq_debugger_tty_init();
+#endif
+	return platform_driver_register(&fiq_debugger_driver);
+}
+
+postcore_initcall(fiq_debugger_init);
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger.h b/drivers/staging/android/fiq_debugger/fiq_debugger.h
new file mode 100644
index 0000000..c9ec4f8
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/fiq_debugger.h
@@ -0,0 +1,64 @@
+/*
+ * drivers/staging/android/fiq_debugger/fiq_debugger.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _FIQ_DEBUGGER_H_
+#define _FIQ_DEBUGGER_H_
+
+#include <linux/serial_core.h>
+
+struct platform_device;
+
+#define FIQ_DEBUGGER_NO_CHAR NO_POLL_CHAR
+#define FIQ_DEBUGGER_BREAK 0x00ff0100
+
+#define FIQ_DEBUGGER_FIQ_IRQ_NAME	"fiq"
+#define FIQ_DEBUGGER_SIGNAL_IRQ_NAME	"signal"
+#define FIQ_DEBUGGER_WAKEUP_IRQ_NAME	"wakeup"
+
+/**
+ * struct fiq_debugger_pdata - fiq debugger platform data
+ * @uart_init:		one-time uart setup, called once during probe.
+ * @uart_free:		undo @uart_init, called on probe errors.
+ * @uart_getc:		read the next character from the uart, or
+ *			FIQ_DEBUGGER_NO_CHAR if none is pending.  Required.
+ * @uart_putc:		write one character to the uart.  Required.
+ * @uart_flush:		flush the uart transmit path.
+ * @uart_resume:	used to restore uart state right before enabling
+ *			the fiq.
+ * @uart_enable:	Do the work necessary to communicate with the uart
+ *			hw (enable clocks, etc.). This must be ref-counted.
+ * @uart_disable:	Do the work necessary to disable the uart hw
+ *			(disable clocks, etc.). This must be ref-counted.
+ * @uart_dev_suspend:	called during PM suspend, generally not needed
+ *			for real fiq mode debugger.
+ * @uart_dev_resume:	called during PM resume, generally not needed
+ *			for real fiq mode debugger.
+ * @fiq_enable:		enable or disable the fiq; required whenever a
+ *			"fiq" irq resource is supplied.
+ */
+struct fiq_debugger_pdata {
+	int (*uart_init)(struct platform_device *pdev);
+	void (*uart_free)(struct platform_device *pdev);
+	int (*uart_resume)(struct platform_device *pdev);
+	int (*uart_getc)(struct platform_device *pdev);
+	void (*uart_putc)(struct platform_device *pdev, unsigned int c);
+	void (*uart_flush)(struct platform_device *pdev);
+	void (*uart_enable)(struct platform_device *pdev);
+	void (*uart_disable)(struct platform_device *pdev);
+
+	int (*uart_dev_suspend)(struct platform_device *pdev);
+	int (*uart_dev_resume)(struct platform_device *pdev);
+
+	void (*fiq_enable)(struct platform_device *pdev, unsigned int fiq,
+								bool enable);
+	void (*fiq_ack)(struct platform_device *pdev, unsigned int fiq);
+
+	void (*force_irq)(struct platform_device *pdev, unsigned int irq);
+	void (*force_irq_ack)(struct platform_device *pdev, unsigned int irq);
+};
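+/*
+ * Illustrative sketch only (not part of this patch): a board file would
+ * typically wire these callbacks into a platform device named
+ * "fiq_debugger".  The my_uart_* helpers below are hypothetical; the
+ * probe code requires uart_getc and uart_putc, exactly one of a "fiq" or
+ * a "uart_irq" IRQ resource, and fiq_enable whenever a "fiq" resource is
+ * given:
+ *
+ *	static struct fiq_debugger_pdata my_pdata = {
+ *		.uart_getc  = my_uart_getc,
+ *		.uart_putc  = my_uart_putc,
+ *		.fiq_enable = my_fiq_enable,
+ *	};
+ *
+ *	static struct platform_device my_fiq_debugger_dev = {
+ *		.name              = "fiq_debugger",
+ *		.id                = 0,
+ *		.dev.platform_data = &my_pdata,
+ *	};
+ */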
+
+#endif
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_arm.c b/drivers/staging/android/fiq_debugger/fiq_debugger_arm.c
new file mode 100644
index 0000000..8b3e013
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/fiq_debugger_arm.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/ptrace.h>
+#include <linux/uaccess.h>
+
+#include <asm/stacktrace.h>
+
+#include "fiq_debugger_priv.h"
+
+static char *mode_name(unsigned cpsr)
+{
+	switch (cpsr & MODE_MASK) {
+	case USR_MODE: return "USR";
+	case FIQ_MODE: return "FIQ";
+	case IRQ_MODE: return "IRQ";
+	case SVC_MODE: return "SVC";
+	case ABT_MODE: return "ABT";
+	case UND_MODE: return "UND";
+	case SYSTEM_MODE: return "SYS";
+	default: return "???";
+	}
+}
+
+void fiq_debugger_dump_pc(struct fiq_debugger_output *output,
+		const struct pt_regs *regs)
+{
+	output->printf(output, " pc %08x cpsr %08x mode %s\n",
+		regs->ARM_pc, regs->ARM_cpsr, mode_name(regs->ARM_cpsr));
+}
+
+void fiq_debugger_dump_regs(struct fiq_debugger_output *output,
+		const struct pt_regs *regs)
+{
+	output->printf(output,
+			" r0 %08x  r1 %08x  r2 %08x  r3 %08x\n",
+			regs->ARM_r0, regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
+	output->printf(output,
+			" r4 %08x  r5 %08x  r6 %08x  r7 %08x\n",
+			regs->ARM_r4, regs->ARM_r5, regs->ARM_r6, regs->ARM_r7);
+	output->printf(output,
+			" r8 %08x  r9 %08x r10 %08x r11 %08x  mode %s\n",
+			regs->ARM_r8, regs->ARM_r9, regs->ARM_r10, regs->ARM_fp,
+			mode_name(regs->ARM_cpsr));
+	output->printf(output,
+			" ip %08x  sp %08x  lr %08x  pc %08x cpsr %08x\n",
+			regs->ARM_ip, regs->ARM_sp, regs->ARM_lr, regs->ARM_pc,
+			regs->ARM_cpsr);
+}
+
+struct mode_regs {
+	unsigned long sp_svc;
+	unsigned long lr_svc;
+	unsigned long spsr_svc;
+
+	unsigned long sp_abt;
+	unsigned long lr_abt;
+	unsigned long spsr_abt;
+
+	unsigned long sp_und;
+	unsigned long lr_und;
+	unsigned long spsr_und;
+
+	unsigned long sp_irq;
+	unsigned long lr_irq;
+	unsigned long spsr_irq;
+
+	unsigned long r8_fiq;
+	unsigned long r9_fiq;
+	unsigned long r10_fiq;
+	unsigned long r11_fiq;
+	unsigned long r12_fiq;
+	unsigned long sp_fiq;
+	unsigned long lr_fiq;
+	unsigned long spsr_fiq;
+};
+
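+/*
+ * Capture the banked sp, lr and spsr of the SVC, ABT, UND, IRQ and FIQ
+ * modes by briefly switching modes through writes to cpsr_c.  Every mode
+ * constant written below includes PSR_I_BIT | PSR_F_BIT, so interrupts
+ * stay masked across the switches; the caller's mode is saved in r1 and
+ * restored before returning.
+ */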
+static void __naked get_mode_regs(struct mode_regs *regs)
+{
+	asm volatile (
+	"mrs	r1, cpsr\n"
+	"msr	cpsr_c, #0xd3 @(SVC_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+	"stmia	r0!, {r13 - r14}\n"
+	"mrs	r2, spsr\n"
+	"msr	cpsr_c, #0xd7 @(ABT_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+	"stmia	r0!, {r2, r13 - r14}\n"
+	"mrs	r2, spsr\n"
+	"msr	cpsr_c, #0xdb @(UND_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+	"stmia	r0!, {r2, r13 - r14}\n"
+	"mrs	r2, spsr\n"
+	"msr	cpsr_c, #0xd2 @(IRQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+	"stmia	r0!, {r2, r13 - r14}\n"
+	"mrs	r2, spsr\n"
+	"msr	cpsr_c, #0xd1 @(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+	"stmia	r0!, {r2, r8 - r14}\n"
+	"mrs	r2, spsr\n"
+	"stmia	r0!, {r2}\n"
+	"msr	cpsr_c, r1\n"
+	"bx	lr\n");
+}
+
+void fiq_debugger_dump_allregs(struct fiq_debugger_output *output,
+		const struct pt_regs *regs)
+{
+	struct mode_regs mode_regs;
+	unsigned long mode = regs->ARM_cpsr & MODE_MASK;
+
+	fiq_debugger_dump_regs(output, regs);
+	get_mode_regs(&mode_regs);
+
+	output->printf(output,
+			"%csvc: sp %08x  lr %08x  spsr %08x\n",
+			mode == SVC_MODE ? '*' : ' ',
+			mode_regs.sp_svc, mode_regs.lr_svc, mode_regs.spsr_svc);
+	output->printf(output,
+			"%cabt: sp %08x  lr %08x  spsr %08x\n",
+			mode == ABT_MODE ? '*' : ' ',
+			mode_regs.sp_abt, mode_regs.lr_abt, mode_regs.spsr_abt);
+	output->printf(output,
+			"%cund: sp %08x  lr %08x  spsr %08x\n",
+			mode == UND_MODE ? '*' : ' ',
+			mode_regs.sp_und, mode_regs.lr_und, mode_regs.spsr_und);
+	output->printf(output,
+			"%cirq: sp %08x  lr %08x  spsr %08x\n",
+			mode == IRQ_MODE ? '*' : ' ',
+			mode_regs.sp_irq, mode_regs.lr_irq, mode_regs.spsr_irq);
+	output->printf(output,
+			"%cfiq: r8 %08x  r9 %08x  r10 %08x  r11 %08x  r12 %08x\n",
+			mode == FIQ_MODE ? '*' : ' ',
+			mode_regs.r8_fiq, mode_regs.r9_fiq, mode_regs.r10_fiq,
+			mode_regs.r11_fiq, mode_regs.r12_fiq);
+	output->printf(output,
+			" fiq: sp %08x  lr %08x  spsr %08x\n",
+			mode_regs.sp_fiq, mode_regs.lr_fiq, mode_regs.spsr_fiq);
+}
+
+struct stacktrace_state {
+	struct fiq_debugger_output *output;
+	unsigned int depth;
+};
+
+static int report_trace(struct stackframe *frame, void *d)
+{
+	struct stacktrace_state *sts = d;
+
+	if (sts->depth) {
+		sts->output->printf(sts->output,
+			"  pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
+			frame->pc, frame->pc, frame->lr, frame->lr,
+			frame->sp, frame->fp);
+		sts->depth--;
+		return 0;
+	}
+	sts->output->printf(sts->output, "  ...\n");
+
+	return sts->depth == 0;
+}
+
+struct frame_tail {
+	struct frame_tail *fp;
+	unsigned long sp;
+	unsigned long lr;
+} __attribute__((packed));
+
+static struct frame_tail *user_backtrace(struct fiq_debugger_output *output,
+					struct frame_tail *tail)
+{
+	struct frame_tail buftail[2];
+
+	/* Also check accessibility of one struct frame_tail beyond */
+	if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) {
+		output->printf(output, "  invalid frame pointer %p\n",
+				tail);
+		return NULL;
+	}
+	if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) {
+		output->printf(output,
+			"  failed to copy frame pointer %p\n", tail);
+		return NULL;
+	}
+
+	output->printf(output, "  %p\n", buftail[0].lr);
+
+	/* frame pointers should strictly progress back up the stack
+	 * (towards higher addresses) */
+	if (tail >= buftail[0].fp)
+		return NULL;
+
+	return buftail[0].fp-1;
+}
+
+void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output,
+		const struct pt_regs *regs, unsigned int depth, void *ssp)
+{
+	struct frame_tail *tail;
+	struct thread_info *real_thread_info = THREAD_INFO(ssp);
+	struct stacktrace_state sts;
+
+	sts.depth = depth;
+	sts.output = output;
+	*current_thread_info() = *real_thread_info;
+
+	if (!current)
+		output->printf(output, "current NULL\n");
+	else
+		output->printf(output, "pid: %d  comm: %s\n",
+			current->pid, current->comm);
+	fiq_debugger_dump_regs(output, regs);
+
+	if (!user_mode(regs)) {
+		struct stackframe frame;
+		frame.fp = regs->ARM_fp;
+		frame.sp = regs->ARM_sp;
+		frame.lr = regs->ARM_lr;
+		frame.pc = regs->ARM_pc;
+		output->printf(output,
+			"  pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
+			regs->ARM_pc, regs->ARM_pc, regs->ARM_lr, regs->ARM_lr,
+			regs->ARM_sp, regs->ARM_fp);
+		walk_stackframe(&frame, report_trace, &sts);
+		return;
+	}
+
+	tail = ((struct frame_tail *) regs->ARM_fp) - 1;
+	while (depth-- && tail && !((unsigned long) tail & 3))
+		tail = user_backtrace(output, tail);
+}
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c b/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c
new file mode 100644
index 0000000..99c6584
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/ptrace.h>
+#include <asm/stacktrace.h>
+
+#include "fiq_debugger_priv.h"
+
+static char *mode_name(const struct pt_regs *regs)
+{
+	if (compat_user_mode(regs)) {
+		return "USR";
+	} else {
+		switch (processor_mode(regs)) {
+		case PSR_MODE_EL0t: return "EL0t";
+		case PSR_MODE_EL1t: return "EL1t";
+		case PSR_MODE_EL1h: return "EL1h";
+		case PSR_MODE_EL2t: return "EL2t";
+		case PSR_MODE_EL2h: return "EL2h";
+		default: return "???";
+		}
+	}
+}
+
+void fiq_debugger_dump_pc(struct fiq_debugger_output *output,
+		const struct pt_regs *regs)
+{
+	output->printf(output, " pc %016lx cpsr %08lx mode %s\n",
+		regs->pc, regs->pstate, mode_name(regs));
+}
+
+void fiq_debugger_dump_regs_aarch32(struct fiq_debugger_output *output,
+		const struct pt_regs *regs)
+{
+	output->printf(output, " r0 %08x  r1 %08x  r2 %08x  r3 %08x\n",
+			regs->compat_usr(0), regs->compat_usr(1),
+			regs->compat_usr(2), regs->compat_usr(3));
+	output->printf(output, " r4 %08x  r5 %08x  r6 %08x  r7 %08x\n",
+			regs->compat_usr(4), regs->compat_usr(5),
+			regs->compat_usr(6), regs->compat_usr(7));
+	output->printf(output, " r8 %08x  r9 %08x r10 %08x r11 %08x\n",
+			regs->compat_usr(8), regs->compat_usr(9),
+			regs->compat_usr(10), regs->compat_usr(11));
+	output->printf(output, " ip %08x  sp %08x  lr %08x  pc %08x\n",
+			regs->compat_usr(12), regs->compat_sp,
+			regs->compat_lr, regs->pc);
+	output->printf(output, " cpsr %08x (%s)\n",
+			regs->pstate, mode_name(regs));
+}
+
+void fiq_debugger_dump_regs_aarch64(struct fiq_debugger_output *output,
+		const struct pt_regs *regs)
+{
+	output->printf(output, "  x0 %016lx   x1 %016lx\n",
+			regs->regs[0], regs->regs[1]);
+	output->printf(output, "  x2 %016lx   x3 %016lx\n",
+			regs->regs[2], regs->regs[3]);
+	output->printf(output, "  x4 %016lx   x5 %016lx\n",
+			regs->regs[4], regs->regs[5]);
+	output->printf(output, "  x6 %016lx   x7 %016lx\n",
+			regs->regs[6], regs->regs[7]);
+	output->printf(output, "  x8 %016lx   x9 %016lx\n",
+			regs->regs[8], regs->regs[9]);
+	output->printf(output, " x10 %016lx  x11 %016lx\n",
+			regs->regs[10], regs->regs[11]);
+	output->printf(output, " x12 %016lx  x13 %016lx\n",
+			regs->regs[12], regs->regs[13]);
+	output->printf(output, " x14 %016lx  x15 %016lx\n",
+			regs->regs[14], regs->regs[15]);
+	output->printf(output, " x16 %016lx  x17 %016lx\n",
+			regs->regs[16], regs->regs[17]);
+	output->printf(output, " x18 %016lx  x19 %016lx\n",
+			regs->regs[18], regs->regs[19]);
+	output->printf(output, " x20 %016lx  x21 %016lx\n",
+			regs->regs[20], regs->regs[21]);
+	output->printf(output, " x22 %016lx  x23 %016lx\n",
+			regs->regs[22], regs->regs[23]);
+	output->printf(output, " x24 %016lx  x25 %016lx\n",
+			regs->regs[24], regs->regs[25]);
+	output->printf(output, " x26 %016lx  x27 %016lx\n",
+			regs->regs[26], regs->regs[27]);
+	output->printf(output, " x28 %016lx  x29 %016lx\n",
+			regs->regs[28], regs->regs[29]);
+	output->printf(output, " x30 %016lx   sp %016lx\n",
+			regs->regs[30], regs->sp);
+	output->printf(output, "  pc %016lx cpsr %08x (%s)\n",
+			regs->pc, regs->pstate, mode_name(regs));
+}
+
+void fiq_debugger_dump_regs(struct fiq_debugger_output *output,
+		const struct pt_regs *regs)
+{
+	if (compat_user_mode(regs))
+		fiq_debugger_dump_regs_aarch32(output, regs);
+	else
+		fiq_debugger_dump_regs_aarch64(output, regs);
+}
+
+#define READ_SPECIAL_REG(x) ({ \
+	u64 val; \
+	asm volatile ("mrs %0, " # x : "=r"(val)); \
+	val; \
+})
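+/* e.g. READ_SPECIAL_REG(sp_el0) expands to an inline "mrs %0, sp_el0" */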
+
+void fiq_debugger_dump_allregs(struct fiq_debugger_output *output,
+		const struct pt_regs *regs)
+{
+	u32 current_el = READ_SPECIAL_REG(CurrentEl);
+	bool in_el2 = (current_el & PSR_MODE_MASK) >= PSR_MODE_EL2t;
+
+	fiq_debugger_dump_regs(output, regs);
+
+	output->printf(output, " sp_el0   %016lx\n",
+			READ_SPECIAL_REG(sp_el0));
+
+	if (in_el2)
+		output->printf(output, " sp_el1   %016lx\n",
+				READ_SPECIAL_REG(sp_el1));
+
+	output->printf(output, " elr_el1  %016lx\n",
+			READ_SPECIAL_REG(elr_el1));
+
+	output->printf(output, " spsr_el1 %08lx\n",
+			READ_SPECIAL_REG(spsr_el1));
+
+	if (in_el2) {
+		output->printf(output, " spsr_irq %08lx\n",
+				READ_SPECIAL_REG(spsr_irq));
+		output->printf(output, " spsr_abt %08lx\n",
+				READ_SPECIAL_REG(spsr_abt));
+		output->printf(output, " spsr_und %08lx\n",
+				READ_SPECIAL_REG(spsr_und));
+		output->printf(output, " spsr_fiq %08lx\n",
+				READ_SPECIAL_REG(spsr_fiq));
+		output->printf(output, " spsr_el2 %08lx\n",
+				READ_SPECIAL_REG(elr_el2));
+		output->printf(output, " spsr_el2 %08lx\n",
+				READ_SPECIAL_REG(spsr_el2));
+	}
+}
+
+struct stacktrace_state {
+	struct fiq_debugger_output *output;
+	unsigned int depth;
+};
+
+static int report_trace(struct stackframe *frame, void *d)
+{
+	struct stacktrace_state *sts = d;
+
+	if (sts->depth) {
+		sts->output->printf(sts->output, "%pF:\n", frame->pc);
+		sts->output->printf(sts->output,
+				"  pc %016lx   sp %016lx   fp %016lx\n",
+				frame->pc, frame->sp, frame->fp);
+		sts->depth--;
+		return 0;
+	}
+	sts->output->printf(sts->output, "  ...\n");
+
+	return sts->depth == 0;
+}
+
+void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output,
+		const struct pt_regs *regs, unsigned int depth, void *ssp)
+{
+	struct thread_info *real_thread_info = THREAD_INFO(ssp);
+	struct stacktrace_state sts;
+
+	sts.depth = depth;
+	sts.output = output;
+	*current_thread_info() = *real_thread_info;
+
+	if (!current)
+		output->printf(output, "current NULL\n");
+	else
+		output->printf(output, "pid: %d  comm: %s\n",
+			current->pid, current->comm);
+	fiq_debugger_dump_regs(output, regs);
+
+	if (!user_mode(regs)) {
+		struct stackframe frame;
+		frame.fp = regs->regs[29];
+		frame.sp = regs->sp;
+		frame.pc = regs->pc;
+		output->printf(output, "\n");
+		walk_stackframe(&frame, report_trace, &sts);
+	}
+}
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_priv.h b/drivers/staging/android/fiq_debugger/fiq_debugger_priv.h
new file mode 100644
index 0000000..d5d051f
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/fiq_debugger_priv.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _FIQ_DEBUGGER_PRIV_H_
+#define _FIQ_DEBUGGER_PRIV_H_
+
+#define THREAD_INFO(sp) ((struct thread_info *) \
+		((unsigned long)(sp) & ~(THREAD_SIZE - 1)))
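+/*
+ * Worked example: with THREAD_SIZE == 8192, an sp of 0xc0a03f2c masks
+ * down to 0xc0a02000, the base of the current kernel stack, which is
+ * where struct thread_info lives on this kernel.
+ */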
+
+struct fiq_debugger_output {
+	void (*printf)(struct fiq_debugger_output *output, const char *fmt, ...);
+};
+
+struct pt_regs;
+
+void fiq_debugger_dump_pc(struct fiq_debugger_output *output,
+		const struct pt_regs *regs);
+void fiq_debugger_dump_regs(struct fiq_debugger_output *output,
+		const struct pt_regs *regs);
+void fiq_debugger_dump_allregs(struct fiq_debugger_output *output,
+		const struct pt_regs *regs);
+void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output,
+		const struct pt_regs *regs, unsigned int depth, void *ssp);
+
+#endif
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h b/drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h
new file mode 100644
index 0000000..10c3c5d
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h
@@ -0,0 +1,94 @@
+/*
+ * drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h
+ *
+ * simple lockless ringbuffer
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+struct fiq_debugger_ringbuf {
+	int len;
+	int head;
+	int tail;
+	u8 buf[];
+};
+
+static inline struct fiq_debugger_ringbuf *fiq_debugger_ringbuf_alloc(int len)
+{
+	struct fiq_debugger_ringbuf *rbuf;
+
+	rbuf = kzalloc(sizeof(*rbuf) + len, GFP_KERNEL);
+	if (rbuf == NULL)
+		return NULL;
+
+	rbuf->len = len;
+	rbuf->head = 0;
+	rbuf->tail = 0;
+	smp_mb();
+
+	return rbuf;
+}
+
+static inline void fiq_debugger_ringbuf_free(struct fiq_debugger_ringbuf *rbuf)
+{
+	kfree(rbuf);
+}
+
+static inline int fiq_debugger_ringbuf_level(struct fiq_debugger_ringbuf *rbuf)
+{
+	int level = rbuf->head - rbuf->tail;
+
+	if (level < 0)
+		level = rbuf->len + level;
+
+	return level;
+}
+
+static inline int fiq_debugger_ringbuf_room(struct fiq_debugger_ringbuf *rbuf)
+{
+	return rbuf->len - fiq_debugger_ringbuf_level(rbuf) - 1;
+}
+
+static inline u8
+fiq_debugger_ringbuf_peek(struct fiq_debugger_ringbuf *rbuf, int i)
+{
+	return rbuf->buf[(rbuf->tail + i) % rbuf->len];
+}
+
+static inline int
+fiq_debugger_ringbuf_consume(struct fiq_debugger_ringbuf *rbuf, int count)
+{
+	count = min(count, fiq_debugger_ringbuf_level(rbuf));
+
+	rbuf->tail = (rbuf->tail + count) % rbuf->len;
+	smp_mb();
+
+	return count;
+}
+
+static inline int
+fiq_debugger_ringbuf_push(struct fiq_debugger_ringbuf *rbuf, u8 datum)
+{
+	if (fiq_debugger_ringbuf_room(rbuf) == 0)
+		return 0;
+
+	rbuf->buf[rbuf->head] = datum;
+	smp_mb();
+	rbuf->head = (rbuf->head + 1) % rbuf->len;
+	smp_mb();
+
+	return 1;
+}
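+
+/*
+ * Usage sketch (illustrative, single producer / single consumer): the
+ * debugger's receive path pushes characters while the tty side drains
+ * them.  fiq_debugger_ringbuf_room() keeps one slot unused so that a full
+ * buffer (head one behind tail) is never confused with an empty one
+ * (head == tail).
+ *
+ *	struct fiq_debugger_ringbuf *rbuf = fiq_debugger_ringbuf_alloc(1024);
+ *
+ *	fiq_debugger_ringbuf_push(rbuf, c);		// producer side
+ *
+ *	while (fiq_debugger_ringbuf_level(rbuf)) {	// consumer side
+ *		u8 c = fiq_debugger_ringbuf_peek(rbuf, 0);
+ *		fiq_debugger_ringbuf_consume(rbuf, 1);
+ *	}
+ */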
diff --git a/drivers/staging/android/fiq_debugger/fiq_watchdog.c b/drivers/staging/android/fiq_debugger/fiq_watchdog.c
new file mode 100644
index 0000000..194b541
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/fiq_watchdog.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/pstore_ram.h>
+
+#include "fiq_watchdog.h"
+#include "fiq_debugger_priv.h"
+
+static DEFINE_RAW_SPINLOCK(fiq_watchdog_lock);
+
+static void fiq_watchdog_printf(struct fiq_debugger_output *output,
+				const char *fmt, ...)
+{
+	char buf[256];
+	va_list ap;
+	int len;
+
+	va_start(ap, fmt);
+	len = vscnprintf(buf, sizeof(buf), fmt, ap);
+	va_end(ap);
+
+	ramoops_console_write_buf(buf, len);
+}
+
+static struct fiq_debugger_output fiq_watchdog_output = {
+	.printf = fiq_watchdog_printf,
+};
+
+void fiq_watchdog_triggered(const struct pt_regs *regs, void *svc_sp)
+{
+	char msg[24];
+	int len;
+
+	raw_spin_lock(&fiq_watchdog_lock);
+
+	len = scnprintf(msg, sizeof(msg), "watchdog fiq cpu %d\n",
+			THREAD_INFO(svc_sp)->cpu);
+	ramoops_console_write_buf(msg, len);
+
+	fiq_debugger_dump_stacktrace(&fiq_watchdog_output, regs, 100, svc_sp);
+
+	raw_spin_unlock(&fiq_watchdog_lock);
+}
diff --git a/drivers/staging/android/fiq_debugger/fiq_watchdog.h b/drivers/staging/android/fiq_debugger/fiq_watchdog.h
new file mode 100644
index 0000000..c6b507f
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/fiq_watchdog.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _FIQ_WATCHDOG_H_
+#define _FIQ_WATCHDOG_H_
+
+struct pt_regs;
+
+void fiq_watchdog_triggered(const struct pt_regs *regs, void *svc_sp);
+
+#endif
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
new file mode 100644
index 0000000..5f520b7
--- /dev/null
+++ b/drivers/staging/android/ion/Kconfig
@@ -0,0 +1,32 @@
+menuconfig ION
+	bool "Ion Memory Manager"
+	depends on HAVE_MEMBLOCK
+	select GENERIC_ALLOCATOR
+	select DMA_SHARED_BUFFER
+	---help---
+	  Choose this option to enable the ION Memory Manager,
+	  used by Android to efficiently allocate buffers
+	  from userspace that can be shared between drivers.
+	  If you're not using Android, it's probably safe to
+	  say N here.
+
+config ION_TEST
+	tristate "Ion Test Device"
+	depends on ION
+	help
+	  Choose this option to create a device that can be used to test the
+	  kernel and device side ION functions.
+
+config ION_TEGRA
+	tristate "Ion for Tegra"
+	depends on ARCH_TEGRA && ION
+	help
+	  Choose this option if you wish to use Ion on an NVIDIA Tegra.
+
+config ION_POOL_CACHE_POLICY
+	bool "Ion set page pool cache policy"
+	depends on ION
+	default y if X86
+	help
+	  Choose this option if you need to explicitly set the cache
+	  policy of the pages in the page pool.
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
new file mode 100644
index 0000000..75039b9
--- /dev/null
+++ b/drivers/staging/android/ion/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_ION) +=	ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
+			ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o
+obj-$(CONFIG_ION_TEST) += ion_test.o
+ifdef CONFIG_COMPAT
+obj-$(CONFIG_ION) += compat_ion.o
+endif
+obj-$(CONFIG_ION_TEGRA) += tegra/
diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c
new file mode 100644
index 0000000..a5e8c40
--- /dev/null
+++ b/drivers/staging/android/ion/compat_ion.c
@@ -0,0 +1,195 @@
+/*
+ * drivers/staging/android/ion/compat_ion.c
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/compat.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+#include "ion.h"
+#include "compat_ion.h"
+
+/* See drivers/staging/android/uapi/ion.h for the definition of these structs */
+struct compat_ion_allocation_data {
+	compat_size_t len;
+	compat_size_t align;
+	compat_uint_t heap_id_mask;
+	compat_uint_t flags;
+	compat_int_t handle;
+};
+
+struct compat_ion_custom_data {
+	compat_uint_t cmd;
+	compat_ulong_t arg;
+};
+
+struct compat_ion_handle_data {
+	compat_int_t handle;
+};
+
+#define COMPAT_ION_IOC_ALLOC	_IOWR(ION_IOC_MAGIC, 0, \
+				      struct compat_ion_allocation_data)
+#define COMPAT_ION_IOC_FREE	_IOWR(ION_IOC_MAGIC, 1, \
+				      struct compat_ion_handle_data)
+#define COMPAT_ION_IOC_CUSTOM	_IOWR(ION_IOC_MAGIC, 6, \
+				      struct compat_ion_custom_data)
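+
+/*
+ * These command numbers differ from the native ones only in the argument
+ * size that _IOWR() encodes: e.g. compat_size_t is 32 bits wide while the
+ * native size_t is 64 bits on arm64, so an ioctl issued by a 32-bit
+ * process never matches ION_IOC_ALLOC directly.  Each case below repacks
+ * the 32-bit layout into a native struct obtained from
+ * compat_alloc_user_space() and forwards that to the driver's
+ * unlocked_ioctl.
+ */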
+
+static int compat_get_ion_allocation_data(
+			struct compat_ion_allocation_data __user *data32,
+			struct ion_allocation_data __user *data)
+{
+	compat_size_t s;
+	compat_uint_t u;
+	compat_int_t i;
+	int err;
+
+	err = get_user(s, &data32->len);
+	err |= put_user(s, &data->len);
+	err |= get_user(s, &data32->align);
+	err |= put_user(s, &data->align);
+	err |= get_user(u, &data32->heap_id_mask);
+	err |= put_user(u, &data->heap_id_mask);
+	err |= get_user(u, &data32->flags);
+	err |= put_user(u, &data->flags);
+	err |= get_user(i, &data32->handle);
+	err |= put_user(i, &data->handle);
+
+	return err;
+}
+
+static int compat_get_ion_handle_data(
+			struct compat_ion_handle_data __user *data32,
+			struct ion_handle_data __user *data)
+{
+	compat_int_t i;
+	int err;
+
+	err = get_user(i, &data32->handle);
+	err |= put_user(i, &data->handle);
+
+	return err;
+}
+
+static int compat_put_ion_allocation_data(
+			struct compat_ion_allocation_data __user *data32,
+			struct ion_allocation_data __user *data)
+{
+	compat_size_t s;
+	compat_uint_t u;
+	compat_int_t i;
+	int err;
+
+	err = get_user(s, &data->len);
+	err |= put_user(s, &data32->len);
+	err |= get_user(s, &data->align);
+	err |= put_user(s, &data32->align);
+	err |= get_user(u, &data->heap_id_mask);
+	err |= put_user(u, &data32->heap_id_mask);
+	err |= get_user(u, &data->flags);
+	err |= put_user(u, &data32->flags);
+	err |= get_user(i, &data->handle);
+	err |= put_user(i, &data32->handle);
+
+	return err;
+}
+
+static int compat_get_ion_custom_data(
+			struct compat_ion_custom_data __user *data32,
+			struct ion_custom_data __user *data)
+{
+	compat_uint_t cmd;
+	compat_ulong_t arg;
+	int err;
+
+	err = get_user(cmd, &data32->cmd);
+	err |= put_user(cmd, &data->cmd);
+	err |= get_user(arg, &data32->arg);
+	err |= put_user(arg, &data->arg);
+
+	return err;
+}
+
+long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	long ret;
+
+	if (!filp->f_op || !filp->f_op->unlocked_ioctl)
+		return -ENOTTY;
+
+	switch (cmd) {
+	case COMPAT_ION_IOC_ALLOC:
+	{
+		struct compat_ion_allocation_data __user *data32;
+		struct ion_allocation_data __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_ion_allocation_data(data32, data);
+		if (err)
+			return err;
+		ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_ALLOC,
+							(unsigned long)data);
+		err = compat_put_ion_allocation_data(data32, data);
+		return ret ? ret : err;
+	}
+	case COMPAT_ION_IOC_FREE:
+	{
+		struct compat_ion_handle_data __user *data32;
+		struct ion_handle_data __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_ion_handle_data(data32, data);
+		if (err)
+			return err;
+
+		return filp->f_op->unlocked_ioctl(filp, ION_IOC_FREE,
+							(unsigned long)data);
+	}
+	case COMPAT_ION_IOC_CUSTOM: {
+		struct compat_ion_custom_data __user *data32;
+		struct ion_custom_data __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_ion_custom_data(data32, data);
+		if (err)
+			return err;
+
+		return filp->f_op->unlocked_ioctl(filp, ION_IOC_CUSTOM,
+							(unsigned long)data);
+	}
+	case ION_IOC_SHARE:
+	case ION_IOC_MAP:
+	case ION_IOC_IMPORT:
+	case ION_IOC_SYNC:
+		return filp->f_op->unlocked_ioctl(filp, cmd,
+						(unsigned long)compat_ptr(arg));
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
diff --git a/drivers/staging/android/ion/compat_ion.h b/drivers/staging/android/ion/compat_ion.h
new file mode 100644
index 0000000..3a9c8c0
--- /dev/null
+++ b/drivers/staging/android/ion/compat_ion.h
@@ -0,0 +1,30 @@
+/*
+ * drivers/staging/android/ion/compat_ion.h
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_COMPAT_ION_H
+#define _LINUX_COMPAT_ION_H
+
+#if IS_ENABLED(CONFIG_COMPAT)
+
+long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+
+#else
+
+#define compat_ion_ioctl  NULL
+
+#endif /* CONFIG_COMPAT */
+#endif /* _LINUX_COMPAT_ION_H */
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
new file mode 100644
index 0000000..698e363
--- /dev/null
+++ b/drivers/staging/android/ion/ion.c
@@ -0,0 +1,1644 @@
+/*
+ * drivers/staging/android/ion/ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/freezer.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/miscdevice.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+#include <linux/idr.h>
+
+#include "ion.h"
+#include "ion_priv.h"
+#include "compat_ion.h"
+
+/**
+ * struct ion_device - the metadata of the ion device node
+ * @dev:		the actual misc device
+ * @buffers:		an rb tree of all the existing buffers
+ * @buffer_lock:	lock protecting the tree of buffers
+ * @lock:		rwsem protecting the tree of heaps and clients
+ * @heaps:		list of all the heaps in the system
+ * @custom_ioctl:	hook for device-specific ioctls
+ * @clients:		an rb tree of all the clients created from userspace
+ * @debug_root:		debugfs root directory for the device
+ * @heaps_debug_root:	debugfs directory holding the per-heap entries
+ * @clients_debug_root:	debugfs directory holding the per-client entries
+ */
+struct ion_device {
+	struct miscdevice dev;
+	struct rb_root buffers;
+	struct mutex buffer_lock;
+	struct rw_semaphore lock;
+	struct plist_head heaps;
+	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
+			      unsigned long arg);
+	struct rb_root clients;
+	struct dentry *debug_root;
+	struct dentry *heaps_debug_root;
+	struct dentry *clients_debug_root;
+};
+
+/**
+ * struct ion_client - a process/hw block local address space
+ * @node:		node in the tree of all clients
+ * @dev:		backpointer to ion device
+ * @handles:		an rb tree of all the handles in this client
+ * @idr:		an idr space for allocating handle ids
+ * @lock:		lock protecting the tree of handles
+ * @name:		used for debugging
+ * @display_name:	used for debugging (unique version of @name)
+ * @display_serial:	used for debugging (to make display_name unique)
+ * @task:		used for debugging
+ *
+ * A client represents a list of buffers this client may access.
+ * The mutex stored here is used to protect both the tree of handles
+ * and the handles themselves, and should be held while modifying either.
+ */
+struct ion_client {
+	struct rb_node node;
+	struct ion_device *dev;
+	struct rb_root handles;
+	struct idr idr;
+	struct mutex lock;
+	const char *name;
+	char *display_name;
+	int display_serial;
+	struct task_struct *task;
+	pid_t pid;
+	struct dentry *debug_root;
+};
+
+/**
+ * ion_handle - a client local reference to a buffer
+ * @ref:		reference count
+ * @client:		back pointer to the client the buffer resides in
+ * @buffer:		pointer to the buffer
+ * @node:		node in the client's handle rbtree
+ * @kmap_cnt:		count of times this client has mapped to kernel
+ * @id:			client-unique id allocated by client->idr
+ *
+ * Modifications to node and kmap_cnt should be protected by the
+ * lock in the client.  Other fields are never changed after initialization.
+ */
+struct ion_handle {
+	struct kref ref;
+	struct ion_client *client;
+	struct ion_buffer *buffer;
+	struct rb_node node;
+	unsigned int kmap_cnt;
+	int id;
+};
+
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
+{
+	return (buffer->flags & ION_FLAG_CACHED) &&
+		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
+}
+
+bool ion_buffer_cached(struct ion_buffer *buffer)
+{
+	return !!(buffer->flags & ION_FLAG_CACHED);
+}
+
+static inline struct page *ion_buffer_page(struct page *page)
+{
+	return (struct page *)((unsigned long)page & ~(1UL));
+}
+
+static inline bool ion_buffer_page_is_dirty(struct page *page)
+{
+	return !!((unsigned long)page & 1UL);
+}
+
+static inline void ion_buffer_page_dirty(struct page **page)
+{
+	*page = (struct page *)((unsigned long)(*page) | 1UL);
+}
+
+static inline void ion_buffer_page_clean(struct page **page)
+{
+	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
+}
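+
+/*
+ * The four helpers above borrow bit 0 of each buffer->pages[] entry as a
+ * per-page dirty flag; struct page pointers are always at least word
+ * aligned, so that bit is otherwise zero.  ion_vm_fault() sets the flag
+ * when userspace touches a page, ion_buffer_sync_for_device() tests and
+ * clears it, and ion_buffer_page() must be used to strip the tag before
+ * dereferencing the pointer.
+ */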
+
+/* this function should only be called while dev->lock is held */
+static void ion_buffer_add(struct ion_device *dev,
+			   struct ion_buffer *buffer)
+{
+	struct rb_node **p = &dev->buffers.rb_node;
+	struct rb_node *parent = NULL;
+	struct ion_buffer *entry;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct ion_buffer, node);
+
+		if (buffer < entry) {
+			p = &(*p)->rb_left;
+		} else if (buffer > entry) {
+			p = &(*p)->rb_right;
+		} else {
+			pr_err("%s: buffer already found.", __func__);
+			BUG();
+		}
+	}
+
+	rb_link_node(&buffer->node, parent, p);
+	rb_insert_color(&buffer->node, &dev->buffers);
+}
+
+/* this function should only be called while dev->lock is held */
+static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
+				     struct ion_device *dev,
+				     unsigned long len,
+				     unsigned long align,
+				     unsigned long flags)
+{
+	struct ion_buffer *buffer;
+	struct sg_table *table;
+	struct scatterlist *sg;
+	int i, ret;
+
+	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
+	if (!buffer)
+		return ERR_PTR(-ENOMEM);
+
+	buffer->heap = heap;
+	buffer->flags = flags;
+	kref_init(&buffer->ref);
+
+	ret = heap->ops->allocate(heap, buffer, len, align, flags);
+
+	if (ret) {
+		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
+			goto err2;
+
+		ion_heap_freelist_drain(heap, 0);
+		ret = heap->ops->allocate(heap, buffer, len, align,
+					  flags);
+		if (ret)
+			goto err2;
+	}
+
+	buffer->dev = dev;
+	buffer->size = len;
+
+	table = heap->ops->map_dma(heap, buffer);
+	if (WARN_ONCE(table == NULL,
+			"heap->ops->map_dma should return ERR_PTR on error"))
+		table = ERR_PTR(-EINVAL);
+	if (IS_ERR(table)) {
+		heap->ops->free(buffer);
+		kfree(buffer);
+		return ERR_CAST(table);
+	}
+	buffer->sg_table = table;
+	if (ion_buffer_fault_user_mappings(buffer)) {
+		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+		struct scatterlist *sg;
+		int i, j, k = 0;
+
+		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
+		if (!buffer->pages) {
+			ret = -ENOMEM;
+			/* also undo map_dma and the heap allocation */
+			goto err;
+		}
+
+		for_each_sg(table->sgl, sg, table->nents, i) {
+			struct page *page = sg_page(sg);
+
+			for (j = 0; j < sg->length / PAGE_SIZE; j++)
+				buffer->pages[k++] = page++;
+		}
+	}
+
+	INIT_LIST_HEAD(&buffer->vmas);
+	mutex_init(&buffer->lock);
+	/*
+	 * This will set up dma addresses for the sglist -- it is not
+	 * technically correct as per the dma api -- a specific device
+	 * isn't really taking ownership here.  However, in practice on
+	 * our systems the only dma_address space is physical addresses.
+	 * Additionally, we can't afford the overhead of invalidating every
+	 * allocation via dma_map_sg.  The implicit contract here is that
+	 * memory coming from the heaps is ready for dma, i.e. if it has a
+	 * cached mapping that mapping has been invalidated.
+	 */
+	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
+		sg_dma_address(sg) = sg_phys(sg);
+	mutex_lock(&dev->buffer_lock);
+	ion_buffer_add(dev, buffer);
+	mutex_unlock(&dev->buffer_lock);
+	return buffer;
+
+err:
+	heap->ops->unmap_dma(heap, buffer);
+	heap->ops->free(buffer);
+	if (buffer->pages)
+		vfree(buffer->pages);
+err2:
+	kfree(buffer);
+	return ERR_PTR(ret);
+}
+
+void ion_buffer_destroy(struct ion_buffer *buffer)
+{
+	if (WARN_ON(buffer->kmap_cnt > 0))
+		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
+	buffer->heap->ops->free(buffer);
+	if (buffer->pages)
+		vfree(buffer->pages);
+	kfree(buffer);
+}
+
+static void _ion_buffer_destroy(struct kref *kref)
+{
+	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
+	struct ion_heap *heap = buffer->heap;
+	struct ion_device *dev = buffer->dev;
+
+	mutex_lock(&dev->buffer_lock);
+	rb_erase(&buffer->node, &dev->buffers);
+	mutex_unlock(&dev->buffer_lock);
+
+	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+		ion_heap_freelist_add(heap, buffer);
+	else
+		ion_buffer_destroy(buffer);
+}
+
+static void ion_buffer_get(struct ion_buffer *buffer)
+{
+	kref_get(&buffer->ref);
+}
+
+static int ion_buffer_put(struct ion_buffer *buffer)
+{
+	return kref_put(&buffer->ref, _ion_buffer_destroy);
+}
+
+static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
+{
+	mutex_lock(&buffer->lock);
+	buffer->handle_count++;
+	mutex_unlock(&buffer->lock);
+}
+
+static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
+{
+	/*
+	 * when a buffer is removed from a handle, if it is not in
+	 * any other handles, copy the taskcomm and the pid of the
+	 * process it's being removed from into the buffer.  At this
+	 * point there will be no way to track what processes this buffer is
+	 * being used by, it only exists as a dma_buf file descriptor.
+	 * The taskcomm and pid can provide a debug hint as to where this fd
+	 * is in the system
+	 */
+	mutex_lock(&buffer->lock);
+	buffer->handle_count--;
+	BUG_ON(buffer->handle_count < 0);
+	if (!buffer->handle_count) {
+		struct task_struct *task;
+
+		task = current->group_leader;
+		get_task_comm(buffer->task_comm, task);
+		buffer->pid = task_pid_nr(task);
+	}
+	mutex_unlock(&buffer->lock);
+}
+
+static struct ion_handle *ion_handle_create(struct ion_client *client,
+				     struct ion_buffer *buffer)
+{
+	struct ion_handle *handle;
+
+	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
+	if (!handle)
+		return ERR_PTR(-ENOMEM);
+	kref_init(&handle->ref);
+	RB_CLEAR_NODE(&handle->node);
+	handle->client = client;
+	ion_buffer_get(buffer);
+	ion_buffer_add_to_handle(buffer);
+	handle->buffer = buffer;
+
+	return handle;
+}
+
+static void ion_handle_kmap_put(struct ion_handle *);
+
+static void ion_handle_destroy(struct kref *kref)
+{
+	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
+	struct ion_client *client = handle->client;
+	struct ion_buffer *buffer = handle->buffer;
+
+	mutex_lock(&buffer->lock);
+	while (handle->kmap_cnt)
+		ion_handle_kmap_put(handle);
+	mutex_unlock(&buffer->lock);
+
+	idr_remove(&client->idr, handle->id);
+	if (!RB_EMPTY_NODE(&handle->node))
+		rb_erase(&handle->node, &client->handles);
+
+	ion_buffer_remove_from_handle(buffer);
+	ion_buffer_put(buffer);
+
+	kfree(handle);
+}
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
+{
+	return handle->buffer;
+}
+
+static void ion_handle_get(struct ion_handle *handle)
+{
+	kref_get(&handle->ref);
+}
+
+static int ion_handle_put(struct ion_handle *handle)
+{
+	struct ion_client *client = handle->client;
+	int ret;
+
+	mutex_lock(&client->lock);
+	ret = kref_put(&handle->ref, ion_handle_destroy);
+	mutex_unlock(&client->lock);
+
+	return ret;
+}
+
+static struct ion_handle *ion_handle_lookup(struct ion_client *client,
+					    struct ion_buffer *buffer)
+{
+	struct rb_node *n = client->handles.rb_node;
+
+	while (n) {
+		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
+
+		if (buffer < entry->buffer)
+			n = n->rb_left;
+		else if (buffer > entry->buffer)
+			n = n->rb_right;
+		else
+			return entry;
+	}
+	return ERR_PTR(-EINVAL);
+}
+
+static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
+						int id)
+{
+	struct ion_handle *handle;
+
+	mutex_lock(&client->lock);
+	handle = idr_find(&client->idr, id);
+	if (handle)
+		ion_handle_get(handle);
+	mutex_unlock(&client->lock);
+
+	return handle ? handle : ERR_PTR(-EINVAL);
+}
+
+static bool ion_handle_validate(struct ion_client *client,
+				struct ion_handle *handle)
+{
+	WARN_ON(!mutex_is_locked(&client->lock));
+	return (idr_find(&client->idr, handle->id) == handle);
+}
+
+static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
+{
+	int id;
+	struct rb_node **p = &client->handles.rb_node;
+	struct rb_node *parent = NULL;
+	struct ion_handle *entry;
+
+	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
+	if (id < 0)
+		return id;
+
+	handle->id = id;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct ion_handle, node);
+
+		if (handle->buffer < entry->buffer)
+			p = &(*p)->rb_left;
+		else if (handle->buffer > entry->buffer)
+			p = &(*p)->rb_right;
+		else
+			WARN(1, "%s: buffer already found.", __func__);
+	}
+
+	rb_link_node(&handle->node, parent, p);
+	rb_insert_color(&handle->node, &client->handles);
+
+	return 0;
+}
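+
+/*
+ * Handles are indexed twice: client->idr maps the integer id handed back
+ * to userspace onto the handle, while client->handles is an rb tree keyed
+ * by buffer pointer so ion_handle_lookup() can find an existing handle
+ * when the same buffer is imported into a client a second time.
+ */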
+
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+			     size_t align, unsigned int heap_id_mask,
+			     unsigned int flags)
+{
+	struct ion_handle *handle;
+	struct ion_device *dev = client->dev;
+	struct ion_buffer *buffer = NULL;
+	struct ion_heap *heap;
+	int ret;
+
+	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
+		 len, align, heap_id_mask, flags);
+	/*
+	 * traverse the list of heaps available in this system in priority
+	 * order.  If the heap type is supported by the client and matches
+	 * the request of the caller, allocate from it.  Repeat until the
+	 * allocation has succeeded or all heaps have been tried.
+	 */
+	len = PAGE_ALIGN(len);
+
+	if (!len)
+		return ERR_PTR(-EINVAL);
+
+	down_read(&dev->lock);
+	plist_for_each_entry(heap, &dev->heaps, node) {
+		/* if the caller didn't specify this heap id */
+		if (!((1 << heap->id) & heap_id_mask))
+			continue;
+		buffer = ion_buffer_create(heap, dev, len, align, flags);
+		if (!IS_ERR(buffer))
+			break;
+	}
+	up_read(&dev->lock);
+
+	if (buffer == NULL)
+		return ERR_PTR(-ENODEV);
+
+	if (IS_ERR(buffer))
+		return ERR_CAST(buffer);
+
+	handle = ion_handle_create(client, buffer);
+
+	/*
+	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
+	 * and ion_handle_create will take a second reference, drop one here
+	 */
+	ion_buffer_put(buffer);
+
+	if (IS_ERR(handle))
+		return handle;
+
+	mutex_lock(&client->lock);
+	ret = ion_handle_add(client, handle);
+	mutex_unlock(&client->lock);
+	if (ret) {
+		ion_handle_put(handle);
+		handle = ERR_PTR(ret);
+	}
+
+	return handle;
+}
+EXPORT_SYMBOL(ion_alloc);
+
+void ion_free(struct ion_client *client, struct ion_handle *handle)
+{
+	bool valid_handle;
+
+	BUG_ON(client != handle->client);
+
+	mutex_lock(&client->lock);
+	valid_handle = ion_handle_validate(client, handle);
+
+	if (!valid_handle) {
+		WARN(1, "%s: invalid handle passed to free.\n", __func__);
+		mutex_unlock(&client->lock);
+		return;
+	}
+	mutex_unlock(&client->lock);
+	ion_handle_put(handle);
+}
+EXPORT_SYMBOL(ion_free);
+
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+	     ion_phys_addr_t *addr, size_t *len)
+{
+	struct ion_buffer *buffer;
+	int ret;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		mutex_unlock(&client->lock);
+		return -EINVAL;
+	}
+
+	buffer = handle->buffer;
+
+	if (!buffer->heap->ops->phys) {
+		pr_err("%s: ion_phys is not implemented by this heap.\n",
+		       __func__);
+		mutex_unlock(&client->lock);
+		return -ENODEV;
+	}
+	mutex_unlock(&client->lock);
+	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
+	return ret;
+}
+EXPORT_SYMBOL(ion_phys);
+
+static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
+{
+	void *vaddr;
+
+	if (buffer->kmap_cnt) {
+		buffer->kmap_cnt++;
+		return buffer->vaddr;
+	}
+	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
+	if (WARN_ONCE(vaddr == NULL,
+			"heap->ops->map_kernel should return ERR_PTR on error"))
+		return ERR_PTR(-EINVAL);
+	if (IS_ERR(vaddr))
+		return vaddr;
+	buffer->vaddr = vaddr;
+	buffer->kmap_cnt++;
+	return vaddr;
+}
+
+static void *ion_handle_kmap_get(struct ion_handle *handle)
+{
+	struct ion_buffer *buffer = handle->buffer;
+	void *vaddr;
+
+	if (handle->kmap_cnt) {
+		handle->kmap_cnt++;
+		return buffer->vaddr;
+	}
+	vaddr = ion_buffer_kmap_get(buffer);
+	if (IS_ERR(vaddr))
+		return vaddr;
+	handle->kmap_cnt++;
+	return vaddr;
+}
+
+static void ion_buffer_kmap_put(struct ion_buffer *buffer)
+{
+	buffer->kmap_cnt--;
+	if (!buffer->kmap_cnt) {
+		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+		buffer->vaddr = NULL;
+	}
+}
+
+static void ion_handle_kmap_put(struct ion_handle *handle)
+{
+	struct ion_buffer *buffer = handle->buffer;
+
+	handle->kmap_cnt--;
+	if (!handle->kmap_cnt)
+		ion_buffer_kmap_put(buffer);
+}
+
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+	struct ion_buffer *buffer;
+	void *vaddr;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		pr_err("%s: invalid handle passed to map_kernel.\n",
+		       __func__);
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-EINVAL);
+	}
+
+	buffer = handle->buffer;
+
+	if (!handle->buffer->heap->ops->map_kernel) {
+		pr_err("%s: map_kernel is not implemented by this heap.\n",
+		       __func__);
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-ENODEV);
+	}
+
+	mutex_lock(&buffer->lock);
+	vaddr = ion_handle_kmap_get(handle);
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+	return vaddr;
+}
+EXPORT_SYMBOL(ion_map_kernel);
+
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+	struct ion_buffer *buffer;
+
+	mutex_lock(&client->lock);
+	buffer = handle->buffer;
+	mutex_lock(&buffer->lock);
+	ion_handle_kmap_put(handle);
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+}
+EXPORT_SYMBOL(ion_unmap_kernel);
+
+static int ion_debug_client_show(struct seq_file *s, void *unused)
+{
+	struct ion_client *client = s->private;
+	struct rb_node *n;
+	size_t sizes[ION_NUM_HEAP_IDS] = {0};
+	const char *names[ION_NUM_HEAP_IDS] = {NULL};
+	int i;
+
+	mutex_lock(&client->lock);
+	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+		struct ion_handle *handle = rb_entry(n, struct ion_handle,
+						     node);
+		unsigned int id = handle->buffer->heap->id;
+
+		if (!names[id])
+			names[id] = handle->buffer->heap->name;
+		sizes[id] += handle->buffer->size;
+	}
+	mutex_unlock(&client->lock);
+
+	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
+	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
+		if (!names[i])
+			continue;
+		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
+	}
+	return 0;
+}
+
+static int ion_debug_client_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ion_debug_client_show, inode->i_private);
+}
+
+static const struct file_operations debug_client_fops = {
+	.open = ion_debug_client_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int ion_get_client_serial(const struct rb_root *root,
+					const unsigned char *name)
+{
+	int serial = -1;
+	struct rb_node *node;
+
+	for (node = rb_first(root); node; node = rb_next(node)) {
+		struct ion_client *client = rb_entry(node, struct ion_client,
+						node);
+
+		if (strcmp(client->name, name))
+			continue;
+		serial = max(serial, client->display_serial);
+	}
+	return serial + 1;
+}
+
+struct ion_client *ion_client_create(struct ion_device *dev,
+				     const char *name)
+{
+	struct ion_client *client;
+	struct task_struct *task;
+	struct rb_node **p;
+	struct rb_node *parent = NULL;
+	struct ion_client *entry;
+	pid_t pid;
+
+	if (!name) {
+		pr_err("%s: Name cannot be null\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	get_task_struct(current->group_leader);
+	task_lock(current->group_leader);
+	pid = task_pid_nr(current->group_leader);
+	/*
+	 * don't bother to store task struct for kernel threads,
+	 * they can't be killed anyway
+	 */
+	if (current->group_leader->flags & PF_KTHREAD) {
+		put_task_struct(current->group_leader);
+		task = NULL;
+	} else {
+		task = current->group_leader;
+	}
+	task_unlock(current->group_leader);
+
+	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
+	if (!client)
+		goto err_put_task_struct;
+
+	client->dev = dev;
+	client->handles = RB_ROOT;
+	idr_init(&client->idr);
+	mutex_init(&client->lock);
+	client->task = task;
+	client->pid = pid;
+	client->name = kstrdup(name, GFP_KERNEL);
+	if (!client->name)
+		goto err_free_client;
+
+	down_write(&dev->lock);
+	client->display_serial = ion_get_client_serial(&dev->clients, name);
+	client->display_name = kasprintf(
+		GFP_KERNEL, "%s-%d", name, client->display_serial);
+	if (!client->display_name) {
+		up_write(&dev->lock);
+		goto err_free_client_name;
+	}
+	p = &dev->clients.rb_node;
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct ion_client, node);
+
+		if (client < entry)
+			p = &(*p)->rb_left;
+		else if (client > entry)
+			p = &(*p)->rb_right;
+	}
+	rb_link_node(&client->node, parent, p);
+	rb_insert_color(&client->node, &dev->clients);
+
+	client->debug_root = debugfs_create_file(client->display_name, 0664,
+						dev->clients_debug_root,
+						client, &debug_client_fops);
+	if (!client->debug_root) {
+		char buf[256], *path;
+
+		path = dentry_path(dev->clients_debug_root, buf, 256);
+		pr_err("Failed to create client debugfs at %s/%s\n",
+			path, client->display_name);
+	}
+
+	up_write(&dev->lock);
+
+	return client;
+
+err_free_client_name:
+	kfree(client->name);
+err_free_client:
+	kfree(client);
+err_put_task_struct:
+	if (task)
+		put_task_struct(current->group_leader);
+	return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL(ion_client_create);
+
+void ion_client_destroy(struct ion_client *client)
+{
+	struct ion_device *dev = client->dev;
+	struct rb_node *n;
+
+	pr_debug("%s: %d\n", __func__, __LINE__);
+	while ((n = rb_first(&client->handles))) {
+		struct ion_handle *handle = rb_entry(n, struct ion_handle,
+						     node);
+		ion_handle_destroy(&handle->ref);
+	}
+
+	idr_destroy(&client->idr);
+
+	down_write(&dev->lock);
+	if (client->task)
+		put_task_struct(client->task);
+	rb_erase(&client->node, &dev->clients);
+	debugfs_remove_recursive(client->debug_root);
+	up_write(&dev->lock);
+
+	kfree(client->display_name);
+	kfree(client->name);
+	kfree(client);
+}
+EXPORT_SYMBOL(ion_client_destroy);
+
+struct sg_table *ion_sg_table(struct ion_client *client,
+			      struct ion_handle *handle)
+{
+	struct ion_buffer *buffer;
+	struct sg_table *table;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		pr_err("%s: invalid handle passed to map_dma.\n",
+		       __func__);
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-EINVAL);
+	}
+	buffer = handle->buffer;
+	table = buffer->sg_table;
+	mutex_unlock(&client->lock);
+	return table;
+}
+EXPORT_SYMBOL(ion_sg_table);
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+				       struct device *dev,
+				       enum dma_data_direction direction);
+
+static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
+					enum dma_data_direction direction)
+{
+	struct dma_buf *dmabuf = attachment->dmabuf;
+	struct ion_buffer *buffer = dmabuf->priv;
+
+	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
+	return buffer->sg_table;
+}
+
+static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
+			      struct sg_table *table,
+			      enum dma_data_direction direction)
+{
+}
+
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+		size_t size, enum dma_data_direction dir)
+{
+	struct scatterlist sg;
+
+	sg_init_table(&sg, 1);
+	sg_set_page(&sg, page, size, 0);
+	/*
+	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
+	 * for the targeted device, but this works on the currently targeted
+	 * hardware.
+	 */
+	sg_dma_address(&sg) = page_to_phys(page);
+	dma_sync_sg_for_device(dev, &sg, 1, dir);
+}
+
+struct ion_vma_list {
+	struct list_head list;
+	struct vm_area_struct *vma;
+};
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+				       struct device *dev,
+				       enum dma_data_direction dir)
+{
+	struct ion_vma_list *vma_list;
+	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+	int i;
+
+	pr_debug("%s: syncing for device %s\n", __func__,
+		 dev ? dev_name(dev) : "null");
+
+	if (!ion_buffer_fault_user_mappings(buffer))
+		return;
+
+	mutex_lock(&buffer->lock);
+	for (i = 0; i < pages; i++) {
+		struct page *page = buffer->pages[i];
+
+		if (ion_buffer_page_is_dirty(page))
+			ion_pages_sync_for_device(dev, ion_buffer_page(page),
+							PAGE_SIZE, dir);
+
+		ion_buffer_page_clean(buffer->pages + i);
+	}
+	list_for_each_entry(vma_list, &buffer->vmas, list) {
+		struct vm_area_struct *vma = vma_list->vma;
+
+		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
+			       NULL);
+	}
+	mutex_unlock(&buffer->lock);
+}
+
+static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct ion_buffer *buffer = vma->vm_private_data;
+	unsigned long pfn;
+	int ret;
+
+	mutex_lock(&buffer->lock);
+	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
+	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
+
+	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
+	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+	mutex_unlock(&buffer->lock);
+	if (ret)
+		return VM_FAULT_ERROR;
+
+	return VM_FAULT_NOPAGE;
+}
+
+static void ion_vm_open(struct vm_area_struct *vma)
+{
+	struct ion_buffer *buffer = vma->vm_private_data;
+	struct ion_vma_list *vma_list;
+
+	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
+	if (!vma_list)
+		return;
+	vma_list->vma = vma;
+	mutex_lock(&buffer->lock);
+	list_add(&vma_list->list, &buffer->vmas);
+	mutex_unlock(&buffer->lock);
+	pr_debug("%s: adding %p\n", __func__, vma);
+}
+
+static void ion_vm_close(struct vm_area_struct *vma)
+{
+	struct ion_buffer *buffer = vma->vm_private_data;
+	struct ion_vma_list *vma_list, *tmp;
+
+	pr_debug("%s\n", __func__);
+	mutex_lock(&buffer->lock);
+	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
+		if (vma_list->vma != vma)
+			continue;
+		list_del(&vma_list->list);
+		kfree(vma_list);
+		pr_debug("%s: deleting %p\n", __func__, vma);
+		break;
+	}
+	mutex_unlock(&buffer->lock);
+}
+
+static struct vm_operations_struct ion_vma_ops = {
+	.open = ion_vm_open,
+	.close = ion_vm_close,
+	.fault = ion_vm_fault,
+};
+
+static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+	int ret = 0;
+
+	if (!buffer->heap->ops->map_user) {
+		pr_err("%s: this heap does not define a method for mapping "
+		       "to userspace\n", __func__);
+		return -EINVAL;
+	}
+
+	if (ion_buffer_fault_user_mappings(buffer)) {
+		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
+							VM_DONTDUMP;
+		vma->vm_private_data = buffer;
+		vma->vm_ops = &ion_vma_ops;
+		ion_vm_open(vma);
+		return 0;
+	}
+
+	if (!(buffer->flags & ION_FLAG_CACHED))
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	mutex_lock(&buffer->lock);
+	/* now map it to userspace */
+	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
+	mutex_unlock(&buffer->lock);
+
+	if (ret)
+		pr_err("%s: failure mapping buffer to userspace\n",
+		       __func__);
+
+	return ret;
+}
+
+static void ion_dma_buf_release(struct dma_buf *dmabuf)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+
+	ion_buffer_put(buffer);
+}
+
+static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+
+	return buffer->vaddr + offset * PAGE_SIZE;
+}
+
+static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
+			       void *ptr)
+{
+}
+
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
+					size_t len,
+					enum dma_data_direction direction)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+	void *vaddr;
+
+	if (!buffer->heap->ops->map_kernel) {
+		pr_err("%s: map kernel is not implemented by this heap.\n",
+		       __func__);
+		return -ENODEV;
+	}
+
+	mutex_lock(&buffer->lock);
+	vaddr = ion_buffer_kmap_get(buffer);
+	mutex_unlock(&buffer->lock);
+	if (IS_ERR(vaddr))
+		return PTR_ERR(vaddr);
+	return 0;
+}
+
+static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
+				       size_t len,
+				       enum dma_data_direction direction)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+
+	mutex_lock(&buffer->lock);
+	ion_buffer_kmap_put(buffer);
+	mutex_unlock(&buffer->lock);
+}
+
+static struct dma_buf_ops dma_buf_ops = {
+	.map_dma_buf = ion_map_dma_buf,
+	.unmap_dma_buf = ion_unmap_dma_buf,
+	.mmap = ion_mmap,
+	.release = ion_dma_buf_release,
+	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
+	.end_cpu_access = ion_dma_buf_end_cpu_access,
+	.kmap_atomic = ion_dma_buf_kmap,
+	.kunmap_atomic = ion_dma_buf_kunmap,
+	.kmap = ion_dma_buf_kmap,
+	.kunmap = ion_dma_buf_kunmap,
+};
+
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+						struct ion_handle *handle)
+{
+	struct ion_buffer *buffer;
+	struct dma_buf *dmabuf;
+	bool valid_handle;
+
+	mutex_lock(&client->lock);
+	valid_handle = ion_handle_validate(client, handle);
+	if (!valid_handle) {
+		WARN(1, "%s: invalid handle passed to share.\n", __func__);
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-EINVAL);
+	}
+	buffer = handle->buffer;
+	ion_buffer_get(buffer);
+	mutex_unlock(&client->lock);
+
+	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
+	if (IS_ERR(dmabuf))
+		ion_buffer_put(buffer);
+
+	return dmabuf;
+}
+EXPORT_SYMBOL(ion_share_dma_buf);
+
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
+{
+	struct dma_buf *dmabuf;
+	int fd;
+
+	dmabuf = ion_share_dma_buf(client, handle);
+	if (IS_ERR(dmabuf))
+		return PTR_ERR(dmabuf);
+
+	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
+	if (fd < 0)
+		dma_buf_put(dmabuf);
+
+	return fd;
+}
+EXPORT_SYMBOL(ion_share_dma_buf_fd);
+
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
+{
+	struct dma_buf *dmabuf;
+	struct ion_buffer *buffer;
+	struct ion_handle *handle;
+	int ret;
+
+	dmabuf = dma_buf_get(fd);
+	if (IS_ERR(dmabuf))
+		return ERR_CAST(dmabuf);
+
+	/* if this memory came from ion */
+	if (dmabuf->ops != &dma_buf_ops) {
+		pr_err("%s: can not import dmabuf from another exporter\n",
+		       __func__);
+		dma_buf_put(dmabuf);
+		return ERR_PTR(-EINVAL);
+	}
+	buffer = dmabuf->priv;
+
+	mutex_lock(&client->lock);
+	/* if a handle exists for this buffer just take a reference to it */
+	handle = ion_handle_lookup(client, buffer);
+	if (!IS_ERR(handle)) {
+		ion_handle_get(handle);
+		mutex_unlock(&client->lock);
+		goto end;
+	}
+	mutex_unlock(&client->lock);
+
+	handle = ion_handle_create(client, buffer);
+	if (IS_ERR(handle))
+		goto end;
+
+	mutex_lock(&client->lock);
+	ret = ion_handle_add(client, handle);
+	mutex_unlock(&client->lock);
+	if (ret) {
+		ion_handle_put(handle);
+		handle = ERR_PTR(ret);
+	}
+
+end:
+	dma_buf_put(dmabuf);
+	return handle;
+}
+EXPORT_SYMBOL(ion_import_dma_buf);
+
+static int ion_sync_for_device(struct ion_client *client, int fd)
+{
+	struct dma_buf *dmabuf;
+	struct ion_buffer *buffer;
+
+	dmabuf = dma_buf_get(fd);
+	if (IS_ERR(dmabuf))
+		return PTR_ERR(dmabuf);
+
+	/* if this memory came from ion */
+	if (dmabuf->ops != &dma_buf_ops) {
+		pr_err("%s: can not sync dmabuf from another exporter\n",
+		       __func__);
+		dma_buf_put(dmabuf);
+		return -EINVAL;
+	}
+	buffer = dmabuf->priv;
+
+	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
+	dma_buf_put(dmabuf);
+	return 0;
+}
+
+/* fix up the cases where the ioctl direction bits are incorrect */
+static unsigned int ion_ioctl_dir(unsigned int cmd)
+{
+	switch (cmd) {
+	case ION_IOC_SYNC:
+	case ION_IOC_FREE:
+	case ION_IOC_CUSTOM:
+		return _IOC_WRITE;
+	default:
+		return _IOC_DIR(cmd);
+	}
+}
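+
+/*
+ * Note (explanatory, based on the UAPI definitions): ION_IOC_FREE,
+ * ION_IOC_SYNC and ION_IOC_CUSTOM are declared _IOWR in ion's UAPI header
+ * even though the kernel only reads their argument, so ion_ioctl_dir()
+ * overrides _IOC_DIR() for them and ion_ioctl() can skip the needless
+ * copy_to_user() on the way out.
+ */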
+
+static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	struct ion_client *client = filp->private_data;
+	struct ion_device *dev = client->dev;
+	struct ion_handle *cleanup_handle = NULL;
+	int ret = 0;
+	unsigned int dir;
+
+	union {
+		struct ion_fd_data fd;
+		struct ion_allocation_data allocation;
+		struct ion_handle_data handle;
+		struct ion_custom_data custom;
+	} data;
+
+	dir = ion_ioctl_dir(cmd);
+
+	if (_IOC_SIZE(cmd) > sizeof(data))
+		return -EINVAL;
+
+	if (dir & _IOC_WRITE)
+		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+			return -EFAULT;
+
+	switch (cmd) {
+	case ION_IOC_ALLOC:
+	{
+		struct ion_handle *handle;
+
+		handle = ion_alloc(client, data.allocation.len,
+						data.allocation.align,
+						data.allocation.heap_id_mask,
+						data.allocation.flags);
+		if (IS_ERR(handle))
+			return PTR_ERR(handle);
+
+		data.allocation.handle = handle->id;
+
+		cleanup_handle = handle;
+		break;
+	}
+	case ION_IOC_FREE:
+	{
+		struct ion_handle *handle;
+
+		handle = ion_handle_get_by_id(client, data.handle.handle);
+		if (IS_ERR(handle))
+			return PTR_ERR(handle);
+		ion_free(client, handle);
+		ion_handle_put(handle);
+		break;
+	}
+	case ION_IOC_SHARE:
+	case ION_IOC_MAP:
+	{
+		struct ion_handle *handle;
+
+		handle = ion_handle_get_by_id(client, data.handle.handle);
+		if (IS_ERR(handle))
+			return PTR_ERR(handle);
+		data.fd.fd = ion_share_dma_buf_fd(client, handle);
+		ion_handle_put(handle);
+		if (data.fd.fd < 0)
+			ret = data.fd.fd;
+		break;
+	}
+	case ION_IOC_IMPORT:
+	{
+		struct ion_handle *handle;
+
+		handle = ion_import_dma_buf(client, data.fd.fd);
+		if (IS_ERR(handle))
+			ret = PTR_ERR(handle);
+		else
+			data.handle.handle = handle->id;
+		break;
+	}
+	case ION_IOC_SYNC:
+	{
+		ret = ion_sync_for_device(client, data.fd.fd);
+		break;
+	}
+	case ION_IOC_CUSTOM:
+	{
+		if (!dev->custom_ioctl)
+			return -ENOTTY;
+		ret = dev->custom_ioctl(client, data.custom.cmd,
+						data.custom.arg);
+		break;
+	}
+	default:
+		return -ENOTTY;
+	}
+
+	if (dir & _IOC_READ) {
+		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
+			if (cleanup_handle)
+				ion_free(client, cleanup_handle);
+			return -EFAULT;
+		}
+	}
+	return ret;
+}
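+
+/*
+ * Userspace usage sketch (illustrative only; error handling is elided and
+ * the heap id mask is an assumption about the target platform):
+ *
+ *	int ion_fd = open("/dev/ion", O_RDONLY);
+ *	struct ion_allocation_data alloc = {
+ *		.len = 4096,
+ *		.align = 4096,
+ *		.heap_id_mask = 1 << some_heap_id,
+ *		.flags = ION_FLAG_CACHED,
+ *	};
+ *	struct ion_fd_data share;
+ *
+ *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
+ *	share.handle = alloc.handle;
+ *	ioctl(ion_fd, ION_IOC_SHARE, &share);
+ *
+ * share.fd is then a dma-buf fd that can be mmap()ed directly or passed
+ * to another process over a unix domain socket.
+ */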
+
+static int ion_release(struct inode *inode, struct file *file)
+{
+	struct ion_client *client = file->private_data;
+
+	pr_debug("%s: %d\n", __func__, __LINE__);
+	ion_client_destroy(client);
+	return 0;
+}
+
+static int ion_open(struct inode *inode, struct file *file)
+{
+	struct miscdevice *miscdev = file->private_data;
+	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
+	struct ion_client *client;
+	char debug_name[64];
+
+	pr_debug("%s: %d\n", __func__, __LINE__);
+	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
+	client = ion_client_create(dev, debug_name);
+	if (IS_ERR(client))
+		return PTR_ERR(client);
+	file->private_data = client;
+
+	return 0;
+}
+
+static const struct file_operations ion_fops = {
+	.owner          = THIS_MODULE,
+	.open           = ion_open,
+	.release        = ion_release,
+	.unlocked_ioctl = ion_ioctl,
+	.compat_ioctl   = compat_ion_ioctl,
+};
+
+static size_t ion_debug_heap_total(struct ion_client *client,
+				   unsigned int id)
+{
+	size_t size = 0;
+	struct rb_node *n;
+
+	mutex_lock(&client->lock);
+	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+		struct ion_handle *handle = rb_entry(n,
+						     struct ion_handle,
+						     node);
+		if (handle->buffer->heap->id == id)
+			size += handle->buffer->size;
+	}
+	mutex_unlock(&client->lock);
+	return size;
+}
+
+static int ion_debug_heap_show(struct seq_file *s, void *unused)
+{
+	struct ion_heap *heap = s->private;
+	struct ion_device *dev = heap->dev;
+	struct rb_node *n;
+	size_t total_size = 0;
+	size_t total_orphaned_size = 0;
+
+	seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
+	seq_printf(s, "----------------------------------------------------\n");
+
+	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
+		struct ion_client *client = rb_entry(n, struct ion_client,
+						     node);
+		size_t size = ion_debug_heap_total(client, heap->id);
+
+		if (!size)
+			continue;
+		if (client->task) {
+			char task_comm[TASK_COMM_LEN];
+
+			get_task_comm(task_comm, client->task);
+			seq_printf(s, "%16.s %16u %16zu\n", task_comm,
+				   client->pid, size);
+		} else {
+			seq_printf(s, "%16.s %16u %16zu\n", client->name,
+				   client->pid, size);
+		}
+	}
+	seq_printf(s, "----------------------------------------------------\n");
+	seq_printf(s, "orphaned allocations (info is from last known client):"
+		   "\n");
+	mutex_lock(&dev->buffer_lock);
+	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
+						     node);
+		if (buffer->heap->id != heap->id)
+			continue;
+		total_size += buffer->size;
+		if (!buffer->handle_count) {
+			seq_printf(s, "%16.s %16u %16zu %d %d\n",
+				   buffer->task_comm, buffer->pid,
+				   buffer->size, buffer->kmap_cnt,
+				   atomic_read(&buffer->ref.refcount));
+			total_orphaned_size += buffer->size;
+		}
+	}
+	mutex_unlock(&dev->buffer_lock);
+	seq_printf(s, "----------------------------------------------------\n");
+	seq_printf(s, "%16.s %16zu\n", "total orphaned",
+		   total_orphaned_size);
+	seq_printf(s, "%16.s %16zu\n", "total ", total_size);
+	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+		seq_printf(s, "%16.s %16zu\n", "deferred free",
+				heap->free_list_size);
+	seq_printf(s, "----------------------------------------------------\n");
+
+	if (heap->debug_show)
+		heap->debug_show(heap, s, unused);
+
+	return 0;
+}
+
+static int ion_debug_heap_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ion_debug_heap_show, inode->i_private);
+}
+
+static const struct file_operations debug_heap_fops = {
+	.open = ion_debug_heap_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+#ifdef DEBUG_HEAP_SHRINKER
+static int debug_shrink_set(void *data, u64 val)
+{
+	struct ion_heap *heap = data;
+	struct shrink_control sc;
+	int objs;
+
+	sc.gfp_mask = -1;
+	sc.nr_to_scan = 0;
+
+	if (!val)
+		return 0;
+
+	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+	sc.nr_to_scan = objs;
+
+	heap->shrinker.shrink(&heap->shrinker, &sc);
+	return 0;
+}
+
+static int debug_shrink_get(void *data, u64 *val)
+{
+	struct ion_heap *heap = data;
+	struct shrink_control sc;
+	int objs;
+
+	sc.gfp_mask = -1;
+	sc.nr_to_scan = 0;
+
+	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+	*val = objs;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
+			debug_shrink_set, "%llu\n");
+#endif
+
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
+{
+	struct dentry *debug_file;
+
+	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
+	    !heap->ops->unmap_dma)
+		pr_err("%s: can not add heap with invalid ops struct.\n",
+		       __func__);
+
+	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+		ion_heap_init_deferred_free(heap);
+
+	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
+		ion_heap_init_shrinker(heap);
+
+	heap->dev = dev;
+	down_write(&dev->lock);
+	/* use negative heap->id to reverse the priority -- when traversing
+	   the list later attempt higher id numbers first */
+	plist_node_init(&heap->node, -heap->id);
+	plist_add(&heap->node, &dev->heaps);
+	debug_file = debugfs_create_file(heap->name, 0664,
+					dev->heaps_debug_root, heap,
+					&debug_heap_fops);
+
+	if (!debug_file) {
+		char buf[256], *path;
+
+		path = dentry_path(dev->heaps_debug_root, buf, 256);
+		pr_err("Failed to create heap debugfs at %s/%s\n",
+			path, heap->name);
+	}
+
+#ifdef DEBUG_HEAP_SHRINKER
+	if (heap->shrinker.shrink) {
+		char debug_name[64];
+
+		snprintf(debug_name, 64, "%s_shrink", heap->name);
+		debug_file = debugfs_create_file(
+			debug_name, 0644, dev->heaps_debug_root, heap,
+			&debug_shrink_fops);
+		if (!debug_file) {
+			char buf[256], *path;
+
+			path = dentry_path(dev->heaps_debug_root, buf, 256);
+			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
+				path, debug_name);
+		}
+	}
+#endif
+	up_write(&dev->lock);
+}
+
+struct ion_device *ion_device_create(long (*custom_ioctl)
+				     (struct ion_client *client,
+				      unsigned int cmd,
+				      unsigned long arg))
+{
+	struct ion_device *idev;
+	int ret;
+
+	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
+	if (!idev)
+		return ERR_PTR(-ENOMEM);
+
+	idev->dev.minor = MISC_DYNAMIC_MINOR;
+	idev->dev.name = "ion";
+	idev->dev.fops = &ion_fops;
+	idev->dev.parent = NULL;
+	ret = misc_register(&idev->dev);
+	if (ret) {
+		pr_err("ion: failed to register misc device.\n");
+		kfree(idev);
+		return ERR_PTR(ret);
+	}
+
+	idev->debug_root = debugfs_create_dir("ion", NULL);
+	if (!idev->debug_root) {
+		pr_err("ion: failed to create debugfs root directory.\n");
+		goto debugfs_done;
+	}
+	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
+	if (!idev->heaps_debug_root) {
+		pr_err("ion: failed to create debugfs heaps directory.\n");
+		goto debugfs_done;
+	}
+	idev->clients_debug_root = debugfs_create_dir("clients",
+						idev->debug_root);
+	if (!idev->clients_debug_root)
+		pr_err("ion: failed to create debugfs clients directory.\n");
+
+debugfs_done:
+
+	idev->custom_ioctl = custom_ioctl;
+	idev->buffers = RB_ROOT;
+	mutex_init(&idev->buffer_lock);
+	init_rwsem(&idev->lock);
+	plist_head_init(&idev->heaps);
+	idev->clients = RB_ROOT;
+	return idev;
+}
+
+void ion_device_destroy(struct ion_device *dev)
+{
+	misc_deregister(&dev->dev);
+	debugfs_remove_recursive(dev->debug_root);
+	/* XXX need to free the heaps and clients ? */
+	kfree(dev);
+}
+
+void __init ion_reserve(struct ion_platform_data *data)
+{
+	int i;
+
+	for (i = 0; i < data->nr; i++) {
+		if (data->heaps[i].size == 0)
+			continue;
+
+		if (data->heaps[i].base == 0) {
+			phys_addr_t paddr;
+
+			paddr = memblock_alloc_base(data->heaps[i].size,
+						    data->heaps[i].align,
+						    MEMBLOCK_ALLOC_ANYWHERE);
+			if (!paddr) {
+				pr_err("%s: error allocating memblock for "
+				       "heap %d\n",
+					__func__, i);
+				continue;
+			}
+			data->heaps[i].base = paddr;
+		} else {
+			int ret = memblock_reserve(data->heaps[i].base,
+					       data->heaps[i].size);
+			if (ret)
+				pr_err("memblock reserve of %zx@%lx failed\n",
+				       data->heaps[i].size,
+				       data->heaps[i].base);
+		}
+		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
+			data->heaps[i].name,
+			data->heaps[i].base,
+			data->heaps[i].size);
+	}
+}
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
new file mode 100644
index 0000000..dcd2a0c
--- /dev/null
+++ b/drivers/staging/android/ion/ion.h
@@ -0,0 +1,204 @@
+/*
+ * drivers/staging/android/ion/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ION_H
+#define _LINUX_ION_H
+
+#include <linux/types.h>
+
+#include "../uapi/ion.h"
+
+struct ion_handle;
+struct ion_device;
+struct ion_heap;
+struct ion_mapper;
+struct ion_client;
+struct ion_buffer;
+
+/* This should be removed some day when phys_addr_t's are fully
+   plumbed in the kernel, and all instances of ion_phys_addr_t should
+   be converted to phys_addr_t.  For the time being, many kernel interfaces
+   do not accept phys_addr_t's, so those interfaces would have to be
+   converted first. */
+#define ion_phys_addr_t unsigned long
+
+/**
+ * struct ion_platform_heap - defines a heap in the given platform
+ * @type:	type of the heap from ion_heap_type enum
+ * @id:		unique identifier for heap.  When allocating, higher-numbered
+ *		heaps will be tried first.  At allocation these are passed
+ *		as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS.
+ * @name:	used for debug purposes
+ * @base:	base address of heap in physical memory if applicable
+ * @size:	size of the heap in bytes if applicable
+ * @align:	required alignment in physical memory if applicable
+ * @priv:	private info passed from the board file
+ *
+ * Provided by the board file.
+ */
+struct ion_platform_heap {
+	enum ion_heap_type type;
+	unsigned int id;
+	const char *name;
+	ion_phys_addr_t base;
+	size_t size;
+	ion_phys_addr_t align;
+	void *priv;
+};
+
+/**
+ * struct ion_platform_data - array of platform heaps passed from board file
+ * @nr:		number of structures in the array
+ * @heaps:	array of platform_heap structures
+ *
+ * Provided by the board file in the form of platform data to a platform device.
+ */
+struct ion_platform_data {
+	int nr;
+	struct ion_platform_heap *heaps;
+};
+
+/**
+ * ion_reserve() - reserve memory for ion heaps if applicable
+ * @data:	platform data specifying starting physical address and
+ *		size
+ *
+ * Calls memblock reserve to set aside memory for heaps that are
+ * located at specific memory addresses or of specific sizes not
+ * managed by the kernel.
+ */
+void ion_reserve(struct ion_platform_data *data);
+
+/**
+ * ion_client_create() -  allocate a client and returns it
+ * @dev:		the global ion device
+ * @heap_type_mask:	mask of heaps this client can allocate from
+ * @name:		used for debugging
+ */
+struct ion_client *ion_client_create(struct ion_device *dev,
+				     const char *name);
+
+/**
+ * ion_client_destroy() - free a client and all its handles
+ * @client:	the client
+ *
+ * Free the provided client and all its resources, including
+ * any handles it is holding.
+ */
+void ion_client_destroy(struct ion_client *client);
+
+/**
+ * ion_alloc - allocate ion memory
+ * @client:		the client
+ * @len:		size of the allocation
+ * @align:		requested allocation alignment, lots of hardware blocks
+ *			have alignment requirements of some kind
+ * @heap_id_mask:	mask of heaps to allocate from, if multiple bits are set
+ *			heaps will be tried in order from highest to lowest
+ *			id
+ * @flags:		heap flags, the low 16 bits are consumed by ion, the
+ *			high 16 bits are passed on to the respective heap and
+ *			can be heap custom
+ *
+ * Allocate memory in one of the heaps provided in heap mask and return
+ * an opaque handle to it.
+ */
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+			     size_t align, unsigned int heap_id_mask,
+			     unsigned int flags);
+
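+/*
+ * Usage sketch for an in-kernel client (illustrative; "my_ion_dev" and
+ * "my_heap_id_mask" are assumptions about the integrating platform, not
+ * symbols defined here):
+ *
+ *	struct ion_client *client;
+ *	struct ion_handle *handle;
+ *
+ *	client = ion_client_create(my_ion_dev, "my-driver");
+ *	if (IS_ERR(client))
+ *		return PTR_ERR(client);
+ *	handle = ion_alloc(client, SZ_64K, PAGE_SIZE, my_heap_id_mask,
+ *			   ION_FLAG_CACHED);
+ *	...
+ *	ion_free(client, handle);
+ *	ion_client_destroy(client);
+ */
+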
+/**
+ * ion_free - free a handle
+ * @client:	the client
+ * @handle:	the handle to free
+ *
+ * Free the provided handle.
+ */
+void ion_free(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_phys - returns the physical address and len of a handle
+ * @client:	the client
+ * @handle:	the handle
+ * @addr:	a pointer to put the address in
+ * @len:	a pointer to put the length in
+ *
+ * This function queries the heap for a particular handle to get the
+ * handle's physical address.  Its output is only correct if
+ * a heap returns physically contiguous memory -- in other cases
+ * this API should not be implemented -- ion_sg_table should be used
+ * instead.  Returns -EINVAL if the handle is invalid.  This has
+ * no implications on the reference counting of the handle --
+ * the returned value may not be valid if the caller is not
+ * holding a reference.
+ */
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+	     ion_phys_addr_t *addr, size_t *len);
+
+/**
+ * ion_sg_table - return an sg_table describing a handle
+ * @client:	the client
+ * @handle:	the handle
+ *
+ * This function returns the sg_table describing
+ * a particular ion handle.
+ */
+struct sg_table *ion_sg_table(struct ion_client *client,
+			      struct ion_handle *handle);
+
+/**
+ * ion_map_kernel - create mapping for the given handle
+ * @client:	the client
+ * @handle:	handle to map
+ *
+ * Map the given handle into the kernel and return a kernel address that
+ * can be used to access this address.
+ */
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_unmap_kernel() - destroy a kernel mapping for a handle
+ * @client:	the client
+ * @handle:	handle to unmap
+ */
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_share_dma_buf() - share buffer as dma-buf
+ * @client:	the client
+ * @handle:	the handle
+ */
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+						struct ion_handle *handle);
+
+/**
+ * ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd
+ * @client:	the client
+ * @handle:	the handle
+ */
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_import_dma_buf() - given a dma-buf fd from the ion exporter, get a handle
+ * @client:	the client
+ * @fd:		the dma-buf fd
+ *
+ * Given a dma-buf fd that was allocated through ion via ion_share_dma_buf,
+ * import that fd and return a handle representing it.  If a dma-buf from
+ * another exporter is passed in, this function will return ERR_PTR(-EINVAL).
+ */
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
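+
+/*
+ * Round-trip sketch (illustrative): a buffer allocated by client A can be
+ * handed to client B through a dma-buf fd:
+ *
+ *	fd = ion_share_dma_buf_fd(client_a, handle_a);
+ *	handle_b = ion_import_dma_buf(client_b, fd);
+ *
+ * Both handles then reference the same underlying ion_buffer; each client
+ * must still ion_free() its own handle when done.
+ */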
+
+#endif /* _LINUX_ION_H */
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
new file mode 100644
index 0000000..5165de2
--- /dev/null
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -0,0 +1,194 @@
+/*
+ * drivers/gpu/ion/ion_carveout_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+struct ion_carveout_heap {
+	struct ion_heap heap;
+	struct gen_pool *pool;
+	ion_phys_addr_t base;
+};
+
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
+				      unsigned long size,
+				      unsigned long align)
+{
+	struct ion_carveout_heap *carveout_heap =
+		container_of(heap, struct ion_carveout_heap, heap);
+	unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);
+
+	if (!offset)
+		return ION_CARVEOUT_ALLOCATE_FAIL;
+
+	return offset;
+}
+
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+		       unsigned long size)
+{
+	struct ion_carveout_heap *carveout_heap =
+		container_of(heap, struct ion_carveout_heap, heap);
+
+	if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
+		return;
+	gen_pool_free(carveout_heap->pool, addr, size);
+}
+
+static int ion_carveout_heap_phys(struct ion_heap *heap,
+				  struct ion_buffer *buffer,
+				  ion_phys_addr_t *addr, size_t *len)
+{
+	struct sg_table *table = buffer->priv_virt;
+	struct page *page = sg_page(table->sgl);
+	ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+
+	*addr = paddr;
+	*len = buffer->size;
+	return 0;
+}
+
+static int ion_carveout_heap_allocate(struct ion_heap *heap,
+				      struct ion_buffer *buffer,
+				      unsigned long size, unsigned long align,
+				      unsigned long flags)
+{
+	struct sg_table *table;
+	ion_phys_addr_t paddr;
+	int ret;
+
+	if (align > PAGE_SIZE)
+		return -EINVAL;
+
+	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+	ret = sg_alloc_table(table, 1, GFP_KERNEL);
+	if (ret)
+		goto err_free;
+
+	paddr = ion_carveout_allocate(heap, size, align);
+	if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
+		ret = -ENOMEM;
+		goto err_free_table;
+	}
+
+	sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
+	buffer->priv_virt = table;
+
+	return 0;
+
+err_free_table:
+	sg_free_table(table);
+err_free:
+	kfree(table);
+	return ret;
+}
+
+static void ion_carveout_heap_free(struct ion_buffer *buffer)
+{
+	struct ion_heap *heap = buffer->heap;
+	struct sg_table *table = buffer->priv_virt;
+	struct page *page = sg_page(table->sgl);
+	ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+
+	ion_heap_buffer_zero(buffer);
+
+	if (ion_buffer_cached(buffer))
+		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+							DMA_BIDIRECTIONAL);
+
+	ion_carveout_free(heap, paddr, buffer->size);
+	sg_free_table(table);
+	kfree(table);
+}
+
+static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
+						  struct ion_buffer *buffer)
+{
+	return buffer->priv_virt;
+}
+
+static void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
+					struct ion_buffer *buffer)
+{
+}
+
+static struct ion_heap_ops carveout_heap_ops = {
+	.allocate = ion_carveout_heap_allocate,
+	.free = ion_carveout_heap_free,
+	.phys = ion_carveout_heap_phys,
+	.map_dma = ion_carveout_heap_map_dma,
+	.unmap_dma = ion_carveout_heap_unmap_dma,
+	.map_user = ion_heap_map_user,
+	.map_kernel = ion_heap_map_kernel,
+	.unmap_kernel = ion_heap_unmap_kernel,
+};
+
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
+{
+	struct ion_carveout_heap *carveout_heap;
+	struct page *page;
+	size_t size;
+	int ret;
+
+	page = pfn_to_page(PFN_DOWN(heap_data->base));
+	size = heap_data->size;
+
+	ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+
+	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
+	if (ret)
+		return ERR_PTR(ret);
+
+	carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
+	if (!carveout_heap)
+		return ERR_PTR(-ENOMEM);
+
+	carveout_heap->pool = gen_pool_create(12, -1);
+	if (!carveout_heap->pool) {
+		kfree(carveout_heap);
+		return ERR_PTR(-ENOMEM);
+	}
+	carveout_heap->base = heap_data->base;
+	gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
+		     -1);
+	carveout_heap->heap.ops = &carveout_heap_ops;
+	carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
+	carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+
+	return &carveout_heap->heap;
+}
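+
+/*
+ * Board-file sketch (illustrative; the base address, size and id are
+ * assumptions, not values from this driver): a carveout heap is described
+ * with an ion_platform_heap entry, and its memory is set aside early via
+ * ion_reserve():
+ *
+ *	static struct ion_platform_heap my_heaps[] = {
+ *		{
+ *			.type = ION_HEAP_TYPE_CARVEOUT,
+ *			.id   = 1,
+ *			.name = "carveout",
+ *			.base = 0x80000000,
+ *			.size = SZ_16M,
+ *		},
+ *	};
+ */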
+
+void ion_carveout_heap_destroy(struct ion_heap *heap)
+{
+	struct ion_carveout_heap *carveout_heap =
+	     container_of(heap, struct  ion_carveout_heap, heap);
+
+	gen_pool_destroy(carveout_heap->pool);
+	kfree(carveout_heap);
+}
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
new file mode 100644
index 0000000..ca20d62
--- /dev/null
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -0,0 +1,195 @@
+/*
+ * drivers/gpu/ion/ion_chunk_heap.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+struct ion_chunk_heap {
+	struct ion_heap heap;
+	struct gen_pool *pool;
+	ion_phys_addr_t base;
+	unsigned long chunk_size;
+	unsigned long size;
+	unsigned long allocated;
+};
+
+static int ion_chunk_heap_allocate(struct ion_heap *heap,
+				      struct ion_buffer *buffer,
+				      unsigned long size, unsigned long align,
+				      unsigned long flags)
+{
+	struct ion_chunk_heap *chunk_heap =
+		container_of(heap, struct ion_chunk_heap, heap);
+	struct sg_table *table;
+	struct scatterlist *sg;
+	int ret, i;
+	unsigned long num_chunks;
+	unsigned long allocated_size;
+
+	if (align > chunk_heap->chunk_size)
+		return -EINVAL;
+
+	allocated_size = ALIGN(size, chunk_heap->chunk_size);
+	num_chunks = allocated_size / chunk_heap->chunk_size;
+
+	if (allocated_size > chunk_heap->size - chunk_heap->allocated)
+		return -ENOMEM;
+
+	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+	ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
+	if (ret) {
+		kfree(table);
+		return ret;
+	}
+
+	sg = table->sgl;
+	for (i = 0; i < num_chunks; i++) {
+		unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
+						     chunk_heap->chunk_size);
+		if (!paddr)
+			goto err;
+		sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
+				chunk_heap->chunk_size, 0);
+		sg = sg_next(sg);
+	}
+
+	buffer->priv_virt = table;
+	chunk_heap->allocated += allocated_size;
+	return 0;
+err:
+	sg = table->sgl;
+	for (i -= 1; i >= 0; i--) {
+		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+			      sg->length);
+		sg = sg_next(sg);
+	}
+	sg_free_table(table);
+	kfree(table);
+	return -ENOMEM;
+}
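+
+/*
+ * Worked example (illustrative): with a 64K chunk_size, a 100K request is
+ * rounded up to allocated_size = 128K and satisfied with num_chunks = 2
+ * gen_pool allocations, one scatterlist entry per chunk.
+ */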
+
+static void ion_chunk_heap_free(struct ion_buffer *buffer)
+{
+	struct ion_heap *heap = buffer->heap;
+	struct ion_chunk_heap *chunk_heap =
+		container_of(heap, struct ion_chunk_heap, heap);
+	struct sg_table *table = buffer->priv_virt;
+	struct scatterlist *sg;
+	int i;
+	unsigned long allocated_size;
+
+	allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);
+
+	ion_heap_buffer_zero(buffer);
+
+	if (ion_buffer_cached(buffer))
+		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+								DMA_BIDIRECTIONAL);
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+			      sg->length);
+	}
+	chunk_heap->allocated -= allocated_size;
+	sg_free_table(table);
+	kfree(table);
+}
+
+static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
+					       struct ion_buffer *buffer)
+{
+	return buffer->priv_virt;
+}
+
+static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
+				     struct ion_buffer *buffer)
+{
+}
+
+static struct ion_heap_ops chunk_heap_ops = {
+	.allocate = ion_chunk_heap_allocate,
+	.free = ion_chunk_heap_free,
+	.map_dma = ion_chunk_heap_map_dma,
+	.unmap_dma = ion_chunk_heap_unmap_dma,
+	.map_user = ion_heap_map_user,
+	.map_kernel = ion_heap_map_kernel,
+	.unmap_kernel = ion_heap_unmap_kernel,
+};
+
+struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
+{
+	struct ion_chunk_heap *chunk_heap;
+	int ret;
+	struct page *page;
+	size_t size;
+
+	page = pfn_to_page(PFN_DOWN(heap_data->base));
+	size = heap_data->size;
+
+	ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+
+	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
+	if (ret)
+		return ERR_PTR(ret);
+
+	chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
+	if (!chunk_heap)
+		return ERR_PTR(-ENOMEM);
+
+	chunk_heap->chunk_size = (unsigned long)heap_data->priv;
+	chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
+					   PAGE_SHIFT, -1);
+	if (!chunk_heap->pool) {
+		ret = -ENOMEM;
+		goto error_gen_pool_create;
+	}
+	chunk_heap->base = heap_data->base;
+	chunk_heap->size = heap_data->size;
+	chunk_heap->allocated = 0;
+
+	gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
+	chunk_heap->heap.ops = &chunk_heap_ops;
+	chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
+	chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+	pr_info("%s: base %lu size %zu align %ld\n", __func__, chunk_heap->base,
+		heap_data->size, heap_data->align);
+
+	return &chunk_heap->heap;
+
+error_gen_pool_create:
+	kfree(chunk_heap);
+	return ERR_PTR(ret);
+}
+
+void ion_chunk_heap_destroy(struct ion_heap *heap)
+{
+	struct ion_chunk_heap *chunk_heap =
+	     container_of(heap, struct  ion_chunk_heap, heap);
+
+	gen_pool_destroy(chunk_heap->pool);
+	kfree(chunk_heap);
+}
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
new file mode 100644
index 0000000..4418bda
--- /dev/null
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -0,0 +1,218 @@
+/*
+ * drivers/gpu/ion/ion_cma_heap.c
+ *
+ * Copyright (C) Linaro 2012
+ * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+
+#include "ion.h"
+#include "ion_priv.h"
+
+#define ION_CMA_ALLOCATE_FAILED -1
+
+struct ion_cma_heap {
+	struct ion_heap heap;
+	struct device *dev;
+};
+
+#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
+
+struct ion_cma_buffer_info {
+	void *cpu_addr;
+	dma_addr_t handle;
+	struct sg_table *table;
+};
+
+/*
+ * Create scatter-list for the already allocated DMA buffer.
+ * This function could be replaced by dma_common_get_sgtable
+ * as soon as it becomes available.
+ */
+static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
+			       void *cpu_addr, dma_addr_t handle, size_t size)
+{
+	struct page *page = virt_to_page(cpu_addr);
+	int ret;
+
+	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+	if (unlikely(ret))
+		return ret;
+
+	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+	return 0;
+}
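+
+/*
+ * Note (explanatory): a single-entry scatterlist is enough here because
+ * CMA returns one physically contiguous region, so the whole page-aligned
+ * allocation can be described by a single sg_set_page().
+ */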
+
+/* ION CMA heap operations functions */
+static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
+			    unsigned long len, unsigned long align,
+			    unsigned long flags)
+{
+	struct ion_cma_heap *cma_heap = to_cma_heap(heap);
+	struct device *dev = cma_heap->dev;
+	struct ion_cma_buffer_info *info;
+
+	dev_dbg(dev, "Request buffer allocation len %ld\n", len);
+
+	if (buffer->flags & ION_FLAG_CACHED)
+		return -EINVAL;
+
+	if (align > PAGE_SIZE)
+		return -EINVAL;
+
+	info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
+	if (!info) {
+		dev_err(dev, "Can't allocate buffer info\n");
+		return ION_CMA_ALLOCATE_FAILED;
+	}
+
+	info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle),
+						GFP_HIGHUSER | __GFP_ZERO);
+
+	if (!info->cpu_addr) {
+		dev_err(dev, "Fail to allocate buffer\n");
+		goto err;
+	}
+
+	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!info->table) {
+		dev_err(dev, "Fail to allocate sg table\n");
+		goto free_mem;
+	}
+
+	if (ion_cma_get_sgtable(dev, info->table, info->cpu_addr,
+				info->handle, len))
+		goto free_table;
+	/* keep this for memory release */
+	buffer->priv_virt = info;
+	dev_dbg(dev, "Allocate buffer %p\n", buffer);
+	return 0;
+
+free_table:
+	kfree(info->table);
+free_mem:
+	dma_free_coherent(dev, len, info->cpu_addr, info->handle);
+err:
+	kfree(info);
+	return ION_CMA_ALLOCATE_FAILED;
+}
+
+static void ion_cma_free(struct ion_buffer *buffer)
+{
+	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+	struct device *dev = cma_heap->dev;
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	dev_dbg(dev, "Release buffer %p\n", buffer);
+	/* release memory */
+	dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
+	/* release sg table */
+	sg_free_table(info->table);
+	kfree(info->table);
+	kfree(info);
+}
+
+/* return physical address in addr */
+static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
+			ion_phys_addr_t *addr, size_t *len)
+{
+	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+	struct device *dev = cma_heap->dev;
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer,
+		&info->handle);
+
+	*addr = info->handle;
+	*len = buffer->size;
+
+	return 0;
+}
+
+static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
+					     struct ion_buffer *buffer)
+{
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	return info->table;
+}
+
+static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
+				   struct ion_buffer *buffer)
+{
+}
+
+static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
+			struct vm_area_struct *vma)
+{
+	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+	struct device *dev = cma_heap->dev;
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
+				 buffer->size);
+}
+
+static void *ion_cma_map_kernel(struct ion_heap *heap,
+				struct ion_buffer *buffer)
+{
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+	/* kernel memory mapping has been done at allocation time */
+	return info->cpu_addr;
+}
+
+static void ion_cma_unmap_kernel(struct ion_heap *heap,
+					struct ion_buffer *buffer)
+{
+}
+
+static struct ion_heap_ops ion_cma_ops = {
+	.allocate = ion_cma_allocate,
+	.free = ion_cma_free,
+	.map_dma = ion_cma_heap_map_dma,
+	.unmap_dma = ion_cma_heap_unmap_dma,
+	.phys = ion_cma_phys,
+	.map_user = ion_cma_mmap,
+	.map_kernel = ion_cma_map_kernel,
+	.unmap_kernel = ion_cma_unmap_kernel,
+};
+
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
+{
+	struct ion_cma_heap *cma_heap;
+
+	cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL);
+
+	if (!cma_heap)
+		return ERR_PTR(-ENOMEM);
+
+	cma_heap->heap.ops = &ion_cma_ops;
+	/*
+	 * get the device from the heap's private data; later it is used
+	 * to make the link with the reserved CMA memory
+	 */
+	cma_heap->dev = data->priv;
+	cma_heap->heap.type = ION_HEAP_TYPE_DMA;
+	return &cma_heap->heap;
+}
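+
+/*
+ * Wiring sketch (illustrative; "pdev" is an assumed platform device whose
+ * CMA region was declared elsewhere, e.g. with dma_declare_contiguous()):
+ *
+ *	struct ion_platform_heap heap_data = {
+ *		.type = ION_HEAP_TYPE_DMA,
+ *		.id   = 2,
+ *		.name = "cma",
+ *		.priv = &pdev->dev,
+ *	};
+ *
+ * The .priv device pointer becomes cma_heap->dev above and is what
+ * dma_alloc_coherent() allocates against.
+ */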
+
+void ion_cma_heap_destroy(struct ion_heap *heap)
+{
+	struct ion_cma_heap *cma_heap = to_cma_heap(heap);
+
+	kfree(cma_heap);
+}
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
new file mode 100644
index 0000000..e1bbd8b
--- /dev/null
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -0,0 +1,371 @@
+/*
+ * drivers/gpu/ion/ion_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/rtmutex.h>
+#include <linux/sched.h>
+#include <linux/scatterlist.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+void *ion_heap_map_kernel(struct ion_heap *heap,
+			  struct ion_buffer *buffer)
+{
+	struct scatterlist *sg;
+	int i, j;
+	void *vaddr;
+	pgprot_t pgprot;
+	struct sg_table *table = buffer->sg_table;
+	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+	struct page **pages = vmalloc(sizeof(struct page *) * npages);
+	struct page **tmp = pages;
+
+	if (!pages)
+		return NULL;
+
+	if (buffer->flags & ION_FLAG_CACHED)
+		pgprot = PAGE_KERNEL;
+	else
+		pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
+		struct page *page = sg_page(sg);
+
+		BUG_ON(i >= npages);
+		for (j = 0; j < npages_this_entry; j++)
+			*(tmp++) = page++;
+	}
+	vaddr = vmap(pages, npages, VM_MAP, pgprot);
+	vfree(pages);
+
+	if (vaddr == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	return vaddr;
+}
+
+void ion_heap_unmap_kernel(struct ion_heap *heap,
+			   struct ion_buffer *buffer)
+{
+	vunmap(buffer->vaddr);
+}
+
+int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+		      struct vm_area_struct *vma)
+{
+	struct sg_table *table = buffer->sg_table;
+	unsigned long addr = vma->vm_start;
+	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
+	struct scatterlist *sg;
+	int i;
+	int ret;
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		struct page *page = sg_page(sg);
+		unsigned long remainder = vma->vm_end - addr;
+		unsigned long len = sg->length;
+
+		if (offset >= sg->length) {
+			offset -= sg->length;
+			continue;
+		} else if (offset) {
+			page += offset / PAGE_SIZE;
+			len = sg->length - offset;
+			offset = 0;
+		}
+		len = min(len, remainder);
+		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
+				vma->vm_page_prot);
+		if (ret)
+			return ret;
+		addr += len;
+		if (addr >= vma->vm_end)
+			return 0;
+	}
+	return 0;
+}
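+
+/*
+ * Example (illustrative): for a buffer whose table holds two entries of
+ * 8K and 4K, an mmap() with vm_pgoff == 1 (a 4K offset) maps the last
+ * page of the first entry followed by the whole second entry, clamped to
+ * the length of the vma.
+ */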
+
+static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
+{
+	void *addr = vm_map_ram(pages, num, -1, pgprot);
+
+	if (!addr)
+		return -ENOMEM;
+	memset(addr, 0, PAGE_SIZE * num);
+	vm_unmap_ram(addr, num);
+
+	return 0;
+}
+
+static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
+						pgprot_t pgprot)
+{
+	int p = 0;
+	int ret = 0;
+	struct sg_page_iter piter;
+	struct page *pages[32];
+
+	for_each_sg_page(sgl, &piter, nents, 0) {
+		pages[p++] = sg_page_iter_page(&piter);
+		if (p == ARRAY_SIZE(pages)) {
+			ret = ion_heap_clear_pages(pages, p, pgprot);
+			if (ret)
+				return ret;
+			p = 0;
+		}
+	}
+	if (p)
+		ret = ion_heap_clear_pages(pages, p, pgprot);
+
+	return ret;
+}
+
+int ion_heap_buffer_zero(struct ion_buffer *buffer)
+{
+	struct sg_table *table = buffer->sg_table;
+	pgprot_t pgprot;
+
+	if (buffer->flags & ION_FLAG_CACHED)
+		pgprot = PAGE_KERNEL;
+	else
+		pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+	return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
+}
+
+int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
+{
+	struct scatterlist sg;
+
+	sg_init_table(&sg, 1);
+	sg_set_page(&sg, page, size, 0);
+	return ion_heap_sglist_zero(&sg, 1, pgprot);
+}
+
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
+{
+	spin_lock(&heap->free_lock);
+	list_add(&buffer->list, &heap->free_list);
+	heap->free_list_size += buffer->size;
+	spin_unlock(&heap->free_lock);
+	wake_up(&heap->waitqueue);
+}
+
+size_t ion_heap_freelist_size(struct ion_heap *heap)
+{
+	size_t size;
+
+	spin_lock(&heap->free_lock);
+	size = heap->free_list_size;
+	spin_unlock(&heap->free_lock);
+
+	return size;
+}
+
+static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
+				bool skip_pools)
+{
+	struct ion_buffer *buffer;
+	size_t total_drained = 0;
+
+	if (ion_heap_freelist_size(heap) == 0)
+		return 0;
+
+	spin_lock(&heap->free_lock);
+	if (size == 0)
+		size = heap->free_list_size;
+
+	while (!list_empty(&heap->free_list)) {
+		if (total_drained >= size)
+			break;
+		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
+					  list);
+		list_del(&buffer->list);
+		heap->free_list_size -= buffer->size;
+		if (skip_pools)
+			buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
+		total_drained += buffer->size;
+		spin_unlock(&heap->free_lock);
+		ion_buffer_destroy(buffer);
+		spin_lock(&heap->free_lock);
+	}
+	spin_unlock(&heap->free_lock);
+
+	return total_drained;
+}
+
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
+{
+	return _ion_heap_freelist_drain(heap, size, false);
+}
+
+size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
+{
+	return _ion_heap_freelist_drain(heap, size, true);
+}
+
+static int ion_heap_deferred_free(void *data)
+{
+	struct ion_heap *heap = data;
+
+	while (true) {
+		struct ion_buffer *buffer;
+
+		wait_event_freezable(heap->waitqueue,
+				     ion_heap_freelist_size(heap) > 0);
+
+		spin_lock(&heap->free_lock);
+		if (list_empty(&heap->free_list)) {
+			spin_unlock(&heap->free_lock);
+			continue;
+		}
+		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
+					  list);
+		list_del(&buffer->list);
+		heap->free_list_size -= buffer->size;
+		spin_unlock(&heap->free_lock);
+		ion_buffer_destroy(buffer);
+	}
+
+	return 0;
+}
+
+int ion_heap_init_deferred_free(struct ion_heap *heap)
+{
+	struct sched_param param = { .sched_priority = 0 };
+
+	INIT_LIST_HEAD(&heap->free_list);
+	heap->free_list_size = 0;
+	spin_lock_init(&heap->free_lock);
+	init_waitqueue_head(&heap->waitqueue);
+	heap->task = kthread_run(ion_heap_deferred_free, heap,
+				 "%s", heap->name);
+	sched_setscheduler(heap->task, SCHED_IDLE, &param);
+	if (IS_ERR(heap->task)) {
+		pr_err("%s: creating thread for deferred free failed\n",
+		       __func__);
+		return PTR_RET(heap->task);
+	}
+	return 0;
+}
+
+static int ion_heap_shrink(struct shrinker *shrinker, struct shrink_control *sc)
+{
+	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
+					     shrinker);
+	int total = 0;
+	int freed = 0;
+	int to_scan = sc->nr_to_scan;
+
+	if (to_scan == 0)
+		goto out;
+
+	/*
+	 * shrink the free list first, no point in zeroing the memory if we're
+	 * just going to reclaim it. Also, skip any possible page pooling.
+	 */
+	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+		freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
+				PAGE_SIZE;
+
+	to_scan -= freed;
+	if (to_scan < 0)
+		to_scan = 0;
+
+out:
+	total = ion_heap_freelist_size(heap) / PAGE_SIZE;
+	if (heap->ops->shrink)
+		total += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
+	return total;
+}
+
+void ion_heap_init_shrinker(struct ion_heap *heap)
+{
+	heap->shrinker.shrink = ion_heap_shrink;
+	heap->shrinker.seeks = DEFAULT_SEEKS;
+	heap->shrinker.batch = 0;
+	register_shrinker(&heap->shrinker);
+}
+
+struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
+{
+	struct ion_heap *heap = NULL;
+
+	switch (heap_data->type) {
+	case ION_HEAP_TYPE_SYSTEM_CONTIG:
+		heap = ion_system_contig_heap_create(heap_data);
+		break;
+	case ION_HEAP_TYPE_SYSTEM:
+		heap = ion_system_heap_create(heap_data);
+		break;
+	case ION_HEAP_TYPE_CARVEOUT:
+		heap = ion_carveout_heap_create(heap_data);
+		break;
+	case ION_HEAP_TYPE_CHUNK:
+		heap = ion_chunk_heap_create(heap_data);
+		break;
+	case ION_HEAP_TYPE_DMA:
+		heap = ion_cma_heap_create(heap_data);
+		break;
+	default:
+		pr_err("%s: Invalid heap type %d\n", __func__,
+		       heap_data->type);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (IS_ERR_OR_NULL(heap)) {
+		pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
+		       __func__, heap_data->name, heap_data->type,
+		       heap_data->base, heap_data->size);
+		return ERR_PTR(-EINVAL);
+	}
+
+	heap->name = heap_data->name;
+	heap->id = heap_data->id;
+	return heap;
+}
+
+void ion_heap_destroy(struct ion_heap *heap)
+{
+	if (!heap)
+		return;
+
+	switch (heap->type) {
+	case ION_HEAP_TYPE_SYSTEM_CONTIG:
+		ion_system_contig_heap_destroy(heap);
+		break;
+	case ION_HEAP_TYPE_SYSTEM:
+		ion_system_heap_destroy(heap);
+		break;
+	case ION_HEAP_TYPE_CARVEOUT:
+		ion_carveout_heap_destroy(heap);
+		break;
+	case ION_HEAP_TYPE_CHUNK:
+		ion_chunk_heap_destroy(heap);
+		break;
+	case ION_HEAP_TYPE_DMA:
+		ion_cma_heap_destroy(heap);
+		break;
+	default:
+		pr_err("%s: Invalid heap type %d\n", __func__,
+		       heap->type);
+	}
+}
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
new file mode 100644
index 0000000..f08c6db
--- /dev/null
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -0,0 +1,198 @@
+/*
+ * drivers/gpu/ion/ion_page_pool.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "ion_priv.h"
+
+struct ion_page_pool_item {
+	struct page *page;
+	struct list_head list;
+};
+
+static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
+{
+	struct page *page = alloc_pages(pool->gfp_mask, pool->order);
+
+	if (!page)
+		return NULL;
+	ion_page_pool_alloc_set_cache_policy(pool, page);
+
+	ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
+						DMA_BIDIRECTIONAL);
+	return page;
+}
+
+static void ion_page_pool_free_pages(struct ion_page_pool *pool,
+				     struct page *page)
+{
+	ion_page_pool_free_set_cache_policy(pool, page);
+	__free_pages(page, pool->order);
+}
+
+static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
+{
+	struct ion_page_pool_item *item;
+
+	item = kmalloc(sizeof(struct ion_page_pool_item), GFP_KERNEL);
+	if (!item)
+		return -ENOMEM;
+
+	mutex_lock(&pool->mutex);
+	item->page = page;
+	if (PageHighMem(page)) {
+		list_add_tail(&item->list, &pool->high_items);
+		pool->high_count++;
+	} else {
+		list_add_tail(&item->list, &pool->low_items);
+		pool->low_count++;
+	}
+	mutex_unlock(&pool->mutex);
+	return 0;
+}
+
+static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
+{
+	struct ion_page_pool_item *item;
+	struct page *page;
+
+	if (high) {
+		BUG_ON(!pool->high_count);
+		item = list_first_entry(&pool->high_items,
+					struct ion_page_pool_item, list);
+		pool->high_count--;
+	} else {
+		BUG_ON(!pool->low_count);
+		item = list_first_entry(&pool->low_items,
+					struct ion_page_pool_item, list);
+		pool->low_count--;
+	}
+
+	list_del(&item->list);
+	page = item->page;
+	kfree(item);
+	return page;
+}
+
+void *ion_page_pool_alloc(struct ion_page_pool *pool)
+{
+	struct page *page = NULL;
+
+	BUG_ON(!pool);
+
+	mutex_lock(&pool->mutex);
+	if (pool->high_count)
+		page = ion_page_pool_remove(pool, true);
+	else if (pool->low_count)
+		page = ion_page_pool_remove(pool, false);
+	mutex_unlock(&pool->mutex);
+
+	if (!page)
+		page = ion_page_pool_alloc_pages(pool);
+
+	return page;
+}
+
+void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
+{
+	int ret;
+
+	ret = ion_page_pool_add(pool, page);
+	if (ret)
+		ion_page_pool_free_pages(pool, page);
+}
+
+void ion_page_pool_free_immediate(struct ion_page_pool *pool, struct page *page)
+{
+	ion_page_pool_free_pages(pool, page);
+}
+
+static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
+{
+	int count = pool->low_count;
+
+	if (high)
+		count += pool->high_count;
+
+	return count << pool->order;
+}
+
+int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
+				int nr_to_scan)
+{
+	int i;
+	bool high;
+
+	high = !!(gfp_mask & __GFP_HIGHMEM);
+
+	for (i = 0; i < nr_to_scan; i++) {
+		struct page *page;
+
+		mutex_lock(&pool->mutex);
+		if (pool->low_count) {
+			page = ion_page_pool_remove(pool, false);
+		} else if (high && pool->high_count) {
+			page = ion_page_pool_remove(pool, true);
+		} else {
+			mutex_unlock(&pool->mutex);
+			break;
+		}
+		mutex_unlock(&pool->mutex);
+		ion_page_pool_free_pages(pool, page);
+	}
+
+	return ion_page_pool_total(pool, high);
+}
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
+{
+	struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
+					     GFP_KERNEL);
+	if (!pool)
+		return NULL;
+	pool->high_count = 0;
+	pool->low_count = 0;
+	INIT_LIST_HEAD(&pool->low_items);
+	INIT_LIST_HEAD(&pool->high_items);
+	pool->gfp_mask = gfp_mask;
+	pool->order = order;
+	mutex_init(&pool->mutex);
+	plist_node_init(&pool->list, order);
+
+	return pool;
+}
+
+void ion_page_pool_destroy(struct ion_page_pool *pool)
+{
+	kfree(pool);
+}
+
+static int __init ion_page_pool_init(void)
+{
+	return 0;
+}
+
+static void __exit ion_page_pool_exit(void)
+{
+}
+
+module_init(ion_page_pool_init);
+module_exit(ion_page_pool_exit);
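
Taken together, the pool above is a simple front for alloc_pages(): ion_page_pool_alloc() recycles a cached page when one is available and falls back to a fresh allocation, while ion_page_pool_free() returns pages to the pool unless queueing them fails. A minimal sketch of how a heap might drive this API follows; the example_* wrappers and the order-4 choice are hypothetical, only the ion_page_pool_* calls come from the file above.

/* Sketch: one pool of order-4 (64K with 4K pages) DMA-ready pages. */
static struct ion_page_pool *example_pool;

static int example_pool_setup(void)
{
	example_pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 4);
	return example_pool ? 0 : -ENOMEM;
}

static struct page *example_get_page(void)
{
	/* pooled page if available, otherwise a fresh allocation */
	return ion_page_pool_alloc(example_pool);
}

static void example_put_page(struct page *page, bool from_shrinker)
{
	if (from_shrinker)
		/* bypass the pool so memory truly returns to the system */
		ion_page_pool_free_immediate(example_pool, page);
	else
		ion_page_pool_free(example_pool, page);
}
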
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
new file mode 100644
index 0000000..13531d1
--- /dev/null
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -0,0 +1,439 @@
+/*
+ * drivers/staging/android/ion/ion_priv.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ION_PRIV_H
+#define _ION_PRIV_H
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/kref.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/shrinker.h>
+#include <linux/types.h>
+#ifdef CONFIG_ION_POOL_CACHE_POLICY
+#include <asm/cacheflush.h>
+#endif
+
+#include "ion.h"
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
+
+/**
+ * struct ion_buffer - metadata for a particular buffer
+ * @ref:		reference count
+ * @node:		node in the ion_device buffers tree
+ * @dev:		back pointer to the ion_device
+ * @heap:		back pointer to the heap the buffer came from
+ * @flags:		buffer specific flags
+ * @private_flags:	internal buffer specific flags
+ * @size:		size of the buffer
+ * @priv_virt:		private data to the buffer representable as
+ *			a void *
+ * @priv_phys:		private data to the buffer representable as
+ *			an ion_phys_addr_t (and someday a phys_addr_t)
+ * @lock:		protects the buffers cnt fields
+ * @kmap_cnt:		number of times the buffer is mapped to the kernel
+ * @vaddr:		the kernel mapping if kmap_cnt is not zero
+ * @dmap_cnt:		number of times the buffer is mapped for dma
+ * @sg_table:		the sg table for the buffer if dmap_cnt is not zero
+ * @pages:		flat array of pages in the buffer -- used by fault
+ *			handler and only valid for buffers that are faulted in
+ * @vmas:		list of vma's mapping this buffer
+ * @handle_count:	count of handles referencing this buffer
+ * @task_comm:		taskcomm of last client to reference this buffer in a
+ *			handle, used for debugging
+ * @pid:		pid of last client to reference this buffer in a
+ *			handle, used for debugging
+ */
+struct ion_buffer {
+	struct kref ref;
+	union {
+		struct rb_node node;
+		struct list_head list;
+	};
+	struct ion_device *dev;
+	struct ion_heap *heap;
+	unsigned long flags;
+	unsigned long private_flags;
+	size_t size;
+	union {
+		void *priv_virt;
+		ion_phys_addr_t priv_phys;
+	};
+	struct mutex lock;
+	int kmap_cnt;
+	void *vaddr;
+	int dmap_cnt;
+	struct sg_table *sg_table;
+	struct page **pages;
+	struct list_head vmas;
+	/* used to track orphaned buffers */
+	int handle_count;
+	char task_comm[TASK_COMM_LEN];
+	pid_t pid;
+};
+
+void ion_buffer_destroy(struct ion_buffer *buffer);
+
+/**
+ * struct ion_heap_ops - ops to operate on a given heap
+ * @allocate:		allocate memory
+ * @free:		free memory
+ * @phys:		get physical address of a buffer (only defined on
+ *			physically contiguous heaps)
+ * @map_dma:		map the memory for dma to a scatterlist
+ * @unmap_dma:		unmap the memory for dma
+ * @map_kernel:		map memory to the kernel
+ * @unmap_kernel:	unmap memory from the kernel
+ * @map_user:		map memory to userspace
+ * @shrink:		free unused cached memory, called from a shrinker
+ *
+ * allocate, phys, and map_user return 0 on success, -errno on error.
+ * map_dma and map_kernel return pointer on success, ERR_PTR on
+ * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
+ * the buffer's private_flags when called from a shrinker. In that
+ * case, the pages being free'd must be truly free'd back to the
+ * system, not put in a page pool or otherwise cached.
+ */
+struct ion_heap_ops {
+	int (*allocate) (struct ion_heap *heap,
+			 struct ion_buffer *buffer, unsigned long len,
+			 unsigned long align, unsigned long flags);
+	void (*free) (struct ion_buffer *buffer);
+	int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
+		     ion_phys_addr_t *addr, size_t *len);
+	struct sg_table *(*map_dma) (struct ion_heap *heap,
+					struct ion_buffer *buffer);
+	void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
+	void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+	void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+	int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
+			 struct vm_area_struct *vma);
+	int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
+};
+
+/**
+ * heap flags - flags between the heaps and core ion code
+ */
+#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
+
+/**
+ * private flags - flags internal to ion
+ */
+/*
+ * Buffer is being freed from a shrinker function. Skip any possible
+ * heap-specific caching mechanism (e.g. page pools). Guarantees that
+ * any buffer storage that came from the system allocator will be
+ * returned to the system allocator.
+ */
+#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0)
+
+/**
+ * struct ion_heap - represents a heap in the system
+ * @node:		rb node to put the heap on the device's tree of heaps
+ * @dev:		back pointer to the ion_device
+ * @type:		type of heap
+ * @ops:		ops struct as above
+ * @flags:		flags
+ * @id:			id of heap, also indicates priority of this heap when
+ *			allocating.  These are specified by platform data and
+ *			MUST be unique
+ * @name:		used for debugging
+ * @shrinker:		a shrinker for the heap
+ * @free_list:		free list head if deferred free is used
+ * @free_list_size:	size of the deferred free list in bytes
+ * @free_lock:		protects the free list
+ * @waitqueue:		queue to wait on from deferred free thread
+ * @task:		task struct of deferred free thread
+ * @debug_show:		called when heap debug file is read to add any
+ *			heap specific debug info to output
+ *
+ * Represents a pool of memory from which buffers can be made.  In some
+ * systems the only heap is regular system memory allocated via vmalloc.
+ * On others, some blocks might require large physically contiguous buffers
+ * that are allocated from a specially reserved heap.
+ */
+struct ion_heap {
+	struct plist_node node;
+	struct ion_device *dev;
+	enum ion_heap_type type;
+	struct ion_heap_ops *ops;
+	unsigned long flags;
+	unsigned int id;
+	const char *name;
+	struct shrinker shrinker;
+	struct list_head free_list;
+	size_t free_list_size;
+	spinlock_t free_lock;
+	wait_queue_head_t waitqueue;
+	struct task_struct *task;
+
+	int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
+};
+
+/**
+ * ion_buffer_cached - this ion buffer is cached
+ * @buffer:		buffer
+ *
+ * indicates whether this ion buffer is cached
+ */
+bool ion_buffer_cached(struct ion_buffer *buffer);
+
+/**
+ * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
+ * @buffer:		buffer
+ *
+ * indicates whether userspace mappings of this buffer will be faulted
+ * in, this can affect how buffers are allocated from the heap.
+ */
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
+
+/**
+ * ion_device_create - allocates and returns an ion device
+ * @custom_ioctl:	arch specific ioctl function if applicable
+ *
+ * returns a valid device on success, or an ERR_PTR value on failure
+ */
+struct ion_device *ion_device_create(long (*custom_ioctl)
+				     (struct ion_client *client,
+				      unsigned int cmd,
+				      unsigned long arg));
+
+/**
+ * ion_device_destroy - frees the device and its resources
+ * @dev:		the device
+ */
+void ion_device_destroy(struct ion_device *dev);
+
+/**
+ * ion_device_add_heap - adds a heap to the ion device
+ * @dev:		the device
+ * @heap:		the heap to add
+ */
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
+
+/**
+ * some helpers for common operations on buffers using the sg_table
+ * and vaddr fields
+ */
+void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
+void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
+int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
+			struct vm_area_struct *);
+int ion_heap_buffer_zero(struct ion_buffer *buffer);
+int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
+
+/**
+ * ion_heap_init_shrinker
+ * @heap:		the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
+ * this function will be called to setup a shrinker to shrink the freelists
+ * and call the heap's shrink op.
+ */
+void ion_heap_init_shrinker(struct ion_heap *heap);
+
+/**
+ * ion_heap_init_deferred_free -- initialize deferred free functionality
+ * @heap:		the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
+ * be called to setup deferred frees. Calls to free the buffer will
+ * return immediately and the actual free will occur some time later.
+ */
+int ion_heap_init_deferred_free(struct ion_heap *heap);
+
+/**
+ * ion_heap_freelist_add - add a buffer to the deferred free list
+ * @heap:		the heap
+ * @buffer:		the buffer
+ *
+ * Adds an item to the deferred freelist.
+ */
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
+
+/**
+ * ion_heap_freelist_drain - drain the deferred free list
+ * @heap:		the heap
+ * @size:		amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed.  The total freed may be higher than
+ * requested depending on the size of the items in the list, or lower if
+ * there is insufficient total memory on the freelist.
+ */
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
+
+/**
+ * ion_heap_freelist_shrink - drain the deferred free
+ *				list, skipping any heap-specific
+ *				pooling or caching mechanisms
+ *
+ * @heap:		the heap
+ * @size:		amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed.  The total freed may be higher than
+ * requested depending on the size of the items in the list, or lower if
+ * there is insufficient total memory on the freelist.
+ *
+ * Unlike with @ion_heap_freelist_drain, don't put any pages back into
+ * page pools or otherwise cache the pages. Everything must be
+ * genuinely free'd back to the system. If you're free'ing from a
+ * shrinker you probably want to use this. Note that this relies on
+ * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
+ * flag.
+ */
+size_t ion_heap_freelist_shrink(struct ion_heap *heap,
+					size_t size);
+
+/**
+ * ion_heap_freelist_size - returns the size of the freelist in bytes
+ * @heap:		the heap
+ */
+size_t ion_heap_freelist_size(struct ion_heap *heap);
+
+/**
+ * functions for creating and destroying the built in ion heaps.
+ * architectures can add their own custom architecture specific
+ * heaps as appropriate.
+ */
+
+struct ion_heap *ion_heap_create(struct ion_platform_heap *);
+void ion_heap_destroy(struct ion_heap *);
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
+void ion_system_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
+void ion_system_contig_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
+void ion_carveout_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *);
+void ion_chunk_heap_destroy(struct ion_heap *);
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
+void ion_cma_heap_destroy(struct ion_heap *);
+
+/**
+ * kernel api to allocate/free from carveout -- used when carveout is
+ * used to back an architecture specific custom heap
+ */
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
+				      unsigned long align);
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+		       unsigned long size);
+/**
+ * The carveout heap returns physical addresses; since 0 may be a valid
+ * physical address, this value is used to indicate allocation failure.
+ */
+#define ION_CARVEOUT_ALLOCATE_FAIL -1
+
+/**
+ * functions for creating and destroying a heap pool -- allows you
+ * to keep a pool of preallocated memory to use from your heap.  Keeping
+ * a pool of memory that is ready for dma, i.e. any cached mappings have
+ * been invalidated from the cache, provides a significant performance
+ * benefit on many systems.
+ */
+
+/**
+ * struct ion_page_pool - pagepool struct
+ * @high_count:		number of highmem items in the pool
+ * @low_count:		number of lowmem items in the pool
+ * @high_items:		list of highmem items
+ * @low_items:		list of lowmem items
+ * @mutex:		lock protecting this struct, in particular the counts
+ *			and item lists
+ * @gfp_mask:		gfp_mask to use for allocations
+ * @order:		order of pages in the pool
+ * @list:		plist node for list of pools
+ *
+ * Allows you to keep a pool of preallocated pages to use from your heap.
+ * Keeping a pool of pages that is ready for dma, i.e. any cached mappings
+ * have been invalidated from the cache, provides a significant performance
+ * benefit on many systems.
+ */
+struct ion_page_pool {
+	int high_count;
+	int low_count;
+	struct list_head high_items;
+	struct list_head low_items;
+	struct mutex mutex;
+	gfp_t gfp_mask;
+	unsigned int order;
+	struct plist_node list;
+};
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
+void ion_page_pool_destroy(struct ion_page_pool *);
+void *ion_page_pool_alloc(struct ion_page_pool *);
+void ion_page_pool_free(struct ion_page_pool *, struct page *);
+void ion_page_pool_free_immediate(struct ion_page_pool *, struct page *);
+
+#ifdef CONFIG_ION_POOL_CACHE_POLICY
+static inline void ion_page_pool_alloc_set_cache_policy(struct ion_page_pool *pool,
+							struct page *page)
+{
+	void *va = page_address(page);
+
+	if (va)
+		set_memory_wc((unsigned long)va, 1 << pool->order);
+}
+
+static inline void ion_page_pool_free_set_cache_policy(struct ion_page_pool *pool,
+						       struct page *page)
+{
+	void *va = page_address(page);
+
+	if (va)
+		set_memory_wb((unsigned long)va, 1 << pool->order);
+}
+#else
+static inline void ion_page_pool_alloc_set_cache_policy(struct ion_page_pool *pool,
+							struct page *page) { }
+
+static inline void ion_page_pool_free_set_cache_policy(struct ion_page_pool *pool,
+						       struct page *page) { }
+#endif
+
+/**
+ * ion_page_pool_shrink - shrinks the size of the memory cached in the pool
+ * @pool:		the pool
+ * @gfp_mask:		the memory type to reclaim
+ * @nr_to_scan:		maximum number of pages to free
+ *
+ * returns the number of pages remaining in the pool after shrinking
+ */
+int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
+			  int nr_to_scan);
+
+/**
+ * ion_pages_sync_for_device - cache flush pages for use with the specified
+ *                             device
+ * @dev:		the device the pages will be used with
+ * @page:		the first page to be flushed
+ * @size:		size in bytes of region to be flushed
+ * @dir:		direction of dma transfer
+ */
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+		size_t size, enum dma_data_direction dir);
+
+#endif /* _ION_PRIV_H */
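
The header above defines the whole contract between core ion and a heap: fill in an ion_heap_ops, set up an ion_heap, and register it. A skeletal sketch follows, under stated assumptions: ION_HEAP_TYPE_CUSTOM is taken from ion.h, the id and names are made up, and the bodies are placeholders rather than a working allocator.

/* Sketch of the minimum a custom heap must provide. */
static int example_heap_allocate(struct ion_heap *heap,
				 struct ion_buffer *buffer, unsigned long len,
				 unsigned long align, unsigned long flags)
{
	/* a real heap would fill buffer->priv_virt or priv_phys here */
	return -ENOMEM;
}

static void example_heap_free(struct ion_buffer *buffer)
{
	/* must honor ION_PRIV_FLAG_SHRINKER_FREE in buffer->private_flags */
}

static struct ion_heap_ops example_heap_ops = {
	.allocate = example_heap_allocate,
	.free = example_heap_free,
	/* map_dma/unmap_dma/map_kernel/map_user are needed in practice;
	   common heaps reuse ion_heap_map_kernel() and friends above */
};

static struct ion_heap example_heap = {
	.ops = &example_heap_ops,
	.type = ION_HEAP_TYPE_CUSTOM,
	.name = "example",
	.id = 42,		/* ids must be unique per device */
};
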
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
new file mode 100644
index 0000000..69c2168
--- /dev/null
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -0,0 +1,453 @@
+/*
+ * drivers/staging/android/ion/ion_system_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/page.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
+				     __GFP_NORETRY) & ~__GFP_WAIT;
+static gfp_t low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
+static const unsigned int orders[] = {8, 4, 0};
+static const int num_orders = ARRAY_SIZE(orders);
+static int order_to_index(unsigned int order)
+{
+	int i;
+
+	for (i = 0; i < num_orders; i++)
+		if (order == orders[i])
+			return i;
+	BUG();
+	return -1;
+}
+
+static unsigned int order_to_size(int order)
+{
+	return PAGE_SIZE << order;
+}
+
+struct ion_system_heap {
+	struct ion_heap heap;
+	struct ion_page_pool **pools;
+};
+
+struct page_info {
+	struct page *page;
+	unsigned int order;
+	struct list_head list;
+};
+
+static struct page *alloc_buffer_page(struct ion_system_heap *heap,
+				      struct ion_buffer *buffer,
+				      unsigned long order)
+{
+	bool cached = ion_buffer_cached(buffer);
+	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+	struct page *page;
+
+	if (!cached) {
+		page = ion_page_pool_alloc(pool);
+	} else {
+		gfp_t gfp_flags = low_order_gfp_flags;
+
+		if (order > 4)
+			gfp_flags = high_order_gfp_flags;
+		page = alloc_pages(gfp_flags, order);
+		if (!page)
+			return NULL;
+		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
+						DMA_BIDIRECTIONAL);
+	}
+	return page;
+}
+
+static void free_buffer_page(struct ion_system_heap *heap,
+			     struct ion_buffer *buffer, struct page *page,
+			     unsigned int order)
+{
+	bool cached = ion_buffer_cached(buffer);
+
+	if (!cached) {
+		struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+		if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)
+			ion_page_pool_free_immediate(pool, page);
+		else
+			ion_page_pool_free(pool, page);
+	} else {
+		__free_pages(page, order);
+	}
+}
+
+static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
+						 struct ion_buffer *buffer,
+						 unsigned long size,
+						 unsigned int max_order)
+{
+	struct page *page;
+	struct page_info *info;
+	int i;
+
+	info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
+	if (!info)
+		return NULL;
+
+	for (i = 0; i < num_orders; i++) {
+		if (size < order_to_size(orders[i]))
+			continue;
+		if (max_order < orders[i])
+			continue;
+
+		page = alloc_buffer_page(heap, buffer, orders[i]);
+		if (!page)
+			continue;
+
+		info->page = page;
+		info->order = orders[i];
+		INIT_LIST_HEAD(&info->list);
+		return info;
+	}
+	kfree(info);
+
+	return NULL;
+}
+
+static int ion_system_heap_allocate(struct ion_heap *heap,
+				     struct ion_buffer *buffer,
+				     unsigned long size, unsigned long align,
+				     unsigned long flags)
+{
+	struct ion_system_heap *sys_heap = container_of(heap,
+							struct ion_system_heap,
+							heap);
+	struct sg_table *table;
+	struct scatterlist *sg;
+	int ret;
+	struct list_head pages;
+	struct page_info *info, *tmp_info;
+	int i = 0;
+	unsigned long size_remaining = PAGE_ALIGN(size);
+	unsigned int max_order = orders[0];
+
+	if (align > PAGE_SIZE)
+		return -EINVAL;
+
+	if (size / PAGE_SIZE > totalram_pages / 2)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&pages);
+	while (size_remaining > 0) {
+		info = alloc_largest_available(sys_heap, buffer, size_remaining,
+						max_order);
+		if (!info)
+			goto err;
+		list_add_tail(&info->list, &pages);
+		size_remaining -= (1 << info->order) * PAGE_SIZE;
+		max_order = info->order;
+		i++;
+	}
+	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!table)
+		goto err;
+
+	ret = sg_alloc_table(table, i, GFP_KERNEL);
+	if (ret)
+		goto err1;
+
+	sg = table->sgl;
+	list_for_each_entry_safe(info, tmp_info, &pages, list) {
+		struct page *page = info->page;
+		sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
+		sg = sg_next(sg);
+		list_del(&info->list);
+		kfree(info);
+	}
+
+	buffer->priv_virt = table;
+	return 0;
+err1:
+	kfree(table);
+err:
+	list_for_each_entry_safe(info, tmp_info, &pages, list) {
+		free_buffer_page(sys_heap, buffer, info->page, info->order);
+		kfree(info);
+	}
+	return -ENOMEM;
+}
+
+static void ion_system_heap_free(struct ion_buffer *buffer)
+{
+	struct ion_heap *heap = buffer->heap;
+	struct ion_system_heap *sys_heap = container_of(heap,
+							struct ion_system_heap,
+							heap);
+	struct sg_table *table = buffer->sg_table;
+	bool cached = ion_buffer_cached(buffer);
+	struct scatterlist *sg;
+	LIST_HEAD(pages);
+	int i;
+
+	/* uncached pages come from the page pools, zero them before returning
+	   for security purposes (other allocations are zeroed at alloc time) */
+	if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
+		ion_heap_buffer_zero(buffer);
+
+	for_each_sg(table->sgl, sg, table->nents, i)
+		free_buffer_page(sys_heap, buffer, sg_page(sg),
+				get_order(sg->length));
+	sg_free_table(table);
+	kfree(table);
+}
+
+static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
+						struct ion_buffer *buffer)
+{
+	return buffer->priv_virt;
+}
+
+static void ion_system_heap_unmap_dma(struct ion_heap *heap,
+				      struct ion_buffer *buffer)
+{
+}
+
+static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
+					int nr_to_scan)
+{
+	struct ion_system_heap *sys_heap;
+	int nr_total = 0;
+	int i;
+
+	sys_heap = container_of(heap, struct ion_system_heap, heap);
+
+	for (i = 0; i < num_orders; i++) {
+		struct ion_page_pool *pool = sys_heap->pools[i];
+
+		nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
+	}
+
+	return nr_total;
+}
+
+static struct ion_heap_ops system_heap_ops = {
+	.allocate = ion_system_heap_allocate,
+	.free = ion_system_heap_free,
+	.map_dma = ion_system_heap_map_dma,
+	.unmap_dma = ion_system_heap_unmap_dma,
+	.map_kernel = ion_heap_map_kernel,
+	.unmap_kernel = ion_heap_unmap_kernel,
+	.map_user = ion_heap_map_user,
+	.shrink = ion_system_heap_shrink,
+};
+
+static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
+				      void *unused)
+{
+	struct ion_system_heap *sys_heap = container_of(heap,
+							struct ion_system_heap,
+							heap);
+	int i;
+
+	for (i = 0; i < num_orders; i++) {
+		struct ion_page_pool *pool = sys_heap->pools[i];
+
+		seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
+			   pool->high_count, pool->order,
+			   (1 << pool->order) * PAGE_SIZE * pool->high_count);
+		seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
+			   pool->low_count, pool->order,
+			   (1 << pool->order) * PAGE_SIZE * pool->low_count);
+	}
+	return 0;
+}
+
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
+{
+	struct ion_system_heap *heap;
+	int i;
+
+	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
+	if (!heap)
+		return ERR_PTR(-ENOMEM);
+	heap->heap.ops = &system_heap_ops;
+	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
+	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+	heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
+			      GFP_KERNEL);
+	if (!heap->pools)
+		goto err_alloc_pools;
+	for (i = 0; i < num_orders; i++) {
+		struct ion_page_pool *pool;
+		gfp_t gfp_flags = low_order_gfp_flags;
+
+		if (orders[i] > 4)
+			gfp_flags = high_order_gfp_flags;
+		pool = ion_page_pool_create(gfp_flags, orders[i]);
+		if (!pool)
+			goto err_create_pool;
+		heap->pools[i] = pool;
+	}
+
+	heap->heap.debug_show = ion_system_heap_debug_show;
+	return &heap->heap;
+err_create_pool:
+	for (i = 0; i < num_orders; i++)
+		if (heap->pools[i])
+			ion_page_pool_destroy(heap->pools[i]);
+	kfree(heap->pools);
+err_alloc_pools:
+	kfree(heap);
+	return ERR_PTR(-ENOMEM);
+}
+
+void ion_system_heap_destroy(struct ion_heap *heap)
+{
+	struct ion_system_heap *sys_heap = container_of(heap,
+							struct ion_system_heap,
+							heap);
+	int i;
+
+	for (i = 0; i < num_orders; i++)
+		ion_page_pool_destroy(sys_heap->pools[i]);
+	kfree(sys_heap->pools);
+	kfree(sys_heap);
+}
+
+static int ion_system_contig_heap_allocate(struct ion_heap *heap,
+					   struct ion_buffer *buffer,
+					   unsigned long len,
+					   unsigned long align,
+					   unsigned long flags)
+{
+	int order = get_order(len);
+	struct page *page;
+	struct sg_table *table;
+	unsigned long i;
+	int ret;
+
+	if (align > (PAGE_SIZE << order))
+		return -EINVAL;
+
+	page = alloc_pages(low_order_gfp_flags, order);
+	if (!page)
+		return -ENOMEM;
+
+	split_page(page, order);
+
+	len = PAGE_ALIGN(len);
+	for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
+		__free_page(page + i);
+
+	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!table) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = sg_alloc_table(table, 1, GFP_KERNEL);
+	if (ret)
+		goto out;
+
+	sg_set_page(table->sgl, page, len, 0);
+
+	buffer->priv_virt = table;
+
+	ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
+
+	return 0;
+
+out:
+	for (i = 0; i < len >> PAGE_SHIFT; i++)
+		__free_page(page + i);
+	kfree(table);
+	return ret;
+}
+
+static void ion_system_contig_heap_free(struct ion_buffer *buffer)
+{
+	struct sg_table *table = buffer->priv_virt;
+	struct page *page = sg_page(table->sgl);
+	unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
+	unsigned long i;
+
+	for (i = 0; i < pages; i++)
+		__free_page(page + i);
+	sg_free_table(table);
+	kfree(table);
+}
+
+static int ion_system_contig_heap_phys(struct ion_heap *heap,
+				       struct ion_buffer *buffer,
+				       ion_phys_addr_t *addr, size_t *len)
+{
+	struct sg_table *table = buffer->priv_virt;
+	struct page *page = sg_page(table->sgl);
+	*addr = page_to_phys(page);
+	*len = buffer->size;
+	return 0;
+}
+
+static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+						struct ion_buffer *buffer)
+{
+	return buffer->priv_virt;
+}
+
+static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
+					     struct ion_buffer *buffer)
+{
+}
+
+static struct ion_heap_ops kmalloc_ops = {
+	.allocate = ion_system_contig_heap_allocate,
+	.free = ion_system_contig_heap_free,
+	.phys = ion_system_contig_heap_phys,
+	.map_dma = ion_system_contig_heap_map_dma,
+	.unmap_dma = ion_system_contig_heap_unmap_dma,
+	.map_kernel = ion_heap_map_kernel,
+	.unmap_kernel = ion_heap_unmap_kernel,
+	.map_user = ion_heap_map_user,
+};
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
+{
+	struct ion_heap *heap;
+
+	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+	if (!heap)
+		return ERR_PTR(-ENOMEM);
+	heap->ops = &kmalloc_ops;
+	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
+	return heap;
+}
+
+void ion_system_contig_heap_destroy(struct ion_heap *heap)
+{
+	kfree(heap);
+}
+
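The allocator above fills a buffer by repeatedly taking the largest chunk that still fits, and never lets a later chunk exceed an earlier one, so an allocation decays monotonically through the 1MB/64K/4K order classes. A compact restatement of that policy follows; pick_order is a hypothetical name, and the logic mirrors alloc_largest_available().

static unsigned int pick_order(unsigned long size_remaining,
			       unsigned int max_order)
{
	static const unsigned int orders[] = {8, 4, 0};
	int i;

	for (i = 0; i < ARRAY_SIZE(orders); i++) {
		if (size_remaining < (PAGE_SIZE << orders[i]))
			continue;	/* chunk larger than what is left */
		if (max_order < orders[i])
			continue;	/* never grow past the previous chunk */
		return orders[i];
	}
	return 0;
}
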
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
new file mode 100644
index 0000000..654acb5
--- /dev/null
+++ b/drivers/staging/android/ion/ion_test.c
@@ -0,0 +1,282 @@
+/*
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "ion-test: " fmt
+
+#include <linux/dma-buf.h>
+#include <linux/dma-direction.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include "ion.h"
+#include "../uapi/ion_test.h"
+
+#define u64_to_uptr(x) ((void __user *)(unsigned long)(x))
+
+struct ion_test_device {
+	struct miscdevice misc;
+};
+
+struct ion_test_data {
+	struct dma_buf *dma_buf;
+	struct device *dev;
+};
+
+static int ion_handle_test_dma(struct device *dev, struct dma_buf *dma_buf,
+		void __user *ptr, size_t offset, size_t size, bool write)
+{
+	int ret = 0;
+	struct dma_buf_attachment *attach;
+	struct sg_table *table;
+	pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
+	enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+	struct sg_page_iter sg_iter;
+	unsigned long offset_page;
+
+	attach = dma_buf_attach(dma_buf, dev);
+	if (IS_ERR(attach))
+		return PTR_ERR(attach);
+
+	table = dma_buf_map_attachment(attach, dir);
+	if (IS_ERR(table)) {
+		dma_buf_detach(dma_buf, attach);
+		return PTR_ERR(table);
+	}
+
+	offset_page = offset >> PAGE_SHIFT;
+	offset %= PAGE_SIZE;
+
+	for_each_sg_page(table->sgl, &sg_iter, table->nents, offset_page) {
+		struct page *page = sg_page_iter_page(&sg_iter);
+		void *vaddr = vmap(&page, 1, VM_MAP, pgprot);
+		size_t to_copy = PAGE_SIZE - offset;
+
+		to_copy = min(to_copy, size);
+		if (!vaddr) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		if (write)
+			ret = copy_from_user(vaddr + offset, ptr, to_copy);
+		else
+			ret = copy_to_user(ptr, vaddr + offset, to_copy);
+
+		vunmap(vaddr);
+		if (ret) {
+			ret = -EFAULT;
+			goto err;
+		}
+		size -= to_copy;
+		if (!size)
+			break;
+		ptr += to_copy;
+		offset = 0;
+	}
+
+err:
+	dma_buf_unmap_attachment(attach, table, dir);
+	dma_buf_detach(dma_buf, attach);
+	return ret;
+}
+
+static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr,
+		size_t offset, size_t size, bool write)
+{
+	int ret;
+	unsigned long page_offset = offset >> PAGE_SHIFT;
+	size_t copy_offset = offset % PAGE_SIZE;
+	size_t copy_size = size;
+	enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+	if (offset > dma_buf->size || size > dma_buf->size - offset)
+		return -EINVAL;
+
+	ret = dma_buf_begin_cpu_access(dma_buf, offset, size, dir);
+	if (ret)
+		return ret;
+
+	while (copy_size > 0) {
+		size_t to_copy;
+		void *vaddr = dma_buf_kmap(dma_buf, page_offset);
+
+		if (!vaddr) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		to_copy = min_t(size_t, PAGE_SIZE - copy_offset, copy_size);
+
+		if (write)
+			ret = copy_from_user(vaddr + copy_offset, ptr, to_copy);
+		else
+			ret = copy_to_user(ptr, vaddr + copy_offset, to_copy);
+
+		dma_buf_kunmap(dma_buf, page_offset, vaddr);
+		if (ret) {
+			ret = -EFAULT;
+			goto err;
+		}
+
+		copy_size -= to_copy;
+		ptr += to_copy;
+		page_offset++;
+		copy_offset = 0;
+	}
+err:
+	dma_buf_end_cpu_access(dma_buf, offset, size, dir);
+	return ret;
+}
+
+static long ion_test_ioctl(struct file *filp, unsigned int cmd,
+						unsigned long arg)
+{
+	struct ion_test_data *test_data = filp->private_data;
+	int ret = 0;
+
+	union {
+		struct ion_test_rw_data test_rw;
+	} data;
+
+	if (_IOC_SIZE(cmd) > sizeof(data))
+		return -EINVAL;
+
+	if (_IOC_DIR(cmd) & _IOC_WRITE)
+		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+			return -EFAULT;
+
+	switch (cmd) {
+	case ION_IOC_TEST_SET_FD:
+	{
+		struct dma_buf *dma_buf = NULL;
+		int fd = arg;
+
+		if (fd >= 0) {
+			dma_buf = dma_buf_get(fd);
+			if (IS_ERR(dma_buf))
+				return PTR_ERR(dma_buf);
+		}
+		if (test_data->dma_buf)
+			dma_buf_put(test_data->dma_buf);
+		test_data->dma_buf = dma_buf;
+		break;
+	}
+	case ION_IOC_TEST_DMA_MAPPING:
+	{
+		ret = ion_handle_test_dma(test_data->dev, test_data->dma_buf,
+					u64_to_uptr(data.test_rw.ptr),
+					data.test_rw.offset, data.test_rw.size,
+					data.test_rw.write);
+		break;
+	}
+	case ION_IOC_TEST_KERNEL_MAPPING:
+	{
+		ret = ion_handle_test_kernel(test_data->dma_buf,
+					u64_to_uptr(data.test_rw.ptr),
+					data.test_rw.offset, data.test_rw.size,
+					data.test_rw.write);
+		break;
+	}
+	default:
+		return -ENOTTY;
+	}
+
+	if (_IOC_DIR(cmd) & _IOC_READ) {
+		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+			return -EFAULT;
+	}
+	return ret;
+}
+
+static int ion_test_open(struct inode *inode, struct file *file)
+{
+	struct ion_test_data *data;
+	struct miscdevice *miscdev = file->private_data;
+
+	data = kzalloc(sizeof(struct ion_test_data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->dev = miscdev->parent;
+
+	file->private_data = data;
+
+	return 0;
+}
+
+static int ion_test_release(struct inode *inode, struct file *file)
+{
+	struct ion_test_data *data = file->private_data;
+
+	kfree(data);
+
+	return 0;
+}
+
+static const struct file_operations ion_test_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = ion_test_ioctl,
+	.compat_ioctl = ion_test_ioctl,
+	.open = ion_test_open,
+	.release = ion_test_release,
+};
+
+static int __init ion_test_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct ion_test_device *testdev;
+
+	testdev = devm_kzalloc(&pdev->dev, sizeof(struct ion_test_device),
+				GFP_KERNEL);
+	if (!testdev)
+		return -ENOMEM;
+
+	testdev->misc.minor = MISC_DYNAMIC_MINOR;
+	testdev->misc.name = "ion-test";
+	testdev->misc.fops = &ion_test_fops;
+	testdev->misc.parent = &pdev->dev;
+	ret = misc_register(&testdev->misc);
+	if (ret) {
+		pr_err("failed to register misc device.\n");
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, testdev);
+
+	return 0;
+}
+
+static struct platform_driver ion_test_platform_driver = {
+	.driver = {
+		.name = "ion-test",
+	},
+};
+
+static int __init ion_test_init(void)
+{
+	platform_device_register_simple("ion-test", -1, NULL, 0);
+	return platform_driver_probe(&ion_test_platform_driver, ion_test_probe);
+}
+
+static void __exit ion_test_exit(void)
+{
+	platform_driver_unregister(&ion_test_platform_driver);
+}
+
+module_init(ion_test_init);
+module_exit(ion_test_exit);
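
From userspace, the driver above is driven with three ioctls: point the device at a dma-buf fd, then read or write through either the DMA or the kernel-mapping path. A hedged sketch follows; the /dev node name is inferred from the misc device name, the struct layout comes from the ion_test.h uapi header this driver includes, and error handling is minimal.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "ion_test.h"		/* the uapi header paired with this driver */

/* Copy msg into an ion buffer (buf_fd) via the kernel-mapping path. */
static int example_write_buffer(int buf_fd, const char *msg)
{
	struct ion_test_rw_data rw;
	int fd = open("/dev/ion-test", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, ION_IOC_TEST_SET_FD, buf_fd) < 0)
		goto fail;

	memset(&rw, 0, sizeof(rw));
	rw.ptr = (unsigned long)msg;	/* user pointer passed as a u64 */
	rw.offset = 0;
	rw.size = strlen(msg) + 1;
	rw.write = 1;			/* user -> buffer direction */
	if (ioctl(fd, ION_IOC_TEST_KERNEL_MAPPING, &rw) < 0)
		goto fail;

	ioctl(fd, ION_IOC_TEST_SET_FD, -1);	/* drop the dma-buf ref */
	close(fd);
	return 0;
fail:
	close(fd);
	return -1;
}
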
diff --git a/drivers/staging/android/ion/tegra/Makefile b/drivers/staging/android/ion/tegra/Makefile
new file mode 100644
index 0000000..11cd003
--- /dev/null
+++ b/drivers/staging/android/ion/tegra/Makefile
@@ -0,0 +1 @@
+obj-y += tegra_ion.o
diff --git a/drivers/staging/android/ion/tegra/tegra_ion.c b/drivers/staging/android/ion/tegra/tegra_ion.c
new file mode 100644
index 0000000..3474c65
--- /dev/null
+++ b/drivers/staging/android/ion/tegra/tegra_ion.c
@@ -0,0 +1,84 @@
+/*
+ * drivers/staging/android/ion/tegra/tegra_ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "../ion.h"
+#include "../ion_priv.h"
+
+static struct ion_device *idev;
+static int num_heaps;
+static struct ion_heap **heaps;
+
+static int tegra_ion_probe(struct platform_device *pdev)
+{
+	struct ion_platform_data *pdata = pdev->dev.platform_data;
+	int err;
+	int i;
+
+	num_heaps = pdata->nr;
+
+	heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
+	if (!heaps)
+		return -ENOMEM;
+
+	idev = ion_device_create(NULL);
+	if (IS_ERR_OR_NULL(idev)) {
+		kfree(heaps);
+		return idev ? PTR_ERR(idev) : -ENOMEM;
+	}
+
+	/* create the heaps as specified in the board file */
+	for (i = 0; i < num_heaps; i++) {
+		struct ion_platform_heap *heap_data = &pdata->heaps[i];
+
+		heaps[i] = ion_heap_create(heap_data);
+		if (IS_ERR_OR_NULL(heaps[i])) {
+			err = PTR_ERR(heaps[i]);
+			goto err;
+		}
+		ion_device_add_heap(idev, heaps[i]);
+	}
+	platform_set_drvdata(pdev, idev);
+	return 0;
+err:
+	for (i = 0; i < num_heaps; i++) {
+		if (heaps[i])
+			ion_heap_destroy(heaps[i]);
+	}
+	kfree(heaps);
+	return err;
+}
+
+static int tegra_ion_remove(struct platform_device *pdev)
+{
+	struct ion_device *idev = platform_get_drvdata(pdev);
+	int i;
+
+	ion_device_destroy(idev);
+	for (i = 0; i < num_heaps; i++)
+		ion_heap_destroy(heaps[i]);
+	kfree(heaps);
+	return 0;
+}
+
+static struct platform_driver ion_driver = {
+	.probe = tegra_ion_probe,
+	.remove = tegra_ion_remove,
+	.driver = { .name = "ion-tegra" }
+};
+
+module_platform_driver(ion_driver);
+
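The probe above consumes ion_platform_data supplied by a board file. Below is a sketch of what that producer side might look like; addresses, sizes, and ids are illustrative, and the device name must match "ion-tegra" from the driver above.

static struct ion_platform_heap example_heaps[] = {
	{
		.type = ION_HEAP_TYPE_SYSTEM,
		.id = 0,
		.name = "system",
	},
	{
		.type = ION_HEAP_TYPE_CARVEOUT,
		.id = 1,
		.name = "carveout",
		.base = 0x80000000,	/* hypothetical reserved RAM */
		.size = 8 * 1024 * 1024,
	},
};

static struct ion_platform_data example_ion_pdata = {
	.nr = ARRAY_SIZE(example_heaps),
	.heaps = example_heaps,
};

static struct platform_device example_ion_device = {
	.name = "ion-tegra",
	.id = -1,
	.dev = { .platform_data = &example_ion_pdata },
};
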
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
deleted file mode 100644
index 34519ea..0000000
--- a/drivers/staging/android/logger.c
+++ /dev/null
@@ -1,851 +0,0 @@
-/*
- * drivers/misc/logger.c
- *
- * A Logging Subsystem
- *
- * Copyright (C) 2007-2008 Google, Inc.
- *
- * Robert Love <rlove@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) "logger: " fmt
-
-#include <linux/sched.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/uaccess.h>
-#include <linux/poll.h>
-#include <linux/slab.h>
-#include <linux/time.h>
-#include <linux/vmalloc.h>
-#include <linux/aio.h>
-#include "logger.h"
-
-#include <asm/ioctls.h>
-
-/**
- * struct logger_log - represents a specific log, such as 'main' or 'radio'
- * @buffer:	The actual ring buffer
- * @misc:	The "misc" device representing the log
- * @wq:		The wait queue for @readers
- * @readers:	This log's readers
- * @mutex:	The mutex that protects the @buffer
- * @w_off:	The current write head offset
- * @head:	The head, or location that readers start reading at.
- * @size:	The size of the log
- * @logs:	The list of log channels
- *
- * This structure lives from module insertion until module removal, so it does
- * not need additional reference counting. The structure is protected by the
- * mutex 'mutex'.
- */
-struct logger_log {
-	unsigned char		*buffer;
-	struct miscdevice	misc;
-	wait_queue_head_t	wq;
-	struct list_head	readers;
-	struct mutex		mutex;
-	size_t			w_off;
-	size_t			head;
-	size_t			size;
-	struct list_head	logs;
-};
-
-static LIST_HEAD(log_list);
-
-
-/**
- * struct logger_reader - a logging device open for reading
- * @log:	The associated log
- * @list:	The associated entry in @logger_log's list
- * @r_off:	The current read head offset.
- * @r_all:	Reader can read all entries
- * @r_ver:	Reader ABI version
- *
- * This object lives from open to release, so we don't need additional
- * reference counting. The structure is protected by log->mutex.
- */
-struct logger_reader {
-	struct logger_log	*log;
-	struct list_head	list;
-	size_t			r_off;
-	bool			r_all;
-	int			r_ver;
-};
-
-/* logger_offset - returns index 'n' into the log via (optimized) modulus */
-static size_t logger_offset(struct logger_log *log, size_t n)
-{
-	return n & (log->size - 1);
-}
-
-
-/*
- * file_get_log - Given a file structure, return the associated log
- *
- * This isn't aesthetic. We have several goals:
- *
- *	1) Need to quickly obtain the associated log during an I/O operation
- *	2) Readers need to maintain state (logger_reader)
- *	3) Writers need to be very fast (open() should be a near no-op)
- *
- * In the reader case, we can trivially go file->logger_reader->logger_log.
- * For a writer, we don't want to maintain a logger_reader, so we just go
- * file->logger_log. Thus what file->private_data points at depends on whether
- * or not the file was opened for reading. This function hides that dirtiness.
- */
-static inline struct logger_log *file_get_log(struct file *file)
-{
-	if (file->f_mode & FMODE_READ) {
-		struct logger_reader *reader = file->private_data;
-		return reader->log;
-	} else
-		return file->private_data;
-}
-
-/*
- * get_entry_header - returns a pointer to the logger_entry header within
- * 'log' starting at offset 'off'. A temporary logger_entry 'scratch' must
- * be provided. Typically the return value will be a pointer within
- * 'logger->buf'.  However, a pointer to 'scratch' may be returned if
- * the log entry spans the end and beginning of the circular buffer.
- */
-static struct logger_entry *get_entry_header(struct logger_log *log,
-		size_t off, struct logger_entry *scratch)
-{
-	size_t len = min(sizeof(struct logger_entry), log->size - off);
-	if (len != sizeof(struct logger_entry)) {
-		memcpy(((void *) scratch), log->buffer + off, len);
-		memcpy(((void *) scratch) + len, log->buffer,
-			sizeof(struct logger_entry) - len);
-		return scratch;
-	}
-
-	return (struct logger_entry *) (log->buffer + off);
-}
-
-/*
- * get_entry_msg_len - Grabs the length of the message of the entry
- * starting from 'off'.
- *
- * An entry length is 2 bytes (16 bits) in host endian order.
- * In the log, the length does not include the size of the log entry structure.
- * This function returns the size including the log entry structure.
- *
- * Caller needs to hold log->mutex.
- */
-static __u32 get_entry_msg_len(struct logger_log *log, size_t off)
-{
-	struct logger_entry scratch;
-	struct logger_entry *entry;
-
-	entry = get_entry_header(log, off, &scratch);
-	return entry->len;
-}
-
-static size_t get_user_hdr_len(int ver)
-{
-	if (ver < 2)
-		return sizeof(struct user_logger_entry_compat);
-	else
-		return sizeof(struct logger_entry);
-}
-
-static ssize_t copy_header_to_user(int ver, struct logger_entry *entry,
-					 char __user *buf)
-{
-	void *hdr;
-	size_t hdr_len;
-	struct user_logger_entry_compat v1;
-
-	if (ver < 2) {
-		v1.len      = entry->len;
-		v1.__pad    = 0;
-		v1.pid      = entry->pid;
-		v1.tid      = entry->tid;
-		v1.sec      = entry->sec;
-		v1.nsec     = entry->nsec;
-		hdr         = &v1;
-		hdr_len     = sizeof(struct user_logger_entry_compat);
-	} else {
-		hdr         = entry;
-		hdr_len     = sizeof(struct logger_entry);
-	}
-
-	return copy_to_user(buf, hdr, hdr_len);
-}
-
-/*
- * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
- * user-space buffer 'buf'. Returns 'count' on success.
- *
- * Caller must hold log->mutex.
- */
-static ssize_t do_read_log_to_user(struct logger_log *log,
-				   struct logger_reader *reader,
-				   char __user *buf,
-				   size_t count)
-{
-	struct logger_entry scratch;
-	struct logger_entry *entry;
-	size_t len;
-	size_t msg_start;
-
-	/*
-	 * First, copy the header to userspace, using the version of
-	 * the header requested
-	 */
-	entry = get_entry_header(log, reader->r_off, &scratch);
-	if (copy_header_to_user(reader->r_ver, entry, buf))
-		return -EFAULT;
-
-	count -= get_user_hdr_len(reader->r_ver);
-	buf += get_user_hdr_len(reader->r_ver);
-	msg_start = logger_offset(log,
-		reader->r_off + sizeof(struct logger_entry));
-
-	/*
-	 * We read from the msg in two disjoint operations. First, we read from
-	 * the current msg head offset up to 'count' bytes or to the end of
-	 * the log, whichever comes first.
-	 */
-	len = min(count, log->size - msg_start);
-	if (copy_to_user(buf, log->buffer + msg_start, len))
-		return -EFAULT;
-
-	/*
-	 * Second, we read any remaining bytes, starting back at the head of
-	 * the log.
-	 */
-	if (count != len)
-		if (copy_to_user(buf + len, log->buffer, count - len))
-			return -EFAULT;
-
-	reader->r_off = logger_offset(log, reader->r_off +
-		sizeof(struct logger_entry) + count);
-
-	return count + get_user_hdr_len(reader->r_ver);
-}
-
-/*
- * get_next_entry_by_uid - Starting at 'off', returns an offset into
- * 'log->buffer' which contains the first entry readable by 'euid'
- */
-static size_t get_next_entry_by_uid(struct logger_log *log,
-		size_t off, kuid_t euid)
-{
-	while (off != log->w_off) {
-		struct logger_entry *entry;
-		struct logger_entry scratch;
-		size_t next_len;
-
-		entry = get_entry_header(log, off, &scratch);
-
-		if (uid_eq(entry->euid, euid))
-			return off;
-
-		next_len = sizeof(struct logger_entry) + entry->len;
-		off = logger_offset(log, off + next_len);
-	}
-
-	return off;
-}
-
-/*
- * logger_read - our log's read() method
- *
- * Behavior:
- *
- *	- O_NONBLOCK works
- *	- If there are no log entries to read, blocks until log is written to
- *	- Atomically reads exactly one log entry
- *
- * Will set errno to EINVAL if read
- * buffer is insufficient to hold next entry.
- */
-static ssize_t logger_read(struct file *file, char __user *buf,
-			   size_t count, loff_t *pos)
-{
-	struct logger_reader *reader = file->private_data;
-	struct logger_log *log = reader->log;
-	ssize_t ret;
-	DEFINE_WAIT(wait);
-
-start:
-	while (1) {
-		mutex_lock(&log->mutex);
-
-		prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);
-
-		ret = (log->w_off == reader->r_off);
-		mutex_unlock(&log->mutex);
-		if (!ret)
-			break;
-
-		if (file->f_flags & O_NONBLOCK) {
-			ret = -EAGAIN;
-			break;
-		}
-
-		if (signal_pending(current)) {
-			ret = -EINTR;
-			break;
-		}
-
-		schedule();
-	}
-
-	finish_wait(&log->wq, &wait);
-	if (ret)
-		return ret;
-
-	mutex_lock(&log->mutex);
-
-	if (!reader->r_all)
-		reader->r_off = get_next_entry_by_uid(log,
-			reader->r_off, current_euid());
-
-	/* is there still something to read or did we race? */
-	if (unlikely(log->w_off == reader->r_off)) {
-		mutex_unlock(&log->mutex);
-		goto start;
-	}
-
-	/* get the size of the next entry */
-	ret = get_user_hdr_len(reader->r_ver) +
-		get_entry_msg_len(log, reader->r_off);
-	if (count < ret) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	/* get exactly one entry from the log */
-	ret = do_read_log_to_user(log, reader, buf, ret);
-
-out:
-	mutex_unlock(&log->mutex);
-
-	return ret;
-}
-
-/*
- * get_next_entry - return the offset of the first valid entry at least 'len'
- * bytes after 'off'.
- *
- * Caller must hold log->mutex.
- */
-static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
-{
-	size_t count = 0;
-
-	do {
-		size_t nr = sizeof(struct logger_entry) +
-			get_entry_msg_len(log, off);
-		off = logger_offset(log, off + nr);
-		count += nr;
-	} while (count < len);
-
-	return off;
-}
-
-/*
- * is_between - is a < c < b, accounting for wrapping of a, b, and c
- *    positions in the buffer
- *
- * That is, if a<b, check for c between a and b
- * and if a>b, check for c outside (not between) a and b
- *
- * |------- a xxxxxxxx b --------|
- *               c^
- *
- * |xxxxx b --------- a xxxxxxxxx|
- *    c^
- *  or                    c^
- */
-static inline int is_between(size_t a, size_t b, size_t c)
-{
-	if (a < b) {
-		/* is c between a and b? */
-		if (a < c && c <= b)
-			return 1;
-	} else {
-		/* is c outside of b through a? */
-		if (c <= b || a < c)
-			return 1;
-	}
-
-	return 0;
-}
-
-/*
- * fix_up_readers - walk the list of all readers and "fix up" any who were
- * lapped by the writer; also do the same for the default "start head".
- * We do this by "pulling forward" the readers and start head to the first
- * entry after the new write head.
- *
- * The caller needs to hold log->mutex.
- */
-static void fix_up_readers(struct logger_log *log, size_t len)
-{
-	size_t old = log->w_off;
-	size_t new = logger_offset(log, old + len);
-	struct logger_reader *reader;
-
-	if (is_between(old, new, log->head))
-		log->head = get_next_entry(log, log->head, len);
-
-	list_for_each_entry(reader, &log->readers, list)
-		if (is_between(old, new, reader->r_off))
-			reader->r_off = get_next_entry(log, reader->r_off, len);
-}
-
-/*
- * do_write_log - writes 'len' bytes from 'buf' to 'log'
- *
- * The caller needs to hold log->mutex.
- */
-static void do_write_log(struct logger_log *log, const void *buf, size_t count)
-{
-	size_t len;
-
-	len = min(count, log->size - log->w_off);
-	memcpy(log->buffer + log->w_off, buf, len);
-
-	if (count != len)
-		memcpy(log->buffer, buf + len, count - len);
-
-	log->w_off = logger_offset(log, log->w_off + count);
-
-}
-
-/*
- * do_write_log_from_user - writes 'count' bytes from the user-space buffer 'buf' to
- * the log 'log'
- *
- * The caller needs to hold log->mutex.
- *
- * Returns 'count' on success, negative error code on failure.
- */
-static ssize_t do_write_log_from_user(struct logger_log *log,
-				      const void __user *buf, size_t count)
-{
-	size_t len;
-
-	len = min(count, log->size - log->w_off);
-	if (len && copy_from_user(log->buffer + log->w_off, buf, len))
-		return -EFAULT;
-
-	if (count != len)
-		if (copy_from_user(log->buffer, buf + len, count - len))
-			/*
-			 * Note that by not updating w_off, this abandons the
-			 * portion of the new entry that *was* successfully
-			 * copied, just above.  This is intentional to avoid
-			 * message corruption from missing fragments.
-			 */
-			return -EFAULT;
-
-	log->w_off = logger_offset(log, log->w_off + count);
-
-	return count;
-}
-
-/*
- * logger_aio_write - our write method, implementing support for write(),
- * writev(), and aio_write(). Writes are our fast path, and we try to optimize
- * them above all else.
- */
-static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
-			 unsigned long nr_segs, loff_t ppos)
-{
-	struct logger_log *log = file_get_log(iocb->ki_filp);
-	size_t orig;
-	struct logger_entry header;
-	struct timespec now;
-	ssize_t ret = 0;
-
-	now = current_kernel_time();
-
-	header.pid = current->tgid;
-	header.tid = current->pid;
-	header.sec = now.tv_sec;
-	header.nsec = now.tv_nsec;
-	header.euid = current_euid();
-	header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD);
-	header.hdr_size = sizeof(struct logger_entry);
-
-	/* null writes succeed, return zero */
-	if (unlikely(!header.len))
-		return 0;
-
-	mutex_lock(&log->mutex);
-
-	orig = log->w_off;
-
-	/*
-	 * Fix up any readers, pulling them forward to the first readable
-	 * entry after (what will be) the new write offset. We do this now
-	 * because if we partially fail, we can end up with clobbered log
-	 * entries that encroach on readable buffer.
-	 */
-	fix_up_readers(log, sizeof(struct logger_entry) + header.len);
-
-	do_write_log(log, &header, sizeof(struct logger_entry));
-
-	while (nr_segs-- > 0) {
-		size_t len;
-		ssize_t nr;
-
-		/* figure out how much of this vector we can keep */
-		len = min_t(size_t, iov->iov_len, header.len - ret);
-
-		/* write out this segment's payload */
-		nr = do_write_log_from_user(log, iov->iov_base, len);
-		if (unlikely(nr < 0)) {
-			log->w_off = orig;
-			mutex_unlock(&log->mutex);
-			return nr;
-		}
-
-		iov++;
-		ret += nr;
-	}
-
-	mutex_unlock(&log->mutex);
-
-	/* wake up any blocked readers */
-	wake_up_interruptible(&log->wq);
-
-	return ret;
-}
-
-static struct logger_log *get_log_from_minor(int minor)
-{
-	struct logger_log *log;
-
-	list_for_each_entry(log, &log_list, logs)
-		if (log->misc.minor == minor)
-			return log;
-	return NULL;
-}
-
-/*
- * logger_open - the log's open() file operation
- *
- * Note how near a no-op this is in the write-only case. Keep it that way!
- */
-static int logger_open(struct inode *inode, struct file *file)
-{
-	struct logger_log *log;
-	int ret;
-
-	ret = nonseekable_open(inode, file);
-	if (ret)
-		return ret;
-
-	log = get_log_from_minor(MINOR(inode->i_rdev));
-	if (!log)
-		return -ENODEV;
-
-	if (file->f_mode & FMODE_READ) {
-		struct logger_reader *reader;
-
-		reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL);
-		if (!reader)
-			return -ENOMEM;
-
-		reader->log = log;
-		reader->r_ver = 1;
-		reader->r_all = in_egroup_p(inode->i_gid) ||
-			capable(CAP_SYSLOG);
-
-		INIT_LIST_HEAD(&reader->list);
-
-		mutex_lock(&log->mutex);
-		reader->r_off = log->head;
-		list_add_tail(&reader->list, &log->readers);
-		mutex_unlock(&log->mutex);
-
-		file->private_data = reader;
-	} else
-		file->private_data = log;
-
-	return 0;
-}
-
-/*
- * logger_release - the log's release file operation
- *
- * Note this is a total no-op in the write-only case. Keep it that way!
- */
-static int logger_release(struct inode *ignored, struct file *file)
-{
-	if (file->f_mode & FMODE_READ) {
-		struct logger_reader *reader = file->private_data;
-		struct logger_log *log = reader->log;
-
-		mutex_lock(&log->mutex);
-		list_del(&reader->list);
-		mutex_unlock(&log->mutex);
-
-		kfree(reader);
-	}
-
-	return 0;
-}
-
-/*
- * logger_poll - the log's poll file operation, for poll/select/epoll
- *
- * Note we always return POLLOUT, because you can always write() to the log.
- * Note also that, strictly speaking, a return value of POLLIN does not
- * guarantee that the log is readable without blocking, as there is a small
- * chance that the writer can lap the reader in the interim between poll()
- * returning and the read() request.
- */
-static unsigned int logger_poll(struct file *file, poll_table *wait)
-{
-	struct logger_reader *reader;
-	struct logger_log *log;
-	unsigned int ret = POLLOUT | POLLWRNORM;
-
-	if (!(file->f_mode & FMODE_READ))
-		return ret;
-
-	reader = file->private_data;
-	log = reader->log;
-
-	poll_wait(file, &log->wq, wait);
-
-	mutex_lock(&log->mutex);
-	if (!reader->r_all)
-		reader->r_off = get_next_entry_by_uid(log,
-			reader->r_off, current_euid());
-
-	if (log->w_off != reader->r_off)
-		ret |= POLLIN | POLLRDNORM;
-	mutex_unlock(&log->mutex);
-
-	return ret;
-}
-
-static long logger_set_version(struct logger_reader *reader, void __user *arg)
-{
-	int version;
-	if (copy_from_user(&version, arg, sizeof(int)))
-		return -EFAULT;
-
-	if ((version < 1) || (version > 2))
-		return -EINVAL;
-
-	reader->r_ver = version;
-	return 0;
-}
-
-static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-	struct logger_log *log = file_get_log(file);
-	struct logger_reader *reader;
-	long ret = -EINVAL;
-	void __user *argp = (void __user *) arg;
-
-	mutex_lock(&log->mutex);
-
-	switch (cmd) {
-	case LOGGER_GET_LOG_BUF_SIZE:
-		ret = log->size;
-		break;
-	case LOGGER_GET_LOG_LEN:
-		if (!(file->f_mode & FMODE_READ)) {
-			ret = -EBADF;
-			break;
-		}
-		reader = file->private_data;
-		if (log->w_off >= reader->r_off)
-			ret = log->w_off - reader->r_off;
-		else
-			ret = (log->size - reader->r_off) + log->w_off;
-		break;
-	case LOGGER_GET_NEXT_ENTRY_LEN:
-		if (!(file->f_mode & FMODE_READ)) {
-			ret = -EBADF;
-			break;
-		}
-		reader = file->private_data;
-
-		if (!reader->r_all)
-			reader->r_off = get_next_entry_by_uid(log,
-				reader->r_off, current_euid());
-
-		if (log->w_off != reader->r_off)
-			ret = get_user_hdr_len(reader->r_ver) +
-				get_entry_msg_len(log, reader->r_off);
-		else
-			ret = 0;
-		break;
-	case LOGGER_FLUSH_LOG:
-		if (!(file->f_mode & FMODE_WRITE)) {
-			ret = -EBADF;
-			break;
-		}
-		if (!(in_egroup_p(file->f_dentry->d_inode->i_gid) ||
-				capable(CAP_SYSLOG))) {
-			ret = -EPERM;
-			break;
-		}
-		list_for_each_entry(reader, &log->readers, list)
-			reader->r_off = log->w_off;
-		log->head = log->w_off;
-		ret = 0;
-		break;
-	case LOGGER_GET_VERSION:
-		if (!(file->f_mode & FMODE_READ)) {
-			ret = -EBADF;
-			break;
-		}
-		reader = file->private_data;
-		ret = reader->r_ver;
-		break;
-	case LOGGER_SET_VERSION:
-		if (!(file->f_mode & FMODE_READ)) {
-			ret = -EBADF;
-			break;
-		}
-		reader = file->private_data;
-		ret = logger_set_version(reader, argp);
-		break;
-	}
-
-	mutex_unlock(&log->mutex);
-
-	return ret;
-}
-
-static const struct file_operations logger_fops = {
-	.owner = THIS_MODULE,
-	.read = logger_read,
-	.aio_write = logger_aio_write,
-	.poll = logger_poll,
-	.unlocked_ioctl = logger_ioctl,
-	.compat_ioctl = logger_ioctl,
-	.open = logger_open,
-	.release = logger_release,
-};
-
-/*
- * Log size must must be a power of two, and greater than
- * (LOGGER_ENTRY_MAX_PAYLOAD + sizeof(struct logger_entry)).
- */
-static int __init create_log(char *log_name, int size)
-{
-	int ret = 0;
-	struct logger_log *log;
-	unsigned char *buffer;
-
-	buffer = vmalloc(size);
-	if (buffer == NULL)
-		return -ENOMEM;
-
-	log = kzalloc(sizeof(struct logger_log), GFP_KERNEL);
-	if (log == NULL) {
-		ret = -ENOMEM;
-		goto out_free_buffer;
-	}
-	log->buffer = buffer;
-
-	log->misc.minor = MISC_DYNAMIC_MINOR;
-	log->misc.name = kstrdup(log_name, GFP_KERNEL);
-	if (log->misc.name == NULL) {
-		ret = -ENOMEM;
-		goto out_free_log;
-	}
-
-	log->misc.fops = &logger_fops;
-	log->misc.parent = NULL;
-
-	init_waitqueue_head(&log->wq);
-	INIT_LIST_HEAD(&log->readers);
-	mutex_init(&log->mutex);
-	log->w_off = 0;
-	log->head = 0;
-	log->size = size;
-
-	INIT_LIST_HEAD(&log->logs);
-	list_add_tail(&log->logs, &log_list);
-
-	/* finally, initialize the misc device for this log */
-	ret = misc_register(&log->misc);
-	if (unlikely(ret)) {
-		pr_err("failed to register misc device for log '%s'!\n",
-				log->misc.name);
-		goto out_free_log;
-	}
-
-	pr_info("created %luK log '%s'\n",
-		(unsigned long) log->size >> 10, log->misc.name);
-
-	return 0;
-
-out_free_log:
-	kfree(log);
-
-out_free_buffer:
-	vfree(buffer);
-	return ret;
-}
-
-static int __init logger_init(void)
-{
-	int ret;
-
-	ret = create_log(LOGGER_LOG_MAIN, 256*1024);
-	if (unlikely(ret))
-		goto out;
-
-	ret = create_log(LOGGER_LOG_EVENTS, 256*1024);
-	if (unlikely(ret))
-		goto out;
-
-	ret = create_log(LOGGER_LOG_RADIO, 256*1024);
-	if (unlikely(ret))
-		goto out;
-
-	ret = create_log(LOGGER_LOG_SYSTEM, 256*1024);
-	if (unlikely(ret))
-		goto out;
-
-out:
-	return ret;
-}
-
-static void __exit logger_exit(void)
-{
-	struct logger_log *current_log, *next_log;
-
-	list_for_each_entry_safe(current_log, next_log, &log_list, logs) {
-		/* we have to delete all the entry inside log_list */
-		misc_deregister(&current_log->misc);
-		vfree(current_log->buffer);
-		kfree(current_log->misc.name);
-		list_del(&current_log->logs);
-		kfree(current_log);
-	}
-}
-
-
-device_initcall(logger_init);
-module_exit(logger_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Robert Love, <rlove@google.com>");
-MODULE_DESCRIPTION("Android Logger");
diff --git a/drivers/staging/android/logger.h b/drivers/staging/android/logger.h
deleted file mode 100644
index 70af7d8..0000000
--- a/drivers/staging/android/logger.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/* include/linux/logger.h
- *
- * Copyright (C) 2007-2008 Google, Inc.
- * Author: Robert Love <rlove@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_LOGGER_H
-#define _LINUX_LOGGER_H
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-/**
- * struct user_logger_entry_compat - defines a single entry that is given to a logger
- * @len:	The length of the payload
- * @__pad:	Two bytes of padding that appear to be required
- * @pid:	The generating process' process ID
- * @tid:	The generating process' thread ID
- * @sec:	The number of seconds that have elapsed since the Epoch
- * @nsec:	The number of nanoseconds that have elapsed since @sec
- * @msg:	The message that is to be logged
- *
- * The userspace structure for version 1 of the logger_entry ABI.
- * This structure is returned to userspace unless the caller requests
- * an upgrade to a newer ABI version.
- */
-struct user_logger_entry_compat {
-	__u16		len;
-	__u16		__pad;
-	__s32		pid;
-	__s32		tid;
-	__s32		sec;
-	__s32		nsec;
-	char		msg[0];
-};
-
-/**
- * struct logger_entry - defines a single entry that is given to a logger
- * @len:	The length of the payload
- * @hdr_size:	sizeof(struct logger_entry_v2)
- * @pid:	The generating process' process ID
- * @tid:	The generating process' thread ID
- * @sec:	The number of seconds that have elapsed since the Epoch
- * @nsec:	The number of nanoseconds that have elapsed since @sec
- * @euid:	Effective UID of logger
- * @msg:	The message that is to be logged
- *
- * The structure for version 2 of the logger_entry ABI.
- * This structure is returned to userspace if ioctl(LOGGER_SET_VERSION)
- * is called with version >= 2
- */
-struct logger_entry {
-	__u16		len;
-	__u16		hdr_size;
-	__s32		pid;
-	__s32		tid;
-	__s32		sec;
-	__s32		nsec;
-	kuid_t		euid;
-	char		msg[0];
-};
-
-#define LOGGER_LOG_RADIO	"log_radio"	/* radio-related messages */
-#define LOGGER_LOG_EVENTS	"log_events"	/* system/hardware events */
-#define LOGGER_LOG_SYSTEM	"log_system"	/* system/framework messages */
-#define LOGGER_LOG_MAIN		"log_main"	/* everything else */
-
-#define LOGGER_ENTRY_MAX_PAYLOAD	4076
-
-#define __LOGGERIO	0xAE
-
-#define LOGGER_GET_LOG_BUF_SIZE		_IO(__LOGGERIO, 1) /* size of log */
-#define LOGGER_GET_LOG_LEN		_IO(__LOGGERIO, 2) /* used log len */
-#define LOGGER_GET_NEXT_ENTRY_LEN	_IO(__LOGGERIO, 3) /* next entry len */
-#define LOGGER_FLUSH_LOG		_IO(__LOGGERIO, 4) /* flush log */
-#define LOGGER_GET_VERSION		_IO(__LOGGERIO, 5) /* abi version */
-#define LOGGER_SET_VERSION		_IO(__LOGGERIO, 6) /* abi version */
-
-#endif /* _LINUX_LOGGER_H */
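For reference, the character-device ABI removed above was driven from userspace through these ioctls. A hypothetical userspace sketch; the /dev/log_main path and the error handling are assumptions, not part of the original driver:

/* Hypothetical reader of the removed logger ABI. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ioctl.h>

#define __LOGGERIO 0xAE
#define LOGGER_GET_LOG_BUF_SIZE	_IO(__LOGGERIO, 1)
#define LOGGER_GET_VERSION	_IO(__LOGGERIO, 5)
#define LOGGER_SET_VERSION	_IO(__LOGGERIO, 6)

int main(void)
{
	int fd = open("/dev/log_main", O_RDONLY);
	int version = 2;

	if (fd < 0)
		return 1;
	/* The buffer size comes back in the ioctl return value itself. */
	printf("buf size: %ld\n", (long)ioctl(fd, LOGGER_GET_LOG_BUF_SIZE));
	/* Readers opt in to the v2 entry format (struct logger_entry). */
	if (ioctl(fd, LOGGER_SET_VERSION, &version) == 0)
		printf("ABI v%ld\n", (long)ioctl(fd, LOGGER_GET_VERSION));
	close(fd);
	return 0;
}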
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index fe74494..a56e089 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -39,7 +39,6 @@
 #include <linux/sched.h>
 #include <linux/swap.h>
 #include <linux/rcupdate.h>
-#include <linux/profile.h>
 #include <linux/notifier.h>
 
 static uint32_t lowmem_debug_level = 1;
@@ -74,6 +73,7 @@
 	int tasksize;
 	int i;
 	short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
+	int minfree = 0;
 	int selected_tasksize = 0;
 	short selected_oom_score_adj;
 	int array_size = ARRAY_SIZE(lowmem_adj);
@@ -86,8 +86,8 @@
 	if (lowmem_minfree_size < array_size)
 		array_size = lowmem_minfree_size;
 	for (i = 0; i < array_size; i++) {
-		if (other_free < lowmem_minfree[i] &&
-		    other_file < lowmem_minfree[i]) {
+		minfree = lowmem_minfree[i];
+		if (other_free < minfree && other_file < minfree) {
 			min_score_adj = lowmem_adj[i];
 			break;
 		}
@@ -144,13 +144,22 @@
 		selected = p;
 		selected_tasksize = tasksize;
 		selected_oom_score_adj = oom_score_adj;
-		lowmem_print(2, "select %d (%s), adj %hd, size %d, to kill\n",
-			     p->pid, p->comm, oom_score_adj, tasksize);
+		lowmem_print(2, "select '%s' (%d), adj %hd, size %d, to kill\n",
+			     p->comm, p->pid, oom_score_adj, tasksize);
 	}
 	if (selected) {
-		lowmem_print(1, "send sigkill to %d (%s), adj %hd, size %d\n",
-			     selected->pid, selected->comm,
-			     selected_oom_score_adj, selected_tasksize);
+		lowmem_print(1, "Killing '%s' (%d), adj %hd,\n" \
+				"   to free %ldkB on behalf of '%s' (%d) because\n" \
+				"   cache %ldkB is below limit %ldkB for oom_score_adj %hd\n" \
+				"   Free memory is %ldkB above reserved\n",
+			     selected->comm, selected->pid,
+			     selected_oom_score_adj,
+			     selected_tasksize * (long)(PAGE_SIZE / 1024),
+			     current->comm, current->pid,
+			     other_file * (long)(PAGE_SIZE / 1024),
+			     minfree * (long)(PAGE_SIZE / 1024),
+			     min_score_adj,
+			     other_free * (long)(PAGE_SIZE / 1024));
 		lowmem_deathpending_timeout = jiffies + HZ;
 		send_sig(SIGKILL, selected, 0);
 		set_tsk_thread_flag(selected, TIF_MEMDIE);
@@ -178,9 +187,94 @@
 	unregister_shrinker(&lowmem_shrinker);
 }
 
+#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
+static short lowmem_oom_adj_to_oom_score_adj(short oom_adj)
+{
+	if (oom_adj == OOM_ADJUST_MAX)
+		return OOM_SCORE_ADJ_MAX;
+	else
+		return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
+}
+
+static void lowmem_autodetect_oom_adj_values(void)
+{
+	int i;
+	short oom_adj;
+	short oom_score_adj;
+	int array_size = ARRAY_SIZE(lowmem_adj);
+
+	if (lowmem_adj_size < array_size)
+		array_size = lowmem_adj_size;
+
+	if (array_size <= 0)
+		return;
+
+	oom_adj = lowmem_adj[array_size - 1];
+	if (oom_adj > OOM_ADJUST_MAX)
+		return;
+
+	oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj);
+	if (oom_score_adj <= OOM_ADJUST_MAX)
+		return;
+
+	lowmem_print(1, "lowmem_shrink: convert oom_adj to oom_score_adj:\n");
+	for (i = 0; i < array_size; i++) {
+		oom_adj = lowmem_adj[i];
+		oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj);
+		lowmem_adj[i] = oom_score_adj;
+		lowmem_print(1, "oom_adj %d => oom_score_adj %d\n",
+			     oom_adj, oom_score_adj);
+	}
+}
+
+static int lowmem_adj_array_set(const char *val, const struct kernel_param *kp)
+{
+	int ret;
+
+	ret = param_array_ops.set(val, kp);
+
+	/* HACK: Autodetect oom_adj values in lowmem_adj array */
+	lowmem_autodetect_oom_adj_values();
+
+	return ret;
+}
+
+static int lowmem_adj_array_get(char *buffer, const struct kernel_param *kp)
+{
+	return param_array_ops.get(buffer, kp);
+}
+
+static void lowmem_adj_array_free(void *arg)
+{
+	param_array_ops.free(arg);
+}
+
+static struct kernel_param_ops lowmem_adj_array_ops = {
+	.set = lowmem_adj_array_set,
+	.get = lowmem_adj_array_get,
+	.free = lowmem_adj_array_free,
+};
+
+static const struct kparam_array __param_arr_adj = {
+	.max = ARRAY_SIZE(lowmem_adj),
+	.num = &lowmem_adj_size,
+	.ops = &param_ops_short,
+	.elemsize = sizeof(lowmem_adj[0]),
+	.elem = lowmem_adj,
+};
+#endif
+
 module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
+#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
+__module_param_call(MODULE_PARAM_PREFIX, adj,
+		    &lowmem_adj_array_ops,
+		    .arr = &__param_arr_adj,
+		    S_IRUGO | S_IWUSR, -1);
+__MODULE_PARM_TYPE(adj, "array of short");
+#else
 module_param_array_named(adj, lowmem_adj, short, &lowmem_adj_size,
 			 S_IRUGO | S_IWUSR);
+#endif
 module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
 			 S_IRUGO | S_IWUSR);
 module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
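The autodetect path added above converts legacy oom_adj values (-17..15) to oom_score_adj values (-1000..1000) with the same linear mapping the core OOM killer uses. A standalone sketch of the arithmetic; the constants mirror include/uapi/linux/oom.h and the demo values are illustrative only:

#include <stdio.h>

#define OOM_DISABLE		(-17)
#define OOM_ADJUST_MAX		15
#define OOM_SCORE_ADJ_MAX	1000

/* Same formula as lowmem_oom_adj_to_oom_score_adj() above. */
static short adj_to_score_adj(short oom_adj)
{
	if (oom_adj == OOM_ADJUST_MAX)
		return OOM_SCORE_ADJ_MAX;
	return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
}

int main(void)
{
	short legacy[] = { 0, 1, 6, 12, 15 };
	int i;

	/* e.g. oom_adj 6 maps to 6 * 1000 / 17 = 352 */
	for (i = 0; i < 5; i++)
		printf("oom_adj %d -> oom_score_adj %d\n",
		       legacy[i], adj_to_score_adj(legacy[i]));
	return 0;
}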
diff --git a/drivers/staging/android/reboot/Kconfig b/drivers/staging/android/reboot/Kconfig
new file mode 100644
index 0000000..0094696
--- /dev/null
+++ b/drivers/staging/android/reboot/Kconfig
@@ -0,0 +1,18 @@
+#
+# Android reboot target drivers
+#
+
+config INTEL_SCU_REBOOT_REASON
+	tristate "Reboot reason driver for Intel MID platforms"
+	default y
+	depends on INTEL_REBOOT_TARGET && INTEL_SCU_IPC
+	help
+	  This driver stores the reboot reason in the SCU OSNIB.
+
+	  If unsure, say Y.
+
+config INTEL_REBOOT_TARGET
+	bool "Intel Reboot Target"
+	---help---
+	  This driver provides a generic implementation for reboot target setting at
+	  reset time.
diff --git a/drivers/staging/android/reboot/Makefile b/drivers/staging/android/reboot/Makefile
new file mode 100644
index 0000000..6ee5f82
--- /dev/null
+++ b/drivers/staging/android/reboot/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_INTEL_SCU_REBOOT_REASON)	+= intel_scu_reboot_reason.o
+obj-$(CONFIG_INTEL_REBOOT_TARGET)	+= reboot_target.o
diff --git a/drivers/staging/android/reboot/intel_scu_reboot_reason.c b/drivers/staging/android/reboot/intel_scu_reboot_reason.c
new file mode 100644
index 0000000..aacb881
--- /dev/null
+++ b/drivers/staging/android/reboot/intel_scu_reboot_reason.c
@@ -0,0 +1,124 @@
+/*
+ * Reboot Reason driver for Edison, derived from SCURR driver for Medfield.
+ *
+ * Copyright (C) 2015,2011 Intel Corp
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/reboot.h>
+#include <linux/pagemap.h>
+#include <linux/blkdev.h>
+#include <linux/debugfs.h>
+#include <linux/genhd.h>
+#include <linux/seq_file.h>
+#include <linux/rpmsg.h>
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 1))
+#include <linux/platform_data/intel_mid_remoteproc.h>
+#else
+#include <asm/intel_mid_remoteproc.h>
+#endif
+#include <linux/delay.h>
+#include <asm/intel_scu_ipcutil.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel-mid.h>
+
+#include "reboot_target.h"
+
+#include <asm/intel_scu_ipc.h>
+#include <linux/power_supply.h>
+
+static int scurr_reboot_target_call(const char *target, int id)
+{
+	int ret_ipc;
+
+	pr_info("%s: notified [%s] target\n", __func__, target);
+
+	pr_warn("[REBOOT] %s, rebooting into %s\n", __func__, target);
+
+	ret_ipc = intel_scu_ipc_write_osnib_rr(id);
+	if (ret_ipc < 0)
+		pr_err("%s cannot write %s reboot reason in OSNIB\n",
+			__func__, target);
+	return NOTIFY_DONE;
+}
+
+static struct reboot_target scurr_reboot_target = {
+	.set_reboot_target = scurr_reboot_target_call,
+};
+
+static int scurr_init(void)
+{
+	if (reboot_target_register(&scurr_reboot_target))
+		pr_warn("scurr: unable to register reboot target\n");
+	else
+		pr_info("%s: reboot_target registered\n", __func__);
+	return 0;
+}
+
+static void scurr_exit(void)
+{
+	pr_info("%s: reboot_target unregistered\n", __func__);
+	reboot_target_unregister(&scurr_reboot_target);
+}
+
+static int __init scurr_probe(struct platform_device *dev)
+{
+	return scurr_init();
+}
+
+static int scurr_remove(struct platform_device *dev)
+{
+	scurr_exit();
+	return 0;
+}
+
+static struct platform_driver scurr_driver = {
+	.remove         = scurr_remove,
+	.driver         = {
+		.owner  = THIS_MODULE,
+		.name   = KBUILD_MODNAME,
+	},
+};
+
+static int __init scurr_init_module(void)
+{
+	int err = 0;
+
+	pr_info("Intel SCURR Driver\n");
+
+	platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0);
+	err = platform_driver_probe(&scurr_driver, scurr_probe);
+
+	return err;
+}
+
+static void __exit scurr_cleanup_module(void)
+{
+	platform_driver_unregister(&scurr_driver);
+	pr_info("SCURR Module Unloaded\n");
+}
+module_init(scurr_init_module);
+module_exit(scurr_cleanup_module);
+
+MODULE_AUTHOR("Pierre Tardy <pierre.tardy@intel.com>");
+MODULE_AUTHOR("Xiaokang Qin <xiaokang.qin@intel.com>");
+MODULE_DESCRIPTION("Intel SCU Reboot Reason Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/android/reboot/reboot_target.c b/drivers/staging/android/reboot/reboot_target.c
new file mode 100644
index 0000000..3c9696c
--- /dev/null
+++ b/drivers/staging/android/reboot/reboot_target.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2013 Intel Corp
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/module.h>
+
+#include "reboot_target.h"
+
+/* Currently registered reboot target setter */
+static struct reboot_target *var;
+
+struct name2id {
+	const char *name;
+	int id;
+};
+
+static const unsigned int DEFAULT_TARGET_INDEX = 0;
+
+static const struct name2id NAME2ID[] = {
+	{ "main",        0x00 },
+	{ "android",     0x00 },
+	{ "charging",    0x0A },
+	{ "recovery",    0x0C },
+	{ "fastboot",    0x0E },
+	{ "bootloader",  0x0E },
+	{ "factory",     0x12 },
+	{ "dnx",         0x14 },
+	{ "ramconsole",  0x16 },
+	{ "factory2",    0x18 },
+	{ "bootoneshot", 0x1A },
+};
+
+#define ALLOW_FACTORY_PARAM_NAME "allow_factory="
+
+static int reboot_target_name2id(const char *name)
+{
+	int i;
+	char *allow_factory;
+
+	allow_factory = strstr(saved_command_line, ALLOW_FACTORY_PARAM_NAME);
+	if (!allow_factory && strstr(name, "factory"))
+		return NAME2ID[DEFAULT_TARGET_INDEX].id;
+
+	for (i = 0; i < ARRAY_SIZE(NAME2ID); i++)
+		if (!strcmp(NAME2ID[i].name, name))
+			return NAME2ID[i].id;
+
+	return -EINVAL;
+}
+
+const char *reboot_target_id2name(int id)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(NAME2ID); i++)
+		if (NAME2ID[i].id == id)
+			return NAME2ID[i].name;
+
+	return "";
+}
+
+static int set_reboot_target(const char *name)
+{
+	int id;
+
+	if (name == NULL) {
+		pr_err("Error in %s: NULL target\n", __func__);
+		return -EINVAL;
+	}
+
+	id = reboot_target_name2id(name);
+	if (id < 0) {
+		pr_err("Error in %s: '%s' is not a valid target\n",
+		       __func__, name);
+		return -EINVAL;
+	}
+
+	return var ? var->set_reboot_target(name, id) : -ENODEV;
+}
+
+static int reboot_target_notify(struct notifier_block *notifier,
+				unsigned long what, void *data)
+{
+	const char *target = (const char *)data;
+	int ret;
+
+	if (what != SYS_RESTART)
+		goto out;
+
+	if (!target || target[0] == '\0')
+		target = NAME2ID[DEFAULT_TARGET_INDEX].name;
+
+	ret = set_reboot_target(target);
+	if (ret)
+		pr_err("%s: Failed to set the reboot target, return=%d\n",
+		       __func__, ret);
+
+out:
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block reboot_target_notifier = {
+	.notifier_call = reboot_target_notify,
+};
+
+int reboot_target_register(struct reboot_target *new)
+{
+	if (var)
+		return -EBUSY;
+
+	var = new;
+	return 0;
+}
+EXPORT_SYMBOL(reboot_target_register);
+
+int reboot_target_unregister(struct reboot_target *old)
+{
+	if (old && old == var) {
+		var = NULL;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(reboot_target_unregister);
+
+int __init reboot_target_init(void)
+{
+	int ret = 0;
+
+	ret = register_reboot_notifier(&reboot_target_notifier);
+	if (ret)
+		pr_err("%s: failed to register reboot_notifier\n", __func__);
+
+	return ret;
+}
+
+void __exit reboot_target_exit(void)
+{
+	unregister_reboot_notifier(&reboot_target_notifier);
+}
+
+module_init(reboot_target_init);
+module_exit(reboot_target_exit);
+
+MODULE_AUTHOR("Jeremy Compostella <jeremy.compostella@intel.com>");
+MODULE_DESCRIPTION("Intel Reboot Target");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/android/reboot/reboot_target.h b/drivers/staging/android/reboot/reboot_target.h
new file mode 100644
index 0000000..023d998
--- /dev/null
+++ b/drivers/staging/android/reboot/reboot_target.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2013 Intel Corp
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#ifndef _REBOOT_TARGET_H_
+#define _REBOOT_TARGET_H_
+
+struct reboot_target {
+	int (*set_reboot_target)(const char *name, const int id);
+};
+
+extern const char *reboot_target_id2name(int id);
+
+extern int reboot_target_register(struct reboot_target *);
+extern int reboot_target_unregister(struct reboot_target *);
+
+#endif	/* _REBOOT_TARGET_H_ */
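A backend plugs into this interface by filling in struct reboot_target and registering it; only one setter may be live at a time. A minimal hypothetical module sketch, with invented demo_* names (only the registration API comes from the header above):

#include <linux/module.h>
#include <linux/kernel.h>

#include "reboot_target.h"

static int demo_set_reboot_target(const char *name, const int id)
{
	pr_info("demo: would persist target '%s' (id 0x%02x)\n", name, id);
	return 0;
}

static struct reboot_target demo_target = {
	.set_reboot_target = demo_set_reboot_target,
};

static int __init demo_init(void)
{
	/* Returns -EBUSY if another setter is already registered. */
	return reboot_target_register(&demo_target);
}

static void __exit demo_exit(void)
{
	reboot_target_unregister(&demo_target);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL v2");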
diff --git a/drivers/staging/android/sw_sync.c b/drivers/staging/android/sw_sync.c
index 4928f93..820af5c 100644
--- a/drivers/staging/android/sw_sync.c
+++ b/drivers/staging/android/sw_sync.c
@@ -97,6 +97,7 @@
 				       char *str, int size)
 {
 	struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
+
 	snprintf(str, size, "%d", pt->value);
 }
 
@@ -156,6 +157,7 @@
 static int sw_sync_release(struct inode *inode, struct file *file)
 {
 	struct sw_sync_timeline *obj = file->private_data;
+
 	sync_timeline_destroy(&obj->obj);
 	return 0;
 }
diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h
index 585040b..1a50669 100644
--- a/drivers/staging/android/sw_sync.h
+++ b/drivers/staging/android/sw_sync.h
@@ -18,10 +18,9 @@
 #define _LINUX_SW_SYNC_H
 
 #include <linux/types.h>
-
-#ifdef __KERNEL__
-
+#include <linux/kconfig.h>
 #include "sync.h"
+#include "uapi/sw_sync.h"
 
 struct sw_sync_timeline {
 	struct	sync_timeline	obj;
@@ -35,24 +34,26 @@
 	u32			value;
 };
 
+#if IS_ENABLED(CONFIG_SW_SYNC)
 struct sw_sync_timeline *sw_sync_timeline_create(const char *name);
 void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc);
 
 struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value);
+#else
+static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
+{
+	return NULL;
+}
 
-#endif /* __KERNEL __ */
+static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
+{
+}
 
-struct sw_sync_create_fence_data {
-	__u32	value;
-	char	name[32];
-	__s32	fence; /* fd of new fence */
-};
-
-#define SW_SYNC_IOC_MAGIC	'W'
-
-#define SW_SYNC_IOC_CREATE_FENCE	_IOWR(SW_SYNC_IOC_MAGIC, 0,\
-		struct sw_sync_create_fence_data)
-#define SW_SYNC_IOC_INC			_IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
-
+static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj,
+		u32 value)
+{
+	return NULL;
+}
+#endif /* IS_ENABLED(CONFIG_SW_SYNC) */
 
 #endif /* _LINUX_SW_SYNC_H */
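With the IS_ENABLED() stubs above, kernel callers can use the sw_sync API without wrapping every call in #ifdef CONFIG_SW_SYNC. A minimal in-kernel sketch with abbreviated error handling; it assumes CONFIG_SW_SYNC=y for the calls to do anything:

#include "sw_sync.h"

static void sw_sync_demo(void)
{
	struct sw_sync_timeline *tl;
	struct sync_pt *pt;

	tl = sw_sync_timeline_create("demo");
	if (!tl)
		return;	/* CONFIG_SW_SYNC=n stub or allocation failure */

	/* pt signals once the timeline value reaches 1. */
	pt = sw_sync_pt_create(tl, 1);
	sw_sync_timeline_inc(tl, 1);	/* timeline reaches 1; pt signals */
	if (pt)
		sync_pt_free(pt);	/* a real caller would wrap pt in a fence */
	sync_timeline_destroy(&tl->obj);
}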
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index 3893a35..61e6249 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -79,27 +79,27 @@
 		container_of(kref, struct sync_timeline, kref);
 	unsigned long flags;
 
-	if (obj->ops->release_obj)
-		obj->ops->release_obj(obj);
-
 	spin_lock_irqsave(&sync_timeline_list_lock, flags);
 	list_del(&obj->sync_timeline_list);
 	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
 
+	if (obj->ops->release_obj)
+		obj->ops->release_obj(obj);
+
 	kfree(obj);
 }
 
 void sync_timeline_destroy(struct sync_timeline *obj)
 {
 	obj->destroyed = true;
+	smp_wmb();
 
 	/*
-	 * If this is not the last reference, signal any children
-	 * that their parent is going away.
+	 * signal any children that their parent is going away.
 	 */
+	sync_timeline_signal(obj);
 
-	if (!kref_put(&obj->kref, sync_timeline_free))
-		sync_timeline_signal(obj);
+	kref_put(&obj->kref, sync_timeline_free);
 }
 EXPORT_SYMBOL(sync_timeline_destroy);
 
@@ -384,6 +384,7 @@
 
 	list_for_each_safe(pos, n, &fence->pt_list_head) {
 		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
+
 		sync_timeline_remove_pt(pt);
 	}
 }
@@ -394,6 +395,7 @@
 
 	list_for_each_safe(pos, n, &fence->pt_list_head) {
 		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
+
 		sync_pt_free(pt);
 	}
 }
@@ -827,6 +829,7 @@
 			     unsigned long arg)
 {
 	struct sync_fence *fence = file->private_data;
+
 	switch (cmd) {
 	case SYNC_IOC_WAIT:
 		return sync_fence_ioctl_wait(fence, arg);
@@ -856,18 +859,21 @@
 static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
 {
 	int status = pt->status;
+
 	seq_printf(s, "  %s%spt %s",
 		   fence ? pt->parent->name : "",
 		   fence ? "_" : "",
 		   sync_status_str(status));
 	if (pt->status) {
 		struct timeval tv = ktime_to_timeval(pt->timestamp);
+
 		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
 	}
 
 	if (pt->parent->ops->timeline_value_str &&
 	    pt->parent->ops->pt_value_str) {
 		char value[64];
+
 		pt->parent->ops->pt_value_str(pt, value, sizeof(value));
 		seq_printf(s, ": %s", value);
 		if (fence) {
@@ -892,6 +898,7 @@
 
 	if (obj->ops->timeline_value_str) {
 		char value[64];
+
 		obj->ops->timeline_value_str(obj, value, sizeof(value));
 		seq_printf(s, ": %s", value);
 	} else if (obj->ops->print_obj) {
@@ -1001,6 +1008,7 @@
 	for (i = 0; i < s.count; i += DUMP_CHUNK) {
 		if ((s.count - i) > DUMP_CHUNK) {
 			char c = s.buf[i + DUMP_CHUNK];
+
 			s.buf[i + DUMP_CHUNK] = 0;
 			pr_cont("%s", s.buf + i);
 			s.buf[i + DUMP_CHUNK] = c;
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
index 38ea986..75da9e8 100644
--- a/drivers/staging/android/sync.h
+++ b/drivers/staging/android/sync.h
@@ -14,14 +14,14 @@
 #define _LINUX_SYNC_H
 
 #include <linux/types.h>
-#ifdef __KERNEL__
-
 #include <linux/kref.h>
 #include <linux/ktime.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 
+#include "uapi/sync.h"
+
 struct sync_timeline;
 struct sync_pt;
 struct sync_fence;
@@ -341,86 +341,4 @@
  */
 int sync_fence_wait(struct sync_fence *fence, long timeout);
 
-#endif /* __KERNEL__ */
-
-/**
- * struct sync_merge_data - data passed to merge ioctl
- * @fd2:	file descriptor of second fence
- * @name:	name of new fence
- * @fence:	returns the fd of the new fence to userspace
- */
-struct sync_merge_data {
-	__s32	fd2; /* fd of second fence */
-	char	name[32]; /* name of new fence */
-	__s32	fence; /* fd on newly created fence */
-};
-
-/**
- * struct sync_pt_info - detailed sync_pt information
- * @len:		length of sync_pt_info including any driver_data
- * @obj_name:		name of parent sync_timeline
- * @driver_name:	name of driver implmenting the parent
- * @status:		status of the sync_pt 0:active 1:signaled <0:error
- * @timestamp_ns:	timestamp of status change in nanoseconds
- * @driver_data:	any driver dependant data
- */
-struct sync_pt_info {
-	__u32	len;
-	char	obj_name[32];
-	char	driver_name[32];
-	__s32	status;
-	__u64	timestamp_ns;
-
-	__u8	driver_data[0];
-};
-
-/**
- * struct sync_fence_info_data - data returned from fence info ioctl
- * @len:	ioctl caller writes the size of the buffer its passing in.
- *		ioctl returns length of sync_fence_data reutnred to userspace
- *		including pt_info.
- * @name:	name of fence
- * @status:	status of fence. 1: signaled 0:active <0:error
- * @pt_info:	a sync_pt_info struct for every sync_pt in the fence
- */
-struct sync_fence_info_data {
-	__u32	len;
-	char	name[32];
-	__s32	status;
-
-	__u8	pt_info[0];
-};
-
-#define SYNC_IOC_MAGIC		'>'
-
-/**
- * DOC: SYNC_IOC_WAIT - wait for a fence to signal
- *
- * pass timeout in milliseconds.  Waits indefinitely timeout < 0.
- */
-#define SYNC_IOC_WAIT		_IOW(SYNC_IOC_MAGIC, 0, __s32)
-
-/**
- * DOC: SYNC_IOC_MERGE - merge two fences
- *
- * Takes a struct sync_merge_data.  Creates a new fence containing copies of
- * the sync_pts in both the calling fd and sync_merge_data.fd2.  Returns the
- * new fence's fd in sync_merge_data.fence
- */
-#define SYNC_IOC_MERGE		_IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
-
-/**
- * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
- *
- * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
- * Caller should write the size of the buffer into len.  On return, len is
- * updated to reflect the total size of the sync_fence_info_data including
- * pt_info.
- *
- * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
- * To itterate over the sync_pt_infos, use the sync_pt_info.len field.
- */
-#define SYNC_IOC_FENCE_INFO	_IOWR(SYNC_IOC_MAGIC, 2,\
-	struct sync_fence_info_data)
-
 #endif /* _LINUX_SYNC_H */
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
index e814514..ae9966d 100644
--- a/drivers/staging/android/timed_gpio.c
+++ b/drivers/staging/android/timed_gpio.c
@@ -51,6 +51,7 @@
 	if (hrtimer_active(&data->timer)) {
 		ktime_t r = hrtimer_get_remaining(&data->timer);
 		struct timeval t = ktime_to_timeval(r);
+
 		return t.tv_sec * 1000 + t.tv_usec / 1000;
 	} else
 		return 0;
diff --git a/drivers/staging/android/uapi/ashmem.h b/drivers/staging/android/uapi/ashmem.h
new file mode 100644
index 0000000..13df42d
--- /dev/null
+++ b/drivers/staging/android/uapi/ashmem.h
@@ -0,0 +1,48 @@
+/*
+ * drivers/staging/android/uapi/ashmem.h
+ *
+ * Copyright 2008 Google Inc.
+ * Author: Robert Love
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#ifndef _UAPI_LINUX_ASHMEM_H
+#define _UAPI_LINUX_ASHMEM_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define ASHMEM_NAME_LEN		256
+
+#define ASHMEM_NAME_DEF		"dev/ashmem"
+
+/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
+#define ASHMEM_NOT_PURGED	0
+#define ASHMEM_WAS_PURGED	1
+
+/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
+#define ASHMEM_IS_UNPINNED	0
+#define ASHMEM_IS_PINNED	1
+
+struct ashmem_pin {
+	__u32 offset;	/* offset into region, in bytes, page-aligned */
+	__u32 len;	/* length forward from offset, in bytes, page-aligned */
+};
+
+#define __ASHMEMIOC		0x77
+
+#define ASHMEM_SET_NAME		_IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
+#define ASHMEM_GET_NAME		_IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
+#define ASHMEM_SET_SIZE		_IOW(__ASHMEMIOC, 3, size_t)
+#define ASHMEM_GET_SIZE		_IO(__ASHMEMIOC, 4)
+#define ASHMEM_SET_PROT_MASK	_IOW(__ASHMEMIOC, 5, unsigned long)
+#define ASHMEM_GET_PROT_MASK	_IO(__ASHMEMIOC, 6)
+#define ASHMEM_PIN		_IOW(__ASHMEMIOC, 7, struct ashmem_pin)
+#define ASHMEM_UNPIN		_IOW(__ASHMEMIOC, 8, struct ashmem_pin)
+#define ASHMEM_GET_PIN_STATUS	_IO(__ASHMEMIOC, 9)
+#define ASHMEM_PURGE_ALL_CACHES	_IO(__ASHMEMIOC, 10)
+
+#endif	/* _UAPI_LINUX_ASHMEM_H */
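Userspace drives this ABI by opening /dev/ashmem, naming and sizing the region before mapping it, and then querying or pinning ranges. A hypothetical sketch; it assumes the uapi header is exported to userspace and that /dev/ashmem exists:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include "ashmem.h"	/* the uapi header added above */

int main(void)
{
	struct ashmem_pin pin = { .offset = 0, .len = 4096 };
	int fd = open("/dev/ashmem", O_RDWR);
	char *p;

	if (fd < 0)
		return 1;
	/* Name and size must be set before the first mmap(). */
	ioctl(fd, ASHMEM_SET_NAME, "demo-region");
	ioctl(fd, ASHMEM_SET_SIZE, 4096);
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "hello");
	/* Returns ASHMEM_IS_PINNED (1) or ASHMEM_IS_UNPINNED (0). */
	printf("pin status: %ld\n",
	       (long)ioctl(fd, ASHMEM_GET_PIN_STATUS, &pin));
	munmap(p, 4096);
	close(fd);
	return 0;
}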
diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h
new file mode 100644
index 0000000..f09e7c1
--- /dev/null
+++ b/drivers/staging/android/uapi/ion.h
@@ -0,0 +1,196 @@
+/*
+ * drivers/staging/android/uapi/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_H
+#define _UAPI_LINUX_ION_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+typedef int ion_user_handle_t;
+
+/**
+ * enum ion_heap_types - list of all possible types of heaps
+ * @ION_HEAP_TYPE_SYSTEM:	 memory allocated via vmalloc
+ * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
+ * @ION_HEAP_TYPE_CARVEOUT:	 memory allocated from a prereserved
+ * 				 carveout heap, allocations are physically
+ * 				 contiguous
+ * @ION_HEAP_TYPE_DMA:		 memory allocated via DMA API
+ * @ION_NUM_HEAPS:		 helper for iterating over heaps, a bit mask
+ * 				 is used to identify the heaps, so only 32
+ * 				 total heap types are supported
+ */
+enum ion_heap_type {
+	ION_HEAP_TYPE_SYSTEM,
+	ION_HEAP_TYPE_SYSTEM_CONTIG,
+	ION_HEAP_TYPE_CARVEOUT,
+	ION_HEAP_TYPE_CHUNK,
+	ION_HEAP_TYPE_DMA,
+	ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
+				 are at the end of this enum */
+	ION_NUM_HEAPS = 16,
+};
+
+#define ION_HEAP_SYSTEM_MASK		(1 << ION_HEAP_TYPE_SYSTEM)
+#define ION_HEAP_SYSTEM_CONTIG_MASK	(1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
+#define ION_HEAP_CARVEOUT_MASK		(1 << ION_HEAP_TYPE_CARVEOUT)
+#define ION_HEAP_TYPE_DMA_MASK		(1 << ION_HEAP_TYPE_DMA)
+
+#define ION_NUM_HEAP_IDS		sizeof(unsigned int) * 8
+
+/**
+ * allocation flags - the lower 16 bits are used by core ion, the upper 16
+ * bits are reserved for use by the heaps themselves.
+ */
+#define ION_FLAG_CACHED 1		/* mappings of this buffer should be
+					   cached, ion will do cache
+					   maintenance when the buffer is
+					   mapped for dma */
+#define ION_FLAG_CACHED_NEEDS_SYNC 2	/* mappings of this buffer will be
+					   created at mmap time; if this is
+					   set, caches must be managed manually */
+
+/**
+ * DOC: Ion Userspace API
+ *
+ * create a client by opening /dev/ion
+ * most operations handled via following ioctls
+ *
+ */
+
+/**
+ * struct ion_allocation_data - metadata passed from userspace for allocations
+ * @len:		size of the allocation
+ * @align:		required alignment of the allocation
+ * @heap_id_mask:	mask of heap ids to allocate from
+ * @flags:		flags passed to heap
+ * @handle:		pointer that will be populated with a cookie used to
+ *			refer to this allocation
+ *
+ * Provided by userspace as an argument to the ioctl
+ */
+struct ion_allocation_data {
+	size_t len;
+	size_t align;
+	unsigned int heap_id_mask;
+	unsigned int flags;
+	ion_user_handle_t handle;
+};
+
+/**
+ * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
+ * @handle:	a handle
+ * @fd:		a file descriptor representing that handle
+ *
+ * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
+ * the handle returned from ion alloc, and the kernel returns the file
+ * descriptor to share or map in the fd field.  For ION_IOC_IMPORT, userspace
+ * provides the file descriptor and the kernel returns the handle.
+ */
+struct ion_fd_data {
+	ion_user_handle_t handle;
+	int fd;
+};
+
+/**
+ * struct ion_handle_data - a handle passed to/from the kernel
+ * @handle:	a handle
+ */
+struct ion_handle_data {
+	ion_user_handle_t handle;
+};
+
+/**
+ * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
+ * @cmd:	the custom ioctl function to call
+ * @arg:	additional data to pass to the custom ioctl, typically a user
+ *		pointer to a predefined structure
+ *
+ * This works just like the regular cmd and arg fields of an ioctl.
+ */
+struct ion_custom_data {
+	unsigned int cmd;
+	unsigned long arg;
+};
+
+#define ION_IOC_MAGIC		'I'
+
+/**
+ * DOC: ION_IOC_ALLOC - allocate memory
+ *
+ * Takes an ion_allocation_data struct and returns it with the handle field
+ * populated with the opaque handle for the allocation.
+ */
+#define ION_IOC_ALLOC		_IOWR(ION_IOC_MAGIC, 0, \
+				      struct ion_allocation_data)
+
+/**
+ * DOC: ION_IOC_FREE - free memory
+ *
+ * Takes an ion_handle_data struct and frees the handle.
+ */
+#define ION_IOC_FREE		_IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+
+/**
+ * DOC: ION_IOC_MAP - get a file descriptor to mmap
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle.  Returns the struct with the fd field set to a file
+ * descriptor open in the current address space.  This file descriptor
+ * can then be used as an argument to mmap.
+ */
+#define ION_IOC_MAP		_IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle.  Returns the struct with the fd field set to a file
+ * descriptor open in the current address space.  This file descriptor
+ * can then be passed to another process.  The corresponding opaque handle can
+ * be retrieved via ION_IOC_IMPORT.
+ */
+#define ION_IOC_SHARE		_IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_IMPORT - imports a shared file descriptor
+ *
+ * Takes an ion_fd_data struct with the fd field populated with a valid file
+ * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
+ * field set to the corresponding opaque handle.
+ */
+#define ION_IOC_IMPORT		_IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SYNC - syncs a shared file descriptor to memory
+ *
+ * Deprecated in favor of using the dma_buf APIs correctly (syncing
+ * happens automatically when the buffer is mapped to a device).
+ * If necessary, it should be used after touching a cached buffer from the
+ * CPU; this makes the buffer coherent in memory.
+ */
+#define ION_IOC_SYNC		_IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
+ *
+ * Takes the argument of the architecture specific ioctl to call and
+ * passes appropriate userdata for that ioctl
+ */
+#define ION_IOC_CUSTOM		_IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
+
+#endif /* _UAPI_LINUX_ION_H */
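The usual userspace flow for this ABI is alloc, map, mmap, then free. A hypothetical sketch against the ioctls above; it assumes the header is exported and that /dev/ion exists:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include "ion.h"	/* the uapi header added above */

int main(void)
{
	struct ion_allocation_data alloc = {
		.len = 4096,
		.align = 4096,
		.heap_id_mask = ION_HEAP_SYSTEM_MASK,
		.flags = ION_FLAG_CACHED,
	};
	struct ion_fd_data map;
	struct ion_handle_data hd;
	int ion = open("/dev/ion", O_RDWR);
	void *p;

	if (ion < 0 || ioctl(ion, ION_IOC_ALLOC, &alloc) < 0)
		return 1;
	map.handle = alloc.handle;	/* opaque cookie from ION_IOC_ALLOC */
	if (ioctl(ion, ION_IOC_MAP, &map) == 0) {
		p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
			 map.fd, 0);
		if (p != MAP_FAILED) {
			memset(p, 0, 4096);
			munmap(p, 4096);
		}
		close(map.fd);
	}
	hd.handle = alloc.handle;
	ioctl(ion, ION_IOC_FREE, &hd);
	close(ion);
	return 0;
}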
diff --git a/drivers/staging/android/uapi/ion_test.h b/drivers/staging/android/uapi/ion_test.h
new file mode 100644
index 0000000..ffef06f
--- /dev/null
+++ b/drivers/staging/android/uapi/ion_test.h
@@ -0,0 +1,70 @@
+/*
+ * drivers/staging/android/uapi/ion_test.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_TEST_H
+#define _UAPI_LINUX_ION_TEST_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct ion_test_rw_data - metadata passed to the kernel to read/write a handle
+ * @ptr:	a pointer to an area at least as large as size
+ * @offset:	offset into the ion buffer to start reading
+ * @size:	size to read or write
+ * @write:	1 to write, 0 to read
+ */
+struct ion_test_rw_data {
+	__u64 ptr;
+	__u64 offset;
+	__u64 size;
+	int write;
+	int __padding;
+};
+
+#define ION_IOC_MAGIC		'I'
+
+/**
+ * DOC: ION_IOC_TEST_SET_DMA_BUF - attach a dma buf to the test driver
+ *
+ * Attaches a dma buf fd to the test driver.  Passing a second fd or -1 will
+ * release the first fd.
+ */
+#define ION_IOC_TEST_SET_FD \
+			_IO(ION_IOC_MAGIC, 0xf0)
+
+/**
+ * DOC: ION_IOC_TEST_DMA_MAPPING - read or write memory from a handle as DMA
+ *
+ * Reads or writes the memory from a handle using an uncached mapping.  Can be
+ * used by unit tests to emulate a DMA engine as close as possible.  Only
+ * expected to be used for debugging and testing, may not always be available.
+ */
+#define ION_IOC_TEST_DMA_MAPPING \
+			_IOW(ION_IOC_MAGIC, 0xf1, struct ion_test_rw_data)
+
+/**
+ * DOC: ION_IOC_TEST_KERNEL_MAPPING - read or write memory from a handle
+ *
+ * Reads or writes the memory from a handle using a kernel mapping.  Can be
+ * used by unit tests to test heap map_kernel functions.  Only expected to be
+ * used for debugging and testing, may not always be available.
+ */
+#define ION_IOC_TEST_KERNEL_MAPPING \
+			_IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data)
+
+
+#endif /* _UAPI_LINUX_ION_TEST_H */
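A hypothetical use of the test driver: attach a dma-buf fd (for example one obtained via ION_IOC_SHARE) and read the buffer back through the emulated DMA mapping. The test device fd, the helper name, and the exact write-field semantics here are assumptions:

#include <sys/ioctl.h>

#include "ion_test.h"	/* the uapi header added above */

int read_via_dma(int test_fd, int dmabuf_fd, void *dst, __u64 size)
{
	struct ion_test_rw_data rw = {
		.ptr = (__u64)(unsigned long)dst,
		.offset = 0,
		.size = size,
		.write = 0,	/* 0 = read buffer contents into ptr */
	};

	/* Attach the dma-buf; passing -1 later would release it. */
	if (ioctl(test_fd, ION_IOC_TEST_SET_FD, dmabuf_fd) < 0)
		return -1;
	return ioctl(test_fd, ION_IOC_TEST_DMA_MAPPING, &rw);
}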
diff --git a/drivers/staging/android/uapi/sw_sync.h b/drivers/staging/android/uapi/sw_sync.h
new file mode 100644
index 0000000..9b5d486
--- /dev/null
+++ b/drivers/staging/android/uapi/sw_sync.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SW_SYNC_H
+#define _UAPI_LINUX_SW_SYNC_H
+
+#include <linux/types.h>
+
+struct sw_sync_create_fence_data {
+	__u32	value;
+	char	name[32];
+	__s32	fence; /* fd of new fence */
+};
+
+#define SW_SYNC_IOC_MAGIC	'W'
+
+#define SW_SYNC_IOC_CREATE_FENCE	_IOWR(SW_SYNC_IOC_MAGIC, 0,\
+		struct sw_sync_create_fence_data)
+#define SW_SYNC_IOC_INC			_IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
+
+#endif /* _UAPI_LINUX_SW_SYNC_H */
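From userspace, a sw_sync timeline fd produces fences whose signal point is a timeline value, and SW_SYNC_IOC_INC advances the timeline. A hypothetical sketch; the /dev/sw_sync path is an assumption:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "sw_sync.h"	/* the uapi header added above */

int main(void)
{
	struct sw_sync_create_fence_data data;
	__u32 inc = 1;
	int tl = open("/dev/sw_sync", O_RDWR);

	if (tl < 0)
		return 1;
	memset(&data, 0, sizeof(data));
	data.value = 1;	/* fence signals when the timeline reaches 1 */
	strncpy(data.name, "demo", sizeof(data.name) - 1);
	if (ioctl(tl, SW_SYNC_IOC_CREATE_FENCE, &data) < 0)
		return 1;
	ioctl(tl, SW_SYNC_IOC_INC, &inc);	/* signal the fence */
	close(data.fence);
	close(tl);
	return 0;
}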
diff --git a/drivers/staging/android/uapi/sync.h b/drivers/staging/android/uapi/sync.h
new file mode 100644
index 0000000..57fdaad
--- /dev/null
+++ b/drivers/staging/android/uapi/sync.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SYNC_H
+#define _UAPI_LINUX_SYNC_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct sync_merge_data - data passed to merge ioctl
+ * @fd2:	file descriptor of second fence
+ * @name:	name of new fence
+ * @fence:	returns the fd of the new fence to userspace
+ */
+struct sync_merge_data {
+	__s32	fd2; /* fd of second fence */
+	char	name[32]; /* name of new fence */
+	__s32	fence; /* fd on newly created fence */
+};
+
+/**
+ * struct sync_pt_info - detailed sync_pt information
+ * @len:		length of sync_pt_info including any driver_data
+ * @obj_name:		name of parent sync_timeline
+ * @driver_name:	name of driver implementing the parent
+ * @status:		status of the sync_pt 0:active 1:signaled <0:error
+ * @timestamp_ns:	timestamp of status change in nanoseconds
+ * @driver_data:	any driver-dependent data
+ */
+struct sync_pt_info {
+	__u32	len;
+	char	obj_name[32];
+	char	driver_name[32];
+	__s32	status;
+	__u64	timestamp_ns;
+
+	__u8	driver_data[0];
+};
+
+/**
+ * struct sync_fence_info_data - data returned from fence info ioctl
+ * @len:	ioctl caller writes the size of the buffer it is passing in.
+ *		ioctl returns the length of sync_fence_data returned to
+ *		userspace, including pt_info.
+ * @name:	name of fence
+ * @status:	status of fence. 1: signaled 0:active <0:error
+ * @pt_info:	a sync_pt_info struct for every sync_pt in the fence
+ */
+struct sync_fence_info_data {
+	__u32	len;
+	char	name[32];
+	__s32	status;
+
+	__u8	pt_info[0];
+};
+
+#define SYNC_IOC_MAGIC		'>'
+
+/**
+ * DOC: SYNC_IOC_WAIT - wait for a fence to signal
+ *
+ * Pass the timeout in milliseconds.  Waits indefinitely if timeout < 0.
+ */
+#define SYNC_IOC_WAIT		_IOW(SYNC_IOC_MAGIC, 0, __s32)
+
+/**
+ * DOC: SYNC_IOC_MERGE - merge two fences
+ *
+ * Takes a struct sync_merge_data.  Creates a new fence containing copies of
+ * the sync_pts in both the calling fd and sync_merge_data.fd2.  Returns the
+ * new fence's fd in sync_merge_data.fence
+ */
+#define SYNC_IOC_MERGE		_IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
+
+/**
+ * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
+ *
+ * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
+ * Caller should write the size of the buffer into len.  On return, len is
+ * updated to reflect the total size of the sync_fence_info_data including
+ * pt_info.
+ *
+ * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
+ * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
+ */
+#define SYNC_IOC_FENCE_INFO	_IOWR(SYNC_IOC_MAGIC, 2,\
+	struct sync_fence_info_data)
+
+#endif /* _UAPI_LINUX_SYNC_H */
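Typical userspace use of these ioctls is to merge two fence fds and block on the result. A hypothetical helper; fd1 and fd2 are assumed to be fence fds obtained elsewhere (e.g. from sw_sync or a driver):

#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "sync.h"	/* the uapi header added above */

int wait_on_both(int fd1, int fd2, int timeout_ms)
{
	struct sync_merge_data merge;
	__s32 to = timeout_ms;
	int ret;

	memset(&merge, 0, sizeof(merge));
	merge.fd2 = fd2;
	strncpy(merge.name, "merged", sizeof(merge.name) - 1);
	if (ioctl(fd1, SYNC_IOC_MERGE, &merge) < 0)
		return -1;
	/* Blocks until every sync_pt in the merged fence signals. */
	ret = ioctl(merge.fence, SYNC_IOC_WAIT, &to);
	close(merge.fence);
	return ret;
}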
diff --git a/drivers/staging/edison-bcm43340/COPYING b/drivers/staging/edison-bcm43340/COPYING
new file mode 100644
index 0000000..d728556
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/COPYING
@@ -0,0 +1,17 @@
+      Unless you and Broadcom execute a separate written software license
+ agreement governing use of this software, this software is licensed to you
+ under the terms of the GNU General Public License version 2 (the "GPL"),
+ available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ following added to such license:
+ 
+      As a special exception, the copyright holders of this software give you
+ permission to link this software with independent modules, and to copy and
+ distribute the resulting executable under terms of your choice, provided that
+ you also meet, for each linked independent module, the terms and conditions of
+ the license of that module.  An independent module is a module which is not
+ derived from this software.  The special exception does not apply to any
+ modifications of the software.
+ 
+      Notwithstanding the above, under no circumstances may you combine this
+ software in any way with any other Broadcom software provided under a license
+ other than the GPL, without Broadcom's express prior written consent.
\ No newline at end of file
diff --git a/drivers/staging/edison-bcm43340/Kconfig b/drivers/staging/edison-bcm43340/Kconfig
new file mode 100644
index 0000000..d0e2245
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/Kconfig
@@ -0,0 +1,125 @@
+config BCMDHD
+	tristate "Broadcom BCM4334, 4335, 43241 wireless cards support"
+	depends on MMC
+	---help---
+	  This module adds support for wireless adapters based on
+	  Broadcom BCM4334, 4335, 43241 chipset.
+
+	  This driver uses the kernel's wireless extensions subsystem.
+
+	  If you choose to build a module, it will be called bcmdhd. Say M if
+	  unsure.
+
+choice
+	prompt "---Enable chip specific options"
+	depends on BCMDHD
+	---help---
+		Enable specific chip.
+
+config BCMDHD_GENERIC_CHIP_SUPPORT
+	bool "No chip specific options"
+	depends on BCMDHD
+	---help---
+		Disable any chip specific options.
+
+config BCM4330
+        bool "Broadcom BCM4330 support only"
+        depends on BCMDHD
+        ---help---
+                Enable any BCM4330 specific options.
+
+config BCM4334
+	bool "Broadcom BCM4334 support only"
+	depends on BCMDHD
+	---help---
+		Enable any BCM4334 specific options.
+
+config BCM4334X
+        bool "Broadcom BCM4334x support only"
+        depends on BCMDHD
+        ---help---
+                Enable any BCM4334x specific options.
+
+config BCM43241
+	bool "Broadcom BCM43241 support only"
+	depends on BCMDHD
+	---help---
+		Enable any BCM43241 specific options.
+
+config BCM4335
+	bool "Broadcom BCM4335 support only"
+	depends on BCMDHD
+	---help---
+		Enable any BCM4335 specific options.
+
+config BCM4339
+        bool "Broadcom BCM4339 support only"
+        depends on BCMDHD
+        ---help---
+                Enable any BCM4339 specific options.
+
+config BCM4354
+        bool "BCM4354 support"
+        depends on BCMDHD
+	---help--- 
+                Enable any BCM4354 specific options.
+endchoice
+
+choice
+	prompt "---Enable Chip Interface"
+	depends on BCMDHD
+	---help---
+		Enable chip interface.
+
+config BCMDHD_SDIO
+	bool "SDIO bus interface support"
+	depends on BCMDHD && MMC
+
+config BCMDHD_PCIE
+	bool "PCIe bus interface support"
+	depends on BCMDHD && PCI
+
+endchoice
+
+config INTEL_PLATFORM
+	bool "Enable Intel specific flags"
+	depends on BCMDHD
+	default y
+	---help---
+	        Enable Intel specific flags
+
+config BCMDHD_FW_PATH
+	depends on BCMDHD
+	string "Firmware path"
+	default "/system/etc/firmware/fw_bcmdhd.bin"
+	---help---
+	  Path to the firmware file.
+
+config BCMDHD_NVRAM_PATH
+	depends on BCMDHD
+	string "NVRAM path"
+	default "/system/etc/wifi/bcmdhd.cal"
+	---help---
+	  Path to the calibration file.
+
+config BCMDHD_WEXT
+	bool "Enable WEXT support"
+	depends on BCMDHD && CFG80211 = n
+	select WIRELESS_EXT
+	select WEXT_PRIV
+	help
+	  Enables WEXT support
+
+config DHD_USE_STATIC_BUF
+	bool "Enable memory preallocation"
+	depends on BCMDHD
+	default n
+	---help---
+	  Use memory preallocated in platform
+
+config DHD_USE_SCHED_SCAN
+	bool "Use CFG80211 sched scan"
+	depends on BCMDHD && CFG80211
+	default n
+	---help---
+	  Use CFG80211 sched scan
diff --git a/drivers/staging/edison-bcm43340/Makefile b/drivers/staging/edison-bcm43340/Makefile
new file mode 100644
index 0000000..43cc578
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/Makefile
@@ -0,0 +1,341 @@
+# Added here as we want to keep CONFIG_xxx external to i386_edison_defconfig
+#CONFIG_BCMDHD=m
+#CONFIG_BCM4334x=y	<-- this is the WRONG way to declare a kernel config: the
+#				lowercase "x" in the name causes issues. The Kconfig
+#				uses CONFIG_BCM4334X with an uppercase "X", so this
+#				Makefile was changed from the Yocto version to use
+#				CONFIG_BCM4334X throughout, matching the Kconfig/defconfig.
+#CONFIG_BCMDHD_SDIO=y
+#CONFIG_INTEL_PLATFORM=y
+#CONFIG_DHD_USE_SCHED_SCAN=y
+
+#DHDCFLAGS += -DCONFIG_BCMDHD=$(CONFIG_BCMDHD)
+#DHDCFLAGS += -DCONFIG_BCM4334x=$(CONFIG_BCM4334x)
+#DHDCFLAGS += -DCONFIG_BCMDHD_SDIO=$(CONFIG_BCMDHD_SDIO)
+#DHDCFLAGS += -DCONFIG_INTEL_PLATFORM=$(CONFIG_INTEL_PLATFORM)
+#DHDCFLAGS += -DCONFIG_DHD_USE_SCHED_SCAN=$(CONFIG_DHD_USE_SCHED_SCAN)
+
+#KDIR=$(KERNEL_SRC)
+#CURDIR =$(shell pwd)
+
+# bcmdhd for INTEL 4334, 4335, 43241
+ifneq ($(CONFIG_BCM4334),)
+BCM43XX_DRIVER=bcm4334
+DHDCFLAGS += -DUSE_4334
+endif
+
+ifneq ($(CONFIG_BCM4335),)
+BCM43XX_DRIVER=bcm4335
+DHDCFLAGS += -DUSE_4335
+endif
+
+ifneq ($(CONFIG_BCM43241),)
+BCM43XX_DRIVER=bcm43241
+DHDCFLAGS += -DUSE_4324
+endif
+
+ifneq ($(CONFIG_BCM4334X),)
+BCM43XX_DRIVER=bcm4334x
+DHDCFLAGS += -DUSE_4334X
+endif
+
+ifneq ($(CONFIG_BCM4354),)
+BCM43XX_DRIVER=bcm4354
+DHDCFLAGS += -DUSE_4354
+endif
+
+ifneq ($(CONFIG_BCM43430),)
+BCM43XX_DRIVER=bcm43430
+DHDCFLAGS += -DUSE_43430
+endif
+
+
+DHDCFLAGS += -Wall -Wstrict-prototypes -Dlinux -DLINUX -DBCMDRIVER            \
+	-DBCMDONGLEHOST -DUNRELEASEDCHIP -DBCMDMA32 -DBCMFILEIMAGE            \
+	-DDHDTHREAD -DSHOW_EVENTS -DBCMDBG 	                            \
+	-DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT                                \
+	-DKEEP_ALIVE -DGET_CUSTOM_MAC_ENABLE -DPKT_FILTER_SUPPORT             \
+	-DEMBEDDED_PLATFORM -DPNO_SUPPORT                                     \
+	-DDHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT                           \
+	-DCUSTOMER_HW2 -DENABLE_INSMOD_NO_FW_LOAD -DLINUX_FW_REQUEST_API	\
+	-I$(src) -I$(src)/include -I$(src)/common/include
+
+#################
+# Common feature
+#################
+DHDCFLAGS += -DWL_CFG80211
+# Print out kernel panic point of file and line info when assertion happened
+DHDCFLAGS += -DBCMASSERT_LOG
+
+# keepalive
+DHDCFLAGS += -DCUSTOM_KEEP_ALIVE_SETTING=10000
+
+DHDCFLAGS += -DVSDB
+
+# For p2p connection issue
+DHDCFLAGS += -DWL_SCB_TIMEOUT=10
+DHDCFLAGS += -DWLP2P
+
+# TDLS enable
+DHDCFLAGS += -DWLTDLS -DWLTDLS_AUTO_ENABLE
+# For TDLS tear down inactive time 40 sec
+DHDCFLAGS += -DCUSTOM_TDLS_IDLE_MODE_SETTING=40000
+# for TDLS RSSI HIGH for establishing TDLS link
+DHDCFLAGS += -DCUSTOM_TDLS_RSSI_THRESHOLD_HIGH=-60
+# for TDLS RSSI HIGH for tearing down TDLS link
+DHDCFLAGS += -DCUSTOM_TDLS_RSSI_THRESHOLD_LOW=-70
+
+# Roaming
+DHDCFLAGS += -DROAM_ENABLE -DROAM_CHANNEL_CACHE -DROAM_API
+DHDCFLAGS += -DENABLE_FW_ROAM_SUSPEND
+# Roaming trigger
+DHDCFLAGS += -DCUSTOM_ROAM_TRIGGER_SETTING=-75
+DHDCFLAGS += -DCUSTOM_ROAM_DELTA_SETTING=10
+# Set PM 2 always regardless suspend/resume
+DHDCFLAGS += -DSUPPORT_PM2_ONLY
+
+# For special PNO Event keep wake lock for 10sec
+DHDCFLAGS += -DCUSTOM_PNO_EVENT_LOCK_xTIME=10
+DHDCFLAGS += -DMIRACAST_AMPDU_SIZE=8
+
+# Early suspend
+DHDCFLAGS += -DDHD_USE_EARLYSUSPEND
+
+# For Scan result patch
+DHDCFLAGS += -DESCAN_RESULT_PATCH
+
+# For Static Buffer
+ifeq ($(CONFIG_BROADCOM_WIFI_RESERVED_MEM),y)
+  DHDCFLAGS += -DCONFIG_DHD_USE_STATIC_BUF
+  DHDCFLAGS += -DENHANCED_STATIC_BUF
+  DHDCFLAGS += -DSTATIC_WL_PRIV_STRUCT
+endif
+
+ifneq ($(CONFIG_DHD_USE_SCHED_SCAN),)
+DHDCFLAGS += -DWL_SCHED_SCAN
+endif
+
+# Prevent rx thread monopolize
+#DHDCFLAGS += -DWAIT_DEQUEUE
+
+# Config PM Control
+DHDCFLAGS += -DCONFIG_CONTROL_PM
+
+# idle count
+DHDCFLAGS += -DDHD_USE_IDLECOUNT
+
+# SKB TAILPAD to avoid out of boundary memory access
+DHDCFLAGS += -DDHDENABLE_TAILPAD
+
+# Wi-Fi Direct
+DHDCFLAGS += -DWL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+DHDCFLAGS += -DWL_CFG80211_STA_EVENT
+DHDCFLAGS += -DWL_IFACE_COMB_NUM_CHANNELS
+DHDCFLAGS += -DWL_ENABLE_P2P_IF
+
+##########################
+# driver type
+# m: module type driver
+# y: built-in type driver
+##########################
+DRIVER_TYPE ?= $(CONFIG_BCMDHD)
+
+ifneq ($(CONFIG_BCMDHD_SDIO),)
+  DHDCFLAGS += -DBDC -DOOB_INTR_ONLY -DDHD_BCMEVENTS -DMMC_SDIO_ABORT
+  DHDCFLAGS += -DBCMSDIO -DBCMLXSDMMC -DUSE_SDIOFIFO_IOVAR
+  DHDCFLAGS += -DPROP_TXSTATUS -DPROP_TXSTATUS_VSDB
+endif
+
+ifneq ($(CONFIG_BCMDHD_PCIE),)
+  DHDCFLAGS += -DPCIE_FULL_DONGLE -DBCMPCIE -DCUSTOM_DPC_PRIO_SETTING=-1
+endif
+
+ifeq (${TARGET_BUILD_VARIANT},$(filter ${TARGET_BUILD_VARIANT}, eng userdebug))
+DHDCFLAGS += -DDHD_DEBUG
+endif
+ifeq (${TARGET_BUILD_VARIANT},$(filter ${TARGET_BUILD_VARIANT}, eng))
+DHDCFLAGS += -DSDTEST
+endif
+
+################################
+# INTEL platform specific features
+#################################
+CONFIG_INTEL_PLATFORM=y
+ifneq ($(CONFIG_INTEL_PLATFORM),)
+$(info bcmdhd: Compiling for Intel platform $(TARGET_PRODUCT) $(obj))
+ifneq (,$(filter saltbay% mofd%,$(TARGET_PRODUCT)))
+  DHDCFLAGS += -DHW_OOB
+endif
+ifneq (,$(filter mofd_v0%,$(TARGET_PRODUCT)))
+  DHDCFLAGS += -DLOW_POWER_NVRAM
+else ifneq (,$(filter mofd_v1%,$(TARGET_PRODUCT)))
+  DHDCFLAGS += -DLOW_POWER_NVRAM_V1
+endif
+  DHDCFLAGS += -DSUPPORT_AUTO_CHANNEL
+  DHDCFLAGS += -DOEM_ANDROID
+  DHDCFLAGS += -DCUSTOM_LISTEN_INTERVAL=3
+  DHDCFLAGS += -DBOARD_INTEL
+  DHDCFLAGS += -DUSE_KTHREAD_API
+  DHDCFLAGS += -DCUSTOM_RXCHAIN=1
+  DHDCFLAGS += -DDHD_USE_EARLYSUSPEND
+  DHDCFLAGS += -DCUSTOM_DPC_CPUCORE=0
+endif
+
+#########################
+# Chip dependent feature
+#########################
+ifneq ($(CONFIG_BCM4354),)
+$(info bcmdhd: Compiling for 4354 chip)
+  DHDCFLAGS += -DCUSTOM_GLOM_SETTING=8
+  DHDCFLAGS += -DUSE_DYNAMIC_F2_BLKSIZE -DDYNAMIC_F2_BLKSIZE_FOR_NONLEGACY=128
+  DHDCFLAGS += -DBCMSDIOH_TXGLOM -DCUSTOM_TXGLOM=1 -DBCMSDIOH_TXGLOM_HIGHSPEED
+  DHDCFLAGS += -DDHDTCPACK_SUPPRESS
+  DHDCFLAGS += -DUSE_WL_TXBF
+  DHDCFLAGS += -DUSE_SDIOFIFO_IOVAR
+  DHDCFLAGS += -DCUSTOM_AMPDU_BA_WSIZE=40
+  DHDCFLAGS += -DRXFRAME_THREAD
+  DHDCFLAGS += -DREPEAT_READFRAME
+  DHDCFLAGS += -DUSE_WL_FRAMEBURST
+  DHDCFLAGS += -DCUSTOM_MAX_TXGLOM_SIZE=32
+  DHDCFLAGS += -DCUSTOM_DEF_TXGLOM_SIZE=14
+  DHDCFLAGS += -DUSE_DYNAMIC_MAXPKT_RXGLOM
+  DHDCFLAGS += -DCUSTOM_SDIO_F2_BLKSIZE=512
+  DHDCFLAGS += -DMAX_HDR_READ=128
+  DHDCFLAGS += -DDHD_FIRSTREAD=128
+  DHDCFLAGS += -DCUSTOM_AMPDU_MPDU=16
+  # New Features
+  DHDCFLAGS += -DWL11U -DMFP
+  DHDCFLAGS += -DDHD_ENABLE_LPC
+  DHDCFLAGS += -DCUSTOM_PSPRETEND_THR=30
+endif
+
+ifneq ($(CONFIG_BCM4339),)
+$(info bcmdhd: Compiling for 4339 chip)
+  DHDCFLAGS += -DCUSTOM_GLOM_SETTING=8
+  DHDCFLAGS += -DUSE_DYNAMIC_F2_BLKSIZE -DDYNAMIC_F2_BLKSIZE_FOR_NONLEGACY=128
+  DHDCFLAGS += -DBCMSDIOH_TXGLOM -DCUSTOM_TXGLOM=1 -DBCMSDIOH_TXGLOM_HIGHSPEED
+  DHDCFLAGS += -DDHDTCPACK_SUPPRESS
+  DHDCFLAGS += -DUSE_WL_TXBF
+  DHDCFLAGS += -DUSE_SDIOFIFO_IOVAR
+  DHDCFLAGS += -DCUSTOM_AMPDU_BA_WSIZE=40
+  DHDCFLAGS += -DRXFRAME_THREAD
+  DHDCFLAGS += -DREPEAT_READFRAME
+  DHDCFLAGS += -DUSE_WL_FRAMEBURST
+  DHDCFLAGS += -DCUSTOM_MAX_TXGLOM_SIZE=32
+  DHDCFLAGS += -DCUSTOM_DEF_TXGLOM_SIZE=14
+  DHDCFLAGS += -DUSE_DYNAMIC_MAXPKT_RXGLOM
+  DHDCFLAGS += -DCUSTOM_SDIO_F2_BLKSIZE=512
+  # New Features
+  DHDCFLAGS += -DWL11U
+  DHDCFLAGS += -DDHD_ENABLE_LPC
+  DHDCFLAGS += -DCUSTOM_PSPRETEND_THR=30
+endif
+
+ifneq ($(CONFIG_BCM4335),)
+$(info bcmdhd: Compiling for 4335 chip)
+  DHDCFLAGS += -DCUSTOM_GLOM_SETTING=8
+  DHDCFLAGS += -DUSE_DYNAMIC_F2_BLKSIZE -DDYNAMIC_F2_BLKSIZE_FOR_NONLEGACY=128
+  DHDCFLAGS += -DBCMSDIOH_TXGLOM -DCUSTOM_TXGLOM=1 -DBCMSDIOH_TXGLOM_HIGHSPEED
+  DHDCFLAGS += -DDHDTCPACK_SUPPRESS
+  DHDCFLAGS += -DUSE_WL_TXBF
+  DHDCFLAGS += -DUSE_SDIOFIFO_IOVAR
+  DHDCFLAGS += -DCUSTOM_AMPDU_BA_WSIZE=40
+  DHDCFLAGS += -DRXFRAME_THREAD
+  DHDCFLAGS += -DREPEAT_READFRAME
+  DHDCFLAGS += -DUSE_WL_FRAMEBURST
+  DHDCFLAGS += -DCUSTOM_MAX_TXGLOM_SIZE=32
+  DHDCFLAGS += -DCUSTOM_DEF_TXGLOM_SIZE=14
+  DHDCFLAGS += -DUSE_DYNAMIC_MAXPKT_RXGLOM
+  DHDCFLAGS += -DCUSTOM_SDIO_F2_BLKSIZE=512
+  # New Features
+  DHDCFLAGS += -DWL11U
+  DHDCFLAGS += -DDHD_ENABLE_LPC
+  DHDCFLAGS += -DCUSTOM_PSPRETEND_THR=30
+# For BT LOCK
+ifeq ($(CONFIG_BCM4335BT),y)
+  DHDCFLAGS += -DENABLE_4335BT_WAR
+endif
+endif
+
+ifneq ($(CONFIG_BCM4334),)
+$(info bcmdhd: Compiling for 4334 chip)
+  DHDCFLAGS += -DCUSTOM_GLOM_SETTING=5
+  DHDCFLAGS += -DCUSTOM_SDIO_F2_BLKSIZE=128
+endif
+
+ifneq ($(CONFIG_BCM4334X),)
+$(info bcmdhd: Compiling for 4334x chip)
+  DHDCFLAGS += -DCUSTOM_GLOM_SETTING=5
+  DHDCFLAGS += -DCUSTOM_SDIO_F2_BLKSIZE=128
+  DHDCFLAGS += -DPKTPRIO_OVERRIDE
+endif
+
+ifneq ($(CONFIG_BCM43241),)
+$(info bcmdhd: Compiling for 43241 chip)
+  DHDCFLAGS += -DCUSTOM_GLOM_SETTING=5
+  DHDCFLAGS += -DUSE_SDIOFIFO_IOVAR
+  DHDCFLAGS += -DCUSTOM_SDIO_F2_BLKSIZE=128
+endif
+
+ifneq ($(CONFIG_BCM43430),)
+$(info bcmdhd: Compiling for 43430 chip)
+  DHDCFLAGS += -DCUSTOM_GLOM_SETTING=5
+  DHDCFLAGS += -DUSE_SDIOFIFO_IOVAR
+  DHDCFLAGS += -DCUSTOM_SDIO_F2_BLKSIZE=128
+endif
+
+ifneq ($(CONFIG_BCM4330),)
+$(info bcmdhd: Compiling for 4330 chip)
+  DHDCFLAGS += -DCUSTOM_GLOM_SETTING=0
+
+  # Remove common features for BCM4330
+  DHDCFLAGS := $(filter-out -DSUPPORT_AMPDU_MPDU_CMD,$(DHDCFLAGS))
+  DHDCFLAGS := $(filter-out -DVSDB,$(DHDCFLAGS))
+  DHDCFLAGS := $(filter-out -DPROP_TXSTATUS,$(DHDCFLAGS))
+  DHDCFLAGS := $(filter-out -DDHD_USE_IDLECOUNT,$(DHDCFLAGS))
+endif
+
+
+EXTRA_CFLAGS = $(DHDCFLAGS)
+ifeq ($(DRIVER_TYPE),m)
+EXTRA_LDFLAGS += --strip-debug
+endif
+
+EXTRA_CFLAGS += -DSRCBASE=\"$(src)\"
+EXTRA_CFLAGS += -I$(src)/include/ -I$(src)/
+KBUILD_CFLAGS += -I$(LINUXDIR)/include -I$(shell pwd)
+
+DHDOFILES := dhd_pno.o dhd_common.o dhd_ip.o dhd_custom_gpio.o \
+	dhd_linux.o dhd_linux_sched.o dhd_cfg80211.o dhd_linux_wq.o aiutils.o bcmevent.o \
+	bcmutils.o bcmwifi_channels.o hndpmu.o linux_osl.o sbutils.o siutils.o \
+	wl_android.o wl_cfg80211.o wl_cfgp2p.o wl_cfg_btcoex.o wldev_common.o wl_linux_mon.o \
+	dhd_linux_platdev.o
+
+ifneq ($(CONFIG_BCMDHD_SDIO),)
+  DHDOFILES += bcmsdh.o bcmsdh_linux.o bcmsdh_sdmmc.o bcmsdh_sdmmc_linux.o
+  DHDOFILES += dhd_cdc.o dhd_wlfc.o dhd_sdio.o
+endif
+
+ifneq ($(CONFIG_BCMDHD_PCIE),)
+  DHDOFILES += dhd_pcie.o dhd_pcie_linux.o dhd_msgbuf.o circularbuf.o
+endif
+
+$(BCM43XX_DRIVER)-objs := $(DHDOFILES)
+obj-$(DRIVER_TYPE)   += $(BCM43XX_DRIVER).o
+
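+# Note (sketch): SUBDIRS= is the legacy external-module spelling; newer
+# kernels expect M= instead (the commented variants below).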
+all:
+	@echo "$(MAKE) --no-print-directory -C $(KDIR) SUBDIRS=$(CURDIR) modules"
+	@$(MAKE) --no-print-directory -C $(KDIR) SUBDIRS=$(CURDIR) modules
+	#@echo "$(MAKE) --no-print-directory -C $(KDIR) SUBDIRS=$(CURDIR) M=$(CURDIR) modules"
+	#@$(MAKE) --no-print-directory -C $(KDIR) SUBDIRS=$(CURDIR) M=$(CURDIR) modules
+
+clean:
+	rm -rf *.o *.ko *.mod.c *~ .*.cmd *.o.cmd .*.o.cmd \
+	Module.symvers modules.order .tmp_versions modules.builtin
+
+modules_install:
+	@$(MAKE) --no-print-directory -C $(KDIR) \
+		SUBDIRS=$(CURDIR) modules_install
+		#SUBDIRS=$(CURDIR) M=$(CURDIR) modules_install
+
diff --git a/drivers/staging/edison-bcm43340/aiutils.c b/drivers/staging/edison-bcm43340/aiutils.c
new file mode 100644
index 0000000..6292396
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/aiutils.c
@@ -0,0 +1,1004 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: aiutils.c 432226 2013-10-26 04:34:36Z $
+ */
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+
+#include "siutils_priv.h"
+
+#define BCM47162_DMP() (0)
+#define BCM5357_DMP() (0)
+#define BCM4707_DMP() (0)
+#define remap_coreid(sih, coreid)	(coreid)
+#define remap_corerev(sih, corerev)	(corerev)
+
+/* EROM parsing */
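+/*
+ * Layout sketch (derived from the parser below): the enumeration ROM is a
+ * sequence of 32-bit words, each carrying a valid bit (ER_VALID) and a tag
+ * (ER_TAG). ER_CI tags a component-identifier pair (CIA then CIB), ER_MP a
+ * master-port descriptor and ER_ADD an address-space descriptor; the table
+ * ends with ER_END|ER_VALID. get_erom_ent() returns the next entry matching
+ * (mask, match), counting and skipping invalid words on the way.
+ */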
+
+static uint32
+get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match)
+{
+	uint32 ent;
+	uint inv = 0, nom = 0;
+
+	while (TRUE) {
+		ent = R_REG(si_osh(sih), *eromptr);
+		(*eromptr)++;
+
+		if (mask == 0)
+			break;
+
+		if ((ent & ER_VALID) == 0) {
+			inv++;
+			continue;
+		}
+
+		if (ent == (ER_END | ER_VALID))
+			break;
+
+		if ((ent & mask) == match)
+			break;
+
+		nom++;
+	}
+
+	SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent));
+	if (inv + nom) {
+		SI_VMSG(("  after %d invalid and %d non-matching entries\n", inv, nom));
+	}
+	return ent;
+}
+
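+/*
+ * Fetch one Address Space Descriptor for slave port 'sp'. On a tag, port
+ * or type mismatch the entry is "pushed back" (eromptr is rewound) and 0
+ * is returned, which lets callers probe for AD_ST_SLAVE and fall back to
+ * AD_ST_BRIDGE, as ai_scan() does below.
+ */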
+static uint32
+get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh,
+        uint32 *sizel, uint32 *sizeh)
+{
+	uint32 asd, sz, szd;
+
+	asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
+	if (((asd & ER_TAG1) != ER_ADD) ||
+	    (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
+	    ((asd & AD_ST_MASK) != st)) {
+		/* This is not what we want, "push" it back */
+		(*eromptr)--;
+		return 0;
+	}
+	*addrl = asd & AD_ADDR_MASK;
+	if (asd & AD_AG32)
+		*addrh = get_erom_ent(sih, eromptr, 0, 0);
+	else
+		*addrh = 0;
+	*sizeh = 0;
+	sz = asd & AD_SZ_MASK;
+	if (sz == AD_SZ_SZD) {
+		szd = get_erom_ent(sih, eromptr, 0, 0);
+		*sizel = szd & SD_SZ_MASK;
+		if (szd & SD_SG32)
+			*sizeh = get_erom_ent(sih, eromptr, 0, 0);
+	} else
+		*sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
+
+	SI_VMSG(("  SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
+	        sp, ad, st, *sizeh, *sizel, *addrh, *addrl));
+
+	return asd;
+}
+
+static void
+ai_hwfixup(si_info_t *sii)
+{
+}
+
+
+/* parse the enumeration rom to identify all cores */
+void
+ai_scan(si_t *sih, void *regs, uint devid)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	chipcregs_t *cc = (chipcregs_t *)regs;
+	uint32 erombase, *eromptr, *eromlim;
+
+	erombase = R_REG(sii->osh, &cc->eromptr);
+
+	switch (BUSTYPE(sih->bustype)) {
+	case SI_BUS:
+		eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
+		break;
+
+	case PCI_BUS:
+		/* Set wrappers address */
+		sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
+
+		/* Now point the window at the erom */
+		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
+		eromptr = regs;
+		break;
+
+	case SPI_BUS:
+	case SDIO_BUS:
+		eromptr = (uint32 *)(uintptr)erombase;
+		break;
+
+	case PCMCIA_BUS:
+	default:
+		SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n", sih->bustype));
+		ASSERT(0);
+		return;
+	}
+	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
+
+	SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n",
+	         regs, erombase, eromptr, eromlim));
+	while (eromptr < eromlim) {
+		uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
+		uint32 mpd, asd, addrl, addrh, sizel, sizeh;
+		uint i, j, idx;
+		bool br;
+
+		br = FALSE;
+
+		/* Grok a component */
+		cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
+		if (cia == (ER_END | ER_VALID)) {
+			SI_VMSG(("Found END of erom after %d cores\n", sii->numcores));
+			ai_hwfixup(sii);
+			return;
+		}
+
+		cib = get_erom_ent(sih, &eromptr, 0, 0);
+
+		if ((cib & ER_TAG) != ER_CI) {
+			SI_ERROR(("CIA not followed by CIB\n"));
+			goto error;
+		}
+
+		cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
+		mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
+		crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
+		nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
+		nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
+		nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
+		nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
+
+#ifdef BCMDBG_SI
+		SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, "
+		         "nsw = %d, nmp = %d & nsp = %d\n",
+		         mfg, cid, crev, eromptr - 1, nmw, nsw, nmp, nsp));
+#else
+		BCM_REFERENCE(crev);
+#endif
+
+		if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
+			continue;
+		if ((nmw + nsw == 0)) {
+			/* A component which is not a core */
+			if (cid == OOB_ROUTER_CORE_ID) {
+				asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
+					&addrl, &addrh, &sizel, &sizeh);
+				if (asd != 0) {
+					sii->oob_router = addrl;
+				}
+			}
+			if (cid != GMAC_COMMON_4706_CORE_ID && cid != NS_CCB_CORE_ID)
+				continue;
+		}
+
+		idx = sii->numcores;
+
+		cores_info->cia[idx] = cia;
+		cores_info->cib[idx] = cib;
+		cores_info->coreid[idx] = remap_coreid(sih, cid);
+
+		for (i = 0; i < nmp; i++) {
+			mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
+			if ((mpd & ER_TAG) != ER_MP) {
+				SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
+				goto error;
+			}
+			SI_VMSG(("  Master port %d, mp: %d id: %d\n", i,
+			         (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
+			         (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
+		}
+
+		/* First Slave Address Descriptor should be port 0:
+		 * the main register space for the core
+		 */
+		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
+		if (asd == 0) {
+			do {
+			/* Try again to see if it is a bridge */
+			asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if (asd != 0)
+				br = TRUE;
+			else {
+					if (br == TRUE) {
+						break;
+					}
+					else if ((addrh != 0) || (sizeh != 0) ||
+						(sizel != SI_CORE_SIZE)) {
+						SI_ERROR(("addrh = 0x%x\t sizeh = 0x%x\t size1 ="
+							"0x%x\n", addrh, sizeh, sizel));
+						SI_ERROR(("First Slave ASD for"
+							"core 0x%04x malformed "
+							"(0x%08x)\n", cid, asd));
+						goto error;
+					}
+				}
+			} while (1);
+		}
+		cores_info->coresba[idx] = addrl;
+		cores_info->coresba_size[idx] = sizel;
+		/* Get any more ASDs in port 0 */
+		j = 1;
+		do {
+			asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
+				cores_info->coresba2[idx] = addrl;
+				cores_info->coresba2_size[idx] = sizel;
+			}
+			j++;
+		} while (asd != 0);
+
+		/* Go through the ASDs for other slave ports */
+		for (i = 1; i < nsp; i++) {
+			j = 0;
+			do {
+				asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
+				              &sizel, &sizeh);
+
+				if (asd == 0)
+					break;
+				j++;
+			} while (1);
+			if (j == 0) {
+				SI_ERROR((" SP %d has no address descriptors\n", i));
+				goto error;
+			}
+		}
+
+		/* Now get master wrappers */
+		for (i = 0; i < nmw; i++) {
+			asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if (asd == 0) {
+				SI_ERROR(("Missing descriptor for MW %d\n", i));
+				goto error;
+			}
+			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+				SI_ERROR(("Master wrapper %d is not 4KB\n", i));
+				goto error;
+			}
+			if (i == 0)
+				cores_info->wrapba[idx] = addrl;
+		}
+
+		/* And finally slave wrappers */
+		for (i = 0; i < nsw; i++) {
+			uint fwp = (nsp == 1) ? 0 : 1;
+			asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
+			              &sizel, &sizeh);
+			if (asd == 0) {
+				SI_ERROR(("Missing descriptor for SW %d\n", i));
+				goto error;
+			}
+			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+				SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
+				goto error;
+			}
+			if ((nmw == 0) && (i == 0))
+				cores_info->wrapba[idx] = addrl;
+		}
+
+
+		/* Don't record bridges */
+		if (br)
+			continue;
+
+		/* Done with core */
+		sii->numcores++;
+	}
+
+	SI_ERROR(("Reached end of erom without finding END"));
+
+error:
+	sii->numcores = 0;
+	return;
+}
+
+/* This function changes the logical "focus" to the indicated core.
+ * Return the current core's virtual address.
+ */
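+/*
+ * Usage note (illustrative): callers that must not disturb the current
+ * core save si_coreidx() beforehand and restore it afterwards, as the
+ * slow path of ai_corereg() below does.
+ */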
+void *
+ai_setcoreidx(si_t *sih, uint coreidx)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint32 addr, wrap;
+	void *regs;
+
+	if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
+		return (NULL);
+
+	addr = cores_info->coresba[coreidx];
+	wrap = cores_info->wrapba[coreidx];
+
+	/*
+	 * If the user has provided an interrupt-mask-enabled callback,
+	 * assert that interrupts are disabled before switching the core.
+	 */
+	ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
+
+	switch (BUSTYPE(sih->bustype)) {
+	case SI_BUS:
+		/* map new one */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = REG_MAP(addr, SI_CORE_SIZE);
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		sii->curmap = regs = cores_info->regs[coreidx];
+		if (!cores_info->wrappers[coreidx] && (wrap != 0)) {
+			cores_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
+			ASSERT(GOODREGS(cores_info->wrappers[coreidx]));
+		}
+		sii->curwrap = cores_info->wrappers[coreidx];
+		break;
+
+	case PCI_BUS:
+		/* point bar0 window */
+		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr);
+		regs = sii->curmap;
+		/* point bar0 2nd 4KB window to the primary wrapper */
+		if (PCIE_GEN2(sii))
+			OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap);
+		else
+			OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap);
+		break;
+
+	case SPI_BUS:
+	case SDIO_BUS:
+		sii->curmap = regs = (void *)((uintptr)addr);
+		sii->curwrap = (void *)((uintptr)wrap);
+		break;
+
+	case PCMCIA_BUS:
+	default:
+		ASSERT(0);
+		regs = NULL;
+		break;
+	}
+
+	sii->curmap = regs;
+	sii->curidx = coreidx;
+
+	return regs;
+}
+
+
+void
+ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	chipcregs_t *cc = NULL;
+	uint32 erombase, *eromptr, *eromlim;
+	uint i, j, cidx;
+	uint32 cia, cib, nmp, nsp;
+	uint32 asd, addrl, addrh, sizel, sizeh;
+
+	for (i = 0; i < sii->numcores; i++) {
+		if (cores_info->coreid[i] == CC_CORE_ID) {
+			cc = (chipcregs_t *)cores_info->regs[i];
+			break;
+		}
+	}
+	if (cc == NULL)
+		goto error;
+
+	erombase = R_REG(sii->osh, &cc->eromptr);
+	eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
+	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
+
+	cidx = sii->curidx;
+	cia = cores_info->cia[cidx];
+	cib = cores_info->cib[cidx];
+
+	nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
+	nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
+
+	/* scan for cores */
+	while (eromptr < eromlim) {
+		if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) &&
+			(get_erom_ent(sih, &eromptr, 0, 0) == cib)) {
+			break;
+		}
+	}
+
+	/* skip master ports */
+	for (i = 0; i < nmp; i++)
+		get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
+
+	/* Skip ASDs in port 0 */
+	asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
+	if (asd == 0) {
+		/* Try again to see if it is a bridge */
+		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
+		              &sizel, &sizeh);
+	}
+
+	j = 1;
+	do {
+		asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
+		              &sizel, &sizeh);
+		j++;
+	} while (asd != 0);
+
+	/* Go through the ASDs for other slave ports */
+	for (i = 1; i < nsp; i++) {
+		j = 0;
+		do {
+			asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
+				&sizel, &sizeh);
+			if (asd == 0)
+				break;
+
+			if (!asidx--) {
+				*addr = addrl;
+				*size = sizel;
+				return;
+			}
+			j++;
+		} while (1);
+
+		if (j == 0) {
+			SI_ERROR((" SP %d has no address descriptors\n", i));
+			break;
+		}
+	}
+
+error:
+	*size = 0;
+	return;
+}
+
+/* Return the number of address spaces in current core */
+int
+ai_numaddrspaces(si_t *sih)
+{
+	return 2;
+}
+
+/* Return the address of the nth address space in the current core */
+uint32
+ai_addrspace(si_t *sih, uint asidx)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint cidx;
+
+	cidx = sii->curidx;
+
+	if (asidx == 0)
+		return cores_info->coresba[cidx];
+	else if (asidx == 1)
+		return cores_info->coresba2[cidx];
+	else {
+		SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
+		          __FUNCTION__, asidx));
+		return 0;
+	}
+}
+
+/* Return the size of the nth address space in the current core */
+uint32
+ai_addrspacesize(si_t *sih, uint asidx)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint cidx;
+
+	cidx = sii->curidx;
+
+	if (asidx == 0)
+		return cores_info->coresba_size[cidx];
+	else if (asidx == 1)
+		return cores_info->coresba2_size[cidx];
+	else {
+		SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
+		          __FUNCTION__, asidx));
+		return 0;
+	}
+}
+
+uint
+ai_flag(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+
+	if (BCM47162_DMP()) {
+		SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __FUNCTION__));
+		return sii->curidx;
+	}
+	if (BCM5357_DMP()) {
+		SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__));
+		return sii->curidx;
+	}
+	if (BCM4707_DMP()) {
+		SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
+			__FUNCTION__));
+		return sii->curidx;
+	}
+	ai = sii->curwrap;
+
+	return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
+}
+
+uint
+ai_flag_alt(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+
+	if (BCM47162_DMP()) {
+		SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __FUNCTION__));
+		return sii->curidx;
+	}
+	if (BCM5357_DMP()) {
+		SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__));
+		return sii->curidx;
+	}
+	if (BCM4707_DMP()) {
+		SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
+			__FUNCTION__));
+		return sii->curidx;
+	}
+	ai = sii->curwrap;
+
+	return ((R_REG(sii->osh, &ai->oobselouta30) >> AI_OOBSEL_1_SHIFT) & AI_OOBSEL_MASK);
+}
+
+void
+ai_setint(si_t *sih, int siflag)
+{
+}
+
+uint
+ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
+{
+	si_info_t *sii = SI_INFO(sih);
+	uint32 *map = (uint32 *) sii->curwrap;
+
+	if (mask || val) {
+		uint32 w = R_REG(sii->osh, map+(offset/4));
+		w &= ~mask;
+		w |= val;
+		W_REG(sii->osh, map+(offset/4), w);
+	}
+
+	return (R_REG(sii->osh, map+(offset/4)));
+}
+
+uint
+ai_corevendor(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint32 cia;
+
+	cia = cores_info->cia[sii->curidx];
+	return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
+}
+
+uint
+ai_corerev(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint32 cib;
+
+
+	cib = cores_info->cib[sii->curidx];
+	return remap_corerev(sih, (cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
+}
+
+bool
+ai_iscoreup(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+
+	ai = sii->curwrap;
+
+	return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) &&
+	        ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
+}
+
+/*
+ * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
+ * switch back to the original core, and return the new value.
+ *
+ * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci registers
+ * and (on newer pci cores) chipcommon registers.
+ */
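+/*
+ * Usage sketch (illustrative, not from the original source): passing
+ * mask == 0 and val == 0 skips the write below and makes this a pure read:
+ *
+ *	w = ai_corereg(sih, coreidx, regoff, 0, 0);
+ *
+ * Otherwise the register becomes (old & ~mask) | val and the readback
+ * value is returned.
+ */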
+uint
+ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+	uint origidx = 0;
+	uint32 *r = NULL;
+	uint w;
+	uint intr_val = 0;
+	bool fast = FALSE;
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+
+	ASSERT(GOODIDX(coreidx));
+	ASSERT(regoff < SI_CORE_SIZE);
+	ASSERT((val & ~mask) == 0);
+
+	if (coreidx >= SI_MAXCORES)
+		return 0;
+
+	if (BUSTYPE(sih->bustype) == SI_BUS) {
+		/* If internal bus, we can always get at everything */
+		fast = TRUE;
+		/* map if does not exist */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+			                            SI_CORE_SIZE);
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
+	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+			/* Chipc registers are mapped at 12KB */
+
+			fast = TRUE;
+			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
+		} else if (sii->pub.buscoreidx == coreidx) {
+			/* PCI registers are either in the last 2KB of an 8KB window
+			 * or, in PCIe and PCI rev 13, at 8KB
+			 */
+			fast = TRUE;
+			if (SI_FAST(sii))
+				r = (uint32 *)((char *)sii->curmap +
+				               PCI_16KB0_PCIREGS_OFFSET + regoff);
+			else
+				r = (uint32 *)((char *)sii->curmap +
+				               ((regoff >= SBCONFIGOFF) ?
+				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+				               regoff);
+		}
+	}
+
+	if (!fast) {
+		INTR_OFF(sii, intr_val);
+
+		/* save current core index */
+		origidx = si_coreidx(&sii->pub);
+
+		/* switch core */
+		r = (uint32*) ((uchar*) ai_setcoreidx(&sii->pub, coreidx) + regoff);
+	}
+	ASSERT(r != NULL);
+
+	/* mask and set */
+	if (mask || val) {
+		w = (R_REG(sii->osh, r) & ~mask) | val;
+		W_REG(sii->osh, r, w);
+	}
+
+	/* readback */
+	w = R_REG(sii->osh, r);
+
+	if (!fast) {
+		/* restore core index */
+		if (origidx != coreidx)
+			ai_setcoreidx(&sii->pub, origidx);
+
+		INTR_RESTORE(sii, intr_val);
+	}
+
+	return (w);
+}
+
+/*
+ * If there is no need for fiddling with interrupts or core switches (typically silicon
+ * back plane registers, pci registers and chipcommon registers), this function
+ * returns the register offset on this core to a mapped address. This address can
+ * be used for W_REG/R_REG directly.
+ *
+ * For accessing registers that would need a core switch, this function will return
+ * NULL.
+ */
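+/*
+ * Usage sketch (illustrative, not from the original source): fast-path
+ * callers try the direct address first and fall back to ai_corereg():
+ *
+ *	uint32 *r = ai_corereg_addr(sih, coreidx, regoff);
+ *	if (r != NULL)
+ *		W_REG(osh, r, v);
+ *	else
+ *		ai_corereg(sih, coreidx, regoff, ~0, v);
+ */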
+uint32 *
+ai_corereg_addr(si_t *sih, uint coreidx, uint regoff)
+{
+	uint32 *r = NULL;
+	bool fast = FALSE;
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+
+	ASSERT(GOODIDX(coreidx));
+	ASSERT(regoff < SI_CORE_SIZE);
+
+	if (coreidx >= SI_MAXCORES)
+		return 0;
+
+	if (BUSTYPE(sih->bustype) == SI_BUS) {
+		/* If internal bus, we can always get at everything */
+		fast = TRUE;
+		/* map if does not exist */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+			                            SI_CORE_SIZE);
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
+	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+			/* Chipc registers are mapped at 12KB */
+
+			fast = TRUE;
+			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
+		} else if (sii->pub.buscoreidx == coreidx) {
+			/* PCI registers are either in the last 2KB of an 8KB window
+			 * or, in PCIe and PCI rev 13, at 8KB
+			 */
+			fast = TRUE;
+			if (SI_FAST(sii))
+				r = (uint32 *)((char *)sii->curmap +
+				               PCI_16KB0_PCIREGS_OFFSET + regoff);
+			else
+				r = (uint32 *)((char *)sii->curmap +
+				               ((regoff >= SBCONFIGOFF) ?
+				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+				               regoff);
+		}
+	}
+
+	if (!fast)
+		return 0;
+
+	return (r);
+}
+
+void
+ai_core_disable(si_t *sih, uint32 bits)
+{
+	si_info_t *sii = SI_INFO(sih);
+	volatile uint32 dummy;
+	uint32 status;
+	aidmp_t *ai;
+
+
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	/* if core is already in reset, just return */
+	if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET)
+		return;
+
+	/* ensure there are no pending backplane operations */
+	SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+	/* if pending backplane ops still, try waiting longer */
+	if (status != 0) {
+		/* 300usecs was sufficient to allow backplane ops to clear for big hammer */
+		/* during driver load we may need more time */
+		SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000);
+		/* if still pending ops, continue on and try disable anyway */
+		/* this is in big hammer path, so don't call wl_reinit in this case... */
+	}
+
+	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+	dummy = R_REG(sii->osh, &ai->resetctrl);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+
+	W_REG(sii->osh, &ai->ioctrl, bits);
+	dummy = R_REG(sii->osh, &ai->ioctrl);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(10);
+}
+
+/* reset and re-enable a core
+ * inputs:
+ * bits - core specific bits that are set during and after reset sequence
+ * resetbits - core specific bits that are set only during reset sequence
+ */
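+/*
+ * Typical call (illustrative): ai_core_reset(sih, 0, 0) performs a plain
+ * reset. The sequence below asserts AIRC_RESET, enables the core clock
+ * with SICF_FGC | SICF_CLOCK_EN, releases reset, then drops SICF_FGC and
+ * the reset-only bits.
+ */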
+void
+ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+	volatile uint32 dummy;
+	uint loop_counter = 10;
+
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	/* ensure there are no pending backplane operations */
+	SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+
+	/* put core into reset state */
+	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+	OSL_DELAY(10);
+
+	/* ensure there are no pending backplane operations */
+	SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
+
+	W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN));
+	dummy = R_REG(sii->osh, &ai->ioctrl);
+	BCM_REFERENCE(dummy);
+
+	/* ensure there are no pending backplane operations */
+	SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+
+	while (R_REG(sii->osh, &ai->resetctrl) != 0 && --loop_counter != 0) {
+		/* ensure there are no pending backplane operations */
+		SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+
+		/* take core out of reset */
+		W_REG(sii->osh, &ai->resetctrl, 0);
+
+		/* ensure there are no pending backplane operations */
+		SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
+	}
+
+
+	W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
+	dummy = R_REG(sii->osh, &ai->ioctrl);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+}
+
+void
+ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+	uint32 w;
+
+
+	if (BCM47162_DMP()) {
+		SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
+		          __FUNCTION__));
+		return;
+	}
+	if (BCM5357_DMP()) {
+		SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
+		          __FUNCTION__));
+		return;
+	}
+	if (BCM4707_DMP()) {
+		SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
+			__FUNCTION__));
+		return;
+	}
+
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	ASSERT((val & ~mask) == 0);
+
+	if (mask || val) {
+		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
+		W_REG(sii->osh, &ai->ioctrl, w);
+	}
+}
+
+uint32
+ai_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+	uint32 w;
+
+	if (BCM47162_DMP()) {
+		SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
+		          __FUNCTION__));
+		return 0;
+	}
+	if (BCM5357_DMP()) {
+		SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
+		          __FUNCTION__));
+		return 0;
+	}
+	if (BCM4707_DMP()) {
+		SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
+			__FUNCTION__));
+		return 0;
+	}
+
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	ASSERT((val & ~mask) == 0);
+
+	if (mask || val) {
+		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
+		W_REG(sii->osh, &ai->ioctrl, w);
+	}
+
+	return R_REG(sii->osh, &ai->ioctrl);
+}
+
+uint32
+ai_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii = SI_INFO(sih);
+	aidmp_t *ai;
+	uint32 w;
+
+	if (BCM47162_DMP()) {
+		SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0",
+		          __FUNCTION__));
+		return 0;
+	}
+	if (BCM5357_DMP()) {
+		SI_ERROR(("%s: Accessing USB20H DMP register (iostatus) on 5357\n",
+		          __FUNCTION__));
+		return 0;
+	}
+	if (BCM4707_DMP()) {
+		SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
+			__FUNCTION__));
+		return 0;
+	}
+
+	ASSERT(GOODREGS(sii->curwrap));
+	ai = sii->curwrap;
+
+	ASSERT((val & ~mask) == 0);
+	ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+	if (mask || val) {
+		w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
+		W_REG(sii->osh, &ai->iostatus, w);
+	}
+
+	return R_REG(sii->osh, &ai->iostatus);
+}
diff --git a/drivers/staging/edison-bcm43340/bcmevent.c b/drivers/staging/edison-bcm43340/bcmevent.c
new file mode 100644
index 0000000..e5b50f9
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/bcmevent.c
@@ -0,0 +1,154 @@
+/*
+ * bcmevent read-only data shared by kernel or app layers
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmevent.c 440870 2013-12-04 05:23:45Z $
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <proto/ethernet.h>
+#include <proto/bcmeth.h>
+#include <proto/bcmevent.h>
+
+/* Use the actual name for event tracing */
+#define BCMEVENT_NAME(_event) {(_event), #_event}
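+/*
+ * Lookup sketch (illustrative; the field names are assumed from the
+ * initializer below, which pairs an event code with its stringified name):
+ *
+ *	for (i = 0; i < bcmevent_names_size; i++)
+ *		if (bcmevent_names[i].event == event_type)
+ *			name = bcmevent_names[i].name;
+ */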
+
+const bcmevent_name_t bcmevent_names[] = {
+	BCMEVENT_NAME(WLC_E_SET_SSID),
+	BCMEVENT_NAME(WLC_E_JOIN),
+	BCMEVENT_NAME(WLC_E_START),
+	BCMEVENT_NAME(WLC_E_AUTH),
+	BCMEVENT_NAME(WLC_E_AUTH_IND),
+	BCMEVENT_NAME(WLC_E_DEAUTH),
+	BCMEVENT_NAME(WLC_E_DEAUTH_IND),
+	BCMEVENT_NAME(WLC_E_ASSOC),
+	BCMEVENT_NAME(WLC_E_ASSOC_IND),
+	BCMEVENT_NAME(WLC_E_REASSOC),
+	BCMEVENT_NAME(WLC_E_REASSOC_IND),
+	BCMEVENT_NAME(WLC_E_DISASSOC),
+	BCMEVENT_NAME(WLC_E_DISASSOC_IND),
+	BCMEVENT_NAME(WLC_E_QUIET_START),
+	BCMEVENT_NAME(WLC_E_QUIET_END),
+	BCMEVENT_NAME(WLC_E_BEACON_RX),
+	BCMEVENT_NAME(WLC_E_LINK),
+	BCMEVENT_NAME(WLC_E_MIC_ERROR),
+	BCMEVENT_NAME(WLC_E_NDIS_LINK),
+	BCMEVENT_NAME(WLC_E_ROAM),
+	BCMEVENT_NAME(WLC_E_TXFAIL),
+	BCMEVENT_NAME(WLC_E_PMKID_CACHE),
+	BCMEVENT_NAME(WLC_E_RETROGRADE_TSF),
+	BCMEVENT_NAME(WLC_E_PRUNE),
+	BCMEVENT_NAME(WLC_E_AUTOAUTH),
+	BCMEVENT_NAME(WLC_E_EAPOL_MSG),
+	BCMEVENT_NAME(WLC_E_SCAN_COMPLETE),
+	BCMEVENT_NAME(WLC_E_ADDTS_IND),
+	BCMEVENT_NAME(WLC_E_DELTS_IND),
+	BCMEVENT_NAME(WLC_E_BCNSENT_IND),
+	BCMEVENT_NAME(WLC_E_BCNRX_MSG),
+	BCMEVENT_NAME(WLC_E_BCNLOST_MSG),
+	BCMEVENT_NAME(WLC_E_ROAM_PREP),
+	BCMEVENT_NAME(WLC_E_PFN_NET_FOUND),
+	BCMEVENT_NAME(WLC_E_PFN_NET_LOST),
+#if defined(IBSS_PEER_DISCOVERY_EVENT)
+	BCMEVENT_NAME(WLC_E_IBSS_ASSOC),
+#endif /* defined(IBSS_PEER_DISCOVERY_EVENT) */
+	BCMEVENT_NAME(WLC_E_RADIO),
+	BCMEVENT_NAME(WLC_E_PSM_WATCHDOG),
+	BCMEVENT_NAME(WLC_E_PROBREQ_MSG),
+	BCMEVENT_NAME(WLC_E_SCAN_CONFIRM_IND),
+	BCMEVENT_NAME(WLC_E_PSK_SUP),
+	BCMEVENT_NAME(WLC_E_COUNTRY_CODE_CHANGED),
+	BCMEVENT_NAME(WLC_E_EXCEEDED_MEDIUM_TIME),
+	BCMEVENT_NAME(WLC_E_ICV_ERROR),
+	BCMEVENT_NAME(WLC_E_UNICAST_DECODE_ERROR),
+	BCMEVENT_NAME(WLC_E_MULTICAST_DECODE_ERROR),
+	BCMEVENT_NAME(WLC_E_TRACE),
+	BCMEVENT_NAME(WLC_E_IF),
+#ifdef WLP2P
+	BCMEVENT_NAME(WLC_E_P2P_DISC_LISTEN_COMPLETE),
+#endif
+	BCMEVENT_NAME(WLC_E_RSSI),
+	BCMEVENT_NAME(WLC_E_PFN_SCAN_COMPLETE),
+	BCMEVENT_NAME(WLC_E_EXTLOG_MSG),
+#ifdef WIFI_ACT_FRAME
+	BCMEVENT_NAME(WLC_E_ACTION_FRAME),
+	BCMEVENT_NAME(WLC_E_ACTION_FRAME_RX),
+	BCMEVENT_NAME(WLC_E_ACTION_FRAME_COMPLETE),
+#endif
+#ifdef BCMWAPI_WAI
+	BCMEVENT_NAME(WLC_E_WAI_STA_EVENT),
+	BCMEVENT_NAME(WLC_E_WAI_MSG),
+#endif /* BCMWAPI_WAI */
+	BCMEVENT_NAME(WLC_E_ESCAN_RESULT),
+	BCMEVENT_NAME(WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE),
+#ifdef WLP2P
+	BCMEVENT_NAME(WLC_E_PROBRESP_MSG),
+	BCMEVENT_NAME(WLC_E_P2P_PROBREQ_MSG),
+#endif
+#ifdef PROP_TXSTATUS
+	BCMEVENT_NAME(WLC_E_FIFO_CREDIT_MAP),
+#endif
+	BCMEVENT_NAME(WLC_E_WAKE_EVENT),
+	BCMEVENT_NAME(WLC_E_DCS_REQUEST),
+	BCMEVENT_NAME(WLC_E_RM_COMPLETE),
+#ifdef WLMEDIA_HTSF
+	BCMEVENT_NAME(WLC_E_HTSFSYNC),
+#endif
+	BCMEVENT_NAME(WLC_E_OVERLAY_REQ),
+	BCMEVENT_NAME(WLC_E_CSA_COMPLETE_IND),
+	BCMEVENT_NAME(WLC_E_EXCESS_PM_WAKE_EVENT),
+	BCMEVENT_NAME(WLC_E_PFN_SCAN_NONE),
+	BCMEVENT_NAME(WLC_E_PFN_SCAN_ALLGONE),
+#ifdef SOFTAP
+	BCMEVENT_NAME(WLC_E_GTK_PLUMBED),
+#endif
+	BCMEVENT_NAME(WLC_E_ASSOC_REQ_IE),
+	BCMEVENT_NAME(WLC_E_ASSOC_RESP_IE),
+	BCMEVENT_NAME(WLC_E_ACTION_FRAME_RX_NDIS),
+	BCMEVENT_NAME(WLC_E_BEACON_FRAME_RX),
+#ifdef WLTDLS
+	BCMEVENT_NAME(WLC_E_TDLS_PEER_EVENT),
+#endif /* WLTDLS */
+	BCMEVENT_NAME(WLC_E_NATIVE),
+#ifdef WLPKTDLYSTAT
+	BCMEVENT_NAME(WLC_E_PKTDELAY_IND),
+#endif /* WLPKTDLYSTAT */
+	BCMEVENT_NAME(WLC_E_SERVICE_FOUND),
+	BCMEVENT_NAME(WLC_E_GAS_FRAGMENT_RX),
+	BCMEVENT_NAME(WLC_E_GAS_COMPLETE),
+	BCMEVENT_NAME(WLC_E_P2PO_ADD_DEVICE),
+	BCMEVENT_NAME(WLC_E_P2PO_DEL_DEVICE),
+#ifdef WLWNM
+	BCMEVENT_NAME(WLC_E_WNM_STA_SLEEP),
+#endif /* WLWNM */
+#if defined(WL_PROXDETECT)
+	BCMEVENT_NAME(WLC_E_PROXD),
+#endif
+	BCMEVENT_NAME(WLC_E_CCA_CHAN_QUAL),
+	BCMEVENT_NAME(WLC_E_BSSID),
+#ifdef PROP_TXSTATUS
+	BCMEVENT_NAME(WLC_E_BCMC_CREDIT_SUPPORT),
+#endif
+	BCMEVENT_NAME(WLC_E_TXFAIL_THRESH),
+};
+
+const int bcmevent_names_size = ARRAYSIZE(bcmevent_names);
diff --git a/drivers/staging/edison-bcm43340/bcmsdh.c b/drivers/staging/edison-bcm43340/bcmsdh.c
new file mode 100644
index 0000000..09a8a3e
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/bcmsdh.c
@@ -0,0 +1,734 @@
+/*
+ *  BCMSDH interface glue
+ *  implement bcmsdh API for SDIOH driver
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh.c 455573 2014-02-14 17:49:31Z $
+ */
+
+/**
+ * @file bcmsdh.c
+ */
+
+/* ****************** BCMSDH Interface Functions *************************** */
+
+#include <typedefs.h>
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <hndsoc.h>
+#include <siutils.h>
+#include <osl.h>
+
+#include <bcmsdh.h>	/* BRCM API for SDIO clients (such as wl, dhd) */
+#include <bcmsdbus.h>	/* common SDIO/controller interface */
+#include <sbsdio.h>	/* SDIO device core hardware definitions. */
+#include <sdio.h>	/* SDIO Device and Protocol Specs */
+
+const uint bcmsdh_msglevel = BCMSDH_ERROR_VAL;
+
+/* local copy of bcm sd handler */
+bcmsdh_info_t *l_bcmsdh = NULL;
+
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+extern int
+sdioh_enable_hw_oob_intr(void *sdioh, bool enable);
+
+void
+bcmsdh_enable_hw_oob_intr(bcmsdh_info_t *sdh, bool enable)
+{
+	sdioh_enable_hw_oob_intr(sdh->sdioh, enable);
+}
+#endif
+
+/* Attach BCMSDH layer to SDIO Host Controller Driver
+ *
+ * @param osh OSL Handle.
+ * @param sdioh Handle to the lower-layer SDIO host controller driver.
+ * @param regsva Out parameter; set to the backplane enumeration base (SI_ENUM_BASE).
+ *
+ * @return bcmsdh_info_t Handle to BCMSDH context, or NULL on allocation failure.
+ */
+bcmsdh_info_t *
+bcmsdh_attach(osl_t *osh, void *sdioh, ulong *regsva)
+{
+	bcmsdh_info_t *bcmsdh;
+
+	if ((bcmsdh = (bcmsdh_info_t *)MALLOC(osh, sizeof(bcmsdh_info_t))) == NULL) {
+		BCMSDH_ERROR(("bcmsdh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+		return NULL;
+	}
+	bzero((char *)bcmsdh, sizeof(bcmsdh_info_t));
+	bcmsdh->sdioh = sdioh;
+	bcmsdh->osh = osh;
+	bcmsdh->init_success = TRUE;
+	*regsva = SI_ENUM_BASE;
+
+	/* Report the BAR, to fix if needed */
+	bcmsdh->sbwad = SI_ENUM_BASE;
+
+	/* save the handler locally */
+	l_bcmsdh = bcmsdh;
+
+	return bcmsdh;
+}
+
+int
+bcmsdh_detach(osl_t *osh, void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (bcmsdh != NULL) {
+		MFREE(osh, bcmsdh, sizeof(bcmsdh_info_t));
+	}
+
+	l_bcmsdh = NULL;
+
+	return 0;
+}
+
+int
+bcmsdh_iovar_op(void *sdh, const char *name,
+                void *params, int plen, void *arg, int len, bool set)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	return sdioh_iovar_op(bcmsdh->sdioh, name, params, plen, arg, len, set);
+}
+
+bool
+bcmsdh_intr_query(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	bool on;
+
+	ASSERT(bcmsdh);
+	status = sdioh_interrupt_query(bcmsdh->sdioh, &on);
+	/* on success report the queried state; on failure assume no interrupt */
+	if (SDIOH_API_SUCCESS(status))
+		return on;
+	else
+		return FALSE;
+}
+
+int
+bcmsdh_intr_enable(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_set(bcmsdh->sdioh, TRUE);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_disable(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_set(bcmsdh->sdioh, FALSE);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_register(bcmsdh->sdioh, fn, argh);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_dereg(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_deregister(bcmsdh->sdioh);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+#if defined(DHD_DEBUG)
+bool
+bcmsdh_intr_pending(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	ASSERT(sdh);
+	return sdioh_interrupt_pending(bcmsdh->sdioh);
+}
+#endif
+
+
+int
+bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+{
+	ASSERT(sdh);
+
+	/* don't support yet */
+	return BCME_UNSUPPORTED;
+}
+
+/**
+ * Read from SDIO Configuration Space
+ * @param sdh SDIO Host context.
+ * @param fnc_num Function number to read from.
+ * @param addr Address to read from.
+ * @param err Optional error return: 0 on success, BCME_SDIO_ERROR on failure.
+ * @return value read from SDIO configuration space.
+ */
+uint8
+bcmsdh_cfg_read(void *sdh, uint fnc_num, uint32 addr, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	int32 retry = 0;
+#endif
+	uint8 data = 0;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	do {
+		if (retry)	/* wait 1 ms for the bus to settle down */
+			OSL_DELAY(BCMSDH_RW_RETRY);
+#endif
+	status = sdioh_cfg_read(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	} while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+#endif
+	if (err)
+		*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
+	            fnc_num, addr, data));
+
+	return data;
+}
+
+void
+bcmsdh_cfg_write(void *sdh, uint fnc_num, uint32 addr, uint8 data, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	int32 retry = 0;
+#endif
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	do {
+		if (retry)	/* wait 1 ms for the bus to settle down */
+			OSL_DELAY(BCMSDH_RW_RETRY);
+#endif
+	status = sdioh_cfg_write(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	} while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+#endif
+	if (err)
+		*err = SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR;
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
+	            fnc_num, addr, data));
+}
+
+uint32
+bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint32 data = 0;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_READ, fnc_num,
+	                            addr, &data, 4);
+
+	if (err)
+		*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__,
+	            fnc_num, addr, data));
+
+	return data;
+}
+
+void
+bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, fnc_num,
+	                            addr, &data, 4);
+
+	if (err)
+		*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__, fnc_num,
+	             addr, data));
+}
+
+
+int
+bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+
+	uint8 *tmp_buf, *tmp_ptr;
+	uint8 *ptr;
+	bool ascii = func & ~0xf;
+	func &= 0x7;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+	ASSERT(cis);
+	ASSERT(length <= SBSDIO_CIS_SIZE_LIMIT);
+
+	status = sdioh_cis_read(bcmsdh->sdioh, func, cis, length);
+
+	if (ascii) {
+		/* Move binary bits to tmp and format them into the provided buffer. */
+		if ((tmp_buf = (uint8 *)MALLOC(bcmsdh->osh, length)) == NULL) {
+			BCMSDH_ERROR(("%s: out of memory\n", __FUNCTION__));
+			return BCME_NOMEM;
+		}
+		bcopy(cis, tmp_buf, length);
+		for (tmp_ptr = tmp_buf, ptr = cis; ptr < (cis + length - 4); tmp_ptr++) {
+			ptr += snprintf((char*)ptr, (cis + length - ptr - 4),
+				"%.2x ", *tmp_ptr & 0xff);
+			if ((((tmp_ptr - tmp_buf) + 1) & 0xf) == 0)
+				ptr += snprintf((char *)ptr, (cis + length - ptr - 4), "\n");
+		}
+		MFREE(bcmsdh->osh, tmp_buf, length);
+	}
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+
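+/*
+ * Program the SDIO function-1 backplane address window. As the shifts
+ * below show, address bits [15:8], [23:16] and [31:24] go to the
+ * SBADDRLOW/MID/HIGH window registers, while the low offset bits travel
+ * in each individual access. The window base is cached in sbwad so
+ * repeated accesses within the same window skip the three config writes.
+ */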
+int
+bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set)
+{
+	int err = 0;
+	uint bar0 = address & ~SBSDIO_SB_OFT_ADDR_MASK;
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (bar0 != bcmsdh->sbwad || force_set) {
+		bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+			(address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+		if (!err)
+			bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
+				(address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
+		if (!err)
+			bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
+				(address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);
+
+		if (!err)
+			bcmsdh->sbwad = bar0;
+		else
+			/* invalidate cached window var */
+			bcmsdh->sbwad = 0;
+
+	}
+
+	return err;
+}
+
+uint32
+bcmsdh_reg_read(void *sdh, uint32 addr, uint size)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint32 word = 0;
+
+	BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, ", __FUNCTION__, addr));
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	if (bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE))
+		return 0xFFFFFFFF;
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	if (size == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL,
+		SDIOH_READ, SDIO_FUNC_1, addr, &word, size);
+
+	bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));
+
+	BCMSDH_INFO(("uint32data = 0x%x\n", word));
+
+	/* if ok, return appropriately masked word */
+	if (SDIOH_API_SUCCESS(status)) {
+		switch (size) {
+			case sizeof(uint8):
+				return (word & 0xff);
+			case sizeof(uint16):
+				return (word & 0xffff);
+			case sizeof(uint32):
+				return word;
+			default:
+				bcmsdh->regfail = TRUE;
+
+		}
+	}
+
+	/* otherwise, bad sdio access or invalid size */
+	BCMSDH_ERROR(("%s: error reading addr 0x%04x size %d\n", __FUNCTION__, addr, size));
+	return 0xFFFFFFFF;
+}
+
+uint32
+bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	int err = 0;
+
+	BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, uint%ddata = 0x%x\n",
+	             __FUNCTION__, addr, size*8, data));
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
+		return err;
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	if (size == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, SDIO_FUNC_1,
+	                            addr, &data, size);
+	bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));
+
+	if (SDIOH_API_SUCCESS(status))
+		return 0;
+
+	BCMSDH_ERROR(("%s: error writing 0x%08x to addr 0x%04x size %d\n",
+	              __FUNCTION__, data, addr, size));
+	return 0xFFFFFFFF;
+}
+
+bool
+bcmsdh_regfail(void *sdh)
+{
+	return ((bcmsdh_info_t *)sdh)->regfail;
+}
+
+int
+bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags,
+                uint8 *buf, uint nbytes, void *pkt,
+                bcmsdh_cmplt_fn_t complete_fn, void *handle)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint incr_fix;
+	uint width;
+	int err = 0;
+
+	ASSERT(bcmsdh);
+	ASSERT(bcmsdh->init_success);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+	             __FUNCTION__, fn, addr, nbytes));
+
+	/* Async not implemented yet */
+	ASSERT(!(flags & SDIO_REQ_ASYNC));
+	if (flags & SDIO_REQ_ASYNC)
+		return BCME_UNSUPPORTED;
+
+	if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
+		return err;
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+	incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+	if (width == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
+	                              SDIOH_READ, fn, addr, width, nbytes, buf, pkt);
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+}
+
+int
+bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags,
+                uint8 *buf, uint nbytes, void *pkt,
+                bcmsdh_cmplt_fn_t complete_fn, void *handle)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint incr_fix;
+	uint width;
+	int err = 0;
+
+	ASSERT(bcmsdh);
+	ASSERT(bcmsdh->init_success);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+	            __FUNCTION__, fn, addr, nbytes));
+
+	/* Async not implemented yet */
+	ASSERT(!(flags & SDIO_REQ_ASYNC));
+	if (flags & SDIO_REQ_ASYNC)
+		return BCME_UNSUPPORTED;
+
+	if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
+		return err;
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+	incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+	if (width == 4)
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
+	                              SDIOH_WRITE, fn, addr, width, nbytes, buf, pkt);
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status = 0;
+
+	ASSERT(bcmsdh);
+	ASSERT(bcmsdh->init_success);
+	ASSERT((addr & SBSDIO_SBWINDOW_MASK) == 0);
+
+#ifdef BOARD_INTEL
+	if (!rw) {
+		/* The MMC stack function sdio_memcpy_fromio() returns an error
+		 * for an F1 read when the size is greater than the F1 block size.
+		 * There seems to be a mismatch/error somewhere in "DATA INCR" mode,
+		 * so split the single read into multiple block-sized reads, since
+		 * reading one block at a time succeeds.
+		 * Note: bcmsdh_rwdata() is not used for data packet transactions.
+		 */
+		int f1_blksize = 64;
+		uint32 bytes, offset, size;
+
+		for (bytes = 0; bytes < nbytes; bytes += f1_blksize) {
+			offset = (addr + bytes) & SBSDIO_SB_OFT_ADDR_MASK;
+			offset |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+			size = (bytes + f1_blksize <= nbytes) ? f1_blksize : (nbytes % f1_blksize);
+
+			status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, SDIOH_DATA_INC,
+			                              (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
+			                              offset, 4, size, buf+bytes, NULL);
+			if (!SDIOH_API_SUCCESS(status))
+				break;
+		}
+	}
+	else
+#endif
+	{
+
+		addr &= SBSDIO_SB_OFT_ADDR_MASK;
+		addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+		status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, SDIOH_DATA_INC,
+		                              (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
+		                              addr, 4, nbytes, buf, NULL);
+	}
+
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_abort(void *sdh, uint fn)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_abort(bcmsdh->sdioh, fn);
+}
+
+int
+bcmsdh_start(void *sdh, int stage)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_start(bcmsdh->sdioh, stage);
+}
+
+int
+bcmsdh_stop(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_stop(bcmsdh->sdioh);
+}
+
+int
+bcmsdh_waitlockfree(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_waitlockfree(bcmsdh->sdioh);
+}
+
+
+int
+bcmsdh_query_device(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	bcmsdh->vendevid = (VENDOR_BROADCOM << 16) | 0;
+	return (bcmsdh->vendevid);
+}
+
+uint
+bcmsdh_query_iofnum(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	return (sdioh_query_iofnum(bcmsdh->sdioh));
+}
+
+int
+bcmsdh_reset(bcmsdh_info_t *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	return sdioh_sdio_reset(bcmsdh->sdioh);
+}
+
+void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh)
+{
+	ASSERT(sdh);
+	return sdh->sdioh;
+}
+
+/* Function to pass device-status bits to DHD. */
+uint32
+bcmsdh_get_dstatus(void *sdh)
+{
+	return 0;
+}
+uint32
+bcmsdh_cur_sbwad(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	return (bcmsdh->sbwad);
+}
+
+void
+bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev)
+{
+	return;
+}
+
+
+int
+bcmsdh_sleep(void *sdh, bool enab)
+{
+#ifdef SDIOH_SLEEP_ENABLED
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+	return sdioh_sleep(sd, enab);
+#else
+	return BCME_UNSUPPORTED;
+#endif
+}
+
+int
+bcmsdh_gpio_init(void *sdh)
+{
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+	return sdioh_gpio_init(sd);
+}
+
+bool
+bcmsdh_gpioin(void *sdh, uint32 gpio)
+{
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+	return sdioh_gpioin(sd, gpio);
+}
+
+int
+bcmsdh_gpioouten(void *sdh, uint32 gpio)
+{
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+	return sdioh_gpioouten(sd, gpio);
+}
+
+int
+bcmsdh_gpioout(void *sdh, uint32 gpio, bool enab)
+{
+	bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
+	sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
+
+	return sdioh_gpioout(sd, gpio, enab);
+}
diff --git a/drivers/staging/edison-bcm43340/bcmsdh_linux.c b/drivers/staging/edison-bcm43340/bcmsdh_linux.c
new file mode 100644
index 0000000..541a9c3
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/bcmsdh_linux.c
@@ -0,0 +1,498 @@
+/*
+ * SDIO access interface for drivers - linux specific (pci only)
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_linux.c 461443 2014-03-12 02:40:59Z $
+ */
+
+/**
+ * @file bcmsdh_linux.c
+ */
+
+#define __UNDEF_NO_VERSION__
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <linux/pci.h>
+#include <linux/completion.h>
+#include <asm/intel-mid.h>
+
+#include <osl.h>
+#include <pcicfg.h>
+#include <bcmdefs.h>
+#include <bcmdevs.h>
+#include <linux/irq.h>
+extern void dhdsdio_isr(void * args);
+#include <bcmutils.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#if defined(CONFIG_ARCH_ODIN)
+#include <linux/platform_data/gpio-odin.h>
+#endif /* defined(CONFIG_ARCH_ODIN) */
+
+#include <dhd_linux.h>
+
+#include <asm/io_apic.h>
+
+/* driver info, initialized when bcmsdh_register is called */
+static bcmsdh_driver_t drvinfo = {NULL, NULL, NULL, NULL};
+
+typedef enum {
+	DHD_INTR_INVALID = 0,
+	DHD_INTR_INBAND,
+	DHD_INTR_HWOOB,
+	DHD_INTR_SWOOB
+} DHD_HOST_INTR_TYPE;
+
+/* The BCMSDH module comprises a generic part (bcmsdh.c) and an OS-specific layer
+ * (e.g. bcmsdh_linux.c). Put all OS-specific variables (e.g. IRQ number and flags)
+ * here rather than in the common structure bcmsdh_info, which only keeps a handle
+ * (os_cxt) to this structure.
+ */
+typedef struct bcmsdh_os_info {
+	DHD_HOST_INTR_TYPE	intr_type;
+	int			oob_irq_num;	/* valid when hardware or software oob in use */
+	unsigned long		oob_irq_flags;	/* valid when hardware or software oob in use */
+	bool			oob_irq_registered;
+	bool			oob_irq_enabled;
+	bool			oob_irq_wake_enabled;
+	spinlock_t		oob_irq_spinlock;
+	bcmsdh_cb_fn_t		oob_irq_handler;
+	void			*oob_irq_handler_context;
+	void			*context;	/* context returned from upper layer */
+	void			*sdioh;		/* handle to lower layer (sdioh) */
+	void			*dev;		/* handle to the underlying device */
+	bool			dev_wake_enabled;
+} bcmsdh_os_info_t;
+
+/* debugging macros */
+#define SDLX_MSG(x)
+
+/**
+ * Checks to see if vendor and device IDs match a supported SDIO Host Controller.
+ */
+bool
+bcmsdh_chipmatch(uint16 vendor, uint16 device)
+{
+	/* Add other vendors and devices as required */
+
+#ifdef BCMSDIOH_STD
+	/* Check for Arasan host controller */
+	if (vendor == VENDOR_SI_IMAGE) {
+		return (TRUE);
+	}
+	/* Check for BRCM 27XX Standard host controller */
+	if (device == BCM27XX_SDIOH_ID && vendor == VENDOR_BROADCOM) {
+		return (TRUE);
+	}
+	/* Check for BRCM Standard host controller */
+	if (device == SDIOH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+		return (TRUE);
+	}
+	/* Check for TI PCIxx21 Standard host controller */
+	if (device == PCIXX21_SDIOH_ID && vendor == VENDOR_TI) {
+		return (TRUE);
+	}
+	if (device == PCIXX21_SDIOH0_ID && vendor == VENDOR_TI) {
+		return (TRUE);
+	}
+	/* Ricoh R5C822 Standard SDIO Host */
+	if (device == R5C822_SDIOH_ID && vendor == VENDOR_RICOH) {
+		return (TRUE);
+	}
+	/* JMicron Standard SDIO Host */
+	if (device == JMICRON_SDIOH_ID && vendor == VENDOR_JMICRON) {
+		return (TRUE);
+	}
+
+#endif /* BCMSDIOH_STD */
+#ifdef BCMSDIOH_SPI
+	/* This is the PciSpiHost. */
+	if (device == SPIH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+		printf("Found PCI SPI Host Controller\n");
+		return (TRUE);
+	}
+
+#endif /* BCMSDIOH_SPI */
+
+	return (FALSE);
+}
+
+void* bcmsdh_probe(osl_t *osh, void *dev, void *sdioh, void *adapter_info, uint bus_type,
+	uint bus_num, uint slot_num)
+{
+	ulong regs;
+	bcmsdh_info_t *bcmsdh;
+	uint32 vendevid;
+	bcmsdh_os_info_t *bcmsdh_osinfo = NULL;
+
+	bcmsdh = bcmsdh_attach(osh, sdioh, &regs);
+	if (bcmsdh == NULL) {
+		SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+		goto err;
+	}
+	bcmsdh_osinfo = MALLOC(osh, sizeof(bcmsdh_os_info_t));
+	if (bcmsdh_osinfo == NULL) {
+		SDLX_MSG(("%s: failed to allocate bcmsdh_os_info_t\n", __FUNCTION__));
+		goto err;
+	}
+	bzero((char *)bcmsdh_osinfo, sizeof(bcmsdh_os_info_t));
+	bcmsdh->os_cxt = bcmsdh_osinfo;
+	bcmsdh_osinfo->sdioh = sdioh;
+	bcmsdh_osinfo->dev = dev;
+	osl_set_bus_handle(osh, bcmsdh);
+
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
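+	/* Without Android wakelocks, register the device as a kernel PM wakeup
+	 * source instead.
+	 */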
+	if (dev && device_init_wakeup(dev, true) == 0)
+		bcmsdh_osinfo->dev_wake_enabled = TRUE;
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+
+#if defined(OOB_INTR_ONLY)
+	spin_lock_init(&bcmsdh_osinfo->oob_irq_spinlock);
+	/* Get customer-specific OOB IRQ parameters: IRQ number and IRQ flags */
+	bcmsdh_osinfo->oob_irq_num = dhd_customer_oob_irq_map(adapter_info,
+		&bcmsdh_osinfo->oob_irq_flags);
+	if (bcmsdh_osinfo->oob_irq_num < 0) {
+		SDLX_MSG(("%s: Host OOB irq is not defined\n", __FUNCTION__));
+		goto err;
+	}
+#endif /* defined(OOB_INTR_ONLY) */
+
+	/* Read the vendor/device ID from the CIS */
+	vendevid = bcmsdh_query_device(bcmsdh);
+	/* try to attach to the target device */
+	bcmsdh_osinfo->context = drvinfo.probe((vendevid >> 16), (vendevid & 0xFFFF), bus_num,
+		slot_num, 0, bus_type, (void *)regs, osh, bcmsdh);
+	if (bcmsdh_osinfo->context == NULL) {
+		SDLX_MSG(("%s: device attach failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	return bcmsdh;
+
+	/* error handling */
+err:
+	if (bcmsdh != NULL)
+		bcmsdh_detach(osh, bcmsdh);
+	if (bcmsdh_osinfo != NULL)
+		MFREE(osh, bcmsdh_osinfo, sizeof(bcmsdh_os_info_t));
+	return NULL;
+}
+
+int bcmsdh_remove(bcmsdh_info_t *bcmsdh)
+{
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+#if defined(OOB_INTR_ONLY)
+	if (bcmsdh_osinfo->oob_irq_num > 0)
+		dhd_customer_oob_irq_unmap();
+#endif
+
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	if (bcmsdh_osinfo->dev)
+		device_init_wakeup(bcmsdh_osinfo->dev, false);
+	bcmsdh_osinfo->dev_wake_enabled = FALSE;
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+
+	drvinfo.remove(bcmsdh_osinfo->context);
+	MFREE(bcmsdh->osh, bcmsdh->os_cxt, sizeof(bcmsdh_os_info_t));
+	bcmsdh_detach(bcmsdh->osh, bcmsdh);
+
+	return 0;
+}
+
+int bcmsdh_suspend(bcmsdh_info_t *bcmsdh)
+{
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	if (drvinfo.suspend && drvinfo.suspend(bcmsdh_osinfo->context))
+		return -EBUSY;
+	return 0;
+}
+
+int bcmsdh_resume(bcmsdh_info_t *bcmsdh)
+{
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	if (drvinfo.resume)
+		return drvinfo.resume(bcmsdh_osinfo->context);
+	return 0;
+}
+
+extern int bcmsdh_register_client_driver(void);
+extern void bcmsdh_unregister_client_driver(void);
+extern int sdio_func_reg_notify(void* semaphore);
+extern void sdio_func_unreg_notify(void);
+
+#if defined(BCMLXSDMMC)
+int bcmsdh_reg_sdio_notify(void* semaphore)
+{
+	return sdio_func_reg_notify(semaphore);
+}
+
+void bcmsdh_unreg_sdio_notify(void)
+{
+	sdio_func_unreg_notify();
+}
+#endif /* defined(BCMLXSDMMC) */
+
+int
+bcmsdh_register(bcmsdh_driver_t *driver)
+{
+	int error = 0;
+
+	drvinfo = *driver;
+	SDLX_MSG(("%s: register client driver\n", __FUNCTION__));
+	error = bcmsdh_register_client_driver();
+	if (error)
+		SDLX_MSG(("%s: failed %d\n", __FUNCTION__, error));
+
+	return error;
+}
+
+void
+bcmsdh_unregister(void)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+		if (bcmsdh_pci_driver.node.next == NULL)
+			return;
+#endif
+
+	bcmsdh_unregister_client_driver();
+}
+
+void bcmsdh_dev_pm_stay_awake(bcmsdh_info_t *bcmsdh)
+{
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+	pm_stay_awake(bcmsdh_osinfo->dev);
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+}
+
+void bcmsdh_dev_relax(bcmsdh_info_t *bcmsdh)
+{
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+	pm_relax(bcmsdh_osinfo->dev);
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+}
+
+bool bcmsdh_dev_pm_enabled(bcmsdh_info_t *bcmsdh)
+{
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	return bcmsdh_osinfo->dev_wake_enabled;
+}
+
+#if defined(OOB_INTR_ONLY)
+void bcmsdh_oob_intr_set(bcmsdh_info_t *bcmsdh, bool enable)
+{
+	unsigned long flags;
+	bcmsdh_os_info_t *bcmsdh_osinfo;
+
+	if (!bcmsdh)
+		return;
+
+	bcmsdh_osinfo = bcmsdh->os_cxt;
+	spin_lock_irqsave(&bcmsdh_osinfo->oob_irq_spinlock, flags);
+	if (bcmsdh_osinfo->oob_irq_enabled != enable) {
+		if (enable)
+			enable_irq(bcmsdh_osinfo->oob_irq_num);
+		else
+			disable_irq_nosync(bcmsdh_osinfo->oob_irq_num);
+		bcmsdh_osinfo->oob_irq_enabled = enable;
+	}
+	spin_unlock_irqrestore(&bcmsdh_osinfo->oob_irq_spinlock, flags);
+}
+
+static irqreturn_t wlan_oob_irq(int irq, void *dev_id)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)dev_id;
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+#if defined(HW_OOB)
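+	/* Mask the OOB interrupt until the device interrupt source has been
+	 * serviced; the DHD layer re-enables it via bcmsdh_oob_intr_set().
+	 */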
+	bcmsdh_oob_intr_set(bcmsdh, FALSE);
+#endif /* defined(HW_OOB) */
+	bcmsdh_osinfo->oob_irq_handler(bcmsdh_osinfo->oob_irq_handler_context);
+
+	return IRQ_HANDLED;
+}
+
+int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handler,
+	void* oob_irq_handler_context)
+{
+	int err = 0;
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	SDLX_MSG(("%s: Enter\n", __FUNCTION__));
+	if (bcmsdh_osinfo->oob_irq_registered) {
+		SDLX_MSG(("%s: irq is already registered\n", __FUNCTION__));
+		return -EBUSY;
+	}
+#if defined(HW_OOB)
+	bcmsdh_osinfo->oob_irq_flags |= IRQF_ONESHOT;
+#endif /* defined(HW_OOB) */
+	SDLX_MSG(("%s OOB irq=%d flags=%X \n", __FUNCTION__,
+		(int)bcmsdh_osinfo->oob_irq_num, (int)bcmsdh_osinfo->oob_irq_flags));
+	bcmsdh_osinfo->oob_irq_handler = oob_irq_handler;
+	bcmsdh_osinfo->oob_irq_handler_context = oob_irq_handler_context;
+#if defined(CONFIG_ARCH_ODIN)
+	err = odin_gpio_sms_request_irq(bcmsdh_osinfo->oob_irq_num, wlan_oob_irq,
+		bcmsdh_osinfo->oob_irq_flags, "bcmsdh_sdmmc", bcmsdh);
+#else
+	err = request_irq(bcmsdh_osinfo->oob_irq_num, wlan_oob_irq,
+		bcmsdh_osinfo->oob_irq_flags, "bcmsdh_sdmmc", bcmsdh);
+#endif /* defined(CONFIG_ARCH_ODIN) */
+	if (err) {
+		SDLX_MSG(("%s: request_irq failed with %d\n", __FUNCTION__, err));
+		return err;
+	}
+
+	err = enable_irq_wake(bcmsdh_osinfo->oob_irq_num);
+	if (!err)
+		bcmsdh_osinfo->oob_irq_wake_enabled = TRUE;
+	bcmsdh_osinfo->oob_irq_enabled = TRUE;
+	bcmsdh_osinfo->oob_irq_registered = TRUE;
+	return err;
+}
+
+void bcmsdh_set_irq(int flag, bcmsdh_info_t *bcmsdh)
+{
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	if (bcmsdh_osinfo->oob_irq_registered && bcmsdh_osinfo->oob_irq_enabled != flag) {
+		SDLX_MSG(("%s Flag = %d", __FUNCTION__, flag));
+		bcmsdh_osinfo->oob_irq_enabled = flag;
+		bcmsdh_osinfo->oob_irq_wake_enabled = flag;
+		if (flag) {
+			enable_irq(bcmsdh_osinfo->oob_irq_num);
+			enable_irq_wake(bcmsdh_osinfo->oob_irq_num);
+		} else {
+			disable_irq_wake(bcmsdh_osinfo->oob_irq_num);
+			disable_irq(bcmsdh_osinfo->oob_irq_num);
+		}
+	}
+}
+
+
+void bcmsdh_oob_intr_unregister(bcmsdh_info_t *bcmsdh)
+{
+	int err = 0;
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	SDLX_MSG(("%s: Enter\n", __FUNCTION__));
+	if (!bcmsdh_osinfo->oob_irq_registered) {
+		SDLX_MSG(("%s: irq is not registered\n", __FUNCTION__));
+		return;
+	}
+	if (bcmsdh_osinfo->oob_irq_wake_enabled) {
+		err = disable_irq_wake(bcmsdh_osinfo->oob_irq_num);
+		if (!err)
+			bcmsdh_osinfo->oob_irq_wake_enabled = FALSE;
+	}
+	if (bcmsdh_osinfo->oob_irq_enabled) {
+		disable_irq(bcmsdh_osinfo->oob_irq_num);
+		bcmsdh_osinfo->oob_irq_enabled = FALSE;
+	}
+	free_irq(bcmsdh_osinfo->oob_irq_num, bcmsdh);
+	bcmsdh_osinfo->oob_irq_registered = FALSE;
+}
+#endif /* defined(OOB_INTR_ONLY) */
+
+/* Module parameters specific to each host-controller driver */
+
+extern uint sd_msglevel;	/* Debug message level */
+module_param(sd_msglevel, uint, 0);
+
+extern uint sd_power;	/* 0 = SD Power OFF, 1 = SD Power ON. */
+module_param(sd_power, uint, 0);
+
+extern uint sd_clock;	/* SD Clock Control, 0 = SD Clock OFF, 1 = SD Clock ON */
+module_param(sd_clock, uint, 0);
+
+extern uint sd_divisor;	/* Divisor (-1 means external clock) */
+module_param(sd_divisor, uint, 0);
+
+extern uint sd_sdmode;	/* Default is SD4, 0=SPI, 1=SD1, 2=SD4 */
+module_param(sd_sdmode, uint, 0);
+
+extern uint sd_hiok;	/* Ok to use hi-speed mode */
+module_param(sd_hiok, uint, 0);
+
+extern uint sd_f2_blocksize;
+module_param(sd_f2_blocksize, uint, 0);
+
+#ifdef BCMSDIOH_STD
+extern int sd_uhsimode;
+module_param(sd_uhsimode, int, 0);
+extern uint sd_tuning_period;
+module_param(sd_tuning_period, uint, 0);
+extern int sd_delay_value;
+module_param(sd_delay_value, int, 0);
+
+/* SDIO Drive Strength for UHSI mode specific to SDIO3.0 */
+extern char dhd_sdiod_uhsi_ds_override[2];
+module_param_string(dhd_sdiod_uhsi_ds_override, dhd_sdiod_uhsi_ds_override, 2, 0);
+
+#endif
+
+#ifdef BCMSDH_MODULE
+EXPORT_SYMBOL(bcmsdh_attach);
+EXPORT_SYMBOL(bcmsdh_detach);
+EXPORT_SYMBOL(bcmsdh_intr_query);
+EXPORT_SYMBOL(bcmsdh_intr_enable);
+EXPORT_SYMBOL(bcmsdh_intr_disable);
+EXPORT_SYMBOL(bcmsdh_intr_reg);
+EXPORT_SYMBOL(bcmsdh_intr_dereg);
+
+#if defined(DHD_DEBUG)
+EXPORT_SYMBOL(bcmsdh_intr_pending);
+#endif
+
+EXPORT_SYMBOL(bcmsdh_devremove_reg);
+EXPORT_SYMBOL(bcmsdh_cfg_read);
+EXPORT_SYMBOL(bcmsdh_cfg_write);
+EXPORT_SYMBOL(bcmsdh_cis_read);
+EXPORT_SYMBOL(bcmsdh_reg_read);
+EXPORT_SYMBOL(bcmsdh_reg_write);
+EXPORT_SYMBOL(bcmsdh_regfail);
+EXPORT_SYMBOL(bcmsdh_send_buf);
+EXPORT_SYMBOL(bcmsdh_recv_buf);
+
+EXPORT_SYMBOL(bcmsdh_rwdata);
+EXPORT_SYMBOL(bcmsdh_abort);
+EXPORT_SYMBOL(bcmsdh_query_device);
+EXPORT_SYMBOL(bcmsdh_query_iofnum);
+EXPORT_SYMBOL(bcmsdh_iovar_op);
+EXPORT_SYMBOL(bcmsdh_register);
+EXPORT_SYMBOL(bcmsdh_unregister);
+EXPORT_SYMBOL(bcmsdh_chipmatch);
+EXPORT_SYMBOL(bcmsdh_reset);
+EXPORT_SYMBOL(bcmsdh_waitlockfree);
+
+EXPORT_SYMBOL(bcmsdh_get_dstatus);
+EXPORT_SYMBOL(bcmsdh_cfg_read_word);
+EXPORT_SYMBOL(bcmsdh_cfg_write_word);
+EXPORT_SYMBOL(bcmsdh_cur_sbwad);
+EXPORT_SYMBOL(bcmsdh_chipinfo);
+
+#endif /* BCMSDH_MODULE */
diff --git a/drivers/staging/edison-bcm43340/bcmsdh_sdmmc.c b/drivers/staging/edison-bcm43340/bcmsdh_sdmmc.c
new file mode 100644
index 0000000..eb097d5
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/bcmsdh_sdmmc.c
@@ -0,0 +1,1453 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc.c 457662 2014-02-24 15:07:28Z $
+ */
+#include <typedefs.h>
+
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <osl.h>
+#include <sdio.h>	/* SDIO Device and Protocol Specs */
+#include <sdioh.h>	/* Standard SDIO Host Controller Specification */
+#include <bcmsdbus.h>	/* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h>	/* ioctl/iovars */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+#include <linux/suspend.h>
+extern volatile bool dhd_mmc_suspend;
+#endif
+#include "bcmsdh_sdmmc.h"
+
+#ifndef BCMSDH_MODULE
+extern int sdio_function_init(void);
+extern void sdio_function_cleanup(void);
+#endif /* BCMSDH_MODULE */
+
+#if !defined(OOB_INTR_ONLY)
+static void IRQHandler(struct sdio_func *func);
+static void IRQHandlerF2(struct sdio_func *func);
+#endif /* !defined(OOB_INTR_ONLY) */
+static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr);
+extern int sdio_reset_comm(struct mmc_card *card);
+
+#define DEFAULT_SDIO_F2_BLKSIZE		512
+#ifndef CUSTOM_SDIO_F2_BLKSIZE
+#define CUSTOM_SDIO_F2_BLKSIZE		DEFAULT_SDIO_F2_BLKSIZE
+#endif
+
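+/* CMD53 (SD_IO_RW_EXTENDED) carries a 9-bit block count, so cap each request
+ * at 511 blocks.
+ */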
+#define MAX_IO_RW_EXTENDED_BLK		511
+
+uint sd_sdmode = SDIOH_MODE_SD4;	/* Use SD4 mode by default */
+uint sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE;
+uint sd_divisor = 2;			/* Default 48MHz/2 = 24MHz */
+
+uint sd_power = 1;		/* Default to SD Slot powered ON */
+uint sd_clock = 1;		/* Default to SD Clock turned ON */
+uint sd_hiok = FALSE;	/* Don't use hi-speed mode by default */
+uint sd_msglevel = 0x01;
+uint sd_use_dma = TRUE;
+
+#ifndef CUSTOM_RXCHAIN
+#define CUSTOM_RXCHAIN 0
+#endif
+
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait);
+DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait);
+
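+/* Scatter-gather DMA buffers and lengths must be 4-byte aligned; see
+ * sdioh_request_buffer().
+ */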
+#define DMA_ALIGN_MASK	0x03
+#define MMC_SDIO_ABORT_RETRY_LIMIT 5
+
+int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data);
+
+static int
+sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
+{
+	int err_ret;
+	uint32 fbraddr;
+	uint8 func;
+
+	sd_trace(("%s\n", __FUNCTION__));
+
+	/* Get the Card's common CIS address */
+	sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
+	sd->func_cis_ptr[0] = sd->com_cis_ptr;
+	sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+	/* Get the Card's function CIS (for each function) */
+	for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
+	     func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
+		sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
+		sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
+		         __FUNCTION__, func, sd->func_cis_ptr[func]));
+	}
+
+	sd->func_cis_ptr[0] = sd->com_cis_ptr;
+	sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
+
+	/* Enable Function 1 */
+	if (sd->func[1]) {
+		sdio_claim_host(sd->func[1]);
+		err_ret = sdio_enable_func(sd->func[1]);
+		sdio_release_host(sd->func[1]);
+		if (err_ret) {
+			sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x", err_ret));
+		}
+	}
+	else
+		sd_err(("%s:gInstance->func[1] is null\n", __FUNCTION__));
+
+	return FALSE;
+}
+
+/*
+ *	Public entry points & externs
+ */
+extern sdioh_info_t *
+sdioh_attach(osl_t *osh, struct sdio_func *func)
+{
+	sdioh_info_t *sd = NULL;
+	int err_ret;
+
+	sd_trace(("%s\n", __FUNCTION__));
+
+	if (func == NULL) {
+		sd_err(("%s: sdio function device is NULL\n", __FUNCTION__));
+		return NULL;
+	}
+
+	if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
+		sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+		return NULL;
+	}
+	bzero((char *)sd, sizeof(sdioh_info_t));
+	/* Initialize the scatter gather table entries */
+	sg_init_table(&sd->sg_list[0], SDIOH_SDMMC_MAX_SG_ENTRIES);
+	sd->osh = osh;
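+	/* The Linux MMC stack does not expose function 0 as a struct sdio_func,
+	 * so fabricate one to allow CMD52 access to the common (F0) registers.
+	 */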
+	sd->fake_func0.num = 0;
+	sd->fake_func0.card = func->card;
+	sd->func[0] = &sd->fake_func0;
+	sd->func[1] = func->card->sdio_func[0];
+	sd->func[2] = func->card->sdio_func[1];
+	sd->num_funcs = 2;
+	sd->sd_blockmode = TRUE;
+	sd->use_client_ints = TRUE;
+	sd->client_block_size[0] = 64;
+	sd->use_rxchain = CUSTOM_RXCHAIN;
+	if (sd->func[1] == NULL || sd->func[2] == NULL) {
+		sd_err(("%s: func 1 or 2 is null \n", __FUNCTION__));
+		goto fail;
+	}
+	sdio_set_drvdata(sd->func[1], sd);
+
+	sdio_claim_host(sd->func[1]);
+	sd->client_block_size[1] = 64;
+	err_ret = sdio_set_block_size(sd->func[1], 64);
+	sdio_release_host(sd->func[1]);
+	if (err_ret) {
+		sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize(%d)\n", err_ret));
+		goto fail;
+	}
+
+	sdio_claim_host(sd->func[2]);
+	sd->client_block_size[2] = sd_f2_blocksize;
+	err_ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
+	sdio_release_host(sd->func[2]);
+	if (err_ret) {
+		sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d(%d)\n",
+			sd_f2_blocksize, err_ret));
+		goto fail;
+	}
+
+	sdioh_sdmmc_card_enablefuncs(sd);
+
+	sd_trace(("%s: Done\n", __FUNCTION__));
+	return sd;
+
+fail:
+	MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+	return NULL;
+}
+
+
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+	sd_trace(("%s\n", __FUNCTION__));
+
+	if (sd) {
+
+		/* Disable Function 2 */
+		if (sd->func[2]) {
+			sdio_claim_host(sd->func[2]);
+			sdio_disable_func(sd->func[2]);
+			sdio_release_host(sd->func[2]);
+		}
+
+		/* Disable Function 1 */
+		if (sd->func[1]) {
+			sdio_claim_host(sd->func[1]);
+			sdio_disable_func(sd->func[1]);
+			sdio_release_host(sd->func[1]);
+		}
+
+		sd->func[1] = NULL;
+		sd->func[2] = NULL;
+
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+	}
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+
+extern SDIOH_API_RC
+sdioh_enable_func_intr(sdioh_info_t *sd)
+{
+	uint8 reg;
+	int err;
+
+	if (sd->func[0] == NULL) {
+		sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sdio_claim_host(sd->func[0]);
+	reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
+	if (err) {
+		sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		sdio_release_host(sd->func[0]);
+		return SDIOH_API_RC_FAIL;
+	}
+	/* Enable F1 and F2 interrupts, clear master enable */
+	reg &= ~INTR_CTL_MASTER_EN;
+	reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
+	sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
+	sdio_release_host(sd->func[0]);
+
+	if (err) {
+		sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_disable_func_intr(sdioh_info_t *sd)
+{
+	uint8 reg;
+	int err;
+
+	if (sd->func[0] == NULL) {
+		sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sdio_claim_host(sd->func[0]);
+	reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
+	if (err) {
+		sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		sdio_release_host(sd->func[0]);
+		return SDIOH_API_RC_FAIL;
+	}
+	reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
+	/* Disable master interrupt with the last function interrupt */
+	if (!(reg & 0xFE))
+		reg = 0;
+	sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
+	sdio_release_host(sd->func[0]);
+
+	if (err) {
+		sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+/* Configure callback to client when we receive a client interrupt */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	if (fn == NULL) {
+		sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+#if !defined(OOB_INTR_ONLY)
+	sd->intr_handler = fn;
+	sd->intr_handler_arg = argh;
+	sd->intr_handler_valid = TRUE;
+
+	/* register and unmask irq */
+	if (sd->func[2]) {
+		sdio_claim_host(sd->func[2]);
+		sdio_claim_irq(sd->func[2], IRQHandlerF2);
+		sdio_release_host(sd->func[2]);
+	}
+
+	if (sd->func[1]) {
+		sdio_claim_host(sd->func[1]);
+		sdio_claim_irq(sd->func[1], IRQHandler);
+		sdio_release_host(sd->func[1]);
+	}
+#elif defined(HW_OOB)
+	sdioh_enable_func_intr(sd);
+#endif /* !defined(OOB_INTR_ONLY) */
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+
+#if !defined(OOB_INTR_ONLY)
+	if (sd->func[1]) {
+		/* register and unmask irq */
+		sdio_claim_host(sd->func[1]);
+		sdio_release_irq(sd->func[1]);
+		sdio_release_host(sd->func[1]);
+	}
+
+	if (sd->func[2]) {
+		/* Claim host controller F2 */
+		sdio_claim_host(sd->func[2]);
+		sdio_release_irq(sd->func[2]);
+		/* Release host controller F2 */
+		sdio_release_host(sd->func[2]);
+	}
+
+	sd->intr_handler_valid = FALSE;
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+#elif defined(HW_OOB)
+	sdioh_disable_func_intr(sd);
+#endif /* !defined(OOB_INTR_ONLY) */
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	*onoff = sd->client_intr_enabled;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG)
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+	return (0);
+}
+#endif
+
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+	return sd->num_funcs;
+}
+
+/* IOVar table */
+enum {
+	IOV_MSGLEVEL = 1,
+	IOV_BLOCKMODE,
+	IOV_BLOCKSIZE,
+	IOV_DMA,
+	IOV_USEINTS,
+	IOV_NUMINTS,
+	IOV_NUMLOCALINTS,
+	IOV_HOSTREG,
+	IOV_DEVREG,
+	IOV_DIVISOR,
+	IOV_SDMODE,
+	IOV_HISPEED,
+	IOV_HCIREGS,
+	IOV_POWER,
+	IOV_CLOCK,
+	IOV_RXCHAIN
+};
+
+const bcm_iovar_t sdioh_iovars[] = {
+	{"sd_msglevel", IOV_MSGLEVEL,	0,	IOVT_UINT32,	0 },
+	{"sd_blockmode", IOV_BLOCKMODE, 0,	IOVT_BOOL,	0 },
+	{"sd_blocksize", IOV_BLOCKSIZE, 0,	IOVT_UINT32,	0 }, /* ((fn << 16) | size) */
+	{"sd_dma",	IOV_DMA,	0,	IOVT_BOOL,	0 },
+	{"sd_ints", 	IOV_USEINTS,	0,	IOVT_BOOL,	0 },
+	{"sd_numints",	IOV_NUMINTS,	0,	IOVT_UINT32,	0 },
+	{"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32,	0 },
+	{"sd_hostreg",	IOV_HOSTREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_devreg",	IOV_DEVREG, 	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_divisor",	IOV_DIVISOR,	0,	IOVT_UINT32,	0 },
+	{"sd_power",	IOV_POWER,	0,	IOVT_UINT32,	0 },
+	{"sd_clock",	IOV_CLOCK,	0,	IOVT_UINT32,	0 },
+	{"sd_mode", 	IOV_SDMODE, 	0,	IOVT_UINT32,	100},
+	{"sd_highspeed", IOV_HISPEED,	0,	IOVT_UINT32,	0 },
+	{"sd_rxchain",  IOV_RXCHAIN,    0, 	IOVT_BOOL,	0 },
+	{NULL, 0, 0, 0, 0 }
+};
+
+int
+sdioh_iovar_op(sdioh_info_t *si, const char *name,
+                           void *params, int plen, void *arg, int len, bool set)
+{
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	int val_size;
+	int32 int_val = 0;
+	bool bool_val;
+	uint32 actionid;
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get must have return space; Set does not take qualifiers */
+	ASSERT(set || (arg && len));
+	ASSERT(!set || (!params && !plen));
+
+	sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
+
+	if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
+		bcmerror = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
+		goto exit;
+
+	/* Set up params so get and set can share the convenience variables */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		val_size = sizeof(int);
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	bool_val = (int_val != 0) ? TRUE : FALSE;
+	BCM_REFERENCE(bool_val);
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	switch (actionid) {
+	case IOV_GVAL(IOV_MSGLEVEL):
+		int_val = (int32)sd_msglevel;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_MSGLEVEL):
+		sd_msglevel = int_val;
+		break;
+
+	case IOV_GVAL(IOV_BLOCKMODE):
+		int_val = (int32)si->sd_blockmode;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_BLOCKMODE):
+		si->sd_blockmode = (bool)int_val;
+		/* Haven't figured out how to make non-block mode with DMA */
+		break;
+
+	case IOV_GVAL(IOV_BLOCKSIZE):
+		if ((uint32)int_val > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		int_val = (int32)si->client_block_size[int_val];
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_BLOCKSIZE):
+	{
+		uint func = ((uint32)int_val >> 16);
+		uint blksize = (uint16)int_val;
+		uint maxsize;
+
+		if (func > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		switch (func) {
+		case 0: maxsize = 32; break;
+		case 1: maxsize = BLOCK_SIZE_4318; break;
+		case 2: maxsize = BLOCK_SIZE_4328; break;
+		default: maxsize = 0;
+		}
+		if (blksize > maxsize) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		if (!blksize) {
+			blksize = maxsize;
+		}
+
+		/* Now set it */
+		si->client_block_size[func] = blksize;
+
+		break;
+	}
+
+	case IOV_GVAL(IOV_RXCHAIN):
+		int_val = (int32)si->use_rxchain;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_DMA):
+		int_val = (int32)si->sd_use_dma;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DMA):
+		si->sd_use_dma = (bool)int_val;
+		break;
+
+	case IOV_GVAL(IOV_USEINTS):
+		int_val = (int32)si->use_client_ints;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_USEINTS):
+		si->use_client_ints = (bool)int_val;
+		if (si->use_client_ints)
+			si->intmask |= CLIENT_INTR;
+		else
+			si->intmask &= ~CLIENT_INTR;
+
+		break;
+
+	case IOV_GVAL(IOV_DIVISOR):
+		int_val = (uint32)sd_divisor;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DIVISOR):
+		sd_divisor = int_val;
+		break;
+
+	case IOV_GVAL(IOV_POWER):
+		int_val = (uint32)sd_power;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_POWER):
+		sd_power = int_val;
+		break;
+
+	case IOV_GVAL(IOV_CLOCK):
+		int_val = (uint32)sd_clock;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_CLOCK):
+		sd_clock = int_val;
+		break;
+
+	case IOV_GVAL(IOV_SDMODE):
+		int_val = (uint32)sd_sdmode;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SDMODE):
+		sd_sdmode = int_val;
+		break;
+
+	case IOV_GVAL(IOV_HISPEED):
+		int_val = (uint32)sd_hiok;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_HISPEED):
+		sd_hiok = int_val;
+		break;
+
+	case IOV_GVAL(IOV_NUMINTS):
+		int_val = (int32)si->intrcount;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_NUMLOCALINTS):
+		int_val = (int32)0;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_HOSTREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+
+		if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
+			sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__,
+		                  (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+		                  sd_ptr->offset));
+		if (sd_ptr->offset & 1)
+			int_val = 8; /* sdioh_sdmmc_rreg8(si, sd_ptr->offset); */
+		else if (sd_ptr->offset & 2)
+			int_val = 16; /* sdioh_sdmmc_rreg16(si, sd_ptr->offset); */
+		else
+			int_val = 32; /* sdioh_sdmmc_rreg(si, sd_ptr->offset); */
+
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_HOSTREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+
+		if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
+			sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value,
+		                  (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
+		                  sd_ptr->offset));
+		break;
+	}
+
+	case IOV_GVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data = 0;
+
+		if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+
+		int_val = (int)data;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data = (uint8)sd_ptr->value;
+
+		if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+		break;
+	}
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+exit:
+
+	return bcmerror;
+}
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+
+SDIOH_API_RC
+sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable)
+{
+	SDIOH_API_RC status;
+	uint8 data;
+
+	if (enable)
+		data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE | SDIO_SEPINT_ACT_HI;
+	else
+		data = SDIO_SEPINT_ACT_HI;	/* disable hw oob interrupt */
+
+	status = sdioh_request_byte(sd, SDIOH_WRITE, 0, SDIOD_CCCR_BRCM_SEPINT, &data);
+	return status;
+}
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	SDIOH_API_RC status;
+	/* No lock needed since sdioh_request_byte does locking */
+	status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+	return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	/* No lock needed since sdioh_request_byte does locking */
+	SDIOH_API_RC status;
+	status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+	return status;
+}
+
+static int
+sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
+{
+	/* read 24 bits and return valid 17 bit addr */
+	int i;
+	uint32 scratch, regdata;
+	uint8 *ptr = (uint8 *)&scratch;
+	for (i = 0; i < 3; i++) {
+		if ((sdioh_sdmmc_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
+			sd_err(("%s: Can't read!\n", __FUNCTION__));
+
+		*ptr++ = (uint8) regdata;
+		regaddr++;
+	}
+
+	/* Only the lower 17-bits are valid */
+	scratch = ltoh32(scratch);
+	scratch &= 0x0001FFFF;
+	return (scratch);
+}
+
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+	uint32 count;
+	int offset;
+	uint32 foo;
+	uint8 *cis = cisd;
+
+	sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
+
+	if (!sd->func_cis_ptr[func]) {
+		bzero(cis, length);
+		sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));
+
+	for (count = 0; count < length; count++) {
+		offset =  sd->func_cis_ptr[func] + count;
+		if (sdioh_sdmmc_card_regread (sd, 0, offset, 1, &foo) < 0) {
+			sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+			return SDIOH_API_RC_FAIL;
+		}
+
+		*cis = (uint8)(foo & 0xff);
+		cis++;
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+	int err_ret = 0;
+#if defined(MMC_SDIO_ABORT)
+	int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
+#endif
+
+	sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr));
+
+	DHD_PM_RESUME_WAIT(sdioh_request_byte_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+	if (rw) { /* CMD52 Write */
+		if (func == 0) {
+			/* Can only directly write to some F0 registers.  Handle F2 enable
+			 * as a special case.
+			 */
+			if (regaddr == SDIOD_CCCR_IOEN) {
+				if (sd->func[2]) {
+					sdio_claim_host(sd->func[2]);
+					if (*byte & SDIO_FUNC_ENABLE_2) {
+						/* Enable Function 2 */
+						err_ret = sdio_enable_func(sd->func[2]);
+						if (err_ret) {
+							sd_err(("bcmsdh_sdmmc: enable F2 failed:%d",
+								err_ret));
+						}
+					} else {
+						/* Disable Function 2 */
+						err_ret = sdio_disable_func(sd->func[2]);
+						if (err_ret) {
+							sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d",
+								err_ret));
+						}
+					}
+					sdio_release_host(sd->func[2]);
+				}
+			}
+#if defined(MMC_SDIO_ABORT)
+			/* to allow abort command through F1 */
+			else if (regaddr == SDIOD_CCCR_IOABORT) {
+				while (sdio_abort_retry--) {
+					if (sd->func[func]) {
+						sdio_claim_host(sd->func[func]);
+						/*
+						 * This sdio_writeb() may need to be replaced
+						 * with a different API as the MMC driver
+						 * changes; for now it is a temporary measure.
+						 */
+						sdio_writeb(sd->func[func],
+							*byte, regaddr, &err_ret);
+						sdio_release_host(sd->func[func]);
+					}
+					if (!err_ret)
+						break;
+				}
+			}
+#endif /* MMC_SDIO_ABORT */
+			else if (regaddr < 0xF0) {
+				sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
+			} else {
+				/* Claim host controller, perform F0 write, and release */
+				if (sd->func[func]) {
+					sdio_claim_host(sd->func[func]);
+					sdio_f0_writeb(sd->func[func],
+						*byte, regaddr, &err_ret);
+					sdio_release_host(sd->func[func]);
+				}
+			}
+		} else {
+			/* Claim host controller, perform Fn write, and release */
+			if (sd->func[func]) {
+				sdio_claim_host(sd->func[func]);
+				sdio_writeb(sd->func[func], *byte, regaddr, &err_ret);
+				sdio_release_host(sd->func[func]);
+			}
+		}
+	} else { /* CMD52 Read */
+		/* Claim host controller, perform Fn read, and release */
+		if (sd->func[func]) {
+			sdio_claim_host(sd->func[func]);
+			if (func == 0) {
+				*byte = sdio_f0_readb(sd->func[func], regaddr, &err_ret);
+			} else {
+				*byte = sdio_readb(sd->func[func], regaddr, &err_ret);
+			}
+			sdio_release_host(sd->func[func]);
+		}
+	}
+
+	if (err_ret) {
+		if ((regaddr == 0x1001F) && ((err_ret == -ETIMEDOUT) || (err_ret == -EILSEQ))) {
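+			/* 0x1001F (presumably the F1 sleep control register) is
+			 * expected to fail while the device is asleep; skip logging.
+			 */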
+		} else {
+			sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
+				rw ? "Write" : "Read", func, regaddr, *byte, err_ret));
+		}
+	}
+
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+                                   uint32 *word, uint nbytes)
+{
+	int err_ret = SDIOH_API_RC_FAIL;
+#if defined(MMC_SDIO_ABORT)
+	int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
+#endif
+
+	if (func == 0) {
+		sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
+	         __FUNCTION__, cmd_type, rw, func, addr, nbytes));
+
+	DHD_PM_RESUME_WAIT(sdioh_request_word_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+	/* Claim host controller */
+	sdio_claim_host(sd->func[func]);
+
+	if (rw) { /* CMD52 Write */
+		if (nbytes == 4) {
+			sdio_writel(sd->func[func], *word, addr, &err_ret);
+		} else if (nbytes == 2) {
+			sdio_writew(sd->func[func], (*word & 0xFFFF), addr, &err_ret);
+		} else {
+			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+		}
+	} else { /* CMD52 Read */
+		if (nbytes == 4) {
+			*word = sdio_readl(sd->func[func], addr, &err_ret);
+		} else if (nbytes == 2) {
+			*word = sdio_readw(sd->func[func], addr, &err_ret) & 0xFFFF;
+		} else {
+			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+		}
+	}
+
+	/* Release host controller */
+	sdio_release_host(sd->func[func]);
+
+	if (err_ret) {
+#if defined(MMC_SDIO_ABORT)
+		/* Any error on CMD53 transaction should abort that function using function 0. */
+		while (sdio_abort_retry--) {
+			if (sd->func[0]) {
+				sdio_claim_host(sd->func[0]);
+				/*
+				 * This sdio_writeb() may need to be replaced with a
+				 * different API as the MMC driver changes; for now it
+				 * is a temporary measure.
+				 */
+				sdio_writeb(sd->func[0],
+					func, SDIOD_CCCR_IOABORT, &err_ret);
+				sdio_release_host(sd->func[0]);
+			}
+			if (!err_ret)
+				break;
+		}
+		if (err_ret)
+#endif /* MMC_SDIO_ABORT */
+		{
+			sd_err(("bcmsdh_sdmmc: Failed to %s word, Err: 0x%08x",
+				rw ? "Write" : "Read", err_ret));
+		}
+	}
+
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+static SDIOH_API_RC
+sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
+                     uint addr, void *pkt)
+{
+	bool fifo = (fix_inc == SDIOH_DATA_FIX);
+	int err_ret = 0;
+	void *pnext;
+	uint ttl_len, pkt_offset;
+	uint blk_num;
+	uint blk_size;
+	uint max_blk_count;
+	uint max_req_size;
+	struct mmc_request mmc_req;
+	struct mmc_command mmc_cmd;
+	struct mmc_data mmc_dat;
+	uint32 sg_count;
+	struct sdio_func *sdio_func = sd->func[func];
+	struct mmc_host *host = sdio_func->card->host;
+
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	ASSERT(pkt);
+	DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+
+	blk_size = sd->client_block_size[func];
+	max_blk_count = min(host->max_blk_count, (uint)MAX_IO_RW_EXTENDED_BLK);
+	max_req_size = min(max_blk_count * blk_size, host->max_req_size);
+
+	pkt_offset = 0;
+	pnext = pkt;
+
+	while (pnext != NULL) {
+		ttl_len = 0;
+		sg_count = 0;
+		memset(&mmc_req, 0, sizeof(struct mmc_request));
+		memset(&mmc_cmd, 0, sizeof(struct mmc_command));
+		memset(&mmc_dat, 0, sizeof(struct mmc_data));
+		sg_init_table(sd->sg_list, ARRAYSIZE(sd->sg_list));
+
+		/* Set up scatter-gather DMA descriptors. This loop determines the
+		 * maximum data we can transfer with one CMD53. Blocks per command are
+		 * limited by the host's max_req_size and the 9-bit block count field.
+		 * When the total length of the packet chain exceeds max_req_size, use
+		 * multiple SD_IO_RW_EXTENDED commands (each transfer is still block
+		 * aligned).
+		 */
+		while (pnext != NULL && ttl_len < max_req_size) {
+			int pkt_len;
+			int sg_data_size;
+			uint8 *pdata = (uint8*)PKTDATA(sd->osh, pnext);
+
+			ASSERT(pdata != NULL);
+			pkt_len = PKTLEN(sd->osh, pnext);
+			sd_trace(("%s[%d] data=%p, len=%d\n", __FUNCTION__, write, pdata, pkt_len));
+			/* sg_count should never exceed the array size; that cannot be
+			 * handled here, so if it ever happens, put a restriction on the
+			 * max tx/glom count (based on host->max_segs).
+			 */
+			if (sg_count >= ARRAYSIZE(sd->sg_list)) {
+				sd_err(("%s: sg list entries exceed limit\n", __FUNCTION__));
+				return (SDIOH_API_RC_FAIL);
+			}
+			pdata += pkt_offset;
+
+			sg_data_size = pkt_len - pkt_offset;
+			if (sg_data_size > max_req_size - ttl_len)
+				sg_data_size = max_req_size - ttl_len;
+			/* some platforms put a restriction on the data size of each scatter-gather
+			 * DMA descriptor, use multiple sg buffers when xfer_size is bigger than
+			 * max_seg_size
+			 */
+			if (sg_data_size > host->max_seg_size)
+				sg_data_size = host->max_seg_size;
+			sg_set_buf(&sd->sg_list[sg_count++], pdata, sg_data_size);
+
+			ttl_len += sg_data_size;
+			pkt_offset += sg_data_size;
+			if (pkt_offset == pkt_len) {
+				pnext = PKTNEXT(sd->osh, pnext);
+				pkt_offset = 0;
+			}
+		}
+
+		if (ttl_len % blk_size != 0) {
+			sd_err(("%s, data length %d not aligned to block size %d\n",
+				__FUNCTION__,  ttl_len, blk_size));
+			return SDIOH_API_RC_FAIL;
+		}
+		blk_num = ttl_len / blk_size;
+		mmc_dat.sg = sd->sg_list;
+		mmc_dat.sg_len = sg_count;
+		mmc_dat.blksz = blk_size;
+		mmc_dat.blocks = blk_num;
+		mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
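+		/* CMD53 argument layout per the SDIO spec: bit 31 R/W flag,
+		 * bits 30:28 function number, bit 27 block mode, bit 26 OP code
+		 * (1 = incrementing address), bits 25:9 register address,
+		 * bits 8:0 block count.
+		 */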
+		mmc_cmd.opcode = 53; /* SD_IO_RW_EXTENDED */
+		mmc_cmd.arg = write ? 1<<31 : 0;
+		mmc_cmd.arg |= (func & 0x7) << 28;
+		mmc_cmd.arg |= 1<<27;
+		mmc_cmd.arg |= fifo ? 0 : 1<<26;
+		mmc_cmd.arg |= (addr & 0x1FFFF) << 9;
+		mmc_cmd.arg |= blk_num & 0x1FF;
+		mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+		mmc_req.cmd = &mmc_cmd;
+		mmc_req.data = &mmc_dat;
+		if (!fifo)
+			addr += ttl_len;
+
+		sdio_claim_host(sdio_func);
+		mmc_set_data_timeout(&mmc_dat, sdio_func->card);
+		mmc_wait_for_req(host, &mmc_req);
+		sdio_release_host(sdio_func);
+
+		err_ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
+		if (0 != err_ret) {
+			sd_err(("%s:CMD53 %s failed with code %d\n",
+				__FUNCTION__, write ? "write" : "read", err_ret));
+			return SDIOH_API_RC_FAIL;
+		}
+	}
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+static SDIOH_API_RC
+sdioh_buffer_tofrom_bus(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
+                     uint addr, uint8 *buf, uint len)
+{
+	bool fifo = (fix_inc == SDIOH_DATA_FIX);
+	int err_ret = 0;
+
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	ASSERT(buf);
+
+	/* NOTE:
+	 * For all writes, each packet length is aligned to 32 (or 4) bytes in
+	 * dhdsdio_txpkt_preprocess, and for glom the last packet length is
+	 * aligned to a block boundary. If you want to align each packet to a
+	 * custom size, do it in dhdsdio_txpkt_preprocess, NOT here.
+	 *
+	 * For reads, the alignment is done in sdioh_request_buffer.
+	 */
+	sdio_claim_host(sd->func[func]);
+
+	if (write)
+		err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len);
+	else if (fifo)
+		err_ret = sdio_readsb(sd->func[func], buf, addr, len);
+	else
+		err_ret = sdio_memcpy_fromio(sd->func[func], buf, addr, len);
+
+	sdio_release_host(sd->func[func]);
+
+	if (err_ret)
+		sd_err(("%s: %s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=%d\n", __FUNCTION__,
+		       (write) ? "TX" : "RX", buf, addr, len, err_ret));
+	else
+		sd_trace(("%s: %s xfr'd %p, addr=0x%05x, len=%d\n", __FUNCTION__,
+			(write) ? "TX" : "RX", buf, addr, len));
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+
+/*
+ * This function takes a buffer or packet, and fixes everything up so that in the
+ * end, a DMA-able packet is created.
+ *
+ * A buffer does not have an associated packet pointer, and may or may not be aligned.
+ * A packet may consist of a single packet, or a packet chain.  If it is a packet chain,
+ * then all the packets in the chain must be properly aligned.  If the packet data is not
+ * aligned, then there may only be one packet, and in this case, it is copied to a new
+ * aligned packet.
+ *
+ */
+extern SDIOH_API_RC
+sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func,
+	uint addr, uint reg_width, uint buf_len, uint8 *buffer, void *pkt)
+{
+	SDIOH_API_RC status;
+	void *tmppkt;
+
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+
+	if (pkt) {
+		/* packet chain, only used for tx/rx glom; all packet lengths are
+		 * aligned and the total length is a block multiple
+		 */
+		if (PKTNEXT(sd->osh, pkt))
+			return sdioh_request_packet_chain(sd, fix_inc, write, func, addr, pkt);
+
+		/* non-glom mode, ignore the buffer parameter and use the packet pointer
+		 * (this shouldn't happen)
+		 */
+		buffer = PKTDATA(sd->osh, pkt);
+		buf_len = PKTLEN(sd->osh, pkt);
+	}
+
+	ASSERT(buffer);
+
+	/* buffer and length are aligned, use it directly so we can avoid memory copy */
+	if (((ulong)buffer & DMA_ALIGN_MASK) == 0 && (buf_len & DMA_ALIGN_MASK) == 0)
+		return sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr, buffer, buf_len);
+
+	sd_err(("%s: [%d] doing memory copy buf=%p, len=%d\n",
+		__FUNCTION__, write, buffer, buf_len));
+
+	/* otherwise, a memory copy is needed as the input buffer is not aligned */
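+	/* (the extra block of headroom covers the rounded-up transfer length) */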
+	tmppkt = PKTGET_STATIC(sd->osh, buf_len + DEFAULT_SDIO_F2_BLKSIZE, write ? TRUE : FALSE);
+	if (tmppkt == NULL) {
+		sd_err(("%s: PKTGET failed: len %d\n", __FUNCTION__, buf_len));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	if (write)
+		bcopy(buffer, PKTDATA(sd->osh, tmppkt), buf_len);
+
+	status = sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr,
+		PKTDATA(sd->osh, tmppkt), ROUNDUP(buf_len, (DMA_ALIGN_MASK+1)));
+
+	if (!write)
+		bcopy(PKTDATA(sd->osh, tmppkt), buffer, buf_len);
+
+	PKTFREE_STATIC(sd->osh, tmppkt, write ? TRUE : FALSE);
+
+	return status;
+}
+
+/* This function performs "abort" for both the host and the device */
+extern int
+sdioh_abort(sdioh_info_t *sd, uint func)
+{
+#if defined(MMC_SDIO_ABORT)
+	char t_func = (char) func;
+#endif /* defined(MMC_SDIO_ABORT) */
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+
+#if defined(MMC_SDIO_ABORT)
+	/* issue abort cmd52 command through F1 */
+	sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func);
+#endif /* defined(MMC_SDIO_ABORT) */
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Reset and re-initialize the device */
+int sdioh_sdio_reset(sdioh_info_t *si)
+{
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Disable device interrupt */
+void
+sdioh_sdmmc_devintr_off(sdioh_info_t *sd)
+{
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+	sd->intmask &= ~CLIENT_INTR;
+}
+
+/* Enable device interrupt */
+void
+sdioh_sdmmc_devintr_on(sdioh_info_t *sd)
+{
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+	sd->intmask |= CLIENT_INTR;
+}
+
+/* Read client card reg */
+int
+sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+	int ret = SUCCESS;
+
+	if ((func == 0) || (regsize == 1)) {
+		uint8 temp = 0;
+
+		ret = sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
+		*data = temp;
+		*data &= 0xff;
+		sd_data(("%s: byte read data=0x%02x\n",
+		         __FUNCTION__, *data));
+	} else {
+		ret = sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize);
+		if (regsize == 2)
+			*data &= 0xffff;
+
+		sd_data(("%s: word read data=0x%08x\n",
+		         __FUNCTION__, *data));
+	}
+
+	return ret;
+}
+
+#if !defined(OOB_INTR_ONLY)
+/* bcmsdh_sdmmc interrupt handler */
+static void IRQHandler(struct sdio_func *func)
+{
+	sdioh_info_t *sd;
+
+	sd = sdio_get_drvdata(func);
+
+	ASSERT(sd != NULL);
+	sdio_release_host(sd->func[0]);
+
+	if (sd->use_client_ints) {
+		sd->intrcount++;
+		ASSERT(sd->intr_handler);
+		ASSERT(sd->intr_handler_arg);
+		(sd->intr_handler)(sd->intr_handler_arg);
+	} else {
+		sd_err(("bcmsdh_sdmmc: ***IRQHandler\n"));
+
+		sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
+		        __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
+	}
+
+	sdio_claim_host(sd->func[0]);
+}
+
+/* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */
+static void IRQHandlerF2(struct sdio_func *func)
+{
+	sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n"));
+}
+#endif /* !defined(OOB_INTR_ONLY) */
+
+#ifdef NOTUSED
+/* Write client card reg */
+static int
+sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
+{
+
+	if ((func == 0) || (regsize == 1)) {
+		uint8 temp;
+
+		temp = data & 0xff;
+		sdioh_request_byte(sd, SDIOH_WRITE, func, regaddr, &temp);
+		sd_data(("%s: byte write data=0x%02x\n",
+		         __FUNCTION__, data));
+	} else {
+		if (regsize == 2)
+			data &= 0xffff;
+
+		sdioh_request_word(sd, 0, SDIOH_WRITE, func, regaddr, &data, regsize);
+
+		sd_data(("%s: word write data=0x%08x\n",
+		         __FUNCTION__, data));
+	}
+
+	return SUCCESS;
+}
+#endif /* NOTUSED */
+
+int
+sdioh_start(sdioh_info_t *sd, int stage)
+{
+	int ret;
+
+	if (!sd) {
+		sd_err(("%s Failed, sd is NULL\n", __FUNCTION__));
+		return (0);
+	}
+
+	/* This must be done in stages: the interrupt cannot be enabled until
+	 * the firmware download is complete, otherwise polled SDIO accesses
+	 * would get in the way.
+	 */
+	if (sd->func[0]) {
+		if (stage == 0) {
+			/* Since power to the chip was cut, the device must be
+			 * re-enumerated. Set the block sizes and enable function 1
+			 * in preparation for downloading the firmware.
+			 */
+			sd->num_funcs = 2;
+			sd->sd_blockmode = TRUE;
+			sd->use_client_ints = TRUE;
+			sd->client_block_size[0] = 64;
+
+			if (sd->func[1]) {
+				/* Claim host controller */
+				sdio_claim_host(sd->func[1]);
+
+				sd->client_block_size[1] = 64;
+				ret = sdio_set_block_size(sd->func[1], 64);
+				if (ret) {
+					sd_err(("bcmsdh_sdmmc: Failed to set F1 "
+						"blocksize(%d)\n", ret));
+				}
+
+				/* Release host controller F1 */
+				sdio_release_host(sd->func[1]);
+			}
+
+			if (sd->func[2]) {
+				/* Claim host controller F2 */
+				sdio_claim_host(sd->func[2]);
+
+				sd->client_block_size[2] = sd_f2_blocksize;
+				ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
+				if (ret) {
+					sd_err(("bcmsdh_sdmmc: Failed to set F2 "
+						"blocksize to %d(%d)\n", sd_f2_blocksize, ret));
+				}
+
+				/* Release host controller F2 */
+				sdio_release_host(sd->func[2]);
+			}
+
+			sdioh_sdmmc_card_enablefuncs(sd);
+		} else {
+#if !defined(OOB_INTR_ONLY)
+			sdio_claim_host(sd->func[0]);
+			if (sd->func[2])
+				sdio_claim_irq(sd->func[2], IRQHandlerF2);
+			if (sd->func[1])
+				sdio_claim_irq(sd->func[1], IRQHandler);
+			sdio_release_host(sd->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+			sdioh_enable_func_intr(sd);
+#endif
+			bcmsdh_oob_intr_set(sd->bcmsdh, TRUE);
+#endif /* !defined(OOB_INTR_ONLY) */
+		}
+	}
+	else
+		sd_err(("%s Failed\n", __FUNCTION__));
+
+	return (0);
+}
+
+int
+sdioh_stop(sdioh_info_t *sd)
+{
+	/* The MSM7201A Android SDIO stack has an interrupt bug: internally it
+	 * polls, which causes problems when the device is turned off. So
+	 * unregister the interrupt with the SDIO stack to stop the polling.
+	 */
+	if (sd->func[0]) {
+#if !defined(OOB_INTR_ONLY)
+		sdio_claim_host(sd->func[0]);
+		if (sd->func[1])
+			sdio_release_irq(sd->func[1]);
+		if (sd->func[2])
+			sdio_release_irq(sd->func[2]);
+		sdio_release_host(sd->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+		sdioh_disable_func_intr(sd);
+#endif
+		bcmsdh_oob_intr_set(sd->bcmsdh, FALSE);
+#endif /* !defined(OOB_INTR_ONLY) */
+	}
+	else
+		sd_err(("%s Failed\n", __FUNCTION__));
+	return (0);
+}
+
+int
+sdioh_waitlockfree(sdioh_info_t *sd)
+{
+	return (1);
+}
+
+
+SDIOH_API_RC
+sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
+{
+	return SDIOH_API_RC_FAIL;
+}
+
+SDIOH_API_RC
+sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
+{
+	return SDIOH_API_RC_FAIL;
+}
+
+bool
+sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
+{
+	return FALSE;
+}
+
+SDIOH_API_RC
+sdioh_gpio_init(sdioh_info_t *sd)
+{
+	return SDIOH_API_RC_FAIL;
+}
diff --git a/drivers/staging/edison-bcm43340/bcmsdh_sdmmc_linux.c b/drivers/staging/edison-bcm43340/bcmsdh_sdmmc_linux.c
new file mode 100644
index 0000000..746a1a3
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/bcmsdh_sdmmc_linux.c
@@ -0,0 +1,484 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc_linux.c 434724 2013-11-07 05:38:43Z $
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <sdio.h>	/* SDIO Device and Protocol Specs */
+#include <bcmsdbus.h>	/* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h>	/* to get msglevel bit values */
+
+#include <linux/sched.h>	/* request_irq() */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdhci.h>
+#include <dhd_linux.h>
+#include <bcmsdh_sdmmc.h>
+#include <dhd_dbg.h>
+
+#if !defined(SDIO_VENDOR_ID_BROADCOM)
+#define SDIO_VENDOR_ID_BROADCOM		0x02d0
+#endif /* !defined(SDIO_VENDOR_ID_BROADCOM) */
+
+#define SDIO_DEVICE_ID_BROADCOM_DEFAULT	0x0000
+
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB)
+#define SDIO_DEVICE_ID_BROADCOM_4325_SDGWB	0x0492	/* BCM94325SDGWB */
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4325)
+#define SDIO_DEVICE_ID_BROADCOM_4325	0x0493
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4329)
+#define SDIO_DEVICE_ID_BROADCOM_4329	0x4329
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4329) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4319)
+#define SDIO_DEVICE_ID_BROADCOM_4319	0x4319
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4319) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4330)
+#define SDIO_DEVICE_ID_BROADCOM_4330	0x4330
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4330) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4334)
+#define SDIO_DEVICE_ID_BROADCOM_4334    0x4334
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4334) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_43340)
+#define SDIO_DEVICE_ID_BROADCOM_43340    0xa94d
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_43340) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4335)
+#define SDIO_DEVICE_ID_BROADCOM_4335    0x4335
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4335) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4324)
+#define SDIO_DEVICE_ID_BROADCOM_4324    0x4324
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4324) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_43239)
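+/* note: value is decimal, unlike the hex IDs above (43239 == 0xa8e7) */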
+#define SDIO_DEVICE_ID_BROADCOM_43239    43239
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_43239) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4354)
+#define SDIO_DEVICE_ID_BROADCOM_4354    0x4354
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4354) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_43430)
+#define SDIO_DEVICE_ID_BROADCOM_43430    0xa9a6
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_43430) */
+
+
+extern void wl_cfg80211_set_parent_dev(void *dev);
+extern void* wl_cfg80211_get_dhdp(void);
+extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
+extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
+extern void* bcmsdh_probe(osl_t *osh, void *dev, void *sdioh, void *adapter_info, uint bus_type,
+	uint bus_num, uint slot_num);
+extern int bcmsdh_remove(bcmsdh_info_t *bcmsdh);
+void dhd_os_shutdown(void *dhdp);
+
+int sdio_function_init(void);
+void sdio_function_cleanup(void);
+
+#define DESCRIPTION "bcmsdh_sdmmc Driver"
+#define AUTHOR "Broadcom Corporation"
+
+/* module param defaults */
+static int clockoverride = 0;
+
+static struct sdio_func * gfunc = NULL;
+module_param(clockoverride, int, 0644);
+MODULE_PARM_DESC(clockoverride, "SDIO card clock override");
+
+/* Maximum number of bcmsdh_sdmmc devices supported by driver */
+#define BCMSDH_SDMMC_MAX_DEVICES 1
+
+extern void bcmsdh_set_irq(int flag, bcmsdh_info_t *bcmsdh);
+extern volatile bool dhd_mmc_suspend;
+
+
+extern int sdhci_pci_request_regulators(void);
+
+int bcmsdh_sdmmc_set_power(int on)
+{
+	static struct sdio_func *sdio_func;
+	struct sdhci_host *host;
+
+	if (gfunc) {
+		sdio_func = gfunc;
+
+		host = (struct sdhci_host *)sdio_func->card->host;
+
+		if (on)
+			mmc_power_restore_host(sdio_func->card->host);
+		else
+			mmc_power_save_host(sdio_func->card->host);
+	}
+
+	return 0;
+}
+
+void bcmsdh_sdmmc_get_regulator(void)
+{
+	sdhci_pci_request_regulators();
+}
+
+static int sdioh_probe(struct sdio_func *func)
+{
+	int host_idx = func->card->host->index;
+	uint32 rca = func->card->rca;
+	wifi_adapter_info_t *adapter;
+	osl_t *osh = NULL;
+	sdioh_info_t *sdioh = NULL;
+
+	sd_err(("bus num (host idx)=%d, slot num (rca)=%d\n", host_idx, rca));
+	adapter = dhd_wifi_platform_get_adapter(SDIO_BUS, host_idx, rca);
+	if (adapter != NULL)
+		sd_err(("found adapter info '%s'\n", adapter->name));
+	else
+		sd_err(("can't find adapter info for this chip\n"));
+
+#ifdef WL_CFG80211
+	wl_cfg80211_set_parent_dev(&func->dev);
+#endif
+
+	/* allocate SDIO Host Controller state info */
+	osh = osl_attach(&func->dev, SDIO_BUS, TRUE);
+	if (osh == NULL) {
+		sd_err(("%s: osl_attach failed\n", __FUNCTION__));
+		goto fail;
+	}
+	osl_static_mem_init(osh, adapter);
+	sdioh = sdioh_attach(osh, func);
+	if (sdioh == NULL) {
+		sd_err(("%s: sdioh_attach failed\n", __FUNCTION__));
+		goto fail;
+	}
+	sdioh->bcmsdh = bcmsdh_probe(osh, &func->dev, sdioh, adapter, SDIO_BUS, host_idx, rca);
+	if (sdioh->bcmsdh == NULL) {
+		sd_err(("%s: bcmsdh_probe failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	sdio_set_drvdata(func, sdioh);
+	return 0;
+
+fail:
+	if (sdioh != NULL)
+		sdioh_detach(osh, sdioh);
+	if (osh != NULL)
+		osl_detach(osh);
+	return -ENOMEM;
+}
+
+static void sdioh_remove(struct sdio_func *func)
+{
+	sdioh_info_t *sdioh;
+	osl_t *osh;
+
+	sdioh = sdio_get_drvdata(func);
+	if (sdioh == NULL) {
+		sd_err(("%s: error, no sdioh handler found\n", __FUNCTION__));
+		return;
+	}
+
+	osh = sdioh->osh;
+	bcmsdh_remove(sdioh->bcmsdh);
+	sdioh_detach(osh, sdioh);
+	osl_detach(osh);
+}
+
+static int bcmsdh_sdmmc_probe(struct sdio_func *func,
+                              const struct sdio_device_id *id)
+{
+	int ret = 0;
+
+	if (func == NULL)
+		return -EINVAL;
+
+	sd_err(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+	sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
+	sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
+	sd_info(("sdio_device: 0x%04x\n", func->device));
+	sd_info(("Function#: 0x%04x\n", func->num));
+
+//	if (func->num == 1)
+//		func->card->host->bus_resume_flags = 0;
+
+	/* 4318 doesn't have function 2 */
+	if ((func->num == 2) || (func->num == 1 && func->device == 0x4)) {
+		gfunc = func;
+		ret = sdioh_probe(func);
+	}
+	return ret;
+}
+
+static void bcmsdh_sdmmc_remove(struct sdio_func *func)
+{
+	if (func == NULL) {
+		sd_err(("%s is called with NULL SDIO function pointer\n", __FUNCTION__));
+		return;
+	}
+
+	sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+	sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
+	sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
+	sd_info(("sdio_device: 0x%04x\n", func->device));
+	sd_info(("Function#: 0x%04x\n", func->num));
+
+//	if (func->num == 1)
+//		func->card->host->bus_resume_flags |= MMC_BUSRESUME_MANUAL_RESUME;
+
+
+	if ((func->num == 2) || (func->num == 1 && func->device == 0x4))
+		sdioh_remove(func);
+
+}
+
+static void bcmsdh_sdmmc_shutdown(struct device *dev)
+{
+	struct sdio_func *func = dev_to_sdio_func(dev);
+	sdioh_info_t *sdioh;
+
+	sd_trace(("%s Enter\n", __FUNCTION__));
+
+	if (func == NULL)
+		return;
+
+	sdioh = sdio_get_drvdata(func);
+
+	if (func->num == 2) {
+		if (sdioh)
+			bcmsdh_set_irq(0, sdioh->bcmsdh);
+		else
+			sd_err(("%s: error, no sdioh handler found\n", __FUNCTION__));
+		dhd_os_shutdown(wl_cfg80211_get_dhdp());
+	}
+}
+
+/* devices we support, null terminated */
+static const struct sdio_device_id bcmsdh_sdmmc_ids[] = {
+#ifdef USE_4325
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) },
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325) },
+#endif
+#ifdef USE_4329
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329) },
+#endif
+#ifdef USE_4319
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4319) },
+#endif
+#ifdef USE_4330
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330) },
+#endif
+#ifdef USE_4334
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334) },
+#endif
+#ifdef USE_4334X
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43340) },
+#endif
+#ifdef USE_4324
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4324) },
+#endif
+#ifdef USE_4335
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4335) },
+#endif
+#ifdef USE_4354
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4354) },
+#endif
+#ifdef USE_43239
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43239) },
+#endif
+#ifdef USE_43430
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43430) },
+#endif
+	{ /* end: all zeroes */				},
+};
+
+MODULE_DEVICE_TABLE(sdio, bcmsdh_sdmmc_ids);
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM)
+static int bcmsdh_sdmmc_suspend(struct device *pdev)
+{
+	int err;
+	sdioh_info_t *sdioh;
+	struct sdio_func *func = dev_to_sdio_func(pdev);
+	mmc_pm_flag_t sdio_flags;
+
+	sd_err(("%s Enter\n", __FUNCTION__));
+	if (func->num != 2)
+		return 0;
+
+	sdioh = sdio_get_drvdata(func);
+	err = bcmsdh_suspend(sdioh->bcmsdh);
+	if (err)
+		return err;
+
+	sdio_flags = sdio_get_host_pm_caps(func);
+	if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
+		sd_err(("%s: can't keep power while host is suspended\n", __FUNCTION__));
+		return  -EINVAL;
+	}
+
+	/* keep power while host suspended */
+	err = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+	if (err) {
+		sd_err(("%s: error while trying to keep power\n", __FUNCTION__));
+		return err;
+	}
+#if defined(OOB_INTR_ONLY)
+	bcmsdh_oob_intr_set(sdioh->bcmsdh, FALSE);
+#endif /* OOB_INTR_ONLY */
+	dhd_mmc_suspend = TRUE;
+	smp_mb();
+
+	return 0;
+}
+
+static int bcmsdh_sdmmc_resume(struct device *pdev)
+{
+	sdioh_info_t *sdioh;
+	struct sdio_func *func = dev_to_sdio_func(pdev);
+
+	sd_err(("%s Enter\n", __FUNCTION__));
+	if (func->num != 2)
+		return 0;
+
+	sdioh = sdio_get_drvdata(func);
+	dhd_mmc_suspend = FALSE;
+#if defined(OOB_INTR_ONLY)
+	bcmsdh_resume(sdioh->bcmsdh);
+#endif /* OOB_INTR_ONLY */
+	if (func->card && func->card->host)
+		func->card->host->pm_flags &= ~MMC_PM_KEEP_POWER;
+
+	smp_mb();
+	return 0;
+}
+
+static const struct dev_pm_ops bcmsdh_sdmmc_pm_ops = {
+	.suspend	= bcmsdh_sdmmc_suspend,
+	.resume		= bcmsdh_sdmmc_resume,
+};
+#endif  /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */
+
+#if defined(BCMLXSDMMC)
+static struct semaphore *notify_semaphore = NULL;
+
+static int dummy_probe(struct sdio_func *func,
+                              const struct sdio_device_id *id)
+{
+	if (func && (func->num != 2)) {
+		return 0;
+	}
+
+	if (notify_semaphore)
+		up(notify_semaphore);
+	return 0;
+}
+
+static void dummy_remove(struct sdio_func *func)
+{
+}
+
+static struct sdio_driver dummy_sdmmc_driver = {
+	.probe		= dummy_probe,
+	.remove		= dummy_remove,
+	.name		= "dummy_sdmmc",
+	.id_table	= bcmsdh_sdmmc_ids,
+};
+
+int sdio_func_reg_notify(void* semaphore)
+{
+	notify_semaphore = semaphore;
+	return sdio_register_driver(&dummy_sdmmc_driver);
+}
+
+void sdio_func_unreg_notify(void)
+{
+	OSL_SLEEP(15);
+	sdio_unregister_driver(&dummy_sdmmc_driver);
+}
+
+#endif /* defined(BCMLXSDMMC) */
+
+static struct sdio_driver bcmsdh_sdmmc_driver = {
+	.probe		= bcmsdh_sdmmc_probe,
+	.remove		= bcmsdh_sdmmc_remove,
+	.name		= "bcmsdh_sdmmc",
+	.id_table	= bcmsdh_sdmmc_ids,
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM)
+	.drv = {
+		.pm		= &bcmsdh_sdmmc_pm_ops,
+		.shutdown	= bcmsdh_sdmmc_shutdown,
+	},
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */
+};
+
+struct sdos_info {
+	sdioh_info_t *sd;
+	spinlock_t lock;
+};
+
+/* Interrupt enable/disable */
+SDIOH_API_RC
+sdioh_interrupt_set(sdioh_info_t *sd, bool enable)
+{
+	if (!sd)
+		return SDIOH_API_RC_FAIL;
+
+	sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling"));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#ifdef BCMSDH_MODULE
+static int __init
+bcmsdh_module_init(void)
+{
+	int error = 0;
+	error = sdio_function_init();
+	return error;
+}
+
+static void __exit
+bcmsdh_module_cleanup(void)
+{
+	sdio_function_cleanup();
+}
+
+module_init(bcmsdh_module_init);
+module_exit(bcmsdh_module_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(DESCRIPTION);
+MODULE_AUTHOR(AUTHOR);
+
+#endif /* BCMSDH_MODULE */
+/*
+ * module init
+ */
+int bcmsdh_register_client_driver(void)
+{
+	return sdio_register_driver(&bcmsdh_sdmmc_driver);
+}
+
+/*
+ * module cleanup
+ */
+void bcmsdh_unregister_client_driver(void)
+{
+	sdio_unregister_driver(&bcmsdh_sdmmc_driver);
+}
diff --git a/drivers/staging/edison-bcm43340/bcmutils.c b/drivers/staging/edison-bcm43340/bcmutils.c
new file mode 100644
index 0000000..f31ea68
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/bcmutils.c
@@ -0,0 +1,3082 @@
+/*
+ * Driver O/S-independent utility routines
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmutils.c 461404 2014-03-12 01:59:36Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <stdarg.h>
+#ifdef BCMDRIVER
+
+#include <osl.h>
+#include <bcmutils.h>
+#if defined(BCMNVRAM)
+#include <siutils.h>
+#include <bcmnvram.h>
+#endif
+
+#else /* !BCMDRIVER */
+
+#include <stdio.h>
+#include <string.h>
+#include <bcmutils.h>
+
+#if defined(BCMEXTSUP)
+#include <bcm_osl.h>
+#endif
+
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+
+#endif /* !BCMDRIVER */
+
+#include <bcmendian.h>
+#include <bcmdevs.h>
+#include <proto/ethernet.h>
+#include <proto/vlan.h>
+#include <proto/bcmip.h>
+#include <proto/802.1d.h>
+#include <proto/802.11.h>
+
+
+void *_bcmutils_dummy_fn = NULL;
+
+
+
+#ifdef BCMDRIVER
+
+
+
+/* copy a pkt buffer chain into a buffer */
+uint
+pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf)
+{
+	uint n, ret = 0;
+
+	if (len < 0)
+		len = 4096;	/* "infinite" */
+
+	/* skip 'offset' bytes */
+	for (; p && offset; p = PKTNEXT(osh, p)) {
+		if (offset < (uint)PKTLEN(osh, p))
+			break;
+		offset -= PKTLEN(osh, p);
+	}
+
+	if (!p)
+		return 0;
+
+	/* copy the data */
+	for (; p && len; p = PKTNEXT(osh, p)) {
+		n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len);
+		bcopy(PKTDATA(osh, p) + offset, buf, n);
+		buf += n;
+		len -= n;
+		ret += n;
+		offset = 0;
+	}
+
+	return ret;
+}
+
+/* copy a buffer into a pkt buffer chain */
+uint
+pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf)
+{
+	uint n, ret = 0;
+
+
+	/* skip 'offset' bytes */
+	for (; p && offset; p = PKTNEXT(osh, p)) {
+		if (offset < (uint)PKTLEN(osh, p))
+			break;
+		offset -= PKTLEN(osh, p);
+	}
+
+	if (!p)
+		return 0;
+
+	/* copy the data */
+	for (; p && len; p = PKTNEXT(osh, p)) {
+		n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len);
+		bcopy(buf, PKTDATA(osh, p) + offset, n);
+		buf += n;
+		len -= n;
+		ret += n;
+		offset = 0;
+	}
+
+	return ret;
+}
+
+
+
+/* return total length of buffer chain */
+uint BCMFASTPATH
+pkttotlen(osl_t *osh, void *p)
+{
+	uint total;
+	int len;
+
+	total = 0;
+	for (; p; p = PKTNEXT(osh, p)) {
+		len = PKTLEN(osh, p);
+		total += len;
+#ifdef BCMLFRAG
+		if (BCMLFRAG_ENAB()) {
+			if (PKTISFRAG(osh, p)) {
+				total += PKTFRAGTOTLEN(osh, p);
+			}
+		}
+#endif
+	}
+
+	return (total);
+}
+
+/* return the last buffer of chained pkt */
+void *
+pktlast(osl_t *osh, void *p)
+{
+	for (; PKTNEXT(osh, p); p = PKTNEXT(osh, p))
+		;
+
+	return (p);
+}
+
+/* count segments of a chained packet */
+uint BCMFASTPATH
+pktsegcnt(osl_t *osh, void *p)
+{
+	uint cnt;
+
+	for (cnt = 0; p; p = PKTNEXT(osh, p)) {
+		cnt++;
+#ifdef BCMLFRAG
+		if (BCMLFRAG_ENAB()) {
+			if (PKTISFRAG(osh, p)) {
+				cnt += PKTFRAGTOTNUM(osh, p);
+			}
+		}
+#endif
+	}
+
+	return cnt;
+}
+
+
+/* count segments of a chained packet */
+uint BCMFASTPATH
+pktsegcnt_war(osl_t *osh, void *p)
+{
+	uint cnt;
+	uint8 *pktdata;
+	uint len, remain, align64;
+
+	for (cnt = 0; p; p = PKTNEXT(osh, p)) {
+		cnt++;
+		len = PKTLEN(osh, p);
+		if (len > 128) {
+			pktdata = (uint8 *)PKTDATA(osh, p);	/* starting address of data */
+			/* Check for page boundary straddle (2048B) */
+			if (((uintptr)pktdata & ~0x7ff) != ((uintptr)(pktdata+len) & ~0x7ff))
+				cnt++;
+
+			align64 = (uint)((uintptr)pktdata & 0x3f);	/* aligned to 64B */
+			align64 = (64 - align64) & 0x3f;
+			len -= align64;		/* bytes from aligned 64B to end */
+			/* if aligned to 128B, check for MOD 128 between 1 and 4B */
+			remain = len % 128;
+			if (remain > 0 && remain <= 4)
+				cnt++;		/* add extra seg */
+		}
+	}
+
+	return cnt;
+}
+
+uint8 * BCMFASTPATH
+pktdataoffset(osl_t *osh, void *p,  uint offset)
+{
+	uint total = pkttotlen(osh, p);
+	uint pkt_off = 0, len = 0;
+	uint8 *pdata = (uint8 *) PKTDATA(osh, p);
+
+	if (offset > total)
+		return NULL;
+
+	for (; p; p = PKTNEXT(osh, p)) {
+		pdata = (uint8 *) PKTDATA(osh, p);
+		pkt_off = offset - len;
+		len += PKTLEN(osh, p);
+		if (len > offset)
+			break;
+	}
+	return (uint8*) (pdata+pkt_off);
+}
+
+
+/* given an offset in pdata, find the pkt seg hdr */
+void *
+pktoffset(osl_t *osh, void *p,  uint offset)
+{
+	uint total = pkttotlen(osh, p);
+	uint len = 0;
+
+	if (offset > total)
+		return NULL;
+
+	for (; p; p = PKTNEXT(osh, p)) {
+		len += PKTLEN(osh, p);
+		if (len > offset)
+			break;
+	}
+	return p;
+}
+
+/*
+ * osl multiple-precedence packet queue
+ * hi_prec is always >= the number of the highest non-empty precedence
+ */
+void * BCMFASTPATH
+pktq_penq(struct pktq *pq, int prec, void *p)
+{
+	struct pktq_prec *q;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+	ASSERT(PKTLINK(p) == NULL);         /* queueing chains not allowed */
+
+	ASSERT(!pktq_full(pq));
+	ASSERT(!pktq_pfull(pq, prec));
+
+	q = &pq->q[prec];
+
+	if (q->head)
+		PKTSETLINK(q->tail, p);
+	else
+		q->head = p;
+
+	q->tail = p;
+	q->len++;
+
+	pq->len++;
+
+	if (pq->hi_prec < prec)
+		pq->hi_prec = (uint8)prec;
+
+	return p;
+}
+
+void * BCMFASTPATH
+pktq_penq_head(struct pktq *pq, int prec, void *p)
+{
+	struct pktq_prec *q;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+	ASSERT(PKTLINK(p) == NULL);         /* queueing chains not allowed */
+
+	ASSERT(!pktq_full(pq));
+	ASSERT(!pktq_pfull(pq, prec));
+
+	q = &pq->q[prec];
+
+	if (q->head == NULL)
+		q->tail = p;
+
+	PKTSETLINK(p, q->head);
+	q->head = p;
+	q->len++;
+
+	pq->len++;
+
+	if (pq->hi_prec < prec)
+		pq->hi_prec = (uint8)prec;
+
+	return p;
+}
+
+void * BCMFASTPATH
+pktq_pdeq(struct pktq *pq, int prec)
+{
+	struct pktq_prec *q;
+	void *p;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	if ((q->head = PKTLINK(p)) == NULL)
+		q->tail = NULL;
+
+	q->len--;
+
+	pq->len--;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+
+void * BCMFASTPATH
+pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p)
+{
+	struct pktq_prec *q;
+	void *p;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+
+	if (prev_p == NULL)
+		return NULL;
+
+	if ((p = PKTLINK(prev_p)) == NULL)
+		return NULL;
+
+	q->len--;
+
+	pq->len--;
+
+	PKTSETLINK(prev_p, PKTLINK(p));
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+
+void * BCMFASTPATH
+pktq_pdeq_with_fn(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg)
+{
+	struct pktq_prec *q;
+	void *p, *prev = NULL;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+	p = q->head;
+
+	while (p) {
+		if (fn == NULL || (*fn)(p, arg)) {
+			break;
+		} else {
+			prev = p;
+			p = PKTLINK(p);
+		}
+	}
+	if (p == NULL)
+		return NULL;
+
+	if (prev == NULL) {
+		if ((q->head = PKTLINK(p)) == NULL) {
+			q->tail = NULL;
+		}
+	} else {
+		PKTSETLINK(prev, PKTLINK(p));
+		if (q->tail == p) {
+			q->tail = prev;
+		}
+	}
+
+	q->len--;
+
+	pq->len--;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+
+void * BCMFASTPATH
+pktq_pdeq_tail(struct pktq *pq, int prec)
+{
+	struct pktq_prec *q;
+	void *p, *prev;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	for (prev = NULL; p != q->tail; p = PKTLINK(p))
+		prev = p;
+
+	if (prev)
+		PKTSETLINK(prev, NULL);
+	else
+		q->head = NULL;
+
+	q->tail = prev;
+	q->len--;
+
+	pq->len--;
+
+	return p;
+}
+
+void
+pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, ifpkt_cb_t fn, int arg)
+{
+	struct pktq_prec *q;
+	void *p, *prev = NULL;
+
+	q = &pq->q[prec];
+	p = q->head;
+	while (p) {
+		if (fn == NULL || (*fn)(p, arg)) {
+			bool head = (p == q->head);
+			if (head)
+				q->head = PKTLINK(p);
+			else
+				PKTSETLINK(prev, PKTLINK(p));
+			PKTSETLINK(p, NULL);
+			PKTFREE(osh, p, dir);
+			q->len--;
+			pq->len--;
+			p = (head ? q->head : PKTLINK(prev));
+		} else {
+			prev = p;
+			p = PKTLINK(p);
+		}
+	}
+
+	if (q->head == NULL) {
+		ASSERT(q->len == 0);
+		q->tail = NULL;
+	}
+}
+
+bool BCMFASTPATH
+pktq_pdel(struct pktq *pq, void *pktbuf, int prec)
+{
+	struct pktq_prec *q;
+	void *p;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	if (!pktbuf)
+		return FALSE;
+
+	q = &pq->q[prec];
+
+	if (q->head == pktbuf) {
+		if ((q->head = PKTLINK(pktbuf)) == NULL)
+			q->tail = NULL;
+	} else {
+		for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p))
+			;
+		if (p == NULL)
+			return FALSE;
+
+		PKTSETLINK(p, PKTLINK(pktbuf));
+		if (q->tail == pktbuf)
+			q->tail = p;
+	}
+
+	q->len--;
+	pq->len--;
+	PKTSETLINK(pktbuf, NULL);
+	return TRUE;
+}
+
+void
+pktq_init(struct pktq *pq, int num_prec, int max_len)
+{
+	int prec;
+
+	ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC);
+
+	/* pq is variable size; only zero out what's requested */
+	bzero(pq, OFFSETOF(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec));
+
+	pq->num_prec = (uint16)num_prec;
+
+	pq->max = (uint16)max_len;
+
+	for (prec = 0; prec < num_prec; prec++)
+		pq->q[prec].max = pq->max;
+}
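+
+#ifdef notdef
+/* Illustrative usage sketch (not built): a minimal init/enqueue/dequeue
+ * round trip through the multiple-precedence queue, assuming a valid osl
+ * handle 'osh' and PKTGET()-allocated packets as elsewhere in this file.
+ */
+static void
+pktq_example(osl_t *osh)
+{
+	struct pktq q;
+	void *p;
+	int prec;
+
+	pktq_init(&q, 4, 64);		/* 4 precedences, up to 64 pkts each */
+	p = PKTGET(osh, 128, TRUE);
+	if (p == NULL)
+		return;
+	pktq_penq(&q, 3, p);		/* enqueue at precedence 3 */
+	p = pktq_deq(&q, &prec);	/* dequeues the highest non-empty prec */
+	if (p != NULL)
+		PKTFREE(osh, p, TRUE);
+}
+#endif /* notdef */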
+
+void
+pktq_set_max_plen(struct pktq *pq, int prec, int max_len)
+{
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	if (prec < pq->num_prec)
+		pq->q[prec].max = (uint16)max_len;
+}
+
+void * BCMFASTPATH
+pktq_deq(struct pktq *pq, int *prec_out)
+{
+	struct pktq_prec *q;
+	void *p;
+	int prec;
+
+	if (pq->len == 0)
+		return NULL;
+
+	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+		pq->hi_prec--;
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	if ((q->head = PKTLINK(p)) == NULL)
+		q->tail = NULL;
+
+	q->len--;
+
+	pq->len--;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+
+void * BCMFASTPATH
+pktq_deq_tail(struct pktq *pq, int *prec_out)
+{
+	struct pktq_prec *q;
+	void *p, *prev;
+	int prec;
+
+	if (pq->len == 0)
+		return NULL;
+
+	for (prec = 0; prec < pq->hi_prec; prec++)
+		if (pq->q[prec].head)
+			break;
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	for (prev = NULL; p != q->tail; p = PKTLINK(p))
+		prev = p;
+
+	if (prev)
+		PKTSETLINK(prev, NULL);
+	else
+		q->head = NULL;
+
+	q->tail = prev;
+	q->len--;
+
+	pq->len--;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+
+void *
+pktq_peek(struct pktq *pq, int *prec_out)
+{
+	int prec;
+
+	if (pq->len == 0)
+		return NULL;
+
+	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+		pq->hi_prec--;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	return (pq->q[prec].head);
+}
+
+void *
+pktq_peek_tail(struct pktq *pq, int *prec_out)
+{
+	int prec;
+
+	if (pq->len == 0)
+		return NULL;
+
+	for (prec = 0; prec < pq->hi_prec; prec++)
+		if (pq->q[prec].head)
+			break;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	return (pq->q[prec].tail);
+}
+
+void
+pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg)
+{
+	int prec;
+
+	/* Optimized flush: if pktq len is 0, just return.
+	 * A pktq len of 0 means all of pktq's prec queues are empty.
+	 */
+	if (pq->len == 0) {
+		return;
+	}
+
+	for (prec = 0; prec < pq->num_prec; prec++)
+		pktq_pflush(osh, pq, prec, dir, fn, arg);
+	if (fn == NULL)
+		ASSERT(pq->len == 0);
+}
+
+/* Return sum of lengths of a specific set of precedences */
+int
+pktq_mlen(struct pktq *pq, uint prec_bmp)
+{
+	int prec, len;
+
+	len = 0;
+
+	for (prec = 0; prec <= pq->hi_prec; prec++)
+		if (prec_bmp & (1 << prec))
+			len += pq->q[prec].len;
+
+	return len;
+}
+
+/* Priority peek from a specific set of precedences */
+void * BCMFASTPATH
+pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out)
+{
+	struct pktq_prec *q;
+	void *p;
+	int prec;
+
+	if (pq->len == 0)
+		return NULL;
+
+	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+		pq->hi_prec--;
+
+	while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
+		if (prec-- == 0)
+			return NULL;
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	return p;
+}
+
+/* Priority dequeue from a specific set of precedences */
+void * BCMFASTPATH
+pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out)
+{
+	struct pktq_prec *q;
+	void *p;
+	int prec;
+
+	if (pq->len == 0)
+		return NULL;
+
+	while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+		pq->hi_prec--;
+
+	while ((pq->q[prec].head == NULL) || ((prec_bmp & (1 << prec)) == 0))
+		if (prec-- == 0)
+			return NULL;
+
+	q = &pq->q[prec];
+
+	if ((p = q->head) == NULL)
+		return NULL;
+
+	if ((q->head = PKTLINK(p)) == NULL)
+		q->tail = NULL;
+
+	q->len--;
+
+	if (prec_out)
+		*prec_out = prec;
+
+	pq->len--;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+
+#endif /* BCMDRIVER */
+
+#if !defined(BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS)
+const unsigned char bcm_ctype[] = {
+
+	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,			/* 0-7 */
+	_BCM_C, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C,
+	_BCM_C,	/* 8-15 */
+	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,			/* 16-23 */
+	_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,			/* 24-31 */
+	_BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,		/* 32-39 */
+	_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,			/* 40-47 */
+	_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,			/* 48-55 */
+	_BCM_D,_BCM_D,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,			/* 56-63 */
+	_BCM_P, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X,
+	_BCM_U|_BCM_X, _BCM_U, /* 64-71 */
+	_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,			/* 72-79 */
+	_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,			/* 80-87 */
+	_BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,			/* 88-95 */
+	_BCM_P, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X,
+	_BCM_L|_BCM_X, _BCM_L, /* 96-103 */
+	_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 104-111 */
+	_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 112-119 */
+	_BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_C, /* 120-127 */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,		/* 128-143 */
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,		/* 144-159 */
+	_BCM_S|_BCM_SP, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
+	_BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,	/* 160-175 */
+	_BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
+	_BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,	/* 176-191 */
+	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U,
+	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U,	/* 192-207 */
+	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_P, _BCM_U, _BCM_U, _BCM_U,
+	_BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_L,	/* 208-223 */
+	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L,
+	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L,	/* 224-239 */
+	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_P, _BCM_L, _BCM_L, _BCM_L,
+	_BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L /* 240-255 */
+};
+
+ulong
+bcm_strtoul(const char *cp, char **endp, uint base)
+{
+	ulong result, last_result = 0, value;
+	bool minus;
+
+	minus = FALSE;
+
+	while (bcm_isspace(*cp))
+		cp++;
+
+	if (cp[0] == '+')
+		cp++;
+	else if (cp[0] == '-') {
+		minus = TRUE;
+		cp++;
+	}
+
+	if (base == 0) {
+		if (cp[0] == '0') {
+			if ((cp[1] == 'x') || (cp[1] == 'X')) {
+				base = 16;
+				cp = &cp[2];
+			} else {
+				base = 8;
+				cp = &cp[1];
+			}
+		} else
+			base = 10;
+	} else if (base == 16 && (cp[0] == '0') && ((cp[1] == 'x') || (cp[1] == 'X'))) {
+		cp = &cp[2];
+	}
+
+	result = 0;
+
+	while (bcm_isxdigit(*cp) &&
+	       (value = bcm_isdigit(*cp) ? *cp-'0' : bcm_toupper(*cp)-'A'+10) < base) {
+		result = result*base + value;
+		/* Detected overflow */
+		if (result < last_result && !minus)
+			return (ulong)-1;
+		last_result = result;
+		cp++;
+	}
+
+	if (minus)
+		result = (ulong)(-(long)result);
+
+	if (endp)
+		*endp = DISCARD_QUAL(cp, char);
+
+	return (result);
+}
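+
+#ifdef notdef
+/* Illustrative sketch (not built): with base 0, bcm_strtoul auto-detects
+ * the radix from the prefix, as the code above implements.
+ */
+static void
+strtoul_example(void)
+{
+	char *end;
+
+	ASSERT(bcm_strtoul("0x1f", &end, 0) == 31);	/* leading 0x -> hex */
+	ASSERT(bcm_strtoul("017", &end, 0) == 15);	/* leading 0 -> octal */
+	ASSERT(bcm_strtoul("17", &end, 0) == 17);	/* default decimal */
+}
+#endif /* notdef */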
+
+int
+bcm_atoi(const char *s)
+{
+	return (int)bcm_strtoul(s, NULL, 10);
+}
+
+/* return pointer to location of substring 'needle' in 'haystack' */
+char *
+bcmstrstr(const char *haystack, const char *needle)
+{
+	int len, nlen;
+	int i;
+
+	if ((haystack == NULL) || (needle == NULL))
+		return DISCARD_QUAL(haystack, char);
+
+	nlen = strlen(needle);
+	len = strlen(haystack) - nlen + 1;
+
+	for (i = 0; i < len; i++)
+		if (memcmp(needle, &haystack[i], nlen) == 0)
+			return DISCARD_QUAL(&haystack[i], char);
+	return (NULL);
+}
+
+char *
+bcmstrcat(char *dest, const char *src)
+{
+	char *p;
+
+	p = dest + strlen(dest);
+
+	while ((*p++ = *src++) != '\0')
+		;
+
+	return (dest);
+}
+
+char *
+bcmstrncat(char *dest, const char *src, uint size)
+{
+	char *endp;
+	char *p;
+
+	p = dest + strlen(dest);
+	endp = p + size;
+
+	while (p != endp && (*p++ = *src++) != '\0')
+		;
+
+	return (dest);
+}
+
+
+/****************************************************************************
+* Function:   bcmstrtok
+*
+* Purpose:
+*  Tokenizes a string. This function is conceptually similar to ANSI C strtok(),
+*  but can be used on different strings or by different callers at the same
+*  time. Each call modifies '*string' by substituting a NULL character for the
+*  first delimiter that is encountered, and updates 'string' to point to the char
+*  after the delimiter. Leading delimiters are skipped.
+*
+* Parameters:
+*  string      (mod) Ptr to string ptr, updated by token.
+*  delimiters  (in)  Set of delimiter characters.
+*  tokdelim    (out) Character that delimits the returned token. (May
+*                    be set to NULL if token delimiter is not required).
+*
+* Returns:  Pointer to the next token found. NULL when no more tokens are found.
+*****************************************************************************
+*/
+char *
+bcmstrtok(char **string, const char *delimiters, char *tokdelim)
+{
+	unsigned char *str;
+	unsigned long map[8];
+	int count;
+	char *nextoken;
+
+	if (tokdelim != NULL) {
+		/* Prime the token delimiter */
+		*tokdelim = '\0';
+	}
+
+	/* Clear control map */
+	for (count = 0; count < 8; count++) {
+		map[count] = 0;
+	}
+
+	/* Set bits in delimiter table */
+	do {
+		map[*delimiters >> 5] |= (1 << (*delimiters & 31));
+	}
+	while (*delimiters++);
+
+	str = (unsigned char*)*string;
+
+	/* Find beginning of token (skip over leading delimiters). Note that
+	 * there is no token iff this loop sets str to point to the terminal
+	 * null (*str == '\0')
+	 */
+	while (((map[*str >> 5] & (1 << (*str & 31))) && *str) || (*str == ' ')) {
+		str++;
+	}
+
+	nextoken = (char*)str;
+
+	/* Find the end of the token. If it is not the end of the string,
+	 * put a null there.
+	 */
+	for (; *str; str++) {
+		if (map[*str >> 5] & (1 << (*str & 31))) {
+			if (tokdelim != NULL) {
+				*tokdelim = *str;
+			}
+
+			*str++ = '\0';
+			break;
+		}
+	}
+
+	*string = (char*)str;
+
+	/* Determine if a token has been found. */
+	if (nextoken == (char *) str) {
+		return NULL;
+	}
+	else {
+		return nextoken;
+	}
+}
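+
+#ifdef notdef
+/* Illustrative sketch (not built): two tokens separated by a comma, with
+ * leading whitespace skipped as described in the header comment above.
+ */
+static void
+strtok_example(void)
+{
+	char buf[] = "  foo,bar";
+	char *s = buf;
+	char d;
+	char *tok;
+
+	tok = bcmstrtok(&s, ",", &d);
+	ASSERT(tok != NULL && !strcmp(tok, "foo") && d == ',');
+	tok = bcmstrtok(&s, ",", &d);
+	ASSERT(tok != NULL && !strcmp(tok, "bar") && d == '\0');
+	ASSERT(bcmstrtok(&s, ",", &d) == NULL);	/* no tokens left */
+}
+#endif /* notdef */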
+
+
+#define xToLower(C) \
+	((C >= 'A' && C <= 'Z') ? (char)((int)C - (int)'A' + (int)'a') : C)
+
+
+/****************************************************************************
+* Function:   bcmstricmp
+*
+* Purpose:    Compare two strings case-insensitively.
+*
+* Parameters: s1 (in) First string to compare.
+*             s2 (in) Second string to compare.
+*
+* Returns:    Return 0 if the two strings are equal, -1 if s1 < s2 and 1 if
+*             s1 > s2, ignoring case.
+*****************************************************************************
+*/
+int
+bcmstricmp(const char *s1, const char *s2)
+{
+	char dc, sc;
+
+	while (*s2 && *s1) {
+		dc = xToLower(*s1);
+		sc = xToLower(*s2);
+		if (dc < sc) return -1;
+		if (dc > sc) return 1;
+		s1++;
+		s2++;
+	}
+
+	if (*s1 && !*s2) return 1;
+	if (!*s1 && *s2) return -1;
+	return 0;
+}
+
+
+/****************************************************************************
+* Function:   bcmstrnicmp
+*
+* Purpose:    Compare two strings case-insensitively, up to a max of 'cnt'
+*             characters.
+*
+* Parameters: s1  (in) First string to compare.
+*             s2  (in) Second string to compare.
+*             cnt (in) Max characters to compare.
+*
+* Returns:    Return 0 if the two strings are equal, -1 if s1 < s2 and 1 if
+*             s1 > s2, ignoring case.
+*****************************************************************************
+*/
+int
+bcmstrnicmp(const char* s1, const char* s2, int cnt)
+{
+	char dc, sc;
+
+	while (*s2 && *s1 && cnt) {
+		dc = xToLower(*s1);
+		sc = xToLower(*s2);
+		if (dc < sc) return -1;
+		if (dc > sc) return 1;
+		s1++;
+		s2++;
+		cnt--;
+	}
+
+	if (!cnt) return 0;
+	if (*s1 && !*s2) return 1;
+	if (!*s1 && *s2) return -1;
+	return 0;
+}
+
+/* parse a xx:xx:xx:xx:xx:xx format ethernet address */
+int
+bcm_ether_atoe(const char *p, struct ether_addr *ea)
+{
+	int i = 0;
+	char *ep;
+
+	for (;;) {
+		ea->octet[i++] = (char) bcm_strtoul(p, &ep, 16);
+		p = ep;
+		if (!*p++ || i == 6)
+			break;
+	}
+
+	return (i == 6);
+}
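+
+#ifdef notdef
+/* Illustrative sketch (not built): parse a MAC string and format it back;
+ * buf needs 18 bytes for "xx:xx:xx:xx:xx:xx" plus the terminating NUL.
+ */
+static void
+ether_atoe_example(void)
+{
+	struct ether_addr ea;
+	char buf[18];
+
+	if (bcm_ether_atoe("00:90:4c:c5:12:38", &ea))
+		printf("%s\n", bcm_ether_ntoa(&ea, buf));
+}
+#endif /* notdef */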
+
+int
+bcm_atoipv4(const char *p, struct ipv4_addr *ip)
+{
+
+	int i = 0;
+	char *c;
+	for (;;) {
+		ip->addr[i++] = (uint8)bcm_strtoul(p, &c, 0);
+		if (*c++ != '.' || i == IPV4_ADDR_LEN)
+			break;
+		p = c;
+	}
+	return (i == IPV4_ADDR_LEN);
+}
+#endif	/* !BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS */
+
+
+#if defined(CONFIG_USBRNDIS_RETAIL) || defined(NDIS_MINIPORT_DRIVER)
+/* registry routine buffer preparation utility functions:
+ * parameter order is like strncpy, but returns count
+ * of bytes copied. Minimum bytes copied is null char(1)/wchar(2)
+ */
+ulong
+wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen)
+{
+	ulong copyct = 1;
+	ushort i;
+
+	if (abuflen == 0)
+		return 0;
+
+	/* wbuflen is in bytes */
+	wbuflen /= sizeof(ushort);
+
+	for (i = 0; i < wbuflen; ++i) {
+		if (--abuflen == 0)
+			break;
+		*abuf++ = (char) *wbuf++;
+		++copyct;
+	}
+	*abuf = '\0';
+
+	return copyct;
+}
+#endif /* CONFIG_USBRNDIS_RETAIL || NDIS_MINIPORT_DRIVER */
+
+char *
+bcm_ether_ntoa(const struct ether_addr *ea, char *buf)
+{
+	static const char hex[] =
+	  {
+		  '0', '1', '2', '3', '4', '5', '6', '7',
+		  '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'
+	  };
+	const uint8 *octet = ea->octet;
+	char *p = buf;
+	int i;
+
+	for (i = 0; i < 6; i++, octet++) {
+		*p++ = hex[(*octet >> 4) & 0xf];
+		*p++ = hex[*octet & 0xf];
+		*p++ = ':';
+	}
+
+	*(p-1) = '\0';
+
+	return (buf);
+}
+
+char *
+bcm_ip_ntoa(struct ipv4_addr *ia, char *buf)
+{
+	snprintf(buf, 16, "%d.%d.%d.%d",
+	         ia->addr[0], ia->addr[1], ia->addr[2], ia->addr[3]);
+	return (buf);
+}
+
+char *
+bcm_ipv6_ntoa(void *ipv6, char *buf)
+{
+	/* Implementing RFC 5952 Sections 4 + 5 */
+	/* Not thoroughly tested */
+	uint16 tmp[8];
+	uint16 *a = &tmp[0];
+	char *p = buf;
+	int i, i_max = -1, cnt = 0, cnt_max = 1;
+	uint8 *a4 = NULL;
+	memcpy((uint8 *)&tmp[0], (uint8 *)ipv6, IPV6_ADDR_LEN);
+
+	for (i = 0; i < IPV6_ADDR_LEN/2; i++) {
+		if (a[i]) {
+			if (cnt > cnt_max) {
+				cnt_max = cnt;
+				i_max = i - cnt;
+			}
+			cnt = 0;
+		} else
+			cnt++;
+	}
+	if (cnt > cnt_max) {
+		cnt_max = cnt;
+		i_max = i - cnt;
+	}
+	if (i_max == 0 &&
+		/* IPv4-translated: ::ffff:0:a.b.c.d */
+		((cnt_max == 4 && a[4] == 0xffff && a[5] == 0) ||
+		/* IPv4-mapped: ::ffff:a.b.c.d */
+		(cnt_max == 5 && a[5] == 0xffff)))
+		a4 = (uint8*) (a + 6);
+
+	for (i = 0; i < IPV6_ADDR_LEN/2; i++) {
+		if ((uint8*) (a + i) == a4) {
+			snprintf(p, 16, ":%u.%u.%u.%u", a4[0], a4[1], a4[2], a4[3]);
+			break;
+		} else if (i == i_max) {
+			*p++ = ':';
+			i += cnt_max - 1;
+			p[0] = ':';
+			p[1] = '\0';
+		} else {
+			if (i)
+				*p++ = ':';
+			p += snprintf(p, 8, "%x", ntoh16(a[i]));
+		}
+	}
+
+	return buf;
+}
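+
+#ifdef notdef
+/* Illustrative sketch (not built): an IPv4-mapped address renders in the
+ * mixed RFC 5952 form, e.g. "::ffff:192.0.2.1".
+ */
+static void
+ipv6_ntoa_example(void)
+{
+	uint8 ip6[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
+	                  0, 0, 0xff, 0xff, 192, 0, 2, 1 };
+	char buf[64];	/* worst case is 45 chars plus NUL */
+
+	printf("%s\n", bcm_ipv6_ntoa((void *)ip6, buf));
+}
+#endif /* notdef */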
+#ifdef BCMDRIVER
+
+void
+bcm_mdelay(uint ms)
+{
+	uint i;
+
+	for (i = 0; i < ms; i++) {
+		OSL_DELAY(1000);
+	}
+}
+
+
+
+
+
+#if defined(DHD_DEBUG)
+/* pretty hex print a pkt buffer chain */
+void
+prpkt(const char *msg, osl_t *osh, void *p0)
+{
+	void *p;
+
+	if (msg && (msg[0] != '\0'))
+		printf("%s:\n", msg);
+
+	for (p = p0; p; p = PKTNEXT(osh, p))
+		prhex(NULL, PKTDATA(osh, p), PKTLEN(osh, p));
+}
+#endif /* DHD_DEBUG */
+
+/* Takes an Ethernet frame and sets out-of-band PKTPRIO.
+ * Also updates the in-place vlan tag if requested.
+ * For debugging, it returns an indication of what it did.
+ */
+uint BCMFASTPATH
+pktsetprio(void *pkt, bool update_vtag)
+{
+	struct ether_header *eh;
+	struct ethervlan_header *evh;
+	uint8 *pktdata;
+	int priority = 0;
+	int rc = 0;
+
+	pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt);
+	ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16)));
+
+	eh = (struct ether_header *) pktdata;
+
+	if (eh->ether_type == hton16(ETHER_TYPE_8021Q)) {
+		uint16 vlan_tag;
+		int vlan_prio, dscp_prio = 0;
+
+		evh = (struct ethervlan_header *)eh;
+
+		vlan_tag = ntoh16(evh->vlan_tag);
+		vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
+
+		if (evh->ether_type == hton16(ETHER_TYPE_IP)) {
+			uint8 *ip_body = pktdata + sizeof(struct ethervlan_header);
+			uint8 tos_tc = IP_TOS46(ip_body);
+			dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+		}
+
+		/* DSCP priority gets precedence over 802.1P (vlan tag) */
+		if (dscp_prio != 0) {
+			priority = dscp_prio;
+			rc |= PKTPRIO_VDSCP;
+		} else {
+			priority = vlan_prio;
+			rc |= PKTPRIO_VLAN;
+		}
+		/*
+		 * If the DSCP priority is not the same as the VLAN priority,
+		 * then overwrite the priority field in the vlan tag with the
+		 * DSCP priority value. This is required for Linux APs because
+		 * the VLAN driver on Linux overwrites the skb->priority field
+		 * with the priority value in the vlan tag.
+		if (update_vtag && (priority != vlan_prio)) {
+			vlan_tag &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT);
+			vlan_tag |= (uint16)priority << VLAN_PRI_SHIFT;
+			evh->vlan_tag = hton16(vlan_tag);
+			rc |= PKTPRIO_UPD;
+		}
+	} else if (eh->ether_type == hton16(ETHER_TYPE_IP)) {
+		uint8 *ip_body = pktdata + sizeof(struct ether_header);
+		uint8 tos_tc = IP_TOS46(ip_body);
+		uint8 dscp = tos_tc >> IPV4_TOS_DSCP_SHIFT;
+		switch (dscp) {
+		case DSCP_EF:
+			priority = PRIO_8021D_VO;
+			break;
+		case DSCP_AF31:
+		case DSCP_AF32:
+		case DSCP_AF33:
+			priority = PRIO_8021D_CL;
+			break;
+		case DSCP_AF21:
+		case DSCP_AF22:
+		case DSCP_AF23:
+		case DSCP_AF11:
+		case DSCP_AF12:
+		case DSCP_AF13:
+			priority = PRIO_8021D_EE;
+			break;
+		default:
+			priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+			break;
+		}
+
+		rc |= PKTPRIO_DSCP;
+	}
+
+	ASSERT(priority >= 0 && priority <= MAXPRIO);
+	PKTSETPRIO(pkt, priority);
+	return (rc | priority);
+}
+
+
+static char bcm_undeferrstr[32];
+static const char *bcmerrorstrtable[] = BCMERRSTRINGTABLE;
+
+/* Convert the error codes into related error strings  */
+const char *
+bcmerrorstr(int bcmerror)
+{
+	/* check if someone added a bcmerror code but forgot to add errorstring */
+	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1));
+
+	if (bcmerror > 0 || bcmerror < BCME_LAST) {
+		snprintf(bcm_undeferrstr, sizeof(bcm_undeferrstr), "Undefined error %d", bcmerror);
+		return bcm_undeferrstr;
+	}
+
+	ASSERT(strlen(bcmerrorstrtable[-bcmerror]) < BCME_STRLEN);
+
+	return bcmerrorstrtable[-bcmerror];
+}
+
+
+
+/* iovar table lookup */
+const bcm_iovar_t*
+bcm_iovar_lookup(const bcm_iovar_t *table, const char *name)
+{
+	const bcm_iovar_t *vi;
+	const char *lookup_name;
+
+	/* skip any ':' delimited option prefixes */
+	lookup_name = strrchr(name, ':');
+	if (lookup_name != NULL)
+		lookup_name++;
+	else
+		lookup_name = name;
+
+	ASSERT(table != NULL);
+
+	for (vi = table; vi->name; vi++) {
+		if (!strcmp(vi->name, lookup_name))
+			return vi;
+	}
+	/* ran to end of table */
+
+	return NULL; /* var name not found */
+}
+
+int
+bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set)
+{
+	int bcmerror = 0;
+
+	/* length check on io buf */
+	switch (vi->type) {
+	case IOVT_BOOL:
+	case IOVT_INT8:
+	case IOVT_INT16:
+	case IOVT_INT32:
+	case IOVT_UINT8:
+	case IOVT_UINT16:
+	case IOVT_UINT32:
+		/* all integers are int32 sized args at the ioctl interface */
+		if (len < (int)sizeof(int)) {
+			bcmerror = BCME_BUFTOOSHORT;
+		}
+		break;
+
+	case IOVT_BUFFER:
+		/* buffer must meet minimum length requirement */
+		if (len < vi->minlen) {
+			bcmerror = BCME_BUFTOOSHORT;
+		}
+		break;
+
+	case IOVT_VOID:
+		if (!set) {
+			/* Cannot return nil... */
+			bcmerror = BCME_UNSUPPORTED;
+		} else if (len) {
+			/* Set is an action w/o parameters */
+			bcmerror = BCME_BUFTOOLONG;
+		}
+		break;
+
+	default:
+		/* unknown type for length check in iovar info */
+		ASSERT(0);
+		bcmerror = BCME_UNSUPPORTED;
+	}
+
+	return bcmerror;
+}
+
+#endif	/* BCMDRIVER */
+
+
+uint8 *
+bcm_write_tlv(int type, const void *data, int datalen, uint8 *dst)
+{
+	uint8 *new_dst = dst;
+	bcm_tlv_t *dst_tlv = (bcm_tlv_t *)dst;
+
+	/* dst buffer should always be valid */
+	ASSERT(dst);
+
+	/* data len must be within valid range */
+	ASSERT((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE));
+
+	/* source data buffer pointer should be valid, unless datalen is 0
+	 * meaning no data with this TLV
+	 */
+	ASSERT((data != NULL) || (datalen == 0));
+
+	/* only do work if the inputs are valid
+	 * - must have a dst to write to AND
+	 * - datalen must be within range AND
+	 * - the source data pointer must be non-NULL if datalen is non-zero
+	 * (this last condition detects datalen > 0 with a NULL data pointer)
+	 */
+	if ((dst != NULL) &&
+	    ((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE)) &&
+	    ((data != NULL) || (datalen == 0))) {
+
+		/* write type, len fields */
+		dst_tlv->id = (uint8)type;
+		dst_tlv->len = (uint8)datalen;
+
+		/* if data is present, copy to the output buffer and update
+		 * pointer to output buffer
+		 */
+		if (datalen > 0) {
+			memcpy(dst_tlv->data, data, datalen);
+		}
+
+		/* update the output destination pointer to point past
+		 * the TLV written
+		 */
+		new_dst = dst + BCM_TLV_HDR_SIZE + datalen;
+	}
+
+	return (new_dst);
+}
+
+uint8 *
+bcm_write_tlv_safe(int type, const void *data, int datalen, uint8 *dst, int dst_maxlen)
+{
+	uint8 *new_dst = dst;
+
+	if ((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE)) {
+		/* if len + tlv hdr len is more than dst_maxlen, don't do
+		 * anything; just return the buffer untouched
+		 */
+		if ((int)(datalen + BCM_TLV_HDR_SIZE) <= dst_maxlen) {
+			new_dst = bcm_write_tlv(type, data, datalen, dst);
+		}
+	}
+
+	return (new_dst);
+}
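+
+#ifdef notdef
+/* Illustrative sketch (not built): write one TLV into a bounded buffer;
+ * on success the returned pointer advances past the header plus data.
+ */
+static void
+tlv_write_example(void)
+{
+	uint8 buf[8];
+	uint8 data[3] = { 0x00, 0x11, 0x22 };
+	uint8 *end;
+
+	end = bcm_write_tlv_safe(0xdd, data, sizeof(data), buf, sizeof(buf));
+	ASSERT(end == buf + BCM_TLV_HDR_SIZE + sizeof(data));
+}
+#endif /* notdef */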
+
+uint8 *
+bcm_copy_tlv(const void *src, uint8 *dst)
+{
+	uint8 *new_dst = dst;
+	const bcm_tlv_t *src_tlv = (const bcm_tlv_t *)src;
+	uint totlen;
+
+	ASSERT(dst && src);
+	if (dst && src) {
+
+		totlen = BCM_TLV_HDR_SIZE + src_tlv->len;
+		memcpy(dst, src_tlv, totlen);
+		new_dst = dst + totlen;
+	}
+
+	return (new_dst);
+}
+
+
+uint8 *bcm_copy_tlv_safe(const void *src, uint8 *dst, int dst_maxlen)
+{
+	uint8 *new_dst = dst;
+	const bcm_tlv_t *src_tlv = (const bcm_tlv_t *)src;
+
+	ASSERT(src);
+	if (src) {
+		if (bcm_valid_tlv(src_tlv, dst_maxlen)) {
+			new_dst = bcm_copy_tlv(src, dst);
+		}
+	}
+
+	return (new_dst);
+}
+
+
+#if !defined(BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS)
+/*******************************************************************************
+ * crc8
+ *
+ * Computes a crc8 over the input data using the polynomial:
+ *
+ *       x^8 + x^7 + x^6 + x^4 + x^2 + 1
+ *
+ * The caller provides the initial value (either CRC8_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data.  When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream.  When checking, a final
+ * return value of CRC8_GOOD_VALUE indicates a valid CRC.
+ *
+ * Reference: Dallas Semiconductor Application Note 27
+ *   Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
+ *     ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
+ *     ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
+ *
+ * ****************************************************************************
+ */
+
+static const uint8 crc8_table[256] = {
+    0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B,
+    0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21,
+    0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF,
+    0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5,
+    0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14,
+    0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E,
+    0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80,
+    0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA,
+    0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95,
+    0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF,
+    0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01,
+    0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B,
+    0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA,
+    0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0,
+    0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E,
+    0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34,
+    0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0,
+    0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A,
+    0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54,
+    0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E,
+    0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF,
+    0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5,
+    0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B,
+    0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61,
+    0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E,
+    0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74,
+    0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA,
+    0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0,
+    0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41,
+    0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B,
+    0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5,
+    0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F
+};
+
+#define CRC_INNER_LOOP(n, c, x) \
+	(c) = ((c) >> 8) ^ crc##n##_table[((c) ^ (x)) & 0xff]
+
+uint8
+hndcrc8(
+	uint8 *pdata,	/* pointer to array of data to process */
+	uint  nbytes,	/* number of input data bytes to process */
+	uint8 crc	/* either CRC8_INIT_VALUE or previous return value */
+)
+{
+	/* hard code the crc loop instead of using CRC_INNER_LOOP macro
+	 * to avoid the undefined and unnecessary (uint8 >> 8) operation.
+	 */
+	while (nbytes-- > 0)
+		crc = crc8_table[(crc ^ *pdata++) & 0xff];
+
+	return crc;
+}
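+
+#ifdef notdef
+/* Illustrative sketch (not built): generate and check a crc8 following the
+ * complement-and-append contract described in the header comment above.
+ */
+static void
+crc8_example(void)
+{
+	uint8 msg[6] = { 'h', 'e', 'l', 'l', 'o', 0 };
+	uint8 crc;
+
+	crc = hndcrc8(msg, 5, CRC8_INIT_VALUE);
+	msg[5] = ~crc;				/* sender appends complemented crc */
+	crc = hndcrc8(msg, 6, CRC8_INIT_VALUE);
+	ASSERT(crc == CRC8_GOOD_VALUE);		/* receiver-side check */
+}
+#endif /* notdef */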
+
+/*******************************************************************************
+ * crc16
+ *
+ * Computes a crc16 over the input data using the polynomial:
+ *
+ *       x^16 + x^12 + x^5 + 1
+ *
+ * The caller provides the initial value (either CRC16_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data.  When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream.  When checking, a final
+ * return value of CRC16_GOOD_VALUE indicates a valid CRC.
+ *
+ * Reference: Dallas Semiconductor Application Note 27
+ *   Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
+ *     ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
+ *     ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
+ *
+ * ****************************************************************************
+ */
+
+static const uint16 crc16_table[256] = {
+    0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF,
+    0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7,
+    0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E,
+    0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876,
+    0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD,
+    0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5,
+    0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C,
+    0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974,
+    0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB,
+    0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3,
+    0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A,
+    0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72,
+    0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9,
+    0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1,
+    0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738,
+    0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70,
+    0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7,
+    0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF,
+    0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036,
+    0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E,
+    0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5,
+    0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD,
+    0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134,
+    0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C,
+    0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3,
+    0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB,
+    0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232,
+    0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A,
+    0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1,
+    0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9,
+    0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330,
+    0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78
+};
+
+uint16
+hndcrc16(
+	uint8 *pdata,	/* pointer to array of data to process */
+	uint nbytes,	/* number of input data bytes to process */
+	uint16 crc	/* either CRC16_INIT_VALUE or previous return value */
+)
+{
+	while (nbytes-- > 0)
+		CRC_INNER_LOOP(16, crc, *pdata++);
+	return crc;
+}
+
+static const uint32 crc32_table[256] = {
+    0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA,
+    0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
+    0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
+    0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91,
+    0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE,
+    0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
+    0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC,
+    0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5,
+    0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
+    0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
+    0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940,
+    0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
+    0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116,
+    0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
+    0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
+    0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D,
+    0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A,
+    0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
+    0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818,
+    0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
+    0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
+    0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457,
+    0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C,
+    0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
+    0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2,
+    0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB,
+    0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
+    0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
+    0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086,
+    0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
+    0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4,
+    0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD,
+    0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
+    0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683,
+    0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
+    0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
+    0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE,
+    0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7,
+    0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
+    0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
+    0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252,
+    0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
+    0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60,
+    0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79,
+    0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
+    0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F,
+    0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04,
+    0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
+    0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A,
+    0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
+    0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
+    0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21,
+    0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E,
+    0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
+    0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C,
+    0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
+    0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
+    0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB,
+    0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0,
+    0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
+    0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6,
+    0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF,
+    0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
+    0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
+};
+
+/*
+ * crc input is CRC32_INIT_VALUE for a fresh start, or previous return value if
+ * accumulating over multiple pieces.
+ */
+uint32
+hndcrc32(uint8 *pdata, uint nbytes, uint32 crc)
+{
+	uint8 *pend;
+	pend = pdata + nbytes;
+	while (pdata < pend)
+		CRC_INNER_LOOP(32, crc, *pdata++);
+
+	return crc;
+}
+
+#ifdef notdef
+#define CLEN	1499	/* CRC length */
+#define CBUFSIZ	(CLEN + 4)
+#define CNBUFS	5	/* # of bufs */
+
+void
+testcrc32(void)
+{
+	uint j, k, l;
+	uint8 *buf;
+	uint len[CNBUFS];
+	uint32 crcr;
+	uint32 crc32tv[CNBUFS] =
+		{0xd2cb1faa, 0xd385c8fa, 0xf5b4f3f3, 0x55789e20, 0x00343110};
+
+	ASSERT((buf = MALLOC(CBUFSIZ*CNBUFS)) != NULL);
+
+	/* step through all possible alignments */
+	for (l = 0; l <= 4; l++) {
+		for (j = 0; j < CNBUFS; j++) {
+			len[j] = CLEN;
+			for (k = 0; k < len[j]; k++)
+				*(buf + j*CBUFSIZ + (k+l)) = (j+k) & 0xff;
+		}
+
+		for (j = 0; j < CNBUFS; j++) {
+			crcr = hndcrc32(buf + j*CBUFSIZ + l, len[j], CRC32_INIT_VALUE);
+			ASSERT(crcr == crc32tv[j]);
+		}
+	}
+
+	MFREE(buf, CBUFSIZ*CNBUFS);
+	return;
+}
+#endif /* notdef */
+
+/*
+ * Advance from the current 1-byte tag/1-byte length/variable-length value
+ * triple, to the next, returning a pointer to the next.
+ * If the current or next TLV is invalid (does not fit in given buffer length),
+ * NULL is returned.
+ * *buflen is not modified if the TLV elt parameter is invalid, or is decremented
+ * by the TLV parameter's length if it is valid.
+ */
+bcm_tlv_t *
+bcm_next_tlv(bcm_tlv_t *elt, int *buflen)
+{
+	int len;
+
+	/* validate current elt */
+	if (!bcm_valid_tlv(elt, *buflen)) {
+		return NULL;
+	}
+
+	/* advance to next elt */
+	len = elt->len;
+	elt = (bcm_tlv_t*)(elt->data + len);
+	*buflen -= (TLV_HDR_LEN + len);
+
+	/* validate next elt */
+	if (!bcm_valid_tlv(elt, *buflen)) {
+		return NULL;
+	}
+
+	return elt;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ */
+bcm_tlv_t *
+bcm_parse_tlvs(void *buf, int buflen, uint key)
+{
+	bcm_tlv_t *elt;
+	int totlen;
+
+	elt = (bcm_tlv_t*)buf;
+	totlen = buflen;
+
+	/* find tagged parameter */
+	while (totlen >= TLV_HDR_LEN) {
+		int len = elt->len;
+
+		/* validate remaining totlen */
+		if ((elt->id == key) && (totlen >= (int)(len + TLV_HDR_LEN))) {
+
+			return (elt);
+		}
+
+		elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN));
+		totlen -= (len + TLV_HDR_LEN);
+	}
+
+	return NULL;
+}
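+
+#ifdef notdef
+/* Illustrative sketch (not built): look up id 2 in a flat buffer holding
+ * two TLVs, (id 1, len 1) followed by (id 2, len 2).
+ */
+static void
+tlv_parse_example(void)
+{
+	uint8 buf[] = { 1, 1, 0xaa, 2, 2, 0xbb, 0xcc };
+	bcm_tlv_t *elt;
+
+	elt = bcm_parse_tlvs(buf, sizeof(buf), 2);
+	ASSERT(elt != NULL && elt->len == 2 && elt->data[0] == 0xbb);
+}
+#endif /* notdef */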
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag.  Stop parsing when we see an element whose ID is greater
+ * than the target key.
+ */
+bcm_tlv_t *
+bcm_parse_ordered_tlvs(void *buf, int buflen, uint key)
+{
+	bcm_tlv_t *elt;
+	int totlen;
+
+	elt = (bcm_tlv_t*)buf;
+	totlen = buflen;
+
+	/* find tagged parameter */
+	while (totlen >= TLV_HDR_LEN) {
+		uint id = elt->id;
+		int len = elt->len;
+
+		/* Punt if we start seeing IDs greater than the target key */
+		if (id > key) {
+			return (NULL);
+		}
+
+		/* validate remaining totlen */
+		if ((id == key) && (totlen >= (int)(len + TLV_HDR_LEN))) {
+			return (elt);
+		}
+
+		elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN));
+		totlen -= (len + TLV_HDR_LEN);
+	}
+	return NULL;
+}
+#endif	/* !BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS */
+
+#if defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) || \
+	defined(DHD_DEBUG)
+int
+bcm_format_field(const bcm_bit_desc_ex_t *bd, uint32 flags, char* buf, int len)
+{
+	int i, slen = 0;
+	uint32 bit, mask;
+	const char *name;
+	mask = bd->mask;
+	if (len < 2 || !buf)
+		return 0;
+
+	buf[0] = '\0';
+
+	for (i = 0;  (name = bd->bitfield[i].name) != NULL; i++) {
+		bit = bd->bitfield[i].bit;
+		if ((flags & mask) == bit) {
+			if (len > (int)strlen(name)) {
+				slen = strlen(name);
+				strncpy(buf, name, slen+1);
+			}
+			break;
+		}
+	}
+	return slen;
+}
+
+int
+bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len)
+{
+	int i;
+	char* p = buf;
+	char hexstr[16];
+	int slen = 0, nlen = 0;
+	uint32 bit;
+	const char* name;
+
+	if (len < 2 || !buf)
+		return 0;
+
+	buf[0] = '\0';
+
+	for (i = 0; flags != 0; i++) {
+		bit = bd[i].bit;
+		name = bd[i].name;
+		if (bit == 0 && flags != 0) {
+			/* print any unnamed bits */
+			snprintf(hexstr, 16, "0x%X", flags);
+			name = hexstr;
+			flags = 0;	/* exit loop */
+		} else if ((flags & bit) == 0)
+			continue;
+		flags &= ~bit;
+		nlen = strlen(name);
+		slen += nlen;
+		/* count the space between flags */
+		if (flags != 0)
+			slen += 1;
+		/* need the NUL terminator as well */
+		if (len <= slen)
+			break;
+		/* copy the NUL terminator but don't count it */
+		strncpy(p, name, nlen + 1);
+		p += nlen;
+		/* copy the between-flag space and NUL terminator */
+		if (flags != 0)
+			p += snprintf(p, 2, " ");
+	}
+
+	/* indicate the str was too short */
+	if (flags != 0) {
+		if (len < 2)
+			p -= 2 - len;	/* overwrite last char */
+		p += snprintf(p, 2, ">");
+	}
+
+	return (int)(p - buf);
+}
+
+/* print bytes formatted as hex to a string. return the resulting string length */
+int
+bcm_format_hex(char *str, const void *bytes, int len)
+{
+	int i;
+	char *p = str;
+	const uint8 *src = (const uint8*)bytes;
+
+	for (i = 0; i < len; i++) {
+		p += snprintf(p, 3, "%02X", *src);
+		src++;
+	}
+	return (int)(p - str);
+}
+#endif /* WLMSG_PRHDRS || WLMSG_PRPKT || WLMSG_ASSOC || DHD_DEBUG */
+
+/* pretty hex print a contiguous buffer */
+void
+prhex(const char *msg, uchar *buf, uint nbytes)
+{
+	char line[128], *p;
+	int len = sizeof(line);
+	int nchar;
+	uint i;
+
+	if (msg && (msg[0] != '\0'))
+		printf("%s:\n", msg);
+
+	p = line;
+	for (i = 0; i < nbytes; i++) {
+		if (i % 16 == 0) {
+			nchar = snprintf(p, len, "  %04d: ", i);	/* line prefix */
+			p += nchar;
+			len -= nchar;
+		}
+		if (len > 0) {
+			nchar = snprintf(p, len, "%02x ", buf[i]);
+			p += nchar;
+			len -= nchar;
+		}
+
+		if (i % 16 == 15) {
+			printf("%s\n", line);		/* flush line */
+			p = line;
+			len = sizeof(line);
+		}
+	}
+
+	/* flush last partial line */
+	if (p != line)
+		printf("%s\n", line);
+}
+
+static const char *crypto_algo_names[] = {
+	"NONE",
+	"WEP1",
+	"TKIP",
+	"WEP128",
+	"AES_CCM",
+	"AES_OCB_MSDU",
+	"AES_OCB_MPDU",
+	"NALG",
+	"UNDEF",
+	"UNDEF",
+	"UNDEF",
+	"WAPI",
+	"PMK",
+	"BIP",
+	"AES_GCM",
+	"AES_CCM256",
+	"AES_GCM256",
+	"BIP_CMAC256",
+	"BIP_GMAC",
+	"BIP_GMAC256",
+	"UNDEF"
+};
+
+const char *
+bcm_crypto_algo_name(uint algo)
+{
+	return (algo < ARRAYSIZE(crypto_algo_names)) ? crypto_algo_names[algo] : "ERR";
+}
+
+
+char *
+bcm_chipname(uint chipid, char *buf, uint len)
+{
+	const char *fmt;
+
+	fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+	snprintf(buf, len, fmt, chipid);
+	return buf;
+}
+
+/* Produce a human-readable string for boardrev */
+char *
+bcm_brev_str(uint32 brev, char *buf)
+{
+	if (brev < 0x100)
+		snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf);
+	else
+		snprintf(buf, 8, "%c%03x", ((brev & 0xf000) == 0x1000) ? 'P' : 'A', brev & 0xfff);
+
+	return (buf);
+}
+
+#define BUFSIZE_TODUMP_ATONCE 512 /* Buffer size */
+
+/* dump large strings to console */
+void
+printbig(char *buf)
+{
+	uint len, max_len;
+	char c;
+
+	len = strlen(buf);
+
+	max_len = BUFSIZE_TODUMP_ATONCE;
+
+	while (len > max_len) {
+		c = buf[max_len];
+		buf[max_len] = '\0';
+		printf("%s", buf);
+		buf[max_len] = c;
+
+		buf += max_len;
+		len -= max_len;
+	}
+	/* print the remaining string */
+	printf("%s\n", buf);
+	return;
+}
+
+/* routine to dump fields in a fielddesc structure */
+uint
+bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1, struct fielddesc *fielddesc_array,
+	char *buf, uint32 bufsize)
+{
+	uint  filled_len;
+	int len;
+	struct fielddesc *cur_ptr;
+
+	filled_len = 0;
+	cur_ptr = fielddesc_array;
+
+	while (bufsize > 1) {
+		if (cur_ptr->nameandfmt == NULL)
+			break;
+		len = snprintf(buf, bufsize, cur_ptr->nameandfmt,
+		               read_rtn(arg0, arg1, cur_ptr->offset));
+		/* check for snprintf overflow or error */
+		if (len < 0 || (uint32)len >= bufsize)
+			len = bufsize - 1;
+		buf += len;
+		bufsize -= len;
+		filled_len += len;
+		cur_ptr++;
+	}
+	return filled_len;
+}
+
+uint
+bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
+{
+	uint len;
+
+	len = strlen(name) + 1;
+
+	if ((len + datalen) > buflen)
+		return 0;
+
+	strncpy(buf, name, buflen);
+
+	/* append data onto the end of the name string */
+	memcpy(&buf[len], data, datalen);
+	len += datalen;
+
+	return len;
+}
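+
+/*
+ * Illustrative sketch (not compiled): bcm_mkiovar() lays out "<name>\0<data>"
+ * in the caller's buffer and returns the combined length, or 0 if it does
+ * not fit. The iovar name "mpc" and its payload are example values only.
+ */
+#ifdef notdef
+static void
+example_mkiovar(void)
+{
+	char iovbuf[32];
+	uint32 val = 1;
+	uint len;
+
+	len = bcm_mkiovar("mpc", (char *)&val, sizeof(val), iovbuf, sizeof(iovbuf));
+	/* on success len == strlen("mpc") + 1 + sizeof(val) == 8 */
+	ASSERT(len == 8);
+}
+#endif /* notdef */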
+
+/* Quarter dBm units to mW
+ * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
+ * Table is offset so the last entry is largest mW value that fits in
+ * a uint16.
+ */
+
+#define QDBM_OFFSET 153		/* Offset for first entry */
+#define QDBM_TABLE_LEN 40	/* Table size */
+
+/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET.
+ * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2
+ */
+#define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */
+
+/* Largest mW value that will round down to the last table entry,
+ * QDBM_OFFSET + QDBM_TABLE_LEN-1.
+ * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2.
+ */
+#define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */
+
+static const uint16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = {
+/* qdBm: 	+0 	+1 	+2 	+3 	+4 	+5 	+6 	+7 */
+/* 153: */      6683,	7079,	7499,	7943,	8414,	8913,	9441,	10000,
+/* 161: */      10593,	11220,	11885,	12589,	13335,	14125,	14962,	15849,
+/* 169: */      16788,	17783,	18836,	19953,	21135,	22387,	23714,	25119,
+/* 177: */      26607,	28184,	29854,	31623,	33497,	35481,	37584,	39811,
+/* 185: */      42170,	44668,	47315,	50119,	53088,	56234,	59566,	63096
+};
+
+uint16
+bcm_qdbm_to_mw(uint8 qdbm)
+{
+	uint factor = 1;
+	int idx = qdbm - QDBM_OFFSET;
+
+	if (idx >= QDBM_TABLE_LEN) {
+		/* clamp to max uint16 mW value */
+		return 0xFFFF;
+	}
+
+	/* scale the qdBm index up into the table range 0-39,
+	 * where each 40 qdBm (10 dB) step corresponds to a factor of 10 in mW.
+	 */
+	while (idx < 0) {
+		idx += 40;
+		factor *= 10;
+	}
+
+	/* return the mW value scaled down to the correct factor of 10,
+	 * adding in factor/2 to get proper rounding.
+	 */
+	return ((nqdBm_to_mW_map[idx] + factor/2) / factor);
+}
+
+uint8
+bcm_mw_to_qdbm(uint16 mw)
+{
+	uint8 qdbm;
+	int offset;
+	uint mw_uint = mw;
+	uint boundary;
+
+	/* handle boundary case */
+	if (mw_uint <= 1)
+		return 0;
+
+	offset = QDBM_OFFSET;
+
+	/* move mw into the range of the table */
+	while (mw_uint < QDBM_TABLE_LOW_BOUND) {
+		mw_uint *= 10;
+		offset -= 40;
+	}
+
+	for (qdbm = 0; qdbm < QDBM_TABLE_LEN-1; qdbm++) {
+		boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm+1] -
+		                                    nqdBm_to_mW_map[qdbm])/2;
+		if (mw_uint < boundary) break;
+	}
+
+	qdbm += (uint8)offset;
+
+	return (qdbm);
+}
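+
+/*
+ * Worked example (sketch, not compiled): 17 dBm is 68 qdBm. 68 - 153 = -85,
+ * so bcm_qdbm_to_mw() adds 40 three times (idx 35, factor 1000) and scales
+ * table entry 50119 to (50119 + 500) / 1000 = 50 mW, i.e. roughly 10^(17/10).
+ */
+#ifdef notdef
+static void
+example_qdbm(void)
+{
+	/* 17 dBm == 68 qdBm; expect ~50 mW, and the mapping round-trips */
+	ASSERT(bcm_qdbm_to_mw(68) == 50);
+	ASSERT(bcm_mw_to_qdbm(50) == 68);
+}
+#endif /* notdef */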
+
+
+uint
+bcm_bitcount(uint8 *bitmap, uint length)
+{
+	uint bitcount = 0, i;
+	uint8 tmp;
+	for (i = 0; i < length; i++) {
+		tmp = bitmap[i];
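+		/* tmp &= (tmp - 1) clears the lowest set bit per pass (Kernighan's method) */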
+		while (tmp) {
+			bitcount++;
+			tmp &= (tmp - 1);
+		}
+	}
+	return bitcount;
+}
+
+#ifdef BCMDRIVER
+
+/* Initialization of bcmstrbuf structure */
+void
+bcm_binit(struct bcmstrbuf *b, char *buf, uint size)
+{
+	b->origsize = b->size = size;
+	b->origbuf = b->buf = buf;
+}
+
+/* Buffer sprintf wrapper to guard against buffer overflow */
+int
+bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...)
+{
+	va_list ap;
+	int r;
+
+	va_start(ap, fmt);
+
+	r = vsnprintf(b->buf, b->size, fmt, ap);
+
+	/* On overflow, non-ANSI implementations return -1,
+	 * C99-compliant ones return r >= b->size,
+	 * and bcmstdlib returns 0; handle all three.
+	 */
+	/* r == 0 is also the case when strlen(fmt) is zero,
+	 * typically when "" is passed as the format argument.
+	 */
+	if ((r == -1) || (r >= (int)b->size)) {
+		b->size = 0;
+	} else {
+		b->size -= r;
+		b->buf += r;
+	}
+
+	va_end(ap);
+
+	return r;
+}
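+
+/*
+ * Illustrative usage sketch (not compiled): bcm_binit()/bcm_bprintf()
+ * accumulate formatted text into a fixed buffer without overflowing it.
+ */
+#ifdef notdef
+static void
+example_bprintf(void)
+{
+	char buf[64];
+	struct bcmstrbuf b;
+
+	bcm_binit(&b, buf, sizeof(buf));
+	bcm_bprintf(&b, "rx %u ", 10U);
+	bcm_bprintf(&b, "tx %u", 20U);
+	/* buf now holds "rx 10 tx 20"; b.size tracks the remaining room */
+}
+#endif /* notdef */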
+
+void
+bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, uint8 *buf, int len)
+{
+	int i;
+
+	if (msg != NULL && msg[0] != '\0')
+		bcm_bprintf(b, "%s", msg);
+	for (i = 0; i < len; i ++)
+		bcm_bprintf(b, "%02X", buf[i]);
+	if (newline)
+		bcm_bprintf(b, "\n");
+}
+
+void
+bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount)
+{
+	int i;
+
+	for (i = 0; i < num_bytes; i++) {
+		num[i] += amount;
+		if (num[i] >= amount)
+			break;
+		amount = 1;
+	}
+}
+
+int
+bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes)
+{
+	int i;
+
+	for (i = nbytes - 1; i >= 0; i--) {
+		if (arg1[i] != arg2[i])
+			return (arg1[i] - arg2[i]);
+	}
+	return 0;
+}
+
+void
+bcm_print_bytes(const char *name, const uchar *data, int len)
+{
+	int i;
+	int per_line = 0;
+
+	printf("%s: %d \n", name ? name : "", len);
+	for (i = 0; i < len; i++) {
+		printf("%02x ", *data++);
+		per_line++;
+		if (per_line == 16) {
+			per_line = 0;
+			printf("\n");
+		}
+	}
+	printf("\n");
+}
+
+/* Look for vendor-specific IE with specified OUI and optional type */
+bcm_tlv_t *
+bcm_find_vendor_ie(void *tlvs, int tlvs_len, const char *voui, uint8 *type, int type_len)
+{
+	bcm_tlv_t *ie;
+	uint8 ie_len;
+
+	ie = (bcm_tlv_t*)tlvs;
+
+	/* make sure we are looking at a valid IE */
+	if (ie == NULL || !bcm_valid_tlv(ie, tlvs_len)) {
+		return NULL;
+	}
+
+	/* Walk through the IEs looking for an OUI match */
+	do {
+		ie_len = ie->len;
+		if ((ie->id == DOT11_MNG_PROPR_ID) &&
+		    (ie_len >= (DOT11_OUI_LEN + type_len)) &&
+		    !bcmp(ie->data, voui, DOT11_OUI_LEN))
+		{
+			/* compare optional type */
+			if (type_len == 0 ||
+			    !bcmp(&ie->data[DOT11_OUI_LEN], type, type_len)) {
+				return (ie);		/* a match */
+			}
+		}
+	} while ((ie = bcm_next_tlv(ie, &tlvs_len)) != NULL);
+
+	return NULL;
+}
+
+#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \
+	defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
+#define SSID_FMT_BUF_LEN	((4 * DOT11_MAX_SSID_LEN) + 1)
+
+int
+bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len)
+{
+	uint i, c;
+	char *p = buf;
+	char *endp = buf + SSID_FMT_BUF_LEN;
+
+	if (ssid_len > DOT11_MAX_SSID_LEN) ssid_len = DOT11_MAX_SSID_LEN;
+
+	for (i = 0; i < ssid_len; i++) {
+		c = (uint)ssid[i];
+		if (c == '\\') {
+			*p++ = '\\';
+			*p++ = '\\';
+		} else if (bcm_isprint((uchar)c)) {
+			*p++ = (char)c;
+		} else {
+			p += snprintf(p, (endp - p), "\\x%02X", c);
+		}
+	}
+	*p = '\0';
+	ASSERT(p < endp);
+
+	return (int)(p - buf);
+}
+#endif /* WLTINYDUMP || WLMSG_INFORM || WLMSG_ASSOC || WLMSG_PRPKT || WLMSG_WSEC */
+
+#endif /* BCMDRIVER */
+
+/*
+ * ProcessVars: Takes a buffer of "<var>=<value>\n" lines read from a file and ending in a NUL.
+ * Also accepts nvram files already in the format <var1>=<value1>\0<var2>=<value2>\0.
+ * Removes carriage returns, empty lines, and comment lines, and converts newlines to NULs.
+ * Shortens the buffer as needed and pads with NULs.  End of buffer is marked by two NULs.
+ */
+
+unsigned int
+process_nvram_vars(char *varbuf, unsigned int len)
+{
+	char *dp;
+	bool findNewline;
+	int column;
+	unsigned int buf_len, n;
+	unsigned int pad = 0;
+
+	dp = varbuf;
+
+	findNewline = FALSE;
+	column = 0;
+
+	for (n = 0; n < len; n++) {
+		if (varbuf[n] == '\r')
+			continue;
+		if (findNewline && varbuf[n] != '\n')
+			continue;
+		findNewline = FALSE;
+		if (varbuf[n] == '#') {
+			findNewline = TRUE;
+			continue;
+		}
+		if (varbuf[n] == '\n') {
+			if (column == 0)
+				continue;
+			*dp++ = 0;
+			column = 0;
+			continue;
+		}
+		*dp++ = varbuf[n];
+		column++;
+	}
+	buf_len = (unsigned int)(dp - varbuf);
+	if (buf_len % 4) {
+		pad = 4 - buf_len % 4;
+		if (pad && (buf_len + pad <= len)) {
+			buf_len += pad;
+		}
+	}
+
+	while (dp < varbuf + n)
+		*dp++ = 0;
+
+	return buf_len;
+}
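+
+/*
+ * Illustrative sketch (not compiled): comment lines and CR/LF pairs are
+ * compacted in place into NUL-separated "var=value" records; the variable
+ * names below are example values only.
+ */
+#ifdef notdef
+static void
+example_nvram(void)
+{
+	char buf[] = "# comment\r\nboardtype=0x0532\r\nxtalfreq=37400\r\n";
+	unsigned int n;
+
+	/* sizeof(buf) - 1 excludes the string literal's own terminator */
+	n = process_nvram_vars(buf, sizeof(buf) - 1);
+	/* buf now holds "boardtype=0x0532\0xtalfreq=37400\0" plus NUL padding */
+}
+#endif /* notdef */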
+
+/* calculate a * b + c */
+void
+bcm_uint64_multiple_add(uint32* r_high, uint32* r_low, uint32 a, uint32 b, uint32 c)
+{
+#define FORMALIZE(var) {cc += (var & 0x80000000) ? 1 : 0; var &= 0x7fffffff;}
+	uint32 r1, r0;
+	uint32 a1, a0, b1, b0, t, cc = 0;
+
+	a1 = a >> 16;
+	a0 = a & 0xffff;
+	b1 = b >> 16;
+	b0 = b & 0xffff;
+
+	r0 = a0 * b0;
+	FORMALIZE(r0);
+
+	t = (a1 * b0) << 16;
+	FORMALIZE(t);
+
+	r0 += t;
+	FORMALIZE(r0);
+
+	t = (a0 * b1) << 16;
+	FORMALIZE(t);
+
+	r0 += t;
+	FORMALIZE(r0);
+
+	FORMALIZE(c);
+
+	r0 += c;
+	FORMALIZE(r0);
+
+	r0 |= (cc % 2) ? 0x80000000 : 0;
+	r1 = a1 * b1 + ((a1 * b0) >> 16) + ((b1 * a0) >> 16) + (cc / 2);
+
+	*r_high = r1;
+	*r_low = r0;
+}
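+
+/*
+ * Sanity-check sketch (not compiled): the 32x32+32 -> 64 bit result can be
+ * verified against native 64-bit arithmetic where the compiler provides it.
+ */
+#ifdef notdef
+static void
+example_muladd(void)
+{
+	uint32 hi, lo;
+	unsigned long long ref = (unsigned long long)0xDEADBEEF * 0x12345678 + 0xCAFE;
+
+	bcm_uint64_multiple_add(&hi, &lo, 0xDEADBEEF, 0x12345678, 0xCAFE);
+	ASSERT(hi == (uint32)(ref >> 32));
+	ASSERT(lo == (uint32)ref);
+}
+#endif /* notdef */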
+
+/* calculate a / b */
+void
+bcm_uint64_divide(uint32* r, uint32 a_high, uint32 a_low, uint32 b)
+{
+	uint32 a1 = a_high, a0 = a_low, r0 = 0;
+
+	if (b < 2)
+		return;
+
+	while (a1 != 0) {
+		r0 += (0xffffffff / b) * a1;
+		bcm_uint64_multiple_add(&a1, &a0, ((0xffffffff % b) + 1) % b, a1, a0);
+	}
+
+	r0 += a0 / b;
+	*r = r0;
+}
+
+#ifndef setbit /* As in the header file */
+#ifdef BCMUTILS_BIT_MACROS_USE_FUNCS
+/* Set bit in byte array. */
+void
+setbit(void *array, uint bit)
+{
+	((uint8 *)array)[bit / NBBY] |= 1 << (bit % NBBY);
+}
+
+/* Clear bit in byte array. */
+void
+clrbit(void *array, uint bit)
+{
+	((uint8 *)array)[bit / NBBY] &= ~(1 << (bit % NBBY));
+}
+
+/* Test if bit is set in byte array. */
+bool
+isset(const void *array, uint bit)
+{
+	return (((const uint8 *)array)[bit / NBBY] & (1 << (bit % NBBY)));
+}
+
+/* Test if bit is clear in byte array. */
+bool
+isclr(const void *array, uint bit)
+{
+	return ((((const uint8 *)array)[bit / NBBY] & (1 << (bit % NBBY))) == 0);
+}
+#endif /* BCMUTILS_BIT_MACROS_USE_FUNCS */
+#endif /* setbit */
+
+void
+bcm_bitprint32(const uint32 u32)
+{
+	int i;
+	for (i = NBITS(uint32) - 1; i >= 0; i--) {
+		isbitset(u32, i) ? printf("1") : printf("0");
+		if ((i % NBBY) == 0) printf(" ");
+	}
+	printf("\n");
+}
+
+#ifdef BCMDRIVER
+/*
+ * Hierarchical Multiword bitmap based small id allocator.
+ *
+ * Multilevel hierarchy bitmap. (maximum 2 levels)
+ * First hierarchy uses a multiword bitmap to identify 32bit words in the
+ * second hierarchy that have at least a single bit set. Each bit in a word of
+ * the second hierarchy represents a unique ID that may be allocated.
+ *
+ * BCM_MWBMAP_ITEMS_MAX: Maximum number of IDs managed.
+ * BCM_MWBMAP_BITS_WORD: Number of bits in a bitmap word.
+ * BCM_MWBMAP_WORDS_MAX: Maximum number of bitmap words needed for free IDs.
+ * BCM_MWBMAP_WDMAP_MAX: Maximum number of first-level bitmap words, each bit
+ *                       identifying a non-zero bitmap word carrying at least
+ *                       one free ID.
+ * BCM_MWBMAP_SHIFT_OP:  Used in MOD, DIV and MUL operations.
+ * BCM_MWBMAP_INVALID_IDX: Value ~0U is treated as an invalid ID
+ *
+ * Design Notes:
+ * BCM_MWBMAP_USE_CNTSETBITS trades CPU for memory. When defined, the number of
+ * set bits is computed on each allocation and deallocation, requiring 4
+ * array-indexed accesses and 3 arithmetic operations. When not defined, a
+ * running count of set bits is maintained instead, needing up to 32 bytes per
+ * 1024 IDs. In a 4K max ID allocator, up to 128 bytes are hence used per
+ * instantiation. In a memory-limited system, e.g. dongle builds, CPU may be
+ * traded for memory by defining BCM_MWBMAP_USE_CNTSETBITS.
+ *
+ * Note: wd_bitmap[] is statically declared and is not ROM friendly ... array
+ * size is fixed. There is no intention to support larger than 4K index
+ * allocation. ID allocators for ranges smaller than 4K waste only 12 bytes,
+ * with the saving of not needing an indirect access had the array been
+ * dynamically allocated.
+ */
+#define BCM_MWBMAP_ITEMS_MAX    (4 * 1024)  /* May increase to 16K */
+
+#define BCM_MWBMAP_BITS_WORD    (NBITS(uint32))
+#define BCM_MWBMAP_WORDS_MAX    (BCM_MWBMAP_ITEMS_MAX / BCM_MWBMAP_BITS_WORD)
+#define BCM_MWBMAP_WDMAP_MAX    (BCM_MWBMAP_WORDS_MAX / BCM_MWBMAP_BITS_WORD)
+#define BCM_MWBMAP_SHIFT_OP     (5)
+#define BCM_MWBMAP_MODOP(ix)    ((ix) & (BCM_MWBMAP_BITS_WORD - 1))
+#define BCM_MWBMAP_DIVOP(ix)    ((ix) >> BCM_MWBMAP_SHIFT_OP)
+#define BCM_MWBMAP_MULOP(ix)    ((ix) << BCM_MWBMAP_SHIFT_OP)
+
+/* Redefine PTR() and/or HDL() conversion to invoke audit for debugging */
+#define BCM_MWBMAP_PTR(hdl)		((struct bcm_mwbmap *)(hdl))
+#define BCM_MWBMAP_HDL(ptr)		((void *)(ptr))
+
+#if defined(BCM_MWBMAP_DEBUG)
+#define BCM_MWBMAP_AUDIT(mwb) \
+	do { \
+		ASSERT((mwb != NULL) && \
+		       (((struct bcm_mwbmap *)(mwb))->magic == (void *)(mwb))); \
+		bcm_mwbmap_audit(mwb); \
+	} while (0)
+#define MWBMAP_ASSERT(exp)		ASSERT(exp)
+#define MWBMAP_DBG(x)           printf x
+#else   /* !BCM_MWBMAP_DEBUG */
+#define BCM_MWBMAP_AUDIT(mwb)   do {} while (0)
+#define MWBMAP_ASSERT(exp)		do {} while (0)
+#define MWBMAP_DBG(x)
+#endif  /* !BCM_MWBMAP_DEBUG */
+
+
+typedef struct bcm_mwbmap {     /* Hierarchical multiword bitmap allocator    */
+	uint16 wmaps;               /* Total number of words in free wd bitmap    */
+	uint16 imaps;               /* Total number of words in free id bitmap    */
+	int16  ifree;               /* Count of free indices. Used only in audits */
+	uint16 total;               /* Total indices managed by multiword bitmap  */
+
+	void * magic;               /* Audit handle parameter from user           */
+
+	uint32 wd_bitmap[BCM_MWBMAP_WDMAP_MAX]; /* 1st level bitmap of id_bitmap words */
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+	int8   wd_count[BCM_MWBMAP_WORDS_MAX];  /* free id running count, 1st lvl */
+#endif /*  ! BCM_MWBMAP_USE_CNTSETBITS */
+
+	uint32 id_bitmap[0];        /* Second level bitmap                        */
+} bcm_mwbmap_t;
+
+/* Incarnate a hierarchical multiword bitmap based small index allocator. */
+struct bcm_mwbmap *
+bcm_mwbmap_init(osl_t *osh, uint32 items_max)
+{
+	struct bcm_mwbmap * mwbmap_p;
+	uint32 wordix, size, words, extra;
+
+	/* Implementation Constraint: Uses 32bit word bitmap */
+	MWBMAP_ASSERT(BCM_MWBMAP_BITS_WORD == 32U);
+	MWBMAP_ASSERT(BCM_MWBMAP_SHIFT_OP == 5U);
+	MWBMAP_ASSERT(ISPOWEROF2(BCM_MWBMAP_ITEMS_MAX));
+	MWBMAP_ASSERT((BCM_MWBMAP_ITEMS_MAX % BCM_MWBMAP_BITS_WORD) == 0U);
+
+	ASSERT(items_max <= BCM_MWBMAP_ITEMS_MAX);
+
+	/* Determine the number of words needed in the multiword bitmap */
+	extra = BCM_MWBMAP_MODOP(items_max);
+	words = BCM_MWBMAP_DIVOP(items_max) + ((extra != 0U) ? 1U : 0U);
+
+	/* Allocate runtime state of multiword bitmap */
+	/* Note: wd_count[] or wd_bitmap[] are not dynamically allocated */
+	size = sizeof(bcm_mwbmap_t) + (sizeof(uint32) * words);
+	mwbmap_p = (bcm_mwbmap_t *)MALLOC(osh, size);
+	if (mwbmap_p == (bcm_mwbmap_t *)NULL) {
+		ASSERT(0);
+		goto error1;
+	}
+	memset(mwbmap_p, 0, size);
+
+	/* Initialize runtime multiword bitmap state */
+	mwbmap_p->imaps = (uint16)words;
+	mwbmap_p->ifree = (int16)items_max;
+	mwbmap_p->total = (uint16)items_max;
+
+	/* Setup magic, for use in audit of handle */
+	mwbmap_p->magic = BCM_MWBMAP_HDL(mwbmap_p);
+
+	/* Setup the second level bitmap of free indices */
+	/* Mark all indices as available */
+	for (wordix = 0U; wordix < mwbmap_p->imaps; wordix++) {
+		mwbmap_p->id_bitmap[wordix] = (uint32)(~0U);
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+		mwbmap_p->wd_count[wordix] = BCM_MWBMAP_BITS_WORD;
+#endif /*  ! BCM_MWBMAP_USE_CNTSETBITS */
+	}
+
+	/* Ensure that extra indices are tagged as un-available */
+	if (extra) { /* fixup the free ids in last bitmap and wd_count */
+		uint32 * bmap_p = &mwbmap_p->id_bitmap[mwbmap_p->imaps - 1];
+		*bmap_p ^= (uint32)(~0U << extra); /* fixup bitmap */
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+		mwbmap_p->wd_count[mwbmap_p->imaps - 1] = (int8)extra; /* fixup count */
+#endif /*  ! BCM_MWBMAP_USE_CNTSETBITS */
+	}
+
+	/* Setup the first level bitmap hierarchy */
+	extra = BCM_MWBMAP_MODOP(mwbmap_p->imaps);
+	words = BCM_MWBMAP_DIVOP(mwbmap_p->imaps) + ((extra != 0U) ? 1U : 0U);
+
+	mwbmap_p->wmaps = (uint16)words;
+
+	for (wordix = 0U; wordix < mwbmap_p->wmaps; wordix++)
+		mwbmap_p->wd_bitmap[wordix] = (uint32)(~0U);
+	if (extra) {
+		uint32 * bmap_p = &mwbmap_p->wd_bitmap[mwbmap_p->wmaps - 1];
+		*bmap_p ^= (uint32)(~0U << extra); /* fixup bitmap */
+	}
+
+	return mwbmap_p;
+
+error1:
+	return BCM_MWBMAP_INVALID_HDL;
+}
+
+/* Release resources used by multiword bitmap based small index allocator. */
+void
+bcm_mwbmap_fini(osl_t * osh, struct bcm_mwbmap * mwbmap_hdl)
+{
+	bcm_mwbmap_t * mwbmap_p;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	MFREE(osh, mwbmap_p, sizeof(struct bcm_mwbmap)
+	                     + (sizeof(uint32) * mwbmap_p->imaps));
+	return;
+}
+
+/* Allocate a unique small index using a multiword bitmap index allocator.    */
+uint32
+bcm_mwbmap_alloc(struct bcm_mwbmap * mwbmap_hdl)
+{
+	bcm_mwbmap_t * mwbmap_p;
+	uint32 wordix, bitmap;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	/* Start with the first hierarchy */
+	for (wordix = 0; wordix < mwbmap_p->wmaps; ++wordix) {
+
+		bitmap = mwbmap_p->wd_bitmap[wordix]; /* get the word bitmap */
+
+		if (bitmap != 0U) {
+
+			uint32 count, bitix, *bitmap_p;
+
+			bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+			/* clear all except trailing 1 */
+			bitmap   = (uint32)(((int)(bitmap)) & (-((int)(bitmap))));
+			MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) ==
+			              bcm_count_leading_zeros(bitmap));
+			bitix    = (BCM_MWBMAP_BITS_WORD - 1)
+			         - bcm_count_leading_zeros(bitmap); /* use asm clz */
+			wordix   = BCM_MWBMAP_MULOP(wordix) + bitix;
+
+			/* Clear bit if wd count is 0, without conditional branch */
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+			count = bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1;
+#else  /* ! BCM_MWBMAP_USE_CNTSETBITS */
+			mwbmap_p->wd_count[wordix]--;
+			count = mwbmap_p->wd_count[wordix];
+			MWBMAP_ASSERT(count ==
+			              (bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+			MWBMAP_ASSERT(count >= 0);
+
+			/* clear wd_bitmap bit if id_map count is 0 */
+			bitmap = (count == 0) << bitix;
+
+			MWBMAP_DBG((
+			    "Lvl1: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x wfree %d",
+			    bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap, count));
+
+			*bitmap_p ^= bitmap;
+
+			/* Use bitix in the second hierarchy */
+			bitmap_p = &mwbmap_p->id_bitmap[wordix];
+
+			bitmap = mwbmap_p->id_bitmap[wordix]; /* get the id bitmap */
+			MWBMAP_ASSERT(bitmap != 0U);
+
+			/* clear all except trailing 1 */
+			bitmap   = (uint32)(((int)(bitmap)) & (-((int)(bitmap))));
+			MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) ==
+			              bcm_count_leading_zeros(bitmap));
+			bitix    = BCM_MWBMAP_MULOP(wordix)
+			         + (BCM_MWBMAP_BITS_WORD - 1)
+			         - bcm_count_leading_zeros(bitmap); /* use asm clz */
+
+			mwbmap_p->ifree--; /* decrement system wide free count */
+			MWBMAP_ASSERT(mwbmap_p->ifree >= 0);
+
+			MWBMAP_DBG((
+			    "Lvl2: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x ifree %d",
+			    bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap,
+			    mwbmap_p->ifree));
+
+			*bitmap_p ^= bitmap; /* mark as allocated = 1b0 */
+
+			return bitix;
+		}
+	}
+
+	ASSERT(mwbmap_p->ifree == 0);
+
+	return BCM_MWBMAP_INVALID_IDX;
+}
+
+/* Force an index at a specified position to be in use */
+void
+bcm_mwbmap_force(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
+{
+	bcm_mwbmap_t * mwbmap_p;
+	uint32 count, wordix, bitmap, *bitmap_p;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	ASSERT(bitix < mwbmap_p->total);
+
+	/* Start with second hierarchy */
+	wordix   = BCM_MWBMAP_DIVOP(bitix);
+	bitmap   = (uint32)(1U << BCM_MWBMAP_MODOP(bitix));
+	bitmap_p = &mwbmap_p->id_bitmap[wordix];
+
+	ASSERT((*bitmap_p & bitmap) == bitmap);
+
+	mwbmap_p->ifree--; /* update free count */
+	ASSERT(mwbmap_p->ifree >= 0);
+
+	MWBMAP_DBG(("Lvl2: bitix<%u> wordix<%u>: %08x ^ %08x = %08x ifree %d",
+	           bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap,
+	           mwbmap_p->ifree));
+
+	*bitmap_p ^= bitmap; /* mark as in use */
+
+	/* Update first hierarchy */
+	bitix    = wordix;
+
+	wordix   = BCM_MWBMAP_DIVOP(bitix);
+	bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+	count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]);
+#else  /* ! BCM_MWBMAP_USE_CNTSETBITS */
+	mwbmap_p->wd_count[bitix]--;
+	count = mwbmap_p->wd_count[bitix];
+	MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+	MWBMAP_ASSERT(count >= 0);
+
+	bitmap   = (count == 0) << BCM_MWBMAP_MODOP(bitix);
+
+	MWBMAP_DBG(("Lvl1: bitix<%02lu> wordix<%02u>: %08x ^ %08x = %08x wfree %d",
+	           BCM_MWBMAP_MODOP(bitix), wordix, *bitmap_p, bitmap,
+	           (*bitmap_p) ^ bitmap, count));
+
+	*bitmap_p ^= bitmap; /* mark as in use */
+
+	return;
+}
+
+/* Free a previously allocated index back into the multiword bitmap allocator */
+void
+bcm_mwbmap_free(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
+{
+	bcm_mwbmap_t * mwbmap_p;
+	uint32 wordix, bitmap, *bitmap_p;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	ASSERT(bitix < mwbmap_p->total);
+
+	/* Start with second level hierarchy */
+	wordix   = BCM_MWBMAP_DIVOP(bitix);
+	bitmap   = (1U << BCM_MWBMAP_MODOP(bitix));
+	bitmap_p = &mwbmap_p->id_bitmap[wordix];
+
+	ASSERT((*bitmap_p & bitmap) == 0U);	/* ASSERT not a double free */
+
+	mwbmap_p->ifree++; /* update free count */
+	ASSERT(mwbmap_p->ifree <= mwbmap_p->total);
+
+	MWBMAP_DBG(("Lvl2: bitix<%02u> wordix<%02u>: %08x | %08x = %08x ifree %d",
+	           bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) | bitmap,
+	           mwbmap_p->ifree));
+
+	*bitmap_p |= bitmap; /* mark as available */
+
+	/* Now update first level hierarchy */
+
+	bitix    = wordix;
+
+	wordix   = BCM_MWBMAP_DIVOP(bitix); /* first level's word index */
+	bitmap   = (1U << BCM_MWBMAP_MODOP(bitix));
+	bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+	mwbmap_p->wd_count[bitix]++;
+#endif
+
+#if defined(BCM_MWBMAP_DEBUG)
+	{
+		uint32 count;
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+		count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]);
+#else  /*  ! BCM_MWBMAP_USE_CNTSETBITS */
+		count = mwbmap_p->wd_count[bitix];
+		MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]));
+#endif /*  ! BCM_MWBMAP_USE_CNTSETBITS */
+
+		MWBMAP_ASSERT(count <= BCM_MWBMAP_BITS_WORD);
+
+		MWBMAP_DBG(("Lvl1: bitix<%02u> wordix<%02u>: %08x | %08x = %08x wfree %d",
+		            bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) | bitmap, count));
+	}
+#endif /* BCM_MWBMAP_DEBUG */
+
+	*bitmap_p |= bitmap;
+
+	return;
+}
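+
+/*
+ * Illustrative usage sketch (not compiled): a 64-entry id allocator. The
+ * osl handle 'osh' is assumed to come from the caller's attach path.
+ */
+#ifdef notdef
+static void
+example_mwbmap(osl_t *osh)
+{
+	struct bcm_mwbmap *map;
+	uint32 id;
+
+	map = bcm_mwbmap_init(osh, 64);
+	if (map == BCM_MWBMAP_INVALID_HDL)
+		return;
+
+	id = bcm_mwbmap_alloc(map);         /* lowest free index, here 0 */
+	if (id != BCM_MWBMAP_INVALID_IDX)
+		bcm_mwbmap_free(map, id);   /* return it to the pool */
+
+	bcm_mwbmap_fini(osh, map);
+}
+#endif /* notdef */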
+
+/* Fetch the total number of free indices in the multiword bitmap allocator */
+uint32
+bcm_mwbmap_free_cnt(struct bcm_mwbmap * mwbmap_hdl)
+{
+	bcm_mwbmap_t * mwbmap_p;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	ASSERT(mwbmap_p->ifree >= 0);
+
+	return mwbmap_p->ifree;
+}
+
+/* Determine whether an index is in use or free */
+bool
+bcm_mwbmap_isfree(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
+{
+	bcm_mwbmap_t * mwbmap_p;
+	uint32 wordix, bitmap;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	ASSERT(bitix < mwbmap_p->total);
+
+	wordix   = BCM_MWBMAP_DIVOP(bitix);
+	bitmap   = (1U << BCM_MWBMAP_MODOP(bitix));
+
+	return ((mwbmap_p->id_bitmap[wordix] & bitmap) != 0U);
+}
+
+/* Debug dump a multiword bitmap allocator */
+void
+bcm_mwbmap_show(struct bcm_mwbmap * mwbmap_hdl)
+{
+	uint32 ix, count;
+	bcm_mwbmap_t * mwbmap_p;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	printf("mwbmap_p %p wmaps %u imaps %u ifree %d total %u\n", mwbmap_p,
+	       mwbmap_p->wmaps, mwbmap_p->imaps, mwbmap_p->ifree, mwbmap_p->total);
+	for (ix = 0U; ix < mwbmap_p->wmaps; ix++) {
+		printf("\tWDMAP:%2u. 0x%08x\t", ix, mwbmap_p->wd_bitmap[ix]);
+		bcm_bitprint32(mwbmap_p->wd_bitmap[ix]);
+		printf("\n");
+	}
+	for (ix = 0U; ix < mwbmap_p->imaps; ix++) {
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+		count = bcm_cntsetbits(mwbmap_p->id_bitmap[ix]);
+#else  /* ! BCM_MWBMAP_USE_CNTSETBITS */
+		count = mwbmap_p->wd_count[ix];
+		MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[ix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+		printf("\tIDMAP:%2u. 0x%08x %02u\t", ix, mwbmap_p->id_bitmap[ix], count);
+		bcm_bitprint32(mwbmap_p->id_bitmap[ix]);
+		printf("\n");
+	}
+
+	return;
+}
+
+/* Audit a hierarchical multiword bitmap */
+void
+bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl)
+{
+	bcm_mwbmap_t * mwbmap_p;
+	uint32 count, free_cnt = 0U, wordix, idmap_ix, bitix, *bitmap_p;
+
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	for (wordix = 0U; wordix < mwbmap_p->wmaps; ++wordix) {
+
+		bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+		for (bitix = 0U; bitix < BCM_MWBMAP_BITS_WORD; bitix++) {
+			if ((*bitmap_p) & (1 << bitix)) {
+				idmap_ix = BCM_MWBMAP_MULOP(wordix) + bitix;
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+				count = bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]);
+#else  /* ! BCM_MWBMAP_USE_CNTSETBITS */
+				count = mwbmap_p->wd_count[idmap_ix];
+				ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+				ASSERT(count != 0U);
+				free_cnt += count;
+			}
+		}
+	}
+
+	ASSERT(free_cnt == mwbmap_p->ifree);
+}
+/* END : Multiword bitmap based small id allocator. */
+
+#endif /* BCMDRIVER */
+
+/* calculate a >> b and return only the lower 32 bits */
+void
+bcm_uint64_right_shift(uint32* r, uint32 a_high, uint32 a_low, uint32 b)
+{
+	uint32 a1 = a_high, a0 = a_low, r0 = 0;
+
+	if (b == 0) {
+		r0 = a_low;
+		*r = r0;
+		return;
+	}
+
+	if (b < 32) {
+		a0 = a0 >> b;
+		a1 = a1 & ((1 << b) - 1);
+		a1 = a1 << (32 - b);
+		r0 = a0 | a1;
+		*r = r0;
+		return;
+	} else {
+		r0 = a1 >> (b - 32);
+		*r = r0;
+		return;
+	}
+
+}
+
+/* calculate a + b where a is a 64 bit number and b is a 32 bit number */
+void
+bcm_add_64(uint32* r_hi, uint32* r_lo, uint32 offset)
+{
+	uint32 r1_lo = *r_lo;
+	(*r_lo) += offset;
+	if (*r_lo < r1_lo)
+		(*r_hi) ++;
+}
+
+/* calculate a - b where a is a 64 bit number and b is a 32 bit number */
+void
+bcm_sub_64(uint32* r_hi, uint32* r_lo, uint32 offset)
+{
+	uint32 r1_lo = *r_lo;
+	(*r_lo) -= offset;
+	if (*r_lo > r1_lo)
+		(*r_hi) --;
+}
+
+#ifdef DEBUG_COUNTER
+#if (OSL_SYSUPTIME_SUPPORT == TRUE)
+void counter_printlog(counter_tbl_t *ctr_tbl)
+{
+	uint32 now;
+
+	if (!ctr_tbl->enabled)
+		return;
+
+	now = OSL_SYSUPTIME();
+
+	if (now - ctr_tbl->prev_log_print > ctr_tbl->log_print_interval) {
+		uint8 i = 0;
+		printf("counter_print(%s %d):", ctr_tbl->name, now - ctr_tbl->prev_log_print);
+
+		for (i = 0; i < ctr_tbl->needed_cnt; i++) {
+			printf(" %u", ctr_tbl->cnt[i]);
+		}
+		printf("\n");
+
+		ctr_tbl->prev_log_print = now;
+		bzero(ctr_tbl->cnt, CNTR_TBL_MAX * sizeof(uint));
+	}
+}
+#else
+/* OSL_SYSUPTIME is not supported so no way to get time */
+#define counter_printlog(a) do {} while (0)
+#endif /* OSL_SYSUPTIME_SUPPORT == TRUE */
+#endif /* DEBUG_COUNTER */
diff --git a/drivers/staging/edison-bcm43340/bcmwifi_channels.c b/drivers/staging/edison-bcm43340/bcmwifi_channels.c
new file mode 100644
index 0000000..f092699
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/bcmwifi_channels.c
@@ -0,0 +1,1199 @@
+/*
+ * Misc utility routines used by kernel or app-level.
+ * Contents are wifi-specific, used by any kernel or app-level
+ * software that might want wifi things as it grows.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: bcmwifi_channels.c 309193 2012-01-19 00:03:57Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmutils.h>
+
+#ifdef BCMDRIVER
+#include <osl.h>
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#else
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+#endif /* BCMDRIVER */
+
+#include <bcmwifi_channels.h>
+
+#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL))
+#include <bcmstdlib.h> 	/* For wl/exe/GNUmakefile.brcm_wlu and GNUmakefile.wlm_dll */
+#endif
+
+/* Definitions for D11AC capable Chanspec type */
+
+/* Chanspec ASCII representation with 802.11ac capability:
+ * [<band> 'g'] <channel> ['/'<bandwidth> [<primary-sideband>]['/'<1st80channel>'-'<2nd80channel>]]
+ *
+ * <band>:
+ *      (optional) 2, 3, 4, 5 for 2.4GHz, 3GHz, 4GHz, and 5GHz respectively.
+ *      Default value is 2g if channel <= 14, otherwise 5g.
+ * <channel>:
+ *      channel number of the 5MHz, 10MHz, 20MHz channel,
+ *      or primary channel of 40MHz, 80MHz, 160MHz, or 80+80MHz channel.
+ * <bandwidth>:
+ *      (optional) 5, 10, 20, 40, 80, 160, or 80+80. Default value is 20.
+ * <primary-sideband>:
+ *      (only for 2.4GHz band 40MHz) U for upper sideband primary, L for lower.
+ *
+ *      For 2.4GHz band 40MHz channels, the same primary channel may be the
+ *      upper sideband for one 40MHz channel, and the lower sideband for an
+ *      overlapping 40MHz channel.  The U/L disambiguates which 40MHz channel
+ *      is being specified.
+ *
+ *      For 40MHz in the 5GHz band and all channel bandwidths greater than
+ *      40MHz, the U/L specification is not allowed since the channels are
+ *      non-overlapping and the primary sub-band is derived from its
+ *      position in the wide bandwidth channel.
+ *
+ * <1st80Channel>:
+ * <2nd80Channel>:
+ *      Required for 80+80, otherwise not allowed.
+ *      Specifies the center channel of the first and second 80MHz band.
+ *
+ * In its simplest form, it is a 20MHz channel number, with the implied band
+ * of 2.4GHz if channel number <= 14, and 5GHz otherwise.
+ *
+ * To allow for backward compatibility with scripts, the old form for
+ * 40MHz channels is also allowed: <channel><ctl-sideband>
+ *
+ * <channel>:
+ *	primary channel of 40MHz, channel <= 14 is 2GHz, otherwise 5GHz
+ * <ctl-sideband>:
+ * 	"U" for upper, "L" for lower (or lower case "u" "l")
+ *
+ * 5 GHz Examples:
+ *      Chanspec        BW        Center Ch  Channel Range  Primary Ch
+ *      5g8             20MHz     8          -              -
+ *      52              20MHz     52         -              -
+ *      52/40           40MHz     54         52-56          52
+ *      56/40           40MHz     54         52-56          56
+ *      52/80           80MHz     58         52-64          52
+ *      56/80           80MHz     58         52-64          56
+ *      60/80           80MHz     58         52-64          60
+ *      64/80           80MHz     58         52-64          64
+ *      52/160          160MHz    50         36-64          52
+ *      36/160          160MHz    50         36-64          36
+ *      36/80+80/42-106 80+80MHz  42,106     36-48,100-112  36
+ *
+ * 2 GHz Examples:
+ *      Chanspec        BW        Center Ch  Channel Range  Primary Ch
+ *      2g8             20MHz     8          -              -
+ *      8               20MHz     8          -              -
+ *      6               20MHz     6          -              -
+ *      6/40l           40MHz     8          6-10           6
+ *      6l              40MHz     8          6-10           6
+ *      6/40u           40MHz     4          2-6            6
+ *      6u              40MHz     4          2-6            6
+ */
+
+/* bandwidth ASCII string */
+static const char *wf_chspec_bw_str[] =
+{
+	"5",
+	"10",
+	"20",
+	"40",
+	"80",
+	"160",
+	"80+80",
+	"na"
+};
+
+static const uint8 wf_chspec_bw_mhz[] =
+{5, 10, 20, 40, 80, 160, 160};
+
+#define WF_NUM_BW \
+	(sizeof(wf_chspec_bw_mhz)/sizeof(uint8))
+
+/* 40MHz channels in 5GHz band */
+static const uint8 wf_5g_40m_chans[] =
+{38, 46, 54, 62, 102, 110, 118, 126, 134, 142, 151, 159};
+#define WF_NUM_5G_40M_CHANS \
+	(sizeof(wf_5g_40m_chans)/sizeof(uint8))
+
+/* 80MHz channels in 5GHz band */
+static const uint8 wf_5g_80m_chans[] =
+{42, 58, 106, 122, 138, 155};
+#define WF_NUM_5G_80M_CHANS \
+	(sizeof(wf_5g_80m_chans)/sizeof(uint8))
+
+/* 160MHz channels in 5GHz band */
+static const uint8 wf_5g_160m_chans[] =
+{50, 114};
+#define WF_NUM_5G_160M_CHANS \
+	(sizeof(wf_5g_160m_chans)/sizeof(uint8))
+
+
+/* convert bandwidth from chanspec to MHz */
+static uint
+bw_chspec_to_mhz(chanspec_t chspec)
+{
+	uint bw;
+
+	bw = (chspec & WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT;
+	return (bw >= WF_NUM_BW ? 0 : wf_chspec_bw_mhz[bw]);
+}
+
+/* bw in MHz, return the channel count from the center channel to the
+ * channel at the edge of the band
+ */
+static uint8
+center_chan_to_edge(uint bw)
+{
+	/* edge channels separated by BW - 10MHz on each side
+	 * delta from cf to edge is half of that,
+	 * MHz to channel num conversion is 5MHz/channel
+	 */
+	return (uint8)(((bw - 20) / 2) / 5);
+}
+
+/* return channel number of the low edge of the band
+ * given the center channel and BW
+ */
+static uint8
+channel_low_edge(uint center_ch, uint bw)
+{
+	return (uint8)(center_ch - center_chan_to_edge(bw));
+}
+
+/* return side band number given center channel and control channel
+ * return -1 on error
+ */
+static int
+channel_to_sb(uint center_ch, uint ctl_ch, uint bw)
+{
+	uint lowest = channel_low_edge(center_ch, bw);
+	uint sb;
+
+	if ((ctl_ch - lowest) % 4) {
+		/* bad ctl channel, not a multiple of 4 */
+		return -1;
+	}
+
+	sb = ((ctl_ch - lowest) / 4);
+
+	/* sb must be an index to a 20MHz channel in range */
+	if (sb >= (bw / 20)) {
+		/* ctl_ch must have been too high for the center_ch */
+		return -1;
+	}
+
+	return sb;
+}
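+
+/* Worked example (illustration): an 80MHz channel centered at 58 has its low
+ * edge at 58 - ((80 - 20) / 2) / 5 = 52, so control channels 52/56/60/64 map
+ * to sideband indices 0..3.
+ */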
+
+/* return control channel given center channel and side band */
+static uint8
+channel_to_ctl_chan(uint center_ch, uint bw, uint sb)
+{
+	return (uint8)(channel_low_edge(center_ch, bw) + sb * 4);
+}
+
+/* return index of 80MHz channel from channel number
+ * return -1 on error
+ */
+static int
+channel_80mhz_to_id(uint ch)
+{
+	uint i;
+	for (i = 0; i < WF_NUM_5G_80M_CHANS; i ++) {
+		if (ch == wf_5g_80m_chans[i])
+			return i;
+	}
+
+	return -1;
+}
+
+/* given a chanspec and a string buffer, format the chanspec as a
+ * string, and return the original buffer pointer.
+ * Min buffer length must be CHANSPEC_STR_LEN.
+ * On error return NULL
+ */
+char *
+wf_chspec_ntoa(chanspec_t chspec, char *buf)
+{
+	const char *band;
+	uint ctl_chan;
+
+	if (wf_chspec_malformed(chspec))
+		return NULL;
+
+	band = "";
+
+	/* check for non-default band spec */
+	if ((CHSPEC_IS2G(chspec) && CHSPEC_CHANNEL(chspec) > CH_MAX_2G_CHANNEL) ||
+	    (CHSPEC_IS5G(chspec) && CHSPEC_CHANNEL(chspec) <= CH_MAX_2G_CHANNEL))
+		band = (CHSPEC_IS2G(chspec)) ? "2g" : "5g";
+
+	/* ctl channel */
+	ctl_chan = wf_chspec_ctlchan(chspec);
+
+	/* bandwidth and ctl sideband */
+	if (CHSPEC_IS20(chspec)) {
+		snprintf(buf, CHANSPEC_STR_LEN, "%s%d", band, ctl_chan);
+	} else if (!CHSPEC_IS8080(chspec)) {
+		const char *bw;
+		const char *sb = "";
+
+		bw = wf_chspec_bw_str[(chspec & WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT];
+
+#ifdef CHANSPEC_NEW_40MHZ_FORMAT
+		/* ctl sideband string if needed for 2g 40MHz */
+		if (CHSPEC_IS40(chspec) && CHSPEC_IS2G(chspec)) {
+			sb = CHSPEC_SB_UPPER(chspec) ? "u" : "l";
+		}
+
+		snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s%s", band, ctl_chan, bw, sb);
+#else
+		/* ctl sideband string instead of BW for 40MHz */
+		if (CHSPEC_IS40(chspec)) {
+			sb = CHSPEC_SB_UPPER(chspec) ? "u" : "l";
+			snprintf(buf, CHANSPEC_STR_LEN, "%s%d%s", band, ctl_chan, sb);
+		} else {
+			snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s", band, ctl_chan, bw);
+		}
+#endif /* CHANSPEC_NEW_40MHZ_FORMAT */
+
+	} else {
+		/* 80+80 */
+		uint chan1 = (chspec & WL_CHANSPEC_CHAN1_MASK) >> WL_CHANSPEC_CHAN1_SHIFT;
+		uint chan2 = (chspec & WL_CHANSPEC_CHAN2_MASK) >> WL_CHANSPEC_CHAN2_SHIFT;
+
+		/* convert to channel number */
+		chan1 = (chan1 < WF_NUM_5G_80M_CHANS) ? wf_5g_80m_chans[chan1] : 0;
+		chan2 = (chan2 < WF_NUM_5G_80M_CHANS) ? wf_5g_80m_chans[chan2] : 0;
+
+		/* Outputs a max of CHANSPEC_STR_LEN chars including '\0'  */
+		snprintf(buf, CHANSPEC_STR_LEN, "%d/80+80/%d-%d", ctl_chan, chan1, chan2);
+	}
+
+	return (buf);
+}
+
+static int
+read_uint(const char **p, unsigned int *num)
+{
+	unsigned long val;
+	char *endp = NULL;
+
+	val = strtoul(*p, &endp, 10);
+	/* if endp is the initial pointer value, then a number was not read */
+	if (endp == *p)
+		return 0;
+
+	/* advance the buffer pointer to the end of the integer string */
+	*p = endp;
+	/* return the parsed integer */
+	*num = (unsigned int)val;
+
+	return 1;
+}
+
+/* given a chanspec string, convert to a chanspec.
+ * On error return 0
+ */
+chanspec_t
+wf_chspec_aton(const char *a)
+{
+	chanspec_t chspec;
+	uint chspec_ch, chspec_band, bw, chspec_bw, chspec_sb;
+	uint num, ctl_ch;
+	uint ch1, ch2;
+	char c, sb_ul = '\0';
+	int i;
+
+	bw = 20;
+	chspec_sb = 0;
+	chspec_ch = ch1 = ch2 = 0;
+
+	/* parse channel num or band */
+	if (!read_uint(&a, &num))
+		return 0;
+
+	/* if we are looking at a 'g', then the first number was a band */
+	c = tolower((int)a[0]);
+	if (c == 'g') {
+		a ++; /* consume the char */
+
+		/* band must be "2" or "5" */
+		if (num == 2)
+			chspec_band = WL_CHANSPEC_BAND_2G;
+		else if (num == 5)
+			chspec_band = WL_CHANSPEC_BAND_5G;
+		else
+			return 0;
+
+		/* read the channel number */
+		if (!read_uint(&a, &ctl_ch))
+			return 0;
+
+		c = tolower((int)a[0]);
+	}
+	else {
+		/* first number is channel, use default for band */
+		ctl_ch = num;
+		chspec_band = ((ctl_ch <= CH_MAX_2G_CHANNEL) ?
+		               WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
+	}
+
+	if (c == '\0') {
+		/* default BW of 20MHz */
+		chspec_bw = WL_CHANSPEC_BW_20;
+		goto done_read;
+	}
+
+	a ++; /* consume the 'u','l', or '/' */
+
+	/* check 'u'/'l' */
+	if (c == 'u' || c == 'l') {
+		sb_ul = c;
+		chspec_bw = WL_CHANSPEC_BW_40;
+		goto done_read;
+	}
+
+	/* next letter must be '/' */
+	if (c != '/')
+		return 0;
+
+	/* read bandwidth */
+	if (!read_uint(&a, &bw))
+		return 0;
+
+	/* convert to chspec value */
+	if (bw == 20) {
+		chspec_bw = WL_CHANSPEC_BW_20;
+	} else if (bw == 40) {
+		chspec_bw = WL_CHANSPEC_BW_40;
+	} else if (bw == 80) {
+		chspec_bw = WL_CHANSPEC_BW_80;
+	} else if (bw == 160) {
+		chspec_bw = WL_CHANSPEC_BW_160;
+	} else {
+		return 0;
+	}
+
+	/* So far we have <band>g<chan>/<bw>
+	 * Can now be followed by u/l if bw = 40,
+	 * or '+80' if bw = 80, to make '80+80' bw.
+	 */
+
+	c = tolower((int)a[0]);
+
+	/* if we have a 2g/40 channel, we should have a l/u spec now */
+	if (chspec_band == WL_CHANSPEC_BAND_2G && bw == 40) {
+		if (c == 'u' || c == 'l') {
+			a ++; /* consume the u/l char */
+			sb_ul = c;
+			goto done_read;
+		}
+	}
+
+	/* check for 80+80 */
+	if (c == '+') {
+		/* 80+80 */
+		static const char *plus80 = "80/";
+
+		/* must be looking at '+80/'
+		 * check and consume this string.
+		 */
+		chspec_bw = WL_CHANSPEC_BW_8080;
+
+		a ++; /* consume the char '+' */
+
+		/* consume the '80/' string */
+		for (i = 0; i < 3; i++) {
+			if (*a++ != *plus80++) {
+				return 0;
+			}
+		}
+
+		/* read primary 80MHz channel */
+		if (!read_uint(&a, &ch1))
+			return 0;
+
+		/* must be followed by '-' */
+		if (a[0] != '-')
+			return 0;
+		a ++; /* consume the char */
+
+		/* read secondary 80MHz channel */
+		if (!read_uint(&a, &ch2))
+			return 0;
+	}
+
+done_read:
+	/* skip trailing white space */
+	while (a[0] == ' ') {
+		a ++;
+	}
+
+	/* must be end of string */
+	if (a[0] != '\0')
+		return 0;
+
+	/* Now have all the chanspec string parts read;
+	 * chspec_band, ctl_ch, chspec_bw, sb_ul, ch1, ch2.
+	 * chspec_band and chspec_bw are chanspec values.
+	 * Need to convert ctl_ch, sb_ul, and ch1,ch2 into
+	 * a center channel (or two) and sideband.
+	 */
+
+	/* if a sb u/l string was given, just use that,
+	 * guaranteed to be bw = 40 by the string parse.
+	 */
+	if (sb_ul != '\0') {
+		if (sb_ul == 'l') {
+			chspec_ch = UPPER_20_SB(ctl_ch);
+			chspec_sb = WL_CHANSPEC_CTL_SB_LLL;
+		} else if (sb_ul == 'u') {
+			chspec_ch = LOWER_20_SB(ctl_ch);
+			chspec_sb = WL_CHANSPEC_CTL_SB_LLU;
+		}
+	}
+	/* if the bw is 20, center and sideband are trivial */
+	else if (chspec_bw == WL_CHANSPEC_BW_20) {
+		chspec_ch = ctl_ch;
+		chspec_sb = WL_CHANSPEC_CTL_SB_NONE;
+	}
+	/* if the bw is 40/80/160, not 80+80, a single method
+	 * can be used to find the center and sideband
+	 */
+	else if (chspec_bw != WL_CHANSPEC_BW_8080) {
+		/* figure out ctl sideband based on ctl channel and bandwidth */
+		const uint8 *center_ch = NULL;
+		int num_ch = 0;
+		int sb = -1;
+
+		if (chspec_bw == WL_CHANSPEC_BW_40) {
+			center_ch = wf_5g_40m_chans;
+			num_ch = WF_NUM_5G_40M_CHANS;
+		} else if (chspec_bw == WL_CHANSPEC_BW_80) {
+			center_ch = wf_5g_80m_chans;
+			num_ch = WF_NUM_5G_80M_CHANS;
+		} else if (chspec_bw == WL_CHANSPEC_BW_160) {
+			center_ch = wf_5g_160m_chans;
+			num_ch = WF_NUM_5G_160M_CHANS;
+		} else {
+			return 0;
+		}
+
+		for (i = 0; i < num_ch; i ++) {
+			sb = channel_to_sb(center_ch[i], ctl_ch, bw);
+			if (sb >= 0) {
+				chspec_ch = center_ch[i];
+				chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT;
+				break;
+			}
+		}
+
+		/* check for no matching sb/center */
+		if (sb < 0) {
+			return 0;
+		}
+	}
+	/* Otherwise, bw is 80+80. Figure out channel pair and sb */
+	else {
+		int ch1_id = 0, ch2_id = 0;
+		int sb;
+
+		ch1_id = channel_80mhz_to_id(ch1);
+		ch2_id = channel_80mhz_to_id(ch2);
+
+		/* validate channels */
+		if (ch1 >= ch2 || ch1_id < 0 || ch2_id < 0)
+			return 0;
+
+		/* combined channel in chspec */
+		chspec_ch = (((uint16)ch1_id << WL_CHANSPEC_CHAN1_SHIFT) |
+			((uint16)ch2_id << WL_CHANSPEC_CHAN2_SHIFT));
+
+		/* figure out ctl sideband */
+
+		/* does the primary channel fit with the 1st 80MHz channel ? */
+		sb = channel_to_sb(ch1, ctl_ch, bw);
+		if (sb < 0) {
+			/* no, so does the primary channel fit with the 2nd 80MHz channel ? */
+			sb = channel_to_sb(ch2, ctl_ch, bw);
+			if (sb < 0) {
+				/* no match for ctl_ch to either 80MHz center channel */
+				return 0;
+			}
+			/* sb index is 0-3 for the low 80MHz channel, and 4-7 for
+			 * the high 80MHz channel. Add 4 to shift to the high set.
+			 */
+			sb += 4;
+		}
+
+		chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT;
+	}
+
+	chspec = (chspec_ch | chspec_band | chspec_bw | chspec_sb);
+
+	if (wf_chspec_malformed(chspec))
+		return 0;
+
+	return chspec;
+}
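+
+/*
+ * Illustrative sketch (not compiled): round-tripping one of the ASCII forms
+ * described above through wf_chspec_aton()/wf_chspec_ntoa().
+ */
+#ifdef notdef
+static void
+example_chspec_strings(void)
+{
+	char buf[CHANSPEC_STR_LEN];
+	chanspec_t cs;
+
+	cs = wf_chspec_aton("36/80");	/* 80MHz, center channel 42, primary 36 */
+	if (cs != 0 && wf_chspec_ntoa(cs, buf) != NULL)
+		printf("chanspec 0x%04x is %s\n", cs, buf);
+}
+#endif /* notdef */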
+
+/*
+ * Verify the chanspec is using a legal set of parameters, i.e. that the
+ * chanspec specified a band, bw, ctl_sb and channel and that the
+ * combination could be legal given any set of circumstances.
+ * RETURNS: TRUE if the chanspec is malformed, FALSE if it looks good.
+ */
+bool
+wf_chspec_malformed(chanspec_t chanspec)
+{
+	uint chspec_bw = CHSPEC_BW(chanspec);
+	uint chspec_ch = CHSPEC_CHANNEL(chanspec);
+
+	/* must be 2G or 5G band */
+	if (CHSPEC_IS2G(chanspec)) {
+		/* must be valid bandwidth */
+		if (chspec_bw != WL_CHANSPEC_BW_20 &&
+		    chspec_bw != WL_CHANSPEC_BW_40) {
+			return TRUE;
+		}
+	} else if (CHSPEC_IS5G(chanspec)) {
+		if (chspec_bw == WL_CHANSPEC_BW_8080) {
+			uint ch1_id, ch2_id;
+
+			/* channel number in 80+80 must be in range */
+			ch1_id = CHSPEC_CHAN1(chanspec);
+			ch2_id = CHSPEC_CHAN2(chanspec);
+			if (ch1_id >= WF_NUM_5G_80M_CHANS || ch2_id >= WF_NUM_5G_80M_CHANS)
+				return TRUE;
+
+			/* ch2 must be above ch1 for the chanspec */
+			if (ch2_id <= ch1_id)
+				return TRUE;
+		} else if (chspec_bw == WL_CHANSPEC_BW_20 || chspec_bw == WL_CHANSPEC_BW_40 ||
+		           chspec_bw == WL_CHANSPEC_BW_80 || chspec_bw == WL_CHANSPEC_BW_160) {
+
+			if (chspec_ch > MAXCHANNEL) {
+				return TRUE;
+			}
+		} else {
+			/* invalid bandwidth */
+			return TRUE;
+		}
+	} else {
+		/* must be 2G or 5G band */
+		return TRUE;
+	}
+
+	/* side band needs to be consistent with bandwidth */
+	if (chspec_bw == WL_CHANSPEC_BW_20) {
+		if (CHSPEC_CTL_SB(chanspec) != WL_CHANSPEC_CTL_SB_LLL)
+			return TRUE;
+	} else if (chspec_bw == WL_CHANSPEC_BW_40) {
+		if (CHSPEC_CTL_SB(chanspec) > WL_CHANSPEC_CTL_SB_LLU)
+			return TRUE;
+	} else if (chspec_bw == WL_CHANSPEC_BW_80) {
+		if (CHSPEC_CTL_SB(chanspec) > WL_CHANSPEC_CTL_SB_LUU)
+			return TRUE;
+	}
+
+	return FALSE;
+}
+
+/*
+ * Verify the chanspec specifies a valid channel according to 802.11.
+ * RETURNS: TRUE if the chanspec is a valid 802.11 channel
+ */
+bool
+wf_chspec_valid(chanspec_t chanspec)
+{
+	uint chspec_bw = CHSPEC_BW(chanspec);
+	uint chspec_ch = CHSPEC_CHANNEL(chanspec);
+
+	if (wf_chspec_malformed(chanspec))
+		return FALSE;
+
+	if (CHSPEC_IS2G(chanspec)) {
+		/* must be valid bandwidth and channel range */
+		if (chspec_bw == WL_CHANSPEC_BW_20) {
+			if (chspec_ch >= 1 && chspec_ch <= 14)
+				return TRUE;
+		} else if (chspec_bw == WL_CHANSPEC_BW_40) {
+			if (chspec_ch >= 3 && chspec_ch <= 11)
+				return TRUE;
+		}
+	} else if (CHSPEC_IS5G(chanspec)) {
+		if (chspec_bw == WL_CHANSPEC_BW_8080) {
+			uint16 ch1, ch2;
+
+			ch1 = wf_5g_80m_chans[CHSPEC_CHAN1(chanspec)];
+			ch2 = wf_5g_80m_chans[CHSPEC_CHAN2(chanspec)];
+
+			/* the two channels must be separated by more than 80MHz by VHT req,
+			 * and ch2 above ch1 for the chanspec
+			 */
+			if (ch2 > ch1 + CH_80MHZ_APART)
+				return TRUE;
+		} else {
+			const uint8 *center_ch;
+			uint num_ch, i;
+
+			if (chspec_bw == WL_CHANSPEC_BW_20 || chspec_bw == WL_CHANSPEC_BW_40) {
+				center_ch = wf_5g_40m_chans;
+				num_ch = WF_NUM_5G_40M_CHANS;
+			} else if (chspec_bw == WL_CHANSPEC_BW_80) {
+				center_ch = wf_5g_80m_chans;
+				num_ch = WF_NUM_5G_80M_CHANS;
+			} else if (chspec_bw == WL_CHANSPEC_BW_160) {
+				center_ch = wf_5g_160m_chans;
+				num_ch = WF_NUM_5G_160M_CHANS;
+			} else {
+				/* invalid bandwidth */
+				return FALSE;
+			}
+
+			/* check for a valid center channel */
+			if (chspec_bw == WL_CHANSPEC_BW_20) {
+				/* We don't have an array of legal 20MHz 5G channels, but they are
+				 * each side of the legal 40MHz channels.  Check the chanspec
+				 * channel against either side of the 40MHz channels.
+				 */
+				for (i = 0; i < num_ch; i ++) {
+					if (chspec_ch == (uint)LOWER_20_SB(center_ch[i]) ||
+					    chspec_ch == (uint)UPPER_20_SB(center_ch[i]))
+						break; /* match found */
+				}
+
+				if (i == num_ch) {
+					/* check for channel 165, which is not a sideband
+					 * of any 40MHz 5G channel
+					 */
+					if (chspec_ch == 165)
+						i = 0;
+
+					/* check for legacy JP channels on failure */
+					if (chspec_ch == 34 || chspec_ch == 38 ||
+					    chspec_ch == 42 || chspec_ch == 46)
+						i = 0;
+				}
+			} else {
+				/* check the chanspec channel to each legal channel */
+				for (i = 0; i < num_ch; i ++) {
+					if (chspec_ch == center_ch[i])
+						break; /* match found */
+				}
+			}
+
+			if (i < num_ch) {
+				/* match found */
+				return TRUE;
+			}
+		}
+	}
+
+	return FALSE;
+}
+
+/*
+ * This function returns the channel number that control traffic is being sent on. For 20MHz
+ * channels this is just the channel number; for 40MHz, 80MHz, and 160MHz channels it is the
+ * 20MHz sideband channel, depending on the chanspec selected.
+ */
+uint8
+wf_chspec_ctlchan(chanspec_t chspec)
+{
+	uint center_chan;
+	uint bw_mhz;
+	uint sb;
+
+	ASSERT(!wf_chspec_malformed(chspec));
+
+	/* Is there a sideband ? */
+	if (CHSPEC_IS20(chspec)) {
+		return CHSPEC_CHANNEL(chspec);
+	} else {
+		sb = CHSPEC_CTL_SB(chspec) >> WL_CHANSPEC_CTL_SB_SHIFT;
+
+		if (CHSPEC_IS8080(chspec)) {
+			bw_mhz = 80;
+
+			if (sb < 4) {
+				center_chan = CHSPEC_CHAN1(chspec);
+			}
+			else {
+				center_chan = CHSPEC_CHAN2(chspec);
+				sb -= 4;
+			}
+
+			/* convert from channel index to channel number */
+			center_chan = wf_5g_80m_chans[center_chan];
+		}
+		else {
+			bw_mhz = bw_chspec_to_mhz(chspec);
+			center_chan = CHSPEC_CHANNEL(chspec) >> WL_CHANSPEC_CHAN_SHIFT;
+		}
+
+		return (channel_to_ctl_chan(center_chan, bw_mhz, sb));
+	}
+}
+
+/*
+ * This function returns the chanspec of the control channel of a given chanspec
+ */
+chanspec_t
+wf_chspec_ctlchspec(chanspec_t chspec)
+{
+	chanspec_t ctl_chspec = chspec;
+	uint8 ctl_chan;
+
+	ASSERT(!wf_chspec_malformed(chspec));
+
+	/* Is there a sideband ? */
+	if (!CHSPEC_IS20(chspec)) {
+		ctl_chan = wf_chspec_ctlchan(chspec);
+		ctl_chspec = ctl_chan | WL_CHANSPEC_BW_20;
+		ctl_chspec |= CHSPEC_BAND(chspec);
+	}
+	return ctl_chspec;
+}
+
+/* return chanspec given control channel and bandwidth
+ * return 0 on error
+ */
+uint16
+wf_channel2chspec(uint ctl_ch, uint bw)
+{
+	uint16 chspec;
+	const uint8 *center_ch = NULL;
+	int num_ch = 0;
+	int sb = -1;
+	int i = 0;
+
+	chspec = ((ctl_ch <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
+
+	chspec |= bw;
+
+	if (bw == WL_CHANSPEC_BW_40) {
+		center_ch = wf_5g_40m_chans;
+		num_ch = WF_NUM_5G_40M_CHANS;
+		bw = 40;
+	} else if (bw == WL_CHANSPEC_BW_80) {
+		center_ch = wf_5g_80m_chans;
+		num_ch = WF_NUM_5G_80M_CHANS;
+		bw = 80;
+	} else if (bw == WL_CHANSPEC_BW_160) {
+		center_ch = wf_5g_160m_chans;
+		num_ch = WF_NUM_5G_160M_CHANS;
+		bw = 160;
+	} else if (bw == WL_CHANSPEC_BW_20) {
+		chspec |= ctl_ch;
+		return chspec;
+	} else {
+		return 0;
+	}
+
+	for (i = 0; i < num_ch; i ++) {
+		sb = channel_to_sb(center_ch[i], ctl_ch, bw);
+		if (sb >= 0) {
+			chspec |= center_ch[i];
+			chspec |= (sb << WL_CHANSPEC_CTL_SB_SHIFT);
+			break;
+		}
+	}
+
+	/* check for no matching sb/center */
+	if (sb < 0) {
+		return 0;
+	}
+
+	return chspec;
+}
+
+/*
+ * This function returns the chanspec for the primary 40MHz of an 80MHz channel.
+ * The control sideband specifies the same 20MHz channel that the 80MHz channel is using
+ * as the primary 20MHz channel.
+ */
+extern chanspec_t wf_chspec_primary40_chspec(chanspec_t chspec)
+{
+	chanspec_t chspec40 = chspec;
+	uint center_chan;
+	uint sb;
+
+	ASSERT(!wf_chspec_malformed(chspec));
+
+	if (CHSPEC_IS80(chspec)) {
+		center_chan = CHSPEC_CHANNEL(chspec);
+		sb = CHSPEC_CTL_SB(chspec);
+
+		if (sb == WL_CHANSPEC_CTL_SB_UL) {
+			/* Primary 40MHz is on upper side */
+			sb = WL_CHANSPEC_CTL_SB_L;
+			center_chan += CH_20MHZ_APART;
+		} else if (sb == WL_CHANSPEC_CTL_SB_UU) {
+			/* Primary 40MHz is on upper side */
+			sb = WL_CHANSPEC_CTL_SB_U;
+			center_chan += CH_20MHZ_APART;
+		} else {
+			/* Primary 40MHz is on lower side */
+			/* sideband bits are the same for LL/LU and L/U */
+			center_chan -= CH_20MHZ_APART;
+		}
+
+		/* Create primary 40MHz chanspec */
+		chspec40 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_40 |
+		            sb | center_chan);
+	}
+
+	return chspec40;
+}
+
+/*
+ * Return the channel number for a given frequency and base frequency.
+ * The returned channel number is relative to the given base frequency.
+ * If the given base frequency is zero, a base frequency of 5 GHz is assumed for
+ * frequencies from 5 - 6 GHz, and 2.407 GHz is assumed for 2.4 - 2.5 GHz.
+ *
+ * Frequency is specified in MHz.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for
+ * 2.4 GHz and 5 GHz bands.
+ *
+ * The returned channel will be in the range [1, 14] in the 2.4 GHz band
+ * and [0, 200] otherwise.
+ * -1 is returned if the start_factor is WF_CHAN_FACTOR_2_4_G and the
+ * frequency is not a 2.4 GHz channel, or if the frequency is not an even
+ * multiple of 5 MHz from the base frequency to the base plus 1 GHz.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ */
+int
+wf_mhz2channel(uint freq, uint start_factor)
+{
+	int ch = -1;
+	uint base;
+	int offset;
+
+	/* take the default channel start frequency */
+	if (start_factor == 0) {
+		if (freq >= 2400 && freq <= 2500)
+			start_factor = WF_CHAN_FACTOR_2_4_G;
+		else if (freq >= 5000 && freq <= 6000)
+			start_factor = WF_CHAN_FACTOR_5_G;
+	}
+
+	if (freq == 2484 && start_factor == WF_CHAN_FACTOR_2_4_G)
+		return 14;
+
+	base = start_factor / 2;
+
+	/* check that the frequency is in 1GHz range of the base */
+	if ((freq < base) || (freq > base + 1000))
+		return -1;
+
+	offset = freq - base;
+	ch = offset / 5;
+
+	/* check that frequency is a 5MHz multiple from the base */
+	if (offset != (ch * 5))
+		return -1;
+
+	/* restricted channel range check for 2.4G */
+	if (start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 13))
+		return -1;
+
+	return ch;
+}
+
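+/*
+ * Worked example for wf_mhz2channel(): with start_factor
+ * WF_CHAN_FACTOR_2_4_G (4814), base = 4814 / 2 = 2407 MHz, so freq 2412
+ * gives offset 5 and channel (2412 - 2407) / 5 = 1. Freq 2484 is
+ * special-cased to channel 14, while freq 2413 fails the 5 MHz multiple
+ * check and returns -1.
+ */
+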
+/*
+ * Return the center frequency in MHz of the given channel and base frequency.
+ * The channel number is interpreted relative to the given base frequency.
+ *
+ * The valid channel range is [1, 14] in the 2.4 GHz band and [0, 200] otherwise.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_4_G, and WF_CHAN_FACTOR_5_G
+ * are defined for 2.4 GHz, 4 GHz, and 5 GHz bands.
+ * The channel range of [1, 14] is only checked for a start_factor of
+ * WF_CHAN_FACTOR_2_4_G (4814 = 2407 * 2).
+ * Odd start_factors produce channels on .5 MHz boundaries, in which case
+ * the answer is rounded down to an integral MHz.
+ * -1 is returned for an out of range channel.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ */
+int
+wf_channel2mhz(uint ch, uint start_factor)
+{
+	int freq;
+
+	if ((start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 14)) ||
+	    (ch > 200))
+		freq = -1;
+	else if ((start_factor == WF_CHAN_FACTOR_2_4_G) && (ch == 14))
+		freq = 2484;
+	else
+		freq = ch * 5 + start_factor / 2;
+
+	return freq;
+}
+
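+/*
+ * Worked example for wf_channel2mhz(), assuming WF_CHAN_FACTOR_5_G is
+ * 10000 (a 5000 MHz base, by the start_factor convention above): channel
+ * 36 maps to 36 * 5 + 10000 / 2 = 5180 MHz, and channel 14 with
+ * WF_CHAN_FACTOR_2_4_G is special-cased to 2484 MHz.
+ */
+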
+/*
+ * Returns the 80+80 chanspec corresponding to the following input parameters
+ *
+ *    primary_20mhz - primary 20 MHz channel
+ *    chan1 - channel number of the first 80 MHz band
+ *    chan2 - channel number of the second 80 MHz band
+ *
+ *  parameters chan1 and chan2 are channel numbers in {42, 58, 106, 122, 138, 155}
+ *
+ *  returns INVCHANSPEC in case of error
+ */
+
+chanspec_t
+wf_chspec_get8080_chspec(uint8 primary_20mhz, uint8 chan1, uint8 chan2)
+{
+	int sb = 0;
+	uint16 chanspec = 0;
+	int chan1_id = 0, chan2_id = 0;
+
+	/* does the primary channel fit with the 1st 80MHz channel ? */
+	sb = channel_to_sb(chan1, primary_20mhz, 80);
+	if (sb < 0) {
+		/* no, so does the primary channel fit with the 2nd 80MHz channel ? */
+		sb = channel_to_sb(chan2, primary_20mhz, 80);
+		if (sb < 0) {
+			/* no match for ctl_ch to either 80MHz center channel */
+			return INVCHANSPEC;
+		}
+		/* sb index is 0-3 for the low 80MHz channel, and 4-7 for
+		 * the high 80MHz channel. Add 4 to shift to the high set.
+		 */
+		sb += 4;
+	}
+	chan1_id = channel_80mhz_to_id(chan1);
+	chan2_id = channel_80mhz_to_id(chan2);
+	if (chan1_id == -1 || chan2_id == -1)
+		return INVCHANSPEC;
+
+	chanspec = (chan1_id << WL_CHANSPEC_CHAN1_SHIFT)|
+		(chan2_id << WL_CHANSPEC_CHAN2_SHIFT)|
+		(sb << WL_CHANSPEC_CTL_SB_SHIFT)|
+		(WL_CHANSPEC_BW_8080)|
+		(WL_CHANSPEC_BAND_5G);
+
+	return chanspec;
+
+}
+
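+/*
+ * Worked example for wf_chspec_get8080_chspec(): with primary_20mhz 36,
+ * chan1 42 and chan2 106, channel_to_sb(42, 36, 80) yields sideband 0
+ * (36 is the lowest 20MHz subchannel of 42), the two center channels
+ * resolve to ids 0 and 2 in the {42, 58, 106, 122, 138, 155} table, and
+ * the result packs those ids, the sideband, WL_CHANSPEC_BW_8080 and
+ * WL_CHANSPEC_BAND_5G.
+ */
+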
+/*
+ * This function returns the 80MHz center channel for the given 80MHz id.
+ */
+static uint8
+wf_chspec_get80Mhz_ch(uint8 chan_80Mhz_id)
+{
+	if (chan_80Mhz_id < WF_NUM_5G_80M_CHANS)
+		return wf_5g_80m_chans[chan_80Mhz_id];
+
+	return 0;
+}
+
+/*
+ * Returns the primary 80 MHz channel for the provided chanspec
+ *
+ *    chanspec - input chanspec for which the 80MHz primary channel has to be retrieved
+ *
+ *  returns -1 (0xff, given the uint8 return type) for 20/40 MHz chanspecs
+ */
+
+uint8
+wf_chspec_primary80_channel(chanspec_t chanspec)
+{
+	uint8 chan1 = 0, chan2 = 0, primary_20mhz = 0, primary80_chan = 0;
+	int sb = 0;
+
+	primary_20mhz = wf_chspec_ctlchan(chanspec);
+
+	if (CHSPEC_IS80(chanspec)) {
+		primary80_chan = CHSPEC_CHANNEL(chanspec);
+	} else if (CHSPEC_IS8080(chanspec)) {
+		chan1 = wf_chspec_get80Mhz_ch(CHSPEC_CHAN1(chanspec));
+		chan2 = wf_chspec_get80Mhz_ch(CHSPEC_CHAN2(chanspec));
+
+		/* does the primary channel fit with the 1st 80MHz channel ? */
+		sb = channel_to_sb(chan1, primary_20mhz, 80);
+		if (sb < 0) {
+			/* no, so does the primary channel fit with the 2nd 80MHz channel ? */
+			sb = channel_to_sb(chan2, primary_20mhz, 80);
+			if (!(sb < 0)) {
+				primary80_chan = chan2;
+			}
+		} else {
+			primary80_chan = chan1;
+		}
+	} else if (CHSPEC_IS160(chanspec)) {
+		chan1 = CHSPEC_CHANNEL(chanspec);
+		sb = channel_to_sb(chan1, primary_20mhz, 160);
+		if (!(sb < 0)) {
+			/* based on the sb value the primary 80MHz channel can be derived:
+			 * if sb is in range 0 to 3 the lower band is the 80MHz primary band
+			 */
+			if (sb < 4) {
+				primary80_chan = chan1 - CH_40MHZ_APART;
+			} else {
+				/* if sb is in range 4 to 7 the upper band is the 80MHz primary band */
+				primary80_chan = chan1 + CH_40MHZ_APART;
+			}
+		}
+	} else {
+		/* for 20 and 40 MHz */
+		primary80_chan = -1;
+	}
+	return primary80_chan;
+}
+
+/*
+ * Returns the secondary 80 MHz channel for the provided chanspec
+ *
+ *    chanspec - input chanspec for which the 80MHz secondary channel has to be retrieved
+ *
+ *  returns -1 (0xff, given the uint8 return type) for 20/40/80 MHz chanspecs
+ */
+uint8
+wf_chspec_secondary80_channel(chanspec_t chanspec)
+{
+	uint8 chan1 = 0, chan2 = 0, primary_20mhz = 0, secondary80_chan = 0;
+	int sb = 0;
+
+	primary_20mhz = wf_chspec_ctlchan(chanspec);
+	if (CHSPEC_IS80(chanspec)) {
+		secondary80_chan = -1;
+	} else if (CHSPEC_IS8080(chanspec)) {
+		chan1 = wf_chspec_get80Mhz_ch(CHSPEC_CHAN1(chanspec));
+		chan2 = wf_chspec_get80Mhz_ch(CHSPEC_CHAN2(chanspec));
+
+		/* does the primary channel fit with the 1st 80MHz channel ? */
+		sb = channel_to_sb(chan1, primary_20mhz, 80);
+		if (sb < 0) {
+			/* no, so does the primary channel fit with the 2nd 80MHz channel ? */
+			sb = channel_to_sb(chan2, primary_20mhz, 80);
+			if (!(sb < 0)) {
+				secondary80_chan = chan1;
+			}
+		} else {
+			secondary80_chan = chan2;
+		}
+	} else if (CHSPEC_IS160(chanspec)) {
+		chan1 = CHSPEC_CHANNEL(chanspec);
+		sb = channel_to_sb(chan1, primary_20mhz, 160);
+		if (!(sb < 0)) {
+			/* based on the sb value the secondary 80MHz channel can be derived:
+			 * if sb is in range 0 to 3 the upper band is the secondary 80MHz band
+			 */
+			if (sb < 4) {
+				secondary80_chan = chan1 + CH_40MHZ_APART;
+			} else {
+				/* if sb is in range 4 to 7 the lower band is the secondary 80MHz band */
+				secondary80_chan = chan1 - CH_40MHZ_APART;
+			}
+		}
+	} else {
+		/* for 20 and 40 MHz */
+		secondary80_chan = -1;
+	}
+	return secondary80_chan;
+}
+
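+/*
+ * Worked example for the two helpers above, assuming CH_40MHZ_APART == 8:
+ * a 160MHz chanspec centered on channel 50 with control channel 36 has
+ * sideband index 0, so wf_chspec_primary80_channel() returns 50 - 8 = 42
+ * (the lower 80MHz half) and wf_chspec_secondary80_channel() returns
+ * 50 + 8 = 58 (the upper half).
+ */
+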
+/*
+ * This function returns the chanspec for the primary 80MHz of a 160MHz or 80+80 channel.
+ *
+ *    chanspec - input chanspec for which the primary 80MHz chanspec has to be retrieved
+ *
+ *  returns INVCHANSPEC in case the provided channel is a 20/40 MHz chanspec
+ */
+chanspec_t
+wf_chspec_primary80_chspec(chanspec_t chspec)
+{
+	chanspec_t chspec80;
+	uint center_chan, chan1 = 0, chan2 = 0;
+	uint sb;
+
+	ASSERT(!wf_chspec_malformed(chspec));
+	if (CHSPEC_IS8080(chspec)) {
+		chan1 = wf_chspec_get80Mhz_ch(CHSPEC_CHAN1(chspec));
+		chan2 = wf_chspec_get80Mhz_ch(CHSPEC_CHAN2(chspec));
+
+		sb = CHSPEC_CTL_SB(chspec);
+
+		if (sb < 4) {
+			/* Primary 80MHz is on lower side */
+			center_chan = chan1;
+		} else {
+			/* Primary 80MHz is on upper side */
+			center_chan = chan2;
+			sb -= 4;
+		}
+		/* Create primary 80MHz chanspec */
+		chspec80 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan);
+	} else if (CHSPEC_IS160(chspec)) {
+		center_chan = CHSPEC_CHANNEL(chspec);
+		sb = CHSPEC_CTL_SB(chspec);
+
+		if (sb < 4) {
+			/* Primary 80MHz is on the lower side */
+			center_chan -= CH_40MHZ_APART;
+		} else {
+			/* Primary 80MHz is on the upper side */
+			center_chan += CH_40MHZ_APART;
+			sb -= 4;
+		}
+		/* Create primary 80MHz chanspec */
+		chspec80 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan);
+	} else {
+		chspec80 = INVCHANSPEC;
+	}
+	return chspec80;
+}
diff --git a/drivers/staging/edison-bcm43340/common/include/devctrl_if/wlioctl_defs.h b/drivers/staging/edison-bcm43340/common/include/devctrl_if/wlioctl_defs.h
new file mode 100644
index 0000000..1a42dfb
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/common/include/devctrl_if/wlioctl_defs.h
@@ -0,0 +1,2000 @@
+/*
+ * Custom OID/ioctl definitions for
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wlioctl_defs.h 403826 2013-05-22 16:40:55Z $
+ */
+
+
+#ifndef wlioctl_defs_h
+#define wlioctl_defs_h
+
+
+
+
+
+/* All builds use the new 11ac ratespec/chanspec */
+#undef  D11AC_IOTYPES
+#define D11AC_IOTYPES
+
+/* WL_RSPEC defines for rate information */
+#define WL_RSPEC_RATE_MASK      0x000000FF      /* rate or HT MCS value */
+#define WL_RSPEC_VHT_MCS_MASK   0x0000000F      /* VHT MCS value */
+#define WL_RSPEC_VHT_NSS_MASK   0x000000F0      /* VHT Nss value */
+#define WL_RSPEC_VHT_NSS_SHIFT  4               /* VHT Nss value shift */
+#define WL_RSPEC_TXEXP_MASK     0x00000300
+#define WL_RSPEC_TXEXP_SHIFT    8
+#define WL_RSPEC_BW_MASK        0x00070000      /* bandwidth mask */
+#define WL_RSPEC_BW_SHIFT       16              /* bandwidth shift */
+#define WL_RSPEC_STBC           0x00100000      /* STBC encoding, Nsts = 2 x Nss */
+#define WL_RSPEC_TXBF           0x00200000      /* bit indicates TXBF mode */
+#define WL_RSPEC_LDPC           0x00400000      /* bit indicates adv coding in use */
+#define WL_RSPEC_SGI            0x00800000      /* Short GI mode */
+#define WL_RSPEC_ENCODING_MASK  0x03000000      /* Encoding of Rate/MCS field */
+#define WL_RSPEC_OVERRIDE_RATE  0x40000000      /* bit indicate to override mcs only */
+#define WL_RSPEC_OVERRIDE_MODE  0x80000000      /* bit indicates override both rate & mode */
+
+/* WL_RSPEC_ENCODING field defs */
+#define WL_RSPEC_ENCODE_RATE    0x00000000      /* Legacy rate is stored in RSPEC_RATE_MASK */
+#define WL_RSPEC_ENCODE_HT      0x01000000      /* HT MCS is stored in RSPEC_RATE_MASK */
+#define WL_RSPEC_ENCODE_VHT     0x02000000      /* VHT MCS and Nss is stored in RSPEC_RATE_MASK */
+
+/* WL_RSPEC_BW field defs */
+#define WL_RSPEC_BW_UNSPECIFIED 0
+#define WL_RSPEC_BW_20MHZ       0x00010000
+#define WL_RSPEC_BW_40MHZ       0x00020000
+#define WL_RSPEC_BW_80MHZ       0x00030000
+#define WL_RSPEC_BW_160MHZ      0x00040000
+
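+/* Usage sketch (illustrative only, not compiled): composing and decoding
+ * a VHT ratespec with the masks above. The value below describes VHT
+ * MCS 7, Nss 2, 80MHz, short GI.
+ */
+#if 0
+uint32 rspec = WL_RSPEC_ENCODE_VHT | (2 << WL_RSPEC_VHT_NSS_SHIFT) | 7 |
+	WL_RSPEC_BW_80MHZ | WL_RSPEC_SGI;
+uint32 mcs = rspec & WL_RSPEC_VHT_MCS_MASK;				/* 7 */
+uint32 nss = (rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT; /* 2 */
+int is_vht = ((rspec & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_VHT); /* 1 */
+#endif
+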
+/* Legacy defines for the nrate iovar */
+#define OLD_NRATE_MCS_INUSE         0x00000080 /* MCS in use, indicates b0-6 holds an mcs */
+#define OLD_NRATE_RATE_MASK         0x0000007f /* rate/mcs value */
+#define OLD_NRATE_STF_MASK          0x0000ff00 /* stf mode mask: siso, cdd, stbc, sdm */
+#define OLD_NRATE_STF_SHIFT         8          /* stf mode shift */
+#define OLD_NRATE_OVERRIDE          0x80000000 /* bit indicates override both rate & mode */
+#define OLD_NRATE_OVERRIDE_MCS_ONLY 0x40000000 /* bit indicate to override mcs only */
+#define OLD_NRATE_SGI               0x00800000 /* sgi mode */
+#define OLD_NRATE_LDPC_CODING       0x00400000 /* bit indicates adv coding in use */
+
+#define OLD_NRATE_STF_SISO	0		/* stf mode SISO */
+#define OLD_NRATE_STF_CDD	1		/* stf mode CDD */
+#define OLD_NRATE_STF_STBC	2		/* stf mode STBC */
+#define OLD_NRATE_STF_SDM	3		/* stf mode SDM */
+
+#define HIGHEST_SINGLE_STREAM_MCS	7 /* MCS values greater than this enable multiple streams */
+
+#define MAX_CCA_CHANNELS 38	/* Max number of 20 Mhz wide channels */
+#define MAX_CCA_SECS	60	/* CCA keeps this many seconds history */
+
+#define IBSS_MED        15	/* Medium in-bss congestion percentage */
+#define IBSS_HI         25	/* Hi in-bss congestion percentage */
+#define OBSS_MED        12
+#define OBSS_HI         25
+#define INTERFER_MED    5
+#define INTERFER_HI     10
+
+#define  CCA_FLAG_2G_ONLY		0x01	/* Return a channel from 2.4 Ghz band */
+#define  CCA_FLAG_5G_ONLY		0x02	/* Return a channel from 5 Ghz band */
+#define  CCA_FLAG_IGNORE_DURATION	0x04	/* Ignore dwell time for each channel */
+#define  CCA_FLAGS_PREFER_1_6_11	0x10
+#define  CCA_FLAG_IGNORE_INTERFER	0x20	/* do not exclude channel based on interference level */
+
+#define CCA_ERRNO_BAND 		1	/* After filtering for band pref, no choices left */
+#define CCA_ERRNO_DURATION	2	/* After filtering for duration, no choices left */
+#define CCA_ERRNO_PREF_CHAN	3	/* After filtering for chan pref, no choices left */
+#define CCA_ERRNO_INTERFER	4	/* After filtering for interference, no choices left */
+#define CCA_ERRNO_TOO_FEW	5	/* Only 1 channel was input */
+
+#define WL_STA_AID(a)		((a) &~ 0xc000)
+
+/* Flags for sta_info_t indicating properties of STA */
+#define WL_STA_BRCM		0x00000001	/* Running a Broadcom driver */
+#define WL_STA_WME		0x00000002	/* WMM association */
+#define WL_STA_NONERP		0x00000004	/* No ERP */
+#define WL_STA_AUTHE		0x00000008	/* Authenticated */
+#define WL_STA_ASSOC		0x00000010	/* Associated */
+#define WL_STA_AUTHO		0x00000020	/* Authorized */
+#define WL_STA_WDS		0x00000040	/* Wireless Distribution System */
+#define WL_STA_WDS_LINKUP	0x00000080	/* WDS traffic/probes flowing properly */
+#define WL_STA_PS		0x00000100	/* STA is in power save mode from AP's viewpoint */
+#define WL_STA_APSD_BE		0x00000200	/* APSD delv/trigger for AC_BE is default enabled */
+#define WL_STA_APSD_BK		0x00000400	/* APSD delv/trigger for AC_BK is default enabled */
+#define WL_STA_APSD_VI		0x00000800	/* APSD delv/trigger for AC_VI is default enabled */
+#define WL_STA_APSD_VO		0x00001000	/* APSD delv/trigger for AC_VO is default enabled */
+#define WL_STA_N_CAP		0x00002000	/* STA 802.11n capable */
+#define WL_STA_SCBSTATS		0x00004000	/* Per STA debug stats */
+#define WL_STA_AMPDU_CAP	0x00008000	/* STA AMPDU capable */
+#define WL_STA_AMSDU_CAP	0x00010000	/* STA AMSDU capable */
+#define WL_STA_MIMO_PS		0x00020000	/* mimo ps mode is enabled */
+#define WL_STA_MIMO_RTS		0x00040000	/* send rts in mimo ps mode */
+#define WL_STA_RIFS_CAP		0x00080000	/* rifs enabled */
+#define WL_STA_VHT_CAP		0x00100000	/* STA VHT(11ac) capable */
+#define WL_STA_WPS		0x00200000	/* WPS state */
+
+#define WL_WDS_LINKUP		WL_STA_WDS_LINKUP	/* deprecated */
+
+/* STA HT cap fields */
+#define WL_STA_CAP_LDPC_CODING		0x0001	/* Support for rx of LDPC coded pkts */
+#define WL_STA_CAP_40MHZ		0x0002  /* FALSE:20Mhz, TRUE:20/40MHZ supported */
+#define WL_STA_CAP_MIMO_PS_MASK		0x000C  /* Mimo PS mask */
+#define WL_STA_CAP_MIMO_PS_SHIFT	0x0002	/* Mimo PS shift */
+#define WL_STA_CAP_MIMO_PS_OFF		0x0003	/* Mimo PS, no restriction */
+#define WL_STA_CAP_MIMO_PS_RTS		0x0001	/* Mimo PS, send RTS/CTS around MIMO frames */
+#define WL_STA_CAP_MIMO_PS_ON		0x0000	/* Mimo PS, MIMO disallowed */
+#define WL_STA_CAP_GF			0x0010	/* Greenfield preamble support */
+#define WL_STA_CAP_SHORT_GI_20		0x0020	/* 20MHZ short guard interval support */
+#define WL_STA_CAP_SHORT_GI_40		0x0040	/* 40Mhz short guard interval support */
+#define WL_STA_CAP_TX_STBC		0x0080	/* Tx STBC support */
+#define WL_STA_CAP_RX_STBC_MASK		0x0300	/* Rx STBC mask */
+#define WL_STA_CAP_RX_STBC_SHIFT	8	/* Rx STBC shift */
+#define WL_STA_CAP_DELAYED_BA		0x0400	/* delayed BA support */
+#define WL_STA_CAP_MAX_AMSDU		0x0800	/* Max AMSDU size in bytes , 0=3839, 1=7935 */
+#define WL_STA_CAP_DSSS_CCK		0x1000	/* DSSS/CCK supported by the BSS */
+#define WL_STA_CAP_PSMP			0x2000	/* Power Save Multi Poll support */
+#define WL_STA_CAP_40MHZ_INTOLERANT	0x4000	/* 40MHz Intolerant */
+#define WL_STA_CAP_LSIG_TXOP		0x8000	/* L-SIG TXOP protection support */
+
+#define WL_STA_CAP_RX_STBC_NO		0x0	/* no rx STBC support */
+#define WL_STA_CAP_RX_STBC_ONE_STREAM	0x1	/* rx STBC support of 1 spatial stream */
+#define WL_STA_CAP_RX_STBC_TWO_STREAM	0x2	/* rx STBC support of 1-2 spatial streams */
+#define WL_STA_CAP_RX_STBC_THREE_STREAM	0x3	/* rx STBC support of 1-3 spatial streams */
+
+/* scb vht flags */
+#define WL_STA_VHT_LDPCCAP	0x0001
+#define WL_STA_SGI80		0x0002
+#define WL_STA_SGI160		0x0004
+#define WL_STA_VHT_TX_STBCCAP	0x0008
+#define WL_STA_VHT_RX_STBCCAP	0x0010
+#define WL_STA_SU_BEAMFORMER	0x0020
+#define WL_STA_SU_BEAMFORMEE	0x0040
+#define WL_STA_MU_BEAMFORMER	0x0080
+#define WL_STA_MU_BEAMFORMEE	0x0100
+#define WL_STA_VHT_TXOP_PS	0x0200
+#define WL_STA_HTC_VHT_CAP	0x0400
+
+/* Values for TX Filter override mode */
+#define WLC_TXFILTER_OVERRIDE_DISABLED  0
+#define WLC_TXFILTER_OVERRIDE_ENABLED   1
+
+#define WL_IOCTL_ACTION_GET				0x0
+#define WL_IOCTL_ACTION_SET				0x1
+#define WL_IOCTL_ACTION_OVL_IDX_MASK	0x1e
+#define WL_IOCTL_ACTION_OVL_RSV			0x20
+#define WL_IOCTL_ACTION_OVL				0x40
+#define WL_IOCTL_ACTION_MASK			0x7e
+#define WL_IOCTL_ACTION_OVL_SHIFT		1
+
+#define WL_BSSTYPE_INFRA 1
+#define WL_BSSTYPE_INDEP 0
+#define WL_BSSTYPE_ANY   2
+
+/* Bitmask for scan_type */
+#define WL_SCANFLAGS_PASSIVE	0x01	/* force passive scan */
+#define WL_SCANFLAGS_RESERVED	0x02	/* Reserved */
+#define WL_SCANFLAGS_PROHIBITED	0x04	/* allow scanning prohibited channels */
+#define WL_SCANFLAGS_OFFCHAN	0x08	/* allow scanning/reporting off-channel APs */
+#define WL_SCANFLAGS_HOTSPOT	0x10	/* automatic ANQP to hotspot APs */
+
+/* wl_iscan_results status values */
+#define WL_SCAN_RESULTS_SUCCESS	0
+#define WL_SCAN_RESULTS_PARTIAL	1
+#define WL_SCAN_RESULTS_PENDING	2
+#define WL_SCAN_RESULTS_ABORTED	3
+#define WL_SCAN_RESULTS_NO_MEM  4
+
+#define SCANOL_ENABLED			(1 << 0)
+#define SCANOL_BCAST_SSID		(1 << 1)
+#define SCANOL_NOTIFY_BCAST_SSID	(1 << 2)
+#define SCANOL_RESULTS_PER_CYCLE	(1 << 3)
+
+/* scan times in milliseconds */
+#define SCANOL_HOME_TIME		45	/* for home channel processing */
+#define SCANOL_ASSOC_TIME		20	/* dwell on a channel while associated */
+#define SCANOL_UNASSOC_TIME		40	/* dwell on a channel while unassociated */
+#define SCANOL_PASSIVE_TIME		110	/* listen on a channel for passive scan */
+#define SCANOL_AWAY_LIMIT		100	/* max time to be away from home channel */
+#define SCANOL_IDLE_REST_TIME		40
+#define SCANOL_IDLE_REST_MULTIPLIER	0
+#define SCANOL_ACTIVE_REST_TIME		20
+#define SCANOL_ACTIVE_REST_MULTIPLIER	0
+#define SCANOL_CYCLE_IDLE_REST_TIME	300000	/* Idle Rest Time between Scan Cycle (msec) */
+#define SCANOL_CYCLE_IDLE_REST_MULTIPLIER	0	/* Idle Rest Time Multiplier */
+#define SCANOL_CYCLE_ACTIVE_REST_TIME	200
+#define SCANOL_CYCLE_ACTIVE_REST_MULTIPLIER	0
+#define SCANOL_MAX_REST_TIME		3600000	/* max rest time between scan cycle (msec) */
+#define SCANOL_CYCLE_DEFAULT		0	/* default for Max Scan Cycle, 0 = forever */
+#define SCANOL_CYCLE_MAX		864000	/* Max Scan Cycle */
+						/* 10 sec/scan cycle => 100 days */
+#define SCANOL_NPROBES			2	/* for Active scan; send n probes on each channel */
+#define SCANOL_NPROBES_MAX		5	/* for Active scan; send n probes on each channel */
+#define SCANOL_SCAN_START_DLY		10	/* delay start of offload scan (sec) */
+#define SCANOL_SCAN_START_DLY_MAX	240	/* delay start of offload scan (sec) */
+#define SCANOL_MULTIPLIER_MAX		10	/* Max Multiplier */
+#define SCANOL_UNASSOC_TIME_MAX		100	/* max dwell on a channel while unassociated */
+#define SCANOL_PASSIVE_TIME_MAX		500	/* max listen on a channel for passive scan */
+#define SCANOL_SSID_MAX			16	/* max supported preferred SSID */
+
+/* masks for channel and ssid count */
+#define WL_SCAN_PARAMS_COUNT_MASK 0x0000ffff
+#define WL_SCAN_PARAMS_NSSID_SHIFT 16
+
+#define WL_SCAN_ACTION_START      1
+#define WL_SCAN_ACTION_CONTINUE   2
+#define WL_SCAN_ACTION_ABORT      3
+
+
+#define ANTENNA_NUM_1	1		/* total number of antennas to be used */
+#define ANTENNA_NUM_2	2
+#define ANTENNA_NUM_3	3
+#define ANTENNA_NUM_4	4
+
+#define ANT_SELCFG_AUTO		0x80	/* bit indicates antenna sel AUTO */
+#define ANT_SELCFG_MASK		0x33	/* antenna configuration mask */
+#define ANT_SELCFG_TX_UNICAST	0	/* unicast tx antenna configuration */
+#define ANT_SELCFG_RX_UNICAST	1	/* unicast rx antenna configuration */
+#define ANT_SELCFG_TX_DEF	2	/* default tx antenna configuration */
+#define ANT_SELCFG_RX_DEF	3	/* default rx antenna configuration */
+
+/* interference source detection and identification mode */
+#define ITFR_MODE_DISABLE	0	/* disable feature */
+#define ITFR_MODE_MANUAL_ENABLE	1	/* enable manual detection */
+#define ITFR_MODE_AUTO_ENABLE	2	/* enable auto detection */
+
+/* bit definitions for flags in interference source report */
+#define ITFR_INTERFERENCED	1	/* interference detected */
+#define ITFR_HOME_CHANNEL	2	/* home channel has interference */
+#define ITFR_NOISY_ENVIRONMENT	4	/* noisy environment so feature stopped */
+
+#define WL_NUM_RPI_BINS		8
+#define WL_RM_TYPE_BASIC	1
+#define WL_RM_TYPE_CCA		2
+#define WL_RM_TYPE_RPI		3
+
+#define WL_RM_FLAG_PARALLEL	(1<<0)
+
+#define WL_RM_FLAG_LATE		(1<<1)
+#define WL_RM_FLAG_INCAPABLE	(1<<2)
+#define WL_RM_FLAG_REFUSED	(1<<3)
+
+/* flags */
+#define WLC_ASSOC_REQ_IS_REASSOC 0x01 /* assoc req was actually a reassoc */
+
+#define WLC_CIS_DEFAULT	0	/* built-in default */
+#define WLC_CIS_SROM	1	/* source is sprom */
+#define WLC_CIS_OTP	2	/* source is otp */
+
+/* PCL - Power Control Loop */
+/* current gain setting is replaced by user input */
+#define WL_ATTEN_APP_INPUT_PCL_OFF	0	/* turn off PCL, apply supplied input */
+#define WL_ATTEN_PCL_ON			1	/* turn on PCL */
+/* current gain setting is maintained */
+#define WL_ATTEN_PCL_OFF		2	/* turn off PCL. */
+
+#define	PLC_CMD_FAILOVER	1
+#define	PLC_CMD_MAC_COST	2
+#define	PLC_CMD_LINK_COST	3
+#define	PLC_CMD_NODE_LIST	4
+
+#define NODE_TYPE_UNKNOWN	0	/* Unknown link */
+#define NODE_TYPE_WIFI_ONLY	1	/* Pure Wireless STA node */
+#define NODE_TYPE_PLC_ONLY	2	/* Pure PLC only node */
+#define NODE_TYPE_WIFI_PLC	3	/* WiFi PLC capable node */
+
+/* defines used by poweridx iovar - it controls power in a-band */
+/* current gain setting is maintained */
+#define WL_PWRIDX_PCL_OFF	-2	/* turn off PCL.  */
+#define WL_PWRIDX_PCL_ON	-1	/* turn on PCL */
+#define WL_PWRIDX_LOWER_LIMIT	-2	/* lower limit */
+#define WL_PWRIDX_UPPER_LIMIT	63	/* upper limit */
+/* value >= 0 causes
+ *	- input to be set to that value
+ *	- PCL to be off
+ */
+
+#define BCM_MAC_STATUS_INDICATION	(0x40010200L)
+
+/* Values for TX Filter override mode */
+#define WLC_TXFILTER_OVERRIDE_DISABLED  0
+#define WLC_TXFILTER_OVERRIDE_ENABLED   1
+
+/* magic pattern used for mismatch driver and wl */
+#define WL_TXFIFO_SZ_MAGIC	0xa5a5
+
+/* check this magic number */
+#define WLC_IOCTL_MAGIC		0x14e46c77
+
+/* bss_info_cap_t flags */
+#define WL_BSS_FLAGS_FROM_BEACON	0x01	/* bss_info derived from beacon */
+#define WL_BSS_FLAGS_FROM_CACHE		0x02	/* bss_info collected from cache */
+#define WL_BSS_FLAGS_RSSI_ONCHANNEL	0x04	/* rssi info received on channel (vs offchannel) */
+#define WL_BSS_FLAGS_HS20		0x08	/* hotspot 2.0 capable */
+#define WL_BSS_FLAGS_RSSI_INVALID	0x10	/* BSS contains invalid RSSI */
+#define WL_BSS_FLAGS_RSSI_INACCURATE	0x20	/* BSS contains inaccurate RSSI */
+
+/* bssinfo flag for nbss_cap */
+#define VHT_BI_SGI_80MHZ			0x00000100
+#define VHT_BI_80MHZ			    0x00000200
+#define VHT_BI_160MHZ			    0x00000400
+#define VHT_BI_8080MHZ			    0x00000800
+
+/* reference to wl_ioctl_t struct used by usermode driver */
+#define ioctl_subtype	set		/* subtype param */
+#define ioctl_pid	used		/* pid param */
+#define ioctl_status	needed		/* status param */
+
+
+/* Enumerate crypto algorithms */
+#define	CRYPTO_ALGO_OFF			0
+#define	CRYPTO_ALGO_WEP1		1
+#define	CRYPTO_ALGO_TKIP		2
+#define	CRYPTO_ALGO_WEP128		3
+#define CRYPTO_ALGO_AES_CCM		4
+#define CRYPTO_ALGO_AES_OCB_MSDU	5
+#define CRYPTO_ALGO_AES_OCB_MPDU	6
+#if !defined(BCMEXTCCX)
+#define CRYPTO_ALGO_NALG		7
+#else
+#define CRYPTO_ALGO_CKIP		7
+#define CRYPTO_ALGO_CKIP_MMH	8
+#define CRYPTO_ALGO_WEP_MMH		9
+#define CRYPTO_ALGO_NALG		10
+#endif 
+
+#define CRYPTO_ALGO_SMS4		11
+#define CRYPTO_ALGO_PMK			12	/* for 802.1x supp to set PMK before 4-way */
+#define CRYPTO_ALGO_BIP			13  /* 802.11w BIP (aes cmac) */
+
+#define CRYPTO_ALGO_AES_GCM     14  /* 128 bit GCM */
+#define CRYPTO_ALGO_AES_CCM256  15  /* 256 bit CCM */
+#define CRYPTO_ALGO_AES_GCM256  16  /* 256 bit GCM */
+#define CRYPTO_ALGO_BIP_CMAC256 17  /* 256 bit BIP CMAC */
+#define CRYPTO_ALGO_BIP_GMAC    18  /* 128 bit BIP GMAC */
+#define CRYPTO_ALGO_BIP_GMAC256 19  /* 256 bit BIP GMAC */
+
+#define CRYPTO_ALGO_NONE        CRYPTO_ALGO_OFF
+
+#define WSEC_GEN_MIC_ERROR	0x0001
+#define WSEC_GEN_REPLAY		0x0002
+#define WSEC_GEN_ICV_ERROR	0x0004
+#define WSEC_GEN_MFP_ACT_ERROR	0x0008
+#define WSEC_GEN_MFP_DISASSOC_ERROR	0x0010
+#define WSEC_GEN_MFP_DEAUTH_ERROR	0x0020
+
+#define WL_SOFT_KEY	(1 << 0)	/* Indicates this key is using soft encrypt */
+#define WL_PRIMARY_KEY	(1 << 1)	/* Indicates this key is the primary (ie tx) key */
+#if defined(BCMEXTCCX)
+#define WL_CKIP_KP	(1 << 4)	/* CMIC */
+#define WL_CKIP_MMH	(1 << 5)	/* CKIP */
+#else
+#define WL_KF_RES_4	(1 << 4)	/* Reserved for backward compat */
+#define WL_KF_RES_5	(1 << 5)	/* Reserved for backward compat */
+#endif 
+#define WL_IBSS_PEER_GROUP_KEY	(1 << 6)	/* Indicates a group key for a IBSS PEER */
+
+/* wireless security bitvec */
+#define WEP_ENABLED		0x0001
+#define TKIP_ENABLED		0x0002
+#define AES_ENABLED		0x0004
+#define WSEC_SWFLAG		0x0008
+#define SES_OW_ENABLED		0x0040	/* to go into transition mode without setting wep */
+#ifdef BCMWAPI_WPI
+#define SMS4_ENABLED		0x0100
+#endif /* BCMWAPI_WPI */
+
+/* wsec macros for operating on the above definitions */
+#define WSEC_WEP_ENABLED(wsec)	((wsec) & WEP_ENABLED)
+#define WSEC_TKIP_ENABLED(wsec)	((wsec) & TKIP_ENABLED)
+#define WSEC_AES_ENABLED(wsec)	((wsec) & AES_ENABLED)
+
+#ifdef BCMWAPI_WPI
+#define WSEC_ENABLED(wsec)	((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | SMS4_ENABLED))
+#else /* BCMWAPI_WPI */
+#define WSEC_ENABLED(wsec)	((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))
+#endif /* BCMWAPI_WPI */
+#define WSEC_SES_OW_ENABLED(wsec)	((wsec) & SES_OW_ENABLED)
+#ifdef BCMWAPI_WAI
+#define WSEC_SMS4_ENABLED(wsec)	((wsec) & SMS4_ENABLED)
+#endif /* BCMWAPI_WAI */
+
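+/* Usage sketch (illustrative only, not compiled): a WPA2-CCMP
+ * configuration sets AES_ENABLED in the wsec bitvec, so both checks
+ * below are non-zero.
+ */
+#if 0
+uint32 wsec = AES_ENABLED;
+if (WSEC_ENABLED(wsec) && WSEC_AES_ENABLED(wsec)) {
+	/* encryption path for CCMP */
+}
+#endif
+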
+#define MFP_CAPABLE		0x0200
+#define MFP_REQUIRED	0x0400
+#define MFP_SHA256		0x0800 /* a special configuration for STA for WIFI test tool */
+
+/* WPA authentication mode bitvec */
+#define WPA_AUTH_DISABLED	0x0000	/* Legacy (i.e., non-WPA) */
+#define WPA_AUTH_NONE		0x0001	/* none (IBSS) */
+#define WPA_AUTH_UNSPECIFIED	0x0002	/* over 802.1x */
+#define WPA_AUTH_PSK		0x0004	/* Pre-shared key */
+#if defined(BCMEXTCCX)
+#define WPA_AUTH_CCKM		0x0008	/* CCKM */
+#define WPA2_AUTH_CCKM		0x0010	/* CCKM2 */
+#endif	
+/* #define WPA_AUTH_8021X 0x0020 */	/* 802.1x, reserved */
+#define WPA2_AUTH_UNSPECIFIED	0x0040	/* over 802.1x */
+#define WPA2_AUTH_PSK		0x0080	/* Pre-shared key */
+#define BRCM_AUTH_PSK           0x0100  /* BRCM specific PSK */
+#define BRCM_AUTH_DPT		0x0200	/* DPT PSK without group keys */
+#if defined(BCMWAPI_WAI) || defined(BCMWAPI_WPI)
+#define WPA_AUTH_WAPI           0x0400
+#define WAPI_AUTH_NONE		WPA_AUTH_NONE	/* none (IBSS) */
+#define WAPI_AUTH_UNSPECIFIED	0x0400	/* over AS */
+#define WAPI_AUTH_PSK		0x0800	/* Pre-shared key */
+#endif /* BCMWAPI_WAI || BCMWAPI_WPI */
+#define WPA2_AUTH_MFP           0x1000  /* MFP (11w) in contrast to CCX */
+#define WPA2_AUTH_TPK		0x2000 	/* TDLS Peer Key */
+#define WPA2_AUTH_FT		0x4000 	/* Fast Transition. */
+#define WPA_AUTH_PFN_ANY	0xffffffff	/* for PFN, match only ssid */
+
+/* pmkid */
+#define	MAXPMKID		16
+
+#define	WLC_IOCTL_MAXLEN		8192	/* max length ioctl buffer required */
+#define	WLC_IOCTL_SMLEN			256	/* "small" length ioctl buffer required */
+#define WLC_IOCTL_MEDLEN		1536    /* "med" length ioctl buffer required */
+#if defined(LCNCONF) || defined(LCN40CONF)
+#define WLC_SAMPLECOLLECT_MAXLEN	8192	/* Max Sample Collect buffer */
+#else
+#define WLC_SAMPLECOLLECT_MAXLEN	10240	/* Max Sample Collect buffer for two cores */
+#endif
+#define WLC_SAMPLECOLLECT_MAXLEN_LCN40  8192
+
+/* common ioctl definitions */
+#define WLC_GET_MAGIC				0
+#define WLC_GET_VERSION				1
+#define WLC_UP					2
+#define WLC_DOWN				3
+#define WLC_GET_LOOP				4
+#define WLC_SET_LOOP				5
+#define WLC_DUMP				6
+#define WLC_GET_MSGLEVEL			7
+#define WLC_SET_MSGLEVEL			8
+#define WLC_GET_PROMISC				9
+#define WLC_SET_PROMISC				10
+/* #define WLC_OVERLAY_IOCTL			11 */ /* not supported */
+#define WLC_GET_RATE				12
+#define WLC_GET_MAX_RATE			13
+#define WLC_GET_INSTANCE			14
+/* #define WLC_GET_FRAG				15 */ /* no longer supported */
+/* #define WLC_SET_FRAG				16 */ /* no longer supported */
+/* #define WLC_GET_RTS				17 */ /* no longer supported */
+/* #define WLC_SET_RTS				18 */ /* no longer supported */
+#define WLC_GET_INFRA				19
+#define WLC_SET_INFRA				20
+#define WLC_GET_AUTH				21
+#define WLC_SET_AUTH				22
+#define WLC_GET_BSSID				23
+#define WLC_SET_BSSID				24
+#define WLC_GET_SSID				25
+#define WLC_SET_SSID				26
+#define WLC_RESTART				27
+#define WLC_TERMINATED				28
+/* #define WLC_DUMP_SCB				28 */ /* no longer supported */
+#define WLC_GET_CHANNEL				29
+#define WLC_SET_CHANNEL				30
+#define WLC_GET_SRL				31
+#define WLC_SET_SRL				32
+#define WLC_GET_LRL				33
+#define WLC_SET_LRL				34
+#define WLC_GET_PLCPHDR				35
+#define WLC_SET_PLCPHDR				36
+#define WLC_GET_RADIO				37
+#define WLC_SET_RADIO				38
+#define WLC_GET_PHYTYPE				39
+#define WLC_DUMP_RATE				40
+#define WLC_SET_RATE_PARAMS			41
+#define WLC_GET_FIXRATE				42
+#define WLC_SET_FIXRATE				43
+/* #define WLC_GET_WEP				42 */ /* no longer supported */
+/* #define WLC_SET_WEP				43 */ /* no longer supported */
+#define WLC_GET_KEY				44
+#define WLC_SET_KEY				45
+#define WLC_GET_REGULATORY			46
+#define WLC_SET_REGULATORY			47
+#define WLC_GET_PASSIVE_SCAN			48
+#define WLC_SET_PASSIVE_SCAN			49
+#define WLC_SCAN				50
+#define WLC_SCAN_RESULTS			51
+#define WLC_DISASSOC				52
+#define WLC_REASSOC				53
+#define WLC_GET_ROAM_TRIGGER			54
+#define WLC_SET_ROAM_TRIGGER			55
+#define WLC_GET_ROAM_DELTA			56
+#define WLC_SET_ROAM_DELTA			57
+#define WLC_GET_ROAM_SCAN_PERIOD		58
+#define WLC_SET_ROAM_SCAN_PERIOD		59
+#define WLC_EVM					60	/* diag */
+#define WLC_GET_TXANT				61
+#define WLC_SET_TXANT				62
+#define WLC_GET_ANTDIV				63
+#define WLC_SET_ANTDIV				64
+/* #define WLC_GET_TXPWR			65 */ /* no longer supported */
+/* #define WLC_SET_TXPWR			66 */ /* no longer supported */
+#define WLC_GET_CLOSED				67
+#define WLC_SET_CLOSED				68
+#define WLC_GET_MACLIST				69
+#define WLC_SET_MACLIST				70
+#define WLC_GET_RATESET				71
+#define WLC_SET_RATESET				72
+/* #define WLC_GET_LOCALE			73 */ /* no longer supported */
+#define WLC_LONGTRAIN				74
+#define WLC_GET_BCNPRD				75
+#define WLC_SET_BCNPRD				76
+#define WLC_GET_DTIMPRD				77
+#define WLC_SET_DTIMPRD				78
+#define WLC_GET_SROM				79
+#define WLC_SET_SROM				80
+#define WLC_GET_WEP_RESTRICT			81
+#define WLC_SET_WEP_RESTRICT			82
+#define WLC_GET_COUNTRY				83
+#define WLC_SET_COUNTRY				84
+#define WLC_GET_PM				85
+#define WLC_SET_PM				86
+#define WLC_GET_WAKE				87
+#define WLC_SET_WAKE				88
+/* #define WLC_GET_D11CNTS			89 */ /* -> "counters" iovar */
+#define WLC_GET_FORCELINK			90	/* ndis only */
+#define WLC_SET_FORCELINK			91	/* ndis only */
+#define WLC_FREQ_ACCURACY			92	/* diag */
+#define WLC_CARRIER_SUPPRESS			93	/* diag */
+#define WLC_GET_PHYREG				94
+#define WLC_SET_PHYREG				95
+#define WLC_GET_RADIOREG			96
+#define WLC_SET_RADIOREG			97
+#define WLC_GET_REVINFO				98
+#define WLC_GET_UCANTDIV			99
+#define WLC_SET_UCANTDIV			100
+#define WLC_R_REG				101
+#define WLC_W_REG				102
+/* #define WLC_DIAG_LOOPBACK			103	old tray diag */
+/* #define WLC_RESET_D11CNTS			104 */ /* -> "reset_d11cnts" iovar */
+#define WLC_GET_MACMODE				105
+#define WLC_SET_MACMODE				106
+#define WLC_GET_MONITOR				107
+#define WLC_SET_MONITOR				108
+#define WLC_GET_GMODE				109
+#define WLC_SET_GMODE				110
+#define WLC_GET_LEGACY_ERP			111
+#define WLC_SET_LEGACY_ERP			112
+#define WLC_GET_RX_ANT				113
+#define WLC_GET_CURR_RATESET			114	/* current rateset */
+#define WLC_GET_SCANSUPPRESS			115
+#define WLC_SET_SCANSUPPRESS			116
+#define WLC_GET_AP				117
+#define WLC_SET_AP				118
+#define WLC_GET_EAP_RESTRICT			119
+#define WLC_SET_EAP_RESTRICT			120
+#define WLC_SCB_AUTHORIZE			121
+#define WLC_SCB_DEAUTHORIZE			122
+#define WLC_GET_WDSLIST				123
+#define WLC_SET_WDSLIST				124
+#define WLC_GET_ATIM				125
+#define WLC_SET_ATIM				126
+#define WLC_GET_RSSI				127
+#define WLC_GET_PHYANTDIV			128
+#define WLC_SET_PHYANTDIV			129
+#define WLC_AP_RX_ONLY				130
+#define WLC_GET_TX_PATH_PWR			131
+#define WLC_SET_TX_PATH_PWR			132
+#define WLC_GET_WSEC				133
+#define WLC_SET_WSEC				134
+#define WLC_GET_PHY_NOISE			135
+#define WLC_GET_BSS_INFO			136
+#define WLC_GET_PKTCNTS				137
+#define WLC_GET_LAZYWDS				138
+#define WLC_SET_LAZYWDS				139
+#define WLC_GET_BANDLIST			140
+
+#define WLC_GET_BAND				141
+#define WLC_SET_BAND				142
+#define WLC_SCB_DEAUTHENTICATE			143
+#define WLC_GET_SHORTSLOT			144
+#define WLC_GET_SHORTSLOT_OVERRIDE		145
+#define WLC_SET_SHORTSLOT_OVERRIDE		146
+#define WLC_GET_SHORTSLOT_RESTRICT		147
+#define WLC_SET_SHORTSLOT_RESTRICT		148
+#define WLC_GET_GMODE_PROTECTION		149
+#define WLC_GET_GMODE_PROTECTION_OVERRIDE	150
+#define WLC_SET_GMODE_PROTECTION_OVERRIDE	151
+#define WLC_UPGRADE				152
+/* #define WLC_GET_MRATE			153 */ /* no longer supported */
+/* #define WLC_SET_MRATE			154 */ /* no longer supported */
+#define WLC_GET_IGNORE_BCNS			155
+#define WLC_SET_IGNORE_BCNS			156
+#define WLC_GET_SCB_TIMEOUT			157
+#define WLC_SET_SCB_TIMEOUT			158
+#define WLC_GET_ASSOCLIST			159
+#define WLC_GET_CLK				160
+#define WLC_SET_CLK				161
+#define WLC_GET_UP				162
+#define WLC_OUT					163
+#define WLC_GET_WPA_AUTH			164
+#define WLC_SET_WPA_AUTH			165
+#define WLC_GET_UCFLAGS				166
+#define WLC_SET_UCFLAGS				167
+#define WLC_GET_PWRIDX				168
+#define WLC_SET_PWRIDX				169
+#define WLC_GET_TSSI				170
+#define WLC_GET_SUP_RATESET_OVERRIDE		171
+#define WLC_SET_SUP_RATESET_OVERRIDE		172
+/* #define WLC_SET_FAST_TIMER			173 */ /* no longer supported */
+/* #define WLC_GET_FAST_TIMER			174 */ /* no longer supported */
+/* #define WLC_SET_SLOW_TIMER			175 */ /* no longer supported */
+/* #define WLC_GET_SLOW_TIMER			176 */ /* no longer supported */
+/* #define WLC_DUMP_PHYREGS			177 */ /* no longer supported */
+#define WLC_GET_PROTECTION_CONTROL		178
+#define WLC_SET_PROTECTION_CONTROL		179
+#define WLC_GET_PHYLIST				180
+#define WLC_ENCRYPT_STRENGTH			181	/* ndis only */
+#define WLC_DECRYPT_STATUS			182	/* ndis only */
+#define WLC_GET_KEY_SEQ				183
+#define WLC_GET_SCAN_CHANNEL_TIME		184
+#define WLC_SET_SCAN_CHANNEL_TIME		185
+#define WLC_GET_SCAN_UNASSOC_TIME		186
+#define WLC_SET_SCAN_UNASSOC_TIME		187
+#define WLC_GET_SCAN_HOME_TIME			188
+#define WLC_SET_SCAN_HOME_TIME			189
+#define WLC_GET_SCAN_NPROBES			190
+#define WLC_SET_SCAN_NPROBES			191
+#define WLC_GET_PRB_RESP_TIMEOUT		192
+#define WLC_SET_PRB_RESP_TIMEOUT		193
+#define WLC_GET_ATTEN				194
+#define WLC_SET_ATTEN				195
+#define WLC_GET_SHMEM				196	/* diag */
+#define WLC_SET_SHMEM				197	/* diag */
+/* #define WLC_GET_GMODE_PROTECTION_CTS		198 */ /* no longer supported */
+/* #define WLC_SET_GMODE_PROTECTION_CTS		199 */ /* no longer supported */
+#define WLC_SET_WSEC_TEST			200
+#define WLC_SCB_DEAUTHENTICATE_FOR_REASON	201
+#define WLC_TKIP_COUNTERMEASURES		202
+#define WLC_GET_PIOMODE				203
+#define WLC_SET_PIOMODE				204
+#define WLC_SET_ASSOC_PREFER			205
+#define WLC_GET_ASSOC_PREFER			206
+#define WLC_SET_ROAM_PREFER			207
+#define WLC_GET_ROAM_PREFER			208
+#define WLC_SET_LED				209
+#define WLC_GET_LED				210
+#define WLC_GET_INTERFERENCE_MODE		211
+#define WLC_SET_INTERFERENCE_MODE		212
+#define WLC_GET_CHANNEL_QA			213
+#define WLC_START_CHANNEL_QA			214
+#define WLC_GET_CHANNEL_SEL			215
+#define WLC_START_CHANNEL_SEL			216
+#define WLC_GET_VALID_CHANNELS			217
+#define WLC_GET_FAKEFRAG			218
+#define WLC_SET_FAKEFRAG			219
+#define WLC_GET_PWROUT_PERCENTAGE		220
+#define WLC_SET_PWROUT_PERCENTAGE		221
+#define WLC_SET_BAD_FRAME_PREEMPT		222
+#define WLC_GET_BAD_FRAME_PREEMPT		223
+#define WLC_SET_LEAP_LIST			224
+#define WLC_GET_LEAP_LIST			225
+#define WLC_GET_CWMIN				226
+#define WLC_SET_CWMIN				227
+#define WLC_GET_CWMAX				228
+#define WLC_SET_CWMAX				229
+#define WLC_GET_WET				230
+#define WLC_SET_WET				231
+#define WLC_GET_PUB				232
+/* #define WLC_SET_GLACIAL_TIMER		233 */ /* no longer supported */
+/* #define WLC_GET_GLACIAL_TIMER		234 */ /* no longer supported */
+#define WLC_GET_KEY_PRIMARY			235
+#define WLC_SET_KEY_PRIMARY			236
+
+
+/* #define WLC_DUMP_RADIOREGS			237 */ /* no longer supported */
+#define WLC_GET_ACI_ARGS			238
+#define WLC_SET_ACI_ARGS			239
+#define WLC_UNSET_CALLBACK			240
+#define WLC_SET_CALLBACK			241
+#define WLC_GET_RADAR				242
+#define WLC_SET_RADAR				243
+#define WLC_SET_SPECT_MANAGMENT			244
+#define WLC_GET_SPECT_MANAGMENT			245
+#define WLC_WDS_GET_REMOTE_HWADDR		246	/* handled in wl_linux.c/wl_vx.c */
+#define WLC_WDS_GET_WPA_SUP			247
+#define WLC_SET_CS_SCAN_TIMER			248
+#define WLC_GET_CS_SCAN_TIMER			249
+#define WLC_MEASURE_REQUEST			250
+#define WLC_INIT				251
+#define WLC_SEND_QUIET				252
+#define WLC_KEEPALIVE			253
+#define WLC_SEND_PWR_CONSTRAINT			254
+#define WLC_UPGRADE_STATUS			255
+#define WLC_CURRENT_PWR				256
+#define WLC_GET_SCAN_PASSIVE_TIME		257
+#define WLC_SET_SCAN_PASSIVE_TIME		258
+#define WLC_LEGACY_LINK_BEHAVIOR		259
+#define WLC_GET_CHANNELS_IN_COUNTRY		260
+#define WLC_GET_COUNTRY_LIST			261
+#define WLC_GET_VAR				262	/* get value of named variable */
+#define WLC_SET_VAR				263	/* set named variable to value */
+#define WLC_NVRAM_GET				264	/* deprecated */
+#define WLC_NVRAM_SET				265
+#define WLC_NVRAM_DUMP				266
+#define WLC_REBOOT				267
+#define WLC_SET_WSEC_PMK			268
+#define WLC_GET_AUTH_MODE			269
+#define WLC_SET_AUTH_MODE			270
+#define WLC_GET_WAKEENTRY			271
+#define WLC_SET_WAKEENTRY			272
+#define WLC_NDCONFIG_ITEM			273	/* currently handled in wl_oid.c */
+#define WLC_NVOTPW				274
+#define WLC_OTPW				275
+#define WLC_IOV_BLOCK_GET			276
+#define WLC_IOV_MODULES_GET			277
+#define WLC_SOFT_RESET				278
+#define WLC_GET_ALLOW_MODE			279
+#define WLC_SET_ALLOW_MODE			280
+#define WLC_GET_DESIRED_BSSID			281
+#define WLC_SET_DESIRED_BSSID			282
+#define	WLC_DISASSOC_MYAP			283
+#define WLC_GET_NBANDS				284	/* for Dongle EXT_STA support */
+#define WLC_GET_BANDSTATES			285	/* for Dongle EXT_STA support */
+#define WLC_GET_WLC_BSS_INFO			286	/* for Dongle EXT_STA support */
+#define WLC_GET_ASSOC_INFO			287	/* for Dongle EXT_STA support */
+#define WLC_GET_OID_PHY				288	/* for Dongle EXT_STA support */
+#define WLC_SET_OID_PHY				289	/* for Dongle EXT_STA support */
+#define WLC_SET_ASSOC_TIME			290	/* for Dongle EXT_STA support */
+#define WLC_GET_DESIRED_SSID			291	/* for Dongle EXT_STA support */
+#define WLC_GET_CHANSPEC			292	/* for Dongle EXT_STA support */
+#define WLC_GET_ASSOC_STATE			293	/* for Dongle EXT_STA support */
+#define WLC_SET_PHY_STATE			294	/* for Dongle EXT_STA support */
+#define WLC_GET_SCAN_PENDING			295	/* for Dongle EXT_STA support */
+#define WLC_GET_SCANREQ_PENDING			296	/* for Dongle EXT_STA support */
+#define WLC_GET_PREV_ROAM_REASON		297	/* for Dongle EXT_STA support */
+#define WLC_SET_PREV_ROAM_REASON		298	/* for Dongle EXT_STA support */
+#define WLC_GET_BANDSTATES_PI			299	/* for Dongle EXT_STA support */
+#define WLC_GET_PHY_STATE			300	/* for Dongle EXT_STA support */
+#define WLC_GET_BSS_WPA_RSN			301	/* for Dongle EXT_STA support */
+#define WLC_GET_BSS_WPA2_RSN			302	/* for Dongle EXT_STA support */
+#define WLC_GET_BSS_BCN_TS			303	/* for Dongle EXT_STA support */
+#define WLC_GET_INT_DISASSOC			304	/* for Dongle EXT_STA support */
+#define WLC_SET_NUM_PEERS			305     /* for Dongle EXT_STA support */
+#define WLC_GET_NUM_BSS				306	/* for Dongle EXT_STA support */
+#define WLC_PHY_SAMPLE_COLLECT			307	/* phy sample collect mode */
+/* #define WLC_UM_PRIV				308 */	/* Deprecated: usermode driver */
+#define WLC_GET_CMD				309
+/* #define WLC_LAST				310 */	/* Never used - can be reused */
+#define WLC_SET_INTERFERENCE_OVERRIDE_MODE	311	/* set inter mode override */
+#define WLC_GET_INTERFERENCE_OVERRIDE_MODE	312	/* get inter mode override */
+/* #define WLC_GET_WAI_RESTRICT			313 */	/* for WAPI, deprecated use iovar instead */
+/* #define WLC_SET_WAI_RESTRICT			314 */	/* for WAPI, deprecated use iovar instead */
+/* #define WLC_SET_WAI_REKEY			315 */	/* for WAPI, deprecated use iovar instead */
+#define WLC_SET_NAT_CONFIG			316	/* for configuring NAT filter driver */
+#define WLC_GET_NAT_STATE			317
+#define WLC_GET_TXBF_RATESET			318
+#define WLC_SET_TXBF_RATESET			319
+#define WLC_SCAN_CQ				320
+#define WLC_GET_RSSI_QDB			321 /* qdB portion of the RSSI */
+
+#define WLC_LAST				322
+#ifndef EPICTRL_COOKIE
+#define EPICTRL_COOKIE		0xABADCEDE
+#endif
+
+/* vx wlc ioctl's offset */
+#define CMN_IOCTL_OFF 0x180
+
+/*
+ * custom OID support
+ *
+ * 0xFF - implementation specific OID
+ * 0xE4 - first byte of Broadcom PCI vendor ID
+ * 0x14 - second byte of Broadcom PCI vendor ID
+ * 0xXX - the custom OID number
+ */
+
+/* begin 0x1f values beyond the start of the ET driver range. */
+#define WL_OID_BASE		0xFFE41420
+
+/* NDIS overrides */
+#define OID_WL_GETINSTANCE	(WL_OID_BASE + WLC_GET_INSTANCE)
+#define OID_WL_GET_FORCELINK	(WL_OID_BASE + WLC_GET_FORCELINK)
+#define OID_WL_SET_FORCELINK	(WL_OID_BASE + WLC_SET_FORCELINK)
+#define	OID_WL_ENCRYPT_STRENGTH	(WL_OID_BASE + WLC_ENCRYPT_STRENGTH)
+#define OID_WL_DECRYPT_STATUS	(WL_OID_BASE + WLC_DECRYPT_STATUS)
+#define OID_LEGACY_LINK_BEHAVIOR (WL_OID_BASE + WLC_LEGACY_LINK_BEHAVIOR)
+#define OID_WL_NDCONFIG_ITEM	(WL_OID_BASE + WLC_NDCONFIG_ITEM)
+
+/* EXT_STA Dongle support */
+#define OID_STA_CHANSPEC	(WL_OID_BASE + WLC_GET_CHANSPEC)
+#define OID_STA_NBANDS		(WL_OID_BASE + WLC_GET_NBANDS)
+#define OID_STA_GET_PHY		(WL_OID_BASE + WLC_GET_OID_PHY)
+#define OID_STA_SET_PHY		(WL_OID_BASE + WLC_SET_OID_PHY)
+#define OID_STA_ASSOC_TIME	(WL_OID_BASE + WLC_SET_ASSOC_TIME)
+#define OID_STA_DESIRED_SSID	(WL_OID_BASE + WLC_GET_DESIRED_SSID)
+#define OID_STA_SET_PHY_STATE	(WL_OID_BASE + WLC_SET_PHY_STATE)
+#define OID_STA_SCAN_PENDING	(WL_OID_BASE + WLC_GET_SCAN_PENDING)
+#define OID_STA_SCANREQ_PENDING (WL_OID_BASE + WLC_GET_SCANREQ_PENDING)
+#define OID_STA_GET_ROAM_REASON (WL_OID_BASE + WLC_GET_PREV_ROAM_REASON)
+#define OID_STA_SET_ROAM_REASON (WL_OID_BASE + WLC_SET_PREV_ROAM_REASON)
+#define OID_STA_GET_PHY_STATE	(WL_OID_BASE + WLC_GET_PHY_STATE)
+#define OID_STA_INT_DISASSOC	(WL_OID_BASE + WLC_GET_INT_DISASSOC)
+#define OID_STA_SET_NUM_PEERS	(WL_OID_BASE + WLC_SET_NUM_PEERS)
+#define OID_STA_GET_NUM_BSS	(WL_OID_BASE + WLC_GET_NUM_BSS)
+
+/* NAT filter driver support */
+#define OID_NAT_SET_CONFIG	(WL_OID_BASE + WLC_SET_NAT_CONFIG)
+#define OID_NAT_GET_STATE	(WL_OID_BASE + WLC_GET_NAT_STATE)
+
+#define WL_DECRYPT_STATUS_SUCCESS	1
+#define WL_DECRYPT_STATUS_FAILURE	2
+#define WL_DECRYPT_STATUS_UNKNOWN	3
+
+/* allows user-mode app to poll the status of USB image upgrade */
+#define WLC_UPGRADE_SUCCESS			0
+#define WLC_UPGRADE_PENDING			1
+
+/* WLC_GET_AUTH, WLC_SET_AUTH values */
+#define WL_AUTH_OPEN_SYSTEM		0	/* d11 open authentication */
+#define WL_AUTH_SHARED_KEY		1	/* d11 shared authentication */
+#define WL_AUTH_OPEN_SHARED		2	/* try open, then shared if open failed w/rc 13 */
+
+/* a large TX Power as an init value to factor out of MIN() calculations,
+ * keep low enough to fit in an int8, units are .25 dBm
+ */
+#define WLC_TXPWR_MAX		(127)	/* ~32 dBm = 1,500 mW */
+
+/* "diag" iovar argument and error code */
+#define WL_DIAG_INTERRUPT			1	/* d11 loopback interrupt test */
+#define WL_DIAG_LOOPBACK			2	/* d11 loopback data test */
+#define WL_DIAG_MEMORY				3	/* d11 memory test */
+#define WL_DIAG_LED				4	/* LED test */
+#define WL_DIAG_REG				5	/* d11/phy register test */
+#define WL_DIAG_SROM				6	/* srom read/crc test */
+#define WL_DIAG_DMA				7	/* DMA test */
+#define WL_DIAG_LOOPBACK_EXT			8	/* enhanced d11 loopback data test */
+
+#define WL_DIAGERR_SUCCESS			0
+#define WL_DIAGERR_FAIL_TO_RUN			1	/* unable to run requested diag */
+#define WL_DIAGERR_NOT_SUPPORTED		2	/* diag requested is not supported */
+#define WL_DIAGERR_INTERRUPT_FAIL		3	/* loopback interrupt test failed */
+#define WL_DIAGERR_LOOPBACK_FAIL		4	/* loopback data test failed */
+#define WL_DIAGERR_SROM_FAIL			5	/* srom read failed */
+#define WL_DIAGERR_SROM_BADCRC			6	/* srom crc failed */
+#define WL_DIAGERR_REG_FAIL			7	/* d11/phy register test failed */
+#define WL_DIAGERR_MEMORY_FAIL			8	/* d11 memory test failed */
+#define WL_DIAGERR_NOMEM			9	/* diag test failed due to no memory */
+#define WL_DIAGERR_DMA_FAIL			10	/* DMA test failed */
+
+#define WL_DIAGERR_MEMORY_TIMEOUT		11	/* d11 memory test didn't finish in time */
+#define WL_DIAGERR_MEMORY_BADPATTERN		12	/* d11 memory test result in bad pattern */
+
+/* band types */
+#define	WLC_BAND_AUTO		0	/* auto-select */
+#define	WLC_BAND_5G		1	/* 5 Ghz */
+#define	WLC_BAND_2G		2	/* 2.4 Ghz */
+#define	WLC_BAND_ALL		3	/* all bands */
+
+/* band range returned by band_range iovar */
+#define WL_CHAN_FREQ_RANGE_2G      0
+#define WL_CHAN_FREQ_RANGE_5GL     1
+#define WL_CHAN_FREQ_RANGE_5GM     2
+#define WL_CHAN_FREQ_RANGE_5GH     3
+
+#define WL_CHAN_FREQ_RANGE_5GLL_5BAND    4
+#define WL_CHAN_FREQ_RANGE_5GLH_5BAND    5
+#define WL_CHAN_FREQ_RANGE_5GML_5BAND    6
+#define WL_CHAN_FREQ_RANGE_5GMH_5BAND    7
+#define WL_CHAN_FREQ_RANGE_5GH_5BAND     8
+
+#define WL_CHAN_FREQ_RANGE_5G_BAND0     1
+#define WL_CHAN_FREQ_RANGE_5G_BAND1     2
+#define WL_CHAN_FREQ_RANGE_5G_BAND2     3
+#define WL_CHAN_FREQ_RANGE_5G_BAND3     4
+
+#define WL_CHAN_FREQ_RANGE_5G_4BAND	5
+
+/* MAC list modes */
+#define WLC_MACMODE_DISABLED	0	/* MAC list disabled */
+#define WLC_MACMODE_DENY	1	/* Deny specified (i.e. allow unspecified) */
+#define WLC_MACMODE_ALLOW	2	/* Allow specified (i.e. deny unspecified) */
+
+/*
+ * 54g modes (basic bits may still be overridden)
+ *
+ * GMODE_LEGACY_B			Rateset: 1b, 2b, 5.5, 11
+ *					Preamble: Long
+ *					Shortslot: Off
+ * GMODE_AUTO				Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54
+ *					Extended Rateset: 6, 9, 12, 48
+ *					Preamble: Long
+ *					Shortslot: Auto
+ * GMODE_ONLY				Rateset: 1b, 2b, 5.5b, 11b, 18, 24b, 36, 54
+ *					Extended Rateset: 6b, 9, 12b, 48
+ *					Preamble: Short required
+ *					Shortslot: Auto
+ * GMODE_B_DEFERRED			Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54
+ *					Extended Rateset: 6, 9, 12, 48
+ *					Preamble: Long
+ *					Shortslot: On
+ * GMODE_PERFORMANCE			Rateset: 1b, 2b, 5.5b, 6b, 9, 11b, 12b, 18, 24b, 36, 48, 54
+ *					Preamble: Short required
+ *					Shortslot: On and required
+ * GMODE_LRS				Rateset: 1b, 2b, 5.5b, 11b
+ *					Extended Rateset: 6, 9, 12, 18, 24, 36, 48, 54
+ *					Preamble: Long
+ *					Shortslot: Auto
+ */
+#define GMODE_LEGACY_B		0
+#define GMODE_AUTO		1
+#define GMODE_ONLY		2
+#define GMODE_B_DEFERRED	3
+#define GMODE_PERFORMANCE	4
+#define GMODE_LRS		5
+#define GMODE_MAX		6
+
+/* values for PLCPHdr_override */
+#define WLC_PLCP_AUTO	-1
+#define WLC_PLCP_SHORT	0
+#define WLC_PLCP_LONG	1
+
+/* values for g_protection_override and n_protection_override */
+#define WLC_PROTECTION_AUTO		-1
+#define WLC_PROTECTION_OFF		0
+#define WLC_PROTECTION_ON		1
+#define WLC_PROTECTION_MMHDR_ONLY	2
+#define WLC_PROTECTION_CTS_ONLY		3
+
+/* values for g_protection_control and n_protection_control */
+#define WLC_PROTECTION_CTL_OFF		0
+#define WLC_PROTECTION_CTL_LOCAL	1
+#define WLC_PROTECTION_CTL_OVERLAP	2
+
+/* values for n_protection */
+#define WLC_N_PROTECTION_OFF		0
+#define WLC_N_PROTECTION_OPTIONAL	1
+#define WLC_N_PROTECTION_20IN40		2
+#define WLC_N_PROTECTION_MIXEDMODE	3
+
+/* values for n_preamble_type */
+#define WLC_N_PREAMBLE_MIXEDMODE	0
+#define WLC_N_PREAMBLE_GF		1
+#define WLC_N_PREAMBLE_GF_BRCM          2
+
+/* values for band specific 40MHz capabilities (deprecated) */
+#define WLC_N_BW_20ALL			0
+#define WLC_N_BW_40ALL			1
+#define WLC_N_BW_20IN2G_40IN5G		2
+
+#define WLC_BW_20MHZ_BIT		(1<<0)
+#define WLC_BW_40MHZ_BIT		(1<<1)
+#define WLC_BW_80MHZ_BIT		(1<<2)
+#define WLC_BW_160MHZ_BIT		(1<<3)
+
+/* Bandwidth capabilities */
+#define WLC_BW_CAP_20MHZ		(WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_40MHZ		(WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_80MHZ		(WLC_BW_80MHZ_BIT|WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_160MHZ		(WLC_BW_160MHZ_BIT|WLC_BW_80MHZ_BIT| \
+	WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_UNRESTRICTED		0xFF
+
+#define WL_BW_CAP_20MHZ(bw_cap)	(((bw_cap) & WLC_BW_20MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_40MHZ(bw_cap)	(((bw_cap) & WLC_BW_40MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_80MHZ(bw_cap)	(((bw_cap) & WLC_BW_80MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_160MHZ(bw_cap)(((bw_cap) & WLC_BW_160MHZ_BIT) ? TRUE : FALSE)
+
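+/* Usage sketch (illustrative only, not compiled): a device reporting
+ * WLC_BW_CAP_80MHZ also advertises the narrower widths, since each
+ * WLC_BW_CAP_* value ORs in the smaller-bandwidth bits.
+ */
+#if 0
+uint8 bw_cap = WLC_BW_CAP_80MHZ;
+/* WL_BW_CAP_20MHZ/40MHZ/80MHZ(bw_cap) are TRUE; WL_BW_CAP_160MHZ is FALSE */
+#endif
+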
+/* values to force tx/rx chain */
+#define WLC_N_TXRX_CHAIN0		0
+#define WLC_N_TXRX_CHAIN1		1
+
+/* bitflags for SGI support (sgi_rx iovar) */
+#define WLC_N_SGI_20			0x01
+#define WLC_N_SGI_40			0x02
+#define WLC_VHT_SGI_80			0x04
+
+/* when sgi_tx==WLC_SGI_ALL, bypass rate selection, enable sgi for all mcs */
+#define WLC_SGI_ALL				0x02
+
+#define LISTEN_INTERVAL			10
+/* interference mitigation options */
+#define	INTERFERE_OVRRIDE_OFF	-1	/* interference override off */
+#define	INTERFERE_NONE	0	/* off */
+#define	NON_WLAN	1	/* foreign/non 802.11 interference, no auto detect */
+#define	WLAN_MANUAL	2	/* ACI: no auto detection */
+#define	WLAN_AUTO	3	/* ACI: auto detect */
+#define	WLAN_AUTO_W_NOISE	4	/* ACI: auto detect both ACI and non-802.11 interference */
+#define AUTO_ACTIVE	(1 << 7) /* Auto is currently active */
+
+/* interference mode bit-masks (ACPHY) */
+#define ACPHY_ACI_GLITCHBASED_DESENSE 1   /* bit 0 */
+#define ACPHY_ACI_HWACI_PKTGAINLMT 2      /* bit 1 */
+#define ACPHY_ACI_W2NB_PKTGAINLMT 4       /* bit 2 */
+#define ACPHY_ACI_PREEMPTION 8            /* bit 3 */
+#define ACPHY_ACI_MAX_MODE 15
+
+/* AP environment */
+#define AP_ENV_DETECT_NOT_USED		0 /* We aren't using AP environment detection */
+#define AP_ENV_DENSE			1 /* "Corporate" or other AP dense environment */
+#define AP_ENV_SPARSE			2 /* "Home" or other sparse environment */
+#define AP_ENV_INDETERMINATE		3 /* AP environment hasn't been identified */
+
+#define TRIGGER_NOW				0
+#define TRIGGER_CRS				0x01
+#define TRIGGER_CRSDEASSERT			0x02
+#define TRIGGER_GOODFCS				0x04
+#define TRIGGER_BADFCS				0x08
+#define TRIGGER_BADPLCP				0x10
+#define TRIGGER_CRSGLITCH			0x20
+
+#define	WL_SAMPLEDATA_HEADER_TYPE	1
+#define WL_SAMPLEDATA_HEADER_SIZE	80	/* sample collect header size (bytes) */
+#define	WL_SAMPLEDATA_TYPE		2
+#define	WL_SAMPLEDATA_SEQ		0xff	/* sequence # */
+#define	WL_SAMPLEDATA_MORE_DATA		0x100	/* more data mask */
+
+/* WL_OTA START */
+#define WL_OTA_ARG_PARSE_BLK_SIZE	1200
+#define WL_OTA_TEST_MAX_NUM_RATE	30
+#define WL_OTA_TEST_MAX_NUM_SEQ		100
+
+#define WL_THRESHOLD_LO_BAND	70	/* range from 5250MHz - 5350MHz */
+
+/* radar iovar SET defines */
+#define WL_RADAR_DETECTOR_OFF		0	/* radar detector off */
+#define WL_RADAR_DETECTOR_ON		1	/* radar detector on */
+#define WL_RADAR_SIMULATED		2	/* force radar detector to declare
+						 * detection once
+						 */
+#define WL_RSSI_ANT_VERSION	1	/* current version of wl_rssi_ant_t */
+#define WL_ANT_RX_MAX		2	/* max 2 receive antennas */
+#define WL_ANT_HT_RX_MAX	3	/* max 3 receive antennas/cores */
+#define WL_ANT_IDX_1		0	/* antenna index 1 */
+#define WL_ANT_IDX_2		1	/* antenna index 2 */
+
+#ifndef WL_RSSI_ANT_MAX
+#define WL_RSSI_ANT_MAX		4	/* max possible rx antennas */
+#elif WL_RSSI_ANT_MAX != 4
+#error "WL_RSSI_ANT_MAX does not match"
+#endif
+
+/* dfs_status iovar-related defines */
+
+/* cac - channel availability check,
+ * ism - in-service monitoring
+ * csa - channel switching announcement
+ */
+
+/* cac state values */
+#define WL_DFS_CACSTATE_IDLE		0	/* state for operating in non-radar channel */
+#define	WL_DFS_CACSTATE_PREISM_CAC	1	/* CAC in progress */
+#define WL_DFS_CACSTATE_ISM		2	/* ISM in progress */
+#define WL_DFS_CACSTATE_CSA		3	/* csa */
+#define WL_DFS_CACSTATE_POSTISM_CAC	4	/* ISM CAC */
+#define WL_DFS_CACSTATE_PREISM_OOC	5	/* PREISM OOC */
+#define WL_DFS_CACSTATE_POSTISM_OOC	6	/* POSTISM OOC */
+#define WL_DFS_CACSTATES		7	/* this many states exist */
+
+/* Defines used with channel_bandwidth for curpower */
+#define WL_BW_20MHZ		0
+#define WL_BW_40MHZ		1
+#define WL_BW_80MHZ		2
+#define WL_BW_160MHZ		3
+
+/* tx_power_t.flags bits */
+#define WL_TX_POWER_F_ENABLED	1
+#define WL_TX_POWER_F_HW		2
+#define WL_TX_POWER_F_MIMO		4
+#define WL_TX_POWER_F_SISO		8
+#define WL_TX_POWER_F_HT		0x10
+#define WL_TX_POWER_F_VHT		0x20
+#define WL_TX_POWER_F_OPENLOOP		0x40
+
+/* Message levels */
+#define WL_ERROR_VAL		0x00000001
+#define WL_TRACE_VAL		0x00000002
+#define WL_PRHDRS_VAL		0x00000004
+#define WL_PRPKT_VAL		0x00000008
+#define WL_INFORM_VAL		0x00000010
+#define WL_TMP_VAL		0x00000020
+#define WL_OID_VAL		0x00000040
+#define WL_RATE_VAL		0x00000080
+#define WL_ASSOC_VAL		0x00000100
+#define WL_PRUSR_VAL		0x00000200
+#define WL_PS_VAL		0x00000400
+#define WL_TXPWR_VAL		0x00000800	/* retired in TOT on 6/10/2009 */
+#define WL_PORT_VAL		0x00001000
+#define WL_DUAL_VAL		0x00002000
+#define WL_WSEC_VAL		0x00004000
+#define WL_WSEC_DUMP_VAL	0x00008000
+#define WL_LOG_VAL		0x00010000
+#define WL_NRSSI_VAL		0x00020000	/* retired in TOT on 6/10/2009 */
+#define WL_LOFT_VAL		0x00040000	/* retired in TOT on 6/10/2009 */
+#define WL_REGULATORY_VAL	0x00080000
+#define WL_PHYCAL_VAL		0x00100000	/* retired in TOT on 6/10/2009 */
+#define WL_RADAR_VAL		0x00200000	/* retired in TOT on 6/10/2009 */
+#define WL_MPC_VAL		0x00400000
+#define WL_APSTA_VAL		0x00800000
+#define WL_DFS_VAL		0x01000000
+#define WL_BA_VAL		0x02000000	/* retired in TOT on 6/14/2010 */
+#define WL_ACI_VAL		0x04000000
+#define WL_MBSS_VAL		0x04000000
+#define WL_CAC_VAL		0x08000000
+#define WL_AMSDU_VAL		0x10000000
+#define WL_AMPDU_VAL		0x20000000
+#define WL_FFPLD_VAL		0x40000000
+
+/* wl_msg_level is full. For new bits take the next one and AND with
+ * wl_msg_level2 in wl_dbg.h
+ */
+#define WL_DPT_VAL		0x00000001
+#define WL_SCAN_VAL		0x00000002
+#define WL_WOWL_VAL		0x00000004
+#define WL_COEX_VAL		0x00000008
+#define WL_RTDC_VAL		0x00000010
+#define WL_PROTO_VAL		0x00000020
+#define WL_BTA_VAL		0x00000040
+#define WL_CHANINT_VAL		0x00000080
+#define WL_WMF_VAL		0x00000100
+#define WL_P2P_VAL		0x00000200
+#define WL_ITFR_VAL		0x00000400
+#define WL_MCHAN_VAL		0x00000800
+#define WL_TDLS_VAL		0x00001000
+#define WL_MCNX_VAL		0x00002000
+#define WL_PROT_VAL		0x00004000
+#define WL_PSTA_VAL		0x00008000
+#define WL_TSO_VAL		0x00010000
+#define WL_TRF_MGMT_VAL		0x00020000
+#define WL_LPC_VAL	        0x00040000
+#define WL_L2FILTER_VAL		0x00080000
+#define WL_TXBF_VAL		0x00100000
+#define WL_P2PO_VAL		0x00200000
+#define WL_TBTT_VAL		0x00400000
+#define WL_NIC_VAL		0x00800000
+#define WL_MQ_VAL		0x01000000
+
+/* This level is currently used in Phoenix2 only */
+#define WL_SRSCAN_VAL		0x02000000
+
+#define WL_WNM_VAL		0x04000000
+#define WL_PWRSEL_VAL		0x10000000
+#define WL_NET_DETECT_VAL	0x20000000
+#define WL_PCIE_VAL		0x40000000
+
+/* use top-bit for WL_TIME_STAMP_VAL because this is a modifier
+ * rather than a message-type of its own
+ */
+#define WL_TIMESTAMP_VAL        0x80000000
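+/* Example (illustrative only; the msg_level2 variable name is an assumption,
+ * not from this header): composing a second-set trace mask with the
+ * WL_TIMESTAMP_VAL modifier enabled.
+ */
+#if 0	/* usage sketch */
+unsigned int msg_level2 = WL_SCAN_VAL | WL_P2P_VAL | WL_TIMESTAMP_VAL;
+#endif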
+
+/* max # of leds supported by GPIO (gpio pin# == led index#) */
+#define	WL_LED_NUMGPIO		32	/* gpio 0-31 */
+
+/* led per-pin behaviors */
+#define	WL_LED_OFF		0		/* always off */
+#define	WL_LED_ON		1		/* always on */
+#define	WL_LED_ACTIVITY		2		/* activity */
+#define	WL_LED_RADIO		3		/* radio enabled */
+#define	WL_LED_ARADIO		4		/* 5 GHz radio enabled */
+#define	WL_LED_BRADIO		5		/* 2.4 GHz radio enabled */
+#define	WL_LED_BGMODE		6		/* on if gmode, off if bmode */
+#define	WL_LED_WI1		7
+#define	WL_LED_WI2		8
+#define	WL_LED_WI3		9
+#define	WL_LED_ASSOC		10		/* associated state indicator */
+#define	WL_LED_INACTIVE		11		/* null behavior (clears default behavior) */
+#define	WL_LED_ASSOCACT		12		/* on when associated; blink fast for activity */
+#define WL_LED_WI4		13
+#define WL_LED_WI5		14
+#define	WL_LED_BLINKSLOW	15		/* blink slow */
+#define	WL_LED_BLINKMED		16		/* blink med */
+#define	WL_LED_BLINKFAST	17		/* blink fast */
+#define	WL_LED_BLINKCUSTOM	18		/* blink custom */
+#define	WL_LED_BLINKPERIODIC	19		/* blink periodic (custom 1000ms / off 400ms) */
+#define WL_LED_ASSOC_WITH_SEC	20		/* when connected with security */
+						/* keep on for 300 sec */
+#define WL_LED_START_OFF	21		/* off upon boot, could be turned on later */
+#define WL_LED_WI6		22
+#define WL_LED_WI7		23
+#define WL_LED_WI8		24
+#define	WL_LED_NUMBEHAVIOR	25
+
+/* led behavior numeric value format */
+#define	WL_LED_BEH_MASK		0x7f		/* behavior mask */
+#define	WL_LED_AL_MASK		0x80		/* activelow (polarity) bit */
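+/* Example (illustrative only; the helper below is hypothetical): an LED
+ * behavior byte packs the behavior index in WL_LED_BEH_MASK and the
+ * polarity in WL_LED_AL_MASK.
+ */
+#if 0	/* usage sketch */
+static unsigned char wl_led_encode(unsigned char behavior, int active_low)
+{
+	return (unsigned char)((behavior & WL_LED_BEH_MASK) |
+	                       (active_low ? WL_LED_AL_MASK : 0));
+}
+/* wl_led_encode(WL_LED_ACTIVITY, 1) -> activity LED, active-low */
+#endif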
+
+/* number of bytes needed to define a proper bit mask for MAC event reporting */
+#define BCMIO_ROUNDUP(x, y)	((((x) + ((y) - 1)) / (y)) * (y))
+#define BCMIO_NBBY		8
+#define WL_EVENTING_MASK_LEN	16
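+/* Example (illustrative only; the helper below is hypothetical): a
+ * WL_EVENTING_MASK_LEN-byte mask covers 16 * BCMIO_NBBY = 128 event codes;
+ * bytes needed for n bits is BCMIO_ROUNDUP(n, BCMIO_NBBY) / BCMIO_NBBY.
+ */
+#if 0	/* usage sketch */
+static void mark_event(unsigned char mask[WL_EVENTING_MASK_LEN], int event_id)
+{
+	mask[event_id / BCMIO_NBBY] |= 1 << (event_id % BCMIO_NBBY);
+}
+#endif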
+
+
+/* join preference types */
+#define WL_JOIN_PREF_RSSI	1	/* by RSSI */
+#define WL_JOIN_PREF_WPA	2	/* by akm and ciphers */
+#define WL_JOIN_PREF_BAND	3	/* by 802.11 band */
+#define WL_JOIN_PREF_RSSI_DELTA	4	/* by 802.11 band only if RSSI delta condition matches */
+#define WL_JOIN_PREF_TRANS_PREF	5	/* defined by requesting AP */
+
+/* band preference */
+#define WLJP_BAND_ASSOC_PREF	255	/* use what WLC_SET_ASSOC_PREFER ioctl specifies */
+
+/* any multicast cipher suite */
+#define WL_WPA_ACP_MCS_ANY	"\x00\x00\x00\x00"
+
+/* 802.11h measurement types */
+#define WLC_MEASURE_TPC			1
+#define WLC_MEASURE_CHANNEL_BASIC	2
+#define WLC_MEASURE_CHANNEL_CCA		3
+#define WLC_MEASURE_CHANNEL_RPI		4
+
+/* regulatory enforcement levels */
+#define SPECT_MNGMT_OFF			0		/* both 11h and 11d disabled */
+#define SPECT_MNGMT_LOOSE_11H		1		/* allow non-11h APs in scan lists */
+#define SPECT_MNGMT_STRICT_11H		2		/* prune out non-11h APs from scan list */
+#define SPECT_MNGMT_STRICT_11D		3		/* switch to 802.11D mode */
+/* SPECT_MNGMT_LOOSE_11H_D - same as SPECT_MNGMT_LOOSE with the exception that Country IE
+ * adoption is done regardless of capability spectrum_management
+ */
+#define SPECT_MNGMT_LOOSE_11H_D		4		/* operation defined above */
+
+#define WL_CHAN_VALID_HW	(1 << 0)	/* valid with current HW */
+#define WL_CHAN_VALID_SW	(1 << 1)	/* valid with current country setting */
+#define WL_CHAN_BAND_5G		(1 << 2)	/* 5GHz-band channel */
+#define WL_CHAN_RADAR		(1 << 3)	/* radar sensitive  channel */
+#define WL_CHAN_INACTIVE	(1 << 4)	/* temporarily inactive due to radar */
+#define WL_CHAN_PASSIVE		(1 << 5)	/* channel is in passive mode */
+#define WL_CHAN_RESTRICTED	(1 << 6)	/* restricted use channel */
+
+/* BTC mode used by "btc_mode" iovar */
+#define	WL_BTC_DISABLE		0	/* disable BT coexistence */
+#define WL_BTC_FULLTDM		1	/* full TDM COEX */
+#define WL_BTC_ENABLE		1	/* full TDM COEX to maintain backward compatibility */
+#define WL_BTC_PREMPT		2	/* full TDM COEX with preemption */
+#define WL_BTC_LITE		3	/* lightweight coex for large-isolation platforms */
+#define WL_BTC_PARALLEL		4	/* BT and WLAN run in parallel with separate antennas */
+#define WL_BTC_HYBRID		5	/* hybrid coex, only ACK is allowed to transmit in BT slot */
+#define WL_BTC_DEFAULT		8	/* set the default mode for the device */
+#define WL_INF_BTC_DISABLE      0
+#define WL_INF_BTC_ENABLE       1
+#define WL_INF_BTC_AUTO         3
+
+/* BTC wire used by "btc_wire" iovar */
+#define	WL_BTC_DEFWIRE		0	/* use default wire setting */
+#define WL_BTC_2WIRE		2	/* use 2-wire BTC */
+#define WL_BTC_3WIRE		3	/* use 3-wire BTC */
+#define WL_BTC_4WIRE		4	/* use 4-wire BTC */
+
+/* BTC flags: BTC configuration that can be set by host */
+#define WL_BTC_FLAG_PREMPT               (1 << 0)
+#define WL_BTC_FLAG_BT_DEF               (1 << 1)
+#define WL_BTC_FLAG_ACTIVE_PROT          (1 << 2)
+#define WL_BTC_FLAG_SIM_RSP              (1 << 3)
+#define WL_BTC_FLAG_PS_PROTECT           (1 << 4)
+#define WL_BTC_FLAG_SIM_TX_LP	         (1 << 5)
+#define WL_BTC_FLAG_ECI                  (1 << 6)
+#define WL_BTC_FLAG_LIGHT                (1 << 7)
+#define WL_BTC_FLAG_PARALLEL             (1 << 8)
+
+/* maximum channels returned by the get valid channels iovar */
+#define WL_NUMCHANNELS		64
+
+/* max number of chanspecs (used by the iovar to calc. buf space) */
+#define WL_NUMCHANSPECS 110
+
+/* WDS link local endpoint WPA role */
+#define WL_WDS_WPA_ROLE_AUTH	0	/* authenticator */
+#define WL_WDS_WPA_ROLE_SUP	1	/* supplicant */
+#define WL_WDS_WPA_ROLE_AUTO	255	/* auto, based on mac addr value */
+
+/* Base offset values */
+#define WL_PKT_FILTER_BASE_PKT   0
+#define WL_PKT_FILTER_BASE_END   1
+#define WL_PKT_FILTER_BASE_D11_H 2 /* May be removed */
+#define WL_PKT_FILTER_BASE_D11_D 3 /* May be removed */
+#define WL_PKT_FILTER_BASE_ETH_H 4
+#define WL_PKT_FILTER_BASE_ETH_D 5
+#define WL_PKT_FILTER_BASE_ARP_H 6
+#define WL_PKT_FILTER_BASE_ARP_D 7 /* May be removed */
+#define WL_PKT_FILTER_BASE_IP4_H 8
+#define WL_PKT_FILTER_BASE_IP4_D 9
+#define WL_PKT_FILTER_BASE_IP6_H 10
+#define WL_PKT_FILTER_BASE_IP6_D 11
+#define WL_PKT_FILTER_BASE_TCP_H 12
+#define WL_PKT_FILTER_BASE_TCP_D 13 /* May be removed */
+#define WL_PKT_FILTER_BASE_UDP_H 14
+#define WL_PKT_FILTER_BASE_UDP_D 15
+#define WL_PKT_FILTER_BASE_IP6_P 16
+#define WL_PKT_FILTER_BASE_COUNT 17 /* May be removed */
+
+/* String mapping for bases that may be used by applications or debug */
+#define WL_PKT_FILTER_BASE_NAMES \
+	{ "START", WL_PKT_FILTER_BASE_PKT },   \
+	{ "END",   WL_PKT_FILTER_BASE_END },   \
+	{ "ETH_H", WL_PKT_FILTER_BASE_ETH_H }, \
+	{ "ETH_D", WL_PKT_FILTER_BASE_ETH_D }, \
+	{ "D11_H", WL_PKT_FILTER_BASE_D11_H }, \
+	{ "D11_D", WL_PKT_FILTER_BASE_D11_D }, \
+	{ "ARP_H", WL_PKT_FILTER_BASE_ARP_H }, \
+	{ "IP4_H", WL_PKT_FILTER_BASE_IP4_H }, \
+	{ "IP4_D", WL_PKT_FILTER_BASE_IP4_D }, \
+	{ "IP6_H", WL_PKT_FILTER_BASE_IP6_H }, \
+	{ "IP6_D", WL_PKT_FILTER_BASE_IP6_D }, \
+	{ "IP6_P", WL_PKT_FILTER_BASE_IP6_P }, \
+	{ "TCP_H", WL_PKT_FILTER_BASE_TCP_H }, \
+	{ "TCP_D", WL_PKT_FILTER_BASE_TCP_D }, \
+	{ "UDP_H", WL_PKT_FILTER_BASE_UDP_H }, \
+	{ "UDP_D", WL_PKT_FILTER_BASE_UDP_D }
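+/* Example (illustrative only; the table and lookup below are hypothetical
+ * and need <string.h>): the macro above expands to brace-enclosed
+ * name/value pairs, so it can seed a lookup table directly.
+ */
+#if 0	/* usage sketch */
+static const struct { const char *name; int base; } pkt_filter_bases[] = {
+	WL_PKT_FILTER_BASE_NAMES
+};
+static int pkt_filter_base_by_name(const char *name)
+{
+	unsigned int i;
+	for (i = 0; i < sizeof(pkt_filter_bases) / sizeof(pkt_filter_bases[0]); i++)
+		if (strcmp(pkt_filter_bases[i].name, name) == 0)
+			return pkt_filter_bases[i].base;
+	return -1;	/* unknown base name */
+}
+#endif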
+
+/* Flags for a pattern list element */
+#define WL_PKT_FILTER_MFLAG_NEG 0x0001
+
+/*
+ * Packet engine interface
+ */
+
+#define WL_PKTENG_PER_TX_START			0x01
+#define WL_PKTENG_PER_TX_STOP			0x02
+#define WL_PKTENG_PER_RX_START			0x04
+#define WL_PKTENG_PER_RX_WITH_ACK_START		0x05
+#define WL_PKTENG_PER_TX_WITH_ACK_START		0x06
+#define WL_PKTENG_PER_RX_STOP			0x08
+#define WL_PKTENG_PER_MASK			0xff
+
+#define WL_PKTENG_SYNCHRONOUS			0x100	/* synchronous flag */
+
+#define WL_PKTENG_MAXPKTSZ				16384	/* max pktsz limit for pkteng */
+
+#define NUM_80211b_RATES	4
+#define NUM_80211ag_RATES	8
+#define NUM_80211n_RATES	32
+#define NUM_80211_RATES		(NUM_80211b_RATES+NUM_80211ag_RATES+NUM_80211n_RATES)
+
+/*
+ * WOWL capability/override settings
+ */
+#define WL_WOWL_MAGIC           (1 << 0)    /* Wakeup on Magic packet */
+#define WL_WOWL_NET             (1 << 1)    /* Wakeup on Netpattern */
+#define WL_WOWL_DIS             (1 << 2)    /* Wakeup on loss-of-link due to Disassoc/Deauth */
+#define WL_WOWL_RETR            (1 << 3)    /* Wakeup on retrograde TSF */
+#define WL_WOWL_BCN             (1 << 4)    /* Wakeup on loss of beacon */
+#define WL_WOWL_TST             (1 << 5)    /* Wakeup after test */
+#define WL_WOWL_M1              (1 << 6)    /* Wakeup after PTK refresh */
+#define WL_WOWL_EAPID           (1 << 7)    /* Wakeup after receipt of EAP-Identity Req */
+#define WL_WOWL_PME_GPIO        (1 << 8)    /* Wakeind via PME(0) or GPIO(1) */
+#define WL_WOWL_NEEDTKIP1       (1 << 9)    /* need tkip phase 1 key to be updated by the driver */
+#define WL_WOWL_GTK_FAILURE     (1 << 10)   /* enable wakeup if GTK fails */
+#define WL_WOWL_EXTMAGPAT       (1 << 11)   /* support extended magic packets */
+#define WL_WOWL_ARPOFFLOAD      (1 << 12)   /* support ARP/NS/keepalive offloading */
+#define WL_WOWL_WPA2            (1 << 13)   /* read protocol version for EAPOL frames */
+#define WL_WOWL_KEYROT          (1 << 14)   /* If the bit is set, use key rotation */
+#define WL_WOWL_BCAST           (1 << 15)   /* If the bit is set, frm received was bcast frame */
+#define WL_WOWL_SCANOL          (1 << 16)   /* If the bit is set, scan offload is enabled */
+#define WL_WOWL_TCPKEEP_TIME    (1 << 17)   /* Wakeup on tcpkeep alive timeout */
+#define WL_WOWL_MDNS_CONFLICT   (1 << 18)   /* Wakeup on mDNS Conflict Resolution */
+#define WL_WOWL_MDNS_SERVICE    (1 << 19)   /* Wakeup on mDNS Service Connect */
+#define WL_WOWL_TCPKEEP_DATA    (1 << 20)   /* tcp keepalive got data */
+#define WL_WOWL_FW_HALT         (1 << 21)   /* Firmware died in wowl mode */
+#define WL_WOWL_ENAB_HWRADIO    (1 << 22)   /* Enable detection of radio button changes */
+#define WL_WOWL_MIC_FAIL        (1 << 23)   /* Offloads detected MIC failure(s) */
+#define WL_WOWL_LINKDOWN        (1 << 31)   /* Link Down indication in WoWL mode */
+
+#define WL_WOWL_TCPKEEP         (1 << 20)   /* temp copy to satisfy automerger */
+
+#define WOWL_PATTEN_TYPE_ARP	(1 << 0)	/* ARP offload Pattern */
+#define WOWL_PATTEN_TYPE_NA	(1 << 1)	/* NA offload Pattern */
+
+#define MAGIC_PKT_MINLEN	102    /* Magic pkt min length is 6 * 0xFF + 16 * ETHER_ADDR_LEN */
+#define MAGIC_PKT_NUM_MAC_ADDRS	16
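+/* Example (illustrative only; the builder is hypothetical and needs
+ * <string.h>): the minimum magic packet is 6 bytes of 0xFF followed by the
+ * 6-byte target MAC repeated MAGIC_PKT_NUM_MAC_ADDRS times, so
+ * 6 + 16 * 6 = 102 = MAGIC_PKT_MINLEN.
+ */
+#if 0	/* usage sketch */
+static void build_magic_pkt(unsigned char buf[MAGIC_PKT_MINLEN],
+                            const unsigned char mac[6])
+{
+	int i;
+	memset(buf, 0xFF, 6);				/* sync stream */
+	for (i = 0; i < MAGIC_PKT_NUM_MAC_ADDRS; i++)
+		memcpy(buf + 6 + i * 6, mac, 6);	/* 16 copies of the MAC */
+}
+#endif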
+
+
+/* Overlap BSS Scan parameters default, minimum, maximum */
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_DEFAULT		20	/* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_MIN			5	/* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_MAX			1000	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_DEFAULT		10	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_MIN			10	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_MAX			1000	/* unit TU */
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_DEFAULT	300	/* unit Sec */
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MIN		10	/* unit Sec */
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MAX		900	/* unit Sec */
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_DEFAULT	5
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MIN	5
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MAX	100
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_DEFAULT	200	/* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MIN	200	/* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MAX	10000	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_DEFAULT	20	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MIN	20	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MAX	10000	/* unit TU */
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_DEFAULT	25	/* unit percent */
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MIN		0	/* unit percent */
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MAX		100	/* unit percent */
+
+#define WL_MIN_NUM_OBSS_SCAN_ARG 7	/* minimum number of arguments required for OBSS Scan */
+
+#define WL_COEX_INFO_MASK		0x07
+#define WL_COEX_INFO_REQ		0x01
+#define	WL_COEX_40MHZ_INTOLERANT	0x02
+#define	WL_COEX_WIDTH20			0x04
+
+#define	WLC_RSSI_INVALID	 0	/* invalid RSSI value */
+
+#define MAX_RSSI_LEVELS 8
+
+/* **** EXTLOG **** */
+#define EXTLOG_CUR_VER		0x0100
+
+#define MAX_ARGSTR_LEN		18 /* At least big enough for storing ETHER_ADDR_STR_LEN */
+
+/* log modules (bitmap) */
+#define LOG_MODULE_COMMON	0x0001
+#define LOG_MODULE_ASSOC	0x0002
+#define LOG_MODULE_EVENT	0x0004
+#define LOG_MODULE_MAX		3			/* Update when adding module */
+
+/* log levels */
+#define WL_LOG_LEVEL_DISABLE	0
+#define WL_LOG_LEVEL_ERR	1
+#define WL_LOG_LEVEL_WARN	2
+#define WL_LOG_LEVEL_INFO	3
+#define WL_LOG_LEVEL_MAX	WL_LOG_LEVEL_INFO	/* Update when adding level */
+
+/* flag */
+#define LOG_FLAG_EVENT		1
+
+/* log arg_type */
+#define LOG_ARGTYPE_NULL	0
+#define LOG_ARGTYPE_STR		1	/* %s */
+#define LOG_ARGTYPE_INT		2	/* %d */
+#define LOG_ARGTYPE_INT_STR	3	/* %d...%s */
+#define LOG_ARGTYPE_STR_INT	4	/* %s...%d */
+
+/* 802.11 Mgmt Packet flags */
+#define VNDR_IE_BEACON_FLAG	0x1
+#define VNDR_IE_PRBRSP_FLAG	0x2
+#define VNDR_IE_ASSOCRSP_FLAG	0x4
+#define VNDR_IE_AUTHRSP_FLAG	0x8
+#define VNDR_IE_PRBREQ_FLAG	0x10
+#define VNDR_IE_ASSOCREQ_FLAG	0x20
+#define VNDR_IE_IWAPID_FLAG	0x40 /* vendor IE in IW advertisement protocol ID field */
+#define VNDR_IE_CUSTOM_FLAG	0x100 /* allow custom IE id */
+
+#if defined(WLP2P)
+/* P2P Action Frames flags (spec ordered) */
+#define VNDR_IE_GONREQ_FLAG     0x001000
+#define VNDR_IE_GONRSP_FLAG     0x002000
+#define VNDR_IE_GONCFM_FLAG     0x004000
+#define VNDR_IE_INVREQ_FLAG     0x008000
+#define VNDR_IE_INVRSP_FLAG     0x010000
+#define VNDR_IE_DISREQ_FLAG     0x020000
+#define VNDR_IE_DISRSP_FLAG     0x040000
+#define VNDR_IE_PRDREQ_FLAG     0x080000
+#define VNDR_IE_PRDRSP_FLAG     0x100000
+
+#define VNDR_IE_P2PAF_SHIFT	12
+#endif /* WLP2P */
+
+/* channel interference measurement (chanim) related defines */
+
+/* chanim mode */
+#define CHANIM_DISABLE	0	/* disabled */
+#define CHANIM_DETECT	1	/* detection only */
+#define CHANIM_EXT		2	/* external state machine */
+#define CHANIM_ACT		3	/* full internal state machine, detect + act */
+#define CHANIM_MODE_MAX 4
+
+/* define for apcs reason code */
+#define APCS_INIT		0
+#define APCS_IOCTL		1
+#define APCS_CHANIM		2
+#define APCS_CSTIMER		3
+#define APCS_BTA		4
+#define APCS_TXDLY		5
+#define APCS_NONACSD		6
+#define APCS_DFS_REENTRY	7
+#define APCS_TXFAIL		8
+#define APCS_MAX		9
+
+/* number of ACS record entries */
+#define CHANIM_ACS_RECORD			10
+
+/* CHANIM */
+#define CCASTATS_TXDUR  0
+#define CCASTATS_INBSS  1
+#define CCASTATS_OBSS   2
+#define CCASTATS_NOCTG  3
+#define CCASTATS_NOPKT  4
+#define CCASTATS_DOZE   5
+#define CCASTATS_TXOP	6
+#define CCASTATS_GDTXDUR        7
+#define CCASTATS_BDTXDUR        8
+#define CCASTATS_MAX    9
+
+#define WL_CHANIM_COUNT_ALL	0xff
+#define WL_CHANIM_COUNT_ONE	0x1
+
+/* ap tpc modes */
+#define	AP_TPC_OFF		0
+#define	AP_TPC_BSS_PWR		1	/* BSS power control */
+#define AP_TPC_AP_PWR		2	/* AP power control */
+#define	AP_TPC_AP_BSS_PWR	3	/* Both AP and BSS power control */
+#define AP_TPC_MAX_LINK_MARGIN	127
+
+/* state */
+#define WL_P2P_DISC_ST_SCAN	0
+#define WL_P2P_DISC_ST_LISTEN	1
+#define WL_P2P_DISC_ST_SEARCH	2
+
+/* i/f type */
+#define WL_P2P_IF_CLIENT	0
+#define WL_P2P_IF_GO		1
+#define WL_P2P_IF_DYNBCN_GO	2
+#define WL_P2P_IF_DEV		3
+
+/* count */
+#define WL_P2P_SCHED_RSVD	0
+#define WL_P2P_SCHED_REPEAT	255	/* anything > 255 will be treated as 255 */
+
+#define WL_P2P_SCHED_FIXED_LEN		3
+
+/* schedule type */
+#define WL_P2P_SCHED_TYPE_ABS		0	/* Scheduled Absence */
+#define WL_P2P_SCHED_TYPE_REQ_ABS	1	/* Requested Absence */
+
+/* schedule action during absence periods (for WL_P2P_SCHED_ABS type) */
+#define WL_P2P_SCHED_ACTION_NONE	0	/* no action */
+#define WL_P2P_SCHED_ACTION_DOZE	1	/* doze */
+/* schedule option - WL_P2P_SCHED_TYPE_REQ_ABS */
+#define WL_P2P_SCHED_ACTION_GOOFF	2	/* turn off GO beacon/prbrsp functions */
+/* schedule option - WL_P2P_SCHED_TYPE_XXX */
+#define WL_P2P_SCHED_ACTION_RESET	255	/* reset */
+
+/* schedule option - WL_P2P_SCHED_TYPE_ABS */
+#define WL_P2P_SCHED_OPTION_NORMAL	0	/* normal start/interval/duration/count */
+#define WL_P2P_SCHED_OPTION_BCNPCT	1	/* percentage of beacon interval */
+/* schedule option - WL_P2P_SCHED_TYPE_REQ_ABS */
+#define WL_P2P_SCHED_OPTION_TSFOFS	2	/* normal start/interval/duration/count with
+						 * start being an offset of the 'current' TSF
+						 */
+
+/* feature flags */
+#define WL_P2P_FEAT_GO_CSA	(1 << 0)	/* GO moves with the STA using CSA method */
+#define WL_P2P_FEAT_GO_NOLEGACY	(1 << 1)	/* GO does not probe respond to non-p2p probe
+						 * requests
+						 */
+#define WL_P2P_FEAT_RESTRICT_DEV_RESP (1 << 2)	/* Restrict p2p dev interface from responding */
+
+/* n-mode support capability */
+/* 2x2 includes both 1x1 & 2x2 devices
+ * reserved #define 2 for future when we want to separate 1x1 & 2x2 and
+ * control it independently
+ */
+#define WL_11N_2x2			1
+#define WL_11N_3x3			3
+#define WL_11N_4x4			4
+
+/* define 11n feature disable flags */
+#define WLFEATURE_DISABLE_11N		0x00000001
+#define WLFEATURE_DISABLE_11N_STBC_TX	0x00000002
+#define WLFEATURE_DISABLE_11N_STBC_RX	0x00000004
+#define WLFEATURE_DISABLE_11N_SGI_TX	0x00000008
+#define WLFEATURE_DISABLE_11N_SGI_RX	0x00000010
+#define WLFEATURE_DISABLE_11N_AMPDU_TX	0x00000020
+#define WLFEATURE_DISABLE_11N_AMPDU_RX	0x00000040
+#define WLFEATURE_DISABLE_11N_GF	0x00000080
+
+/* Proxy STA modes */
+#define PSTA_MODE_DISABLED		0
+#define PSTA_MODE_PROXY			1
+#define PSTA_MODE_REPEATER		2
+
+/* op code in nat_cfg */
+#define NAT_OP_ENABLE		1	/* enable NAT on given interface */
+#define NAT_OP_DISABLE		2	/* disable NAT on given interface */
+#define NAT_OP_DISABLE_ALL	3	/* disable NAT on all interfaces */
+
+/* NAT state */
+#define NAT_STATE_ENABLED	1	/* NAT is enabled */
+#define NAT_STATE_DISABLED	2	/* NAT is disabled */
+
+#define CHANNEL_5G_LOW_START	36	/* 5G low (36..48) CDD enable/disable bit mask */
+#define CHANNEL_5G_MID_START	52	/* 5G mid (52..64) CDD enable/disable bit mask */
+#define CHANNEL_5G_HIGH_START	100	/* 5G high (100..140) CDD enable/disable bit mask */
+#define CHANNEL_5G_UPPER_START	149	/* 5G upper (149..161) CDD enable/disable bit mask */
+
+/* D0 Coalescing */
+#define IPV4_ARP_FILTER		0x0001
+#define IPV4_NETBT_FILTER	0x0002
+#define IPV4_LLMNR_FILTER	0x0004
+#define IPV4_SSDP_FILTER	0x0008
+#define IPV4_WSD_FILTER		0x0010
+#define IPV6_NETBT_FILTER	0x0200
+#define IPV6_LLMNR_FILTER	0x0400
+#define IPV6_SSDP_FILTER	0x0800
+#define IPV6_WSD_FILTER		0x1000
+
+/* Network Offload Engine */
+#define NWOE_OL_ENABLE		0x00000001
+
+/*
+ * Traffic management structures/defines.
+ */
+
+/* Traffic management bandwidth parameters */
+#define TRF_MGMT_MAX_PRIORITIES                 3
+
+#define TRF_MGMT_FLAG_ADD_DSCP                  0x0001  /* Add DSCP to IP TOS field */
+#define TRF_MGMT_FLAG_DISABLE_SHAPING           0x0002  /* Don't shape traffic */
+#define TRF_MGMT_FLAG_MANAGE_LOCAL_TRAFFIC      0x0008  /* Manage traffic over our local subnet */
+#define TRF_MGMT_FLAG_FILTER_ON_MACADDR         0x0010  /* filter on MAC address */
+#define TRF_MGMT_FLAG_NO_RX                     0x0020  /* do not apply filters to rx packets */
+
+#define TRF_FILTER_MAC_ADDR              0x0001 /* L2 filter use dst mac address for filtering */
+#define TRF_FILTER_IP_ADDR               0x0002 /* L3 filter use IP address for filtering */
+#define TRF_FILTER_L4                    0x0004 /* L4 filter use tcp/udp for filtering */
+#define TRF_FILTER_DWM                   0x0008 /* L3 filter use DSCP for filtering */
+#define TRF_FILTER_FAVORED               0x0010 /* Tag the packet FAVORED */
+
+/* WNM/NPS subfeatures mask */
+#define WL_WNM_BSSTRANS		0x00000001
+#define WL_WNM_PROXYARP		0x00000002
+#define WL_WNM_MAXIDLE		0x00000004
+#define WL_WNM_TIMBC		0x00000008
+#define WL_WNM_TFS		0x00000010
+#define WL_WNM_SLEEP		0x00000020
+#define WL_WNM_DMS		0x00000040
+#define WL_WNM_FMS		0x00000080
+#define WL_WNM_NOTIF		0x00000100
+#define WL_WNM_MAX		0x00000200
+
+#ifndef ETHER_MAX_DATA
+#define ETHER_MAX_DATA	1500
+#endif /* ETHER_MAX_DATA */
+
+/* Different discovery modes for dpt */
+#define	DPT_DISCOVERY_MANUAL	0x01	/* manual discovery mode */
+#define	DPT_DISCOVERY_AUTO	0x02	/* auto discovery mode */
+#define	DPT_DISCOVERY_SCAN	0x04	/* scan-based discovery mode */
+
+/* different path selection values */
+#define DPT_PATHSEL_AUTO	0	/* auto mode for path selection */
+#define DPT_PATHSEL_DIRECT	1	/* always use direct DPT path */
+#define DPT_PATHSEL_APPATH	2	/* always use AP path */
+
+/* different ops for deny list */
+#define DPT_DENY_LIST_ADD	1	/* add to dpt deny list */
+#define DPT_DENY_LIST_REMOVE	2	/* remove from dpt deny list */
+
+/* different ops for manual end point */
+#define DPT_MANUAL_EP_CREATE	1	/* create manual dpt endpoint */
+#define DPT_MANUAL_EP_MODIFY	2	/* modify manual dpt endpoint */
+#define DPT_MANUAL_EP_DELETE	3	/* delete manual dpt endpoint */
+
+/* flags to indicate DPT status */
+#define	DPT_STATUS_ACTIVE	0x01	/* link active (though may be suspended) */
+#define	DPT_STATUS_AES		0x02	/* link secured through AES encryption */
+#define	DPT_STATUS_FAILED	0x04	/* DPT link failed */
+
+#ifdef WLTDLS
+/* different ops for manual end point */
+#define TDLS_MANUAL_EP_CREATE	1	/* create manual dpt endpoint */
+#define TDLS_MANUAL_EP_MODIFY	2	/* modify manual dpt endpoint */
+#define TDLS_MANUAL_EP_DELETE	3	/* delete manual dpt endpoint */
+#define TDLS_MANUAL_EP_PM		4	/*  put dpt endpoint in PM mode */
+#define TDLS_MANUAL_EP_WAKE		5	/* wake up dpt endpoint from PM */
+#define TDLS_MANUAL_EP_DISCOVERY	6	/* discover if endpoint is TDLS capable */
+#define TDLS_MANUAL_EP_CHSW		7	/* channel switch */
+#define TDLS_MANUAL_EP_WFD_TPQ	8	/* WiFi-Display Tunneled Probe reQuest */
+
+/* modes */
+#define TDLS_WFD_IE_TX			0
+#define TDLS_WFD_IE_RX			1
+#define TDLS_WFD_PROBE_IE_TX	2
+#define TDLS_WFD_PROBE_IE_RX	3
+#endif /* WLTDLS */
+
+/* define for flag */
+#define TSPEC_PENDING		0	/* TSPEC pending */
+#define TSPEC_ACCEPTED		1	/* TSPEC accepted */
+#define TSPEC_REJECTED		2	/* TSPEC rejected */
+#define TSPEC_UNKNOWN		3	/* TSPEC unknown */
+#define TSPEC_STATUS_MASK	7	/* TSPEC status mask */
+
+
+/* Software feature flag defines used by wlfeatureflag */
+#ifdef WLAFTERBURNER
+#define WL_SWFL_ABBFL       0x0001 /* Allow Afterburner on systems w/o hardware BFL */
+#define WL_SWFL_ABENCORE    0x0002 /* Allow AB on non-4318E chips */
+#endif /* WLAFTERBURNER */
+#define WL_SWFL_NOHWRADIO	0x0004
+#define WL_SWFL_FLOWCONTROL     0x0008 /* Enable backpressure to OS stack */
+#define WL_SWFL_WLBSSSORT	0x0010 /* Per-port supports sorting of BSS */
+
+#define WL_LIFETIME_MAX 0xFFFF /* Max value in ms */
+
+#define CSA_BROADCAST_ACTION_FRAME	0	/* csa broadcast action frame */
+#define CSA_UNICAST_ACTION_FRAME	1	/* csa unicast action frame */
+
+/* Roaming trigger definitions for WLC_SET_ROAM_TRIGGER.
+ *
+ * (-100 < value < 0)   value is used directly as a roaming trigger in dBm
+ * (0 <= value) value specifies a logical roaming trigger level from
+ *                      the list below
+ *
+ * WLC_GET_ROAM_TRIGGER always returns roaming trigger value in dBm, never
+ * the logical roam trigger value.
+ */
+#define WLC_ROAM_TRIGGER_DEFAULT	0 /* default roaming trigger */
+#define WLC_ROAM_TRIGGER_BANDWIDTH	1 /* optimize for bandwidth roaming trigger */
+#define WLC_ROAM_TRIGGER_DISTANCE	2 /* optimize for distance roaming trigger */
+#define WLC_ROAM_TRIGGER_AUTO		3 /* auto-detect environment */
+#define WLC_ROAM_TRIGGER_MAX_VALUE	3 /* max. valid value */
+
+#define WLC_ROAM_NEVER_ROAM_TRIGGER	(-100) /* avoid roaming by setting the trigger to a very low value */
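+/* Example (illustrative only; the helper is hypothetical): classifying a
+ * WLC_SET_ROAM_TRIGGER argument per the rules above.
+ */
+#if 0	/* usage sketch */
+static const char *roam_trigger_kind(int value)
+{
+	if (value > -100 && value < 0)
+		return "direct dBm trigger";
+	if (value >= 0 && value <= WLC_ROAM_TRIGGER_MAX_VALUE)
+		return "logical trigger level";
+	return "out of range";	/* e.g. WLC_ROAM_NEVER_ROAM_TRIGGER */
+}
+#endif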
+
+/* Preferred Network Offload (PNO, formerly PFN) defines */
+#define WPA_AUTH_PFN_ANY	0xffffffff	/* for PFN, match only ssid */
+
+#define SORT_CRITERIA_BIT		0
+#define AUTO_NET_SWITCH_BIT		1
+#define ENABLE_BKGRD_SCAN_BIT		2
+#define IMMEDIATE_SCAN_BIT		3
+#define	AUTO_CONNECT_BIT		4
+#define	ENABLE_BD_SCAN_BIT		5
+#define ENABLE_ADAPTSCAN_BIT		6
+#define IMMEDIATE_EVENT_BIT		8
+#define SUPPRESS_SSID_BIT		9
+#define ENABLE_NET_OFFLOAD_BIT		10
+
+#define SORT_CRITERIA_MASK		0x0001
+#define AUTO_NET_SWITCH_MASK		0x0002
+#define ENABLE_BKGRD_SCAN_MASK		0x0004
+#define IMMEDIATE_SCAN_MASK		0x0008
+#define	AUTO_CONNECT_MASK		0x0010
+
+#define ENABLE_BD_SCAN_MASK		0x0020
+#define ENABLE_ADAPTSCAN_MASK		0x00c0
+#define IMMEDIATE_EVENT_MASK		0x0100
+#define SUPPRESS_SSID_MASK		0x0200
+#define ENABLE_NET_OFFLOAD_MASK		0x0400
+
+#define PFN_VERSION			2
+#define PFN_SCANRESULT_VERSION		1
+#define MAX_PFN_LIST_COUNT		16
+
+#define PFN_COMPLETE			1
+#define PFN_INCOMPLETE			0
+
+#define DEFAULT_BESTN			2
+#define DEFAULT_MSCAN			0
+#define DEFAULT_REPEAT			10
+#define DEFAULT_EXP				2
+
+#define WL_PFN_SUPPRESSFOUND_MASK	0x08
+#define WL_PFN_SUPPRESSLOST_MASK	0x10
+#define WL_PFN_RSSI_MASK			0xff00
+#define WL_PFN_RSSI_SHIFT			8
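+/* Example (illustrative only; the helper is hypothetical): the RSSI
+ * threshold is a signed 8-bit value packed into bits 8..15 of the PFN
+ * flags word.
+ */
+#if 0	/* usage sketch */
+static signed char wl_pfn_flags_to_rssi(unsigned int flags)
+{
+	return (signed char)((flags & WL_PFN_RSSI_MASK) >> WL_PFN_RSSI_SHIFT);
+}
+#endif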
+
+#define WL_PFN_REPORT_ALLNET    0
+#define WL_PFN_REPORT_SSIDNET   1
+#define WL_PFN_REPORT_BSSIDNET  2
+
+#define WL_PFN_CFG_FLAGS_PROHIBITED	0x00000001	/* Accept and use prohibited channels */
+#define WL_PFN_CFG_FLAGS_RESERVED	0xfffffffe	/* Remaining reserved for future use */
+
+#define WL_PFN_HIDDEN_BIT		2
+#define PNO_SCAN_MAX_FW			(508 * 1000)	/* max scan time in msec */
+#define PNO_SCAN_MAX_FW_SEC		(PNO_SCAN_MAX_FW / 1000) /* max scan time in sec */
+#define PNO_SCAN_MIN_FW_SEC		10			/* min scan time in sec */
+#define WL_PFN_HIDDEN_MASK		0x4
+
+/* TCP Checksum Offload error injection for testing */
+#define TOE_ERRTEST_TX_CSUM	0x00000001
+#define TOE_ERRTEST_RX_CSUM	0x00000002
+#define TOE_ERRTEST_RX_CSUM2	0x00000004
+
+/* ARP Offload feature flags for arp_ol iovar */
+#define ARP_OL_AGENT		0x00000001
+#define ARP_OL_SNOOP		0x00000002
+#define ARP_OL_HOST_AUTO_REPLY	0x00000004
+#define ARP_OL_PEER_AUTO_REPLY	0x00000008
+
+/* ARP Offload error injection */
+#define ARP_ERRTEST_REPLY_PEER	0x1
+#define ARP_ERRTEST_REPLY_HOST	0x2
+
+#define ARP_MULTIHOMING_MAX	8	/* Maximum local host IP addresses */
+#define ND_MULTIHOMING_MAX 10	/* Maximum local host IP addresses */
+#define ND_REQUEST_MAX		5	/* Max set of offload params */
+
+
+/* AOAC wake event flag */
+#define WAKE_EVENT_NLO_DISCOVERY_BIT		1
+#define WAKE_EVENT_AP_ASSOCIATION_LOST_BIT	2
+#define WAKE_EVENT_GTK_HANDSHAKE_ERROR_BIT 4
+#define WAKE_EVENT_4WAY_HANDSHAKE_REQUEST_BIT 8
+
+#define MAX_NUM_WOL_PATTERN	16 /* LOGO requirements min 16 */
+
+/* Packet filter operation mode */
+/* True: 1; False: 0 */
+#define PKT_FILTER_MODE_FORWARD_ON_MATCH		1
+/* Enable and disable pkt_filter as a whole */
+#define PKT_FILTER_MODE_DISABLE					2
+/* Cache first matched rx pkt(be queried by host later) */
+#define PKT_FILTER_MODE_PKT_CACHE_ON_MATCH		4
+/* If pkt_filter is enabled and no filter is set, don't forward anything */
+#define PKT_FILTER_MODE_PKT_FORWARD_OFF_DEFAULT 8
+
+#ifdef DONGLEOVERLAYS
+#define OVERLAY_IDX_MASK		0x000000ff
+#define OVERLAY_IDX_SHIFT		0
+#define OVERLAY_FLAGS_MASK		0xffffff00
+#define OVERLAY_FLAGS_SHIFT		8
+/* overlay written to device memory immediately after loading the base image */
+#define OVERLAY_FLAG_POSTLOAD	0x100
+/* defer overlay download until the device responds w/WLC_E_OVL_DOWNLOAD event */
+#define OVERLAY_FLAG_DEFER_DL	0x200
+/* overlay downloaded prior to the host going to sleep */
+#define OVERLAY_FLAG_PRESLEEP	0x400
+#define OVERLAY_DOWNLOAD_CHUNKSIZE	1024
+#endif /* DONGLEOVERLAYS */
+
+/* reuse two number in the sc/rc space */
+#define	SMFS_CODE_MALFORMED 0xFFFE
+#define SMFS_CODE_IGNORED	0xFFFD
+
+/* RFAWARE def */
+#define BCM_ACTION_RFAWARE		0x77
+#define BCM_ACTION_RFAWARE_DCS  0x01
+
+/* DCS reason code define */
+#define BCM_DCS_IOVAR		0x1
+#define BCM_DCS_UNKNOWN		0xFF
+
+
+#ifdef PROP_TXSTATUS
+/* Bit definitions for tlv iovar */
+/*
+ * enable RSSI signals:
+ * WLFC_CTL_TYPE_RSSI
+ */
+#define WLFC_FLAGS_RSSI_SIGNALS			0x0001
+
+/* enable (if/mac_open, if/mac_close, mac_add, mac_del) signals:
+ *
+ * WLFC_CTL_TYPE_MAC_OPEN
+ * WLFC_CTL_TYPE_MAC_CLOSE
+ *
+ * WLFC_CTL_TYPE_INTERFACE_OPEN
+ * WLFC_CTL_TYPE_INTERFACE_CLOSE
+ *
+ * WLFC_CTL_TYPE_MACDESC_ADD
+ * WLFC_CTL_TYPE_MACDESC_DEL
+ *
+ */
+#define WLFC_FLAGS_XONXOFF_SIGNALS		0x0002
+
+/* enable (status, fifo_credit, mac_credit) signals
+ * WLFC_CTL_TYPE_MAC_REQUEST_CREDIT
+ * WLFC_CTL_TYPE_TXSTATUS
+ * WLFC_CTL_TYPE_FIFO_CREDITBACK
+ */
+#define WLFC_FLAGS_CREDIT_STATUS_SIGNALS	0x0004
+
+#define WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE	0x0008
+#define WLFC_FLAGS_PSQ_GENERATIONFSM_ENABLE	0x0010
+#define WLFC_FLAGS_PSQ_ZERO_BUFFER_ENABLE	0x0020
+#define WLFC_FLAGS_HOST_RXRERODER_ACTIVE	0x0040
+#define WLFC_FLAGS_PKT_STAMP_SIGNALS		0x0080
+
+#endif /* PROP_TXSTATUS */
+
+#define WL_TIMBC_STATUS_AP_UNKNOWN	255	/* AP status for internal use only */
+
+#define WL_DFRTS_LOGIC_OFF	0	/* Feature is disabled */
+#define WL_DFRTS_LOGIC_OR	1	/* OR all non-zero threshold conditions */
+#define WL_DFRTS_LOGIC_AND	2	/* AND all non-zero threshold conditions */
+
+/* Definitions for Reliable Multicast */
+#define WL_RELMCAST_MAX_CLIENT		32
+#define WL_RELMCAST_FLAG_INBLACKLIST	1
+#define WL_RELMCAST_FLAG_ACTIVEACKER	2
+#define WL_RELMCAST_FLAG_RELMCAST	4
+
+/* structures for proximity detection device role */
+#define WL_PROXD_MODE_DISABLE	0
+#define WL_PROXD_MODE_NEUTRAL	1
+#define WL_PROXD_MODE_INITIATOR	2
+#define WL_PROXD_MODE_TARGET	3
+#define WL_PROXD_RANDOM_WAKEUP	0x8000
+
+
+#ifdef NET_DETECT
+#define NET_DETECT_MAX_WAKE_DATA_SIZE	2048
+#define NET_DETECT_MAX_PROFILES		16
+#define NET_DETECT_MAX_CHANNELS		50
+#endif /* NET_DETECT */
+
+/* Bit masks for radio disabled status - returned by WL_GET_RADIO */
+#define WL_RADIO_SW_DISABLE		(1<<0)
+#define WL_RADIO_HW_DISABLE		(1<<1)
+#define WL_RADIO_MPC_DISABLE		(1<<2)
+#define WL_RADIO_COUNTRY_DISABLE	(1<<3)	/* some countries don't support any channel */
+
+#define	WL_SPURAVOID_OFF	0
+#define	WL_SPURAVOID_ON1	1
+#define	WL_SPURAVOID_ON2	2
+
+
+#define WL_4335_SPURAVOID_ON1	1
+#define WL_4335_SPURAVOID_ON2	2
+#define WL_4335_SPURAVOID_ON3	3
+#define WL_4335_SPURAVOID_ON4	4
+#define WL_4335_SPURAVOID_ON5	5
+#define WL_4335_SPURAVOID_ON6	6
+#define WL_4335_SPURAVOID_ON7	7
+#define WL_4335_SPURAVOID_ON8	8
+#define WL_4335_SPURAVOID_ON9	9
+
+/* Override bit for WLC_SET_TXPWR.  if set, ignore other level limits */
+#define WL_TXPWR_OVERRIDE	(1U<<31)
+#define WL_TXPWR_NEG   (1U<<30)
+
+
+/* phy types (returned by WLC_GET_PHYTPE) */
+#define	WLC_PHY_TYPE_A		0
+#define	WLC_PHY_TYPE_B		1
+#define	WLC_PHY_TYPE_G		2
+#define	WLC_PHY_TYPE_N		4
+#define	WLC_PHY_TYPE_LP		5
+#define	WLC_PHY_TYPE_SSN	6
+#define	WLC_PHY_TYPE_HT		7
+#define	WLC_PHY_TYPE_LCN	8
+#define	WLC_PHY_TYPE_LCN40	10
+#define WLC_PHY_TYPE_AC		11
+#define	WLC_PHY_TYPE_NULL	0xf
+
+/* Values for PM */
+#define PM_OFF	0
+#define PM_MAX	1
+#define PM_FAST 2
+#define PM_FORCE_OFF 3		/* use this bit to force PM off even when BT is active */
+
+#define WL_WME_CNT_VERSION	1	/* current version of wl_wme_cnt_t */
+
+/* fbt_cap: FBT assoc / reassoc modes. */
+#define WLC_FBT_CAP_DRV_4WAY_AND_REASSOC  1 /* Driver 4-way handshake & reassoc (WLFBT). */
+
+/* monitor_promisc_level bits */
+#define WL_MONPROMISC_PROMISC 0x0001
+#define WL_MONPROMISC_CTRL 0x0002
+#define WL_MONPROMISC_FCS 0x0004
+
+/* TCP Checksum Offload defines */
+#define TOE_TX_CSUM_OL		0x00000001
+#define TOE_RX_CSUM_OL		0x00000002
+
+#endif /* wlioctl_defs_h */
diff --git a/drivers/staging/edison-bcm43340/common/include/proto/802.11.h b/drivers/staging/edison-bcm43340/common/include/proto/802.11.h
new file mode 100644
index 0000000..05b1340
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/common/include/proto/802.11.h
@@ -0,0 +1,3815 @@
+/*
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental types and constants relating to 802.11
+ *
+ * $Id: 802.11.h 444070 2013-12-18 13:20:12Z $
+ */
+
+#ifndef _802_11_H_
+#define _802_11_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+#ifndef _NET_ETHERNET_H_
+#include <proto/ethernet.h>
+#endif
+
+#include <proto/wpa.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+#define DOT11_TU_TO_US			1024	/* 802.11 Time Unit is 1024 microseconds */
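+/* Example (illustrative only; the names below are hypothetical): a beacon
+ * interval in TUs converts to microseconds by multiplying by
+ * DOT11_TU_TO_US; the common 100 TU interval is 100 * 1024 = 102400 us.
+ */
+#if 0	/* usage sketch */
+enum { EXAMPLE_BCN_TU = 100 };	/* hypothetical beacon interval */
+static const unsigned int example_bcn_us = EXAMPLE_BCN_TU * DOT11_TU_TO_US;
+#endif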
+
+/* Generic 802.11 frame constants */
+#define DOT11_A3_HDR_LEN		24	/* d11 header length with A3 */
+#define DOT11_A4_HDR_LEN		30	/* d11 header length with A4 */
+#define DOT11_MAC_HDR_LEN		DOT11_A3_HDR_LEN	/* MAC header length */
+#define DOT11_FCS_LEN			4	/* d11 FCS length */
+#define DOT11_ICV_LEN			4	/* d11 ICV length */
+#define DOT11_ICV_AES_LEN		8	/* d11 ICV/AES length */
+#define DOT11_QOS_LEN			2	/* d11 QoS length */
+#define DOT11_HTC_LEN			4	/* d11 HT Control field length */
+
+#define DOT11_KEY_INDEX_SHIFT		6	/* d11 key index shift */
+#define DOT11_IV_LEN			4	/* d11 IV length */
+#define DOT11_IV_TKIP_LEN		8	/* d11 IV TKIP length */
+#define DOT11_IV_AES_OCB_LEN		4	/* d11 IV/AES/OCB length */
+#define DOT11_IV_AES_CCM_LEN		8	/* d11 IV/AES/CCM length */
+#define DOT11_IV_MAX_LEN		8	/* maximum iv len for any encryption */
+
+/* Includes MIC */
+#define DOT11_MAX_MPDU_BODY_LEN		2304	/* max MPDU body length */
+/* A4 header + QoS + CCMP + PDU + ICV + FCS = 2352 */
+#define DOT11_MAX_MPDU_LEN		(DOT11_A4_HDR_LEN + \
+					 DOT11_QOS_LEN + \
+					 DOT11_IV_AES_CCM_LEN + \
+					 DOT11_MAX_MPDU_BODY_LEN + \
+					 DOT11_ICV_LEN + \
+					 DOT11_FCS_LEN)	/* d11 max MPDU length */
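+/* Example (illustrative only; the typedef trick below is a hypothetical
+ * compile-time check): the composition above works out to
+ * 30 + 2 + 8 + 2304 + 4 + 4 = 2352 bytes.
+ */
+#if 0	/* usage sketch */
+typedef char dot11_max_mpdu_len_check[(DOT11_MAX_MPDU_LEN == 2352) ? 1 : -1];
+#endif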
+
+#define DOT11_MAX_SSID_LEN		32	/* d11 max ssid length */
+
+/* dot11RTSThreshold */
+#define DOT11_DEFAULT_RTS_LEN		2347	/* d11 default RTS length */
+#define DOT11_MAX_RTS_LEN		2347	/* d11 max RTS length */
+
+/* dot11FragmentationThreshold */
+#define DOT11_MIN_FRAG_LEN		256	/* d11 min fragmentation length */
+#define DOT11_MAX_FRAG_LEN		2346	/* Max frag is also limited by aMPDUMaxLength
+						* of the attached PHY
+						*/
+#define DOT11_DEFAULT_FRAG_LEN		2346	/* d11 default fragmentation length */
+
+/* dot11BeaconPeriod */
+#define DOT11_MIN_BEACON_PERIOD		1	/* d11 min beacon period */
+#define DOT11_MAX_BEACON_PERIOD		0xFFFF	/* d11 max beacon period */
+
+/* dot11DTIMPeriod */
+#define DOT11_MIN_DTIM_PERIOD		1	/* d11 min DTIM period */
+#define DOT11_MAX_DTIM_PERIOD		0xFF	/* d11 max DTIM period */
+
+/* 802.2 LLC/SNAP header used by 802.11 per 802.1H */
+#define DOT11_LLC_SNAP_HDR_LEN		8	/* d11 LLC/SNAP header length */
+#define DOT11_OUI_LEN			3	/* d11 OUI length */
+BWL_PRE_PACKED_STRUCT struct dot11_llc_snap_header {
+	uint8	dsap;				/* always 0xAA */
+	uint8	ssap;				/* always 0xAA */
+	uint8	ctl;				/* always 0x03 */
+	uint8	oui[DOT11_OUI_LEN];		/* RFC1042: 0x00 0x00 0x00
+						 * Bridge-Tunnel: 0x00 0x00 0xF8
+						 */
+	uint16	type;				/* ethertype */
+} BWL_POST_PACKED_STRUCT;
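+/* Example (illustrative only; hton16() stands in for whatever byte-swap
+ * helper the build provides): an RFC1042 LLC/SNAP header for an IPv4
+ * payload.
+ */
+#if 0	/* usage sketch */
+struct dot11_llc_snap_header snap = {
+	0xAA, 0xAA, 0x03,		/* dsap, ssap, ctl */
+	{ 0x00, 0x00, 0x00 },		/* RFC1042 OUI */
+	hton16(0x0800)			/* ethertype: IPv4 */
+};
+#endif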
+
+/* RFC1042 header used by 802.11 per 802.1H */
+#define RFC1042_HDR_LEN	(ETHER_HDR_LEN + DOT11_LLC_SNAP_HDR_LEN)	/* RFC1042 header length */
+
+/* Generic 802.11 MAC header */
+/*
+ * N.B.: This struct reflects the full 4 address 802.11 MAC header.
+ *		 The fields are defined such that the shorter 1, 2, and 3
+ *		 address headers just use the first k fields.
+ */
+BWL_PRE_PACKED_STRUCT struct dot11_header {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	a1;		/* address 1 */
+	struct ether_addr	a2;		/* address 2 */
+	struct ether_addr	a3;		/* address 3 */
+	uint16			seq;		/* sequence control */
+	struct ether_addr	a4;		/* address 4 */
+} BWL_POST_PACKED_STRUCT;
+
+/* Control frames */
+
+BWL_PRE_PACKED_STRUCT struct dot11_rts_frame {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	ra;		/* receiver address */
+	struct ether_addr	ta;		/* transmitter address */
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_RTS_LEN		16		/* d11 RTS frame length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_cts_frame {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	ra;		/* receiver address */
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_CTS_LEN		10		/* d11 CTS frame length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_ack_frame {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	ra;		/* receiver address */
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_ACK_LEN		10		/* d11 ACK frame length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_ps_poll_frame {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* AID */
+	struct ether_addr	bssid;		/* receiver address, STA in AP */
+	struct ether_addr	ta;		/* transmitter address */
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_PS_POLL_LEN	16		/* d11 PS poll frame length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_cf_end_frame {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	ra;		/* receiver address */
+	struct ether_addr	bssid;		/* transmitter address, STA in AP */
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_CS_END_LEN	16		/* d11 CF-END frame length */
+
+/* RWL wifi protocol: the Vendor Specific Action frame is defined for vendor-specific
+ * signaling: category + OUI + vendor-specific content (variable length)
+ */
+BWL_PRE_PACKED_STRUCT struct dot11_action_wifi_vendor_specific {
+	uint8	category;
+	uint8	OUI[3];
+	uint8	type;
+	uint8	subtype;
+	uint8	data[1040];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_action_wifi_vendor_specific dot11_action_wifi_vendor_specific_t;
+
+/* generic vendor-specific action frame with variable length */
+BWL_PRE_PACKED_STRUCT struct dot11_action_vs_frmhdr {
+	uint8	category;
+	uint8	OUI[3];
+	uint8	type;
+	uint8	subtype;
+	uint8	data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_action_vs_frmhdr dot11_action_vs_frmhdr_t;
+#define DOT11_ACTION_VS_HDR_LEN	6
+
+#define BCM_ACTION_OUI_BYTE0	0x00
+#define BCM_ACTION_OUI_BYTE1	0x90
+#define BCM_ACTION_OUI_BYTE2	0x4c
+
+/* BA/BAR Control parameters */
+#define DOT11_BA_CTL_POLICY_NORMAL	0x0000	/* normal ack */
+#define DOT11_BA_CTL_POLICY_NOACK	0x0001	/* no ack */
+#define DOT11_BA_CTL_POLICY_MASK	0x0001	/* ack policy mask */
+
+#define DOT11_BA_CTL_MTID		0x0002	/* multi tid BA */
+#define DOT11_BA_CTL_COMPRESSED		0x0004	/* compressed bitmap */
+
+#define DOT11_BA_CTL_NUMMSDU_MASK	0x0FC0	/* num msdu in bitmap mask */
+#define DOT11_BA_CTL_NUMMSDU_SHIFT	6	/* num msdu in bitmap shift */
+
+#define DOT11_BA_CTL_TID_MASK		0xF000	/* tid mask */
+#define DOT11_BA_CTL_TID_SHIFT		12	/* tid shift */
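+/* Example (illustrative only; the helpers are hypothetical): extracting
+ * the TID and per-bitmap MSDU count from a BA/BAR control word.
+ */
+#if 0	/* usage sketch */
+static unsigned int dot11_ba_ctl_tid(unsigned int ba_ctl)
+{
+	return (ba_ctl & DOT11_BA_CTL_TID_MASK) >> DOT11_BA_CTL_TID_SHIFT;
+}
+static unsigned int dot11_ba_ctl_nummsdu(unsigned int ba_ctl)
+{
+	return (ba_ctl & DOT11_BA_CTL_NUMMSDU_MASK) >> DOT11_BA_CTL_NUMMSDU_SHIFT;
+}
+#endif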
+
+/* control frame header (BA/BAR) */
+BWL_PRE_PACKED_STRUCT struct dot11_ctl_header {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	ra;		/* receiver address */
+	struct ether_addr	ta;		/* transmitter address */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_CTL_HDR_LEN	16		/* control frame hdr len */
+
+/* BAR frame payload */
+BWL_PRE_PACKED_STRUCT struct dot11_bar {
+	uint16			bar_control;	/* BAR Control */
+	uint16			seqnum;		/* Starting Sequence control */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BAR_LEN		4		/* BAR frame payload length */
+
+#define DOT11_BA_BITMAP_LEN	128		/* bitmap length */
+#define DOT11_BA_CMP_BITMAP_LEN	8		/* compressed bitmap length */
+/* BA frame payload */
+BWL_PRE_PACKED_STRUCT struct dot11_ba {
+	uint16			ba_control;	/* BA Control */
+	uint16			seqnum;		/* Starting Sequence control */
+	uint8			bitmap[DOT11_BA_BITMAP_LEN];	/* Block Ack Bitmap */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_BA_LEN		4		/* BA frame payload len (wo bitmap) */
+
+/* Management frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_management_header {
+	uint16			fc;		/* frame control */
+	uint16			durid;		/* duration/ID */
+	struct ether_addr	da;		/* receiver address */
+	struct ether_addr	sa;		/* transmitter address */
+	struct ether_addr	bssid;		/* BSS ID */
+	uint16			seq;		/* sequence control */
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_MGMT_HDR_LEN	24		/* d11 management header length */
+
+/* Management frame payloads */
+
+BWL_PRE_PACKED_STRUCT struct dot11_bcn_prb {
+	uint32			timestamp[2];
+	uint16			beacon_interval;
+	uint16			capability;
+} BWL_POST_PACKED_STRUCT;
+#define	DOT11_BCN_PRB_LEN	12		/* 802.11 beacon/probe frame fixed length */
+#define	DOT11_BCN_PRB_FIXED_LEN	12		/* 802.11 beacon/probe frame fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_auth {
+	uint16			alg;		/* algorithm */
+	uint16			seq;		/* sequence control */
+	uint16			status;		/* status code */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_AUTH_FIXED_LEN	6		/* length of auth frame without challenge IE */
+
+BWL_PRE_PACKED_STRUCT struct dot11_assoc_req {
+	uint16			capability;	/* capability information */
+	uint16			listen;		/* listen interval */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ASSOC_REQ_FIXED_LEN	4	/* length of assoc frame without info elts */
+
+BWL_PRE_PACKED_STRUCT struct dot11_reassoc_req {
+	uint16			capability;	/* capability information */
+	uint16			listen;		/* listen interval */
+	struct ether_addr	ap;		/* Current AP address */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_REASSOC_REQ_FIXED_LEN	10	/* length of assoc frame without info elts */
+
+BWL_PRE_PACKED_STRUCT struct dot11_assoc_resp {
+	uint16			capability;	/* capability information */
+	uint16			status;		/* status code */
+	uint16			aid;		/* association ID */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ASSOC_RESP_FIXED_LEN	6	/* length of assoc resp frame without info elts */
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_measure {
+	uint8	category;
+	uint8	action;
+	uint8	token;
+	uint8	data[1];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ACTION_MEASURE_LEN	3	/* d11 action measurement header length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ht_ch_width {
+	uint8	category;
+	uint8	action;
+	uint8	ch_width;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ht_mimops {
+	uint8	category;
+	uint8	action;
+	uint8	control;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_sa_query {
+	uint8	category;
+	uint8	action;
+	uint16	id;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_vht_oper_mode {
+	uint8	category;
+	uint8	action;
+	uint8	mode;
+} BWL_POST_PACKED_STRUCT;
+
+#define SM_PWRSAVE_ENABLE	1
+#define SM_PWRSAVE_MODE		2
+
+/* ************* 802.11h related definitions. ************* */
+BWL_PRE_PACKED_STRUCT struct dot11_power_cnst {
+	uint8 id;
+	uint8 len;
+	uint8 power;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_power_cnst dot11_power_cnst_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_power_cap {
+	uint8 min;
+	uint8 max;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_power_cap dot11_power_cap_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_tpc_rep {
+	uint8 id;
+	uint8 len;
+	uint8 tx_pwr;
+	uint8 margin;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tpc_rep dot11_tpc_rep_t;
+#define DOT11_MNG_IE_TPC_REPORT_LEN	2 	/* length of IE data, not including 2 byte header */
+
+BWL_PRE_PACKED_STRUCT struct dot11_supp_channels {
+	uint8 id;
+	uint8 len;
+	uint8 first_channel;
+	uint8 num_channels;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_supp_channels dot11_supp_channels_t;
+
+/* Extension Channel Offset IE: 802.11n-D1.0 spec. added sideband
+ * offset for 40MHz operation.  The 3 possible values are:
+ * 1 = above control channel
+ * 3 = below control channel
+ * 0 = no extension channel
+ */
+BWL_PRE_PACKED_STRUCT struct dot11_extch {
+	uint8	id;		/* IE ID, 62, DOT11_MNG_EXT_CHANNEL_OFFSET */
+	uint8	len;		/* IE length */
+	uint8	extch;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extch dot11_extch_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_brcm_extch {
+	uint8	id;		/* IE ID, 221, DOT11_MNG_PROPR_ID */
+	uint8	len;		/* IE length */
+	uint8	oui[3];		/* Proprietary OUI, BRCM_PROP_OUI */
+	uint8	type;           /* type indicates what follows */
+	uint8	extch;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_brcm_extch dot11_brcm_extch_ie_t;
+
+#define BRCM_EXTCH_IE_LEN	5
+#define BRCM_EXTCH_IE_TYPE	53	/* 802.11n ID not yet assigned */
+#define DOT11_EXTCH_IE_LEN	1
+#define DOT11_EXT_CH_MASK	0x03	/* extension channel mask */
+#define DOT11_EXT_CH_UPPER	0x01	/* ext. ch. on upper sb */
+#define DOT11_EXT_CH_LOWER	0x03	/* ext. ch. on lower sb */
+#define DOT11_EXT_CH_NONE	0x00	/* no extension ch.  */
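+/* Example (illustrative only; the helper is hypothetical): decoding the
+ * extension channel field into an offset relative to the control channel.
+ */
+#if 0	/* usage sketch */
+static int dot11_extch_offset(unsigned char extch)
+{
+	switch (extch & DOT11_EXT_CH_MASK) {
+	case DOT11_EXT_CH_UPPER:
+		return 1;	/* extension above the control channel */
+	case DOT11_EXT_CH_LOWER:
+		return -1;	/* extension below the control channel */
+	default:
+		return 0;	/* DOT11_EXT_CH_NONE */
+	}
+}
+#endif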
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_frmhdr {
+	uint8	category;
+	uint8	action;
+	uint8	data[1];
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_ACTION_FRMHDR_LEN	2
+
+/* CSA IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_channel_switch {
+	uint8 id;	/* id DOT11_MNG_CHANNEL_SWITCH_ID */
+	uint8 len;	/* length of IE */
+	uint8 mode;	/* mode 0 or 1 */
+	uint8 channel;	/* channel switch to */
+	uint8 count;	/* number of beacons before switching */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_channel_switch dot11_chan_switch_ie_t;
+
+#define DOT11_SWITCH_IE_LEN	3	/* length of IE data, not including 2 byte header */
+/* CSA mode - 802.11h-2003 $7.3.2.20 */
+#define DOT11_CSA_MODE_ADVISORY		0	/* no DOT11_CSA_MODE_NO_TX restriction imposed */
+#define DOT11_CSA_MODE_NO_TX		1	/* no transmission upon receiving CSA frame. */
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_switch_channel {
+	uint8	category;
+	uint8	action;
+	dot11_chan_switch_ie_t chan_switch_ie;	/* for switch IE */
+	dot11_brcm_extch_ie_t extch_ie;		/* extension channel offset */
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11_csa_body {
+	uint8 mode;	/* mode 0 or 1 */
+	uint8 reg;	/* regulatory class */
+	uint8 channel;	/* channel switch to */
+	uint8 count;	/* number of beacons before switching */
+} BWL_POST_PACKED_STRUCT;
+
+/* 11n Extended Channel Switch IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_ext_csa {
+	uint8 id;	/* id DOT11_MNG_EXT_CHANNEL_SWITCH_ID */
+	uint8 len;	/* length of IE */
+	struct dot11_csa_body b;	/* body of the ie */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ext_csa dot11_ext_csa_ie_t;
+#define DOT11_EXT_CSA_IE_LEN	4	/* length of extended channel switch IE body */
+
+BWL_PRE_PACKED_STRUCT struct dot11_action_ext_csa {
+	uint8	category;
+	uint8	action;
+	dot11_ext_csa_ie_t chan_switch_ie;	/* for switch IE */
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct dot11y_action_ext_csa {
+	uint8	category;
+	uint8	action;
+	struct dot11_csa_body b;	/* body of the ie */
+} BWL_POST_PACKED_STRUCT;
+
+/*  Wide Bandwidth Channel Switch IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_wide_bw_channel_switch {
+	uint8 id;				/* id DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID */
+	uint8 len;				/* length of IE */
+	uint8 channel_width;			/* new channel width */
+	uint8 center_frequency_segment_0;	/* center frequency segment 0 */
+	uint8 center_frequency_segment_1;	/* center frequency segment 1 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wide_bw_channel_switch dot11_wide_bw_chan_switch_ie_t;
+
+#define DOT11_WIDE_BW_SWITCH_IE_LEN     3       /* length of IE data, not including 2 byte header */
+
+/* Channel Switch Wrapper IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_channel_switch_wrapper {
+	uint8 id;				/* id DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID */
+	uint8 len;				/* length of IE */
+	dot11_wide_bw_chan_switch_ie_t wb_chan_switch_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_channel_switch_wrapper dot11_chan_switch_wrapper_ie_t;
+
+/* VHT Transmit Power Envelope IE data structure */
+BWL_PRE_PACKED_STRUCT struct dot11_vht_transmit_power_envelope {
+	uint8 id;				/* id DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID */
+	uint8 len;				/* length of IE */
+	uint8 transmit_power_info;
+	uint8 local_max_transmit_power_20;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_vht_transmit_power_envelope dot11_vht_transmit_power_envelope_ie_t;
+
+/* vht transmit power envelope IE length depends on channel width */
+#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_40MHZ	1
+#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_80MHZ	2
+#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_160MHZ	3
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_coex {
+	uint8	id;
+	uint8	len;
+	uint8	info;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_coex dot11_obss_coex_t;
+#define DOT11_OBSS_COEXINFO_LEN	1	/* length of OBSS Coexistence INFO IE */
+
+#define	DOT11_OBSS_COEX_INFO_REQ		0x01
+#define	DOT11_OBSS_COEX_40MHZ_INTOLERANT	0x02
+#define	DOT11_OBSS_COEX_20MHZ_WIDTH_REQ	0x04
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_chanlist {
+	uint8	id;
+	uint8	len;
+	uint8	regclass;
+	uint8	chanlist[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_chanlist dot11_obss_chanlist_t;
+#define DOT11_OBSS_CHANLIST_FIXED_LEN	1	/* fixed length of regclass */
+
+BWL_PRE_PACKED_STRUCT struct dot11_extcap_ie {
+	uint8 id;
+	uint8 len;
+	uint8 cap[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extcap_ie dot11_extcap_ie_t;
+
+#define DOT11_EXTCAP_LEN_MAX	8
+
+#define DOT11_EXTCAP_LEN_COEX	1
+#define DOT11_EXTCAP_LEN_BT	3
+#define DOT11_EXTCAP_LEN_IW	4
+#define DOT11_EXTCAP_LEN_SI	6
+
+#define DOT11_EXTCAP_LEN_TDLS	5
+#define DOT11_11AC_EXTCAP_LEN_TDLS	8
+
+#define DOT11_EXTCAP_LEN_FMS			2
+#define DOT11_EXTCAP_LEN_PROXY_ARP		2
+#define DOT11_EXTCAP_LEN_TFS			3
+#define DOT11_EXTCAP_LEN_WNM_SLEEP		3
+#define DOT11_EXTCAP_LEN_TIMBC			3
+#define DOT11_EXTCAP_LEN_BSSTRANS		3
+#define DOT11_EXTCAP_LEN_DMS			4
+#define DOT11_EXTCAP_LEN_WNM_NOTIFICATION	6
+#define DOT11_EXTCAP_LEN_TDLS_WBW		8
+#define DOT11_EXTCAP_LEN_OPMODE_NOTIFICATION	8
+
+BWL_PRE_PACKED_STRUCT struct dot11_extcap {
+	uint8 extcap[DOT11_EXTCAP_LEN_MAX];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extcap dot11_extcap_t;
+
+/* TDLS Capabilities */
+#define DOT11_TDLS_CAP_TDLS			37		/* TDLS support */
+#define DOT11_TDLS_CAP_PU_BUFFER_STA	28		/* TDLS Peer U-APSD buffer STA support */
+#define DOT11_TDLS_CAP_PEER_PSM		20		/* TDLS Peer PSM support */
+#define DOT11_TDLS_CAP_CH_SW			30		/* TDLS Channel switch */
+#define DOT11_TDLS_CAP_PROH			38		/* TDLS prohibited */
+#define DOT11_TDLS_CAP_CH_SW_PROH		39		/* TDLS Channel switch prohibited */
+#define DOT11_TDLS_CAP_TDLS_WIDER_BW	61	/* TDLS Wider Band-Width */
+
+#define TDLS_CAP_MAX_BIT		39		/* TDLS max bit defined in ext cap */
+
+/* 802.11h/802.11k Measurement Request/Report IEs */
+/* Measurement Type field */
+#define DOT11_MEASURE_TYPE_BASIC 	0	/* d11 measurement basic type */
+#define DOT11_MEASURE_TYPE_CCA 		1	/* d11 measurement CCA type */
+#define DOT11_MEASURE_TYPE_RPI		2	/* d11 measurement RPI type */
+#define DOT11_MEASURE_TYPE_CHLOAD		3	/* d11 measurement Channel Load type */
+#define DOT11_MEASURE_TYPE_NOISE		4	/* d11 measurement Noise Histogram type */
+#define DOT11_MEASURE_TYPE_BEACON		5	/* d11 measurement Beacon type */
+#define DOT11_MEASURE_TYPE_FRAME	6	/* d11 measurement Frame type */
+#define DOT11_MEASURE_TYPE_STAT		7	/* d11 measurement STA Statistics type */
+#define DOT11_MEASURE_TYPE_LCI		8	/* d11 measurement LCI type */
+#define DOT11_MEASURE_TYPE_TXSTREAM		9	/* d11 measurement TX Stream type */
+#define DOT11_MEASURE_TYPE_PAUSE		255	/* d11 measurement pause type */
+
+/* Measurement Request Modes */
+#define DOT11_MEASURE_MODE_PARALLEL 	(1<<0)	/* d11 measurement parallel */
+#define DOT11_MEASURE_MODE_ENABLE 	(1<<1)	/* d11 measurement enable */
+#define DOT11_MEASURE_MODE_REQUEST	(1<<2)	/* d11 measurement request */
+#define DOT11_MEASURE_MODE_REPORT 	(1<<3)	/* d11 measurement report */
+#define DOT11_MEASURE_MODE_DUR 	(1<<4)	/* d11 measurement dur mandatory */
+/* Measurement Report Modes */
+#define DOT11_MEASURE_MODE_LATE 	(1<<0)	/* d11 measurement late */
+#define DOT11_MEASURE_MODE_INCAPABLE	(1<<1)	/* d11 measurement incapable */
+#define DOT11_MEASURE_MODE_REFUSED	(1<<2)	/* d11 measurement refuse */
+/* Basic Measurement Map bits */
+#define DOT11_MEASURE_BASIC_MAP_BSS	((uint8)(1<<0))	/* d11 measurement basic map BSS */
+#define DOT11_MEASURE_BASIC_MAP_OFDM	((uint8)(1<<1))	/* d11 measurement map OFDM */
+#define DOT11_MEASURE_BASIC_MAP_UKNOWN	((uint8)(1<<2))	/* d11 measurement map unknown */
+#define DOT11_MEASURE_BASIC_MAP_RADAR	((uint8)(1<<3))	/* d11 measurement map radar */
+#define DOT11_MEASURE_BASIC_MAP_UNMEAS	((uint8)(1<<4))	/* d11 measurement map unmeasured */
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_req {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 channel;
+	uint8 start_time[8];
+	uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_req dot11_meas_req_t;
+#define DOT11_MNG_IE_MREQ_LEN 14	/* d11 measurement request IE length */
+/* length of Measure Request IE data not including variable len */
+#define DOT11_MNG_IE_MREQ_FIXED_LEN 3	/* d11 measurement request IE fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_rep {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	BWL_PRE_PACKED_STRUCT union
+	{
+		BWL_PRE_PACKED_STRUCT struct {
+			uint8 channel;
+			uint8 start_time[8];
+			uint16 duration;
+			uint8 map;
+		} BWL_POST_PACKED_STRUCT basic;
+		uint8 data[1];
+	} BWL_POST_PACKED_STRUCT rep;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_rep dot11_meas_rep_t;
+
+/* length of Measure Report IE data not including variable len */
+#define DOT11_MNG_IE_MREP_FIXED_LEN	3	/* d11 measurement response IE fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_meas_rep_basic {
+	uint8 channel;
+	uint8 start_time[8];
+	uint16 duration;
+	uint8 map;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_rep_basic dot11_meas_rep_basic_t;
+#define DOT11_MEASURE_BASIC_REP_LEN	12	/* d11 measurement basic report length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_quiet {
+	uint8 id;
+	uint8 len;
+	uint8 count;	/* TBTTs until beacon interval in quiet starts */
+	uint8 period;	/* Beacon intervals between periodic quiet periods */
+	uint16 duration;	/* Length of quiet period, in TU's */
+	uint16 offset;	/* TU's offset from TBTT in Count field */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_quiet dot11_quiet_t;
+
+BWL_PRE_PACKED_STRUCT struct chan_map_tuple {
+	uint8 channel;
+	uint8 map;
+} BWL_POST_PACKED_STRUCT;
+typedef struct chan_map_tuple chan_map_tuple_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_ibss_dfs {
+	uint8 id;
+	uint8 len;
+	uint8 eaddr[ETHER_ADDR_LEN];
+	uint8 interval;
+	chan_map_tuple_t map[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ibss_dfs dot11_ibss_dfs_t;
+
+/* WME Elements */
+#define WME_OUI			"\x00\x50\xf2"	/* WME OUI */
+#define WME_OUI_LEN		3
+#define WME_OUI_TYPE		2	/* WME type */
+#define WME_TYPE		2	/* WME type, deprecated */
+#define WME_SUBTYPE_IE		0	/* Information Element */
+#define WME_SUBTYPE_PARAM_IE	1	/* Parameter Element */
+#define WME_SUBTYPE_TSPEC	2	/* Traffic Specification */
+#define WME_VER			1	/* WME version */
+
+/* WME Access Category Indices (ACIs) */
+#define AC_BE			0	/* Best Effort */
+#define AC_BK			1	/* Background */
+#define AC_VI			2	/* Video */
+#define AC_VO			3	/* Voice */
+#define AC_COUNT		4	/* number of ACs */
+
+typedef uint8 ac_bitmap_t;	/* AC bitmap of (1 << AC_xx) */
+
+#define AC_BITMAP_NONE		0x0	/* No ACs */
+#define AC_BITMAP_ALL		0xf	/* All ACs */
+#define AC_BITMAP_TST(ab, ac)	(((ab) & (1 << (ac))) != 0)
+#define AC_BITMAP_SET(ab, ac)	(((ab) |= (1 << (ac))))
+#define AC_BITMAP_RESET(ab, ac) (((ab) &= ~(1 << (ac))))
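+
+/* Usage sketch (illustrative only, not part of the original header):
+ * build an AC bitmap covering the video and voice classes and query it
+ * with the macros above.
+ */
+static ac_bitmap_t ac_bitmap_example(void)
+{
+	ac_bitmap_t admitted = AC_BITMAP_NONE;
+
+	AC_BITMAP_SET(admitted, AC_VI);			/* admitted == 0x4 */
+	AC_BITMAP_SET(admitted, AC_VO);			/* admitted == 0xc */
+	if (AC_BITMAP_TST(admitted, AC_BE))
+		AC_BITMAP_RESET(admitted, AC_BE);	/* not taken: AC_BE unset */
+	return admitted;
+}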
+
+/* WME Information Element (IE) */
+BWL_PRE_PACKED_STRUCT struct wme_ie {
+	uint8 oui[3];
+	uint8 type;
+	uint8 subtype;
+	uint8 version;
+	uint8 qosinfo;
+} BWL_POST_PACKED_STRUCT;
+typedef struct wme_ie wme_ie_t;
+#define WME_IE_LEN 7	/* WME IE length */
+
+BWL_PRE_PACKED_STRUCT struct edcf_acparam {
+	uint8	ACI;
+	uint8	ECW;
+	uint16  TXOP;		/* stored in network order (ls octet first) */
+} BWL_POST_PACKED_STRUCT;
+typedef struct edcf_acparam edcf_acparam_t;
+
+/* WME Parameter Element (PE) */
+BWL_PRE_PACKED_STRUCT struct wme_param_ie {
+	uint8 oui[3];
+	uint8 type;
+	uint8 subtype;
+	uint8 version;
+	uint8 qosinfo;
+	uint8 rsvd;
+	edcf_acparam_t acparam[AC_COUNT];
+} BWL_POST_PACKED_STRUCT;
+typedef struct wme_param_ie wme_param_ie_t;
+#define WME_PARAM_IE_LEN            24          /* WME Parameter IE length */
+
+/* QoS Info field for IE as sent from AP */
+#define WME_QI_AP_APSD_MASK         0x80        /* U-APSD Supported mask */
+#define WME_QI_AP_APSD_SHIFT        7           /* U-APSD Supported shift */
+#define WME_QI_AP_COUNT_MASK        0x0f        /* Parameter set count mask */
+#define WME_QI_AP_COUNT_SHIFT       0           /* Parameter set count shift */
+
+/* QoS Info field for IE as sent from STA */
+#define WME_QI_STA_MAXSPLEN_MASK    0x60        /* Max Service Period Length mask */
+#define WME_QI_STA_MAXSPLEN_SHIFT   5           /* Max Service Period Length shift */
+#define WME_QI_STA_APSD_ALL_MASK    0xf         /* APSD all AC bits mask */
+#define WME_QI_STA_APSD_ALL_SHIFT   0           /* APSD all AC bits shift */
+#define WME_QI_STA_APSD_BE_MASK     0x8         /* APSD AC_BE mask */
+#define WME_QI_STA_APSD_BE_SHIFT    3           /* APSD AC_BE shift */
+#define WME_QI_STA_APSD_BK_MASK     0x4         /* APSD AC_BK mask */
+#define WME_QI_STA_APSD_BK_SHIFT    2           /* APSD AC_BK shift */
+#define WME_QI_STA_APSD_VI_MASK     0x2         /* APSD AC_VI mask */
+#define WME_QI_STA_APSD_VI_SHIFT    1           /* APSD AC_VI shift */
+#define WME_QI_STA_APSD_VO_MASK     0x1         /* APSD AC_VO mask */
+#define WME_QI_STA_APSD_VO_SHIFT    0           /* APSD AC_VO shift */
+
+/* ACI */
+#define EDCF_AIFSN_MIN               1           /* AIFSN minimum value */
+#define EDCF_AIFSN_MAX               15          /* AIFSN maximum value */
+#define EDCF_AIFSN_MASK              0x0f        /* AIFSN mask */
+#define EDCF_ACM_MASK                0x10        /* ACM mask */
+#define EDCF_ACI_MASK                0x60        /* ACI mask */
+#define EDCF_ACI_SHIFT               5           /* ACI shift */
+#define EDCF_AIFSN_SHIFT             12          /* 4 MSB(0xFFF) in ifs_ctl for AC idx */
+
+/* ECW */
+#define EDCF_ECW_MIN                 0           /* cwmin/cwmax exponent minimum value */
+#define EDCF_ECW_MAX                 15          /* cwmin/cwmax exponent maximum value */
+#define EDCF_ECW2CW(exp)             ((1 << (exp)) - 1)
+#define EDCF_ECWMIN_MASK             0x0f        /* cwmin exponent form mask */
+#define EDCF_ECWMAX_MASK             0xf0        /* cwmax exponent form mask */
+#define EDCF_ECWMAX_SHIFT            4           /* cwmax exponent form shift */
+
+/* TXOP */
+#define EDCF_TXOP_MIN                0           /* TXOP minimum value */
+#define EDCF_TXOP_MAX                65535       /* TXOP maximum value */
+#define EDCF_TXOP2USEC(txop)         ((txop) << 5)
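+
+/* Worked example (illustrative only): an ECW octet of 0xA4 encodes
+ * ECWmin = 4 and ECWmax = 10, so CWmin = EDCF_ECW2CW(4) = 15 and
+ * CWmax = EDCF_ECW2CW(10) = 1023; a TXOP of 0x5e is in 32-us units, so
+ * EDCF_TXOP2USEC(0x5e) = 3008 us. The helpers below are a sketch, not
+ * part of the original header.
+ */
+static uint16 edcf_cwmin(uint8 ecw)
+{
+	return (uint16)EDCF_ECW2CW(ecw & EDCF_ECWMIN_MASK);
+}
+
+static uint16 edcf_cwmax(uint8 ecw)
+{
+	return (uint16)EDCF_ECW2CW((ecw & EDCF_ECWMAX_MASK) >> EDCF_ECWMAX_SHIFT);
+}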
+
+/* Default BE ACI value for non-WME connection STA */
+#define NON_EDCF_AC_BE_ACI_STA          0x02
+
+/* Default EDCF parameters that AP advertises for STA to use; WMM draft Table 12 */
+#define EDCF_AC_BE_ACI_STA           0x03	/* STA ACI value for best effort AC */
+#define EDCF_AC_BE_ECW_STA           0xA4	/* STA ECW value for best effort AC */
+#define EDCF_AC_BE_TXOP_STA          0x0000	/* STA TXOP value for best effort AC */
+#define EDCF_AC_BK_ACI_STA           0x27	/* STA ACI value for background AC */
+#define EDCF_AC_BK_ECW_STA           0xA4	/* STA ECW value for background AC */
+#define EDCF_AC_BK_TXOP_STA          0x0000	/* STA TXOP value for background AC */
+#define EDCF_AC_VI_ACI_STA           0x42	/* STA ACI value for video AC */
+#define EDCF_AC_VI_ECW_STA           0x43	/* STA ECW value for video AC */
+#define EDCF_AC_VI_TXOP_STA          0x005e	/* STA TXOP value for video AC */
+#define EDCF_AC_VO_ACI_STA           0x62	/* STA ACI value for audio AC */
+#define EDCF_AC_VO_ECW_STA           0x32	/* STA ECW value for audio AC */
+#define EDCF_AC_VO_TXOP_STA          0x002f	/* STA TXOP value for audio AC */
+
+/* Default EDCF parameters that AP uses; WMM draft Table 14 */
+#define EDCF_AC_BE_ACI_AP            0x03	/* AP ACI value for best effort AC */
+#define EDCF_AC_BE_ECW_AP            0x64	/* AP ECW value for best effort AC */
+#define EDCF_AC_BE_TXOP_AP           0x0000	/* AP TXOP value for best effort AC */
+#define EDCF_AC_BK_ACI_AP            0x27	/* AP ACI value for background AC */
+#define EDCF_AC_BK_ECW_AP            0xA4	/* AP ECW value for background AC */
+#define EDCF_AC_BK_TXOP_AP           0x0000	/* AP TXOP value for background AC */
+#define EDCF_AC_VI_ACI_AP            0x41	/* AP ACI value for video AC */
+#define EDCF_AC_VI_ECW_AP            0x43	/* AP ECW value for video AC */
+#define EDCF_AC_VI_TXOP_AP           0x005e	/* AP TXOP value for video AC */
+#define EDCF_AC_VO_ACI_AP            0x61	/* AP ACI value for audio AC */
+#define EDCF_AC_VO_ECW_AP            0x32	/* AP ECW value for audio AC */
+#define EDCF_AC_VO_TXOP_AP           0x002f	/* AP TXOP value for audio AC */
+
+/* EDCA Parameter IE */
+BWL_PRE_PACKED_STRUCT struct edca_param_ie {
+	uint8 qosinfo;
+	uint8 rsvd;
+	edcf_acparam_t acparam[AC_COUNT];
+} BWL_POST_PACKED_STRUCT;
+typedef struct edca_param_ie edca_param_ie_t;
+#define EDCA_PARAM_IE_LEN            18          /* EDCA Parameter IE length */
+
+/* QoS Capability IE */
+BWL_PRE_PACKED_STRUCT struct qos_cap_ie {
+	uint8 qosinfo;
+} BWL_POST_PACKED_STRUCT;
+typedef struct qos_cap_ie qos_cap_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_qbss_load_ie {
+	uint8 id; 			/* 11, DOT11_MNG_QBSS_LOAD_ID */
+	uint8 length;
+	uint16 station_count; 		/* total number of STAs associated */
+	uint8 channel_utilization;	/* % of time QAP sensed medium busy, normalized to 255 */
+	uint16 aac; 			/* available admission capacity */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_qbss_load_ie dot11_qbss_load_ie_t;
+#define BSS_LOAD_IE_SIZE 	7	/* BSS load IE size */
+
+/* nom_msdu_size */
+#define FIXED_MSDU_SIZE 0x8000		/* MSDU size is fixed */
+#define MSDU_SIZE_MASK	0x7fff		/* (Nominal or fixed) MSDU size */
+
+/* surplus_bandwidth */
+/* Represented as 3 bits of integer, binary point, 13 bits fraction */
+#define	INTEGER_SHIFT	13	/* integer shift */
+#define FRACTION_MASK	0x1FFF	/* fraction mask */
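+
+/* Illustrative sketch (not original code): the surplus bandwidth
+ * allowance is 3.13 fixed point, so 0x2800 decodes to
+ * 1 + 0x0800/0x2000 = 1.25. The helper below scales it to 1/1000ths
+ * to stay in integer arithmetic.
+ */
+static uint32 surplus_bw_x1000(uint16 sba)
+{
+	return ((uint32)(sba >> INTEGER_SHIFT) * 1000) +
+	       (((uint32)(sba & FRACTION_MASK) * 1000) >> INTEGER_SHIFT);
+}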
+
+/* Management Notification Frame */
+BWL_PRE_PACKED_STRUCT struct dot11_management_notification {
+	uint8 category;			/* DOT11_ACTION_NOTIFICATION */
+	uint8 action;
+	uint8 token;
+	uint8 status;
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+#define DOT11_MGMT_NOTIFICATION_LEN 4	/* Fixed length */
+
+/* Timeout Interval IE */
+BWL_PRE_PACKED_STRUCT struct ti_ie {
+	uint8 ti_type;
+	uint32 ti_val;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ti_ie ti_ie_t;
+#define TI_TYPE_REASSOC_DEADLINE	1
+#define TI_TYPE_KEY_LIFETIME		2
+
+/* WME Action Codes */
+#define WME_ADDTS_REQUEST	0	/* WME ADDTS request */
+#define WME_ADDTS_RESPONSE	1	/* WME ADDTS response */
+#define WME_DELTS_REQUEST	2	/* WME DELTS request */
+
+/* WME Setup Response Status Codes */
+#define WME_ADMISSION_ACCEPTED		0	/* WME admission accepted */
+#define WME_INVALID_PARAMETERS		1	/* WME invalid parameters */
+#define WME_ADMISSION_REFUSED		3	/* WME admission refused */
+
+/* Macro to take a pointer to a beacon or probe response
+ * body and return the char* pointer to the SSID info element
+ */
+#define BCN_PRB_SSID(body) ((char*)(body) + DOT11_BCN_PRB_LEN)
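+
+/* Illustrative sketch (not original code): in a beacon or probe response
+ * body the SSID element is the first IE after the fixed fields, so its
+ * length octet immediately follows its tag octet. A real parser must
+ * first bounds-check against the frame body length.
+ */
+static uint8 bcn_prb_ssid_len(void *body)
+{
+	const char *ssid_ie = BCN_PRB_SSID(body);
+	return (uint8)ssid_ie[1];	/* TLV length octet follows the tag */
+}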
+
+/* Authentication frame payload constants */
+#define DOT11_OPEN_SYSTEM	0	/* d11 open authentication */
+#define DOT11_SHARED_KEY	1	/* d11 shared authentication */
+#define DOT11_FAST_BSS		2	/* d11 fast bss authentication */
+#define DOT11_CHALLENGE_LEN	128	/* d11 challenge text length */
+
+/* Frame control macros */
+#define FC_PVER_MASK		0x3	/* PVER mask */
+#define FC_PVER_SHIFT		0	/* PVER shift */
+#define FC_TYPE_MASK		0xC	/* type mask */
+#define FC_TYPE_SHIFT		2	/* type shift */
+#define FC_SUBTYPE_MASK		0xF0	/* subtype mask */
+#define FC_SUBTYPE_SHIFT	4	/* subtype shift */
+#define FC_TODS			0x100	/* to DS */
+#define FC_TODS_SHIFT		8	/* to DS shift */
+#define FC_FROMDS		0x200	/* from DS */
+#define FC_FROMDS_SHIFT		9	/* from DS shift */
+#define FC_MOREFRAG		0x400	/* more frag. */
+#define FC_MOREFRAG_SHIFT	10	/* more frag. shift */
+#define FC_RETRY		0x800	/* retry */
+#define FC_RETRY_SHIFT		11	/* retry shift */
+#define FC_PM			0x1000	/* PM */
+#define FC_PM_SHIFT		12	/* PM shift */
+#define FC_MOREDATA		0x2000	/* more data */
+#define FC_MOREDATA_SHIFT	13	/* more data shift */
+#define FC_WEP			0x4000	/* WEP */
+#define FC_WEP_SHIFT		14	/* WEP shift */
+#define FC_ORDER		0x8000	/* order */
+#define FC_ORDER_SHIFT		15	/* order shift */
+
+/* sequence control macros */
+#define SEQNUM_SHIFT		4	/* seq. number shift */
+#define SEQNUM_MAX		0x1000	/* max seqnum + 1 */
+#define FRAGNUM_MASK		0xF	/* frag. number mask */
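+
+/* Illustrative sketch (not original code): split a host-order Sequence
+ * Control field into its sequence and fragment numbers.
+ */
+static uint16 seqctl_seqnum(uint16 seq)
+{
+	return seq >> SEQNUM_SHIFT;		/* 12-bit sequence number */
+}
+
+static uint8 seqctl_fragnum(uint16 seq)
+{
+	return (uint8)(seq & FRAGNUM_MASK);	/* 4-bit fragment number */
+}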
+
+/* Frame Control type/subtype defs */
+
+/* FC Types */
+#define FC_TYPE_MNG		0	/* management type */
+#define FC_TYPE_CTL		1	/* control type */
+#define FC_TYPE_DATA		2	/* data type */
+
+/* Management Subtypes */
+#define FC_SUBTYPE_ASSOC_REQ		0	/* assoc. request */
+#define FC_SUBTYPE_ASSOC_RESP		1	/* assoc. response */
+#define FC_SUBTYPE_REASSOC_REQ		2	/* reassoc. request */
+#define FC_SUBTYPE_REASSOC_RESP		3	/* reassoc. response */
+#define FC_SUBTYPE_PROBE_REQ		4	/* probe request */
+#define FC_SUBTYPE_PROBE_RESP		5	/* probe response */
+#define FC_SUBTYPE_BEACON		8	/* beacon */
+#define FC_SUBTYPE_ATIM			9	/* ATIM */
+#define FC_SUBTYPE_DISASSOC		10	/* disassoc. */
+#define FC_SUBTYPE_AUTH			11	/* authentication */
+#define FC_SUBTYPE_DEAUTH		12	/* de-authentication */
+#define FC_SUBTYPE_ACTION		13	/* action */
+#define FC_SUBTYPE_ACTION_NOACK		14	/* action no-ack */
+
+/* Control Subtypes */
+#define FC_SUBTYPE_CTL_WRAPPER		7	/* Control Wrapper */
+#define FC_SUBTYPE_BLOCKACK_REQ		8	/* Block Ack Req */
+#define FC_SUBTYPE_BLOCKACK		9	/* Block Ack */
+#define FC_SUBTYPE_PS_POLL		10	/* PS poll */
+#define FC_SUBTYPE_RTS			11	/* RTS */
+#define FC_SUBTYPE_CTS			12	/* CTS */
+#define FC_SUBTYPE_ACK			13	/* ACK */
+#define FC_SUBTYPE_CF_END		14	/* CF-END */
+#define FC_SUBTYPE_CF_END_ACK		15	/* CF-END ACK */
+
+/* Data Subtypes */
+#define FC_SUBTYPE_DATA			0	/* Data */
+#define FC_SUBTYPE_DATA_CF_ACK		1	/* Data + CF-ACK */
+#define FC_SUBTYPE_DATA_CF_POLL		2	/* Data + CF-Poll */
+#define FC_SUBTYPE_DATA_CF_ACK_POLL	3	/* Data + CF-Ack + CF-Poll */
+#define FC_SUBTYPE_NULL			4	/* Null */
+#define FC_SUBTYPE_CF_ACK		5	/* CF-Ack */
+#define FC_SUBTYPE_CF_POLL		6	/* CF-Poll */
+#define FC_SUBTYPE_CF_ACK_POLL		7	/* CF-Ack + CF-Poll */
+#define FC_SUBTYPE_QOS_DATA		8	/* QoS Data */
+#define FC_SUBTYPE_QOS_DATA_CF_ACK	9	/* QoS Data + CF-Ack */
+#define FC_SUBTYPE_QOS_DATA_CF_POLL	10	/* QoS Data + CF-Poll */
+#define FC_SUBTYPE_QOS_DATA_CF_ACK_POLL	11	/* QoS Data + CF-Ack + CF-Poll */
+#define FC_SUBTYPE_QOS_NULL		12	/* QoS Null */
+#define FC_SUBTYPE_QOS_CF_POLL		14	/* QoS CF-Poll */
+#define FC_SUBTYPE_QOS_CF_ACK_POLL	15	/* QoS CF-Ack + CF-Poll */
+
+/* Data Subtype Groups */
+#define FC_SUBTYPE_ANY_QOS(s)		(((s) & 8) != 0)
+#define FC_SUBTYPE_ANY_NULL(s)		(((s) & 4) != 0)
+#define FC_SUBTYPE_ANY_CF_POLL(s)	(((s) & 2) != 0)
+#define FC_SUBTYPE_ANY_CF_ACK(s)	(((s) & 1) != 0)
+#define FC_SUBTYPE_ANY_PSPOLL(s)	(((s) & 10) != 0)
+
+/* Type/Subtype Combos */
+#define FC_KIND_MASK		(FC_TYPE_MASK | FC_SUBTYPE_MASK)	/* FC kind mask */
+
+#define FC_KIND(t, s)	(((t) << FC_TYPE_SHIFT) | ((s) << FC_SUBTYPE_SHIFT))	/* FC kind */
+
+#define FC_SUBTYPE(fc)	(((fc) & FC_SUBTYPE_MASK) >> FC_SUBTYPE_SHIFT)	/* Subtype from FC */
+#define FC_TYPE(fc)	(((fc) & FC_TYPE_MASK) >> FC_TYPE_SHIFT)	/* Type from FC */
+
+#define FC_ASSOC_REQ	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_REQ)	/* assoc. request */
+#define FC_ASSOC_RESP	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_RESP)	/* assoc. response */
+#define FC_REASSOC_REQ	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_REQ)	/* reassoc. request */
+#define FC_REASSOC_RESP	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_RESP)	/* reassoc. response */
+#define FC_PROBE_REQ	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_REQ)	/* probe request */
+#define FC_PROBE_RESP	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_RESP)	/* probe response */
+#define FC_BEACON	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_BEACON)		/* beacon */
+#define FC_DISASSOC	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DISASSOC)	/* disassoc */
+#define FC_AUTH		FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_AUTH)		/* authentication */
+#define FC_DEAUTH	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DEAUTH)		/* deauthentication */
+#define FC_ACTION	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION)		/* action */
+#define FC_ACTION_NOACK	FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION_NOACK)	/* action no-ack */
+
+#define FC_CTL_WRAPPER	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTL_WRAPPER)	/* Control Wrapper */
+#define FC_BLOCKACK_REQ	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK_REQ)	/* Block Ack Req */
+#define FC_BLOCKACK	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK)	/* Block Ack */
+#define FC_PS_POLL	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_PS_POLL)	/* PS poll */
+#define FC_RTS		FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_RTS)		/* RTS */
+#define FC_CTS		FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTS)		/* CTS */
+#define FC_ACK		FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_ACK)		/* ACK */
+#define FC_CF_END	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END)		/* CF-END */
+#define FC_CF_END_ACK	FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END_ACK)	/* CF-END ACK */
+
+#define FC_DATA		FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA)		/* data */
+#define FC_NULL_DATA	FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_NULL)		/* null data */
+#define FC_DATA_CF_ACK	FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA_CF_ACK)	/* data CF ACK */
+#define FC_QOS_DATA	FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_DATA)	/* QoS data */
+#define FC_QOS_NULL	FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_NULL)	/* QoS null */
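+
+/* Illustrative sketch (not original code): classify a frame from its
+ * host-order Frame Control field using the accessors above.
+ */
+static int fc_is_beacon(uint16 fc)
+{
+	return (fc & FC_KIND_MASK) == FC_BEACON;
+}
+
+static int fc_is_qos_data(uint16 fc)
+{
+	return (FC_TYPE(fc) == FC_TYPE_DATA) && FC_SUBTYPE_ANY_QOS(FC_SUBTYPE(fc));
+}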
+
+/* QoS Control Field */
+
+/* 802.1D Priority */
+#define QOS_PRIO_SHIFT		0	/* QoS priority shift */
+#define QOS_PRIO_MASK		0x0007	/* QoS priority mask */
+#define QOS_PRIO(qos)		(((qos) & QOS_PRIO_MASK) >> QOS_PRIO_SHIFT)	/* QoS priority */
+
+/* Traffic Identifier */
+#define QOS_TID_SHIFT		0	/* QoS TID shift */
+#define QOS_TID_MASK		0x000f	/* QoS TID mask */
+#define QOS_TID(qos)		(((qos) & QOS_TID_MASK) >> QOS_TID_SHIFT)	/* QoS TID */
+
+/* End of Service Period (U-APSD) */
+#define QOS_EOSP_SHIFT		4	/* QoS End of Service Period shift */
+#define QOS_EOSP_MASK		0x0010	/* QoS End of Service Period mask */
+#define QOS_EOSP(qos)		(((qos) & QOS_EOSP_MASK) >> QOS_EOSP_SHIFT)	/* Qos EOSP */
+
+/* Ack Policy */
+#define QOS_ACK_NORMAL_ACK	0	/* Normal Ack */
+#define QOS_ACK_NO_ACK		1	/* No Ack (eg mcast) */
+#define QOS_ACK_NO_EXP_ACK	2	/* No Explicit Ack */
+#define QOS_ACK_BLOCK_ACK	3	/* Block Ack */
+#define QOS_ACK_SHIFT		5	/* QoS ACK shift */
+#define QOS_ACK_MASK		0x0060	/* QoS ACK mask */
+#define QOS_ACK(qos)		(((qos) & QOS_ACK_MASK) >> QOS_ACK_SHIFT)	/* QoS ACK */
+
+/* A-MSDU flag */
+#define QOS_AMSDU_SHIFT		7	/* AMSDU shift */
+#define QOS_AMSDU_MASK		0x0080	/* AMSDU mask */
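+
+/* Illustrative sketch (not original code): extract the TID and ack
+ * policy from a host-order QoS Control field and test the A-MSDU bit.
+ */
+static uint8 qos_tid_of(uint16 qos)
+{
+	return (uint8)QOS_TID(qos);
+}
+
+static int qos_is_amsdu(uint16 qos)
+{
+	return (qos & QOS_AMSDU_MASK) != 0;
+}
+
+static int qos_needs_normal_ack(uint16 qos)
+{
+	return QOS_ACK(qos) == QOS_ACK_NORMAL_ACK;
+}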
+
+/* Management Frames */
+
+/* Management Frame Constants */
+
+/* Fixed fields */
+#define DOT11_MNG_AUTH_ALGO_LEN		2	/* d11 management auth. algo. length */
+#define DOT11_MNG_AUTH_SEQ_LEN		2	/* d11 management auth. seq. length */
+#define DOT11_MNG_BEACON_INT_LEN	2	/* d11 management beacon interval length */
+#define DOT11_MNG_CAP_LEN		2	/* d11 management cap. length */
+#define DOT11_MNG_AP_ADDR_LEN		6	/* d11 management AP address length */
+#define DOT11_MNG_LISTEN_INT_LEN	2	/* d11 management listen interval length */
+#define DOT11_MNG_REASON_LEN		2	/* d11 management reason length */
+#define DOT11_MNG_AID_LEN		2	/* d11 management AID length */
+#define DOT11_MNG_STATUS_LEN		2	/* d11 management status length */
+#define DOT11_MNG_TIMESTAMP_LEN		8	/* d11 management timestamp length */
+
+/* DUR/ID field in assoc resp is 0xc000 | AID */
+#define DOT11_AID_MASK			0x3fff	/* d11 AID mask */
+
+/* Reason Codes */
+#define DOT11_RC_RESERVED		0	/* d11 RC reserved */
+#define DOT11_RC_UNSPECIFIED		1	/* Unspecified reason */
+#define DOT11_RC_AUTH_INVAL		2	/* Previous authentication no longer valid */
+#define DOT11_RC_DEAUTH_LEAVING		3	/* Deauthenticated because sending station
+						 * is leaving (or has left) IBSS or ESS
+						 */
+#define DOT11_RC_INACTIVITY		4	/* Disassociated due to inactivity */
+#define DOT11_RC_BUSY			5	/* Disassociated because AP is unable to handle
+						 * all currently associated stations
+						 */
+#define DOT11_RC_INVAL_CLASS_2		6	/* Class 2 frame received from
+						 * nonauthenticated station
+						 */
+#define DOT11_RC_INVAL_CLASS_3		7	/* Class 3 frame received from
+						 *  nonassociated station
+						 */
+#define DOT11_RC_DISASSOC_LEAVING	8	/* Disassociated because sending station is
+						 * leaving (or has left) BSS
+						 */
+#define DOT11_RC_NOT_AUTH		9	/* Station requesting (re)association is not
+						 * authenticated with responding station
+						 */
+#define DOT11_RC_BAD_PC			10	/* Unacceptable power capability element */
+#define DOT11_RC_BAD_CHANNELS		11	/* Unacceptable supported channels element */
+/* 12 is unused */
+
+/* 32-39 are QSTA specific reasons added in 11e */
+#define DOT11_RC_UNSPECIFIED_QOS	32	/* unspecified QoS-related reason */
+#define DOT11_RC_INSUFFCIENT_BW		33	/* QAP lacks sufficient bandwidth */
+#define DOT11_RC_EXCESSIVE_FRAMES	34	/* excessive number of frames need ack */
+#define DOT11_RC_TX_OUTSIDE_TXOP	35	/* transmitting outside the limits of txop */
+#define DOT11_RC_LEAVING_QBSS		36	/* QSTA is leaving the QBSS (or resetting) */
+#define DOT11_RC_BAD_MECHANISM		37	/* does not want to use the mechanism */
+#define DOT11_RC_SETUP_NEEDED		38	/* mechanism needs a setup */
+#define DOT11_RC_TIMEOUT		39	/* timeout */
+
+#define DOT11_RC_MAX			23	/* Reason codes > 23 are reserved */
+
+#define DOT11_RC_TDLS_PEER_UNREACH	25
+#define DOT11_RC_TDLS_DOWN_UNSPECIFIED	26
+
+/* Status Codes */
+#define DOT11_SC_SUCCESS		0	/* Successful */
+#define DOT11_SC_FAILURE		1	/* Unspecified failure */
+#define DOT11_SC_TDLS_WAKEUP_SCH_ALT 2	/* TDLS wakeup schedule rejected but alternative  */
+					/* schedule provided */
+#define DOT11_SC_TDLS_WAKEUP_SCH_REJ 3	/* TDLS wakeup schedule rejected */
+#define DOT11_SC_TDLS_SEC_DISABLED	5	/* TDLS Security disabled */
+#define DOT11_SC_LIFETIME_REJ		6	/* Unacceptable lifetime */
+#define DOT11_SC_NOT_SAME_BSS		7	/* Not in same BSS */
+#define DOT11_SC_CAP_MISMATCH		10	/* Cannot support all requested
+						 * capabilities in the Capability
+						 * Information field
+						 */
+#define DOT11_SC_REASSOC_FAIL		11	/* Reassociation denied due to inability
+						 * to confirm that association exists
+						 */
+#define DOT11_SC_ASSOC_FAIL		12	/* Association denied due to reason
+						 * outside the scope of this standard
+						 */
+#define DOT11_SC_AUTH_MISMATCH		13	/* Responding station does not support
+						 * the specified authentication
+						 * algorithm
+						 */
+#define DOT11_SC_AUTH_SEQ		14	/* Received an Authentication frame
+						 * with authentication transaction
+						 * sequence number out of expected
+						 * sequence
+						 */
+#define DOT11_SC_AUTH_CHALLENGE_FAIL	15	/* Authentication rejected because of
+						 * challenge failure
+						 */
+#define DOT11_SC_AUTH_TIMEOUT		16	/* Authentication rejected due to timeout
+						 * waiting for next frame in sequence
+						 */
+#define DOT11_SC_ASSOC_BUSY_FAIL	17	/* Association denied because AP is
+						 * unable to handle additional
+						 * associated stations
+						 */
+#define DOT11_SC_ASSOC_RATE_MISMATCH	18	/* Association denied due to requesting
+						 * station not supporting all of the
+						 * data rates in the BSSBasicRateSet
+						 * parameter
+						 */
+#define DOT11_SC_ASSOC_SHORT_REQUIRED	19	/* Association denied due to requesting
+						 * station not supporting the Short
+						 * Preamble option
+						 */
+#define DOT11_SC_ASSOC_PBCC_REQUIRED	20	/* Association denied due to requesting
+						 * station not supporting the PBCC
+						 * Modulation option
+						 */
+#define DOT11_SC_ASSOC_AGILITY_REQUIRED	21	/* Association denied due to requesting
+						 * station not supporting the Channel
+						 * Agility option
+						 */
+#define DOT11_SC_ASSOC_SPECTRUM_REQUIRED	22	/* Association denied because Spectrum
+							 * Management capability is required.
+							 */
+#define DOT11_SC_ASSOC_BAD_POWER_CAP	23	/* Association denied because the info
+						 * in the Power Cap element is
+						 * unacceptable.
+						 */
+#define DOT11_SC_ASSOC_BAD_SUP_CHANNELS	24	/* Association denied because the info
+						 * in the Supported Channel element is
+						 * unacceptable
+						 */
+#define DOT11_SC_ASSOC_SHORTSLOT_REQUIRED	25	/* Association denied due to requesting
+							 * station not supporting the Short Slot
+							 * Time option
+							 */
+#define DOT11_SC_ASSOC_DSSSOFDM_REQUIRED 26	/* Association denied because requesting station
+						 * does not support the DSSS-OFDM option
+						 */
+#define DOT11_SC_ASSOC_HT_REQUIRED	27	/* Association denied because the requesting
+						 * station does not support HT features
+						 */
+#define DOT11_SC_ASSOC_R0KH_UNREACHABLE	28	/* Association denied due to AP
+						 * being unable to reach the R0 Key Holder
+						 */
+#define DOT11_SC_ASSOC_TRY_LATER	30	/* Association denied temporarily, try again later
+						 */
+#define DOT11_SC_ASSOC_MFP_VIOLATION	31	/* Association denied due to Robust Management
+						 * frame policy violation
+						 */
+
+#define	DOT11_SC_DECLINED		37	/* request declined */
+#define	DOT11_SC_INVALID_PARAMS		38	/* One or more params have invalid values */
+#define DOT11_SC_INVALID_PAIRWISE_CIPHER	42 /* invalid pairwise cipher */
+#define	DOT11_SC_INVALID_AKMP		43	/* Association denied due to invalid AKMP */
+#define DOT11_SC_INVALID_RSNIE_CAP	45	/* invalid RSN IE capabilities */
+#define DOT11_SC_DLS_NOT_ALLOWED	48	/* DLS is not allowed in the BSS by policy */
+#define	DOT11_SC_INVALID_PMKID		53	/* Association denied due to invalid PMKID */
+#define	DOT11_SC_INVALID_MDID		54	/* Association denied due to invalid MDID */
+#define	DOT11_SC_INVALID_FTIE		55	/* Association denied due to invalid FTIE */
+
+#define DOT11_SC_ADV_PROTO_NOT_SUPPORTED	59	/* advertisement protocol not supported */
+#define DOT11_SC_NO_OUTSTAND_REQ		60	/* no outstanding request */
+#define DOT11_SC_RSP_NOT_RX_FROM_SERVER		61	/* no response received from server */
+#define DOT11_SC_TIMEOUT			62	/* timeout */
+#define DOT11_SC_QUERY_RSP_TOO_LARGE		63	/* query response too large */
+#define DOT11_SC_SERVER_UNREACHABLE		65	/* server unreachable */
+
+#define DOT11_SC_UNEXP_MSG			70	/* Unexpected message */
+#define DOT11_SC_INVALID_SNONCE		71	/* Invalid SNonce */
+#define DOT11_SC_INVALID_RSNIE		72	/* Invalid contents of RSNIE */
+#define DOT11_SC_ASSOC_VHT_REQUIRED	104	/* Association denied because the requesting
+						 * station does not support VHT features.
+						 */
+
+#define DOT11_SC_TRANSMIT_FAILURE	79	/* transmission failure */
+
+/* Info Elts, length of INFORMATION portion of Info Elts */
+#define DOT11_MNG_DS_PARAM_LEN			1	/* d11 management DS parameter length */
+#define DOT11_MNG_IBSS_PARAM_LEN		2	/* d11 management IBSS parameter length */
+
+/* TIM Info element has 3 bytes fixed info in INFORMATION field,
+ * followed by 1 to 251 bytes of Partial Virtual Bitmap
+ */
+#define DOT11_MNG_TIM_FIXED_LEN			3	/* d11 management TIM fixed length */
+#define DOT11_MNG_TIM_DTIM_COUNT		0	/* d11 management DTIM count */
+#define DOT11_MNG_TIM_DTIM_PERIOD		1	/* d11 management DTIM period */
+#define DOT11_MNG_TIM_BITMAP_CTL		2	/* d11 management TIM BITMAP control  */
+#define DOT11_MNG_TIM_PVB			3	/* d11 management TIM PVB */
+
+/* TLV defines */
+#define TLV_TAG_OFF		0	/* tag offset */
+#define TLV_LEN_OFF		1	/* length offset */
+#define TLV_HDR_LEN		2	/* header length */
+#define TLV_BODY_OFF		2	/* body offset */
+#define TLV_BODY_LEN_MAX	255	/* max body length */
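+
+/* Illustrative sketch (not original code): walk a buffer of TLV-encoded
+ * information elements with the offsets above, returning the first
+ * element carrying 'tag', or NULL. A truncated element stops the walk.
+ */
+static const uint8 *tlv_find(const uint8 *buf, int buflen, uint8 tag)
+{
+	while (buflen >= TLV_HDR_LEN) {
+		int body_len = buf[TLV_LEN_OFF];
+
+		if (buflen < TLV_HDR_LEN + body_len)
+			break;			/* truncated element */
+		if (buf[TLV_TAG_OFF] == tag)
+			return buf;
+		buf += TLV_HDR_LEN + body_len;
+		buflen -= TLV_HDR_LEN + body_len;
+	}
+	return (const uint8 *)0;
+}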
+
+/* Management Frame Information Element IDs */
+#define DOT11_MNG_SSID_ID			0	/* d11 management SSID id */
+#define DOT11_MNG_RATES_ID			1	/* d11 management rates id */
+#define DOT11_MNG_FH_PARMS_ID			2	/* d11 management FH parameter id */
+#define DOT11_MNG_DS_PARMS_ID			3	/* d11 management DS parameter id */
+#define DOT11_MNG_CF_PARMS_ID			4	/* d11 management CF parameter id */
+#define DOT11_MNG_TIM_ID			5	/* d11 management TIM id */
+#define DOT11_MNG_IBSS_PARMS_ID			6	/* d11 management IBSS parameter id */
+#define DOT11_MNG_COUNTRY_ID			7	/* d11 management country id */
+#define DOT11_MNG_HOPPING_PARMS_ID		8	/* d11 management hopping parameter id */
+#define DOT11_MNG_HOPPING_TABLE_ID		9	/* d11 management hopping table id */
+#define DOT11_MNG_REQUEST_ID			10	/* d11 management request id */
+#define DOT11_MNG_QBSS_LOAD_ID 			11	/* d11 management QBSS Load id */
+#define DOT11_MNG_EDCA_PARAM_ID			12	/* 11E EDCA Parameter id */
+#define DOT11_MNG_TSPEC_ID			13	/* d11 management TSPEC id */
+#define DOT11_MNG_TCLAS_ID			14	/* d11 management TCLAS id */
+#define DOT11_MNG_CHALLENGE_ID			16	/* d11 management challenge id */
+#define DOT11_MNG_PWR_CONSTRAINT_ID		32	/* 11H PowerConstraint */
+#define DOT11_MNG_PWR_CAP_ID			33	/* 11H PowerCapability */
+#define DOT11_MNG_TPC_REQUEST_ID 		34	/* 11H TPC Request */
+#define DOT11_MNG_TPC_REPORT_ID			35	/* 11H TPC Report */
+#define DOT11_MNG_SUPP_CHANNELS_ID		36	/* 11H Supported Channels */
+#define DOT11_MNG_CHANNEL_SWITCH_ID		37	/* 11H ChannelSwitch Announcement */
+#define DOT11_MNG_MEASURE_REQUEST_ID		38	/* 11H MeasurementRequest */
+#define DOT11_MNG_MEASURE_REPORT_ID		39	/* 11H MeasurementReport */
+#define DOT11_MNG_QUIET_ID			40	/* 11H Quiet */
+#define DOT11_MNG_IBSS_DFS_ID			41	/* 11H IBSS_DFS */
+#define DOT11_MNG_ERP_ID			42	/* d11 management ERP id */
+#define DOT11_MNG_TS_DELAY_ID			43	/* d11 management TS Delay id */
+#define DOT11_MNG_TCLAS_PROC_ID			44	/* d11 management TCLAS processing id */
+#define	DOT11_MNG_HT_CAP			45	/* d11 mgmt HT cap id */
+#define DOT11_MNG_QOS_CAP_ID			46	/* 11E QoS Capability id */
+#define DOT11_MNG_NONERP_ID			47	/* d11 management NON-ERP id */
+#define DOT11_MNG_RSN_ID			48	/* d11 management RSN id */
+#define DOT11_MNG_EXT_RATES_ID			50	/* d11 management ext. rates id */
+#define DOT11_MNG_AP_CHREP_ID			51	/* 11k AP Channel report id */
+#define DOT11_MNG_NEIGHBOR_REP_ID		52	/* 11k & 11v Neighbor report id */
+#define DOT11_MNG_RCPI_ID			53	/* 11k RCPI */
+#define DOT11_MNG_MDIE_ID			54	/* 11r Mobility domain id */
+#define DOT11_MNG_FTIE_ID			55	/* 11r Fast Bss Transition id */
+#define DOT11_MNG_FT_TI_ID			56	/* 11r Timeout Interval id */
+#define DOT11_MNG_RDE_ID			57	/* 11r RIC Data Element id */
+#define	DOT11_MNG_REGCLASS_ID			59	/* d11 management regulatory class id */
+#define DOT11_MNG_EXT_CSA_ID			60	/* d11 Extended CSA */
+#define	DOT11_MNG_HT_ADD			61	/* d11 mgmt additional HT info */
+#define	DOT11_MNG_EXT_CHANNEL_OFFSET		62	/* d11 mgmt ext channel offset */
+#define DOT11_MNG_BSS_AVR_ACCESS_DELAY_ID	63	/* 11k bss average access delay */
+#define DOT11_MNG_ANTENNA_ID			64	/* 11k antenna id */
+#define DOT11_MNG_RSNI_ID			65	/* 11k RSNI id */
+#define DOT11_MNG_MEASUREMENT_PILOT_TX_ID	66	/* 11k measurement pilot tx info id */
+#define DOT11_MNG_BSS_AVAL_ADMISSION_CAP_ID	67	/* 11k bss aval admission cap id */
+#define DOT11_MNG_BSS_AC_ACCESS_DELAY_ID	68	/* 11k bss AC access delay id */
+#define DOT11_MNG_WAPI_ID			68	/* d11 management WAPI id */
+#define DOT11_MNG_TIME_ADVERTISE_ID	69	/* 11p time advertisement */
+#define DOT11_MNG_RRM_CAP_ID		70	/* 11k radio measurement capability */
+#define DOT11_MNG_MULTIPLE_BSSID_ID		71	/* 11k multiple BSSID id */
+#define	DOT11_MNG_HT_BSS_COEXINFO_ID		72	/* d11 mgmt OBSS Coexistence INFO */
+#define	DOT11_MNG_HT_BSS_CHANNEL_REPORT_ID	73	/* d11 mgmt OBSS Intolerant Channel list */
+#define	DOT11_MNG_HT_OBSS_ID			74	/* d11 mgmt OBSS HT info */
+#define DOT11_MNG_MMIE_ID			76	/* d11 mgmt MIC IE */
+#define DOT11_MNG_FMS_DESCR_ID			86	/* 11v FMS descriptor */
+#define DOT11_MNG_FMS_REQ_ID			87	/* 11v FMS request id */
+#define DOT11_MNG_FMS_RESP_ID			88	/* 11v FMS response id */
+#define DOT11_MNG_BSS_MAX_IDLE_PERIOD_ID	90	/* 11v bss max idle id */
+#define DOT11_MNG_TFS_REQUEST_ID		91	/* 11v tfs request id */
+#define DOT11_MNG_TFS_RESPONSE_ID		92	/* 11v tfs response id */
+#define DOT11_MNG_WNM_SLEEP_MODE_ID		93	/* 11v wnm-sleep mode id */
+#define DOT11_MNG_TIMBC_REQ_ID			94	/* 11v TIM broadcast request id */
+#define DOT11_MNG_TIMBC_RESP_ID			95	/* 11v TIM broadcast response id */
+#define DOT11_MNG_CHANNEL_USAGE			97	/* 11v channel usage */
+#define DOT11_MNG_TIME_ZONE_ID			98	/* 11v time zone */
+#define DOT11_MNG_DMS_REQUEST_ID		99	/* 11v dms request id */
+#define DOT11_MNG_DMS_RESPONSE_ID		100	/* 11v dms response id */
+#define DOT11_MNG_LINK_IDENTIFIER_ID		101	/* 11z TDLS Link Identifier IE */
+#define DOT11_MNG_WAKEUP_SCHEDULE_ID		102	/* 11z TDLS Wakeup Schedule IE */
+#define DOT11_MNG_CHANNEL_SWITCH_TIMING_ID	104	/* 11z TDLS Channel Switch Timing IE */
+#define DOT11_MNG_PTI_CONTROL_ID		105	/* 11z TDLS PTI Control IE */
+#define DOT11_MNG_PU_BUFFER_STATUS_ID	106	/* 11z TDLS PU Buffer Status IE */
+#define DOT11_MNG_INTERWORKING_ID		107	/* 11u interworking */
+#define DOT11_MNG_ADVERTISEMENT_ID		108	/* 11u advertisement protocol */
+#define DOT11_MNG_EXP_BW_REQ_ID			109	/* 11u expedited bandwidth request */
+#define DOT11_MNG_QOS_MAP_ID			110	/* 11u QoS map set */
+#define DOT11_MNG_ROAM_CONSORT_ID		111	/* 11u roaming consortium */
+#define DOT11_MNG_EMERGCY_ALERT_ID		112	/* 11u emergency alert identifier */
+#define	DOT11_MNG_EXT_CAP_ID			127	/* d11 mgmt ext capability */
+#define	DOT11_MNG_VHT_CAP_ID			191	/* d11 mgmt VHT cap id */
+#define	DOT11_MNG_VHT_OPERATION_ID		192	/* d11 mgmt VHT op id */
+#define DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID		194	/* Wide BW Channel Switch IE */
+#define DOT11_MNG_VHT_TRANSMIT_POWER_ENVELOPE_ID	195	/* VHT transmit Power Envelope IE */
+#define DOT11_MNG_CHANNEL_SWITCH_WRAPPER_ID		196	/* Channel Switch Wrapper IE */
+#define DOT11_MNG_AID_ID			197	/* Association ID IE */
+#define	DOT11_MNG_OPER_MODE_NOTIF_ID	199	/* d11 mgmt VHT oper mode notif */
+
+
+#define DOT11_MNG_WPA_ID			221	/* d11 management WPA id */
+#define DOT11_MNG_PROPR_ID			221	/* d11 management proprietary id */
+/* should start using this one instead of above two */
+#define DOT11_MNG_VS_ID				221	/* d11 management Vendor Specific IE */
+
+/* Rate Defines */
+
+/* Valid rates for the Supported Rates and Extended Supported Rates IEs.
+ * Encoding is the rate in 500kbps units, rounding up for fractional values.
+ * 802.11-2012, section 6.5.5.2, DATA_RATE parameter enumerates all the values.
+ * The rate values cover DSSS, HR/DSSS, ERP, and OFDM phy rates.
+ * The defines below do not cover the rates specific to 10MHz, {3, 4.5, 27},
+ * and 5MHz, {1.5, 2.25, 3, 4.5, 13.5}, which are not supported by Broadcom devices.
+ */
+
+#define DOT11_RATE_1M   2       /* 1  Mbps in 500kbps units */
+#define DOT11_RATE_2M   4       /* 2  Mbps in 500kbps units */
+#define DOT11_RATE_5M5  11      /* 5.5 Mbps in 500kbps units */
+#define DOT11_RATE_11M  22      /* 11 Mbps in 500kbps units */
+#define DOT11_RATE_6M   12      /* 6  Mbps in 500kbps units */
+#define DOT11_RATE_9M   18      /* 9  Mbps in 500kbps units */
+#define DOT11_RATE_12M  24      /* 12 Mbps in 500kbps units */
+#define DOT11_RATE_18M  36      /* 18 Mbps in 500kbps units */
+#define DOT11_RATE_24M  48      /* 24 Mbps in 500kbps units */
+#define DOT11_RATE_36M  72      /* 36 Mbps in 500kbps units */
+#define DOT11_RATE_48M  96      /* 48 Mbps in 500kbps units */
+#define DOT11_RATE_54M  108     /* 54 Mbps in 500kbps units */
+#define DOT11_RATE_MAX  108     /* highest rate (54 Mbps) in 500kbps units */
+
+/* Supported Rates and Extended Supported Rates IEs
+ * Each supported rates octet is defined with the MSB indicating a Basic Rate
+ * and bits 0-6 carrying the rate value.
+ */
+#define DOT11_RATE_BASIC                0x80 /* flag for a Basic Rate */
+#define DOT11_RATE_MASK                 0x7F /* mask for numeric part of rate */
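+
+/* Illustrative sketch (not original code): split a supported rates
+ * octet. 0x96 decodes to rate 22 (11 Mbps) marked basic; 0x24 decodes
+ * to rate 36 (18 Mbps), not basic.
+ */
+static uint8 rate_value(uint8 rate_octet)
+{
+	return rate_octet & DOT11_RATE_MASK;
+}
+
+static int rate_is_basic(uint8 rate_octet)
+{
+	return (rate_octet & DOT11_RATE_BASIC) != 0;
+}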
+
+/* BSS Membership Selector parameters
+ * 802.11-2012 and 802.11ac_D4.0 sec 8.4.2.3
+ * These selector values are advertised in Supported Rates and Extended Supported Rates IEs
+ * in the supported rates list with the Basic rate bit set.
+ * Constants below include the basic bit.
+ */
+#define DOT11_BSS_MEMBERSHIP_HT         0xFF  /* Basic 0x80 + 127, HT Required to join */
+#define DOT11_BSS_MEMBERSHIP_VHT        0xFE  /* Basic 0x80 + 126, VHT Required to join */
+
+/* ERP info element bit values */
+#define DOT11_MNG_ERP_LEN			1	/* ERP is currently 1 byte long */
+#define DOT11_MNG_NONERP_PRESENT		0x01	/* NonERP (802.11b) STAs are present
+							 * in the BSS
+							 */
+#define DOT11_MNG_USE_PROTECTION		0x02	/* Use protection mechanisms for
+							 * ERP-OFDM frames
+							 */
+#define DOT11_MNG_BARKER_PREAMBLE		0x04	/* Short Preambles: 0 == allowed,
+							 * 1 == not allowed
+							 */
+/* TS Delay element offset & size */
+#define DOT11_MGN_TS_DELAY_LEN		4	/* length of TS DELAY IE */
+#define TS_DELAY_FIELD_SIZE			4	/* TS DELAY field size */
+
+/* Capability Information Field */
+#define DOT11_CAP_ESS				0x0001	/* d11 cap. ESS */
+#define DOT11_CAP_IBSS				0x0002	/* d11 cap. IBSS */
+#define DOT11_CAP_POLLABLE			0x0004	/* d11 cap. pollable */
+#define DOT11_CAP_POLL_RQ			0x0008	/* d11 cap. poll request */
+#define DOT11_CAP_PRIVACY			0x0010	/* d11 cap. privacy */
+#define DOT11_CAP_SHORT				0x0020	/* d11 cap. short */
+#define DOT11_CAP_PBCC				0x0040	/* d11 cap. PBCC */
+#define DOT11_CAP_AGILITY			0x0080	/* d11 cap. agility */
+#define DOT11_CAP_SPECTRUM			0x0100	/* d11 cap. spectrum */
+#define DOT11_CAP_QOS				0x0200	/* d11 cap. qos */
+#define DOT11_CAP_SHORTSLOT			0x0400	/* d11 cap. shortslot */
+#define DOT11_CAP_APSD				0x0800	/* d11 cap. apsd */
+#define DOT11_CAP_RRM				0x1000	/* d11 cap. 11k radio measurement */
+#define DOT11_CAP_CCK_OFDM			0x2000	/* d11 cap. CCK/OFDM */
+#define DOT11_CAP_DELAY_BA			0x4000	/* d11 cap. delayed block ack */
+#define DOT11_CAP_IMMEDIATE_BA			0x8000	/* d11 cap. immediate block ack */
+
+/* Extended capabilities IE bitfields */
+/* 20/40 BSS Coexistence Management support bit position */
+#define DOT11_EXT_CAP_OBSS_COEX_MGMT		0
+/* Extended Channel Switching support bit position */
+#define DOT11_EXT_CAP_EXT_CHAN_SWITCHING	2
+/* scheduled PSMP support bit position */
+#define DOT11_EXT_CAP_SPSMP			6
+/*  Flexible Multicast Service */
+#define DOT11_EXT_CAP_FMS			11
+/* proxy ARP service support bit position */
+#define DOT11_EXT_CAP_PROXY_ARP			12
+/* Traffic Filter Service */
+#define DOT11_EXT_CAP_TFS			16
+/* WNM-Sleep Mode */
+#define DOT11_EXT_CAP_WNM_SLEEP			17
+/* TIM Broadcast service */
+#define DOT11_EXT_CAP_TIMBC			18
+/* BSS Transition Management support bit position */
+#define DOT11_EXT_CAP_BSSTRANS_MGMT		19
+/* Direct Multicast Service */
+#define DOT11_EXT_CAP_DMS			26
+/* Interworking support bit position */
+#define DOT11_EXT_CAP_IW			31
+/* service Interval granularity bit position and mask */
+#define DOT11_EXT_CAP_SI			41
+#define DOT11_EXT_CAP_SI_MASK			0x0E
+/* WNM notification */
+#define DOT11_EXT_CAP_WNM_NOTIF			46
+/* Operating mode notification - VHT (11ac D3.0 - 8.4.2.29) */
+#define DOT11_EXT_CAP_OPER_MODE_NOTIF		62
+
+/* VHT Operating mode bit fields -  (11ac D3.0 - 8.4.1.50) */
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT 0
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_MASK 0x3
+#define DOT11_OPER_MODE_RXNSS_SHIFT 4
+#define DOT11_OPER_MODE_RXNSS_MASK 0x70
+#define DOT11_OPER_MODE_RXNSS_TYPE_SHIFT 7
+#define DOT11_OPER_MODE_RXNSS_TYPE_MASK 0x80
+
+#define DOT11_OPER_MODE(type, nss, chanw) (\
+	((type) << DOT11_OPER_MODE_RXNSS_TYPE_SHIFT &\
+		 DOT11_OPER_MODE_RXNSS_TYPE_MASK) |\
+	(((nss) - 1) << DOT11_OPER_MODE_RXNSS_SHIFT & DOT11_OPER_MODE_RXNSS_MASK) |\
+	((chanw) << DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT &\
+		 DOT11_OPER_MODE_CHANNEL_WIDTH_MASK))
+
+#define DOT11_OPER_MODE_CHANNEL_WIDTH(mode) \
+	(((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK)\
+		>> DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT)
+#define DOT11_OPER_MODE_RXNSS(mode) \
+	((((mode) & DOT11_OPER_MODE_RXNSS_MASK)		\
+		>> DOT11_OPER_MODE_RXNSS_SHIFT) + 1)
+#define DOT11_OPER_MODE_RXNSS_TYPE(mode) \
+	(((mode) & DOT11_OPER_MODE_RXNSS_TYPE_MASK)\
+		>> DOT11_OPER_MODE_RXNSS_TYPE_SHIFT)
+
+#define DOT11_OPER_MODE_20MHZ 0
+#define DOT11_OPER_MODE_40MHZ 1
+#define DOT11_OPER_MODE_80MHZ 2
+#define DOT11_OPER_MODE_160MHZ 3
+#define DOT11_OPER_MODE_8080MHZ 3
+
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_20MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_20MHZ)
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_40MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_40MHZ)
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_80MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_80MHZ)
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_160MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_160MHZ)
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_8080MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_8080MHZ)
+
+/* Operating mode information element 802.11ac D3.0 - 8.4.2.168 */
+BWL_PRE_PACKED_STRUCT struct dot11_oper_mode_notif_ie {
+	uint8 mode;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_oper_mode_notif_ie dot11_oper_mode_notif_ie_t;
+
+#define DOT11_OPER_MODE_NOTIF_IE_LEN 1
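+
+/* Illustrative sketch (not original code): compose an operating mode
+ * byte advertising 2 RX spatial streams at 80 MHz and read it back.
+ * DOT11_OPER_MODE(0, 2, DOT11_OPER_MODE_80MHZ) yields 0x12, for which
+ * DOT11_OPER_MODE_RXNSS() returns 2 and the 80 MHz width test is true.
+ */
+static uint8 oper_mode_example(void)
+{
+	uint8 mode = DOT11_OPER_MODE(0, 2, DOT11_OPER_MODE_80MHZ);	/* 0x12 */
+
+	return DOT11_OPER_MODE_CHANNEL_WIDTH_80MHZ(mode) ?
+	       (uint8)DOT11_OPER_MODE_RXNSS(mode) : 0;			/* 2 */
+}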
+
+/* Extended Capability Information Field */
+#define DOT11_OBSS_COEX_MNG_SUPPORT	0x01	/* 20/40 BSS Coexistence Management support */
+
+/*
+ * Action Frame Constants
+ */
+#define DOT11_ACTION_HDR_LEN		2	/* action frame category + action field */
+#define DOT11_ACTION_CAT_OFF		0	/* category offset */
+#define DOT11_ACTION_ACT_OFF		1	/* action offset */
+
+/* Action Category field (sec 8.4.1.11) */
+#define DOT11_ACTION_CAT_ERR_MASK	0x80	/* category error mask */
+#define DOT11_ACTION_CAT_MASK		0x7F	/* category mask */
+#define DOT11_ACTION_CAT_SPECT_MNG	0	/* category spectrum management */
+#define DOT11_ACTION_CAT_QOS		1	/* category QoS */
+#define DOT11_ACTION_CAT_DLS		2	/* category DLS */
+#define DOT11_ACTION_CAT_BLOCKACK	3	/* category block ack */
+#define DOT11_ACTION_CAT_PUBLIC		4	/* category public */
+#define DOT11_ACTION_CAT_RRM		5	/* category radio measurements */
+#define DOT11_ACTION_CAT_FBT	6	/* category fast bss transition */
+#define DOT11_ACTION_CAT_HT		7	/* category for HT */
+#define	DOT11_ACTION_CAT_SA_QUERY	8	/* security association query */
+#define	DOT11_ACTION_CAT_PDPA		9	/* protected dual of public action */
+#define DOT11_ACTION_CAT_WNM		10	/* category for WNM */
+#define DOT11_ACTION_CAT_UWNM		11	/* category for Unprotected WNM */
+#define DOT11_ACTION_NOTIFICATION	17
+#define DOT11_ACTION_CAT_VHT		21	/* VHT action */
+#define DOT11_ACTION_CAT_VSP		126	/* protected vendor specific */
+#define DOT11_ACTION_CAT_VS		127	/* category Vendor Specific */
+
+/* Spectrum Management Action IDs (sec 7.4.1) */
+#define DOT11_SM_ACTION_M_REQ		0	/* d11 action measurement request */
+#define DOT11_SM_ACTION_M_REP		1	/* d11 action measurement response */
+#define DOT11_SM_ACTION_TPC_REQ		2	/* d11 action TPC request */
+#define DOT11_SM_ACTION_TPC_REP		3	/* d11 action TPC response */
+#define DOT11_SM_ACTION_CHANNEL_SWITCH	4	/* d11 action channel switch */
+#define DOT11_SM_ACTION_EXT_CSA		5	/* d11 extended CSA for 11n */
+
+/* HT action ids */
+#define DOT11_ACTION_ID_HT_CH_WIDTH	0	/* notify channel width action id */
+#define DOT11_ACTION_ID_HT_MIMO_PS	1	/* mimo ps action id */
+
+/* Public action ids */
+#define DOT11_PUB_ACTION_BSS_COEX_MNG	0	/* 20/40 Coexistence Management action id */
+#define DOT11_PUB_ACTION_CHANNEL_SWITCH	4	/* d11 action channel switch */
+#define DOT11_PUB_ACTION_GAS_CB_REQ	12	/* GAS Comeback Request */
+
+/* Block Ack action types */
+#define DOT11_BA_ACTION_ADDBA_REQ	0	/* ADDBA Req action frame type */
+#define DOT11_BA_ACTION_ADDBA_RESP	1	/* ADDBA Resp action frame type */
+#define DOT11_BA_ACTION_DELBA		2	/* DELBA action frame type */
+
+/* ADDBA action parameters */
+#define DOT11_ADDBA_PARAM_AMSDU_SUP	0x0001	/* AMSDU supported under BA */
+#define DOT11_ADDBA_PARAM_POLICY_MASK	0x0002	/* policy mask(ack vs delayed) */
+#define DOT11_ADDBA_PARAM_POLICY_SHIFT	1	/* policy shift */
+#define DOT11_ADDBA_PARAM_TID_MASK	0x003c	/* tid mask */
+#define DOT11_ADDBA_PARAM_TID_SHIFT	2	/* tid shift */
+#define DOT11_ADDBA_PARAM_BSIZE_MASK	0xffc0	/* buffer size mask */
+#define DOT11_ADDBA_PARAM_BSIZE_SHIFT	6	/* buffer size shift */
+
+#define DOT11_ADDBA_POLICY_DELAYED	0	/* delayed BA policy */
+#define DOT11_ADDBA_POLICY_IMMEDIATE	1	/* immediate BA policy */
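+
+/* Worked example (illustrative only): an ADDBA parameter set of 0x1002
+ * encodes immediate BA policy, TID 0 and a 64-frame reorder buffer.
+ */
+static uint16 addba_bufsize(uint16 params)
+{
+	return (params & DOT11_ADDBA_PARAM_BSIZE_MASK) >> DOT11_ADDBA_PARAM_BSIZE_SHIFT;
+}
+
+static uint8 addba_tid(uint16 params)
+{
+	return (uint8)((params & DOT11_ADDBA_PARAM_TID_MASK) >> DOT11_ADDBA_PARAM_TID_SHIFT);
+}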
+
+/* Fast Transition action types */
+#define DOT11_FT_ACTION_FT_RESERVED		0
+#define DOT11_FT_ACTION_FT_REQ			1	/* FBT request - for over-the-DS FBT */
+#define DOT11_FT_ACTION_FT_RES			2	/* FBT response - for over-the-DS FBT */
+#define DOT11_FT_ACTION_FT_CON			3	/* FBT confirm - for OTDS with RRP */
+#define DOT11_FT_ACTION_FT_ACK			4	/* FBT ack */
+
+/* DLS action types */
+#define DOT11_DLS_ACTION_REQ			0	/* DLS Request */
+#define DOT11_DLS_ACTION_RESP			1	/* DLS Response */
+#define DOT11_DLS_ACTION_TD			2	/* DLS Teardown */
+
+/* Wireless Network Management (WNM) action types */
+#define DOT11_WNM_ACTION_EVENT_REQ		0
+#define DOT11_WNM_ACTION_EVENT_REP		1
+#define DOT11_WNM_ACTION_DIAG_REQ		2
+#define DOT11_WNM_ACTION_DIAG_REP		3
+#define DOT11_WNM_ACTION_LOC_CFG_REQ		4
+#define DOT11_WNM_ACTION_LOC_RFG_RESP		5
+#define DOT11_WNM_ACTION_BSSTRANS_QUERY		6
+#define DOT11_WNM_ACTION_BSSTRANS_REQ		7
+#define DOT11_WNM_ACTION_BSSTRANS_RESP		8
+#define DOT11_WNM_ACTION_FMS_REQ		9
+#define DOT11_WNM_ACTION_FMS_RESP		10
+#define DOT11_WNM_ACTION_COL_INTRFRNCE_REQ	11
+#define DOT11_WNM_ACTION_COL_INTRFRNCE_REP	12
+#define DOT11_WNM_ACTION_TFS_REQ		13
+#define DOT11_WNM_ACTION_TFS_RESP		14
+#define DOT11_WNM_ACTION_TFS_NOTIFY_REQ		15
+#define DOT11_WNM_ACTION_WNM_SLEEP_REQ		16
+#define DOT11_WNM_ACTION_WNM_SLEEP_RESP		17
+#define DOT11_WNM_ACTION_TIMBC_REQ		18
+#define DOT11_WNM_ACTION_TIMBC_RESP		19
+#define DOT11_WNM_ACTION_QOS_TRFC_CAP_UPD	20
+#define DOT11_WNM_ACTION_CHAN_USAGE_REQ		21
+#define DOT11_WNM_ACTION_CHAN_USAGE_RESP	22
+#define DOT11_WNM_ACTION_DMS_REQ		23
+#define DOT11_WNM_ACTION_DMS_RESP		24
+#define DOT11_WNM_ACTION_TMNG_MEASUR_REQ	25
+#define DOT11_WNM_ACTION_NOTFCTN_REQ		26
+#define DOT11_WNM_ACTION_NOTFCTN_RESP		27
+#define DOT11_WNM_ACTION_TFS_NOTIFY_RESP	28
+
+/* Unprotected Wireless Network Management (WNM) action types */
+#define DOT11_UWNM_ACTION_TIM			0
+#define DOT11_UWNM_ACTION_TIMING_MEASUREMENT	1
+
+#define DOT11_MNG_COUNTRY_ID_LEN 3
+
+/* VHT category action types - 802.11ac D3.0 - 8.5.23.1 */
+#define DOT11_VHT_ACTION_CBF				0	/* Compressed Beamforming */
+#define DOT11_VHT_ACTION_GID_MGMT			1	/* Group ID Management */
+#define DOT11_VHT_ACTION_OPER_MODE_NOTIF	2	/* Operating mode notif'n */
+
+/* DLS Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dls_req {
+	uint8 category;			/* category of action frame (2) */
+	uint8 action;				/* DLS action: req (0) */
+	struct ether_addr	da;		/* destination address */
+	struct ether_addr	sa;		/* source address */
+	uint16 cap;				/* capability */
+	uint16 timeout;			/* timeout value */
+	uint8 data[1];				/* IEs: supported rates, extended supported rates, HT cap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dls_req dot11_dls_req_t;
+#define DOT11_DLS_REQ_LEN 18	/* Fixed length */
+
+/* DLS response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dls_resp {
+	uint8 category;			/* category of action frame (2) */
+	uint8 action;				/* DLS action: resp (1) */
+	uint16 status;				/* status code field */
+	struct ether_addr	da;		/* destination address */
+	struct ether_addr	sa;		/* source address */
+	uint8 data[1];				/* optional: capability, rate ... */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dls_resp dot11_dls_resp_t;
+#define DOT11_DLS_RESP_LEN 16	/* Fixed length */
+
+
+/* ************* 802.11v related definitions. ************* */
+
+/* BSS Management Transition Query frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_query {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: trans_query (6) */
+	uint8 token;			/* dialog token */
+	uint8 reason;			/* transition query reason */
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bsstrans_query dot11_bsstrans_query_t;
+#define DOT11_BSSTRANS_QUERY_LEN 4	/* Fixed length */
+
+/* BSS Management Transition Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_req {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: trans_req (7) */
+	uint8 token;			/* dialog token */
+	uint8 reqmode;			/* transition request mode */
+	uint16 disassoc_tmr;		/* disassociation timer */
+	uint8 validity_intrvl;		/* validity interval */
+	uint8 data[1];			/* optional: BSS term duration, ... */
+						/* ...session info URL, candidate list */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bsstrans_req dot11_bsstrans_req_t;
+#define DOT11_BSSTRANS_REQ_LEN 7	/* Fixed length */
+
+/* BSS Mgmt Transition Request Mode Field - 802.11v */
+#define DOT11_BSSTRANS_REQMODE_PREF_LIST_INCL		0x01
+#define DOT11_BSSTRANS_REQMODE_ABRIDGED			0x02
+#define DOT11_BSSTRANS_REQMODE_DISASSOC_IMMINENT	0x04
+#define DOT11_BSSTRANS_REQMODE_BSS_TERM_INCL		0x08
+#define DOT11_BSSTRANS_REQMODE_ESS_DISASSOC_IMNT	0x10
+
+/* BSS Management transition response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_resp {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* WNM action: trans_resp (8) */
+	uint8 token;			/* dialog token */
+	uint8 status;			/* transition status */
+	uint8 term_delay;		/* BSS termination delay */
+	uint8 data[1];			/* optional: BSSID target, candidate list */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bsstrans_resp dot11_bsstrans_resp_t;
+#define DOT11_BSSTRANS_RESP_LEN 5	/* Fixed length */
+
+/* BSS Mgmt Transition Response Status Field */
+#define DOT11_BSSTRANS_RESP_STATUS_ACCEPT			0
+#define DOT11_BSSTRANS_RESP_STATUS_REJECT			1
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_INSUFF_BCN		2
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_INSUFF_CAP		3
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_TERM_UNDESIRED		4
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_TERM_DELAY_REQ		5
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_BSS_LIST_PROVIDED	6
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_NO_SUITABLE_BSS		7
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_LEAVING_ESS		8
+
+
+/* BSS Max Idle Period element */
+BWL_PRE_PACKED_STRUCT struct dot11_bss_max_idle_period_ie {
+	uint8 id;				/* 90, DOT11_MNG_BSS_MAX_IDLE_PERIOD_ID */
+	uint8 len;
+	uint16 max_idle_period;			/* in unit of 1000 TUs */
+	uint8 idle_opt;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bss_max_idle_period_ie dot11_bss_max_idle_period_ie_t;
+#define DOT11_BSS_MAX_IDLE_PERIOD_IE_LEN	3	/* bss max idle period IE size */
+#define DOT11_BSS_MAX_IDLE_PERIOD_OPT_PROTECTED	1	/* BSS max idle option */
+
+/* TIM Broadcast request element */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_req_ie {
+	uint8 id;				/* 94, DOT11_MNG_TIMBC_REQ_ID */
+	uint8 len;
+	uint8 interval;				/* in unit of beacon interval */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_req_ie dot11_timbc_req_ie_t;
+#define DOT11_TIMBC_REQ_IE_LEN		1	/* Fixed length */
+
+/* TIM Broadcast request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_req {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: DOT11_WNM_ACTION_TIMBC_REQ(18) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* TIM broadcast request element */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_req dot11_timbc_req_t;
+#define DOT11_TIMBC_REQ_LEN		3	/* Fixed length */
+
+/* TIM Broadcast response element */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_resp_ie {
+	uint8 id;				/* 95, DOT11_MNG_TIM_BROADCAST_RESP_ID */
+	uint8 len;
+	uint8 status;				/* status of add request */
+	uint8 interval;				/* in unit of beacon interval */
+	int32 offset;				/* in unit of ms */
+	uint16 high_rate;			/* in unit of 0.5 Mb/s */
+	uint16 low_rate;			/* in unit of 0.5 Mb/s */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_resp_ie dot11_timbc_resp_ie_t;
+#define DOT11_TIMBC_DENY_RESP_IE_LEN	1	/* Deny. Fixed length */
+#define DOT11_TIMBC_ACCEPT_RESP_IE_LEN	10	/* Accept. Fixed length */
+
+#define DOT11_TIMBC_STATUS_ACCEPT		0
+#define DOT11_TIMBC_STATUS_ACCEPT_TSTAMP	1
+#define DOT11_TIMBC_STATUS_DENY			2
+#define DOT11_TIMBC_STATUS_OVERRIDDEN		3
+#define DOT11_TIMBC_STATUS_RESERVED		4
+
+/* TIM Broadcast response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_resp {
+	uint8 category;			/* category of action frame (10) */
+	uint8 action;			/* action: DOT11_WNM_ACTION_TIMBC_RESP(19) */
+	uint8 token;			/* dialog token */
+	uint8 data[1];			/* TIM broadcast response element */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_resp dot11_timbc_resp_t;
+#define DOT11_TIMBC_RESP_LEN	3	/* Fixed length */
+
+/* TIM element */
+BWL_PRE_PACKED_STRUCT struct dot11_tim_ie {
+	uint8 id;			/* 5, DOT11_MNG_TIM_ID	 */
+	uint8 len;			/* 4 - 255 */
+	uint8 dtim_count;		/* DTIM decrementing counter */
+	uint8 dtim_period;		/* DTIM period */
+	uint8 bitmap_control;	/* AID 0 + bitmap offset */
+	uint8 pvb[1];		/* Partial Virtual Bitmap, variable length */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tim_ie dot11_tim_ie_t;
+#define DOT11_TIM_IE_FIXED_LEN	3	/* Fixed length, without id and len */
+#define DOT11_TIM_IE_FIXED_TOTAL_LEN	5	/* Fixed length, with id and len */
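+
+/* Illustrative sketch (not original code): test whether traffic is
+ * buffered for a given AID. Bits 1-7 of bitmap_control carry the even
+ * bitmap offset N1 in octets; bit 0 flags buffered group traffic.
+ * Assumes tim->len was already validated against the frame body.
+ */
+static int tim_aid_is_set(const dot11_tim_ie_t *tim, uint16 aid)
+{
+	uint16 n1 = tim->bitmap_control & 0xfe;		/* bitmap offset, octets */
+	uint16 octet = (aid & DOT11_AID_MASK) >> 3;
+	uint16 pvb_len = tim->len - DOT11_TIM_IE_FIXED_LEN;
+
+	if (octet < n1 || octet >= (uint16)(n1 + pvb_len))
+		return 0;	/* AID falls outside the partial bitmap */
+	return (tim->pvb[octet - n1] >> (aid & 7)) & 1;
+}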
+
+/* TIM Broadcast frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc {
+	uint8 category;			/* category of action frame (11) */
+	uint8 action;			/* action: TIM (0) */
+	uint8 check_beacon;		/* Check Beacon field */
+	uint8 tsf[8];			/* Time Synchronization Function */
+	dot11_tim_ie_t tim_ie;		/* TIM element */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc dot11_timbc_t;
+#define DOT11_TIMBC_HDR_LEN	(sizeof(dot11_timbc_t) - sizeof(dot11_tim_ie_t))
+#define DOT11_TIMBC_FIXED_LEN	(sizeof(dot11_timbc_t) - 1)	/* Fixed length */
+#define DOT11_TIMBC_LEN			11	/* Fixed length */
+
+/* TCLAS frame classifier type */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_hdr {
+	uint8 type;
+	uint8 mask;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_hdr dot11_tclas_fc_hdr_t;
+#define DOT11_TCLAS_FC_HDR_LEN		2	/* Fixed length */
+
+#define DOT11_TCLAS_MASK_0		0x1
+#define DOT11_TCLAS_MASK_1		0x2
+#define DOT11_TCLAS_MASK_2		0x4
+#define DOT11_TCLAS_MASK_3		0x8
+#define DOT11_TCLAS_MASK_4		0x10
+#define DOT11_TCLAS_MASK_5		0x20
+#define DOT11_TCLAS_MASK_6		0x40
+#define DOT11_TCLAS_MASK_7		0x80
+
+#define DOT11_TCLAS_FC_0_ETH		0
+#define DOT11_TCLAS_FC_1_IP		1
+#define DOT11_TCLAS_FC_2_8021Q		2
+#define DOT11_TCLAS_FC_3_OFFSET		3
+#define DOT11_TCLAS_FC_4_IP_HIGHER	4
+#define DOT11_TCLAS_FC_5_8021D		5
+
+/* TCLAS frame classifier type 0 parameters for Ethernet */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_0_eth {
+	uint8 type;
+	uint8 mask;
+	uint8 sa[ETHER_ADDR_LEN];
+	uint8 da[ETHER_ADDR_LEN];
+	uint16 eth_type;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_0_eth dot11_tclas_fc_0_eth_t;
+#define DOT11_TCLAS_FC_0_ETH_LEN	16
+
+/* TCLAS frame classifier type 1 parameters for IPV4 */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_1_ipv4 {
+	uint8 type;
+	uint8 mask;
+	uint8 version;
+	uint32 src_ip;
+	uint32 dst_ip;
+	uint16 src_port;
+	uint16 dst_port;
+	uint8 dscp;
+	uint8 protocol;
+	uint8 reserved;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_1_ipv4 dot11_tclas_fc_1_ipv4_t;
+#define DOT11_TCLAS_FC_1_IPV4_LEN	18
+
+/* TCLAS frame classifier type 2 parameters for 802.1Q */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_2_8021q {
+	uint8 type;
+	uint8 mask;
+	uint16 tci;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_2_8021q dot11_tclas_fc_2_8021q_t;
+#define DOT11_TCLAS_FC_2_8021Q_LEN	4
+
+/* TCLAS frame classifier type 3 parameters for filter offset */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_3_filter {
+	uint8 type;
+	uint8 mask;
+	uint16 offset;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_3_filter dot11_tclas_fc_3_filter_t;
+#define DOT11_TCLAS_FC_3_FILTER_LEN	4
+
+/* TCLAS frame classifier type 4 parameters for IPV4 are the same as TCLAS type 1 */
+typedef struct dot11_tclas_fc_1_ipv4 dot11_tclas_fc_4_ipv4_t;
+#define DOT11_TCLAS_FC_4_IPV4_LEN	DOT11_TCLAS_FC_1_IPV4_LEN
+
+/* TCLAS frame classifier type 4 parameters for IPV6 */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_4_ipv6 {
+	uint8 type;
+	uint8 mask;
+	uint8 version;
+	uint8 saddr[16];
+	uint8 daddr[16];
+	uint16 src_port;
+	uint16 dst_port;
+	uint8 dscp;
+	uint8 nexthdr;
+	uint8 flow_lbl[3];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_4_ipv6 dot11_tclas_fc_4_ipv6_t;
+#define DOT11_TCLAS_FC_4_IPV6_LEN	44
+
+/* TCLAS frame classifier type 5 parameters for 802.1D */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_5_8021d {
+	uint8 type;
+	uint8 mask;
+	uint8 pcp;
+	uint8 cfi;
+	uint16 vid;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_5_8021d dot11_tclas_fc_5_8021d_t;
+#define DOT11_TCLAS_FC_5_8021D_LEN	6
+
+/* TCLAS frame classifier type parameters */
+BWL_PRE_PACKED_STRUCT union dot11_tclas_fc {
+	uint8 data[1];
+	dot11_tclas_fc_hdr_t hdr;
+	dot11_tclas_fc_0_eth_t t0_eth;
+	dot11_tclas_fc_1_ipv4_t	t1_ipv4;
+	dot11_tclas_fc_2_8021q_t t2_8021q;
+	dot11_tclas_fc_3_filter_t t3_filter;
+	dot11_tclas_fc_4_ipv4_t	t4_ipv4;
+	dot11_tclas_fc_4_ipv6_t	t4_ipv6;
+	dot11_tclas_fc_5_8021d_t t5_8021d;
+} BWL_POST_PACKED_STRUCT;
+typedef union dot11_tclas_fc dot11_tclas_fc_t;
+
+#define DOT11_TCLAS_FC_MIN_LEN		4	/* Classifier Type 2 has the min size */
+#define DOT11_TCLAS_FC_MAX_LEN		254
+
+/* TCLAS element */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_ie {
+	uint8 id;				/* 14, DOT11_MNG_TCLAS_ID */
+	uint8 len;
+	uint8 user_priority;
+	dot11_tclas_fc_t fc;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_ie dot11_tclas_ie_t;
+#define DOT11_TCLAS_IE_LEN		3	/* Fixed length, include id and len */
+
+/* TCLAS processing element */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_proc_ie {
+	uint8 id;				/* 44, DOT11_MNG_TCLAS_PROC_ID */
+	uint8 len;
+	uint8 process;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_proc_ie dot11_tclas_proc_ie_t;
+#define DOT11_TCLAS_PROC_IE_LEN		3	/* Fixed length, include id and len */
+
+#define DOT11_TCLAS_PROC_MATCHALL	0	/* All higher-layer elements need to match */
+#define DOT11_TCLAS_PROC_MATCHONE	1	/* At least one higher-layer element needs to match */
+#define DOT11_TCLAS_PROC_NONMATCH	2	/* No higher-layer element may match */
+
+
+/* TSPEC element defined in 802.11 std section 8.4.2.32 - Not supported */
+#define DOT11_TSPEC_IE_LEN		57	/* Fixed length */
+
+/* TFS request element */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_req_ie {
+	uint8 id;				/* 91, DOT11_MNG_TFS_REQUEST_ID */
+	uint8 len;
+	uint8 tfs_id;
+	uint8 actcode;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_req_ie dot11_tfs_req_ie_t;
+#define DOT11_TFS_REQ_IE_LEN		2	/* Fixed length, without id and len */
+
+/* TFS request action codes (bitfield) */
+#define DOT11_TFS_ACTCODE_DELETE	1
+#define DOT11_TFS_ACTCODE_NOTIFY	2
+
+/* TFS request subelement IDs */
+#define DOT11_TFS_REQ_TFS_SE_ID		1
+#define DOT11_TFS_REQ_VENDOR_SE_ID	221
+
+/* TFS subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 data[1];				/* TCLAS element(s) + optional TCLAS proc */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_se dot11_tfs_se_t;
+
+
+/* TFS response element */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_resp_ie {
+	uint8 id;				/* 92, DOT11_MNG_TFS_RESPONSE_ID */
+	uint8 len;
+	uint8 tfs_id;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_resp_ie dot11_tfs_resp_ie_t;
+#define DOT11_TFS_RESP_IE_LEN		1	/* Fixed length, without id and len */
+
+/* TFS response subelement IDs (same subelements as in the TFS request, but different IDs) */
+#define DOT11_TFS_RESP_TFS_STATUS_SE_ID		1
+#define DOT11_TFS_RESP_TFS_SE_ID		2
+#define DOT11_TFS_RESP_VENDOR_SE_ID		221
+
+/* TFS status subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_status_se {
+	uint8 sub_id;				/* 1, DOT11_TFS_RESP_TFS_STATUS_SE_ID */
+	uint8 len;
+	uint8 resp_st;
+	uint8 data[1];				/* Potential dot11_tfs_se_t included */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_status_se dot11_tfs_status_se_t;
+#define DOT11_TFS_STATUS_SE_LEN			1	/* Fixed length, without id and len */
+
+/* The following definitions should be merged into the FMS_TFS macros below */
+/* TFS Response status codes. Identical to the FMS Element status codes, without N/A */
+#define DOT11_TFS_STATUS_ACCEPT			0
+#define DOT11_TFS_STATUS_DENY_FORMAT		1
+#define DOT11_TFS_STATUS_DENY_RESOURCE		2
+#define DOT11_TFS_STATUS_DENY_POLICY		4
+#define DOT11_TFS_STATUS_DENY_UNSPECIFIED	5
+#define DOT11_TFS_STATUS_ALTPREF_POLICY		7
+#define DOT11_TFS_STATUS_ALTPREF_TCLAS_UNSUPP	14
+
+/* FMS Element Status and TFS Response Status Definition */
+#define DOT11_FMS_TFS_STATUS_ACCEPT		0
+#define DOT11_FMS_TFS_STATUS_DENY_FORMAT	1
+#define DOT11_FMS_TFS_STATUS_DENY_RESOURCE	2
+#define DOT11_FMS_TFS_STATUS_DENY_MULTIPLE_DI	3
+#define DOT11_FMS_TFS_STATUS_DENY_POLICY	4
+#define DOT11_FMS_TFS_STATUS_DENY_UNSPECIFIED	5
+#define DOT11_FMS_TFS_STATUS_ALT_DIFF_DI	6
+#define DOT11_FMS_TFS_STATUS_ALT_POLICY		7
+#define DOT11_FMS_TFS_STATUS_ALT_CHANGE_DI	8
+#define DOT11_FMS_TFS_STATUS_ALT_MCRATE		9
+#define DOT11_FMS_TFS_STATUS_TERM_POLICY	10
+#define DOT11_FMS_TFS_STATUS_TERM_RESOURCE	11
+#define DOT11_FMS_TFS_STATUS_TERM_HIGHER_PRIO	12
+#define DOT11_FMS_TFS_STATUS_ALT_CHANGE_MDI	13
+#define DOT11_FMS_TFS_STATUS_ALT_TCLAS_UNSUPP	14
+
+/* TFS Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_req {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: TFS request (13) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_req dot11_tfs_req_t;
+#define DOT11_TFS_REQ_LEN		3	/* Fixed length */
+
+/* TFS Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_resp {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: TFS response (14) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_resp dot11_tfs_resp_t;
+#define DOT11_TFS_RESP_LEN		3	/* Fixed length */
+
+/* TFS Management Notify frame request header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_notify_req {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: TFS notify request (15) */
+	uint8 tfs_id_cnt;			/* TFS IDs count */
+	uint8 tfs_id[1];			/* Array of TFS IDs */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_notify_req dot11_tfs_notify_req_t;
+#define DOT11_TFS_NOTIFY_REQ_LEN	3	/* Fixed length */
+
+/* TFS Management Notify frame response header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_notify_resp {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: TFS notify response (28) */
+	uint8 tfs_id_cnt;			/* TFS IDs count */
+	uint8 tfs_id[1];			/* Array of TFS IDs */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_notify_resp dot11_tfs_notify_resp_t;
+#define DOT11_TFS_NOTIFY_RESP_LEN	3	/* Fixed length */
+
+
+/* WNM-Sleep Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_req {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: wnm-sleep request (16) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_req dot11_wnm_sleep_req_t;
+#define DOT11_WNM_SLEEP_REQ_LEN		3	/* Fixed length */
+
+/* WNM-Sleep Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_resp {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: wnm-sleep response (17) */
+	uint8 token;				/* dialog token */
+	uint16 key_len;				/* key data length */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_resp dot11_wnm_sleep_resp_t;
+#define DOT11_WNM_SLEEP_RESP_LEN	5	/* Fixed length */
+
+#define DOT11_WNM_SLEEP_SUBELEM_ID_GTK	0
+#define DOT11_WNM_SLEEP_SUBELEM_ID_IGTK	1
+
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_subelem_gtk {
+	uint8 sub_id;
+	uint8 len;
+	uint16 key_info;
+	uint8 key_length;
+	uint8 rsc[8];
+	uint8 key[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_subelem_gtk dot11_wnm_sleep_subelem_gtk_t;
+#define DOT11_WNM_SLEEP_SUBELEM_GTK_FIXED_LEN	11	/* without sub_id, len, and key */
+#define DOT11_WNM_SLEEP_SUBELEM_GTK_MAX_LEN	43	/* without sub_id and len */
+
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_subelem_igtk {
+	uint8 sub_id;
+	uint8 len;
+	uint16 key_id;
+	uint8 pn[6];
+	uint8 key[16];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_subelem_igtk dot11_wnm_sleep_subelem_igtk_t;
+#define DOT11_WNM_SLEEP_SUBELEM_IGTK_LEN 24	/* Fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_ie {
+	uint8 id;				/* 93, DOT11_MNG_WNM_SLEEP_MODE_ID */
+	uint8 len;
+	uint8 act_type;
+	uint8 resp_status;
+	uint16 interval;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_ie dot11_wnm_sleep_ie_t;
+#define DOT11_WNM_SLEEP_IE_LEN		4	/* Fixed length */
+
+#define DOT11_WNM_SLEEP_ACT_TYPE_ENTER	0
+#define DOT11_WNM_SLEEP_ACT_TYPE_EXIT	1
+
+#define DOT11_WNM_SLEEP_RESP_ACCEPT	0
+#define DOT11_WNM_SLEEP_RESP_UPDATE	1
+#define DOT11_WNM_SLEEP_RESP_DENY	2
+#define DOT11_WNM_SLEEP_RESP_DENY_TEMP	3
+#define DOT11_WNM_SLEEP_RESP_DENY_KEY	4
+#define DOT11_WNM_SLEEP_RESP_DENY_INUSE	5
+#define DOT11_WNM_SLEEP_RESP_LAST	6
+
+/* DMS Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_req {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: dms request (23) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_req dot11_dms_req_t;
+#define DOT11_DMS_REQ_LEN		3	/* Fixed length */
+
+/* DMS Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_resp {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: dms response (24) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_resp dot11_dms_resp_t;
+#define DOT11_DMS_RESP_LEN		3	/* Fixed length */
+
+/* DMS request element */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_req_ie {
+	uint8 id;				/* 99, DOT11_MNG_DMS_REQUEST_ID */
+	uint8 len;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_req_ie dot11_dms_req_ie_t;
+#define DOT11_DMS_REQ_IE_LEN		2	/* Fixed length */
+
+/* DMS response element */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_resp_ie {
+	uint8 id;				/* 100, DOT11_MNG_DMS_RESPONSE_ID */
+	uint8 len;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_resp_ie dot11_dms_resp_ie_t;
+#define DOT11_DMS_RESP_IE_LEN		2	/* Fixed length */
+
+/* DMS request descriptor */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_req_desc {
+	uint8 dms_id;
+	uint8 len;
+	uint8 type;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_req_desc dot11_dms_req_desc_t;
+#define DOT11_DMS_REQ_DESC_LEN		3	/* Fixed length */
+
+#define DOT11_DMS_REQ_TYPE_ADD		0
+#define DOT11_DMS_REQ_TYPE_REMOVE	1
+#define DOT11_DMS_REQ_TYPE_CHANGE	2
+
+/* DMS response status */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_resp_st {
+	uint8 dms_id;
+	uint8 len;
+	uint8 type;
+	uint16 lsc;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_resp_st dot11_dms_resp_st_t;
+#define DOT11_DMS_RESP_STATUS_LEN	5	/* Fixed length */
+
+#define DOT11_DMS_RESP_TYPE_ACCEPT	0
+#define DOT11_DMS_RESP_TYPE_DENY	1
+#define DOT11_DMS_RESP_TYPE_TERM	2
+
+#define DOT11_DMS_RESP_LSC_UNSUPPORTED	0xFFFF
+
+/* FMS Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_req {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: fms request (9) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_req dot11_fms_req_t;
+#define DOT11_FMS_REQ_LEN		3	/* Fixed length */
+
+/* FMS Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_resp {
+	uint8 category;				/* category of action frame (10) */
+	uint8 action;				/* WNM action: fms response (10) */
+	uint8 token;				/* dialog token */
+	uint8 data[1];				/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_resp dot11_fms_resp_t;
+#define DOT11_FMS_RESP_LEN		3	/* Fixed length */
+
+/* FMS Descriptor element */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_desc {
+	uint8 id;
+	uint8 len;
+	uint8 num_fms_cnt;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_desc dot11_fms_desc_t;
+#define DOT11_FMS_DESC_LEN		1	/* Fixed length */
+
+#define DOT11_FMS_CNTR_MAX		0x8
+#define DOT11_FMS_CNTR_ID_MASK		0x7
+#define DOT11_FMS_CNTR_ID_SHIFT		0x0
+#define DOT11_FMS_CNTR_COUNT_MASK	0xf1
+#define DOT11_FMS_CNTR_SHIFT		0x3
+
+/* FMS request element */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_req_ie {
+	uint8 id;
+	uint8 len;
+	uint8 fms_token;			/* token used to identify fms stream set */
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_req_ie dot11_fms_req_ie_t;
+#define DOT11_FMS_REQ_IE_FIX_LEN		1	/* Fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_rate_id_field {
+	uint8 mask;
+	uint8 mcs_idx;
+	uint16 rate;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rate_id_field dot11_rate_id_field_t;
+#define DOT11_RATE_ID_FIELD_MCS_SEL_MASK	0x7
+#define DOT11_RATE_ID_FIELD_MCS_SEL_OFFSET	0
+#define DOT11_RATE_ID_FIELD_RATETYPE_MASK	0x18
+#define DOT11_RATE_ID_FIELD_RATETYPE_OFFSET	3
+#define DOT11_RATE_ID_FIELD_LEN		sizeof(dot11_rate_id_field_t)
+
+/* FMS request subelements */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 interval;
+	uint8 max_interval;
+	dot11_rate_id_field_t rate;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_se dot11_fms_se_t;
+#define DOT11_FMS_REQ_SE_LEN		6	/* Fixed length */
+
+#define DOT11_FMS_REQ_SE_ID_FMS		1	/* FMS subelement */
+#define DOT11_FMS_REQ_SE_ID_VS		221	/* Vendor Specific subelement */
+
+/* FMS response element */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_resp_ie {
+	uint8 id;
+	uint8 len;
+	uint8 fms_token;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_resp_ie dot11_fms_resp_ie_t;
+#define DOT11_FMS_RESP_IE_FIX_LEN		1	/* Fixed length */
+
+/* FMS status subelements */
+#define DOT11_FMS_STATUS_SE_ID_FMS	1	/* FMS Status */
+#define DOT11_FMS_STATUS_SE_ID_TCLAS	2	/* TCLAS Status */
+#define DOT11_FMS_STATUS_SE_ID_VS	221	/* Vendor Specific subelement */
+
+/* FMS status subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_status_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 status;
+	uint8 interval;
+	uint8 max_interval;
+	uint8 fmsid;
+	uint8 counter;
+	dot11_rate_id_field_t rate;
+	uint8 mcast_addr[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_status_se dot11_fms_status_se_t;
+#define DOT11_FMS_STATUS_SE_LEN		15	/* Fixed length */
+
+/* TCLAS status subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_status_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 fmsid;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_status_se dot11_tclas_status_se_t;
+#define DOT11_TCLAS_STATUS_SE_LEN		1	/* Fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_req {
+	uint8 category;				/* category of action frame (3) */
+	uint8 action;				/* action: addba req */
+	uint8 token;				/* identifier */
+	uint16 addba_param_set;		/* parameter set */
+	uint16 timeout;				/* timeout in seconds */
+	uint16 start_seqnum;		/* starting sequence number */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_req dot11_addba_req_t;
+#define DOT11_ADDBA_REQ_LEN		9	/* length of addba req frame */
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_resp {
+	uint8 category;				/* category of action frame (3) */
+	uint8 action;				/* action: addba resp */
+	uint8 token;				/* identifier */
+	uint16 status;				/* status of add request */
+	uint16 addba_param_set;			/* negotiated parameter set */
+	uint16 timeout;				/* negotiated timeout in seconds */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_resp dot11_addba_resp_t;
+#define DOT11_ADDBA_RESP_LEN		9	/* length of addba resp frame */
+
+/* DELBA action parameters */
+#define DOT11_DELBA_PARAM_INIT_MASK	0x0800	/* initiator mask */
+#define DOT11_DELBA_PARAM_INIT_SHIFT	11	/* initiator shift */
+#define DOT11_DELBA_PARAM_TID_MASK	0xf000	/* tid mask */
+#define DOT11_DELBA_PARAM_TID_SHIFT	12	/* tid shift */
+
+BWL_PRE_PACKED_STRUCT struct dot11_delba {
+	uint8 category;				/* category of action frame (3) */
+	uint8 action;				/* action: delba */
+	uint16 delba_param_set;			/* parameter set */
+	uint16 reason;				/* reason for delba */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_delba dot11_delba_t;
+#define DOT11_DELBA_LEN			6	/* length of delba frame */
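+
+/* Illustrative sketch, not part of the original header: unpack the TID and
+ * initiator flag from a DELBA parameter set, assuming the uint16 was already
+ * converted to host byte order (e.g. with ltoh16_ua).
+ */
+static void
+dot11_delba_unpack_example(uint16 delba_param_set, uint8 *tid, uint8 *initiator)
+{
+	*tid = (uint8)((delba_param_set & DOT11_DELBA_PARAM_TID_MASK) >>
+	               DOT11_DELBA_PARAM_TID_SHIFT);
+	*initiator = (delba_param_set & DOT11_DELBA_PARAM_INIT_MASK) != 0;
+}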
+
+/* SA Query action field value */
+#define SA_QUERY_REQUEST		0
+#define SA_QUERY_RESPONSE		1
+
+/* ************* 802.11r related definitions. ************* */
+
+/* Over-the-DS Fast Transition Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_ft_req {
+	uint8 category;			/* category of action frame (6) */
+	uint8 action;			/* action: ft req */
+	uint8 sta_addr[ETHER_ADDR_LEN];
+	uint8 tgt_ap_addr[ETHER_ADDR_LEN];
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_req dot11_ft_req_t;
+#define DOT11_FT_REQ_FIXED_LEN 14
+
+/* Over-the-DS Fast Transition Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_ft_res {
+	uint8 category;			/* category of action frame (6) */
+	uint8 action;			/* action: ft resp */
+	uint8 sta_addr[ETHER_ADDR_LEN];
+	uint8 tgt_ap_addr[ETHER_ADDR_LEN];
+	uint16 status;			/* status code */
+	uint8 data[1];			/* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_res dot11_ft_res_t;
+#define DOT11_FT_RES_FIXED_LEN 16
+
+/* RDE RIC Data Element. */
+BWL_PRE_PACKED_STRUCT struct dot11_rde_ie {
+	uint8 id;			/* 11r, DOT11_MNG_RDE_ID */
+	uint8 length;
+	uint8 rde_id;			/* RDE identifier. */
+	uint8 rd_count;			/* Resource Descriptor Count. */
+	uint16 status;			/* Status Code. */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rde_ie dot11_rde_ie_t;
+
+/* 11r - Size of the RDE (RIC Data Element) IE, including TLV header. */
+#define DOT11_MNG_RDE_IE_LEN sizeof(dot11_rde_ie_t)
+
+
+/* ************* 802.11k related definitions. ************* */
+
+/* Radio measurements enabled capability ie */
+#define DOT11_RRM_CAP_LEN		5	/* length of rrm cap bitmap */
+#define RCPI_IE_LEN 1
+#define RSNI_IE_LEN 1
+BWL_PRE_PACKED_STRUCT struct dot11_rrm_cap_ie {
+	uint8 cap[DOT11_RRM_CAP_LEN];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rrm_cap_ie dot11_rrm_cap_ie_t;
+
+/* Bitmap definitions for cap ie */
+#define DOT11_RRM_CAP_LINK		0
+#define DOT11_RRM_CAP_NEIGHBOR_REPORT	1
+#define DOT11_RRM_CAP_PARALLEL		2
+#define DOT11_RRM_CAP_REPEATED		3
+#define DOT11_RRM_CAP_BCN_PASSIVE	4
+#define DOT11_RRM_CAP_BCN_ACTIVE	5
+#define DOT11_RRM_CAP_BCN_TABLE		6
+#define DOT11_RRM_CAP_BCN_REP_COND	7
+#define DOT11_RRM_CAP_FM		8
+#define DOT11_RRM_CAP_CLM		9
+#define DOT11_RRM_CAP_NHM		10
+#define DOT11_RRM_CAP_SM		11
+#define DOT11_RRM_CAP_LCIM		12
+#define DOT11_RRM_CAP_LCIA		13
+#define DOT11_RRM_CAP_TSCM		14
+#define DOT11_RRM_CAP_TTSCM		15
+#define DOT11_RRM_CAP_AP_CHANREP	16
+#define DOT11_RRM_CAP_RMMIB		17
+/* bits 18-26, not used for RRM_IOVAR */
+#define DOT11_RRM_CAP_MPTI		27
+#define DOT11_RRM_CAP_NBRTSFO		28
+#define DOT11_RRM_CAP_RCPI		29
+#define DOT11_RRM_CAP_RSNI		30
+#define DOT11_RRM_CAP_BSSAAD		31
+#define DOT11_RRM_CAP_BSSAAC		32
+#define DOT11_RRM_CAP_AI		33
+
+/* Operating Class (formerly "Regulatory Class") definitions */
+#define DOT11_OP_CLASS_NONE			255
+
+BWL_PRE_PACKED_STRUCT struct do11_ap_chrep {
+	uint8 id;
+	uint8 len;
+	uint8 reg;
+	uint8 chanlist[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct do11_ap_chrep dot11_ap_chrep_t;
+
+/* Radio Measurements action ids */
+#define DOT11_RM_ACTION_RM_REQ		0	/* Radio measurement request */
+#define DOT11_RM_ACTION_RM_REP		1	/* Radio measurement report */
+#define DOT11_RM_ACTION_LM_REQ		2	/* Link measurement request */
+#define DOT11_RM_ACTION_LM_REP		3	/* Link measurement report */
+#define DOT11_RM_ACTION_NR_REQ		4	/* Neighbor report request */
+#define DOT11_RM_ACTION_NR_REP		5	/* Neighbor report response */
+
+/* Generic radio measurement action frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_rm_action {
+	uint8 category;				/* category of action frame (5) */
+	uint8 action;				/* radio measurement action */
+	uint8 token;				/* dialog token */
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rm_action dot11_rm_action_t;
+#define DOT11_RM_ACTION_LEN 3
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq {
+	uint8 category;				/* category of action frame (5) */
+	uint8 action;				/* radio measurement action */
+	uint8 token;				/* dialog token */
+	uint16 reps;				/* no. of repetitions */
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq dot11_rmreq_t;
+#define DOT11_RMREQ_LEN	5
+
+BWL_PRE_PACKED_STRUCT struct dot11_rm_ie {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rm_ie dot11_rm_ie_t;
+#define DOT11_RM_IE_LEN	5
+
+/* Definitions for "mode" bits in rm req */
+#define DOT11_RMREQ_MODE_PARALLEL	1
+#define DOT11_RMREQ_MODE_ENABLE		2
+#define DOT11_RMREQ_MODE_REQUEST	4
+#define DOT11_RMREQ_MODE_REPORT		8
+#define DOT11_RMREQ_MODE_DURMAND	0x10	/* Duration Mandatory */
+
+/* Definitions for "mode" bits in rm rep */
+#define DOT11_RMREP_MODE_LATE		1
+#define DOT11_RMREP_MODE_INCAPABLE	2
+#define DOT11_RMREP_MODE_REFUSED	4
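+
+/* Illustrative sketch, not part of the original header: a measurement report
+ * whose mode has any of the LATE/INCAPABLE/REFUSED bits set carries no
+ * measurement data and can be dropped by the requester.
+ */
+static int
+dot11_rmrep_is_rejected_example(const dot11_rm_ie_t *ie)
+{
+	return (ie->mode & (DOT11_RMREP_MODE_LATE |
+	                    DOT11_RMREP_MODE_INCAPABLE |
+	                    DOT11_RMREP_MODE_REFUSED)) != 0;
+}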
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_bcn {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 reg;
+	uint8 channel;
+	uint16 interval;
+	uint16 duration;
+	uint8 bcn_mode;
+	struct ether_addr	bssid;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_bcn dot11_rmreq_bcn_t;
+#define DOT11_RMREQ_BCN_LEN	18
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn {
+	uint8 reg;
+	uint8 channel;
+	uint32 starttime[2];
+	uint16 duration;
+	uint8 frame_info;
+	uint8 rcpi;
+	uint8 rsni;
+	struct ether_addr	bssid;
+	uint8 antenna_id;
+	uint32 parent_tsf;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_bcn dot11_rmrep_bcn_t;
+#define DOT11_RMREP_BCN_LEN	26
+
+/* Beacon request measurement mode */
+#define DOT11_RMREQ_BCN_PASSIVE	0
+#define DOT11_RMREQ_BCN_ACTIVE	1
+#define DOT11_RMREQ_BCN_TABLE	2
+
+/* Sub-element IDs for Beacon Request */
+#define DOT11_RMREQ_BCN_SSID_ID 0
+#define DOT11_RMREQ_BCN_REPINFO_ID  1
+#define DOT11_RMREQ_BCN_REPDET_ID   2
+#define DOT11_RMREQ_BCN_REQUEST_ID  10
+#define DOT11_RMREQ_BCN_APCHREP_ID  DOT11_MNG_AP_CHREP_ID
+
+/* Reporting Detail element definition */
+#define DOT11_RMREQ_BCN_REPDET_FIXED	0	/* Fixed length fields only */
+#define DOT11_RMREQ_BCN_REPDET_REQUEST	1	/* + requested information elems */
+#define DOT11_RMREQ_BCN_REPDET_ALL	2	/* All fields */
+
+/* Sub-element IDs for Beacon Report */
+#define DOT11_RMREP_BCN_FRM_BODY	1
+
+/* Sub-element IDs for Frame Report */
+#define DOT11_RMREP_FRAME_COUNT_REPORT 1
+
+/* Channel load request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_chanload {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 reg;
+	uint8 channel;
+	uint16 interval;
+	uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_chanload dot11_rmreq_chanload_t;
+#define DOT11_RMREQ_CHANLOAD_LEN	11
+
+/* Channel load report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_chanload {
+	uint8 reg;
+	uint8 channel;
+	uint32 starttime[2];
+	uint16 duration;
+	uint8 channel_load;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_chanload dot11_rmrep_chanload_t;
+#define DOT11_RMREP_CHANLOAD_LEN	13
+
+/* Noise histogram request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_noise {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 reg;
+	uint8 channel;
+	uint16 interval;
+	uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_noise dot11_rmreq_noise_t;
+#define DOT11_RMREQ_NOISE_LEN 11
+
+/* Noise histogram report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_noise {
+	uint8 reg;
+	uint8 channel;
+	uint32 starttime[2];
+	uint16 duration;
+	uint8 antid;
+	uint8 anpi;
+	uint8 ipi0_dens;
+	uint8 ipi1_dens;
+	uint8 ipi2_dens;
+	uint8 ipi3_dens;
+	uint8 ipi4_dens;
+	uint8 ipi5_dens;
+	uint8 ipi6_dens;
+	uint8 ipi7_dens;
+	uint8 ipi8_dens;
+	uint8 ipi9_dens;
+	uint8 ipi10_dens;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_noise dot11_rmrep_noise_t;
+#define DOT11_RMREP_NOISE_LEN 25
+
+/* Frame request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_frame {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint8 reg;
+	uint8 channel;
+	uint16 interval;
+	uint16 duration;
+	uint8 req_type;
+	struct ether_addr	ta;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_frame dot11_rmreq_frame_t;
+#define DOT11_RMREQ_FRAME_LEN 18
+
+/* Frame report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_frame {
+	uint8 reg;
+	uint8 channel;
+	uint32 starttime[2];
+	uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_frame dot11_rmrep_frame_t;
+#define DOT11_RMREP_FRAME_LEN 12
+
+/* Frame report entry */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_frmentry {
+	struct ether_addr	ta;
+	struct ether_addr	bssid;
+	uint8 phy_type;
+	uint8 avg_rcpi;
+	uint8 last_rsni;
+	uint8 last_rcpi;
+	uint8 ant_id;
+	uint16 frame_cnt;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_frmentry dot11_rmrep_frmentry_t;
+#define DOT11_RMREP_FRMENTRY_LEN 19
+
+/* STA statistics request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_stat {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	struct ether_addr	peer;
+	uint16 interval;
+	uint16 duration;
+	uint8 group_id;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_stat dot11_rmreq_stat_t;
+#define DOT11_RMREQ_STAT_LEN 16
+
+/* STA statistics report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_stat {
+	uint16 duration;
+	uint8 group_id;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_stat dot11_rmrep_stat_t;
+
+/* Transmit stream/category measurement request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_tx_stream {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint16 interval;
+	uint16 duration;
+	struct ether_addr	peer;
+	uint8 traffic_id;
+	uint8 bin0_range;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_tx_stream dot11_rmreq_tx_stream_t;
+
+/* Transmit stream/category measurement report */
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_tx_stream {
+	uint32 starttime[2];
+	uint16 duration;
+	struct ether_addr	peer;
+	uint8 traffic_id;
+	uint8 reason;
+	uint32 txmsdu_cnt;
+	uint32 msdu_discarded_cnt;
+	uint32 msdufailed_cnt;
+	uint32 msduretry_cnt;
+	uint32 cfpolls_lost_cnt;
+	uint32 avrqueue_delay;
+	uint32 avrtx_delay;
+	uint8 bin0_range;
+	uint32 bin0;
+	uint32 bin1;
+	uint32 bin2;
+	uint32 bin3;
+	uint32 bin4;
+	uint32 bin5;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_tx_stream dot11_rmrep_tx_stream_t;
+
+/* Measurement pause request */
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_pause_time {
+	uint8 id;
+	uint8 len;
+	uint8 token;
+	uint8 mode;
+	uint8 type;
+	uint16 pause_time;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_pause_time dot11_rmreq_pause_time_t;
+
+
+/* Neighbor Report subelements ID (11k & 11v) */
+#define DOT11_NGBR_TSF_INFO_SE_ID	1
+#define DOT11_NGBR_CCS_SE_ID		2
+#define DOT11_NGBR_BSSTRANS_PREF_SE_ID	3
+#define DOT11_NGBR_BSS_TERM_DUR_SE_ID	4
+#define DOT11_NGBR_BEARING_SE_ID	5
+
+/* Neighbor Report, BSS Transition Candidate Preference subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_ngbr_bsstrans_pref_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 preference;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ngbr_bsstrans_pref_se dot11_ngbr_bsstrans_pref_se_t;
+#define DOT11_NGBR_BSSTRANS_PREF_SE_LEN	1
+
+/* Neighbor Report, BSS Termination Duration subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_ngbr_bss_term_dur_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 tsf[8];
+	uint16 duration;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ngbr_bss_term_dur_se dot11_ngbr_bss_term_dur_se_t;
+#define DOT11_NGBR_BSS_TERM_DUR_SE_LEN	10
+
+/* Neighbor Report BSSID Information Field */
+#define DOT11_NGBR_BI_REACHABILTY_UNKN	0x0002
+#define DOT11_NGBR_BI_REACHABILTY	0x0003
+#define DOT11_NGBR_BI_SEC		0x0004
+#define DOT11_NGBR_BI_KEY_SCOPE		0x0008
+#define DOT11_NGBR_BI_CAP		0x03f0
+#define DOT11_NGBR_BI_CAP_SPEC_MGMT	0x0010
+#define DOT11_NGBR_BI_CAP_QOS		0x0020
+#define DOT11_NGBR_BI_CAP_APSD		0x0040
+#define DOT11_NGBR_BI_CAP_RDIO_MSMT	0x0080
+#define DOT11_NGBR_BI_CAP_DEL_BA	0x0100
+#define DOT11_NGBR_BI_CAP_IMM_BA	0x0200
+#define DOT11_NGBR_BI_MOBILITY		0x0400
+#define DOT11_NGBR_BI_HT		0x0800
+
+/* Neighbor Report element (11k & 11v) */
+BWL_PRE_PACKED_STRUCT struct dot11_neighbor_rep_ie {
+	uint8 id;
+	uint8 len;
+	struct ether_addr bssid;
+	uint32 bssid_info;
+	uint8 reg;		/* Operating class */
+	uint8 channel;
+	uint8 phytype;
+	uint8 data[1]; 		/* Variable size subelements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_neighbor_rep_ie dot11_neighbor_rep_ie_t;
+#define DOT11_NEIGHBOR_REP_IE_FIXED_LEN	13
+
+
+/* MLME Enumerations */
+#define DOT11_BSSTYPE_INFRASTRUCTURE		0	/* d11 infrastructure */
+#define DOT11_BSSTYPE_INDEPENDENT		1	/* d11 independent */
+#define DOT11_BSSTYPE_ANY			2	/* d11 any BSS type */
+#define DOT11_SCANTYPE_ACTIVE			0	/* d11 scan active */
+#define DOT11_SCANTYPE_PASSIVE			1	/* d11 scan passive */
+
+/* Link Measurement */
+BWL_PRE_PACKED_STRUCT struct dot11_lmreq {
+	uint8 category;				/* category of action frame (5) */
+	uint8 action;				/* radio measurement action */
+	uint8 token;				/* dialog token */
+	uint8 txpwr;				/* Transmit Power Used */
+	uint8 maxtxpwr;				/* Max Transmit Power */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_lmreq dot11_lmreq_t;
+#define DOT11_LMREQ_LEN	5
+
+BWL_PRE_PACKED_STRUCT struct dot11_lmrep {
+	uint8 category;				/* category of action frame (5) */
+	uint8 action;				/* radio measurement action */
+	uint8 token;				/* dialog token */
+	dot11_tpc_rep_t tpc;			/* TPC element */
+	uint8 rxant;				/* Receive Antenna ID */
+	uint8 txant;				/* Transmit Antenna ID */
+	uint8 rcpi;				/* RCPI */
+	uint8 rsni;				/* RSNI */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_lmrep dot11_lmrep_t;
+#define DOT11_LMREP_LEN	11
+
+/* 802.11 BRCM "Compromise" Pre N constants */
+#define PREN_PREAMBLE		24	/* greenfield preamble time */
+#define PREN_MM_EXT		12	/* extra mixed mode preamble time */
+#define PREN_PREAMBLE_EXT	4	/* extra preamble (multiply by unique_streams-1) */
+
+/* 802.11N PHY constants */
+#define RIFS_11N_TIME		2	/* NPHY RIFS time */
+
+/* 802.11 HT PLCP format 802.11n-2009, sec 20.3.9.4.3
+ * HT-SIG is composed of two 24 bit parts, HT-SIG1 and HT-SIG2
+ */
+/* HT-SIG1 */
+#define HT_SIG1_MCS_MASK        0x00007F
+#define HT_SIG1_CBW             0x000080
+#define HT_SIG1_HT_LENGTH       0xFFFF00
+
+/* HT-SIG2 */
+#define HT_SIG2_SMOOTHING       0x000001
+#define HT_SIG2_NOT_SOUNDING    0x000002
+#define HT_SIG2_RESERVED        0x000004
+#define HT_SIG2_AGGREGATION     0x000008
+#define HT_SIG2_STBC_MASK       0x000030
+#define HT_SIG2_STBC_SHIFT      4
+#define HT_SIG2_FEC_CODING      0x000040
+#define HT_SIG2_SHORT_GI        0x000080
+#define HT_SIG2_ESS_MASK        0x000300
+#define HT_SIG2_ESS_SHIFT       8
+#define HT_SIG2_CRC             0x03FC00
+#define HT_SIG2_TAIL            0x1C0000
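+
+/* Illustrative sketch, not part of the original header: extract the MCS and
+ * HT-LENGTH fields from HT-SIG1, assuming the 24 SIG bits sit in the low 24
+ * bits of a host-order uint32 (HT-LENGTH occupies bits 8-23 per the mask).
+ */
+static void
+ht_sig1_unpack_example(uint32 htsig1, uint8 *mcs, uint16 *ht_length)
+{
+	*mcs = (uint8)(htsig1 & HT_SIG1_MCS_MASK);
+	*ht_length = (uint16)((htsig1 & HT_SIG1_HT_LENGTH) >> 8);
+}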
+
+/* HT Timing-related parameters (802.11-2012, sec 20.3.6) */
+#define HT_T_LEG_PREAMBLE      16
+#define HT_T_L_SIG              4
+#define HT_T_SIG                8
+#define HT_T_LTF1               4
+#define HT_T_GF_LTF1            8
+#define HT_T_LTFs               4
+#define HT_T_STF                4
+#define HT_T_GF_STF             8
+#define HT_T_SYML               4
+
+#define HT_N_SERVICE           16       /* bits in SERVICE field */
+#define HT_N_TAIL               6       /* tail bits per BCC encoder */
+
+/* 802.11 A PHY constants */
+#define APHY_SLOT_TIME          9       /* APHY slot time */
+#define APHY_SIFS_TIME          16      /* APHY SIFS time */
+#define APHY_DIFS_TIME          (APHY_SIFS_TIME + (2 * APHY_SLOT_TIME))  /* APHY DIFS time */
+#define APHY_PREAMBLE_TIME      16      /* APHY preamble time */
+#define APHY_SIGNAL_TIME        4       /* APHY signal time */
+#define APHY_SYMBOL_TIME        4       /* APHY symbol time */
+#define APHY_SERVICE_NBITS      16      /* APHY service nbits */
+#define APHY_TAIL_NBITS         6       /* APHY tail nbits */
+#define APHY_CWMIN              15      /* APHY cwmin */
+
+/* 802.11 B PHY constants */
+#define BPHY_SLOT_TIME          20      /* BPHY slot time */
+#define BPHY_SIFS_TIME          10      /* BPHY SIFS time */
+#define BPHY_DIFS_TIME          50      /* BPHY DIFS time */
+#define BPHY_PLCP_TIME          192     /* BPHY PLCP time */
+#define BPHY_PLCP_SHORT_TIME    96      /* BPHY PLCP short time */
+#define BPHY_CWMIN              31      /* BPHY cwmin */
+
+/* 802.11 G constants */
+#define DOT11_OFDM_SIGNAL_EXTENSION	6	/* d11 OFDM signal extension */
+
+#define PHY_CWMAX		1023	/* PHY cwmax */
+
+#define	DOT11_MAXNUMFRAGS	16	/* max # fragments per MSDU */
+
+/* 802.11 VHT constants */
+
+typedef int vht_group_id_t;
+
+/* for VHT-A1 */
+/* SIG-A1 reserved bits */
+#define VHT_SIGA1_CONST_MASK            0x800004
+
+#define VHT_SIGA1_BW_MASK               0x000003
+#define VHT_SIGA1_20MHZ_VAL             0x000000
+#define VHT_SIGA1_40MHZ_VAL             0x000001
+#define VHT_SIGA1_80MHZ_VAL             0x000002
+#define VHT_SIGA1_160MHZ_VAL            0x000003
+
+#define VHT_SIGA1_STBC                  0x000008
+
+#define VHT_SIGA1_GID_MASK              0x0003f0
+#define VHT_SIGA1_GID_SHIFT             4
+#define VHT_SIGA1_GID_TO_AP             0x00
+#define VHT_SIGA1_GID_NOT_TO_AP         0x3f
+#define VHT_SIGA1_GID_MAX_GID           0x3f
+
+#define VHT_SIGA1_NSTS_SHIFT_MASK_USER0 0x001C00
+#define VHT_SIGA1_NSTS_SHIFT            10
+
+#define VHT_SIGA1_PARTIAL_AID_MASK      0x3fe000
+#define VHT_SIGA1_PARTIAL_AID_SHIFT     13
+
+#define VHT_SIGA1_TXOP_PS_NOT_ALLOWED   0x400000
+
+/* for VHT-A2 */
+#define VHT_SIGA2_GI_NONE               0x000000
+#define VHT_SIGA2_GI_SHORT              0x000001
+#define VHT_SIGA2_GI_W_MOD10            0x000002
+#define VHT_SIGA2_CODING_LDPC           0x000004
+#define VHT_SIGA2_LDPC_EXTRA_OFDM_SYM   0x000008
+#define VHT_SIGA2_BEAMFORM_ENABLE       0x000100
+#define VHT_SIGA2_MCS_SHIFT             4
+
+#define VHT_SIGA2_B9_RESERVED           0x000200
+#define VHT_SIGA2_TAIL_MASK             0xfc0000
+#define VHT_SIGA2_TAIL_VALUE            0x000000
+
+/* VHT Timing-related parameters (802.11ac D4.0, sec 22.3.6) */
+#define VHT_T_LEG_PREAMBLE      16
+#define VHT_T_L_SIG              4
+#define VHT_T_SIG_A              8
+#define VHT_T_LTF                4
+#define VHT_T_STF                4
+#define VHT_T_SIG_B              4
+#define VHT_T_SYML               4
+
+#define VHT_N_SERVICE           16	/* bits in SERVICE field */
+#define VHT_N_TAIL               6	/* tail bits per BCC encoder */
+
+
+/* dot11Counters Table - 802.11 spec., Annex D */
+typedef struct d11cnt {
+	uint32		txfrag;		/* dot11TransmittedFragmentCount */
+	uint32		txmulti;	/* dot11MulticastTransmittedFrameCount */
+	uint32		txfail;		/* dot11FailedCount */
+	uint32		txretry;	/* dot11RetryCount */
+	uint32		txretrie;	/* dot11MultipleRetryCount */
+	uint32		rxdup;		/* dot11FrameduplicateCount */
+	uint32		txrts;		/* dot11RTSSuccessCount */
+	uint32		txnocts;	/* dot11RTSFailureCount */
+	uint32		txnoack;	/* dot11ACKFailureCount */
+	uint32		rxfrag;		/* dot11ReceivedFragmentCount */
+	uint32		rxmulti;	/* dot11MulticastReceivedFrameCount */
+	uint32		rxcrc;		/* dot11FCSErrorCount */
+	uint32		txfrmsnt;	/* dot11TransmittedFrameCount */
+	uint32		rxundec;	/* dot11WEPUndecryptableCount */
+} d11cnt_t;
+
+/* OUI for BRCM proprietary IE */
+#define BRCM_PROP_OUI		"\x00\x90\x4C"
+
+
+/* Action frame type for RWL */
+#define RWL_WIFI_DEFAULT		0
+#define RWL_WIFI_FIND_MY_PEER		9 /* Used while finding server */
+#define RWL_WIFI_FOUND_PEER		10 /* Server response to the client  */
+#define RWL_ACTION_WIFI_FRAG_TYPE	85 /* Fragment indicator for receiver */
+
+#define PROXD_AF_TYPE			11 /* Wifi proximity action frame type */
+#define BRCM_RELMACST_AF_TYPE	        12 /* RMC action frame type */
+
+
+/* brcm syscap_ie cap */
+#define BRCM_SYSCAP_WET_TUNNEL	0x0100	/* Device with WET_TUNNEL support */
+
+/* BRCM OUI: Used in the proprietary(221) IE in all broadcom devices */
+#define BRCM_OUI		"\x00\x10\x18"	/* Broadcom OUI */
+
+/* BRCM info element */
+BWL_PRE_PACKED_STRUCT struct brcm_ie {
+	uint8	id;		/* IE ID, 221, DOT11_MNG_PROPR_ID */
+	uint8	len;		/* IE length */
+	uint8	oui[3];		/* Proprietary OUI, BRCM_OUI */
+	uint8	ver;		/* type/ver of this IE */
+	uint8	assoc;		/* # of assoc STAs */
+	uint8	flags;		/* misc flags */
+	uint8	flags1;		/* misc flags */
+	uint16	amsdu_mtu_pref;	/* preferred A-MSDU MTU */
+} BWL_POST_PACKED_STRUCT;
+typedef	struct brcm_ie brcm_ie_t;
+#define BRCM_IE_LEN		11	/* BRCM IE length */
+#define BRCM_IE_VER		2	/* BRCM IE version */
+#define BRCM_IE_LEGACY_AES_VER	1	/* BRCM IE legacy AES version */
+
+/* brcm_ie flags */
+#define	BRF_LZWDS		0x4	/* lazy wds enabled */
+#define	BRF_BLOCKACK		0x8	/* BlockACK capable */
+
+/* brcm_ie flags1 */
+#define	BRF1_AMSDU		0x1	/* A-MSDU capable */
+#define BRF1_WMEPS		0x4	/* AP is capable of handling WME + PS w/o APSD */
+#define BRF1_PSOFIX		0x8	/* AP has fixed PS mode out-of-order packets */
+#define	BRF1_RX_LARGE_AGG	0x10	/* device can rx large aggregates */
+#define BRF1_RFAWARE_DCS	0x20    /* RFAWARE dynamic channel selection (DCS) */
+#define BRF1_SOFTAP		0x40    /* Configure as Broadcom SOFTAP */
+#define BRF1_DWDS		0x80    /* DWDS capable */
+
+/* Vendor IE structure */
+BWL_PRE_PACKED_STRUCT struct vndr_ie {
+	uchar id;
+	uchar len;
+	uchar oui[3];
+	uchar data[1];		/* Variable size data */
+} BWL_POST_PACKED_STRUCT;
+typedef struct vndr_ie vndr_ie_t;
+
+#define VNDR_IE_HDR_LEN		2	/* id + len field */
+#define VNDR_IE_MIN_LEN		3	/* size of the oui field */
+#define VNDR_IE_FIXED_LEN	(VNDR_IE_HDR_LEN + VNDR_IE_MIN_LEN)
+
+#define VNDR_IE_MAX_LEN		255	/* vendor IE max length, without ID and len */
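+
+/* Illustrative sketch, not part of the original header: minimal sanity check
+ * on a vendor IE; len counts everything after the id/len header, so it must
+ * at least cover the 3-byte OUI.
+ */
+static int
+vndr_ie_valid_example(const vndr_ie_t *ie)
+{
+	return (ie->len >= VNDR_IE_MIN_LEN);	/* a uchar len can never exceed VNDR_IE_MAX_LEN */
+}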
+
+/* BRCM PROP DEVICE PRIMARY MAC ADDRESS IE */
+BWL_PRE_PACKED_STRUCT struct member_of_brcm_prop_ie {
+	uchar id;
+	uchar len;
+	uchar oui[3];
+	uint8	type;           /* type indicates what follows */
+	struct ether_addr ea;   /* Device Primary MAC Address */
+} BWL_POST_PACKED_STRUCT;
+typedef struct member_of_brcm_prop_ie member_of_brcm_prop_ie_t;
+
+#define MEMBER_OF_BRCM_PROP_IE_LEN		10	/* IE max length */
+#define MEMBER_OF_BRCM_PROP_IE_HDRLEN	        (sizeof(member_of_brcm_prop_ie_t))
+#define MEMBER_OF_BRCM_PROP_IE_TYPE		54
+
+/* BRCM Reliable Multicast IE */
+BWL_PRE_PACKED_STRUCT struct relmcast_brcm_prop_ie {
+	uint8 id;
+	uint8 len;
+	uint8 oui[3];
+	uint8 type;           /* type indicates what follows */
+	struct ether_addr ea;   /* The ack sender's MAC address */
+	struct ether_addr mcast_ea;  /* The multicast MAC address */
+	uint8 updtmo; /* time interval (seconds) for a client to send a null packet reporting its RSSI */
+} BWL_POST_PACKED_STRUCT;
+typedef struct relmcast_brcm_prop_ie relmcast_brcm_prop_ie_t;
+
+/* IE length: sizeof(relmcast_brcm_prop_ie_t) minus the id and len fields */
+#define RELMCAST_BRCM_PROP_IE_LEN	(sizeof(relmcast_brcm_prop_ie_t)-(2*sizeof(uint8)))
+
+#define RELMCAST_BRCM_PROP_IE_TYPE	55
+
+/* ************* HT definitions. ************* */
+#define MCSSET_LEN	16	/* 16-bits per 8-bit set to give 128-bits bitmap of MCS Index */
+#define MAX_MCS_NUM	(128)	/* max mcs number = 128 */
+
+BWL_PRE_PACKED_STRUCT struct ht_cap_ie {
+	uint16	cap;
+	uint8	params;
+	uint8	supp_mcs[MCSSET_LEN];
+	uint16	ext_htcap;
+	uint32	txbf_cap;
+	uint8	as_cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_cap_ie ht_cap_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_ht_cap_ie {
+	uint8	id;
+	uint8	len;
+	ht_cap_ie_t ht_cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ht_cap_ie dot11_ht_cap_ie_t;
+
+/* CAP IE: the HT 1.0 spec simply stole an 802.11 IE; we use our proprietary IE until this is resolved */
+/* the capability IE is primarily used to convey this node's abilities */
+BWL_PRE_PACKED_STRUCT struct ht_prop_cap_ie {
+	uint8	id;		/* IE ID, 221, DOT11_MNG_PROPR_ID */
+	uint8	len;		/* IE length */
+	uint8	oui[3];		/* Proprietary OUI, BRCM_PROP_OUI */
+	uint8	type;           /* type indicates what follows */
+	ht_cap_ie_t cap_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_prop_cap_ie ht_prop_cap_ie_t;
+
+#define HT_PROP_IE_OVERHEAD	4	/* overhead bytes for prop oui ie */
+#define HT_CAP_IE_LEN		26	/* HT capability len (based on .11n d2.0) */
+#define HT_CAP_IE_TYPE		51
+
+#define HT_CAP_LDPC_CODING	0x0001	/* Support for rx of LDPC coded pkts */
+#define HT_CAP_40MHZ		0x0002  /* FALSE:20Mhz, TRUE:20/40MHZ supported */
+#define HT_CAP_MIMO_PS_MASK	0x000C  /* Mimo PS mask */
+#define HT_CAP_MIMO_PS_SHIFT	0x0002	/* Mimo PS shift */
+#define HT_CAP_MIMO_PS_OFF	0x0003	/* Mimo PS, no restriction */
+#define HT_CAP_MIMO_PS_RTS	0x0001	/* Mimo PS, send RTS/CTS around MIMO frames */
+#define HT_CAP_MIMO_PS_ON	0x0000	/* Mimo PS, MIMO disallowed */
+#define HT_CAP_GF		0x0010	/* Greenfield preamble support */
+#define HT_CAP_SHORT_GI_20	0x0020	/* 20MHZ short guard interval support */
+#define HT_CAP_SHORT_GI_40	0x0040	/* 40Mhz short guard interval support */
+#define HT_CAP_TX_STBC		0x0080	/* Tx STBC support */
+#define HT_CAP_RX_STBC_MASK	0x0300	/* Rx STBC mask */
+#define HT_CAP_RX_STBC_SHIFT	8	/* Rx STBC shift */
+#define HT_CAP_DELAYED_BA	0x0400	/* delayed BA support */
+#define HT_CAP_MAX_AMSDU	0x0800	/* Max AMSDU size in bytes , 0=3839, 1=7935 */
+
+#define HT_CAP_DSSS_CCK	0x1000	/* DSSS/CCK supported by the BSS */
+#define HT_CAP_PSMP		0x2000	/* Power Save Multi Poll support */
+#define HT_CAP_40MHZ_INTOLERANT 0x4000	/* 40MHz Intolerant */
+#define HT_CAP_LSIG_TXOP	0x8000	/* L-SIG TXOP protection support */
+
+#define HT_CAP_RX_STBC_NO		0x0	/* no rx STBC support */
+#define HT_CAP_RX_STBC_ONE_STREAM	0x1	/* rx STBC support of 1 spatial stream */
+#define HT_CAP_RX_STBC_TWO_STREAM	0x2	/* rx STBC support of 1-2 spatial streams */
+#define HT_CAP_RX_STBC_THREE_STREAM	0x3	/* rx STBC support of 1-3 spatial streams */
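+
+/* Illustrative sketch, not part of the original header: decode the MIMO
+ * power-save mode (HT_CAP_MIMO_PS_*) and Rx STBC stream count
+ * (HT_CAP_RX_STBC_*) from a host-order HT capability field.
+ */
+static void
+ht_cap_decode_example(uint16 cap, uint8 *mimo_ps, uint8 *rx_stbc_streams)
+{
+	*mimo_ps = (uint8)((cap & HT_CAP_MIMO_PS_MASK) >> HT_CAP_MIMO_PS_SHIFT);
+	*rx_stbc_streams = (uint8)((cap & HT_CAP_RX_STBC_MASK) >> HT_CAP_RX_STBC_SHIFT);
+}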
+
+
+#define HT_CAP_TXBF_CAP_IMPLICIT_TXBF_RX	0x1
+#define HT_CAP_TXBF_CAP_NDP_TX			0x8
+#define HT_CAP_TXBF_CAP_NDP_RX			0x10
+#define HT_CAP_TXBF_CAP_EXPLICIT_CSI		0x100
+#define HT_CAP_TXBF_CAP_EXPLICIT_NC_STEERING	0x200
+#define HT_CAP_TXBF_CAP_EXPLICIT_C_STEERING	0x400
+#define HT_CAP_TXBF_CAP_EXPLICIT_CSI_FB_MASK	0x1800
+#define HT_CAP_TXBF_CAP_EXPLICIT_CSI_FB_SHIFT	11
+#define HT_CAP_TXBF_CAP_EXPLICIT_NC_FB_MASK	0x6000
+#define HT_CAP_TXBF_CAP_EXPLICIT_NC_FB_SHIFT	13
+#define HT_CAP_TXBF_CAP_EXPLICIT_C_FB_MASK	0x18000
+#define HT_CAP_TXBF_CAP_EXPLICIT_C_FB_SHIFT	15
+#define HT_CAP_TXBF_CAP_CSI_BFR_ANT_SHIFT	19
+#define HT_CAP_TXBF_CAP_NC_BFR_ANT_SHIFT	21
+#define HT_CAP_TXBF_CAP_C_BFR_ANT_SHIFT		23
+#define HT_CAP_TXBF_CAP_C_BFR_ANT_MASK		0x1800000
+
+#define HT_CAP_TXBF_CAP_CHAN_ESTIM_SHIFT	27
+#define HT_CAP_TXBF_CAP_CHAN_ESTIM_MASK		0x18000000
+
+#define HT_CAP_TXBF_FB_TYPE_NONE 	0
+#define HT_CAP_TXBF_FB_TYPE_DELAYED 	1
+#define HT_CAP_TXBF_FB_TYPE_IMMEDIATE 	2
+#define HT_CAP_TXBF_FB_TYPE_BOTH 	3
+
+#define HT_CAP_TX_BF_CAP_EXPLICIT_CSI_FB_MASK	0x400
+#define HT_CAP_TX_BF_CAP_EXPLICIT_CSI_FB_SHIFT	10
+#define HT_CAP_TX_BF_CAP_EXPLICIT_COMPRESSED_FB_MASK 0x18000
+#define HT_CAP_TX_BF_CAP_EXPLICIT_COMPRESSED_FB_SHIFT 15
+
+#define VHT_MAX_MPDU		11454	/* max mpdu size for now (bytes) */
+#define VHT_MPDU_MSDU_DELTA	56		/* Difference in spec - vht mpdu, amsdu len */
+/* Max AMSDU len - per spec */
+#define VHT_MAX_AMSDU		(VHT_MAX_MPDU - VHT_MPDU_MSDU_DELTA)
+
+#define HT_MAX_AMSDU		7935	/* max amsdu size (bytes) per the HT spec */
+#define HT_MIN_AMSDU		3835	/* min amsdu size (bytes) per the HT spec */
+
+#define HT_PARAMS_RX_FACTOR_MASK	0x03	/* ampdu rcv factor mask */
+#define HT_PARAMS_DENSITY_MASK		0x1C	/* ampdu density mask */
+#define HT_PARAMS_DENSITY_SHIFT	2	/* ampdu density shift */
+
+/* HT/AMPDU specific define */
+#define AMPDU_MAX_MPDU_DENSITY  7       /* max mpdu density; in 1/4 usec units */
+#define AMPDU_DENSITY_NONE      0       /* No density requirement */
+#define AMPDU_DENSITY_1over4_US 1       /* 1/4 us density */
+#define AMPDU_DENSITY_1over2_US 2       /* 1/2 us density */
+#define AMPDU_DENSITY_1_US      3       /*   1 us density */
+#define AMPDU_DENSITY_2_US      4       /*   2 us density */
+#define AMPDU_DENSITY_4_US      5       /*   4 us density */
+#define AMPDU_DENSITY_8_US      6       /*   8 us density */
+#define AMPDU_DENSITY_16_US     7       /*  16 us density */
+#define AMPDU_RX_FACTOR_8K      0       /* max rcv ampdu len (8kb) */
+#define AMPDU_RX_FACTOR_16K     1       /* max rcv ampdu len (16kb) */
+#define AMPDU_RX_FACTOR_32K     2       /* max rcv ampdu len (32kb) */
+#define AMPDU_RX_FACTOR_64K     3       /* max rcv ampdu len (64kb) */
+
+/* AMPDU RX factors for VHT rates */
+#define AMPDU_RX_FACTOR_128K    4       /* max rcv ampdu len (128kb) */
+#define AMPDU_RX_FACTOR_256K    5       /* max rcv ampdu len (256kb) */
+#define AMPDU_RX_FACTOR_512K    6       /* max rcv ampdu len (512kb) */
+#define AMPDU_RX_FACTOR_1024K   7       /* max rcv ampdu len (1024kb) */
+
+#define AMPDU_RX_FACTOR_BASE    (8 * 1024)  /* ampdu factor base for rx len */
+#define AMPDU_RX_FACTOR_BASE_PWR	13	/* ampdu factor base for rx len in power of 2 */
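+
+/* Illustrative sketch, not part of the original header: the advertised
+ * maximum Rx A-MPDU length is 2^(AMPDU_RX_FACTOR_BASE_PWR + factor) - 1
+ * octets, so AMPDU_RX_FACTOR_64K (3) yields 65535.
+ */
+static uint32
+ampdu_rx_factor_to_len_example(uint8 rx_factor)
+{
+	return (1U << (AMPDU_RX_FACTOR_BASE_PWR + rx_factor)) - 1;
+}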
+
+#define AMPDU_DELIMITER_LEN	4	/* length of ampdu delimiter */
+#define AMPDU_DELIMITER_LEN_MAX	63	/* max length of ampdu delimiter(enforced in HW) */
+
+#define HT_CAP_EXT_PCO			0x0001
+#define HT_CAP_EXT_PCO_TTIME_MASK	0x0006
+#define HT_CAP_EXT_PCO_TTIME_SHIFT	1
+#define HT_CAP_EXT_MCS_FEEDBACK_MASK	0x0300
+#define HT_CAP_EXT_MCS_FEEDBACK_SHIFT	8
+#define HT_CAP_EXT_HTC			0x0400
+#define HT_CAP_EXT_RD_RESP		0x0800
+
+BWL_PRE_PACKED_STRUCT struct ht_add_ie {
+	uint8	ctl_ch;			/* control channel number */
+	uint8	byte1;			/* ext ch,rec. ch. width, RIFS support */
+	uint16	opmode;			/* operation mode */
+	uint16	misc_bits;		/* misc bits */
+	uint8	basic_mcs[MCSSET_LEN];  /* required MCS set */
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_add_ie ht_add_ie_t;
+
+/* ADD IE: the HT 1.0 spec simply stole an 802.11 IE; we use our proprietary IE until this is resolved */
+/* the additional IE is primarily used to convey the current BSS configuration */
+BWL_PRE_PACKED_STRUCT struct ht_prop_add_ie {
+	uint8	id;		/* IE ID, 221, DOT11_MNG_PROPR_ID */
+	uint8	len;		/* IE length */
+	uint8	oui[3];		/* Proprietary OUI, BRCM_PROP_OUI */
+	uint8	type;		/* indicates what follows */
+	ht_add_ie_t add_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_prop_add_ie ht_prop_add_ie_t;
+
+#define HT_ADD_IE_LEN	22
+#define HT_ADD_IE_TYPE	52
+
+/* byte1 defn's */
+#define HT_BW_ANY		0x04	/* set, STA can use 20 or 40MHz */
+#define HT_RIFS_PERMITTED     	0x08	/* RIFS allowed */
+
+/* opmode defn's */
+#define HT_OPMODE_MASK	        0x0003	/* protection mode mask */
+#define HT_OPMODE_SHIFT		0	/* protection mode shift */
+#define HT_OPMODE_PURE		0x0000	/* protection mode PURE */
+#define HT_OPMODE_OPTIONAL	0x0001	/* protection mode optional */
+#define HT_OPMODE_HT20IN40	0x0002	/* protection mode 20MHz HT in 40MHz BSS */
+#define HT_OPMODE_MIXED	0x0003	/* protection mode Mixed Mode */
+#define HT_OPMODE_NONGF	0x0004	/* protection mode non-GF */
+#define DOT11N_TXBURST		0x0008	/* Tx burst limit */
+#define DOT11N_OBSS_NONHT	0x0010	/* OBSS Non-HT STA present */
+
+/* misc_bits defn's */
+#define HT_BASIC_STBC_MCS	0x007f	/* basic STBC MCS */
+#define HT_DUAL_STBC_PROT	0x0080	/* Dual STBC Protection */
+#define HT_SECOND_BCN		0x0100	/* Secondary beacon support */
+#define HT_LSIG_TXOP		0x0200	/* L-SIG TXOP Protection full support */
+#define HT_PCO_ACTIVE		0x0400	/* PCO active */
+#define HT_PCO_PHASE		0x0800	/* PCO phase */
+#define HT_DUALCTS_PROTECTION	0x0080	/* DUAL CTS protection needed */
+
+/* Tx Burst Limits */
+#define DOT11N_2G_TXBURST_LIMIT	6160	/* 2G band Tx burst limit per 802.11n Draft 1.10 (usec) */
+#define DOT11N_5G_TXBURST_LIMIT	3080	/* 5G band Tx burst limit per 802.11n Draft 1.10 (usec) */
+
+/* Macros for opmode */
+#define GET_HT_OPMODE(add_ie)		((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					>> HT_OPMODE_SHIFT)
+#define HT_MIXEDMODE_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					== HT_OPMODE_MIXED)	/* mixed mode present */
+#define HT_HT20_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					== HT_OPMODE_HT20IN40)	/* 20MHz HT present */
+#define HT_OPTIONAL_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+					== HT_OPMODE_OPTIONAL)	/* Optional protection present */
+#define HT_USE_PROTECTION(add_ie)	(HT_HT20_PRESENT((add_ie)) || \
+					HT_MIXEDMODE_PRESENT((add_ie))) /* use protection */
+#define HT_NONGF_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_NONGF) \
+					== HT_OPMODE_NONGF)	/* non-GF present */
+#define DOT11N_TXBURST_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & DOT11N_TXBURST) \
+					== DOT11N_TXBURST)	/* Tx Burst present */
+#define DOT11N_OBSS_NONHT_PRESENT(add_ie)	((ltoh16_ua(&add_ie->opmode) & DOT11N_OBSS_NONHT) \
+					== DOT11N_OBSS_NONHT)	/* OBSS Non-HT present */
+
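+/* Illustrative sketch, not part of the original header: typical use of the
+ * opmode macros above when deciding whether HT protection is required;
+ * ltoh16_ua comes from the Broadcom endian helpers this file already uses.
+ */
+static int
+ht_need_protection_example(ht_add_ie_t *add_ie)
+{
+	switch (GET_HT_OPMODE(add_ie)) {
+	case HT_OPMODE_MIXED:
+	case HT_OPMODE_HT20IN40:
+		return 1;	/* same result as HT_USE_PROTECTION(add_ie) */
+	default:
+		return 0;
+	}
+}
+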
+BWL_PRE_PACKED_STRUCT struct obss_params {
+	uint16	passive_dwell;
+	uint16	active_dwell;
+	uint16	bss_widthscan_interval;
+	uint16	passive_total;
+	uint16	active_total;
+	uint16	chanwidth_transition_dly;
+	uint16	activity_threshold;
+} BWL_POST_PACKED_STRUCT;
+typedef struct obss_params obss_params_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_ie {
+	uint8	id;
+	uint8	len;
+	obss_params_t obss_params;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_ie dot11_obss_ie_t;
+#define DOT11_OBSS_SCAN_IE_LEN	sizeof(obss_params_t)	/* HT OBSS len (based on 802.11n d3.0) */
+
+/* HT control field */
+#define HT_CTRL_LA_TRQ		0x00000002	/* sounding request */
+#define HT_CTRL_LA_MAI		0x0000003C	/* MCS request or antenna selection indication */
+#define HT_CTRL_LA_MAI_SHIFT	2
+#define HT_CTRL_LA_MAI_MRQ	0x00000004	/* MCS request */
+#define HT_CTRL_LA_MAI_MSI	0x00000038	/* MCS request sequence identifier */
+#define HT_CTRL_LA_MFSI		0x000001C0	/* MFB sequence identifier */
+#define HT_CTRL_LA_MFSI_SHIFT	6
+#define HT_CTRL_LA_MFB_ASELC	0x0000FE00	/* MCS feedback, antenna selection command/data */
+#define HT_CTRL_LA_MFB_ASELC_SH	9
+#define HT_CTRL_LA_ASELC_CMD	0x00000C00	/* ASEL command */
+#define HT_CTRL_LA_ASELC_DATA	0x0000F000	/* ASEL data */
+#define HT_CTRL_CAL_POS		0x00030000	/* Calibration position */
+#define HT_CTRL_CAL_SEQ		0x000C0000	/* Calibration sequence */
+#define HT_CTRL_CSI_STEERING	0x00C00000	/* CSI/Steering */
+#define HT_CTRL_CSI_STEER_SHIFT	22
+#define HT_CTRL_CSI_STEER_NFB	0		/* no feedback required */
+#define HT_CTRL_CSI_STEER_CSI	1		/* CSI, H matrix */
+#define HT_CTRL_CSI_STEER_NCOM	2		/* non-compressed beamforming */
+#define HT_CTRL_CSI_STEER_COM	3		/* compressed beamforming */
+#define HT_CTRL_NDP_ANNOUNCE	0x01000000	/* NDP announcement */
+#define HT_CTRL_AC_CONSTRAINT	0x40000000	/* AC Constraint */
+#define HT_CTRL_RDG_MOREPPDU	0x80000000	/* RDG/More PPDU */
+
+/* ************* VHT definitions. ************* */
+
+/*
+ * VHT Capabilities IE (sec 8.4.2.160)
+ */
+
+BWL_PRE_PACKED_STRUCT struct vht_cap_ie {
+	uint32  vht_cap_info;
+	/* supported MCS set - 64 bit field */
+	uint16	rx_mcs_map;
+	uint16  rx_max_rate;
+	uint16  tx_mcs_map;
+	uint16	tx_max_rate;
+} BWL_POST_PACKED_STRUCT;
+typedef struct vht_cap_ie vht_cap_ie_t;
+
+/* 4B cap_info + 8B supp_mcs */
+#define VHT_CAP_IE_LEN 12
+
+/* VHT Capabilities Info field - 32bit - in VHT Cap IE */
+#define VHT_CAP_INFO_MAX_MPDU_LEN_MASK          0x00000003
+#define VHT_CAP_INFO_SUPP_CHAN_WIDTH_MASK       0x0000000c
+#define VHT_CAP_INFO_LDPC                       0x00000010
+#define VHT_CAP_INFO_SGI_80MHZ                  0x00000020
+#define VHT_CAP_INFO_SGI_160MHZ                 0x00000040
+#define VHT_CAP_INFO_TX_STBC                    0x00000080
+#define VHT_CAP_INFO_RX_STBC_MASK               0x00000700
+#define VHT_CAP_INFO_RX_STBC_SHIFT              8
+#define VHT_CAP_INFO_SU_BEAMFMR                 0x00000800
+#define VHT_CAP_INFO_SU_BEAMFMEE                0x00001000
+#define VHT_CAP_INFO_NUM_BMFMR_ANT_MASK         0x0000e000
+#define VHT_CAP_INFO_NUM_BMFMR_ANT_SHIFT        13
+#define VHT_CAP_INFO_NUM_SOUNDING_DIM_MASK      0x00070000
+#define VHT_CAP_INFO_NUM_SOUNDING_DIM_SHIFT     16
+#define VHT_CAP_INFO_MU_BEAMFMR                 0x00080000
+#define VHT_CAP_INFO_MU_BEAMFMEE                0x00100000
+#define VHT_CAP_INFO_TXOPPS                     0x00200000
+#define VHT_CAP_INFO_HTCVHT                     0x00400000
+#define VHT_CAP_INFO_AMPDU_MAXLEN_EXP_MASK      0x03800000
+#define VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT     23
+#define VHT_CAP_INFO_LINK_ADAPT_CAP_MASK        0x0c000000
+#define VHT_CAP_INFO_LINK_ADAPT_CAP_SHIFT       26
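+
+/* Illustrative sketch, not part of the original header: pull the Rx STBC
+ * stream count and the maximum A-MPDU length exponent out of a host-order
+ * vht_cap_info word using the masks above.
+ */
+static void
+vht_cap_info_decode_example(uint32 cap_info, uint8 *rx_stbc, uint8 *ampdu_exp)
+{
+	*rx_stbc = (uint8)((cap_info & VHT_CAP_INFO_RX_STBC_MASK) >>
+	                   VHT_CAP_INFO_RX_STBC_SHIFT);
+	*ampdu_exp = (uint8)((cap_info & VHT_CAP_INFO_AMPDU_MAXLEN_EXP_MASK) >>
+	                     VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT);
+}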
+
+/* VHT Supported MCS Set - 64-bit - in VHT Cap IE */
+#define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_MASK   0x1fff
+#define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_SHIFT  0
+
+#define VHT_CAP_SUPP_MCS_TX_HIGHEST_RATE_MASK   0x1fff
+#define VHT_CAP_SUPP_MCS_TX_HIGHEST_RATE_SHIFT  0
+
+#define VHT_CAP_MCS_MAP_0_7                     0
+#define VHT_CAP_MCS_MAP_0_8                     1
+#define VHT_CAP_MCS_MAP_0_9                     2
+#define VHT_CAP_MCS_MAP_NONE                    3
+#define VHT_CAP_MCS_MAP_S                       2 /* num bits for 1-stream */
+#define VHT_CAP_MCS_MAP_M                       0x3 /* mask for 1-stream */
+/* assumes VHT_CAP_MCS_MAP_NONE is 3 and 2 bits are used for encoding */
+#define VHT_CAP_MCS_MAP_NONE_ALL                0xffff
+/* mcsmap with MCS0-9 for Nss = 3 */
+#define VHT_CAP_MCS_MAP_0_9_NSS3 \
+	        ((VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(1)) | \
+	         (VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(2)) | \
+	         (VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(3)))
+
+#define VHT_CAP_MCS_MAP_NSS_MAX                 8
+
+/* get mcsmap with given mcs for given nss streams */
+#define VHT_CAP_MCS_MAP_CREATE(mcsmap, nss, mcs) \
+	do { \
+		int i; \
+		for (i = 1; i <= nss; i++) { \
+			VHT_MCS_MAP_SET_MCS_PER_SS(i, mcs, mcsmap); \
+		} \
+	} while (0)
+
+/* Map the mcs code to mcs bit map */
+#define VHT_MCS_CODE_TO_MCS_MAP(mcs_code) \
+	((mcs_code == VHT_CAP_MCS_MAP_0_7) ? 0xff : \
+	 (mcs_code == VHT_CAP_MCS_MAP_0_8) ? 0x1ff : \
+	 (mcs_code == VHT_CAP_MCS_MAP_0_9) ? 0x3ff : 0)
+
+/* Map the mcs bit map to mcs code */
+#define VHT_MCS_MAP_TO_MCS_CODE(mcs_map) \
+	((mcs_map == 0xff)  ? VHT_CAP_MCS_MAP_0_7 : \
+	 (mcs_map == 0x1ff) ? VHT_CAP_MCS_MAP_0_8 : \
+	 (mcs_map == 0x3ff) ? VHT_CAP_MCS_MAP_0_9 : VHT_CAP_MCS_MAP_NONE)
+
+/* VHT Capabilities Supported Channel Width */
+typedef enum vht_cap_chan_width {
+	VHT_CAP_CHAN_WIDTH_SUPPORT_MANDATORY = 0x00,
+	VHT_CAP_CHAN_WIDTH_SUPPORT_160       = 0x04,
+	VHT_CAP_CHAN_WIDTH_SUPPORT_160_8080  = 0x08
+} vht_cap_chan_width_t;
+
+/* VHT Capabilities Supported max MPDU LEN (sec 8.4.2.160.2) */
+typedef enum vht_cap_max_mpdu_len {
+	VHT_CAP_MPDU_MAX_4K     = 0x00,
+	VHT_CAP_MPDU_MAX_8K     = 0x01,
+	VHT_CAP_MPDU_MAX_11K    = 0x02
+} vht_cap_max_mpdu_len_t;
+
+/* Maximum MPDU Length byte counts for the VHT Capabilities advertised limits */
+#define VHT_MPDU_LIMIT_4K        3895
+#define VHT_MPDU_LIMIT_8K        7991
+#define VHT_MPDU_LIMIT_11K      11454
+
+
+/*
+ * VHT Operation IE (sec 8.4.2.161)
+ */
+
+BWL_PRE_PACKED_STRUCT struct vht_op_ie {
+	uint8	chan_width;
+	uint8	chan1;
+	uint8	chan2;
+	uint16	supp_mcs;  /* same definition as the supported MCS set in the VHT cap IE above */
+} BWL_POST_PACKED_STRUCT;
+typedef struct vht_op_ie vht_op_ie_t;
+
+/* 3B VHT Op info + 2B Basic MCS */
+#define VHT_OP_IE_LEN 5
+
+typedef enum vht_op_chan_width {
+	VHT_OP_CHAN_WIDTH_20_40	= 0,
+	VHT_OP_CHAN_WIDTH_80	= 1,
+	VHT_OP_CHAN_WIDTH_160	= 2,
+	VHT_OP_CHAN_WIDTH_80_80	= 3
+} vht_op_chan_width_t;
+
+/* AID length */
+#define AID_IE_LEN		2
+/*
+ * BRCM vht features IE header
+ * The header is the fixed part of the IE.
+ * On the 5GHz band this is the entire IE;
+ * on 2.4GHz the VHT IEs as defined in the 802.11ac
+ * specification follow.
+ *
+ * VHT features rates bitmap:
+ * Bit0:		5G MCS 0-9 support BW 160MHz
+ * Bit1:		5G MCS 0-9 support BW 80MHz
+ * Bit2:		5G MCS 0-9 support BW 20MHz
+ * Bit3:		2.4G MCS 0-9 support BW 20MHz
+ * Bits 4-7:	Reserved for future use
+ */
+#define VHT_FEATURES_IE_TYPE	0x4
+BWL_PRE_PACKED_STRUCT struct vht_features_ie_hdr {
+	uint8 oui[3];		/* Proprietary OUI, BRCM_PROP_OUI */
+	uint8 type;		/* type of this IE = 4 */
+	uint8 rate_mask;	/* VHT rate mask */
+} BWL_POST_PACKED_STRUCT;
+typedef struct vht_features_ie_hdr vht_features_ie_hdr_t;
+
+/* Defs for rx & tx basic MCS maps - each spatial stream number carries 2 bits of info */
+#define VHT_MCS_MAP_GET_SS_IDX(nss) (((nss)-1) * VHT_CAP_MCS_MAP_S)
+#define VHT_MCS_MAP_GET_MCS_PER_SS(nss, mcsMap) \
+	(((mcsMap) >> VHT_MCS_MAP_GET_SS_IDX(nss)) & VHT_CAP_MCS_MAP_M)
+#define VHT_MCS_MAP_SET_MCS_PER_SS(nss, numMcs, mcsMap) \
+	do { \
+	 (mcsMap) &= (~(VHT_CAP_MCS_MAP_M << VHT_MCS_MAP_GET_SS_IDX(nss))); \
+	 (mcsMap) |= (((numMcs) & VHT_CAP_MCS_MAP_M) << VHT_MCS_MAP_GET_SS_IDX(nss)); \
+	} while (0)
+#define VHT_MCS_SS_SUPPORTED(nss, mcsMap) \
+		 (VHT_MCS_MAP_GET_MCS_PER_SS((nss), (mcsMap)) != VHT_CAP_MCS_MAP_NONE)
+
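+/* Illustrative sketch, not part of the original header: build an MCS map
+ * advertising MCS 0-9 on two spatial streams, leaving the remaining streams
+ * marked as unsupported.
+ */
+static uint16
+vht_mcs_map_2ss_example(void)
+{
+	uint16 mcsmap = VHT_CAP_MCS_MAP_NONE_ALL;	/* start with all streams NONE */
+	VHT_CAP_MCS_MAP_CREATE(mcsmap, 2, VHT_CAP_MCS_MAP_0_9);
+	/* VHT_MCS_SS_SUPPORTED(2, mcsmap) is now true; (3, mcsmap) is false */
+	return mcsmap;
+}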
+
+/* ************* WPA definitions. ************* */
+#define WPA_OUI			"\x00\x50\xF2"	/* WPA OUI */
+#define WPA_OUI_LEN		3		/* WPA OUI length */
+#define WPA_OUI_TYPE		1
+#define WPA_VERSION		1		/* WPA version */
+#define WPA2_OUI		"\x00\x0F\xAC"	/* WPA2 OUI */
+#define WPA2_OUI_LEN		3		/* WPA2 OUI length */
+#define WPA2_VERSION		1		/* WPA2 version */
+#define WPA2_VERSION_LEN	2		/* WPA2 version length */
+
+/* ************* WPS definitions. ************* */
+#define WPS_OUI			"\x00\x50\xF2"	/* WPS OUI */
+#define WPS_OUI_LEN		3		/* WPS OUI length */
+#define WPS_OUI_TYPE		4
+
+/* ************* WFA definitions. ************* */
+
+#ifdef P2P_IE_OVRD
+#define WFA_OUI			MAC_OUI
+#else
+#define WFA_OUI			"\x50\x6F\x9A"	/* WFA OUI */
+#endif /* P2P_IE_OVRD */
+#define WFA_OUI_LEN		3		/* WFA OUI length */
+#ifdef P2P_IE_OVRD
+#define WFA_OUI_TYPE_P2P	MAC_OUI_TYPE_P2P
+#else
+#define WFA_OUI_TYPE_TPC	8
+#define WFA_OUI_TYPE_P2P	9
+#endif
+
+#define WFA_OUI_TYPE_TPC	8
+#ifdef WLTDLS
+#define WFA_OUI_TYPE_TPQ	4	/* WFD Tunneled Probe ReQuest */
+#define WFA_OUI_TYPE_TPS	5	/* WFD Tunneled Probe ReSponse */
+#define WFA_OUI_TYPE_WFD	10
+#endif /* WLTDLS */
+#define WFA_OUI_TYPE_HS20	0x10
+
+/* RSN authenticated key management suite */
+#define RSN_AKM_NONE		0	/* None (IBSS) */
+#define RSN_AKM_UNSPECIFIED	1	/* Over 802.1x */
+#define RSN_AKM_PSK		2	/* Pre-shared Key */
+#define RSN_AKM_FBT_1X		3	/* Fast Bss transition using 802.1X */
+#define RSN_AKM_FBT_PSK		4	/* Fast Bss transition using Pre-shared Key */
+#define RSN_AKM_MFP_1X		5	/* SHA256 key derivation, using 802.1X */
+#define RSN_AKM_MFP_PSK		6	/* SHA256 key derivation, using Pre-shared Key */
+#define RSN_AKM_TPK			7	/* TPK(TDLS Peer Key) handshake */
+
+/* Key related defines */
+#define DOT11_MAX_DEFAULT_KEYS	4	/* number of default keys */
+#define DOT11_MAX_IGTK_KEYS		2
+#define DOT11_MAX_KEY_SIZE	32	/* max size of any key */
+#define DOT11_MAX_IV_SIZE	16	/* max size of any IV */
+#define DOT11_EXT_IV_FLAG	(1<<5)	/* flag to indicate IV is > 4 bytes */
+#define DOT11_WPA_KEY_RSC_LEN   8       /* WPA RSC key len */
+
+#define WEP1_KEY_SIZE		5	/* max size of any WEP key */
+#define WEP1_KEY_HEX_SIZE	10	/* size of WEP key in hex. */
+#define WEP128_KEY_SIZE		13	/* max size of any WEP key */
+#define WEP128_KEY_HEX_SIZE	26	/* size of WEP key in hex. */
+#define TKIP_MIC_SIZE		8	/* size of TKIP MIC */
+#define TKIP_EOM_SIZE		7	/* max size of TKIP EOM */
+#define TKIP_EOM_FLAG		0x5a	/* TKIP EOM flag byte */
+#define TKIP_KEY_SIZE		32	/* size of any TKIP key, includes MIC keys */
+#define TKIP_TK_SIZE		16
+#define TKIP_MIC_KEY_SIZE	8
+#define TKIP_MIC_AUTH_TX	16	/* offset to Authenticator MIC TX key */
+#define TKIP_MIC_AUTH_RX	24	/* offset to Authenticator MIC RX key */
+#define TKIP_MIC_SUP_RX		TKIP_MIC_AUTH_TX	/* offset to Supplicant MIC RX key */
+#define TKIP_MIC_SUP_TX		TKIP_MIC_AUTH_RX	/* offset to Supplicant MIC TX key */
+#define AES_KEY_SIZE		16	/* size of AES key */
+#define AES_MIC_SIZE		8	/* size of AES MIC */
+#define BIP_KEY_SIZE		16	/* size of BIP key */
+#define BIP_MIC_SIZE		8   /* size of BIP MIC */
+
+#define AES_GCM_MIC_SIZE	16	/* size of MIC for 128-bit GCM - .11adD9 */
+
+#define AES256_KEY_SIZE		32	/* size of AES 256 key - .11acD5 */
+#define AES256_MIC_SIZE		16	/* size of MIC for 256 bit keys, incl BIP */
+
+/* WCN */
+#define WCN_OUI			"\x00\x50\xf2"	/* WCN OUI */
+#define WCN_TYPE		4	/* WCN type */
+
+#ifdef BCMWAPI_WPI
+#define SMS4_KEY_LEN		16
+#define SMS4_WPI_CBC_MAC_LEN	16
+#endif
+
+/* 802.11r protocol definitions */
+
+/* Mobility Domain IE */
+BWL_PRE_PACKED_STRUCT struct dot11_mdid_ie {
+	uint8 id;
+	uint8 len;
+	uint16 mdid;		/* Mobility Domain Id */
+	uint8 cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mdid_ie dot11_mdid_ie_t;
+
+#define FBT_MDID_CAP_OVERDS	0x01	/* Fast BSS Transition over-the-DS support */
+#define FBT_MDID_CAP_RRP	0x02	/* Resource request protocol support */
+
+/* Fast Bss Transition IE */
+BWL_PRE_PACKED_STRUCT struct dot11_ft_ie {
+	uint8 id;
+	uint8 len;
+	uint16 mic_control;		/* Mic Control */
+	uint8 mic[16];
+	uint8 anonce[32];
+	uint8 snonce[32];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_ie dot11_ft_ie_t;
+
+#define TIE_TYPE_RESERVED		0
+#define TIE_TYPE_REASSOC_DEADLINE	1
+#define TIE_TYPE_KEY_LIEFTIME		2
+#define TIE_TYPE_ASSOC_COMEBACK		3
+BWL_PRE_PACKED_STRUCT struct dot11_timeout_ie {
+	uint8 id;
+	uint8 len;
+	uint8 type;		/* timeout interval type */
+	uint32 value;		/* timeout interval value */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timeout_ie dot11_timeout_ie_t;
+
+/* GTK ie */
+BWL_PRE_PACKED_STRUCT struct dot11_gtk_ie {
+	uint8 id;
+	uint8 len;
+	uint16 key_info;
+	uint8 key_len;
+	uint8 rsc[8];
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_gtk_ie dot11_gtk_ie_t;
+
+/* Management MIC ie */
+BWL_PRE_PACKED_STRUCT struct mmic_ie {
+	uint8   id;					/* IE ID: DOT11_MNG_MMIE_ID */
+	uint8   len;				/* IE length */
+	uint16  key_id;				/* key id */
+	uint8   ipn[6];				/* ipn */
+	uint8   mic[16];			/* mic */
+} BWL_POST_PACKED_STRUCT;
+typedef struct mmic_ie mmic_ie_t;
+
+#define BSSID_INVALID           "\x00\x00\x00\x00\x00\x00"
+#define BSSID_BROADCAST         "\xFF\xFF\xFF\xFF\xFF\xFF"
+
+#ifdef BCMWAPI_WAI
+#define WAPI_IE_MIN_LEN 	20	/* WAPI IE min length */
+#define WAPI_VERSION		1	/* WAPI version */
+#define WAPI_VERSION_LEN	2	/* WAPI version length */
+#define WAPI_OUI		"\x00\x14\x72"	/* WAPI OUI */
+#define WAPI_OUI_LEN		DOT11_OUI_LEN	/* WAPI OUI length */
+#endif /* BCMWAPI_WAI */
+
+/* ************* WMM Parameter definitions. ************* */
+#define WMM_OUI			"\x00\x50\xF2"	/* WMM OUI */
+#define WMM_OUI_LEN		3		/* WMM OUI length */
+#define WMM_OUI_TYPE	2		/* WMM OUI type */
+#define WMM_VERSION		1
+#define WMM_VERSION_LEN	1
+
+/* WMM OUI subtype */
+#define WMM_OUI_SUBTYPE_PARAMETER	1
+#define WMM_PARAMETER_IE_LEN		24
+
+/* Link Identifier Element */
+BWL_PRE_PACKED_STRUCT struct link_id_ie {
+	uint8 id;
+	uint8 len;
+	struct ether_addr	bssid;
+	struct ether_addr	tdls_init_mac;
+	struct ether_addr	tdls_resp_mac;
+} BWL_POST_PACKED_STRUCT;
+typedef struct link_id_ie link_id_ie_t;
+#define TDLS_LINK_ID_IE_LEN		18
+
+/* Link Wakeup Schedule Element */
+BWL_PRE_PACKED_STRUCT struct wakeup_sch_ie {
+	uint8 id;
+	uint8 len;
+	uint32 offset;			/* in ms between TSF0 and start of 1st Awake Window */
+	uint32 interval;		/* in ms between the start of 2 Awake Windows */
+	uint32 awake_win_slots;	/* in backoff slots, duration of Awake Window */
+	uint32 max_wake_win;	/* in ms, max duration of Awake Window */
+	uint16 idle_cnt;		/* number of consecutive Awake Windows */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wakeup_sch_ie wakeup_sch_ie_t;
+#define TDLS_WAKEUP_SCH_IE_LEN		18
+
+/* Channel Switch Timing Element */
+BWL_PRE_PACKED_STRUCT struct channel_switch_timing_ie {
+	uint8 id;
+	uint8 len;
+	uint16 switch_time;		/* in ms, time to switch channels */
+	uint16 switch_timeout;	/* in ms */
+} BWL_POST_PACKED_STRUCT;
+typedef struct channel_switch_timing_ie channel_switch_timing_ie_t;
+#define TDLS_CHANNEL_SWITCH_TIMING_IE_LEN		4
+
+/* PTI Control Element */
+BWL_PRE_PACKED_STRUCT struct pti_control_ie {
+	uint8 id;
+	uint8 len;
+	uint8 tid;
+	uint16 seq_control;
+} BWL_POST_PACKED_STRUCT;
+typedef struct pti_control_ie pti_control_ie_t;
+#define TDLS_PTI_CONTROL_IE_LEN		3
+
+/* PU Buffer Status Element */
+BWL_PRE_PACKED_STRUCT struct pu_buffer_status_ie {
+	uint8 id;
+	uint8 len;
+	uint8 status;
+} BWL_POST_PACKED_STRUCT;
+typedef struct pu_buffer_status_ie pu_buffer_status_ie_t;
+#define TDLS_PU_BUFFER_STATUS_IE_LEN	1
+#define TDLS_PU_BUFFER_STATUS_AC_BK		1
+#define TDLS_PU_BUFFER_STATUS_AC_BE		2
+#define TDLS_PU_BUFFER_STATUS_AC_VI		4
+#define TDLS_PU_BUFFER_STATUS_AC_VO		8
+
+/* TDLS Action Field Values */
+#define TDLS_SETUP_REQ				0
+#define TDLS_SETUP_RESP				1
+#define TDLS_SETUP_CONFIRM			2
+#define TDLS_TEARDOWN				3
+#define TDLS_PEER_TRAFFIC_IND			4
+#define TDLS_CHANNEL_SWITCH_REQ			5
+#define TDLS_CHANNEL_SWITCH_RESP		6
+#define TDLS_PEER_PSM_REQ			7
+#define TDLS_PEER_PSM_RESP			8
+#define TDLS_PEER_TRAFFIC_RESP			9
+#define TDLS_DISCOVERY_REQ			10
+
+/* 802.11z TDLS Public Action Frame action field */
+#define TDLS_DISCOVERY_RESP			14
+
+/* 802.11u GAS action frames */
+#define GAS_REQUEST_ACTION_FRAME				10
+#define GAS_RESPONSE_ACTION_FRAME				11
+#define GAS_COMEBACK_REQUEST_ACTION_FRAME		12
+#define GAS_COMEBACK_RESPONSE_ACTION_FRAME		13
+
+/* 802.11u interworking access network options */
+#define IW_ANT_MASK				0x0f
+#define IW_INTERNET_MASK		0x10
+#define IW_ASRA_MASK			0x20
+#define IW_ESR_MASK				0x40
+#define IW_UESA_MASK			0x80
+
+/* 802.11u interworking access network type */
+#define IW_ANT_PRIVATE_NETWORK					0
+#define IW_ANT_PRIVATE_NETWORK_WITH_GUEST		1
+#define IW_ANT_CHARGEABLE_PUBLIC_NETWORK		2
+#define IW_ANT_FREE_PUBLIC_NETWORK				3
+#define IW_ANT_PERSONAL_DEVICE_NETWORK			4
+#define IW_ANT_EMERGENCY_SERVICES_NETWORK		5
+#define IW_ANT_TEST_NETWORK						14
+#define IW_ANT_WILDCARD_NETWORK					15
+
+/* 802.11u advertisement protocol */
+#define ADVP_ANQP_PROTOCOL_ID	0
+
+/* 802.11u advertisement protocol masks */
+#define ADVP_QRL_MASK					0x7f
+#define ADVP_PAME_BI_MASK				0x80
+
+/* 802.11u advertisement protocol values */
+#define ADVP_QRL_REQUEST				0x00
+#define ADVP_QRL_RESPONSE				0x7f
+#define ADVP_PAME_BI_DEPENDENT			0x00
+#define ADVP_PAME_BI_INDEPENDENT		ADVP_PAME_BI_MASK
+
+/* 802.11u ANQP information ID */
+#define ANQP_ID_QUERY_LIST							256
+#define ANQP_ID_CAPABILITY_LIST						257
+#define ANQP_ID_VENUE_NAME_INFO						258
+#define ANQP_ID_EMERGENCY_CALL_NUMBER_INFO			259
+#define ANQP_ID_NETWORK_AUTHENTICATION_TYPE_INFO	260
+#define ANQP_ID_ROAMING_CONSORTIUM_LIST				261
+#define ANQP_ID_IP_ADDRESS_TYPE_AVAILABILITY_INFO	262
+#define ANQP_ID_NAI_REALM_LIST						263
+#define ANQP_ID_G3PP_CELLULAR_NETWORK_INFO			264
+#define ANQP_ID_AP_GEOSPATIAL_LOCATION				265
+#define ANQP_ID_AP_CIVIC_LOCATION					266
+#define ANQP_ID_AP_LOCATION_PUBLIC_ID_URI			267
+#define ANQP_ID_DOMAIN_NAME_LIST					268
+#define ANQP_ID_EMERGENCY_ALERT_ID_URI				269
+#define ANQP_ID_EMERGENCY_NAI						271
+#define ANQP_ID_VENDOR_SPECIFIC_LIST				56797
+
+/* 802.11u ANQP OUI */
+#define ANQP_OUI_SUBTYPE	9
+
+/* 802.11u venue name */
+#define VENUE_LANGUAGE_CODE_SIZE		3
+#define VENUE_NAME_SIZE					255
+
+/* 802.11u venue groups */
+#define VENUE_UNSPECIFIED				0
+#define VENUE_ASSEMBLY					1
+#define VENUE_BUSINESS					2
+#define VENUE_EDUCATIONAL				3
+#define VENUE_FACTORY					4
+#define VENUE_INSTITUTIONAL				5
+#define VENUE_MERCANTILE				6
+#define VENUE_RESIDENTIAL				7
+#define VENUE_STORAGE					8
+#define VENUE_UTILITY					9
+#define VENUE_VEHICULAR					10
+#define VENUE_OUTDOOR					11
+
+/* 802.11u network authentication type indicator */
+#define NATI_ACCEPTANCE_OF_TERMS_CONDITIONS			0
+#define NATI_ONLINE_ENROLLMENT_SUPPORTED			1
+#define NATI_HTTP_HTTPS_REDIRECTION					2
+#define NATI_DNS_REDIRECTION						3
+
+/* 802.11u IP address type availability - IPv6 */
+#define IPA_IPV6_SHIFT						0
+#define IPA_IPV6_MASK						(0x03 << IPA_IPV6_SHIFT)
+#define	IPA_IPV6_NOT_AVAILABLE				0x00
+#define IPA_IPV6_AVAILABLE					0x01
+#define IPA_IPV6_UNKNOWN_AVAILABILITY		0x02
+
+/* 802.11u IP address type availability - IPv4 */
+#define IPA_IPV4_SHIFT						2
+#define IPA_IPV4_MASK						(0x3f << IPA_IPV4_SHIFT)
+#define	IPA_IPV4_NOT_AVAILABLE				0x00
+#define IPA_IPV4_PUBLIC						0x01
+#define IPA_IPV4_PORT_RESTRICT				0x02
+#define IPA_IPV4_SINGLE_NAT					0x03
+#define IPA_IPV4_DOUBLE_NAT					0x04
+#define IPA_IPV4_PORT_RESTRICT_SINGLE_NAT	0x05
+#define IPA_IPV4_PORT_RESTRICT_DOUBLE_NAT	0x06
+#define IPA_IPV4_UNKNOWN_AVAILABILITY		0x07
+
+/* 802.11u NAI realm encoding */
+#define REALM_ENCODING_RFC4282	0
+#define REALM_ENCODING_UTF8		1
+
+/* 802.11u IANA EAP method type numbers */
+#define REALM_EAP_TLS					13
+#define REALM_EAP_SIM					18
+#define REALM_EAP_TTLS					21
+#define REALM_EAP_AKA					23
+#define REALM_EAP_PSK					47
+#define REALM_EAP_AKAP					50
+#define REALM_EAP_EXPANDED				254
+
+/* 802.11u authentication ID */
+#define REALM_EXPANDED_EAP						1
+#define REALM_NON_EAP_INNER_AUTHENTICATION		2
+#define REALM_INNER_AUTHENTICATION_EAP			3
+#define REALM_EXPANDED_INNER_EAP				4
+#define REALM_CREDENTIAL						5
+#define REALM_TUNNELED_EAP_CREDENTIAL			6
+#define REALM_VENDOR_SPECIFIC_EAP				221
+
+/* 802.11u non-EAP inner authentication type */
+#define REALM_PAP					1
+#define REALM_CHAP					2
+#define REALM_MSCHAP				3
+#define REALM_MSCHAPV2				4
+
+/* 802.11u credential type */
+#define REALM_SIM					1
+#define REALM_USIM					2
+#define REALM_NFC					3
+#define REALM_HARDWARE_TOKEN		4
+#define REALM_SOFTOKEN				5
+#define REALM_CERTIFICATE			6
+#define REALM_USERNAME_PASSWORD		7
+#define REALM_SERVER_SIDE			8
+
+/* 802.11u 3GPP PLMN */
+#define G3PP_GUD_VERSION		0
+#define G3PP_PLMN_LIST_IE		0
+
+/* hotspot2.0 indication element (vendor specific) */
+BWL_PRE_PACKED_STRUCT struct hs20_ie {
+	uint8 oui[3];
+	uint8 type;
+	uint8 config;
+} BWL_POST_PACKED_STRUCT;
+typedef struct hs20_ie hs20_ie_t;
+#define HS20_IE_LEN 5	/* HS20 IE length */
+
+/* IEEE 802.11 Annex E */
+typedef enum {
+	DOT11_2GHZ_20MHZ_CLASS_12		= 81,	/* Ch 1-11			 */
+	DOT11_5GHZ_20MHZ_CLASS_1		= 115,	/* Ch 36-48			 */
+	DOT11_5GHZ_20MHZ_CLASS_2_DFS	= 118,	/* Ch 52-64			 */
+	DOT11_5GHZ_20MHZ_CLASS_3		= 124,	/* Ch 149-161		 */
+	DOT11_5GHZ_20MHZ_CLASS_4_DFS	= 121,	/* Ch 100-140		 */
+	DOT11_5GHZ_20MHZ_CLASS_5		= 125,	/* Ch 149-165		 */
+	DOT11_5GHZ_40MHZ_CLASS_22		= 116,	/* Ch 36-44,   lower */
+	DOT11_5GHZ_40MHZ_CLASS_23_DFS 	= 119,	/* Ch 52-60,   lower */
+	DOT11_5GHZ_40MHZ_CLASS_24_DFS	= 122,	/* Ch 100-132, lower */
+	DOT11_5GHZ_40MHZ_CLASS_25		= 126,	/* Ch 149-157, lower */
+	DOT11_5GHZ_40MHZ_CLASS_27		= 117,	/* Ch 40-48,   upper */
+	DOT11_5GHZ_40MHZ_CLASS_28_DFS	= 120,	/* Ch 56-64,   upper */
+	DOT11_5GHZ_40MHZ_CLASS_29_DFS	= 123,	/* Ch 104-136, upper */
+	DOT11_5GHZ_40MHZ_CLASS_30		= 127,	/* Ch 153-161, upper */
+	DOT11_2GHZ_40MHZ_CLASS_32		= 83,	/* Ch 1-7,     lower */
+	DOT11_2GHZ_40MHZ_CLASS_33		= 84,	/* Ch 5-11,    upper */
+} dot11_op_class_t;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _802_11_H_ */
diff --git a/drivers/staging/edison-bcm43340/common/include/proto/802.11_bta.h b/drivers/staging/edison-bcm43340/common/include/proto/802.11_bta.h
new file mode 100644
index 0000000..19d898b
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/common/include/proto/802.11_bta.h
@@ -0,0 +1,45 @@
+/*
+ * BT-AMP (BlueTooth Alternate Mac and Phy) 802.11 PAL (Protocol Adaptation Layer)
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: 802.11_bta.h 382882 2013-02-04 23:24:31Z $
+*/
+
+#ifndef _802_11_BTA_H_
+#define _802_11_BTA_H_
+
+#define BT_SIG_SNAP_MPROT		"\xAA\xAA\x03\x00\x19\x58"
+
+/* BT-AMP 802.11 PAL Protocols */
+#define BTA_PROT_L2CAP				1
+#define	BTA_PROT_ACTIVITY_REPORT		2
+#define BTA_PROT_SECURITY			3
+#define BTA_PROT_LINK_SUPERVISION_REQUEST	4
+#define BTA_PROT_LINK_SUPERVISION_REPLY		5
+
+/* BT-AMP 802.11 PAL AMP_ASSOC Type IDs */
+#define BTA_TYPE_ID_MAC_ADDRESS			1
+#define BTA_TYPE_ID_PREFERRED_CHANNELS		2
+#define BTA_TYPE_ID_CONNECTED_CHANNELS		3
+#define BTA_TYPE_ID_CAPABILITIES		4
+#define BTA_TYPE_ID_VERSION			5
+#endif /* _802_11_BTA_H_ */
diff --git a/drivers/staging/edison-bcm43340/common/include/proto/802.11e.h b/drivers/staging/edison-bcm43340/common/include/proto/802.11e.h
new file mode 100644
index 0000000..f990f21
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/common/include/proto/802.11e.h
@@ -0,0 +1,132 @@
+/*
+ * 802.11e protocol header file
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: 802.11e.h 382883 2013-02-04 23:26:09Z $
+ */
+
+#ifndef _802_11e_H_
+#define _802_11e_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* WME Traffic Specification (TSPEC) element */
+#define WME_TSPEC_HDR_LEN           2           /* WME TSPEC header length */
+#define WME_TSPEC_BODY_OFF          2           /* WME TSPEC body offset */
+
+#define WME_CATEGORY_CODE_OFFSET	0		/* WME Category code offset */
+#define WME_ACTION_CODE_OFFSET		1		/* WME Action code offset */
+#define WME_TOKEN_CODE_OFFSET		2		/* WME Token code offset */
+#define WME_STATUS_CODE_OFFSET		3		/* WME Status code offset */
+
+BWL_PRE_PACKED_STRUCT struct tsinfo {
+	uint8 octets[3];
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct tsinfo tsinfo_t;
+
+/* 802.11e TSPEC IE */
+typedef BWL_PRE_PACKED_STRUCT struct tspec {
+	uint8 oui[DOT11_OUI_LEN];	/* WME_OUI */
+	uint8 type;					/* WME_TYPE */
+	uint8 subtype;				/* WME_SUBTYPE_TSPEC */
+	uint8 version;				/* WME_VERSION */
+	tsinfo_t tsinfo;			/* TS Info bit field */
+	uint16 nom_msdu_size;		/* (Nominal or fixed) MSDU Size (bytes) */
+	uint16 max_msdu_size;		/* Maximum MSDU Size (bytes) */
+	uint32 min_srv_interval;	/* Minimum Service Interval (us) */
+	uint32 max_srv_interval;	/* Maximum Service Interval (us) */
+	uint32 inactivity_interval;	/* Inactivity Interval (us) */
+	uint32 suspension_interval; /* Suspension Interval (us) */
+	uint32 srv_start_time;		/* Service Start Time (us) */
+	uint32 min_data_rate;		/* Minimum Data Rate (bps) */
+	uint32 mean_data_rate;		/* Mean Data Rate (bps) */
+	uint32 peak_data_rate;		/* Peak Data Rate (bps) */
+	uint32 max_burst_size;		/* Maximum Burst Size (bytes) */
+	uint32 delay_bound;			/* Delay Bound (us) */
+	uint32 min_phy_rate;		/* Minimum PHY Rate (bps) */
+	uint16 surplus_bw;			/* Surplus Bandwidth Allowance (range 1.0-8.0) */
+	uint16 medium_time;			/* Medium Time (32 us/s periods) */
+} BWL_POST_PACKED_STRUCT tspec_t;
+
+#define WME_TSPEC_LEN	(sizeof(tspec_t))		/* not including 2 bytes of header */
+
+/* ts_info */
+/* 802.1D priority is duplicated - bits 13-11 AND bits 3-1 */
+#define TS_INFO_TID_SHIFT		1	/* TS info. TID shift */
+#define TS_INFO_TID_MASK		(0xf << TS_INFO_TID_SHIFT)	/* TS info. TID mask */
+#define TS_INFO_CONTENTION_SHIFT	7	/* TS info. contention shift */
+#define TS_INFO_CONTENTION_MASK	(0x1 << TS_INFO_CONTENTION_SHIFT) /* TS info. contention mask */
+#define TS_INFO_DIRECTION_SHIFT	5	/* TS info. direction shift */
+#define TS_INFO_DIRECTION_MASK	(0x3 << TS_INFO_DIRECTION_SHIFT) /* TS info. direction mask */
+#define TS_INFO_PSB_SHIFT		2		/* TS info. PSB bit Shift */
+#define TS_INFO_PSB_MASK		(1 << TS_INFO_PSB_SHIFT)	/* TS info. PSB mask */
+#define TS_INFO_UPLINK			(0 << TS_INFO_DIRECTION_SHIFT)	/* TS info. uplink */
+#define TS_INFO_DOWNLINK		(1 << TS_INFO_DIRECTION_SHIFT)	/* TS info. downlink */
+#define TS_INFO_BIDIRECTIONAL	(3 << TS_INFO_DIRECTION_SHIFT)	/* TS info. bidirectional */
+#define TS_INFO_USER_PRIO_SHIFT	3	/* TS info. user priority shift */
+/* TS info. user priority mask */
+#define TS_INFO_USER_PRIO_MASK	(0x7 << TS_INFO_USER_PRIO_SHIFT)
+
+/* Macro to get/set bit(s) field in TSINFO */
+#define WLC_CAC_GET_TID(pt)	((((pt).octets[0]) & TS_INFO_TID_MASK) >> TS_INFO_TID_SHIFT)
+#define WLC_CAC_GET_DIR(pt)	((((pt).octets[0]) & \
+	TS_INFO_DIRECTION_MASK) >> TS_INFO_DIRECTION_SHIFT)
+#define WLC_CAC_GET_PSB(pt)	((((pt).octets[1]) & TS_INFO_PSB_MASK) >> TS_INFO_PSB_SHIFT)
+#define WLC_CAC_GET_USER_PRIO(pt)	((((pt).octets[1]) & \
+	TS_INFO_USER_PRIO_MASK) >> TS_INFO_USER_PRIO_SHIFT)
+
+#define WLC_CAC_SET_TID(pt, id)	((((pt).octets[0]) & (~TS_INFO_TID_MASK)) | \
+	((id) << TS_INFO_TID_SHIFT))
+#define WLC_CAC_SET_USER_PRIO(pt, prio)	((((pt).octets[1]) & (~TS_INFO_USER_PRIO_MASK)) | \
+	((prio) << TS_INFO_USER_PRIO_SHIFT))
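+/*
+ * Illustrative sketch (hypothetical tspec pointer 'ts'): decoding a received
+ * TS Info field. Note the SET macros evaluate to the updated octet value and
+ * do not write in place.
+ *
+ *	tsinfo_t ti = ts->tsinfo;
+ *	uint8 tid  = WLC_CAC_GET_TID(ti);	// bits 1-4 of octet 0
+ *	uint8 prio = WLC_CAC_GET_USER_PRIO(ti);	// bits 3-5 of octet 1
+ *	ti.octets[0] = (uint8)WLC_CAC_SET_TID(ti, 6);	// assign the result back
+ */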
+
+/* 802.11e QBSS Load IE */
+#define QBSS_LOAD_IE_LEN		5	/* QBSS Load IE length */
+#define QBSS_LOAD_AAC_OFF		3	/* AAC offset in IE */
+
+#define CAC_ADDTS_RESP_TIMEOUT		1000	/* default ADDTS response timeout in ms */
+						/* DEFVAL dot11ADDTSResponseTimeout = 1s */
+
+/* 802.11e ADDTS status code */
+#define DOT11E_STATUS_ADMISSION_ACCEPTED	0	/* TSPEC Admission accepted status */
+#define DOT11E_STATUS_ADDTS_INVALID_PARAM	1	/* TSPEC invalid parameter status */
+#define DOT11E_STATUS_ADDTS_REFUSED_NSBW	3	/* ADDTS refused (insufficient bandwidth) */
+#define DOT11E_STATUS_ADDTS_REFUSED_AWHILE	47	/* ADDTS refused but could retry later */
+
+/* 802.11e DELTS status code */
+#define DOT11E_STATUS_QSTA_LEAVE_QBSS		36	/* STA leave QBSS */
+#define DOT11E_STATUS_END_TS				37	/* END TS */
+#define DOT11E_STATUS_UNKNOWN_TS			38	/* UNKNOWN TS */
+#define DOT11E_STATUS_QSTA_REQ_TIMEOUT		39	/* STA ADDTS request timeout */
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _802_11e_H_ */
diff --git a/drivers/staging/edison-bcm43340/common/include/proto/802.1d.h b/drivers/staging/edison-bcm43340/common/include/proto/802.1d.h
new file mode 100644
index 0000000..17a5430
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/common/include/proto/802.1d.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental types and constants relating to 802.1D
+ *
+ * $Id: 802.1d.h 382882 2013-02-04 23:24:31Z $
+ */
+
+#ifndef _802_1_D_
+#define _802_1_D_
+
+/* 802.1D priority defines */
+#define	PRIO_8021D_NONE		2	/* None = - */
+#define	PRIO_8021D_BK		1	/* BK - Background */
+#define	PRIO_8021D_BE		0	/* BE - Best-effort */
+#define	PRIO_8021D_EE		3	/* EE - Excellent-effort */
+#define	PRIO_8021D_CL		4	/* CL - Controlled Load */
+#define	PRIO_8021D_VI		5	/* Vi - Video */
+#define	PRIO_8021D_VO		6	/* Vo - Voice */
+#define	PRIO_8021D_NC		7	/* NC - Network Control */
+#define	MAXPRIO			7	/* 0-7 */
+#define NUMPRIO			(MAXPRIO + 1)
+
+#define ALLPRIO		-1	/* All priorities */
+
+/* Converts prio to precedence, since the numerical values of
+ * PRIO_8021D_BE and PRIO_8021D_NONE are swapped.
+ */
+#define PRIO2PREC(prio) \
+	(((prio) == PRIO_8021D_NONE || (prio) == PRIO_8021D_BE) ? ((prio) ^ 2) : (prio))
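+/*
+ * Illustrative: PRIO2PREC(PRIO_8021D_BE) == 2 and PRIO2PREC(PRIO_8021D_NONE) == 0,
+ * while every other priority maps to itself, giving the monotonic precedence
+ * order NONE < BK < BE < EE < CL < VI < VO < NC.
+ */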
+
+#endif /* _802_1_D_ */
diff --git a/drivers/staging/edison-bcm43340/common/include/proto/802.3.h b/drivers/staging/edison-bcm43340/common/include/proto/802.3.h
new file mode 100644
index 0000000..841e6da
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/common/include/proto/802.3.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental constants relating to 802.3
+ *
+ * $Id: 802.3.h 417943 2013-08-13 07:54:04Z $
+ */
+
+#ifndef _802_3_h_
+#define _802_3_h_
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define SNAP_HDR_LEN	6	/* 802.3 SNAP header length */
+#define DOT3_OUI_LEN	3	/* 802.3 oui length */
+
+BWL_PRE_PACKED_STRUCT struct dot3_mac_llc_snap_header {
+	uint8	ether_dhost[ETHER_ADDR_LEN];	/* dest mac */
+	uint8	ether_shost[ETHER_ADDR_LEN];	/* src mac */
+	uint16	length;				/* frame length incl header */
+	uint8	dsap;				/* always 0xAA */
+	uint8	ssap;				/* always 0xAA */
+	uint8	ctl;				/* always 0x03 */
+	uint8	oui[DOT3_OUI_LEN];		/* RFC1042: 0x00 0x00 0x00
+						 * Bridge-Tunnel: 0x00 0x00 0xF8
+						 */
+	uint16	type;				/* ethertype */
+} BWL_POST_PACKED_STRUCT;
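+/*
+ * Illustrative sketch (assumes an ntoh16 helper such as the one in
+ * bcmendian.h): a frame carries this 802.3/SNAP header, rather than a plain
+ * Ethernet II header, when the 16-bit field after the MAC addresses is a
+ * length (< 0x600) instead of an ethertype and the LLC bytes match.
+ *
+ *	struct dot3_mac_llc_snap_header *h = (struct dot3_mac_llc_snap_header *)frame;
+ *	if (ntoh16(h->length) < 0x600 && h->dsap == 0xAA && h->ssap == 0xAA)
+ *		// SNAP frame: the real protocol is h->type, qualified by h->oui
+ */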
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif	/* #ifndef _802_3_h_ */
diff --git a/drivers/staging/edison-bcm43340/common/include/proto/bcmeth.h b/drivers/staging/edison-bcm43340/common/include/proto/bcmeth.h
new file mode 100644
index 0000000..50d9bdc
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/common/include/proto/bcmeth.h
@@ -0,0 +1,112 @@
+/*
+ * Broadcom Ethernet-type protocol definitions
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmeth.h 382882 2013-02-04 23:24:31Z $
+ */
+
+/*
+ * Broadcom Ethernet protocol defines
+ */
+
+#ifndef _BCMETH_H_
+#define _BCMETH_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* ETHER_TYPE_BRCM is defined in ethernet.h */
+
+/*
+ * Following the 2-byte BRCM ether_type is a 16-bit BRCM subtype field
+ * in one of two formats: (only subtypes 32768-65535 are in use now)
+ *
+ * subtypes 0-32767:
+ *     8 bit subtype (0-127)
+ *     8 bit length in bytes (0-255)
+ *
+ * subtypes 32768-65535:
+ *     16 bit big-endian subtype
+ *     16 bit big-endian length in bytes (0-65535)
+ *
+ * length is the number of additional bytes beyond the 4 or 6 byte header
+ *
+ * Reserved values:
+ * 0 reserved
+ * 5-15 reserved for iLine protocol assignments
+ * 17-126 reserved, assignable
+ * 127 reserved
+ * 32768 reserved
+ * 32769-65534 reserved, assignable
+ * 65535 reserved
+ */
+
+/*
+ * When adding subtypes and their specific processing code, make sure
+ * bcmeth_hdr_t is the first data structure in the user-specific data structure definition
+ */
+
+#define	BCMILCP_SUBTYPE_RATE		1
+#define	BCMILCP_SUBTYPE_LINK		2
+#define	BCMILCP_SUBTYPE_CSA		3
+#define	BCMILCP_SUBTYPE_LARQ		4
+#define BCMILCP_SUBTYPE_VENDOR		5
+#define	BCMILCP_SUBTYPE_FLH		17
+
+#define BCMILCP_SUBTYPE_VENDOR_LONG	32769
+#define BCMILCP_SUBTYPE_CERT		32770
+#define BCMILCP_SUBTYPE_SES		32771
+
+
+#define BCMILCP_BCM_SUBTYPE_RESERVED		0
+#define BCMILCP_BCM_SUBTYPE_EVENT		1
+#define BCMILCP_BCM_SUBTYPE_SES			2
+/*
+ * The EAPOL type is not used anymore. Instead EAPOL messages are now embedded
+ * within BCMILCP_BCM_SUBTYPE_EVENT type messages
+ */
+/* #define BCMILCP_BCM_SUBTYPE_EAPOL		3 */
+#define BCMILCP_BCM_SUBTYPE_DPT			4
+
+#define BCMILCP_BCM_SUBTYPEHDR_MINLENGTH	8
+#define BCMILCP_BCM_SUBTYPEHDR_VERSION		0
+
+/* These fields are stored in network order */
+typedef BWL_PRE_PACKED_STRUCT struct bcmeth_hdr
+{
+	uint16	subtype;	/* vendor specific, BCMILCP_SUBTYPE_VENDOR_LONG (32769) */
+	uint16	length;
+	uint8	version;	/* Version is 0 */
+	uint8	oui[3];		/* Broadcom OUI */
+	/* user specific Data */
+	uint16	usr_subtype;
+} BWL_POST_PACKED_STRUCT bcmeth_hdr_t;
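+/*
+ * Illustrative sketch (assumes an ntoh16 helper such as the one in
+ * bcmendian.h): validating a received Broadcom header before trusting the
+ * payload, since every field above is network order.
+ *
+ *	bcmeth_hdr_t *bh = (bcmeth_hdr_t *)body;
+ *	if (ntoh16(bh->subtype) == BCMILCP_SUBTYPE_VENDOR_LONG &&
+ *	    ntoh16(bh->length) >= BCMILCP_BCM_SUBTYPEHDR_MINLENGTH &&
+ *	    bh->version == BCMILCP_BCM_SUBTYPEHDR_VERSION &&
+ *	    ntoh16(bh->usr_subtype) == BCMILCP_BCM_SUBTYPE_EVENT)
+ *		// payload is a wl_event_msg_t (see proto/bcmevent.h)
+ */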
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif	/*  _BCMETH_H_ */
diff --git a/drivers/staging/edison-bcm43340/common/include/proto/bcmevent.h b/drivers/staging/edison-bcm43340/common/include/proto/bcmevent.h
new file mode 100644
index 0000000..13104d6
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/common/include/proto/bcmevent.h
@@ -0,0 +1,420 @@
+/*
+ * Broadcom Event  protocol definitions
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Dependencies: proto/bcmeth.h
+ *
+ * $Id: bcmevent.h 472452 2014-04-24 01:04:46Z $
+ *
+ */
+
+/*
+ * Broadcom Ethernet Events protocol defines
+ *
+ */
+
+#ifndef _BCMEVENT_H_
+#define _BCMEVENT_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+/* #include <ethernet.h> -- TODO: req., excluded due to overwhelming coupling (break up ethernet.h) */
+#include <proto/bcmeth.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define BCM_EVENT_MSG_VERSION		2	/* wl_event_msg_t struct version */
+#define BCM_MSG_IFNAME_MAX		16	/* max length of interface name */
+
+/* flags */
+#define WLC_EVENT_MSG_LINK		0x01	/* link is up */
+#define WLC_EVENT_MSG_FLUSHTXQ		0x02	/* flush tx queue on MIC error */
+#define WLC_EVENT_MSG_GROUP		0x04	/* group MIC error */
+#define WLC_EVENT_MSG_UNKBSS		0x08	/* unknown source bsscfg */
+#define WLC_EVENT_MSG_UNKIF		0x10	/* unknown source OS i/f */
+
+/* these fields are stored in network order */
+
+/* version 1 */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint16	version;
+	uint16	flags;			/* see flags below */
+	uint32	event_type;		/* Message (see below) */
+	uint32	status;			/* Status code (see below) */
+	uint32	reason;			/* Reason code (if applicable) */
+	uint32	auth_type;		/* WLC_E_AUTH */
+	uint32	datalen;		/* data buf */
+	struct ether_addr	addr;	/* Station address (if applicable) */
+	char	ifname[BCM_MSG_IFNAME_MAX]; /* name of the packet incoming interface */
+} BWL_POST_PACKED_STRUCT wl_event_msg_v1_t;
+
+/* the current version */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint16	version;
+	uint16	flags;			/* see flags below */
+	uint32	event_type;		/* Message (see below) */
+	uint32	status;			/* Status code (see below) */
+	uint32	reason;			/* Reason code (if applicable) */
+	uint32	auth_type;		/* WLC_E_AUTH */
+	uint32	datalen;		/* data buf */
+	struct ether_addr	addr;	/* Station address (if applicable) */
+	char	ifname[BCM_MSG_IFNAME_MAX]; /* name of the packet incoming interface */
+	uint8	ifidx;			/* destination OS i/f index */
+	uint8	bsscfgidx;		/* source bsscfg index */
+} BWL_POST_PACKED_STRUCT wl_event_msg_t;
+
+/* used by driver msgs */
+typedef BWL_PRE_PACKED_STRUCT struct bcm_event {
+	struct ether_header eth;
+	bcmeth_hdr_t		bcm_hdr;
+	wl_event_msg_t		event;
+	/* data portion follows */
+} BWL_POST_PACKED_STRUCT bcm_event_t;
+
+#define BCM_MSG_LEN	(sizeof(bcm_event_t) - sizeof(bcmeth_hdr_t) - sizeof(struct ether_header))
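+/*
+ * Illustrative sketch (assumes ntoh32 from bcmendian.h; 'pktdata' is a
+ * hypothetical received frame): unpacking a driver event. The embedded
+ * wl_event_msg_t fields are network order, so convert before use.
+ *
+ *	bcm_event_t *be = (bcm_event_t *)pktdata;
+ *	uint32 type = ntoh32(be->event.event_type);	// e.g. WLC_E_LINK
+ *	uint32 dlen = ntoh32(be->event.datalen);
+ *	uint8 *data = (uint8 *)&be[1];	// dlen bytes of event data follow
+ */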
+
+/* Event messages */
+#define WLC_E_SET_SSID		0	/* indicates status of set SSID */
+#define WLC_E_JOIN		1	/* differentiates join IBSS from found (WLC_E_START) IBSS */
+#define WLC_E_START		2	/* STA started an IBSS or AP started a BSS */
+#define WLC_E_AUTH		3	/* 802.11 AUTH request */
+#define WLC_E_AUTH_IND		4	/* 802.11 AUTH indication */
+#define WLC_E_DEAUTH		5	/* 802.11 DEAUTH request */
+#define WLC_E_DEAUTH_IND	6	/* 802.11 DEAUTH indication */
+#define WLC_E_ASSOC		7	/* 802.11 ASSOC request */
+#define WLC_E_ASSOC_IND		8	/* 802.11 ASSOC indication */
+#define WLC_E_REASSOC		9	/* 802.11 REASSOC request */
+#define WLC_E_REASSOC_IND	10	/* 802.11 REASSOC indication */
+#define WLC_E_DISASSOC		11	/* 802.11 DISASSOC request */
+#define WLC_E_DISASSOC_IND	12	/* 802.11 DISASSOC indication */
+#define WLC_E_QUIET_START	13	/* 802.11h Quiet period started */
+#define WLC_E_QUIET_END		14	/* 802.11h Quiet period ended */
+#define WLC_E_BEACON_RX		15	/* BEACONS received/lost indication */
+#define WLC_E_LINK		16	/* generic link indication */
+#define WLC_E_MIC_ERROR		17	/* TKIP MIC error occurred */
+#define WLC_E_NDIS_LINK		18	/* NDIS style link indication */
+#define WLC_E_ROAM		19	/* roam attempt occurred: indicate status & reason */
+#define WLC_E_TXFAIL		20	/* change in dot11FailedCount (txfail) */
+#define WLC_E_PMKID_CACHE	21	/* WPA2 pmkid cache indication */
+#define WLC_E_RETROGRADE_TSF	22	/* current AP's TSF value went backward */
+#define WLC_E_PRUNE		23	/* AP was pruned from join list for reason */
+#define WLC_E_AUTOAUTH		24	/* report AutoAuth table entry match for join attempt */
+#define WLC_E_EAPOL_MSG		25	/* Event encapsulating an EAPOL message */
+#define WLC_E_SCAN_COMPLETE	26	/* Scan results are ready or scan was aborted */
+#define WLC_E_ADDTS_IND		27	/* indicate to host addts fail/success */
+#define WLC_E_DELTS_IND		28	/* indicate to host delts fail/success */
+#define WLC_E_BCNSENT_IND	29	/* indicate to host of beacon transmit */
+#define WLC_E_BCNRX_MSG		30	/* Send the received beacon up to the host */
+#define WLC_E_BCNLOST_MSG	31	/* indicate to host loss of beacon */
+#define WLC_E_ROAM_PREP		32	/* before attempting to roam */
+#define WLC_E_PFN_NET_FOUND	33	/* PFN network found event */
+#define WLC_E_PFN_NET_LOST	34	/* PFN network lost event */
+#define WLC_E_RESET_COMPLETE	35
+#define WLC_E_JOIN_START	36
+#define WLC_E_ROAM_START	37
+#define WLC_E_ASSOC_START	38
+#define WLC_E_IBSS_ASSOC	39
+#define WLC_E_RADIO		40
+#define WLC_E_PSM_WATCHDOG	41	/* PSM microcode watchdog fired */
+#define WLC_E_PROBREQ_MSG       44      /* probe request received */
+#define WLC_E_SCAN_CONFIRM_IND  45
+#define WLC_E_PSK_SUP		46	/* WPA Handshake fail */
+#define WLC_E_COUNTRY_CODE_CHANGED	47
+#define	WLC_E_EXCEEDED_MEDIUM_TIME	48	/* WMMAC exceeded medium time */
+#define WLC_E_ICV_ERROR		49	/* WEP ICV error occurred */
+#define WLC_E_UNICAST_DECODE_ERROR	50	/* Unsupported unicast encrypted frame */
+#define WLC_E_MULTICAST_DECODE_ERROR	51	/* Unsupported multicast encrypted frame */
+#define WLC_E_TRACE		52
+#define WLC_E_IF		54	/* I/F change (for dongle host notification) */
+#define WLC_E_P2P_DISC_LISTEN_COMPLETE	55	/* listen state expires */
+#define WLC_E_RSSI		56	/* indicate RSSI change based on configured levels */
+/* PFN best network batching event, conflict/share with WLC_E_PFN_SCAN_COMPLETE */
+#define WLC_E_PFN_BEST_BATCHING     57
+#define WLC_E_PFN_SCAN_COMPLETE	57	/* PFN completed scan of network list */
+#define WLC_E_EXTLOG_MSG	58
+#define WLC_E_ACTION_FRAME      59	/* Action frame Rx */
+#define WLC_E_ACTION_FRAME_COMPLETE	60	/* Action frame Tx complete */
+#define WLC_E_PRE_ASSOC_IND	61	/* assoc request received */
+#define WLC_E_PRE_REASSOC_IND	62	/* re-assoc request received */
+#define WLC_E_CHANNEL_ADOPTED	63
+#define WLC_E_AP_STARTED	64	/* AP started */
+#define WLC_E_DFS_AP_STOP	65	/* AP stopped due to DFS */
+#define WLC_E_DFS_AP_RESUME	66	/* AP resumed due to DFS */
+#define WLC_E_WAI_STA_EVENT	67	/* WAI stations event */
+#define WLC_E_WAI_MSG 		68	/* event encapsulating a WAI message */
+#define WLC_E_ESCAN_RESULT 	69	/* escan result event */
+#define WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE 	70	/* action frame off channel complete */
+#define WLC_E_PROBRESP_MSG	71	/* probe response received */
+#define WLC_E_P2P_PROBREQ_MSG	72	/* P2P Probe request received */
+#define WLC_E_DCS_REQUEST	73
+#define WLC_E_FIFO_CREDIT_MAP	74	/* credits for D11 FIFOs. [AC0,AC1,AC2,AC3,BC_MC,ATIM] */
+#define WLC_E_ACTION_FRAME_RX	75	/* Received action frame event WITH
+					 * wl_event_rx_frame_data_t header
+					 */
+#define WLC_E_WAKE_EVENT	76	/* Wake Event timer fired, used for wake WLAN test mode */
+#define WLC_E_RM_COMPLETE	77	/* Radio measurement complete */
+#define WLC_E_HTSFSYNC		78	/* Synchronize TSF with the host */
+#define WLC_E_OVERLAY_REQ	79	/* request an overlay IOCTL/iovar from the host */
+#define WLC_E_CSA_COMPLETE_IND		80	/* 802.11 CHANNEL SWITCH ACTION completed */
+#define WLC_E_EXCESS_PM_WAKE_EVENT	81	/* excess PM Wake Event to inform host  */
+#define WLC_E_PFN_SCAN_NONE		82	/* no PFN networks around */
+/* PFN BSSID network found event, conflict/share with  WLC_E_PFN_SCAN_NONE */
+#define WLC_E_PFN_BSSID_NET_FOUND	82
+#define WLC_E_PFN_SCAN_ALLGONE		83	/* last found PFN network gets lost */
+/* PFN BSSID network lost event, conflict/share with WLC_E_PFN_SCAN_ALLGONE */
+#define WLC_E_PFN_BSSID_NET_LOST	83
+#define WLC_E_GTK_PLUMBED		84
+#define WLC_E_ASSOC_IND_NDIS		85	/* 802.11 ASSOC indication for NDIS only */
+#define WLC_E_REASSOC_IND_NDIS		86	/* 802.11 REASSOC indication for NDIS only */
+#define WLC_E_ASSOC_REQ_IE		87
+#define WLC_E_ASSOC_RESP_IE		88
+#define WLC_E_ASSOC_RECREATED		89	/* association recreated on resume */
+#define WLC_E_ACTION_FRAME_RX_NDIS	90	/* rx action frame event for NDIS only */
+#define WLC_E_AUTH_REQ			91	/* authentication request received */
+#define WLC_E_TDLS_PEER_EVENT		92	/* discovered peer, connected/disconnected peer */
+#define WLC_E_SPEEDY_RECREATE_FAIL	93	/* fast assoc recreation failed */
+#define WLC_E_NATIVE			94	/* port-specific event and payload (e.g. NDIS) */
+#define WLC_E_PKTDELAY_IND		95	/* event when tx pkt delay suddenly jumps */
+#define WLC_E_PSTA_PRIMARY_INTF_IND	99	/* psta primary interface indication */
+#define WLC_E_EVENT_100			100
+#define WLC_E_BEACON_FRAME_RX		101
+#define WLC_E_SERVICE_FOUND		102	/* desired service found */
+#define WLC_E_GAS_FRAGMENT_RX		103	/* GAS fragment received */
+#define WLC_E_GAS_COMPLETE		104	/* GAS sessions all complete */
+#define WLC_E_P2PO_ADD_DEVICE		105	/* New device found by p2p offload */
+#define WLC_E_P2PO_DEL_DEVICE		106	/* device has been removed by p2p offload */
+#define WLC_E_WNM_STA_SLEEP		107	/* WNM event to notify STA enter sleep mode */
+#define WLC_E_TXFAIL_THRESH		108	/* Indication of MAC tx failures (exhaustion of
+						 * 802.11 retries) exceeding threshold(s)
+						 */
+#define WLC_E_PROXD			109	/* Proximity Detection event */
+#define WLC_E_IBSS_COALESCE		110	/* IBSS Coalescing */
+#define WLC_E_AIBSS_TXFAIL		110	/* TXFAIL event for AIBSS, reusing event 110 */
+#define WLC_E_CSA_START_IND		121
+#define WLC_E_CSA_DONE_IND		122
+#define WLC_E_CSA_FAILURE_IND		123
+#define WLC_E_CCA_CHAN_QUAL		124	/* CCA based channel quality report */
+#define WLC_E_BSSID		125	/* to report change in BSSID while roaming */
+#define WLC_E_TX_STAT_ERROR		126	/* tx error indication */
+#define WLC_E_BCMC_CREDIT_SUPPORT	127	/* credit check for BCMC supported */
+#define WLC_E_LAST			128	/* highest val + 1 for range checking */
+
+#if (WLC_E_LAST > 128)
+#error "WLC_E_LAST: Invalid value for last event; must be <= 128."
+#endif /* WLC_E_LAST */
+
+
+/* Table of event name strings for UIs and debugging dumps */
+typedef struct {
+	uint event;
+	const char *name;
+} bcmevent_name_t;
+
+extern const bcmevent_name_t	bcmevent_names[];
+extern const int		bcmevent_names_size;
+
+/* Event status codes */
+#define WLC_E_STATUS_SUCCESS		0	/* operation was successful */
+#define WLC_E_STATUS_FAIL		1	/* operation failed */
+#define WLC_E_STATUS_TIMEOUT		2	/* operation timed out */
+#define WLC_E_STATUS_NO_NETWORKS	3	/* failed due to no matching network found */
+#define WLC_E_STATUS_ABORT		4	/* operation was aborted */
+#define WLC_E_STATUS_NO_ACK		5	/* protocol failure: packet not ack'd */
+#define WLC_E_STATUS_UNSOLICITED	6	/* AUTH or ASSOC packet was unsolicited */
+#define WLC_E_STATUS_ATTEMPT		7	/* attempt to assoc to an auto auth configuration */
+#define WLC_E_STATUS_PARTIAL		8	/* scan results are incomplete */
+#define WLC_E_STATUS_NEWSCAN		9	/* scan aborted by another scan */
+#define WLC_E_STATUS_NEWASSOC		10	/* scan aborted due to assoc in progress */
+#define WLC_E_STATUS_11HQUIET		11	/* 802.11h quiet period started */
+#define WLC_E_STATUS_SUPPRESS		12	/* user disabled scanning (WLC_SET_SCANSUPPRESS) */
+#define WLC_E_STATUS_NOCHANS		13	/* no allowable channels to scan */
+#define WLC_E_STATUS_CS_ABORT		15	/* abort channel select */
+#define WLC_E_STATUS_ERROR		16	/* request failed due to error */
+
+/* roam reason codes */
+#define WLC_E_REASON_INITIAL_ASSOC	0	/* initial assoc */
+#define WLC_E_REASON_LOW_RSSI		1	/* roamed due to low RSSI */
+#define WLC_E_REASON_DEAUTH		2	/* roamed due to DEAUTH indication */
+#define WLC_E_REASON_DISASSOC		3	/* roamed due to DISASSOC indication */
+#define WLC_E_REASON_BCNS_LOST		4	/* roamed due to lost beacons */
+
+/* Roam codes used primarily by CCX */
+#define WLC_E_REASON_FAST_ROAM_FAILED	5	/* roamed due to fast roam failure */
+#define WLC_E_REASON_DIRECTED_ROAM	6	/* roamed due to request by AP */
+#define WLC_E_REASON_TSPEC_REJECTED	7	/* roamed due to TSPEC rejection */
+#define WLC_E_REASON_BETTER_AP		8	/* roamed due to finding better AP */
+#define WLC_E_REASON_MINTXRATE		9	/* roamed because at mintxrate for too long */
+#define WLC_E_REASON_TXFAIL		10	/* We can hear AP, but AP can't hear us */
+#define WLC_E_REASON_REQUESTED_ROAM	11	/* roamed due to BSS Mgmt Transition REQ by AP */
+
+/* prune reason codes */
+#define WLC_E_PRUNE_ENCR_MISMATCH	1	/* encryption mismatch */
+#define WLC_E_PRUNE_BCAST_BSSID		2	/* AP uses a broadcast BSSID */
+#define WLC_E_PRUNE_MAC_DENY		3	/* STA's MAC addr is in AP's MAC deny list */
+#define WLC_E_PRUNE_MAC_NA		4	/* STA's MAC addr is not in AP's MAC allow list */
+#define WLC_E_PRUNE_REG_PASSV		5	/* AP not allowed due to regulatory restriction */
+#define WLC_E_PRUNE_SPCT_MGMT		6	/* AP does not support STA locale spectrum mgmt */
+#define WLC_E_PRUNE_RADAR		7	/* AP is on a radar channel of STA locale */
+#define WLC_E_RSN_MISMATCH		8	/* STA does not support AP's RSN */
+#define WLC_E_PRUNE_NO_COMMON_RATES	9	/* No rates in common with AP */
+#define WLC_E_PRUNE_BASIC_RATES		10	/* STA does not support all basic rates of BSS */
+#define WLC_E_PRUNE_CIPHER_NA		12	/* BSS's cipher not supported */
+#define WLC_E_PRUNE_KNOWN_STA		13	/* AP is already known to us as a STA */
+#define WLC_E_PRUNE_WDS_PEER		15	/* AP is already known to us as a WDS peer */
+#define WLC_E_PRUNE_QBSS_LOAD		16	/* QBSS LOAD - AAC is too low */
+#define WLC_E_PRUNE_HOME_AP		17	/* prune home AP */
+
+/* WPA failure reason codes carried in the WLC_E_PSK_SUP event */
+#define WLC_E_SUP_OTHER			0	/* Other reason */
+#define WLC_E_SUP_DECRYPT_KEY_DATA	1	/* Decryption of key data failed */
+#define WLC_E_SUP_BAD_UCAST_WEP128	2	/* Illegal use of ucast WEP128 */
+#define WLC_E_SUP_BAD_UCAST_WEP40	3	/* Illegal use of ucast WEP40 */
+#define WLC_E_SUP_UNSUP_KEY_LEN		4	/* Unsupported key length */
+#define WLC_E_SUP_PW_KEY_CIPHER		5	/* Unicast cipher mismatch in pairwise key */
+#define WLC_E_SUP_MSG3_TOO_MANY_IE	6	/* WPA IE contains > 1 RSN IE in key msg 3 */
+#define WLC_E_SUP_MSG3_IE_MISMATCH	7	/* WPA IE mismatch in key message 3 */
+#define WLC_E_SUP_NO_INSTALL_FLAG	8	/* INSTALL flag unset in 4-way msg */
+#define WLC_E_SUP_MSG3_NO_GTK		9	/* encapsulated GTK missing from msg 3 */
+#define WLC_E_SUP_GRP_KEY_CIPHER	10	/* Multicast cipher mismatch in group key */
+#define WLC_E_SUP_GRP_MSG1_NO_GTK	11	/* encapsulated GTK missing from group msg 1 */
+#define WLC_E_SUP_GTK_DECRYPT_FAIL	12	/* GTK decrypt failure */
+#define WLC_E_SUP_SEND_FAIL		13	/* message send failure */
+#define WLC_E_SUP_DEAUTH		14	/* received FC_DEAUTH */
+#define WLC_E_SUP_WPA_PSK_TMO		15	/* WPA PSK 4-way handshake timeout */
+
+/* Event data for events that include frames received over the air */
+/* WLC_E_PROBRESP_MSG
+ * WLC_E_P2P_PROBREQ_MSG
+ * WLC_E_ACTION_FRAME_RX
+ */
+typedef BWL_PRE_PACKED_STRUCT struct wl_event_rx_frame_data {
+	uint16	version;
+	uint16	channel;	/* Matches chanspec_t format from bcmwifi_channels.h */
+	int32	rssi;
+	uint32	mactime;
+	uint32	rate;
+} BWL_POST_PACKED_STRUCT wl_event_rx_frame_data_t;
+
+#define BCM_RX_FRAME_DATA_VERSION 1
+
+/* WLC_E_IF event data */
+typedef struct wl_event_data_if {
+	uint8 ifidx;		/* RTE virtual device index (for dongle) */
+	uint8 opcode;		/* see I/F opcode */
+	uint8 reserved;		/* bit mask (WLC_E_IF_FLAGS_XXX ) */
+	uint8 bssidx;		/* bsscfg index */
+	uint8 role;		/* see I/F role */
+} wl_event_data_if_t;
+
+/* opcode in WLC_E_IF event */
+#define WLC_E_IF_ADD		1	/* bsscfg add */
+#define WLC_E_IF_DEL		2	/* bsscfg delete */
+#define WLC_E_IF_CHANGE		3	/* bsscfg role change */
+
+/* I/F role code in WLC_E_IF event */
+#define WLC_E_IF_ROLE_STA		0	/* Infra STA */
+#define WLC_E_IF_ROLE_AP		1	/* Access Point */
+#define WLC_E_IF_ROLE_WDS		2	/* WDS link */
+#define WLC_E_IF_ROLE_P2P_GO		3	/* P2P Group Owner */
+#define WLC_E_IF_ROLE_P2P_CLIENT	4	/* P2P Client */
+
+/* WLC_E_RSSI event data */
+typedef struct wl_event_data_rssi {
+	int32 rssi;
+	int32 snr;
+	int32 noise;
+} wl_event_data_rssi_t;
+
+/* WLC_E_IF flag */
+#define WLC_E_IF_FLAGS_BSSCFG_NOIF	0x1	/* no host I/F creation needed */
+
+/* Reason codes for LINK */
+#define WLC_E_LINK_BCN_LOSS	1	/* Link down because of beacon loss */
+#define WLC_E_LINK_DISASSOC	2	/* Link down because of disassoc */
+#define WLC_E_LINK_ASSOC_REC	3	/* Link down because assoc recreate failed */
+#define WLC_E_LINK_BSSCFG_DIS	4	/* Link down due to bsscfg down */
+
+/* reason codes for WLC_E_OVERLAY_REQ event */
+#define WLC_E_OVL_DOWNLOAD		0	/* overlay download request */
+#define WLC_E_OVL_UPDATE_IND	1	/* device indication of host overlay update */
+
+/* reason codes for WLC_E_TDLS_PEER_EVENT event */
+#define WLC_E_TDLS_PEER_DISCOVERED		0	/* peer is ready to establish TDLS */
+#define WLC_E_TDLS_PEER_CONNECTED		1
+#define WLC_E_TDLS_PEER_DISCONNECTED	2
+
+
+/* GAS event data */
+typedef BWL_PRE_PACKED_STRUCT struct wl_event_gas {
+	uint16	channel;		/* channel of GAS protocol */
+	uint8	dialog_token;	/* GAS dialog token */
+	uint8	fragment_id;	/* fragment id */
+	uint16	status_code;	/* status code on GAS completion */
+	uint16 	data_len;		/* length of data to follow */
+	uint8	data[1];		/* variable length specified by data_len */
+} BWL_POST_PACKED_STRUCT wl_event_gas_t;
+
+/* service discovery TLV */
+typedef BWL_PRE_PACKED_STRUCT struct wl_sd_tlv {
+	uint16	length;			/* length of response_data */
+	uint8	protocol;		/* service protocol type */
+	uint8	transaction_id;		/* service transaction id */
+	uint8	status_code;		/* status code */
+	uint8	data[1];		/* response data */
+} BWL_POST_PACKED_STRUCT wl_sd_tlv_t;
+
+/* service discovery event data */
+typedef BWL_PRE_PACKED_STRUCT struct wl_event_sd {
+	uint16	channel;		/* channel */
+	uint8	count;			/* number of tlvs */
+	wl_sd_tlv_t	tlv[1];		/* service discovery TLV */
+} BWL_POST_PACKED_STRUCT wl_event_sd_t;
+
+/* Reason codes for WLC_E_PROXD */
+#define WLC_E_PROXD_FOUND	1	/* Found a proximity device */
+#define WLC_E_PROXD_GONE	2	/* Lost a proximity device */
+
+
+/* Video Traffic Interference Monitor Event */
+#define INTFER_EVENT_VERSION		1
+#define INTFER_STREAM_TYPE_NONTCP	1
+#define INTFER_STREAM_TYPE_TCP		2
+#define WLINTFER_STATS_NSMPLS		4
+typedef struct wl_intfer_event {
+	uint16 version;			/* version */
+	uint16 status;			/* status */
+	uint8 txfail_histo[WLINTFER_STATS_NSMPLS]; /* txfail histo */
+} wl_intfer_event_t;
+
+/* WLC_E_PSTA_PRIMARY_INTF_IND event data */
+typedef struct wl_psta_primary_intf_event {
+	struct ether_addr prim_ea;	/* primary intf ether addr */
+} wl_psta_primary_intf_event_t;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _BCMEVENT_H_ */
diff --git a/drivers/staging/edison-bcm43340/common/include/proto/bcmip.h b/drivers/staging/edison-bcm43340/common/include/proto/bcmip.h
new file mode 100644
index 0000000..7549235
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/common/include/proto/bcmip.h
@@ -0,0 +1,231 @@
+/*
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental constants relating to IP Protocol
+ *
+ * $Id: bcmip.h 457888 2014-02-25 03:34:39Z $
+ */
+
+#ifndef _bcmip_h_
+#define _bcmip_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* IPV4 and IPV6 common */
+#define IP_VER_OFFSET		0x0	/* offset to version field */
+#define IP_VER_MASK		0xf0	/* version mask */
+#define IP_VER_SHIFT		4	/* version shift */
+#define IP_VER_4		4	/* version number for IPV4 */
+#define IP_VER_6		6	/* version number for IPV6 */
+
+#define IP_VER(ip_body) \
+	((((uint8 *)(ip_body))[IP_VER_OFFSET] & IP_VER_MASK) >> IP_VER_SHIFT)
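+/*
+ * Illustrative: IP_VER() inspects only the first byte of the IP header, so
+ * it can be used to dispatch before any version-specific parsing.
+ *
+ *	if (IP_VER(ip_body) == IP_VER_4)
+ *		// use the IPV4_* offsets below
+ *	else if (IP_VER(ip_body) == IP_VER_6)
+ *		// use the IPV6_* offsets below
+ */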
+
+#define IP_PROT_ICMP		0x1	/* ICMP protocol */
+#define IP_PROT_IGMP		0x2	/* IGMP protocol */
+#define IP_PROT_TCP		0x6	/* TCP protocol */
+#define IP_PROT_UDP		0x11	/* UDP protocol type */
+#define IP_PROT_ICMP6		0x3a	/* ICMPv6 protocol type */
+
+/* IPV4 field offsets */
+#define IPV4_VER_HL_OFFSET      0       /* version and ihl byte offset */
+#define IPV4_TOS_OFFSET         1       /* type of service offset */
+#define IPV4_PKTLEN_OFFSET      2       /* packet length offset */
+#define IPV4_PKTFLAG_OFFSET     6       /* more-frag,dont-frag flag offset */
+#define IPV4_PROT_OFFSET        9       /* protocol type offset */
+#define IPV4_CHKSUM_OFFSET      10      /* IP header checksum offset */
+#define IPV4_SRC_IP_OFFSET      12      /* src IP addr offset */
+#define IPV4_DEST_IP_OFFSET     16      /* dest IP addr offset */
+#define IPV4_OPTIONS_OFFSET     20      /* IP options offset */
+#define IPV4_MIN_HEADER_LEN     20      /* Minimum size for an IP header (no options) */
+
+/* IPV4 field decodes */
+#define IPV4_VER_MASK		0xf0	/* IPV4 version mask */
+#define IPV4_VER_SHIFT		4	/* IPV4 version shift */
+
+#define IPV4_HLEN_MASK		0x0f	/* IPV4 header length mask */
+#define IPV4_HLEN(ipv4_body)	(4 * (((uint8 *)(ipv4_body))[IPV4_VER_HL_OFFSET] & IPV4_HLEN_MASK))
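+/*
+ * Illustrative: with no options, version_ihl is 0x45, so IPV4_HLEN() yields
+ * 4 * 5 == 20 == IPV4_MIN_HEADER_LEN; the transport header then starts at
+ * ip_body + IPV4_HLEN(ip_body).
+ */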
+
+#define IPV4_ADDR_LEN		4	/* IPV4 address length */
+
+#define IPV4_ADDR_NULL(a)	((((uint8 *)(a))[0] | ((uint8 *)(a))[1] | \
+				  ((uint8 *)(a))[2] | ((uint8 *)(a))[3]) == 0)
+
+#define IPV4_ADDR_BCAST(a)	((((uint8 *)(a))[0] & ((uint8 *)(a))[1] & \
+				  ((uint8 *)(a))[2] & ((uint8 *)(a))[3]) == 0xff)
+
+#define	IPV4_TOS_DSCP_MASK	0xfc	/* DiffServ codepoint mask */
+#define	IPV4_TOS_DSCP_SHIFT	2	/* DiffServ codepoint shift */
+
+#define	IPV4_TOS(ipv4_body)	(((uint8 *)(ipv4_body))[IPV4_TOS_OFFSET])
+
+#define	IPV4_TOS_PREC_MASK	0xe0	/* Historical precedence mask */
+#define	IPV4_TOS_PREC_SHIFT	5	/* Historical precedence shift */
+
+#define IPV4_TOS_LOWDELAY	0x10	/* Lowest delay requested */
+#define IPV4_TOS_THROUGHPUT	0x8	/* Best throughput requested */
+#define IPV4_TOS_RELIABILITY	0x4	/* Most reliable delivery requested */
+
+#define IPV4_PROT(ipv4_body)	(((uint8 *)(ipv4_body))[IPV4_PROT_OFFSET])
+
+#define IPV4_FRAG_RESV		0x8000	/* Reserved */
+#define IPV4_FRAG_DONT		0x4000	/* Don't fragment */
+#define IPV4_FRAG_MORE		0x2000	/* More fragments */
+#define IPV4_FRAG_OFFSET_MASK	0x1fff	/* Fragment offset */
+
+#define IPV4_ADDR_STR_LEN	16	/* Max IP address length in string format */
+
+/* IPV4 packet formats */
+BWL_PRE_PACKED_STRUCT struct ipv4_addr {
+	uint8	addr[IPV4_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct ipv4_hdr {
+	uint8	version_ihl;		/* Version and Internet Header Length */
+	uint8	tos;			/* Type Of Service */
+	uint16	tot_len;		/* Number of bytes in packet (max 65535) */
+	uint16	id;
+	uint16	frag;			/* 3 flag bits and fragment offset */
+	uint8	ttl;			/* Time To Live */
+	uint8	prot;			/* Protocol */
+	uint16	hdr_chksum;		/* IP header checksum */
+	uint8	src_ip[IPV4_ADDR_LEN];	/* Source IP Address */
+	uint8	dst_ip[IPV4_ADDR_LEN];	/* Destination IP Address */
+} BWL_POST_PACKED_STRUCT;
+
+/* IPV6 field offsets */
+#define IPV6_PAYLOAD_LEN_OFFSET	4	/* payload length offset */
+#define IPV6_NEXT_HDR_OFFSET	6	/* next header/protocol offset */
+#define IPV6_HOP_LIMIT_OFFSET	7	/* hop limit offset */
+#define IPV6_SRC_IP_OFFSET	8	/* src IP addr offset */
+#define IPV6_DEST_IP_OFFSET	24	/* dst IP addr offset */
+
+/* IPV6 field decodes */
+#define IPV6_TRAFFIC_CLASS(ipv6_body) \
+	(((((uint8 *)(ipv6_body))[0] & 0x0f) << 4) | \
+	 ((((uint8 *)(ipv6_body))[1] & 0xf0) >> 4))
+
+#define IPV6_FLOW_LABEL(ipv6_body) \
+	(((((uint8 *)(ipv6_body))[1] & 0x0f) << 16) | \
+	 (((uint8 *)(ipv6_body))[2] << 8) | \
+	 (((uint8 *)(ipv6_body))[3]))
+
+#define IPV6_PAYLOAD_LEN(ipv6_body) \
+	((((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 0] << 8) | \
+	 ((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 1])
+
+#define IPV6_NEXT_HDR(ipv6_body) \
+	(((uint8 *)(ipv6_body))[IPV6_NEXT_HDR_OFFSET])
+
+#define IPV6_PROT(ipv6_body)	IPV6_NEXT_HDR(ipv6_body)
+
+#define IPV6_ADDR_LEN		16	/* IPV6 address length */
+
+/* IPV4 TOS or IPV6 Traffic Classifier or 0 */
+#define IP_TOS46(ip_body) \
+	(IP_VER(ip_body) == IP_VER_4 ? IPV4_TOS(ip_body) : \
+	 IP_VER(ip_body) == IP_VER_6 ? IPV6_TRAFFIC_CLASS(ip_body) : 0)
+
+#define IP_DSCP46(ip_body) (IP_TOS46(ip_body) >> IPV4_TOS_DSCP_SHIFT)
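+/*
+ * Illustrative: for an IPv4 TOS byte of 0xb8 (DSCP 46, EF), IP_DSCP46()
+ * returns 0xb8 >> 2 == 46; for a buffer that is neither IPv4 nor IPv6,
+ * IP_TOS46() is 0 and so is the derived DSCP.
+ */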
+
+/* IPV6 extension headers (options) */
+#define IPV6_EXTHDR_HOP		0
+#define IPV6_EXTHDR_ROUTING	43
+#define IPV6_EXTHDR_FRAGMENT	44
+#define IPV6_EXTHDR_AUTH	51
+#define IPV6_EXTHDR_NONE	59
+#define IPV6_EXTHDR_DEST	60
+
+#define IPV6_EXTHDR(prot)	(((prot) == IPV6_EXTHDR_HOP) || \
+	                         ((prot) == IPV6_EXTHDR_ROUTING) || \
+	                         ((prot) == IPV6_EXTHDR_FRAGMENT) || \
+	                         ((prot) == IPV6_EXTHDR_AUTH) || \
+	                         ((prot) == IPV6_EXTHDR_NONE) || \
+	                         ((prot) == IPV6_EXTHDR_DEST))
+
+#define IPV6_MIN_HLEN 		40
+
+#define IPV6_EXTHDR_LEN(eh)	((((struct ipv6_exthdr *)(eh))->hdrlen + 1) << 3)
+
+BWL_PRE_PACKED_STRUCT struct ipv6_exthdr {
+	uint8	nexthdr;
+	uint8	hdrlen;
+} BWL_POST_PACKED_STRUCT;
+
+BWL_PRE_PACKED_STRUCT struct ipv6_exthdr_frag {
+	uint8	nexthdr;
+	uint8	rsvd;
+	uint16	frag_off;
+	uint32	ident;
+} BWL_POST_PACKED_STRUCT;
+
+static INLINE int32
+ipv6_exthdr_len(uint8 *h, uint8 *proto)
+{
+	uint16 len = 0, hlen;
+	struct ipv6_exthdr *eh = (struct ipv6_exthdr *)h;
+
+	while (IPV6_EXTHDR(eh->nexthdr)) {
+		if (eh->nexthdr == IPV6_EXTHDR_NONE)
+			return -1;
+		else if (eh->nexthdr == IPV6_EXTHDR_FRAGMENT)
+			hlen = 8;
+		else if (eh->nexthdr == IPV6_EXTHDR_AUTH)
+			hlen = (eh->hdrlen + 2) << 2;
+		else
+			hlen = IPV6_EXTHDR_LEN(eh);
+
+		len += hlen;
+		eh = (struct ipv6_exthdr *)(h + len);
+	}
+
+	*proto = eh->nexthdr;
+	return len;
+}
+
+#define IPV4_ISMULTI(a) (((a) & 0xf0000000) == 0xe0000000)
+
+#define IPV4_MCAST_TO_ETHER_MCAST(ipv4, ether) \
+{ \
+	ether[0] = 0x01; \
+	ether[1] = 0x00; \
+	ether[2] = 0x5E; \
+	ether[3] = (ipv4 & 0x7f0000) >> 16; \
+	ether[4] = (ipv4 & 0xff00) >> 8; \
+	ether[5] = (ipv4 & 0xff); \
+}
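+/*
+ * Illustrative mapping (RFC 1112): only the low 23 bits of the group
+ * address survive, so 224.0.0.251 (0xe00000fb) fills ether[] with
+ * 01:00:5E:00:00:FB.
+ */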
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#define IPV4_ADDR_STR "%d.%d.%d.%d"
+#define IPV4_ADDR_TO_STR(addr)	(((uint32)(addr) & 0xff000000) >> 24), \
+								(((uint32)(addr) & 0x00ff0000) >> 16), \
+								(((uint32)(addr) & 0x0000ff00) >> 8), \
+								((uint32)(addr) & 0x000000ff)
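+/*
+ * Illustrative use (addr is a hypothetical host-order uint32):
+ *
+ *	printf("src " IPV4_ADDR_STR "\n", IPV4_ADDR_TO_STR(addr));
+ */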
+
+#endif	/* _bcmip_h_ */
diff --git a/drivers/staging/edison-bcm43340/common/include/proto/bcmtcp.h b/drivers/staging/edison-bcm43340/common/include/proto/bcmtcp.h
new file mode 100644
index 0000000..6754f36
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/common/include/proto/bcmtcp.h
@@ -0,0 +1,90 @@
+/*
+ * Fundamental constants relating to TCP Protocol
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmtcp.h 457888 2014-02-25 03:34:39Z $
+ */
+
+#ifndef _bcmtcp_h_
+#define _bcmtcp_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+#define TCP_SRC_PORT_OFFSET	0	/* TCP source port offset */
+#define TCP_DEST_PORT_OFFSET	2	/* TCP dest port offset */
+#define TCP_SEQ_NUM_OFFSET	4	/* TCP sequence number offset */
+#define TCP_ACK_NUM_OFFSET	8	/* TCP acknowledgement number offset */
+#define TCP_HLEN_OFFSET		12	/* HLEN and reserved bits offset */
+#define TCP_FLAGS_OFFSET	13	/* FLAGS and reserved bits offset */
+#define TCP_CHKSUM_OFFSET	16	/* TCP body checksum offset */
+
+#define TCP_PORT_LEN		2	/* TCP port field length */
+
+/* 8bit TCP flag field */
+#define TCP_FLAG_URG            0x20
+#define TCP_FLAG_ACK            0x10
+#define TCP_FLAG_PSH            0x08
+#define TCP_FLAG_RST            0x04
+#define TCP_FLAG_SYN            0x02
+#define TCP_FLAG_FIN            0x01
+
+#define TCP_HLEN_MASK           0xf000
+#define TCP_HLEN_SHIFT          12
+
+/* These fields are stored in network order */
+BWL_PRE_PACKED_STRUCT struct bcmtcp_hdr
+{
+	uint16	src_port;	/* Source Port Address */
+	uint16	dst_port;	/* Destination Port Address */
+	uint32	seq_num;	/* TCP Sequence Number */
+	uint32	ack_num;	/* TCP Acknowledgement Number */
+	uint16	hdrlen_rsvd_flags;	/* Header length, reserved bits and flags */
+	uint16	tcpwin;		/* TCP window */
+	uint16	chksum;		/* Segment checksum with pseudoheader */
+	uint16	urg_ptr;	/* Points to seq-num of byte following urg data */
+} BWL_POST_PACKED_STRUCT;
+
+#define TCP_MIN_HEADER_LEN 20
+
+#define TCP_HDRLEN_MASK 0xf0
+#define TCP_HDRLEN_SHIFT 4
+#define TCP_HDRLEN(hdrlen) (((hdrlen) & TCP_HDRLEN_MASK) >> TCP_HDRLEN_SHIFT)
+
+#define TCP_FLAGS_MASK  0x1f
+#define TCP_FLAGS(hdrlen) ((hdrlen) & TCP_FLAGS_MASK)
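+/*
+ * Illustrative decode of the byte at TCP_HLEN_OFFSET (hypothetical value
+ * 0x50): TCP_HDRLEN(0x50) == 5 32-bit words, i.e. a 20-byte header with
+ * no options (TCP_MIN_HEADER_LEN).
+ */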
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+/* Sequence-number comparisons that tolerate 32-bit wraparound. */
+#define IS_TCPSEQ_GE(a, b) (((a) - (b)) < NBITVAL(31))		/* a >= b */
+#define IS_TCPSEQ_LE(a, b) (((b) - (a)) < NBITVAL(31))		/* a <= b */
+#define IS_TCPSEQ_GT(a, b) (!IS_TCPSEQ_LE(a, b))		/* a > b */
+#define IS_TCPSEQ_LT(a, b) (!IS_TCPSEQ_GE(a, b))		/* a < b */
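+/*
+ * Illustrative wraparound case: IS_TCPSEQ_GE(0x00000010, 0xfffffff0) is
+ * true because the unsigned difference 0x20 is below 2^31, even though
+ * the raw values compare the other way.
+ */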
+
+#endif	/* _bcmtcp_h_ */
diff --git a/drivers/staging/edison-bcm43340/common/include/proto/bt_amp_hci.h b/drivers/staging/edison-bcm43340/common/include/proto/bt_amp_hci.h
new file mode 100644
index 0000000..fc05ab1
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/common/include/proto/bt_amp_hci.h
@@ -0,0 +1,441 @@
+/*
+ * BT-AMP (BlueTooth Alternate Mac and Phy) HCI (Host/Controller Interface)
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bt_amp_hci.h 382882 2013-02-04 23:24:31Z $
+*/
+
+#ifndef _bt_amp_hci_h
+#define _bt_amp_hci_h
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* AMP HCI CMD packet format */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_cmd {
+	uint16 opcode;
+	uint8 plen;
+	uint8 parms[1];
+} BWL_POST_PACKED_STRUCT amp_hci_cmd_t;
+
+#define HCI_CMD_PREAMBLE_SIZE		OFFSETOF(amp_hci_cmd_t, parms)
+#define HCI_CMD_DATA_SIZE		255
+
+/* AMP HCI CMD opcode layout */
+#define HCI_CMD_OPCODE(ogf, ocf)	((((ogf) & 0x3F) << 10) | ((ocf) & 0x03FF))
+#define HCI_CMD_OGF(opcode)		((uint8)(((opcode) >> 10) & 0x3F))
+#define HCI_CMD_OCF(opcode)		((opcode) & 0x03FF)
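+/*
+ * Illustrative packing: HCI_CMD_OPCODE(0x05, 0x0009) yields 0x1409, and
+ * HCI_CMD_OGF(0x1409) == 0x05, HCI_CMD_OCF(0x1409) == 0x0009.
+ */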
+
+/* AMP HCI command opcodes */
+#define HCI_Read_Failed_Contact_Counter		HCI_CMD_OPCODE(0x05, 0x0001)
+#define HCI_Reset_Failed_Contact_Counter	HCI_CMD_OPCODE(0x05, 0x0002)
+#define HCI_Read_Link_Quality			HCI_CMD_OPCODE(0x05, 0x0003)
+#define HCI_Read_Local_AMP_Info			HCI_CMD_OPCODE(0x05, 0x0009)
+#define HCI_Read_Local_AMP_ASSOC		HCI_CMD_OPCODE(0x05, 0x000A)
+#define HCI_Write_Remote_AMP_ASSOC		HCI_CMD_OPCODE(0x05, 0x000B)
+#define HCI_Create_Physical_Link		HCI_CMD_OPCODE(0x01, 0x0035)
+#define HCI_Accept_Physical_Link_Request	HCI_CMD_OPCODE(0x01, 0x0036)
+#define HCI_Disconnect_Physical_Link		HCI_CMD_OPCODE(0x01, 0x0037)
+#define HCI_Create_Logical_Link			HCI_CMD_OPCODE(0x01, 0x0038)
+#define HCI_Accept_Logical_Link			HCI_CMD_OPCODE(0x01, 0x0039)
+#define HCI_Disconnect_Logical_Link		HCI_CMD_OPCODE(0x01, 0x003A)
+#define HCI_Logical_Link_Cancel			HCI_CMD_OPCODE(0x01, 0x003B)
+#define HCI_Flow_Spec_Modify			HCI_CMD_OPCODE(0x01, 0x003C)
+#define HCI_Write_Flow_Control_Mode		HCI_CMD_OPCODE(0x01, 0x0067)
+#define HCI_Read_Best_Effort_Flush_Timeout	HCI_CMD_OPCODE(0x01, 0x0069)
+#define HCI_Write_Best_Effort_Flush_Timeout	HCI_CMD_OPCODE(0x01, 0x006A)
+#define HCI_Short_Range_Mode			HCI_CMD_OPCODE(0x01, 0x006B)
+#define HCI_Reset				HCI_CMD_OPCODE(0x03, 0x0003)
+#define HCI_Read_Connection_Accept_Timeout	HCI_CMD_OPCODE(0x03, 0x0015)
+#define HCI_Write_Connection_Accept_Timeout	HCI_CMD_OPCODE(0x03, 0x0016)
+#define HCI_Read_Link_Supervision_Timeout	HCI_CMD_OPCODE(0x03, 0x0036)
+#define HCI_Write_Link_Supervision_Timeout	HCI_CMD_OPCODE(0x03, 0x0037)
+#define HCI_Enhanced_Flush			HCI_CMD_OPCODE(0x03, 0x005F)
+#define HCI_Read_Logical_Link_Accept_Timeout	HCI_CMD_OPCODE(0x03, 0x0061)
+#define HCI_Write_Logical_Link_Accept_Timeout	HCI_CMD_OPCODE(0x03, 0x0062)
+#define HCI_Set_Event_Mask_Page_2		HCI_CMD_OPCODE(0x03, 0x0063)
+#define HCI_Read_Location_Data_Command		HCI_CMD_OPCODE(0x03, 0x0064)
+#define HCI_Write_Location_Data_Command		HCI_CMD_OPCODE(0x03, 0x0065)
+#define HCI_Read_Local_Version_Info		HCI_CMD_OPCODE(0x04, 0x0001)
+#define HCI_Read_Local_Supported_Commands	HCI_CMD_OPCODE(0x04, 0x0002)
+#define HCI_Read_Buffer_Size			HCI_CMD_OPCODE(0x04, 0x0005)
+#define HCI_Read_Data_Block_Size		HCI_CMD_OPCODE(0x04, 0x000A)
+
+/* AMP HCI command parameters */
+typedef BWL_PRE_PACKED_STRUCT struct read_local_cmd_parms {
+	uint8 plh;
+	uint8 offset[2];			/* length so far */
+	uint8 max_remote[2];
+} BWL_POST_PACKED_STRUCT read_local_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct write_remote_cmd_parms {
+	uint8 plh;
+	uint8 offset[2];
+	uint8 len[2];
+	uint8 frag[1];
+} BWL_POST_PACKED_STRUCT write_remote_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct phy_link_cmd_parms {
+	uint8 plh;
+	uint8 key_length;
+	uint8 key_type;
+	uint8 key[1];
+} BWL_POST_PACKED_STRUCT phy_link_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct dis_phy_link_cmd_parms {
+	uint8 plh;
+	uint8 reason;
+} BWL_POST_PACKED_STRUCT dis_phy_link_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_cmd_parms {
+	uint8 plh;
+	uint8 txflow[16];
+	uint8 rxflow[16];
+} BWL_POST_PACKED_STRUCT log_link_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ext_flow_spec {
+	uint8 id;
+	uint8 service_type;
+	uint8 max_sdu[2];
+	uint8 sdu_ia_time[4];
+	uint8 access_latency[4];
+	uint8 flush_timeout[4];
+} BWL_POST_PACKED_STRUCT ext_flow_spec_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_cancel_cmd_parms {
+	uint8 plh;
+	uint8 tx_fs_ID;
+} BWL_POST_PACKED_STRUCT log_link_cancel_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct flow_spec_mod_cmd_parms {
+	uint8 llh[2];
+	uint8 txflow[16];
+	uint8 rxflow[16];
+} BWL_POST_PACKED_STRUCT flow_spec_mod_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct plh_pad {
+	uint8 plh;
+	uint8 pad;
+} BWL_POST_PACKED_STRUCT plh_pad_t;
+
+typedef BWL_PRE_PACKED_STRUCT union hci_handle {
+	uint16 bredr;
+	plh_pad_t amp;
+} BWL_POST_PACKED_STRUCT hci_handle_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ls_to_cmd_parms {
+	hci_handle_t handle;
+	uint8 timeout[2];
+} BWL_POST_PACKED_STRUCT ls_to_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct befto_cmd_parms {
+	uint8 llh[2];
+	uint8 befto[4];
+} BWL_POST_PACKED_STRUCT befto_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct srm_cmd_parms {
+	uint8 plh;
+	uint8 srm;
+} BWL_POST_PACKED_STRUCT srm_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ld_cmd_parms {
+	uint8 ld_aware;
+	uint8 ld[2];
+	uint8 ld_opts;
+	uint8 l_opts;
+} BWL_POST_PACKED_STRUCT ld_cmd_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct eflush_cmd_parms {
+	uint8 llh[2];
+	uint8 packet_type;
+} BWL_POST_PACKED_STRUCT eflush_cmd_parms_t;
+
+/* Generic AMP extended flow spec service types */
+#define EFS_SVCTYPE_NO_TRAFFIC		0
+#define EFS_SVCTYPE_BEST_EFFORT		1
+#define EFS_SVCTYPE_GUARANTEED		2
+
+/* AMP HCI event packet format */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_event {
+	uint8 ecode;
+	uint8 plen;
+	uint8 parms[1];
+} BWL_POST_PACKED_STRUCT amp_hci_event_t;
+
+#define HCI_EVT_PREAMBLE_SIZE			OFFSETOF(amp_hci_event_t, parms)
+
+/* AMP HCI event codes */
+#define HCI_Command_Complete			0x0E
+#define HCI_Command_Status			0x0F
+#define HCI_Flush_Occurred			0x11
+#define HCI_Enhanced_Flush_Complete		0x39
+#define HCI_Physical_Link_Complete		0x40
+#define HCI_Channel_Select			0x41
+#define HCI_Disconnect_Physical_Link_Complete	0x42
+#define HCI_Logical_Link_Complete		0x45
+#define HCI_Disconnect_Logical_Link_Complete	0x46
+#define HCI_Flow_Spec_Modify_Complete		0x47
+#define HCI_Number_of_Completed_Data_Blocks	0x48
+#define HCI_Short_Range_Mode_Change_Complete	0x4C
+#define HCI_Status_Change_Event			0x4D
+#define HCI_Vendor_Specific			0xFF
+
+/* AMP HCI event mask bit positions */
+#define HCI_Physical_Link_Complete_Event_Mask			0x0001
+#define HCI_Channel_Select_Event_Mask				0x0002
+#define HCI_Disconnect_Physical_Link_Complete_Event_Mask	0x0004
+#define HCI_Logical_Link_Complete_Event_Mask			0x0020
+#define HCI_Disconnect_Logical_Link_Complete_Event_Mask		0x0040
+#define HCI_Flow_Spec_Modify_Complete_Event_Mask		0x0080
+#define HCI_Number_of_Completed_Data_Blocks_Event_Mask		0x0100
+#define HCI_Short_Range_Mode_Change_Complete_Event_Mask		0x1000
+#define HCI_Status_Change_Event_Mask				0x2000
+#define HCI_All_Event_Mask					0x31e7
+
+/* AMP HCI event parameters */
+typedef BWL_PRE_PACKED_STRUCT struct cmd_status_parms {
+	uint8 status;
+	uint8 cmdpkts;
+	uint16 opcode;
+} BWL_POST_PACKED_STRUCT cmd_status_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct cmd_complete_parms {
+	uint8 cmdpkts;
+	uint16 opcode;
+	uint8 parms[1];
+} BWL_POST_PACKED_STRUCT cmd_complete_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct flush_occurred_evt_parms {
+	uint16 handle;
+} BWL_POST_PACKED_STRUCT flush_occurred_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct write_remote_evt_parms {
+	uint8 status;
+	uint8 plh;
+} BWL_POST_PACKED_STRUCT write_remote_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_local_evt_parms {
+	uint8 status;
+	uint8 plh;
+	uint16 len;
+	uint8 frag[1];
+} BWL_POST_PACKED_STRUCT read_local_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_local_info_evt_parms {
+	uint8 status;
+	uint8 AMP_status;
+	uint32 bandwidth;
+	uint32 gbandwidth;
+	uint32 latency;
+	uint32 PDU_size;
+	uint8 ctrl_type;
+	uint16 PAL_cap;
+	uint16 AMP_ASSOC_len;
+	uint32 max_flush_timeout;
+	uint32 be_flush_timeout;
+} BWL_POST_PACKED_STRUCT read_local_info_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_evt_parms {
+	uint8 status;
+	uint16 llh;
+	uint8 plh;
+	uint8 tx_fs_ID;
+} BWL_POST_PACKED_STRUCT log_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct disc_log_link_evt_parms {
+	uint8 status;
+	uint16 llh;
+	uint8 reason;
+} BWL_POST_PACKED_STRUCT disc_log_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct log_link_cancel_evt_parms {
+	uint8 status;
+	uint8 plh;
+	uint8 tx_fs_ID;
+} BWL_POST_PACKED_STRUCT log_link_cancel_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct flow_spec_mod_evt_parms {
+	uint8 status;
+	uint16 llh;
+} BWL_POST_PACKED_STRUCT flow_spec_mod_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct phy_link_evt_parms {
+	uint8 status;
+	uint8 plh;
+} BWL_POST_PACKED_STRUCT phy_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct dis_phy_link_evt_parms {
+	uint8 status;
+	uint8 plh;
+	uint8 reason;
+} BWL_POST_PACKED_STRUCT dis_phy_link_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_ls_to_evt_parms {
+	uint8 status;
+	hci_handle_t handle;
+	uint16 timeout;
+} BWL_POST_PACKED_STRUCT read_ls_to_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_lla_ca_to_evt_parms {
+	uint8 status;
+	uint16 timeout;
+} BWL_POST_PACKED_STRUCT read_lla_ca_to_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_data_block_size_evt_parms {
+	uint8 status;
+	uint16 ACL_pkt_len;
+	uint16 data_block_len;
+	uint16 data_block_num;
+} BWL_POST_PACKED_STRUCT read_data_block_size_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct data_blocks {
+	uint16 handle;
+	uint16 pkts;
+	uint16 blocks;
+} BWL_POST_PACKED_STRUCT data_blocks_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct num_completed_data_blocks_evt_parms {
+	uint16 num_blocks;
+	uint8 num_handles;
+	data_blocks_t completed[1];
+} BWL_POST_PACKED_STRUCT num_completed_data_blocks_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct befto_evt_parms {
+	uint8 status;
+	uint32 befto;
+} BWL_POST_PACKED_STRUCT befto_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct srm_evt_parms {
+	uint8 status;
+	uint8 plh;
+	uint8 srm;
+} BWL_POST_PACKED_STRUCT srm_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct contact_counter_evt_parms {
+	uint8 status;
+	uint8 llh[2];
+	uint16 counter;
+} BWL_POST_PACKED_STRUCT contact_counter_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct contact_counter_reset_evt_parms {
+	uint8 status;
+	uint8 llh[2];
+} BWL_POST_PACKED_STRUCT contact_counter_reset_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct read_linkq_evt_parms {
+	uint8 status;
+	hci_handle_t handle;
+	uint8 link_quality;
+} BWL_POST_PACKED_STRUCT read_linkq_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct ld_evt_parms {
+	uint8 status;
+	uint8 ld_aware;
+	uint8 ld[2];
+	uint8 ld_opts;
+	uint8 l_opts;
+} BWL_POST_PACKED_STRUCT ld_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct eflush_complete_evt_parms {
+	uint16 handle;
+} BWL_POST_PACKED_STRUCT eflush_complete_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct vendor_specific_evt_parms {
+	uint8 len;
+	uint8 parms[1];
+} BWL_POST_PACKED_STRUCT vendor_specific_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct local_version_info_evt_parms {
+	uint8 status;
+	uint8 hci_version;
+	uint16 hci_revision;
+	uint8 pal_version;
+	uint16 mfg_name;
+	uint16 pal_subversion;
+} BWL_POST_PACKED_STRUCT local_version_info_evt_parms_t;
+
+#define MAX_SUPPORTED_CMD_BYTE	64
+typedef BWL_PRE_PACKED_STRUCT struct local_supported_cmd_evt_parms {
+	uint8 status;
+	uint8 cmd[MAX_SUPPORTED_CMD_BYTE];
+} BWL_POST_PACKED_STRUCT local_supported_cmd_evt_parms_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct status_change_evt_parms {
+	uint8 status;
+	uint8 amp_status;
+} BWL_POST_PACKED_STRUCT status_change_evt_parms_t;
+
+/* AMP HCI error codes */
+#define HCI_SUCCESS				0x00
+#define HCI_ERR_ILLEGAL_COMMAND			0x01
+#define HCI_ERR_NO_CONNECTION			0x02
+#define HCI_ERR_MEMORY_FULL			0x07
+#define HCI_ERR_CONNECTION_TIMEOUT		0x08
+#define HCI_ERR_MAX_NUM_OF_CONNECTIONS		0x09
+#define HCI_ERR_CONNECTION_EXISTS		0x0B
+#define HCI_ERR_CONNECTION_DISALLOWED		0x0C
+#define HCI_ERR_CONNECTION_ACCEPT_TIMEOUT	0x10
+#define HCI_ERR_UNSUPPORTED_VALUE		0x11
+#define HCI_ERR_ILLEGAL_PARAMETER_FMT		0x12
+#define HCI_ERR_CONN_TERM_BY_LOCAL_HOST		0x16
+#define HCI_ERR_UNSPECIFIED			0x1F
+#define HCI_ERR_UNIT_KEY_USED			0x26
+#define HCI_ERR_QOS_REJECTED			0x2D
+#define HCI_ERR_PARAM_OUT_OF_RANGE		0x30
+#define HCI_ERR_NO_SUITABLE_CHANNEL		0x39
+#define HCI_ERR_CHANNEL_MOVE			0xFF
+
+/* AMP HCI ACL Data packet format */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_ACL_data {
+	uint16	handle;			/* 12-bit connection handle + 2-bit PB and 2-bit BC flags */
+	uint16	dlen;			/* data total length */
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT amp_hci_ACL_data_t;
+
+#define HCI_ACL_DATA_PREAMBLE_SIZE	OFFSETOF(amp_hci_ACL_data_t, data)
+
+#define HCI_ACL_DATA_BC_FLAGS		(0x0 << 14)
+#define HCI_ACL_DATA_PB_FLAGS		(0x3 << 12)
+
+#define HCI_ACL_DATA_HANDLE(handle)	((handle) & 0x0fff)
+#define HCI_ACL_DATA_FLAGS(handle)	((handle) >> 12)
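+/*
+ * Illustrative decode (hypothetical on-air handle field 0x2abc):
+ * HCI_ACL_DATA_HANDLE(0x2abc) == 0x0abc, HCI_ACL_DATA_FLAGS(0x2abc) == 0x2.
+ */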
+
+/* AMP Activity Report packet formats */
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_activity_report {
+	uint8	ScheduleKnown;
+	uint8	NumReports;
+	uint8	data[1];
+} BWL_POST_PACKED_STRUCT amp_hci_activity_report_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct amp_hci_activity_report_triple {
+	uint32	StartTime;
+	uint32	Duration;
+	uint32	Periodicity;
+} BWL_POST_PACKED_STRUCT amp_hci_activity_report_triple_t;
+
+#define HCI_AR_SCHEDULE_KNOWN		0x01
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _bt_amp_hci_h */
diff --git a/drivers/staging/edison-bcm43340/common/include/proto/eapol.h b/drivers/staging/edison-bcm43340/common/include/proto/eapol.h
new file mode 100644
index 0000000..3f283a6
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/common/include/proto/eapol.h
@@ -0,0 +1,193 @@
+/*
+ * 802.1x EAPOL definitions
+ *
+ * See
+ * IEEE Std 802.1X-2001
+ * IEEE 802.1X RADIUS Usage Guidelines
+ *
+ * Copyright Open Broadcom Corporation
+ *
+ * $Id: eapol.h 452678 2014-01-31 19:16:29Z $
+ */
+
+#ifndef _eapol_h_
+#define _eapol_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#include <bcmcrypto/aeskeywrap.h>
+
+/* EAPOL for 802.3/Ethernet */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	struct ether_header eth;	/* 802.3/Ethernet header */
+	unsigned char version;		/* EAPOL protocol version */
+	unsigned char type;		/* EAPOL type */
+	unsigned short length;		/* Length of body */
+	unsigned char body[1];		/* Body (optional) */
+} BWL_POST_PACKED_STRUCT eapol_header_t;
+
+#define EAPOL_HEADER_LEN 18
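+/* (14-byte 802.3 header + 1 version + 1 type + 2 length = 18 bytes,
+ * excluding the optional body[])
+ */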
+
+typedef struct {
+	unsigned char version;		/* EAPOL protocol version */
+	unsigned char type;		/* EAPOL type */
+	unsigned short length;		/* Length of body */
+} eapol_hdr_t;
+
+#define EAPOL_HDR_LEN 4
+
+/* EAPOL version */
+#define WPA2_EAPOL_VERSION	2
+#define WPA_EAPOL_VERSION	1
+#define LEAP_EAPOL_VERSION	1
+#define SES_EAPOL_VERSION	1
+
+/* EAPOL types */
+#define EAP_PACKET		0
+#define EAPOL_START		1
+#define EAPOL_LOGOFF		2
+#define EAPOL_KEY		3
+#define EAPOL_ASF		4
+
+/* EAPOL-Key types */
+#define EAPOL_RC4_KEY		1
+#define EAPOL_WPA2_KEY		2	/* 802.11i/WPA2 */
+#define EAPOL_WPA_KEY		254	/* WPA */
+
+/* RC4 EAPOL-Key header field sizes */
+#define EAPOL_KEY_REPLAY_LEN	8
+#define EAPOL_KEY_IV_LEN	16
+#define EAPOL_KEY_SIG_LEN	16
+
+/* RC4 EAPOL-Key */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	unsigned char type;			/* Key Descriptor Type */
+	unsigned short length;			/* Key Length (unaligned) */
+	unsigned char replay[EAPOL_KEY_REPLAY_LEN];	/* Replay Counter */
+	unsigned char iv[EAPOL_KEY_IV_LEN];		/* Key IV */
+	unsigned char index;				/* Key Flags & Index */
+	unsigned char signature[EAPOL_KEY_SIG_LEN];	/* Key Signature */
+	unsigned char key[1];				/* Key (optional) */
+} BWL_POST_PACKED_STRUCT eapol_key_header_t;
+
+#define EAPOL_KEY_HEADER_LEN 	44
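+/* (1 type + 2 length + 8 replay + 16 iv + 1 index + 16 signature = 44 bytes,
+ * excluding the optional key[])
+ */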
+
+/* RC4 EAPOL-Key flags */
+#define EAPOL_KEY_FLAGS_MASK	0x80
+#define EAPOL_KEY_BROADCAST	0
+#define EAPOL_KEY_UNICAST	0x80
+
+/* RC4 EAPOL-Key index */
+#define EAPOL_KEY_INDEX_MASK	0x7f
+
+/* WPA/802.11i/WPA2 EAPOL-Key header field sizes */
+#define EAPOL_WPA_KEY_REPLAY_LEN	8
+#define EAPOL_WPA_KEY_NONCE_LEN		32
+#define EAPOL_WPA_KEY_IV_LEN		16
+#define EAPOL_WPA_KEY_RSC_LEN		8
+#define EAPOL_WPA_KEY_ID_LEN		8
+#define EAPOL_WPA_KEY_MIC_LEN		16
+#define EAPOL_WPA_KEY_DATA_LEN		(EAPOL_WPA_MAX_KEY_SIZE + AKW_BLOCK_LEN)
+#define EAPOL_WPA_MAX_KEY_SIZE		32
+
+/* WPA EAPOL-Key */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	unsigned char type;		/* Key Descriptor Type */
+	unsigned short key_info;	/* Key Information (unaligned) */
+	unsigned short key_len;		/* Key Length (unaligned) */
+	unsigned char replay[EAPOL_WPA_KEY_REPLAY_LEN];	/* Replay Counter */
+	unsigned char nonce[EAPOL_WPA_KEY_NONCE_LEN];	/* Nonce */
+	unsigned char iv[EAPOL_WPA_KEY_IV_LEN];		/* Key IV */
+	unsigned char rsc[EAPOL_WPA_KEY_RSC_LEN];	/* Key RSC */
+	unsigned char id[EAPOL_WPA_KEY_ID_LEN];		/* WPA:Key ID, 802.11i/WPA2: Reserved */
+	unsigned char mic[EAPOL_WPA_KEY_MIC_LEN];	/* Key MIC */
+	unsigned short data_len;			/* Key Data Length */
+	unsigned char data[EAPOL_WPA_KEY_DATA_LEN];	/* Key data */
+} BWL_POST_PACKED_STRUCT eapol_wpa_key_header_t;
+
+#define EAPOL_WPA_KEY_LEN 		95
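+/* (1 + 2 + 2 + 8 + 32 + 16 + 8 + 8 + 16 + 2 = 95 bytes, counting every
+ * fixed field of eapol_wpa_key_header_t but excluding data[])
+ */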
+
+/* WPA/802.11i/WPA2 KEY KEY_INFO bits */
+#define WPA_KEY_DESC_V1		0x01
+#define WPA_KEY_DESC_V2		0x02
+#define WPA_KEY_DESC_V3		0x03
+#define WPA_KEY_PAIRWISE	0x08
+#define WPA_KEY_INSTALL		0x40
+#define WPA_KEY_ACK		0x80
+#define WPA_KEY_MIC		0x100
+#define WPA_KEY_SECURE		0x200
+#define WPA_KEY_ERROR		0x400
+#define WPA_KEY_REQ		0x800
+
+#define WPA_KEY_DESC_V2_OR_V3 WPA_KEY_DESC_V2
+
+/* WPA-only KEY KEY_INFO bits */
+#define WPA_KEY_INDEX_0		0x00
+#define WPA_KEY_INDEX_1		0x10
+#define WPA_KEY_INDEX_2		0x20
+#define WPA_KEY_INDEX_3		0x30
+#define WPA_KEY_INDEX_MASK	0x30
+#define WPA_KEY_INDEX_SHIFT	0x04
+
+/* 802.11i/WPA2-only KEY KEY_INFO bits */
+#define WPA_KEY_ENCRYPTED_DATA	0x1000
+
+/* Key Data encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8 type;
+	uint8 length;
+	uint8 oui[3];
+	uint8 subtype;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_encap_data_t;
+
+#define EAPOL_WPA2_ENCAP_DATA_HDR_LEN 	6
+
+#define WPA2_KEY_DATA_SUBTYPE_GTK	1
+#define WPA2_KEY_DATA_SUBTYPE_STAKEY	2
+#define WPA2_KEY_DATA_SUBTYPE_MAC	3
+#define WPA2_KEY_DATA_SUBTYPE_PMKID	4
+#define WPA2_KEY_DATA_SUBTYPE_IGTK	9
+
+/* GTK encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8	flags;
+	uint8	reserved;
+	uint8	gtk[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_gtk_encap_t;
+
+#define EAPOL_WPA2_KEY_GTK_ENCAP_HDR_LEN 	2
+
+#define WPA2_GTK_INDEX_MASK	0x03
+#define WPA2_GTK_INDEX_SHIFT	0x00
+
+#define WPA2_GTK_TRANSMIT	0x04
+
+/* IGTK encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint16	key_id;
+	uint8	ipn[6];
+	uint8	key[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_igtk_encap_t;
+
+#define EAPOL_WPA2_KEY_IGTK_ENCAP_HDR_LEN 	8
+
+/* STAKey encapsulation */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8	reserved[2];
+	uint8	mac[ETHER_ADDR_LEN];
+	uint8	stakey[EAPOL_WPA_MAX_KEY_SIZE];
+} BWL_POST_PACKED_STRUCT eapol_wpa2_key_stakey_encap_t;
+
+#define WPA2_KEY_DATA_PAD	0xdd
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _eapol_h_ */
diff --git a/drivers/staging/edison-bcm43340/common/include/proto/ethernet.h b/drivers/staging/edison-bcm43340/common/include/proto/ethernet.h
new file mode 100644
index 0000000..0760302
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/common/include/proto/ethernet.h
@@ -0,0 +1,220 @@
+/*
+ * From FreeBSD 2.2.7: Fundamental constants relating to ethernet.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: ethernet.h 403353 2013-05-20 14:05:33Z $
+ */
+
+#ifndef _NET_ETHERNET_H_	/* use native BSD ethernet.h when available */
+#define _NET_ETHERNET_H_
+
+#ifndef _TYPEDEFS_H_
+#include "typedefs.h"
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/*
+ * The number of bytes in an ethernet (MAC) address.
+ */
+#define	ETHER_ADDR_LEN		6
+
+/*
+ * The number of bytes in the type field.
+ */
+#define	ETHER_TYPE_LEN		2
+
+/*
+ * The number of bytes in the trailing CRC field.
+ */
+#define	ETHER_CRC_LEN		4
+
+/*
+ * The length of the combined header.
+ */
+#define	ETHER_HDR_LEN		(ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN)
+
+/*
+ * The minimum packet length.
+ */
+#define	ETHER_MIN_LEN		64
+
+/*
+ * The minimum packet user data length.
+ */
+#define	ETHER_MIN_DATA		46
+
+/*
+ * The maximum packet length.
+ */
+#define	ETHER_MAX_LEN		1518
+
+/*
+ * The maximum packet user data length.
+ */
+#define	ETHER_MAX_DATA		1500
+
+/* ether types */
+#define ETHER_TYPE_MIN		0x0600		/* Anything less than MIN is a length */
+#define	ETHER_TYPE_IP		0x0800		/* IP */
+#define ETHER_TYPE_ARP		0x0806		/* ARP */
+#define ETHER_TYPE_8021Q	0x8100		/* 802.1Q */
+#define	ETHER_TYPE_IPV6		0x86dd		/* IPv6 */
+#define	ETHER_TYPE_BRCM		0x886c		/* Broadcom Corp. */
+#define	ETHER_TYPE_802_1X	0x888e		/* 802.1x */
+#ifdef PLC
+#define	ETHER_TYPE_88E1		0x88e1		/* GIGLE */
+#define	ETHER_TYPE_8912		0x8912		/* GIGLE */
+#define ETHER_TYPE_GIGLED	0xffff		/* GIGLE */
+#endif /* PLC */
+#define	ETHER_TYPE_802_1X_PREAUTH 0x88c7	/* 802.1x preauthentication */
+#define ETHER_TYPE_WAI		0x88b4		/* WAI */
+#define ETHER_TYPE_89_0D	0x890d		/* 89-0d frame for TDLS */
+
+#define ETHER_TYPE_PPP_SES	0x8864		/* PPPoE Session */
+
+#define ETHER_TYPE_IAPP_L2_UPDATE	0x6	/* IAPP L2 update frame */
+
+/* A Broadcom subtype follows the ethertype; the first 2 bytes are reserved, the next 2 are the subtype */
+#define	ETHER_BRCM_SUBTYPE_LEN	4	/* Broadcom 4 byte subtype */
+
+/* ether header */
+#define ETHER_DEST_OFFSET	(0 * ETHER_ADDR_LEN)	/* dest address offset */
+#define ETHER_SRC_OFFSET	(1 * ETHER_ADDR_LEN)	/* src address offset */
+#define ETHER_TYPE_OFFSET	(2 * ETHER_ADDR_LEN)	/* ether type offset */
+
+/*
+ * A macro to validate a length against the Ethernet frame size limits.
+ */
+#define	ETHER_IS_VALID_LEN(foo)	\
+	((foo) >= ETHER_MIN_LEN && (foo) <= ETHER_MAX_LEN)
+
+#define ETHER_FILL_MCAST_ADDR_FROM_IP(ea, mgrp_ip) {		\
+		((uint8 *)ea)[0] = 0x01;			\
+		((uint8 *)ea)[1] = 0x00;			\
+		((uint8 *)ea)[2] = 0x5e;			\
+		((uint8 *)ea)[3] = ((mgrp_ip) >> 16) & 0x7f;	\
+		((uint8 *)ea)[4] = ((mgrp_ip) >>  8) & 0xff;	\
+		((uint8 *)ea)[5] = ((mgrp_ip) >>  0) & 0xff;	\
+}
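+/*
+ * Illustrative mapping (RFC 1112): 239.1.2.3 (0xef010203) fills ea with
+ * 01:00:5E:01:02:03; the top 9 bits of the group address are discarded.
+ */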
+
+#ifndef __INCif_etherh /* Quick and ugly hack for VxWorks */
+/*
+ * Structure of a 10Mb/s Ethernet header.
+ */
+BWL_PRE_PACKED_STRUCT struct ether_header {
+	uint8	ether_dhost[ETHER_ADDR_LEN];
+	uint8	ether_shost[ETHER_ADDR_LEN];
+	uint16	ether_type;
+} BWL_POST_PACKED_STRUCT;
+
+/*
+ * Structure of a 48-bit Ethernet address.
+ */
+BWL_PRE_PACKED_STRUCT struct	ether_addr {
+	uint8 octet[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+#endif	/* !__INCif_etherh Quick and ugly hack for VxWorks */
+
+/*
+ * Macros that take a pointer and set, test, clear, or toggle the locally
+ * administered address bit in a 48-bit Ethernet address.
+ */
+#define ETHER_SET_LOCALADDR(ea)	(((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] | 2))
+#define ETHER_IS_LOCALADDR(ea) 	(((uint8 *)(ea))[0] & 2)
+#define ETHER_CLR_LOCALADDR(ea)	(((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & 0xfd))
+#define ETHER_TOGGLE_LOCALADDR(ea)	(((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] ^ 2))
+
+/* Takes a pointer, clears the group bit to mark the MAC address as unicast */
+#define ETHER_SET_UNICAST(ea)	(((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & ~1))
+
+/*
+ * Takes a pointer, returns true if a 48-bit multicast address
+ * (including broadcast, since it is all ones)
+ */
+#define ETHER_ISMULTI(ea) (((const uint8 *)(ea))[0] & 1)
+
+
+/* compare two ethernet addresses - assumes the pointers can be referenced as shorts */
+#define eacmp(a, b)	((((const uint16 *)(a))[0] ^ ((const uint16 *)(b))[0]) | \
+	                 (((const uint16 *)(a))[1] ^ ((const uint16 *)(b))[1]) | \
+	                 (((const uint16 *)(a))[2] ^ ((const uint16 *)(b))[2]))
+
+#define	ether_cmp(a, b)	eacmp(a, b)
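+/*
+ * Illustrative use (a and b are hypothetical struct ether_addr pointers;
+ * note both must be 16-bit aligned for the uint16 accesses to be safe):
+ *
+ *	if (eacmp(a, b) == 0)
+ *		;	// addresses match
+ */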
+
+/* copy an ethernet address - assumes the pointers can be referenced as shorts */
+#define eacopy(s, d) \
+do { \
+	((uint16 *)(d))[0] = ((const uint16 *)(s))[0]; \
+	((uint16 *)(d))[1] = ((const uint16 *)(s))[1]; \
+	((uint16 *)(d))[2] = ((const uint16 *)(s))[2]; \
+} while (0)
+
+#define	ether_copy(s, d) eacopy(s, d)
+
+/* Copy an ethernet address in reverse order */
+#define	ether_rcopy(s, d) \
+do { \
+	((uint16 *)(d))[2] = ((uint16 *)(s))[2]; \
+	((uint16 *)(d))[1] = ((uint16 *)(s))[1]; \
+	((uint16 *)(d))[0] = ((uint16 *)(s))[0]; \
+} while (0)
+
+
+
+static const struct ether_addr ether_bcast = {{255, 255, 255, 255, 255, 255}};
+static const struct ether_addr ether_null = {{0, 0, 0, 0, 0, 0}};
+static const struct ether_addr ether_ipv6_mcast = {{0x33, 0x33, 0x00, 0x00, 0x00, 0x01}};
+
+#define ETHER_ISBCAST(ea)	((((const uint8 *)(ea))[0] &		\
+	                          ((const uint8 *)(ea))[1] &		\
+				  ((const uint8 *)(ea))[2] &		\
+				  ((const uint8 *)(ea))[3] &		\
+				  ((const uint8 *)(ea))[4] &		\
+				  ((const uint8 *)(ea))[5]) == 0xff)
+#define ETHER_ISNULLADDR(ea)	((((const uint8 *)(ea))[0] |		\
+				  ((const uint8 *)(ea))[1] |		\
+				  ((const uint8 *)(ea))[2] |		\
+				  ((const uint8 *)(ea))[3] |		\
+				  ((const uint8 *)(ea))[4] |		\
+				  ((const uint8 *)(ea))[5]) == 0)
+
+#define ETHER_ISNULLDEST(da)	((((const uint16 *)(da))[0] |           \
+				  ((const uint16 *)(da))[1] |           \
+				  ((const uint16 *)(da))[2]) == 0)
+#define ETHER_ISNULLSRC(sa)	ETHER_ISNULLDEST(sa)
+
+#define ETHER_MOVE_HDR(d, s) \
+do { \
+	struct ether_header t; \
+	t = *(struct ether_header *)(s); \
+	*(struct ether_header *)(d) = t; \
+} while (0)
+
+#define ETHER_ISUCAST(ea)	((((uint8 *)(ea))[0] & 0x01) == 0)
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _NET_ETHERNET_H_ */
diff --git a/drivers/staging/edison-bcm43340/common/include/proto/p2p.h b/drivers/staging/edison-bcm43340/common/include/proto/p2p.h
new file mode 100644
index 0000000..27c4794
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/common/include/proto/p2p.h
@@ -0,0 +1,608 @@
+/*
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * Fundamental types and constants relating to WFA P2P (aka WiFi Direct)
+ *
+ * $Id: p2p.h 444066 2013-12-18 12:49:24Z $
+ */
+
+#ifndef _P2P_H_
+#define _P2P_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+#include <wlioctl.h>
+#include <proto/802.11.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* WiFi P2P OUI values */
+#define P2P_OUI			WFA_OUI			/* WiFi P2P OUI */
+#define P2P_VER			WFA_OUI_TYPE_P2P	/* P2P version: 9=WiFi P2P v1.0 */
+
+#define P2P_IE_ID		0xdd			/* P2P IE element ID */
+
+/* WiFi P2P IE */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_ie {
+	uint8	id;		/* IE ID: 0xDD */
+	uint8	len;		/* IE length */
+	uint8	OUI[3];		/* WiFi P2P specific OUI: P2P_OUI */
+	uint8	oui_type;	/* Identifies P2P version: P2P_VER */
+	uint8	subelts[1];	/* variable length subelements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_ie wifi_p2p_ie_t;
+
+#define P2P_IE_FIXED_LEN	6
+
+#define P2P_ATTR_ID_OFF		0
+#define P2P_ATTR_LEN_OFF	1
+#define P2P_ATTR_DATA_OFF	3
+
+#define P2P_ATTR_ID_LEN		1	/* ID field length */
+#define P2P_ATTR_LEN_LEN	2	/* length field length */
+#define P2P_ATTR_HDR_LEN	3 /* ID + 2-byte length field spec 1.02 */
+
+/* P2P IE Subelement IDs from WiFi P2P Technical Spec 1.00 */
+#define P2P_SEID_STATUS			0	/* Status */
+#define P2P_SEID_MINOR_RC		1	/* Minor Reason Code */
+#define P2P_SEID_P2P_INFO		2	/* P2P Capability (capabilities info) */
+#define P2P_SEID_DEV_ID			3	/* P2P Device ID */
+#define P2P_SEID_INTENT			4	/* Group Owner Intent */
+#define P2P_SEID_CFG_TIMEOUT		5	/* Configuration Timeout */
+#define P2P_SEID_CHANNEL		6	/* Listen channel */
+#define P2P_SEID_GRP_BSSID		7	/* P2P Group BSSID */
+#define P2P_SEID_XT_TIMING		8	/* Extended Listen Timing */
+#define P2P_SEID_INTINTADDR		9	/* Intended P2P Interface Address */
+#define P2P_SEID_P2P_MGBTY		10	/* P2P Manageability */
+#define P2P_SEID_CHAN_LIST		11	/* Channel List */
+#define P2P_SEID_ABSENCE		12	/* Notice of Absence */
+#define P2P_SEID_DEV_INFO		13	/* Device Info */
+#define P2P_SEID_GROUP_INFO		14	/* Group Info */
+#define P2P_SEID_GROUP_ID		15	/* Group ID */
+#define P2P_SEID_P2P_IF			16	/* P2P Interface */
+#define P2P_SEID_OP_CHANNEL		17	/* Operating Channel */
+#define P2P_SEID_INVITE_FLAGS		18	/* Invitation Flags */
+#define P2P_SEID_VNDR			221	/* Vendor-specific subelement */
+
+#define P2P_SE_VS_ID_SERVICES	0x1b
+
+
+/* WiFi P2P IE subelement: P2P Capability (capabilities info) */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_info_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_P2P_INFO */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	dev;		/* Device Capability Bitmap */
+	uint8	group;		/* Group Capability Bitmap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_info_se_s wifi_p2p_info_se_t;
+
+/* P2P Capability subelement's Device Capability Bitmap bit values */
+#define P2P_CAPSE_DEV_SERVICE_DIS	0x1 /* Service Discovery */
+#define P2P_CAPSE_DEV_CLIENT_DIS	0x2 /* Client Discoverability */
+#define P2P_CAPSE_DEV_CONCURRENT	0x4 /* Concurrent Operation */
+#define P2P_CAPSE_DEV_INFRA_MAN		0x8 /* P2P Infrastructure Managed */
+#define P2P_CAPSE_DEV_LIMIT			0x10 /* P2P Device Limit */
+#define P2P_CAPSE_INVITE_PROC		0x20 /* P2P Invitation Procedure */
+
+/* P2P Capability subelement's Group Capability Bitmap bit values */
+#define P2P_CAPSE_GRP_OWNER			0x1 /* P2P Group Owner */
+#define P2P_CAPSE_PERSIST_GRP		0x2 /* Persistent P2P Group */
+#define P2P_CAPSE_GRP_LIMIT			0x4 /* P2P Group Limit */
+#define P2P_CAPSE_GRP_INTRA_BSS		0x8 /* Intra-BSS Distribution */
+#define P2P_CAPSE_GRP_X_CONNECT		0x10 /* Cross Connection */
+#define P2P_CAPSE_GRP_PERSISTENT	0x20 /* Persistent Reconnect */
+#define P2P_CAPSE_GRP_FORMATION		0x40 /* Group Formation */
+
+
+/* WiFi P2P IE subelement: Group Owner Intent */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intent_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_INTENT */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	intent;		/* Intent Value 0...15 (0=legacy 15=master only) */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intent_se_s wifi_p2p_intent_se_t;
+
+/* WiFi P2P IE subelement: Configuration Timeout */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_cfg_tmo_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_CFG_TIMEOUT */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	go_tmo;		/* GO config timeout in units of 10 ms */
+	uint8	client_tmo;	/* Client config timeout in units of 10 ms */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_cfg_tmo_se_s wifi_p2p_cfg_tmo_se_t;
+
+/* WiFi P2P IE subelement: Listen Channel */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_listen_channel_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_CHANNEL */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	country[3];	/* Country String */
+	uint8	op_class;	/* Operating Class */
+	uint8	channel;	/* Channel */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_listen_channel_se_s wifi_p2p_listen_channel_se_t;
+
+/* WiFi P2P IE subelement: P2P Group BSSID */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grp_bssid_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_GRP_BSSID */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	mac[6];		/* P2P group bssid */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_grp_bssid_se_s wifi_p2p_grp_bssid_se_t;
+
+/* WiFi P2P IE subelement: P2P Group ID */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grp_id_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_GROUP_ID */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	mac[6];		/* P2P device address */
+	uint8	ssid[1];	/* SSID (device ID); variable length */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_grp_id_se_s wifi_p2p_grp_id_se_t;
+
+/* WiFi P2P IE subelement: P2P Interface */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intf_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_P2P_IF */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	mac[6];		/* P2P device address */
+	uint8	ifaddrs;	/* P2P Interface Address count */
+	uint8	ifaddr[1][6];	/* P2P Interface Address list */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intf_se_s wifi_p2p_intf_se_t;
+
+/* WiFi P2P IE subelement: Status */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_status_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_STATUS */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	status;		/* Status Code: P2P_STATSE_* */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_status_se_s wifi_p2p_status_se_t;
+
+/* Status subelement Status Code definitions */
+#define P2P_STATSE_SUCCESS			0
+				/* Success */
+#define P2P_STATSE_FAIL_INFO_CURR_UNAVAIL	1
+				/* Failed, information currently unavailable */
+#define P2P_STATSE_PASSED_UP			P2P_STATSE_FAIL_INFO_CURR_UNAVAIL
+				/* Old name for above in P2P spec 1.08 and older */
+#define P2P_STATSE_FAIL_INCOMPAT_PARAMS		2
+				/* Failed, incompatible parameters */
+#define P2P_STATSE_FAIL_LIMIT_REACHED		3
+				/* Failed, limit reached */
+#define P2P_STATSE_FAIL_INVALID_PARAMS		4
+				/* Failed, invalid parameters */
+#define P2P_STATSE_FAIL_UNABLE_TO_ACCOM		5
+				/* Failed, unable to accommodate request */
+#define P2P_STATSE_FAIL_PROTO_ERROR		6
+				/* Failed, previous protocol error or disruptive behaviour */
+#define P2P_STATSE_FAIL_NO_COMMON_CHAN		7
+				/* Failed, no common channels */
+#define P2P_STATSE_FAIL_UNKNOWN_GROUP		8
+				/* Failed, unknown P2P Group */
+#define P2P_STATSE_FAIL_INTENT			9
+				/* Failed, both peers indicated Intent 15 in GO Negotiation */
+#define P2P_STATSE_FAIL_INCOMPAT_PROVIS		10
+				/* Failed, incompatible provisioning method */
+#define P2P_STATSE_FAIL_USER_REJECT		11
+				/* Failed, rejected by user */
+
+/* WiFi P2P IE attribute: Extended Listen Timing */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_ext_se_s {
+	uint8	eltId;		/* ID: P2P_SEID_XT_TIMING */
+	uint8	len[2];		/* length not including eltId, len fields */
+	uint8	avail[2];	/* availability period */
+	uint8	interval[2];	/* availability interval */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_ext_se_s wifi_p2p_ext_se_t;
+
+#define P2P_EXT_MIN	10	/* minimum 10ms */
+
+/* WiFi P2P IE subelement: Intended P2P Interface Address */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intintad_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_INTINTADDR */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	mac[6];		/* intended P2P interface MAC address */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intintad_se_s wifi_p2p_intintad_se_t;
+
+/* WiFi P2P IE subelement: Channel */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_channel_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_CHANNEL */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	band;		/* Regulatory Class (band) */
+	uint8	channel;	/* Channel */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_channel_se_s wifi_p2p_channel_se_t;
+
+
+/* Channel Entry structure within the Channel List SE */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_entry_s {
+	uint8	band;						/* Regulatory Class (band) */
+	uint8	num_channels;				/* # of channels in the channel list */
+	uint8	channels[WL_NUMCHANNELS];	/* Channel List */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_chanlist_entry_s wifi_p2p_chanlist_entry_t;
+#define WIFI_P2P_CHANLIST_SE_MAX_ENTRIES 2
+
+/* WiFi P2P IE subelement: Channel List */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_CHAN_LIST */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	country[3];	/* Country String */
+	uint8	num_entries;	/* # of channel entries */
+	wifi_p2p_chanlist_entry_t	entries[WIFI_P2P_CHANLIST_SE_MAX_ENTRIES];
+						/* Channel Entry List */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_chanlist_se_s wifi_p2p_chanlist_se_t;
+
+/* WiFi Primary Device Type structure */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_pri_devtype_s {
+	uint16	cat_id;		/* Category ID */
+	uint8	OUI[3];		/* WFA OUI: 0x0050F2 */
+	uint8	oui_type;	/* WPS_OUI_TYPE */
+	uint16	sub_cat_id;	/* Sub Category ID */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_pri_devtype_s wifi_p2p_pri_devtype_t;
+
+/* WiFi P2P Device Info Sub Element Primary Device Type Sub Category
+ * maximum values for each category
+ */
+#define P2P_DISE_SUBCATEGORY_MINVAL		1
+#define P2P_DISE_CATEGORY_COMPUTER		1
+#define P2P_DISE_SUBCATEGORY_COMPUTER_MAXVAL		8
+#define P2P_DISE_CATEGORY_INPUT_DEVICE		2
+#define P2P_DISE_SUBCATEGORY_INPUT_DEVICE_MAXVAL	9
+#define P2P_DISE_CATEGORY_PRINTER		3
+#define P2P_DISE_SUBCATEGORY_PRINTER_MAXVAL		5
+#define P2P_DISE_CATEGORY_CAMERA		4
+#define P2P_DISE_SUBCATEGORY_CAMERA_MAXVAL		4
+#define P2P_DISE_CATEGORY_STORAGE		5
+#define P2P_DISE_SUBCATEGORY_STORAGE_MAXVAL		1
+#define P2P_DISE_CATEGORY_NETWORK_INFRA		6
+#define P2P_DISE_SUBCATEGORY_NETWORK_INFRA_MAXVAL	4
+#define P2P_DISE_CATEGORY_DISPLAY		7
+#define P2P_DISE_SUBCATEGORY_DISPLAY_MAXVAL		4
+#define P2P_DISE_CATEGORY_MULTIMEDIA		8
+#define P2P_DISE_SUBCATEGORY_MULTIMEDIA_MAXVAL		6
+#define P2P_DISE_CATEGORY_GAMING		9
+#define P2P_DISE_SUBCATEGORY_GAMING_MAXVAL		5
+#define P2P_DISE_CATEGORY_TELEPHONE		10
+#define P2P_DISE_SUBCATEGORY_TELEPHONE_MAXVAL		5
+#define P2P_DISE_CATEGORY_AUDIO			11
+#define P2P_DISE_SUBCATEGORY_AUDIO_MAXVAL		6
+
+/* WiFi P2P IE's Device Info subelement */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_devinfo_se_s {
+	uint8	eltId;			/* SE ID: P2P_SEID_DEV_INFO */
+	uint8	len[2];			/* SE length not including eltId, len fields */
+	uint8	mac[6];			/* P2P Device MAC address */
+	uint16	wps_cfg_meths;		/* Config Methods: reg_prototlv.h WPS_CONFMET_* */
+	uint8	pri_devtype[8];		/* Primary Device Type */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_devinfo_se_s wifi_p2p_devinfo_se_t;
+
+#define P2P_DEV_TYPE_LEN	8
+
+/* WiFi P2P IE's Group Info subelement Client Info Descriptor */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_cid_fixed_s {
+	uint8	len;
+	uint8	devaddr[ETHER_ADDR_LEN];	/* P2P Device Address */
+	uint8	ifaddr[ETHER_ADDR_LEN];		/* P2P Interface Address */
+	uint8	devcap;				/* Device Capability */
+	uint8	cfg_meths[2];			/* Config Methods: reg_prototlv.h WPS_CONFMET_* */
+	uint8	pridt[P2P_DEV_TYPE_LEN];	/* Primary Device Type */
+	uint8	secdts;				/* Number of Secondary Device Types */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_cid_fixed_s wifi_p2p_cid_fixed_t;
+
+/* WiFi P2P IE's Device ID subelement */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_devid_se_s {
+	uint8	eltId;
+	uint8	len[2];
+	struct ether_addr	addr;			/* P2P Device MAC address */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_devid_se_s wifi_p2p_devid_se_t;
+
+/* WiFi P2P IE subelement: P2P Manageability */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_mgbt_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_P2P_MGBTY */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	mg_bitmap;	/* manageability bitmap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_mgbt_se_s wifi_p2p_mgbt_se_t;
+/* mg_bitmap field bit values */
+#define P2P_MGBTSE_P2PDEVMGMT_FLAG   0x1 /* AP supports Managed P2P Device */
+
+/* WiFi P2P IE subelement: Group Info */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grpinfo_se_s {
+	uint8	eltId;			/* SE ID: P2P_SEID_GROUP_INFO */
+	uint8	len[2];			/* SE length not including eltId, len fields */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_grpinfo_se_s wifi_p2p_grpinfo_se_t;
+
+/* WiFi IE subelement: Operating Channel */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_op_channel_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_OP_CHANNEL */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	country[3];	/* Country String */
+	uint8	op_class;	/* Operating Class */
+	uint8	channel;	/* Channel */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_op_channel_se_s wifi_p2p_op_channel_se_t;
+
+/* WiFi IE subelement: INVITATION FLAGS */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_invite_flags_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_INVITE_FLAGS */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	flags;		/* Flags */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_invite_flags_se_s wifi_p2p_invite_flags_se_t;
+
+/* WiFi P2P Action Frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_action_frame {
+	uint8	category;	/* P2P_AF_CATEGORY */
+	uint8	OUI[3];		/* OUI - P2P_OUI */
+	uint8	type;		/* OUI Type - P2P_VER */
+	uint8	subtype;	/* OUI Subtype - P2P_AF_* */
+	uint8	dialog_token;	/* nonzero, identifies req/resp transaction */
+	uint8	elts[1];	/* Variable length information elements.  Max size =
+				 * ACTION_FRAME_SIZE - sizeof(this structure) - 1
+				 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_action_frame wifi_p2p_action_frame_t;
+#define P2P_AF_CATEGORY		0x7f
+
+#define P2P_AF_FIXED_LEN	7
+
+/* WiFi P2P Action Frame OUI Subtypes */
+#define P2P_AF_NOTICE_OF_ABSENCE	0	/* Notice of Absence */
+#define P2P_AF_PRESENCE_REQ		1	/* P2P Presence Request */
+#define P2P_AF_PRESENCE_RSP		2	/* P2P Presence Response */
+#define P2P_AF_GO_DISC_REQ		3	/* GO Discoverability Request */
+
+
+/* WiFi P2P Public Action Frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_pub_act_frame {
+	uint8	category;	/* P2P_PUB_AF_CATEGORY */
+	uint8	action;		/* P2P_PUB_AF_ACTION */
+	uint8	oui[3];		/* P2P_OUI */
+	uint8	oui_type;	/* OUI type - P2P_VER */
+	uint8	subtype;	/* OUI subtype - P2P_TYPE_* */
+	uint8	dialog_token;	/* nonzero, identifies req/rsp transaction */
+	uint8	elts[1];	/* Variable length information elements.  Max size =
+				 * ACTION_FRAME_SIZE - sizeof(this structure) - 1
+				 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_pub_act_frame wifi_p2p_pub_act_frame_t;
+#define P2P_PUB_AF_FIXED_LEN	8
+#define P2P_PUB_AF_CATEGORY	0x04
+#define P2P_PUB_AF_ACTION	0x09
+
+/* WiFi P2P Public Action Frame OUI Subtypes */
+#define P2P_PAF_GON_REQ		0	/* Group Owner Negotiation Req */
+#define P2P_PAF_GON_RSP		1	/* Group Owner Negotiation Rsp */
+#define P2P_PAF_GON_CONF	2	/* Group Owner Negotiation Confirm */
+#define P2P_PAF_INVITE_REQ	3	/* P2P Invitation Request */
+#define P2P_PAF_INVITE_RSP	4	/* P2P Invitation Response */
+#define P2P_PAF_DEVDIS_REQ	5	/* Device Discoverability Request */
+#define P2P_PAF_DEVDIS_RSP	6	/* Device Discoverability Response */
+#define P2P_PAF_PROVDIS_REQ	7	/* Provision Discovery Request */
+#define P2P_PAF_PROVDIS_RSP	8	/* Provision Discovery Response */
+#define P2P_PAF_SUBTYPE_INVALID	255	/* Invalid Subtype */
+
+/* TODO: Stop using these obsolete aliases for P2P_PAF_GON_* */
+#define P2P_TYPE_MNREQ		P2P_PAF_GON_REQ
+#define P2P_TYPE_MNRSP		P2P_PAF_GON_RSP
+#define P2P_TYPE_MNCONF		P2P_PAF_GON_CONF
+
+/* WiFi P2P IE subelement: Notice of Absence */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_desc {
+	uint8	cnt_type;	/* Count/Type */
+	uint32	duration;	/* Duration */
+	uint32	interval;	/* Interval */
+	uint32	start;		/* Start Time */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_noa_desc wifi_p2p_noa_desc_t;
+
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_se {
+	uint8	eltId;		/* Subelement ID */
+	uint8	len[2];		/* Length */
+	uint8	index;		/* Index */
+	uint8	ops_ctw_parms;	/* CTWindow and OppPS Parameters */
+	wifi_p2p_noa_desc_t	desc[1];	/* Notice of Absence Descriptor(s) */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_noa_se wifi_p2p_noa_se_t;
+
+#define P2P_NOA_SE_FIXED_LEN	5
+
+#define P2P_NOA_SE_MAX_DESC	2	/* max NoA descriptors in presence request */
+
+/* cnt_type field values */
+#define P2P_NOA_DESC_CNT_RESERVED	0	/* reserved and should not be used */
+#define P2P_NOA_DESC_CNT_REPEAT		255	/* continuous schedule */
+#define P2P_NOA_DESC_TYPE_PREFERRED	1	/* preferred values */
+#define P2P_NOA_DESC_TYPE_ACCEPTABLE	2	/* acceptable limits */
+
+/* ctw_ops_parms field values */
+#define P2P_NOA_CTW_MASK	0x7f
+#define P2P_NOA_OPS_MASK	0x80
+#define P2P_NOA_OPS_SHIFT	7
+
+#define P2P_CTW_MIN	10	/* minimum 10TU */
+
+/*
+ * P2P Service Discovery related
+ */
+#define	P2PSD_ACTION_CATEGORY		0x04
+				/* Public action frame */
+#define	P2PSD_ACTION_ID_GAS_IREQ	0x0a
+				/* Action value for GAS Initial Request AF */
+#define	P2PSD_ACTION_ID_GAS_IRESP	0x0b
+				/* Action value for GAS Initial Response AF */
+#define	P2PSD_ACTION_ID_GAS_CREQ	0x0c
+				/* Action value for GAS Comeback Request AF */
+#define	P2PSD_ACTION_ID_GAS_CRESP	0x0d
+				/* Action value for GAS Comeback Response AF */
+#define P2PSD_AD_EID				0x6c
+				/* Advertisement Protocol IE ID */
+#define P2PSD_ADP_TUPLE_QLMT_PAMEBI	0x00
+				/* Query Response Length Limit 7 bits plus PAME-BI 1 bit */
+#define P2PSD_ADP_PROTO_ID			0x00
+				/* Advertisement Protocol ID. Always 0 for P2P SD */
+#define P2PSD_GAS_OUI				P2P_OUI
+				/* WFA OUI */
+#define P2PSD_GAS_OUI_SUBTYPE		P2P_VER
+				/* OUI Subtype for GAS IE */
+#define P2PSD_GAS_NQP_INFOID		0xDDDD
+				/* NQP Query Info ID: 56797 */
+#define P2PSD_GAS_COMEBACKDEALY		0x00
+				/* Not used in the Native GAS protocol */
+
+/* Service Protocol Type */
+typedef enum p2psd_svc_protype {
+	SVC_RPOTYPE_ALL = 0,
+	SVC_RPOTYPE_BONJOUR = 1,
+	SVC_RPOTYPE_UPNP = 2,
+	SVC_RPOTYPE_WSD = 3,
+	SVC_RPOTYPE_VENDOR = 255
+} p2psd_svc_protype_t;
+
+/* Service Discovery response status code */
+typedef enum {
+	P2PSD_RESP_STATUS_SUCCESS = 0,
+	P2PSD_RESP_STATUS_PROTYPE_NA = 1,
+	P2PSD_RESP_STATUS_DATA_NA = 2,
+	P2PSD_RESP_STATUS_BAD_REQUEST = 3
+} p2psd_resp_status_t;
+
+/* Advertisement Protocol IE tuple field */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_tpl {
+	uint8	llm_pamebi;	/* Query Response Length Limit bit 0-6, set to 0 plus
+				* Pre-Associated Message Exchange BSSID Independent bit 7, set to 0
+				*/
+	uint8	adp_id;		/* Advertisement Protocol ID: 0 for NQP Native Query Protocol */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_adp_tpl wifi_p2psd_adp_tpl_t;
+
+/* Advertisement Protocol IE */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_ie {
+	uint8	id;		/* IE ID: 0x6c (108) */
+	uint8	len;	/* IE length */
+	wifi_p2psd_adp_tpl_t adp_tpl;  /* Advertisement Protocol Tuple field. Only one
+				* tuple is defined for P2P Service Discovery
+				*/
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_adp_ie wifi_p2psd_adp_ie_t;
+
+/* NQP Vendor-specific Content */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_nqp_query_vsc {
+	uint8	oui_subtype;	/* OUI Subtype: 0x09 */
+	uint16	svc_updi;		/* Service Update Indicator */
+	uint8	svc_tlvs[1];	/* wifi_p2psd_qreq_tlv_t type for service request,
+				* wifi_p2psd_qresp_tlv_t type for service response
+				*/
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_nqp_query_vsc wifi_p2psd_nqp_query_vsc_t;
+
+/* Service Request TLV */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_tlv {
+	uint16	len;			/* Length: 5 plus size of Query Data */
+	uint8	svc_prot;		/* Service Protocol Type */
+	uint8	svc_tscid;		/* Service Transaction ID */
+	uint8	query_data[1];	/* Query Data, passed in from the layer above L2 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qreq_tlv wifi_p2psd_qreq_tlv_t;
+
+/* Query Request Frame, defined in generic format, instead of NQP specific */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_frame {
+	uint16	info_id;	/* Info ID: 0xDDDD */
+	uint16	len;		/* Length of service request TLV, 5 plus the size of request data */
+	uint8	oui[3];		/* WFA OUI: 0x0050F2 */
+	uint8	qreq_vsc[1]; /* Vendor-specific Content: wifi_p2psd_nqp_query_vsc_t type for NQP */
+
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qreq_frame wifi_p2psd_qreq_frame_t;
+
+/* GAS Initial Request AF body, "elts" in wifi_p2p_pub_act_frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_ireq_frame {
+	wifi_p2psd_adp_ie_t		adp_ie;		/* Advertisement Protocol IE */
+	uint16					qreq_len;	/* Query Request Length */
+	uint8	qreq_frm[1];	/* Query Request Frame wifi_p2psd_qreq_frame_t */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_ireq_frame wifi_p2psd_gas_ireq_frame_t;
+
+/* Service Response TLV */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_tlv {
+	uint16	len;				/* Length: 5 plus size of Query Data */
+	uint8	svc_prot;			/* Service Protocol Type */
+	uint8	svc_tscid;			/* Service Transaction ID */
+	uint8	status;				/* Value defined in Table 57 of P2P spec. */
+	uint8	query_data[1];		/* Response Data, passed in from the layer above Layer 2 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qresp_tlv wifi_p2psd_qresp_tlv_t;
+
+/* Query Response Frame, defined in generic format, instead of NQP specific */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_frame {
+	uint16	info_id;	/* Info ID: 0xDDDD */
+	uint16	len;		/* Length of service response TLV, 6 plus the size of resp data */
+	uint8	oui[3];		/* WFA OUI: 0x0050F2 */
+	uint8	qresp_vsc[1]; /* Vendor-specific Content: wifi_p2psd_qresp_tlv_t type for NQP */
+
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_qresp_frame wifi_p2psd_qresp_frame_t;
+
+/* GAS Initial Response AF body, "elts" in wifi_p2p_pub_act_frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_iresp_frame {
+	uint16	status;			/* Value defined in Table 7-23 of IEEE P802.11u */
+	uint16	cb_delay;		/* GAS Comeback Delay */
+	wifi_p2psd_adp_ie_t	adp_ie;		/* Advertisement Protocol IE */
+	uint16		qresp_len;	/* Query Response Length */
+	uint8	qresp_frm[1];	/* Query Response Frame wifi_p2psd_qresp_frame_t */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_iresp_frame wifi_p2psd_gas_iresp_frame_t;
+
+/* GAS Comeback Response AF body, "elts" in wifi_p2p_pub_act_frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_cresp_frame {
+	uint16	status;			/* Value defined in Table 7-23 of IEEE P802.11u */
+	uint8	fragment_id;	/* Fragmentation ID */
+	uint16	cb_delay;		/* GAS Comeback Delay */
+	wifi_p2psd_adp_ie_t	adp_ie;		/* Advertisement Protocol IE */
+	uint16	qresp_len;		/* Query Response Length */
+	uint8	qresp_frm[1];	/* Query Response Frame wifi_p2psd_qresp_frame_t */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_cresp_frame wifi_p2psd_gas_cresp_frame_t;
+
+/* Wi-Fi GAS Public Action Frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_pub_act_frame {
+	uint8	category;		/* 0x04 Public Action Frame */
+	uint8	action;			/* 0x6c Advertisement Protocol */
+	uint8	dialog_token;	/* nonzero, identifies req/rsp transaction */
+	uint8	query_data[1];	/* Query Data. wifi_p2psd_gas_ireq_frame_t
+					 * or wifi_p2psd_gas_iresp_frame_t format
+					 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2psd_gas_pub_act_frame wifi_p2psd_gas_pub_act_frame_t;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _P2P_H_ */
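
The nested GAS layout above (an Advertisement Protocol IE wrapped into the initial request body) is easiest to see with a standalone sketch. The struct names mirror wifi_p2psd_adp_tpl_t/wifi_p2psd_adp_ie_t; everything else (the pack pragma, main) is illustrative only, since typedefs.h and the BWL packing macros are not reproduced here.

/* Minimal sketch: serialize the Advertisement Protocol IE as laid out above. */
#include <stdint.h>
#include <stdio.h>

#pragma pack(push, 1)
struct adp_tpl { uint8_t llm_pamebi; uint8_t adp_id; };
struct adp_ie  { uint8_t id; uint8_t len; struct adp_tpl tpl; };
#pragma pack(pop)

int main(void)
{
	struct adp_ie ie = {
		.id  = 0x6c,			/* P2PSD_AD_EID */
		.len = sizeof(struct adp_tpl),	/* covers the single defined tuple */
		.tpl = { 0x00, 0x00 },		/* QLMT+PAME-BI, NQP protocol id */
	};
	const uint8_t *p = (const uint8_t *)&ie;
	for (size_t i = 0; i < sizeof(ie); i++)
		printf("%02x ", p[i]);		/* -> 6c 02 00 00 */
	printf("\n");
	return 0;
}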
diff --git a/drivers/staging/edison-bcm43340/common/include/proto/sdspi.h b/drivers/staging/edison-bcm43340/common/include/proto/sdspi.h
new file mode 100644
index 0000000..f84d0b8
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/common/include/proto/sdspi.h
@@ -0,0 +1,75 @@
+/*
+ * SD-SPI Protocol Standard
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdspi.h 382882 2013-02-04 23:24:31Z $
+ */
+#ifndef	_SD_SPI_H
+#define	_SD_SPI_H
+
+#define SPI_START_M		BITFIELD_MASK(1)	/* Bit [31] 	- Start Bit */
+#define SPI_START_S		31
+#define SPI_DIR_M		BITFIELD_MASK(1)	/* Bit [30] 	- Direction */
+#define SPI_DIR_S		30
+#define SPI_CMD_INDEX_M		BITFIELD_MASK(6)	/* Bits [29:24] - Command number */
+#define SPI_CMD_INDEX_S		24
+#define SPI_RW_M		BITFIELD_MASK(1)	/* Bit [23] 	- Read=0, Write=1 */
+#define SPI_RW_S		23
+#define SPI_FUNC_M		BITFIELD_MASK(3)	/* Bits [22:20]	- Function Number */
+#define SPI_FUNC_S		20
+#define SPI_RAW_M		BITFIELD_MASK(1)	/* Bit [19] 	- Read After Wr */
+#define SPI_RAW_S		19
+#define SPI_STUFF_M		BITFIELD_MASK(1)	/* Bit [18] 	- Stuff bit */
+#define SPI_STUFF_S		18
+#define SPI_BLKMODE_M		BITFIELD_MASK(1)	/* Bit [19] 	- Blockmode 1=blk */
+#define SPI_BLKMODE_S		19
+#define SPI_OPCODE_M		BITFIELD_MASK(1)	/* Bit [18] 	- OP Code */
+#define SPI_OPCODE_S		18
+#define SPI_ADDR_M		BITFIELD_MASK(17)	/* Bits [17:1] 	- Address */
+#define SPI_ADDR_S		1
+#define SPI_STUFF0_M		BITFIELD_MASK(1)	/* Bit [0] 	- Stuff bit */
+#define SPI_STUFF0_S		0
+
+#define SPI_RSP_START_M		BITFIELD_MASK(1)	/* Bit [7] 	- Start Bit (always 0) */
+#define SPI_RSP_START_S		7
+#define SPI_RSP_PARAM_ERR_M	BITFIELD_MASK(1)	/* Bit [6] 	- Parameter Error */
+#define SPI_RSP_PARAM_ERR_S	6
+#define SPI_RSP_RFU5_M		BITFIELD_MASK(1)	/* Bit [5] 	- RFU (Always 0) */
+#define SPI_RSP_RFU5_S		5
+#define SPI_RSP_FUNC_ERR_M	BITFIELD_MASK(1)	/* Bit [4] 	- Function number error */
+#define SPI_RSP_FUNC_ERR_S	4
+#define SPI_RSP_CRC_ERR_M	BITFIELD_MASK(1)	/* Bit [3] 	- COM CRC Error */
+#define SPI_RSP_CRC_ERR_S	3
+#define SPI_RSP_ILL_CMD_M	BITFIELD_MASK(1)	/* Bit [2] 	- Illegal Command error */
+#define SPI_RSP_ILL_CMD_S	2
+#define SPI_RSP_RFU1_M		BITFIELD_MASK(1)	/* Bit [1] 	- RFU (Always 0) */
+#define SPI_RSP_RFU1_S		1
+#define SPI_RSP_IDLE_M		BITFIELD_MASK(1)	/* Bit [0] 	- In idle state */
+#define SPI_RSP_IDLE_S		0
+
+/* SD-SPI Protocol Definitions */
+#define SDSPI_COMMAND_LEN	6	/* Number of bytes in an SD command */
+#define SDSPI_START_BLOCK	0xFE	/* SD Start Block Token */
+#define SDSPI_IDLE_PAD		0xFF	/* SD-SPI idle value for MOSI */
+#define SDSPI_START_BIT_MASK	0x80
+
+#endif /* _SD_SPI_H */
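
A hedged sketch of how the SPI_*_M/SPI_*_S pairs above compose a 32-bit SD-SPI command word. BITFIELD_MASK() is defined elsewhere in the tree; the expansion assumed here, ((1u << n) - 1u), matches how the masks are used but is an assumption.

#include <stdint.h>
#include <stdio.h>

#define BITFIELD_MASK(n) ((1u << (n)) - 1u)	/* assumed definition */

/* Compose a command word from the shift/mask pairs above. */
static uint32_t sdspi_cmd_word(uint32_t cmd, uint32_t rw,
                               uint32_t func, uint32_t addr)
{
	uint32_t w = 0;
	w |= 1u << 30;				/* SPI_DIR_S: host to card */
	w |= (cmd  & BITFIELD_MASK(6))  << 24;	/* SPI_CMD_INDEX_S */
	w |= (rw   & BITFIELD_MASK(1))  << 23;	/* SPI_RW_S */
	w |= (func & BITFIELD_MASK(3))  << 20;	/* SPI_FUNC_S */
	w |= (addr & BITFIELD_MASK(17)) << 1;	/* SPI_ADDR_S */
	return w;
}

int main(void)
{
	printf("0x%08x\n", sdspi_cmd_word(53, 1, 1, 0x1ABCD));
	return 0;
}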
diff --git a/drivers/staging/edison-bcm43340/common/include/proto/vlan.h b/drivers/staging/edison-bcm43340/common/include/proto/vlan.h
new file mode 100644
index 0000000..a474cfd
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/common/include/proto/vlan.h
@@ -0,0 +1,95 @@
+/*
+ * 802.1Q VLAN protocol definitions
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: vlan.h 382883 2013-02-04 23:26:09Z $
+ */
+
+#ifndef _vlan_h_
+#define _vlan_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#ifndef	 VLAN_VID_MASK
+#define VLAN_VID_MASK		0xfff	/* low 12 bits are vlan id */
+#endif
+
+#define	VLAN_CFI_SHIFT		12	/* canonical format indicator bit */
+#define VLAN_PRI_SHIFT		13	/* user priority */
+
+#define VLAN_PRI_MASK		7	/* 3 bits of priority */
+
+#define	VLAN_TPID_OFFSET	12	/* offset of tag protocol id field */
+#define	VLAN_TCI_OFFSET		14	/* offset of tag ctrl info field */
+
+#define	VLAN_TAG_LEN		4
+#define	VLAN_TAG_OFFSET		(2 * ETHER_ADDR_LEN)	/* offset in Ethernet II packet only */
+
+#define VLAN_TPID		0x8100	/* VLAN ethertype/Tag Protocol ID */
+
+struct vlan_header {
+	uint16	vlan_type;		/* 0x8100 */
+	uint16	vlan_tag;		/* priority, cfi and vid */
+};
+
+struct ethervlan_header {
+	uint8	ether_dhost[ETHER_ADDR_LEN];
+	uint8	ether_shost[ETHER_ADDR_LEN];
+	uint16	vlan_type;		/* 0x8100 */
+	uint16	vlan_tag;		/* priority, cfi and vid */
+	uint16	ether_type;
+};
+
+struct dot3_mac_llc_snapvlan_header {
+	uint8	ether_dhost[ETHER_ADDR_LEN];	/* dest mac */
+	uint8	ether_shost[ETHER_ADDR_LEN];	/* src mac */
+	uint16	length;				/* frame length incl header */
+	uint8	dsap;				/* always 0xAA */
+	uint8	ssap;				/* always 0xAA */
+	uint8	ctl;				/* always 0x03 */
+	uint8	oui[3];				/* RFC1042: 0x00 0x00 0x00
+						 * Bridge-Tunnel: 0x00 0x00 0xF8
+						 */
+	uint16	vlan_type;			/* 0x8100 */
+	uint16	vlan_tag;			/* priority, cfi and vid */
+	uint16	ether_type;			/* ethertype */
+};
+
+#define	ETHERVLAN_HDR_LEN	(ETHER_HDR_LEN + VLAN_TAG_LEN)
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#define ETHERVLAN_MOVE_HDR(d, s) \
+do { \
+	struct ethervlan_header t; \
+	t = *(struct ethervlan_header *)(s); \
+	*(struct ethervlan_header *)(d) = t; \
+} while (0)
+
+#endif /* _vlan_h_ */
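
A minimal sketch of decoding the vlan_tag TCI field with the shift/mask constants above. The tag is big-endian on the wire, so a real parser would byte-swap first (ntohs); the example assumes that has already happened.

#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK	0xfff
#define VLAN_CFI_SHIFT	12
#define VLAN_PRI_SHIFT	13
#define VLAN_PRI_MASK	7

int main(void)
{
	uint16_t tci = 0xA123;	/* example TCI, already in host order */
	printf("pri=%u cfi=%u vid=0x%03x\n",
	       (unsigned)((tci >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK),	/* -> 5 */
	       (unsigned)((tci >> VLAN_CFI_SHIFT) & 1),			/* -> 0 */
	       (unsigned)(tci & VLAN_VID_MASK));			/* -> 0x123 */
	return 0;
}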
diff --git a/drivers/staging/edison-bcm43340/common/include/proto/wpa.h b/drivers/staging/edison-bcm43340/common/include/proto/wpa.h
new file mode 100644
index 0000000..9ef6ecc
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/common/include/proto/wpa.h
@@ -0,0 +1,205 @@
+/*
+ * Fundamental types and constants relating to WPA
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wpa.h 384536 2013-02-12 04:13:09Z $
+ */
+
+#ifndef _proto_wpa_h_
+#define _proto_wpa_h_
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* Reason Codes */
+
+/* 13 through 23 taken from IEEE Std 802.11i-2004 */
+#define DOT11_RC_INVALID_WPA_IE		13	/* Invalid info. element */
+#define DOT11_RC_MIC_FAILURE		14	/* Michael failure */
+#define DOT11_RC_4WH_TIMEOUT		15	/* 4-way handshake timeout */
+#define DOT11_RC_GTK_UPDATE_TIMEOUT	16	/* Group key update timeout */
+#define DOT11_RC_WPA_IE_MISMATCH	17	/* WPA IE in 4-way handshake differs from
+						 * (re-)assoc. request/probe response
+						 */
+#define DOT11_RC_INVALID_MC_CIPHER	18	/* Invalid multicast cipher */
+#define DOT11_RC_INVALID_UC_CIPHER	19	/* Invalid unicast cipher */
+#define DOT11_RC_INVALID_AKMP		20	/* Invalid authenticated key management protocol */
+#define DOT11_RC_BAD_WPA_VERSION	21	/* Unsupported WPA version */
+#define DOT11_RC_INVALID_WPA_CAP	22	/* Invalid WPA IE capabilities */
+#define DOT11_RC_8021X_AUTH_FAIL	23	/* 802.1X authentication failure */
+
+#define WPA2_PMKID_LEN	16
+
+/* WPA IE fixed portion */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint8 tag;	/* TAG */
+	uint8 length;	/* TAG length */
+	uint8 oui[3];	/* IE OUI */
+	uint8 oui_type;	/* OUI type */
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT version;	/* IE version */
+} BWL_POST_PACKED_STRUCT wpa_ie_fixed_t;
+#define WPA_IE_OUITYPE_LEN	4
+#define WPA_IE_FIXED_LEN	8
+#define WPA_IE_TAG_FIXED_LEN	6
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8 tag;	/* TAG */
+	uint8 length;	/* TAG length */
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT version;	/* IE version */
+} BWL_POST_PACKED_STRUCT wpa_rsn_ie_fixed_t;
+#define WPA_RSN_IE_FIXED_LEN	4
+#define WPA_RSN_IE_TAG_FIXED_LEN	2
+typedef uint8 wpa_pmkid_t[WPA2_PMKID_LEN];
+
+/* WPA suite/multicast suite */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint8 oui[3];
+	uint8 type;
+} BWL_POST_PACKED_STRUCT wpa_suite_t, wpa_suite_mcast_t;
+#define WPA_SUITE_LEN	4
+
+/* WPA unicast suite list/key management suite list */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT count;
+	wpa_suite_t list[1];
+} BWL_POST_PACKED_STRUCT wpa_suite_ucast_t, wpa_suite_auth_key_mgmt_t;
+#define WPA_IE_SUITE_COUNT_LEN	2
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT count;
+	wpa_pmkid_t list[1];
+} BWL_POST_PACKED_STRUCT wpa_pmkid_list_t;
+
+/* WPA cipher suites */
+#define WPA_CIPHER_NONE		0	/* None */
+#define WPA_CIPHER_WEP_40	1	/* WEP (40-bit) */
+#define WPA_CIPHER_TKIP		2	/* TKIP: default for WPA */
+#define WPA_CIPHER_AES_OCB	3	/* AES (OCB) */
+#define WPA_CIPHER_AES_CCM	4	/* AES (CCM) */
+#define WPA_CIPHER_WEP_104	5	/* WEP (104-bit) */
+#define WPA_CIPHER_BIP		6	/* BIP: Broadcast Integrity Protocol (802.11w) */
+#define WPA_CIPHER_TPK		7	/* Group addressed traffic not allowed */
+
+#ifdef BCMWAPI_WAI
+#define WAPI_CIPHER_NONE	WPA_CIPHER_NONE
+#define WAPI_CIPHER_SMS4	11
+
+#define WAPI_CSE_WPI_SMS4	1
+#endif /* BCMWAPI_WAI */
+
+#define IS_WPA_CIPHER(cipher)	((cipher) == WPA_CIPHER_NONE || \
+				 (cipher) == WPA_CIPHER_WEP_40 || \
+				 (cipher) == WPA_CIPHER_WEP_104 || \
+				 (cipher) == WPA_CIPHER_TKIP || \
+				 (cipher) == WPA_CIPHER_AES_OCB || \
+				 (cipher) == WPA_CIPHER_AES_CCM || \
+				 (cipher) == WPA_CIPHER_TPK)
+
+#ifdef BCMWAPI_WAI
+#define IS_WAPI_CIPHER(cipher)	((cipher) == WAPI_CIPHER_NONE || \
+				 (cipher) == WAPI_CSE_WPI_SMS4)
+
+/* convert WAPI_CSE_WPI_XXX to WAPI_CIPHER_XXX */
+#define WAPI_CSE_WPI_2_CIPHER(cse) ((cse) == WAPI_CSE_WPI_SMS4 ? \
+				WAPI_CIPHER_SMS4 : WAPI_CIPHER_NONE)
+
+#define WAPI_CIPHER_2_CSE_WPI(cipher) ((cipher) == WAPI_CIPHER_SMS4 ? \
+				WAPI_CSE_WPI_SMS4 : WAPI_CIPHER_NONE)
+#endif /* BCMWAPI_WAI */
+
+/* WPA TKIP countermeasures parameters */
+#define WPA_TKIP_CM_DETECT	60	/* multiple MIC failure window (seconds) */
+#define WPA_TKIP_CM_BLOCK	60	/* countermeasures active window (seconds) */
+
+/* RSN IE defines */
+#define RSN_CAP_LEN		2	/* Length of RSN capabilities field (2 octets) */
+
+/* RSN Capabilities defined in 802.11i */
+#define RSN_CAP_PREAUTH			0x0001
+#define RSN_CAP_NOPAIRWISE		0x0002
+#define RSN_CAP_PTK_REPLAY_CNTR_MASK	0x000C
+#define RSN_CAP_PTK_REPLAY_CNTR_SHIFT	2
+#define RSN_CAP_GTK_REPLAY_CNTR_MASK	0x0030
+#define RSN_CAP_GTK_REPLAY_CNTR_SHIFT	4
+#define RSN_CAP_1_REPLAY_CNTR		0
+#define RSN_CAP_2_REPLAY_CNTRS		1
+#define RSN_CAP_4_REPLAY_CNTRS		2
+#define RSN_CAP_16_REPLAY_CNTRS		3
+#define RSN_CAP_MFPR			0x0040
+#define RSN_CAP_MFPC			0x0080
+#define RSN_CAP_SPPC			0x0400
+#define RSN_CAP_SPPR			0x0800
+
+/* WPA capabilities defined in 802.11i */
+#define WPA_CAP_4_REPLAY_CNTRS		RSN_CAP_4_REPLAY_CNTRS
+#define WPA_CAP_16_REPLAY_CNTRS		RSN_CAP_16_REPLAY_CNTRS
+#define WPA_CAP_REPLAY_CNTR_SHIFT	RSN_CAP_PTK_REPLAY_CNTR_SHIFT
+#define WPA_CAP_REPLAY_CNTR_MASK	RSN_CAP_PTK_REPLAY_CNTR_MASK
+
+/* WPA capabilities defined in 802.11zD9.0 */
+#define WPA_CAP_PEER_KEY_ENABLE		(0x1 << 1)	/* bit 9 */
+
+/* WPA Specific defines */
+#define WPA_CAP_LEN	RSN_CAP_LEN	/* Length of RSN capabilities in RSN IE (2 octets) */
+#define WPA_PMKID_CNT_LEN	2 	/* Length of RSN PMKID count (2 octets) */
+
+#define	WPA_CAP_WPA2_PREAUTH		RSN_CAP_PREAUTH
+
+#define WPA2_PMKID_COUNT_LEN	2
+
+#ifdef BCMWAPI_WAI
+#define WAPI_CAP_PREAUTH		RSN_CAP_PREAUTH
+
+/* Other WAI definition */
+#define WAPI_WAI_REQUEST		0x00F1
+#define WAPI_UNICAST_REKEY		0x00F2
+#define WAPI_STA_AGING			0x00F3
+#define WAPI_MUTIL_REKEY		0x00F4
+#define WAPI_STA_STATS			0x00F5
+
+#define WAPI_USK_REKEY_COUNT		0x4000000 /* 0xA00000 */
+#define WAPI_MSK_REKEY_COUNT		0x4000000 /* 0xA00000 */
+#endif /* BCMWAPI_WAI */
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _proto_wpa_h_ */
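
The IE structures above store 16-bit version and count fields as explicit low/high byte pairs (little-endian on the wire) rather than a uint16, so parsers recombine them byte-wise. A standalone illustration:

#include <stdint.h>
#include <stdio.h>

/* Recombine a low/high byte pair such as wpa_suite_ucast_t's count. */
static uint16_t le16_get(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	const uint8_t count[2] = { 0x02, 0x00 };	/* count.low, count.high */
	printf("suites: %u\n", (unsigned)le16_get(count));	/* -> 2 */
	return 0;
}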
diff --git a/drivers/staging/edison-bcm43340/dhd.h b/drivers/staging/edison-bcm43340/dhd.h
new file mode 100644
index 0000000..b77e7fc
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/dhd.h
@@ -0,0 +1,881 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd.h 473046 2014-04-26 15:09:39Z $
+ */
+
+/****************
+ * Common types *
+ */
+
+#ifndef _dhd_h_
+#define _dhd_h_
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_HAS_WAKELOCK)
+#include <linux/wakelock.h>
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_HAS_WAKELOCK) */
+/* The kernel threading is sdio-specific */
+struct task_struct;
+struct sched_param;
+int setScheduler(struct task_struct *p, int policy, struct sched_param *param);
+int get_scheduler_policy(struct task_struct *p);
+
+#define ALL_INTERFACES	0xff
+
+#include <wlioctl.h>
+#include <wlfc_proto.h>
+
+
+#if defined(WL11U) && !defined(MFP)
+#define MFP /* Applying interaction with MFP by spec HS2.0 REL2 */
+#endif /* WL11U */
+
+#if defined(KEEP_ALIVE)
+/* Default KEEP_ALIVE Period is 55 sec to prevent AP from sending Keep Alive probe frame */
+#define KEEP_ALIVE_PERIOD 55000
+#define NULL_PKT_STR	"null_pkt"
+#endif /* KEEP_ALIVE */
+/* Forward decls */
+struct dhd_bus;
+struct dhd_prot;
+struct dhd_info;
+struct dhd_ioctl;
+
+/* The level of bus communication with the dongle */
+enum dhd_bus_state {
+	DHD_BUS_DOWN,		/* Not ready for frame transfers */
+	DHD_BUS_LOAD,		/* Download access only (CPU reset) */
+	DHD_BUS_DATA		/* Ready for frame transfers */
+};
+
+
+enum dhd_op_flags {
+/* Firmware requested operation mode */
+	DHD_FLAG_STA_MODE				= (1 << (0)), /* STA only */
+	DHD_FLAG_HOSTAP_MODE				= (1 << (1)), /* SOFTAP only */
+	DHD_FLAG_P2P_MODE				= (1 << (2)), /* P2P Only */
+	/* STA + P2P */
+	DHD_FLAG_CONCURR_SINGLE_CHAN_MODE = (DHD_FLAG_STA_MODE | DHD_FLAG_P2P_MODE),
+	DHD_FLAG_CONCURR_MULTI_CHAN_MODE		= (1 << (4)), /* STA + P2P */
+	/* Current P2P mode for P2P connection */
+	DHD_FLAG_P2P_GC_MODE				= (1 << (5)),
+	DHD_FLAG_P2P_GO_MODE				= (1 << (6)),
+	DHD_FLAG_MBSS_MODE				= (1 << (7)), /* MBSS in future */
+	DHD_FLAG_IBSS_MODE				= (1 << (8)),
+	DHD_FLAG_MFG_MODE				= (1 << (9))
+};
+
+/* Max sequential TX/RX Control timeouts to set HANG event */
+#ifndef MAX_CNTL_TX_TIMEOUT
+#define MAX_CNTL_TX_TIMEOUT 2
+#endif /* MAX_CNTL_TX_TIMEOUT */
+#ifndef MAX_CNTL_RX_TIMEOUT
+#define MAX_CNTL_RX_TIMEOUT 1
+#endif /* MAX_CNTL_RX_TIMEOUT */
+
+#define DHD_SCAN_ASSOC_ACTIVE_TIME	40 /* ms: Embedded default Active setting from DHD */
+#define DHD_SCAN_UNASSOC_ACTIVE_TIME 80 /* ms: Embedded def. Unassoc Active setting from DHD */
+#define DHD_SCAN_PASSIVE_TIME		130 /* ms: Embedded default Passive setting from DHD */
+
+#ifndef POWERUP_MAX_RETRY
+#define POWERUP_MAX_RETRY	3 /* how many times we retry to power up the chip */
+#endif
+#ifndef POWERUP_WAIT_MS
+#define POWERUP_WAIT_MS		2000 /* ms: timeout waiting for wifi to come up */
+#endif
+
+enum dhd_bus_wake_state {
+	WAKE_LOCK_OFF,
+	WAKE_LOCK_PRIV,
+	WAKE_LOCK_DPC,
+	WAKE_LOCK_IOCTL,
+	WAKE_LOCK_DOWNLOAD,
+	WAKE_LOCK_TMOUT,
+	WAKE_LOCK_WATCHDOG,
+	WAKE_LOCK_LINK_DOWN_TMOUT,
+	WAKE_LOCK_PNO_FIND_TMOUT,
+	WAKE_LOCK_SOFTAP_SET,
+	WAKE_LOCK_SOFTAP_STOP,
+	WAKE_LOCK_SOFTAP_START,
+	WAKE_LOCK_SOFTAP_THREAD
+};
+
+enum dhd_prealloc_index {
+	DHD_PREALLOC_PROT = 0,
+	DHD_PREALLOC_RXBUF,
+	DHD_PREALLOC_DATABUF,
+	DHD_PREALLOC_OSL_BUF,
+#if defined(STATIC_WL_PRIV_STRUCT)
+	DHD_PREALLOC_WIPHY_ESCAN0 = 5,
+#endif /* STATIC_WL_PRIV_STRUCT */
+	DHD_PREALLOC_DHD_INFO = 7
+};
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN	32
+#endif
+
+/* host packet-reordering logic */
+/* the following structure holds the reorder buffers (void **p) */
+typedef struct reorder_info {
+	void **p;
+	uint8 flow_id;
+	uint8 cur_idx;
+	uint8 exp_idx;
+	uint8 max_idx;
+	uint8 pend_pkts;
+} reorder_info_t;
+
+#ifdef DHDTCPACK_SUPPRESS
+#define TCPACK_SUP_OFF		0	/* TCPACK suppress off */
+/* Replace TCPACK in txq when new coming one has higher ACK number. */
+#define TCPACK_SUP_REPLACE	1
+/* TCPACK_SUP_REPLACE plus delayed TCP ACK TX, unless the ACK is for PSH data.
+ * This benefits a half-duplex bus interface (e.g. SDIO) in two ways:
+ * 1. TCP DATA packets can be read first from the bus
+ * 2. TCP ACKs that need not be delivered urgently stay longer in the TXQ and so can be suppressed.
+ */
+#define TCPACK_SUP_DELAYTX	2
+#endif /* DHDTCPACK_SUPPRESS */
+
+/* Common structure for module and instance linkage */
+typedef struct dhd_pub {
+	/* Linkage pointers */
+	osl_t *osh;		/* OSL handle */
+	struct dhd_bus *bus;	/* Bus module handle */
+	struct dhd_prot *prot;	/* Protocol module handle */
+	struct dhd_info  *info; /* Info module handle */
+
+	/* to NDIS developer, the structure dhd_common is redundant,
+	 * please do NOT merge it back from other branches !!!
+	 */
+
+
+	/* Internal dhd items */
+	bool up;		/* Driver up/down (to OS) */
+	bool txoff;		/* Transmit flow-controlled */
+	bool dongle_reset;  /* TRUE = DEVRESET put dongle into reset */
+	enum dhd_bus_state busstate;
+	uint hdrlen;		/* Total DHD header length (proto + bus) */
+	uint maxctl;		/* Max size rxctl request from proto to bus */
+	uint rxsz;		/* Rx buffer size bus module should use */
+	uint8 wme_dp;	/* wme discard priority */
+
+	/* Dongle media info */
+	bool iswl;		/* Dongle-resident driver is wl */
+	ulong drv_version;	/* Version of dongle-resident driver */
+	struct ether_addr mac;	/* MAC address obtained from dongle */
+	dngl_stats_t dstats;	/* Stats for dongle-based data */
+
+	/* Additional stats for the bus level */
+	ulong tx_packets;	/* Data packets sent to dongle */
+	ulong tx_multicast;	/* Multicast data packets sent to dongle */
+	ulong tx_errors;	/* Errors in sending data to dongle */
+	ulong tx_ctlpkts;	/* Control packets sent to dongle */
+	ulong tx_ctlerrs;	/* Errors sending control frames to dongle */
+	ulong rx_packets;	/* Packets sent up the network interface */
+	ulong rx_multicast;	/* Multicast packets sent up the network interface */
+	ulong rx_errors;	/* Errors processing rx data packets */
+	ulong rx_ctlpkts;	/* Control frames processed from dongle */
+	ulong rx_ctlerrs;	/* Errors in processing rx control frames */
+	ulong rx_dropped;	/* Packets dropped locally (no memory) */
+	ulong rx_flushed;  /* Packets flushed due to unscheduled sendup thread */
+	ulong wd_dpc_sched;   /* Number of times dhd dpc scheduled by watchdog timer */
+
+	ulong rx_readahead_cnt;	/* Number of packets where header read-ahead was used. */
+	ulong tx_realloc;	/* Number of tx packets we had to realloc for headroom */
+	ulong fc_packets;       /* Number of flow control pkts recvd */
+
+	/* Last error return */
+	int bcmerror;
+	uint tickcnt;
+
+	/* Last error from dongle */
+	int dongle_error;
+
+	uint8 country_code[WLC_CNTRY_BUF_SZ];
+
+	/* Suspend disable flag and "in suspend" flag */
+	int suspend_disable_flag; /* "1" to disable all extra powersaving during suspend */
+	int in_suspend;			/* flag set to 1 when early suspend called */
+#ifdef PNO_SUPPORT
+	int pno_enable;			/* pno status : "1" is pno enable */
+	int pno_suspend;		/* pno suspend status : "1" is pno suspended */
+#endif /* PNO_SUPPORT */
+	/* DTIM skip value, default 0(or 1) means wake each DTIM
+	 * 3 means skip 2 DTIMs and wake up 3rd DTIM(9th beacon when AP DTIM is 3)
+	 */
+	int suspend_bcn_li_dtim;         /* bcn_li_dtim value in suspend mode */
+#ifdef PKT_FILTER_SUPPORT
+	int early_suspended;	/* Early suspend status */
+	int dhcp_in_progress;	/* DHCP period */
+#endif
+
+	/* Pkt filter definition */
+	char * pktfilter[100];
+	int pktfilter_count;
+
+	wl_country_t dhd_cspec;		/* Current Locale info */
+	char eventmask[WL_EVENTING_MASK_LEN];
+	int	op_mode;				/* STA, HostAPD, WFD, SoftAP */
+
+/* Set this to 1 to use a separate interface (p2p0) for p2p operations.
+ *  For ICS MR1 releases it should be disabled to be compatible with the ICS MR1 Framework;
+ *  see target dhd-cdc-sdmmc-panda-cfg80211-icsmr1-gpl-debug in Makefile
+ */
+/* #define WL_ENABLE_P2P_IF		1 */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	struct mutex 	wl_start_stop_lock; /* lock/unlock for Android start/stop */
+	struct mutex 	wl_softap_lock;		 /* lock/unlock for any SoftAP/STA settings */
+#endif 
+
+#ifdef PROP_TXSTATUS
+	bool	wlfc_enabled;
+	int	wlfc_mode;
+	void*	wlfc_state;
+	/*
+	Mode in which the dhd flow control shall operate. Must be set before
+	traffic starts to the device.
+	0 - Do not do any proptxstatus flow control
+	1 - Use implied credit from a packet status
+	2 - Use explicit credit
+	3 - Only AMPDU host reordering is used; no wlfc.
+	*/
+	uint8	proptxstatus_mode;
+	bool	proptxstatus_txoff;
+	bool	proptxstatus_module_ignore;
+	bool	proptxstatus_credit_ignore;
+	bool	proptxstatus_txstatus_ignore;
+	/*
+	 * implement below functions in each platform if needed.
+	 */
+	/* platform specific function whether to skip flow control */
+	bool (*skip_fc)(void);
+	/* platform specific function for wlfc_enable and wlfc_deinit */
+	void (*plat_init)(void *dhd);
+	void (*plat_deinit)(void *dhd);
+#endif /* PROP_TXSTATUS */
+#ifdef PNO_SUPPORT
+	void *pno_state;
+#endif
+	bool	dongle_isolation;
+	bool	dongle_trap_occured;	/* flag for sending HANG event to upper layer */
+	int   hang_was_sent;
+	int   rxcnt_timeout;		/* counter rxcnt timeout to send HANG */
+	int   txcnt_timeout;		/* counter txcnt timeout to send HANG */
+	bool hang_report;		/* enable hang report by default */
+#ifdef WLMEDIA_HTSF
+	uint8 htsfdlystat_sz; /* Size of delay stats, max 255B */
+#endif
+#ifdef WLTDLS
+	bool tdls_enable;
+#endif
+	struct reorder_info *reorder_bufs[WLHOST_REORDERDATA_MAXFLOWS];
+	char  fw_capabilities[WLC_IOCTL_SMLEN];
+	#define MAXSKBPEND 1024
+	void *skbbuf[MAXSKBPEND];
+	uint32 store_idx;
+	uint32 sent_idx;
+#ifdef DHDTCPACK_SUPPRESS
+	uint8 tcpack_sup_mode;		/* TCPACK suppress mode */
+	void *tcpack_sup_module;	/* TCPACK suppress module */
+#endif /* DHDTCPACK_SUPPRESS */
+#if defined(ARP_OFFLOAD_SUPPORT)
+	uint32 arp_version;
+#endif
+#ifdef CUSTOM_SET_CPUCORE
+	struct task_struct * current_dpc;
+	struct task_struct * current_rxf;
+	int chan_isvht80;
+#endif /* CUSTOM_SET_CPUCORE */
+} dhd_pub_t;
+
+
+	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+
+	#define DHD_PM_RESUME_WAIT_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
+	#define _DHD_PM_RESUME_WAIT(a, b) do {\
+			int retry = 0; \
+			SMP_RD_BARRIER_DEPENDS(); \
+			while (dhd_mmc_suspend && retry++ != b) { \
+				SMP_RD_BARRIER_DEPENDS(); \
+				wait_event_interruptible_timeout(a, !dhd_mmc_suspend, 1); \
+			} \
+		} 	while (0)
+	#define DHD_PM_RESUME_WAIT(a) 		_DHD_PM_RESUME_WAIT(a, 200)
+	#define DHD_PM_RESUME_WAIT_FOREVER(a) 	_DHD_PM_RESUME_WAIT(a, ~0)
+	#ifdef CUSTOMER_HW4
+		#define DHD_PM_RESUME_RETURN_ERROR(a)   do { \
+				if (dhd_mmc_suspend) { \
+					printf("%s[%d]: mmc is still in suspend state!!!\n", \
+							__FUNCTION__, __LINE__); \
+					return a; \
+				} \
+			} while (0)
+	#else
+		#define DHD_PM_RESUME_RETURN_ERROR(a)	do { \
+			if (dhd_mmc_suspend) return a; } while (0)
+	#endif 
+	#define DHD_PM_RESUME_RETURN		do { if (dhd_mmc_suspend) return; } while (0)
+
+	#define DHD_SPINWAIT_SLEEP_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
+	#define SPINWAIT_SLEEP(a, exp, us) do { \
+		uint countdown = (us) + 9999; \
+		while ((exp) && (countdown >= 10000)) { \
+			wait_event_interruptible_timeout(a, FALSE, 1); \
+			countdown -= 10000; \
+		} \
+	} while (0)
+
+	#else
+
+	#define DHD_PM_RESUME_WAIT_INIT(a)
+	#define DHD_PM_RESUME_WAIT(a)
+	#define DHD_PM_RESUME_WAIT_FOREVER(a)
+	#define DHD_PM_RESUME_RETURN_ERROR(a)
+	#define DHD_PM_RESUME_RETURN
+
+	#define DHD_SPINWAIT_SLEEP_INIT(a)
+	#define SPINWAIT_SLEEP(a, exp, us)  do { \
+		uint countdown = (us) + 9; \
+		while ((exp) && (countdown >= 10)) { \
+			OSL_DELAY(10);  \
+			countdown -= 10;  \
+		} \
+	} while (0)
+
+	#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+#ifndef OSL_SLEEP
+#define OSL_SLEEP(ms)		OSL_DELAY(ms*1000)
+#endif /* OSL_SLEEP */
+
+#define DHD_IF_VIF	0x01	/* Virtual IF (Hidden from user) */
+
+unsigned long dhd_os_spin_lock(dhd_pub_t *pub);
+void dhd_os_spin_unlock(dhd_pub_t *pub, unsigned long flags);
+#ifdef PNO_SUPPORT
+int dhd_pno_clean(dhd_pub_t *dhd);
+#endif /* PNO_SUPPORT */
+/*
+ *  Wake locks are an Android power management concept. They are used by applications and services
+ *  to request CPU resources.
+ */
+extern int dhd_os_wake_lock(dhd_pub_t *pub);
+extern int dhd_os_wake_unlock(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_timeout(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val);
+extern int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val);
+extern int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub);
+extern int dhd_os_wd_wake_lock(dhd_pub_t *pub);
+extern int dhd_os_wd_wake_unlock(dhd_pub_t *pub);
+
+inline static void MUTEX_LOCK_SOFTAP_SET_INIT(dhd_pub_t * dhdp)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_init(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+}
+
+inline static void MUTEX_LOCK_SOFTAP_SET(dhd_pub_t * dhdp)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_lock(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+}
+
+inline static void MUTEX_UNLOCK_SOFTAP_SET(dhd_pub_t * dhdp)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_unlock(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
+}
+
+#define DHD_OS_WAKE_LOCK(pub)			dhd_os_wake_lock(pub)
+#define DHD_OS_WAKE_UNLOCK(pub)		dhd_os_wake_unlock(pub)
+#define DHD_OS_WAKE_LOCK_TIMEOUT(pub)		dhd_os_wake_lock_timeout(pub)
+#define DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(pub, val) \
+	dhd_os_wake_lock_rx_timeout_enable(pub, val)
+#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(pub, val) \
+	dhd_os_wake_lock_ctrl_timeout_enable(pub, val)
+#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL(pub) \
+	dhd_os_wake_lock_ctrl_timeout_cancel(pub)
+
+#define DHD_OS_WD_WAKE_LOCK(pub)		dhd_os_wd_wake_lock(pub)
+#define DHD_OS_WD_WAKE_UNLOCK(pub)		dhd_os_wd_wake_unlock(pub)
+#define DHD_PACKET_TIMEOUT_MS	500
+#define DHD_EVENT_TIMEOUT_MS	1500
+
+
+/* interface operations (register, remove) should be atomic, use this lock to prevent race
+ * condition among wifi on/off and interface operation functions
+ */
+void dhd_net_if_lock(struct net_device *dev);
+void dhd_net_if_unlock(struct net_device *dev);
+
+
+typedef enum dhd_attach_states
+{
+	DHD_ATTACH_STATE_INIT = 0x0,
+	DHD_ATTACH_STATE_NET_ALLOC = 0x1,
+	DHD_ATTACH_STATE_DHD_ALLOC = 0x2,
+	DHD_ATTACH_STATE_ADD_IF = 0x4,
+	DHD_ATTACH_STATE_PROT_ATTACH = 0x8,
+	DHD_ATTACH_STATE_WL_ATTACH = 0x10,
+	DHD_ATTACH_STATE_THREADS_CREATED = 0x20,
+	DHD_ATTACH_STATE_WAKELOCKS_INIT = 0x40,
+	DHD_ATTACH_STATE_CFG80211 = 0x80,
+	DHD_ATTACH_STATE_EARLYSUSPEND_DONE = 0x100,
+	DHD_ATTACH_STATE_DONE = 0x200
+} dhd_attach_states_t;
+
+/* Value -1 means we are unsuccessful in creating the kthread. */
+#define DHD_PID_KT_INVALID 	-1
+/* Value -2 means we are unsuccessful in both creating the kthread and tasklet */
+#define DHD_PID_KT_TL_INVALID	-2
+
+/*
+ * Exported from dhd OS modules (dhd_linux/dhd_ndis)
+ */
+
+/* Indication from bus module regarding presence/insertion of dongle.
+ * Return dhd_pub_t pointer, used as handle to OS module in later calls.
+ * Returned structure should have bus and prot pointers filled in.
+ * bus_hdrlen specifies required headroom for bus module header.
+ */
+extern dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen);
+#if defined(WLP2P) && defined(WL_CFG80211)
+/* To allow attach/detach calls corresponding to p2p0 interface  */
+extern int dhd_attach_p2p(dhd_pub_t *);
+extern int dhd_detach_p2p(dhd_pub_t *);
+#endif /* WLP2P && WL_CFG80211 */
+extern int dhd_register_if(dhd_pub_t *dhdp, int idx, bool need_rtnl_lock);
+
+/* Indication from bus module regarding removal/absence of dongle */
+extern void dhd_detach(dhd_pub_t *dhdp);
+extern void dhd_free(dhd_pub_t *dhdp);
+
+/* Indication from bus module to change flow-control state */
+extern void dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool on);
+
+/* Store the status of a connection attempt for later retrieval by an iovar */
+extern void dhd_store_conn_status(uint32 event, uint32 status, uint32 reason);
+
+extern bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec);
+
+/* Receive frame for delivery to OS.  Callee disposes of rxp. */
+extern void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *rxp, int numpkt, uint8 chan);
+
+/* Return pointer to interface name */
+extern char *dhd_ifname(dhd_pub_t *dhdp, int idx);
+
+/* Request scheduling of the bus dpc */
+extern void dhd_sched_dpc(dhd_pub_t *dhdp);
+
+/* Notify tx completion */
+extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success);
+
+/* OS independent layer functions */
+extern int dhd_os_proto_block(dhd_pub_t * pub);
+extern int dhd_os_proto_unblock(dhd_pub_t * pub);
+extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition, bool * pending);
+extern int dhd_os_ioctl_resp_wake(dhd_pub_t * pub);
+extern unsigned int dhd_os_get_ioctl_resp_timeout(void);
+extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec);
+
+extern int dhd_os_get_image_block(char * buf, int len, void * image);
+extern void * dhd_os_open_image(char * filename);
+extern void dhd_os_close_image(void * image);
+extern void dhd_os_wd_timer(void *bus, uint wdtick);
+extern void dhd_os_sdlock(dhd_pub_t * pub);
+extern void dhd_os_sdunlock(dhd_pub_t * pub);
+extern void dhd_os_sdlock_txq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_txq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_sndup_rxq(dhd_pub_t * pub);
+#ifdef DHDTCPACK_SUPPRESS
+extern void dhd_os_tcpacklock(dhd_pub_t *pub);
+extern void dhd_os_tcpackunlock(dhd_pub_t *pub);
+#endif /* DHDTCPACK_SUPPRESS */
+
+extern int dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr);
+extern void dhd_customer_oob_irq_unmap(void);
+extern int dhd_customer_gpio_wlan_ctrl(void *adapter, int onoff);
+extern int dhd_custom_get_mac_address(unsigned char *buf);
+extern void get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec);
+extern void dhd_os_sdunlock_sndup_rxq(dhd_pub_t * pub);
+extern void dhd_os_sdlock_eventq(dhd_pub_t * pub);
+extern void dhd_os_sdunlock_eventq(dhd_pub_t * pub);
+extern bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret);
+extern int dhd_os_send_hang_message(dhd_pub_t *dhdp);
+extern void dhd_set_version_info(dhd_pub_t *pub, char *fw);
+extern bool dhd_os_check_if_up(dhd_pub_t *pub);
+extern int dhd_os_check_wakelock(dhd_pub_t *pub);
+
+#ifdef CUSTOM_SET_CPUCORE
+extern void dhd_set_cpucore(dhd_pub_t *dhd, int set);
+#endif /* CUSTOM_SET_CPUCORE */
+
+#if defined(KEEP_ALIVE)
+extern int dhd_keep_alive_onoff(dhd_pub_t *dhd);
+#endif /* KEEP_ALIVE */
+
+
+#ifdef PKT_FILTER_SUPPORT
+#define DHD_UNICAST_FILTER_NUM		0
+#define DHD_BROADCAST_FILTER_NUM	1
+#define DHD_MULTICAST4_FILTER_NUM	2
+#define DHD_MULTICAST6_FILTER_NUM	3
+#define DHD_MDNS_FILTER_NUM		4
+#define DHD_ARP_FILTER_NUM		5
+#ifdef BOARD_INTEL
+#define DHD_IPV4_BC_255_FILTER_NUM	6
+#define DHD_IPV4_MC_224_FILTER_NUM 	7
+#define DHD_IPV6_MC_FF00_FILTER_NUM	8
+#endif /* BOARD_INTEL */
+extern int 	dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val);
+extern void dhd_enable_packet_filter(int value, dhd_pub_t *dhd);
+extern int net_os_enable_packet_filter(struct net_device *dev, int val);
+extern int net_os_rxfilter_add_remove(struct net_device *dev, int val, int num);
+#endif /* PKT_FILTER_SUPPORT */
+
+extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
+extern bool dhd_support_sta_mode(dhd_pub_t *dhd);
+
+#ifdef DHD_DEBUG
+extern int write_to_file(dhd_pub_t *dhd, uint8 *buf, int size);
+#endif /* DHD_DEBUG */
+extern void dhd_os_sdtxlock(dhd_pub_t * pub);
+extern void dhd_os_sdtxunlock(dhd_pub_t * pub);
+
+typedef struct {
+	uint32 limit;		/* Expiration time (usec) */
+	uint32 increment;	/* Current expiration increment (usec) */
+	uint32 elapsed;		/* Current elapsed time (usec) */
+	uint32 tick;		/* O/S tick time (usec) */
+} dhd_timeout_t;
+
+extern void dhd_timeout_start(dhd_timeout_t *tmo, uint usec);
+extern int dhd_timeout_expired(dhd_timeout_t *tmo);
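
The timeout helpers above implement a poll-with-budget pattern over the usec fields in dhd_timeout_t. A standalone re-creation under assumed semantics (the real implementation lives elsewhere in the tree): expired() sleeps one increment per call, accumulates into elapsed, and returns nonzero once the budget is spent.

#include <stdint.h>
#include <unistd.h>

typedef struct { uint32_t limit, increment, elapsed, tick; } my_timeout_t;

static void my_timeout_start(my_timeout_t *t, uint32_t usec)
{
	t->limit = usec;
	t->increment = t->tick = 10;	/* assumed 10 usec polling step */
	t->elapsed = 0;
}

static int my_timeout_expired(my_timeout_t *t)
{
	if (t->elapsed >= t->limit)
		return 1;
	usleep(t->increment);
	t->elapsed += t->increment;
	return 0;
}

int main(void)
{
	my_timeout_t tmo;
	my_timeout_start(&tmo, 1000);	/* 1 ms budget */
	while (!my_timeout_expired(&tmo))
		;			/* a real caller would also test its condition here */
	return 0;
}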
+
+extern int dhd_ifname2idx(struct dhd_info *dhd, char *name);
+extern int dhd_net2idx(struct dhd_info *dhd, struct net_device *net);
+extern struct net_device * dhd_idx2net(void *pub, int ifidx);
+extern int net_os_send_hang_message(struct net_device *dev);
+extern int wl_host_event(dhd_pub_t *dhd_pub, int *idx, void *pktdata,
+                         wl_event_msg_t *, void **data_ptr);
+extern void wl_event_to_host_order(wl_event_msg_t * evt);
+
+extern int dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len);
+extern int dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set,
+                            int ifindex);
+extern void dhd_common_init(osl_t *osh);
+
+extern int dhd_do_driver_init(struct net_device *net);
+extern int dhd_event_ifadd(struct dhd_info *dhd, struct wl_event_data_if *ifevent,
+	char *name, uint8 *mac);
+extern int dhd_event_ifdel(struct dhd_info *dhd, struct wl_event_data_if *ifevent,
+	char *name, uint8 *mac);
+extern struct net_device* dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name,
+	uint8 *mac, uint8 bssidx, bool need_rtnl_lock);
+extern int dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock);
+extern void dhd_vif_add(struct dhd_info *dhd, int ifidx, char * name);
+extern void dhd_vif_del(struct dhd_info *dhd, int ifidx);
+extern void dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx);
+extern void dhd_vif_sendup(struct dhd_info *dhd, int ifidx, uchar *cp, int len);
+
+/* Send packet to dongle via data channel */
+extern int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pkt);
+
+/* send up locally generated event */
+extern void dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data);
+/* Send event to host */
+extern void dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data);
+#ifdef LOG_INTO_TCPDUMP
+extern void dhd_sendup_log(dhd_pub_t *dhdp, void *data, int len);
+#endif /* LOG_INTO_TCPDUMP */
+extern int dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag);
+extern uint dhd_bus_status(dhd_pub_t *dhdp);
+extern int  dhd_bus_start(dhd_pub_t *dhdp);
+extern int dhd_bus_suspend(dhd_pub_t *dhdpub);
+extern int dhd_bus_resume(dhd_pub_t *dhdpub, int stage);
+extern int dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size);
+extern void dhd_print_buf(void *pbuf, int len, int bytes_per_line);
+extern bool dhd_is_associated(dhd_pub_t *dhd, void *bss_buf, int *retval);
+extern uint dhd_bus_chip_id(dhd_pub_t *dhdp);
+extern uint dhd_bus_chiprev_id(dhd_pub_t *dhdp);
+extern uint dhd_bus_chippkg_id(dhd_pub_t *dhdp);
+
+#if defined(KEEP_ALIVE)
+extern int dhd_keep_alive_onoff(dhd_pub_t *dhd);
+#endif /* KEEP_ALIVE */
+
+extern bool dhd_is_concurrent_mode(dhd_pub_t *dhd);
+extern int dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set);
+typedef enum cust_gpio_modes {
+	WLAN_RESET_ON,
+	WLAN_RESET_OFF,
+	WLAN_POWER_ON,
+	WLAN_POWER_OFF
+} cust_gpio_modes_t;
+
+extern int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag);
+extern int wl_iw_send_priv_event(struct net_device *dev, char *flag);
+/*
+ * Insmod parameters for debug/test
+ */
+
+/* Watchdog timer interval */
+extern uint dhd_watchdog_ms;
+
+#if defined(DHD_DEBUG)
+/* Console output poll interval */
+extern uint dhd_console_ms;
+extern uint wl_msg_level;
+#endif /* defined(DHD_DEBUG) */
+
+extern uint dhd_slpauto;
+
+/* Use interrupts */
+extern uint dhd_intr;
+
+/* Use polling */
+extern uint dhd_poll;
+
+/* ARP offload agent mode */
+extern uint dhd_arp_mode;
+
+/* ARP offload enable */
+extern uint dhd_arp_enable;
+
+/* Pkt filter enable control */
+extern uint dhd_pkt_filter_enable;
+
+/*  Pkt filter init setup */
+extern uint dhd_pkt_filter_init;
+
+/* Pkt filter mode control */
+extern uint dhd_master_mode;
+
+/* Roaming mode control */
+extern uint dhd_roam_disable;
+
+/* Radio up/down control */
+extern uint dhd_radio_up;
+
+/* Initial idletime ticks (may be -1 for immediate idle, 0 for no idle) */
+extern int dhd_idletime;
+#ifdef DHD_USE_IDLECOUNT
+#define DHD_IDLETIME_TICKS 1
+#else
+#define DHD_IDLETIME_TICKS 1
+#endif /* DHD_USE_IDLECOUNT */
+
+/* SDIO Drive Strength */
+extern uint dhd_sdiod_drive_strength;
+
+/* Override to force tx queueing all the time */
+extern uint dhd_force_tx_queueing;
+/* Default KEEP_ALIVE Period is 55 sec to prevent AP from sending Keep Alive probe frame */
+#define DEFAULT_KEEP_ALIVE_VALUE 	55000 /* msec */
+#ifndef CUSTOM_KEEP_ALIVE_SETTING
+#define CUSTOM_KEEP_ALIVE_SETTING 	DEFAULT_KEEP_ALIVE_VALUE
+#endif /* DEFAULT_KEEP_ALIVE_VALUE */
+
+#define NULL_PKT_STR	"null_pkt"
+
+/* hooks for custom glom setting option via Makefile */
+#define DEFAULT_GLOM_VALUE 	-1
+#ifndef CUSTOM_GLOM_SETTING
+#define CUSTOM_GLOM_SETTING 	DEFAULT_GLOM_VALUE
+#endif
+#define WL_AUTO_ROAM_TRIGGER -75
+/* hooks for custom Roaming Trigger  setting via Makefile */
+#define DEFAULT_ROAM_TRIGGER_VALUE -75 /* dBm default roam trigger all band */
+#define DEFAULT_ROAM_TRIGGER_SETTING 	-1
+#ifndef CUSTOM_ROAM_TRIGGER_SETTING
+#define CUSTOM_ROAM_TRIGGER_SETTING 	DEFAULT_ROAM_TRIGGER_VALUE
+#endif
+
+/* hooks for custom Roam Delta setting via Makefile */
+#define DEFAULT_ROAM_DELTA_VALUE  10 /* dBm default roam delta all band */
+#define DEFAULT_ROAM_DELTA_SETTING 	-1
+#ifndef CUSTOM_ROAM_DELTA_SETTING
+#define CUSTOM_ROAM_DELTA_SETTING 	DEFAULT_ROAM_DELTA_VALUE
+#endif
+
+/* hooks for custom PNO Event wake lock to guarantee enough time
+	for the platform to detect the event before the system is suspended
+*/
+#define DEFAULT_PNO_EVENT_LOCK_xTIME 	2 	/* multiple of DHD_PACKET_TIMEOUT_MS */
+#ifndef CUSTOM_PNO_EVENT_LOCK_xTIME
+#define CUSTOM_PNO_EVENT_LOCK_xTIME	 DEFAULT_PNO_EVENT_LOCK_xTIME
+#endif
+/* hooks for custom dhd_dpc_prio setting option via Makefile */
+#define DEFAULT_DHP_DPC_PRIO  1
+#ifndef CUSTOM_DPC_PRIO_SETTING
+#define CUSTOM_DPC_PRIO_SETTING 	DEFAULT_DHP_DPC_PRIO
+#endif
+
+#ifndef CUSTOM_LISTEN_INTERVAL
+#define CUSTOM_LISTEN_INTERVAL 		LISTEN_INTERVAL
+#endif /* CUSTOM_LISTEN_INTERVAL */
+
+#define DEFAULT_SUSPEND_BCN_LI_DTIM		3
+#ifndef CUSTOM_SUSPEND_BCN_LI_DTIM
+#define CUSTOM_SUSPEND_BCN_LI_DTIM		DEFAULT_SUSPEND_BCN_LI_DTIM
+#endif
+
+#ifndef CUSTOM_RXF_PRIO_SETTING
+#define CUSTOM_RXF_PRIO_SETTING		MAX((CUSTOM_DPC_PRIO_SETTING - 1), 1)
+#endif
+
+#define DEFAULT_WIFI_TURNOFF_DELAY		0
+#define WIFI_TURNOFF_DELAY		DEFAULT_WIFI_TURNOFF_DELAY
+
+#define DEFAULT_WIFI_TURNON_DELAY		200
+#ifndef WIFI_TURNON_DELAY
+#define WIFI_TURNON_DELAY		DEFAULT_WIFI_TURNON_DELAY
+#endif /* WIFI_TURNON_DELAY */
+
+#ifdef WLTDLS
+#ifndef CUSTOM_TDLS_IDLE_MODE_SETTING
+#define CUSTOM_TDLS_IDLE_MODE_SETTING  60000 /* 60 sec to tear down the TDLS link if not active */
+#endif
+#ifndef CUSTOM_TDLS_RSSI_THRESHOLD_HIGH
+#define CUSTOM_TDLS_RSSI_THRESHOLD_HIGH -70 /* rssi threshold for establishing TDLS link */
+#endif
+#ifndef CUSTOM_TDLS_RSSI_THRESHOLD_LOW
+#define CUSTOM_TDLS_RSSI_THRESHOLD_LOW -80 /* rssi threshold for tearing down TDLS link */
+#endif
+#endif /* WLTDLS */
+
+
+#define MAX_DTIM_SKIP_BEACON_INTERVAL	102 /* max allowed associated AP beacon for DTIM skip */
+#ifndef MAX_DTIM_ALLOWED_INTERVAL
+#define MAX_DTIM_ALLOWED_INTERVAL 600 /* max allowed total beacon interval for DTIM skip */
+#endif
+#define NO_DTIM_SKIP 1
+#ifdef SDTEST
+/* Echo packet generator (SDIO), pkts/s */
+extern uint dhd_pktgen;
+
+/* Echo packet len (0 => sawtooth, max 1800) */
+extern uint dhd_pktgen_len;
+#define MAX_PKTGEN_LEN 1800
+#endif
+
+
+/* optionally set by a module_param_string() */
+#define MOD_PARAM_PATHLEN	2048
+#define MOD_PARAM_INFOLEN	512
+
+#ifdef SOFTAP
+extern char fw_path2[MOD_PARAM_PATHLEN];
+#endif
+
+/* Flag to indicate if we should download firmware on driver load */
+extern uint dhd_download_fw_on_driverload;
+
+
+/* For supporting multiple interfaces */
+#define DHD_MAX_IFS	16
+#define DHD_DEL_IF	-0xe
+#define DHD_BAD_IF	-0xf
+
+extern void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar);
+extern void dhd_wait_event_wakeup(dhd_pub_t*dhd);
+
+#define IFLOCK_INIT(lock)       *lock = 0
+#define IFLOCK(lock)    while (InterlockedCompareExchange((lock), 1, 0))	\
+	NdisStallExecution(1);
+#define IFUNLOCK(lock)  InterlockedExchange((lock), 0)
+#define IFLOCK_FREE(lock)
+#define FW_SUPPORTED(dhd, capa) ((strstr(dhd->fw_capabilities, #capa) != NULL))
+#ifdef ARP_OFFLOAD_SUPPORT
+#define MAX_IPV4_ENTRIES	8
+void dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode);
+void dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable);
+
+/* dhd_common ARP offload wrappers */
+void dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx);
+void dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx);
+int dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx);
+void dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx);
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef WLTDLS
+int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac);
+#endif
+/* Neighbor Discovery Offload Support */
+int dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable);
+int dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipaddr, int idx);
+int dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx);
+/* ioctl processing for nl80211 */
+int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, struct dhd_ioctl *ioc, void *data_buf);
+
+#if defined(SUPPORT_MULTIPLE_REVISION)
+extern int
+concate_revision(struct dhd_bus *bus, char *fwpath, int fw_path_len, char *nvpath, int nv_path_len);
+#endif /* SUPPORT_MULTIPLE_REVISION */
+void dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path);
+void dhd_set_bus_state(void *bus, uint32 state);
+
+/* Remove appropriate pkts (either one non-fragmented pkt or a whole set of fragmented pkts) */
+typedef int (*f_droppkt_t)(dhd_pub_t *dhdp, int prec, void* p, bool bPktInQ);
+extern bool dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn);
+
+#ifdef PROP_TXSTATUS
+int dhd_os_wlfc_block(dhd_pub_t *pub);
+int dhd_os_wlfc_unblock(dhd_pub_t *pub);
+#endif /* PROP_TXSTATUS */
+
+uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail);
+void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size);
+
+#if defined(CONFIG_DHD_USE_STATIC_BUF)
+#define DHD_OS_PREALLOC(dhdpub, section, size) dhd_os_prealloc(dhdpub, section, size, FALSE)
+#define DHD_OS_PREFREE(dhdpub, addr, size) dhd_os_prefree(dhdpub, addr, size)
+#else
+#define DHD_OS_PREALLOC(dhdpub, section, size) MALLOC(dhdpub->osh, size)
+#define DHD_OS_PREFREE(dhdpub, addr, size) MFREE(dhdpub->osh, addr, size)
+#endif /* defined(CONFIG_DHD_USE_STATIC_BUF) */
+
+
+#endif /* _dhd_h_ */
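
FW_SUPPORTED() above is a plain substring match over the firmware capability string (via the #capa stringization), not a structured lookup. A standalone illustration with a hypothetical capability list; the real macro takes the dhd handle and reads dhd->fw_capabilities.

#include <stdio.h>
#include <string.h>

#define FW_SUPPORTED(caps, capa) (strstr((caps), #capa) != NULL)

int main(void)
{
	const char fw_capabilities[] = "ap sta p2p tdls";	/* hypothetical */
	printf("tdls: %d\n", FW_SUPPORTED(fw_capabilities, tdls));	/* -> 1 */
	printf("mfp:  %d\n", FW_SUPPORTED(fw_capabilities, mfp));	/* -> 0 */
	return 0;
}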
diff --git a/drivers/staging/edison-bcm43340/dhd_bta.c b/drivers/staging/edison-bcm43340/dhd_bta.c
new file mode 100644
index 0000000..d48c3fa
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/dhd_bta.c
@@ -0,0 +1,337 @@
+/*
+ * BT-AMP support routines
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_bta.c 379512 2013-01-17 22:49:08Z $
+ */
+#error "WLBTAMP is not defined"
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmcdc.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <proto/802.11.h>
+#include <proto/802.11_bta.h>
+#include <proto/bt_amp_hci.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhdioctl.h>
+#include <dhd_dbg.h>
+
+#include <dhd_bta.h>
+
+
+#ifdef SEND_HCI_CMD_VIA_IOCTL
+#define BTA_HCI_CMD_MAX_LEN (HCI_CMD_PREAMBLE_SIZE + HCI_CMD_DATA_SIZE)
+
+/* Send HCI cmd via wl iovar HCI_cmd to the dongle. */
+int
+dhd_bta_docmd(dhd_pub_t *pub, void *cmd_buf, uint cmd_len)
+{
+	amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)cmd_buf;
+	uint8 buf[BTA_HCI_CMD_MAX_LEN + 16];
+	uint len = sizeof(buf);
+	wl_ioctl_t ioc;
+
+	if (cmd_len < HCI_CMD_PREAMBLE_SIZE)
+		return BCME_BADLEN;
+
+	if ((uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE > cmd_len)
+		return BCME_BADLEN;
+
+	len = bcm_mkiovar("HCI_cmd",
+		(char *)cmd, (uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE, (char *)buf, len);
+
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = len;
+	ioc.set = TRUE;
+
+	return dhd_wl_ioctl(pub, &ioc, ioc.buf, ioc.len);
+}
+#else /* !SEND_HCI_CMD_VIA_IOCTL */
+
+static void
+dhd_bta_flush_hcidata(dhd_pub_t *pub, uint16 llh)
+{
+	int prec;
+	struct pktq *q;
+	uint count = 0;
+
+	q = dhd_bus_txq(pub->bus);
+	if (q == NULL)
+		return;
+
+	DHD_BTA(("dhd: flushing HCI ACL data for logical link %u...\n", llh));
+
+	dhd_os_sdlock_txq(pub);
+
+	/* Walk through the txq and toss all HCI ACL data packets */
+	PKTQ_PREC_ITER(q, prec) {
+		void *head_pkt = NULL;
+
+		while (pktq_ppeek(q, prec) != head_pkt) {
+			void *pkt = pktq_pdeq(q, prec);
+			int ifidx;
+
+			dhd_prot_hdrpull(pub, &ifidx, pkt, NULL, NULL);
+
+			if (PKTLEN(pub->osh, pkt) >= RFC1042_HDR_LEN) {
+				struct ether_header *eh =
+				        (struct ether_header *)PKTDATA(pub->osh, pkt);
+
+				if (ntoh16(eh->ether_type) < ETHER_TYPE_MIN) {
+					struct dot11_llc_snap_header *lsh =
+					        (struct dot11_llc_snap_header *)&eh[1];
+
+					if (bcmp(lsh, BT_SIG_SNAP_MPROT,
+					         DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
+					    ntoh16(lsh->type) == BTA_PROT_L2CAP) {
+						amp_hci_ACL_data_t *ACL_data =
+						        (amp_hci_ACL_data_t *)&lsh[1];
+						uint16 handle = ltoh16(ACL_data->handle);
+
+						if (HCI_ACL_DATA_HANDLE(handle) == llh) {
+							PKTFREE(pub->osh, pkt, TRUE);
+							count ++;
+							continue;
+						}
+					}
+				}
+			}
+
+			dhd_prot_hdrpush(pub, ifidx, pkt);
+
+			if (head_pkt == NULL)
+				head_pkt = pkt;
+			pktq_penq(q, prec, pkt);
+		}
+	}
+
+	dhd_os_sdunlock_txq(pub);
+
+	DHD_BTA(("dhd: flushed %u packet(s) for logical link %u...\n", count, llh));
+}
+
+/* Handle HCI cmd locally.
+ * Return 0: continue to send the cmd across SDIO
+ *        < 0: stop, fail
+ *        > 0: stop, success
+ */
+static int
+_dhd_bta_docmd(dhd_pub_t *pub, amp_hci_cmd_t *cmd)
+{
+	int status = 0;
+
+	switch (ltoh16_ua((uint8 *)&cmd->opcode)) {
+	case HCI_Enhanced_Flush: {
+		eflush_cmd_parms_t *cmdparms = (eflush_cmd_parms_t *)cmd->parms;
+		dhd_bta_flush_hcidata(pub, ltoh16_ua(cmdparms->llh));
+		break;
+	}
+	default:
+		break;
+	}
+
+	return status;
+}
+
+/* Send HCI cmd encapsulated in BT-SIG frame via data channel to the dongle. */
+int
+dhd_bta_docmd(dhd_pub_t *pub, void *cmd_buf, uint cmd_len)
+{
+	amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)cmd_buf;
+	struct ether_header *eh;
+	struct dot11_llc_snap_header *lsh;
+	osl_t *osh = pub->osh;
+	uint len;
+	void *p;
+	int status;
+
+	if (cmd_len < HCI_CMD_PREAMBLE_SIZE) {
+		DHD_ERROR(("dhd_bta_docmd: short command, cmd_len %u\n", cmd_len));
+		return BCME_BADLEN;
+	}
+
+	if ((len = (uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE) > cmd_len) {
+		DHD_ERROR(("dhd_bta_docmd: malformed command, len %u cmd_len %u\n",
+		           len, cmd_len));
+		/* return BCME_BADLEN; */
+	}
+
+	p = PKTGET(osh, pub->hdrlen + RFC1042_HDR_LEN + len, TRUE);
+	if (p == NULL) {
+		DHD_ERROR(("dhd_bta_docmd: out of memory\n"));
+		return BCME_NOMEM;
+	}
+
+
+	/* intercept and handle the HCI cmd locally */
+	if ((status = _dhd_bta_docmd(pub, cmd)) > 0)
+		return 0;
+	else if (status < 0)
+		return status;
+
+	/* copy in HCI cmd */
+	PKTPULL(osh, p, pub->hdrlen + RFC1042_HDR_LEN);
+	bcopy(cmd, PKTDATA(osh, p), len);
+
+	/* copy in partial Ethernet header with BT-SIG LLC/SNAP header */
+	PKTPUSH(osh, p, RFC1042_HDR_LEN);
+	eh = (struct ether_header *)PKTDATA(osh, p);
+	bzero(eh->ether_dhost, ETHER_ADDR_LEN);
+	ETHER_SET_LOCALADDR(eh->ether_dhost);
+	bcopy(&pub->mac, eh->ether_shost, ETHER_ADDR_LEN);
+	eh->ether_type = hton16(len + DOT11_LLC_SNAP_HDR_LEN);
+	lsh = (struct dot11_llc_snap_header *)&eh[1];
+	bcopy(BT_SIG_SNAP_MPROT, lsh, DOT11_LLC_SNAP_HDR_LEN - 2);
+	lsh->type = 0;
+
+	return dhd_sendpkt(pub, 0, p);
+}
+#endif /* !SEND_HCI_CMD_VIA_IOCTL */
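
Both send paths above prepend an RFC 1042 style LLC/SNAP header carrying the BT-SIG protocol identifier ahead of the HCI payload. A standalone sketch of that 8-byte header; the OUI bytes shown are the Bluetooth SIG assignment and are stated here as an assumption, since BT_SIG_SNAP_MPROT is defined elsewhere in the tree.

#include <stdint.h>
#include <stdio.h>

#pragma pack(push, 1)
struct llc_snap {
	uint8_t  dsap;		/* 0xAA */
	uint8_t  ssap;		/* 0xAA */
	uint8_t  ctl;		/* 0x03 */
	uint8_t  oui[3];	/* assumed BT-SIG OUI 00:19:58 */
	uint16_t type;		/* BTA_PROT_L2CAP for ACL data, 0 for HCI cmd */
};
#pragma pack(pop)

int main(void)
{
	struct llc_snap h = { 0xAA, 0xAA, 0x03, { 0x00, 0x19, 0x58 }, 0 };
	printf("LLC/SNAP header: %zu bytes\n", sizeof(h));	/* -> 8 */
	return 0;
}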
+
+/* Send HCI ACL data to dongle via data channel */
+int
+dhd_bta_tx_hcidata(dhd_pub_t *pub, void *data_buf, uint data_len)
+{
+	amp_hci_ACL_data_t *data = (amp_hci_ACL_data_t *)data_buf;
+	struct ether_header *eh;
+	struct dot11_llc_snap_header *lsh;
+	osl_t *osh = pub->osh;
+	uint len;
+	void *p;
+
+	if (data_len < HCI_ACL_DATA_PREAMBLE_SIZE) {
+		DHD_ERROR(("dhd_bta_tx_hcidata: short data_buf, data_len %u\n", data_len));
+		return BCME_BADLEN;
+	}
+
+	if ((len = (uint)ltoh16(data->dlen) + HCI_ACL_DATA_PREAMBLE_SIZE) > data_len) {
+		DHD_ERROR(("dhd_bta_tx_hcidata: malformed hci data, len %u data_len %u\n",
+		           len, data_len));
+		/* return BCME_BADLEN; */
+	}
+
+	p = PKTGET(osh, pub->hdrlen + RFC1042_HDR_LEN + len, TRUE);
+	if (p == NULL) {
+		DHD_ERROR(("dhd_bta_tx_hcidata: out of memory\n"));
+		return BCME_NOMEM;
+	}
+
+
+	/* copy in HCI ACL data header and HCI ACL data */
+	PKTPULL(osh, p, pub->hdrlen + RFC1042_HDR_LEN);
+	bcopy(data, PKTDATA(osh, p), len);
+
+	/* copy in partial Ethernet header with BT-SIG LLC/SNAP header */
+	PKTPUSH(osh, p, RFC1042_HDR_LEN);
+	eh = (struct ether_header *)PKTDATA(osh, p);
+	bzero(eh->ether_dhost, ETHER_ADDR_LEN);
+	bcopy(&pub->mac, eh->ether_shost, ETHER_ADDR_LEN);
+	eh->ether_type = hton16(len + DOT11_LLC_SNAP_HDR_LEN);
+	lsh = (struct dot11_llc_snap_header *)&eh[1];
+	bcopy(BT_SIG_SNAP_MPROT, lsh, DOT11_LLC_SNAP_HDR_LEN - 2);
+	lsh->type = HTON16(BTA_PROT_L2CAP);
+
+	return dhd_sendpkt(pub, 0, p);
+}
+
+/* txcomplete callback */
+void
+dhd_bta_tx_hcidata_complete(dhd_pub_t *dhdp, void *txp, bool success)
+{
+	uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, txp);
+	amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)(pktdata + RFC1042_HDR_LEN);
+	uint16 handle = ltoh16(ACL_data->handle);
+	uint16 llh = HCI_ACL_DATA_HANDLE(handle);
+
+	wl_event_msg_t event;
+	uint8 data[HCI_EVT_PREAMBLE_SIZE + sizeof(num_completed_data_blocks_evt_parms_t)];
+	amp_hci_event_t *evt;
+	num_completed_data_blocks_evt_parms_t *parms;
+
+	uint16 len = HCI_EVT_PREAMBLE_SIZE + sizeof(num_completed_data_blocks_evt_parms_t);
+
+	/* update the event struct */
+	memset(&event, 0, sizeof(event));
+	event.version = hton16(BCM_EVENT_MSG_VERSION);
+	event.event_type = hton32(WLC_E_BTA_HCI_EVENT);
+	event.status = 0;
+	event.reason = 0;
+	event.auth_type = 0;
+	event.datalen = hton32(len);
+	event.flags = 0;
+
+	/* generate Number of Completed Blocks event */
+	evt = (amp_hci_event_t *)data;
+	evt->ecode = HCI_Number_of_Completed_Data_Blocks;
+	evt->plen = sizeof(num_completed_data_blocks_evt_parms_t);
+
+	parms = (num_completed_data_blocks_evt_parms_t *)evt->parms;
+	htol16_ua_store(dhdp->maxdatablks, (uint8 *)&parms->num_blocks);
+	parms->num_handles = 1;
+	htol16_ua_store(llh, (uint8 *)&parms->completed[0].handle);
+	parms->completed[0].pkts = 1;
+	parms->completed[0].blocks = 1;
+
+	dhd_sendup_event_common(dhdp, &event, data);
+}
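+/*
+ * Note (editorial sketch): the completion event above is synthesized on
+ * the host rather than reported by the dongle.  For example, when one
+ * ACL packet on logical link 5 completes, the BT stack receives a
+ * Number_of_Completed_Data_Blocks event with num_handles = 1,
+ * handle = 5, pkts = 1, blocks = 1 and num_blocks = dhdp->maxdatablks,
+ * delivered up the stack as a WLC_E_BTA_HCI_EVENT.
+ */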
+
+/* event callback */
+void
+dhd_bta_doevt(dhd_pub_t *dhdp, void *data_buf, uint data_len)
+{
+	amp_hci_event_t *evt = (amp_hci_event_t *)data_buf;
+
+	ASSERT(dhdp);
+	ASSERT(evt);
+
+	switch (evt->ecode) {
+	case HCI_Command_Complete: {
+		cmd_complete_parms_t *parms = (cmd_complete_parms_t *)evt->parms;
+		switch (ltoh16_ua((uint8 *)&parms->opcode)) {
+		case HCI_Read_Data_Block_Size: {
+			read_data_block_size_evt_parms_t *parms2 =
+			        (read_data_block_size_evt_parms_t *)parms->parms;
+			dhdp->maxdatablks = ltoh16_ua((uint8 *)&parms2->data_block_num);
+			break;
+		}
+		}
+		break;
+	}
+
+	case HCI_Flush_Occurred: {
+		flush_occurred_evt_parms_t *evt_parms = (flush_occurred_evt_parms_t *)evt->parms;
+		dhd_bta_flush_hcidata(dhdp, ltoh16_ua((uint8 *)&evt_parms->handle));
+		break;
+	}
+	default:
+		break;
+	}
+}
diff --git a/drivers/staging/edison-bcm43340/dhd_bta.h b/drivers/staging/edison-bcm43340/dhd_bta.h
new file mode 100644
index 0000000..db636a8
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/dhd_bta.h
@@ -0,0 +1,39 @@
+/*
+ * BT-AMP support routines
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_bta.h 291086 2011-10-21 01:17:24Z $
+ */
+#ifndef __dhd_bta_h__
+#define __dhd_bta_h__
+
+struct dhd_pub;
+
+extern int dhd_bta_docmd(struct dhd_pub *pub, void *cmd_buf, uint cmd_len);
+
+extern void dhd_bta_doevt(struct dhd_pub *pub, void *data_buf, uint data_len);
+
+extern int dhd_bta_tx_hcidata(struct dhd_pub *pub, void *data_buf, uint data_len);
+extern void dhd_bta_tx_hcidata_complete(struct dhd_pub *dhdp, void *txp, bool success);
+
+
+#endif /* __dhd_bta_h__ */
diff --git a/drivers/staging/edison-bcm43340/dhd_bus.h b/drivers/staging/edison-bcm43340/dhd_bus.h
new file mode 100644
index 0000000..f22ac6a
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/dhd_bus.h
@@ -0,0 +1,158 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_bus.h 457888 2014-02-25 03:34:39Z $
+ */
+
+#ifndef _dhd_bus_h_
+#define _dhd_bus_h_
+
+/*
+ * Exported from dhd bus module (dhd_usb, dhd_sdio)
+ */
+
+/* Indicate (dis)interest in finding dongles. */
+extern int dhd_bus_register(void);
+extern void dhd_bus_unregister(void);
+
+/* Download firmware image and nvram image */
+extern int dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, char *fw_path, char *nv_path);
+
+/* Stop bus module: clear pending frames, disable data flow */
+extern void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex);
+
+/* Initialize bus module: prepare for communication w/dongle */
+extern int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex);
+
+/* Get the Bus Idle Time */
+extern void dhd_bus_getidletime(dhd_pub_t *dhdp, int *idletime);
+
+/* Set the Bus Idle Time */
+extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time);
+
+/* Send a data frame to the dongle.  Callee disposes of txp. */
+#ifdef BCMPCIE
+extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx);
+#else
+extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp);
+#endif
+
+
+/* Send/receive a control message to/from the dongle.
+ * Expects caller to enforce a single outstanding transaction.
+ */
+extern int dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen);
+extern int dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen);
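+/* Usage sketch (see dhdcdc_msg()/dhdcdc_cmplt() in dhd_cdc.c): the CDC
+ * layer sends one request with dhd_bus_txctl(), then blocks in
+ * dhd_bus_rxctl() until the response whose id matches the request
+ * arrives, which is why only a single transaction may be outstanding.
+ */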
+
+/* Watchdog timer function */
+extern bool dhd_bus_watchdog(dhd_pub_t *dhd);
+
+extern int dhd_bus_oob_intr_register(dhd_pub_t *dhdp);
+extern void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp);
+extern void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable);
+extern void dhd_bus_dev_pm_stay_awake(dhd_pub_t *dhdpub);
+extern void dhd_bus_dev_pm_relax(dhd_pub_t *dhdpub);
+extern bool dhd_bus_dev_pm_enabled(dhd_pub_t *dhdpub);
+
+#if defined(DHD_DEBUG)
+/* Device console input function */
+extern int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen);
+#endif /* defined(DHD_DEBUG) */
+
+/* Deferred processing for the bus; returning TRUE requests a reschedule */
+extern bool dhd_bus_dpc(struct dhd_bus *bus);
+extern void dhd_bus_isr(bool * InterruptRecognized, bool * QueueMiniportHandleInterrupt, void *arg);
+
+
+/* Check for and handle local prot-specific iovar commands */
+extern int dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+                            void *params, int plen, void *arg, int len, bool set);
+
+/* Add bus dump output to a buffer */
+extern void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+
+/* Clear any bus counters */
+extern void dhd_bus_clearcounts(dhd_pub_t *dhdp);
+
+/* return the dongle chipid */
+extern uint dhd_bus_chip(struct dhd_bus *bus);
+
+/* return the dongle chiprev */
+extern uint dhd_bus_chiprev(struct dhd_bus *bus);
+
+/* Set user-specified nvram parameters. */
+extern void dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params);
+
+extern void *dhd_bus_pub(struct dhd_bus *bus);
+extern void *dhd_bus_txq(struct dhd_bus *bus);
+extern void *dhd_bus_sih(struct dhd_bus *bus);
+extern uint dhd_bus_hdrlen(struct dhd_bus *bus);
+extern void dhd_bus_set_dotxinrx(struct dhd_bus *bus, bool val);
+
+#define DHD_SET_BUS_STATE_DOWN(_bus)  do { \
+	(_bus)->dhd->busstate = DHD_BUS_DOWN; \
+} while (0)
+
+/* Register a dummy SDIO client driver in order to be notified of a new SDIO device */
+extern int dhd_bus_reg_sdio_notify(void* semaphore);
+extern void dhd_bus_unreg_sdio_notify(void);
+extern void dhd_txglom_enable(dhd_pub_t *dhdp, bool enable);
+extern int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_num,
+	uint32 *slot_num);
+
+#ifdef BCMPCIE
+enum {
+	DNGL_TO_HOST_BUF_IOCT,
+	DNGL_TO_HOST_BUF_ADDR,
+	HOST_TO_DNGL_BUF_ADDR,
+	HOST_TO_DNGL_WPTR,
+	HOST_TO_DNGL_RPTR,
+	DNGL_TO_HOST_WPTR,
+	DNGL_TO_HOST_RPTR,
+	TOTAL_LFRAG_PACKET_CNT,
+	HOST_TO_DNGL_CTRLBUF_ADDR,
+	DNGL_TO_HOST_CTRLBUF_ADDR,
+	HTOD_CTRL_RPTR,
+	HTOD_CTRL_WPTR,
+	DTOH_CTRL_RPTR,
+	DTOH_CTRL_WPTR,
+	HTOD_MB_DATA,
+	DTOH_MB_DATA,
+	MAX_HOST_RXBUFS
+};
+typedef void (*dhd_mb_ring_t) (struct dhd_bus *, uint32);
+extern void dhd_bus_cmn_writeshared(struct dhd_bus *bus, void * data, uint32 len, uint8 type);
+extern void dhd_bus_ringbell(struct dhd_bus *bus, uint32 value);
+extern void dhd_bus_cmn_readshared(struct dhd_bus *bus, void* data, uint8 type);
+extern uint32 dhd_bus_get_sharedflags(struct dhd_bus *bus);
+extern void dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count);
+extern void dhd_bus_start_queue(struct dhd_bus *bus);
+extern void dhd_bus_stop_queue(struct dhd_bus *bus);
+extern void dhd_bus_update_retlen(struct dhd_bus *bus, uint32 retlen, uint32 cmd_id, uint32 status,
+	uint32 inline_data);
+extern dhd_mb_ring_t dhd_bus_get_mbintr_fn(struct dhd_bus *bus);
+#endif /* BCMPCIE */
+#endif /* _dhd_bus_h_ */
diff --git a/drivers/staging/edison-bcm43340/dhd_cdc.c b/drivers/staging/edison-bcm43340/dhd_cdc.c
new file mode 100644
index 0000000..52fa91c
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/dhd_cdc.c
@@ -0,0 +1,789 @@
+/*
+ * DHD Protocol Module for CDC and BDC.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_cdc.c 449353 2014-01-16 21:34:16Z $
+ *
+ * BDC is like CDC, except it includes a header for data packets to convey
+ * packet priority over the bus, and flags (e.g. to indicate checksum status
+ * for dongle offload.)
+ */
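+/*
+ * For orientation, the BDC header pushed/pulled below (defined in
+ * bcmcdc.h) consists of four single-byte fields; the layout here is a
+ * sketch inferred from the accesses in dhd_prot_hdrpush() and
+ * dhd_prot_hdrpull(), not the canonical definition:
+ *
+ *	struct bdc_header {
+ *		uint8 flags;		// protocol version, checksum flags
+ *		uint8 priority;		// 802.1d priority (BDC_PRIORITY_MASK)
+ *		uint8 flags2;		// carries the interface index
+ *		uint8 dataOffset;	// extra header length in 4-byte words
+ *	};
+ */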
+
+#include <typedefs.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmcdc.h>
+#include <bcmendian.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_proto.h>
+#include <dhd_bus.h>
+#include <dhd_dbg.h>
+
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+
+#define RETRIES 2		/* # of retries to retrieve matching ioctl response */
+#define BUS_HEADER_LEN	(24+DHD_SDALIGN)	/* Must be at least SDPCM_RESERVE
+				 * defined in dhd_sdio.c (amount of header that might be added)
+				 * plus any space that might be needed for alignment padding.
+				 */
+#define ROUND_UP_MARGIN	2048	/* Biggest SDIO block size possible for
+				 * round off at the end of buffer
+				 */
+
+typedef struct dhd_prot {
+	uint16 reqid;
+	uint8 pending;
+	uint32 lastcmd;
+	uint8 bus_header[BUS_HEADER_LEN];
+	cdc_ioctl_t msg;
+	unsigned char buf[WLC_IOCTL_MAXLEN + ROUND_UP_MARGIN];
+} dhd_prot_t;
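+/* Layout note: dhd_prot_attach() below verifies that 'buf' immediately
+ * follows 'msg', so an ioctl response can be received into one
+ * contiguous (cdc_ioctl_t header + payload) region.
+ */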
+
+
+static int
+dhdcdc_msg(dhd_pub_t *dhd)
+{
+	int err = 0;
+	dhd_prot_t *prot = dhd->prot;
+	int len = ltoh32(prot->msg.len) + sizeof(cdc_ioctl_t);
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	DHD_OS_WAKE_LOCK(dhd);
+
+	/* NOTE : cdc->msg.len holds the desired length of the buffer to be
+	 *        returned. Only up to CDC_MAX_MSG_SIZE of this buffer area
+	 *	  is actually sent to the dongle
+	 */
+	if (len > CDC_MAX_MSG_SIZE)
+		len = CDC_MAX_MSG_SIZE;
+
+	/* Send request */
+	err = dhd_bus_txctl(dhd->bus, (uchar*)&prot->msg, len);
+
+	DHD_OS_WAKE_UNLOCK(dhd);
+	return err;
+}
+
+static int
+dhdcdc_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len)
+{
+	int ret;
+	int cdc_len = len + sizeof(cdc_ioctl_t);
+	dhd_prot_t *prot = dhd->prot;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+
+	do {
+		ret = dhd_bus_rxctl(dhd->bus, (uchar*)&prot->msg, cdc_len);
+		if (ret < 0)
+			break;
+	} while (CDC_IOC_ID(ltoh32(prot->msg.flags)) != id);
+
+
+	return ret;
+}
+
+static int
+dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+	dhd_prot_t *prot = dhd->prot;
+	cdc_ioctl_t *msg = &prot->msg;
+	int ret = 0, retries = 0;
+	uint32 id, flags = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+
+	/* Respond "bcmerror" and "bcmerrorstr" with local cache */
+	if (cmd == WLC_GET_VAR && buf)
+	{
+		if (!strcmp((char *)buf, "bcmerrorstr"))
+		{
+			strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), MIN(BCME_STRLEN, len));
+			goto done;
+		}
+		else if (!strcmp((char *)buf, "bcmerror"))
+		{
+			*(int *)buf = dhd->dongle_error;
+			goto done;
+		}
+	}
+
+	memset(msg, 0, sizeof(cdc_ioctl_t));
+
+	msg->cmd = htol32(cmd);
+	msg->len = htol32(len);
+	msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT);
+	CDC_SET_IF_IDX(msg, ifidx);
+	/* add additional action bits */
+	action &= WL_IOCTL_ACTION_MASK;
+	msg->flags |= (action << CDCF_IOC_ACTION_SHIFT);
+	msg->flags = htol32(msg->flags);
+
+	if (buf)
+		memcpy(prot->buf, buf, len);
+
+	if ((ret = dhdcdc_msg(dhd)) < 0) {
+		if (!dhd->hang_was_sent)
+			DHD_ERROR(("dhdcdc_query_ioctl: dhdcdc_msg failed w/status %d\n", ret));
+		goto done;
+	}
+
+retry:
+	/* wait for interrupt and get first fragment */
+	if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0)
+		goto done;
+
+	flags = ltoh32(msg->flags);
+	id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
+
+	if ((id < prot->reqid) && (++retries < RETRIES))
+		goto retry;
+	if (id != prot->reqid) {
+		DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
+		           dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid));
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* Copy info buffer */
+	if (buf)
+	{
+		if (ret < (int)len)
+			len = ret;
+		memcpy(buf, (void*) prot->buf, len);
+	}
+
+	/* Check the ERROR flag */
+	if (flags & CDCF_IOC_ERROR)
+	{
+		ret = ltoh32(msg->status);
+		/* Cache error from dongle */
+		dhd->dongle_error = ret;
+	}
+
+done:
+	return ret;
+}
+
+
+static int
+dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+	dhd_prot_t *prot = dhd->prot;
+	cdc_ioctl_t *msg = &prot->msg;
+	int ret = 0;
+	uint32 flags, id;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+	if (dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+		return -EIO;
+	}
+
+	/* don't talk to the dongle if fw is about to be reloaded */
+	if (dhd->hang_was_sent) {
+		DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
+			__FUNCTION__));
+		return -EIO;
+	}
+
+	memset(msg, 0, sizeof(cdc_ioctl_t));
+
+	msg->cmd = htol32(cmd);
+	msg->len = htol32(len);
+	msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT);
+	CDC_SET_IF_IDX(msg, ifidx);
+	/* add additional action bits */
+	action &= WL_IOCTL_ACTION_MASK;
+	msg->flags |= (action << CDCF_IOC_ACTION_SHIFT) | CDCF_IOC_SET;
+	msg->flags = htol32(msg->flags);
+
+	if (buf)
+		memcpy(prot->buf, buf, len);
+
+	if ((ret = dhdcdc_msg(dhd)) < 0) {
+		DHD_ERROR(("%s: dhdcdc_msg failed w/status %d\n", __FUNCTION__, ret));
+		goto done;
+	}
+
+	if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0)
+		goto done;
+
+	flags = ltoh32(msg->flags);
+	id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
+
+	if (id != prot->reqid) {
+		DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
+		           dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid));
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* Check the ERROR flag */
+	if (flags & CDCF_IOC_ERROR)
+	{
+		ret = ltoh32(msg->status);
+		/* Cache error from dongle */
+		dhd->dongle_error = ret;
+	}
+
+done:
+	return ret;
+}
+
+
+int
+dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
+{
+	dhd_prot_t *prot = dhd->prot;
+	int ret = -1;
+	uint8 action;
+
+	if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
+		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+		goto done;
+	}
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(len <= WLC_IOCTL_MAXLEN);
+
+	if (len > WLC_IOCTL_MAXLEN)
+		goto done;
+
+	if (prot->pending == TRUE) {
+		DHD_ERROR(("CDC packet is pending!!!! cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n",
+			ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd,
+			(unsigned long)prot->lastcmd));
+		if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) {
+			DHD_TRACE(("iovar cmd=%s\n", (char*)buf));
+		}
+		goto done;
+	}
+
+	prot->pending = TRUE;
+	prot->lastcmd = ioc->cmd;
+	action = ioc->set;
+	if (action & WL_IOCTL_ACTION_SET)
+		ret = dhdcdc_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+	else {
+		ret = dhdcdc_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+		if (ret > 0)
+			ioc->used = ret - sizeof(cdc_ioctl_t);
+	}
+
+	/* Too many programs assume ioctl() returns 0 on success */
+	if (ret >= 0)
+		ret = 0;
+	else {
+		cdc_ioctl_t *msg = &prot->msg;
+		ioc->needed = ltoh32(msg->len); /* len == needed when set/query fails from dongle */
+	}
+
+	/* Intercept the wme_dp ioctl here */
+	if ((!ret) && (ioc->cmd == WLC_SET_VAR) && buf && (!strcmp(buf, "wme_dp"))) {
+		int slen, val = 0;
+
+		slen = strlen("wme_dp") + 1;
+		if (len >= (int)(slen + sizeof(int)))
+			bcopy(((char *)buf + slen), &val, sizeof(int));
+		dhd->wme_dp = (uint8) ltoh32(val);
+	}
+
+	prot->pending = FALSE;
+
+done:
+
+	return ret;
+}
+
+int
+dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name,
+                  void *params, int plen, void *arg, int len, bool set)
+{
+	return BCME_UNSUPPORTED;
+}
+
+void
+dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	bcm_bprintf(strbuf, "Protocol CDC: reqid %d\n", dhdp->prot->reqid);
+#ifdef PROP_TXSTATUS
+	dhd_wlfc_dump(dhdp, strbuf);
+#endif
+}
+
+/*	The FreeBSD PKTPUSH can change the packet buffer pointer,
+	so we need to make it changeable.
+*/
+#define PKTBUF pktbuf
+void
+dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF)
+{
+#ifdef BDC
+	struct bdc_header *h;
+#endif /* BDC */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BDC
+	/* Push BDC header, used to convey priority for buses that don't convey it themselves */
+
+	PKTPUSH(dhd->osh, PKTBUF, BDC_HEADER_LEN);
+
+	h = (struct bdc_header *)PKTDATA(dhd->osh, PKTBUF);
+
+	h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
+	if (PKTSUMNEEDED(PKTBUF))
+		h->flags |= BDC_FLAG_SUM_NEEDED;
+
+
+	h->priority = (PKTPRIO(PKTBUF) & BDC_PRIORITY_MASK);
+	h->flags2 = 0;
+	h->dataOffset = 0;
+	BDC_SET_IF_IDX(h, ifidx);
+#endif /* BDC */
+}
+#undef PKTBUF	/* Only defined in the above routine */
+
+int
+dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pktbuf, uchar *reorder_buf_info,
+	uint *reorder_info_len)
+{
+#ifdef BDC
+	struct bdc_header *h;
+#endif
+	uint8 data_offset = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BDC
+	if (reorder_info_len)
+		*reorder_info_len = 0;
+	/* Pop BDC header, used to convey priority for buses that don't convey it themselves */
+
+	if (PKTLEN(dhd->osh, pktbuf) < BDC_HEADER_LEN) {
+		DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+		           PKTLEN(dhd->osh, pktbuf), BDC_HEADER_LEN));
+		return BCME_ERROR;
+	}
+
+	h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf);
+
+	if (!ifidx) {
+		/* for tx packet, skip the analysis */
+		data_offset = h->dataOffset;
+		PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN);
+		goto exit;
+	}
+
+	if ((*ifidx = BDC_GET_IF_IDX(h)) >= DHD_MAX_IFS) {
+		DHD_ERROR(("%s: rx data ifnum out of range (%d)\n",
+		           __FUNCTION__, *ifidx));
+		return BCME_ERROR;
+	}
+
+	if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) != BDC_PROTO_VER) {
+		DHD_ERROR(("%s: non-BDC packet received, flags = 0x%x\n",
+		           dhd_ifname(dhd, *ifidx), h->flags));
+		if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) == BDC_PROTO_VER_1)
+			h->dataOffset = 0;
+		else
+			return BCME_ERROR;
+	}
+
+	if (h->flags & BDC_FLAG_SUM_GOOD) {
+		DHD_INFO(("%s: BDC packet received with good rx-csum, flags 0x%x\n",
+		          dhd_ifname(dhd, *ifidx), h->flags));
+		PKTSETSUMGOOD(pktbuf, TRUE);
+	}
+
+	PKTSETPRIO(pktbuf, (h->priority & BDC_PRIORITY_MASK));
+	data_offset = h->dataOffset;
+	PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN);
+#endif /* BDC */
+
+
+#ifdef PROP_TXSTATUS
+	if (!DHD_PKTTAG_PKTDIR(PKTTAG(pktbuf))) {
+		/* parse txstatus only for packets that came from the firmware */
+		dhd_wlfc_parse_header_info(dhd, pktbuf, (data_offset << 2),
+			reorder_buf_info, reorder_info_len);
+	}
+#endif /* PROP_TXSTATUS */
+
+exit:
+	PKTPULL(dhd->osh, pktbuf, (data_offset << 2));
+	return 0;
+}
+
+
+int
+dhd_prot_attach(dhd_pub_t *dhd)
+{
+	dhd_prot_t *cdc;
+
+	if (!(cdc = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT, sizeof(dhd_prot_t)))) {
+		DHD_ERROR(("%s: prealloc of dhd_prot_t failed\n", __FUNCTION__));
+		goto fail;
+	}
+	memset(cdc, 0, sizeof(dhd_prot_t));
+
+	/* ensure that the msg buf directly follows the cdc msg struct */
+	if ((uintptr)(&cdc->msg + 1) != (uintptr)cdc->buf) {
+		DHD_ERROR(("dhd_prot_t is not correctly defined\n"));
+		goto fail;
+	}
+
+	dhd->prot = cdc;
+#ifdef BDC
+	dhd->hdrlen += BDC_HEADER_LEN;
+#endif
+	dhd->maxctl = WLC_IOCTL_MAXLEN + sizeof(cdc_ioctl_t) + ROUND_UP_MARGIN;
+	return 0;
+
+fail:
+	if (cdc != NULL)
+		DHD_OS_PREFREE(dhd, cdc, sizeof(dhd_prot_t));
+	return BCME_NOMEM;
+}
+
+/* ~NOTE~ What if another thread is waiting on the semaphore?  Holding it? */
+void
+dhd_prot_detach(dhd_pub_t *dhd)
+{
+#ifdef PROP_TXSTATUS
+	dhd_wlfc_deinit(dhd);
+#endif
+	DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t));
+	dhd->prot = NULL;
+}
+
+void
+dhd_prot_dstats(dhd_pub_t *dhd)
+{
+/* No stats from dongle added yet, copy bus stats */
+	dhd->dstats.tx_packets = dhd->tx_packets;
+	dhd->dstats.tx_errors = dhd->tx_errors;
+	dhd->dstats.rx_packets = dhd->rx_packets;
+	dhd->dstats.rx_errors = dhd->rx_errors;
+	dhd->dstats.rx_dropped = dhd->rx_dropped;
+	dhd->dstats.multicast = dhd->rx_multicast;
+	return;
+}
+
+int
+dhd_prot_init(dhd_pub_t *dhd)
+{
+	int ret = 0;
+	wlc_rev_info_t revinfo;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+
+	/* Get the device rev info */
+	memset(&revinfo, 0, sizeof(revinfo));
+	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
+	if (ret < 0)
+		goto done;
+
+
+	ret = dhd_preinit_ioctls(dhd);
+
+	/* Always assumes wl for now */
+	dhd->iswl = TRUE;
+
+done:
+	return ret;
+}
+
+void
+dhd_prot_stop(dhd_pub_t *dhd)
+{
+/* Nothing to do for CDC */
+}
+
+
+static void
+dhd_get_hostreorder_pkts(void *osh, struct reorder_info *ptr, void **pkt,
+	uint32 *pkt_count, void **pplast, uint8 start, uint8 end)
+{
+	void *plast = NULL, *p;
+	uint32 pkt_cnt = 0;
+
+	if (ptr->pend_pkts == 0) {
+		DHD_REORDER(("%s: no packets in reorder queue \n", __FUNCTION__));
+		*pplast = NULL;
+		*pkt_count = 0;
+		*pkt = NULL;
+		return;
+	}
+	do {
+		p = (void *)(ptr->p[start]);
+		ptr->p[start] = NULL;
+
+		if (p != NULL) {
+			if (plast == NULL)
+				*pkt = p;
+			else
+				PKTSETNEXT(osh, plast, p);
+
+			plast = p;
+			pkt_cnt++;
+		}
+		start++;
+		if (start > ptr->max_idx)
+			start = 0;
+	} while (start != end);
+	*pplast = plast;
+	*pkt_count = pkt_cnt;
+	ptr->pend_pkts -= (uint8)pkt_cnt;
+}
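+/*
+ * Worked example (editorial sketch): with max_idx = 7, pending packets
+ * in slots 3, 4 and 6, and a call with start = 3, end = 7, the loop
+ * visits slots 3..6 (wrapping past max_idx back to 0 when needed),
+ * chains the three non-NULL packets with PKTSETNEXT(), and returns the
+ * head in *pkt, the tail in *pplast and *pkt_count = 3, reducing
+ * pend_pkts accordingly.
+ */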
+
+int
+dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, uint reorder_info_len,
+	void **pkt, uint32 *pkt_count)
+{
+	uint8 flow_id, max_idx, cur_idx, exp_idx;
+	struct reorder_info *ptr;
+	uint8 flags;
+	void *cur_pkt, *plast = NULL;
+	uint32 cnt = 0;
+
+	if (pkt == NULL) {
+		if (pkt_count != NULL)
+			*pkt_count = 0;
+		return 0;
+	}
+
+	flow_id = reorder_info_buf[WLHOST_REORDERDATA_FLOWID_OFFSET];
+	flags = reorder_info_buf[WLHOST_REORDERDATA_FLAGS_OFFSET];
+
+	DHD_REORDER(("flow_id %d, flags 0x%02x, idx(%d, %d, %d)\n", flow_id, flags,
+		reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET],
+		reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET],
+		reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET]));
+
+	/* validate flags and flow id */
+	if (flags == 0xFF) {
+		DHD_ERROR(("%s: invalid flags...so ignore this packet\n", __FUNCTION__));
+		*pkt_count = 1;
+		return 0;
+	}
+
+	cur_pkt = *pkt;
+	*pkt = NULL;
+
+	ptr = dhd->reorder_bufs[flow_id];
+	if (flags & WLHOST_REORDERDATA_DEL_FLOW) {
+		uint32 buf_size = sizeof(struct reorder_info);
+
+		DHD_REORDER(("%s: Flags indicating to delete a flow id %d\n",
+			__FUNCTION__, flow_id));
+
+		if (ptr == NULL) {
+			DHD_REORDER(("%s: received flags to cleanup, but no flow (%d) yet\n",
+				__FUNCTION__, flow_id));
+			*pkt_count = 1;
+			*pkt = cur_pkt;
+			return 0;
+		}
+
+		dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+			ptr->exp_idx, ptr->exp_idx);
+		/* append the current packet at the tail of the flushed chain */
+		if (plast) {
+			PKTSETNEXT(dhd->osh, plast, cur_pkt);
+			cnt++;
+		}
+		else {
+			if (cnt != 0) {
+				DHD_ERROR(("%s: del flow: something fishy, pending packets %d\n",
+					__FUNCTION__, cnt));
+			}
+			*pkt = cur_pkt;
+			cnt = 1;
+		}
+		buf_size += ((ptr->max_idx + 1) * sizeof(void *));
+		MFREE(dhd->osh, ptr, buf_size);
+		dhd->reorder_bufs[flow_id] = NULL;
+		*pkt_count = cnt;
+		return 0;
+	}
+	/* all the other cases depend on the existence of the reorder struct for that flow id */
+	if (ptr == NULL) {
+		uint32 buf_size_alloc = sizeof(reorder_info_t);
+		max_idx = reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET];
+
+		buf_size_alloc += ((max_idx + 1) * sizeof(void*));
+		/* allocate space to hold the buffers, index etc */
+
+		DHD_REORDER(("%s: alloc buffer of size %d, reorder info id %d, maxidx %d\n",
+			__FUNCTION__, buf_size_alloc, flow_id, max_idx));
+		ptr = (struct reorder_info *)MALLOC(dhd->osh, buf_size_alloc);
+		if (ptr == NULL) {
+			DHD_ERROR(("%s: Malloc failed to alloc buffer\n", __FUNCTION__));
+			*pkt_count = 1;
+			return 0;
+		}
+		bzero(ptr, buf_size_alloc);
+		dhd->reorder_bufs[flow_id] = ptr;
+		ptr->p = (void *)(ptr+1);
+		ptr->max_idx = max_idx;
+	}
+	if (flags & WLHOST_REORDERDATA_NEW_HOLE)  {
+		DHD_REORDER(("%s: new hole, so clean up pending buffers\n", __FUNCTION__));
+		if (ptr->pend_pkts) {
+			dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+				ptr->exp_idx, ptr->exp_idx);
+			ptr->pend_pkts = 0;
+		}
+		ptr->cur_idx = reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET];
+		ptr->exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET];
+		ptr->max_idx = reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET];
+		ptr->p[ptr->cur_idx] = cur_pkt;
+		ptr->pend_pkts++;
+		*pkt_count = cnt;
+	}
+	else if (flags & WLHOST_REORDERDATA_CURIDX_VALID) {
+		cur_idx = reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET];
+		exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET];
+
+
+		if ((exp_idx == ptr->exp_idx) && (cur_idx != ptr->exp_idx)) {
+			/* still in the current hole */
+			/* enqueue the current on the buffer chain */
+			if (ptr->p[cur_idx] != NULL) {
+				DHD_REORDER(("%s: HOLE: ERROR buffer pending..free it\n",
+					__FUNCTION__));
+				PKTFREE(dhd->osh, ptr->p[cur_idx], TRUE);
+				ptr->p[cur_idx] = NULL;
+			}
+			ptr->p[cur_idx] = cur_pkt;
+			ptr->pend_pkts++;
+			ptr->cur_idx = cur_idx;
+			DHD_REORDER(("%s: fill up a hole..pending packets is %d\n",
+				__FUNCTION__, ptr->pend_pkts));
+			*pkt_count = 0;
+			*pkt = NULL;
+		}
+		else if (ptr->exp_idx == cur_idx) {
+			/* got the right one ..flush from cur to exp and update exp */
+			DHD_REORDER(("%s: got the right one now, cur_idx is %d\n",
+				__FUNCTION__, cur_idx));
+			if (ptr->p[cur_idx] != NULL) {
+				DHD_REORDER(("%s: Error buffer pending..free it\n",
+					__FUNCTION__));
+				PKTFREE(dhd->osh, ptr->p[cur_idx], TRUE);
+				ptr->p[cur_idx] = NULL;
+			}
+			ptr->p[cur_idx] = cur_pkt;
+			ptr->pend_pkts++;
+
+			ptr->cur_idx = cur_idx;
+			ptr->exp_idx = exp_idx;
+
+			dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+				cur_idx, exp_idx);
+			*pkt_count = cnt;
+			DHD_REORDER(("%s: freeing up buffers %d, still pending %d\n",
+				__FUNCTION__, cnt, ptr->pend_pkts));
+		}
+		else {
+			uint8 end_idx;
+			bool flush_current = FALSE;
+			/* both cur and exp are moved now .. */
+			DHD_REORDER(("%s: flow %d, both moved, cur %d(%d), exp %d(%d)\n",
+				__FUNCTION__, flow_id, ptr->cur_idx, cur_idx,
+				ptr->exp_idx, exp_idx));
+			if (flags & WLHOST_REORDERDATA_FLUSH_ALL)
+				end_idx = ptr->exp_idx;
+			else
+				end_idx = exp_idx;
+
+			/* flush pkts first */
+			dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
+				ptr->exp_idx, end_idx);
+
+			if (cur_idx == ptr->max_idx) {
+				if (exp_idx == 0)
+					flush_current = TRUE;
+			} else {
+				if (exp_idx == cur_idx + 1)
+					flush_current = TRUE;
+			}
+			if (flush_current) {
+				if (plast)
+					PKTSETNEXT(dhd->osh, plast, cur_pkt);
+				else
+					*pkt = cur_pkt;
+				cnt++;
+			}
+			else {
+				ptr->p[cur_idx] = cur_pkt;
+				ptr->pend_pkts++;
+			}
+			ptr->exp_idx = exp_idx;
+			ptr->cur_idx = cur_idx;
+			*pkt_count = cnt;
+		}
+	}
+	else {
+		uint8 end_idx;
+		/* no real packet but update to exp_seq...that means explicit window move */
+		exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET];
+
+		DHD_REORDER(("%s: move the window, cur_idx is %d, exp is %d, new exp is %d\n",
+			__FUNCTION__, ptr->cur_idx, ptr->exp_idx, exp_idx));
+		if (flags & WLHOST_REORDERDATA_FLUSH_ALL)
+			end_idx =  ptr->exp_idx;
+		else
+			end_idx =  exp_idx;
+
+		dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, ptr->exp_idx, end_idx);
+		if (plast)
+			PKTSETNEXT(dhd->osh, plast, cur_pkt);
+		else
+			*pkt = cur_pkt;
+		cnt++;
+		*pkt_count = cnt;
+		/* set the new expected idx */
+		ptr->exp_idx = exp_idx;
+	}
+	return 0;
+}
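+/*
+ * Reorder metadata summary (editorial sketch, per the
+ * WLHOST_REORDERDATA_*_OFFSET reads above): each reorder tag carries a
+ * flow id, flags, and cur/exp/max indices.  DEL_FLOW tears the flow
+ * down, NEW_HOLE starts buffering at cur_idx, CURIDX_VALID either fills
+ * a hole or releases the in-order run from exp_idx, and a bare exp_idx
+ * update moves the window, flushing everything up to the new position.
+ */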
diff --git a/drivers/staging/edison-bcm43340/dhd_cfg80211.c b/drivers/staging/edison-bcm43340/dhd_cfg80211.c
new file mode 100644
index 0000000..971e70e
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/dhd_cfg80211.c
@@ -0,0 +1,301 @@
+/*
+ * Linux cfg80211 driver - Dongle Host Driver (DHD) related
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfg80211.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 Exp $
+ */
+
+#include <linux/vmalloc.h>
+#include <net/rtnetlink.h>
+
+#include <bcmutils.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <brcm_nl80211.h>
+#include <dhd_cfg80211.h>
+
+#ifdef PKT_FILTER_SUPPORT
+#include <dngl_stats.h>
+#include <dhd.h>
+#endif
+extern struct bcm_cfg80211 *g_bcm_cfg;
+
+#ifdef PKT_FILTER_SUPPORT
+extern uint dhd_pkt_filter_enable;
+extern uint dhd_master_mode;
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+#endif
+
+static int dhd_dongle_up = FALSE;
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#include <dhd_cfg80211.h>
+
+static s32 wl_dongle_up(struct net_device *ndev, u32 up);
+
+/**
+ * Function implementations
+ */
+
+s32 dhd_cfg80211_init(struct bcm_cfg80211 *cfg)
+{
+	dhd_dongle_up = FALSE;
+	return 0;
+}
+
+s32 dhd_cfg80211_deinit(struct bcm_cfg80211 *cfg)
+{
+	dhd_dongle_up = FALSE;
+	return 0;
+}
+
+s32 dhd_cfg80211_down(struct bcm_cfg80211 *cfg)
+{
+	dhd_dongle_up = FALSE;
+	return 0;
+}
+
+s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val)
+{
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+	dhd->op_mode |= val;
+	WL_ERR(("Set : op_mode=0x%04x\n", dhd->op_mode));
+#ifdef ARP_OFFLOAD_SUPPORT
+	if (dhd->arp_version == 1) {
+		/* If P2P is enabled, disable arpoe */
+		dhd_arp_offload_set(dhd, 0);
+		dhd_arp_offload_enable(dhd, false);
+	}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+	return 0;
+}
+
+s32 dhd_cfg80211_clean_p2p_info(struct bcm_cfg80211 *cfg)
+{
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+	dhd->op_mode &= ~(DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE);
+	WL_ERR(("Clean : op_mode=0x%04x\n", dhd->op_mode));
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	if (dhd->arp_version == 1) {
+		/* If P2P is disabled, enable arpoe back for STA mode. */
+		dhd_arp_offload_set(dhd, dhd_arp_mode);
+		dhd_arp_offload_enable(dhd, true);
+	}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+	return 0;
+}
+
+struct net_device* wl_cfg80211_allocate_if(struct bcm_cfg80211 *cfg, int ifidx, char *name,
+	uint8 *mac, uint8 bssidx)
+{
+	return dhd_allocate_if(cfg->pub, ifidx, name, mac, bssidx, FALSE);
+}
+
+int wl_cfg80211_register_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev)
+{
+	return dhd_register_if(cfg->pub, ifidx, FALSE);
+}
+
+int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev)
+{
+	return dhd_remove_if(cfg->pub, ifidx, FALSE);
+}
+
+struct net_device * dhd_cfg80211_netdev_free(struct net_device *ndev)
+{
+	if (ndev) {
+		if (ndev->ieee80211_ptr) {
+			kfree(ndev->ieee80211_ptr);
+			ndev->ieee80211_ptr = NULL;
+		}
+		free_netdev(ndev);
+		return NULL;
+	}
+
+	return ndev;
+}
+
+void dhd_netdev_free(struct net_device *ndev)
+{
+#ifdef WL_CFG80211
+	ndev = dhd_cfg80211_netdev_free(ndev);
+#endif
+	if (ndev)
+		free_netdev(ndev);
+}
+
+static s32 wl_dongle_up(struct net_device *ndev, u32 up)
+{
+	s32 err = 0;
+
+	err = wldev_ioctl(ndev, WLC_UP, &up, sizeof(up), true);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_UP error (%d)\n", err));
+	}
+	return err;
+}
+
+s32 dhd_config_dongle(struct bcm_cfg80211 *cfg)
+{
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN	32
+#endif
+	struct net_device *ndev;
+	s32 err = 0;
+
+	WL_TRACE(("In\n"));
+	if (dhd_dongle_up) {
+		WL_ERR(("Dongle is already up\n"));
+		return err;
+	}
+
+	ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	err = wl_dongle_up(ndev, 0);
+	if (unlikely(err)) {
+		WL_ERR(("wl_dongle_up failed\n"));
+		goto default_conf_out;
+	}
+	dhd_dongle_up = true;
+
+default_conf_out:
+
+	return err;
+
+}
+
+#ifdef CONFIG_NL80211_TESTMODE
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+int dhd_cfg80211_testmode_cmd(struct wiphy *wiphy, struct wireless_dev *wdev, void *data, int len)
+#else
+int dhd_cfg80211_testmode_cmd(struct wiphy *wiphy, void *data, int len)
+#endif  /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+{
+	struct sk_buff *reply;
+	struct bcm_cfg80211 *cfg;
+	dhd_pub_t *dhd;
+	struct bcm_nlmsg_hdr *nlioc = data;
+	dhd_ioctl_t ioc = { 0 };
+	int err = 0;
+	void *buf = NULL, *cur;
+	u16 buflen;
+	u16 maxmsglen = PAGE_SIZE - 0x100;
+	bool newbuf = false;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+	int8 index = 0;
+	struct net_device *ndev = NULL;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+
+	WL_TRACE(("entry: cmd = %d\n", nlioc->cmd));
+	cfg = wiphy_priv(wiphy);
+	dhd = cfg->pub;
+
+	DHD_OS_WAKE_LOCK(dhd);
+
+	/* send to dongle only if we are not waiting for reload already */
+	if (dhd->hang_was_sent) {
+		WL_ERR(("HANG was sent up earlier\n"));
+		DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhd, DHD_EVENT_TIMEOUT_MS);
+		DHD_OS_WAKE_UNLOCK(dhd);
+		return OSL_ERROR(BCME_DONGLE_DOWN);
+	}
+
+	len -= sizeof(struct bcm_nlmsg_hdr);
+
+	if (nlioc->len > 0) {
+		if (nlioc->len <= len) {
+			buf = (void *)nlioc + nlioc->offset;
+			*(char *)(buf + nlioc->len) = '\0';
+		} else {
+			if (nlioc->len > DHD_IOCTL_MAXLEN)
+				nlioc->len = DHD_IOCTL_MAXLEN;
+			buf = vzalloc(nlioc->len);
+			if (!buf)
+				return -ENOMEM;
+			newbuf = true;
+			memcpy(buf, (void *)nlioc + nlioc->offset, len);
+			*(char *)(buf + len) = '\0';
+		}
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+	ndev = wdev_to_wlc_ndev(wdev, cfg);
+	index = dhd_net2idx(dhd->info, ndev);
+	if (index == DHD_BAD_IF) {
+		WL_ERR(("Bad ifidx from wdev:%p\n", wdev));
+		if (newbuf)
+			vfree(buf);
+		DHD_OS_WAKE_UNLOCK(dhd);
+		return BCME_ERROR;
+	}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+
+	ioc.cmd = nlioc->cmd;
+	ioc.len = nlioc->len;
+	ioc.set = nlioc->set;
+	ioc.driver = nlioc->magic;
+	err = dhd_ioctl_process(dhd, 0, &ioc, buf);
+	if (err) {
+		WL_TRACE(("dhd_ioctl_process return err %d\n", err));
+		err = OSL_ERROR(err);
+		goto done;
+	}
+
+	cur = buf;
+	while (nlioc->len > 0) {
+		buflen = nlioc->len > maxmsglen ? maxmsglen : nlioc->len;
+		nlioc->len -= buflen;
+		reply = cfg80211_testmode_alloc_reply_skb(wiphy, buflen+4);
+		if (!reply) {
+			WL_ERR(("Failed to allocate reply msg\n"));
+			err = -ENOMEM;
+			break;
+		}
+
+		if (nla_put(reply, BCM_NLATTR_DATA, buflen, cur) ||
+			nla_put_u16(reply, BCM_NLATTR_LEN, buflen)) {
+			kfree_skb(reply);
+			err = -ENOBUFS;
+			break;
+		}
+
+		do {
+			err = cfg80211_testmode_reply(reply);
+		} while (err == -EAGAIN);
+		if (err) {
+			WL_ERR(("testmode reply failed:%d\n", err));
+			break;
+		}
+		cur += buflen;
+	}
+
+done:
+	if (newbuf)
+		vfree(buf);
+	DHD_OS_WAKE_UNLOCK(dhd);
+	return err;
+}
+#endif /* CONFIG_NL80211_TESTMODE */
diff --git a/drivers/staging/edison-bcm43340/dhd_cfg80211.h b/drivers/staging/edison-bcm43340/dhd_cfg80211.h
new file mode 100644
index 0000000..da83567
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/dhd_cfg80211.h
@@ -0,0 +1,59 @@
+/*
+ * Linux cfg80211 driver - Dongle Host Driver (DHD) related
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfg80211.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 Exp $
+ */
+
+
+#ifndef __DHD_CFG80211__
+#define __DHD_CFG80211__
+
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+
+s32 dhd_cfg80211_init(struct bcm_cfg80211 *cfg);
+s32 dhd_cfg80211_deinit(struct bcm_cfg80211 *cfg);
+s32 dhd_cfg80211_down(struct bcm_cfg80211 *cfg);
+s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val);
+s32 dhd_cfg80211_clean_p2p_info(struct bcm_cfg80211 *cfg);
+s32 dhd_config_dongle(struct bcm_cfg80211 *cfg);
+
+#ifdef CONFIG_NL80211_TESTMODE
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+int dhd_cfg80211_testmode_cmd(struct wiphy *wiphy, struct wireless_dev *wdev, void *data, int len);
+#else
+int dhd_cfg80211_testmode_cmd(struct wiphy *wiphy, void *data, int len);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+static inline int
+dhd_cfg80211_testmode_cmd(struct wiphy *wiphy, struct wireless_dev *wdev, void *data, int len)
+#else
+static inline int dhd_cfg80211_testmode_cmd(struct wiphy *wiphy, void *data, int len)
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+{
+	return 0;
+}
+#endif /* CONFIG_NL80211_TESTMODE */
+
+#endif /* __DHD_CFG80211__ */
diff --git a/drivers/staging/edison-bcm43340/dhd_common.c b/drivers/staging/edison-bcm43340/dhd_common.c
new file mode 100644
index 0000000..ceb41b5
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/dhd_common.c
@@ -0,0 +1,2292 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), common DHD core.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_common.c 470785 2014-04-16 11:07:19Z $
+ */
+#include <typedefs.h>
+#include <osl.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+
+#include <bcmendian.h>
+#include <dngl_stats.h>
+#include <wlioctl.h>
+#include <dhd.h>
+#include <dhd_ip.h>
+
+#include <proto/bcmevent.h>
+
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <msgtrace.h>
+
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif
+#ifdef SET_RANDOM_MAC_SOFTAP
+#include <linux/random.h>
+#include <linux/jiffies.h>
+#endif
+
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+#ifdef WLMEDIA_HTSF
+extern void htsf_update(struct dhd_info *dhd, void *data);
+#endif
+int dhd_msg_level = DHD_ERROR_VAL;
+
+
+#include <wl_iw.h>
+
+#ifdef SOFTAP
+char fw_path2[MOD_PARAM_PATHLEN];
+extern bool softap_enabled;
+#endif
+
+/* Last connection success/failure status */
+uint32 dhd_conn_event;
+uint32 dhd_conn_status;
+uint32 dhd_conn_reason;
+
+extern int dhd_iscan_request(void * dhdp, uint16 action);
+extern void dhd_ind_scan_confirm(void *h, bool status);
+extern int dhd_iscan_in_progress(void *h);
+void dhd_iscan_lock(void);
+void dhd_iscan_unlock(void);
+extern int dhd_change_mtu(dhd_pub_t *dhd, int new_mtu, int ifidx);
+#if !defined(AP) && defined(WLP2P)
+extern int dhd_get_concurrent_capabilites(dhd_pub_t *dhd);
+#endif
+bool ap_cfg_running = FALSE;
+bool ap_fw_loaded = FALSE;
+
+/* Version string to report */
+#ifdef DHD_DEBUG
+#ifndef SRCBASE
+#define SRCBASE        "drivers/net/wireless/bcmdhd"
+#endif
+#define DHD_COMPILED "\nCompiled in " SRCBASE
+#endif /* DHD_DEBUG */
+
+#if defined(DHD_DEBUG)
+const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR
+	DHD_COMPILED " on " __DATE__ " at " __TIME__;
+#else
+const char dhd_version[] = "\nDongle Host Driver, version " EPI_VERSION_STR "\nCompiled from ";
+#endif 
+
+void dhd_set_timer(void *bus, uint wdtick);
+
+/* IOVar table */
+enum {
+	IOV_VERSION = 1,
+	IOV_MSGLEVEL,
+	IOV_BCMERRORSTR,
+	IOV_BCMERROR,
+	IOV_WDTICK,
+	IOV_DUMP,
+	IOV_CLEARCOUNTS,
+	IOV_LOGDUMP,
+	IOV_LOGCAL,
+	IOV_LOGSTAMP,
+	IOV_GPIOOB,
+	IOV_IOCTLTIMEOUT,
+#if defined(DHD_DEBUG)
+	IOV_CONS,
+	IOV_DCONSOLE_POLL,
+#endif /* defined(DHD_DEBUG) */
+#ifdef PROP_TXSTATUS
+	IOV_PROPTXSTATUS_ENABLE,
+	IOV_PROPTXSTATUS_MODE,
+	IOV_PROPTXSTATUS_OPT,
+	IOV_PROPTXSTATUS_MODULE_IGNORE,
+	IOV_PROPTXSTATUS_CREDIT_IGNORE,
+	IOV_PROPTXSTATUS_TXSTATUS_IGNORE,
+#endif /* PROP_TXSTATUS */
+	IOV_BUS_TYPE,
+#ifdef WLMEDIA_HTSF
+	IOV_WLPKTDLYSTAT_SZ,
+#endif
+	IOV_CHANGEMTU,
+	IOV_HOSTREORDER_FLOWS,
+#ifdef DHDTCPACK_SUPPRESS
+	IOV_TCPACK_SUPPRESS,
+#endif /* DHDTCPACK_SUPPRESS */
+	IOV_LAST
+};
+
+const bcm_iovar_t dhd_iovars[] = {
+	{"version",	IOV_VERSION,	0,	IOVT_BUFFER,	sizeof(dhd_version) },
+#ifdef DHD_DEBUG
+	{"msglevel",	IOV_MSGLEVEL,	0,	IOVT_UINT32,	0 },
+#endif /* DHD_DEBUG */
+	{"bcmerrorstr", IOV_BCMERRORSTR, 0, IOVT_BUFFER,	BCME_STRLEN },
+	{"bcmerror",	IOV_BCMERROR,	0,	IOVT_INT8,	0 },
+	{"wdtick",	IOV_WDTICK, 0,	IOVT_UINT32,	0 },
+	{"dump",	IOV_DUMP,	0,	IOVT_BUFFER,	DHD_IOCTL_MAXLEN },
+#ifdef DHD_DEBUG
+	{"cons",	IOV_CONS,	0,	IOVT_BUFFER,	0 },
+	{"dconpoll",	IOV_DCONSOLE_POLL, 0,	IOVT_UINT32,	0 },
+#endif
+	{"clearcounts", IOV_CLEARCOUNTS, 0, IOVT_VOID,	0 },
+	{"gpioob",	IOV_GPIOOB,	0,	IOVT_UINT32,	0 },
+	{"ioctl_timeout",	IOV_IOCTLTIMEOUT,	0,	IOVT_UINT32,	0 },
+#ifdef PROP_TXSTATUS
+	{"proptx",	IOV_PROPTXSTATUS_ENABLE,	0,	IOVT_BOOL,	0 },
+	/*
+	set the proptxtstatus operation mode:
+	0 - Do not do any proptxtstatus flow control
+	1 - Use implied credit from a packet status
+	2 - Use explicit credit
+	*/
+	{"ptxmode",	IOV_PROPTXSTATUS_MODE,	0,	IOVT_UINT32,	0 },
+	{"proptx_opt", IOV_PROPTXSTATUS_OPT,	0,	IOVT_UINT32,	0 },
+	{"pmodule_ignore", IOV_PROPTXSTATUS_MODULE_IGNORE, 0, IOVT_BOOL, 0 },
+	{"pcredit_ignore", IOV_PROPTXSTATUS_CREDIT_IGNORE, 0, IOVT_BOOL, 0 },
+	{"ptxstatus_ignore", IOV_PROPTXSTATUS_TXSTATUS_IGNORE, 0, IOVT_BOOL, 0 },
+#endif /* PROP_TXSTATUS */
+	{"bustype", IOV_BUS_TYPE, 0, IOVT_UINT32, 0},
+#ifdef WLMEDIA_HTSF
+	{"pktdlystatsz", IOV_WLPKTDLYSTAT_SZ, 0, IOVT_UINT8, 0 },
+#endif
+	{"changemtu", IOV_CHANGEMTU, 0, IOVT_UINT32, 0 },
+	{"host_reorder_flows", IOV_HOSTREORDER_FLOWS, 0, IOVT_BUFFER,
+	(WLHOST_REORDERDATA_MAXFLOWS + 1) },
+#ifdef DHDTCPACK_SUPPRESS
+	{"tcpack_suppress",	IOV_TCPACK_SUPPRESS,	0,	IOVT_UINT8,	0 },
+#endif /* DHDTCPACK_SUPPRESS */
+	{NULL, 0, 0, 0, 0 }
+};
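+/* Dispatch note: each entry above is resolved in dhd_doiovar() below,
+ * where the table id (e.g. IOV_MSGLEVEL) is combined with the get/set
+ * direction into an actionid and matched via IOV_GVAL()/IOV_SVAL().
+ */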
+
+#define DHD_IOVAR_BUF_SIZE	128
+
+/* To NDIS developers: the structure dhd_common is redundant;
+ * please do NOT merge it back from other branches!
+ */
+
+static int
+dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen)
+{
+	char eabuf[ETHER_ADDR_STR_LEN];
+
+	struct bcmstrbuf b;
+	struct bcmstrbuf *strbuf = &b;
+
+	bcm_binit(strbuf, buf, buflen);
+
+	/* Base DHD info */
+	bcm_bprintf(strbuf, "%s\n", dhd_version);
+	bcm_bprintf(strbuf, "\n");
+	bcm_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n",
+	            dhdp->up, dhdp->txoff, dhdp->busstate);
+	bcm_bprintf(strbuf, "pub.hdrlen %u pub.maxctl %u pub.rxsz %u\n",
+	            dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz);
+	bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac %s\n",
+	            dhdp->iswl, dhdp->drv_version, bcm_ether_ntoa(&dhdp->mac, eabuf));
+	bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %u\n", dhdp->bcmerror, dhdp->tickcnt);
+
+	bcm_bprintf(strbuf, "dongle stats:\n");
+	bcm_bprintf(strbuf, "tx_packets %lu tx_bytes %lu tx_errors %lu tx_dropped %lu\n",
+	            dhdp->dstats.tx_packets, dhdp->dstats.tx_bytes,
+	            dhdp->dstats.tx_errors, dhdp->dstats.tx_dropped);
+	bcm_bprintf(strbuf, "rx_packets %lu rx_bytes %lu rx_errors %lu rx_dropped %lu\n",
+	            dhdp->dstats.rx_packets, dhdp->dstats.rx_bytes,
+	            dhdp->dstats.rx_errors, dhdp->dstats.rx_dropped);
+	bcm_bprintf(strbuf, "multicast %lu\n", dhdp->dstats.multicast);
+
+	bcm_bprintf(strbuf, "bus stats:\n");
+	bcm_bprintf(strbuf, "tx_packets %lu tx_multicast %lu tx_errors %lu\n",
+	            dhdp->tx_packets, dhdp->tx_multicast, dhdp->tx_errors);
+	bcm_bprintf(strbuf, "tx_ctlpkts %lu tx_ctlerrs %lu\n",
+	            dhdp->tx_ctlpkts, dhdp->tx_ctlerrs);
+	bcm_bprintf(strbuf, "rx_packets %lu rx_multicast %lu rx_errors %lu \n",
+	            dhdp->rx_packets, dhdp->rx_multicast, dhdp->rx_errors);
+	bcm_bprintf(strbuf, "rx_ctlpkts %lu rx_ctlerrs %lu rx_dropped %lu\n",
+	            dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped);
+	bcm_bprintf(strbuf, "rx_readahead_cnt %lu tx_realloc %lu\n",
+	            dhdp->rx_readahead_cnt, dhdp->tx_realloc);
+	bcm_bprintf(strbuf, "\n");
+
+	/* Add any prot info */
+	dhd_prot_dump(dhdp, strbuf);
+	bcm_bprintf(strbuf, "\n");
+
+	/* Add any bus info */
+	dhd_bus_dump(dhdp, strbuf);
+
+	return (!strbuf->size ? BCME_BUFTOOSHORT : 0);
+}
+
+int
+dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifindex)
+{
+	wl_ioctl_t ioc;
+
+	ioc.cmd = cmd;
+	ioc.buf = arg;
+	ioc.len = len;
+	ioc.set = set;
+
+	return dhd_wl_ioctl(dhd_pub, ifindex, &ioc, arg, len);
+}
+
+
+int
+dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len)
+{
+	int ret = 0;
+
+	if (dhd_os_proto_block(dhd_pub))
+	{
+
+		ret = dhd_prot_ioctl(dhd_pub, ifindex, ioc, buf, len);
+		if ((ret) && (dhd_pub->up))
+			/* Send hang event only if dhd_open() was success */
+			dhd_os_check_hang(dhd_pub, ifindex, ret);
+
+		if (ret == -ETIMEDOUT && !dhd_pub->up) {
+			DHD_ERROR(("%s: 'resumed on timeout' error occurred "
+				"before the interface was brought up\n",
+				__FUNCTION__));
+			dhd_pub->busstate = DHD_BUS_DOWN;
+		}
+
+		dhd_os_proto_unblock(dhd_pub);
+
+
+	}
+
+	return ret;
+}
+
+static int
+dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+            void *params, int plen, void *arg, int len, int val_size)
+{
+	int bcmerror = 0;
+	int32 int_val = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__, actionid, name));
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+		goto exit;
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	switch (actionid) {
+	case IOV_GVAL(IOV_VERSION):
+		/* Need to have checked buffer length */
+		bcm_strncpy_s((char*)arg, len, dhd_version, len);
+		break;
+
+	case IOV_GVAL(IOV_MSGLEVEL):
+		int_val = (int32)dhd_msg_level;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_MSGLEVEL):
+#ifdef WL_CFG80211
+		/* Enable DHD and WL logs in one shot */
+		if (int_val & DHD_WL_VAL2)
+			wl_cfg80211_enable_trace(TRUE, int_val & (~DHD_WL_VAL2));
+		else if (int_val & DHD_WL_VAL)
+			wl_cfg80211_enable_trace(FALSE, WL_DBG_DBG);
+		if (!(int_val & DHD_WL_VAL2))
+#endif /* WL_CFG80211 */
+		dhd_msg_level = int_val;
+		break;
+	case IOV_GVAL(IOV_BCMERRORSTR):
+		bcm_strncpy_s((char *)arg, len, bcmerrorstr(dhd_pub->bcmerror), BCME_STRLEN);
+		((char *)arg)[BCME_STRLEN - 1] = 0x00;
+		break;
+
+	case IOV_GVAL(IOV_BCMERROR):
+		int_val = (int32)dhd_pub->bcmerror;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_WDTICK):
+		int_val = (int32)dhd_watchdog_ms;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_WDTICK):
+		if (!dhd_pub->up) {
+			bcmerror = BCME_NOTUP;
+			break;
+		}
+		dhd_os_wd_timer(dhd_pub, (uint)int_val);
+		break;
+
+	case IOV_GVAL(IOV_DUMP):
+		bcmerror = dhd_dump(dhd_pub, arg, len);
+		break;
+
+#ifdef DHD_DEBUG
+	case IOV_GVAL(IOV_DCONSOLE_POLL):
+		int_val = (int32)dhd_console_ms;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DCONSOLE_POLL):
+		dhd_console_ms = (uint)int_val;
+		break;
+
+	case IOV_SVAL(IOV_CONS):
+		if (len > 0)
+			bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1);
+		break;
+#endif /* DHD_DEBUG */
+
+	case IOV_SVAL(IOV_CLEARCOUNTS):
+		dhd_pub->tx_packets = dhd_pub->rx_packets = 0;
+		dhd_pub->tx_errors = dhd_pub->rx_errors = 0;
+		dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0;
+		dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0;
+		dhd_pub->rx_dropped = 0;
+		dhd_pub->rx_readahead_cnt = 0;
+		dhd_pub->tx_realloc = 0;
+		dhd_pub->wd_dpc_sched = 0;
+		memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats));
+		dhd_bus_clearcounts(dhd_pub);
+#ifdef PROP_TXSTATUS
+		/* clear proptxstatus related counters */
+		dhd_wlfc_clear_counts(dhd_pub);
+#endif /* PROP_TXSTATUS */
+		break;
+
+
+	case IOV_GVAL(IOV_IOCTLTIMEOUT): {
+		int_val = (int32)dhd_os_get_ioctl_resp_timeout();
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_IOCTLTIMEOUT): {
+		if (int_val <= 0)
+			bcmerror = BCME_BADARG;
+		else
+			dhd_os_set_ioctl_resp_timeout((unsigned int)int_val);
+		break;
+	}
+
+
+#ifdef PROP_TXSTATUS
+	case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE): {
+		bool wlfc_enab = FALSE;
+		bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab);
+		if (bcmerror != BCME_OK)
+			goto exit;
+		int_val = wlfc_enab ? 1 : 0;
+		bcopy(&int_val, arg, val_size);
+		break;
+	}
+	case IOV_SVAL(IOV_PROPTXSTATUS_ENABLE): {
+		bool wlfc_enab = FALSE;
+		bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab);
+		if (bcmerror != BCME_OK)
+			goto exit;
+
+		/* wlfc is already set as desired */
+		if (wlfc_enab == (int_val == 0 ? FALSE : TRUE))
+			goto exit;
+
+		if (int_val == TRUE)
+			bcmerror = dhd_wlfc_init(dhd_pub);
+		else
+			bcmerror = dhd_wlfc_deinit(dhd_pub);
+
+		break;
+	}
+	case IOV_GVAL(IOV_PROPTXSTATUS_MODE):
+		bcmerror = dhd_wlfc_get_mode(dhd_pub, &int_val);
+		if (bcmerror != BCME_OK)
+			goto exit;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_PROPTXSTATUS_MODE):
+		dhd_wlfc_set_mode(dhd_pub, int_val);
+		break;
+
+	case IOV_GVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
+		bcmerror = dhd_wlfc_get_module_ignore(dhd_pub, &int_val);
+		if (bcmerror != BCME_OK)
+			goto exit;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
+		dhd_wlfc_set_module_ignore(dhd_pub, int_val);
+		break;
+
+	case IOV_GVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE):
+		bcmerror = dhd_wlfc_get_credit_ignore(dhd_pub, &int_val);
+		if (bcmerror != BCME_OK)
+			goto exit;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE):
+		dhd_wlfc_set_credit_ignore(dhd_pub, int_val);
+		break;
+
+	case IOV_GVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE):
+		bcmerror = dhd_wlfc_get_txstatus_ignore(dhd_pub, &int_val);
+		if (bcmerror != BCME_OK)
+			goto exit;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE):
+		dhd_wlfc_set_txstatus_ignore(dhd_pub, int_val);
+		break;
+#endif /* PROP_TXSTATUS */
+
+	case IOV_GVAL(IOV_BUS_TYPE):
+		/* The dhd application queries the driver to check whether the bus is USB, SDIO or PCIe. */
+#ifdef BCMDHDUSB
+		int_val = BUS_TYPE_USB;
+#else
+		int_val = BUS_TYPE_SDIO;
+#endif
+#ifdef PCIE_FULL_DONGLE
+		int_val = BUS_TYPE_PCIE;
+#endif
+		bcopy(&int_val, arg, val_size);
+		break;
+
+
+#ifdef WLMEDIA_HTSF
+	case IOV_GVAL(IOV_WLPKTDLYSTAT_SZ):
+		int_val = dhd_pub->htsfdlystat_sz;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_WLPKTDLYSTAT_SZ):
+		dhd_pub->htsfdlystat_sz = int_val & 0xff;
+		printf("Setting tsfdlystat_sz:%d\n", dhd_pub->htsfdlystat_sz);
+		break;
+#endif
+	case IOV_SVAL(IOV_CHANGEMTU):
+		int_val &= 0xffff;
+		bcmerror = dhd_change_mtu(dhd_pub, int_val, 0);
+		break;
+
+	case IOV_GVAL(IOV_HOSTREORDER_FLOWS):
+	{
+		uint i = 0;
+		uint8 *ptr = (uint8 *)arg;
+		uint8 count = 0;
+
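+		/* The first byte of the output buffer is reserved for the flow count, written after the scan */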
+		ptr++;
+		for (i = 0; i < WLHOST_REORDERDATA_MAXFLOWS; i++) {
+			if (dhd_pub->reorder_bufs[i] != NULL) {
+				*ptr = dhd_pub->reorder_bufs[i]->flow_id;
+				ptr++;
+				count++;
+			}
+		}
+		ptr = (uint8 *)arg;
+		*ptr = count;
+		break;
+	}
+#ifdef DHDTCPACK_SUPPRESS
+	case IOV_GVAL(IOV_TCPACK_SUPPRESS): {
+		int_val = (uint32)dhd_pub->tcpack_sup_mode;
+		bcopy(&int_val, arg, val_size);
+		break;
+	}
+	case IOV_SVAL(IOV_TCPACK_SUPPRESS): {
+		bcmerror = dhd_tcpack_suppress_set(dhd_pub, (uint8)int_val);
+		break;
+	}
+#endif /* DHDTCPACK_SUPPRESS */
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+
+exit:
+	DHD_TRACE(("%s: actionid %d, bcmerror %d\n", __FUNCTION__, actionid, bcmerror));
+	return bcmerror;
+}
+
+/* Store the status of a connection attempt for later retrieval by an iovar */
+void
+dhd_store_conn_status(uint32 event, uint32 status, uint32 reason)
+{
+	/* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID
+	 * because an encryption/rsn mismatch results in both events, and
+	 * the important information is in the WLC_E_PRUNE.
+	 */
+	if (!(event == WLC_E_SET_SSID && status == WLC_E_STATUS_FAIL &&
+	      dhd_conn_event == WLC_E_PRUNE)) {
+		dhd_conn_event = event;
+		dhd_conn_status = status;
+		dhd_conn_reason = reason;
+	}
+}
+
+bool
+dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec)
+{
+	void *p;
+	int eprec = -1;		/* precedence to evict from */
+	bool discard_oldest;
+
+	/* Fast case, precedence queue is not full and we are also not
+	 * exceeding total queue length
+	 */
+	if (!pktq_pfull(q, prec) && !pktq_full(q)) {
+		pktq_penq(q, prec, pkt);
+		return TRUE;
+	}
+
+	/* Determine precedence from which to evict packet, if any */
+	if (pktq_pfull(q, prec))
+		eprec = prec;
+	else if (pktq_full(q)) {
+		p = pktq_peek_tail(q, &eprec);
+		ASSERT(p);
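+		/* never evict a higher-precedence packet to admit a lower-precedence one */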
+		if (eprec > prec || eprec < 0)
+			return FALSE;
+	}
+
+	/* Evict if needed */
+	if (eprec >= 0) {
+		/* Detect queueing to unconfigured precedence */
+		ASSERT(!pktq_pempty(q, eprec));
+		discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, eprec);
+		if (eprec == prec && !discard_oldest)
+			return FALSE;		/* refuse newer (incoming) packet */
+		/* Evict packet according to discard policy */
+		p = discard_oldest ? pktq_pdeq(q, eprec) : pktq_pdeq_tail(q, eprec);
+		ASSERT(p);
+#ifdef DHDTCPACK_SUPPRESS
+		if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
+			DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
+				__FUNCTION__, __LINE__));
+			dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
+		}
+#endif /* DHDTCPACK_SUPPRESS */
+		PKTFREE(dhdp->osh, p, TRUE);
+	}
+
+	/* Enqueue */
+	p = pktq_penq(q, prec, pkt);
+	ASSERT(p);
+
+	return TRUE;
+}
+
+/*
+ * Functions to drop proper pkts from queue:
+ *	If one pkt in queue is non-fragmented, drop first non-fragmented pkt only
+ *	If all pkts in queue are all fragmented, find and drop one whole set fragmented pkts
+ *	If can't find pkts matching upper 2 cases, drop first pkt anyway
+ */
+bool
+dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn)
+{
+	struct pktq_prec *q = NULL;
+	void *p, *prev = NULL, *next = NULL, *first = NULL, *last = NULL, *prev_first = NULL;
+	pkt_frag_t frag_info;
+
+	ASSERT(dhdp && pq);
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+	p = q->head;
+
+	if (p == NULL)
+		return FALSE;
+
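+	/* Scan for the first non-fragmented packet, or for a complete
+	 * first..last fragment run, remembering the packet preceding the
+	 * range (prev_first) so the queue can be relinked afterwards.
+	 */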
+	while (p) {
+		frag_info = pkt_frag_info(dhdp->osh, p);
+		if (frag_info == DHD_PKT_FRAG_NONE) {
+			break;
+		} else if (frag_info == DHD_PKT_FRAG_FIRST) {
+			if (first) {
+				/* No last frag pkt, use prev as last */
+				last = prev;
+				break;
+			} else {
+				first = p;
+				prev_first = prev;
+			}
+		} else if (frag_info == DHD_PKT_FRAG_LAST) {
+			if (first) {
+				last = p;
+				break;
+			}
+		}
+
+		prev = p;
+		p = PKTLINK(p);
+	}
+
+	if ((p == NULL) || ((frag_info != DHD_PKT_FRAG_NONE) && !(first && last))) {
+		/* Not found matching pkts, use oldest */
+		prev = NULL;
+		p = q->head;
+		frag_info = 0;
+	}
+
+	if (frag_info == DHD_PKT_FRAG_NONE) {
+		first = last = p;
+		prev_first = prev;
+	}
+
+	p = first;
+	while (p) {
+		next = PKTLINK(p);
+		q->len--;
+		pq->len--;
+
+		PKTSETLINK(p, NULL);
+
+		if (fn)
+			fn(dhdp, prec, p, TRUE);
+
+		if (p == last)
+			break;
+
+		p = next;
+	}
+
+	if (prev_first == NULL) {
+		if ((q->head = next) == NULL)
+			q->tail = NULL;
+	} else {
+		PKTSETLINK(prev_first, next);
+		if (!next)
+			q->tail = prev_first;
+	}
+
+	return TRUE;
+}
+
+static int
+dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name,
+	void *params, int plen, void *arg, int len, bool set)
+{
+	int bcmerror = 0;
+	int val_size;
+	const bcm_iovar_t *vi = NULL;
+	uint32 actionid;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get MUST have return space */
+	ASSERT(set || (arg && len));
+
+	/* Set does NOT take qualifiers */
+	ASSERT(!set || (!params && !plen));
+
+	if ((vi = bcm_iovar_lookup(dhd_iovars, name)) == NULL) {
+		bcmerror = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+		name, (set ? "set" : "get"), len, plen));
+
+	/* set up 'params' pointer in case this is a set command so that
+	 * the convenience int and bool code can be common to set and get
+	 */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		/* all other types are integer sized */
+		val_size = sizeof(int);
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+
+	bcmerror = dhd_doiovar(dhd_pub, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+	return bcmerror;
+}
+
+int
+dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen)
+{
+	int bcmerror = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!buf) {
+		return BCME_BADARG;
+	}
+
+	switch (ioc->cmd) {
+	case DHD_GET_MAGIC:
+		if (buflen < sizeof(int))
+			bcmerror = BCME_BUFTOOSHORT;
+		else
+			*(int*)buf = DHD_IOCTL_MAGIC;
+		break;
+
+	case DHD_GET_VERSION:
+		if (buflen < sizeof(int))
+			bcmerror = BCME_BUFTOOSHORT;
+		else
+			*(int*)buf = DHD_IOCTL_VERSION;
+		break;
+
+	case DHD_GET_VAR:
+	case DHD_SET_VAR: {
+		char *arg;
+		uint arglen;
+
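+		/* buf holds "<name>\0<args>", e.g. "bcn_li_dtim\0<int32>" */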
+		/* scan past the name to any arguments */
+		for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--)
+			;
+
+		if (*arg) {
+			bcmerror = BCME_BUFTOOSHORT;
+			break;
+		}
+
+		/* account for the NUL terminator */
+		arg++, arglen--;
+
+		/* call with the appropriate arguments */
+		if (ioc->cmd == DHD_GET_VAR)
+			bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen,
+			buf, buflen, IOV_GET);
+		else
+			bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0, arg, arglen, IOV_SET);
+		if (bcmerror != BCME_UNSUPPORTED)
+			break;
+
+		/* not in generic table, try protocol module */
+		if (ioc->cmd == DHD_GET_VAR)
+			bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg,
+				arglen, buf, buflen, IOV_GET);
+		else
+			bcmerror = dhd_prot_iovar_op(dhd_pub, buf,
+				NULL, 0, arg, arglen, IOV_SET);
+		if (bcmerror != BCME_UNSUPPORTED)
+			break;
+
+		/* if still not found, try bus module */
+		if (ioc->cmd == DHD_GET_VAR) {
+			bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
+				arg, arglen, buf, buflen, IOV_GET);
+		} else {
+			bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
+				NULL, 0, arg, arglen, IOV_SET);
+		}
+
+		break;
+	}
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+	}
+
+	return bcmerror;
+}
+
+#ifdef SHOW_EVENTS
+static void
+wl_show_host_event(wl_event_msg_t *event, void *event_data)
+{
+	uint i, status, reason;
+	bool group = FALSE, flush_txq = FALSE, link = FALSE;
+	const char *auth_str;
+	const char *event_name;
+	uchar *buf;
+	char err_msg[256], eabuf[ETHER_ADDR_STR_LEN];
+	uint event_type, flags, auth_type, datalen;
+
+	event_type = ntoh32(event->event_type);
+	flags = ntoh16(event->flags);
+	status = ntoh32(event->status);
+	reason = ntoh32(event->reason);
+	BCM_REFERENCE(reason);
+	auth_type = ntoh32(event->auth_type);
+	datalen = ntoh32(event->datalen);
+
+	/* debug dump of event messages */
+	snprintf(eabuf, sizeof(eabuf), "%02x:%02x:%02x:%02x:%02x:%02x",
+	        (uchar)event->addr.octet[0]&0xff,
+	        (uchar)event->addr.octet[1]&0xff,
+	        (uchar)event->addr.octet[2]&0xff,
+	        (uchar)event->addr.octet[3]&0xff,
+	        (uchar)event->addr.octet[4]&0xff,
+	        (uchar)event->addr.octet[5]&0xff);
+
+	event_name = "UNKNOWN";
+	for (i = 0; i < (uint)bcmevent_names_size; i++)
+		if (bcmevent_names[i].event == event_type)
+			event_name = bcmevent_names[i].name;
+
+	if (flags & WLC_EVENT_MSG_LINK)
+		link = TRUE;
+	if (flags & WLC_EVENT_MSG_GROUP)
+		group = TRUE;
+	if (flags & WLC_EVENT_MSG_FLUSHTXQ)
+		flush_txq = TRUE;
+
+	switch (event_type) {
+	case WLC_E_START:
+	case WLC_E_DEAUTH:
+	case WLC_E_DISASSOC:
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		break;
+
+	case WLC_E_ASSOC_IND:
+	case WLC_E_REASSOC_IND:
+
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		break;
+
+	case WLC_E_ASSOC:
+	case WLC_E_REASSOC:
+		if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name, eabuf));
+		} else if (status == WLC_E_STATUS_TIMEOUT) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, reason %d\n",
+			       event_name, eabuf, (int)reason));
+		} else {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n",
+			       event_name, eabuf, (int)status));
+		}
+		break;
+
+	case WLC_E_DEAUTH_IND:
+	case WLC_E_DISASSOC_IND:
+		DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name, eabuf, (int)reason));
+		break;
+
+	case WLC_E_AUTH:
+	case WLC_E_AUTH_IND:
+		if (auth_type == DOT11_OPEN_SYSTEM)
+			auth_str = "Open System";
+		else if (auth_type == DOT11_SHARED_KEY)
+			auth_str = "Shared Key";
+		else {
+			snprintf(err_msg, sizeof(err_msg), "AUTH unknown: %d", (int)auth_type);
+			auth_str = err_msg;
+		}
+		if (event_type == WLC_E_AUTH_IND) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str));
+		} else if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n",
+				event_name, eabuf, auth_str));
+		} else if (status == WLC_E_STATUS_TIMEOUT) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
+				event_name, eabuf, auth_str));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, reason %d\n",
+			       event_name, eabuf, auth_str, (int)reason));
+		}
+		BCM_REFERENCE(auth_str);
+
+		break;
+
+	case WLC_E_JOIN:
+	case WLC_E_ROAM:
+	case WLC_E_SET_SSID:
+		if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, failed\n", event_name));
+		} else if (status == WLC_E_STATUS_NO_NETWORKS) {
+			DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name));
+		} else {
+			DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
+				event_name, (int)status));
+		}
+		break;
+
+	case WLC_E_BEACON_RX:
+		if (status == WLC_E_STATUS_SUCCESS) {
+			DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name));
+		} else if (status == WLC_E_STATUS_FAIL) {
+			DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name));
+		} else {
+			DHD_EVENT(("MACEVENT: %s, status %d\n", event_name, status));
+		}
+		break;
+
+	case WLC_E_LINK:
+		DHD_EVENT(("MACEVENT: %s %s\n", event_name, link?"UP":"DOWN"));
+		BCM_REFERENCE(link);
+		break;
+
+	case WLC_E_MIC_ERROR:
+		DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
+		       event_name, eabuf, group, flush_txq));
+		BCM_REFERENCE(group);
+		BCM_REFERENCE(flush_txq);
+		break;
+
+	case WLC_E_ICV_ERROR:
+	case WLC_E_UNICAST_DECODE_ERROR:
+	case WLC_E_MULTICAST_DECODE_ERROR:
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n",
+		       event_name, eabuf));
+		break;
+
+	case WLC_E_TXFAIL:
+		DHD_EVENT(("MACEVENT: %s, RA %s\n", event_name, eabuf));
+		break;
+
+	case WLC_E_SCAN_COMPLETE:
+	case WLC_E_ASSOC_REQ_IE:
+	case WLC_E_ASSOC_RESP_IE:
+	case WLC_E_PMKID_CACHE:
+		DHD_EVENT(("MACEVENT: %s\n", event_name));
+		break;
+
+	case WLC_E_PFN_NET_FOUND:
+	case WLC_E_PFN_NET_LOST:
+	case WLC_E_PFN_SCAN_COMPLETE:
+	case WLC_E_PFN_SCAN_NONE:
+	case WLC_E_PFN_SCAN_ALLGONE:
+		DHD_EVENT(("PNOEVENT: %s\n", event_name));
+		break;
+
+	case WLC_E_PSK_SUP:
+	case WLC_E_PRUNE:
+		DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n",
+		           event_name, (int)status, (int)reason));
+		break;
+
+#ifdef WIFI_ACT_FRAME
+	case WLC_E_ACTION_FRAME:
+		DHD_TRACE(("MACEVENT: %s Bssid %s\n", event_name, eabuf));
+		break;
+#endif /* WIFI_ACT_FRAME */
+
+	case WLC_E_TRACE: {
+		static uint32 seqnum_prev = 0;
+		static uint32 logtrace_seqnum_prev = 0;
+		msgtrace_hdr_t hdr;
+		uint32 nblost;
+		char *s, *p;
+
+		buf = (uchar *) event_data;
+		memcpy(&hdr, buf, MSGTRACE_HDRLEN);
+
+		if (hdr.version != MSGTRACE_VERSION) {
+			printf("\nMACEVENT: %s [unsupported version --> "
+			       "dhd version:%d dongle version:%d]\n",
+			       event_name, MSGTRACE_VERSION, hdr.version);
+			/* Reset datalen to avoid display below */
+			datalen = 0;
+			break;
+		}
+
+		if (hdr.trace_type == MSGTRACE_HDR_TYPE_MSG) {
+			/* There are 2 bytes available at the end of data */
+			buf[MSGTRACE_HDRLEN + ntoh16(hdr.len)] = '\0';
+
+			if (ntoh32(hdr.discarded_bytes) || ntoh32(hdr.discarded_printf)) {
+				printf("\nWLC_E_TRACE: [Discarded traces in dongle -->"
+				       "discarded_bytes %d discarded_printf %d]\n",
+				       ntoh32(hdr.discarded_bytes), ntoh32(hdr.discarded_printf));
+			}
+
+			nblost = ntoh32(hdr.seqnum) - seqnum_prev - 1;
+			if (nblost > 0) {
+				printf("\nWLC_E_TRACE: [Event lost (msg) --> seqnum %d nblost %d\n",
+				       ntoh32(hdr.seqnum), nblost);
+			}
+			seqnum_prev = ntoh32(hdr.seqnum);
+
+			/* Display the trace buffer. Advance from newline to newline
+			 * to avoid issuing one big printf (an issue with Linux printk).
+			 */
+			p = (char *)&buf[MSGTRACE_HDRLEN];
+			while (*p != '\0' && (s = strstr(p, "\n")) != NULL) {
+				*s = '\0';
+				printf("WLC_E_TRACE: %s\n", p);
+				p = s+1;
+			}
+			if (*p) printf("WLC_E_TRACE: %s", p);
+
+			/* Reset datalen to avoid display below */
+			datalen = 0;
+
+		} else if (hdr.trace_type == MSGTRACE_HDR_TYPE_LOG) {
+			/* Let the standard event printing work for now */
+			uint32 timestamp, w;
+			if (ntoh32(hdr.seqnum) == logtrace_seqnum_prev) {
+				printf("\nWLC_E_TRACE: [Event duplicate (log) %d",
+				       logtrace_seqnum_prev);
+			} else {
+				nblost = ntoh32(hdr.seqnum) - logtrace_seqnum_prev - 1;
+				if (nblost > 0) {
+					printf("\nWLC_E_TRACE: [Event lost (log)"
+					       " --> seqnum %d nblost %d\n",
+					       ntoh32(hdr.seqnum), nblost);
+				}
+				logtrace_seqnum_prev = ntoh32(hdr.seqnum);
+
+				p = (char *)&buf[MSGTRACE_HDRLEN];
+				datalen -= MSGTRACE_HDRLEN;
+				w = ntoh32((uint32) *p);
+				p += 4;
+				datalen -= 4;
+				timestamp = ntoh32((uint32) *p);
+				printf("Logtrace %x timestamp %x %x",
+				       logtrace_seqnum_prev, timestamp, w);
+
+				while (datalen > 4) {
+					p += 4;
+					datalen -= 4;
+					/* Print each word.  DO NOT ntoh it.  */
+					printf(" %8.8x", *((uint32 *) p));
+				}
+				printf("\n");
+			}
+			datalen = 0;
+		}
+
+		break;
+	}
+
+
+	case WLC_E_RSSI:
+		DHD_EVENT(("MACEVENT: %s %d\n", event_name, ntoh32(*((int *)event_data))));
+		break;
+
+	case WLC_E_SERVICE_FOUND:
+	case WLC_E_P2PO_ADD_DEVICE:
+	case WLC_E_P2PO_DEL_DEVICE:
+		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+		break;
+
+	default:
+		DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
+		       event_name, event_type, eabuf, (int)status, (int)reason,
+		       (int)auth_type));
+		break;
+	}
+
+	/* show any appended data */
+	if (DHD_BYTES_ON() && DHD_EVENT_ON() && datalen) {
+		buf = (uchar *) event_data;
+		DHD_EVENT((" data (%d) : ", datalen));
+		for (i = 0; i < datalen; i++)
+			DHD_EVENT((" 0x%02x ", *buf++));
+		DHD_EVENT(("\n"));
+	}
+}
+#endif /* SHOW_EVENTS */
+
+int
+wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata,
+              wl_event_msg_t *event, void **data_ptr)
+{
+	/* check whether packet is a BRCM event pkt */
+	bcm_event_t *pvt_data = (bcm_event_t *)pktdata;
+	uint8 *event_data;
+	uint32 type, status, datalen;
+	uint16 flags;
+	int evlen;
+
+	if (bcmp(BRCM_OUI, &pvt_data->bcm_hdr.oui[0], DOT11_OUI_LEN)) {
+		DHD_ERROR(("%s: mismatched OUI, bailing\n", __FUNCTION__));
+		return (BCME_ERROR);
+	}
+
+	/* BRCM event pkt may be unaligned - use xxx_ua to load user_subtype. */
+	if (ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype) != BCMILCP_BCM_SUBTYPE_EVENT) {
+		DHD_ERROR(("%s: mismatched subtype, bailing\n", __FUNCTION__));
+		return (BCME_ERROR);
+	}
+
+	*data_ptr = &pvt_data[1];
+	event_data = *data_ptr;
+
+	/* memcpy since BRCM event pkt may be unaligned. */
+	memcpy(event, &pvt_data->event, sizeof(wl_event_msg_t));
+
+	type = ntoh32_ua((void *)&event->event_type);
+	flags = ntoh16_ua((void *)&event->flags);
+	status = ntoh32_ua((void *)&event->status);
+	datalen = ntoh32_ua((void *)&event->datalen);
+	evlen = datalen + sizeof(bcm_event_t);
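+	/* total bytes forwarded to upper layers: bcm_event_t wrapper plus event payload */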
+
+	switch (type) {
+#ifdef PROP_TXSTATUS
+	case WLC_E_FIFO_CREDIT_MAP:
+		dhd_wlfc_enable(dhd_pub);
+		dhd_wlfc_FIFOcreditmap_event(dhd_pub, event_data);
+		WLFC_DBGMESG(("WLC_E_FIFO_CREDIT_MAP:(AC0,AC1,AC2,AC3),(BC_MC),(OTHER): "
+			"(%d,%d,%d,%d),(%d),(%d)\n", event_data[0], event_data[1],
+			event_data[2],
+			event_data[3], event_data[4], event_data[5]));
+		break;
+
+	case WLC_E_BCMC_CREDIT_SUPPORT:
+		dhd_wlfc_BCMCCredit_support_event(dhd_pub);
+		break;
+#endif
+
+	case WLC_E_IF: {
+		struct wl_event_data_if *ifevent = (struct wl_event_data_if *)event_data;
+
+		/* Ignore the event if NOIF is set */
+		if (ifevent->reserved & WLC_E_IF_FLAGS_BSSCFG_NOIF) {
+			DHD_ERROR(("WLC_E_IF: NO_IF set, event Ignored\r\n"));
+			return (BCME_UNSUPPORTED);
+		}
+
+#ifdef PROP_TXSTATUS
+		{
+			uint8* ea = pvt_data->eth.ether_dhost;
+			WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, "
+			              "[%02x:%02x:%02x:%02x:%02x:%02x]\n",
+			              ifevent->ifidx,
+			              ((ifevent->opcode == WLC_E_IF_ADD) ? "ADD":"DEL"),
+			              ((ifevent->role == 0) ? "STA":"AP "),
+			              ea[0], ea[1], ea[2], ea[3], ea[4], ea[5]));
+			(void)ea;
+
+			if (ifevent->opcode == WLC_E_IF_CHANGE)
+				dhd_wlfc_interface_event(dhd_pub,
+					eWLFC_MAC_ENTRY_ACTION_UPDATE,
+					ifevent->ifidx, ifevent->role, ea);
+			else
+				dhd_wlfc_interface_event(dhd_pub,
+					((ifevent->opcode == WLC_E_IF_ADD) ?
+					eWLFC_MAC_ENTRY_ACTION_ADD : eWLFC_MAC_ENTRY_ACTION_DEL),
+					ifevent->ifidx, ifevent->role, ea);
+
+			/* dhd has already created the primary interface (index 0) by default */
+			if (ifevent->ifidx == 0)
+				break;
+		}
+#endif /* PROP_TXSTATUS */
+
+		if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS) {
+			if (ifevent->opcode == WLC_E_IF_ADD) {
+				if (dhd_event_ifadd(dhd_pub->info, ifevent, event->ifname,
+					event->addr.octet)) {
+
+					DHD_ERROR(("%s: dhd_event_ifadd failed ifidx: %d  %s\n",
+						__FUNCTION__, ifevent->ifidx, event->ifname));
+					return (BCME_ERROR);
+				}
+			} else if (ifevent->opcode == WLC_E_IF_DEL) {
+				dhd_event_ifdel(dhd_pub->info, ifevent, event->ifname,
+					event->addr.octet);
+			} else if (ifevent->opcode == WLC_E_IF_CHANGE) {
+#ifdef WL_CFG80211
+				wl_cfg80211_notify_ifchange(ifevent->ifidx,
+					event->ifname, event->addr.octet, ifevent->bssidx);
+#endif /* WL_CFG80211 */
+			}
+		} else {
+#ifndef PROP_TXSTATUS
+			DHD_ERROR(("%s: Invalid ifidx %d for %s\n",
+				__FUNCTION__, ifevent->ifidx, event->ifname));
+#endif /* !PROP_TXSTATUS */
+		}
+
+		/* send up the if event: btamp user needs it */
+		*ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
+		/* push up to external supp/auth */
+		dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
+		break;
+	}
+#ifdef WLMEDIA_HTSF
+	case WLC_E_HTSFSYNC:
+		htsf_update(dhd_pub->info, event_data);
+		break;
+#endif /* WLMEDIA_HTSF */
+	case WLC_E_PFN_NET_FOUND:
+	case WLC_E_PFN_NET_LOST:
+		break;
+#if defined(PNO_SUPPORT)
+	case WLC_E_PFN_BSSID_NET_FOUND:
+	case WLC_E_PFN_BSSID_NET_LOST:
+	case WLC_E_PFN_BEST_BATCHING:
+		dhd_pno_event_handler(dhd_pub, event, (void *)event_data);
+		break;
+#endif /* PNO_SUPPORT */
+	case WLC_E_NDIS_LINK: {
+		uint32 temp = hton32(WLC_E_LINK);
+
+		memcpy((void *)(&pvt_data->event.event_type), &temp,
+		       sizeof(pvt_data->event.event_type));
+	}
+		/* These are what the external supplicant/authenticator wants */
+		/* fall through */
+	case WLC_E_LINK:
+	case WLC_E_DEAUTH:
+	case WLC_E_DEAUTH_IND:
+	case WLC_E_DISASSOC:
+	case WLC_E_DISASSOC_IND:
+		DHD_EVENT(("%s: Link event %d, flags %x, status %x\n",
+		           __FUNCTION__, type, flags, status));
+		/* fall through */
+	default:
+		*ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
+		/* push up to external supp/auth */
+		dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
+		DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n",
+		           __FUNCTION__, type, flags, status));
+		BCM_REFERENCE(flags);
+		BCM_REFERENCE(status);
+
+		break;
+	}
+
+#ifdef SHOW_EVENTS
+	wl_show_host_event(event, (void *)event_data);
+#endif /* SHOW_EVENTS */
+
+	return (BCME_OK);
+}
+
+void
+wl_event_to_host_order(wl_event_msg_t * evt)
+{
+	/* Event struct members passed from dongle to host are stored in network
+	 * byte order. Convert all members to host-order.
+	 */
+	evt->event_type = ntoh32(evt->event_type);
+	evt->flags = ntoh16(evt->flags);
+	evt->status = ntoh32(evt->status);
+	evt->reason = ntoh32(evt->reason);
+	evt->auth_type = ntoh32(evt->auth_type);
+	evt->datalen = ntoh32(evt->datalen);
+	evt->version = ntoh16(evt->version);
+}
+
+void
+dhd_print_buf(void *pbuf, int len, int bytes_per_line)
+{
+#ifdef DHD_DEBUG
+	int i, j = 0;
+	unsigned char *buf = pbuf;
+
+	if (bytes_per_line == 0) {
+		bytes_per_line = len;
+	}
+
+	for (i = 0; i < len; i++) {
+		printf("%2.2x", *buf++);
+		j++;
+		if (j == bytes_per_line) {
+			printf("\n");
+			j = 0;
+		} else {
+			printf(":");
+		}
+	}
+	printf("\n");
+#endif /* DHD_DEBUG */
+}
+#ifndef strtoul
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#endif
+
+#ifdef PKT_FILTER_SUPPORT
+/* Convert user's input in hex pattern to byte-size mask */
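+/* e.g. "0xff00" is converted to the two bytes {0xff, 0x00} and 2 is returned */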
+static int
+wl_pattern_atoh(char *src, char *dst)
+{
+	int i;
+	if (strncmp(src, "0x", 2) != 0 &&
+	    strncmp(src, "0X", 2) != 0) {
+		DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
+		return -1;
+	}
+	src = src + 2; /* Skip past 0x */
+	if (strlen(src) % 2 != 0) {
+		DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
+		return -1;
+	}
+	for (i = 0; *src != '\0'; i++) {
+		char num[3];
+		bcm_strncpy_s(num, sizeof(num), src, 2);
+		num[2] = '\0';
+		dst[i] = (uint8)strtoul(num, NULL, 16);
+		src += 2;
+	}
+	return i;
+}
+
+void
+dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode)
+{
+	char				*argv[8];
+	int					i = 0;
+	const char 			*str;
+	int					buf_len;
+	int					str_len;
+	char				*arg_save = 0, *arg_org = 0;
+	int					rc;
+	char				buf[128];
+	wl_pkt_filter_enable_t	enable_parm;
+	wl_pkt_filter_enable_t	* pkt_filterp;
+
+	if (!arg)
+		return;
+
+	if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
+		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+	arg_org = arg_save;
+	memcpy(arg_save, arg, strlen(arg) + 1);
+
+	argv[i] = bcmstrtok(&arg_save, " ", 0);
+
+	i = 0;
+	if (argv[i] == NULL) {
+		DHD_ERROR(("No args provided\n"));
+		goto fail;
+	}
+
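+	/* Build the iovar buffer as "pkt_filter_enable\0" followed by a
+	 * packed wl_pkt_filter_enable_t value.
+	 */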
+	str = "pkt_filter_enable";
+	str_len = strlen(str);
+	bcm_strncpy_s(buf, sizeof(buf), str, str_len);
+	buf[str_len] = '\0';
+	buf_len = str_len + 1;
+
+	pkt_filterp = (wl_pkt_filter_enable_t *)(buf + str_len + 1);
+
+	/* Parse packet filter id. */
+	enable_parm.id = htod32(strtoul(argv[i], NULL, 0));
+
+	/* Parse enable/disable value. */
+	enable_parm.enable = htod32(enable);
+
+	buf_len += sizeof(enable_parm);
+	memcpy((char *)pkt_filterp,
+	       &enable_parm,
+	       sizeof(enable_parm));
+
+	/* Enable/disable the specified filter. */
+	rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+	rc = rc >= 0 ? 0 : rc;
+	if (rc)
+		DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+		__FUNCTION__, arg, rc));
+	else
+		DHD_TRACE(("%s: successfully added pktfilter %s\n",
+		__FUNCTION__, arg));
+
+	/* Control the master mode */
+	bcm_mkiovar("pkt_filter_mode", (char *)&master_mode, 4, buf, sizeof(buf));
+	rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+	rc = rc >= 0 ? 0 : rc;
+	if (rc)
+		DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+		__FUNCTION__, arg, rc));
+
+fail:
+	if (arg_org)
+		MFREE(dhd->osh, arg_org, strlen(arg) + 1);
+}
+
+void
+dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg)
+{
+	const char 			*str;
+	wl_pkt_filter_t		pkt_filter;
+	wl_pkt_filter_t		*pkt_filterp;
+	int					buf_len;
+	int					str_len;
+	int 				rc;
+	uint32				mask_size;
+	uint32				pattern_size;
+	char				*argv[8], * buf = 0;
+	int					i = 0;
+	char				*arg_save = 0, *arg_org = 0;
+#define BUF_SIZE		2048
+
+	if (!arg)
+		return;
+
+	if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
+		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	arg_org = arg_save;
+
+	if (!(buf = MALLOC(dhd->osh, BUF_SIZE))) {
+		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	memcpy(arg_save, arg, strlen(arg) + 1);
+
+	if (strlen(arg) > BUF_SIZE) {
+		DHD_ERROR(("Not enough buffer %d < %d\n", (int)strlen(arg), (int)sizeof(buf)));
+		goto fail;
+	}
+
+	argv[i] = bcmstrtok(&arg_save, " ", 0);
+	while (argv[i++])
+		argv[i] = bcmstrtok(&arg_save, " ", 0);
+
+	i = 0;
+	if (argv[i] == NULL) {
+		DHD_ERROR(("No args provided\n"));
+		goto fail;
+	}
+
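+	/* Build the iovar buffer as "pkt_filter_add\0" followed by a packed wl_pkt_filter_t */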
+	str = "pkt_filter_add";
+	str_len = strlen(str);
+	bcm_strncpy_s(buf, BUF_SIZE, str, str_len);
+	buf[str_len] = '\0';
+	buf_len = str_len + 1;
+
+	pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1);
+
+	/* Parse packet filter id. */
+	pkt_filter.id = htod32(strtoul(argv[i], NULL, 0));
+
+	if (argv[++i] == NULL) {
+		DHD_ERROR(("Polarity not provided\n"));
+		goto fail;
+	}
+
+	/* Parse filter polarity. */
+	pkt_filter.negate_match = htod32(strtoul(argv[i], NULL, 0));
+
+	if (argv[++i] == NULL) {
+		DHD_ERROR(("Filter type not provided\n"));
+		goto fail;
+	}
+
+	/* Parse filter type. */
+	pkt_filter.type = htod32(strtoul(argv[i], NULL, 0));
+
+	if (argv[++i] == NULL) {
+		DHD_ERROR(("Offset not provided\n"));
+		goto fail;
+	}
+
+	/* Parse pattern filter offset. */
+	pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0));
+
+	if (argv[++i] == NULL) {
+		DHD_ERROR(("Bitmask not provided\n"));
+		goto fail;
+	}
+
+	/* Parse pattern filter mask. */
+	mask_size =
+		htod32(wl_pattern_atoh(argv[i], (char *) pkt_filterp->u.pattern.mask_and_pattern));
+
+	if (argv[++i] == NULL) {
+		DHD_ERROR(("Pattern not provided\n"));
+		goto fail;
+	}
+
+	/* Parse pattern filter pattern. */
+	pattern_size =
+		htod32(wl_pattern_atoh(argv[i],
+	         (char *) &pkt_filterp->u.pattern.mask_and_pattern[mask_size]));
+
+	if (mask_size != pattern_size) {
+		DHD_ERROR(("Mask and pattern not the same size\n"));
+		goto fail;
+	}
+
+	pkt_filter.u.pattern.size_bytes = mask_size;
+	buf_len += WL_PKT_FILTER_FIXED_LEN;
+	buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
+
+	/* Filter attributes are set in a local variable (pkt_filter), and
+	** then memcpy'ed into the buffer (pkt_filterp) since there is no
+	** guarantee that the buffer is properly aligned.
+	*/
+	memcpy((char *)pkt_filterp,
+	       &pkt_filter,
+	       WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
+
+	rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+	rc = rc >= 0 ? 0 : rc;
+
+	if (rc)
+		DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+		__FUNCTION__, arg, rc));
+	else
+		DHD_TRACE(("%s: successfully added pktfilter %s\n",
+		__FUNCTION__, arg));
+
+fail:
+	if (arg_org)
+		MFREE(dhd->osh, arg_org, strlen(arg) + 1);
+
+	if (buf)
+		MFREE(dhd->osh, buf, BUF_SIZE);
+}
+
+void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id)
+{
+	char iovbuf[32];
+	int ret;
+
+	bcm_mkiovar("pkt_filter_delete", (char *)&id, 4, iovbuf, sizeof(iovbuf));
+	ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+	if (ret < 0) {
+		DHD_ERROR(("%s: Failed to delete filter ID:%d, ret=%d\n",
+			__FUNCTION__, id, ret));
+	}
+}
+#endif /* PKT_FILTER_SUPPORT */
+
+/* ========================== */
+/* ==== ARP OFFLOAD SUPPORT = */
+/* ========================== */
+#ifdef ARP_OFFLOAD_SUPPORT
+void
+dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode)
+{
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+	int iovar_len;
+	int retcode;
+
+	iovar_len = bcm_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf));
+	if (!iovar_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return;
+	}
+
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iovar_len, TRUE, 0);
+	retcode = retcode >= 0 ? 0 : retcode;
+	if (retcode)
+		DHD_TRACE(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n",
+			__FUNCTION__, arp_mode, retcode));
+	else
+		DHD_TRACE(("%s: successfully set ARP offload mode to 0x%x\n",
+			__FUNCTION__, arp_mode));
+}
+
+void
+dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable)
+{
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+	int iovar_len;
+	int retcode;
+
+	iovar_len = bcm_mkiovar("arpoe", (char *)&arp_enable, 4, iovbuf, sizeof(iovbuf));
+	if (!iovar_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return;
+	}
+
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iovar_len, TRUE, 0);
+	retcode = retcode >= 0 ? 0 : retcode;
+	if (retcode)
+		DHD_TRACE(("%s: failed to enabe ARP offload to %d, retcode = %d\n",
+			__FUNCTION__, arp_enable, retcode));
+	else
+		DHD_TRACE(("%s: successfully enabed ARP offload to %d\n",
+			__FUNCTION__, arp_enable));
+	if (arp_enable) {
+		uint32 version;
+		bcm_mkiovar("arp_version", 0, 0, iovbuf, sizeof(iovbuf));
+		retcode = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
+		if (retcode) {
+			DHD_INFO(("%s: fail to get version (maybe version 1:retcode = %d\n",
+				__FUNCTION__, retcode));
+			dhd->arp_version = 1;
+		}
+		else {
+			memcpy(&version, iovbuf, sizeof(version));
+			DHD_INFO(("%s: ARP Version= %x\n", __FUNCTION__, version));
+			dhd->arp_version = version;
+		}
+	}
+}
+
+void
+dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx)
+{
+	int ret = 0;
+	int iov_len = 0;
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+
+	if (dhd == NULL) return;
+	if (dhd->arp_version == 1)
+		idx = 0;
+
+	iov_len = bcm_mkiovar("arp_table_clear", 0, 0, iovbuf, sizeof(iovbuf));
+	if (!iov_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return;
+	}
+	if ((ret  = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx)) < 0)
+		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+}
+
+void
+dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx)
+{
+	int ret = 0;
+	int iov_len = 0;
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+
+	if (dhd == NULL) return;
+	if (dhd->arp_version == 1)
+		idx = 0;
+
+	iov_len = bcm_mkiovar("arp_hostip_clear", 0, 0, iovbuf, sizeof(iovbuf));
+	if (!iov_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return;
+	}
+	if ((ret  = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx)) < 0)
+		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+}
+
+void
+dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx)
+{
+	int iov_len = 0;
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+	int retcode;
+
+
+	if (dhd == NULL) return;
+	if (dhd->arp_version == 1)
+		idx = 0;
+	iov_len = bcm_mkiovar("arp_hostip", (char *)&ipaddr,
+		sizeof(ipaddr), iovbuf, sizeof(iovbuf));
+	if (!iov_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return;
+	}
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
+
+	if (retcode)
+		DHD_TRACE(("%s: ARP ip addr add failed, retcode = %d\n",
+		__FUNCTION__, retcode));
+	else
+		DHD_TRACE(("%s: sARP H ipaddr entry added \n",
+		__FUNCTION__));
+}
+
+int
+dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx)
+{
+	int retcode, i;
+	int iov_len;
+	uint32 *ptr32 = buf;
+	bool clr_bottom = FALSE;
+
+	if (!buf)
+		return -1;
+	if (dhd == NULL) return -1;
+	if (dhd->arp_version == 1)
+		idx = 0;
+
+	iov_len = bcm_mkiovar("arp_hostip", 0, 0, buf, buflen);
+	BCM_REFERENCE(iov_len);
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, buflen, FALSE, idx);
+
+	if (retcode) {
+		DHD_TRACE(("%s: ioctl WLC_GET_VAR error %d\n",
+		__FUNCTION__, retcode));
+
+		return -1;
+	}
+
+	/* zero out the remainder of the buffer past the first empty (zero) entry */
+	for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
+		if (!clr_bottom) {
+			if (*ptr32 == 0)
+				clr_bottom = TRUE;
+		} else {
+			*ptr32 = 0;
+		}
+		ptr32++;
+	}
+
+	return 0;
+}
+#endif /* ARP_OFFLOAD_SUPPORT  */
+
+/*
+ * Neighbor Discovery Offload: enable NDO feature
+ * Called by the ipv6 event handler when an interface comes up or goes down
+ */
+int
+dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable)
+{
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+	int iov_len;
+	int retcode;
+
+	if (dhd == NULL)
+		return -1;
+
+	iov_len = bcm_mkiovar("ndoe", (char *)&ndo_enable, 4, iovbuf, sizeof(iovbuf));
+	if (!iov_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return -1;
+	}
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0);
+	if (retcode)
+		DHD_ERROR(("%s: failed to enabe ndo to %d, retcode = %d\n",
+			__FUNCTION__, ndo_enable, retcode));
+	else
+		DHD_TRACE(("%s: successfully enabed ndo offload to %d\n",
+			__FUNCTION__, ndo_enable));
+
+	return retcode;
+}
+
+/*
+ * Neighbor Discovery Offload: add a host IPv6 address
+ * Called by the ipv6 event handler when an interface comes up
+ */
+int
+dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipv6addr, int idx)
+{
+	int iov_len = 0;
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+	int retcode;
+
+	if (dhd == NULL)
+		return -1;
+
+	iov_len = bcm_mkiovar("nd_hostip", (char *)ipv6addr,
+		IPV6_ADDR_LEN, iovbuf, sizeof(iovbuf));
+	if (!iov_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return -1;
+	}
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
+
+	if (retcode)
+		DHD_ERROR(("%s: ndo ip addr add failed, retcode = %d\n",
+		__FUNCTION__, retcode));
+	else
+		DHD_TRACE(("%s: ndo ipaddr entry added \n",
+		__FUNCTION__));
+
+	return retcode;
+}
+/*
+ * Neighbor Discovery Offload: clear the host IPv6 address table
+ * Called by the ipv6 event handler when an interface goes down
+ */
+int
+dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx)
+{
+	int iov_len = 0;
+	char iovbuf[DHD_IOVAR_BUF_SIZE];
+	int retcode;
+
+	if (dhd == NULL)
+		return -1;
+
+	iov_len = bcm_mkiovar("nd_hostip_clear", NULL,
+		0, iovbuf, sizeof(iovbuf));
+	if (!iov_len) {
+		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+			__FUNCTION__, sizeof(iovbuf)));
+		return -1;
+	}
+	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
+
+	if (retcode)
+		DHD_ERROR(("%s: ndo ip addr remove failed, retcode = %d\n",
+		__FUNCTION__, retcode));
+	else
+		DHD_TRACE(("%s: ndo ipaddr entry removed \n",
+		__FUNCTION__));
+
+	return retcode;
+}
+
+/* send up locally generated event */
+void
+dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
+{
+	switch (ntoh32(event->event_type)) {
+	default:
+		break;
+	}
+
+	/* Call per-port handler. */
+	dhd_sendup_event(dhdp, event, data);
+}
+
+
+/*
+ * Returns TRUE if associated, FALSE otherwise.
+ */
+bool dhd_is_associated(dhd_pub_t *dhd, void *bss_buf, int *retval)
+{
+	char bssid[6], zbuf[6];
+	int ret = -1;
+
+	bzero(bssid, 6);
+	bzero(zbuf, 6);
+
+	ret  = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, (char *)&bssid, ETHER_ADDR_LEN, FALSE, 0);
+	DHD_TRACE((" %s WLC_GET_BSSID ioctl res = %d\n", __FUNCTION__, ret));
+
+	if (ret == BCME_NOTASSOCIATED) {
+		DHD_TRACE(("%s: not associated! res:%d\n", __FUNCTION__, ret));
+	}
+
+	if (retval)
+		*retval = ret;
+
+	if (ret < 0)
+		return FALSE;
+
+	if ((memcmp(bssid, zbuf, ETHER_ADDR_LEN) != 0)) {
+		/* STA is associated; BSSID is non-zero */
+
+		if (bss_buf) {
+			/* return bss if caller provided buf */
+			memcpy(bss_buf, bssid, ETHER_ADDR_LEN);
+		}
+		return TRUE;
+	} else {
+		DHD_TRACE(("%s: WLC_GET_BSSID ioctl returned zero bssid\n", __FUNCTION__));
+		return FALSE;
+	}
+}
+
+
+/* Function to estimate possible DTIM_SKIP value */
+int
+dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd)
+{
+	int bcn_li_dtim = 1; /* default: no DTIM skip */
+	int ret = -1;
+	int dtim_period = 0;
+	int ap_beacon = 0;
+	int allowed_skip_dtim_cnt = 0;
+	/* Check if associated */
+	if (dhd_is_associated(dhd, NULL, NULL) == FALSE) {
+		DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
+		goto exit;
+	}
+
+	/* read associated AP beacon interval */
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD,
+		&ap_beacon, sizeof(ap_beacon), FALSE, 0)) < 0) {
+		DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret));
+		goto exit;
+	}
+
+	/* if the associated AP's beacon interval exceeds MAX_DTIM_SKIP_BEACON_INTERVAL, do no DTIM skip */
+	if (ap_beacon > MAX_DTIM_SKIP_BEACON_INTERVAL) {
+		DHD_ERROR(("%s NO dtim skip for AP with beacon %d ms\n", __FUNCTION__, ap_beacon));
+		goto exit;
+	}
+
+	/* read the associated AP's DTIM period */
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
+		&dtim_period, sizeof(dtim_period), FALSE, 0)) < 0) {
+		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
+		goto exit;
+	}
+
+	/* if not associated, just exit */
+	if (dtim_period == 0) {
+		goto exit;
+	}
+
+	/* attempt to use the platform-defined DTIM skip interval */
+	bcn_li_dtim = dhd->suspend_bcn_li_dtim;
+
+	/* check if sta listen interval fits into AP dtim */
+	if (dtim_period > CUSTOM_LISTEN_INTERVAL) {
+		/* AP DTIM too big for our listen interval: no DTIM skipping */
+		bcn_li_dtim = NO_DTIM_SKIP;
+		DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
+			__FUNCTION__, dtim_period, CUSTOM_LISTEN_INTERVAL));
+		goto exit;
+	}
+
+	if ((dtim_period * ap_beacon * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) {
+		 allowed_skip_dtim_cnt = MAX_DTIM_ALLOWED_INTERVAL / (dtim_period * ap_beacon);
+		 bcn_li_dtim = (allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP;
+	}
+
+	if ((bcn_li_dtim * dtim_period) > CUSTOM_LISTEN_INTERVAL) {
+		/* Round up dtim_skip to fit into STAs Listen Interval */
+		bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / dtim_period);
+		DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
+	}
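+	/* Worked example (values assumed for illustration, ignoring the
+	 * MAX_DTIM_ALLOWED_INTERVAL clamp): with dtim_period 3, a platform
+	 * bcn_li_dtim of 5 and a listen interval of 10, 5 * 3 = 15 exceeds
+	 * the listen interval, so bcn_li_dtim is rounded down to 10 / 3 = 3.
+	 */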
+
+	DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
+		__FUNCTION__, ap_beacon, bcn_li_dtim, dtim_period, CUSTOM_LISTEN_INTERVAL));
+
+exit:
+	return bcn_li_dtim;
+}
+
+/* Check if the mode supports STA MODE */
+bool dhd_support_sta_mode(dhd_pub_t *dhd)
+{
+
+#ifdef  WL_CFG80211
+	if (!(dhd->op_mode & DHD_FLAG_STA_MODE))
+		return FALSE;
+	else
+#endif /* WL_CFG80211 */
+		return TRUE;
+}
+
+#if defined(KEEP_ALIVE)
+int dhd_keep_alive_onoff(dhd_pub_t *dhd)
+{
+	char				buf[256];
+	const char			*str;
+	wl_mkeep_alive_pkt_t	mkeep_alive_pkt = {0};
+	wl_mkeep_alive_pkt_t	*mkeep_alive_pktp;
+	int					buf_len;
+	int					str_len;
+	int res					= -1;
+
+	if (!dhd_support_sta_mode(dhd))
+		return res;
+
+	DHD_TRACE(("%s execution\n", __FUNCTION__));
+
+	str = "mkeep_alive";
+	str_len = strlen(str);
+	strncpy(buf, str, str_len);
+	buf[str_len] = '\0';
+	mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1);
+	mkeep_alive_pkt.period_msec = CUSTOM_KEEP_ALIVE_SETTING;
+	buf_len = str_len + 1;
+	mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
+	mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
+	/* Setup keep alive zero for null packet generation */
+	mkeep_alive_pkt.keep_alive_id = 0;
+	mkeep_alive_pkt.len_bytes = 0;
+	buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
+	bzero(mkeep_alive_pkt.data, sizeof(mkeep_alive_pkt.data));
+	/* Keep-alive attributes are set in a local variable (mkeep_alive_pkt), and
+	 * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
+	 * guarantee that the buffer is properly aligned.
+	 */
+	memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
+
+	res = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
+
+	return res;
+}
+#endif /* defined(KEEP_ALIVE) */
+/* Android ComboSCAN support */
+
+/*
+ *  data parsing from ComboScan tlv list
+*/
+int
+wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, const char token,
+                     int input_size, int *bytes_left)
+{
+	char* str;
+	uint16 short_temp;
+	uint32 int_temp;
+
+	if ((list_str == NULL) || (*list_str == NULL) || (bytes_left == NULL) || (*bytes_left < 0)) {
+		DHD_ERROR(("%s error parameters\n", __FUNCTION__));
+		return -1;
+	}
+	str = *list_str;
+
+	/* Clean all dest bytes */
+	memset(dst, 0, dst_size);
+	while (*bytes_left > 0) {
+
+		if (str[0] != token) {
+			DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n",
+				__FUNCTION__, token, str[0], *bytes_left));
+			return -1;
+		}
+
+		*bytes_left -= 1;
+		str += 1;
+
+		if (input_size == 1) {
+			memcpy(dst, str, input_size);
+		}
+		else if (input_size == 2) {
+			memcpy(&short_temp, str, input_size);
+			short_temp = htod16(short_temp);
+			memcpy(dst, &short_temp, input_size);
+		}
+		else if (input_size == 4) {
+			memcpy(&int_temp, str, input_size);
+			int_temp = htod32(int_temp);
+			memcpy(dst, &int_temp, input_size);
+		}
+
+		*bytes_left -= input_size;
+		str += input_size;
+		*list_str = str;
+		return 1;
+	}
+	return 1;
+}
+
+/*
+ *  channel list parsing from cscan tlv list
+*/
+int
+wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list,
+                             int channel_num, int *bytes_left)
+{
+	char* str;
+	int idx = 0;
+
+	if ((list_str == NULL) || (*list_str == NULL) || (bytes_left == NULL) || (*bytes_left < 0)) {
+		DHD_ERROR(("%s error parameters\n", __FUNCTION__));
+		return -1;
+	}
+	str = *list_str;
+
+	while (*bytes_left > 0) {
+
+		if (str[0] != CSCAN_TLV_TYPE_CHANNEL_IE) {
+			*list_str = str;
+			DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
+			return idx;
+		}
+		/* Get proper CSCAN_TLV_TYPE_CHANNEL_IE */
+		*bytes_left -= 1;
+		str += 1;
+
+		if (str[0] == 0) {
+			/* All channels */
+			channel_list[idx] = 0x0;
+		}
+		else {
+			channel_list[idx] = (uint16)str[0];
+			DHD_TRACE(("%s channel=%d \n", __FUNCTION__,  channel_list[idx]));
+		}
+		*bytes_left -= 1;
+		str += 1;
+
+		if (idx++ > 255) {
+			DHD_ERROR(("%s Too many channels \n", __FUNCTION__));
+			return -1;
+		}
+	}
+
+	*list_str = str;
+	return idx;
+}
+
+/*
+ *  SSIDs list parsing from cscan tlv list
+ */
+int
+wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid, int max, int *bytes_left)
+{
+	char* str;
+	int idx = 0;
+
+	if ((list_str == NULL) || (*list_str == NULL) || (*bytes_left < 0)) {
+		DHD_ERROR(("%s error paramters\n", __FUNCTION__));
+		return -1;
+	}
+	str = *list_str;
+	while (*bytes_left > 0) {
+
+		if (str[0] != CSCAN_TLV_TYPE_SSID_IE) {
+			*list_str = str;
+			DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
+			return idx;
+		}
+
+		/* Get proper CSCAN_TLV_TYPE_SSID_IE */
+		*bytes_left -= 1;
+		str += 1;
+
+		if (str[0] == 0) {
+			/* Broadcast SSID */
+			ssid[idx].SSID_len = 0;
+			memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN);
+			*bytes_left -= 1;
+			str += 1;
+
+			DHD_TRACE(("BROADCAST SCAN  left=%d\n", *bytes_left));
+		}
+		else if (str[0] <= DOT11_MAX_SSID_LEN) {
+			/* Get proper SSID size */
+			ssid[idx].SSID_len = str[0];
+			*bytes_left -= 1;
+			str += 1;
+
+			/* Get SSID */
+			if (ssid[idx].SSID_len > *bytes_left) {
+				DHD_ERROR(("%s out of memory range len=%d but left=%d\n",
+				__FUNCTION__, ssid[idx].SSID_len, *bytes_left));
+				return -1;
+			}
+
+			memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len);
+
+			*bytes_left -= ssid[idx].SSID_len;
+			str += ssid[idx].SSID_len;
+
+			DHD_TRACE(("%s :size=%d left=%d\n",
+				(char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left));
+		}
+		else {
+			DHD_ERROR(("### SSID size more that %d\n", str[0]));
+			return -1;
+		}
+
+		if (idx++ >  max) {
+			DHD_ERROR(("%s number of SSIDs more that %d\n", __FUNCTION__, idx));
+			return -1;
+		}
+	}
+
+	*list_str = str;
+	return idx;
+}
+
+/* Parse a comma-separated list from list_str into the ssid array, starting
+ * at index idx. max specifies the size of the ssid array. Returns the
+ * updated idx; if idx >= max, not all SSIDs fit and the excess were not
+ * copied. Returns -1 on an empty string or an over-long SSID.
+ */
+int
+wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max)
+{
+	char* str, *ptr;
+
+	if ((list_str == NULL) || (*list_str == NULL))
+		return -1;
+
+	for (str = *list_str; str != NULL; str = ptr) {
+
+		/* check for next TAG */
+		if (!strncmp(str, GET_CHANNEL, strlen(GET_CHANNEL))) {
+			*list_str	 = str + strlen(GET_CHANNEL);
+			return idx;
+		}
+
+		if ((ptr = strchr(str, ',')) != NULL) {
+			*ptr++ = '\0';
+		}
+
+		if (strlen(str) > DOT11_MAX_SSID_LEN) {
+			DHD_ERROR(("ssid <%s> exceeds %d\n", str, DOT11_MAX_SSID_LEN));
+			return -1;
+		}
+
+		if (strlen(str) == 0)
+			ssid[idx].SSID_len = 0;
+
+		if (idx < max) {
+			bzero(ssid[idx].SSID, sizeof(ssid[idx].SSID));
+			strncpy((char*)ssid[idx].SSID, str, sizeof(ssid[idx].SSID) - 1);
+			ssid[idx].SSID_len = strlen(str);
+		}
+		idx++;
+	}
+	return idx;
+}
+
+/*
+ * Parse channel list from iwpriv CSCAN
+ */
+int
+wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num)
+{
+	int num;
+	int val;
+	char* str;
+	char* endptr = NULL;
+
+	if ((list_str == NULL) || (*list_str == NULL))
+		return -1;
+
+	str = *list_str;
+	num = 0;
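+	/* Parse numeric channels separated by spaces or commas until the next tag (GET_NPROBE) */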
+	while (strncmp(str, GET_NPROBE, strlen(GET_NPROBE))) {
+		val = (int)strtoul(str, &endptr, 0);
+		if (endptr == str) {
+			printf("could not parse channel number starting at"
+				" substring \"%s\" in list:\n%s\n",
+				str, *list_str);
+			return -1;
+		}
+		str = endptr + strspn(endptr, " ,");
+
+		if (num == channel_num) {
+			DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n",
+				channel_num, *list_str));
+			return -1;
+		}
+
+		channel_list[num++] = (uint16)val;
+	}
+	*list_str = str;
+	return num;
+}
diff --git a/drivers/staging/edison-bcm43340/dhd_custom_gpio.c b/drivers/staging/edison-bcm43340/dhd_custom_gpio.c
new file mode 100644
index 0000000..4e39ec2
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/dhd_custom_gpio.c
@@ -0,0 +1,291 @@
+/*
+* Customer code to add GPIO control during WLAN start/stop
+* Copyright (C) 1999-2014, Broadcom Corporation
+* 
+*      Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2 (the "GPL"),
+* available at http://www.broadcom.com/licenses/GPLv2.php, with the
+* following added to such license:
+* 
+*      As a special exception, the copyright holders of this software give you
+* permission to link this software with independent modules, and to copy and
+* distribute the resulting executable under terms of your choice, provided that
+* you also meet, for each linked independent module, the terms and conditions of
+* the license of that module.  An independent module is a module which is not
+* derived from this software.  The special exception does not apply to any
+* modifications of the software.
+* 
+*      Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a license
+* other than the GPL, without Broadcom's express prior written consent.
+*
+* $Id: dhd_custom_gpio.c 466835 2014-04-01 20:44:55Z $
+*/
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_linux.h>
+
+#include <wlioctl.h>
+#include <wl_iw.h>
+
+#include <asm/intel-mid.h>
+#include <linux/gpio.h>
+
+#if defined(SUPPORT_MULTIPLE_REVISION)
+#include "bcmdevs.h"
+#endif /* SUPPORT_MULTIPLE_REVISION */
+
+#define WL_ERROR(x) printf x
+#define WL_TRACE(x)
+
+#ifdef CUSTOMER_HW
+extern  void bcm_wlan_power_off(int);
+extern  void bcm_wlan_power_on(int);
+#endif /* CUSTOMER_HW */
+
+#if defined(OOB_INTR_ONLY)
+
+#if defined(BCMLXSDMMC)
+extern int sdioh_mmc_irq(int irq);
+#endif /* (BCMLXSDMMC)  */
+
+#if defined(CUSTOMER_HW3)
+#include <mach/gpio.h>
+#endif
+
+/* Customer specific Host GPIO definition */
+static int dhd_oob_gpio_num = -1;
+
+module_param(dhd_oob_gpio_num, int, 0644);
+MODULE_PARM_DESC(dhd_oob_gpio_num, "DHD oob gpio number");
+
+/* This function will return:
+ *  1) return :  Host gpio interrupt number per customer platform
+ *  2) irq_flags_ptr : Type of Host interrupt as Level or Edge
+ *
+ *  NOTE :
+ *  Customer should check his platform definitions
+ *  and his Host Interrupt spec
+ *  to figure out the proper setting for his platform.
+ *  Broadcom provides just reference settings as example.
+ *
+ */
+int dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr)
+{
+	int  host_oob_irq = 0;
+
+#if defined(CUSTOMER_HW2) || defined(CUSTOMER_HW4)
+	host_oob_irq = wifi_platform_get_irq_number(adapter, irq_flags_ptr);
+	if (!wifi_platform_irq_is_fastirq(adapter)) {
+		if (gpio_request(host_oob_irq, "bcm43xx_irq") < 0) {
+			WL_ERROR(("%s: Error on gpio_request bcm43xx_irq: %d\n", __func__, host_oob_irq));
+			return -1;
+		}
+		if (gpio_direction_input(host_oob_irq) < 0) {
+			WL_ERROR(("%s: Error on gpio_direction_input\n", __func__));
+			return -1;
+		}
+		if (gpio_set_debounce(host_oob_irq, 0) < 0) {
+			WL_ERROR(("%s: Error on gpio_set_debounce\n", __func__));
+			return -1;
+		}
+		dhd_oob_gpio_num = host_oob_irq;
+		host_oob_irq = gpio_to_irq(host_oob_irq);
+	}
+#else
+#if defined(CUSTOM_OOB_GPIO_NUM)
+	if (dhd_oob_gpio_num < 0) {
+		dhd_oob_gpio_num = CUSTOM_OOB_GPIO_NUM;
+	}
+#endif /* CUSTOM_OOB_GPIO_NUM */
+
+	if (dhd_oob_gpio_num < 0) {
+		WL_ERROR(("%s: ERROR customer specific Host GPIO is NOT defined \n",
+		__FUNCTION__));
+		return (dhd_oob_gpio_num);
+	}
+
+	WL_ERROR(("%s: customer specific Host GPIO number is (%d)\n",
+	         __FUNCTION__, dhd_oob_gpio_num));
+
+#if defined CUSTOMER_HW3
+	gpio_request(dhd_oob_gpio_num, "oob irq");
+	host_oob_irq = gpio_to_irq(dhd_oob_gpio_num);
+	gpio_direction_input(dhd_oob_gpio_num);
+#endif /* CUSTOMER_HW3 */
+#endif /* CUSTOMER_HW2 || CUSTOMER_HW4 */
+
+	return (host_oob_irq);
+}
+
+void dhd_customer_oob_irq_unmap(void)
+{
+	if (dhd_oob_gpio_num > 0)
+		gpio_free(dhd_oob_gpio_num);
+}
+#endif  /* OOB_INTR_ONLY */
+
+/* Customer function to control hw specific wlan gpios */
+int
+dhd_customer_gpio_wlan_ctrl(void *adapter, int onoff)
+{
+	int err = 0;
+
+	return err;
+}
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+#define MAC_ADDRESS_LEN 12
+
+int wifi_get_mac_addr_intel(unsigned char *buf)
+{
+	int ret = 0;
+	int i;
+	struct file *fp = NULL;
+	unsigned char c_mac[MAC_ADDRESS_LEN];
+	char fname[] = "/config/wifi/mac.txt";
+
+	WL_TRACE(("%s Enter\n", __FUNCTION__));
+
+	fp = dhd_os_open_image(fname);
+	if (fp == NULL) {
+		WL_ERROR(("%s: unable to open %s\n", __FUNCTION__, fname));
+		return 1;
+	}
+
+	if (dhd_os_get_image_block(c_mac, MAC_ADDRESS_LEN, fp) != MAC_ADDRESS_LEN) {
+		WL_ERROR(("%s: Error on reading mac address from %s\n", __FUNCTION__, fname));
+		dhd_os_close_image(fp);
+		return 1;
+	}
+	dhd_os_close_image(fp);
+
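+	/* Convert each pair of ASCII hex digits into one binary MAC byte */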
+	for (i = 0; i < MAC_ADDRESS_LEN; i += 2) {
+		c_mac[i] = bcm_isdigit(c_mac[i]) ? c_mac[i] - '0' : bcm_toupper(c_mac[i]) - 'A' + 10;
+		c_mac[i+1] = bcm_isdigit(c_mac[i+1]) ? c_mac[i+1] - '0' : bcm_toupper(c_mac[i+1]) - 'A' + 10;
+
+		buf[i/2] = c_mac[i]*16 + c_mac[i+1];
+	}
+
+	WL_TRACE(("%s: read from file mac address: %x:%x:%x:%x:%x:%x\n",
+			 __FUNCTION__, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]));
+
+	return ret;
+}
+
+
+/* Function to get custom MAC address */
+int
+dhd_custom_get_mac_address(unsigned char *buf)
+{
+	int ret = 0;
+
+	WL_TRACE(("%s Enter\n", __FUNCTION__));
+	if (!buf)
+		return -EINVAL;
+
+	/* Customer access to MAC address stored outside of DHD driver */
+#if (defined(CUSTOMER_HW2) || defined(CUSTOMER_HW10)) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
+	ret = wifi_get_mac_addr_intel(buf);
+#endif
+
+#ifdef EXAMPLE_GET_MAC
+	/* EXAMPLE code */
+	{
+		struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}};
+		bcopy((char *)&ea_example, buf, sizeof(struct ether_addr));
+	}
+#endif /* EXAMPLE_GET_MAC */
+
+	return ret;
+}
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+/* Customized Locale table : OPTIONAL feature */
+const struct cntry_locales_custom translate_custom_table[] = {
+/* Table should be filled out based on custom platform regulatory requirement */
+/* Table must match BRCM firmware tables */
+/*
+ * Only modify the Country Code which have a problem regarding Intel requirement.
+ * Currently, on BCM4335/4339 when using "FR" it is required to use "FR/5" to enable 802.11ac.
+ *
+ * In addition, since Finland team is also testing 802.11ac,
+ * it is required to use "FR/5" instead of "FI".
+ * No issue to use FR/5 instead of FI since they are EU...
+ *
+ * Fix Korea country code to "KR/4" for 43340, 43241, 4335/4339, 4354 but still use "KR/0" for 4334.
+ *
+ * Fix US country code to "US/0" for 4334, 43340, 43241, 4335/4339, 4354. It enable 5Ghz channels.
+ */
+#ifdef BOARD_INTEL
+#if defined(SUPPORT_MULTIPLE_REVISION)
+	{BCM4334_CHIP_ID,  "US", "US", 0}, /* Translate US into US/0 in order to enable 5GHz band */
+	{BCM4324_CHIP_ID,  "US", "US", 0}, /* Translate US into US/0 in order to enable 5GHz band */
+	{BCM43341_CHIP_ID, "US", "US", 0}, /* Translate US into US/0 in order to enable 5GHz band */
+	{BCM4335_CHIP_ID,  "US", "US", 0}, /* Translate US into US/0 in order to enable 5GHz band */
+	{BCM4339_CHIP_ID,  "US", "US", 0}, /* Translate US into US/0 in order to enable 5GHz band */
+	{BCM4354_CHIP_ID,  "US", "US", 0}, /* Translate US into US/0 in order to enable 5GHz band */
+	{BCM4335_CHIP_ID,  "FR", "FR", 5}, /* Translate FR into FR/5 in order to enable 802.11ac */
+	{BCM4339_CHIP_ID,  "FR", "FR", 5}, /* Translate FR into FR/5 in order to enable 802.11ac */
+	{BCM4354_CHIP_ID,  "FR", "FR", 5}, /* Translate FR into FR/5 in order to enable 802.11ac */
+	{BCM4335_CHIP_ID,  "FI", "FR", 5}, /* Translate FI into FR/5 in order to enable 802.11ac */
+	{BCM4339_CHIP_ID,  "FI", "FR", 5}, /* Translate FI into FR/5 in order to enable 802.11ac */
+	{BCM4354_CHIP_ID,  "FI", "FR", 5}, /* Translate FI into FR/5 in order to enable 802.11ac */
+	{BCM4324_CHIP_ID,  "KR", "KR", 4}, /* Enable correct 5Ghz channel */
+	{BCM43341_CHIP_ID, "KR", "KR", 4}, /* Enable correct 5Ghz channel */
+	{BCM4335_CHIP_ID,  "KR", "KR", 4}, /* Enable correct 5Ghz channel */
+	{BCM4339_CHIP_ID,  "KR", "KR", 4}, /* Enable correct 5Ghz channel */
+	{BCM4354_CHIP_ID,  "KR", "KR", 4}, /* Enable correct 5Ghz channel */
+	{BCM4339_CHIP_ID,  "CN", "CN", 24}, /* Enable correct 5Ghz channel for CN */
+	{BCM4354_CHIP_ID,  "CN", "CN", 24}, /* Enable correct 5Ghz channel for CN */
+#else /* SUPPORT_MULTIPLE_REVISION */
+	{"US", "US", 0}, /* Translate US into US/0 in order to enable 5GHz band */
+	{"FR", "FR", 5}, /* Translate FR into FR/5 in order to enable 802.11ac */
+	{"FI", "FR", 5}, /* Translate FI into FR/5 in order to enable 802.11ac */
+	{"KR", "KR", 4}, /* Enable correct 5Ghz channel */
+#endif /* SUPPORT_MULTIPLE_REVISION */
+#endif /* BOARD_INTEL */
+};
+
+/* Customized Locale convertor
+*  input : ISO 3166-1 country abbreviation
+*  output: customized cspec
+*  If no matching table entry is found, cspec is left unchanged.
+*/
+void get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec)
+{
+#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+
+	int size, i;
+
+	size = ARRAYSIZE(translate_custom_table);
+
+	if (cspec == NULL)
+		 return;
+
+	if (size == 0)
+		 return;
+
+	for (i = 0; i < size; i++) {
+		uint chip = 0;
+#if defined(SUPPORT_MULTIPLE_REVISION)
+		chip = bcm_get_chip_id();
+		if (chip != translate_custom_table[i].chip_id) {
+			continue;
+		}
+#endif
+		if (strcmp(country_iso_code, translate_custom_table[i].iso_abbrev) == 0) {
+			memcpy(cspec->ccode,
+				translate_custom_table[i].custom_locale, WLC_CNTRY_BUF_SZ);
+			cspec->rev = translate_custom_table[i].custom_locale_rev;
+			printk(KERN_ERR "%s: chip = %x => ccode = %s, regrev = %d\n", __func__, chip, cspec->ccode, cspec->rev);
+			return;
+		}
+	}
+	return;
+#endif /* defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */
+}
diff --git a/drivers/staging/edison-bcm43340/dhd_dbg.h b/drivers/staging/edison-bcm43340/dhd_dbg.h
new file mode 100644
index 0000000..b9f1c6f
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/dhd_dbg.h
@@ -0,0 +1,123 @@
+/*
+ * Debug/trace/assert driver definitions for Dongle Host Driver.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_dbg.h 424863 2013-09-19 20:06:14Z $
+ */
+
+#ifndef _dhd_dbg_
+#define _dhd_dbg_
+
+#define USE_NET_RATELIMIT		1
+
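+/*
+ * Each DHD_<CATEGORY>(args) macro below prints only when the matching
+ * DHD_<CATEGORY>_VAL bit is set in dhd_msg_level. Note that args must
+ * carry its own parentheses at the call site, e.g.
+ * DHD_TRACE(("%s: enter\n", __FUNCTION__));
+ */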
+#if defined(DHD_DEBUG)
+
+#define DHD_ERROR(args)		do {if ((dhd_msg_level & DHD_ERROR_VAL) && USE_NET_RATELIMIT) \
+								printf args;} while (0)
+#define DHD_TRACE(args)		do {if (dhd_msg_level & DHD_TRACE_VAL) printf args;} while (0)
+#define DHD_INFO(args)		do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0)
+#define DHD_DATA(args)		do {if (dhd_msg_level & DHD_DATA_VAL) printf args;} while (0)
+#define DHD_CTL(args)		do {if (dhd_msg_level & DHD_CTL_VAL) printf args;} while (0)
+#define DHD_TIMER(args)		do {if (dhd_msg_level & DHD_TIMER_VAL) printf args;} while (0)
+#define DHD_HDRS(args)		do {if (dhd_msg_level & DHD_HDRS_VAL) printf args;} while (0)
+#define DHD_BYTES(args)		do {if (dhd_msg_level & DHD_BYTES_VAL) printf args;} while (0)
+#define DHD_INTR(args)		do {if (dhd_msg_level & DHD_INTR_VAL) printf args;} while (0)
+#define DHD_GLOM(args)		do {if (dhd_msg_level & DHD_GLOM_VAL) printf args;} while (0)
+#define DHD_EVENT(args)		do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0)
+#define DHD_BTA(args)		do {if (dhd_msg_level & DHD_BTA_VAL) printf args;} while (0)
+#define DHD_ISCAN(args)		do {if (dhd_msg_level & DHD_ISCAN_VAL) printf args;} while (0)
+#define DHD_ARPOE(args)		do {if (dhd_msg_level & DHD_ARPOE_VAL) printf args;} while (0)
+#define DHD_REORDER(args)	do {if (dhd_msg_level & DHD_REORDER_VAL) printf args;} while (0)
+#define DHD_PNO(args)		do {if (dhd_msg_level & DHD_PNO_VAL) printf args;} while (0)
+
+#define DHD_TRACE_HW4	DHD_TRACE
+
+#define DHD_ERROR_ON()		(dhd_msg_level & DHD_ERROR_VAL)
+#define DHD_TRACE_ON()		(dhd_msg_level & DHD_TRACE_VAL)
+#define DHD_INFO_ON()		(dhd_msg_level & DHD_INFO_VAL)
+#define DHD_DATA_ON()		(dhd_msg_level & DHD_DATA_VAL)
+#define DHD_CTL_ON()		(dhd_msg_level & DHD_CTL_VAL)
+#define DHD_TIMER_ON()		(dhd_msg_level & DHD_TIMER_VAL)
+#define DHD_HDRS_ON()		(dhd_msg_level & DHD_HDRS_VAL)
+#define DHD_BYTES_ON()		(dhd_msg_level & DHD_BYTES_VAL)
+#define DHD_INTR_ON()		(dhd_msg_level & DHD_INTR_VAL)
+#define DHD_GLOM_ON()		(dhd_msg_level & DHD_GLOM_VAL)
+#define DHD_EVENT_ON()		(dhd_msg_level & DHD_EVENT_VAL)
+#define DHD_BTA_ON()		(dhd_msg_level & DHD_BTA_VAL)
+#define DHD_ISCAN_ON()		(dhd_msg_level & DHD_ISCAN_VAL)
+#define DHD_ARPOE_ON()		(dhd_msg_level & DHD_ARPOE_VAL)
+#define DHD_REORDER_ON()	(dhd_msg_level & DHD_REORDER_VAL)
+#define DHD_NOCHECKDIED_ON()	(dhd_msg_level & DHD_NOCHECKDIED_VAL)
+#define DHD_PNO_ON()		(dhd_msg_level & DHD_PNO_VAL)
+
+#else /* !DHD_DEBUG */
+
+#define DHD_ERROR(args)		do {if (USE_NET_RATELIMIT) printf args;} while (0)
+#define DHD_TRACE(args)
+#define DHD_INFO(args)
+#define DHD_DATA(args)
+#define DHD_CTL(args)
+#define DHD_TIMER(args)
+#define DHD_HDRS(args)
+#define DHD_BYTES(args)
+#define DHD_INTR(args)
+#define DHD_GLOM(args)
+#define DHD_EVENT(args)
+#define DHD_BTA(args)
+#define DHD_ISCAN(args)
+#define DHD_ARPOE(args)
+#define DHD_REORDER(args)
+#define DHD_PNO(args)
+
+#define DHD_TRACE_HW4	DHD_TRACE
+
+#define DHD_ERROR_ON()		0
+#define DHD_TRACE_ON()		0
+#define DHD_INFO_ON()		0
+#define DHD_DATA_ON()		0
+#define DHD_CTL_ON()		0
+#define DHD_TIMER_ON()		0
+#define DHD_HDRS_ON()		0
+#define DHD_BYTES_ON()		0
+#define DHD_INTR_ON()		0
+#define DHD_GLOM_ON()		0
+#define DHD_EVENT_ON()		0
+#define DHD_BTA_ON()		0
+#define DHD_ISCAN_ON()		0
+#define DHD_ARPOE_ON()		0
+#define DHD_REORDER_ON()	0
+#define DHD_NOCHECKDIED_ON()	0
+#define DHD_PNO_ON()		0
+
+#endif /* DHD_DEBUG */
+
+#define DHD_LOG(args)
+
+#define DHD_BLOG(cp, size)
+
+#define DHD_NONE(args)
+extern int dhd_msg_level;
+
+/* Defines msg bits */
+#include <dhdioctl.h>
+
+#endif /* _dhd_dbg_ */
diff --git a/drivers/staging/edison-bcm43340/dhd_ip.c b/drivers/staging/edison-bcm43340/dhd_ip.c
new file mode 100644
index 0000000..3db2ed8
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/dhd_ip.c
@@ -0,0 +1,953 @@
+/*
+ * IP Packet Parser Module.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_ip.c 457995 2014-02-25 13:53:31Z $
+ */
+#include <typedefs.h>
+#include <osl.h>
+
+#include <proto/ethernet.h>
+#include <proto/vlan.h>
+#include <proto/802.3.h>
+#include <proto/bcmip.h>
+#include <bcmendian.h>
+
+#include <dhd_dbg.h>
+
+#include <dhd_ip.h>
+
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_bus.h>
+#include <proto/bcmtcp.h>
+#endif /* DHDTCPACK_SUPPRESS */
+
+/* special values */
+/* 802.3 llc/snap header */
+static const uint8 llc_snap_hdr[SNAP_HDR_LEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
+
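+/*
+ * Classify the IPv4 fragmentation state of an Ethernet frame:
+ * DF set -> NONE; more-fragments clear -> LAST (this also covers
+ * unfragmented packets); otherwise FIRST or CONT depending on whether
+ * the fragment offset is zero.
+ */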
+pkt_frag_t pkt_frag_info(osl_t *osh, void *p)
+{
+	uint8 *frame;
+	int length;
+	uint8 *pt;			/* Pointer to type field */
+	uint16 ethertype;
+	struct ipv4_hdr *iph;		/* IP frame pointer */
+	int ipl;			/* IP frame length */
+	uint16 iph_frag;
+
+	ASSERT(osh && p);
+
+	frame = PKTDATA(osh, p);
+	length = PKTLEN(osh, p);
+
+	/* Process Ethernet II or SNAP-encapsulated 802.3 frames */
+	if (length < ETHER_HDR_LEN) {
+		DHD_INFO(("%s: short eth frame (%d)\n", __FUNCTION__, length));
+		return DHD_PKT_FRAG_NONE;
+	} else if (ntoh16(*(uint16 *)(frame + ETHER_TYPE_OFFSET)) >= ETHER_TYPE_MIN) {
+		/* Frame is Ethernet II */
+		pt = frame + ETHER_TYPE_OFFSET;
+	} else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN &&
+	           !bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) {
+		pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN;
+	} else {
+		DHD_INFO(("%s: non-SNAP 802.3 frame\n", __FUNCTION__));
+		return DHD_PKT_FRAG_NONE;
+	}
+
+	ethertype = ntoh16(*(uint16 *)pt);
+
+	/* Skip VLAN tag, if any */
+	if (ethertype == ETHER_TYPE_8021Q) {
+		pt += VLAN_TAG_LEN;
+
+		if (pt + ETHER_TYPE_LEN > frame + length) {
+			DHD_INFO(("%s: short VLAN frame (%d)\n", __FUNCTION__, length));
+			return DHD_PKT_FRAG_NONE;
+		}
+
+		ethertype = ntoh16(*(uint16 *)pt);
+	}
+
+	if (ethertype != ETHER_TYPE_IP) {
+		DHD_INFO(("%s: non-IP frame (ethertype 0x%x, length %d)\n",
+			__FUNCTION__, ethertype, length));
+		return DHD_PKT_FRAG_NONE;
+	}
+
+	iph = (struct ipv4_hdr *)(pt + ETHER_TYPE_LEN);
+	ipl = (uint)(length - (pt + ETHER_TYPE_LEN - frame));
+
+	/* We support IPv4 only */
+	if ((ipl < IPV4_OPTIONS_OFFSET) || (IP_VER(iph) != IP_VER_4)) {
+		DHD_INFO(("%s: short frame (%d) or non-IPv4\n", __FUNCTION__, ipl));
+		return DHD_PKT_FRAG_NONE;
+	}
+
+	iph_frag = ntoh16(iph->frag);
+
+	if (iph_frag & IPV4_FRAG_DONT) {
+		return DHD_PKT_FRAG_NONE;
+	} else if ((iph_frag & IPV4_FRAG_MORE) == 0) {
+		return DHD_PKT_FRAG_LAST;
+	} else {
+		return (iph_frag & IPV4_FRAG_OFFSET_MASK) ? DHD_PKT_FRAG_CONT : DHD_PKT_FRAG_FIRST;
+	}
+}
+
+#ifdef DHDTCPACK_SUPPRESS
+
+typedef struct {
+	void *pkt_in_q;			/* TCP ACK packet that is already in txq or DelayQ */
+	void *pkt_ether_hdr;	/* Ethernet header pointer of pkt_in_q */
+} tcpack_info_t;
+
+typedef struct _tdata_psh_info_t {
+	uint32 end_seq;			/* end seq# of a received TCP PSH DATA pkt */
+	struct _tdata_psh_info_t *next;	/* next pointer of the link chain */
+} tdata_psh_info_t;
+
+typedef struct {
+	uint8 src_ip_addr[IPV4_ADDR_LEN];	/* SRC ip addrs of this TCP stream */
+	uint8 dst_ip_addr[IPV4_ADDR_LEN];	/* DST ip addrs of this TCP stream */
+	uint8 src_tcp_port[TCP_PORT_LEN];	/* SRC tcp ports of this TCP stream */
+	uint8 dst_tcp_port[TCP_PORT_LEN];	/* DST tcp ports of this TCP stream */
+	tdata_psh_info_t *tdata_psh_info_head;	/* Head of received TCP PSH DATA chain */
+	tdata_psh_info_t *tdata_psh_info_tail;	/* Tail of received TCP PSH DATA chain */
+	uint32 last_used_time;	/* The last time this tcpdata_info was used (in ms) */
+} tcpdata_info_t;
+
+/* TCPACK SUPPRESS module */
+typedef struct {
+	int tcpack_info_cnt;
+	tcpack_info_t tcpack_info_tbl[TCPACK_INFO_MAXNUM];	/* Info of TCP ACK to send */
+	int tcpdata_info_cnt;
+	tcpdata_info_t tcpdata_info_tbl[TCPDATA_INFO_MAXNUM];	/* Info of received TCP DATA */
+	tdata_psh_info_t *tdata_psh_info_pool;	/* Pointer to tdata_psh_info elements pool */
+	tdata_psh_info_t *tdata_psh_info_free;	/* free tdata_psh_info elements chain in pool */
+#ifdef DHDTCPACK_SUP_DBG
+	int psh_info_enq_num;	/* Number of free TCP PSH DATA info elements in pool */
+#endif /* DHDTCPACK_SUP_DBG */
+} tcpack_sup_module_t;
+
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+counter_tbl_t tack_tbl = {"tcpACK", 0, 1000, 10, {0, }, 1};
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+
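+/*
+ * The tdata_psh_info pool is managed as a singly linked free list:
+ * _enq pushes an element onto the head, _deq pops one off it.
+ */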
+static void
+_tdata_psh_info_pool_enq(tcpack_sup_module_t *tcpack_sup_mod,
+	tdata_psh_info_t *tdata_psh_info)
+{
+	if ((tcpack_sup_mod == NULL) || (tdata_psh_info == NULL)) {
+		DHD_ERROR(("%s %d: ERROR %p %p\n", __FUNCTION__, __LINE__,
+			tcpack_sup_mod, tdata_psh_info));
+		return;
+	}
+
+	ASSERT(tdata_psh_info->next == NULL);
+	tdata_psh_info->next = tcpack_sup_mod->tdata_psh_info_free;
+	tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info;
+#ifdef DHDTCPACK_SUP_DBG
+	tcpack_sup_mod->psh_info_enq_num++;
+#endif
+}
+
+static tdata_psh_info_t*
+_tdata_psh_info_pool_deq(tcpack_sup_module_t *tcpack_sup_mod)
+{
+	tdata_psh_info_t *tdata_psh_info = NULL;
+
+	if (tcpack_sup_mod == NULL) {
+		DHD_ERROR(("%s %d: ERROR %p\n", __FUNCTION__, __LINE__,
+			tcpack_sup_mod));
+		return NULL;
+	}
+
+	tdata_psh_info = tcpack_sup_mod->tdata_psh_info_free;
+	if (tdata_psh_info == NULL)
+		DHD_ERROR(("%s %d: out of free tdata_psh_info elements\n", __FUNCTION__, __LINE__));
+	else {
+		tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info->next;
+		tdata_psh_info->next = NULL;
+#ifdef DHDTCPACK_SUP_DBG
+		tcpack_sup_mod->psh_info_enq_num--;
+#endif /* DHDTCPACK_SUP_DBG */
+	}
+
+	return tdata_psh_info;
+}
+
+static int _tdata_psh_info_pool_init(dhd_pub_t *dhdp,
+	tcpack_sup_module_t *tcpack_sup_mod)
+{
+	tdata_psh_info_t *tdata_psh_info_pool = NULL;
+	uint i;
+
+	DHD_TRACE(("%s %d: Enter\n", __FUNCTION__, __LINE__));
+
+	if (tcpack_sup_mod == NULL)
+		return BCME_ERROR;
+
+	ASSERT(tcpack_sup_mod->tdata_psh_info_pool == NULL);
+	ASSERT(tcpack_sup_mod->tdata_psh_info_free == NULL);
+
+	tdata_psh_info_pool =
+		MALLOC(dhdp->osh, sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM);
+
+	if (tdata_psh_info_pool == NULL)
+		return BCME_NOMEM;
+	bzero(tdata_psh_info_pool, sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM);
+#ifdef DHDTCPACK_SUP_DBG
+	tcpack_sup_mod->psh_info_enq_num = 0;
+#endif /* DHDTCPACK_SUP_DBG */
+
+	/* Enqueue newly allocated tcpdata psh info elements to the pool */
+	for (i = 0; i < TCPDATA_PSH_INFO_MAXNUM; i++)
+		_tdata_psh_info_pool_enq(tcpack_sup_mod, &tdata_psh_info_pool[i]);
+
+	ASSERT(tcpack_sup_mod->tdata_psh_info_free != NULL);
+	tcpack_sup_mod->tdata_psh_info_pool = tdata_psh_info_pool;
+
+	return BCME_OK;
+}
+
+static void _tdata_psh_info_pool_deinit(dhd_pub_t *dhdp,
+	tcpack_sup_module_t *tcpack_sup_mod)
+{
+	uint i;
+	tdata_psh_info_t *tdata_psh_info;
+
+	DHD_TRACE(("%s %d: Enter\n", __FUNCTION__, __LINE__));
+
+	if (tcpack_sup_mod == NULL) {
+		DHD_ERROR(("%s %d: ERROR tcpack_sup_mod NULL!\n",
+			__FUNCTION__, __LINE__));
+		return;
+	}
+
+	for (i = 0; i < tcpack_sup_mod->tcpdata_info_cnt; i++) {
+		tcpdata_info_t *tcpdata_info = &tcpack_sup_mod->tcpdata_info_tbl[i];
+		/* Return tdata_psh_info elements allocated to each tcpdata_info to the pool */
+		while ((tdata_psh_info = tcpdata_info->tdata_psh_info_head)) {
+			tcpdata_info->tdata_psh_info_head = tdata_psh_info->next;
+			tdata_psh_info->next = NULL;
+			_tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info);
+		}
+		tcpdata_info->tdata_psh_info_tail = NULL;
+	}
+#ifdef DHDTCPACK_SUP_DBG
+	DHD_ERROR(("%s %d: PSH INFO ENQ %d\n",
+		__FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num));
+#endif /* DHDTCPACK_SUP_DBG */
+
+	i = 0;
+	/* Be sure we recollected all tdata_psh_info elements */
+	while ((tdata_psh_info = tcpack_sup_mod->tdata_psh_info_free)) {
+		tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info->next;
+		tdata_psh_info->next = NULL;
+		i++;
+	}
+	ASSERT(i == TCPDATA_PSH_INFO_MAXNUM);
+	MFREE(dhdp->osh, tcpack_sup_mod->tdata_psh_info_pool,
+		sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM);
+	tcpack_sup_mod->tdata_psh_info_pool = NULL;
+
+	return;
+}
+
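+/*
+ * Switch the TCP ACK suppression mode. The module state is allocated on
+ * the first transition away from TCPACK_SUP_OFF and freed when the mode
+ * is set back to TCPACK_SUP_OFF; leaving TCPACK_SUP_DELAYTX also tears
+ * down the PSH-data bookkeeping.
+ */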
+int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 mode)
+{
+	int ret = BCME_OK;
+
+	dhd_os_tcpacklock(dhdp);
+
+	if (dhdp->tcpack_sup_mode == mode) {
+		DHD_ERROR(("%s %d: already set to %d\n", __FUNCTION__, __LINE__, mode));
+		goto exit;
+	}
+
+	if (mode > TCPACK_SUP_DELAYTX) {
+		DHD_ERROR(("%s %d: Invalid mode %d\n", __FUNCTION__, __LINE__, mode));
+		ret = BCME_BADARG;
+		goto exit;
+	}
+
+	DHD_TRACE(("%s: %d -> %d\n",
+		__FUNCTION__, dhdp->tcpack_sup_mode, mode));
+
+	/* Old tcpack_sup_mode is TCPACK_SUP_DELAYTX */
+	if (dhdp->tcpack_sup_mode == TCPACK_SUP_DELAYTX) {
+		tcpack_sup_module_t *tcpack_sup_mod = dhdp->tcpack_sup_module;
+		/* We won't need the tdata_psh_info pool and tcpdata_info_tbl anymore */
+		_tdata_psh_info_pool_deinit(dhdp, tcpack_sup_mod);
+		tcpack_sup_mod->tcpdata_info_cnt = 0;
+		bzero(tcpack_sup_mod->tcpdata_info_tbl,
+			sizeof(tcpdata_info_t) * TCPDATA_INFO_MAXNUM);
+		/* For half duplex bus interface, tx precedes rx by default */
+		if (dhdp->bus)
+			dhd_bus_set_dotxinrx(dhdp->bus, TRUE);
+	}
+
+	dhdp->tcpack_sup_mode = mode;
+
+	if (mode == TCPACK_SUP_OFF) {
+		ASSERT(dhdp->tcpack_sup_module != NULL);
+		MFREE(dhdp->osh, dhdp->tcpack_sup_module, sizeof(tcpack_sup_module_t));
+		dhdp->tcpack_sup_module = NULL;
+		goto exit;
+	}
+
+	if (dhdp->tcpack_sup_module == NULL) {
+		tcpack_sup_module_t *tcpack_sup_mod =
+			MALLOC(dhdp->osh, sizeof(tcpack_sup_module_t));
+		if (tcpack_sup_mod == NULL) {
+			DHD_ERROR(("%s %d: No MEM\n", __FUNCTION__, __LINE__));
+			dhdp->tcpack_sup_mode = TCPACK_SUP_OFF;
+			ret = BCME_NOMEM;
+			goto exit;
+		}
+		bzero(tcpack_sup_mod, sizeof(tcpack_sup_module_t));
+		dhdp->tcpack_sup_module = tcpack_sup_mod;
+	}
+
+	if (mode == TCPACK_SUP_DELAYTX) {
+		ret = _tdata_psh_info_pool_init(dhdp, dhdp->tcpack_sup_module);
+		if (ret != BCME_OK)
+			DHD_ERROR(("%s %d: pool init fail with %d\n", __FUNCTION__, __LINE__, ret));
+		else if (dhdp->bus)
+			dhd_bus_set_dotxinrx(dhdp->bus, FALSE);
+	}
+
+exit:
+	dhd_os_tcpackunlock(dhdp);
+	return ret;
+}
+
+void
+dhd_tcpack_info_tbl_clean(dhd_pub_t *dhdp)
+{
+	tcpack_sup_module_t *tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+	if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF)
+		goto exit;
+
+	dhd_os_tcpacklock(dhdp);
+
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n",
+			__FUNCTION__, __LINE__));
+		dhd_os_tcpackunlock(dhdp);
+		goto exit;
+	}
+
+	tcpack_sup_mod->tcpack_info_cnt = 0;
+	bzero(tcpack_sup_mod->tcpack_info_tbl, sizeof(tcpack_info_t) * TCPACK_INFO_MAXNUM);
+	dhd_os_tcpackunlock(dhdp);
+
+exit:
+	return;
+}
+
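+/*
+ * Called when a queued packet is actually handed to the bus: if it is a
+ * tracked TCP ACK, drop its tcpack_info entry so it can no longer be
+ * replaced by a newer ACK.
+ */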
+inline int dhd_tcpack_check_xmit(dhd_pub_t *dhdp, void *pkt)
+{
+	uint8 i;
+	tcpack_sup_module_t *tcpack_sup_mod;
+	tcpack_info_t *tcpack_info_tbl;
+	int tbl_cnt;
+	uint pushed_len;
+	int ret = BCME_OK;
+	void *pdata;
+	uint32 pktlen;
+
+	if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF)
+		goto exit;
+
+	pdata = PKTDATA(dhdp->osh, pkt);
+
+	/* Length of BDC(+WLFC) headers pushed */
+	pushed_len = BDC_HEADER_LEN + (((struct bdc_header *)pdata)->dataOffset * 4);
+	pktlen = PKTLEN(dhdp->osh, pkt) - pushed_len;
+
+	if (pktlen < TCPACKSZMIN || pktlen > TCPACKSZMAX) {
+		DHD_TRACE(("%s %d: Too short or long length %d to be TCP ACK\n",
+			__FUNCTION__, __LINE__, pktlen));
+		goto exit;
+	}
+
+	dhd_os_tcpacklock(dhdp);
+	tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+		ret = BCME_ERROR;
+		dhd_os_tcpackunlock(dhdp);
+		goto exit;
+	}
+	tbl_cnt = tcpack_sup_mod->tcpack_info_cnt;
+	tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl;
+
+	ASSERT(tbl_cnt <= TCPACK_INFO_MAXNUM);
+
+	for (i = 0; i < tbl_cnt; i++) {
+		if (tcpack_info_tbl[i].pkt_in_q == pkt) {
+			DHD_TRACE(("%s %d: pkt %p sent out. idx %d, tbl_cnt %d\n",
+				__FUNCTION__, __LINE__, pkt, i, tbl_cnt));
+			/* This pkt is being transmitted, so remove its tcp_ack_info entry;
+			 * the last table entry is moved into the vacated slot to keep the
+			 * table dense.
+			 */
+			if (i < tbl_cnt - 1) {
+				bcopy(&tcpack_info_tbl[tbl_cnt - 1],
+					&tcpack_info_tbl[i], sizeof(tcpack_info_t));
+			}
+			bzero(&tcpack_info_tbl[tbl_cnt - 1], sizeof(tcpack_info_t));
+			if (--tcpack_sup_mod->tcpack_info_cnt < 0) {
+				DHD_ERROR(("%s %d: ERROR!!! tcp_ack_info_cnt %d\n",
+					__FUNCTION__, __LINE__, tcpack_sup_mod->tcpack_info_cnt));
+				ret = BCME_ERROR;
+			}
+			break;
+		}
+	}
+	dhd_os_tcpackunlock(dhdp);
+
+exit:
+	return ret;
+}
+
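+/*
+ * Walk the PSH-data list of the TCP stream matching this ACK (with
+ * src/dst reversed) and release every entry whose end sequence number
+ * is covered by tcp_ack_num. Returns TRUE if any PSH data was acked.
+ */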
+static INLINE bool dhd_tcpdata_psh_acked(dhd_pub_t *dhdp, uint8 *ip_hdr,
+	uint8 *tcp_hdr, uint32 tcp_ack_num)
+{
+	tcpack_sup_module_t *tcpack_sup_mod;
+	int i;
+	tcpdata_info_t *tcpdata_info = NULL;
+	tdata_psh_info_t *tdata_psh_info = NULL;
+	bool ret = FALSE;
+
+	if (dhdp->tcpack_sup_mode != TCPACK_SUP_DELAYTX)
+		goto exit;
+
+	tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+		goto exit;
+	}
+
+	DHD_TRACE(("%s %d: IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+		" TCP port %d %d, ack %u\n", __FUNCTION__, __LINE__,
+		IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
+		IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
+		ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
+		ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]),
+		tcp_ack_num));
+
+	for (i = 0; i < tcpack_sup_mod->tcpdata_info_cnt; i++) {
+		tcpdata_info_t *tcpdata_info_tmp = &tcpack_sup_mod->tcpdata_info_tbl[i];
+		DHD_TRACE(("%s %d: data info[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+			" TCP port %d %d\n", __FUNCTION__, __LINE__, i,
+			IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->src_ip_addr)),
+			IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->dst_ip_addr)),
+			ntoh16_ua(tcpdata_info_tmp->src_tcp_port),
+			ntoh16_ua(tcpdata_info_tmp->dst_tcp_port)));
+
+		/* An ACK flows in the reverse direction of its data stream, so match
+		 * this ACK's src IP/port against the stream's dst and vice versa.
+		 */
+		if (memcmp(&ip_hdr[IPV4_SRC_IP_OFFSET],
+			tcpdata_info_tmp->dst_ip_addr, IPV4_ADDR_LEN) == 0 &&
+			memcmp(&ip_hdr[IPV4_DEST_IP_OFFSET],
+			tcpdata_info_tmp->src_ip_addr, IPV4_ADDR_LEN) == 0 &&
+			memcmp(&tcp_hdr[TCP_SRC_PORT_OFFSET],
+			tcpdata_info_tmp->dst_tcp_port, TCP_PORT_LEN) == 0 &&
+			memcmp(&tcp_hdr[TCP_DEST_PORT_OFFSET],
+			tcpdata_info_tmp->src_tcp_port, TCP_PORT_LEN) == 0) {
+			tcpdata_info = tcpdata_info_tmp;
+			break;
+		}
+	}
+
+	if (tcpdata_info == NULL) {
+		DHD_TRACE(("%s %d: no tcpdata_info!\n", __FUNCTION__, __LINE__));
+		goto exit;
+	}
+
+	if (tcpdata_info->tdata_psh_info_head == NULL) {
+		DHD_TRACE(("%s %d: No PSH DATA to be acked!\n", __FUNCTION__, __LINE__));
+	}
+
+	while ((tdata_psh_info = tcpdata_info->tdata_psh_info_head)) {
+		if (IS_TCPSEQ_GE(tcp_ack_num, tdata_psh_info->end_seq)) {
+			DHD_TRACE(("%s %d: PSH ACKED! %u >= %u\n",
+				__FUNCTION__, __LINE__, tcp_ack_num, tdata_psh_info->end_seq));
+			tcpdata_info->tdata_psh_info_head = tdata_psh_info->next;
+			tdata_psh_info->next = NULL;
+			_tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info);
+			ret = TRUE;
+		} else
+			break;
+	}
+	if (tdata_psh_info == NULL)
+		tcpdata_info->tdata_psh_info_tail = NULL;
+
+#ifdef DHDTCPACK_SUP_DBG
+	DHD_TRACE(("%s %d: PSH INFO ENQ %d\n",
+		__FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num));
+#endif /* DHDTCPACK_SUP_DBG */
+
+exit:
+	return ret;
+}
+
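+/*
+ * Inspect an outgoing packet: if it is a pure TCP ACK (ACK flag only, no
+ * payload) whose ACK number supersedes an ACK already queued for the same
+ * stream, copy its headers into the queued packet, free the new packet and
+ * return TRUE so the caller does not transmit it. Otherwise, when there is
+ * room, record it in tcpack_info_tbl and return FALSE.
+ */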
+bool
+dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt)
+{
+	uint8 *new_ether_hdr;	/* Ethernet header of the new packet */
+	uint16 new_ether_type;	/* Ethernet type of the new packet */
+	uint8 *new_ip_hdr;		/* IP header of the new packet */
+	uint8 *new_tcp_hdr;		/* TCP header of the new packet */
+	uint32 new_ip_hdr_len;	/* IP header length of the new packet */
+	uint32 cur_framelen;
+	uint32 new_tcp_ack_num;		/* TCP acknowledge number of the new packet */
+	uint16 new_ip_total_len;	/* Total length of IP packet for the new packet */
+	uint32 new_tcp_hdr_len;		/* TCP header length of the new packet */
+	tcpack_sup_module_t *tcpack_sup_mod;
+	tcpack_info_t *tcpack_info_tbl;
+	int i;
+	bool ret = FALSE;
+	bool set_dotxinrx = TRUE;
+
+	if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF)
+		goto exit;
+
+	new_ether_hdr = PKTDATA(dhdp->osh, pkt);
+	cur_framelen = PKTLEN(dhdp->osh, pkt);
+
+	if (cur_framelen < TCPACKSZMIN || cur_framelen > TCPACKSZMAX) {
+		DHD_TRACE(("%s %d: Too short or long length %d to be TCP ACK\n",
+			__FUNCTION__, __LINE__, cur_framelen));
+		goto exit;
+	}
+
+	new_ether_type = new_ether_hdr[12] << 8 | new_ether_hdr[13];
+
+	if (new_ether_type != ETHER_TYPE_IP) {
+		DHD_TRACE(("%s %d: Not an IP packet 0x%x\n",
+			__FUNCTION__, __LINE__, new_ether_type));
+		goto exit;
+	}
+
+	DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, new_ether_type));
+
+	new_ip_hdr = new_ether_hdr + ETHER_HDR_LEN;
+	cur_framelen -= ETHER_HDR_LEN;
+
+	ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN);
+
+	new_ip_hdr_len = IPV4_HLEN(new_ip_hdr);
+	if (IP_VER(new_ip_hdr) != IP_VER_4 || IPV4_PROT(new_ip_hdr) != IP_PROT_TCP) {
+		DHD_TRACE(("%s %d: Not IPv4 nor TCP! ip ver %d, prot %d\n",
+			__FUNCTION__, __LINE__, IP_VER(new_ip_hdr), IPV4_PROT(new_ip_hdr)));
+		goto exit;
+	}
+
+	new_tcp_hdr = new_ip_hdr + new_ip_hdr_len;
+	cur_framelen -= new_ip_hdr_len;
+
+	ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN);
+
+	DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__));
+
+	/* Is it an ACK? Allow only the ACK flag, so as not to suppress other flags. */
+	if (new_tcp_hdr[TCP_FLAGS_OFFSET] != TCP_FLAG_ACK) {
+		DHD_TRACE(("%s %d: Do not touch TCP flag 0x%x\n",
+			__FUNCTION__, __LINE__, new_tcp_hdr[TCP_FLAGS_OFFSET]));
+		goto exit;
+	}
+
+	new_ip_total_len = ntoh16_ua(&new_ip_hdr[IPV4_PKTLEN_OFFSET]);
+	new_tcp_hdr_len = 4 * TCP_HDRLEN(new_tcp_hdr[TCP_HLEN_OFFSET]);
+
+	/* This packet has TCP data, so just send */
+	if (new_ip_total_len > new_ip_hdr_len + new_tcp_hdr_len) {
+		DHD_TRACE(("%s %d: Do nothing for TCP DATA\n", __FUNCTION__, __LINE__));
+		goto exit;
+	}
+
+	ASSERT(new_ip_total_len == new_ip_hdr_len + new_tcp_hdr_len);
+
+	new_tcp_ack_num = ntoh32_ua(&new_tcp_hdr[TCP_ACK_NUM_OFFSET]);
+
+	DHD_TRACE(("%s %d: TCP ACK with zero DATA length"
+		" IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n",
+		__FUNCTION__, __LINE__,
+		IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_SRC_IP_OFFSET])),
+		IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_DEST_IP_OFFSET])),
+		ntoh16_ua(&new_tcp_hdr[TCP_SRC_PORT_OFFSET]),
+		ntoh16_ua(&new_tcp_hdr[TCP_DEST_PORT_OFFSET])));
+
+	/* Look for tcp_ack_info that has the same ip src/dst addrs and tcp src/dst ports */
+	dhd_os_tcpacklock(dhdp);
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+	counter_printlog(&tack_tbl);
+	tack_tbl.cnt[0]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+
+	tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+		dhd_os_tcpackunlock(dhdp);
+		goto exit;
+	}
+	tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl;
+
+	if (dhd_tcpdata_psh_acked(dhdp, new_ip_hdr, new_tcp_hdr, new_tcp_ack_num)) {
+		/* This TCPACK is ACK to TCPDATA PSH pkt, so keep set_dotxinrx TRUE */
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+		tack_tbl.cnt[5]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+	} else
+		set_dotxinrx = FALSE;
+
+	for (i = 0; i < tcpack_sup_mod->tcpack_info_cnt; i++) {
+		void *oldpkt;	/* TCPACK packet that is already in txq or DelayQ */
+		uint8 *old_ether_hdr, *old_ip_hdr, *old_tcp_hdr;
+		uint32 old_ip_hdr_len, old_tcp_hdr_len;
+		uint32 old_tcpack_num;	/* TCP ACK number of old TCPACK packet in Q */
+
+		if ((oldpkt = tcpack_info_tbl[i].pkt_in_q) == NULL) {
+			DHD_ERROR(("%s %d: Unexpected error!! cur idx %d, ttl cnt %d\n",
+				__FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpack_info_cnt));
+			break;
+		}
+
+		if (PKTDATA(dhdp->osh, oldpkt) == NULL) {
+			DHD_ERROR(("%s %d: oldpkt data NULL!! cur idx %d, ttl cnt %d\n",
+				__FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpack_info_cnt));
+			break;
+		}
+
+		old_ether_hdr = tcpack_info_tbl[i].pkt_ether_hdr;
+		old_ip_hdr = old_ether_hdr + ETHER_HDR_LEN;
+		old_ip_hdr_len = IPV4_HLEN(old_ip_hdr);
+		old_tcp_hdr = old_ip_hdr + old_ip_hdr_len;
+		old_tcp_hdr_len = 4 * TCP_HDRLEN(old_tcp_hdr[TCP_HLEN_OFFSET]);
+
+		DHD_TRACE(("%s %d: oldpkt %p[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+			" TCP port %d %d\n", __FUNCTION__, __LINE__, oldpkt, i,
+			IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_SRC_IP_OFFSET])),
+			IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_DEST_IP_OFFSET])),
+			ntoh16_ua(&old_tcp_hdr[TCP_SRC_PORT_OFFSET]),
+			ntoh16_ua(&old_tcp_hdr[TCP_DEST_PORT_OFFSET])));
+
+		/* If either of IP address or TCP port number does not match, skip. */
+		if (memcmp(&new_ip_hdr[IPV4_SRC_IP_OFFSET],
+			&old_ip_hdr[IPV4_SRC_IP_OFFSET], IPV4_ADDR_LEN * 2) ||
+			memcmp(&new_tcp_hdr[TCP_SRC_PORT_OFFSET],
+			&old_tcp_hdr[TCP_SRC_PORT_OFFSET], TCP_PORT_LEN * 2))
+			continue;
+
+		old_tcpack_num = ntoh32_ua(&old_tcp_hdr[TCP_ACK_NUM_OFFSET]);
+
+		if (IS_TCPSEQ_GT(new_tcp_ack_num, old_tcpack_num)) {
+			/* New packet has higher TCP ACK number, so it replaces the old packet */
+			if (new_ip_hdr_len == old_ip_hdr_len &&
+				new_tcp_hdr_len == old_tcp_hdr_len) {
+				ASSERT(memcmp(new_ether_hdr, old_ether_hdr, ETHER_HDR_LEN) == 0);
+				bcopy(new_ip_hdr, old_ip_hdr, new_ip_total_len);
+				PKTFREE(dhdp->osh, pkt, FALSE);
+				DHD_TRACE(("%s %d: TCP ACK replace %u -> %u\n",
+					__FUNCTION__, __LINE__, old_tcpack_num, new_tcp_ack_num));
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+				tack_tbl.cnt[2]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+				ret = TRUE;
+			} else
+				DHD_TRACE(("%s %d: length mismatch %d != %d || %d != %d\n",
+					__FUNCTION__, __LINE__, new_ip_hdr_len, old_ip_hdr_len,
+					new_tcp_hdr_len, old_tcp_hdr_len));
+		} else if (new_tcp_ack_num == old_tcpack_num) {
+			set_dotxinrx = TRUE;
+			/* TCPACK retransmission */
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+			tack_tbl.cnt[3]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+		} else {
+			DHD_TRACE(("%s %d: ACK number reverse old %u(0x%p) new %u(0x%p)\n",
+				__FUNCTION__, __LINE__, old_tcpack_num, oldpkt,
+				new_tcp_ack_num, pkt));
+		}
+		dhd_os_tcpackunlock(dhdp);
+		goto exit;
+	}
+
+	if (i == tcpack_sup_mod->tcpack_info_cnt && i < TCPACK_INFO_MAXNUM) {
+		/* No TCPACK packet with the same IP addr and TCP port is found
+		 * in tcp_ack_info_tbl. So add this packet to the table.
+		 */
+		DHD_TRACE(("%s %d: Add pkt 0x%p(ether_hdr 0x%p) to tbl[%d]\n",
+			__FUNCTION__, __LINE__, pkt, new_ether_hdr,
+			tcpack_sup_mod->tcpack_info_cnt));
+
+		tcpack_info_tbl[tcpack_sup_mod->tcpack_info_cnt].pkt_in_q = pkt;
+		tcpack_info_tbl[tcpack_sup_mod->tcpack_info_cnt].pkt_ether_hdr = new_ether_hdr;
+		tcpack_sup_mod->tcpack_info_cnt++;
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+		tack_tbl.cnt[1]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+	} else {
+		ASSERT(i == tcpack_sup_mod->tcpack_info_cnt);
+		DHD_TRACE(("%s %d: No empty tcp ack info tbl\n",
+			__FUNCTION__, __LINE__));
+	}
+	dhd_os_tcpackunlock(dhdp);
+
+exit:
+	/* Unless TCPACK_SUP_DELAYTX, dotxinrx is always TRUE, so no need to set it here */
+	if (dhdp->tcpack_sup_mode == TCPACK_SUP_DELAYTX && set_dotxinrx)
+		dhd_bus_set_dotxinrx(dhdp->bus, TRUE);
+
+	return ret;
+}
+
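+/*
+ * Inspect a received TCP segment: if it carries PSH data, remember the
+ * sequence number of its last byte in the per-stream tcpdata_info so a
+ * later outgoing ACK covering it can be recognized by
+ * dhd_tcpdata_psh_acked(). Streams inactive for TCPDATA_INFO_TIMEOUT ms
+ * are aged out here as well.
+ */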
+bool
+dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt)
+{
+	uint8 *ether_hdr;	/* Ethernet header of the new packet */
+	uint16 ether_type;	/* Ethernet type of the new packet */
+	uint8 *ip_hdr;		/* IP header of the new packet */
+	uint8 *tcp_hdr;		/* TCP header of the new packet */
+	uint32 ip_hdr_len;	/* IP header length of the new packet */
+	uint32 cur_framelen;
+	uint16 ip_total_len;	/* Total length of IP packet for the new packet */
+	uint32 tcp_hdr_len;		/* TCP header length of the new packet */
+	uint32 tcp_seq_num;		/* TCP sequence number of the new packet */
+	uint16 tcp_data_len;	/* TCP DATA length that excludes IP and TCP headers */
+	uint32 end_tcp_seq_num;	/* TCP seq number of the last byte in the new packet */
+	tcpack_sup_module_t *tcpack_sup_mod;
+	tcpdata_info_t *tcpdata_info = NULL;
+	tdata_psh_info_t *tdata_psh_info;
+
+	int i;
+	bool ret = FALSE;
+
+	if (dhdp->tcpack_sup_mode != TCPACK_SUP_DELAYTX)
+		goto exit;
+
+	ether_hdr = PKTDATA(dhdp->osh, pkt);
+	cur_framelen = PKTLEN(dhdp->osh, pkt);
+
+	ether_type = ether_hdr[12] << 8 | ether_hdr[13];
+
+	if (ether_type != ETHER_TYPE_IP) {
+		DHD_TRACE(("%s %d: Not an IP packet 0x%x\n",
+			__FUNCTION__, __LINE__, ether_type));
+		goto exit;
+	}
+
+	DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, ether_type));
+
+	ip_hdr = ether_hdr + ETHER_HDR_LEN;
+	cur_framelen -= ETHER_HDR_LEN;
+
+	ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN);
+
+	ip_hdr_len = IPV4_HLEN(ip_hdr);
+	if (IP_VER(ip_hdr) != IP_VER_4 || IPV4_PROT(ip_hdr) != IP_PROT_TCP) {
+		DHD_TRACE(("%s %d: Not IPv4 nor TCP! ip ver %d, prot %d\n",
+			__FUNCTION__, __LINE__, IP_VER(ip_hdr), IPV4_PROT(ip_hdr)));
+		goto exit;
+	}
+
+	tcp_hdr = ip_hdr + ip_hdr_len;
+	cur_framelen -= ip_hdr_len;
+
+	ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN);
+
+	DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__));
+
+	ip_total_len = ntoh16_ua(&ip_hdr[IPV4_PKTLEN_OFFSET]);
+	tcp_hdr_len = 4 * TCP_HDRLEN(tcp_hdr[TCP_HLEN_OFFSET]);
+
+	/* This packet is a mere TCP ACK, so do nothing */
+	if (ip_total_len == ip_hdr_len + tcp_hdr_len) {
+		DHD_TRACE(("%s %d: Do nothing for no data TCP ACK\n", __FUNCTION__, __LINE__));
+		goto exit;
+	}
+
+	ASSERT(ip_total_len > ip_hdr_len + tcp_hdr_len);
+
+	if ((tcp_hdr[TCP_FLAGS_OFFSET] & TCP_FLAG_PSH) == 0) {
+		DHD_TRACE(("%s %d: Not an interesting TCP DATA packet\n", __FUNCTION__, __LINE__));
+		goto exit;
+	}
+
+	DHD_TRACE(("%s %d: TCP DATA with nonzero DATA length"
+		" IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d, flag 0x%x\n",
+		__FUNCTION__, __LINE__,
+		IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
+		IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
+		ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
+		ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]),
+		tcp_hdr[TCP_FLAGS_OFFSET]));
+
+	dhd_os_tcpacklock(dhdp);
+	tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+	if (!tcpack_sup_mod) {
+		DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+		dhd_os_tcpackunlock(dhdp);
+		goto exit;
+	}
+
+	/* Look for tcpdata_info that has the same ip src/dst addrs and tcp src/dst ports */
+	i = 0;
+	while (i < tcpack_sup_mod->tcpdata_info_cnt) {
+		tcpdata_info_t *tdata_info_tmp = &tcpack_sup_mod->tcpdata_info_tbl[i];
+		uint32 now_in_ms = OSL_SYSUPTIME();
+		DHD_TRACE(("%s %d: data info[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+			" TCP port %d %d\n", __FUNCTION__, __LINE__, i,
+			IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->src_ip_addr)),
+			IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->dst_ip_addr)),
+			ntoh16_ua(tdata_info_tmp->src_tcp_port),
+			ntoh16_ua(tdata_info_tmp->dst_tcp_port)));
+
+		/* If both IP address and TCP port number match, we found it so break. */
+		if (memcmp(&ip_hdr[IPV4_SRC_IP_OFFSET],
+			tdata_info_tmp->src_ip_addr, IPV4_ADDR_LEN * 2) == 0 &&
+			memcmp(&tcp_hdr[TCP_SRC_PORT_OFFSET],
+			tdata_info_tmp->src_tcp_port, TCP_PORT_LEN * 2) == 0) {
+			tcpdata_info = tdata_info_tmp;
+			tcpdata_info->last_used_time = now_in_ms;
+			break;
+		}
+
+		if (now_in_ms - tdata_info_tmp->last_used_time > TCPDATA_INFO_TIMEOUT) {
+			tdata_psh_info_t *tdata_psh_info_tmp;
+			tcpdata_info_t *last_tdata_info;
+
+			while ((tdata_psh_info_tmp = tdata_info_tmp->tdata_psh_info_head)) {
+				tdata_info_tmp->tdata_psh_info_head = tdata_psh_info_tmp->next;
+				tdata_psh_info_tmp->next = NULL;
+				DHD_TRACE(("%s %d: Clean tdata_psh_info(end_seq %u)!\n",
+					__FUNCTION__, __LINE__, tdata_psh_info_tmp->end_seq));
+				_tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info_tmp);
+			}
+#ifdef DHDTCPACK_SUP_DBG
+			DHD_ERROR(("%s %d: PSH INFO ENQ %d\n",
+				__FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num));
+#endif /* DHDTCPACK_SUP_DBG */
+			tcpack_sup_mod->tcpdata_info_cnt--;
+			ASSERT(tcpack_sup_mod->tcpdata_info_cnt >= 0);
+
+			last_tdata_info =
+				&tcpack_sup_mod->tcpdata_info_tbl[tcpack_sup_mod->tcpdata_info_cnt];
+			if (i < tcpack_sup_mod->tcpdata_info_cnt) {
+				ASSERT(last_tdata_info != tdata_info_tmp);
+				bcopy(last_tdata_info, tdata_info_tmp, sizeof(tcpdata_info_t));
+			}
+			bzero(last_tdata_info, sizeof(tcpdata_info_t));
+			DHD_ERROR(("%s %d: tcpdata_info(idx %d) is aged out. ttl cnt is now %d\n",
+				__FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpdata_info_cnt));
+			/* Don't increase "i" here, so that the prev last tcpdata_info is checked */
+		} else
+			 i++;
+	}
+
+	tcp_seq_num = ntoh32_ua(&tcp_hdr[TCP_SEQ_NUM_OFFSET]);
+	tcp_data_len = ip_total_len - ip_hdr_len - tcp_hdr_len;
+	end_tcp_seq_num = tcp_seq_num + tcp_data_len;
+
+	if (tcpdata_info == NULL) {
+		ASSERT(i == tcpack_sup_mod->tcpdata_info_cnt);
+		if (i >= TCPDATA_INFO_MAXNUM) {
+			DHD_TRACE(("%s %d: tcp_data_info_tbl FULL! %d %d"
+				" IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n",
+				__FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpdata_info_cnt,
+				IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
+				IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
+				ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
+				ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET])));
+			dhd_os_tcpackunlock(dhdp);
+			goto exit;
+		}
+		tcpdata_info = &tcpack_sup_mod->tcpdata_info_tbl[i];
+
+		/* No TCP flow with the same IP addr and TCP port is found
+		 * in tcp_data_info_tbl. So add this flow to the table.
+		 */
+		DHD_ERROR(("%s %d: Add data info to tbl[%d]: IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+			" TCP port %d %d\n",
+			__FUNCTION__, __LINE__, tcpack_sup_mod->tcpdata_info_cnt,
+			IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
+			IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
+			ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
+			ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET])));
+
+		bcopy(&ip_hdr[IPV4_SRC_IP_OFFSET], tcpdata_info->src_ip_addr,
+			IPV4_ADDR_LEN * 2);
+		bcopy(&tcp_hdr[TCP_SRC_PORT_OFFSET], tcpdata_info->src_tcp_port,
+			TCP_PORT_LEN * 2);
+
+		tcpdata_info->last_used_time = OSL_SYSUPTIME();
+		tcpack_sup_mod->tcpdata_info_cnt++;
+	}
+
+	ASSERT(tcpdata_info != NULL);
+
+	tdata_psh_info = _tdata_psh_info_pool_deq(tcpack_sup_mod);
+#ifdef DHDTCPACK_SUP_DBG
+	DHD_TRACE(("%s %d: PSH INFO ENQ %d\n",
+		__FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num));
+#endif /* DHDTCPACK_SUP_DBG */
+
+	if (tdata_psh_info == NULL) {
+		DHD_ERROR(("%s %d: No more free tdata_psh_info!!\n", __FUNCTION__, __LINE__));
+		dhd_os_tcpackunlock(dhdp);
+		goto exit;
+	}
+	tdata_psh_info->end_seq = end_tcp_seq_num;
+
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+	tack_tbl.cnt[4]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+
+	DHD_TRACE(("%s %d: TCP PSH DATA recvd! end seq %u\n",
+		__FUNCTION__, __LINE__, tdata_psh_info->end_seq));
+
+	ASSERT(tdata_psh_info->next == NULL);
+
+	if (tcpdata_info->tdata_psh_info_head == NULL)
+		tcpdata_info->tdata_psh_info_head = tdata_psh_info;
+	else {
+		ASSERT(tcpdata_info->tdata_psh_info_tail);
+		tcpdata_info->tdata_psh_info_tail->next = tdata_psh_info;
+	}
+	tcpdata_info->tdata_psh_info_tail = tdata_psh_info;
+
+	dhd_os_tcpackunlock(dhdp);
+
+exit:
+	return ret;
+}
+
+#endif /* DHDTCPACK_SUPPRESS */
diff --git a/drivers/staging/edison-bcm43340/dhd_ip.h b/drivers/staging/edison-bcm43340/dhd_ip.h
new file mode 100644
index 0000000..328329e
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/dhd_ip.h
@@ -0,0 +1,71 @@
+/*
+ * Header file describing the common ip parser function.
+ *
+ * Provides type definitions and function prototypes used to parse ip packet.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_ip.h 457888 2014-02-25 03:34:39Z $
+ */
+
+#ifndef _dhd_ip_h_
+#define _dhd_ip_h_
+
+#ifdef DHDTCPACK_SUPPRESS
+#include <dngl_stats.h>
+#include <bcmutils.h>
+#include <dhd.h>
+#endif /* DHDTCPACK_SUPPRESS */
+
+typedef enum pkt_frag
+{
+	DHD_PKT_FRAG_NONE = 0,
+	DHD_PKT_FRAG_FIRST,
+	DHD_PKT_FRAG_CONT,
+	DHD_PKT_FRAG_LAST
+} pkt_frag_t;
+
+extern pkt_frag_t pkt_frag_info(osl_t *osh, void *p);
+
+#ifdef DHDTCPACK_SUPPRESS
+#define	TCPACKSZMIN	(ETHER_HDR_LEN + IPV4_MIN_HEADER_LEN + TCP_MIN_HEADER_LEN)
+/* Size of MAX possible TCP ACK packet. Extra bytes for IP/TCP option fields */
+#define	TCPACKSZMAX	(TCPACKSZMIN + 100)
+
+/* Max number of TCP streams that have own src/dst IP addrs and TCP ports */
+#define TCPACK_INFO_MAXNUM 4
+#define TCPDATA_INFO_MAXNUM 4
+#define TCPDATA_PSH_INFO_MAXNUM (8 * TCPDATA_INFO_MAXNUM)
+
+#define TCPDATA_INFO_TIMEOUT 5000	/* Remove tcpdata_info if inactive for this time (in ms) */
+
+extern int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 on);
+extern void dhd_tcpack_info_tbl_clean(dhd_pub_t *dhdp);
+extern int dhd_tcpack_check_xmit(dhd_pub_t *dhdp, void *pkt);
+extern bool dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt);
+extern bool dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt);
+
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+extern counter_tbl_t tack_tbl;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+#endif /* DHDTCPACK_SUPPRESS */
+
+#endif /* _dhd_ip_h_ */
diff --git a/drivers/staging/edison-bcm43340/dhd_linux.c b/drivers/staging/edison-bcm43340/dhd_linux.c
new file mode 100644
index 0000000..c902735
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/dhd_linux.c
@@ -0,0 +1,7466 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
+ * Basically selected code segments from usb-cdc.c and usb-rndis.c
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux.c 476604 2014-05-09 09:02:24Z $
+ */
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/ip.h>
+#include <net/addrconf.h>
+#include <linux/cpufreq.h>
+
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+
+#include <proto/ethernet.h>
+#include <proto/bcmevent.h>
+#include <dngl_stats.h>
+#include <dhd_linux_wq.h>
+#include <dhd.h>
+#include <dhd_linux.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_ip.h>
+#endif /* DHDTCPACK_SUPPRESS */
+
+//#include <linux/kct.h>
+
+#ifdef WLMEDIA_HTSF
+#include <linux/time.h>
+#include <htsf.h>
+
+#define HTSF_MINLEN 200    /* min. packet length to timestamp */
+#define HTSF_BUS_DELAY 150 /* assume a fixed propagation delay in us */
+#define TSMAX  1000        /* max no. of timing records kept   */
+#define NUMBIN 34
+
+static uint32 tsidx = 0;
+static uint32 htsf_seqnum = 0;
+uint32 tsfsync;
+struct timeval tsync;
+static uint32 tsport = 5010;
+
+typedef struct histo_ {
+	uint32 bin[NUMBIN];
+} histo_t;
+
+#if !ISPOWEROF2(DHD_SDALIGN)
+#error DHD_SDALIGN is not a power of 2!
+#endif
+
+static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
+#endif /* WLMEDIA_HTSF */
+
+
+
+#if defined(SOFTAP)
+extern bool ap_cfg_running;
+extern bool ap_fw_loaded;
+#endif
+
+
+#ifdef ENABLE_ADAPTIVE_SCHED
+#define DEFAULT_CPUFREQ_THRESH		1000000	/* threshold frequency : 1000000 = 1GHz */
+#ifndef CUSTOM_CPUFREQ_THRESH
+#define CUSTOM_CPUFREQ_THRESH	DEFAULT_CPUFREQ_THRESH
+#endif /* CUSTOM_CPUFREQ_THRESH */
+#endif /* ENABLE_ADAPTIVE_SCHED */
+
+/* enable HOSTIP cache update from the host side when an eth0:N is up */
+#define AOE_IP_ALIAS_SUPPORT 1
+
+#ifdef BCM_FD_AGGR
+#include <bcm_rpc.h>
+#include <bcm_rpc_tp.h>
+#endif
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+#include <wl_android.h>
+
+
+#ifdef ARP_OFFLOAD_SUPPORT
+void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
+static int dhd_inetaddr_notifier_call(struct notifier_block *this,
+	unsigned long event, void *ptr);
+static struct notifier_block dhd_inetaddr_notifier = {
+	.notifier_call = dhd_inetaddr_notifier_call
+};
+/* Make sure we won't register the same notifier twice; otherwise a loop is likely to be
+ * created in the kernel notifier linked list (with 'next' pointing to itself)
+ */
+static bool dhd_inetaddr_notifier_registered = FALSE;
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+static int dhd_inet6addr_notifier_call(struct notifier_block *this,
+	unsigned long event, void *ptr);
+static struct notifier_block dhd_inet6addr_notifier = {
+	.notifier_call = dhd_inet6addr_notifier_call
+};
+/* Make sure we won't register the same notifier twice; otherwise a loop is likely to be
+ * created in the kernel notifier linked list (with 'next' pointing to itself)
+ */
+static bool dhd_inet6addr_notifier_registered = FALSE;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
+#include <linux/suspend.h>
+volatile bool dhd_mmc_suspend = FALSE;
+DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
+
+volatile bool shutdown_in_progress = FALSE;
+
+#if defined(OOB_INTR_ONLY)
+extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
+#endif 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
+#endif 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+MODULE_LICENSE("GPL v2");
+#endif /* LinuxVer */
+
+#ifdef BCM_FD_AGGR
+#define DBUS_RX_BUFFER_SIZE_DHD(net)	(BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
+#else
+#ifndef PROP_TXSTATUS
+#define DBUS_RX_BUFFER_SIZE_DHD(net)	(net->mtu + net->hard_header_len + dhd->pub.hdrlen)
+#else
+#define DBUS_RX_BUFFER_SIZE_DHD(net)	(net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
+#endif
+#endif /* BCM_FD_AGGR */
+
+#ifdef PROP_TXSTATUS
+extern bool dhd_wlfc_skip_fc(void);
+extern void dhd_wlfc_plat_init(void *dhd);
+extern void dhd_wlfc_plat_deinit(void *dhd);
+#endif /* PROP_TXSTATUS */
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
+const char *
+print_tainted()
+{
+	return "";
+}
+#endif	/* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
+
+/* Linux wireless extension support */
+#if defined(WL_WIRELESS_EXT)
+#include <wl_iw.h>
+extern wl_iw_extra_params_t  g_wl_iw_params;
+#endif /* defined(WL_WIRELESS_EXT) */
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
+
+extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
+
+#ifdef PKT_FILTER_SUPPORT
+extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
+#endif
+
+
+#ifdef READ_MACADDR
+extern int dhd_read_macaddr(struct dhd_info *dhd);
+#else
+static inline int dhd_read_macaddr(struct dhd_info *dhd) { return 0; }
+#endif
+#ifdef WRITE_MACADDR
+extern int dhd_write_macaddr(struct ether_addr *mac);
+#else
+static inline int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
+#endif
+
+#ifdef ENABLE_CONTROL_SCHED
+#ifndef ENABLE_ADAPTIVE_SCHED
+#error ENABLE_ADAPTIVE_SCHED not defined.
+#endif /* ENABLE_ADAPTIVE_SCHED */
+static int dhd_sysfs_create_node(struct net_device *net);
+static void dhd_sysfs_destroy_node(struct net_device *net);
+#endif /* ENABLE_CONTROL_SCHED */
+
+
+typedef struct dhd_if_event {
+	struct list_head	list;
+	wl_event_data_if_t	event;
+	char			name[IFNAMSIZ+1];
+	uint8			mac[ETHER_ADDR_LEN];
+} dhd_if_event_t;
+
+/* Interface control information */
+typedef struct dhd_if {
+	struct dhd_info *info;			/* back pointer to dhd_info */
+	/* OS/stack specifics */
+	struct net_device *net;
+	struct net_device_stats stats;
+	int 			idx;			/* iface idx in dongle */
+	uint 			subunit;		/* subunit */
+	uint8			mac_addr[ETHER_ADDR_LEN];	/* assigned MAC address */
+	bool			attached;		/* Delayed attachment when unset */
+	bool			txflowcontrol;	/* Per interface flow control indicator */
+	char			name[IFNAMSIZ+1]; /* linux interface name */
+	uint8			bssidx;			/* bsscfg index for the interface */
+	bool			set_macaddress;
+	bool			set_multicast;
+} dhd_if_t;
+
+#ifdef WLMEDIA_HTSF
+typedef struct {
+	uint32 low;
+	uint32 high;
+} tsf_t;
+
+typedef struct {
+	uint32 last_cycle;
+	uint32 last_sec;
+	uint32 last_tsf;
+	uint32 coef;     /* scaling factor */
+	uint32 coefdec1; /* first decimal  */
+	uint32 coefdec2; /* second decimal */
+} htsf_t;
+
+typedef struct {
+	uint32 t1;
+	uint32 t2;
+	uint32 t3;
+	uint32 t4;
+} tstamp_t;
+
+static tstamp_t ts[TSMAX];
+static tstamp_t maxdelayts;
+static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;
+
+#endif  /* WLMEDIA_HTSF */
+
+struct ipv6_work_info_t {
+	uint8			if_idx;
+	char			ipv6_addr[16];
+	unsigned long		event;
+};
+
+/* Local private structure (extension of pub) */
+typedef struct dhd_info {
+#if defined(WL_WIRELESS_EXT)
+	wl_iw_t		iw;		/* wireless extensions state (must be first) */
+#endif /* defined(WL_WIRELESS_EXT) */
+
+	dhd_pub_t pub;
+	void *adapter;			/* adapter information, interrupt, fw path etc. */
+	char fw_path[PATH_MAX];		/* path to firmware image */
+	char nv_path[PATH_MAX];		/* path to nvram vars file */
+
+	/* For supporting multiple interfaces */
+	dhd_if_t *iflist[DHD_MAX_IFS];
+
+	struct semaphore proto_sem;
+#ifdef PROP_TXSTATUS
+	spinlock_t	wlfc_spinlock;
+
+#endif /* PROP_TXSTATUS */
+#ifdef WLMEDIA_HTSF
+	htsf_t  htsf;
+#endif
+	wait_queue_head_t ioctl_resp_wait;
+	uint32	default_wd_interval;
+
+	struct timer_list timer;
+	bool wd_timer_valid;
+	struct tasklet_struct tasklet;
+	spinlock_t	sdlock;
+	spinlock_t	txqlock;
+	spinlock_t	dhd_lock;
+
+	struct semaphore sdsem;
+	tsk_ctl_t	thr_dpc_ctl;
+	tsk_ctl_t	thr_wdt_ctl;
+
+	tsk_ctl_t	thr_rxf_ctl;
+	spinlock_t	rxf_lock;
+	bool		rxthread_enabled;
+
+	/* Wakelocks */
+#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	struct wake_lock wl_wifi;   /* Wifi wakelock */
+	struct wake_lock wl_rxwake; /* Wifi rx wakelock */
+	struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
+	struct wake_lock wl_wdwake; /* Wifi wd wakelock */
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	/* net_device interface lock, prevent race conditions among net_dev interface
+	 * calls and wifi_on or wifi_off
+	 */
+	struct mutex dhd_net_if_mutex;
+	struct mutex dhd_suspend_mutex;
+#endif
+	spinlock_t wakelock_spinlock;
+	uint32 wakelock_counter;
+	bool waive_wakelock;
+	uint32 wakelock_before_waive;
+	int wakelock_wd_counter;
+	int wakelock_rx_timeout_enable;
+	int wakelock_ctrl_timeout_enable;
+
+	/* Thread to issue ioctl for multicast */
+	wait_queue_head_t ctrl_wait;
+	atomic_t pend_8021x_cnt;
+	dhd_attach_states_t dhd_state;
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+	struct early_suspend early_suspend;
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	u32 pend_ipaddr;
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef BCM_FD_AGGR
+	void *rpc_th;
+	void *rpc_osh;
+	struct timer_list rpcth_timer;
+	bool rpcth_timer_active;
+	bool fdaggr;
+#endif
+#ifdef DHDTCPACK_SUPPRESS
+	spinlock_t	tcpack_lock;
+#endif /* DHDTCPACK_SUPPRESS */
+	void			*dhd_deferred_wq;
+#ifdef DEBUG_CPU_FREQ
+	struct notifier_block freq_trans;
+	int __percpu *new_freq;
+#endif
+	unsigned int unit;
+	struct notifier_block pm_notifier;
+} dhd_info_t;
+
+/* Flag to indicate if we should download firmware on driver load */
+uint dhd_download_fw_on_driverload = TRUE;
+
+/* Definitions to provide path to the firmware and nvram
+ * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
+ */
+char firmware_path[MOD_PARAM_PATHLEN];
+char nvram_path[MOD_PARAM_PATHLEN];
+
+/* information string to keep firmware and chip version info visible in the log */
+char info_string[MOD_PARAM_INFOLEN];
+module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
+int op_mode = 0;
+int disable_proptx = 0;
+module_param(op_mode, int, 0644);
+extern int wl_control_wl_start(struct net_device *dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
+struct semaphore dhd_registration_sem;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+
+/* deferred handlers */
+static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
+static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
+static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
+static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
+static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
+
+#ifdef WL_CFG80211
+extern void dhd_netdev_free(struct net_device *ndev);
+#endif /* WL_CFG80211 */
+
+/* Error bits */
+module_param(dhd_msg_level, int, 0);
+
+#ifdef ARP_OFFLOAD_SUPPORT
+/* ARP offload enable */
+uint dhd_arp_enable = TRUE;
+module_param(dhd_arp_enable, uint, 0);
+
+/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
+
+uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
+
+module_param(dhd_arp_mode, uint, 0);
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+/* Disable Prop tx */
+module_param(disable_proptx, int, 0644);
+/* load firmware and/or nvram values from the filesystem */
+module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
+module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
+
+/* Watchdog interval */
+
+/* extend watchdog expiration to 2 seconds when DPC is running */
+#define WATCHDOG_EXTEND_INTERVAL (2000)
+
+uint dhd_watchdog_ms = 50;
+module_param(dhd_watchdog_ms, uint, 0);
+
+#if defined(DHD_DEBUG)
+/* Console poll interval */
+uint dhd_console_ms = 0;
+module_param(dhd_console_ms, uint, 0644);
+#endif /* defined(DHD_DEBUG) */
+
+
+uint dhd_slpauto = TRUE;
+module_param(dhd_slpauto, uint, 0);
+
+#ifdef PKT_FILTER_SUPPORT
+/* Global Pkt filter enable control */
+uint dhd_pkt_filter_enable = TRUE;
+module_param(dhd_pkt_filter_enable, uint, 0);
+#endif
+
+/* Pkt filter init setup */
+uint dhd_pkt_filter_init = 0;
+module_param(dhd_pkt_filter_init, uint, 0);
+
+/* Pkt filter mode control */
+#if defined(GAN_LITE_NAT_KEEPALIVE_FILTER) || defined(BOARD_INTEL)
+uint dhd_master_mode = FALSE;
+#else
+uint dhd_master_mode = TRUE;
+#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER || BOARD_INTEL */
+module_param(dhd_master_mode, uint, 0);
+
+int dhd_watchdog_prio = 0;
+module_param(dhd_watchdog_prio, int, 0);
+
+/* DPC thread priority */
+int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
+module_param(dhd_dpc_prio, int, 0);
+
+/* RX frame thread priority */
+int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
+module_param(dhd_rxf_prio, int, 0);
+
+#if !defined(BCMDHDUSB)
+extern int dhd_dongle_ramsize;
+module_param(dhd_dongle_ramsize, int, 0);
+#endif /* BCMDHDUSB */
+
+/* Keep track of number of instances */
+static int dhd_found = 0;
+static int instance_base = 0; /* Starting instance number */
+module_param(instance_base, int, 0644);
+
+/* Control fw roaming */
+uint dhd_roam_disable = 0;
+
+/* Control radio state */
+uint dhd_radio_up = 1;
+
+/* Network interface name */
+char iface_name[IFNAMSIZ] = {'\0'};
+module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
+
+/* The following are specific to the SDIO dongle */
+
+/* IOCTL response timeout */
+int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
+
+/* Idle timeout for backplane clock */
+int dhd_idletime = DHD_IDLETIME_TICKS;
+module_param(dhd_idletime, int, 0);
+
+/* Use polling */
+uint dhd_poll = FALSE;
+module_param(dhd_poll, uint, 0);
+
+/* Use interrupts */
+uint dhd_intr = TRUE;
+module_param(dhd_intr, uint, 0);
+
+/* SDIO Drive Strength (in milliamps) */
+uint dhd_sdiod_drive_strength = 6;
+module_param(dhd_sdiod_drive_strength, uint, 0);
+
+/* Tx/Rx bounds */
+extern uint dhd_txbound;
+extern uint dhd_rxbound;
+module_param(dhd_txbound, uint, 0);
+module_param(dhd_rxbound, uint, 0);
+
+/* Deferred transmits */
+extern uint dhd_deferred_tx;
+module_param(dhd_deferred_tx, uint, 0);
+
+#ifdef BCMDBGFS
+extern void dhd_dbg_init(dhd_pub_t *dhdp);
+extern void dhd_dbg_remove(void);
+#endif /* BCMDBGFS */
+
+
+
+#ifdef SDTEST
+/* Echo packet generator (pkts/s) */
+uint dhd_pktgen = 0;
+module_param(dhd_pktgen, uint, 0);
+
+/* Echo packet len (0 => sawtooth, max 2040) */
+uint dhd_pktgen_len = 0;
+module_param(dhd_pktgen_len, uint, 0);
+#endif /* SDTEST */
+
+
+extern char dhd_version[];
+
+int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
+static void dhd_net_if_lock_local(dhd_info_t *dhd);
+static void dhd_net_if_unlock_local(dhd_info_t *dhd);
+static void dhd_suspend_lock(dhd_pub_t *dhdp);
+static void dhd_suspend_unlock(dhd_pub_t *dhdp);
+
+#ifdef WLMEDIA_HTSF
+void htsf_update(dhd_info_t *dhd, void *data);
+tsf_t prev_tsf, cur_tsf;
+
+uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
+static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
+static void dhd_dump_latency(void);
+static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
+static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
+static void dhd_dump_htsfhisto(histo_t *his, char *s);
+#endif /* WLMEDIA_HTSF */
+
+/* Monitor interface */
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
+
+/* Firmware version */
+char fwversion[64] = "unknown";
+
+#if defined(WL_WIRELESS_EXT)
+struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
+#endif /* defined(WL_WIRELESS_EXT) */
+
+static void dhd_dpc(ulong data);
+/* forward decl */
+extern int dhd_wait_pend8021x(struct net_device *dev);
+void dhd_os_wd_timer_extend(void *bus, bool extend);
+
+#ifdef TOE
+#ifndef BDC
+#error TOE requires BDC
+#endif /* !BDC */
+static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
+static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
+#endif /* TOE */
+
+static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
+                             wl_event_msg_t *event_ptr, void **data_ptr);
+
+extern int bcmsdh_sdmmc_get_regulator(void);
+#if defined(SUPPORT_P2P_GO_PS)
+#ifdef PROP_TXSTATUS
+static int dhd_wakelock_waive(dhd_info_t *dhdinfo);
+static int dhd_wakelock_restore(dhd_info_t *dhdinfo);
+#endif
+#endif /* defined(SUPPORT_P2P_GO_PS) */
+
+#if defined(CONFIG_PM_SLEEP)
+static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
+{
+	int ret = NOTIFY_DONE;
+	bool suspend = FALSE;
+	dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
+
+	BCM_REFERENCE(dhdinfo);
+	switch (action) {
+		case PM_HIBERNATION_PREPARE:
+		case PM_SUSPEND_PREPARE:
+			suspend = TRUE;
+			break;
+		case PM_POST_HIBERNATION:
+		case PM_POST_SUSPEND:
+			suspend = FALSE;
+			break;
+	}
+
+#if defined(SUPPORT_P2P_GO_PS)
+#ifdef PROP_TXSTATUS
+	if (suspend) {
+		dhd_wakelock_waive(dhdinfo);
+		dhd_wlfc_suspend(&dhdinfo->pub);
+		dhd_wakelock_restore(dhdinfo);
+	} else {
+		dhd_wlfc_resume(&dhdinfo->pub);
+	}
+
+#endif
+#endif /* defined(SUPPORT_P2P_GO_PS) */
+
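+	/* On 2.6.27..2.6.39 kernels the SDIO stack consults dhd_mmc_suspend;
+	 * publish the new value with a memory barrier so other CPUs see it.
+	 */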
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
+	KERNEL_VERSION(2, 6, 39))
+	dhd_mmc_suspend = suspend;
+	smp_mb();
+#endif
+	return ret;
+}
+
+/* Guard against registering the same notifier twice; doing so would corrupt
+ * the kernel notifier list (an entry whose 'next' points to itself forms a
+ * loop).
+ */
+static bool dhd_pm_notifier_registered = FALSE;
+
+extern int register_pm_notifier(struct notifier_block *nb);
+extern int unregister_pm_notifier(struct notifier_block *nb);
+#endif /* defined(CONFIG_PM_SLEEP) */
+
+/* Request scheduling of the bus rx frame */
+static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
+static void dhd_os_rxflock(dhd_pub_t *pub);
+static void dhd_os_rxfunlock(dhd_pub_t *pub);
+
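+/* The rx frame path queues skbs through a single-producer/single-consumer
+ * ring (dhdp->skbbuf). store_idx/sent_idx wrap with a power-of-two mask, so
+ * MAXSKBPEND must be a power of two.
+ */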
+static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
+{
+	uint32 store_idx;
+	uint32 sent_idx;
+
+	if (!skb) {
+		DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
+		return BCME_ERROR;
+	}
+
+	dhd_os_rxflock(dhdp);
+	store_idx = dhdp->store_idx;
+	sent_idx = dhdp->sent_idx;
+	if (dhdp->skbbuf[store_idx] != NULL) {
+		/* Make sure the previous packets are processed */
+		dhd_os_rxfunlock(dhdp);
+#ifdef RXF_DEQUEUE_ON_BUSY
+		DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
+			skb, store_idx, sent_idx));
+		return BCME_BUSY;
+#else /* RXF_DEQUEUE_ON_BUSY */
+		DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
+			skb, store_idx, sent_idx));
+		/* Sleep briefly so the rx frame thread gets a chance to drain
+		 * the ring; wait_event_timeout would be a cleaner mechanism.
+		 */
+#if defined(WAIT_DEQUEUE)
+		OSL_SLEEP(1);
+#else
+		msleep(1);
+#endif
+		return BCME_ERROR;
+#endif /* RXF_DEQUEUE_ON_BUSY */
+	}
+	DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
+		skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
+	dhdp->skbbuf[store_idx] = skb;
+	dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
+	dhd_os_rxfunlock(dhdp);
+
+	return BCME_OK;
+}
+
+static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
+{
+	uint32 store_idx;
+	uint32 sent_idx;
+	void *skb;
+
+	dhd_os_rxflock(dhdp);
+
+	store_idx = dhdp->store_idx;
+	sent_idx = dhdp->sent_idx;
+	skb = dhdp->skbbuf[sent_idx];
+
+	if (skb == NULL) {
+		dhd_os_rxfunlock(dhdp);
+		DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
+			store_idx, sent_idx));
+		return NULL;
+	}
+
+	dhdp->skbbuf[sent_idx] = NULL;
+	dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
+
+	DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
+		skb, sent_idx));
+
+	dhd_os_rxfunlock(dhdp);
+
+	return skb;
+}
+
+static int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+	if (prepost) { /* pre process */
+		dhd_read_macaddr(dhd);
+	} else { /* post process */
+		dhd_write_macaddr(&dhd->pub.mac);
+	}
+
+	return 0;
+}
+
+#if defined(PKT_FILTER_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER) && !defined(BOARD_INTEL)
+static bool
+_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode)
+{
+	bool _apply = FALSE;
+	/* In case of IBSS mode, apply arp pkt filter */
+	if (op_mode & DHD_FLAG_IBSS_MODE) {
+		_apply = TRUE;
+		goto exit;
+	}
+	/* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
+	if ((dhd->arp_version == 1) &&
+		(op_mode & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
+		_apply = TRUE;
+		goto exit;
+	}
+
+exit:
+	return _apply;
+}
+#endif /* PKT_FILTER_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER && !BOARD_INTEL */
+
+
+void dhd_set_packet_filter(dhd_pub_t *dhd)
+{
+#ifdef PKT_FILTER_SUPPORT
+	int i;
+
+	DHD_TRACE(("%s: enter\n", __FUNCTION__));
+	if (dhd_pkt_filter_enable) {
+		for (i = 0; i < dhd->pktfilter_count; i++) {
+			dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
+		}
+	}
+#endif /* PKT_FILTER_SUPPORT */
+}
+
+void dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
+{
+#ifdef PKT_FILTER_SUPPORT
+	int i;
+
+	DHD_TRACE(("%s: enter, value = %d\n", __FUNCTION__, value));
+	/* 1 - Enable packet filter, only allow unicast packet to send up */
+	/* 0 - Disable packet filter */
+	if (dhd_pkt_filter_enable && (!value ||
+	    (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress))) {
+		for (i = 0; i < dhd->pktfilter_count; i++) {
+#if !defined(GAN_LITE_NAT_KEEPALIVE_FILTER) && !defined(BOARD_INTEL)
+			if (value && (i == DHD_ARP_FILTER_NUM) &&
+				!_turn_on_arp_filter(dhd, dhd->op_mode)) {
+				DHD_TRACE(("Do not turn on ARP white list pkt filter:"
+					"val %d, cnt %d, op_mode 0x%x\n",
+					value, i, dhd->op_mode));
+				continue;
+			}
+#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER && !BOARD_INTEL */
+			dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
+				value, dhd_master_mode);
+		}
+	}
+#endif /* PKT_FILTER_SUPPORT */
+}
+
+static int dhd_set_suspend(int value, dhd_pub_t *dhd)
+{
+#ifndef SUPPORT_PM2_ONLY
+	int power_mode = PM_MAX;
+#endif /* SUPPORT_PM2_ONLY */
+	/* wl_pkt_filter_enable_t	enable_parm; */
+	char iovbuf[32];
+	int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
+#ifndef ENABLE_FW_ROAM_SUSPEND
+	uint roamvar = 1;
+#endif /* ENABLE_FW_ROAM_SUSPEND */
+	uint nd_ra_filter = 0;
+	int ret = 0;
+
+#ifdef DYNAMIC_SWOOB_DURATION
+#ifndef CUSTOM_INTR_WIDTH
+#define CUSTOM_INTR_WIDTH 100
+#endif /* CUSTOM_INTR_WIDTH */
+	int intr_width = 0;
+#endif /* DYNAMIC_SWOOB_DURATION */
+	if (!dhd)
+		return -ENODEV;
+
+	DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
+		__FUNCTION__, value, dhd->in_suspend));
+
+	dhd_suspend_lock(dhd);
+
+#ifdef CUSTOM_SET_CPUCORE
+	DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
+	/* set specific cpucore */
+	dhd_set_cpucore(dhd, TRUE);
+#endif /* CUSTOM_SET_CPUCORE */
+	if (dhd->up) {
+		if (value && dhd->in_suspend) {
+#ifdef PKT_FILTER_SUPPORT
+				dhd->early_suspended = 1;
+#endif
+				/* Kernel suspended */
+				DHD_ERROR(("%s: force extra Suspend setting \n", __FUNCTION__));
+
+#ifndef SUPPORT_PM2_ONLY
+				dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
+				                 sizeof(power_mode), TRUE, 0);
+#endif /* SUPPORT_PM2_ONLY */
+
+				/* Enable packet filter, only allow unicast packet to send up */
+				dhd_enable_packet_filter(1, dhd);
+
+
+				/* If DTIM skip is set up as default, force it to wake
+				 * each third DTIM for better power savings.  Note that
+				 * one side effect is a chance to miss BC/MC packet.
+				 */
+				bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
+				bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
+					4, iovbuf, sizeof(iovbuf));
+				if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf),
+					TRUE, 0) < 0)
+					DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
+
+#ifndef ENABLE_FW_ROAM_SUSPEND
+				/* Disable firmware roaming during suspend */
+				bcm_mkiovar("roam_off", (char *)&roamvar, 4,
+					iovbuf, sizeof(iovbuf));
+				dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif /* ENABLE_FW_ROAM_SUSPEND */
+				if (FW_SUPPORTED(dhd, ndoe)) {
+					/* enable IPv6 RA filter in firmware during suspend */
+					nd_ra_filter = 1;
+					bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
+						iovbuf, sizeof(iovbuf));
+					if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+						sizeof(iovbuf), TRUE, 0)) < 0)
+						DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
+							ret));
+				}
+#ifdef DYNAMIC_SWOOB_DURATION
+				intr_width = CUSTOM_INTR_WIDTH;
+				bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4,
+					iovbuf, sizeof(iovbuf));
+				if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+					sizeof(iovbuf), TRUE, 0)) < 0)
+					DHD_ERROR(("failed to set intr_width (%d)\n", ret));
+#endif /* DYNAMIC_SWOOB_DURATION */
+			} else {
+#ifdef PKT_FILTER_SUPPORT
+				dhd->early_suspended = 0;
+#endif
+				/* Kernel resumed  */
+				DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__));
+#ifdef DYNAMIC_SWOOB_DURATION
+				intr_width = 0;
+				bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4,
+					iovbuf, sizeof(iovbuf));
+				if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+					sizeof(iovbuf), TRUE, 0)) < 0)
+					DHD_ERROR(("failed to set intr_width (%d)\n", ret));
+#endif /* DYNAMIC_SWOOB_DURATION */
+
+#ifndef SUPPORT_PM2_ONLY
+				power_mode = PM_FAST;
+				dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
+				                 sizeof(power_mode), TRUE, 0);
+#endif /* SUPPORT_PM2_ONLY */
+#ifdef PKT_FILTER_SUPPORT
+				/* disable pkt filter */
+				dhd_enable_packet_filter(0, dhd);
+#endif /* PKT_FILTER_SUPPORT */
+
+				/* restore pre-suspend setting for dtim_skip */
+				bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
+					4, iovbuf, sizeof(iovbuf));
+
+				dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#ifndef ENABLE_FW_ROAM_SUSPEND
+				roamvar = dhd_roam_disable;
+				bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf,
+					sizeof(iovbuf));
+				dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif /* ENABLE_FW_ROAM_SUSPEND */
+				if (FW_SUPPORTED(dhd, ndoe)) {
+					/* disable IPv6 RA filter in firmware on resume */
+					nd_ra_filter = 0;
+					bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
+						iovbuf, sizeof(iovbuf));
+					if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+						sizeof(iovbuf), TRUE, 0)) < 0)
+						DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
+							ret));
+				}
+			}
+	}
+	dhd_suspend_unlock(dhd);
+
+	return 0;
+}
+
+static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
+{
+	dhd_pub_t *dhdp = &dhd->pub;
+	int ret = 0;
+
+	DHD_OS_WAKE_LOCK(dhdp);
+	/* Set flag when early suspend was called */
+	dhdp->in_suspend = val;
+	if ((force || !dhdp->suspend_disable_flag) &&
+		dhd_support_sta_mode(dhdp))
+	{
+		ret = dhd_set_suspend(val, dhdp);
+	}
+
+	DHD_OS_WAKE_UNLOCK(dhdp);
+	return ret;
+}
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+static void dhd_early_suspend(struct early_suspend *h)
+{
+	struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
+	DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
+
+	if (dhd)
+		dhd_suspend_resume_helper(dhd, 1, 0);
+}
+
+static void dhd_late_resume(struct early_suspend *h)
+{
+	struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
+	DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
+
+	if (dhd)
+		dhd_suspend_resume_helper(dhd, 0, 0);
+}
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+
+/*
+ * Generalized timeout mechanism.  Uses spin sleep with exponential back-off until
+ * the sleep time reaches one jiffy, then switches over to task delay.  Usage:
+ *
+ *      dhd_timeout_start(&tmo, usec);
+ *      while (!dhd_timeout_expired(&tmo))
+ *              if (poll_something())
+ *                      break;
+ *      if (dhd_timeout_expired(&tmo))
+ *              fatal();
+ */
+
+void
+dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
+{
+	tmo->limit = usec;
+	tmo->increment = 0;
+	tmo->elapsed = 0;
+	tmo->tick = jiffies_to_usecs(1);
+}
+
+int
+dhd_timeout_expired(dhd_timeout_t *tmo)
+{
+	/* The first call only arms the back-off */
+	if (tmo->increment == 0) {
+		tmo->increment = 1;
+		return 0;
+	}
+
+	if (tmo->elapsed >= tmo->limit)
+		return 1;
+
+	/* Add the delay that's about to take place */
+	tmo->elapsed += tmo->increment;
+
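+	/* Below one jiffy, or when sleeping is not allowed, spin with a
+	 * doubling delay capped at one tick; otherwise yield for one jiffy.
+	 */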
+	if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
+		OSL_DELAY(tmo->increment);
+		tmo->increment *= 2;
+		if (tmo->increment > tmo->tick)
+			tmo->increment = tmo->tick;
+	} else {
+		wait_queue_head_t delay_wait;
+		DECLARE_WAITQUEUE(wait, current);
+		init_waitqueue_head(&delay_wait);
+		add_wait_queue(&delay_wait, &wait);
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(1);
+		remove_wait_queue(&delay_wait, &wait);
+		set_current_state(TASK_RUNNING);
+	}
+
+	return 0;
+}
+
+int
+dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
+{
+	int i = 0;
+
+	ASSERT(dhd);
+	while (i < DHD_MAX_IFS) {
+		if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
+			return i;
+		i++;
+	}
+
+	return DHD_BAD_IF;
+}
+
+struct net_device * dhd_idx2net(void *pub, int ifidx)
+{
+	struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
+	struct dhd_info *dhd_info;
+
+	if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
+		return NULL;
+	dhd_info = dhd_pub->info;
+	if (dhd_info && dhd_info->iflist[ifidx])
+		return dhd_info->iflist[ifidx]->net;
+	return NULL;
+}
+
+int
+dhd_ifname2idx(dhd_info_t *dhd, char *name)
+{
+	int i = DHD_MAX_IFS;
+
+	ASSERT(dhd);
+
+	if (name == NULL || *name == '\0')
+		return 0;
+
+	while (--i > 0)
+		if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->name, name, IFNAMSIZ))
+				break;
+
+	DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
+
+	return i;	/* default - the primary interface */
+}
+
+char *
+dhd_ifname(dhd_pub_t *dhdp, int ifidx)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+	ASSERT(dhd);
+
+	if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
+		DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
+		return "<if_bad>";
+	}
+
+	if (dhd->iflist[ifidx] == NULL) {
+		DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
+		return "<if_null>";
+	}
+
+	if (dhd->iflist[ifidx]->net)
+		return dhd->iflist[ifidx]->net->name;
+
+	return "<if_none>";
+}
+
+uint8 *
+dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
+{
+	int i;
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+	ASSERT(dhd);
+	for (i = 0; i < DHD_MAX_IFS; i++)
+		if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
+			return dhd->iflist[i]->mac_addr;
+
+	return NULL;
+}
+
+
+static void
+_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
+{
+	struct net_device *dev;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+	struct netdev_hw_addr *ha;
+#else
+	struct dev_mc_list *mclist;
+#endif
+	uint32 allmulti, cnt;
+
+	wl_ioctl_t ioc;
+	char *buf, *bufp;
+	uint buflen;
+	int ret;
+
+	ASSERT(dhd && dhd->iflist[ifidx]);
+	dev = dhd->iflist[ifidx]->net;
+	if (!dev)
+		return;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+	netif_addr_lock_bh(dev);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+	cnt = netdev_mc_count(dev);
+#else
+	cnt = dev->mc_count;
+#endif /* LINUX_VERSION_CODE */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+	netif_addr_unlock_bh(dev);
+#endif
+
+	/* Determine initial value of allmulti flag */
+	allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
+
+	/* Send down the multicast list first. */
+
+
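+	/* The "mcast_list" iovar buffer layout is: the NUL-terminated iovar
+	 * name, a 32-bit little-endian address count, then 'cnt' six-byte
+	 * MAC addresses.
+	 */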
+	buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
+	if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
+		DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
+		           dhd_ifname(&dhd->pub, ifidx), cnt));
+		return;
+	}
+
+	strncpy(bufp, "mcast_list", buflen - 1);
+	bufp[buflen - 1] = '\0';
+	bufp += strlen("mcast_list") + 1;
+
+	cnt = htol32(cnt);
+	memcpy(bufp, &cnt, sizeof(cnt));
+	bufp += sizeof(cnt);
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+	netif_addr_lock_bh(dev);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+	netdev_for_each_mc_addr(ha, dev) {
+		if (!cnt)
+			break;
+		memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
+		bufp += ETHER_ADDR_LEN;
+		cnt--;
+	}
+#else
+	for (mclist = dev->mc_list; (mclist && (cnt > 0));
+		cnt--, mclist = mclist->next) {
+		memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
+		bufp += ETHER_ADDR_LEN;
+	}
+#endif /* LINUX_VERSION_CODE */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+	netif_addr_unlock_bh(dev);
+#endif
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = buflen;
+	ioc.set = TRUE;
+
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
+			dhd_ifname(&dhd->pub, ifidx), cnt));
+		allmulti = cnt ? TRUE : allmulti;
+	}
+
+	MFREE(dhd->pub.osh, buf, buflen);
+
+	/* Now send the allmulti setting.  This is based on the setting in the
+	 * net_device flags, but might be modified above to be turned on if we
+	 * were trying to set some addresses and dongle rejected it...
+	 */
+
+	buflen = sizeof("allmulti") + sizeof(allmulti);
+	if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
+		DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
+		return;
+	}
+	allmulti = htol32(allmulti);
+
+	if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
+		DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
+		           dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
+		MFREE(dhd->pub.osh, buf, buflen);
+		return;
+	}
+
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = buflen;
+	ioc.set = TRUE;
+
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set allmulti %d failed\n",
+		           dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
+	}
+
+	MFREE(dhd->pub.osh, buf, buflen);
+
+	/* Finally, pick up the PROMISC flag as well, like the NIC driver does */
+
+	allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
+
+	allmulti = htol32(allmulti);
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = WLC_SET_PROMISC;
+	ioc.buf = &allmulti;
+	ioc.len = sizeof(allmulti);
+	ioc.set = TRUE;
+
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set promisc %d failed\n",
+		           dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
+	}
+}
+
+int
+_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
+{
+	char buf[32];
+	wl_ioctl_t ioc;
+	int ret;
+
+	if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
+		DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
+		return -1;
+	}
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = 32;
+	ioc.set = TRUE;
+
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (ret < 0) {
+		DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
+	} else {
+		memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
+		if (ifidx == 0)
+			memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
+	}
+
+	return ret;
+}
+
+#ifdef SOFTAP
+extern struct net_device *ap_net_dev;
+extern tsk_ctl_t ap_eth_ctl; /* ap netdev helper thread ctl */
+#endif
+
+static void
+dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+	dhd_if_event_t *if_event = event_info;
+	struct net_device *ndev;
+	int ifidx, bssidx;
+	int ret;
+
+	if (event != DHD_WQ_WORK_IF_ADD) {
+		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+		return;
+	}
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+		return;
+	}
+
+	if (!if_event) {
+		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
+		return;
+	}
+
+	dhd_net_if_lock_local(dhd);
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+
+	ifidx = if_event->event.ifidx;
+	bssidx = if_event->event.bssidx;
+	DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));
+
+	ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
+		if_event->mac, bssidx, TRUE);
+	if (!ndev) {
+		DHD_ERROR(("%s: net device alloc failed  \n", __FUNCTION__));
+		goto done;
+	}
+
+	ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
+	if (ret != BCME_OK) {
+		DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
+		dhd_remove_if(&dhd->pub, ifidx, TRUE);
+	}
+done:
+	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
+
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	dhd_net_if_unlock_local(dhd);
+}
+
+static void
+dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+	int ifidx;
+	dhd_if_event_t *if_event = event_info;
+
+
+	if (event != DHD_WQ_WORK_IF_DEL) {
+		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+		return;
+	}
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+		return;
+	}
+
+	if (!if_event) {
+		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
+		return;
+	}
+
+	dhd_net_if_lock_local(dhd);
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+
+	ifidx = if_event->event.ifidx;
+	DHD_TRACE(("Removing interface with idx %d\n", ifidx));
+
+	dhd_remove_if(&dhd->pub, ifidx, TRUE);
+
+	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
+
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	dhd_net_if_unlock_local(dhd);
+}
+
+static void
+dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+	dhd_if_t *ifp = event_info;
+
+#ifdef SOFTAP
+	unsigned long flags;
+	bool in_ap = FALSE;
+#endif
+
+	if (event != DHD_WQ_WORK_SET_MAC) {
+		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+		return;
+	}
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+		return;
+	}
+
+#ifdef SOFTAP
+	flags = dhd_os_spin_lock(&dhd->pub);
+	in_ap = (ap_net_dev != NULL);
+	dhd_os_spin_unlock(&dhd->pub, flags);
+
+	if (in_ap)  {
+		DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
+			ifp->net->name));
+		return;
+	}
+#endif
+	dhd_net_if_lock_local(dhd);
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+
+	if (ifp == NULL || !dhd->pub.up) {
+		DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
+		goto done;
+	}
+
+	DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
+	ifp->set_macaddress = FALSE;
+	if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
+		DHD_INFO(("%s: MACID is overwritten\n",	__FUNCTION__));
+	else
+		DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
+
+done:
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	dhd_net_if_unlock_local(dhd);
+}
+
+static void
+dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
+{
+	dhd_info_t *dhd = handle;
+	dhd_if_t *ifp = event_info;
+	int ifidx;
+
+#ifdef SOFTAP
+	bool in_ap = FALSE;
+	unsigned long flags;
+#endif
+
+	if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
+		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+		return;
+	}
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+		return;
+	}
+
+#ifdef SOFTAP
+	flags = dhd_os_spin_lock(&dhd->pub);
+	in_ap = (ap_net_dev != NULL);
+	dhd_os_spin_unlock(&dhd->pub, flags);
+
+	if (in_ap)  {
+		DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
+		ifp->net->name));
+		ifp->set_multicast = FALSE;
+		return;
+	}
+#endif
+
+	dhd_net_if_lock_local(dhd);
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+
+	if (ifp == NULL || !dhd->pub.up) {
+		DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
+		goto done;
+	}
+
+	ifidx = ifp->idx;
+
+
+	_dhd_set_multicast_list(dhd, ifidx);
+	DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));
+
+done:
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	dhd_net_if_unlock_local(dhd);
+}
+
+static int
+dhd_set_mac_address(struct net_device *dev, void *addr)
+{
+	int ret = 0;
+
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	struct sockaddr *sa = (struct sockaddr *)addr;
+	int ifidx;
+	dhd_if_t *dhdif;
+
+	ifidx = dhd_net2idx(dhd, dev);
+	if (ifidx == DHD_BAD_IF)
+		return -1;
+
+	dhdif = dhd->iflist[ifidx];
+
+	dhd_net_if_lock_local(dhd);
+	memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
+	dhdif->set_macaddress = TRUE;
+	dhd_net_if_unlock_local(dhd);
+	dhd_deferred_schedule_work((void *)dhdif, DHD_WQ_WORK_SET_MAC,
+		dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW);
+	return ret;
+}
+
+static void
+dhd_set_multicast_list(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ifidx;
+
+	ifidx = dhd_net2idx(dhd, dev);
+	if (ifidx == DHD_BAD_IF)
+		return;
+
+	dhd->iflist[ifidx]->set_multicast = TRUE;
+	dhd_deferred_schedule_work((void *)dhd->iflist[ifidx], DHD_WQ_WORK_SET_MCAST_LIST,
+		dhd_set_mcast_list_handler, DHD_WORK_PRIORITY_LOW);
+}
+
+#ifdef PROP_TXSTATUS
+int
+dhd_os_wlfc_block(dhd_pub_t *pub)
+{
+	dhd_info_t *di = (dhd_info_t *)(pub->info);
+	ASSERT(di != NULL);
+	spin_lock_bh(&di->wlfc_spinlock);
+	return 1;
+}
+
+int
+dhd_os_wlfc_unblock(dhd_pub_t *pub)
+{
+	dhd_info_t *di = (dhd_info_t *)(pub->info);
+
+	ASSERT(di != NULL);
+	spin_unlock_bh(&di->wlfc_spinlock);
+	return 1;
+}
+
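+/* Map an 802.1D priority (0-7) to a WMM access category: prio2fifo picks a
+ * precedence fifo for the priority and wme_fifo2ac folds the fifos down to
+ * the four ACs.
+ */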
+const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
+uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
+#define WME_PRIO2AC(prio)	wme_fifo2ac[prio2fifo[(prio)]]
+
+#endif /* PROP_TXSTATUS */
+int BCMFASTPATH
+dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
+{
+	int ret = BCME_OK;
+	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+	struct ether_header *eh = NULL;
+
+	/* Reject if down */
+	if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
+		/* free the packet here since the caller won't */
+		PKTFREE(dhdp->osh, pktbuf, TRUE);
+		return -ENODEV;
+	}
+
+	/* Update multicast statistic */
+	if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
+		uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
+		eh = (struct ether_header *)pktdata;
+
+		if (ETHER_ISMULTI(eh->ether_dhost))
+			dhdp->tx_multicast++;
+		if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
+			atomic_inc(&dhd->pend_8021x_cnt);
+	} else {
+		PKTFREE(dhd->pub.osh, pktbuf, TRUE);
+		return BCME_ERROR;
+	}
+
+#ifdef DHDTCPACK_SUPPRESS
+	/* If this packet has replaced another packet and got freed, just return */
+	if (dhd_tcpack_suppress(dhdp, pktbuf))
+		return ret;
+#endif /* DHDTCPACK_SUPPRESS */
+
+	/* Look into the packet and update the packet priority */
+#ifndef PKTPRIO_OVERRIDE
+	if (PKTPRIO(pktbuf) == 0)
+#endif /* !PKTPRIO_OVERRIDE */
+		pktsetprio(pktbuf, FALSE);
+
+#ifdef PROP_TXSTATUS
+	if (dhd_wlfc_is_supported(dhdp)) {
+		/* store the interface ID */
+		DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
+
+		/* store destination MAC in the tag as well */
+		DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
+
+		/* decide which FIFO this packet belongs to */
+		if (ETHER_ISMULTI(eh->ether_dhost))
+			/* one additional queue index (highest AC + 1) is used for bc/mc queue */
+			DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
+		else
+			DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
+	} else
+#endif /* PROP_TXSTATUS */
+	/* If the protocol uses a data header, apply it */
+	dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
+
+	/* Use bus module to send data frame */
+#ifdef WLMEDIA_HTSF
+	dhd_htsf_addtxts(dhdp, pktbuf);
+#endif
+#ifdef PROP_TXSTATUS
+	{
+		if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
+			dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
+			/* non-proptxstatus way */
+			ret = dhd_bus_txdata(dhdp->bus, pktbuf);
+		}
+	}
+#else
+#ifdef BCMPCIE
+	ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
+#else
+	ret = dhd_bus_txdata(dhdp->bus, pktbuf);
+#endif /* BCMPCIE */
+#endif /* PROP_TXSTATUS */
+
+	return ret;
+}
+
+int BCMFASTPATH
+dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
+{
+	int ret;
+	uint datalen;
+	void *pktbuf;
+	dhd_info_t *dhd  =  *(dhd_info_t **)netdev_priv(net);
+	dhd_if_t *ifp = NULL;
+	int ifidx;
+#ifdef WLMEDIA_HTSF
+	uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
+#else
+	uint8 htsfdlystat_sz = 0;
+#endif
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+
+	/* Reject if down */
+	if (dhd->pub.busstate == DHD_BUS_DOWN || dhd->pub.hang_was_sent ||
+		shutdown_in_progress == TRUE) {
+		DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
+			__FUNCTION__, dhd->pub.up, dhd->pub.busstate));
+		netif_stop_queue(net);
+		/* Send Event when bus down detected during data session */
+		if (dhd->pub.up && shutdown_in_progress != TRUE) {
+			DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
+			net_os_send_hang_message(net);
+		}
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+		return -ENODEV;
+#else
+		return NETDEV_TX_BUSY;
+#endif
+	}
+
+	ifidx = dhd_net2idx(dhd, net);
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
+		netif_stop_queue(net);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+		return -ENODEV;
+#else
+		return NETDEV_TX_BUSY;
+#endif
+	}
+
+	/* re-align socket buffer if "skb->data" is at an odd address */
+	if (((unsigned long)(skb->data)) & 0x1) {
+		unsigned char *data = skb->data;
+		uint32 length = skb->len;
+		PKTPUSH(dhd->pub.osh, skb, 1);
+		memmove(skb->data, data, length);
+		PKTSETLEN(dhd->pub.osh, skb, length);
+	}
+
+	ifp = dhd->iflist[ifidx];
+	datalen  = PKTLEN(dhd->pub.osh, skb);
+
+	/* Make sure there's enough room for any header */
+
+	if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
+		struct sk_buff *skb2;
+
+		DHD_INFO(("%s: insufficient headroom\n",
+		          dhd_ifname(&dhd->pub, ifidx)));
+		dhd->pub.tx_realloc++;
+
+		skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
+
+		dev_kfree_skb(skb);
+		if ((skb = skb2) == NULL) {
+			DHD_ERROR(("%s: skb_realloc_headroom failed\n",
+			           dhd_ifname(&dhd->pub, ifidx)));
+			ret = -ENOMEM;
+			goto done;
+		}
+	}
+
+	/* Convert to packet */
+	if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
+		DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
+		           dhd_ifname(&dhd->pub, ifidx)));
+		dev_kfree_skb_any(skb);
+		ret = -ENOMEM;
+		goto done;
+	}
+#ifdef WLMEDIA_HTSF
+	if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
+		uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
+		struct ether_header *eh = (struct ether_header *)pktdata;
+
+		if (!ETHER_ISMULTI(eh->ether_dhost) &&
+			(ntoh16(eh->ether_type) == ETHER_TYPE_IP)) {
+			eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
+		}
+	}
+#endif
+
+	ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
+
+done:
+	if (ret) {
+		ifp->stats.tx_dropped++;
+	}
+	else {
+		dhd->pub.tx_packets++;
+		ifp->stats.tx_packets++;
+		ifp->stats.tx_bytes += datalen;
+	}
+
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+	/* Return ok: we always eat the packet */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+	return 0;
+#else
+	return NETDEV_TX_OK;
+#endif
+}
+
+void
+dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
+{
+	struct net_device *net;
+	dhd_info_t *dhd = dhdp->info;
+	int i;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(dhd);
+
+	if (ifidx == ALL_INTERFACES) {
+		/* Flow control on all active interfaces */
+		dhdp->txoff = state;
+		for (i = 0; i < DHD_MAX_IFS; i++) {
+			if (dhd->iflist[i]) {
+				net = dhd->iflist[i]->net;
+				if (state == ON)
+					netif_stop_queue(net);
+				else
+					netif_wake_queue(net);
+			}
+		}
+	}
+	else {
+		if (dhd->iflist[ifidx]) {
+			net = dhd->iflist[ifidx]->net;
+			if (state == ON)
+				netif_stop_queue(net);
+			else
+				netif_wake_queue(net);
+		}
+	}
+}
+
+#ifdef DHD_RX_DUMP
+typedef struct {
+	uint16 type;
+	const char *str;
+} PKTTYPE_INFO;
+
+static const PKTTYPE_INFO packet_type_info[] =
+{
+	{ ETHER_TYPE_IP, "IP" },
+	{ ETHER_TYPE_ARP, "ARP" },
+	{ ETHER_TYPE_BRCM, "BRCM" },
+	{ ETHER_TYPE_802_1X, "802.1X" },
+	{ ETHER_TYPE_WAI, "WAPI" },
+	{ 0, ""}
+};
+
+static const char *_get_packet_type_str(uint16 type)
+{
+	int i;
+	int n = sizeof(packet_type_info)/sizeof(packet_type_info[0]) - 1;
+
+	for (i = 0; i < n; i++) {
+		if (packet_type_info[i].type == type)
+			return packet_type_info[i].str;
+	}
+
+	return packet_type_info[n].str;
+}
+#endif /* DHD_RX_DUMP */
+
+
+void
+dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+	struct sk_buff *skb;
+	uchar *eth;
+	uint len;
+	void *data, *pnext = NULL;
+	int i;
+	dhd_if_t *ifp;
+	wl_event_msg_t event;
+	int tout_rx = 0;
+	int tout_ctrl = 0;
+	void *skbhead = NULL;
+	void *skbprev = NULL;
+#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP)
+	char *dump_data;
+	uint16 protocol;
+#endif /* DHD_RX_DUMP || DHD_8021X_DUMP */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
+		struct ether_header *eh;
+
+		pnext = PKTNEXT(dhdp->osh, pktbuf);
+		PKTSETNEXT(dhdp->osh, pktbuf, NULL);
+
+		ifp = dhd->iflist[ifidx];
+		if (ifp == NULL) {
+			DHD_ERROR(("%s: ifp is NULL. drop packet\n",
+				__FUNCTION__));
+			PKTFREE(dhdp->osh, pktbuf, FALSE);
+			continue;
+		}
+		eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
+		/* Dropping only data packets before registering net device to avoid kernel panic */
+#ifndef PROP_TXSTATUS_VSDB
+		if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
+			(ntoh16(eh->ether_type) != ETHER_TYPE_BRCM)) {
+#else
+		if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
+			(ntoh16(eh->ether_type) != ETHER_TYPE_BRCM)) {
+#endif /* PROP_TXSTATUS_VSDB */
+			DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
+			__FUNCTION__));
+			PKTFREE(dhdp->osh, pktbuf, FALSE);
+			continue;
+		}
+
+
+#ifdef PROP_TXSTATUS
+		if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
+			/* WLFC may send a header-only packet when there is an
+			 * urgent message but no packet to piggy-back on.
+			 */
+			PKTFREE(dhdp->osh, pktbuf, FALSE);
+			continue;
+		}
+#endif
+#ifdef DHDTCPACK_SUPPRESS
+		dhd_tcpdata_info_get(dhdp, pktbuf);
+#endif
+		skb = PKTTONATIVE(dhdp->osh, pktbuf);
+
+		ifp = dhd->iflist[ifidx];
+		if (ifp == NULL)
+			ifp = dhd->iflist[0];
+
+		ASSERT(ifp);
+		skb->dev = ifp->net;
+
+
+		/* Get the protocol, preserving skb->data/len around
+		 * eth_type_trans(). This works around a Linux 2.4 limitation
+		 * where eth_type_trans() performs its internal skb_pull using
+		 * net->hard_header_len rather than ETH_HLEN. To avoid copying
+		 * packets coming from the network stack when prepending the
+		 * BDC/hardware headers, the interface is registered with
+		 * net->hard_header_len set to ETH_HLEN plus the extra space
+		 * those headers require, not just ETH_HLEN.
+		 */
+		eth = skb->data;
+		len = skb->len;
+
+#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP)
+		dump_data = skb->data;
+		protocol = (dump_data[12] << 8) | dump_data[13];
+
+		if (protocol == ETHER_TYPE_802_1X) {
+			DHD_ERROR(("ETHER_TYPE_802_1X: "
+				"ver %d, type %d, replay %d\n",
+				dump_data[14], dump_data[15],
+				dump_data[30]));
+		}
+#endif /* DHD_RX_DUMP || DHD_8021X_DUMP */
+#if defined(DHD_RX_DUMP)
+		DHD_ERROR(("RX DUMP - %s\n", _get_packet_type_str(protocol)));
+		if (protocol != ETHER_TYPE_BRCM) {
+			if (dump_data[0] == 0xFF) {
+				DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__));
+
+				if ((dump_data[12] == 8) &&
+					(dump_data[13] == 6)) {
+					DHD_ERROR(("%s: ARP %d\n",
+						__FUNCTION__, dump_data[0x15]));
+				}
+			} else if (dump_data[0] & 1) {
+				DHD_ERROR(("%s: MULTICAST: " MACDBG "\n",
+					__FUNCTION__, MAC2STRDBG(dump_data)));
+			}
+#ifdef DHD_RX_FULL_DUMP
+			{
+				int k;
+				for (k = 0; k < skb->len; k++) {
+					DHD_ERROR(("%02X ", dump_data[k]));
+					if ((k & 15) == 15)
+						DHD_ERROR(("\n"));
+				}
+				DHD_ERROR(("\n"));
+			}
+#endif /* DHD_RX_FULL_DUMP */
+		}
+#endif /* DHD_RX_DUMP */
+
+		skb->protocol = eth_type_trans(skb, skb->dev);
+
+		if (skb->pkt_type == PACKET_MULTICAST) {
+			dhd->pub.rx_multicast++;
+			ifp->stats.multicast++;
+		}
+
+		skb->data = eth;
+		skb->len = len;
+
+#ifdef WLMEDIA_HTSF
+		dhd_htsf_addrxts(dhdp, pktbuf);
+#endif
+		/* Strip header, count, deliver upward */
+		skb_pull(skb, ETH_HLEN);
+
+		/* Process special event packets and then discard them */
+		memset(&event, 0, sizeof(event));
+		if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
+			dhd_wl_host_event(dhd, &ifidx,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+			skb_mac_header(skb),
+#else
+			skb->mac.raw,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
+			&event,
+			&data);
+
+			wl_event_to_host_order(&event);
+			if (!tout_ctrl)
+				tout_ctrl = DHD_PACKET_TIMEOUT_MS;
+
+#if defined(PNO_SUPPORT)
+			if (event.event_type == WLC_E_PFN_NET_FOUND) {
+				/* enforce custom wake lock to guarantee that the kernel is not suspended */
+				tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
+			}
+#endif /* PNO_SUPPORT */
+
+#ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
+			PKTFREE(dhdp->osh, pktbuf, FALSE);
+			continue;
+#endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
+		} else {
+			tout_rx = DHD_PACKET_TIMEOUT_MS;
+		}
+
+		ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
+		ifp = dhd->iflist[ifidx];
+
+		if (ifp->net)
+			ifp->net->last_rx = jiffies;
+
+		if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
+			dhdp->dstats.rx_bytes += skb->len;
+			dhdp->rx_packets++; /* Local count */
+			ifp->stats.rx_bytes += skb->len;
+			ifp->stats.rx_packets++;
+		}
+
+		if (in_interrupt()) {
+			netif_rx(skb);
+		} else {
+			if (dhd->rxthread_enabled) {
+				if (!skbhead)
+					skbhead = skb;
+				else
+					PKTSETNEXT(dhdp->osh, skbprev, skb);
+				skbprev = skb;
+			} else {
+
+				/* If the receive is not processed inside an ISR,
+				 * the softirqd must be woken explicitly to service
+				 * the NET_RX_SOFTIRQ.	In 2.6 kernels, this is handled
+				 * by netif_rx_ni(), but in earlier kernels, we need
+				 * to do it manually.
+				 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+				netif_rx_ni(skb);
+#else
+				ulong flags;
+				netif_rx(skb);
+				local_irq_save(flags);
+				RAISE_RX_SOFTIRQ();
+				local_irq_restore(flags);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
+			}
+		}
+	}
+
+	if (dhd->rxthread_enabled && skbhead)
+		dhd_sched_rxf(dhdp, skbhead);
+
+	DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
+	DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
+}
+
+void
+dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
+{
+	/* Linux version has nothing to do */
+	return;
+}
+
+void
+dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+	struct ether_header *eh;
+	uint16 type;
+
+	dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
+
+	eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
+	type  = ntoh16(eh->ether_type);
+
+	if (type == ETHER_TYPE_802_1X)
+		atomic_dec(&dhd->pend_8021x_cnt);
+
+}
+
+static struct net_device_stats *
+dhd_get_stats(struct net_device *net)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+	dhd_if_t *ifp;
+	int ifidx;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ifidx = dhd_net2idx(dhd, net);
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
+
+		memset(&net->stats, 0, sizeof(net->stats));
+		return &net->stats;
+	}
+
+	ifp = dhd->iflist[ifidx];
+	ASSERT(dhd && ifp);
+
+	if (dhd->pub.up) {
+		/* Use the protocol to get dongle stats */
+		dhd_prot_dstats(&dhd->pub);
+	}
+	return &ifp->stats;
+}
+
+static int
+dhd_watchdog_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+	/* This thread doesn't need any user-level access,
+	 * so get rid of all our resources
+	 */
+	if (dhd_watchdog_prio > 0) {
+		struct sched_param param;
+		param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
+			dhd_watchdog_prio:(MAX_RT_PRIO-1);
+		setScheduler(current, SCHED_FIFO, &param);
+	}
+
+	while (1) {
+		if (down_interruptible(&tsk->sema) == 0) {
+			unsigned long flags;
+			unsigned long jiffies_at_start = jiffies;
+			unsigned long time_lapse;
+
+			SMP_RD_BARRIER_DEPENDS();
+			if (tsk->terminated) {
+				break;
+			}
+
+			dhd_os_sdlock(&dhd->pub);
+			if (dhd->pub.dongle_reset == FALSE) {
+				DHD_TIMER(("%s:\n", __FUNCTION__));
+
+				/* Call the bus module watchdog */
+				dhd_bus_watchdog(&dhd->pub);
+
+				flags = dhd_os_spin_lock(&dhd->pub);
+				/* Count the tick for reference */
+				dhd->pub.tickcnt++;
+				time_lapse = jiffies - jiffies_at_start;
+
+				/* Reschedule the watchdog, compensating for the
+				 * time this pass consumed (clamped so the delay
+				 * never goes negative)
+				 */
+				if (dhd->wd_timer_valid)
+					mod_timer(&dhd->timer,
+					jiffies +
+					msecs_to_jiffies(dhd_watchdog_ms) -
+					min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
+				dhd_os_spin_unlock(&dhd->pub, flags);
+			}
+			dhd_os_sdunlock(&dhd->pub);
+		} else {
+			break;
+		}
+	}
+
+	complete_and_exit(&tsk->completed, 0);
+}
+
+static void dhd_watchdog(ulong data)
+{
+	dhd_info_t *dhd = (dhd_info_t *)data;
+	unsigned long flags;
+
+	if (dhd->pub.dongle_reset) {
+		return;
+	}
+
+	if (dhd->thr_wdt_ctl.thr_pid >= 0) {
+		up(&dhd->thr_wdt_ctl.sema);
+		return;
+	}
+
+	dhd_os_sdlock(&dhd->pub);
+	/* Call the bus module watchdog */
+	dhd_bus_watchdog(&dhd->pub);
+
+	flags = dhd_os_spin_lock(&dhd->pub);
+	/* Count the tick for reference */
+	dhd->pub.tickcnt++;
+
+	/* Reschedule the watchdog */
+	if (dhd->wd_timer_valid)
+		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
+	dhd_os_spin_unlock(&dhd->pub, flags);
+	dhd_os_sdunlock(&dhd->pub);
+}
+
+#ifdef ENABLE_ADAPTIVE_SCHED
+/* DPC thread policy */
+static int dhd_dpc_policy = SCHED_FIFO;
+static void
+dhd_sched_policy(int prio)
+{
+	struct sched_param param;
+	if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
+		param.sched_priority = 0;
+		setScheduler(current, SCHED_NORMAL, &param);
+	} else {
+		if (get_scheduler_policy(current) != SCHED_FIFO) {
+			param.sched_priority = (prio < MAX_RT_PRIO) ? prio : (MAX_RT_PRIO-1);
+			setScheduler(current, dhd_dpc_policy, &param);
+		}
+	}
+}
+#endif /* ENABLE_ADAPTIVE_SCHED */
+#ifdef DEBUG_CPU_FREQ
+static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
+{
+	dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
+	struct cpufreq_freqs *freq = data;
+	if (dhd) {
+		if (!dhd->new_freq)
+			goto exit;
+		if (val == CPUFREQ_POSTCHANGE) {
+			DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
+				freq->new, freq->cpu));
+			*per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
+		}
+	}
+exit:
+	return 0;
+}
+#endif /* DEBUG_CPU_FREQ */
+static int
+dhd_dpc_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+
+	/* This thread doesn't need any user-level access,
+	 * so get rid of all our resources
+	 */
+	if (dhd_dpc_prio > 0)
+	{
+		struct sched_param param;
+		param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
+		setScheduler(current, SCHED_FIFO, &param);
+	}
+
+#ifdef CUSTOM_DPC_CPUCORE
+	set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
+#endif
+#ifdef CUSTOM_SET_CPUCORE
+	dhd->pub.current_dpc = current;
+#endif /* CUSTOM_SET_CPUCORE */
+
+	/* Run until signal received */
+	while (1) {
+		if (!binary_sema_down(tsk)) {
+#ifdef ENABLE_ADAPTIVE_SCHED
+			dhd_sched_policy(dhd_dpc_prio);
+#endif /* ENABLE_ADAPTIVE_SCHED */
+			SMP_RD_BARRIER_DEPENDS();
+			if (tsk->terminated) {
+				break;
+			}
+
+			/* Call bus dpc unless it indicated down (then clean stop) */
+			if (dhd->pub.busstate != DHD_BUS_DOWN) {
+				dhd_os_wd_timer_extend(&dhd->pub, TRUE);
+				while (dhd_bus_dpc(dhd->pub.bus)) {
+					/* process all data */
+				}
+				dhd_os_wd_timer_extend(&dhd->pub, FALSE);
+				DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+			} else {
+				if (dhd->pub.up)
+					dhd_bus_stop(dhd->pub.bus, TRUE);
+				DHD_OS_WAKE_UNLOCK(&dhd->pub);
+			}
+		}
+		else
+			break;
+	}
+
+	complete_and_exit(&tsk->completed, 0);
+}
+
+static int
+dhd_rxf_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+	dhd_pub_t *pub = &dhd->pub;
+#if defined(WAIT_DEQUEUE)
+#define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) /  */
+	ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
+#endif
+
+	/* This thread doesn't need any user-level access,
+	 * so get rid of all our resources
+	 */
+	if (dhd_rxf_prio > 0)
+	{
+		struct sched_param param;
+		param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
+		setScheduler(current, SCHED_FIFO, &param);
+	}
+
+	DAEMONIZE("dhd_rxf");
+	/* DHD_OS_WAKE_LOCK is called in dhd_sched_dpc[dhd_linux.c] down below  */
+
+	/*  signal: thread has started */
+	complete(&tsk->completed);
+#ifdef CUSTOM_SET_CPUCORE
+	dhd->pub.current_rxf = current;
+#endif /* CUSTOM_SET_CPUCORE */
+
+	/* Run until signal received */
+	while (1) {
+		if (down_interruptible(&tsk->sema) == 0) {
+			void *skb;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
+			ulong flags;
+#endif
+#ifdef ENABLE_ADAPTIVE_SCHED
+			dhd_sched_policy(dhd_rxf_prio);
+#endif /* ENABLE_ADAPTIVE_SCHED */
+
+			SMP_RD_BARRIER_DEPENDS();
+
+			if (tsk->terminated) {
+				break;
+			}
+			skb = dhd_rxf_dequeue(pub);
+
+			if (skb == NULL) {
+				continue;
+			}
+			while (skb) {
+				void *skbnext = PKTNEXT(pub->osh, skb);
+				PKTSETNEXT(pub->osh, skb, NULL);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+				netif_rx_ni(skb);
+#else
+				netif_rx(skb);
+				local_irq_save(flags);
+				RAISE_RX_SOFTIRQ();
+				local_irq_restore(flags);
+
+#endif
+				skb = skbnext;
+			}
+#if defined(WAIT_DEQUEUE)
+			if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
+				OSL_SLEEP(1);
+				watchdogTime = OSL_SYSUPTIME();
+			}
+#endif
+
+			DHD_OS_WAKE_UNLOCK(pub);
+		}
+		else
+			break;
+	}
+
+	complete_and_exit(&tsk->completed, 0);
+}
+
+static void
+dhd_dpc(ulong data)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)data;
+
+	/* This tasklet is scheduled from dhd_sched_dpc() below with the wake
+	 * lock held; the tasklet is initialized in dhd_attach().
+	 */
+	/* Call bus dpc unless it indicated down (then clean stop) */
+	if ((dhd->pub.busstate != DHD_BUS_DOWN) && (shutdown_in_progress != TRUE)) {
+		if (dhd_bus_dpc(dhd->pub.bus))
+			tasklet_schedule(&dhd->tasklet);
+		else
+			DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	} else {
+		dhd_bus_stop(dhd->pub.bus, TRUE);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	}
+}
+
+void
+dhd_sched_dpc(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+	DHD_OS_WAKE_LOCK(dhdp);
+	if (dhd->thr_dpc_ctl.thr_pid >= 0) {
+		/* If the semaphore cannot be raised, drop the wake lock here */
+		if (!binary_sema_up(&dhd->thr_dpc_ctl))
+			DHD_OS_WAKE_UNLOCK(dhdp);
+		return;
+	} else {
+		tasklet_schedule(&dhd->tasklet);
+	}
+}
+
+static void
+dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+#ifdef RXF_DEQUEUE_ON_BUSY
+	int ret = BCME_OK;
+	int retry = 2;
+#endif /* RXF_DEQUEUE_ON_BUSY */
+
+	DHD_OS_WAKE_LOCK(dhdp);
+
+	DHD_TRACE(("dhd_sched_rxf: Enter\n"));
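+	/* Hand the skb chain to the rxf thread. If the ring is full, either
+	 * retry a few times and fall back to netif_rx_ni() (when
+	 * RXF_DEQUEUE_ON_BUSY is set) or keep retrying until a slot frees up.
+	 */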
+#ifdef RXF_DEQUEUE_ON_BUSY
+	do {
+		ret = dhd_rxf_enqueue(dhdp, skb);
+		if (ret == BCME_OK || ret == BCME_ERROR)
+			break;
+		else
+			OSL_SLEEP(50); /* waiting for dequeueing */
+	} while (retry-- > 0);
+
+	if (retry <= 0 && ret == BCME_BUSY) {
+		void *skbp = skb;
+
+		while (skbp) {
+			void *skbnext = PKTNEXT(dhdp->osh, skbp);
+			PKTSETNEXT(dhdp->osh, skbp, NULL);
+			netif_rx_ni(skbp);
+			skbp = skbnext;
+		}
+		DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
+	}
+	else {
+		if (dhd->thr_rxf_ctl.thr_pid >= 0) {
+			up(&dhd->thr_rxf_ctl.sema);
+		}
+	}
+#else /* RXF_DEQUEUE_ON_BUSY */
+	do {
+		if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
+			break;
+	} while (1);
+	if (dhd->thr_rxf_ctl.thr_pid >= 0) {
+		up(&dhd->thr_rxf_ctl.sema);
+	}
+	return;
+#endif /* RXF_DEQUEUE_ON_BUSY */
+}
+
+#ifdef TOE
+/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
+static int
+dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
+{
+	wl_ioctl_t ioc;
+	char buf[32];
+	int ret;
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	ioc.cmd = WLC_GET_VAR;
+	ioc.buf = buf;
+	ioc.len = (uint)sizeof(buf);
+	ioc.set = FALSE;
+
+	strncpy(buf, "toe_ol", sizeof(buf) - 1);
+	buf[sizeof(buf) - 1] = '\0';
+	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+		/* Check for older dongle image that doesn't support toe_ol */
+		if (ret == -EIO) {
+			DHD_ERROR(("%s: toe not supported by device\n",
+				dhd_ifname(&dhd->pub, ifidx)));
+			return -EOPNOTSUPP;
+		}
+
+		DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
+		return ret;
+	}
+
+	memcpy(toe_ol, buf, sizeof(uint32));
+	return 0;
+}
+
+/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
+static int
+dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
+{
+	wl_ioctl_t ioc;
+	char buf[32];
+	int toe, ret;
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	ioc.cmd = WLC_SET_VAR;
+	ioc.buf = buf;
+	ioc.len = (uint)sizeof(buf);
+	ioc.set = TRUE;
+
+	/* Set toe_ol as requested */
+
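+	/* iovar buffer layout: the NUL-terminated name followed by the value */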
+	strncpy(buf, "toe_ol", sizeof(buf) - 1);
+	buf[sizeof(buf) - 1] = '\0';
+	memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32));
+
+	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+		DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
+			dhd_ifname(&dhd->pub, ifidx), ret));
+		return ret;
+	}
+
+	/* Enable toe globally only if any components are enabled. */
+
+	toe = (toe_ol != 0);
+
+	strcpy(buf, "toe");
+	memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32));
+
+	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+		DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
+		return ret;
+	}
+
+	return 0;
+}
+#endif /* TOE */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+static void
+dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+
+	snprintf(info->driver, sizeof(info->driver), "wl");
+	snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
+}
+
+struct ethtool_ops dhd_ethtool_ops = {
+	.get_drvinfo = dhd_ethtool_get_drvinfo
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
+static int
+dhd_ethtool(dhd_info_t *dhd, void *uaddr)
+{
+	struct ethtool_drvinfo info;
+	char drvname[sizeof(info.driver)];
+	uint32 cmd;
+#ifdef TOE
+	struct ethtool_value edata;
+	uint32 toe_cmpnt, csum_dir;
+	int ret;
+#endif
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* all ethtool calls start with a cmd word */
+	if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
+		return -EFAULT;
+
+	switch (cmd) {
+	case ETHTOOL_GDRVINFO:
+		/* Copy out any request driver name */
+		if (copy_from_user(&info, uaddr, sizeof(info)))
+			return -EFAULT;
+		strncpy(drvname, info.driver, sizeof(info.driver));
+		drvname[sizeof(info.driver)-1] = '\0';
+
+		/* clear struct for return */
+		memset(&info, 0, sizeof(info));
+		info.cmd = cmd;
+
+		/* if dhd requested, identify ourselves */
+		if (strcmp(drvname, "?dhd") == 0) {
+			snprintf(info.driver, sizeof(info.driver), "dhd");
+			strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
+			info.version[sizeof(info.version) - 1] = '\0';
+		}
+
+		/* otherwise, require dongle to be up */
+		else if (!dhd->pub.up) {
+			DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
+			return -ENODEV;
+		}
+
+		/* finally, report dongle driver type */
+		else if (dhd->pub.iswl)
+			snprintf(info.driver, sizeof(info.driver), "wl");
+		else
+			snprintf(info.driver, sizeof(info.driver), "xx");
+
+		snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
+		if (copy_to_user(uaddr, &info, sizeof(info)))
+			return -EFAULT;
+		DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
+		         (int)sizeof(drvname), drvname, info.driver));
+		break;
+
+#ifdef TOE
+	/* Get toe offload components from dongle */
+	case ETHTOOL_GRXCSUM:
+	case ETHTOOL_GTXCSUM:
+		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
+			return ret;
+
+		csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+		edata.cmd = cmd;
+		edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
+
+		if (copy_to_user(uaddr, &edata, sizeof(edata)))
+			return -EFAULT;
+		break;
+
+	/* Set toe offload components in dongle */
+	case ETHTOOL_SRXCSUM:
+	case ETHTOOL_STXCSUM:
+		if (copy_from_user(&edata, uaddr, sizeof(edata)))
+			return -EFAULT;
+
+		/* Read the current settings, update and write back */
+		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
+			return ret;
+
+		csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+		if (edata.data != 0)
+			toe_cmpnt |= csum_dir;
+		else
+			toe_cmpnt &= ~csum_dir;
+
+		if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
+			return ret;
+
+		/* If setting TX checksum mode, tell Linux the new mode */
+		if (cmd == ETHTOOL_STXCSUM) {
+			if (edata.data)
+				dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
+			else
+				dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
+		}
+
+		break;
+#endif /* TOE */
+
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
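+
+/*
+ * Usage sketch (illustrative only, not driver code): userspace can probe
+ * which driver owns an interface by issuing SIOCETHTOOL/ETHTOOL_GDRVINFO
+ * with info.driver pre-set to "?dhd"; dhd_ethtool() above then identifies
+ * itself as "dhd" with EPI_VERSION_STR without requiring the dongle to be
+ * up.
+ */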
+
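+/* Decide whether an ioctl error indicates a firmware hang: -ETIMEDOUT,
+ * -EREMOTEIO, or a bus that dropped to DHD_BUS_DOWN without an explicit
+ * dongle reset all cause a HANG event to be sent up to userspace.
+ */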
+static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
+{
+	dhd_info_t *dhd;
+
+	if (!dhdp) {
+		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+		return FALSE;
+	}
+
+	if (!dhdp->up)
+		return FALSE;
+
+	dhd = (dhd_info_t *)dhdp->info;
+#if !defined(BCMPCIE)
+	if (dhd->thr_dpc_ctl.thr_pid < 0) {
+		DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
+		return FALSE;
+	}
+#endif /* !BCMPCIE */
+
+	if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
+		((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
+		DHD_ERROR(("%s: Event HANG send up due to  re=%d te=%d e=%d s=%d\n", __FUNCTION__,
+			dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
+		net_os_send_hang_message(net);
+		return TRUE;
+	}
+	return FALSE;
+}
+
+int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
+{
+	int bcmerror = BCME_OK;
+	int buflen = 0;
+	struct net_device *net;
+
+	net = dhd_idx2net(pub, ifidx);
+	if (!net) {
+		bcmerror = BCME_BADARG;
+		goto done;
+	}
+
+	if (data_buf)
+		buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);
+
+	/* check for local dhd ioctl and handle it */
+	if (ioc->driver == DHD_IOCTL_MAGIC) {
+		bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
+		if (bcmerror)
+			pub->bcmerror = bcmerror;
+		goto done;
+	}
+
+
+	/* send to dongle (must be up, and wl). */
+	if (pub->busstate != DHD_BUS_DATA) {
+		bcmerror = BCME_DONGLE_DOWN;
+		goto done;
+	}
+
+	if (!pub->iswl) {
+		bcmerror = BCME_DONGLE_DOWN;
+		goto done;
+	}
+
+	/*
+	 * Flush the TX queue if required for proper message serialization:
+	 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
+	 * prevent M4 encryption and
+	 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
+	 * prevent disassoc frame being sent before WPS-DONE frame.
+	 */
+	if (ioc->cmd == WLC_SET_KEY ||
+	    (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
+	     strncmp("wsec_key", data_buf, 9) == 0) ||
+	    (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
+	     strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
+	    ioc->cmd == WLC_DISASSOC)
+		dhd_wait_pend8021x(net);
+
+	/*
+	 * Intel BZ-83276 : Intercept WLC_SET_VAR country for on-the-fly
+	 * country code update.
+	 */
+	if (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
+	    strncmp("country", data_buf, 8) == 0) {
+		/* update cfg80211 before updating fw; use the kernel copy of
+		 * the user buffer rather than the raw user pointer in ioc->buf
+		 */
+		wldev_set_country(net, (char *)data_buf + 8, true, true);
+	}
+
+#ifdef WLMEDIA_HTSF
+	if (data_buf) {
+		/*  short cut wl ioctl calls here  */
+		if (strcmp("htsf", data_buf) == 0) {
+			dhd_ioctl_htsf_get(dhd, 0);
+			return BCME_OK;
+		}
+
+		if (strcmp("htsflate", data_buf) == 0) {
+			if (ioc->set) {
+				memset(ts, 0, sizeof(tstamp_t)*TSMAX);
+				memset(&maxdelayts, 0, sizeof(tstamp_t));
+				maxdelay = 0;
+				tspktcnt = 0;
+				maxdelaypktno = 0;
+				memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
+				memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
+				memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
+				memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
+			} else {
+				dhd_dump_latency();
+			}
+			return BCME_OK;
+		}
+		if (strcmp("htsfclear", data_buf) == 0) {
+			memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
+			memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
+			memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
+			memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
+			htsf_seqnum = 0;
+			return BCME_OK;
+		}
+		if (strcmp("htsfhis", data_buf) == 0) {
+			dhd_dump_htsfhisto(&vi_d1, "H to D");
+			dhd_dump_htsfhisto(&vi_d2, "D to D");
+			dhd_dump_htsfhisto(&vi_d3, "D to H");
+			dhd_dump_htsfhisto(&vi_d4, "H to H");
+			return BCME_OK;
+		}
+		if (strcmp("tsport", data_buf) == 0) {
+			if (ioc->set) {
+				memcpy(&tsport, data_buf + 7, 4);
+			} else {
+				DHD_ERROR(("current timestamp port: %d \n", tsport));
+			}
+			return BCME_OK;
+		}
+	}
+#endif /* WLMEDIA_HTSF */
+
+	if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
+		data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
+#ifdef BCM_FD_AGGR
+		bcmerror = dhd_fdaggr_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
+#else
+		bcmerror = BCME_UNSUPPORTED;
+#endif
+		goto done;
+	}
+	bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
+
+done:
+	dhd_check_hang(net, pub, bcmerror);
+
+	return bcmerror;
+}
+
+static int
+dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+	dhd_ioctl_t ioc;
+	int bcmerror = 0;
+	int ifidx;
+	int ret;
+	void *local_buf = NULL;
+	u16 buflen = 0;
+
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+
+	/* send to dongle only if we are not waiting for reload already */
+	if (dhd->pub.hang_was_sent) {
+		DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
+		DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return OSL_ERROR(BCME_DONGLE_DOWN);
+	}
+
+	ifidx = dhd_net2idx(dhd, net);
+	DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
+
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return -1;
+	}
+
+#if defined(WL_WIRELESS_EXT)
+	/* linux wireless extensions */
+	if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
+		/* may recurse, do NOT lock */
+		ret = wl_iw_ioctl(net, ifr, cmd);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return ret;
+	}
+#endif /* defined(WL_WIRELESS_EXT) */
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
+	if (cmd == SIOCETHTOOL) {
+		ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return ret;
+	}
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
+
+	if (cmd == SIOCDEVPRIVATE+1) {
+		ret = wl_android_priv_cmd(net, ifr, cmd);
+		dhd_check_hang(net, &dhd->pub, ret);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return ret;
+	}
+
+	if (cmd != SIOCDEVPRIVATE) {
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return -EOPNOTSUPP;
+	}
+
+	memset(&ioc, 0, sizeof(ioc));
+
+#ifdef CONFIG_COMPAT
+	if (is_compat_task()) {
+		compat_wl_ioctl_t compat_ioc;
+		if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
+			bcmerror = BCME_BADADDR;
+			goto done;
+		}
+		ioc.cmd = compat_ioc.cmd;
+		ioc.buf = compat_ptr(compat_ioc.buf);
+		ioc.len = compat_ioc.len;
+		ioc.set = compat_ioc.set;
+		ioc.used = compat_ioc.used;
+		ioc.needed = compat_ioc.needed;
+		/* To differentiate between wl and dhd read 4 more bytes */
+		if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
+			sizeof(uint)) != 0)) {
+			bcmerror = BCME_BADADDR;
+			goto done;
+		}
+	} else
+#endif /* CONFIG_COMPAT */
+	{
+		/* Copy the ioc control structure part of ioctl request */
+		if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
+			bcmerror = BCME_BADADDR;
+			goto done;
+		}
+
+		/* To differentiate between wl and dhd read 4 more bytes */
+		if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
+			sizeof(uint)) != 0)) {
+			bcmerror = BCME_BADADDR;
+			goto done;
+		}
+	}
+
+	if (!capable(CAP_NET_ADMIN)) {
+		bcmerror = BCME_EPERM;
+		goto done;
+	}
+
+	if (ioc.len > 0) {
+		buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
+		if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
+			bcmerror = BCME_NOMEM;
+			goto done;
+		}
+		if (copy_from_user(local_buf, ioc.buf, buflen)) {
+			bcmerror = BCME_BADADDR;
+			goto done;
+		}
+		*(char *)(local_buf + buflen) = '\0';
+	}
+
+	bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
+
+	if (!bcmerror && buflen && local_buf && ioc.buf) {
+		if (copy_to_user(ioc.buf, local_buf, buflen))
+			bcmerror = -EFAULT;
+	}
+
+done:
+	if (local_buf)
+		MFREE(dhd->pub.osh, local_buf, buflen+1);
+
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+	return OSL_ERROR(bcmerror);
+}
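+
+/*
+ * For reference: the SIOCDEVPRIVATE payload handled above is laid out as a
+ * wl_ioctl_t (or compat_wl_ioctl_t for 32-bit callers) immediately followed
+ * by a uint 'driver' word; DHD_IOCTL_MAGIC in that trailing word routes the
+ * request to the local dhd_ioctl() handler instead of the dongle.
+ */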
+
+
+
+static int
+dhd_stop(struct net_device *net)
+{
+	int ifidx = 0;
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_TRACE(("%s: Enter %p\n", __FUNCTION__, net));
+	if (dhd->pub.up == 0) {
+		goto exit;
+	}
+
+
+	ifidx = dhd_net2idx(dhd, net);
+	BCM_REFERENCE(ifidx);
+
+	/* Set state and stop OS transmissions */
+	netif_stop_queue(net);
+	dhd->pub.up = 0;
+
+#ifdef ENABLE_CONTROL_SCHED
+	dhd_sysfs_destroy_node(net);
+	dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
+	dhd_dpc_poli = SCHED_FIFO;
+#endif	/* ENABLE_CONTROL_SCHED */
+
+#ifdef WL_CFG80211
+	if (ifidx == 0) {
+		wl_cfg80211_down(NULL);
+
+		/*
+		 * For CFG80211: Clean up all the left over virtual interfaces
+		 * when the primary Interface is brought down. [ifconfig wlan0 down]
+		 */
+		if (!dhd_download_fw_on_driverload) {
+			if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
+				(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
+				int i;
+
+				dhd_net_if_lock_local(dhd);
+				for (i = 1; i < DHD_MAX_IFS; i++)
+					dhd_remove_if(&dhd->pub, i, FALSE);
+				dhd_net_if_unlock_local(dhd);
+			}
+		}
+	}
+#endif /* WL_CFG80211 */
+
+#ifdef PROP_TXSTATUS
+	dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
+#endif
+	/* Stop the protocol module */
+	dhd_prot_stop(&dhd->pub);
+
+	OLD_MOD_DEC_USE_COUNT;
+exit:
+#if defined(WL_CFG80211)
+	if (ifidx == 0 && !dhd_download_fw_on_driverload)
+		wl_android_wifi_off(net);
+#endif /* WL_CFG80211 */
+	dhd->pub.rxcnt_timeout = 0;
+	dhd->pub.txcnt_timeout = 0;
+
+	dhd->pub.hang_was_sent = 0;
+
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	return 0;
+}
+
+#if defined(WL_CFG80211) && (defined(USE_INITIAL_2G_SCAN) || \
+	defined(USE_INITIAL_SHORT_DWELL_TIME))
+extern bool g_first_broadcast_scan;
+#endif /* WL_CFG80211 && (USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME) */
+
+#ifdef WL11U
+static int dhd_interworking_enable(dhd_pub_t *dhd)
+{
+	char iovbuf[WLC_IOCTL_SMLEN];
+	uint32 enable = true;
+	int ret = BCME_OK;
+
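+	/* bcm_mkiovar() packs the iovar name (including its terminating NUL)
+	 * followed by the raw value into iovbuf, so the request below is laid
+	 * out as "interworking\0<4-byte enable>".
+	 */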
+	bcm_mkiovar("interworking", (char *)&enable, sizeof(enable), iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s: enableing interworking failed, ret=%d\n", __FUNCTION__, ret));
+	}
+
+	if (ret == BCME_OK) {
+		/* basic capabilities for HS20 REL2 */
+		uint32 cap = WL_WNM_BSSTRANS | WL_WNM_NOTIF;
+		bcm_mkiovar("wnm", (char *)&cap, sizeof(cap), iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+			iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s: failed to set WNM info, ret=%d\n", __FUNCTION__, ret));
+		}
+	}
+
+	return ret;
+}
+#endif /* WL11U */
+
+#ifdef ENABLE_CONTROL_SCHED
+static ssize_t
+read_dpc_prio(struct file *filp, struct kobject *kobj,
+	struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
+{
+	DHD_INFO(("%s: read cur_prio(%d), cur_poli(%d)\n",
+		__FUNCTION__, dhd_dpc_prio, dhd_dpc_poli));
+	memset(buf, 0, sizeof(uint32));
+	/* return only the number of bytes written, not count plus that value */
+	return snprintf(buf, sizeof(uint32), "%d", dhd_dpc_prio + (dhd_dpc_poli * 100));
+}
+
+static ssize_t
+write_dpc_prio(struct file *filp, struct kobject *kobj,
+	struct bin_attribute *bin_attr, char *buf, loff_t off, size_t size)
+{
+	uint32 new_prio = 0;
+	uint32 cur_prio = dhd_dpc_prio + (dhd_dpc_poli * 100);
+
+	memcpy(&new_prio, buf, sizeof(uint32));
+	new_prio = bcm_atoi((char*)&new_prio);
+
+	/* new_prio is unsigned, so a negative write shows up here as > 199 */
+	if (new_prio > 199) {
+		DHD_ERROR(("%s: ERROR invalid new_prio(%d)\n",
+			__FUNCTION__, new_prio));
+		return -EINVAL;
+	}
+
+	if (cur_prio != new_prio) {
+		dhd_dpc_prio = new_prio % 100;
+		dhd_dpc_poli = new_prio / 100;
+		DHD_INFO(("%s: write new_prio(%d), new_policy(%d)\n",
+			__FUNCTION__, dhd_dpc_prio, dhd_dpc_poli));
+	}
+
+	return sizeof(uint32);
+}
+
+static struct bin_attribute dpc_prio_attr = {
+	.attr  = {.name = "dpc_prio", .mode = 0644},
+	.size  = sizeof(uint32),
+	.read  = read_dpc_prio,
+	.write = write_dpc_prio,
+};
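+
+/*
+ * Example (typical usage, for illustration): the exported value encodes both
+ * settings as prio + policy * 100, so
+ *
+ *	echo 150 > /sys/class/net/<ifname>/dpc_prio
+ *
+ * requests scheduling policy 1 with priority 50, and reading the file back
+ * returns the same encoded number.
+ */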
+
+static int
+dhd_sysfs_create_node(struct net_device *net)
+{
+	int ret;
+
+	DHD_INFO(("%s Enter\n", __func__));
+	ret = sysfs_create_bin_file(&net->dev.kobj, &dpc_prio_attr);
+
+	if (ret) {
+		DHD_ERROR(("%s: create dpc_prio sysfs fail, ret(%d)\n", __FUNCTION__, ret));
+		return ret;
+	}
+	return 0;
+}
+
+static void
+dhd_sysfs_destroy_node(struct net_device *net)
+{
+	DHD_INFO(("%s Enter\n", __FUNCTION__));
+	sysfs_remove_bin_file(&net->dev.kobj, &dpc_prio_attr);
+}
+#endif /* ENABLE_CONTROL_SCHED */
+
+
+static int
+dhd_open(struct net_device *net)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+#ifdef TOE
+	uint32 toe_ol;
+#endif
+	int ifidx;
+	int32 ret = 0;
+
+	/* Wait for the end of module insertion, which does a power down */
+	dhd_wifi_platform_load_lock();
+
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	dhd->pub.dongle_trap_occured = 0;
+	dhd->pub.hang_was_sent = 0;
+
+#ifdef ENABLE_CONTROL_SCHED
+	ret = dhd_sysfs_create_node(net);
+	if (ret) {
+		DHD_ERROR(("%s: Failed to setup dpc_prio ret(%d)\n", __FUNCTION__, ret));
+		goto exit;
+	}
+#endif /* ENABLE_CONTROL_SCHED */
+
+#if !defined(WL_CFG80211)
+	/*
+	 * Force start if ifconfig_up gets called before START command
+	 *  We keep WEXT's wl_control_wl_start to provide backward compatibility
+	 *  This should be removed in the future
+	 */
+	ret = wl_control_wl_start(net);
+	if (ret != 0) {
+		DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
+		ret = -1;
+		goto exit;
+	}
+
+#endif /* !WL_CFG80211 */
+
+	ifidx = dhd_net2idx(dhd, net);
+	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
+
+	if (ifidx < 0) {
+		DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
+		ret = -1;
+		goto exit;
+	}
+
+	if (!dhd->iflist[ifidx]) {
+		DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
+		ret = -1;
+		goto exit;
+	}
+
+	if (ifidx == 0) {
+		atomic_set(&dhd->pend_8021x_cnt, 0);
+#if defined(WL_CFG80211)
+		if (!dhd_download_fw_on_driverload) {
+			DHD_ERROR(("\n%s\n", dhd_version));
+#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
+			g_first_broadcast_scan = TRUE;
+#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
+			ret = wl_android_wifi_on(net);
+			if (ret != 0) {
+				DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
+					__FUNCTION__, ret));
+				ret = -1;
+				goto exit;
+			}
+		}
+#endif /* WL_CFG80211 */
+
+		if (dhd->pub.busstate != DHD_BUS_DATA) {
+
+			/* try to bring up bus */
+			if ((ret = dhd_bus_start(&dhd->pub)) != 0) {
+				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
+				ret = -1;
+				goto exit;
+			}
+
+		}
+
+		/* dhd_prot_init has been called in dhd_bus_start or wl_android_wifi_on */
+		memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
+
+#ifdef TOE
+		/* Get current TOE mode from dongle */
+		if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0)
+			dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
+		else
+			dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
+#endif /* TOE */
+
+#if defined(WL_CFG80211)
+		if (unlikely(wl_cfg80211_up(NULL))) {
+			DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
+			ret = -1;
+			goto exit;
+		}
+#endif /* WL_CFG80211 */
+	}
+
+	/* Allow transmit calls */
+	netif_start_queue(net);
+	dhd->pub.up = 1;
+
+#ifdef BCMDBGFS
+	dhd_dbg_init(&dhd->pub);
+#endif
+
+	OLD_MOD_INC_USE_COUNT;
+exit:
+	if (ret)
+		dhd_stop(net);
+
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+	dhd_wifi_platform_load_unlock();
+
+	return ret;
+}
+
+int dhd_do_driver_init(struct net_device *net)
+{
+	dhd_info_t *dhd = NULL;
+
+	if (!net) {
+		DHD_ERROR(("Primary Interface not initialized \n"));
+		return -EINVAL;
+	}
+
+
+	dhd = *(dhd_info_t **)netdev_priv(net);
+
+	/* If driver is already initialized, do nothing */
+	if (dhd->pub.busstate == DHD_BUS_DATA) {
+		DHD_TRACE(("Driver already initialized. Nothing to do\n"));
+		return 0;
+	}
+
+	if (dhd_open(net) < 0) {
+		DHD_ERROR(("Driver Init Failed \n"));
+		return -1;
+	}
+
+	return 0;
+}
+
+int
+dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
+{
+
+#ifdef WL_CFG80211
+	if (wl_cfg80211_notify_ifadd(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
+		return BCME_OK;
+#endif
+
+	/* handle IF event caused by wl commands, SoftAP, WEXT and
+	 * anything else. This has to be done asynchronously otherwise
+	 * DPC will be blocked (and iovars will timeout as DPC has no chance
+	 * to read the response back)
+	 */
+	if (ifevent->ifidx > 0) {
+		dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
+
+		if (if_event == NULL) {
+			DHD_ERROR(("%s: OOM - dhd_if_event_t\n", __FUNCTION__));
+			return BCME_NOMEM;
+		}
+		memcpy(&if_event->event, ifevent, sizeof(if_event->event));
+		memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
+		strncpy(if_event->name, name, IFNAMSIZ);
+		if_event->name[IFNAMSIZ - 1] = '\0';
+		dhd_deferred_schedule_work((void *)if_event, DHD_WQ_WORK_IF_ADD,
+			dhd_ifadd_event_handler, DHD_WORK_PRIORITY_LOW);
+	}
+
+	return BCME_OK;
+}
+
+int
+dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
+{
+	dhd_if_event_t *if_event;
+
+#ifdef WL_CFG80211
+	if (wl_cfg80211_notify_ifdel(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
+		return BCME_OK;
+#endif /* WL_CFG80211 */
+
+	/* handle IF event caused by wl commands, SoftAP, WEXT and
+	 * anything else
+	 */
+	if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
+	if (if_event == NULL) {
+		DHD_ERROR(("%s: OOM - dhd_if_event_t\n", __FUNCTION__));
+		return BCME_NOMEM;
+	}
+	memcpy(&if_event->event, ifevent, sizeof(if_event->event));
+	memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
+	strncpy(if_event->name, name, IFNAMSIZ);
+	if_event->name[IFNAMSIZ - 1] = '\0';
+	dhd_deferred_schedule_work((void *)if_event, DHD_WQ_WORK_IF_DEL,
+		dhd_ifdel_event_handler, DHD_WORK_PRIORITY_LOW);
+
+	return BCME_OK;
+}
+
+/* unregister and free the existing net_device interface (if any) in iflist and
+ * allocate a new one. the slot is reused. this function does NOT register the
+ * new interface to linux kernel. dhd_register_if does the job
+ */
+struct net_device*
+dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name,
+	uint8 *mac, uint8 bssidx, bool need_rtnl_lock)
+{
+	dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
+	dhd_if_t *ifp;
+
+	ASSERT(dhdinfo && (ifidx < DHD_MAX_IFS));
+	ifp = dhdinfo->iflist[ifidx];
+
+	if (ifp != NULL) {
+		if (ifp->net != NULL) {
+			DHD_ERROR(("%s: free existing IF %s\n", __FUNCTION__, ifp->net->name));
+
+			/* in unregister_netdev case, the interface gets freed by net->destructor
+			 * (which is set to free_netdev)
+			 */
+			if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
+				free_netdev(ifp->net);
+			} else {
+				netif_stop_queue(ifp->net);
+				if (need_rtnl_lock)
+					unregister_netdev(ifp->net);
+				else
+					unregister_netdevice(ifp->net);
+			}
+			ifp->net = NULL;
+		}
+	} else {
+		ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
+		if (ifp == NULL) {
+			DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
+			return NULL;
+		}
+	}
+
+	memset(ifp, 0, sizeof(dhd_if_t));
+	ifp->info = dhdinfo;
+	ifp->idx = ifidx;
+	ifp->bssidx = bssidx;
+	if (mac != NULL)
+		memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
+
+	/* Allocate etherdev, including space for private structure */
+	ifp->net = alloc_etherdev(sizeof(dhdinfo));
+	if (ifp->net == NULL) {
+		DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));
+		goto fail;
+	}
+	memcpy(netdev_priv(ifp->net), &dhdinfo, sizeof(dhdinfo));
+	if (name && name[0]) {
+		strncpy(ifp->net->name, name, IFNAMSIZ);
+		ifp->net->name[IFNAMSIZ - 1] = '\0';
+	}
+#ifdef WL_CFG80211
+	if (ifidx == 0)
+		ifp->net->destructor = free_netdev;
+	else
+		ifp->net->destructor = dhd_netdev_free;
+#else
+	ifp->net->destructor = free_netdev;
+#endif /* WL_CFG80211 */
+	strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
+	ifp->name[IFNAMSIZ - 1] = '\0';
+	dhdinfo->iflist[ifidx] = ifp;
+	return ifp->net;
+
+fail:
+	if (ifp != NULL) {
+		if (ifp->net != NULL) {
+			free_netdev(ifp->net);
+			ifp->net = NULL;
+		}
+		MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
+		ifp = NULL;
+	}
+	dhdinfo->iflist[ifidx] = NULL;
+	return NULL;
+}
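+
+/* Note on the private area: alloc_etherdev() above is sized for a single
+ * pointer and dhd_allocate_if() stores the dhd_info_t pointer there, which
+ * is why accessors throughout this file recover it with
+ * '*(dhd_info_t **)netdev_priv(net)'.
+ */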
+
+/* unregister and free the net_device interface associated with the indexed
+ * slot, also free the slot memory and set the slot pointer to NULL
+ */
+int
+dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
+{
+	dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
+	dhd_if_t *ifp;
+
+	ifp = dhdinfo->iflist[ifidx];
+	if (ifp != NULL) {
+		if (ifp->net != NULL) {
+			DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
+
+			/* in unregister_netdev case, the interface gets freed by net->destructor
+			 * (which is set to free_netdev)
+			 */
+			if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
+				free_netdev(ifp->net);
+			} else {
+				netif_stop_queue(ifp->net);
+
+				if (need_rtnl_lock)
+					unregister_netdev(ifp->net);
+				else
+					unregister_netdevice(ifp->net);
+			}
+			ifp->net = NULL;
+		}
+
+		dhdinfo->iflist[ifidx] = NULL;
+		MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
+	}
+
+	return BCME_OK;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+static struct net_device_ops dhd_ops_pri = {
+	.ndo_open = dhd_open,
+	.ndo_stop = dhd_stop,
+	.ndo_get_stats = dhd_get_stats,
+	.ndo_do_ioctl = dhd_ioctl_entry,
+	.ndo_start_xmit = dhd_start_xmit,
+	.ndo_set_mac_address = dhd_set_mac_address,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+	.ndo_set_rx_mode = dhd_set_multicast_list,
+#else
+	.ndo_set_multicast_list = dhd_set_multicast_list,
+#endif
+};
+
+static struct net_device_ops dhd_ops_virt = {
+	.ndo_get_stats = dhd_get_stats,
+	.ndo_do_ioctl = dhd_ioctl_entry,
+	.ndo_start_xmit = dhd_start_xmit,
+	.ndo_set_mac_address = dhd_set_mac_address,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+	.ndo_set_rx_mode = dhd_set_multicast_list,
+#else
+	.ndo_set_multicast_list = dhd_set_multicast_list,
+#endif
+};
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
+
+#ifdef DEBUGGER
+extern void debugger_init(void *bus_handle);
+#endif
+
+
+dhd_pub_t *
+dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
+{
+	dhd_info_t *dhd = NULL;
+	struct net_device *net = NULL;
+	char if_name[IFNAMSIZ] = {'\0'};
+	uint32 bus_type = -1;
+	uint32 bus_num = -1;
+	uint32 slot_num = -1;
+	wifi_adapter_info_t *adapter = NULL;
+
+	dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* will implement get_ids for DBUS later */
+	dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
+	adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
+
+	/* Allocate primary dhd_info */
+	dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
+	if (dhd == NULL) {
+		dhd = MALLOC(osh, sizeof(dhd_info_t));
+		if (dhd == NULL) {
+			DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
+			goto fail;
+		}
+	}
+	memset(dhd, 0, sizeof(dhd_info_t));
+	dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
+
+	dhd->pub.osh = osh;
+	dhd->adapter = adapter;
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+	wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet);
+#endif /* GET_CUSTOM_MAC_ENABLE */
+	dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
+	dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
+
+	/* Initialize thread based operation and lock */
+	sema_init(&dhd->sdsem, 1);
+
+	/* Link to info module */
+	dhd->pub.info = dhd;
+	/* Link to bus module */
+	dhd->pub.bus = bus;
+	dhd->pub.hdrlen = bus_hdrlen;
+
+	/* Some DHD modules (e.g. cfg80211) configure the operation mode based on the firmware
+	 * name. This is indeed a hack, but we have to make it work properly until we have a
+	 * better solution.
+	 */
+	dhd_update_fw_nv_path(dhd);
+
+	/* Set network interface name if it was provided as module parameter */
+	if (iface_name[0]) {
+		int len;
+		char ch;
+		strncpy(if_name, iface_name, IFNAMSIZ);
+		if_name[IFNAMSIZ - 1] = 0;
+		len = strlen(if_name);
+		ch = if_name[len - 1];
+		if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
+			strcat(if_name, "%d");
+	}
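+	/* If the provided name does not end in a digit, the "%d" template
+	 * appended above lets register_netdev() pick the next free unit:
+	 * e.g. iface_name "wlan" becomes "wlan%d" and comes up as wlan0,
+	 * wlan1, ...
+	 */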
+	net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE);
+	if (net == NULL)
+		goto fail;
+	dhd_state |= DHD_ATTACH_STATE_ADD_IF;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+	net->open = NULL;
+#else
+	net->netdev_ops = NULL;
+#endif
+
+	sema_init(&dhd->proto_sem, 1);
+
+#ifdef PROP_TXSTATUS
+	spin_lock_init(&dhd->wlfc_spinlock);
+
+	dhd->pub.skip_fc = dhd_wlfc_skip_fc;
+	dhd->pub.plat_init = dhd_wlfc_plat_init;
+	dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
+#endif /* PROP_TXSTATUS */
+
+	/* Initialize other structure content */
+	init_waitqueue_head(&dhd->ioctl_resp_wait);
+	init_waitqueue_head(&dhd->ctrl_wait);
+
+	/* Initialize the spinlocks */
+	spin_lock_init(&dhd->sdlock);
+	spin_lock_init(&dhd->txqlock);
+	spin_lock_init(&dhd->dhd_lock);
+	spin_lock_init(&dhd->rxf_lock);
+#if defined(RXFRAME_THREAD)
+	dhd->rxthread_enabled = TRUE;
+#endif /* defined(RXFRAME_THREAD) */
+
+#ifdef DHDTCPACK_SUPPRESS
+	spin_lock_init(&dhd->tcpack_lock);
+#endif /* DHDTCPACK_SUPPRESS */
+
+	/* Initialize Wakelock stuff */
+	spin_lock_init(&dhd->wakelock_spinlock);
+	dhd->wakelock_counter = 0;
+	dhd->wakelock_wd_counter = 0;
+	dhd->wakelock_rx_timeout_enable = 0;
+	dhd->wakelock_ctrl_timeout_enable = 0;
+	dhd->waive_wakelock = FALSE;
+#ifdef CONFIG_HAS_WAKELOCK
+	wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
+	wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
+	wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
+	wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
+#endif /* CONFIG_HAS_WAKELOCK */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_init(&dhd->dhd_net_if_mutex);
+	mutex_init(&dhd->dhd_suspend_mutex);
+#endif
+	dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
+
+	/* Attach and link in the protocol */
+	if (dhd_prot_attach(&dhd->pub) != 0) {
+		DHD_ERROR(("dhd_prot_attach failed\n"));
+		goto fail;
+	}
+	dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
+
+#ifdef WL_CFG80211
+	/* Attach and link in the cfg80211 */
+	if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
+		DHD_ERROR(("wl_cfg80211_attach failed\n"));
+		goto fail;
+	}
+
+	dhd_monitor_init(&dhd->pub);
+	dhd_state |= DHD_ATTACH_STATE_CFG80211;
+#endif
+#if defined(WL_WIRELESS_EXT)
+	/* Attach and link in the iw */
+	if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) {
+		if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
+			DHD_ERROR(("wl_iw_attach failed\n"));
+			goto fail;
+		}
+		dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
+	}
+#endif /* defined(WL_WIRELESS_EXT) */
+
+
+	/* Set up the watchdog timer */
+	init_timer(&dhd->timer);
+	dhd->timer.data = (ulong)dhd;
+	dhd->timer.function = dhd_watchdog;
+	dhd->default_wd_interval = dhd_watchdog_ms;
+
+	if (dhd_watchdog_prio >= 0) {
+		/* Initialize watchdog thread */
+		PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
+
+	} else {
+		dhd->thr_wdt_ctl.thr_pid = -1;
+	}
+
+#ifdef DEBUGGER
+	debugger_init((void *) bus);
+#endif
+
+	/* Set up the bottom half handler */
+	if (dhd_dpc_prio >= 0) {
+		/* Initialize DPC thread */
+		PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
+	} else {
+		/*  use tasklet for dpc */
+		tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
+		dhd->thr_dpc_ctl.thr_pid = -1;
+	}
+
+	if (dhd->rxthread_enabled) {
+		bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
+		/* Initialize RXF thread */
+		PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
+	}
+
+	dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
+
+	/*
+	 * Save the dhd_info into the priv
+	 */
+	memcpy(netdev_priv(net), &dhd, sizeof(dhd));
+
+#if defined(CONFIG_PM_SLEEP)
+	dhd->pm_notifier.notifier_call = dhd_pm_callback;
+	dhd->pm_notifier.priority = 10;
+	if (!dhd_pm_notifier_registered) {
+		dhd_pm_notifier_registered = TRUE;
+		register_pm_notifier(&dhd->pm_notifier);
+	}
+#endif /* CONFIG_PM_SLEEP */
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+	dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
+	dhd->early_suspend.suspend = dhd_early_suspend;
+	dhd->early_suspend.resume = dhd_late_resume;
+	register_early_suspend(&dhd->early_suspend);
+	dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	dhd->pend_ipaddr = 0;
+	if (!dhd_inetaddr_notifier_registered) {
+		dhd_inetaddr_notifier_registered = TRUE;
+		register_inetaddr_notifier(&dhd_inetaddr_notifier);
+	}
+#endif /* ARP_OFFLOAD_SUPPORT */
+	if (!dhd_inet6addr_notifier_registered) {
+		dhd_inet6addr_notifier_registered = TRUE;
+		register_inet6addr_notifier(&dhd_inet6addr_notifier);
+	}
+	dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
+#ifdef DEBUG_CPU_FREQ
+	dhd->new_freq = alloc_percpu(int);
+	dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
+	cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+#ifdef DHDTCPACK_SUPPRESS
+	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX);
+#endif /* DHDTCPACK_SUPPRESS */
+
+	dhd_state |= DHD_ATTACH_STATE_DONE;
+	dhd->dhd_state = dhd_state;
+
+	dhd->unit = dhd_found + instance_base;
+	dhd_found++;
+	return &dhd->pub;
+
+fail:
+	if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
+		DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
+			__FUNCTION__, dhd_state, &dhd->pub));
+		dhd->dhd_state = dhd_state;
+		dhd_detach(&dhd->pub);
+		dhd_free(&dhd->pub);
+	}
+
+	return NULL;
+}
+
+int dhd_get_fw_mode(dhd_info_t *dhdinfo)
+{
+	if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
+		return DHD_FLAG_HOSTAP_MODE;
+	if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
+		return DHD_FLAG_P2P_MODE;
+	if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
+		return DHD_FLAG_IBSS_MODE;
+	if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
+		return DHD_FLAG_MFG_MODE;
+
+	return DHD_FLAG_STA_MODE;
+}
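+
+/* Example: a fw_path of "/vendor/firmware/fw_bcmdhd_apsta.bin" matches the
+ * "_apsta" substring above and selects DHD_FLAG_HOSTAP_MODE; a name with
+ * none of the recognized suffixes falls through to DHD_FLAG_STA_MODE.
+ */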
+
+bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
+{
+	int fw_len;
+	int nv_len;
+	const char *fw = NULL;
+	const char *nv = NULL;
+	bool bconcat = FALSE;
+	wifi_adapter_info_t *adapter = dhdinfo->adapter;
+
+
+	/* Update firmware and nvram path. The path may be from adapter info or module parameter
+	 * The path from adapter info is used for initialization only (as it won't change).
+	 *
+	 * The firmware_path/nvram_path module parameter may be changed by the system at run
+	 * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private
+	 * command may change dhdinfo->fw_path. As such we need to clear the path info in
+	 * module parameter after it is copied. We won't update the path until the module parameter
+	 * is changed again (first character is not '\0')
+	 */
+
+	/* set default firmware and nvram path for built-in type driver */
+	if (!dhd_download_fw_on_driverload) {
+#ifdef CONFIG_BCMDHD_FW_PATH
+		fw = CONFIG_BCMDHD_FW_PATH;
+#endif /* CONFIG_BCMDHD_FW_PATH */
+#ifdef CONFIG_BCMDHD_NVRAM_PATH
+		nv = CONFIG_BCMDHD_NVRAM_PATH;
+#endif /* CONFIG_BCMDHD_NVRAM_PATH */
+	}
+
+	/* check if we need to initialize the path */
+	if (dhdinfo->fw_path[0] == '\0') {
+		if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
+			fw = adapter->fw_path;
+
+	}
+	if (dhdinfo->nv_path[0] == '\0') {
+		if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
+			nv = adapter->nv_path;
+	}
+
+	/* Use module parameter if it is valid, EVEN IF the path has not been initialized
+	 *
+	 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
+	 */
+	if (firmware_path[0] != '\0')
+		fw = firmware_path;
+	if (nvram_path[0] != '\0')
+		nv = nvram_path;
+
+	if (fw && fw[0] != '\0') {
+		fw_len = strlen(fw);
+		if (fw_len >= sizeof(dhdinfo->fw_path)) {
+			DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
+			return FALSE;
+		}
+		strncpy(dhdinfo->fw_path, fw, sizeof(dhdinfo->fw_path));
+		dhdinfo->fw_path[sizeof(dhdinfo->fw_path) - 1] = '\0';
+		/* update firmware path for backward compatibility: a trailing '/'
+		 * means a directory, so append the default image name in place
+		 * (snprintf()ing a buffer into itself is undefined behaviour)
+		 */
+		if (fw_len && dhdinfo->fw_path[fw_len - 1] == '/')
+			strncat(dhdinfo->fw_path, "fw_bcmdhd.bin",
+				sizeof(dhdinfo->fw_path) - fw_len - 1);
+		bconcat = TRUE;
+	}
+	if (nv && nv[0] != '\0') {
+		nv_len = strlen(nv);
+		if (nv_len >= sizeof(dhdinfo->nv_path)) {
+			DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
+			return FALSE;
+		}
+		strncpy(dhdinfo->nv_path, nv, sizeof(dhdinfo->nv_path));
+		dhdinfo->nv_path[sizeof(dhdinfo->nv_path) - 1] = '\0';
+		/* update nvram path for backward compatibility: append the default
+		 * file name when the path is a directory, avoiding the in-place
+		 * snprintf() of the original code
+		 */
+		if (nv_len && dhdinfo->nv_path[nv_len - 1] == '/')
+			strncat(dhdinfo->nv_path, "bcmdhd_aob.cal",
+				sizeof(dhdinfo->nv_path) - nv_len - 1);
+		bconcat = TRUE;
+	}
+
+#if defined(SUPPORT_MULTIPLE_REVISION)
+	if (bconcat == TRUE) {
+		if (concate_revision(dhdinfo->pub.bus, dhdinfo->fw_path, sizeof(dhdinfo->fw_path),
+				dhdinfo->nv_path, sizeof(dhdinfo->nv_path)))
+			DHD_ERROR(("%s: failed to concatenate revision\n", __FUNCTION__));
+	}
+#endif
+
+	/* clear the path in module parameter */
+	firmware_path[0] = '\0';
+	nvram_path[0] = '\0';
+
+	if (dhdinfo->fw_path[0] == '\0') {
+		DHD_ERROR(("firmware path not found\n"));
+		return FALSE;
+	}
+	if (dhdinfo->nv_path[0] == '\0') {
+		DHD_ERROR(("nvram path not found\n"));
+		return FALSE;
+	}
+
+	return TRUE;
+}
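+
+/* Example: loading the module with firmware_path="/vendor/firmware/" (note
+ * the trailing '/') makes dhd_update_fw_nv_path() expand the path to
+ * "/vendor/firmware/fw_bcmdhd.bin"; the module parameter is then cleared so
+ * the path only changes again when userspace rewrites it.
+ */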
+
+
+int
+dhd_bus_start(dhd_pub_t *dhdp)
+{
+	int ret = -1;
+	dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
+	unsigned long flags;
+
+	ASSERT(dhd);
+
+	DHD_TRACE(("Enter %s:\n", __FUNCTION__));
+
+	if (shutdown_in_progress) {
+		DHD_ERROR(("%s: dhd shutdown in progress!\n", __FUNCTION__));
+		return ret;
+	}
+
+	/* try to download image and nvram to the dongle */
+	if  (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
+		DHD_INFO(("%s download fw %s, nv %s\n", __FUNCTION__, dhd->fw_path, dhd->nv_path));
+		ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh, dhd->fw_path, dhd->nv_path);
+
+		if (ret < 0) {
+			DHD_ERROR(("%s: failed to download firmware %s\n",
+			          __FUNCTION__, dhd->fw_path));
+			return ret;
+		}
+	}
+	if (dhd->pub.busstate != DHD_BUS_LOAD) {
+		return -ENETDOWN;
+	}
+
+	dhd_os_sdlock(dhdp);
+
+	/* Start the watchdog timer */
+	dhd->pub.tickcnt = 0;
+	dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
+
+	/* Bring up the bus */
+	if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
+
+		DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
+		dhd_os_sdunlock(dhdp);
+		return ret;
+	}
+#if defined(OOB_INTR_ONLY)
+	/* Host registration for OOB interrupt */
+	if (dhd_bus_oob_intr_register(dhdp)) {
+		/* deactivate timer and wait for the handler to finish */
+
+		flags = dhd_os_spin_lock(&dhd->pub);
+		dhd->wd_timer_valid = FALSE;
+		dhd_os_spin_unlock(&dhd->pub, flags);
+		del_timer_sync(&dhd->timer);
+
+		DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
+		dhd_os_sdunlock(dhdp);
+		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+		return -ENODEV;
+	}
+
+	/* Enable oob at firmware */
+	dhd_enable_oob_intr(dhd->pub.bus, TRUE);
+#endif /* OOB_INTR_ONLY */
+
+	/* If bus is not ready, can't come up */
+	if (dhd->pub.busstate != DHD_BUS_DATA) {
+		flags = dhd_os_spin_lock(&dhd->pub);
+		dhd->wd_timer_valid = FALSE;
+		dhd_os_spin_unlock(&dhd->pub, flags);
+		del_timer_sync(&dhd->timer);
+		DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
+		dhd_os_sdunlock(dhdp);
+		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+		return -ENODEV;
+	}
+
+	dhd_os_sdunlock(dhdp);
+
+	dhd_process_cid_mac(dhdp, TRUE);
+
+	/* Bus is ready, do any protocol initialization */
+	if ((ret = dhd_prot_init(&dhd->pub)) < 0)
+		return ret;
+
+	dhd_process_cid_mac(dhdp, FALSE);
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	if (dhd->pend_ipaddr) {
+#ifdef AOE_IP_ALIAS_SUPPORT
+		aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
+#endif /* AOE_IP_ALIAS_SUPPORT */
+		dhd->pend_ipaddr = 0;
+	}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+	return 0;
+}
+#ifdef WLTDLS
+int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
+{
+	char iovbuf[WLC_IOCTL_SMLEN];
+	uint32 tdls = tdls_on;
+	int ret = 0;
+	uint32 tdls_auto_op = 0;
+	uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
+	int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
+	int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
+	BCM_REFERENCE(mac);
+	if (!FW_SUPPORTED(dhd, tdls))
+		return BCME_ERROR;
+
+	if (dhd->tdls_enable == tdls_on)
+		goto auto_mode;
+	bcm_mkiovar("tdls_enable", (char *)&tdls, sizeof(tdls), iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
+		goto exit;
+	}
+	dhd->tdls_enable = tdls_on;
+auto_mode:
+
+	tdls_auto_op = auto_on;
+	bcm_mkiovar("tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op),
+		iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+		sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
+		goto exit;
+	}
+
+	if (tdls_auto_op) {
+		bcm_mkiovar("tdls_idle_time", (char *)&tdls_idle_time,
+			sizeof(tdls_idle_time),	iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+			sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
+			goto exit;
+		}
+		bcm_mkiovar("tdls_rssi_high", (char *)&tdls_rssi_high, 4, iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+			sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
+			goto exit;
+		}
+		bcm_mkiovar("tdls_rssi_low", (char *)&tdls_rssi_low, 4, iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+			sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
+			goto exit;
+		}
+	}
+
+exit:
+	return ret;
+}
+int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+	if (dhd)
+		ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
+	else
+		ret = BCME_ERROR;
+	return ret;
+}
+#endif /* WLTDLS */
+
+bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
+{
+	if (!dhd)
+		return FALSE;
+
+	if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
+		return TRUE;
+	else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
+		DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
+		return TRUE;
+	else
+		return FALSE;
+}
+#if !defined(AP) && defined(WLP2P)
+/* Since the Android Jelly Bean release, concurrent mode is enabled by default and the firmware
+ * name is fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA firmware
+ * and, if so, enable concurrent mode (apply P2P settings). SoftAP firmware is still named
+ * fw_bcmdhd_apsta.
+ */
+uint32
+dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
+{
+	int32 ret = 0;
+	char buf[WLC_IOCTL_SMLEN];
+	bool mchan_supported = FALSE;
+	/* if dhd->op_mode is already set to HOSTAP or Manufacturing
+	 * test mode, we will use that mode as-is
+	 */
+	if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
+		return 0;
+	if (FW_SUPPORTED(dhd, vsdb)) {
+		mchan_supported = TRUE;
+	}
+	if (!FW_SUPPORTED(dhd, p2p)) {
+		DHD_TRACE(("Chip does not support p2p\n"));
+		return 0;
+	}
+	else {
+		/* Chip supports p2p, but verify that p2p is really implemented in the firmware */
+		memset(buf, 0, sizeof(buf));
+		bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
+			FALSE, 0)) < 0) {
+			DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
+			return 0;
+		}
+		else {
+			if (buf[0] == 1) {
+				/* By default, the chip supports single-channel concurrency;
+				 * now let's check for mchan
+				 */
+				ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
+				if (mchan_supported)
+					ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
+#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
+				/* For customer_hw4, although ICS,
+				* we still support concurrent mode
+				*/
+				return ret;
+#else
+				return 0;
+#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
+			}
+		}
+	}
+	return 0;
+}
+#endif /* !AP && WLP2P */
+
+
+int
+dhd_preinit_ioctls(dhd_pub_t *dhd)
+{
+	int ret = 0;
+	char eventmask[WL_EVENTING_MASK_LEN];
+	char iovbuf[WL_EVENTING_MASK_LEN + 12];	/*  Room for "event_msgs" + '\0' + bitvec  */
+	uint32 buf_key_b4_m4 = 1;
+#if defined(CUSTOM_AMPDU_BA_WSIZE)
+	uint32 ampdu_ba_wsize = 0;
+#endif /* CUSTOM_AMPDU_BA_WSIZE */
+#if defined(CUSTOM_AMPDU_MPDU)
+	uint32 ampdu_mpdu = 0;
+#endif
+
+#ifdef PROP_TXSTATUS
+	int wlfc_enable = TRUE;
+#ifndef DISABLE_11N
+	uint32 hostreorder = 1;
+	int ret2 = 0;
+#endif /* DISABLE_11N */
+#endif /* PROP_TXSTATUS */
+
+#ifdef DHD_ENABLE_LPC
+	uint32 lpc = 1;
+#endif /* DHD_ENABLE_LPC */
+	uint power_mode = PM_FAST;
+	uint32 dongle_align = DHD_SDALIGN;
+	uint32 glom = CUSTOM_GLOM_SETTING;
+#if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
+	uint32 credall = 1;
+#endif
+#if defined(VSDB) || defined(ROAM_ENABLE)
+	uint bcn_timeout = 8;
+#else
+	uint bcn_timeout = 4;
+#endif /* VSDB || ROAM_ENABLE */
+	uint retry_max = 3;
+#if defined(ARP_OFFLOAD_SUPPORT)
+	int arpoe = 1;
+#endif
+	int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
+	int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
+	int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
+	char buf[WLC_IOCTL_SMLEN];
+	char *ptr;
+	uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
+#ifdef ROAM_ENABLE
+	uint roamvar = 0;
+	int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
+	int roam_scan_period[2] = {10, WLC_BAND_ALL};
+	int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
+#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
+	int roam_fullscan_period = 60;
+#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
+	int roam_fullscan_period = 120;
+#endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
+#else
+#ifdef DISABLE_BUILTIN_ROAM
+	uint roamvar = 1;
+#endif /* DISABLE_BUILTIN_ROAM */
+#endif /* ROAM_ENABLE */
+
+#if defined(SOFTAP)
+	uint dtim = 1;
+#endif
+#if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
+	uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */
+	struct ether_addr p2p_ea;
+#endif
+
+#if defined(AP) || defined(WLP2P)
+	uint32 apsta = 1; /* Enable APSTA mode */
+#endif /* defined(AP) || defined(WLP2P) */
+#ifdef GET_CUSTOM_MAC_ENABLE
+	struct ether_addr ea_addr;
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+#ifdef DISABLE_11N
+	uint32 nmode = 0;
+#endif /* DISABLE_11N */
+
+#ifdef USE_WL_TXBF
+	uint32 txbf = 1;
+#endif /* USE_WL_TXBF */
+#ifdef USE_WL_FRAMEBURST
+	uint32 frameburst = 1;
+#endif /* USE_WL_FRAMEBURST */
+#ifdef CUSTOM_PSPRETEND_THR
+	uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
+#endif
+#ifdef PKT_FILTER_SUPPORT
+	dhd_pkt_filter_enable = TRUE;
+#endif /* PKT_FILTER_SUPPORT */
+#ifdef WLTDLS
+	dhd->tdls_enable = FALSE;
+#endif /* WLTDLS */
+	dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
+	DHD_TRACE(("Enter %s\n", __FUNCTION__));
+	dhd->op_mode = 0;
+	if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
+		(op_mode == DHD_FLAG_MFG_MODE)) {
+		/* Check and adjust IOCTL response timeout for Manufacturing firmware */
+		dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
+		DHD_ERROR(("%s : Set IOCTL response time for Manufacturing Firmware\n",
+			__FUNCTION__));
+	}
+	else {
+		dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
+		DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
+	}
+#ifdef GET_CUSTOM_MAC_ENABLE
+	ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet);
+	if (!ret) {
+		memset(buf, 0, sizeof(buf));
+		bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
+		ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+		if (ret < 0) {
+			DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
+			return BCME_NOTUP;
+		}
+		memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN);
+	} else {
+#endif /* GET_CUSTOM_MAC_ENABLE */
+		/* Get the default device MAC address directly from firmware */
+		memset(buf, 0, sizeof(buf));
+		bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
+			FALSE, 0)) < 0) {
+			DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
+			return BCME_NOTUP;
+		}
+		/* Update public MAC address after reading from Firmware */
+		memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+	}
+#endif /* GET_CUSTOM_MAC_ENABLE */
+	/* get capabilities from firmware */
+	memset(dhd->fw_capabilities, 0, sizeof(dhd->fw_capabilities));
+	bcm_mkiovar("cap", 0, 0, dhd->fw_capabilities, sizeof(dhd->fw_capabilities));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, dhd->fw_capabilities,
+		sizeof(dhd->fw_capabilities), FALSE, 0)) < 0) {
+		DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
+			__FUNCTION__, ret));
+		return 0;
+	}
+	if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
+		(op_mode == DHD_FLAG_HOSTAP_MODE)) {
+#ifdef SET_RANDOM_MAC_SOFTAP
+		uint rand_mac;
+#endif
+		dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
+#if defined(ARP_OFFLOAD_SUPPORT)
+		arpoe = 0;
+#endif
+#ifdef PKT_FILTER_SUPPORT
+		dhd_pkt_filter_enable = FALSE;
+#endif
+#ifdef SET_RANDOM_MAC_SOFTAP
+		SRANDOM32((uint)jiffies);
+		rand_mac = RANDOM32();
+		iovbuf[0] = 0x02;			   /* locally administered bit */
+		iovbuf[1] = 0x1A;
+		iovbuf[2] = 0x11;
+		iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
+		iovbuf[4] = (unsigned char)(rand_mac >> 8);
+		iovbuf[5] = (unsigned char)(rand_mac >> 16);
+
+		bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf));
+		ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+		if (ret < 0) {
+			DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
+		} else
+			memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
+#endif /* SET_RANDOM_MAC_SOFTAP */
+#if !defined(AP) && defined(WL_CFG80211)
+		/* Turn off MPC in AP mode */
+		bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+			sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s mpc for HostAPD failed  %d\n", __FUNCTION__, ret));
+		}
+#endif
+	} else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
+		(op_mode == DHD_FLAG_MFG_MODE)) {
+#if defined(ARP_OFFLOAD_SUPPORT)
+		arpoe = 0;
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef PKT_FILTER_SUPPORT
+		dhd_pkt_filter_enable = FALSE;
+#endif /* PKT_FILTER_SUPPORT */
+		dhd->op_mode = DHD_FLAG_MFG_MODE;
+	} else {
+		uint32 concurrent_mode = 0;
+		if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
+			(op_mode == DHD_FLAG_P2P_MODE)) {
+#if defined(ARP_OFFLOAD_SUPPORT)
+			arpoe = 0;
+#endif
+#ifdef PKT_FILTER_SUPPORT
+			dhd_pkt_filter_enable = FALSE;
+#endif
+			dhd->op_mode = DHD_FLAG_P2P_MODE;
+		} else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
+			(op_mode == DHD_FLAG_IBSS_MODE)) {
+			dhd->op_mode = DHD_FLAG_IBSS_MODE;
+		} else
+			dhd->op_mode = DHD_FLAG_STA_MODE;
+#if !defined(AP) && defined(WLP2P)
+		if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
+			(concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
+#if defined(ARP_OFFLOAD_SUPPORT)
+			arpoe = 1;
+#endif
+			dhd->op_mode |= concurrent_mode;
+		}
+
+		/* Check if we are enabling p2p */
+		if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
+			bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
+			if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+				iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+				DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
+			}
+
+			memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
+			ETHER_SET_LOCALADDR(&p2p_ea);
+			bcm_mkiovar("p2p_da_override", (char *)&p2p_ea,
+				ETHER_ADDR_LEN, iovbuf, sizeof(iovbuf));
+			if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+				iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+				DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
+			} else {
+				DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
+			}
+		}
+#else
+	(void)concurrent_mode;
+#endif /* !AP && WLP2P */
+	}
+
+	DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
+		dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
+	/* Set Country code */
+	if (dhd->dhd_cspec.ccode[0] != 0) {
+		bcm_mkiovar("country", (char *)&dhd->dhd_cspec,
+			sizeof(wl_country_t), iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+			DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
+	}
+
+
+	/* Set Listen Interval */
+	bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+		DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
+
+#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
+	/* Disable built-in roaming to allow the external supplicant to take care of roaming */
+	bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
+#if defined(ROAM_ENABLE)
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
+		sizeof(roam_trigger), TRUE, 0)) < 0)
+		DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
+		sizeof(roam_scan_period), TRUE, 0)) < 0)
+		DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
+		sizeof(roam_delta), TRUE, 0)) < 0)
+		DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
+	bcm_mkiovar("fullroamperiod", (char *)&roam_fullscan_period, 4, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+		DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
+#endif /* ROAM_ENABLE */
+
+#ifdef WLTDLS
+	/* by default TDLS on and auto mode off */
+	_dhd_tdls_enable(dhd, true, false, NULL);
+#endif /* WLTDLS */
+
+#ifdef DHD_ENABLE_LPC
+	/* Set lpc 1 */
+	bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+		sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s Set lpc failed  %d\n", __FUNCTION__, ret));
+		/* XXX Dongle state is sometimes not down state.
+		 * So, if ioctl error is BCME_NOTDOWN, make the dongle state down
+		 * using WLC_DOWN ioctl command and try again.
+		*/
+		if (ret == BCME_NOTDOWN) {
+			uint wl_down = 1;
+			ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
+				(char *)&wl_down, sizeof(wl_down), TRUE, 0);
+			DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc));
+
+			bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));
+			ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+			DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret));
+		}
+	}
+#endif /* DHD_ENABLE_LPC */
+
+	/* Set PowerSave mode */
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
+
+	/* Match Host and Dongle rx alignment */
+	bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+
+#if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
+	/* enable credall to reduce the chance of running out of bus credits. */
+	bcm_mkiovar("bus:credall", (char *)&credall, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif
+
+	if (glom != DEFAULT_GLOM_VALUE) {
+		DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
+		bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
+		dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+	}
+
+	/* Set the timeout for reporting link down when beacons are lost and roaming is off */
+	bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+	/* Set assoc_retry_max, the number of association retries the dongle makes to the target AP */
+	bcm_mkiovar("assoc_retry_max", (char *)&retry_max, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#if defined(AP) && !defined(WLP2P)
+	/* Turn off MPC in AP mode */
+	bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+	bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif /* defined(AP) && !defined(WLP2P) */
+
+
+
+#if defined(SOFTAP)
+	if (ap_fw_loaded == TRUE) {
+		dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
+	}
+#endif /* SOFTAP */
+
+#if defined(KEEP_ALIVE)
+	{
+	/* Set Keep Alive: be sure to use FW with -keepalive */
+	int res;
+
+#if defined(SOFTAP)
+	if (ap_fw_loaded == FALSE)
+#endif /* SOFTAP */
+		if (!(dhd->op_mode &
+			(DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
+			if ((res = dhd_keep_alive_onoff(dhd)) < 0)
+				DHD_ERROR(("%s set keeplive failed %d\n",
+				__FUNCTION__, res));
+		}
+	}
+#endif /* defined(KEEP_ALIVE) */
+#ifdef USE_WL_TXBF
+	bcm_mkiovar("txbf", (char *)&txbf, 4, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+		sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s Set txbf failed  %d\n", __FUNCTION__, ret));
+	}
+#endif /* USE_WL_TXBF */
+#ifdef USE_WL_FRAMEBURST
+#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
+	/* Disable frameburst for SoftAP */
+	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+		frameburst = 0;
+	}
+#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
+	/* Set frameburst to the requested value */
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
+		sizeof(frameburst), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s Set frameburst failed  %d\n", __FUNCTION__, ret));
+	}
+#endif /* USE_WL_FRAMEBURST */
+#if defined(CUSTOM_AMPDU_BA_WSIZE)
+	/* Set ampdu ba wsize to 64 or 16; the enclosing #if already
+	 * guarantees CUSTOM_AMPDU_BA_WSIZE is defined
+	 */
+	ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
+	if (ampdu_ba_wsize != 0) {
+		bcm_mkiovar("ampdu_ba_wsize", (char *)&ampdu_ba_wsize, 4, iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+			sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed  %d\n",
+				__FUNCTION__, ampdu_ba_wsize, ret));
+		}
+	}
+#endif /* CUSTOM_AMPDU_BA_WSIZE */
+#if defined(CUSTOM_AMPDU_MPDU)
+	ampdu_mpdu = CUSTOM_AMPDU_MPDU;
+	if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
+		bcm_mkiovar("ampdu_mpdu", (char *)&ampdu_mpdu, 4, iovbuf, sizeof(iovbuf));
+		if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+			sizeof(iovbuf), TRUE, 0)) < 0) {
+			DHD_ERROR(("%s Set ampdu_mpdu to %d failed  %d\n",
+				__FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
+		}
+	}
+#endif /* CUSTOM_AMPDU_MPDU */
+
+#ifdef CUSTOM_PSPRETEND_THR
+	/* Set pspretend_threshold for HostAPD */
+	bcm_mkiovar("pspretend_threshold", (char *)&pspretend_thr, 4,
+		iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+		sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s pspretend_threshold for HostAPD failed  %d\n",
+			__FUNCTION__, ret));
+	}
+#endif
+
+	bcm_mkiovar("buf_key_b4_m4", (char *)&buf_key_b4_m4, 4, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+		sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
+	}
+
+	/* Read event_msgs mask */
+	bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
+	if ((ret  = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
+		DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
+		goto done;
+	}
+	bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
+
+	/* Setup event_msgs */
+	setbit(eventmask, WLC_E_SET_SSID);
+	setbit(eventmask, WLC_E_PRUNE);
+	setbit(eventmask, WLC_E_AUTH);
+	setbit(eventmask, WLC_E_ASSOC);
+	setbit(eventmask, WLC_E_REASSOC);
+	setbit(eventmask, WLC_E_REASSOC_IND);
+	setbit(eventmask, WLC_E_DEAUTH);
+	setbit(eventmask, WLC_E_DEAUTH_IND);
+	setbit(eventmask, WLC_E_DISASSOC_IND);
+	setbit(eventmask, WLC_E_DISASSOC);
+	setbit(eventmask, WLC_E_JOIN);
+	setbit(eventmask, WLC_E_START);
+	setbit(eventmask, WLC_E_ASSOC_IND);
+	setbit(eventmask, WLC_E_PSK_SUP);
+	setbit(eventmask, WLC_E_LINK);
+	setbit(eventmask, WLC_E_NDIS_LINK);
+	setbit(eventmask, WLC_E_MIC_ERROR);
+	setbit(eventmask, WLC_E_ASSOC_REQ_IE);
+	setbit(eventmask, WLC_E_ASSOC_RESP_IE);
+#ifndef WL_CFG80211
+	setbit(eventmask, WLC_E_PMKID_CACHE);
+	setbit(eventmask, WLC_E_TXFAIL);
+#endif
+	setbit(eventmask, WLC_E_JOIN_START);
+	setbit(eventmask, WLC_E_SCAN_COMPLETE);
+#ifdef WLMEDIA_HTSF
+	setbit(eventmask, WLC_E_HTSFSYNC);
+#endif /* WLMEDIA_HTSF */
+#ifdef PNO_SUPPORT
+	setbit(eventmask, WLC_E_PFN_NET_FOUND);
+	setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
+	setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
+	setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
+#endif /* PNO_SUPPORT */
+	/* enable dongle roaming event */
+	setbit(eventmask, WLC_E_ROAM);
+	setbit(eventmask, WLC_E_BSSID);
+#ifdef WLTDLS
+	setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
+#endif /* WLTDLS */
+#ifdef WL_CFG80211
+	setbit(eventmask, WLC_E_ESCAN_RESULT);
+	if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
+		setbit(eventmask, WLC_E_ACTION_FRAME_RX);
+		setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
+	}
+#endif /* WL_CFG80211 */
+	setbit(eventmask, WLC_E_TRACE);
+
+	/* Write updated Event mask */
+	bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
+		goto done;
+	}
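+	/* Note: eventmask is a WL_EVENTING_MASK_LEN-byte bitmap indexed by
+	 * WLC_E_* event number; it is read, modified with setbit() and written
+	 * back so firmware defaults outside the bits touched here survive.
+	 */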
+
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
+		sizeof(scan_assoc_time), TRUE, 0);
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
+		sizeof(scan_unassoc_time), TRUE, 0);
+	dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
+		sizeof(scan_passive_time), TRUE, 0);
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	/* Set and enable the ARP offload feature for STA only */
+#if defined(SOFTAP)
+	if (arpoe && !ap_fw_loaded) {
+#else
+	if (arpoe) {
+#endif /* SOFTAP */
+		dhd_arp_offload_enable(dhd, TRUE);
+		dhd_arp_offload_set(dhd, dhd_arp_mode);
+	} else {
+		dhd_arp_offload_enable(dhd, FALSE);
+		dhd_arp_offload_set(dhd, 0);
+	}
+	dhd_arp_enable = arpoe;
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#ifdef PKT_FILTER_SUPPORT
+#ifndef BOARD_INTEL
+	/* Set up default definitions for pktfilter; enabled in suspend */
+	dhd->pktfilter_count = 6;
+	/* Setup filter to allow only unicast */
+	dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
+	dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
+	dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
+	dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
+	/* Add a filter to pass multicast DNS packets and NOT filter them out as broadcast */
+	dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
+	/* apply APP pktfilter */
+	dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
+#else
+	/* Set up drop definitions for pktfilter; enabled in suspend */
+	dhd->pktfilter_count = 9;
+	dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = NULL;
+	/* discard all broadcast packets */
+	dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
+	/* discard all IPv4 multicast packets */
+	dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "102 0 0 0 0xFFFFFF 0x01005E";
+	/* discard all IPv6 multicast packets */
+	dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = "103 0 0 0 0xFFFF 0x3333";
+
+	dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
+	dhd->pktfilter[DHD_ARP_FILTER_NUM] = NULL;
+
+	/* discard IPv4 broadcast address XXX.XXX.XXX.255 */
+	dhd->pktfilter[DHD_IPV4_BC_255_FILTER_NUM] = "106 0 0 12 0xFFFF00000000000000000000000000000000000000FF 0x080000000000000000000000000000000000000000FF";
+	/* discard IPv4 multicast address 224.0.0.0/4 */
+	dhd->pktfilter[DHD_IPV4_MC_224_FILTER_NUM] = "107 0 0 12 0xFFFF00000000000000000000000000000000F0 0x080000000000000000000000000000000000E0";
+	/* discard IPv6 multicast address FF00::/8 */
+	dhd->pktfilter[DHD_IPV6_MC_FF00_FILTER_NUM] = "108 0 0 12 0xFFFF000000000000000000000000000000000000000000000000FF 0x86DD000000000000000000000000000000000000000000000000FF";
+#endif /* !BOARD_INTEL */
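+	/* Filter string layout (illustrative decode, assuming the format parsed
+	 * by dhd_pktfilter_offload_set()): "<id> <polarity> <type> <offset>
+	 * <bitmask> <pattern>". E.g. "103 0 0 0 0xFFFF 0x3333" matches frames
+	 * whose first two bytes (dst MAC prefix) are 33:33, i.e. IPv6 multicast.
+	 */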
+
+#if defined(SOFTAP)
+	if (ap_fw_loaded) {
+		dhd_enable_packet_filter(0, dhd);
+	}
+#endif /* defined(SOFTAP) */
+	dhd_set_packet_filter(dhd);
+#endif /* PKT_FILTER_SUPPORT */
+#ifdef DISABLE_11N
+	bcm_mkiovar("nmode", (char *)&nmode, 4, iovbuf, sizeof(iovbuf));
+	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+		DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
+#endif /* DISABLE_11N */
+
+
+	/* query for 'ver' to get version info from firmware */
+	memset(buf, 0, sizeof(buf));
+	ptr = buf;
+	bcm_mkiovar("ver", (char *)&buf, 4, buf, sizeof(buf));
+	if ((ret  = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0)
+		DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+	else {
+		bcmstrtok(&ptr, "\n", 0);
+		/* Print fw version info */
+		DHD_ERROR(("Firmware version = %s\n", buf));
+		dhd_set_version_info(dhd, buf);
+		strncpy(fwversion, buf, sizeof(fwversion));
+		fwversion[sizeof(fwversion)-1] = '\0';
+
+		DHD_BLOG(buf, strlen(buf) + 1);
+		DHD_BLOG(dhd_version, strlen(dhd_version) + 1);
+	}
+
+	dhd_txglom_enable(dhd, TRUE);
+
+#ifdef PROP_TXSTATUS
+	if (disable_proptx ||
+#ifdef PROP_TXSTATUS_VSDB
+		/* enable WLFC only for VSDB firmware when operating in STA mode */
+		(dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+		 dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
+#endif /* PROP_TXSTATUS_VSDB */
+		FALSE) {
+		wlfc_enable = FALSE;
+	}
+
+#ifndef DISABLE_11N
+	bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, iovbuf, sizeof(iovbuf));
+	if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
+		if (ret2 != BCME_UNSUPPORTED)
+			ret = ret2;
+		/* XXX The dongle is sometimes not actually in the down state.
+		* If the ioctl fails with BCME_NOTDOWN, force the dongle down
+		* with the WLC_DOWN ioctl and try again.
+		*/
+		if (ret == BCME_NOTDOWN) {
+			uint wl_down = 1;
+			ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,
+				sizeof(wl_down), TRUE, 0);
+			DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
+				__FUNCTION__, ret2, hostreorder));
+
+			bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4,
+				iovbuf, sizeof(iovbuf));
+			ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+			DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__, ret2));
+			if (ret2 != BCME_UNSUPPORTED)
+				ret = ret2;
+		}
+		if (ret2 != BCME_OK)
+			hostreorder = 0;
+	}
+#endif /* DISABLE_11N */
+	if (wlfc_enable)
+		dhd_wlfc_init(dhd);
+#ifndef DISABLE_11N
+	else if (hostreorder)
+		dhd_wlfc_hostreorder_init(dhd);
+#endif /* DISABLE_11N */
+
+#endif /* PROP_TXSTATUS */
+#ifdef PNO_SUPPORT
+	if (!dhd->pno_state) {
+		dhd_pno_init(dhd);
+	}
+#endif
+#ifdef WL11U
+	dhd_interworking_enable(dhd);
+#endif /* WL11U */
+
+done:
+	return ret;
+}
+
+
+int
+dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set)
+{
+	char buf[strlen(name) + 1 + cmd_len];
+	int len = sizeof(buf);
+	wl_ioctl_t ioc;
+	int ret;
+
+	len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
+
+	memset(&ioc, 0, sizeof(ioc));
+
+	ioc.cmd = set? WLC_SET_VAR : WLC_GET_VAR;
+	ioc.buf = buf;
+	ioc.len = len;
+	ioc.set = set;
+
+	ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
+	if (!set && ret >= 0)
+		memcpy(cmd_buf, buf, cmd_len);
+
+	return ret;
+}
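+/*
+ * Illustrative use of dhd_iovar() (a sketch, not driver code; "mpc" and
+ * ifidx 0 are assumed values):
+ *
+ *	int mpc = 0;
+ *	if (dhd_iovar(pub, 0, "mpc", (char *)&mpc, sizeof(mpc), FALSE) >= 0)
+ *		DHD_INFO(("mpc = %d\n", mpc));
+ *
+ * On a GET (set == FALSE) the response is copied back into cmd_buf.
+ */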
+
+int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
+{
+	struct dhd_info *dhd = dhdp->info;
+	struct net_device *dev = NULL;
+
+	ASSERT(dhd && dhd->iflist[ifidx]);
+	dev = dhd->iflist[ifidx]->net;
+	ASSERT(dev);
+
+	if (netif_running(dev)) {
+		DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name));
+		return BCME_NOTDOWN;
+	}
+
+#define DHD_MIN_MTU 1500
+#define DHD_MAX_MTU 1752
+
+	if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
+		DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
+		return BCME_BADARG;
+	}
+
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+#ifdef ARP_OFFLOAD_SUPPORT
+/* add or remove AOE host IP(s) (up to 8 IPs per interface) */
+void
+aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
+{
+	u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
+	int i;
+	int ret;
+
+	bzero(ipv4_buf, sizeof(ipv4_buf));
+
+	/* display what we've got */
+	ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
+	DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
+#ifdef AOE_DBG
+	dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
+#endif
+	/* now that the host_ip table is saved, clear it in the dongle AOE */
+	dhd_aoe_hostip_clr(dhd_pub, idx);
+
+	if (ret) {
+		DHD_ERROR(("%s failed\n", __FUNCTION__));
+		return;
+	}
+
+	for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
+		if (add && (ipv4_buf[i] == 0)) {
+				ipv4_buf[i] = ipa;
+				add = FALSE; /* added ipa to local table  */
+				DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
+				__FUNCTION__, i));
+		} else if (ipv4_buf[i] == ipa) {
+			ipv4_buf[i]	= 0;
+			DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
+				__FUNCTION__, ipa, i));
+		}
+
+		if (ipv4_buf[i] != 0) {
+			/* add back host_ip entries from our local cache */
+			dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
+			DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
+				__FUNCTION__, ipv4_buf[i], i));
+		}
+	}
+#ifdef AOE_DBG
+	/* see the resulting hostip table */
+	dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
+	DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
+	dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
+#endif
+}
+
+/*
+ * Notification mechanism from the kernel to our driver. This function is
+ * called by the Linux kernel whenever an event related to an IPv4 address
+ * occurs on one of our interfaces.
+ * ptr : kernel-provided pointer to the IP address entry that changed
+ */
+static int dhd_inetaddr_notifier_call(struct notifier_block *this,
+	unsigned long event,
+	void *ptr)
+{
+	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+
+	dhd_info_t *dhd;
+	dhd_pub_t *dhd_pub;
+	int idx;
+
+	if (!dhd_arp_enable)
+		return NOTIFY_DONE;
+	if (!ifa || !(ifa->ifa_dev->dev))
+		return NOTIFY_DONE;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+	/* Filter out notifications meant for non-Broadcom devices */
+	if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
+	    (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
+#if defined(WL_ENABLE_P2P_IF)
+		if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
+#endif /* WL_ENABLE_P2P_IF */
+			return NOTIFY_DONE;
+	}
+#endif /* LINUX_VERSION_CODE */
+
+	dhd = *(dhd_info_t **)netdev_priv(ifa->ifa_dev->dev);
+	if (!dhd)
+		return NOTIFY_DONE;
+
+	dhd_pub = &dhd->pub;
+
+	if (dhd_pub->arp_version == 1) {
+		idx = 0;
+	}
+	else {
+		for (idx = 0; idx < DHD_MAX_IFS; idx++) {
+			if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
+				break;
+		}
+		if (idx < DHD_MAX_IFS)
+			DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
+				dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
+		else {
+			DHD_ERROR(("Cannot find ifidx for (%s), defaulting to 0\n", ifa->ifa_label));
+			idx = 0;
+		}
+	}
+
+	switch (event) {
+		case NETDEV_UP:
+			DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
+				__FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+
+			if (dhd->pub.busstate != DHD_BUS_DATA) {
+				DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__));
+				if (dhd->pend_ipaddr) {
+					DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
+						__FUNCTION__, dhd->pend_ipaddr));
+				}
+				dhd->pend_ipaddr = ifa->ifa_address;
+				break;
+			}
+
+#ifdef AOE_IP_ALIAS_SUPPORT
+			DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
+				__FUNCTION__));
+			aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
+#endif /* AOE_IP_ALIAS_SUPPORT */
+			break;
+
+		case NETDEV_DOWN:
+			DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
+				__FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+			dhd->pend_ipaddr = 0;
+#ifdef AOE_IP_ALIAS_SUPPORT
+			DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
+				__FUNCTION__));
+			aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
+#else
+			dhd_aoe_hostip_clr(&dhd->pub, idx);
+			dhd_aoe_arp_clr(&dhd->pub, idx);
+#endif /* AOE_IP_ALIAS_SUPPORT */
+			break;
+
+		default:
+			DHD_ARPOE(("%s: do nothing for [%s] Event: %lu\n",
+				__FUNCTION__, ifa->ifa_label, event));
+			break;
+	}
+	return NOTIFY_DONE;
+}
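+
+/*
+ * Sketch (assumed to mirror the registration done in dhd_attach()): the
+ * callback above only fires once a notifier_block pointing at it has been
+ * registered with the kernel:
+ *
+ *	static struct notifier_block dhd_inetaddr_notifier = {
+ *		.notifier_call = dhd_inetaddr_notifier_call,
+ *	};
+ *	register_inetaddr_notifier(&dhd_inetaddr_notifier);
+ *
+ * dhd_detach() below unregisters it with unregister_inetaddr_notifier().
+ */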
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+/* Neighbor Discovery Offload: deferred handler */
+static void
+dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
+{
+	struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
+	dhd_pub_t	*pub = &((dhd_info_t *)dhd_info)->pub;
+	int		ret;
+
+	if (event != DHD_WQ_WORK_IPV6_NDO) {
+		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+		return;
+	}
+
+	if (!ndo_work) {
+		DHD_ERROR(("%s: ipv6 work info is not initialized \n", __FUNCTION__));
+		return;
+	}
+
+	if (!pub) {
+		DHD_ERROR(("%s: dhd pub is not initialized \n", __FUNCTION__));
+		return;
+	}
+
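+	/* this handler services the primary interface (if_idx 0) only */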
+	if (ndo_work->if_idx) {
+		DHD_ERROR(("%s: idx %d \n", __FUNCTION__, ndo_work->if_idx));
+		return;
+	}
+
+	switch (ndo_work->event) {
+		case NETDEV_UP:
+			DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n ", __FUNCTION__));
+			ret = dhd_ndo_enable(pub, TRUE);
+			if (ret < 0) {
+				DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
+			}
+
+			ret = dhd_ndo_add_ip(pub, &ndo_work->ipv6_addr[0], ndo_work->if_idx);
+			if (ret < 0) {
+				DHD_ERROR(("%s: Adding host ip for NDO failed %d\n",
+					__FUNCTION__, ret));
+			}
+			break;
+		case NETDEV_DOWN:
+			DHD_TRACE(("%s: clear ipv6 table \n", __FUNCTION__));
+			ret = dhd_ndo_remove_ip(pub, ndo_work->if_idx);
+			if (ret < 0) {
+				DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
+					__FUNCTION__, ret));
+				goto done;
+			}
+
+			ret = dhd_ndo_enable(pub, FALSE);
+			if (ret < 0) {
+				DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
+				goto done;
+			}
+			break;
+		default:
+			DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
+			break;
+	}
+done:
+	/* free ndo_work. alloced while scheduling the work */
+	kfree(ndo_work);
+
+	return;
+}
+
+/*
+ * Neighbor Discovery Offload: called when an interface is assigned an
+ * IPv6 address. Handles the primary interface only.
+ */
+static int dhd_inet6addr_notifier_call(struct notifier_block *this,
+	unsigned long event,
+	void *ptr)
+{
+	dhd_info_t *dhd;
+	dhd_pub_t *dhd_pub;
+	struct inet6_ifaddr *inet6_ifa = ptr;
+	struct in6_addr *ipv6_addr = &inet6_ifa->addr;
+	struct ipv6_work_info_t *ndo_info;
+	int idx = 0; /* REVISIT */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+	/* Filter out notifications meant for non-Broadcom devices */
+	if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
+			return NOTIFY_DONE;
+	}
+#endif /* LINUX_VERSION_CODE */
+
+	dhd = *(dhd_info_t **)netdev_priv(inet6_ifa->idev->dev);
+	if (!dhd)
+		return NOTIFY_DONE;
+
+	if (dhd->iflist[idx] && dhd->iflist[idx]->net != inet6_ifa->idev->dev)
+		return NOTIFY_DONE;
+	dhd_pub = &dhd->pub;
+	if (!FW_SUPPORTED(dhd_pub, ndoe))
+		return NOTIFY_DONE;
+
+	ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
+	if (!ndo_info) {
+		DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
+		return NOTIFY_DONE;
+	}
+
+	ndo_info->event = event;
+	ndo_info->if_idx = idx;
+	memcpy(&ndo_info->ipv6_addr[0], ipv6_addr, IPV6_ADDR_LEN);
+
+	/* defer the work to the DHD thread as NDO processing may block; ndo_info
+	 * is freed by dhd_inet6_work_handler()
+	 */
+	dhd_deferred_schedule_work((void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
+		dhd_inet6_work_handler, DHD_WORK_PRIORITY_LOW);
+	return NOTIFY_DONE;
+}
+
+int
+dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+	struct net_device *net = NULL;
+	int err = 0;
+	uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
+
+	ASSERT(dhd && dhd->iflist[ifidx]);
+	net = dhd->iflist[ifidx]->net;
+	ASSERT(net);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+	ASSERT(!net->open);
+	net->get_stats = dhd_get_stats;
+	net->do_ioctl = dhd_ioctl_entry;
+	net->hard_start_xmit = dhd_start_xmit;
+	net->set_mac_address = dhd_set_mac_address;
+	net->set_multicast_list = dhd_set_multicast_list;
+	net->open = net->stop = NULL;
+#else
+	ASSERT(!net->netdev_ops);
+	net->netdev_ops = &dhd_ops_virt;
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
+
+	/* Ok, link into the network layer... */
+	if (ifidx == 0) {
+		/*
+		 * device functions for the primary interface only
+		 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+		net->open = dhd_open;
+		net->stop = dhd_stop;
+#else
+		net->netdev_ops = &dhd_ops_pri;
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
+		if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
+			memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
+	} else {
+		/*
+		 * We have to use the primary MAC for virtual interfaces
+		 */
+		memcpy(temp_addr, dhd->iflist[ifidx]->mac_addr, ETHER_ADDR_LEN);
+		/*
+		 * Android sets the locally administered bit to indicate that this is a
+		 * portable hotspot. This will not work in simultaneous AP/STA mode,
+		 * nor with P2P. Need to set the Dongle's MAC address, and then use that.
+		 */
+		if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
+			ETHER_ADDR_LEN)) {
+			DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
+				__FUNCTION__, net->name));
+			temp_addr[0] |= 0x02;
+		}
+	}
+
+	net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+	net->ethtool_ops = &dhd_ethtool_ops;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+#if defined(WL_WIRELESS_EXT)
+#if WIRELESS_EXT < 19
+	net->get_wireless_stats = dhd_get_wireless_stats;
+#endif /* WIRELESS_EXT < 19 */
+#if WIRELESS_EXT > 12
+	net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
+#endif /* WIRELESS_EXT > 12 */
+#endif /* defined(WL_WIRELESS_EXT) */
+
+	dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
+
+	memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
+
+	if (ifidx == 0)
+		printf("%s\n", dhd_version);
+
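+	/* register_netdev() takes the RTNL lock itself; register_netdevice()
+	 * expects the caller to already hold it.
+	 */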
+	if (need_rtnl_lock)
+		err = register_netdev(net);
+	else
+		err = register_netdevice(net);
+
+	if (err != 0) {
+		DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
+		goto fail;
+	}
+
+
+	printf("Register interface [%s]  MAC: "MACDBG"\n\n", net->name,
+		MAC2STRDBG(net->dev_addr));
+
+#if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
+		wl_iw_iscan_set_scan_broadcast_prep(net, 1);
+#endif
+
+#if defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	if (ifidx == 0) {
+		up(&dhd_registration_sem);
+		if (!dhd_download_fw_on_driverload) {
+			dhd_net_bus_devreset(net, TRUE);
+			dhd_net_bus_suspend(net);
+		}
+	}
+#endif /* BCMLXSDMMC && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+	return 0;
+
+fail:
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+	net->open = NULL;
+#else
+	net->netdev_ops = NULL;
+#endif
+	return err;
+}
+
+void
+dhd_bus_detach(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (dhdp) {
+		dhd = (dhd_info_t *)dhdp->info;
+		if (dhd) {
+
+			/*
+			 * In case of the Android cfg80211 driver, the bus is down in dhd_stop,
+			 * so calling stop again will cause SD read/write errors.
+			 */
+			if (dhd->pub.busstate != DHD_BUS_DOWN) {
+				/* Stop the protocol module */
+				dhd_prot_stop(&dhd->pub);
+
+				/* Stop the bus module */
+				dhd_bus_stop(dhd->pub.bus, TRUE);
+			}
+
+#if defined(OOB_INTR_ONLY)
+			dhd_bus_oob_intr_unregister(dhdp);
+#endif /* OOB_INTR_ONLY */
+		}
+	}
+}
+
+
+void dhd_detach(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+	unsigned long flags;
+	int timer_valid = FALSE;
+
+	if (!dhdp)
+		return;
+
+	dhd = (dhd_info_t *)dhdp->info;
+	if (!dhd)
+		return;
+
+	DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
+
+	dhd->pub.up = 0;
+	if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
+		/* Give sufficient time for threads to start running in case
+		 * dhd_attach() has failed
+		 */
+		OSL_SLEEP(100);
+	}
+
+	if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
+		dhd_bus_detach(dhdp);
+
+		if (dhdp->prot)
+			dhd_prot_detach(dhdp);
+	}
+
+#ifdef ARP_OFFLOAD_SUPPORT
+	if (dhd_inetaddr_notifier_registered) {
+		dhd_inetaddr_notifier_registered = FALSE;
+		unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
+	}
+#endif /* ARP_OFFLOAD_SUPPORT */
+	if (dhd_inet6addr_notifier_registered) {
+		dhd_inet6addr_notifier_registered = FALSE;
+		unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
+	}
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+	if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
+		if (dhd->early_suspend.suspend)
+			unregister_early_suspend(&dhd->early_suspend);
+	}
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+
+#if defined(WL_WIRELESS_EXT)
+	if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
+		/* Detach and unlink the iw */
+		wl_iw_detach();
+	}
+#endif /* defined(WL_WIRELESS_EXT) */
+
+	/* delete all interfaces, starting with the virtual ones */
+	if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
+		int i = 1;
+		dhd_if_t *ifp;
+
+		/* Cleanup virtual interfaces */
+		dhd_net_if_lock_local(dhd);
+		for (i = 1; i < DHD_MAX_IFS; i++) {
+			if (dhd->iflist[i])
+				dhd_remove_if(&dhd->pub, i, TRUE);
+		}
+		dhd_net_if_unlock_local(dhd);
+
+		/*  delete primary interface 0 */
+		ifp = dhd->iflist[0];
+		ASSERT(ifp);
+		ASSERT(ifp->net);
+		if (ifp && ifp->net) {
+			/* in unregister_netdev case, the interface gets freed by net->destructor
+			 * (which is set to free_netdev)
+			 */
+			if (ifp->net->reg_state == NETREG_UNINITIALIZED)
+				free_netdev(ifp->net);
+			else
+				unregister_netdev(ifp->net);
+			ifp->net = NULL;
+			MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
+			dhd->iflist[0] = NULL;
+		}
+	}
+
+	/* Clear the watchdog timer */
+	flags = dhd_os_spin_lock(&dhd->pub);
+	timer_valid = dhd->wd_timer_valid;
+	dhd->wd_timer_valid = FALSE;
+	dhd_os_spin_unlock(&dhd->pub, flags);
+	if (timer_valid)
+		del_timer_sync(&dhd->timer);
+
+	if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
+		if (dhd->thr_wdt_ctl.thr_pid >= 0) {
+			PROC_STOP(&dhd->thr_wdt_ctl);
+		}
+
+		if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
+			PROC_STOP(&dhd->thr_rxf_ctl);
+		}
+
+		if (dhd->thr_dpc_ctl.thr_pid >= 0) {
+			PROC_STOP(&dhd->thr_dpc_ctl);
+		} else
+			tasklet_kill(&dhd->tasklet);
+	}
+#ifdef WL_CFG80211
+	if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
+		wl_cfg80211_detach(NULL);
+		dhd_monitor_uninit();
+	}
+#endif
+	/* free deferred work queue */
+	dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
+	dhd->dhd_deferred_wq = NULL;
+
+#ifdef PNO_SUPPORT
+	if (dhdp->pno_state)
+		dhd_pno_deinit(dhdp);
+#endif
+#if defined(CONFIG_PM_SLEEP)
+	if (dhd_pm_notifier_registered) {
+		unregister_pm_notifier(&dhd->pm_notifier);
+		dhd_pm_notifier_registered = FALSE;
+	}
+#endif /* CONFIG_PM_SLEEP */
+#ifdef DEBUG_CPU_FREQ
+	if (dhd->new_freq)
+		free_percpu(dhd->new_freq);
+	dhd->new_freq = NULL;
+	cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+	if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
+		DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
+#ifdef CONFIG_HAS_WAKELOCK
+		dhd->wakelock_counter = 0;
+		dhd->wakelock_wd_counter = 0;
+		dhd->wakelock_rx_timeout_enable = 0;
+		dhd->wakelock_ctrl_timeout_enable = 0;
+		wake_lock_destroy(&dhd->wl_wifi);
+		wake_lock_destroy(&dhd->wl_rxwake);
+		wake_lock_destroy(&dhd->wl_ctrlwake);
+		wake_lock_destroy(&dhd->wl_wdwake);
+#endif /* CONFIG_HAS_WAKELOCK */
+	}
+
+
+#ifdef DHDTCPACK_SUPPRESS
+	/* This will free all MEM allocated for TCPACK SUPPRESS */
+	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
+#endif /* DHDTCPACK_SUPPRESS */
+}
+
+
+void
+dhd_free(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (dhdp) {
+		int i;
+		for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
+			if (dhdp->reorder_bufs[i]) {
+				reorder_info_t *ptr;
+				uint32 buf_size = sizeof(struct reorder_info);
+
+				ptr = dhdp->reorder_bufs[i];
+
+				buf_size += ((ptr->max_idx + 1) * sizeof(void*));
+				DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
+					i, ptr->max_idx, buf_size));
+
+				MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
+				dhdp->reorder_bufs[i] = NULL;
+			}
+		}
+		dhd = (dhd_info_t *)dhdp->info;
+		/* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
+		if (dhd &&
+			dhd != (dhd_info_t *)dhd_os_prealloc(dhdp, DHD_PREALLOC_DHD_INFO, 0, FALSE))
+			MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
+		dhd = NULL;
+	}
+}
+
+static void __exit
+dhd_module_cleanup(void)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	dhd_bus_unregister();
+
+	wl_android_exit();
+
+	dhd_wifi_platform_unregister_drv();
+}
+
+static int __init
+dhd_module_init(void)
+{
+	int err;
+
+	DHD_ERROR(("%s in\n", __FUNCTION__));
+	err = dhd_wifi_platform_register_drv();
+
+	return err;
+}
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+#if defined(CONFIG_DEFERRED_INITCALLS)
+deferred_module_init(dhd_module_init);
+#elif defined(USE_LATE_INITCALL_SYNC)
+late_initcall_sync(dhd_module_init);
+#else
+late_initcall(dhd_module_init);
+#endif /* USE_LATE_INITCALL_SYNC */
+#else
+module_init(dhd_module_init);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
+
+module_exit(dhd_module_cleanup);
+
+/*
+ * OS-specific functions required to implement the DHD driver in an
+ * OS-independent way
+ */
+int
+dhd_os_proto_block(dhd_pub_t *pub)
+{
+	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+		down(&dhd->proto_sem);
+		return 1;
+	}
+
+	return 0;
+}
+
+int
+dhd_os_proto_unblock(dhd_pub_t *pub)
+{
+	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd) {
+		up(&dhd->proto_sem);
+		return 1;
+	}
+
+	return 0;
+}
+
+unsigned int
+dhd_os_get_ioctl_resp_timeout(void)
+{
+	return ((unsigned int)dhd_ioctl_timeout_msec);
+}
+
+void
+dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
+{
+	dhd_ioctl_timeout_msec = (int)timeout_msec;
+}
+
+int
+dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool *pending)
+{
+	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+	int timeout;
+
+	/* Convert timeout from milliseconds to jiffies */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
+#else
+	timeout = dhd_ioctl_timeout_msec * HZ / 1000;
+#endif
+
+	timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
+	return timeout;
+}
+
+int
+dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	wake_up(&dhd->ioctl_resp_wait);
+	return 0;
+}
+
+void
+dhd_os_wd_timer_extend(void *bus, bool extend)
+{
+	dhd_pub_t *pub = bus;
+	dhd_info_t *dhd = (dhd_info_t *)pub->info;
+
+	if (extend)
+		dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
+	else
+		dhd_os_wd_timer(bus, dhd->default_wd_interval);
+}
+
+
+void
+dhd_os_wd_timer(void *bus, uint wdtick)
+{
+	dhd_pub_t *pub = bus;
+	dhd_info_t *dhd = (dhd_info_t *)pub->info;
+	unsigned long flags;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!dhd) {
+		DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
+		return;
+	}
+
+	flags = dhd_os_spin_lock(pub);
+
+	/* don't start the wd until fw is loaded */
+	if (pub->busstate == DHD_BUS_DOWN) {
+		dhd_os_spin_unlock(pub, flags);
+		if (!wdtick)
+			DHD_OS_WD_WAKE_UNLOCK(pub);
+		return;
+	}
+
+	/* Totally stop the timer */
+	if (!wdtick && dhd->wd_timer_valid == TRUE) {
+		dhd->wd_timer_valid = FALSE;
+		dhd_os_spin_unlock(pub, flags);
+		del_timer_sync(&dhd->timer);
+		DHD_OS_WD_WAKE_UNLOCK(pub);
+		return;
+	}
+
+	if (wdtick) {
+		DHD_OS_WD_WAKE_LOCK(pub);
+		dhd_watchdog_ms = (uint)wdtick;
+		/* Re-arm the timer with the last watchdog period */
+		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
+		dhd->wd_timer_valid = TRUE;
+	}
+	dhd_os_spin_unlock(pub, flags);
+}
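+/* Semantics of dhd_os_wd_timer(): wdtick == 0 stops the watchdog and drops
+ * the watchdog wakelock; a non-zero wdtick re-arms the timer and holds the
+ * wakelock for as long as the watchdog runs.
+ */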
+
+void *
+dhd_os_open_image_old(char *filename)
+{
+	struct file *fp;
+
+	fp = filp_open(filename, O_RDONLY, 0);
+	/*
+	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
+	 * Alternative:
+	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
+	 * ???
+	 */
+	if (IS_ERR(fp))
+		fp = NULL;
+
+	return fp;
+}
+
+int
+dhd_os_get_image_block_old(char *buf, int len, void *image)
+{
+	struct file *fp = (struct file *)image;
+	int rdlen;
+
+	if (!image)
+		return 0;
+
+	rdlen = kernel_read(fp, fp->f_pos, buf, len);
+	if (rdlen > 0)
+		fp->f_pos += rdlen;
+
+	return rdlen;
+}
+
+void
+dhd_os_close_image_old(void *image)
+{
+	if (image)
+		filp_close((struct file *)image, NULL);
+}
+
+#if defined(LINUX_FW_REQUEST_API) && defined(ENABLE_INSMOD_NO_FW_LOAD)
+static int request_firmware_use = 0;
+static struct platform_device *pdev_fw;
+static int data_offset = 0;
+
+void *
+dhd_os_open_image(char *filename)
+{
+	const struct firmware *fw = NULL;
+	data_offset = 0;
+
+	if (strstr(filename, "/")) {
+		goto fallback;
+	}
+
+	if (!pdev_fw) {
+		pdev_fw = platform_device_register_simple("bcmfw", 0, NULL, 0);
+	}
+
+	if (!pdev_fw || IS_ERR(pdev_fw)) {
+		/* no device to request against; fall back to the file method */
+		pdev_fw = NULL;
+		goto fallback;
+	} else if (request_firmware(&fw, filename, &pdev_fw->dev)) {
+		goto fallback;
+	} else {
+		request_firmware_use = 1;
+	}
+
+	return (void*)fw;
+
+fallback:
+	/* try to use the legacy file method */
+	request_firmware_use = 0;
+	return dhd_os_open_image_old(filename);
+}
+
+int
+dhd_os_get_image_block(char *buf, int len, void *image)
+{
+	const char *data;
+	int size;
+
+	if (!request_firmware_use) {
+		return dhd_os_get_image_block_old(buf, len, image);
+	}
+
+	data = ((struct firmware *)image)->data;
+	size = (int)(((struct firmware *)image)->size);
+
+	if (data_offset >= size) {
+		return 0;
+	}
+
+	if ((size - data_offset) < len) {
+		len = size - data_offset;
+	}
+
+	memcpy(buf, data + data_offset, len);
+	data_offset = data_offset + len;
+
+	return len;
+}
+
+void
+dhd_os_close_image(void *image)
+{
+	if (request_firmware_use)
+		release_firmware((struct firmware *)image);
+	else
+		dhd_os_close_image_old(image);
+
+	if (pdev_fw) {
+		platform_device_unregister(pdev_fw);
+		pdev_fw = NULL;
+	}
+}
+#else /* LINUX_FW_REQUEST_API && ENABLE_INSMOD_NO_FW_LOAD */
+void *
+dhd_os_open_image(char *filename)
+{
+	return dhd_os_open_image_old(filename);
+}
+int
+dhd_os_get_image_block(char *buf, int len, void *image)
+{
+	return dhd_os_get_image_block_old(buf, len, image);
+}
+void
+dhd_os_close_image(void *image)
+{
+	dhd_os_close_image_old(image);
+}
+#endif /* !LINUX_FW_REQUEST_API || !ENABLE_INSMOD_NO_FW_LOAD */
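+/*
+ * Illustrative consumer of the image API above (a sketch; blk size and
+ * fw_path are assumed): open, read in blocks until 0 is returned, close.
+ *
+ *	char blk[2048];
+ *	int len;
+ *	void *img = dhd_os_open_image(fw_path);
+ *	if (img) {
+ *		while ((len = dhd_os_get_image_block(blk, sizeof(blk), img)) > 0)
+ *			consume(blk, len);	(hypothetical consumer)
+ *		dhd_os_close_image(img);
+ *	}
+ */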
+
+void
+dhd_os_sdlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd_dpc_prio >= 0)
+		down(&dhd->sdsem);
+	else
+		spin_lock_bh(&dhd->sdlock);
+}
+
+void
+dhd_os_sdunlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd_dpc_prio >= 0)
+		up(&dhd->sdsem);
+	else
+		spin_unlock_bh(&dhd->sdlock);
+}
+
+void
+dhd_os_sdlock_txq(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+	spin_lock_bh(&dhd->txqlock);
+}
+
+void
+dhd_os_sdunlock_txq(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+	spin_unlock_bh(&dhd->txqlock);
+}
+
+void
+dhd_os_sdlock_rxq(dhd_pub_t *pub)
+{
+}
+
+void
+dhd_os_sdunlock_rxq(dhd_pub_t *pub)
+{
+}
+
+void
+dhd_os_sdtxlock(dhd_pub_t *pub)
+{
+	dhd_os_sdlock(pub);
+}
+
+void
+dhd_os_sdtxunlock(dhd_pub_t *pub)
+{
+	dhd_os_sdunlock(pub);
+}
+
+static void
+dhd_os_rxflock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+	spin_lock_bh(&dhd->rxf_lock);
+
+}
+
+static void
+dhd_os_rxfunlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+	spin_unlock_bh(&dhd->rxf_lock);
+}
+
+#ifdef DHDTCPACK_SUPPRESS
+void
+dhd_os_tcpacklock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+	spin_lock_bh(&dhd->tcpack_lock);
+
+}
+
+void
+dhd_os_tcpackunlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)(pub->info);
+	spin_unlock_bh(&dhd->tcpack_lock);
+}
+#endif /* DHDTCPACK_SUPPRESS */
+
+uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
+{
+	uint8* buf;
+	gfp_t flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
+
+	buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
+	if (buf == NULL && kmalloc_if_fail)
+		buf = kmalloc(size, flags);
+
+	return buf;
+}
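+/* Buffers handed out from the platform prealloc pool must not be kfree'd;
+ * see dhd_free() above, which skips MFREE when dhd_info came from
+ * DHD_PREALLOC_DHD_INFO.
+ */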
+
+void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
+{
+}
+
+#if defined(WL_WIRELESS_EXT)
+struct iw_statistics *
+dhd_get_wireless_stats(struct net_device *dev)
+{
+	int res = 0;
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	if (!dhd->pub.up) {
+		return NULL;
+	}
+
+	res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
+
+	if (res == 0)
+		return &dhd->iw.wstats;
+	else
+		return NULL;
+}
+#endif /* defined(WL_WIRELESS_EXT) */
+
+static int
+dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
+	wl_event_msg_t *event, void **data)
+{
+	int bcmerror = 0;
+	ASSERT(dhd != NULL);
+
+	bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data);
+	if (bcmerror != BCME_OK)
+		return (bcmerror);
+
+#if defined(WL_WIRELESS_EXT)
+	if (event->bsscfgidx == 0) {
+		/*
+		 * Wireless ext is on primary interface only
+		 */
+
+		ASSERT(dhd->iflist[*ifidx] != NULL);
+		ASSERT(dhd->iflist[*ifidx]->net != NULL);
+
+		if (dhd->iflist[*ifidx]->net) {
+			wl_iw_event(dhd->iflist[*ifidx]->net, event, *data);
+		}
+	}
+#endif /* defined(WL_WIRELESS_EXT) */
+
+#ifdef WL_CFG80211
+	ASSERT(dhd->iflist[*ifidx] != NULL);
+	ASSERT(dhd->iflist[*ifidx]->net != NULL);
+	if (dhd->iflist[*ifidx]->net)
+		wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data);
+#endif /* defined(WL_CFG80211) */
+
+	return (bcmerror);
+}
+
+/* send up locally generated event */
+void
+dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
+{
+	switch (ntoh32(event->event_type)) {
+
+	default:
+		break;
+	}
+}
+
+#ifdef LOG_INTO_TCPDUMP
+void
+dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
+{
+	struct sk_buff *p, *skb;
+	uint32 pktlen;
+	int len;
+	dhd_if_t *ifp;
+	dhd_info_t *dhd;
+	uchar *skb_data;
+	int ifidx = 0;
+	struct ether_header eth;
+
+	pktlen = sizeof(eth) + data_len;
+	dhd = dhdp->info;
+
+	if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
+		ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
+
+		bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
+		bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
+		ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
+		eth.ether_type = hton16(ETHER_TYPE_BRCM);
+
+		bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
+		bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
+		skb = PKTTONATIVE(dhdp->osh, p);
+		skb_data = skb->data;
+		len = skb->len;
+
+		ifidx = dhd_ifname2idx(dhd, "wlan0");
+		ifp = dhd->iflist[ifidx];
+		if (ifp == NULL)
+			 ifp = dhd->iflist[0];
+
+		ASSERT(ifp);
+		skb->dev = ifp->net;
+		skb->protocol = eth_type_trans(skb, skb->dev);
+		skb->data = skb_data;
+		skb->len = len;
+
+		/* Strip header, count, deliver upward */
+		skb_pull(skb, ETH_HLEN);
+
+		/* Send the packet */
+		if (in_interrupt()) {
+			netif_rx(skb);
+		} else {
+			netif_rx_ni(skb);
+		}
+	}
+	else {
+		/* Could not allocate an sk_buff */
+		DHD_ERROR(("%s: unable to alloc sk_buff", __FUNCTION__));
+	}
+}
+#endif /* LOG_INTO_TCPDUMP */
+
+void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	struct dhd_info *dhdinfo =  dhd->info;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
+#else
+	int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+
+	dhd_os_sdunlock(dhd);
+	wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
+	dhd_os_sdlock(dhd);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
+	return;
+}
+
+void dhd_wait_event_wakeup(dhd_pub_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	struct dhd_info *dhdinfo =  dhd->info;
+	if (waitqueue_active(&dhdinfo->ctrl_wait))
+		wake_up(&dhdinfo->ctrl_wait);
+#endif
+	return;
+}
+
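+/* flag == TRUE takes the bus down (wl down, WLFC/PNO deinit, chip reset);
+ * flag == FALSE refreshes the firmware/nvram paths before re-initializing
+ * the bus via dhd_bus_devreset().
+ */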
+int
+dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
+{
+	int ret;
+
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	if (flag == TRUE) {
+		/* Issue wl down command before resetting the chip */
+		if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
+			DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
+		}
+#ifdef PROP_TXSTATUS
+		if (dhd->pub.wlfc_enabled)
+			dhd_wlfc_deinit(&dhd->pub);
+#endif /* PROP_TXSTATUS */
+#ifdef PNO_SUPPORT
+		if (dhd->pub.pno_state)
+			dhd_pno_deinit(&dhd->pub);
+#endif
+	}
+
+	if (!flag) {
+		dhd_update_fw_nv_path(dhd);
+		/* update firmware and nvram path to sdio bus */
+		dhd_bus_update_fw_nv_path(dhd->pub.bus,	dhd->fw_path, dhd->nv_path);
+	}
+
+	ret = dhd_bus_devreset(&dhd->pub, flag);
+	if (ret) {
+		DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
+		return ret;
+	}
+
+	return ret;
+}
+
+int
+dhd_net_bus_suspend(struct net_device *dev)
+{
+	dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
+	return dhd_bus_suspend(&dhdinfo->pub);
+}
+
+int
+dhd_net_bus_resume(struct net_device *dev, uint8 stage)
+{
+	dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
+	return dhd_bus_resume(&dhdinfo->pub, stage);
+}
+
+int net_os_set_suspend_disable(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	if (dhd) {
+		ret = dhd->pub.suspend_disable_flag;
+		dhd->pub.suspend_disable_flag = val;
+	}
+	return ret;
+}
+
+int net_os_set_suspend(struct net_device *dev, int val, int force)
+{
+	int ret = 0;
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	if (dhd) {
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+		ret = dhd_set_suspend(val, &dhd->pub);
+#else
+		ret = dhd_suspend_resume_helper(dhd, val, force);
+#endif
+#ifdef WL_CFG80211
+		wl_cfg80211_update_power_mode(dev);
+#endif
+	}
+	return ret;
+}
+
+int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	if (dhd)
+		dhd->pub.suspend_bcn_li_dtim = val;
+
+	return 0;
+}
+
+#ifdef PKT_FILTER_SUPPORT
+void logfilters(int add_remove, int num)
+{
+	char b1[32], b2[32];
+	b1[sizeof(b1)-1] = b2[sizeof(b2)-1] = '\0';
+	snprintf(b1, sizeof(b1)-1, "%d", add_remove);
+	snprintf(b2, sizeof(b2)-1, "%d", num);
+//	kct_log(CT_EV_INFO, "cws.wifi", "filters", EV_FLAGS_PRIORITY_LOW, b1, b2);
+}
+
+int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	char *filterp = NULL;
+	int filter_id = 0;
+	int ret = 0;
+
+	if (!dhd || (num == DHD_UNICAST_FILTER_NUM) ||
+		(num == DHD_MDNS_FILTER_NUM))
+		return ret;
+	if (num >= dhd->pub.pktfilter_count)
+		return -EINVAL;
+#ifdef BOARD_INTEL
+	add_remove = !add_remove; /* using discard patterns instead */
+#endif
+	switch (num) {
+		case DHD_BROADCAST_FILTER_NUM:
+			filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
+			filter_id = 101;
+			break;
+		case DHD_MULTICAST4_FILTER_NUM:
+			filterp = "102 0 0 0 0xFFFFFF 0x01005E";
+			filter_id = 102;
+			break;
+		case DHD_MULTICAST6_FILTER_NUM:
+			filterp = "103 0 0 0 0xFFFF 0x3333";
+			filter_id = 103;
+			break;
+		default:
+			return -EINVAL;
+	}
+
+	/* Add filter */
+	if (add_remove) {
+		dhd->pub.pktfilter[num] = filterp;
+		dhd_pktfilter_offload_set(&dhd->pub, dhd->pub.pktfilter[num]);
+	} else { /* Delete filter */
+		dhd_pktfilter_offload_delete(&dhd->pub, filter_id);
+	}
+
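+	/* On BOARD_INTEL each toggled filter is additionally mapped to its
+	 * IPv4/IPv6 discard counterpart (ids 106-108), mirroring the drop
+	 * definitions installed at init time above.
+	 */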
+#ifdef BOARD_INTEL
+	switch (num) {
+	case DHD_BROADCAST_FILTER_NUM:
+		num = DHD_IPV4_BC_255_FILTER_NUM;
+		filterp = "106 0 0 12 0xFFFF00000000000000000000000000000000000000FF 0x080000000000000000000000000000000000000000FF";
+		filter_id = 106;
+		break;
+	case DHD_MULTICAST4_FILTER_NUM:
+		num = DHD_IPV4_MC_224_FILTER_NUM;
+		filterp = "107 0 0 12 0xFFFF00000000000000000000000000000000F0 0x080000000000000000000000000000000000E0";
+		filter_id = 107;
+		break;
+	case DHD_MULTICAST6_FILTER_NUM:
+		num = DHD_IPV6_MC_FF00_FILTER_NUM;
+		filterp = "108 0 0 12 0xFFFF000000000000000000000000000000000000000000000000FF 0x86DD000000000000000000000000000000000000000000000000FF";
+		filter_id = 108;
+		break;
+	default:
+		num = 0;
+		break;
+	}
+
+	/* Add filter */
+	if (add_remove) {
+		dhd->pub.pktfilter[num] = filterp;
+		dhd_pktfilter_offload_set(&dhd->pub, dhd->pub.pktfilter[num]);
+	} else { /* Delete filter */
+		if (dhd->pub.pktfilter[num] != NULL) {
+			dhd_pktfilter_offload_delete(&dhd->pub, filter_id);
+			dhd->pub.pktfilter[num] = NULL;
+		}
+	}
+#endif /* BOARD_INTEL */
+	logfilters(add_remove, num);
+	return ret;
+}
+
+int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
+{
+	int ret = 0;
+
+	/* Packet filtering is set only if we are still in early-suspend and
+	 * we need either to turn it ON or turn it OFF.
+	 * We can always turn it OFF in case of early-suspend, but we turn it
+	 * back ON only if suspend_disable_flag was not set.
+	 */
+	if (dhdp && dhdp->up) {
+		if (dhdp->in_suspend) {
+			if (!val || !dhdp->suspend_disable_flag)
+				dhd_enable_packet_filter(val, dhdp);
+		}
+	}
+	return ret;
+}
+
+/* function to enable/disable packet filtering for a network device */
+int net_os_enable_packet_filter(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return dhd_os_enable_packet_filter(&dhd->pub, val);
+}
+#endif /* PKT_FILTER_SUPPORT */
+
+int
+dhd_dev_init_ioctl(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret;
+
+	dhd_process_cid_mac(&dhd->pub, TRUE);
+
+	if ((ret = dhd_prot_init(&dhd->pub)) < 0)
+		goto done;
+
+	dhd_process_cid_mac(&dhd->pub, FALSE);
+
+done:
+	return ret;
+}
+
+#ifdef PNO_SUPPORT
+/* Linux wrapper to call common dhd_pno_stop_for_ssid */
+int
+dhd_dev_pno_stop_for_ssid(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_stop_for_ssid(&dhd->pub));
+}
+/* Linux wrapper to call common dhd_pno_set_for_ssid */
+int
+dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid,
+	uint16  scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
+		pno_repeat, pno_freq_expo_max, channel_list, nchan));
+}
+
+/* Linux wrapper to call common dhd_pno_enable */
+int
+dhd_dev_pno_enable(struct net_device *dev, int enable)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	return (dhd_pno_enable(&dhd->pub, enable));
+}
+
+/* Linux wrapper to call common dhd_pno_set_for_hotlist */
+int
+dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
+	struct dhd_pno_hotlist_params *hotlist_params)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
+}
+/* Linux wrapper to call common dhd_pno_stop_for_batch */
+int
+dhd_dev_pno_stop_for_batch(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	return (dhd_pno_stop_for_batch(&dhd->pub));
+}
+/* Linux wrapper to call common dhd_pno_set_for_batch */
+int
+dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
+}
+/* Linux wrapper to call common dhd_pno_get_for_batch */
+int
+dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
+}
+#endif /* PNO_SUPPORT */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+static void dhd_hang_process(void *dhd_info, void *event_info, u8 event)
+{
+	dhd_info_t *dhd;
+	struct net_device *dev;
+
+	dhd = (dhd_info_t *)dhd_info;
+	dev = dhd->iflist[0]->net;
+
+	if (dev) {
+		rtnl_lock();
+		dev_close(dev);
+		rtnl_unlock();
+#if defined(WL_WIRELESS_EXT)
+		wl_iw_send_priv_event(dev, "HANG");
+#endif
+#if defined(WL_CFG80211)
+		wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
+#endif
+	}
+}
+
+int dhd_os_send_hang_message(dhd_pub_t *dhdp)
+{
+	int ret = 0;
+	if (dhdp) {
+		if (!dhdp->hang_was_sent) {
+			dhdp->hang_was_sent = 1;
+			dhd_deferred_schedule_work((void *)dhdp, DHD_WQ_WORK_HANG_MSG,
+				dhd_hang_process, DHD_WORK_PRIORITY_HIGH);
+		}
+	}
+	return ret;
+}
+
+int net_os_send_hang_message(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	if (dhd) {
+		/* Report FW problem when enabled */
+		if (dhd->pub.hang_report) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+			ret = dhd_os_send_hang_message(&dhd->pub);
+#else
+			ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
+#endif
+		} else {
+			DHD_ERROR(("%s: FW HANG ignored (for testing purposes) and not sent up\n",
+				__FUNCTION__));
+			/* Enforce bus down to stop any future traffic */
+			dhd->pub.busstate = DHD_BUS_DOWN;
+		}
+	}
+	return ret;
+}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
+
+
+int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
+{
+	dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
+	return wifi_platform_set_power(dhdinfo->adapter, on, delay_msec);
+}
+
+void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
+	wl_country_t *cspec)
+{
+	dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
+	get_customized_country_code(dhdinfo->adapter, country_iso_code, cspec);
+}
+void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	if (dhd && dhd->pub.up) {
+		memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
+#ifdef WL_CFG80211
+		wl_update_wiphybands(NULL, notify);
+#endif
+	}
+}
+
+void dhd_bus_band_set(struct net_device *dev, uint band)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	if (dhd && dhd->pub.up) {
+#ifdef WL_CFG80211
+		wl_update_wiphybands(NULL, true);
+#endif
+	}
+}
+
+int dhd_net_set_fw_path(struct net_device *dev, char *fw)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+	if (!fw || fw[0] == '\0')
+		return -EINVAL;
+
+	strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
+	dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
+
+#if defined(SOFTAP)
+	if (strstr(fw, "apsta") != NULL) {
+		DHD_INFO(("GOT APSTA FIRMWARE\n"));
+		ap_fw_loaded = TRUE;
+	} else {
+		DHD_INFO(("GOT STA FIRMWARE\n"));
+		ap_fw_loaded = FALSE;
+	}
+#endif /* SOFTAP */
+	return 0;
+}
+
+void dhd_net_if_lock(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	dhd_net_if_lock_local(dhd);
+}
+
+void dhd_net_if_unlock(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	dhd_net_if_unlock_local(dhd);
+}
+
+static void dhd_net_if_lock_local(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	if (dhd)
+		mutex_lock(&dhd->dhd_net_if_mutex);
+#endif
+}
+
+static void dhd_net_if_unlock_local(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	if (dhd)
+		mutex_unlock(&dhd->dhd_net_if_mutex);
+#endif
+}
+
+static void dhd_suspend_lock(dhd_pub_t *pub)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	if (dhd)
+		mutex_lock(&dhd->dhd_suspend_mutex);
+#endif
+}
+
+static void dhd_suspend_unlock(dhd_pub_t *pub)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	if (dhd)
+		mutex_unlock(&dhd->dhd_suspend_mutex);
+#endif
+}
+
+unsigned long dhd_os_spin_lock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags = 0;
+
+	if (dhd)
+		spin_lock_irqsave(&dhd->dhd_lock, flags);
+
+	return flags;
+}
+
+void dhd_os_spin_unlock(dhd_pub_t *pub, unsigned long flags)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+	if (dhd)
+		spin_unlock_irqrestore(&dhd->dhd_lock, flags);
+}
+
+static int
+dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
+{
+	return (atomic_read(&dhd->pend_8021x_cnt));
+}
+
+#define MAX_WAIT_FOR_8021X_TX	50
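+/* Together with the 10 ms sleep in dhd_wait_pend8021x() below, this bounds
+ * the wait for pending 802.1X frames to roughly 500 ms.
+ */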
+
+int
+dhd_wait_pend8021x(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int timeout = msecs_to_jiffies(10);
+	int ntimes = MAX_WAIT_FOR_8021X_TX;
+	int pend = dhd_get_pend_8021x_cnt(dhd);
+
+	while (ntimes && pend) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(timeout);
+		set_current_state(TASK_RUNNING);
+		ntimes--;
+		pend = dhd_get_pend_8021x_cnt(dhd);
+	}
+	if (ntimes == 0) {
+		atomic_set(&dhd->pend_8021x_cnt, 0);
+		DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
+	}
+	return pend;
+}
+
+#ifdef DHD_DEBUG
+int
+write_to_file(dhd_pub_t *dhd, uint8 *buf, int size)
+{
+	int ret = 0;
+	struct file *fp;
+	mm_segment_t old_fs;
+	loff_t pos = 0;
+
+	/* change to KERNEL_DS address limit */
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	/* open file to write */
+	fp = filp_open("/tmp/mem_dump", O_WRONLY|O_CREAT, 0640);
+	if (IS_ERR(fp)) {
+		/* filp_open() returns an ERR_PTR, not NULL, on failure */
+		printf("%s: open file error\n", __FUNCTION__);
+		fp = NULL;
+		ret = -1;
+		goto exit;
+	}
+
+	/* Write buf to file */
+	fp->f_op->write(fp, buf, size, &pos);
+
+exit:
+	/* free buf before return */
+	MFREE(dhd->osh, buf, size);
+	/* close file before return */
+	if (fp)
+		filp_close(fp, current->files);
+	/* restore previous address limit */
+	set_fs(old_fs);
+
+	return ret;
+}
+#endif /* DHD_DEBUG */
+
+int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
+			dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
+#ifdef CONFIG_HAS_WAKELOCK
+		if (dhd->wakelock_rx_timeout_enable)
+			wake_lock_timeout(&dhd->wl_rxwake,
+				msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
+		if (dhd->wakelock_ctrl_timeout_enable)
+			wake_lock_timeout(&dhd->wl_ctrlwake,
+				msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
+#endif
+		dhd->wakelock_rx_timeout_enable = 0;
+		dhd->wakelock_ctrl_timeout_enable = 0;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
+int net_os_wake_lock_timeout(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_lock_timeout(&dhd->pub);
+	return ret;
+}
+
+int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		if (val > dhd->wakelock_rx_timeout_enable)
+			dhd->wakelock_rx_timeout_enable = val;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return 0;
+}
+
+int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		if (val > dhd->wakelock_ctrl_timeout_enable)
+			dhd->wakelock_ctrl_timeout_enable = val;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return 0;
+}
+
+int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		dhd->wakelock_ctrl_timeout_enable = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+		if (wake_lock_active(&dhd->wl_ctrlwake))
+			wake_unlock(&dhd->wl_ctrlwake);
+#endif
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return 0;
+}
+
+int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
+	return ret;
+}
+
+int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
+	return ret;
+}
+
+int dhd_os_wake_lock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
+#ifdef CONFIG_HAS_WAKELOCK
+			wake_lock(&dhd->wl_wifi);
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+			dhd_bus_dev_pm_stay_awake(pub);
+#endif
+		}
+		dhd->wakelock_counter++;
+		ret = dhd->wakelock_counter;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
+int net_os_wake_lock(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_lock(&dhd->pub);
+	return ret;
+}
+
+int dhd_os_wake_unlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	dhd_os_wake_lock_timeout(pub);
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		if (dhd->wakelock_counter > 0) {
+			dhd->wakelock_counter--;
+			if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
+#ifdef CONFIG_HAS_WAKELOCK
+				wake_unlock(&dhd->wl_wifi);
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+				dhd_bus_dev_pm_relax(pub);
+#endif
+			}
+			ret = dhd->wakelock_counter;
+		}
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
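+/* Sketch of balanced use (names as defined above): the wakelock is
+ * reference-counted, so the OS lock is taken on the 0 -> 1 transition and
+ * released on 1 -> 0:
+ *
+ *	dhd_os_wake_lock(pub);
+ *	(do work that must keep the host awake)
+ *	dhd_os_wake_unlock(pub);
+ */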
+
+int dhd_os_check_wakelock(dhd_pub_t *pub)
+{
+#if defined(CONFIG_HAS_WAKELOCK) || (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	dhd_info_t *dhd;
+
+	if (!pub)
+		return 0;
+	dhd = (dhd_info_t *)(pub->info);
+#endif /* CONFIG_HAS_WAKELOCK || (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+
+#ifdef CONFIG_HAS_WAKELOCK
+	/* Indicate to the SD Host to avoid going to suspend if internal locks are up */
+	if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
+		(wake_lock_active(&dhd->wl_wdwake))))
+		return 1;
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
+		return 1;
+#endif
+	return 0;
+}
+int net_os_wake_unlock(struct net_device *dev)
+{
+	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+	int ret = 0;
+
+	if (dhd)
+		ret = dhd_os_wake_unlock(&dhd->pub);
+	return ret;
+}
+
+int dhd_os_wd_wake_lock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		if (dhd->wakelock_wd_counter == 0 && !dhd->waive_wakelock) {
+#ifdef CONFIG_HAS_WAKELOCK
+			/* wakelock_wd_counter is zero: acquire the watchdog wakelock now */
+			wake_lock(&dhd->wl_wdwake);
+#endif
+		}
+		dhd->wakelock_wd_counter++;
+		ret = dhd->wakelock_wd_counter;
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
+int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+	unsigned long flags;
+	int ret = 0;
+
+	if (dhd) {
+		spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+		if (dhd->wakelock_wd_counter > 0) {
+			dhd->wakelock_wd_counter = 0;
+			if (!dhd->waive_wakelock) {
+#ifdef CONFIG_HAS_WAKELOCK
+				wake_unlock(&dhd->wl_wdwake);
+#endif
+			}
+		}
+		spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	}
+	return ret;
+}
+
+#ifdef PROP_TXSTATUS
+/* Waive wakelocks for operations such as IOVARs issued from the suspend path.
+ * Must be closed by a paired call to dhd_wakelock_restore(). Returns the
+ * current watchdog wakelock counter.
+ */
+int dhd_wakelock_waive(dhd_info_t *dhdinfo)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&dhdinfo->wakelock_spinlock, flags);
+	/* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
+	if (dhdinfo->waive_wakelock)
+		goto exit;
+	/* record current lock status */
+	dhdinfo->wakelock_before_waive = dhdinfo->wakelock_counter;
+	dhdinfo->waive_wakelock = TRUE;
+
+exit:
+	ret = dhdinfo->wakelock_wd_counter;
+	spin_unlock_irqrestore(&dhdinfo->wakelock_spinlock, flags);
+	return ret;
+}
+
+int dhd_wakelock_restore(dhd_info_t *dhdinfo)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&dhdinfo->wakelock_spinlock, flags);
+	/* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
+	if (!dhdinfo->waive_wakelock)
+		goto exit;
+
+	dhdinfo->waive_wakelock = FALSE;
+	/* If somebody else acquired a wakelock between dhd_wakelock_waive and
+	 * dhd_wakelock_restore, make up for it by calling wake_lock or
+	 * pm_stay_awake; if somebody released the lock in between, do the same
+	 * with wake_unlock or pm_relax.
+	 */
+	if (dhdinfo->wakelock_before_waive == 0 && dhdinfo->wakelock_counter > 0) {
+#ifdef CONFIG_HAS_WAKELOCK
+		wake_lock(&dhdinfo->wl_wifi);
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+		dhd_bus_dev_pm_stay_awake(&dhdinfo->pub);
+#endif
+	} else if (dhdinfo->wakelock_before_waive > 0 && dhdinfo->wakelock_counter == 0) {
+#ifdef CONFIG_HAS_WAKELOCK
+		wake_unlock(&dhdinfo->wl_wifi);
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+		dhd_bus_dev_pm_relax(&dhdinfo->pub);
+#endif
+	}
+	dhdinfo->wakelock_before_waive = 0;
+exit:
+	ret = dhdinfo->wakelock_wd_counter;
+	spin_unlock_irqrestore(&dhdinfo->wakelock_spinlock, flags);
+	return ret;
+}
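+/*
+ * A minimal usage sketch (hypothetical suspend path): waive/restore must be
+ * strictly paired around the section whose lock traffic should be absorbed:
+ *
+ *	dhd_wakelock_waive(dhdinfo);
+ *	...issue IOVARs that internally take and drop the wakelock...
+ *	dhd_wakelock_restore(dhdinfo);
+ *
+ * While waived, dhd_os_wake_lock()/dhd_os_wake_unlock() only update
+ * wakelock_counter; restore then compares the counter with
+ * wakelock_before_waive and issues the single wake_lock/wake_unlock (or
+ * pm_stay_awake/pm_relax) needed to bring the real lock state back in sync.
+ */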
+#endif /* PROP_TXSTATUS */
+
+bool dhd_os_check_if_up(dhd_pub_t *pub)
+{
+	if (!pub)
+		return FALSE;
+	return pub->up;
+}
+
+void dhd_os_shutdown(void *dhdp)
+{
+	dhd_pub_t *pub = (dhd_pub_t *)dhdp;
+	unsigned long flags;
+	int timer_valid = FALSE;
+	dhd_info_t *dhd;
+
+	shutdown_in_progress = TRUE;
+	if (!pub)
+		return;
+
+	dhd = (dhd_info_t *)pub->info;
+	if (!dhd)
+		return;
+
+	dhd_bus_detach(pub);
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+	if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
+		if (dhd->early_suspend.suspend)
+			unregister_early_suspend(&dhd->early_suspend);
+	}
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+	/* free deferred work queue */
+	if (dhd->dhd_deferred_wq) {
+		dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
+		dhd->dhd_deferred_wq = NULL;
+	}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))  */
+
+	/* Clear the watchdog timer */
+	flags = dhd_os_spin_lock(&dhd->pub);
+	timer_valid = dhd->wd_timer_valid;
+	dhd->wd_timer_valid = FALSE;
+	dhd_os_spin_unlock(&dhd->pub, flags);
+	if (timer_valid)
+		del_timer_sync(&dhd->timer);
+
+	/* stop all threads */
+	if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
+#ifdef DHDTHREAD
+		if (dhd->thr_wdt_ctl.thr_pid >= 0) {
+			PROC_STOP(&dhd->thr_wdt_ctl);
+		}
+
+		if (dhd->thr_dpc_ctl.thr_pid >= 0) {
+			PROC_STOP(&dhd->thr_dpc_ctl);
+		}
+#ifdef RXFRAME_THREAD
+		if (dhd->thr_rxf_ctl.thr_pid >= 0) {
+			PROC_STOP(&dhd->thr_rxf_ctl);
+		}
+#endif
+		else
+#endif /* DHDTHREAD */
+		tasklet_kill(&dhd->tasklet);
+	}
+}
+
+/* function to collect firmware, chip id and chip version info */
+void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
+{
+	int i;
+
+	i = snprintf(info_string, sizeof(info_string),
+		"  Driver: %s\n  Firmware: %s ", EPI_VERSION_STR, fw);
+
+	if (!dhdp)
+		return;
+
+	i = snprintf(&info_string[i], sizeof(info_string) - i,
+		"\n  Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp),
+		dhd_bus_chiprev_id(dhdp), dhd_bus_chippkg_id(dhdp));
+}
+
+int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
+{
+	int ifidx;
+	int ret = 0;
+	dhd_info_t *dhd = NULL;
+
+	if (!net || !netdev_priv(net)) {
+		DHD_ERROR(("%s invalid parameter\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	dhd = *(dhd_info_t **)netdev_priv(net);
+	if (!dhd)
+		return -EINVAL;
+
+	ifidx = dhd_net2idx(dhd, net);
+	if (ifidx == DHD_BAD_IF) {
+		DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
+		return -ENODEV;
+	}
+
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
+	dhd_check_hang(net, &dhd->pub, ret);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+	return ret;
+}
+
+bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
+{
+	struct net_device *net;
+
+	net = dhd_idx2net(dhdp, ifidx);
+	if (!net) {
+		DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
+		return -EINVAL;
+	}
+
+	return dhd_check_hang(net, dhdp, ret);
+}
+
+
+#ifdef PROP_TXSTATUS
+
+void dhd_wlfc_plat_init(void *dhd)
+{
+	return;
+}
+
+void dhd_wlfc_plat_deinit(void *dhd)
+{
+	return;
+}
+
+bool dhd_wlfc_skip_fc(void)
+{
+	return FALSE;
+}
+#endif /* PROP_TXSTATUS */
+
+#ifdef BCMDBGFS
+
+#include <linux/debugfs.h>
+
+extern uint32 dhd_readregl(void *bp, uint32 addr);
+extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
+
+typedef struct dhd_dbgfs {
+	struct dentry	*debugfs_dir;
+	struct dentry	*debugfs_mem;
+	dhd_pub_t 	*dhdp;
+	uint32 		size;
+} dhd_dbgfs_t;
+
+dhd_dbgfs_t g_dbgfs;
+
+static int
+dhd_dbg_state_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t
+dhd_dbg_state_read(struct file *file, char __user *ubuf,
+                       size_t count, loff_t *ppos)
+{
+	ssize_t rval;
+	uint32 tmp;
+	loff_t pos = *ppos;
+	size_t ret;
+
+	if (pos < 0)
+		return -EINVAL;
+	if (pos >= g_dbgfs.size || !count)
+		return 0;
+	if (count > g_dbgfs.size - pos)
+		count = g_dbgfs.size - pos;
+
+	/* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
+	tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
+
+	ret = copy_to_user(ubuf, &tmp, 4);
+	if (ret == count)
+		return -EFAULT;
+
+	count -= ret;
+	*ppos = pos + count;
+	rval = count;
+
+	return rval;
+}
+
+
+static ssize_t
+dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	loff_t pos = *ppos;
+	size_t ret;
+	uint32 buf;
+
+	if (pos < 0)
+		return -EINVAL;
+	if (pos >= g_dbgfs.size || !count)
+		return 0;
+	if (count > g_dbgfs.size - pos)
+		count = g_dbgfs.size - pos;
+
+	ret = copy_from_user(&buf, ubuf, sizeof(uint32));
+	if (ret == count)
+		return -EFAULT;
+
+	/* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
+	dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
+
+	return count;
+}
+
+
+loff_t
+dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
+{
+	loff_t pos = -1;
+
+	switch (whence) {
+		case SEEK_SET:
+			pos = off;
+			break;
+		case SEEK_CUR:
+			pos = file->f_pos + off;
+			break;
+		case SEEK_END:
+			pos = g_dbgfs.size - off;
+			break;
+	}
+	return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
+}
+
+static const struct file_operations dhd_dbg_state_ops = {
+	.read   = dhd_dbg_state_read,
+	.write	= dhd_debugfs_write,
+	.open   = dhd_dbg_state_open,
+	.llseek	= dhd_debugfs_lseek
+};
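+/*
+ * A minimal userspace sketch (hypothetical, assuming debugfs is mounted at
+ * /sys/kernel/debug and an example backplane address of 0x18000000): the
+ * handlers above round f_pos down to a 4-byte boundary and transfer exactly
+ * one 32-bit word, so seek first and use 4-byte accesses:
+ *
+ *	uint32_t val;
+ *	int fd = open("/sys/kernel/debug/dhd/mem", O_RDWR);
+ *	lseek(fd, 0x18000000, SEEK_SET);  // handlers key off f_pos
+ *	read(fd, &val, 4);                // one aligned register read
+ *	val |= 0x1;
+ *	lseek(fd, 0x18000000, SEEK_SET);
+ *	write(fd, &val, 4);               // write the word back
+ *	close(fd);
+ */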
+
+static void dhd_dbg_create(void)
+{
+	if (g_dbgfs.debugfs_dir) {
+		g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
+			NULL, &dhd_dbg_state_ops);
+	}
+}
+
+void dhd_dbg_init(dhd_pub_t *dhdp)
+{
+	int err;
+
+	g_dbgfs.dhdp = dhdp;
+	g_dbgfs.size = 0x20000000; /* allow access to the various core registers */
+
+	g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", NULL);
+	if (IS_ERR(g_dbgfs.debugfs_dir)) {
+		err = PTR_ERR(g_dbgfs.debugfs_dir);
+		g_dbgfs.debugfs_dir = NULL;
+		return;
+	}
+
+	dhd_dbg_create();
+}
+
+void dhd_dbg_remove(void)
+{
+	debugfs_remove(g_dbgfs.debugfs_mem);
+	debugfs_remove(g_dbgfs.debugfs_dir);
+
+	bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
+}
+#endif /* ifdef BCMDBGFS */
+
+#ifdef WLMEDIA_HTSF
+
+static
+void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf)
+{
+	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+	struct sk_buff *skb;
+	uint32 htsf = 0;
+	uint16 dport = 0, oldmagic = 0xACAC;
+	char *p1;
+	htsfts_t ts;
+
+	/*  timestamp packet  */
+
+	p1 = (char*) PKTDATA(dhdp->osh, pktbuf);
+
+	if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
+/*		memcpy(&proto, p1+26, 4);  	*/
+		memcpy(&dport, p1+40, 2);
+/* 	proto = ((ntoh32(proto))>> 16) & 0xFF;  */
+		dport = ntoh16(dport);
+	}
+
+	/* timestamp only if icmp or udp iperf with port 5555 */
+/*	if (proto == 17 && dport == tsport) { */
+	if (dport >= tsport && dport <= tsport + 20) {
+
+		skb = (struct sk_buff *) pktbuf;
+
+		htsf = dhd_get_htsf(dhd, 0);
+		memset(skb->data + 44, 0, 2); /* clear checksum */
+		memcpy(skb->data+82, &oldmagic, 2);
+		memcpy(skb->data+84, &htsf, 4);
+
+		memset(&ts, 0, sizeof(htsfts_t));
+		ts.magic  = HTSFMAGIC;
+		ts.prio   = PKTPRIO(pktbuf);
+		ts.seqnum = htsf_seqnum++;
+		ts.c10    = get_cycles();
+		ts.t10    = htsf;
+		ts.endmagic = HTSFENDMAGIC;
+
+		memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts));
+	}
+}
+
+static void dhd_dump_htsfhisto(histo_t *his, char *s)
+{
+	int pktcnt = 0, curval = 0, i;
+	for (i = 0; i < (NUMBIN-2); i++) {
+		curval += 500;
+		printf("%d ",  his->bin[i]);
+		pktcnt += his->bin[i];
+	}
+	printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt,
+		his->bin[NUMBIN-1], s);
+}
+
+static
+void sorttobin(int value, histo_t *histo)
+{
+	int i, binval = 0;
+
+	if (value < 0) {
+		histo->bin[NUMBIN-1]++;
+		return;
+	}
+	if (value > histo->bin[NUMBIN-2])  /* store the max value  */
+		histo->bin[NUMBIN-2] = value;
+
+	for (i = 0; i < (NUMBIN-2); i++) {
+		binval += 500; /* 500 us bins */
+		if (value <= binval) {
+			histo->bin[i]++;
+			return;
+		}
+	}
+	histo->bin[NUMBIN-3]++;
+}
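+/*
+ * Worked example (using the 500 us bins above): a delay of 1200 passes the
+ * 500 and 1000 boundaries and is counted in bin[2] (1000 < 1200 <= 1500).
+ * Negative values are tallied in bin[NUMBIN-1], the running maximum lives in
+ * bin[NUMBIN-2], and values beyond the last boundary fold into the final
+ * regular bin.
+ */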
+
+static
+void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+	struct sk_buff *skb;
+	char *p1;
+	uint16 old_magic;
+	int d1, d2, d3, end2end;
+	htsfts_t *htsf_ts;
+	uint32 htsf;
+
+	skb = PKTTONATIVE(dhdp->osh, pktbuf);
+	p1 = (char*)PKTDATA(dhdp->osh, pktbuf);
+
+	if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
+		memcpy(&old_magic, p1+78, 2);
+		htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4);
+	}
+	else
+		return;
+
+	if (htsf_ts->magic == HTSFMAGIC) {
+		htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
+		htsf_ts->cE0 = get_cycles();
+	}
+
+	if (old_magic == 0xACAC) {
+
+		tspktcnt++;
+		htsf = dhd_get_htsf(dhd, 0);
+		memcpy(skb->data+92, &htsf, sizeof(uint32));
+
+		memcpy(&ts[tsidx].t1, skb->data+80, 16);
+
+		d1 = ts[tsidx].t2 - ts[tsidx].t1;
+		d2 = ts[tsidx].t3 - ts[tsidx].t2;
+		d3 = ts[tsidx].t4 - ts[tsidx].t3;
+		end2end = ts[tsidx].t4 - ts[tsidx].t1;
+
+		sorttobin(d1, &vi_d1);
+		sorttobin(d2, &vi_d2);
+		sorttobin(d3, &vi_d3);
+		sorttobin(end2end, &vi_d4);
+
+		if (end2end > 0 && end2end >  maxdelay) {
+			maxdelay = end2end;
+			maxdelaypktno = tspktcnt;
+			memcpy(&maxdelayts, &ts[tsidx], 16);
+		}
+		if (++tsidx >= TSMAX)
+			tsidx = 0;
+	}
+}
+
+uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx)
+{
+	uint32 htsf = 0, cur_cycle, delta, delta_us;
+	uint32    factor, baseval, baseval2;
+	cycles_t t;
+
+	t = get_cycles();
+	cur_cycle = t;
+
+	if (cur_cycle >  dhd->htsf.last_cycle)
+		delta = cur_cycle -  dhd->htsf.last_cycle;
+	else {
+		delta = cur_cycle + (0xFFFFFFFF -  dhd->htsf.last_cycle);
+	}
+
+	delta = delta >> 4;
+
+	if (dhd->htsf.coef) {
+		/* times ten to get the first decimal digit */
+		factor = (dhd->htsf.coef * 10 + dhd->htsf.coefdec1);
+		baseval  = (delta * 10) / factor;
+		baseval2 = (delta * 10) / (factor + 1);
+		delta_us = (baseval - (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
+		htsf = (delta_us << 4) + dhd->htsf.last_tsf + HTSF_BUS_DELAY;
+	}
+	else {
+		DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
+	}
+
+	return htsf;
+}
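+/*
+ * Worked example for dhd_get_htsf() (hypothetical numbers): with coef=24,
+ * coefdec1=5 and coefdec2=7 the cycle clock runs at 24.57 cycles per TSF
+ * tick. For delta = 98280 (already shifted right by 4 above):
+ *
+ *	factor   = 24 * 10 + 5          = 245
+ *	baseval  = 982800 / 245         = 4011
+ *	baseval2 = 982800 / 246         = 3995
+ *	delta_us = 4011 - ((4011 - 3995) * 7) / 10 = 4011 - 11 = 4000
+ *
+ * i.e. 98280 / 24.57 = ~4000, recovered with integer arithmetic only; the
+ * second divisor (factor + 1) interpolates the remaining decimal digit.
+ */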
+
+static void dhd_dump_latency(void)
+{
+	int i, max = 0;
+	int d1, d2, d3, d4, d5;
+
+	printf("T1       T2       T3       T4           d1  d2   t4-t1     i    \n");
+	for (i = 0; i < TSMAX; i++) {
+		d1 = ts[i].t2 - ts[i].t1;
+		d2 = ts[i].t3 - ts[i].t2;
+		d3 = ts[i].t4 - ts[i].t3;
+		d4 = ts[i].t4 - ts[i].t1;
+		d5 = ts[max].t4-ts[max].t1;
+		if (d4 > d5 && d4 > 0)  {
+			max = i;
+		}
+		printf("%08X %08X %08X %08X \t%d %d %d   %d i=%d\n",
+			ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4,
+			d1, d2, d3, d4, i);
+	}
+
+	printf("current idx = %d \n", tsidx);
+
+	printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt);
+	printf("%08X %08X %08X %08X \t%d %d %d   %d\n",
+	maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4,
+	maxdelayts.t2 - maxdelayts.t1,
+	maxdelayts.t3 - maxdelayts.t2,
+	maxdelayts.t4 - maxdelayts.t3,
+	maxdelayts.t4 - maxdelayts.t1);
+}
+
+
+static int
+dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx)
+{
+	wl_ioctl_t ioc;
+	char buf[32];
+	int ret;
+	uint32 s1, s2;
+
+	struct tsf {
+		uint32 low;
+		uint32 high;
+	} tsf_buf;
+
+	memset(&ioc, 0, sizeof(ioc));
+	memset(&tsf_buf, 0, sizeof(tsf_buf));
+
+	ioc.cmd = WLC_GET_VAR;
+	ioc.buf = buf;
+	ioc.len = (uint)sizeof(buf);
+	ioc.set = FALSE;
+
+	strncpy(buf, "tsf", sizeof(buf) - 1);
+	buf[sizeof(buf) - 1] = '\0';
+	s1 = dhd_get_htsf(dhd, 0);
+	if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+		if (ret == -EIO) {
+			DHD_ERROR(("%s: tsf is not supported by device\n",
+				dhd_ifname(&dhd->pub, ifidx)));
+			return -EOPNOTSUPP;
+		}
+		return ret;
+	}
+	s2 = dhd_get_htsf(dhd, 0);
+
+	memcpy(&tsf_buf, buf, sizeof(tsf_buf));
+	printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
+		tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1,
+		dhd->htsf.coefdec2, s2-tsf_buf.low);
+	printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle);
+	return 0;
+}
+
+void htsf_update(dhd_info_t *dhd, void *data)
+{
+	static ulong  cur_cycle = 0, prev_cycle = 0;
+	uint32 htsf, tsf_delta = 0;
+	uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp;
+	cycles_t t;
+
+	/* cycles_t in include/mips/timex.h */
+
+	t = get_cycles();
+
+	prev_cycle = cur_cycle;
+	cur_cycle = t;
+
+	if (cur_cycle > prev_cycle)
+		cyc_delta = cur_cycle - prev_cycle;
+	else {
+		cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle);
+	}
+
+	if (data == NULL) {
+		printf(" tsf update: data pointer is null\n");
+		return;
+	}
+
+	memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t));
+	memcpy(&cur_tsf, data, sizeof(tsf_t));
+
+	if (cur_tsf.low == 0) {
+		DHD_INFO((" ---- 0 TSF, do not update, return\n"));
+		return;
+	}
+
+	if (cur_tsf.low > prev_tsf.low)
+		tsf_delta = (cur_tsf.low - prev_tsf.low);
+	else {
+		DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
+		 cur_tsf.low, prev_tsf.low));
+		if (cur_tsf.high > prev_tsf.high) {
+			tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
+			DHD_INFO((" ---- Wrap around tsf coutner  adjusted TSF=%08X\n", tsf_delta));
+		}
+		else
+			return; /* do not update */
+	}
+
+	if (tsf_delta)  {
+		hfactor = cyc_delta / tsf_delta;
+		tmp  = (cyc_delta - (hfactor * tsf_delta)) * 10;
+		dec1 = tmp / tsf_delta;
+		dec2 = ((tmp - dec1 * tsf_delta) * 10) / tsf_delta;
+		tmp  = (tmp - (dec1 * tsf_delta)) * 10;
+		dec3 = ((tmp - dec2 * tsf_delta) * 10) / tsf_delta;
+
+		if (dec3 > 4) {
+			if (dec2 == 9) {
+				dec2 = 0;
+				if (dec1 == 9) {
+					dec1 = 0;
+					hfactor++;
+				}
+				else {
+					dec1++;
+				}
+			}
+			else
+				dec2++;
+		}
+	}
+
+	if (hfactor) {
+		htsf = ((cyc_delta * 10)  / (hfactor*10+dec1)) + prev_tsf.low;
+		dhd->htsf.coef = hfactor;
+		dhd->htsf.last_cycle = cur_cycle;
+		dhd->htsf.last_tsf = cur_tsf.low;
+		dhd->htsf.coefdec1 = dec1;
+		dhd->htsf.coefdec2 = dec2;
+	}
+	else {
+		htsf = prev_tsf.low;
+	}
+}
+
+#endif /* WLMEDIA_HTSF */
+
+#ifdef CUSTOM_SET_CPUCORE
+void dhd_set_cpucore(dhd_pub_t *dhd, int set)
+{
+	int e_dpc = 0, e_rxf = 0, retry_set = 0;
+
+	if (!(dhd->chan_isvht80)) {
+		DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
+		return;
+	}
+
+	if (DPC_CPUCORE) {
+		do {
+			if (set == TRUE) {
+				e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
+					cpumask_of(DPC_CPUCORE));
+			} else {
+				e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
+					cpumask_of(PRIMARY_CPUCORE));
+			}
+			if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
+				DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
+				return;
+			}
+			if (e_dpc < 0)
+				OSL_SLEEP(1);
+		} while (e_dpc < 0);
+	}
+	if (RXF_CPUCORE) {
+		do {
+			if (set == TRUE) {
+				e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
+					cpumask_of(RXF_CPUCORE));
+			} else {
+				e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
+					cpumask_of(PRIMARY_CPUCORE));
+			}
+			if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
+				DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
+				return;
+			}
+			if (e_rxf < 0)
+				OSL_SLEEP(1);
+		} while (e_rxf < 0);
+	}
+#ifdef DHD_OF_SUPPORT
+	interrupt_set_cpucore(set);
+#endif /* DHD_OF_SUPPORT */
+	DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));
+
+	return;
+}
+#endif /* CUSTOM_SET_CPUCORE */
diff --git a/drivers/staging/edison-bcm43340/dhd_linux.h b/drivers/staging/edison-bcm43340/dhd_linux.h
new file mode 100644
index 0000000..93a7022
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/dhd_linux.h
@@ -0,0 +1,78 @@
+/*
+ * DHD Linux header file (dhd_linux exports for cfg80211 and other components)
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux.h 399301 2013-04-29 21:41:52Z $
+ */
+
+/* Wifi platform functions for power, interrupt and pre-alloc, taken either
+ * from Android-like platform device data or from Broadcom wifi platform
+ * device data.
+ */
+#ifndef __DHD_LINUX_H__
+#define __DHD_LINUX_H__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#define DHD_REGISTRATION_TIMEOUT  12000  /* msec: allowed time to finish DHD registration */
+
+typedef struct wifi_adapter_info {
+	const char	*name;
+	uint		irq_num;
+	uint		intr_flags;
+	const char	*fw_path;
+	const char	*nv_path;
+	void		*wifi_plat_data;	/* wifi ctrl func, for backward compatibility */
+	uint		bus_type;
+	uint		bus_num;
+	uint		slot_num;
+} wifi_adapter_info_t;
+
+typedef struct bcmdhd_wifi_platdata {
+	uint				num_adapters;
+	wifi_adapter_info_t	*adapters;
+	struct mutex load_lock;
+} bcmdhd_wifi_platdata_t;
+
+int dhd_wifi_platform_register_drv(void);
+void dhd_wifi_platform_unregister_drv(void);
+wifi_adapter_info_t* dhd_wifi_platform_get_adapter(uint32 bus_type, uint32 bus_num,
+	uint32 slot_num);
+int wifi_platform_set_power(wifi_adapter_info_t *adapter, bool on, unsigned long msec);
+int wifi_platform_bus_enumerate(wifi_adapter_info_t *adapter, bool device_present);
+int wifi_platform_get_irq_number(wifi_adapter_info_t *adapter, unsigned long *irq_flags_ptr);
+bool wifi_platform_irq_is_fastirq(wifi_adapter_info_t *adapter);
+int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf);
+void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode);
+void* wifi_platform_prealloc(wifi_adapter_info_t *adapter, int section, unsigned long size);
+void* wifi_platform_get_prealloc_func_ptr(wifi_adapter_info_t *adapter);
+void dhd_wifi_platform_load_lock(void);
+void dhd_wifi_platform_load_unlock(void);
+
+int dhd_get_fw_mode(struct dhd_info *dhdinfo);
+bool dhd_update_fw_nv_path(struct dhd_info *dhdinfo);
+
+#endif /* __DHD_LINUX_H__ */
diff --git a/drivers/staging/edison-bcm43340/dhd_linux_platdev.c b/drivers/staging/edison-bcm43340/dhd_linux_platdev.c
new file mode 100644
index 0000000..fd6c2ef
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/dhd_linux_platdev.c
@@ -0,0 +1,805 @@
+/*
+ * Linux platform device for DHD WLAN adapter
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux_platdev.c 401742 2013-05-13 15:03:21Z $
+ */
+#include <typedefs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <linux/acpi_gpio.h>
+#include <asm/intel-mid.h>
+#include <bcmutils.h>
+#include <linux_osl.h>
+#include <dhd_dbg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_linux.h>
+#include <wl_android.h>
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+#include <linux/wlan_plat.h>
+#endif
+
+
+#if !defined(CONFIG_WIFI_CONTROL_FUNC)
+struct wifi_platform_data {
+	int (*set_power)(int val);
+	int (*set_reset)(int val);
+	int (*set_carddetect)(int val);
+	void *(*mem_prealloc)(int section, unsigned long size);
+	int (*get_mac_addr)(unsigned char *buf);
+	void *(*get_country_code)(char *ccode);
+	char *nvram_id;
+	bool use_fast_irq;
+};
+#endif /* CONFIG_WIFI_CONTROL_FUNC */
+
+#define WIFI_PLAT_NAME		"wlan"
+#define WIFI_PLAT_NAME2		"bcm4329_wlan"
+#define WIFI_PLAT_EXT		"bcmdhd_wifi_platform"
+
+bool cfg_multichip = FALSE;
+bcmdhd_wifi_platdata_t *dhd_wifi_platdata = NULL;
+static int wifi_plat_dev_probe_ret = 0;
+static bool is_power_on = FALSE;
+#ifdef DHD_OF_SUPPORT
+static bool dts_enabled = TRUE;
+extern struct resource dhd_wlan_resources;
+extern struct wifi_platform_data dhd_wlan_control;
+#else
+static bool dts_enabled = FALSE;
+struct resource dhd_wlan_resources = {0};
+extern int bcmsdh_sdmmc_set_power(int on);
+struct wifi_platform_data dhd_wlan_control = {
+	.set_power = bcmsdh_sdmmc_set_power,
+	.get_mac_addr = dhd_custom_get_mac_address,
+};
+#endif /* DHD_OF_SUPPORT */
+
+static int dhd_wifi_platform_load(void);
+
+extern void* wl_cfg80211_get_dhdp(void);
+
+#ifdef ENABLE_4335BT_WAR
+extern int bcm_bt_lock(int cookie);
+extern void bcm_bt_unlock(int cookie);
+static int lock_cookie_wifi = 'W' | 'i'<<8 | 'F'<<16 | 'i'<<24;	/* cookie is "WiFi" */
+#endif /* ENABLE_4335BT_WAR */
+
+wifi_adapter_info_t* dhd_wifi_platform_get_adapter(uint32 bus_type, uint32 bus_num, uint32 slot_num)
+{
+	int i;
+
+	if (dhd_wifi_platdata == NULL)
+		return NULL;
+
+	for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+		wifi_adapter_info_t *adapter = &dhd_wifi_platdata->adapters[i];
+		if ((adapter->bus_type == -1 || adapter->bus_type == bus_type) &&
+			(adapter->bus_num == -1 || adapter->bus_num == bus_num) &&
+			(adapter->slot_num == -1 || adapter->slot_num == slot_num)) {
+			DHD_TRACE(("found adapter info '%s'\n", adapter->name));
+			return adapter;
+		}
+	}
+	return NULL;
+}
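+/*
+ * A minimal usage sketch (hypothetical caller; the bus-type constant is
+ * assumed to come from the bus glue, e.g. an SDIO bus id with bus 1,
+ * slot 1): the generic adapter created by wifi_ctrlfunc_register_drv()
+ * stores -1 in bus_type/bus_num/slot_num, so -1 acts as a wildcard and
+ * matches any probed device:
+ *
+ *	wifi_adapter_info_t *adapter;
+ *	adapter = dhd_wifi_platform_get_adapter(bus_type, 1, 1);
+ *	if (adapter)
+ *		DHD_TRACE(("using adapter '%s'\n", adapter->name));
+ */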
+
+void* wifi_platform_prealloc(wifi_adapter_info_t *adapter, int section, unsigned long size)
+{
+	void *alloc_ptr = NULL;
+	struct wifi_platform_data *plat_data;
+
+	if (!adapter || !adapter->wifi_plat_data)
+		return NULL;
+	plat_data = adapter->wifi_plat_data;
+	if (plat_data->mem_prealloc) {
+		alloc_ptr = plat_data->mem_prealloc(section, size);
+		if (alloc_ptr) {
+			DHD_INFO(("success alloc section %d\n", section));
+			if (size != 0L)
+				bzero(alloc_ptr, size);
+			return alloc_ptr;
+		}
+	}
+	return NULL;
+}
+
+void* wifi_platform_get_prealloc_func_ptr(wifi_adapter_info_t *adapter)
+{
+	struct wifi_platform_data *plat_data;
+
+	if (!adapter || !adapter->wifi_plat_data)
+		return NULL;
+	plat_data = adapter->wifi_plat_data;
+	return plat_data->mem_prealloc;
+}
+
+int wifi_platform_get_irq_number(wifi_adapter_info_t *adapter, unsigned long *irq_flags_ptr)
+{
+	if (adapter == NULL)
+		return -1;
+	if (irq_flags_ptr)
+		*irq_flags_ptr = adapter->intr_flags;
+	return adapter->irq_num;
+}
+
+bool wifi_platform_irq_is_fastirq(wifi_adapter_info_t *adapter)
+{
+	struct wifi_platform_data *plat_data;
+
+	if (!adapter || !adapter->wifi_plat_data)
+		return false;
+	plat_data = adapter->wifi_plat_data;
+	return plat_data->use_fast_irq;
+}
+
+int wifi_platform_set_power(wifi_adapter_info_t *adapter, bool on, unsigned long msec)
+{
+	int err = 0;
+	struct wifi_platform_data *plat_data;
+
+	if (!adapter || !adapter->wifi_plat_data)
+		return -EINVAL;
+	plat_data = adapter->wifi_plat_data;
+
+	DHD_ERROR(("%s = %d\n", __FUNCTION__, on));
+	if (plat_data->set_power) {
+#ifdef ENABLE_4335BT_WAR
+		if (on) {
+			printk("WiFi: trying to acquire BT lock\n");
+			if (bcm_bt_lock(lock_cookie_wifi) != 0)
+				printk("** WiFi: timeout in acquiring bt lock**\n");
+			printk("%s: btlock acquired\n", __FUNCTION__);
+		}
+		else {
+			/* For an exceptional case, release the BT lock */
+			bcm_bt_unlock(lock_cookie_wifi);
+		}
+#endif /* ENABLE_4335BT_WAR */
+		err = plat_data->set_power(on);
+	}
+
+	if (msec && !err)
+		OSL_SLEEP(msec);
+
+	if (on && !err)
+		is_power_on = TRUE;
+	else
+		is_power_on = FALSE;
+
+	return err;
+}
+
+int wifi_platform_bus_enumerate(wifi_adapter_info_t *adapter, bool device_present)
+{
+	int err = 0;
+	struct wifi_platform_data *plat_data;
+
+	if (!adapter || !adapter->wifi_plat_data)
+		return -EINVAL;
+	plat_data = adapter->wifi_plat_data;
+
+	DHD_ERROR(("%s device present %d\n", __FUNCTION__, device_present));
+	if (plat_data->set_carddetect) {
+		err = plat_data->set_carddetect(device_present);
+	}
+	return err;
+}
+
+int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf)
+{
+	struct wifi_platform_data *plat_data;
+
+	DHD_ERROR(("%s\n", __FUNCTION__));
+	if (!buf || !adapter || !adapter->wifi_plat_data)
+		return -EINVAL;
+	plat_data = adapter->wifi_plat_data;
+	if (plat_data->get_mac_addr) {
+		return plat_data->get_mac_addr(buf);
+	}
+	return -EOPNOTSUPP;
+}
+
+void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode)
+{
+	/* get_country_code is only available on kernel 2.6.39 and later */
+#if	(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+	struct wifi_platform_data *plat_data;
+
+	if (!ccode || !adapter || !adapter->wifi_plat_data)
+		return NULL;
+	plat_data = adapter->wifi_plat_data;
+
+	DHD_TRACE(("%s\n", __FUNCTION__));
+	if (plat_data->get_country_code) {
+		return plat_data->get_country_code(ccode);
+	}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */
+
+	return NULL;
+}
+
+static int wifi_plat_dev_drv_probe(struct platform_device *pdev)
+{
+	struct resource *resource;
+	wifi_adapter_info_t *adapter;
+
+	/* Android style wifi platform data device ("bcmdhd_wlan" or "bcm4329_wlan")
+	 * is kept for backward compatibility and supports only 1 adapter
+	 */
+	ASSERT(dhd_wifi_platdata != NULL);
+	ASSERT(dhd_wifi_platdata->num_adapters == 1);
+	adapter = &dhd_wifi_platdata->adapters[0];
+
+	adapter->wifi_plat_data = (struct wifi_platform_data *)(pdev->dev.platform_data);
+	if (!adapter->wifi_plat_data) {
+		DHD_ERROR(("%s: unable to get platform data !\n", __FUNCTION__));
+		return -ENODATA;
+	}
+	((struct wifi_platform_data *)(adapter->wifi_plat_data))->set_power = bcmsdh_sdmmc_set_power;
+	((struct wifi_platform_data *)(adapter->wifi_plat_data))->get_mac_addr = dhd_custom_get_mac_address;
+
+	resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (resource == NULL) {
+		DHD_ERROR(("%s: unable to get IORESOURCE_IRQ !\n", __FUNCTION__));
+		return -ENODATA;
+	}
+
+	adapter->irq_num = resource->start;
+	adapter->intr_flags = resource->flags & IRQF_TRIGGER_MASK;
+
+	wifi_plat_dev_probe_ret = dhd_wifi_platform_load();
+	return wifi_plat_dev_probe_ret;
+}
+
+#ifdef CONFIG_ACPI
+static int wifi_plat_dev_drv_probe_acpi(struct platform_device *pdev)
+{
+	wifi_adapter_info_t *adapter;
+	acpi_handle handle;
+	struct acpi_device *adev;
+	int irq_num;
+
+	/* Android style wifi platform data device ("bcmdhd_wlan" or "bcm4329_wlan")
+	 * is kept for backward compatibility and supports only 1 adapter
+	 */
+	ASSERT(dhd_wifi_platdata != NULL);
+	ASSERT(dhd_wifi_platdata->num_adapters == 1);
+	adapter = &dhd_wifi_platdata->adapters[0];
+	adapter->wifi_plat_data = (void *)&dhd_wlan_control;
+
+	if (ACPI_HANDLE(&pdev->dev)) {
+		handle = ACPI_HANDLE(&pdev->dev);
+
+		/* Don't try to do ACPI PM for the wifi module */
+		if (!handle || acpi_bus_get_device(handle, &adev))
+			DHD_ERROR(("%s: could not get acpi pointer!\n", __FUNCTION__));
+		else
+			adev->flags.power_manageable = 0;
+		irq_num = acpi_get_gpio_by_index(&pdev->dev, 0, NULL);
+		DHD_INFO(("%s: Using ACPI table to get IRQ number: %d\n", __FUNCTION__, irq_num));
+		if (irq_num < 0) {
+			if (INTEL_MID_BOARD(2, TABLET, BYT, BLB, PRO) ||
+			    INTEL_MID_BOARD(2, TABLET, BYT, BLB, ENG)) {
+				DHD_INFO(("%s: BYT-M hardcoding\n", __FUNCTION__));
+				irq_num = acpi_get_gpio("\\_SB.GPO2", 17);
+			}
+			else {
+				DHD_INFO(("%s: BYT-T hardcoding\n", __FUNCTION__));
+				irq_num = acpi_get_gpio("\\_SB.GPO2", 15);
+			}
+		}
+	} else {
+		DHD_ERROR(("%s: Null ACPI_HANDLE, try legacy probe\n", __FUNCTION__));
+		return wifi_plat_dev_drv_probe(pdev);
+	}
+
+	adapter->irq_num = irq_num;
+	adapter->intr_flags = IRQF_TRIGGER_FALLING;
+
+	wifi_plat_dev_probe_ret = dhd_wifi_platform_load();
+	return wifi_plat_dev_probe_ret;
+}
+#endif /* CONFIG_ACPI */
+
+static int wifi_plat_dev_drv_remove(struct platform_device *pdev)
+{
+	wifi_adapter_info_t *adapter;
+
+	/* Android style wifi platform data device ("bcmdhd_wlan" or "bcm4329_wlan")
+	 * is kept for backward compatibility and supports only 1 adapter
+	 */
+	ASSERT(dhd_wifi_platdata != NULL);
+	ASSERT(dhd_wifi_platdata->num_adapters == 1);
+	adapter = &dhd_wifi_platdata->adapters[0];
+	if (is_power_on) {
+		wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+		wifi_platform_bus_enumerate(adapter, FALSE);
+	}
+
+	return 0;
+}
+
+static int wifi_plat_dev_drv_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	DHD_TRACE(("##> %s\n", __FUNCTION__));
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY)
+	bcmsdh_oob_intr_set(0);
+#endif /* (OOB_INTR_ONLY) */
+	return 0;
+}
+
+static int wifi_plat_dev_drv_resume(struct platform_device *pdev)
+{
+	DHD_TRACE(("##> %s\n", __FUNCTION__));
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY)
+	if (dhd_os_check_if_up(wl_cfg80211_get_dhdp()))
+		bcmsdh_oob_intr_set(1);
+#endif /* (OOB_INTR_ONLY) */
+	return 0;
+}
+
+#ifdef CONFIG_ACPI
+static struct acpi_device_id bcm_acpi_id[] = {
+/* ACPI IDs here */
+	{ "BCM43241" },
+	{ "BCM4321" },
+	{ "RTL8723" },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, bcm_acpi_id);
+#endif
+
+static struct platform_driver wifi_platform_dev_driver = {
+#ifdef CONFIG_ACPI
+	.probe          = wifi_plat_dev_drv_probe_acpi,
+#else
+	.probe          = wifi_plat_dev_drv_probe,
+#endif
+	.remove         = wifi_plat_dev_drv_remove,
+	.suspend        = wifi_plat_dev_drv_suspend,
+	.resume         = wifi_plat_dev_drv_resume,
+	.driver         = {
+#ifdef CONFIG_ACPI
+	.acpi_match_table = ACPI_PTR(bcm_acpi_id),
+#endif
+	.name   = WIFI_PLAT_NAME,
+	}
+};
+
+static struct platform_driver wifi_platform_dev_driver_legacy = {
+	.probe          = wifi_plat_dev_drv_probe,
+	.remove         = wifi_plat_dev_drv_remove,
+	.suspend        = wifi_plat_dev_drv_suspend,
+	.resume         = wifi_plat_dev_drv_resume,
+	.driver         = {
+	.name	= WIFI_PLAT_NAME2,
+	}
+};
+
+#ifdef CONFIG_ACPI
+static int wifi_acpi_match(struct device *dev, void *data)
+{
+	struct acpi_device_id *ids = data, *id;
+	struct platform_device *pdev = to_platform_device(dev);
+
+	for (id = ids; id->id[0]; id++) {
+		if (!strncmp(id->id, pdev->name, strlen(id->id))) {
+			DHD_ERROR(("found wifi acpi device %s\n", id->id));
+			return TRUE;
+		}
+	}
+	return FALSE;
+}
+#endif /* CONFIG_ACPI */
+
+static int wifi_platdev_match(struct device *dev, void *data)
+{
+	char *name = (char*)data;
+	struct platform_device *pdev = to_platform_device(dev);
+
+	if (strcmp(pdev->name, name) == 0) {
+		DHD_ERROR(("found wifi platform device %s\n", name));
+		return TRUE;
+	}
+
+	return FALSE;
+}
+
+static int wifi_ctrlfunc_register_drv(void)
+{
+	int err = 0;
+	struct device *dev1, *dev2 = NULL;
+	wifi_adapter_info_t *adapter;
+
+#ifdef CONFIG_ACPI
+	dev1 = bus_find_device(&platform_bus_type, NULL, bcm_acpi_id, wifi_acpi_match);
+	if (!dev1)
+#endif /* CONFIG_ACPI */
+	dev1 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME, wifi_platdev_match);
+	dev2 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME2, wifi_platdev_match);
+
+	if (!dts_enabled) {
+		if (dev1 == NULL && dev2 == NULL) {
+			DHD_ERROR(("no wifi platform data, skip\n"));
+			return -ENXIO;
+		}
+	}
+
+	/* multi-chip support not enabled, build one adapter information for
+	 * DHD (either SDIO, USB or PCIe)
+	 */
+	adapter = kzalloc(sizeof(wifi_adapter_info_t), GFP_KERNEL);
+	adapter->name = "DHD generic adapter";
+	adapter->bus_type = -1;
+	adapter->bus_num = -1;
+	adapter->slot_num = -1;
+	adapter->irq_num = -1;
+	is_power_on = FALSE;
+	wifi_plat_dev_probe_ret = 0;
+	dhd_wifi_platdata = kzalloc(sizeof(bcmdhd_wifi_platdata_t), GFP_KERNEL);
+	dhd_wifi_platdata->num_adapters = 1;
+	dhd_wifi_platdata->adapters = adapter;
+	mutex_init(&dhd_wifi_platdata->load_lock);
+
+	if (dev1) {
+		err = platform_driver_register(&wifi_platform_dev_driver);
+		if (err) {
+			DHD_ERROR(("%s: failed to register wifi ctrl func driver\n",
+				__FUNCTION__));
+			return err;
+		}
+	}
+	if (dev2) {
+		err = platform_driver_register(&wifi_platform_dev_driver_legacy);
+		if (err) {
+			DHD_ERROR(("%s: failed to register wifi ctrl func legacy driver\n",
+				__FUNCTION__));
+			return err;
+		}
+	}
+
+	if (dts_enabled) {
+		struct resource *resource;
+		adapter->wifi_plat_data = (void *)&dhd_wlan_control;
+		resource = &dhd_wlan_resources;
+		adapter->irq_num = resource->start;
+		adapter->intr_flags = resource->flags & IRQF_TRIGGER_MASK;
+		wifi_plat_dev_probe_ret = dhd_wifi_platform_load();
+	}
+
+	/* return the probe function's return value if registration succeeded */
+	return wifi_plat_dev_probe_ret;
+}
+
+void wifi_ctrlfunc_unregister_drv(void)
+{
+	struct device *dev1, *dev2 = NULL;
+
+#ifdef CONFIG_ACPI
+	dev1 = bus_find_device(&platform_bus_type, NULL, bcm_acpi_id, wifi_acpi_match);
+	if (!dev1)
+#endif /* CONFIG_ACPI */
+	dev1 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME, wifi_platdev_match);
+	dev2 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME2, wifi_platdev_match);
+	if (!dts_enabled)
+		if (dev1 == NULL && dev2 == NULL)
+			return;
+
+	DHD_ERROR(("unregister wifi platform drivers\n"));
+	if (dev1)
+		platform_driver_unregister(&wifi_platform_dev_driver);
+	if (dev2)
+		platform_driver_unregister(&wifi_platform_dev_driver_legacy);
+	if (dts_enabled) {
+		wifi_adapter_info_t *adapter;
+		adapter = &dhd_wifi_platdata->adapters[0];
+		if (is_power_on) {
+			wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+			wifi_platform_bus_enumerate(adapter, FALSE);
+		}
+	}
+	kfree(dhd_wifi_platdata->adapters);
+	dhd_wifi_platdata->adapters = NULL;
+	dhd_wifi_platdata->num_adapters = 0;
+	kfree(dhd_wifi_platdata);
+	dhd_wifi_platdata = NULL;
+}
+
+static int bcmdhd_wifi_plat_dev_drv_probe(struct platform_device *pdev)
+{
+	dhd_wifi_platdata = (bcmdhd_wifi_platdata_t *)(pdev->dev.platform_data);
+
+	return dhd_wifi_platform_load();
+}
+
+static int bcmdhd_wifi_plat_dev_drv_remove(struct platform_device *pdev)
+{
+	int i;
+	wifi_adapter_info_t *adapter;
+	ASSERT(dhd_wifi_platdata != NULL);
+
+	/* power down all adapters */
+	for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+		adapter = &dhd_wifi_platdata->adapters[i];
+		wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+		wifi_platform_bus_enumerate(adapter, FALSE);
+	}
+	return 0;
+}
+
+static struct platform_driver dhd_wifi_platform_dev_driver = {
+	.probe          = bcmdhd_wifi_plat_dev_drv_probe,
+	.remove         = bcmdhd_wifi_plat_dev_drv_remove,
+	.driver         = {
+	.name   = WIFI_PLAT_EXT,
+	}
+};
+
+int dhd_wifi_platform_register_drv(void)
+{
+	int err = 0;
+	struct device *dev;
+
+	/* register Broadcom wifi platform data driver if multi-chip is enabled,
+	 * otherwise use Android style wifi platform data (aka wifi control function)
+	 * if it exists
+	 *
+	 * to support multi-chip DHD, Broadcom wifi platform data device must
+	 * be added in kernel early boot (e.g. board config file).
+	 */
+	if (cfg_multichip) {
+#ifdef CONFIG_ACPI
+		dev = bus_find_device(&platform_bus_type, NULL, bcm_acpi_id, wifi_acpi_match);
+		if (!dev)
+#endif /* CONFIG_ACPI */
+		dev = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_EXT, wifi_platdev_match);
+		if (dev == NULL) {
+			DHD_ERROR(("bcmdhd wifi platform data device not found!!\n"));
+			return -ENXIO;
+		}
+		err = platform_driver_register(&dhd_wifi_platform_dev_driver);
+	} else {
+		err = wifi_ctrlfunc_register_drv();
+
+		/* if there is no wifi ctrl func either, load the bus directly and ignore the error */
+		if (err) {
+			if (err == -ENXIO) {
+				/* wifi ctrl function does not exist */
+				err = dhd_wifi_platform_load();
+			} else {
+				/* unregister driver due to initialization failure */
+				wifi_ctrlfunc_unregister_drv();
+			}
+		}
+	}
+
+	return err;
+}
+
+#ifdef BCMPCIE
+static int dhd_wifi_platform_load_pcie(void)
+{
+	int err = 0;
+	err = dhd_bus_register();
+	return err;
+}
+#else
+static int dhd_wifi_platform_load_pcie(void)
+{
+	return 0;
+}
+#endif /* BCMPCIE  */
+
+
+void dhd_wifi_platform_unregister_drv(void)
+{
+	if (cfg_multichip)
+		platform_driver_unregister(&dhd_wifi_platform_dev_driver);
+	else
+		wifi_ctrlfunc_unregister_drv();
+}
+
+extern int dhd_watchdog_prio;
+extern int dhd_dpc_prio;
+extern uint dhd_deferred_tx;
+#if defined(BCMLXSDMMC)
+extern struct semaphore dhd_registration_sem;
+#endif /* BCMLXSDMMC */
+
+static int dhd_wifi_platform_load_sdio(void)
+{
+	int i;
+	int err = 0;
+	wifi_adapter_info_t *adapter;
+
+	BCM_REFERENCE(i);
+	BCM_REFERENCE(adapter);
+	/* Sanity check on the module parameters
+	 * - Both watchdog and DPC as tasklets are ok
+	 * - If both watchdog and DPC are threads, TX must be deferred
+	 */
+	if (!(dhd_watchdog_prio < 0 && dhd_dpc_prio < 0) &&
+		!(dhd_watchdog_prio >= 0 && dhd_dpc_prio >= 0 && dhd_deferred_tx))
+		return -EINVAL;
+
+#if defined(BCMLXSDMMC)
+	if (dhd_wifi_platdata == NULL) {
+		DHD_ERROR(("DHD wifi platform data is required for Android build\n"));
+		return -EINVAL;
+	}
+
+	sema_init(&dhd_registration_sem, 0);
+	/* power up all adapters */
+	for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+		bool chip_up = FALSE;
+		int retry = POWERUP_MAX_RETRY;
+		struct semaphore dhd_chipup_sem;
+
+		adapter = &dhd_wifi_platdata->adapters[i];
+
+		DHD_ERROR(("Power-up adapter '%s'\n", adapter->name));
+		DHD_INFO((" - irq %d [flags %d], firmware: %s, nvram: %s\n",
+			adapter->irq_num, adapter->intr_flags, adapter->fw_path, adapter->nv_path));
+		DHD_INFO((" - bus type %d, bus num %d, slot num %d\n\n",
+			adapter->bus_type, adapter->bus_num, adapter->slot_num));
+
+		do {
+			sema_init(&dhd_chipup_sem, 0);
+			err = dhd_bus_reg_sdio_notify(&dhd_chipup_sem);
+			if (err) {
+				DHD_ERROR(("%s dhd_bus_reg_sdio_notify fail(%d)\n\n",
+					__FUNCTION__, err));
+				return err;
+			}
+			err = wifi_platform_set_power(adapter, TRUE, WIFI_TURNON_DELAY);
+			if (err) {
+				/* WL_REG_ON state unknown, power off forcibly */
+				wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+				continue;
+			} else {
+				wifi_platform_bus_enumerate(adapter, TRUE);
+				err = 0;
+			}
+
+			if (down_timeout(&dhd_chipup_sem, msecs_to_jiffies(POWERUP_WAIT_MS)) == 0) {
+				dhd_bus_unreg_sdio_notify();
+				chip_up = TRUE;
+				break;
+			}
+
+			DHD_ERROR(("failed to power up %s, %d retry left\n", adapter->name, retry));
+			dhd_bus_unreg_sdio_notify();
+			wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+			wifi_platform_bus_enumerate(adapter, FALSE);
+		} while (retry--);
+
+		if (!chip_up) {
+			DHD_ERROR(("failed to power up %s, max retry reached**\n", adapter->name));
+			return -ENODEV;
+		}
+
+	}
+
+	err = dhd_bus_register();
+
+	if (err) {
+		DHD_ERROR(("%s: sdio_register_driver failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	/*
+	 * Wait until the MMC sdio_register_driver callback has run and the
+	 * driver attach has completed. This synchronizes the exit from dhd
+	 * insmod with the kernel MMC SDIO device callback registration.
+	 */
+	err = down_timeout(&dhd_registration_sem, msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT));
+	if (err) {
+		DHD_ERROR(("%s: sdio_register_driver timeout or error \n", __FUNCTION__));
+		dhd_bus_unregister();
+		goto fail;
+	}
+
+	return err;
+
+fail:
+	/* power down all adapters */
+	for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+		adapter = &dhd_wifi_platdata->adapters[i];
+		wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+		wifi_platform_bus_enumerate(adapter, FALSE);
+	}
+#else
+
+	/* x86 bring-up PC needs no power-up operations */
+	err = dhd_bus_register();
+
+#endif /* BCMLXSDMMC */
+
+	return err;
+}
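+/*
+ * Note on the handshake above (a sketch of the assumed flow): the power-up
+ * loop arms dhd_chipup_sem via dhd_bus_reg_sdio_notify() and waits for the
+ * MMC stack to detect the card, while dhd_registration_sem is expected to
+ * be released with up() from the SDIO probe path once the driver attach
+ * completes, so this function returns only after registration has finished
+ * or DHD_REGISTRATION_TIMEOUT expires.
+ */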
+
+static int dhd_wifi_platform_load_usb(void)
+{
+	return 0;
+}
+
+static int dhd_wifi_platform_load(void)
+{
+	int err = 0;
+
+	/* The chipset is powered down after register_netdev(), so there is a
+	 * race window where an application can trigger dhd_open() before the
+	 * power-down sequence has completed.
+	 * Take the load lock to handle this race condition.
+	 */
+	dhd_wifi_platform_load_lock();
+
+	wl_android_init();
+
+	if ((err = dhd_wifi_platform_load_usb()))
+		goto end;
+	else if ((err = dhd_wifi_platform_load_sdio()))
+		goto end;
+	else
+		err = dhd_wifi_platform_load_pcie();
+
+end:
+	if (err)
+		wl_android_exit();
+	else
+		wl_android_post_init();
+
+	if (!dhd_download_fw_on_driverload) {
+		wifi_adapter_info_t *adapter;
+		adapter = &dhd_wifi_platdata->adapters[0];
+		if (is_power_on) {
+			wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+			wifi_platform_bus_enumerate(adapter, FALSE);
+		}
+	}
+
+	dhd_wifi_platform_load_unlock();
+
+	return err;
+}
+
+void dhd_wifi_platform_load_lock(void)
+{
+	ASSERT(dhd_wifi_platdata != NULL);
+	mutex_lock(&dhd_wifi_platdata->load_lock);
+}
+
+void dhd_wifi_platform_load_unlock(void)
+{
+	ASSERT(dhd_wifi_platdata != NULL);
+	mutex_unlock(&dhd_wifi_platdata->load_lock);
+}
+
diff --git a/drivers/staging/edison-bcm43340/dhd_linux_sched.c b/drivers/staging/edison-bcm43340/dhd_linux_sched.c
new file mode 100644
index 0000000..ba78dfd
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/dhd_linux_sched.c
@@ -0,0 +1,48 @@
+/*
+ * Expose some of the kernel scheduler routines
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux_sched.c 457596 2014-02-24 02:24:14Z $
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <typedefs.h>
+#include <linuxver.h>
+
+int setScheduler(struct task_struct *p, int policy, struct sched_param *param)
+{
+	int rc = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	rc = sched_setscheduler(p, policy, param);
+#endif /* LinuxVer */
+	return rc;
+}
+
+int get_scheduler_policy(struct task_struct *p)
+{
+	int rc = SCHED_NORMAL;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	rc = p->policy;
+#endif /* LinuxVer */
+	return rc;
+}
diff --git a/drivers/staging/edison-bcm43340/dhd_linux_wq.c b/drivers/staging/edison-bcm43340/dhd_linux_wq.c
new file mode 100644
index 0000000..0364c9d
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/dhd_linux_wq.c
@@ -0,0 +1,316 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Generic work queue framework
+ * Generic interface to handle dhd deferred work events
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux_wq.c 411851 2013-07-10 20:48:00Z $
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/ip.h>
+#include <linux/kfifo.h>
+
+#include <linuxver.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <dhd_linux_wq.h>
+
+struct dhd_deferred_event_t {
+	u8	event; /* holds the event */
+	void	*event_data; /* Holds event specific data */
+	event_handler_t event_handler;
+};
+#define DEFRD_EVT_SIZE	sizeof(struct dhd_deferred_event_t)
+
+struct dhd_deferred_wq {
+	struct work_struct	deferred_work; /* should be the first member */
+
+	/*
+	 * Work events may occur simultaneously.
+	 * The fifos can hold up to 64 low-priority and 4 high-priority events.
+	 */
+#define DHD_PRIO_WORK_FIFO_SIZE	(4 * sizeof(struct dhd_deferred_event_t))
+#define DHD_WORK_FIFO_SIZE	(64 * sizeof(struct dhd_deferred_event_t))
+	struct kfifo			*prio_fifo;
+	struct kfifo			*work_fifo;
+	u8				*prio_fifo_buf;
+	u8				*work_fifo_buf;
+	spinlock_t			work_lock;
+	void				*dhd_info; /* opaque dhd_info handle passed back to event handlers */
+};
+struct dhd_deferred_wq	*deferred_wq = NULL;
+
+static inline struct kfifo*
+dhd_kfifo_init(u8 *buf, int size, spinlock_t *lock)
+{
+	struct kfifo *fifo;
+	gfp_t flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
+	fifo = kfifo_init(buf, size, flags, lock);
+#else
+	fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags);
+	if (!fifo) {
+		return NULL;
+	}
+	kfifo_init(fifo, buf, size);
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
+	return fifo;
+}
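+/*
+ * Compatibility note on the helper above: before 2.6.33, kfifo_init()
+ * allocated and returned a struct kfifo and took its own spinlock; from
+ * 2.6.33 on, the caller allocates the struct and kfifo_init() only attaches
+ * the buffer, which is why locking is done explicitly with the
+ * kfifo_*_spinlocked() calls later in this file.
+ */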
+
+static inline void
+dhd_kfifo_free(struct kfifo *fifo)
+{
+	kfifo_free(fifo);
+}
+
+/* deferred work functions */
+static void dhd_deferred_work_handler(struct work_struct *data);
+
+void*
+dhd_deferred_work_init(void *dhd_info)
+{
+	struct dhd_deferred_wq	*work = NULL;
+	u8*	buf;
+	unsigned long	fifo_size = 0;
+	gfp_t	flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
+
+	if (!dhd_info) {
+		DHD_ERROR(("%s: dhd info not initialized\n", __FUNCTION__));
+		goto return_null;
+	}
+
+	work = (struct dhd_deferred_wq *)kzalloc(sizeof(struct dhd_deferred_wq),
+		flags);
+
+	if (!work) {
+		DHD_ERROR(("%s: work queue creation failed \n", __FUNCTION__));
+		goto return_null;
+	}
+
+	INIT_WORK((struct work_struct *)work, dhd_deferred_work_handler);
+
+	/* initialize event fifo */
+	spin_lock_init(&work->work_lock);
+
+	/* allocate buffer to hold prio events */
+	fifo_size = DHD_PRIO_WORK_FIFO_SIZE;
+	fifo_size = is_power_of_2(fifo_size) ? fifo_size : roundup_pow_of_two(fifo_size);
+	buf = (u8*)kzalloc(fifo_size, flags);
+	if (!buf) {
+		DHD_ERROR(("%s: prio work fifo allocation failed \n", __FUNCTION__));
+		goto return_null;
+	}
+
+	/* Initialize prio event fifo */
+	work->prio_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
+	if (!work->prio_fifo) {
+		kfree(buf);
+		goto return_null;
+	}
+
+	/* allocate buffer to hold work events */
+	fifo_size = DHD_WORK_FIFO_SIZE;
+	fifo_size = is_power_of_2(fifo_size) ? fifo_size : roundup_pow_of_two(fifo_size);
+	buf = (u8*)kzalloc(fifo_size, flags);
+	if (!buf) {
+		DHD_ERROR(("%s: work fifo allocation failed \n", __FUNCTION__));
+		goto return_null;
+	}
+
+	/* Initialize event fifo */
+	work->work_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
+	if (!work->work_fifo) {
+		kfree(buf);
+		goto return_null;
+	}
+
+	work->dhd_info = dhd_info;
+	deferred_wq = work;
+	DHD_ERROR(("%s: work queue initialized \n", __FUNCTION__));
+	return work;
+
+return_null:
+
+	if (work)
+		dhd_deferred_work_deinit(work);
+
+	return NULL;
+}
+
+void
+dhd_deferred_work_deinit(void *work)
+{
+	struct dhd_deferred_wq *deferred_work = work;
+
+	if (!deferred_work) {
+		DHD_ERROR(("%s: deferred work has already been freed\n", __FUNCTION__));
+		return;
+	}
+
+	/* cancel the deferred work handling */
+	cancel_work_sync((struct work_struct *)deferred_work);
+
+	/*
+	 * free work event fifo.
+	 * kfifo_free frees locally allocated fifo buffer
+	 */
+	if (deferred_work->prio_fifo)
+		dhd_kfifo_free(deferred_work->prio_fifo);
+
+	if (deferred_work->work_fifo)
+		dhd_kfifo_free(deferred_work->work_fifo);
+
+	kfree(deferred_work);
+
+	/* deinit internal reference pointer */
+	deferred_wq = NULL;
+}
+
+/*
+ *	Prepares the event, queues it to the appropriate fifo
+ *	and schedules the shared work item to handle it.
+ */
+int
+dhd_deferred_schedule_work(void *event_data, u8 event, event_handler_t event_handler, u8 priority)
+{
+	struct	dhd_deferred_event_t	deferred_event;
+	int	status;
+
+	if (!deferred_wq) {
+		DHD_ERROR(("%s: work queue not initialized \n", __FUNCTION__));
+		ASSERT(0);
+		return DHD_WQ_STS_UNINITIALIZED;
+	}
+
+	if (!event || (event >= DHD_MAX_WQ_EVENTS)) {
+		DHD_ERROR(("%s: Unknown event \n", __FUNCTION__));
+		return DHD_WQ_STS_UNKNOWN_EVENT;
+	}
+
+	/*
+	 * The kfifo element size defaults to 1 byte. Older kernels (e.g. FC11)
+	 * do not support other element sizes, so for compatibility the default
+	 * is kept; kfifo_esize() below only verifies it.
+	 */
+	ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
+	ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);
+
+	deferred_event.event = event;
+	deferred_event.event_data = event_data;
+	deferred_event.event_handler = event_handler;
+
+	if (priority == DHD_WORK_PRIORITY_HIGH) {
+		status = kfifo_in_spinlocked(deferred_wq->prio_fifo, &deferred_event,
+			DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+	} else {
+		status = kfifo_in_spinlocked(deferred_wq->work_fifo, &deferred_event,
+			DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+	}
+
+	if (!status) {
+		return DHD_WQ_STS_SCHED_FAILED;
+	}
+	schedule_work((struct work_struct *)deferred_wq);
+	return DHD_WQ_STS_OK;
+}
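+/*
+ * A minimal usage sketch (hypothetical handler; DHD_WQ_WORK_HANG_MSG is one
+ * of the events defined in dhd_linux_wq.h):
+ *
+ *	static void hang_handler(void *handle, void *event_data, u8 event)
+ *	{
+ *		dhd_info_t *dhd = handle;
+ *		...process the hang message for dhd...
+ *	}
+ *
+ *	dhd_deferred_schedule_work(NULL, DHD_WQ_WORK_HANG_MSG,
+ *		hang_handler, DHD_WORK_PRIORITY_HIGH);
+ *
+ * The event descriptor is copied into the priority fifo under work_lock and
+ * the shared work item is scheduled; the handler later runs in process
+ * context with the dhd_info pointer captured at init time.
+ */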
+
+static int
+dhd_get_scheduled_work(struct dhd_deferred_event_t *event)
+{
+	int	status = 0;
+
+	if (!deferred_wq) {
+		DHD_ERROR(("%s: work queue not initialized \n", __FUNCTION__));
+		return DHD_WQ_STS_UNINITIALIZED;
+	}
+
+	/*
+	 * The kfifo element size defaults to 1 byte. Older kernels (e.g. FC11)
+	 * do not support other element sizes, so for compatibility the default
+	 * is kept; kfifo_esize() below only verifies it.
+	 */
+	ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
+	ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);
+
+	/* read the priority event fifo first */
+	status = kfifo_out_spinlocked(deferred_wq->prio_fifo, event,
+		DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+
+	if (!status) {
+		/* priority fifo is empty. Now read low prio work fifo */
+		status = kfifo_out_spinlocked(deferred_wq->work_fifo, event,
+			DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+	}
+
+	return status;
+}
+
+/*
+ *	Called when work is scheduled
+ */
+static void
+dhd_deferred_work_handler(struct work_struct *work)
+{
+	struct dhd_deferred_wq		*deferred_work = (struct dhd_deferred_wq *)work;
+	struct dhd_deferred_event_t	work_event;
+	int				status;
+
+	if (!deferred_work) {
+		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
+		return;
+	}
+
+	do {
+		status = dhd_get_scheduled_work(&work_event);
+		DHD_TRACE(("%s: event to handle %d \n", __FUNCTION__, status));
+		if (!status) {
+			DHD_TRACE(("%s: No event to handle %d \n", __FUNCTION__, status));
+			break;
+		}
+
+		if (work_event.event >= DHD_MAX_WQ_EVENTS) {
+			DHD_TRACE(("%s: Unknown event %d \n", __FUNCTION__, work_event.event));
+			break;
+		}
+
+		if (work_event.event_handler) {
+			work_event.event_handler(deferred_work->dhd_info,
+				work_event.event_data, work_event.event);
+		} else {
+			DHD_ERROR(("%s: event not defined %d\n", __FUNCTION__, work_event.event));
+		}
+	} while (1);
+}
diff --git a/drivers/staging/edison-bcm43340/dhd_linux_wq.h b/drivers/staging/edison-bcm43340/dhd_linux_wq.h
new file mode 100644
index 0000000..3a4ad1c
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/dhd_linux_wq.h
@@ -0,0 +1,64 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Generic work queue framework
+ * Generic interface to handle dhd deferred work events
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_linux_wq.h 408802 2013-06-20 19:08:47Z $
+ */
+#ifndef _dhd_linux_wq_h_
+#define _dhd_linux_wq_h_
+/*
+ *	Work event definitions
+ */
+enum _wq_event {
+	DHD_WQ_WORK_IF_ADD = 1,
+	DHD_WQ_WORK_IF_DEL,
+	DHD_WQ_WORK_SET_MAC,
+	DHD_WQ_WORK_SET_MCAST_LIST,
+	DHD_WQ_WORK_IPV6_NDO,
+	DHD_WQ_WORK_HANG_MSG,
+
+	DHD_MAX_WQ_EVENTS
+};
+
+/*
+ *	Work event priority
+ */
+#define DHD_WORK_PRIORITY_LOW	0
+#define DHD_WORK_PRIORITY_HIGH	1
+
+/*
+ *	Error definitions
+ */
+#define DHD_WQ_STS_OK			 0
+#define DHD_WQ_STS_FAILED		-1	/* General failure */
+#define DHD_WQ_STS_UNINITIALIZED	-2
+#define DHD_WQ_STS_SCHED_FAILED		-3
+#define DHD_WQ_STS_UNKNOWN_EVENT	-4
+
+typedef void (*event_handler_t)(void *handle, void *event_data, u8 event);
+
+void *dhd_deferred_work_init(void *dhd);
+void dhd_deferred_work_deinit(void *work);
+int dhd_deferred_schedule_work(void *event_data, u8 event,
+	event_handler_t evt_handler, u8 priority);
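+
+/*
+ * Illustrative usage sketch (assumption, not part of this patch): queue a
+ * deferred event whose handler later runs in the shared worker's process
+ * context. "demo_if_add_handler" and "ifp" are hypothetical names.
+ */
+#if 0
+static void demo_if_add_handler(void *handle, void *event_data, u8 event)
+{
+	/* handle is the dhd info passed to dhd_deferred_work_init() */
+}
+
+static int demo_schedule(void *ifp)
+{
+	return dhd_deferred_schedule_work(ifp, DHD_WQ_WORK_IF_ADD,
+		demo_if_add_handler, DHD_WORK_PRIORITY_LOW);
+}
+#endif
+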
+#endif /* _dhd_linux_wq_h_ */
diff --git a/drivers/staging/edison-bcm43340/dhd_pno.c b/drivers/staging/edison-bcm43340/dhd_pno.c
new file mode 100644
index 0000000..398418b
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/dhd_pno.c
@@ -0,0 +1,1898 @@
+/*
+ * Broadcom Dongle Host Driver (DHD)
+ * Preferred Network Offload and Wi-Fi Location Service (WLS) code.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_pno.c 423669 2013-09-18 13:01:55Z yangj$
+ */
+#ifdef PNO_SUPPORT
+#include <typedefs.h>
+#include <osl.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+
+#include <bcmendian.h>
+#include <linuxver.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/sort.h>
+#include <dngl_stats.h>
+#include <wlioctl.h>
+
+#include <proto/bcmevent.h>
+#include <dhd.h>
+#include <dhd_pno.h>
+#include <dhd_dbg.h>
+
+#ifdef __BIG_ENDIAN
+#include <bcmendian.h>
+#define htod32(i) (bcmswap32(i))
+#define htod16(i) (bcmswap16(i))
+#define dtoh32(i) (bcmswap32(i))
+#define dtoh16(i) (bcmswap16(i))
+#define htodchanspec(i) htod16(i)
+#define dtohchanspec(i) dtoh16(i)
+#else
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+#endif /* __BIG_ENDIAN */
+
+#define NULL_CHECK(p, s, err)  \
+			do { \
+				if (!(p)) { \
+					printf("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \
+					err = BCME_ERROR; \
+					return err; \
+				} \
+			} while (0)
+#define PNO_GET_PNOSTATE(dhd) ((dhd_pno_status_info_t *)dhd->pno_state)
+#define PNO_BESTNET_LEN 1024
+#define PNO_ON 1
+#define PNO_OFF 0
+#define CHANNEL_2G_MAX 14
+#define MAX_NODE_CNT 5
+#define WLS_SUPPORTED(pno_state) (pno_state->wls_supported == TRUE)
+#define TIME_DIFF(timestamp1, timestamp2) (abs((uint32)(timestamp1/1000)  \
+						- (uint32)(timestamp2/1000)))
+
+#define ENTRY_OVERHEAD strlen("bssid=\nssid=\nfreq=\nlevel=\nage=\ndist=\ndistSd=\n====")
+#define TIME_MIN_DIFF 5
+static inline bool
+is_dfs(uint16 channel)
+{
+	if (channel >= 52 && channel <= 64)			/* class 2 */
+		return TRUE;
+	else if (channel >= 100 && channel <= 140)	/* class 4 */
+		return TRUE;
+	else
+		return FALSE;
+}
+int
+dhd_pno_clean(dhd_pub_t *dhd)
+{
+	int pfn = 0;
+	int err;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	/* Disable PNO */
+	err = dhd_iovar(dhd, 0, "pfn", (char *)&pfn, sizeof(pfn), 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn(error : %d)\n",
+			__FUNCTION__, err));
+		goto exit;
+	}
+	_pno_state->pno_status = DHD_PNO_DISABLED;
+	err = dhd_iovar(dhd, 0, "pfnclear", NULL, 0, 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfnclear(error : %d)\n",
+			__FUNCTION__, err));
+	}
+exit:
+	return err;
+}
+
+static int
+_dhd_pno_suspend(dhd_pub_t *dhd)
+{
+	int err;
+	int suspend = 1;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	err = dhd_iovar(dhd, 0, "pfn_suspend", (char *)&suspend, sizeof(suspend), 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to suspend pfn(error :%d)\n", __FUNCTION__, err));
+		goto exit;
+	}
+	_pno_state->pno_status = DHD_PNO_SUSPEND;
+exit:
+	return err;
+}
+static int
+_dhd_pno_enable(dhd_pub_t *dhd, int enable)
+{
+	int err = BCME_OK;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	if (enable & 0xfffe) {
+		DHD_ERROR(("%s invalid value\n", __FUNCTION__));
+		err = BCME_BADARG;
+		goto exit;
+	}
+	if (!dhd_support_sta_mode(dhd)) {
+		DHD_ERROR(("PNO is not allowed for non-STA mode"));
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	if (enable) {
+		if ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) &&
+			dhd_is_associated(dhd, NULL, NULL)) {
+			DHD_ERROR(("%s Legacy PNO mode cannot be enabled "
+				"in assoc mode , ignore it\n", __FUNCTION__));
+			err = BCME_BADOPTION;
+			goto exit;
+		}
+	}
+	/* Enable/Disable PNO */
+	err = dhd_iovar(dhd, 0, "pfn", (char *)&enable, sizeof(enable), 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn_set\n", __FUNCTION__));
+		goto exit;
+	}
+	_pno_state->pno_status = (enable)?
+		DHD_PNO_ENABLED : DHD_PNO_DISABLED;
+	if (!enable)
+		_pno_state->pno_mode = DHD_PNO_NONE_MODE;
+
+	DHD_PNO(("%s set pno as %s\n",
+		__FUNCTION__, enable ? "Enable" : "Disable"));
+exit:
+	return err;
+}
+
+static int
+_dhd_pno_set(dhd_pub_t *dhd, const dhd_pno_params_t *pno_params, dhd_pno_mode_t mode)
+{
+	int err = BCME_OK;
+	wl_pfn_param_t pfn_param;
+	dhd_pno_params_t *_params;
+	dhd_pno_status_info_t *_pno_state;
+	bool combined_scan = FALSE;
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+
+	memset(&pfn_param, 0, sizeof(pfn_param));
+
+	/* set pfn parameters */
+	pfn_param.version = htod32(PFN_VERSION);
+	pfn_param.flags = ((PFN_LIST_ORDER << SORT_CRITERIA_BIT) |
+		(ENABLE << IMMEDIATE_SCAN_BIT) | (ENABLE << REPORT_SEPERATELY_BIT));
+	if (mode == DHD_PNO_LEGACY_MODE) {
+		/* check and set extra pno params */
+		if ((pno_params->params_legacy.pno_repeat != 0) ||
+			(pno_params->params_legacy.pno_freq_expo_max != 0)) {
+			pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT);
+			pfn_param.repeat = (uchar) (pno_params->params_legacy.pno_repeat);
+			pfn_param.exp = (uchar) (pno_params->params_legacy.pno_freq_expo_max);
+		}
+		/* set up pno scan fr */
+		if (pno_params->params_legacy.scan_fr != 0)
+			pfn_param.scan_freq = htod32(pno_params->params_legacy.scan_fr);
+		if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+			DHD_PNO(("will enable combined scan with BATCHIG SCAN MODE\n"));
+			mode |= DHD_PNO_BATCH_MODE;
+			combined_scan = TRUE;
+		} else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+			DHD_PNO(("will enable combined scan with HOTLIST SCAN MODE\n"));
+			mode |= DHD_PNO_HOTLIST_MODE;
+			combined_scan = TRUE;
+		}
+	}
+	if (mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) {
+		/* Scan frequency of 30 sec */
+		pfn_param.scan_freq = htod32(30);
+		/* slow adapt scan is off by default */
+		pfn_param.slow_freq = htod32(0);
+		/* RSSI margin of 30 dB */
+		pfn_param.rssi_margin = htod16(30);
+		/* Network timeout 60 sec */
+		pfn_param.lost_network_timeout = htod32(60);
+		/* best n = 2 by default */
+		pfn_param.bestn = DEFAULT_BESTN;
+		/* mscan m=0 by default, so not record best networks by default */
+		pfn_param.mscan = DEFAULT_MSCAN;
+		/*  default repeat = 10 */
+		pfn_param.repeat = DEFAULT_REPEAT;
+		/* by default, maximum scan interval = 2^2
+		 * scan_freq when adaptive scan is turned on
+		 */
+		pfn_param.exp = DEFAULT_EXP;
+		if (mode == DHD_PNO_BATCH_MODE) {
+			/* In case of BATCH SCAN */
+			if (pno_params->params_batch.bestn)
+				pfn_param.bestn = pno_params->params_batch.bestn;
+			if (pno_params->params_batch.scan_fr)
+				pfn_param.scan_freq = htod32(pno_params->params_batch.scan_fr);
+			if (pno_params->params_batch.mscan)
+				pfn_param.mscan = pno_params->params_batch.mscan;
+			/* enable broadcast scan */
+			pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT);
+		} else if (mode == DHD_PNO_HOTLIST_MODE) {
+			/* In case of HOTLIST SCAN */
+			if (pno_params->params_hotlist.scan_fr)
+				pfn_param.scan_freq = htod32(pno_params->params_hotlist.scan_fr);
+			pfn_param.bestn = 0;
+			pfn_param.repeat = 0;
+			/* enable broadcast scan */
+			pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT);
+		}
+		if (combined_scan) {
+			/* Disable Adaptive Scan */
+			pfn_param.flags &= ~(htod16(ENABLE << ENABLE_ADAPTSCAN_BIT));
+			pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT);
+			pfn_param.repeat = 0;
+			pfn_param.exp = 0;
+			if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+				/* In case of Legacy PNO + BATCH SCAN */
+				_params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]);
+				if (_params->params_batch.bestn)
+					pfn_param.bestn = _params->params_batch.bestn;
+				if (_params->params_batch.scan_fr)
+					pfn_param.scan_freq = htod32(_params->params_batch.scan_fr);
+				if (_params->params_batch.mscan)
+					pfn_param.mscan = _params->params_batch.mscan;
+			} else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+				/* In case of Legacy PNO + HOTLIST SCAN */
+				_params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]);
+				if (_params->params_hotlist.scan_fr)
+					pfn_param.scan_freq =
+						htod32(_params->params_hotlist.scan_fr);
+				pfn_param.bestn = 0;
+				pfn_param.repeat = 0;
+			}
+		}
+	}
+	if (pfn_param.scan_freq < htod32(PNO_SCAN_MIN_FW_SEC) ||
+		pfn_param.scan_freq > htod32(PNO_SCAN_MAX_FW_SEC)) {
+		DHD_ERROR(("%s pno freq(%d sec) is not valid \n",
+			__FUNCTION__, PNO_SCAN_MIN_FW_SEC));
+		err = BCME_BADARG;
+		goto exit;
+	}
+	if (mode == DHD_PNO_BATCH_MODE) {
+		int _tmp = pfn_param.bestn;
+		/* set bestn to calculate the max mscan which firmware supports */
+		err = dhd_iovar(dhd, 0, "pfnmem", (char *)&_tmp, sizeof(_tmp), 1);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to set pfnmem\n", __FUNCTION__));
+			goto exit;
+		}
+		/* get max mscan which the firmware supports */
+		err = dhd_iovar(dhd, 0, "pfnmem", (char *)&_tmp, sizeof(_tmp), 0);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to get pfnmem\n", __FUNCTION__));
+			goto exit;
+		}
+		DHD_PNO((" returned mscan : %d, set bestn : %d\n", _tmp, pfn_param.bestn));
+		pfn_param.mscan = MIN(pfn_param.mscan, _tmp);
+	}
+	err = dhd_iovar(dhd, 0, "pfn_set", (char *)&pfn_param, sizeof(pfn_param), 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn_set\n", __FUNCTION__));
+		goto exit;
+	}
+	/* need to return mscan if this is for batch scan instead of err */
+	err = (mode == DHD_PNO_BATCH_MODE)? pfn_param.mscan : err;
+exit:
+	return err;
+}
+static int
+_dhd_pno_add_ssid(dhd_pub_t *dhd, wlc_ssid_t* ssids_list, int nssid)
+{
+	int err = BCME_OK;
+	int i = 0;
+	wl_pfn_t pfn_element;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	if (nssid) {
+		NULL_CHECK(ssids_list, "ssid list is NULL", err);
+	}
+	memset(&pfn_element, 0, sizeof(pfn_element));
+	{
+		int j;
+		for (j = 0; j < nssid; j++) {
+			DHD_PNO(("%d: scan  for  %s size = %d\n", j,
+				ssids_list[j].SSID, ssids_list[j].SSID_len));
+		}
+	}
+	/* Check for broadcast ssid */
+	for (i = 0; i < nssid; i++) {
+		if (!ssids_list[i].SSID_len) {
+			DHD_ERROR(("%d: Broadcast SSID is ilegal for PNO setting\n", i));
+			err = BCME_ERROR;
+			goto exit;
+		}
+	}
+	/* set all pfn ssid */
+	for (i = 0; i < nssid; i++) {
+		pfn_element.infra = htod32(DOT11_BSSTYPE_INFRASTRUCTURE);
+		pfn_element.auth = (DOT11_OPEN_SYSTEM);
+		pfn_element.wpa_auth = htod32(WPA_AUTH_PFN_ANY);
+		pfn_element.wsec = htod32(0);
+		pfn_element.infra = htod32(1);
+		pfn_element.flags = htod32(ENABLE << WL_PFN_HIDDEN_BIT);
+		memcpy((char *)pfn_element.ssid.SSID, ssids_list[i].SSID,
+			ssids_list[i].SSID_len);
+		pfn_element.ssid.SSID_len = ssids_list[i].SSID_len;
+		err = dhd_iovar(dhd, 0, "pfn_add", (char *)&pfn_element,
+			sizeof(pfn_element), 1);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to execute pfn_add\n", __FUNCTION__));
+			goto exit;
+		}
+	}
+exit:
+	return err;
+}
+/* qsort compare function */
+static int
+_dhd_pno_cmpfunc(const void *a, const void *b)
+{
+	return (*(uint16*)a - *(uint16*)b);
+}
+static int
+_dhd_pno_chan_merge(uint16 *d_chan_list, int *nchan,
+	uint16 *chan_list1, int nchan1, uint16 *chan_list2, int nchan2)
+{
+	int err = BCME_OK;
+	int i = 0, j = 0, k = 0;
+	uint16 tmp;
+	NULL_CHECK(d_chan_list, "d_chan_list is NULL", err);
+	NULL_CHECK(nchan, "nchan is NULL", err);
+	NULL_CHECK(chan_list1, "chan_list1 is NULL", err);
+	NULL_CHECK(chan_list2, "chan_list2 is NULL", err);
+	/* chan_list1 and chan_list2 should be sorted at first */
+	while (i < nchan1 && j < nchan2) {
+		tmp = chan_list1[i] < chan_list2[j]?
+			chan_list1[i++] : chan_list2[j++];
+		for (; i < nchan1 && chan_list1[i] == tmp; i++);
+		for (; j < nchan2 && chan_list2[j] == tmp; j++);
+		d_chan_list[k++] = tmp;
+	}
+
+	while (i < nchan1) {
+		tmp = chan_list1[i++];
+		for (; i < nchan1 && chan_list1[i] == tmp; i++);
+		d_chan_list[k++] = tmp;
+	}
+
+	while (j < nchan2) {
+		tmp = chan_list2[j++];
+		for (; j < nchan2 && chan_list2[j] == tmp; j++);
+		d_chan_list[k++] = tmp;
+
+	}
+	*nchan = k;
+	return err;
+}
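+
+/*
+ * Worked example (sketch, hypothetical values): merging two sorted channel
+ * lists also drops duplicates, e.g. {1, 6, 11} and {6, 36, 40} merge into
+ * {1, 6, 11, 36, 40} with *nchan set to 5.
+ */
+#if 0
+static void demo_chan_merge(void)
+{
+	uint16 a[] = {1, 6, 11};	/* must be pre-sorted */
+	uint16 b[] = {6, 36, 40};	/* must be pre-sorted */
+	uint16 merged[6];
+	int nmerged = 0;
+
+	_dhd_pno_chan_merge(merged, &nmerged, a, 3, b, 3);
+	/* merged == {1, 6, 11, 36, 40}, nmerged == 5 */
+}
+#endif
+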
+static int
+_dhd_pno_get_channels(dhd_pub_t *dhd, uint16 *d_chan_list,
+	int *nchan, uint8 band, bool skip_dfs)
+{
+	int err = BCME_OK;
+	int i, j;
+	uint32 chan_buf[WL_NUMCHANNELS + 1];
+	wl_uint32_list_t *list;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	if (*nchan) {
+		NULL_CHECK(d_chan_list, "d_chan_list is NULL", err);
+	}
+	list = (wl_uint32_list_t *) (void *)chan_buf;
+	list->count = htod32(WL_NUMCHANNELS);
+	err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, chan_buf, sizeof(chan_buf), FALSE, 0);
+	if (err < 0) {
+		DHD_ERROR(("failed to get channel list (err: %d)\n", err));
+		goto exit;
+	}
+	for (i = 0, j = 0; i < dtoh32(list->count) && i < *nchan; i++) {
+		if (band == WLC_BAND_2G) {
+			if (dtoh32(list->element[i]) > CHANNEL_2G_MAX)
+				continue;
+		} else if (band == WLC_BAND_5G) {
+			if (dtoh32(list->element[i]) <= CHANNEL_2G_MAX)
+				continue;
+			if (skip_dfs && is_dfs(dtoh32(list->element[i])))
+				continue;
+
+		} else { /* All channels */
+			if (skip_dfs && is_dfs(dtoh32(list->element[i])))
+				continue;
+		}
+		d_chan_list[j++] = dtoh32(list->element[i]);
+	}
+	*nchan = j;
+exit:
+	return err;
+}
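+
+/*
+ * Usage sketch (assumption): fetch the valid 2.4 GHz channels from the
+ * firmware. On input *nchan caps how many firmware entries are examined;
+ * on return it holds the number of channels copied into the array.
+ */
+#if 0
+static int demo_get_2g_channels(dhd_pub_t *dhd)
+{
+	uint16 chans[WL_NUMCHANNELS];
+	int nchan = WL_NUMCHANNELS;
+
+	return _dhd_pno_get_channels(dhd, chans, &nchan, WLC_BAND_2G, FALSE);
+}
+#endif
+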
+static int
+_dhd_pno_convert_format(dhd_pub_t *dhd, struct dhd_pno_batch_params *params_batch,
+	char *buf, int nbufsize)
+{
+	int err = BCME_OK;
+	int bytes_written = 0, nreadsize = 0;
+	int t_delta = 0;
+	int nleftsize = nbufsize;
+	uint8 cnt = 0;
+	char *bp = buf;
+	char eabuf[ETHER_ADDR_STR_LEN];
+#ifdef PNO_DEBUG
+	char *_base_bp;
+	char msg[150];
+#endif
+	dhd_pno_bestnet_entry_t *iter, *next;
+	dhd_pno_scan_results_t *siter, *snext;
+	dhd_pno_best_header_t *phead, *pprev;
+	NULL_CHECK(params_batch, "params_batch is NULL", err);
+	if (nbufsize > 0)
+		NULL_CHECK(buf, "buf is NULL", err);
+	/* initialize the buffer */
+	memset(buf, 0, nbufsize);
+	DHD_PNO(("%s enter \n", __FUNCTION__));
+	/* # of scans */
+	if (!params_batch->get_batch.batch_started) {
+		bp += nreadsize = sprintf(bp, "scancount=%d\n",
+			params_batch->get_batch.expired_tot_scan_cnt);
+		nleftsize -= nreadsize;
+		params_batch->get_batch.batch_started = TRUE;
+	}
+	DHD_PNO(("%s scancount %d\n", __FUNCTION__, params_batch->get_batch.expired_tot_scan_cnt));
+	/* estimate how many scans this report can cover before the buffer runs out */
+	list_for_each_entry_safe(siter, snext,
+		&params_batch->get_batch.expired_scan_results_list, list) {
+		phead = siter->bestnetheader;
+		while (phead != NULL) {
+			/* stop if the remaining buffer is smaller than this header's total size */
+			if (nleftsize <=
+				(phead->tot_size + phead->tot_cnt * ENTRY_OVERHEAD))
+				goto exit;
+			/* increase scan count */
+			cnt++;
+			/* # best of each scan */
+			DHD_PNO(("\n<loop : %d, apcount %d>\n", cnt - 1, phead->tot_cnt));
+			/* attribute of the scan */
+			if (phead->reason & PNO_STATUS_ABORT_MASK) {
+				bp += nreadsize = sprintf(bp, "trunc\n");
+				nleftsize -= nreadsize;
+			}
+			list_for_each_entry_safe(iter, next,
+				&phead->entry_list, list) {
+				t_delta = jiffies_to_msecs(jiffies - iter->recorded_time);
+#ifdef PNO_DEBUG
+				_base_bp = bp;
+				memset(msg, 0, sizeof(msg));
+#endif
+				/* BSSID info */
+				bp += nreadsize = sprintf(bp, "bssid=%s\n",
+				bcm_ether_ntoa((const struct ether_addr *)&iter->BSSID, eabuf));
+				nleftsize -= nreadsize;
+				/* SSID */
+				bp += nreadsize = sprintf(bp, "ssid=%s\n", iter->SSID);
+				nleftsize -= nreadsize;
+				/* channel */
+				bp += nreadsize = sprintf(bp, "freq=%d\n",
+				wf_channel2mhz(iter->channel,
+				iter->channel <= CH_MAX_2G_CHANNEL?
+				WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
+				nleftsize -= nreadsize;
+				/* RSSI */
+				bp += nreadsize = sprintf(bp, "level=%d\n", iter->RSSI);
+				nleftsize -= nreadsize;
+				/* add the time consumed in Driver to the timestamp of firmware */
+				iter->timestamp += t_delta;
+				bp += nreadsize = sprintf(bp, "age=%d\n", iter->timestamp);
+				nleftsize -= nreadsize;
+				/* RTT0 */
+				bp += nreadsize = sprintf(bp, "dist=%d\n",
+				(iter->rtt0 == 0)? -1 : iter->rtt0);
+				nleftsize -= nreadsize;
+				/* RTT1 */
+				bp += nreadsize = sprintf(bp, "distSd=%d\n",
+				(iter->rtt0 == 0)? -1 : iter->rtt1);
+				nleftsize -= nreadsize;
+				bp += nreadsize = sprintf(bp, "%s", AP_END_MARKER);
+				nleftsize -= nreadsize;
+				list_del(&iter->list);
+				MFREE(dhd->osh, iter, BESTNET_ENTRY_SIZE);
+#ifdef PNO_DEBUG
+				memcpy(msg, _base_bp, bp - _base_bp);
+				DHD_PNO(("Entry : \n%s", msg));
+#endif
+			}
+			bp += nreadsize = sprintf(bp, "%s", SCAN_END_MARKER);
+			DHD_PNO(("%s", SCAN_END_MARKER));
+			nleftsize -= nreadsize;
+			pprev = phead;
+			/* reset the header */
+			siter->bestnetheader = phead = phead->next;
+			MFREE(dhd->osh, pprev, BEST_HEADER_SIZE);
+
+			siter->cnt_header--;
+		}
+		if (phead == NULL) {
+			/* every entry of this scan was emitted, so it is safe to delete */
+			list_del(&siter->list);
+			MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE);
+		}
+	}
+exit:
+	if (cnt < params_batch->get_batch.expired_tot_scan_cnt) {
+		DHD_ERROR(("Buffer size is small to save all batch entry,"
+			" cnt : %d (remained_scan_cnt): %d\n",
+			cnt, params_batch->get_batch.expired_tot_scan_cnt - cnt));
+	}
+	params_batch->get_batch.expired_tot_scan_cnt -= cnt;
+	/* set FALSE only if the linked list is empty after returning the data */
+	if (list_empty(&params_batch->get_batch.expired_scan_results_list)) {
+		params_batch->get_batch.batch_started = FALSE;
+		bp += sprintf(bp, "%s", RESULTS_END_MARKER);
+		DHD_PNO(("%s", RESULTS_END_MARKER));
+		DHD_PNO(("%s : Getting the batching data is complete\n", __FUNCTION__));
+	}
+	/* return used memory in buffer */
+	bytes_written = (int32)(bp - buf);
+	return bytes_written;
+}
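+
+/*
+ * Sketch of the text layout emitted above for one scan with one AP; the
+ * exact marker strings (AP_END_MARKER, SCAN_END_MARKER, RESULTS_END_MARKER)
+ * are defined elsewhere, so placeholders are shown:
+ *
+ *   scancount=1
+ *   bssid=00:11:22:33:44:55
+ *   ssid=example-ap
+ *   freq=2437
+ *   level=-60
+ *   age=1200
+ *   dist=-1
+ *   distSd=-1
+ *   <AP_END_MARKER>
+ *   <SCAN_END_MARKER>
+ *   <RESULTS_END_MARKER>
+ */
+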
+static int
+_dhd_pno_clear_all_batch_results(dhd_pub_t *dhd, struct list_head *head, bool only_last)
+{
+	int err = BCME_OK;
+	int removed_scan_cnt = 0;
+	dhd_pno_scan_results_t *siter, *snext;
+	dhd_pno_best_header_t *phead, *pprev;
+	dhd_pno_bestnet_entry_t *iter, *next;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(head, "head is NULL", err);
+	NULL_CHECK(head->next, "head->next is NULL", err);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	list_for_each_entry_safe(siter, snext,
+		head, list) {
+		if (only_last) {
+			/* in case that we need to delete only last one */
+			if (!list_is_last(&siter->list, head)) {
+				/* skip if the one is not last */
+				continue;
+			}
+		}
+		/* delete all data belonging to this node */
+		phead = siter->bestnetheader;
+		while (phead != NULL) {
+			removed_scan_cnt++;
+			list_for_each_entry_safe(iter, next,
+			&phead->entry_list, list) {
+				list_del(&iter->list);
+				MFREE(dhd->osh, iter, BESTNET_ENTRY_SIZE);
+			}
+			pprev = phead;
+			phead = phead->next;
+			MFREE(dhd->osh, pprev, BEST_HEADER_SIZE);
+		}
+		if (phead == NULL) {
+			/* it is ok to delete top node */
+			list_del(&siter->list);
+			MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE);
+		}
+	}
+	return removed_scan_cnt;
+}
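+
+/*
+ * Shape of the batch-result bookkeeping walked by the two routines above
+ * (sketch):
+ *
+ *   get_batch.scan_results_list
+ *     -> dhd_pno_scan_results_t      (one per batch retrieval)
+ *         ->bestnetheader            (dhd_pno_best_header_t, singly linked
+ *                                     via ->next, one per scan generation)
+ *             ->entry_list           (dhd_pno_bestnet_entry_t, one per best
+ *                                     network found in that scan)
+ */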
+
+static int
+_dhd_pno_cfg(dhd_pub_t *dhd, uint16 *channel_list, int nchan)
+{
+	int err = BCME_OK;
+	int i = 0;
+	wl_pfn_cfg_t pfncfg_param;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	if (nchan) {
+		NULL_CHECK(channel_list, "nchan is NULL", err);
+	}
+	DHD_PNO(("%s enter :  nchan : %d\n", __FUNCTION__, nchan));
+	memset(&pfncfg_param, 0, sizeof(wl_pfn_cfg_t));
+	/* Setup default values */
+	pfncfg_param.reporttype = htod32(WL_PFN_REPORT_ALLNET);
+	pfncfg_param.channel_num = htod32(0);
+
+	for (i = 0; i < nchan && i < WL_NUMCHANNELS; i++)
+		pfncfg_param.channel_list[i] = channel_list[i];
+
+	pfncfg_param.channel_num = htod32(nchan);
+	err = dhd_iovar(dhd, 0, "pfn_cfg", (char *)&pfncfg_param, sizeof(pfncfg_param), 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn_cfg\n", __FUNCTION__));
+		goto exit;
+	}
+exit:
+	return err;
+}
+static int
+_dhd_pno_reinitialize_prof(dhd_pub_t *dhd, dhd_pno_params_t *params, dhd_pno_mode_t mode)
+{
+	int err = BCME_OK;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL\n", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL\n", err);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	mutex_lock(&_pno_state->pno_mutex);
+	switch (mode) {
+	case DHD_PNO_LEGACY_MODE: {
+		struct dhd_pno_ssid *iter, *next;
+		if (params->params_legacy.nssid > 0) {
+			list_for_each_entry_safe(iter, next,
+				&params->params_legacy.ssid_list, list) {
+				list_del(&iter->list);
+				kfree(iter);
+			}
+		}
+		params->params_legacy.nssid = 0;
+		params->params_legacy.scan_fr = 0;
+		params->params_legacy.pno_freq_expo_max = 0;
+		params->params_legacy.pno_repeat = 0;
+		params->params_legacy.nchan = 0;
+		memset(params->params_legacy.chan_list, 0,
+			sizeof(params->params_legacy.chan_list));
+		break;
+	}
+	case DHD_PNO_BATCH_MODE: {
+		params->params_batch.scan_fr = 0;
+		params->params_batch.mscan = 0;
+		params->params_batch.nchan = 0;
+		params->params_batch.rtt = 0;
+		params->params_batch.bestn = 0;
+		params->params_batch.band = WLC_BAND_AUTO;
+		memset(params->params_batch.chan_list, 0,
+			sizeof(params->params_batch.chan_list));
+		params->params_batch.get_batch.batch_started = FALSE;
+		params->params_batch.get_batch.buf = NULL;
+		params->params_batch.get_batch.bufsize = 0;
+		params->params_batch.get_batch.reason = 0;
+		_dhd_pno_clear_all_batch_results(dhd,
+			&params->params_batch.get_batch.scan_results_list, FALSE);
+		_dhd_pno_clear_all_batch_results(dhd,
+			&params->params_batch.get_batch.expired_scan_results_list, FALSE);
+		params->params_batch.get_batch.tot_scan_cnt = 0;
+		params->params_batch.get_batch.expired_tot_scan_cnt = 0;
+		params->params_batch.get_batch.top_node_cnt = 0;
+		INIT_LIST_HEAD(&params->params_batch.get_batch.scan_results_list);
+		INIT_LIST_HEAD(&params->params_batch.get_batch.expired_scan_results_list);
+		break;
+	}
+	case DHD_PNO_HOTLIST_MODE: {
+		struct dhd_pno_bssid *iter, *next;
+		if (params->params_hotlist.nbssid > 0) {
+			list_for_each_entry_safe(iter, next,
+				&params->params_hotlist.bssid_list, list) {
+				list_del(&iter->list);
+				kfree(iter);
+			}
+		}
+		params->params_hotlist.scan_fr = 0;
+		params->params_hotlist.nbssid = 0;
+		params->params_hotlist.nchan = 0;
+		params->params_batch.band = WLC_BAND_AUTO;
+		memset(params->params_hotlist.chan_list, 0,
+			sizeof(params->params_hotlist.chan_list));
+		break;
+	}
+	default:
+		DHD_ERROR(("%s : unknown mode : %d\n", __FUNCTION__, mode));
+		break;
+	}
+	mutex_unlock(&_pno_state->pno_mutex);
+	return err;
+}
+static int
+_dhd_pno_add_bssid(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid, int nbssid)
+{
+	int err = BCME_OK;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	if (nbssid) {
+		NULL_CHECK(p_pfn_bssid, "bssid list is NULL", err);
+	}
+	err = dhd_iovar(dhd, 0, "pfn_add_bssid", (char *)&p_pfn_bssid,
+		sizeof(wl_pfn_bssid_t) * nbssid, 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn_cfg\n", __FUNCTION__));
+		goto exit;
+	}
+exit:
+	return err;
+}
+int
+dhd_pno_stop_for_ssid(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	uint32 mode = 0;
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pno_params_t *_params;
+	wl_pfn_bssid_t *p_pfn_bssid = NULL;
+	NULL_CHECK(dhd, "dev is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	if (!(_pno_state->pno_mode & DHD_PNO_LEGACY_MODE)) {
+		DHD_ERROR(("%s : LEGACY PNO MODE is not enabled\n", __FUNCTION__));
+		goto exit;
+	}
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+	/* restart Batch mode  if the batch mode is on */
+	if (_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) {
+		/* retrieve the batching data from firmware into host */
+		dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE);
+		/* save current pno_mode before calling dhd_pno_clean */
+		mode = _pno_state->pno_mode;
+		dhd_pno_clean(dhd);
+		/* restore previous pno_mode */
+		_pno_state->pno_mode = mode;
+		if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+			_params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]);
+			/* restart BATCH SCAN */
+			err = dhd_pno_set_for_batch(dhd, &_params->params_batch);
+			if (err < 0) {
+				_pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE;
+				DHD_ERROR(("%s : failed to restart batch scan(err: %d)\n",
+					__FUNCTION__, err));
+				goto exit;
+			}
+		} else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+			/* restart HOTLIST SCAN */
+			struct dhd_pno_bssid *iter, *next;
+			int i = 0;
+			_params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]);
+			p_pfn_bssid = kzalloc(sizeof(wl_pfn_bssid_t) *
+			_params->params_hotlist.nbssid, GFP_KERNEL);
+			if (p_pfn_bssid == NULL) {
+				DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array"
+				" (count: %d)",
+					__FUNCTION__, _params->params_hotlist.nbssid));
+				err = BCME_ERROR;
+				_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+				goto exit;
+			}
+			/* convert dhd_pno_bssid to wl_pfn_bssid */
+			list_for_each_entry_safe(iter, next,
+			&_params->params_hotlist.bssid_list, list) {
+				/* index instead of advancing the pointer,
+				 * which is passed to firmware and freed below
+				 */
+				memcpy(&p_pfn_bssid[i].macaddr,
+				&iter->macaddr, ETHER_ADDR_LEN);
+				p_pfn_bssid[i].flags = iter->flags;
+				i++;
+			}
+			err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist);
+			if (err < 0) {
+				_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+				DHD_ERROR(("%s : failed to restart hotlist scan(err: %d)\n",
+					__FUNCTION__, err));
+				goto exit;
+			}
+		}
+	} else {
+		err = dhd_pno_clean(dhd);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+exit:
+	if (p_pfn_bssid)
+		kfree(p_pfn_bssid);
+	return err;
+}
+
+int
+dhd_pno_enable(dhd_pub_t *dhd, int enable)
+{
+	int err = BCME_OK;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	return (_dhd_pno_enable(dhd, enable));
+}
+
+int
+dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_t* ssid_list, int nssid,
+	uint16  scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
+{
+	struct dhd_pno_ssid *_pno_ssid;
+	dhd_pno_params_t *_params;
+	dhd_pno_params_t *_params2;
+	dhd_pno_status_info_t *_pno_state;
+	uint16 _chan_list[WL_NUMCHANNELS];
+	int32 tot_nchan = 0;
+	int err = BCME_OK;
+	int i;
+	int mode = 0;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	DHD_PNO(("%s enter : scan_fr :%d, pno_repeat :%d,"
+			"pno_freq_expo_max: %d, nchan :%d\n", __FUNCTION__,
+			scan_fr, pno_repeat, pno_freq_expo_max, nchan));
+
+	_params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
+	if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+		DHD_ERROR(("%s : Legacy PNO mode was already started, "
+			"will disable previous one to start new one\n", __FUNCTION__));
+		err = dhd_pno_stop_for_ssid(dhd);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to stop legacy PNO (err %d)\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+	_pno_state->pno_mode |= DHD_PNO_LEGACY_MODE;
+	err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to reinitialize profile (err %d)\n",
+			__FUNCTION__, err));
+		goto exit;
+	}
+	memset(_chan_list, 0, sizeof(_chan_list));
+	tot_nchan = nchan;
+	if (tot_nchan > 0 && channel_list) {
+		for (i = 0; i < nchan; i++)
+			_params->params_legacy.chan_list[i] = _chan_list[i] = channel_list[i];
+	}
+	if (_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) {
+		DHD_PNO(("BATCH SCAN is on progress in firmware\n"));
+		/* retrieve the batching data from firmware into host */
+		dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE);
+		/* store current pno_mode before disabling pno */
+		mode = _pno_state->pno_mode;
+		err = _dhd_pno_enable(dhd, PNO_OFF);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__));
+			goto exit;
+		}
+		/* restore the previous mode */
+		_pno_state->pno_mode = mode;
+		/* use the superset of the two modes' channel lists */
+		if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+			_params2 = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]);
+			if (_params2->params_batch.nchan > 0 && nchan > 0) {
+				err = _dhd_pno_chan_merge(_chan_list, &tot_nchan,
+					&_params2->params_batch.chan_list[0],
+					_params2->params_batch.nchan,
+					&channel_list[0], nchan);
+				if (err < 0) {
+					DHD_ERROR(("%s : failed to merge channel list"
+					" between legacy and batch\n",
+						__FUNCTION__));
+					goto exit;
+				}
+			} else {
+				DHD_PNO(("channel superset will use"
+				" all channels in firmware\n"));
+			}
+		} else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+			_params2 = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]);
+			if (_params2->params_hotlist.nchan > 0 && nchan > 0) {
+				err = _dhd_pno_chan_merge(_chan_list, &tot_nchan,
+					&_params2->params_hotlist.chan_list[0],
+					_params2->params_hotlist.nchan,
+					&channel_list[0], nchan);
+				if (err < 0) {
+					DHD_ERROR(("%s : failed to merge channel list"
+					" between legacy and hotlist\n",
+						__FUNCTION__));
+					goto exit;
+				}
+			}
+		}
+	}
+	_params->params_legacy.scan_fr = scan_fr;
+	_params->params_legacy.pno_repeat = pno_repeat;
+	_params->params_legacy.pno_freq_expo_max = pno_freq_expo_max;
+	_params->params_legacy.nchan = nchan;
+	_params->params_legacy.nssid = nssid;
+	INIT_LIST_HEAD(&_params->params_legacy.ssid_list);
+	if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_LEGACY_MODE)) < 0) {
+		DHD_ERROR(("failed to set call pno_set (err %d) in firmware\n", err));
+		goto exit;
+	}
+	if ((err = _dhd_pno_add_ssid(dhd, ssid_list, nssid)) < 0) {
+		DHD_ERROR(("failed to add ssid list(err %d), %d in firmware\n", err, nssid));
+		goto exit;
+	}
+	for (i = 0; i < nssid; i++) {
+		_pno_ssid = kzalloc(sizeof(struct dhd_pno_ssid), GFP_KERNEL);
+		if (_pno_ssid == NULL) {
+			DHD_ERROR(("%s : failed to allocate struct dhd_pno_ssid\n",
+				__FUNCTION__));
+			err = BCME_NOMEM;
+			goto exit;
+		}
+		_pno_ssid->SSID_len = ssid_list[i].SSID_len;
+		memcpy(_pno_ssid->SSID, ssid_list[i].SSID, _pno_ssid->SSID_len);
+		list_add_tail(&_pno_ssid->list, &_params->params_legacy.ssid_list);
+
+	}
+	if (tot_nchan > 0) {
+		if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) {
+			DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+	if (_pno_state->pno_status == DHD_PNO_DISABLED) {
+		if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0)
+			DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__));
+	}
+exit:
+	/* clear mode in case of error */
+	if (err < 0)
+		_pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+	return err;
+}
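+
+/*
+ * Recap (sketch) of the firmware iovar sequence dhd_pno_set_for_ssid drives:
+ *   1. "pfn_set" via _dhd_pno_set()       - global PFN scan parameters
+ *   2. "pfn_add" via _dhd_pno_add_ssid()  - one call per SSID
+ *   3. "pfn_cfg" via _dhd_pno_cfg()       - merged channel list, if any
+ *   4. "pfn" = 1 via _dhd_pno_enable()    - start scanning when disabled
+ */
+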
+int
+dhd_pno_set_for_batch(dhd_pub_t *dhd, struct dhd_pno_batch_params *batch_params)
+{
+	int err = BCME_OK;
+	uint16 _chan_list[WL_NUMCHANNELS];
+	int rem_nchan = 0, tot_nchan = 0;
+	int mode = 0, mscan = 0;
+	int i = 0;
+	dhd_pno_params_t *_params;
+	dhd_pno_params_t *_params2;
+	dhd_pno_status_info_t *_pno_state;
+	wlc_ssid_t *p_ssid_list = NULL;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	NULL_CHECK(batch_params, "batch_params is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+	_params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
+	if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
+		_pno_state->pno_mode |= DHD_PNO_BATCH_MODE;
+		err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call _dhd_pno_reinitialize_prof\n",
+				__FUNCTION__));
+			goto exit;
+		}
+	} else {
+		/* batch mode is already started */
+		return -EBUSY;
+	}
+	_params->params_batch.scan_fr = batch_params->scan_fr;
+	_params->params_batch.bestn = batch_params->bestn;
+	_params->params_batch.mscan = (batch_params->mscan)?
+		batch_params->mscan : DEFAULT_BATCH_MSCAN;
+	_params->params_batch.nchan = batch_params->nchan;
+	memcpy(_params->params_batch.chan_list, batch_params->chan_list,
+		sizeof(_params->params_batch.chan_list));
+
+	memset(_chan_list, 0, sizeof(_chan_list));
+
+	rem_nchan = ARRAYSIZE(batch_params->chan_list) - batch_params->nchan;
+	if (batch_params->band == WLC_BAND_2G || batch_params->band == WLC_BAND_5G) {
+		/* get a valid channel list based on band B or A */
+		err = _dhd_pno_get_channels(dhd,
+		&_params->params_batch.chan_list[batch_params->nchan],
+		&rem_nchan, batch_params->band, FALSE);
+		if (err < 0) {
+			DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n",
+				__FUNCTION__, batch_params->band));
+			goto exit;
+		}
+		/* update nchan because rem_nchan now holds the valid channel count */
+		_params->params_batch.nchan += rem_nchan;
+		/* need to sort channel list */
+		sort(_params->params_batch.chan_list, _params->params_batch.nchan,
+			sizeof(_params->params_batch.chan_list[0]), _dhd_pno_cmpfunc, NULL);
+	}
+#ifdef PNO_DEBUG
+	{
+		DHD_PNO(("Channel list : "));
+		for (i = 0; i < _params->params_batch.nchan; i++) {
+			DHD_PNO(("%d ", _params->params_batch.chan_list[i]));
+		}
+		DHD_PNO(("\n"));
+	}
+#endif
+	if (_params->params_batch.nchan) {
+		/* copy the channel list into local array */
+		memcpy(_chan_list, _params->params_batch.chan_list, sizeof(_chan_list));
+		tot_nchan = _params->params_batch.nchan;
+	}
+	if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+		struct dhd_pno_ssid *iter, *next;
+		DHD_PNO(("PNO SSID is on progress in firmware\n"));
+		/* store current pno_mode before disabling pno */
+		mode = _pno_state->pno_mode;
+		err = _dhd_pno_enable(dhd, PNO_OFF);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__));
+			goto exit;
+		}
+		/* restore the previous mode */
+		_pno_state->pno_mode = mode;
+		/* use the channel superset of the two modes */
+		_params2 = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
+		if (_params2->params_legacy.nchan > 0 && _params->params_batch.nchan > 0) {
+			err = _dhd_pno_chan_merge(_chan_list, &tot_nchan,
+				&_params2->params_legacy.chan_list[0],
+				_params2->params_legacy.nchan,
+				&_params->params_batch.chan_list[0], _params->params_batch.nchan);
+			if (err < 0) {
+				DHD_ERROR(("%s : failed to merge channel list"
+				" between legacy and batch\n",
+					__FUNCTION__));
+				goto exit;
+			}
+		} else {
+			DHD_PNO(("superset channel will use all channels in firmware\n"));
+		}
+		p_ssid_list = kzalloc(sizeof(wlc_ssid_t) *
+							_params2->params_legacy.nssid, GFP_KERNEL);
+		if (p_ssid_list == NULL) {
+			DHD_ERROR(("%s : failed to allocate wlc_ssid_t array (count: %d)",
+				__FUNCTION__, _params2->params_legacy.nssid));
+			err = BCME_ERROR;
+			_pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+			goto exit;
+		}
+		i = 0;
+		/* convert dhd_pno_ssid to wlc_ssid_t */
+		list_for_each_entry_safe(iter, next, &_params2->params_legacy.ssid_list, list) {
+			p_ssid_list[i].SSID_len = iter->SSID_len;
+			memcpy(p_ssid_list[i].SSID, iter->SSID, p_ssid_list[i].SSID_len);
+			i++;
+		}
+		if ((err = _dhd_pno_add_ssid(dhd, p_ssid_list,
+			_params2->params_legacy.nssid)) < 0) {
+			DHD_ERROR(("failed to add ssid list (err %d) in firmware\n", err));
+			goto exit;
+		}
+	}
+	if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_BATCH_MODE)) < 0) {
+		DHD_ERROR(("%s : failed to set call pno_set (err %d) in firmware\n",
+			__FUNCTION__, err));
+		goto exit;
+	} else {
+		/* we need to return mscan */
+		mscan = err;
+	}
+	if (tot_nchan > 0) {
+		if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) {
+			DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+	if (_pno_state->pno_status == DHD_PNO_DISABLED) {
+		if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0)
+			DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__));
+	}
+exit:
+	/* clear mode in case of error */
+	if (err < 0)
+		_pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE;
+	else {
+		/* return #max scan firmware can do */
+		err = mscan;
+	}
+	if (p_ssid_list)
+		kfree(p_ssid_list);
+	return err;
+}
+
+static int
+_dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason)
+{
+	int err = BCME_OK;
+	int i, j;
+	uint32 timestamp = 0;
+	dhd_pno_params_t *_params = NULL;
+	dhd_pno_status_info_t *_pno_state = NULL;
+	wl_pfn_lscanresults_t *plbestnet = NULL;
+	wl_pfn_lnet_info_t *plnetinfo;
+	dhd_pno_bestnet_entry_t *pbestnet_entry;
+	dhd_pno_best_header_t *pbestnetheader = NULL;
+	dhd_pno_scan_results_t *pscan_results = NULL, *siter, *snext;
+	bool allocate_header = FALSE;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+	if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
+		DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__));
+		goto exit;
+	}
+	mutex_lock(&_pno_state->pno_mutex);
+	_params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
+	if (buf && bufsize) {
+		if (!list_empty(&_params->params_batch.get_batch.expired_scan_results_list)) {
+			/* check whether we have cached data */
+			DHD_PNO(("%s: have cached batching data in Driver\n",
+				__FUNCTION__));
+			/* convert to results format */
+			goto convert_format;
+		} else {
+			/* this is a first try to get batching results */
+			if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) {
+				/* move the scan_results_list to expired_scan_results_lists */
+				list_for_each_entry_safe(siter, snext,
+					&_params->params_batch.get_batch.scan_results_list, list) {
+					list_move_tail(&siter->list,
+					&_params->params_batch.get_batch.expired_scan_results_list);
+				}
+				_params->params_batch.get_batch.top_node_cnt = 0;
+				_params->params_batch.get_batch.expired_tot_scan_cnt =
+					_params->params_batch.get_batch.tot_scan_cnt;
+				_params->params_batch.get_batch.tot_scan_cnt = 0;
+				goto convert_format;
+			}
+		}
+	}
+	/* create dhd_pno_scan_results_t whenever we got event WLC_E_PFN_BEST_BATCHING */
+	pscan_results = (dhd_pno_scan_results_t *)MALLOC(dhd->osh, SCAN_RESULTS_SIZE);
+	if (pscan_results == NULL) {
+		err = BCME_NOMEM;
+		DHD_ERROR(("failed to allocate dhd_pno_scan_results_t\n"));
+		goto exit;
+	}
+	pscan_results->bestnetheader = NULL;
+	pscan_results->cnt_header = 0;
+	/* add the element to the list as long as total node cnt is less than MAX_NODE_CNT */
+	if (_params->params_batch.get_batch.top_node_cnt < MAX_NODE_CNT) {
+		list_add(&pscan_results->list, &_params->params_batch.get_batch.scan_results_list);
+		_params->params_batch.get_batch.top_node_cnt++;
+	} else {
+		int _removed_scan_cnt;
+		/* remove oldest one and add new one */
+		DHD_PNO(("%s : Remove oldest node and add new one\n", __FUNCTION__));
+		_removed_scan_cnt = _dhd_pno_clear_all_batch_results(dhd,
+			&_params->params_batch.get_batch.scan_results_list, TRUE);
+		_params->params_batch.get_batch.tot_scan_cnt -= _removed_scan_cnt;
+		list_add(&pscan_results->list, &_params->params_batch.get_batch.scan_results_list);
+
+	}
+	plbestnet = (wl_pfn_lscanresults_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN);
+	if (plbestnet == NULL) {
+		/* NULL_CHECK here would return with pno_mutex still held */
+		err = BCME_NOMEM;
+		DHD_ERROR(("failed to allocate buffer for bestnet\n"));
+		goto exit;
+	}
+	memset(plbestnet, 0, PNO_BESTNET_LEN);
+	while (plbestnet->status != PFN_COMPLETE) {
+		memset(plbestnet, 0, PNO_BESTNET_LEN);
+		err = dhd_iovar(dhd, 0, "pfnlbest", (char *)plbestnet, PNO_BESTNET_LEN, 0);
+		if (err < 0) {
+			if (err == BCME_EPERM) {
+				DHD_ERROR(("we cannot get the batching data "
+					"during scanning in firmware, try again\n,"));
+				msleep(500);
+				continue;
+			} else {
+				DHD_ERROR(("%s : failed to execute pfnlbest (err :%d)\n",
+					__FUNCTION__, err));
+				goto exit;
+			}
+		}
+		DHD_PNO(("ver %d, status : %d, count %d\n", plbestnet->version,
+			plbestnet->status, plbestnet->count));
+		if (plbestnet->version != PFN_SCANRESULT_VERSION) {
+			err = BCME_VERSION;
+			DHD_ERROR(("bestnet version(%d) is mismatch with Driver version(%d)\n",
+				plbestnet->version, PFN_SCANRESULT_VERSION));
+			goto exit;
+		}
+		plnetinfo = plbestnet->netinfo;
+		for (i = 0; i < plbestnet->count; i++) {
+			pbestnet_entry = (dhd_pno_bestnet_entry_t *)
+			MALLOC(dhd->osh, BESTNET_ENTRY_SIZE);
+			if (pbestnet_entry == NULL) {
+				err = BCME_NOMEM;
+				DHD_ERROR(("failed to allocate dhd_pno_bestnet_entry\n"));
+				goto exit;
+			}
+			memset(pbestnet_entry, 0, BESTNET_ENTRY_SIZE);
+			pbestnet_entry->recorded_time = jiffies; /* record the current time */
+			/* create header for the first entry */
+			allocate_header = (i == 0)? TRUE : FALSE;
+			/* check whether the new generation is started or not */
+			if (timestamp && (TIME_DIFF(timestamp, plnetinfo->timestamp)
+				> TIME_MIN_DIFF))
+				allocate_header = TRUE;
+			timestamp = plnetinfo->timestamp;
+			if (allocate_header) {
+				pbestnetheader = (dhd_pno_best_header_t *)
+				MALLOC(dhd->osh, BEST_HEADER_SIZE);
+				if (pbestnetheader == NULL) {
+					err = BCME_NOMEM;
+					if (pbestnet_entry)
+						MFREE(dhd->osh, pbestnet_entry,
+						BESTNET_ENTRY_SIZE);
+					DHD_ERROR(("failed to allocate dhd_pno_bestnet_entry\n"));
+					goto exit;
+				}
+				/* increase total cnt of bestnet header */
+				pscan_results->cnt_header++;
+				memset(pbestnetheader, 0, BEST_HEADER_SIZE);
+				/* record the reason dhd_pno_get_for_batch was
+				 * called; this must come after the memset or
+				 * it would be wiped out
+				 */
+				if (reason)
+					pbestnetheader->reason = (ENABLE << reason);
+				/* initialize the head of linked list */
+				INIT_LIST_HEAD(&(pbestnetheader->entry_list));
+				/* link the pbestnet header into the existing list */
+				if (pscan_results->bestnetheader == NULL)
+					/* In case of header */
+					pscan_results->bestnetheader = pbestnetheader;
+				else {
+					dhd_pno_best_header_t *head = pscan_results->bestnetheader;
+					pscan_results->bestnetheader = pbestnetheader;
+					pbestnetheader->next = head;
+				}
+			}
+			/* fills the best network info */
+			pbestnet_entry->channel = plnetinfo->pfnsubnet.channel;
+			pbestnet_entry->RSSI = plnetinfo->RSSI;
+			if (plnetinfo->flags & PFN_PARTIAL_SCAN_MASK) {
+				/* if RSSI is positive value, we assume that
+				 * this scan is aborted by other scan
+				 */
+				DHD_PNO(("This scan is aborted\n"));
+				pbestnetheader->reason = (ENABLE << PNO_STATUS_ABORT);
+			}
+			pbestnet_entry->rtt0 = plnetinfo->rtt0;
+			pbestnet_entry->rtt1 = plnetinfo->rtt1;
+			pbestnet_entry->timestamp = plnetinfo->timestamp;
+			pbestnet_entry->SSID_len = plnetinfo->pfnsubnet.SSID_len;
+			memcpy(pbestnet_entry->SSID, plnetinfo->pfnsubnet.SSID,
+				pbestnet_entry->SSID_len);
+			memcpy(&pbestnet_entry->BSSID, &plnetinfo->pfnsubnet.BSSID, ETHER_ADDR_LEN);
+			/* add the element into list */
+			list_add_tail(&pbestnet_entry->list, &pbestnetheader->entry_list);
+			/* increase best entry count */
+			pbestnetheader->tot_cnt++;
+			pbestnetheader->tot_size += BESTNET_ENTRY_SIZE;
+			DHD_PNO(("Header %d\n", pscan_results->cnt_header - 1));
+			DHD_PNO(("\tSSID : "));
+			for (j = 0; j < plnetinfo->pfnsubnet.SSID_len; j++)
+				DHD_PNO(("%c", plnetinfo->pfnsubnet.SSID[j]));
+			DHD_PNO(("\n"));
+			DHD_PNO(("\tBSSID: %02x:%02x:%02x:%02x:%02x:%02x\n",
+				plnetinfo->pfnsubnet.BSSID.octet[0],
+				plnetinfo->pfnsubnet.BSSID.octet[1],
+				plnetinfo->pfnsubnet.BSSID.octet[2],
+				plnetinfo->pfnsubnet.BSSID.octet[3],
+				plnetinfo->pfnsubnet.BSSID.octet[4],
+				plnetinfo->pfnsubnet.BSSID.octet[5]));
+			DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n",
+				plnetinfo->pfnsubnet.channel,
+				plnetinfo->RSSI, plnetinfo->timestamp));
+			DHD_PNO(("\tRTT0 : %d, RTT1: %d\n", plnetinfo->rtt0, plnetinfo->rtt1));
+			plnetinfo++;
+		}
+	}
+	/* increase total scan count using current scan count
+	 * (done before the free below so we never read freed memory)
+	 */
+	_params->params_batch.get_batch.tot_scan_cnt += pscan_results->cnt_header;
+	if (pscan_results->cnt_header == 0) {
+		/* we didn't get any data from the firmware, so remove the
+		 * current scan_result list from get_batch.scan_results_list
+		 */
+		DHD_PNO(("NO BATCH DATA from Firmware, Delete current SCAN RESULT LIST\n"));
+		list_del(&pscan_results->list);
+		MFREE(dhd->osh, pscan_results, SCAN_RESULTS_SIZE);
+		_params->params_batch.get_batch.top_node_cnt--;
+	}
+
+	if (buf && bufsize) {
+		/* move the fresh results to the expired list before formatting */
+		if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) {
+			/* move the scan_results_list to expired_scan_results_lists */
+			list_for_each_entry_safe(siter, snext,
+				&_params->params_batch.get_batch.scan_results_list, list) {
+				list_move_tail(&siter->list,
+					&_params->params_batch.get_batch.expired_scan_results_list);
+			}
+			/* reset global values after moving to expired list */
+			_params->params_batch.get_batch.top_node_cnt = 0;
+			_params->params_batch.get_batch.expired_tot_scan_cnt =
+				_params->params_batch.get_batch.tot_scan_cnt;
+			_params->params_batch.get_batch.tot_scan_cnt = 0;
+		}
+convert_format:
+		err = _dhd_pno_convert_format(dhd, &_params->params_batch, buf, bufsize);
+		if (err < 0) {
+			DHD_ERROR(("failed to convert the data into upper layer format\n"));
+			goto exit;
+		}
+	}
+exit:
+	if (plbestnet)
+		MFREE(dhd->osh, plbestnet, PNO_BESTNET_LEN);
+	if (_params) {
+		_params->params_batch.get_batch.buf = NULL;
+		_params->params_batch.get_batch.bufsize = 0;
+		_params->params_batch.get_batch.bytes_written = err;
+	}
+	mutex_unlock(&_pno_state->pno_mutex);
+	if (waitqueue_active(&_pno_state->get_batch_done.wait))
+		complete(&_pno_state->get_batch_done);
+	return err;
+}
+static void
+_dhd_pno_get_batch_handler(struct work_struct *work)
+{
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pub_t *dhd;
+	struct dhd_pno_batch_params *params_batch;
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = container_of(work, struct dhd_pno_status_info, work);
+	dhd = _pno_state->dhd;
+	if (dhd == NULL) {
+		DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
+		return;
+	}
+	params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch;
+	_dhd_pno_get_for_batch(dhd, params_batch->get_batch.buf,
+		params_batch->get_batch.bufsize, params_batch->get_batch.reason);
+
+}
+
+int
+dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason)
+{
+	int err = BCME_OK;
+	char *pbuf = buf;
+	dhd_pno_status_info_t *_pno_state;
+	struct dhd_pno_batch_params *params_batch;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+	params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch;
+	if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
+		DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__));
+		memset(pbuf, 0, bufsize);
+		pbuf += sprintf(pbuf, "scancount=%d\n", 0);
+		sprintf(pbuf, "%s", RESULTS_END_MARKER);
+		err = strlen(buf);
+		goto exit;
+	}
+	params_batch->get_batch.buf = buf;
+	params_batch->get_batch.bufsize = bufsize;
+	params_batch->get_batch.reason = reason;
+	params_batch->get_batch.bytes_written = 0;
+	schedule_work(&_pno_state->work);
+	wait_for_completion(&_pno_state->get_batch_done);
+	err = params_batch->get_batch.bytes_written;
+exit:
+	return err;
+}
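+
+/*
+ * The getter above is a synchronous facade over the worker: it parks the
+ * caller on a completion while _dhd_pno_get_batch_handler() fills the
+ * buffer. Minimal sketch of the same pattern with hypothetical names
+ * (INIT_WORK() and init_completion() are assumed to run at setup time):
+ */
+#if 0
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+
+struct demo_ctx {
+	struct work_struct work;
+	struct completion done;
+	int result;
+};
+
+static void demo_worker(struct work_struct *work)
+{
+	struct demo_ctx *ctx = container_of(work, struct demo_ctx, work);
+
+	ctx->result = 42;	/* produce the data */
+	complete(&ctx->done);
+}
+
+static int demo_get(struct demo_ctx *ctx)
+{
+	schedule_work(&ctx->work);
+	wait_for_completion(&ctx->done);
+	return ctx->result;
+}
+#endif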
+
+int
+dhd_pno_stop_for_batch(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	int mode = 0;
+	int i = 0;
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pno_params_t *_params;
+	wl_pfn_bssid_t *p_pfn_bssid = NULL;
+	wlc_ssid_t *p_ssid_list = NULL;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n",
+			__FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+	if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
+		DHD_ERROR(("%s : PNO BATCH MODE is not enabled\n", __FUNCTION__));
+		goto exit;
+	}
+	_pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE;
+	if (_pno_state->pno_mode & (DHD_PNO_LEGACY_MODE | DHD_PNO_HOTLIST_MODE)) {
+		mode = _pno_state->pno_mode;
+		dhd_pno_clean(dhd);
+		_pno_state->pno_mode = mode;
+		/* restart Legacy PNO if the Legacy PNO is on */
+		if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+			struct dhd_pno_legacy_params *_params_legacy;
+			struct dhd_pno_ssid *iter, *next;
+			_params_legacy =
+				&(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy);
+			p_ssid_list = kzalloc(sizeof(wlc_ssid_t) *
+				_params_legacy->nssid, GFP_KERNEL);
+			if (p_ssid_list == NULL) {
+				DHD_ERROR(("%s : failed to allocate wlc_ssid_t array (count: %d)",
+					__FUNCTION__, _params_legacy->nssid));
+				err = BCME_ERROR;
+				_pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+				goto exit;
+			}
+			i = 0;
+			/* convert dhd_pno_ssid to wlc_ssid_t */
+			list_for_each_entry_safe(iter, next, &_params_legacy->ssid_list, list) {
+				p_ssid_list[i].SSID_len = iter->SSID_len;
+				memcpy(p_ssid_list[i].SSID, iter->SSID, p_ssid_list[i].SSID_len);
+				i++;
+			}
+			err = dhd_pno_set_for_ssid(dhd, p_ssid_list, _params_legacy->nssid,
+				_params_legacy->scan_fr, _params_legacy->pno_repeat,
+				_params_legacy->pno_freq_expo_max, _params_legacy->chan_list,
+				_params_legacy->nchan);
+			if (err < 0) {
+				_pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+				DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n",
+					__FUNCTION__, err));
+				goto exit;
+			}
+		} else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
+			struct dhd_pno_bssid *iter, *next;
+			_params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]);
+			p_pfn_bssid = kzalloc(sizeof(wl_pfn_bssid_t) *
+				_params->params_hotlist.nbssid, GFP_KERNEL);
+			if (p_pfn_bssid == NULL) {
+				DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array"
+					" (count: %d)",
+					__FUNCTION__, _params->params_hotlist.nbssid));
+				err = BCME_ERROR;
+				_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+				goto exit;
+			}
+			i = 0;
+			/* convert dhd_pno_bssid to wl_pfn_bssid */
+			list_for_each_entry_safe(iter, next,
+				&_params->params_hotlist.bssid_list, list) {
+				memcpy(&p_pfn_bssid[i].macaddr, &iter->macaddr, ETHER_ADDR_LEN);
+				p_pfn_bssid[i].flags = iter->flags;
+				i++;
+			}
+			err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist);
+			if (err < 0) {
+				_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+				DHD_ERROR(("%s : failed to restart hotlist scan(err: %d)\n",
+					__FUNCTION__, err));
+				goto exit;
+			}
+		}
+	} else {
+		err = dhd_pno_clean(dhd);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+exit:
+	_params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
+	_dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE);
+	if (p_ssid_list)
+		kfree(p_ssid_list);
+	if (p_pfn_bssid)
+		kfree(p_pfn_bssid);
+	return err;
+}
+
+int
+dhd_pno_set_for_hotlist(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid,
+	struct dhd_pno_hotlist_params *hotlist_params)
+{
+	int err = BCME_OK;
+	int i;
+	uint16 _chan_list[WL_NUMCHANNELS];
+	int rem_nchan = 0;
+	int tot_nchan = 0;
+	int mode = 0;
+	dhd_pno_params_t *_params;
+	dhd_pno_params_t *_params2;
+	struct dhd_pno_bssid *_pno_bssid;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	NULL_CHECK(hotlist_params, "hotlist_params is NULL", err);
+	NULL_CHECK(p_pfn_bssid, "p_pfn_bssid is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+
+	if (!dhd_support_sta_mode(dhd)) {
+		err = BCME_BADOPTION;
+		goto exit;
+	}
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+	_params = &_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS];
+	if (!(_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE)) {
+		_pno_state->pno_mode |= DHD_PNO_HOTLIST_MODE;
+		err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_HOTLIST_MODE);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call _dhd_pno_reinitialize_prof\n",
+				__FUNCTION__));
+			goto exit;
+		}
+	}
+	/* dhd_pno_params_t is a union, so fill the hotlist members here */
+	_params->params_hotlist.nchan = hotlist_params->nchan;
+	_params->params_hotlist.scan_fr = hotlist_params->scan_fr;
+	if (hotlist_params->nchan)
+		memcpy(_params->params_hotlist.chan_list, hotlist_params->chan_list,
+			sizeof(_params->params_hotlist.chan_list));
+	memset(_chan_list, 0, sizeof(_chan_list));
+
+	rem_nchan = ARRAYSIZE(hotlist_params->chan_list) - hotlist_params->nchan;
+	if (hotlist_params->band == WLC_BAND_2G || hotlist_params->band == WLC_BAND_5G) {
+		/* get a valid channel list for the requested band (2.4 GHz b or 5 GHz a) */
+		err = _dhd_pno_get_channels(dhd,
+		&_params->params_hotlist.chan_list[hotlist_params->nchan],
+		&rem_nchan, hotlist_params->band, FALSE);
+		if (err < 0) {
+			DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n",
+				__FUNCTION__, hotlist_params->band));
+			goto exit;
+		}
+		/* update nchan: rem_nchan now holds the count of valid channels returned */
+		_params->params_hotlist.nchan += rem_nchan;
+		/* need to sort channel list */
+		sort(_params->params_hotlist.chan_list, _params->params_hotlist.nchan,
+			sizeof(_params->params_hotlist.chan_list[0]), _dhd_pno_cmpfunc, NULL);
+	}
+#ifdef PNO_DEBUG
+	{
+		int i;
+		DHD_PNO(("Channel list : "));
+		for (i = 0; i < _params->params_hotlist.nchan; i++) {
+			DHD_PNO(("%d ", _params->params_hotlist.chan_list[i]));
+		}
+		DHD_PNO(("\n"));
+	}
+#endif /* PNO_DEBUG */
+	if (_params->params_hotlist.nchan) {
+		/* copy the channel list into local array */
+		memcpy(_chan_list, _params->params_hotlist.chan_list,
+			sizeof(_chan_list));
+		tot_nchan = _params->params_hotlist.nchan;
+	}
+	if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+		DHD_PNO(("PNO SSID scan is in progress in firmware\n"));
+		/* store current pno_mode before disabling pno */
+		mode = _pno_state->pno_mode;
+		err = _dhd_pno_enable(dhd, PNO_OFF);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__));
+			goto exit;
+		}
+		/* restore the previous mode */
+		_pno_state->pno_mode = mode;
+		/* use the superset of the channel lists of the two modes */
+		_params2 = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
+		if (_params2->params_legacy.nchan > 0 &&
+			_params->params_hotlist.nchan > 0) {
+			err = _dhd_pno_chan_merge(_chan_list, &tot_nchan,
+				&_params2->params_legacy.chan_list[0],
+				_params2->params_legacy.nchan,
+				&_params->params_hotlist.chan_list[0],
+				_params->params_hotlist.nchan);
+			if (err < 0) {
+				DHD_ERROR(("%s : failed to merge channel list"
+					" between legacy and hotlist\n",
+					__FUNCTION__));
+				goto exit;
+			}
+		}
+	}
+
+	INIT_LIST_HEAD(&(_params->params_hotlist.bssid_list));
+
+	err = _dhd_pno_add_bssid(dhd, p_pfn_bssid, hotlist_params->nbssid);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to call _dhd_pno_add_bssid(err :%d)\n",
+			__FUNCTION__, err));
+		goto exit;
+	}
+	if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_HOTLIST_MODE)) < 0) {
+		DHD_ERROR(("%s : failed to call pno_set (err %d) in firmware\n",
+			__FUNCTION__, err));
+		goto exit;
+	}
+	if (tot_nchan > 0) {
+		if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) {
+			DHD_ERROR(("%s : failed to call pno_cfg (err %d) in firmware\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+	for (i = 0; i < hotlist_params->nbssid; i++) {
+		_pno_bssid = kzalloc(sizeof(struct dhd_pno_bssid), GFP_KERNEL);
+		NULL_CHECK(_pno_bssid, "_pno_bssid is NULL", err);
+		memcpy(&_pno_bssid->macaddr, &p_pfn_bssid[i].macaddr, ETHER_ADDR_LEN);
+		_pno_bssid->flags = p_pfn_bssid[i].flags;
+		list_add_tail(&_pno_bssid->list, &_params->params_hotlist.bssid_list);
+	}
+	_params->params_hotlist.nbssid = hotlist_params->nbssid;
+	if (_pno_state->pno_status == DHD_PNO_DISABLED) {
+		if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0)
+			DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__));
+	}
+exit:
+	/* clear mode in case of error */
+	if (err < 0)
+		_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+	return err;
+}
+
+int
+dhd_pno_stop_for_hotlist(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	uint32 mode = 0;
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pno_params_t *_params;
+	wlc_ssid_t *p_ssid_list = NULL;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n",
+			__FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if (!(_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE)) {
+		DHD_ERROR(("%s : Hotlist MODE is not enabled\n",
+			__FUNCTION__));
+		goto exit;
+	}
+	_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+
+	if (_pno_state->pno_mode & (DHD_PNO_LEGACY_MODE | DHD_PNO_BATCH_MODE)) {
+		/* retrieve the batching data from firmware into host */
+		dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE);
+		/* save current pno_mode before calling dhd_pno_clean */
+		mode = _pno_state->pno_mode;
+		err = dhd_pno_clean(dhd);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+		/* restore previous pno mode */
+		_pno_state->pno_mode = mode;
+		if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+			/* restart Legacy PNO Scan */
+			struct dhd_pno_legacy_params *_params_legacy;
+			struct dhd_pno_ssid *iter, *next;
+			wlc_ssid_t *p_ssid;
+			_params_legacy =
+				&(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy);
+			p_ssid_list = kcalloc(_params_legacy->nssid,
+				sizeof(wlc_ssid_t), GFP_KERNEL);
+			if (p_ssid_list == NULL) {
+				DHD_ERROR(("%s : failed to allocate wlc_ssid_t array (count: %d)\n",
+					__FUNCTION__, _params_legacy->nssid));
+				err = BCME_ERROR;
+				_pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+				goto exit;
+			}
+			/* convert the dhd_pno_ssid entries into the wlc_ssid_t array;
+			 * advance a cursor so p_ssid_list keeps pointing at the base
+			 * for the dhd_pno_set_for_ssid() call and the kfree() at exit
+			 */
+			p_ssid = p_ssid_list;
+			list_for_each_entry_safe(iter, next, &_params_legacy->ssid_list, list) {
+				p_ssid->SSID_len = iter->SSID_len;
+				memcpy(p_ssid->SSID, iter->SSID, p_ssid->SSID_len);
+				p_ssid++;
+			}
+			err = dhd_pno_set_for_ssid(dhd, p_ssid_list, _params_legacy->nssid,
+				_params_legacy->scan_fr, _params_legacy->pno_repeat,
+				_params_legacy->pno_freq_expo_max, _params_legacy->chan_list,
+				_params_legacy->nchan);
+			if (err < 0) {
+				_pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+				DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n",
+					__FUNCTION__, err));
+				goto exit;
+			}
+		} else if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+			/* restart Batching Scan */
+			_params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]);
+			err = dhd_pno_set_for_batch(dhd, &_params->params_batch);
+			if (err < 0) {
+				_pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE;
+				DHD_ERROR(("%s : failed to restart batch scan(err: %d)\n",
+					__FUNCTION__, err));
+				goto exit;
+			}
+		}
+	} else {
+		err = dhd_pno_clean(dhd);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+exit:
+	if (p_ssid_list)
+		kfree(p_ssid_list);
+	return err;
+}
+
+int
+dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data)
+{
+	int err = BCME_OK;
+	uint status, event_type, flags, datalen;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	if (!WLS_SUPPORTED(_pno_state)) {
+		DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+		err = BCME_UNSUPPORTED;
+		goto exit;
+	}
+	event_type = ntoh32(event->event_type);
+	flags = ntoh16(event->flags);
+	status = ntoh32(event->status);
+	datalen = ntoh32(event->datalen);
+	DHD_PNO(("%s enter : event_type :%d\n", __FUNCTION__, event_type));
+	switch (event_type) {
+	case WLC_E_PFN_BSSID_NET_FOUND:
+	case WLC_E_PFN_BSSID_NET_LOST:
+		/* TODO : need to implement event logic using generic netlink */
+		break;
+	case WLC_E_PFN_BEST_BATCHING:
+	{
+		struct dhd_pno_batch_params *params_batch;
+		params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch;
+		if (!waitqueue_active(&_pno_state->get_batch_done.wait)) {
+			DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING\n", __FUNCTION__));
+			params_batch->get_batch.buf = NULL;
+			params_batch->get_batch.bufsize = 0;
+			params_batch->get_batch.reason = PNO_STATUS_EVENT;
+			schedule_work(&_pno_state->work);
+		} else
+			DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING"
+				" will skip this event\n", __FUNCTION__));
+		break;
+	}
+	default:
+		DHD_ERROR(("unknown event : %d\n", event_type));
+	}
+exit:
+	return err;
+}
+
+int dhd_pno_init(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	UNUSED_PARAMETER(_dhd_pno_suspend);
+	if (dhd->pno_state)
+		goto exit;
+	dhd->pno_state = MALLOC(dhd->osh, sizeof(dhd_pno_status_info_t));
+	NULL_CHECK(dhd->pno_state, "failed to create dhd_pno_state", err);
+	memset(dhd->pno_state, 0, sizeof(dhd_pno_status_info_t));
+	/* need to check whether current firmware support batching and hotlist scan */
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	_pno_state->wls_supported = TRUE;
+	_pno_state->dhd = dhd;
+	mutex_init(&_pno_state->pno_mutex);
+	INIT_WORK(&_pno_state->work, _dhd_pno_get_batch_handler);
+	init_completion(&_pno_state->get_batch_done);
+	err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, 0);
+	if (err == BCME_UNSUPPORTED) {
+		_pno_state->wls_supported = FALSE;
+		DHD_INFO(("Current firmware doesn't support"
+			" Android Location Service\n"));
+	}
+exit:
+	return err;
+}
+
+int dhd_pno_deinit(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	dhd_pno_status_info_t *_pno_state;
+	dhd_pno_params_t *_params;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	NULL_CHECK(_pno_state, "pno_state is NULL", err);
+	/* may need to free legacy ssid_list */
+	if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+		_params = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS];
+		_dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE);
+	}
+
+	if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+		_params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
+		/* clear resource if the BATCH MODE is on */
+		_dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE);
+	}
+	cancel_work_sync(&_pno_state->work);
+	MFREE(dhd->osh, _pno_state, sizeof(dhd_pno_status_info_t));
+	dhd->pno_state = NULL;
+	return err;
+}
+#endif /* PNO_SUPPORT */
diff --git a/drivers/staging/edison-bcm43340/dhd_pno.h b/drivers/staging/edison-bcm43340/dhd_pno.h
new file mode 100644
index 0000000..e7d594c
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/dhd_pno.h
@@ -0,0 +1,253 @@
+/*
+ * Header file of Broadcom Dongle Host Driver (DHD)
+ * Preferred Network Offload (PNO) code and Wi-Fi Location Service (WLS) code.
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_pno.h 423669 2013-09-18 13:01:55Z $
+ */
+
+#ifndef __DHD_PNO_H__
+#define __DHD_PNO_H__
+
+#if defined(PNO_SUPPORT)
+#define PNO_TLV_PREFIX			'S'
+#define PNO_TLV_VERSION			'1'
+#define PNO_TLV_SUBTYPE_LEGACY_PNO '2'
+#define PNO_TLV_RESERVED		'0'
+
+#define PNO_BATCHING_SET "SET"
+#define PNO_BATCHING_GET "GET"
+#define PNO_BATCHING_STOP "STOP"
+
+#define PNO_PARAMS_DELIMETER " "
+#define PNO_PARAM_CHANNEL_DELIMETER ","
+#define PNO_PARAM_VALUE_DELLIMETER '='
+#define PNO_PARAM_SCANFREQ "SCANFREQ"
+#define PNO_PARAM_BESTN	"BESTN"
+#define PNO_PARAM_MSCAN "MSCAN"
+#define PNO_PARAM_CHANNEL "CHANNEL"
+#define PNO_PARAM_RTT "RTT"
+
+#define PNO_TLV_TYPE_SSID_IE		'S'
+#define PNO_TLV_TYPE_TIME		'T'
+#define PNO_TLV_FREQ_REPEAT		'R'
+#define PNO_TLV_FREQ_EXPO_MAX		'M'
+
+#define MAXNUM_SSID_PER_ADD	16
+#define MAXNUM_PNO_PARAMS 2
+#define PNO_TLV_COMMON_LENGTH	1
+#define DEFAULT_BATCH_MSCAN 16
+
+#define RESULTS_END_MARKER "----\n"
+#define SCAN_END_MARKER "####\n"
+#define AP_END_MARKER "====\n"
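+
+/*
+ * Illustrative sketch, not part of the original driver: the delimiter and
+ * marker defines above imply a private-command/result framing roughly like
+ * the example below. The exact grammar lives in the command parser in
+ * dhd_pno.c, so treat the sample values as assumptions:
+ *
+ *   command:  "SET SCANFREQ=30 MSCAN=8 BESTN=4 CHANNEL=1,6,11 RTT=0"
+ *             (params split on PNO_PARAMS_DELIMETER, key/value on
+ *              PNO_PARAM_VALUE_DELLIMETER, channels on
+ *              PNO_PARAM_CHANNEL_DELIMETER)
+ *   results:  each AP entry ends with AP_END_MARKER, each scan with
+ *             SCAN_END_MARKER, and the whole buffer with RESULTS_END_MARKER.
+ */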
+
+enum scan_status {
+	/* SCAN ABORT by other scan */
+	PNO_STATUS_ABORT,
+	/* RTT is presence or not */
+	PNO_STATUS_RTT_PRESENCE,
+	/* Disable PNO by Driver */
+	PNO_STATUS_DISABLE,
+	/* NORMAL BATCHING GET */
+	PNO_STATUS_NORMAL,
+	/* WLC_E_PFN_BEST_BATCHING */
+	PNO_STATUS_EVENT,
+	PNO_STATUS_MAX
+};
+#define PNO_STATUS_ABORT_MASK 0x0001
+#define PNO_STATUS_RTT_MASK 0x0002
+#define PNO_STATUS_DISABLE_MASK 0x0004
+#define PNO_STATUS_OOM_MASK 0x0010
+
+enum index_mode {
+	INDEX_OF_LEGACY_PARAMS,
+	INDEX_OF_BATCH_PARAMS,
+	INDEX_OF_HOTLIST_PARAMS,
+	INDEX_MODE_MAX
+};
+enum dhd_pno_status {
+	DHD_PNO_DISABLED,
+	DHD_PNO_ENABLED,
+	DHD_PNO_SUSPEND
+};
+typedef struct cmd_tlv {
+	char prefix;
+	char version;
+	char subtype;
+	char reserved;
+} cmd_tlv_t;
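+
+/*
+ * Illustrative sketch, not part of the original driver: with the PNO_TLV_*
+ * values above, a legacy-PNO command buffer starts with the four header
+ * bytes 'S' '1' '2' '0', which a parser could validate like this (assumed
+ * usage, not the driver's actual parsing code):
+ *
+ *	cmd_tlv_t *hdr = (cmd_tlv_t *)buf;
+ *	if (hdr->prefix != PNO_TLV_PREFIX ||
+ *	    hdr->version != PNO_TLV_VERSION ||
+ *	    hdr->subtype != PNO_TLV_SUBTYPE_LEGACY_PNO)
+ *		return BCME_BADARG;
+ */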
+typedef enum dhd_pno_mode {
+	/* Wi-Fi Legacy PNO Mode */
+	DHD_PNO_NONE_MODE = 0,
+	DHD_PNO_LEGACY_MODE = (1 << (0)),
+	/* Wi-Fi Android BATCH SCAN Mode */
+	DHD_PNO_BATCH_MODE = (1 << (1)),
+	/* Wi-Fi Android Hotlist SCAN Mode */
+	DHD_PNO_HOTLIST_MODE = (1 << (2))
+} dhd_pno_mode_t;
+struct dhd_pno_ssid {
+	uint32		SSID_len;
+	uchar		SSID[DOT11_MAX_SSID_LEN];
+	struct list_head list;
+};
+struct dhd_pno_bssid {
+	struct ether_addr	macaddr;
+	/* Bit4: suppress_lost, Bit3: suppress_found */
+	uint16			flags;
+	struct list_head list;
+};
+typedef struct dhd_pno_bestnet_entry {
+	struct ether_addr BSSID;
+	uint8	SSID_len;
+	uint8	SSID[DOT11_MAX_SSID_LEN];
+	int8	RSSI;
+	uint8	channel;
+	uint32	timestamp;
+	uint16	rtt0; /* distance_cm based on RTT */
+	uint16	rtt1; /* distance_cm based on sample standard deviation */
+	unsigned long recorded_time;
+	struct list_head list;
+} dhd_pno_bestnet_entry_t;
+#define BESTNET_ENTRY_SIZE (sizeof(dhd_pno_bestnet_entry_t))
+
+typedef struct dhd_pno_bestnet_header {
+	struct dhd_pno_bestnet_header *next;
+	uint8 reason;
+	uint32 tot_cnt;
+	uint32 tot_size;
+	struct list_head entry_list;
+} dhd_pno_best_header_t;
+#define BEST_HEADER_SIZE (sizeof(dhd_pno_best_header_t))
+
+typedef struct dhd_pno_scan_results {
+	dhd_pno_best_header_t *bestnetheader;
+	uint8 cnt_header;
+	struct list_head list;
+} dhd_pno_scan_results_t;
+#define SCAN_RESULTS_SIZE (sizeof(dhd_pno_scan_results_t))
+
+struct dhd_pno_get_batch_info {
+	/* info related to get batch */
+	char *buf;
+	bool batch_started;
+	uint32 tot_scan_cnt;
+	uint32 expired_tot_scan_cnt;
+	uint32 top_node_cnt;
+	uint32 bufsize;
+	uint32 bytes_written;
+	int reason;
+	struct list_head scan_results_list;
+	struct list_head expired_scan_results_list;
+};
+struct dhd_pno_legacy_params {
+	uint16 scan_fr;
+	uint16 chan_list[WL_NUMCHANNELS];
+	uint16 nchan;
+	int pno_repeat;
+	int pno_freq_expo_max;
+	int nssid;
+	struct list_head ssid_list;
+};
+struct dhd_pno_batch_params {
+	int32 scan_fr;
+	uint8 bestn;
+	uint8 mscan;
+	uint8 band;
+	uint16 chan_list[WL_NUMCHANNELS];
+	uint16 nchan;
+	uint16 rtt;
+	struct dhd_pno_get_batch_info get_batch;
+};
+struct dhd_pno_hotlist_params {
+	uint8 band;
+	int32 scan_fr;
+	uint16 chan_list[WL_NUMCHANNELS];
+	uint16 nchan;
+	uint16 nbssid;
+	struct list_head bssid_list;
+};
+typedef union dhd_pno_params {
+	struct dhd_pno_legacy_params params_legacy;
+	struct dhd_pno_batch_params params_batch;
+	struct dhd_pno_hotlist_params params_hotlist;
+} dhd_pno_params_t;
+typedef struct dhd_pno_status_info {
+	dhd_pub_t *dhd;
+	struct work_struct work;
+	struct mutex pno_mutex;
+	struct completion get_batch_done;
+	bool wls_supported; /* wifi location service supported or not */
+	enum dhd_pno_status pno_status;
+	enum dhd_pno_mode pno_mode;
+	dhd_pno_params_t pno_params_arr[INDEX_MODE_MAX];
+	struct list_head head_list;
+} dhd_pno_status_info_t;
+
+/* wrapper functions */
+extern int
+dhd_dev_pno_enable(struct net_device *dev, int enable);
+
+extern int
+dhd_dev_pno_stop_for_ssid(struct net_device *dev);
+
+extern int
+dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid,
+	uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan);
+
+extern int
+dhd_dev_pno_set_for_batch(struct net_device *dev,
+	struct dhd_pno_batch_params *batch_params);
+
+extern int
+dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize);
+
+extern int
+dhd_dev_pno_stop_for_batch(struct net_device *dev);
+
+extern int
+dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
+	struct dhd_pno_hotlist_params *hotlist_params);
+
+/* dhd pno functions */
+extern int dhd_pno_stop_for_ssid(dhd_pub_t *dhd);
+extern int dhd_pno_enable(dhd_pub_t *dhd, int enable);
+extern int dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_t* ssid_list, int nssid,
+	uint16  scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan);
+
+extern int dhd_pno_set_for_batch(dhd_pub_t *dhd, struct dhd_pno_batch_params *batch_params);
+
+extern int dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason);
+
+
+extern int dhd_pno_stop_for_batch(dhd_pub_t *dhd);
+
+extern int dhd_pno_set_for_hotlist(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid,
+	struct dhd_pno_hotlist_params *hotlist_params);
+
+extern int dhd_pno_stop_for_hotlist(dhd_pub_t *dhd);
+
+extern int dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data);
+extern int dhd_pno_init(dhd_pub_t *dhd);
+extern int dhd_pno_deinit(dhd_pub_t *dhd);
+#endif /* PNO_SUPPORT */
+
+#endif /* __DHD_PNO_H__ */
diff --git a/drivers/staging/edison-bcm43340/dhd_proto.h b/drivers/staging/edison-bcm43340/dhd_proto.h
new file mode 100644
index 0000000..4b794a4
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/dhd_proto.h
@@ -0,0 +1,115 @@
+/*
+ * Header file describing the internal (inter-module) DHD interfaces.
+ *
+ * Provides type definitions and function prototypes used to link the
+ * DHD OS, bus, and protocol modules.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_proto.h 455951 2014-02-17 10:52:22Z $
+ */
+
+#ifndef _dhd_proto_h_
+#define _dhd_proto_h_
+
+#include <dhdioctl.h>
+#include <wlioctl.h>
+
+#ifndef IOCTL_RESP_TIMEOUT
+#define IOCTL_RESP_TIMEOUT  2000  /* default ioctl response timeout in ms for production FW */
+#endif /* IOCTL_RESP_TIMEOUT */
+
+#ifndef MFG_IOCTL_RESP_TIMEOUT
+#define MFG_IOCTL_RESP_TIMEOUT  20000  /* default ioctl response timeout in ms for MFG FW */
+#endif /* MFG_IOCTL_RESP_TIMEOUT */
+
+/*
+ * Exported from the dhd protocol module (dhd_cdc, dhd_rndis)
+ */
+
+/* Linkage, sets prot link and updates hdrlen in pub */
+extern int dhd_prot_attach(dhd_pub_t *dhdp);
+
+/* Unlink, frees allocated protocol memory (including dhd_prot) */
+extern void dhd_prot_detach(dhd_pub_t *dhdp);
+
+/* Initialize protocol: sync w/dongle state.
+ * Sets dongle media info (iswl, drv_version, mac address).
+ */
+extern int dhd_prot_init(dhd_pub_t *dhdp);
+
+/* Stop protocol: sync w/dongle state. */
+extern void dhd_prot_stop(dhd_pub_t *dhdp);
+
+/* Add any protocol-specific data header.
+ * Caller must reserve prot_hdrlen prepend space.
+ */
+extern void dhd_prot_hdrpush(dhd_pub_t *, int ifidx, void *txp);
+
+/* Remove any protocol-specific data header. */
+extern int dhd_prot_hdrpull(dhd_pub_t *, int *ifidx, void *rxp, uchar *buf, uint *len);
+
+/* Use protocol to issue ioctl to dongle */
+extern int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len);
+
+/* Handles a protocol control response asynchronously */
+extern int dhd_prot_ctl_complete(dhd_pub_t *dhd);
+
+/* Check for and handle local prot-specific iovar commands */
+extern int dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name,
+                             void *params, int plen, void *arg, int len, bool set);
+
+/* Add prot dump output to a buffer */
+extern void dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+
+/* Update local copy of dongle statistics */
+extern void dhd_prot_dstats(dhd_pub_t *dhdp);
+
+extern int dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen);
+
+extern int dhd_preinit_ioctls(dhd_pub_t *dhd);
+
+extern int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
+	uint reorder_info_len, void **pkt, uint32 *free_buf_count);
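+
+/*
+ * Illustrative call-order sketch, not part of the original driver: the
+ * prototypes used are the real ones declared above (attach links the
+ * protocol layer and sets hdrlen; init syncs with dongle state), but the
+ * bring-up flow and the helper name are assumptions:
+ *
+ *	int example_proto_bringup(dhd_pub_t *dhdp)
+ *	{
+ *		int err = dhd_prot_attach(dhdp);
+ *		if (err)
+ *			return err;
+ *		err = dhd_prot_init(dhdp);
+ *		if (err)
+ *			dhd_prot_detach(dhdp);
+ *		return err;
+ *	}
+ */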
+
+#ifdef BCMPCIE
+extern int dhd_prot_process_msgbuf(dhd_pub_t *dhd);
+extern int dhd_prot_process_ctrlbuf(dhd_pub_t * dhd);
+extern bool dhd_prot_dtohsplit(dhd_pub_t * dhd);
+extern int dhd_post_dummy_msg(dhd_pub_t *dhd);
+extern int dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len);
+extern void dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 offset);
+extern int dhd_prot_txdata(dhd_pub_t *dhd, void *p, uint8 ifidx);
+extern int dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay);
+#endif
+
+/********************************
+ * For version-string expansion *
+ */
+#if defined(BDC)
+#define DHD_PROTOCOL "bdc"
+#elif defined(CDC)
+#define DHD_PROTOCOL "cdc"
+#else
+#define DHD_PROTOCOL "unknown"
+#endif /* proto */
+
+#endif /* _dhd_proto_h_ */
diff --git a/drivers/staging/edison-bcm43340/dhd_sdio.c b/drivers/staging/edison-bcm43340/dhd_sdio.c
new file mode 100644
index 0000000..df8b22d
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/dhd_sdio.c
@@ -0,0 +1,8786 @@
+/*
+ * DHD Bus Module for SDIO
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_sdio.c 476607 2014-05-09 09:25:12Z $
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmsdh.h>
+
+#ifdef BCMEMBEDIMAGE
+#include BCMEMBEDIMAGE
+#endif /* BCMEMBEDIMAGE */
+
+#include <bcmdefs.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+
+#include <siutils.h>
+#include <hndpmu.h>
+#include <hndsoc.h>
+#include <bcmsdpcm.h>
+#if defined(DHD_DEBUG)
+#include <hndrte_armtrap.h>
+#include <hndrte_cons.h>
+#endif /* defined(DHD_DEBUG) */
+#include <sbchipc.h>
+#include <sbhnddma.h>
+
+#include <sdio.h>
+#include <sbsdio.h>
+#include <sbsdpcmdev.h>
+#include <bcmsdpcm.h>
+#include <bcmsdbus.h>
+
+#include <proto/ethernet.h>
+#include <proto/802.1d.h>
+#include <proto/802.11.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <dhdioctl.h>
+#include <sdiovar.h>
+
+#ifdef PROP_TXSTATUS
+#include <dhd_wlfc.h>
+#endif
+
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_ip.h>
+#endif
+
+bool dhd_mp_halting(dhd_pub_t *dhdp);
+extern void bcmsdh_waitfor_iodrain(void *sdh);
+extern void bcmsdh_reject_ioreqs(void *sdh, bool reject);
+extern bool  bcmsdh_fatal_error(void *sdh);
+
+#ifndef DHDSDIO_MEM_DUMP_FNAME
+#define DHDSDIO_MEM_DUMP_FNAME         "mem_dump"
+#endif
+
+#define QLEN		(1024) /* bulk rx and tx queue lengths */
+#define FCHI		(QLEN - 10)
+#define FCLOW		(FCHI / 2)
+#define PRIOMASK	7
+
+#define TXRETRIES	2	/* # of retries for tx frames */
+#define READ_FRM_CNT_RETRIES	3
+#ifndef DHD_RXBOUND
+#define DHD_RXBOUND	50	/* Default for max rx frames in one scheduling */
+#endif
+
+#ifndef DHD_TXBOUND
+#define DHD_TXBOUND	20	/* Default for max tx frames in one scheduling */
+#endif
+
+#define DHD_TXMINMAX	1	/* Max tx frames if rx still pending */
+
+#define MEMBLOCK	2048		/* Block size used for downloading of dongle image */
+#define MAX_NVRAMBUF_SIZE	4096	/* max nvram buf size */
+#define MAX_DATA_BUF	(64 * 1024)	/* Must be large enough to hold biggest possible glom */
+
+#ifndef DHD_FIRSTREAD
+#define DHD_FIRSTREAD   32
+#endif
+#if !ISPOWEROF2(DHD_FIRSTREAD)
+#error DHD_FIRSTREAD is not a power of 2!
+#endif
+
+/* Total length of frame header for dongle protocol */
+#define SDPCM_HDRLEN	(SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
+#define SDPCM_HDRLEN_TXGLOM	(SDPCM_HDRLEN + SDPCM_HWEXT_LEN)
+#define MAX_TX_PKTCHAIN_CNT	SDPCM_MAXGLOM_SIZE
+
+#ifdef SDTEST
+#define SDPCM_RESERVE	(SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN)
+#else
+#define SDPCM_RESERVE	(SDPCM_HDRLEN + DHD_SDALIGN)
+#endif
+
+/* Space for header read, limit for data packets */
+#ifndef MAX_HDR_READ
+#define MAX_HDR_READ	32
+#endif
+#if !ISPOWEROF2(MAX_HDR_READ)
+#error MAX_HDR_READ is not a power of 2!
+#endif
+
+#define MAX_RX_DATASZ	2048
+
+/* Maximum milliseconds to wait for F2 to come up */
+#define DHD_WAIT_F2RDY	3000
+
+/* Bump up limit on waiting for HT to account for first startup;
+ * if the image is doing a CRC calculation before programming the PMU
+ * for HT availability, it could take a couple hundred ms more, so
+ * max out at 1 second (1000000 us).
+ */
+#if (PMU_MAX_TRANSITION_DLY <= 1000000)
+#undef PMU_MAX_TRANSITION_DLY
+#define PMU_MAX_TRANSITION_DLY 1000000
+#endif
+
+/* hooks for limiting threshold custom tx num in rx processing */
+#define DEFAULT_TXINRX_THRES	0
+#ifndef	CUSTOM_TXINRX_THRES
+#define CUSTOM_TXINRX_THRES		DEFAULT_TXINRX_THRES
+#endif
+
+/* Value for ChipClockCSR during initial setup */
+#define DHD_INIT_CLKCTL1	(SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ)
+#define DHD_INIT_CLKCTL2	(SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP)
+
+/* Flags for SDH calls */
+#define F2SYNC	(SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
+
+/* Packet free applicable unconditionally for sdio and sdspi.  Conditional if
+ * bufpool was present for gspi bus.
+ */
+#define PKTFREE2()		if ((bus->bus != SPI_BUS) || bus->usebufpool) \
+					PKTFREE(bus->dhd->osh, pkt, FALSE);
+DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep);
+
+
+#ifdef DHD_DEBUG
+/* Device console log buffer state */
+#define CONSOLE_LINE_MAX	192
+#define CONSOLE_BUFFER_MAX	2024
+typedef struct dhd_console {
+	uint		count;			/* Poll interval msec counter */
+	uint		log_addr;		/* Log struct address (fixed) */
+	hndrte_log_t	log;			/* Log struct (host copy) */
+	uint		bufsize;		/* Size of log buffer */
+	uint8		*buf;			/* Log buffer (host copy) */
+	uint		last;			/* Last buffer read index */
+} dhd_console_t;
+#endif /* DHD_DEBUG */
+
+#define	REMAP_ENAB(bus)			((bus)->remap)
+#define	REMAP_ISADDR(bus, a)		(((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize)))
+#define	KSO_ENAB(bus)			((bus)->kso)
+#define	SR_ENAB(bus)			((bus)->_srenab)
+#define	SLPAUTO_ENAB(bus)		((SR_ENAB(bus)) && ((bus)->_slpauto))
+#define	MIN_RSRC_ADDR			(SI_ENUM_BASE + 0x618)
+#define	MIN_RSRC_SR			0x3
+#define	CORE_CAPEXT_ADDR		(SI_ENUM_BASE + 0x64c)
+#define	CORE_CAPEXT_SR_SUPPORTED_MASK	(1 << 1)
+#define RCTL_MACPHY_DISABLE_MASK	(1 << 26)
+#define RCTL_LOGIC_DISABLE_MASK		(1 << 27)
+
+#define	OOB_WAKEUP_ENAB(bus)		((bus)->_oobwakeup)
+#define	GPIO_DEV_SRSTATE		16	/* Host gpio17 mapped to device gpio0 SR state */
+#define	GPIO_DEV_SRSTATE_TIMEOUT	320000	/* 320ms */
+#define	GPIO_DEV_WAKEUP			17	/* Host gpio17 mapped to device gpio1 wakeup */
+#define	CC_CHIPCTRL2_GPIO1_WAKEUP	(1  << 0)
+#define	CC_CHIPCTRL3_SR_ENG_ENABLE	(1  << 2)
+#define OVERFLOW_BLKSZ512_WM		96
+#define OVERFLOW_BLKSZ512_MES		80
+
+#define CC_PMUCC3	(0x3)
+/* Private data for SDIO bus interaction */
+typedef struct dhd_bus {
+	dhd_pub_t	*dhd;
+
+	bcmsdh_info_t	*sdh;			/* Handle for BCMSDH calls */
+	si_t		*sih;			/* Handle for SI calls */
+	char		*vars;			/* Variables (from CIS and/or other) */
+	uint		varsz;			/* Size of variables buffer */
+	uint32		sbaddr;			/* Current SB window pointer (-1, invalid) */
+
+	sdpcmd_regs_t	*regs;			/* Registers for SDIO core */
+	uint		sdpcmrev;		/* SDIO core revision */
+	uint		armrev;			/* CPU core revision */
+	uint		ramrev;			/* SOCRAM core revision */
+	uint32		ramsize;		/* Size of RAM in SOCRAM (bytes) */
+	uint32		orig_ramsize;		/* Size of RAM in SOCRAM (bytes) */
+	uint32		srmemsize;		/* Size of SRMEM */
+
+	uint32		bus;			/* gSPI or SDIO bus */
+	uint32		bus_num;		/* bus number */
+	uint32		slot_num;		/* slot ID */
+	uint32		hostintmask;	/* Copy of Host Interrupt Mask */
+	uint32		intstatus;		/* Intstatus bits (events) pending */
+	bool		dpc_sched;		/* Indicates DPC schedule (intrpt rcvd) */
+	bool		fcstate;		/* State of dongle flow-control */
+
+	uint16		cl_devid;		/* cached devid for dhdsdio_probe_attach() */
+	char		*fw_path;		/* module_param: path to firmware image */
+	char		*nv_path;		/* module_param: path to nvram vars file */
+	const char      *nvram_params;		/* user specified nvram params. */
+
+	uint		blocksize;		/* Block size of SDIO transfers */
+	uint		roundup;		/* Max roundup limit */
+
+	struct pktq	txq;			/* Queue length used for flow-control */
+	uint8		flowcontrol;		/* per prio flow control bitmask */
+	uint8		tx_seq;			/* Transmit sequence number (next) */
+	uint8		tx_max;			/* Maximum transmit sequence allowed */
+
+	uint8		hdrbuf[MAX_HDR_READ + DHD_SDALIGN];
+	uint8		*rxhdr;			/* Header of current rx frame (in hdrbuf) */
+	uint16		nextlen;		/* Next Read Len from last header */
+	uint8		rx_seq;			/* Receive sequence number (expected) */
+	bool		rxskip;			/* Skip receive (awaiting NAK ACK) */
+
+	void		*glomd;			/* Packet containing glomming descriptor */
+	void		*glom;			/* Packet chain for glommed superframe */
+	uint		glomerr;		/* Glom packet read errors */
+
+	uint8		*rxbuf;			/* Buffer for receiving control packets */
+	uint		rxblen;			/* Allocated length of rxbuf */
+	uint8		*rxctl;			/* Aligned pointer into rxbuf */
+	uint8		*databuf;		/* Buffer for receiving big glom packet */
+	uint8		*dataptr;		/* Aligned pointer into databuf */
+	uint		rxlen;			/* Length of valid data in buffer */
+
+	uint8		sdpcm_ver;		/* Bus protocol reported by dongle */
+
+	bool		intr;			/* Use interrupts */
+	bool		poll;			/* Use polling */
+	bool		ipend;			/* Device interrupt is pending */
+	bool		intdis;			/* Interrupts disabled by isr */
+	uint 		intrcount;		/* Count of device interrupt callbacks */
+	uint		lastintrs;		/* Count as of last watchdog timer */
+	uint		spurious;		/* Count of spurious interrupts */
+	uint		pollrate;		/* Ticks between device polls */
+	uint		polltick;		/* Tick counter */
+	uint		pollcnt;		/* Count of active polls */
+
+#ifdef DHD_DEBUG
+	dhd_console_t	console;		/* Console output polling support */
+	uint		console_addr;		/* Console address from shared struct */
+#endif /* DHD_DEBUG */
+
+	uint		regfails;		/* Count of R_REG/W_REG failures */
+
+	uint		clkstate;		/* State of sd and backplane clock(s) */
+	bool		activity;		/* Activity flag for clock down */
+	int32		idletime;		/* Control for activity timeout */
+	int32		idlecount;		/* Activity timeout counter */
+	int32		idleclock;		/* How to set bus driver when idle */
+	int32		sd_divisor;		/* Speed control to bus driver */
+	int32		sd_mode;		/* Mode control to bus driver */
+	int32		sd_rxchain;		/* If bcmsdh api accepts PKT chains */
+	bool		use_rxchain;		/* If dhd should use PKT chains */
+	bool		sleeping;		/* Is SDIO bus sleeping? */
+	wait_queue_head_t bus_sleep;
+	uint		rxflow_mode;		/* Rx flow control mode */
+	bool		rxflow;			/* Is rx flow control on */
+	uint		prev_rxlim_hit;		/* Is prev rx limit exceeded (per dpc schedule) */
+	bool		alp_only;		/* Don't use HT clock (ALP only) */
+	/* Field to decide if rx of control frames happens in rxbuf or lb-pool */
+	bool		usebufpool;
+	int32		txinrx_thres;	/* num of in-queued pkts */
+	int32		dotxinrx;	/* tx first in dhdsdio_readframes */
+#ifdef SDTEST
+	/* external loopback */
+	bool		ext_loop;
+	uint8		loopid;
+
+	/* pktgen configuration */
+	uint		pktgen_freq;		/* Ticks between bursts */
+	uint		pktgen_count;		/* Packets to send each burst */
+	uint		pktgen_print;		/* Bursts between count displays */
+	uint		pktgen_total;		/* Stop after this many */
+	uint		pktgen_minlen;		/* Minimum packet data len */
+	uint		pktgen_maxlen;		/* Maximum packet data len */
+	uint		pktgen_mode;		/* Configured mode: tx, rx, or echo */
+	uint		pktgen_stop;		/* Number of tx failures causing stop */
+
+	/* active pktgen fields */
+	uint		pktgen_tick;		/* Tick counter for bursts */
+	uint		pktgen_ptick;		/* Burst counter for printing */
+	uint		pktgen_sent;		/* Number of test packets generated */
+	uint		pktgen_rcvd;		/* Number of test packets received */
+	uint		pktgen_prev_time;	/* Time at which previous stats were printed */
+	uint		pktgen_prev_sent;	/* Number of test packets generated when
+						 * previous stats were printed
+						 */
+	uint		pktgen_prev_rcvd;	/* Number of test packets received when
+						 * previous stats were printed
+						 */
+	uint		pktgen_fail;		/* Number of failed send attempts */
+	uint16		pktgen_len;		/* Length of next packet to send */
+#define PKTGEN_RCV_IDLE     (0)
+#define PKTGEN_RCV_ONGOING  (1)
+	uint16		pktgen_rcv_state;		/* receive state */
+	uint		pktgen_rcvd_rcvsession;	/* test pkts rcvd per rcv session. */
+#endif /* SDTEST */
+
+	/* Some additional counters */
+	uint		tx_sderrs;		/* Count of tx attempts with sd errors */
+	uint		fcqueued;		/* Tx packets that got queued */
+	uint		rxrtx;			/* Count of rtx requests (NAK to dongle) */
+	uint		rx_toolong;		/* Receive frames too long to receive */
+	uint		rxc_errors;		/* SDIO errors when reading control frames */
+	uint		rx_hdrfail;		/* SDIO errors on header reads */
+	uint		rx_badhdr;		/* Bad received headers (roosync?) */
+	uint		rx_badseq;		/* Mismatched rx sequence number */
+	uint		fc_rcvd;		/* Number of flow-control events received */
+	uint		fc_xoff;		/* Number which turned on flow-control */
+	uint		fc_xon;			/* Number which turned off flow-control */
+	uint		rxglomfail;		/* Failed deglom attempts */
+	uint		rxglomframes;		/* Number of glom frames (superframes) */
+	uint		rxglompkts;		/* Number of packets from glom frames */
+	uint		f2rxhdrs;		/* Number of header reads */
+	uint		f2rxdata;		/* Number of frame data reads */
+	uint		f2txdata;		/* Number of f2 frame writes */
+	uint		f1regdata;		/* Number of f1 register accesses */
+#ifdef DHDENABLE_TAILPAD
+	uint		tx_tailpad_chain;	/* Number of tail padding by chaining pad_pkt */
+	uint		tx_tailpad_pktget;	/* Number of tail padding by new PKTGET */
+#endif
+	uint8		*ctrl_frame_buf;
+	uint32		ctrl_frame_len;
+	bool		ctrl_frame_stat;
+	uint32		rxint_mode;	/* rx interrupt mode */
+	bool		remap;		/* Contiguous 1MB RAM: 512K socram + 512K devram
+					 * Available with socram rev 16
+					 * Remap region not DMA-able
+					 */
+	bool		kso;
+	bool		_slpauto;
+	bool		_oobwakeup;
+	bool		_srenab;
+	bool        readframes;
+	bool        reqbussleep;
+	uint32		resetinstr;
+	uint32		dongle_ram_base;
+
+	void		*glom_pkt_arr[SDPCM_MAXGLOM_SIZE];	/* Array of pkts for glomming */
+	uint32		txglom_cnt;	/* Number of pkts in the glom array */
+	uint32		txglom_total_len;	/* Total length of pkts in glom array */
+	bool		txglom_enable;	/* Flag to indicate whether tx glom is enabled/disabled */
+	uint32		txglomsize;	/* Glom size limitation */
+	void		*pad_pkt;
+} dhd_bus_t;
+
+/* clkstate */
+#define CLK_NONE	0
+#define CLK_SDONLY	1
+#define CLK_PENDING	2	/* Not used yet */
+#define CLK_AVAIL	3
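+
+/* Illustrative usage note (an assumption, not from the original source):
+ * clkstate normally walks CLK_NONE -> CLK_SDONLY -> CLK_AVAIL and back via
+ * dhdsdio_clkctl(); a caller that needs backplane register access typically
+ * does:
+ *
+ *	BUS_WAKE(bus);
+ *	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ */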
+
+#define DHD_NOPMU(dhd)	(FALSE)
+
+#ifdef DHD_DEBUG
+static int qcount[NUMPRIO];
+static int tx_packets[NUMPRIO];
+#endif /* DHD_DEBUG */
+
+/* Deferred transmit */
+const uint dhd_deferred_tx = 1;
+
+extern uint dhd_watchdog_ms;
+
+extern void dhd_os_wd_timer(void *bus, uint wdtick);
+
+/* Tx/Rx bounds */
+uint dhd_txbound;
+uint dhd_rxbound;
+uint dhd_txminmax = DHD_TXMINMAX;
+
+/* override the RAM size if possible */
+#define DONGLE_MIN_RAMSIZE (128 * 1024)
+int dhd_dongle_ramsize;
+
+uint dhd_doflow = TRUE;
+uint dhd_dpcpoll = FALSE;
+
+module_param(dhd_doflow, uint, 0644);
+module_param(dhd_dpcpoll, uint, 0644);
+
+static bool dhd_alignctl;
+
+static bool sd1idle;
+
+static bool retrydata;
+#define RETRYCHAN(chan) (((chan) == SDPCM_EVENT_CHANNEL) || retrydata)
+
+static uint watermark = 8;
+static uint mesbusyctrl = 0;
+static const uint firstread = DHD_FIRSTREAD;
+
+/* Retry count for register access failures */
+static const uint retry_limit = 2;
+
+/* Force even SD lengths (some host controllers mess up on odd bytes) */
+static bool forcealign;
+
+#define ALIGNMENT  4
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+extern void bcmsdh_enable_hw_oob_intr(void *sdh, bool enable);
+#endif
+
+#if defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD)
+#error OOB_INTR_ONLY is NOT working with SDIO_ISR_THREAD
+#endif /* defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) */
+#define PKTALIGN(osh, p, len, align)					\
+	do {								\
+		uintptr datalign;						\
+		datalign = (uintptr)PKTDATA((osh), (p));		\
+		datalign = ROUNDUP(datalign, (align)) - datalign;	\
+		ASSERT(datalign < (align));				\
+		ASSERT(PKTLEN((osh), (p)) >= ((len) + datalign));	\
+		if (datalign)						\
+			PKTPULL((osh), (p), (uint)datalign);			\
+		PKTSETLEN((osh), (p), (len));				\
+	} while (0)
+
+/* Limit on rounding up frames */
+static const uint max_roundup = 512;
+
+/* Try doing readahead */
+static bool dhd_readahead;
+
+/* To check if there's window offered */
+#define DATAOK(bus) \
+	(((uint8)(bus->tx_max - bus->tx_seq) > 1) && \
+	(((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
+
+/* To check if there's window offered for ctrl frame */
+#define TXCTLOK(bus) \
+	(((uint8)(bus->tx_max - bus->tx_seq) != 0) && \
+	(((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
+
+/* Number of pkts available in dongle for data RX */
+#define DATABUFCNT(bus) \
+	((uint8)(bus->tx_max - bus->tx_seq) - 1)
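+
+/* Worked example, derived from the macros above: the arithmetic is done in
+ * uint8, i.e. modulo 256, so it survives sequence wrap-around. With
+ * tx_seq = 250 and tx_max = 4, (uint8)(4 - 250) == 10: DATAOK() sees a
+ * 10-frame window and DATABUFCNT() reports 9 buffers. The 0x80 test rejects
+ * any "window" over 127, which would indicate a stale or corrupt tx_max
+ * from the dongle rather than a real credit grant.
+ */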
+
+/* Macros to get register read/write status */
+/* NOTE: these assume a local dhd_bus_t *bus! */
+#define R_SDREG(regvar, regaddr, retryvar) \
+do { \
+	retryvar = 0; \
+	do { \
+		regvar = R_REG(bus->dhd->osh, regaddr); \
+	} while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
+	if (retryvar) { \
+		bus->regfails += (retryvar-1); \
+		if (retryvar > retry_limit) { \
+			DHD_ERROR(("%s: FAILED " #regvar " READ, LINE %d\n", \
+			           __FUNCTION__, __LINE__)); \
+			regvar = 0; \
+		} \
+	} \
+} while (0)
+
+#define W_SDREG(regval, regaddr, retryvar) \
+do { \
+	retryvar = 0; \
+	do { \
+		W_REG(bus->dhd->osh, regaddr, regval); \
+	} while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
+	if (retryvar) { \
+		bus->regfails += (retryvar-1); \
+		if (retryvar > retry_limit) \
+			DHD_ERROR(("%s: FAILED REGISTER WRITE, LINE %d\n", \
+			           __FUNCTION__, __LINE__)); \
+	} \
+} while (0)
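+
+/* Illustrative usage sketch (an assumption, not from the original source),
+ * with a local dhd_bus_t *bus and assuming sdpcmd_regs_t exposes an
+ * intstatus register, as the rest of this file implies:
+ *
+ *	uint retries;
+ *	uint32 intstatus;
+ *	R_SDREG(intstatus, &bus->regs->intstatus, retries);
+ *	W_SDREG(intstatus, &bus->regs->intstatus, retries);
+ */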
+
+#define BUS_WAKE(bus) \
+	do { \
+		bus->idlecount = 0; \
+		if ((bus)->sleeping) \
+			dhdsdio_bussleep((bus), FALSE); \
+	} while (0)
+
+/*
+ * pktavail interrupts from dongle to host can be managed in 3 different ways
+ * whenever there is a packet available in dongle to transmit to host.
+ *
+ * Mode 0:	Dongle writes the software host mailbox and host is interrupted.
+ * Mode 1:	(sdiod core rev >= 4)
+ *		Device sets a new bit in the intstatus whenever there is a packet
+ *		available in fifo.  Host can't clear this specific status bit until all the
+ *		packets are read from the FIFO.  No need to ack dongle intstatus.
+ * Mode 2:	(sdiod core rev >= 4)
+ *		Device sets a bit in the intstatus, and host acks this by writing
+ *		one to this bit.  Dongle won't generate any more packet interrupts
+ *		until host reads all the packets from the dongle and reads a zero to
+ *		figure that there are no more packets.  No need to disable host ints.
+ *		Need to ack the intstatus.
+ */
+
+#define SDIO_DEVICE_HMB_RXINT		0	/* default old way */
+#define SDIO_DEVICE_RXDATAINT_MODE_0	1	/* from sdiod rev 4 */
+#define SDIO_DEVICE_RXDATAINT_MODE_1	2	/* from sdiod rev 4 */
+
+
+#define FRAME_AVAIL_MASK(bus) 	\
+	((bus->rxint_mode == SDIO_DEVICE_HMB_RXINT) ? I_HMB_FRAME_IND : I_XMTDATA_AVAIL)
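+
+/* Illustrative sketch (hypothetical helper, not driver code) of how a
+ * receive path might ack per the three-mode description above; only the
+ * mask/ack semantics come from that comment block:
+ *
+ *	static void example_ack_rxint(dhd_bus_t *bus)
+ *	{
+ *		uint retries;
+ *
+ *		if (bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_1)
+ *			W_SDREG(I_XMTDATA_AVAIL, &bus->regs->intstatus, retries);
+ *	}
+ *
+ * Modes SDIO_DEVICE_HMB_RXINT and SDIO_DEVICE_RXDATAINT_MODE_0 need no
+ * explicit data-interrupt ack.
+ */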
+
+#define DHD_BUS			SDIO_BUS
+
+#define PKT_AVAILABLE(bus, intstatus)	((intstatus) & (FRAME_AVAIL_MASK(bus)))
+
+#define HOSTINTMASK		(I_HMB_SW_MASK | I_CHIPACTIVE)
+
+#define GSPI_PR55150_BAILOUT
+
+#if defined(SUPPORT_MULTIPLE_REVISION)
+static uint bcm_chip_id = 0;
+#endif
+
+#ifdef SDTEST
+static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq);
+static void dhdsdio_sdtest_set(dhd_bus_t *bus, uint count);
+#endif
+
+#ifdef DHD_DEBUG
+static uint checkdied_dumpram=1;
+static uint checkdied_cnt=0;
+static int dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size);
+static int dhd_serialconsole(dhd_bus_t *bus, bool get, bool enable, int *bcmerror);
+#endif /* DHD_DEBUG */
+
+static int dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap);
+static int dhdsdio_download_state(dhd_bus_t *bus, bool enter);
+
+static void dhdsdio_release(dhd_bus_t *bus, osl_t *osh);
+static void dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh);
+static void dhdsdio_disconnect(void *ptr);
+static bool dhdsdio_chipmatch(uint16 chipid);
+static bool dhdsdio_probe_attach(dhd_bus_t *bus, osl_t *osh, void *sdh,
+                                 void * regsva, uint16  devid);
+static bool dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static bool dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static void dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation,
+	bool reset_flag);
+
+static void dhd_dongle_setramsize(struct dhd_bus *bus, int mem_size);
+static int dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags,
+	uint8 *buf, uint nbytes,
+	void *pkt, bcmsdh_cmplt_fn_t complete, void *handle);
+static int dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags,
+	uint8 *buf, uint nbytes,
+	void *pkt, bcmsdh_cmplt_fn_t complete, void *handle, int max_retry);
+static int dhdsdio_txpkt(dhd_bus_t *bus, uint chan, void** pkts, int num_pkt, bool free_pkt);
+static int dhdsdio_txpkt_preprocess(dhd_bus_t *bus, void *pkt, int chan, int txseq,
+	int prev_chain_total_len, bool last_chained_pkt,
+	int *pad_pkt_len, void **new_pkt);
+static int dhdsdio_txpkt_postprocess(dhd_bus_t *bus, void *pkt);
+
+static int dhdsdio_download_firmware(dhd_bus_t *bus, osl_t *osh, void *sdh);
+static int _dhdsdio_download_firmware(dhd_bus_t *bus);
+
+static int dhdsdio_download_code_file(dhd_bus_t *bus, char *image_path);
+static int dhdsdio_download_nvram(dhd_bus_t *bus);
+#ifdef BCMEMBEDIMAGE
+static int dhdsdio_download_code_array(dhd_bus_t *bus);
+#endif
+static int dhdsdio_bussleep(dhd_bus_t *bus, bool sleep);
+static int dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok);
+static uint8 dhdsdio_sleepcsr_get(dhd_bus_t *bus);
+
+#ifdef WLMEDIA_HTSF
+#include <htsf.h>
+extern uint32 dhd_get_htsf(void *dhd, int ifidx);
+#endif /* WLMEDIA_HTSF */
+
+static void
+dhdsdio_tune_fifoparam(struct dhd_bus *bus)
+{
+	int err;
+	uint8 devctl, wm, mes;
+
+	if (bus->sih->buscorerev >= 15) {
+		/* See .ppt in PR for these recommended values */
+		if (bus->blocksize == 512) {
+			wm = OVERFLOW_BLKSZ512_WM;
+			mes = OVERFLOW_BLKSZ512_MES;
+		} else {
+			mes = bus->blocksize/4;
+			wm = bus->blocksize/4;
+		}
+
+		watermark = wm;
+		mesbusyctrl = mes;
+	} else {
+		DHD_INFO(("skip fifotune: SdioRev(%d) is lower than the minimum required revision\n",
+			bus->sih->buscorerev));
+		return;
+	}
+
+	/* Update watermark */
+	if (wm > 0) {
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, wm, &err);
+
+		devctl = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+		devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+	}
+
+	/* Update MES */
+	if (mes > 0) {
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL,
+			(mes | SBSDIO_MESBUSYCTRL_ENAB), &err);
+	}
+
+	DHD_INFO(("Apply overflow WAR: 0x%02x 0x%02x 0x%02x\n",
+		bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err),
+		bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, &err),
+		bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL, &err)));
+}
+
+static void
+dhd_dongle_setramsize(struct dhd_bus *bus, int mem_size)
+{
+	int32 min_size =  DONGLE_MIN_RAMSIZE;
+	/* Restrict the ramsize to user specified limit */
+	DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
+		dhd_dongle_ramsize, min_size));
+	if ((dhd_dongle_ramsize > min_size) &&
+		(dhd_dongle_ramsize < (int32)bus->orig_ramsize))
+		bus->ramsize = dhd_dongle_ramsize;
+}
+
+static int
+dhdsdio_set_siaddr_window(dhd_bus_t *bus, uint32 address)
+{
+	int err = 0;
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+	                 (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+	if (!err)
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
+		                 (address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
+	if (!err)
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
+		                 (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err);
+	return err;
+}
+
+
+#ifdef USE_OOB_GPIO1
+static int
+dhdsdio_oobwakeup_init(dhd_bus_t *bus)
+{
+	uint32 val, addr, data;
+
+	bcmsdh_gpioouten(bus->sdh, GPIO_DEV_WAKEUP);
+
+	addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+	data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
+
+	/* Set device for gpio1 wakeup */
+	bcmsdh_reg_write(bus->sdh, addr, 4, 2);
+	val = bcmsdh_reg_read(bus->sdh, data, 4);
+	val |= CC_CHIPCTRL2_GPIO1_WAKEUP;
+	bcmsdh_reg_write(bus->sdh, data, 4, val);
+
+	bus->_oobwakeup = TRUE;
+
+	return 0;
+}
+#endif /* USE_OOB_GPIO1 */
+
+/*
+ * Query if FW is in SR mode
+ */
+static bool
+dhdsdio_sr_cap(dhd_bus_t *bus)
+{
+	bool cap = FALSE;
+	uint32  core_capext, addr, data;
+
+	if (bus->sih->chip == BCM43430_CHIP_ID) {
+		/* For now SR capability would not be exercised */
+		return cap;
+	}
+	if (bus->sih->chip == BCM4324_CHIP_ID) {
+		addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+		data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
+		bcmsdh_reg_write(bus->sdh, addr, 4, 3);
+		core_capext = bcmsdh_reg_read(bus->sdh, data, 4);
+	} else if (bus->sih->chip == BCM4330_CHIP_ID) {
+		core_capext = FALSE;
+	} else if ((bus->sih->chip == BCM4335_CHIP_ID) ||
+		(bus->sih->chip == BCM4339_CHIP_ID) ||
+		(bus->sih->chip == BCM43349_CHIP_ID) ||
+		(bus->sih->chip == BCM4345_CHIP_ID) ||
+		(bus->sih->chip == BCM4354_CHIP_ID) ||
+		(bus->sih->chip == BCM4356_CHIP_ID) ||
+		(bus->sih->chip == BCM4350_CHIP_ID)) {
+		core_capext = TRUE;
+	} else {
+		core_capext = bcmsdh_reg_read(bus->sdh, CORE_CAPEXT_ADDR, 4);
+		core_capext = (core_capext & CORE_CAPEXT_SR_SUPPORTED_MASK);
+	}
+	if (!(core_capext))
+		return FALSE;
+
+	if (bus->sih->chip == BCM4324_CHIP_ID) {
+		/* FIX: Should change to query SR control register instead */
+		cap = TRUE;
+	} else if ((bus->sih->chip == BCM4335_CHIP_ID) ||
+		(bus->sih->chip == BCM4339_CHIP_ID) ||
+		(bus->sih->chip == BCM43349_CHIP_ID) ||
+		(bus->sih->chip == BCM4345_CHIP_ID) ||
+		(bus->sih->chip == BCM4354_CHIP_ID) ||
+		(bus->sih->chip == BCM4356_CHIP_ID) ||
+		(bus->sih->chip == BCM4350_CHIP_ID)) {
+		uint32 enabval = 0;
+		addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+		data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
+		bcmsdh_reg_write(bus->sdh, addr, 4, CC_PMUCC3);
+		enabval = bcmsdh_reg_read(bus->sdh, data, 4);
+
+		if ((bus->sih->chip == BCM4350_CHIP_ID) ||
+			(bus->sih->chip == BCM4345_CHIP_ID) ||
+			(bus->sih->chip == BCM4356_CHIP_ID) ||
+			(bus->sih->chip == BCM4354_CHIP_ID))
+			enabval &= CC_CHIPCTRL3_SR_ENG_ENABLE;
+
+		if (enabval)
+			cap = TRUE;
+	} else {
+		data = bcmsdh_reg_read(bus->sdh,
+			SI_ENUM_BASE + OFFSETOF(chipcregs_t, retention_ctl), 4);
+		if ((data & (RCTL_MACPHY_DISABLE_MASK | RCTL_LOGIC_DISABLE_MASK)) == 0)
+			cap = TRUE;
+	}
+
+	return cap;
+}
+
+static int
+dhdsdio_srwar_init(dhd_bus_t *bus)
+{
+	bcmsdh_gpio_init(bus->sdh);
+
+#ifdef USE_OOB_GPIO1
+	dhdsdio_oobwakeup_init(bus);
+#endif
+
+
+	return 0;
+}
+
+static int
+dhdsdio_sr_init(dhd_bus_t *bus)
+{
+	uint8 val;
+	int err = 0;
+
+	if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev == 2))
+		dhdsdio_srwar_init(bus);
+
+	val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
+	val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL,
+		1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT, &err);
+	val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
+
+	/* Add CMD14 Support */
+	dhdsdio_devcap_set(bus,
+		(SDIOD_CCCR_BRCM_CARDCAP_CMD14_SUPPORT | SDIOD_CCCR_BRCM_CARDCAP_CMD14_EXT));
+
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1,
+		SBSDIO_FUNC1_CHIPCLKCSR, SBSDIO_FORCE_HT, &err);
+
+	bus->_slpauto = dhd_slpauto ? TRUE : FALSE;
+
+	bus->_srenab = TRUE;
+
+	return 0;
+}
+
+/*
+ * FIX: Be sure KSO bit is enabled
+ * Currently, it's defaulting to 0 which should be 1.
+ */
+static int
+dhdsdio_clk_kso_init(dhd_bus_t *bus)
+{
+	uint8 val;
+	int err = 0;
+
+	/* set flag */
+	bus->kso = TRUE;
+
+	/*
+	 * Enable KeepSdioOn (KSO) bit for normal operation
+	 * Default is 0 (4334A0) so set it. Fixed in B0.
+	 */
+	val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, NULL);
+	if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
+		val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, val, &err);
+		if (err)
+			DHD_ERROR(("%s: SBSDIO_FUNC1_SLEEPCSR err: 0x%x\n", __FUNCTION__, err));
+	}
+
+	return 0;
+}
+
+#define KSO_DBG(x)
+#define KSO_WAIT_US 50
+#define KSO_WAIT_MS 1
+#define KSO_SLEEP_RETRY_COUNT 20
+#define ERROR_BCME_NODEVICE_MAX 1
+
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+#define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY / \
+	(KSO_WAIT_US + (SDIOH_API_ACCESS_RETRY_LIMIT * 2 * BCMSDH_RW_RETRY)))
+#else
+#define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
+#endif /* SDIOH_API_ACCESS_RETRY_LIMIT */
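+
+/* Worked example, derived from the defaults above with the retry limit
+ * disabled: MAX_KSO_ATTEMPTS = PMU_MAX_TRANSITION_DLY / KSO_WAIT_US
+ * = 1000000 / 50 = 20000 polls, i.e. up to ~1 s of waiting, with a 1 ms
+ * sleep inserted every KSO_SLEEP_RETRY_COUNT (20) attempts by
+ * dhdsdio_clk_kso_enab() below.
+ */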
+static int
+dhdsdio_clk_kso_enab(dhd_bus_t *bus, bool on)
+{
+	uint8 wr_val = 0, rd_val, cmp_val, bmask;
+	int err = 0;
+	int try_cnt = 0;
+
+	KSO_DBG(("%s> op:%s\n", __FUNCTION__, (on ? "KSO_SET" : "KSO_CLR")));
+
+	wr_val |= (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
+
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
+
+	if (on) {
+		cmp_val = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK |  SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK;
+		bmask = cmp_val;
+
+		OSL_SLEEP(3);
+	} else {
+		/* Put device to sleep, turn off  KSO  */
+		cmp_val = 0;
+		bmask = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK;
+	}
+
+	do {
+		rd_val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, &err);
+		if (((rd_val & bmask) == cmp_val) && !err)
+			break;
+
+		KSO_DBG(("%s> KSO wr/rd retry:%d, ERR:%x \n", __FUNCTION__, try_cnt, err));
+
+		if (((try_cnt + 1) % KSO_SLEEP_RETRY_COUNT) == 0) {
+			OSL_SLEEP(KSO_WAIT_MS);
+		} else
+			OSL_DELAY(KSO_WAIT_US);
+
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
+	} while (try_cnt++ < MAX_KSO_ATTEMPTS);
+
+
+	if (try_cnt > 2)
+		KSO_DBG(("%s> op:%s, try_cnt:%d, rd_val:%x, ERR:%x \n",
+			__FUNCTION__, (on ? "KSO_SET" : "KSO_CLR"), try_cnt, rd_val, err));
+
+	if (try_cnt > MAX_KSO_ATTEMPTS)  {
+		DHD_ERROR(("%s> op:%s, ERROR: try_cnt:%d, rd_val:%x, ERR:%x \n",
+			__FUNCTION__, (on ? "KSO_SET" : "KSO_CLR"), try_cnt, rd_val, err));
+	}
+	return err;
+}
+
+static int
+dhdsdio_clk_kso_iovar(dhd_bus_t *bus, bool on)
+{
+	int err = 0;
+
+	if (on == FALSE) {
+
+		BUS_WAKE(bus);
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+		DHD_ERROR(("%s: KSO disable clk: 0x%x\n", __FUNCTION__,
+			bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+			SBSDIO_FUNC1_CHIPCLKCSR, &err)));
+		dhdsdio_clk_kso_enab(bus, FALSE);
+	} else {
+		DHD_ERROR(("%s: KSO enable\n", __FUNCTION__));
+
+		/* Make sure we have SD bus access */
+		if (bus->clkstate == CLK_NONE) {
+			DHD_ERROR(("%s: Request SD clk\n", __FUNCTION__));
+			dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+		}
+
+		dhdsdio_clk_kso_enab(bus, TRUE);
+
+		DHD_ERROR(("%s: sleepcsr: 0x%x\n", __FUNCTION__,
+			dhdsdio_sleepcsr_get(bus)));
+	}
+
+	bus->kso = on;
+	BCM_REFERENCE(err);
+
+	return 0;
+}
+
+static uint8
+dhdsdio_sleepcsr_get(dhd_bus_t *bus)
+{
+	int err = 0;
+	uint8 val = 0;
+
+	val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, &err);
+	if (err)
+		DHD_TRACE(("Failed to read SLEEPCSR: %d\n", err));
+
+	return val;
+}
+
+uint8
+dhdsdio_devcap_get(dhd_bus_t *bus)
+{
+	return bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_BRCM_CARDCAP, NULL);
+}
+
+static int
+dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap)
+{
+	int err = 0;
+
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_BRCM_CARDCAP, cap, &err);
+	if (err)
+		DHD_ERROR(("%s: devcap set err: 0x%x\n", __FUNCTION__, err));
+
+	return 0;
+}
+
+static int
+dhdsdio_clk_devsleep_iovar(dhd_bus_t *bus, bool on)
+{
+	int err = 0, retry;
+	uint8 val;
+
+	retry = 0;
+	if (on == TRUE) {
+		/* Enter Sleep */
+
+		/* Be sure we request clk before going to sleep
+		 * so we can wake-up with clk request already set
+		 * else device can go back to sleep immediately
+		 */
+		if (!SLPAUTO_ENAB(bus))
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+		else {
+			val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+			if ((val & SBSDIO_CSR_MASK) == 0) {
+				DHD_ERROR(("%s: No clock before enter sleep:0x%x\n",
+					__FUNCTION__, val));
+
+				/* Reset clock request */
+				bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+					SBSDIO_ALP_AVAIL_REQ, &err);
+				DHD_ERROR(("%s: clock before sleep:0x%x\n", __FUNCTION__,
+					bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+					SBSDIO_FUNC1_CHIPCLKCSR, &err)));
+			}
+		}
+
+		DHD_TRACE(("%s: clk before sleep: 0x%x\n", __FUNCTION__,
+			bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+			SBSDIO_FUNC1_CHIPCLKCSR, &err)));
+#ifdef USE_CMD14
+		err = bcmsdh_sleep(bus->sdh, TRUE);
+#else
+		err = dhdsdio_clk_kso_enab(bus, FALSE);
+		if (OOB_WAKEUP_ENAB(bus))
+		{
+			err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, FALSE);  /* GPIO_1 is off */
+		}
+#endif /* USE_CMD14 */
+	} else {
+		/* Exit Sleep */
+		/* Make sure we have SD bus access */
+		if (bus->clkstate == CLK_NONE) {
+			DHD_TRACE(("%s: Request SD clk\n", __FUNCTION__));
+			dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+		}
+
+		if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev == 2)) {
+			SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+				(bcmsdh_gpioin(bus->sdh, GPIO_DEV_SRSTATE) != TRUE),
+				GPIO_DEV_SRSTATE_TIMEOUT);
+
+			if (bcmsdh_gpioin(bus->sdh, GPIO_DEV_SRSTATE) == FALSE) {
+				DHD_ERROR(("ERROR: GPIO_DEV_SRSTATE still low!\n"));
+			}
+		}
+#ifdef USE_CMD14
+		err = bcmsdh_sleep(bus->sdh, FALSE);
+		if (SLPAUTO_ENAB(bus) && (err != 0)) {
+			OSL_DELAY(10000);
+			DHD_TRACE(("%s: Resync device sleep\n", __FUNCTION__));
+
+			/* Toggle sleep to resync with host and device */
+			err = bcmsdh_sleep(bus->sdh, TRUE);
+			OSL_DELAY(10000);
+			err = bcmsdh_sleep(bus->sdh, FALSE);
+
+			if (err) {
+				OSL_DELAY(10000);
+				DHD_ERROR(("%s: CMD14 exit failed again!\n", __FUNCTION__));
+
+				/* Toggle sleep to resync with host and device */
+				err = bcmsdh_sleep(bus->sdh, TRUE);
+				OSL_DELAY(10000);
+				err = bcmsdh_sleep(bus->sdh, FALSE);
+				if (err) {
+					DHD_ERROR(("%s: CMD14 exit failed twice!\n", __FUNCTION__));
+					DHD_ERROR(("%s: FATAL: Device non-response!\n",
+						__FUNCTION__));
+					err = 0;
+				}
+			}
+		}
+#else
+		if (OOB_WAKEUP_ENAB(bus))
+		{
+			err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, TRUE);  /* GPIO_1 is on */
+		}
+		do {
+			err = dhdsdio_clk_kso_enab(bus, TRUE);
+			if (err)
+				OSL_SLEEP(10);
+		} while ((err != 0) && (++retry < 3));
+
+		if (err != 0) {
+			DHD_ERROR(("ERROR: kso set failed retry: %d\n", retry));
+			err = 0; /* continue anyway */
+		}
+#endif /* !USE_CMD14 */
+
+		if (err == 0) {
+			uint8 csr;
+
+			/* Wait for device ready during transition to wake-up */
+			SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+				(((csr = dhdsdio_sleepcsr_get(bus)) &
+				SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK) !=
+				(SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK)), (20000));
+
+			DHD_TRACE(("%s: ExitSleep sleepcsr: 0x%x\n", __FUNCTION__, csr));
+
+			if (!(csr & SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK)) {
+				DHD_ERROR(("%s:ERROR: ExitSleep device NOT Ready! 0x%x\n",
+					__FUNCTION__, csr));
+				err = BCME_NODEVICE;
+			}
+
+			SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+				(((csr = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+				SBSDIO_FUNC1_CHIPCLKCSR, &err)) & SBSDIO_HT_AVAIL) !=
+				(SBSDIO_HT_AVAIL)), (10000));
+
+			DHD_TRACE(("%s: SBSDIO_FUNC1_CHIPCLKCSR : 0x%x\n", __FUNCTION__, csr));
+			if (!err && ((csr & SBSDIO_HT_AVAIL) != SBSDIO_HT_AVAIL)) {
+				DHD_ERROR(("%s:ERROR: device NOT Ready! 0x%x\n",
+					__FUNCTION__, csr));
+				err = BCME_NODEVICE;
+			}
+		}
+	}
+
+	/* Update if successful (KSO state is the inverse of device sleep) */
+	if (err == 0)
+		bus->kso = on ? FALSE : TRUE;
+	else {
+		DHD_ERROR(("%s: Sleep request failed: kso:%d on:%d err:%d\n",
+			__FUNCTION__, bus->kso, on, err));
+		if (!on && retry > 2) {
+			bus->kso = FALSE;
+			dhd_os_send_hang_message(bus->dhd);
+		}
+	}
+
+	return err;
+}
+
+/* Turn backplane clock on or off */
+static int
+dhdsdio_htclk(dhd_bus_t *bus, bool on, bool pendok)
+{
+#define HT_AVAIL_ERROR_MAX 10
+	static int ht_avail_error = 0;
+	int err;
+	uint8 clkctl, clkreq, devctl;
+	bcmsdh_info_t *sdh;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	clkctl = 0;
+	sdh = bus->sdh;
+
+
+	if (!KSO_ENAB(bus))
+		return BCME_OK;
+
+	if (SLPAUTO_ENAB(bus)) {
+		bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY);
+		return BCME_OK;
+	}
+
+	if (on) {
+		/* Request HT Avail */
+		clkreq = bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
+
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+		if (err) {
+			ht_avail_error++;
+			if (ht_avail_error < HT_AVAIL_ERROR_MAX) {
+				DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err));
+			}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+			else if (ht_avail_error == HT_AVAIL_ERROR_MAX) {
+				dhd_os_send_hang_message(bus->dhd);
+			}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
+			return BCME_ERROR;
+		} else {
+			ht_avail_error = 0;
+		}
+
+
+		/* Check current status */
+		clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+		if (err) {
+			DHD_ERROR(("%s: HT Avail read error: %d\n", __FUNCTION__, err));
+			return BCME_ERROR;
+		}
+
+#if !defined(OOB_INTR_ONLY)
+		/* Go to pending and await interrupt if appropriate */
+		if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
+			/* Allow only clock-available interrupt */
+			devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+			if (err) {
+				DHD_ERROR(("%s: Devctl access error setting CA: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+
+			devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+			DHD_INFO(("CLKCTL: set PENDING\n"));
+			bus->clkstate = CLK_PENDING;
+			return BCME_OK;
+		} else
+#endif /* !defined (OOB_INTR_ONLY) */
+		{
+			if (bus->clkstate == CLK_PENDING) {
+				/* Cancel CA-only interrupt filter */
+				devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+				devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+				bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+			}
+		}
+
+		/* Otherwise, wait here (polling) for HT Avail */
+		if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+			SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+				((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+			                                    SBSDIO_FUNC1_CHIPCLKCSR, &err)),
+			          !SBSDIO_CLKAV(clkctl, bus->alp_only)), PMU_MAX_TRANSITION_DLY);
+		}
+		if (err) {
+			DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err));
+			return BCME_ERROR;
+		}
+		if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+			DHD_ERROR(("%s: HT Avail timeout (%d): clkctl 0x%02x\n",
+			           __FUNCTION__, PMU_MAX_TRANSITION_DLY, clkctl));
+			return BCME_ERROR;
+		}
+
+		/* Mark clock available */
+		bus->clkstate = CLK_AVAIL;
+		DHD_INFO(("CLKCTL: turned ON\n"));
+
+#if defined(DHD_DEBUG)
+		if (bus->alp_only == TRUE) {
+#if !defined(BCMLXSDMMC)
+			if (!SBSDIO_ALPONLY(clkctl)) {
+				DHD_ERROR(("%s: HT Clock, when ALP Only\n", __FUNCTION__));
+			}
+#endif /* !defined(BCMLXSDMMC) */
+		} else {
+			if (SBSDIO_ALPONLY(clkctl)) {
+				DHD_ERROR(("%s: HT Clock should be on.\n", __FUNCTION__));
+			}
+		}
+#endif /* defined (DHD_DEBUG) */
+
+		bus->activity = TRUE;
+#ifdef DHD_USE_IDLECOUNT
+		bus->idlecount = 0;
+#endif /* DHD_USE_IDLECOUNT */
+	} else {
+		clkreq = 0;
+
+		if (bus->clkstate == CLK_PENDING) {
+			/* Cancel CA-only interrupt filter */
+			devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+		}
+
+		bus->clkstate = CLK_SDONLY;
+		if (!SR_ENAB(bus)) {
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+			DHD_INFO(("CLKCTL: turned OFF\n"));
+			if (err) {
+				DHD_ERROR(("%s: Failed access turning clock off: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		}
+	}
+	return BCME_OK;
+}
+
+/* Change idle/active SD state */
+static int
+dhdsdio_sdclk(dhd_bus_t *bus, bool on)
+{
+	int err;
+	int32 iovalue;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (on) {
+		if (bus->idleclock == DHD_IDLE_STOP) {
+			/* Turn on clock and restore mode */
+			iovalue = 1;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error enabling sd_clock: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+
+			iovalue = bus->sd_mode;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error changing sd_mode: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		} else if (bus->idleclock != DHD_IDLE_ACTIVE) {
+			/* Restore clock speed */
+			iovalue = bus->sd_divisor;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error restoring sd_divisor: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		}
+		bus->clkstate = CLK_SDONLY;
+	} else {
+		/* Stop or slow the SD clock itself */
+		if ((bus->sd_divisor == -1) || (bus->sd_mode == -1)) {
+			DHD_TRACE(("%s: can't idle clock, divisor %d mode %d\n",
+			           __FUNCTION__, bus->sd_divisor, bus->sd_mode));
+			return BCME_ERROR;
+		}
+		if (bus->idleclock == DHD_IDLE_STOP) {
+			if (sd1idle) {
+				/* Change to SD1 mode and turn off clock */
+				iovalue = 1;
+				err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+				                      &iovalue, sizeof(iovalue), TRUE);
+				if (err) {
+					DHD_ERROR(("%s: error changing sd_clock: %d\n",
+					           __FUNCTION__, err));
+					return BCME_ERROR;
+				}
+			}
+
+			iovalue = 0;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error disabling sd_clock: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		} else if (bus->idleclock != DHD_IDLE_ACTIVE) {
+			/* Set divisor to idle value */
+			iovalue = bus->idleclock;
+			err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+			                      &iovalue, sizeof(iovalue), TRUE);
+			if (err) {
+				DHD_ERROR(("%s: error changing sd_divisor: %d\n",
+				           __FUNCTION__, err));
+				return BCME_ERROR;
+			}
+		}
+		bus->clkstate = CLK_NONE;
+	}
+
+	return BCME_OK;
+}
+
+/* Transition SD and backplane clock readiness */
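+/* Clock states form a ladder: CLK_NONE (SD clock idle) -> CLK_SDONLY (SD bus
+ * clocked) -> CLK_AVAIL (backplane HT clock also up); each case below moves one
+ * step at a time toward the requested target.
+ */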
+static int
+dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok)
+{
+	int ret = BCME_OK;
+#ifdef DHD_DEBUG
+	uint oldstate = bus->clkstate;
+#endif /* DHD_DEBUG */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Early exit if we're already there */
+	if (bus->clkstate == target) {
+		if (target == CLK_AVAIL) {
+			dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+			bus->activity = TRUE;
+#ifdef DHD_USE_IDLECOUNT
+			bus->idlecount = 0;
+#endif /* DHD_USE_IDLECOUNT */
+		}
+		return ret;
+	}
+
+	switch (target) {
+	case CLK_AVAIL:
+		/* Make sure SD clock is available */
+		if (bus->clkstate == CLK_NONE)
+			dhdsdio_sdclk(bus, TRUE);
+		/* Now request HT Avail on the backplane */
+		ret = dhdsdio_htclk(bus, TRUE, pendok);
+		if (ret == BCME_OK) {
+			dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+			bus->activity = TRUE;
+#ifdef DHD_USE_IDLECOUNT
+			bus->idlecount = 0;
+#endif /* DHD_USE_IDLECOUNT */
+		}
+		break;
+
+	case CLK_SDONLY:
+		/* Remove HT request, or bring up SD clock */
+		if (bus->clkstate == CLK_NONE)
+			ret = dhdsdio_sdclk(bus, TRUE);
+		else if (bus->clkstate == CLK_AVAIL)
+			ret = dhdsdio_htclk(bus, FALSE, FALSE);
+		else
+			DHD_ERROR(("dhdsdio_clkctl: request for %d -> %d\n",
+			           bus->clkstate, target));
+		if (ret == BCME_OK) {
+			dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+		}
+		break;
+
+	case CLK_NONE:
+		/* Make sure to remove HT request */
+		if (bus->clkstate == CLK_AVAIL)
+			ret = dhdsdio_htclk(bus, FALSE, FALSE);
+		/* Now remove the SD clock */
+		ret = dhdsdio_sdclk(bus, FALSE);
+#ifdef DHD_DEBUG
+		if (dhd_console_ms == 0)
+#endif /* DHD_DEBUG */
+		if (bus->poll == 0)
+			dhd_os_wd_timer(bus->dhd, 0);
+		break;
+	}
+#ifdef DHD_DEBUG
+	DHD_INFO(("dhdsdio_clkctl: %d -> %d\n", oldstate, bus->clkstate));
+#endif /* DHD_DEBUG */
+
+	return ret;
+}
+
+static int
+dhdsdio_bussleep(dhd_bus_t *bus, bool sleep)
+{
+	int err = 0;
+	bcmsdh_info_t *sdh = bus->sdh;
+	sdpcmd_regs_t *regs = bus->regs;
+	uint retries = 0;
+
+	DHD_INFO(("dhdsdio_bussleep: request %s (currently %s)\n",
+	          (sleep ? "SLEEP" : "WAKE"),
+	          (bus->sleeping ? "SLEEP" : "WAKE")));
+
+	if (bus->dhd->hang_was_sent)
+		return BCME_ERROR;
+
+	/* Done if we're already in the requested state */
+	if (sleep == bus->sleeping)
+		return BCME_OK;
+
+	/* Going to sleep: set the alarm and turn off the lights... */
+	if (sleep) {
+		/* Don't sleep if something is pending */
+		if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq))
+			return BCME_BUSY;
+
+
+		if (!SLPAUTO_ENAB(bus)) {
+			/* Disable SDIO interrupts (no longer interested) */
+			bcmsdh_intr_disable(bus->sdh);
+
+			/* Make sure the controller has the bus up */
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+			/* Tell device to start using OOB wakeup */
+			W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+			if (retries > retry_limit)
+				DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+
+			/* Turn off our contribution to the HT clock request */
+			dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+				SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
+
+			/* Isolate the bus */
+			if (bus->sih->chip != BCM4329_CHIP_ID &&
+				bus->sih->chip != BCM4319_CHIP_ID) {
+				bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL,
+					SBSDIO_DEVCTL_PADS_ISO, NULL);
+			}
+		} else {
+			/* Leave interrupts enabled since device can exit sleep and
+			 * interrupt host
+			 */
+			err = dhdsdio_clk_devsleep_iovar(bus, TRUE /* sleep */);
+		}
+
+		/* Change state */
+		bus->sleeping = TRUE;
+		wake_up(&bus->bus_sleep);
+	} else {
+		/* Waking up: bus power up is ok, set local state */
+
+		if (!SLPAUTO_ENAB(bus)) {
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, &err);
+
+			/* Force pad isolation off if possible (in case power never toggled) */
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, 0, NULL);
+
+
+			/* Make sure the controller has the bus up */
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+			/* Send misc interrupt to indicate OOB not needed */
+			W_SDREG(0, &regs->tosbmailboxdata, retries);
+			if (retries <= retry_limit)
+				W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+
+			if (retries > retry_limit)
+				DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n"));
+
+			/* Make sure we have SD bus access */
+			dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+			/* Enable interrupts again */
+			if (bus->intr && (bus->dhd->busstate == DHD_BUS_DATA)) {
+				bus->intdis = FALSE;
+				bcmsdh_intr_enable(bus->sdh);
+			}
+		} else {
+			err = dhdsdio_clk_devsleep_iovar(bus, FALSE /* wake */);
+		}
+
+		if (err == 0) {
+			/* Change state */
+			bus->sleeping = FALSE;
+		}
+	}
+
+	return err;
+}
+
+
+#if defined(OOB_INTR_ONLY)
+void
+dhd_enable_oob_intr(struct dhd_bus *bus, bool enable)
+{
+#if defined(HW_OOB)
+	bcmsdh_enable_hw_oob_intr(bus->sdh, enable);
+#else
+	sdpcmd_regs_t *regs = bus->regs;
+	uint retries = 0;
+
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+	if (enable == TRUE) {
+
+		/* Tell device to start using OOB wakeup */
+		W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+		if (retries > retry_limit)
+			DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+
+	} else {
+		/* Send misc interrupt to indicate OOB not needed */
+		W_SDREG(0, &regs->tosbmailboxdata, retries);
+		if (retries <= retry_limit)
+			W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+	}
+
+	/* Turn off our contribution to the HT clock request */
+	dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+#endif /* !defined(HW_OOB) */
+}
+#endif /* OOB_INTR_ONLY */
+
+int
+dhd_bus_txdata(struct dhd_bus *bus, void *pkt)
+{
+	int ret = BCME_ERROR;
+	osl_t *osh;
+	uint datalen, prec;
+#if defined(DHD_TX_DUMP) || defined(DHD_8021X_DUMP)
+	uint8 *dump_data;
+	uint16 protocol;
+#endif /* DHD_TX_DUMP || DHD_8021X_DUMP */
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	osh = bus->dhd->osh;
+	datalen = PKTLEN(osh, pkt);
+
+#ifdef SDTEST
+	/* Push the test header if doing loopback */
+	if (bus->ext_loop) {
+		uint8* data;
+		PKTPUSH(osh, pkt, SDPCM_TEST_HDRLEN);
+		data = PKTDATA(osh, pkt);
+		*data++ = SDPCM_TEST_ECHOREQ;
+		*data++ = (uint8)bus->loopid++;
+		*data++ = (datalen >> 0);
+		*data++ = (datalen >> 8);
+		datalen += SDPCM_TEST_HDRLEN;
+	}
+#else /* SDTEST */
+	BCM_REFERENCE(datalen);
+#endif /* SDTEST */
+
+#if defined(DHD_TX_DUMP) || defined(DHD_8021X_DUMP)
+	dump_data = PKTDATA(osh, pkt);
+	dump_data += 4; /* skip 4 bytes header */
+	protocol = (dump_data[12] << 8) | dump_data[13];
+
+	if (protocol == ETHER_TYPE_802_1X) {
+		DHD_ERROR(("ETHER_TYPE_802_1X [TX]: ver %d, type %d, replay %d\n",
+			dump_data[14], dump_data[15], dump_data[30]));
+	}
+#endif /* DHD_TX_DUMP || DHD_8021X_DUMP */
+
+#if defined(DHD_TX_DUMP) && defined(DHD_TX_FULL_DUMP)
+	{
+		int i;
+		DHD_ERROR(("TX DUMP\n"));
+
+		for (i = 0; i < (datalen - 4); i++) {
+			DHD_ERROR(("%02X ", dump_data[i]));
+			if ((i & 15) == 15)
+				printk("\n");
+		}
+		DHD_ERROR(("\n"));
+	}
+#endif /* DHD_TX_DUMP && DHD_TX_FULL_DUMP */
+
+	prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK));
+
+	/* Check for existing queue, current flow-control, pending event, or pending clock */
+	if (dhd_deferred_tx || bus->fcstate || pktq_len(&bus->txq) || bus->dpc_sched ||
+	    (!DATAOK(bus)) || (bus->flowcontrol & NBITVAL(prec)) ||
+	    (bus->clkstate != CLK_AVAIL)) {
+		bool deq_ret;
+		int pkq_len;
+
+		DHD_TRACE(("%s: deferring pktq len %d\n", __FUNCTION__, pktq_len(&bus->txq)));
+		bus->fcqueued++;
+
+		/* Priority based enq */
+		dhd_os_sdlock_txq(bus->dhd);
+		deq_ret = dhd_prec_enq(bus->dhd, &bus->txq, pkt, prec);
+		dhd_os_sdunlock_txq(bus->dhd);
+
+		if (!deq_ret) {
+#ifdef PROP_TXSTATUS
+			if (DHD_PKTTAG_WLFCPKT(PKTTAG(pkt)) == 0)
+#endif /* PROP_TXSTATUS */
+			{
+#ifdef DHDTCPACK_SUPPRESS
+				if (dhd_tcpack_check_xmit(bus->dhd, pkt) == BCME_ERROR) {
+					DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using\n",
+						__FUNCTION__, __LINE__));
+					dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF);
+				}
+#endif /* DHDTCPACK_SUPPRESS */
+				dhd_txcomplete(bus->dhd, pkt, FALSE);
+				PKTFREE(osh, pkt, TRUE);
+			}
+			ret = BCME_NORESOURCE;
+		} else
+			ret = BCME_OK;
+
+		dhd_os_sdlock_txq(bus->dhd);
+		pkq_len = pktq_len(&bus->txq);
+		dhd_os_sdunlock_txq(bus->dhd);
+		if (pkq_len >= FCHI) {
+			bool wlfc_enabled = FALSE;
+#ifdef PROP_TXSTATUS
+			wlfc_enabled = (dhd_wlfc_flowcontrol(bus->dhd, ON, FALSE) !=
+				WLFC_UNSUPPORTED);
+#endif
+			if (!wlfc_enabled && dhd_doflow) {
+				dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+			}
+		}
+
+#ifdef DHD_DEBUG
+		dhd_os_sdlock_txq(bus->dhd);
+		if (pktq_plen(&bus->txq, prec) > qcount[prec])
+			qcount[prec] = pktq_plen(&bus->txq, prec);
+		dhd_os_sdunlock_txq(bus->dhd);
+#endif
+
+		/* Schedule DPC if needed to send queued packet(s) */
+		if (dhd_deferred_tx && !bus->dpc_sched) {
+			bus->dpc_sched = TRUE;
+			dhd_sched_dpc(bus->dhd);
+		}
+	} else {
+		int chan = SDPCM_DATA_CHANNEL;
+
+#ifdef SDTEST
+		chan = (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL);
+#endif
+		/* Lock: we're about to use shared data/code (and SDIO) */
+		dhd_os_sdlock(bus->dhd);
+
+		/* Otherwise, send it now */
+		BUS_WAKE(bus);
+		/* Make sure the backplane HT clock is on, no pending allowed */
+		dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
+
+		ret = dhdsdio_txpkt(bus, chan, &pkt, 1, TRUE);
+
+		if (ret != BCME_OK)
+			bus->dhd->tx_errors++;
+		else
+			bus->dhd->dstats.tx_bytes += datalen;
+
+		if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+			bus->activity = FALSE;
+			dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+		}
+
+		dhd_os_sdunlock(bus->dhd);
+	}
+
+	return ret;
+}
+
+/* Align the packet data pointer and packet length to an n-byte boundary and process the
+ * packet headers. A new packet may be allocated if there is not enough head and/or tail
+ * room for padding. The caller is responsible for updating the glom size in the head
+ * packet (when glom is used).
+ *
+ * pad_pkt_len: returns the length of extra padding needed from the padding packet; this
+ * parameter is only used in tx glom mode
+ *
+ * new_pkt: out, pointer to a new packet allocated due to insufficient head room for
+ * alignment padding, NULL if not needed; the caller is responsible for freeing it
+ *
+ * return: positive value - length of the packet, including head and tail padding
+ *		   negative value - errors
+ */
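+/* Worked example (hypothetical values; assuming DHD_SDALIGN = 32 and a 512-byte
+ * block size): with a data pointer 12 bytes past a 32-byte boundary and
+ * pkt_len = 1500, head_padding = 12 (PKTPUSH backs the pointer onto the boundary)
+ * and tail_padding = 32 - ((1500 + 12) % 32) = 24, giving 12 + 1500 + 24 = 1536
+ * bytes; 1536 is already a multiple of the 512-byte block size, so
+ * chain_tail_padding stays 0 for a single-pkt chain.
+ */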
+static int dhdsdio_txpkt_preprocess(dhd_bus_t *bus, void *pkt, int chan, int txseq,
+	int prev_chain_total_len, bool last_chained_pkt,
+	int *pad_pkt_len, void **new_pkt)
+{
+	osl_t *osh;
+	uint8 *frame;
+	int pkt_len;
+	int modulo;
+	int head_padding;
+	int tail_padding = 0;
+	uint32 swheader;
+	uint32 swhdr_offset;
+	bool alloc_new_pkt = FALSE;
+	uint8 sdpcm_hdrlen = bus->txglom_enable ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN;
+
+	*new_pkt = NULL;
+	osh = bus->dhd->osh;
+
+#ifdef DHDTCPACK_SUPPRESS
+	if (dhd_tcpack_check_xmit(bus->dhd, pkt) == BCME_ERROR) {
+		DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
+			__FUNCTION__, __LINE__));
+		dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF);
+	}
+#endif /* DHDTCPACK_SUPPRESS */
+
+	/* Add space for the SDPCM hardware/software headers */
+	PKTPUSH(osh, pkt, sdpcm_hdrlen);
+	ASSERT(ISALIGNED((uintptr)PKTDATA(osh, pkt), 2));
+
+	frame = (uint8*)PKTDATA(osh, pkt);
+	pkt_len = (uint16)PKTLEN(osh, pkt);
+
+#ifdef WLMEDIA_HTSF
+	frame = (uint8*)PKTDATA(osh, pkt);
+	if (PKTLEN(osh, pkt) >= 100) {
+		htsf_ts = (htsfts_t*) (frame + HTSF_HOSTOFFSET + 12);
+		if (htsf_ts->magic == HTSFMAGIC) {
+			htsf_ts->c20 = get_cycles();
+			htsf_ts->t20 = dhd_get_htsf(bus->dhd->info, 0);
+		}
+	}
+#endif /* WLMEDIA_HTSF */
+#ifdef DHD_DEBUG
+	if (PKTPRIO(pkt) < ARRAYSIZE(tx_packets))
+		tx_packets[PKTPRIO(pkt)]++;
+#endif /* DHD_DEBUG */
+
+	/* align the data pointer; allocate a new packet if there is not enough headroom
+	 * (the new packet's data pointer will be aligned, so no padding will be needed)
+	 */
+	head_padding = (ulong)frame % DHD_SDALIGN;
+	if (PKTHEADROOM(osh, pkt) < head_padding) {
+		head_padding = 0;
+		alloc_new_pkt = TRUE;
+	} else {
+		uint cur_chain_total_len;
+		int chain_tail_padding = 0;
+
+		/* All packets need to be aligned by DHD_SDALIGN */
+		modulo = (pkt_len + head_padding) % DHD_SDALIGN;
+		tail_padding = modulo > 0 ? (DHD_SDALIGN - modulo) : 0;
+
+		/* Total pkt chain length needs to be aligned by block size,
+		 * unless it is a single pkt chain with total length less than one block size,
+		 * which we prefer sending by byte mode.
+		 *
+		 * Do the chain alignment here if
+		 * 1. This is the last pkt of the chain of multiple pkts or a single pkt.
+		 * 2-1. This chain is of multiple pkts, or
+		 * 2-2. This is a single pkt whose size is longer than one block size.
+		 */
+		cur_chain_total_len = prev_chain_total_len +
+			(head_padding + pkt_len + tail_padding);
+		if (last_chained_pkt && bus->blocksize != 0 &&
+			(cur_chain_total_len > (int)bus->blocksize || prev_chain_total_len > 0)) {
+			modulo = cur_chain_total_len % bus->blocksize;
+			chain_tail_padding = modulo > 0 ? (bus->blocksize - modulo) : 0;
+		}
+
+#ifdef DHDENABLE_TAILPAD
+		if (PKTTAILROOM(osh, pkt) < tail_padding) {
+			/* We don't have tail room to align by DHD_SDALIGN */
+			alloc_new_pkt = TRUE;
+			bus->tx_tailpad_pktget++;
+		} else if (PKTTAILROOM(osh, pkt) < tail_padding + chain_tail_padding) {
+			/* We have tail room for tail_padding of this pkt itself, but not for
+			 * total pkt chain alignment by block size.
+			 * Use the padding packet to avoid memory copy if applicable,
+			 * otherwise, just allocate a new pkt.
+			 */
+			if (bus->pad_pkt) {
+				*pad_pkt_len = chain_tail_padding;
+				bus->tx_tailpad_chain++;
+			} else {
+				alloc_new_pkt = TRUE;
+				bus->tx_tailpad_pktget++;
+			}
+		} else
+		/* This last pkt's tailroom is sufficient to hold both tail_padding
+		 * of the pkt itself and chain_tail_padding of total pkt chain
+		 */
+#endif /* DHDENABLE_TAILPAD */
+		tail_padding += chain_tail_padding;
+	}
+
+	DHD_INFO(("%s sdhdr len + orig_pkt_len %d h_pad %d t_pad %d pad_pkt_len %d\n",
+		__FUNCTION__, pkt_len, head_padding, tail_padding, *pad_pkt_len));
+
+	if (alloc_new_pkt) {
+		void *tmp_pkt;
+		int newpkt_size;
+		int cur_total_len;
+
+		ASSERT(*pad_pkt_len == 0);
+
+		DHD_INFO(("%s allocating new packet for padding\n", __FUNCTION__));
+
+		/* head pointer is aligned now, no padding needed */
+		head_padding = 0;
+
+		/* Update the tail padding as it depends on the head padding: since a new
+		 * packet is allocated, the head padding is no longer needed and the packet
+		 * length changes.
+		 */
+
+		cur_total_len = prev_chain_total_len + pkt_len;
+		if (last_chained_pkt && bus->blocksize != 0 &&
+			(cur_total_len > (int)bus->blocksize || prev_chain_total_len > 0)) {
+			modulo = cur_total_len % bus->blocksize;
+			tail_padding = modulo > 0 ? (bus->blocksize - modulo) : 0;
+		}
+		else {
+			modulo = pkt_len % DHD_SDALIGN;
+			tail_padding = modulo > 0 ? (DHD_SDALIGN - modulo) : 0;
+		}
+
+		newpkt_size = PKTLEN(osh, pkt) + bus->blocksize + DHD_SDALIGN;
+		bus->dhd->tx_realloc++;
+		tmp_pkt = PKTGET(osh, newpkt_size, TRUE);
+		if (tmp_pkt == NULL) {
+			DHD_ERROR(("failed to alloc new %d byte packet\n", newpkt_size));
+			return BCME_NOMEM;
+		}
+		PKTALIGN(osh, tmp_pkt, PKTLEN(osh, pkt), DHD_SDALIGN);
+		bcopy(PKTDATA(osh, pkt), PKTDATA(osh, tmp_pkt), PKTLEN(osh, pkt));
+		*new_pkt = tmp_pkt;
+		pkt = tmp_pkt;
+	}
+
+	if (head_padding)
+		PKTPUSH(osh, pkt, head_padding);
+
+	frame = (uint8*)PKTDATA(osh, pkt);
+	bzero(frame, head_padding + sdpcm_hdrlen);
+	pkt_len = (uint16)PKTLEN(osh, pkt);
+
+	/* the header has the following format
+	 * 4-byte HW frame tag: length, ~length (for glom this is the total length)
+	 *
+	 * 8-byte HW extension flags (glom mode only) as follows:
+	 *			2-byte packet length, excluding HW tag and padding
+	 *			2-byte frame channel and frame flags (e.g. next frame following)
+	 *			2-byte header length
+	 *			2-byte tail padding size
+	 *
+	 * 8-byte SW frame tags as follows:
+	 *			4-byte flags: host tx seq, channel, data offset
+	 *			4-byte flags: TBD
+	 */
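+	/* Per the layout above, glom-mode byte offsets are: HW frame tag at 0..3,
+	 * HW extension flags at 4..11, SW frame tags at 12..19; in non-glom mode the
+	 * SW frame tags immediately follow the HW tag at offset 4.
+	 */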
+
+	swhdr_offset = SDPCM_FRAMETAG_LEN;
+
+	/* hardware frame tag:
+	 *
+	 * in tx-glom mode, the dongle only checks the hardware frame tag in the first
+	 * packet and treats it as the total length of the glom (including tail padding);
+	 * for each packet in the glom, the packet length needs to be updated (see
+	 * PKTSETLEN below)
+	 *
+	 * in non tx-glom mode, PKTLEN still needs to include tail padding, as it is
+	 * referred to in sdioh_request_buffer(). The tail length will be excluded in
+	 * dhdsdio_txpkt_postprocess().
+	 */
+	*(uint16*)frame = (uint16)htol16(pkt_len);
+	*(((uint16*)frame) + 1) = (uint16)htol16(~pkt_len);
+	pkt_len += tail_padding;
+
+	/* hardware extension flags */
+	if (bus->txglom_enable) {
+		uint32 hwheader1;
+		uint32 hwheader2;
+
+		swhdr_offset += SDPCM_HWEXT_LEN;
+		hwheader1 = (pkt_len - SDPCM_FRAMETAG_LEN - tail_padding) |
+			(last_chained_pkt << 24);
+		hwheader2 = (tail_padding) << 16;
+		htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
+		htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
+	}
+	PKTSETLEN((osh), (pkt), (pkt_len));
+
+	/* software frame tags */
+	swheader = ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+		| (txseq % SDPCM_SEQUENCE_WRAP) |
+		(((head_padding + sdpcm_hdrlen) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+	htol32_ua_store(swheader, frame + swhdr_offset);
+	htol32_ua_store(0, frame + swhdr_offset + sizeof(swheader));
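+	/* The store above packs the tx sequence number (modulo SDPCM_SEQUENCE_WRAP)
+	 * together with the channel and data offset fields, placed at the bit
+	 * positions selected by the SDPCM_*_SHIFT/MASK definitions.
+	 */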
+
+	return pkt_len;
+}
+
+static int dhdsdio_txpkt_postprocess(dhd_bus_t *bus, void *pkt)
+{
+	osl_t *osh;
+	uint8 *frame;
+	int data_offset;
+	int tail_padding;
+	int swhdr_offset = SDPCM_FRAMETAG_LEN + (bus->txglom_enable ? SDPCM_HWEXT_LEN : 0);
+
+	(void)osh;
+	osh = bus->dhd->osh;
+
+	/* restore the pkt buffer pointer, but keep the header pushed by dhd_prot_hdrpush */
+	frame = (uint8*)PKTDATA(osh, pkt);
+
+	DHD_INFO(("%s PKTLEN before postprocess %d",
+		__FUNCTION__, PKTLEN(osh, pkt)));
+
+	/* PKTLEN still includes tail_padding, so exclude it.
+	 * We shall have head_padding + original pkt_len for PKTLEN afterwards.
+	 */
+	if (bus->txglom_enable) {
+		/* txglom pkts have tail_padding length in HW ext header */
+		tail_padding = ltoh32_ua(frame + SDPCM_FRAMETAG_LEN + 4) >> 16;
+		PKTSETLEN(osh, pkt, PKTLEN(osh, pkt) - tail_padding);
+		DHD_INFO((" txglom pkt: tail_padding %d PKTLEN %d\n",
+			tail_padding, PKTLEN(osh, pkt)));
+	} else {
+		/* non-txglom pkts have head_padding + original pkt length in HW frame tag.
+		 * We cannot refer to this field for txglom pkts as the first pkt of the chain will
+		 * have the field for the total length of the chain.
+		 */
+		PKTSETLEN(osh, pkt, *(uint16*)frame);
+		DHD_INFO((" non-txglom pkt: HW frame tag len %d after PKTLEN %d\n",
+			*(uint16*)frame, PKTLEN(osh, pkt)));
+	}
+
+	data_offset = ltoh32_ua(frame + swhdr_offset);
+	data_offset = (data_offset & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT;
+	/* Get rid of sdpcm header + head_padding */
+	PKTPULL(osh, pkt, data_offset);
+
+	DHD_INFO(("%s data_offset %d, PKTLEN %d\n",
+		__FUNCTION__, data_offset, PKTLEN(osh, pkt)));
+
+	return BCME_OK;
+}
+
+static int dhdsdio_txpkt(dhd_bus_t *bus, uint chan, void** pkts, int num_pkt, bool free_pkt)
+{
+	int i;
+	int ret = 0;
+	osl_t *osh;
+	bcmsdh_info_t *sdh;
+	void *pkt = NULL;
+	void *pkt_chain;
+	int total_len = 0;
+	void *head_pkt = NULL;
+	void *prev_pkt = NULL;
+	int pad_pkt_len = 0;
+	int new_pkt_num = 0;
+	void *new_pkts[MAX_TX_PKTCHAIN_CNT];
+	bool wlfc_enabled = FALSE;
+
+	if (bus->dhd->dongle_reset)
+		return BCME_NOTREADY;
+
+	sdh = bus->sdh;
+	osh = bus->dhd->osh;
+	/* init new_pkts[0] to keep some compilers happy; not strictly necessary as we check new_pkt_num */
+	new_pkts[0] = NULL;
+
+	for (i = 0; i < num_pkt; i++) {
+		int pkt_len;
+		bool last_pkt;
+		void *new_pkt = NULL;
+
+		pkt = pkts[i];
+		ASSERT(pkt);
+		last_pkt = (i == num_pkt - 1);
+		pkt_len = dhdsdio_txpkt_preprocess(bus, pkt, chan, bus->tx_seq + i,
+			total_len, last_pkt, &pad_pkt_len, &new_pkt);
+		if (pkt_len <= 0)
+			goto done;
+		if (new_pkt) {
+			pkt = new_pkt;
+			new_pkts[new_pkt_num++] = new_pkt;
+		}
+		total_len += pkt_len;
+
+		PKTSETNEXT(osh, pkt, NULL);
+		/* insert the packet into the list: set the head on the first iteration, else append */
+		head_pkt ? PKTSETNEXT(osh, prev_pkt, pkt) : (head_pkt = pkt);
+		prev_pkt = pkt;
+
+	}
+
+	/* Update the HW frame tag (total length) in the first pkt of the glom */
+	if (bus->txglom_enable) {
+		uint8 *frame;
+
+		total_len += pad_pkt_len;
+		frame = (uint8*)PKTDATA(osh, head_pkt);
+		*(uint16*)frame = (uint16)htol16(total_len);
+		*(((uint16*)frame) + 1) = (uint16)htol16(~total_len);
+
+	}
+
+	/* if a padding packet is needed, insert it at the end of the linked list */
+	if (pad_pkt_len) {
+		PKTSETLEN(osh, bus->pad_pkt, pad_pkt_len);
+		PKTSETNEXT(osh, pkt, bus->pad_pkt);
+	}
+
+	/* dhd_bcmsdh_send_buf ignores the buffer pointer if the packet
+	 * parameter is not NULL; for a non packet chain we pass a NULL pkt pointer
+	 * so it will take the aligned length and buffer pointer.
+	 */
+	pkt_chain = PKTNEXT(osh, head_pkt) ? head_pkt : NULL;
+	ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		PKTDATA(osh, head_pkt), total_len, pkt_chain, NULL, NULL, TXRETRIES);
+	if (ret == BCME_OK)
+		bus->tx_seq = (bus->tx_seq + num_pkt) % SDPCM_SEQUENCE_WRAP;
+
+	/* if a padding packet was needed, remove it from the linked list as it is not a data pkt */
+	if (pad_pkt_len && pkt)
+		PKTSETNEXT(osh, pkt, NULL);
+
+done:
+	pkt = head_pkt;
+	while (pkt) {
+		void *pkt_next = PKTNEXT(osh, pkt);
+		PKTSETNEXT(osh, pkt, NULL);
+		dhdsdio_txpkt_postprocess(bus, pkt);
+		pkt = pkt_next;
+	}
+
+	/* new packets might have been allocated due to insufficient room for padding,
+	 * but we still have to hand the original packets back to the upper layer
+	 */
+	for (i = 0; i < num_pkt; i++) {
+		pkt = pkts[i];
+		wlfc_enabled = FALSE;
+#ifdef PROP_TXSTATUS
+		if (DHD_PKTTAG_WLFCPKT(PKTTAG(pkt))) {
+			wlfc_enabled = (dhd_wlfc_txcomplete(bus->dhd, pkt, ret == 0) !=
+				WLFC_UNSUPPORTED);
+		}
+#endif /* PROP_TXSTATUS */
+		if (!wlfc_enabled) {
+			PKTSETNEXT(osh, pkt, NULL);
+			dhd_txcomplete(bus->dhd, pkt, ret != 0);
+			if (free_pkt)
+				PKTFREE(osh, pkt, TRUE);
+		}
+	}
+
+	for (i = 0; i < new_pkt_num; i++)
+		PKTFREE(osh, new_pkts[i], TRUE);
+
+	return ret;
+}
+
+static uint
+dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes)
+{
+	uint cnt = 0;
+	uint8 tx_prec_map;
+	uint16 txpktqlen = 0;
+	uint32 intstatus = 0;
+	uint retries = 0;
+	osl_t *osh;
+	uint datalen = 0;
+	dhd_pub_t *dhd = bus->dhd;
+	sdpcmd_regs_t *regs = bus->regs;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+		return BCME_NODEVICE;
+	}
+
+	osh = dhd->osh;
+	tx_prec_map = ~bus->flowcontrol;
+	for (cnt = 0; (cnt < maxframes) && DATAOK(bus);) {
+		int i;
+		int num_pkt = 1;
+		void *pkts[MAX_TX_PKTCHAIN_CNT];
+		int prec_out;
+
+		dhd_os_sdlock_txq(bus->dhd);
+		if (bus->txglom_enable) {
+			num_pkt = MIN((uint32)DATABUFCNT(bus), (uint32)bus->txglomsize);
+			num_pkt = MIN(num_pkt, ARRAYSIZE(pkts));
+		}
+		num_pkt = MIN(num_pkt, pktq_mlen(&bus->txq, tx_prec_map));
+		for (i = 0; i < num_pkt; i++) {
+			pkts[i] = pktq_mdeq(&bus->txq, ~bus->flowcontrol, &prec_out);
+			datalen += PKTLEN(osh, pkts[i]);
+		}
+		dhd_os_sdunlock_txq(bus->dhd);
+
+		if (i == 0)
+			break;
+		if (dhdsdio_txpkt(bus, SDPCM_DATA_CHANNEL, pkts, i, TRUE) != BCME_OK)
+			dhd->tx_errors++;
+		else
+			dhd->dstats.tx_bytes += datalen;
+		cnt += i;
+
+		/* In poll mode, need to check for other events */
+		if (!bus->intr && cnt)
+		{
+			/* Check device status, signal pending interrupt */
+			R_SDREG(intstatus, &regs->intstatus, retries);
+			bus->f2txdata++;
+			if (bcmsdh_regfail(bus->sdh))
+				break;
+			if (intstatus & bus->hostintmask)
+				bus->ipend = TRUE;
+		}
+
+	}
+
+	dhd_os_sdlock_txq(bus->dhd);
+	txpktqlen = pktq_len(&bus->txq);
+	dhd_os_sdunlock_txq(bus->dhd);
+
+	/* Do flow-control if needed */
+	if (dhd->up && (dhd->busstate == DHD_BUS_DATA) && (txpktqlen < FCLOW)) {
+		bool wlfc_enabled = FALSE;
+#ifdef PROP_TXSTATUS
+		wlfc_enabled = (dhd_wlfc_flowcontrol(dhd, OFF, TRUE) != WLFC_UNSUPPORTED);
+#endif
+		if (!wlfc_enabled && dhd_doflow && dhd->txoff) {
+			dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
+		}
+	}
+
+	return cnt;
+}
+
+static void
+dhdsdio_sendpendctl(dhd_bus_t *bus)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	int ret;
+	uint8* frame_seq = bus->ctrl_frame_buf + SDPCM_FRAMETAG_LEN;
+
+	if (bus->txglom_enable)
+		frame_seq += SDPCM_HWEXT_LEN;
+
+	if (*frame_seq != bus->tx_seq) {
+		DHD_INFO(("%s IOCTL frame seq lag detected!"
+			" frm_seq:%d != bus->tx_seq:%d, corrected\n",
+			__FUNCTION__, *frame_seq, bus->tx_seq));
+		*frame_seq = bus->tx_seq;
+	}
+
+	ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		(uint8 *)bus->ctrl_frame_buf, (uint32)bus->ctrl_frame_len,
+		NULL, NULL, NULL, 1);
+	if (ret == BCME_OK)
+		bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+
+	bus->ctrl_frame_stat = FALSE;
+	dhd_wait_event_wakeup(bus->dhd);
+}
+
+int
+dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+	static int err_nodevice = 0;
+	uint8 *frame;
+	uint16 len;
+	uint32 swheader;
+	bcmsdh_info_t *sdh = bus->sdh;
+	uint8 doff = 0;
+	int ret = -1;
+	uint8 sdpcm_hdrlen = bus->txglom_enable ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd->dongle_reset)
+		return -EIO;
+
+	/* Back up the pointer to make room for the bus header */
+	frame = msg - sdpcm_hdrlen;
+	len = (msglen += sdpcm_hdrlen);
+
+	/* Add alignment padding (optional for ctl frames) */
+	if (dhd_alignctl) {
+		if ((doff = ((uintptr)frame % DHD_SDALIGN))) {
+			frame -= doff;
+			len += doff;
+			msglen += doff;
+			bzero(frame, doff + sdpcm_hdrlen);
+		}
+		ASSERT(doff < DHD_SDALIGN);
+	}
+	doff += sdpcm_hdrlen;
+
+	/* Round send length to next SDIO block */
+	if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+		uint16 pad = bus->blocksize - (len % bus->blocksize);
+		if ((pad <= bus->roundup) && (pad < bus->blocksize))
+			len += pad;
+	} else if (len % DHD_SDALIGN) {
+		len += DHD_SDALIGN - (len % DHD_SDALIGN);
+	}
+
+	/* Satisfy length-alignment requirements */
+	if (forcealign && (len & (ALIGNMENT - 1)))
+		len = ROUNDUP(len, ALIGNMENT);
+
+	ASSERT(ISALIGNED((uintptr)frame, 2));
+
+
+	/* Need to lock here to protect txseq and SDIO tx calls */
+	dhd_os_sdlock(bus->dhd);
+
+	BUS_WAKE(bus);
+
+	/* Make sure backplane clock is on */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	/* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
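+	/* For example, msglen = 0x0040 stores bytes 40 00 bf ff at the start of the
+	 * frame (len = 0x0040, ~len = 0xffbf, both little-endian).
+	 */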
+	*(uint16*)frame = htol16((uint16)msglen);
+	*(((uint16*)frame) + 1) = htol16(~msglen);
+
+	if (bus->txglom_enable) {
+		uint32 hwheader1, hwheader2;
+		/* Software tag: channel, sequence number, data offset */
+		swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+				| bus->tx_seq
+				| ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+		htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN + SDPCM_HWEXT_LEN);
+		htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN
+			+ SDPCM_HWEXT_LEN + sizeof(swheader));
+
+		hwheader1 = (msglen - SDPCM_FRAMETAG_LEN) | (1 << 24);
+		hwheader2 = (len - (msglen)) << 16;
+		htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
+		htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
+
+		*(uint16*)frame = htol16(len);
+		*(((uint16*)frame) + 1) = htol16(~(len));
+	} else {
+		/* Software tag: channel, sequence number, data offset */
+		swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+		        | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+		htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN);
+		htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
+	}
+	if (!TXCTLOK(bus)) {
+		DHD_INFO(("%s: No bus credit bus->tx_max %d, bus->tx_seq %d\n",
+			__FUNCTION__, bus->tx_max, bus->tx_seq));
+		bus->ctrl_frame_stat = TRUE;
+		/* Send from dpc */
+		bus->ctrl_frame_buf = frame;
+		bus->ctrl_frame_len = len;
+
+		if (!bus->dpc_sched) {
+			bus->dpc_sched = TRUE;
+			dhd_sched_dpc(bus->dhd);
+		}
+		if (bus->ctrl_frame_stat) {
+			dhd_wait_for_event(bus->dhd, &bus->ctrl_frame_stat);
+		}
+
+		if (bus->ctrl_frame_stat == FALSE) {
+			DHD_INFO(("%s: ctrl_frame_stat == FALSE\n", __FUNCTION__));
+			ret = 0;
+		} else {
+			bus->dhd->txcnt_timeout++;
+			if (!bus->dhd->hang_was_sent) {
+				DHD_ERROR(("%s: ctrl_frame_stat == TRUE txcnt_timeout=%d\n",
+					__FUNCTION__, bus->dhd->txcnt_timeout));
+			}
+			ret = -1;
+			bus->ctrl_frame_stat = FALSE;
+			goto done;
+		}
+	}
+
+	bus->dhd->txcnt_timeout = 0;
+	bus->ctrl_frame_stat = TRUE;
+
+	if (ret == -1) {
+#ifdef DHD_DEBUG
+		if (DHD_BYTES_ON() && DHD_CTL_ON()) {
+			prhex("Tx Frame", frame, len);
+		} else if (DHD_HDRS_ON()) {
+			prhex("TxHdr", frame, MIN(len, 16));
+		}
+#endif
+		ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		                          frame, len, NULL, NULL, NULL, TXRETRIES);
+		if (ret == BCME_OK)
+			bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+	}
+	bus->ctrl_frame_stat = FALSE;
+
+done:
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+		bus->activity = FALSE;
+		dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+
+	if (ret)
+		bus->dhd->tx_ctlerrs++;
+	else
+		bus->dhd->tx_ctlpkts++;
+
+	if (bus->dhd->txcnt_timeout >= MAX_CNTL_TX_TIMEOUT)
+		return -ETIMEDOUT;
+
+	if (ret == BCME_NODEVICE)
+		err_nodevice++;
+	else
+		err_nodevice = 0;
+
+	return ret ? err_nodevice >= ERROR_BCME_NODEVICE_MAX ? -ETIMEDOUT : -EIO : 0;
+}
+
+int
+dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+	int timeleft;
+	uint rxlen = 0;
+	bool pending;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd->dongle_reset)
+		return -EIO;
+
+	/* Wait until control frame is available */
+	timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, &pending);
+
+	dhd_os_sdlock(bus->dhd);
+	rxlen = bus->rxlen;
+	bcopy(bus->rxctl, msg, MIN(msglen, rxlen));
+	bus->rxlen = 0;
+	dhd_os_sdunlock(bus->dhd);
+
+	if (rxlen) {
+		DHD_CTL(("%s: resumed on rxctl frame, got %d expected %d\n",
+			__FUNCTION__, rxlen, msglen));
+	} else if (timeleft == 0) {
+#ifdef DHD_DEBUG
+		uint32 status, retry = 0;
+		R_SDREG(status, &bus->regs->intstatus, retry);
+		DHD_ERROR(("%s: resumed on timeout, INT status=0x%08X\n",
+			__FUNCTION__, status));
+#else
+		DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
+#endif /* DHD_DEBUG */
+#ifdef DHD_DEBUG
+		dhd_os_sdlock(bus->dhd);
+		dhdsdio_checkdied(bus, NULL, 0);
+		dhd_os_sdunlock(bus->dhd);
+#endif /* DHD_DEBUG */
+	} else if (pending == TRUE) {
+		/* signal pending */
+		DHD_ERROR(("%s: signal pending\n", __FUNCTION__));
+		return -EINTR;
+
+	} else {
+		DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__));
+#ifdef DHD_DEBUG
+		dhd_os_sdlock(bus->dhd);
+		dhdsdio_checkdied(bus, NULL, 0);
+		dhd_os_sdunlock(bus->dhd);
+#endif /* DHD_DEBUG */
+	}
+	if (timeleft == 0) {
+		if (rxlen == 0)
+			bus->dhd->rxcnt_timeout++;
+		DHD_ERROR(("%s: rxcnt_timeout=%d, rxlen=%d\n", __FUNCTION__,
+			bus->dhd->rxcnt_timeout, rxlen));
+	}
+	else
+		bus->dhd->rxcnt_timeout = 0;
+
+	if (rxlen)
+		bus->dhd->rx_ctlpkts++;
+	else
+		bus->dhd->rx_ctlerrs++;
+
+	if (bus->dhd->rxcnt_timeout >= MAX_CNTL_RX_TIMEOUT)
+		return -ETIMEDOUT;
+
+	if (bus->dhd->dongle_trap_occured)
+		return -EREMOTEIO;
+
+	return rxlen ? (int)rxlen : -EIO;
+}
+
+/* IOVar table */
+enum {
+	IOV_INTR = 1,
+	IOV_POLLRATE,
+	IOV_SDREG,
+	IOV_SBREG,
+	IOV_SDCIS,
+	IOV_MEMBYTES,
+	IOV_RAMSIZE,
+	IOV_RAMSTART,
+#ifdef DHD_DEBUG
+	IOV_CHECKDIED,
+	IOV_SERIALCONS,
+#endif /* DHD_DEBUG */
+	IOV_SET_DOWNLOAD_STATE,
+	IOV_SOCRAM_STATE,
+	IOV_FORCEEVEN,
+	IOV_SDIOD_DRIVE,
+	IOV_READAHEAD,
+	IOV_SDRXCHAIN,
+	IOV_ALIGNCTL,
+	IOV_SDALIGN,
+	IOV_DEVRESET,
+	IOV_CPU,
+#if defined(USE_SDIOFIFO_IOVAR)
+	IOV_WATERMARK,
+	IOV_MESBUSYCTRL,
+#endif /* USE_SDIOFIFO_IOVAR */
+#ifdef SDTEST
+	IOV_PKTGEN,
+	IOV_EXTLOOP,
+#endif /* SDTEST */
+	IOV_SPROM,
+	IOV_TXBOUND,
+	IOV_RXBOUND,
+	IOV_TXMINMAX,
+	IOV_IDLETIME,
+	IOV_IDLECLOCK,
+	IOV_SD1IDLE,
+	IOV_SLEEP,
+	IOV_DONGLEISOLATION,
+	IOV_KSO,
+	IOV_DEVSLEEP,
+	IOV_DEVCAP,
+	IOV_VARS,
+#ifdef SOFTAP
+	IOV_FWPATH,
+#endif
+	IOV_TXGLOMSIZE,
+	IOV_TXGLOMMODE,
+	IOV_HANGREPORT,
+	IOV_TXINRX_THRES
+};
+
+const bcm_iovar_t dhdsdio_iovars[] = {
+	{"intr",	IOV_INTR,	0,	IOVT_BOOL,	0 },
+	{"sleep",	IOV_SLEEP,	0,	IOVT_BOOL,	0 },
+	{"pollrate",	IOV_POLLRATE,	0,	IOVT_UINT32,	0 },
+	{"idletime",	IOV_IDLETIME,	0,	IOVT_INT32,	0 },
+	{"idleclock",	IOV_IDLECLOCK,	0,	IOVT_INT32,	0 },
+	{"sd1idle",	IOV_SD1IDLE,	0,	IOVT_BOOL,	0 },
+	{"membytes",	IOV_MEMBYTES,	0,	IOVT_BUFFER,	2 * sizeof(int) },
+	{"ramsize",	IOV_RAMSIZE,	0,	IOVT_UINT32,	0 },
+	{"ramstart",	IOV_RAMSTART,	0,	IOVT_UINT32,	0 },
+	{"dwnldstate",	IOV_SET_DOWNLOAD_STATE,	0,	IOVT_BOOL,	0 },
+	{"socram_state",	IOV_SOCRAM_STATE,	0,	IOVT_BOOL,	0 },
+	{"vars",	IOV_VARS,	0,	IOVT_BUFFER,	0 },
+	{"sdiod_drive",	IOV_SDIOD_DRIVE, 0,	IOVT_UINT32,	0 },
+	{"readahead",	IOV_READAHEAD,	0,	IOVT_BOOL,	0 },
+	{"sdrxchain",	IOV_SDRXCHAIN,	0,	IOVT_BOOL,	0 },
+	{"alignctl",	IOV_ALIGNCTL,	0,	IOVT_BOOL,	0 },
+	{"sdalign",	IOV_SDALIGN,	0,	IOVT_BOOL,	0 },
+	{"devreset",	IOV_DEVRESET,	0,	IOVT_BOOL,	0 },
+#ifdef DHD_DEBUG
+	{"sdreg",	IOV_SDREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sbreg",	IOV_SBREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_cis",	IOV_SDCIS,	0,	IOVT_BUFFER,	DHD_IOCTL_MAXLEN },
+	{"forcealign",	IOV_FORCEEVEN,	0,	IOVT_BOOL,	0 },
+	{"txbound",	IOV_TXBOUND,	0,	IOVT_UINT32,	0 },
+	{"rxbound",	IOV_RXBOUND,	0,	IOVT_UINT32,	0 },
+	{"txminmax",	IOV_TXMINMAX,	0,	IOVT_UINT32,	0 },
+	{"cpu",		IOV_CPU,	0,	IOVT_BOOL,	0 },
+#ifdef DHD_DEBUG
+	{"checkdied",	IOV_CHECKDIED,	0,	IOVT_BUFFER,	0 },
+	{"serial",	IOV_SERIALCONS,	0,	IOVT_UINT32,	0 },
+#endif /* DHD_DEBUG  */
+#endif /* DHD_DEBUG */
+#ifdef SDTEST
+	{"extloop",	IOV_EXTLOOP,	0,	IOVT_BOOL,	0 },
+	{"pktgen",	IOV_PKTGEN,	0,	IOVT_BUFFER,	sizeof(dhd_pktgen_t) },
+#endif /* SDTEST */
+#if defined(USE_SDIOFIFO_IOVAR)
+	{"watermark",	IOV_WATERMARK,	0,	IOVT_UINT32,	0 },
+	{"mesbusyctrl",	IOV_MESBUSYCTRL,	0,	IOVT_UINT32,	0 },
+#endif /* USE_SDIOFIFO_IOVAR */
+	{"devcap", IOV_DEVCAP,	0,	IOVT_UINT32,	0 },
+	{"dngl_isolation", IOV_DONGLEISOLATION,	0,	IOVT_UINT32,	0 },
+	{"kso",	IOV_KSO,	0,	IOVT_UINT32,	0 },
+	{"devsleep", IOV_DEVSLEEP,	0,	IOVT_UINT32,	0 },
+#ifdef SOFTAP
+	{"fwpath", IOV_FWPATH, 0, IOVT_BUFFER, 0 },
+#endif
+	{"txglomsize", IOV_TXGLOMSIZE, 0, IOVT_UINT32, 0 },
+	{"fw_hang_report", IOV_HANGREPORT, 0, IOVT_BOOL, 0 },
+	{"txinrx_thres", IOV_TXINRX_THRES, 0, IOVT_INT32, 0 },
+	{NULL, 0, 0, 0, 0 }
+};
+
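+/* Print num/div with two decimal places using integer math only (no floating
+ * point in the kernel): e.g. num = 1234, div = 100 prints "12.34".
+ */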
+static void
+dhd_dump_pct(struct bcmstrbuf *strbuf, char *desc, uint num, uint div)
+{
+	uint q1, q2;
+
+	if (!div) {
+		bcm_bprintf(strbuf, "%s N/A", desc);
+	} else {
+		q1 = num / div;
+		q2 = (100 * (num - (q1 * div))) / div;
+		bcm_bprintf(strbuf, "%s %d.%02d", desc, q1, q2);
+	}
+}
+
+void
+dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	dhd_bus_t *bus = dhdp->bus;
+
+	bcm_bprintf(strbuf, "Bus SDIO structure:\n");
+	bcm_bprintf(strbuf, "hostintmask 0x%08x intstatus 0x%08x sdpcm_ver %d\n",
+	            bus->hostintmask, bus->intstatus, bus->sdpcm_ver);
+	bcm_bprintf(strbuf, "fcstate %d qlen %u tx_seq %d, max %d, rxskip %d rxlen %u rx_seq %d\n",
+	            bus->fcstate, pktq_len(&bus->txq), bus->tx_seq, bus->tx_max, bus->rxskip,
+	            bus->rxlen, bus->rx_seq);
+	bcm_bprintf(strbuf, "intr %d intrcount %u lastintrs %u spurious %u\n",
+	            bus->intr, bus->intrcount, bus->lastintrs, bus->spurious);
+	bcm_bprintf(strbuf, "pollrate %u pollcnt %u regfails %u\n",
+	            bus->pollrate, bus->pollcnt, bus->regfails);
+
+	bcm_bprintf(strbuf, "\nAdditional counters:\n");
+#ifdef DHDENABLE_TAILPAD
+	bcm_bprintf(strbuf, "tx_tailpad_chain %u tx_tailpad_pktget %u\n",
+	            bus->tx_tailpad_chain, bus->tx_tailpad_pktget);
+#endif
+	bcm_bprintf(strbuf, "tx_sderrs %u fcqueued %u rxrtx %u rx_toolong %u rxc_errors %u\n",
+	            bus->tx_sderrs, bus->fcqueued, bus->rxrtx, bus->rx_toolong,
+	            bus->rxc_errors);
+	bcm_bprintf(strbuf, "rx_hdrfail %u badhdr %u badseq %u\n",
+	            bus->rx_hdrfail, bus->rx_badhdr, bus->rx_badseq);
+	bcm_bprintf(strbuf, "fc_rcvd %u, fc_xoff %u, fc_xon %u\n",
+	            bus->fc_rcvd, bus->fc_xoff, bus->fc_xon);
+	bcm_bprintf(strbuf, "rxglomfail %u, rxglomframes %u, rxglompkts %u\n",
+	            bus->rxglomfail, bus->rxglomframes, bus->rxglompkts);
+	bcm_bprintf(strbuf, "f2rx (hdrs/data) %u (%u/%u), f2tx %u f1regs %u\n",
+	            (bus->f2rxhdrs + bus->f2rxdata), bus->f2rxhdrs, bus->f2rxdata,
+	            bus->f2txdata, bus->f1regdata);
+	{
+		dhd_dump_pct(strbuf, "\nRx: pkts/f2rd", bus->dhd->rx_packets,
+		             (bus->f2rxhdrs + bus->f2rxdata));
+		dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->rx_packets, bus->f1regdata);
+		dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->rx_packets,
+		             (bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata));
+		dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->rx_packets, bus->intrcount);
+		bcm_bprintf(strbuf, "\n");
+
+		dhd_dump_pct(strbuf, "Rx: glom pct", (100 * bus->rxglompkts),
+		             bus->dhd->rx_packets);
+		dhd_dump_pct(strbuf, ", pkts/glom", bus->rxglompkts, bus->rxglomframes);
+		bcm_bprintf(strbuf, "\n");
+
+		dhd_dump_pct(strbuf, "Tx: pkts/f2wr", bus->dhd->tx_packets, bus->f2txdata);
+		dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->tx_packets, bus->f1regdata);
+		dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->tx_packets,
+		             (bus->f2txdata + bus->f1regdata));
+		dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->tx_packets, bus->intrcount);
+		bcm_bprintf(strbuf, "\n");
+
+		dhd_dump_pct(strbuf, "Total: pkts/f2rw",
+		             (bus->dhd->tx_packets + bus->dhd->rx_packets),
+		             (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata));
+		dhd_dump_pct(strbuf, ", pkts/f1sd",
+		             (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->f1regdata);
+		dhd_dump_pct(strbuf, ", pkts/sd",
+		             (bus->dhd->tx_packets + bus->dhd->rx_packets),
+		             (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata));
+		dhd_dump_pct(strbuf, ", pkts/int",
+		             (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->intrcount);
+		bcm_bprintf(strbuf, "\n\n");
+	}
+
+#ifdef SDTEST
+	if (bus->pktgen_count) {
+		bcm_bprintf(strbuf, "pktgen config and count:\n");
+		bcm_bprintf(strbuf, "freq %u count %u print %u total %u min %u len %u\n",
+		            bus->pktgen_freq, bus->pktgen_count, bus->pktgen_print,
+		            bus->pktgen_total, bus->pktgen_minlen, bus->pktgen_maxlen);
+		bcm_bprintf(strbuf, "send attempts %u rcvd %u fail %u\n",
+		            bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail);
+	}
+#endif /* SDTEST */
+#ifdef DHD_DEBUG
+	bcm_bprintf(strbuf, "dpc_sched %d host interrupt%spending\n",
+	            bus->dpc_sched, (bcmsdh_intr_pending(bus->sdh) ? " " : " not "));
+	bcm_bprintf(strbuf, "blocksize %u roundup %u\n", bus->blocksize, bus->roundup);
+#endif /* DHD_DEBUG */
+	bcm_bprintf(strbuf, "clkstate %d activity %d idletime %d idlecount %d sleeping %d\n",
+	            bus->clkstate, bus->activity, bus->idletime, bus->idlecount, bus->sleeping);
+}
+
+void
+dhd_bus_clearcounts(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus;
+
+	bus->intrcount = bus->lastintrs = bus->spurious = bus->regfails = 0;
+	bus->rxrtx = bus->rx_toolong = bus->rxc_errors = 0;
+	bus->rx_hdrfail = bus->rx_badhdr = bus->rx_badseq = 0;
+#ifdef DHDENABLE_TAILPAD
+	bus->tx_tailpad_chain = bus->tx_tailpad_pktget = 0;
+#endif
+	bus->tx_sderrs = bus->fc_rcvd = bus->fc_xoff = bus->fc_xon = 0;
+	bus->rxglomfail = bus->rxglomframes = bus->rxglompkts = 0;
+	bus->f2rxhdrs = bus->f2rxdata = bus->f2txdata = bus->f1regdata = 0;
+}
+
+#ifdef SDTEST
+static int
+dhdsdio_pktgen_get(dhd_bus_t *bus, uint8 *arg)
+{
+	dhd_pktgen_t pktgen;
+
+	pktgen.version = DHD_PKTGEN_VERSION;
+	pktgen.freq = bus->pktgen_freq;
+	pktgen.count = bus->pktgen_count;
+	pktgen.print = bus->pktgen_print;
+	pktgen.total = bus->pktgen_total;
+	pktgen.minlen = bus->pktgen_minlen;
+	pktgen.maxlen = bus->pktgen_maxlen;
+	pktgen.numsent = bus->pktgen_sent;
+	pktgen.numrcvd = bus->pktgen_rcvd;
+	pktgen.numfail = bus->pktgen_fail;
+	pktgen.mode = bus->pktgen_mode;
+	pktgen.stop = bus->pktgen_stop;
+
+	bcopy(&pktgen, arg, sizeof(pktgen));
+
+	return 0;
+}
+
+static int
+dhdsdio_pktgen_set(dhd_bus_t *bus, uint8 *arg)
+{
+	dhd_pktgen_t pktgen;
+	uint oldcnt, oldmode;
+
+	bcopy(arg, &pktgen, sizeof(pktgen));
+	if (pktgen.version != DHD_PKTGEN_VERSION)
+		return BCME_BADARG;
+
+	oldcnt = bus->pktgen_count;
+	oldmode = bus->pktgen_mode;
+
+	bus->pktgen_freq = pktgen.freq;
+	bus->pktgen_count = pktgen.count;
+	bus->pktgen_print = pktgen.print;
+	bus->pktgen_total = pktgen.total;
+	bus->pktgen_minlen = pktgen.minlen;
+	bus->pktgen_maxlen = pktgen.maxlen;
+	bus->pktgen_mode = pktgen.mode;
+	bus->pktgen_stop = pktgen.stop;
+
+	bus->pktgen_tick = bus->pktgen_ptick = 0;
+	bus->pktgen_prev_time = jiffies;
+	bus->pktgen_len = MAX(bus->pktgen_len, bus->pktgen_minlen);
+	bus->pktgen_len = MIN(bus->pktgen_len, bus->pktgen_maxlen);
+
+	/* Clear counts for a new pktgen (mode change, or was stopped) */
+	if (bus->pktgen_count && (!oldcnt || oldmode != bus->pktgen_mode)) {
+		bus->pktgen_sent = bus->pktgen_prev_sent = bus->pktgen_rcvd = 0;
+		bus->pktgen_prev_rcvd = bus->pktgen_fail = 0;
+	}
+
+	return 0;
+}
+#endif /* SDTEST */
+
+static void
+dhdsdio_devram_remap(dhd_bus_t *bus, bool val)
+{
+	uint8 enable, protect, remap;
+
+	si_socdevram(bus->sih, FALSE, &enable, &protect, &remap);
+	remap = val ? TRUE : FALSE;
+	si_socdevram(bus->sih, TRUE, &enable, &protect, &remap);
+}
+
+static int
+dhdsdio_membytes(dhd_bus_t *bus, bool write, uint32 address, uint8 *data, uint size)
+{
+	int bcmerror = 0;
+	uint32 sdaddr;
+	uint dsize;
+
+	/* In remap mode, adjust addresses beyond socram and redirect them to devram
+	 * at SOCDEVRAM_BP_ADDR, since remap addresses > orig_ramsize are not
+	 * backplane accessible
+	 */
+	if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address)) {
+		address -= bus->orig_ramsize;
+		address += SOCDEVRAM_BP_ADDR;
+	}
+
+	/* Determine initial transfer parameters */
+	sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
+	if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
+		dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
+	else
+		dsize = size;
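+	/* Hypothetical example, assuming the usual 32 KB function-1 backplane window:
+	 * for address 0x18007F00 and size 0x400, sdaddr = 0x7F00 and the first chunk
+	 * is clipped to dsize = 0x100; the window is then advanced and the remaining
+	 * 0x300 bytes are transferred from offset 0.
+	 */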
+
+	/* Set the backplane window to include the start address */
+	if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) {
+		DHD_ERROR(("%s: window change failed\n", __FUNCTION__));
+		goto xfer_done;
+	}
+
+	/* Do the transfer(s) */
+	while (size) {
+		DHD_INFO(("%s: %s %d bytes at offset 0x%08x in window 0x%08x\n",
+		          __FUNCTION__, (write ? "write" : "read"), dsize, sdaddr,
+		          (address & SBSDIO_SBWINDOW_MASK)));
+		if ((bcmerror = bcmsdh_rwdata(bus->sdh, write, sdaddr, data, dsize))) {
+			DHD_ERROR(("%s: membytes transfer failed\n", __FUNCTION__));
+			break;
+		}
+
+		/* Adjust for next transfer (if any) */
+		if ((size -= dsize)) {
+			data += dsize;
+			address += dsize;
+			if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) {
+				DHD_ERROR(("%s: window change failed\n", __FUNCTION__));
+				break;
+			}
+			sdaddr = 0;
+			dsize = MIN(SBSDIO_SB_OFT_ADDR_LIMIT, size);
+		}
+
+	}
+
+xfer_done:
+	/* Return the window to backplane enumeration space for core access */
+	if (dhdsdio_set_siaddr_window(bus, bcmsdh_cur_sbwad(bus->sdh))) {
+		DHD_ERROR(("%s: FAILED to set window back to 0x%x\n", __FUNCTION__,
+			bcmsdh_cur_sbwad(bus->sdh)));
+	}
+
+	return bcmerror;
+}
+
+#ifdef DHD_DEBUG
+static int
+dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh)
+{
+	uint32 addr;
+	int rv, i;
+	uint32 shaddr = 0;
+
+	shaddr = bus->dongle_ram_base + bus->ramsize - 4;
+	i = 0;
+	do {
+		/* Read last word in memory to determine address of sdpcm_shared structure */
+		if ((rv = dhdsdio_membytes(bus, FALSE, shaddr, (uint8 *)&addr, 4)) < 0)
+			return rv;
+
+		addr = ltoh32(addr);
+
+		DHD_INFO(("sdpcm_shared address 0x%08X\n", addr));
+
+		/*
+		 * Check if addr is valid.
+		 * NVRAM length at the end of memory should have been overwritten.
+		 */
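+		/* The length token keeps the value in the low halfword and its bitwise
+		 * complement in the high halfword; if that pattern (or 0) is still
+		 * present, firmware has not yet published the sdpcm_shared address.
+		 */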
+		if (addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff)) {
+			if ((bus->srmemsize > 0) && (i++ == 0)) {
+				shaddr -= bus->srmemsize;
+			} else {
+				DHD_ERROR(("%s: address (0x%08x) of sdpcm_shared invalid\n",
+					__FUNCTION__, addr));
+				return BCME_ERROR;
+			}
+		} else
+			break;
+	} while (i < 2);
+
+	/* Read hndrte_shared structure */
+	if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)sh, sizeof(sdpcm_shared_t))) < 0)
+		return rv;
+
+	/* Endianness */
+	sh->flags = ltoh32(sh->flags);
+	sh->trap_addr = ltoh32(sh->trap_addr);
+	sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
+	sh->assert_file_addr = ltoh32(sh->assert_file_addr);
+	sh->assert_line = ltoh32(sh->assert_line);
+	sh->console_addr = ltoh32(sh->console_addr);
+	sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
+
+	if ((sh->flags & SDPCM_SHARED_VERSION_MASK) == 3 && SDPCM_SHARED_VERSION == 1)
+		return BCME_OK;
+
+	if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) {
+		DHD_ERROR(("%s: sdpcm_shared version %d in dhd "
+		           "is different than sdpcm_shared version %d in dongle\n",
+		           __FUNCTION__, SDPCM_SHARED_VERSION,
+		           sh->flags & SDPCM_SHARED_VERSION_MASK));
+		return BCME_ERROR;
+	}
+
+	return BCME_OK;
+}
+
+#define CONSOLE_LINE_MAX	192
+
+static int
+dhdsdio_readconsole(dhd_bus_t *bus)
+{
+	dhd_console_t *c = &bus->console;
+	uint8 line[CONSOLE_LINE_MAX], ch;
+	uint32 n, idx, addr;
+	int rv;
+
+	/* Don't do anything until FWREADY updates console address */
+	if (bus->console_addr == 0)
+		return 0;
+
+	if (!KSO_ENAB(bus))
+		return 0;
+
+	/* Read console log struct */
+	addr = bus->console_addr + OFFSETOF(hndrte_cons_t, log);
+	if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
+		return rv;
+
+	/* Allocate console buffer (one time only) */
+	if (c->buf == NULL) {
+		c->bufsize = ltoh32(c->log.buf_size);
+		if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
+			return BCME_NOMEM;
+	}
+
+	idx = ltoh32(c->log.idx);
+
+	/* Protect against corrupt value */
+	if (idx > c->bufsize)
+		return BCME_ERROR;
+
+	/* Skip reading the console buffer if the index pointer has not moved */
+	if (idx == c->last)
+		return BCME_OK;
+
+	/* Read the console buffer */
+	addr = ltoh32(c->log.buf);
+	if ((rv = dhdsdio_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0)
+		return rv;
+
+	while (c->last != idx) {
+		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+			if (c->last == idx) {
+				/* This would output a partial line.  Instead, back up
+				 * the buffer pointer and output this line next time around.
+				 */
+				if (c->last >= n)
+					c->last -= n;
+				else
+					c->last = c->bufsize - n;
+				goto break2;
+			}
+			ch = c->buf[c->last];
+			c->last = (c->last + 1) % c->bufsize;
+			if (ch == '\n')
+				break;
+			line[n] = ch;
+		}
+
+		if (n > 0) {
+			if (line[n - 1] == '\r')
+				n--;
+			line[n] = 0;
+			printf("CONSOLE: %s\n", line);
+#ifdef LOG_INTO_TCPDUMP
+			dhd_sendup_log(bus->dhd, line, n);
+#endif /* LOG_INTO_TCPDUMP */
+		}
+	}
+break2:
+
+	return BCME_OK;
+}
+
+static int
+dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size)
+{
+	int bcmerror = 0;
+	uint msize = 512;
+	char *mbuffer = NULL;
+	char *console_buffer = NULL;
+	uint maxstrlen = 256;
+	char *str = NULL;
+	trap_t tr;
+	sdpcm_shared_t sdpcm_shared;
+	struct bcmstrbuf strbuf;
+	uint32 console_ptr, console_size, console_index;
+	uint8 line[CONSOLE_LINE_MAX], ch;
+	uint32 n, i, addr;
+	int rv;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (DHD_NOCHECKDIED_ON())
+		return 0;
+
+	if (data == NULL) {
+		/*
+		 * Called after an rx ctrl timeout, so "data" is NULL.
+		 * Allocate memory to trace the trap or assert.
+		 */
+		size = msize;
+		mbuffer = data = MALLOC(bus->dhd->osh, msize);
+		if (mbuffer == NULL) {
+			DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
+			bcmerror = BCME_NOMEM;
+			goto done;
+		}
+	}
+
+	if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
+		DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
+		bcmerror = BCME_NOMEM;
+		goto done;
+	}
+
+	if ((bcmerror = dhdsdio_readshared(bus, &sdpcm_shared)) < 0)
+		goto done;
+
+	bcm_binit(&strbuf, data, size);
+
+	bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address  : 0x%08X\n",
+	            sdpcm_shared.msgtrace_addr, sdpcm_shared.console_addr);
+
+	if ((sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
+		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
+		 * (Avoids conflict with real asserts for programmatic parsing of output.)
+		 */
+		bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
+	}
+
+	if ((sdpcm_shared.flags & (SDPCM_SHARED_ASSERT|SDPCM_SHARED_TRAP)) == 0) {
+		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
+		 * (Avoids conflict with real asserts for programmatic parsing of output.)
+		 */
+		bcm_bprintf(&strbuf, "No trap%s in dongle",
+		            (sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT)
+		            ? "/assrt" : "");
+	} else {
+		if (sdpcm_shared.flags & SDPCM_SHARED_ASSERT) {
+			/* Download assert */
+			bcm_bprintf(&strbuf, "Dongle assert");
+			if (sdpcm_shared.assert_exp_addr != 0) {
+				str[0] = '\0';
+				if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+				                                 sdpcm_shared.assert_exp_addr,
+				                                 (uint8 *)str, maxstrlen)) < 0)
+					goto done;
+
+				str[maxstrlen - 1] = '\0';
+				bcm_bprintf(&strbuf, " expr \"%s\"", str);
+			}
+
+			if (sdpcm_shared.assert_file_addr != 0) {
+				str[0] = '\0';
+				if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+				                                 sdpcm_shared.assert_file_addr,
+				                                 (uint8 *)str, maxstrlen)) < 0)
+					goto done;
+
+				str[maxstrlen - 1] = '\0';
+				bcm_bprintf(&strbuf, " file \"%s\"", str);
+			}
+
+			bcm_bprintf(&strbuf, " line %d ", sdpcm_shared.assert_line);
+		}
+
+#ifdef BOARD_INTEL
+		if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) {
+			bus->dhd->dongle_trap_occured = TRUE;
+			if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+			                                 sdpcm_shared.trap_addr,
+			                                 (uint8*)&tr, sizeof(trap_t))) < 0)
+				goto done;
+
+			bcm_bprintf(&strbuf,
+			            "Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, "
+			            "sp 0x%x, lp 0x%x, rpc 0x%x Trap offset 0x%x, "
+			            "r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
+			            "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n",
+			            ltoh32(tr.type), ltoh32(tr.epc), ltoh32(tr.cpsr), ltoh32(tr.spsr),
+			            ltoh32(tr.r13), ltoh32(tr.r14), ltoh32(tr.pc),
+			            ltoh32(sdpcm_shared.trap_addr),
+			            ltoh32(tr.r0), ltoh32(tr.r1), ltoh32(tr.r2), ltoh32(tr.r3),
+			            ltoh32(tr.r4), ltoh32(tr.r5), ltoh32(tr.r6), ltoh32(tr.r7));
+		}
+	}
+
+	/* Systematically print the console log contents */
+	addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log);
+	if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+		                           (uint8 *)&console_ptr, sizeof(console_ptr))) < 0)
+		goto printbuf;
+
+	addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log.buf_size);
+	if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+	                           (uint8 *)&console_size, sizeof(console_size))) < 0)
+		goto printbuf;
+
+	addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log.idx);
+	if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+	                           (uint8 *)&console_index, sizeof(console_index))) < 0)
+		goto printbuf;
+
+	console_ptr = ltoh32(console_ptr);
+	console_size = ltoh32(console_size);
+	console_index = ltoh32(console_index);
+
+	if (console_size > CONSOLE_BUFFER_MAX ||
+	    !(console_buffer = MALLOC(bus->dhd->osh, console_size)))
+		goto printbuf;
+
+	if ((rv = dhdsdio_membytes(bus, FALSE, console_ptr,
+	                           (uint8 *)console_buffer, console_size)) < 0)
+		goto printbuf;
+
+	for (i = 0, n = 0; i < console_size; i += n + 1) {
+		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+			ch = console_buffer[(console_index + i + n) % console_size];
+			if (ch == '\n')
+				break;
+			line[n] = ch;
+		}
+
+		if (n > 0) {
+			if (line[n - 1] == '\r')
+				n--;
+			line[n] = 0;
+			/* Don't use DHD_ERROR macro since we print
+			 * a lot of information quickly. The macro
+			 * will truncate a lot of the printfs
+			 */
+
+			if (dhd_msg_level & DHD_ERROR_VAL)
+				printf("CONSOLE: %s\n", line);
+		}
+	}
+
+	if (checkdied_dumpram) {
+		/* Dump the entire chipset RAM into a user file */
+		uint8 maxcnt = 10;
+
+		if (checkdied_cnt++ < maxcnt) {
+			struct file *f;
+			mm_segment_t oldfs;
+			uint32 offset = bus->dongle_ram_base;
+			uint8 buf[64];
+			uint32 blksize = sizeof(buf);
+			loff_t pos = 0;
+			int ret;
+			char path[64];
+
+			snprintf(path, sizeof(path), "/tmp/4334x_dumpram_%d.bin", checkdied_cnt);
+			DHD_ERROR(("%s: dump ram into %s\n", __FUNCTION__, path));
+
+			oldfs = get_fs();
+			set_fs(get_ds());
+			f = filp_open(path, O_CREAT|O_WRONLY, S_IRWXU);
+			if (!IS_ERR(f)) {
+				while (offset < (bus->dongle_ram_base + bus->ramsize)) {
+					if ((ret = dhdsdio_membytes(bus, FALSE, offset,
+							buf, blksize)) < 0) {
+						DHD_ERROR(("%s: dhdsdio_membytes failed. err:%d\n",
+							__FUNCTION__, ret));
+						break;
+					}
+					/* vfs_write returns bytes written or a negative errno */
+					ret = vfs_write(f, buf, blksize, &pos);
+					if (ret < 0) {
+						DHD_ERROR(("%s: vfs_write failed. err:%d\n",
+							__FUNCTION__, ret));
+						break;
+					}
+					offset += blksize;
+				}
+
+				filp_close(f, NULL);
+			} else {
+				DHD_ERROR(("%s: filp_open failed\n", __FUNCTION__));
+			}
+			set_fs(oldfs);
+			DHD_ERROR(("%s: dump ram done\n", __FUNCTION__));
+		} else {
+			DHD_ERROR(("%s: cannot dump chipset ram, max dump count %d reached\n",
+				__FUNCTION__, maxcnt));
+		}
+	}
+#else
+		if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) {
+			bus->dhd->dongle_trap_occured = TRUE;
+			if ((bcmerror = dhdsdio_membytes(bus, FALSE,
+			                                 sdpcm_shared.trap_addr,
+			                                 (uint8*)&tr, sizeof(trap_t))) < 0)
+				goto done;
+
+			bcm_bprintf(&strbuf,
+			            "Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, "
+			            "sp 0x%x, lp 0x%x, rpc 0x%x Trap offset 0x%x, "
+			            "r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
+			            "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n",
+			            ltoh32(tr.type), ltoh32(tr.epc), ltoh32(tr.cpsr), ltoh32(tr.spsr),
+			            ltoh32(tr.r13), ltoh32(tr.r14), ltoh32(tr.pc),
+			            ltoh32(sdpcm_shared.trap_addr),
+			            ltoh32(tr.r0), ltoh32(tr.r1), ltoh32(tr.r2), ltoh32(tr.r3),
+			            ltoh32(tr.r4), ltoh32(tr.r5), ltoh32(tr.r6), ltoh32(tr.r7));
+
+			addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log);
+			if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+				(uint8 *)&console_ptr, sizeof(console_ptr))) < 0)
+				goto printbuf;
+
+			addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log.buf_size);
+			if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+				(uint8 *)&console_size, sizeof(console_size))) < 0)
+				goto printbuf;
+
+			addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log.idx);
+			if ((rv = dhdsdio_membytes(bus, FALSE, addr,
+				(uint8 *)&console_index, sizeof(console_index))) < 0)
+				goto printbuf;
+
+			console_ptr = ltoh32(console_ptr);
+			console_size = ltoh32(console_size);
+			console_index = ltoh32(console_index);
+
+			if (console_size > CONSOLE_BUFFER_MAX ||
+				!(console_buffer = MALLOC(bus->dhd->osh, console_size)))
+				goto printbuf;
+
+			if ((rv = dhdsdio_membytes(bus, FALSE, console_ptr,
+				(uint8 *)console_buffer, console_size)) < 0)
+				goto printbuf;
+
+			for (i = 0, n = 0; i < console_size; i += n + 1) {
+				for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+					ch = console_buffer[(console_index + i + n) % console_size];
+					if (ch == '\n')
+						break;
+					line[n] = ch;
+				}
+
+				if (n > 0) {
+					if (line[n - 1] == '\r')
+						n--;
+					line[n] = 0;
+					/* Don't use DHD_ERROR macro since we print
+					 * a lot of information quickly. The macro
+					 * will truncate a lot of the printfs
+					 */
+
+					if (dhd_msg_level & DHD_ERROR_VAL)
+						printf("CONSOLE: %s\n", line);
+				}
+			}
+		}
+	}
+#endif  /* BOARD_INTEL */
+
+printbuf:
+	if (sdpcm_shared.flags & (SDPCM_SHARED_ASSERT | SDPCM_SHARED_TRAP)) {
+		DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
+	}
+
+done:
+	if (mbuffer)
+		MFREE(bus->dhd->osh, mbuffer, msize);
+	if (str)
+		MFREE(bus->dhd->osh, str, maxstrlen);
+	if (console_buffer)
+		MFREE(bus->dhd->osh, console_buffer, console_size);
+
+	return bcmerror;
+}
+#endif /* DHD_DEBUG */
+
+int
+dhdsdio_downloadvars(dhd_bus_t *bus, void *arg, int len)
+{
+	int bcmerror = BCME_OK;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Basic sanity checks */
+	if (bus->dhd->up) {
+		bcmerror = BCME_NOTDOWN;
+		goto err;
+	}
+	if (!len) {
+		bcmerror = BCME_BUFTOOSHORT;
+		goto err;
+	}
+
+	/* Free the old ones and replace with passed variables */
+	if (bus->vars)
+		MFREE(bus->dhd->osh, bus->vars, bus->varsz);
+
+	bus->vars = MALLOC(bus->dhd->osh, len);
+	bus->varsz = bus->vars ? len : 0;
+	if (bus->vars == NULL) {
+		bcmerror = BCME_NOMEM;
+		goto err;
+	}
+
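+	/* Annotation (illustrative values): the vars blob is the usual
+	 * Broadcom NVRAM text format, NUL-separated "name=value" pairs,
+	 * e.g. "boardtype=0x1234\0macaddr=00:11:22:33:44:55\0\0"; the empty
+	 * string at the end is the terminating double-null mentioned below.
+	 */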
+	/* Copy the passed variables, which should include the terminating double-null */
+	bcopy(arg, bus->vars, bus->varsz);
+err:
+	return bcmerror;
+}
+
+#ifdef DHD_DEBUG
+
+#define CC_PLL_CHIPCTRL_SERIAL_ENAB		(1  << 24)
+#define CC_CHIPCTRL_JTAG_SEL			(1  << 3)
+#define CC_CHIPCTRL_GPIO_SEL				(0x3)
+#define CC_PLL_CHIPCTRL_SERIAL_ENAB_4334	(1  << 28)
+
+static int
+dhd_serialconsole(dhd_bus_t *bus, bool set, bool enable, int *bcmerror)
+{
+	int int_val;
+	uint32 addr, data, uart_enab = 0;
+	uint32 jtag_sel = CC_CHIPCTRL_JTAG_SEL;
+	uint32 gpio_sel = CC_CHIPCTRL_GPIO_SEL;
+
+	addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+	data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
+	*bcmerror = 0;
+
+	bcmsdh_reg_write(bus->sdh, addr, 4, 1);
+	if (bcmsdh_regfail(bus->sdh)) {
+		*bcmerror = BCME_SDIO_ERROR;
+		return -1;
+	}
+	int_val = bcmsdh_reg_read(bus->sdh, data, 4);
+	if (bcmsdh_regfail(bus->sdh)) {
+		*bcmerror = BCME_SDIO_ERROR;
+		return -1;
+	}
+	if (bus->sih->chip == BCM4330_CHIP_ID) {
+		uart_enab = CC_PLL_CHIPCTRL_SERIAL_ENAB;
+	}
+	else if (bus->sih->chip == BCM4334_CHIP_ID ||
+		bus->sih->chip == BCM43340_CHIP_ID ||
+		bus->sih->chip == BCM43341_CHIP_ID ||
+		bus->sih->chip == BCM43342_CHIP_ID ||
+		0) {
+		if (enable) {
+			/* Moved to PMU chipcontrol 1 from 4330 */
+			int_val &= ~gpio_sel;
+			int_val |= jtag_sel;
+		} else {
+			int_val |= gpio_sel;
+			int_val &= ~jtag_sel;
+		}
+		uart_enab = CC_PLL_CHIPCTRL_SERIAL_ENAB_4334;
+	}
+
+	if (!set)
+		return (int_val & uart_enab);
+	if (enable)
+		int_val |= uart_enab;
+	else
+		int_val &= ~uart_enab;
+	bcmsdh_reg_write(bus->sdh, data, 4, int_val);
+	if (bcmsdh_regfail(bus->sdh)) {
+		*bcmerror = BCME_SDIO_ERROR;
+		return -1;
+	}
+	if (bus->sih->chip == BCM4330_CHIP_ID) {
+		uint32 chipcontrol;
+		addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol);
+		chipcontrol = bcmsdh_reg_read(bus->sdh, addr, 4);
+		chipcontrol &= ~jtag_sel;
+		if (enable) {
+			chipcontrol |=  jtag_sel;
+			chipcontrol &= ~gpio_sel;
+		}
+		bcmsdh_reg_write(bus->sdh, addr, 4, chipcontrol);
+	}
+
+	return (int_val & uart_enab);
+}
+#endif /* DHD_DEBUG */
+
+static int
+dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+                void *params, int plen, void *arg, int len, int val_size)
+{
+	int bcmerror = 0;
+	int32 int_val = 0;
+	bool bool_val = 0;
+
+	DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
+	           __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+		goto exit;
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	bool_val = (int_val != 0) ? TRUE : FALSE;
+
+	/* Some ioctls use the bus */
+	dhd_os_sdlock(bus->dhd);
+
+	/* Check if dongle is in reset. If so, only allow DEVRESET iovars */
+	if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
+	                                actionid == IOV_GVAL(IOV_DEVRESET))) {
+		bcmerror = BCME_NOTREADY;
+		goto exit;
+	}
+
+	/*
+	 * Special handling for keepSdioOn: New SDIO Wake-up Mechanism
+	 */
+	if ((vi->varid == IOV_KSO) && (IOV_ISSET(actionid))) {
+		dhdsdio_clk_kso_iovar(bus, bool_val);
+		goto exit;
+	} else if ((vi->varid == IOV_DEVSLEEP) && (IOV_ISSET(actionid))) {
+		{
+			dhdsdio_clk_devsleep_iovar(bus, bool_val);
+			if (!SLPAUTO_ENAB(bus) && (bool_val == FALSE) && (bus->ipend)) {
+				DHD_ERROR(("INT pending in devsleep 1, dpc_sched: %d\n",
+					bus->dpc_sched));
+				if (!bus->dpc_sched) {
+					bus->dpc_sched = TRUE;
+					dhd_sched_dpc(bus->dhd);
+				}
+			}
+		}
+		goto exit;
+	}
+
+	/* Handle sleep stuff before any clock mucking */
+	if (vi->varid == IOV_SLEEP) {
+		if (IOV_ISSET(actionid)) {
+			bcmerror = dhdsdio_bussleep(bus, bool_val);
+		} else {
+			int_val = (int32)bus->sleeping;
+			bcopy(&int_val, arg, val_size);
+		}
+		goto exit;
+	}
+
+	/* Request clock to allow SDIO accesses */
+	if (!bus->dhd->dongle_reset) {
+		BUS_WAKE(bus);
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+	}
+
+	switch (actionid) {
+	case IOV_GVAL(IOV_INTR):
+		int_val = (int32)bus->intr;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_INTR):
+		bus->intr = bool_val;
+		bus->intdis = FALSE;
+		if (bus->dhd->up) {
+			if (bus->intr) {
+				DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__));
+				bcmsdh_intr_enable(bus->sdh);
+			} else {
+				DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+				bcmsdh_intr_disable(bus->sdh);
+			}
+		}
+		break;
+
+	case IOV_GVAL(IOV_POLLRATE):
+		int_val = (int32)bus->pollrate;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_POLLRATE):
+		bus->pollrate = (uint)int_val;
+		bus->poll = (bus->pollrate != 0);
+		break;
+
+	case IOV_GVAL(IOV_IDLETIME):
+		int_val = bus->idletime;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_IDLETIME):
+		if ((int_val < 0) && (int_val != DHD_IDLE_IMMEDIATE)) {
+			bcmerror = BCME_BADARG;
+		} else {
+			bus->idletime = int_val;
+		}
+		break;
+
+	case IOV_GVAL(IOV_IDLECLOCK):
+		int_val = (int32)bus->idleclock;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_IDLECLOCK):
+		bus->idleclock = int_val;
+		break;
+
+	case IOV_GVAL(IOV_SD1IDLE):
+		int_val = (int32)sd1idle;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SD1IDLE):
+		sd1idle = bool_val;
+		break;
+
+	case IOV_SVAL(IOV_MEMBYTES):
+	case IOV_GVAL(IOV_MEMBYTES):
+	{
+		uint32 address;
+		uint size, dsize;
+		uint8 *data;
+
+		bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));
+
+		ASSERT(plen >= 2*sizeof(int));
+
+		address = (uint32)int_val;
+		bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val));
+		size = (uint)int_val;
+
+		/* Do some validation */
+		dsize = set ? plen - (2 * sizeof(int)) : len;
+		if (dsize < size) {
+			DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n",
+			           __FUNCTION__, (set ? "set" : "get"), address, size, dsize));
+			bcmerror = BCME_BADARG;
+			break;
+		}
+
+		DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n", __FUNCTION__,
+		          (set ? "write" : "read"), size, address));
+
+		/* check if CR4 */
+		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			/*
+			 * If address is start of RAM (i.e. a downloaded image),
+			 * store the reset instruction to be written in 0
+			 */
+			if (set && address == bus->dongle_ram_base) {
+				bus->resetinstr = *(((uint32*)params) + 2);
+			}
+		} else {
+			/* If we know about SOCRAM, check for a fit */
+			if ((bus->orig_ramsize) &&
+			    ((address > bus->orig_ramsize) ||
+			     (address + size > bus->orig_ramsize))) {
+				uint8 enable, protect, remap;
+				si_socdevram(bus->sih, FALSE, &enable, &protect, &remap);
+				if (!enable || protect) {
+					DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n",
+						__FUNCTION__, bus->orig_ramsize, size, address));
+					DHD_ERROR(("%s: socram enable %d, protect %d\n",
+						__FUNCTION__, enable, protect));
+					bcmerror = BCME_BADARG;
+					break;
+				}
+
+				if (!REMAP_ENAB(bus) && (address >= SOCDEVRAM_ARM_ADDR)) {
+					uint32 devramsize = si_socdevram_size(bus->sih);
+					if ((address < SOCDEVRAM_ARM_ADDR) ||
+					    (address + size > (SOCDEVRAM_ARM_ADDR + devramsize))) {
+						DHD_ERROR(("%s: bad address 0x%08x, size 0x%08x\n",
+							__FUNCTION__, address, size));
+						DHD_ERROR(("%s: socram range 0x%08x, size 0x%08x\n",
+							__FUNCTION__, SOCDEVRAM_ARM_ADDR, devramsize));
+						bcmerror = BCME_BADARG;
+						break;
+					}
+					/* move it such that address is real now */
+					address -= SOCDEVRAM_ARM_ADDR;
+					address += SOCDEVRAM_BP_ADDR;
+					DHD_INFO(("%s: Request to %s %d bytes @ Mapped address 0x%08x\n",
+						__FUNCTION__, (set ? "write" : "read"), size, address));
+				} else if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address) && remap) {
+					/* Cannot access the remap region while the devram remap
+					 * bit is set; ROM content would be returned in this case.
+					 */
+					DHD_ERROR(("%s: Need to disable remap for address 0x%08x\n",
+						__FUNCTION__, address));
+					bcmerror = BCME_ERROR;
+					break;
+				}
+			}
+		}
+
+		/* Generate the actual data pointer */
+		data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg;
+
+		/* Call to do the transfer */
+		bcmerror = dhdsdio_membytes(bus, set, address, data, size);
+
+		break;
+	}
+
+	case IOV_GVAL(IOV_RAMSIZE):
+		int_val = (int32)bus->ramsize;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_RAMSTART):
+		int_val = (int32)bus->dongle_ram_base;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_SDIOD_DRIVE):
+		int_val = (int32)dhd_sdiod_drive_strength;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SDIOD_DRIVE):
+		dhd_sdiod_drive_strength = int_val;
+		si_sdiod_drive_strength_init(bus->sih, bus->dhd->osh, dhd_sdiod_drive_strength);
+		break;
+
+	case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
+		bcmerror = dhdsdio_download_state(bus, bool_val);
+		break;
+
+	case IOV_SVAL(IOV_SOCRAM_STATE):
+		bcmerror = dhdsdio_download_state(bus, bool_val);
+		break;
+
+	case IOV_SVAL(IOV_VARS):
+		bcmerror = dhdsdio_downloadvars(bus, arg, len);
+		break;
+
+	case IOV_GVAL(IOV_READAHEAD):
+		int_val = (int32)dhd_readahead;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_READAHEAD):
+		if (bool_val && !dhd_readahead)
+			bus->nextlen = 0;
+		dhd_readahead = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_SDRXCHAIN):
+		int_val = (int32)bus->use_rxchain;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SDRXCHAIN):
+		if (bool_val && !bus->sd_rxchain)
+			bcmerror = BCME_UNSUPPORTED;
+		else
+			bus->use_rxchain = bool_val;
+		break;
+	case IOV_GVAL(IOV_ALIGNCTL):
+		int_val = (int32)dhd_alignctl;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_ALIGNCTL):
+		dhd_alignctl = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_SDALIGN):
+		int_val = DHD_SDALIGN;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+#ifdef DHD_DEBUG
+	case IOV_GVAL(IOV_VARS):
+		if (bus->varsz < (uint)len)
+			bcopy(bus->vars, arg, bus->varsz);
+		else
+			bcmerror = BCME_BUFTOOSHORT;
+		break;
+
+	case IOV_GVAL(IOV_SDREG):
+	{
+		sdreg_t *sd_ptr;
+		uint32 addr, size;
+
+		sd_ptr = (sdreg_t *)params;
+
+		addr = (ulong)bus->regs + sd_ptr->offset;
+		size = sd_ptr->func;
+		int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
+		if (bcmsdh_regfail(bus->sdh))
+			bcmerror = BCME_SDIO_ERROR;
+		bcopy(&int_val, arg, sizeof(int32));
+		break;
+	}
+
+	case IOV_SVAL(IOV_SDREG):
+	{
+		sdreg_t *sd_ptr;
+		uint32 addr, size;
+
+		sd_ptr = (sdreg_t *)params;
+
+		addr = (ulong)bus->regs + sd_ptr->offset;
+		size = sd_ptr->func;
+		bcmsdh_reg_write(bus->sdh, addr, size, sd_ptr->value);
+		if (bcmsdh_regfail(bus->sdh))
+			bcmerror = BCME_SDIO_ERROR;
+		break;
+	}
+
+	/* Same as above, but offset is not backplane (not SDIO core) */
+	case IOV_GVAL(IOV_SBREG):
+	{
+		sdreg_t sdreg;
+		uint32 addr, size;
+
+		bcopy(params, &sdreg, sizeof(sdreg));
+
+		addr = SI_ENUM_BASE + sdreg.offset;
+		size = sdreg.func;
+		int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
+		if (bcmsdh_regfail(bus->sdh))
+			bcmerror = BCME_SDIO_ERROR;
+		bcopy(&int_val, arg, sizeof(int32));
+		break;
+	}
+
+	case IOV_SVAL(IOV_SBREG):
+	{
+		sdreg_t sdreg;
+		uint32 addr, size;
+
+		bcopy(params, &sdreg, sizeof(sdreg));
+
+		addr = SI_ENUM_BASE + sdreg.offset;
+		size = sdreg.func;
+		bcmsdh_reg_write(bus->sdh, addr, size, sdreg.value);
+		if (bcmsdh_regfail(bus->sdh))
+			bcmerror = BCME_SDIO_ERROR;
+		break;
+	}
+
+	case IOV_GVAL(IOV_SDCIS):
+	{
+		*(char *)arg = 0;
+
+		bcmstrcat(arg, "\nFunc 0\n");
+		bcmsdh_cis_read(bus->sdh, 0x10, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+		bcmstrcat(arg, "\nFunc 1\n");
+		bcmsdh_cis_read(bus->sdh, 0x11, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+		bcmstrcat(arg, "\nFunc 2\n");
+		bcmsdh_cis_read(bus->sdh, 0x12, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT);
+		break;
+	}
+
+	case IOV_GVAL(IOV_FORCEEVEN):
+		int_val = (int32)forcealign;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_FORCEEVEN):
+		forcealign = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_TXBOUND):
+		int_val = (int32)dhd_txbound;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_TXBOUND):
+		dhd_txbound = (uint)int_val;
+		break;
+
+	case IOV_GVAL(IOV_RXBOUND):
+		int_val = (int32)dhd_rxbound;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_RXBOUND):
+		dhd_rxbound = (uint)int_val;
+		break;
+
+	case IOV_GVAL(IOV_TXMINMAX):
+		int_val = (int32)dhd_txminmax;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_TXMINMAX):
+		dhd_txminmax = (uint)int_val;
+		break;
+
+	case IOV_GVAL(IOV_SERIALCONS):
+		int_val = dhd_serialconsole(bus, FALSE, 0, &bcmerror);
+		if (bcmerror != 0)
+			break;
+
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SERIALCONS):
+		dhd_serialconsole(bus, TRUE, bool_val, &bcmerror);
+		break;
+
+#endif /* DHD_DEBUG */
+
+#ifdef SDTEST
+	case IOV_GVAL(IOV_EXTLOOP):
+		int_val = (int32)bus->ext_loop;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_EXTLOOP):
+		bus->ext_loop = bool_val;
+		break;
+
+	case IOV_GVAL(IOV_PKTGEN):
+		bcmerror = dhdsdio_pktgen_get(bus, arg);
+		break;
+
+	case IOV_SVAL(IOV_PKTGEN):
+		bcmerror = dhdsdio_pktgen_set(bus, arg);
+		break;
+#endif /* SDTEST */
+
+#if defined(USE_SDIOFIFO_IOVAR)
+	case IOV_GVAL(IOV_WATERMARK):
+		int_val = (int32)watermark;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_WATERMARK):
+		watermark = (uint)int_val;
+		watermark = (watermark > SBSDIO_WATERMARK_MASK) ? SBSDIO_WATERMARK_MASK : watermark;
+		DHD_ERROR(("Setting watermark as 0x%x.\n", watermark));
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, (uint8)watermark, NULL);
+		break;
+
+	case IOV_GVAL(IOV_MESBUSYCTRL):
+		int_val = (int32)mesbusyctrl;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_MESBUSYCTRL):
+		mesbusyctrl = (uint)int_val;
+		mesbusyctrl = (mesbusyctrl > SBSDIO_MESBUSYCTRL_MASK)
+			? SBSDIO_MESBUSYCTRL_MASK : mesbusyctrl;
+		DHD_ERROR(("Setting mesbusyctrl as 0x%x.\n", mesbusyctrl));
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL,
+			((uint8)mesbusyctrl | 0x80), NULL);
+		break;
+#endif /* USE_SDIOFIFO_IOVAR */
+
+	case IOV_GVAL(IOV_DONGLEISOLATION):
+		int_val = bus->dhd->dongle_isolation;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DONGLEISOLATION):
+		bus->dhd->dongle_isolation = bool_val;
+		break;
+
+	case IOV_SVAL(IOV_DEVRESET):
+		DHD_TRACE(("%s: Called set IOV_DEVRESET=%d dongle_reset=%d busstate=%d\n",
+		           __FUNCTION__, bool_val, bus->dhd->dongle_reset,
+		           bus->dhd->busstate));
+
+		ASSERT(bus->dhd->osh);
+		/* ASSERT(bus->cl_devid); */
+
+		dhd_bus_devreset(bus->dhd, (uint8)bool_val);
+
+		break;
+	/*
+	 * SoftAP firmware is updated through a module parameter or an
+	 * Android private command.
+	 */
+
+	case IOV_GVAL(IOV_DEVRESET):
+		DHD_TRACE(("%s: Called get IOV_DEVRESET\n", __FUNCTION__));
+
+		/* Get its status */
+		int_val = (bool) bus->dhd->dongle_reset;
+		bcopy(&int_val, arg, val_size);
+
+		break;
+
+	case IOV_GVAL(IOV_KSO):
+		int_val = dhdsdio_sleepcsr_get(bus);
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_DEVCAP):
+		int_val = dhdsdio_devcap_get(bus);
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DEVCAP):
+		dhdsdio_devcap_set(bus, (uint8) int_val);
+		break;
+	case IOV_GVAL(IOV_TXGLOMSIZE):
+		int_val = (int32)bus->txglomsize;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_TXGLOMSIZE):
+		if (int_val > SDPCM_MAXGLOM_SIZE) {
+			bcmerror = BCME_ERROR;
+		} else {
+			bus->txglomsize = (uint)int_val;
+		}
+		break;
+	case IOV_SVAL(IOV_HANGREPORT):
+		bus->dhd->hang_report = bool_val;
+		DHD_ERROR(("%s: Set hang_report as %d\n", __FUNCTION__, bus->dhd->hang_report));
+		break;
+
+	case IOV_GVAL(IOV_HANGREPORT):
+		int_val = (int32)bus->dhd->hang_report;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_TXINRX_THRES):
+		int_val = bus->txinrx_thres;
+		bcopy(&int_val, arg, val_size);
+		break;
+	case IOV_SVAL(IOV_TXINRX_THRES):
+		if (int_val < 0) {
+			bcmerror = BCME_BADARG;
+		} else {
+			bus->txinrx_thres = int_val;
+		}
+		break;
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+
+exit:
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+		bus->activity = FALSE;
+		dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+
+	return bcmerror;
+}
+
+static int
+dhdsdio_write_vars(dhd_bus_t *bus)
+{
+	int bcmerror = 0;
+	uint32 varsize, phys_size;
+	uint32 varaddr;
+	uint8 *vbuffer;
+	uint32 varsizew;
+#ifdef DHD_DEBUG
+	uint8 *nvram_ularray;
+#endif /* DHD_DEBUG */
+
+	/* Even if there are no vars to be written, we still need to set the ramsize. */
+	varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
+	varaddr = (bus->ramsize - 4) - varsize;
+
+	varaddr += bus->dongle_ram_base;
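+	/* Resulting RAM layout (annotation): the vars blob sits at the top
+	 * of RAM, just below the 4-byte length token that occupies the very
+	 * last word:
+	 *
+	 *   [ firmware image ... | vars (varsize) | length token (4) ]
+	 *                          ^varaddr         ^phys_size - 4
+	 */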
+
+	if (bus->vars) {
+		if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 7)) {
+			if (((varaddr & 0x3C) == 0x3C) && (varsize > 4)) {
+				DHD_ERROR(("PR85623WAR in place\n"));
+				varsize += 4;
+				varaddr -= 4;
+			}
+		}
+
+		vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
+		if (!vbuffer)
+			return BCME_NOMEM;
+
+		bzero(vbuffer, varsize);
+		bcopy(bus->vars, vbuffer, bus->varsz);
+
+		/* Write the vars list */
+		bcmerror = dhdsdio_membytes(bus, TRUE, varaddr, vbuffer, varsize);
+#ifdef DHD_DEBUG
+		/* Verify NVRAM bytes */
+		DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize));
+		nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
+		if (!nvram_ularray) {
+			MFREE(bus->dhd->osh, vbuffer, varsize);
+			return BCME_NOMEM;
+		}
+
+		/* Upload image to verify downloaded contents. */
+		memset(nvram_ularray, 0xaa, varsize);
+
+		/* Read the vars list to temp buffer for comparison */
+		bcmerror = dhdsdio_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
+				__FUNCTION__, bcmerror, varsize, varaddr));
+		}
+		/* Compare the original NVRAM with the copy read back from RAM */
+		if (memcmp(vbuffer, nvram_ularray, varsize)) {
+			DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
+		} else {
+			DHD_ERROR(("%s: Download, upload and compare of NVRAM succeeded.\n",
+				__FUNCTION__));
+		}
+
+		MFREE(bus->dhd->osh, nvram_ularray, varsize);
+#endif /* DHD_DEBUG */
+
+		MFREE(bus->dhd->osh, vbuffer, varsize);
+	}
+
+	phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
+
+	phys_size += bus->dongle_ram_base;
+
+	/* adjust to the user-specified RAM */
+	DHD_INFO(("Physical memory size: %d, usable memory size: %d\n",
+		phys_size, bus->ramsize));
+	DHD_INFO(("Vars are at %d, orig varsize is %d\n",
+		varaddr, varsize));
+	varsize = ((phys_size - 4) - varaddr);
+
+	/*
+	 * Determine the length token:
+	 * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
+	 */
+	if (bcmerror) {
+		varsizew = 0;
+	} else {
+		varsizew = varsize / 4;
+		varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
+		varsizew = htol32(varsizew);
+	}
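+	/* Worked example (annotation, illustrative numbers): for a 512-byte
+	 * vars area, varsize = 0x200 so varsizew = 0x80 words; the token is
+	 * then (~0x80 << 16) | 0x80 = 0xFF7F0080 before the htol32 swap.
+	 * This complement pattern is what dhdsdio_readshared() later tests
+	 * for to decide whether the firmware has replaced the token.
+	 */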
+
+	DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew));
+
+	/* Write the length token to the last word */
+	bcmerror = dhdsdio_membytes(bus, TRUE, (phys_size - 4),
+		(uint8*)&varsizew, 4);
+
+	return bcmerror;
+}
+
+static int
+dhdsdio_download_state(dhd_bus_t *bus, bool enter)
+{
+	uint retries;
+	int bcmerror = 0;
+	int foundcr4 = 0;
+
+	if (!bus->sih)
+		return BCME_ERROR;
+	/* To enter download state, disable ARM and reset SOCRAM.
+	 * To exit download state, simply reset ARM (default is RAM boot).
+	 */
+	if (enter) {
+		bus->alp_only = TRUE;
+
+		if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+		    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+			if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+				foundcr4 = 1;
+			} else {
+				DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+		}
+
+		if (!foundcr4) {
+			si_core_disable(bus->sih, 0);
+			if (bcmsdh_regfail(bus->sdh)) {
+				bcmerror = BCME_SDIO_ERROR;
+				goto fail;
+			}
+
+			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+			si_core_reset(bus->sih, 0, 0);
+			if (bcmsdh_regfail(bus->sdh)) {
+				DHD_ERROR(("%s: Failure trying reset SOCRAM core?\n",
+				           __FUNCTION__));
+				bcmerror = BCME_SDIO_ERROR;
+				goto fail;
+			}
+
+			/* Disable remap for download */
+			if (REMAP_ENAB(bus) && si_socdevram_remap_isenb(bus->sih))
+				dhdsdio_devram_remap(bus, FALSE);
+
+			/* Clear the top bit of memory */
+			if (bus->ramsize) {
+				uint32 zeros = 0;
+				if (dhdsdio_membytes(bus, TRUE, bus->ramsize - 4,
+				                     (uint8*)&zeros, 4) < 0) {
+					bcmerror = BCME_SDIO_ERROR;
+					goto fail;
+				}
+			}
+		} else {
+			/* For CR4:
+			 * - Halt ARM
+			 * - Remove ARM reset
+			 * - Read RAM base address [0x18_0000]
+			 * - [next] Download firmware
+			 * - [done at else] Populate the reset vector
+			 * - [done at else] Remove ARM halt
+			 */
+			/* Halt ARM & remove reset */
+			si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
+		}
+	} else {
+		if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+			if (!si_iscoreup(bus->sih)) {
+				DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+
+			if ((bcmerror = dhdsdio_write_vars(bus))) {
+				DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
+				goto fail;
+			}
+
+			/* Enable remap before ARM reset but after vars.
+			 * No backplane access in remap mode
+			 */
+			if (REMAP_ENAB(bus) && !si_socdevram_remap_isenb(bus->sih))
+				dhdsdio_devram_remap(bus, TRUE);
+
+			if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
+			    !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
+				DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+			W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries);
+
+			if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+			    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+		} else {
+			/* cr4 has no socram, but tcm's */
+			/* write vars */
+			if ((bcmerror = dhdsdio_write_vars(bus))) {
+				DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
+				goto fail;
+			}
+
+			if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
+			    !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
+				DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+			W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries);
+
+			/* switch back to arm core again */
+			if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
+				DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
+				bcmerror = BCME_ERROR;
+				goto fail;
+			}
+			/* write address 0 with reset instruction */
+			bcmerror = dhdsdio_membytes(bus, TRUE, 0,
+				(uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
+
+			/* now remove reset and halt and continue to run CR4 */
+		}
+
+		si_core_reset(bus->sih, 0, 0);
+		if (bcmsdh_regfail(bus->sdh)) {
+			DHD_ERROR(("%s: Failure trying to reset ARM core?\n", __FUNCTION__));
+			bcmerror = BCME_SDIO_ERROR;
+			goto fail;
+		}
+
+		/* Allow HT Clock now that the ARM is running. */
+		bus->alp_only = FALSE;
+
+		bus->dhd->busstate = DHD_BUS_LOAD;
+	}
+
+fail:
+	/* Always return to SDIOD core */
+	if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0))
+		si_setcore(bus->sih, SDIOD_CORE_ID, 0);
+
+	return bcmerror;
+}
+
+int
+dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+                 void *params, int plen, void *arg, int len, bool set)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	int val_size;
+	uint32 actionid;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get MUST have return space */
+	ASSERT(set || (arg && len));
+
+	/* Set does NOT take qualifiers */
+	ASSERT(!set || (!params && !plen));
+
+	/* Look up var locally; if not found pass to host driver */
+	if ((vi = bcm_iovar_lookup(dhdsdio_iovars, name)) == NULL) {
+		dhd_os_sdlock(bus->dhd);
+
+		BUS_WAKE(bus);
+
+		/* Turn on clock in case SD command needs backplane */
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+		bcmerror = bcmsdh_iovar_op(bus->sdh, name, params, plen, arg, len, set);
+
+		/* Check for bus configuration changes of interest */
+
+		/* If it was divisor change, read the new one */
+		if (set && strcmp(name, "sd_divisor") == 0) {
+			if (bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
+			                    &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) {
+				bus->sd_divisor = -1;
+				DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name));
+			} else {
+				DHD_INFO(("%s: noted %s update, value now %d\n",
+				          __FUNCTION__, name, bus->sd_divisor));
+			}
+		}
+		/* If it was a mode change, read the new one */
+		if (set && strcmp(name, "sd_mode") == 0) {
+			if (bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
+			                    &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) {
+				bus->sd_mode = -1;
+				DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name));
+			} else {
+				DHD_INFO(("%s: noted %s update, value now %d\n",
+				          __FUNCTION__, name, bus->sd_mode));
+			}
+		}
+		/* Similar check for blocksize change */
+		if (set && strcmp(name, "sd_blocksize") == 0) {
+			int32 fnum = 2;
+			if (bcmsdh_iovar_op(bus->sdh, "sd_blocksize", &fnum, sizeof(int32),
+			                    &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
+				bus->blocksize = 0;
+				DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
+			} else {
+				DHD_INFO(("%s: noted %s update, value now %d\n",
+				          __FUNCTION__, "sd_blocksize", bus->blocksize));
+
+				dhdsdio_tune_fifoparam(bus);
+			}
+		}
+		bus->roundup = MIN(max_roundup, bus->blocksize);
+
+		if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+			bus->activity = FALSE;
+			dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+		}
+
+		dhd_os_sdunlock(bus->dhd);
+		goto exit;
+	}
+
+	DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+	         name, (set ? "set" : "get"), len, plen));
+
+	/* set up 'params' pointer in case this is a set command so that
+	 * the convenience int and bool code can be common to set and get
+	 */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		/* all other types are integer sized */
+		val_size = sizeof(int);
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	bcmerror = dhdsdio_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+	return bcmerror;
+}
+
+void
+dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
+{
+	osl_t *osh;
+	uint32 local_hostintmask;
+	uint8 saveclk;
+	uint retries;
+	int err;
+	bool wlfc_enabled = FALSE;
+
+	if (!bus->dhd)
+		return;
+
+	osh = bus->dhd->osh;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	bcmsdh_waitlockfree(bus->sdh);
+
+	if (enforce_mutex)
+		dhd_os_sdlock(bus->dhd);
+
+	if ((bus->dhd->busstate == DHD_BUS_DOWN) || bus->dhd->hang_was_sent) {
+		/* If the firmware has already hung, disable any interrupts */
+		bus->dhd->busstate = DHD_BUS_DOWN;
+		bus->hostintmask = 0;
+		bcmsdh_intr_disable(bus->sdh);
+	} else {
+
+		BUS_WAKE(bus);
+
+		/* Change our idea of bus state */
+		bus->dhd->busstate = DHD_BUS_DOWN;
+
+		if (KSO_ENAB(bus)) {
+			/* Enable clock for device interrupts */
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+			/* Disable and clear interrupts at the chip level also */
+			W_SDREG(0, &bus->regs->hostintmask, retries);
+			local_hostintmask = bus->hostintmask;
+			bus->hostintmask = 0;
+
+			/* Force clocks on backplane to be sure F2 interrupt propagates */
+			saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+				SBSDIO_FUNC1_CHIPCLKCSR, &err);
+			if (!err) {
+				bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+				                 (saveclk | SBSDIO_FORCE_HT), &err);
+			}
+			if (err) {
+				DHD_ERROR(("%s: Failed to force clock for F2: err %d\n",
+				            __FUNCTION__, err));
+			}
+
+			/* Turn off the bus (F2), free any pending packets */
+			DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+			bcmsdh_intr_disable(bus->sdh);
+			bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN,
+				SDIO_FUNC_ENABLE_1, NULL);
+
+			/* Clear any pending interrupts now that F2 is disabled */
+			W_SDREG(local_hostintmask, &bus->regs->intstatus, retries);
+		}
+
+		/* Turn off the backplane clock (only) */
+		dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+	}
+
+#ifdef PROP_TXSTATUS
+	wlfc_enabled = (dhd_wlfc_cleanup_txq(bus->dhd, NULL, 0) != WLFC_UNSUPPORTED);
+#endif
+	if (!wlfc_enabled) {
+#ifdef DHDTCPACK_SUPPRESS
+		/* Clean tcp_ack_info_tbl in order to prevent access to flushed
+		 * packets when a new packet arrives from the network stack.
+		 */
+		dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+		/* Clear the data packet queues */
+		pktq_flush(osh, &bus->txq, TRUE, NULL, 0);
+	}
+
+	/* Clear any held glomming stuff */
+	if (bus->glomd)
+		PKTFREE(osh, bus->glomd, FALSE);
+
+	if (bus->glom)
+		PKTFREE(osh, bus->glom, FALSE);
+
+	bus->glom = bus->glomd = NULL;
+
+	/* Clear rx control and wake any waiters */
+	bus->rxlen = 0;
+	dhd_os_ioctl_resp_wake(bus->dhd);
+
+	/* Reset some F2 state stuff */
+	bus->rxskip = FALSE;
+	bus->tx_seq = bus->rx_seq = 0;
+
+	bus->tx_max = 4;
+
+	if (enforce_mutex)
+		dhd_os_sdunlock(bus->dhd);
+}
+
+#if defined(BCMSDIOH_TXGLOM) && defined(BCMSDIOH_STD)
+extern uint sd_txglom;
+#endif
+void
+dhd_txglom_enable(dhd_pub_t *dhdp, bool enable)
+{
+	/* Host txglom cannot be enabled by default: some platforms have no
+	 * (or poor) ADMA support, and txglom would trigger kernel assertions
+	 * there (e.g. the panda board).
+	 */
+	dhd_bus_t *bus = dhdp->bus;
+#ifdef BCMSDIOH_TXGLOM
+	char buf[256];
+	uint32 rxglom;
+	int32 ret;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BCMSDIOH_STD
+	if (enable)
+		enable = sd_txglom;
+#endif /* BCMSDIOH_STD */
+
+	if (enable) {
+		rxglom = 1;
+		memset(buf, 0, sizeof(buf));
+		bcm_mkiovar("bus:rxglom", (void *)&rxglom, 4, buf, sizeof(buf));
+		ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+		if (ret >= 0)
+			bus->txglom_enable = TRUE;
+		else {
+#ifdef BCMSDIOH_STD
+			sd_txglom = 0;
+#endif /* BCMSDIOH_STD */
+			bus->txglom_enable = FALSE;
+		}
+	} else
+#endif /* BCMSDIOH_TXGLOM */
+		bus->txglom_enable = FALSE;
+}
+
+int
+dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	dhd_timeout_t tmo;
+	uint retries = 0;
+	uint8 ready, enable;
+	int err, ret = 0;
+	uint8 saveclk;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	ASSERT(bus->dhd);
+	if (!bus->dhd)
+		return 0;
+
+	if (enforce_mutex)
+		dhd_os_sdlock(bus->dhd);
+
+	/* Make sure backplane clock is on, needed to generate F2 interrupt */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+	if (bus->clkstate != CLK_AVAIL) {
+		DHD_ERROR(("%s: clock state is wrong. state = %d\n", __FUNCTION__, bus->clkstate));
+		ret = -1;
+		goto exit;
+	}
+
+	/* Force clocks on backplane to be sure F2 interrupt propagates */
+	saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+	if (!err) {
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+		                 (saveclk | SBSDIO_FORCE_HT), &err);
+	}
+	if (err) {
+		DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err));
+		ret = -1;
+		goto exit;
+	}
+
+	/* Enable function 2 (frame transfers) */
+	W_SDREG((SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT),
+	        &bus->regs->tosbmailboxdata, retries);
+	enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
+
+	bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL);
+
+	/* Give the dongle some time to do its thing and set IOR2 */
+	dhd_timeout_start(&tmo, DHD_WAIT_F2RDY * 1000);
+
+	ready = 0;
+	while (ready != enable && !dhd_timeout_expired(&tmo))
+		ready = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL);
+
+	DHD_ERROR(("%s: enable 0x%02x, ready 0x%02x (waited %uus)\n",
+	          __FUNCTION__, enable, ready, tmo.elapsed));
+
+	/* If F2 successfully enabled, set core and enable interrupts */
+	if (ready == enable) {
+		/* Make sure we're talking to the core. */
+		if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)))
+			bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0);
+		ASSERT(bus->regs != NULL);
+
+		/* Set up the interrupt mask and enable interrupts */
+		bus->hostintmask = HOSTINTMASK;
+		/* corerev 4 could use the newer interrupt logic to detect the frames */
+		if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 4) &&
+			(bus->rxint_mode != SDIO_DEVICE_HMB_RXINT)) {
+			bus->hostintmask &= ~I_HMB_FRAME_IND;
+			bus->hostintmask |= I_XMTDATA_AVAIL;
+		}
+		W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries);
+
+		if (bus->sih->buscorerev < 15) {
+			bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK,
+				(uint8)watermark, &err);
+		}
+
+		/* Set bus state according to enable result */
+		dhdp->busstate = DHD_BUS_DATA;
+
+		/* bcmsdh_intr_unmask(bus->sdh); */
+
+		bus->intdis = FALSE;
+		if (bus->intr) {
+			DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__));
+			bcmsdh_intr_enable(bus->sdh);
+		} else {
+			DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+			bcmsdh_intr_disable(bus->sdh);
+		}
+
+	} else {
+		/* Disable F2 again */
+		enable = SDIO_FUNC_ENABLE_1;
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL);
+	}
+
+	if (dhdsdio_sr_cap(bus)) {
+		dhdsdio_sr_init(bus);
+		/* Mask the chip-active interrupt permanently */
+		bus->hostintmask &= ~I_CHIPACTIVE;
+		W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries);
+		DHD_INFO(("%s: disable I_CHIPACTIVE in hostintmask[0x%08x]\n",
+			__FUNCTION__, bus->hostintmask));
+	} else {
+		bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1,
+			SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
+	}
+
+	/* If we didn't come up, turn off backplane clock */
+	if (dhdp->busstate != DHD_BUS_DATA)
+		dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+
+exit:
+	if (enforce_mutex)
+		dhd_os_sdunlock(bus->dhd);
+
+	return ret;
+}
+
+static void
+dhdsdio_rxfail(dhd_bus_t *bus, bool abort, bool rtx)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	sdpcmd_regs_t *regs = bus->regs;
+	uint retries = 0;
+	uint16 lastrbc;
+	uint8 hi, lo;
+	int err;
+
+	DHD_ERROR(("%s: %sterminate frame%s\n", __FUNCTION__,
+	           (abort ? "abort command, " : ""), (rtx ? ", send NAK" : "")));
+
+	if (!KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+		return;
+	}
+
+	if (abort) {
+		bcmsdh_abort(sdh, SDIO_FUNC_2);
+	}
+
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM, &err);
+	if (err) {
+		DHD_ERROR(("%s: SBSDIO_FUNC1_FRAMECTRL cmd err\n", __FUNCTION__));
+		goto fail;
+	}
+	bus->f1regdata++;
+
+	/* Wait until the packet has been flushed (device/FIFO stable) */
+	for (lastrbc = retries = 0xffff; retries > 0; retries--) {
+		hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCHI, NULL);
+		lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCLO, &err);
+		if (err) {
+			DHD_ERROR(("%s: SBSDIO_FUNC1_RFRAMEBCLO cmd err\n", __FUNCTION__));
+			goto fail;
+		}
+
+		bus->f1regdata += 2;
+
+		if ((hi == 0) && (lo == 0))
+			break;
+
+		if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
+			DHD_ERROR(("%s: count growing: last 0x%04x now 0x%04x\n",
+			           __FUNCTION__, lastrbc, ((hi << 8) + lo)));
+		}
+		lastrbc = (hi << 8) + lo;
+	}
+
+	if (!retries) {
+		DHD_ERROR(("%s: count never zeroed: last 0x%04x\n", __FUNCTION__, lastrbc));
+	} else {
+		DHD_INFO(("%s: flush took %d iterations\n", __FUNCTION__, (0xffff - retries)));
+	}
+
+	if (rtx) {
+		bus->rxrtx++;
+		W_SDREG(SMB_NAK, &regs->tosbmailbox, retries);
+		bus->f1regdata++;
+		if (retries <= retry_limit) {
+			bus->rxskip = TRUE;
+		}
+	}
+
+	/* Clear partial in any case */
+	bus->nextlen = 0;
+
+fail:
+	/* If we can't reach the device, signal failure */
+	if (err || bcmsdh_regfail(sdh))
+		bus->dhd->busstate = DHD_BUS_DOWN;
+}
+
+static void
+dhdsdio_read_control(dhd_bus_t *bus, uint8 *hdr, uint len, uint doff)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	uint rdlen, pad;
+
+	int sdret;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Control data already received in aligned rxctl */
+	if ((bus->bus == SPI_BUS) && (!bus->usebufpool))
+		goto gotpkt;
+
+	ASSERT(bus->rxbuf);
+	/* Set rxctl for frame (w/optional alignment) */
+	bus->rxctl = bus->rxbuf;
+	if (dhd_alignctl) {
+		bus->rxctl += firstread;
+		if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN)))
+			bus->rxctl += (DHD_SDALIGN - pad);
+		bus->rxctl -= firstread;
+	}
+	ASSERT(bus->rxctl >= bus->rxbuf);
+
+	/* Copy the already-read portion over */
+	bcopy(hdr, bus->rxctl, firstread);
+	if (len <= firstread)
+		goto gotpkt;
+
+	/* Copy the full data pkt in gSPI case and process ioctl. */
+	if (bus->bus == SPI_BUS) {
+		bcopy(hdr, bus->rxctl, len);
+		goto gotpkt;
+	}
+
+	/* Raise rdlen to next SDIO block to avoid tail command */
+	rdlen = len - firstread;
+	if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+		pad = bus->blocksize - (rdlen % bus->blocksize);
+		if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+		    ((len + pad) < bus->dhd->maxctl))
+			rdlen += pad;
+	} else if (rdlen % DHD_SDALIGN) {
+		rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+	}
+
+	/* Satisfy length-alignment requirements */
+	if (forcealign && (rdlen & (ALIGNMENT - 1)))
+		rdlen = ROUNDUP(rdlen, ALIGNMENT);
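+	/* Example (annotation, illustrative numbers): with blocksize 64 and
+	 * rdlen 70, pad = 64 - (70 % 64) = 58; when pad <= bus->roundup the
+	 * read is padded to 128 bytes so the transfer ends on a block
+	 * boundary and no tail command is needed.
+	 */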
+
+	/* Drop if the read is too big or it exceeds our maximum */
+	if ((rdlen + firstread) > bus->dhd->maxctl) {
+		DHD_ERROR(("%s: %d-byte control read exceeds %d-byte buffer\n",
+		           __FUNCTION__, rdlen, bus->dhd->maxctl));
+		bus->dhd->rx_errors++;
+		dhdsdio_rxfail(bus, FALSE, FALSE);
+		goto done;
+	}
+
+	if ((len - doff) > bus->dhd->maxctl) {
+		DHD_ERROR(("%s: %d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
+		           __FUNCTION__, len, (len - doff), bus->dhd->maxctl));
+		bus->dhd->rx_errors++; bus->rx_toolong++;
+		dhdsdio_rxfail(bus, FALSE, FALSE);
+		goto done;
+	}
+
+	/* Read remainder of frame body into the rxctl buffer */
+	sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+	                            (bus->rxctl + firstread), rdlen, NULL, NULL, NULL);
+	bus->f2rxdata++;
+	ASSERT(sdret != BCME_PENDING);
+
+	/* Control frame failures need retransmission */
+	if (sdret < 0) {
+		DHD_ERROR(("%s: read %d control bytes failed: %d\n", __FUNCTION__, rdlen, sdret));
+		bus->rxc_errors++; /* dhd.rx_ctlerrs is higher level */
+		dhdsdio_rxfail(bus, TRUE, TRUE);
+		goto done;
+	}
+
+gotpkt:
+
+#ifdef DHD_DEBUG
+	if (DHD_BYTES_ON() && DHD_CTL_ON()) {
+		prhex("RxCtrl", bus->rxctl, len);
+	}
+#endif
+
+	/* Point to valid data and indicate its length */
+	bus->rxctl += doff;
+	bus->rxlen = len - doff;
+
+done:
+	/* Awake any waiters */
+	dhd_os_ioctl_resp_wake(bus->dhd);
+}
+
+int
+dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, uint reorder_info_len,
+	void **pkt, uint32 *pkt_count);
+
+static uint8
+dhdsdio_rxglom(dhd_bus_t *bus, uint8 rxseq)
+{
+	uint16 dlen, totlen;
+	uint8 *dptr, num = 0;
+
+	uint16 sublen, check;
+	void *pfirst, *plast, *pnext;
+	void * list_tail[DHD_MAX_IFS] = { NULL };
+	void * list_head[DHD_MAX_IFS] = { NULL };
+	uint8 idx;
+	osl_t *osh = bus->dhd->osh;
+
+	int errcode;
+	uint8 chan, seq, doff, sfdoff;
+	uint8 txmax;
+	uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN];
+	uint reorder_info_len;
+
+	int ifidx = 0;
+	bool usechain = bus->use_rxchain;
+
+	/* If packets, issue read(s) and send up packet chain */
+	/* Return sequence numbers consumed? */
+
+	DHD_TRACE(("dhdsdio_rxglom: start: glomd %p glom %p\n", bus->glomd, bus->glom));
+
+	/* If there's a descriptor, generate the packet chain */
+	if (bus->glomd) {
+		dhd_os_sdlock_rxq(bus->dhd);
+
+		pfirst = plast = pnext = NULL;
+		dlen = (uint16)PKTLEN(osh, bus->glomd);
+		dptr = PKTDATA(osh, bus->glomd);
+		if (!dlen || (dlen & 1)) {
+			DHD_ERROR(("%s: bad glomd len (%d), ignore descriptor\n",
+			           __FUNCTION__, dlen));
+			dlen = 0;
+		}
+
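+		/* The glom descriptor payload is a packed array of
+		 * little-endian uint16 subframe lengths, one per subframe of
+		 * the upcoming superframe; walk it and pre-allocate a packet
+		 * for each entry.
+		 */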
+		for (totlen = num = 0; dlen; num++) {
+			/* Get (and move past) next length */
+			sublen = ltoh16_ua(dptr);
+			dlen -= sizeof(uint16);
+			dptr += sizeof(uint16);
+			if ((sublen < SDPCM_HDRLEN) ||
+			    ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
+				DHD_ERROR(("%s: descriptor len %d bad: %d\n",
+				           __FUNCTION__, num, sublen));
+				pnext = NULL;
+				break;
+			}
+			if (sublen % DHD_SDALIGN) {
+				DHD_ERROR(("%s: sublen %d not a multiple of %d\n",
+				           __FUNCTION__, sublen, DHD_SDALIGN));
+				usechain = FALSE;
+			}
+			totlen += sublen;
+
+			/* For last frame, adjust read len so total is a block multiple */
+			if (!dlen) {
+				sublen += (ROUNDUP(totlen, bus->blocksize) - totlen);
+				totlen = ROUNDUP(totlen, bus->blocksize);
+			}
+
+			/* Allocate/chain packet for next subframe */
+			if ((pnext = PKTGET(osh, sublen + DHD_SDALIGN, FALSE)) == NULL) {
+				DHD_ERROR(("%s: PKTGET failed, num %d len %d\n",
+				           __FUNCTION__, num, sublen));
+				break;
+			}
+			ASSERT(!PKTLINK(pnext));
+			if (!pfirst) {
+				ASSERT(!plast);
+				pfirst = plast = pnext;
+			} else {
+				ASSERT(plast);
+				PKTSETNEXT(osh, plast, pnext);
+				plast = pnext;
+			}
+
+			/* Adhere to start alignment requirements */
+			PKTALIGN(osh, pnext, sublen, DHD_SDALIGN);
+		}
+
+		/* If all allocations succeeded, save packet chain in bus structure */
+		if (pnext) {
+			DHD_GLOM(("%s: allocated %d-byte packet chain for %d subframes\n",
+			          __FUNCTION__, totlen, num));
+			if (DHD_GLOM_ON() && bus->nextlen) {
+				if (totlen != bus->nextlen) {
+					DHD_GLOM(("%s: glomdesc mismatch: nextlen %d glomdesc %d "
+					          "rxseq %d\n", __FUNCTION__, bus->nextlen,
+					          totlen, rxseq));
+				}
+			}
+			bus->glom = pfirst;
+			pfirst = pnext = NULL;
+		} else {
+			if (pfirst)
+				PKTFREE(osh, pfirst, FALSE);
+			bus->glom = NULL;
+			num = 0;
+		}
+
+		/* Done with descriptor packet */
+		PKTFREE(osh, bus->glomd, FALSE);
+		bus->glomd = NULL;
+		bus->nextlen = 0;
+
+		dhd_os_sdunlock_rxq(bus->dhd);
+	}
+
+	/* Ok -- either we just generated a packet chain, or had one from before */
+	if (bus->glom) {
+		if (DHD_GLOM_ON()) {
+			DHD_GLOM(("%s: attempt superframe read, packet chain:\n", __FUNCTION__));
+			for (pnext = bus->glom; pnext; pnext = PKTNEXT(osh, pnext)) {
+				DHD_GLOM(("    %p: %p len 0x%04x (%d)\n",
+				          pnext, (uint8*)PKTDATA(osh, pnext),
+				          PKTLEN(osh, pnext), PKTLEN(osh, pnext)));
+			}
+		}
+
+		pfirst = bus->glom;
+		dlen = (uint16)pkttotlen(osh, pfirst);
+
+		/* Do an SDIO read for the superframe.  Configurable iovar to
+		 * read directly into the chained packet, or allocate a large
+		 * packet and copy into the chain.
+		 */
+		if (usechain) {
+			errcode = dhd_bcmsdh_recv_buf(bus,
+			                              bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
+			                              F2SYNC, (uint8*)PKTDATA(osh, pfirst),
+			                              dlen, pfirst, NULL, NULL);
+		} else if (bus->dataptr) {
+			errcode = dhd_bcmsdh_recv_buf(bus,
+			                              bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
+			                              F2SYNC, bus->dataptr,
+			                              dlen, NULL, NULL, NULL);
+			sublen = (uint16)pktfrombuf(osh, pfirst, 0, dlen, bus->dataptr);
+			if (sublen != dlen) {
+				DHD_ERROR(("%s: FAILED TO COPY, dlen %d sublen %d\n",
+				           __FUNCTION__, dlen, sublen));
+				errcode = -1;
+			}
+			pnext = NULL;
+		} else {
+			DHD_ERROR(("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n", dlen));
+			errcode = -1;
+		}
+		bus->f2rxdata++;
+		ASSERT(errcode != BCME_PENDING);
+
+		/* On failure, kill the superframe, allow a couple retries */
+		if (errcode < 0) {
+			DHD_ERROR(("%s: glom read of %d bytes failed: %d\n",
+			           __FUNCTION__, dlen, errcode));
+			bus->dhd->rx_errors++;
+
+			if (bus->glomerr++ < 3) {
+				dhdsdio_rxfail(bus, TRUE, TRUE);
+			} else {
+				bus->glomerr = 0;
+				dhdsdio_rxfail(bus, TRUE, FALSE);
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE(osh, bus->glom, FALSE);
+				dhd_os_sdunlock_rxq(bus->dhd);
+				bus->rxglomfail++;
+				bus->glom = NULL;
+			}
+			return 0;
+		}
+
+#ifdef DHD_DEBUG
+		if (DHD_GLOM_ON()) {
+			prhex("SUPERFRAME", PKTDATA(osh, pfirst),
+			      MIN(PKTLEN(osh, pfirst), 48));
+		}
+#endif
+
+		/* Validate the superframe header */
+		dptr = (uint8 *)PKTDATA(osh, pfirst);
+		sublen = ltoh16_ua(dptr);
+		check = ltoh16_ua(dptr + sizeof(uint16));
+
+		chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+		seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
+		bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
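+		/* Annotation: nextlen appears to be advertised in 16-byte
+		 * units, hence the << 4 when comparing against MAX_RX_DATASZ
+		 * below.
+		 */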
+		if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+			DHD_INFO(("%s: got frame w/nextlen too large (%d) seq %d\n",
+			          __FUNCTION__, bus->nextlen, seq));
+			bus->nextlen = 0;
+		}
+		doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+		txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+
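+		/* HW frame tag (annotation): the first two 16-bit words are
+		 * the frame length and its one's complement, so
+		 * (uint16)~(sublen ^ check) is zero exactly when
+		 * check == ~sublen; anything else is header corruption.
+		 */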
+		errcode = 0;
+		if ((uint16)~(sublen^check)) {
+			DHD_ERROR(("%s (superframe): HW hdr error: len/check 0x%04x/0x%04x\n",
+			           __FUNCTION__, sublen, check));
+			errcode = -1;
+		} else if (ROUNDUP(sublen, bus->blocksize) != dlen) {
+			DHD_ERROR(("%s (superframe): len 0x%04x, rounded 0x%04x, expect 0x%04x\n",
+			           __FUNCTION__, sublen, ROUNDUP(sublen, bus->blocksize), dlen));
+			errcode = -1;
+		} else if (SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]) != SDPCM_GLOM_CHANNEL) {
+			DHD_ERROR(("%s (superframe): bad channel %d\n", __FUNCTION__,
+			           SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN])));
+			errcode = -1;
+		} else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) {
+			DHD_ERROR(("%s (superframe): got second descriptor?\n", __FUNCTION__));
+			errcode = -1;
+		} else if ((doff < SDPCM_HDRLEN) ||
+		           (doff > (PKTLEN(osh, pfirst) - SDPCM_HDRLEN))) {
+			DHD_ERROR(("%s (superframe): Bad data offset %d: HW %d pkt %d min %d\n",
+				__FUNCTION__, doff, sublen, PKTLEN(osh, pfirst),
+				SDPCM_HDRLEN));
+			errcode = -1;
+		}
+
+		/* Check sequence number of superframe SW header */
+		if (rxseq != seq) {
+			DHD_INFO(("%s: (superframe) rx_seq %d, expected %d\n",
+			          __FUNCTION__, seq, rxseq));
+			bus->rx_badseq++;
+			rxseq = seq;
+		}
+
+		/* Check window for sanity */
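+		/* The dongle's advertised tx window should stay within a plausible
+		 * distance of tx_seq (mod 256); a gap beyond 0x70 is treated as
+		 * corruption and the previous tx_max is kept.
+		 */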
+		if ((uint8)(txmax - bus->tx_seq) > 0x70) {
+			DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+			           __FUNCTION__, txmax, bus->tx_seq));
+			txmax = bus->tx_max;
+		}
+		bus->tx_max = txmax;
+
+		/* Remove superframe header, remember offset */
+		PKTPULL(osh, pfirst, doff);
+		sfdoff = doff;
+
+		/* Validate all the subframe headers */
+		for (num = 0, pnext = pfirst; pnext && !errcode;
+		     num++, pnext = PKTNEXT(osh, pnext)) {
+			dptr = (uint8 *)PKTDATA(osh, pnext);
+			dlen = (uint16)PKTLEN(osh, pnext);
+			sublen = ltoh16_ua(dptr);
+			check = ltoh16_ua(dptr + sizeof(uint16));
+			chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+			doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+#ifdef DHD_DEBUG
+			if (DHD_GLOM_ON()) {
+				prhex("subframe", dptr, 32);
+			}
+#endif
+
+			if ((uint16)~(sublen^check)) {
+				DHD_ERROR(("%s (subframe %d): HW hdr error: "
+				           "len/check 0x%04x/0x%04x\n",
+				           __FUNCTION__, num, sublen, check));
+				errcode = -1;
+			} else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN)) {
+				DHD_ERROR(("%s (subframe %d): length mismatch: "
+				           "len 0x%04x, expect 0x%04x\n",
+				           __FUNCTION__, num, sublen, dlen));
+				errcode = -1;
+			} else if ((chan != SDPCM_DATA_CHANNEL) &&
+			           (chan != SDPCM_EVENT_CHANNEL)) {
+				DHD_ERROR(("%s (subframe %d): bad channel %d\n",
+				           __FUNCTION__, num, chan));
+				errcode = -1;
+			} else if ((doff < SDPCM_HDRLEN) || (doff > sublen)) {
+				DHD_ERROR(("%s (subframe %d): Bad data offset %d: HW %d min %d\n",
+				           __FUNCTION__, num, doff, sublen, SDPCM_HDRLEN));
+				errcode = -1;
+			}
+		}
+
+		if (errcode) {
+			/* Terminate frame on error, request a couple retries */
+			if (bus->glomerr++ < 3) {
+				/* Restore superframe header space */
+				PKTPUSH(osh, pfirst, sfdoff);
+				dhdsdio_rxfail(bus, TRUE, TRUE);
+			} else {
+				bus->glomerr = 0;
+				dhdsdio_rxfail(bus, TRUE, FALSE);
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE(osh, bus->glom, FALSE);
+				dhd_os_sdunlock_rxq(bus->dhd);
+				bus->rxglomfail++;
+				bus->glom = NULL;
+			}
+			bus->nextlen = 0;
+			return 0;
+		}
+
+		/* Basic SD framing looks ok - process each packet (header) */
+		bus->glom = NULL;
+		plast = NULL;
+
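+		/* Detach each subframe from the superframe chain, strip its SDPCM
+		 * header, and re-link it into a per-interface list
+		 * (list_head/list_tail) for batched delivery to the stack below.
+		 */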
+		dhd_os_sdlock_rxq(bus->dhd);
+		for (num = 0; pfirst; rxseq++, pfirst = pnext) {
+			pnext = PKTNEXT(osh, pfirst);
+			PKTSETNEXT(osh, pfirst, NULL);
+
+			dptr = (uint8 *)PKTDATA(osh, pfirst);
+			sublen = ltoh16_ua(dptr);
+			chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+			seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
+			doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+
+			DHD_GLOM(("%s: Get subframe %d, %p(%p/%d), sublen %d chan %d seq %d\n",
+			          __FUNCTION__, num, pfirst, PKTDATA(osh, pfirst),
+			          PKTLEN(osh, pfirst), sublen, chan, seq));
+
+			ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL));
+
+			if (rxseq != seq) {
+				DHD_GLOM(("%s: rx_seq %d, expected %d\n",
+				          __FUNCTION__, seq, rxseq));
+				bus->rx_badseq++;
+				rxseq = seq;
+			}
+
+#ifdef DHD_DEBUG
+			if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+				prhex("Rx Subframe Data", dptr, dlen);
+			}
+#endif
+
+			PKTSETLEN(osh, pfirst, sublen);
+			PKTPULL(osh, pfirst, doff);
+
+			reorder_info_len = sizeof(reorder_info_buf);
+
+			if (PKTLEN(osh, pfirst) == 0) {
+				PKTFREE(bus->dhd->osh, pfirst, FALSE);
+				continue;
+			} else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pfirst, reorder_info_buf,
+				&reorder_info_len) != 0) {
+				DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__));
+				bus->dhd->rx_errors++;
+				PKTFREE(osh, pfirst, FALSE);
+				continue;
+			}
+			if (reorder_info_len) {
+				uint32 free_buf_count;
+				void *ppfirst;
+
+				ppfirst = pfirst;
+				/* Reordering info from the firmware */
+				dhd_process_pkt_reorder_info(bus->dhd, reorder_info_buf,
+					reorder_info_len, &ppfirst, &free_buf_count);
+
+				if (free_buf_count == 0) {
+					continue;
+				} else {
+					void *temp;
+
+					/*  go to the end of the chain and attach the pnext there */
+					temp = ppfirst;
+					while (PKTNEXT(osh, temp) != NULL) {
+						temp = PKTNEXT(osh, temp);
+					}
+					pfirst = temp;
+					if (list_tail[ifidx] == NULL)
+						list_head[ifidx] = ppfirst;
+					else
+						PKTSETNEXT(osh, list_tail[ifidx], ppfirst);
+					list_tail[ifidx] = pfirst;
+				}
+
+				num += (uint8)free_buf_count;
+			} else {
+				/* this packet will go up, link back into chain and count it */
+
+				if (list_tail[ifidx] == NULL) {
+					list_head[ifidx] = list_tail[ifidx] = pfirst;
+				} else {
+					PKTSETNEXT(osh, list_tail[ifidx], pfirst);
+					list_tail[ifidx] = pfirst;
+				}
+				num++;
+			}
+#ifdef DHD_DEBUG
+			if (DHD_GLOM_ON()) {
+				DHD_GLOM(("%s subframe %d to stack, %p(%p/%d) nxt/lnk %p/%p\n",
+				          __FUNCTION__, num, pfirst,
+				          PKTDATA(osh, pfirst), PKTLEN(osh, pfirst),
+				          PKTNEXT(osh, pfirst), PKTLINK(pfirst)));
+				prhex("", (uint8 *)PKTDATA(osh, pfirst),
+				      MIN(PKTLEN(osh, pfirst), 32));
+			}
+#endif /* DHD_DEBUG */
+		}
+		dhd_os_sdunlock_rxq(bus->dhd);
+
+		for (idx = 0; idx < DHD_MAX_IFS; idx++) {
+			if (list_head[idx]) {
+				void *temp;
+				uint8 cnt = 0;
+				temp = list_head[idx];
+				do {
+					temp = PKTNEXT(osh, temp);
+					cnt++;
+				} while (temp);
+				if (cnt) {
+					dhd_os_sdunlock(bus->dhd);
+					dhd_rx_frame(bus->dhd, idx, list_head[idx], cnt, 0);
+					dhd_os_sdlock(bus->dhd);
+				}
+			}
+		}
+		bus->rxglomframes++;
+		bus->rxglompkts += num;
+	}
+	return num;
+}
+
+
+#ifdef SDHOST3
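+/* PR94636 workaround: read the sdiod Tx DMA registers over the backplane and
+ * compare what appear to be the last (ld) and current (cd) descriptor
+ * pointers (low 13 bits of words 1 and 4).  When they match, the DMA is
+ * assumed idle and the caller stops reading frames.  The register layout is
+ * inferred from the offsets used below.
+ */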
+static bool
+dhdsdio_pr94636_WAR(dhd_bus_t *bus)
+{
+	uint cd = 0;
+	uint ld = 0;
+	int bcmerror = 0;
+	uint32 l_data[5];
+	uint32 l_addr = (0x18002200 & SBSDIO_SB_OFT_ADDR_MASK);
+
+	/* Read 20 bytes from 0x18002200
+	 * the sdiod Tx DMA registers address on AI Backplane.
+	*/
+	if ((bcmerror = bcmsdh_rwdata(bus->sdh, FALSE, l_addr, (uint8 *)&l_data[0], 20))) {
+		DHD_ERROR(("%s: bcmsdh_rwdata failed\n", __FUNCTION__));
+		return FALSE;
+	}
+	ld = l_data[1];
+	ld = ld & 0x00001fff;
+	cd = l_data[4];
+	cd = cd & 0x00001fff;
+	if (cd == ld)
+		return TRUE;
+	else
+		return FALSE;
+}
+#endif /* SDHOST3 */
+/* Read frames from the dongle; returns the number of frames read and sets
+ * *finished when the dongle indicates no more frames are available.
+ */
+static uint
+dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
+{
+	osl_t *osh = bus->dhd->osh;
+	bcmsdh_info_t *sdh = bus->sdh;
+
+	uint16 len, check;	/* Extracted hardware header fields */
+	uint8 chan, seq, doff;	/* Extracted software header fields */
+	uint8 fcbits;		/* Extracted fcbits from software header */
+	uint8 delta;
+
+	void *pkt;	/* Packet for event or data frames */
+	uint16 pad;	/* Number of pad bytes to read */
+	uint16 rdlen;	/* Total number of bytes to read */
+	uint8 rxseq;	/* Next sequence number to expect */
+	uint rxleft = 0;	/* Remaining number of frames allowed */
+	int sdret;	/* Return code from bcmsdh calls */
+	uint8 txmax;	/* Maximum tx sequence offered */
+	bool len_consistent; /* Result of comparing readahead len and len from hw-hdr */
+	uint8 *rxbuf;
+	int ifidx = 0;
+	uint rxcount = 0; /* Total frames read */
+	uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN];
+	uint reorder_info_len;
+	uint pkt_count;
+
+#if defined(DHD_DEBUG) || defined(SDTEST)
+	bool sdtest = FALSE;	/* To limit message spew from test mode */
+#endif
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	bus->readframes = TRUE;
+
+	if (!KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: KSO off\n", __FUNCTION__));
+		bus->readframes = FALSE;
+		return 0;
+	}
+
+	ASSERT(maxframes);
+
+#ifdef SDTEST
+	/* Allow pktgen to override maxframes */
+	if (bus->pktgen_count && (bus->pktgen_mode == DHD_PKTGEN_RECV)) {
+		maxframes = bus->pktgen_count;
+		sdtest = TRUE;
+	}
+#endif
+
+	/* Not finished unless we encounter no more frames indication */
+	*finished = FALSE;
+
+
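+	/* Read up to maxframes frames.  The loop also stops early if the dongle
+	 * NAKed a frame (rxskip set, awaiting retransmission), the bus goes
+	 * down, or a hang has already been reported to the upper layers.
+	 */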
+	for (rxseq = bus->rx_seq, rxleft = maxframes;
+	     !bus->rxskip && rxleft && bus->dhd->busstate != DHD_BUS_DOWN &&
+	     !bus->dhd->hang_was_sent;
+	     rxseq++, rxleft--) {
+#ifdef DHDTCPACK_SUP_DBG
+		if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_DELAYTX) {
+			if (bus->dotxinrx == FALSE)
+				DHD_ERROR(("%s %d: dotxinrx FALSE with tcpack_sup_mode %d\n",
+					__FUNCTION__, __LINE__, bus->dhd->tcpack_sup_mode));
+		}
+#ifdef DEBUG_COUNTER
+		else if (pktq_mlen(&bus->txq, ~bus->flowcontrol) > 0) {
+			tack_tbl.cnt[bus->dotxinrx ? 6 : 7]++;
+		}
+#endif /* DEBUG_COUNTER */
+#endif /* DHDTCPACK_SUP_DBG */
+		/* tx more to improve rx performance */
+		if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL)) {
+			dhdsdio_sendpendctl(bus);
+		} else if (bus->dotxinrx && (bus->clkstate == CLK_AVAIL) &&
+			!bus->fcstate && DATAOK(bus) &&
+			(pktq_mlen(&bus->txq, ~bus->flowcontrol) > bus->txinrx_thres)) {
+			dhdsdio_sendfromq(bus, dhd_txbound);
+#ifdef DHDTCPACK_SUPPRESS
+			/* In TCPACK_SUP_DELAYTX mode, do txinrx only if the bus txq
+			 * holds either
+			 * 1. any DATA packet to TX, or
+			 * 2. TCPACKs to TCPDATA PSH packets.
+			 */
+			bus->dotxinrx = (bus->dhd->tcpack_sup_mode == TCPACK_SUP_DELAYTX) ?
+				FALSE : TRUE;
+#endif
+		}
+
+		/* Handle glomming separately */
+		if (bus->glom || bus->glomd) {
+			uint8 cnt;
+			DHD_GLOM(("%s: calling rxglom: glomd %p, glom %p\n",
+			          __FUNCTION__, bus->glomd, bus->glom));
+			cnt = dhdsdio_rxglom(bus, rxseq);
+			DHD_GLOM(("%s: rxglom returned %d\n", __FUNCTION__, cnt));
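+			/* The loop header already advances rxseq and decrements
+			 * rxleft once per iteration, so adjust by (cnt - 1) here;
+			 * rxleft is clamped to 1 so the decrement cannot wrap.
+			 */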
+			rxseq += cnt - 1;
+			rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
+			continue;
+		}
+
+		/* Try doing single read if we can */
+		if (dhd_readahead && bus->nextlen) {
+			uint16 nextlen = bus->nextlen;
+			bus->nextlen = 0;
+
+			if (bus->bus == SPI_BUS) {
+				rdlen = len = nextlen;
+			} else {
+				rdlen = len = nextlen << 4;
+
+				/* Pad read to blocksize for efficiency */
+				if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+					pad = bus->blocksize - (rdlen % bus->blocksize);
+					if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+						((rdlen + pad + firstread) < MAX_RX_DATASZ))
+						rdlen += pad;
+				} else if (rdlen % DHD_SDALIGN) {
+					rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+				}
+			}
+
+			/* We use the bus->rxctl buffer on WinXP for initial control
+			 * packet receives; later we use buffer-poll for data as well as
+			 * control packets.  This is required because dhd receives the
+			 * full frame in gSPI, unlike SDIO.  After the frame is received
+			 * we have to distinguish whether it is a data or a non-data
+			 * frame.
+			 */
+			/* Allocate a packet buffer */
+			dhd_os_sdlock_rxq(bus->dhd);
+			if (!(pkt = PKTGET(osh, rdlen + DHD_SDALIGN, FALSE))) {
+				if (bus->bus == SPI_BUS) {
+					bus->usebufpool = FALSE;
+					bus->rxctl = bus->rxbuf;
+					if (dhd_alignctl) {
+						bus->rxctl += firstread;
+						if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN)))
+							bus->rxctl += (DHD_SDALIGN - pad);
+						bus->rxctl -= firstread;
+					}
+					ASSERT(bus->rxctl >= bus->rxbuf);
+					rxbuf = bus->rxctl;
+					/* Read the entire frame */
+					sdret = dhd_bcmsdh_recv_buf(bus,
+					                            bcmsdh_cur_sbwad(sdh),
+					                            SDIO_FUNC_2,
+					                            F2SYNC, rxbuf, rdlen,
+					                            NULL, NULL, NULL);
+					bus->f2rxdata++;
+					ASSERT(sdret != BCME_PENDING);
+
+
+					/* Control frame failures need retransmission */
+					if (sdret < 0) {
+						DHD_ERROR(("%s: read %d control bytes failed: %d\n",
+						   __FUNCTION__, rdlen, sdret));
+						/* dhd.rx_ctlerrs is higher level */
+						bus->rxc_errors++;
+						dhd_os_sdunlock_rxq(bus->dhd);
+						dhdsdio_rxfail(bus, TRUE,
+						    (bus->bus == SPI_BUS) ? FALSE : TRUE);
+						continue;
+					}
+				} else {
+					/* Give up on data, request rtx of events */
+					DHD_ERROR(("%s (nextlen): PKTGET failed: len %d rdlen %d "
+					           "expected rxseq %d\n",
+					           __FUNCTION__, len, rdlen, rxseq));
+					/* Just go try again w/normal header read */
+					dhd_os_sdunlock_rxq(bus->dhd);
+					continue;
+				}
+			} else {
+				if (bus->bus == SPI_BUS)
+					bus->usebufpool = TRUE;
+
+				ASSERT(!PKTLINK(pkt));
+				PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
+				rxbuf = (uint8 *)PKTDATA(osh, pkt);
+				/* Read the entire frame */
+				sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh),
+				                            SDIO_FUNC_2,
+				                            F2SYNC, rxbuf, rdlen,
+				                            pkt, NULL, NULL);
+				bus->f2rxdata++;
+				ASSERT(sdret != BCME_PENDING);
+
+				if (sdret < 0) {
+					DHD_ERROR(("%s (nextlen): read %d bytes failed: %d\n",
+					   __FUNCTION__, rdlen, sdret));
+					PKTFREE(bus->dhd->osh, pkt, FALSE);
+					bus->dhd->rx_errors++;
+					dhd_os_sdunlock_rxq(bus->dhd);
+					/* Force retry w/normal header read.  Don't attempt NAK for
+					 * gSPI
+					 */
+					dhdsdio_rxfail(bus, TRUE,
+					      (bus->bus == SPI_BUS) ? FALSE : TRUE);
+					continue;
+				}
+			}
+			dhd_os_sdunlock_rxq(bus->dhd);
+
+			/* Now check the header */
+			bcopy(rxbuf, bus->rxhdr, SDPCM_HDRLEN);
+
+			/* Extract hardware header fields */
+			len = ltoh16_ua(bus->rxhdr);
+			check = ltoh16_ua(bus->rxhdr + sizeof(uint16));
+
+			/* All zeros means readahead info was bad */
+			if (!(len|check)) {
+				DHD_INFO(("%s (nextlen): read zeros in HW header???\n",
+				           __FUNCTION__));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				GSPI_PR55150_BAILOUT;
+				continue;
+			}
+
+			/* Validate check bytes */
+			if ((uint16)~(len^check)) {
+				DHD_ERROR(("%s (nextlen): HW hdr error: nextlen/len/check"
+				           " 0x%04x/0x%04x/0x%04x\n", __FUNCTION__, nextlen,
+				           len, check));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				bus->rx_badhdr++;
+				dhdsdio_rxfail(bus, FALSE, FALSE);
+				GSPI_PR55150_BAILOUT;
+				continue;
+			}
+
+			/* Validate frame length */
+			if (len < SDPCM_HDRLEN) {
+				DHD_ERROR(("%s (nextlen): HW hdr length invalid: %d\n",
+				           __FUNCTION__, len));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				GSPI_PR55150_BAILOUT;
+				continue;
+			}
+
+			/* Check for consistency with readahead info */
+			len_consistent = (nextlen != (ROUNDUP(len, 16) >> 4));
+			if (len_consistent) {
+				/* Mismatch, force retry w/normal header (may be >4K) */
+				DHD_ERROR(("%s (nextlen): mismatch, nextlen %d len %d rnd %d; "
+				           "expected rxseq %d\n",
+				           __FUNCTION__, nextlen, len, ROUNDUP(len, 16), rxseq));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				dhdsdio_rxfail(bus, TRUE, (bus->bus == SPI_BUS) ? FALSE : TRUE);
+				GSPI_PR55150_BAILOUT;
+				continue;
+			}
+
+
+			/* Extract software header fields */
+			chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+			seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+			doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+			txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+			bus->nextlen =
+				bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+			if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+				DHD_INFO(("%s (nextlen): got frame w/nextlen too large"
+				          " (%d), seq %d\n", __FUNCTION__, bus->nextlen,
+				          seq));
+				bus->nextlen = 0;
+			}
+
+			bus->dhd->rx_readahead_cnt++;
+			/* Handle Flow Control */
+			fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+			delta = 0;
+			if (~bus->flowcontrol & fcbits) {
+				bus->fc_xoff++;
+				delta = 1;
+			}
+			if (bus->flowcontrol & ~fcbits) {
+				bus->fc_xon++;
+				delta = 1;
+			}
+
+			if (delta) {
+				bus->fc_rcvd++;
+				bus->flowcontrol = fcbits;
+			}
+
+			/* Check and update sequence number */
+			if (rxseq != seq) {
+				DHD_INFO(("%s (nextlen): rx_seq %d, expected %d\n",
+				          __FUNCTION__, seq, rxseq));
+				bus->rx_badseq++;
+				rxseq = seq;
+			}
+
+			/* Check window for sanity */
+			if ((uint8)(txmax - bus->tx_seq) > 0x70) {
+				DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+				           __FUNCTION__, txmax, bus->tx_seq));
+				txmax = bus->tx_max;
+			}
+			bus->tx_max = txmax;
+
+#ifdef DHD_DEBUG
+			if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+				prhex("Rx Data", rxbuf, len);
+			} else if (DHD_HDRS_ON()) {
+				prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN);
+			}
+#endif
+
+			if (chan == SDPCM_CONTROL_CHANNEL) {
+				if (bus->bus == SPI_BUS) {
+					dhdsdio_read_control(bus, rxbuf, len, doff);
+					if (bus->usebufpool) {
+						dhd_os_sdlock_rxq(bus->dhd);
+						PKTFREE(bus->dhd->osh, pkt, FALSE);
+						dhd_os_sdunlock_rxq(bus->dhd);
+					}
+					continue;
+				} else {
+					DHD_ERROR(("%s (nextlen): readahead on control"
+					           " packet %d?\n", __FUNCTION__, seq));
+					/* Force retry w/normal header read */
+					bus->nextlen = 0;
+					dhdsdio_rxfail(bus, FALSE, TRUE);
+					dhd_os_sdlock_rxq(bus->dhd);
+					PKTFREE2();
+					dhd_os_sdunlock_rxq(bus->dhd);
+					continue;
+				}
+			}
+
+			if ((bus->bus == SPI_BUS) && !bus->usebufpool) {
+				DHD_ERROR(("Received %d bytes on channel %d. Running out of "
+				           "rx pktbufs, or pool not yet allocated.\n", len, chan));
+				continue;
+			}
+
+			/* Validate data offset */
+			if ((doff < SDPCM_HDRLEN) || (doff > len)) {
+				DHD_ERROR(("%s (nextlen): bad data offset %d: HW len %d min %d\n",
+				           __FUNCTION__, doff, len, SDPCM_HDRLEN));
+				dhd_os_sdlock_rxq(bus->dhd);
+				PKTFREE2();
+				dhd_os_sdunlock_rxq(bus->dhd);
+				ASSERT(0);
+				dhdsdio_rxfail(bus, FALSE, FALSE);
+				continue;
+			}
+
+			/* All done with this one -- now deliver the packet */
+			goto deliver;
+		}
+		/* gSPI frames must be read whole, never in fragments */
+		if (bus->bus == SPI_BUS) {
+			break;
+		}
+#ifdef SDHOST3
+		if (((((uint16)bus->sih->chip) == BCM4324_CHIP_ID) && (bus->sih->chiprev <= 1)) ||
+			(((uint16)bus->sih->chip) == BCM43340_CHIP_ID) ||
+			(((uint16)bus->sih->chip) == BCM43341_CHIP_ID) ||
+			(((uint16)bus->sih->chip) == BCM4334_CHIP_ID)) {
+			if (dhdsdio_pr94636_WAR(bus) == TRUE) {
+				*finished = TRUE;
+				break;
+			}
+		}
+#endif /* SDHOST3 */
+
+		/* Read frame header (hardware and software) */
+		sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		                            bus->rxhdr, firstread, NULL, NULL, NULL);
+		bus->f2rxhdrs++;
+		ASSERT(sdret != BCME_PENDING);
+
+		if (sdret < 0) {
+			DHD_ERROR(("%s: RXHEADER FAILED: %d\n", __FUNCTION__, sdret));
+			bus->rx_hdrfail++;
+			dhdsdio_rxfail(bus, TRUE, TRUE);
+			continue;
+		}
+
+#ifdef DHD_DEBUG
+		if (DHD_BYTES_ON() || DHD_HDRS_ON()) {
+			prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN);
+		}
+#endif
+
+		/* Extract hardware header fields */
+		len = ltoh16_ua(bus->rxhdr);
+		check = ltoh16_ua(bus->rxhdr + sizeof(uint16));
+
+		/* All zeros means no more frames */
+		if (!(len|check)) {
+			*finished = TRUE;
+			break;
+		}
+
+		/* Validate check bytes */
+		if ((uint16)~(len^check)) {
+			DHD_ERROR(("%s: HW hdr error: len/check 0x%04x/0x%04x\n",
+			           __FUNCTION__, len, check));
+			bus->rx_badhdr++;
+			dhdsdio_rxfail(bus, FALSE, FALSE);
+			continue;
+		}
+
+		/* Validate frame length */
+		if (len < SDPCM_HDRLEN) {
+			DHD_ERROR(("%s: HW hdr length invalid: %d\n", __FUNCTION__, len));
+			continue;
+		}
+
+		/* Extract software header fields */
+		chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+		seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+		doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+		txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+		/* Validate data offset */
+		if ((doff < SDPCM_HDRLEN) || (doff > len)) {
+			DHD_ERROR(("%s: Bad data offset %d: HW len %d, min %d seq %d\n",
+			           __FUNCTION__, doff, len, SDPCM_HDRLEN, seq));
+			bus->rx_badhdr++;
+			ASSERT(0);
+			dhdsdio_rxfail(bus, FALSE, FALSE);
+			continue;
+		}
+
+		/* Save the readahead length if there is one */
+		bus->nextlen = bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+		if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+			DHD_INFO(("%s (nextlen): got frame w/nextlen too large (%d), seq %d\n",
+			          __FUNCTION__, bus->nextlen, seq));
+			bus->nextlen = 0;
+		}
+
+		/* Handle Flow Control */
+		fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
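+		/* ~flowcontrol & fcbits flags precedence levels the dongle has
+		 * newly XOFFed; flowcontrol & ~fcbits flags levels newly XONed.
+		 * Either transition counts as a flow-control change.
+		 */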
+		delta = 0;
+		if (~bus->flowcontrol & fcbits) {
+			bus->fc_xoff++;
+			delta = 1;
+		}
+		if (bus->flowcontrol & ~fcbits) {
+			bus->fc_xon++;
+			delta = 1;
+		}
+
+		if (delta) {
+			bus->fc_rcvd++;
+			bus->flowcontrol = fcbits;
+		}
+
+		/* Check and update sequence number */
+		if (rxseq != seq) {
+			DHD_INFO(("%s: rx_seq %d, expected %d\n", __FUNCTION__, seq, rxseq));
+			bus->rx_badseq++;
+			rxseq = seq;
+		}
+
+		/* Check window for sanity */
+		if ((uint8)(txmax - bus->tx_seq) > 0x70) {
+			DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
+			           __FUNCTION__, txmax, bus->tx_seq));
+			txmax = bus->tx_max;
+		}
+		bus->tx_max = txmax;
+
+		/* Call a separate function for control frames */
+		if (chan == SDPCM_CONTROL_CHANNEL) {
+			dhdsdio_read_control(bus, bus->rxhdr, len, doff);
+			continue;
+		}
+
+		ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL) ||
+		       (chan == SDPCM_TEST_CHANNEL) || (chan == SDPCM_GLOM_CHANNEL));
+
+		/* Length to read */
+		rdlen = (len > firstread) ? (len - firstread) : 0;
+
+		/* May pad read to blocksize for efficiency */
+		if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+			pad = bus->blocksize - (rdlen % bus->blocksize);
+			if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+			    ((rdlen + pad + firstread) < MAX_RX_DATASZ))
+				rdlen += pad;
+		} else if (rdlen % DHD_SDALIGN) {
+			rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+		}
+
+		/* Satisfy length-alignment requirements */
+		if (forcealign && (rdlen & (ALIGNMENT - 1)))
+			rdlen = ROUNDUP(rdlen, ALIGNMENT);
+
+		if ((rdlen + firstread) > MAX_RX_DATASZ) {
+			/* Too long -- skip this frame */
+			DHD_ERROR(("%s: too long: len %d rdlen %d\n", __FUNCTION__, len, rdlen));
+			bus->dhd->rx_errors++;
+			bus->rx_toolong++;
+			dhdsdio_rxfail(bus, FALSE, FALSE);
+			continue;
+		}
+
+		dhd_os_sdlock_rxq(bus->dhd);
+		if (!(pkt = PKTGET(osh, (rdlen + firstread + DHD_SDALIGN), FALSE))) {
+			/* Give up on data, request rtx of events */
+			DHD_ERROR(("%s: PKTGET failed: rdlen %d chan %d\n",
+			           __FUNCTION__, rdlen, chan));
+			bus->dhd->rx_dropped++;
+			dhd_os_sdunlock_rxq(bus->dhd);
+			dhdsdio_rxfail(bus, FALSE, RETRYCHAN(chan));
+			continue;
+		}
+		dhd_os_sdunlock_rxq(bus->dhd);
+
+		ASSERT(!PKTLINK(pkt));
+
+		/* Leave room for what we already read, and align remainder */
+		ASSERT(firstread < (PKTLEN(osh, pkt)));
+		PKTPULL(osh, pkt, firstread);
+		PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
+
+		/* Read the remaining frame data */
+		sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+		                            ((uint8 *)PKTDATA(osh, pkt)), rdlen, pkt, NULL, NULL);
+		bus->f2rxdata++;
+		ASSERT(sdret != BCME_PENDING);
+
+		if (sdret < 0) {
+			DHD_ERROR(("%s: read %d %s bytes failed: %d\n", __FUNCTION__, rdlen,
+			           ((chan == SDPCM_EVENT_CHANNEL) ? "event" :
+			            ((chan == SDPCM_DATA_CHANNEL) ? "data" : "test")), sdret));
+			dhd_os_sdlock_rxq(bus->dhd);
+			PKTFREE(bus->dhd->osh, pkt, FALSE);
+			dhd_os_sdunlock_rxq(bus->dhd);
+			bus->dhd->rx_errors++;
+			dhdsdio_rxfail(bus, TRUE, RETRYCHAN(chan));
+			continue;
+		}
+
+		/* Copy the already-read portion */
+		PKTPUSH(osh, pkt, firstread);
+		bcopy(bus->rxhdr, PKTDATA(osh, pkt), firstread);
+
+#ifdef DHD_DEBUG
+		if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+			prhex("Rx Data", PKTDATA(osh, pkt), len);
+		}
+#endif
+
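+		/* Readahead frames that already passed header validation jump
+		 * here directly, skipping the separate header read above.
+		 */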
+deliver:
+		/* Save superframe descriptor and allocate packet frame */
+		if (chan == SDPCM_GLOM_CHANNEL) {
+			if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
+				DHD_GLOM(("%s: got glom descriptor, %d bytes:\n",
+				          __FUNCTION__, len));
+#ifdef DHD_DEBUG
+				if (DHD_GLOM_ON()) {
+					prhex("Glom Data", PKTDATA(osh, pkt), len);
+				}
+#endif
+				PKTSETLEN(osh, pkt, len);
+				ASSERT(doff == SDPCM_HDRLEN);
+				PKTPULL(osh, pkt, SDPCM_HDRLEN);
+				bus->glomd = pkt;
+			} else {
+				DHD_ERROR(("%s: glom superframe w/o descriptor!\n", __FUNCTION__));
+				dhdsdio_rxfail(bus, FALSE, FALSE);
+			}
+			continue;
+		}
+
+		/* Fill in packet len and prio, deliver upward */
+		PKTSETLEN(osh, pkt, len);
+		PKTPULL(osh, pkt, doff);
+
+#ifdef SDTEST
+		/* Test channel packets are processed separately */
+		if (chan == SDPCM_TEST_CHANNEL) {
+			dhdsdio_testrcv(bus, pkt, seq);
+			continue;
+		}
+#endif /* SDTEST */
+
+		if (PKTLEN(osh, pkt) == 0) {
+			dhd_os_sdlock_rxq(bus->dhd);
+			PKTFREE(bus->dhd->osh, pkt, FALSE);
+			dhd_os_sdunlock_rxq(bus->dhd);
+			continue;
+		} else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pkt, reorder_info_buf,
+			&reorder_info_len) != 0) {
+			DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__));
+			dhd_os_sdlock_rxq(bus->dhd);
+			PKTFREE(bus->dhd->osh, pkt, FALSE);
+			dhd_os_sdunlock_rxq(bus->dhd);
+			bus->dhd->rx_errors++;
+			continue;
+		}
+		if (reorder_info_len) {
+			/* Reordering info from the firmware */
+			dhd_process_pkt_reorder_info(bus->dhd, reorder_info_buf, reorder_info_len,
+				&pkt, &pkt_count);
+			if (pkt_count == 0)
+				continue;
+		} else {
+			pkt_count = 1;
+		}
+
+		/* Unlock during rx call */
+		dhd_os_sdunlock(bus->dhd);
+		dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, chan);
+		dhd_os_sdlock(bus->dhd);
+	}
+	rxcount = maxframes - rxleft;
+#ifdef DHD_DEBUG
+	/* Message if we hit the limit */
+	if (!rxleft && !sdtest)
+		DHD_DATA(("%s: hit rx limit of %d frames\n", __FUNCTION__, maxframes));
+	else
+#endif /* DHD_DEBUG */
+	DHD_DATA(("%s: processed %d frames\n", __FUNCTION__, rxcount));
+	/* Back off rxseq if awaiting rtx, update rx_seq */
+	if (bus->rxskip)
+		rxseq--;
+	bus->rx_seq = rxseq;
+
+	if (bus->reqbussleep) {
+		dhdsdio_bussleep(bus, TRUE);
+		bus->reqbussleep = FALSE;
+	}
+	bus->readframes = FALSE;
+
+	return rxcount;
+}
+
+static uint32
+dhdsdio_hostmail(dhd_bus_t *bus)
+{
+	sdpcmd_regs_t *regs = bus->regs;
+	uint32 intstatus = 0;
+	uint32 hmb_data;
+	uint8 fcbits;
+	uint retries = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Read mailbox data and ack that we did so */
+	R_SDREG(hmb_data, &regs->tohostmailboxdata, retries);
+	if (retries <= retry_limit)
+		W_SDREG(SMB_INT_ACK, &regs->tosbmailbox, retries);
+	bus->f1regdata += 2;
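+	/* One read (tohostmailboxdata) plus one write (SMB_INT_ACK) make up the
+	 * two func-1 register accesses counted above.
+	 */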
+
+	/* Dongle recomposed rx frames, accept them again */
+	if (hmb_data & HMB_DATA_NAKHANDLED) {
+		DHD_INFO(("Dongle reports NAK handled, expect rtx of %d\n", bus->rx_seq));
+		if (!bus->rxskip) {
+			DHD_ERROR(("%s: unexpected NAKHANDLED!\n", __FUNCTION__));
+		}
+		bus->rxskip = FALSE;
+		intstatus |= FRAME_AVAIL_MASK(bus);
+	}
+
+	/*
+	 * DEVREADY does not occur with gSPI.
+	 */
+	if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
+		bus->sdpcm_ver = (hmb_data & HMB_DATA_VERSION_MASK) >> HMB_DATA_VERSION_SHIFT;
+		if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
+			DHD_ERROR(("Version mismatch, dongle reports %d, expecting %d\n",
+			           bus->sdpcm_ver, SDPCM_PROT_VERSION));
+		else
+			DHD_INFO(("Dongle ready, protocol version %d\n", bus->sdpcm_ver));
+		/* make sure for the SDIO_DEVICE_RXDATAINT_MODE_1 corecontrol is proper */
+		if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) &&
+		    (bus->rxint_mode  == SDIO_DEVICE_RXDATAINT_MODE_1)) {
+			uint32 val;
+
+			val = R_REG(bus->dhd->osh, &bus->regs->corecontrol);
+			val &= ~CC_XMTDATAAVAIL_MODE;
+			val |= CC_XMTDATAAVAIL_CTRL;
+			W_REG(bus->dhd->osh, &bus->regs->corecontrol, val);
+
+			val = R_REG(bus->dhd->osh, &bus->regs->corecontrol);
+		}
+
+#ifdef DHD_DEBUG
+		/* Retrieve console state address now that firmware should have updated it */
+		{
+			sdpcm_shared_t shared;
+			if (dhdsdio_readshared(bus, &shared) == 0)
+				bus->console_addr = shared.console_addr;
+		}
+#endif /* DHD_DEBUG */
+	}
+
+	/*
+	 * Flow Control has been moved into the RX headers and this out-of-band
+	 * method isn't used any more.  Leave this here to remain backward
+	 * compatible with older dongles.
+	 */
+	if (hmb_data & HMB_DATA_FC) {
+		fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >> HMB_DATA_FCDATA_SHIFT;
+
+		if (fcbits & ~bus->flowcontrol)
+			bus->fc_xoff++;
+		if (bus->flowcontrol & ~fcbits)
+			bus->fc_xon++;
+
+		bus->fc_rcvd++;
+		bus->flowcontrol = fcbits;
+	}
+
+#ifdef DHD_DEBUG
+	/* At least print a message if FW halted */
+	if (hmb_data & HMB_DATA_FWHALT) {
+		DHD_ERROR(("INTERNAL ERROR: FIRMWARE HALTED : set BUS DOWN\n"));
+		dhdsdio_checkdied(bus, NULL, 0);
+		bus->dhd->busstate = DHD_BUS_DOWN;
+	}
+#endif /* DHD_DEBUG */
+
+	/* Shouldn't be any others */
+	if (hmb_data & ~(HMB_DATA_DEVREADY |
+	                 HMB_DATA_FWHALT |
+	                 HMB_DATA_NAKHANDLED |
+	                 HMB_DATA_FC |
+	                 HMB_DATA_FWREADY |
+	                 HMB_DATA_FCDATA_MASK |
+	                 HMB_DATA_VERSION_MASK)) {
+		DHD_ERROR(("Unknown mailbox data content: 0x%02x\n", hmb_data));
+	}
+
+	return intstatus;
+}
+
+static bool
+dhdsdio_dpc(dhd_bus_t *bus)
+{
+	bcmsdh_info_t *sdh = bus->sdh;
+	sdpcmd_regs_t *regs = bus->regs;
+	uint32 intstatus, newstatus = 0;
+	uint retries = 0;
+	uint rxlimit = dhd_rxbound; /* Rx frames to read before resched */
+	uint txlimit = dhd_txbound; /* Tx frames to send before resched */
+	uint framecnt = 0;		  /* Temporary counter of tx/rx frames */
+	bool rxdone = TRUE;		  /* Flag for no more read data */
+	bool resched = FALSE;	  /* Flag indicating resched wanted */
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	dhd_os_sdlock(bus->dhd);
+
+	if (bus->dhd->busstate == DHD_BUS_DOWN ||
+		bus->dhd->hang_was_sent) {
+		DHD_ERROR(("%s: Bus down, avoiding any SDIO access\n", __FUNCTION__));
+		bus->intstatus = 0;
+		dhd_os_sdunlock(bus->dhd);
+		return FALSE;
+	}
+
+	/* Start with leftover status bits */
+	intstatus = bus->intstatus;
+
+	if (!SLPAUTO_ENAB(bus) && !KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+		goto exit;
+	}
+
+	/* If waiting for HTAVAIL, check status */
+	if (!SLPAUTO_ENAB(bus) && (bus->clkstate == CLK_PENDING)) {
+		int err;
+		uint8 clkctl, devctl = 0;
+
+#ifdef DHD_DEBUG
+		/* Check for inconsistent device control */
+		devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+		if (err) {
+			DHD_ERROR(("%s: error reading DEVCTL: %d\n", __FUNCTION__, err));
+			bus->dhd->busstate = DHD_BUS_DOWN;
+		} else {
+			ASSERT(devctl & SBSDIO_DEVCTL_CA_INT_ONLY);
+		}
+#endif /* DHD_DEBUG */
+
+		/* Read CSR, if clock on switch to AVAIL, else ignore */
+		clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+		if (err) {
+			DHD_ERROR(("%s: error reading CSR: %d\n", __FUNCTION__, err));
+			bus->dhd->busstate = DHD_BUS_DOWN;
+		}
+
+		DHD_INFO(("DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n", devctl, clkctl));
+
+		if (SBSDIO_HTAV(clkctl)) {
+			devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+			if (err) {
+				DHD_ERROR(("%s: error reading DEVCTL: %d\n",
+				           __FUNCTION__, err));
+				bus->dhd->busstate = DHD_BUS_DOWN;
+			}
+			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+			if (err) {
+				DHD_ERROR(("%s: error writing DEVCTL: %d\n",
+				           __FUNCTION__, err));
+				bus->dhd->busstate = DHD_BUS_DOWN;
+			}
+			bus->clkstate = CLK_AVAIL;
+		} else {
+			goto clkwait;
+		}
+	}
+
+	BUS_WAKE(bus);
+
+	/* Make sure backplane clock is on */
+	dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
+	if (bus->clkstate != CLK_AVAIL)
+		goto clkwait;
+
+	/* Pending interrupt indicates new device status */
+	if (bus->ipend) {
+		bus->ipend = FALSE;
+		R_SDREG(newstatus, &regs->intstatus, retries);
+		bus->f1regdata++;
+		if (bcmsdh_regfail(bus->sdh))
+			newstatus = 0;
+		newstatus &= bus->hostintmask;
+		bus->fcstate = !!(newstatus & I_HMB_FC_STATE);
+		if (newstatus) {
+			bus->f1regdata++;
+			if ((bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_0) &&
+				(newstatus == I_XMTDATA_AVAIL)) {
+			}
+			else
+				W_SDREG(newstatus, &regs->intstatus, retries);
+		}
+	}
+
+	/* Merge new bits with previous */
+	intstatus |= newstatus;
+	bus->intstatus = 0;
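+	/* Bits still set when this pass ends are written back to bus->intstatus
+	 * near the bottom of the function, so unhandled events survive into the
+	 * next scheduling of the DPC.
+	 */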
+
+	/* Handle flow-control change: read new state in case our ack
+	 * crossed another change interrupt.  If change still set, assume
+	 * FC ON for safety, let next loop through do the debounce.
+	 */
+	if (intstatus & I_HMB_FC_CHANGE) {
+		intstatus &= ~I_HMB_FC_CHANGE;
+		W_SDREG(I_HMB_FC_CHANGE, &regs->intstatus, retries);
+		R_SDREG(newstatus, &regs->intstatus, retries);
+		bus->f1regdata += 2;
+		bus->fcstate = !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
+		intstatus |= (newstatus & bus->hostintmask);
+	}
+
+	/* Just being here means nothing more to do for chipactive */
+	if (intstatus & I_CHIPACTIVE) {
+		/* ASSERT(bus->clkstate == CLK_AVAIL); */
+		intstatus &= ~I_CHIPACTIVE;
+	}
+
+	/* Handle host mailbox indication */
+	if (intstatus & I_HMB_HOST_INT) {
+		intstatus &= ~I_HMB_HOST_INT;
+		intstatus |= dhdsdio_hostmail(bus);
+	}
+
+	/* Generally don't ask for these, can get CRC errors... */
+	if (intstatus & I_WR_OOSYNC) {
+		DHD_ERROR(("Dongle reports WR_OOSYNC\n"));
+		intstatus &= ~I_WR_OOSYNC;
+	}
+
+	if (intstatus & I_RD_OOSYNC) {
+		DHD_ERROR(("Dongle reports RD_OOSYNC\n"));
+		intstatus &= ~I_RD_OOSYNC;
+	}
+
+	if (intstatus & I_SBINT) {
+		DHD_ERROR(("Dongle reports SBINT\n"));
+		intstatus &= ~I_SBINT;
+	}
+
+	/* Would be active due to wake-wlan in gSPI */
+	if (intstatus & I_CHIPACTIVE) {
+		DHD_INFO(("Dongle reports CHIPACTIVE\n"));
+		intstatus &= ~I_CHIPACTIVE;
+	}
+
+	if (intstatus & I_HMB_FC_STATE) {
+		DHD_INFO(("Dongle reports HMB_FC_STATE\n"));
+		intstatus &= ~I_HMB_FC_STATE;
+	}
+
+	/* Ignore frame indications if rxskip is set */
+	if (bus->rxskip) {
+		intstatus &= ~FRAME_AVAIL_MASK(bus);
+	}
+
+	/* On frame indication, read available frames */
+	if (PKT_AVAILABLE(bus, intstatus)) {
+		framecnt = dhdsdio_readframes(bus, rxlimit, &rxdone);
+		if (rxdone || bus->rxskip)
+			intstatus  &= ~FRAME_AVAIL_MASK(bus);
+		rxlimit -= MIN(framecnt, rxlimit);
+	}
+
+	/* Keep still-pending events for next scheduling */
+	bus->intstatus = intstatus;
+
+clkwait:
+	/* Re-enable interrupts to detect new device events (mailbox, rx frame)
+	 * or clock availability.  (Allows tx loop to check ipend if desired.)
+	 * (Unless register access seems hosed, as we may not be able to ACK...)
+	 */
+	if (bus->intr && bus->intdis && !bcmsdh_regfail(sdh)) {
+		DHD_INTR(("%s: enable SDIO interrupts, rxdone %d framecnt %d\n",
+		          __FUNCTION__, rxdone, framecnt));
+		bus->intdis = FALSE;
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+		bcmsdh_oob_intr_set(bus->sdh, TRUE);
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+		bcmsdh_intr_enable(sdh);
+	}
+
+#if defined(OOB_INTR_ONLY) && !defined(HW_OOB)
+	/* In case of SW-OOB (edge trigger), check the interrupt status in the
+	 * dongle again after enabling the IRQ on the host, and reschedule the
+	 * DPC if an interrupt is pending in the dongle.  There is a chance of
+	 * missing an OOB interrupt while the IRQ is disabled on the host.
+	 * No need to do this with HW-OOB (level trigger).
+	 */
+	R_SDREG(newstatus, &regs->intstatus, retries);
+	if (bcmsdh_regfail(bus->sdh))
+		newstatus = 0;
+	if (newstatus & bus->hostintmask) {
+		bus->ipend = TRUE;
+		resched = TRUE;
+	}
+#endif /* defined(OOB_INTR_ONLY) && !defined(HW_OOB) */
+
+#ifdef PROP_TXSTATUS
+	dhd_wlfc_commit_packets(bus->dhd, (f_commitpkt_t)dhd_bus_txdata, (void *)bus, NULL, FALSE);
+#endif
+
+	if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL))
+		dhdsdio_sendpendctl(bus);
+
+	/* Send queued frames (limit 1 if rx may still be pending) */
+	else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate &&
+	    pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit && DATAOK(bus)) {
+		framecnt = rxdone ? txlimit : MIN(txlimit, dhd_txminmax);
+		framecnt = dhdsdio_sendfromq(bus, framecnt);
+		txlimit -= framecnt;
+	}
+	/* Resched the DPC if ctrl cmd is pending on bus credit */
+	if (bus->ctrl_frame_stat)
+		resched = TRUE;
+
+	/* Resched if events or tx frames are pending, else await next interrupt */
+	/* On failed register access, all bets are off: no resched or interrupts */
+	if ((bus->dhd->busstate == DHD_BUS_DOWN) || bcmsdh_regfail(sdh)) {
+		if ((bus->sih && bus->sih->buscorerev >= 12) && !(dhdsdio_sleepcsr_get(bus) &
+			SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
+			/* Bus failed because of KSO */
+			DHD_ERROR(("%s: Bus failed due to KSO\n", __FUNCTION__));
+			bus->kso = FALSE;
+		} else {
+			DHD_ERROR(("%s: failed backplane access over SDIO, halting operation\n",
+				__FUNCTION__));
+			bus->dhd->busstate = DHD_BUS_DOWN;
+			bus->intstatus = 0;
+		}
+	} else if (bus->clkstate == CLK_PENDING) {
+		/* Awaiting I_CHIPACTIVE; don't resched */
+	} else if (bus->intstatus || bus->ipend ||
+	           (!bus->fcstate && pktq_mlen(&bus->txq, ~bus->flowcontrol) && DATAOK(bus)) ||
+			PKT_AVAILABLE(bus, bus->intstatus)) {  /* Read multiple frames */
+		resched = TRUE;
+	}
+
+	bus->dpc_sched = resched;
+
+	/* If we're done for now, turn off clock request. */
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && (bus->clkstate != CLK_PENDING)) {
+		bus->activity = FALSE;
+		dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+	}
+
+exit:
+
+	if (!resched && dhd_dpcpoll) {
+		if (dhdsdio_readframes(bus, dhd_rxbound, &rxdone) != 0)
+			resched = TRUE;
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+	return resched;
+}
+
+bool
+dhd_bus_dpc(struct dhd_bus *bus)
+{
+	bool resched;
+
+	/* Call the DPC directly. */
+	DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__));
+	resched = dhdsdio_dpc(bus);
+
+	return resched;
+}
+
+void
+dhdsdio_isr(void *arg)
+{
+	dhd_bus_t *bus = (dhd_bus_t*)arg;
+	bcmsdh_info_t *sdh;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (!bus) {
+		DHD_ERROR(("%s: bus is a null pointer, exiting\n", __FUNCTION__));
+		return;
+	}
+	sdh = bus->sdh;
+
+	if (bus->dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s: bus is down, nothing to do\n", __FUNCTION__));
+		return;
+	}
+
+	/* Count the interrupt call */
+	bus->intrcount++;
+	bus->ipend = TRUE;
+
+	/* Shouldn't get this interrupt if we're sleeping? */
+	if (!SLPAUTO_ENAB(bus)) {
+		if (bus->sleeping) {
+			DHD_ERROR(("INTERRUPT WHILE SLEEPING??\n"));
+			return;
+		} else if (!KSO_ENAB(bus)) {
+			DHD_ERROR(("ISR in devsleep 1\n"));
+		}
+	}
+
+	/* Disable additional interrupts (is this needed now)? */
+	if (bus->intr) {
+		DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+	} else {
+		DHD_ERROR(("dhdsdio_isr() w/o interrupt configured!\n"));
+	}
+
+	bcmsdh_intr_disable(sdh);
+	bus->intdis = TRUE;
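+	/* SDIO interrupts stay disabled (intdis set) until the DPC re-enables
+	 * them after it has read and ACKed the pending device status.
+	 */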
+
+#if defined(SDIO_ISR_THREAD)
+	DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__));
+	DHD_OS_WAKE_LOCK(bus->dhd);
+	dhdsdio_dpc(bus);
+	DHD_OS_WAKE_UNLOCK(bus->dhd);
+#else
+
+	bus->dpc_sched = TRUE;
+	dhd_sched_dpc(bus->dhd);
+
+#endif /* defined(SDIO_ISR_THREAD) */
+
+}
+
+#ifdef SDTEST
+static void
+dhdsdio_pktgen_init(dhd_bus_t *bus)
+{
+	/* Default to specified length, or full range */
+	if (dhd_pktgen_len) {
+		bus->pktgen_maxlen = MIN(dhd_pktgen_len, MAX_PKTGEN_LEN);
+		bus->pktgen_minlen = bus->pktgen_maxlen;
+	} else {
+		bus->pktgen_maxlen = MAX_PKTGEN_LEN;
+		bus->pktgen_minlen = 0;
+	}
+	bus->pktgen_len = (uint16)bus->pktgen_minlen;
+
+	/* Default to per-watchdog burst with 10s print time */
+	bus->pktgen_freq = 1;
+	bus->pktgen_print = dhd_watchdog_ms ? (10000 / dhd_watchdog_ms) : 0;
+	bus->pktgen_count = (dhd_pktgen * dhd_watchdog_ms + 999) / 1000;
+
+	/* Default to echo mode */
+	bus->pktgen_mode = DHD_PKTGEN_ECHO;
+	bus->pktgen_stop = 1;
+}
+
+static void
+dhdsdio_pktgen(dhd_bus_t *bus)
+{
+	void *pkt;
+	uint8 *data;
+	uint pktcount;
+	uint fillbyte;
+	osl_t *osh = bus->dhd->osh;
+	uint16 len;
+	ulong time_lapse;
+	uint sent_pkts;
+	uint rcvd_pkts;
+
+	/* Display current count if appropriate */
+	if (bus->pktgen_print && (++bus->pktgen_ptick >= bus->pktgen_print)) {
+		bus->pktgen_ptick = 0;
+		printf("%s: send attempts %d, rcvd %d, errors %d\n",
+		       __FUNCTION__, bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail);
+
+		/* Print throughput stats only for constant length packet runs */
+		if (bus->pktgen_minlen == bus->pktgen_maxlen) {
+			time_lapse = jiffies - bus->pktgen_prev_time;
+			bus->pktgen_prev_time = jiffies;
+			sent_pkts = bus->pktgen_sent - bus->pktgen_prev_sent;
+			bus->pktgen_prev_sent = bus->pktgen_sent;
+			rcvd_pkts = bus->pktgen_rcvd - bus->pktgen_prev_rcvd;
+			bus->pktgen_prev_rcvd = bus->pktgen_rcvd;
+
+			printf("%s: Tx Throughput %d kbps, Rx Throughput %d kbps\n",
+			  __FUNCTION__,
+			  (sent_pkts * bus->pktgen_len / jiffies_to_msecs(time_lapse)) * 8,
+			  (rcvd_pkts * bus->pktgen_len  / jiffies_to_msecs(time_lapse)) * 8);
+		}
+	}
+
+	/* For recv mode, just make sure dongle has started sending */
+	if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+		if (bus->pktgen_rcv_state == PKTGEN_RCV_IDLE) {
+			bus->pktgen_rcv_state = PKTGEN_RCV_ONGOING;
+			dhdsdio_sdtest_set(bus, bus->pktgen_total);
+		}
+		return;
+	}
+
+	/* Otherwise, generate or request the specified number of packets */
+	for (pktcount = 0; pktcount < bus->pktgen_count; pktcount++) {
+		/* Stop if total has been reached */
+		if (bus->pktgen_total && (bus->pktgen_sent >= bus->pktgen_total)) {
+			bus->pktgen_count = 0;
+			break;
+		}
+
+		/* Allocate an appropriate-sized packet */
+		if (bus->pktgen_mode == DHD_PKTGEN_RXBURST) {
+			len = SDPCM_TEST_PKT_CNT_FLD_LEN;
+		} else {
+			len = bus->pktgen_len;
+		}
+		if (!(pkt = PKTGET(osh, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN),
+		                   TRUE))) {
+			DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
+			break;
+		}
+		PKTALIGN(osh, pkt, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN);
+		data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+
+		/* Write test header cmd and extra based on mode */
+		switch (bus->pktgen_mode) {
+		case DHD_PKTGEN_ECHO:
+			*data++ = SDPCM_TEST_ECHOREQ;
+			*data++ = (uint8)bus->pktgen_sent;
+			break;
+
+		case DHD_PKTGEN_SEND:
+			*data++ = SDPCM_TEST_DISCARD;
+			*data++ = (uint8)bus->pktgen_sent;
+			break;
+
+		case DHD_PKTGEN_RXBURST:
+			*data++ = SDPCM_TEST_BURST;
+			*data++ = (uint8)bus->pktgen_count; /* Just for backward compatibility */
+			break;
+
+		default:
+			DHD_ERROR(("Unrecognized pktgen mode %d\n", bus->pktgen_mode));
+			PKTFREE(osh, pkt, TRUE);
+			bus->pktgen_count = 0;
+			return;
+		}
+
+		/* Write test header length field */
+		*data++ = (bus->pktgen_len >> 0);
+		*data++ = (bus->pktgen_len >> 8);
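+		/* On the wire the test header is cmd (1 byte), extra (1 byte),
+		 * then the 16-bit length written LSB-first, as assembled above;
+		 * dhdsdio_testrcv() parses the same layout on receive.
+		 */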
+
+		/* Write frame count in a 4-byte field adjacent to the SDPCM test
+		 * header for burst mode
+		 */
+		if (bus->pktgen_mode == DHD_PKTGEN_RXBURST) {
+			*data++ = (uint8)(bus->pktgen_count >> 0);
+			*data++ = (uint8)(bus->pktgen_count >> 8);
+			*data++ = (uint8)(bus->pktgen_count >> 16);
+			*data++ = (uint8)(bus->pktgen_count >> 24);
+		} else {
+
+			/* Then fill in the remainder -- N/A for burst */
+			for (fillbyte = 0; fillbyte < len; fillbyte++)
+				*data++ = SDPCM_TEST_FILL(fillbyte, (uint8)bus->pktgen_sent);
+		}
+
+#ifdef DHD_DEBUG
+		if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+			data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+			prhex("dhdsdio_pktgen: Tx Data", data, PKTLEN(osh, pkt) - SDPCM_HDRLEN);
+		}
+#endif
+
+		/* Send it */
+		if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) != BCME_OK) {
+			bus->pktgen_fail++;
+			if (bus->pktgen_stop && bus->pktgen_stop == bus->pktgen_fail)
+				bus->pktgen_count = 0;
+		}
+		bus->pktgen_sent++;
+
+		/* Bump length if not fixed, wrap at max */
+		if (++bus->pktgen_len > bus->pktgen_maxlen)
+			bus->pktgen_len = (uint16)bus->pktgen_minlen;
+
+		/* Special case for burst mode: just send one request! */
+		if (bus->pktgen_mode == DHD_PKTGEN_RXBURST)
+			break;
+	}
+}
+
+static void
+dhdsdio_sdtest_set(dhd_bus_t *bus, uint count)
+{
+	void *pkt;
+	uint8 *data;
+	osl_t *osh = bus->dhd->osh;
+
+	/* Allocate the packet */
+	if (!(pkt = PKTGET(osh, SDPCM_HDRLEN + SDPCM_TEST_HDRLEN +
+		SDPCM_TEST_PKT_CNT_FLD_LEN + DHD_SDALIGN, TRUE))) {
+		DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
+		return;
+	}
+	PKTALIGN(osh, pkt, (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN +
+		SDPCM_TEST_PKT_CNT_FLD_LEN), DHD_SDALIGN);
+	data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+
+	/* Fill in the test header */
+	*data++ = SDPCM_TEST_SEND;
+	*data++ = (count > 0) ? TRUE : FALSE;
+	*data++ = (bus->pktgen_maxlen >> 0);
+	*data++ = (bus->pktgen_maxlen >> 8);
+	*data++ = (uint8)(count >> 0);
+	*data++ = (uint8)(count >> 8);
+	*data++ = (uint8)(count >> 16);
+	*data++ = (uint8)(count >> 24);
+
+	/* Send it */
+	if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) != BCME_OK)
+		bus->pktgen_fail++;
+}
+
+
+static void
+dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq)
+{
+	osl_t *osh = bus->dhd->osh;
+	uint8 *data;
+	uint pktlen;
+
+	uint8 cmd;
+	uint8 extra;
+	uint16 len;
+	uint16 offset;
+
+	/* Check for min length */
+	if ((pktlen = PKTLEN(osh, pkt)) < SDPCM_TEST_HDRLEN) {
+		DHD_ERROR(("dhdsdio_testrcv: toss runt frame, pktlen %d\n", pktlen));
+		PKTFREE(osh, pkt, FALSE);
+		return;
+	}
+
+	/* Extract header fields */
+	data = PKTDATA(osh, pkt);
+	cmd = *data++;
+	extra = *data++;
+	len = *data++; len += *data++ << 8;
+	DHD_TRACE(("%s: cmd %d, extra %d, len %d\n", __FUNCTION__, cmd, extra, len));
+	/* Check length for relevant commands */
+	if (cmd == SDPCM_TEST_DISCARD || cmd == SDPCM_TEST_ECHOREQ || cmd == SDPCM_TEST_ECHORSP) {
+		if (pktlen != len + SDPCM_TEST_HDRLEN) {
+			DHD_ERROR(("dhdsdio_testrcv: frame length mismatch, pktlen %d seq %d"
+			           " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len));
+			PKTFREE(osh, pkt, FALSE);
+			return;
+		}
+	}
+
+	/* Process as per command */
+	switch (cmd) {
+	case SDPCM_TEST_ECHOREQ:
+		/* Rx->Tx turnaround ok (even on NDIS w/current implementation) */
+		*(uint8 *)(PKTDATA(osh, pkt)) = SDPCM_TEST_ECHORSP;
+		if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) == BCME_OK) {
+			bus->pktgen_sent++;
+		} else {
+			bus->pktgen_fail++;
+			PKTFREE(osh, pkt, FALSE);
+		}
+		bus->pktgen_rcvd++;
+		break;
+
+	case SDPCM_TEST_ECHORSP:
+		if (bus->ext_loop) {
+			PKTFREE(osh, pkt, FALSE);
+			bus->pktgen_rcvd++;
+			break;
+		}
+
+		for (offset = 0; offset < len; offset++, data++) {
+			if (*data != SDPCM_TEST_FILL(offset, extra)) {
+				DHD_ERROR(("dhdsdio_testrcv: echo data mismatch: "
+				           "offset %d (len %d) expect 0x%02x rcvd 0x%02x\n",
+				           offset, len, SDPCM_TEST_FILL(offset, extra), *data));
+				break;
+			}
+		}
+		PKTFREE(osh, pkt, FALSE);
+		bus->pktgen_rcvd++;
+		break;
+
+	case SDPCM_TEST_DISCARD:
+		{
+			int i = 0;
+			uint8 *prn = data;
+			uint8 testval = extra;
+			for (i = 0; i < len; i++) {
+				if (*prn != testval) {
+					DHD_ERROR(("DIErr@Pkt#:%d,Ix:%d, expected:0x%x, got:0x%x\n",
+						bus->pktgen_rcvd_rcvsession, i, testval, *prn));
+				}
+				prn++;
+				testval++;
+			}
+		}
+		PKTFREE(osh, pkt, FALSE);
+		bus->pktgen_rcvd++;
+		break;
+
+	case SDPCM_TEST_BURST:
+	case SDPCM_TEST_SEND:
+	default:
+		DHD_INFO(("dhdsdio_testrcv: unsupported or unknown command, pktlen %d seq %d"
+		          " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len));
+		PKTFREE(osh, pkt, FALSE);
+		break;
+	}
+
+	/* For recv mode, stop at limit (and tell dongle to stop sending) */
+	if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+		if (bus->pktgen_rcv_state != PKTGEN_RCV_IDLE) {
+			bus->pktgen_rcvd_rcvsession++;
+
+			if (bus->pktgen_total &&
+			    (bus->pktgen_rcvd_rcvsession >= bus->pktgen_total)) {
+				bus->pktgen_count = 0;
+				DHD_ERROR(("Pktgen:rcv test complete!\n"));
+				bus->pktgen_rcv_state = PKTGEN_RCV_IDLE;
+				dhdsdio_sdtest_set(bus, FALSE);
+				bus->pktgen_rcvd_rcvsession = 0;
+			}
+		}
+	}
+}
+#endif /* SDTEST */
+
+int dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
+{
+	int err = 0;
+
+#if defined(OOB_INTR_ONLY)
+	err = bcmsdh_oob_intr_register(dhdp->bus->sdh, dhdsdio_isr, dhdp->bus);
+#endif
+	return err;
+}
+
+void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
+{
+#if defined(OOB_INTR_ONLY)
+	bcmsdh_oob_intr_unregister(dhdp->bus->sdh);
+#endif
+}
+
+void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
+{
+#if defined(OOB_INTR_ONLY)
+	bcmsdh_oob_intr_set(dhdp->bus->sdh, enable);
+#endif
+}
+
+void dhd_bus_dev_pm_stay_awake(dhd_pub_t *dhdpub)
+{
+	bcmsdh_dev_pm_stay_awake(dhdpub->bus->sdh);
+}
+
+void dhd_bus_dev_pm_relax(dhd_pub_t *dhdpub)
+{
+	bcmsdh_dev_relax(dhdpub->bus->sdh);
+}
+
+bool dhd_bus_dev_pm_enabled(dhd_pub_t *dhdpub)
+{
+	bool enabled = FALSE;
+
+	enabled = bcmsdh_dev_pm_enabled(dhdpub->bus->sdh);
+	return enabled;
+}
+
+extern bool
+dhd_bus_watchdog(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus;
+
+	DHD_TIMER(("%s: Enter\n", __FUNCTION__));
+
+	bus = dhdp->bus;
+
+	if (bus->dhd->dongle_reset)
+		return FALSE;
+
+	if (bus->dhd->hang_was_sent) {
+		dhd_os_wd_timer(bus->dhd, 0);
+		return FALSE;
+	}
+
+	/* Ignore the timer if simulating bus down */
+	if (!SLPAUTO_ENAB(bus) && bus->sleeping)
+		return FALSE;
+
+	if (dhdp->busstate == DHD_BUS_DOWN)
+		return FALSE;
+
+	/* Poll period: check device if appropriate. */
+	if (!SLPAUTO_ENAB(bus) && (bus->poll && (++bus->polltick >= bus->pollrate))) {
+		uint32 intstatus = 0;
+
+		/* Reset poll tick */
+		bus->polltick = 0;
+
+		/* Check device if no interrupts */
+		if (!bus->intr || (bus->intrcount == bus->lastintrs)) {
+
+			if (!bus->dpc_sched) {
+				uint8 devpend;
+				devpend = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0,
+				                          SDIOD_CCCR_INTPEND, NULL);
+				intstatus = devpend & (INTR_STATUS_FUNC1 | INTR_STATUS_FUNC2);
+			}
+
+			/* If there is something, make like the ISR and schedule the DPC */
+			if (intstatus) {
+				bus->pollcnt++;
+				bus->ipend = TRUE;
+				if (bus->intr) {
+					bcmsdh_intr_disable(bus->sdh);
+				}
+				bus->dpc_sched = TRUE;
+				dhd_sched_dpc(bus->dhd);
+			}
+		}
+
+		/* Update interrupt tracking */
+		bus->lastintrs = bus->intrcount;
+	}
+
+#ifdef DHD_DEBUG
+	/* Poll for console output periodically */
+	if (dhdp->busstate == DHD_BUS_DATA && dhd_console_ms != 0) {
+		bus->console.count += dhd_watchdog_ms;
+		if (bus->console.count >= dhd_console_ms) {
+			bus->console.count -= dhd_console_ms;
+			/* Make sure backplane clock is on */
+			if (SLPAUTO_ENAB(bus))
+				dhdsdio_bussleep(bus, FALSE);
+			else
+				dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+			if (dhdsdio_readconsole(bus) < 0)
+				dhd_console_ms = 0;	/* On error, stop trying */
+		}
+	}
+#endif /* DHD_DEBUG */
+
+#ifdef SDTEST
+	/* Generate packets if configured */
+	if (bus->pktgen_count && (++bus->pktgen_tick >= bus->pktgen_freq)) {
+		/* Make sure backplane clock is on */
+		if (SLPAUTO_ENAB(bus))
+			dhdsdio_bussleep(bus, FALSE);
+		else
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+		bus->pktgen_tick = 0;
+		dhdsdio_pktgen(bus);
+	}
+#endif
+
+	/* On idle timeout clear activity flag and/or turn off clock */
+#ifdef DHD_USE_IDLECOUNT
+	if (bus->activity)
+		bus->activity = FALSE;
+	else {
+		bus->idlecount++;
+
+		if ((bus->idletime > 0) && (bus->idlecount >= bus->idletime)) {
+			DHD_TIMER(("%s: DHD Idle state!!\n", __FUNCTION__));
+			if (SLPAUTO_ENAB(bus)) {
+				if (dhdsdio_bussleep(bus, TRUE) != BCME_BUSY)
+					dhd_os_wd_timer(bus->dhd, 0);
+			} else
+				dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+
+			bus->idlecount = 0;
+		}
+	}
+#else
+	if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
+		if (++bus->idlecount >= bus->idletime) {
+			bus->idlecount = 0;
+			if (bus->activity) {
+				bus->activity = FALSE;
+				if (SLPAUTO_ENAB(bus)) {
+					if (!bus->readframes)
+						dhdsdio_bussleep(bus, TRUE);
+					else
+						bus->reqbussleep = TRUE;
+				}
+				else
+					dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+			}
+		}
+	}
+#endif /* DHD_USE_IDLECOUNT */
+
+	return bus->ipend;
+}
+
+#ifdef DHD_DEBUG
+extern int
+dhd_bus_console_in(dhd_pub_t *dhdp, uchar *msg, uint msglen)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	uint32 addr, val;
+	int rv;
+	void *pkt;
+
+	/* Address could be zero if CONSOLE := 0 in dongle Makefile */
+	if (bus->console_addr == 0)
+		return BCME_UNSUPPORTED;
+
+	/* Exclusive bus access */
+	dhd_os_sdlock(bus->dhd);
+
+	/* Don't allow input if dongle is in reset */
+	if (bus->dhd->dongle_reset) {
+		dhd_os_sdunlock(bus->dhd);
+		return BCME_NOTREADY;
+	}
+
+	/* Request clock to allow SDIO accesses */
+	BUS_WAKE(bus);
+	/* CLK_PENDING is not allowed since txpkt is called later; the HT clock has to be on */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	/* Zero cbuf_index */
+	addr = bus->console_addr + OFFSETOF(hndrte_cons_t, cbuf_idx);
+	val = htol32(0);
+	if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+		goto done;
+
+	/* Write message into cbuf */
+	addr = bus->console_addr + OFFSETOF(hndrte_cons_t, cbuf);
+	if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
+		goto done;
+
+	/* Write length into vcons_in */
+	addr = bus->console_addr + OFFSETOF(hndrte_cons_t, vcons_in);
+	val = htol32(msglen);
+	if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+		goto done;
+
+	/* Bump dongle by sending an empty packet on the event channel.
+	 * sdpcm_sendup (RX) checks for virtual console input.
+	 */
+	if ((pkt = PKTGET(bus->dhd->osh, 4 + SDPCM_RESERVE, TRUE)) != NULL)
+		rv = dhdsdio_txpkt(bus, SDPCM_EVENT_CHANNEL, &pkt, 1, TRUE);
+
+done:
+	if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+		bus->activity = FALSE;
+		dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+
+	return rv;
+}
+#endif /* DHD_DEBUG */
+
+#ifdef DHD_DEBUG
+static void
+dhd_dump_cis(uint fn, uint8 *cis)
+{
+	uint byte, tag, tdata;
+	DHD_INFO(("Function %d CIS:\n", fn));
+
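+	/* Walk the CIS as a tuple (TLV) chain: when tdata reaches zero the next
+	 * byte is a tag; 0xff ends the chain, a null tag has no length byte,
+	 * and any other tag is followed by a length byte whose value (plus the
+	 * length byte itself) is consumed as tuple data.
+	 */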
+	for (tdata = byte = 0; byte < SBSDIO_CIS_SIZE_LIMIT; byte++) {
+		if ((byte % 16) == 0)
+			DHD_INFO(("    "));
+		DHD_INFO(("%02x ", cis[byte]));
+		if ((byte % 16) == 15)
+			DHD_INFO(("\n"));
+		if (!tdata--) {
+			tag = cis[byte];
+			if (tag == 0xff)
+				break;
+			else if (!tag)
+				tdata = 0;
+			else if ((byte + 1) < SBSDIO_CIS_SIZE_LIMIT)
+				tdata = cis[byte + 1] + 1;
+			else
+				DHD_INFO(("]"));
+		}
+	}
+	if ((byte % 16) != 15)
+		DHD_INFO(("\n"));
+}
+#endif /* DHD_DEBUG */
+
+static bool
+dhdsdio_chipmatch(uint16 chipid)
+{
+	switch (chipid) {
+	case BCM4325_CHIP_ID:
+	case BCM4329_CHIP_ID:
+	case BCM4315_CHIP_ID:
+	case BCM4319_CHIP_ID:
+	case BCM4336_CHIP_ID:
+	case BCM4330_CHIP_ID:
+	case BCM43237_CHIP_ID:
+	case BCM43362_CHIP_ID:
+	case BCM4314_CHIP_ID:
+	case BCM43242_CHIP_ID:
+	case BCM43340_CHIP_ID:
+	case BCM43341_CHIP_ID:
+	case BCM43143_CHIP_ID:
+	case BCM43342_CHIP_ID:
+	case BCM4334_CHIP_ID:
+	case BCM43239_CHIP_ID:
+	case BCM4324_CHIP_ID:
+	case BCM4335_CHIP_ID:
+	case BCM4339_CHIP_ID:
+	case BCM43349_CHIP_ID:
+	case BCM4345_CHIP_ID:
+	case BCM4350_CHIP_ID:
+	case BCM4354_CHIP_ID:
+	case BCM4356_CHIP_ID:
+	case BCM43430_CHIP_ID:
+		return TRUE;
+	default:
+		return FALSE;
+	}
+}
+
+static void *
+dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot,
+	uint16 func, uint bustype, void *regsva, osl_t * osh, void *sdh)
+{
+	int ret;
+	dhd_bus_t *bus;
+
+
+	/* Init global variables at run-time, not as part of the declaration.
+	 * This is required to support init/de-init of the driver. Initialization
+	 * of globals as part of the declaration results in non-deterministic
+	 * behavior since the value of the globals may be different on the
+	 * first time that the driver is initialized vs subsequent initializations.
+	 */
+	dhd_txbound = DHD_TXBOUND;
+	dhd_rxbound = DHD_RXBOUND;
+	dhd_alignctl = TRUE;
+	sd1idle = TRUE;
+	dhd_readahead = TRUE;
+	retrydata = FALSE;
+	dhd_dongle_ramsize = 0;
+	dhd_txminmax = DHD_TXMINMAX;
+
+	forcealign = TRUE;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	DHD_INFO(("%s: venid 0x%04x devid 0x%04x\n", __FUNCTION__, venid, devid));
+
+	/* We make assumptions about address window mappings */
+	ASSERT((uintptr)regsva == SI_ENUM_BASE);
+
+	/* BCMSDH passes venid and devid based on CIS parsing -- but low-power start
+	 * means early parse could fail, so here we should get either an ID
+	 * we recognize OR (-1) indicating we must request power first.
+	 */
+	/* Check the Vendor ID */
+	switch (venid) {
+		case 0x0000:
+		case VENDOR_BROADCOM:
+			break;
+		default:
+			DHD_ERROR(("%s: unknown vendor: 0x%04x\n",
+			           __FUNCTION__, venid));
+			goto forcereturn;
+	}
+
+	/* Check the Device ID and make sure it's one that we support */
+	switch (devid) {
+		case BCM4325_D11DUAL_ID:		/* 4325 802.11a/g id */
+		case BCM4325_D11G_ID:			/* 4325 802.11g 2.4Ghz band id */
+		case BCM4325_D11A_ID:			/* 4325 802.11a 5Ghz band id */
+			DHD_INFO(("%s: found 4325 Dongle\n", __FUNCTION__));
+			break;
+		case BCM4329_D11N_ID:		/* 4329 802.11n dualband device */
+		case BCM4329_D11N2G_ID:		/* 4329 802.11n 2.4G device */
+		case BCM4329_D11N5G_ID:		/* 4329 802.11n 5G device */
+		case 0x4329:
+			DHD_INFO(("%s: found 4329 Dongle\n", __FUNCTION__));
+			break;
+		case BCM4315_D11DUAL_ID:		/* 4315 802.11a/g id */
+		case BCM4315_D11G_ID:			/* 4315 802.11g id */
+		case BCM4315_D11A_ID:			/* 4315 802.11a id */
+			DHD_INFO(("%s: found 4315 Dongle\n", __FUNCTION__));
+			break;
+		case BCM4319_D11N_ID:			/* 4319 802.11n id */
+		case BCM4319_D11N2G_ID:			/* 4319 802.11n2g id */
+		case BCM4319_D11N5G_ID:			/* 4319 802.11n5g id */
+			DHD_INFO(("%s: found 4319 Dongle\n", __FUNCTION__));
+			break;
+		case 0:
+			DHD_INFO(("%s: allow device id 0, will check chip internals\n",
+			          __FUNCTION__));
+			break;
+
+		default:
+			DHD_ERROR(("%s: skipping 0x%04x/0x%04x, not a dongle\n",
+			           __FUNCTION__, venid, devid));
+			goto forcereturn;
+	}
+
+	if (osh == NULL) {
+		DHD_ERROR(("%s: osh is NULL!\n", __FUNCTION__));
+		goto forcereturn;
+	}
+
+	/* Allocate private bus interface state */
+	if (!(bus = MALLOC(osh, sizeof(dhd_bus_t)))) {
+		DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
+		goto fail;
+	}
+	bzero(bus, sizeof(dhd_bus_t));
+	bus->sdh = sdh;
+	bus->cl_devid = (uint16)devid;
+	bus->bus = DHD_BUS;
+	bus->bus_num = bus_no;
+	bus->slot_num = slot;
+	bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
+	bus->usebufpool = FALSE; /* Use bufpool if allocated, else use locally malloced rxbuf */
+
+	/* attempt to attach to the dongle */
+	if (!(dhdsdio_probe_attach(bus, osh, sdh, regsva, devid))) {
+		DHD_ERROR(("%s: dhdsdio_probe_attach failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	/* Attach to the dhd/OS/network interface */
+	if (!(bus->dhd = dhd_attach(osh, bus, SDPCM_RESERVE))) {
+		DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	/* Allocate buffers */
+	if (!(dhdsdio_probe_malloc(bus, osh, sdh))) {
+		DHD_ERROR(("%s: dhdsdio_probe_malloc failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	if (!(dhdsdio_probe_init(bus, osh, sdh))) {
+		DHD_ERROR(("%s: dhdsdio_probe_init failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+	if (bus->intr) {
+		/* Register interrupt callback, but mask it (not operational yet). */
+		DHD_INTR(("%s: disable SDIO interrupts (not interested yet)\n", __FUNCTION__));
+		bcmsdh_intr_disable(sdh);
+		if ((ret = bcmsdh_intr_reg(sdh, dhdsdio_isr, bus)) != 0) {
+			DHD_ERROR(("%s: FAILED: bcmsdh_intr_reg returned %d\n",
+			           __FUNCTION__, ret));
+			goto fail;
+		}
+		DHD_INTR(("%s: registered SDIO interrupt function ok\n", __FUNCTION__));
+	} else {
+		DHD_INFO(("%s: SDIO interrupt function is NOT registered due to polling mode\n",
+		           __FUNCTION__));
+	}
+
+	DHD_INFO(("%s: completed!!\n", __FUNCTION__));
+
+	/* if firmware path present try to download and bring up bus */
+	bus->dhd->hang_report  = TRUE;
+	if (dhd_download_fw_on_driverload) {
+		if ((ret = dhd_bus_start(bus->dhd)) != 0) {
+			DHD_ERROR(("%s: dhd_bus_start failed\n", __FUNCTION__));
+			goto fail;
+		}
+	}
+	/* Ok, have the per-port tell the stack we're open for business */
+	if (dhd_register_if(bus->dhd, 0, TRUE) != 0) {
+		DHD_ERROR(("%s: Net attach failed!!\n", __FUNCTION__));
+		goto fail;
+	}
+
+
+	init_waitqueue_head(&bus->bus_sleep);
+
+	return bus;
+
+fail:
+	dhdsdio_release(bus, osh);
+
+forcereturn:
+
+	return NULL;
+}
+
+#define MAX_CLKCTL_RETRY 3
+static bool
+dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva,
+                     uint16 devid)
+{
+	int err = 0;
+	uint8 clkctl = 0;
+	int32 clkctl_retry = MAX_CLKCTL_RETRY;
+
+	bus->alp_only = TRUE;
+	bus->sih = NULL;
+
+	/* Return the window to backplane enumeration space for core access */
+	if (dhdsdio_set_siaddr_window(bus, SI_ENUM_BASE)) {
+		DHD_ERROR(("%s: FAILED to return to SI_ENUM_BASE\n", __FUNCTION__));
+	}
+
+	/* Force PLL off until si_attach() programs PLL control regs */
+	do {
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, DHD_INIT_CLKCTL1, &err);
+		if (!err)
+			clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+		if (err || ((clkctl & ~SBSDIO_AVBITS) != DHD_INIT_CLKCTL1)) {
+			DHD_ERROR(("dhdsdio_probe: ChipClkCSR access: err %d wrote 0x%02x read 0x%02x (%d)\n",
+			           err, DHD_INIT_CLKCTL1, clkctl, MAX_CLKCTL_RETRY - clkctl_retry + 1));
+		} else {
+			if (clkctl_retry != MAX_CLKCTL_RETRY)
+				DHD_ERROR(("dhdsdio_probe: ChipClkCSR access: success after %d retries\n",
+					MAX_CLKCTL_RETRY - clkctl_retry));
+			break;
+		}
+		msleep(50);
+	} while (!err && --clkctl_retry > 0);
+
+	if (err || !clkctl_retry) {
+		goto fail;
+	}
+
+#ifdef DHD_DEBUG
+	if (DHD_INFO_ON()) {
+		uint fn, numfn;
+		uint8 *cis[SDIOD_MAX_IOFUNCS];
+		int err = 0;
+
+		numfn = bcmsdh_query_iofnum(sdh);
+		ASSERT(numfn <= SDIOD_MAX_IOFUNCS);
+
+		/* Make sure ALP is available before trying to read CIS */
+		SPINWAIT(((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+		                                    SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+		          !SBSDIO_ALPAV(clkctl)), PMU_MAX_TRANSITION_DLY);
+
+		/* Now request ALP be put on the bus */
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+		                 DHD_INIT_CLKCTL2, &err);
+		OSL_DELAY(65);
+
+		for (fn = 0; fn <= numfn; fn++) {
+			if (!(cis[fn] = MALLOC(osh, SBSDIO_CIS_SIZE_LIMIT))) {
+				DHD_INFO(("dhdsdio_probe: fn %d cis malloc failed\n", fn));
+				break;
+			}
+			bzero(cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+
+			if ((err = bcmsdh_cis_read(sdh, fn, cis[fn], SBSDIO_CIS_SIZE_LIMIT))) {
+				DHD_INFO(("dhdsdio_probe: fn %d cis read err %d\n", fn, err));
+				MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+				break;
+			}
+			dhd_dump_cis(fn, cis[fn]);
+		}
+
+		while (fn-- > 0) {
+			ASSERT(cis[fn]);
+			MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+		}
+
+		if (err) {
+			DHD_ERROR(("dhdsdio_probe: failure reading or parsing CIS\n"));
+			goto fail;
+		}
+	}
+#endif /* DHD_DEBUG */
+
+	/* si_attach() will provide an SI handle and scan the backplane */
+	if (!(bus->sih = si_attach((uint)devid, osh, regsva, DHD_BUS, sdh,
+	                           &bus->vars, &bus->varsz))) {
+		DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
+		goto fail;
+	}
+
+#ifdef DHD_DEBUG
+	DHD_ERROR(("F1 signature OK, socitype:0x%x chip:0x%4x rev:0x%x pkg:0x%x\n",
+		bus->sih->socitype, bus->sih->chip, bus->sih->chiprev, bus->sih->chippkg));
+#endif /* DHD_DEBUG */
+
+
+	bcmsdh_chipinfo(sdh, bus->sih->chip, bus->sih->chiprev);
+
+	if (!dhdsdio_chipmatch((uint16)bus->sih->chip)) {
+		DHD_ERROR(("%s: unsupported chip: 0x%04x\n",
+		           __FUNCTION__, bus->sih->chip));
+		goto fail;
+	}
+
+	if (bus->sih->buscorerev >= 12)
+		dhdsdio_clk_kso_init(bus);
+	else
+		bus->kso = TRUE;
+
+	si_sdiod_drive_strength_init(bus->sih, osh, dhd_sdiod_drive_strength);
+
+	/* Get info on the ARM and SOCRAM cores... */
+	if (!DHD_NOPMU(bus)) {
+		if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
+		    (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
+		    (si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
+			bus->armrev = si_corerev(bus->sih);
+		} else {
+			DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
+			goto fail;
+		}
+
+		if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
+				DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
+				goto fail;
+			}
+		} else {
+			/* cr4 has a different way to find the RAM size from TCM's */
+			if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
+				DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
+				goto fail;
+			}
+			/* also populate base address */
+			switch ((uint16)bus->sih->chip) {
+			case BCM4335_CHIP_ID:
+			case BCM4339_CHIP_ID:
+			case BCM43349_CHIP_ID:
+				bus->dongle_ram_base = CR4_4335_RAM_BASE;
+				break;
+			case BCM4350_CHIP_ID:
+			case BCM4354_CHIP_ID:
+			case BCM4356_CHIP_ID:
+				bus->dongle_ram_base = CR4_4350_RAM_BASE;
+				break;
+			case BCM4360_CHIP_ID:
+				bus->dongle_ram_base = CR4_4360_RAM_BASE;
+				break;
+			case BCM4345_CHIP_ID:
+				bus->dongle_ram_base = CR4_4345_RAM_BASE;
+				break;
+			default:
+				bus->dongle_ram_base = 0;
+				DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
+				           __FUNCTION__, bus->dongle_ram_base));
+			}
+		}
+		bus->ramsize = bus->orig_ramsize;
+		if (dhd_dongle_ramsize)
+			dhd_dongle_setramsize(bus, dhd_dongle_ramsize);
+
+		DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
+		           bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));
+
+		bus->srmemsize = si_socram_srmem_size(bus->sih);
+	}
+
+	/* ...but normally deal with the SDPCMDEV core */
+	if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) &&
+	    !(bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0))) {
+		DHD_ERROR(("%s: failed to find SDIODEV core!\n", __FUNCTION__));
+		goto fail;
+	}
+	bus->sdpcmrev = si_corerev(bus->sih);
+
+	/* Set core control so an SDIO reset does a backplane reset */
+	OR_REG(osh, &bus->regs->corecontrol, CC_BPRESEN);
+	bus->rxint_mode = SDIO_DEVICE_HMB_RXINT;
+
+	if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) &&
+		(bus->rxint_mode  == SDIO_DEVICE_RXDATAINT_MODE_1))
+	{
+		uint32 val;
+
+		val = R_REG(osh, &bus->regs->corecontrol);
+		val &= ~CC_XMTDATAAVAIL_MODE;
+		val |= CC_XMTDATAAVAIL_CTRL;
+		W_REG(osh, &bus->regs->corecontrol, val);
+	}
+
+
+	pktq_init(&bus->txq, (PRIOMASK + 1), QLEN);
+
+	/* Locate an appropriately-aligned portion of hdrbuf */
+	bus->rxhdr = (uint8 *)ROUNDUP((uintptr)&bus->hdrbuf[0], DHD_SDALIGN);
+
+	/* Set the poll and/or interrupt flags */
+	bus->intr = (bool)dhd_intr;
+	if ((bus->poll = (bool)dhd_poll))
+		bus->pollrate = 1;
+
+	/* Setting default Glom size */
+	bus->txglomsize = SDPCM_DEFGLOM_SIZE;
+
+	return TRUE;
+
+fail:
+	if (bus->sih != NULL) {
+		si_detach(bus->sih);
+		bus->sih = NULL;
+	}
+	return FALSE;
+}
+
+static bool
+dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd->maxctl) {
+		bus->rxblen = ROUNDUP((bus->dhd->maxctl+SDPCM_HDRLEN), ALIGNMENT) + DHD_SDALIGN;
+		if (!(bus->rxbuf = DHD_OS_PREALLOC(bus->dhd, DHD_PREALLOC_RXBUF, bus->rxblen))) {
+			DHD_ERROR(("%s: MALLOC of %d-byte rxbuf failed\n",
+			           __FUNCTION__, bus->rxblen));
+			goto fail;
+		}
+	}
+	/* Allocate buffer to receive glomed packet */
+	if (!(bus->databuf = DHD_OS_PREALLOC(bus->dhd, DHD_PREALLOC_DATABUF, MAX_DATA_BUF))) {
+		DHD_ERROR(("%s: MALLOC of %d-byte databuf failed\n",
+			__FUNCTION__, MAX_DATA_BUF));
+		/* release rxbuf which was already allocated above */
+		if (bus->rxblen)
+			DHD_OS_PREFREE(bus->dhd, bus->rxbuf, bus->rxblen);
+		goto fail;
+	}
+
+	/* Align the buffer */
+	if ((uintptr)bus->databuf % DHD_SDALIGN)
+		bus->dataptr = bus->databuf + (DHD_SDALIGN - ((uintptr)bus->databuf % DHD_SDALIGN));
+	else
+		bus->dataptr = bus->databuf;
+
+	return TRUE;
+
+fail:
+	return FALSE;
+}
+
+static bool
+dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh)
+{
+	int32 fnum;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	bus->_srenab = FALSE;
+
+#ifdef SDTEST
+	dhdsdio_pktgen_init(bus);
+#endif /* SDTEST */
+
+	/* Disable F2 to clear any intermediate frame state on the dongle */
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
+
+	bus->dhd->busstate = DHD_BUS_DOWN;
+	bus->sleeping = FALSE;
+	bus->rxflow = FALSE;
+	bus->prev_rxlim_hit = 0;
+
+	/* Done with backplane-dependent accesses, can drop clock... */
+	bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+
+	/* ...and initialize clock/power states */
+	bus->clkstate = CLK_SDONLY;
+	bus->idletime = (int32)dhd_idletime;
+	bus->idleclock = DHD_IDLE_ACTIVE;
+
+	/* Query the SD clock speed */
+	if (bcmsdh_iovar_op(sdh, "sd_divisor", NULL, 0,
+	                    &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) {
+		DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_divisor"));
+		bus->sd_divisor = -1;
+	} else {
+		DHD_INFO(("%s: Initial value for %s is %d\n",
+		          __FUNCTION__, "sd_divisor", bus->sd_divisor));
+	}
+
+	/* Query the SD bus mode */
+	if (bcmsdh_iovar_op(sdh, "sd_mode", NULL, 0,
+	                    &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) {
+		DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_mode"));
+		bus->sd_mode = -1;
+	} else {
+		DHD_INFO(("%s: Initial value for %s is %d\n",
+		          __FUNCTION__, "sd_mode", bus->sd_mode));
+	}
+
+	/* Query the F2 block size, set roundup accordingly */
+	fnum = 2;
+	if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fnum, sizeof(int32),
+	                    &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
+		bus->blocksize = 0;
+		DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
+	} else {
+		DHD_INFO(("%s: Initial value for %s is %d\n",
+		          __FUNCTION__, "sd_blocksize", bus->blocksize));
+
+		dhdsdio_tune_fifoparam(bus);
+	}
+	bus->roundup = MIN(max_roundup, bus->blocksize);
+
+	if (bus->pad_pkt)
+		PKTFREE(osh, bus->pad_pkt, FALSE);
+	bus->pad_pkt = PKTGET(osh, SDIO_MAX_BLOCK_SIZE, FALSE);
+	if (bus->pad_pkt == NULL)
+		DHD_ERROR(("failed to allocate padding packet\n"));
+	else {
+		int alignment_offset;
+		uintptr pktprt = (uintptr)PKTDATA(osh, bus->pad_pkt);
+
+		/* Re-align the data pointer to a DHD_SDALIGN boundary */
+		alignment_offset = (int)(pktprt % DHD_SDALIGN);
+		if (!(pktprt & 1) && alignment_offset)
+			PKTPUSH(osh, bus->pad_pkt, alignment_offset);
+		PKTSETNEXT(osh, bus->pad_pkt, NULL);
+	}
+
+	/* Query if bus module supports packet chaining, default to use if supported */
+	if (bcmsdh_iovar_op(sdh, "sd_rxchain", NULL, 0,
+	                    &bus->sd_rxchain, sizeof(int32), FALSE) != BCME_OK) {
+		bus->sd_rxchain = FALSE;
+	} else {
+		DHD_INFO(("%s: bus module (through bcmsdh API) %s chaining\n",
+		          __FUNCTION__, (bus->sd_rxchain ? "supports" : "does not support")));
+	}
+	bus->use_rxchain = (bool)bus->sd_rxchain;
+	bus->txinrx_thres = CUSTOM_TXINRX_THRES;
+	/* TX first in dhdsdio_readframes() */
+	bus->dotxinrx = TRUE;
+
+	return TRUE;
+}
+
+int
+dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, char *pfw_path, char *pnv_path)
+{
+	int ret;
+
+	bus->fw_path = pfw_path;
+	bus->nv_path = pnv_path;
+
+	ret = dhdsdio_download_firmware(bus, osh, bus->sdh);
+
+
+	return ret;
+}
+
+static int
+dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh)
+{
+	int ret;
+
+	DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n",
+		__FUNCTION__, bus->fw_path, bus->nv_path));
+	DHD_OS_WAKE_LOCK(bus->dhd);
+
+	/* Download the firmware */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	ret = _dhdsdio_download_firmware(bus);
+
+	dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+	DHD_OS_WAKE_UNLOCK(bus->dhd);
+	return ret;
+}
+
+/* Detach and free everything */
+static void
+dhdsdio_release(dhd_bus_t *bus, osl_t *osh)
+{
+	bool dongle_isolation = FALSE;
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus) {
+		ASSERT(osh);
+
+		if (bus->dhd) {
+			dongle_isolation = bus->dhd->dongle_isolation;
+			dhd_detach(bus->dhd);
+		}
+
+		/* De-register interrupt handler */
+		bcmsdh_intr_disable(bus->sdh);
+		bcmsdh_intr_dereg(bus->sdh);
+
+		if (bus->dhd) {
+			dhdsdio_release_dongle(bus, osh, dongle_isolation, TRUE);
+			dhd_free(bus->dhd);
+			bus->dhd = NULL;
+		}
+
+		dhdsdio_release_malloc(bus, osh);
+
+#ifdef DHD_DEBUG
+		if (bus->console.buf != NULL)
+			MFREE(osh, bus->console.buf, bus->console.bufsize);
+#endif
+
+		if (bus->pad_pkt)
+			PKTFREE(osh, bus->pad_pkt, FALSE);
+
+		MFREE(osh, bus, sizeof(dhd_bus_t));
+	}
+
+	DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+static void
+dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd && bus->dhd->dongle_reset)
+		return;
+
+	if (bus->rxbuf) {
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+		MFREE(osh, bus->rxbuf, bus->rxblen);
+#endif
+		bus->rxctl = bus->rxbuf = NULL;
+		bus->rxlen = 0;
+	}
+
+	if (bus->databuf) {
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+		MFREE(osh, bus->databuf, MAX_DATA_BUF);
+#endif
+		bus->databuf = NULL;
+	}
+
+	if (bus->vars && bus->varsz) {
+		MFREE(osh, bus->vars, bus->varsz);
+		bus->vars = NULL;
+	}
+
+}
+
+
+static void
+dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
+{
+	DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
+		bus->dhd, bus->dhd->dongle_reset));
+
+	if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag)
+		return;
+
+	if (bus->sih) {
+#if !defined(BCMLXSDMMC)
+		if (bus->dhd) {
+			dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+		}
+		if (KSO_ENAB(bus) && (dongle_isolation == FALSE))
+			si_watchdog(bus->sih, 4);
+#endif /* !defined(BCMLXSDMMC) */
+		if (bus->dhd) {
+			dhdsdio_clkctl(bus, CLK_NONE, FALSE);
+		}
+		si_detach(bus->sih);
+		bus->sih = NULL;
+		if (bus->vars && bus->varsz)
+			MFREE(osh, bus->vars, bus->varsz);
+		bus->vars = NULL;
+	}
+
+	DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+static void
+dhdsdio_disconnect(void *ptr)
+{
+	dhd_bus_t *bus = (dhd_bus_t *)ptr;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus) {
+		ASSERT(bus->dhd);
+		dhdsdio_release(bus, bus->dhd->osh);
+	}
+
+	DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
+}
+
+static int
+dhdsdio_suspend(void *context)
+{
+	int ret = 0;
+
+	dhd_bus_t *bus = (dhd_bus_t*)context;
+	int wait_time = 0;
+
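+	/* Give the watchdog up to the configured bus idle timeout to put
+	 * the bus to sleep before deciding whether this suspend can proceed.
+	 */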
+	if (bus->idletime > 0) {
+		wait_time = msecs_to_jiffies(bus->idletime * dhd_watchdog_ms);
+	}
+
+	ret = dhd_os_check_wakelock(bus->dhd);
+	if ((!ret) && (bus->dhd->up)) {
+		if (wait_event_timeout(bus->bus_sleep, bus->sleeping, wait_time) == 0) {
+			if (!bus->sleeping) {
+				return 1;
+			}
+		}
+	}
+	return ret;
+}
+
+static int
+dhdsdio_resume(void *context)
+{
+#if defined(OOB_INTR_ONLY)
+	dhd_bus_t *bus = (dhd_bus_t*)context;
+
+	if (dhd_os_check_if_up(bus->dhd))
+		bcmsdh_oob_intr_set(bus->sdh, TRUE);
+#endif /* OOB_INTR_ONLY */
+	return 0;
+}
+
+
+/* Register/Unregister functions are called by the main DHD entry
+ * point (e.g. module insertion) to link with the bus driver, in
+ * order to look for or await the device.
+ */
+
+static bcmsdh_driver_t dhd_sdio = {
+	dhdsdio_probe,
+	dhdsdio_disconnect,
+	dhdsdio_suspend,
+	dhdsdio_resume
+};
+
+int
+dhd_bus_register(void)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	return bcmsdh_register(&dhd_sdio);
+}
+
+void
+dhd_bus_unregister(void)
+{
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	bcmsdh_unregister();
+}
+
+#if defined(BCMLXSDMMC)
+/* Register a dummy SDIO client driver in order to be notified of new SDIO device */
+int dhd_bus_reg_sdio_notify(void* semaphore)
+{
+	return bcmsdh_reg_sdio_notify(semaphore);
+}
+
+void dhd_bus_unreg_sdio_notify(void)
+{
+	bcmsdh_unreg_sdio_notify();
+}
+#endif /* defined(BCMLXSDMMC) */
+
+#ifdef BCMEMBEDIMAGE
+static int
+dhdsdio_download_code_array(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+	int offset = 0;
+	unsigned char *ularray = NULL;
+
+	DHD_INFO(("%s: download embedded firmware...\n", __FUNCTION__));
+
+	/* Download image */
+	while ((offset + MEMBLOCK) < sizeof(dlarray)) {
+		/* check if CR4 */
+		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			/* if address is 0, store the reset instruction to be written in 0 */
+
+			if (offset == 0) {
+				bus->resetinstr = *(((uint32*)dlarray));
+				/* Add start of RAM address to the address given by user */
+				offset += bus->dongle_ram_base;
+			}
+		}
+
+		bcmerror = dhdsdio_membytes(bus, TRUE, offset,
+			(uint8 *) (dlarray + offset), MEMBLOCK);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+			        __FUNCTION__, bcmerror, MEMBLOCK, offset));
+			goto err;
+		}
+
+		offset += MEMBLOCK;
+	}
+
+	if (offset < sizeof(dlarray)) {
+		bcmerror = dhdsdio_membytes(bus, TRUE, offset,
+			(uint8 *) (dlarray + offset), sizeof(dlarray) - offset);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+			        __FUNCTION__, bcmerror, (int)(sizeof(dlarray) - offset), offset));
+			goto err;
+		}
+	}
+
+#ifdef DHD_DEBUG
+	/* Upload and compare the downloaded code */
+	{
+		ularray = MALLOC(bus->dhd->osh, bus->ramsize);
+		if (ularray == NULL) {
+			bcmerror = BCME_NOMEM;
+			goto err;
+		}
+		/* Upload image to verify downloaded contents. */
+		offset = 0;
+		memset(ularray, 0xaa, bus->ramsize);
+		while ((offset + MEMBLOCK) < sizeof(dlarray)) {
+			bcmerror = dhdsdio_membytes(bus, FALSE, offset, ularray + offset, MEMBLOCK);
+			if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+					__FUNCTION__, bcmerror, MEMBLOCK, offset));
+				goto err;
+			}
+
+			offset += MEMBLOCK;
+		}
+
+		if (offset < sizeof(dlarray)) {
+			bcmerror = dhdsdio_membytes(bus, FALSE, offset,
+				ularray + offset, sizeof(dlarray) - offset);
+			if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+					__FUNCTION__, bcmerror, (int)(sizeof(dlarray) - offset), offset));
+				goto err;
+			}
+		}
+
+		if (memcmp(dlarray, ularray, sizeof(dlarray))) {
+			DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n",
+			           __FUNCTION__, dlimagename, dlimagever, dlimagedate));
+			goto err;
+		} else
+			DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n",
+			           __FUNCTION__, dlimagename, dlimagever, dlimagedate));
+
+	}
+#endif /* DHD_DEBUG */
+
+err:
+	if (ularray)
+		MFREE(bus->dhd->osh, ularray, bus->ramsize);
+	return bcmerror;
+}
+#endif /* BCMEMBEDIMAGE */
+
+static int
+dhdsdio_download_code_file(struct dhd_bus *bus, char *pfw_path)
+{
+	int bcmerror = -1;
+	int offset = 0;
+	int len;
+	void *image = NULL;
+	uint8 *memblock = NULL, *memptr;
+
+	DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
+
+	image = dhd_os_open_image(pfw_path);
+	if (image == NULL) {
+		DHD_ERROR(("%s: Failed to open firmware file %s \n", __FUNCTION__, pfw_path));
+		goto err;
+	}
+
+	memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+	if (memblock == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+		goto err;
+	}
+	if ((uint32)(uintptr)memblock % DHD_SDALIGN)
+		memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
+
+	/* Download image */
+	while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, image))) {
+		if (len < 0) {
+			DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
+			bcmerror = BCME_ERROR;
+			goto err;
+		}
+		/* check if CR4 */
+		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			/* if address is 0, store the reset instruction to be written in 0 */
+
+			if (offset == 0) {
+				bus->resetinstr = *(((uint32*)memptr));
+				/* Add start of RAM address to the address given by user */
+				offset += bus->dongle_ram_base;
+			}
+		}
+
+		bcmerror = dhdsdio_membytes(bus, TRUE, offset, memptr, len);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+			        __FUNCTION__, bcmerror, len, offset));
+			goto err;
+		}
+
+		offset += MEMBLOCK;
+	}
+
+err:
+	if (memblock)
+		MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
+
+	if (image)
+		dhd_os_close_image(image);
+
+	return bcmerror;
+}
+
+/*
+	EXAMPLE: nvram_array
+	nvram_array format:
+	name=value
+	Terminate each assignment with a newline, and end the array with an
+	empty string followed by a newline.
+
+	For example:
+	unsigned char  nvram_array[] = {"name1=value1\n", "name2=value2\n", "\n"};
+	Hex values start with 0x, and mac addr format: xx:xx:xx:xx:xx:xx.
+
+	Search "EXAMPLE: nvram_array" to see how the array is activated.
+*/
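+/*
+ * A minimal sketch of activating such an array (the values below are
+ * hypothetical placeholders, not shipped defaults):
+ *
+ *	static char nvram_array[] =
+ *		"macaddr=00:90:4c:c5:12:38\n"
+ *		"boardtype=0x0532\n"
+ *		"\n";
+ *
+ *	dhd_bus_set_nvram_params(bus, nvram_array);
+ */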
+
+void
+dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params)
+{
+	bus->nvram_params = nvram_params;
+}
+
+static int
+dhdsdio_download_nvram(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+	uint len;
+	void * image = NULL;
+	char * memblock = NULL;
+	char *bufp;
+	char *pnv_path;
+	bool nvram_file_exists;
+
+	pnv_path = bus->nv_path;
+
+	nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
+	if (!nvram_file_exists && (bus->nvram_params == NULL))
+		return (0);
+
+	if (nvram_file_exists) {
+		image = dhd_os_open_image(pnv_path);
+		if (image == NULL)
+			goto err;
+	}
+
+	memblock = MALLOC(bus->dhd->osh, MAX_NVRAMBUF_SIZE);
+	if (memblock == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
+		           __FUNCTION__, MAX_NVRAMBUF_SIZE));
+		goto err;
+	}
+
+	/* Download variables */
+	if (nvram_file_exists) {
+		len = dhd_os_get_image_block(memblock, MAX_NVRAMBUF_SIZE, image);
+	}
+	else {
+		len = strlen(bus->nvram_params);
+		ASSERT(len <= MAX_NVRAMBUF_SIZE);
+		memcpy(memblock, bus->nvram_params, len);
+	}
+	if (len > 0 && len < MAX_NVRAMBUF_SIZE) {
+		bufp = (char *)memblock;
+		bufp[len] = 0;
+		len = process_nvram_vars(bufp, len);
+		if (len % 4) {
+			len += 4 - (len % 4);
+		}
+		bufp += len;
+		*bufp++ = 0;
+		if (len)
+			bcmerror = dhdsdio_downloadvars(bus, memblock, len + 1);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error downloading vars: %d\n",
+			           __FUNCTION__, bcmerror));
+		}
+	}
+	else {
+		DHD_ERROR(("%s: error reading nvram file: %d\n",
+		           __FUNCTION__, len));
+		bcmerror = BCME_SDIO_ERROR;
+	}
+
+err:
+	if (memblock)
+		MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
+
+	if (image)
+		dhd_os_close_image(image);
+
+	return bcmerror;
+}
+
+static int
+_dhdsdio_download_firmware(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+
+	bool embed = FALSE;	/* download embedded firmware */
+	bool dlok = FALSE;	/* download firmware succeeded */
+
+	/* Out immediately if no image to download */
+	if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
+#ifdef BCMEMBEDIMAGE
+		embed = TRUE;
+#else
+		return 0;
+#endif
+	}
+
+	/* Keep arm in reset */
+	if (dhdsdio_download_state(bus, TRUE)) {
+		DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* External image takes precedence if specified */
+	if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
+		if (dhdsdio_download_code_file(bus, bus->fw_path)) {
+			DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__));
+#ifdef BCMEMBEDIMAGE
+			embed = TRUE;
+#else
+			goto err;
+#endif
+		}
+		else {
+			embed = FALSE;
+			dlok = TRUE;
+		}
+	}
+
+#ifdef BCMEMBEDIMAGE
+	if (embed) {
+		if (dhdsdio_download_code_array(bus)) {
+			DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
+			goto err;
+		}
+		else {
+			dlok = TRUE;
+		}
+	}
+#else
+	BCM_REFERENCE(embed);
+#endif
+	if (!dlok) {
+		DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* EXAMPLE: nvram_array */
+	/* If a valid nvram_array is specified as above, it can be passed down to the dongle */
+	/* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
+
+	/* External nvram takes precedence if specified */
+	if (dhdsdio_download_nvram(bus)) {
+		DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* Take arm out of reset */
+	if (dhdsdio_download_state(bus, FALSE)) {
+		DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
+		goto err;
+	}
+
+	bcmerror = 0;
+
+err:
+	return bcmerror;
+}
+
+static int
+dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes,
+	void *pkt, bcmsdh_cmplt_fn_t complete, void *handle)
+{
+	int status;
+
+	if (!KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+		return BCME_NODEVICE;
+	}
+
+	status = bcmsdh_recv_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete, handle);
+
+	return status;
+}
+
+static int
+dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes,
+	void *pkt, bcmsdh_cmplt_fn_t complete, void *handle, int max_retry)
+{
+	int ret;
+	int i = 0;
+	int retries = 0;
+	bcmsdh_info_t *sdh;
+
+	if (!KSO_ENAB(bus)) {
+		DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+		return BCME_NODEVICE;
+	}
+
+	sdh = bus->sdh;
+	do {
+		ret = bcmsdh_send_buf(bus->sdh, addr, fn, flags, buf, nbytes,
+			pkt, complete, handle);
+
+		bus->f2txdata++;
+		ASSERT(ret != BCME_PENDING);
+
+		if (ret == BCME_NODEVICE) {
+			DHD_ERROR(("%s: Device asleep already\n", __FUNCTION__));
+		} else if (ret < 0) {
+			/* On failure, abort the command and terminate the frame */
+			DHD_ERROR(("%s: sdio error %d, abort command and terminate frame.\n",
+				__FUNCTION__, ret));
+			bus->tx_sderrs++;
+			bus->f1regdata++;
+			bus->dhd->tx_errors++;
+			bcmsdh_abort(sdh, SDIO_FUNC_2);
+			bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
+				SFC_WF_TERM, NULL);
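+			/* Poll the write-frame byte counters until both read
+			 * zero, confirming the aborted frame has drained.
+			 */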
+			for (i = 0; i < READ_FRM_CNT_RETRIES; i++) {
+				uint8 hi, lo;
+				hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WFRAMEBCHI,
+					NULL);
+				lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WFRAMEBCLO,
+					NULL);
+				bus->f1regdata += 2;
+				if ((hi == 0) && (lo == 0))
+					break;
+			}
+		}
+	} while ((ret < 0) && retrydata && ++retries < max_retry);
+
+	return ret;
+}
+
+uint
+dhd_bus_chip(struct dhd_bus *bus)
+{
+	ASSERT(bus->sih != NULL);
+	return bus->sih->chip;
+}
+
+uint
+dhd_bus_chiprev(struct dhd_bus *bus)
+{
+	ASSERT(bus);
+	ASSERT(bus->sih != NULL);
+	return bus->sih->chiprev;
+}
+
+void *
+dhd_bus_pub(struct dhd_bus *bus)
+{
+	return bus->dhd;
+}
+
+void *
+dhd_bus_sih(struct dhd_bus *bus)
+{
+	return (void *)bus->sih;
+}
+
+void *
+dhd_bus_txq(struct dhd_bus *bus)
+{
+	return &bus->txq;
+}
+
+void
+dhd_bus_set_dotxinrx(struct dhd_bus *bus, bool val)
+{
+	bus->dotxinrx = val;
+}
+
+int
+dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
+{
+	int bcmerror = 0;
+	dhd_bus_t *bus;
+
+	bus = dhdp->bus;
+
+	if (flag == TRUE) {
+		if (!bus->dhd->dongle_reset) {
+			dhd_os_sdlock(dhdp);
+			dhd_os_wd_timer(dhdp, 0);
+#if !defined(IGNORE_ETH0_DOWN)
+			/* Force flow control as protection when stop come before ifconfig_down */
+			dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+#endif /* !defined(IGNORE_ETH0_DOWN) */
+			/* Expect app to have torn down any connection before calling */
+			/* Stop the bus, disable F2 */
+			dhd_bus_stop(bus, FALSE);
+
+#if defined(OOB_INTR_ONLY)
+			/* Clean up any pending IRQ */
+			dhd_enable_oob_intr(bus, FALSE);
+			bcmsdh_oob_intr_set(bus->sdh, FALSE);
+			bcmsdh_oob_intr_unregister(bus->sdh);
+#endif /* OOB_INTR_ONLY */
+
+			/* Clean tx/rx buffer pointers, detach from the dongle */
+			dhdsdio_release_dongle(bus, bus->dhd->osh, TRUE, TRUE);
+
+			bus->dhd->dongle_reset = TRUE;
+			bus->dhd->up = FALSE;
+			dhd_txglom_enable(dhdp, FALSE);
+			dhd_os_sdunlock(dhdp);
+
+			DHD_TRACE(("%s:  WLAN OFF DONE\n", __FUNCTION__));
+			/* App can now remove power from device */
+		} else
+			bcmerror = BCME_SDIO_ERROR;
+	} else {
+		/* App must have restored power to device before calling */
+
+		DHD_TRACE(("\n\n%s: == WLAN ON ==\n", __FUNCTION__));
+
+		if (bus->dhd->dongle_reset) {
+			/* Turn on WLAN */
+			dhd_os_sdlock(dhdp);
+			/* Reset SD client */
+			bcmsdh_reset(bus->sdh);
+
+			/* Attempt to re-attach & download */
+			if (dhdsdio_probe_attach(bus, bus->dhd->osh, bus->sdh,
+				(uint32 *)SI_ENUM_BASE,
+				bus->cl_devid)) {
+				/* Attempt to download binary to the dongle */
+				if (dhdsdio_probe_init(bus, bus->dhd->osh, bus->sdh) &&
+				    dhdsdio_download_firmware(bus, bus->dhd->osh, bus->sdh) >= 0) {
+
+					/* Re-init bus, enable F2 transfer */
+					bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE);
+					if (bcmerror == BCME_OK) {
+#if defined(OOB_INTR_ONLY)
+						dhd_enable_oob_intr(bus, TRUE);
+						bcmsdh_oob_intr_register(bus->sdh,
+							dhdsdio_isr, bus);
+						bcmsdh_oob_intr_set(bus->sdh, TRUE);
+#endif /* OOB_INTR_ONLY */
+
+						bus->dhd->dongle_reset = FALSE;
+						bus->dhd->up = TRUE;
+
+#if !defined(IGNORE_ETH0_DOWN)
+						/* Restore flow control  */
+						dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
+#endif /* !defined(IGNORE_ETH0_DOWN) */
+						dhd_os_wd_timer(dhdp, dhd_watchdog_ms);
+
+						DHD_TRACE(("%s: WLAN ON DONE\n", __FUNCTION__));
+					} else {
+						dhd_bus_stop(bus, FALSE);
+						dhdsdio_release_dongle(bus, bus->dhd->osh,
+							TRUE, FALSE);
+					}
+				} else
+					bcmerror = BCME_SDIO_ERROR;
+			} else
+				bcmerror = BCME_SDIO_ERROR;
+
+			dhd_os_sdunlock(dhdp);
+		} else {
+			bcmerror = BCME_SDIO_ERROR;
+			DHD_INFO(("%s called when dongle is not in reset\n",
+				__FUNCTION__));
+			DHD_INFO(("Will call dhd_bus_start instead\n"));
+			dhd_bus_resume(dhdp, 1);
+			if ((bcmerror = dhd_bus_start(dhdp)) != 0)
+				DHD_ERROR(("%s: dhd_bus_start fail with %d\n",
+					__FUNCTION__, bcmerror));
+		}
+	}
+	return bcmerror;
+}
+
+int dhd_bus_suspend(dhd_pub_t *dhdpub)
+{
+	return bcmsdh_stop(dhdpub->bus->sdh);
+}
+
+int dhd_bus_resume(dhd_pub_t *dhdpub, int stage)
+{
+	return bcmsdh_start(dhdpub->bus->sdh, stage);
+}
+
+/* Get Chip ID version */
+uint dhd_bus_chip_id(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+
+	return  bus->sih->chip;
+}
+
+/* Get Chip Rev ID version */
+uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+
+	return bus->sih->chiprev;
+}
+
+/* Get Chip Pkg ID version */
+uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+
+	return bus->sih->chippkg;
+}
+
+int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_num, uint32 *slot_num)
+{
+	*bus_type = bus->bus;
+	*bus_num = bus->bus_num;
+	*slot_num = bus->slot_num;
+	return 0;
+}
+
+int
+dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size)
+{
+	dhd_bus_t *bus;
+
+	bus = dhdp->bus;
+	return dhdsdio_membytes(bus, set, address, data, size);
+}
+#if defined(SUPPORT_MULTIPLE_REVISION)
+/* Just print chip revision for BCM4330 */
+static int
+concate_revision_bcm4330(dhd_bus_t *bus)
+{
+	uint chipver;
+	chipver = bus->sih->chiprev;
+
+	if (chipver == 3)
+		DHD_ERROR(("---- CHIP bcm4330 B1 ----\n"));
+	else if (chipver == 4)
+		DHD_ERROR(("---- CHIP bcm4330 B2 ----\n"));
+	else
+		DHD_ERROR(("----- Invalid chip version -----\n"));
+	return 0;
+}
+
+static int
+concate_revision_bcm4334(dhd_bus_t *bus,
+	char *fw_path, int fw_path_len, char *nv_path, int nv_path_len)
+{
+#define	REV_ID_ADDR	0x1E008F90
+#define BCM4334_B1_UNIQUE	0x30312E36
+
+	uint chipver;
+	uint32 unique_id;
+	uint8 data[4];
+#if defined(SUPPORT_MULTIPLE_CHIPS)
+	char chipver_tag[10] = "_4334";
+#else
+	char chipver_tag[4] = "";
+#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */
+
+	DHD_TRACE(("%s: BCM4334 Multiple Revision Check\n", __FUNCTION__));
+	if (bus->sih->chip != BCM4334_CHIP_ID) {
+		DHD_ERROR(("%s:Chip is not BCM4334\n", __FUNCTION__));
+		return -1;
+	}
+	chipver = bus->sih->chiprev;
+	if (chipver == 0x2) {
+		dhdsdio_membytes(bus, FALSE, REV_ID_ADDR, data, 4);
+		unique_id = load32_ua(data);
+		if (unique_id == BCM4334_B1_UNIQUE)
+			chipver = 0x01;
+	}
+	DHD_ERROR(("CHIP VER = [0x%x]\n", chipver));
+	if (chipver == 1) {
+		DHD_ERROR(("----- CHIP bcm4334_B0 -----\n"));
+		strcat(chipver_tag, "_b0");
+	} else if (chipver == 2) {
+		DHD_ERROR(("----- CHIP bcm4334_B1 -----\n"));
+		strcat(chipver_tag, "_b1");
+	} else if (chipver == 3) {
+		DHD_ERROR(("----- CHIP bcm4334_B2 -----\n"));
+		strcat(chipver_tag, "_b2");
+	}
+	else {
+		DHD_ERROR(("----- Invalid chip version -----\n"));
+		return -1;
+	}
+
+	if (fw_path)
+		strcat(fw_path, chipver_tag);
+#if defined(SUPPORT_MULTIPLE_CHIPS)
+	if (nv_path)
+		strcat(nv_path, chipver_tag);
+#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */
+
+#undef REV_ID_ADDR
+#undef BCM4334_B1_UNIQUE
+	return 0;
+}
+
+static int
+concate_revision_bcm4335(dhd_bus_t *bus, char *fw_path, int fw_path_len,
+	char *nv_path, int nv_path_len)
+{
+	uint chipver;
+#if defined(SUPPORT_MULTIPLE_CHIPS)
+	char chipver_tag[10] = "_4335";
+#else
+	char chipver_tag[4] = {0, };
+#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */
+
+	DHD_TRACE(("%s: BCM4335 Multiple Revision Check\n", __FUNCTION__));
+	if (bus->sih->chip != BCM4335_CHIP_ID) {
+		DHD_ERROR(("%s:Chip is not BCM4335\n", __FUNCTION__));
+		return -1;
+	}
+	chipver = bus->sih->chiprev;
+	DHD_ERROR(("CHIP VER = [0x%x]\n", chipver));
+	if (chipver == 0x0) {
+		DHD_ERROR(("----- CHIP bcm4335_A0 -----\n"));
+		strlcat(chipver_tag, "_a0", sizeof(chipver_tag));
+	} else if (chipver == 0x1) {
+		DHD_ERROR(("----- CHIP bcm4335_B0 -----\n"));
+#if defined(SUPPORT_MULTIPLE_CHIPS)
+		strlcat(chipver_tag, "_b0", sizeof(chipver_tag));
+#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */
+	}
+
+	if (fw_path)
+		strlcat(fw_path, chipver_tag, fw_path_len);
+	if (nv_path) {
+		strlcat(nv_path, chipver_tag, nv_path_len);
+#ifdef HW_OOB
+		strlcat(nv_path, "_level", nv_path_len);
+#else
+		strlcat(nv_path, "_edge", nv_path_len);
+#endif
+	}
+
+	return 0;
+}
+
+static int
+concate_revision_bcm4339(dhd_bus_t *bus, char *fw_path, int fw_path_len,
+	char *nv_path, int nv_path_len)
+{
+	uint chipver;
+#if defined(SUPPORT_MULTIPLE_CHIPS)
+	char chipver_tag[10] = "_4339";
+#else
+	char chipver_tag[4] = {0, };
+#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */
+
+	DHD_TRACE(("%s: BCM4339 Multiple Revision Check\n", __FUNCTION__));
+	if (bus->sih->chip != BCM4339_CHIP_ID) {
+		DHD_ERROR(("%s:Chip is not BCM4339\n", __FUNCTION__));
+		return -1;
+	}
+	chipver = bus->sih->chiprev;
+	DHD_ERROR(("CHIP VER = [0x%x]\n", chipver));
+	if (chipver == 0x1) {
+		DHD_ERROR(("----- CHIP bcm4339_A0 -----\n"));
+		strlcat(chipver_tag, "_a0", sizeof(chipver_tag));
+	} else {
+		DHD_ERROR(("----- CHIP bcm4339 unknown revision %d -----\n",
+			chipver));
+	}
+
+	if (fw_path)
+		strlcat(fw_path, chipver_tag, fw_path_len);
+	if (nv_path) {
+		strlcat(nv_path, chipver_tag, nv_path_len);
+#ifdef HW_OOB
+		strlcat(nv_path, "_level", nv_path_len);
+#else
+		strlcat(nv_path, "_edge", nv_path_len);
+#endif
+#ifdef LOW_POWER_NVRAM
+		strlcat(nv_path, "_lp", nv_path_len);
+#endif
+#ifdef LOW_POWER_NVRAM_V1
+		strlcat(nv_path, "_lp_v1", nv_path_len);
+#endif
+
+	}
+
+	return 0;
+}
+
+static int
+concate_revision_bcm43349(dhd_bus_t *bus, char *fw_path, int fw_path_len,
+	char *nv_path, int nv_path_len)
+{
+	uint chipver;
+#if defined(SUPPORT_MULTIPLE_CHIPS)
+	char chipver_tag[10] = "_43349";
+#else
+	char chipver_tag[4] = {0, };
+#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */
+
+	DHD_TRACE(("%s: BCM43349 Multiple Revision Check\n", __FUNCTION__));
+	if (bus->sih->chip != BCM43349_CHIP_ID) {
+		DHD_ERROR(("%s:Chip is not BCM43349\n", __FUNCTION__));
+		return -1;
+	}
+	chipver = bus->sih->chiprev;
+	DHD_ERROR(("CHIP VER = [0x%x]\n", chipver));
+	if (chipver == 0x1) {
+		DHD_ERROR(("----- CHIP bcm43349_A0 -----\n"));
+		strcat(chipver_tag, "_a0");
+	} else {
+		DHD_ERROR(("----- CHIP bcm43349 unknown revision %d -----\n",
+			chipver));
+	}
+
+	if (fw_path)
+		strcat(fw_path, chipver_tag);
+	if (nv_path)
+		strcat(nv_path, chipver_tag);
+	return 0;
+}
+
+static int concate_revision_bcm43241(dhd_bus_t *bus,
+	char *fw_path, int fw_path_len, char *nv_path, int nv_path_len)
+{
+	uint32 chip_id, chip_ver;
+#if defined(SUPPORT_MULTIPLE_CHIPS)
+	char chipver_tag[10] = "_43241";
+#else
+	char chipver_tag[4] = {0, };
+#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */
+
+	DHD_TRACE(("%s: BCM43241 Multiple Revision Check\n", __FUNCTION__));
+
+	chip_id = bus->sih->chip;
+	chip_ver = bus->sih->chiprev;
+
+	if (chip_ver == 2) {
+		DHD_ERROR(("----- CHIP bcm43241_B0 -----\n"));
+		strcat(chipver_tag, "_b0");
+	} else if (chip_ver == 5) {
+		DHD_ERROR(("----- CHIP bcm43241_B4 -----\n"));
+		strcat(chipver_tag, "_b4");
+	} else {
+		DHD_ERROR(("----- Invalid chip version -----\n"));
+		return -1;
+	}
+
+	if (fw_path)
+		strcat(fw_path, chipver_tag);
+	if (nv_path)
+		strcat(nv_path, chipver_tag);
+	return 0;
+}
+
+static int concate_revision_bcm4350(dhd_bus_t *bus,
+        char *fw_path, int fw_path_len, char *nv_path, int nv_path_len)
+{
+	uint32 chip_id, chip_ver;
+#if defined(SUPPORT_MULTIPLE_CHIPS)
+	char chipver_tag[10] = {0, };
+#else
+	char chipver_tag[4] = {0, };
+#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */
+	chip_id = bus->sih->chip;
+	chip_ver = bus->sih->chiprev;
+
+#if defined(SUPPORT_MULTIPLE_CHIPS)
+	if (chip_ver == 3)
+		strcat(chipver_tag, "_4354");
+	else
+		strcat(chipver_tag, "_4350");
+#endif
+
+	if (chip_ver == 3) {
+		DHD_ERROR(("----- CHIP 4354 A0 -----\n"));
+		strcat(chipver_tag, "_a0");
+	} else {
+		DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver));
+	}
+
+	if (fw_path)
+		strcat(fw_path, chipver_tag);
+	if (nv_path)
+		strcat(nv_path, chipver_tag);
+	return 0;
+}
+
+static int concate_revision_bcm4354(dhd_bus_t *bus,
+        char *fw_path, int fw_path_len, char *nv_path, int nv_path_len)
+{
+	uint32 chip_id, chip_ver;
+#if defined(SUPPORT_MULTIPLE_CHIPS)
+	char chipver_tag[10] = "_4354";
+#else
+	char chipver_tag[4] = {0, };
+#endif /* SUPPORT_MULTIPLE_CHIPS */
+
+	chip_id = bus->sih->chip;
+	chip_ver = bus->sih->chiprev;
+	if (chip_ver == 1) {
+		DHD_ERROR(("----- CHIP 4354 A1 -----\n"));
+		strcat(chipver_tag, "_a1");
+	} else {
+		DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver));
+	}
+
+	if (fw_path)
+		strcat(fw_path, chipver_tag);
+	if (nv_path)
+		strcat(nv_path, chipver_tag);
+
+	return 0;
+}
+
+static int concate_revision_bcm4356(dhd_bus_t *bus,
+        char *fw_path, int fw_path_len, char *nv_path, int nv_path_len)
+{
+	uint32 chip_id, chip_ver;
+#if defined(SUPPORT_MULTIPLE_CHIPS)
+	char chipver_tag[10] = "_4356";
+#else
+	char chipver_tag[4] = {0, };
+#endif /* SUPPORT_MULTIPLE_CHIPS */
+
+	chip_id = bus->sih->chip;
+	chip_ver = bus->sih->chiprev;
+	if (chip_ver == 0x2) {
+		DHD_ERROR(("----- CHIP 4356 A2 -----\n"));
+		strcat(chipver_tag, "_a2");
+	} else {
+		DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver));
+	}
+
+	if (fw_path)
+		strcat(fw_path, chipver_tag);
+	if (nv_path)
+		strcat(nv_path, chipver_tag);
+
+	return 0;
+}
+
+static int concate_revision_bcm43341(dhd_bus_t *bus,
+        char *fw_path, int fw_path_len, char *nv_path, int nv_path_len)
+{
+	uint chipver;
+#if defined(SUPPORT_MULTIPLE_CHIPS)
+	char chipver_tag[10] = "_4334x"; /* TODO: firmware name should be changed to 43341 */
+#else
+	char chipver_tag[4] = {0, };
+#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */
+
+	DHD_TRACE(("%s: BCM43341 Multiple Revision Check\n", __FUNCTION__));
+	if (bus->sih->chip != BCM43341_CHIP_ID) {
+		DHD_ERROR(("%s:Chip is not BCM43341\n", __FUNCTION__));
+		return -1;
+	}
+	chipver = bus->sih->chiprev;
+	DHD_ERROR(("CHIP VER = [0x%x]\n", chipver));
+	if (chipver == 0x1) {
+		DHD_ERROR(("----- CHIP bcm43341_A0 -----\n"));
+		strcat(chipver_tag, "_a0");
+	} else if (chipver == 0x2) {
+		DHD_ERROR(("----- CHIP bcm43341_B0 -----\n"));
+		strcat(chipver_tag, "_b0");
+	} else {
+		DHD_ERROR(("----- CHIP bcm43341 unknown revision %d -----\n",
+			chipver));
+	}
+
+	if (fw_path)
+		strcat(fw_path, chipver_tag);
+	if (nv_path)
+		strcat(nv_path, chipver_tag);
+	return 0;
+}
+
+static int concate_revision_bcm43430(dhd_bus_t *bus,
+        char *fw_path, int fw_path_len, char *nv_path, int nv_path_len)
+{
+	uint chipver;
+#if defined(SUPPORT_MULTIPLE_CHIPS)
+	char chipver_tag[10] = "_43430";
+#else
+	char chipver_tag[4] = {0, };
+#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */
+
+	DHD_TRACE(("%s: BCM43430 Multiple Revision Check\n", __FUNCTION__));
+	if (bus->sih->chip != BCM43430_CHIP_ID) {
+		DHD_ERROR(("%s:Chip is not BCM43430\n", __FUNCTION__));
+		return -1;
+	}
+	chipver = bus->sih->chiprev;
+	DHD_ERROR(("CHIP VER = [0x%x]\n", chipver));
+	if (chipver == 0x0) {
+		DHD_ERROR(("----- CHIP BCM43430_A0 -----\n"));
+		strcat(chipver_tag, "_a0");
+	} else {
+		DHD_ERROR(("----- CHIP BCM43430 unknown revision %d -----\n",
+			chipver));
+	}
+
+	if (fw_path)
+		strcat(fw_path, chipver_tag);
+	if (nv_path)
+		strcat(nv_path, chipver_tag);
+	return 0;
+}
+
+int
+concate_revision(dhd_bus_t *bus, char *fw_path, int fw_path_len, char *nv_path, int nv_path_len)
+{
+	int res = 0;
+
+	if (!bus || !bus->sih) {
+		DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__));
+		return -1;
+	}
+
+	if (!fw_path && !nv_path)
+		return res;
+
+	bcm_chip_id = bus->sih->chip;
+	switch (bus->sih->chip) {
+	case BCM4330_CHIP_ID:
+		res = concate_revision_bcm4330(bus);
+		break;
+	case BCM4334_CHIP_ID:
+		res =  concate_revision_bcm4334(bus, fw_path, fw_path_len,
+			nv_path, nv_path_len);
+		break;
+	case BCM4335_CHIP_ID:
+		res = concate_revision_bcm4335(bus, fw_path, fw_path_len,
+			nv_path, nv_path_len);
+		break;
+	case BCM4339_CHIP_ID:
+		res = concate_revision_bcm4339(bus, fw_path, fw_path_len,
+			nv_path, nv_path_len);
+		break;
+	case BCM43349_CHIP_ID:
+		res = concate_revision_bcm43349(bus, fw_path, fw_path_len,
+			nv_path, nv_path_len);
+		break;
+	case BCM4324_CHIP_ID:
+		res = concate_revision_bcm43241(bus, fw_path, fw_path_len, nv_path, nv_path_len);
+		break;
+	case BCM4350_CHIP_ID:
+		res = concate_revision_bcm4350(bus, fw_path, fw_path_len, nv_path, nv_path_len);
+		break;
+	case BCM4354_CHIP_ID:
+		res = concate_revision_bcm4354(bus, fw_path, fw_path_len, nv_path, nv_path_len);
+		break;
+	case BCM4356_CHIP_ID:
+		res = concate_revision_bcm4356(bus, fw_path, fw_path_len, nv_path, nv_path_len);
+		break;
+	case BCM43341_CHIP_ID:
+		res = concate_revision_bcm43341(bus, fw_path, fw_path_len, nv_path, nv_path_len);
+		break;
+	case BCM43430_CHIP_ID:
+		res = concate_revision_bcm43430(bus, fw_path, fw_path_len, nv_path, nv_path_len);
+		break;
+
+	default:
+		DHD_ERROR(("REVISION SPECIFIC feature is not required\n"));
+		return res;
+	}
+
+	return res;
+}
+
+/* return chip ID */
+uint
+bcm_get_chip_id(void)
+{
+	return bcm_chip_id;
+}
+
+#endif /* SUPPORT_MULTIPLE_REVISION */
+
+void
+dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path)
+{
+	bus->fw_path = pfw_path;
+	bus->nv_path = pnv_path;
+}
+
+int
+dhd_enableOOB(dhd_pub_t *dhd, bool sleep)
+{
+	dhd_bus_t *bus = dhd->bus;
+	sdpcmd_regs_t *regs = bus->regs;
+	uint retries = 0;
+
+	if (sleep) {
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+		/* Tell device to start using OOB wakeup */
+		W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+		if (retries > retry_limit) {
+			DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+			return BCME_BUSY;
+		}
+		/* Turn off our contribution to the HT clock request */
+		dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+	} else {
+		/* Make sure the controller has the bus up */
+		dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+		/* Send misc interrupt to indicate OOB not needed */
+		W_SDREG(0, &regs->tosbmailboxdata, retries);
+		if (retries <= retry_limit)
+			W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+
+		if (retries > retry_limit)
+			DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n"));
+
+		/* Make sure we have SD bus access */
+		dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+	}
+	return BCME_OK;
+}
+
+void
+dhd_bus_pktq_flush(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	bool wlfc_enabled = FALSE;
+
+#ifdef PROP_TXSTATUS
+	wlfc_enabled = (dhd_wlfc_cleanup_txq(dhdp, NULL, 0) != WLFC_UNSUPPORTED);
+#endif
+	if (!wlfc_enabled) {
+#ifdef DHDTCPACK_SUPPRESS
+		/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+		 * when there is a newly coming packet from network stack.
+		 */
+		dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+		/* Clear the data packet queues */
+		pktq_flush(dhdp->osh, &bus->txq, TRUE, NULL, 0);
+	}
+}
+
+int
+dhd_sr_config(dhd_pub_t *dhd, bool on)
+{
+	dhd_bus_t *bus = dhd->bus;
+
+	if (!bus->_srenab)
+		return -1;
+
+	return dhdsdio_clk_devsleep_iovar(bus, on);
+}
+
+uint16
+dhd_get_chipid(dhd_pub_t *dhd)
+{
+	dhd_bus_t *bus = dhd->bus;
+
+	if (bus && bus->sih)
+		return (uint16)bus->sih->chip;
+	else
+		return 0;
+}
+
+#ifdef DEBUGGER
+uint32 dhd_sdio_reg_read(void *h, uint32 addr)
+{
+	uint32 rval;
+	struct dhd_bus *bus = (struct dhd_bus *) h;
+
+	dhd_os_sdlock(bus->dhd);
+
+	BUS_WAKE(bus);
+
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	rval = bcmsdh_reg_read(bus->sdh, addr, 4);
+
+	dhd_os_sdunlock(bus->dhd);
+
+	return rval;
+}
+
+void dhd_sdio_reg_write(void *h, uint32 addr, uint32 val)
+{
+	struct dhd_bus *bus = (struct dhd_bus *) h;
+
+	dhd_os_sdlock(bus->dhd);
+
+	BUS_WAKE(bus);
+
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	bcmsdh_reg_write(bus->sdh, addr, 4, val);
+
+	dhd_os_sdunlock(bus->dhd);
+}
+#endif /* DEBUGGER */
diff --git a/drivers/staging/edison-bcm43340/dhd_wlfc.c b/drivers/staging/edison-bcm43340/dhd_wlfc.c
new file mode 100644
index 0000000..0e68bc3
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/dhd_wlfc.c
@@ -0,0 +1,3967 @@
+/*
+ * DHD PROP_TXSTATUS Module.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_wlfc.c 472502 2014-04-24 05:59:06Z $
+ *
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#include <dhd_bus.h>
+#include <dhd_dbg.h>
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_ip.h>
+#endif /* DHDTCPACK_SUPPRESS */
+
+
+/*
+ * wlfc naming and lock rules:
+ *
+ * 1. Private functions are named _dhd_wlfc_XXX, are declared static, and
+ *    perform no wlfc lock operations.
+ * 2. Public functions are named dhd_wlfc_XXX and take the wlfc lock where
+ *    needed.
+ * 3. Modules outside Proptxstatus call only the public functions and never
+ *    touch the wlfc lock.
+ */
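+/*
+ * A minimal sketch of the convention above (dhd_wlfc_do_xyz is a
+ * hypothetical name used only for illustration):
+ *
+ *	static int _dhd_wlfc_do_xyz(athost_wl_status_info_t *ctx)
+ *	{
+ *		... works on ctx, performs no wlfc lock operations ...
+ *	}
+ *
+ *	int dhd_wlfc_do_xyz(dhd_pub_t *dhdp)
+ *	{
+ *		int rc;
+ *
+ *		dhd_os_wlfc_block(dhdp);
+ *		rc = _dhd_wlfc_do_xyz((athost_wl_status_info_t *)dhdp->wlfc_state);
+ *		dhd_os_wlfc_unblock(dhdp);
+ *		return rc;
+ *	}
+ */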
+
+
+#ifdef PROP_TXSTATUS
+
+#define DHD_WLFC_QMON_COMPLETE(entry)
+
+#define LIMIT_BORROW
+
+
+static uint16
+_dhd_wlfc_adjusted_seq(void* p, uint8 current_seq)
+{
+	uint16 seq;
+
+	if (!p) {
+		return 0xffff;
+	}
+
+	seq = WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+	if (seq < current_seq) {
+		/* wrap around */
+		seq += 256;
+	}
+
+	return seq;
+}
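+/*
+ * Worked example for the adjustment above: with current_seq == 250, a
+ * packet tagged seq == 3 has wrapped and is treated as 3 + 256 == 259, so
+ * it still sorts after a packet tagged seq == 252 when the queue is
+ * reordered in _dhd_wlfc_prec_enque().
+ */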
+
+static void
+_dhd_wlfc_prec_enque(struct pktq *pq, int prec, void* p, bool qHead,
+	uint8 current_seq, bool reOrder)
+{
+	struct pktq_prec *q;
+	uint16 seq, seq2;
+	void *p2, *p2_prev;
+
+	if (!p)
+		return;
+
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+	ASSERT(PKTLINK(p) == NULL);         /* queueing chains not allowed */
+
+	ASSERT(!pktq_full(pq));
+	ASSERT(!pktq_pfull(pq, prec));
+
+	q = &pq->q[prec];
+
+	PKTSETLINK(p, NULL);
+	if (q->head == NULL) {
+		/* empty queue */
+		q->head = p;
+		q->tail = p;
+	} else {
+		if (reOrder && (prec & 1)) {
+			seq = _dhd_wlfc_adjusted_seq(p, current_seq);
+			p2 = qHead ? q->head : q->tail;
+			seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq);
+
+			if ((qHead && ((seq+1) > seq2)) || (!qHead && ((seq2+1) > seq))) {
+				/* need reorder */
+				p2 = q->head;
+				p2_prev = NULL;
+				seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq);
+
+				while (seq > seq2) {
+					p2_prev = p2;
+					p2 = PKTLINK(p2);
+					if (!p2) {
+						break;
+					}
+					seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq);
+				}
+
+				if (p2_prev == NULL) {
+					/* insert head */
+					PKTSETLINK(p, q->head);
+					q->head = p;
+				} else if (p2 == NULL) {
+					/* insert tail */
+					PKTSETLINK(p2_prev, p);
+					q->tail = p;
+				} else {
+					/* insert after p2_prev */
+					PKTSETLINK(p, PKTLINK(p2_prev));
+					PKTSETLINK(p2_prev, p);
+				}
+				goto exit;
+			}
+		}
+
+		if (qHead) {
+			PKTSETLINK(p, q->head);
+			q->head = p;
+		} else {
+			PKTSETLINK(q->tail, p);
+			q->tail = p;
+		}
+	}
+
+exit:
+
+	q->len++;
+	pq->len++;
+
+	if (pq->hi_prec < prec)
+		pq->hi_prec = (uint8)prec;
+}
+
+/*
+ * Create a place to store all packet pointers submitted to the firmware until
+ * a status comes back, suppressed or otherwise.
+ *
+ * hang-er: noun, a contrivance on which things are hung, as a hook.
+ */
+static void*
+_dhd_wlfc_hanger_create(osl_t *osh, int max_items)
+{
+	int i;
+	wlfc_hanger_t* hanger;
+
+	/* allow only up to a specific size for now */
+	ASSERT(max_items == WLFC_HANGER_MAXITEMS);
+
+	if ((hanger = (wlfc_hanger_t*)MALLOC(osh, WLFC_HANGER_SIZE(max_items))) == NULL)
+		return NULL;
+
+	memset(hanger, 0, WLFC_HANGER_SIZE(max_items));
+	hanger->max_items = max_items;
+
+	for (i = 0; i < hanger->max_items; i++) {
+		hanger->items[i].state = WLFC_HANGER_ITEM_STATE_FREE;
+	}
+	return hanger;
+}
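+/*
+ * A minimal sketch of the hanger life cycle (error handling omitted):
+ *
+ *	uint16 slot = _dhd_wlfc_hanger_get_free_slot(hanger);
+ *	_dhd_wlfc_hanger_pushpkt(hanger, pkt, slot);
+ *	... firmware later returns a txstatus for the packet ...
+ *	_dhd_wlfc_hanger_poppkt(hanger, slot, &pkt, TRUE);
+ */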
+
+static int
+_dhd_wlfc_hanger_delete(osl_t *osh, void* hanger)
+{
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	if (h) {
+		MFREE(osh, h, WLFC_HANGER_SIZE(h->max_items));
+		return BCME_OK;
+	}
+	return BCME_BADARG;
+}
+
+static uint16
+_dhd_wlfc_hanger_get_free_slot(void* hanger)
+{
+	uint32 i;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	if (h) {
+		i = h->slot_pos + 1;
+		if (i == h->max_items) {
+			i = 0;
+		}
+		while (i != h->slot_pos) {
+			if (h->items[i].state == WLFC_HANGER_ITEM_STATE_FREE) {
+				h->slot_pos = i;
+				return (uint16)i;
+			}
+			i++;
+			if (i == h->max_items)
+				i = 0;
+		}
+		h->failed_slotfind++;
+	}
+	return WLFC_HANGER_MAXITEMS;
+}
+
+static int
+_dhd_wlfc_hanger_get_genbit(void* hanger, void* pkt, uint32 slot_id, int* gen)
+{
+	int rc = BCME_OK;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	*gen = 0xff;
+
+	/* this packet was not pushed at the time it went to the firmware */
+	if (slot_id == WLFC_HANGER_MAXITEMS)
+		return BCME_NOTFOUND;
+
+	if (h) {
+		if ((h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_INUSE) ||
+			(h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED)) {
+			*gen = h->items[slot_id].gen;
+		}
+		else {
+			rc = BCME_NOTFOUND;
+		}
+	}
+	else
+		rc = BCME_BADARG;
+	return rc;
+}
+
+static int
+_dhd_wlfc_hanger_pushpkt(void* hanger, void* pkt, uint32 slot_id)
+{
+	int rc = BCME_OK;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	if (h && (slot_id < WLFC_HANGER_MAXITEMS)) {
+		if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_FREE) {
+			h->items[slot_id].state = WLFC_HANGER_ITEM_STATE_INUSE;
+			h->items[slot_id].pkt = pkt;
+			h->pushed++;
+		}
+		else {
+			h->failed_to_push++;
+			rc = BCME_NOTFOUND;
+		}
+	}
+	else
+		rc = BCME_BADARG;
+	return rc;
+}
+
+static int
+_dhd_wlfc_hanger_poppkt(void* hanger, uint32 slot_id, void** pktout, int remove_from_hanger)
+{
+	int rc = BCME_OK;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	/* this packet was not pushed at the time it went to the firmware */
+	if (slot_id == WLFC_HANGER_MAXITEMS)
+		return BCME_NOTFOUND;
+
+	if (h) {
+		if (h->items[slot_id].state != WLFC_HANGER_ITEM_STATE_FREE) {
+			*pktout = h->items[slot_id].pkt;
+			if (remove_from_hanger) {
+				h->items[slot_id].state =
+					WLFC_HANGER_ITEM_STATE_FREE;
+				h->items[slot_id].pkt = NULL;
+				h->items[slot_id].gen = 0xff;
+				h->items[slot_id].identifier = 0;
+				h->popped++;
+			}
+		}
+		else {
+			h->failed_to_pop++;
+			rc = BCME_NOTFOUND;
+		}
+	}
+	else
+		rc = BCME_BADARG;
+	return rc;
+}
+
+static int
+_dhd_wlfc_hanger_mark_suppressed(void* hanger, uint32 slot_id, uint8 gen)
+{
+	int rc = BCME_OK;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	/* this packet was not pushed at the time it went to the firmware */
+	if (slot_id == WLFC_HANGER_MAXITEMS)
+		return BCME_NOTFOUND;
+	if (h) {
+		h->items[slot_id].gen = gen;
+		if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_INUSE) {
+			h->items[slot_id].state = WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED;
+		}
+		else
+			rc = BCME_BADARG;
+	}
+	else
+		rc = BCME_BADARG;
+
+	return rc;
+}
+
+static void
+_dhd_wlfc_hanger_waitevent_set(void* hanger, uint32 hslot, uint8 waitevent)
+{
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+	ASSERT(h && (hslot < (uint32) h->max_items));
+
+	h->items[hslot].waitevent = waitevent;
+}
+
+static uint8
+_dhd_wlfc_hanger_waitevent_decreturn(void* hanger, uint32 hslot)
+{
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+	ASSERT(h && (hslot < (uint32) h->max_items));
+
+	h->items[hslot].waitevent--;
+	return h->items[hslot].waitevent;
+}
+
+/* return true if the slot is only waiting for clean */
+static bool
+_dhd_wlfc_hanger_wait_clean(void* hanger, uint32 hslot)
+{
+	wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+
+	if ((hslot < (uint32) h->max_items) &&
+		(h->items[hslot].state == WLFC_HANGER_ITEM_STATE_WAIT_CLEAN)) {
+		/* the packet should be already freed by _dhd_wlfc_cleanup */
+		h->items[hslot].state = WLFC_HANGER_ITEM_STATE_FREE;
+		h->items[hslot].pkt = NULL;
+		h->items[hslot].gen = 0xff;
+		h->items[hslot].identifier = 0;
+		return TRUE;
+	}
+
+	return FALSE;
+}
+
+/* remove reference of specific packet in hanger */
+static bool
+_dhd_wlfc_hanger_remove_reference(wlfc_hanger_t* h, void* pkt)
+{
+	int i;
+
+	if (!h || !pkt) {
+		return FALSE;
+	}
+
+	for (i = 0; i < h->max_items; i++) {
+		if (pkt == h->items[i].pkt) {
+			if ((h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) ||
+				(h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED)) {
+				h->items[i].state = WLFC_HANGER_ITEM_STATE_FREE;
+				h->items[i].pkt = NULL;
+				h->items[i].gen = 0xff;
+				h->items[i].identifier = 0;
+			}
+			return TRUE;
+		}
+	}
+
+	return FALSE;
+}
+
+
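+/* Queue a packet on the "active in firmware" queue (AFQ) of the destination
+ * entry encoded in its hanger slot: the node table first, then the
+ * interfaces, with everything else falling through to the catch-all "other"
+ * entry.
+ */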
+static int
+_dhd_wlfc_enque_afq(athost_wl_status_info_t* ctx, void *p)
+{
+	wlfc_mac_descriptor_t* entry;
+	uint16 entry_idx = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+	uint8 prec = DHD_PKTTAG_FIFO(PKTTAG(p));
+
+	if (entry_idx < WLFC_MAC_DESC_TABLE_SIZE)
+		entry  = &ctx->destination_entries.nodes[entry_idx];
+	else if (entry_idx < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM))
+		entry = &ctx->destination_entries.interfaces[entry_idx - WLFC_MAC_DESC_TABLE_SIZE];
+	else
+		entry = &ctx->destination_entries.other;
+
+	pktq_penq(&entry->afq, prec, p);
+
+	return BCME_OK;
+}
+
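+/* Remove from the AFQ the packet whose free-running counter matches hcnt.
+ * A match found mid-queue means the firmware completed packets out of
+ * order; such cases are counted in stats.ooo_pkts.
+ */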
+static int
+_dhd_wlfc_deque_afq(athost_wl_status_info_t* ctx, uint16 hslot, uint8 hcnt, uint8 prec,
+	void **pktout)
+{
+	wlfc_mac_descriptor_t *entry;
+	struct pktq *pq;
+	struct pktq_prec *q;
+	void *p, *b;
+
+	if (!ctx) {
+		DHD_ERROR(("%s: ctx(%p), pktout(%p)\n", __FUNCTION__, ctx, pktout));
+		return BCME_BADARG;
+	}
+
+	if (pktout) {
+		*pktout = NULL;
+	}
+
+	ASSERT(hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM + 1));
+
+	if (hslot < WLFC_MAC_DESC_TABLE_SIZE)
+		entry  = &ctx->destination_entries.nodes[hslot];
+	else if (hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM))
+		entry = &ctx->destination_entries.interfaces[hslot - WLFC_MAC_DESC_TABLE_SIZE];
+	else
+		entry = &ctx->destination_entries.other;
+
+	pq = &entry->afq;
+
+	ASSERT(prec < pq->num_prec);
+
+	q = &pq->q[prec];
+
+	b = NULL;
+	p = q->head;
+
+	while (p && (hcnt != WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p)))))
+	{
+		b = p;
+		p = PKTLINK(p);
+	}
+
+	if (p == NULL) {
+		/* none is matched */
+		if (b) {
+			DHD_ERROR(("%s: can't find matching seq(%d)\n", __FUNCTION__, hcnt));
+		} else {
+			DHD_ERROR(("%s: queue is empty\n", __FUNCTION__));
+		}
+
+		return BCME_ERROR;
+	}
+
+	if (!b) {
+		/* head packet is matched */
+		if ((q->head = PKTLINK(p)) == NULL) {
+			q->tail = NULL;
+		}
+	} else {
+		/* middle packet is matched */
+		DHD_INFO(("%s: out of order, seq(%d), head_seq(%d)\n", __FUNCTION__, hcnt,
+			WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(q->head)))));
+		ctx->stats.ooo_pkts[prec]++;
+		PKTSETLINK(b, PKTLINK(p));
+		if (PKTLINK(p) == NULL) {
+			q->tail = b;
+		}
+	}
+
+	q->len--;
+	pq->len--;
+
+	PKTSETLINK(p, NULL);
+
+	if (pktout) {
+		*pktout = p;
+	}
+
+	return BCME_OK;
+}
+
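+/* Prepend the wlfc and BDC headers to a packet. The resulting layout is
+ * [BDC header][PKTTAG TLV (+ seq when REUSESEQ)][optional TIM TLV][fillers],
+ * padded to a 4-byte boundary; the BDC dataOffset field records the wlfc
+ * portion in words. With skip_wlfc_hdr, only the BDC header is pushed.
+ */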
+static int
+_dhd_wlfc_pushheader(athost_wl_status_info_t* ctx, void* p, bool tim_signal,
+	uint8 tim_bmp, uint8 mac_handle, uint32 htodtag, uint16 htodseq, bool skip_wlfc_hdr)
+{
+	uint32 wl_pktinfo = 0;
+	uint8* wlh;
+	uint8 dataOffset = 0;
+	uint8 fillers;
+	uint8 tim_signal_len = 0;
+	dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+
+	struct bdc_header *h;
+
+	if (skip_wlfc_hdr)
+		goto push_bdc_hdr;
+
+	if (tim_signal) {
+		tim_signal_len = TLV_HDR_LEN + WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP;
+	}
+
+	/* TLV_HDR_LEN is for Type[1] and Len[1] in the TLV, plus the TIM signal */
+	dataOffset = WLFC_CTL_VALUE_LEN_PKTTAG + TLV_HDR_LEN + tim_signal_len;
+	if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) {
+		dataOffset += WLFC_CTL_VALUE_LEN_SEQ;
+	}
+
+	fillers = ROUNDUP(dataOffset, 4) - dataOffset;
+	dataOffset += fillers;
+
+	PKTPUSH(ctx->osh, p, dataOffset);
+	wlh = (uint8*) PKTDATA(ctx->osh, p);
+
+	wl_pktinfo = htol32(htodtag);
+
+	wlh[TLV_TAG_OFF] = WLFC_CTL_TYPE_PKTTAG;
+	wlh[TLV_LEN_OFF] = WLFC_CTL_VALUE_LEN_PKTTAG;
+	memcpy(&wlh[TLV_HDR_LEN], &wl_pktinfo, sizeof(uint32));
+
+	if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) {
+		uint16 wl_seqinfo = htol16(htodseq);
+		wlh[TLV_LEN_OFF] += WLFC_CTL_VALUE_LEN_SEQ;
+		memcpy(&wlh[TLV_HDR_LEN + WLFC_CTL_VALUE_LEN_PKTTAG], &wl_seqinfo,
+			WLFC_CTL_VALUE_LEN_SEQ);
+	}
+
+	if (tim_signal_len) {
+		wlh[dataOffset - fillers - tim_signal_len] =
+			WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP;
+		wlh[dataOffset - fillers - tim_signal_len + 1] =
+			WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP;
+		wlh[dataOffset - fillers - tim_signal_len + 2] = mac_handle;
+		wlh[dataOffset - fillers - tim_signal_len + 3] = tim_bmp;
+	}
+	if (fillers)
+		memset(&wlh[dataOffset - fillers], WLFC_CTL_TYPE_FILLER, fillers);
+
+push_bdc_hdr:
+
+	PKTPUSH(ctx->osh, p, BDC_HEADER_LEN);
+	h = (struct bdc_header *)PKTDATA(ctx->osh, p);
+	h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
+	if (PKTSUMNEEDED(p))
+		h->flags |= BDC_FLAG_SUM_NEEDED;
+
+
+	h->priority = (PKTPRIO(p) & BDC_PRIORITY_MASK);
+	h->flags2 = 0;
+	h->dataOffset = dataOffset >> 2;
+	BDC_SET_IF_IDX(h, DHD_PKTTAG_IF(PKTTAG(p)));
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_pullheader(athost_wl_status_info_t* ctx, void* pktbuf)
+{
+	struct bdc_header *h;
+
+	if (PKTLEN(ctx->osh, pktbuf) < BDC_HEADER_LEN) {
+		DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+		           PKTLEN(ctx->osh, pktbuf), BDC_HEADER_LEN));
+		return BCME_ERROR;
+	}
+	h = (struct bdc_header *)PKTDATA(ctx->osh, pktbuf);
+
+	/* pull BDC header */
+	PKTPULL(ctx->osh, pktbuf, BDC_HEADER_LEN);
+
+	if (PKTLEN(ctx->osh, pktbuf) < (uint)(h->dataOffset << 2)) {
+		DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+		           PKTLEN(ctx->osh, pktbuf), (h->dataOffset << 2)));
+		return BCME_ERROR;
+	}
+
+	/* pull wl-header */
+	PKTPULL(ctx->osh, pktbuf, (h->dataOffset << 2));
+	return BCME_OK;
+}
+
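+/* Resolve the destination entry for a packet, in order: the entry cached in
+ * the packet tag; the interface entry for multicast or for STA/P2P-client
+ * roles; an exact ifid+MAC match in the node table (which overrides the
+ * interface entry for unicast, e.g. TDLS peers); and finally the catch-all
+ * "other" entry. The result is cached back into the packet tag.
+ */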
+static wlfc_mac_descriptor_t*
+_dhd_wlfc_find_table_entry(athost_wl_status_info_t* ctx, void* p)
+{
+	int i;
+	wlfc_mac_descriptor_t* table = ctx->destination_entries.nodes;
+	uint8 ifid = DHD_PKTTAG_IF(PKTTAG(p));
+	uint8* dstn = DHD_PKTTAG_DSTN(PKTTAG(p));
+	wlfc_mac_descriptor_t* entry = DHD_PKTTAG_ENTRY(PKTTAG(p));
+	int iftype = ctx->destination_entries.interfaces[ifid].iftype;
+
+	/* saved one exists, return it */
+	if (entry)
+		return entry;
+
+	/* Multicast destinations, STA, and P2P clients use the interface entry.
+	 * A STA/GC uses a MAC entry for TDLS destinations; TDLS destinations
+	 * have their own entries.
+	 */
+	if ((iftype == WLC_E_IF_ROLE_STA || ETHER_ISMULTI(dstn) ||
+		iftype == WLC_E_IF_ROLE_P2P_CLIENT) &&
+		(ctx->destination_entries.interfaces[ifid].occupied)) {
+			entry = &ctx->destination_entries.interfaces[ifid];
+	}
+
+	if (entry && ETHER_ISMULTI(dstn)) {
+		DHD_PKTTAG_SET_ENTRY(PKTTAG(p), entry);
+		return entry;
+	}
+
+	for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+		if (table[i].occupied) {
+			if (table[i].interface_id == ifid) {
+				if (!memcmp(table[i].ea, dstn, ETHER_ADDR_LEN)) {
+					entry = &table[i];
+					break;
+				}
+			}
+		}
+	}
+
+	if (entry == NULL)
+		entry = &ctx->destination_entries.other;
+
+	DHD_PKTTAG_SET_ENTRY(PKTTAG(p), entry);
+
+	return entry;
+}
+
+static int
+_dhd_wlfc_prec_drop(dhd_pub_t *dhdp, int prec, void* p, bool bPktInQ)
+{
+	athost_wl_status_info_t* ctx;
+	void *pout = NULL;
+
+	ASSERT(dhdp && p);
+	ASSERT(prec >= 0 && prec <= WLFC_PSQ_PREC_COUNT);
+
+	ctx = (athost_wl_status_info_t*)dhdp->wlfc_state;
+
+	if (!WLFC_GET_AFQ(dhdp->wlfc_mode) && (prec & 1)) {
+		/* suppressed queue, need to pop from hanger */
+		_dhd_wlfc_hanger_poppkt(ctx->hanger, WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG
+					(PKTTAG(p))), &pout, 1);
+		ASSERT(p == pout);
+	}
+
+	if (!(prec & 1)) {
+#ifdef DHDTCPACK_SUPPRESS
+		/* pkt in delayed q, so fake push BDC header for
+		 * dhd_tcpack_check_xmit() and dhd_txcomplete().
+		 */
+		_dhd_wlfc_pushheader(ctx, p, FALSE, 0, 0, 0, 0, TRUE);
+
+		/* This packet is about to be freed, so remove it from tcp_ack_info_tbl
+		 * This must be one of...
+		 * 1. A pkt already in delayQ is evicted by another pkt with higher precedence
+		 * in _dhd_wlfc_prec_enq_with_drop()
+		 * 2. A pkt could not be enqueued to delayQ because it is full,
+		 * in _dhd_wlfc_enque_delayq().
+		 * 3. A pkt could not be enqueued to delayQ because it is full,
+		 * in _dhd_wlfc_rollback_packet_toq().
+		 */
+		if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
+			DHD_ERROR(("%s %d: tcpack_suppress ERROR!!!"
+				" Stop using it\n",
+				__FUNCTION__, __LINE__));
+			dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
+		}
+#endif /* DHDTCPACK_SUPPRESS */
+	}
+
+	if (bPktInQ) {
+		ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--;
+		ctx->pkt_cnt_per_ac[prec>>1]--;
+	}
+
+	dhd_txcomplete(dhdp, p, FALSE);
+	PKTFREE(ctx->osh, p, TRUE);
+	ctx->stats.pktout++;
+	ctx->stats.drop_pkts[prec]++;
+
+	return 0;
+}
+
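+/* Enqueue with eviction: when the precedence queue or the whole pktq is
+ * full, pick an eviction precedence (the full one, or the lowest non-empty
+ * precedence no higher than the incoming packet's), drop packets there,
+ * then enqueue and update the per-interface and per-AC packet counters.
+ */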
+static bool
+_dhd_wlfc_prec_enq_with_drop(dhd_pub_t *dhdp, struct pktq *pq, void *pkt, int prec, bool qHead,
+	uint8 current_seq)
+{
+	void *p = NULL;
+	int eprec = -1;		/* precedence to evict from */
+	athost_wl_status_info_t* ctx;
+
+	ASSERT(dhdp && pq && pkt);
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	ctx = (athost_wl_status_info_t*)dhdp->wlfc_state;
+
+	/* Fast case, precedence queue is not full and we are also not
+	 * exceeding total queue length
+	 */
+	if (!pktq_pfull(pq, prec) && !pktq_full(pq)) {
+		goto exit;
+	}
+
+	/* Determine precedence from which to evict packet, if any */
+	if (pktq_pfull(pq, prec))
+		eprec = prec;
+	else if (pktq_full(pq)) {
+		p = pktq_peek_tail(pq, &eprec);
+		if (!p) {
+			DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+			return FALSE;
+		}
+		if ((eprec > prec) || (eprec < 0)) {
+			if (!pktq_pempty(pq, prec)) {
+				eprec = prec;
+			} else {
+				return FALSE;
+			}
+		}
+	}
+
+	/* Evict if needed */
+	if (eprec >= 0) {
+		/* Detect queueing to unconfigured precedence */
+		ASSERT(!pktq_pempty(pq, eprec));
+		/* Evict all fragmented frames */
+		dhd_prec_drop_pkts(dhdp, pq, eprec, _dhd_wlfc_prec_drop);
+	}
+
+exit:
+	/* Enqueue */
+	_dhd_wlfc_prec_enque(pq, prec, pkt, qHead, current_seq,
+		WLFC_GET_REORDERSUPP(dhdp->wlfc_mode));
+	ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(pkt))][prec>>1]++;
+	ctx->pkt_cnt_per_ac[prec>>1]++;
+
+	return TRUE;
+}
+
+
+static int
+_dhd_wlfc_rollback_packet_toq(athost_wl_status_info_t* ctx,
+	void* p, ewlfc_packet_state_t pkt_type, uint32 hslot)
+{
+	/*
+	put the packet back at the head of its queue
+
+	- a suppressed packet goes back to the suppress sub-queue
+	- for a new or delayed packet, pull out the header first
+
+	Note: hslot is used only when header removal is done.
+	*/
+	wlfc_mac_descriptor_t* entry;
+	int rc = BCME_OK;
+	int prec, fifo_id;
+
+	entry = _dhd_wlfc_find_table_entry(ctx, p);
+	prec = DHD_PKTTAG_FIFO(PKTTAG(p));
+	fifo_id = prec << 1;
+	if (pkt_type == eWLFC_PKTTYPE_SUPPRESSED)
+		fifo_id += 1;
+	if (entry != NULL) {
+		/*
+		if this packet did not count against FIFO credit, it must have
+		taken a requested_credit from the firmware (for pspoll etc.)
+		*/
+		if ((prec != AC_COUNT) && !DHD_PKTTAG_CREDITCHECK(PKTTAG(p)))
+			entry->requested_credit++;
+
+		if (pkt_type == eWLFC_PKTTYPE_DELAYED) {
+			/* decrement sequence count */
+			WLFC_DECR_SEQCOUNT(entry, prec);
+			/* remove header first */
+			rc = _dhd_wlfc_pullheader(ctx, p);
+			if (rc != BCME_OK) {
+				DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+				goto exit;
+			}
+		}
+
+		if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, p, fifo_id, TRUE,
+			WLFC_SEQCOUNT(entry, fifo_id>>1))
+			== FALSE) {
+			/* enque failed */
+			DHD_ERROR(("Error: %s():%d, fifo_id(%d)\n",
+				__FUNCTION__, __LINE__, fifo_id));
+			rc = BCME_ERROR;
+		}
+	} else {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		rc = BCME_ERROR;
+	}
+exit:
+	if (rc != BCME_OK) {
+		ctx->stats.rollback_failed++;
+		_dhd_wlfc_prec_drop(ctx->dhdp, fifo_id, p, FALSE);
+	}
+	else
+		ctx->stats.rollback++;
+
+	return rc;
+}
+
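+/* Decide whether host-side flow control may be asserted for an interface:
+ * only after a single AC has been the sole queued traffic for at least
+ * WLFC_FC_DEFER_PERIOD_MS. Mixed-AC (or BCMC) traffic resets the deferral
+ * timestamp and keeps flow control disabled.
+ */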
+static bool
+_dhd_wlfc_allow_fc(athost_wl_status_info_t* ctx, uint8 ifid)
+{
+	int prec, ac_traffic = WLFC_NO_TRAFFIC;
+
+	for (prec = 0; prec < AC_COUNT; prec++) {
+		if (ctx->pkt_cnt_in_q[ifid][prec] > 0) {
+			if (ac_traffic == WLFC_NO_TRAFFIC)
+				ac_traffic = prec + 1;
+			else if (ac_traffic != (prec + 1))
+				ac_traffic = WLFC_MULTI_TRAFFIC;
+		}
+	}
+
+	if (ac_traffic >= 1 && ac_traffic <= AC_COUNT) {
+		/* single AC (BE/BK/VI/VO) in queue */
+		if (ctx->allow_fc) {
+			return TRUE;
+		} else {
+			uint32 delta;
+			uint32 curr_t = OSL_SYSUPTIME();
+
+			if (ctx->fc_defer_timestamp == 0) {
+				/* first single-AC scenario */
+				ctx->fc_defer_timestamp = curr_t;
+				return FALSE;
+			}
+
+			/* single AC duration, this handles wrap around, e.g. 1 - ~0 = 2. */
+			delta = curr_t - ctx->fc_defer_timestamp;
+			if (delta >= WLFC_FC_DEFER_PERIOD_MS) {
+				ctx->allow_fc = TRUE;
+			}
+		}
+	} else {
+		/* multiple ACs or BCMC in queue */
+		ctx->allow_fc = FALSE;
+		ctx->fc_defer_timestamp = 0;
+	}
+
+	return ctx->allow_fc;
+}
+
+static void
+_dhd_wlfc_flow_control_check(athost_wl_status_info_t* ctx, struct pktq* pq, uint8 if_id)
+{
+	dhd_pub_t *dhdp;
+
+	ASSERT(ctx);
+
+	dhdp = (dhd_pub_t *)ctx->dhdp;
+	ASSERT(dhdp);
+
+	if (dhdp->skip_fc && dhdp->skip_fc())
+		return;
+
+	if ((ctx->hostif_flow_state[if_id] == OFF) && !_dhd_wlfc_allow_fc(ctx, if_id))
+		return;
+
+	if ((pq->len <= WLFC_FLOWCONTROL_LOWATER) && (ctx->hostif_flow_state[if_id] == ON)) {
+		/* start traffic */
+		ctx->hostif_flow_state[if_id] = OFF;
+		/*
+		WLFC_DBGMESG(("qlen:%02d, if:%02d, ->OFF, start traffic %s()\n",
+		pq->len, if_id, __FUNCTION__));
+		*/
+		WLFC_DBGMESG(("F"));
+
+		dhd_txflowcontrol(dhdp, if_id, OFF);
+
+		ctx->toggle_host_if = 0;
+	}
+
+	if ((pq->len >= WLFC_FLOWCONTROL_HIWATER) && (ctx->hostif_flow_state[if_id] == OFF)) {
+		/* stop traffic */
+		ctx->hostif_flow_state[if_id] = ON;
+		/*
+		WLFC_DBGMESG(("qlen:%02d, if:%02d, ->ON, stop traffic   %s()\n",
+		pq->len, if_id, __FUNCTION__));
+		*/
+		WLFC_DBGMESG(("N"));
+
+		dhd_txflowcontrol(dhdp, if_id, ON);
+
+		ctx->host_ifidx = if_id;
+		ctx->toggle_host_if = 1;
+	}
+
+	return;
+}
+
+static int
+_dhd_wlfc_send_signalonly_packet(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+	uint8 ta_bmp)
+{
+	int rc = BCME_OK;
+	void* p = NULL;
+	int dummylen = ((dhd_pub_t *)ctx->dhdp)->hdrlen + 16;
+	dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+
+	if (dhdp->proptxstatus_txoff) {
+		rc = BCME_NORESOURCE;
+		return rc;
+	}
+
+	/* allocate a dummy packet */
+	p = PKTGET(ctx->osh, dummylen, TRUE);
+	if (p) {
+		PKTPULL(ctx->osh, p, dummylen);
+		DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), 0);
+		_dhd_wlfc_pushheader(ctx, p, TRUE, ta_bmp, entry->mac_handle, 0, 0, FALSE);
+		DHD_PKTTAG_SETSIGNALONLY(PKTTAG(p), 1);
+		DHD_PKTTAG_WLFCPKT_SET(PKTTAG(p), 1);
+#ifdef PROP_TXSTATUS_DEBUG
+		ctx->stats.signal_only_pkts_sent++;
+#endif
+
+#if defined(BCMPCIE)
+		rc = dhd_bus_txdata(dhdp->bus, p, ctx->host_ifidx);
+#else
+		rc = dhd_bus_txdata(dhdp->bus, p);
+#endif
+		if (rc != BCME_OK) {
+			_dhd_wlfc_pullheader(ctx, p);
+			PKTFREE(ctx->osh, p, TRUE);
+		}
+	}
+	else {
+		DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n",
+		           __FUNCTION__, dummylen));
+		rc = BCME_NOMEM;
+	}
+	return rc;
+}
+
+/* Return TRUE if traffic availability changed */
+static bool
+_dhd_wlfc_traffic_pending_check(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+	int prec)
+{
+	bool rc = FALSE;
+
+	if (entry->state == WLFC_STATE_CLOSE) {
+		if ((pktq_plen(&entry->psq, (prec << 1)) == 0) &&
+			(pktq_plen(&entry->psq, ((prec << 1) + 1)) == 0)) {
+
+			if (entry->traffic_pending_bmp & NBITVAL(prec)) {
+				rc = TRUE;
+				entry->traffic_pending_bmp =
+					entry->traffic_pending_bmp & ~ NBITVAL(prec);
+			}
+		}
+		else {
+			if (!(entry->traffic_pending_bmp & NBITVAL(prec))) {
+				rc = TRUE;
+				entry->traffic_pending_bmp =
+					entry->traffic_pending_bmp | NBITVAL(prec);
+			}
+		}
+	}
+	if (rc) {
+		/* request a TIM update to firmware at the next piggyback opportunity */
+		if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp) {
+			entry->send_tim_signal = 1;
+			_dhd_wlfc_send_signalonly_packet(ctx, entry, entry->traffic_pending_bmp);
+			entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
+			entry->send_tim_signal = 0;
+		}
+		else {
+			rc = FALSE;
+		}
+	}
+	return rc;
+}
+
+static int
+_dhd_wlfc_enque_suppressed(athost_wl_status_info_t* ctx, int prec, void* p)
+{
+	wlfc_mac_descriptor_t* entry;
+
+	entry = _dhd_wlfc_find_table_entry(ctx, p);
+	if (entry == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_NOTFOUND;
+	}
+	/*
+	- suppressed packets go to sub_queue[2*prec + 1] AND
+	- delayed packets go to sub_queue[2*prec + 0] to ensure
+	order of delivery.
+	*/
+	if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, p, ((prec << 1) + 1), FALSE,
+		WLFC_SEQCOUNT(entry, prec))
+		== FALSE) {
+		ctx->stats.delayq_full_error++;
+		/* WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__)); */
+		WLFC_DBGMESG(("s"));
+		return BCME_ERROR;
+	}
+
+	/* A packet has been pushed, update traffic availability bitmap, if applicable */
+	_dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+	_dhd_wlfc_flow_control_check(ctx, &entry->psq, DHD_PKTTAG_IF(PKTTAG(p)));
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_pretx_pktprocess(athost_wl_status_info_t* ctx,
+	wlfc_mac_descriptor_t* entry, void* p, int header_needed, uint32* slot)
+{
+	int rc = BCME_OK;
+	int hslot = WLFC_HANGER_MAXITEMS;
+	bool send_tim_update = FALSE;
+	uint32 htod = 0;
+	uint16 htodseq = 0;
+	uint8 free_ctr;
+	int gen = 0xff;
+	dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+
+	*slot = hslot;
+
+	if (entry == NULL) {
+		entry = _dhd_wlfc_find_table_entry(ctx, p);
+	}
+
+	if (entry == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_ERROR;
+	}
+
+	if (entry->send_tim_signal) {
+		send_tim_update = TRUE;
+		entry->send_tim_signal = 0;
+		entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
+	}
+
+	if (header_needed) {
+		if (WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+			hslot = (uint)(entry - &ctx->destination_entries.nodes[0]);
+		} else {
+			hslot = _dhd_wlfc_hanger_get_free_slot(ctx->hanger);
+		}
+		gen = entry->generation;
+		free_ctr = WLFC_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p)));
+	} else {
+		if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) {
+			htodseq = DHD_PKTTAG_H2DSEQ(PKTTAG(p));
+		}
+
+		hslot = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+
+		if (WLFC_GET_REORDERSUPP(dhdp->wlfc_mode)) {
+			gen = entry->generation;
+		} else if (WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+			gen = WL_TXSTATUS_GET_GENERATION(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+		} else {
+			_dhd_wlfc_hanger_get_genbit(ctx->hanger, p, hslot, &gen);
+		}
+
+		free_ctr = WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+		/* remove old header */
+		_dhd_wlfc_pullheader(ctx, p);
+	}
+
+	if (hslot >= WLFC_HANGER_MAXITEMS) {
+		DHD_ERROR(("Error: %s():no hanger slot available\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	WL_TXSTATUS_SET_FREERUNCTR(htod, free_ctr);
+	WL_TXSTATUS_SET_HSLOT(htod, hslot);
+	WL_TXSTATUS_SET_FIFO(htod, DHD_PKTTAG_FIFO(PKTTAG(p)));
+	WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST);
+	WL_TXSTATUS_SET_GENERATION(htod, gen);
+	DHD_PKTTAG_SETPKTDIR(PKTTAG(p), 1);
+
+	if (!DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) {
+		/*
+		Indicate that this packet is being sent in response to an
+		explicit request from the firmware side.
+		*/
+		WLFC_PKTFLAG_SET_PKTREQUESTED(htod);
+	} else {
+		WLFC_PKTFLAG_CLR_PKTREQUESTED(htod);
+	}
+
+	rc = _dhd_wlfc_pushheader(ctx, p, send_tim_update,
+		entry->traffic_lastreported_bmp, entry->mac_handle, htod, htodseq, FALSE);
+	if (rc == BCME_OK) {
+		DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), htod);
+
+		if (!WLFC_GET_AFQ(dhdp->wlfc_mode) && header_needed) {
+			/*
+			a new header was created for this packet.
+			push to hanger slot and scrub q. Since bus
+			send succeeded, increment seq number as well.
+			*/
+			rc = _dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot);
+			if (rc == BCME_OK) {
+#ifdef PROP_TXSTATUS_DEBUG
+				((wlfc_hanger_t*)(ctx->hanger))->items[hslot].push_time =
+					OSL_SYSUPTIME();
+#endif
+			} else {
+				DHD_ERROR(("%s() hanger_pushpkt() failed, rc: %d\n",
+					__FUNCTION__, rc));
+			}
+		}
+
+		if ((rc == BCME_OK) && header_needed) {
+			/* increment free running sequence count */
+			WLFC_INCR_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p)));
+		}
+	}
+	*slot = hslot;
+	return rc;
+}
+
+static int
+_dhd_wlfc_is_destination_open(athost_wl_status_info_t* ctx,
+	wlfc_mac_descriptor_t* entry, int prec)
+{
+	if (entry->interface_id >= WLFC_MAX_IFNUM) {
+		ASSERT(&ctx->destination_entries.other == entry);
+		return 1;
+	}
+	if (ctx->destination_entries.interfaces[entry->interface_id].iftype ==
+		WLC_E_IF_ROLE_P2P_GO) {
+		/* The destination interface is a P2P GO.
+		For a P2P GO interface, if the destination is OPEN but the interface
+		is CLOSED, do not send traffic. But if the destination is CLOSED and
+		destination-specific credit is left, send packets anyway, because the
+		firmware is holding the destination-specific-requested packets in its
+		queue.
+		*/
+		if ((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) &&
+			(entry->requested_packet == 0)) {
+			return 0;
+		}
+	}
+	/* AP, p2p_go -> unicast desc entry, STA/p2p_cl -> interface desc. entry */
+	if (((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) &&
+		(entry->requested_packet == 0)) ||
+		(!(entry->ac_bitmap & (1 << prec)))) {
+		return 0;
+	}
+
+	return 1;
+}
+
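+/* Pick the next packet to send for the given precedence. Visit either the
+ * no-credit "requested" entries only, or all active entries round-robin
+ * (the list head advances on every visit). Within an entry the suppressed
+ * sub-queue is drained before the delayed one, and *needs_hdr tells the
+ * caller whether a fresh wlfc header must be built.
+ */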
+static void*
+_dhd_wlfc_deque_delayedq(athost_wl_status_info_t* ctx, int prec,
+	uint8* ac_credit_spent, uint8* needs_hdr, wlfc_mac_descriptor_t** entry_out,
+	bool only_no_credit)
+{
+	dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+	wlfc_mac_descriptor_t* entry;
+	int total_entries;
+	void* p = NULL;
+	int i;
+
+	*entry_out = NULL;
+	/* in most cases a packet will count against FIFO credit */
+	*ac_credit_spent = ((prec == AC_COUNT) && !ctx->bcmc_credit_supported) ? 0 : 1;
+
+	/* search all entries, including nodes as well as interfaces */
+	if (only_no_credit) {
+		total_entries = ctx->requested_entry_count;
+	} else {
+		total_entries = ctx->active_entry_count;
+	}
+
+	for (i = 0; i < total_entries; i++) {
+		if (only_no_credit) {
+			entry = ctx->requested_entry[i];
+		} else {
+			entry = ctx->active_entry_head;
+			/* move head to ensure fair round-robin */
+			ctx->active_entry_head = ctx->active_entry_head->next;
+		}
+		ASSERT(entry);
+
+		if (entry->occupied && _dhd_wlfc_is_destination_open(ctx, entry, prec) &&
+			(entry->transit_count < WL_TXSTATUS_FREERUNCTR_MASK) &&
+			!(WLFC_GET_REORDERSUPP(dhdp->wlfc_mode) && entry->suppressed)) {
+			if (entry->state == WLFC_STATE_CLOSE) {
+				*ac_credit_spent = 0;
+			}
+
+			/* higher precedence will be picked up first,
+			 * i.e. suppressed packets before delayed ones
+			 */
+			p = pktq_pdeq(&entry->psq, PSQ_SUP_IDX(prec));
+			*needs_hdr = 0;
+			if (p == NULL) {
+				if (entry->suppressed == TRUE) {
+					/* skip this entry */
+					continue;
+				}
+				/* De-Q from delay Q */
+				p = pktq_pdeq(&entry->psq, PSQ_DLY_IDX(prec));
+				*needs_hdr = 1;
+			}
+
+			if (p != NULL) {
+				/* did the packet come from suppress sub-queue? */
+				if (entry->requested_credit > 0) {
+					entry->requested_credit--;
+#ifdef PROP_TXSTATUS_DEBUG
+					entry->dstncredit_sent_packets++;
+#endif
+				} else if (entry->requested_packet > 0) {
+					entry->requested_packet--;
+					DHD_PKTTAG_SETONETIMEPKTRQST(PKTTAG(p));
+				}
+
+				*entry_out = entry;
+				ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec]--;
+				ctx->pkt_cnt_per_ac[prec]--;
+				_dhd_wlfc_flow_control_check(ctx, &entry->psq,
+					DHD_PKTTAG_IF(PKTTAG(p)));
+				/*
+				A packet has been picked up, update traffic
+				availability bitmap, if applicable
+				*/
+				_dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+				return p;
+			}
+		}
+	}
+	return NULL;
+}
+
+static int
+_dhd_wlfc_enque_delayq(athost_wl_status_info_t* ctx, void* pktbuf, int prec)
+{
+	wlfc_mac_descriptor_t* entry;
+
+	if (pktbuf != NULL) {
+		entry = _dhd_wlfc_find_table_entry(ctx, pktbuf);
+		if (entry == NULL) {
+			DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+			return BCME_ERROR;
+		}
+
+		/*
+		- suppressed packets go to sub_queue[2*prec + 1] AND
+		- delayed packets go to sub_queue[2*prec + 0] to ensure
+		order of delivery.
+		*/
+		if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, pktbuf, (prec << 1),
+			FALSE, WLFC_SEQCOUNT(entry, prec))
+			== FALSE) {
+			WLFC_DBGMESG(("D"));
+			ctx->stats.delayq_full_error++;
+			return BCME_ERROR;
+		}
+
+
+		/*
+		A packet has been pushed, update traffic availability bitmap,
+		if applicable
+		*/
+		_dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+	}
+
+	return BCME_OK;
+}
+
+static bool _dhd_wlfc_ifpkt_fn(void* p, void *p_ifid)
+{
+	if (!p || !p_ifid)
+		return FALSE;
+
+	return (DHD_PKTTAG_WLFCPKT(PKTTAG(p)) && (*((uint8 *)p_ifid) == DHD_PKTTAG_IF(PKTTAG(p))));
+}
+
+static bool _dhd_wlfc_entrypkt_fn(void* p, void *entry)
+{
+	if (!p || !entry)
+		return FALSE;
+
+	return (DHD_PKTTAG_WLFCPKT(PKTTAG(p)) && (entry == DHD_PKTTAG_ENTRY(PKTTAG(p))));
+}
+
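+/* In implied-credit mode, a completed credit-checked packet returns one
+ * credit. Repay the highest-priority AC this FIFO borrowed from first;
+ * only if nothing is owed does the credit go back to the packet's own FIFO.
+ */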
+static void
+_dhd_wlfc_return_implied_credit(athost_wl_status_info_t* wlfc, void* pkt)
+{
+	dhd_pub_t *dhdp;
+
+	if (!wlfc || !pkt) {
+		return;
+	}
+
+	dhdp = (dhd_pub_t *)(wlfc->dhdp);
+	if (dhdp && (dhdp->proptxstatus_mode == WLFC_FCMODE_IMPLIED_CREDIT) &&
+		DHD_PKTTAG_CREDITCHECK(PKTTAG(pkt))) {
+		int lender, credit_returned = 0;
+		uint8 fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pkt));
+
+		/* Note that borrower is fifo_id */
+		/* Return credits to highest priority lender first */
+		for (lender = AC_COUNT; lender >= 0; lender--) {
+			if (wlfc->credits_borrowed[fifo_id][lender] > 0) {
+				wlfc->FIFO_credit[lender]++;
+				wlfc->credits_borrowed[fifo_id][lender]--;
+				credit_returned = 1;
+				break;
+			}
+		}
+
+		if (!credit_returned) {
+			wlfc->FIFO_credit[fifo_id]++;
+		}
+	}
+}
+
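+/* Flush a pktq, freeing every packet for which fn (when given) returns
+ * TRUE. PSQ flushes fix up the queued-packet counters and hanger
+ * references; AFQ flushes additionally unwind transit counts and return
+ * implied credits, since those packets were already handed to the bus.
+ */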
+static void
+_dhd_wlfc_pktq_flush(athost_wl_status_info_t* ctx, struct pktq *pq,
+	bool dir, f_processpkt_t fn, void *arg, q_type_t q_type)
+{
+	int prec;
+	dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+
+	ASSERT(dhdp);
+
+	/* Optimize flush, if pktq len = 0, just return.
+	 * pktq len of 0 means pktq's prec q's are all empty.
+	 */
+	if (pq->len == 0) {
+		return;
+	}
+
+
+	for (prec = 0; prec < pq->num_prec; prec++) {
+		struct pktq_prec *q;
+		void *p, *prev = NULL;
+
+		q = &pq->q[prec];
+		p = q->head;
+		while (p) {
+			if (fn == NULL || (*fn)(p, arg)) {
+				bool head = (p == q->head);
+				if (head)
+					q->head = PKTLINK(p);
+				else
+					PKTSETLINK(prev, PKTLINK(p));
+				if (q_type == Q_TYPE_PSQ) {
+					if (!WLFC_GET_AFQ(dhdp->wlfc_mode) && (prec & 1)) {
+						_dhd_wlfc_hanger_remove_reference(ctx->hanger, p);
+					}
+					ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--;
+					ctx->pkt_cnt_per_ac[prec>>1]--;
+					ctx->stats.cleanup_psq_cnt++;
+					if (!(prec & 1)) {
+						/* pkt in delayed q, so fake push BDC header for
+						 * dhd_tcpack_check_xmit() and dhd_txcomplete().
+						 */
+						_dhd_wlfc_pushheader(ctx, p, FALSE, 0, 0,
+							0, 0, TRUE);
+#ifdef DHDTCPACK_SUPPRESS
+						if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
+							DHD_ERROR(("%s %d: tcpack_suppress ERROR!!!"
+								" Stop using it\n",
+								__FUNCTION__, __LINE__));
+							dhd_tcpack_suppress_set(dhdp,
+								TCPACK_SUP_OFF);
+						}
+#endif /* DHDTCPACK_SUPPRESS */
+					}
+				} else if (q_type == Q_TYPE_AFQ) {
+					wlfc_mac_descriptor_t* entry =
+						_dhd_wlfc_find_table_entry(ctx, p);
+					entry->transit_count--;
+					if (entry->suppressed &&
+						(--entry->suppr_transit_count == 0)) {
+						entry->suppressed = FALSE;
+					}
+					_dhd_wlfc_return_implied_credit(ctx, p);
+					ctx->stats.cleanup_fw_cnt++;
+				}
+				PKTSETLINK(p, NULL);
+				dhd_txcomplete(dhdp, p, FALSE);
+				PKTFREE(ctx->osh, p, dir);
+				if (dir) {
+					ctx->stats.pktout++;
+				}
+				q->len--;
+				pq->len--;
+				p = (head ? q->head : PKTLINK(prev));
+			} else {
+				prev = p;
+				p = PKTLINK(p);
+			}
+		}
+
+		if (q->head == NULL) {
+			ASSERT(q->len == 0);
+			q->tail = NULL;
+		}
+
+	}
+
+	if (fn == NULL)
+		ASSERT(pq->len == 0);
+}
+
+static void*
+_dhd_wlfc_pktq_pdeq_with_fn(struct pktq *pq, int prec, f_processpkt_t fn, void *arg)
+{
+	struct pktq_prec *q;
+	void *p, *prev = NULL;
+
+	ASSERT(prec >= 0 && prec < pq->num_prec);
+
+	q = &pq->q[prec];
+	p = q->head;
+
+	while (p) {
+		if (fn == NULL || (*fn)(p, arg)) {
+			break;
+		} else {
+			prev = p;
+			p = PKTLINK(p);
+		}
+	}
+	if (p == NULL)
+		return NULL;
+
+	if (prev == NULL) {
+		if ((q->head = PKTLINK(p)) == NULL) {
+			q->tail = NULL;
+		}
+	} else {
+		PKTSETLINK(prev, PKTLINK(p));
+		if (q->tail == p) {
+			q->tail = prev;
+		}
+	}
+
+	q->len--;
+
+	pq->len--;
+
+	PKTSETLINK(p, NULL);
+
+	return p;
+}
+
+static void
+_dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+	int prec;
+	void *pkt = NULL, *head = NULL, *tail = NULL;
+	struct pktq *txq = (struct pktq *)dhd_bus_txq(dhd->bus);
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
+	wlfc_mac_descriptor_t* entry;
+
+	dhd_os_sdlock_txq(dhd);
+	for (prec = 0; prec < txq->num_prec; prec++) {
+		while ((pkt = _dhd_wlfc_pktq_pdeq_with_fn(txq, prec, fn, arg))) {
+#ifdef DHDTCPACK_SUPPRESS
+			if (dhd_tcpack_check_xmit(dhd, pkt) == BCME_ERROR) {
+				DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
+					__FUNCTION__, __LINE__));
+				dhd_tcpack_suppress_set(dhd, TCPACK_SUP_OFF);
+			}
+#endif /* DHDTCPACK_SUPPRESS */
+			if (!head) {
+				head = pkt;
+			}
+			if (tail) {
+				PKTSETLINK(tail, pkt);
+			}
+			tail = pkt;
+		}
+	}
+	dhd_os_sdunlock_txq(dhd);
+
+
+	while ((pkt = head)) {
+		head = PKTLINK(pkt);
+		PKTSETLINK(pkt, NULL);
+		entry = _dhd_wlfc_find_table_entry(wlfc, pkt);
+
+		if (!WLFC_GET_AFQ(dhd->wlfc_mode) &&
+			!_dhd_wlfc_hanger_remove_reference(h, pkt)) {
+			DHD_ERROR(("%s: can't find pkt(%p) in hanger, free it anyway\n",
+				__FUNCTION__, pkt));
+		}
+		entry->transit_count--;
+		if (entry->suppressed &&
+			(--entry->suppr_transit_count == 0)) {
+			entry->suppressed = FALSE;
+		}
+		_dhd_wlfc_return_implied_credit(wlfc, pkt);
+		dhd_txcomplete(dhd, pkt, FALSE);
+		PKTFREE(wlfc->osh, pkt, TRUE);
+		wlfc->stats.pktout++;
+		wlfc->stats.cleanup_txq_cnt++;
+
+	}
+}
+
+void
+_dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+	int i;
+	int total_entries;
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
+
+	wlfc->stats.cleanup_txq_cnt = 0;
+	wlfc->stats.cleanup_psq_cnt = 0;
+	wlfc->stats.cleanup_fw_cnt = 0;
+	/*
+	*  flush sequence should be txq -> psq -> hanger/afq, the hanger has to be last
+	*/
+	/* flush bus->txq */
+	_dhd_wlfc_cleanup_txq(dhd, fn, arg);
+
+
+	/* flush psq, search all entries, including nodes as well as interfaces */
+	total_entries = sizeof(wlfc->destination_entries)/sizeof(wlfc_mac_descriptor_t);
+	table = (wlfc_mac_descriptor_t*)&wlfc->destination_entries;
+
+	for (i = 0; i < total_entries; i++) {
+		if (table[i].occupied) {
+			/* release packets held in PSQ (both delayed and suppressed) */
+			if (table[i].psq.len) {
+				WLFC_DBGMESG(("%s(): PSQ[%d].len = %d\n",
+					__FUNCTION__, i, table[i].psq.len));
+				_dhd_wlfc_pktq_flush(wlfc, &table[i].psq, TRUE,
+					fn, arg, Q_TYPE_PSQ);
+			}
+
+			/* free packets held in AFQ */
+			if (WLFC_GET_AFQ(dhd->wlfc_mode) && (table[i].afq.len)) {
+				_dhd_wlfc_pktq_flush(wlfc, &table[i].afq, TRUE,
+					fn, arg, Q_TYPE_AFQ);
+			}
+
+			if ((fn == NULL) && (&table[i] != &wlfc->destination_entries.other)) {
+				table[i].occupied = 0;
+				if (table[i].transit_count || table[i].suppr_transit_count) {
+					DHD_ERROR(("%s: table[%d] transit(%d), suppr_tansit(%d)\n",
+						__FUNCTION__, i,
+						table[i].transit_count,
+						table[i].suppr_transit_count));
+				}
+			}
+		}
+	}
+
+	/*
+		. flush the remaining pkts held only in the hanger queue, i.e. in neither bus->txq nor psq.
+		. these remaining pkts were already successfully downloaded to the dongle.
+		. a hanger slot cannot be set free until a txstatus update is received.
+	*/
+	if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+		for (i = 0; i < h->max_items; i++) {
+			if ((h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) ||
+				(h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED)) {
+				if (fn == NULL || (*fn)(h->items[i].pkt, arg)) {
+					table = _dhd_wlfc_find_table_entry(wlfc, h->items[i].pkt);
+					table->transit_count--;
+					if (table->suppressed &&
+						(--table->suppr_transit_count == 0)) {
+						table->suppressed = FALSE;
+					}
+					_dhd_wlfc_return_implied_credit(wlfc, h->items[i].pkt);
+					dhd_txcomplete(dhd, h->items[i].pkt, FALSE);
+					PKTFREE(wlfc->osh, h->items[i].pkt, TRUE);
+					wlfc->stats.pktout++;
+					wlfc->stats.cleanup_fw_cnt++;
+					h->items[i].state = WLFC_HANGER_ITEM_STATE_WAIT_CLEAN;
+				}
+			}
+		}
+	}
+
+	return;
+}
+
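+/* Add, update or delete a destination (MAC or interface) entry. ADD also
+ * initializes the psq/afq packet queues and links the entry into the
+ * circular active-entry list used for round-robin dequeue; DEL cleans up
+ * any queued packets first and then unlinks the entry.
+ */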
+static int
+_dhd_wlfc_mac_entry_update(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
+	uint8 action, uint8 ifid, uint8 iftype, uint8* ea,
+	f_processpkt_t fn, void *arg)
+{
+	int rc = BCME_OK;
+
+
+	if ((action == eWLFC_MAC_ENTRY_ACTION_ADD) || (action == eWLFC_MAC_ENTRY_ACTION_UPDATE)) {
+		entry->occupied = 1;
+		entry->state = WLFC_STATE_OPEN;
+		entry->requested_credit = 0;
+		entry->interface_id = ifid;
+		entry->iftype = iftype;
+		entry->ac_bitmap = 0xff; /* update this when handling APSD */
+		/* for an interface entry we may not care about the MAC address */
+		if (ea != NULL)
+			memcpy(&entry->ea[0], ea, ETHER_ADDR_LEN);
+
+		if (action == eWLFC_MAC_ENTRY_ACTION_ADD) {
+			dhd_pub_t *dhdp = (dhd_pub_t *)(ctx->dhdp);
+			pktq_init(&entry->psq, WLFC_PSQ_PREC_COUNT, WLFC_PSQ_LEN);
+			if (WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+				pktq_init(&entry->afq, WLFC_AFQ_PREC_COUNT, WLFC_PSQ_LEN);
+			}
+
+			if (entry->next == NULL) {
+				/* not linked to anywhere, add to tail */
+				if (ctx->active_entry_head) {
+					entry->prev = ctx->active_entry_head->prev;
+					ctx->active_entry_head->prev->next = entry;
+					ctx->active_entry_head->prev = entry;
+					entry->next = ctx->active_entry_head;
+
+				} else {
+					ASSERT(ctx->active_entry_count == 0);
+					entry->prev = entry->next = entry;
+					ctx->active_entry_head = entry;
+				}
+				ctx->active_entry_count++;
+			} else {
+				DHD_ERROR(("%s():%d, entry(%d)\n", __FUNCTION__, __LINE__,
+					(int)(entry - &ctx->destination_entries.nodes[0])));
+			}
+		}
+	} else if (action == eWLFC_MAC_ENTRY_ACTION_DEL) {
+		/* When the entry is deleted, the packets queued in the entry must be
+		   cleaned up. The cleanup must happen before 'occupied' is set to 0.
+		*/
+		_dhd_wlfc_cleanup(ctx->dhdp, fn, arg);
+		_dhd_wlfc_flow_control_check(ctx, &entry->psq, ifid);
+
+		entry->occupied = 0;
+		entry->suppressed = 0;
+		entry->state = WLFC_STATE_CLOSE;
+		entry->requested_credit = 0;
+		entry->transit_count = 0;
+		entry->suppr_transit_count = 0;
+		memset(&entry->ea[0], 0, ETHER_ADDR_LEN);
+
+		if (entry->next) {
+			/* not floating, remove from Q */
+			if (ctx->active_entry_count <= 1) {
+				/* last item */
+				ctx->active_entry_head = NULL;
+				ctx->active_entry_count = 0;
+			} else {
+				entry->prev->next = entry->next;
+				entry->next->prev = entry->prev;
+				if (entry == ctx->active_entry_head) {
+					ctx->active_entry_head = entry->next;
+				}
+				ctx->active_entry_count--;
+			}
+			entry->next = entry->prev = NULL;
+		} else {
+			DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		}
+	}
+	return rc;
+}
+
+#ifdef LIMIT_BORROW
+static int
+_dhd_wlfc_borrow_credit(athost_wl_status_info_t* ctx, int highest_lender_ac, int borrower_ac)
+{
+	int lender_ac;
+	int rc = -1;
+
+	if (ctx == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return -1;
+	}
+
+	/* Borrow from lowest priority available AC (including BC/MC credits) */
+	for (lender_ac = 0; lender_ac <= highest_lender_ac; lender_ac++) {
+		if (ctx->FIFO_credit[lender_ac] > 0) {
+			ctx->credits_borrowed[borrower_ac][lender_ac]++;
+			ctx->FIFO_credit[lender_ac]--;
+			rc = lender_ac;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+static int _dhd_wlfc_return_credit(athost_wl_status_info_t* ctx, int lender_ac, int borrower_ac)
+{
+	if ((ctx == NULL) || (lender_ac < 0) || (lender_ac > AC_COUNT) ||
+		(borrower_ac < 0) || (borrower_ac > AC_COUNT)) {
+		DHD_ERROR(("Error: %s():%d, ctx(%p), lender_ac(%d), borrower_ac(%d)\n",
+			__FUNCTION__, __LINE__, ctx, lender_ac, borrower_ac));
+
+		return BCME_BADARG;
+	}
+
+	ctx->credits_borrowed[borrower_ac][lender_ac]--;
+	ctx->FIFO_credit[lender_ac]++;
+
+	return BCME_OK;
+}
+#endif /* LIMIT_BORROW */
+
+static int
+_dhd_wlfc_interface_entry_update(void* state,
+	uint8 action, uint8 ifid, uint8 iftype, uint8* ea)
+{
+	athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+	wlfc_mac_descriptor_t* entry;
+
+	if (ifid >= WLFC_MAX_IFNUM)
+		return BCME_BADARG;
+
+	entry = &ctx->destination_entries.interfaces[ifid];
+
+	return _dhd_wlfc_mac_entry_update(ctx, entry, action, ifid, iftype, ea,
+		_dhd_wlfc_ifpkt_fn, &ifid);
+}
+
+static int
+_dhd_wlfc_BCMCCredit_support_update(void* state)
+{
+	athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+
+	ctx->bcmc_credit_supported = TRUE;
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits)
+{
+	athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+	int i;
+
+	for (i = 0; i <= 4; i++) {
+		if (ctx->Init_FIFO_credit[i] != ctx->FIFO_credit[i]) {
+			DHD_ERROR(("%s: credit[i] is not returned, (%d %d)\n",
+				__FUNCTION__, ctx->Init_FIFO_credit[i], ctx->FIFO_credit[i]));
+		}
+	}
+
+	/* update the AC FIFO credit map */
+	ctx->FIFO_credit[0] += (credits[0] - ctx->Init_FIFO_credit[0]);
+	ctx->FIFO_credit[1] += (credits[1] - ctx->Init_FIFO_credit[1]);
+	ctx->FIFO_credit[2] += (credits[2] - ctx->Init_FIFO_credit[2]);
+	ctx->FIFO_credit[3] += (credits[3] - ctx->Init_FIFO_credit[3]);
+	ctx->FIFO_credit[4] += (credits[4] - ctx->Init_FIFO_credit[4]);
+
+	ctx->Init_FIFO_credit[0] = credits[0];
+	ctx->Init_FIFO_credit[1] = credits[1];
+	ctx->Init_FIFO_credit[2] = credits[2];
+	ctx->Init_FIFO_credit[3] = credits[3];
+	ctx->Init_FIFO_credit[4] = credits[4];
+
+	/* credit for ATIM FIFO is not used yet. */
+	ctx->Init_FIFO_credit[5] = ctx->FIFO_credit[5] = 0;
+
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_handle_packet_commit(athost_wl_status_info_t* ctx, int ac,
+    dhd_wlfc_commit_info_t *commit_info, f_commitpkt_t fcommit, void* commit_ctx)
+{
+	uint32 hslot;
+	int	rc;
+	dhd_pub_t *dhdp = (dhd_pub_t *)(ctx->dhdp);
+
+	/*
+		if ac_fifo_credit_spent = 0
+
+		This packet will not count against the FIFO credit.
+		To ensure the txstatus corresponding to this packet
+		does not provide an implied credit (default behavior)
+		mark the packet accordingly.
+
+		if ac_fifo_credit_spent = 1
+
+		This is a normal packet and it counts against the FIFO
+		credit count.
+	*/
+	DHD_PKTTAG_SETCREDITCHECK(PKTTAG(commit_info->p), commit_info->ac_fifo_credit_spent);
+	rc = _dhd_wlfc_pretx_pktprocess(ctx, commit_info->mac_entry, commit_info->p,
+	     commit_info->needs_hdr, &hslot);
+
+	if (rc == BCME_OK) {
+		DHD_PKTTAG_WLFCPKT_SET(PKTTAG(commit_info->p), 1);
+		rc = fcommit(commit_ctx, commit_info->p);
+		if (rc == BCME_OK) {
+			uint8 gen = WL_TXSTATUS_GET_GENERATION(
+				DHD_PKTTAG_H2DTAG(PKTTAG(commit_info->p)));
+			if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+				_dhd_wlfc_hanger_waitevent_set(ctx->hanger, hslot,
+					WLFC_HANGER_ITEM_WAIT_EVENT_COUNT);
+			}
+			ctx->stats.pkt2bus++;
+			if (commit_info->ac_fifo_credit_spent || (ac == AC_COUNT)) {
+				ctx->stats.send_pkts[ac]++;
+				WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac);
+			}
+
+			if (gen != commit_info->mac_entry->generation) {
+				/* will be suppressed back by design */
+				if (!commit_info->mac_entry->suppressed) {
+					commit_info->mac_entry->suppressed = TRUE;
+				}
+				commit_info->mac_entry->suppr_transit_count++;
+			}
+			commit_info->mac_entry->transit_count++;
+		} else if (commit_info->needs_hdr) {
+			if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+				void *pout = NULL;
+				/* pop hanger for delayed packet */
+				_dhd_wlfc_hanger_poppkt(ctx->hanger, WL_TXSTATUS_GET_HSLOT(
+					DHD_PKTTAG_H2DTAG(PKTTAG(commit_info->p))), &pout, 1);
+				ASSERT(commit_info->p == pout);
+			}
+		}
+	} else {
+		ctx->stats.generic_error++;
+	}
+
+	if (rc != BCME_OK) {
+		/*
+		   pretx pkt process or bus commit has failed, rollback.
+		   - remove wl-header for a delayed packet
+		   - save the wl-header for suppressed packets
+		   - reset credit check flag
+		*/
+		_dhd_wlfc_rollback_packet_toq(ctx, commit_info->p, commit_info->pkt_type, hslot);
+		DHD_PKTTAG_SETCREDITCHECK(PKTTAG(commit_info->p), 0);
+	}
+
+	return rc;
+}
+
+static uint8
+_dhd_wlfc_find_mac_desc_id_from_mac(dhd_pub_t *dhdp, uint8* ea)
+{
+	wlfc_mac_descriptor_t* table =
+		((athost_wl_status_info_t*)dhdp->wlfc_state)->destination_entries.nodes;
+	uint8 table_index;
+
+	if (ea != NULL) {
+		for (table_index = 0; table_index < WLFC_MAC_DESC_TABLE_SIZE; table_index++) {
+			if ((memcmp(ea, &table[table_index].ea[0], ETHER_ADDR_LEN) == 0) &&
+				table[table_index].occupied)
+				return table_index;
+		}
+	}
+	return WLFC_MAC_DESC_ID_INVALID;
+}
+
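+/* Process a (possibly compressed) txstatus covering 'len' consecutive
+ * packets: pop each from the hanger or AFQ, return its implied or
+ * requested credit, and either complete-and-free it or re-queue it on the
+ * suppress sub-queue. hcnt advances per packet, as do hslot in hanger mode
+ * and the sequence number when REUSESEQ is in use.
+ */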
+static int
+_dhd_wlfc_compressed_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info, uint8 len, void** p_mac)
+{
+	uint8 status_flag;
+	uint32 status;
+	int ret = BCME_OK;
+	int remove_from_hanger = 1;
+	void* pktbuf = NULL;
+	uint8 fifo_id = 0, gen = 0, count = 0, hcnt;
+	uint16 hslot;
+	wlfc_mac_descriptor_t* entry = NULL;
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	uint16 seq = 0, seq_fromfw = 0, seq_num = 0;
+
+	memcpy(&status, pkt_info, sizeof(uint32));
+	status_flag = WL_TXSTATUS_GET_FLAGS(status);
+	hcnt = WL_TXSTATUS_GET_FREERUNCTR(status);
+	hslot = WL_TXSTATUS_GET_HSLOT(status);
+	fifo_id = WL_TXSTATUS_GET_FIFO(status);
+	gen = WL_TXSTATUS_GET_GENERATION(status);
+
+	if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
+		memcpy(&seq, pkt_info + WLFC_CTL_VALUE_LEN_TXSTATUS, WLFC_CTL_VALUE_LEN_SEQ);
+		seq_fromfw = WL_SEQ_GET_FROMFW(seq);
+		seq_num = WL_SEQ_GET_NUM(seq);
+	}
+
+	wlfc->stats.txstatus_in += len;
+
+	if (status_flag == WLFC_CTL_PKTFLAG_DISCARD) {
+		wlfc->stats.pkt_freed += len;
+	}
+
+	else if (status_flag == WLFC_CTL_PKTFLAG_DISCARD_NOACK) {
+		wlfc->stats.pkt_freed += len;
+	}
+
+	else if (status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) {
+		wlfc->stats.d11_suppress += len;
+		remove_from_hanger = 0;
+	}
+
+	else if (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS) {
+		wlfc->stats.wl_suppress += len;
+		remove_from_hanger = 0;
+	}
+
+	else if (status_flag == WLFC_CTL_PKTFLAG_TOSSED_BYWLC) {
+		wlfc->stats.wlc_tossed_pkts += len;
+	}
+
+	if (dhd->proptxstatus_txstatus_ignore) {
+		if (!remove_from_hanger) {
+			DHD_ERROR(("suppress txstatus: %d\n", status_flag));
+		}
+		return BCME_OK;
+	}
+
+	while (count < len) {
+		if (WLFC_GET_AFQ(dhd->wlfc_mode)) {
+			ret = _dhd_wlfc_deque_afq(wlfc, hslot, hcnt, fifo_id, &pktbuf);
+		} else {
+			if (_dhd_wlfc_hanger_wait_clean(wlfc->hanger, hslot)) {
+				goto cont;
+			}
+
+			ret = _dhd_wlfc_hanger_poppkt(wlfc->hanger, hslot, &pktbuf, 0);
+		}
+
+		if ((ret != BCME_OK) || !pktbuf) {
+			goto cont;
+		}
+
+		/* set fifo_id to the correct value because not all firmware sets it */
+		fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pktbuf));
+
+		entry = _dhd_wlfc_find_table_entry(wlfc, pktbuf);
+
+		if (!remove_from_hanger) {
+			/* this packet was suppressed */
+			if (!entry->suppressed || (entry->generation != gen)) {
+				if (!entry->suppressed) {
+					entry->suppr_transit_count = entry->transit_count;
+					if (p_mac) {
+						*p_mac = entry;
+					}
+				} else {
+					DHD_ERROR(("gen(%d), entry->generation(%d)\n",
+						gen, entry->generation));
+				}
+				entry->suppressed = TRUE;
+
+			}
+			entry->generation = gen;
+		}
+
+#ifdef PROP_TXSTATUS_DEBUG
+		if (!WLFC_GET_AFQ(dhd->wlfc_mode))
+		{
+			uint32 new_t = OSL_SYSUPTIME();
+			uint32 old_t;
+			uint32 delta;
+			old_t = ((wlfc_hanger_t*)(wlfc->hanger))->items[hslot].push_time;
+
+
+			wlfc->stats.latency_sample_count++;
+			if (new_t > old_t)
+				delta = new_t - old_t;
+			else
+				delta = 0xffffffff + new_t - old_t;
+			wlfc->stats.total_status_latency += delta;
+			wlfc->stats.latency_most_recent = delta;
+
+			wlfc->stats.deltas[wlfc->stats.idx_delta++] = delta;
+			if (wlfc->stats.idx_delta == sizeof(wlfc->stats.deltas)/sizeof(uint32))
+				wlfc->stats.idx_delta = 0;
+		}
+#endif /* PROP_TXSTATUS_DEBUG */
+
+		/* pick up the implicit credit from this packet */
+		if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pktbuf))) {
+			_dhd_wlfc_return_implied_credit(wlfc, pktbuf);
+		} else {
+			/*
+			if this packet did not count against FIFO credit, it must have
+			taken a requested_credit from the destination entry (for pspoll etc.)
+			*/
+			if (!DHD_PKTTAG_ONETIMEPKTRQST(PKTTAG(pktbuf)))
+				entry->requested_credit++;
+#ifdef PROP_TXSTATUS_DEBUG
+			entry->dstncredit_acks++;
+#endif
+		}
+
+		if ((status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) ||
+			(status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS)) {
+			/* save generation bit inside packet */
+			WL_TXSTATUS_SET_GENERATION(DHD_PKTTAG_H2DTAG(PKTTAG(pktbuf)), gen);
+
+			if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
+				WL_SEQ_SET_FROMDRV(DHD_PKTTAG_H2DSEQ(PKTTAG(pktbuf)), seq_fromfw);
+				WL_SEQ_SET_NUM(DHD_PKTTAG_H2DSEQ(PKTTAG(pktbuf)), seq_num);
+			}
+
+			ret = _dhd_wlfc_enque_suppressed(wlfc, fifo_id, pktbuf);
+			if (ret != BCME_OK) {
+				/* delay q is full, drop this packet */
+				DHD_WLFC_QMON_COMPLETE(entry);
+				_dhd_wlfc_prec_drop(dhd, (fifo_id << 1) + 1, pktbuf, FALSE);
+			} else {
+				if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+					/* Mark suppressed to avoid a double free
+					during wlfc cleanup
+					*/
+					_dhd_wlfc_hanger_mark_suppressed(wlfc->hanger, hslot, gen);
+				}
+			}
+		} else {
+			uint8 waitevent = 0;
+			void *pktbuf_tmp = NULL;
+			dhd_txcomplete(dhd, pktbuf, TRUE);
+
+			DHD_WLFC_QMON_COMPLETE(entry);
+			wlfc->stats.pktout++;
+
+			if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+				waitevent = _dhd_wlfc_hanger_waitevent_decreturn(wlfc->hanger,
+				        hslot);
+				if (!waitevent) {
+					ret = _dhd_wlfc_hanger_poppkt(wlfc->hanger,
+					        hslot, &pktbuf_tmp, 1);
+					ASSERT((ret == BCME_OK) && (pktbuf == pktbuf_tmp));
+				}
+			}
+			if (!waitevent) {
+				/* free the packet */
+				PKTFREE(wlfc->osh, pktbuf, TRUE);
+			}
+		}
+		/* pkt back from firmware side */
+		entry->transit_count--;
+		if (entry->suppressed && (--entry->suppr_transit_count == 0)) {
+			entry->suppressed = FALSE;
+		}
+
+cont:
+		hcnt = (hcnt + 1) & WL_TXSTATUS_FREERUNCTR_MASK;
+		if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+			hslot = (hslot + 1) & WL_TXSTATUS_HSLOT_MASK;
+		}
+
+		if (WLFC_GET_REUSESEQ(dhd->wlfc_mode) && seq_fromfw) {
+			seq_num = (seq_num + 1) & WL_SEQ_NUM_MASK;
+		}
+
+		count++;
+	}
+	return BCME_OK;
+}
+
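+/* In explicit-credit mode, return the FIFO credits announced by the
+ * firmware. Borrowed credits are repaid to the highest-priority lender
+ * first; any remainder goes to the AC itself, capped at its initial
+ * credit allocation.
+ */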
+static int
+_dhd_wlfc_fifocreditback_indicate(dhd_pub_t *dhd, uint8* credits)
+{
+	int i;
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	for (i = 0; i < WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK; i++) {
+#ifdef PROP_TXSTATUS_DEBUG
+		wlfc->stats.fifo_credits_back[i] += credits[i];
+#endif
+
+		/* update FIFO credits */
+		if (dhd->proptxstatus_mode == WLFC_FCMODE_EXPLICIT_CREDIT)
+		{
+			int lender; /* Note that borrower is i */
+
+			/* Return credits to highest priority lender first */
+			for (lender = AC_COUNT; (lender >= 0) && (credits[i] > 0); lender--) {
+				if (wlfc->credits_borrowed[i][lender] > 0) {
+					if (credits[i] >= wlfc->credits_borrowed[i][lender]) {
+						credits[i] -=
+							(uint8)wlfc->credits_borrowed[i][lender];
+						wlfc->FIFO_credit[lender] +=
+						    wlfc->credits_borrowed[i][lender];
+						wlfc->credits_borrowed[i][lender] = 0;
+					}
+					else {
+						wlfc->credits_borrowed[i][lender] -= credits[i];
+						wlfc->FIFO_credit[lender] += credits[i];
+						credits[i] = 0;
+					}
+				}
+			}
+
+			/* If we have more credits left over, these must belong to the AC */
+			if (credits[i] > 0) {
+				wlfc->FIFO_credit[i] += credits[i];
+			}
+
+			if (wlfc->FIFO_credit[i] > wlfc->Init_FIFO_credit[i]) {
+				wlfc->FIFO_credit[i] = wlfc->Init_FIFO_credit[i];
+			}
+		}
+	}
+
+	return BCME_OK;
+}
+
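+/* Pull matching packets back out of the bus txq and run each through the
+ * txstatus path as a fake WLSUPPRESS (re-inserting it into the AFQ first
+ * in AFQ mode), so it lands on the suppress sub-queue; credits spent on
+ * those packets are faked back as well.
+ */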
+static void
+_dhd_wlfc_suppress_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* entry;
+	int prec;
+	void *pkt = NULL, *head = NULL, *tail = NULL;
+	struct pktq *txq = (struct pktq *)dhd_bus_txq(dhd->bus);
+	uint8	results[WLFC_CTL_VALUE_LEN_TXSTATUS+WLFC_CTL_VALUE_LEN_SEQ];
+	uint8 credits[WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK] = {0};
+	uint32 htod = 0;
+	uint16 htodseq = 0;
+	bool bCreditUpdate = FALSE;
+
+	dhd_os_sdlock_txq(dhd);
+	for (prec = 0; prec < txq->num_prec; prec++) {
+		while ((pkt = _dhd_wlfc_pktq_pdeq_with_fn(txq, prec, fn, arg))) {
+			if (!head) {
+				head = pkt;
+			}
+			if (tail) {
+				PKTSETLINK(tail, pkt);
+			}
+			tail = pkt;
+		}
+	}
+	dhd_os_sdunlock_txq(dhd);
+
+	while ((pkt = head)) {
+		head = PKTLINK(pkt);
+		PKTSETLINK(pkt, NULL);
+
+		entry = _dhd_wlfc_find_table_entry(wlfc, pkt);
+
+		/* fake a suppression txstatus */
+		htod = DHD_PKTTAG_H2DTAG(PKTTAG(pkt));
+		WL_TXSTATUS_SET_FLAGS(htod, WLFC_CTL_PKTFLAG_WLSUPPRESS);
+		WL_TXSTATUS_SET_GENERATION(htod, entry->generation);
+		memcpy(results, &htod, WLFC_CTL_VALUE_LEN_TXSTATUS);
+		if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
+			htodseq = DHD_PKTTAG_H2DSEQ(PKTTAG(pkt));
+			if (WL_SEQ_GET_FROMDRV(htodseq)) {
+				WL_SEQ_SET_FROMFW(htodseq, 1);
+				WL_SEQ_SET_FROMDRV(htodseq, 0);
+			}
+			memcpy(results + WLFC_CTL_VALUE_LEN_TXSTATUS, &htodseq,
+				WLFC_CTL_VALUE_LEN_SEQ);
+		}
+		if (WLFC_GET_AFQ(dhd->wlfc_mode)) {
+			_dhd_wlfc_enque_afq(wlfc, pkt);
+		}
+		_dhd_wlfc_compressed_txstatus_update(dhd, results, 1, NULL);
+
+		/* fake a fifo credit back */
+		if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pkt))) {
+			credits[DHD_PKTTAG_FIFO(PKTTAG(pkt))]++;
+			bCreditUpdate = TRUE;
+		}
+	}
+
+	if (bCreditUpdate) {
+		_dhd_wlfc_fifocreditback_indicate(dhd, credits);
+	}
+}
+
+
+static int
+_dhd_wlfc_dbg_senum_check(dhd_pub_t *dhd, uint8 *value)
+{
+	uint32 timestamp;
+
+	(void)dhd;
+
+	bcopy(&value[2], &timestamp, sizeof(uint32));
+	DHD_INFO(("RXPKT: SEQ: %d, timestamp %d\n", value[1], timestamp));
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_rssi_indicate(dhd_pub_t *dhd, uint8* rssi)
+{
+	(void)dhd;
+	(void)rssi;
+	return BCME_OK;
+}
+
+static void
+_dhd_wlfc_add_requested_entry(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor_t* entry)
+{
+	int i;
+
+	if (!wlfc || !entry) {
+		return;
+	}
+
+	for (i = 0; i < wlfc->requested_entry_count; i++) {
+		if (entry == wlfc->requested_entry[i]) {
+			break;
+		}
+	}
+
+	if (i == wlfc->requested_entry_count) {
+		/* no matching entry found */
+		ASSERT(wlfc->requested_entry_count <= (WLFC_MAC_DESC_TABLE_SIZE-1));
+		wlfc->requested_entry[wlfc->requested_entry_count++] = entry;
+	}
+}
+
+static void
+_dhd_wlfc_remove_requested_entry(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor_t* entry)
+{
+	int i;
+
+	if (!wlfc || !entry) {
+		return;
+	}
+
+	for (i = 0; i < wlfc->requested_entry_count; i++) {
+		if (entry == wlfc->requested_entry[i]) {
+			break;
+		}
+	}
+
+	if (i < wlfc->requested_entry_count) {
+		/* found */
+		ASSERT(wlfc->requested_entry_count > 0);
+		wlfc->requested_entry_count--;
+		if (i != wlfc->requested_entry_count) {
+			wlfc->requested_entry[i] =
+				wlfc->requested_entry[wlfc->requested_entry_count];
+		}
+		wlfc->requested_entry[wlfc->requested_entry_count] = NULL;
+	}
+}
+
+static int
+_dhd_wlfc_mac_table_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+	int rc;
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	uint8 existing_index;
+	uint8 table_index;
+	uint8 ifid;
+	uint8* ea;
+
+	WLFC_DBGMESG(("%s(), mac [%02x:%02x:%02x:%02x:%02x:%02x],%s,idx:%d,id:0x%02x\n",
+		__FUNCTION__, value[2], value[3], value[4], value[5], value[6], value[7],
+		((type == WLFC_CTL_TYPE_MACDESC_ADD) ? "ADD":"DEL"),
+		WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]), value[0]));
+
+	table = wlfc->destination_entries.nodes;
+	table_index = WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]);
+	ifid = value[1];
+	ea = &value[2];
+
+	_dhd_wlfc_remove_requested_entry(wlfc, &table[table_index]);
+	if (type == WLFC_CTL_TYPE_MACDESC_ADD) {
+		existing_index = _dhd_wlfc_find_mac_desc_id_from_mac(dhd, &value[2]);
+		if ((existing_index != WLFC_MAC_DESC_ID_INVALID) &&
+			(existing_index != table_index) && table[existing_index].occupied) {
+			/*
+			there is an existing different entry, free the old one
+			and move it to new index if necessary.
+			*/
+			rc = _dhd_wlfc_mac_entry_update(wlfc, &table[existing_index],
+				eWLFC_MAC_ENTRY_ACTION_DEL, table[existing_index].interface_id,
+				table[existing_index].iftype, NULL, _dhd_wlfc_entrypkt_fn,
+				&table[existing_index]);
+		}
+
+		if (!table[table_index].occupied) {
+			/* this new MAC entry does not exist, create one */
+			table[table_index].mac_handle = value[0];
+			rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index],
+				eWLFC_MAC_ENTRY_ACTION_ADD, ifid,
+				wlfc->destination_entries.interfaces[ifid].iftype,
+				ea, NULL, NULL);
+		} else {
+			/* the space should have been empty, but it's not */
+			wlfc->stats.mac_update_failed++;
+		}
+	}
+
+	if (type == WLFC_CTL_TYPE_MACDESC_DEL) {
+		if (table[table_index].occupied) {
+				rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index],
+					eWLFC_MAC_ENTRY_ACTION_DEL, ifid,
+					wlfc->destination_entries.interfaces[ifid].iftype,
+					ea, _dhd_wlfc_entrypkt_fn, &table[table_index]);
+		} else {
+			/* the space should have been occupied, but it's not */
+			wlfc->stats.mac_update_failed++;
+		}
+	}
+	BCM_REFERENCE(rc);
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_psmode_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+	/* Handle PS on/off indication */
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	wlfc_mac_descriptor_t* desc;
+	uint8 mac_handle = value[0];
+	int i;
+
+	table = wlfc->destination_entries.nodes;
+	desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+	if (desc->occupied) {
+		/* a fresh PS mode should wipe old ps credits? */
+		desc->requested_credit = 0;
+		if (type == WLFC_CTL_TYPE_MAC_OPEN) {
+			desc->state = WLFC_STATE_OPEN;
+			desc->ac_bitmap = 0xff;
+			DHD_WLFC_CTRINC_MAC_OPEN(desc);
+			_dhd_wlfc_remove_requested_entry(wlfc, desc);
+		}
+		else {
+			desc->state = WLFC_STATE_CLOSE;
+			DHD_WLFC_CTRINC_MAC_CLOSE(desc);
+			/*
+			Indicate to firmware if there is any traffic pending.
+			*/
+			for (i = 0; i < AC_COUNT; i++) {
+				_dhd_wlfc_traffic_pending_check(wlfc, desc, i);
+			}
+		}
+	}
+	else {
+		wlfc->stats.psmode_update_failed++;
+	}
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_interface_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+{
+	/* Handle PS on/off indication */
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	uint8 if_id = value[0];
+
+	if (if_id < WLFC_MAX_IFNUM) {
+		table = wlfc->destination_entries.interfaces;
+		if (table[if_id].occupied) {
+			if (type == WLFC_CTL_TYPE_INTERFACE_OPEN) {
+				table[if_id].state = WLFC_STATE_OPEN;
+				/* WLFC_DBGMESG(("INTERFACE[%d] OPEN\n", if_id)); */
+			}
+			else {
+				table[if_id].state = WLFC_STATE_CLOSE;
+				/* WLFC_DBGMESG(("INTERFACE[%d] CLOSE\n", if_id)); */
+			}
+			return BCME_OK;
+		}
+	}
+	wlfc->stats.interface_update_failed++;
+
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_credit_request(dhd_pub_t *dhd, uint8* value)
+{
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	wlfc_mac_descriptor_t* desc;
+	uint8 mac_handle;
+	uint8 credit;
+
+	table = wlfc->destination_entries.nodes;
+	mac_handle = value[1];
+	credit = value[0];
+
+	desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+	if (desc->occupied) {
+		desc->requested_credit = credit;
+
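+		/* value[2] carries the per-AC (APSD) bitmap; bit AC_COUNT would
+		 * be the bc/mc slot, so it is masked off here.
+		 */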
+		desc->ac_bitmap = value[2] & (~(1<<AC_COUNT));
+		_dhd_wlfc_add_requested_entry(wlfc, desc);
+	}
+	else {
+		wlfc->stats.credit_request_failed++;
+	}
+	return BCME_OK;
+}
+
+static int
+_dhd_wlfc_packet_request(dhd_pub_t *dhd, uint8* value)
+{
+	athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	wlfc_mac_descriptor_t* table;
+	wlfc_mac_descriptor_t* desc;
+	uint8 mac_handle;
+	uint8 packet_count;
+
+	table = wlfc->destination_entries.nodes;
+	mac_handle = value[1];
+	packet_count = value[0];
+
+	desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
+	if (desc->occupied) {
+		desc->requested_packet = packet_count;
+
+		desc->ac_bitmap = value[2] & (~(1<<AC_COUNT));
+		_dhd_wlfc_add_requested_entry(wlfc, desc);
+	}
+	else {
+		wlfc->stats.packet_request_failed++;
+	}
+	return BCME_OK;
+}
+
+static void
+_dhd_wlfc_reorderinfo_indicate(uint8 *val, uint8 len, uchar *info_buf, uint *info_len)
+{
+	if (info_len) {
+		if (info_buf) {
+			bcopy(val, info_buf, len);
+			*info_len = len;
+		}
+		else
+			*info_len = 0;
+	}
+}
+
+/*
+ * public functions
+ */
+
+bool dhd_wlfc_is_supported(dhd_pub_t *dhd)
+{
+	bool rc = TRUE;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return FALSE;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		rc =  FALSE;
+	}
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return rc;
+}
+
+int dhd_wlfc_enable(dhd_pub_t *dhd)
+{
+	int i, rc = BCME_OK;
+	athost_wl_status_info_t* wlfc;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_enabled || dhd->wlfc_state) {
+		rc = BCME_OK;
+		goto exit;
+	}
+
+	/* allocate space to track txstatus propagated from firmware */
+	dhd->wlfc_state = MALLOC(dhd->osh, sizeof(athost_wl_status_info_t));
+	if (dhd->wlfc_state == NULL) {
+		rc = BCME_NOMEM;
+		goto exit;
+	}
+
+	/* initialize state space */
+	wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	memset(wlfc, 0, sizeof(athost_wl_status_info_t));
+
+	/* remember osh & dhdp */
+	wlfc->osh = dhd->osh;
+	wlfc->dhdp = dhd;
+
+	if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+		wlfc->hanger = _dhd_wlfc_hanger_create(dhd->osh, WLFC_HANGER_MAXITEMS);
+		if (wlfc->hanger == NULL) {
+			MFREE(dhd->osh, dhd->wlfc_state, sizeof(athost_wl_status_info_t));
+			dhd->wlfc_state = NULL;
+			rc = BCME_NOMEM;
+			goto exit;
+		}
+	}
+
+	dhd->proptxstatus_mode = WLFC_FCMODE_EXPLICIT_CREDIT;
+
+	/* initialize all interfaces to accept traffic */
+	for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+		wlfc->hostif_flow_state[i] = OFF;
+	}
+
+	_dhd_wlfc_mac_entry_update(wlfc, &wlfc->destination_entries.other,
+		eWLFC_MAC_ENTRY_ACTION_ADD, 0xff, 0, NULL, NULL, NULL);
+
+	wlfc->allow_credit_borrow = 0;
+	wlfc->single_ac = 0;
+	wlfc->single_ac_timestamp = 0;
+
+exit:
+	dhd_os_wlfc_unblock(dhd);
+
+	return rc;
+}
+
+int
+dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len, uchar *reorder_info_buf,
+	uint *reorder_info_len)
+{
+	uint8 type, len;
+	uint8* value;
+	uint8* tmpbuf;
+	uint16 remainder = (uint16)tlv_hdr_len;
+	uint16 processed = 0;
+	athost_wl_status_info_t* wlfc = NULL;
+	void* entry;
+
+	if ((dhd == NULL) || (pktbuf == NULL)) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (dhd->proptxstatus_mode != WLFC_ONLY_AMPDU_HOSTREORDER) {
+		if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+			dhd_os_wlfc_unblock(dhd);
+			return WLFC_UNSUPPORTED;
+		}
+		wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	}
+
+	tmpbuf = (uint8*)PKTDATA(dhd->osh, pktbuf);
+
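+	/*
+	 * Illustrative layout of the TLV stream parsed below (inferred from
+	 * this loop, not from a separate spec): each signal is one type byte,
+	 * one length byte, then 'length' value bytes; WLFC_CTL_TYPE_FILLER is
+	 * a single pad byte with no length/value.
+	 *
+	 *   [type][len][value0 .. value(len-1)][type][len][...] ...
+	 */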
+	if (remainder) {
+		while ((processed < (WLFC_MAX_PENDING_DATALEN * 2)) && (remainder > 0)) {
+			type = tmpbuf[processed];
+			if (type == WLFC_CTL_TYPE_FILLER) {
+				remainder -= 1;
+				processed += 1;
+				continue;
+			}
+
+			len  = tmpbuf[processed + 1];
+			value = &tmpbuf[processed + 2];
+
+			if (remainder < (2 + len))
+				break;
+
+			remainder -= 2 + len;
+			processed += 2 + len;
+			entry = NULL;
+
+			DHD_INFO(("%s():%d type %d remainder %d processed %d\n",
+				__FUNCTION__, __LINE__, type, remainder, processed));
+
+			if (type == WLFC_CTL_TYPE_HOST_REORDER_RXPKTS)
+				_dhd_wlfc_reorderinfo_indicate(value, len, reorder_info_buf,
+					reorder_info_len);
+
+			if (wlfc == NULL) {
+				ASSERT(dhd->proptxstatus_mode == WLFC_ONLY_AMPDU_HOSTREORDER);
+
+				if (type != WLFC_CTL_TYPE_HOST_REORDER_RXPKTS &&
+					type != WLFC_CTL_TYPE_TRANS_ID)
+					DHD_INFO(("%s():%d dhd->wlfc_state is NULL yet!"
+					" type %d remainder %d processed %d\n",
+					__FUNCTION__, __LINE__, type, remainder, processed));
+				continue;
+			}
+
+			if (type == WLFC_CTL_TYPE_TXSTATUS) {
+				_dhd_wlfc_compressed_txstatus_update(dhd, value, 1, &entry);
+			}
+			else if (type == WLFC_CTL_TYPE_COMP_TXSTATUS) {
+				uint8 compcnt_offset = WLFC_CTL_VALUE_LEN_TXSTATUS;
+
+				if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
+					compcnt_offset += WLFC_CTL_VALUE_LEN_SEQ;
+				}
+				_dhd_wlfc_compressed_txstatus_update(dhd, value,
+					value[compcnt_offset], &entry);
+			}
+			else if (type == WLFC_CTL_TYPE_FIFO_CREDITBACK)
+				_dhd_wlfc_fifocreditback_indicate(dhd, value);
+
+			else if (type == WLFC_CTL_TYPE_RSSI)
+				_dhd_wlfc_rssi_indicate(dhd, value);
+
+			else if (type == WLFC_CTL_TYPE_MAC_REQUEST_CREDIT)
+				_dhd_wlfc_credit_request(dhd, value);
+
+			else if (type == WLFC_CTL_TYPE_MAC_REQUEST_PACKET)
+				_dhd_wlfc_packet_request(dhd, value);
+
+			else if ((type == WLFC_CTL_TYPE_MAC_OPEN) ||
+				(type == WLFC_CTL_TYPE_MAC_CLOSE))
+				_dhd_wlfc_psmode_update(dhd, value, type);
+
+			else if ((type == WLFC_CTL_TYPE_MACDESC_ADD) ||
+				(type == WLFC_CTL_TYPE_MACDESC_DEL))
+				_dhd_wlfc_mac_table_update(dhd, value, type);
+
+			else if (type == WLFC_CTL_TYPE_TRANS_ID)
+				_dhd_wlfc_dbg_senum_check(dhd, value);
+
+			else if ((type == WLFC_CTL_TYPE_INTERFACE_OPEN) ||
+				(type == WLFC_CTL_TYPE_INTERFACE_CLOSE)) {
+				_dhd_wlfc_interface_update(dhd, value, type);
+			}
+
+			if (entry && WLFC_GET_REORDERSUPP(dhd->wlfc_mode)) {
+				/* suppress all packets for this mac entry from bus->txq */
+				_dhd_wlfc_suppress_txq(dhd, _dhd_wlfc_entrypkt_fn, entry);
+			}
+		}
+		if (remainder != 0 && wlfc) {
+			/* trouble..., something is not right */
+			wlfc->stats.tlv_parse_failed++;
+		}
+	}
+
+	if (wlfc)
+		wlfc->stats.dhd_hdrpulls++;
+
+	dhd_os_wlfc_unblock(dhd);
+	return BCME_OK;
+}
+
+int
+dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit, void* commit_ctx, void *pktbuf,
+	bool need_toggle_host_if)
+{
+	int ac, single_ac = 0, rc = BCME_OK;
+	dhd_wlfc_commit_info_t  commit_info;
+	athost_wl_status_info_t* ctx;
+	int bus_retry_count = 0;
+
+	uint8 traffic_map = 0; /* packets (sent + in queue); bitmask for 4 ACs + BC/MC */
+	uint8 packets_map = 0; /* packets in queue; bitmask for 4 ACs + BC/MC */
+	bool no_credit = FALSE;
+
+#ifdef LIMIT_BORROW
+	int lender;
+#endif
+
+	if ((dhdp == NULL) || (fcommit == NULL)) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhdp);
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		if (pktbuf) {
+			DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 0);
+		}
+		rc =  WLFC_UNSUPPORTED;
+		goto exit2;
+	}
+
+	ctx = (athost_wl_status_info_t*)dhdp->wlfc_state;
+
+	if (dhdp->proptxstatus_module_ignore) {
+		if (pktbuf) {
+			uint32 htod = 0;
+			WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST);
+			_dhd_wlfc_pushheader(ctx, pktbuf, FALSE, 0, 0, htod, 0, FALSE);
+			if (!fcommit(commit_ctx, pktbuf))
+				PKTFREE(ctx->osh, pktbuf, TRUE);
+			rc = BCME_OK;
+		}
+		goto exit;
+	}
+
+	memset(&commit_info, 0, sizeof(commit_info));
+
+	/*
+	Commit packets for regular AC traffic, higher priority first.
+	First use up the FIFO credits available to each AC; based on the
+	distribution and the credits left, borrow from other ACs as applicable.
+
+	NOTE:
+	If the bus between the host and firmware is overwhelmed by the
+	traffic from the host, it is possible that higher priority traffic
+	starves the lower priority queues. If that occurs often, we may
+	have to employ a weighted round-robin or ucode scheme to avoid
+	low priority packet starvation.
+	*/
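+	/*
+	In outline (a summary of the code below, not a separate algorithm):
+	1. enqueue the new packet into its AC delay queue;
+	2. for each AC, highest priority first, drain the delay queue while
+	   credit is available (or no credit is needed);
+	3. if exactly one AC has carried traffic for at least
+	   WLFC_BORROW_DEFER_PERIOD_MS, borrow credits from the other ACs.
+	*/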
+
+	if (pktbuf) {
+		ac = DHD_PKTTAG_FIFO(PKTTAG(pktbuf));
+		/* en-queue the packets to respective queue. */
+		rc = _dhd_wlfc_enque_delayq(ctx, pktbuf, ac);
+		if (rc)
+			_dhd_wlfc_prec_drop(ctx->dhdp, (ac << 1), pktbuf, FALSE);
+		else
+			ctx->stats.pktin++;
+	}
+
+	for (ac = AC_COUNT; ac >= 0; ac--) {
+		if (ctx->pkt_cnt_per_ac[ac] == 0) {
+			continue;
+		}
+		traffic_map |= (1 << ac);
+		single_ac = ac + 1;
+		while (FALSE == dhdp->proptxstatus_txoff) {
+			/* lower-priority packets coming from the delayQ are
+			 * fresh: they still need a header and have no MAC entry
+			 */
+			no_credit = (ctx->FIFO_credit[ac] < 1);
+			if (dhdp->proptxstatus_credit_ignore ||
+				((ac == AC_COUNT) && !ctx->bcmc_credit_supported)) {
+				no_credit = FALSE;
+			}
+			commit_info.needs_hdr = 1;
+			commit_info.mac_entry = NULL;
+			commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac,
+				&(commit_info.ac_fifo_credit_spent),
+				&(commit_info.needs_hdr),
+				&(commit_info.mac_entry),
+				no_credit);
+			commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED :
+				eWLFC_PKTTYPE_SUPPRESSED;
+
+			if (commit_info.p == NULL) {
+				break;
+			}
+
+			if (!dhdp->proptxstatus_credit_ignore) {
+				ASSERT(ctx->FIFO_credit[ac] >= commit_info.ac_fifo_credit_spent);
+			}
+			/* at this point we either hold a credit or no credit is needed */
+			rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info, fcommit,
+				commit_ctx);
+
+			/* Bus commits may fail (e.g. flow control); abort after retries */
+			if (rc == BCME_OK) {
+				if (commit_info.ac_fifo_credit_spent)
+					ctx->FIFO_credit[ac]--;
+			} else {
+				bus_retry_count++;
+				if (bus_retry_count >= BUS_RETRIES) {
+					DHD_ERROR(("%s: bus error %d\n", __FUNCTION__, rc));
+					goto exit;
+				}
+			}
+		}
+
+		if (ctx->pkt_cnt_per_ac[ac]) {
+			packets_map |= (1 << ac);
+		}
+	}
+
+	if ((traffic_map == 0) || dhdp->proptxstatus_credit_ignore) {
+		/* nothing was sent out and nothing remains in the queues */
+		rc = BCME_OK;
+		goto exit;
+	}
+
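+	/* traffic_map & (traffic_map - 1) clears the lowest set bit; a zero
+	 * result means at most one bit was set, i.e. a single AC has traffic.
+	 */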
+	if ((traffic_map & (traffic_map - 1)) == 0) {
+		/* only one AC exists */
+		if ((single_ac == ctx->single_ac) && ctx->allow_credit_borrow) {
+			ac = single_ac - 1;
+		} else {
+			uint32 delta;
+			uint32 curr_t = OSL_SYSUPTIME();
+
+			if (single_ac != ctx->single_ac) {
+				/* new single-AC traffic (the first single AC, or a different one) */
+				ctx->allow_credit_borrow = 0;
+				ctx->single_ac_timestamp = curr_t;
+				ctx->single_ac = (uint8)single_ac;
+				rc = BCME_OK;
+				goto exit;
+			}
+			/* same AC traffic; check whether it has lasted long enough */
+			if (curr_t > ctx->single_ac_timestamp)
+				delta = curr_t - ctx->single_ac_timestamp;
+			else
+				delta = (~(uint32)0) - ctx->single_ac_timestamp + curr_t;
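+			/* Worked example across a 32-bit wrap: with timestamp
+			 * 0xFFFFFF00 and curr_t 0x00000100, delta is 0x1FF, one
+			 * less than the true 0x200 ms elapsed; the off-by-one
+			 * is harmless at this time scale.
+			 */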
+
+			if (delta >= WLFC_BORROW_DEFER_PERIOD_MS) {
+				/* waited long enough; borrowing is now allowed */
+				ctx->allow_credit_borrow = 1;
+				ac = single_ac - 1;
+			} else {
+				rc = BCME_OK;
+				goto exit;
+			}
+		}
+	} else {
+		/* If we have multiple AC traffic, turn off borrowing, mark time and bail out */
+		ctx->allow_credit_borrow = 0;
+		ctx->single_ac_timestamp = 0;
+		ctx->single_ac = 0;
+		rc = BCME_OK;
+		goto exit;
+	}
+
+	if (packets_map == 0) {
+		/* nothing to send, skip borrow */
+		rc = BCME_OK;
+		goto exit;
+	}
+
+	/* At this point, borrow credits only for this single AC */
+	while (FALSE == dhdp->proptxstatus_txoff) {
+#ifdef LIMIT_BORROW
+		if ((lender = _dhd_wlfc_borrow_credit(ctx, AC_COUNT, ac)) == -1) {
+			break;
+		}
+#endif
+		commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac,
+			&(commit_info.ac_fifo_credit_spent),
+			&(commit_info.needs_hdr),
+			&(commit_info.mac_entry),
+			FALSE);
+		if (commit_info.p == NULL) {
+			/* before borrowing, only one AC existed, and that AC's queue is now empty */
+#ifdef LIMIT_BORROW
+			_dhd_wlfc_return_credit(ctx, lender, ac);
+#endif
+			break;
+		}
+
+		commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED :
+			eWLFC_PKTTYPE_SUPPRESSED;
+
+		rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info,
+		     fcommit, commit_ctx);
+
+		/* Bus commits may fail (e.g. flow control); abort after retries */
+		if (rc == BCME_OK) {
+
+			if (commit_info.ac_fifo_credit_spent) {
+#ifndef LIMIT_BORROW
+				ctx->FIFO_credit[ac]--;
+#endif
+			} else {
+#ifdef LIMIT_BORROW
+				_dhd_wlfc_return_credit(ctx, lender, ac);
+#endif
+			}
+		} else {
+#ifdef LIMIT_BORROW
+			_dhd_wlfc_return_credit(ctx, lender, ac);
+#endif
+			bus_retry_count++;
+			if (bus_retry_count >= BUS_RETRIES) {
+				DHD_ERROR(("%s: bus error %d\n", __FUNCTION__, rc));
+				goto exit;
+			}
+		}
+	}
+
+exit:
+	if (need_toggle_host_if && ctx->toggle_host_if) {
+		ctx->toggle_host_if = 0;
+	}
+
+exit2:
+	dhd_os_wlfc_unblock(dhdp);
+	return rc;
+}
+
+int
+dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success)
+{
+	athost_wl_status_info_t* wlfc;
+	void* pout = NULL;
+	int rtn = BCME_OK;
+	if ((dhd == NULL) || (txp == NULL)) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		rtn = WLFC_UNSUPPORTED;
+		goto EXIT;
+	}
+
+	wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+	if (DHD_PKTTAG_SIGNALONLY(PKTTAG(txp))) {
+#ifdef PROP_TXSTATUS_DEBUG
+		wlfc->stats.signal_only_pkts_freed++;
+#endif
+		/* is this a signal-only packet? */
+		_dhd_wlfc_pullheader(wlfc, txp);
+		PKTFREE(wlfc->osh, txp, TRUE);
+		goto EXIT;
+	}
+
+	if (!success || dhd->proptxstatus_txstatus_ignore) {
+		wlfc_mac_descriptor_t *entry = _dhd_wlfc_find_table_entry(wlfc, txp);
+
+		WLFC_DBGMESG(("At: %s():%d, bus_complete() failure for %p, htod_tag:0x%08x\n",
+			__FUNCTION__, __LINE__, txp, DHD_PKTTAG_H2DTAG(PKTTAG(txp))));
+		if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+			_dhd_wlfc_hanger_poppkt(wlfc->hanger, WL_TXSTATUS_GET_HSLOT(
+				DHD_PKTTAG_H2DTAG(PKTTAG(txp))), &pout, 1);
+			ASSERT(txp == pout);
+		}
+
+		/* indicate failure and free the packet */
+		dhd_txcomplete(dhd, txp, success);
+
+		/* return the credit, if necessary */
+		_dhd_wlfc_return_implied_credit(wlfc, txp);
+
+		entry->transit_count--;
+		if (entry->suppressed && (--entry->suppr_transit_count == 0)) {
+			entry->suppressed = FALSE;
+		}
+
+		PKTFREE(wlfc->osh, txp, TRUE);
+		wlfc->stats.pktout++;
+	} else {
+		/* bus confirmed pkt went to firmware side */
+		if (WLFC_GET_AFQ(dhd->wlfc_mode)) {
+			_dhd_wlfc_enque_afq(wlfc, txp);
+		} else {
+			int ret;
+			void *pktbuf_tmp = NULL;
+			int hslot = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(txp)));
+			if (_dhd_wlfc_hanger_waitevent_decreturn(wlfc->hanger, hslot) == 0) {
+				ret = _dhd_wlfc_hanger_poppkt(wlfc->hanger, hslot, &pktbuf_tmp, 1);
+				BCM_REFERENCE(ret);
+				ASSERT((ret == BCME_OK) && pktbuf_tmp && (txp == pktbuf_tmp));
+
+				/* free the packet */
+				PKTFREE(wlfc->osh, txp, TRUE);
+			}
+		}
+	}
+
+EXIT:
+	dhd_os_wlfc_unblock(dhd);
+	return rtn;
+}
+
+int
+dhd_wlfc_init(dhd_pub_t *dhd)
+{
+	char iovbuf[14]; /* Room for "tlv" + '\0' + parameter */
+	/* enable all signals & indicate host proptxstatus logic is active */
+	uint32 tlv, mode, fw_caps;
+	int ret = 0;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+	if (dhd->wlfc_enabled) {
+		DHD_ERROR(("%s():%d, Already enabled!\n", __FUNCTION__, __LINE__));
+		dhd_os_wlfc_unblock(dhd);
+		return BCME_OK;
+	}
+	dhd->wlfc_enabled = TRUE;
+	dhd_os_wlfc_unblock(dhd);
+
+	tlv = WLFC_FLAGS_RSSI_SIGNALS |
+		WLFC_FLAGS_XONXOFF_SIGNALS |
+		WLFC_FLAGS_CREDIT_STATUS_SIGNALS |
+		WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE |
+		WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
+
+	/*
+	Try to enable/disable signaling by sending the "tlv" iovar. If that
+	fails, fall back to no flow control? Print a message for now.
+	*/
+
+	/* enable proptxstatus signaling by default */
+	bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf));
+	if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+		DHD_ERROR(("dhd_wlfc_init(): failed to enable/disable bdcv2 tlv signaling\n"));
+	}
+	else {
+		/*
+		Leaving the message for now; it should be removed once the tlv
+		situation is stable.
+		*/
+		DHD_ERROR(("dhd_wlfc_init(): successfully %s bdcv2 tlv signaling, %d\n",
+			dhd->wlfc_enabled?"enabled":"disabled", tlv));
+	}
+
+	/* query caps */
+	ret = bcm_mkiovar("wlfc_mode", (char *)&mode, 4, iovbuf, sizeof(iovbuf));
+	if (ret > 0) {
+		ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
+	}
+
+	if (ret >= 0) {
+		fw_caps = *((uint32 *)iovbuf);
+		mode = 0;
+		DHD_ERROR(("%s: wlfc_mode query succeeded, fw_caps=0x%x\n", __FUNCTION__, fw_caps));
+
+		if (WLFC_IS_OLD_DEF(fw_caps)) {
+			/* enable proptxstatus v2 by default */
+			mode = WLFC_MODE_AFQ;
+		} else {
+			WLFC_SET_AFQ(mode, WLFC_GET_AFQ(fw_caps));
+			WLFC_SET_REUSESEQ(mode, WLFC_GET_REUSESEQ(fw_caps));
+			WLFC_SET_REORDERSUPP(mode, WLFC_GET_REORDERSUPP(fw_caps));
+		}
+		ret = bcm_mkiovar("wlfc_mode", (char *)&mode, 4, iovbuf, sizeof(iovbuf));
+		if (ret > 0) {
+			ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+		}
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	dhd->wlfc_mode = 0;
+	if (ret >= 0) {
+		if (WLFC_IS_OLD_DEF(mode)) {
+			WLFC_SET_AFQ(dhd->wlfc_mode, (mode == WLFC_MODE_AFQ));
+		} else {
+			dhd->wlfc_mode = mode;
+		}
+	}
+	DHD_ERROR(("dhd_wlfc_init(): wlfc_mode=0x%x, ret=%d\n", dhd->wlfc_mode, ret));
+
+	dhd_os_wlfc_unblock(dhd);
+
+	if (dhd->plat_init)
+		dhd->plat_init((void *)dhd);
+
+	return BCME_OK;
+}
+
+int
+dhd_wlfc_hostreorder_init(dhd_pub_t *dhd)
+{
+	char iovbuf[14]; /* Room for "tlv" + '\0' + parameter */
+	/* enable only ampdu hostreorder here */
+	uint32 tlv;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	DHD_TRACE(("%s():%d Enter\n", __FUNCTION__, __LINE__));
+
+	tlv = WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
+
+	/* enable proptxstatus signaling by default */
+	bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf));
+	if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+		DHD_ERROR(("%s(): failed to enable/disable bdcv2 tlv signaling\n",
+			__FUNCTION__));
+	}
+	else {
+		/*
+		Leaving the message for now; it should be removed once the tlv
+		situation is stable.
+		*/
+		DHD_ERROR(("%s(): successfully set bdcv2 tlv signaling, %d\n",
+			__FUNCTION__, tlv));
+	}
+
+	dhd_os_wlfc_block(dhd);
+	dhd->proptxstatus_mode = WLFC_ONLY_AMPDU_HOSTREORDER;
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+#ifdef SUPPORT_P2P_GO_PS
+int
+dhd_wlfc_suspend(dhd_pub_t *dhd)
+{
+	uint32 iovbuf[4]; /* Room for "tlv" + '\0' + parameter */
+	uint32 tlv = 0;
+
+	DHD_TRACE(("%s: masking wlfc events\n", __FUNCTION__));
+	if (!dhd->wlfc_enabled)
+		return -1;
+
+	bcm_mkiovar("tlv", NULL, 0, (char*)iovbuf, sizeof(iovbuf));
+	if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) {
+		DHD_ERROR(("%s: failed to get bdcv2 tlv signaling\n", __FUNCTION__));
+		return -1;
+	}
+	tlv = iovbuf[0];
+	if ((tlv & (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) == 0)
+		return 0;
+	tlv &= ~(WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS);
+	bcm_mkiovar("tlv", (char *)&tlv, 4, (char*)iovbuf, sizeof(iovbuf));
+	if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+		DHD_ERROR(("%s: failed to set bdcv2 tlv signaling to 0x%x\n",
+			__FUNCTION__, tlv));
+		return -1;
+	}
+
+	return 0;
+}
+
+int
+dhd_wlfc_resume(dhd_pub_t *dhd)
+{
+	uint32 iovbuf[4]; /* Room for "tlv" + '\0' + parameter */
+	uint32 tlv = 0;
+
+	DHD_TRACE(("%s: unmasking wlfc events\n", __FUNCTION__));
+	if (!dhd->wlfc_enabled)
+		return -1;
+
+	bcm_mkiovar("tlv", NULL, 0, (char*)iovbuf, sizeof(iovbuf));
+	if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) {
+		DHD_ERROR(("%s: failed to get bdcv2 tlv signaling\n", __FUNCTION__));
+		return -1;
+	}
+	tlv = iovbuf[0];
+	if ((tlv & (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) ==
+		(WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS))
+		return 0;
+	tlv |= (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS);
+	bcm_mkiovar("tlv", (char *)&tlv, 4, (char*)iovbuf, sizeof(iovbuf));
+	if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, (char*)iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+		DHD_ERROR(("%s: failed to set bdcv2 tlv signaling to 0x%x\n",
+			__FUNCTION__, tlv));
+		return -1;
+	}
+
+	return 0;
+}
+#endif /* SUPPORT_P2P_GO_PS */
+
+int
+dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhd);
+		return WLFC_UNSUPPORTED;
+	}
+
+	_dhd_wlfc_cleanup_txq(dhd, fn, arg);
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+/* release all packet resources */
+int
+dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhd);
+		return WLFC_UNSUPPORTED;
+	}
+
+	_dhd_wlfc_cleanup(dhd, fn, arg);
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int
+dhd_wlfc_deinit(dhd_pub_t *dhd)
+{
+	char iovbuf[32]; /* Room for "ampdu_hostreorder" or "tlv" + '\0' + parameter */
+	/* cleanup all psq related resources */
+	athost_wl_status_info_t* wlfc;
+	uint32 tlv = 0;
+	uint32 hostreorder = 0;
+	int ret = BCME_OK;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+	if (!dhd->wlfc_enabled) {
+		DHD_ERROR(("%s():%d, Already disabled!\n", __FUNCTION__, __LINE__));
+		dhd_os_wlfc_unblock(dhd);
+		return BCME_OK;
+	}
+	dhd->wlfc_enabled = FALSE;
+	dhd_os_wlfc_unblock(dhd);
+
+	/* query ampdu hostreorder */
+	bcm_mkiovar("ampdu_hostreorder", NULL, 0, iovbuf, sizeof(iovbuf));
+	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
+	if (ret == BCME_OK)
+		hostreorder = *((uint32 *)iovbuf);
+	else {
+		hostreorder = 0;
+		DHD_ERROR(("%s():%d, ampdu_hostreorder get failed Err = %d\n",
+			__FUNCTION__, __LINE__, ret));
+	}
+
+	if (hostreorder) {
+		tlv = WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
+		DHD_ERROR(("%s():%d, maintain HOST RXREORDER flag in tlv\n",
+			__FUNCTION__, __LINE__));
+	}
+
+	/* Disable proptxstatus signaling for deinit */
+	bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf));
+	ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+
+	if (ret == BCME_OK) {
+		/*
+		Leaving the message for now; it should be removed once the tlv
+		situation is stable.
+		*/
+		DHD_ERROR(("%s():%d successfully %s bdcv2 tlv signaling, %d\n",
+			__FUNCTION__, __LINE__,
+			dhd->wlfc_enabled?"enabled":"disabled", tlv));
+	} else
+		DHD_ERROR(("%s():%d failed to enable/disable bdcv2 tlv signaling Err = %d\n",
+			__FUNCTION__, __LINE__, ret));
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhd);
+		return WLFC_UNSUPPORTED;
+	}
+
+	wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+
+#ifdef PROP_TXSTATUS_DEBUG
+	if (!WLFC_GET_AFQ(dhd->wlfc_mode))
+	{
+		int i;
+		wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
+		for (i = 0; i < h->max_items; i++) {
+			if (h->items[i].state != WLFC_HANGER_ITEM_STATE_FREE) {
+				WLFC_DBGMESG(("%s() pkt[%d] = 0x%p, FIFO_credit_used:%d\n",
+					__FUNCTION__, i, h->items[i].pkt,
+					DHD_PKTTAG_CREDITCHECK(PKTTAG(h->items[i].pkt))));
+			}
+		}
+	}
+#endif
+
+	_dhd_wlfc_cleanup(dhd, NULL, NULL);
+
+	if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+		/* delete hanger */
+		_dhd_wlfc_hanger_delete(dhd->osh, wlfc->hanger);
+	}
+
+	/* free top structure */
+	MFREE(dhd->osh, dhd->wlfc_state, sizeof(athost_wl_status_info_t));
+	dhd->wlfc_state = NULL;
+	dhd->proptxstatus_mode = hostreorder ?
+		WLFC_ONLY_AMPDU_HOSTREORDER : WLFC_FCMODE_NONE;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	if (dhd->plat_deinit)
+		dhd->plat_deinit((void *)dhd);
+	return BCME_OK;
+}
+
+int dhd_wlfc_interface_event(dhd_pub_t *dhdp, uint8 action, uint8 ifid, uint8 iftype, uint8* ea)
+{
+	int rc;
+
+	if (dhdp == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhdp);
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhdp);
+		return WLFC_UNSUPPORTED;
+	}
+
+	rc = _dhd_wlfc_interface_entry_update(dhdp->wlfc_state, action, ifid, iftype, ea);
+
+	dhd_os_wlfc_unblock(dhdp);
+	return rc;
+}
+
+int dhd_wlfc_FIFOcreditmap_event(dhd_pub_t *dhdp, uint8* event_data)
+{
+	int rc;
+
+	if (dhdp == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhdp);
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhdp);
+		return WLFC_UNSUPPORTED;
+	}
+
+	rc = _dhd_wlfc_FIFOcreditmap_update(dhdp->wlfc_state, event_data);
+
+	dhd_os_wlfc_unblock(dhdp);
+
+	return rc;
+}
+
+int dhd_wlfc_BCMCCredit_support_event(dhd_pub_t *dhdp)
+{
+	int rc;
+
+	if (dhdp == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhdp);
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhdp);
+		return WLFC_UNSUPPORTED;
+	}
+
+	rc = _dhd_wlfc_BCMCCredit_support_update(dhdp->wlfc_state);
+
+	dhd_os_wlfc_unblock(dhdp);
+	return rc;
+}
+
+int
+dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+	int i;
+	uint8* ea;
+	athost_wl_status_info_t* wlfc;
+	wlfc_hanger_t* h;
+	wlfc_mac_descriptor_t* mac_table;
+	wlfc_mac_descriptor_t* interfaces;
+	char* iftypes[] = {"STA", "AP", "WDS", "p2pGO", "p2pCL"};
+
+	if (!dhdp || !strbuf) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhdp);
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhdp);
+		return WLFC_UNSUPPORTED;
+	}
+
+	wlfc = (athost_wl_status_info_t*)dhdp->wlfc_state;
+
+	mac_table = wlfc->destination_entries.nodes;
+	interfaces = wlfc->destination_entries.interfaces;
+	bcm_bprintf(strbuf, "---- wlfc stats ----\n");
+
+	if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+		h = (wlfc_hanger_t*)wlfc->hanger;
+		if (h == NULL) {
+			bcm_bprintf(strbuf, "wlfc-hanger not initialized yet\n");
+		} else {
+			bcm_bprintf(strbuf, "wlfc hanger (pushed,popped,f_push,"
+				"f_pop,f_slot, pending) = (%d,%d,%d,%d,%d,%d)\n",
+				h->pushed,
+				h->popped,
+				h->failed_to_push,
+				h->failed_to_pop,
+				h->failed_slotfind,
+				(h->pushed - h->popped));
+		}
+	}
+
+	bcm_bprintf(strbuf, "wlfc fail(tlv,credit_rqst,mac_update,psmode_update), "
+		"(dq_full,rollback_fail) = (%d,%d,%d,%d), (%d,%d)\n",
+		wlfc->stats.tlv_parse_failed,
+		wlfc->stats.credit_request_failed,
+		wlfc->stats.mac_update_failed,
+		wlfc->stats.psmode_update_failed,
+		wlfc->stats.delayq_full_error,
+		wlfc->stats.rollback_failed);
+
+	bcm_bprintf(strbuf, "PKTS (init_credit,credit,sent,drop_d,drop_s,outoforder) "
+		"(AC0[%d,%d,%d,%d,%d,%d],AC1[%d,%d,%d,%d,%d,%d],AC2[%d,%d,%d,%d,%d,%d],"
+		"AC3[%d,%d,%d,%d,%d,%d],BC_MC[%d,%d,%d,%d,%d,%d])\n",
+		wlfc->Init_FIFO_credit[0], wlfc->FIFO_credit[0], wlfc->stats.send_pkts[0],
+		wlfc->stats.drop_pkts[0], wlfc->stats.drop_pkts[1], wlfc->stats.ooo_pkts[0],
+		wlfc->Init_FIFO_credit[1], wlfc->FIFO_credit[1], wlfc->stats.send_pkts[1],
+		wlfc->stats.drop_pkts[2], wlfc->stats.drop_pkts[3], wlfc->stats.ooo_pkts[1],
+		wlfc->Init_FIFO_credit[2], wlfc->FIFO_credit[2], wlfc->stats.send_pkts[2],
+		wlfc->stats.drop_pkts[4], wlfc->stats.drop_pkts[5], wlfc->stats.ooo_pkts[2],
+		wlfc->Init_FIFO_credit[3], wlfc->FIFO_credit[3], wlfc->stats.send_pkts[3],
+		wlfc->stats.drop_pkts[6], wlfc->stats.drop_pkts[7], wlfc->stats.ooo_pkts[3],
+		wlfc->Init_FIFO_credit[4], wlfc->FIFO_credit[4], wlfc->stats.send_pkts[4],
+		wlfc->stats.drop_pkts[8], wlfc->stats.drop_pkts[9], wlfc->stats.ooo_pkts[4]);
+
+	bcm_bprintf(strbuf, "\n");
+	for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+		if (interfaces[i].occupied) {
+			char* iftype_desc;
+
+			if (interfaces[i].iftype > WLC_E_IF_ROLE_P2P_CLIENT)
+				iftype_desc = "<Unknown>";
+			else
+				iftype_desc = iftypes[interfaces[i].iftype];
+
+			ea = interfaces[i].ea;
+			bcm_bprintf(strbuf, "INTERFACE[%d].ea = "
+				"[%02x:%02x:%02x:%02x:%02x:%02x], if:%d, type: %s "
+				"netif_flow_control:%s\n", i,
+				ea[0], ea[1], ea[2], ea[3], ea[4], ea[5],
+				interfaces[i].interface_id,
+				iftype_desc, ((wlfc->hostif_flow_state[i] == OFF)
+				? " OFF":" ON"));
+
+			bcm_bprintf(strbuf, "INTERFACE[%d].PSQ(len,state,credit),(trans,supp_trans)"
+				"= (%d,%s,%d),(%d,%d)\n",
+				i,
+				interfaces[i].psq.len,
+				((interfaces[i].state ==
+				WLFC_STATE_OPEN) ? "OPEN":"CLOSE"),
+				interfaces[i].requested_credit,
+				interfaces[i].transit_count, interfaces[i].suppr_transit_count);
+
+			bcm_bprintf(strbuf, "INTERFACE[%d].PSQ"
+				"(delay0,sup0,afq0),(delay1,sup1,afq1),(delay2,sup2,afq2),"
+				"(delay3,sup3,afq3),(delay4,sup4,afq4) = (%d,%d,%d),"
+				"(%d,%d,%d),(%d,%d,%d),(%d,%d,%d),(%d,%d,%d)\n",
+				i,
+				interfaces[i].psq.q[0].len,
+				interfaces[i].psq.q[1].len,
+				interfaces[i].afq.q[0].len,
+				interfaces[i].psq.q[2].len,
+				interfaces[i].psq.q[3].len,
+				interfaces[i].afq.q[1].len,
+				interfaces[i].psq.q[4].len,
+				interfaces[i].psq.q[5].len,
+				interfaces[i].afq.q[2].len,
+				interfaces[i].psq.q[6].len,
+				interfaces[i].psq.q[7].len,
+				interfaces[i].afq.q[3].len,
+				interfaces[i].psq.q[8].len,
+				interfaces[i].psq.q[9].len,
+				interfaces[i].afq.q[4].len);
+		}
+	}
+
+	bcm_bprintf(strbuf, "\n");
+	for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+		if (mac_table[i].occupied) {
+			ea = mac_table[i].ea;
+			bcm_bprintf(strbuf, "MAC_table[%d].ea = "
+				"[%02x:%02x:%02x:%02x:%02x:%02x], if:%d \n", i,
+				ea[0], ea[1], ea[2], ea[3], ea[4], ea[5],
+				mac_table[i].interface_id);
+
+			bcm_bprintf(strbuf, "MAC_table[%d].PSQ(len,state,credit),(trans,supp_trans)"
+				"= (%d,%s,%d),(%d,%d)\n",
+				i,
+				mac_table[i].psq.len,
+				((mac_table[i].state ==
+				WLFC_STATE_OPEN) ? "OPEN":"CLOSE"),
+				mac_table[i].requested_credit,
+				mac_table[i].transit_count, mac_table[i].suppr_transit_count);
+#ifdef PROP_TXSTATUS_DEBUG
+			bcm_bprintf(strbuf, "MAC_table[%d]: (opened, closed) = (%d, %d)\n",
+				i, mac_table[i].opened_ct, mac_table[i].closed_ct);
+#endif
+			bcm_bprintf(strbuf, "MAC_table[%d].PSQ"
+				"(delay0,sup0,afq0),(delay1,sup1,afq1),(delay2,sup2,afq2),"
+				"(delay3,sup3,afq3),(delay4,sup4,afq4) =(%d,%d,%d),"
+				"(%d,%d,%d),(%d,%d,%d),(%d,%d,%d),(%d,%d,%d)\n",
+				i,
+				mac_table[i].psq.q[0].len,
+				mac_table[i].psq.q[1].len,
+				mac_table[i].afq.q[0].len,
+				mac_table[i].psq.q[2].len,
+				mac_table[i].psq.q[3].len,
+				mac_table[i].afq.q[1].len,
+				mac_table[i].psq.q[4].len,
+				mac_table[i].psq.q[5].len,
+				mac_table[i].afq.q[2].len,
+				mac_table[i].psq.q[6].len,
+				mac_table[i].psq.q[7].len,
+				mac_table[i].afq.q[3].len,
+				mac_table[i].psq.q[8].len,
+				mac_table[i].psq.q[9].len,
+				mac_table[i].afq.q[4].len);
+
+		}
+	}
+
+#ifdef PROP_TXSTATUS_DEBUG
+	{
+		int avg;
+		int moving_avg = 0;
+		int moving_samples;
+
+		if (wlfc->stats.latency_sample_count) {
+			moving_samples = sizeof(wlfc->stats.deltas)/sizeof(uint32);
+
+			for (i = 0; i < moving_samples; i++)
+				moving_avg += wlfc->stats.deltas[i];
+			moving_avg /= moving_samples;
+
+			avg = (100 * wlfc->stats.total_status_latency) /
+				wlfc->stats.latency_sample_count;
+			bcm_bprintf(strbuf, "txstatus latency (average, last, moving[%d]) = "
+				"(%d.%d, %03d, %03d)\n",
+				moving_samples, avg/100, (avg - (avg/100)*100),
+				wlfc->stats.latency_most_recent,
+				moving_avg);
+		}
+	}
+
+	bcm_bprintf(strbuf, "wlfc- fifo[0-5] credit stats: sent = (%d,%d,%d,%d,%d,%d), "
+		"back = (%d,%d,%d,%d,%d,%d)\n",
+		wlfc->stats.fifo_credits_sent[0],
+		wlfc->stats.fifo_credits_sent[1],
+		wlfc->stats.fifo_credits_sent[2],
+		wlfc->stats.fifo_credits_sent[3],
+		wlfc->stats.fifo_credits_sent[4],
+		wlfc->stats.fifo_credits_sent[5],
+
+		wlfc->stats.fifo_credits_back[0],
+		wlfc->stats.fifo_credits_back[1],
+		wlfc->stats.fifo_credits_back[2],
+		wlfc->stats.fifo_credits_back[3],
+		wlfc->stats.fifo_credits_back[4],
+		wlfc->stats.fifo_credits_back[5]);
+	{
+		uint32 fifo_cr_sent = 0;
+		uint32 fifo_cr_acked = 0;
+		uint32 request_cr_sent = 0;
+		uint32 request_cr_ack = 0;
+		uint32 bc_mc_cr_ack = 0;
+
+		for (i = 0; i < sizeof(wlfc->stats.fifo_credits_sent)/sizeof(uint32); i++) {
+			fifo_cr_sent += wlfc->stats.fifo_credits_sent[i];
+		}
+
+		for (i = 0; i < sizeof(wlfc->stats.fifo_credits_back)/sizeof(uint32); i++) {
+			fifo_cr_acked += wlfc->stats.fifo_credits_back[i];
+		}
+
+		for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+			if (wlfc->destination_entries.nodes[i].occupied) {
+				request_cr_sent +=
+					wlfc->destination_entries.nodes[i].dstncredit_sent_packets;
+			}
+		}
+		for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+			if (wlfc->destination_entries.interfaces[i].occupied) {
+				request_cr_sent +=
+				wlfc->destination_entries.interfaces[i].dstncredit_sent_packets;
+			}
+		}
+		for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+			if (wlfc->destination_entries.nodes[i].occupied) {
+				request_cr_ack +=
+					wlfc->destination_entries.nodes[i].dstncredit_acks;
+			}
+		}
+		for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+			if (wlfc->destination_entries.interfaces[i].occupied) {
+				request_cr_ack +=
+					wlfc->destination_entries.interfaces[i].dstncredit_acks;
+			}
+		}
+		bcm_bprintf(strbuf, "wlfc- (sent, status) => pq(%d,%d), vq(%d,%d),"
+			"other:%d, bc_mc:%d, signal-only, (sent,freed): (%d,%d)",
+			fifo_cr_sent, fifo_cr_acked,
+			request_cr_sent, request_cr_ack,
+			wlfc->destination_entries.other.dstncredit_acks,
+			bc_mc_cr_ack,
+			wlfc->stats.signal_only_pkts_sent, wlfc->stats.signal_only_pkts_freed);
+	}
+#endif /* PROP_TXSTATUS_DEBUG */
+	bcm_bprintf(strbuf, "\n");
+	bcm_bprintf(strbuf, "wlfc- pkt((in,2bus,txstats,hdrpull,out),(dropped,hdr_only,wlc_tossed)"
+		"(freed,free_err,rollback)) = "
+		"((%d,%d,%d,%d,%d),(%d,%d,%d),(%d,%d,%d))\n",
+		wlfc->stats.pktin,
+		wlfc->stats.pkt2bus,
+		wlfc->stats.txstatus_in,
+		wlfc->stats.dhd_hdrpulls,
+		wlfc->stats.pktout,
+
+		wlfc->stats.pktdropped,
+		wlfc->stats.wlfc_header_only_pkt,
+		wlfc->stats.wlc_tossed_pkts,
+
+		wlfc->stats.pkt_freed,
+		wlfc->stats.pkt_free_err, wlfc->stats.rollback);
+
+	bcm_bprintf(strbuf, "wlfc- suppress((d11,wlc,err),enq(d11,wl,hq,mac?),retx(d11,wlc,hq)) = "
+		"((%d,%d,%d),(%d,%d,%d,%d),(%d,%d,%d))\n",
+		wlfc->stats.d11_suppress,
+		wlfc->stats.wl_suppress,
+		wlfc->stats.bad_suppress,
+
+		wlfc->stats.psq_d11sup_enq,
+		wlfc->stats.psq_wlsup_enq,
+		wlfc->stats.psq_hostq_enq,
+		wlfc->stats.mac_handle_notfound,
+
+		wlfc->stats.psq_d11sup_retx,
+		wlfc->stats.psq_wlsup_retx,
+		wlfc->stats.psq_hostq_retx);
+
+	bcm_bprintf(strbuf, "wlfc- cleanup(txq,psq,fw) = (%d,%d,%d)\n",
+		wlfc->stats.cleanup_txq_cnt,
+		wlfc->stats.cleanup_psq_cnt,
+		wlfc->stats.cleanup_fw_cnt);
+
+	bcm_bprintf(strbuf, "wlfc- generic error: %d\n", wlfc->stats.generic_error);
+
+	for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+		bcm_bprintf(strbuf, "wlfc- if[%d], pkt_cnt_in_q/AC[0-4] = (%d,%d,%d,%d,%d)\n", i,
+			wlfc->pkt_cnt_in_q[i][0],
+			wlfc->pkt_cnt_in_q[i][1],
+			wlfc->pkt_cnt_in_q[i][2],
+			wlfc->pkt_cnt_in_q[i][3],
+			wlfc->pkt_cnt_in_q[i][4]);
+	}
+	bcm_bprintf(strbuf, "\n");
+
+	dhd_os_wlfc_unblock(dhdp);
+	return BCME_OK;
+}
+
+int dhd_wlfc_clear_counts(dhd_pub_t *dhd)
+{
+	athost_wl_status_info_t* wlfc;
+	wlfc_hanger_t* hanger;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhd);
+		return WLFC_UNSUPPORTED;
+	}
+
+	wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+
+	memset(&wlfc->stats, 0, sizeof(athost_wl_stat_counters_t));
+
+	if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+		hanger = (wlfc_hanger_t*)wlfc->hanger;
+
+		hanger->pushed = 0;
+		hanger->popped = 0;
+		hanger->failed_slotfind = 0;
+		hanger->failed_to_pop = 0;
+		hanger->failed_to_push = 0;
+	}
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_get_enable(dhd_pub_t *dhd, bool *val)
+{
+	if (!dhd || !val) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	*val = dhd->wlfc_enabled;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_get_mode(dhd_pub_t *dhd, int *val)
+{
+	if (!dhd || !val) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	*val = dhd->wlfc_state ? dhd->proptxstatus_mode : 0;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_set_mode(dhd_pub_t *dhd, int val)
+{
+	if (!dhd) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	/* writes hold both locks, so a read may hold either one */
+	if (dhd->wlfc_state) {
+		dhd->proptxstatus_mode = val & 0xff;
+	}
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+bool dhd_wlfc_is_header_only_pkt(dhd_pub_t * dhd, void *pktbuf)
+{
+	athost_wl_status_info_t* wlfc;
+	bool rc = FALSE;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return FALSE;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+		dhd_os_wlfc_unblock(dhd);
+		return FALSE;
+	}
+
+	wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+
+	if (PKTLEN(wlfc->osh, pktbuf) == 0) {
+		wlfc->stats.wlfc_header_only_pkt++;
+		rc = TRUE;
+	}
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return rc;
+}
+
+int dhd_wlfc_flowcontrol(dhd_pub_t *dhdp, bool state, bool bAcquireLock)
+{
+	if (dhdp == NULL) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	if (bAcquireLock) {
+		dhd_os_wlfc_block(dhdp);
+	}
+
+	if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE) ||
+		dhdp->proptxstatus_module_ignore) {
+		if (bAcquireLock) {
+			dhd_os_wlfc_unblock(dhdp);
+		}
+		return WLFC_UNSUPPORTED;
+	}
+
+	if (state != dhdp->proptxstatus_txoff) {
+		dhdp->proptxstatus_txoff = state;
+	}
+
+	if (bAcquireLock) {
+		dhd_os_wlfc_unblock(dhdp);
+	}
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_get_module_ignore(dhd_pub_t *dhd, int *val)
+{
+	if (!dhd || !val) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	*val = dhd->proptxstatus_module_ignore;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_set_module_ignore(dhd_pub_t *dhd, int val)
+{
+	char iovbuf[14]; /* Room for "tlv" + '\0' + parameter */
+	uint32 tlv = 0;
+	bool bChanged = FALSE;
+
+	if (!dhd) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	if ((bool)val != dhd->proptxstatus_module_ignore) {
+		/* writes hold both locks, so a read may hold either one */
+		dhd->proptxstatus_module_ignore = (val != 0);
+		/* force txstatus_ignore sync with proptxstatus_module_ignore */
+		dhd->proptxstatus_txstatus_ignore = dhd->proptxstatus_module_ignore;
+		if (FALSE == dhd->proptxstatus_module_ignore) {
+			tlv = WLFC_FLAGS_RSSI_SIGNALS |
+				WLFC_FLAGS_XONXOFF_SIGNALS |
+				WLFC_FLAGS_CREDIT_STATUS_SIGNALS |
+				WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE;
+		}
+		/* always enable host reorder */
+		tlv |= WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
+		bChanged = TRUE;
+	}
+
+	dhd_os_wlfc_unblock(dhd);
+
+	if (bChanged) {
+		/* apply the selected proptxstatus signaling mode */
+		bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf));
+		if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+			DHD_ERROR(("%s: failed to set bdcv2 tlv signaling to 0x%x\n",
+				__FUNCTION__, tlv));
+		}
+		else {
+			DHD_ERROR(("%s: successfully set bdcv2 tlv signaling to 0x%x\n",
+				__FUNCTION__, tlv));
+		}
+	}
+	return BCME_OK;
+}
+
+int dhd_wlfc_get_credit_ignore(dhd_pub_t *dhd, int *val)
+{
+	if (!dhd || !val) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	*val = dhd->proptxstatus_credit_ignore;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_set_credit_ignore(dhd_pub_t *dhd, int val)
+{
+	if (!dhd) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	/* writes hold both locks, so a read may hold either one */
+	dhd->proptxstatus_credit_ignore = (val != 0);
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_get_txstatus_ignore(dhd_pub_t *dhd, int *val)
+{
+	if (!dhd || !val) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	*val = dhd->proptxstatus_txstatus_ignore;
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+int dhd_wlfc_set_txstatus_ignore(dhd_pub_t *dhd, int val)
+{
+	if (!dhd) {
+		DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+		return BCME_BADARG;
+	}
+
+	dhd_os_wlfc_block(dhd);
+
+	/* writes hold both locks, so a read may hold either one */
+	dhd->proptxstatus_txstatus_ignore = (val != 0);
+
+	dhd_os_wlfc_unblock(dhd);
+
+	return BCME_OK;
+}
+
+#endif /* PROP_TXSTATUS */
diff --git a/drivers/staging/edison-bcm43340/dhd_wlfc.h b/drivers/staging/edison-bcm43340/dhd_wlfc.h
new file mode 100644
index 0000000..8275741
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/dhd_wlfc.h
@@ -0,0 +1,505 @@
+/*
+* Copyright (C) 1999-2014, Broadcom Corporation
+* 
+*      Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2 (the "GPL"),
+* available at http://www.broadcom.com/licenses/GPLv2.php, with the
+* following added to such license:
+* 
+*      As a special exception, the copyright holders of this software give you
+* permission to link this software with independent modules, and to copy and
+* distribute the resulting executable under terms of your choice, provided that
+* you also meet, for each linked independent module, the terms and conditions of
+* the license of that module.  An independent module is a module which is not
+* derived from this software.  The special exception does not apply to any
+* modifications of the software.
+* 
+*      Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a license
+* other than the GPL, without Broadcom's express prior written consent.
+* $Id: dhd_wlfc.h 472502 2014-04-24 05:59:06Z $
+*
+*/
+#ifndef __wlfc_host_driver_definitions_h__
+#define __wlfc_host_driver_definitions_h__
+
+/* #define OOO_DEBUG */
+
+#define WLFC_UNSUPPORTED -9999
+
+#define WLFC_NO_TRAFFIC	-1
+#define WLFC_MULTI_TRAFFIC 0
+
+#define BUS_RETRIES 1	/* # of retries before aborting a bus tx operation */
+
+/* 16 bits will provide an absolute max of 65536 slots */
+#define WLFC_HANGER_MAXITEMS 3072
+
+#define WLFC_HANGER_ITEM_STATE_FREE			1
+#define WLFC_HANGER_ITEM_STATE_INUSE			2
+#define WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED		3
+#define WLFC_HANGER_ITEM_STATE_WAIT_CLEAN		4
+
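+/* The state names suggest the item lifecycle (a sketch inferred from the
+ * names, not verified against every code path): FREE -> INUSE on push;
+ * INUSE -> INUSE_SUPPRESSED when the firmware suppresses the packet;
+ * WAIT_CLEAN marks a slot pending cleanup before it returns to FREE.
+ */
+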
+#define WLFC_HANGER_ITEM_WAIT_EVENT_COUNT		2
+#define WLFC_HANGER_ITEM_WAIT_EVENT_INVALID		255
+
+typedef enum {
+	Q_TYPE_PSQ,
+	Q_TYPE_AFQ
+} q_type_t;
+
+typedef enum ewlfc_packet_state {
+	eWLFC_PKTTYPE_NEW,
+	eWLFC_PKTTYPE_DELAYED,
+	eWLFC_PKTTYPE_SUPPRESSED,
+	eWLFC_PKTTYPE_MAX
+} ewlfc_packet_state_t;
+
+typedef enum ewlfc_mac_entry_action {
+	eWLFC_MAC_ENTRY_ACTION_ADD,
+	eWLFC_MAC_ENTRY_ACTION_DEL,
+	eWLFC_MAC_ENTRY_ACTION_UPDATE,
+	eWLFC_MAC_ENTRY_ACTION_MAX
+} ewlfc_mac_entry_action_t;
+
+typedef struct wlfc_hanger_item {
+	uint8	state;
+	uint8   gen;
+	uint8	waitevent;	/* wait for txstatus_update and txcomplete before freeing a packet */
+	uint8	pad;
+	uint32	identifier;
+	void*	pkt;
+#ifdef PROP_TXSTATUS_DEBUG
+	uint32	push_time;
+#endif
+	struct wlfc_hanger_item *next;
+} wlfc_hanger_item_t;
+
+typedef struct wlfc_hanger {
+	int max_items;
+	uint32 pushed;
+	uint32 popped;
+	uint32 failed_to_push;
+	uint32 failed_to_pop;
+	uint32 failed_slotfind;
+	uint32 slot_pos;
+	wlfc_hanger_item_t items[1];
+} wlfc_hanger_t;
+
+#define WLFC_HANGER_SIZE(n)	((sizeof(wlfc_hanger_t) - \
+	sizeof(wlfc_hanger_item_t)) + ((n)*sizeof(wlfc_hanger_item_t)))
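+
+/*
+ * items[1] is the pre-C99 flexible-array idiom, so a hanger with n slots is
+ * sized with WLFC_HANGER_SIZE(n). A sketch of the allocation the creator
+ * routine presumably performs (illustrative, not the actual implementation):
+ *
+ *	wlfc_hanger_t *h = MALLOC(osh, WLFC_HANGER_SIZE(WLFC_HANGER_MAXITEMS));
+ *	h->max_items = WLFC_HANGER_MAXITEMS;
+ */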
+
+#define WLFC_STATE_OPEN		1
+#define WLFC_STATE_CLOSE	2
+
+#define WLFC_PSQ_PREC_COUNT		((AC_COUNT + 1) * 2) /* a delay and a suppress queue for each AC and for bc/mc */
+#define WLFC_AFQ_PREC_COUNT		(AC_COUNT + 1)
+
+#define WLFC_PSQ_LEN			2048
+
+#define WLFC_FLOWCONTROL_HIWATER	(2048 - 256)
+#define WLFC_FLOWCONTROL_LOWATER	256
+
+#define WLFC_LOG_BUF_SIZE		(1024*1024)
+
+typedef struct wlfc_mac_descriptor {
+	uint8 occupied;
+	uint8 interface_id;
+	uint8 iftype;
+	uint8 state;
+	uint8 ac_bitmap; /* for APSD */
+	uint8 requested_credit;
+	uint8 requested_packet;
+	uint8 ea[ETHER_ADDR_LEN];
+	/*
+	maintain a (MAC, AC)-based sequence count for packets going to the
+	device, as well as for bc/mc.
+	*/
+	uint8 seq[AC_COUNT + 1];
+	uint8 generation;
+	struct pktq	psq;
+	/* packets at firmware */
+	struct pktq	afq;
+	/* The AC pending bitmap that was reported to the fw at last change */
+	uint8 traffic_lastreported_bmp;
+	/* The new AC pending bitmap */
+	uint8 traffic_pending_bmp;
+	/* 1= send on next opportunity */
+	uint8 send_tim_signal;
+	uint8 mac_handle;
+	/* Number of packets at dongle for this entry. */
+	uint transit_count;
+	/* Number of suppressions to wait for before evicting from the delayQ */
+	uint suppr_transit_count;
+	/* flag. TRUE when in suppress state */
+	uint8 suppressed;
+
+#ifdef PROP_TXSTATUS_DEBUG
+	uint32 dstncredit_sent_packets;
+	uint32 dstncredit_acks;
+	uint32 opened_ct;
+	uint32 closed_ct;
+#endif
+	struct wlfc_mac_descriptor* prev;
+	struct wlfc_mac_descriptor* next;
+} wlfc_mac_descriptor_t;
+
+typedef struct dhd_wlfc_commit_info {
+	uint8					needs_hdr;
+	uint8					ac_fifo_credit_spent;
+	ewlfc_packet_state_t	pkt_type;
+	wlfc_mac_descriptor_t*	mac_entry;
+	void*					p;
+} dhd_wlfc_commit_info_t;
+
+#define WLFC_DECR_SEQCOUNT(entry, prec) do { if (entry->seq[(prec)] == 0) {\
+	entry->seq[prec] = 0xff; } else entry->seq[prec]--;} while (0)
+
+#define WLFC_INCR_SEQCOUNT(entry, prec) entry->seq[(prec)]++
+#define WLFC_SEQCOUNT(entry, prec) entry->seq[(prec)]
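+
+/* The sequence counters are per (entry, precedence) and wrap within a byte:
+ * decrementing from 0 yields 0xff, mirroring the natural uint8 wrap of the
+ * increment.
+ */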
+
+typedef struct athost_wl_stat_counters {
+	uint32	pktin;
+	uint32	pktout;
+	uint32	pkt2bus;
+	uint32	pktdropped;
+	uint32	tlv_parse_failed;
+	uint32	rollback;
+	uint32	rollback_failed;
+	uint32	delayq_full_error;
+	uint32	credit_request_failed;
+	uint32	packet_request_failed;
+	uint32	mac_update_failed;
+	uint32	psmode_update_failed;
+	uint32	interface_update_failed;
+	uint32	wlfc_header_only_pkt;
+	uint32	txstatus_in;
+	uint32	d11_suppress;
+	uint32	wl_suppress;
+	uint32	bad_suppress;
+	uint32	pkt_freed;
+	uint32	pkt_free_err;
+	uint32	psq_wlsup_retx;
+	uint32	psq_wlsup_enq;
+	uint32	psq_d11sup_retx;
+	uint32	psq_d11sup_enq;
+	uint32	psq_hostq_retx;
+	uint32	psq_hostq_enq;
+	uint32	mac_handle_notfound;
+	uint32	wlc_tossed_pkts;
+	uint32	dhd_hdrpulls;
+	uint32	generic_error;
+	/* an extra one for bc/mc traffic */
+	uint32	send_pkts[AC_COUNT + 1];
+	uint32	drop_pkts[WLFC_PSQ_PREC_COUNT];
+	uint32	ooo_pkts[AC_COUNT + 1];
+#ifdef PROP_TXSTATUS_DEBUG
+	/* all pkt2bus -> txstatus latency accumulated */
+	uint32	latency_sample_count;
+	uint32	total_status_latency;
+	uint32	latency_most_recent;
+	int	idx_delta;
+	uint32	deltas[10];
+	uint32	fifo_credits_sent[6];
+	uint32	fifo_credits_back[6];
+	uint32	dropped_qfull[6];
+	uint32	signal_only_pkts_sent;
+	uint32	signal_only_pkts_freed;
+#endif
+	uint32	cleanup_txq_cnt;
+	uint32	cleanup_psq_cnt;
+	uint32	cleanup_fw_cnt;
+} athost_wl_stat_counters_t;
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do { \
+	(ctx)->stats.fifo_credits_sent[(ac)]++;} while (0)
+#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do { \
+	(ctx)->stats.fifo_credits_back[(ac)]++;} while (0)
+#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do { \
+	(ctx)->stats.dropped_qfull[(ac)]++;} while (0)
+#else
+#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do {} while (0)
+#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do {} while (0)
+#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do {} while (0)
+#endif
+
+#define WLFC_FCMODE_NONE				0
+#define WLFC_FCMODE_IMPLIED_CREDIT		1
+#define WLFC_FCMODE_EXPLICIT_CREDIT		2
+#define WLFC_ONLY_AMPDU_HOSTREORDER		3
+
+/* How long to defer borrowing in milliseconds */
+#define WLFC_BORROW_DEFER_PERIOD_MS 100
+
+/* How long to defer flow control in milliseconds */
+#define WLFC_FC_DEFER_PERIOD_MS 200
+
+/* Mask to represent available ACs (note: BC/MC is ignored) */
+#define WLFC_AC_MASK 0xF
+
+typedef struct athost_wl_status_info {
+	uint8	last_seqid_to_wlc;
+
+	/* OSL handle */
+	osl_t*	osh;
+	/* dhd pub */
+	void*	dhdp;
+
+	/* stats */
+	athost_wl_stat_counters_t stats;
+
+	int		Init_FIFO_credit[AC_COUNT + 2];
+
+	/* the additional ones are for bc/mc and ATIM FIFO */
+	int		FIFO_credit[AC_COUNT + 2];
+
+	/* Credit borrow counts for each FIFO from each of the other FIFOs */
+	int		credits_borrowed[AC_COUNT + 2][AC_COUNT + 2];
+
+	/* packet hanger and MAC->handle lookup table */
+	void*	hanger;
+	struct {
+		/* table for individual nodes */
+		wlfc_mac_descriptor_t	nodes[WLFC_MAC_DESC_TABLE_SIZE];
+		/* table for interfaces */
+		wlfc_mac_descriptor_t	interfaces[WLFC_MAX_IFNUM];
+		/* OS may send packets to unknown (unassociated) destinations */
+		/* A place holder for bc/mc and packets to unknown destinations */
+		wlfc_mac_descriptor_t	other;
+	} destination_entries;
+
+	wlfc_mac_descriptor_t *active_entry_head;
+	int active_entry_count;
+
+	wlfc_mac_descriptor_t* requested_entry[WLFC_MAC_DESC_TABLE_SIZE];
+	int requested_entry_count;
+
+	/* pkt counts for each interface and ac */
+	int	pkt_cnt_in_q[WLFC_MAX_IFNUM][AC_COUNT+1];
+	int	pkt_cnt_per_ac[AC_COUNT+1];
+	uint8	allow_fc;
+	uint32  fc_defer_timestamp;
+	/* ON/OFF state for flow control to the host network interface */
+	uint8	hostif_flow_state[WLFC_MAX_IFNUM];
+	uint8	host_ifidx;
+	/* to flow control an OS interface */
+	uint8	toggle_host_if;
+
+	/* To borrow credits */
+	uint8   allow_credit_borrow;
+
+	/* ac number for the first single ac traffic */
+	uint8	single_ac;
+
+	/* Timestamp for the first single ac traffic */
+	uint32  single_ac_timestamp;
+
+	bool	bcmc_credit_supported;
+
+} athost_wl_status_info_t;
+
+/* Please be mindful that total pkttag space is 32 octets only */
+typedef struct dhd_pkttag {
+	/*
+	b[15]  - 1 = wlfc packet
+	b[14:13]  - encryption exemption
+	b[12 ] - 1 = event channel
+	b[11 ] - 1 = this packet was sent in response to one time packet request,
+	do not increment credit on status for this one. [WLFC_CTL_TYPE_MAC_REQUEST_PACKET].
+	b[10 ] - 1 = signal-only-packet to firmware [i.e. nothing to piggyback on]
+	b[9  ] - 1 = packet is host->firmware (transmit direction)
+	       - 0 = packet received from firmware (firmware->host)
+	b[8  ] - 1 = packet was sent due to credit_request (pspoll),
+	             packet does not count against FIFO credit.
+	       - 0 = normal transaction, packet counts against FIFO credit
+	b[7  ] - 1 = AP, 0 = STA
+	b[6:4] - AC FIFO number
+	b[3:0] - interface index
+	*/
+	uint16	if_flags;
+	/* destination MAC address for this packet so that not every
+	module needs to open the packet to find this
+	*/
+	uint8	dstn_ether[ETHER_ADDR_LEN];
+	/*
+	This 32-bit goes from host to device for every packet.
+	*/
+	uint32	htod_tag;
+
+	/*
+	This 16-bit is original seq number for every suppress packet.
+	*/
+	uint16	htod_seq;
+
+	/*
+	This address is mac entry for every packet.
+	*/
+	void*	entry;
+	/* bus specific stuff */
+	union {
+		struct {
+			void* stuff;
+			uint32 thing1;
+			uint32 thing2;
+		} sd;
+		struct {
+			void* bus;
+			void* urb;
+		} usb;
+	} bus_specific;
+} dhd_pkttag_t;
+
+#define DHD_PKTTAG_WLFCPKT_MASK			0x1
+#define DHD_PKTTAG_WLFCPKT_SHIFT		15
+#define DHD_PKTTAG_WLFCPKT_SET(tag, value)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_WLFCPKT_MASK << DHD_PKTTAG_WLFCPKT_SHIFT)) | \
+	(((value) & DHD_PKTTAG_WLFCPKT_MASK) << DHD_PKTTAG_WLFCPKT_SHIFT)
+#define DHD_PKTTAG_WLFCPKT(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_WLFCPKT_SHIFT) & DHD_PKTTAG_WLFCPKT_MASK)
+
+#define DHD_PKTTAG_EXEMPT_MASK			0x3
+#define DHD_PKTTAG_EXEMPT_SHIFT			13
+#define DHD_PKTTAG_EXEMPT_SET(tag, value)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_EXEMPT_MASK << DHD_PKTTAG_EXEMPT_SHIFT)) | \
+	(((value) & DHD_PKTTAG_EXEMPT_MASK) << DHD_PKTTAG_EXEMPT_SHIFT)
+#define DHD_PKTTAG_EXEMPT(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_EXEMPT_SHIFT) & DHD_PKTTAG_EXEMPT_MASK)
+
+#define DHD_PKTTAG_EVENT_MASK			0x1
+#define DHD_PKTTAG_EVENT_SHIFT			12
+#define DHD_PKTTAG_SETEVENT(tag, event)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_EVENT_MASK << DHD_PKTTAG_EVENT_SHIFT)) | \
+	(((event) & DHD_PKTTAG_EVENT_MASK) << DHD_PKTTAG_EVENT_SHIFT)
+#define DHD_PKTTAG_EVENT(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_EVENT_SHIFT) & DHD_PKTTAG_EVENT_MASK)
+
+#define DHD_PKTTAG_ONETIMEPKTRQST_MASK		0x1
+#define DHD_PKTTAG_ONETIMEPKTRQST_SHIFT		11
+#define DHD_PKTTAG_SETONETIMEPKTRQST(tag)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_ONETIMEPKTRQST_MASK << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)) | \
+	(1 << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)
+#define DHD_PKTTAG_ONETIMEPKTRQST(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_ONETIMEPKTRQST_SHIFT) & DHD_PKTTAG_ONETIMEPKTRQST_MASK)
+
+#define DHD_PKTTAG_SIGNALONLY_MASK		0x1
+#define DHD_PKTTAG_SIGNALONLY_SHIFT		10
+#define DHD_PKTTAG_SETSIGNALONLY(tag, signalonly)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_SIGNALONLY_MASK << DHD_PKTTAG_SIGNALONLY_SHIFT)) | \
+	(((signalonly) & DHD_PKTTAG_SIGNALONLY_MASK) << DHD_PKTTAG_SIGNALONLY_SHIFT)
+#define DHD_PKTTAG_SIGNALONLY(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_SIGNALONLY_SHIFT) & DHD_PKTTAG_SIGNALONLY_MASK)
+
+#define DHD_PKTTAG_PKTDIR_MASK			0x1
+#define DHD_PKTTAG_PKTDIR_SHIFT			9
+#define DHD_PKTTAG_SETPKTDIR(tag, dir)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_PKTDIR_MASK << DHD_PKTTAG_PKTDIR_SHIFT)) | \
+	(((dir) & DHD_PKTTAG_PKTDIR_MASK) << DHD_PKTTAG_PKTDIR_SHIFT)
+#define DHD_PKTTAG_PKTDIR(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_PKTDIR_SHIFT) & DHD_PKTTAG_PKTDIR_MASK)
+
+#define DHD_PKTTAG_CREDITCHECK_MASK		0x1
+#define DHD_PKTTAG_CREDITCHECK_SHIFT		8
+#define DHD_PKTTAG_SETCREDITCHECK(tag, check)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_CREDITCHECK_MASK << DHD_PKTTAG_CREDITCHECK_SHIFT)) | \
+	(((check) & DHD_PKTTAG_CREDITCHECK_MASK) << DHD_PKTTAG_CREDITCHECK_SHIFT)
+#define DHD_PKTTAG_CREDITCHECK(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_CREDITCHECK_SHIFT) & DHD_PKTTAG_CREDITCHECK_MASK)
+
+#define DHD_PKTTAG_IFTYPE_MASK			0x1
+#define DHD_PKTTAG_IFTYPE_SHIFT			7
+#define DHD_PKTTAG_SETIFTYPE(tag, isAP)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & \
+	~(DHD_PKTTAG_IFTYPE_MASK << DHD_PKTTAG_IFTYPE_SHIFT)) | \
+	(((isAP) & DHD_PKTTAG_IFTYPE_MASK) << DHD_PKTTAG_IFTYPE_SHIFT)
+#define DHD_PKTTAG_IFTYPE(tag)	((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_IFTYPE_SHIFT) & DHD_PKTTAG_IFTYPE_MASK)
+
+#define DHD_PKTTAG_FIFO_MASK			0x7
+#define DHD_PKTTAG_FIFO_SHIFT			4
+#define DHD_PKTTAG_SETFIFO(tag, fifo)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & ~(DHD_PKTTAG_FIFO_MASK << DHD_PKTTAG_FIFO_SHIFT)) | \
+	(((fifo) & DHD_PKTTAG_FIFO_MASK) << DHD_PKTTAG_FIFO_SHIFT)
+#define DHD_PKTTAG_FIFO(tag)		((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_FIFO_SHIFT) & DHD_PKTTAG_FIFO_MASK)
+
+#define DHD_PKTTAG_IF_MASK			0xf
+#define DHD_PKTTAG_IF_SHIFT			0
+#define DHD_PKTTAG_SETIF(tag, if)	((dhd_pkttag_t*)(tag))->if_flags = \
+	(((dhd_pkttag_t*)(tag))->if_flags & ~(DHD_PKTTAG_IF_MASK << DHD_PKTTAG_IF_SHIFT)) | \
+	(((if) & DHD_PKTTAG_IF_MASK) << DHD_PKTTAG_IF_SHIFT)
+#define DHD_PKTTAG_IF(tag)		((((dhd_pkttag_t*)(tag))->if_flags >> \
+	DHD_PKTTAG_IF_SHIFT) & DHD_PKTTAG_IF_MASK)
+
+#define DHD_PKTTAG_SETDSTN(tag, dstn_MAC_ea)	memcpy(((dhd_pkttag_t*)((tag)))->dstn_ether, \
+	(dstn_MAC_ea), ETHER_ADDR_LEN)
+#define DHD_PKTTAG_DSTN(tag)	((dhd_pkttag_t*)(tag))->dstn_ether
+
+#define DHD_PKTTAG_SET_H2DTAG(tag, h2dvalue)	((dhd_pkttag_t*)(tag))->htod_tag = (h2dvalue)
+#define DHD_PKTTAG_H2DTAG(tag)			(((dhd_pkttag_t*)(tag))->htod_tag)
+
+#define DHD_PKTTAG_SET_H2DSEQ(tag, seq)		((dhd_pkttag_t*)(tag))->htod_seq = (seq)
+#define DHD_PKTTAG_H2DSEQ(tag)			(((dhd_pkttag_t*)(tag))->htod_seq)
+
+#define DHD_PKTTAG_SET_ENTRY(tag, entry)	((dhd_pkttag_t*)(tag))->entry = (entry)
+#define DHD_PKTTAG_ENTRY(tag)			(((dhd_pkttag_t*)(tag))->entry)
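+
+/*
+ * Illustrative sketch only (not part of the driver API): how the accessors
+ * above compose when tagging an outbound data packet. The function name and
+ * the 'pkt_tag' parameter are hypothetical; in the driver the tag area comes
+ * from the OSL packet-tag accessor.
+ */
+#ifdef DHD_WLFC_PKTTAG_EXAMPLE
+static void example_tag_tx_pkt(void *pkt_tag, uint8 fifo, uint8 ifidx)
+{
+	DHD_PKTTAG_WLFCPKT_SET(pkt_tag, 1);	/* b[15]: wlfc-managed packet */
+	DHD_PKTTAG_SETPKTDIR(pkt_tag, 1);	/* b[9]: host -> firmware */
+	DHD_PKTTAG_SETFIFO(pkt_tag, fifo);	/* b[6:4]: AC FIFO number */
+	DHD_PKTTAG_SETIF(pkt_tag, ifidx);	/* b[3:0]: interface index */
+}
+#endif /* DHD_WLFC_PKTTAG_EXAMPLE */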
+
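+/* Per-precedence queue indexing (assumed from the macro layout below): each
+ * precedence level owns a (delay, suppress) pair of queues, with the even
+ * index used for the delay queue and the odd index for the suppress queue.
+ */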
+#define PSQ_SUP_IDX(x) ((x) * 2 + 1)
+#define PSQ_DLY_IDX(x) ((x) * 2)
+
+typedef int (*f_commitpkt_t)(void* ctx, void* p);
+typedef bool (*f_processpkt_t)(void* p, void* arg);
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define DHD_WLFC_CTRINC_MAC_CLOSE(entry)	do { (entry)->closed_ct++; } while (0)
+#define DHD_WLFC_CTRINC_MAC_OPEN(entry)		do { (entry)->opened_ct++; } while (0)
+#else
+#define DHD_WLFC_CTRINC_MAC_CLOSE(entry)	do {} while (0)
+#define DHD_WLFC_CTRINC_MAC_OPEN(entry)		do {} while (0)
+#endif
+
+/* public functions */
+int dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len,
+	uchar *reorder_info_buf, uint *reorder_info_len);
+int dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit,
+	void* commit_ctx, void *pktbuf, bool need_toggle_host_if);
+int dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success);
+int dhd_wlfc_init(dhd_pub_t *dhd);
+int dhd_wlfc_hostreorder_init(dhd_pub_t *dhd);
+#ifdef SUPPORT_P2P_GO_PS
+int dhd_wlfc_suspend(dhd_pub_t *dhd);
+int dhd_wlfc_resume(dhd_pub_t *dhd);
+#endif /* SUPPORT_P2P_GO_PS */
+int dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg);
+int dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void* arg);
+int dhd_wlfc_deinit(dhd_pub_t *dhd);
+int dhd_wlfc_interface_event(dhd_pub_t *dhdp, uint8 action, uint8 ifid, uint8 iftype, uint8* ea);
+int dhd_wlfc_FIFOcreditmap_event(dhd_pub_t *dhdp, uint8* event_data);
+int dhd_wlfc_BCMCCredit_support_event(dhd_pub_t *dhdp);
+int dhd_wlfc_enable(dhd_pub_t *dhdp);
+int dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+int dhd_wlfc_clear_counts(dhd_pub_t *dhd);
+int dhd_wlfc_get_enable(dhd_pub_t *dhd, bool *val);
+int dhd_wlfc_get_mode(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_mode(dhd_pub_t *dhd, int val);
+bool dhd_wlfc_is_supported(dhd_pub_t *dhd);
+bool dhd_wlfc_is_header_only_pkt(dhd_pub_t * dhd, void *pktbuf);
+int dhd_wlfc_flowcontrol(dhd_pub_t *dhdp, bool state, bool bAcquireLock);
+
+int dhd_wlfc_get_module_ignore(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_module_ignore(dhd_pub_t *dhd, int val);
+int dhd_wlfc_get_credit_ignore(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_credit_ignore(dhd_pub_t *dhd, int val);
+int dhd_wlfc_get_txstatus_ignore(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_txstatus_ignore(dhd_pub_t *dhd, int val);
+#endif /* __wlfc_host_driver_definitions_h__ */
diff --git a/drivers/staging/edison-bcm43340/dngl_stats.h b/drivers/staging/edison-bcm43340/dngl_stats.h
new file mode 100644
index 0000000..ac84522
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/dngl_stats.h
@@ -0,0 +1,43 @@
+/*
+ * Common stats definitions for clients of dongle
+ * ports
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dngl_stats.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _dngl_stats_h_
+#define _dngl_stats_h_
+
+typedef struct {
+	unsigned long	rx_packets;		/* total packets received */
+	unsigned long	tx_packets;		/* total packets transmitted */
+	unsigned long	rx_bytes;		/* total bytes received */
+	unsigned long	tx_bytes;		/* total bytes transmitted */
+	unsigned long	rx_errors;		/* bad packets received */
+	unsigned long	tx_errors;		/* packet transmit problems */
+	unsigned long	rx_dropped;		/* packets dropped by dongle */
+	unsigned long	tx_dropped;		/* packets dropped by dongle */
+	unsigned long	multicast;	/* multicast packets received */
+} dngl_stats_t;
+
+#endif /* _dngl_stats_h_ */
diff --git a/drivers/staging/edison-bcm43340/dngl_wlhdr.h b/drivers/staging/edison-bcm43340/dngl_wlhdr.h
new file mode 100644
index 0000000..2c1366a
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/dngl_wlhdr.h
@@ -0,0 +1,40 @@
+/*
+ * Dongle WL Header definitions
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dngl_wlhdr.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _dngl_wlhdr_h_
+#define _dngl_wlhdr_h_
+
+typedef struct wl_header {
+	uint8	type;		/* Header type */
+	uint8	version;	/* Header version */
+	int8	rssi;		/* RSSI */
+	uint8	pad;		/* Unused */
+} wl_header_t;
+
+#define WL_HEADER_LEN   sizeof(wl_header_t)
+#define WL_HEADER_TYPE  0
+#define WL_HEADER_VER   1
+#endif /* _dngl_wlhdr_h_ */
diff --git a/drivers/staging/edison-bcm43340/hndpmu.c b/drivers/staging/edison-bcm43340/hndpmu.c
new file mode 100644
index 0000000..70d383e
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/hndpmu.c
@@ -0,0 +1,282 @@
+/*
+ * Misc utility routines for accessing PMU corerev specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndpmu.c 433378 2013-10-31 17:19:39Z $
+ */
+
+
+/*
+ * Note: this file contains PLL/FLL-related functions. A chip can contain
+ * multiple PLLs/FLLs, but in the context of this file only the baseband
+ * ('BB') PLL/FLL is referred to.
+ *
+ * Throughout this code, the prefixes 'pmu0_', 'pmu1_' and 'pmu2_' are used.
+ * They refer to different revisions of the PMU (which is at revision 18 as of Apr 25, 2012).
+ * pmu1_ marks the transition from PLL to ADFLL (Digital Frequency Locked Loop); it supports
+ * fractional frequency generation. pmu2_ does not support fractional frequency generation.
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <hndpmu.h>
+
+#define	PMU_ERROR(args)
+
+#define	PMU_MSG(args)
+
+/* For checking in verbose debugging messages that are not intended
+ * to be enabled except on private builds.
+ */
+#define	PMU_NONE(args)
+
+/** contains resource bit positions for a specific chip */
+struct rsc_per_chip_s {
+	uint8 ht_avail;
+	uint8 macphy_clkavail;
+	uint8 ht_start;
+	uint8 otp_pu;
+};
+
+typedef struct rsc_per_chip_s rsc_per_chip_t;
+
+
+/* SDIO Pad drive strength to select value mappings.
+ * The last strength value in each table must be 0 (the tri-state value).
+ */
+typedef struct {
+	uint8 strength;			/* Pad Drive Strength in mA */
+	uint8 sel;			/* Chip-specific select value */
+} sdiod_drive_str_t;
+
+/* SDIO Drive Strength to sel value table for PMU Rev 1 */
+static const sdiod_drive_str_t sdiod_drive_strength_tab1[] = {
+	{4, 0x2},
+	{2, 0x3},
+	{1, 0x0},
+	{0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */
+static const sdiod_drive_str_t sdiod_drive_strength_tab2[] = {
+	{12, 0x7},
+	{10, 0x6},
+	{8, 0x5},
+	{6, 0x4},
+	{4, 0x2},
+	{2, 0x1},
+	{0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab3[] = {
+	{32, 0x7},
+	{26, 0x6},
+	{22, 0x5},
+	{16, 0x4},
+	{12, 0x3},
+	{8, 0x2},
+	{4, 0x1},
+	{0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab4_1v8[] = {
+	{32, 0x6},
+	{26, 0x7},
+	{22, 0x4},
+	{16, 0x5},
+	{12, 0x2},
+	{8, 0x3},
+	{4, 0x0},
+	{0, 0x1} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.2v) */
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (2.5v) */
+
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab5_1v8[] = {
+	{6, 0x7},
+	{5, 0x6},
+	{4, 0x5},
+	{3, 0x4},
+	{2, 0x2},
+	{1, 0x1},
+	{0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (3.3v) */
+
+/** SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab6_1v8[] = {
+	{3, 0x3},
+	{2, 0x2},
+	{1, 0x1},
+	{0, 0x0} };
+
+
+/**
+ * SDIO Drive Strength to sel value table for 43143 PMU Rev 17, see Confluence 43143 Toplevel
+ * architecture page, section 'PMU Chip Control 1 Register definition', click link to picture
+ * BCM43143_sel_sdio_signals.jpg. Valid after PMU Chip Control 0 Register, bit31 (override) has
+ * been written '1'.
+ */
+#if !defined(BCM_SDIO_VDDIO) || BCM_SDIO_VDDIO == 33
+
+static const sdiod_drive_str_t sdiod_drive_strength_tab7_3v3[] = {
+	/* note: for 14, 10, 6 and 2mA hw timing is not met according to rtl team */
+	{16, 0x7},
+	{12, 0x5},
+	{8,  0x3},
+	{4,  0x1} }; /* note: 43143 does not support tristate */
+
+#else
+
+static const sdiod_drive_str_t sdiod_drive_strength_tab7_1v8[] = {
+	/* note: for 7, 5, 3 and 1mA hw timing is not met according to rtl team */
+	{8, 0x7},
+	{6, 0x5},
+	{4,  0x3},
+	{2,  0x1} }; /* note: 43143 does not support tristate */
+
+#endif /* BCM_SDIO_VDDIO */
+
+#define SDIOD_DRVSTR_KEY(chip, pmu)	(((chip) << 16) | (pmu))
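+/* For example (illustrative), SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12) packs the
+ * chip id into the upper 16 bits and the PMU rev into the lower 16 bits,
+ * matching the switch cases in si_sdiod_drive_strength_init() below.
+ */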
+
+/**
+ * This function balances stable SDIO operation against power consumption.
+ * Note that each drive strength table is for a specific VDDIO of the SDIO pads; ideally this
+ * function would read the VDDIO itself to select the correct table. For now this is solved
+ * with the 'BCM_SDIO_VDDIO' preprocessor constant.
+ *
+ * 'drivestrength': desired pad drive strength in mA. A drive strength of 0 requests tri-state
+ *		    (if the hardware supports it); without hardware support the strength is not programmed.
+ */
+void
+si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength)
+{
+	chipcregs_t *cc;
+	uint origidx, intr_val = 0;
+	sdiod_drive_str_t *str_tab = NULL;
+	uint32 str_mask = 0;	/* only alter desired bits in PMU chipcontrol 1 register */
+	uint32 str_shift = 0;
+	uint32 str_ovr_pmuctl = PMU_CHIPCTL0; /* PMU chipcontrol register containing override bit */
+	uint32 str_ovr_pmuval = 0;            /* position of bit within this register */
+
+	if (!(sih->cccaps & CC_CAP_PMU)) {
+		return;
+	}
+
+	/* Remember original core before switch to chipc */
+	cc = (chipcregs_t *) si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
+
+	switch (SDIOD_DRVSTR_KEY(sih->chip, sih->pmurev)) {
+	case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab1;
+		str_mask = 0x30000000;
+		str_shift = 28;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2):
+	case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3):
+	case SDIOD_DRVSTR_KEY(BCM4315_CHIP_ID, 4):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab2;
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8):
+	case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 11):
+		if (sih->pmurev == 8) {
+			str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab3;
+		}
+		else if (sih->pmurev == 11) {
+			str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab4_1v8;
+		}
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab4_1v8;
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab5_1v8;
+		str_mask = 0x00003800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
+		str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab6_1v8;
+		str_mask = 0x00001800;
+		str_shift = 11;
+		break;
+	case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
+#if !defined(BCM_SDIO_VDDIO) || BCM_SDIO_VDDIO == 33
+		if (drivestrength >=  ARRAYLAST(sdiod_drive_strength_tab7_3v3)->strength) {
+			str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab7_3v3;
+		}
+#else
+		if (drivestrength >=  ARRAYLAST(sdiod_drive_strength_tab7_1v8)->strength) {
+			str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab7_1v8;
+		}
+#endif /* BCM_SDIO_VDDIO */
+		str_mask = 0x00000007;
+		str_ovr_pmuval = PMU43143_CC0_SDIO_DRSTR_OVR;
+		break;
+	default:
+		PMU_MSG(("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+		         bcm_chipname(sih->chip, chn, 8), sih->chiprev, sih->pmurev));
+		break;
+	}
+
+	if (str_tab != NULL && cc != NULL) {
+		uint32 cc_data_temp;
+		int i;
+
+		/* Pick the lowest available drive strength equal or greater than the
+		 * requested strength.	Drive strength of 0 requests tri-state.
+		 */
+		for (i = 0; drivestrength < str_tab[i].strength; i++)
+			;
+
+		if (i > 0 && drivestrength > str_tab[i].strength)
+			i--;
+
+		W_REG(osh, &cc->chipcontrol_addr, PMU_CHIPCTL1);
+		cc_data_temp = R_REG(osh, &cc->chipcontrol_data);
+		cc_data_temp &= ~str_mask;
+		cc_data_temp |= str_tab[i].sel << str_shift;
+		W_REG(osh, &cc->chipcontrol_data, cc_data_temp);
+		if (str_ovr_pmuval) { /* enables the selected drive strength */
+			W_REG(osh,  &cc->chipcontrol_addr, str_ovr_pmuctl);
+			OR_REG(osh, &cc->chipcontrol_data, str_ovr_pmuval);
+		}
+		PMU_MSG(("SDIO: %dmA drive strength requested; set to %dmA\n",
+		         drivestrength, str_tab[i].strength));
+	}
+
+	/* Return to original core */
+	si_restore_core(sih, origidx, intr_val);
+} /* si_sdiod_drive_strength_init */
diff --git a/drivers/staging/edison-bcm43340/include/Makefile b/drivers/staging/edison-bcm43340/include/Makefile
new file mode 100644
index 0000000..fef8365
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/Makefile
@@ -0,0 +1,54 @@
+#
+# This Makefile serves the following purposes:
+#
+# 1. It generates native version information by querying the
+#    automerger-maintained database to see where src/include
+#    came from.
+# 2. For select components, as listed in compvers.sh,
+#    it generates component version files.
+#
+# Copyright 2005, Broadcom, Inc.
+#
+# $Id: Makefile 347587 2012-07-27 09:13:31Z $
+#
+
+export SRCBASE:=..
+
+TARGETS := epivers.h
+
+ifdef VERBOSE
+export VERBOSE
+endif
+
+all release: epivers compvers
+
+# Generate epivers.h for native branch url
+epivers:
+	bash epivers.sh
+
+# Generate component versions based on component url
+compvers:
+	@if [ -s "compvers.sh" ]; then \
+		echo "Generating component versions, if any"; \
+		bash compvers.sh; \
+	else \
+		echo "Skipping component version generation"; \
+	fi
+
+# Clean up generated component version files
+clean_compvers:
+	@if [ -s "compvers.sh" ]; then \
+		echo "bash compvers.sh clean"; \
+		bash compvers.sh clean; \
+	else \
+		echo "Skipping component version clean"; \
+	fi
+
+clean:
+	rm -f $(TARGETS) *.prev
+
+clean_all: clean clean_compvers
+
+.PHONY: all release clean epivers compvers clean_compvers
+
diff --git a/drivers/staging/edison-bcm43340/include/aidmp.h b/drivers/staging/edison-bcm43340/include/aidmp.h
new file mode 100644
index 0000000..519d8be
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/aidmp.h
@@ -0,0 +1,386 @@
+/*
+ * Broadcom AMBA Interconnect definitions.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: aidmp.h 404499 2013-05-28 01:06:37Z $
+ */
+
+#ifndef	_AIDMP_H
+#define	_AIDMP_H
+
+/* Manufacturer Ids */
+#define	MFGID_ARM		0x43b
+#define	MFGID_BRCM		0x4bf
+#define	MFGID_MIPS		0x4a7
+
+/* Component Classes */
+#define	CC_SIM			0
+#define	CC_EROM			1
+#define	CC_CORESIGHT		9
+#define	CC_VERIF		0xb
+#define	CC_OPTIMO		0xd
+#define	CC_GEN			0xe
+#define	CC_PRIMECELL		0xf
+
+/* Enumeration ROM registers */
+#define	ER_EROMENTRY		0x000
+#define	ER_REMAPCONTROL		0xe00
+#define	ER_REMAPSELECT		0xe04
+#define	ER_MASTERSELECT		0xe10
+#define	ER_ITCR			0xf00
+#define	ER_ITIP			0xf04
+
+/* Erom entries */
+#define	ER_TAG			0xe
+#define	ER_TAG1			0x6
+#define	ER_VALID		1
+#define	ER_CI			0
+#define	ER_MP			2
+#define	ER_ADD			4
+#define	ER_END			0xe
+#define	ER_BAD			0xffffffff
+
+/* EROM CompIdentA */
+#define	CIA_MFG_MASK		0xfff00000
+#define	CIA_MFG_SHIFT		20
+#define	CIA_CID_MASK		0x000fff00
+#define	CIA_CID_SHIFT		8
+#define	CIA_CCL_MASK		0x000000f0
+#define	CIA_CCL_SHIFT		4
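+/* Example (illustrative): decoding the fields of an EROM CompIdentA word 'cia':
+ *	mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
+ *	cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
+ *	ccl = (cia & CIA_CCL_MASK) >> CIA_CCL_SHIFT;
+ */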
+
+/* EROM CompIdentB */
+#define	CIB_REV_MASK		0xff000000
+#define	CIB_REV_SHIFT		24
+#define	CIB_NSW_MASK		0x00f80000
+#define	CIB_NSW_SHIFT		19
+#define	CIB_NMW_MASK		0x0007c000
+#define	CIB_NMW_SHIFT		14
+#define	CIB_NSP_MASK		0x00003e00
+#define	CIB_NSP_SHIFT		9
+#define	CIB_NMP_MASK		0x000001f0
+#define	CIB_NMP_SHIFT		4
+
+/* EROM MasterPortDesc */
+#define	MPD_MUI_MASK		0x0000ff00
+#define	MPD_MUI_SHIFT		8
+#define	MPD_MP_MASK		0x000000f0
+#define	MPD_MP_SHIFT		4
+
+/* EROM AddrDesc */
+#define	AD_ADDR_MASK		0xfffff000
+#define	AD_SP_MASK		0x00000f00
+#define	AD_SP_SHIFT		8
+#define	AD_ST_MASK		0x000000c0
+#define	AD_ST_SHIFT		6
+#define	AD_ST_SLAVE		0x00000000
+#define	AD_ST_BRIDGE		0x00000040
+#define	AD_ST_SWRAP		0x00000080
+#define	AD_ST_MWRAP		0x000000c0
+#define	AD_SZ_MASK		0x00000030
+#define	AD_SZ_SHIFT		4
+#define	AD_SZ_4K		0x00000000
+#define	AD_SZ_8K		0x00000010
+#define	AD_SZ_16K		0x00000020
+#define	AD_SZ_SZD		0x00000030
+#define	AD_AG32			0x00000008
+#define	AD_ADDR_ALIGN		0x00000fff
+#define	AD_SZ_BASE		0x00001000	/* 4KB */
+
+/* EROM SizeDesc */
+#define	SD_SZ_MASK		0xfffff000
+#define	SD_SG32			0x00000008
+#define	SD_SZ_ALIGN		0x00000fff
+
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+typedef volatile struct _aidmp {
+	uint32	oobselina30;	/* 0x000 */
+	uint32	oobselina74;	/* 0x004 */
+	uint32	PAD[6];
+	uint32	oobselinb30;	/* 0x020 */
+	uint32	oobselinb74;	/* 0x024 */
+	uint32	PAD[6];
+	uint32	oobselinc30;	/* 0x040 */
+	uint32	oobselinc74;	/* 0x044 */
+	uint32	PAD[6];
+	uint32	oobselind30;	/* 0x060 */
+	uint32	oobselind74;	/* 0x064 */
+	uint32	PAD[38];
+	uint32	oobselouta30;	/* 0x100 */
+	uint32	oobselouta74;	/* 0x104 */
+	uint32	PAD[6];
+	uint32	oobseloutb30;	/* 0x120 */
+	uint32	oobseloutb74;	/* 0x124 */
+	uint32	PAD[6];
+	uint32	oobseloutc30;	/* 0x140 */
+	uint32	oobseloutc74;	/* 0x144 */
+	uint32	PAD[6];
+	uint32	oobseloutd30;	/* 0x160 */
+	uint32	oobseloutd74;	/* 0x164 */
+	uint32	PAD[38];
+	uint32	oobsynca;	/* 0x200 */
+	uint32	oobseloutaen;	/* 0x204 */
+	uint32	PAD[6];
+	uint32	oobsyncb;	/* 0x220 */
+	uint32	oobseloutben;	/* 0x224 */
+	uint32	PAD[6];
+	uint32	oobsyncc;	/* 0x240 */
+	uint32	oobseloutcen;	/* 0x244 */
+	uint32	PAD[6];
+	uint32	oobsyncd;	/* 0x260 */
+	uint32	oobseloutden;	/* 0x264 */
+	uint32	PAD[38];
+	uint32	oobaextwidth;	/* 0x300 */
+	uint32	oobainwidth;	/* 0x304 */
+	uint32	oobaoutwidth;	/* 0x308 */
+	uint32	PAD[5];
+	uint32	oobbextwidth;	/* 0x320 */
+	uint32	oobbinwidth;	/* 0x324 */
+	uint32	oobboutwidth;	/* 0x328 */
+	uint32	PAD[5];
+	uint32	oobcextwidth;	/* 0x340 */
+	uint32	oobcinwidth;	/* 0x344 */
+	uint32	oobcoutwidth;	/* 0x348 */
+	uint32	PAD[5];
+	uint32	oobdextwidth;	/* 0x360 */
+	uint32	oobdinwidth;	/* 0x364 */
+	uint32	oobdoutwidth;	/* 0x368 */
+	uint32	PAD[37];
+	uint32	ioctrlset;	/* 0x400 */
+	uint32	ioctrlclear;	/* 0x404 */
+	uint32	ioctrl;		/* 0x408 */
+	uint32	PAD[61];
+	uint32	iostatus;	/* 0x500 */
+	uint32	PAD[127];
+	uint32	ioctrlwidth;	/* 0x700 */
+	uint32	iostatuswidth;	/* 0x704 */
+	uint32	PAD[62];
+	uint32	resetctrl;	/* 0x800 */
+	uint32	resetstatus;	/* 0x804 */
+	uint32	resetreadid;	/* 0x808 */
+	uint32	resetwriteid;	/* 0x80c */
+	uint32	PAD[60];
+	uint32	errlogctrl;	/* 0x900 */
+	uint32	errlogdone;	/* 0x904 */
+	uint32	errlogstatus;	/* 0x908 */
+	uint32	errlogaddrlo;	/* 0x90c */
+	uint32	errlogaddrhi;	/* 0x910 */
+	uint32	errlogid;	/* 0x914 */
+	uint32	errloguser;	/* 0x918 */
+	uint32	errlogflags;	/* 0x91c */
+	uint32	PAD[56];
+	uint32	intstatus;	/* 0xa00 */
+	uint32	PAD[255];
+	uint32	config;		/* 0xe00 */
+	uint32	PAD[63];
+	uint32	itcr;		/* 0xf00 */
+	uint32	PAD[3];
+	uint32	itipooba;	/* 0xf10 */
+	uint32	itipoobb;	/* 0xf14 */
+	uint32	itipoobc;	/* 0xf18 */
+	uint32	itipoobd;	/* 0xf1c */
+	uint32	PAD[4];
+	uint32	itipoobaout;	/* 0xf30 */
+	uint32	itipoobbout;	/* 0xf34 */
+	uint32	itipoobcout;	/* 0xf38 */
+	uint32	itipoobdout;	/* 0xf3c */
+	uint32	PAD[4];
+	uint32	itopooba;	/* 0xf50 */
+	uint32	itopoobb;	/* 0xf54 */
+	uint32	itopoobc;	/* 0xf58 */
+	uint32	itopoobd;	/* 0xf5c */
+	uint32	PAD[4];
+	uint32	itopoobain;	/* 0xf70 */
+	uint32	itopoobbin;	/* 0xf74 */
+	uint32	itopoobcin;	/* 0xf78 */
+	uint32	itopoobdin;	/* 0xf7c */
+	uint32	PAD[4];
+	uint32	itopreset;	/* 0xf90 */
+	uint32	PAD[15];
+	uint32	peripherialid4;	/* 0xfd0 */
+	uint32	peripherialid5;	/* 0xfd4 */
+	uint32	peripherialid6;	/* 0xfd8 */
+	uint32	peripherialid7;	/* 0xfdc */
+	uint32	peripherialid0;	/* 0xfe0 */
+	uint32	peripherialid1;	/* 0xfe4 */
+	uint32	peripherialid2;	/* 0xfe8 */
+	uint32	peripherialid3;	/* 0xfec */
+	uint32	componentid0;	/* 0xff0 */
+	uint32	componentid1;	/* 0xff4 */
+	uint32	componentid2;	/* 0xff8 */
+	uint32	componentid3;	/* 0xffc */
+} aidmp_t;
+
+#endif /* _LANGUAGE_ASSEMBLY */
+
+/* Out-of-band Router registers */
+#define	OOB_BUSCONFIG		0x020
+#define	OOB_STATUSA		0x100
+#define	OOB_STATUSB		0x104
+#define	OOB_STATUSC		0x108
+#define	OOB_STATUSD		0x10c
+#define	OOB_ENABLEA0		0x200
+#define	OOB_ENABLEA1		0x204
+#define	OOB_ENABLEA2		0x208
+#define	OOB_ENABLEA3		0x20c
+#define	OOB_ENABLEB0		0x280
+#define	OOB_ENABLEB1		0x284
+#define	OOB_ENABLEB2		0x288
+#define	OOB_ENABLEB3		0x28c
+#define	OOB_ENABLEC0		0x300
+#define	OOB_ENABLEC1		0x304
+#define	OOB_ENABLEC2		0x308
+#define	OOB_ENABLEC3		0x30c
+#define	OOB_ENABLED0		0x380
+#define	OOB_ENABLED1		0x384
+#define	OOB_ENABLED2		0x388
+#define	OOB_ENABLED3		0x38c
+#define	OOB_ITCR		0xf00
+#define	OOB_ITIPOOBA		0xf10
+#define	OOB_ITIPOOBB		0xf14
+#define	OOB_ITIPOOBC		0xf18
+#define	OOB_ITIPOOBD		0xf1c
+#define	OOB_ITOPOOBA		0xf30
+#define	OOB_ITOPOOBB		0xf34
+#define	OOB_ITOPOOBC		0xf38
+#define	OOB_ITOPOOBD		0xf3c
+
+/* DMP wrapper registers */
+#define	AI_OOBSELINA30		0x000
+#define	AI_OOBSELINA74		0x004
+#define	AI_OOBSELINB30		0x020
+#define	AI_OOBSELINB74		0x024
+#define	AI_OOBSELINC30		0x040
+#define	AI_OOBSELINC74		0x044
+#define	AI_OOBSELIND30		0x060
+#define	AI_OOBSELIND74		0x064
+#define	AI_OOBSELOUTA30		0x100
+#define	AI_OOBSELOUTA74		0x104
+#define	AI_OOBSELOUTB30		0x120
+#define	AI_OOBSELOUTB74		0x124
+#define	AI_OOBSELOUTC30		0x140
+#define	AI_OOBSELOUTC74		0x144
+#define	AI_OOBSELOUTD30		0x160
+#define	AI_OOBSELOUTD74		0x164
+#define	AI_OOBSYNCA		0x200
+#define	AI_OOBSELOUTAEN		0x204
+#define	AI_OOBSYNCB		0x220
+#define	AI_OOBSELOUTBEN		0x224
+#define	AI_OOBSYNCC		0x240
+#define	AI_OOBSELOUTCEN		0x244
+#define	AI_OOBSYNCD		0x260
+#define	AI_OOBSELOUTDEN		0x264
+#define	AI_OOBAEXTWIDTH		0x300
+#define	AI_OOBAINWIDTH		0x304
+#define	AI_OOBAOUTWIDTH		0x308
+#define	AI_OOBBEXTWIDTH		0x320
+#define	AI_OOBBINWIDTH		0x324
+#define	AI_OOBBOUTWIDTH		0x328
+#define	AI_OOBCEXTWIDTH		0x340
+#define	AI_OOBCINWIDTH		0x344
+#define	AI_OOBCOUTWIDTH		0x348
+#define	AI_OOBDEXTWIDTH		0x360
+#define	AI_OOBDINWIDTH		0x364
+#define	AI_OOBDOUTWIDTH		0x368
+
+
+#define	AI_IOCTRLSET		0x400
+#define	AI_IOCTRLCLEAR		0x404
+#define	AI_IOCTRL		0x408
+#define	AI_IOSTATUS		0x500
+#define	AI_RESETCTRL		0x800
+#define	AI_RESETSTATUS		0x804
+
+#define	AI_IOCTRLWIDTH		0x700
+#define	AI_IOSTATUSWIDTH	0x704
+
+#define	AI_RESETREADID		0x808
+#define	AI_RESETWRITEID		0x80c
+#define	AI_ERRLOGCTRL		0xa00
+#define	AI_ERRLOGDONE		0xa04
+#define	AI_ERRLOGSTATUS		0xa08
+#define	AI_ERRLOGADDRLO		0xa0c
+#define	AI_ERRLOGADDRHI		0xa10
+#define	AI_ERRLOGID		0xa14
+#define	AI_ERRLOGUSER		0xa18
+#define	AI_ERRLOGFLAGS		0xa1c
+#define	AI_INTSTATUS		0xa00
+#define	AI_CONFIG		0xe00
+#define	AI_ITCR			0xf00
+#define	AI_ITIPOOBA		0xf10
+#define	AI_ITIPOOBB		0xf14
+#define	AI_ITIPOOBC		0xf18
+#define	AI_ITIPOOBD		0xf1c
+#define	AI_ITIPOOBAOUT		0xf30
+#define	AI_ITIPOOBBOUT		0xf34
+#define	AI_ITIPOOBCOUT		0xf38
+#define	AI_ITIPOOBDOUT		0xf3c
+#define	AI_ITOPOOBA		0xf50
+#define	AI_ITOPOOBB		0xf54
+#define	AI_ITOPOOBC		0xf58
+#define	AI_ITOPOOBD		0xf5c
+#define	AI_ITOPOOBAIN		0xf70
+#define	AI_ITOPOOBBIN		0xf74
+#define	AI_ITOPOOBCIN		0xf78
+#define	AI_ITOPOOBDIN		0xf7c
+#define	AI_ITOPRESET		0xf90
+#define	AI_PERIPHERIALID4	0xfd0
+#define	AI_PERIPHERIALID5	0xfd4
+#define	AI_PERIPHERIALID6	0xfd8
+#define	AI_PERIPHERIALID7	0xfdc
+#define	AI_PERIPHERIALID0	0xfe0
+#define	AI_PERIPHERIALID1	0xfe4
+#define	AI_PERIPHERIALID2	0xfe8
+#define	AI_PERIPHERIALID3	0xfec
+#define	AI_COMPONENTID0		0xff0
+#define	AI_COMPONENTID1		0xff4
+#define	AI_COMPONENTID2		0xff8
+#define	AI_COMPONENTID3		0xffc
+
+/* resetctrl */
+#define	AIRC_RESET		1
+
+/* config */
+#define	AICFG_OOB		0x00000020
+#define	AICFG_IOS		0x00000010
+#define	AICFG_IOC		0x00000008
+#define	AICFG_TO		0x00000004
+#define	AICFG_ERRL		0x00000002
+#define	AICFG_RST		0x00000001
+
+/* bit defines for AI_OOBSELOUTB74 reg */
+#define OOB_SEL_OUTEN_B_5	15
+#define OOB_SEL_OUTEN_B_6	23
+
+/* AI_OOBSEL for A/B/C/D, 0-7 */
+#define AI_OOBSEL_MASK		0x1F
+#define AI_OOBSEL_0_SHIFT	0
+#define AI_OOBSEL_1_SHIFT	8
+#define AI_OOBSEL_2_SHIFT	16
+#define AI_OOBSEL_3_SHIFT	24
+#define AI_OOBSEL_4_SHIFT	0
+#define AI_OOBSEL_5_SHIFT	8
+#define AI_OOBSEL_6_SHIFT	16
+#define AI_OOBSEL_7_SHIFT	24
+
+#endif	/* _AIDMP_H */
diff --git a/drivers/staging/edison-bcm43340/include/bcm_cfg.h b/drivers/staging/edison-bcm43340/include/bcm_cfg.h
new file mode 100644
index 0000000..fa2db7c
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/bcm_cfg.h
@@ -0,0 +1,29 @@
+/*
+ * BCM common config options
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcm_cfg.h 351867 2012-08-21 18:46:16Z $
+ */
+
+#ifndef _bcm_cfg_h_
+#define _bcm_cfg_h_
+#endif /* _bcm_cfg_h_ */
diff --git a/drivers/staging/edison-bcm43340/include/bcm_mpool_pub.h b/drivers/staging/edison-bcm43340/include/bcm_mpool_pub.h
new file mode 100644
index 0000000..ee06f3b
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/bcm_mpool_pub.h
@@ -0,0 +1,361 @@
+/*
+ * Memory pools library, Public interface
+ *
+ * API Overview
+ *
+ * This package provides a memory allocation subsystem based on pools of
+ * homogeneous objects.
+ *
+ * Instrumentation is available for reporting memory utilization both
+ * on a per-data-structure basis and system wide.
+ *
+ * There are two main types defined in this API.
+ *
+ *    pool manager: A singleton object that acts as a factory for
+ *                  pool allocators. It also is used for global
+ *                  instrumentation, such as reporting all blocks
+ *                  in use across all data structures. The pool manager
+ *                  creates and provides individual memory pools
+ *                  upon request to application code.
+ *
+ *    memory pool:  An object for allocating homogeneous memory blocks.
+ *
+ * Global identifiers in this module use the following prefixes:
+ *    bcm_mpm_*     Memory pool manager
+ *    bcm_mp_*      Memory pool
+ *
+ * There are two main types of memory pools:
+ *
+ *    prealloc: The contiguous memory block of objects can either be supplied
+ *              by the client or malloc'ed by the memory manager. The objects are
+ *              allocated out of a block of memory and freed back to the block.
+ *
+ *    heap:     The memory pool allocator uses the heap (malloc/free) for memory.
+ *              In this case, the pool allocator is just providing statistics
+ *              and instrumentation on top of the heap, without modifying the heap
+ *              allocation implementation.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcm_mpool_pub.h 407097 2013-06-11 18:43:16Z $
+ */
+
+#ifndef _BCM_MPOOL_PUB_H
+#define _BCM_MPOOL_PUB_H 1
+
+#include <typedefs.h> /* needed for uint16 */
+
+
+/*
+**************************************************************************
+*
+* Type definitions, handles
+*
+**************************************************************************
+*/
+
+/* Forward declaration of OSL handle. */
+struct osl_info;
+
+/* Forward declaration of string buffer. */
+struct bcmstrbuf;
+
+/*
+ * Opaque type definition for the pool manager handle. This object is used for global
+ * memory pool operations such as obtaining a new pool, deleting a pool, iterating and
+ * instrumentation/debugging.
+ */
+struct bcm_mpm_mgr;
+typedef struct bcm_mpm_mgr *bcm_mpm_mgr_h;
+
+/*
+ * Opaque type definition for an instance of a pool. This handle is used for allocating
+ * and freeing memory through the pool, as well as management/instrumentation on this
+ * specific pool.
+ */
+struct bcm_mp_pool;
+typedef struct bcm_mp_pool *bcm_mp_pool_h;
+
+
+/*
+ * To make instrumentation more readable, every memory
+ * pool must have a readable name. Pool names are up to
+ * 8 bytes including '\0' termination. (7 printable characters.)
+ */
+#define BCM_MP_NAMELEN 8
+
+
+/*
+ * Type definition for pool statistics.
+ */
+typedef struct bcm_mp_stats {
+	char name[BCM_MP_NAMELEN];  /* Name of this pool. */
+	unsigned int objsz;         /* Object size allocated in this pool */
+	uint16 nobj;                /* Total number of objects in this pool */
+	uint16 num_alloc;           /* Number of objects currently allocated */
+	uint16 high_water;          /* Max number of allocated objects. */
+	uint16 failed_alloc;        /* Failed allocations. */
+} bcm_mp_stats_t;
+
+
+/*
+**************************************************************************
+*
+* API Routines on the pool manager.
+*
+**************************************************************************
+*/
+
+/*
+ * bcm_mpm_init() - initialize the whole memory pool system.
+ *
+ * Parameters:
+ *    osh:       INPUT  Operating system handle. Needed for heap memory allocation.
+ *    max_pools: INPUT Maximum number of mempools supported.
+ *    mgr:       OUTPUT The handle is written with the new pools manager object/handle.
+ *
+ * Returns:
+ *    BCME_OK     Object initialized successfully. May be used.
+ *    BCME_NOMEM  Initialization failed due to no memory. Object must not be used.
+ */
+int bcm_mpm_init(struct osl_info *osh, int max_pools, bcm_mpm_mgr_h *mgrp);
+
+
+/*
+ * bcm_mpm_deinit() - de-initialize the whole memory pool system.
+ *
+ * Parameters:
+ *    mgr:     INPUT  Pointer to pool manager handle.
+ *
+ * Returns:
+ *    BCME_OK  Memory pool manager successfully de-initialized.
+ *    other    Indicated error occurred during de-initialization.
+ */
+int bcm_mpm_deinit(bcm_mpm_mgr_h *mgrp);
+
+/*
+ * bcm_mpm_create_prealloc_pool() - Create a new pool for fixed size objects. The
+ *                                  pool uses a contiguous block of pre-alloced
+ *                                  memory. The memory block may either be provided
+ *                                  by the client or dynamically allocated by the
+ *                                  pool manager.
+ *
+ * Parameters:
+ *    mgr:      INPUT  The handle to the pool manager
+ *    obj_sz:   INPUT  Size of objects that will be allocated by the new pool
+ *                     Must be >= sizeof(void *).
+ *    nobj:     INPUT  Maximum number of concurrently existing objects to support
+ *    memstart  INPUT  Pointer to the memory to use, or NULL to malloc()
+ *    memsize   INPUT  Number of bytes referenced from memstart (for error checking).
+ *                     Must be 0 if 'memstart' is NULL.
+ *    poolname  INPUT  For instrumentation, the name of the pool
+ *    newp:     OUTPUT The handle for the new pool, if creation is successful
+ *
+ * Returns:
+ *    BCME_OK   Pool created ok.
+ *    other     Pool not created due to indicated error. newpoolp set to NULL.
+ *
+ *
+ */
+int bcm_mpm_create_prealloc_pool(bcm_mpm_mgr_h mgr,
+                                 unsigned int obj_sz,
+                                 int nobj,
+                                 void *memstart,
+                                 unsigned int memsize,
+                                 const char poolname[BCM_MP_NAMELEN],
+                                 bcm_mp_pool_h *newp);
+
+
+/*
+ * bcm_mpm_delete_prealloc_pool() - Delete a memory pool. This should only be called after
+ *                                  all memory objects have been freed back to the pool.
+ *
+ * Parameters:
+ *    mgr:     INPUT The handle to the pools manager
+ *    pool:    INPUT The handle of the pool to delete
+ *
+ * Returns:
+ *    BCME_OK   Pool deleted ok.
+ *    other     Pool not deleted due to indicated error.
+ *
+ */
+int bcm_mpm_delete_prealloc_pool(bcm_mpm_mgr_h mgr, bcm_mp_pool_h *poolp);
+
+/*
+ * bcm_mpm_create_heap_pool() - Create a new pool for fixed size objects. The memory
+ *                              pool allocator uses the heap (malloc/free) for memory.
+ *                              In this case, the pool allocator is just providing
+ *                              statistics and instrumentation on top of the heap,
+ *                              without modifying the heap allocation implementation.
+ *
+ * Parameters:
+ *    mgr:      INPUT  The handle to the pool manager
+ *    obj_sz:   INPUT  Size of objects that will be allocated by the new pool
+ *    poolname  INPUT  For instrumentation, the name of the pool
+ *    newp:     OUTPUT The handle for the new pool, if creation is successful
+ *
+ * Returns:
+ *    BCME_OK   Pool created ok.
+ *    other     Pool not created due to indicated error. newpoolp set to NULL.
+ *
+ *
+ */
+int bcm_mpm_create_heap_pool(bcm_mpm_mgr_h mgr, unsigned int obj_sz,
+                             const char poolname[BCM_MP_NAMELEN],
+                             bcm_mp_pool_h *newp);
+
+
+/*
+ * bcm_mpm_delete_heap_pool() - Delete a memory pool. This should only be called after
+ *                              all memory objects have been freed back to the pool.
+ *
+ * Parameters:
+ *    mgr:     INPUT The handle to the pools manager
+ *    pool:    INPUT The handle of the  pool to delete
+ *
+ * Returns:
+ *    BCME_OK   Pool deleted ok.
+ *    other     Pool not deleted due to indicated error.
+ *
+ */
+int bcm_mpm_delete_heap_pool(bcm_mpm_mgr_h mgr, bcm_mp_pool_h *poolp);
+
+
+/*
+ * bcm_mpm_stats() - Return stats for all pools
+ *
+ * Parameters:
+ *    mgr:         INPUT   The handle to the pools manager
+ *    stats:       OUTPUT  Array of pool statistics.
+ *    nentries:    MOD     Max elements in 'stats' array on INPUT. Actual number
+ *                         of array elements copied to 'stats' on OUTPUT.
+ *
+ * Returns:
+ *    BCME_OK   Ok
+ *    other     Error getting stats.
+ *
+ */
+int bcm_mpm_stats(bcm_mpm_mgr_h mgr, bcm_mp_stats_t *stats, int *nentries);
+
+
+/*
+ * bcm_mpm_dump() - Display statistics on all pools
+ *
+ * Parameters:
+ *    mgr:     INPUT  The handle to the pools manager
+ *    b:       OUTPUT Output buffer.
+ *
+ * Returns:
+ *    BCME_OK   Ok
+ *    other     Error during dump.
+ *
+ */
+int bcm_mpm_dump(bcm_mpm_mgr_h mgr, struct bcmstrbuf *b);
+
+
+/*
+ * bcm_mpm_get_obj_size() - The size of memory objects may need to be padded to
+ *                          compensate for alignment requirements of the objects.
+ *                          This function provides the padded object size. If clients
+ *                          pre-allocate a memory slab for a memory pool, the
+ *                          padded object size should be used by the client to allocate
+ *                          the memory slab (in order to provide sufficient space for
+ *                          the maximum number of objects).
+ *
+ * Parameters:
+ *    mgr:            INPUT   The handle to the pools manager.
+ *    obj_sz:         INPUT   Input object size.
+ *    padded_obj_sz:  OUTPUT  Padded object size.
+ *
+ * Returns:
+ *    BCME_OK      Ok
+ *    BCME_BADARG  Bad arguments.
+ *
+ */
+int bcm_mpm_get_obj_size(bcm_mpm_mgr_h mgr, unsigned int obj_sz, unsigned int *padded_obj_sz);
+
+
+/*
+***************************************************************************
+*
+* API Routines on a specific pool.
+*
+***************************************************************************
+*/
+
+
+/*
+ * bcm_mp_alloc() - Allocate a memory pool object.
+ *
+ * Parameters:
+ *    pool:    INPUT    The handle to the pool.
+ *
+ * Returns:
+ *    A pointer to the new object. NULL on error.
+ *
+ */
+void* bcm_mp_alloc(bcm_mp_pool_h pool);
+
+/*
+ * bcm_mp_free() - Free a memory pool object.
+ *
+ * Parameters:
+ *    pool:  INPUT   The handle to the pool.
+ *    objp:  INPUT   A pointer to the object to free.
+ *
+ * Returns:
+ *    BCME_OK   Ok
+ *    other     Error during free.
+ *
+ */
+int bcm_mp_free(bcm_mp_pool_h pool, void *objp);
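+
+/*
+ * Illustrative usage sketch (not part of this header): bring up the pool
+ * manager, create a heap-backed pool, and cycle one object through it. The
+ * function name is hypothetical, error handling is abbreviated, and the
+ * BCME_* error codes are assumed to come from the common bcmutils headers.
+ */
+#ifdef BCM_MPOOL_EXAMPLE
+static int example_mpool_usage(struct osl_info *osh)
+{
+	bcm_mpm_mgr_h mgr = NULL;
+	bcm_mp_pool_h pool = NULL;
+	void *obj;
+
+	if (bcm_mpm_init(osh, 4, &mgr) != BCME_OK)
+		return -1;
+	if (bcm_mpm_create_heap_pool(mgr, 64, "example", &pool) != BCME_OK)
+		return -1;
+	obj = bcm_mp_alloc(pool);	/* NULL on allocation failure */
+	if (obj != NULL)
+		bcm_mp_free(pool, obj);
+	bcm_mpm_delete_heap_pool(mgr, &pool);
+	return bcm_mpm_deinit(&mgr);
+}
+#endif /* BCM_MPOOL_EXAMPLE */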
+
+/*
+ * bcm_mp_stats() - Return stats for this pool
+ *
+ * Parameters:
+ *    pool:     INPUT    The handle to the pool
+ *    stats:    OUTPUT   Pool statistics
+ *
+ * Returns:
+ *    BCME_OK   Ok
+ *    other     Error getting statistics.
+ *
+ */
+int bcm_mp_stats(bcm_mp_pool_h pool, bcm_mp_stats_t *stats);
+
+
+/*
+ * bcm_mp_dump() - Dump a pool
+ *
+ * Parameters:
+ *    pool:    INPUT    The handle to the pool
+ *    b        OUTPUT   Output buffer
+ *
+ * Returns:
+ *    BCME_OK   Ok
+ *    other     Error during dump.
+ *
+ */
+int bcm_mp_dump(bcm_mp_pool_h pool, struct bcmstrbuf *b);
+
+
+#endif /* _BCM_MPOOL_PUB_H */
diff --git a/drivers/staging/edison-bcm43340/include/bcmcdc.h b/drivers/staging/edison-bcm43340/include/bcmcdc.h
new file mode 100644
index 0000000..1028bb3
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/bcmcdc.h
@@ -0,0 +1,132 @@
+/*
+ * CDC network driver ioctl/indication encoding
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmcdc.h 318308 2012-03-02 02:23:42Z $
+ */
+#ifndef _bcmcdc_h_
+#define	_bcmcdc_h_
+#include <proto/ethernet.h>
+
+typedef struct cdc_ioctl {
+	uint32 cmd;      /* ioctl command value */
+	uint32 len;      /* lower 16: output buflen; upper 16: input buflen (excludes header) */
+	uint32 flags;    /* flag defns given below */
+	uint32 status;   /* status code returned from the device */
+} cdc_ioctl_t;
+
+/* Max valid buffer size that can be sent to the dongle */
+#define CDC_MAX_MSG_SIZE   ETHER_MAX_LEN
+
+/* len field is divided into input and output buffer lengths */
+#define CDCL_IOC_OUTLEN_MASK   0x0000FFFF  /* maximum or expected response length, */
+					   /* excluding IOCTL header */
+#define CDCL_IOC_OUTLEN_SHIFT  0
+#define CDCL_IOC_INLEN_MASK    0xFFFF0000   /* input buffer length, excluding IOCTL header */
+#define CDCL_IOC_INLEN_SHIFT   16
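+/* Example (illustrative): a request carrying a 32-byte input buffer and
+ * expecting at most a 16-byte response would encode
+ *	len = (32 << CDCL_IOC_INLEN_SHIFT) | (16 << CDCL_IOC_OUTLEN_SHIFT);
+ */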
+
+/* CDC flag definitions */
+#define CDCF_IOC_ERROR		0x01	/* 0=success, 1=ioctl cmd failed */
+#define CDCF_IOC_SET		0x02	/* 0=get, 1=set cmd */
+#define CDCF_IOC_OVL_IDX_MASK	0x3c	/* overlay region index mask */
+#define CDCF_IOC_OVL_RSV	0x40	/* 1=reserve this overlay region */
+#define CDCF_IOC_OVL		0x80	/* 1=this ioctl corresponds to an overlay */
+#define CDCF_IOC_ACTION_MASK	0xfe	/* SET/GET, OVL_IDX, OVL_RSV, OVL mask */
+#define CDCF_IOC_ACTION_SHIFT	1	/* SET/GET, OVL_IDX, OVL_RSV, OVL shift */
+#define CDCF_IOC_IF_MASK	0xF000	/* I/F index */
+#define CDCF_IOC_IF_SHIFT	12
+#define CDCF_IOC_ID_MASK	0xFFFF0000	/* used to uniquely id an ioctl req/resp pairing */
+#define CDCF_IOC_ID_SHIFT	16		/* # of bits of shift for ID Mask */
+
+#define CDC_IOC_IF_IDX(flags)	(((flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT)
+#define CDC_IOC_ID(flags)	(((flags) & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT)
+
+#define CDC_GET_IF_IDX(hdr) \
+	((int)((((hdr)->flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT))
+#define CDC_SET_IF_IDX(hdr, idx) \
+	((hdr)->flags = (((hdr)->flags & ~CDCF_IOC_IF_MASK) | ((idx) << CDCF_IOC_IF_SHIFT)))
+
+/*
+ * BDC header
+ *
+ *   The BDC header is used on data packets to convey priority across USB.
+ */
+
+struct bdc_header {
+	uint8	flags;			/* Flags */
+	uint8	priority;		/* 802.1d Priority 0:2 bits, 4:7 USB flow control info */
+	uint8	flags2;
+	uint8	dataOffset;		/* Offset from end of BDC header to packet data, in
+					 * 4-byte words.  Leaves room for optional headers.
+					 */
+};
+
+#define	BDC_HEADER_LEN		4
+
+/* flags field bitmap */
+#define BDC_FLAG_80211_PKT	0x01	/* Packet is in 802.11 format (dongle -> host) */
+#define BDC_FLAG_SUM_GOOD	0x04	/* Dongle has verified good RX checksums */
+#define BDC_FLAG_SUM_NEEDED	0x08	/* Dongle needs to do TX checksums: host->device */
+#define BDC_FLAG_EVENT_MSG	0x08	/* Payload contains an event msg: device->host */
+#define BDC_FLAG_VER_MASK	0xf0	/* Protocol version mask */
+#define BDC_FLAG_VER_SHIFT	4	/* Protocol version shift */
+
+/* priority field bitmap */
+#define BDC_PRIORITY_MASK	0x07
+#define BDC_PRIORITY_FC_MASK	0xf0	/* flow control info mask */
+#define BDC_PRIORITY_FC_SHIFT	4	/* flow control info shift */
+
+/* flags2 field bitmap */
+#define BDC_FLAG2_IF_MASK	0x0f	/* interface index (host <-> dongle) */
+#define BDC_FLAG2_IF_SHIFT	0
+#define BDC_FLAG2_FC_FLAG	0x10	/* flag to indicate if pkt contains */
+					/* FLOW CONTROL info only */
+
+/* version numbers */
+#define BDC_PROTO_VER_1		1	/* Old Protocol version */
+#define BDC_PROTO_VER		2	/* Protocol version */
+
+/* flags2.if field access macros */
+#define BDC_GET_IF_IDX(hdr) \
+	((int)((((hdr)->flags2) & BDC_FLAG2_IF_MASK) >> BDC_FLAG2_IF_SHIFT))
+#define BDC_SET_IF_IDX(hdr, idx) \
+	((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | ((idx) << BDC_FLAG2_IF_SHIFT)))
+
+#define BDC_FLAG2_PAD_MASK		0xf0
+#define BDC_FLAG_PAD_MASK		0x03
+#define BDC_FLAG2_PAD_SHIFT		2
+#define BDC_FLAG_PAD_SHIFT		0
+#define BDC_FLAG2_PAD_IDX		0x3c
+#define BDC_FLAG_PAD_IDX		0x03
+#define BDC_GET_PAD_LEN(hdr) \
+	((int)(((((hdr)->flags2) & BDC_FLAG2_PAD_MASK) >> BDC_FLAG2_PAD_SHIFT) | \
+	((((hdr)->flags) & BDC_FLAG_PAD_MASK) >> BDC_FLAG_PAD_SHIFT)))
+#define BDC_SET_PAD_LEN(hdr, idx) \
+	((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_PAD_MASK) | \
+	(((idx) & BDC_FLAG2_PAD_IDX) << BDC_FLAG2_PAD_SHIFT))); \
+	((hdr)->flags = (((hdr)->flags & ~BDC_FLAG_PAD_MASK) | \
+	(((idx) & BDC_FLAG_PAD_IDX) << BDC_FLAG_PAD_SHIFT)))
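+/* Note: BDC_SET_PAD_LEN expands to two statements joined by a semicolon,
+ * so it is not safe as the body of an unbraced if/else; wrap calls in braces.
+ */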
+
+#endif /* _bcmcdc_h_ */
diff --git a/drivers/staging/edison-bcm43340/include/bcmdefs.h b/drivers/staging/edison-bcm43340/include/bcmdefs.h
new file mode 100644
index 0000000..1569fa5
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/bcmdefs.h
@@ -0,0 +1,351 @@
+/*
+ * Misc system wide definitions
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmdefs.h 433011 2013-10-30 09:19:54Z $
+ */
+
+#ifndef	_bcmdefs_h_
+#define	_bcmdefs_h_
+
+/*
+ * One doesn't need to include this file explicitly; it is included
+ * automatically when typedefs.h is included.
+ */
+
+/* Use BCM_REFERENCE to suppress warnings about intentionally-unused function
+ * arguments or local variables.
+ */
+#define BCM_REFERENCE(data)	((void)(data))
+
+/* Allow for suppressing unused variable warnings. */
+#ifdef __GNUC__
+#define UNUSED_VAR     __attribute__ ((unused))
+#else
+#define UNUSED_VAR
+#endif
+
+/* Compile-time assert can be used in place of ASSERT if the expression evaluates
+ * to a constant at compile time.
+ */
+#define STATIC_ASSERT(expr) { \
+	/* Make sure the expression is constant. */ \
+	typedef enum { _STATIC_ASSERT_NOT_CONSTANT = (expr) } _static_assert_e UNUSED_VAR; \
+	/* Make sure the expression is true. */ \
+	typedef char STATIC_ASSERT_FAIL[(expr) ? 1 : -1] UNUSED_VAR; \
+}
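+/* Example (illustrative): STATIC_ASSERT(sizeof(uint32) == 4); the negative
+ * array size fails compilation when the expression is false, and the enum
+ * fails it when the expression is not a compile-time constant.
+ */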
+
+/* Reclaiming text and data :
+ * The following macros specify special linker sections that can be reclaimed
+ * after a system is considered 'up'.
+ * BCMATTACHFN is also used for detach functions (it's not worth having a BCMDETACHFN,
+ * as in most cases, the attach function calls the detach function to clean up on error).
+ */
+
+#define bcmreclaimed 		0
+#define BCMATTACHDATA(_data)	_data
+#define BCMATTACHFN(_fn)	_fn
+#define BCMPREATTACHDATA(_data)	_data
+#define BCMPREATTACHFN(_fn)	_fn
+#define BCMINITDATA(_data)	_data
+#define BCMINITFN(_fn)		_fn
+#define BCMUNINITFN(_fn)	_fn
+#define	BCMNMIATTACHFN(_fn)	_fn
+#define	BCMNMIATTACHDATA(_data)	_data
+#define CONST	const
+
+/* Do not put BCM47XX and __ARM_ARCH_7A__ to the same line.
+ * DHD build has problem because the BCM47XX will be excluded in DHD release.
+ */
+#undef BCM47XX_CA9
+
+#ifndef BCMFASTPATH
+#if defined(BCM47XX_CA9)
+#define BCMFASTPATH		__attribute__ ((__section__ (".text.fastpath")))
+#define BCMFASTPATH_HOST	__attribute__ ((__section__ (".text.fastpath_host")))
+#else
+#define BCMFASTPATH
+#define BCMFASTPATH_HOST
+#endif
+#endif /* BCMFASTPATH */
+
+
+/* Use the BCMRAMFN() macro to tag functions in source that must be included in RAM (excluded from
+ * ROM). This should eliminate the need to manually specify these functions in the ROM config file.
+ * It should only be used in special cases where the function must be in RAM for *all* ROM-based
+ * chips.
+ */
+	#define BCMRAMFN(_fn)	_fn
+
+
+
+/* Put some library data/code into ROM to reduce RAM requirements */
+#define BCMROMDATA(_data)	_data
+#define BCMROMDAT_NAME(_data)	_data
+#define BCMROMFN(_fn)		_fn
+#define BCMROMFN_NAME(_fn)	_fn
+#define STATIC	static
+#define BCMROMDAT_ARYSIZ(data)	ARRAYSIZE(data)
+#define BCMROMDAT_SIZEOF(data)	sizeof(data)
+#define BCMROMDAT_APATCH(data)
+#define BCMROMDAT_SPATCH(data)
+
+/* Bus types */
+#define	SI_BUS			0	/* SOC Interconnect */
+#define	PCI_BUS			1	/* PCI target */
+#define	PCMCIA_BUS		2	/* PCMCIA target */
+#define SDIO_BUS		3	/* SDIO target */
+#define JTAG_BUS		4	/* JTAG */
+#define USB_BUS			5	/* USB (does not support R/W REG) */
+#define SPI_BUS			6	/* gSPI target */
+#define RPC_BUS			7	/* RPC target */
+
+/* Allows size optimization for single-bus image */
+#ifdef BCMBUSTYPE
+#define BUSTYPE(bus) 	(BCMBUSTYPE)
+#else
+#define BUSTYPE(bus) 	(bus)
+#endif
+
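+/* Example: an SDIO-only image built with -DBCMBUSTYPE=SDIO_BUS turns the
+ * comparison below into a compile-time constant, so the other branches can
+ * be discarded (illustrative; sih and setup_sdio are hypothetical):
+ *
+ *	if (BUSTYPE(sih->bustype) == SDIO_BUS)
+ *		setup_sdio();
+ */
+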
+/* Allows size optimization for single-backplane image */
+#ifdef BCMCHIPTYPE
+#define CHIPTYPE(bus) 	(BCMCHIPTYPE)
+#else
+#define CHIPTYPE(bus) 	(bus)
+#endif
+
+
+/* Allows size optimization for SPROM support */
+#if defined(BCMSPROMBUS)
+#define SPROMBUS	(BCMSPROMBUS)
+#elif defined(SI_PCMCIA_SROM)
+#define SPROMBUS	(PCMCIA_BUS)
+#else
+#define SPROMBUS	(PCI_BUS)
+#endif
+
+/* Allows size optimization for single-chip image */
+#ifdef BCMCHIPID
+#define CHIPID(chip)	(BCMCHIPID)
+#else
+#define CHIPID(chip)	(chip)
+#endif
+
+#ifdef BCMCHIPREV
+#define CHIPREV(rev)	(BCMCHIPREV)
+#else
+#define CHIPREV(rev)	(rev)
+#endif
+
+/* Defines for DMA Address Width - Shared between OSL and HNDDMA */
+#define DMADDR_MASK_32 0x0		/* Address mask for 32-bits */
+#define DMADDR_MASK_30 0xc0000000	/* Address mask for 30-bits */
+#define DMADDR_MASK_0  0xffffffff	/* Address mask for 0-bits (hi-part) */
+
+#define	DMADDRWIDTH_30  30 /* 30-bit addressing capability */
+#define	DMADDRWIDTH_32  32 /* 32-bit addressing capability */
+#define	DMADDRWIDTH_63  63 /* 64-bit addressing capability */
+#define	DMADDRWIDTH_64  64 /* 64-bit addressing capability */
+
+typedef struct {
+	uint32 loaddr;
+	uint32 hiaddr;
+} dma64addr_t;
+
+#define PHYSADDR64HI(_pa) ((_pa).hiaddr)
+#define PHYSADDR64HISET(_pa, _val) \
+	do { \
+		(_pa).hiaddr = (_val);		\
+	} while (0)
+#define PHYSADDR64LO(_pa) ((_pa).loaddr)
+#define PHYSADDR64LOSET(_pa, _val) \
+	do { \
+		(_pa).loaddr = (_val);		\
+	} while (0)
+
+#ifdef BCMDMA64OSL
+typedef dma64addr_t dmaaddr_t;
+#define PHYSADDRHI(_pa) PHYSADDR64HI(_pa)
+#define PHYSADDRHISET(_pa, _val) PHYSADDR64HISET(_pa, _val)
+#define PHYSADDRLO(_pa)  PHYSADDR64LO(_pa)
+#define PHYSADDRLOSET(_pa, _val) PHYSADDR64LOSET(_pa, _val)
+
+#else
+typedef unsigned long dmaaddr_t;
+#define PHYSADDRHI(_pa) (0)
+#define PHYSADDRHISET(_pa, _val)
+#define PHYSADDRLO(_pa) ((_pa))
+#define PHYSADDRLOSET(_pa, _val) \
+	do { \
+		(_pa) = (_val);			\
+	} while (0)
+#endif /* BCMDMA64OSL */
+
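+/* Example: loading a 64-bit physical address into a dmaaddr_t
+ * (illustrative; pa and physaddr are hypothetical locals). When
+ * BCMDMA64OSL is not set, the high-word store compiles away:
+ *
+ *	dmaaddr_t pa;
+ *	PHYSADDRLOSET(pa, (uint32)(physaddr & 0xffffffff));
+ *	PHYSADDRHISET(pa, (uint32)(physaddr >> 32));
+ */
+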
+/* One physical DMA segment */
+typedef struct  {
+	dmaaddr_t addr;
+	uint32	  length;
+} hnddma_seg_t;
+
+#define MAX_DMA_SEGS 8
+
+
+typedef struct {
+	void *oshdmah; /* Opaque handle for OSL to store its information */
+	uint origsize; /* Size of the virtual packet */
+	uint nsegs;
+	hnddma_seg_t segs[MAX_DMA_SEGS];
+} hnddma_seg_map_t;
+
+
+/* Packet headroom necessary to accommodate the largest header in the system
+ * (i.e. TXOFF). By reserving it here, we avoid the need to allocate an extra
+ * buffer for the header when bridging to WL. There is a compile-time check in
+ * wlc.c which ensures that this value is at least as big as TXOFF. This value
+ * is used in dma_rxfill (hnddma.c).
+ */
+
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RPC_TXNOCOPY)
+/* add 40 bytes to allow for extra RPC header and info */
+#define BCMEXTRAHDROOM 260
+#else /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY */
+#if defined(BCM47XX_CA9)
+#define BCMEXTRAHDROOM 224
+#else
+#define BCMEXTRAHDROOM 204
+#endif /* BCM47XX_CA9 */
+#endif /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY */
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#ifndef SDALIGN
+#define SDALIGN	32
+#endif
+
+/* Headroom required for dongle-to-host communication.  Packets allocated
+ * locally in the dongle (e.g. for CDC ioctls or RNDIS messages) should
+ * leave this much room in front for low-level message headers which may
+ * be needed to get across the dongle bus to the host.  (These messages
+ * don't go over the network, so room for the full WL header above would
+ * be a waste.)
+ */
+#define BCMDONGLEHDRSZ 12
+#define BCMDONGLEPADSZ 16
+
+#define BCMDONGLEOVERHEAD	(BCMDONGLEHDRSZ + BCMDONGLEPADSZ)
+
+
+#if defined(NO_BCMDBG_ASSERT)
+# undef BCMDBG_ASSERT
+# undef BCMASSERT_LOG
+#endif
+
+#if defined(BCMASSERT_LOG)
+#define BCMASSERT_SUPPORT
+#endif 
+
+/* Macros for doing definition and get/set of bitfields
+ * Usage example, e.g. a three-bit field (bits 4-6):
+ *    #define <NAME>_M	BITFIELD_MASK(3)
+ *    #define <NAME>_S	4
+ * ...
+ *    regval = R_REG(osh, &regs->regfoo);
+ *    field = GFIELD(regval, <NAME>);
+ *    regval = SFIELD(regval, <NAME>, 1);
+ *    W_REG(osh, &regs->regfoo, regval);
+ */
+#define BITFIELD_MASK(width) \
+		(((unsigned)1 << (width)) - 1)
+#define GFIELD(val, field) \
+		(((val) >> field ## _S) & field ## _M)
+#define SFIELD(val, field, bits) \
+		(((val) & (~(field ## _M << field ## _S))) | \
+		 ((unsigned)(bits) << field ## _S))
+
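+/* Worked example for the three-bit field above (mask 0x7, shift 4):
+ * GFIELD(0x0050, <NAME>) evaluates to (0x0050 >> 4) & 0x7 == 5, and
+ * SFIELD(0x0050, <NAME>, 1) evaluates to (0x0050 & ~0x70) | (1 << 4) == 0x0010.
+ */
+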
+/* define BCMSMALL to remove misc features for memory-constrained environments */
+#ifdef BCMSMALL
+#undef	BCMSPACE
+#define bcmspace	FALSE	/* if (bcmspace) code is discarded */
+#else
+#define	BCMSPACE
+#define bcmspace	TRUE	/* if (bcmspace) code is retained */
+#endif
+
+/* Max. nvram variable table size */
+#ifndef MAXSZ_NVRAM_VARS
+#define	MAXSZ_NVRAM_VARS	4096
+#endif
+
+
+
+/* WL_ENAB_RUNTIME_CHECK may be set based upon the #define below (for ROM builds). It may also
+ * be defined via makefiles (e.g. ROM auto abandon unoptimized compiles).
+ */
+
+
+#ifdef BCMLFRAG /* BCMLFRAG support enab macros  */
+	extern bool _bcmlfrag;
+	#if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+		#define BCMLFRAG_ENAB() (_bcmlfrag)
+	#elif defined(BCMLFRAG_DISABLED)
+		#define BCMLFRAG_ENAB()	(0)
+	#else
+		#define BCMLFRAG_ENAB()	(1)
+	#endif
+#else
+	#define BCMLFRAG_ENAB()		(0)
+#endif /* BCMLFRAG */
+#ifdef BCMSPLITRX /* BCMSPLITRX support enab macros */
+	extern bool _bcmsplitrx;
+	#if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+		#define BCMSPLITRX_ENAB() (_bcmsplitrx)
+	#elif defined(BCMSPLITRX_DISABLED)
+		#define BCMSPLITRX_ENAB()	(0)
+	#else
+		#define BCMSPLITRX_ENAB()	(1)
+	#endif
+#else
+	#define BCMSPLITRX_ENAB()		(0)
+#endif /* BCMSPLITRX */
+#ifdef BCM_SPLITBUF
+	extern bool _bcmsplitbuf;
+	#if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+		#define BCM_SPLITBUF_ENAB() (_bcmsplitbuf)
+	#elif defined(BCM_SPLITBUF_DISABLED)
+		#define BCM_SPLITBUF_ENAB()	(0)
+	#else
+		#define BCM_SPLITBUF_ENAB()	(1)
+	#endif
+#else
+	#define BCM_SPLITBUF_ENAB()		(0)
+#endif	/* BCM_SPLITBUF */
+/* Max size for reclaimable NVRAM array */
+#ifdef DL_NVRAM
+#define NVRAM_ARRAY_MAXSIZE	DL_NVRAM
+#else
+#define NVRAM_ARRAY_MAXSIZE	MAXSZ_NVRAM_VARS
+#endif /* DL_NVRAM */
+
+#ifdef BCMUSBDEV_ENABLED
+extern uint32 gFWID;
+#endif
+
+
+#endif /* _bcmdefs_h_ */
diff --git a/drivers/staging/edison-bcm43340/include/bcmdevs.h b/drivers/staging/edison-bcm43340/include/bcmdevs.h
new file mode 100644
index 0000000..5a85775
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/bcmdevs.h
@@ -0,0 +1,688 @@
+/*
+ * Broadcom device-specific manifest constants.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmdevs.h 475454 2014-05-05 20:54:58Z $
+ */
+
+#ifndef	_BCMDEVS_H
+#define	_BCMDEVS_H
+
+/* PCI vendor IDs */
+#define	VENDOR_EPIGRAM		0xfeda
+#define	VENDOR_BROADCOM		0x14e4
+#define	VENDOR_3COM		0x10b7
+#define	VENDOR_NETGEAR		0x1385
+#define	VENDOR_DIAMOND		0x1092
+#define	VENDOR_INTEL		0x8086
+#define	VENDOR_DELL		0x1028
+#define	VENDOR_HP		0x103c
+#define	VENDOR_HP_COMPAQ	0x0e11
+#define	VENDOR_APPLE		0x106b
+#define VENDOR_SI_IMAGE		0x1095		/* Silicon Image, used by Arasan SDIO Host */
+#define VENDOR_BUFFALO		0x1154		/* Buffalo vendor id */
+#define VENDOR_TI		0x104c		/* Texas Instruments */
+#define VENDOR_RICOH		0x1180		/* Ricoh */
+#define VENDOR_JMICRON		0x197b
+
+
+/* PCMCIA vendor IDs */
+#define	VENDOR_BROADCOM_PCMCIA	0x02d0
+
+/* SDIO vendor IDs */
+#define	VENDOR_BROADCOM_SDIO	0x00BF
+
+/* DONGLE VID/PIDs */
+#define BCM_DNGL_VID		0x0a5c
+#define BCM_DNGL_BL_PID_4328	0xbd12
+#define BCM_DNGL_BL_PID_4322	0xbd13
+#define BCM_DNGL_BL_PID_4319    0xbd16
+#define BCM_DNGL_BL_PID_43236   0xbd17
+#define BCM_DNGL_BL_PID_4332	0xbd18
+#define BCM_DNGL_BL_PID_4330	0xbd19
+#define BCM_DNGL_BL_PID_4334	0xbd1a
+#define BCM_DNGL_BL_PID_43239   0xbd1b
+#define BCM_DNGL_BL_PID_4324	0xbd1c
+#define BCM_DNGL_BL_PID_4360	0xbd1d
+#define BCM_DNGL_BL_PID_43143	0xbd1e
+#define BCM_DNGL_BL_PID_43242	0xbd1f
+#define BCM_DNGL_BL_PID_43342	0xbd21
+#define BCM_DNGL_BL_PID_4335	0xbd20
+#define BCM_DNGL_BL_PID_43341	0xbd22
+#define BCM_DNGL_BL_PID_4350    0xbd23
+#define BCM_DNGL_BL_PID_4345    0xbd24
+#define BCM_DNGL_BL_PID_4349	0xbd25
+#define BCM_DNGL_BL_PID_4354	0xbd26
+
+#define BCM_DNGL_BDC_PID	0x0bdc
+#define BCM_DNGL_JTAG_PID	0x4a44
+
+/* HW USB BLOCK [CPULESS USB] PIDs */
+#define BCM_HWUSB_PID_43239     43239
+
+/* PCI Device IDs */
+#define	BCM4210_DEVICE_ID	0x1072		/* never used */
+#define	BCM4230_DEVICE_ID	0x1086		/* never used */
+#define	BCM4401_ENET_ID		0x170c		/* 4401b0 production enet cards */
+#define	BCM3352_DEVICE_ID	0x3352		/* bcm3352 device id */
+#define	BCM3360_DEVICE_ID	0x3360		/* bcm3360 device id */
+#define	BCM4211_DEVICE_ID	0x4211
+#define	BCM4231_DEVICE_ID	0x4231
+#define	BCM4303_D11B_ID		0x4303		/* 4303 802.11b */
+#define	BCM4311_D11G_ID		0x4311		/* 4311 802.11b/g id */
+#define	BCM4311_D11DUAL_ID	0x4312		/* 4311 802.11a/b/g id */
+#define	BCM4311_D11A_ID		0x4313		/* 4311 802.11a id */
+#define	BCM4328_D11DUAL_ID	0x4314		/* 4328/4312 802.11a/g id */
+#define	BCM4328_D11G_ID		0x4315		/* 4328/4312 802.11g id */
+#define	BCM4328_D11A_ID		0x4316		/* 4328/4312 802.11a id */
+#define	BCM4318_D11G_ID		0x4318		/* 4318 802.11b/g id */
+#define	BCM4318_D11DUAL_ID	0x4319		/* 4318 802.11a/b/g id */
+#define	BCM4318_D11A_ID		0x431a		/* 4318 802.11a id */
+#define	BCM4325_D11DUAL_ID	0x431b		/* 4325 802.11a/g id */
+#define	BCM4325_D11G_ID		0x431c		/* 4325 802.11g id */
+#define	BCM4325_D11A_ID		0x431d		/* 4325 802.11a id */
+#define	BCM4306_D11G_ID		0x4320		/* 4306 802.11g */
+#define	BCM4306_D11A_ID		0x4321		/* 4306 802.11a */
+#define	BCM4306_UART_ID		0x4322		/* 4306 uart */
+#define	BCM4306_V90_ID		0x4323		/* 4306 v90 codec */
+#define	BCM4306_D11DUAL_ID	0x4324		/* 4306 dual A+B */
+#define	BCM4306_D11G_ID2	0x4325		/* BCM4306_D11G_ID; INF w/loose binding war */
+#define	BCM4321_D11N_ID		0x4328		/* 4321 802.11n dualband id */
+#define	BCM4321_D11N2G_ID	0x4329		/* 4321 802.11n 2.4Ghz band id */
+#define	BCM4321_D11N5G_ID	0x432a		/* 4321 802.11n 5Ghz band id */
+#define BCM4322_D11N_ID		0x432b		/* 4322 802.11n dualband device */
+#define BCM4322_D11N2G_ID	0x432c		/* 4322 802.11n 2.4GHz device */
+#define BCM4322_D11N5G_ID	0x432d		/* 4322 802.11n 5GHz device */
+#define BCM4329_D11N_ID		0x432e		/* 4329 802.11n dualband device */
+#define BCM4329_D11N2G_ID	0x432f		/* 4329 802.11n 2.4G device */
+#define BCM4329_D11N5G_ID	0x4330		/* 4329 802.11n 5G device */
+#define	BCM4315_D11DUAL_ID	0x4334		/* 4315 802.11a/g id */
+#define	BCM4315_D11G_ID		0x4335		/* 4315 802.11g id */
+#define	BCM4315_D11A_ID		0x4336		/* 4315 802.11a id */
+#define BCM4319_D11N_ID		0x4337		/* 4319 802.11n dualband device */
+#define BCM4319_D11N2G_ID	0x4338		/* 4319 802.11n 2.4G device */
+#define BCM4319_D11N5G_ID	0x4339		/* 4319 802.11n 5G device */
+#define BCM43231_D11N2G_ID	0x4340		/* 43231 802.11n 2.4GHz device */
+#define BCM43221_D11N2G_ID	0x4341		/* 43221 802.11n 2.4GHz device */
+#define BCM43222_D11N_ID	0x4350		/* 43222 802.11n dualband device */
+#define BCM43222_D11N2G_ID	0x4351		/* 43222 802.11n 2.4GHz device */
+#define BCM43222_D11N5G_ID	0x4352		/* 43222 802.11n 5GHz device */
+#define BCM43224_D11N_ID	0x4353		/* 43224 802.11n dualband device */
+#define BCM43224_D11N_ID_VEN1	0x0576		/* Vendor specific 43224 802.11n db device */
+#define BCM43226_D11N_ID	0x4354		/* 43226 802.11n dualband device */
+#define BCM43236_D11N_ID	0x4346		/* 43236 802.11n dualband device */
+#define BCM43236_D11N2G_ID	0x4347		/* 43236 802.11n 2.4GHz device */
+#define BCM43236_D11N5G_ID	0x4348		/* 43236 802.11n 5GHz device */
+#define BCM43225_D11N2G_ID	0x4357		/* 43225 802.11n 2.4GHz device */
+#define BCM43421_D11N_ID	0xA99D		/* 43421 802.11n dualband device */
+#define BCM4313_D11N2G_ID	0x4727		/* 4313 802.11n 2.4G device */
+#define BCM4330_D11N_ID         0x4360          /* 4330 802.11n dualband device */
+#define BCM4330_D11N2G_ID       0x4361          /* 4330 802.11n 2.4G device */
+#define BCM4330_D11N5G_ID       0x4362          /* 4330 802.11n 5G device */
+#define BCM4336_D11N_ID		0x4343		/* 4336 802.11n 2.4GHz device */
+#define BCM6362_D11N_ID		0x435f		/* 6362 802.11n dualband device */
+#define BCM6362_D11N2G_ID	0x433f		/* 6362 802.11n 2.4Ghz band id */
+#define BCM6362_D11N5G_ID	0x434f		/* 6362 802.11n 5Ghz band id */
+#define BCM4331_D11N_ID		0x4331		/* 4331 802.11n dualband id */
+#define BCM4331_D11N2G_ID	0x4332		/* 4331 802.11n 2.4Ghz band id */
+#define BCM4331_D11N5G_ID	0x4333		/* 4331 802.11n 5Ghz band id */
+#define BCM43237_D11N_ID	0x4355		/* 43237 802.11n dualband device */
+#define BCM43237_D11N5G_ID	0x4356		/* 43237 802.11n 5GHz device */
+#define BCM43227_D11N2G_ID	0x4358		/* 43227 802.11n 2.4GHz device */
+#define BCM43228_D11N_ID	0x4359		/* 43228 802.11n DualBand device */
+#define BCM43228_D11N5G_ID	0x435a		/* 43228 802.11n 5GHz device */
+#define BCM43362_D11N_ID	0x4363		/* 43362 802.11n 2.4GHz device */
+#define BCM43239_D11N_ID	0x4370		/* 43239 802.11n dualband device */
+#define BCM4324_D11N_ID		0x4374		/* 4324 802.11n dualband device */
+#define BCM43217_D11N2G_ID	0x43a9		/* 43217 802.11n 2.4GHz device */
+#define BCM43131_D11N2G_ID	0x43aa		/* 43131 802.11n 2.4GHz device */
+#define BCM4314_D11N2G_ID	0x4364		/* 4314 802.11n 2.4G device */
+#define BCM43142_D11N2G_ID	0x4365		/* 43142 802.11n 2.4G device */
+#define BCM43143_D11N2G_ID	0x4366		/* 43143 802.11n 2.4G device */
+#define BCM4334_D11N_ID		0x4380		/* 4334 802.11n dualband device */
+#define BCM4334_D11N2G_ID	0x4381		/* 4334 802.11n 2.4G device */
+#define BCM4334_D11N5G_ID	0x4382		/* 4334 802.11n 5G device */
+#define BCM43342_D11N_ID	0x4383		/* 43342 802.11n dualband device */
+#define BCM43342_D11N2G_ID	0x4384		/* 43342 802.11n 2.4G device */
+#define BCM43342_D11N5G_ID	0x4385		/* 43342 802.11n 5G device */
+#define BCM43341_D11N_ID	0x4386		/* 43341 802.11n dualband device */
+#define BCM43341_D11N2G_ID	0x4387		/* 43341 802.11n 2.4G device */
+#define BCM43341_D11N5G_ID	0x4388		/* 43341 802.11n 5G device */
+#define BCM4360_D11AC_ID	0x43a0
+#define BCM4360_D11AC2G_ID	0x43a1
+#define BCM4360_D11AC5G_ID	0x43a2
+#define BCM4345_D11AC_ID	0x43ab		/* 4345 802.11ac dualband device */
+#define BCM4345_D11AC2G_ID	0x43ac		/* 4345 802.11ac 2.4G device */
+#define BCM4345_D11AC5G_ID	0x43ad		/* 4345 802.11ac 5G device */
+#define BCM4335_D11AC_ID	0x43ae
+#define BCM4335_D11AC2G_ID	0x43af
+#define BCM4335_D11AC5G_ID	0x43b0
+#define BCM4352_D11AC_ID	0x43b1		/* 4352 802.11ac dualband device */
+#define BCM4352_D11AC2G_ID	0x43b2		/* 4352 802.11ac 2.4G device */
+#define BCM4352_D11AC5G_ID	0x43b3		/* 4352 802.11ac 5G device */
+#define BCM43602_D11AC_ID	0x43ba		/* ac dualband PCI devid SPROM programmed */
+#define BCM43602_D11AC2G_ID	0x43bb		/* 43602 802.11ac 2.4G device */
+#define BCM43602_D11AC5G_ID	0x43bc		/* 43602 802.11ac 5G device */
+
+/* PCI Subsystem ID */
+#define BCM943228HMB_SSID_VEN1	0x0607
+#define BCM94313HMGBL_SSID_VEN1	0x0608
+#define BCM94313HMG_SSID_VEN1	0x0609
+#define BCM943142HM_SSID_VEN1	0x0611
+
+#define BCM43143_D11N2G_ID	0x4366		/* 43143 802.11n 2.4G device */
+
+#define BCM43242_D11N_ID	0x4367		/* 43242 802.11n dualband device */
+#define BCM43242_D11N2G_ID	0x4368		/* 43242 802.11n 2.4G device */
+#define BCM43242_D11N5G_ID	0x4369		/* 43242 802.11n 5G device */
+
+#define BCM4350_D11AC_ID	0x43a3
+#define BCM4350_D11AC2G_ID	0x43a4
+#define BCM4350_D11AC5G_ID	0x43a5
+
+#define BCM43556_D11AC_ID	0x43b7
+#define BCM43556_D11AC2G_ID	0x43b8
+#define BCM43556_D11AC5G_ID	0x43b9
+
+#define BCM43558_D11AC_ID	0x43c0
+#define BCM43558_D11AC2G_ID	0x43c1
+#define BCM43558_D11AC5G_ID	0x43c2
+
+#define BCM43566_D11AC_ID	0x43d3
+#define BCM43566_D11AC2G_ID	0x43d4
+#define BCM43566_D11AC5G_ID	0x43d5
+
+#define BCM43568_D11AC_ID	0x43d6
+#define BCM43568_D11AC2G_ID	0x43d7
+#define BCM43568_D11AC5G_ID	0x43d8
+
+#define BCM43569_D11AC_ID	0x43d9
+#define BCM43569_D11AC2G_ID	0x43da
+#define BCM43569_D11AC5G_ID	0x43db
+
+#define BCM4354_D11AC_ID	0x43df		/* 4354 802.11ac dualband device */
+#define BCM4354_D11AC2G_ID	0x43e0		/* 4354 802.11ac 2.4G device */
+#define BCM4354_D11AC5G_ID	0x43e1		/* 4354 802.11ac 5G device */
+#define BCM43430_D11N2G_ID	0x43e2		/* 43430 802.11n 2.4G device */
+
+
+#define BCM43349_D11N_ID	0x43e6		/* 43349 802.11n dualband id */
+#define BCM43349_D11N2G_ID	0x43e7		/* 43349 802.11n 2.4Ghz band id */
+#define BCM43349_D11N5G_ID	0x43e8		/* 43349 802.11n 5Ghz band id */
+
+#define BCM4356_D11AC_ID	0x43ec		/* 4356 802.11ac dualband device */
+#define BCM4356_D11AC2G_ID	0x43ed		/* 4356 802.11ac 2.4G device */
+#define BCM4356_D11AC5G_ID	0x43ee		/* 4356 802.11ac 5G device */
+
+#define	BCMGPRS_UART_ID		0x4333		/* Uart id used by 4306/gprs card */
+#define	BCMGPRS2_UART_ID	0x4344		/* Uart id used by 4306/gprs card */
+#define FPGA_JTAGM_ID		0x43f0		/* FPGA jtagm device id */
+#define BCM_JTAGM_ID		0x43f1		/* BCM jtagm device id */
+#define SDIOH_FPGA_ID		0x43f2		/* sdio host fpga */
+#define BCM_SDIOH_ID		0x43f3		/* BCM sdio host id */
+#define SDIOD_FPGA_ID		0x43f4		/* sdio device fpga */
+#define SPIH_FPGA_ID		0x43f5		/* PCI SPI Host Controller FPGA */
+#define BCM_SPIH_ID		0x43f6		/* Synopsys SPI Host Controller */
+#define MIMO_FPGA_ID		0x43f8		/* FPGA mimo minimacphy device id */
+#define BCM_JTAGM2_ID		0x43f9		/* BCM alternate jtagm device id */
+#define SDHCI_FPGA_ID		0x43fa		/* Standard SDIO Host Controller FPGA */
+#define	BCM4402_ENET_ID		0x4402		/* 4402 enet */
+#define	BCM4402_V90_ID		0x4403		/* 4402 v90 codec */
+#define	BCM4410_DEVICE_ID	0x4410		/* bcm44xx family pci iline */
+#define	BCM4412_DEVICE_ID	0x4412		/* bcm44xx family pci enet */
+#define	BCM4430_DEVICE_ID	0x4430		/* bcm44xx family cardbus iline */
+#define	BCM4432_DEVICE_ID	0x4432		/* bcm44xx family cardbus enet */
+#define	BCM4704_ENET_ID		0x4706		/* 4704 enet (Use 47XX_ENET_ID instead!) */
+#define	BCM4710_DEVICE_ID	0x4710		/* 4710 primary function 0 */
+#define	BCM47XX_AUDIO_ID	0x4711		/* 47xx audio codec */
+#define	BCM47XX_V90_ID		0x4712		/* 47xx v90 codec */
+#define	BCM47XX_ENET_ID		0x4713		/* 47xx enet */
+#define	BCM47XX_EXT_ID		0x4714		/* 47xx external i/f */
+#define	BCM47XX_GMAC_ID		0x4715		/* 47xx Unimac based GbE */
+#define	BCM47XX_USBH_ID		0x4716		/* 47xx usb host */
+#define	BCM47XX_USBD_ID		0x4717		/* 47xx usb device */
+#define	BCM47XX_IPSEC_ID	0x4718		/* 47xx ipsec */
+#define	BCM47XX_ROBO_ID		0x4719		/* 47xx/53xx roboswitch core */
+#define	BCM47XX_USB20H_ID	0x471a		/* 47xx usb 2.0 host */
+#define	BCM47XX_USB20D_ID	0x471b		/* 47xx usb 2.0 device */
+#define	BCM47XX_ATA100_ID	0x471d		/* 47xx parallel ATA */
+#define	BCM47XX_SATAXOR_ID	0x471e		/* 47xx serial ATA & XOR DMA */
+#define	BCM47XX_GIGETH_ID	0x471f		/* 47xx GbE (5700) */
+#define	BCM4712_MIPS_ID		0x4720		/* 4712 base devid */
+#define	BCM4716_DEVICE_ID	0x4722		/* 4716 base devid */
+#define	BCM47XX_USB30H_ID	0x472a		/* 47xx usb 3.0 host */
+#define	BCM47XX_USB30D_ID	0x472b		/* 47xx usb 3.0 device */
+#define BCM47XX_SMBUS_EMU_ID	0x47fe		/* 47xx emulated SMBus device */
+#define	BCM47XX_XOR_EMU_ID	0x47ff		/* 47xx emulated XOR engine */
+#define	EPI41210_DEVICE_ID	0xa0fa		/* bcm4210 */
+#define	EPI41230_DEVICE_ID	0xa10e		/* bcm4230 */
+#define JINVANI_SDIOH_ID	0x4743		/* Jinvani SDIO Gold Host */
+#define BCM27XX_SDIOH_ID	0x2702		/* BCM27xx Standard SDIO Host */
+#define PCIXX21_FLASHMEDIA_ID	0x803b		/* TI PCI xx21 Standard Host Controller */
+#define PCIXX21_SDIOH_ID	0x803c		/* TI PCI xx21 Standard Host Controller */
+#define R5C822_SDIOH_ID		0x0822		/* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host */
+#define JMICRON_SDIOH_ID	0x2381		/* JMicron Standard SDIO Host Controller */
+
+/* Chip IDs */
+#define	BCM4306_CHIP_ID		0x4306		/* 4306 chipcommon chipid */
+#define	BCM4311_CHIP_ID		0x4311		/* 4311 PCIe 802.11a/b/g */
+#define	BCM43111_CHIP_ID	43111		/* 43111 chipcommon chipid (OTP chipid) */
+#define	BCM43112_CHIP_ID	43112		/* 43112 chipcommon chipid (OTP chipid) */
+#define	BCM4312_CHIP_ID		0x4312		/* 4312 chipcommon chipid */
+#define BCM4313_CHIP_ID		0x4313		/* 4313 chip id */
+#define	BCM43131_CHIP_ID	43131		/* 43131 chip id (OTP chipid) */
+#define	BCM4315_CHIP_ID		0x4315		/* 4315 chip id */
+#define	BCM4318_CHIP_ID		0x4318		/* 4318 chipcommon chipid */
+#define	BCM4319_CHIP_ID		0x4319		/* 4319 chip id */
+#define	BCM4320_CHIP_ID		0x4320		/* 4320 chipcommon chipid */
+#define	BCM4321_CHIP_ID		0x4321		/* 4321 chipcommon chipid */
+#define	BCM43217_CHIP_ID	43217		/* 43217 chip id (OTP chipid) */
+#define	BCM4322_CHIP_ID		0x4322		/* 4322 chipcommon chipid */
+#define	BCM43221_CHIP_ID	43221		/* 43221 chipcommon chipid (OTP chipid) */
+#define	BCM43222_CHIP_ID	43222		/* 43222 chipcommon chipid */
+#define	BCM43224_CHIP_ID	43224		/* 43224 chipcommon chipid */
+#define	BCM43225_CHIP_ID	43225		/* 43225 chipcommon chipid */
+#define	BCM43227_CHIP_ID	43227		/* 43227 chipcommon chipid */
+#define	BCM43228_CHIP_ID	43228		/* 43228 chipcommon chipid */
+#define	BCM43226_CHIP_ID	43226		/* 43226 chipcommon chipid */
+#define	BCM43231_CHIP_ID	43231		/* 43231 chipcommon chipid (OTP chipid) */
+#define	BCM43234_CHIP_ID	43234		/* 43234 chipcommon chipid */
+#define	BCM43235_CHIP_ID	43235		/* 43235 chipcommon chipid */
+#define	BCM43236_CHIP_ID	43236		/* 43236 chipcommon chipid */
+#define	BCM43237_CHIP_ID	43237		/* 43237 chipcommon chipid */
+#define	BCM43238_CHIP_ID	43238		/* 43238 chipcommon chipid */
+#define	BCM43239_CHIP_ID	43239		/* 43239 chipcommon chipid */
+#define	BCM43420_CHIP_ID	43420		/* 43222 chipcommon chipid (OTP, RBBU) */
+#define	BCM43421_CHIP_ID	43421		/* 43224 chipcommon chipid (OTP, RBBU) */
+#define	BCM43428_CHIP_ID	43428		/* 43228 chipcommon chipid (OTP, RBBU) */
+#define	BCM43431_CHIP_ID	43431		/* 4331  chipcommon chipid (OTP, RBBU) */
+#define	BCM43460_CHIP_ID	43460		/* 4360  chipcommon chipid (OTP, RBBU) */
+#define	BCM4325_CHIP_ID		0x4325		/* 4325 chip id */
+#define	BCM4328_CHIP_ID		0x4328		/* 4328 chip id */
+#define	BCM4329_CHIP_ID		0x4329		/* 4329 chipcommon chipid */
+#define	BCM4331_CHIP_ID		0x4331		/* 4331 chipcommon chipid */
+#define BCM4336_CHIP_ID		0x4336		/* 4336 chipcommon chipid */
+#define BCM43362_CHIP_ID	43362		/* 43362 chipcommon chipid */
+#define BCM4330_CHIP_ID		0x4330		/* 4330 chipcommon chipid */
+#define BCM6362_CHIP_ID		0x6362		/* 6362 chipcommon chipid */
+#define BCM4314_CHIP_ID		0x4314		/* 4314 chipcommon chipid */
+#define BCM43142_CHIP_ID	43142		/* 43142 chipcommon chipid */
+#define BCM43143_CHIP_ID	43143		/* 43143 chipcommon chipid */
+#define	BCM4324_CHIP_ID		0x4324		/* 4324 chipcommon chipid */
+#define	BCM43242_CHIP_ID	43242		/* 43242 chipcommon chipid */
+#define	BCM43243_CHIP_ID	43243		/* 43243 chipcommon chipid */
+#define BCM4334_CHIP_ID		0x4334		/* 4334 chipcommon chipid */
+#define BCM4335_CHIP_ID		0x4335		/* 4335 chipcommon chipid */
+#define BCM4339_CHIP_ID		0x4339		/* 4339 chipcommon chipid */
+#define BCM43349_CHIP_ID	43349			/* 43349(0xA955) chipcommon chipid */
+#define BCM4360_CHIP_ID		0x4360          /* 4360 chipcommon chipid */
+#define BCM4352_CHIP_ID		0x4352          /* 4352 chipcommon chipid */
+#define BCM43526_CHIP_ID	0xAA06
+#define BCM43340_CHIP_ID	43340		/* 43340 chipcommon chipid */
+#define BCM43341_CHIP_ID	43341		/* 43341 chipcommon chipid */
+#define BCM43342_CHIP_ID	43342		/* 43342 chipcommon chipid */
+#define BCM4350_CHIP_ID		0x4350          /* 4350 chipcommon chipid */
+#define BCM4354_CHIP_ID		0x4354          /* 4354 chipcommon chipid */
+#define BCM4356_CHIP_ID		0x4356          /* 4356 chipcommon chipid */
+#define BCM43556_CHIP_ID	0xAA24          /* 43556 chipcommon chipid */
+#define BCM43558_CHIP_ID	0xAA26          /* 43558 chipcommon chipid */
+#define BCM43566_CHIP_ID	0xAA2E          /* 43566 chipcommon chipid */
+#define BCM43568_CHIP_ID	0xAA30          /* 43568 chipcommon chipid */
+#define BCM43569_CHIP_ID	0xAA31          /* 43569 chipcommon chipid */
+#define BCM4350_CHIP(chipid)	((CHIPID(chipid) == BCM4350_CHIP_ID) || \
+				(CHIPID(chipid) == BCM4354_CHIP_ID) || \
+				(CHIPID(chipid) == BCM4356_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43556_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43558_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43566_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43568_CHIP_ID) || \
+				(CHIPID(chipid) == BCM43569_CHIP_ID)) /* 4350 variations */
+#define BCM4345_CHIP_ID		0x4345		/* 4345 chipcommon chipid */
+#define BCM43430_CHIP_ID	43430		/* 43430 chipcommon chipid */
+
+#define BCM43602_CHIP_ID	0xaa52		/* 43602 chipcommon chipid */
+
+#define	BCM4342_CHIP_ID		4342		/* 4342 chipcommon chipid (OTP, RBBU) */
+#define	BCM4402_CHIP_ID		0x4402		/* 4402 chipid */
+#define	BCM4704_CHIP_ID		0x4704		/* 4704 chipcommon chipid */
+#define	BCM4706_CHIP_ID		0x5300		/* 4706 chipcommon chipid */
+#define BCM4707_CHIP_ID		53010		/* 4707 chipcommon chipid */
+#define BCM53018_CHIP_ID	53018		/* 53018 chipcommon chipid */
+#define BCM4707_CHIP(chipid)	(((chipid) == BCM4707_CHIP_ID) || ((chipid) == BCM53018_CHIP_ID))
+#define	BCM4710_CHIP_ID		0x4710		/* 4710 chipid */
+#define	BCM4712_CHIP_ID		0x4712		/* 4712 chipcommon chipid */
+#define	BCM4716_CHIP_ID		0x4716		/* 4716 chipcommon chipid */
+#define	BCM47162_CHIP_ID	47162		/* 47162 chipcommon chipid */
+#define	BCM4748_CHIP_ID		0x4748		/* 4716 chipcommon chipid (OTP, RBBU) */
+#define	BCM4749_CHIP_ID		0x4749		/* 5357 chipcommon chipid (OTP, RBBU) */
+#define BCM4785_CHIP_ID		0x4785		/* 4785 chipcommon chipid */
+#define	BCM5350_CHIP_ID		0x5350		/* 5350 chipcommon chipid */
+#define	BCM5352_CHIP_ID		0x5352		/* 5352 chipcommon chipid */
+#define	BCM5354_CHIP_ID		0x5354		/* 5354 chipcommon chipid */
+#define BCM5365_CHIP_ID		0x5365		/* 5365 chipcommon chipid */
+#define	BCM5356_CHIP_ID		0x5356		/* 5356 chipcommon chipid */
+#define	BCM5357_CHIP_ID		0x5357		/* 5357 chipcommon chipid */
+#define	BCM53572_CHIP_ID	53572		/* 53572 chipcommon chipid */
+
+/* Package IDs */
+#define	BCM4303_PKG_ID		2		/* 4303 package id */
+#define	BCM4309_PKG_ID		1		/* 4309 package id */
+#define	BCM4712LARGE_PKG_ID	0		/* 340pin 4712 package id */
+#define	BCM4712SMALL_PKG_ID	1		/* 200pin 4712 package id */
+#define	BCM4712MID_PKG_ID	2		/* 225pin 4712 package id */
+#define BCM4328USBD11G_PKG_ID	2		/* 4328 802.11g USB package id */
+#define BCM4328USBDUAL_PKG_ID	3		/* 4328 802.11a/g USB package id */
+#define BCM4328SDIOD11G_PKG_ID	4		/* 4328 802.11g SDIO package id */
+#define BCM4328SDIODUAL_PKG_ID	5		/* 4328 802.11a/g SDIO package id */
+#define BCM4329_289PIN_PKG_ID	0		/* 4329 289-pin package id */
+#define BCM4329_182PIN_PKG_ID	1		/* 4329N 182-pin package id */
+#define BCM5354E_PKG_ID		1		/* 5354E package id */
+#define	BCM4716_PKG_ID		8		/* 4716 package id */
+#define	BCM4717_PKG_ID		9		/* 4717 package id */
+#define	BCM4718_PKG_ID		10		/* 4718 package id */
+#define BCM5356_PKG_NONMODE	1		/* 5356 package without nmode support */
+#define BCM5358U_PKG_ID		8		/* 5358U package id */
+#define BCM5358_PKG_ID		9		/* 5358 package id */
+#define BCM47186_PKG_ID		10		/* 47186 package id */
+#define BCM5357_PKG_ID		11		/* 5357 package id */
+#define BCM5356U_PKG_ID		12		/* 5356U package id */
+#define BCM53572_PKG_ID		8		/* 53572 package id */
+#define BCM5357C0_PKG_ID	8		/* 5357c0 package id (the same as 53572) */
+#define BCM47188_PKG_ID		9		/* 47188 package id */
+#define BCM5358C0_PKG_ID	0xa		/* 5358c0 package id */
+#define BCM5356C0_PKG_ID	0xb		/* 5356c0 package id */
+#define BCM4331TT_PKG_ID        8		/* 4331 12x12 package id */
+#define BCM4331TN_PKG_ID        9		/* 4331 12x9 package id */
+#define BCM4331TNA0_PKG_ID     0xb		/* 4331 12x9 package id */
+#define	BCM4706L_PKG_ID		1		/* 4706L package id */
+
+#define HDLSIM5350_PKG_ID	1		/* HDL simulator package id for a 5350 */
+#define HDLSIM_PKG_ID		14		/* HDL simulator package id */
+#define HWSIM_PKG_ID		15		/* Hardware simulator package id */
+#define BCM43224_FAB_CSM	0x8		/* the chip is manufactured by CSM */
+#define BCM43224_FAB_SMIC	0xa		/* the chip is manufactured by SMIC */
+#define BCM4336_WLBGA_PKG_ID	0x8
+#define BCM4330_WLBGA_PKG_ID	0x0
+#define BCM4314PCIE_ARM_PKG_ID		(8 | 0)	/* 4314 QFN PCI package id, bit 3 tie high */
+#define BCM4314SDIO_PKG_ID		(8 | 1)	/* 4314 QFN SDIO package id */
+#define BCM4314PCIE_PKG_ID		(8 | 2)	/* 4314 QFN PCI (ARM-less) package id */
+#define BCM4314SDIO_ARM_PKG_ID		(8 | 3)	/* 4314 QFN SDIO (ARM-less) package id */
+#define BCM4314SDIO_FPBGA_PKG_ID	(8 | 4)	/* 4314 FpBGA SDIO package id */
+#define BCM4314DEV_PKG_ID		(8 | 6)	/* 4314 Development package id */
+
+#define BCM4707_PKG_ID		1		/* 4707 package id */
+#define BCM4708_PKG_ID		2		/* 4708 package id */
+#define BCM4709_PKG_ID		0		/* 4709 package id */
+
+#define PCIXX21_FLASHMEDIA0_ID	0x8033		/* TI PCI xx21 Standard Host Controller */
+#define PCIXX21_SDIOH0_ID	0x8034		/* TI PCI xx21 Standard Host Controller */
+
+#define BCM4335_WLCSP_PKG_ID	(0x0)	/* WLCSP Module/Mobile SDIO/HSIC */
+#define BCM4335_FCBGA_PKG_ID	(0x1)	/* FCBGA PC/Embedded/Media PCIE/SDIO */
+#define BCM4335_WLBGA_PKG_ID	(0x2)	/* WLBGA COB/Mobile SDIO/HSIC */
+#define BCM4335_FCBGAD_PKG_ID	(0x3)	/* FCBGA Debug/Dev, all i/f's */
+#define BCM4335_PKG_MASK	(0x3)
+
+/* boardflags */
+#define	BFL_BTC2WIRE		0x00000001  /* old 2wire Bluetooth coexistence, OBSOLETE */
+#define BFL_BTCOEX      0x00000001      /* Board supports BTCOEX */
+#define	BFL_PACTRL		0x00000002  /* Board has gpio 9 controlling the PA */
+#define BFL_AIRLINEMODE	0x00000004  /* Board implements gpio 13 radio disable indication, UNUSED */
+#define	BFL_ADCDIV		0x00000008  /* Board has the rssi ADC divider */
+#define BFL_DIS_256QAM		0x00000008
+#define	BFL_ENETROBO		0x00000010  /* Board has robo switch or core */
+#define	BFL_NOPLLDOWN		0x00000020  /* Not ok to power down the chip pll and oscillator */
+#define	BFL_CCKHIPWR		0x00000040  /* Can do high-power CCK transmission */
+#define	BFL_ENETADM		0x00000080  /* Board has ADMtek switch */
+#define	BFL_ENETVLAN		0x00000100  /* Board has VLAN capability */
+#define	BFL_UNUSED		0x00000200
+#define BFL_NOPCI		0x00000400  /* Board leaves PCI floating */
+#define BFL_FEM			0x00000800  /* Board supports the Front End Module */
+#define BFL_EXTLNA		0x00001000  /* Board has an external LNA in 2.4GHz band */
+#define BFL_HGPA		0x00002000  /* Board has a high gain PA */
+#define	BFL_BTC2WIRE_ALTGPIO	0x00004000  /* Board's BTC 2wire is in the alternate gpios */
+#define	BFL_ALTIQ		0x00008000  /* Alternate I/Q settings */
+#define BFL_NOPA		0x00010000  /* Board has no PA */
+#define BFL_RSSIINV		0x00020000  /* Board's RSSI uses positive slope(not TSSI) */
+#define BFL_PAREF		0x00040000  /* Board uses the PARef LDO */
+#define BFL_3TSWITCH		0x00080000  /* Board uses a triple throw switch shared with BT */
+#define BFL_PHASESHIFT		0x00100000  /* Board can support phase shifter */
+#define BFL_BUCKBOOST		0x00200000  /* Power topology uses BUCKBOOST */
+#define BFL_FEM_BT		0x00400000  /* Board has FEM and switch to share antenna w/ BT */
+#define BFL_NOCBUCK		0x00800000  /* Power topology doesn't use CBUCK */
+#define BFL_CCKFAVOREVM		0x01000000  /* Favor CCK EVM over spectral mask */
+#define BFL_PALDO		0x02000000  /* Power topology uses PALDO */
+#define BFL_LNLDO2_2P5		0x04000000  /* Select 2.5V as LNLDO2 output voltage */
+#define BFL_FASTPWR		0x08000000
+#define BFL_UCPWRCTL_MININDX	0x08000000  /* Enforce min power index to avoid FEM damage */
+#define BFL_EXTLNA_5GHz		0x10000000  /* Board has an external LNA in 5GHz band */
+#define BFL_TRSW_1by2		0x20000000  /* Board has 2 TRSW's in 1by2 designs */
+#define BFL_GAINBOOSTA01        0x20000000  /* 5g Gainboost for core0 and core1 */
+#define BFL_LO_TRSW_R_5GHz	0x40000000  /* In 5G do not throw TRSW to T for clipLO gain */
+#define BFL_ELNA_GAINDEF	0x80000000  /* Backoff InitGain based on elna_2g/5g field
+					     * when this flag is set
+					     */
+#define BFL_EXTLNA_TX	0x20000000	/* Temp boardflag to indicate to */
+
+/* boardflags2 */
+#define BFL2_RXBB_INT_REG_DIS	0x00000001  /* Board has an external rxbb regulator */
+#define BFL2_APLL_WAR		0x00000002  /* Flag to implement alternative A-band PLL settings */
+#define BFL2_TXPWRCTRL_EN	0x00000004  /* Board permits enabling TX Power Control */
+#define BFL2_2X4_DIV		0x00000008  /* Board supports the 2X4 diversity switch */
+#define BFL2_5G_PWRGAIN		0x00000010  /* Board supports 5G band power gain */
+#define BFL2_PCIEWAR_OVR	0x00000020  /* Board overrides ASPM and Clkreq settings */
+#define BFL2_CAESERS_BRD	0x00000040  /* Board is Caesers brd (unused by sw) */
+#define BFL2_BTC3WIRE		0x00000080  /* Board support legacy 3 wire or 4 wire */
+#define BFL2_BTCLEGACY          0x00000080  /* Board support legacy 3/4 wire, to replace
+					     * BFL2_BTC3WIRE
+					     */
+#define BFL2_SKWRKFEM_BRD	0x00000100  /* 4321mcm93 board uses Skyworks FEM */
+#define BFL2_SPUR_WAR		0x00000200  /* Board has a WAR for clock-harmonic spurs */
+#define BFL2_GPLL_WAR		0x00000400  /* Flag to narrow G-band PLL loop b/w */
+#define BFL2_TRISTATE_LED	0x00000800  /* Tri-state the LED */
+#define BFL2_SINGLEANT_CCK	0x00001000  /* Tx CCK pkts on Ant 0 only */
+#define BFL2_2G_SPUR_WAR	0x00002000  /* WAR to reduce and avoid clock-harmonic spurs in 2G */
+#define BFL2_BPHY_ALL_TXCORES	0x00004000  /* Transmit bphy frames using all tx cores */
+#define BFL2_FCC_BANDEDGE_WAR	0x00008000  /* Activates WAR to improve FCC bandedge performance */
+#define BFL2_DAC_SPUR_IMPROVEMENT 0x00008000       /* Reducing DAC Spurs */
+#define BFL2_GPLL_WAR2	        0x00010000  /* Flag to widen G-band PLL loop b/w */
+#define BFL2_IPALVLSHIFT_3P3    0x00020000
+#define BFL2_INTERNDET_TXIQCAL  0x00040000  /* Use internal envelope detector for TX IQCAL */
+#define BFL2_XTALBUFOUTEN       0x00080000  /* Keep the buffered Xtal output from radio on */
+				/* Most drivers will turn it off without this flag */
+				/* to save power. */
+
+#define BFL2_ANAPACTRL_2G	0x00100000  /* 2G ext PAs are controlled by analog PA ctrl lines */
+#define BFL2_ANAPACTRL_5G	0x00200000  /* 5G ext PAs are controlled by analog PA ctrl lines */
+#define BFL2_ELNACTRL_TRSW_2G	0x00400000  /* AZW4329: 2G gmode_elna_gain controls TR Switch */
+#define BFL2_BT_SHARE_ANT0	0x00800000  /* share core0 antenna with BT */
+#define BFL2_TEMPSENSE_HIGHER	0x01000000  /* The tempsense threshold can sustain higher value
+					     * than programmed. The exact delta is decided by
+					     * driver per chip/boardtype. This can be used
+					     * when tempsense qualification happens after shipment
+					     */
+#define BFL2_BTC3WIREONLY       0x02000000  /* standard 3 wire btc only.  4 wire not supported */
+#define BFL2_PWR_NOMINAL	0x04000000  /* 0: power reduction on, 1: no power reduction */
+#define BFL2_EXTLNA_PWRSAVE	0x08000000  /* boardflag to enable ucode to apply power save */
+						/* ucode control of eLNA during Tx */
+#define BFL2_4313_RADIOREG	0x10000000  /* board rework */
+#define BFL2_DYNAMIC_VMID	0x10000000  /* enable dynamic Vmid in idle TSSI CAL for 4331 */
+
+#define BFL2_SDR_EN		0x20000000  /* SDR enabled or disabled */
+#define BFL2_LNA1BYPFORTR2G	0x40000000  /* acphy, enable lna1 bypass for clip gain, 2g */
+#define BFL2_LNA1BYPFORTR5G	0x80000000  /* acphy, enable lna1 bypass for clip gain, 5g */
+
+/* SROM 11 - 11ac boardflag definitions */
+#define BFL_SROM11_BTCOEX  0x00000001  /* Board supports BTCOEX */
+#define BFL_SROM11_WLAN_BT_SH_XTL  0x00000002  /* bluetooth and wlan share same crystal */
+#define BFL_SROM11_EXTLNA	0x00001000  /* Board has an external LNA in 2.4GHz band */
+#define BFL_SROM11_EXTLNA_5GHz	0x10000000  /* Board has an external LNA in 5GHz band */
+#define BFL_SROM11_GAINBOOSTA01	0x20000000  /* 5g Gainboost for core0 and core1 */
+#define BFL2_SROM11_APLL_WAR	0x00000002  /* Flag to implement alternative A-band PLL settings */
+#define BFL2_SROM11_ANAPACTRL_2G  0x00100000  /* 2G ext PAs are ctrl-ed by analog PA ctrl lines */
+#define BFL2_SROM11_ANAPACTRL_5G  0x00200000  /* 5G ext PAs are ctrl-ed by analog PA ctrl lines */
+#define BFL2_SROM11_SINGLEANT_CCK	0x00001000  /* Tx CCK pkts on Ant 0 only */
+
+/* boardflags3 */
+#define BFL3_FEMCTRL_SUB	  0x00000007  /* acphy, subrevs of femctrl on top of srom_femctrl */
+#define BFL3_RCAL_WAR		  0x00000008  /* acphy, rcal war active on this board (4335a0) */
+#define BFL3_TXGAINTBLID	  0x00000070  /* acphy, txgain table id */
+#define BFL3_TXGAINTBLID_SHIFT	  0x4         /* acphy, txgain table id shift bit */
+#define BFL3_TSSI_DIV_WAR	  0x00000080  /* acphy, Separate paparam for 20/40/80 */
+#define BFL3_TSSI_DIV_WAR_SHIFT	  0x7         /* acphy, Separate paparam for 20/40/80 shift bit */
+#define BFL3_FEMTBL_FROM_NVRAM    0x00000100  /* acphy, femctrl table is read from nvram */
+#define BFL3_FEMTBL_FROM_NVRAM_SHIFT 0x8         /* acphy, femctrl table is read from nvram */
+#define BFL3_AGC_CFG_2G           0x00000200  /* acphy, gain control configuration for 2G */
+#define BFL3_AGC_CFG_5G           0x00000400  /* acphy, gain control configuration for 5G */
+#define BFL3_PPR_BIT_EXT          0x00000800  /* acphy, bit position for 1bit extension for ppr */
+#define BFL3_PPR_BIT_EXT_SHIFT    11          /* acphy, bit shift for 1bit extension for ppr */
+#define BFL3_BBPLL_SPR_MODE_DIS	  0x00001000  /* acphy, disables bbpll spur modes */
+#define BFL3_RCAL_OTP_VAL_EN      0x00002000  /* acphy, to read rcal_trim value from otp */
+#define BFL3_2GTXGAINTBL_BLANK	  0x00004000  /* acphy, blank the first X ticks of 2g gaintbl */
+#define BFL3_2GTXGAINTBL_BLANK_SHIFT 14       /* acphy, blank the first X ticks of 2g gaintbl */
+#define BFL3_5GTXGAINTBL_BLANK	  0x00008000  /* acphy, blank the first X ticks of 5g gaintbl */
+#define BFL3_5GTXGAINTBL_BLANK_SHIFT 15       /* acphy, blank the first X ticks of 5g gaintbl */
+#define BFL3_PHASETRACK_MAX_ALPHABETA	  0x00010000  /* acphy, to max out alpha,beta to 511 */
+#define BFL3_PHASETRACK_MAX_ALPHABETA_SHIFT 16       /* acphy, to max out alpha,beta to 511 */
+/* acphy, to use backed off gaintbl for lte-coex */
+#define BFL3_LTECOEX_GAINTBL_EN           0x00060000
+/* acphy, to use backed off gaintbl for lte-coex */
+#define BFL3_LTECOEX_GAINTBL_EN_SHIFT 17
+#define BFL3_5G_SPUR_WAR          0x00080000  /* acphy, enable spur WAR in 5G band */
+
+/* acphy: lpmode2g and lpmode_5g related boardflags */
+#define BFL3_ACPHY_LPMODE_2G	  0x00300000  /* bits 20:21 for lpmode_2g choice */
+#define BFL3_ACPHY_LPMODE_2G_SHIFT	  20
+
+#define BFL3_ACPHY_LPMODE_5G	  0x00C00000  /* bits 22:23 for lpmode_5g choice */
+#define BFL3_ACPHY_LPMODE_5G_SHIFT	  22
+
+#define BFL3_EXT_LPO_ISCLOCK      0x02000000  /* External LPO is clock, not x-tal */
+#define BFL3_FORCE_INT_LPO_SEL    0x04000000  /* Force internal lpo */
+#define BFL3_FORCE_EXT_LPO_SEL    0x08000000  /* Force external lpo */
+
+#define BFL3_EN_BRCM_IMPBF        0x10000000  /* acphy, Allow BRCM Implicit TxBF */
+#define BFL3_AVVMID_FROM_NVRAM    0x40000000  /* Read Av Vmid from NVRAM  */
+#define BFL3_VLIN_EN_FROM_NVRAM    0x80000000  /* Read Vlin En from NVRAM  */
+
+#define BFL3_AVVMID_FROM_NVRAM_SHIFT   30   /* Read Av Vmid from NVRAM  */
+#define BFL3_VLIN_EN_FROM_NVRAM_SHIFT   31   /* Enable Vlin  from NVRAM  */
+
+
+/* board specific GPIO assignment, gpio 0-3 are also customer-configurable led */
+#define	BOARD_GPIO_BTC3W_IN	0x850	/* bit 4 is RF_ACTIVE, bit 6 is STATUS, bit 11 is PRI */
+#define	BOARD_GPIO_BTC3W_OUT	0x020	/* bit 5 is TX_CONF */
+#define	BOARD_GPIO_BTCMOD_IN	0x010	/* bit 4 is the alternate BT Coexistence Input */
+#define	BOARD_GPIO_BTCMOD_OUT	0x020	/* bit 5 is the alternate BT Coexistence Out */
+#define	BOARD_GPIO_BTC_IN	0x080	/* bit 7 is BT Coexistence Input */
+#define	BOARD_GPIO_BTC_OUT	0x100	/* bit 8 is BT Coexistence Out */
+#define	BOARD_GPIO_PACTRL	0x200	/* bit 9 controls the PA on new 4306 boards */
+#define BOARD_GPIO_12		0x1000	/* gpio 12 */
+#define BOARD_GPIO_13		0x2000	/* gpio 13 */
+#define BOARD_GPIO_BTC4_IN	0x0800	/* gpio 11, coex4, in */
+#define BOARD_GPIO_BTC4_BT	0x2000	/* gpio 12, coex4, bt active */
+#define BOARD_GPIO_BTC4_STAT	0x4000	/* gpio 14, coex4, status */
+#define BOARD_GPIO_BTC4_WLAN	0x8000	/* gpio 15, coex4, wlan active */
+#define	BOARD_GPIO_1_WLAN_PWR	0x02	/* throttle WLAN power on X21 board */
+#define	BOARD_GPIO_2_WLAN_PWR	0x04	/* throttle WLAN power on X29C board */
+#define	BOARD_GPIO_3_WLAN_PWR	0x08	/* throttle WLAN power on X28 board */
+#define	BOARD_GPIO_4_WLAN_PWR	0x10	/* throttle WLAN power on X19 board */
+
+#define GPIO_BTC4W_OUT_4312  0x010  /* bit 4 is BT_IODISABLE */
+#define GPIO_BTC4W_OUT_43224  0x020  /* bit 5 is BT_IODISABLE */
+#define GPIO_BTC4W_OUT_43224_SHARED  0x0e0  /* bit 5 is BT_IODISABLE */
+#define GPIO_BTC4W_OUT_43225  0x0e0  /* bit 5 BT_IODISABLE, bit 6 SW_BT, bit 7 SW_WL */
+#define GPIO_BTC4W_OUT_43421  0x020  /* bit 5 is BT_IODISABLE */
+#define GPIO_BTC4W_OUT_4313  0x060  /* bit 5 SW_BT, bit 6 SW_WL */
+#define GPIO_BTC4W_OUT_4331_SHARED  0x010  /* GPIO 4  */
+
+#define	PCI_CFG_GPIO_SCS	0x10	/* PCI config space bit 4 for 4306c0 slow clock source */
+#define PCI_CFG_GPIO_HWRAD	0x20	/* PCI config space GPIO 13 for hw radio disable */
+#define PCI_CFG_GPIO_XTAL	0x40	/* PCI config space GPIO 14 for Xtal power-up */
+#define PCI_CFG_GPIO_PLL	0x80	/* PCI config space GPIO 15 for PLL power-down */
+
+/* power control defines */
+#define PLL_DELAY		150		/* us pll on delay */
+#define FREF_DELAY		200		/* us fref change delay */
+#define MIN_SLOW_CLK		32		/* us Slow clock period */
+#define	XTAL_ON_DELAY		1000		/* us crystal power-on delay */
+
+
+/* 43341 Boards */
+#define BCM943341WLABGS_SSID	0x062d
+
+/* 43342 Boards */
+#define BCM943342FCAGBI_SSID	0x0641
+
+/* 43602 boards; it is not yet clear which boards will be created. */
+#define BCM943602RSVD1_SSID	0x06a5
+#define BCM943602RSVD2_SSID	0x06a6
+
+/* # of GPIO pins */
+#define GPIO_NUMPINS		32
+
+/* These values are used by dhd host driver. */
+#define RDL_RAM_BASE_4319 0x60000000
+#define RDL_RAM_BASE_4329 0x60000000
+#define RDL_RAM_SIZE_4319 0x48000
+#define RDL_RAM_SIZE_4329  0x48000
+#define RDL_RAM_SIZE_43236 0x70000
+#define RDL_RAM_BASE_43236 0x60000000
+#define RDL_RAM_SIZE_4328 0x60000
+#define RDL_RAM_BASE_4328 0x80000000
+#define RDL_RAM_SIZE_4322 0x60000
+#define RDL_RAM_BASE_4322 0x60000000
+#define RDL_RAM_SIZE_4360  0xA0000
+#define RDL_RAM_BASE_4360  0x60000000
+#define RDL_RAM_SIZE_43242  0x90000
+#define RDL_RAM_BASE_43242  0x60000000
+#define RDL_RAM_SIZE_43143  0x70000
+#define RDL_RAM_BASE_43143  0x60000000
+#define RDL_RAM_SIZE_4350  0xC0000
+#define RDL_RAM_BASE_4350  0x180800
+
+/* Generic defs for nvram "muxenab" bits.
+ * Note: these differ for 4335a0; refer to bcmchipc.h for specific mux options.
+ */
+#define MUXENAB_UART		0x00000001
+#define MUXENAB_GPIO		0x00000002
+#define MUXENAB_ERCX		0x00000004	/* External Radio BT coex */
+#define MUXENAB_JTAG		0x00000008
+#define MUXENAB_HOST_WAKE	0x00000010	/* configure GPIO for SDIO host_wake */
+#define MUXENAB_I2S_EN		0x00000020
+#define MUXENAB_I2S_MASTER	0x00000040
+#define MUXENAB_I2S_FULL	0x00000080
+#define MUXENAB_SFLASH		0x00000100
+#define MUXENAB_RFSWCTRL0	0x00000200
+#define MUXENAB_RFSWCTRL1	0x00000400
+#define MUXENAB_RFSWCTRL2	0x00000800
+#define MUXENAB_SECI		0x00001000
+#define MUXENAB_BT_LEGACY	0x00002000
+#define MUXENAB_HOST_WAKE1	0x00004000	/* configure alternative GPIO for SDIO host_wake */
+
+/* Boot flags */
+#define FLASH_KERNEL_NFLASH	0x00000001
+#define FLASH_BOOT_NFLASH	0x00000002
+
+#endif /* _BCMDEVS_H */
diff --git a/drivers/staging/edison-bcm43340/include/bcmendian.h b/drivers/staging/edison-bcm43340/include/bcmendian.h
new file mode 100644
index 0000000..ff527f6
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/bcmendian.h
@@ -0,0 +1,329 @@
+/*
+ * Byte order utilities
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *  $Id: bcmendian.h 402715 2013-05-16 18:50:09Z $
+ *
+ * This file by default provides proper behavior on little-endian architectures.
+ * On big-endian architectures, IL_BIGENDIAN should be defined.
+ */
+
+#ifndef _BCMENDIAN_H_
+#define _BCMENDIAN_H_
+
+#include <typedefs.h>
+
+/* Reverse the bytes in a 16-bit value */
+#define BCMSWAP16(val) \
+	((uint16)((((uint16)(val) & (uint16)0x00ffU) << 8) | \
+		  (((uint16)(val) & (uint16)0xff00U) >> 8)))
+
+/* Reverse the bytes in a 32-bit value */
+#define BCMSWAP32(val) \
+	((uint32)((((uint32)(val) & (uint32)0x000000ffU) << 24) | \
+		  (((uint32)(val) & (uint32)0x0000ff00U) <<  8) | \
+		  (((uint32)(val) & (uint32)0x00ff0000U) >>  8) | \
+		  (((uint32)(val) & (uint32)0xff000000U) >> 24)))
+
+/* Reverse the two 16-bit halves of a 32-bit value */
+#define BCMSWAP32BY16(val) \
+	((uint32)((((uint32)(val) & (uint32)0x0000ffffU) << 16) | \
+		  (((uint32)(val) & (uint32)0xffff0000U) >> 16)))
+
+/* Reverse the bytes in a 64-bit value */
+#define BCMSWAP64(val) \
+	((uint64)((((uint64)(val) & 0x00000000000000ffULL) << 56) | \
+	          (((uint64)(val) & 0x000000000000ff00ULL) << 40) | \
+	          (((uint64)(val) & 0x0000000000ff0000ULL) << 24) | \
+	          (((uint64)(val) & 0x00000000ff000000ULL) <<  8) | \
+	          (((uint64)(val) & 0x000000ff00000000ULL) >>  8) | \
+	          (((uint64)(val) & 0x0000ff0000000000ULL) >> 24) | \
+	          (((uint64)(val) & 0x00ff000000000000ULL) >> 40) | \
+	          (((uint64)(val) & 0xff00000000000000ULL) >> 56)))
+
+/* Reverse the two 32-bit halves of a 64-bit value */
+#define BCMSWAP64BY32(val) \
+	((uint64)((((uint64)(val) & 0x00000000ffffffffULL) << 32) | \
+	          (((uint64)(val) & 0xffffffff00000000ULL) >> 32)))
+
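+/* Example values: BCMSWAP16(0x1234) == 0x3412,
+ * BCMSWAP32(0x12345678) == 0x78563412 and
+ * BCMSWAP32BY16(0x12345678) == 0x56781234.
+ */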
+
+/* Byte swapping macros
+ *    Host <=> Network (Big Endian) for 16- and 32-bit values
+ *    Host <=> Little-Endian for 16- and 32-bit values
+ */
+#ifndef hton16
+#define HTON16(i) BCMSWAP16(i)
+#define	hton16(i) bcmswap16(i)
+#define	HTON32(i) BCMSWAP32(i)
+#define	hton32(i) bcmswap32(i)
+#define	NTOH16(i) BCMSWAP16(i)
+#define	ntoh16(i) bcmswap16(i)
+#define	NTOH32(i) BCMSWAP32(i)
+#define	ntoh32(i) bcmswap32(i)
+#define LTOH16(i) (i)
+#define ltoh16(i) (i)
+#define LTOH32(i) (i)
+#define ltoh32(i) (i)
+#define HTOL16(i) (i)
+#define htol16(i) (i)
+#define HTOL32(i) (i)
+#define htol32(i) (i)
+#define HTOL64(i) (i)
+#define htol64(i) (i)
+#endif /* hton16 */
+
+#define ltoh16_buf(buf, i)
+#define htol16_buf(buf, i)
+
+/* Unaligned loads and stores in host byte order */
+#define load32_ua(a)		ltoh32_ua(a)
+#define store32_ua(a, v)	htol32_ua_store(v, a)
+#define load16_ua(a)		ltoh16_ua(a)
+#define store16_ua(a, v)	htol16_ua_store(v, a)
+
+#define _LTOH16_UA(cp)	((cp)[0] | ((cp)[1] << 8))
+#define _LTOH32_UA(cp)	((cp)[0] | ((cp)[1] << 8) | ((cp)[2] << 16) | ((cp)[3] << 24))
+#define _NTOH16_UA(cp)	(((cp)[0] << 8) | (cp)[1])
+#define _NTOH32_UA(cp)	(((cp)[0] << 24) | ((cp)[1] << 16) | ((cp)[2] << 8) | (cp)[3])
+
+#define ltoh_ua(ptr) \
+	(sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \
+	 sizeof(*(ptr)) == sizeof(uint16) ? _LTOH16_UA((const uint8 *)(ptr)) : \
+	 sizeof(*(ptr)) == sizeof(uint32) ? _LTOH32_UA((const uint8 *)(ptr)) : \
+	 *(uint8 *)0)
+
+#define ntoh_ua(ptr) \
+	(sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \
+	 sizeof(*(ptr)) == sizeof(uint16) ? _NTOH16_UA((const uint8 *)(ptr)) : \
+	 sizeof(*(ptr)) == sizeof(uint32) ? _NTOH32_UA((const uint8 *)(ptr)) : \
+	 *(uint8 *)0)
+
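+/* ltoh_ua()/ntoh_ua() dispatch on the operand width via sizeof; an
+ * unsupported width falls through to the null dereference, a deliberate
+ * trap. Example (illustrative; buf is a hypothetical byte buffer holding
+ * a little-endian 16-bit length at offset 2):
+ *
+ *	uint16 len = ltoh_ua((const uint16 *)(buf + 2));
+ */
+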
+#ifdef __GNUC__
+
+/* GNU macro versions avoid referencing the argument multiple times, while also
+ * avoiding the -fno-inline used in ROM builds.
+ */
+
+#define bcmswap16(val) ({ \
+	uint16 _val = (val); \
+	BCMSWAP16(_val); \
+})
+
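+/* The statement-expression form evaluates its argument exactly once, so
+ * side effects are safe: bcmswap16(*p++) advances p by one element, whereas
+ * the plain BCMSWAP16(*p++) macro would evaluate *p++ twice.
+ */
+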
+#define bcmswap32(val) ({ \
+	uint32 _val = (val); \
+	BCMSWAP32(_val); \
+})
+
+#define bcmswap64(val) ({ \
+	uint64 _val = (val); \
+	BCMSWAP64(_val); \
+})
+
+#define bcmswap32by16(val) ({ \
+	uint32 _val = (val); \
+	BCMSWAP32BY16(_val); \
+})
+
+#define bcmswap16_buf(buf, len) ({ \
+	uint16 *_buf = (uint16 *)(buf); \
+	uint _wds = (len) / 2; \
+	while (_wds--) { \
+		*_buf = bcmswap16(*_buf); \
+		_buf++; \
+	} \
+})
+
+#define htol16_ua_store(val, bytes) ({ \
+	uint16 _val = (val); \
+	uint8 *_bytes = (uint8 *)(bytes); \
+	_bytes[0] = _val & 0xff; \
+	_bytes[1] = _val >> 8; \
+})
+
+#define htol32_ua_store(val, bytes) ({ \
+	uint32 _val = (val); \
+	uint8 *_bytes = (uint8 *)(bytes); \
+	_bytes[0] = _val & 0xff; \
+	_bytes[1] = (_val >> 8) & 0xff; \
+	_bytes[2] = (_val >> 16) & 0xff; \
+	_bytes[3] = _val >> 24; \
+})
+
+#define hton16_ua_store(val, bytes) ({ \
+	uint16 _val = (val); \
+	uint8 *_bytes = (uint8 *)(bytes); \
+	_bytes[0] = _val >> 8; \
+	_bytes[1] = _val & 0xff; \
+})
+
+#define hton32_ua_store(val, bytes) ({ \
+	uint32 _val = (val); \
+	uint8 *_bytes = (uint8 *)(bytes); \
+	_bytes[0] = _val >> 24; \
+	_bytes[1] = (_val >> 16) & 0xff; \
+	_bytes[2] = (_val >> 8) & 0xff; \
+	_bytes[3] = _val & 0xff; \
+})
+
+#define ltoh16_ua(bytes) ({ \
+	const uint8 *_bytes = (const uint8 *)(bytes); \
+	_LTOH16_UA(_bytes); \
+})
+
+#define ltoh32_ua(bytes) ({ \
+	const uint8 *_bytes = (const uint8 *)(bytes); \
+	_LTOH32_UA(_bytes); \
+})
+
+#define ntoh16_ua(bytes) ({ \
+	const uint8 *_bytes = (const uint8 *)(bytes); \
+	_NTOH16_UA(_bytes); \
+})
+
+#define ntoh32_ua(bytes) ({ \
+	const uint8 *_bytes = (const uint8 *)(bytes); \
+	_NTOH32_UA(_bytes); \
+})
+
+#else /* !__GNUC__ */
+
+/* Inline versions avoid referencing the argument multiple times */
+static INLINE uint16
+bcmswap16(uint16 val)
+{
+	return BCMSWAP16(val);
+}
+
+static INLINE uint32
+bcmswap32(uint32 val)
+{
+	return BCMSWAP32(val);
+}
+
+static INLINE uint64
+bcmswap64(uint64 val)
+{
+	return BCMSWAP64(val);
+}
+
+static INLINE uint32
+bcmswap32by16(uint32 val)
+{
+	return BCMSWAP32BY16(val);
+}
+
+/* Reverse pairs of bytes in a buffer (not for high-performance use) */
+/* buf	- start of buffer of shorts to swap */
+/* len  - byte length of buffer */
+static INLINE void
+bcmswap16_buf(uint16 *buf, uint len)
+{
+	len = len / 2;
+
+	while (len--) {
+		*buf = bcmswap16(*buf);
+		buf++;
+	}
+}
+
+/*
+ * Store 16-bit value to unaligned little-endian byte array.
+ */
+static INLINE void
+htol16_ua_store(uint16 val, uint8 *bytes)
+{
+	bytes[0] = val & 0xff;
+	bytes[1] = val >> 8;
+}
+
+/*
+ * Store 32-bit value to unaligned little-endian byte array.
+ */
+static INLINE void
+htol32_ua_store(uint32 val, uint8 *bytes)
+{
+	bytes[0] = val & 0xff;
+	bytes[1] = (val >> 8) & 0xff;
+	bytes[2] = (val >> 16) & 0xff;
+	bytes[3] = val >> 24;
+}
+
+/*
+ * Store 16-bit value to unaligned network-(big-)endian byte array.
+ */
+static INLINE void
+hton16_ua_store(uint16 val, uint8 *bytes)
+{
+	bytes[0] = val >> 8;
+	bytes[1] = val & 0xff;
+}
+
+/*
+ * Store 32-bit value to unaligned network-(big-)endian byte array.
+ */
+static INLINE void
+hton32_ua_store(uint32 val, uint8 *bytes)
+{
+	bytes[0] = val >> 24;
+	bytes[1] = (val >> 16) & 0xff;
+	bytes[2] = (val >> 8) & 0xff;
+	bytes[3] = val & 0xff;
+}
+
+/*
+ * Load 16-bit value from unaligned little-endian byte array.
+ */
+static INLINE uint16
+ltoh16_ua(const void *bytes)
+{
+	return _LTOH16_UA((const uint8 *)bytes);
+}
+
+/*
+ * Load 32-bit value from unaligned little-endian byte array.
+ */
+static INLINE uint32
+ltoh32_ua(const void *bytes)
+{
+	return _LTOH32_UA((const uint8 *)bytes);
+}
+
+/*
+ * Load 16-bit value from unaligned big-(network-)endian byte array.
+ */
+static INLINE uint16
+ntoh16_ua(const void *bytes)
+{
+	return _NTOH16_UA((const uint8 *)bytes);
+}
+
+/*
+ * Load 32-bit value from unaligned big-(network-)endian byte array.
+ */
+static INLINE uint32
+ntoh32_ua(const void *bytes)
+{
+	return _NTOH32_UA((const uint8 *)bytes);
+}
+
+#endif /* !__GNUC__ */
+#endif /* !_BCMENDIAN_H_ */
diff --git a/drivers/staging/edison-bcm43340/include/bcmperf.h b/drivers/staging/edison-bcm43340/include/bcmperf.h
new file mode 100644
index 0000000..acebfa3
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/bcmperf.h
@@ -0,0 +1,36 @@
+/*
+ * Performance counters software interface.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmperf.h 241182 2011-02-17 21:50:03Z $
+ */
+#ifndef _BCMPERF_H_
+#define _BCMPERF_H_
+/* get cache hits and misses */
+#define BCMPERF_ENABLE_INSTRCOUNT()
+#define BCMPERF_ENABLE_ICACHE_MISS()
+#define BCMPERF_ENABLE_ICACHE_HIT()
+#define	BCMPERF_GETICACHE_MISS(x)	((x) = 0)
+#define	BCMPERF_GETICACHE_HIT(x)	((x) = 0)
+#define	BCMPERF_GETINSTRCOUNT(x)	((x) = 0)
+#endif /* _BCMPERF_H_ */
diff --git a/drivers/staging/edison-bcm43340/include/bcmsdbus.h b/drivers/staging/edison-bcm43340/include/bcmsdbus.h
new file mode 100644
index 0000000..a0cd8dc
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/bcmsdbus.h
@@ -0,0 +1,143 @@
+/*
+ * Definitions for API from sdio common code (bcmsdh) to individual
+ * host controller drivers.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdbus.h 408158 2013-06-17 22:15:35Z $
+ */
+
+#ifndef	_sdio_api_h_
+#define	_sdio_api_h_
+
+
+#define SDIOH_API_RC_SUCCESS                          (0x00)
+#define SDIOH_API_RC_FAIL                             (0x01)
+#define SDIOH_API_SUCCESS(status) ((status) == 0)
+
+#define SDIOH_READ              0	/* Read request */
+#define SDIOH_WRITE             1	/* Write request */
+
+#define SDIOH_DATA_FIX          0	/* Fixed addressing */
+#define SDIOH_DATA_INC          1	/* Incremental addressing */
+
+#define SDIOH_CMD_TYPE_NORMAL   0       /* Normal command */
+#define SDIOH_CMD_TYPE_APPEND   1       /* Append command */
+#define SDIOH_CMD_TYPE_CUTTHRU  2       /* Cut-through command */
+
+#define SDIOH_DATA_PIO          0       /* PIO mode */
+#define SDIOH_DATA_DMA          1       /* DMA mode */
+
+/* Max number of glommed pkts */
+#ifdef CUSTOM_MAX_TXGLOM_SIZE
+#define SDPCM_MAXGLOM_SIZE  CUSTOM_MAX_TXGLOM_SIZE
+#else
+#define SDPCM_MAXGLOM_SIZE	40
+#endif /* CUSTOM_MAX_TXGLOM_SIZE */
+
+#define SDPCM_TXGLOM_CPY 0			/* SDIO 2.0 should use copy mode */
+#define SDPCM_TXGLOM_MDESC	1		/* SDIO 3.0 should use multi-desc mode */
+
+#ifdef CUSTOM_DEF_TXGLOM_SIZE
+#define SDPCM_DEFGLOM_SIZE  CUSTOM_DEF_TXGLOM_SIZE
+#else
+#define SDPCM_DEFGLOM_SIZE SDPCM_MAXGLOM_SIZE
+#endif /* CUSTOM_DEF_TXGLOM_SIZE */
+
+#if SDPCM_DEFGLOM_SIZE > SDPCM_MAXGLOM_SIZE
+#warning "SDPCM_DEFGLOM_SIZE cannot be higher than SDPCM_MAXGLOM_SIZE!!"
+#undef SDPCM_DEFGLOM_SIZE
+#define SDPCM_DEFGLOM_SIZE SDPCM_MAXGLOM_SIZE
+#endif
+
+typedef int SDIOH_API_RC;
+
+/* SDIO host structure */
+typedef struct sdioh_info sdioh_info_t;
+
+/* callback function, taking one arg */
+typedef void (*sdioh_cb_fn_t)(void *);
+
+extern SDIOH_API_RC sdioh_interrupt_register(sdioh_info_t *si, sdioh_cb_fn_t fn, void *argh);
+extern SDIOH_API_RC sdioh_interrupt_deregister(sdioh_info_t *si);
+
+/* query whether SD interrupt is enabled or not */
+extern SDIOH_API_RC sdioh_interrupt_query(sdioh_info_t *si, bool *onoff);
+
+/* enable or disable SD interrupt */
+extern SDIOH_API_RC sdioh_interrupt_set(sdioh_info_t *si, bool enable_disable);
+
+#if defined(DHD_DEBUG)
+extern bool sdioh_interrupt_pending(sdioh_info_t *si);
+#endif
+
+/* read or write one byte using cmd52 */
+extern SDIOH_API_RC sdioh_request_byte(sdioh_info_t *si, uint rw, uint fnc, uint addr, uint8 *byte);
+
+/* read or write 2/4 bytes using cmd53 */
+extern SDIOH_API_RC sdioh_request_word(sdioh_info_t *si, uint cmd_type, uint rw, uint fnc,
+	uint addr, uint32 *word, uint nbyte);
+
+/* read or write any buffer using cmd53 */
+extern SDIOH_API_RC sdioh_request_buffer(sdioh_info_t *si, uint pio_dma, uint fix_inc,
+	uint rw, uint fnc_num, uint32 addr, uint regwidth, uint32 buflen, uint8 *buffer,
+	void *pkt);
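+
+/* Illustrative sketch: a CMD52 single-byte write through the request API
+ * above (rw = SDIOH_WRITE, F1); 'si' comes from sdioh_attach() and 'addr'
+ * is a placeholder register offset.
+ *
+ *	uint8 v = 0x02;
+ *	SDIOH_API_RC rc = sdioh_request_byte(si, SDIOH_WRITE, 1, addr, &v);
+ *	if (!SDIOH_API_SUCCESS(rc))
+ *		return rc;
+ */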
+
+/* get cis data */
+extern SDIOH_API_RC sdioh_cis_read(sdioh_info_t *si, uint func, uint8 *cis, uint32 length);
+
+extern SDIOH_API_RC sdioh_cfg_read(sdioh_info_t *si, uint func, uint32 addr, uint8 *data);
+extern SDIOH_API_RC sdioh_cfg_write(sdioh_info_t *si, uint func, uint32 addr, uint8 *data);
+
+/* query number of io functions */
+extern uint sdioh_query_iofnum(sdioh_info_t *si);
+
+/* handle iovars */
+extern int sdioh_iovar_op(sdioh_info_t *si, const char *name,
+                          void *params, int plen, void *arg, int len, bool set);
+
+/* Issue abort to the specified function and clear controller as needed */
+extern int sdioh_abort(sdioh_info_t *si, uint fnc);
+
+/* Start and Stop SDIO without re-enumerating the SD card. */
+extern int sdioh_start(sdioh_info_t *si, int stage);
+extern int sdioh_stop(sdioh_info_t *si);
+
+/* Wait system lock free */
+extern int sdioh_waitlockfree(sdioh_info_t *si);
+
+/* Reset and re-initialize the device */
+extern int sdioh_sdio_reset(sdioh_info_t *si);
+
+
+
+#if defined(BCMSDIOH_STD)
+	#define SDIOH_SLEEP_ENABLED
+#endif
+extern SDIOH_API_RC sdioh_sleep(sdioh_info_t *si, bool enab);
+
+/* GPIO support */
+extern SDIOH_API_RC sdioh_gpio_init(sdioh_info_t *sd);
+extern bool sdioh_gpioin(sdioh_info_t *sd, uint32 gpio);
+extern SDIOH_API_RC sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio);
+extern SDIOH_API_RC sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab);
+
+#endif /* _sdio_api_h_ */
diff --git a/drivers/staging/edison-bcm43340/include/bcmsdh.h b/drivers/staging/edison-bcm43340/include/bcmsdh.h
new file mode 100644
index 0000000..abe9761
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/bcmsdh.h
@@ -0,0 +1,254 @@
+/*
+ * SDIO host client driver interface of Broadcom HNBU
+ *     export functions to client drivers
+ *     abstract OS and BUS specific details of SDIO
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh.h 455573 2014-02-14 17:49:31Z $
+ */
+
+/**
+ * @file bcmsdh.h
+ */
+
+#ifndef	_bcmsdh_h_
+#define	_bcmsdh_h_
+
+#define BCMSDH_ERROR_VAL	0x0001 /* Error */
+#define BCMSDH_INFO_VAL		0x0002 /* Info */
+extern const uint bcmsdh_msglevel;
+
+#define BCMSDH_ERROR(x)
+#define BCMSDH_INFO(x)
+#define SDIOH_API_ACCESS_RETRY_LIMIT	2
+
+#define BCMSDH_RW_RETRY 1000 /* in us */
+
+#if (defined(BCMSDIOH_STD) || defined(BCMSDIOH_BCM) || defined(BCMSDIOH_SPI))
+#define BCMSDH_ADAPTER
+#endif /* BCMSDIOH_STD || BCMSDIOH_BCM || BCMSDIOH_SPI */
+
+/* forward declarations */
+typedef struct bcmsdh_info bcmsdh_info_t;
+typedef void (*bcmsdh_cb_fn_t)(void *);
+
+extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *sdioh, ulong *regsva);
+/**
+ * BCMSDH API context
+ */
+struct bcmsdh_info
+{
+	bool	init_success;	/* underlying driver successfully attached */
+	void	*sdioh;		/* handler for sdioh */
+	uint32  vendevid;	/* Target Vendor and Device ID on SD bus */
+	osl_t   *osh;
+	bool	regfail;	/* Save status of last reg_read/reg_write call */
+	uint32	sbwad;		/* Save backplane window address */
+	void	*os_cxt;        /* Pointer to per-OS private data */
+};
+
+/* Detach - freeup resources allocated in attach */
+extern int bcmsdh_detach(osl_t *osh, void *sdh);
+
+/* Query if SD device interrupts are enabled */
+extern bool bcmsdh_intr_query(void *sdh);
+
+/* Enable/disable SD interrupt */
+extern int bcmsdh_intr_enable(void *sdh);
+extern int bcmsdh_intr_disable(void *sdh);
+
+/* Register/deregister device interrupt handler. */
+extern int bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh);
+extern int bcmsdh_intr_dereg(void *sdh);
+/* Enable/disable SD card interrupt forward */
+extern void bcmsdh_intr_forward(void *sdh, bool pass);
+
+#if defined(DHD_DEBUG)
+/* Query pending interrupt status from the host controller */
+extern bool bcmsdh_intr_pending(void *sdh);
+#endif
+
+/* Register a callback to be called if and when bcmsdh detects
+ * device removal. No-op in the case of non-removable/hardwired devices.
+ */
+extern int bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh);
+
+/* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface).
+ *   fn:   function number
+ *   addr: unmodified SDIO-space address
+ *   data: data byte to write
+ *   err:  pointer to error code (or NULL)
+ */
+extern uint8 bcmsdh_cfg_read(void *sdh, uint func, uint32 addr, int *err);
+extern void bcmsdh_cfg_write(void *sdh, uint func, uint32 addr, uint8 data, int *err);
+
+/* Read/Write 4bytes from/to cfg space */
+extern uint32 bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err);
+extern void bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err);
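+
+/* Illustrative sketch: reading one config-space byte with the CMD52
+ * interface above. Function 0 addresses the common CCCR area and offset
+ * 0x00 is its revision register (per the SDIO spec); check 'err' rather
+ * than the returned byte to detect failure.
+ *
+ *	int err = 0;
+ *	uint8 rev = bcmsdh_cfg_read(sdh, 0, 0x00, &err);
+ *	if (err)
+ *		BCMSDH_ERROR(("cfg read failed: %d\n", err));
+ */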
+
+/* Read CIS content for specified function.
+ *   fn:     function whose CIS is being requested (0 is common CIS)
+ *   cis:    pointer to memory location to place results
+ *   length: number of bytes to read
+ * Internally, this routine uses the values from the cis base regs (0x9-0xB)
+ * to form an SDIO-space address to read the data from.
+ */
+extern int bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length);
+
+/* Synchronous access to device (client) core registers via CMD53 to F1.
+ *   addr: backplane address (i.e. >= regsva from attach)
+ *   size: register width in bytes (2 or 4)
+ *   data: data for register write
+ */
+extern uint32 bcmsdh_reg_read(void *sdh, uint32 addr, uint size);
+extern uint32 bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data);
+
+/* set sb address window */
+extern int bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set);
+
+/* Indicate if last reg read/write failed */
+extern bool bcmsdh_regfail(void *sdh);
+
+/* Buffer transfer to/from device (client) core via cmd53.
+ *   fn:       function number
+ *   addr:     backplane address (i.e. >= regsva from attach)
+ *   flags:    backplane width, address increment, sync/async
+ *   buf:      pointer to memory data buffer
+ *   nbytes:   number of bytes to transfer to/from buf
+ *   pkt:      pointer to packet associated with buf (if any)
+ *   complete: callback function for command completion (async only)
+ *   handle:   handle for completion callback (first arg in callback)
+ * Returns 0 or error code.
+ * NOTE: Async operation is not currently supported.
+ */
+typedef void (*bcmsdh_cmplt_fn_t)(void *handle, int status, bool sync_waiting);
+extern int bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags,
+                           uint8 *buf, uint nbytes, void *pkt,
+                           bcmsdh_cmplt_fn_t complete_fn, void *handle);
+extern int bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags,
+                           uint8 *buf, uint nbytes, void *pkt,
+                           bcmsdh_cmplt_fn_t complete_fn, void *handle);
+
+extern void bcmsdh_glom_post(void *sdh, uint8 *frame, void *pkt, uint len);
+extern void bcmsdh_glom_clear(void *sdh);
+extern uint bcmsdh_set_mode(void *sdh, uint mode);
+extern bool bcmsdh_glom_enabled(void);
+/* Flags bits */
+#define SDIO_REQ_4BYTE	0x1	/* Four-byte target (backplane) width (vs. two-byte) */
+#define SDIO_REQ_FIXED	0x2	/* Fixed address (FIFO) (vs. incrementing address) */
+#define SDIO_REQ_ASYNC	0x4	/* Async request (vs. sync request) */
+#define SDIO_BYTE_MODE	0x8	/* Byte mode request(non-block mode) */
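+
+/* Illustrative sketch: a synchronous, 4-byte-wide, incrementing-address
+ * send via bcmsdh_send_buf() using the flag bits above. 'addr', 'buf' and
+ * 'len' are placeholders; without SDIO_REQ_ASYNC the completion callback
+ * may be NULL and the call blocks until the transfer finishes.
+ *
+ *	uint fn = 2;			// F2, the data function
+ *	uint flags = SDIO_REQ_4BYTE;	// sync, incrementing address
+ *	int rv = bcmsdh_send_buf(sdh, addr, fn, flags, buf, len,
+ *	                         NULL, NULL, NULL);
+ *	if (rv != 0)
+ *		BCMSDH_ERROR(("send_buf failed: %d\n", rv));
+ */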
+
+/* Pending (non-error) return code */
+#define BCME_PENDING	1
+
+/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only).
+ *   rw:       read or write (0/1)
+ *   addr:     direct SDIO address
+ *   buf:      pointer to memory data buffer
+ *   nbytes:   number of bytes to transfer to/from buf
+ * Returns 0 or error code.
+ */
+extern int bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes);
+
+/* Issue an abort to the specified function */
+extern int bcmsdh_abort(void *sdh, uint fn);
+
+/* Start SDIO Host Controller communication */
+extern int bcmsdh_start(void *sdh, int stage);
+
+/* Stop SDIO Host Controller communication */
+extern int bcmsdh_stop(void *sdh);
+
+/* Wait system lock free */
+extern int bcmsdh_waitlockfree(void *sdh);
+
+/* Returns the "Device ID" of target device on the SDIO bus. */
+extern int bcmsdh_query_device(void *sdh);
+
+/* Returns the number of IO functions reported by the device */
+extern uint bcmsdh_query_iofnum(void *sdh);
+
+/* Miscellaneous knob tweaker. */
+extern int bcmsdh_iovar_op(void *sdh, const char *name,
+                           void *params, int plen, void *arg, int len, bool set);
+
+/* Reset and reinitialize the device */
+extern int bcmsdh_reset(bcmsdh_info_t *sdh);
+
+/* helper functions */
+
+/* callback functions */
+typedef struct {
+	/* probe the device */
+	void *(*probe)(uint16 vend_id, uint16 dev_id, uint16 bus, uint16 slot,
+	                uint16 func, uint bustype, void * regsva, osl_t * osh,
+	                void * param);
+	/* remove the device */
+	void (*remove)(void *context);
+	/* can we suspend now */
+	int (*suspend)(void *context);
+	/* resume from suspend */
+	int (*resume)(void *context);
+} bcmsdh_driver_t;
+
+/* platform specific/high level functions */
+extern int bcmsdh_register(bcmsdh_driver_t *driver);
+extern void bcmsdh_unregister(void);
+extern bool bcmsdh_chipmatch(uint16 vendor, uint16 device);
+extern void bcmsdh_device_remove(void * sdh);
+
+extern int bcmsdh_reg_sdio_notify(void* semaphore);
+extern void bcmsdh_unreg_sdio_notify(void);
+
+#if defined(OOB_INTR_ONLY)
+extern int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handler,
+	void* oob_irq_handler_context);
+extern void bcmsdh_oob_intr_unregister(bcmsdh_info_t *sdh);
+extern void bcmsdh_oob_intr_set(bcmsdh_info_t *sdh, bool enable);
+#endif /* OOB_INTR_ONLY */
+extern void bcmsdh_dev_pm_stay_awake(bcmsdh_info_t *sdh);
+extern void bcmsdh_dev_relax(bcmsdh_info_t *sdh);
+extern bool bcmsdh_dev_pm_enabled(bcmsdh_info_t *sdh);
+
+int bcmsdh_suspend(bcmsdh_info_t *bcmsdh);
+int bcmsdh_resume(bcmsdh_info_t *bcmsdh);
+
+/* Function to pass device-status bits to DHD. */
+extern uint32 bcmsdh_get_dstatus(void *sdh);
+
+/* Function to return current window addr */
+extern uint32 bcmsdh_cur_sbwad(void *sdh);
+
+/* Function to pass chipid and rev to lower layers for controlling pr's */
+extern void bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev);
+
+
+extern int bcmsdh_sleep(void *sdh, bool enab);
+
+/* GPIO support */
+extern int bcmsdh_gpio_init(void *sd);
+extern bool bcmsdh_gpioin(void *sd, uint32 gpio);
+extern int bcmsdh_gpioouten(void *sd, uint32 gpio);
+extern int bcmsdh_gpioout(void *sd, uint32 gpio, bool enab);
+
+#endif	/* _bcmsdh_h_ */
diff --git a/drivers/staging/edison-bcm43340/include/bcmsdh_sdmmc.h b/drivers/staging/edison-bcm43340/include/bcmsdh_sdmmc.h
new file mode 100644
index 0000000..dcfd7bf
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/bcmsdh_sdmmc.h
@@ -0,0 +1,131 @@
+/*
+ * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdh_sdmmc.h 444019 2013-12-18 08:36:54Z $
+ */
+
+#ifndef __BCMSDH_SDMMC_H__
+#define __BCMSDH_SDMMC_H__
+
+#if defined(DHD_DEBUG)
+
+#define sd_err(x)	do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)
+#define sd_trace(x) 	do { if (sd_msglevel & SDH_TRACE_VAL) printf x; } while (0)
+#define sd_info(x)	do { if (sd_msglevel & SDH_INFO_VAL) printf x; } while (0)
+#define sd_debug(x)	do { if (sd_msglevel & SDH_DEBUG_VAL) printf x; } while (0)
+#define sd_data(x)	do { if (sd_msglevel & SDH_DATA_VAL) printf x; } while (0)
+#define sd_ctrl(x)	do { if (sd_msglevel & SDH_CTRL_VAL) printf x; } while (0)
+#define sd_dma(x)	do { if (sd_msglevel & SDH_DMA_VAL) printf x; } while (0)
+
+#else
+
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+#define sd_dma(x)
+
+#endif /* DHD_DEBUG */
+
+
+#define sd_sync_dma(sd, read, nbytes)
+#define sd_init_dma(sd)
+#define sd_ack_intr(sd)
+#define sd_wakeup(sd)
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+	do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \
+	} while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS	0
+#define ERROR	1
+
+/* private bus modes */
+#define SDIOH_MODE_SD4		2
+#define CLIENT_INTR			0x100	/* Get rid of this! */
+#define SDIOH_SDMMC_MAX_SG_ENTRIES	(SDPCM_MAXGLOM_SIZE+2)
+
+struct sdioh_info {
+	osl_t		*osh;			/* osh handler */
+	void		*bcmsdh;		/* upper layer handle */
+	bool		client_intr_enabled;	/* interrupt connected flag */
+	bool		intr_handler_valid;	/* client driver interrupt handler valid */
+	sdioh_cb_fn_t	intr_handler;		/* registered interrupt handler */
+	void		*intr_handler_arg;	/* argument to call interrupt handler */
+	uint16		intmask;		/* Current active interrupts */
+
+	int		intrcount;		/* Client interrupts */
+	bool		sd_use_dma;		/* DMA on CMD53 */
+	bool		sd_blockmode;		/* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+						/*  Must be on for sd_multiblock to be effective */
+	bool		use_client_ints;	/* If this is false, make sure to restore */
+	int		sd_mode;		/* SD1/SD4/SPI */
+	int		client_block_size[SDIOD_MAX_IOFUNCS];		/* Blocksize */
+	uint8		num_funcs;		/* Supported funcs on client */
+	uint32		com_cis_ptr;
+	uint32		func_cis_ptr[SDIOD_MAX_IOFUNCS];
+	bool		use_rxchain;
+	struct scatterlist	sg_list[SDIOH_SDMMC_MAX_SG_ENTRIES];
+	struct sdio_func	fake_func0;
+	struct sdio_func	*func[SDIOD_MAX_IOFUNCS];
+
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdh_sdmmc.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/* OS-independent interrupt handler */
+extern bool check_client_intr(sdioh_info_t *sd);
+
+/* Core interrupt enable/disable of device interrupts */
+extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
+extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
+
+
+/**************************************************************
+ * Internal interfaces: bcmsdh_sdmmc.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *sdioh_sdmmc_reg_map(osl_t *osh, int32 addr, int size);
+extern void sdioh_sdmmc_reg_unmap(osl_t *osh, int32 addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int sdioh_sdmmc_register_irq(sdioh_info_t *sd, uint irq);
+extern void sdioh_sdmmc_free_irq(uint irq, sdioh_info_t *sd);
+
+extern sdioh_info_t *sdioh_attach(osl_t *osh, struct sdio_func *func);
+extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd);
+#endif /* __BCMSDH_SDMMC_H__ */
diff --git a/drivers/staging/edison-bcm43340/include/bcmsdpcm.h b/drivers/staging/edison-bcm43340/include/bcmsdpcm.h
new file mode 100644
index 0000000..273a4d7
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/bcmsdpcm.h
@@ -0,0 +1,281 @@
+/*
+ * Broadcom SDIO/PCMCIA
+ * Software-specific definitions shared between device and host side
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdpcm.h 414378 2013-07-24 15:58:50Z $
+ */
+
+#ifndef	_bcmsdpcm_h_
+#define	_bcmsdpcm_h_
+
+/*
+ * Software allocation of To SB Mailbox resources
+ */
+
+/* intstatus bits */
+#define I_SMB_NAK	I_SMB_SW0	/* To SB Mailbox Frame NAK */
+#define I_SMB_INT_ACK	I_SMB_SW1	/* To SB Mailbox Host Interrupt ACK */
+#define I_SMB_USE_OOB	I_SMB_SW2	/* To SB Mailbox Use OOB Wakeup */
+#define I_SMB_DEV_INT	I_SMB_SW3	/* To SB Mailbox Miscellaneous Interrupt */
+
+#define I_TOSBMAIL      (I_SMB_NAK | I_SMB_INT_ACK | I_SMB_USE_OOB | I_SMB_DEV_INT)
+
+/* tosbmailbox bits corresponding to intstatus bits */
+#define SMB_NAK		(1 << 0)	/* To SB Mailbox Frame NAK */
+#define SMB_INT_ACK	(1 << 1)	/* To SB Mailbox Host Interrupt ACK */
+#define SMB_USE_OOB	(1 << 2)	/* To SB Mailbox Use OOB Wakeup */
+#define SMB_DEV_INT	(1 << 3)	/* To SB Mailbox Miscellaneous Interrupt */
+#define SMB_MASK	0x0000000f	/* To SB Mailbox Mask */
+
+/* tosbmailboxdata */
+#define SMB_DATA_VERSION_MASK	0x00ff0000	/* host protocol version (sent with F2 enable) */
+#define SMB_DATA_VERSION_SHIFT	16		/* host protocol version (sent with F2 enable) */
+
+/*
+ * Software allocation of To Host Mailbox resources
+ */
+
+/* intstatus bits */
+#define I_HMB_FC_STATE	I_HMB_SW0	/* To Host Mailbox Flow Control State */
+#define I_HMB_FC_CHANGE	I_HMB_SW1	/* To Host Mailbox Flow Control State Changed */
+#define I_HMB_FRAME_IND	I_HMB_SW2	/* To Host Mailbox Frame Indication */
+#define I_HMB_HOST_INT	I_HMB_SW3	/* To Host Mailbox Miscellaneous Interrupt */
+
+#define I_TOHOSTMAIL    (I_HMB_FC_CHANGE | I_HMB_FRAME_IND | I_HMB_HOST_INT)
+
+/* tohostmailbox bits corresponding to intstatus bits */
+#define HMB_FC_ON	(1 << 0)	/* To Host Mailbox Flow Control State */
+#define HMB_FC_CHANGE	(1 << 1)	/* To Host Mailbox Flow Control State Changed */
+#define HMB_FRAME_IND	(1 << 2)	/* To Host Mailbox Frame Indication */
+#define HMB_HOST_INT	(1 << 3)	/* To Host Mailbox Miscellaneous Interrupt */
+#define HMB_MASK	0x0000000f	/* To Host Mailbox Mask */
+
+/* tohostmailboxdata */
+#define HMB_DATA_NAKHANDLED	0x01	/* we're ready to retransmit NAK'd frame to host */
+#define HMB_DATA_DEVREADY	0x02	/* we're ready to talk to host after enable */
+#define HMB_DATA_FC		0x04	/* per prio flowcontrol update flag to host */
+#define HMB_DATA_FWREADY	0x08	/* firmware is ready for protocol activity */
+#define HMB_DATA_FWHALT		0x10	/* firmware has halted operation */
+
+#define HMB_DATA_FCDATA_MASK	0xff000000	/* per prio flowcontrol data */
+#define HMB_DATA_FCDATA_SHIFT	24		/* per prio flowcontrol data */
+
+#define HMB_DATA_VERSION_MASK	0x00ff0000	/* device protocol version (with devready) */
+#define HMB_DATA_VERSION_SHIFT	16		/* device protocol version (with devready) */
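+
+/* Worked example (illustrative): decoding the fields packed into a
+ * tohostmailboxdata word 'hmbd' read from the device.
+ *
+ *	uint8 fcbits  = (hmbd & HMB_DATA_FCDATA_MASK) >> HMB_DATA_FCDATA_SHIFT;
+ *	uint8 version = (hmbd & HMB_DATA_VERSION_MASK) >> HMB_DATA_VERSION_SHIFT;
+ *	if (hmbd & HMB_DATA_DEVREADY)
+ *		;	// device finished enable; protocol traffic may start
+ */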
+
+/*
+ * Software-defined protocol header
+ */
+
+/* Current protocol version */
+#define SDPCM_PROT_VERSION	4
+
+/* SW frame header */
+#define SDPCM_SEQUENCE_MASK		0x000000ff	/* Sequence Number Mask */
+#define SDPCM_PACKET_SEQUENCE(p) (((uint8 *)p)[0] & 0xff) /* p starts w/SW Header */
+
+#define SDPCM_CHANNEL_MASK		0x00000f00	/* Channel Number Mask */
+#define SDPCM_CHANNEL_SHIFT		8		/* Channel Number Shift */
+#define SDPCM_PACKET_CHANNEL(p) (((uint8 *)p)[1] & 0x0f) /* p starts w/SW Header */
+
+#define SDPCM_FLAGS_MASK		0x0000f000	/* Mask of flag bits */
+#define SDPCM_FLAGS_SHIFT		12		/* Flag bits shift */
+#define SDPCM_PACKET_FLAGS(p) ((((uint8 *)p)[1] & 0xf0) >> 4) /* p starts w/SW Header */
+
+/* Next Read Len: lookahead length of next frame, in 16-byte units (rounded up) */
+#define SDPCM_NEXTLEN_MASK		0x00ff0000	/* Next Read Len Mask */
+#define SDPCM_NEXTLEN_SHIFT		16		/* Next Read Len Shift */
+#define SDPCM_NEXTLEN_VALUE(p) ((((uint8 *)p)[2] & 0xff) << 4) /* p starts w/SW Header */
+#define SDPCM_NEXTLEN_OFFSET		2
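+/* e.g. a NEXTLEN byte of 8 advertises a next frame of at most 8 * 16 = 128
+ * bytes; SDPCM_NEXTLEN_VALUE() performs that <<4 scaling.
+ */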
+
+/* Data Offset from SOF (HW Tag, SW Tag, Pad) */
+#define SDPCM_DOFFSET_OFFSET		3		/* Data Offset */
+#define SDPCM_DOFFSET_VALUE(p) 		(((uint8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
+#define SDPCM_DOFFSET_MASK		0xff000000
+#define SDPCM_DOFFSET_SHIFT		24
+
+#define SDPCM_FCMASK_OFFSET		4		/* Flow control */
+#define SDPCM_FCMASK_VALUE(p)		(((uint8 *)p)[SDPCM_FCMASK_OFFSET] & 0xff)
+#define SDPCM_WINDOW_OFFSET		5		/* Credit based fc */
+#define SDPCM_WINDOW_VALUE(p)		(((uint8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff)
+#define SDPCM_VERSION_OFFSET		6		/* Version # */
+#define SDPCM_VERSION_VALUE(p)		(((uint8 *)p)[SDPCM_VERSION_OFFSET] & 0xff)
+#define SDPCM_UNUSED_OFFSET		7		/* Spare */
+#define SDPCM_UNUSED_VALUE(p)		(((uint8 *)p)[SDPCM_UNUSED_OFFSET] & 0xff)
+
+#define SDPCM_SWHEADER_LEN	8	/* SW header is 64 bits */
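+
+/* Illustrative sketch: pulling apart a received SW header with the accessor
+ * macros above; 'hdr' points at the 8-byte SW header (just past the HW tag).
+ *
+ *	uint8 seq   = SDPCM_PACKET_SEQUENCE(hdr);	// byte 0: sequence number
+ *	uint8 chan  = SDPCM_PACKET_CHANNEL(hdr);	// byte 1: channel (low nibble)
+ *	uint16 next = SDPCM_NEXTLEN_VALUE(hdr);		// byte 2: next frame len, bytes
+ *	uint8 doff  = SDPCM_DOFFSET_VALUE(hdr);		// byte 3: data offset from SOF
+ */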
+
+/* logical channel numbers */
+#define SDPCM_CONTROL_CHANNEL	0	/* Control Request/Response Channel Id */
+#define SDPCM_EVENT_CHANNEL	1	/* Async Event Indication Channel Id */
+#define SDPCM_DATA_CHANNEL	2	/* Data Xmit/Recv Channel Id */
+#define SDPCM_GLOM_CHANNEL	3	/* For coalesced packets (superframes) */
+#define SDPCM_TEST_CHANNEL	15	/* Reserved for test/debug packets */
+#define SDPCM_MAX_CHANNEL	15
+
+#define SDPCM_SEQUENCE_WRAP	256	/* wrap-around val for eight-bit frame seq number */
+
+#define SDPCM_FLAG_RESVD0	0x01
+#define SDPCM_FLAG_RESVD1	0x02
+#define SDPCM_FLAG_GSPI_TXENAB	0x04
+#define SDPCM_FLAG_GLOMDESC	0x08	/* Superframe descriptor mask */
+
+/* For GLOM_CHANNEL frames, use a flag to indicate descriptor frame */
+#define SDPCM_GLOMDESC_FLAG	(SDPCM_FLAG_GLOMDESC << SDPCM_FLAGS_SHIFT)
+
+#define SDPCM_GLOMDESC(p)	(((uint8 *)p)[1] & 0x80)
+
+/* For TEST_CHANNEL packets, define another 4-byte header */
+#define SDPCM_TEST_HDRLEN		4	/* Generally: Cmd(1), Ext(1), Len(2);
+						 * Semantics of Ext byte depend on command.
+						 * Len is current or requested frame length, not
+						 * including test header; sent little-endian.
+						 */
+#define SDPCM_TEST_PKT_CNT_FLD_LEN	4	/* Packet count field length */
+#define SDPCM_TEST_DISCARD		0x01	/* Receiver discards. Ext is a pattern id. */
+#define SDPCM_TEST_ECHOREQ		0x02	/* Echo request. Ext is a pattern id. */
+#define SDPCM_TEST_ECHORSP		0x03	/* Echo response. Ext is a pattern id. */
+#define SDPCM_TEST_BURST		0x04	/* Receiver to send a burst. Ext is a frame count
+						 * (Backward compatibility) Set frame count in a
+						 * 4-byte field adjacent to the HDR
+						 */
+#define SDPCM_TEST_SEND			0x05	/* Receiver sets send mode. Ext is boolean on/off
+						 * Set frame count in a 4-byte field adjacent to
+						 * the HDR
+						 */
+
+/* Handy macro for filling in datagen packets with a pattern */
+#define SDPCM_TEST_FILL(byteno, id)	((uint8)(id + byteno))
+
+/*
+ * Software counters (first part matches hardware counters)
+ */
+
+typedef volatile struct {
+	uint32 cmd52rd;		/* Cmd52RdCount, SDIO: cmd52 reads */
+	uint32 cmd52wr;		/* Cmd52WrCount, SDIO: cmd52 writes */
+	uint32 cmd53rd;		/* Cmd53RdCount, SDIO: cmd53 reads */
+	uint32 cmd53wr;		/* Cmd53WrCount, SDIO: cmd53 writes */
+	uint32 abort;		/* AbortCount, SDIO: aborts */
+	uint32 datacrcerror;	/* DataCrcErrorCount, SDIO: frames w/CRC error */
+	uint32 rdoutofsync;	/* RdOutOfSyncCount, SDIO/PCMCIA: Rd Frm out of sync */
+	uint32 wroutofsync;	/* WrOutOfSyncCount, SDIO/PCMCIA: Wr Frm out of sync */
+	uint32 writebusy;	/* WriteBusyCount, SDIO: device asserted "busy" */
+	uint32 readwait;	/* ReadWaitCount, SDIO: no data ready for a read cmd */
+	uint32 readterm;	/* ReadTermCount, SDIO: read frame termination cmds */
+	uint32 writeterm;	/* WriteTermCount, SDIO: write frames termination cmds */
+	uint32 rxdescuflo;	/* receive descriptor underflows */
+	uint32 rxfifooflo;	/* receive fifo overflows */
+	uint32 txfifouflo;	/* transmit fifo underflows */
+	uint32 runt;		/* runt (too short) frames recv'd from bus */
+	uint32 badlen;		/* frame's rxh len does not match its hw tag len */
+	uint32 badcksum;	/* frame's hw tag chksum doesn't agree with len value */
+	uint32 seqbreak;	/* break in sequence # space from one rx frame to the next */
+	uint32 rxfcrc;		/* frame rx header indicates crc error */
+	uint32 rxfwoos;		/* frame rx header indicates write out of sync */
+	uint32 rxfwft;		/* frame rx header indicates write frame termination */
+	uint32 rxfabort;	/* frame rx header indicates frame aborted */
+	uint32 woosint;		/* write out of sync interrupt */
+	uint32 roosint;		/* read out of sync interrupt */
+	uint32 rftermint;	/* read frame terminate interrupt */
+	uint32 wftermint;	/* write frame terminate interrupt */
+} sdpcmd_cnt_t;
+
+/*
+ * Register Access Macros
+ */
+
+#define SDIODREV_IS(var, val)	((var) == (val))
+#define SDIODREV_GE(var, val)	((var) >= (val))
+#define SDIODREV_GT(var, val)	((var) > (val))
+#define SDIODREV_LT(var, val)	((var) < (val))
+#define SDIODREV_LE(var, val)	((var) <= (val))
+
+#define SDIODDMAREG32(h, dir, chnl) \
+	((dir) == DMA_TX ? \
+	 (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].xmt) : \
+	 (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].rcv))
+
+#define SDIODDMAREG64(h, dir, chnl) \
+	((dir) == DMA_TX ? \
+	 (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].xmt) : \
+	 (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].rcv))
+
+#define SDIODDMAREG(h, dir, chnl) \
+	(SDIODREV_LT((h)->corerev, 1) ? \
+	 SDIODDMAREG32((h), (dir), (chnl)) : \
+	 SDIODDMAREG64((h), (dir), (chnl)))
+
+#define PCMDDMAREG(h, dir, chnl) \
+	((dir) == DMA_TX ? \
+	 (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.xmt) : \
+	 (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.rcv))
+
+#define SDPCMDMAREG(h, dir, chnl, coreid) \
+	((coreid) == SDIOD_CORE_ID ? \
+	 SDIODDMAREG(h, dir, chnl) : \
+	 PCMDDMAREG(h, dir, chnl))
+
+#define SDIODFIFOREG(h, corerev) \
+	(SDIODREV_LT((corerev), 1) ? \
+	 ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod32.dmafifo)) : \
+	 ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod64.dmafifo)))
+
+#define PCMDFIFOREG(h) \
+	((dma32diag_t *)(uintptr)&((h)->regs->dma.pcm32.dmafifo))
+
+#define SDPCMFIFOREG(h, coreid, corerev) \
+	((coreid) == SDIOD_CORE_ID ? \
+	 SDIODFIFOREG(h, corerev) : \
+	 PCMDFIFOREG(h))
+
+/*
+ * Shared structure between dongle and the host.
+ * The structure contains pointers to trap or assert information.
+ */
+#define SDPCM_SHARED_VERSION       0x0001
+#define SDPCM_SHARED_VERSION_MASK  0x00FF
+#define SDPCM_SHARED_ASSERT_BUILT  0x0100
+#define SDPCM_SHARED_ASSERT        0x0200
+#define SDPCM_SHARED_TRAP          0x0400
+#define SDPCM_SHARED_IN_BRPT       0x0800
+#define SDPCM_SHARED_SET_BRPT      0x1000
+#define SDPCM_SHARED_PENDING_BRPT  0x2000
+
+typedef struct {
+	uint32	flags;
+	uint32  trap_addr;
+	uint32  assert_exp_addr;
+	uint32  assert_file_addr;
+	uint32  assert_line;
+	uint32	console_addr;		/* Address of hndrte_cons_t */
+	uint32  msgtrace_addr;
+	uint32  fwid;
+} sdpcm_shared_t;
+
+extern sdpcm_shared_t sdpcm_shared;
+
+/* Function can be used to notify host of FW halt */
+extern void sdpcmd_fwhalt(void);
+
+#endif	/* _bcmsdpcm_h_ */
diff --git a/drivers/staging/edison-bcm43340/include/bcmsdspi.h b/drivers/staging/edison-bcm43340/include/bcmsdspi.h
new file mode 100644
index 0000000..b5a0caf
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/bcmsdspi.h
@@ -0,0 +1,135 @@
+/*
+ * SD-SPI Protocol Conversion - BCMSDH->SPI Translation Layer
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdspi.h 294363 2011-11-06 23:02:20Z $
+ */
+#ifndef	_BCM_SD_SPI_H
+#define	_BCM_SD_SPI_H
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+	do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \
+	} while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS	0
+#undef ERROR
+#define ERROR	1
+
+/* private bus modes */
+#define SDIOH_MODE_SPI		0
+
+#define USE_BLOCKMODE		0x2	/* Block mode can be single block or multi */
+#define USE_MULTIBLOCK		0x4
+
+struct sdioh_info {
+	uint cfg_bar;                   	/* pci cfg address for bar */
+	uint32 caps;                    	/* cached value of capabilities reg */
+	uint		bar0;			/* BAR0 for PCI Device */
+	osl_t 		*osh;			/* osh handler */
+	void		*controller;	/* Pointer to SPI Controller's private data struct */
+
+	uint		lockcount; 		/* nest count of sdspi_lock() calls */
+	bool		client_intr_enabled;	/* interrupt connected flag */
+	bool		intr_handler_valid;	/* client driver interrupt handler valid */
+	sdioh_cb_fn_t	intr_handler;		/* registered interrupt handler */
+	void		*intr_handler_arg;	/* argument to call interrupt handler */
+	bool		initialized;		/* card initialized */
+	uint32		target_dev;		/* Target device ID */
+	uint32		intmask;		/* Current active interrupts */
+	void		*sdos_info;		/* Pointer to per-OS private data */
+
+	uint32		controller_type;	/* Host controller type */
+	uint8		version;		/* Host Controller Spec Compliance Version */
+	uint 		irq;			/* Client irq */
+	uint32 		intrcount;		/* Client interrupts */
+	uint32 		local_intrcount;	/* Controller interrupts */
+	bool 		host_init_done;		/* Controller initted */
+	bool 		card_init_done;		/* Client SDIO interface initted */
+	bool 		polled_mode;		/* polling for command completion */
+
+	bool		sd_use_dma;		/* DMA on CMD53 */
+	bool 		sd_blockmode;		/* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+						/*  Must be on for sd_multiblock to be effective */
+	bool 		use_client_ints;	/* If this is false, make sure to restore */
+	bool		got_hcint;		/* Host Controller interrupt. */
+						/*  polling hack in wl_linux.c:wl_timer() */
+	int 		adapter_slot;		/* Maybe dealing with multiple slots/controllers */
+	int 		sd_mode;		/* SD1/SD4/SPI */
+	int 		client_block_size[SDIOD_MAX_IOFUNCS];		/* Blocksize */
+	uint32 		data_xfer_count;	/* Current register transfer size */
+	uint32		cmd53_wr_data;		/* Used to pass CMD53 write data */
+	uint32		card_response;		/* Used to pass back response status byte */
+	uint32		card_rsp_data;		/* Used to pass back response data word */
+	uint16 		card_rca;		/* Current Address */
+	uint8 		num_funcs;		/* Supported funcs on client */
+	uint32 		com_cis_ptr;
+	uint32 		func_cis_ptr[SDIOD_MAX_IOFUNCS];
+	void		*dma_buf;
+	ulong		dma_phys;
+	int 		r_cnt;			/* rx count */
+	int 		t_cnt;			/* tx_count */
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdspi.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/**************************************************************
+ * Internal interfaces: bcmsdspi.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *spi_reg_map(osl_t *osh, uintptr addr, int size);
+extern void spi_reg_unmap(osl_t *osh, uintptr addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int spi_register_irq(sdioh_info_t *sd, uint irq);
+extern void spi_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void spi_lock(sdioh_info_t *sd);
+extern void spi_unlock(sdioh_info_t *sd);
+
+/* Allocate/init/free per-OS private data */
+extern int spi_osinit(sdioh_info_t *sd);
+extern void spi_osfree(sdioh_info_t *sd);
+
+#endif /* _BCM_SD_SPI_H */
diff --git a/drivers/staging/edison-bcm43340/include/bcmsdstd.h b/drivers/staging/edison-bcm43340/include/bcmsdstd.h
new file mode 100644
index 0000000..97c2a5a
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/bcmsdstd.h
@@ -0,0 +1,282 @@
+/*
+ *  'Standard' SDIO HOST CONTROLLER driver
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmsdstd.h 455427 2014-02-14 00:11:19Z $
+ */
+#ifndef	_BCM_SD_STD_H
+#define	_BCM_SD_STD_H
+
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+#define sd_err(x)	do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+#define sd_dma(x)
+
+#define sd_sync_dma(sd, read, nbytes)
+#define sd_init_dma(sd)
+#define sd_ack_intr(sd)
+#define sd_wakeup(sd)
+/* Allocate/init/free per-OS private data */
+extern int sdstd_osinit(sdioh_info_t *sd);
+extern void sdstd_osfree(sdioh_info_t *sd);
+
+#define sd_log(x)
+
+#define SDIOH_ASSERT(exp) \
+	do { if (!(exp)) \
+		printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \
+	} while (0)
+
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
+
+/* internal return code */
+#define SUCCESS	0
+#define ERROR	1
+
+/* private bus modes */
+#define SDIOH_MODE_SPI		0
+#define SDIOH_MODE_SD1		1
+#define SDIOH_MODE_SD4		2
+
+#define MAX_SLOTS 6 	/* For PCI: Only 6 BAR entries => 6 slots */
+#define SDIOH_REG_WINSZ	0x100 /* Number of registers in Standard Host Controller */
+
+#define SDIOH_TYPE_ARASAN_HDK	1
+#define SDIOH_TYPE_BCM27XX	2
+#define SDIOH_TYPE_TI_PCIXX21	4	/* TI PCIxx21 Standard Host Controller */
+#define SDIOH_TYPE_RICOH_R5C822	5	/* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter */
+#define SDIOH_TYPE_JMICRON	6	/* JMicron Standard SDIO Host Controller */
+
+/* For linux, allow yielding for dongle */
+#define BCMSDYIELD
+
+/* Expected card status value for CMD7 */
+#define SDIOH_CMD7_EXP_STATUS   0x00001E00
+
+#define RETRIES_LARGE 100000
+#define sdstd_os_yield(sd)	do {} while (0)
+#define RETRIES_SMALL 100
+
+
+#define USE_BLOCKMODE		0x2	/* Block mode can be single block or multi */
+#define USE_MULTIBLOCK		0x4
+
+#define USE_FIFO		0x8	/* Fifo vs non-fifo */
+
+#define CLIENT_INTR 		0x100	/* Get rid of this! */
+
+#define HC_INTR_RETUNING	0x1000
+
+
+#ifdef BCMSDIOH_TXGLOM
+/* Total glom pkt can not exceed 64K
+ * need one more slot for glom padding packet
+ */
+#define SDIOH_MAXGLOM_SIZE	(40+1)
+
+typedef struct glom_buf {
+	uint32 count;				/* Total number of pkts queued */
+	void *dma_buf_arr[SDIOH_MAXGLOM_SIZE];	/* Frame address */
+	ulong dma_phys_arr[SDIOH_MAXGLOM_SIZE]; /* DMA_MAPed address of frames */
+	uint16 nbytes[SDIOH_MAXGLOM_SIZE];	/* Size of each frame */
+} glom_buf_t;
+#endif
+
+struct sdioh_info {
+	uint cfg_bar;                   	/* pci cfg address for bar */
+	uint32 caps;                    	/* cached value of capabilities reg */
+	uint32 curr_caps;                    	/* max current capabilities reg */
+
+	osl_t 		*osh;			/* osh handler */
+	volatile char 	*mem_space;		/* pci device memory va */
+	uint		lockcount; 		/* nest count of sdstd_lock() calls */
+	bool		client_intr_enabled;	/* interrupt connected flag */
+	bool		intr_handler_valid;	/* client driver interrupt handler valid */
+	sdioh_cb_fn_t	intr_handler;		/* registered interrupt handler */
+	void		*intr_handler_arg;	/* argument to call interrupt handler */
+	bool		initialized;		/* card initialized */
+	uint		target_dev;		/* Target device ID */
+	uint16		intmask;		/* Current active interrupts */
+	void		*sdos_info;		/* Pointer to per-OS private data */
+	void		*bcmsdh;		/* handler to upper layer stack (bcmsdh) */
+
+	uint32		controller_type;	/* Host controller type */
+	uint8		version;		/* Host Controller Spec Compliance Version */
+	uint		irq;			/* Client irq */
+	int		intrcount;		/* Client interrupts */
+	int		local_intrcount;	/* Controller interrupts */
+	bool		host_init_done;		/* Controller initted */
+	bool		card_init_done;		/* Client SDIO interface initted */
+	bool		polled_mode;		/* polling for command completion */
+
+	bool		sd_blockmode;		/* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+						/*  Must be on for sd_multiblock to be effective */
+	bool		use_client_ints;	/* If this is false, make sure to restore */
+						/*  polling hack in wl_linux.c:wl_timer() */
+	int		adapter_slot;		/* Maybe dealing with multiple slots/controllers */
+	int		sd_mode;		/* SD1/SD4/SPI */
+	int		client_block_size[SDIOD_MAX_IOFUNCS];		/* Blocksize */
+	uint32		data_xfer_count;	/* Current transfer */
+	uint16		card_rca;		/* Current Address */
+	int8		sd_dma_mode;		/* DMA Mode (PIO, SDMA, ... ADMA2) on CMD53 */
+	uint8		num_funcs;		/* Supported funcs on client */
+	uint32		com_cis_ptr;
+	uint32		func_cis_ptr[SDIOD_MAX_IOFUNCS];
+	void		*dma_buf;		/* DMA Buffer virtual address */
+	ulong		dma_phys;		/* DMA Buffer physical address */
+	void		*adma2_dscr_buf;	/* ADMA2 Descriptor Buffer virtual address */
+	ulong		adma2_dscr_phys;	/* ADMA2 Descriptor Buffer physical address */
+
+	/* adjustments needed to make the dma align properly */
+	void		*dma_start_buf;
+	ulong		dma_start_phys;
+	uint		alloced_dma_size;
+	void		*adma2_dscr_start_buf;
+	ulong		adma2_dscr_start_phys;
+	uint		alloced_adma2_dscr_size;
+
+	int 		r_cnt;			/* rx count */
+	int 		t_cnt;			/* tx_count */
+	bool		got_hcint;		/* local interrupt flag */
+	uint16		last_intrstatus;	/* to cache intrstatus */
+	int 	host_UHSISupported;		/* whether UHSI is supported for HC. */
+	int 	card_UHSI_voltage_Supported; 	/* whether UHSI is supported for
+						 * Card in terms of Voltage [1.8 or 3.3].
+						 */
+	int	global_UHSI_Supp;	/* type of UHSI support in both host and card.
+					 * HOST_SDR_UNSUPP: capabilities not supported/matched
+					 * HOST_SDR_12_25: SDR12 and SDR25 supported
+					 * HOST_SDR_50_104_DDR: one of SDR50/SDR104 or DDR50 supported
+					 */
+	volatile int	sd3_dat_state; 		/* data transfer state used for retuning check */
+	volatile int	sd3_tun_state; 		/* tuning state used for retuning check */
+	bool	sd3_tuning_reqd; 	/* tuning requirement parameter */
+	uint32	caps3;			/* cached value of 32 MSbits capabilities reg (SDIO 3.0) */
+#ifdef BCMSDIOH_TXGLOM
+	glom_buf_t glom_info;		/* pkt information used for glomming */
+	uint	txglom_mode;		/* Txglom mode: 0 - copy, 1 - multi-descriptor */
+#endif
+};
+
+#define DMA_MODE_NONE	0
+#define DMA_MODE_SDMA	1
+#define DMA_MODE_ADMA1	2
+#define DMA_MODE_ADMA2	3
+#define DMA_MODE_ADMA2_64 4
+#define DMA_MODE_AUTO	-1
+
+#define USE_DMA(sd)		((bool)(((sd)->sd_dma_mode > 0) ? TRUE : FALSE))
+
+/* States for Tuning and corr data */
+#define TUNING_IDLE 			0
+#define TUNING_START 			1
+#define TUNING_START_AFTER_DAT 	2
+#define TUNING_ONGOING 			3
+
+#define DATA_TRANSFER_IDLE 		0
+#define DATA_TRANSFER_ONGOING	1
+
+#define CHECK_TUNING_PRE_DATA	1
+#define CHECK_TUNING_POST_DATA	2
+
+
+#ifdef DHD_DEBUG
+#define SD_DHD_DISABLE_PERIODIC_TUNING 0x01
+#define SD_DHD_ENABLE_PERIODIC_TUNING  0x00
+#endif
+
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmsdstd.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/* OS-independent interrupt handler */
+extern bool check_client_intr(sdioh_info_t *sd);
+
+/* Core interrupt enable/disable of device interrupts */
+extern void sdstd_devintr_on(sdioh_info_t *sd);
+extern void sdstd_devintr_off(sdioh_info_t *sd);
+
+/* Enable/disable interrupts for local controller events */
+extern void sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err);
+extern void sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err);
+
+/* Wait for specified interrupt and error bits to be set */
+extern void sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err);
+
+
+/**************************************************************
+ * Internal interfaces: bcmsdstd.c references to per-port code
+ */
+
+/* Register mapping routines */
+extern uint32 *sdstd_reg_map(osl_t *osh, int32 addr, int size);
+extern void sdstd_reg_unmap(osl_t *osh, int32 addr, int size);
+
+/* Interrupt (de)registration routines */
+extern int sdstd_register_irq(sdioh_info_t *sd, uint irq);
+extern void sdstd_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void sdstd_lock(sdioh_info_t *sd);
+extern void sdstd_unlock(sdioh_info_t *sd);
+extern void sdstd_waitlockfree(sdioh_info_t *sd);
+
+/* OS-specific wrappers for safe concurrent register access */
+extern void sdstd_os_lock_irqsave(sdioh_info_t *sd, ulong* flags);
+extern void sdstd_os_unlock_irqrestore(sdioh_info_t *sd, ulong* flags);
+
+/* OS-specific wait-for-interrupt-or-status */
+extern int sdstd_waitbits(sdioh_info_t *sd, uint16 norm, uint16 err, bool yield, uint16 *bits);
+
+/* used by bcmsdstd_linux [implemented in sdstd] */
+extern void sdstd_3_enable_retuning_int(sdioh_info_t *sd);
+extern void sdstd_3_disable_retuning_int(sdioh_info_t *sd);
+extern bool sdstd_3_is_retuning_int_set(sdioh_info_t *sd);
+extern void sdstd_3_check_and_do_tuning(sdioh_info_t *sd, int tuning_param);
+extern bool sdstd_3_check_and_set_retuning(sdioh_info_t *sd);
+extern int sdstd_3_get_tune_state(sdioh_info_t *sd);
+extern int sdstd_3_get_data_state(sdioh_info_t *sd);
+extern void sdstd_3_set_tune_state(sdioh_info_t *sd, int state);
+extern void sdstd_3_set_data_state(sdioh_info_t *sd, int state);
+extern uint8 sdstd_3_get_tuning_exp(sdioh_info_t *sd);
+extern uint32 sdstd_3_get_uhsi_clkmode(sdioh_info_t *sd);
+extern int sdstd_3_clk_tuning(sdioh_info_t *sd, uint32 sd3ClkMode);
+
+/* used by sdstd [implemented in bcmsdstd_linux/ndis] */
+extern void sdstd_3_start_tuning(sdioh_info_t *sd);
+extern void sdstd_3_osinit_tuning(sdioh_info_t *sd);
+extern void sdstd_3_osclean_tuning(sdioh_info_t *sd);
+
+extern void sdstd_enable_disable_periodic_timer(sdioh_info_t * sd, uint val);
+
+extern sdioh_info_t *sdioh_attach(osl_t *osh, void *bar0, uint irq);
+extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd);
+#endif /* _BCM_SD_STD_H */
diff --git a/drivers/staging/edison-bcm43340/include/bcmspi.h b/drivers/staging/edison-bcm43340/include/bcmspi.h
new file mode 100644
index 0000000..cf814ce
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/bcmspi.h
@@ -0,0 +1,40 @@
+/*
+ * Broadcom SPI Low-Level Hardware Driver API
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmspi.h 241182 2011-02-17 21:50:03Z $
+ */
+#ifndef	_BCM_SPI_H
+#define	_BCM_SPI_H
+
+extern void spi_devintr_off(sdioh_info_t *sd);
+extern void spi_devintr_on(sdioh_info_t *sd);
+extern bool spi_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor);
+extern bool spi_controller_highspeed_mode(sdioh_info_t *sd, bool hsmode);
+extern bool spi_check_client_intr(sdioh_info_t *sd, int *is_dev_intr);
+extern bool spi_hw_attach(sdioh_info_t *sd);
+extern bool spi_hw_detach(sdioh_info_t *sd);
+extern void spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen);
+extern void spi_spinbits(sdioh_info_t *sd);
+extern void spi_waitbits(sdioh_info_t *sd, bool yield);
+
+#endif /* _BCM_SPI_H */
diff --git a/drivers/staging/edison-bcm43340/include/bcmutils.h b/drivers/staging/edison-bcm43340/include/bcmutils.h
new file mode 100644
index 0000000..ac2539a
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/bcmutils.h
@@ -0,0 +1,1148 @@
+/*
+ * Misc useful os-independent macros and functions.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmutils.h 457888 2014-02-25 03:34:39Z $
+ */
+
+#ifndef	_bcmutils_h_
+#define	_bcmutils_h_
+
+#define bcm_strcpy_s(dst, noOfElements, src)            strcpy((dst), (src))
+#define bcm_strncpy_s(dst, noOfElements, src, count)    strncpy((dst), (src), (count))
+#define bcm_strcat_s(dst, noOfElements, src)            strcat((dst), (src))
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef PKTQ_LOG
+#include <wlioctl.h>
+#endif
+
+/* ctype replacement */
+#define _BCM_U	0x01	/* upper */
+#define _BCM_L	0x02	/* lower */
+#define _BCM_D	0x04	/* digit */
+#define _BCM_C	0x08	/* cntrl */
+#define _BCM_P	0x10	/* punct */
+#define _BCM_S	0x20	/* white space (space/lf/tab) */
+#define _BCM_X	0x40	/* hex digit */
+#define _BCM_SP	0x80	/* hard space (0x20) */
+
+extern const unsigned char bcm_ctype[];
+#define bcm_ismask(x)	(bcm_ctype[(int)(unsigned char)(x)])
+
+#define bcm_isalnum(c)	((bcm_ismask(c)&(_BCM_U|_BCM_L|_BCM_D)) != 0)
+#define bcm_isalpha(c)	((bcm_ismask(c)&(_BCM_U|_BCM_L)) != 0)
+#define bcm_iscntrl(c)	((bcm_ismask(c)&(_BCM_C)) != 0)
+#define bcm_isdigit(c)	((bcm_ismask(c)&(_BCM_D)) != 0)
+#define bcm_isgraph(c)	((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D)) != 0)
+#define bcm_islower(c)	((bcm_ismask(c)&(_BCM_L)) != 0)
+#define bcm_isprint(c)	((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D|_BCM_SP)) != 0)
+#define bcm_ispunct(c)	((bcm_ismask(c)&(_BCM_P)) != 0)
+#define bcm_isspace(c)	((bcm_ismask(c)&(_BCM_S)) != 0)
+#define bcm_isupper(c)	((bcm_ismask(c)&(_BCM_U)) != 0)
+#define bcm_isxdigit(c)	((bcm_ismask(c)&(_BCM_D|_BCM_X)) != 0)
+#define bcm_tolower(c)	(bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#define bcm_toupper(c)	(bcm_islower((c)) ? ((c) + 'A' - 'a') : (c))
+
+/* Buffer structure for collecting string-formatted data
+* using bcm_bprintf() API.
+* Use bcm_binit() to initialize before use
+*/
+
+struct bcmstrbuf {
+	char *buf;	/* pointer to current position in origbuf */
+	unsigned int size;	/* current (residual) size in bytes */
+	char *origbuf;	/* unmodified pointer to original buffer */
+	unsigned int origsize;	/* unmodified original buffer size in bytes */
+};
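+
+/* Usage sketch (illustrative): assuming the usual bcm_binit()/bcm_bprintf()
+ * declarations later in this file, a dump routine fills a caller-supplied
+ * buffer like so:
+ *
+ *	char buf[128];
+ *	struct bcmstrbuf b;
+ *	bcm_binit(&b, buf, sizeof(buf));		// b now tracks buf
+ *	bcm_bprintf(&b, "len %u max %u\n", len, max);	// appends; 'size' shrinks
+ */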
+
+/* ** driver-only section ** */
+#ifdef BCMDRIVER
+#include <osl.h>
+
+#define GPIO_PIN_NOTDEFINED 	0x20	/* Pin not defined */
+
+/*
+ * Spin at most 'us' microseconds while 'exp' is true.
+ * Caller should explicitly test 'exp' when this completes
+ * and take appropriate error action if 'exp' is still true.
+ */
+#ifndef SPINWAIT_POLL_PERIOD
+#define SPINWAIT_POLL_PERIOD	10
+#endif
+
+#define SPINWAIT(exp, us) { \
+	uint countdown = (us) + (SPINWAIT_POLL_PERIOD - 1); \
+	while ((exp) && (countdown >= SPINWAIT_POLL_PERIOD)) { \
+		OSL_DELAY(SPINWAIT_POLL_PERIOD); \
+		countdown -= SPINWAIT_POLL_PERIOD; \
+	} \
+}
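+
+/* Usage sketch (illustrative): poll a hypothetical busy bit for up to 100us,
+ * then re-test the condition as required by the comment above, since SPINWAIT
+ * itself does not report whether it timed out ('regs'/'STATUS_BUSY' are
+ * placeholders; R_REG is the OSL register-read macro).
+ *
+ *	SPINWAIT((R_REG(osh, &regs->status) & STATUS_BUSY), 100);
+ *	if (R_REG(osh, &regs->status) & STATUS_BUSY)
+ *		return BCME_ERROR;	// still busy: treat as timeout
+ */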
+
+/* osl multi-precedence packet queue */
+#define PKTQ_LEN_MAX            0xFFFF  /* Max uint16 65535 packets */
+#ifndef PKTQ_LEN_DEFAULT
+#define PKTQ_LEN_DEFAULT        128	/* Max 128 packets */
+#endif
+#ifndef PKTQ_MAX_PREC
+#define PKTQ_MAX_PREC           16	/* Maximum precedence levels */
+#endif
+
+typedef struct pktq_prec {
+	void *head;     /* first packet to dequeue */
+	void *tail;     /* last packet to dequeue */
+	uint16 len;     /* number of queued packets */
+	uint16 max;     /* maximum number of queued packets */
+} pktq_prec_t;
+
+#ifdef PKTQ_LOG
+typedef struct {
+	uint32 requested;    /* packets requested to be stored */
+	uint32 stored;	     /* packets stored */
+	uint32 saved;	     /* packets saved,
+	                            because a lower-priority queue has given away one packet
+	                      */
+	uint32 selfsaved;    /* packets saved,
+	                            because an older packet from the same queue has been dropped
+	                      */
+	uint32 full_dropped; /* packets dropped,
+	                            because pktq is full with higher precedence packets
+	                      */
+	uint32 dropped;      /* packets dropped because pktq per that precedence is full */
+	uint32 sacrificed;   /* packets dropped,
+	                            in order to save one from a queue of a highest priority
+	                      */
+	uint32 busy;         /* packets dropped because of hardware/transmission error */
+	uint32 retry;        /* packets re-sent because they were not received */
+	uint32 ps_retry;     /* packets retried again before entering power-save mode */
+	uint32 suppress;     /* packets which were suppressed and not transmitted */
+	uint32 retry_drop;   /* packets finally dropped after retry limit */
+	uint32 max_avail;    /* the high-water mark of the queue capacity for packets -
+	                            goes to zero as queue fills
+	                      */
+	uint32 max_used;     /* the high-water mark of the queue utilisation for packets -
+	                            increases with use ('inverse' of max_avail)
+	                      */
+	uint32 queue_capacity; /* the maximum capacity of the queue */
+	uint32 rtsfail;        /* count of rts attempts that failed to receive cts */
+	uint32 acked;          /* count of packets sent (acked) successfully */
+	uint32 txrate_succ;    /* running total of phy rate of packets sent successfully */
+	uint32 txrate_main;    /* running total of primary phy rate of all packets */
+	uint32 throughput;     /* actual data transferred successfully */
+	uint32 airtime;        /* cumulative total medium access delay in microseconds */
+	uint32 _logtime;       /* timestamp of last counter clear */
+} pktq_counters_t;
+
+typedef struct {
+	uint32                  _prec_log;
+	pktq_counters_t*        _prec_cnt[PKTQ_MAX_PREC];     /* Counters per queue  */
+} pktq_log_t;
+#endif /* PKTQ_LOG */
+
+
+#define PKTQ_COMMON	\
+	uint16 num_prec;        /* number of precedences in use */			\
+	uint16 hi_prec;         /* rapid dequeue hint (>= highest non-empty prec) */	\
+	uint16 max;             /* total max packets */					\
+	uint16 len;             /* total number of packets */
+
+/* multi-priority pkt queue */
+struct pktq {
+	PKTQ_COMMON
+	/* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */
+	struct pktq_prec q[PKTQ_MAX_PREC];
+#ifdef PKTQ_LOG
+	pktq_log_t*      pktqlog;
+#endif
+};
+
+/* simple, non-priority pkt queue */
+struct spktq {
+	PKTQ_COMMON
+	/* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */
+	struct pktq_prec q[1];
+};
+
+#define PKTQ_PREC_ITER(pq, prec)        for (prec = (pq)->num_prec - 1; prec >= 0; prec--)
+
+/* fn(pkt, arg).  Returns TRUE if pkt belongs to the interface */
+typedef bool (*ifpkt_cb_t)(void*, int);
+
+#ifdef BCMPKTPOOL
+#define POOL_ENAB(pool)		((pool) && (pool)->inited)
+#define SHARED_POOL		(pktpool_shared)
+#else /* BCMPKTPOOL */
+#define POOL_ENAB(bus)		0
+#define SHARED_POOL		((struct pktpool *)NULL)
+#endif /* BCMPKTPOOL */
+
+#ifdef BCMFRAGPOOL
+#define SHARED_FRAG_POOL	(pktpool_shared_lfrag)
+#endif
+#define SHARED_RXFRAG_POOL	(pktpool_shared_rxlfrag)
+
+
+#ifndef PKTPOOL_LEN_MAX
+#define PKTPOOL_LEN_MAX		40
+#endif /* PKTPOOL_LEN_MAX */
+#define PKTPOOL_CB_MAX		3
+
+struct pktpool;
+typedef void (*pktpool_cb_t)(struct pktpool *pool, void *arg);
+typedef struct {
+	pktpool_cb_t cb;
+	void *arg;
+} pktpool_cbinfo_t;
+/* call back fn extension to populate host address in pool pkt */
+typedef int (*pktpool_cb_extn_t)(struct pktpool *pool, void *arg, void* pkt);
+typedef struct {
+	pktpool_cb_extn_t cb;
+	void *arg;
+} pktpool_cbextn_info_t;
+
+
+#ifdef BCMDBG_POOL
+/* pkt pool debug states */
+#define POOL_IDLE	0
+#define POOL_RXFILL	1
+#define POOL_RXDH	2
+#define POOL_RXD11	3
+#define POOL_TXDH	4
+#define POOL_TXD11	5
+#define POOL_AMPDU	6
+#define POOL_TXENQ	7
+
+typedef struct {
+	void *p;
+	uint32 cycles;
+	uint32 dur;
+} pktpool_dbg_t;
+
+typedef struct {
+	uint8 txdh;	/* tx to host */
+	uint8 txd11;	/* tx to d11 */
+	uint8 enq;	/* waiting in q */
+	uint8 rxdh;	/* rx from host */
+	uint8 rxd11;	/* rx from d11 */
+	uint8 rxfill;	/* dma_rxfill */
+	uint8 idle;	/* avail in pool */
+} pktpool_stats_t;
+#endif /* BCMDBG_POOL */
+
+typedef struct pktpool {
+	bool inited;            /* pktpool_init was successful */
+	uint8 type;             /* type of lbuf: basic, frag, etc */
+	uint8 id;               /* pktpool ID:  index in registry */
+	bool istx;              /* direction: transmit or receive data path */
+
+	void * freelist;        /* free list: see PKTNEXTFREE(), PKTSETNEXTFREE() */
+	uint16 avail;           /* number of packets in pool's free list */
+	uint16 len;             /* number of packets managed by pool */
+	uint16 maxlen;          /* maximum size of pool <= PKTPOOL_LEN_MAX */
+	uint16 plen;            /* size of pkt buffer, excluding lbuf|lbuf_frag */
+
+	bool empty;
+	uint8 cbtoggle;
+	uint8 cbcnt;
+	uint8 ecbcnt;
+	bool emptycb_disable;
+	pktpool_cbinfo_t *availcb_excl;
+	pktpool_cbinfo_t cbs[PKTPOOL_CB_MAX];
+	pktpool_cbinfo_t ecbs[PKTPOOL_CB_MAX];
+	pktpool_cbextn_info_t cbext;
+#ifdef BCMDBG_POOL
+	uint8 dbg_cbcnt;
+	pktpool_cbinfo_t dbg_cbs[PKTPOOL_CB_MAX];
+	uint16 dbg_qlen;
+	pktpool_dbg_t dbg_q[PKTPOOL_LEN_MAX + 1];
+#endif
+} pktpool_t;
+
+extern pktpool_t *pktpool_shared;
+#ifdef BCMFRAGPOOL
+extern pktpool_t *pktpool_shared_lfrag;
+#endif
+extern pktpool_t *pktpool_shared_rxlfrag;
+
+/* Incarnate a pktpool registry. On success returns total_pools. */
+extern int pktpool_attach(osl_t *osh, uint32 total_pools);
+extern int pktpool_dettach(osl_t *osh); /* Relinquish registry */
+
+extern int pktpool_init(osl_t *osh, pktpool_t *pktp, int *pktplen, int plen, bool istx, uint8 type);
+extern int pktpool_deinit(osl_t *osh, pktpool_t *pktp);
+extern int pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal);
+extern void* pktpool_get(pktpool_t *pktp);
+extern void pktpool_free(pktpool_t *pktp, void *p);
+extern int pktpool_add(pktpool_t *pktp, void *p);
+extern int pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp);
+extern int pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb);
+extern int pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_setmaxlen(pktpool_t *pktp, uint16 maxlen);
+extern int pktpool_setmaxlen_strict(osl_t *osh, pktpool_t *pktp, uint16 maxlen);
+extern void pktpool_emptycb_disable(pktpool_t *pktp, bool disable);
+extern bool pktpool_emptycb_disabled(pktpool_t *pktp);
+int pktpool_hostaddr_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg);
+#define POOLPTR(pp)         ((pktpool_t *)(pp))
+#define POOLID(pp)          (POOLPTR(pp)->id)
+
+#define POOLSETID(pp, ppid) (POOLPTR(pp)->id = (ppid))
+
+#define pktpool_len(pp)     (POOLPTR(pp)->len)
+#define pktpool_avail(pp)   (POOLPTR(pp)->avail)
+#define pktpool_plen(pp)    (POOLPTR(pp)->plen)
+#define pktpool_maxlen(pp)  (POOLPTR(pp)->maxlen)
+
+
+/*
+ * ----------------------------------------------------------------------------
+ * A pool ID is assigned to a pkt pool during pool initialization. This is
+ * done by maintaining a registry of all initialized pools, and the registry
+ * index at which the pool is registered is used as the pool's unique ID.
+ * ID 0 is reserved and is used to signify an invalid pool ID.
+ * All packets henceforth allocated from a pool will be tagged with the pool's
+ * unique ID. Packets allocated from the heap will use the reserved ID = 0.
+ * Packets with non-zero pool id signify that they were allocated from a pool.
+ * A maximum of 15 pools are supported, allowing a 4-bit pool ID to be used
+ * in place of a 32-bit pool pointer in each packet.
+ * ----------------------------------------------------------------------------
+ */
+#define PKTPOOL_INVALID_ID          (0)
+#define PKTPOOL_MAXIMUM_ID          (15)
+
+/* Registry of pktpool(s) */
+extern pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1];
+
+/* Pool ID to/from Pool Pointer converters */
+#define PKTPOOL_ID2PTR(id)          (pktpools_registry[id])
+#define PKTPOOL_PTR2ID(pp)          (POOLID(pp))
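+
+/* Usage sketch (illustrative only, not compiled): a pool's 4-bit ID
+ * round-trips through the registry, so a packet need only carry the ID,
+ * never the 32-bit pool pointer.
+ */
+#if 0
+	pktpool_t *pool = SHARED_POOL;
+	uint8 id = PKTPOOL_PTR2ID(pool);	/* registry index, 1..15 */
+	ASSERT(PKTPOOL_ID2PTR(id) == pool);	/* ID 0 means "not pooled" */
+#endif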
+
+
+#ifdef BCMDBG_POOL
+extern int pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_start_trigger(pktpool_t *pktp, void *p);
+extern int pktpool_dbg_dump(pktpool_t *pktp);
+extern int pktpool_dbg_notify(pktpool_t *pktp);
+extern int pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats);
+#endif /* BCMDBG_POOL */
+
+/* forward declaration of ether_addr structure used by some function prototypes */
+
+struct ether_addr;
+
+extern int ether_isbcast(const void *ea);
+extern int ether_isnulladdr(const void *ea);
+
+/* operations on a specific precedence in packet queue */
+
+#define pktq_psetmax(pq, prec, _max)	((pq)->q[prec].max = (_max))
+#define pktq_pmax(pq, prec)		((pq)->q[prec].max)
+#define pktq_plen(pq, prec)		((pq)->q[prec].len)
+#define pktq_pavail(pq, prec)		((pq)->q[prec].max - (pq)->q[prec].len)
+#define pktq_pfull(pq, prec)		((pq)->q[prec].len >= (pq)->q[prec].max)
+#define pktq_pempty(pq, prec)		((pq)->q[prec].len == 0)
+
+#define pktq_ppeek(pq, prec)		((pq)->q[prec].head)
+#define pktq_ppeek_tail(pq, prec)	((pq)->q[prec].tail)
+
+extern void *pktq_penq(struct pktq *pq, int prec, void *p);
+extern void *pktq_penq_head(struct pktq *pq, int prec, void *p);
+extern void *pktq_pdeq(struct pktq *pq, int prec);
+extern void *pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p);
+extern void *pktq_pdeq_with_fn(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg);
+extern void *pktq_pdeq_tail(struct pktq *pq, int prec);
+/* Empty the queue at particular precedence level */
+extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir,
+	ifpkt_cb_t fn, int arg);
+/* Remove a specified packet from its queue */
+extern bool pktq_pdel(struct pktq *pq, void *p, int prec);
+
+/* operations on a set of precedences in packet queue */
+
+extern int pktq_mlen(struct pktq *pq, uint prec_bmp);
+extern void *pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
+extern void *pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out);
+
+/* operations on packet queue as a whole */
+
+#define pktq_len(pq)		((int)(pq)->len)
+#define pktq_max(pq)		((int)(pq)->max)
+#define pktq_avail(pq)		((int)((pq)->max - (pq)->len))
+#define pktq_full(pq)		((pq)->len >= (pq)->max)
+#define pktq_empty(pq)		((pq)->len == 0)
+
+/* operations for single precedence queues */
+#define pktenq(pq, p)		pktq_penq(((struct pktq *)(void *)pq), 0, (p))
+#define pktenq_head(pq, p)	pktq_penq_head(((struct pktq *)(void *)pq), 0, (p))
+#define pktdeq(pq)		pktq_pdeq(((struct pktq *)(void *)pq), 0)
+#define pktdeq_tail(pq)		pktq_pdeq_tail(((struct pktq *)(void *)pq), 0)
+#define pktqinit(pq, len)	pktq_init(((struct pktq *)(void *)pq), 1, len)
+
+extern void pktq_init(struct pktq *pq, int num_prec, int max_len);
+extern void pktq_set_max_plen(struct pktq *pq, int prec, int max_len);
+
+/* prec_out may be NULL if caller is not interested in return value */
+extern void *pktq_deq(struct pktq *pq, int *prec_out);
+extern void *pktq_deq_tail(struct pktq *pq, int *prec_out);
+extern void *pktq_peek(struct pktq *pq, int *prec_out);
+extern void *pktq_peek_tail(struct pktq *pq, int *prec_out);
+extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg);
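+
+/* Usage sketch (illustrative, error handling elided): initialize a queue,
+ * enqueue at a precedence, then drain in precedence order. 'pkt' is a packet
+ * allocated elsewhere and 'consume' is a hypothetical handler.
+ */
+#if 0
+	struct pktq q;
+	int prec;
+	void *p;
+
+	pktq_init(&q, PKTQ_MAX_PREC, PKTQ_LEN_DEFAULT);
+	pktq_penq(&q, 1, pkt);			/* enqueue 'pkt' at precedence 1 */
+	while ((p = pktq_deq(&q, &prec)) != NULL)
+		consume(p, prec);		/* highest non-empty prec first */
+#endif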
+
+/* externs */
+/* packet */
+extern uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf);
+extern uint pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf);
+extern uint pkttotlen(osl_t *osh, void *p);
+extern void *pktlast(osl_t *osh, void *p);
+extern uint pktsegcnt(osl_t *osh, void *p);
+extern uint pktsegcnt_war(osl_t *osh, void *p);
+extern uint8 *pktdataoffset(osl_t *osh, void *p,  uint offset);
+extern void *pktoffset(osl_t *osh, void *p,  uint offset);
+
+/* Get priority from a packet and pass it back in scb (or equiv) */
+#define	PKTPRIO_VDSCP	0x100		/* DSCP prio found after VLAN tag */
+#define	PKTPRIO_VLAN	0x200		/* VLAN prio found */
+#define	PKTPRIO_UPD	0x400		/* DSCP used to update VLAN prio */
+#define	PKTPRIO_DSCP	0x800		/* DSCP prio found */
+
+/* DSCP type definitions (RFC4594) */
+/* AF1x: High-Throughput Data (RFC2597) */
+#define DSCP_AF11	0x0A
+#define DSCP_AF12	0x0C
+#define DSCP_AF13	0x0E
+/* AF2x: Low-Latency Data (RFC2597) */
+#define DSCP_AF21	0x12
+#define DSCP_AF22	0x14
+#define DSCP_AF23	0x16
+/* AF3x: Multimedia Streaming (RFC2597) */
+#define DSCP_AF31	0x1A
+#define DSCP_AF32	0x1C
+#define DSCP_AF33	0x1E
+/* EF: Telephony (RFC3246) */
+#define DSCP_EF		0x2E
+
+extern uint pktsetprio(void *pkt, bool update_vtag);
+
+/* string */
+extern int bcm_atoi(const char *s);
+extern ulong bcm_strtoul(const char *cp, char **endp, uint base);
+extern char *bcmstrstr(const char *haystack, const char *needle);
+extern char *bcmstrcat(char *dest, const char *src);
+extern char *bcmstrncat(char *dest, const char *src, uint size);
+extern ulong wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen);
+char* bcmstrtok(char **string, const char *delimiters, char *tokdelim);
+int bcmstricmp(const char *s1, const char *s2);
+int bcmstrnicmp(const char* s1, const char* s2, int cnt);
+
+
+/* ethernet address */
+extern char *bcm_ether_ntoa(const struct ether_addr *ea, char *buf);
+extern int bcm_ether_atoe(const char *p, struct ether_addr *ea);
+
+/* ip address */
+struct ipv4_addr;
+extern char *bcm_ip_ntoa(struct ipv4_addr *ia, char *buf);
+extern char *bcm_ipv6_ntoa(void *ipv6, char *buf);
+extern int bcm_atoipv4(const char *p, struct ipv4_addr *ip);
+
+/* delay */
+extern void bcm_mdelay(uint ms);
+/* variable access */
+#define NVRAM_RECLAIM_CHECK(name)
+
+extern char *getvar(char *vars, const char *name);
+extern int getintvar(char *vars, const char *name);
+extern int getintvararray(char *vars, const char *name, int index);
+extern int getintvararraysize(char *vars, const char *name);
+extern uint getgpiopin(char *vars, char *pin_name, uint def_pin);
+#define bcm_perf_enable()
+#define bcmstats(fmt)
+#define	bcmlog(fmt, a1, a2)
+#define	bcmdumplog(buf, size)	*buf = '\0'
+#define	bcmdumplogent(buf, idx)	-1
+
+#define TSF_TICKS_PER_MS	1000
+
+#define bcmtslog(tstamp, fmt, a1, a2)
+#define bcmprinttslogs()
+#define bcmprinttstamp(us)
+#define bcmdumptslog(buf, size)
+
+extern char *bcm_nvram_vars(uint *length);
+extern int bcm_nvram_cache(void *sih);
+
+/* Support for sharing code across in-driver iovar implementations.
+ * The intent is that a driver use this structure to map iovar names
+ * to its (private) iovar identifiers, and the lookup function to
+ * find the entry.  Macros are provided to map ids and get/set actions
+ * into a single number space for a switch statement.
+ */
+
+/* iovar structure */
+typedef struct bcm_iovar {
+	const char *name;	/* name for lookup and display */
+	uint16 varid;		/* id for switch */
+	uint16 flags;		/* driver-specific flag bits */
+	uint16 type;		/* base type of argument */
+	uint16 minlen;		/* min length for buffer vars */
+} bcm_iovar_t;
+
+/* varid definitions are per-driver, may use these get/set bits */
+
+/* IOVar action bits for id mapping */
+#define IOV_GET 0 /* Get an iovar */
+#define IOV_SET 1 /* Set an iovar */
+
+/* Varid to actionid mapping */
+#define IOV_GVAL(id)		((id) * 2)
+#define IOV_SVAL(id)		((id) * 2 + IOV_SET)
+#define IOV_ISSET(actionid)	(((actionid) & IOV_SET) == IOV_SET)
+#define IOV_ID(actionid)	((actionid) >> 1)
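+
+/* Usage sketch (illustrative): a driver's iovar handler typically folds the
+ * looked-up varid and the get/set direction into one switch space. 'IOV_FOO',
+ * 'foo_val' and the argument buffers are hypothetical.
+ */
+#if 0
+	switch (actionid) {
+	case IOV_GVAL(IOV_FOO):
+		*(int32 *)arg = foo_val;	/* get */
+		break;
+	case IOV_SVAL(IOV_FOO):
+		foo_val = *(int32 *)params;	/* set */
+		break;
+	default:
+		err = BCME_UNSUPPORTED;
+		break;
+	}
+#endif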
+
+/* flags are per-driver based on driver attributes */
+
+extern const bcm_iovar_t *bcm_iovar_lookup(const bcm_iovar_t *table, const char *name);
+extern int bcm_iovar_lencheck(const bcm_iovar_t *table, void *arg, int len, bool set);
+#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \
+	defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
+extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len);
+#endif 
+#endif	/* BCMDRIVER */
+
+/* Base type definitions */
+#define IOVT_VOID	0	/* no value (implicitly set only) */
+#define IOVT_BOOL	1	/* any value ok (zero/nonzero) */
+#define IOVT_INT8	2	/* integer values are range-checked */
+#define IOVT_UINT8	3	/* unsigned int 8 bits */
+#define IOVT_INT16	4	/* int 16 bits */
+#define IOVT_UINT16	5	/* unsigned int 16 bits */
+#define IOVT_INT32	6	/* int 32 bits */
+#define IOVT_UINT32	7	/* unsigned int 32 bits */
+#define IOVT_BUFFER	8	/* buffer is size-checked as per minlen */
+#define BCM_IOVT_VALID(type) (((unsigned int)(type)) <= IOVT_BUFFER)
+
+/* Initializer for IOV type strings */
+#define BCM_IOV_TYPE_INIT { \
+	"void", \
+	"bool", \
+	"int8", \
+	"uint8", \
+	"int16", \
+	"uint16", \
+	"int32", \
+	"uint32", \
+	"buffer", \
+	"" }
+
+#define BCM_IOVT_IS_INT(type) (\
+	(type == IOVT_BOOL) || \
+	(type == IOVT_INT8) || \
+	(type == IOVT_UINT8) || \
+	(type == IOVT_INT16) || \
+	(type == IOVT_UINT16) || \
+	(type == IOVT_INT32) || \
+	(type == IOVT_UINT32))
+
+/* ** driver/apps-shared section ** */
+
+#define BCME_STRLEN 		64	/* Max string length for BCM errors */
+#define VALID_BCMERROR(e)  (((e) <= 0) && ((e) >= BCME_LAST))
+
+
+/*
+ * Error codes may be added, but the defined ones must not be changed or
+ * deleted; these error codes are exposed to user code.
+ * Whenever a new error code is added to this list, please update the
+ * errorstring table with the related error string and update the osl files
+ * with the os-specific errorcode map.
+ */
+
+#define BCME_OK				0	/* Success */
+#define BCME_ERROR			-1	/* Error generic */
+#define BCME_BADARG			-2	/* Bad Argument */
+#define BCME_BADOPTION			-3	/* Bad option */
+#define BCME_NOTUP			-4	/* Not up */
+#define BCME_NOTDOWN			-5	/* Not down */
+#define BCME_NOTAP			-6	/* Not AP */
+#define BCME_NOTSTA			-7	/* Not STA  */
+#define BCME_BADKEYIDX			-8	/* BAD Key Index */
+#define BCME_RADIOOFF 			-9	/* Radio Off */
+#define BCME_NOTBANDLOCKED		-10	/* Not  band locked */
+#define BCME_NOCLK			-11	/* No Clock */
+#define BCME_BADRATESET			-12	/* BAD Rate valueset */
+#define BCME_BADBAND			-13	/* BAD Band */
+#define BCME_BUFTOOSHORT		-14	/* Buffer too short */
+#define BCME_BUFTOOLONG			-15	/* Buffer too long */
+#define BCME_BUSY			-16	/* Busy */
+#define BCME_NOTASSOCIATED		-17	/* Not Associated */
+#define BCME_BADSSIDLEN			-18	/* Bad SSID len */
+#define BCME_OUTOFRANGECHAN		-19	/* Out of Range Channel */
+#define BCME_BADCHAN			-20	/* Bad Channel */
+#define BCME_BADADDR			-21	/* Bad Address */
+#define BCME_NORESOURCE			-22	/* Not Enough Resources */
+#define BCME_UNSUPPORTED		-23	/* Unsupported */
+#define BCME_BADLEN			-24	/* Bad length */
+#define BCME_NOTREADY			-25	/* Not Ready */
+#define BCME_EPERM			-26	/* Not Permitted */
+#define BCME_NOMEM			-27	/* No Memory */
+#define BCME_ASSOCIATED			-28	/* Associated */
+#define BCME_RANGE			-29	/* Not In Range */
+#define BCME_NOTFOUND			-30	/* Not Found */
+#define BCME_WME_NOT_ENABLED		-31	/* WME Not Enabled */
+#define BCME_TSPEC_NOTFOUND		-32	/* TSPEC Not Found */
+#define BCME_ACM_NOTSUPPORTED		-33	/* ACM Not Supported */
+#define BCME_NOT_WME_ASSOCIATION	-34	/* Not WME Association */
+#define BCME_SDIO_ERROR			-35	/* SDIO Bus Error */
+#define BCME_DONGLE_DOWN		-36	/* Dongle Not Accessible */
+#define BCME_VERSION			-37 	/* Incorrect version */
+#define BCME_TXFAIL			-38 	/* TX failure */
+#define BCME_RXFAIL			-39	/* RX failure */
+#define BCME_NODEVICE			-40 	/* Device not present */
+#define BCME_NMODE_DISABLED		-41 	/* NMODE disabled */
+#define BCME_NONRESIDENT		-42 /* access to nonresident overlay */
+#define BCME_SCANREJECT			-43 	/* reject scan request */
+#define BCME_USAGE_ERROR                -44     /* WLCMD usage error */
+#define BCME_IOCTL_ERROR                -45     /* WLCMD ioctl error */
+#define BCME_SERIAL_PORT_ERR            -46     /* RWL serial port error */
+#define BCME_DISABLED			-47     /* Disabled in this build */
+#define BCME_DECERR			-48	/* Decrypt error */
+#define BCME_ENCERR			-49	/* Encrypt error */
+#define BCME_MICERR			-50	/* Integrity/MIC error */
+#define BCME_REPLAY			-51	/* Replay */
+#define BCME_IE_NOTFOUND		-52	/* IE not found */
+#define BCME_LAST			BCME_IE_NOTFOUND
+
+#define BCME_NOTENABLED BCME_DISABLED
+
+/* This is a collection of BCME error strings */
+#define BCMERRSTRINGTABLE {		\
+	"OK",				\
+	"Undefined error",		\
+	"Bad Argument",			\
+	"Bad Option",			\
+	"Not up",			\
+	"Not down",			\
+	"Not AP",			\
+	"Not STA",			\
+	"Bad Key Index",		\
+	"Radio Off",			\
+	"Not band locked",		\
+	"No clock",			\
+	"Bad Rate valueset",		\
+	"Bad Band",			\
+	"Buffer too short",		\
+	"Buffer too long",		\
+	"Busy",				\
+	"Not Associated",		\
+	"Bad SSID len",			\
+	"Out of Range Channel",		\
+	"Bad Channel",			\
+	"Bad Address",			\
+	"Not Enough Resources",		\
+	"Unsupported",			\
+	"Bad length",			\
+	"Not Ready",			\
+	"Not Permitted",		\
+	"No Memory",			\
+	"Associated",			\
+	"Not In Range",			\
+	"Not Found",			\
+	"WME Not Enabled",		\
+	"TSPEC Not Found",		\
+	"ACM Not Supported",		\
+	"Not WME Association",		\
+	"SDIO Bus Error",		\
+	"Dongle Not Accessible",	\
+	"Incorrect version",		\
+	"TX Failure",			\
+	"RX Failure",			\
+	"Device Not Present",		\
+	"NMODE Disabled",		\
+	"Nonresident overlay access", \
+	"Scan Rejected",		\
+	"WLCMD usage error",		\
+	"WLCMD ioctl error",		\
+	"RWL serial port error", 	\
+	"Disabled",			\
+	"Decrypt error", \
+	"Encrypt error", \
+	"MIC error", \
+	"Replay", \
+	"IE not found", \
+}
+
+#ifndef ABS
+#define	ABS(a)			(((a) < 0) ? -(a) : (a))
+#endif /* ABS */
+
+#ifndef MIN
+#define	MIN(a, b)		(((a) < (b)) ? (a) : (b))
+#endif /* MIN */
+
+#ifndef MAX
+#define	MAX(a, b)		(((a) > (b)) ? (a) : (b))
+#endif /* MAX */
+
+/* limit to [min, max] */
+#ifndef LIMIT_TO_RANGE
+#define LIMIT_TO_RANGE(x, min, max) \
+	((x) < (min) ? (min) : ((x) > (max) ? (max) : (x)))
+#endif /* LIMIT_TO_RANGE */
+
+/* limit to  max */
+#ifndef LIMIT_TO_MAX
+#define LIMIT_TO_MAX(x, max) \
+	(((x) > (max) ? (max) : (x)))
+#endif /* LIMIT_TO_MAX */
+
+/* limit to min */
+#ifndef LIMIT_TO_MIN
+#define LIMIT_TO_MIN(x, min) \
+	(((x) < (min) ? (min) : (x)))
+#endif /* LIMIT_TO_MIN */
+
+#define DELTA(curr, prev) ((curr) > (prev) ? ((curr) - (prev)) : \
+	(0xffffffff - (prev) + (curr) + 1))
+#define CEIL(x, y)		(((x) + ((y) - 1)) / (y))
+#define ROUNDUP(x, y)		((((x) + ((y) - 1)) / (y)) * (y))
+#define ROUNDDN(p, align)	((p) & ~((align) - 1))
+#define	ISALIGNED(a, x)		(((uintptr)(a) & ((x) - 1)) == 0)
+#define ALIGN_ADDR(addr, boundary) (void *)(((uintptr)(addr) + (boundary) - 1) \
+	                                         & ~((boundary) - 1))
+#define ALIGN_SIZE(size, boundary) (((size) + (boundary) - 1) \
+	                                         & ~((boundary) - 1))
+#define	ISPOWEROF2(x)		((((x) - 1) & (x)) == 0)
+#define VALID_MASK(mask)	!((mask) & ((mask) + 1))
+
+#ifndef OFFSETOF
+#ifdef __ARMCC_VERSION
+/*
+ * The ARM RVCT compiler complains when using OFFSETOF where a constant
+ * expression is expected, such as an initializer for a static object.
+ * offsetof from the runtime library doesn't have that problem.
+ */
+#include <stddef.h>
+#define	OFFSETOF(type, member)	offsetof(type, member)
+#else
+#  if (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 8))
+/* GCC 4.8+ complains when using our OFFSETOF macro in array length declarations. */
+#    define	OFFSETOF(type, member)	__builtin_offsetof(type, member)
+#  else
+#    define	OFFSETOF(type, member)	((uint)(uintptr)&((type *)0)->member)
+#  endif /* GCC 4.8 or newer */
+#endif /* __ARMCC_VERSION */
+#endif /* OFFSETOF */
+
+#ifndef ARRAYSIZE
+#define ARRAYSIZE(a)		(sizeof(a) / sizeof(a[0]))
+#endif
+
+#ifndef ARRAYLAST /* returns pointer to last array element */
+#define ARRAYLAST(a)		(&a[ARRAYSIZE(a)-1])
+#endif
+
+/* Reference a function; used to prevent a static function from being optimized out */
+extern void *_bcmutils_dummy_fn;
+#define REFERENCE_FUNCTION(f)	(_bcmutils_dummy_fn = (void *)(f))
+
+/* bit map related macros */
+#ifndef setbit
+#ifndef NBBY		/* the BSD family defines NBBY */
+#define	NBBY	8	/* 8 bits per byte */
+#endif /* #ifndef NBBY */
+#ifdef BCMUTILS_BIT_MACROS_USE_FUNCS
+extern void setbit(void *array, uint bit);
+extern void clrbit(void *array, uint bit);
+extern bool isset(const void *array, uint bit);
+extern bool isclr(const void *array, uint bit);
+#else
+#define	setbit(a, i)	(((uint8 *)a)[(i) / NBBY] |= 1 << ((i) % NBBY))
+#define	clrbit(a, i)	(((uint8 *)a)[(i) / NBBY] &= ~(1 << ((i) % NBBY)))
+#define	isset(a, i)	(((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY)))
+#define	isclr(a, i)	((((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY))) == 0)
+#endif
+#endif /* setbit */
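+
+/* Usage sketch (illustrative, not compiled): the macros treat any buffer as a
+ * flat bit array, NBBY bits per byte. memset() is used here for simplicity.
+ */
+#if 0
+	uint8 map[8];			/* 64-bit bitmap */
+	memset(map, 0, sizeof(map));
+	setbit(map, 37);
+	if (isset(map, 37) && isclr(map, 36))
+		clrbit(map, 37);
+#endif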
+
+#define	isbitset(a, i)	(((a) & (1 << (i))) != 0)
+
+#define	NBITS(type)	(sizeof(type) * 8)
+#define NBITVAL(nbits)	(1 << (nbits))
+#define MAXBITVAL(nbits)	((1 << (nbits)) - 1)
+#define	NBITMASK(nbits)	MAXBITVAL(nbits)
+#define MAXNBVAL(nbyte)	MAXBITVAL((nbyte) * 8)
+
+extern void bcm_bitprint32(const uint32 u32);
+
+/*
+ * ----------------------------------------------------------------------------
+ * Multiword map of 2-bit fields and nibbles
+ * setbit2 setbit4 (void *ptr, uint32 ix, uint32 val)
+ * getbit2 getbit4 (void *ptr, uint32 ix)
+ * ----------------------------------------------------------------------------
+ */
+
+#define DECLARE_MAP_API(NB, RSH, LSH, OFF, MSK)                     \
+static INLINE void setbit##NB(void *ptr, uint32 ix, uint32 val)     \
+{                                                                   \
+	uint32 *addr = (uint32 *)ptr;                                   \
+	uint32 *a = addr + (ix >> RSH); /* (ix / 2^RSH) */              \
+	uint32 pos = (ix & OFF) << LSH; /* (ix % 2^RSH) * 2^LSH */      \
+	uint32 mask = (MSK << pos);                                     \
+	uint32 tmp = *a & ~mask;                                        \
+	*a = tmp | (val << pos);                                        \
+}                                                                   \
+static INLINE uint32 getbit##NB(void *ptr, uint32 ix)               \
+{                                                                   \
+	uint32 *addr = (uint32 *)ptr;                                   \
+	uint32 *a = addr + (ix >> RSH);                                 \
+	uint32 pos = (ix & OFF) << LSH;                                 \
+	return ((*a >> pos) & MSK);                                     \
+}
+
+DECLARE_MAP_API(2,  4, 1, 15U, 0x0003) /* setbit2() and getbit2() */
+DECLARE_MAP_API(4,  3, 2,  7U, 0x000F) /* setbit4() and getbit4() */
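+
+/* Usage sketch (illustrative, not compiled): pack sixteen 2-bit states into a
+ * single uint32 word via the generated accessors.
+ */
+#if 0
+	uint32 state[1] = { 0 };
+	setbit2(state, 5, 0x3);		/* element 5 := 3 */
+	/* getbit2(state, 5) now returns 0x3 */
+#endif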
+
+
+/* basic mux operation - can be optimized on several architectures */
+#define MUX(pred, true, false) ((pred) ? (true) : (false))
+
+/* modulo inc/dec - assumes x E [0, bound - 1] */
+#define MODDEC(x, bound) MUX((x) == 0, (bound) - 1, (x) - 1)
+#define MODINC(x, bound) MUX((x) == (bound) - 1, 0, (x) + 1)
+
+/* modulo inc/dec, bound = 2^k */
+#define MODDEC_POW2(x, bound) (((x) - 1) & ((bound) - 1))
+#define MODINC_POW2(x, bound) (((x) + 1) & ((bound) - 1))
+
+/* modulo add/sub - assumes x, y E [0, bound - 1] */
+#define MODADD(x, y, bound) \
+    MUX((x) + (y) >= (bound), (x) + (y) - (bound), (x) + (y))
+#define MODSUB(x, y, bound) \
+    MUX(((int)(x)) - ((int)(y)) < 0, (x) - (y) + (bound), (x) - (y))
+
+/* module add/sub, bound = 2^k */
+#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1))
+#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1))
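+
+/* Usage sketch (illustrative): ring-buffer index advance. The _POW2 variants
+ * avoid the conditional when the bound is a power of two.
+ */
+#if 0
+	uint widx = 0, depth = 12;
+	widx = MODINC(widx, depth);	/* any bound */
+	widx = MODINC_POW2(widx, 64);	/* bound must be a power of two */
+#endif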
+
+/* crc defines */
+#define CRC8_INIT_VALUE  0xff		/* Initial CRC8 checksum value */
+#define CRC8_GOOD_VALUE  0x9f		/* Good final CRC8 checksum value */
+#define CRC16_INIT_VALUE 0xffff		/* Initial CRC16 checksum value */
+#define CRC16_GOOD_VALUE 0xf0b8		/* Good final CRC16 checksum value */
+#define CRC32_INIT_VALUE 0xffffffff	/* Initial CRC32 checksum value */
+#define CRC32_GOOD_VALUE 0xdebb20e3	/* Good final CRC32 checksum value */
+
+/* use for direct output of MAC address in printf etc */
+#define MACF				"%02x:%02x:%02x:%02x:%02x:%02x"
+#define ETHERP_TO_MACF(ea)	((struct ether_addr *) (ea))->octet[0], \
+							((struct ether_addr *) (ea))->octet[1], \
+							((struct ether_addr *) (ea))->octet[2], \
+							((struct ether_addr *) (ea))->octet[3], \
+							((struct ether_addr *) (ea))->octet[4], \
+							((struct ether_addr *) (ea))->octet[5]
+
+#define ETHER_TO_MACF(ea) 	(ea).octet[0], \
+							(ea).octet[1], \
+							(ea).octet[2], \
+							(ea).octet[3], \
+							(ea).octet[4], \
+							(ea).octet[5]
+#if !defined(SIMPLE_MAC_PRINT)
+#define MACDBG "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC2STRDBG(ea) (ea)[0], (ea)[1], (ea)[2], (ea)[3], (ea)[4], (ea)[5]
+#else
+#define MACDBG				"%02x:%02x:%02x"
+#define MAC2STRDBG(ea) (ea)[0], (ea)[4], (ea)[5]
+#endif /* SIMPLE_MAC_PRINT */
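+
+/* Usage sketch (illustrative, not compiled): direct printf of a MAC address
+ * without a temporary string buffer; assumes the full ether_addr definition
+ * (with its 'octet' array) from the ethernet protocol header is in scope.
+ */
+#if 0
+	struct ether_addr ea;
+	printf("peer " MACF "\n", ETHER_TO_MACF(ea));
+	printf("peer " MACDBG "\n", MAC2STRDBG(ea.octet));
+#endif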
+
+/* bcm_format_flags() bit description structure */
+typedef struct bcm_bit_desc {
+	uint32	bit;
+	const char* name;
+} bcm_bit_desc_t;
+
+/* bcm_format_field */
+typedef struct bcm_bit_desc_ex {
+	uint32 mask;
+	const bcm_bit_desc_t *bitfield;
+} bcm_bit_desc_ex_t;
+
+/* buffer length for ethernet address from bcm_ether_ntoa() */
+#define ETHER_ADDR_STR_LEN	18	/* 18-byte buffer length for an Ethernet address string */
+
+/* crypto utility function */
+/* 128-bit xor: *dst = *src1 xor *src2. dst, src1 and src2 may have any alignment */
+static INLINE void
+xor_128bit_block(const uint8 *src1, const uint8 *src2, uint8 *dst)
+{
+	if (
+#ifdef __i386__
+	    1 ||
+#endif
+	    (((uintptr)src1 | (uintptr)src2 | (uintptr)dst) & 3) == 0) {
+		/* ARM CM3 rel time: 1229 (727 if alignment check could be omitted) */
+		/* x86 supports unaligned.  This version runs 6x-9x faster on x86. */
+		((uint32 *)dst)[0] = ((const uint32 *)src1)[0] ^ ((const uint32 *)src2)[0];
+		((uint32 *)dst)[1] = ((const uint32 *)src1)[1] ^ ((const uint32 *)src2)[1];
+		((uint32 *)dst)[2] = ((const uint32 *)src1)[2] ^ ((const uint32 *)src2)[2];
+		((uint32 *)dst)[3] = ((const uint32 *)src1)[3] ^ ((const uint32 *)src2)[3];
+	} else {
+		/* ARM CM3 rel time: 4668 (4191 if alignment check could be omitted) */
+		int k;
+		for (k = 0; k < 16; k++)
+			dst[k] = src1[k] ^ src2[k];
+	}
+}
+
+/* externs */
+/* crc */
+extern uint8 hndcrc8(uint8 *p, uint nbytes, uint8 crc);
+extern uint16 hndcrc16(uint8 *p, uint nbytes, uint16 crc);
+extern uint32 hndcrc32(uint8 *p, uint nbytes, uint32 crc);
+
+/* format/print */
+#if defined(DHD_DEBUG) || defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || \
+	defined(WLMSG_ASSOC)
+/* print out the value a field has: fields may have 1-32 bits and may hold any value */
+extern int bcm_format_field(const bcm_bit_desc_ex_t *bd, uint32 field, char* buf, int len);
+/* print out which bits in flags are set */
+extern int bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len);
+#endif
+
+#if defined(DHD_DEBUG) || defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || \
+	defined(WLMSG_ASSOC) || defined(WLMEDIA_PEAKRATE)
+extern int bcm_format_hex(char *str, const void *bytes, int len);
+#endif
+
+extern const char *bcm_crypto_algo_name(uint algo);
+extern char *bcm_chipname(uint chipid, char *buf, uint len);
+extern char *bcm_brev_str(uint32 brev, char *buf);
+extern void printbig(char *buf);
+extern void prhex(const char *msg, uchar *buf, uint len);
+
+/* IE parsing */
+
+/* tag_ID/length/value_buffer tuple */
+typedef struct bcm_tlv {
+	uint8	id;
+	uint8	len;
+	uint8	data[1];
+} bcm_tlv_t;
+
+#define BCM_TLV_MAX_DATA_SIZE (255)
+
+#define BCM_TLV_HDR_SIZE (OFFSETOF(bcm_tlv_t, data))
+
+/* Check that bcm_tlv_t fits into the given buflen */
+#define bcm_valid_tlv(elt, buflen) (\
+	 ((int)(buflen) >= (int)BCM_TLV_HDR_SIZE) && \
+	 ((int)(buflen) >= (int)(BCM_TLV_HDR_SIZE + (elt)->len)))
+
+extern bcm_tlv_t *bcm_next_tlv(bcm_tlv_t *elt, int *buflen);
+extern bcm_tlv_t *bcm_parse_tlvs(void *buf, int buflen, uint key);
+extern bcm_tlv_t *bcm_parse_ordered_tlvs(void *buf, int buflen, uint key);
+
+extern bcm_tlv_t *bcm_find_vendor_ie(void *tlvs, int tlvs_len, const char *voui, uint8 *type,
+	int type_len);
+
+extern uint8 *bcm_write_tlv(int type, const void *data, int datalen, uint8 *dst);
+extern uint8 *bcm_write_tlv_safe(int type, const void *data, int datalen, uint8 *dst,
+	int dst_maxlen);
+
+extern uint8 *bcm_copy_tlv(const void *src, uint8 *dst);
+extern uint8 *bcm_copy_tlv_safe(const void *src, uint8 *dst, int dst_maxlen);
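+
+/* Usage sketch (illustrative): walk an IE buffer of 'buflen' bytes starting
+ * at 'buf' (both hypothetical). bcm_next_tlv() updates the residual length,
+ * and bcm_valid_tlv() guards each access against a truncated element.
+ */
+#if 0
+	bcm_tlv_t *elt = (bcm_tlv_t *)buf;
+	while (elt != NULL && bcm_valid_tlv(elt, buflen)) {
+		/* consume elt->id / elt->len / elt->data here */
+		elt = bcm_next_tlv(elt, &buflen);
+	}
+#endif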
+
+/* bcmerror */
+extern const char *bcmerrorstr(int bcmerror);
+
+/* multi-bool data type: set of bools, mbool is true if any is set */
+typedef uint32 mbool;
+#define mboolset(mb, bit)		((mb) |= (bit))		/* set one bool */
+#define mboolclr(mb, bit)		((mb) &= ~(bit))	/* clear one bool */
+#define mboolisset(mb, bit)		(((mb) & (bit)) != 0)	/* TRUE if one bool is set */
+#define	mboolmaskset(mb, mask, val)	((mb) = (((mb) & ~(mask)) | (val)))
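+
+/* Usage sketch (illustrative): track independent "down" requests in one word;
+ * the bit value 0x1 stands for a hypothetical software-disable requester.
+ */
+#if 0
+	mbool down_reqs = 0;
+	mboolset(down_reqs, 0x1);	/* request device down */
+	if (mboolisset(down_reqs, 0x1))
+		mboolclr(down_reqs, 0x1);
+	/* device may come back up once down_reqs == 0 */
+#endif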
+
+/* generic datastruct to help dump routines */
+struct fielddesc {
+	const char *nameandfmt;
+	uint32 	offset;
+	uint32 	len;
+};
+
+extern void bcm_binit(struct bcmstrbuf *b, char *buf, uint size);
+extern void bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, uint8 *buf, int len);
+
+extern void bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount);
+extern int bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes);
+extern void bcm_print_bytes(const char *name, const uchar *cdata, int len);
+
+typedef  uint32 (*bcmutl_rdreg_rtn)(void *arg0, uint arg1, uint32 offset);
+extern uint bcmdumpfields(bcmutl_rdreg_rtn func_ptr, void *arg0, uint arg1, struct fielddesc *str,
+                          char *buf, uint32 bufsize);
+extern uint bcm_bitcount(uint8 *bitmap, uint bytelength);
+
+extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...);
+
+/* power conversion */
+extern uint16 bcm_qdbm_to_mw(uint8 qdbm);
+extern uint8 bcm_mw_to_qdbm(uint16 mw);
+extern uint bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint len);
+
+unsigned int process_nvram_vars(char *varbuf, unsigned int len);
+
+/* calculate a * b + c */
+extern void bcm_uint64_multiple_add(uint32* r_high, uint32* r_low, uint32 a, uint32 b, uint32 c);
+/* calculate a / b */
+extern void bcm_uint64_divide(uint32* r, uint32 a_high, uint32 a_low, uint32 b);
+
+
+/* Public domain bit twiddling hacks/utilities: Sean Eron Anderson */
+
+/* Table driven count set bits. */
+static const uint8 /* Table only for use by bcm_cntsetbits */
+_CSBTBL[256] =
+{
+#	define B2(n)    n,     n + 1,     n + 1,     n + 2
+#	define B4(n) B2(n), B2(n + 1), B2(n + 1), B2(n + 2)
+#	define B6(n) B4(n), B4(n + 1), B4(n + 1), B4(n + 2)
+	B6(0), B6(0 + 1), B6(0 + 1), B6(0 + 2)
+};
+
+static INLINE uint32 /* Uses table _CSBTBL for fast counting of 1's in a u32 */
+bcm_cntsetbits(const uint32 u32)
+{
+	/* function local scope declaration of const _CSBTBL[] */
+	const uint8 * p = (const uint8 *)&u32;
+	return (_CSBTBL[p[0]] + _CSBTBL[p[1]] + _CSBTBL[p[2]] + _CSBTBL[p[3]]);
+}
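+
+/* Quick check of the table-driven popcount (illustrative, not compiled):
+ * 0xF0F0F0F0 has four set bits per byte, 16 in total.
+ */
+#if 0
+	uint32 n = bcm_cntsetbits(0xF0F0F0F0);	/* n == 16 */
+	uint32 z = bcm_cntsetbits(0);		/* z == 0 */
+#endif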
+
+
+static INLINE int /* C equivalent count of leading 0's in a u32 */
+C_bcm_count_leading_zeros(uint32 u32)
+{
+	int shifts = 0;
+	while (u32) {
+		shifts++; u32 >>= 1;
+	}
+	return (32U - shifts);
+}
+
+#ifdef BCMDRIVER
+/*
+ * Assembly instructions: Count Leading Zeros
+ * "clz"	: MIPS, ARM
+ * "cntlzw"	: PowerPC
+ * "BSF"	: x86
+ * "lzcnt"	: AMD, SPARC
+ */
+
+#if defined(__arm__)
+
+#if defined(__ARM_ARCH_7M__) /* Cortex M3 */
+#define __USE_ASM_CLZ__
+#endif /* __ARM_ARCH_7M__ */
+
+#if defined(__ARM_ARCH_7R__) /* Cortex R4 */
+#define __USE_ASM_CLZ__
+#endif /* __ARM_ARCH_7R__ */
+
+#endif /* __arm__ */
+
+static INLINE int
+bcm_count_leading_zeros(uint32 u32)
+{
+#if defined(__USE_ASM_CLZ__)
+	int zeros;
+	__asm__ volatile("clz    %0, %1 \n" : "=r" (zeros) : "r"  (u32));
+	return zeros;
+#else	/* C equivalent */
+	return C_bcm_count_leading_zeros(u32);
+#endif  /* C equivalent */
+}
+
+/* INTERFACE: Multiword bitmap based small id allocator. */
+struct bcm_mwbmap;	/* forward declaration for use as an opaque mwbmap handle */
+
+#define BCM_MWBMAP_INVALID_HDL	((struct bcm_mwbmap *)NULL)
+#define BCM_MWBMAP_INVALID_IDX	((uint32)(~0U))
+
+/* Incarnate a multiword bitmap based small index allocator */
+extern struct bcm_mwbmap * bcm_mwbmap_init(osl_t * osh, uint32 items_max);
+
+/* Free up the multiword bitmap index allocator */
+extern void bcm_mwbmap_fini(osl_t * osh, struct bcm_mwbmap * mwbmap_hdl);
+
+/* Allocate a unique small index using a multiword bitmap index allocator */
+extern uint32 bcm_mwbmap_alloc(struct bcm_mwbmap * mwbmap_hdl);
+
+/* Force an index at a specified position to be in use */
+extern void bcm_mwbmap_force(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix);
+
+/* Free a previously allocated index back into the multiword bitmap allocator */
+extern void bcm_mwbmap_free(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix);
+
+/* Fetch the total number of free indices in the multiword bitmap allocator */
+extern uint32 bcm_mwbmap_free_cnt(struct bcm_mwbmap * mwbmap_hdl);
+
+/* Determine whether an index is in use or free */
+extern bool bcm_mwbmap_isfree(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix);
+
+/* Debug dump a multiword bitmap allocator */
+extern void bcm_mwbmap_show(struct bcm_mwbmap * mwbmap_hdl);
+
+extern void bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl);
+/* End - Multiword bitmap based small Id allocator. */
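+
+/* Usage sketch (illustrative, not compiled): typical allocator lifecycle,
+ * e.g. for small flow IDs. 'osh' is the caller's OSL handle.
+ */
+#if 0
+	struct bcm_mwbmap *map = bcm_mwbmap_init(osh, 256);
+	uint32 id;
+	if (map != BCM_MWBMAP_INVALID_HDL) {
+		id = bcm_mwbmap_alloc(map);
+		if (id != BCM_MWBMAP_INVALID_IDX)
+			bcm_mwbmap_free(map, id);
+		bcm_mwbmap_fini(osh, map);
+	}
+#endif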
+#endif /* BCMDRIVER */
+
+extern void bcm_uint64_right_shift(uint32* r, uint32 a_high, uint32 a_low, uint32 b);
+
+void bcm_add_64(uint32* r_hi, uint32* r_lo, uint32 offset);
+void bcm_sub_64(uint32* r_hi, uint32* r_lo, uint32 offset);
+
+/* return chip ID */
+#if defined(SUPPORT_MULTIPLE_REVISION)
+extern uint bcm_get_chip_id(void);
+#endif /* SUPPORT_MULTIPLE_REVISION */
+
+#ifdef __cplusplus
+	}
+#endif
+
+#ifdef DEBUG_COUNTER
+#define CNTR_TBL_MAX 10
+typedef struct _counter_tbl_t {
+	char name[16];				/* name of this counter table */
+	uint32 prev_log_print;		/* Internal use. Timestamp of the previous log print */
+	uint log_print_interval;	/* Desired interval to print logs in ms */
+	uint needed_cnt;			/* How many counters need to be used */
+	uint32 cnt[CNTR_TBL_MAX];		/* Counting entries to increase at desired places */
+	bool enabled;				/* Whether to enable printing log */
+} counter_tbl_t;
+
+
+void counter_printlog(counter_tbl_t *ctr_tbl);
+#endif /* DEBUG_COUNTER */
+
+#endif	/* _bcmutils_h_ */
diff --git a/drivers/staging/edison-bcm43340/include/bcmwifi_channels.h b/drivers/staging/edison-bcm43340/include/bcmwifi_channels.h
new file mode 100644
index 0000000..f642555
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/bcmwifi_channels.h
@@ -0,0 +1,484 @@
+/*
+ * Misc utility routines for WL and Apps
+ * This header file houses the defines and function prototypes used by
+ * both the wl driver, tools and apps.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmwifi_channels.h 309193 2012-01-19 00:03:57Z $
+ */
+
+#ifndef	_bcmwifi_channels_h_
+#define	_bcmwifi_channels_h_
+
+
+/* A chanspec holds the channel number, band, bandwidth and control sideband */
+typedef uint16 chanspec_t;
+
+/* channel defines */
+#define CH_UPPER_SB			0x01
+#define CH_LOWER_SB			0x02
+#define CH_EWA_VALID			0x04
+#define CH_80MHZ_APART			16
+#define CH_40MHZ_APART			8
+#define CH_20MHZ_APART			4
+#define CH_10MHZ_APART			2
+#define CH_5MHZ_APART			1	/* 2G band channels are 5 MHz apart */
+#define CH_MAX_2G_CHANNEL		14	/* Max channel in 2G band */
+#define	MAXCHANNEL		224	/* max # supported channels. The max channel no is 216,
+					 * this is that + 1 rounded up to a multiple of NBBY (8).
+					 * DO NOT MAKE it > 255: channels are uint8's all over
+					 */
+#define CHSPEC_CTLOVLP(sp1, sp2, sep)	(ABS(wf_chspec_ctlchan(sp1) - wf_chspec_ctlchan(sp2)) < \
+				  (sep))
+
+/* All builds use the new 11ac ratespec/chanspec */
+#undef  D11AC_IOTYPES
+#define D11AC_IOTYPES
+
+#define WL_CHANSPEC_CHAN_MASK		0x00ff
+#define WL_CHANSPEC_CHAN_SHIFT		0
+#define WL_CHANSPEC_CHAN1_MASK		0x000f
+#define WL_CHANSPEC_CHAN1_SHIFT		0
+#define WL_CHANSPEC_CHAN2_MASK		0x00f0
+#define WL_CHANSPEC_CHAN2_SHIFT		4
+
+#define WL_CHANSPEC_CTL_SB_MASK		0x0700
+#define WL_CHANSPEC_CTL_SB_SHIFT	8
+#define WL_CHANSPEC_CTL_SB_LLL		0x0000
+#define WL_CHANSPEC_CTL_SB_LLU		0x0100
+#define WL_CHANSPEC_CTL_SB_LUL		0x0200
+#define WL_CHANSPEC_CTL_SB_LUU		0x0300
+#define WL_CHANSPEC_CTL_SB_ULL		0x0400
+#define WL_CHANSPEC_CTL_SB_ULU		0x0500
+#define WL_CHANSPEC_CTL_SB_UUL		0x0600
+#define WL_CHANSPEC_CTL_SB_UUU		0x0700
+#define WL_CHANSPEC_CTL_SB_LL		WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_LU		WL_CHANSPEC_CTL_SB_LLU
+#define WL_CHANSPEC_CTL_SB_UL		WL_CHANSPEC_CTL_SB_LUL
+#define WL_CHANSPEC_CTL_SB_UU		WL_CHANSPEC_CTL_SB_LUU
+#define WL_CHANSPEC_CTL_SB_L		WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_U		WL_CHANSPEC_CTL_SB_LLU
+#define WL_CHANSPEC_CTL_SB_LOWER	WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_UPPER	WL_CHANSPEC_CTL_SB_LLU
+#define WL_CHANSPEC_CTL_SB_NONE		WL_CHANSPEC_CTL_SB_LLL
+
+#define WL_CHANSPEC_BW_MASK		0x3800
+#define WL_CHANSPEC_BW_SHIFT		11
+#define WL_CHANSPEC_BW_5		0x0000
+#define WL_CHANSPEC_BW_10		0x0800
+#define WL_CHANSPEC_BW_20		0x1000
+#define WL_CHANSPEC_BW_40		0x1800
+#define WL_CHANSPEC_BW_80		0x2000
+#define WL_CHANSPEC_BW_160		0x2800
+#define WL_CHANSPEC_BW_8080		0x3000
+
+#define WL_CHANSPEC_BAND_MASK		0xc000
+#define WL_CHANSPEC_BAND_SHIFT		14
+#define WL_CHANSPEC_BAND_2G		0x0000
+#define WL_CHANSPEC_BAND_3G		0x4000
+#define WL_CHANSPEC_BAND_4G		0x8000
+#define WL_CHANSPEC_BAND_5G		0xc000
+#define INVCHANSPEC			255
+
+/* channel defines */
+#define LOWER_20_SB(channel)		(((channel) > CH_10MHZ_APART) ? \
+					((channel) - CH_10MHZ_APART) : 0)
+#define UPPER_20_SB(channel)		(((channel) < (MAXCHANNEL - CH_10MHZ_APART)) ? \
+					((channel) + CH_10MHZ_APART) : 0)
+
+#define LL_20_SB(channel) (((channel) > 3 * CH_10MHZ_APART) ? ((channel) - 3 * CH_10MHZ_APART) : 0)
+#define UU_20_SB(channel) 	(((channel) < (MAXCHANNEL - 3 * CH_10MHZ_APART)) ? \
+				((channel) + 3 * CH_10MHZ_APART) : 0)
+#define LU_20_SB(channel) LOWER_20_SB(channel)
+#define UL_20_SB(channel) UPPER_20_SB(channel)
+
+#define LOWER_40_SB(channel)		((channel) - CH_20MHZ_APART)
+#define UPPER_40_SB(channel)		((channel) + CH_20MHZ_APART)
+#define CHSPEC_WLCBANDUNIT(chspec)	(CHSPEC_IS5G(chspec) ? BAND_5G_INDEX : BAND_2G_INDEX)
+#define CH20MHZ_CHSPEC(channel)		(chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \
+					(((channel) <= CH_MAX_2G_CHANNEL) ? \
+					WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
+#define NEXT_20MHZ_CHAN(channel)	(((channel) < (MAXCHANNEL - CH_20MHZ_APART)) ? \
+					((channel) + CH_20MHZ_APART) : 0)
+#define CH40MHZ_CHSPEC(channel, ctlsb)	(chanspec_t) \
+					((channel) | (ctlsb) | WL_CHANSPEC_BW_40 | \
+					((channel) <= CH_MAX_2G_CHANNEL ? WL_CHANSPEC_BAND_2G : \
+					WL_CHANSPEC_BAND_5G))
+#define CH80MHZ_CHSPEC(channel, ctlsb)	(chanspec_t) \
+					((channel) | (ctlsb) | \
+					 WL_CHANSPEC_BW_80 | WL_CHANSPEC_BAND_5G)
+#define CH160MHZ_CHSPEC(channel, ctlsb)	(chanspec_t) \
+					((channel) | (ctlsb) | \
+					 WL_CHANSPEC_BW_160 | WL_CHANSPEC_BAND_5G)
+
+/* simple MACROs to get different fields of chanspec */
+#define CHSPEC_CHANNEL(chspec)	((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK))
+#define CHSPEC_CHAN1(chspec)	(((chspec) & WL_CHANSPEC_CHAN1_MASK) >> WL_CHANSPEC_CHAN1_SHIFT)
+#define CHSPEC_CHAN2(chspec)	(((chspec) & WL_CHANSPEC_CHAN2_MASK) >> WL_CHANSPEC_CHAN2_SHIFT)
+#define CHSPEC_BAND(chspec)		((chspec) & WL_CHANSPEC_BAND_MASK)
+#define CHSPEC_CTL_SB(chspec)	((chspec) & WL_CHANSPEC_CTL_SB_MASK)
+#define CHSPEC_BW(chspec)		((chspec) & WL_CHANSPEC_BW_MASK)
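+
+/* Usage sketch (illustrative, not compiled): compose a 40MHz 5G chanspec and
+ * pull the fields back out with the accessors above. wf_chspec_ctlchan() is
+ * declared later in this header.
+ */
+#if 0
+	chanspec_t cs = CH40MHZ_CHSPEC(38, WL_CHANSPEC_CTL_SB_L);
+	uint8 ch = CHSPEC_CHANNEL(cs);		/* 38, the 40MHz center */
+	if (CHSPEC_BAND(cs) == WL_CHANSPEC_BAND_5G &&
+	    CHSPEC_BW(cs) == WL_CHANSPEC_BW_40)
+		ch = wf_chspec_ctlchan(cs);	/* primary 20MHz channel: 36 */
+#endif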
+
+#ifdef WL11N_20MHZONLY
+
+#define CHSPEC_IS10(chspec)	0
+#define CHSPEC_IS20(chspec)	1
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec)	0
+#endif
+#ifndef CHSPEC_IS80
+#define CHSPEC_IS80(chspec)	0
+#endif
+#ifndef CHSPEC_IS160
+#define CHSPEC_IS160(chspec)	0
+#endif
+#ifndef CHSPEC_IS8080
+#define CHSPEC_IS8080(chspec)	0
+#endif
+
+#else /* !WL11N_20MHZONLY */
+
+#define CHSPEC_IS10(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10)
+#define CHSPEC_IS20(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20)
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40)
+#endif
+#ifndef CHSPEC_IS80
+#define CHSPEC_IS80(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_80)
+#endif
+#ifndef CHSPEC_IS160
+#define CHSPEC_IS160(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_160)
+#endif
+#ifndef CHSPEC_IS8080
+#define CHSPEC_IS8080(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_8080)
+#endif
+
+#endif /* !WL11N_20MHZONLY */
+
+#define CHSPEC_IS5G(chspec)	(((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G)
+#define CHSPEC_IS2G(chspec)	(((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G)
+#define CHSPEC_SB_UPPER(chspec)	\
+	((((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_UPPER) && \
+	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40))
+#define CHSPEC_SB_LOWER(chspec)	\
+	((((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_LOWER) && \
+	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40))
+#define CHSPEC2WLC_BAND(chspec) (CHSPEC_IS5G(chspec) ? WLC_BAND_5G : WLC_BAND_2G)
+
+/**
+ * Number of chars needed for wf_chspec_ntoa() destination character buffer.
+ */
+#define CHANSPEC_STR_LEN    20
+
+
+#define CHSPEC_IS_BW_160_WIDE(chspec) (CHSPEC_BW(chspec) == WL_CHANSPEC_BW_160 ||\
+	CHSPEC_BW(chspec) == WL_CHANSPEC_BW_8080)
+
+/* BW inequality comparisons, LE (<=), GE (>=), LT (<), GT (>); comparisons can be made
+ * as simple numeric comparisons, with the exception that 160 is the same BW as 80+80
+ * but has a different numeric value (WL_CHANSPEC_BW_160 < WL_CHANSPEC_BW_8080).
+ *
+ * The LT/LE/GT/GE macros therefore first check whether both the chspec bandwidth
+ * and bw are 160-wide; only if not is the plain numeric comparison made.
+ */
+#define CHSPEC_BW_GE(chspec, bw) \
+	((CHSPEC_IS_BW_160_WIDE(chspec) &&\
+	(bw == WL_CHANSPEC_BW_160 || bw == WL_CHANSPEC_BW_8080)) ||\
+	(CHSPEC_BW(chspec) >= bw))
+
+#define CHSPEC_BW_LE(chspec, bw) \
+	((CHSPEC_IS_BW_160_WIDE(chspec) &&\
+	(bw == WL_CHANSPEC_BW_160 || bw == WL_CHANSPEC_BW_8080)) ||\
+	(CHSPEC_BW(chspec) <= bw))
+
+#define CHSPEC_BW_GT(chspec, bw) \
+	(!(CHSPEC_IS_BW_160_WIDE(chspec) &&\
+	(bw == WL_CHANSPEC_BW_160 || bw == WL_CHANSPEC_BW_8080)) &&\
+	(CHSPEC_BW(chspec) > bw))
+
+#define CHSPEC_BW_LT(chspec, bw) \
+	(!(CHSPEC_IS_BW_160_WIDE(chspec) &&\
+	(bw == WL_CHANSPEC_BW_160 || bw == WL_CHANSPEC_BW_8080)) &&\
+	(CHSPEC_BW(chspec) < bw))
+
+/* Legacy Chanspec defines
+ * These are the defines for the previous format of the chanspec_t
+ */
+#define WL_LCHANSPEC_CHAN_MASK		0x00ff
+#define WL_LCHANSPEC_CHAN_SHIFT		     0
+
+#define WL_LCHANSPEC_CTL_SB_MASK	0x0300
+#define WL_LCHANSPEC_CTL_SB_SHIFT	     8
+#define WL_LCHANSPEC_CTL_SB_LOWER	0x0100
+#define WL_LCHANSPEC_CTL_SB_UPPER	0x0200
+#define WL_LCHANSPEC_CTL_SB_NONE	0x0300
+
+#define WL_LCHANSPEC_BW_MASK		0x0C00
+#define WL_LCHANSPEC_BW_SHIFT		    10
+#define WL_LCHANSPEC_BW_10		0x0400
+#define WL_LCHANSPEC_BW_20		0x0800
+#define WL_LCHANSPEC_BW_40		0x0C00
+
+#define WL_LCHANSPEC_BAND_MASK		0xf000
+#define WL_LCHANSPEC_BAND_SHIFT		    12
+#define WL_LCHANSPEC_BAND_5G		0x1000
+#define WL_LCHANSPEC_BAND_2G		0x2000
+
+#define LCHSPEC_CHANNEL(chspec)	((uint8)((chspec) & WL_LCHANSPEC_CHAN_MASK))
+#define LCHSPEC_BAND(chspec)	((chspec) & WL_LCHANSPEC_BAND_MASK)
+#define LCHSPEC_CTL_SB(chspec)	((chspec) & WL_LCHANSPEC_CTL_SB_MASK)
+#define LCHSPEC_BW(chspec)	((chspec) & WL_LCHANSPEC_BW_MASK)
+#define LCHSPEC_IS10(chspec)	(((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_10)
+#define LCHSPEC_IS20(chspec)	(((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_20)
+#define LCHSPEC_IS40(chspec)	(((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40)
+#define LCHSPEC_IS5G(chspec)	(((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_5G)
+#define LCHSPEC_IS2G(chspec)	(((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_2G)
+
+#define LCHSPEC_SB_UPPER(chspec)	\
+	((((chspec) & WL_LCHANSPEC_CTL_SB_MASK) == WL_LCHANSPEC_CTL_SB_UPPER) && \
+	(((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40))
+#define LCHSPEC_SB_LOWER(chspec)	\
+	((((chspec) & WL_LCHANSPEC_CTL_SB_MASK) == WL_LCHANSPEC_CTL_SB_LOWER) && \
+	(((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40))
+
+#define LCHSPEC_CREATE(chan, band, bw, sb)  ((uint16)((chan) | (sb) | (bw) | (band)))
+
+#define CH20MHZ_LCHSPEC(channel) \
+	(chanspec_t)((chanspec_t)(channel) | WL_LCHANSPEC_BW_20 | \
+	WL_LCHANSPEC_CTL_SB_NONE | (((channel) <= CH_MAX_2G_CHANNEL) ? \
+	WL_LCHANSPEC_BAND_2G : WL_LCHANSPEC_BAND_5G))
+
+/*
+ * WF_CHAN_FACTOR_* constants are used to calculate channel frequency
+ * given a channel number.
+ * chan_freq (MHz) = chan_factor * 500 kHz + chan_number * 5 MHz
+ */
+
+/**
+ * Channel Factor for the starting frequency of 2.4 GHz channels.
+ * The value corresponds to 2407 MHz.
+ */
+#define WF_CHAN_FACTOR_2_4_G		4814	/* 2.4 GHz band, 2407 MHz */
+
+/**
+ * Channel Factor for the starting frequency of 5 GHz channels.
+ * The value corresponds to 5000 MHz.
+ */
+#define WF_CHAN_FACTOR_5_G		10000	/* 5   GHz band, 5000 MHz */
+
+/**
+ * Channel Factor for the starting frequency of 4.9 GHz channels.
+ * The value corresponds to 4000 MHz.
+ */
+#define WF_CHAN_FACTOR_4_G		8000	/* 4.9 GHz band for Japan */
+
+#define WLC_2G_25MHZ_OFFSET		5	/* 2.4GHz band channel offset */
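+
+/* Worked example (illustrative, not compiled): applying the formula above,
+ * 2.4 GHz channel 1  -> 4814 * 0.5 MHz + 1 * 5 MHz  = 2412 MHz, and
+ * 5 GHz   channel 36 -> 10000 * 0.5 MHz + 36 * 5 MHz = 5180 MHz.
+ * wf_channel2mhz()/wf_mhz2channel() are declared later in this header.
+ */
+#if 0
+	int mhz = wf_channel2mhz(36, WF_CHAN_FACTOR_5_G);	/* 5180 */
+	int ch  = wf_mhz2channel(2412, WF_CHAN_FACTOR_2_4_G);	/* 1 */
+#endif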
+
+/**
+ * Convert chanspec to ascii string
+ *
+ * @param	chspec		chanspec format
+ * @param	buf		ascii string of chanspec
+ *
+ * @return	pointer to buf with room for at least CHANSPEC_STR_LEN bytes
+ *
+ * @see		CHANSPEC_STR_LEN
+ */
+extern char * wf_chspec_ntoa(chanspec_t chspec, char *buf);
+
+/**
+ * Convert ascii string to chanspec
+ *
+ * @param	a     pointer to input string
+ *
+ * @return	a non-zero chanspec if successful, or 0 otherwise
+ */
+extern chanspec_t wf_chspec_aton(const char *a);
+
+/**
+ * Verify the chanspec fields are valid.
+ *
+ * Verify the chanspec is using a legal set of field values, i.e. that the chanspec
+ * specified a band, bw, ctl_sb and channel and that the combination could be
+ * legal given some set of circumstances.
+ *
+ * @param	chanspec   input chanspec to verify
+ *
+ * @return TRUE if the chanspec is malformed, FALSE if it looks good.
+ */
+extern bool wf_chspec_malformed(chanspec_t chanspec);
+
+/**
+ * Verify the chanspec specifies a valid channel according to 802.11.
+ *
+ * @param	chanspec   input chanspec to verify
+ *
+ * @return TRUE if the chanspec is a valid 802.11 channel
+ */
+extern bool wf_chspec_valid(chanspec_t chanspec);
+
+/**
+ * Return the primary (control) channel.
+ *
+ * This function returns the channel number of the primary 20MHz channel. For
+ * 20MHz channels this is just the channel number. For 40MHz or wider channels
+ * it is the primary 20MHz channel specified by the chanspec.
+ *
+ * @param	chspec    input chanspec
+ *
+ * @return Returns the channel number of the primary 20MHz channel
+ */
+extern uint8 wf_chspec_ctlchan(chanspec_t chspec);
+
+/**
+ * Return the primary (control) chanspec.
+ *
+ * This function returns the chanspec of the primary 20MHz channel. For 20MHz
+ * channels this is just the chanspec. For 40MHz or wider channels it is the
+ * chanspec of the primary 20MHZ channel specified by the chanspec.
+ *
+ * @param	chspec    input chanspec
+ *
+ * @return Returns the chanspec of the primary 20MHz channel
+ */
+extern chanspec_t wf_chspec_ctlchspec(chanspec_t chspec);
+
+/**
+ * Return the primary 40MHz chanspec of an 80MHz chanspec.
+ *
+ * This function returns the chanspec for the primary 40MHz of an 80MHz channel.
+ * The control sideband specifies the same 20MHz channel that the 80MHz channel is using
+ * as the primary 20MHz channel.
+ */
+extern chanspec_t wf_chspec_primary40_chspec(chanspec_t chspec);
+
+/*
+ * Return the channel number for a given frequency and base frequency.
+ * The returned channel number is relative to the given base frequency.
+ * If the given base frequency is zero, a base frequency of 5 GHz is assumed for
+ * frequencies from 5 - 6 GHz, and 2.407 GHz is assumed for 2.4 - 2.5 GHz.
+ *
+ * Frequency is specified in MHz.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for
+ * 2.4 GHz and 5 GHz bands.
+ *
+ * The returned channel will be in the range [1, 14] in the 2.4 GHz band
+ * and [0, 200] otherwise.
+ * -1 is returned if the start_factor is WF_CHAN_FACTOR_2_4_G and the
+ * frequency is not a 2.4 GHz channel, or if the frequency is not an even
+ * multiple of 5 MHz from the base frequency to the base plus 1 GHz.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ *
+ * @param	freq          frequency in MHz
+ * @param	start_factor  base frequency in 500 kHz units, e.g. 10000 for 5 GHz
+ *
+ * @return Returns a channel number
+ *
+ * @see  WF_CHAN_FACTOR_2_4_G
+ * @see  WF_CHAN_FACTOR_5_G
+ */
+extern int wf_mhz2channel(uint freq, uint start_factor);
+
+/**
+ * Return the center frequency in MHz of the given channel and base frequency.
+ *
+ * Return the center frequency in MHz of the given channel and base frequency.
+ * The channel number is interpreted relative to the given base frequency.
+ *
+ * The valid channel range is [1, 14] in the 2.4 GHz band and [0, 200] otherwise.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for
+ * 2.4 GHz and 5 GHz bands.
+ * The channel range of [1, 14] is only checked for a start_factor of
+ * WF_CHAN_FACTOR_2_4_G (4814).
+ * Odd start_factors produce channels on .5 MHz boundaries, in which case
+ * the answer is rounded down to an integral MHz.
+ * -1 is returned for an out of range channel.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ *
+ * @param	channel       input channel number
+ * @param	start_factor  base frequency in 500 kHz units, e.g. 10000 for 5 GHz
+ *
+ * @return Returns a frequency in MHz
+ *
+ * @see  WF_CHAN_FACTOR_2_4_G
+ * @see  WF_CHAN_FACTOR_5_G
+ */
+extern int wf_channel2mhz(uint channel, uint start_factor);
+
+/**
+ * Convert ctl chan and bw to chanspec
+ *
+ * @param	ctl_ch		channel
+ * @param	bw	        bandwidth
+ *
+ * @return	> 0 if successful or 0 otherwise
+ *
+ */
+extern uint16 wf_channel2chspec(uint ctl_ch, uint bw);
+
+extern uint wf_channel2freq(uint channel);
+extern uint wf_freq2channel(uint freq);
+
+/*
+ * Returns the 80+80 chanspec corresponding to the following input parameters
+ *
+ *    primary_20mhz - primary 20 MHz channel
+ *    chan1 - channel number of the first 80 MHz band
+ *    chan2 - channel number of the second 80 MHz band
+ *
+ *  parameters chan1 and chan2 are channel numbers in {42, 58, 106, 122, 138, 155}
+ *
+ *  returns INVCHANSPEC in case of error
+ */
+
+extern chanspec_t wf_chspec_get8080_chspec(uint8 primary_20mhz,
+uint8 chan1_80Mhz, uint8 chan2_80Mhz);
+
+/*
+ * Returns the primary 80 MHz channel for the provided chanspec
+ *
+ *    chanspec - input chanspec for which the 80 MHz primary channel has to be retrieved
+ *
+ *  returns -1 if the provided chanspec is a 20/40 MHz chanspec
+ */
+extern uint8 wf_chspec_primary80_channel(chanspec_t chanspec);
+
+/*
+ * Returns the secondary 80 MHz channel for the provided chanspec
+ *
+ *    chanspec - input chanspec for which the 80 MHz secondary channel has to be retrieved
+ *
+ *  returns -1 if the provided chanspec is a 20/40 MHz chanspec
+ */
+extern uint8 wf_chspec_secondary80_channel(chanspec_t chanspec);
+
+/*
+ * This function returns the chanspec for the primary 80 MHz of a 160 MHz or 80+80 channel.
+ */
+extern chanspec_t wf_chspec_primary80_chspec(chanspec_t chspec);
+
+
+#endif	/* _bcmwifi_channels_h_ */
diff --git a/drivers/staging/edison-bcm43340/include/bcmwifi_rates.h b/drivers/staging/edison-bcm43340/include/bcmwifi_rates.h
new file mode 100644
index 0000000..38d339b
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/bcmwifi_rates.h
@@ -0,0 +1,458 @@
+/*
+ * Indices for 802.11 a/b/g/n/ac 1-3 chain symmetric transmit rates
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: bcmwifi_rates.h 5187 2012-06-29 06:17:50Z $
+ */
+
+#ifndef _bcmwifi_rates_h_
+#define _bcmwifi_rates_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+
+#define WL_RATESET_SZ_DSSS		4
+#define WL_RATESET_SZ_OFDM		8
+#define WL_RATESET_SZ_HT_MCS	8
+#define WL_RATESET_SZ_VHT_MCS	10
+
+#define WL_TX_CHAINS_MAX	3
+
+#define WL_RATE_DISABLED		(-128) /* Power value corresponding to unsupported rate */
+
+/* Transmit channel bandwidths */
+typedef enum wl_tx_bw {
+	WL_TX_BW_20,
+	WL_TX_BW_40,
+	WL_TX_BW_80,
+	WL_TX_BW_160,
+	WL_TX_BW_20IN40,
+	WL_TX_BW_20IN80,
+	WL_TX_BW_40IN80,
+	WL_TX_BW_20IN160,
+	WL_TX_BW_40IN160,
+	WL_TX_BW_80IN160,
+	WL_TX_BW_ALL
+} wl_tx_bw_t;
+
+
+/*
+ * Transmit modes.
+ * Not all modes are listed here, only those required for disambiguation. e.g. SPEXP is not listed
+ */
+typedef enum wl_tx_mode {
+	WL_TX_MODE_NONE,
+	WL_TX_MODE_STBC,
+	WL_TX_MODE_CDD,
+	WL_TX_MODE_TXBF,
+	WL_NUM_TX_MODES
+} wl_tx_mode_t;
+
+
+/* Number of transmit chains */
+typedef enum wl_tx_chains {
+	WL_TX_CHAINS_1 = 1,
+	WL_TX_CHAINS_2,
+	WL_TX_CHAINS_3
+} wl_tx_chains_t;
+
+
+/* Number of transmit streams */
+typedef enum wl_tx_nss {
+	WL_TX_NSS_1 = 1,
+	WL_TX_NSS_2,
+	WL_TX_NSS_3
+} wl_tx_nss_t;
+
+
+typedef enum clm_rates {
+	/************
+	* 1 chain  *
+	************
+	*/
+
+	/* 1 Stream */
+	WL_RATE_1X1_DSSS_1         = 0,
+	WL_RATE_1X1_DSSS_2         = 1,
+	WL_RATE_1X1_DSSS_5_5       = 2,
+	WL_RATE_1X1_DSSS_11        = 3,
+
+	WL_RATE_1X1_OFDM_6         = 4,
+	WL_RATE_1X1_OFDM_9         = 5,
+	WL_RATE_1X1_OFDM_12        = 6,
+	WL_RATE_1X1_OFDM_18        = 7,
+	WL_RATE_1X1_OFDM_24        = 8,
+	WL_RATE_1X1_OFDM_36        = 9,
+	WL_RATE_1X1_OFDM_48        = 10,
+	WL_RATE_1X1_OFDM_54        = 11,
+
+	WL_RATE_1X1_MCS0           = 12,
+	WL_RATE_1X1_MCS1           = 13,
+	WL_RATE_1X1_MCS2           = 14,
+	WL_RATE_1X1_MCS3           = 15,
+	WL_RATE_1X1_MCS4           = 16,
+	WL_RATE_1X1_MCS5           = 17,
+	WL_RATE_1X1_MCS6           = 18,
+	WL_RATE_1X1_MCS7           = 19,
+
+	WL_RATE_1X1_VHT0SS1        = 12,
+	WL_RATE_1X1_VHT1SS1        = 13,
+	WL_RATE_1X1_VHT2SS1        = 14,
+	WL_RATE_1X1_VHT3SS1        = 15,
+	WL_RATE_1X1_VHT4SS1        = 16,
+	WL_RATE_1X1_VHT5SS1        = 17,
+	WL_RATE_1X1_VHT6SS1        = 18,
+	WL_RATE_1X1_VHT7SS1        = 19,
+	WL_RATE_1X1_VHT8SS1        = 20,
+	WL_RATE_1X1_VHT9SS1        = 21,
+
+
+	/************
+	* 2 chains *
+	************
+	*/
+
+	/* 1 Stream expanded + 1 */
+	WL_RATE_1X2_DSSS_1         = 22,
+	WL_RATE_1X2_DSSS_2         = 23,
+	WL_RATE_1X2_DSSS_5_5       = 24,
+	WL_RATE_1X2_DSSS_11        = 25,
+
+	WL_RATE_1X2_CDD_OFDM_6     = 26,
+	WL_RATE_1X2_CDD_OFDM_9     = 27,
+	WL_RATE_1X2_CDD_OFDM_12    = 28,
+	WL_RATE_1X2_CDD_OFDM_18    = 29,
+	WL_RATE_1X2_CDD_OFDM_24    = 30,
+	WL_RATE_1X2_CDD_OFDM_36    = 31,
+	WL_RATE_1X2_CDD_OFDM_48    = 32,
+	WL_RATE_1X2_CDD_OFDM_54    = 33,
+
+	WL_RATE_1X2_CDD_MCS0       = 34,
+	WL_RATE_1X2_CDD_MCS1       = 35,
+	WL_RATE_1X2_CDD_MCS2       = 36,
+	WL_RATE_1X2_CDD_MCS3       = 37,
+	WL_RATE_1X2_CDD_MCS4       = 38,
+	WL_RATE_1X2_CDD_MCS5       = 39,
+	WL_RATE_1X2_CDD_MCS6       = 40,
+	WL_RATE_1X2_CDD_MCS7       = 41,
+
+	WL_RATE_1X2_VHT0SS1        = 34,
+	WL_RATE_1X2_VHT1SS1        = 35,
+	WL_RATE_1X2_VHT2SS1        = 36,
+	WL_RATE_1X2_VHT3SS1        = 37,
+	WL_RATE_1X2_VHT4SS1        = 38,
+	WL_RATE_1X2_VHT5SS1        = 39,
+	WL_RATE_1X2_VHT6SS1        = 40,
+	WL_RATE_1X2_VHT7SS1        = 41,
+	WL_RATE_1X2_VHT8SS1        = 42,
+	WL_RATE_1X2_VHT9SS1        = 43,
+
+	/* 2 Streams */
+	WL_RATE_2X2_STBC_MCS0      = 44,
+	WL_RATE_2X2_STBC_MCS1      = 45,
+	WL_RATE_2X2_STBC_MCS2      = 46,
+	WL_RATE_2X2_STBC_MCS3      = 47,
+	WL_RATE_2X2_STBC_MCS4      = 48,
+	WL_RATE_2X2_STBC_MCS5      = 49,
+	WL_RATE_2X2_STBC_MCS6      = 50,
+	WL_RATE_2X2_STBC_MCS7      = 51,
+
+	WL_RATE_2X2_STBC_VHT0SS1   = 44,
+	WL_RATE_2X2_STBC_VHT1SS1   = 45,
+	WL_RATE_2X2_STBC_VHT2SS1   = 46,
+	WL_RATE_2X2_STBC_VHT3SS1   = 47,
+	WL_RATE_2X2_STBC_VHT4SS1   = 48,
+	WL_RATE_2X2_STBC_VHT5SS1   = 49,
+	WL_RATE_2X2_STBC_VHT6SS1   = 50,
+	WL_RATE_2X2_STBC_VHT7SS1   = 51,
+	WL_RATE_2X2_STBC_VHT8SS1   = 52,
+	WL_RATE_2X2_STBC_VHT9SS1   = 53,
+
+	WL_RATE_2X2_SDM_MCS8       = 54,
+	WL_RATE_2X2_SDM_MCS9       = 55,
+	WL_RATE_2X2_SDM_MCS10      = 56,
+	WL_RATE_2X2_SDM_MCS11      = 57,
+	WL_RATE_2X2_SDM_MCS12      = 58,
+	WL_RATE_2X2_SDM_MCS13      = 59,
+	WL_RATE_2X2_SDM_MCS14      = 60,
+	WL_RATE_2X2_SDM_MCS15      = 61,
+
+	WL_RATE_2X2_VHT0SS2        = 54,
+	WL_RATE_2X2_VHT1SS2        = 55,
+	WL_RATE_2X2_VHT2SS2        = 56,
+	WL_RATE_2X2_VHT3SS2        = 57,
+	WL_RATE_2X2_VHT4SS2        = 58,
+	WL_RATE_2X2_VHT5SS2        = 59,
+	WL_RATE_2X2_VHT6SS2        = 60,
+	WL_RATE_2X2_VHT7SS2        = 61,
+	WL_RATE_2X2_VHT8SS2        = 62,
+	WL_RATE_2X2_VHT9SS2        = 63,
+
+	/************
+	* 3 chains *
+	************
+	*/
+
+	/* 1 Stream expanded + 2 */
+	WL_RATE_1X3_DSSS_1         = 64,
+	WL_RATE_1X3_DSSS_2         = 65,
+	WL_RATE_1X3_DSSS_5_5       = 66,
+	WL_RATE_1X3_DSSS_11        = 67,
+
+	WL_RATE_1X3_CDD_OFDM_6     = 68,
+	WL_RATE_1X3_CDD_OFDM_9     = 69,
+	WL_RATE_1X3_CDD_OFDM_12    = 70,
+	WL_RATE_1X3_CDD_OFDM_18    = 71,
+	WL_RATE_1X3_CDD_OFDM_24    = 72,
+	WL_RATE_1X3_CDD_OFDM_36    = 73,
+	WL_RATE_1X3_CDD_OFDM_48    = 74,
+	WL_RATE_1X3_CDD_OFDM_54    = 75,
+
+	WL_RATE_1X3_CDD_MCS0       = 76,
+	WL_RATE_1X3_CDD_MCS1       = 77,
+	WL_RATE_1X3_CDD_MCS2       = 78,
+	WL_RATE_1X3_CDD_MCS3       = 79,
+	WL_RATE_1X3_CDD_MCS4       = 80,
+	WL_RATE_1X3_CDD_MCS5       = 81,
+	WL_RATE_1X3_CDD_MCS6       = 82,
+	WL_RATE_1X3_CDD_MCS7       = 83,
+
+	WL_RATE_1X3_VHT0SS1        = 76,
+	WL_RATE_1X3_VHT1SS1        = 77,
+	WL_RATE_1X3_VHT2SS1        = 78,
+	WL_RATE_1X3_VHT3SS1        = 79,
+	WL_RATE_1X3_VHT4SS1        = 80,
+	WL_RATE_1X3_VHT5SS1        = 81,
+	WL_RATE_1X3_VHT6SS1        = 82,
+	WL_RATE_1X3_VHT7SS1        = 83,
+	WL_RATE_1X3_VHT8SS1        = 84,
+	WL_RATE_1X3_VHT9SS1        = 85,
+
+	/* 2 Streams expanded + 1 */
+	WL_RATE_2X3_STBC_MCS0      = 86,
+	WL_RATE_2X3_STBC_MCS1      = 87,
+	WL_RATE_2X3_STBC_MCS2      = 88,
+	WL_RATE_2X3_STBC_MCS3      = 89,
+	WL_RATE_2X3_STBC_MCS4      = 90,
+	WL_RATE_2X3_STBC_MCS5      = 91,
+	WL_RATE_2X3_STBC_MCS6      = 92,
+	WL_RATE_2X3_STBC_MCS7      = 93,
+
+	WL_RATE_2X3_STBC_VHT0SS1   = 86,
+	WL_RATE_2X3_STBC_VHT1SS1   = 87,
+	WL_RATE_2X3_STBC_VHT2SS1   = 88,
+	WL_RATE_2X3_STBC_VHT3SS1   = 89,
+	WL_RATE_2X3_STBC_VHT4SS1   = 90,
+	WL_RATE_2X3_STBC_VHT5SS1   = 91,
+	WL_RATE_2X3_STBC_VHT6SS1   = 92,
+	WL_RATE_2X3_STBC_VHT7SS1   = 93,
+	WL_RATE_2X3_STBC_VHT8SS1   = 94,
+	WL_RATE_2X3_STBC_VHT9SS1   = 95,
+
+	WL_RATE_2X3_SDM_MCS8       = 96,
+	WL_RATE_2X3_SDM_MCS9       = 97,
+	WL_RATE_2X3_SDM_MCS10      = 98,
+	WL_RATE_2X3_SDM_MCS11      = 99,
+	WL_RATE_2X3_SDM_MCS12      = 100,
+	WL_RATE_2X3_SDM_MCS13      = 101,
+	WL_RATE_2X3_SDM_MCS14      = 102,
+	WL_RATE_2X3_SDM_MCS15      = 103,
+
+	WL_RATE_2X3_VHT0SS2        = 96,
+	WL_RATE_2X3_VHT1SS2        = 97,
+	WL_RATE_2X3_VHT2SS2        = 98,
+	WL_RATE_2X3_VHT3SS2        = 99,
+	WL_RATE_2X3_VHT4SS2        = 100,
+	WL_RATE_2X3_VHT5SS2        = 101,
+	WL_RATE_2X3_VHT6SS2        = 102,
+	WL_RATE_2X3_VHT7SS2        = 103,
+	WL_RATE_2X3_VHT8SS2        = 104,
+	WL_RATE_2X3_VHT9SS2        = 105,
+
+	/* 3 Streams */
+	WL_RATE_3X3_SDM_MCS16      = 106,
+	WL_RATE_3X3_SDM_MCS17      = 107,
+	WL_RATE_3X3_SDM_MCS18      = 108,
+	WL_RATE_3X3_SDM_MCS19      = 109,
+	WL_RATE_3X3_SDM_MCS20      = 110,
+	WL_RATE_3X3_SDM_MCS21      = 111,
+	WL_RATE_3X3_SDM_MCS22      = 112,
+	WL_RATE_3X3_SDM_MCS23      = 113,
+
+	WL_RATE_3X3_VHT0SS3        = 106,
+	WL_RATE_3X3_VHT1SS3        = 107,
+	WL_RATE_3X3_VHT2SS3        = 108,
+	WL_RATE_3X3_VHT3SS3        = 109,
+	WL_RATE_3X3_VHT4SS3        = 110,
+	WL_RATE_3X3_VHT5SS3        = 111,
+	WL_RATE_3X3_VHT6SS3        = 112,
+	WL_RATE_3X3_VHT7SS3        = 113,
+	WL_RATE_3X3_VHT8SS3        = 114,
+	WL_RATE_3X3_VHT9SS3        = 115,
+
+
+	/****************************
+	 * TX Beamforming, 2 chains *
+	 ****************************
+	 */
+
+	/* 1 Stream expanded + 1 */
+
+	WL_RATE_1X2_TXBF_OFDM_6    = 116,
+	WL_RATE_1X2_TXBF_OFDM_9    = 117,
+	WL_RATE_1X2_TXBF_OFDM_12   = 118,
+	WL_RATE_1X2_TXBF_OFDM_18   = 119,
+	WL_RATE_1X2_TXBF_OFDM_24   = 120,
+	WL_RATE_1X2_TXBF_OFDM_36   = 121,
+	WL_RATE_1X2_TXBF_OFDM_48   = 122,
+	WL_RATE_1X2_TXBF_OFDM_54   = 123,
+
+	WL_RATE_1X2_TXBF_MCS0      = 124,
+	WL_RATE_1X2_TXBF_MCS1      = 125,
+	WL_RATE_1X2_TXBF_MCS2      = 126,
+	WL_RATE_1X2_TXBF_MCS3      = 127,
+	WL_RATE_1X2_TXBF_MCS4      = 128,
+	WL_RATE_1X2_TXBF_MCS5      = 129,
+	WL_RATE_1X2_TXBF_MCS6      = 130,
+	WL_RATE_1X2_TXBF_MCS7      = 131,
+
+	WL_RATE_1X2_TXBF_VHT0SS1   = 124,
+	WL_RATE_1X2_TXBF_VHT1SS1   = 125,
+	WL_RATE_1X2_TXBF_VHT2SS1   = 126,
+	WL_RATE_1X2_TXBF_VHT3SS1   = 127,
+	WL_RATE_1X2_TXBF_VHT4SS1   = 128,
+	WL_RATE_1X2_TXBF_VHT5SS1   = 129,
+	WL_RATE_1X2_TXBF_VHT6SS1   = 130,
+	WL_RATE_1X2_TXBF_VHT7SS1   = 131,
+	WL_RATE_1X2_TXBF_VHT8SS1   = 132,
+	WL_RATE_1X2_TXBF_VHT9SS1   = 133,
+
+	/* 2 Streams */
+
+	WL_RATE_2X2_TXBF_SDM_MCS8  = 134,
+	WL_RATE_2X2_TXBF_SDM_MCS9  = 135,
+	WL_RATE_2X2_TXBF_SDM_MCS10 = 136,
+	WL_RATE_2X2_TXBF_SDM_MCS11 = 137,
+	WL_RATE_2X2_TXBF_SDM_MCS12 = 138,
+	WL_RATE_2X2_TXBF_SDM_MCS13 = 139,
+	WL_RATE_2X2_TXBF_SDM_MCS14 = 140,
+	WL_RATE_2X2_TXBF_SDM_MCS15 = 141,
+
+	WL_RATE_2X2_TXBF_VHT0SS2   = 134,
+	WL_RATE_2X2_TXBF_VHT1SS2   = 135,
+	WL_RATE_2X2_TXBF_VHT2SS2   = 136,
+	WL_RATE_2X2_TXBF_VHT3SS2   = 137,
+	WL_RATE_2X2_TXBF_VHT4SS2   = 138,
+	WL_RATE_2X2_TXBF_VHT5SS2   = 139,
+	WL_RATE_2X2_TXBF_VHT6SS2   = 140,
+	WL_RATE_2X2_TXBF_VHT7SS2   = 141,
+
+
+	/****************************
+	 * TX Beamforming, 3 chains *
+	 ****************************
+	 */
+
+	/* 1 Stream expanded + 2 */
+
+	WL_RATE_1X3_TXBF_OFDM_6    = 142,
+	WL_RATE_1X3_TXBF_OFDM_9    = 143,
+	WL_RATE_1X3_TXBF_OFDM_12   = 144,
+	WL_RATE_1X3_TXBF_OFDM_18   = 145,
+	WL_RATE_1X3_TXBF_OFDM_24   = 146,
+	WL_RATE_1X3_TXBF_OFDM_36   = 147,
+	WL_RATE_1X3_TXBF_OFDM_48   = 148,
+	WL_RATE_1X3_TXBF_OFDM_54   = 149,
+
+	WL_RATE_1X3_TXBF_MCS0      = 150,
+	WL_RATE_1X3_TXBF_MCS1      = 151,
+	WL_RATE_1X3_TXBF_MCS2      = 152,
+	WL_RATE_1X3_TXBF_MCS3      = 153,
+	WL_RATE_1X3_TXBF_MCS4      = 154,
+	WL_RATE_1X3_TXBF_MCS5      = 155,
+	WL_RATE_1X3_TXBF_MCS6      = 156,
+	WL_RATE_1X3_TXBF_MCS7      = 157,
+
+	WL_RATE_1X3_TXBF_VHT0SS1   = 150,
+	WL_RATE_1X3_TXBF_VHT1SS1   = 151,
+	WL_RATE_1X3_TXBF_VHT2SS1   = 152,
+	WL_RATE_1X3_TXBF_VHT3SS1   = 153,
+	WL_RATE_1X3_TXBF_VHT4SS1   = 154,
+	WL_RATE_1X3_TXBF_VHT5SS1   = 155,
+	WL_RATE_1X3_TXBF_VHT6SS1   = 156,
+	WL_RATE_1X3_TXBF_VHT7SS1   = 157,
+	WL_RATE_1X3_TXBF_VHT8SS1   = 158,
+	WL_RATE_1X3_TXBF_VHT9SS1   = 159,
+
+	/* 2 Streams expanded + 1 */
+
+	WL_RATE_2X3_TXBF_SDM_MCS8  = 160,
+	WL_RATE_2X3_TXBF_SDM_MCS9  = 161,
+	WL_RATE_2X3_TXBF_SDM_MCS10 = 162,
+	WL_RATE_2X3_TXBF_SDM_MCS11 = 163,
+	WL_RATE_2X3_TXBF_SDM_MCS12 = 164,
+	WL_RATE_2X3_TXBF_SDM_MCS13 = 165,
+	WL_RATE_2X3_TXBF_SDM_MCS14 = 166,
+	WL_RATE_2X3_TXBF_SDM_MCS15 = 167,
+
+	WL_RATE_2X3_TXBF_VHT0SS2   = 160,
+	WL_RATE_2X3_TXBF_VHT1SS2   = 161,
+	WL_RATE_2X3_TXBF_VHT2SS2   = 162,
+	WL_RATE_2X3_TXBF_VHT3SS2   = 163,
+	WL_RATE_2X3_TXBF_VHT4SS2   = 164,
+	WL_RATE_2X3_TXBF_VHT5SS2   = 165,
+	WL_RATE_2X3_TXBF_VHT6SS2   = 166,
+	WL_RATE_2X3_TXBF_VHT7SS2   = 167,
+	WL_RATE_2X3_TXBF_VHT8SS2   = 168,
+	WL_RATE_2X3_TXBF_VHT9SS2   = 169,
+
+	/* 3 Streams */
+
+	WL_RATE_3X3_TXBF_SDM_MCS16 = 170,
+	WL_RATE_3X3_TXBF_SDM_MCS17 = 171,
+	WL_RATE_3X3_TXBF_SDM_MCS18 = 172,
+	WL_RATE_3X3_TXBF_SDM_MCS19 = 173,
+	WL_RATE_3X3_TXBF_SDM_MCS20 = 174,
+	WL_RATE_3X3_TXBF_SDM_MCS21 = 175,
+	WL_RATE_3X3_TXBF_SDM_MCS22 = 176,
+	WL_RATE_3X3_TXBF_SDM_MCS23 = 177,
+
+	WL_RATE_3X3_TXBF_VHT0SS3   = 170,
+	WL_RATE_3X3_TXBF_VHT1SS3   = 171,
+	WL_RATE_3X3_TXBF_VHT2SS3   = 172,
+	WL_RATE_3X3_TXBF_VHT3SS3   = 173,
+	WL_RATE_3X3_TXBF_VHT4SS3   = 174,
+	WL_RATE_3X3_TXBF_VHT5SS3   = 175,
+	WL_RATE_3X3_TXBF_VHT6SS3   = 176,
+	WL_RATE_3X3_TXBF_VHT7SS3   = 177
+} clm_rates_t;
+
+/* Number of rate codes */
+#define WL_NUMRATES 178
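+
+/*
+ * Note the deliberate aliasing above: HT MCS indices and single-stream VHT
+ * indices share rate codes, e.g. WL_RATE_1X1_MCS0 == WL_RATE_1X1_VHT0SS1 (12)
+ * and WL_RATE_2X2_SDM_MCS8 == WL_RATE_2X2_VHT0SS2 (54).  A compile-time
+ * sanity check (editorial sketch, assuming C11 _Static_assert is available):
+ *
+ *	_Static_assert(WL_RATE_1X1_MCS0 == WL_RATE_1X1_VHT0SS1, "HT/VHT alias");
+ *	_Static_assert(WL_NUMRATES == WL_RATE_3X3_TXBF_SDM_MCS23 + 1, "rate count");
+ */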
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _bcmwifi_rates_h_ */
diff --git a/drivers/staging/edison-bcm43340/include/brcm_nl80211.h b/drivers/staging/edison-bcm43340/include/brcm_nl80211.h
new file mode 100644
index 0000000..dbf1d23
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/brcm_nl80211.h
@@ -0,0 +1,56 @@
+/*
+ * Definitions for nl80211 testmode access to host driver
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: brcm_nl80211.h 454792 2014-02-11 20:40:19Z $
+ *
+ */
+
+#ifndef _brcm_nl80211_h_
+#define _brcm_nl80211_h_
+
+struct bcm_nlmsg_hdr {
+	uint cmd;	/* common ioctl definition */
+	uint len;	/* attached buffer length */
+	uint offset;	/* user buffer offset */
+	uint set;	/* get or set request optional */
+	uint magic;	/* magic number for verification */
+};
+
+enum bcmnl_attrs {
+	BCM_NLATTR_UNSPEC,
+
+	BCM_NLATTR_LEN,
+	BCM_NLATTR_DATA,
+
+	__BCM_NLATTR_AFTER_LAST,
+	BCM_NLATTR_MAX = __BCM_NLATTR_AFTER_LAST - 1
+};
+
+struct nl_prv_data {
+	int err;			/* return result */
+	void *data;			/* ioctl return buffer pointer */
+	uint len;			/* ioctl return buffer length */
+	struct bcm_nlmsg_hdr *nlioc;	/* bcm_nlmsg_hdr header pointer */
+};
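+
+/*
+ * Example (editorial sketch): laying out a testmode request the way the
+ * driver expects it -- a bcm_nlmsg_hdr immediately followed by the ioctl
+ * payload.  The command code and magic below are placeholders; the real
+ * values must match what the driver checks.
+ *
+ *	char req[sizeof(struct bcm_nlmsg_hdr) + 64];
+ *	struct bcm_nlmsg_hdr *hdr = (struct bcm_nlmsg_hdr *)req;
+ *	hdr->cmd = WLC_GET_VAR;		// assumed ioctl code
+ *	hdr->len = 64;			// length of the buffer that follows
+ *	hdr->offset = sizeof(*hdr);	// payload starts right after the header
+ *	hdr->set = 0;			// get request
+ *	hdr->magic = NL_MAGIC;		// hypothetical name for the check value
+ */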
+
+#endif /* _brcm_nl80211_h_ */
diff --git a/drivers/staging/edison-bcm43340/include/dhdioctl.h b/drivers/staging/edison-bcm43340/include/dhdioctl.h
new file mode 100644
index 0000000..187fd07
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/dhdioctl.h
@@ -0,0 +1,135 @@
+/*
+ * Definitions for ioctls to access DHD iovars.
+ * Based on wlioctl.h (for Broadcom 802.11abg driver).
+ * (Moves towards generic ioctls for BCM drivers/iovars.)
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhdioctl.h 454792 2014-02-11 20:40:19Z $
+ */
+
+#ifndef _dhdioctl_h_
+#define	_dhdioctl_h_
+
+#include <typedefs.h>
+
+
+/* require default structure packing */
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+
+/* Linux network driver ioctl encoding */
+typedef struct dhd_ioctl {
+	uint cmd;	/* common ioctl definition */
+	void *buf;	/* pointer to user buffer */
+	uint len;	/* length of user buffer */
+	bool set;	/* get or set request (optional) */
+	uint used;	/* bytes read or written (optional) */
+	uint needed;	/* bytes needed (optional) */
+	uint driver;	/* to identify target driver */
+} dhd_ioctl_t;
+
+/* Underlying BUS definition */
+enum {
+	BUS_TYPE_USB = 0, /* for USB dongles */
+	BUS_TYPE_SDIO, /* for SDIO dongles */
+	BUS_TYPE_PCIE /* for PCIE dongles */
+};
+
+/* per-driver magic numbers */
+#define DHD_IOCTL_MAGIC		0x00444944
+
+/* bump this number if you change the ioctl interface */
+#define DHD_IOCTL_VERSION	1
+
+#define	DHD_IOCTL_MAXLEN	8192		/* max length ioctl buffer required */
+#define	DHD_IOCTL_SMLEN		256		/* "small" length ioctl buffer required */
+
+/* common ioctl definitions */
+#define DHD_GET_MAGIC				0
+#define DHD_GET_VERSION				1
+#define DHD_GET_VAR				2
+#define DHD_SET_VAR				3
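+
+/* A minimal userspace sketch (editorial, not part of the original header) of
+ * reading a DHD iovar through the Linux private ioctl path.  The 'driver'
+ * magic routes the request to the DHD layer; the interface name and the use
+ * of SIOCDEVPRIVATE follow the usual bcmdhd convention and are assumptions
+ * here.
+ *
+ *	char buf[DHD_IOCTL_SMLEN] = "version";	// iovar name, result overwrites it
+ *	dhd_ioctl_t ioc = {0};
+ *	struct ifreq ifr = {0};
+ *	ioc.cmd = DHD_GET_VAR;
+ *	ioc.buf = buf;
+ *	ioc.len = sizeof(buf);
+ *	ioc.driver = DHD_IOCTL_MAGIC;
+ *	strncpy(ifr.ifr_name, "wlan0", IFNAMSIZ);
+ *	ifr.ifr_data = (caddr_t)&ioc;
+ *	ioctl(sock, SIOCDEVPRIVATE, &ifr);	// sock: an AF_INET datagram socket
+ */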
+
+/* message levels */
+#define DHD_ERROR_VAL	0x0001
+#define DHD_TRACE_VAL	0x0002
+#define DHD_INFO_VAL	0x0004
+#define DHD_DATA_VAL	0x0008
+#define DHD_CTL_VAL	0x0010
+#define DHD_TIMER_VAL	0x0020
+#define DHD_HDRS_VAL	0x0040
+#define DHD_BYTES_VAL	0x0080
+#define DHD_INTR_VAL	0x0100
+#define DHD_LOG_VAL	0x0200
+#define DHD_GLOM_VAL	0x0400
+#define DHD_EVENT_VAL	0x0800
+#define DHD_BTA_VAL	0x1000
+#define DHD_ISCAN_VAL	0x2000
+#define DHD_ARPOE_VAL	0x4000
+#define DHD_REORDER_VAL	0x8000
+#define DHD_WL_VAL		0x10000
+#define DHD_NOCHECKDIED_VAL		0x20000 /* UTF WAR */
+#define DHD_WL_VAL2		0x40000
+#define DHD_PNO_VAL		0x80000
+
+#ifdef SDTEST
+/* For pktgen iovar */
+typedef struct dhd_pktgen {
+	uint version;		/* To allow structure change tracking */
+	uint freq;		/* Max ticks between tx/rx attempts */
+	uint count;		/* Test packets to send/rcv each attempt */
+	uint print;		/* Print counts every <print> attempts */
+	uint total;		/* Total packets (or bursts) */
+	uint minlen;		/* Minimum length of packets to send */
+	uint maxlen;		/* Maximum length of packets to send */
+	uint numsent;		/* Count of test packets sent */
+	uint numrcvd;		/* Count of test packets received */
+	uint numfail;		/* Count of test send failures */
+	uint mode;		/* Test mode (type of test packets) */
+	uint stop;		/* Stop after this many tx failures */
+} dhd_pktgen_t;
+
+/* Version in case structure changes */
+#define DHD_PKTGEN_VERSION 2
+
+/* Type of test packets to use */
+#define DHD_PKTGEN_ECHO		1 /* Send echo requests */
+#define DHD_PKTGEN_SEND 	2 /* Send discard packets */
+#define DHD_PKTGEN_RXBURST	3 /* Request dongle send N packets */
+#define DHD_PKTGEN_RECV		4 /* Continuous rx from continuous tx dongle */
+#endif /* SDTEST */
+
+/* Enter idle immediately (no timeout) */
+#define DHD_IDLE_IMMEDIATE	(-1)
+
+/* Values for idleclock iovar: other values are the sd_divisor to use when idle */
+#define DHD_IDLE_ACTIVE	0	/* Do not request any SD clock change when idle */
+#define DHD_IDLE_STOP   (-1)	/* Request SD clock be stopped (and use SD1 mode) */
+
+
+/* require default structure packing */
+#include <packed_section_end.h>
+
+#endif /* _dhdioctl_h_ */
diff --git a/drivers/staging/edison-bcm43340/include/epivers.h b/drivers/staging/edison-bcm43340/include/epivers.h
new file mode 100644
index 0000000..0996de2
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/epivers.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: epivers.h.in,v 13.33 2010-09-08 22:08:53 $
+ *
+*/
+
+#ifndef _epivers_h_
+#define _epivers_h_
+
+#define	EPI_MAJOR_VERSION	1
+
+#define	EPI_MINOR_VERSION	141
+
+#define	EPI_RC_NUMBER		59
+
+#define	EPI_INCREMENTAL_NUMBER	0
+
+#define	EPI_BUILD_NUMBER	0
+
+#define	EPI_VERSION		1, 141, 59, 0
+
+#define	EPI_VERSION_NUM		0x018d3b00
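+
+/* EPI_VERSION_NUM packs the four components above one per byte, i.e.
+ * (editorial note) it is equivalent to:
+ *
+ *	(EPI_MAJOR_VERSION << 24) | (EPI_MINOR_VERSION << 16) |
+ *	(EPI_RC_NUMBER << 8) | EPI_INCREMENTAL_NUMBER
+ *
+ * 1 -> 0x01, 141 -> 0x8d, 59 -> 0x3b, 0 -> 0x00, giving 0x018d3b00.
+ */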
+
+#define EPI_VERSION_DEV		1.141.59
+
+/* Driver Version String, ASCII, 32 chars max */
+#define	EPI_VERSION_STR		"1.141.59 (r)"
+
+#endif /* _epivers_h_ */
diff --git a/drivers/staging/edison-bcm43340/include/hndpmu.h b/drivers/staging/edison-bcm43340/include/hndpmu.h
new file mode 100644
index 0000000..f760e62
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/hndpmu.h
@@ -0,0 +1,36 @@
+/*
+ * HND SiliconBackplane PMU support.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndpmu.h 431134 2013-10-22 18:25:42Z $
+ */
+
+#ifndef _hndpmu_h_
+#define _hndpmu_h_
+
+
+extern void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on, uint32* min_res_mask);
+extern void si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength);
+
+extern void si_pmu_minresmask_htavail_set(si_t *sih, osl_t *osh, bool set_clear);
+
+#endif /* _hndpmu_h_ */
diff --git a/drivers/staging/edison-bcm43340/include/hndrte_armtrap.h b/drivers/staging/edison-bcm43340/include/hndrte_armtrap.h
new file mode 100644
index 0000000..70cfa91
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/hndrte_armtrap.h
@@ -0,0 +1,88 @@
+/*
+ * HNDRTE arm trap handling.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndrte_armtrap.h 261365 2011-05-24 20:42:23Z $
+ */
+
+#ifndef	_hndrte_armtrap_h
+#define	_hndrte_armtrap_h
+
+
+/* ARM trap handling */
+
+/* Trap types defined by ARM (see arminc.h) */
+
+/* Trap locations in lo memory */
+#define	TRAP_STRIDE	4
+#define FIRST_TRAP	TR_RST
+#define LAST_TRAP	(TR_FIQ * TRAP_STRIDE)
+
+#if defined(__ARM_ARCH_4T__)
+#define	MAX_TRAP_TYPE	(TR_FIQ + 1)
+#elif defined(__ARM_ARCH_7M__)
+#define	MAX_TRAP_TYPE	(TR_ISR + ARMCM3_NUMINTS)
+#endif	/* __ARM_ARCH_7M__ */
+
+/* The trap structure is defined here as offsets for assembly */
+#define	TR_TYPE		0x00
+#define	TR_EPC		0x04
+#define	TR_CPSR		0x08
+#define	TR_SPSR		0x0c
+#define	TR_REGS		0x10
+#define	TR_REG(n)	(TR_REGS + (n) * 4)
+#define	TR_SP		TR_REG(13)
+#define	TR_LR		TR_REG(14)
+#define	TR_PC		TR_REG(15)
+
+#define	TRAP_T_SIZE	80
+
+#ifndef	_LANGUAGE_ASSEMBLY
+
+#include <typedefs.h>
+
+typedef struct _trap_struct {
+	uint32		type;
+	uint32		epc;
+	uint32		cpsr;
+	uint32		spsr;
+	uint32		r0;	/* a1 */
+	uint32		r1;	/* a2 */
+	uint32		r2;	/* a3 */
+	uint32		r3;	/* a4 */
+	uint32		r4;	/* v1 */
+	uint32		r5;	/* v2 */
+	uint32		r6;	/* v3 */
+	uint32		r7;	/* v4 */
+	uint32		r8;	/* v5 */
+	uint32		r9;	/* sb/v6 */
+	uint32		r10;	/* sl/v7 */
+	uint32		r11;	/* fp/v8 */
+	uint32		r12;	/* ip */
+	uint32		r13;	/* sp */
+	uint32		r14;	/* lr */
+	uint32		pc;	/* r15 */
+} trap_t;
+
+#endif	/* !_LANGUAGE_ASSEMBLY */
+
+#endif	/* _hndrte_armtrap_h */
diff --git a/drivers/staging/edison-bcm43340/include/hndrte_cons.h b/drivers/staging/edison-bcm43340/include/hndrte_cons.h
new file mode 100644
index 0000000..6cc846f
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/hndrte_cons.h
@@ -0,0 +1,78 @@
+/*
+ * Console support for hndrte.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndrte_cons.h 427140 2013-10-02 18:07:07Z $
+ */
+#ifndef	_HNDRTE_CONS_H
+#define	_HNDRTE_CONS_H
+
+#include <typedefs.h>
+
+#if defined(RWL_DONGLE) || defined(UART_REFLECTOR)
+/* For the dongle UART transport the max cmd len is 256 bytes + header length (16 bytes).
+ *  In the case of ASD commands we cannot be sure how large the command is, so
+ *  to be on the safe side, the input buf len CBUF_LEN is increased to max (512) bytes.
+ */
+#define RWL_MAX_DATA_LEN 	(512 + 8)	/* allow some extra bytes for '\n' termination */
+#define CBUF_LEN	(RWL_MAX_DATA_LEN + 64)  /* allow 64 bytes for header ("rwl...") */
+#else
+#define CBUF_LEN	(128)
+#endif /* RWL_DONGLE || UART_REFLECTOR */
+
+#define LOG_BUF_LEN	1024
+
+typedef struct {
+	uint32		buf;		/* Can't be a pointer on (64-bit) hosts */
+	uint		buf_size;
+	uint		idx;
+	uint		out_idx;	/* output index */
+} hndrte_log_t;
+
+typedef struct {
+	/* Virtual UART
+	 *   When there is no UART (e.g. Quickturn), the host should write a complete
+	 *   input line directly into cbuf and then write the length into vcons_in.
+	 *   This may also be used when there is a real UART (at risk of conflicting with
+	 *   the real UART).  vcons_out is currently unused.
+	 */
+	volatile uint	vcons_in;
+	volatile uint	vcons_out;
+
+	/* Output (logging) buffer
+	 *   Console output is written to a ring buffer log_buf at index log_idx.
+	 *   The host may read the output when it sees log_idx advance.
+	 *   Output will be lost if the output wraps around faster than the host polls.
+	 */
+	hndrte_log_t	log;
+
+	/* Console input line buffer
+	 *   Characters are read one at a time into cbuf until <CR> is received, then
+	 *   the buffer is processed as a command line.  Also used for virtual UART.
+	 */
+	uint		cbuf_idx;
+	char		cbuf[CBUF_LEN];
+} hndrte_cons_t;
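+
+/*
+ * Example (editorial sketch): how a host-side debugger might drain the output
+ * ring once it has read a hndrte_cons_t image out of dongle memory.  'last'
+ * is the host's private read index; mem_read() is a hypothetical accessor
+ * for dongle RAM.
+ *
+ *	static uint last;
+ *	uint idx = cons->log.idx;
+ *	while (last != idx) {
+ *		putchar(mem_read(cons->log.buf + last));
+ *		last = (last + 1) % cons->log.buf_size;
+ *	}
+ */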
+
+hndrte_cons_t *hndrte_get_active_cons_state(void);
+
+#endif /* _HNDRTE_CONS_H */
diff --git a/drivers/staging/edison-bcm43340/include/hndrte_debug.h b/drivers/staging/edison-bcm43340/include/hndrte_debug.h
new file mode 100644
index 0000000..6203925
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/hndrte_debug.h
@@ -0,0 +1,117 @@
+/*
+ * HND Run Time Environment debug info area
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndrte_debug.h 342211 2012-07-02 02:23:04Z $
+ */
+
+#ifndef	_HNDRTE_DEBUG_H
+#define	_HNDRTE_DEBUG_H
+
+/* Magic number at a magic location to find HNDRTE_DEBUG pointers */
+#define HNDRTE_DEBUG_PTR_PTR_MAGIC 0x50504244  	/* DBPP */
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+/* Includes only when building dongle code */
+
+
+#define NUM_EVENT_LOG_SETS 4
+
+/* We use explicit sizes here since this gets included from different
+ * systems.  The sizes must be the sizes of the creating system
+ * (currently 32-bit ARM) since this is gleaned from a dump.
+ */
+
+#ifdef FWID
+extern uint32 gFWID;
+#endif
+
+/* Define pointers for use on other systems */
+#define _HD_EVLOG_P	uint32
+#define _HD_CONS_P	uint32
+#define _HD_TRAP_P	uint32
+
+typedef struct hndrte_debug {
+	uint32	magic;
+#define HNDRTE_DEBUG_MAGIC 0x47424544	/* 'DEBG' */
+
+	uint32	version;		/* Debug struct version */
+#define HNDRTE_DEBUG_VERSION 1
+
+	uint32	fwid;			/* 4 bytes of fw info */
+	char	epivers[32];
+
+	_HD_TRAP_P trap_ptr;		/* trap_t data struct */
+	_HD_CONS_P console;		/* Console  */
+
+	uint32	ram_base;
+	uint32	ram_size;
+
+	uint32	rom_base;
+	uint32	rom_size;
+
+	_HD_EVLOG_P event_log_top;
+
+} hndrte_debug_t;
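+
+/*
+ * Example (editorial sketch): locating the debug area in a RAM dump by
+ * scanning for HNDRTE_DEBUG_PTR_PTR_MAGIC, assuming the pointer to the
+ * hndrte_debug_t lives in the word following the magic; 'ram' and
+ * 'addr2idx()' are hypothetical views of the dump.
+ *
+ *	for (i = 0; i < ram_words - 1; i++) {
+ *		if (ram[i] == HNDRTE_DEBUG_PTR_PTR_MAGIC) {
+ *			hndrte_debug_t *dbg = (hndrte_debug_t *)&ram[addr2idx(ram[i + 1])];
+ *			if (dbg->magic == HNDRTE_DEBUG_MAGIC)
+ *				return dbg;
+ *		}
+ *	}
+ */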
+
+/*
+ * timeval_t and prstatus_t are copies of the Linux structures.
+ * Included here because we need the definitions for the target processor
+ * (32 bits) and not the definition on the host this is running on
+ * (which could be 64 bits).
+ */
+
+typedef struct {		/* Time value with microsecond resolution    */
+	uint32 tv_sec;	/* Seconds                                   */
+	uint32 tv_usec;	/* Microseconds                              */
+} timeval_t;
+
+
+/* Linux/ARM 32 prstatus for notes section */
+typedef struct prstatus {
+	  int32 si_signo; 	/* Signal number */
+	  int32 si_code; 	/* Extra code */
+	  int32 si_errno; 	/* Errno */
+	  uint16 pr_cursig; 	/* Current signal.  */
+	  uint16 unused;
+	  uint32 pr_sigpend;	/* Set of pending signals.  */
+	  uint32 pr_sighold;	/* Set of held signals.  */
+	  uint32 pr_pid;
+	  uint32 pr_ppid;
+	  uint32 pr_pgrp;
+	  uint32 pr_sid;
+	  timeval_t pr_utime;	/* User time.  */
+	  timeval_t pr_stime;	/* System time.  */
+	  timeval_t pr_cutime;	/* Cumulative user time.  */
+	  timeval_t pr_cstime;	/* Cumulative system time.  */
+	  uint32 uregs[18];
+	  int32 pr_fpvalid;	/* True if math copro being used.  */
+} prstatus_t;
+
+#ifdef __GNUC__
+extern hndrte_debug_t hndrte_debug_info;
+#endif /* __GNUC__ */
+
+#endif /* !_LANGUAGE_ASSEMBLY */
+
+#endif /* _HNDRTE_DEBUG_H */
diff --git a/drivers/staging/edison-bcm43340/include/hndsoc.h b/drivers/staging/edison-bcm43340/include/hndsoc.h
new file mode 100644
index 0000000..7726a8a
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/hndsoc.h
@@ -0,0 +1,282 @@
+/*
+ * Broadcom HND chip & on-chip-interconnect-related definitions.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: hndsoc.h 432420 2013-10-28 14:14:02Z $
+ */
+
+#ifndef	_HNDSOC_H
+#define	_HNDSOC_H
+
+/* Include the soci specific files */
+#include <sbconfig.h>
+#include <aidmp.h>
+
+/*
+ * SOC Interconnect Address Map.
+ * All regions may not exist on all chips.
+ */
+#define SI_SDRAM_BASE		0x00000000	/* Physical SDRAM */
+#define SI_PCI_MEM		0x08000000	/* Host Mode sb2pcitranslation0 (64 MB) */
+#define SI_PCI_MEM_SZ		(64 * 1024 * 1024)
+#define SI_PCI_CFG		0x0c000000	/* Host Mode sb2pcitranslation1 (64 MB) */
+#define	SI_SDRAM_SWAPPED	0x10000000	/* Byteswapped Physical SDRAM */
+#define SI_SDRAM_R2		0x80000000	/* Region 2 for sdram (512 MB) */
+
+#define SI_ENUM_BASE    	0x18000000	/* Enumeration space base */
+
+#define SI_WRAP_BASE    	0x18100000	/* Wrapper space base */
+#define SI_CORE_SIZE    	0x1000		/* each core gets 4Kbytes for registers */
+
+#ifndef SI_MAXCORES
+#define	SI_MAXCORES		32		/* NorthStar has more cores */
+#endif /* SI_MAXCORES */
+
+#define	SI_FASTRAM		0x19000000	/* On-chip RAM on chips that also have DDR */
+#define	SI_FASTRAM_SWAPPED	0x19800000
+
+#define	SI_FLASH2		0x1c000000	/* Flash Region 2 (region 1 shadowed here) */
+#define	SI_FLASH2_SZ		0x02000000	/* Size of Flash Region 2 */
+#define	SI_ARMCM3_ROM		0x1e000000	/* ARM Cortex-M3 ROM */
+#define	SI_FLASH1		0x1fc00000	/* MIPS Flash Region 1 */
+#define	SI_FLASH1_SZ		0x00400000	/* MIPS Size of Flash Region 1 */
+#define	SI_FLASH_WINDOW		0x01000000	/* Flash XIP Window */
+
+#define SI_NS_NANDFLASH		0x1c000000	/* NorthStar NAND flash base */
+#define SI_NS_NORFLASH		0x1e000000	/* NorthStar NOR flash base */
+#define SI_NS_ROM		0xfffd0000	/* NorthStar ROM */
+#define	SI_NS_FLASH_WINDOW	0x02000000	/* Flash XIP Window */
+
+#define	SI_ARM7S_ROM		0x20000000	/* ARM7TDMI-S ROM */
+#define	SI_ARMCR4_ROM		0x000f0000	/* ARM Cortex-R4 ROM */
+#define	SI_ARMCM3_SRAM2		0x60000000	/* ARM Cortex-M3 SRAM Region 2 */
+#define	SI_ARM7S_SRAM2		0x80000000	/* ARM7TDMI-S SRAM Region 2 */
+#define	SI_ARM_FLASH1		0xffff0000	/* ARM Flash Region 1 */
+#define	SI_ARM_FLASH1_SZ	0x00010000	/* ARM Size of Flash Region 1 */
+
+#define SI_PCI_DMA		0x40000000	/* Client Mode sb2pcitranslation2 (1 GB) */
+#define SI_PCI_DMA2		0x80000000	/* Client Mode sb2pcitranslation2 (1 GB) */
+#define SI_PCI_DMA_SZ		0x40000000	/* Client Mode sb2pcitranslation2 size in bytes */
+#define SI_PCIE_DMA_L32		0x00000000	/* PCIE Client Mode sb2pcitranslation2
+						 * (2 ZettaBytes), low 32 bits
+						 */
+#define SI_PCIE_DMA_H32		0x80000000	/* PCIE Client Mode sb2pcitranslation2
+						 * (2 ZettaBytes), high 32 bits
+						 */
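+
+/*
+ * Example (editorial sketch): on the older Sonics (SOCI_SB) interconnect the
+ * cores sit at a fixed stride from the enumeration base, so the register
+ * window of the core at index 'idx' (chipcommon is always SI_CC_IDX == 0) is:
+ *
+ *	uint32 regs = SI_ENUM_BASE + idx * SI_CORE_SIZE;	// 0x18000000 for chipcommon
+ *
+ * AI/NAI chips instead describe core addresses in the EROM, with per-core
+ * wrappers starting at SI_WRAP_BASE.
+ */
+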
+/* core codes */
+#define	NODEV_CORE_ID		0x700		/* Invalid coreid */
+#define	CC_CORE_ID		0x800		/* chipcommon core */
+#define	ILINE20_CORE_ID		0x801		/* iline20 core */
+#define	SRAM_CORE_ID		0x802		/* sram core */
+#define	SDRAM_CORE_ID		0x803		/* sdram core */
+#define	PCI_CORE_ID		0x804		/* pci core */
+#define	MIPS_CORE_ID		0x805		/* mips core */
+#define	ENET_CORE_ID		0x806		/* enet mac core */
+#define	CODEC_CORE_ID		0x807		/* v90 codec core */
+#define	USB_CORE_ID		0x808		/* usb 1.1 host/device core */
+#define	ADSL_CORE_ID		0x809		/* ADSL core */
+#define	ILINE100_CORE_ID	0x80a		/* iline100 core */
+#define	IPSEC_CORE_ID		0x80b		/* ipsec core */
+#define	UTOPIA_CORE_ID		0x80c		/* utopia core */
+#define	PCMCIA_CORE_ID		0x80d		/* pcmcia core */
+#define	SOCRAM_CORE_ID		0x80e		/* internal memory core */
+#define	MEMC_CORE_ID		0x80f		/* memc sdram core */
+#define	OFDM_CORE_ID		0x810		/* OFDM phy core */
+#define	EXTIF_CORE_ID		0x811		/* external interface core */
+#define	D11_CORE_ID		0x812		/* 802.11 MAC core */
+#define	APHY_CORE_ID		0x813		/* 802.11a phy core */
+#define	BPHY_CORE_ID		0x814		/* 802.11b phy core */
+#define	GPHY_CORE_ID		0x815		/* 802.11g phy core */
+#define	MIPS33_CORE_ID		0x816		/* mips3302 core */
+#define	USB11H_CORE_ID		0x817		/* usb 1.1 host core */
+#define	USB11D_CORE_ID		0x818		/* usb 1.1 device core */
+#define	USB20H_CORE_ID		0x819		/* usb 2.0 host core */
+#define	USB20D_CORE_ID		0x81a		/* usb 2.0 device core */
+#define	SDIOH_CORE_ID		0x81b		/* sdio host core */
+#define	ROBO_CORE_ID		0x81c		/* roboswitch core */
+#define	ATA100_CORE_ID		0x81d		/* parallel ATA core */
+#define	SATAXOR_CORE_ID		0x81e		/* serial ATA & XOR DMA core */
+#define	GIGETH_CORE_ID		0x81f		/* gigabit ethernet core */
+#define	PCIE_CORE_ID		0x820		/* pci express core */
+#define	NPHY_CORE_ID		0x821		/* 802.11n 2x2 phy core */
+#define	SRAMC_CORE_ID		0x822		/* SRAM controller core */
+#define	MINIMAC_CORE_ID		0x823		/* MINI MAC/phy core */
+#define	ARM11_CORE_ID		0x824		/* ARM 1176 core */
+#define	ARM7S_CORE_ID		0x825		/* ARM7tdmi-s core */
+#define	LPPHY_CORE_ID		0x826		/* 802.11a/b/g phy core */
+#define	PMU_CORE_ID		0x827		/* PMU core */
+#define	SSNPHY_CORE_ID		0x828		/* 802.11n single-stream phy core */
+#define	SDIOD_CORE_ID		0x829		/* SDIO device core */
+#define	ARMCM3_CORE_ID		0x82a		/* ARM Cortex M3 core */
+#define	HTPHY_CORE_ID		0x82b		/* 802.11n 4x4 phy core */
+#define	MIPS74K_CORE_ID		0x82c		/* mips 74k core */
+#define	GMAC_CORE_ID		0x82d		/* Gigabit MAC core */
+#define	DMEMC_CORE_ID		0x82e		/* DDR1/2 memory controller core */
+#define	PCIERC_CORE_ID		0x82f		/* PCIE Root Complex core */
+#define	OCP_CORE_ID		0x830		/* OCP2OCP bridge core */
+#define	SC_CORE_ID		0x831		/* shared common core */
+#define	AHB_CORE_ID		0x832		/* OCP2AHB bridge core */
+#define	SPIH_CORE_ID		0x833		/* SPI host core */
+#define	I2S_CORE_ID		0x834		/* I2S core */
+#define	DMEMS_CORE_ID		0x835		/* SDR/DDR1 memory controller core */
+#define	DEF_SHIM_COMP		0x837		/* SHIM component in ubus/6362 */
+
+#define ACPHY_CORE_ID		0x83b		/* Dot11 ACPHY */
+#define PCIE2_CORE_ID		0x83c		/* pci express Gen2 core */
+#define USB30D_CORE_ID		0x83d		/* usb 3.0 device core */
+#define ARMCR4_CORE_ID		0x83e		/* ARM CR4 CPU */
+#define GCI_CORE_ID		0x840		/* GCI Core */
+#define APB_BRIDGE_CORE_ID	0x135		/* APB bridge core ID */
+#define AXI_CORE_ID		0x301		/* AXI/GPV core ID */
+#define EROM_CORE_ID		0x366		/* EROM core ID */
+#define OOB_ROUTER_CORE_ID	0x367		/* OOB router core ID */
+#define DEF_AI_COMP		0xfff		/* Default component, in ai chips it maps all
+						 * unused address ranges
+						 */
+
+#define CC_4706_CORE_ID		0x500		/* chipcommon core */
+#define NS_PCIEG2_CORE_ID	0x501		/* PCIE Gen 2 core */
+#define NS_DMA_CORE_ID		0x502		/* DMA core */
+#define NS_SDIO3_CORE_ID	0x503		/* SDIO3 core */
+#define NS_USB20_CORE_ID	0x504		/* USB2.0 core */
+#define NS_USB30_CORE_ID	0x505		/* USB3.0 core */
+#define NS_A9JTAG_CORE_ID	0x506		/* ARM Cortex A9 JTAG core */
+#define NS_DDR23_CORE_ID	0x507		/* Denali DDR2/DDR3 memory controller */
+#define NS_ROM_CORE_ID		0x508		/* ROM core */
+#define NS_NAND_CORE_ID		0x509		/* NAND flash controller core */
+#define NS_QSPI_CORE_ID		0x50a		/* SPI flash controller core */
+#define NS_CCB_CORE_ID		0x50b		/* ChipcommonB core */
+#define SOCRAM_4706_CORE_ID	0x50e		/* internal memory core */
+#define NS_SOCRAM_CORE_ID	SOCRAM_4706_CORE_ID
+#define	ARMCA9_CORE_ID		0x510		/* ARM Cortex A9 core (ihost) */
+#define	NS_IHOST_CORE_ID	ARMCA9_CORE_ID	/* ARM Cortex A9 core (ihost) */
+#define GMAC_COMMON_4706_CORE_ID	0x5dc		/* Gigabit MAC core */
+#define GMAC_4706_CORE_ID	0x52d		/* Gigabit MAC core */
+#define AMEMC_CORE_ID		0x52e		/* DDR1/2 memory controller core */
+#define ALTA_CORE_ID		0x534		/* I2S core */
+#define DDR23_PHY_CORE_ID	0x5dd
+
+#define SI_PCI1_MEM     0x40000000  /* Host Mode sb2pcitranslation0 (64 MB) */
+#define SI_PCI1_CFG     0x44000000  /* Host Mode sb2pcitranslation1 (64 MB) */
+#define SI_PCIE1_DMA_H32		0xc0000000	/* PCIE Client Mode sb2pcitranslation2
+						 * (2 ZettaBytes), high 32 bits
+						 */
+#define CC_4706B0_CORE_REV	0x8000001f		/* chipcommon core */
+#define SOCRAM_4706B0_CORE_REV	0x80000005		/* internal memory core */
+#define GMAC_4706B0_CORE_REV	0x80000000		/* Gigabit MAC core */
+
+/* There are TWO constants on all HND chips: SI_ENUM_BASE above,
+ * and chipcommon being the first core:
+ */
+#define	SI_CC_IDX		0
+/* SOC Interconnect types (aka chip types) */
+#define	SOCI_SB			0
+#define	SOCI_AI			1
+#define	SOCI_UBUS		2
+#define	SOCI_NAI		3
+
+/* Common core control flags */
+#define	SICF_BIST_EN		0x8000
+#define	SICF_PME_EN		0x4000
+#define	SICF_CORE_BITS		0x3ffc
+#define	SICF_FGC		0x0002
+#define	SICF_CLOCK_EN		0x0001
+
+/* Common core status flags */
+#define	SISF_BIST_DONE		0x8000
+#define	SISF_BIST_ERROR		0x4000
+#define	SISF_GATED_CLK		0x2000
+#define	SISF_DMA64		0x1000
+#define	SISF_CORE_BITS		0x0fff
+
+/* NorthStar core status flags */
+#define SISF_NS_BOOTDEV_MASK	0x0003	/* ROM core */
+#define SISF_NS_BOOTDEV_NOR	0x0000	/* ROM core */
+#define SISF_NS_BOOTDEV_NAND	0x0001	/* ROM core */
+#define SISF_NS_BOOTDEV_ROM	0x0002	/* ROM core */
+#define SISF_NS_BOOTDEV_OFFLOAD	0x0003	/* ROM core */
+#define SISF_NS_SKUVEC_MASK	0x000c	/* ROM core */
+
+/* A register that is common to all cores to
+ * communicate with the PMU regarding clock control.
+ */
+#define SI_CLK_CTL_ST		0x1e0		/* clock control and status */
+#define SI_PWR_CTL_ST		0x1e8		/* For memory clock gating */
+
+/* clk_ctl_st register */
+#define	CCS_FORCEALP		0x00000001	/* force ALP request */
+#define	CCS_FORCEHT		0x00000002	/* force HT request */
+#define	CCS_FORCEILP		0x00000004	/* force ILP request */
+#define	CCS_ALPAREQ		0x00000008	/* ALP Avail Request */
+#define	CCS_HTAREQ		0x00000010	/* HT Avail Request */
+#define	CCS_FORCEHWREQOFF	0x00000020	/* Force HW Clock Request Off */
+#define CCS_HQCLKREQ		0x00000040	/* HQ Clock Required */
+#define CCS_USBCLKREQ		0x00000100	/* USB Clock Req */
+#define CCS_SECICLKREQ		0x00000100	/* SECI Clock Req */
+#define CCS_ARMFASTCLOCKREQ	0x00000100	/* ARM CR4 fast clock request */
+#define CCS_ERSRC_REQ_MASK	0x00000700	/* external resource requests */
+#define CCS_ERSRC_REQ_SHIFT	8
+#define	CCS_ALPAVAIL		0x00010000	/* ALP is available */
+#define	CCS_HTAVAIL		0x00020000	/* HT is available */
+#define CCS_BP_ON_APL		0x00040000	/* RO: Backplane is running on ALP clock */
+#define CCS_BP_ON_HT		0x00080000	/* RO: Backplane is running on HT clock */
+#define CCS_ARMFASTCLOCKSTATUS	0x01000000	/* Fast CPU clock is running */
+#define CCS_ERSRC_STS_MASK	0x07000000	/* external resource status */
+#define CCS_ERSRC_STS_SHIFT	24
+
+#define	CCS0_HTAVAIL		0x00010000	/* HT avail in chipc and pcmcia on 4328a0 */
+#define	CCS0_ALPAVAIL		0x00020000	/* ALP avail in chipc and pcmcia on 4328a0 */
+
+/* Not really related to SOC Interconnect, but a couple of software
+ * conventions for the use of the flash space:
+ */
+
+/* Minimum amount of flash we support */
+#define FLASH_MIN		0x00020000	/* Minimum flash size */
+
+/* A boot/binary may have an embedded block that describes its size  */
+#define	BISZ_OFFSET		0x3e0		/* At this offset into the binary */
+#define	BISZ_MAGIC		0x4249535a	/* Marked with this value: 'BISZ' */
+#define	BISZ_MAGIC_IDX		0		/* Word 0: magic */
+#define	BISZ_TXTST_IDX		1		/*	1: text start */
+#define	BISZ_TXTEND_IDX		2		/*	2: text end */
+#define	BISZ_DATAST_IDX		3		/*	3: data start */
+#define	BISZ_DATAEND_IDX	4		/*	4: data end */
+#define	BISZ_BSSST_IDX		5		/*	5: bss start */
+#define	BISZ_BSSEND_IDX		6		/*	6: bss end */
+#define BISZ_SIZE		7		/* descriptor size in 32-bit integers */
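+
+/*
+ * Example (editorial sketch): probing an image for its embedded size block.
+ * 'img' is a hypothetical pointer to the start of the boot binary.
+ *
+ *	uint32 *bisz = (uint32 *)((uint8 *)img + BISZ_OFFSET);
+ *	if (bisz[BISZ_MAGIC_IDX] == BISZ_MAGIC) {
+ *		uint32 text = bisz[BISZ_TXTEND_IDX] - bisz[BISZ_TXTST_IDX];
+ *		uint32 data = bisz[BISZ_DATAEND_IDX] - bisz[BISZ_DATAST_IDX];
+ *		uint32 bss  = bisz[BISZ_BSSEND_IDX] - bisz[BISZ_BSSST_IDX];
+ *	}
+ */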
+
+/* Boot/Kernel related definitions and functions */
+#define	SOC_BOOTDEV_ROM		0x00000001
+#define	SOC_BOOTDEV_PFLASH	0x00000002
+#define	SOC_BOOTDEV_SFLASH	0x00000004
+#define	SOC_BOOTDEV_NANDFLASH	0x00000008
+
+#define	SOC_KNLDEV_NORFLASH	0x00000002
+#define	SOC_KNLDEV_NANDFLASH	0x00000004
+
+#ifndef _LANGUAGE_ASSEMBLY
+int soc_boot_dev(void *sih);
+int soc_knl_dev(void *sih);
+#endif	/* _LANGUAGE_ASSEMBLY */
+
+#endif /* _HNDSOC_H */
diff --git a/drivers/staging/edison-bcm43340/include/linux_osl.h b/drivers/staging/edison-bcm43340/include/linux_osl.h
new file mode 100644
index 0000000..f6903ff
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/linux_osl.h
@@ -0,0 +1,756 @@
+/*
+ * Linux OS Independent Layer
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: linux_osl.h 432719 2013-10-29 12:04:59Z $
+ */
+
+#ifndef _linux_osl_h_
+#define _linux_osl_h_
+
+#include <typedefs.h>
+
+/* Linux Kernel: File Operations: start */
+extern void * osl_os_open_image(char * filename);
+extern int osl_os_get_image_block(char * buf, int len, void * image);
+extern void osl_os_close_image(void * image);
+extern int osl_os_image_size(void *image);
+/* Linux Kernel: File Operations: end */
+
+#ifdef BCMDRIVER
+
+/* OSL initialization */
+#ifdef SHARED_OSL_CMN
+extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag, void **osh_cmn);
+#else
+extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag);
+#endif /* SHARED_OSL_CMN */
+
+extern void osl_detach(osl_t *osh);
+extern int osl_static_mem_init(osl_t *osh, void *adapter);
+extern int osl_static_mem_deinit(osl_t *osh, void *adapter);
+extern void osl_set_bus_handle(osl_t *osh, void *bus_handle);
+extern void* osl_get_bus_handle(osl_t *osh);
+
+/* Global ASSERT type */
+extern uint32 g_assert_type;
+
+/* ASSERT */
+#if defined(BCMASSERT_LOG)
+	#define ASSERT(exp) \
+	  do { if (!(exp)) osl_assert(#exp, __FILE__, __LINE__); } while (0)
+extern void osl_assert(const char *exp, const char *file, int line);
+#else
+	#ifdef __GNUC__
+		#define GCC_VERSION \
+			(__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+		#if GCC_VERSION > 30100
+			#define ASSERT(exp)	do {} while (0)
+		#else
+			/* ASSERT could cause segmentation fault on GCC3.1, use empty instead */
+			#define ASSERT(exp)
+		#endif /* GCC_VERSION > 30100 */
+	#endif /* __GNUC__ */
+#endif 
+
+/* microsecond delay */
+#define	OSL_DELAY(usec)		osl_delay(usec)
+extern void osl_delay(uint usec);
+
+#define OSL_SLEEP(ms)			osl_sleep(ms)
+extern void osl_sleep(uint ms);
+
+#define	OSL_PCMCIA_READ_ATTR(osh, offset, buf, size) \
+	osl_pcmcia_read_attr((osh), (offset), (buf), (size))
+#define	OSL_PCMCIA_WRITE_ATTR(osh, offset, buf, size) \
+	osl_pcmcia_write_attr((osh), (offset), (buf), (size))
+extern void osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size);
+extern void osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size);
+
+/* PCI configuration space access macros */
+#define	OSL_PCI_READ_CONFIG(osh, offset, size) \
+	osl_pci_read_config((osh), (offset), (size))
+#define	OSL_PCI_WRITE_CONFIG(osh, offset, size, val) \
+	osl_pci_write_config((osh), (offset), (size), (val))
+extern uint32 osl_pci_read_config(osl_t *osh, uint offset, uint size);
+extern void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val);
+
+/* PCI device bus # and slot # */
+#define OSL_PCI_BUS(osh)	osl_pci_bus(osh)
+#define OSL_PCI_SLOT(osh)	osl_pci_slot(osh)
+extern uint osl_pci_bus(osl_t *osh);
+extern uint osl_pci_slot(osl_t *osh);
+extern struct pci_dev *osl_pci_device(osl_t *osh);
+
+/* Pkttag flag should be part of public information */
+typedef struct {
+	bool pkttag;
+	bool mmbus;		/* Bus supports memory-mapped register accesses */
+	pktfree_cb_fn_t tx_fn;  /* Callback function for PKTFREE */
+	void *tx_ctx;		/* Context to the callback function */
+	void	*unused[3];
+} osl_pubinfo_t;
+
+#define PKTFREESETCB(osh, _tx_fn, _tx_ctx)		\
+	do {						\
+	   ((osl_pubinfo_t*)osh)->tx_fn = _tx_fn;	\
+	   ((osl_pubinfo_t*)osh)->tx_ctx = _tx_ctx;	\
+	} while (0)
+
+
+/* host/bus architecture-specific byte swap */
+#define BUS_SWAP32(v)		(v)
+	#define MALLOC(osh, size)	osl_malloc((osh), (size))
+	#define MALLOCZ(osh, size)	osl_mallocz((osh), (size))
+	#define MFREE(osh, addr, size)	osl_mfree((osh), (addr), (size))
+	#define MALLOCED(osh)		osl_malloced((osh))
+	#define MEMORY_LEFTOVER(osh) osl_check_memleak(osh)
+	extern void *osl_malloc(osl_t *osh, uint size);
+	extern void *osl_mallocz(osl_t *osh, uint size);
+	extern void osl_mfree(osl_t *osh, void *addr, uint size);
+	extern uint osl_malloced(osl_t *osh);
+	extern uint osl_check_memleak(osl_t *osh);
+
+
+#define	MALLOC_FAILED(osh)	osl_malloc_failed((osh))
+extern uint osl_malloc_failed(osl_t *osh);
+
+/* allocate/free shared (dma-able) consistent memory */
+#define	DMA_CONSISTENT_ALIGN	osl_dma_consistent_align()
+#define	DMA_ALLOC_CONSISTENT(osh, size, align, tot, pap, dmah) \
+	osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap))
+#define	DMA_FREE_CONSISTENT(osh, va, size, pa, dmah) \
+	osl_dma_free_consistent((osh), (void*)(va), (size), (pa))
+
+#define	DMA_ALLOC_CONSISTENT_FORCE32(osh, size, align, tot, pap, dmah) \
+	osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap))
+#define	DMA_FREE_CONSISTENT_FORCE32(osh, va, size, pa, dmah) \
+	osl_dma_free_consistent((osh), (void*)(va), (size), (pa))
+
+extern uint osl_dma_consistent_align(void);
+extern void *
+osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align, uint *tot, dmaaddr_t *pap);
+extern void osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa);
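+
+/*
+ * Example (editorial sketch): typical lifetime of a shared (DMA-able) buffer.
+ * The alignment argument (here 4, taken to be alignment bits) and the size
+ * are illustrative; 'alloced' receives the size actually reserved and must
+ * be handed back on free.
+ *
+ *	uint alloced = 0;
+ *	dmaaddr_t pa;
+ *	void *va = DMA_ALLOC_CONSISTENT(osh, 4096, 4, &alloced, &pa, NULL);
+ *	if (va != NULL) {
+ *		...
+ *		DMA_FREE_CONSISTENT(osh, va, alloced, pa, NULL);
+ *	}
+ */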
+
+/* map/unmap direction */
+#define	DMA_TX	1	/* TX direction for DMA */
+#define	DMA_RX	2	/* RX direction for DMA */
+
+/* map/unmap shared (dma-able) memory */
+#define	DMA_UNMAP(osh, pa, size, direction, p, dmah) \
+	osl_dma_unmap((osh), (pa), (size), (direction))
+extern dmaaddr_t osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
+	hnddma_seg_map_t *txp_dmah);
+extern void osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction);
+
+/* API for DMA addressing capability */
+#define OSL_DMADDRWIDTH(osh, addrwidth) ({BCM_REFERENCE(osh); BCM_REFERENCE(addrwidth);})
+
+#if defined(__ARM_ARCH_7A__)
+	extern void osl_cache_flush(void *va, uint size);
+	extern void osl_cache_inv(void *va, uint size);
+	extern void osl_prefetch(const void *ptr);
+	#define OSL_CACHE_FLUSH(va, len)	osl_cache_flush((void *) va, len)
+	#define OSL_CACHE_INV(va, len)		osl_cache_inv((void *) va, len)
+	#define OSL_PREFETCH(ptr)			osl_prefetch(ptr)
+#else
+	#define OSL_CACHE_FLUSH(va, len)	BCM_REFERENCE(va)
+	#define OSL_CACHE_INV(va, len)		BCM_REFERENCE(va)
+	#define OSL_PREFETCH(ptr)			prefetch(ptr)
+#endif 
+
+/* register access macros */
+	#include <bcmsdh.h>
+	#define OSL_WRITE_REG(osh, r, v) (bcmsdh_reg_write(osl_get_bus_handle(osh), \
+		(uintptr)(r), sizeof(*(r)), (v)))
+	#define OSL_READ_REG(osh, r) (bcmsdh_reg_read(osl_get_bus_handle(osh), \
+		(uintptr)(r), sizeof(*(r))))
+
+	#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) if (((osl_pubinfo_t*)(osh))->mmbus) \
+		mmap_op else bus_op
+	#define SELECT_BUS_READ(osh, mmap_op, bus_op) (((osl_pubinfo_t*)(osh))->mmbus) ? \
+		mmap_op : bus_op
+
+#define OSL_ERROR(bcmerror)	osl_error(bcmerror)
+extern int osl_error(int bcmerror);
+
+/* The largest reasonable packet buffer the driver uses for the Ethernet MTU, in bytes */
+#define	PKTBUFSZ	2048
+
+#define OSH_NULL   NULL
+
+/*
+ * BINOSL selects the slightly slower function-call-based, binary-compatible OSL.
+ * Macros expand to calls to functions defined in linux_osl.c.
+ */
+#include <linuxver.h>           /* use current 2.4.x calling conventions */
+#include <linux/kernel.h>       /* for vsn/printf's */
+#include <linux/string.h>       /* for mem*, str* */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29)
+#define OSL_SYSUPTIME()		((uint32)jiffies_to_msecs(jiffies))
+#else
+#define OSL_SYSUPTIME()		((uint32)jiffies * (1000 / HZ))
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29) */
+#define	printf(fmt, args...)	printk(fmt , ## args)
+/* bcopy's: Linux kernel doesn't provide these (anymore) */
+#define	bcopy(src, dst, len)	memcpy((dst), (src), (len))
+#define	bcmp(b1, b2, len)	memcmp((b1), (b2), (len))
+#define	bzero(b, len)		memset((b), '\0', (len))
+
+/* register access macros */
+
+#define R_REG(osh, r) (\
+	SELECT_BUS_READ(osh, \
+		({ \
+			__typeof(*(r)) __osl_v; \
+			switch (sizeof(*(r))) { \
+				case sizeof(uint8):	__osl_v = \
+					readb((volatile uint8*)(r)); break; \
+				case sizeof(uint16):	__osl_v = \
+					readw((volatile uint16*)(r)); break; \
+				case sizeof(uint32):	__osl_v = \
+					readl((volatile uint32*)(r)); break; \
+			} \
+			__osl_v; \
+		}), \
+		OSL_READ_REG(osh, r)) \
+)
+
+#define W_REG(osh, r, v) do { \
+	SELECT_BUS_WRITE(osh, \
+		switch (sizeof(*(r))) { \
+			case sizeof(uint8):	writeb((uint8)(v), (volatile uint8*)(r)); break; \
+			case sizeof(uint16):	writew((uint16)(v), (volatile uint16*)(r)); break; \
+			case sizeof(uint32):	writel((uint32)(v), (volatile uint32*)(r)); break; \
+		}, \
+		(OSL_WRITE_REG(osh, r, v))); \
+	} while (0)
+
+#define	AND_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) & (v))
+#define	OR_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) | (v))
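+
+/* Illustrative usage (sketch; assumes "regs" points at a device register
+ * block with a uint32 "control" member - both names are hypothetical):
+ *
+ *	uint32 ctl = R_REG(osh, &regs->control);
+ *	W_REG(osh, &regs->control, ctl | 0x1);
+ *	OR_REG(osh, &regs->control, 0x2);	(same read-modify-write, one macro)
+ */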
+
+/* uncached/cached virtual address */
+#define OSL_UNCACHED(va)	((void *)va)
+#define OSL_CACHED(va)		((void *)va)
+
+#define OSL_PREF_RANGE_LD(va, sz) BCM_REFERENCE(va)
+#define OSL_PREF_RANGE_ST(va, sz) BCM_REFERENCE(va)
+
+/* get processor cycle count */
+#if defined(__i386__)
+#define	OSL_GETCYCLES(x)	rdtscl((x))
+#else
+#define OSL_GETCYCLES(x)	((x) = 0)
+#endif 
+
+/* dereference an address that may cause a bus exception */
+#define	BUSPROBE(val, addr)	({ (val) = R_REG(NULL, (addr)); 0; })
+
+/* map/unmap physical to virtual I/O */
+#if !defined(CONFIG_MMC_MSM7X00A)
+#define	REG_MAP(pa, size)	ioremap_nocache((unsigned long)(pa), (unsigned long)(size))
+#else
+#define REG_MAP(pa, size)       (void *)(0)
+#endif /* !defined(CONFIG_MMC_MSM7X00A) */
+#define	REG_UNMAP(va)		iounmap((va))
+
+/* shared (dma-able) memory access macros */
+#define	R_SM(r)			*(r)
+#define	W_SM(r, v)		(*(r) = (v))
+#define	BZERO_SM(r, len)	memset((r), '\0', (len))
+
+/* Because the non-BINOSL implementation of the PKT OSL routines is macro-based
+ * (for performance reasons), we need the Linux headers.
+ */
+#include <linuxver.h>		/* use current 2.4.x calling conventions */
+
+/* packet primitives */
+#ifdef BCMDBG_CTRACE
+#define	PKTGET(osh, len, send)		osl_pktget((osh), (len), __LINE__, __FILE__)
+#define	PKTDUP(osh, skb)		osl_pktdup((osh), (skb), __LINE__, __FILE__)
+#else
+#define	PKTGET(osh, len, send)		osl_pktget((osh), (len))
+#define	PKTDUP(osh, skb)		osl_pktdup((osh), (skb))
+#endif /* BCMDBG_CTRACE */
+#define PKTLIST_DUMP(osh, buf)		BCM_REFERENCE(osh)
+#define PKTDBG_TRACE(osh, pkt, bit)	BCM_REFERENCE(osh)
+#define	PKTFREE(osh, skb, send)		osl_pktfree((osh), (skb), (send))
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+#define	PKTGET_STATIC(osh, len, send)		osl_pktget_static((osh), (len))
+#define	PKTFREE_STATIC(osh, skb, send)		osl_pktfree_static((osh), (skb), (send))
+#else
+#define	PKTGET_STATIC	PKTGET
+#define	PKTFREE_STATIC	PKTFREE
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+#define	PKTDATA(osh, skb)		({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->data);})
+#define	PKTLEN(osh, skb)		({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->len);})
+#define PKTHEADROOM(osh, skb)		(PKTDATA(osh, skb)-(((struct sk_buff*)(skb))->head))
+#define PKTEXPHEADROOM(osh, skb, b)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 skb_realloc_headroom((struct sk_buff*)(skb), (b)); \
+	 })
+#define PKTTAILROOM(osh, skb)		\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 skb_tailroom((struct sk_buff*)(skb)); \
+	 })
+#define PKTPADTAILROOM(osh, skb, padlen) \
+	({ \
+	 BCM_REFERENCE(osh); \
+	 skb_pad((struct sk_buff*)(skb), (padlen)); \
+	 })
+#define	PKTNEXT(osh, skb)		({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->next);})
+#define	PKTSETNEXT(osh, skb, x)		\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->next = (struct sk_buff*)(x)); \
+	 })
+#define	PKTSETLEN(osh, skb, len)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 __skb_trim((struct sk_buff*)(skb), (len)); \
+	 })
+#define	PKTPUSH(osh, skb, bytes)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 skb_push((struct sk_buff*)(skb), (bytes)); \
+	 })
+#define	PKTPULL(osh, skb, bytes)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 skb_pull((struct sk_buff*)(skb), (bytes)); \
+	 })
+#define	PKTTAG(skb)			((void*)(((struct sk_buff*)(skb))->cb))
+#define PKTSETPOOL(osh, skb, x, y)	BCM_REFERENCE(osh)
+#define	PKTPOOL(osh, skb)		({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+#define PKTFREELIST(skb)        PKTLINK(skb)
+#define PKTSETFREELIST(skb, x)  PKTSETLINK((skb), (x))
+#define PKTPTR(skb)             (skb)
+#define PKTID(skb)              ({BCM_REFERENCE(skb); 0;})
+#define PKTSETID(skb, id)       ({BCM_REFERENCE(skb); BCM_REFERENCE(id);})
+#define PKTSHRINK(osh, m)		({BCM_REFERENCE(osh); m;})
+
+#ifdef BCMDBG_CTRACE
+#define	DEL_CTRACE(zosh, zskb) { \
+	unsigned long zflags; \
+	spin_lock_irqsave(&(zosh)->ctrace_lock, zflags); \
+	list_del(&(zskb)->ctrace_list); \
+	(zosh)->ctrace_num--; \
+	(zskb)->ctrace_start = 0; \
+	(zskb)->ctrace_count = 0; \
+	spin_unlock_irqrestore(&(zosh)->ctrace_lock, zflags); \
+}
+
+#define	UPDATE_CTRACE(zskb, zfile, zline) { \
+	struct sk_buff *_zskb = (struct sk_buff *)(zskb); \
+	if (_zskb->ctrace_count < CTRACE_NUM) { \
+		_zskb->func[_zskb->ctrace_count] = zfile; \
+		_zskb->line[_zskb->ctrace_count] = zline; \
+		_zskb->ctrace_count++; \
+	} \
+	else { \
+		_zskb->func[_zskb->ctrace_start] = zfile; \
+		_zskb->line[_zskb->ctrace_start] = zline; \
+		_zskb->ctrace_start++; \
+		if (_zskb->ctrace_start >= CTRACE_NUM) \
+			_zskb->ctrace_start = 0; \
+	} \
+}
+
+#define	ADD_CTRACE(zosh, zskb, zfile, zline) { \
+	unsigned long zflags; \
+	spin_lock_irqsave(&(zosh)->ctrace_lock, zflags); \
+	list_add(&(zskb)->ctrace_list, &(zosh)->ctrace_list); \
+	(zosh)->ctrace_num++; \
+	UPDATE_CTRACE(zskb, zfile, zline); \
+	spin_unlock_irqrestore(&(zosh)->ctrace_lock, zflags); \
+}
+
+#define PKTCALLER(zskb)	UPDATE_CTRACE((struct sk_buff *)zskb, (char *)__FUNCTION__, __LINE__)
+#endif /* BCMDBG_CTRACE */
+
+#ifdef CTFPOOL
+#define	CTFPOOL_REFILL_THRESH	3
+typedef struct ctfpool {
+	void		*head;
+	spinlock_t	lock;
+	uint		max_obj;
+	uint		curr_obj;
+	uint		obj_size;
+	uint		refills;
+	uint		fast_allocs;
+	uint 		fast_frees;
+	uint 		slow_allocs;
+} ctfpool_t;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+#define	FASTBUF	(1 << 0)
+#define	PKTSETFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->pktc_flags) |= FASTBUF); \
+	 })
+#define	PKTCLRFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->pktc_flags) &= (~FASTBUF)); \
+	 })
+#define	PKTISFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->pktc_flags) & FASTBUF); \
+	 })
+#define	PKTFAST(osh, skb)	(((struct sk_buff*)(skb))->pktc_flags)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+#define	FASTBUF	(1 << 16)
+#define	PKTSETFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->mac_len) |= FASTBUF); \
+	 })
+#define	PKTCLRFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->mac_len) &= (~FASTBUF)); \
+	 })
+#define	PKTISFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->mac_len) & FASTBUF); \
+	 })
+#define	PKTFAST(osh, skb)	(((struct sk_buff*)(skb))->mac_len)
+#else
+#define	FASTBUF	(1 << 0)
+#define	PKTSETFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->__unused) |= FASTBUF); \
+	 })
+#define	PKTCLRFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->__unused) &= (~FASTBUF)); \
+	 })
+#define	PKTISFAST(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 ((((struct sk_buff*)(skb))->__unused) & FASTBUF); \
+	 })
+#define	PKTFAST(osh, skb)	(((struct sk_buff*)(skb))->__unused)
+#endif /* 2.6.22 */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+#define	CTFPOOLPTR(osh, skb)	(((struct sk_buff*)(skb))->ctfpool)
+#define	CTFPOOLHEAD(osh, skb)	(((ctfpool_t *)((struct sk_buff*)(skb))->ctfpool)->head)
+#else
+#define	CTFPOOLPTR(osh, skb)	(((struct sk_buff*)(skb))->sk)
+#define	CTFPOOLHEAD(osh, skb)	(((ctfpool_t *)((struct sk_buff*)(skb))->sk)->head)
+#endif
+
+extern void *osl_ctfpool_add(osl_t *osh);
+extern void osl_ctfpool_replenish(osl_t *osh, uint thresh);
+extern int32 osl_ctfpool_init(osl_t *osh, uint numobj, uint size);
+extern void osl_ctfpool_cleanup(osl_t *osh);
+extern void osl_ctfpool_stats(osl_t *osh, void *b);
+#else /* CTFPOOL */
+#define	PKTSETFAST(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTCLRFAST(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTISFAST(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+#endif /* CTFPOOL */
+
+#define	PKTSETCTF(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTCLRCTF(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTISCTF(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+
+#ifdef HNDCTF
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+#define	SKIPCT	(1 << 2)
+#define	CHAINED	(1 << 3)
+#define	PKTSETSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags |= SKIPCT); \
+	 })
+#define	PKTCLRSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags &= (~SKIPCT)); \
+	 })
+#define	PKTSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags & SKIPCT); \
+	 })
+#define	PKTSETCHAINED(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags |= CHAINED); \
+	 })
+#define	PKTCLRCHAINED(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->pktc_flags &= (~CHAINED)); \
+	 })
+#define	PKTISCHAINED(skb)	(((struct sk_buff*)(skb))->pktc_flags & CHAINED)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+#define	SKIPCT	(1 << 18)
+#define	CHAINED	(1 << 19)
+#define	PKTSETSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len |= SKIPCT); \
+	 })
+#define	PKTCLRSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len &= (~SKIPCT)); \
+	 })
+#define	PKTSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len & SKIPCT); \
+	 })
+#define	PKTSETCHAINED(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len |= CHAINED); \
+	 })
+#define	PKTCLRCHAINED(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->mac_len &= (~CHAINED)); \
+	 })
+#define	PKTISCHAINED(skb)	(((struct sk_buff*)(skb))->mac_len & CHAINED)
+#else /* 2.6.22 */
+#define	SKIPCT	(1 << 2)
+#define	CHAINED	(1 << 3)
+#define	PKTSETSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused |= SKIPCT); \
+	 })
+#define	PKTCLRSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused &= (~SKIPCT)); \
+	 })
+#define	PKTSKIPCT(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused & SKIPCT); \
+	 })
+#define	PKTSETCHAINED(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused |= CHAINED); \
+	 })
+#define	PKTCLRCHAINED(osh, skb)	\
+	({ \
+	 BCM_REFERENCE(osh); \
+	 (((struct sk_buff*)(skb))->__unused &= (~CHAINED)); \
+	 })
+#define	PKTISCHAINED(skb)	(((struct sk_buff*)(skb))->__unused & CHAINED)
+#endif /* 2.6.22 */
+typedef struct ctf_mark {
+	uint32	value;
+}	ctf_mark_t;
+#define CTF_MARK(m)				(m.value)
+#else /* HNDCTF */
+#define	PKTSETSKIPCT(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTCLRSKIPCT(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define	PKTSKIPCT(osh, skb)	({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define CTF_MARK(m)		({BCM_REFERENCE(m); 0;})
+#endif /* HNDCTF */
+
+#ifdef BCMFA
+#ifdef BCMFA_HW_HASH
+#define PKTSETFAHIDX(skb, idx)	(((struct sk_buff*)(skb))->napt_idx = idx)
+#else
+#define PKTSETFAHIDX(skb, idx)	({BCM_REFERENCE(skb); BCM_REFERENCE(idx);})
+#endif /* BCMFA_HW_HASH */
+#define PKTGETFAHIDX(skb)	(((struct sk_buff*)(skb))->napt_idx)
+#define PKTSETFADEV(skb, imp)	(((struct sk_buff*)(skb))->dev = imp)
+#define PKTSETRXDEV(skb)	(((struct sk_buff*)(skb))->rxdev = ((struct sk_buff*)(skb))->dev)
+
+#define	AUX_TCP_FIN_RST	(1 << 0)
+#define	AUX_FREED	(1 << 1)
+#define PKTSETFAAUX(skb)	(((struct sk_buff*)(skb))->napt_flags |= AUX_TCP_FIN_RST)
+#define	PKTCLRFAAUX(skb)	(((struct sk_buff*)(skb))->napt_flags &= (~AUX_TCP_FIN_RST))
+#define	PKTISFAAUX(skb)		(((struct sk_buff*)(skb))->napt_flags & AUX_TCP_FIN_RST)
+#define PKTSETFAFREED(skb)	(((struct sk_buff*)(skb))->napt_flags |= AUX_FREED)
+#define	PKTCLRFAFREED(skb)	(((struct sk_buff*)(skb))->napt_flags &= (~AUX_FREED))
+#define	PKTISFAFREED(skb)	(((struct sk_buff*)(skb))->napt_flags & AUX_FREED)
+#define	PKTISFABRIDGED(skb)	PKTISFAAUX(skb)
+#else
+#define	PKTISFAAUX(skb)		({BCM_REFERENCE(skb); FALSE;})
+#define	PKTISFABRIDGED(skb)	({BCM_REFERENCE(skb); FALSE;})
+#define	PKTISFAFREED(skb)	({BCM_REFERENCE(skb); FALSE;})
+
+#define	PKTCLRFAAUX(skb)	BCM_REFERENCE(skb)
+#define PKTSETFAFREED(skb)	BCM_REFERENCE(skb)
+#define	PKTCLRFAFREED(skb)	BCM_REFERENCE(skb)
+#endif /* BCMFA */
+
+extern void osl_pktfree(osl_t *osh, void *skb, bool send);
+extern void *osl_pktget_static(osl_t *osh, uint len);
+extern void osl_pktfree_static(osl_t *osh, void *skb, bool send);
+
+#ifdef BCMDBG_CTRACE
+#define PKT_CTRACE_DUMP(osh, b)	osl_ctrace_dump((osh), (b))
+extern void *osl_pktget(osl_t *osh, uint len, int line, char *file);
+extern void *osl_pkt_frmnative(osl_t *osh, void *skb, int line, char *file);
+extern int osl_pkt_is_frmnative(osl_t *osh, struct sk_buff *pkt);
+extern void *osl_pktdup(osl_t *osh, void *skb, int line, char *file);
+struct bcmstrbuf;
+extern void osl_ctrace_dump(osl_t *osh, struct bcmstrbuf *b);
+#else
+extern void *osl_pkt_frmnative(osl_t *osh, void *skb);
+extern void *osl_pktget(osl_t *osh, uint len);
+extern void *osl_pktdup(osl_t *osh, void *skb);
+#endif /* BCMDBG_CTRACE */
+extern struct sk_buff *osl_pkt_tonative(osl_t *osh, void *pkt);
+#ifdef BCMDBG_CTRACE
+#define PKTFRMNATIVE(osh, skb)  osl_pkt_frmnative(((osl_t *)osh), \
+				(struct sk_buff*)(skb), __LINE__, __FILE__)
+#define	PKTISFRMNATIVE(osh, skb) osl_pkt_is_frmnative((osl_t *)(osh), (struct sk_buff *)(skb))
+#else
+#define PKTFRMNATIVE(osh, skb)	osl_pkt_frmnative(((osl_t *)osh), (struct sk_buff*)(skb))
+#endif /* BCMDBG_CTRACE */
+#define PKTTONATIVE(osh, pkt)		osl_pkt_tonative((osl_t *)(osh), (pkt))
+
+#define	PKTLINK(skb)			(((struct sk_buff*)(skb))->prev)
+#define	PKTSETLINK(skb, x)		(((struct sk_buff*)(skb))->prev = (struct sk_buff*)(x))
+#define	PKTPRIO(skb)			(((struct sk_buff*)(skb))->priority)
+#define	PKTSETPRIO(skb, x)		(((struct sk_buff*)(skb))->priority = (x))
+#define PKTSUMNEEDED(skb)		(((struct sk_buff*)(skb))->ip_summed == CHECKSUM_HW)
+#define PKTSETSUMGOOD(skb, x)		(((struct sk_buff*)(skb))->ip_summed = \
+						((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE))
+/* PKTSETSUMNEEDED and PKTSUMGOOD are not possible because skb->ip_summed is overloaded */
+#define PKTSHARED(skb)                  (((struct sk_buff*)(skb))->cloned)
+
+#ifdef CONFIG_NF_CONNTRACK_MARK
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define PKTMARK(p)                     (((struct sk_buff *)(p))->mark)
+#define PKTSETMARK(p, m)               ((struct sk_buff *)(p))->mark = (m)
+#else /* !2.6.0 */
+#define PKTMARK(p)                     (((struct sk_buff *)(p))->nfmark)
+#define PKTSETMARK(p, m)               ((struct sk_buff *)(p))->nfmark = (m)
+#endif /* 2.6.0 */
+#else /* CONFIG_NF_CONNTRACK_MARK */
+#define PKTMARK(p)                     0
+#define PKTSETMARK(p, m)
+#endif /* CONFIG_NF_CONNTRACK_MARK */
+
+#define PKTALLOCED(osh)		osl_pktalloced(osh)
+extern uint osl_pktalloced(osl_t *osh);
+
+#define	DMA_MAP(osh, va, size, direction, p, dmah) \
+	osl_dma_map((osh), (va), (size), (direction), (p), (dmah))
+
+#ifdef PKTC
+/* Use 8 bytes of skb tstamp field to store below info */
+struct chain_node {
+	struct sk_buff	*link;
+	unsigned int	flags:3, pkts:9, bytes:20;
+};
+
+#define CHAIN_NODE(skb)		((struct chain_node*)(((struct sk_buff*)skb)->pktc_cb))
+
+#define	PKTCSETATTR(s, f, p, b)	({CHAIN_NODE(s)->flags = (f); CHAIN_NODE(s)->pkts = (p); \
+	                         CHAIN_NODE(s)->bytes = (b);})
+#define	PKTCCLRATTR(s)		({CHAIN_NODE(s)->flags = CHAIN_NODE(s)->pkts = \
+	                         CHAIN_NODE(s)->bytes = 0;})
+#define	PKTCGETATTR(s)		(CHAIN_NODE(s)->flags << 29 | CHAIN_NODE(s)->pkts << 20 | \
+	                         CHAIN_NODE(s)->bytes)
+#define	PKTCCNT(skb)		(CHAIN_NODE(skb)->pkts)
+#define	PKTCLEN(skb)		(CHAIN_NODE(skb)->bytes)
+#define	PKTCGETFLAGS(skb)	(CHAIN_NODE(skb)->flags)
+#define	PKTCSETFLAGS(skb, f)	(CHAIN_NODE(skb)->flags = (f))
+#define	PKTCCLRFLAGS(skb)	(CHAIN_NODE(skb)->flags = 0)
+#define	PKTCFLAGS(skb)		(CHAIN_NODE(skb)->flags)
+#define	PKTCSETCNT(skb, c)	(CHAIN_NODE(skb)->pkts = (c))
+#define	PKTCINCRCNT(skb)	(CHAIN_NODE(skb)->pkts++)
+#define	PKTCADDCNT(skb, c)	(CHAIN_NODE(skb)->pkts += (c))
+#define	PKTCSETLEN(skb, l)	(CHAIN_NODE(skb)->bytes = (l))
+#define	PKTCADDLEN(skb, l)	(CHAIN_NODE(skb)->bytes += (l))
+#define	PKTCSETFLAG(skb, fb)	(CHAIN_NODE(skb)->flags |= (fb))
+#define	PKTCCLRFLAG(skb, fb)	(CHAIN_NODE(skb)->flags &= ~(fb))
+#define	PKTCLINK(skb)		(CHAIN_NODE(skb)->link)
+#define	PKTSETCLINK(skb, x)	(CHAIN_NODE(skb)->link = (struct sk_buff*)(x))
+#define FOREACH_CHAINED_PKT(skb, nskb) \
+	for (; (skb) != NULL; (skb) = (nskb)) \
+		if ((nskb) = (PKTISCHAINED(skb) ? PKTCLINK(skb) : NULL), \
+		    PKTSETCLINK((skb), NULL), 1)
+#define	PKTCFREE(osh, skb, send) \
+do { \
+	void *nskb; \
+	ASSERT((skb) != NULL); \
+	FOREACH_CHAINED_PKT((skb), nskb) { \
+		PKTCLRCHAINED((osh), (skb)); \
+		PKTCCLRFLAGS((skb)); \
+		PKTFREE((osh), (skb), (send)); \
+	} \
+} while (0)
+#define PKTCENQTAIL(h, t, p) \
+do { \
+	if ((t) == NULL) { \
+		(h) = (t) = (p); \
+	} else { \
+		PKTSETCLINK((t), (p)); \
+		(t) = (p); \
+	} \
+} while (0)
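+
+/* Illustrative chain walk (sketch; pkt1/pkt2 are hypothetical buffers, and
+ * PKTSETCHAINED must be set on every buffer that links onward or the walk
+ * stops there):
+ *
+ *	void *head = NULL, *tail = NULL, *nskb;
+ *	PKTCENQTAIL(head, tail, pkt1);
+ *	PKTSETCHAINED(osh, pkt1);
+ *	PKTCENQTAIL(head, tail, pkt2);
+ *	FOREACH_CHAINED_PKT(head, nskb) {
+ *		... process head; its chain link is already cleared ...
+ *	}
+ */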
+#endif /* PKTC */
+
+#else /* ! BCMDRIVER */
+
+
+/* ASSERT */
+	#define ASSERT(exp)	do {} while (0)
+
+/* MALLOC and MFREE */
+#define MALLOC(o, l) malloc(l)
+#define MFREE(o, p, l) free(p)
+#include <stdlib.h>
+
+/* str* and mem* functions */
+#include <string.h>
+
+/* *printf functions */
+#include <stdio.h>
+
+/* bcopy, bcmp, and bzero */
+extern void bcopy(const void *src, void *dst, size_t len);
+extern int bcmp(const void *b1, const void *b2, size_t len);
+extern void bzero(void *b, size_t len);
+#endif /* ! BCMDRIVER */
+
+#endif	/* _linux_osl_h_ */
diff --git a/drivers/staging/edison-bcm43340/include/linuxver.h b/drivers/staging/edison-bcm43340/include/linuxver.h
new file mode 100644
index 0000000..4952127
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/linuxver.h
@@ -0,0 +1,750 @@
+/*
+ * Linux-specific abstractions to gain some independence from linux kernel versions.
+ * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: linuxver.h 431983 2013-10-25 06:53:27Z $
+ */
+
+#ifndef _linuxver_h_
+#define _linuxver_h_
+
+#include <typedefs.h>
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#include <linux/config.h>
+#else
+/*
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33))
+#include <generated/autoconf.h>
+#else
+#include <linux/autoconf.h>
+#endif
+*/
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+#include <linux/module.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
+#include <linux/kconfig.h>
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))
+/* __NO_VERSION__ must be defined for all linkables except one in 2.2 */
+#ifdef __UNDEF_NO_VERSION__
+#undef __NO_VERSION__
+#else
+#define __NO_VERSION__
+#endif
+#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0) */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
+#define module_param(_name_, _type_, _perm_)	MODULE_PARM(_name_, "i")
+#define module_param_string(_name_, _string_, _size_, _perm_) \
+		MODULE_PARM(_string_, "c" __MODULE_STRING(_size_))
+#endif
+
+/* linux/malloc.h is deprecated, use linux/slab.h instead. */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9))
+#include <linux/malloc.h>
+#else
+#include <linux/slab.h>
+#endif
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+#include <linux/semaphore.h>
+#else
+#include <asm/semaphore.h>
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
+#undef IP_TOS
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) */
+#include <asm/io.h>
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
+#include <linux/workqueue.h>
+#else
+#include <linux/tqueue.h>
+#ifndef work_struct
+#define work_struct tq_struct
+#endif
+#ifndef INIT_WORK
+#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data))
+#endif
+#ifndef schedule_work
+#define schedule_work(_work) schedule_task((_work))
+#endif
+#ifndef flush_scheduled_work
+#define flush_scheduled_work() flush_scheduled_tasks()
+#endif
+#endif	/* LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+#define DAEMONIZE(a)	do { \
+		allow_signal(SIGKILL);	\
+		allow_signal(SIGTERM);	\
+	} while (0)
+#elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \
+	(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)))
+#define DAEMONIZE(a) do { \
+		daemonize(a); \
+		allow_signal(SIGKILL); \
+		allow_signal(SIGTERM); \
+	} while (0)
+#else /* Linux 2.4 (w/o preemption patch) */
+#define RAISE_RX_SOFTIRQ() \
+	cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
+#define DAEMONIZE(a) do { \
+		daemonize(); \
+		if (a) \
+			strncpy(current->comm, a, MIN(sizeof(current->comm), strlen(a))); \
+	} while (0)
+#endif /* LINUX_VERSION_CODE  */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+#define	MY_INIT_WORK(_work, _func)	INIT_WORK(_work, _func)
+#else
+#define	MY_INIT_WORK(_work, _func)	INIT_WORK(_work, _func, _work)
+#if !(LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) && defined(RHEL_MAJOR) && \
+	(RHEL_MAJOR == 5))
+/* Exclude RHEL 5 */
+typedef void (*work_func_t)(void *work);
+#endif
+#endif	/* >= 2.6.19 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+/* Some distributions have their own 2.6.x compatibility layers */
+#ifndef IRQ_NONE
+typedef void irqreturn_t;
+#define IRQ_NONE
+#define IRQ_HANDLED
+#define IRQ_RETVAL(x)
+#endif
+#else
+typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, struct pt_regs *ptregs);
+#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
+#define IRQF_SHARED	SA_SHIRQ
+#endif /* < 2.6.18 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
+#ifdef	CONFIG_NET_RADIO
+#define	CONFIG_WIRELESS_EXT
+#endif
+#endif	/* < 2.6.17 */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67)
+#define MOD_INC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
+#include <linux/sched.h>
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+#include <linux/sched/rt.h>
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
+#include <net/lib80211.h>
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
+#include <linux/ieee80211.h>
+#else
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+#include <net/ieee80211.h>
+#endif
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) */
+
+
+#ifndef __exit
+#define __exit
+#endif
+#ifndef __devexit
+#define __devexit
+#endif
+#ifndef __devinit
+#  if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+#    define __devinit	__init
+#  else
+/* All devices are hotpluggable since linux 3.8.0 */
+#    define __devinit
+#  endif
+#endif /* !__devinit */
+#ifndef __devinitdata
+#define __devinitdata
+#endif
+#ifndef __devexit_p
+#define __devexit_p(x)	x
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0))
+
+#define pci_get_drvdata(dev)		(dev)->sysdata
+#define pci_set_drvdata(dev, value)	(dev)->sysdata = (value)
+
+/*
+ * New-style (2.4.x) PCI/hot-pluggable PCI/CardBus registration
+ */
+
+struct pci_device_id {
+	unsigned int vendor, device;		/* Vendor and device ID or PCI_ANY_ID */
+	unsigned int subvendor, subdevice;	/* Subsystem IDs or PCI_ANY_ID */
+	unsigned int class, class_mask;		/* (class,subclass,prog-if) triplet */
+	unsigned long driver_data;		/* Data private to the driver */
+};
+
+struct pci_driver {
+	struct list_head node;
+	char *name;
+	const struct pci_device_id *id_table;	/* NULL if wants all devices */
+	int (*probe)(struct pci_dev *dev,
+	             const struct pci_device_id *id); /* New device inserted */
+	void (*remove)(struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug
+						 * capable driver)
+						 */
+	void (*suspend)(struct pci_dev *dev);	/* Device suspended */
+	void (*resume)(struct pci_dev *dev);	/* Device woken up */
+};
+
+#define MODULE_DEVICE_TABLE(type, name)
+#define PCI_ANY_ID (~0)
+
+/* compatpci.c */
+#define pci_module_init pci_register_driver
+extern int pci_register_driver(struct pci_driver *drv);
+extern void pci_unregister_driver(struct pci_driver *drv);
+
+#endif /* PCI registration */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18))
+#define pci_module_init pci_register_driver
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18))
+#ifdef MODULE
+#define module_init(x) int init_module(void) { return x(); }
+#define module_exit(x) void cleanup_module(void) { x(); }
+#else
+#define module_init(x)	__initcall(x);
+#define module_exit(x)	__exitcall(x);
+#endif
+#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
+#define WL_USE_NETDEV_OPS
+#else
+#undef WL_USE_NETDEV_OPS
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL)
+#define WL_CONFIG_RFKILL
+#else
+#undef WL_CONFIG_RFKILL
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48))
+#define list_for_each(pos, head) \
+	for (pos = (head)->next; pos != (head); pos = pos->next)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13))
+#define pci_resource_start(dev, bar)	((dev)->base_address[(bar)])
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44))
+#define pci_resource_start(dev, bar)	((dev)->resource[(bar)].start)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23))
+#define pci_enable_device(dev) do { } while (0)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14))
+#define net_device device
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42))
+
+/*
+ * DMA mapping
+ *
+ * See linux/Documentation/DMA-mapping.txt
+ */
+
+#ifndef PCI_DMA_TODEVICE
+#define	PCI_DMA_TODEVICE	1
+#define	PCI_DMA_FROMDEVICE	2
+#endif
+
+typedef u32 dma_addr_t;
+
+/* Pure 2^n version of get_order */
+static inline int get_order(unsigned long size)
+{
+	int order;
+
+	size = (size-1) >> (PAGE_SHIFT-1);
+	order = -1;
+	do {
+		size >>= 1;
+		order++;
+	} while (size);
+	return order;
+}
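+
+/* For example, with PAGE_SHIFT == 12 (4 KB pages): get_order(4096) == 0,
+ * get_order(4097) == 1 and get_order(16384) == 2.
+ */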
+
+static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+                                         dma_addr_t *dma_handle)
+{
+	void *ret;
+	int gfp = GFP_ATOMIC | GFP_DMA;
+
+	ret = (void *)__get_free_pages(gfp, get_order(size));
+
+	if (ret != NULL) {
+		memset(ret, 0, size);
+		*dma_handle = virt_to_bus(ret);
+	}
+	return ret;
+}
+static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+                                       void *vaddr, dma_addr_t dma_handle)
+{
+	free_pages((unsigned long)vaddr, get_order(size));
+}
+#define pci_map_single(cookie, address, size, dir)	virt_to_bus(address)
+#define pci_unmap_single(cookie, address, size, dir)
+
+#endif /* DMA mapping */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43))
+
+#define dev_kfree_skb_any(a)		dev_kfree_skb(a)
+#define netif_down(dev)			do { (dev)->start = 0; } while (0)
+
+/* pcmcia-cs provides its own netdevice compatibility layer */
+#ifndef _COMPAT_NETDEVICE_H
+
+/*
+ * SoftNet
+ *
+ * For pre-softnet kernels we need to tell the upper layer not to
+ * re-enter start_xmit() while we are in there. However softnet
+ * guarantees not to enter while we are in there so there is no need
+ * to do the netif_stop_queue() dance unless the transmit queue really
+ * gets stuck. This should also improve performance according to tests
+ * done by Aman Singla.
+ */
+
+#define dev_kfree_skb_irq(a)	dev_kfree_skb(a)
+#define netif_wake_queue(dev) \
+		do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0)
+#define netif_stop_queue(dev)	set_bit(0, &(dev)->tbusy)
+
+static inline void netif_start_queue(struct net_device *dev)
+{
+	dev->tbusy = 0;
+	dev->interrupt = 0;
+	dev->start = 1;
+}
+
+#define netif_queue_stopped(dev)	(dev)->tbusy
+#define netif_running(dev)		(dev)->start
+
+#endif /* _COMPAT_NETDEVICE_H */
+
+#define netif_device_attach(dev)	netif_start_queue(dev)
+#define netif_device_detach(dev)	netif_stop_queue(dev)
+
+/* 2.4.x renamed bottom halves to tasklets */
+#define tasklet_struct				tq_struct
+static inline void tasklet_schedule(struct tasklet_struct *tasklet)
+{
+	queue_task(tasklet, &tq_immediate);
+	mark_bh(IMMEDIATE_BH);
+}
+
+static inline void tasklet_init(struct tasklet_struct *tasklet,
+                                void (*func)(unsigned long),
+                                unsigned long data)
+{
+	tasklet->next = NULL;
+	tasklet->sync = 0;
+	tasklet->routine = (void (*)(void *))func;
+	tasklet->data = (void *)data;
+}
+#define tasklet_kill(tasklet)	do {} while (0)
+
+/* 2.4.x introduced del_timer_sync() */
+#define del_timer_sync(timer) del_timer(timer)
+
+#else
+
+#define netif_down(dev)
+
+#endif /* SoftNet */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3))
+
+/*
+ * Emit code to initialise a tq_struct's routine and data pointers
+ */
+#define PREPARE_TQUEUE(_tq, _routine, _data)			\
+	do {							\
+		(_tq)->routine = _routine;			\
+		(_tq)->data = _data;				\
+	} while (0)
+
+/*
+ * Emit code to initialise all of a tq_struct
+ */
+#define INIT_TQUEUE(_tq, _routine, _data)			\
+	do {							\
+		INIT_LIST_HEAD(&(_tq)->list);			\
+		(_tq)->sync = 0;				\
+		PREPARE_TQUEUE((_tq), (_routine), (_data));	\
+	} while (0)
+
+#endif	/* LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3) */
+
+/* Power management related macro & routines */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9)
+#define	PCI_SAVE_STATE(a, b)	pci_save_state(a)
+#define	PCI_RESTORE_STATE(a, b)	pci_restore_state(a)
+#else
+#define	PCI_SAVE_STATE(a, b)	pci_save_state(a, b)
+#define	PCI_RESTORE_STATE(a, b)	pci_restore_state(a, b)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))
+static inline int
+pci_save_state(struct pci_dev *dev, u32 *buffer)
+{
+	int i;
+	if (buffer) {
+		for (i = 0; i < 16; i++)
+			pci_read_config_dword(dev, i * 4, &buffer[i]);
+	}
+	return 0;
+}
+
+static inline int
+pci_restore_state(struct pci_dev *dev, u32 *buffer)
+{
+	int i;
+
+	if (buffer) {
+		for (i = 0; i < 16; i++)
+			pci_write_config_dword(dev, i * 4, buffer[i]);
+	}
+	/*
+	 * otherwise, write the context information we know from bootup.
+	 * This works around a problem where warm-booting from Windows
+	 * combined with a D3(hot)->D0 transition causes PCI config
+	 * header data to be forgotten.
+	 */
+	else {
+		for (i = 0; i < 6; i ++)
+			pci_write_config_dword(dev,
+			                       PCI_BASE_ADDRESS_0 + (i * 4),
+			                       pci_resource_start(dev, i));
+		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+	}
+	return 0;
+}
+#endif /* PCI power management */
+
+/* Old cp0 access macros deprecated in 2.4.19 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19))
+#define read_c0_count() read_32bit_cp0_register(CP0_COUNT)
+#endif
+
+/* Module refcount handled internally in 2.6.x */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#ifndef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev)		do {} while (0)
+#define OLD_MOD_INC_USE_COUNT		MOD_INC_USE_COUNT
+#define OLD_MOD_DEC_USE_COUNT		MOD_DEC_USE_COUNT
+#else
+#define OLD_MOD_INC_USE_COUNT		do {} while (0)
+#define OLD_MOD_DEC_USE_COUNT		do {} while (0)
+#endif
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
+#ifndef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev)		do {} while (0)
+#endif
+#ifndef MOD_INC_USE_COUNT
+#define MOD_INC_USE_COUNT			do {} while (0)
+#endif
+#ifndef MOD_DEC_USE_COUNT
+#define MOD_DEC_USE_COUNT			do {} while (0)
+#endif
+#define OLD_MOD_INC_USE_COUNT		MOD_INC_USE_COUNT
+#define OLD_MOD_DEC_USE_COUNT		MOD_DEC_USE_COUNT
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev)	do {} while (0)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0))
+#ifndef HAVE_FREE_NETDEV
+#define free_netdev(dev)		kfree(dev)
+#endif
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+/* struct packet_type redefined in 2.6.x */
+#define af_packet_priv			data
+#endif
+
+/* suspend args */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
+#define DRV_SUSPEND_STATE_TYPE pm_message_t
+#else
+#define DRV_SUSPEND_STATE_TYPE uint32
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+#define CHECKSUM_HW	CHECKSUM_PARTIAL
+#endif
+
+typedef struct {
+	void	*parent;  /* some external entity that the thread is supposed to work for */
+	char	*proc_name;
+	struct	task_struct *p_task;
+	long	thr_pid;
+	int		prio; /* priority */
+	struct	semaphore sema;
+	int	terminated;
+	struct	completion completed;
+	spinlock_t	spinlock;
+	int		up_cnt;
+} tsk_ctl_t;
+
+
+/* requires a tsk_ctl_t tsk argument; the caller's private data is passed in the owner ptr */
+/* note this macro assumes there may be only one context waiting on thread's completion */
+#ifdef DHD_DEBUG
+#define DBG_THR(x) printk x
+#else
+#define DBG_THR(x)
+#endif
+
+static inline bool binary_sema_down(tsk_ctl_t *tsk)
+{
+	if (down_interruptible(&tsk->sema) == 0) {
+		unsigned long flags = 0;
+		spin_lock_irqsave(&tsk->spinlock, flags);
+		if (tsk->up_cnt == 1)
+			tsk->up_cnt--;
+		else {
+			DBG_THR(("dhd_dpc_thread: Unexpected up_cnt %d\n", tsk->up_cnt));
+		}
+		spin_unlock_irqrestore(&tsk->spinlock, flags);
+		return false;
+	} else
+		return true;
+}
+
+static inline bool binary_sema_up(tsk_ctl_t *tsk)
+{
+	bool sem_up = false;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&tsk->spinlock, flags);
+	if (tsk->up_cnt == 0) {
+		tsk->up_cnt++;
+		sem_up = true;
+	} else if (tsk->up_cnt == 1) {
+		/* dhd_sched_dpc: dpc is already up! */
+	} else
+		DBG_THR(("dhd_sched_dpc: unexpected up cnt %d!\n", tsk->up_cnt));
+
+	spin_unlock_irqrestore(&tsk->spinlock, flags);
+
+	if (sem_up)
+		up(&tsk->sema);
+
+	return sem_up;
+}
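+
+/* Illustrative pairing (sketch; the loop below is a hypothetical worker):
+ * a producer signals work with binary_sema_up() while the worker blocks in
+ * binary_sema_down(), which returns true only when interrupted by a signal:
+ *
+ *	while (!tsk->terminated) {
+ *		if (binary_sema_down(tsk))
+ *			break;			(interrupted by a signal)
+ *		... handle one unit of work ...
+ *	}
+ */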
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define SMP_RD_BARRIER_DEPENDS(x) smp_read_barrier_depends(x)
+#else
+#define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x)
+#endif
+
+#define PROC_START(thread_func, owner, tsk_ctl, flags, name) \
+{ \
+	sema_init(&((tsk_ctl)->sema), 0); \
+	init_completion(&((tsk_ctl)->completed)); \
+	(tsk_ctl)->parent = owner; \
+	(tsk_ctl)->proc_name = name;  \
+	(tsk_ctl)->terminated = FALSE; \
+	(tsk_ctl)->p_task  = kthread_run(thread_func, tsk_ctl, (char*)name); \
+	(tsk_ctl)->thr_pid = (tsk_ctl)->p_task->pid; \
+	spin_lock_init(&((tsk_ctl)->spinlock)); \
+	DBG_THR(("%s(): thread:%s:%lx started\n", __FUNCTION__, \
+		(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+}
+
+#define PROC_STOP(tsk_ctl) \
+{ \
+	(tsk_ctl)->terminated = TRUE; \
+	smp_wmb(); \
+	up(&((tsk_ctl)->sema));	\
+	wait_for_completion(&((tsk_ctl)->completed)); \
+	DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
+			 (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+	(tsk_ctl)->thr_pid = -1; \
+}
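+
+/* Illustrative lifecycle (sketch; "my_worker" and "owner" are hypothetical).
+ * The thread function receives the tsk_ctl_t pointer as its kthread data and
+ * must signal completion before exiting, since PROC_STOP waits on it:
+ *
+ *	static int my_worker(void *data)
+ *	{
+ *		tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+ *		while (!tsk->terminated) {
+ *			if (binary_sema_down(tsk))
+ *				break;
+ *			... do work for tsk->parent ...
+ *		}
+ *		complete_and_exit(&tsk->completed, 0);
+ *	}
+ *
+ *	PROC_START(my_worker, owner, &tsk, 0, "my_worker");
+ *	...
+ *	PROC_STOP(&tsk);
+ */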
+
+/*  ----------------------- */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+#define KILL_PROC(nr, sig) \
+{ \
+struct task_struct *tsk; \
+struct pid *pid;    \
+pid = find_get_pid((pid_t)nr);    \
+tsk = pid_task(pid, PIDTYPE_PID);    \
+if (tsk) send_sig(sig, tsk, 1); \
+}
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
+	KERNEL_VERSION(2, 6, 30))
+#define KILL_PROC(pid, sig) \
+{ \
+	struct task_struct *tsk; \
+	tsk = find_task_by_vpid(pid); \
+	if (tsk) send_sig(sig, tsk, 1); \
+}
+#else
+#define KILL_PROC(pid, sig) \
+{ \
+	kill_proc(pid, sig, 1); \
+}
+#endif
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) */
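+
+/* E.g. KILL_PROC(thr_pid, SIGTERM) sends SIGTERM to the task identified by
+ * thr_pid, using whichever pid-lookup API the running kernel provides.
+ */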
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#include <linux/time.h>
+#include <linux/wait.h>
+#else
+#include <linux/sched.h>
+
+#define __wait_event_interruptible_timeout(wq, condition, ret)		\
+do {									\
+	wait_queue_t __wait;						\
+	init_waitqueue_entry(&__wait, current);				\
+									\
+	add_wait_queue(&wq, &__wait);					\
+	for (;;) {							\
+		set_current_state(TASK_INTERRUPTIBLE);			\
+		if (condition)						\
+			break;						\
+		if (!signal_pending(current)) {				\
+			ret = schedule_timeout(ret);			\
+			if (!ret)					\
+				break;					\
+			continue;					\
+		}							\
+		ret = -ERESTARTSYS;					\
+		break;							\
+	}								\
+	current->state = TASK_RUNNING;					\
+	remove_wait_queue(&wq, &__wait);				\
+} while (0)
+
+#define wait_event_interruptible_timeout(wq, condition, timeout)	\
+({									\
+	long __ret = timeout;						\
+	if (!(condition))						\
+		__wait_event_interruptible_timeout(wq, condition, __ret); \
+	__ret;								\
+})
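+
+/* The backport keeps the upstream semantics, so callers can be written the
+ * same way on old and new kernels ("wq" and "done" are hypothetical):
+ *
+ *	long left = wait_event_interruptible_timeout(wq, done != 0, HZ);
+ *	if (left == 0)		... timed out ...
+ *	else if (left < 0)	... interrupted (-ERESTARTSYS) ...
+ *	else			... condition true, "left" jiffies remained ...
+ */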
+
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
+
+/*
+ * For < 2.6.24, wl creates its own netdev but doesn't align the priv area
+ * like the genuine alloc_netdev(). Since netdev_priv() always returns the
+ * aligned address, it will not match our unaligned address for < 2.6.24.
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#define DEV_PRIV(dev)	(dev->priv)
+#else
+#define DEV_PRIV(dev)	netdev_priv(dev)
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
+#define WL_ISR(i, d, p)         wl_isr((i), (d))
+#else
+#define WL_ISR(i, d, p)         wl_isr((i), (d), (p))
+#endif  /* < 2.6.20 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#define netdev_priv(dev) dev->priv
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+#define CAN_SLEEP()	((!in_atomic() && !irqs_disabled()))
+#else
+#define CAN_SLEEP()	(FALSE)
+#endif
+
+#define KMALLOC_FLAG (CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC)
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+#define RANDOM32	prandom_u32
+#else
+#define RANDOM32	random32
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+#define SRANDOM32(entropy)	prandom_seed(entropy)
+#else
+#define SRANDOM32(entropy)	srandom32(entropy)
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+
+/*
+ * Override the latest kfifo functions with
+ * older versions so they work on older kernels.
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
+#define kfifo_in_spinlocked(a, b, c, d)		kfifo_put(a, (u8 *)b, c)
+#define kfifo_out_spinlocked(a, b, c, d)	kfifo_get(a, (u8 *)b, c)
+#define kfifo_esize(a)				1
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)) && \
+	(LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) &&	!defined(WL_COMPAT_WIRELESS)
+#define kfifo_in_spinlocked(a, b, c, d)		kfifo_in_locked(a, b, c, d)
+#define kfifo_out_spinlocked(a, b, c, d)	kfifo_out_locked(a, b, c, d)
+#define kfifo_esize(a)				1
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
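+
+/* With these shims a caller can use the one modern spelling everywhere, e.g.
+ * (hypothetical names; "fifo" is a struct kfifo pointer):
+ *
+ *	kfifo_in_spinlocked(fifo, buf, len, &lock);
+ *	kfifo_out_spinlocked(fifo, buf, len, &lock);
+ *
+ * On pre-2.6.33 kernels these expand to the old kfifo_put/kfifo_get, which
+ * take their own internal lock, so the lock argument is ignored there.
+ */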
+
+#endif /* _linuxver_h_ */
diff --git a/drivers/staging/edison-bcm43340/include/miniopt.h b/drivers/staging/edison-bcm43340/include/miniopt.h
new file mode 100644
index 0000000..73212a8
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/miniopt.h
@@ -0,0 +1,77 @@
+/*
+ * Command line options parser.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: miniopt.h 241182 2011-02-17 21:50:03Z $
+ */
+
+
+#ifndef MINI_OPT_H
+#define MINI_OPT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ---- Include Files ---------------------------------------------------- */
+/* ---- Constants and Types ---------------------------------------------- */
+
+#define MINIOPT_MAXKEY	128	/* Max length of an option key */
+typedef struct miniopt {
+
+	/* These are persistent after miniopt_init() */
+	const char* name;		/* name for prompt in error strings */
+	const char* flags;		/* option chars that take no args */
+	bool longflags;		/* long options may be flags */
+	bool opt_end;		/* at end of options (passed a "--") */
+
+	/* These are per-call to miniopt() */
+
+	int consumed;		/* number of argv entries consumed in
+				 * the most recent call to miniopt()
+				 */
+	bool positional;
+	bool good_int;		/* 'val' member is the result of a successful
+				 * strtol conversion of the option value
+				 */
+	char opt;
+	char key[MINIOPT_MAXKEY];
+	char* valstr;		/* positional param, or value for the option,
+				 * or null if the option had
+				 * no accompanying value
+				 */
+	uint uval;		/* strtol translation of valstr */
+	int  val;		/* strtol translation of valstr */
+} miniopt_t;
+
+void miniopt_init(miniopt_t *t, const char* name, const char* flags, bool longflags);
+int miniopt(miniopt_t *t, char **argv);
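+
+/* Illustrative parse loop (sketch; the return convention (-1 for end of
+ * options, a positive value for a parse error) is an assumption based on how
+ * the matching miniopt.c is conventionally used):
+ *
+ *	miniopt_t mo;
+ *	int err;
+ *	miniopt_init(&mo, "mytool", "vq", FALSE);	('v'/'q' take no value)
+ *	while ((err = miniopt(&mo, argv)) != -1) {
+ *		if (err > 0)
+ *			break;				(parse error)
+ *		if (mo.positional)
+ *			... mo.valstr is a positional parameter ...
+ *		else if (mo.opt == 'v')
+ *			... flag -v seen ...
+ *		argv += mo.consumed;
+ *	}
+ */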
+
+
+/* ---- Variable Externs ------------------------------------------------- */
+/* ---- Function Prototypes ---------------------------------------------- */
+
+
+#ifdef __cplusplus
+	}
+#endif
+
+#endif  /* MINI_OPT_H  */
diff --git a/drivers/staging/edison-bcm43340/include/msgtrace.h b/drivers/staging/edison-bcm43340/include/msgtrace.h
new file mode 100644
index 0000000..c01676f
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/msgtrace.h
@@ -0,0 +1,77 @@
+/*
+ * Trace messages sent over HBUS
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: msgtrace.h 369735 2012-11-19 22:50:22Z $
+ */
+
+#ifndef	_MSGTRACE_H
+#define	_MSGTRACE_H
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define MSGTRACE_VERSION 1
+
+/* Message trace header */
+typedef BWL_PRE_PACKED_STRUCT struct msgtrace_hdr {
+	uint8	version;
+	uint8   trace_type;
+#define MSGTRACE_HDR_TYPE_MSG 0
+#define MSGTRACE_HDR_TYPE_LOG 1
+	uint16	len;	/* Len of the trace */
+	uint32	seqnum;	/* Sequence number of message. Useful if the message has been lost
+			 * because of a DMA error or a bus reset (e.g. SDIO Func2)
+			 */
+	/* Msgtrace type  only */
+	uint32  discarded_bytes;  /* Number of discarded bytes because of trace overflow  */
+	uint32  discarded_printf; /* Number of discarded printf because of trace overflow */
+} BWL_POST_PACKED_STRUCT msgtrace_hdr_t;
+
+#define MSGTRACE_HDRLEN 	sizeof(msgtrace_hdr_t)
+
+/* The hbus driver generates traces while sending a trace message, which would cause endless
+ * traces. This flag must be set to TRUE around any hbus trace; it is reset in msgtrace_put().
+ * This prevents endless traces at the cost of possibly losing traces, but only in bus device
+ * code. It is recommended to set this flag in the SD_TRACE macro but not in SD_ERROR, so that
+ * hbus error traces are not missed; hbus error traces should not generate endless traces.
+ */
+extern bool msgtrace_hbus_trace;
+
+typedef void (*msgtrace_func_send_t)(void *hdl1, void *hdl2, uint8 *hdr,
+                                     uint16 hdrlen, uint8 *buf, uint16 buflen);
+extern void msgtrace_start(void);
+extern void msgtrace_stop(void);
+extern int msgtrace_sent(void);
+extern void msgtrace_put(char *buf, int count);
+extern void msgtrace_init(void *hdl1, void *hdl2, msgtrace_func_send_t func_send);
+extern bool msgtrace_event_enabled(void);
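+
+/* Illustrative wiring (sketch; "my_send", "bus_hdl" and "dev_hdl" are
+ * hypothetical): a bus driver registers its send routine once, then turns
+ * tracing on and off:
+ *
+ *	static void my_send(void *hdl1, void *hdl2, uint8 *hdr, uint16 hdrlen,
+ *	                    uint8 *buf, uint16 buflen) { ... send over HBUS ... }
+ *
+ *	msgtrace_init(bus_hdl, dev_hdl, my_send);
+ *	msgtrace_start();
+ *	...
+ *	msgtrace_stop();
+ */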
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif	/* _MSGTRACE_H */
diff --git a/drivers/staging/edison-bcm43340/include/osl.h b/drivers/staging/edison-bcm43340/include/osl.h
new file mode 100644
index 0000000..cdfb107
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/osl.h
@@ -0,0 +1,142 @@
+/*
+ * OS Abstraction Layer
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: osl.h 424562 2013-09-18 10:57:30Z $
+ */
+
+#ifndef _osl_h_
+#define _osl_h_
+
+/* osl handle type forward declaration */
+typedef struct osl_info osl_t;
+typedef struct osl_dmainfo osldma_t;
+
+#define OSL_PKTTAG_SZ	32 /* Size of PktTag */
+
+/* Drivers use PKTFREESETCB to register a callback function when a packet is freed by OSL */
+typedef void (*pktfree_cb_fn_t)(void *ctx, void *pkt, unsigned int status);
+
+/* Drivers use REGOPSSET() to register register read/write functions */
+typedef unsigned int (*osl_rreg_fn_t)(void *ctx, volatile void *reg, unsigned int size);
+typedef void  (*osl_wreg_fn_t)(void *ctx, volatile void *reg, unsigned int val, unsigned int size);
+
+
+#include <linux_osl.h>
+
+#ifndef PKTDBG_TRACE
+#define PKTDBG_TRACE(osh, pkt, bit)	BCM_REFERENCE(osh)
+#endif
+
+#define PKTCTFMAP(osh, p)		BCM_REFERENCE(osh)
+
+/* --------------------------------------------------------------------------
+** Register manipulation macros.
+*/
+
+#define	SET_REG(osh, r, mask, val)	W_REG((osh), (r), ((R_REG((osh), r) & ~(mask)) | (val)))
+
+#ifndef AND_REG
+#define AND_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) & (v))
+#endif   /* !AND_REG */
+
+#ifndef OR_REG
+#define OR_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) | (v))
+#endif   /* !OR_REG */
+
+#if !defined(OSL_SYSUPTIME)
+#define OSL_SYSUPTIME() (0)
+#define OSL_SYSUPTIME_SUPPORT FALSE
+#else
+#define OSL_SYSUPTIME_SUPPORT TRUE
+#endif /* OSL_SYSUPTIME */
+
+#if !defined(PKTC) && !defined(PKTC_DONGLE)
+#define	PKTCGETATTR(skb)	(0)
+#define	PKTCSETATTR(skb, f, p, b) BCM_REFERENCE(skb)
+#define	PKTCCLRATTR(skb)	BCM_REFERENCE(skb)
+#define	PKTCCNT(skb)		(1)
+#define	PKTCLEN(skb)		PKTLEN(NULL, skb)
+#define	PKTCGETFLAGS(skb)	(0)
+#define	PKTCSETFLAGS(skb, f)	BCM_REFERENCE(skb)
+#define	PKTCCLRFLAGS(skb)	BCM_REFERENCE(skb)
+#define	PKTCFLAGS(skb)		(0)
+#define	PKTCSETCNT(skb, c)	BCM_REFERENCE(skb)
+#define	PKTCINCRCNT(skb)	BCM_REFERENCE(skb)
+#define	PKTCADDCNT(skb, c)	BCM_REFERENCE(skb)
+#define	PKTCSETLEN(skb, l)	BCM_REFERENCE(skb)
+#define	PKTCADDLEN(skb, l)	BCM_REFERENCE(skb)
+#define	PKTCSETFLAG(skb, fb)	BCM_REFERENCE(skb)
+#define	PKTCCLRFLAG(skb, fb)	BCM_REFERENCE(skb)
+#define	PKTCLINK(skb)		NULL
+#define	PKTSETCLINK(skb, x)	BCM_REFERENCE(skb)
+#define FOREACH_CHAINED_PKT(skb, nskb) \
+	for ((nskb) = NULL; (skb) != NULL; (skb) = (nskb))
+#define	PKTCFREE		PKTFREE
+#define PKTCENQTAIL(h, t, p) \
+do { \
+	if ((t) == NULL) { \
+		(h) = (t) = (p); \
+	} \
+} while (0)
+#endif /* !PKTC && !PKTC_DONGLE */
+
+#if !defined(HNDCTF) && !defined(PKTC_TX_DONGLE)
+#define PKTSETCHAINED(osh, skb)		BCM_REFERENCE(osh)
+#define PKTCLRCHAINED(osh, skb)		BCM_REFERENCE(osh)
+#define PKTISCHAINED(skb)		FALSE
+#endif
+
+/* Lbuf with fraglist */
+#define PKTFRAGPKTID(osh, lb)		(0)
+#define PKTSETFRAGPKTID(osh, lb, id)	BCM_REFERENCE(osh)
+#define PKTFRAGTOTNUM(osh, lb)		(0)
+#define PKTSETFRAGTOTNUM(osh, lb, tot)	BCM_REFERENCE(osh)
+#define PKTFRAGTOTLEN(osh, lb)		(0)
+#define PKTSETFRAGTOTLEN(osh, lb, len)	BCM_REFERENCE(osh)
+#define PKTFRAGIFINDEX(osh, lb)		(0)
+#define PKTSETFRAGIFINDEX(osh, lb, idx)	BCM_REFERENCE(osh)
+
+/* in rx path, reuse totlen as used len */
+#define PKTFRAGUSEDLEN(osh, lb)			(0)
+#define PKTSETFRAGUSEDLEN(osh, lb, len)		BCM_REFERENCE(osh)
+
+#define PKTFRAGLEN(osh, lb, ix)			(0)
+#define PKTSETFRAGLEN(osh, lb, ix, len)		BCM_REFERENCE(osh)
+#define PKTFRAGDATA_LO(osh, lb, ix)		(0)
+#define PKTSETFRAGDATA_LO(osh, lb, ix, addr)	BCM_REFERENCE(osh)
+#define PKTFRAGDATA_HI(osh, lb, ix)		(0)
+#define PKTSETFRAGDATA_HI(osh, lb, ix, addr)	BCM_REFERENCE(osh)
+
+/* RX FRAG */
+#define PKTISRXFRAG(osh, lb)    	(0)
+#define PKTSETRXFRAG(osh, lb)		BCM_REFERENCE(osh)
+#define PKTRESETRXFRAG(osh, lb)		BCM_REFERENCE(osh)
+
+/* TX FRAG */
+#define PKTISTXFRAG(osh, lb)       	(0)
+#define PKTSETTXFRAG(osh, lb)		BCM_REFERENCE(osh)
+
+#define PKTISFRAG(osh, lb)		(0)
+#define PKTFRAGISCHAINED(osh, i)	(0)
+
+#endif	/* _osl_h_ */
diff --git a/drivers/staging/edison-bcm43340/include/packed_section_end.h b/drivers/staging/edison-bcm43340/include/packed_section_end.h
new file mode 100644
index 0000000..a7133c2
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/packed_section_end.h
@@ -0,0 +1,59 @@
+/*
+ * Declare directives for structure packing. No padding will be provided
+ * between the members of packed structures, and therefore, there is no
+ * guarantee that structure members will be aligned.
+ *
+ * Declaring packed structures is compiler specific. In order to handle all
+ * cases, packed structures should be declared as:
+ *
+ * #include <packed_section_start.h>
+ *
+ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t {
+ *    some_struct_members;
+ * } BWL_POST_PACKED_STRUCT foobar_t;
+ *
+ * #include <packed_section_end.h>
+ *
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: packed_section_end.h 241182 2011-02-17 21:50:03Z $
+ */
+
+
+/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h
+ * and undefined in packed_section_end.h. If it is NOT defined at this
+ * point, then there is a missing include of packed_section_start.h.
+ */
+#ifdef BWL_PACKED_SECTION
+	#undef BWL_PACKED_SECTION
+#else
+	#error "BWL_PACKED_SECTION is NOT defined!"
+#endif
+
+
+
+
+/* Compiler-specific directives for structure packing are declared in
+ * packed_section_start.h. This marks the end of the structure packing section,
+ * so, undef them here.
+ */
+#undef	BWL_PRE_PACKED_STRUCT
+#undef	BWL_POST_PACKED_STRUCT
diff --git a/drivers/staging/edison-bcm43340/include/packed_section_start.h b/drivers/staging/edison-bcm43340/include/packed_section_start.h
new file mode 100644
index 0000000..ed5045c
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/packed_section_start.h
@@ -0,0 +1,63 @@
+/*
+ * Declare directives for structure packing. No padding will be provided
+ * between the members of packed structures, and therefore, there is no
+ * guarantee that structure members will be aligned.
+ *
+ * Declaring packed structures is compiler specific. In order to handle all
+ * cases, packed structures should be declared as:
+ *
+ * #include <packed_section_start.h>
+ *
+ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t {
+ *    some_struct_members;
+ * } BWL_POST_PACKED_STRUCT foobar_t;
+ *
+ * #include <packed_section_end.h>
+ *
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: packed_section_start.h 286783 2011-09-29 06:18:57Z $
+ */
+
+
+/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h
+ * and undefined in packed_section_end.h. If it is already defined at this
+ * point, then there is a missing include of packed_section_end.h.
+ */
+#ifdef BWL_PACKED_SECTION
+	#error "BWL_PACKED_SECTION is already defined!"
+#else
+	#define BWL_PACKED_SECTION
+#endif
+
+
+
+
+/* Declare compiler-specific directives for structure packing. */
+#if defined(__GNUC__) || defined(__lint)
+	#define	BWL_PRE_PACKED_STRUCT
+	#define	BWL_POST_PACKED_STRUCT	__attribute__ ((packed))
+#elif defined(__CC_ARM)
+	#define	BWL_PRE_PACKED_STRUCT	__packed
+	#define	BWL_POST_PACKED_STRUCT
+#else
+	#error "Unknown compiler!"
+#endif
diff --git a/drivers/staging/edison-bcm43340/include/pcicfg.h b/drivers/staging/edison-bcm43340/include/pcicfg.h
new file mode 100644
index 0000000..2d28dde
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/pcicfg.h
@@ -0,0 +1,101 @@
+/*
+ * pcicfg.h: PCI configuration constants and structures.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: pcicfg.h 413666 2013-07-20 01:16:40Z $
+ */
+
+#ifndef	_h_pcicfg_
+#define	_h_pcicfg_
+
+/* A structure for the config registers is nice, but in most
+ * systems the config space is not memory mapped, so we need
+ * field offsets. :-(
+ */
+#define	PCI_CFG_VID		0
+#define	PCI_CFG_DID		2
+#define	PCI_CFG_CMD		4
+#define	PCI_CFG_STAT		6
+#define	PCI_CFG_REV		8
+#define	PCI_CFG_PROGIF		9
+#define	PCI_CFG_SUBCL		0xa
+#define	PCI_CFG_BASECL		0xb
+#define	PCI_CFG_CLSZ		0xc
+#define	PCI_CFG_LATTIM		0xd
+#define	PCI_CFG_HDR		0xe
+#define	PCI_CFG_BIST		0xf
+#define	PCI_CFG_BAR0		0x10
+#define	PCI_CFG_BAR1		0x14
+#define	PCI_CFG_BAR2		0x18
+#define	PCI_CFG_BAR3		0x1c
+#define	PCI_CFG_BAR4		0x20
+#define	PCI_CFG_BAR5		0x24
+#define	PCI_CFG_CIS		0x28
+#define	PCI_CFG_SVID		0x2c
+#define	PCI_CFG_SSID		0x2e
+#define	PCI_CFG_ROMBAR		0x30
+#define PCI_CFG_CAPPTR		0x34
+#define	PCI_CFG_INT		0x3c
+#define	PCI_CFG_PIN		0x3d
+#define	PCI_CFG_MINGNT		0x3e
+#define	PCI_CFG_MAXLAT		0x3f
+#define	PCI_CFG_DEVCTRL		0xd8
+#define	PCI_BAR0_WIN		0x80	/* backplane address space accessed by BAR0 */
+#define	PCI_BAR1_WIN		0x84	/* backplane address space accessed by BAR1 */
+#define	PCI_SPROM_CONTROL	0x88	/* sprom property control */
+#define	PCI_BAR1_CONTROL	0x8c	/* BAR1 region burst control */
+#define	PCI_INT_STATUS		0x90	/* PCI and other cores interrupts */
+#define	PCI_INT_MASK		0x94	/* mask of PCI and other cores interrupts */
+#define PCI_TO_SB_MB		0x98	/* signal backplane interrupts */
+#define PCI_BACKPLANE_ADDR	0xa0	/* address an arbitrary location on the system backplane */
+#define PCI_BACKPLANE_DATA	0xa4	/* data at the location specified by above address */
+#define	PCI_CLK_CTL_ST		0xa8	/* pci config space clock control/status (>=rev14) */
+#define	PCI_BAR0_WIN2		0xac	/* backplane address space accessed by second 4KB of BAR0 */
+#define	PCI_GPIO_IN		0xb0	/* pci config space gpio input (>=rev3) */
+#define	PCI_GPIO_OUT		0xb4	/* pci config space gpio output (>=rev3) */
+#define	PCI_GPIO_OUTEN		0xb8	/* pci config space gpio output enable (>=rev3) */
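+
+/* Illustrative only, not from the original header: because config space is
+ * not memory mapped, the offsets above are handed to the host's config
+ * accessors, e.g. the standard Linux helpers:
+ *
+ *	u16 vid, did;
+ *	pci_read_config_word(pdev, PCI_CFG_VID, &vid);
+ *	pci_read_config_word(pdev, PCI_CFG_DID, &did);
+ */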
+
+#define	PCI_BAR0_SHADOW_OFFSET	(2 * 1024)	/* bar0 + 2K accesses sprom shadow (in pci core) */
+#define	PCI_BAR0_SPROM_OFFSET	(4 * 1024)	/* bar0 + 4K accesses external sprom */
+#define	PCI_BAR0_PCIREGS_OFFSET	(6 * 1024)	/* bar0 + 6K accesses pci core registers */
+#define	PCI_BAR0_PCISBR_OFFSET	(4 * 1024)	/* pci core SB registers are at the end of the
+						 * 8KB window, so their address is the "regular"
+						 * address plus 4K
+						 */
+/*
+ * PCIE GEN2 changed some of the above locations for
+ * Bar0WrapperBase, SecondaryBAR0Window and SecondaryBAR0WrapperBase.
+ * BAR0 maps 32K of register space.
+ */
+#define PCIE2_BAR0_WIN2		0x70 /* backplane address space accessed by second 4KB of BAR0 */
+#define PCIE2_BAR0_CORE2_WIN	0x74 /* backplane address space accessed by second 4KB of BAR0 */
+#define PCIE2_BAR0_CORE2_WIN2	0x78 /* backplane address space accessed by second 4KB of BAR0 */
+
+#define PCI_BAR0_WINSZ		(16 * 1024)	/* bar0 window size Match with corerev 13 */
+/* On pci corerev >= 13 and all pcie, the bar0 is now 16KB and it maps: */
+#define	PCI_16KB0_PCIREGS_OFFSET (8 * 1024)	/* bar0 + 8K accesses pci/pcie core registers */
+#define	PCI_16KB0_CCREGS_OFFSET	(12 * 1024)	/* bar0 + 12K accesses chipc core registers */
+#define PCI_16KBB0_WINSZ	(16 * 1024)	/* bar0 window size */
+
+
+#define PCI_CONFIG_SPACE_SIZE	256
+#endif	/* _h_pcicfg_ */
diff --git a/drivers/staging/edison-bcm43340/include/sbchipc.h b/drivers/staging/edison-bcm43340/include/sbchipc.h
new file mode 100644
index 0000000..4fe5763
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/sbchipc.h
@@ -0,0 +1,3303 @@
+/*
+ * SiliconBackplane Chipcommon core hardware definitions.
+ *
+ * The chipcommon core provides chip identification, SB control,
+ * JTAG, 0/1/2 UARTs, clock frequency control, a watchdog interrupt timer,
+ * GPIO interface, extbus, and support for serial and parallel flashes.
+ *
+ * $Id: sbchipc.h 468568 2014-04-08 05:33:12Z $
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ */
+
+#ifndef	_SBCHIPC_H
+#define	_SBCHIPC_H
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif	/* PAD */
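+
+/* For illustration (not part of the original header): each 'uint32 PAD;'
+ * field below expands through __LINE__ into a uniquely named pad field, so
+ * reserved register slots never collide. E.g.:
+ *
+ *	uint32	PAD;		// on source line 123 -> uint32 pad123;
+ *	uint32	PAD[3];		// on source line 124 -> uint32 pad124[3];
+ */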
+
+typedef struct eci_prerev35 {
+	uint32	eci_output;
+	uint32	eci_control;
+	uint32	eci_inputlo;
+	uint32	eci_inputmi;
+	uint32	eci_inputhi;
+	uint32	eci_inputintpolaritylo;
+	uint32	eci_inputintpolaritymi;
+	uint32	eci_inputintpolarityhi;
+	uint32	eci_intmasklo;
+	uint32	eci_intmaskmi;
+	uint32	eci_intmaskhi;
+	uint32	eci_eventlo;
+	uint32	eci_eventmi;
+	uint32	eci_eventhi;
+	uint32	eci_eventmasklo;
+	uint32	eci_eventmaskmi;
+	uint32	eci_eventmaskhi;
+	uint32	PAD[3];
+} eci_prerev35_t;
+
+typedef struct eci_rev35 {
+	uint32	eci_outputlo;
+	uint32	eci_outputhi;
+	uint32	eci_controllo;
+	uint32	eci_controlhi;
+	uint32	eci_inputlo;
+	uint32	eci_inputhi;
+	uint32	eci_inputintpolaritylo;
+	uint32	eci_inputintpolarityhi;
+	uint32	eci_intmasklo;
+	uint32	eci_intmaskhi;
+	uint32	eci_eventlo;
+	uint32	eci_eventhi;
+	uint32	eci_eventmasklo;
+	uint32	eci_eventmaskhi;
+	uint32	eci_auxtx;
+	uint32	eci_auxrx;
+	uint32	eci_datatag;
+	uint32	eci_uartescvalue;
+	uint32	eci_autobaudctr;
+	uint32	eci_uartfifolevel;
+} eci_rev35_t;
+
+typedef struct flash_config {
+	uint32	PAD[19];
+	/* Flash struct configuration registers (0x18c) for BCM4706 (corerev = 31) */
+	uint32 flashstrconfig;
+} flash_config_t;
+
+typedef volatile struct {
+	uint32	chipid;			/* 0x0 */
+	uint32	capabilities;
+	uint32	corecontrol;		/* corerev >= 1 */
+	uint32	bist;
+
+	/* OTP */
+	uint32	otpstatus;		/* 0x10, corerev >= 10 */
+	uint32	otpcontrol;
+	uint32	otpprog;
+	uint32	otplayout;		/* corerev >= 23 */
+
+	/* Interrupt control */
+	uint32	intstatus;		/* 0x20 */
+	uint32	intmask;
+
+	/* Chip specific regs */
+	uint32	chipcontrol;		/* 0x28, rev >= 11 */
+	uint32	chipstatus;		/* 0x2c, rev >= 11 */
+
+	/* Jtag Master */
+	uint32	jtagcmd;		/* 0x30, rev >= 10 */
+	uint32	jtagir;
+	uint32	jtagdr;
+	uint32	jtagctrl;
+
+	/* serial flash interface registers */
+	uint32	flashcontrol;		/* 0x40 */
+	uint32	flashaddress;
+	uint32	flashdata;
+	uint32	otplayoutextension;	/* rev >= 35 */
+
+	/* Silicon backplane configuration broadcast control */
+	uint32	broadcastaddress;	/* 0x50 */
+	uint32	broadcastdata;
+
+	/* gpio - cleared only by power-on-reset */
+	uint32	gpiopullup;		/* 0x58, corerev >= 20 */
+	uint32	gpiopulldown;		/* 0x5c, corerev >= 20 */
+	uint32	gpioin;			/* 0x60 */
+	uint32	gpioout;		/* 0x64 */
+	uint32	gpioouten;		/* 0x68 */
+	uint32	gpiocontrol;		/* 0x6C */
+	uint32	gpiointpolarity;	/* 0x70 */
+	uint32	gpiointmask;		/* 0x74 */
+
+	/* GPIO events corerev >= 11 */
+	uint32	gpioevent;
+	uint32	gpioeventintmask;
+
+	/* Watchdog timer */
+	uint32	watchdog;		/* 0x80 */
+
+	/* GPIO events corerev >= 11 */
+	uint32	gpioeventintpolarity;
+
+	/* GPIO based LED powersave registers corerev >= 16 */
+	uint32  gpiotimerval;		/* 0x88 */
+	uint32  gpiotimeroutmask;
+
+	/* clock control */
+	uint32	clockcontrol_n;		/* 0x90 */
+	uint32	clockcontrol_sb;	/* aka m0 */
+	uint32	clockcontrol_pci;	/* aka m1 */
+	uint32	clockcontrol_m2;	/* mii/uart/mipsref */
+	uint32	clockcontrol_m3;	/* cpu */
+	uint32	clkdiv;			/* corerev >= 3 */
+	uint32	gpiodebugsel;		/* corerev >= 28 */
+	uint32	capabilities_ext;               	/* 0xac  */
+
+	/* pll delay registers (corerev >= 4) */
+	uint32	pll_on_delay;		/* 0xb0 */
+	uint32	fref_sel_delay;
+	uint32	slow_clk_ctl;		/* 5 < corerev < 10 */
+	uint32	PAD;
+
+	/* Instaclock registers (corerev >= 10) */
+	uint32	system_clk_ctl;		/* 0xc0 */
+	uint32	clkstatestretch;
+	uint32	PAD[2];
+
+	/* Indirect backplane access (corerev >= 22) */
+	uint32	bp_addrlow;		/* 0xd0 */
+	uint32	bp_addrhigh;
+	uint32	bp_data;
+	uint32	PAD;
+	uint32	bp_indaccess;
+	/* SPI registers, corerev >= 37 */
+	uint32	gsioctrl;
+	uint32	gsioaddress;
+	uint32	gsiodata;
+
+	/* More clock dividers (corerev >= 32) */
+	uint32	clkdiv2;
+	/* FAB ID (corerev >= 40) */
+	uint32	otpcontrol1;
+	uint32	fabid;			/* 0xf8 */
+
+	/* In AI chips, pointer to erom */
+	uint32	eromptr;		/* 0xfc */
+
+	/* ExtBus control registers (corerev >= 3) */
+	uint32	pcmcia_config;		/* 0x100 */
+	uint32	pcmcia_memwait;
+	uint32	pcmcia_attrwait;
+	uint32	pcmcia_iowait;
+	uint32	ide_config;
+	uint32	ide_memwait;
+	uint32	ide_attrwait;
+	uint32	ide_iowait;
+	uint32	prog_config;
+	uint32	prog_waitcount;
+	uint32	flash_config;
+	uint32	flash_waitcount;
+	uint32  SECI_config;		/* 0x130 SECI configuration */
+	uint32	SECI_status;
+	uint32	SECI_statusmask;
+	uint32	SECI_rxnibchanged;
+
+	uint32	PAD[20];
+
+	/* SROM interface (corerev >= 32) */
+	uint32	sromcontrol;		/* 0x190 */
+	uint32	sromaddress;
+	uint32	sromdata;
+	uint32	PAD[1];				/* 0x19C */
+	/* NAND flash registers for BCM4706 (corerev = 31) */
+	uint32	nflashctrl;		/* 0x1a0 */
+	uint32	nflashconf;
+	uint32	nflashcoladdr;
+	uint32	nflashrowaddr;
+	uint32	nflashdata;
+	uint32	nflashwaitcnt0;		/* 0x1b4 */
+	uint32	PAD[2];
+
+	uint32  seci_uart_data;		/* 0x1C0 */
+	uint32  seci_uart_bauddiv;
+	uint32  seci_uart_fcr;
+	uint32  seci_uart_lcr;
+	uint32  seci_uart_mcr;
+	uint32  seci_uart_lsr;
+	uint32  seci_uart_msr;
+	uint32  seci_uart_baudadj;
+	/* Clock control and hardware workarounds (corerev >= 20) */
+	uint32	clk_ctl_st;		/* 0x1e0 */
+	uint32	hw_war;
+	uint32	PAD[70];
+
+	/* UARTs */
+	uint8	uart0data;		/* 0x300 */
+	uint8	uart0imr;
+	uint8	uart0fcr;
+	uint8	uart0lcr;
+	uint8	uart0mcr;
+	uint8	uart0lsr;
+	uint8	uart0msr;
+	uint8	uart0scratch;
+	uint8	PAD[248];		/* corerev >= 1 */
+
+	uint8	uart1data;		/* 0x400 */
+	uint8	uart1imr;
+	uint8	uart1fcr;
+	uint8	uart1lcr;
+	uint8	uart1mcr;
+	uint8	uart1lsr;
+	uint8	uart1msr;
+	uint8	uart1scratch;		/* 0x407 */
+	uint32	PAD[62];
+
+	/* save/restore, corerev >= 48 */
+	uint32	sr_capability;		/* 0x500 */
+	uint32	sr_control0;		/* 0x504 */
+	uint32	sr_control1;		/* 0x508 */
+	uint32  gpio_control;		/* 0x50C */
+	uint32	PAD[60];
+
+	/* PMU registers (corerev >= 20) */
+	/* Note: all timers driven by the ILP clock are updated asynchronously
+	 * to HT/ALP. The CPU must read them twice, compare, and retry if the
+	 * two reads differ (see the read-twice sketch after this struct).
+	 */
+	uint32	pmucontrol;		/* 0x600 */
+	uint32	pmucapabilities;
+	uint32	pmustatus;
+	uint32	res_state;
+	uint32	res_pending;
+	uint32	pmutimer;
+	uint32	min_res_mask;
+	uint32	max_res_mask;
+	uint32	res_table_sel;
+	uint32	res_dep_mask;
+	uint32	res_updn_timer;
+	uint32	res_timer;
+	uint32	clkstretch;
+	uint32	pmuwatchdog;
+	uint32	gpiosel;		/* 0x638, rev >= 1 */
+	uint32	gpioenable;		/* 0x63c, rev >= 1 */
+	uint32	res_req_timer_sel;
+	uint32	res_req_timer;
+	uint32	res_req_mask;
+	uint32	PAD;
+	uint32	chipcontrol_addr;	/* 0x650 */
+	uint32	chipcontrol_data;	/* 0x654 */
+	uint32	regcontrol_addr;
+	uint32	regcontrol_data;
+	uint32	pllcontrol_addr;
+	uint32	pllcontrol_data;
+	uint32	pmustrapopt;		/* 0x668, corerev >= 28 */
+	uint32	pmu_xtalfreq;		/* 0x66C, pmurev >= 10 */
+	uint32  retention_ctl;		/* 0x670 */
+	uint32  PAD[3];
+	uint32  retention_grpidx;	/* 0x680 */
+	uint32  retention_grpctl;	/* 0x684 */
+	uint32  PAD[94];
+	uint16	sromotp[512];		/* 0x800 */
+#ifdef NFLASH_SUPPORT
+	/* Nand flash MLC controller registers (corerev >= 38) */
+	uint32	nand_revision;		/* 0xC00 */
+	uint32	nand_cmd_start;
+	uint32	nand_cmd_addr_x;
+	uint32	nand_cmd_addr;
+	uint32	nand_cmd_end_addr;
+	uint32	nand_cs_nand_select;
+	uint32	nand_cs_nand_xor;
+	uint32	PAD;
+	uint32	nand_spare_rd0;
+	uint32	nand_spare_rd4;
+	uint32	nand_spare_rd8;
+	uint32	nand_spare_rd12;
+	uint32	nand_spare_wr0;
+	uint32	nand_spare_wr4;
+	uint32	nand_spare_wr8;
+	uint32	nand_spare_wr12;
+	uint32	nand_acc_control;
+	uint32	PAD;
+	uint32	nand_config;
+	uint32	PAD;
+	uint32	nand_timing_1;
+	uint32	nand_timing_2;
+	uint32	nand_semaphore;
+	uint32	PAD;
+	uint32	nand_devid;
+	uint32	nand_devid_x;
+	uint32	nand_block_lock_status;
+	uint32	nand_intfc_status;
+	uint32	nand_ecc_corr_addr_x;
+	uint32	nand_ecc_corr_addr;
+	uint32	nand_ecc_unc_addr_x;
+	uint32	nand_ecc_unc_addr;
+	uint32	nand_read_error_count;
+	uint32	nand_corr_stat_threshold;
+	uint32	PAD[2];
+	uint32	nand_read_addr_x;
+	uint32	nand_read_addr;
+	uint32	nand_page_program_addr_x;
+	uint32	nand_page_program_addr;
+	uint32	nand_copy_back_addr_x;
+	uint32	nand_copy_back_addr;
+	uint32	nand_block_erase_addr_x;
+	uint32	nand_block_erase_addr;
+	uint32	nand_inv_read_addr_x;
+	uint32	nand_inv_read_addr;
+	uint32	PAD[2];
+	uint32	nand_blk_wr_protect;
+	uint32	PAD[3];
+	uint32	nand_acc_control_cs1;
+	uint32	nand_config_cs1;
+	uint32	nand_timing_1_cs1;
+	uint32	nand_timing_2_cs1;
+	uint32	PAD[20];
+	uint32	nand_spare_rd16;
+	uint32	nand_spare_rd20;
+	uint32	nand_spare_rd24;
+	uint32	nand_spare_rd28;
+	uint32	nand_cache_addr;
+	uint32	nand_cache_data;
+	uint32	nand_ctrl_config;
+	uint32	nand_ctrl_status;
+#endif /* NFLASH_SUPPORT */
+	uint32  gci_corecaps0; /* GCI starting at 0xC00 */
+	uint32  gci_corecaps1;
+	uint32  gci_corecaps2;
+	uint32  gci_corectrl;
+	uint32  gci_corestat; /* 0xC10 */
+	uint32  gci_intstat; /* 0xC14 */
+	uint32  gci_intmask; /* 0xC18 */
+	uint32  gci_wakemask; /* 0xC1C */
+	uint32  gci_levelintstat; /* 0xC20 */
+	uint32  gci_eventintstat; /* 0xC24 */
+	uint32  PAD[6];
+	uint32  gci_indirect_addr; /* 0xC40 */
+	uint32  gci_gpioctl; /* 0xC44 */
+	uint32	gci_gpiostatus;
+	uint32  gci_gpiomask; /* 0xC4C */
+	uint32  PAD;
+	uint32  gci_miscctl; /* 0xC54 */
+	uint32	gci_gpiointmask;
+	uint32	gci_gpiowakemask;
+	uint32  gci_input[32]; /* C60 */
+	uint32  gci_event[32]; /* CE0 */
+	uint32  gci_output[4]; /* D60 */
+	uint32  gci_control_0; /* 0xD70 */
+	uint32  gci_control_1; /* 0xD74 */
+	uint32  gci_level_polreg; /* 0xD78 */
+	uint32  gci_levelintmask; /* 0xD7C */
+	uint32  gci_eventintmask; /* 0xD80 */
+	uint32  PAD[3];
+	uint32  gci_inbandlevelintmask; /* 0xD90 */
+	uint32  gci_inbandeventintmask; /* 0xD94 */
+	uint32  PAD[2];
+	uint32  gci_seciauxtx; /* 0xDA0 */
+	uint32  gci_seciauxrx; /* 0xDA4 */
+	uint32  gci_secitx_datatag; /* 0xDA8 */
+	uint32  gci_secirx_datatag; /* 0xDAC */
+	uint32  gci_secitx_datamask; /* 0xDB0 */
+	uint32  gci_seciusef0tx_reg; /* 0xDB4 */
+	uint32  gci_secif0tx_offset; /* 0xDB8 */
+	uint32  gci_secif0rx_offset; /* 0xDBC */
+	uint32  gci_secif1tx_offset; /* 0xDC0 */
+	uint32  PAD[3];
+	uint32  gci_uartescval; /* DD0 */
+	uint32  PAD[3];
+	uint32  gci_secibauddiv; /* DE0 */
+	uint32  gci_secifcr; /* DE4 */
+	uint32  gci_secilcr; /* DE8 */
+	uint32  gci_secimcr; /* DEC */
+	uint32  PAD[2];
+	uint32  gci_baudadj; /* DF8 */
+	uint32  PAD;
+	uint32  gci_chipctrl; /* 0xE00 */
+	uint32  gci_chipsts; /* 0xE04 */
+} chipcregs_t;
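+
+/* Not part of the original header: a minimal sketch of the read-twice rule
+ * for ILP-driven PMU timers noted in the struct above, assuming the usual
+ * osl.h R_REG() accessor and a mapped chipcregs_t pointer 'cc':
+ *
+ *	uint32 prev, cur = R_REG(osh, &cc->pmutimer);
+ *	do {
+ *		prev = cur;
+ *		cur = R_REG(osh, &cc->pmutimer);
+ *	} while (cur != prev);
+ */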
+
+#endif /* _LANGUAGE_ASSEMBLY */
+
+
+#define	CC_CHIPID		0
+#define	CC_CAPABILITIES		4
+#define	CC_CHIPST		0x2c
+#define	CC_EROMPTR		0xfc
+
+#define CC_OTPST		0x10
+#define	CC_JTAGCMD		0x30
+#define	CC_JTAGIR		0x34
+#define	CC_JTAGDR		0x38
+#define	CC_JTAGCTRL		0x3c
+#define	CC_GPIOPU		0x58
+#define	CC_GPIOPD		0x5c
+#define	CC_GPIOIN		0x60
+#define	CC_GPIOOUT		0x64
+#define	CC_GPIOOUTEN		0x68
+#define	CC_GPIOCTRL		0x6c
+#define	CC_GPIOPOL		0x70
+#define	CC_GPIOINTM		0x74
+#define	CC_WATCHDOG		0x80
+#define	CC_CLKC_N		0x90
+#define	CC_CLKC_M0		0x94
+#define	CC_CLKC_M1		0x98
+#define	CC_CLKC_M2		0x9c
+#define	CC_CLKC_M3		0xa0
+#define	CC_CLKDIV		0xa4
+#define	CC_SYS_CLK_CTL		0xc0
+#define	CC_CLK_CTL_ST		SI_CLK_CTL_ST
+#define	PMU_CTL			0x600
+#define	PMU_CAP			0x604
+#define	PMU_ST			0x608
+#define PMU_RES_STATE		0x60c
+#define PMU_TIMER		0x614
+#define	PMU_MIN_RES_MASK	0x618
+#define	PMU_MAX_RES_MASK	0x61c
+#define CC_CHIPCTL_ADDR         0x650
+#define CC_CHIPCTL_DATA         0x654
+#define PMU_REG_CONTROL_ADDR	0x658
+#define PMU_REG_CONTROL_DATA	0x65C
+#define PMU_PLL_CONTROL_ADDR 	0x660
+#define PMU_PLL_CONTROL_DATA 	0x664
+#define CC_SROM_CTRL		0x190
+#define	CC_SROM_OTP		0x800		/* SROM/OTP address space */
+#define CC_GCI_INDIRECT_ADDR_REG	0xC40
+#define CC_GCI_CHIP_CTRL_REG	0xE00
+#define CC_GCI_CC_OFFSET_2	2
+#define CC_GCI_CC_OFFSET_5	5
+
+#define CHIPCTRLREG0 0x0
+#define CHIPCTRLREG1 0x1
+#define CHIPCTRLREG2 0x2
+#define CHIPCTRLREG3 0x3
+#define CHIPCTRLREG4 0x4
+#define CHIPCTRLREG5 0x5
+#define CHIPCTRLREG6 0x6
+#define REGCTRLREG4 0x4
+#define REGCTRLREG5 0x5
+#define REGCTRLREG6 0x6
+#define PMU_RES_STATE	0x60c
+#define PMU_RES_PENDING 0x610
+#define PMU_TIMER		0x614
+#define MINRESMASKREG 0x618
+#define MAXRESMASKREG 0x61c
+#define CHIPCTRLADDR 0x650
+#define CHIPCTRLDATA 0x654
+#define RSRCTABLEADDR 0x620
+#define PMU_RES_DEP_MASK 0x624
+#define RSRCUPDWNTIME 0x628
+#define PMUREG_RESREQ_MASK 0x68c
+#define EXT_LPO_AVAIL 0x100
+#define LPO_SEL					(1 << 0)
+#define CC_EXT_LPO_PU 0x200000
+#define GC_EXT_LPO_PU 0x2
+#define CC_INT_LPO_PU 0x100000
+#define GC_INT_LPO_PU 0x1
+#define EXT_LPO_SEL 0x8
+#define INT_LPO_SEL 0x4
+#define ENABLE_FINE_CBUCK_CTRL 			(1 << 30)
+#define REGCTRL5_PWM_AUTO_CTRL_MASK 		0x007e0000
+#define REGCTRL5_PWM_AUTO_CTRL_SHIFT		17
+#define REGCTRL6_PWM_AUTO_CTRL_MASK 		0x3fff0000
+#define REGCTRL6_PWM_AUTO_CTRL_SHIFT		16
+
+#ifdef NFLASH_SUPPORT
+/* NAND flash support */
+#define CC_NAND_REVISION	0xC00
+#define CC_NAND_CMD_START	0xC04
+#define CC_NAND_CMD_ADDR	0xC0C
+#define CC_NAND_SPARE_RD_0	0xC20
+#define CC_NAND_SPARE_RD_4	0xC24
+#define CC_NAND_SPARE_RD_8	0xC28
+#define CC_NAND_SPARE_RD_C	0xC2C
+#define CC_NAND_CONFIG		0xC48
+#define CC_NAND_DEVID		0xC60
+#define CC_NAND_DEVID_EXT	0xC64
+#define CC_NAND_INTFC_STATUS	0xC6C
+#endif /* NFLASH_SUPPORT */
+
+/* chipid */
+#define	CID_ID_MASK		0x0000ffff	/* Chip Id mask */
+#define	CID_REV_MASK		0x000f0000	/* Chip Revision mask */
+#define	CID_REV_SHIFT		16		/* Chip Revision shift */
+#define	CID_PKG_MASK		0x00f00000	/* Package Option mask */
+#define	CID_PKG_SHIFT		20		/* Package Option shift */
+#define	CID_CC_MASK		0x0f000000	/* CoreCount (corerev >= 4) */
+#define CID_CC_SHIFT		24
+#define	CID_TYPE_MASK		0xf0000000	/* Chip Type */
+#define CID_TYPE_SHIFT		28
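+
+/* For example (illustrative, not from the original header), the revision
+ * and package option are recovered from the chipid register as:
+ *
+ *	uint32 cid = R_REG(osh, &cc->chipid);
+ *	uint chiprev = (cid & CID_REV_MASK) >> CID_REV_SHIFT;
+ *	uint chippkg = (cid & CID_PKG_MASK) >> CID_PKG_SHIFT;
+ */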
+
+/* capabilities */
+#define	CC_CAP_UARTS_MASK	0x00000003	/* Number of UARTs */
+#define CC_CAP_MIPSEB		0x00000004	/* MIPS is in big-endian mode */
+#define CC_CAP_UCLKSEL		0x00000018	/* UARTs clock select */
+#define CC_CAP_UINTCLK		0x00000008	/* UARTs are driven by internal divided clock */
+#define CC_CAP_UARTGPIO		0x00000020	/* UARTs own GPIOs 15:12 */
+#define CC_CAP_EXTBUS_MASK	0x000000c0	/* External bus mask */
+#define CC_CAP_EXTBUS_NONE	0x00000000	/* No ExtBus present */
+#define CC_CAP_EXTBUS_FULL	0x00000040	/* ExtBus: PCMCIA, IDE & Prog */
+#define CC_CAP_EXTBUS_PROG	0x00000080	/* ExtBus: ProgIf only */
+#define	CC_CAP_FLASH_MASK	0x00000700	/* Type of flash */
+#define	CC_CAP_PLL_MASK		0x00038000	/* Type of PLL */
+#define CC_CAP_PWR_CTL		0x00040000	/* Power control */
+#define CC_CAP_OTPSIZE		0x00380000	/* OTP Size (0 = none) */
+#define CC_CAP_OTPSIZE_SHIFT	19		/* OTP Size shift */
+#define CC_CAP_OTPSIZE_BASE	5		/* OTP Size base */
+#define CC_CAP_JTAGP		0x00400000	/* JTAG Master Present */
+#define CC_CAP_ROM		0x00800000	/* Internal boot rom active */
+#define CC_CAP_BKPLN64		0x08000000	/* 64-bit backplane */
+#define	CC_CAP_PMU		0x10000000	/* PMU Present, rev >= 20 */
+#define	CC_CAP_ECI		0x20000000	/* ECI Present, rev >= 21 */
+#define	CC_CAP_SROM		0x40000000	/* Srom Present, rev >= 32 */
+#define	CC_CAP_NFLASH		0x80000000	/* Nand flash present, rev >= 35 */
+
+#define	CC_CAP2_SECI		0x00000001	/* SECI Present, rev >= 36 */
+#define	CC_CAP2_GSIO		0x00000002	/* GSIO (spi/i2c) present, rev >= 37 */
+
+/* capabilities extension */
+#define CC_CAP_EXT_SECI_PRESENT	0x00000001    /* SECI present */
+#define CC_CAP_EXT_GCI_PRESENT  0x00000004    /* GCI present */
+
+/* WL Channel Info to BT via GCI - bits 40 - 47 */
+#define GCI_WL_CHN_INFO_MASK 	(0xFF00)
+/* PLL type */
+#define PLL_NONE		0x00000000
+#define PLL_TYPE1		0x00010000	/* 48MHz base, 3 dividers */
+#define PLL_TYPE2		0x00020000	/* 48MHz, 4 dividers */
+#define PLL_TYPE3		0x00030000	/* 25MHz, 2 dividers */
+#define PLL_TYPE4		0x00008000	/* 48MHz, 4 dividers */
+#define PLL_TYPE5		0x00018000	/* 25MHz, 4 dividers */
+#define PLL_TYPE6		0x00028000	/* 100/200 or 120/240 only */
+#define PLL_TYPE7		0x00038000	/* 25MHz, 4 dividers */
+
+/* ILP clock */
+#define	ILP_CLOCK		32000
+
+/* ALP clock on pre-PMU chips */
+#define	ALP_CLOCK		20000000
+
+#ifdef CFG_SIM
+#define NS_ALP_CLOCK		84922
+#define NS_SLOW_ALP_CLOCK	84922
+#define NS_CPU_CLOCK		534500
+#define NS_SLOW_CPU_CLOCK	534500
+#define NS_SI_CLOCK		271750
+#define NS_SLOW_SI_CLOCK	271750
+#define NS_FAST_MEM_CLOCK	271750
+#define NS_MEM_CLOCK		271750
+#define NS_SLOW_MEM_CLOCK	271750
+#else
+#define NS_ALP_CLOCK		125000000
+#define NS_SLOW_ALP_CLOCK	100000000
+#define NS_CPU_CLOCK		1000000000
+#define NS_SLOW_CPU_CLOCK	800000000
+#define NS_SI_CLOCK		250000000
+#define NS_SLOW_SI_CLOCK	200000000
+#define NS_FAST_MEM_CLOCK	800000000
+#define NS_MEM_CLOCK		533000000
+#define NS_SLOW_MEM_CLOCK	400000000
+#endif /* CFG_SIM */
+
+/* HT clock */
+#define	HT_CLOCK		80000000
+
+/* corecontrol */
+#define CC_UARTCLKO		0x00000001	/* Drive UART with internal clock */
+#define	CC_SE			0x00000002	/* sync clk out enable (corerev >= 3) */
+#define CC_ASYNCGPIO	0x00000004	/* 1=generate GPIO interrupt without backplane clock */
+#define CC_UARTCLKEN		0x00000008	/* enable UART Clock (corerev >= 21) */
+
+/* 4321 chipcontrol */
+#define CHIPCTRL_4321A0_DEFAULT	0x3a4
+#define CHIPCTRL_4321A1_DEFAULT	0x0a4
+#define CHIPCTRL_4321_PLL_DOWN	0x800000	/* serdes PLL down override */
+
+/* Fields in the otpstatus register in rev >= 21 */
+#define OTPS_OL_MASK		0x000000ff
+#define OTPS_OL_MFG		0x00000001	/* manuf row is locked */
+#define OTPS_OL_OR1		0x00000002	/* otp redundancy row 1 is locked */
+#define OTPS_OL_OR2		0x00000004	/* otp redundancy row 2 is locked */
+#define OTPS_OL_GU		0x00000008	/* general use region is locked */
+#define OTPS_GUP_MASK		0x00000f00
+#define OTPS_GUP_SHIFT		8
+#define OTPS_GUP_HW		0x00000100	/* h/w subregion is programmed */
+#define OTPS_GUP_SW		0x00000200	/* s/w subregion is programmed */
+#define OTPS_GUP_CI		0x00000400	/* chipid/pkgopt subregion is programmed */
+#define OTPS_GUP_FUSE		0x00000800	/* fuse subregion is programmed */
+#define OTPS_READY		0x00001000
+#define OTPS_RV(x)		(1 << (16 + (x)))	/* redundancy entry valid */
+#define OTPS_RV_MASK		0x0fff0000
+#define OTPS_PROGOK     0x40000000
+
+/* Fields in the otpcontrol register in rev >= 21 */
+#define OTPC_PROGSEL		0x00000001
+#define OTPC_PCOUNT_MASK	0x0000000e
+#define OTPC_PCOUNT_SHIFT	1
+#define OTPC_VSEL_MASK		0x000000f0
+#define OTPC_VSEL_SHIFT		4
+#define OTPC_TMM_MASK		0x00000700
+#define OTPC_TMM_SHIFT		8
+#define OTPC_ODM		0x00000800
+#define OTPC_PROGEN		0x80000000
+
+/* Fields in the 40nm otpcontrol register in rev >= 40 */
+#define OTPC_40NM_PROGSEL_SHIFT	0
+#define OTPC_40NM_PCOUNT_SHIFT	1
+#define OTPC_40NM_PCOUNT_WR	0xA
+#define OTPC_40NM_PCOUNT_V1X	0xB
+#define OTPC_40NM_REGCSEL_SHIFT	5
+#define OTPC_40NM_REGCSEL_DEF	0x4
+#define OTPC_40NM_PROGIN_SHIFT	8
+#define OTPC_40NM_R2X_SHIFT	10
+#define OTPC_40NM_ODM_SHIFT	11
+#define OTPC_40NM_DF_SHIFT	15
+#define OTPC_40NM_VSEL_SHIFT	16
+#define OTPC_40NM_VSEL_WR	0xA
+#define OTPC_40NM_VSEL_V1X	0xA
+#define OTPC_40NM_VSEL_R1X	0x5
+#define OTPC_40NM_COFAIL_SHIFT	30
+
+#define OTPC1_CPCSEL_SHIFT	0
+#define OTPC1_CPCSEL_DEF	6
+#define OTPC1_TM_SHIFT		8
+#define OTPC1_TM_WR		0x84
+#define OTPC1_TM_V1X		0x84
+#define OTPC1_TM_R1X		0x4
+
+/* Fields in otpprog in rev >= 21 and HND OTP */
+#define OTPP_COL_MASK		0x000000ff
+#define OTPP_COL_SHIFT		0
+#define OTPP_ROW_MASK		0x0000ff00
+#define OTPP_ROW_SHIFT		8
+#define OTPP_OC_MASK		0x0f000000
+#define OTPP_OC_SHIFT		24
+#define OTPP_READERR		0x10000000
+#define OTPP_VALUE_MASK		0x20000000
+#define OTPP_VALUE_SHIFT	29
+#define OTPP_START_BUSY		0x80000000
+#define	OTPP_READ		0x40000000	/* HND OTP */
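+
+/* A hedged sketch (not from the original header) of an OTP bit read built
+ * from the fields above, following the usual program/poll pattern; the
+ * OTPPOC_* opcodes are defined further below:
+ *
+ *	W_REG(osh, &cc->otpprog, OTPP_START_BUSY |
+ *	      ((OTPPOC_READ << OTPP_OC_SHIFT) & OTPP_OC_MASK) |
+ *	      ((row << OTPP_ROW_SHIFT) & OTPP_ROW_MASK) |
+ *	      ((col << OTPP_COL_SHIFT) & OTPP_COL_MASK));
+ *	while ((st = R_REG(osh, &cc->otpprog)) & OTPP_START_BUSY)
+ *		;
+ *	bit = (st & OTPP_VALUE_MASK) >> OTPP_VALUE_SHIFT;
+ */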
+
+/* Fields in otplayout register */
+#define OTPL_HWRGN_OFF_MASK	0x00000FFF
+#define OTPL_HWRGN_OFF_SHIFT	0
+#define OTPL_WRAP_REVID_MASK	0x00F80000
+#define OTPL_WRAP_REVID_SHIFT	19
+#define OTPL_WRAP_TYPE_MASK	0x00070000
+#define OTPL_WRAP_TYPE_SHIFT	16
+#define OTPL_WRAP_TYPE_65NM	0
+#define OTPL_WRAP_TYPE_40NM	1
+#define OTPL_ROW_SIZE_MASK	0x0000F000
+#define OTPL_ROW_SIZE_SHIFT	12
+
+/* otplayout reg corerev >= 36 */
+#define OTP_CISFORMAT_NEW	0x80000000
+
+/* Opcodes for OTPP_OC field */
+#define OTPPOC_READ		0
+#define OTPPOC_BIT_PROG		1
+#define OTPPOC_VERIFY		3
+#define OTPPOC_INIT		4
+#define OTPPOC_SET		5
+#define OTPPOC_RESET		6
+#define OTPPOC_OCST		7
+#define OTPPOC_ROW_LOCK		8
+#define OTPPOC_PRESCN_TEST	9
+
+/* Opcodes for OTPP_OC field (40NM) */
+#define OTPPOC_READ_40NM	0
+#define OTPPOC_PROG_ENABLE_40NM 1
+#define OTPPOC_PROG_DISABLE_40NM	2
+#define OTPPOC_VERIFY_40NM	3
+#define OTPPOC_WORD_VERIFY_1_40NM	4
+#define OTPPOC_ROW_LOCK_40NM	5
+#define OTPPOC_STBY_40NM	6
+#define OTPPOC_WAKEUP_40NM	7
+#define OTPPOC_WORD_VERIFY_0_40NM	8
+#define OTPPOC_PRESCN_TEST_40NM 9
+#define OTPPOC_BIT_PROG_40NM	10
+#define OTPPOC_WORDPROG_40NM	11
+#define OTPPOC_BURNIN_40NM	12
+#define OTPPOC_AUTORELOAD_40NM	13
+#define OTPPOC_OVST_READ_40NM	14
+#define OTPPOC_OVST_PROG_40NM	15
+
+/* Fields in otplayoutextension */
+#define OTPLAYOUTEXT_FUSE_MASK	0x3FF
+
+
+/* Jtagm characteristics that appeared at a given corerev */
+#define	JTAGM_CREV_OLD		10	/* Old command set, 16bit max IR */
+#define	JTAGM_CREV_IRP		22	/* Able to do pause-ir */
+#define	JTAGM_CREV_RTI		28	/* Able to do return-to-idle */
+
+/* jtagcmd */
+#define JCMD_START		0x80000000
+#define JCMD_BUSY		0x80000000
+#define JCMD_STATE_MASK		0x60000000
+#define JCMD_STATE_TLR		0x00000000	/* Test-logic-reset */
+#define JCMD_STATE_PIR		0x20000000	/* Pause IR */
+#define JCMD_STATE_PDR		0x40000000	/* Pause DR */
+#define JCMD_STATE_RTI		0x60000000	/* Run-test-idle */
+#define JCMD0_ACC_MASK		0x0000f000
+#define JCMD0_ACC_IRDR		0x00000000
+#define JCMD0_ACC_DR		0x00001000
+#define JCMD0_ACC_IR		0x00002000
+#define JCMD0_ACC_RESET		0x00003000
+#define JCMD0_ACC_IRPDR		0x00004000
+#define JCMD0_ACC_PDR		0x00005000
+#define JCMD0_IRW_MASK		0x00000f00
+#define JCMD_ACC_MASK		0x000f0000	/* Changes for corerev 11 */
+#define JCMD_ACC_IRDR		0x00000000
+#define JCMD_ACC_DR		0x00010000
+#define JCMD_ACC_IR		0x00020000
+#define JCMD_ACC_RESET		0x00030000
+#define JCMD_ACC_IRPDR		0x00040000
+#define JCMD_ACC_PDR		0x00050000
+#define JCMD_ACC_PIR		0x00060000
+#define JCMD_ACC_IRDR_I		0x00070000	/* rev 28: return to run-test-idle */
+#define JCMD_ACC_DR_I		0x00080000	/* rev 28: return to run-test-idle */
+#define JCMD_IRW_MASK		0x00001f00
+#define JCMD_IRW_SHIFT		8
+#define JCMD_DRW_MASK		0x0000003f
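+
+/* Illustrative only: a jtagcmd is composed from the fields above and polled
+ * until the busy bit clears (the exact width encoding is corerev specific;
+ * this assumes the corerev >= 11 layout):
+ *
+ *	W_REG(osh, &cc->jtagcmd, JCMD_START | JCMD_ACC_IR |
+ *	      (ir_width << JCMD_IRW_SHIFT) | dr_width);
+ *	while (R_REG(osh, &cc->jtagcmd) & JCMD_BUSY)
+ *		;
+ */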
+
+/* jtagctrl */
+#define JCTRL_FORCE_CLK		4		/* Force clock */
+#define JCTRL_EXT_EN		2		/* Enable external targets */
+#define JCTRL_EN		1		/* Enable Jtag master */
+
+/* Fields in clkdiv */
+#define	CLKD_SFLASH		0x0f000000
+#define	CLKD_SFLASH_SHIFT	24
+#define	CLKD_OTP		0x000f0000
+#define	CLKD_OTP_SHIFT		16
+#define	CLKD_JTAG		0x00000f00
+#define	CLKD_JTAG_SHIFT		8
+#define	CLKD_UART		0x000000ff
+
+#define	CLKD2_SROM		0x00000003
+
+/* intstatus/intmask */
+#define	CI_GPIO			0x00000001	/* gpio intr */
+#define	CI_EI			0x00000002	/* extif intr (corerev >= 3) */
+#define	CI_TEMP			0x00000004	/* temp. ctrl intr (corerev >= 15) */
+#define	CI_SIRQ			0x00000008	/* serial IRQ intr (corerev >= 15) */
+#define	CI_ECI			0x00000010	/* eci intr (corerev >= 21) */
+#define	CI_PMU			0x00000020	/* pmu intr (corerev >= 21) */
+#define	CI_UART			0x00000040	/* uart intr (corerev >= 21) */
+#define	CI_WDRESET		0x80000000	/* watchdog reset occurred */
+
+/* slow_clk_ctl */
+#define SCC_SS_MASK		0x00000007	/* slow clock source mask */
+#define	SCC_SS_LPO		0x00000000	/* source of slow clock is LPO */
+#define	SCC_SS_XTAL		0x00000001	/* source of slow clock is crystal */
+#define	SCC_SS_PCI		0x00000002	/* source of slow clock is PCI */
+#define SCC_LF			0x00000200	/* LPOFreqSel, 1: 160KHz, 0: 32KHz */
+#define SCC_LP			0x00000400	/* LPOPowerDown, 1: LPO is disabled,
+						 * 0: LPO is enabled
+						 */
+#define SCC_FS			0x00000800	/* ForceSlowClk, 1: sb/cores running on slow clock,
+						 * 0: power logic control
+						 */
+#define SCC_IP			0x00001000	/* IgnorePllOffReq, 1/0: power logic ignores/honors
+						 * PLL clock disable requests from core
+						 */
+#define SCC_XC			0x00002000	/* XtalControlEn, 1/0: power logic does/doesn't
+						 * disable crystal when appropriate
+						 */
+#define SCC_XP			0x00004000	/* XtalPU (RO), 1/0: crystal running/disabled */
+#define SCC_CD_MASK		0xffff0000	/* ClockDivider (SlowClk = 1/(4+divisor)) */
+#define SCC_CD_SHIFT		16
+
+/* system_clk_ctl */
+#define	SYCC_IE			0x00000001	/* ILPen: Enable Idle Low Power */
+#define	SYCC_AE			0x00000002	/* ALPen: Enable Active Low Power */
+#define	SYCC_FP			0x00000004	/* ForcePLLOn */
+#define	SYCC_AR			0x00000008	/* Force ALP (or HT if ALPen is not set) */
+#define	SYCC_HR			0x00000010	/* Force HT */
+#define SYCC_CD_MASK		0xffff0000	/* ClkDiv (ILP = 1/(4 * (divisor + 1))) */
+#define SYCC_CD_SHIFT		16
+
+/* Indirect backplane access */
+#define	BPIA_BYTEEN		0x0000000f
+#define	BPIA_SZ1		0x00000001
+#define	BPIA_SZ2		0x00000003
+#define	BPIA_SZ4		0x00000007
+#define	BPIA_SZ8		0x0000000f
+#define	BPIA_WRITE		0x00000100
+#define	BPIA_START		0x00000200
+#define	BPIA_BUSY		0x00000200
+#define	BPIA_ERROR		0x00000400
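+
+/* A minimal sketch (assumption, not from the original header) of a 4-byte
+ * indirect backplane read using the fields above: program the 64-bit
+ * address, start the access, poll busy, then check for errors:
+ *
+ *	W_REG(osh, &cc->bp_addrlow, addr_lo);
+ *	W_REG(osh, &cc->bp_addrhigh, addr_hi);
+ *	W_REG(osh, &cc->bp_indaccess, BPIA_SZ4 | BPIA_START);
+ *	while (R_REG(osh, &cc->bp_indaccess) & BPIA_BUSY)
+ *		;
+ *	if (!(R_REG(osh, &cc->bp_indaccess) & BPIA_ERROR))
+ *		val = R_REG(osh, &cc->bp_data);
+ */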
+
+/* pcmcia/prog/flash_config */
+#define	CF_EN			0x00000001	/* enable */
+#define	CF_EM_MASK		0x0000000e	/* mode */
+#define	CF_EM_SHIFT		1
+#define	CF_EM_FLASH		0		/* flash/asynchronous mode */
+#define	CF_EM_SYNC		2		/* synchronous mode */
+#define	CF_EM_PCMCIA		4		/* pcmcia mode */
+#define	CF_DS			0x00000010	/* destsize:  0=8bit, 1=16bit */
+#define	CF_BS			0x00000020	/* byteswap */
+#define	CF_CD_MASK		0x000000c0	/* clock divider */
+#define	CF_CD_SHIFT		6
+#define	CF_CD_DIV2		0x00000000	/* backplane/2 */
+#define	CF_CD_DIV3		0x00000040	/* backplane/3 */
+#define	CF_CD_DIV4		0x00000080	/* backplane/4 */
+#define	CF_CE			0x00000100	/* clock enable */
+#define	CF_SB			0x00000200	/* size/bytestrobe (synch only) */
+
+/* pcmcia_memwait */
+#define	PM_W0_MASK		0x0000003f	/* waitcount0 */
+#define	PM_W1_MASK		0x00001f00	/* waitcount1 */
+#define	PM_W1_SHIFT		8
+#define	PM_W2_MASK		0x001f0000	/* waitcount2 */
+#define	PM_W2_SHIFT		16
+#define	PM_W3_MASK		0x1f000000	/* waitcount3 */
+#define	PM_W3_SHIFT		24
+
+/* pcmcia_attrwait */
+#define	PA_W0_MASK		0x0000003f	/* waitcount0 */
+#define	PA_W1_MASK		0x00001f00	/* waitcount1 */
+#define	PA_W1_SHIFT		8
+#define	PA_W2_MASK		0x001f0000	/* waitcount2 */
+#define	PA_W2_SHIFT		16
+#define	PA_W3_MASK		0x1f000000	/* waitcount3 */
+#define	PA_W3_SHIFT		24
+
+/* pcmcia_iowait */
+#define	PI_W0_MASK		0x0000003f	/* waitcount0 */
+#define	PI_W1_MASK		0x00001f00	/* waitcount1 */
+#define	PI_W1_SHIFT		8
+#define	PI_W2_MASK		0x001f0000	/* waitcount2 */
+#define	PI_W2_SHIFT		16
+#define	PI_W3_MASK		0x1f000000	/* waitcount3 */
+#define	PI_W3_SHIFT		24
+
+/* prog_waitcount */
+#define	PW_W0_MASK		0x0000001f	/* waitcount0 */
+#define	PW_W1_MASK		0x00001f00	/* waitcount1 */
+#define	PW_W1_SHIFT		8
+#define	PW_W2_MASK		0x001f0000	/* waitcount2 */
+#define	PW_W2_SHIFT		16
+#define	PW_W3_MASK		0x1f000000	/* waitcount3 */
+#define	PW_W3_SHIFT		24
+
+#define PW_W0       		0x0000000c
+#define PW_W1       		0x00000a00
+#define PW_W2       		0x00020000
+#define PW_W3       		0x01000000
+
+/* flash_waitcount */
+#define	FW_W0_MASK		0x0000003f	/* waitcount0 */
+#define	FW_W1_MASK		0x00001f00	/* waitcount1 */
+#define	FW_W1_SHIFT		8
+#define	FW_W2_MASK		0x001f0000	/* waitcount2 */
+#define	FW_W2_SHIFT		16
+#define	FW_W3_MASK		0x1f000000	/* waitcount3 */
+#define	FW_W3_SHIFT		24
+
+/* When Srom support present, fields in sromcontrol */
+#define	SRC_START		0x80000000
+#define	SRC_BUSY		0x80000000
+#define	SRC_OPCODE		0x60000000
+#define	SRC_OP_READ		0x00000000
+#define	SRC_OP_WRITE		0x20000000
+#define	SRC_OP_WRDIS		0x40000000
+#define	SRC_OP_WREN		0x60000000
+#define	SRC_OTPSEL		0x00000010
+#define SRC_OTPPRESENT		0x00000020
+#define	SRC_LOCK		0x00000008
+#define	SRC_SIZE_MASK		0x00000006
+#define	SRC_SIZE_1K		0x00000000
+#define	SRC_SIZE_4K		0x00000002
+#define	SRC_SIZE_16K		0x00000004
+#define	SRC_SIZE_SHIFT		1
+#define	SRC_PRESENT		0x00000001
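+
+/* Illustrative only (not from the original header): a 16-bit SROM word read
+ * through sromcontrol/sromaddress/sromdata, where 'byteoff' is the byte
+ * offset of the word being read:
+ *
+ *	W_REG(osh, &cc->sromaddress, byteoff);
+ *	W_REG(osh, &cc->sromcontrol, SRC_START | SRC_OP_READ);
+ *	while (R_REG(osh, &cc->sromcontrol) & SRC_BUSY)
+ *		;
+ *	val = (uint16)R_REG(osh, &cc->sromdata);
+ */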
+
+/* Fields in pmucontrol */
+#define	PCTL_ILP_DIV_MASK	0xffff0000
+#define	PCTL_ILP_DIV_SHIFT	16
+#define PCTL_LQ_REQ_EN		0x00008000
+#define PCTL_PLL_PLLCTL_UPD	0x00000400	/* rev 2 */
+#define PCTL_NOILP_ON_WAIT	0x00000200	/* rev 1 */
+#define	PCTL_HT_REQ_EN		0x00000100
+#define	PCTL_ALP_REQ_EN		0x00000080
+#define	PCTL_XTALFREQ_MASK	0x0000007c
+#define	PCTL_XTALFREQ_SHIFT	2
+#define	PCTL_ILP_DIV_EN		0x00000002
+#define	PCTL_LPO_SEL		0x00000001
+
+/*  Retention Control */
+#define PMU_RCTL_CLK_DIV_SHIFT		0
+#define PMU_RCTL_CHAIN_LEN_SHIFT	12
+#define PMU_RCTL_MACPHY_DISABLE_SHIFT	26
+#define PMU_RCTL_MACPHY_DISABLE_MASK	(1 << 26)
+#define PMU_RCTL_LOGIC_DISABLE_SHIFT	27
+#define PMU_RCTL_LOGIC_DISABLE_MASK	(1 << 27)
+#define PMU_RCTL_MEMSLP_LOG_SHIFT	28
+#define PMU_RCTL_MEMSLP_LOG_MASK	(1 << 28)
+#define PMU_RCTL_MEMRETSLP_LOG_SHIFT	29
+#define PMU_RCTL_MEMRETSLP_LOG_MASK	(1 << 29)
+
+/*  Retention Group Control */
+#define PMU_RCTLGRP_CHAIN_LEN_SHIFT	0
+#define PMU_RCTLGRP_RMODE_ENABLE_SHIFT	14
+#define PMU_RCTLGRP_RMODE_ENABLE_MASK	(1 << 14)
+#define PMU_RCTLGRP_DFT_ENABLE_SHIFT	15
+#define PMU_RCTLGRP_DFT_ENABLE_MASK	(1 << 15)
+#define PMU_RCTLGRP_NSRST_DISABLE_SHIFT	16
+#define PMU_RCTLGRP_NSRST_DISABLE_MASK	(1 << 16)
+/*  Retention Group Control special for 4334 */
+#define PMU4334_RCTLGRP_CHAIN_LEN_GRP0	338
+#define PMU4334_RCTLGRP_CHAIN_LEN_GRP1	315
+/*  Retention Group Control special for 43341 */
+#define PMU43341_RCTLGRP_CHAIN_LEN_GRP0	366
+#define PMU43341_RCTLGRP_CHAIN_LEN_GRP1	330
+
+/* Fields in clkstretch */
+#define CSTRETCH_HT		0xffff0000
+#define CSTRETCH_ALP		0x0000ffff
+
+/* gpiotimerval */
+#define GPIO_ONTIME_SHIFT	16
+
+/* clockcontrol_n */
+#define	CN_N1_MASK		0x3f		/* n1 control */
+#define	CN_N2_MASK		0x3f00		/* n2 control */
+#define	CN_N2_SHIFT		8
+#define	CN_PLLC_MASK		0xf0000		/* pll control */
+#define	CN_PLLC_SHIFT		16
+
+/* clockcontrol_sb/pci/uart */
+#define	CC_M1_MASK		0x3f		/* m1 control */
+#define	CC_M2_MASK		0x3f00		/* m2 control */
+#define	CC_M2_SHIFT		8
+#define	CC_M3_MASK		0x3f0000	/* m3 control */
+#define	CC_M3_SHIFT		16
+#define	CC_MC_MASK		0x1f000000	/* mux control */
+#define	CC_MC_SHIFT		24
+
+/* N3M Clock control magic field values */
+#define	CC_F6_2			0x02		/* A factor of 2 in */
+#define	CC_F6_3			0x03		/* 6-bit fields like */
+#define	CC_F6_4			0x05		/* N1, M1 or M3 */
+#define	CC_F6_5			0x09
+#define	CC_F6_6			0x11
+#define	CC_F6_7			0x21
+
+#define	CC_F5_BIAS		5		/* 5-bit fields get this added */
+
+#define	CC_MC_BYPASS		0x08
+#define	CC_MC_M1		0x04
+#define	CC_MC_M1M2		0x02
+#define	CC_MC_M1M2M3		0x01
+#define	CC_MC_M1M3		0x11
+
+/* Type 2 Clock control magic field values */
+#define	CC_T2_BIAS		2		/* n1, n2, m1 & m3 bias */
+#define	CC_T2M2_BIAS		3		/* m2 bias */
+
+#define	CC_T2MC_M1BYP		1
+#define	CC_T2MC_M2BYP		2
+#define	CC_T2MC_M3BYP		4
+
+/* Type 6 Clock control magic field values */
+#define	CC_T6_MMASK		1		/* bits of interest in m */
+#define	CC_T6_M0		120000000	/* sb clock for m = 0 */
+#define	CC_T6_M1		100000000	/* sb clock for m = 1 */
+#define	SB2MIPS_T6(sb)		(2 * (sb))
+
+/* Common clock base */
+#define	CC_CLOCK_BASE1		24000000	/* Half the clock freq */
+#define CC_CLOCK_BASE2		12500000	/* Alternate crystal on some PLLs */
+
+/* Clock control values for 200MHz in 5350 */
+#define	CLKC_5350_N		0x0311
+#define	CLKC_5350_M		0x04020009
+
+/* Flash types in the chipcommon capabilities register */
+#define FLASH_NONE		0x000		/* No flash */
+#define SFLASH_ST		0x100		/* ST serial flash */
+#define SFLASH_AT		0x200		/* Atmel serial flash */
+#define NFLASH			0x300
+#define	PFLASH			0x700		/* Parallel flash */
+#define QSPIFLASH_ST		0x800
+#define QSPIFLASH_AT		0x900
+
+/* Bits in the ExtBus config registers */
+#define	CC_CFG_EN		0x0001		/* Enable */
+#define	CC_CFG_EM_MASK		0x000e		/* Extif Mode */
+#define	CC_CFG_EM_ASYNC		0x0000		/*   Async/Parallel flash */
+#define	CC_CFG_EM_SYNC		0x0002		/*   Synchronous */
+#define	CC_CFG_EM_PCMCIA	0x0004		/*   PCMCIA */
+#define	CC_CFG_EM_IDE		0x0006		/*   IDE */
+#define	CC_CFG_DS		0x0010		/* Data size, 0=8bit, 1=16bit */
+#define	CC_CFG_CD_MASK		0x00e0		/* Sync: Clock divisor, rev >= 20 */
+#define	CC_CFG_CE		0x0100		/* Sync: Clock enable, rev >= 20 */
+#define	CC_CFG_SB		0x0200		/* Sync: Size/Bytestrobe, rev >= 20 */
+#define	CC_CFG_IS		0x0400		/* Extif Sync Clk Select, rev >= 20 */
+
+/* ExtBus address space */
+#define	CC_EB_BASE		0x1a000000	/* Chipc ExtBus base address */
+#define	CC_EB_PCMCIA_MEM	0x1a000000	/* PCMCIA 0 memory base address */
+#define	CC_EB_PCMCIA_IO		0x1a200000	/* PCMCIA 0 I/O base address */
+#define	CC_EB_PCMCIA_CFG	0x1a400000	/* PCMCIA 0 config base address */
+#define	CC_EB_IDE		0x1a800000	/* IDE memory base */
+#define	CC_EB_PCMCIA1_MEM	0x1a800000	/* PCMCIA 1 memory base address */
+#define	CC_EB_PCMCIA1_IO	0x1aa00000	/* PCMCIA 1 I/O base address */
+#define	CC_EB_PCMCIA1_CFG	0x1ac00000	/* PCMCIA 1 config base address */
+#define	CC_EB_PROGIF		0x1b000000	/* ProgIF Async/Sync base address */
+
+
+/* Start/busy bit in flashcontrol */
+#define SFLASH_OPCODE		0x000000ff
+#define SFLASH_ACTION		0x00000700
+#define	SFLASH_CS_ACTIVE	0x00001000	/* Chip Select Active, rev >= 20 */
+#define SFLASH_START		0x80000000
+#define SFLASH_BUSY		SFLASH_START
+
+/* flashcontrol action codes */
+#define	SFLASH_ACT_OPONLY	0x0000		/* Issue opcode only */
+#define	SFLASH_ACT_OP1D		0x0100		/* opcode + 1 data byte */
+#define	SFLASH_ACT_OP3A		0x0200		/* opcode + 3 addr bytes */
+#define	SFLASH_ACT_OP3A1D	0x0300		/* opcode + 3 addr & 1 data bytes */
+#define	SFLASH_ACT_OP3A4D	0x0400		/* opcode + 3 addr & 4 data bytes */
+#define	SFLASH_ACT_OP3A4X4D	0x0500		/* opcode + 3 addr, 4 don't care & 4 data bytes */
+#define	SFLASH_ACT_OP3A1X4D	0x0700		/* opcode + 3 addr, 1 don't care & 4 data bytes */
+
+/* flashcontrol action+opcodes for ST flashes */
+#define SFLASH_ST_WREN		0x0006		/* Write Enable */
+#define SFLASH_ST_WRDIS		0x0004		/* Write Disable */
+#define SFLASH_ST_RDSR		0x0105		/* Read Status Register */
+#define SFLASH_ST_WRSR		0x0101		/* Write Status Register */
+#define SFLASH_ST_READ		0x0303		/* Read Data Bytes */
+#define SFLASH_ST_PP		0x0302		/* Page Program */
+#define SFLASH_ST_SE		0x02d8		/* Sector Erase */
+#define SFLASH_ST_BE		0x00c7		/* Bulk Erase */
+#define SFLASH_ST_DP		0x00b9		/* Deep Power-down */
+#define SFLASH_ST_RES		0x03ab		/* Read Electronic Signature */
+#define SFLASH_ST_CSA		0x1000		/* Keep chip select asserted */
+#define SFLASH_ST_SSE		0x0220		/* Sub-sector Erase */
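+
+/* A hedged example (not from the original header) of driving the serial
+ * flash interface: issue Read Status Register and poll the start/busy bit:
+ *
+ *	W_REG(osh, &cc->flashcontrol, SFLASH_START | SFLASH_ST_RDSR);
+ *	while (R_REG(osh, &cc->flashcontrol) & SFLASH_BUSY)
+ *		;
+ *	status = R_REG(osh, &cc->flashdata) & 0xff;
+ */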
+
+#define SFLASH_MXIC_RDID	0x0390		/* Read Manufacture ID */
+#define SFLASH_MXIC_MFID	0xc2		/* MXIC Manufacture ID */
+
+/* Status register bits for ST flashes */
+#define SFLASH_ST_WIP		0x01		/* Write In Progress */
+#define SFLASH_ST_WEL		0x02		/* Write Enable Latch */
+#define SFLASH_ST_BP_MASK	0x1c		/* Block Protect */
+#define SFLASH_ST_BP_SHIFT	2
+#define SFLASH_ST_SRWD		0x80		/* Status Register Write Disable */
+
+/* flashcontrol action+opcodes for Atmel flashes */
+#define SFLASH_AT_READ				0x07e8
+#define SFLASH_AT_PAGE_READ			0x07d2
+#define SFLASH_AT_BUF1_READ
+#define SFLASH_AT_BUF2_READ
+#define SFLASH_AT_STATUS			0x01d7
+#define SFLASH_AT_BUF1_WRITE			0x0384
+#define SFLASH_AT_BUF2_WRITE			0x0387
+#define SFLASH_AT_BUF1_ERASE_PROGRAM		0x0283
+#define SFLASH_AT_BUF2_ERASE_PROGRAM		0x0286
+#define SFLASH_AT_BUF1_PROGRAM			0x0288
+#define SFLASH_AT_BUF2_PROGRAM			0x0289
+#define SFLASH_AT_PAGE_ERASE			0x0281
+#define SFLASH_AT_BLOCK_ERASE			0x0250
+#define SFLASH_AT_BUF1_WRITE_ERASE_PROGRAM	0x0382
+#define SFLASH_AT_BUF2_WRITE_ERASE_PROGRAM	0x0385
+#define SFLASH_AT_BUF1_LOAD			0x0253
+#define SFLASH_AT_BUF2_LOAD			0x0255
+#define SFLASH_AT_BUF1_COMPARE			0x0260
+#define SFLASH_AT_BUF2_COMPARE			0x0261
+#define SFLASH_AT_BUF1_REPROGRAM		0x0258
+#define SFLASH_AT_BUF2_REPROGRAM		0x0259
+
+/* Status register bits for Atmel flashes */
+#define SFLASH_AT_READY				0x80
+#define SFLASH_AT_MISMATCH			0x40
+#define SFLASH_AT_ID_MASK			0x38
+#define SFLASH_AT_ID_SHIFT			3
+
+/* SPI register bits, corerev >= 37 */
+#define GSIO_START			0x80000000
+#define GSIO_BUSY			GSIO_START
+
+/*
+ * These are the UART port assignments, expressed as offsets from the base
+ * register.  These assignments should hold for any serial port based on
+ * an 8250, 16450, or 16550(A).
+ */
+
+#define UART_RX		0	/* In:  Receive buffer (DLAB=0) */
+#define UART_TX		0	/* Out: Transmit buffer (DLAB=0) */
+#define UART_DLL	0	/* Out: Divisor Latch Low (DLAB=1) */
+#define UART_IER	1	/* In/Out: Interrupt Enable Register (DLAB=0) */
+#define UART_DLM	1	/* Out: Divisor Latch High (DLAB=1) */
+#define UART_IIR	2	/* In: Interrupt Identity Register  */
+#define UART_FCR	2	/* Out: FIFO Control Register */
+#define UART_LCR	3	/* Out: Line Control Register */
+#define UART_MCR	4	/* Out: Modem Control Register */
+#define UART_LSR	5	/* In:  Line Status Register */
+#define UART_MSR	6	/* In:  Modem Status Register */
+#define UART_SCR	7	/* I/O: Scratch Register */
+#define UART_LCR_DLAB	0x80	/* Divisor latch access bit */
+#define UART_LCR_WLEN8	0x03	/* Word length: 8 bits */
+#define UART_MCR_OUT2	0x08	/* MCR GPIO out 2 */
+#define UART_MCR_LOOP	0x10	/* Enable loopback test mode */
+#define UART_LSR_RX_FIFO 	0x80	/* Receive FIFO error */
+#define UART_LSR_TDHR		0x40	/* Data-hold-register empty */
+#define UART_LSR_THRE		0x20	/* Transmit-hold-register empty */
+#define UART_LSR_BREAK		0x10	/* Break interrupt */
+#define UART_LSR_FRAMING	0x08	/* Framing error */
+#define UART_LSR_PARITY		0x04	/* Parity error */
+#define UART_LSR_OVERRUN	0x02	/* Overrun error */
+#define UART_LSR_RXRDY		0x01	/* Receiver ready */
+#define UART_FCR_FIFO_ENABLE 1	/* FIFO control register bit controlling FIFO enable/disable */
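+
+/* For illustration (not part of the original header): the classic 16550
+ * polled transmit using the offsets above; uart_rreg()/uart_wreg() are
+ * assumed byte accessors on the UART base address:
+ *
+ *	while (!(uart_rreg(base + UART_LSR) & UART_LSR_THRE))
+ *		;
+ *	uart_wreg(base + UART_TX, c);
+ */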
+
+/* Interrupt Identity Register (IIR) bits */
+#define UART_IIR_FIFO_MASK	0xc0	/* IIR FIFO disable/enabled mask */
+#define UART_IIR_INT_MASK	0xf	/* IIR interrupt ID source */
+#define UART_IIR_MDM_CHG	0x0	/* Modem status changed */
+#define UART_IIR_NOINT		0x1	/* No interrupt pending */
+#define UART_IIR_THRE		0x2	/* THR empty */
+#define UART_IIR_RCVD_DATA	0x4	/* Received data available */
+#define UART_IIR_RCVR_STATUS 	0x6	/* Receiver status */
+#define UART_IIR_CHAR_TIME 	0xc	/* Character time */
+
+/* Interrupt Enable Register (IER) bits */
+#define UART_IER_PTIME	128	/* Programmable THRE Interrupt Mode Enable */
+#define UART_IER_EDSSI	8	/* enable modem status interrupt */
+#define UART_IER_ELSI	4	/* enable receiver line status interrupt */
+#define UART_IER_ETBEI  2	/* enable transmitter holding register empty interrupt */
+#define UART_IER_ERBFI	1	/* enable data available interrupt */
+
+/* pmustatus */
+#define PST_EXTLPOAVAIL	0x0100
+#define PST_WDRESET	0x0080
+#define	PST_INTPEND	0x0040
+#define	PST_SBCLKST	0x0030
+#define	PST_SBCLKST_ILP	0x0010
+#define	PST_SBCLKST_ALP	0x0020
+#define	PST_SBCLKST_HT	0x0030
+#define	PST_ALPAVAIL	0x0008
+#define	PST_HTAVAIL	0x0004
+#define	PST_RESINIT	0x0003
+
+/* pmucapabilities */
+#define PCAP_REV_MASK	0x000000ff
+#define PCAP_RC_MASK	0x00001f00
+#define PCAP_RC_SHIFT	8
+#define PCAP_TC_MASK	0x0001e000
+#define PCAP_TC_SHIFT	13
+#define PCAP_PC_MASK	0x001e0000
+#define PCAP_PC_SHIFT	17
+#define PCAP_VC_MASK	0x01e00000
+#define PCAP_VC_SHIFT	21
+#define PCAP_CC_MASK	0x1e000000
+#define PCAP_CC_SHIFT	25
+#define PCAP5_PC_MASK	0x003e0000	/* PMU corerev >= 5 */
+#define PCAP5_PC_SHIFT	17
+#define PCAP5_VC_MASK	0x07c00000
+#define PCAP5_VC_SHIFT	22
+#define PCAP5_CC_MASK	0xf8000000
+#define PCAP5_CC_SHIFT	27
+
+/* PMU Resource Request Timer registers */
+/* This is based on PmuRev0 */
+#define	PRRT_TIME_MASK	0x03ff
+#define	PRRT_INTEN	0x0400
+#define	PRRT_REQ_ACTIVE	0x0800
+#define	PRRT_ALP_REQ	0x1000
+#define	PRRT_HT_REQ	0x2000
+#define PRRT_HQ_REQ 0x4000
+
+/* PMU resource bit position */
+#define PMURES_BIT(bit)	(1 << (bit))
+
+/* PMU resource number limit */
+#define PMURES_MAX_RESNUM	30
+
+/* PMU chip control0 register */
+#define	PMU_CHIPCTL0		0
+#define PMU43143_CC0_SDIO_DRSTR_OVR	(1 << 31) /* sdio drive strength override enable */
+
+/* clock req types */
+#define PMU_CC1_CLKREQ_TYPE_SHIFT	19
+#define PMU_CC1_CLKREQ_TYPE_MASK	(1 << PMU_CC1_CLKREQ_TYPE_SHIFT)
+
+#define CLKREQ_TYPE_CONFIG_OPENDRAIN		0
+#define CLKREQ_TYPE_CONFIG_PUSHPULL		1
+
+/* PMU chip control1 register */
+#define	PMU_CHIPCTL1			1
+#define	PMU_CC1_RXC_DLL_BYPASS		0x00010000
+#define PMU_CC1_ENABLE_BBPLL_PWR_DOWN	0x00000010
+
+#define PMU_CC1_IF_TYPE_MASK   		0x00000030
+#define PMU_CC1_IF_TYPE_RMII    	0x00000000
+#define PMU_CC1_IF_TYPE_MII     	0x00000010
+#define PMU_CC1_IF_TYPE_RGMII   	0x00000020
+
+#define PMU_CC1_SW_TYPE_MASK    	0x000000c0
+#define PMU_CC1_SW_TYPE_EPHY    	0x00000000
+#define PMU_CC1_SW_TYPE_EPHYMII 	0x00000040
+#define PMU_CC1_SW_TYPE_EPHYRMII	0x00000080
+#define PMU_CC1_SW_TYPE_RGMII   	0x000000c0
+
+/* PMU chip control2 register */
+#define	PMU_CHIPCTL2		2
+#define PMU_CC2_FORCE_SUBCORE_PWR_SWITCH_ON   	(1 << 18)
+#define PMU_CC2_FORCE_PHY_PWR_SWITCH_ON   	(1 << 19)
+#define PMU_CC2_FORCE_VDDM_PWR_SWITCH_ON   	(1 << 20)
+#define PMU_CC2_FORCE_MEMLPLDO_PWR_SWITCH_ON   	(1 << 21)
+
+/* PMU chip control3 register */
+#define	PMU_CHIPCTL3		3
+
+/* PMU chip control6 register */
+#define	PMU_CHIPCTL6		6
+#define PMU_CC6_ENABLE_CLKREQ_WAKEUP   		(1 << 4)
+#define PMU_CC6_ENABLE_PMU_WAKEUP_ALP   	(1 << 6)
+
+#define PMU_CC3_ENABLE_SDIO_WAKEUP_SHIFT  19
+#define PMU_CC3_ENABLE_RF_SHIFT           22
+#define PMU_CC3_RF_DISABLE_IVALUE_SHIFT   23
+
+/* PMU chip control5 register */
+#define PMU_CHIPCTL5                    5
+
+/* PMU chip control6 register */
+#define PMU_CHIPCTL6                    6
+#define PMU_CC6_ENABLE_CLKREQ_WAKEUP    (1 << 4)
+#define PMU_CC6_ENABLE_PMU_WAKEUP_ALP   (1 << 6)
+
+/* PMU corerev and chip specific PLL controls.
+ * PMU<rev>_PLL<num>_XX where <rev> is PMU corerev and <num> is an arbitrary number
+ * to differentiate different PLLs controlled by the same PMU rev.
+ */
+/* pllcontrol registers */
+/* PDIV, div_phy, div_arm, div_adc, dith_sel, ioff, kpd_scale, lsb_sel, mash_sel, lf_c & lf_r */
+#define	PMU0_PLL0_PLLCTL0		0
+#define	PMU0_PLL0_PC0_PDIV_MASK		1
+#define	PMU0_PLL0_PC0_PDIV_FREQ		25000
+#define PMU0_PLL0_PC0_DIV_ARM_MASK	0x00000038
+#define PMU0_PLL0_PC0_DIV_ARM_SHIFT	3
+#define PMU0_PLL0_PC0_DIV_ARM_BASE	8
+
+/* PC0_DIV_ARM for PLLOUT_ARM */
+#define PMU0_PLL0_PC0_DIV_ARM_110MHZ	0
+#define PMU0_PLL0_PC0_DIV_ARM_97_7MHZ	1
+#define PMU0_PLL0_PC0_DIV_ARM_88MHZ	2
+#define PMU0_PLL0_PC0_DIV_ARM_80MHZ	3 /* Default */
+#define PMU0_PLL0_PC0_DIV_ARM_73_3MHZ	4
+#define PMU0_PLL0_PC0_DIV_ARM_67_7MHZ	5
+#define PMU0_PLL0_PC0_DIV_ARM_62_9MHZ	6
+#define PMU0_PLL0_PC0_DIV_ARM_58_6MHZ	7
+
+/* Wildcard base, stop_mod, en_lf_tp, en_cal & lf_r2 */
+#define	PMU0_PLL0_PLLCTL1		1
+#define	PMU0_PLL0_PC1_WILD_INT_MASK	0xf0000000
+#define	PMU0_PLL0_PC1_WILD_INT_SHIFT	28
+#define	PMU0_PLL0_PC1_WILD_FRAC_MASK	0x0fffff00
+#define	PMU0_PLL0_PC1_WILD_FRAC_SHIFT	8
+#define	PMU0_PLL0_PC1_STOP_MOD		0x00000040
+
+/* Wildcard base, vco_calvar, vco_swc, vco_var_selref, vso_ical & vco_sel_avdd */
+#define	PMU0_PLL0_PLLCTL2		2
+#define	PMU0_PLL0_PC2_WILD_INT_MASK	0xf
+#define	PMU0_PLL0_PC2_WILD_INT_SHIFT	4
+
+/* pllcontrol registers */
+/* ndiv_pwrdn, pwrdn_ch<x>, refcomp_pwrdn, dly_ch<x>, p1div, p2div, _bypass_sdmod */
+#define PMU1_PLL0_PLLCTL0		0
+#define PMU1_PLL0_PC0_P1DIV_MASK	0x00f00000
+#define PMU1_PLL0_PC0_P1DIV_SHIFT	20
+#define PMU1_PLL0_PC0_P2DIV_MASK	0x0f000000
+#define PMU1_PLL0_PC0_P2DIV_SHIFT	24
+
+/* m<x>div */
+#define PMU1_PLL0_PLLCTL1		1
+#define PMU1_PLL0_PC1_M1DIV_MASK	0x000000ff
+#define PMU1_PLL0_PC1_M1DIV_SHIFT	0
+#define PMU1_PLL0_PC1_M2DIV_MASK	0x0000ff00
+#define PMU1_PLL0_PC1_M2DIV_SHIFT	8
+#define PMU1_PLL0_PC1_M3DIV_MASK	0x00ff0000
+#define PMU1_PLL0_PC1_M3DIV_SHIFT	16
+#define PMU1_PLL0_PC1_M4DIV_MASK	0xff000000
+#define PMU1_PLL0_PC1_M4DIV_SHIFT	24
+#define PMU1_PLL0_PC1_M4DIV_BY_9	9
+#define PMU1_PLL0_PC1_M4DIV_BY_18	0x12
+#define PMU1_PLL0_PC1_M4DIV_BY_36	0x24
+
+#define DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT 8
+#define DOT11MAC_880MHZ_CLK_DIVISOR_MASK (0xFF << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
+#define DOT11MAC_880MHZ_CLK_DIVISOR_VAL  (0xE << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
+
+/* m<x>div, ndiv_dither_mfb, ndiv_mode, ndiv_int */
+#define PMU1_PLL0_PLLCTL2		2
+#define PMU1_PLL0_PC2_M5DIV_MASK	0x000000ff
+#define PMU1_PLL0_PC2_M5DIV_SHIFT	0
+#define PMU1_PLL0_PC2_M5DIV_BY_12	0xc
+#define PMU1_PLL0_PC2_M5DIV_BY_18	0x12
+#define PMU1_PLL0_PC2_M5DIV_BY_36	0x24
+#define PMU1_PLL0_PC2_M6DIV_MASK	0x0000ff00
+#define PMU1_PLL0_PC2_M6DIV_SHIFT	8
+#define PMU1_PLL0_PC2_M6DIV_BY_18	0x12
+#define PMU1_PLL0_PC2_M6DIV_BY_36	0x24
+#define PMU1_PLL0_PC2_NDIV_MODE_MASK	0x000e0000
+#define PMU1_PLL0_PC2_NDIV_MODE_SHIFT	17
+#define PMU1_PLL0_PC2_NDIV_MODE_MASH	1
+#define PMU1_PLL0_PC2_NDIV_MODE_MFB	2	/* recommended for 4319 */
+#define PMU1_PLL0_PC2_NDIV_INT_MASK	0x1ff00000
+#define PMU1_PLL0_PC2_NDIV_INT_SHIFT	20
+
+/* ndiv_frac */
+#define PMU1_PLL0_PLLCTL3		3
+#define PMU1_PLL0_PC3_NDIV_FRAC_MASK	0x00ffffff
+#define PMU1_PLL0_PC3_NDIV_FRAC_SHIFT	0
+
+/* pll_ctrl */
+#define PMU1_PLL0_PLLCTL4		4
+
+/* pll_ctrl, vco_rng, clkdrive_ch<x> */
+#define PMU1_PLL0_PLLCTL5		5
+#define PMU1_PLL0_PC5_CLK_DRV_MASK 0xffffff00
+#define PMU1_PLL0_PC5_CLK_DRV_SHIFT 8
+
+#define PMU1_PLL0_PLLCTL6		6
+#define PMU1_PLL0_PLLCTL7		7
+
+/* PMU rev 2 control words */
+#define PMU2_PHY_PLL_PLLCTL		4
+#define PMU2_SI_PLL_PLLCTL		10
+
+/* PMU rev 2 */
+/* pllcontrol registers */
+/* ndiv_pwrdn, pwrdn_ch<x>, refcomp_pwrdn, dly_ch<x>, p1div, p2div, _bypass_sdmod */
+#define PMU2_PLL_PLLCTL0		0
+#define PMU2_PLL_PC0_P1DIV_MASK 	0x00f00000
+#define PMU2_PLL_PC0_P1DIV_SHIFT	20
+#define PMU2_PLL_PC0_P2DIV_MASK 	0x0f000000
+#define PMU2_PLL_PC0_P2DIV_SHIFT	24
+
+/* m<x>div */
+#define PMU2_PLL_PLLCTL1		1
+#define PMU2_PLL_PC1_M1DIV_MASK 	0x000000ff
+#define PMU2_PLL_PC1_M1DIV_SHIFT	0
+#define PMU2_PLL_PC1_M2DIV_MASK 	0x0000ff00
+#define PMU2_PLL_PC1_M2DIV_SHIFT	8
+#define PMU2_PLL_PC1_M3DIV_MASK 	0x00ff0000
+#define PMU2_PLL_PC1_M3DIV_SHIFT	16
+#define PMU2_PLL_PC1_M4DIV_MASK 	0xff000000
+#define PMU2_PLL_PC1_M4DIV_SHIFT	24
+
+/* m<x>div, ndiv_dither_mfb, ndiv_mode, ndiv_int */
+#define PMU2_PLL_PLLCTL2		2
+#define PMU2_PLL_PC2_M5DIV_MASK 	0x000000ff
+#define PMU2_PLL_PC2_M5DIV_SHIFT	0
+#define PMU2_PLL_PC2_M6DIV_MASK 	0x0000ff00
+#define PMU2_PLL_PC2_M6DIV_SHIFT	8
+#define PMU2_PLL_PC2_NDIV_MODE_MASK	0x000e0000
+#define PMU2_PLL_PC2_NDIV_MODE_SHIFT	17
+#define PMU2_PLL_PC2_NDIV_INT_MASK	0x1ff00000
+#define PMU2_PLL_PC2_NDIV_INT_SHIFT	20
+
+/* ndiv_frac */
+#define PMU2_PLL_PLLCTL3		3
+#define PMU2_PLL_PC3_NDIV_FRAC_MASK	0x00ffffff
+#define PMU2_PLL_PC3_NDIV_FRAC_SHIFT	0
+
+/* pll_ctrl */
+#define PMU2_PLL_PLLCTL4		4
+
+/* pll_ctrl, vco_rng, clkdrive_ch<x> */
+#define PMU2_PLL_PLLCTL5		5
+#define PMU2_PLL_PC5_CLKDRIVE_CH1_MASK	0x00000f00
+#define PMU2_PLL_PC5_CLKDRIVE_CH1_SHIFT	8
+#define PMU2_PLL_PC5_CLKDRIVE_CH2_MASK	0x0000f000
+#define PMU2_PLL_PC5_CLKDRIVE_CH2_SHIFT	12
+#define PMU2_PLL_PC5_CLKDRIVE_CH3_MASK	0x000f0000
+#define PMU2_PLL_PC5_CLKDRIVE_CH3_SHIFT	16
+#define PMU2_PLL_PC5_CLKDRIVE_CH4_MASK	0x00f00000
+#define PMU2_PLL_PC5_CLKDRIVE_CH4_SHIFT	20
+#define PMU2_PLL_PC5_CLKDRIVE_CH5_MASK	0x0f000000
+#define PMU2_PLL_PC5_CLKDRIVE_CH5_SHIFT	24
+#define PMU2_PLL_PC5_CLKDRIVE_CH6_MASK	0xf0000000
+#define PMU2_PLL_PC5_CLKDRIVE_CH6_SHIFT	28
+
+/* PMU rev 5 (& 6) */
+#define	PMU5_PLL_P1P2_OFF		0
+#define	PMU5_PLL_P1_MASK		0x0f000000
+#define	PMU5_PLL_P1_SHIFT		24
+#define	PMU5_PLL_P2_MASK		0x00f00000
+#define	PMU5_PLL_P2_SHIFT		20
+#define	PMU5_PLL_M14_OFF		1
+#define	PMU5_PLL_MDIV_MASK		0x000000ff
+#define	PMU5_PLL_MDIV_WIDTH		8
+#define	PMU5_PLL_NM5_OFF		2
+#define	PMU5_PLL_NDIV_MASK		0xfff00000
+#define	PMU5_PLL_NDIV_SHIFT		20
+#define	PMU5_PLL_NDIV_MODE_MASK		0x000e0000
+#define	PMU5_PLL_NDIV_MODE_SHIFT	17
+#define	PMU5_PLL_FMAB_OFF		3
+#define	PMU5_PLL_MRAT_MASK		0xf0000000
+#define	PMU5_PLL_MRAT_SHIFT		28
+#define	PMU5_PLL_ABRAT_MASK		0x08000000
+#define	PMU5_PLL_ABRAT_SHIFT		27
+#define	PMU5_PLL_FDIV_MASK		0x07ffffff
+#define	PMU5_PLL_PLLCTL_OFF		4
+#define	PMU5_PLL_PCHI_OFF		5
+#define	PMU5_PLL_PCHI_MASK		0x0000003f
+
+/* pmu XtalFreqRatio */
+#define	PMU_XTALFREQ_REG_ILPCTR_MASK	0x00001FFF
+#define	PMU_XTALFREQ_REG_MEASURE_MASK	0x80000000
+#define	PMU_XTALFREQ_REG_MEASURE_SHIFT	31
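+
+/*
+ * Illustrative sketch: assuming the XtalFreqRatio register value has been
+ * read into 'xf' elsewhere, the ILP counter field is extracted as:
+ *
+ *	u32 ilpctr = xf & PMU_XTALFREQ_REG_ILPCTR_MASK;
+ */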
+
+/* Divider allocation in 4716/47162/5356/5357 */
+#define	PMU5_MAINPLL_CPU		1
+#define	PMU5_MAINPLL_MEM		2
+#define	PMU5_MAINPLL_SI			3
+
+/* 4706 PMU */
+#define PMU4706_MAINPLL_PLL0	0
+#define PMU6_4706_PROCPLL_OFF	4	/* The CPU PLL */
+#define PMU6_4706_PROC_P2DIV_MASK		0x000f0000
+#define PMU6_4706_PROC_P2DIV_SHIFT	16
+#define PMU6_4706_PROC_P1DIV_MASK		0x0000f000
+#define PMU6_4706_PROC_P1DIV_SHIFT	12
+#define PMU6_4706_PROC_NDIV_INT_MASK	0x00000ff8
+#define PMU6_4706_PROC_NDIV_INT_SHIFT	3
+#define PMU6_4706_PROC_NDIV_MODE_MASK		0x00000007
+#define PMU6_4706_PROC_NDIV_MODE_SHIFT	0
+
+#define PMU7_PLL_PLLCTL7                7
+#define PMU7_PLL_CTL7_M4DIV_MASK	0xff000000
+#define PMU7_PLL_CTL7_M4DIV_SHIFT 	24
+#define PMU7_PLL_CTL7_M4DIV_BY_6	6
+#define PMU7_PLL_CTL7_M4DIV_BY_12	0xc
+#define PMU7_PLL_CTL7_M4DIV_BY_24	0x18
+#define PMU7_PLL_PLLCTL8                8
+#define PMU7_PLL_CTL8_M5DIV_MASK	0x000000ff
+#define PMU7_PLL_CTL8_M5DIV_SHIFT	0
+#define PMU7_PLL_CTL8_M5DIV_BY_8	8
+#define PMU7_PLL_CTL8_M5DIV_BY_12	0xc
+#define PMU7_PLL_CTL8_M5DIV_BY_24	0x18
+#define PMU7_PLL_CTL8_M6DIV_MASK	0x0000ff00
+#define PMU7_PLL_CTL8_M6DIV_SHIFT	8
+#define PMU7_PLL_CTL8_M6DIV_BY_12	0xc
+#define PMU7_PLL_CTL8_M6DIV_BY_24	0x18
+#define PMU7_PLL_PLLCTL11		11
+#define PMU7_PLL_PLLCTL11_MASK		0xffffff00
+#define PMU7_PLL_PLLCTL11_VAL		0x22222200
+
+/* PMU rev 15 */
+#define PMU15_PLL_PLLCTL0		0
+#define PMU15_PLL_PC0_CLKSEL_MASK	0x00000003
+#define PMU15_PLL_PC0_CLKSEL_SHIFT	0
+#define PMU15_PLL_PC0_FREQTGT_MASK	0x003FFFFC
+#define PMU15_PLL_PC0_FREQTGT_SHIFT	2
+#define PMU15_PLL_PC0_PRESCALE_MASK	0x00C00000
+#define PMU15_PLL_PC0_PRESCALE_SHIFT	22
+#define PMU15_PLL_PC0_KPCTRL_MASK	0x07000000
+#define PMU15_PLL_PC0_KPCTRL_SHIFT	24
+#define PMU15_PLL_PC0_FCNTCTRL_MASK	0x38000000
+#define PMU15_PLL_PC0_FCNTCTRL_SHIFT	27
+#define PMU15_PLL_PC0_FDCMODE_MASK	0x40000000
+#define PMU15_PLL_PC0_FDCMODE_SHIFT	30
+#define PMU15_PLL_PC0_CTRLBIAS_MASK	0x80000000
+#define PMU15_PLL_PC0_CTRLBIAS_SHIFT	31
+
+#define PMU15_PLL_PLLCTL1			1
+#define PMU15_PLL_PC1_BIAS_CTLM_MASK		0x00000060
+#define PMU15_PLL_PC1_BIAS_CTLM_SHIFT		5
+#define PMU15_PLL_PC1_BIAS_CTLM_RST_MASK	0x00000040
+#define PMU15_PLL_PC1_BIAS_CTLM_RST_SHIFT	6
+#define PMU15_PLL_PC1_BIAS_SS_DIVR_MASK		0x0001FF80
+#define PMU15_PLL_PC1_BIAS_SS_DIVR_SHIFT	7
+#define PMU15_PLL_PC1_BIAS_SS_RSTVAL_MASK	0x03FE0000
+#define PMU15_PLL_PC1_BIAS_SS_RSTVAL_SHIFT	17
+#define PMU15_PLL_PC1_BIAS_INTG_BW_MASK		0x0C000000
+#define PMU15_PLL_PC1_BIAS_INTG_BW_SHIFT	26
+#define PMU15_PLL_PC1_BIAS_INTG_BYP_MASK	0x10000000
+#define PMU15_PLL_PC1_BIAS_INTG_BYP_SHIFT	28
+#define PMU15_PLL_PC1_OPENLP_EN_MASK		0x40000000
+#define PMU15_PLL_PC1_OPENLP_EN_SHIFT		30
+
+#define PMU15_PLL_PLLCTL2			2
+#define PMU15_PLL_PC2_CTEN_MASK			0x00000001
+#define PMU15_PLL_PC2_CTEN_SHIFT		0
+
+#define PMU15_PLL_PLLCTL3			3
+#define PMU15_PLL_PC3_DITHER_EN_MASK		0x00000001
+#define PMU15_PLL_PC3_DITHER_EN_SHIFT		0
+#define PMU15_PLL_PC3_DCOCTLSP_MASK		0xFE000000
+#define PMU15_PLL_PC3_DCOCTLSP_SHIFT		25
+#define PMU15_PLL_PC3_DCOCTLSP_DIV2EN_MASK	0x01
+#define PMU15_PLL_PC3_DCOCTLSP_DIV2EN_SHIFT	0
+#define PMU15_PLL_PC3_DCOCTLSP_CH0EN_MASK	0x02
+#define PMU15_PLL_PC3_DCOCTLSP_CH0EN_SHIFT	1
+#define PMU15_PLL_PC3_DCOCTLSP_CH1EN_MASK	0x04
+#define PMU15_PLL_PC3_DCOCTLSP_CH1EN_SHIFT	2
+#define PMU15_PLL_PC3_DCOCTLSP_CH0SEL_MASK	0x18
+#define PMU15_PLL_PC3_DCOCTLSP_CH0SEL_SHIFT	3
+#define PMU15_PLL_PC3_DCOCTLSP_CH1SEL_MASK	0x60
+#define PMU15_PLL_PC3_DCOCTLSP_CH1SEL_SHIFT	5
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV1	0
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV2	1
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV3	2
+#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV5	3
+
+#define PMU15_PLL_PLLCTL4			4
+#define PMU15_PLL_PC4_FLLCLK1_DIV_MASK		0x00000007
+#define PMU15_PLL_PC4_FLLCLK1_DIV_SHIFT		0
+#define PMU15_PLL_PC4_FLLCLK2_DIV_MASK		0x00000038
+#define PMU15_PLL_PC4_FLLCLK2_DIV_SHIFT		3
+#define PMU15_PLL_PC4_FLLCLK3_DIV_MASK		0x000001C0
+#define PMU15_PLL_PC4_FLLCLK3_DIV_SHIFT		6
+#define PMU15_PLL_PC4_DBGMODE_MASK		0x00000E00
+#define PMU15_PLL_PC4_DBGMODE_SHIFT		9
+#define PMU15_PLL_PC4_FLL480_CTLSP_LK_MASK	0x00001000
+#define PMU15_PLL_PC4_FLL480_CTLSP_LK_SHIFT	12
+#define PMU15_PLL_PC4_FLL480_CTLSP_MASK		0x000FE000
+#define PMU15_PLL_PC4_FLL480_CTLSP_SHIFT	13
+#define PMU15_PLL_PC4_DINPOL_MASK		0x00100000
+#define PMU15_PLL_PC4_DINPOL_SHIFT		20
+#define PMU15_PLL_PC4_CLKOUT_PD_MASK		0x00200000
+#define PMU15_PLL_PC4_CLKOUT_PD_SHIFT		21
+#define PMU15_PLL_PC4_CLKDIV2_PD_MASK		0x00400000
+#define PMU15_PLL_PC4_CLKDIV2_PD_SHIFT		22
+#define PMU15_PLL_PC4_CLKDIV4_PD_MASK		0x00800000
+#define PMU15_PLL_PC4_CLKDIV4_PD_SHIFT		23
+#define PMU15_PLL_PC4_CLKDIV8_PD_MASK		0x01000000
+#define PMU15_PLL_PC4_CLKDIV8_PD_SHIFT		24
+#define PMU15_PLL_PC4_CLKDIV16_PD_MASK		0x02000000
+#define PMU15_PLL_PC4_CLKDIV16_PD_SHIFT		25
+#define PMU15_PLL_PC4_TEST_EN_MASK		0x04000000
+#define PMU15_PLL_PC4_TEST_EN_SHIFT		26
+
+#define PMU15_PLL_PLLCTL5			5
+#define PMU15_PLL_PC5_FREQTGT_MASK		0x000FFFFF
+#define PMU15_PLL_PC5_FREQTGT_SHIFT		0
+#define PMU15_PLL_PC5_DCOCTLSP_MASK		0x07F00000
+#define PMU15_PLL_PC5_DCOCTLSP_SHIFT		20
+#define PMU15_PLL_PC5_PRESCALE_MASK		0x18000000
+#define PMU15_PLL_PC5_PRESCALE_SHIFT		27
+
+#define PMU15_PLL_PLLCTL6		6
+#define PMU15_PLL_PC6_FREQTGT_MASK	0x000FFFFF
+#define PMU15_PLL_PC6_FREQTGT_SHIFT	0
+#define PMU15_PLL_PC6_DCOCTLSP_MASK	0x07F00000
+#define PMU15_PLL_PC6_DCOCTLSP_SHIFT	20
+#define PMU15_PLL_PC6_PRESCALE_MASK	0x18000000
+#define PMU15_PLL_PC6_PRESCALE_SHIFT	27
+
+#define PMU15_FREQTGT_480_DEFAULT	0x19AB1
+#define PMU15_FREQTGT_492_DEFAULT	0x1A4F5
+#define PMU15_ARM_96MHZ			96000000	/* 96 MHz */
+#define PMU15_ARM_98MHZ			98400000	/* 98.4 MHz */
+#define PMU15_ARM_97MHZ			97000000	/* 97 MHz */
+
+
+#define PMU17_PLLCTL2_NDIVTYPE_MASK		0x00000070
+#define PMU17_PLLCTL2_NDIVTYPE_SHIFT		4
+
+#define PMU17_PLLCTL2_NDIV_MODE_INT		0
+#define PMU17_PLLCTL2_NDIV_MODE_INT1B8		1
+#define PMU17_PLLCTL2_NDIV_MODE_MASH111		2
+#define PMU17_PLLCTL2_NDIV_MODE_MASH111B8	3
+
+#define PMU17_PLLCTL0_BBPLL_PWRDWN		0
+#define PMU17_PLLCTL0_BBPLL_DRST		3
+#define PMU17_PLLCTL0_BBPLL_DISBL_CLK		8
+
+/* PLL usage in 4716/47162 */
+#define	PMU4716_MAINPLL_PLL0		12
+
+/* PLL usage in 4335 */
+#define PMU4335_PLL0_PC2_P1DIV_MASK			0x000f0000
+#define PMU4335_PLL0_PC2_P1DIV_SHIFT		16
+#define PMU4335_PLL0_PC2_NDIV_INT_MASK		0xff800000
+#define PMU4335_PLL0_PC2_NDIV_INT_SHIFT		23
+#define PMU4335_PLL0_PC1_MDIV2_MASK			0x0000ff00
+#define PMU4335_PLL0_PC1_MDIV2_SHIFT		8
+
+
+/* PLL usage in 5356/5357 */
+#define	PMU5356_MAINPLL_PLL0		0
+#define	PMU5357_MAINPLL_PLL0		0
+
+/* 4716/47162 resources */
+#define RES4716_PROC_PLL_ON		0x00000040
+#define RES4716_PROC_HT_AVAIL		0x00000080
+
+/* 4716/4717/4718 Chip specific ChipControl register bits */
+#define CCTRL_471X_I2S_PINS_ENABLE	0x0080 /* I2S pins off by default, shared w/ pflash */
+
+/* 5357 Chip specific ChipControl register bits */
+/* 2nd - 32-bit reg */
+#define CCTRL_5357_I2S_PINS_ENABLE	0x00040000 /* I2S pins enable */
+#define CCTRL_5357_I2CSPI_PINS_ENABLE	0x00080000 /* I2C/SPI pins enable */
+
+/* 5354 resources */
+#define RES5354_EXT_SWITCHER_PWM	0	/* 0x00001 */
+#define RES5354_BB_SWITCHER_PWM		1	/* 0x00002 */
+#define RES5354_BB_SWITCHER_BURST	2	/* 0x00004 */
+#define RES5354_BB_EXT_SWITCHER_BURST	3	/* 0x00008 */
+#define RES5354_ILP_REQUEST		4	/* 0x00010 */
+#define RES5354_RADIO_SWITCHER_PWM	5	/* 0x00020 */
+#define RES5354_RADIO_SWITCHER_BURST	6	/* 0x00040 */
+#define RES5354_ROM_SWITCH		7	/* 0x00080 */
+#define RES5354_PA_REF_LDO		8	/* 0x00100 */
+#define RES5354_RADIO_LDO		9	/* 0x00200 */
+#define RES5354_AFE_LDO			10	/* 0x00400 */
+#define RES5354_PLL_LDO			11	/* 0x00800 */
+#define RES5354_BG_FILTBYP		12	/* 0x01000 */
+#define RES5354_TX_FILTBYP		13	/* 0x02000 */
+#define RES5354_RX_FILTBYP		14	/* 0x04000 */
+#define RES5354_XTAL_PU			15	/* 0x08000 */
+#define RES5354_XTAL_EN			16	/* 0x10000 */
+#define RES5354_BB_PLL_FILTBYP		17	/* 0x20000 */
+#define RES5354_RF_PLL_FILTBYP		18	/* 0x40000 */
+#define RES5354_BB_PLL_PU		19	/* 0x80000 */
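+
+/*
+ * Illustrative sketch: the RES5354_* numbers above are bit positions in
+ * the PMU resource masks (see the hex values in the comments), so a
+ * dependency mask is built by shifting, e.g.:
+ *
+ *	u32 rsrcs = (1 << RES5354_XTAL_PU) | (1 << RES5354_BB_PLL_PU);
+ */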
+
+/* 5357 Chip specific ChipControl register bits */
+#define CCTRL5357_EXTPA                 (1<<14) /* extPA in ChipControl 1, bit 14 */
+#define CCTRL5357_ANT_MUX_2o3		(1<<15) /* 2o3 in ChipControl 1, bit 15 */
+#define CCTRL5357_NFLASH		(1<<16) /* Nandflash in ChipControl 1, bit 16 */
+
+/* 43217 Chip specific ChipControl register bits */
+#define CCTRL43217_EXTPA_C0             (1<<13) /* core0 extPA in ChipControl 1, bit 13 */
+#define CCTRL43217_EXTPA_C1             (1<<8)  /* core1 extPA in ChipControl 1, bit 8 */
+
+/* 43228 Chip specific ChipControl register bits */
+#define CCTRL43228_EXTPA_C0             (1<<14) /* core0 extPA in ChipControl 1, bit 14 */
+#define CCTRL43228_EXTPA_C1             (1<<9)  /* core1 extPA in ChipControl 1, bit 9 */
+
+/* 4328 resources */
+#define RES4328_EXT_SWITCHER_PWM	0	/* 0x00001 */
+#define RES4328_BB_SWITCHER_PWM		1	/* 0x00002 */
+#define RES4328_BB_SWITCHER_BURST	2	/* 0x00004 */
+#define RES4328_BB_EXT_SWITCHER_BURST	3	/* 0x00008 */
+#define RES4328_ILP_REQUEST		4	/* 0x00010 */
+#define RES4328_RADIO_SWITCHER_PWM	5	/* 0x00020 */
+#define RES4328_RADIO_SWITCHER_BURST	6	/* 0x00040 */
+#define RES4328_ROM_SWITCH		7	/* 0x00080 */
+#define RES4328_PA_REF_LDO		8	/* 0x00100 */
+#define RES4328_RADIO_LDO		9	/* 0x00200 */
+#define RES4328_AFE_LDO			10	/* 0x00400 */
+#define RES4328_PLL_LDO			11	/* 0x00800 */
+#define RES4328_BG_FILTBYP		12	/* 0x01000 */
+#define RES4328_TX_FILTBYP		13	/* 0x02000 */
+#define RES4328_RX_FILTBYP		14	/* 0x04000 */
+#define RES4328_XTAL_PU			15	/* 0x08000 */
+#define RES4328_XTAL_EN			16	/* 0x10000 */
+#define RES4328_BB_PLL_FILTBYP		17	/* 0x20000 */
+#define RES4328_RF_PLL_FILTBYP		18	/* 0x40000 */
+#define RES4328_BB_PLL_PU		19	/* 0x80000 */
+
+/* 4325 A0/A1 resources */
+#define RES4325_BUCK_BOOST_BURST	0	/* 0x00000001 */
+#define RES4325_CBUCK_BURST		1	/* 0x00000002 */
+#define RES4325_CBUCK_PWM		2	/* 0x00000004 */
+#define RES4325_CLDO_CBUCK_BURST	3	/* 0x00000008 */
+#define RES4325_CLDO_CBUCK_PWM		4	/* 0x00000010 */
+#define RES4325_BUCK_BOOST_PWM		5	/* 0x00000020 */
+#define RES4325_ILP_REQUEST		6	/* 0x00000040 */
+#define RES4325_ABUCK_BURST		7	/* 0x00000080 */
+#define RES4325_ABUCK_PWM		8	/* 0x00000100 */
+#define RES4325_LNLDO1_PU		9	/* 0x00000200 */
+#define RES4325_OTP_PU			10	/* 0x00000400 */
+#define RES4325_LNLDO3_PU		11	/* 0x00000800 */
+#define RES4325_LNLDO4_PU		12	/* 0x00001000 */
+#define RES4325_XTAL_PU			13	/* 0x00002000 */
+#define RES4325_ALP_AVAIL		14	/* 0x00004000 */
+#define RES4325_RX_PWRSW_PU		15	/* 0x00008000 */
+#define RES4325_TX_PWRSW_PU		16	/* 0x00010000 */
+#define RES4325_RFPLL_PWRSW_PU		17	/* 0x00020000 */
+#define RES4325_LOGEN_PWRSW_PU		18	/* 0x00040000 */
+#define RES4325_AFE_PWRSW_PU		19	/* 0x00080000 */
+#define RES4325_BBPLL_PWRSW_PU		20	/* 0x00100000 */
+#define RES4325_HT_AVAIL		21	/* 0x00200000 */
+
+/* 4325 B0/C0 resources */
+#define RES4325B0_CBUCK_LPOM		1	/* 0x00000002 */
+#define RES4325B0_CBUCK_BURST		2	/* 0x00000004 */
+#define RES4325B0_CBUCK_PWM		3	/* 0x00000008 */
+#define RES4325B0_CLDO_PU		4	/* 0x00000010 */
+
+/* 4325 C1 resources */
+#define RES4325C1_LNLDO2_PU		12	/* 0x00001000 */
+
+/* 4325 chip-specific ChipStatus register bits */
+#define CST4325_SPROM_OTP_SEL_MASK	0x00000003
+#define CST4325_DEFCIS_SEL		0	/* OTP is powered up, use def. CIS, no SPROM */
+#define CST4325_SPROM_SEL		1	/* OTP is powered up, SPROM is present */
+#define CST4325_OTP_SEL			2	/* OTP is powered up, no SPROM */
+#define CST4325_OTP_PWRDN		3	/* OTP is powered down, SPROM is present */
+#define CST4325_SDIO_USB_MODE_MASK	0x00000004
+#define CST4325_SDIO_USB_MODE_SHIFT	2
+#define CST4325_RCAL_VALID_MASK		0x00000008
+#define CST4325_RCAL_VALID_SHIFT	3
+#define CST4325_RCAL_VALUE_MASK		0x000001f0
+#define CST4325_RCAL_VALUE_SHIFT	4
+#define CST4325_PMUTOP_2B_MASK 		0x00000200	/* 1 for 2b, 0 for 2a */
+#define CST4325_PMUTOP_2B_SHIFT		9
+
+#define RES4329_RESERVED0		0	/* 0x00000001 */
+#define RES4329_CBUCK_LPOM		1	/* 0x00000002 */
+#define RES4329_CBUCK_BURST		2	/* 0x00000004 */
+#define RES4329_CBUCK_PWM		3	/* 0x00000008 */
+#define RES4329_CLDO_PU			4	/* 0x00000010 */
+#define RES4329_PALDO_PU		5	/* 0x00000020 */
+#define RES4329_ILP_REQUEST		6	/* 0x00000040 */
+#define RES4329_RESERVED7		7	/* 0x00000080 */
+#define RES4329_RESERVED8		8	/* 0x00000100 */
+#define RES4329_LNLDO1_PU		9	/* 0x00000200 */
+#define RES4329_OTP_PU			10	/* 0x00000400 */
+#define RES4329_RESERVED11		11	/* 0x00000800 */
+#define RES4329_LNLDO2_PU		12	/* 0x00001000 */
+#define RES4329_XTAL_PU			13	/* 0x00002000 */
+#define RES4329_ALP_AVAIL		14	/* 0x00004000 */
+#define RES4329_RX_PWRSW_PU		15	/* 0x00008000 */
+#define RES4329_TX_PWRSW_PU		16	/* 0x00010000 */
+#define RES4329_RFPLL_PWRSW_PU		17	/* 0x00020000 */
+#define RES4329_LOGEN_PWRSW_PU		18	/* 0x00040000 */
+#define RES4329_AFE_PWRSW_PU		19	/* 0x00080000 */
+#define RES4329_BBPLL_PWRSW_PU		20	/* 0x00100000 */
+#define RES4329_HT_AVAIL		21	/* 0x00200000 */
+
+#define CST4329_SPROM_OTP_SEL_MASK	0x00000003
+#define CST4329_DEFCIS_SEL		0	/* OTP is powered up, use def. CIS, no SPROM */
+#define CST4329_SPROM_SEL		1	/* OTP is powered up, SPROM is present */
+#define CST4329_OTP_SEL			2	/* OTP is powered up, no SPROM */
+#define CST4329_OTP_PWRDN		3	/* OTP is powered down, SPROM is present */
+#define CST4329_SPI_SDIO_MODE_MASK	0x00000004
+#define CST4329_SPI_SDIO_MODE_SHIFT	2
+
+/* 4312 chip-specific ChipStatus register bits */
+#define CST4312_SPROM_OTP_SEL_MASK	0x00000003
+#define CST4312_DEFCIS_SEL		0	/* OTP is powered up, use def. CIS, no SPROM */
+#define CST4312_SPROM_SEL		1	/* OTP is powered up, SPROM is present */
+#define CST4312_OTP_SEL			2	/* OTP is powered up, no SPROM */
+#define CST4312_OTP_BAD			3	/* OTP is broken, SPROM is present */
+
+/* 4312 resources (all PMU chips with little memory constraint) */
+#define RES4312_SWITCHER_BURST		0	/* 0x00000001 */
+#define RES4312_SWITCHER_PWM    	1	/* 0x00000002 */
+#define RES4312_PA_REF_LDO		2	/* 0x00000004 */
+#define RES4312_CORE_LDO_BURST		3	/* 0x00000008 */
+#define RES4312_CORE_LDO_PWM		4	/* 0x00000010 */
+#define RES4312_RADIO_LDO		5	/* 0x00000020 */
+#define RES4312_ILP_REQUEST		6	/* 0x00000040 */
+#define RES4312_BG_FILTBYP		7	/* 0x00000080 */
+#define RES4312_TX_FILTBYP		8	/* 0x00000100 */
+#define RES4312_RX_FILTBYP		9	/* 0x00000200 */
+#define RES4312_XTAL_PU			10	/* 0x00000400 */
+#define RES4312_ALP_AVAIL		11	/* 0x00000800 */
+#define RES4312_BB_PLL_FILTBYP		12	/* 0x00001000 */
+#define RES4312_RF_PLL_FILTBYP		13	/* 0x00002000 */
+#define RES4312_HT_AVAIL		14	/* 0x00004000 */
+
+/* 4322 resources */
+#define RES4322_RF_LDO			0
+#define RES4322_ILP_REQUEST		1
+#define RES4322_XTAL_PU			2
+#define RES4322_ALP_AVAIL		3
+#define RES4322_SI_PLL_ON		4
+#define RES4322_HT_SI_AVAIL		5
+#define RES4322_PHY_PLL_ON		6
+#define RES4322_HT_PHY_AVAIL		7
+#define RES4322_OTP_PU			8
+
+/* 4322 chip-specific ChipStatus register bits */
+#define CST4322_XTAL_FREQ_20_40MHZ	0x00000020
+#define CST4322_SPROM_OTP_SEL_MASK	0x000000c0
+#define CST4322_SPROM_OTP_SEL_SHIFT	6
+#define CST4322_NO_SPROM_OTP		0	/* no OTP, no SPROM */
+#define CST4322_SPROM_PRESENT		1	/* SPROM is present */
+#define CST4322_OTP_PRESENT		2	/* OTP is present */
+#define CST4322_PCI_OR_USB		0x00000100
+#define CST4322_BOOT_MASK		0x00000600
+#define CST4322_BOOT_SHIFT		9
+#define CST4322_BOOT_FROM_SRAM		0	/* boot from SRAM, ARM in reset */
+#define CST4322_BOOT_FROM_ROM		1	/* boot from ROM */
+#define CST4322_BOOT_FROM_FLASH		2	/* boot from FLASH */
+#define CST4322_BOOT_FROM_INVALID	3
+#define CST4322_ILP_DIV_EN		0x00000800
+#define CST4322_FLASH_TYPE_MASK		0x00001000
+#define CST4322_FLASH_TYPE_SHIFT	12
+#define CST4322_FLASH_TYPE_SHIFT_ST	0	/* ST serial FLASH */
+#define CST4322_FLASH_TYPE_SHIFT_ATMEL	1	/* ATMEL flash */
+#define CST4322_ARM_TAP_SEL		0x00002000
+#define CST4322_RES_INIT_MODE_MASK	0x0000c000
+#define CST4322_RES_INIT_MODE_SHIFT	14
+#define CST4322_RES_INIT_MODE_ILPAVAIL	0	/* resinitmode: ILP available */
+#define CST4322_RES_INIT_MODE_ILPREQ	1	/* resinitmode: ILP request */
+#define CST4322_RES_INIT_MODE_ALPAVAIL	2	/* resinitmode: ALP available */
+#define CST4322_RES_INIT_MODE_HTAVAIL	3	/* resinitmode: HT available */
+#define CST4322_PCIPLLCLK_GATING	0x00010000
+#define CST4322_CLK_SWITCH_PCI_TO_ALP	0x00020000
+#define CST4322_PCI_CARDBUS_MODE	0x00040000
+
+/* 43224 chip-specific ChipControl register bits */
+#define CCTRL43224_GPIO_TOGGLE          0x8000 /* gpio[3:0] pins as btcoex or s/w gpio */
+#define CCTRL_43224A0_12MA_LED_DRIVE    0x00F000F0 /* 12 mA drive strength */
+#define CCTRL_43224B0_12MA_LED_DRIVE    0xF0    /* 12 mA drive strength for later 43224s */
+
+/* 43236 resources */
+#define RES43236_REGULATOR		0
+#define RES43236_ILP_REQUEST		1
+#define RES43236_XTAL_PU		2
+#define RES43236_ALP_AVAIL		3
+#define RES43236_SI_PLL_ON		4
+#define RES43236_HT_SI_AVAIL		5
+
+/* 43236 chip-specific ChipControl register bits */
+#define CCTRL43236_BT_COEXIST		(1<<0)	/* 0 disable */
+#define CCTRL43236_SECI			(1<<1)	/* 0 SECI is disabled (JTAG functional) */
+#define CCTRL43236_EXT_LNA		(1<<2)	/* 0 disable */
+#define CCTRL43236_ANT_MUX_2o3          (1<<3)	/* 2o3 mux, chipcontrol bit 3 */
+#define CCTRL43236_GSIO			(1<<4)	/* 0 disable */
+
+/* 43236 Chip specific ChipStatus register bits */
+#define CST43236_SFLASH_MASK		0x00000040
+#define CST43236_OTP_SEL_MASK		0x00000080
+#define CST43236_OTP_SEL_SHIFT		7
+#define CST43236_HSIC_MASK		0x00000100	/* USB/HSIC */
+#define CST43236_BP_CLK			0x00000200	/* backplane clock 120/96MHz */
+#define CST43236_BOOT_MASK		0x00001800
+#define CST43236_BOOT_SHIFT		11
+#define CST43236_BOOT_FROM_SRAM		0	/* boot from SRAM, ARM in reset */
+#define CST43236_BOOT_FROM_ROM		1	/* boot from ROM */
+#define CST43236_BOOT_FROM_FLASH	2	/* boot from FLASH */
+#define CST43236_BOOT_FROM_INVALID	3
+
+/* 43237 resources */
+#define RES43237_REGULATOR		0
+#define RES43237_ILP_REQUEST		1
+#define RES43237_XTAL_PU		2
+#define RES43237_ALP_AVAIL		3
+#define RES43237_SI_PLL_ON		4
+#define RES43237_HT_SI_AVAIL		5
+
+/* 43237 chip-specific ChipControl register bits */
+#define CCTRL43237_BT_COEXIST		(1<<0)	/* 0 disable */
+#define CCTRL43237_SECI			(1<<1)	/* 0 SECI is disabled (JTAG functional) */
+#define CCTRL43237_EXT_LNA		(1<<2)	/* 0 disable */
+#define CCTRL43237_ANT_MUX_2o3          (1<<3)	/* 2o3 mux, chipcontrol bit 3 */
+#define CCTRL43237_GSIO			(1<<4)	/* 0 disable */
+
+/* 43237 Chip specific ChipStatus register bits */
+#define CST43237_SFLASH_MASK		0x00000040
+#define CST43237_OTP_SEL_MASK		0x00000080
+#define CST43237_OTP_SEL_SHIFT		7
+#define CST43237_HSIC_MASK		0x00000100	/* USB/HSIC */
+#define CST43237_BP_CLK			0x00000200	/* backplane clock 120/96MHz */
+#define CST43237_BOOT_MASK		0x00001800
+#define CST43237_BOOT_SHIFT		11
+#define CST43237_BOOT_FROM_SRAM		0	/* boot from SRAM, ARM in reset */
+#define CST43237_BOOT_FROM_ROM		1	/* boot from ROM */
+#define CST43237_BOOT_FROM_FLASH	2	/* boot from FLASH */
+#define CST43237_BOOT_FROM_INVALID	3
+
+/* 43239 resources */
+#define RES43239_OTP_PU			9
+#define RES43239_MACPHY_CLKAVAIL	23
+#define RES43239_HT_AVAIL		24
+
+/* 43239 Chip specific ChipStatus register bits */
+#define CST43239_SPROM_MASK			0x00000002
+#define CST43239_SFLASH_MASK		0x00000004
+#define	CST43239_RES_INIT_MODE_SHIFT	7
+#define	CST43239_RES_INIT_MODE_MASK		0x000001f0
+#define CST43239_CHIPMODE_SDIOD(cs)	((cs) & (1 << 15))	/* SDIO || gSPI */
+#define CST43239_CHIPMODE_USB20D(cs)	(~(cs) & (1 << 15))	/* USB || USBDA */
+#define CST43239_CHIPMODE_SDIO(cs)	(((cs) & (1 << 0)) == 0)	/* SDIO */
+#define CST43239_CHIPMODE_GSPI(cs)	(((cs) & (1 << 0)) == (1 << 0))	/* gSPI */
+
+/* 4324 resources */
+/* 43242 uses the same PMU as 4324 */
+#define RES4324_LPLDO_PU			0
+#define RES4324_RESET_PULLDN_DIS		1
+#define RES4324_PMU_BG_PU			2
+#define RES4324_HSIC_LDO_PU			3
+#define RES4324_CBUCK_LPOM_PU			4
+#define RES4324_CBUCK_PFM_PU			5
+#define RES4324_CLDO_PU				6
+#define RES4324_LPLDO2_LVM			7
+#define RES4324_LNLDO1_PU			8
+#define RES4324_LNLDO2_PU			9
+#define RES4324_LDO3P3_PU			10
+#define RES4324_OTP_PU				11
+#define RES4324_XTAL_PU				12
+#define RES4324_BBPLL_PU			13
+#define RES4324_LQ_AVAIL			14
+#define RES4324_WL_CORE_READY			17
+#define RES4324_ILP_REQ				18
+#define RES4324_ALP_AVAIL			19
+#define RES4324_PALDO_PU			20
+#define RES4324_RADIO_PU			21
+#define RES4324_SR_CLK_STABLE			22
+#define RES4324_SR_SAVE_RESTORE			23
+#define RES4324_SR_PHY_PWRSW			24
+#define RES4324_SR_PHY_PIC			25
+#define RES4324_SR_SUBCORE_PWRSW		26
+#define RES4324_SR_SUBCORE_PIC			27
+#define RES4324_SR_MEM_PM0			28
+#define RES4324_HT_AVAIL			29
+#define RES4324_MACPHY_CLKAVAIL			30
+
+/* 4324 Chip specific ChipStatus register bits */
+#define CST4324_SPROM_MASK			0x00000080
+#define CST4324_SFLASH_MASK			0x00400000
+#define	CST4324_RES_INIT_MODE_SHIFT	10
+#define	CST4324_RES_INIT_MODE_MASK	0x00000c00
+#define CST4324_CHIPMODE_MASK		0x7
+#define CST4324_CHIPMODE_SDIOD(cs)	((~(cs)) & (1 << 2))	/* SDIO || gSPI */
+#define CST4324_CHIPMODE_USB20D(cs)	(((cs) & CST4324_CHIPMODE_MASK) == 0x6)	/* USB || USBDA */
+
+/* 43242 Chip specific ChipStatus register bits */
+#define CST43242_SFLASH_MASK                    0x00000008
+#define CST43242_SR_HALT			(1<<25)
+#define CST43242_SR_CHIP_STATUS_2		27 /* bit 27 */
+
+/* 4331 resources */
+#define RES4331_REGULATOR		0
+#define RES4331_ILP_REQUEST		1
+#define RES4331_XTAL_PU			2
+#define RES4331_ALP_AVAIL		3
+#define RES4331_SI_PLL_ON		4
+#define RES4331_HT_SI_AVAIL		5
+
+/* 4331 chip-specific ChipControl register bits */
+#define CCTRL4331_BT_COEXIST		(1<<0)	/* 0 disable */
+#define CCTRL4331_SECI			(1<<1)	/* 0 SECI is disabled (JTAG functional) */
+#define CCTRL4331_EXT_LNA_G		(1<<2)	/* 0 disable */
+#define CCTRL4331_SPROM_GPIO13_15       (1<<3)  /* sprom/gpio13-15 mux */
+#define CCTRL4331_EXTPA_EN		(1<<4)	/* 0 ext pa disable, 1 ext pa enabled */
+#define CCTRL4331_GPIOCLK_ON_SPROMCS	(1<<5)	/* set drive out GPIO_CLK on sprom_cs pin */
+#define CCTRL4331_PCIE_MDIO_ON_SPROMCS	(1<<6)	/* use sprom_cs pin as PCIE mdio interface */
+#define CCTRL4331_EXTPA_ON_GPIO2_5	(1<<7)	/* aband extpa will be at gpio2/5 and sprom_dout */
+#define CCTRL4331_OVR_PIPEAUXCLKEN	(1<<8)	/* override core control on pipe_AuxClkEnable */
+#define CCTRL4331_OVR_PIPEAUXPWRDOWN	(1<<9)	/* override core control on pipe_AuxPowerDown */
+#define CCTRL4331_PCIE_AUXCLKEN		(1<<10)	/* pcie_auxclkenable */
+#define CCTRL4331_PCIE_PIPE_PLLDOWN	(1<<11)	/* pcie_pipe_pllpowerdown */
+#define CCTRL4331_EXTPA_EN2		(1<<12)	/* 0 ext pa disable, 1 ext pa enabled */
+#define CCTRL4331_EXT_LNA_A		(1<<13)	/* 0 disable */
+#define CCTRL4331_BT_SHD0_ON_GPIO4	(1<<16)	/* enable bt_shd0 at gpio4 */
+#define CCTRL4331_BT_SHD1_ON_GPIO5	(1<<17)	/* enable bt_shd1 at gpio5 */
+#define CCTRL4331_EXTPA_ANA_EN		(1<<24)	/* 0 ext pa disable, 1 ext pa enabled */
+
+/* 4331 Chip specific ChipStatus register bits */
+#define	CST4331_XTAL_FREQ		0x00000001	/* crystal frequency 20/40MHz */
+#define	CST4331_SPROM_OTP_SEL_MASK	0x00000006
+#define	CST4331_SPROM_OTP_SEL_SHIFT	1
+#define	CST4331_SPROM_PRESENT		0x00000002
+#define	CST4331_OTP_PRESENT		0x00000004
+#define	CST4331_LDO_RF			0x00000008
+#define	CST4331_LDO_PAR			0x00000010
+
+/* 4315 resource */
+#define RES4315_CBUCK_LPOM		1	/* 0x00000002 */
+#define RES4315_CBUCK_BURST		2	/* 0x00000004 */
+#define RES4315_CBUCK_PWM		3	/* 0x00000008 */
+#define RES4315_CLDO_PU			4	/* 0x00000010 */
+#define RES4315_PALDO_PU		5	/* 0x00000020 */
+#define RES4315_ILP_REQUEST		6	/* 0x00000040 */
+#define RES4315_LNLDO1_PU		9	/* 0x00000200 */
+#define RES4315_OTP_PU			10	/* 0x00000400 */
+#define RES4315_LNLDO2_PU		12	/* 0x00001000 */
+#define RES4315_XTAL_PU			13	/* 0x00002000 */
+#define RES4315_ALP_AVAIL		14	/* 0x00004000 */
+#define RES4315_RX_PWRSW_PU		15	/* 0x00008000 */
+#define RES4315_TX_PWRSW_PU		16	/* 0x00010000 */
+#define RES4315_RFPLL_PWRSW_PU		17	/* 0x00020000 */
+#define RES4315_LOGEN_PWRSW_PU		18	/* 0x00040000 */
+#define RES4315_AFE_PWRSW_PU		19	/* 0x00080000 */
+#define RES4315_BBPLL_PWRSW_PU		20	/* 0x00100000 */
+#define RES4315_HT_AVAIL		21	/* 0x00200000 */
+
+/* 4315 chip-specific ChipStatus register bits */
+#define CST4315_SPROM_OTP_SEL_MASK	0x00000003	/* gpio [7:6], SDIO CIS selection */
+#define CST4315_DEFCIS_SEL		0x00000000	/* use default CIS, OTP is powered up */
+#define CST4315_SPROM_SEL		0x00000001	/* use SPROM, OTP is powered up */
+#define CST4315_OTP_SEL			0x00000002	/* use OTP, OTP is powered up */
+#define CST4315_OTP_PWRDN		0x00000003	/* use SPROM, OTP is powered down */
+#define CST4315_SDIO_MODE		0x00000004	/* gpio [8], sdio/usb mode */
+#define CST4315_RCAL_VALID		0x00000008
+#define CST4315_RCAL_VALUE_MASK		0x000001f0
+#define CST4315_RCAL_VALUE_SHIFT	4
+#define CST4315_PALDO_EXTPNP		0x00000200	/* PALDO is configured with external PNP */
+#define CST4315_CBUCK_MODE_MASK		0x00000c00
+#define CST4315_CBUCK_MODE_BURST	0x00000400
+#define CST4315_CBUCK_MODE_LPBURST	0x00000c00
+
+/* 4319 resources */
+#define RES4319_CBUCK_LPOM		1	/* 0x00000002 */
+#define RES4319_CBUCK_BURST		2	/* 0x00000004 */
+#define RES4319_CBUCK_PWM		3	/* 0x00000008 */
+#define RES4319_CLDO_PU			4	/* 0x00000010 */
+#define RES4319_PALDO_PU		5	/* 0x00000020 */
+#define RES4319_ILP_REQUEST		6	/* 0x00000040 */
+#define RES4319_LNLDO1_PU		9	/* 0x00000200 */
+#define RES4319_OTP_PU			10	/* 0x00000400 */
+#define RES4319_LNLDO2_PU		12	/* 0x00001000 */
+#define RES4319_XTAL_PU			13	/* 0x00002000 */
+#define RES4319_ALP_AVAIL		14	/* 0x00004000 */
+#define RES4319_RX_PWRSW_PU		15	/* 0x00008000 */
+#define RES4319_TX_PWRSW_PU		16	/* 0x00010000 */
+#define RES4319_RFPLL_PWRSW_PU		17	/* 0x00020000 */
+#define RES4319_LOGEN_PWRSW_PU		18	/* 0x00040000 */
+#define RES4319_AFE_PWRSW_PU		19	/* 0x00080000 */
+#define RES4319_BBPLL_PWRSW_PU		20	/* 0x00100000 */
+#define RES4319_HT_AVAIL		21	/* 0x00200000 */
+
+/* 4319 chip-specific ChipStatus register bits */
+#define	CST4319_SPI_CPULESSUSB		0x00000001
+#define	CST4319_SPI_CLK_POL		0x00000002
+#define	CST4319_SPI_CLK_PH		0x00000008
+#define	CST4319_SPROM_OTP_SEL_MASK	0x000000c0	/* gpio [7:6], SDIO CIS selection */
+#define	CST4319_SPROM_OTP_SEL_SHIFT	6
+#define	CST4319_DEFCIS_SEL		0x00000000	/* use default CIS, OTP is powered up */
+#define	CST4319_SPROM_SEL		0x00000040	/* use SPROM, OTP is powered up */
+#define	CST4319_OTP_SEL			0x00000080      /* use OTP, OTP is powered up */
+#define	CST4319_OTP_PWRDN		0x000000c0      /* use SPROM, OTP is powered down */
+#define	CST4319_SDIO_USB_MODE		0x00000100	/* gpio [8], sdio/usb mode */
+#define	CST4319_REMAP_SEL_MASK		0x00000600
+#define	CST4319_ILPDIV_EN		0x00000800
+#define	CST4319_XTAL_PD_POL		0x00001000
+#define	CST4319_LPO_SEL			0x00002000
+#define	CST4319_RES_INIT_MODE		0x0000c000
+#define	CST4319_PALDO_EXTPNP		0x00010000	/* PALDO is configured with external PNP */
+#define	CST4319_CBUCK_MODE_MASK		0x00060000
+#define CST4319_CBUCK_MODE_BURST	0x00020000
+#define CST4319_CBUCK_MODE_LPBURST	0x00060000
+#define	CST4319_RCAL_VALID		0x01000000
+#define	CST4319_RCAL_VALUE_MASK		0x3e000000
+#define	CST4319_RCAL_VALUE_SHIFT	25
+
+#define PMU1_PLL0_CHIPCTL0		0
+#define PMU1_PLL0_CHIPCTL1		1
+#define PMU1_PLL0_CHIPCTL2		2
+#define CCTL_4319USB_XTAL_SEL_MASK	0x00180000
+#define CCTL_4319USB_XTAL_SEL_SHIFT	19
+#define CCTL_4319USB_48MHZ_PLL_SEL	1
+#define CCTL_4319USB_24MHZ_PLL_SEL	2
+
+/* PMU resources for 4336 */
+#define	RES4336_CBUCK_LPOM		0
+#define	RES4336_CBUCK_BURST		1
+#define	RES4336_CBUCK_LP_PWM		2
+#define	RES4336_CBUCK_PWM		3
+#define	RES4336_CLDO_PU			4
+#define	RES4336_DIS_INT_RESET_PD	5
+#define	RES4336_ILP_REQUEST		6
+#define	RES4336_LNLDO_PU		7
+#define	RES4336_LDO3P3_PU		8
+#define	RES4336_OTP_PU			9
+#define	RES4336_XTAL_PU			10
+#define	RES4336_ALP_AVAIL		11
+#define	RES4336_RADIO_PU		12
+#define	RES4336_BG_PU			13
+#define	RES4336_VREG1p4_PU_PU		14
+#define	RES4336_AFE_PWRSW_PU		15
+#define	RES4336_RX_PWRSW_PU		16
+#define	RES4336_TX_PWRSW_PU		17
+#define	RES4336_BB_PWRSW_PU		18
+#define	RES4336_SYNTH_PWRSW_PU		19
+#define	RES4336_MISC_PWRSW_PU		20
+#define	RES4336_LOGEN_PWRSW_PU		21
+#define	RES4336_BBPLL_PWRSW_PU		22
+#define	RES4336_MACPHY_CLKAVAIL		23
+#define	RES4336_HT_AVAIL		24
+#define	RES4336_RSVD			25
+
+/* 4336 chip-specific ChipStatus register bits */
+#define	CST4336_SPI_MODE_MASK		0x00000001
+#define	CST4336_SPROM_PRESENT		0x00000002
+#define	CST4336_OTP_PRESENT		0x00000004
+#define	CST4336_ARMREMAP_0		0x00000008
+#define	CST4336_ILPDIV_EN_MASK		0x00000010
+#define	CST4336_ILPDIV_EN_SHIFT		4
+#define	CST4336_XTAL_PD_POL_MASK	0x00000020
+#define	CST4336_XTAL_PD_POL_SHIFT	5
+#define	CST4336_LPO_SEL_MASK		0x00000040
+#define	CST4336_LPO_SEL_SHIFT		6
+#define	CST4336_RES_INIT_MODE_MASK	0x00000180
+#define	CST4336_RES_INIT_MODE_SHIFT	7
+#define	CST4336_CBUCK_MODE_MASK		0x00000600
+#define	CST4336_CBUCK_MODE_SHIFT	9
+
+/* 4336 Chip specific PMU ChipControl register bits */
+#define PCTL_4336_SERIAL_ENAB	(1  << 24)
+
+/* 4330 resources */
+#define	RES4330_CBUCK_LPOM		0
+#define	RES4330_CBUCK_BURST		1
+#define	RES4330_CBUCK_LP_PWM		2
+#define	RES4330_CBUCK_PWM		3
+#define	RES4330_CLDO_PU			4
+#define	RES4330_DIS_INT_RESET_PD	5
+#define	RES4330_ILP_REQUEST		6
+#define	RES4330_LNLDO_PU		7
+#define	RES4330_LDO3P3_PU		8
+#define	RES4330_OTP_PU			9
+#define	RES4330_XTAL_PU			10
+#define	RES4330_ALP_AVAIL		11
+#define	RES4330_RADIO_PU		12
+#define	RES4330_BG_PU			13
+#define	RES4330_VREG1p4_PU_PU		14
+#define	RES4330_AFE_PWRSW_PU		15
+#define	RES4330_RX_PWRSW_PU		16
+#define	RES4330_TX_PWRSW_PU		17
+#define	RES4330_BB_PWRSW_PU		18
+#define	RES4330_SYNTH_PWRSW_PU		19
+#define	RES4330_MISC_PWRSW_PU		20
+#define	RES4330_LOGEN_PWRSW_PU		21
+#define	RES4330_BBPLL_PWRSW_PU		22
+#define	RES4330_MACPHY_CLKAVAIL		23
+#define	RES4330_HT_AVAIL		24
+#define	RES4330_5gRX_PWRSW_PU		25
+#define	RES4330_5gTX_PWRSW_PU		26
+#define	RES4330_5g_LOGEN_PWRSW_PU	27
+
+/* 4330 chip-specific ChipStatus register bits */
+#define CST4330_CHIPMODE_SDIOD(cs)	(((cs) & 0x7) < 6)	/* SDIO || gSPI */
+#define CST4330_CHIPMODE_USB20D(cs)	(((cs) & 0x7) >= 6)	/* USB || USBDA */
+#define CST4330_CHIPMODE_SDIO(cs)	(((cs) & 0x4) == 0)	/* SDIO */
+#define CST4330_CHIPMODE_GSPI(cs)	(((cs) & 0x6) == 4)	/* gSPI */
+#define CST4330_CHIPMODE_USB(cs)	(((cs) & 0x7) == 6)	/* USB packet-oriented */
+#define CST4330_CHIPMODE_USBDA(cs)	(((cs) & 0x7) == 7)	/* USB Direct Access */
+#define	CST4330_OTP_PRESENT		0x00000010
+#define	CST4330_LPO_AUTODET_EN		0x00000020
+#define	CST4330_ARMREMAP_0		0x00000040
+#define	CST4330_SPROM_PRESENT		0x00000080	/* takes priority over OTP if both set */
+#define	CST4330_ILPDIV_EN		0x00000100
+#define	CST4330_LPO_SEL			0x00000200
+#define	CST4330_RES_INIT_MODE_SHIFT	10
+#define	CST4330_RES_INIT_MODE_MASK	0x00000c00
+#define CST4330_CBUCK_MODE_SHIFT	12
+#define CST4330_CBUCK_MODE_MASK		0x00003000
+#define	CST4330_CBUCK_POWER_OK		0x00004000
+#define	CST4330_BB_PLL_LOCKED		0x00008000
+#define SOCDEVRAM_BP_ADDR		0x1E000000
+#define SOCDEVRAM_ARM_ADDR		0x00800000
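+
+/*
+ * Illustrative sketch: decoding the 4330 interface mode, assuming the
+ * ChipStatus register value has been read into 'chipst' elsewhere:
+ *
+ *	if (CST4330_CHIPMODE_SDIOD(chipst))
+ *		... handle SDIO/gSPI host interface ...
+ *	else if (CST4330_CHIPMODE_USB20D(chipst))
+ *		... handle USB/USBDA host interface ...
+ */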
+
+/* 4330 Chip specific PMU ChipControl register bits */
+#define PCTL_4330_SERIAL_ENAB	(1  << 24)
+
+/* 4330 Chip specific ChipControl register bits */
+#define CCTRL_4330_GPIO_SEL		0x00000001    /* 1=select GPIOs to be muxed out */
+#define CCTRL_4330_ERCX_SEL		0x00000002    /* 1=select ERCX BT coex to be muxed out */
+#define CCTRL_4330_SDIO_HOST_WAKE	0x00000004    /* SDIO: 1=configure GPIO0 for host wake */
+#define CCTRL_4330_JTAG_DISABLE	0x00000008    /* 1=disable JTAG interface on mux'd pins */
+
+#define PMU_VREG0_ADDR				0
+#define PMU_VREG0_DISABLE_PULLD_BT_SHIFT	2
+#define PMU_VREG0_DISABLE_PULLD_WL_SHIFT	3
+
+#define PMU_VREG4_ADDR			4
+
+#define PMU_VREG4_CLDO_PWM_SHIFT	4
+#define PMU_VREG4_CLDO_PWM_MASK		0x7
+
+#define PMU_VREG4_LPLDO1_SHIFT		15
+#define PMU_VREG4_LPLDO1_MASK		0x7
+#define PMU_VREG4_LPLDO1_1p20V		0
+#define PMU_VREG4_LPLDO1_1p15V		1
+#define PMU_VREG4_LPLDO1_1p10V		2
+#define PMU_VREG4_LPLDO1_1p25V		3
+#define PMU_VREG4_LPLDO1_1p05V		4
+#define PMU_VREG4_LPLDO1_1p00V		5
+#define PMU_VREG4_LPLDO1_0p95V		6
+#define PMU_VREG4_LPLDO1_0p90V		7
+
+#define PMU_VREG4_LPLDO2_LVM_SHIFT	18
+#define PMU_VREG4_LPLDO2_LVM_MASK	0x7
+#define PMU_VREG4_LPLDO2_HVM_SHIFT	21
+#define PMU_VREG4_LPLDO2_HVM_MASK	0x7
+#define PMU_VREG4_LPLDO2_LVM_HVM_MASK	0x3f
+#define PMU_VREG4_LPLDO2_1p00V		0
+#define PMU_VREG4_LPLDO2_1p15V		1
+#define PMU_VREG4_LPLDO2_1p20V		2
+#define PMU_VREG4_LPLDO2_1p10V		3
+#define PMU_VREG4_LPLDO2_0p90V		4	/* 4 - 7 is 0.90V */
+
+#define PMU_VREG4_HSICLDO_BYPASS_SHIFT	27
+#define PMU_VREG4_HSICLDO_BYPASS_MASK	0x1
+
+#define PMU_VREG5_ADDR			5
+#define PMU_VREG5_HSICAVDD_PD_SHIFT	6
+#define PMU_VREG5_HSICAVDD_PD_MASK	0x1
+#define PMU_VREG5_HSICDVDD_PD_SHIFT	11
+#define PMU_VREG5_HSICDVDD_PD_MASK	0x1
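+
+/*
+ * Illustrative sketch: the VREG masks above apply to the field value
+ * before shifting, so e.g. a 1.10V LPLDO1 selection in vreg control word
+ * PMU_VREG4_ADDR is encoded as:
+ *
+ *	u32 v = (PMU_VREG4_LPLDO1_1p10V & PMU_VREG4_LPLDO1_MASK)
+ *		<< PMU_VREG4_LPLDO1_SHIFT;
+ */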
+
+/* 4334 resources */
+#define RES4334_LPLDO_PU		0
+#define RES4334_RESET_PULLDN_DIS	1
+#define RES4334_PMU_BG_PU		2
+#define RES4334_HSIC_LDO_PU		3
+#define RES4334_CBUCK_LPOM_PU		4
+#define RES4334_CBUCK_PFM_PU		5
+#define RES4334_CLDO_PU			6
+#define RES4334_LPLDO2_LVM		7
+#define RES4334_LNLDO_PU		8
+#define RES4334_LDO3P3_PU		9
+#define RES4334_OTP_PU			10
+#define RES4334_XTAL_PU			11
+#define RES4334_WL_PWRSW_PU		12
+#define RES4334_LQ_AVAIL		13
+#define RES4334_LOGIC_RET		14
+#define RES4334_MEM_SLEEP		15
+#define RES4334_MACPHY_RET		16
+#define RES4334_WL_CORE_READY		17
+#define RES4334_ILP_REQ			18
+#define RES4334_ALP_AVAIL		19
+#define RES4334_MISC_PWRSW_PU		20
+#define RES4334_SYNTH_PWRSW_PU		21
+#define RES4334_RX_PWRSW_PU		22
+#define RES4334_RADIO_PU		23
+#define RES4334_WL_PMU_PU		24
+#define RES4334_VCO_LDO_PU		25
+#define RES4334_AFE_LDO_PU		26
+#define RES4334_RX_LDO_PU		27
+#define RES4334_TX_LDO_PU		28
+#define RES4334_HT_AVAIL		29
+#define RES4334_MACPHY_CLK_AVAIL	30
+
+/* 4334 chip-specific ChipStatus register bits */
+#define CST4334_CHIPMODE_MASK		7
+#define CST4334_SDIO_MODE		0x00000000
+#define CST4334_SPI_MODE		0x00000004
+#define CST4334_HSIC_MODE		0x00000006
+#define CST4334_BLUSB_MODE		0x00000007
+#define CST4334_CHIPMODE_HSIC(cs)	(((cs) & CST4334_CHIPMODE_MASK) == CST4334_HSIC_MODE)
+#define CST4334_OTP_PRESENT		0x00000010
+#define CST4334_LPO_AUTODET_EN		0x00000020
+#define CST4334_ARMREMAP_0		0x00000040
+#define CST4334_SPROM_PRESENT		0x00000080
+#define CST4334_ILPDIV_EN_MASK		0x00000100
+#define CST4334_ILPDIV_EN_SHIFT		8
+#define CST4334_LPO_SEL_MASK		0x00000200
+#define CST4334_LPO_SEL_SHIFT		9
+#define CST4334_RES_INIT_MODE_MASK	0x00000C00
+#define CST4334_RES_INIT_MODE_SHIFT	10
+
+/* 4334 Chip specific PMU ChipControl register bits */
+#define PCTL_4334_GPIO3_ENAB    (1  << 3)
+
+/* 4334 Chip control */
+#define CCTRL4334_PMU_WAKEUP_GPIO1	(1  << 0)
+#define CCTRL4334_PMU_WAKEUP_HSIC	(1  << 1)
+#define CCTRL4334_PMU_WAKEUP_AOS	(1  << 2)
+#define CCTRL4334_HSIC_WAKE_MODE	(1  << 3)
+#define CCTRL4334_HSIC_INBAND_GPIO1	(1  << 4)
+#define CCTRL4334_HSIC_LDO_PU		(1  << 23)
+
+/* 4334 Chip control 3 */
+#define CCTRL4334_BLOCK_EXTRNL_WAKE		(1  << 4)
+#define CCTRL4334_SAVERESTORE_FIX		(1  << 5)
+
+/* 43341 Chip control 3 */
+#define CCTRL43341_BLOCK_EXTRNL_WAKE		(1  << 13)
+#define CCTRL43341_SAVERESTORE_FIX		(1  << 14)
+#define CCTRL43341_BT_ISO_SEL			(1  << 16)
+
+/* 4334 Chip specific ChipControl1 register bits */
+#define CCTRL1_4334_GPIO_SEL		(1 << 0)    /* 1=select GPIOs to be muxed out */
+#define CCTRL1_4334_ERCX_SEL		(1 << 1)    /* 1=select ERCX BT coex to be muxed out */
+#define CCTRL1_4334_SDIO_HOST_WAKE (1 << 2)  /* SDIO: 1=configure GPIO0 for host wake */
+#define CCTRL1_4334_JTAG_DISABLE	(1 << 3)    /* 1=disable JTAG interface on mux'd pins */
+#define CCTRL1_4334_UART_ON_4_5	(1 << 28)  	/* 1=UART_TX/UART_RX muxed on GPIO_4/5 (4334B0/1) */
+
+/* 4324 Chip specific ChipControl1 register bits */
+#define CCTRL1_4324_GPIO_SEL            (1 << 0)    /* 1=select GPIOs to be muxed out */
+#define CCTRL1_4324_SDIO_HOST_WAKE (1 << 2)  /* SDIO: 1=configure GPIO0 for host wake */
+
+/* 43143 chip-specific ChipStatus register bits based on Confluence documentation */
+/* register contains strap values sampled during POR */
+#define CST43143_REMAP_TO_ROM	 (3 << 0)    /* 00=Boot SRAM, 01=Boot ROM, 10=Boot SFLASH */
+#define CST43143_SDIO_EN	 (1 << 2)    /* 0 = USB Enab, SDIO pins are GPIO or I2S */
+#define CST43143_SDIO_ISO	 (1 << 3)    /* 1 = SDIO isolated */
+#define CST43143_USB_CPU_LESS	 (1 << 4)   /* 1 = CPULess mode Enabled */
+#define CST43143_CBUCK_MODE	 (3 << 6)   /* Indicates what controller mode CBUCK is in */
+#define CST43143_POK_CBUCK	 (1 << 8)   /* 1 = 1.2V CBUCK voltage ready */
+#define CST43143_PMU_OVRSPIKE	 (1 << 9)
+#define CST43143_PMU_OVRTEMP	 (0xF << 10)
+#define CST43143_SR_FLL_CAL_DONE (1 << 14)
+#define CST43143_USB_PLL_LOCKDET (1 << 15)
+#define CST43143_PMU_PLL_LOCKDET (1 << 16)
+#define CST43143_CHIPMODE_SDIOD(cs)	(((cs) & CST43143_SDIO_EN) != 0) /* SDIO */
+
+/* 43143 Chip specific ChipControl register bits */
+/* 00: SECI is disabled (JTAG functional), 01: 2 wire, 10: 4 wire  */
+#define CCTRL_43143_SECI		(1<<0)
+#define CCTRL_43143_BT_LEGACY		(1<<1)
+#define CCTRL_43143_I2S_MODE		(1<<2)	/* 0: SDIO enabled */
+#define CCTRL_43143_I2S_MASTER		(1<<3)	/* 0: I2S MCLK input disabled */
+#define CCTRL_43143_I2S_FULL		(1<<4)	/* 0: I2S SDIN and SPDIF_TX inputs disabled */
+#define CCTRL_43143_GSIO		(1<<5)	/* 0: sFlash enabled */
+#define CCTRL_43143_RF_SWCTRL_MASK	(7<<6)	/* 0: disabled */
+#define CCTRL_43143_RF_SWCTRL_0		(1<<6)
+#define CCTRL_43143_RF_SWCTRL_1		(2<<6)
+#define CCTRL_43143_RF_SWCTRL_2		(4<<6)
+#define CCTRL_43143_RF_XSWCTRL		(1<<9)	/* 0: UART enabled */
+#define CCTRL_43143_HOST_WAKE0		(1<<11)	/* 1: SDIO separate interrupt output from GPIO4 */
+#define CCTRL_43143_HOST_WAKE1		(1<<12)	/* 1: SDIO separate interrupt output from GPIO16 */
+
+/* 43143 resources, based on pmu_params.xls V1.19 */
+#define RES43143_EXT_SWITCHER_PWM	0	/* 0x00001 */
+#define RES43143_XTAL_PU		1	/* 0x00002 */
+#define RES43143_ILP_REQUEST		2	/* 0x00004 */
+#define RES43143_ALP_AVAIL		3	/* 0x00008 */
+#define RES43143_WL_CORE_READY		4	/* 0x00010 */
+#define RES43143_BBPLL_PWRSW_PU		5	/* 0x00020 */
+#define RES43143_HT_AVAIL		6	/* 0x00040 */
+#define RES43143_RADIO_PU		7	/* 0x00080 */
+#define RES43143_MACPHY_CLK_AVAIL	8	/* 0x00100 */
+#define RES43143_OTP_PU			9	/* 0x00200 */
+#define RES43143_LQ_AVAIL		10	/* 0x00400 */
+
+#define PMU43143_XTAL_CORE_SIZE_MASK	0x3F
+
+/* 4313 resources */
+#define	RES4313_BB_PU_RSRC		0
+#define	RES4313_ILP_REQ_RSRC		1
+#define	RES4313_XTAL_PU_RSRC		2
+#define	RES4313_ALP_AVAIL_RSRC		3
+#define	RES4313_RADIO_PU_RSRC		4
+#define	RES4313_BG_PU_RSRC		5
+#define	RES4313_VREG1P4_PU_RSRC		6
+#define	RES4313_AFE_PWRSW_RSRC		7
+#define	RES4313_RX_PWRSW_RSRC		8
+#define	RES4313_TX_PWRSW_RSRC		9
+#define	RES4313_BB_PWRSW_RSRC		10
+#define	RES4313_SYNTH_PWRSW_RSRC	11
+#define	RES4313_MISC_PWRSW_RSRC		12
+#define	RES4313_BB_PLL_PWRSW_RSRC	13
+#define	RES4313_HT_AVAIL_RSRC		14
+#define	RES4313_MACPHY_CLK_AVAIL_RSRC	15
+
+/* 4313 chip-specific ChipStatus register bits */
+#define	CST4313_SPROM_PRESENT			1
+#define	CST4313_OTP_PRESENT			2
+#define	CST4313_SPROM_OTP_SEL_MASK		0x00000002
+#define	CST4313_SPROM_OTP_SEL_SHIFT		0
+
+/* 4313 Chip specific ChipControl register bits */
+#define CCTRL_4313_12MA_LED_DRIVE    0x00000007    /* 12 mA drive strength for later 4313 */
+
+/* PMU resources for 4314 */
+#define RES4314_LPLDO_PU		0
+#define RES4314_PMU_SLEEP_DIS		1
+#define RES4314_PMU_BG_PU		2
+#define RES4314_CBUCK_LPOM_PU		3
+#define RES4314_CBUCK_PFM_PU		4
+#define RES4314_CLDO_PU			5
+#define RES4314_LPLDO2_LVM		6
+#define RES4314_WL_PMU_PU		7
+#define RES4314_LNLDO_PU		8
+#define RES4314_LDO3P3_PU		9
+#define RES4314_OTP_PU			10
+#define RES4314_XTAL_PU			11
+#define RES4314_WL_PWRSW_PU		12
+#define RES4314_LQ_AVAIL		13
+#define RES4314_LOGIC_RET		14
+#define RES4314_MEM_SLEEP		15
+#define RES4314_MACPHY_RET		16
+#define RES4314_WL_CORE_READY		17
+#define RES4314_ILP_REQ			18
+#define RES4314_ALP_AVAIL		19
+#define RES4314_MISC_PWRSW_PU		20
+#define RES4314_SYNTH_PWRSW_PU		21
+#define RES4314_RX_PWRSW_PU		22
+#define RES4314_RADIO_PU		23
+#define RES4314_VCO_LDO_PU		24
+#define RES4314_AFE_LDO_PU		25
+#define RES4314_RX_LDO_PU		26
+#define RES4314_TX_LDO_PU		27
+#define RES4314_HT_AVAIL		28
+#define RES4314_MACPHY_CLK_AVAIL	29
+
+/* 4314 chip-specific ChipStatus register bits */
+#define CST4314_OTP_ENABLED		0x00200000
+
+/* 43228 resources */
+#define RES43228_NOT_USED		0
+#define RES43228_ILP_REQUEST		1
+#define RES43228_XTAL_PU		2
+#define RES43228_ALP_AVAIL		3
+#define RES43228_PLL_EN			4
+#define RES43228_HT_PHY_AVAIL		5
+
+/* 43228 chip-specific ChipStatus register bits */
+#define CST43228_ILP_DIV_EN		0x1
+#define	CST43228_OTP_PRESENT		0x2
+#define	CST43228_SERDES_REFCLK_PADSEL	0x4
+#define	CST43228_SDIO_MODE		0x8
+#define	CST43228_SDIO_OTP_PRESENT	0x10
+#define	CST43228_SDIO_RESET		0x20
+
+/* 4706 chip-specific ChipStatus register bits */
+#define	CST4706_PKG_OPTION		(1<<0) /* 0: full-featured package, 1: low-cost package */
+#define	CST4706_SFLASH_PRESENT	(1<<1) /* 0: parallel, 1: serial flash is present */
+#define	CST4706_SFLASH_TYPE		(1<<2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmel-s flash */
+#define	CST4706_MIPS_BENDIAN	(1<<3) /* 0: little, 1: big endian */
+#define	CST4706_PCIE1_DISABLE	(1<<5) /* PCIE1 enable strap pin */
+
+/* 4706 flashstrconfig reg bits */
+#define FLSTRCF4706_MASK		0x000000ff
+#define FLSTRCF4706_SF1			0x00000001	/* 2nd serial flash present */
+#define FLSTRCF4706_PF1			0x00000002	/* 2nd parallel flash present */
+#define FLSTRCF4706_SF1_TYPE	0x00000004	/* 2nd serial flash type : 0 : ST, 1 : Atmel */
+#define FLSTRCF4706_NF1			0x00000008	/* 2nd NAND flash present */
+#define FLSTRCF4706_1ST_MADDR_SEG_MASK		0x000000f0	/* Valid value mask */
+#define FLSTRCF4706_1ST_MADDR_SEG_4MB		0x00000010	/* 4MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_8MB		0x00000020	/* 8MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_16MB		0x00000030	/* 16MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_32MB		0x00000040	/* 32MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_64MB		0x00000050	/* 64MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_128MB		0x00000060	/* 128MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_256MB		0x00000070	/* 256MB */
+
+/* 4360 Chip specific ChipControl register bits */
+#define CCTRL4360_I2C_MODE			(1 << 0)
+#define CCTRL4360_UART_MODE			(1 << 1)
+#define CCTRL4360_SECI_MODE			(1 << 2)
+#define CCTRL4360_BTSWCTRL_MODE			(1 << 3)
+#define CCTRL4360_DISCRETE_FEMCTRL_MODE		(1 << 4)
+#define CCTRL4360_DIGITAL_PACTRL_MODE		(1 << 5)
+#define CCTRL4360_BTSWCTRL_AND_DIGPA_PRESENT	(1 << 6)
+#define CCTRL4360_EXTRA_GPIO_MODE		(1 << 7)
+#define CCTRL4360_EXTRA_FEMCTRL_MODE		(1 << 8)
+#define CCTRL4360_BT_LGCY_MODE			(1 << 9)
+#define CCTRL4360_CORE2FEMCTRL4_ON		(1 << 21)
+#define CCTRL4360_SECI_ON_GPIO01		(1 << 24)
+
+/* 4360 Chip specific Regulator Control register bits */
+#define RCTRL4360_RFLDO_PWR_DOWN		(1 << 1)
+
+/* 4360 PMU resources and chip status bits */
+#define RES4360_REGULATOR          0
+#define RES4360_ILP_AVAIL          1
+#define RES4360_ILP_REQ            2
+#define RES4360_XTAL_LDO_PU        3
+#define RES4360_XTAL_PU            4
+#define RES4360_ALP_AVAIL          5
+#define RES4360_BBPLLPWRSW_PU      6
+#define RES4360_HT_AVAIL           7
+#define RES4360_OTP_PU             8
+
+#define CST4360_XTAL_40MZ                  0x00000001
+#define CST4360_SFLASH                     0x00000002
+#define CST4360_SPROM_PRESENT              0x00000004
+#define CST4360_SFLASH_TYPE                0x00000004
+#define CST4360_OTP_ENABLED                0x00000008
+#define CST4360_REMAP_ROM                  0x00000010
+#define CST4360_RSRC_INIT_MODE_MASK        0x00000060
+#define CST4360_RSRC_INIT_MODE_SHIFT       5
+#define CST4360_ILP_DIVEN                  0x00000080
+#define CST4360_MODE_USB                   0x00000100
+#define CST4360_SPROM_SIZE_MASK            0x00000600
+#define CST4360_SPROM_SIZE_SHIFT           9
+#define CST4360_BBPLL_LOCK                 0x00000800
+#define CST4360_AVBBPLL_LOCK               0x00001000
+#define CST4360_USBBBPLL_LOCK              0x00002000
+#define CST4360_RSRC_INIT_MODE(cs)	(((cs) & CST4360_RSRC_INIT_MODE_MASK) >> \
+					CST4360_RSRC_INIT_MODE_SHIFT)
+
+#define CCTRL_4360_UART_SEL	0x2
+
+
+/* 43602 PMU resources based on pmu_params.xls version v0.95 */
+#define RES43602_LPLDO_PU		0
+#define RES43602_REGULATOR		1
+#define RES43602_PMU_SLEEP		2
+#define RES43602_RSVD_3			3
+#define RES43602_XTALLDO_PU		4
+#define RES43602_SERDES_PU		5
+#define RES43602_BBPLL_PWRSW_PU		6
+#define RES43602_SR_CLK_START		7
+#define RES43602_SR_PHY_PWRSW		8
+#define RES43602_SR_SUBCORE_PWRSW	9
+#define RES43602_XTAL_PU		10
+#define	RES43602_PERST_OVR		11
+#define RES43602_SR_CLK_STABLE		12
+#define RES43602_SR_SAVE_RESTORE	13
+#define RES43602_SR_SLEEP		14
+#define RES43602_LQ_START		15
+#define RES43602_LQ_AVAIL		16
+#define RES43602_WL_CORE_RDY		17
+#define RES43602_ILP_REQ		18
+#define RES43602_ALP_AVAIL		19
+#define RES43602_RADIO_PU		20
+#define RES43602_RFLDO_PU		21
+#define RES43602_HT_START		22
+#define RES43602_HT_AVAIL		23
+#define RES43602_MACPHY_CLKAVAIL	24
+#define RES43602_PARLDO_PU		25
+#define RES43602_RSVD_26		26
+
+/* 43602 chip status bits */
+#define CST43602_SPROM_PRESENT             (1<<1)
+#define CST43602_SPROM_SIZE                (1<<10) /* 0 = 16K, 1 = 4K */
+#define CST43602_BBPLL_LOCK                (1<<11)
+#define CST43602_RF_LDO_OUT_OK             (1<<15) /* RF LDO output OK */
+
+#define PMU43602_CC2_FORCE_EXT_LPO         (1 << 19) /* 1=ext LPO clock is the final LPO clock */
+#define PMU43602_CC2_XTAL32_SEL            (1 << 30) /* 0=ext_clock, 1=xtal */
+
+#define CC_SR1_43602_SR_ASM_ADDR	(0x0)
+
+/* PLL CTL register values for open loop, used during S/R operation */
+#define PMU43602_PLL_CTL6_VAL		0x68000528
+#define PMU43602_PLL_CTL7_VAL		0x6
+
+#define PMU43602_CC3_ARMCR4_DBG_CLK	(1 << 29)
+
+
+/* 43430 PMU resources based on pmu_params.xls */
+#define RES43430_LPLDO_PU				0
+#define RES43430_BG_PU					1
+#define RES43430_PMU_SLEEP				2
+#define RES43430_RSVD_3					3
+#define RES43430_CBUCK_LPOM_PU			4
+#define RES43430_CBUCK_PFM_PU			5
+#define RES43430_COLD_START_WAIT		6
+#define RES43430_RSVD_7					7
+#define RES43430_LNLDO_PU				8
+#define RES43430_RSVD_9					9
+#define RES43430_LDO3P3_PU				10
+#define RES43430_OTP_PU					11
+#define RES43430_XTAL_PU				12
+#define RES43430_SR_CLK_START			13
+#define RES43430_LQ_AVAIL				14
+#define RES43430_LQ_START				15
+#define RES43430_RSVD_16				16
+#define RES43430_WL_CORE_RDY			17
+#define RES43430_ILP_REQ				18
+#define RES43430_ALP_AVAIL				19
+#define RES43430_MINI_PMU				20
+#define RES43430_RADIO_PU				21
+#define RES43430_SR_CLK_STABLE			22
+#define RES43430_SR_SAVE_RESTORE		23
+#define RES43430_SR_PHY_PWRSW			24
+#define RES43430_SR_VDDM_PWRSW			25
+#define RES43430_SR_SUBCORE_PWRSW		26
+#define RES43430_SR_SLEEP				27
+#define RES43430_HT_START				28
+#define RES43430_HT_AVAIL				29
+#define RES43430_MACPHY_CLK_AVAIL		30
+
+/* 43430 chip status bits */
+#define CST43430_SDIO_MODE				0x00000001
+#define CST43430_GSPI_MODE				0x00000002
+#define CST43430_RSRC_INIT_MODE_0		0x00000080
+#define CST43430_RSRC_INIT_MODE_1		0x00000100
+#define CST43430_SEL0_SDIO				0x00000200
+#define CST43430_SEL1_SDIO				0x00000400
+#define CST43430_SEL2_SDIO				0x00000800
+#define CST43430_BBPLL_LOCKED			0x00001000
+#define CST43430_DBG_INST_DETECT		0x00004000
+#define CST43430_CLB2WL_BT_READY		0x00020000
+#define CST43430_JTAG_MODE				0x00100000
+#define CST43430_HOST_IFACE				0x00400000
+#define CST43430_TRIM_EN				0x00800000
+#define CST43430_DIN_PACKAGE_OPTION		0x10000000
+
+/* defines to detect active host interface in use */
+#define CHIP_HOSTIF_PCIEMODE	0x1
+#define CHIP_HOSTIF_USBMODE	0x2
+#define CHIP_HOSTIF_SDIOMODE	0x4
+#define CHIP_HOSTIF_PCIE(sih)	(si_chip_hostif(sih) == CHIP_HOSTIF_PCIEMODE)
+#define CHIP_HOSTIF_USB(sih)	(si_chip_hostif(sih) == CHIP_HOSTIF_USBMODE)
+#define CHIP_HOSTIF_SDIO(sih)	(si_chip_hostif(sih) == CHIP_HOSTIF_SDIOMODE)
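+
+/*
+ * Illustrative sketch: these macros assume an si_chip_hostif() accessor
+ * returning one of the CHIP_HOSTIF_*MODE values, so callers can branch as:
+ *
+ *	if (CHIP_HOSTIF_SDIO(sih))
+ *		... SDIO-specific setup ...
+ */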
+
+/* 4335 resources */
+#define RES4335_LPLDO_PO           0
+#define RES4335_PMU_BG_PU          1
+#define RES4335_PMU_SLEEP          2
+#define RES4335_RSVD_3             3
+#define RES4335_CBUCK_LPOM_PU		4
+#define RES4335_CBUCK_PFM_PU		5
+#define RES4335_RSVD_6             6
+#define RES4335_RSVD_7             7
+#define RES4335_LNLDO_PU           8
+#define RES4335_XTALLDO_PU         9
+#define RES4335_LDO3P3_PU			10
+#define RES4335_OTP_PU				11
+#define RES4335_XTAL_PU				12
+#define RES4335_SR_CLK_START       13
+#define RES4335_LQ_AVAIL			14
+#define RES4335_LQ_START           15
+#define RES4335_RSVD_16            16
+#define RES4335_WL_CORE_RDY        17
+#define RES4335_ILP_REQ				18
+#define RES4335_ALP_AVAIL			19
+#define RES4335_MINI_PMU           20
+#define RES4335_RADIO_PU			21
+#define RES4335_SR_CLK_STABLE		22
+#define RES4335_SR_SAVE_RESTORE		23
+#define RES4335_SR_PHY_PWRSW		24
+#define RES4335_SR_VDDM_PWRSW      25
+#define RES4335_SR_SUBCORE_PWRSW	26
+#define RES4335_SR_SLEEP           27
+#define RES4335_HT_START           28
+#define RES4335_HT_AVAIL			29
+#define RES4335_MACPHY_CLKAVAIL		30
+
+/* 4335 Chip specific ChipStatus register bits */
+#define CST4335_SPROM_MASK			0x00000020
+#define CST4335_SFLASH_MASK			0x00000040
+#define	CST4335_RES_INIT_MODE_SHIFT	7
+#define	CST4335_RES_INIT_MODE_MASK	0x00000180
+#define CST4335_CHIPMODE_MASK		0xF
+#define CST4335_CHIPMODE_SDIOD(cs)	(((cs) & (1 << 0)) != 0)	/* SDIO */
+#define CST4335_CHIPMODE_GSPI(cs)	(((cs) & (1 << 1)) != 0)	/* gSPI */
+#define CST4335_CHIPMODE_USB20D(cs)	(((cs) & (1 << 2)) != 0)	/* HSIC || USBDA */
+#define CST4335_CHIPMODE_PCIE(cs)	(((cs) & (1 << 3)) != 0)	/* PCIE */
+
+/* 4335 Chip specific ChipControl1 register bits */
+#define CCTRL1_4335_GPIO_SEL		(1 << 0)    /* 1=select GPIOs to be muxed out */
+#define CCTRL1_4335_SDIO_HOST_WAKE (1 << 2)  /* SDIO: 1=configure GPIO0 for host wake */
+
+#define PATCHTBL_SIZE			(0x800)
+#define CR4_4335_RAM_BASE                    (0x180000)
+#define CR4_4345_RAM_BASE                    (0x1b0000)
+#define CR4_4349_RAM_BASE                    (0x180000)
+#define CR4_4350_RAM_BASE                    (0x180000)
+#define CR4_4360_RAM_BASE                    (0x0)
+#define CR4_43602_RAM_BASE                   (0x180000)
+
+/* 4335 chip OTP present & OTP select bits. */
+#define SPROM4335_OTP_SELECT	0x00000010
+#define SPROM4335_OTP_PRESENT	0x00000020
+
+/* 4335 GCI specific bits. */
+#define CC4335_GCI_STRAP_OVERRIDE_SFLASH_PRESENT	(1 << 24)
+#define CC4335_GCI_STRAP_OVERRIDE_SFLASH_TYPE	25
+#define CC4335_GCI_FUNC_SEL_PAD_SDIO	0x00707770
+
+/* SFLASH clkdiv specific bits. */
+#define CC4335_SFLASH_CLKDIV_MASK	0x1F000000
+#define CC4335_SFLASH_CLKDIV_SHIFT	25
+
+/* 4335 OTP bits for SFLASH. */
+#define CC4335_SROM_OTP_SFLASH	40
+#define CC4335_SROM_OTP_SFLASH_PRESENT	0x1
+#define CC4335_SROM_OTP_SFLASH_TYPE	0x2
+#define CC4335_SROM_OTP_SFLASH_CLKDIV_MASK	0x003C
+#define CC4335_SROM_OTP_SFLASH_CLKDIV_SHIFT	2
+
+/* 4335 resources--END */
+
+/* 4345 Chip specific ChipStatus register bits */
+#define CST4345_SPROM_MASK		0x00000020
+#define CST4345_SFLASH_MASK		0x00000040
+#define CST4345_RES_INIT_MODE_SHIFT	7
+#define CST4345_RES_INIT_MODE_MASK	0x00000180
+#define CST4345_CHIPMODE_MASK		0x4000F
+#define CST4345_CHIPMODE_SDIOD(cs)	(((cs) & (1 << 0)) != 0)	/* SDIO */
+#define CST4345_CHIPMODE_GSPI(cs)	(((cs) & (1 << 1)) != 0)	/* gSPI */
+#define CST4345_CHIPMODE_HSIC(cs)	(((cs) & (1 << 2)) != 0)	/* HSIC */
+#define CST4345_CHIPMODE_PCIE(cs)	(((cs) & (1 << 3)) != 0)	/* PCIE */
+#define CST4345_CHIPMODE_USB20D(cs)	(((cs) & (1 << 18)) != 0)	/* USBDA */
+
+/* 4350 Chipcommon ChipStatus bits */
+#define CST4350_SDIO_MODE		0x00000001
+#define CST4350_HSIC20D_MODE		0x00000002
+#define CST4350_BP_ON_HSIC_CLK		0x00000004
+#define CST4350_PCIE_MODE		0x00000008
+#define CST4350_USB20D_MODE		0x00000010
+#define CST4350_USB30D_MODE		0x00000020
+#define CST4350_SPROM_PRESENT		0x00000040
+#define CST4350_RSRC_INIT_MODE_0	0x00000080
+#define CST4350_RSRC_INIT_MODE_1	0x00000100
+#define CST4350_SEL0_SDIO		0x00000200
+#define CST4350_SEL1_SDIO		0x00000400
+#define CST4350_SDIO_PAD_MODE		0x00000800
+#define CST4350_BBPLL_LOCKED		0x00001000
+#define CST4350_USBPLL_LOCKED		0x00002000
+#define CST4350_LINE_STATE		0x0000C000
+#define CST4350_SERDES_PIPE_PLLLOCK	0x00010000
+#define CST4350_BT_READY		0x00020000
+#define CST4350_SFLASH_PRESENT		0x00040000
+#define CST4350_CPULESS_ENABLE		0x00080000
+#define CST4350_STRAP_HOST_IFC_1	0x00100000
+#define CST4350_STRAP_HOST_IFC_2	0x00200000
+#define CST4350_STRAP_HOST_IFC_3	0x00400000
+#define CST4350_RAW_SPROM_PRESENT	0x00800000
+#define CST4350_APP_CLK_SWITCH_SEL_RDBACK	0x01000000
+#define CST4350_RAW_RSRC_INIT_MODE_0	0x02000000
+#define CST4350_SDIO_PAD_VDDIO		0x04000000
+#define CST4350_GSPI_MODE		0x08000000
+#define CST4350_PACKAGE_OPTION		0xF0000000
+#define CST4350_PACKAGE_SHIFT		28
+
+/* package option for 4350 */
+#define CST4350_PACKAGE_WLCSP		0x0
+#define CST4350_PACKAGE_PCIE		0x1
+#define CST4350_PACKAGE_WLBGA		0x2
+#define CST4350_PACKAGE_DBG		0x3
+#define CST4350_PACKAGE_USB		0x4
+#define CST4350_PACKAGE_USB_HSIC	0x4
+
+#define CST4350_PKG_MODE(cs)	(((cs) & CST4350_PACKAGE_OPTION) >> CST4350_PACKAGE_SHIFT)
+
+#define CST4350_PKG_WLCSP(cs)		(CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_WLCSP))
+#define CST4350_PKG_PCIE(cs)		(CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_PCIE))
+#define CST4350_PKG_WLBGA(cs)		(CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_WLBGA))
+#define CST4350_PKG_USB(cs)		(CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_USB))
+#define CST4350_PKG_USB_HSIC(cs)	(CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_USB_HSIC))
+
+/* 4350C0 USB package: raw_sprom_present is reused to indicate a 40MHz xtal */
+#define CST4350_PKG_USB_40M(cs)		((cs) & CST4350_RAW_SPROM_PRESENT)
+
+/* strap_host_ifc strap value */
+#define CST4350_HOST_IFC_MASK		0x00700000
+#define CST4350_HOST_IFC_SHIFT		20
+
+/* host_ifc raw mode */
+#define CST4350_IFC_MODE_SDIOD			0x0
+#define CST4350_IFC_MODE_HSIC20D		0x1
+#define CST4350_IFC_MODE_HSIC30D		0x2
+#define CST4350_IFC_MODE_PCIE			0x3
+#define CST4350_IFC_MODE_USB20D			0x4
+#define CST4350_IFC_MODE_USB30D			0x5
+#define CST4350_IFC_MODE_USB30D_WL		0x6
+#define CST4350_IFC_MODE_USB30D_BT		0x7
+
+#define CST4350_IFC_MODE(cs)	((cs & CST4350_HOST_IFC_MASK) >> CST4350_HOST_IFC_SHIFT)
+
+#define CST4350_CHIPMODE_SDIOD(cs)	(CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_SDIOD))
+#define CST4350_CHIPMODE_USB20D(cs)	((CST4350_IFC_MODE(cs)) == (CST4350_IFC_MODE_USB20D))
+#define CST4350_CHIPMODE_HSIC20D(cs)	(CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_HSIC20D))
+#define CST4350_CHIPMODE_HSIC30D(cs)	(CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_HSIC30D))
+#define CST4350_CHIPMODE_USB30D(cs)	(CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_USB30D))
+#define CST4350_CHIPMODE_USB30D_WL(cs)	(CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_USB30D_WL))
+#define CST4350_CHIPMODE_PCIE(cs)	(CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_PCIE))
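+
+/*
+ * Illustrative decode (not part of the original header): for a hypothetical
+ * ChipStatus value cs = 0x00300000, CST4350_IFC_MODE(cs) evaluates to
+ * (0x00300000 & 0x00700000) >> 20 == 0x3 == CST4350_IFC_MODE_PCIE, so
+ * CST4350_CHIPMODE_PCIE(cs) is true. Likewise CST4350_PKG_MODE(cs) reads the
+ * package option from the top nibble, (cs & 0xF0000000) >> 28.
+ */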
+
+/* 4350 PMU resources */
+#define RES4350_LPLDO_PU	0
+#define RES4350_PMU_BG_PU	1
+#define RES4350_PMU_SLEEP	2
+#define RES4350_RSVD_3		3
+#define RES4350_CBUCK_LPOM_PU	4
+#define RES4350_CBUCK_PFM_PU	5
+#define RES4350_COLD_START_WAIT	6
+#define RES4350_RSVD_7		7
+#define RES4350_LNLDO_PU	8
+#define RES4350_XTALLDO_PU	9
+#define RES4350_LDO3P3_PU	10
+#define RES4350_OTP_PU		11
+#define RES4350_XTAL_PU		12
+#define RES4350_SR_CLK_START	13
+#define RES4350_LQ_AVAIL	14
+#define RES4350_LQ_START	15
+#define RES4350_RSVD_16		16
+#define RES4350_WL_CORE_RDY	17
+#define RES4350_ILP_REQ		18
+#define RES4350_ALP_AVAIL	19
+#define RES4350_MINI_PMU	20
+#define RES4350_RADIO_PU	21
+#define RES4350_SR_CLK_STABLE	22
+#define RES4350_SR_SAVE_RESTORE	23
+#define RES4350_SR_PHY_PWRSW	24
+#define RES4350_SR_VDDM_PWRSW	25
+#define RES4350_SR_SUBCORE_PWRSW	26
+#define RES4350_SR_SLEEP	27
+#define RES4350_HT_START	28
+#define RES4350_HT_AVAIL	29
+#define RES4350_MACPHY_CLKAVAIL	30
+
+#define MUXENAB4350_UART_MASK		(0x0000000f)
+#define MUXENAB4350_UART_SHIFT		0
+#define MUXENAB4350_HOSTWAKE_MASK	(0x000000f0)	/* configure GPIO for SDIO host_wake */
+#define MUXENAB4350_HOSTWAKE_SHIFT	4
+
+
+/* 4350 GCI function sel values */
+#define CC4350_FNSEL_HWDEF		(0)
+#define CC4350_FNSEL_SAMEASPIN		(1)
+#define CC4350_FNSEL_UART		(2)
+#define CC4350_FNSEL_SFLASH		(3)
+#define CC4350_FNSEL_SPROM		(4)
+#define CC4350_FNSEL_I2C		(5)
+#define CC4350_FNSEL_MISC0		(6)
+#define CC4350_FNSEL_GCI		(7)
+#define CC4350_FNSEL_MISC1		(8)
+#define CC4350_FNSEL_MISC2		(9)
+#define CC4350_FNSEL_PWDOG 		(10)
+#define CC4350_FNSEL_IND		(12)
+#define CC4350_FNSEL_PDN		(13)
+#define CC4350_FNSEL_PUP		(14)
+#define CC4350_FNSEL_TRISTATE		(15)
+#define CC4350C_FNSEL_UART		(3)
+
+
+/* 4350 GPIO */
+#define CC4350_PIN_GPIO_00		(0)
+#define CC4350_PIN_GPIO_01		(1)
+#define CC4350_PIN_GPIO_02		(2)
+#define CC4350_PIN_GPIO_03		(3)
+#define CC4350_PIN_GPIO_04		(4)
+#define CC4350_PIN_GPIO_05		(5)
+#define CC4350_PIN_GPIO_06		(6)
+#define CC4350_PIN_GPIO_07		(7)
+#define CC4350_PIN_GPIO_08		(8)
+#define CC4350_PIN_GPIO_09		(9)
+#define CC4350_PIN_GPIO_10		(10)
+#define CC4350_PIN_GPIO_11		(11)
+#define CC4350_PIN_GPIO_12		(12)
+#define CC4350_PIN_GPIO_13		(13)
+#define CC4350_PIN_GPIO_14		(14)
+#define CC4350_PIN_GPIO_15		(15)
+
+#define CC2_4350_PHY_PWRSW_UPTIME_MASK		(0xf << 0)
+#define CC2_4350_PHY_PWRSW_UPTIME_SHIFT		(0)
+#define CC2_4350_VDDM_PWRSW_UPDELAY_MASK	(0xf << 4)
+#define CC2_4350_VDDM_PWRSW_UPDELAY_SHIFT	(4)
+#define CC2_4350_VDDM_PWRSW_UPTIME_MASK		(0xf << 8)
+#define CC2_4350_VDDM_PWRSW_UPTIME_SHIFT	(8)
+#define CC2_4350_SBC_PWRSW_DNDELAY_MASK		(0x3 << 12)
+#define CC2_4350_SBC_PWRSW_DNDELAY_SHIFT	(12)
+#define CC2_4350_PHY_PWRSW_DNDELAY_MASK		(0x3 << 14)
+#define CC2_4350_PHY_PWRSW_DNDELAY_SHIFT	(14)
+#define CC2_4350_VDDM_PWRSW_DNDELAY_MASK	(0x3 << 16)
+#define CC2_4350_VDDM_PWRSW_DNDELAY_SHIFT	(16)
+#define CC2_4350_VDDM_PWRSW_EN_MASK		(1 << 20)
+#define CC2_4350_VDDM_PWRSW_EN_SHIFT		(20)
+#define CC2_4350_MEMLPLDO_PWRSW_EN_MASK		(1 << 21)
+#define CC2_4350_MEMLPLDO_PWRSW_EN_SHIFT	(21)
+#define CC2_4350_SDIO_AOS_WAKEUP_MASK		(1 << 24)
+#define CC2_4350_SDIO_AOS_WAKEUP_SHIFT		(24)
+
+/* Applies to 4335/4350/4345 */
+#define CC3_SR_CLK_SR_MEM_MASK			(1 << 0)
+#define CC3_SR_CLK_SR_MEM_SHIFT			(0)
+#define CC3_SR_BIT1_TBD_MASK			(1 << 1)
+#define CC3_SR_BIT1_TBD_SHIFT			(1)
+#define CC3_SR_ENGINE_ENABLE_MASK		(1 << 2)
+#define CC3_SR_ENGINE_ENABLE_SHIFT		(2)
+#define CC3_SR_BIT3_TBD_MASK			(1 << 3)
+#define CC3_SR_BIT3_TBD_SHIFT			(3)
+#define CC3_SR_MINDIV_FAST_CLK_MASK		(0xF << 4)
+#define CC3_SR_MINDIV_FAST_CLK_SHIFT		(4)
+#define CC3_SR_R23_SR2_RISE_EDGE_TRIG_MASK	(1 << 8)
+#define CC3_SR_R23_SR2_RISE_EDGE_TRIG_SHIFT	(8)
+#define CC3_SR_R23_SR2_FALL_EDGE_TRIG_MASK	(1 << 9)
+#define CC3_SR_R23_SR2_FALL_EDGE_TRIG_SHIFT	(9)
+#define CC3_SR_R23_SR_RISE_EDGE_TRIG_MASK	(1 << 10)
+#define CC3_SR_R23_SR_RISE_EDGE_TRIG_SHIFT	(10)
+#define CC3_SR_R23_SR_FALL_EDGE_TRIG_MASK	(1 << 11)
+#define CC3_SR_R23_SR_FALL_EDGE_TRIG_SHIFT	(11)
+#define CC3_SR_NUM_CLK_HIGH_MASK		(0x7 << 12)
+#define CC3_SR_NUM_CLK_HIGH_SHIFT		(12)
+#define CC3_SR_BIT15_TBD_MASK			(1 << 15)
+#define CC3_SR_BIT15_TBD_SHIFT			(15)
+#define CC3_SR_PHY_FUNC_PIC_MASK		(1 << 16)
+#define CC3_SR_PHY_FUNC_PIC_SHIFT		(16)
+#define CC3_SR_BIT17_19_TBD_MASK		(0x7 << 17)
+#define CC3_SR_BIT17_19_TBD_SHIFT		(17)
+#define CC3_SR_CHIP_TRIGGER_1_MASK		(1 << 20)
+#define CC3_SR_CHIP_TRIGGER_1_SHIFT		(20)
+#define CC3_SR_CHIP_TRIGGER_2_MASK		(1 << 21)
+#define CC3_SR_CHIP_TRIGGER_2_SHIFT		(21)
+#define CC3_SR_CHIP_TRIGGER_3_MASK		(1 << 22)
+#define CC3_SR_CHIP_TRIGGER_3_SHIFT		(22)
+#define CC3_SR_CHIP_TRIGGER_4_MASK		(1 << 23)
+#define CC3_SR_CHIP_TRIGGER_4_SHIFT		(23)
+#define CC3_SR_ALLOW_SBC_FUNC_PIC_MASK		(1 << 24)
+#define CC3_SR_ALLOW_SBC_FUNC_PIC_SHIFT		(24)
+#define CC3_SR_BIT25_26_TBD_MASK		(0x3 << 25)
+#define CC3_SR_BIT25_26_TBD_SHIFT		(25)
+#define CC3_SR_ALLOW_SBC_STBY_MASK		(1 << 27)
+#define CC3_SR_ALLOW_SBC_STBY_SHIFT		(27)
+#define CC3_SR_GPIO_MUX_MASK			(0xF << 28)
+#define CC3_SR_GPIO_MUX_SHIFT			(28)
+
+/* Applies to 4335/4350/4345 */
+#define CC4_SR_INIT_ADDR_MASK		(0x3FF0000)
+#define 	CC4_4350_SR_ASM_ADDR	(0x30)
+#define 	CC4_4335_SR_ASM_ADDR	(0x48)
+#define 	CC4_4345_SR_ASM_ADDR	(0x48)
+#define CC4_SR_INIT_ADDR_SHIFT		(16)
+
+#define CC4_4350_EN_SR_CLK_ALP_MASK	(1 << 30)
+#define CC4_4350_EN_SR_CLK_ALP_SHIFT	(30)
+#define CC4_4350_EN_SR_CLK_HT_MASK	(1 << 31)
+#define CC4_4350_EN_SR_CLK_HT_SHIFT	(31)
+
+#define VREG4_4350_MEMLPDO_PU_MASK	(1 << 31)
+#define VREG4_4350_MEMLPDO_PU_SHIFT	31
+
+#define CC6_4350_PCIE_CLKREQ_WAKEUP_MASK	(1 << 4)
+#define CC6_4350_PCIE_CLKREQ_WAKEUP_SHIFT	(4)
+#define CC6_4350_PMU_WAKEUP_ALPAVAIL_MASK	(1 << 6)
+#define CC6_4350_PMU_WAKEUP_ALPAVAIL_SHIFT	(6)
+
+/* GCI chipcontrol register indices */
+#define CC_GCI_CHIPCTRL_00	(0)
+#define CC_GCI_CHIPCTRL_01	(1)
+#define CC_GCI_CHIPCTRL_02	(2)
+#define CC_GCI_CHIPCTRL_03	(3)
+#define CC_GCI_CHIPCTRL_04	(4)
+#define CC_GCI_CHIPCTRL_05	(5)
+#define CC_GCI_CHIPCTRL_06	(6)
+#define CC_GCI_CHIPCTRL_07	(7)
+#define CC_GCI_CHIPCTRL_08	(8)
+#define CC_GCI_XTAL_BUFSTRG_NFC (0xff << 12)
+
+#define CC_GCI_06_JTAG_SEL_SHIFT	4
+#define CC_GCI_06_JTAG_SEL_MASK		(1 << 4)
+
+#define CC_GCI_NUMCHIPCTRLREGS(cap1)	((cap1 & 0xF00) >> 8)
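+
+/*
+ * Worked example (illustrative): for a capabilities word cap1 = 0x00000500,
+ * CC_GCI_NUMCHIPCTRLREGS(cap1) == (0x500 & 0xF00) >> 8 == 5 chipcontrol regs.
+ */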
+
+/* 4345 PMU resources */
+#define RES4345_LPLDO_PU		0
+#define RES4345_PMU_BG_PU		1
+#define RES4345_PMU_SLEEP 		2
+#define RES4345_HSICLDO_PU		3
+#define RES4345_CBUCK_LPOM_PU		4
+#define RES4345_CBUCK_PFM_PU		5
+#define RES4345_COLD_START_WAIT		6
+#define RES4345_RSVD_7			7
+#define RES4345_LNLDO_PU		8
+#define RES4345_XTALLDO_PU		9
+#define RES4345_LDO3P3_PU		10
+#define RES4345_OTP_PU			11
+#define RES4345_XTAL_PU			12
+#define RES4345_SR_CLK_START		13
+#define RES4345_LQ_AVAIL		14
+#define RES4345_LQ_START		15
+#define RES4345_PERST_OVR		16
+#define RES4345_WL_CORE_RDY		17
+#define RES4345_ILP_REQ			18
+#define RES4345_ALP_AVAIL		19
+#define RES4345_MINI_PMU		20
+#define RES4345_RADIO_PU		21
+#define RES4345_SR_CLK_STABLE		22
+#define RES4345_SR_SAVE_RESTORE		23
+#define RES4345_SR_PHY_PWRSW		24
+#define RES4345_SR_VDDM_PWRSW		25
+#define RES4345_SR_SUBCORE_PWRSW	26
+#define RES4345_SR_SLEEP		27
+#define RES4345_HT_START		28
+#define RES4345_HT_AVAIL		29
+#define RES4345_MACPHY_CLK_AVAIL	30
+
+/* 4335 pins
+ * Note: only the values set as default/used are added here.
+ */
+#define CC4335_PIN_GPIO_00		(0)
+#define CC4335_PIN_GPIO_01		(1)
+#define CC4335_PIN_GPIO_02		(2)
+#define CC4335_PIN_GPIO_03		(3)
+#define CC4335_PIN_GPIO_04		(4)
+#define CC4335_PIN_GPIO_05		(5)
+#define CC4335_PIN_GPIO_06		(6)
+#define CC4335_PIN_GPIO_07		(7)
+#define CC4335_PIN_GPIO_08		(8)
+#define CC4335_PIN_GPIO_09		(9)
+#define CC4335_PIN_GPIO_10		(10)
+#define CC4335_PIN_GPIO_11		(11)
+#define CC4335_PIN_GPIO_12		(12)
+#define CC4335_PIN_GPIO_13		(13)
+#define CC4335_PIN_GPIO_14		(14)
+#define CC4335_PIN_GPIO_15		(15)
+#define CC4335_PIN_SDIO_CLK		(16)
+#define CC4335_PIN_SDIO_CMD		(17)
+#define CC4335_PIN_SDIO_DATA0	(18)
+#define CC4335_PIN_SDIO_DATA1	(19)
+#define CC4335_PIN_SDIO_DATA2	(20)
+#define CC4335_PIN_SDIO_DATA3	(21)
+#define CC4335_PIN_RF_SW_CTRL_6	(22)
+#define CC4335_PIN_RF_SW_CTRL_7	(23)
+#define CC4335_PIN_RF_SW_CTRL_8	(24)
+#define CC4335_PIN_RF_SW_CTRL_9	(25)
+
+/* 4335 GCI function sel values */
+#define CC4335_FNSEL_HWDEF		(0)
+#define CC4335_FNSEL_SAMEASPIN	(1)
+#define CC4335_FNSEL_GPIO0		(2)
+#define CC4335_FNSEL_GPIO1		(3)
+#define CC4335_FNSEL_GCI0		(4)
+#define CC4335_FNSEL_GCI1		(5)
+#define CC4335_FNSEL_UART		(6)
+#define CC4335_FNSEL_SFLASH		(7)
+#define CC4335_FNSEL_SPROM		(8)
+#define CC4335_FNSEL_MISC0		(9)
+#define CC4335_FNSEL_MISC1		(10)
+#define CC4335_FNSEL_MISC2		(11)
+#define CC4335_FNSEL_IND		(12)
+#define CC4335_FNSEL_PDN		(13)
+#define CC4335_FNSEL_PUP		(14)
+#define CC4335_FNSEL_TRI		(15)
+
+/* 4345 pins
+ * Note: only the values set as default/used are added here.
+ */
+#define CC4345_PIN_GPIO_00		(0)
+#define CC4345_PIN_GPIO_01		(1)
+#define CC4345_PIN_GPIO_02		(2)
+#define CC4345_PIN_GPIO_03		(3)
+#define CC4345_PIN_GPIO_04		(4)
+#define CC4345_PIN_GPIO_05		(5)
+#define CC4345_PIN_GPIO_06		(6)
+#define CC4345_PIN_GPIO_07		(7)
+#define CC4345_PIN_GPIO_08		(8)
+#define CC4345_PIN_GPIO_09		(9)
+#define CC4345_PIN_GPIO_10		(10)
+#define CC4345_PIN_GPIO_11		(11)
+#define CC4345_PIN_GPIO_12		(12)
+#define CC4345_PIN_GPIO_13		(13)
+#define CC4345_PIN_GPIO_14		(14)
+#define CC4345_PIN_GPIO_15		(15)
+#define CC4345_PIN_GPIO_16		(16)
+#define CC4345_PIN_SDIO_CLK		(17)
+#define CC4345_PIN_SDIO_CMD		(18)
+#define CC4345_PIN_SDIO_DATA0	(19)
+#define CC4345_PIN_SDIO_DATA1	(20)
+#define CC4345_PIN_SDIO_DATA2	(21)
+#define CC4345_PIN_SDIO_DATA3	(22)
+#define CC4345_PIN_RF_SW_CTRL_0	(23)
+#define CC4345_PIN_RF_SW_CTRL_1	(24)
+#define CC4345_PIN_RF_SW_CTRL_2	(25)
+#define CC4345_PIN_RF_SW_CTRL_3	(26)
+#define CC4345_PIN_RF_SW_CTRL_4	(27)
+#define CC4345_PIN_RF_SW_CTRL_5	(28)
+#define CC4345_PIN_RF_SW_CTRL_6	(29)
+#define CC4345_PIN_RF_SW_CTRL_7	(30)
+#define CC4345_PIN_RF_SW_CTRL_8	(31)
+#define CC4345_PIN_RF_SW_CTRL_9	(32)
+
+/* 4345 GCI function sel values */
+#define CC4345_FNSEL_HWDEF		(0)
+#define CC4345_FNSEL_SAMEASPIN		(1)
+#define CC4345_FNSEL_GPIO0		(2)
+#define CC4345_FNSEL_GPIO1		(3)
+#define CC4345_FNSEL_GCI0		(4)
+#define CC4345_FNSEL_GCI1		(5)
+#define CC4345_FNSEL_UART		(6)
+#define CC4345_FNSEL_SFLASH		(7)
+#define CC4345_FNSEL_SPROM		(8)
+#define CC4345_FNSEL_MISC0		(9)
+#define CC4345_FNSEL_MISC1		(10)
+#define CC4345_FNSEL_MISC2		(11)
+#define CC4345_FNSEL_IND		(12)
+#define CC4345_FNSEL_PDN		(13)
+#define CC4345_FNSEL_PUP		(14)
+#define CC4345_FNSEL_TRI		(15)
+
+#define MUXENAB4345_UART_MASK		(0x0000000f)
+#define MUXENAB4345_UART_SHIFT		0
+#define MUXENAB4345_HOSTWAKE_MASK	(0x000000f0)
+#define MUXENAB4345_HOSTWAKE_SHIFT	4
+
+/* GCI GPIO for function sel GCI-0/GCI-1 */
+#define CC_GCI_GPIO_0			(0)
+#define CC_GCI_GPIO_1			(1)
+#define CC_GCI_GPIO_2			(2)
+#define CC_GCI_GPIO_3			(3)
+#define CC_GCI_GPIO_4			(4)
+#define CC_GCI_GPIO_5			(5)
+#define CC_GCI_GPIO_6			(6)
+#define CC_GCI_GPIO_7			(7)
+
+/* indicates Invalid GPIO, e.g. when PAD GPIO doesn't map to GCI GPIO */
+#define CC_GCI_GPIO_INVALID		0xFF
+
+/* find the 4 bit mask given the bit position */
+#define GCIMASK(pos)  (((uint32)0xF) << pos)
+/* get the value which can be used to directly OR with chipcontrol reg */
+#define GCIPOSVAL(val, pos)  ((((uint32)val) << pos) & GCIMASK(pos))
+/* Extract nibble from a given position */
+#define GCIGETNBL(val, pos)	((val >> pos) & 0xF)
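+
+/*
+ * Worked example (illustrative): for bit position pos = 4,
+ * GCIMASK(4) == 0xF0; GCIPOSVAL(0x6, 4) == 0x60, ready to OR into the
+ * chipcontrol register; and GCIGETNBL(0x60, 4) recovers 0x6.
+ */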
+
+
+/* find the 8 bit mask given the bit position */
+#define GCIMASK_8B(pos)  (((uint32)0xFF) << pos)
+/* get the value which can be used to directly OR with chipcontrol reg */
+#define GCIPOSVAL_8B(val, pos)  ((((uint32)val) << pos) & GCIMASK_8B(pos))
+/* Extract byte from a given position */
+#define GCIGETNBL_8B(val, pos)	((val >> pos) & 0xFF)
+
+/* find the 4 bit mask given the bit position */
+#define GCIMASK_4B(pos)  (((uint32)0xF) << pos)
+/* get the value which can be used to directly OR with chipcontrol reg */
+#define GCIPOSVAL_4B(val, pos)  ((((uint32)val) << pos) & GCIMASK_4B(pos))
+/* Extract nibble from a given position */
+#define GCIGETNBL_4B(val, pos)	((val >> pos) & 0xF)
+
+
+#define GCI_INTSTATUS_GPIOINT		(1 << 25)
+#define GCI_INTSTATUS_GPIOWAKE		(1 << 26)
+#define GCI_INTMASK_GPIOINT		(1 << 25)
+#define GCI_INTMASK_GPIOWAKE		(1 << 26)
+#define GCI_WAKEMASK_GPIOINT		(1 << 25)
+#define GCI_WAKEMASK_GPIOWAKE		(1 << 26)
+
+
+/* 4335 MUX options. Each nibble belongs to one setting; a non-zero value
+ * selects a logic function. For now only UART is used (by the bootloader).
+ */
+#define MUXENAB4335_UART_MASK		(0x0000000f)
+
+#define MUXENAB4335_UART_SHIFT		0
+#define MUXENAB4335_HOSTWAKE_MASK	(0x000000f0)	/* configure GPIO for SDIO host_wake */
+#define MUXENAB4335_HOSTWAKE_SHIFT	4
+#define MUXENAB4335_GETIX(val, name) \
+	((((val) & MUXENAB4335_ ## name ## _MASK) >> MUXENAB4335_ ## name ## _SHIFT) - 1)
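+
+/*
+ * Worked example (illustrative): with val = 0x20 (HOSTWAKE nibble == 2),
+ * MUXENAB4335_GETIX(val, HOSTWAKE) == ((0x20 & 0xf0) >> 4) - 1 == 1.
+ * The stored nibble is biased by one so that a zero nibble can mean
+ * "not configured".
+ */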
+
+/*
+* Maximum delay for the PMU state transition in us.
+* This is an upper bound intended for spinwaits etc.
+*/
+#define PMU_MAX_TRANSITION_DLY	15000
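+
+/*
+ * Usage sketch (illustrative only): a spinwait bounded by this delay might
+ * look like the line below, where poll_pmu_status() and PMU_ST_BIT are
+ * hypothetical stand-ins for the real status read and status bit:
+ *
+ *	SPINWAIT((poll_pmu_status() & PMU_ST_BIT) == 0, PMU_MAX_TRANSITION_DLY);
+ */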
+
+/* PMU resource up transition time in ILP cycles */
+#define PMURES_UP_TRANSITION	2
+
+
+/* SECI configuration */
+#define SECI_MODE_UART			0x0
+#define SECI_MODE_SECI			0x1
+#define SECI_MODE_LEGACY_3WIRE_BT	0x2
+#define SECI_MODE_LEGACY_3WIRE_WLAN	0x3
+#define SECI_MODE_HALF_SECI		0x4
+
+#define SECI_RESET		(1 << 0)
+#define SECI_RESET_BAR_UART	(1 << 1)
+#define SECI_ENAB_SECI_ECI	(1 << 2)
+#define SECI_ENAB_SECIOUT_DIS	(1 << 3)
+#define SECI_MODE_MASK		0x7
+#define SECI_MODE_SHIFT		4 /* (bits 4, 5, 6) */
+#define SECI_UPD_SECI		(1 << 7)
+
+#define SECI_SIGNOFF_0     0xDB
+#define SECI_SIGNOFF_1     0
+
+/* seci clk_ctl_st bits */
+#define CLKCTL_STS_SECI_CLK_REQ		(1 << 8)
+#define CLKCTL_STS_SECI_CLK_AVAIL	(1 << 24)
+
+#define SECI_UART_MSR_CTS_STATE		(1 << 0)
+#define SECI_UART_MSR_RTS_STATE		(1 << 1)
+#define SECI_UART_SECI_IN_STATE		(1 << 2)
+#define SECI_UART_SECI_IN2_STATE	(1 << 3)
+
+/* SECI UART LCR/MCR register bits */
+#define SECI_UART_LCR_STOP_BITS		(1 << 0) /* 0 - 1bit, 1 - 2bits */
+#define SECI_UART_LCR_PARITY_EN		(1 << 1)
+#define SECI_UART_LCR_PARITY		(1 << 2) /* 0 - odd, 1 - even */
+#define SECI_UART_LCR_RX_EN		(1 << 3)
+#define SECI_UART_LCR_LBRK_CTRL		(1 << 4) /* 1 => SECI_OUT held low */
+#define SECI_UART_LCR_TXO_EN		(1 << 5)
+#define SECI_UART_LCR_RTSO_EN		(1 << 6)
+#define SECI_UART_LCR_SLIPMODE_EN	(1 << 7)
+#define SECI_UART_LCR_RXCRC_CHK		(1 << 8)
+#define SECI_UART_LCR_TXCRC_INV		(1 << 9)
+#define SECI_UART_LCR_TXCRC_LSBF	(1 << 10)
+#define SECI_UART_LCR_TXCRC_EN		(1 << 11)
+
+#define SECI_UART_MCR_TX_EN		(1 << 0)
+#define SECI_UART_MCR_PRTS		(1 << 1)
+#define SECI_UART_MCR_SWFLCTRL_EN	(1 << 2)
+#define SECI_UART_MCR_HIGHRATE_EN	(1 << 3)
+#define SECI_UART_MCR_LOOPBK_EN		(1 << 4)
+#define SECI_UART_MCR_AUTO_RTS		(1 << 5)
+#define SECI_UART_MCR_AUTO_TX_DIS	(1 << 6)
+#define SECI_UART_MCR_BAUD_ADJ_EN	(1 << 7)
+#define SECI_UART_MCR_XONOFF_RPT	(1 << 9)
+
+/* WLAN channel numbers - used from wifi.h */
+
+/* WLAN BW */
+#define ECI_BW_20   0x0
+#define ECI_BW_25   0x1
+#define ECI_BW_30   0x2
+#define ECI_BW_35   0x3
+#define ECI_BW_40   0x4
+#define ECI_BW_45   0x5
+#define ECI_BW_50   0x6
+#define ECI_BW_ALL  0x7
+
+/* WLAN - number of antenna */
+#define WLAN_NUM_ANT1 TXANT_0
+#define WLAN_NUM_ANT2 TXANT_1
+
+/* otpctrl1 0xF4 */
+#define OTPC_FORCE_PWR_OFF	0x02000000
+/* chipcommon s/r registers introduced with cc rev >= 48 */
+#define CC_SR_CTL0_ENABLE_MASK             0x1
+#define CC_SR_CTL0_ENABLE_SHIFT              0
+#define CC_SR_CTL0_EN_SR_ENG_CLK_SHIFT       1 /* sr_clk to sr_memory enable */
+#define CC_SR_CTL0_RSRC_TRIGGER_SHIFT        2 /* Rising edge resource trigger 0 to sr_engine  */
+#define CC_SR_CTL0_MIN_DIV_SHIFT             6 /* Min division value for fast clk in sr_engine */
+#define CC_SR_CTL0_EN_SBC_STBY_SHIFT        16 /* Allow Subcore mem StandBy? */
+#define CC_SR_CTL0_EN_SR_ALP_CLK_MASK_SHIFT 18
+#define CC_SR_CTL0_EN_SR_HT_CLK_SHIFT       19
+#define CC_SR_CTL0_ALLOW_PIC_SHIFT          20 /* Allow pic to separate power domains */
+#define CC_SR_CTL0_MAX_SR_LQ_CLK_CNT_SHIFT  25
+#define CC_SR_CTL0_EN_MEM_DISABLE_FOR_SLEEP 30
+
+#define	ECI_INLO_PKTDUR_MASK	0x000000f0 /* [7:4] - 4 bits */
+#define ECI_INLO_PKTDUR_SHIFT	4
+
+/* gci chip control bits */
+#define GCI_GPIO_CHIPCTRL_ENAB_IN_BIT		0
+#define GCI_GPIO_CHIPCTRL_ENAB_OP_BIT		1
+#define GCI_GPIO_CHIPCTRL_INVERT_BIT		2
+#define GCI_GPIO_CHIPCTRL_PULLUP_BIT		3
+#define GCI_GPIO_CHIPCTRL_PULLDN_BIT		4
+#define GCI_GPIO_CHIPCTRL_ENAB_BTSIG_BIT	5
+#define GCI_GPIO_CHIPCTRL_ENAB_OD_OP_BIT	6
+#define GCI_GPIO_CHIPCTRL_ENAB_EXT_GPIO_BIT	7
+
+/* gci GPIO input status bits */
+#define GCI_GPIO_STS_VALUE_BIT			0
+#define GCI_GPIO_STS_POS_EDGE_BIT		1
+#define GCI_GPIO_STS_NEG_EDGE_BIT		2
+#define GCI_GPIO_STS_FAST_EDGE_BIT		3
+#define GCI_GPIO_STS_CLEAR			0xF
+
+#define GCI_GPIO_STS_VALUE	(1 << GCI_GPIO_STS_VALUE_BIT)
+
+#endif	/* _SBCHIPC_H */
diff --git a/drivers/staging/edison-bcm43340/include/sbconfig.h b/drivers/staging/edison-bcm43340/include/sbconfig.h
new file mode 100644
index 0000000..83f7d66
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/sbconfig.h
@@ -0,0 +1,282 @@
+/*
+ * Broadcom SiliconBackplane hardware register definitions.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbconfig.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef	_SBCONFIG_H
+#define	_SBCONFIG_H
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif
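+
+/*
+ * Expansion sketch (illustrative): on source line 57, PAD expands as
+ * _XSTR(__LINE__) -> _PADLINE(57) -> pad57. The two-level indirection forces
+ * __LINE__ to be expanded before the ## paste, so every use of PAD in the
+ * register structs below yields a unique padding field name.
+ */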
+
+/* enumeration in SB is based on the premise that cores are contiguous in the
+ * enumeration space.
+ */
+#define SB_BUS_SIZE		0x10000		/* Each bus gets 64Kbytes for cores */
+#define SB_BUS_BASE(b)		(SI_ENUM_BASE + (b) * SB_BUS_SIZE)
+#define	SB_BUS_MAXCORES		(SB_BUS_SIZE / SI_CORE_SIZE)	/* Max cores per bus */
+
+/*
+ * Sonics Configuration Space Registers.
+ */
+#define	SBCONFIGOFF		0xf00		/* core sbconfig regs are the top 256 bytes of regs */
+#define	SBCONFIGSIZE		256		/* sizeof (sbconfig_t) */
+
+#define SBIPSFLAG		0x08
+#define SBTPSFLAG		0x18
+#define	SBTMERRLOGA		0x48		/* sonics >= 2.3 */
+#define	SBTMERRLOG		0x50		/* sonics >= 2.3 */
+#define SBADMATCH3		0x60
+#define SBADMATCH2		0x68
+#define SBADMATCH1		0x70
+#define SBIMSTATE		0x90
+#define SBINTVEC		0x94
+#define SBTMSTATELOW		0x98
+#define SBTMSTATEHIGH		0x9c
+#define SBBWA0			0xa0
+#define SBIMCONFIGLOW		0xa8
+#define SBIMCONFIGHIGH		0xac
+#define SBADMATCH0		0xb0
+#define SBTMCONFIGLOW		0xb8
+#define SBTMCONFIGHIGH		0xbc
+#define SBBCONFIG		0xc0
+#define SBBSTATE		0xc8
+#define SBACTCNFG		0xd8
+#define	SBFLAGST		0xe8
+#define SBIDLOW			0xf8
+#define SBIDHIGH		0xfc
+
+/* All the previous registers are above SBCONFIGOFF, but with Sonics 2.3, we have
+ * a few registers *below* that line. I think it would be very confusing to try
+ * and change the value of SBCONFIGOFF, so I'm defining them as absolute offsets here.
+ */
+
+#define SBIMERRLOGA		0xea8
+#define SBIMERRLOG		0xeb0
+#define SBTMPORTCONNID0		0xed8
+#define SBTMPORTLOCK0		0xef8
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+typedef volatile struct _sbconfig {
+	uint32	PAD[2];
+	uint32	sbipsflag;		/* initiator port ocp slave flag */
+	uint32	PAD[3];
+	uint32	sbtpsflag;		/* target port ocp slave flag */
+	uint32	PAD[11];
+	uint32	sbtmerrloga;		/* (sonics >= 2.3) */
+	uint32	PAD;
+	uint32	sbtmerrlog;		/* (sonics >= 2.3) */
+	uint32	PAD[3];
+	uint32	sbadmatch3;		/* address match3 */
+	uint32	PAD;
+	uint32	sbadmatch2;		/* address match2 */
+	uint32	PAD;
+	uint32	sbadmatch1;		/* address match1 */
+	uint32	PAD[7];
+	uint32	sbimstate;		/* initiator agent state */
+	uint32	sbintvec;		/* interrupt mask */
+	uint32	sbtmstatelow;		/* target state */
+	uint32	sbtmstatehigh;		/* target state */
+	uint32	sbbwa0;			/* bandwidth allocation table0 */
+	uint32	PAD;
+	uint32	sbimconfiglow;		/* initiator configuration */
+	uint32	sbimconfighigh;		/* initiator configuration */
+	uint32	sbadmatch0;		/* address match0 */
+	uint32	PAD;
+	uint32	sbtmconfiglow;		/* target configuration */
+	uint32	sbtmconfighigh;		/* target configuration */
+	uint32	sbbconfig;		/* broadcast configuration */
+	uint32	PAD;
+	uint32	sbbstate;		/* broadcast state */
+	uint32	PAD[3];
+	uint32	sbactcnfg;		/* activate configuration */
+	uint32	PAD[3];
+	uint32	sbflagst;		/* current sbflags */
+	uint32	PAD[3];
+	uint32	sbidlow;		/* identification */
+	uint32	sbidhigh;		/* identification */
+} sbconfig_t;
+
+#endif /* _LANGUAGE_ASSEMBLY */
+
+/* sbipsflag */
+#define	SBIPS_INT1_MASK		0x3f		/* which sbflags get routed to mips interrupt 1 */
+#define	SBIPS_INT1_SHIFT	0
+#define	SBIPS_INT2_MASK		0x3f00		/* which sbflags get routed to mips interrupt 2 */
+#define	SBIPS_INT2_SHIFT	8
+#define	SBIPS_INT3_MASK		0x3f0000	/* which sbflags get routed to mips interrupt 3 */
+#define	SBIPS_INT3_SHIFT	16
+#define	SBIPS_INT4_MASK		0x3f000000	/* which sbflags get routed to mips interrupt 4 */
+#define	SBIPS_INT4_SHIFT	24
+
+/* sbtpsflag */
+#define	SBTPS_NUM0_MASK		0x3f		/* interrupt sbFlag # generated by this core */
+#define	SBTPS_F0EN0		0x40		/* interrupt is always sent on the backplane */
+
+/* sbtmerrlog */
+#define	SBTMEL_CM		0x00000007	/* command */
+#define	SBTMEL_CI		0x0000ff00	/* connection id */
+#define	SBTMEL_EC		0x0f000000	/* error code */
+#define	SBTMEL_ME		0x80000000	/* multiple error */
+
+/* sbimstate */
+#define	SBIM_PC			0xf		/* pipecount */
+#define	SBIM_AP_MASK		0x30		/* arbitration policy */
+#define	SBIM_AP_BOTH		0x00		/* use both timeslices and token */
+#define	SBIM_AP_TS		0x10		/* use timeslices only */
+#define	SBIM_AP_TK		0x20		/* use token only */
+#define	SBIM_AP_RSV		0x30		/* reserved */
+#define	SBIM_IBE		0x20000		/* inbanderror */
+#define	SBIM_TO			0x40000		/* timeout */
+#define	SBIM_BY			0x01800000	/* busy (sonics >= 2.3) */
+#define	SBIM_RJ			0x02000000	/* reject (sonics >= 2.3) */
+
+/* sbtmstatelow */
+#define	SBTML_RESET		0x0001		/* reset */
+#define	SBTML_REJ_MASK		0x0006		/* reject field */
+#define	SBTML_REJ		0x0002		/* reject */
+#define	SBTML_TMPREJ		0x0004		/* temporary reject, for error recovery */
+
+#define	SBTML_SICF_SHIFT	16		/* Shift to locate the SI control flags in sbtml */
+
+/* sbtmstatehigh */
+#define	SBTMH_SERR		0x0001		/* serror */
+#define	SBTMH_INT		0x0002		/* interrupt */
+#define	SBTMH_BUSY		0x0004		/* busy */
+#define	SBTMH_TO		0x0020		/* timeout (sonics >= 2.3) */
+
+#define	SBTMH_SISF_SHIFT	16		/* Shift to locate the SI status flags in sbtmh */
+
+/* sbbwa0 */
+#define	SBBWA_TAB0_MASK		0xffff		/* lookup table 0 */
+#define	SBBWA_TAB1_MASK		0xffff		/* lookup table 1 */
+#define	SBBWA_TAB1_SHIFT	16
+
+/* sbimconfiglow */
+#define	SBIMCL_STO_MASK		0x7		/* service timeout */
+#define	SBIMCL_RTO_MASK		0x70		/* request timeout */
+#define	SBIMCL_RTO_SHIFT	4
+#define	SBIMCL_CID_MASK		0xff0000	/* connection id */
+#define	SBIMCL_CID_SHIFT	16
+
+/* sbimconfighigh */
+#define	SBIMCH_IEM_MASK		0xc		/* inband error mode */
+#define	SBIMCH_TEM_MASK		0x30		/* timeout error mode */
+#define	SBIMCH_TEM_SHIFT	4
+#define	SBIMCH_BEM_MASK		0xc0		/* bus error mode */
+#define	SBIMCH_BEM_SHIFT	6
+
+/* sbadmatch0 */
+#define	SBAM_TYPE_MASK		0x3		/* address type */
+#define	SBAM_AD64		0x4		/* reserved */
+#define	SBAM_ADINT0_MASK	0xf8		/* type0 size */
+#define	SBAM_ADINT0_SHIFT	3
+#define	SBAM_ADINT1_MASK	0x1f8		/* type1 size */
+#define	SBAM_ADINT1_SHIFT	3
+#define	SBAM_ADINT2_MASK	0x1f8		/* type2 size */
+#define	SBAM_ADINT2_SHIFT	3
+#define	SBAM_ADEN		0x400		/* enable */
+#define	SBAM_ADNEG		0x800		/* negative decode */
+#define	SBAM_BASE0_MASK		0xffffff00	/* type0 base address */
+#define	SBAM_BASE0_SHIFT	8
+#define	SBAM_BASE1_MASK		0xfffff000	/* type1 base address for the core */
+#define	SBAM_BASE1_SHIFT	12
+#define	SBAM_BASE2_MASK		0xffff0000	/* type2 base address for the core */
+#define	SBAM_BASE2_SHIFT	16
+
+/* sbtmconfiglow */
+#define	SBTMCL_CD_MASK		0xff		/* clock divide */
+#define	SBTMCL_CO_MASK		0xf800		/* clock offset */
+#define	SBTMCL_CO_SHIFT		11
+#define	SBTMCL_IF_MASK		0xfc0000	/* interrupt flags */
+#define	SBTMCL_IF_SHIFT		18
+#define	SBTMCL_IM_MASK		0x3000000	/* interrupt mode */
+#define	SBTMCL_IM_SHIFT		24
+
+/* sbtmconfighigh */
+#define	SBTMCH_BM_MASK		0x3		/* busy mode */
+#define	SBTMCH_RM_MASK		0x3		/* retry mode */
+#define	SBTMCH_RM_SHIFT		2
+#define	SBTMCH_SM_MASK		0x30		/* stop mode */
+#define	SBTMCH_SM_SHIFT		4
+#define	SBTMCH_EM_MASK		0x300		/* sb error mode */
+#define	SBTMCH_EM_SHIFT		8
+#define	SBTMCH_IM_MASK		0xc00		/* int mode */
+#define	SBTMCH_IM_SHIFT		10
+
+/* sbbconfig */
+#define	SBBC_LAT_MASK		0x3		/* sb latency */
+#define	SBBC_MAX0_MASK		0xf0000		/* maxccntr0 */
+#define	SBBC_MAX0_SHIFT		16
+#define	SBBC_MAX1_MASK		0xf00000	/* maxccntr1 */
+#define	SBBC_MAX1_SHIFT		20
+
+/* sbbstate */
+#define	SBBS_SRD		0x1		/* st reg disable */
+#define	SBBS_HRD		0x2		/* hold reg disable */
+
+/* sbidlow */
+#define	SBIDL_CS_MASK		0x3		/* config space */
+#define	SBIDL_AR_MASK		0x38		/* # address ranges supported */
+#define	SBIDL_AR_SHIFT		3
+#define	SBIDL_SYNCH		0x40		/* sync */
+#define	SBIDL_INIT		0x80		/* initiator */
+#define	SBIDL_MINLAT_MASK	0xf00		/* minimum backplane latency */
+#define	SBIDL_MINLAT_SHIFT	8
+#define	SBIDL_MAXLAT		0xf000		/* maximum backplane latency */
+#define	SBIDL_MAXLAT_SHIFT	12
+#define	SBIDL_FIRST		0x10000		/* this initiator is first */
+#define	SBIDL_CW_MASK		0xc0000		/* cycle counter width */
+#define	SBIDL_CW_SHIFT		18
+#define	SBIDL_TP_MASK		0xf00000	/* target ports */
+#define	SBIDL_TP_SHIFT		20
+#define	SBIDL_IP_MASK		0xf000000	/* initiator ports */
+#define	SBIDL_IP_SHIFT		24
+#define	SBIDL_RV_MASK		0xf0000000	/* sonics backplane revision code */
+#define	SBIDL_RV_SHIFT		28
+#define	SBIDL_RV_2_2		0x00000000	/* version 2.2 or earlier */
+#define	SBIDL_RV_2_3		0x10000000	/* version 2.3 */
+
+/* sbidhigh */
+#define	SBIDH_RC_MASK		0x000f		/* revision code */
+#define	SBIDH_RCE_MASK		0x7000		/* revision code extension field */
+#define	SBIDH_RCE_SHIFT		8
+#define	SBCOREREV(sbidh) \
+	((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK))
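+
+/*
+ * Worked example (illustrative): sbidhigh = 0x2005 gives
+ * ((0x2005 & 0x7000) >> 8) | (0x2005 & 0x000f) == 0x20 | 0x5 == 0x25,
+ * i.e. the extension field supplies bits [6:4] of the core revision.
+ */
+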
+#define	SBIDH_CC_MASK		0x8ff0		/* core code */
+#define	SBIDH_CC_SHIFT		4
+#define	SBIDH_VC_MASK		0xffff0000	/* vendor code */
+#define	SBIDH_VC_SHIFT		16
+
+#define	SB_COMMIT		0xfd8		/* update buffered registers value */
+
+/* vendor codes */
+#define	SB_VEND_BCM		0x4243		/* Broadcom's SB vendor code */
+
+#endif	/* _SBCONFIG_H */
diff --git a/drivers/staging/edison-bcm43340/include/sbhnddma.h b/drivers/staging/edison-bcm43340/include/sbhnddma.h
new file mode 100644
index 0000000..9db0fa1
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/sbhnddma.h
@@ -0,0 +1,416 @@
+/*
+ * Generic Broadcom Home Networking Division (HND) DMA engine HW interface
+ * This supports the following chips: BCM42xx, 44xx, 47xx.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbhnddma.h 424099 2013-09-16 07:44:34Z $
+ */
+
+#ifndef	_sbhnddma_h_
+#define	_sbhnddma_h_
+
+/* DMA structure:
+ *  support two DMA engines: 32 bits address or 64 bit addressing
+ *  basic DMA register set is per channel(transmit or receive)
+ *  a pair of channels is defined for convenience
+ */
+
+
+/* 32 bits addressing */
+
+/* dma registers per channel(xmt or rcv) */
+typedef volatile struct {
+	uint32	control;		/* enable, et al */
+	uint32	addr;			/* descriptor ring base address (4K aligned) */
+	uint32	ptr;			/* last descriptor posted to chip */
+	uint32	status;			/* current active descriptor, et al */
+} dma32regs_t;
+
+typedef volatile struct {
+	dma32regs_t	xmt;		/* dma tx channel */
+	dma32regs_t	rcv;		/* dma rx channel */
+} dma32regp_t;
+
+typedef volatile struct {	/* diag access */
+	uint32	fifoaddr;		/* diag address */
+	uint32	fifodatalow;		/* low 32bits of data */
+	uint32	fifodatahigh;		/* high 32bits of data */
+	uint32	pad;			/* reserved */
+} dma32diag_t;
+
+/*
+ * DMA Descriptor
+ * Descriptors are only read by the hardware, never written back.
+ */
+typedef volatile struct {
+	uint32	ctrl;		/* misc control bits & bufcount */
+	uint32	addr;		/* data buffer address */
+} dma32dd_t;
+
+/*
+ * Each descriptor ring must be 4096-byte aligned, and fit within a single 4096-byte page.
+ */
+#define	D32RINGALIGN_BITS	12
+#define	D32MAXRINGSZ		(1 << D32RINGALIGN_BITS)
+#define	D32RINGALIGN		(1 << D32RINGALIGN_BITS)
+
+#define	D32MAXDD	(D32MAXRINGSZ / sizeof (dma32dd_t))
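+
+/* Worked out (illustrative): sizeof(dma32dd_t) == 8 bytes, so D32MAXDD == 4096 / 8 == 512. */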
+
+/* transmit channel control */
+#define	XC_XE		((uint32)1 << 0)	/* transmit enable */
+#define	XC_SE		((uint32)1 << 1)	/* transmit suspend request */
+#define	XC_LE		((uint32)1 << 2)	/* loopback enable */
+#define	XC_FL		((uint32)1 << 4)	/* flush request */
+#define XC_MR_MASK	0x000001C0		/* Multiple outstanding reads */
+#define XC_MR_SHIFT	6
+#define	XC_PD		((uint32)1 << 11)	/* parity check disable */
+#define	XC_AE		((uint32)3 << 16)	/* address extension bits */
+#define	XC_AE_SHIFT	16
+#define XC_BL_MASK	0x001C0000		/* BurstLen bits */
+#define XC_BL_SHIFT	18
+#define XC_PC_MASK	0x00E00000		/* Prefetch control */
+#define XC_PC_SHIFT	21
+#define XC_PT_MASK	0x03000000		/* Prefetch threshold */
+#define XC_PT_SHIFT	24
+
+/* Multiple outstanding reads */
+#define DMA_MR_1	0
+#define DMA_MR_2	1
+#define DMA_MR_4	2
+#define DMA_MR_8	3
+#define DMA_MR_12	4
+#define DMA_MR_16	5
+#define DMA_MR_20	6
+#define DMA_MR_32	7
+
+/* DMA Burst Length in bytes */
+#define DMA_BL_16	0
+#define DMA_BL_32	1
+#define DMA_BL_64	2
+#define DMA_BL_128	3
+#define DMA_BL_256	4
+#define DMA_BL_512	5
+#define DMA_BL_1024	6
+
+/* Prefetch control */
+#define DMA_PC_0	0
+#define DMA_PC_4	1
+#define DMA_PC_8	2
+#define DMA_PC_16	3
+/* others: reserved */
+
+/* Prefetch threshold */
+#define DMA_PT_1	0
+#define DMA_PT_2	1
+#define DMA_PT_4	2
+#define DMA_PT_8	3
+
+/* transmit descriptor table pointer */
+#define	XP_LD_MASK	0xfff			/* last valid descriptor */
+
+/* transmit channel status */
+#define	XS_CD_MASK	0x0fff			/* current descriptor pointer */
+#define	XS_XS_MASK	0xf000			/* transmit state */
+#define	XS_XS_SHIFT	12
+#define	XS_XS_DISABLED	0x0000			/* disabled */
+#define	XS_XS_ACTIVE	0x1000			/* active */
+#define	XS_XS_IDLE	0x2000			/* idle wait */
+#define	XS_XS_STOPPED	0x3000			/* stopped */
+#define	XS_XS_SUSP	0x4000			/* suspend pending */
+#define	XS_XE_MASK	0xf0000			/* transmit errors */
+#define	XS_XE_SHIFT	16
+#define	XS_XE_NOERR	0x00000			/* no error */
+#define	XS_XE_DPE	0x10000			/* descriptor protocol error */
+#define	XS_XE_DFU	0x20000			/* data fifo underrun */
+#define	XS_XE_BEBR	0x30000			/* bus error on buffer read */
+#define	XS_XE_BEDA	0x40000			/* bus error on descriptor access */
+#define	XS_AD_MASK	0xfff00000		/* active descriptor */
+#define	XS_AD_SHIFT	20
+
+/* receive channel control */
+#define	RC_RE		((uint32)1 << 0)	/* receive enable */
+#define	RC_RO_MASK	0xfe			/* receive frame offset */
+#define	RC_RO_SHIFT	1
+#define	RC_FM		((uint32)1 << 8)	/* direct fifo receive (pio) mode */
+#define	RC_SH		((uint32)1 << 9)	/* separate rx header descriptor enable */
+#define	RC_OC		((uint32)1 << 10)	/* overflow continue */
+#define	RC_PD		((uint32)1 << 11)	/* parity check disable */
+#define	RC_AE		((uint32)3 << 16)	/* address extension bits */
+#define	RC_AE_SHIFT	16
+#define RC_BL_MASK	0x001C0000		/* BurstLen bits */
+#define RC_BL_SHIFT	18
+#define RC_PC_MASK	0x00E00000		/* Prefetch control */
+#define RC_PC_SHIFT	21
+#define RC_PT_MASK	0x03000000		/* Prefetch threshold */
+#define RC_PT_SHIFT	24
+
+/* receive descriptor table pointer */
+#define	RP_LD_MASK	0xfff			/* last valid descriptor */
+
+/* receive channel status */
+#define	RS_CD_MASK	0x0fff			/* current descriptor pointer */
+#define	RS_RS_MASK	0xf000			/* receive state */
+#define	RS_RS_SHIFT	12
+#define	RS_RS_DISABLED	0x0000			/* disabled */
+#define	RS_RS_ACTIVE	0x1000			/* active */
+#define	RS_RS_IDLE	0x2000			/* idle wait */
+#define	RS_RS_STOPPED	0x3000			/* reserved */
+#define	RS_RE_MASK	0xf0000			/* receive errors */
+#define	RS_RE_SHIFT	16
+#define	RS_RE_NOERR	0x00000			/* no error */
+#define	RS_RE_DPE	0x10000			/* descriptor protocol error */
+#define	RS_RE_DFO	0x20000			/* data fifo overflow */
+#define	RS_RE_BEBW	0x30000			/* bus error on buffer write */
+#define	RS_RE_BEDA	0x40000			/* bus error on descriptor access */
+#define	RS_AD_MASK	0xfff00000		/* active descriptor */
+#define	RS_AD_SHIFT	20
+
+/* fifoaddr */
+#define	FA_OFF_MASK	0xffff			/* offset */
+#define	FA_SEL_MASK	0xf0000			/* select */
+#define	FA_SEL_SHIFT	16
+#define	FA_SEL_XDD	0x00000			/* transmit dma data */
+#define	FA_SEL_XDP	0x10000			/* transmit dma pointers */
+#define	FA_SEL_RDD	0x40000			/* receive dma data */
+#define	FA_SEL_RDP	0x50000			/* receive dma pointers */
+#define	FA_SEL_XFD	0x80000			/* transmit fifo data */
+#define	FA_SEL_XFP	0x90000			/* transmit fifo pointers */
+#define	FA_SEL_RFD	0xc0000			/* receive fifo data */
+#define	FA_SEL_RFP	0xd0000			/* receive fifo pointers */
+#define	FA_SEL_RSD	0xe0000			/* receive frame status data */
+#define	FA_SEL_RSP	0xf0000			/* receive frame status pointers */
+
+/* descriptor control flags */
+#define	CTRL_BC_MASK	0x00001fff		/* buffer byte count, real data len must be <= 4KB */
+#define	CTRL_AE		((uint32)3 << 16)	/* address extension bits */
+#define	CTRL_AE_SHIFT	16
+#define	CTRL_PARITY	((uint32)3 << 18)	/* parity bit */
+#define	CTRL_EOT	((uint32)1 << 28)	/* end of descriptor table */
+#define	CTRL_IOC	((uint32)1 << 29)	/* interrupt on completion */
+#define	CTRL_EOF	((uint32)1 << 30)	/* end of frame */
+#define	CTRL_SOF	((uint32)1 << 31)	/* start of frame */
+
+/* control flags in the range [27:20] are core-specific and not defined here */
+#define	CTRL_CORE_MASK	0x0ff00000
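+
+/*
+ * Composition sketch (illustrative): a frame that fits in one descriptor sets
+ * both frame delimiters and requests a completion interrupt; 'len' below is a
+ * hypothetical buffer byte count:
+ *
+ *	ctrl = CTRL_SOF | CTRL_EOF | CTRL_IOC | (len & CTRL_BC_MASK);
+ */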
+
+/* 64 bits addressing */
+
+/* dma registers per channel(xmt or rcv) */
+typedef volatile struct {
+	uint32	control;		/* enable, et al */
+	uint32	ptr;			/* last descriptor posted to chip */
+	uint32	addrlow;		/* descriptor ring base address low 32-bits (8K aligned) */
+	uint32	addrhigh;		/* descriptor ring base address bits 63:32 (8K aligned) */
+	uint32	status0;		/* current descriptor, xmt state */
+	uint32	status1;		/* active descriptor, xmt error */
+} dma64regs_t;
+
+typedef volatile struct {
+	dma64regs_t	tx;		/* dma64 tx channel */
+	dma64regs_t	rx;		/* dma64 rx channel */
+} dma64regp_t;
+
+typedef volatile struct {		/* diag access */
+	uint32	fifoaddr;		/* diag address */
+	uint32	fifodatalow;		/* low 32bits of data */
+	uint32	fifodatahigh;		/* high 32bits of data */
+	uint32	pad;			/* reserved */
+} dma64diag_t;
+
+/*
+ * DMA Descriptor
+ * Descriptors are only read by the hardware, never written back.
+ */
+typedef volatile struct {
+	uint32	ctrl1;		/* misc control bits */
+	uint32	ctrl2;		/* buffer count and address extension */
+	uint32	addrlow;	/* memory address of the data buffer, bits 31:0 */
+	uint32	addrhigh;	/* memory address of the data buffer, bits 63:32 */
+} dma64dd_t;
+
+/*
+ * Each descriptor ring must be 8kB aligned, and fit within a contiguous 8kB physical address range.
+ */
+#define D64RINGALIGN_BITS	13
+#define	D64MAXRINGSZ		(1 << D64RINGALIGN_BITS)
+#define	D64RINGBOUNDARY		(1 << D64RINGALIGN_BITS)
+
+#define	D64MAXDD	(D64MAXRINGSZ / sizeof (dma64dd_t))
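+
+/* Worked out (illustrative): sizeof(dma64dd_t) == 16 bytes, so D64MAXDD == 8192 / 16 == 512. */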
+
+/* for cores with large descriptor ring support, descriptor ring size can be up to 4096 */
+#define	D64MAXDD_LARGE		((1 << 16) / sizeof (dma64dd_t))
+
+/* for cores with large descriptor ring support (4k descriptors), descriptor ring cannot cross
+ * 64K boundary
+ */
+#define	D64RINGBOUNDARY_LARGE	(1 << 16)
+
+/*
+ * Default DMA Burstlen values for USBRev >= 12 and SDIORev >= 11.
+ * When this field contains the value N, the burst length is 2**(N + 4) bytes.
+ */
+#define D64_DEF_USBBURSTLEN     2
+#define D64_DEF_SDIOBURSTLEN    1
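+
+/*
+ * Worked example (illustrative): the default USB value 2 encodes a burst of
+ * 2**(2 + 4) == 64 bytes, matching DMA_BL_64 above.
+ */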
+
+
+#ifndef D64_USBBURSTLEN
+#define D64_USBBURSTLEN	DMA_BL_64
+#endif
+#ifndef D64_SDIOBURSTLEN
+#define D64_SDIOBURSTLEN	DMA_BL_32
+#endif
+
+/* transmit channel control */
+#define	D64_XC_XE		0x00000001	/* transmit enable */
+#define	D64_XC_SE		0x00000002	/* transmit suspend request */
+#define	D64_XC_LE		0x00000004	/* loopback enable */
+#define	D64_XC_FL		0x00000010	/* flush request */
+#define D64_XC_MR_MASK		0x000001C0	/* Multiple outstanding reads */
+#define D64_XC_MR_SHIFT		6
+#define	D64_XC_PD		0x00000800	/* parity check disable */
+#define	D64_XC_AE		0x00030000	/* address extension bits */
+#define	D64_XC_AE_SHIFT		16
+#define D64_XC_BL_MASK		0x001C0000	/* BurstLen bits */
+#define D64_XC_BL_SHIFT		18
+#define D64_XC_PC_MASK		0x00E00000		/* Prefetch control */
+#define D64_XC_PC_SHIFT		21
+#define D64_XC_PT_MASK		0x03000000		/* Prefetch threshold */
+#define D64_XC_PT_SHIFT		24
+
+/* transmit descriptor table pointer */
+#define	D64_XP_LD_MASK		0x00001fff	/* last valid descriptor */
+
+/* transmit channel status */
+#define	D64_XS0_CD_MASK		(di->d64_xs0_cd_mask)	/* current descriptor pointer */
+#define	D64_XS0_XS_MASK		0xf0000000     	/* transmit state */
+#define	D64_XS0_XS_SHIFT		28
+#define	D64_XS0_XS_DISABLED	0x00000000	/* disabled */
+#define	D64_XS0_XS_ACTIVE	0x10000000	/* active */
+#define	D64_XS0_XS_IDLE		0x20000000	/* idle wait */
+#define	D64_XS0_XS_STOPPED	0x30000000	/* stopped */
+#define	D64_XS0_XS_SUSP		0x40000000	/* suspend pending */
+
+#define	D64_XS1_AD_MASK		(di->d64_xs1_ad_mask)	/* active descriptor */
+#define	D64_XS1_XE_MASK		0xf0000000     	/* transmit errors */
+#define	D64_XS1_XE_SHIFT		28
+#define	D64_XS1_XE_NOERR	0x00000000	/* no error */
+#define	D64_XS1_XE_DPE		0x10000000	/* descriptor protocol error */
+#define	D64_XS1_XE_DFU		0x20000000	/* data fifo underrun */
+#define	D64_XS1_XE_DTE		0x30000000	/* data transfer error */
+#define	D64_XS1_XE_DESRE	0x40000000	/* descriptor read error */
+#define	D64_XS1_XE_COREE	0x50000000	/* core error */
+
+/* receive channel control */
+#define	D64_RC_RE		0x00000001	/* receive enable */
+#define	D64_RC_RO_MASK		0x000000fe	/* receive frame offset */
+#define	D64_RC_RO_SHIFT		1
+#define	D64_RC_FM		0x00000100	/* direct fifo receive (pio) mode */
+#define	D64_RC_SH		0x00000200	/* separate rx header descriptor enable */
+#define	D64_RC_SHIFT		9	/* separate rx header descriptor enable */
+#define	D64_RC_OC		0x00000400	/* overflow continue */
+#define	D64_RC_PD		0x00000800	/* parity check disable */
+#define D64_RC_GE		0x00004000	/* Glom enable */
+#define	D64_RC_AE		0x00030000	/* address extension bits */
+#define	D64_RC_AE_SHIFT		16
+#define D64_RC_BL_MASK		0x001C0000	/* BurstLen bits */
+#define D64_RC_BL_SHIFT		18
+#define D64_RC_PC_MASK		0x00E00000	/* Prefetch control */
+#define D64_RC_PC_SHIFT		21
+#define D64_RC_PT_MASK		0x03000000	/* Prefetch threshold */
+#define D64_RC_PT_SHIFT		24
+
+/* flags for dma controller */
+#define DMA_CTRL_PEN		(1 << 0)	/* parity enable */
+#define DMA_CTRL_ROC		(1 << 1)	/* rx overflow continue */
+#define DMA_CTRL_RXMULTI	(1 << 2)	/* allow rx scatter to multiple descriptors */
+#define DMA_CTRL_UNFRAMED	(1 << 3)	/* Unframed Rx/Tx data */
+#define DMA_CTRL_USB_BOUNDRY4KB_WAR (1 << 4)
+#define DMA_CTRL_DMA_AVOIDANCE_WAR (1 << 5)	/* DMA avoidance WAR for 4331 */
+#define DMA_CTRL_RXSINGLE	(1 << 6)	/* always single buffer */
+
+/* receive descriptor table pointer */
+#define	D64_RP_LD_MASK		0x00001fff	/* last valid descriptor */
+
+/* receive channel status */
+#define	D64_RS0_CD_MASK		(di->d64_rs0_cd_mask)	/* current descriptor pointer */
+#define	D64_RS0_RS_MASK		0xf0000000     	/* receive state */
+#define	D64_RS0_RS_SHIFT		28
+#define	D64_RS0_RS_DISABLED	0x00000000	/* disabled */
+#define	D64_RS0_RS_ACTIVE	0x10000000	/* active */
+#define	D64_RS0_RS_IDLE		0x20000000	/* idle wait */
+#define	D64_RS0_RS_STOPPED	0x30000000	/* stopped */
+#define	D64_RS0_RS_SUSP		0x40000000	/* suspend pending */
+
+#define	D64_RS1_AD_MASK		0x0001ffff	/* active descriptor */
+#define	D64_RS1_RE_MASK		0xf0000000     	/* receive errors */
+#define	D64_RS1_RE_SHIFT		28
+#define	D64_RS1_RE_NOERR	0x00000000	/* no error */
+#define	D64_RS1_RE_DPO		0x10000000	/* descriptor protocol error */
+#define	D64_RS1_RE_DFU		0x20000000	/* data fifo overflow */
+#define	D64_RS1_RE_DTE		0x30000000	/* data transfer error */
+#define	D64_RS1_RE_DESRE	0x40000000	/* descriptor read error */
+#define	D64_RS1_RE_COREE	0x50000000	/* core error */
+
+/* fifoaddr */
+#define	D64_FA_OFF_MASK		0xffff		/* offset */
+#define	D64_FA_SEL_MASK		0xf0000		/* select */
+#define	D64_FA_SEL_SHIFT	16
+#define	D64_FA_SEL_XDD		0x00000		/* transmit dma data */
+#define	D64_FA_SEL_XDP		0x10000		/* transmit dma pointers */
+#define	D64_FA_SEL_RDD		0x40000		/* receive dma data */
+#define	D64_FA_SEL_RDP		0x50000		/* receive dma pointers */
+#define	D64_FA_SEL_XFD		0x80000		/* transmit fifo data */
+#define	D64_FA_SEL_XFP		0x90000		/* transmit fifo pointers */
+#define	D64_FA_SEL_RFD		0xc0000		/* receive fifo data */
+#define	D64_FA_SEL_RFP		0xd0000		/* receive fifo pointers */
+#define	D64_FA_SEL_RSD		0xe0000		/* receive frame status data */
+#define	D64_FA_SEL_RSP		0xf0000		/* receive frame status pointers */
+
+/* descriptor control flags 1 */
+#define D64_CTRL_COREFLAGS	0x0ff00000	/* core specific flags */
+#define	D64_CTRL1_NOTPCIE	((uint32)1 << 18)	/* burst size control */
+#define	D64_CTRL1_EOT		((uint32)1 << 28)	/* end of descriptor table */
+#define	D64_CTRL1_IOC		((uint32)1 << 29)	/* interrupt on completion */
+#define	D64_CTRL1_EOF		((uint32)1 << 30)	/* end of frame */
+#define	D64_CTRL1_SOF		((uint32)1 << 31)	/* start of frame */
+
+/* descriptor control flags 2 */
+#define	D64_CTRL2_BC_MASK	0x00007fff	/* buffer byte count; real data len must be <= 16KB */
+#define	D64_CTRL2_AE		0x00030000	/* address extension bits */
+#define	D64_CTRL2_AE_SHIFT	16
+#define D64_CTRL2_PARITY	0x00040000      /* parity bit */
+
+/* control flags in the range [27:20] are core-specific and not defined here */
+#define	D64_CTRL_CORE_MASK	0x0ff00000
+
+#define D64_RX_FRM_STS_LEN	0x0000ffff	/* frame length mask */
+#define D64_RX_FRM_STS_OVFL	0x00800000	/* RxOverFlow */
+#define D64_RX_FRM_STS_DSCRCNT	0x0f000000	/* no. of descriptors used - 1, d11corerev >= 22 */
+#define D64_RX_FRM_STS_DATATYPE	0xf0000000	/* core-dependent data type */
+
+/* receive frame status */
+typedef volatile struct {
+	uint16 len;
+	uint16 flags;
+} dma_rxh_t;
+
+#endif	/* _sbhnddma_h_ */
diff --git a/drivers/staging/edison-bcm43340/include/sbpcmcia.h b/drivers/staging/edison-bcm43340/include/sbpcmcia.h
new file mode 100644
index 0000000..f746ddc
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/sbpcmcia.h
@@ -0,0 +1,113 @@
+/*
+ * BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbpcmcia.h 427964 2013-10-07 07:13:33Z $
+ */
+
+#ifndef	_SBPCMCIA_H
+#define	_SBPCMCIA_H
+
+/* All the addresses that are offsets in attribute space are divided
+ * by two to account for the fact that odd bytes are invalid in
+ * attribute space and our read/write routines make the space appear
+ * as if they didn't exist. Still we want to show the original numbers
+ * as documented in the hnd_pcmcia core manual.
+ */
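+
+/*
+ * Worked example (illustrative): the FCR block documented at attribute offset
+ * 0x700 is therefore addressed here as 0x700 / 2 == 0x380, which is what the
+ * PCMCIA_FCR definition below evaluates to.
+ */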
+
+/* PCMCIA Function Configuration Registers */
+#define	PCMCIA_FCR		(0x700 / 2)
+
+#define	FCR0_OFF		0
+#define	FCR1_OFF		(0x40 / 2)
+#define	FCR2_OFF		(0x80 / 2)
+#define	FCR3_OFF		(0xc0 / 2)
+
+#define	PCMCIA_FCR0		(0x700 / 2)
+#define	PCMCIA_FCR1		(0x740 / 2)
+#define	PCMCIA_FCR2		(0x780 / 2)
+#define	PCMCIA_FCR3		(0x7c0 / 2)
+
+/* Standard PCMCIA FCR registers */
+
+#define	PCMCIA_COR		0
+
+#define	COR_RST			0x80
+#define	COR_LEV			0x40
+#define	COR_IRQEN		0x04
+#define	COR_BLREN		0x01
+#define	COR_FUNEN		0x01
+
+
+#define	PCICIA_FCSR		(2 / 2)
+#define	PCICIA_PRR		(4 / 2)
+#define	PCICIA_SCR		(6 / 2)
+#define	PCICIA_ESR		(8 / 2)
+
+
+#define PCM_MEMOFF		0x0000
+#define F0_MEMOFF		0x1000
+#define F1_MEMOFF		0x2000
+#define F2_MEMOFF		0x3000
+#define F3_MEMOFF		0x4000
+
+/* Memory base in the function fcr's */
+#define MEM_ADDR0		(0x728 / 2)
+#define MEM_ADDR1		(0x72a / 2)
+#define MEM_ADDR2		(0x72c / 2)
+
+/* PCMCIA base plus Srom access in fcr0: */
+#define PCMCIA_ADDR0		(0x072e / 2)
+#define PCMCIA_ADDR1		(0x0730 / 2)
+#define PCMCIA_ADDR2		(0x0732 / 2)
+
+#define MEM_SEG			(0x0734 / 2)
+#define SROM_CS			(0x0736 / 2)
+#define SROM_DATAL		(0x0738 / 2)
+#define SROM_DATAH		(0x073a / 2)
+#define SROM_ADDRL		(0x073c / 2)
+#define SROM_ADDRH		(0x073e / 2)
+#define	SROM_INFO2		(0x0772 / 2)	/* Corerev >= 2 && <= 5 */
+#define	SROM_INFO		(0x07be / 2)	/* Corerev >= 6 */
+
+/*  Values for srom_cs: */
+#define SROM_IDLE		0
+#define SROM_WRITE		1
+#define SROM_READ		2
+#define SROM_WEN		4
+#define SROM_WDS		7
+#define SROM_DONE		8
+
+/* Fields in srom_info: */
+#define	SRI_SZ_MASK		0x03
+#define	SRI_BLANK		0x04
+#define	SRI_OTP			0x80
+
+
+/* sbtmstatelow */
+#define SBTML_INT_ACK		0x40000		/* ack the sb interrupt */
+#define SBTML_INT_EN		0x20000		/* enable sb interrupt */
+
+/* sbtmstatehigh */
+#define SBTMH_INT_STATUS	0x40000		/* sb interrupt status */
+
+#endif	/* _SBPCMCIA_H */
diff --git a/drivers/staging/edison-bcm43340/include/sbsdio.h b/drivers/staging/edison-bcm43340/include/sbsdio.h
new file mode 100644
index 0000000..0196257
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/sbsdio.h
@@ -0,0 +1,186 @@
+/*
+ * SDIO device core hardware definitions.
+ * sdio is a portion of the pcmcia core in core rev 3 - rev 8
+ *
+ * The SDIO core supports 1-bit and 4-bit SDIO modes as well as SPI mode.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbsdio.h 383835 2013-02-07 23:32:39Z $
+ */
+
+#ifndef	_SBSDIO_H
+#define	_SBSDIO_H
+
+#define SBSDIO_NUM_FUNCTION		3	/* as of sdiod rev 0, supports 3 functions */
+
+/* function 1 miscellaneous registers */
+#define SBSDIO_SPROM_CS			0x10000		/* sprom command and status */
+#define SBSDIO_SPROM_INFO		0x10001		/* sprom info register */
+#define SBSDIO_SPROM_DATA_LOW		0x10002		/* sprom indirect access data byte 0 */
+#define SBSDIO_SPROM_DATA_HIGH		0x10003 	/* sprom indirect access data byte 1 */
+#define SBSDIO_SPROM_ADDR_LOW		0x10004		/* sprom indirect access addr byte 0 */
+#define SBSDIO_SPROM_ADDR_HIGH		0x10005		/* sprom indirect access addr byte 1 */
+#define SBSDIO_CHIP_CTRL_DATA		0x10006		/* xtal_pu (gpio) output */
+#define SBSDIO_CHIP_CTRL_EN		0x10007		/* xtal_pu (gpio) enable */
+#define SBSDIO_WATERMARK		0x10008		/* rev < 7, watermark for sdio device */
+#define SBSDIO_DEVICE_CTL		0x10009		/* control busy signal generation */
+
+/* registers introduced in rev 8, some content (mask/bits) defs in sbsdpcmdev.h */
+#define SBSDIO_FUNC1_SBADDRLOW		0x1000A		/* SB Address Window Low (b15) */
+#define SBSDIO_FUNC1_SBADDRMID		0x1000B		/* SB Address Window Mid (b23:b16) */
+#define SBSDIO_FUNC1_SBADDRHIGH		0x1000C		/* SB Address Window High (b31:b24)    */
+#define SBSDIO_FUNC1_FRAMECTRL		0x1000D		/* Frame Control (frame term/abort) */
+#define SBSDIO_FUNC1_CHIPCLKCSR		0x1000E		/* ChipClockCSR (ALP/HT ctl/status) */
+#define SBSDIO_FUNC1_SDIOPULLUP 	0x1000F		/* SdioPullUp (on cmd, d0-d2) */
+#define SBSDIO_FUNC1_WFRAMEBCLO		0x10019		/* Write Frame Byte Count Low */
+#define SBSDIO_FUNC1_WFRAMEBCHI		0x1001A		/* Write Frame Byte Count High */
+#define SBSDIO_FUNC1_RFRAMEBCLO		0x1001B		/* Read Frame Byte Count Low */
+#define SBSDIO_FUNC1_RFRAMEBCHI		0x1001C		/* Read Frame Byte Count High */
+#define SBSDIO_FUNC1_MESBUSYCTRL	0x1001D		/* MesBusyCtl at 0x1001D (rev 11) */
+
+#define SBSDIO_FUNC1_MISC_REG_START	0x10000 	/* f1 misc register start */
+#define SBSDIO_FUNC1_MISC_REG_LIMIT	0x1001C 	/* f1 misc register end */
+
+/* Sdio Core Rev 12 */
+#define SBSDIO_FUNC1_WAKEUPCTRL			0x1001E
+#define SBSDIO_FUNC1_WCTRL_ALPWAIT_MASK		0x1
+#define SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT	0
+#define SBSDIO_FUNC1_WCTRL_HTWAIT_MASK		0x2
+#define SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT		1
+#define SBSDIO_FUNC1_SLEEPCSR			0x1001F
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_MASK		0x1
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT		0
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_EN		1
+#define SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK	0x2
+#define SBSDIO_FUNC1_SLEEPCSR_DEVON_SHIFT	1
+
+/* SBSDIO_SPROM_CS */
+#define SBSDIO_SPROM_IDLE		0
+#define SBSDIO_SPROM_WRITE		1
+#define SBSDIO_SPROM_READ		2
+#define SBSDIO_SPROM_WEN		4
+#define SBSDIO_SPROM_WDS		7
+#define SBSDIO_SPROM_DONE		8
+
+/* SBSDIO_SPROM_INFO */
+#define SROM_SZ_MASK			0x03		/* SROM size, 1: 4k, 2: 16k */
+#define SROM_BLANK			0x04		/* deprecated in corerev 6 */
+#define	SROM_OTP			0x80		/* OTP present */
+
+/* SBSDIO_CHIP_CTRL */
+#define SBSDIO_CHIP_CTRL_XTAL		0x01		/* or'd with onchip xtal_pu,
+							 * 1: power on oscillator
+							 * (for 4318 only)
+							 */
+/* SBSDIO_WATERMARK */
+#define SBSDIO_WATERMARK_MASK		0x7f		/* number of words - 1 for sd device
+							 * to wait before sending data to host
+							 */
+
+/* SBSDIO_MESBUSYCTRL */
+/* When the RX FIFO has fewer entries than this & MBE is set
+ * => busy signal is asserted between data blocks.
+ */
+#define SBSDIO_MESBUSYCTRL_MASK		0x7f
+#define SBSDIO_MESBUSYCTRL_ENAB		0x80		/* Enable busy capability for MES access */
+
+/* SBSDIO_DEVICE_CTL */
+#define SBSDIO_DEVCTL_SETBUSY		0x01		/* 1: device will assert busy signal when
+							 * receiving CMD53
+							 */
+#define SBSDIO_DEVCTL_SPI_INTR_SYNC	0x02		/* 1: assertion of sdio interrupt is
+							 * synchronous to the sdio clock
+							 */
+#define SBSDIO_DEVCTL_CA_INT_ONLY	0x04		/* 1: mask all interrupts to host
+							 * except the chipActive (rev 8)
+							 */
+#define SBSDIO_DEVCTL_PADS_ISO		0x08		/* 1: isolate internal sdio signals, put
+							 * external pads in tri-state; requires
+							 * sdio bus power cycle to clear (rev 9)
+							 */
+#define SBSDIO_DEVCTL_EN_F2_BLK_WATERMARK 0x10  /* Enable function 2 tx for each block */
+#define SBSDIO_DEVCTL_F2WM_ENAB		0x10		/* Enable F2 Watermark */
+#define SBSDIO_DEVCTL_NONDAT_PADS_ISO 	0x20		/* Isolate sdio clk and cmd (non-data) */
+
+/* SBSDIO_FUNC1_CHIPCLKCSR */
+#define SBSDIO_FORCE_ALP		0x01		/* Force ALP request to backplane */
+#define SBSDIO_FORCE_HT			0x02		/* Force HT request to backplane */
+#define SBSDIO_FORCE_ILP		0x04		/* Force ILP request to backplane */
+#define SBSDIO_ALP_AVAIL_REQ		0x08		/* Make ALP ready (power up xtal) */
+#define SBSDIO_HT_AVAIL_REQ		0x10		/* Make HT ready (power up PLL) */
+#define SBSDIO_FORCE_HW_CLKREQ_OFF	0x20		/* Squelch clock requests from HW */
+#define SBSDIO_ALP_AVAIL		0x40		/* Status: ALP is ready */
+#define SBSDIO_HT_AVAIL			0x80		/* Status: HT is ready */
+/* In rev8, actual avail bits followed original docs */
+#define SBSDIO_Rev8_HT_AVAIL		0x40
+#define SBSDIO_Rev8_ALP_AVAIL		0x80
+#define SBSDIO_CSR_MASK			0x1F
+
+#define SBSDIO_AVBITS			(SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+#define SBSDIO_ALPAV(regval)		((regval) & SBSDIO_AVBITS)
+#define SBSDIO_HTAV(regval)		(((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+#define SBSDIO_ALPONLY(regval)		(SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+#define SBSDIO_CLKAV(regval, alponly)	(SBSDIO_ALPAV(regval) && \
+					(alponly ? 1 : SBSDIO_HTAV(regval)))
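+
+/*
+ * Worked examples (illustrative): regval == SBSDIO_ALP_AVAIL (0x40) makes
+ * SBSDIO_ALPONLY(regval) true; regval == 0xc0 (both avail bits set) makes
+ * SBSDIO_HTAV(regval) true, and hence SBSDIO_CLKAV(regval, 0) as well.
+ */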
+
+/* SBSDIO_FUNC1_SDIOPULLUP */
+#define SBSDIO_PULLUP_D0		0x01		/* Enable D0/MISO pullup */
+#define SBSDIO_PULLUP_D1		0x02		/* Enable D1/INT# pullup */
+#define SBSDIO_PULLUP_D2		0x04		/* Enable D2 pullup */
+#define SBSDIO_PULLUP_CMD		0x08		/* Enable CMD/MOSI pullup */
+#define SBSDIO_PULLUP_ALL		0x0f		/* All valid bits */
+
+/* function 1 OCP space */
+#define SBSDIO_SB_OFT_ADDR_MASK		0x07FFF		/* sb offset addr is <= 15 bits, 32k */
+#define SBSDIO_SB_OFT_ADDR_LIMIT	0x08000
+#define SBSDIO_SB_ACCESS_2_4B_FLAG	0x08000		/* with b15, maps to 32-bit SB access */
+
+/* some duplication with sbsdpcmdev.h here */
+/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */
+#define SBSDIO_SBADDRLOW_MASK		0x80		/* Valid bits in SBADDRLOW */
+#define SBSDIO_SBADDRMID_MASK		0xff		/* Valid bits in SBADDRMID */
+#define SBSDIO_SBADDRHIGH_MASK		0xffU		/* Valid bits in SBADDRHIGH */
+#define SBSDIO_SBWINDOW_MASK		0xffff8000	/* Address bits from SBADDR regs */
+
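+/*
+ * Illustrative sketch, not part of the original header: splitting a
+ * 32-bit backplane address into the three window registers; the
+ * SBSDIO_FUNC1_SBADDR* register offsets and the write_f1_reg() helper
+ * are assumed to exist elsewhere.
+ */
+#if 0
+static void sbsdio_set_window(uint32 addr)
+{
+	uint32 base = addr & SBSDIO_SBWINDOW_MASK;	/* 32 KB-aligned base */
+
+	write_f1_reg(SBSDIO_FUNC1_SBADDRLOW,  (base >> 8)  & SBSDIO_SBADDRLOW_MASK);
+	write_f1_reg(SBSDIO_FUNC1_SBADDRMID,  (base >> 16) & SBSDIO_SBADDRMID_MASK);
+	write_f1_reg(SBSDIO_FUNC1_SBADDRHIGH, (base >> 24) & SBSDIO_SBADDRHIGH_MASK);
+	/* the low bits, addr & SBSDIO_SB_OFT_ADDR_MASK, index into the window */
+}
+#endif
+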
+/* direct(mapped) cis space */
+#define SBSDIO_CIS_BASE_COMMON		0x1000		/* MAPPED common CIS address */
+#define SBSDIO_CIS_SIZE_LIMIT		0x200		/* maximum bytes in one CIS */
+#define SBSDIO_OTP_CIS_SIZE_LIMIT       0x078           /* maximum bytes OTP CIS */
+
+#define SBSDIO_CIS_OFT_ADDR_MASK	0x1FFFF		/* cis offset addr is < 17 bits */
+
+#define SBSDIO_CIS_MANFID_TUPLE_LEN	6		/* manfid tuple length, include tuple,
+							 * link bytes
+							 */
+
+/* indirect cis access (in sprom) */
+#define SBSDIO_SPROM_CIS_OFFSET		0x8		/* 8 control bytes first, CIS starts from
+							 * 8th byte
+							 */
+
+#define SBSDIO_BYTEMODE_DATALEN_MAX	64		/* sdio byte mode: maximum length of one
+							 * data command
+							 */
+
+#define SBSDIO_CORE_ADDR_MASK		0x1FFFF		/* sdio core function one address mask */
+
+#endif	/* _SBSDIO_H */
diff --git a/drivers/staging/edison-bcm43340/include/sbsdpcmdev.h b/drivers/staging/edison-bcm43340/include/sbsdpcmdev.h
new file mode 100644
index 0000000..97051ce
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/sbsdpcmdev.h
@@ -0,0 +1,295 @@
+/*
+ * Broadcom SiliconBackplane SDIO/PCMCIA hardware-specific
+ * device core support
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbsdpcmdev.h 416730 2013-08-06 09:33:19Z $
+ */
+
+#ifndef	_sbsdpcmdev_h_
+#define	_sbsdpcmdev_h_
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif	/* PAD */
+
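+/*
+ * Example (illustrative): PAD expands through __LINE__, so two
+ * occurrences of "uint32 PAD[2];" in one struct get distinct member
+ * names, e.g. "uint32 pad120[2];" and "uint32 pad122[2];", and do not
+ * collide.
+ */
+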
+
+typedef volatile struct {
+	dma64regs_t	xmt;		/* dma tx */
+	uint32 PAD[2];
+	dma64regs_t	rcv;		/* dma rx */
+	uint32 PAD[2];
+} dma64p_t;
+
+/* dma64 sdiod corerev >= 1 */
+typedef volatile struct {
+	dma64p_t dma64regs[2];
+	dma64diag_t dmafifo;		/* DMA Diagnostic Regs, 0x280-0x28c */
+	uint32 PAD[92];
+} sdiodma64_t;
+
+/* dma32 sdiod corerev == 0 */
+typedef volatile struct {
+	dma32regp_t dma32regs[2];	/* dma tx & rx, 0x200-0x23c */
+	dma32diag_t dmafifo;		/* DMA Diagnostic Regs, 0x240-0x24c */
+	uint32 PAD[108];
+} sdiodma32_t;
+
+/* dma32 regs for pcmcia core */
+typedef volatile struct {
+	dma32regp_t dmaregs;		/* DMA Regs, 0x200-0x21c, rev8 */
+	dma32diag_t dmafifo;		/* DMA Diagnostic Regs, 0x220-0x22c */
+	uint32 PAD[116];
+} pcmdma32_t;
+
+/* core registers */
+typedef volatile struct {
+	uint32 corecontrol;		/* CoreControl, 0x000, rev8 */
+	uint32 corestatus;		/* CoreStatus, 0x004, rev8  */
+	uint32 PAD[1];
+	uint32 biststatus;		/* BistStatus, 0x00c, rev8  */
+
+	/* PCMCIA access */
+	uint16 pcmciamesportaladdr;	/* PcmciaMesPortalAddr, 0x010, rev8   */
+	uint16 PAD[1];
+	uint16 pcmciamesportalmask;	/* PcmciaMesPortalMask, 0x014, rev8   */
+	uint16 PAD[1];
+	uint16 pcmciawrframebc;		/* PcmciaWrFrameBC, 0x018, rev8   */
+	uint16 PAD[1];
+	uint16 pcmciaunderflowtimer;	/* PcmciaUnderflowTimer, 0x01c, rev8   */
+	uint16 PAD[1];
+
+	/* interrupt */
+	uint32 intstatus;		/* IntStatus, 0x020, rev8   */
+	uint32 hostintmask;		/* IntHostMask, 0x024, rev8   */
+	uint32 intmask;			/* IntSbMask, 0x028, rev8   */
+	uint32 sbintstatus;		/* SBIntStatus, 0x02c, rev8   */
+	uint32 sbintmask;		/* SBIntMask, 0x030, rev8   */
+	uint32 funcintmask;		/* SDIO Function Interrupt Mask, SDIO rev4 */
+	uint32 PAD[2];
+	uint32 tosbmailbox;		/* ToSBMailbox, 0x040, rev8   */
+	uint32 tohostmailbox;		/* ToHostMailbox, 0x044, rev8   */
+	uint32 tosbmailboxdata;		/* ToSbMailboxData, 0x048, rev8   */
+	uint32 tohostmailboxdata;	/* ToHostMailboxData, 0x04c, rev8   */
+
+	/* synchronized access to registers in SDIO clock domain */
+	uint32 sdioaccess;		/* SdioAccess, 0x050, rev8   */
+	uint32 PAD[3];
+
+	/* PCMCIA frame control */
+	uint8 pcmciaframectrl;		/* pcmciaFrameCtrl, 0x060, rev8   */
+	uint8 PAD[3];
+	uint8 pcmciawatermark;		/* pcmciaWaterMark, 0x064, rev8   */
+	uint8 PAD[155];
+
+	/* interrupt batching control */
+	uint32 intrcvlazy;		/* IntRcvLazy, 0x100, rev8 */
+	uint32 PAD[3];
+
+	/* counters */
+	uint32 cmd52rd;			/* Cmd52RdCount, 0x110, rev8, SDIO: cmd52 reads */
+	uint32 cmd52wr;			/* Cmd52WrCount, 0x114, rev8, SDIO: cmd52 writes */
+	uint32 cmd53rd;			/* Cmd53RdCount, 0x118, rev8, SDIO: cmd53 reads */
+	uint32 cmd53wr;			/* Cmd53WrCount, 0x11c, rev8, SDIO: cmd53 writes */
+	uint32 abort;			/* AbortCount, 0x120, rev8, SDIO: aborts */
+	uint32 datacrcerror;		/* DataCrcErrorCount, 0x124, rev8, SDIO: frames w/bad CRC */
+	uint32 rdoutofsync;		/* RdOutOfSyncCount, 0x128, rev8, SDIO/PCMCIA: Rd Frm OOS */
+	uint32 wroutofsync;		/* WrOutOfSyncCount, 0x12c, rev8, SDIO/PCMCIA: Wr Frm OOS */
+	uint32 writebusy;		/* WriteBusyCount, 0x130, rev8, SDIO: dev asserted "busy" */
+	uint32 readwait;		/* ReadWaitCount, 0x134, rev8, SDIO: read: no data avail */
+	uint32 readterm;		/* ReadTermCount, 0x138, rev8, SDIO: rd frm terminates */
+	uint32 writeterm;		/* WriteTermCount, 0x13c, rev8, SDIO: wr frm terminates */
+	uint32 PAD[40];
+	uint32 clockctlstatus;		/* ClockCtlStatus, 0x1e0, rev8 */
+	uint32 PAD[7];
+
+	/* DMA engines */
+	volatile union {
+		pcmdma32_t pcm32;
+		sdiodma32_t sdiod32;
+		sdiodma64_t sdiod64;
+	} dma;
+
+	/* SDIO/PCMCIA CIS region */
+	char cis[512];			/* 512 byte CIS, 0x400-0x5ff, rev6 */
+
+	/* PCMCIA function control registers */
+	char pcmciafcr[256];		/* PCMCIA FCR, 0x600-6ff, rev6 */
+	uint16 PAD[55];
+
+	/* PCMCIA backplane access */
+	uint16 backplanecsr;		/* BackplaneCSR, 0x76E, rev6 */
+	uint16 backplaneaddr0;		/* BackplaneAddr0, 0x770, rev6 */
+	uint16 backplaneaddr1;		/* BackplaneAddr1, 0x772, rev6 */
+	uint16 backplaneaddr2;		/* BackplaneAddr2, 0x774, rev6 */
+	uint16 backplaneaddr3;		/* BackplaneAddr3, 0x776, rev6 */
+	uint16 backplanedata0;		/* BackplaneData0, 0x778, rev6 */
+	uint16 backplanedata1;		/* BackplaneData1, 0x77a, rev6 */
+	uint16 backplanedata2;		/* BackplaneData2, 0x77c, rev6 */
+	uint16 backplanedata3;		/* BackplaneData3, 0x77e, rev6 */
+	uint16 PAD[31];
+
+	/* sprom "size" & "blank" info */
+	uint16 spromstatus;		/* SPROMStatus, 0x7BE, rev2 */
+	uint32 PAD[464];
+
+	/* Sonics SiliconBackplane registers */
+	sbconfig_t sbconfig;		/* SbConfig Regs, 0xf00-0xfff, rev8 */
+} sdpcmd_regs_t;
+
+/* corecontrol */
+#define CC_CISRDY		(1 << 0)	/* CIS Ready */
+#define CC_BPRESEN		(1 << 1)	/* CCCR RES signal causes backplane reset */
+#define CC_F2RDY		(1 << 2)	/* set CCCR IOR2 bit */
+#define CC_CLRPADSISO		(1 << 3)	/* clear SDIO pads isolation bit (rev 11) */
+#define CC_XMTDATAAVAIL_MODE	(1 << 4)	/* data avail generates an interrupt */
+#define CC_XMTDATAAVAIL_CTRL	(1 << 5)	/* data avail interrupt ctrl */
+
+/* corestatus */
+#define CS_PCMCIAMODE	(1 << 0)	/* Device Mode; 0=SDIO, 1=PCMCIA */
+#define CS_SMARTDEV	(1 << 1)	/* 1=smartDev enabled */
+#define CS_F2ENABLED	(1 << 2)	/* 1=host has enabled the device */
+
+#define PCMCIA_MES_PA_MASK	0x7fff	/* PCMCIA Message Portal Address Mask */
+#define PCMCIA_MES_PM_MASK	0x7fff	/* PCMCIA Message Portal Mask Mask */
+#define PCMCIA_WFBC_MASK	0xffff	/* PCMCIA Write Frame Byte Count Mask */
+#define PCMCIA_UT_MASK		0x07ff	/* PCMCIA Underflow Timer Mask */
+
+/* intstatus */
+#define I_SMB_SW0	(1 << 0)	/* To SB Mail S/W interrupt 0 */
+#define I_SMB_SW1	(1 << 1)	/* To SB Mail S/W interrupt 1 */
+#define I_SMB_SW2	(1 << 2)	/* To SB Mail S/W interrupt 2 */
+#define I_SMB_SW3	(1 << 3)	/* To SB Mail S/W interrupt 3 */
+#define I_SMB_SW_MASK	0x0000000f	/* To SB Mail S/W interrupts mask */
+#define I_SMB_SW_SHIFT	0		/* To SB Mail S/W interrupts shift */
+#define I_HMB_SW0	(1 << 4)	/* To Host Mail S/W interrupt 0 */
+#define I_HMB_SW1	(1 << 5)	/* To Host Mail S/W interrupt 1 */
+#define I_HMB_SW2	(1 << 6)	/* To Host Mail S/W interrupt 2 */
+#define I_HMB_SW3	(1 << 7)	/* To Host Mail S/W interrupt 3 */
+#define I_HMB_SW_MASK	0x000000f0	/* To Host Mail S/W interrupts mask */
+#define I_HMB_SW_SHIFT	4		/* To Host Mail S/W interrupts shift */
+#define I_WR_OOSYNC	(1 << 8)	/* Write Frame Out Of Sync */
+#define I_RD_OOSYNC	(1 << 9)	/* Read Frame Out Of Sync */
+#define	I_PC		(1 << 10)	/* descriptor error */
+#define	I_PD		(1 << 11)	/* data error */
+#define	I_DE		(1 << 12)	/* Descriptor protocol Error */
+#define	I_RU		(1 << 13)	/* Receive descriptor Underflow */
+#define	I_RO		(1 << 14)	/* Receive fifo Overflow */
+#define	I_XU		(1 << 15)	/* Transmit fifo Underflow */
+#define	I_RI		(1 << 16)	/* Receive Interrupt */
+#define I_BUSPWR	(1 << 17)	/* SDIO Bus Power Change (rev 9) */
+#define I_XMTDATA_AVAIL (1 << 23)	/* bits in fifo */
+#define	I_XI		(1 << 24)	/* Transmit Interrupt */
+#define I_RF_TERM	(1 << 25)	/* Read Frame Terminate */
+#define I_WF_TERM	(1 << 26)	/* Write Frame Terminate */
+#define I_PCMCIA_XU	(1 << 27)	/* PCMCIA Transmit FIFO Underflow */
+#define I_SBINT		(1 << 28)	/* sbintstatus Interrupt */
+#define I_CHIPACTIVE	(1 << 29)	/* chip transitioned from doze to active state */
+#define I_SRESET	(1 << 30)	/* CCCR RES interrupt */
+#define I_IOE2		(1U << 31)	/* CCCR IOE2 Bit Changed */
+#define	I_ERRORS	(I_PC | I_PD | I_DE | I_RU | I_RO | I_XU)	/* DMA Errors */
+#define I_DMA		(I_RI | I_XI | I_ERRORS)
+
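+/*
+ * Illustrative sketch, not part of the original header: classifying an
+ * intstatus snapshot with the masks above; the handler bodies are elided.
+ */
+#if 0
+static void sdpcm_service_intstatus(uint32 intstatus)
+{
+	if (intstatus & I_ERRORS)
+		;	/* DMA error: reinitialize the DMA engine */
+	if (intstatus & I_HMB_SW_MASK)
+		;	/* mailbox event; index = (intstatus & I_HMB_SW_MASK) >> I_HMB_SW_SHIFT */
+	if (intstatus & I_RI)
+		;	/* received frames pending */
+}
+#endif
+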
+/* sbintstatus */
+#define I_SB_SERR	(1 << 8)	/* Backplane SError (write) */
+#define I_SB_RESPERR	(1 << 9)	/* Backplane Response Error (read) */
+#define I_SB_SPROMERR	(1 << 10)	/* Error accessing the sprom */
+
+/* sdioaccess */
+#define SDA_DATA_MASK	0x000000ff	/* Read/Write Data Mask */
+#define SDA_ADDR_MASK	0x000fff00	/* Read/Write Address Mask */
+#define SDA_ADDR_SHIFT	8		/* Read/Write Address Shift */
+#define SDA_WRITE	0x01000000	/* Write bit  */
+#define SDA_READ	0x00000000	/* Write bit cleared for Read */
+#define SDA_BUSY	0x80000000	/* Busy bit */
+
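+/*
+ * Illustrative sketch, not part of the original header: an sdioaccess
+ * operation is composed from the fields above and completion is polled
+ * via SDA_BUSY (busy-wait shown without a timeout for brevity).
+ */
+#if 0
+static void sdioaccess_write(sdpcmd_regs_t *regs, uint32 addr, uint8 data)
+{
+	regs->sdioaccess = SDA_WRITE |
+	                   ((addr << SDA_ADDR_SHIFT) & SDA_ADDR_MASK) |
+	                   (data & SDA_DATA_MASK);
+	while (regs->sdioaccess & SDA_BUSY)
+		;	/* spin until the access completes */
+}
+#endif
+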
+/* sdioaccess-accessible register address spaces */
+#define SDA_CCCR_SPACE		0x000	/* sdioAccess CCCR register space */
+#define SDA_F1_FBR_SPACE	0x100	/* sdioAccess F1 FBR register space */
+#define SDA_F2_FBR_SPACE	0x200	/* sdioAccess F2 FBR register space */
+#define SDA_F1_REG_SPACE	0x300	/* sdioAccess F1 core-specific register space */
+
+/* SDA_F1_REG_SPACE sdioaccess-accessible F1 reg space register offsets */
+#define SDA_CHIPCONTROLDATA	0x006	/* ChipControlData */
+#define SDA_CHIPCONTROLENAB	0x007	/* ChipControlEnable */
+#define SDA_F2WATERMARK		0x008	/* Function 2 Watermark */
+#define SDA_DEVICECONTROL	0x009	/* DeviceControl */
+#define SDA_SBADDRLOW		0x00a	/* SbAddrLow */
+#define SDA_SBADDRMID		0x00b	/* SbAddrMid */
+#define SDA_SBADDRHIGH		0x00c	/* SbAddrHigh */
+#define SDA_FRAMECTRL		0x00d	/* FrameCtrl */
+#define SDA_CHIPCLOCKCSR	0x00e	/* ChipClockCSR */
+#define SDA_SDIOPULLUP		0x00f	/* SdioPullUp */
+#define SDA_SDIOWRFRAMEBCLOW	0x019	/* SdioWrFrameBCLow */
+#define SDA_SDIOWRFRAMEBCHIGH	0x01a	/* SdioWrFrameBCHigh */
+#define SDA_SDIORDFRAMEBCLOW	0x01b	/* SdioRdFrameBCLow */
+#define SDA_SDIORDFRAMEBCHIGH	0x01c	/* SdioRdFrameBCHigh */
+
+/* SDA_F2WATERMARK */
+#define SDA_F2WATERMARK_MASK	0x7f	/* F2Watermark Mask */
+
+/* SDA_SBADDRLOW */
+#define SDA_SBADDRLOW_MASK	0x80	/* SbAddrLow Mask */
+
+/* SDA_SBADDRMID */
+#define SDA_SBADDRMID_MASK	0xff	/* SbAddrMid Mask */
+
+/* SDA_SBADDRHIGH */
+#define SDA_SBADDRHIGH_MASK	0xff	/* SbAddrHigh Mask */
+
+/* SDA_FRAMECTRL */
+#define SFC_RF_TERM	(1 << 0)	/* Read Frame Terminate */
+#define SFC_WF_TERM	(1 << 1)	/* Write Frame Terminate */
+#define SFC_CRC4WOOS	(1 << 2)	/* HW reports CRC error for write out of sync */
+#define SFC_ABORTALL	(1 << 3)	/* Abort cancels all in-progress frames */
+
+/* pcmciaframectrl */
+#define PFC_RF_TERM	(1 << 0)	/* Read Frame Terminate */
+#define PFC_WF_TERM	(1 << 1)	/* Write Frame Terminate */
+
+/* intrcvlazy */
+#define	IRL_TO_MASK	0x00ffffff	/* timeout */
+#define	IRL_FC_MASK	0xff000000	/* frame count */
+#define	IRL_FC_SHIFT	24		/* frame count */
+
+/* rx header */
+typedef volatile struct {
+	uint16 len;
+	uint16 flags;
+} sdpcmd_rxh_t;
+
+/* rx header flags */
+#define RXF_CRC		0x0001		/* CRC error detected */
+#define RXF_WOOS	0x0002		/* write frame out of sync */
+#define RXF_WF_TERM	0x0004		/* write frame terminated */
+#define RXF_ABORT	0x0008		/* write frame aborted */
+#define RXF_DISCARD	(RXF_CRC | RXF_WOOS | RXF_WF_TERM | RXF_ABORT)	/* bad frame */
+
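+/*
+ * Illustrative sketch, not part of the original header: a frame whose
+ * rx header carries any flag folded into RXF_DISCARD should be dropped.
+ */
+#if 0
+static int sdpcm_rxframe_ok(sdpcmd_rxh_t *rxh)
+{
+	return (rxh->flags & RXF_DISCARD) == 0;
+}
+#endif
+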
+/* HW frame tag */
+#define SDPCM_FRAMETAG_LEN	4	/* HW frametag: 2 bytes len, 2 bytes check val */
+
+#define SDPCM_HWEXT_LEN	8
+
+#endif	/* _sbsdpcmdev_h_ */
diff --git a/drivers/staging/edison-bcm43340/include/sbsocram.h b/drivers/staging/edison-bcm43340/include/sbsocram.h
new file mode 100644
index 0000000..790e3f1
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/sbsocram.h
@@ -0,0 +1,199 @@
+/*
+ * BCM47XX Sonics SiliconBackplane embedded ram core
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbsocram.h 271781 2011-07-13 20:00:06Z $
+ */
+
+#ifndef	_SBSOCRAM_H
+#define	_SBSOCRAM_H
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif	/* PAD */
+
+/* Memcsocram core registers */
+typedef volatile struct sbsocramregs {
+	uint32	coreinfo;
+	uint32	bwalloc;
+	uint32	extracoreinfo;
+	uint32	biststat;
+	uint32	bankidx;
+	uint32	standbyctrl;
+
+	uint32	errlogstatus;	/* rev 6 */
+	uint32	errlogaddr;	/* rev 6 */
+	/* used for patching rev 3 & 5 */
+	uint32	cambankidx;
+	uint32	cambankstandbyctrl;
+	uint32	cambankpatchctrl;
+	uint32	cambankpatchtblbaseaddr;
+	uint32	cambankcmdreg;
+	uint32	cambankdatareg;
+	uint32	cambankmaskreg;
+	uint32	PAD[1];
+	uint32	bankinfo;	/* corerev 8 */
+	uint32	PAD[15];
+	uint32	extmemconfig;
+	uint32	extmemparitycsr;
+	uint32	extmemparityerrdata;
+	uint32	extmemparityerrcnt;
+	uint32	extmemwrctrlandsize;
+	uint32	PAD[84];
+	uint32	workaround;
+	uint32	pwrctl;		/* corerev >= 2 */
+	uint32	PAD[133];
+	uint32  sr_control;     /* corerev >= 15 */
+	uint32  sr_status;      /* corerev >= 15 */
+	uint32  sr_address;     /* corerev >= 15 */
+	uint32  sr_data;        /* corerev >= 15 */
+} sbsocramregs_t;
+
+#endif	/* _LANGUAGE_ASSEMBLY */
+
+/* Register offsets */
+#define	SR_COREINFO		0x00
+#define	SR_BWALLOC		0x04
+#define	SR_BISTSTAT		0x0c
+#define	SR_BANKINDEX		0x10
+#define	SR_BANKSTBYCTL		0x14
+#define SR_PWRCTL		0x1e8
+
+/* Coreinfo register */
+#define	SRCI_PT_MASK		0x00070000	/* corerev >= 6; port type[18:16] */
+#define	SRCI_PT_SHIFT		16
+/* port types : SRCI_PT_<processorPT>_<backplanePT> */
+#define SRCI_PT_OCP_OCP		0
+#define SRCI_PT_AXI_OCP		1
+#define SRCI_PT_ARM7AHB_OCP	2
+#define SRCI_PT_CM3AHB_OCP	3
+#define SRCI_PT_AXI_AXI		4
+#define SRCI_PT_AHB_AXI		5
+/* corerev >= 3 */
+#define SRCI_LSS_MASK		0x00f00000
+#define SRCI_LSS_SHIFT		20
+#define SRCI_LRS_MASK		0x0f000000
+#define SRCI_LRS_SHIFT		24
+
+/* In corerev 0, the memory size is 2 to the power of
+ * (the base, 16, plus the contents of the memsize field, plus 1).
+ */
+#define	SRCI_MS0_MASK		0xf
+#define SR_MS0_BASE		16
+
+/*
+ * In corerev 1 the bank size is 2 to the power of (the bank size field
+ * plus 14), and the memory size is the number of banks times the bank
+ * size. The same applies to the ROM size.
+ */
+#define	SRCI_ROMNB_MASK		0xf000
+#define	SRCI_ROMNB_SHIFT	12
+#define	SRCI_ROMBSZ_MASK	0xf00
+#define	SRCI_ROMBSZ_SHIFT	8
+#define	SRCI_SRNB_MASK		0xf0
+#define	SRCI_SRNB_SHIFT		4
+#define	SRCI_SRBSZ_MASK		0xf
+#define	SRCI_SRBSZ_SHIFT	0
+
+#define SR_BSZ_BASE		14
+
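+/*
+ * Illustrative sketch, not part of the original header: decoding
+ * coreinfo into a RAM size using the corerev 0 and corerev >= 1
+ * formulas described above.
+ */
+#if 0
+static uint32 socram_memsize(uint32 coreinfo, uint32 corerev)
+{
+	if (corerev == 0)
+		return 1 << (SR_MS0_BASE + (coreinfo & SRCI_MS0_MASK) + 1);
+	else {
+		uint32 nb  = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+		uint32 bsz = (coreinfo & SRCI_SRBSZ_MASK) >> SRCI_SRBSZ_SHIFT;
+		return nb * (1 << (bsz + SR_BSZ_BASE));
+	}
+}
+#endif
+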
+/* Standby control register */
+#define	SRSC_SBYOVR_MASK	0x80000000
+#define	SRSC_SBYOVR_SHIFT	31
+#define	SRSC_SBYOVRVAL_MASK	0x60000000
+#define	SRSC_SBYOVRVAL_SHIFT	29
+#define	SRSC_SBYEN_MASK		0x01000000	/* rev >= 3 */
+#define	SRSC_SBYEN_SHIFT	24
+
+/* Power control register */
+#define SRPC_PMU_STBYDIS_MASK	0x00000010	/* rev >= 3 */
+#define SRPC_PMU_STBYDIS_SHIFT	4
+#define SRPC_STBYOVRVAL_MASK	0x00000008
+#define SRPC_STBYOVRVAL_SHIFT	3
+#define SRPC_STBYOVR_MASK	0x00000007
+#define SRPC_STBYOVR_SHIFT	0
+
+/* Extra core capability register */
+#define SRECC_NUM_BANKS_MASK   0x000000F0
+#define SRECC_NUM_BANKS_SHIFT  4
+#define SRECC_BANKSIZE_MASK    0x0000000F
+#define SRECC_BANKSIZE_SHIFT   0
+
+#define SRECC_BANKSIZE(value)	 (1 << (value))
+
+/* CAM bank patch control */
+#define SRCBPC_PATCHENABLE 0x80000000
+
+#define SRP_ADDRESS   0x0001FFFC
+#define SRP_VALID     0x8000
+
+/* CAM bank command reg */
+#define SRCMD_WRITE  0x00020000
+#define SRCMD_READ   0x00010000
+#define SRCMD_DONE   0x80000000
+
+#define SRCMD_DONE_DLY	1000
+
+/* bankidx and bankinfo reg defines corerev >= 8 */
+#define SOCRAM_BANKINFO_SZMASK		0x7f
+#define SOCRAM_BANKIDX_ROM_MASK		0x100
+
+#define SOCRAM_BANKIDX_MEMTYPE_SHIFT	8
+/* socram bankinfo memtype */
+#define SOCRAM_MEMTYPE_RAM		0
+#define SOCRAM_MEMTYPE_R0M		1
+#define SOCRAM_MEMTYPE_DEVRAM		2
+
+#define	SOCRAM_BANKINFO_REG		0x40
+#define	SOCRAM_BANKIDX_REG		0x10
+#define	SOCRAM_BANKINFO_STDBY_MASK	0x400
+#define	SOCRAM_BANKINFO_STDBY_TIMER	0x800
+
+/* bankinfo rev >= 10 */
+#define SOCRAM_BANKINFO_DEVRAMSEL_SHIFT		13
+#define SOCRAM_BANKINFO_DEVRAMSEL_MASK		0x2000
+#define SOCRAM_BANKINFO_DEVRAMPRO_SHIFT		14
+#define SOCRAM_BANKINFO_DEVRAMPRO_MASK		0x4000
+#define SOCRAM_BANKINFO_SLPSUPP_SHIFT		15
+#define SOCRAM_BANKINFO_SLPSUPP_MASK		0x8000
+#define SOCRAM_BANKINFO_RETNTRAM_SHIFT		16
+#define SOCRAM_BANKINFO_RETNTRAM_MASK		0x00010000
+#define SOCRAM_BANKINFO_PDASZ_SHIFT		17
+#define SOCRAM_BANKINFO_PDASZ_MASK		0x003E0000
+#define SOCRAM_BANKINFO_DEVRAMREMAP_SHIFT	24
+#define SOCRAM_BANKINFO_DEVRAMREMAP_MASK	0x01000000
+
+/* extracoreinfo register */
+#define SOCRAM_DEVRAMBANK_MASK		0xF000
+#define SOCRAM_DEVRAMBANK_SHIFT		12
+
+/* bank info to calculate bank size */
+#define   SOCRAM_BANKINFO_SZBASE          8192
+#define SOCRAM_BANKSIZE_SHIFT         13      /* SOCRAM_BANKINFO_SZBASE */
+
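+/*
+ * Illustrative sketch, not part of the original header: for corerev >= 8
+ * the bank size is (size field + 1) units of SOCRAM_BANKINFO_SZBASE
+ * (8 KB), i.e. a shift by SOCRAM_BANKSIZE_SHIFT.
+ */
+#if 0
+static uint32 socram_banksize(uint32 bankinfo)
+{
+	return ((bankinfo & SOCRAM_BANKINFO_SZMASK) + 1) << SOCRAM_BANKSIZE_SHIFT;
+}
+#endif
+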
+
+#endif	/* _SBSOCRAM_H */
diff --git a/drivers/staging/edison-bcm43340/include/sdio.h b/drivers/staging/edison-bcm43340/include/sdio.h
new file mode 100644
index 0000000..5b9f737
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/sdio.h
@@ -0,0 +1,619 @@
+/*
+ * SDIO spec header file
+ * Protocol and standard (common) device definitions
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdio.h 416730 2013-08-06 09:33:19Z $
+ */
+
+#ifndef	_SDIO_H
+#define	_SDIO_H
+
+
+/* CCCR structure for function 0 */
+typedef volatile struct {
+	uint8	cccr_sdio_rev;		/* RO, cccr and sdio revision */
+	uint8	sd_rev;			/* RO, sd spec revision */
+	uint8	io_en;			/* I/O enable */
+	uint8	io_rdy;			/* I/O ready reg */
+	uint8	intr_ctl;		/* Master and per function interrupt enable control */
+	uint8	intr_status;		/* RO, interrupt pending status */
+	uint8	io_abort;		/* read/write abort or reset all functions */
+	uint8	bus_inter;		/* bus interface control */
+	uint8	capability;		/* RO, card capability */
+
+	uint8	cis_base_low;		/* 0x9 RO, common CIS base address, LSB */
+	uint8	cis_base_mid;
+	uint8	cis_base_high;		/* 0xB RO, common CIS base address, MSB */
+
+	/* suspend/resume registers */
+	uint8	bus_suspend;		/* 0xC */
+	uint8	func_select;		/* 0xD */
+	uint8	exec_flag;		/* 0xE */
+	uint8	ready_flag;		/* 0xF */
+
+	uint8	fn0_blk_size[2];	/* 0x10(LSB), 0x11(MSB) */
+
+	uint8	power_control;		/* 0x12 (SDIO version 1.10) */
+
+	uint8	speed_control;		/* 0x13 */
+} sdio_regs_t;
+
+/* SDIO Device CCCR offsets */
+#define SDIOD_CCCR_REV			0x00
+#define SDIOD_CCCR_SDREV		0x01
+#define SDIOD_CCCR_IOEN			0x02
+#define SDIOD_CCCR_IORDY		0x03
+#define SDIOD_CCCR_INTEN		0x04
+#define SDIOD_CCCR_INTPEND		0x05
+#define SDIOD_CCCR_IOABORT		0x06
+#define SDIOD_CCCR_BICTRL		0x07
+#define SDIOD_CCCR_CAPABLITIES		0x08
+#define SDIOD_CCCR_CISPTR_0		0x09
+#define SDIOD_CCCR_CISPTR_1		0x0A
+#define SDIOD_CCCR_CISPTR_2		0x0B
+#define SDIOD_CCCR_BUSSUSP		0x0C
+#define SDIOD_CCCR_FUNCSEL		0x0D
+#define SDIOD_CCCR_EXECFLAGS		0x0E
+#define SDIOD_CCCR_RDYFLAGS		0x0F
+#define SDIOD_CCCR_BLKSIZE_0		0x10
+#define SDIOD_CCCR_BLKSIZE_1		0x11
+#define SDIOD_CCCR_POWER_CONTROL	0x12
+#define SDIOD_CCCR_SPEED_CONTROL	0x13
+#define SDIOD_CCCR_UHSI_SUPPORT		0x14
+#define SDIOD_CCCR_DRIVER_STRENGTH	0x15
+#define SDIOD_CCCR_INTR_EXTN		0x16
+
+/* Broadcom extensions (corerev >= 1) */
+#define SDIOD_CCCR_BRCM_CARDCAP		0xf0
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD14_SUPPORT	0x02
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD14_EXT	0x04
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD_NODEC	0x08
+#define SDIOD_CCCR_BRCM_CARDCTL			0xf1
+#define SDIOD_CCCR_BRCM_SEPINT			0xf2
+
+/* cccr_sdio_rev */
+#define SDIO_REV_SDIOID_MASK	0xf0	/* SDIO spec revision number */
+#define SDIO_REV_CCCRID_MASK	0x0f	/* CCCR format version number */
+#define SDIO_SPEC_VERSION_3_0	0x40	/* SDIO spec version 3.0 */
+
+/* sd_rev */
+#define SD_REV_PHY_MASK		0x0f	/* SD format version number */
+
+/* io_en */
+#define SDIO_FUNC_ENABLE_1	0x02	/* function 1 I/O enable */
+#define SDIO_FUNC_ENABLE_2	0x04	/* function 2 I/O enable */
+
+/* io_rdy */
+#define SDIO_FUNC_READY_1	0x02	/* function 1 I/O ready */
+#define SDIO_FUNC_READY_2	0x04	/* function 2 I/O ready */
+
+/* intr_ctl */
+#define INTR_CTL_MASTER_EN	0x1	/* interrupt enable master */
+#define INTR_CTL_FUNC1_EN	0x2	/* interrupt enable for function 1 */
+#define INTR_CTL_FUNC2_EN	0x4	/* interrupt enable for function 2 */
+
+/* intr_status */
+#define INTR_STATUS_FUNC1	0x2	/* interrupt pending for function 1 */
+#define INTR_STATUS_FUNC2	0x4	/* interrupt pending for function 2 */
+
+/* io_abort */
+#define IO_ABORT_RESET_ALL	0x08	/* I/O card reset */
+#define IO_ABORT_FUNC_MASK	0x07	/* abort selection: function x */
+
+/* bus_inter */
+#define BUS_CARD_DETECT_DIS	0x80	/* Card Detect disable */
+#define BUS_SPI_CONT_INTR_CAP	0x40	/* support continuous SPI interrupt */
+#define BUS_SPI_CONT_INTR_EN	0x20	/* continuous SPI interrupt enable */
+#define BUS_SD_DATA_WIDTH_MASK	0x03	/* bus width mask */
+#define BUS_SD_DATA_WIDTH_4BIT	0x02	/* bus width 4-bit mode */
+#define BUS_SD_DATA_WIDTH_1BIT	0x00	/* bus width 1-bit mode */
+
+/* capability */
+#define SDIO_CAP_4BLS		0x80	/* 4-bit support for low speed card */
+#define SDIO_CAP_LSC		0x40	/* low speed card */
+#define SDIO_CAP_E4MI		0x20	/* enable interrupt between block of data in 4-bit mode */
+#define SDIO_CAP_S4MI		0x10	/* support interrupt between block of data in 4-bit mode */
+#define SDIO_CAP_SBS		0x08	/* support suspend/resume */
+#define SDIO_CAP_SRW		0x04	/* support read wait */
+#define SDIO_CAP_SMB		0x02	/* support multi-block transfer */
+#define SDIO_CAP_SDC		0x01	/* Support Direct commands during multi-byte transfer */
+
+/* power_control */
+#define SDIO_POWER_SMPC		0x01	/* supports master power control (RO) */
+#define SDIO_POWER_EMPC		0x02	/* enable master power control (allow > 200mA) (RW) */
+
+/* speed_control (control device entry into high-speed clocking mode) */
+#define SDIO_SPEED_SHS		0x01	/* supports high-speed [clocking] mode (RO) */
+#define SDIO_SPEED_EHS		0x02	/* enable high-speed [clocking] mode (RW) */
+#define SDIO_SPEED_UHSI_DDR50	   0x08
+
+/* for setting bus speed in card: reg 0x13 */
+#define SDIO_BUS_SPEED_UHSISEL_M	BITFIELD_MASK(3)
+#define SDIO_BUS_SPEED_UHSISEL_S	1
+
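+/*
+ * Illustrative sketch, not part of the original header: the _M/_S pairs
+ * in this file are used as ((val >> FIELD_S) & FIELD_M) to read a field
+ * and the inverse to write one, assuming BITFIELD_MASK(w) expands to a
+ * w-bit mask ((1 << w) - 1) as in the companion Broadcom utility headers.
+ */
+#if 0
+static uint8 sdio_set_uhsi_speed(uint8 reg13, uint8 speed)	/* CCCR reg 0x13 */
+{
+	reg13 &= ~(SDIO_BUS_SPEED_UHSISEL_M << SDIO_BUS_SPEED_UHSISEL_S);
+	reg13 |= (speed & SDIO_BUS_SPEED_UHSISEL_M) << SDIO_BUS_SPEED_UHSISEL_S;
+	return reg13;
+}
+#endif
+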
+/* for getting bus speed cap in card: reg 0x14 */
+#define SDIO_BUS_SPEED_UHSICAP_M	BITFIELD_MASK(3)
+#define SDIO_BUS_SPEED_UHSICAP_S	0
+
+/* for getting driver type CAP in card: reg 0x15 */
+#define SDIO_BUS_DRVR_TYPE_CAP_M	BITFIELD_MASK(3)
+#define SDIO_BUS_DRVR_TYPE_CAP_S	0
+
+/* for setting driver type selection in card: reg 0x15 */
+#define SDIO_BUS_DRVR_TYPE_SEL_M	BITFIELD_MASK(2)
+#define SDIO_BUS_DRVR_TYPE_SEL_S	4
+
+/* for getting async int support in card: reg 0x16 */
+#define SDIO_BUS_ASYNCINT_CAP_M	BITFIELD_MASK(1)
+#define SDIO_BUS_ASYNCINT_CAP_S	0
+
+/* for setting async int selection in card: reg 0x16 */
+#define SDIO_BUS_ASYNCINT_SEL_M	BITFIELD_MASK(1)
+#define SDIO_BUS_ASYNCINT_SEL_S	1
+
+/* brcm sepint */
+#define SDIO_SEPINT_MASK	0x01	/* route sdpcmdev intr onto separate pad (chip-specific) */
+#define SDIO_SEPINT_OE		0x02	/* 1 asserts output enable for above pad */
+#define SDIO_SEPINT_ACT_HI	0x04	/* use active high interrupt level instead of active low */
+
+/* FBR structure for function 1-7, FBR addresses and register offsets */
+typedef volatile struct {
+	uint8	devctr;			/* device interface, CSA control */
+	uint8	ext_dev;		/* extended standard I/O device type code */
+	uint8	pwr_sel;		/* power selection support */
+	uint8	PAD[6];			/* reserved */
+
+	uint8	cis_low;		/* CIS LSB */
+	uint8	cis_mid;
+	uint8	cis_high;		/* CIS MSB */
+	uint8	csa_low;		/* code storage area, LSB */
+	uint8	csa_mid;
+	uint8	csa_high;		/* code storage area, MSB */
+	uint8	csa_dat_win;		/* data access window to function */
+
+	uint8	fnx_blk_size[2];	/* block size, little endian */
+} sdio_fbr_t;
+
+/* Maximum number of I/O funcs */
+#define SDIOD_MAX_FUNCS			8
+#define SDIOD_MAX_IOFUNCS		7
+
+/* SDIO Device FBR Start Address  */
+#define SDIOD_FBR_STARTADDR		0x100
+
+/* SDIO Device FBR Size */
+#define SDIOD_FBR_SIZE			0x100
+
+/* Macro to calculate FBR register base */
+#define SDIOD_FBR_BASE(n)		((n) * 0x100)
+
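+/*
+ * Illustrative example, not part of the original header: the function 2
+ * block-size LSB register sits at SDIOD_FBR_BASE(2) + SDIOD_FBR_BLKSIZE_0
+ * = 0x200 + 0x10 = 0x210 in the function 0 register space.
+ */
+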
+/* Function register offsets */
+#define SDIOD_FBR_DEVCTR		0x00	/* basic info for function */
+#define SDIOD_FBR_EXT_DEV		0x01	/* extended I/O device code */
+#define SDIOD_FBR_PWR_SEL		0x02	/* power selection bits */
+
+/* SDIO Function CIS ptr offset */
+#define SDIOD_FBR_CISPTR_0		0x09
+#define SDIOD_FBR_CISPTR_1		0x0A
+#define SDIOD_FBR_CISPTR_2		0x0B
+
+/* Code Storage Area pointer */
+#define SDIOD_FBR_CSA_ADDR_0		0x0C
+#define SDIOD_FBR_CSA_ADDR_1		0x0D
+#define SDIOD_FBR_CSA_ADDR_2		0x0E
+#define SDIOD_FBR_CSA_DATA		0x0F
+
+/* SDIO Function I/O Block Size */
+#define SDIOD_FBR_BLKSIZE_0		0x10
+#define SDIOD_FBR_BLKSIZE_1		0x11
+
+/* devctr */
+#define SDIOD_FBR_DEVCTR_DIC	0x0f	/* device interface code */
+#define SDIOD_FBR_DECVTR_CSA	0x40	/* CSA support flag */
+#define SDIOD_FBR_DEVCTR_CSA_EN	0x80	/* CSA enabled */
+/* interface codes */
+#define SDIOD_DIC_NONE		0	/* SDIO standard interface is not supported */
+#define SDIOD_DIC_UART		1
+#define SDIOD_DIC_BLUETOOTH_A	2
+#define SDIOD_DIC_BLUETOOTH_B	3
+#define SDIOD_DIC_GPS		4
+#define SDIOD_DIC_CAMERA	5
+#define SDIOD_DIC_PHS		6
+#define SDIOD_DIC_WLAN		7
+#define SDIOD_DIC_EXT		0xf	/* extended device interface, read ext_dev register */
+
+/* pwr_sel */
+#define SDIOD_PWR_SEL_SPS	0x01	/* supports power selection */
+#define SDIOD_PWR_SEL_EPS	0x02	/* enable power selection (low-current mode) */
+
+/* misc defines */
+#define SDIO_FUNC_0		0
+#define SDIO_FUNC_1		1
+#define SDIO_FUNC_2		2
+#define SDIO_FUNC_3		3
+#define SDIO_FUNC_4		4
+#define SDIO_FUNC_5		5
+#define SDIO_FUNC_6		6
+#define SDIO_FUNC_7		7
+
+#define SD_CARD_TYPE_UNKNOWN	0	/* bad type or unrecognized */
+#define SD_CARD_TYPE_IO		1	/* IO only card */
+#define SD_CARD_TYPE_MEMORY	2	/* memory only card */
+#define SD_CARD_TYPE_COMBO	3	/* IO and memory combo card */
+
+#define SDIO_MAX_BLOCK_SIZE	2048	/* maximum block size for block mode operation */
+#define SDIO_MIN_BLOCK_SIZE	1	/* minimum block size for block mode operation */
+
+/* Card registers: status bit position */
+#define CARDREG_STATUS_BIT_OUTOFRANGE		31
+#define CARDREG_STATUS_BIT_COMCRCERROR		23
+#define CARDREG_STATUS_BIT_ILLEGALCOMMAND	22
+#define CARDREG_STATUS_BIT_ERROR		19
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE3	12
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE2	11
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE1	10
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE0	9
+#define CARDREG_STATUS_BIT_FUN_NUM_ERROR	4
+
+
+
+#define SD_CMD_GO_IDLE_STATE		0	/* mandatory for SDIO */
+#define SD_CMD_SEND_OPCOND		1
+#define SD_CMD_MMC_SET_RCA		3
+#define SD_CMD_IO_SEND_OP_COND		5	/* mandatory for SDIO */
+#define SD_CMD_SELECT_DESELECT_CARD	7
+#define SD_CMD_SEND_CSD			9
+#define SD_CMD_SEND_CID			10
+#define SD_CMD_STOP_TRANSMISSION	12
+#define SD_CMD_SEND_STATUS		13
+#define SD_CMD_GO_INACTIVE_STATE	15
+#define SD_CMD_SET_BLOCKLEN		16
+#define SD_CMD_READ_SINGLE_BLOCK	17
+#define SD_CMD_READ_MULTIPLE_BLOCK	18
+#define SD_CMD_WRITE_BLOCK		24
+#define SD_CMD_WRITE_MULTIPLE_BLOCK	25
+#define SD_CMD_PROGRAM_CSD		27
+#define SD_CMD_SET_WRITE_PROT		28
+#define SD_CMD_CLR_WRITE_PROT		29
+#define SD_CMD_SEND_WRITE_PROT		30
+#define SD_CMD_ERASE_WR_BLK_START	32
+#define SD_CMD_ERASE_WR_BLK_END		33
+#define SD_CMD_ERASE			38
+#define SD_CMD_LOCK_UNLOCK		42
+#define SD_CMD_IO_RW_DIRECT		52	/* mandatory for SDIO */
+#define SD_CMD_IO_RW_EXTENDED		53	/* mandatory for SDIO */
+#define SD_CMD_APP_CMD			55
+#define SD_CMD_GEN_CMD			56
+#define SD_CMD_READ_OCR			58
+#define SD_CMD_CRC_ON_OFF		59	/* mandatory for SDIO */
+#define SD_ACMD_SD_STATUS		13
+#define SD_ACMD_SEND_NUM_WR_BLOCKS	22
+#define SD_ACMD_SET_WR_BLOCK_ERASE_CNT	23
+#define SD_ACMD_SD_SEND_OP_COND		41
+#define SD_ACMD_SET_CLR_CARD_DETECT	42
+#define SD_ACMD_SEND_SCR		51
+
+/* argument for SD_CMD_IO_RW_DIRECT and SD_CMD_IO_RW_EXTENDED */
+#define SD_IO_OP_READ		0   /* Read_Write: Read */
+#define SD_IO_OP_WRITE		1   /* Read_Write: Write */
+#define SD_IO_RW_NORMAL		0   /* no RAW */
+#define SD_IO_RW_RAW		1   /* RAW */
+#define SD_IO_BYTE_MODE		0   /* Byte Mode */
+#define SD_IO_BLOCK_MODE	1   /* BlockMode */
+#define SD_IO_FIXED_ADDRESS	0   /* fix Address */
+#define SD_IO_INCREMENT_ADDRESS	1   /* IncrementAddress */
+
+/* build SD_CMD_IO_RW_DIRECT Argument */
+#define SDIO_IO_RW_DIRECT_ARG(rw, raw, func, addr, data) \
+	((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((raw) & 1) << 27) | \
+	 (((addr) & 0x1FFFF) << 9) | ((data) & 0xFF))
+
+/* build SD_CMD_IO_RW_EXTENDED Argument */
+#define SDIO_IO_RW_EXTENDED_ARG(rw, blk, func, addr, inc_addr, count) \
+	((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((blk) & 1) << 27) | \
+	 (((inc_addr) & 1) << 26) | (((addr) & 0x1FFFF) << 9) | ((count) & 0x1FF))
+
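+/*
+ * Illustrative sketch, not part of the original header: building CMD52
+ * and CMD53 arguments with the macros above (addresses and counts are
+ * arbitrary example values).
+ */
+#if 0
+static void sdio_arg_examples(void)
+{
+	uint32 arg;
+
+	/* CMD52: write 0xAB to an F1 register, no read-after-write */
+	arg = SDIO_IO_RW_DIRECT_ARG(SD_IO_OP_WRITE, SD_IO_RW_NORMAL,
+	                            SDIO_FUNC_1, 0x1000e, 0xAB);
+	/* CMD53: block-mode read of 4 blocks from F2 at a fixed address */
+	arg = SDIO_IO_RW_EXTENDED_ARG(SD_IO_OP_READ, SD_IO_BLOCK_MODE,
+	                              SDIO_FUNC_2, 0x8000, SD_IO_FIXED_ADDRESS, 4);
+	(void)arg;
+}
+#endif
+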
+/* SDIO response parameters */
+#define SD_RSP_NO_NONE			0
+#define SD_RSP_NO_1			1
+#define SD_RSP_NO_2			2
+#define SD_RSP_NO_3			3
+#define SD_RSP_NO_4			4
+#define SD_RSP_NO_5			5
+#define SD_RSP_NO_6			6
+
+	/* Modified R6 response (to CMD3) */
+#define SD_RSP_MR6_COM_CRC_ERROR	0x8000
+#define SD_RSP_MR6_ILLEGAL_COMMAND	0x4000
+#define SD_RSP_MR6_ERROR		0x2000
+
+	/* Modified R1 in R4 Response (to CMD5) */
+#define SD_RSP_MR1_SBIT			0x80
+#define SD_RSP_MR1_PARAMETER_ERROR	0x40
+#define SD_RSP_MR1_RFU5			0x20
+#define SD_RSP_MR1_FUNC_NUM_ERROR	0x10
+#define SD_RSP_MR1_COM_CRC_ERROR	0x08
+#define SD_RSP_MR1_ILLEGAL_COMMAND	0x04
+#define SD_RSP_MR1_RFU1			0x02
+#define SD_RSP_MR1_IDLE_STATE		0x01
+
+	/* R5 response (to CMD52 and CMD53) */
+#define SD_RSP_R5_COM_CRC_ERROR		0x80
+#define SD_RSP_R5_ILLEGAL_COMMAND	0x40
+#define SD_RSP_R5_IO_CURRENTSTATE1	0x20
+#define SD_RSP_R5_IO_CURRENTSTATE0	0x10
+#define SD_RSP_R5_ERROR			0x08
+#define SD_RSP_R5_RFU			0x04
+#define SD_RSP_R5_FUNC_NUM_ERROR	0x02
+#define SD_RSP_R5_OUT_OF_RANGE		0x01
+
+#define SD_RSP_R5_ERRBITS		0xCB
+
+
+/* ------------------------------------------------
+ *  SDIO Commands and responses
+ *
+ *  I/O only commands are:
+ *      CMD0, CMD3, CMD5, CMD7, CMD14, CMD15, CMD52, CMD53
+ * ------------------------------------------------
+ */
+
+/* SDIO Commands */
+#define SDIOH_CMD_0		0
+#define SDIOH_CMD_3		3
+#define SDIOH_CMD_5		5
+#define SDIOH_CMD_7		7
+#define SDIOH_CMD_11		11
+#define SDIOH_CMD_14		14
+#define SDIOH_CMD_15		15
+#define SDIOH_CMD_19		19
+#define SDIOH_CMD_52		52
+#define SDIOH_CMD_53		53
+#define SDIOH_CMD_59		59
+
+/* SDIO Command Responses */
+#define SDIOH_RSP_NONE		0
+#define SDIOH_RSP_R1		1
+#define SDIOH_RSP_R2		2
+#define SDIOH_RSP_R3		3
+#define SDIOH_RSP_R4		4
+#define SDIOH_RSP_R5		5
+#define SDIOH_RSP_R6		6
+
+/*
+ *  SDIO Response Error flags
+ */
+#define SDIOH_RSP5_ERROR_FLAGS	0xCB
+
+/* ------------------------------------------------
+ * SDIO Command structures. I/O only commands are:
+ *
+ * 	CMD0, CMD3, CMD5, CMD7, CMD15, CMD52, CMD53
+ * ------------------------------------------------
+ */
+
+#define CMD5_OCR_M		BITFIELD_MASK(24)
+#define CMD5_OCR_S		0
+
+#define CMD5_S18R_M		BITFIELD_MASK(1)
+#define CMD5_S18R_S		24
+
+#define CMD7_RCA_M		BITFIELD_MASK(16)
+#define CMD7_RCA_S		16
+
+#define CMD14_RCA_M		BITFIELD_MASK(16)
+#define CMD14_RCA_S		16
+#define CMD14_SLEEP_M		BITFIELD_MASK(1)
+#define CMD14_SLEEP_S		15
+
+#define CMD_15_RCA_M		BITFIELD_MASK(16)
+#define CMD_15_RCA_S		16
+
+#define CMD52_DATA_M		BITFIELD_MASK(8)  /* Bits [7:0]    - Write Data/Stuff bits of CMD52
+						   */
+#define CMD52_DATA_S		0
+#define CMD52_REG_ADDR_M	BITFIELD_MASK(17) /* Bits [25:9]   - register address */
+#define CMD52_REG_ADDR_S	9
+#define CMD52_RAW_M		BITFIELD_MASK(1)  /* Bit  27       - Read after Write flag */
+#define CMD52_RAW_S		27
+#define CMD52_FUNCTION_M	BITFIELD_MASK(3)  /* Bits [30:28]  - Function number */
+#define CMD52_FUNCTION_S	28
+#define CMD52_RW_FLAG_M		BITFIELD_MASK(1)  /* Bit  31       - R/W flag */
+#define CMD52_RW_FLAG_S		31
+
+
+#define CMD53_BYTE_BLK_CNT_M	BITFIELD_MASK(9) /* Bits [8:0]     - Byte/Block Count of CMD53 */
+#define CMD53_BYTE_BLK_CNT_S	0
+#define CMD53_REG_ADDR_M	BITFIELD_MASK(17) /* Bits [25:9]   - register address */
+#define CMD53_REG_ADDR_S	9
+#define CMD53_OP_CODE_M		BITFIELD_MASK(1)  /* Bit  26       - R/W Operation Code */
+#define CMD53_OP_CODE_S		26
+#define CMD53_BLK_MODE_M	BITFIELD_MASK(1)  /* Bit  27       - Block Mode */
+#define CMD53_BLK_MODE_S	27
+#define CMD53_FUNCTION_M	BITFIELD_MASK(3)  /* Bits [30:28]  - Function number */
+#define CMD53_FUNCTION_S	28
+#define CMD53_RW_FLAG_M		BITFIELD_MASK(1)  /* Bit  31       - R/W flag */
+#define CMD53_RW_FLAG_S		31
+
+/* ------------------------------------------------------
+ * SDIO Command Response structures for SD1 and SD4 modes
+ *  -----------------------------------------------------
+ */
+#define RSP4_IO_OCR_M		BITFIELD_MASK(24) /* Bits [23:0]  - Card's OCR Bits [23:0] */
+#define RSP4_IO_OCR_S		0
+
+#define RSP4_S18A_M			BITFIELD_MASK(1) /* Bit 24  - S18A, 1.8V signaling accepted */
+#define RSP4_S18A_S			24
+
+#define RSP4_STUFF_M		BITFIELD_MASK(3)  /* Bits [26:24] - Stuff bits */
+#define RSP4_STUFF_S		24
+#define RSP4_MEM_PRESENT_M	BITFIELD_MASK(1)  /* Bit  27      - Memory present */
+#define RSP4_MEM_PRESENT_S	27
+#define RSP4_NUM_FUNCS_M	BITFIELD_MASK(3)  /* Bits [30:28] - Number of I/O funcs */
+#define RSP4_NUM_FUNCS_S	28
+#define RSP4_CARD_READY_M	BITFIELD_MASK(1)  /* Bit  31      - SDIO card ready */
+#define RSP4_CARD_READY_S	31
+
+#define RSP6_STATUS_M		BITFIELD_MASK(16) /* Bits [15:0]  - Card status bits [19,22,23,12:0]
+						   */
+#define RSP6_STATUS_S		0
+#define RSP6_IO_RCA_M		BITFIELD_MASK(16) /* Bits [31:16] - RCA bits[31-16] */
+#define RSP6_IO_RCA_S		16
+
+#define RSP1_AKE_SEQ_ERROR_M	BITFIELD_MASK(1)  /* Bit 3       - Authentication seq error */
+#define RSP1_AKE_SEQ_ERROR_S	3
+#define RSP1_APP_CMD_M		BITFIELD_MASK(1)  /* Bit 5       - Card expects ACMD */
+#define RSP1_APP_CMD_S		5
+#define RSP1_READY_FOR_DATA_M	BITFIELD_MASK(1)  /* Bit 8       - Ready for data (buff empty) */
+#define RSP1_READY_FOR_DATA_S	8
+#define RSP1_CURR_STATE_M	BITFIELD_MASK(4)  /* Bits [12:9] - State of card
+						   * when Cmd was received
+						   */
+#define RSP1_CURR_STATE_S	9
+#define RSP1_EARSE_RESET_M	BITFIELD_MASK(1)  /* Bit 13   - Erase seq cleared */
+#define RSP1_EARSE_RESET_S	13
+#define RSP1_CARD_ECC_DISABLE_M	BITFIELD_MASK(1)  /* Bit 14   - Card ECC disabled */
+#define RSP1_CARD_ECC_DISABLE_S	14
+#define RSP1_WP_ERASE_SKIP_M	BITFIELD_MASK(1)  /* Bit 15   - Partial blocks erased due to W/P */
+#define RSP1_WP_ERASE_SKIP_S	15
+#define RSP1_CID_CSD_OVERW_M	BITFIELD_MASK(1)  /* Bit 16   - Illegal write to CID or R/O bits
+						   * of CSD
+						   */
+#define RSP1_CID_CSD_OVERW_S	16
+#define RSP1_ERROR_M		BITFIELD_MASK(1)  /* Bit 19   - General/Unknown error */
+#define RSP1_ERROR_S		19
+#define RSP1_CC_ERROR_M		BITFIELD_MASK(1)  /* Bit 20   - Internal Card Control error */
+#define RSP1_CC_ERROR_S		20
+#define RSP1_CARD_ECC_FAILED_M	BITFIELD_MASK(1)  /* Bit 21   - Card internal ECC failed
+						   * to correct data
+						   */
+#define RSP1_CARD_ECC_FAILED_S	21
+#define RSP1_ILLEGAL_CMD_M	BITFIELD_MASK(1)  /* Bit 22   - Cmd not legal for the card state */
+#define RSP1_ILLEGAL_CMD_S	22
+#define RSP1_COM_CRC_ERROR_M	BITFIELD_MASK(1)  /* Bit 23   - CRC check of previous command failed
+						   */
+#define RSP1_COM_CRC_ERROR_S	23
+#define RSP1_LOCK_UNLOCK_FAIL_M	BITFIELD_MASK(1)  /* Bit 24   - Card lock-unlock Cmd Seq error */
+#define RSP1_LOCK_UNLOCK_FAIL_S	24
+#define RSP1_CARD_LOCKED_M	BITFIELD_MASK(1)  /* Bit 25   - Card locked by the host */
+#define RSP1_CARD_LOCKED_S	25
+#define RSP1_WP_VIOLATION_M	BITFIELD_MASK(1)  /* Bit 26   - Attempt to program
+						   * write-protected blocks
+						   */
+#define RSP1_WP_VIOLATION_S	26
+#define RSP1_ERASE_PARAM_M	BITFIELD_MASK(1)  /* Bit 27   - Invalid erase blocks */
+#define RSP1_ERASE_PARAM_S	27
+#define RSP1_ERASE_SEQ_ERR_M	BITFIELD_MASK(1)  /* Bit 28   - Erase Cmd seq error */
+#define RSP1_ERASE_SEQ_ERR_S	28
+#define RSP1_BLK_LEN_ERR_M	BITFIELD_MASK(1)  /* Bit 29   - Block length error */
+#define RSP1_BLK_LEN_ERR_S	29
+#define RSP1_ADDR_ERR_M		BITFIELD_MASK(1)  /* Bit 30   - Misaligned address */
+#define RSP1_ADDR_ERR_S		30
+#define RSP1_OUT_OF_RANGE_M	BITFIELD_MASK(1)  /* Bit 31   - Cmd arg was out of range */
+#define RSP1_OUT_OF_RANGE_S	31
+
+
+#define RSP5_DATA_M		BITFIELD_MASK(8)  /* Bits [0:7]   - data */
+#define RSP5_DATA_S		0
+#define RSP5_FLAGS_M		BITFIELD_MASK(8)  /* Bit  [15:8]  - Rsp flags */
+#define RSP5_FLAGS_S		8
+#define RSP5_STUFF_M		BITFIELD_MASK(16) /* Bits [31:16] - Stuff bits */
+#define RSP5_STUFF_S		16
+
+/* ----------------------------------------------
+ * SDIO Command Response structures for SPI mode
+ * ----------------------------------------------
+ */
+#define SPIRSP4_IO_OCR_M	BITFIELD_MASK(16) /* Bits [15:0]    - Card's OCR Bits [23:8] */
+#define SPIRSP4_IO_OCR_S	0
+#define SPIRSP4_STUFF_M		BITFIELD_MASK(3)  /* Bits [18:16]   - Stuff bits */
+#define SPIRSP4_STUFF_S		16
+#define SPIRSP4_MEM_PRESENT_M	BITFIELD_MASK(1)  /* Bit  19        - Memory present */
+#define SPIRSP4_MEM_PRESENT_S	19
+#define SPIRSP4_NUM_FUNCS_M	BITFIELD_MASK(3)  /* Bits [22:20]   - Number of I/O funcs */
+#define SPIRSP4_NUM_FUNCS_S	20
+#define SPIRSP4_CARD_READY_M	BITFIELD_MASK(1)  /* Bit  23        - SDIO card ready */
+#define SPIRSP4_CARD_READY_S	23
+#define SPIRSP4_IDLE_STATE_M	BITFIELD_MASK(1)  /* Bit  24        - idle state */
+#define SPIRSP4_IDLE_STATE_S	24
+#define SPIRSP4_ILLEGAL_CMD_M	BITFIELD_MASK(1)  /* Bit  26        - Illegal Cmd error */
+#define SPIRSP4_ILLEGAL_CMD_S	26
+#define SPIRSP4_COM_CRC_ERROR_M	BITFIELD_MASK(1)  /* Bit  27        - COM CRC error */
+#define SPIRSP4_COM_CRC_ERROR_S	27
+#define SPIRSP4_FUNC_NUM_ERROR_M	BITFIELD_MASK(1)  /* Bit  28        - Function number error
+							   */
+#define SPIRSP4_FUNC_NUM_ERROR_S	28
+#define SPIRSP4_PARAM_ERROR_M	BITFIELD_MASK(1)  /* Bit  30        - Parameter Error Bit */
+#define SPIRSP4_PARAM_ERROR_S	30
+#define SPIRSP4_START_BIT_M	BITFIELD_MASK(1)  /* Bit  31        - Start Bit */
+#define SPIRSP4_START_BIT_S	31
+
+#define SPIRSP5_DATA_M			BITFIELD_MASK(8)  /* Bits [23:16]   - R/W Data */
+#define SPIRSP5_DATA_S			16
+#define SPIRSP5_IDLE_STATE_M		BITFIELD_MASK(1)  /* Bit  24        - Idle state */
+#define SPIRSP5_IDLE_STATE_S		24
+#define SPIRSP5_ILLEGAL_CMD_M		BITFIELD_MASK(1)  /* Bit  26        - Illegal Cmd error */
+#define SPIRSP5_ILLEGAL_CMD_S		26
+#define SPIRSP5_COM_CRC_ERROR_M		BITFIELD_MASK(1)  /* Bit  27        - COM CRC error */
+#define SPIRSP5_COM_CRC_ERROR_S		27
+#define SPIRSP5_FUNC_NUM_ERROR_M	BITFIELD_MASK(1)  /* Bit  28        - Function number error
+							   */
+#define SPIRSP5_FUNC_NUM_ERROR_S	28
+#define SPIRSP5_PARAM_ERROR_M		BITFIELD_MASK(1)  /* Bit  30        - Parameter Error Bit */
+#define SPIRSP5_PARAM_ERROR_S		30
+#define SPIRSP5_START_BIT_M		BITFIELD_MASK(1)  /* Bit  31        - Start Bit */
+#define SPIRSP5_START_BIT_S		31
+
+/* RSP6 card status format; Pg 68 Physical Layer spec v 1.10 */
+#define RSP6STAT_AKE_SEQ_ERROR_M	BITFIELD_MASK(1)  /* Bit 3	- Authentication seq error
+							   */
+#define RSP6STAT_AKE_SEQ_ERROR_S	3
+#define RSP6STAT_APP_CMD_M		BITFIELD_MASK(1)  /* Bit 5	- Card expects ACMD */
+#define RSP6STAT_APP_CMD_S		5
+#define RSP6STAT_READY_FOR_DATA_M	BITFIELD_MASK(1)  /* Bit 8	- Ready for data
+							   * (buff empty)
+							   */
+#define RSP6STAT_READY_FOR_DATA_S	8
+#define RSP6STAT_CURR_STATE_M		BITFIELD_MASK(4)  /* Bits [12:9] - Card state at
+							   * Cmd reception
+							   */
+#define RSP6STAT_CURR_STATE_S		9
+#define RSP6STAT_ERROR_M		BITFIELD_MASK(1)  /* Bit 13  - General/Unknown error Bit 19
+							   */
+#define RSP6STAT_ERROR_S		13
+#define RSP6STAT_ILLEGAL_CMD_M		BITFIELD_MASK(1)  /* Bit 14  - Illegal cmd for
+							   * card state Bit 22
+							   */
+#define RSP6STAT_ILLEGAL_CMD_S		14
+#define RSP6STAT_COM_CRC_ERROR_M	BITFIELD_MASK(1)  /* Bit 15  - CRC previous command
+							   * failed Bit 23
+							   */
+#define RSP6STAT_COM_CRC_ERROR_S	15
+
+#define SDIOH_XFER_TYPE_READ    SD_IO_OP_READ
+#define SDIOH_XFER_TYPE_WRITE   SD_IO_OP_WRITE
+
+/* command issue options */
+#define CMD_OPTION_DEFAULT	0
+#define CMD_OPTION_TUNING	1
+#endif /* _SDIO_H */
diff --git a/drivers/staging/edison-bcm43340/include/sdioh.h b/drivers/staging/edison-bcm43340/include/sdioh.h
new file mode 100644
index 0000000..f2bd9ae
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/sdioh.h
@@ -0,0 +1,445 @@
+/*
+ * SDIO Host Controller Spec header file
+ * Register map and definitions for the Standard Host Controller
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdioh.h 345499 2012-07-18 06:59:05Z $
+ */
+
+#ifndef	_SDIOH_H
+#define	_SDIOH_H
+
+#define SD_SysAddr			0x000
+#define SD_BlockSize			0x004
+#define SD_BlockCount 			0x006
+#define SD_Arg0				0x008
+#define SD_Arg1 			0x00A
+#define SD_TransferMode			0x00C
+#define SD_Command 			0x00E
+#define SD_Response0			0x010
+#define SD_Response1 			0x012
+#define SD_Response2			0x014
+#define SD_Response3 			0x016
+#define SD_Response4			0x018
+#define SD_Response5 			0x01A
+#define SD_Response6			0x01C
+#define SD_Response7 			0x01E
+#define SD_BufferDataPort0		0x020
+#define SD_BufferDataPort1 		0x022
+#define SD_PresentState			0x024
+#define SD_HostCntrl			0x028
+#define SD_PwrCntrl			0x029
+#define SD_BlockGapCntrl 		0x02A
+#define SD_WakeupCntrl 			0x02B
+#define SD_ClockCntrl			0x02C
+#define SD_TimeoutCntrl 		0x02E
+#define SD_SoftwareReset		0x02F
+#define SD_IntrStatus			0x030
+#define SD_ErrorIntrStatus 		0x032
+#define SD_IntrStatusEnable		0x034
+#define SD_ErrorIntrStatusEnable 	0x036
+#define SD_IntrSignalEnable		0x038
+#define SD_ErrorIntrSignalEnable 	0x03A
+#define SD_CMD12ErrorStatus		0x03C
+#define SD_Capabilities			0x040
+#define SD_Capabilities3		0x044
+#define SD_MaxCurCap			0x048
+#define SD_MaxCurCap_Reserved		0x04C
+#define SD_ADMA_ErrStatus		0x054
+#define SD_ADMA_SysAddr			0x058
+#define SD_SlotInterruptStatus		0x0FC
+#define SD_HostControllerVersion 	0x0FE
+#define	SD_GPIO_Reg			0x100
+#define	SD_GPIO_OE			0x104
+#define	SD_GPIO_Enable			0x108
+
+/* SD specific registers in PCI config space */
+#define SD_SlotInfo	0x40
+
+/* HC 3.0 specific registers and offsets */
+#define SD3_HostCntrl2			0x03E
+/* preset regs start and count */
+#define SD3_PresetValStart		0x060
+#define SD3_PresetValCount		8
+/* preset-indiv regs */
+#define SD3_PresetVal_init		0x060
+#define SD3_PresetVal_default	0x062
+#define SD3_PresetVal_HS		0x064
+#define SD3_PresetVal_SDR12		0x066
+#define SD3_PresetVal_SDR25		0x068
+#define SD3_PresetVal_SDR50		0x06a
+#define SD3_PresetVal_SDR104	0x06c
+#define SD3_PresetVal_DDR50		0x06e
+/* SDIO3.0 Revx specific Registers */
+#define SD3_Tuning_Info_Register 0x0EC
+#define SD3_WL_BT_reset_register 0x0F0
+
+
+/* preset value indices */
+#define SD3_PRESETVAL_INITIAL_IX	0
+#define SD3_PRESETVAL_DESPEED_IX	1
+#define SD3_PRESETVAL_HISPEED_IX	2
+#define SD3_PRESETVAL_SDR12_IX		3
+#define SD3_PRESETVAL_SDR25_IX		4
+#define SD3_PRESETVAL_SDR50_IX		5
+#define SD3_PRESETVAL_SDR104_IX		6
+#define SD3_PRESETVAL_DDR50_IX		7
+
+/* SD_Capabilities reg (0x040) */
+#define CAP_TO_CLKFREQ_M 	BITFIELD_MASK(6)
+#define CAP_TO_CLKFREQ_S 	0
+#define CAP_TO_CLKUNIT_M  	BITFIELD_MASK(1)
+#define CAP_TO_CLKUNIT_S 	7
+/* Note: for the SDIO 2.0 case this mask should be 6 bits (the 2 MSBs are
+ * reserved); 8 bits are used here since that is required for SDIO 3.0.
+ */
+#define CAP_BASECLK_M 		BITFIELD_MASK(8)
+#define CAP_BASECLK_S 		8
+#define CAP_MAXBLOCK_M 		BITFIELD_MASK(2)
+#define CAP_MAXBLOCK_S		16
+#define CAP_ADMA2_M		BITFIELD_MASK(1)
+#define CAP_ADMA2_S		19
+#define CAP_ADMA1_M		BITFIELD_MASK(1)
+#define CAP_ADMA1_S		20
+#define CAP_HIGHSPEED_M		BITFIELD_MASK(1)
+#define CAP_HIGHSPEED_S		21
+#define CAP_DMA_M		BITFIELD_MASK(1)
+#define CAP_DMA_S		22
+#define CAP_SUSPEND_M		BITFIELD_MASK(1)
+#define CAP_SUSPEND_S		23
+#define CAP_VOLT_3_3_M		BITFIELD_MASK(1)
+#define CAP_VOLT_3_3_S		24
+#define CAP_VOLT_3_0_M		BITFIELD_MASK(1)
+#define CAP_VOLT_3_0_S		25
+#define CAP_VOLT_1_8_M		BITFIELD_MASK(1)
+#define CAP_VOLT_1_8_S		26
+#define CAP_64BIT_HOST_M	BITFIELD_MASK(1)
+#define CAP_64BIT_HOST_S	28
+
+#define SDIO_OCR_READ_FAIL	(2)
+
+
+#define CAP_ASYNCINT_SUP_M	BITFIELD_MASK(1)
+#define CAP_ASYNCINT_SUP_S	29
+
+#define CAP_SLOTTYPE_M		BITFIELD_MASK(2)
+#define CAP_SLOTTYPE_S		30
+
+#define CAP3_MSBits_OFFSET	(32)
+/* Note: the following are bits in the upper 32-bit word of the
+ * capabilities register, so bit positions restart at 0 instead of 32;
+ * that is why CAP3_MSBits_OFFSET is subtracted.
+ */
+#define CAP3_SDR50_SUP_M		BITFIELD_MASK(1)
+#define CAP3_SDR50_SUP_S		(32 - CAP3_MSBits_OFFSET)
+
+#define CAP3_SDR104_SUP_M	BITFIELD_MASK(1)
+#define CAP3_SDR104_SUP_S	(33 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DDR50_SUP_M	BITFIELD_MASK(1)
+#define CAP3_DDR50_SUP_S	(34 - CAP3_MSBits_OFFSET)
+
+/* for knowing the clk caps in a single read */
+#define CAP3_30CLKCAP_M		BITFIELD_MASK(3)
+#define CAP3_30CLKCAP_S		(32 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_A_M	BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_A_S	(36 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_C_M	BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_C_S	(37 - CAP3_MSBits_OFFSET)
+
+#define CAP3_DRIVTYPE_D_M	BITFIELD_MASK(1)
+#define CAP3_DRIVTYPE_D_S	(38 - CAP3_MSBits_OFFSET)
+
+#define CAP3_RETUNING_TC_M	BITFIELD_MASK(4)
+#define CAP3_RETUNING_TC_S	(40 - CAP3_MSBits_OFFSET)
+
+#define CAP3_TUNING_SDR50_M	BITFIELD_MASK(1)
+#define CAP3_TUNING_SDR50_S	(45 - CAP3_MSBits_OFFSET)
+
+#define CAP3_RETUNING_MODES_M	BITFIELD_MASK(2)
+#define CAP3_RETUNING_MODES_S	(46 - CAP3_MSBits_OFFSET)
+
+#define CAP3_CLK_MULT_M		BITFIELD_MASK(8)
+#define CAP3_CLK_MULT_S		(48 - CAP3_MSBits_OFFSET)
+
+#define PRESET_DRIVR_SELECT_M	BITFIELD_MASK(2)
+#define PRESET_DRIVR_SELECT_S	14
+
+#define PRESET_CLK_DIV_M	BITFIELD_MASK(10)
+#define PRESET_CLK_DIV_S	0
+
+/* SD_MaxCurCap reg (0x048) */
+#define CAP_CURR_3_3_M		BITFIELD_MASK(8)
+#define CAP_CURR_3_3_S		0
+#define CAP_CURR_3_0_M		BITFIELD_MASK(8)
+#define CAP_CURR_3_0_S		8
+#define CAP_CURR_1_8_M		BITFIELD_MASK(8)
+#define CAP_CURR_1_8_S		16
+
+/* SD_SysAddr: Offset 0x0000, Size 4 bytes */
+
+/* SD_BlockSize: Offset 0x004, Size 2 bytes */
+#define BLKSZ_BLKSZ_M		BITFIELD_MASK(12)
+#define BLKSZ_BLKSZ_S		0
+#define BLKSZ_BNDRY_M		BITFIELD_MASK(3)
+#define BLKSZ_BNDRY_S		12
+
+/* SD_BlockCount: Offset 0x006, size 2 bytes */
+
+/* SD_Arg0: Offset 0x008, size = 4 bytes  */
+/* SD_TransferMode Offset 0x00C, size = 2 bytes */
+#define XFER_DMA_ENABLE_M   	BITFIELD_MASK(1)
+#define XFER_DMA_ENABLE_S	0
+#define XFER_BLK_COUNT_EN_M 	BITFIELD_MASK(1)
+#define XFER_BLK_COUNT_EN_S	1
+#define XFER_CMD_12_EN_M    	BITFIELD_MASK(1)
+#define XFER_CMD_12_EN_S 	2
+#define XFER_DATA_DIRECTION_M	BITFIELD_MASK(1)
+#define XFER_DATA_DIRECTION_S	4
+#define XFER_MULTI_BLOCK_M	BITFIELD_MASK(1)
+#define XFER_MULTI_BLOCK_S	5
+
+/* SD_Command: Offset 0x00E, size = 2 bytes */
+/* resp_type field */
+#define RESP_TYPE_NONE 		0
+#define RESP_TYPE_136  		1
+#define RESP_TYPE_48   		2
+#define RESP_TYPE_48_BUSY	3
+/* type field */
+#define CMD_TYPE_NORMAL		0
+#define CMD_TYPE_SUSPEND	1
+#define CMD_TYPE_RESUME		2
+#define CMD_TYPE_ABORT		3
+
+#define CMD_RESP_TYPE_M		BITFIELD_MASK(2)	/* Bits [0-1] 	- Response type */
+#define CMD_RESP_TYPE_S		0
+#define CMD_CRC_EN_M		BITFIELD_MASK(1)	/* Bit 3 	- CRC enable */
+#define CMD_CRC_EN_S		3
+#define CMD_INDEX_EN_M		BITFIELD_MASK(1)	/* Bit 4 	- Enable index checking */
+#define CMD_INDEX_EN_S		4
+#define CMD_DATA_EN_M		BITFIELD_MASK(1)	/* Bit 5 	- Using DAT line */
+#define CMD_DATA_EN_S		5
+#define CMD_TYPE_M		BITFIELD_MASK(2)	/* Bit [6-7] 	- Normal, abort, resume, etc
+							 */
+#define CMD_TYPE_S		6
+#define CMD_INDEX_M		BITFIELD_MASK(6)	/* Bits [8-13] 	- Command number */
+#define CMD_INDEX_S		8
+
+/* SD_BufferDataPort0	: Offset 0x020, size = 2 or 4 bytes */
+/* SD_BufferDataPort1 	: Offset 0x022, size = 2 bytes */
+/* SD_PresentState	: Offset 0x024, size = 4 bytes */
+#define PRES_CMD_INHIBIT_M	BITFIELD_MASK(1)	/* Bit 0	May use CMD */
+#define PRES_CMD_INHIBIT_S	0
+#define PRES_DAT_INHIBIT_M	BITFIELD_MASK(1)	/* Bit 1	May use DAT */
+#define PRES_DAT_INHIBIT_S	1
+#define PRES_DAT_BUSY_M		BITFIELD_MASK(1)	/* Bit 2	DAT is busy */
+#define PRES_DAT_BUSY_S		2
+#define PRES_PRESENT_RSVD_M	BITFIELD_MASK(5)	/* Bit [3-7]	rsvd */
+#define PRES_PRESENT_RSVD_S	3
+#define PRES_WRITE_ACTIVE_M	BITFIELD_MASK(1)	/* Bit 8	Write is active */
+#define PRES_WRITE_ACTIVE_S	8
+#define PRES_READ_ACTIVE_M	BITFIELD_MASK(1)	/* Bit 9	Read is active */
+#define PRES_READ_ACTIVE_S	9
+#define PRES_WRITE_DATA_RDY_M	BITFIELD_MASK(1)	/* Bit 10	Write buf is avail */
+#define PRES_WRITE_DATA_RDY_S	10
+#define PRES_READ_DATA_RDY_M	BITFIELD_MASK(1)	/* Bit 11	Read buf data avail */
+#define PRES_READ_DATA_RDY_S	11
+#define PRES_CARD_PRESENT_M	BITFIELD_MASK(1)	/* Bit 16	Card present - debounced */
+#define PRES_CARD_PRESENT_S	16
+#define PRES_CARD_STABLE_M	BITFIELD_MASK(1)	/* Bit 17	Debugging */
+#define PRES_CARD_STABLE_S	17
+#define PRES_CARD_PRESENT_RAW_M	BITFIELD_MASK(1)	/* Bit 18	Not debounced */
+#define PRES_CARD_PRESENT_RAW_S	18
+#define PRES_WRITE_ENABLED_M	BITFIELD_MASK(1)	/* Bit 19	Write protected? */
+#define PRES_WRITE_ENABLED_S	19
+#define PRES_DAT_SIGNAL_M	BITFIELD_MASK(4)	/* Bit [20-23]	Debugging */
+#define PRES_DAT_SIGNAL_S	20
+#define PRES_CMD_SIGNAL_M	BITFIELD_MASK(1)	/* Bit 24	Debugging */
+#define PRES_CMD_SIGNAL_S	24
+
+/* SD_HostCntrl: Offset 0x028, size = 1 bytes */
+#define HOST_LED_M		BITFIELD_MASK(1)	/* Bit 0	LED On/Off */
+#define HOST_LED_S		0
+#define HOST_DATA_WIDTH_M	BITFIELD_MASK(1)	/* Bit 1	4 bit enable */
+#define HOST_DATA_WIDTH_S	1
+#define HOST_HI_SPEED_EN_M	BITFIELD_MASK(1)	/* Bit 2	High speed vs low speed */
+#define HOST_HI_SPEED_EN_S	2
+#define HOST_DMA_SEL_M		BITFIELD_MASK(2)	/* Bits [4:3]	DMA Select */
+#define HOST_DMA_SEL_S		3
+
+/* Host Control2: */
+#define HOSTCtrl2_PRESVAL_EN_M	BITFIELD_MASK(1)	/* 1 bit */
+#define HOSTCtrl2_PRESVAL_EN_S	15					/* bit# */
+
+#define HOSTCtrl2_ASYINT_EN_M	BITFIELD_MASK(1)	/* 1 bit */
+#define HOSTCtrl2_ASYINT_EN_S	14					/* bit# */
+
+#define HOSTCtrl2_SAMPCLK_SEL_M	BITFIELD_MASK(1)	/* 1 bit */
+#define HOSTCtrl2_SAMPCLK_SEL_S	7					/* bit# */
+
+#define HOSTCtrl2_EXEC_TUNING_M	BITFIELD_MASK(1)	/* 1 bit */
+#define HOSTCtrl2_EXEC_TUNING_S	6					/* bit# */
+
+#define HOSTCtrl2_DRIVSTRENGTH_SEL_M	BITFIELD_MASK(2)	/* 2 bit */
+#define HOSTCtrl2_DRIVSTRENGTH_SEL_S	4					/* bit# */
+
+#define HOSTCtrl2_1_8SIG_EN_M	BITFIELD_MASK(1)	/* 1 bit */
+#define HOSTCtrl2_1_8SIG_EN_S	3					/* bit# */
+
+#define HOSTCtrl2_UHSMODE_SEL_M	BITFIELD_MASK(3)	/* 3 bit */
+#define HOSTCtrl2_UHSMODE_SEL_S	0					/* bit# */
+
+#define HOST_CONTR_VER_2		(1)
+#define HOST_CONTR_VER_3		(2)
+
+/* misc defines */
+#define SD1_MODE 		0x1	/* SD Host Cntrlr Spec */
+#define SD4_MODE 		0x2	/* SD Host Cntrlr Spec */
+
+/* SD_PwrCntrl: Offset 0x029, size = 1 bytes */
+#define PWR_BUS_EN_M		BITFIELD_MASK(1)	/* Bit 0	Power the bus */
+#define PWR_BUS_EN_S		0
+#define PWR_VOLTS_M		BITFIELD_MASK(3)	/* Bit [1-3]	Voltage Select */
+#define PWR_VOLTS_S		1
+
+/* SD_SoftwareReset: Offset 0x02F, size = 1 byte */
+#define SW_RESET_ALL_M		BITFIELD_MASK(1)	/* Bit 0	Reset All */
+#define SW_RESET_ALL_S		0
+#define SW_RESET_CMD_M		BITFIELD_MASK(1)	/* Bit 1	CMD Line Reset */
+#define SW_RESET_CMD_S		1
+#define SW_RESET_DAT_M		BITFIELD_MASK(1)	/* Bit 2	DAT Line Reset */
+#define SW_RESET_DAT_S		2
+
+/* SD_IntrStatus: Offset 0x030, size = 2 bytes */
+/* Defs also serve SD_IntrStatusEnable and SD_IntrSignalEnable */
+#define INTSTAT_CMD_COMPLETE_M		BITFIELD_MASK(1)	/* Bit 0 */
+#define INTSTAT_CMD_COMPLETE_S		0
+#define INTSTAT_XFER_COMPLETE_M		BITFIELD_MASK(1)
+#define INTSTAT_XFER_COMPLETE_S		1
+#define INTSTAT_BLOCK_GAP_EVENT_M	BITFIELD_MASK(1)
+#define INTSTAT_BLOCK_GAP_EVENT_S	2
+#define INTSTAT_DMA_INT_M		BITFIELD_MASK(1)
+#define INTSTAT_DMA_INT_S		3
+#define INTSTAT_BUF_WRITE_READY_M	BITFIELD_MASK(1)
+#define INTSTAT_BUF_WRITE_READY_S	4
+#define INTSTAT_BUF_READ_READY_M	BITFIELD_MASK(1)
+#define INTSTAT_BUF_READ_READY_S	5
+#define INTSTAT_CARD_INSERTION_M	BITFIELD_MASK(1)
+#define INTSTAT_CARD_INSERTION_S	6
+#define INTSTAT_CARD_REMOVAL_M		BITFIELD_MASK(1)
+#define INTSTAT_CARD_REMOVAL_S		7
+#define INTSTAT_CARD_INT_M		BITFIELD_MASK(1)
+#define INTSTAT_CARD_INT_S		8
+#define INTSTAT_RETUNING_INT_M		BITFIELD_MASK(1)	/* Bit 12 */
+#define INTSTAT_RETUNING_INT_S		12
+#define INTSTAT_ERROR_INT_M		BITFIELD_MASK(1)	/* Bit 15 */
+#define INTSTAT_ERROR_INT_S		15
+
+/* SD_ErrorIntrStatus: Offset 0x032, size = 2 bytes */
+/* Defs also serve SD_ErrorIntrStatusEnable and SD_ErrorIntrSignalEnable */
+#define ERRINT_CMD_TIMEOUT_M		BITFIELD_MASK(1)
+#define ERRINT_CMD_TIMEOUT_S		0
+#define ERRINT_CMD_CRC_M		BITFIELD_MASK(1)
+#define ERRINT_CMD_CRC_S		1
+#define ERRINT_CMD_ENDBIT_M		BITFIELD_MASK(1)
+#define ERRINT_CMD_ENDBIT_S		2
+#define ERRINT_CMD_INDEX_M		BITFIELD_MASK(1)
+#define ERRINT_CMD_INDEX_S		3
+#define ERRINT_DATA_TIMEOUT_M		BITFIELD_MASK(1)
+#define ERRINT_DATA_TIMEOUT_S		4
+#define ERRINT_DATA_CRC_M		BITFIELD_MASK(1)
+#define ERRINT_DATA_CRC_S		5
+#define ERRINT_DATA_ENDBIT_M		BITFIELD_MASK(1)
+#define ERRINT_DATA_ENDBIT_S		6
+#define ERRINT_CURRENT_LIMIT_M		BITFIELD_MASK(1)
+#define ERRINT_CURRENT_LIMIT_S		7
+#define ERRINT_AUTO_CMD12_M		BITFIELD_MASK(1)
+#define ERRINT_AUTO_CMD12_S		8
+#define ERRINT_VENDOR_M			BITFIELD_MASK(4)
+#define ERRINT_VENDOR_S			12
+#define ERRINT_ADMA_M			BITFIELD_MASK(1)
+#define ERRINT_ADMA_S			9
+
+/* Also provide definitions in "normal" form to allow combined masks */
+#define ERRINT_CMD_TIMEOUT_BIT		0x0001
+#define ERRINT_CMD_CRC_BIT		0x0002
+#define ERRINT_CMD_ENDBIT_BIT		0x0004
+#define ERRINT_CMD_INDEX_BIT		0x0008
+#define ERRINT_DATA_TIMEOUT_BIT		0x0010
+#define ERRINT_DATA_CRC_BIT		0x0020
+#define ERRINT_DATA_ENDBIT_BIT		0x0040
+#define ERRINT_CURRENT_LIMIT_BIT	0x0080
+#define ERRINT_AUTO_CMD12_BIT		0x0100
+#define ERRINT_ADMA_BIT		0x0200
+
+/* Masks to select CMD vs. DATA errors */
+#define ERRINT_CMD_ERRS		(ERRINT_CMD_TIMEOUT_BIT | ERRINT_CMD_CRC_BIT |\
+				 ERRINT_CMD_ENDBIT_BIT | ERRINT_CMD_INDEX_BIT)
+#define ERRINT_DATA_ERRS	(ERRINT_DATA_TIMEOUT_BIT | ERRINT_DATA_CRC_BIT |\
+				 ERRINT_DATA_ENDBIT_BIT | ERRINT_ADMA_BIT)
+#define ERRINT_TRANSFER_ERRS	(ERRINT_CMD_ERRS | ERRINT_DATA_ERRS)
+
+/* SD_WakeupCntr_BlockGapCntrl : Offset 0x02A , size = bytes */
+/* SD_ClockCntrl	: Offset 0x02C , size = bytes */
+/* SD_SoftwareReset_TimeoutCntrl 	: Offset 0x02E , size = bytes */
+/* SD_IntrStatus	: Offset 0x030 , size = bytes */
+/* SD_ErrorIntrStatus 	: Offset 0x032 , size = bytes */
+/* SD_IntrStatusEnable	: Offset 0x034 , size = bytes */
+/* SD_ErrorIntrStatusEnable : Offset 0x036 , size = bytes */
+/* SD_IntrSignalEnable	: Offset 0x038 , size = bytes */
+/* SD_ErrorIntrSignalEnable : Offset 0x03A , size = bytes */
+/* SD_CMD12ErrorStatus	: Offset 0x03C , size = bytes */
+/* SD_Capabilities	: Offset 0x040 , size = bytes */
+/* SD_MaxCurCap		: Offset 0x048 , size = bytes */
+/* SD_MaxCurCap_Reserved: Offset 0x04C , size = bytes */
+/* SD_SlotInterruptStatus: Offset 0x0FC , size = bytes */
+/* SD_HostControllerVersion : Offset 0x0FE , size = bytes */
+
+/* SDIO Host Control Register DMA Mode Definitions */
+#define SDIOH_SDMA_MODE			0
+#define SDIOH_ADMA1_MODE		1
+#define SDIOH_ADMA2_MODE		2
+#define SDIOH_ADMA2_64_MODE		3
+
+#define ADMA2_ATTRIBUTE_VALID		(1 << 0)	/* ADMA Descriptor line valid */
+#define ADMA2_ATTRIBUTE_END			(1 << 1)	/* End of Descriptor */
+#define ADMA2_ATTRIBUTE_INT			(1 << 2)	/* Interrupt when line is done */
+#define ADMA2_ATTRIBUTE_ACT_NOP		(0 << 4)	/* Skip current line, go to next. */
+#define ADMA2_ATTRIBUTE_ACT_RSV		(1 << 4)	/* Same as NOP */
+#define ADMA1_ATTRIBUTE_ACT_SET		(1 << 4)	/* ADMA1 Only - set transfer length */
+#define ADMA2_ATTRIBUTE_ACT_TRAN	(2 << 4)	/* Transfer Data of one descriptor line. */
+#define ADMA2_ATTRIBUTE_ACT_LINK	(3 << 4)	/* Link Descriptor */
+
+/* ADMA2 Descriptor Table Entry for 32-bit Address */
+typedef struct adma2_dscr_32b {
+	uint32 len_attr;
+	uint32 phys_addr;
+} adma2_dscr_32b_t;
+
+/* ADMA1 Descriptor Table Entry */
+typedef struct adma1_dscr {
+	uint32 phys_addr_attr;
+} adma1_dscr_t;
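+
+/* Illustrative sketch of filling one 32-bit ADMA2 descriptor line, assuming the
+ * usual SD Host Controller layout of len_attr (transfer length in the upper 16
+ * bits, attribute flags in the lower 16); buf_pa and nbytes are hypothetical:
+ *
+ *	adma2_dscr_32b_t d;
+ *	d.len_attr = ((uint32)nbytes << 16) |
+ *		ADMA2_ATTRIBUTE_ACT_TRAN | ADMA2_ATTRIBUTE_VALID | ADMA2_ATTRIBUTE_END;
+ *	d.phys_addr = (uint32)buf_pa;
+ */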
+
+#endif /* _SDIOH_H */
diff --git a/drivers/staging/edison-bcm43340/include/sdiovar.h b/drivers/staging/edison-bcm43340/include/sdiovar.h
new file mode 100644
index 0000000..5335ea1
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/sdiovar.h
@@ -0,0 +1,58 @@
+/*
+ * Structure used by apps whose drivers access SDIO drivers.
+ * Pulled out separately so dhdu and wlu can both use it.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sdiovar.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _sdiovar_h_
+#define _sdiovar_h_
+
+#include <typedefs.h>
+
+/* require default structure packing */
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+typedef struct sdreg {
+	int func;
+	int offset;
+	int value;
+} sdreg_t;
+
+/* Common msglevel constants */
+#define SDH_ERROR_VAL		0x0001	/* Error */
+#define SDH_TRACE_VAL		0x0002	/* Trace */
+#define SDH_INFO_VAL		0x0004	/* Info */
+#define SDH_DEBUG_VAL		0x0008	/* Debug */
+#define SDH_DATA_VAL		0x0010	/* Data */
+#define SDH_CTRL_VAL		0x0020	/* Control Regs */
+#define SDH_LOG_VAL		0x0040	/* Enable bcmlog */
+#define SDH_DMA_VAL		0x0080	/* DMA */
+
+#define NUM_PREV_TRANSACTIONS	16
+
+
+#include <packed_section_end.h>
+
+#endif /* _sdiovar_h_ */
diff --git a/drivers/staging/edison-bcm43340/include/siutils.h b/drivers/staging/edison-bcm43340/include/siutils.h
new file mode 100644
index 0000000..4be9a0d
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/siutils.h
@@ -0,0 +1,439 @@
+/*
+ * Misc utility routines for accessing the SOC Interconnects
+ * of Broadcom HNBU chips.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: siutils.h 433599 2013-11-01 18:31:27Z $
+ */
+
+#ifndef	_siutils_h_
+#define	_siutils_h_
+
+
+#include <bcmutils.h>
+/*
+ * Data structure to export all chip specific common variables
+ *   public (read-only) portion of siutils handle returned by si_attach()/si_kattach()
+ */
+struct si_pub {
+	uint	socitype;		/* SOCI_SB, SOCI_AI */
+
+	uint	bustype;		/* SI_BUS, PCI_BUS */
+	uint	buscoretype;		/* PCI_CORE_ID, PCIE_CORE_ID, PCMCIA_CORE_ID */
+	uint	buscorerev;		/* buscore rev */
+	uint	buscoreidx;		/* buscore index */
+	int	ccrev;			/* chip common core rev */
+	uint32	cccaps;			/* chip common capabilities */
+	uint32  cccaps_ext;			/* chip common capabilities extension */
+	int	pmurev;			/* pmu core rev */
+	uint32	pmucaps;		/* pmu capabilities */
+	uint	boardtype;		/* board type */
+	uint    boardrev;               /* board rev */
+	uint	boardvendor;		/* board vendor */
+	uint	boardflags;		/* board flags */
+	uint	boardflags2;		/* board flags2 */
+	uint	chip;			/* chip number */
+	uint	chiprev;		/* chip revision */
+	uint	chippkg;		/* chip package option */
+	uint32	chipst;			/* chip status */
+	bool	issim;			/* chip is in simulation or emulation */
+	uint    socirev;		/* SOC interconnect rev */
+	bool	pci_pr32414;
+
+};
+
+/* For the HIGH_ONLY driver, the si_t must be writable to allow state sync from BMAC to the
+ * HIGH driver; for the monolithic driver, it is read-only to prevent accidental changes.
+ */
+typedef const struct si_pub si_t;
+
+
+/*
+ * Many of the routines below take an 'sih' handle as their first arg.
+ * Allocate this by calling si_attach().  Free it by calling si_detach().
+ * At any one time, the sih is logically focused on one particular si core
+ * (the "current core").
+ * Use si_setcore() or si_setcoreidx() to change the association to another core.
+ */
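+
+/* A minimal usage sketch of the lifecycle described above; devid, regs, bustype,
+ * sdh, vars and varsz are placeholders for bus-specific values:
+ *
+ *	si_t *sih = si_attach(devid, osh, regs, bustype, sdh, &vars, &varsz);
+ *	if (sih != NULL) {
+ *		uint origidx = si_coreidx(sih);
+ *		si_setcore(sih, coreid, 0);		// focus on another core
+ *		...
+ *		si_setcoreidx(sih, origidx);		// restore the previous focus
+ *		si_detach(sih);
+ *	}
+ */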
+#define	SI_OSH		NULL	/* Use for si_kattach when no osh is available */
+
+#define	BADIDX		(SI_MAXCORES + 1)
+
+/* clkctl xtal what flags */
+#define	XTAL			0x1	/* primary crystal oscillator (2050) */
+#define	PLL			0x2	/* main chip pll */
+
+/* clkctl clk mode */
+#define	CLK_FAST		0	/* force fast (pll) clock */
+#define	CLK_DYNAMIC		2	/* enable dynamic clock control */
+
+/* GPIO usage priorities */
+#define GPIO_DRV_PRIORITY	0	/* Driver */
+#define GPIO_APP_PRIORITY	1	/* Application */
+#define GPIO_HI_PRIORITY	2	/* Highest priority. Ignore GPIO reservation */
+
+/* GPIO pull up/down */
+#define GPIO_PULLUP		0
+#define GPIO_PULLDN		1
+
+/* GPIO event regtype */
+#define GPIO_REGEVT		0	/* GPIO register event */
+#define GPIO_REGEVT_INTMSK	1	/* GPIO register event int mask */
+#define GPIO_REGEVT_INTPOL	2	/* GPIO register event int polarity */
+
+/* device path */
+#define SI_DEVPATH_BUFSZ	16	/* min buffer size in bytes */
+
+/* SI routine enumeration: to be used by update function with multiple hooks */
+#define	SI_DOATTACH	1
+#define SI_PCIDOWN	2
+#define SI_PCIUP	3
+
+#define	ISSIM_ENAB(sih)	FALSE
+
+/* PMU clock/power control */
+#if defined(BCMPMUCTL)
+#define PMUCTL_ENAB(sih)	(BCMPMUCTL)
+#else
+#define PMUCTL_ENAB(sih)	((sih)->cccaps & CC_CAP_PMU)
+#endif
+
+/* chipcommon clock/power control (exclusive with PMU's) */
+#if defined(BCMPMUCTL) && BCMPMUCTL
+#define CCCTL_ENAB(sih)		(0)
+#define CCPLL_ENAB(sih)		(0)
+#else
+#define CCCTL_ENAB(sih)		((sih)->cccaps & CC_CAP_PWR_CTL)
+#define CCPLL_ENAB(sih)		((sih)->cccaps & CC_CAP_PLL_MASK)
+#endif
+
+typedef void (*gpio_handler_t)(uint32 stat, void *arg);
+typedef void (*gci_gpio_handler_t)(uint32 stat, void *arg);
+/* External BT Coex enable mask */
+#define CC_BTCOEX_EN_MASK  0x01
+/* External PA enable mask */
+#define GPIO_CTRL_EPA_EN_MASK 0x40
+/* WL/BT control enable mask */
+#define GPIO_CTRL_5_6_EN_MASK 0x60
+#define GPIO_CTRL_7_6_EN_MASK 0xC0
+#define GPIO_OUT_7_EN_MASK 0x80
+
+
+/* CR4 specific defines used by the host driver */
+#define SI_CR4_CAP			(0x04)
+#define SI_CR4_BANKIDX		(0x40)
+#define SI_CR4_BANKINFO		(0x44)
+#define SI_CR4_BANKPDA		(0x4C)
+
+#define	ARMCR4_TCBBNB_MASK	0xf0
+#define	ARMCR4_TCBBNB_SHIFT	4
+#define	ARMCR4_TCBANB_MASK	0xf
+#define	ARMCR4_TCBANB_SHIFT	0
+
+#define	SICF_CPUHALT		(0x0020)
+#define	ARMCR4_BSZ_MASK		0x3f
+#define	ARMCR4_BSZ_MULT		8192
+
+/* === exported functions === */
+extern si_t *si_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
+                       void *sdh, char **vars, uint *varsz);
+extern si_t *si_kattach(osl_t *osh);
+extern void si_detach(si_t *sih);
+extern bool si_pci_war16165(si_t *sih);
+
+extern uint si_corelist(si_t *sih, uint coreid[]);
+extern uint si_coreid(si_t *sih);
+extern uint si_flag(si_t *sih);
+extern uint si_flag_alt(si_t *sih);
+extern uint si_intflag(si_t *sih);
+extern uint si_coreidx(si_t *sih);
+extern uint si_coreunit(si_t *sih);
+extern uint si_corevendor(si_t *sih);
+extern uint si_corerev(si_t *sih);
+extern void *si_osh(si_t *sih);
+extern void si_setosh(si_t *sih, osl_t *osh);
+extern uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern uint32 *si_corereg_addr(si_t *sih, uint coreidx, uint regoff);
+extern void *si_coreregs(si_t *sih);
+extern uint si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
+extern uint si_core_wrapperreg(si_t *sih, uint32 coreidx, uint32 offset, uint32 mask, uint32 val);
+extern void *si_wrapperregs(si_t *sih);
+extern uint32 si_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern bool si_iscoreup(si_t *sih);
+extern uint si_numcoreunits(si_t *sih, uint coreid);
+extern uint si_findcoreidx(si_t *sih, uint coreid, uint coreunit);
+extern void *si_setcoreidx(si_t *sih, uint coreidx);
+extern void *si_setcore(si_t *sih, uint coreid, uint coreunit);
+extern void *si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val);
+extern void si_restore_core(si_t *sih, uint coreid, uint intr_val);
+extern int si_numaddrspaces(si_t *sih);
+extern uint32 si_addrspace(si_t *sih, uint asidx);
+extern uint32 si_addrspacesize(si_t *sih, uint asidx);
+extern void si_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size);
+extern int si_corebist(si_t *sih);
+extern void si_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void si_core_disable(si_t *sih, uint32 bits);
+extern uint32 si_clock_rate(uint32 pll_type, uint32 n, uint32 m);
+extern uint si_chip_hostif(si_t *sih);
+extern bool si_read_pmu_autopll(si_t *sih);
+extern uint32 si_clock(si_t *sih);
+extern uint32 si_alp_clock(si_t *sih);
+extern uint32 si_ilp_clock(si_t *sih);
+extern void si_pci_setup(si_t *sih, uint coremask);
+extern void si_pcmcia_init(si_t *sih);
+extern void si_setint(si_t *sih, int siflag);
+extern bool si_backplane64(si_t *sih);
+extern void si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
+	void *intrsenabled_fn, void *intr_arg);
+extern void si_deregister_intr_callback(si_t *sih);
+extern void si_clkctl_init(si_t *sih);
+extern uint16 si_clkctl_fast_pwrup_delay(si_t *sih);
+extern bool si_clkctl_cc(si_t *sih, uint mode);
+extern int si_clkctl_xtal(si_t *sih, uint what, bool on);
+extern uint32 si_gpiotimerval(si_t *sih, uint32 mask, uint32 val);
+extern void si_btcgpiowar(si_t *sih);
+extern bool si_deviceremoved(si_t *sih);
+extern uint32 si_socram_size(si_t *sih);
+extern uint32 si_socdevram_size(si_t *sih);
+extern uint32 si_socram_srmem_size(si_t *sih);
+extern void si_socdevram(si_t *sih, bool set, uint8 *enable, uint8 *protect, uint8 *remap);
+extern bool si_socdevram_pkg(si_t *sih);
+extern bool si_socdevram_remap_isenb(si_t *sih);
+extern uint32 si_socdevram_remap_size(si_t *sih);
+
+extern void si_watchdog(si_t *sih, uint ticks);
+extern void si_watchdog_ms(si_t *sih, uint32 ms);
+extern uint32 si_watchdog_msticks(void);
+extern void *si_gpiosetcore(si_t *sih);
+extern uint32 si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioin(si_t *sih);
+extern uint32 si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority);
+extern uint32 si_gpioled(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_gpioreserve(si_t *sih, uint32 gpio_num, uint8 priority);
+extern uint32 si_gpiorelease(si_t *sih, uint32 gpio_num, uint8 priority);
+extern uint32 si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val);
+extern uint32 si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val);
+extern uint32 si_gpio_int_enable(si_t *sih, bool enable);
+extern void si_gci_enable_gpio(si_t *sih, uint8 gpio, uint32 mask, uint32 value);
+extern uint8 si_gci_host_wake_gpio_init(si_t *sih);
+extern void si_gci_host_wake_gpio_enable(si_t *sih, uint8 gpio, bool state);
+
+/* GPIO event handlers */
+extern void *si_gpio_handler_register(si_t *sih, uint32 e, bool lev, gpio_handler_t cb, void *arg);
+extern void si_gpio_handler_unregister(si_t *sih, void* gpioh);
+extern void si_gpio_handler_process(si_t *sih);
+
+/* GCI interrupt handlers */
+extern void si_gci_handler_process(si_t *sih);
+
+/* GCI GPIO event handlers */
+extern void *si_gci_gpioint_handler_register(si_t *sih, uint8 gpio, uint8 sts,
+	gci_gpio_handler_t cb, void *arg);
+extern void si_gci_gpioint_handler_unregister(si_t *sih, void* gci_i);
+
+/* Wake-on-wireless-LAN (WOWL) */
+extern bool si_pci_pmecap(si_t *sih);
+struct osl_info;
+extern bool si_pci_fastpmecap(struct osl_info *osh);
+extern bool si_pci_pmestat(si_t *sih);
+extern void si_pci_pmeclr(si_t *sih);
+extern void si_pci_pmeen(si_t *sih);
+extern void si_pci_pmestatclr(si_t *sih);
+extern uint si_pcie_readreg(void *sih, uint addrtype, uint offset);
+extern uint si_pcie_writereg(void *sih, uint addrtype, uint offset, uint val);
+
+
+extern void si_sdio_init(si_t *sih);
+
+extern uint16 si_d11_devid(si_t *sih);
+extern int si_corepciid(si_t *sih, uint func, uint16 *pcivendor, uint16 *pcidevice,
+	uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif, uint8 *pciheader);
+
+#define si_eci(sih) 0
+static INLINE void * si_eci_init(si_t *sih) {return NULL;}
+#define si_eci_notify_bt(sih, type, val)  (0)
+#define si_seci(sih) 0
+#define si_seci_upd(sih, a)	do {} while (0)
+static INLINE void * si_seci_init(si_t *sih, uint8 use_seci) {return NULL;}
+static INLINE void * si_gci_init(si_t *sih) {return NULL;}
+#define si_seci_down(sih) do {} while (0)
+#define si_gci(sih) 0
+
+/* OTP status */
+extern bool si_is_otp_disabled(si_t *sih);
+extern bool si_is_otp_powered(si_t *sih);
+extern void si_otp_power(si_t *sih, bool on, uint32* min_res_mask);
+
+/* SPROM availability */
+extern bool si_is_sprom_available(si_t *sih);
+extern bool si_is_sprom_enabled(si_t *sih);
+extern void si_sprom_enable(si_t *sih, bool enable);
+
+/* OTP/SROM CIS stuff */
+extern int si_cis_source(si_t *sih);
+#define CIS_DEFAULT	0
+#define CIS_SROM	1
+#define CIS_OTP		2
+
+/* Fab-id information */
+#define	DEFAULT_FAB	0x0	/* Original/first fab used for this chip */
+#define	CSM_FAB7	0x1	/* CSM Fab7 chip */
+#define	TSMC_FAB12	0x2	/* TSMC Fab12/Fab14 chip */
+#define	SMIC_FAB4	0x3	/* SMIC Fab4 chip */
+
+extern int si_otp_fabid(si_t *sih, uint16 *fabid, bool rw);
+extern uint16 si_fabid(si_t *sih);
+
+/*
+ * Build device path. Path size must be >= SI_DEVPATH_BUFSZ.
+ * The returned path is NUL-terminated and has a trailing '/'.
+ * Return 0 on success, nonzero otherwise.
+ */
+extern int si_devpath(si_t *sih, char *path, int size);
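+/* A small sketch of the contract above (buffer sized to SI_DEVPATH_BUFSZ, 0 on
+ * success); the printed format is illustrative only:
+ *
+ *	char path[SI_DEVPATH_BUFSZ];
+ *	if (si_devpath(sih, path, sizeof(path)) == 0)
+ *		printf("devpath: %s\n", path);	// NUL-terminated, trailing '/'
+ */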
+/* Read a variable, prepending the devpath to the name */
+extern char *si_getdevpathvar(si_t *sih, const char *name);
+extern int si_getdevpathintvar(si_t *sih, const char *name);
+extern char *si_coded_devpathvar(si_t *sih, char *varname, int var_len, const char *name);
+
+
+extern uint8 si_pcieclkreq(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_pcielcreg(si_t *sih, uint32 mask, uint32 val);
+extern uint8 si_pcieltrenable(si_t *sih, uint32 mask, uint32 val);
+extern uint8 si_pcieobffenable(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_pcieltr_reg(si_t *sih, uint32 reg, uint32 mask, uint32 val);
+extern uint32 si_pcieltrspacing_reg(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_pcieltrhysteresiscnt_reg(si_t *sih, uint32 mask, uint32 val);
+extern void si_pcie_set_error_injection(si_t *sih, uint32 mode);
+extern void si_pcie_set_L1substate(si_t *sih, uint32 substate);
+extern uint32 si_pcie_get_L1substate(si_t *sih);
+extern void si_war42780_clkreq(si_t *sih, bool clkreq);
+extern void si_pci_down(si_t *sih);
+extern void si_pci_up(si_t *sih);
+extern void si_pci_sleep(si_t *sih);
+extern void si_pcie_war_ovr_update(si_t *sih, uint8 aspm);
+extern void si_pcie_power_save_enable(si_t *sih, bool enable);
+extern void si_pcie_extendL1timer(si_t *sih, bool extend);
+extern int si_pci_fixcfg(si_t *sih);
+extern void si_chippkg_set(si_t *sih, uint);
+
+extern void si_chipcontrl_btshd0_4331(si_t *sih, bool on);
+extern void si_chipcontrl_restore(si_t *sih, uint32 val);
+extern uint32 si_chipcontrl_read(si_t *sih);
+extern void si_chipcontrl_epa4331(si_t *sih, bool on);
+extern void si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl);
+extern void si_chipcontrl_srom4360(si_t *sih, bool on);
+/* Enable BT-COEX & Ex-PA for 4313 */
+extern void si_epa_4313war(si_t *sih);
+extern void si_btc_enable_chipcontrol(si_t *sih);
+/* BT/WL selection for 4313 bt combo >= P250 boards */
+extern void si_btcombo_p250_4313_war(si_t *sih);
+extern void si_btcombo_43228_war(si_t *sih);
+extern void si_clk_pmu_htavail_set(si_t *sih, bool set_clear);
+extern void si_pmu_synth_pwrsw_4313_war(si_t *sih);
+extern uint si_pll_reset(si_t *sih);
+/* === debug routines === */
+
+extern bool si_taclear(si_t *sih, bool details);
+
+
+
+extern uint32 si_ccreg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
+extern uint32 si_pciereg(si_t *sih, uint32 offset, uint32 mask, uint32 val, uint type);
+extern uint32 si_pcieserdesreg(si_t *sih, uint32 mdioslave, uint32 offset, uint32 mask, uint32 val);
+extern void si_pcie_set_request_size(si_t *sih, uint16 size);
+extern uint16 si_pcie_get_request_size(si_t *sih);
+extern void si_pcie_set_maxpayload_size(si_t *sih, uint16 size);
+extern uint16 si_pcie_get_maxpayload_size(si_t *sih);
+extern uint16 si_pcie_get_ssid(si_t *sih);
+extern uint32 si_pcie_get_bar0(si_t *sih);
+extern int si_pcie_configspace_cache(si_t *sih);
+extern int si_pcie_configspace_restore(si_t *sih);
+extern int si_pcie_configspace_get(si_t *sih, uint8 *buf, uint size);
+
+char *si_getnvramflvar(si_t *sih, const char *name);
+
+
+extern uint32 si_tcm_size(si_t *sih);
+extern bool si_has_flops(si_t *sih);
+
+extern int si_set_sromctl(si_t *sih, uint32 value);
+extern uint32 si_get_sromctl(si_t *sih);
+
+extern uint32 si_gci_direct(si_t *sih, uint offset, uint32 mask, uint32 val);
+extern uint32 si_gci_indirect(si_t *sih, uint regidx, uint offset, uint32 mask, uint32 val);
+extern uint32 si_gci_output(si_t *sih, uint reg, uint32 mask, uint32 val);
+extern uint32 si_gci_input(si_t *sih, uint reg);
+extern uint32 si_gci_int_enable(si_t *sih, bool enable);
+extern void si_gci_reset(si_t *sih);
+extern void si_ercx_init(si_t *sih);
+extern void si_wci2_init(si_t *sih, uint baudrate);
+extern void si_gci_seci_init(si_t *sih);
+extern void si_gci_set_functionsel(si_t *sih, uint32 pin, uint8 fnsel);
+extern uint8 si_gci_get_chipctrlreg_idx(uint32 pin, uint32 *regidx, uint32 *pos);
+extern uint32 si_gci_chipcontrol(si_t *sih, uint reg, uint32 mask, uint32 val);
+extern uint32 si_gci_chipstatus(si_t *sih, uint reg);
+extern uint16 si_cc_get_reg16(uint32 reg_offs);
+extern uint32 si_cc_get_reg32(uint32 reg_offs);
+extern uint32 si_cc_set_reg32(uint32 reg_offs, uint32 val);
+extern uint32 si_gci_preinit_upd_indirect(uint32 regidx, uint32 setval, uint32 mask);
+extern uint8 si_enable_device_wake(si_t *sih, uint8 *wake_status, uint8 *cur_status);
+
+#define CHIPCTRLREG1 0x1
+#define CHIPCTRLREG2 0x2
+#define CHIPCTRLREG3 0x3
+#define CHIPCTRLREG4 0x4
+#define CHIPCTRLREG5 0x5
+#define MINRESMASKREG 0x618
+#define MAXRESMASKREG 0x61c
+#define CHIPCTRLADDR 0x650
+#define CHIPCTRLDATA 0x654
+#define RSRCTABLEADDR 0x620
+#define RSRCUPDWNTIME 0x628
+#define PMUREG_RESREQ_MASK 0x68c
+
+void si_update_masks(si_t *sih);
+void si_force_islanding(si_t *sih, bool enable);
+extern uint32 si_pmu_res_req_timer_clr(si_t *sih);
+extern void si_pmu_rfldo(si_t *sih, bool on);
+extern void si_survive_perst_war(si_t *sih, bool reset, uint32 sperst_mask, uint32 sperst_val);
+extern void si_pcie_ltr_war(si_t *sih);
+
+/* Macro to enable clock gating changes in different cores */
+#define MEM_CLK_GATE_BIT 	5
+#define GCI_CLK_GATE_BIT 	18
+
+#define USBAPP_CLK_BIT		0
+#define PCIE_CLK_BIT		3
+#define ARMCR4_DBG_CLK_BIT	4
+#define SAMPLE_SYNC_CLK_BIT 	17
+#define PCIE_TL_CLK_BIT		18
+#define HQ_REQ_BIT		24
+#define PLL_DIV2_BIT_START	9
+#define PLL_DIV2_MASK		(0x37 << PLL_DIV2_BIT_START)
+#define PLL_DIV2_DIS_OP		(0x37 << PLL_DIV2_BIT_START)
+
+#endif	/* _siutils_h_ */
diff --git a/drivers/staging/edison-bcm43340/include/trxhdr.h b/drivers/staging/edison-bcm43340/include/trxhdr.h
new file mode 100644
index 0000000..6e55b15
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/trxhdr.h
@@ -0,0 +1,92 @@
+/*
+ * TRX image file header format.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: trxhdr.h 349211 2012-08-07 09:45:24Z $
+ */
+
+#ifndef _TRX_HDR_H
+#define _TRX_HDR_H
+
+#include <typedefs.h>
+
+#define TRX_MAGIC	0x30524448	/* "HDR0" */
+#define TRX_MAX_LEN	0x3B0000	/* Max length */
+#define TRX_NO_HEADER	1		/* Do not write TRX header */
+#define TRX_GZ_FILES	0x2     /* Contains up to TRX_MAX_OFFSET individual gzip files */
+#define TRX_EMBED_UCODE	0x8	/* Trx contains embedded ucode image */
+#define TRX_ROMSIM_IMAGE	0x10	/* Trx contains ROM simulation image */
+#define TRX_UNCOMP_IMAGE	0x20	/* Trx contains uncompressed rtecdc.bin image */
+#define TRX_BOOTLOADER		0x40	/* the image is a bootloader */
+
+#define TRX_V1		1
+#define TRX_V1_MAX_OFFSETS	3		/* V1: Max number of individual files */
+
+#ifndef BCMTRXV2
+#define TRX_VERSION	TRX_V1		/* Version 1 */
+#define TRX_MAX_OFFSET TRX_V1_MAX_OFFSETS
+#endif
+
+/* BMAC host drivers/applications like bcmdl need to support both Ver 1 and
+ * Ver 2 of the trx header. To make this generic, the trx_header structure is
+ * modified as below, where the size of the "offsets" field varies with the
+ * TRX version. Currently, the BMAC host driver and bcmdl are modified to
+ * support TRXV2 as well. To make sure other applications like "dhdl", which
+ * are yet to be enhanced to support TRXV2, are not broken, the new macro and
+ * structure definition take effect only when BCMTRXV2 is defined.
+ */
+struct trx_header {
+	uint32 magic;		/* "HDR0" */
+	uint32 len;		/* Length of file including header */
+	uint32 crc32;		/* 32-bit CRC from flag_version to end of file */
+	uint32 flag_version;	/* 0:15 flags, 16:31 version */
+#ifndef BCMTRXV2
+	uint32 offsets[TRX_MAX_OFFSET];	/* Offsets of partitions from start of header */
+#else
+	uint32 offsets[1];	/* Offsets of partitions from start of header */
+#endif
+};
+
+#ifdef BCMTRXV2
+#define TRX_VERSION		TRX_V2		/* Version 2 */
+#define TRX_MAX_OFFSET  TRX_V2_MAX_OFFSETS
+
+#define TRX_V2		2
+/* V2: Max number of individual files
+ * To support SDR signature + Config data region
+ */
+#define TRX_V2_MAX_OFFSETS	5
+#define SIZEOF_TRXHDR_V1	(sizeof(struct trx_header)+(TRX_V1_MAX_OFFSETS-1)*sizeof(uint32))
+#define SIZEOF_TRXHDR_V2	(sizeof(struct trx_header)+(TRX_V2_MAX_OFFSETS-1)*sizeof(uint32))
+#define TRX_VER(trx)		(trx->flag_version>>16)
+#define ISTRX_V1(trx)		(TRX_VER(trx) == TRX_V1)
+#define ISTRX_V2(trx)		(TRX_VER(trx) == TRX_V2)
+/* For V2, return the V2 header size; otherwise, return the V1 size */
+#define SIZEOF_TRX(trx)	    (ISTRX_V2(trx) ? SIZEOF_TRXHDR_V2: SIZEOF_TRXHDR_V1)
+#else
+#define SIZEOF_TRX(trx)	    (sizeof(struct trx_header))
+#endif /* BCMTRXV2 */
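+
+/* A version-aware parsing sketch using the macros above, assuming BCMTRXV2 is
+ * defined (TRX_VER() exists only in that configuration) and that image points
+ * at a complete, validated TRX blob:
+ *
+ *	struct trx_header *hdr = (struct trx_header *)image;
+ *	if (hdr->magic == TRX_MAGIC) {
+ *		uint32 ver = TRX_VER(hdr);	// flags in 0:15, version in 16:31
+ *		uint32 hdrsz = SIZEOF_TRX(hdr);	// picks the V1 or V2 size
+ *	}
+ */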
+
+/* Compatibility */
+typedef struct trx_header TRXHDR, *PTRXHDR;
+
+#endif /* _TRX_HDR_H */
diff --git a/drivers/staging/edison-bcm43340/include/typedefs.h b/drivers/staging/edison-bcm43340/include/typedefs.h
new file mode 100644
index 0000000..473ed9e
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/typedefs.h
@@ -0,0 +1,344 @@
+/*
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: typedefs.h 453696 2014-02-06 01:10:20Z $
+ */
+
+#ifndef _TYPEDEFS_H_
+#define _TYPEDEFS_H_
+
+#ifdef SITE_TYPEDEFS
+
+/*
+ * Define SITE_TYPEDEFS in the compile to include a site-specific
+ * typedef file "site_typedefs.h".
+ *
+ * If SITE_TYPEDEFS is not defined, then the code section below makes
+ * inferences about the compile environment based on defined symbols and
+ * possibly compiler pragmas.
+ *
+ * Following these two sections is the Default Typedefs section.
+ * This section is only processed if USE_TYPEDEF_DEFAULTS is
+ * defined. This section has a default set of typedefs and a few
+ * preprocessor symbols (TRUE, FALSE, NULL, ...).
+ */
+
+#include "site_typedefs.h"
+
+#else
+
+/*
+ * Infer the compile environment based on preprocessor symbols and pragmas.
+ * Override type definitions as needed, and include configuration-dependent
+ * header files to define types.
+ */
+
+#ifdef __cplusplus
+
+#define TYPEDEF_BOOL
+#ifndef FALSE
+#define FALSE	false
+#endif
+#ifndef TRUE
+#define TRUE	true
+#endif
+
+#else	/* ! __cplusplus */
+
+
+#endif	/* ! __cplusplus */
+
+#if defined(__LP64__)
+#define TYPEDEF_UINTPTR
+typedef unsigned long long int uintptr;
+#endif
+
+
+
+
+
+#if defined(_NEED_SIZE_T_)
+typedef long unsigned int size_t;
+#endif
+
+
+
+
+
+#if defined(__sparc__)
+#define TYPEDEF_ULONG
+#endif
+
+
+/*
+ * If this is either a Linux hybrid build or the per-port code of a hybrid build
+ * then use the Linux header files to get some of the typedefs.  Otherwise, define
+ * them entirely in this file.  We can't always define the types because we get
+ * a duplicate typedef error; there is no way to "undefine" a typedef.
+ * We know when it's per-port code because each file defines LINUX_PORT at the top.
+ */
+#if !defined(LINUX_HYBRID) || defined(LINUX_PORT)
+#define TYPEDEF_UINT
+#ifndef TARGETENV_android
+#define TYPEDEF_USHORT
+#define TYPEDEF_ULONG
+#endif /* TARGETENV_android */
+#ifdef __KERNEL__
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19))
+#define TYPEDEF_BOOL
+#endif	/* >= 2.6.19 */
+/* special detection for 2.6.18-128.7.1.0.1.el5 */
+#if (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18))
+#include <linux/compiler.h>
+#ifdef noinline_for_stack
+#define TYPEDEF_BOOL
+#endif
+#endif	/* == 2.6.18 */
+#endif	/* __KERNEL__ */
+#endif  /* !defined(LINUX_HYBRID) || defined(LINUX_PORT) */
+
+
+
+
+/* Do not support the (u)int64 types with strict ansi for GNU C */
+#if defined(__GNUC__) && defined(__STRICT_ANSI__)
+#define TYPEDEF_INT64
+#define TYPEDEF_UINT64
+#endif /* defined(__GNUC__) && defined(__STRICT_ANSI__) */
+
+/* ICL accepts unsigned 64 bit type only, and complains in ANSI mode
+ * for signed or unsigned
+ */
+#if defined(__ICL)
+
+#define TYPEDEF_INT64
+
+#if defined(__STDC__)
+#define TYPEDEF_UINT64
+#endif
+
+#endif /* __ICL */
+
+#if !defined(__DJGPP__)
+
+/* pick up ushort & uint from standard types.h */
+#if defined(__KERNEL__)
+
+/* See note above */
+#if !defined(LINUX_HYBRID) || defined(LINUX_PORT)
+#include <linux/types.h>	/* sys/types.h and linux/types.h are oil and water */
+#endif /* !defined(LINUX_HYBRID) || defined(LINUX_PORT) */
+
+#else
+
+
+#include <sys/types.h>
+
+#endif /* linux && __KERNEL__ */
+
+#endif 
+
+
+
+/* use the default typedefs in the next section of this file */
+#define USE_TYPEDEF_DEFAULTS
+
+#endif /* SITE_TYPEDEFS */
+
+
+/*
+ * Default Typedefs
+ */
+
+#ifdef USE_TYPEDEF_DEFAULTS
+#undef USE_TYPEDEF_DEFAULTS
+
+#ifndef TYPEDEF_BOOL
+typedef	/* @abstract@ */ unsigned char	bool;
+#endif
+
+/* define uchar, ushort, uint, ulong */
+
+#ifndef TYPEDEF_UCHAR
+typedef unsigned char	uchar;
+#endif
+
+#ifndef TYPEDEF_USHORT
+typedef unsigned short	ushort;
+#endif
+
+#ifndef TYPEDEF_UINT
+typedef unsigned int	uint;
+#endif
+
+#ifndef TYPEDEF_ULONG
+typedef unsigned long	ulong;
+#endif
+
+/* define [u]int8/16/32/64, uintptr */
+
+#ifndef TYPEDEF_UINT8
+typedef unsigned char	uint8;
+#endif
+
+#ifndef TYPEDEF_UINT16
+typedef unsigned short	uint16;
+#endif
+
+#ifndef TYPEDEF_UINT32
+typedef unsigned int	uint32;
+#endif
+
+#ifndef TYPEDEF_UINT64
+typedef unsigned long long uint64;
+#endif
+
+#ifndef TYPEDEF_UINTPTR
+typedef unsigned int	uintptr;
+#endif
+
+#ifndef TYPEDEF_INT8
+typedef signed char	int8;
+#endif
+
+#ifndef TYPEDEF_INT16
+typedef signed short	int16;
+#endif
+
+#ifndef TYPEDEF_INT32
+typedef signed int	int32;
+#endif
+
+#ifndef TYPEDEF_INT64
+typedef signed long long int64;
+#endif
+
+/* define float32/64, float_t */
+
+#ifndef TYPEDEF_FLOAT32
+typedef float		float32;
+#endif
+
+#ifndef TYPEDEF_FLOAT64
+typedef double		float64;
+#endif
+
+/*
+ * abstracted floating point type allows for compile time selection of
+ * single or double precision arithmetic.  Compiling with -DFLOAT32
+ * selects single precision; the default is double precision.
+ */
+
+#ifndef TYPEDEF_FLOAT_T
+
+#if defined(FLOAT32)
+typedef float32 float_t;
+#else /* default to double precision floating point */
+typedef float64 float_t;
+#endif
+
+#endif /* TYPEDEF_FLOAT_T */
+
+/* define macro values */
+
+#ifndef FALSE
+#define FALSE	0
+#endif
+
+#ifndef TRUE
+#define TRUE	1  /* TRUE */
+#endif
+
+#ifndef NULL
+#define	NULL	0
+#endif
+
+#ifndef OFF
+#define	OFF	0
+#endif
+
+#ifndef ON
+#define	ON	1  /* ON = 1 */
+#endif
+
+#define	AUTO	(-1) /* Auto = -1 */
+
+/* define PTRSZ, INLINE */
+
+#ifndef PTRSZ
+#define	PTRSZ	sizeof(char*)
+#endif
+
+
+/* Detect compiler type. */
+#if defined(__GNUC__) || defined(__lint)
+	#define BWL_COMPILER_GNU
+#elif defined(__CC_ARM) && __CC_ARM
+	#define BWL_COMPILER_ARMCC
+#else
+	#error "Unknown compiler!"
+#endif 
+
+
+#ifndef INLINE
+	#if defined(BWL_COMPILER_MICROSOFT)
+		#define INLINE __inline
+	#elif defined(BWL_COMPILER_GNU)
+		#define INLINE __inline__
+	#elif defined(BWL_COMPILER_ARMCC)
+		#define INLINE	__inline
+	#else
+		#define INLINE
+	#endif 
+#endif /* INLINE */
+
+#undef TYPEDEF_BOOL
+#undef TYPEDEF_UCHAR
+#undef TYPEDEF_USHORT
+#undef TYPEDEF_UINT
+#undef TYPEDEF_ULONG
+#undef TYPEDEF_UINT8
+#undef TYPEDEF_UINT16
+#undef TYPEDEF_UINT32
+#undef TYPEDEF_UINT64
+#undef TYPEDEF_UINTPTR
+#undef TYPEDEF_INT8
+#undef TYPEDEF_INT16
+#undef TYPEDEF_INT32
+#undef TYPEDEF_INT64
+#undef TYPEDEF_FLOAT32
+#undef TYPEDEF_FLOAT64
+#undef TYPEDEF_FLOAT_T
+
+#endif /* USE_TYPEDEF_DEFAULTS */
+
+/* Suppress unused parameter warning */
+#define UNUSED_PARAMETER(x) (void)(x)
+
+/* Avoid warning for discarded const or volatile qualifier in special cases (-Wcast-qual) */
+#define DISCARD_QUAL(ptr, type) ((type *)(uintptr)(ptr))
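+
+/* Typical use of the two helpers above (minimal sketch):
+ *
+ *	static int cb(void *ctx, const char *name)
+ *	{
+ *		UNUSED_PARAMETER(ctx);			// silence unused-parameter warnings
+ *		char *p = DISCARD_QUAL(name, char);	// drop const without -Wcast-qual noise
+ *		return p[0] != '\0';
+ *	}
+ */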
+
+/*
+ * Include bcmdefs.h here to make sure everyone who includes typedefs.h
+ * gets it automatically.
+ */
+#include <bcmdefs.h>
+#endif /* _TYPEDEFS_H_ */
diff --git a/drivers/staging/edison-bcm43340/include/usbrdl.h b/drivers/staging/edison-bcm43340/include/usbrdl.h
new file mode 100644
index 0000000..fd57362
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/usbrdl.h
@@ -0,0 +1,130 @@
+/*
+ * Broadcom USB remote download definitions
+ *
+ * Copyright (C) 2014, Broadcom Corporation
+ * All Rights Reserved.
+ * 
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ *
+ * $Id: usbrdl.h 412875 2013-07-16 23:54:25Z $
+ */
+
+#ifndef _USB_RDL_H
+#define _USB_RDL_H
+
+/* Control messages: bRequest values */
+#define DL_GETSTATE		0	/* returns the rdl_state_t struct */
+#define DL_CHECK_CRC		1	/* currently unused */
+#define DL_GO			2	/* execute downloaded image */
+#define DL_START		3	/* initialize dl state */
+#define DL_REBOOT		4	/* reboot the device in 2 seconds */
+#define DL_GETVER		5	/* returns the bootrom_id_t struct */
+#define DL_GO_PROTECTED		6	/* execute the downloaded code and set reset event
+					 * to occur in 2 seconds.  It is the responsibility
+					 * of the downloaded code to clear this event
+					 */
+#define DL_EXEC			7	/* jump to a supplied address */
+#define DL_RESETCFG		8	/* To support single enum on dongle
+					 * - Not used by bootloader
+					 */
+#define DL_DEFER_RESP_OK	9	/* Potentially defer the response to setup
+					 * if resp unavailable
+					 */
+#define DL_CHGSPD		0x0A
+
+#define	DL_HWCMD_MASK		0xfc	/* Mask for hardware read commands: */
+#define	DL_RDHW			0x10	/* Read a hardware address (Ctl-in) */
+#define	DL_RDHW32		0x10	/* Read a 32 bit word */
+#define	DL_RDHW16		0x11	/* Read 16 bits */
+#define	DL_RDHW8		0x12	/* Read an 8 bit byte */
+#define	DL_WRHW			0x14	/* Write a hardware address (Ctl-out) */
+#define DL_WRHW_BLK		0x13	/* Block write to a hardware address */
+
+#define DL_CMD_RDHW		1	/* read data from a backplane address */
+#define DL_CMD_WRHW		2	/* write data to a backplane address */
+
+
+/* states */
+#define DL_WAITING	0	/* waiting to rx first pkt that includes the hdr info */
+#define DL_READY	1	/* hdr was good, waiting for more of the compressed image */
+#define DL_BAD_HDR	2	/* hdr was corrupted */
+#define DL_BAD_CRC	3	/* compressed image was corrupted */
+#define DL_RUNNABLE	4	/* download was successful, waiting for go cmd */
+#define DL_START_FAIL	5	/* failed to initialize correctly */
+#define DL_NVRAM_TOOBIG	6	/* host specified nvram data exceeds DL_NVRAM value */
+#define DL_IMAGE_TOOBIG	7	/* download image too big (exceeds DATA_START for rdl) */
+
+#define TIMEOUT		5000	/* Timeout for usb commands */
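+
+/* A hedged sketch of the download sequence implied by the bRequest values and
+ * states above; usb_ctl() and send_bulk_chunk() are hypothetical wrappers around
+ * the host USB stack:
+ *
+ *	rdl_state_t st;
+ *	usb_ctl(dev, DL_START, NULL, 0);		// initialize dl state
+ *	while (image_bytes_left)
+ *		send_bulk_chunk(dev, buf, RDL_CHUNK);	// stream the image
+ *	usb_ctl(dev, DL_GETSTATE, &st, sizeof(st));
+ *	if (st.state == DL_RUNNABLE)
+ *		usb_ctl(dev, DL_GO, NULL, 0);		// execute downloaded image
+ */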
+
+struct bcm_device_id {
+	char	*name;
+	uint32	vend;
+	uint32	prod;
+};
+
+typedef struct {
+	uint32	state;
+	uint32	bytes;
+} rdl_state_t;
+
+typedef struct {
+	uint32	chip;		/* Chip id */
+	uint32	chiprev;	/* Chip rev */
+	uint32  ramsize;    /* Size of RAM */
+	uint32  remapbase;   /* Current remap base address */
+	uint32  boardtype;   /* Type of board */
+	uint32  boardrev;    /* Board revision */
+} bootrom_id_t;
+
+/* struct for backplane & jtag accesses */
+typedef struct {
+	uint32	cmd;		/* tag to identify the cmd */
+	uint32	addr;		/* backplane address for write */
+	uint32	len;		/* length of data: 1, 2, 4 bytes */
+	uint32	data;		/* data to write */
+} hwacc_t;
+
+/* struct for backplane */
+typedef struct {
+	uint32  cmd;            /* tag to identify the cmd */
+	uint32  addr;           /* backplane address for write */
+	uint32  len;            /* length of data: 1, 2, 4 bytes */
+	uint8   data[1];                /* data to write */
+} hwacc_blk_t;
+
+
+/* struct for querying nvram params from bootloader */
+#define QUERY_STRING_MAX 32
+typedef struct {
+	uint32  cmd;                    /* tag to identify the cmd */
+	char    var[QUERY_STRING_MAX];  /* param name */
+} nvparam_t;
+
+typedef void (*exec_fn_t)(void *sih);
+
+#define USB_CTRL_IN (USB_TYPE_VENDOR | 0x80 | USB_RECIP_INTERFACE)
+#define USB_CTRL_OUT (USB_TYPE_VENDOR | 0 | USB_RECIP_INTERFACE)
+
+#define USB_CTRL_EP_TIMEOUT 500 /* Timeout used in USB control_msg transactions. */
+
+#define RDL_CHUNK	1500  /* size of each dl transfer */
+
+/* bootloader makes special use of trx header "offsets" array */
+#define TRX_OFFSETS_DLFWLEN_IDX	0	/* Size of the fw; used in uncompressed case */
+#define TRX_OFFSETS_JUMPTO_IDX	1	/* RAM address for jumpto after download */
+#define TRX_OFFSETS_NVM_LEN_IDX	2	/* Length of appended NVRAM data */
+#ifdef BCMTRXV2
+/* The NVRAM region of the trx will be digitally signed in the SDR image,
+ * hence the need for a new config region that can pass parameters
+ * which do not need to be digitally signed.
+ */
+#define TRX_OFFSETS_DSG_LEN_IDX	3	/* Length of digital signature for the first image */
+#define TRX_OFFSETS_CFG_LEN_IDX	4	/* Length of config region, which is not digitally signed */
+#endif /* BCMTRXV2 */
+
+#define TRX_OFFSETS_DLBASE_IDX  0       /* RAM start address for download */
+
+#endif  /* _USB_RDL_H */
diff --git a/drivers/staging/edison-bcm43340/include/wlfc_proto.h b/drivers/staging/edison-bcm43340/include/wlfc_proto.h
new file mode 100644
index 0000000..9be11f8
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/wlfc_proto.h
@@ -0,0 +1,303 @@
+/*
+* Copyright (C) 1999-2013, Broadcom Corporation
+* 
+*      Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2 (the "GPL"),
+* available at http://www.broadcom.com/licenses/GPLv2.php, with the
+* following added to such license:
+* 
+*      As a special exception, the copyright holders of this software give you
+* permission to link this software with independent modules, and to copy and
+* distribute the resulting executable under terms of your choice, provided that
+* you also meet, for each linked independent module, the terms and conditions of
+* the license of that module.  An independent module is a module which is not
+* derived from this software.  The special exception does not apply to any
+* modifications of the software.
+* 
+*      Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a license
+* other than the GPL, without Broadcom's express prior written consent.
+* $Id: wlfc_proto.h 431159 2013-10-22 19:40:51Z $
+*
+*/
+#ifndef __wlfc_proto_definitions_h__
+#define __wlfc_proto_definitions_h__
+
+	/* Use TLV to convey WLFC information.
+	 ---------------------------------------------------------------------------
+	| Type |  Len | value                    | Description
+	 ---------------------------------------------------------------------------
+	|  1   |   1  | (handle)                 | MAC OPEN
+	 ---------------------------------------------------------------------------
+	|  2   |   1  | (handle)                 | MAC CLOSE
+	 ---------------------------------------------------------------------------
+	|  3   |   2  | (count, handle, prec_bmp)| Set the credit depth for a MAC dstn
+	 ---------------------------------------------------------------------------
+	|  4   |   4+ | see pkttag comments      | TXSTATUS
+	|      |      | TX status & timestamps   | Present only when pkt timestamp is enabled
+	 ---------------------------------------------------------------------------
+	|  5   |   4  | see pkttag comments      | PKTTAG [host->firmware]
+	 ---------------------------------------------------------------------------
+	|  6   |   8  | (handle, ifid, MAC)      | MAC ADD
+	 ---------------------------------------------------------------------------
+	|  7   |   8  | (handle, ifid, MAC)      | MAC DEL
+	 ---------------------------------------------------------------------------
+	|  8   |   1  | (rssi)                   | RSSI - RSSI value for the packet.
+	 ---------------------------------------------------------------------------
+	|  9   |   1  | (interface ID)           | Interface OPEN
+	 ---------------------------------------------------------------------------
+	|  10  |   1  | (interface ID)           | Interface CLOSE
+	 ---------------------------------------------------------------------------
+	|  11  |   8  | fifo credit returns map  | FIFO credits back to the host
+	|      |      |                          |
+	|      |      |                          | --------------------------------------
+	|      |      |                          | | ac0 | ac1 | ac2 | ac3 | bcmc | atim |
+	|      |      |                          | --------------------------------------
+	|      |      |                          |
+	 ---------------------------------------------------------------------------
+	|  12  |   2  | MAC handle,              | Host provides a bitmap of pending
+	|      |      | AC[0-3] traffic bitmap   | unicast traffic for MAC-handle dstn.
+	|      |      |                          | [host->firmware]
+	 ---------------------------------------------------------------------------
+	|  13  |   3  | (count, handle, prec_bmp)| One time request for packet to a specific
+	|      |      |                          | MAC destination.
+	 ---------------------------------------------------------------------------
+	|  15  |  12  | (pkttag, timestamps)     | Send TX timestamp at reception from host
+	 ---------------------------------------------------------------------------
+	|  16  |  12  | (pkttag, timestamps)     | Send WLAN RX timestamp along with RX frame
+	 ---------------------------------------------------------------------------
+	| 255  |  N/A |  N/A                     | FILLER - This is a special type
+	|      |      |                          | that has no length or value.
+	|      |      |                          | Typically used for padding.
+	 ---------------------------------------------------------------------------
+	*/
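+
+/* A minimal sketch of walking the TLV stream described above; buf and len are a
+ * hypothetical received signal buffer and its length:
+ *
+ *	uint8 *p = buf;
+ *	while (p < buf + len) {
+ *		uint8 type = *p++;
+ *		if (type == WLFC_CTL_TYPE_FILLER)	// no length or value byte
+ *			continue;
+ *		uint8 tlv_len = *p++;
+ *		...					// dispatch on type; value starts at p
+ *		p += tlv_len;
+ *	}
+ */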
+
+#define WLFC_CTL_TYPE_MAC_OPEN			1
+#define WLFC_CTL_TYPE_MAC_CLOSE			2
+#define WLFC_CTL_TYPE_MAC_REQUEST_CREDIT	3
+#define WLFC_CTL_TYPE_TXSTATUS			4
+#define WLFC_CTL_TYPE_PKTTAG			5
+
+#define WLFC_CTL_TYPE_MACDESC_ADD		6
+#define WLFC_CTL_TYPE_MACDESC_DEL		7
+#define WLFC_CTL_TYPE_RSSI			8
+
+#define WLFC_CTL_TYPE_INTERFACE_OPEN		9
+#define WLFC_CTL_TYPE_INTERFACE_CLOSE		10
+
+#define WLFC_CTL_TYPE_FIFO_CREDITBACK		11
+
+#define WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP	12
+#define WLFC_CTL_TYPE_MAC_REQUEST_PACKET	13
+#define WLFC_CTL_TYPE_HOST_REORDER_RXPKTS	14
+
+#define WLFC_CTL_TYPE_TX_ENTRY_STAMP		15
+#define WLFC_CTL_TYPE_RX_STAMP			16
+
+#define WLFC_CTL_TYPE_TRANS_ID			18
+#define WLFC_CTL_TYPE_COMP_TXSTATUS		19
+
+
+#define WLFC_CTL_TYPE_FILLER			255
+
+#define WLFC_CTL_VALUE_LEN_MACDESC		8	/* handle, interface, MAC */
+
+#define WLFC_CTL_VALUE_LEN_MAC			1	/* MAC-handle */
+#define WLFC_CTL_VALUE_LEN_RSSI			1
+
+#define WLFC_CTL_VALUE_LEN_INTERFACE		1
+#define WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP	2
+
+#define WLFC_CTL_VALUE_LEN_TXSTATUS		4
+#define WLFC_CTL_VALUE_LEN_PKTTAG		4
+
+#define WLFC_CTL_VALUE_LEN_SEQ			2
+
+/* enough space to host all 4 ACs, bc/mc and atim fifo credit */
+#define WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK	6
+
+#define WLFC_CTL_VALUE_LEN_REQUEST_CREDIT	3	/* credit, MAC-handle, prec_bitmap */
+#define WLFC_CTL_VALUE_LEN_REQUEST_PACKET	3	/* credit, MAC-handle, prec_bitmap */
+
+
+#define WLFC_PKTFLAG_PKTFROMHOST	0x01
+#define WLFC_PKTFLAG_PKT_REQUESTED	0x02
+
+#define WL_TXSTATUS_STATUS_MASK			0xff /* allow 8 bits */
+#define WL_TXSTATUS_STATUS_SHIFT		24
+
+#define WL_TXSTATUS_SET_STATUS(x, status)	((x)  = \
+	((x) & ~(WL_TXSTATUS_STATUS_MASK << WL_TXSTATUS_STATUS_SHIFT)) | \
+	(((status) & WL_TXSTATUS_STATUS_MASK) << WL_TXSTATUS_STATUS_SHIFT))
+#define WL_TXSTATUS_GET_STATUS(x)	(((x) >> WL_TXSTATUS_STATUS_SHIFT) & \
+	WL_TXSTATUS_STATUS_MASK)
+
+#define WL_TXSTATUS_GENERATION_MASK		1 /* allow 1 bit */
+#define WL_TXSTATUS_GENERATION_SHIFT		31
+
+#define WL_TXSTATUS_SET_GENERATION(x, gen)	((x) = \
+	((x) & ~(WL_TXSTATUS_GENERATION_MASK << WL_TXSTATUS_GENERATION_SHIFT)) | \
+	(((gen) & WL_TXSTATUS_GENERATION_MASK) << WL_TXSTATUS_GENERATION_SHIFT))
+
+#define WL_TXSTATUS_GET_GENERATION(x)	(((x) >> WL_TXSTATUS_GENERATION_SHIFT) & \
+	WL_TXSTATUS_GENERATION_MASK)
+
+#define WL_TXSTATUS_FLAGS_MASK			0xf /* allow 4 bits only */
+#define WL_TXSTATUS_FLAGS_SHIFT			27
+
+#define WL_TXSTATUS_SET_FLAGS(x, flags)	((x)  = \
+	((x) & ~(WL_TXSTATUS_FLAGS_MASK << WL_TXSTATUS_FLAGS_SHIFT)) | \
+	(((flags) & WL_TXSTATUS_FLAGS_MASK) << WL_TXSTATUS_FLAGS_SHIFT))
+#define WL_TXSTATUS_GET_FLAGS(x)		(((x) >> WL_TXSTATUS_FLAGS_SHIFT) & \
+	WL_TXSTATUS_FLAGS_MASK)
+
+#define WL_TXSTATUS_FIFO_MASK			0x7 /* allow 3 bits for FIFO ID */
+#define WL_TXSTATUS_FIFO_SHIFT			24
+
+#define WL_TXSTATUS_SET_FIFO(x, flags)	((x)  = \
+	((x) & ~(WL_TXSTATUS_FIFO_MASK << WL_TXSTATUS_FIFO_SHIFT)) | \
+	(((flags) & WL_TXSTATUS_FIFO_MASK) << WL_TXSTATUS_FIFO_SHIFT))
+#define WL_TXSTATUS_GET_FIFO(x)		(((x) >> WL_TXSTATUS_FIFO_SHIFT) & WL_TXSTATUS_FIFO_MASK)
+
+#define WL_TXSTATUS_PKTID_MASK			0xffffff /* allow 24 bits */
+#define WL_TXSTATUS_SET_PKTID(x, num)	((x) = \
+	((x) & ~WL_TXSTATUS_PKTID_MASK) | (num))
+#define WL_TXSTATUS_GET_PKTID(x)		((x) & WL_TXSTATUS_PKTID_MASK)
+
+#define WL_TXSTATUS_HSLOT_MASK			0xffff /* allow 16 bits */
+#define WL_TXSTATUS_HSLOT_SHIFT			8
+
+#define WL_TXSTATUS_SET_HSLOT(x, hslot)	((x)  = \
+	((x) & ~(WL_TXSTATUS_HSLOT_MASK << WL_TXSTATUS_HSLOT_SHIFT)) | \
+	(((hslot) & WL_TXSTATUS_HSLOT_MASK) << WL_TXSTATUS_HSLOT_SHIFT))
+#define WL_TXSTATUS_GET_HSLOT(x)	(((x) >> WL_TXSTATUS_HSLOT_SHIFT)& \
+	WL_TXSTATUS_HSLOT_MASK)
+
+#define WL_TXSTATUS_FREERUNCTR_MASK		0xff /* allow 8 bits */
+
+#define WL_TXSTATUS_SET_FREERUNCTR(x, ctr)	((x)  = \
+	((x) & ~(WL_TXSTATUS_FREERUNCTR_MASK)) | \
+	((ctr) & WL_TXSTATUS_FREERUNCTR_MASK))
+#define WL_TXSTATUS_GET_FREERUNCTR(x)		((x)& WL_TXSTATUS_FREERUNCTR_MASK)
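+
+/* A minimal sketch composing a 32-bit pkttag word with the SET/GET accessor
+ * pairs above; hslot is a hypothetical host slot index:
+ *
+ *	uint32 word = 0;
+ *	WL_TXSTATUS_SET_GENERATION(word, 1);
+ *	WL_TXSTATUS_SET_FLAGS(word, WLFC_PKTFLAG_PKTFROMHOST);
+ *	WL_TXSTATUS_SET_HSLOT(word, hslot);
+ *	uint gen = WL_TXSTATUS_GET_GENERATION(word);	// reads bit 31 back
+ */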
+
+#define WL_SEQ_FROMFW_MASK		0x1 /* allow 1 bit */
+#define WL_SEQ_FROMFW_SHIFT		13
+#define WL_SEQ_SET_FROMFW(x, val)	((x) = \
+	((x) & ~(WL_SEQ_FROMFW_MASK << WL_SEQ_FROMFW_SHIFT)) | \
+	(((val) & WL_SEQ_FROMFW_MASK) << WL_SEQ_FROMFW_SHIFT))
+#define WL_SEQ_GET_FROMFW(x)	(((x) >> WL_SEQ_FROMFW_SHIFT) & \
+	WL_SEQ_FROMFW_MASK)
+
+#define WL_SEQ_FROMDRV_MASK		0x1 /* allow 1 bit */
+#define WL_SEQ_FROMDRV_SHIFT		12
+#define WL_SEQ_SET_FROMDRV(x, val)	((x) = \
+	((x) & ~(WL_SEQ_FROMDRV_MASK << WL_SEQ_FROMDRV_SHIFT)) | \
+	(((val) & WL_SEQ_FROMDRV_MASK) << WL_SEQ_FROMDRV_SHIFT))
+#define WL_SEQ_GET_FROMDRV(x)	(((x) >> WL_SEQ_FROMDRV_SHIFT) & \
+	WL_SEQ_FROMDRV_MASK)
+
+#define WL_SEQ_NUM_MASK			0xfff /* allow 12 bit */
+#define WL_SEQ_NUM_SHIFT		0
+#define WL_SEQ_SET_NUM(x, val)	((x) = \
+	((x) & ~(WL_SEQ_NUM_MASK << WL_SEQ_NUM_SHIFT)) | \
+	(((val) & WL_SEQ_NUM_MASK) << WL_SEQ_NUM_SHIFT))
+#define WL_SEQ_GET_NUM(x)	(((x) >> WL_SEQ_NUM_SHIFT) & \
+	WL_SEQ_NUM_MASK)
+
+/* 32 STAs should be enough (the lookup index uses 5 bits, b[4:0]); must be a power of 2 */
+#define WLFC_MAC_DESC_TABLE_SIZE	32
+#define WLFC_MAX_IFNUM				16
+#define WLFC_MAC_DESC_ID_INVALID	0xff
+
+/* b[7:5] -reuse guard, b[4:0] -value */
+#define WLFC_MAC_DESC_GET_LOOKUP_INDEX(x) ((x) & 0x1f)
+
+#define WLFC_PKTFLAG_SET_PKTREQUESTED(x)	(x) |= \
+	(WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT)
+
+#define WLFC_PKTFLAG_CLR_PKTREQUESTED(x)	(x) &= \
+	~(WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT)
+
+
+#define WLFC_MAX_PENDING_DATALEN	120
+
+/* host is free to discard the packet */
+#define WLFC_CTL_PKTFLAG_DISCARD	0
+/* D11 suppressed a packet */
+#define WLFC_CTL_PKTFLAG_D11SUPPRESS	1
+/* WL firmware suppressed a packet because MAC is
+	already in PSMode (short time window)
+*/
+#define WLFC_CTL_PKTFLAG_WLSUPPRESS	2
+/* Firmware tossed this packet */
+#define WLFC_CTL_PKTFLAG_TOSSED_BYWLC	3
+/* Firmware tossed after retries */
+#define WLFC_CTL_PKTFLAG_DISCARD_NOACK	4
+
+#define WLFC_D11_STATUS_INTERPRET(txs)	\
+	(((txs)->status.suppr_ind !=  TX_STATUS_SUPR_NONE) ? \
+	WLFC_CTL_PKTFLAG_D11SUPPRESS : \
+	((txs)->status.was_acked ? \
+		WLFC_CTL_PKTFLAG_DISCARD : WLFC_CTL_PKTFLAG_DISCARD_NOACK))
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define WLFC_DBGMESG(x) printf x
+/* wlfc-breadcrumb */
+#define WLFC_BREADCRUMB(x) do {if ((x) == NULL) \
+	{printf("WLFC: %s():%d:caller:%p\n", \
+	__FUNCTION__, __LINE__, __builtin_return_address(0));}} while (0)
+#define WLFC_PRINTMAC(banner, ea) do {printf("%s MAC: [%02x:%02x:%02x:%02x:%02x:%02x]\n", \
+	banner, ea[0], 	ea[1], 	ea[2], 	ea[3], 	ea[4], 	ea[5]); } while (0)
+#define WLFC_WHEREIS(s) printf("WLFC: at %s():%d, %s\n", __FUNCTION__, __LINE__, (s))
+#else
+#define WLFC_DBGMESG(x)
+#define WLFC_BREADCRUMB(x)
+#define WLFC_PRINTMAC(banner, ea)
+#define WLFC_WHEREIS(s)
+#endif
+
+/* AMPDU host reorder packet flags */
+#define WLHOST_REORDERDATA_MAXFLOWS		256
+#define WLHOST_REORDERDATA_LEN		 10
+#define WLHOST_REORDERDATA_TOTLEN	(WLHOST_REORDERDATA_LEN + 1 + 1) /* +tag +len */
+
+#define WLHOST_REORDERDATA_FLOWID_OFFSET		0
+#define WLHOST_REORDERDATA_MAXIDX_OFFSET		2
+#define WLHOST_REORDERDATA_FLAGS_OFFSET			4
+#define WLHOST_REORDERDATA_CURIDX_OFFSET		6
+#define WLHOST_REORDERDATA_EXPIDX_OFFSET		8
+
+#define WLHOST_REORDERDATA_DEL_FLOW		0x01
+#define WLHOST_REORDERDATA_FLUSH_ALL		0x02
+#define WLHOST_REORDERDATA_CURIDX_VALID		0x04
+#define WLHOST_REORDERDATA_EXPIDX_VALID		0x08
+#define WLHOST_REORDERDATA_NEW_HOLE		0x10
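+
+/* A sketch of reading the reorder metadata fields at the offsets above; buf is
+ * assumed to point at WLHOST_REORDERDATA_LEN bytes of value data and the 16-bit
+ * flow id is assumed little-endian:
+ *
+ *	uint16 flow_id = buf[WLHOST_REORDERDATA_FLOWID_OFFSET] |
+ *		(buf[WLHOST_REORDERDATA_FLOWID_OFFSET + 1] << 8);
+ *	uint8 flags = buf[WLHOST_REORDERDATA_FLAGS_OFFSET];
+ *	if (flags & WLHOST_REORDERDATA_CURIDX_VALID)
+ *		cur_idx = buf[WLHOST_REORDERDATA_CURIDX_OFFSET];
+ */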
+
+/* transaction id data len: byte 0 is rsvd, byte 1 is the seq number, bytes 2-5 are used for the timestamp */
+#define WLFC_CTL_TRANS_ID_LEN			6
+#define WLFC_TYPE_TRANS_ID_LEN			6
+
+#define WLFC_MODE_HANGER	1 /* use hanger */
+#define WLFC_MODE_AFQ		2 /* use afq */
+#define WLFC_IS_OLD_DEF(x) ((x & 1) || (x & 2))
+
+#define WLFC_MODE_AFQ_SHIFT		2	/* afq bit */
+#define WLFC_SET_AFQ(x, val)	((x) = \
+	((x) & ~(1 << WLFC_MODE_AFQ_SHIFT)) | \
+	(((val) & 1) << WLFC_MODE_AFQ_SHIFT))
+#define WLFC_GET_AFQ(x)	(((x) >> WLFC_MODE_AFQ_SHIFT) & 1)
+
+#define WLFC_MODE_REUSESEQ_SHIFT	3	/* seq reuse bit */
+#define WLFC_SET_REUSESEQ(x, val)	((x) = \
+	((x) & ~(1 << WLFC_MODE_REUSESEQ_SHIFT)) | \
+	(((val) & 1) << WLFC_MODE_REUSESEQ_SHIFT))
+#define WLFC_GET_REUSESEQ(x)	(((x) >> WLFC_MODE_REUSESEQ_SHIFT) & 1)
+
+#define WLFC_MODE_REORDERSUPP_SHIFT	4	/* host reorder suppress pkt bit */
+#define WLFC_SET_REORDERSUPP(x, val)	((x) = \
+	((x) & ~(1 << WLFC_MODE_REORDERSUPP_SHIFT)) | \
+	(((val) & 1) << WLFC_MODE_REORDERSUPP_SHIFT))
+#define WLFC_GET_REORDERSUPP(x)	(((x) >> WLFC_MODE_REORDERSUPP_SHIFT) & 1)
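+
+/* Usage sketch: building a wlfc mode word bit by bit with the accessors
+ * above before handing it to the firmware ('mode' is hypothetical).
+ *
+ *	uint32 mode = 0;
+ *	WLFC_SET_AFQ(mode, 1);
+ *	WLFC_SET_REUSESEQ(mode, 1);
+ *	if (WLFC_GET_AFQ(mode))
+ *		... firmware-side queuing (afq) is in use ...
+ */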
+
+#endif /* __wlfc_proto_definitions_h__ */
diff --git a/drivers/staging/edison-bcm43340/include/wlioctl.h b/drivers/staging/edison-bcm43340/include/wlioctl.h
new file mode 100644
index 0000000..640c024
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/include/wlioctl.h
@@ -0,0 +1,4338 @@
+/*
+ * Custom OID/ioctl definitions for
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wlioctl.h 475499 2014-05-06 00:56:56Z $
+ */
+
+#ifndef _wlioctl_h_
+#define	_wlioctl_h_
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+#include <proto/bcmip.h>
+#include <proto/bcmeth.h>
+#include <proto/bcmevent.h>
+#include <proto/802.11.h>
+#include <proto/802.1d.h>
+#include <bcmwifi_channels.h>
+#include <bcmwifi_rates.h>
+#include <devctrl_if/wlioctl_defs.h>
+
+#include <bcm_mpool_pub.h>
+#include <bcmcdc.h>
+
+
+
+
+#ifndef INTF_NAME_SIZ
+#define INTF_NAME_SIZ	16
+#endif
+
+/* Used to send ioctls over the transport pipe */
+typedef struct remote_ioctl {
+	cdc_ioctl_t	msg;
+	uint32		data_len;
+	char		intf_name[INTF_NAME_SIZ];
+} rem_ioctl_t;
+#define REMOTE_SIZE	sizeof(rem_ioctl_t)
+
+typedef struct {
+	uint32 num;
+	chanspec_t list[1];
+} chanspec_list_t;
+
+/* association decision information */
+typedef struct {
+	bool		assoc_approved;		/* (re)association approved */
+	uint16		reject_reason;		/* reason code for rejecting association */
+	struct		ether_addr   da;
+	int64		sys_time;		/* current system time */
+} assoc_decision_t;
+
+#define ACTION_FRAME_SIZE 1800
+
+typedef struct wl_action_frame {
+	struct ether_addr 	da;
+	uint16 			len;
+	uint32 			packetId;
+	uint8			data[ACTION_FRAME_SIZE];
+} wl_action_frame_t;
+
+#define WL_WIFI_ACTION_FRAME_SIZE sizeof(struct wl_action_frame)
+
+typedef struct ssid_info
+{
+	uint8		ssid_len;	/* the length of SSID */
+	uint8		ssid[32];	/* SSID string */
+} ssid_info_t;
+
+typedef struct wl_af_params {
+	uint32 			channel;
+	int32 			dwell_time;
+	struct ether_addr 	BSSID;
+	wl_action_frame_t	action_frame;
+} wl_af_params_t;
+
+#define WL_WIFI_AF_PARAMS_SIZE sizeof(struct wl_af_params)
+
+#define MFP_TEST_FLAG_NORMAL	0
+#define MFP_TEST_FLAG_ANY_KEY	1
+typedef struct wl_sa_query {
+	uint32			flag;
+	uint8 			action;
+	uint16 			id;
+	struct ether_addr 	da;
+} wl_sa_query_t;
+
+/* require default structure packing */
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+
+
+/* Legacy structure to help keep the wl tool and tray app backward compatible */
+
+#define	LEGACY_WL_BSS_INFO_VERSION	107	/* older version of wl_bss_info struct */
+
+typedef struct wl_bss_info_107 {
+	uint32		version;		/* version field */
+	uint32		length;			/* byte length of data in this record,
+						 * starting at version and including IEs
+						 */
+	struct ether_addr BSSID;
+	uint16		beacon_period;		/* units are Kusec */
+	uint16		capability;		/* Capability information */
+	uint8		SSID_len;
+	uint8		SSID[32];
+	struct {
+		uint	count;			/* # rates in this set */
+		uint8	rates[16];		/* rates in 500kbps units w/hi bit set if basic */
+	} rateset;				/* supported rates */
+	uint8		channel;		/* Channel no. */
+	uint16		atim_window;		/* units are Kusec */
+	uint8		dtim_period;		/* DTIM period */
+	int16		RSSI;			/* receive signal strength (in dBm) */
+	int8		phy_noise;		/* noise (in dBm) */
+	uint32		ie_length;		/* byte length of Information Elements */
+	/* variable length Information Elements */
+} wl_bss_info_107_t;
+
+/*
+ * Per-BSS information structure.
+ */
+
+#define	LEGACY2_WL_BSS_INFO_VERSION	108		/* old version of wl_bss_info struct */
+
+/* BSS info structure
+ * Applications MUST CHECK ie_offset field and length field to access IEs and
+ * next bss_info structure in a vector (in wl_scan_results_t)
+ */
+typedef struct wl_bss_info_108 {
+	uint32		version;		/* version field */
+	uint32		length;			/* byte length of data in this record,
+						 * starting at version and including IEs
+						 */
+	struct ether_addr BSSID;
+	uint16		beacon_period;		/* units are Kusec */
+	uint16		capability;		/* Capability information */
+	uint8		SSID_len;
+	uint8		SSID[32];
+	struct {
+		uint	count;			/* # rates in this set */
+		uint8	rates[16];		/* rates in 500kbps units w/hi bit set if basic */
+	} rateset;				/* supported rates */
+	chanspec_t	chanspec;		/* chanspec for bss */
+	uint16		atim_window;		/* units are Kusec */
+	uint8		dtim_period;		/* DTIM period */
+	int16		RSSI;			/* receive signal strength (in dBm) */
+	int8		phy_noise;		/* noise (in dBm) */
+
+	uint8		n_cap;			/* BSS is 802.11N Capable */
+	uint32		nbss_cap;		/* 802.11N BSS Capabilities (based on HT_CAP_*) */
+	uint8		ctl_ch;			/* 802.11N BSS control channel number */
+	uint32		reserved32[1];		/* Reserved for expansion of BSS properties */
+	uint8		flags;			/* flags */
+	uint8		reserved[3];		/* Reserved for expansion of BSS properties */
+	uint8		basic_mcs[MCSSET_LEN];	/* 802.11N BSS required MCS set */
+
+	uint16		ie_offset;		/* offset at which IEs start, from beginning */
+	uint32		ie_length;		/* byte length of Information Elements */
+	/* Add new fields here */
+	/* variable length Information Elements */
+} wl_bss_info_108_t;
+
+#define	WL_BSS_INFO_VERSION	109		/* current version of wl_bss_info struct */
+
+/* BSS info structure
+ * Applications MUST CHECK ie_offset field and length field to access IEs and
+ * next bss_info structure in a vector (in wl_scan_results_t)
+ */
+typedef struct wl_bss_info {
+	uint32		version;		/* version field */
+	uint32		length;			/* byte length of data in this record,
+						 * starting at version and including IEs
+						 */
+	struct ether_addr BSSID;
+	uint16		beacon_period;		/* units are Kusec */
+	uint16		capability;		/* Capability information */
+	uint8		SSID_len;
+	uint8		SSID[32];
+	struct {
+		uint	count;			/* # rates in this set */
+		uint8	rates[16];		/* rates in 500kbps units w/hi bit set if basic */
+	} rateset;				/* supported rates */
+	chanspec_t	chanspec;		/* chanspec for bss */
+	uint16		atim_window;		/* units are Kusec */
+	uint8		dtim_period;		/* DTIM period */
+	int16		RSSI;			/* receive signal strength (in dBm) */
+	int8		phy_noise;		/* noise (in dBm) */
+
+	uint8		n_cap;			/* BSS is 802.11N Capable */
+	uint32		nbss_cap;		/* 802.11N+AC BSS Capabilities */
+	uint8		ctl_ch;			/* 802.11N BSS control channel number */
+	uint8		padding1[3];		/* explicit struct alignment padding */
+	uint16		vht_rxmcsmap;	/* VHT rx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
+	uint16		vht_txmcsmap;	/* VHT tx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
+	uint8		flags;			/* flags */
+	uint8		vht_cap;		/* BSS is vht capable */
+	uint8		reserved[2];		/* Reserved for expansion of BSS properties */
+	uint8		basic_mcs[MCSSET_LEN];	/* 802.11N BSS required MCS set */
+
+	uint16		ie_offset;		/* offset at which IEs start, from beginning */
+	uint32		ie_length;		/* byte length of Information Elements */
+	int16		SNR;			/* average SNR during frame reception */
+	/* Add new fields here */
+	/* variable length Information Elements */
+} wl_bss_info_t;
+
+
+typedef struct wl_bsscfg {
+	uint32  bsscfg_idx;
+	uint32  wsec;
+	uint32  WPA_auth;
+	uint32  wsec_index;
+	uint32  associated;
+	uint32  BSS;
+	uint32  phytest_on;
+	struct ether_addr   prev_BSSID;
+	struct ether_addr   BSSID;
+	uint32  targetbss_wpa2_flags;
+	uint32 assoc_type;
+	uint32 assoc_state;
+} wl_bsscfg_t;
+
+typedef struct wl_if_add {
+	uint32  bsscfg_flags;
+	uint32  if_flags;
+	uint32  ap;
+	struct ether_addr   mac_addr;
+} wl_if_add_t;
+
+typedef struct wl_bss_config {
+	uint32	atim_window;
+	uint32	beacon_period;
+	uint32	chanspec;
+} wl_bss_config_t;
+
+#define WL_BSS_USER_RADAR_CHAN_SELECT	0x1	/* User application will randomly select
+						 * radar channel.
+						 */
+
+#define DLOAD_HANDLER_VER			1	/* Downloader version */
+#define DLOAD_FLAG_VER_MASK		0xf000	/* Downloader version mask */
+#define DLOAD_FLAG_VER_SHIFT	12	/* Downloader version shift */
+
+#define DL_CRC_NOT_INUSE 			0x0001
+
+/* generic download types & flags */
+enum {
+	DL_TYPE_UCODE = 1,
+	DL_TYPE_CLM = 2
+};
+
+/* ucode type values */
+enum {
+	UCODE_FW,
+	INIT_VALS,
+	BS_INIT_VALS
+};
+
+struct wl_dload_data {
+	uint16 flag;
+	uint16 dload_type;
+	uint32 len;
+	uint32 crc;
+	uint8  data[1];
+};
+typedef struct wl_dload_data wl_dload_data_t;
+
+struct wl_ucode_info {
+	uint32 ucode_type;
+	uint32 num_chunks;
+	uint32 chunk_len;
+	uint32 chunk_num;
+	uint8  data_chunk[1];
+};
+typedef struct wl_ucode_info wl_ucode_info_t;
+
+struct wl_clm_dload_info {
+	uint32 ds_id;
+	uint32 clm_total_len;
+	uint32 num_chunks;
+	uint32 chunk_len;
+	uint32 chunk_offset;
+	uint8  data_chunk[1];
+};
+typedef struct wl_clm_dload_info wl_clm_dload_info_t;
+
+typedef struct wlc_ssid {
+	uint32		SSID_len;
+	uchar		SSID[DOT11_MAX_SSID_LEN];
+} wlc_ssid_t;
+
+
+#define MAX_PREFERRED_AP_NUM     5
+typedef struct wlc_fastssidinfo {
+	uint32				SSID_channel[MAX_PREFERRED_AP_NUM];
+	wlc_ssid_t		SSID_info[MAX_PREFERRED_AP_NUM];
+} wlc_fastssidinfo_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wnm_url {
+	uint8   len;
+	uint8   data[1];
+} BWL_POST_PACKED_STRUCT wnm_url_t;
+
+typedef struct chan_scandata {
+	uint8		txpower;
+	uint8		pad;
+	chanspec_t	channel;	/* Channel num, bw, ctrl_sb and band */
+	uint32		channel_mintime;
+	uint32		channel_maxtime;
+} chan_scandata_t;
+
+typedef enum wl_scan_type {
+	EXTDSCAN_FOREGROUND_SCAN,
+	EXTDSCAN_BACKGROUND_SCAN,
+	EXTDSCAN_FORCEDBACKGROUND_SCAN
+} wl_scan_type_t;
+
+#define WLC_EXTDSCAN_MAX_SSID		5
+
+typedef struct wl_extdscan_params {
+	int8 		nprobes;		/* 0: passive, otherwise active */
+	int8    	split_scan;		/* split scan */
+	int8		band;			/* band */
+	int8		pad;
+	wlc_ssid_t 	ssid[WLC_EXTDSCAN_MAX_SSID]; /* ssid list */
+	uint32		tx_rate;		/* in 500kbps units */
+	wl_scan_type_t	scan_type;		/* enum */
+	int32 		channel_num;
+	chan_scandata_t channel_list[1];	/* list of chandata structs */
+} wl_extdscan_params_t;
+
+#define WL_EXTDSCAN_PARAMS_FIXED_SIZE 	(sizeof(wl_extdscan_params_t) - sizeof(chan_scandata_t))
+
+#define WL_SCAN_PARAMS_SSID_MAX 	10
+
+typedef struct wl_scan_params {
+	wlc_ssid_t ssid;		/* default: {0, ""} */
+	struct ether_addr bssid;	/* default: bcast */
+	int8 bss_type;			/* default: any,
+					 * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
+					 */
+	uint8 scan_type;		/* flags, 0 use default */
+	int32 nprobes;			/* -1 use default, number of probes per channel */
+	int32 active_time;		/* -1 use default, dwell time per channel for
+					 * active scanning
+					 */
+	int32 passive_time;		/* -1 use default, dwell time per channel
+					 * for passive scanning
+					 */
+	int32 home_time;		/* -1 use default, dwell time for the home channel
+					 * between channel scans
+					 */
+	int32 channel_num;		/* count of channels and ssids that follow
+					 *
+					 * low half is count of channels in channel_list, 0
+					 * means default (use all available channels)
+					 *
+					 * high half is entries in wlc_ssid_t array that
+					 * follows channel_list, aligned for int32 (4 bytes)
+					 * meaning an odd channel count implies a 2-byte pad
+					 * between end of channel_list and first ssid
+					 *
+					 * if ssid count is zero, single ssid in the fixed
+					 * parameter portion is assumed, otherwise ssid in
+					 * the fixed portion is ignored
+					 */
+	uint16 channel_list[1];		/* list of chanspecs */
+} wl_scan_params_t;
+
+/* size of wl_scan_params not including variable length array */
+#define WL_SCAN_PARAMS_FIXED_SIZE 64
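+
+/* Usage sketch: packing channel and ssid counts into channel_num as
+ * described above; the low 16 bits carry the chanspec count and the high
+ * 16 bits the ssid count ('params', 'nchan' and 'nssid' are hypothetical).
+ *
+ *	uint16 nchan = 2, nssid = 1;
+ *	params->channel_num = ((int32)nssid << 16) | (nchan & 0xffff);
+ */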
+
+#define ISCAN_REQ_VERSION 1
+
+/* incremental scan struct */
+typedef struct wl_iscan_params {
+	uint32 version;
+	uint16 action;
+	uint16 scan_duration;
+	wl_scan_params_t params;
+} wl_iscan_params_t;
+
+/* 3 fields + size of wl_scan_params, not including variable length array */
+#define WL_ISCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_iscan_params_t, params) + sizeof(wlc_ssid_t))
+
+typedef struct wl_scan_results {
+	uint32 buflen;
+	uint32 version;
+	uint32 count;
+	wl_bss_info_t bss_info[1];
+} wl_scan_results_t;
+
+/* size of wl_scan_results not including variable length array */
+#define WL_SCAN_RESULTS_FIXED_SIZE (sizeof(wl_scan_results_t) - sizeof(wl_bss_info_t))
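+
+/* Usage sketch: walking the bss_info vector per the MUST-CHECK note above.
+ * Each record's 'length' covers the record including its IEs, so the next
+ * record starts 'length' bytes later (sketch only; real code must bounds-
+ * check against 'buflen' and handle byte order).
+ *
+ *	wl_bss_info_t *bi = results->bss_info;
+ *	uint32 i;
+ *	for (i = 0; i < results->count; i++) {
+ *		... consume bi; IEs start at (uint8 *)bi + bi->ie_offset ...
+ *		bi = (wl_bss_info_t *)((uint8 *)bi + bi->length);
+ *	}
+ */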
+
+/* Used in EXT_STA */
+#define DNGL_RXCTXT_SIZE	45
+
+
+#define ESCAN_REQ_VERSION 1
+
+typedef struct wl_escan_params {
+	uint32 version;
+	uint16 action;
+	uint16 sync_id;
+	wl_scan_params_t params;
+} wl_escan_params_t;
+
+#define WL_ESCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_escan_params_t, params) + sizeof(wlc_ssid_t))
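+
+/* Usage sketch: sizing an escan request carrying 'nchan' chanspecs and no
+ * extra ssids beyond the fixed one ('nchan' and 'len' are hypothetical).
+ *
+ *	uint len = OFFSETOF(wl_escan_params_t, params) +
+ *		WL_SCAN_PARAMS_FIXED_SIZE + nchan * sizeof(uint16);
+ *	... allocate 'len' bytes, zero them, fill version/action/sync_id ...
+ */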
+
+typedef struct wl_escan_result {
+	uint32 buflen;
+	uint32 version;
+	uint16 sync_id;
+	uint16 bss_count;
+	wl_bss_info_t bss_info[1];
+} wl_escan_result_t;
+
+#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(wl_escan_result_t) - sizeof(wl_bss_info_t))
+
+/* incremental scan results struct */
+typedef struct wl_iscan_results {
+	uint32 status;
+	wl_scan_results_t results;
+} wl_iscan_results_t;
+
+/* size of wl_iscan_results not including variable length array */
+#define WL_ISCAN_RESULTS_FIXED_SIZE \
+	(WL_SCAN_RESULTS_FIXED_SIZE + OFFSETOF(wl_iscan_results_t, results))
+
+#define SCANOL_PARAMS_VERSION	1
+
+typedef struct scanol_params {
+	uint32 version;
+	uint32 flags;	/* offload scanning flags */
+	int32 active_time;	/* -1 use default, dwell time per channel for active scanning */
+	int32 passive_time;	/* -1 use default, dwell time per channel for passive scanning */
+	int32 idle_rest_time;	/* -1 use default, time idle between scan cycle */
+	int32 idle_rest_time_multiplier;
+	int32 active_rest_time;
+	int32 active_rest_time_multiplier;
+	int32 scan_cycle_idle_rest_time;
+	int32 scan_cycle_idle_rest_multiplier;
+	int32 scan_cycle_active_rest_time;
+	int32 scan_cycle_active_rest_multiplier;
+	int32 max_rest_time;
+	int32 max_scan_cycles;
+	int32 nprobes;		/* -1 use default, number of probes per channel */
+	int32 scan_start_delay;
+	uint32 nchannels;
+	uint32 ssid_count;
+	wlc_ssid_t ssidlist[1];
+} scanol_params_t;
+
+typedef struct wl_probe_params {
+	wlc_ssid_t ssid;
+	struct ether_addr bssid;
+	struct ether_addr mac;
+} wl_probe_params_t;
+
+#define WL_MAXRATES_IN_SET		16	/* max # of rates in a rateset */
+typedef struct wl_rateset {
+	uint32	count;			/* # rates in this set */
+	uint8	rates[WL_MAXRATES_IN_SET];	/* rates in 500kbps units w/hi bit set if basic */
+} wl_rateset_t;
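+
+/* Usage sketch: decoding one rate byte; the value is the rate in 500 kbps
+ * units and the high bit marks a basic rate (so 0x82 is basic 1 Mbps).
+ *
+ *	uint8 r = rs.rates[i];
+ *	bool basic = (r & 0x80) != 0;
+ *	uint kbps = (uint)(r & 0x7f) * 500;
+ */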
+
+typedef struct wl_rateset_args {
+	uint32	count;			/* # rates in this set */
+	uint8	rates[WL_MAXRATES_IN_SET];	/* rates in 500kbps units w/hi bit set if basic */
+	uint8   mcs[MCSSET_LEN];        /* supported mcs index bit map */
+	uint16 vht_mcs[VHT_CAP_MCS_MAP_NSS_MAX]; /* supported mcs index bit map per nss */
+} wl_rateset_args_t;
+
+#define TXBF_RATE_MCS_ALL		4
+#define TXBF_RATE_VHT_ALL		4
+#define TXBF_RATE_OFDM_ALL		8
+
+typedef struct wl_txbf_rateset {
+	uint8	txbf_rate_mcs[TXBF_RATE_MCS_ALL];	/* one for each stream */
+	uint8	txbf_rate_mcs_bcm[TXBF_RATE_MCS_ALL];	/* one for each stream */
+	uint16	txbf_rate_vht[TXBF_RATE_VHT_ALL];	/* one for each stream */
+	uint16	txbf_rate_vht_bcm[TXBF_RATE_VHT_ALL];	/* one for each stream */
+	uint8	txbf_rate_ofdm[TXBF_RATE_OFDM_ALL];	/* bitmap of ofdm rates that enables txbf */
+	uint8	txbf_rate_ofdm_bcm[TXBF_RATE_OFDM_ALL]; /* bitmap of ofdm rates that enables txbf */
+	uint8	txbf_rate_ofdm_cnt;
+	uint8	txbf_rate_ofdm_cnt_bcm;
+} wl_txbf_rateset_t;
+
+#define OFDM_RATE_MASK			0x0000007f
+typedef uint8 ofdm_rates_t;
+
+typedef struct wl_rates_info {
+	wl_rateset_t rs_tgt;
+	uint32 phy_type;
+	int32 bandtype;
+	uint8 cck_only;
+	uint8 rate_mask;
+	uint8 mcsallow;
+	uint8 bw;
+	uint8 txstreams;
+} wl_rates_info_t;
+
+/* uint32 list */
+typedef struct wl_uint32_list {
+	/* in - # of elements, out - # of entries */
+	uint32 count;
+	/* variable length uint32 list */
+	uint32 element[1];
+} wl_uint32_list_t;
+
+/* used for association with a specific BSSID and chanspec list */
+typedef struct wl_assoc_params {
+	struct ether_addr bssid;	/* 00:00:00:00:00:00: broadcast scan */
+	uint16 bssid_cnt;		/* 0: use chanspec_num, and the single bssid,
+					* otherwise count of chanspecs in chanspec_list
+					* AND paired bssids following chanspec_list
+					* also, chanspec_num has to be set to zero
+					* for bssid list to be used
+					*/
+	int32 chanspec_num;		/* 0: all available channels,
+					* otherwise count of chanspecs in chanspec_list
+					*/
+	chanspec_t chanspec_list[1];	/* list of chanspecs */
+} wl_assoc_params_t;
+
+#define WL_ASSOC_PARAMS_FIXED_SIZE 	OFFSETOF(wl_assoc_params_t, chanspec_list)
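+
+/* Usage sketch: the single-bssid form described above; bssid_cnt stays 0
+ * so chanspec_num and chanspec_list are interpreted directly ('ap',
+ * 'target' and 'chspec' are hypothetical).
+ *
+ *	memcpy(&ap->bssid, target, ETHER_ADDR_LEN);
+ *	ap->bssid_cnt = 0;
+ *	ap->chanspec_num = 1;
+ *	ap->chanspec_list[0] = chspec;
+ */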
+
+/* used for reassociation/roam to a specific BSSID and channel */
+typedef wl_assoc_params_t wl_reassoc_params_t;
+#define WL_REASSOC_PARAMS_FIXED_SIZE	WL_ASSOC_PARAMS_FIXED_SIZE
+
+/* used for association to a specific BSSID and channel */
+typedef wl_assoc_params_t wl_join_assoc_params_t;
+#define WL_JOIN_ASSOC_PARAMS_FIXED_SIZE	WL_ASSOC_PARAMS_FIXED_SIZE
+
+/* used for join with or without a specific bssid and channel list */
+typedef struct wl_join_params {
+	wlc_ssid_t ssid;
+	wl_assoc_params_t params;	/* optional field, but when present it must
+					 * include the fixed portion of the
+					 * wl_assoc_params_t struct.
+					 */
+} wl_join_params_t;
+
+#define WL_JOIN_PARAMS_FIXED_SIZE 	(OFFSETOF(wl_join_params_t, params) + \
+					 WL_ASSOC_PARAMS_FIXED_SIZE)
+/* scan params for extended join */
+typedef struct wl_join_scan_params {
+	uint8 scan_type;		/* 0 use default, active or passive scan */
+	int32 nprobes;			/* -1 use default, number of probes per channel */
+	int32 active_time;		/* -1 use default, dwell time per channel for
+					 * active scanning
+					 */
+	int32 passive_time;		/* -1 use default, dwell time per channel
+					 * for passive scanning
+					 */
+	int32 home_time;		/* -1 use default, dwell time for the home channel
+					 * between channel scans
+					 */
+} wl_join_scan_params_t;
+
+/* extended join params */
+typedef struct wl_extjoin_params {
+	wlc_ssid_t ssid;		/* {0, ""}: wildcard scan */
+	wl_join_scan_params_t scan;
+	wl_join_assoc_params_t assoc;	/* optional field, but when present it must
+					 * include the fixed portion of the
+					 * wl_join_assoc_params_t struct.
+					 */
+} wl_extjoin_params_t;
+#define WL_EXTJOIN_PARAMS_FIXED_SIZE 	(OFFSETOF(wl_extjoin_params_t, assoc) + \
+					 WL_JOIN_ASSOC_PARAMS_FIXED_SIZE)
+
+#define ANT_SELCFG_MAX		4	/* max number of antenna configurations */
+#define MAX_STREAMS_SUPPORTED	4	/* max number of streams supported */
+typedef struct {
+	uint8 ant_config[ANT_SELCFG_MAX];	/* antenna configuration */
+	uint8 num_antcfg;	/* number of available antenna configurations */
+} wlc_antselcfg_t;
+
+typedef struct {
+	uint32 duration;	/* millisecs spent sampling this channel */
+	uint32 congest_ibss;	/* millisecs in our bss (presumably this traffic
+				 * will move if the cur bss moves channels)
+				 */
+	uint32 congest_obss;	/* traffic not in our bss */
+	uint32 interference;	/* millisecs detecting a non 802.11 interferer. */
+	uint32 timestamp;	/* second timestamp */
+} cca_congest_t;
+
+typedef struct {
+	chanspec_t chanspec;	/* Which channel? */
+	uint8 num_secs;		/* How many secs worth of data */
+	cca_congest_t  secs[1];	/* Data */
+} cca_congest_channel_req_t;
+
+/* interference sources */
+enum interference_source {
+	ITFR_NONE = 0,		/* no interference */
+	ITFR_PHONE,		/* wireless phone */
+	ITFR_VIDEO_CAMERA,	/* wireless video camera */
+	ITFR_MICROWAVE_OVEN,	/* microwave oven */
+	ITFR_BABY_MONITOR,	/* wireless baby monitor */
+	ITFR_BLUETOOTH,		/* bluetooth */
+	ITFR_VIDEO_CAMERA_OR_BABY_MONITOR,	/* wireless camera or baby monitor */
+	ITFR_BLUETOOTH_OR_BABY_MONITOR,	/* bluetooth or baby monitor */
+	ITFR_VIDEO_CAMERA_OR_PHONE,	/* video camera or phone */
+	ITFR_UNIDENTIFIED	/* interference from unidentified source */
+};
+
+/* structure for interference source report */
+typedef struct {
+	uint32 flags;	/* flags.  bit definitions below */
+	uint32 source;	/* last detected interference source */
+	uint32 timestamp;	/* timestamp (in seconds) of the last interference flag change */
+} interference_source_rep_t;
+
+#define WLC_CNTRY_BUF_SZ	4		/* Country string is 3 bytes + NUL */
+
+
+typedef struct wl_country {
+	char country_abbrev[WLC_CNTRY_BUF_SZ];	/* nul-terminated country code used in
+						 * the Country IE
+						 */
+	int32 rev;				/* revision specifier for ccode
+						 * on set, -1 indicates unspecified.
+						 * on get, rev >= 0
+						 */
+	char ccode[WLC_CNTRY_BUF_SZ];		/* nul-terminated built-in country code.
+						 * variable length, but fixed size in
+						 * struct allows simple allocation for
+						 * expected country strings <= 3 chars.
+						 */
+} wl_country_t;
+
+typedef struct wl_channels_in_country {
+	uint32 buflen;
+	uint32 band;
+	char country_abbrev[WLC_CNTRY_BUF_SZ];
+	uint32 count;
+	uint32 channel[1];
+} wl_channels_in_country_t;
+
+typedef struct wl_country_list {
+	uint32 buflen;
+	uint32 band_set;
+	uint32 band;
+	uint32 count;
+	char country_abbrev[1];
+} wl_country_list_t;
+
+typedef struct wl_rm_req_elt {
+	int8	type;
+	int8	flags;
+	chanspec_t	chanspec;
+	uint32	token;		/* token for this measurement */
+	uint32	tsf_h;		/* TSF high 32-bits of Measurement start time */
+	uint32	tsf_l;		/* TSF low 32-bits */
+	uint32	dur;		/* TUs */
+} wl_rm_req_elt_t;
+
+typedef struct wl_rm_req {
+	uint32	token;		/* overall measurement set token */
+	uint32	count;		/* number of measurement requests */
+	void	*cb;		/* completion callback function: may be NULL */
+	void	*cb_arg;	/* arg to completion callback function */
+	wl_rm_req_elt_t	req[1];	/* variable length block of requests */
+} wl_rm_req_t;
+#define WL_RM_REQ_FIXED_LEN	OFFSETOF(wl_rm_req_t, req)
+
+typedef struct wl_rm_rep_elt {
+	int8	type;
+	int8	flags;
+	chanspec_t	chanspec;
+	uint32	token;		/* token for this measurement */
+	uint32	tsf_h;		/* TSF high 32-bits of Measurement start time */
+	uint32	tsf_l;		/* TSF low 32-bits */
+	uint32	dur;		/* TUs */
+	uint32	len;		/* byte length of data block */
+	uint8	data[1];	/* variable length data block */
+} wl_rm_rep_elt_t;
+#define WL_RM_REP_ELT_FIXED_LEN	24	/* length excluding data block */
+
+#define WL_RPI_REP_BIN_NUM 8
+typedef struct wl_rm_rpi_rep {
+	uint8	rpi[WL_RPI_REP_BIN_NUM];
+	int8	rpi_max[WL_RPI_REP_BIN_NUM];
+} wl_rm_rpi_rep_t;
+
+typedef struct wl_rm_rep {
+	uint32	token;		/* overall measurement set token */
+	uint32	len;		/* length of measurement report block */
+	wl_rm_rep_elt_t	rep[1];	/* variable length block of reports */
+} wl_rm_rep_t;
+#define WL_RM_REP_FIXED_LEN	8
+
+
+typedef enum sup_auth_status {
+	/* Basic supplicant authentication states */
+	WLC_SUP_DISCONNECTED = 0,
+	WLC_SUP_CONNECTING,
+	WLC_SUP_IDREQUIRED,
+	WLC_SUP_AUTHENTICATING,
+	WLC_SUP_AUTHENTICATED,
+	WLC_SUP_KEYXCHANGE,
+	WLC_SUP_KEYED,
+	WLC_SUP_TIMEOUT,
+	WLC_SUP_LAST_BASIC_STATE,
+
+	/* Extended supplicant authentication states */
+	/* Waiting to receive handshake msg M1 */
+	WLC_SUP_KEYXCHANGE_WAIT_M1 = WLC_SUP_AUTHENTICATED,
+	/* Preparing to send handshake msg M2 */
+	WLC_SUP_KEYXCHANGE_PREP_M2 = WLC_SUP_KEYXCHANGE,
+	/* Waiting to receive handshake msg M3 */
+	WLC_SUP_KEYXCHANGE_WAIT_M3 = WLC_SUP_LAST_BASIC_STATE,
+	WLC_SUP_KEYXCHANGE_PREP_M4,	/* Preparing to send handshake msg M4 */
+	WLC_SUP_KEYXCHANGE_WAIT_G1,	/* Waiting to receive handshake msg G1 */
+	WLC_SUP_KEYXCHANGE_PREP_G2	/* Preparing to send handshake msg G2 */
+} sup_auth_status_t;
+
+typedef struct wl_wsec_key {
+	uint32		index;		/* key index */
+	uint32		len;		/* key length */
+	uint8		data[DOT11_MAX_KEY_SIZE];	/* key data */
+	uint32		pad_1[18];
+	uint32		algo;		/* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
+	uint32		flags;		/* misc flags */
+	uint32		pad_2[2];
+	int		pad_3;
+	int		iv_initialized;	/* has IV been initialized already? */
+	int		pad_4;
+	/* Rx IV */
+	struct {
+		uint32	hi;		/* upper 32 bits of IV */
+		uint16	lo;		/* lower 16 bits of IV */
+	} rxiv;
+	uint32		pad_5[2];
+	struct ether_addr ea;		/* per station */
+} wl_wsec_key_t;
+
+#define WSEC_MIN_PSK_LEN	8
+#define WSEC_MAX_PSK_LEN	64
+
+/* Flag for key material that needs pass-hashing */
+#define WSEC_PASSPHRASE		(1<<0)
+
+/* receptacle for WLC_SET_WSEC_PMK parameter */
+typedef struct {
+	ushort	key_len;		/* octets in key material */
+	ushort	flags;			/* key handling qualification */
+	uint8	key[WSEC_MAX_PSK_LEN];	/* PMK material */
+} wsec_pmk_t;
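+
+/* Usage sketch: handing an ASCII passphrase to WLC_SET_WSEC_PMK; setting
+ * WSEC_PASSPHRASE asks the driver/firmware to hash it into the PMK
+ * ('pass' is a hypothetical nul-terminated string of 8..64 characters).
+ *
+ *	wsec_pmk_t pmk;
+ *	pmk.key_len = (ushort)strlen(pass);
+ *	pmk.flags = WSEC_PASSPHRASE;
+ *	memcpy(pmk.key, pass, pmk.key_len);
+ */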
+
+typedef struct _pmkid {
+	struct ether_addr	BSSID;
+	uint8			PMKID[WPA2_PMKID_LEN];
+} pmkid_t;
+
+typedef struct _pmkid_list {
+	uint32	npmkid;
+	pmkid_t	pmkid[1];
+} pmkid_list_t;
+
+typedef struct _pmkid_cand {
+	struct ether_addr	BSSID;
+	uint8			preauth;
+} pmkid_cand_t;
+
+typedef struct _pmkid_cand_list {
+	uint32	npmkid_cand;
+	pmkid_cand_t	pmkid_cand[1];
+} pmkid_cand_list_t;
+
+#define WL_STA_ANT_MAX		4	/* max possible rx antennas */
+
+typedef struct wl_assoc_info {
+	uint32		req_len;
+	uint32		resp_len;
+	uint32		flags;
+	struct dot11_assoc_req req;
+	struct ether_addr reassoc_bssid; /* used in reassociations */
+	struct dot11_assoc_resp resp;
+} wl_assoc_info_t;
+
+typedef struct wl_led_info {
+	uint32      index;      /* led index */
+	uint32      behavior;
+	uint8       activehi;
+} wl_led_info_t;
+
+
+/* srom read/write struct passed through ioctl */
+typedef struct {
+	uint	byteoff;	/* byte offset */
+	uint	nbytes;		/* number of bytes */
+	uint16	buf[1];
+} srom_rw_t;
+
+/* similar struct for CIS (SROM or OTP) [iovar: may not be aligned] */
+typedef struct {
+	uint32	source;		/* cis source */
+	uint32	byteoff;	/* byte offset */
+	uint32	nbytes;		/* number of bytes */
+	/* data follows here */
+} cis_rw_t;
+
+/* R_REG and W_REG struct passed through ioctl */
+typedef struct {
+	uint32	byteoff;	/* byte offset of the field in d11regs_t */
+	uint32	val;		/* read/write value of the field */
+	uint32	size;		/* sizeof the field */
+	uint	band;		/* band (optional) */
+} rw_reg_t;
+
+/* Structure used by GET/SET_ATTEN ioctls - it controls power in b/g-band */
+/* PCL - Power Control Loop */
+typedef struct {
+	uint16	auto_ctrl;	/* WL_ATTEN_XX */
+	uint16	bb;		/* Baseband attenuation */
+	uint16	radio;		/* Radio attenuation */
+	uint16	txctl1;		/* Radio TX_CTL1 value */
+} atten_t;
+
+/* Per-AC retry parameters */
+struct wme_tx_params_s {
+	uint8  short_retry;
+	uint8  short_fallback;
+	uint8  long_retry;
+	uint8  long_fallback;
+	uint16 max_rate;  /* In units of 512 Kbps */
+};
+
+typedef struct wme_tx_params_s wme_tx_params_t;
+
+#define WL_WME_TX_PARAMS_IO_BYTES (sizeof(wme_tx_params_t) * AC_COUNT)
+
+typedef struct wl_plc_nodelist {
+	uint32 count;			/* Number of nodes */
+	struct _node {
+		struct ether_addr ea;	/* Node ether address */
+		uint32 node_type;	/* Node type */
+		uint32 cost;		/* PLC affinity */
+	} node[1];
+} wl_plc_nodelist_t;
+
+typedef struct wl_plc_params {
+	uint32	cmd;			/* Command */
+	uint8	plc_failover;		/* PLC failover control/status */
+	struct	ether_addr node_ea;	/* Node ether address */
+	uint32	cost;			/* Link cost or mac cost */
+} wl_plc_params_t;
+
+/* Used to get specific link/ac parameters */
+typedef struct {
+	int32 ac;
+	uint8 val;
+	struct ether_addr ea;
+} link_val_t;
+
+
+
+typedef struct {
+	uint16			ver;		/* version of this struct */
+	uint16			len;		/* length in bytes of this structure */
+	uint16			cap;		/* sta's advertised capabilities */
+	uint32			flags;		/* flags defined below */
+	uint32			idle;		/* time since data pkt rx'd from sta */
+	struct ether_addr	ea;		/* Station address */
+	wl_rateset_t		rateset;	/* rateset in use */
+	uint32			in;		/* seconds elapsed since associated */
+	uint32			listen_interval_inms; /* Min Listen interval in ms for this STA */
+	uint32			tx_pkts;	/* # of packets transmitted */
+	uint32			tx_failures;	/* # of packets failed */
+	uint32			rx_ucast_pkts;	/* # of unicast packets received */
+	uint32			rx_mcast_pkts;	/* # of multicast packets received */
+	uint32			tx_rate;	/* Rate of last successful tx frame */
+	uint32			rx_rate;	/* Rate of last successful rx frame */
+	uint32			rx_decrypt_succeeds;	/* # of packet decrypted successfully */
+	uint32			rx_decrypt_failures;	/* # of packet decrypted unsuccessfully */
+	uint32			tx_tot_pkts;	/* # of tx pkts (ucast + mcast) */
+	uint32			rx_tot_pkts;	/* # of data packets recvd (uni + mcast) */
+	uint32			tx_mcast_pkts;	/* # of mcast pkts txed */
+	uint64			tx_tot_bytes;	/* data bytes txed (ucast + mcast) */
+	uint64			rx_tot_bytes;	/* data bytes recvd (ucast + mcast) */
+	uint64			tx_ucast_bytes;	/* data bytes txed (ucast) */
+	uint64			tx_mcast_bytes;	/* # data bytes txed (mcast) */
+	uint64			rx_ucast_bytes;	/* data bytes recvd (ucast) */
+	uint64			rx_mcast_bytes;	/* data bytes recvd (mcast) */
+	int8			rssi[WL_STA_ANT_MAX];	/* average rssi per antenna
+							 * of data frames
+							 */
+	int8			nf[WL_STA_ANT_MAX];	/* per antenna noise floor */
+	uint16			aid;		/* association ID */
+	uint16			ht_capabilities;	/* advertised ht caps */
+	uint16			vht_flags;		/* converted vht flags */
+	uint32			tx_pkts_retried; /* # of frames where a retry was necessary */
+	uint32			tx_pkts_retry_exhausted; /* # of frames where retries were
+						 * exhausted
+						 */
+	int8			rx_lastpkt_rssi[WL_STA_ANT_MAX]; /* Per antenna RSSI of last
+								* received data frame.
+								*/
+} sta_info_t;
+
+#define WL_OLD_STAINFO_SIZE	OFFSETOF(sta_info_t, tx_tot_pkts)
+
+#define WL_STA_VER		4
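+
+/* Usage sketch: per the versioning above, callers should validate 'ver'
+ * and 'len' before reading newer fields; records from older firmware may
+ * end at WL_OLD_STAINFO_SIZE ('sta' is a hypothetical response pointer).
+ *
+ *	if (sta->ver > WL_STA_VER)
+ *		... layout unknown, bail out ...
+ *	if (sta->len < sizeof(sta_info_t))
+ *		... old/short record: only the legacy fields are valid ...
+ */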
+
+#define	WLC_NUMRATES	16	/* max # of rates in a rateset */
+
+typedef struct wlc_rateset {
+	uint32	count;			/* number of rates in rates[] */
+	uint8	rates[WLC_NUMRATES];	/* rates in 500kbps units w/hi bit set if basic */
+	uint8	htphy_membership;	/* HT PHY Membership */
+	uint8	mcs[MCSSET_LEN];	/* supported mcs index bit map */
+	uint16  vht_mcsmap;		/* supported vht mcs nss bit map */
+} wlc_rateset_t;
+
+/* Used to get specific STA parameters */
+typedef struct {
+	uint32	val;
+	struct ether_addr ea;
+} scb_val_t;
+
+/* Used by iovar versions of some ioctls, i.e. WLC_SCB_AUTHORIZE et al */
+typedef struct {
+	uint32 code;
+	scb_val_t ioctl_args;
+} authops_t;
+
+/* channel encoding */
+typedef struct channel_info {
+	int hw_channel;
+	int target_channel;
+	int scan_channel;
+} channel_info_t;
+
+/* For ioctls that take a list of MAC addresses */
+struct maclist {
+	uint count;			/* number of MAC addresses */
+	struct ether_addr ea[1];	/* variable length array of MAC addresses */
+};
+
+/* get pkt count struct passed through ioctl */
+typedef struct get_pktcnt {
+	uint rx_good_pkt;
+	uint rx_bad_pkt;
+	uint tx_good_pkt;
+	uint tx_bad_pkt;
+	uint rx_ocast_good_pkt; /* unicast packets destined for others */
+} get_pktcnt_t;
+
+/* NINTENDO2 */
+#define LQ_IDX_MIN              0
+#define LQ_IDX_MAX              1
+#define LQ_IDX_AVG              2
+#define LQ_IDX_SUM              2
+#define LQ_IDX_LAST             3
+#define LQ_STOP_MONITOR         0
+#define LQ_START_MONITOR        1
+
+/* Get averaged (min/max/avg) RSSI and SNR values */
+typedef struct {
+	int rssi[LQ_IDX_LAST];  /* Array to keep min, max, avg rssi */
+	int snr[LQ_IDX_LAST];   /* Array to keep min, max, avg snr */
+	int isvalid;            /* Flag indicating whether above data is valid */
+} wl_lq_t; /* Link Quality */
+
+typedef enum wl_wakeup_reason_type {
+	LCD_ON = 1,
+	LCD_OFF,
+	DRC1_WAKE,
+	DRC2_WAKE,
+	REASON_LAST
+} wl_wr_type_t;
+
+typedef struct {
+/* Unique filter id */
+	uint32	id;
+
+/* stores the reason for the last wake up */
+	uint8	reason;
+} wl_wr_t;
+
+/* Get MAC specific rate histogram command */
+typedef struct {
+	struct	ether_addr ea;	/* MAC Address */
+	uint8	ac_cat;	/* Access Category */
+	uint8	num_pkts;	/* Number of packet entries to be averaged */
+} wl_mac_ratehisto_cmd_t;	/* MAC Specific Rate Histogram command */
+
+/* Get MAC rate histogram response */
+typedef struct {
+	uint32	rate[DOT11_RATE_MAX + 1];	/* Rates */
+	uint32	mcs[WL_RATESET_SZ_HT_MCS * WL_TX_CHAINS_MAX];	/* MCS counts */
+	uint32	vht[WL_RATESET_SZ_VHT_MCS][WL_TX_CHAINS_MAX];	/* VHT counts */
+	uint32	tsf_timer[2][2];	/* Start and end TSF time, each as two 32-bit halves */
+} wl_mac_ratehisto_res_t;	/* MAC Specific Rate Histogram Response */
+
+/* Linux network driver ioctl encoding */
+typedef struct wl_ioctl {
+	uint cmd;	/* common ioctl definition */
+	void *buf;	/* pointer to user buffer */
+	uint len;	/* length of user buffer */
+	uint8 set;		/* 1=set IOCTL; 0=query IOCTL */
+	uint used;	/* bytes read or written (optional) */
+	uint needed;	/* bytes needed (optional) */
+} wl_ioctl_t;
+#ifdef CONFIG_COMPAT
+typedef struct compat_wl_ioctl {
+	uint cmd;	/* common ioctl definition */
+	compat_uptr_t buf;	/* pointer to user buffer (compat buffer)*/
+	uint len;	/* length of user buffer */
+	uint8 set;		/* 1=set IOCTL; 0=query IOCTL */
+	uint used;	/* bytes read or written (optional) */
+	uint needed;	/* bytes needed (optional) */
+} compat_wl_ioctl_t;
+#endif /* CONFIG_COMPAT */
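+
+/* Usage sketch: filling a wl_ioctl_t for a query; the surrounding ifreq/
+ * SIOCDEVPRIVATE plumbing is driver-specific and omitted ('magic' is a
+ * hypothetical output variable; WLC_GET_MAGIC comes from wlioctl_defs.h).
+ *
+ *	int magic;
+ *	wl_ioctl_t ioc;
+ *	ioc.cmd = WLC_GET_MAGIC;
+ *	ioc.buf = &magic;
+ *	ioc.len = sizeof(magic);
+ *	ioc.set = 0;
+ */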
+
+
+/*
+ * Structure for passing hardware and software
+ * revision info up from the driver.
+ */
+typedef struct wlc_rev_info {
+	uint		vendorid;	/* PCI vendor id */
+	uint		deviceid;	/* device id of chip */
+	uint		radiorev;	/* radio revision */
+	uint		chiprev;	/* chip revision */
+	uint		corerev;	/* core revision */
+	uint		boardid;	/* board identifier (usu. PCI sub-device id) */
+	uint		boardvendor;	/* board vendor (usu. PCI sub-vendor id) */
+	uint		boardrev;	/* board revision */
+	uint		driverrev;	/* driver version */
+	uint		ucoderev;	/* microcode version */
+	uint		bus;		/* bus type */
+	uint		chipnum;	/* chip number */
+	uint		phytype;	/* phy type */
+	uint		phyrev;		/* phy revision */
+	uint		anarev;		/* anacore rev */
+	uint		chippkg;	/* chip package info */
+	uint		nvramrev;	/* nvram revision number */
+} wlc_rev_info_t;
+
+#define WL_REV_INFO_LEGACY_LENGTH	48
+
+#define WL_BRAND_MAX 10
+typedef struct wl_instance_info {
+	uint instance;
+	char brand[WL_BRAND_MAX];
+} wl_instance_info_t;
+
+/* structure to change size of tx fifo */
+typedef struct wl_txfifo_sz {
+	uint16	magic;
+	uint16	fifo;
+	uint16	size;
+} wl_txfifo_sz_t;
+
+/* Transfer info about an IOVar from the driver */
+/* Max supported IOV name size in bytes (+1 for nul termination) */
+#define WLC_IOV_NAME_LEN 30
+typedef struct wlc_iov_trx_s {
+	uint8 module;
+	uint8 type;
+	char name[WLC_IOV_NAME_LEN];
+} wlc_iov_trx_t;
+
+/* bump this number if you change the ioctl interface */
+#define WLC_IOCTL_VERSION	2
+#define WLC_IOCTL_VERSION_LEGACY_IOTYPES	1
+
+#ifdef CONFIG_USBRNDIS_RETAIL
+/* struct passed in for WLC_NDCONFIG_ITEM */
+typedef struct {
+	char *name;
+	void *param;
+} ndconfig_item_t;
+#endif
+
+
+#define WL_PHY_PAVARS_LEN	32	/* Phy type, Band range, chain, a1[0], b0[0], b1[0] ... */
+
+#define WL_PHY_PAVAR_VER	1	/* pavars version */
+#define WL_PHY_PAVARS2_NUM	3	/* a1, b0, b1 */
+typedef struct wl_pavars2 {
+	uint16 ver;		/* version of this struct */
+	uint16 len;		/* len of this structure */
+	uint16 inuse;		/* driver returns 1 for a1,b0,b1 in the current band range */
+	uint16 phy_type;	/* phy type */
+	uint16 bandrange;
+	uint16 chain;
+	uint16 inpa[WL_PHY_PAVARS2_NUM];	/* phy pavars for one band range */
+} wl_pavars2_t;
+
+typedef struct wl_po {
+	uint16	phy_type;	/* Phy type */
+	uint16	band;
+	uint16	cckpo;
+	uint32	ofdmpo;
+	uint16	mcspo[8];
+} wl_po_t;
+
+#define WL_NUM_RPCALVARS 5	/* number of rpcal vars */
+
+typedef struct wl_rpcal {
+	uint16 value;
+	uint16 update;
+} wl_rpcal_t;
+
+typedef struct wl_aci_args {
+	int enter_aci_thresh; /* Trigger level to start detecting ACI */
+	int exit_aci_thresh; /* Trigger level to exit ACI mode */
+	int usec_spin; /* microsecs to delay between rssi samples */
+	int glitch_delay; /* interval between ACI scans when glitch count is consistently high */
+	uint16 nphy_adcpwr_enter_thresh;	/* ADC power to enter ACI mitigation mode */
+	uint16 nphy_adcpwr_exit_thresh;	/* ADC power to exit ACI mitigation mode */
+	uint16 nphy_repeat_ctr;		/* Number of tries per channel to compute power */
+	uint16 nphy_num_samples;	/* Number of samples to compute power on one channel */
+	uint16 nphy_undetect_window_sz;	/* num of undetects to exit ACI Mitigation mode */
+	uint16 nphy_b_energy_lo_aci;	/* low ACI power energy threshold for bphy */
+	uint16 nphy_b_energy_md_aci;	/* mid ACI power energy threshold for bphy */
+	uint16 nphy_b_energy_hi_aci;	/* high ACI power energy threshold for bphy */
+	uint16 nphy_noise_noassoc_glitch_th_up; /* wl interference 4 */
+	uint16 nphy_noise_noassoc_glitch_th_dn;
+	uint16 nphy_noise_assoc_glitch_th_up;
+	uint16 nphy_noise_assoc_glitch_th_dn;
+	uint16 nphy_noise_assoc_aci_glitch_th_up;
+	uint16 nphy_noise_assoc_aci_glitch_th_dn;
+	uint16 nphy_noise_assoc_enter_th;
+	uint16 nphy_noise_noassoc_enter_th;
+	uint16 nphy_noise_assoc_rx_glitch_badplcp_enter_th;
+	uint16 nphy_noise_noassoc_crsidx_incr;
+	uint16 nphy_noise_assoc_crsidx_incr;
+	uint16 nphy_noise_crsidx_decr;
+} wl_aci_args_t;
+
+#define WL_ACI_ARGS_LEGACY_LENGTH	16	/* bytes of pre NPHY aci args */
+#define	WL_SAMPLECOLLECT_T_VERSION	2	/* version of wl_samplecollect_args_t struct */
+typedef struct wl_samplecollect_args {
+	/* version 0 fields */
+	uint8 coll_us;
+	int cores;
+	/* add'l version 1 fields */
+	uint16 version;     /* see definition of WL_SAMPLECOLLECT_T_VERSION */
+	uint16 length;      /* length of entire structure */
+	int8 trigger;
+	uint16 timeout;
+	uint16 mode;
+	uint32 pre_dur;
+	uint32 post_dur;
+	uint8 gpio_sel;
+	uint8 downsamp;
+	uint8 be_deaf;
+	uint8 agc;		/* loop from init gain and going down */
+	uint8 filter;		/* override high pass corners to lowest */
+	/* add'l version 2 fields */
+	uint8 trigger_state;
+	uint8 module_sel1;
+	uint8 module_sel2;
+	uint16 nsamps;
+	int bitStart;
+	uint32 gpioCapMask;
+} wl_samplecollect_args_t;
+
+#define	WL_SAMPLEDATA_T_VERSION		1	/* version of wl_samplecollect_args_t struct */
+/* version for unpacked sample data, int16 {(I,Q),Core(0..N)} */
+#define	WL_SAMPLEDATA_T_VERSION_SPEC_AN 2
+
+typedef struct wl_sampledata {
+	uint16 version;	/* structure version */
+	uint16 size;	/* size of structure */
+	uint16 tag;	/* Header/Data */
+	uint16 length;	/* data length */
+	uint32 flag;	/* bit def */
+} wl_sampledata_t;
+
+
+/* WL_OTA START */
+/* OTA Test Status */
+enum {
+	WL_OTA_TEST_IDLE = 0,	/* Default Idle state */
+	WL_OTA_TEST_ACTIVE = 1,	/* Test Running */
+	WL_OTA_TEST_SUCCESS = 2,	/* Successfully Finished Test */
+	WL_OTA_TEST_FAIL = 3	/* Test Failed in the Middle */
+};
+/* OTA SYNC Status */
+enum {
+	WL_OTA_SYNC_IDLE = 0,	/* Idle state */
+	WL_OTA_SYNC_ACTIVE = 1,	/* Waiting for Sync */
+	WL_OTA_SYNC_FAIL = 2	/* Sync pkt not received */
+};
+
+/* Various error states dut can get stuck during test */
+enum {
+	WL_OTA_SKIP_TEST_CAL_FAIL = 1,		/* Phy calibration failed */
+	WL_OTA_SKIP_TEST_SYNCH_FAIL = 2,	/* Sync packet not received */
+	WL_OTA_SKIP_TEST_FILE_DWNLD_FAIL = 3,	/* Cmd flow file download failed */
+	WL_OTA_SKIP_TEST_NO_TEST_FOUND = 4,	/* No test found in Flow file */
+	WL_OTA_SKIP_TEST_WL_NOT_UP = 5,		/* WL UP failed */
+	WL_OTA_SKIP_TEST_UNKNOWN_CALL		/* Unintentional scheduling of ota test */
+};
+
+/* Differentiator for ota_tx and ota_rx */
+enum {
+	WL_OTA_TEST_TX = 0,		/* ota_tx */
+	WL_OTA_TEST_RX = 1		/* ota_rx */
+};
+
+/* Covers 3 modes of operation: 20 MHz, 40 MHz, 20-in-40 MHz */
+enum {
+	WL_OTA_TEST_BW_20_IN_40MHZ = 0,	/* 20-in-40 MHz operation */
+	WL_OTA_TEST_BW_20MHZ = 1,	/* 20 MHz operation */
+	WL_OTA_TEST_BW_40MHZ = 2	/* full 40 MHz operation */
+};
+typedef struct ota_rate_info {
+	uint8 rate_cnt;					/* Total number of rates */
+	uint8 rate_val_mbps[WL_OTA_TEST_MAX_NUM_RATE];	/* array of rates from 1mbps to 130mbps */
+							/* for legacy rates: rate in mbps * 2 */
+							/* for HT rates : mcs index */
+} ota_rate_info_t;
+
+typedef struct ota_power_info {
+	int8 pwr_ctrl_on;	/* power control on/off */
+	int8 start_pwr;		/* starting power/index */
+	int8 delta_pwr;		/* delta power/index */
+	int8 end_pwr;		/* end power/index */
+} ota_power_info_t;
+
+typedef struct ota_packetengine {
+	uint16 delay;           /* Inter-packet delay */
+				/* for ota_tx, delay is tx ifs in micro seconds */
+				/* for ota_rx, delay is wait time in milliseconds */
+	uint16 nframes;         /* Number of frames */
+	uint16 length;          /* Packet length */
+} ota_packetengine_t;
+
+/* Test info vector */
+typedef struct wl_ota_test_args {
+	uint8 cur_test;			/* test phase */
+	uint8 chan;			/* channel */
+	uint8 bw;			/* bandwidth */
+	uint8 control_band;		/* control band */
+	uint8 stf_mode;			/* stf mode */
+	ota_rate_info_t rt_info;	/* Rate info */
+	ota_packetengine_t pkteng;	/* packeteng info */
+	uint8 txant;			/* tx antenna */
+	uint8 rxant;			/* rx antenna */
+	ota_power_info_t pwr_info;	/* power sweep info */
+	uint8 wait_for_sync;		/* wait for sync or not */
+} wl_ota_test_args_t;
+
+typedef struct wl_ota_test_vector {
+	wl_ota_test_args_t test_arg[WL_OTA_TEST_MAX_NUM_SEQ];	/* Test argument struct */
+	uint16 test_cnt;					/* Total number of tests */
+	uint8 file_dwnld_valid;					/* File successfully downloaded */
+	uint8 sync_timeout;					/* sync packet timeout */
+	int8 sync_fail_action;					/* sync fail action */
+	struct ether_addr sync_mac;				/* macaddress for sync pkt */
+	struct ether_addr tx_mac;				/* macaddress for tx */
+	struct ether_addr rx_mac;				/* macaddress for rx */
+	int8 loop_test;					/* dbg feature to loop the test */
+} wl_ota_test_vector_t;
+
+
+/* struct copied back from dongle to host to query the status */
+typedef struct wl_ota_test_status {
+	int16 cur_test_cnt;		/* test phase */
+	int8 skip_test_reason;		/* skip test reason */
+	wl_ota_test_args_t test_arg;	/* cur test arg details */
+	uint16 test_cnt;		/* total number of tests downloaded */
+	uint8 file_dwnld_valid;		/* file successfully downloaded ? */
+	uint8 sync_timeout;		/* sync timeout */
+	int8 sync_fail_action;		/* sync fail action */
+	struct ether_addr sync_mac;	/* macaddress for sync pkt */
+	struct ether_addr tx_mac;	/* tx mac address */
+	struct ether_addr rx_mac;	/* rx mac address */
+	uint8  test_stage;		/* check the test status */
+	int8 loop_test;		/* Debug feature that puts the test engine in a loop */
+	uint8 sync_status;		/* sync status */
+} wl_ota_test_status_t;
+
+/* WL_OTA END */
+
+/* wl_radar_args_t */
+typedef struct {
+	int npulses;	/* required number of pulses at n * t_int */
+	int ncontig;	/* required number of pulses at t_int */
+	int min_pw;	/* minimum pulse width (20 MHz clocks) */
+	int max_pw;	/* maximum pulse width (20 MHz clocks) */
+	uint16 thresh0;	/* Radar detection, thresh 0 */
+	uint16 thresh1;	/* Radar detection, thresh 1 */
+	uint16 blank;	/* Radar detection, blank control */
+	uint16 fmdemodcfg;	/* Radar detection, fmdemod config */
+	int npulses_lp;  /* Radar detection, minimum long pulses */
+	int min_pw_lp; /* Minimum pulsewidth for long pulses */
+	int max_pw_lp; /* Maximum pulsewidth for long pulses */
+	int min_fm_lp; /* Minimum fm for long pulses */
+	int max_span_lp;  /* Maximum deltat for long pulses */
+	int min_deltat; /* Minimum spacing between pulses */
+	int max_deltat; /* Maximum spacing between pulses */
+	uint16 autocorr;	/* Radar detection, autocorr on or off */
+	uint16 st_level_time;	/* Radar detection, start_timing level */
+	uint16 t2_min; /* minimum clocks needed to remain in state 2 */
+	uint32 version; /* version */
+	uint32 fra_pulse_err;	/* sample error margin for detecting French radar pulses */
+	int npulses_fra;  /* Radar detection, minimum French pulses set */
+	int npulses_stg2;  /* Radar detection, minimum staggered-2 pulses set */
+	int npulses_stg3;  /* Radar detection, minimum staggered-3 pulses set */
+	uint16 percal_mask;	/* defines which period cal is masked from radar detection */
+	int quant;	/* quantization resolution to pulse positions */
+	uint32 min_burst_intv_lp;	/* minimum burst to burst interval for bin3 radar */
+	uint32 max_burst_intv_lp;	/* maximum burst to burst interval for bin3 radar */
+	int nskip_rst_lp;	/* number of skipped pulses before resetting lp buffer */
+	int max_pw_tol;	/* maximum tolerance allowed in detected pulse width for radar detection */
+	uint16 feature_mask; /* 16-bit mask to specify enabled features */
+} wl_radar_args_t;
+
+#define WL_RADAR_ARGS_VERSION 2
+
+typedef struct {
+	uint32 version; /* version */
+	uint16 thresh0_20_lo;	/* Radar detection, thresh 0 (range 5250-5350MHz) for BW 20MHz */
+	uint16 thresh1_20_lo;	/* Radar detection, thresh 1 (range 5250-5350MHz) for BW 20MHz */
+	uint16 thresh0_40_lo;	/* Radar detection, thresh 0 (range 5250-5350MHz) for BW 40MHz */
+	uint16 thresh1_40_lo;	/* Radar detection, thresh 1 (range 5250-5350MHz) for BW 40MHz */
+	uint16 thresh0_80_lo;	/* Radar detection, thresh 0 (range 5250-5350MHz) for BW 80MHz */
+	uint16 thresh1_80_lo;	/* Radar detection, thresh 1 (range 5250-5350MHz) for BW 80MHz */
+	uint16 thresh0_20_hi;	/* Radar detection, thresh 0 (range 5470-5725MHz) for BW 20MHz */
+	uint16 thresh1_20_hi;	/* Radar detection, thresh 1 (range 5470-5725MHz) for BW 20MHz */
+	uint16 thresh0_40_hi;	/* Radar detection, thresh 0 (range 5470-5725MHz) for BW 40MHz */
+	uint16 thresh1_40_hi;	/* Radar detection, thresh 1 (range 5470-5725MHz) for BW 40MHz */
+	uint16 thresh0_80_hi;	/* Radar detection, thresh 0 (range 5470-5725MHz) for BW 80MHz */
+	uint16 thresh1_80_hi;	/* Radar detection, thresh 1 (range 5470-5725MHz) for BW 80MHz */
+#ifdef WL11AC160
+	uint16 thresh0_160_lo;	/* Radar detection, thresh 0 (range 5250-5350MHz) for BW 160MHz */
+	uint16 thresh1_160_lo;	/* Radar detection, thresh 1 (range 5250-5350MHz) for BW 160MHz */
+	uint16 thresh0_160_hi;	/* Radar detection, thresh 0 (range 5470-5725MHz) for BW 160MHz */
+	uint16 thresh1_160_hi;	/* Radar detection, thresh 1 (range 5470-5725MHz) for BW 160MHz */
+#endif /* WL11AC160 */
+} wl_radar_thr_t;
+
+#define WL_RADAR_THR_VERSION	2
+
+/* RSSI per antenna */
+typedef struct {
+	uint32	version;		/* version field */
+	uint32	count;			/* number of valid antenna rssi */
+	int8 rssi_ant[WL_RSSI_ANT_MAX];	/* rssi per antenna */
+} wl_rssi_ant_t;
+
+/* data structure used in 'dfs_status' wl interface, which is used to query dfs status */
+typedef struct {
+	uint state;		/* noted by WL_DFS_CACSTATE_XX. */
+	uint duration;		/* time spent in ms in state. */
+	/* as dfs enters the ISM state, it removes the operational channel from the
+	 * quiet channel list and notes it in chanspec_cleared; set to 0 if no
+	 * channel is cleared
+	 */
+	chanspec_t chanspec_cleared;
+	/* chanspec_cleared used to be a uint; the uint16 pad preserves the struct size */
+	uint16 pad;
+} wl_dfs_status_t;
+
+/* data structure used in 'radar_status' wl interface, which is used to query radar detection status */
+typedef struct {
+	bool detected;
+	int count;
+	bool pretended;
+	uint32 radartype;
+	uint32 timenow;
+	uint32 timefromL;
+	int lp_csect_single;
+	int detected_pulse_index;
+	int nconsecq_pulses;
+	chanspec_t ch;
+	int pw[10];
+	int intv[10];
+	int fm[10];
+} wl_radar_status_t;
+
+#define NUM_PWRCTRL_RATES 12
+
+typedef struct {
+	uint8 txpwr_band_max[NUM_PWRCTRL_RATES];	/* User set target */
+	uint8 txpwr_limit[NUM_PWRCTRL_RATES];		/* reg and local power limit */
+	uint8 txpwr_local_max;				/* local max according to the AP */
+	uint8 txpwr_local_constraint;			/* local constraint according to the AP */
+	uint8 txpwr_chan_reg_max;			/* Regulatory max for this channel */
+	uint8 txpwr_target[2][NUM_PWRCTRL_RATES];	/* Latest target for 2.4 and 5 GHz */
+	uint8 txpwr_est_Pout[2];			/* Latest estimate for 2.4 and 5 GHz */
+	uint8 txpwr_opo[NUM_PWRCTRL_RATES];		/* On G phy, OFDM power offset */
+	uint8 txpwr_bphy_cck_max[NUM_PWRCTRL_RATES];	/* Max CCK power for this band (SROM) */
+	uint8 txpwr_bphy_ofdm_max;			/* Max OFDM power for this band (SROM) */
+	uint8 txpwr_aphy_max[NUM_PWRCTRL_RATES];	/* Max power for A band (SROM) */
+	int8  txpwr_antgain[2];				/* Ant gain for each band - from SROM */
+	uint8 txpwr_est_Pout_gofdm;			/* Pwr estimate for 2.4 OFDM */
+} tx_power_legacy_t;
+
+#define WL_TX_POWER_RATES_LEGACY    45
+#define WL_TX_POWER_MCS20_FIRST         12
+#define WL_TX_POWER_MCS20_NUM           16
+#define WL_TX_POWER_MCS40_FIRST         28
+#define WL_TX_POWER_MCS40_NUM           17
+
+typedef struct {
+	uint32 flags;
+	chanspec_t chanspec;                 /* txpwr report for this channel */
+	chanspec_t local_chanspec;           /* channel on which we are associated */
+	uint8 local_max;                 /* local max according to the AP */
+	uint8 local_constraint;              /* local constraint according to the AP */
+	int8  antgain[2];                /* Ant gain for each band - from SROM */
+	uint8 rf_cores;                  /* count of RF Cores being reported */
+	uint8 est_Pout[4];                           /* Latest tx power out estimate per RF
+							  * chain without adjustment
+							  */
+	uint8 est_Pout_cck;                          /* Latest CCK tx power out estimate */
+	uint8 user_limit[WL_TX_POWER_RATES_LEGACY];  /* User limit */
+	uint8 reg_limit[WL_TX_POWER_RATES_LEGACY];   /* Regulatory power limit */
+	uint8 board_limit[WL_TX_POWER_RATES_LEGACY]; /* Max power board can support (SROM) */
+	uint8 target[WL_TX_POWER_RATES_LEGACY];      /* Latest target power */
+} tx_power_legacy2_t;
+
+/* TX Power index defines */
+#define WL_NUM_RATES_CCK			4 /* 1, 2, 5.5, 11 Mbps */
+#define WL_NUM_RATES_OFDM			8 /* 6, 9, 12, 18, 24, 36, 48, 54 Mbps SISO/CDD */
+#define WL_NUM_RATES_MCS_1STREAM	8 /* MCS 0-7 1-stream rates - SISO/CDD/STBC/MCS */
+#define WL_NUM_RATES_EXTRA_VHT		2 /* Additional VHT 11AC rates */
+#define WL_NUM_RATES_VHT			10
+#define WL_NUM_RATES_MCS32			1
+
+#define WLC_NUM_RATES_CCK       WL_NUM_RATES_CCK
+#define WLC_NUM_RATES_OFDM      WL_NUM_RATES_OFDM
+#define WLC_NUM_RATES_MCS_1_STREAM  WL_NUM_RATES_MCS_1STREAM
+#define WLC_NUM_RATES_MCS_2_STREAM  WL_NUM_RATES_MCS_1STREAM
+#define WLC_NUM_RATES_MCS32     WL_NUM_RATES_MCS32
+#define WL_TX_POWER_CCK_NUM     WL_NUM_RATES_CCK
+#define WL_TX_POWER_OFDM_NUM        WL_NUM_RATES_OFDM
+#define WL_TX_POWER_MCS_1_STREAM_NUM    WL_NUM_RATES_MCS_1STREAM
+#define WL_TX_POWER_MCS_2_STREAM_NUM    WL_NUM_RATES_MCS_1STREAM
+#define WL_TX_POWER_MCS_32_NUM      WL_NUM_RATES_MCS32
+
+#define WL_NUM_2x2_ELEMENTS		4
+#define WL_NUM_3x3_ELEMENTS		6
+
+typedef struct {
+	uint16 ver;				/* version of this struct */
+	uint16 len;				/* length in bytes of this structure */
+	uint32 flags;
+	chanspec_t chanspec;			/* txpwr report for this channel */
+	chanspec_t local_chanspec;		/* channel on which we are associated */
+	uint32     buflen;			/* ppr buffer length */
+	uint8      pprbuf[1];			/* Latest target power buffer */
+} wl_txppr_t;
+
+#define WL_TXPPR_VERSION	1
+#define WL_TXPPR_LENGTH	(sizeof(wl_txppr_t))
+#define TX_POWER_T_VERSION	44
+
+
+typedef struct tx_inst_power {
+	uint8 txpwr_est_Pout[2];			/* Latest estimate for 2.4 and 5 GHz */
+	uint8 txpwr_est_Pout_gofdm;			/* Pwr estimate for 2.4 GHz OFDM */
+} tx_inst_power_t;
+
+#define WL_NUM_TXCHAIN_MAX	4
+typedef struct wl_txchain_pwr_offsets {
+	int8 offset[WL_NUM_TXCHAIN_MAX];	/* quarter dBm signed offset for each chain */
+} wl_txchain_pwr_offsets_t;
+/* maximum channels returned by the get valid channels iovar */
+#define WL_NUMCHANNELS		64
+
+/*
+ * Join preference iovar value is an array of tuples. Each tuple has a one-byte type,
+ * a one-byte length, and a variable length value.  RSSI type tuple must be present
+ * in the array.
+ *
+ * Types are defined in "join preference types" section.
+ *
+ * Length is the value size in octets. It is reserved for WL_JOIN_PREF_WPA type tuple
+ * and must be set to zero.
+ *
+ * Values are defined below.
+ *
+ * 1. RSSI - 2 octets
+ * offset 0: reserved
+ * offset 1: reserved
+ *
+ * 2. WPA - 2 + 12 * n octets (n is # tuples defined below)
+ * offset 0: reserved
+ * offset 1: # of tuples
+ * offset 2: tuple 1
+ * offset 14: tuple 2
+ * ...
+ * offset 2 + 12 * (n - 1) octets: tuple n
+ *
+ * struct wpa_cfg_tuple {
+ *   uint8 akm[DOT11_OUI_LEN+1];     akm suite
+ *   uint8 ucipher[DOT11_OUI_LEN+1]; unicast cipher suite
+ *   uint8 mcipher[DOT11_OUI_LEN+1]; multicast cipher suite
+ * };
+ *
+ * multicast cipher suite can be specified as a specific cipher suite or WL_WPA_ACP_MCS_ANY.
+ *
+ * 3. BAND - 2 octets
+ * offset 0: reserved
+ * offset 1: see "band preference" and "band types"
+ *
+ * 4. BAND RSSI - 2 octets
+ * offset 0: band types
+ * offset 1: +ve RSSI boost value in dB
+ */
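+
+/* Usage sketch: a minimal join preference value with one RSSI tuple and
+ * one BAND tuple, per the layout above (the WL_JOIN_PREF_* and WLC_BAND_*
+ * constants are assumed to come from wlioctl_defs.h).
+ *
+ *	uint8 buf[8];
+ *	buf[0] = WL_JOIN_PREF_RSSI;  buf[1] = 2;
+ *	buf[2] = 0;                  buf[3] = 0;		reserved value
+ *	buf[4] = WL_JOIN_PREF_BAND;  buf[5] = 2;
+ *	buf[6] = 0;                  buf[7] = WLC_BAND_5G;	prefer 5 GHz
+ */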
+
+struct tsinfo_arg {
+	uint8 octets[3];
+};
+
+#define	NFIFO			6	/* # tx/rx fifo pairs */
+#define NREINITREASONCOUNT	8
+#define REINITREASONIDX(_x)	(((_x) < NREINITREASONCOUNT) ? (_x) : 0)
+
+#define	WL_CNT_T_VERSION	10	/* current version of wl_cnt_t struct */
+
+typedef struct {
+	uint16	version;	/* see definition of WL_CNT_T_VERSION */
+	uint16	length;		/* length of entire structure */
+
+	/* transmit stat counters */
+	uint32	txframe;	/* tx data frames */
+	uint32	txbyte;		/* tx data bytes */
+	uint32	txretrans;	/* tx mac retransmits */
+	uint32	txerror;	/* tx data errors (derived: sum of others) */
+	uint32	txctl;		/* tx management frames */
+	uint32	txprshort;	/* tx short preamble frames */
+	uint32	txserr;		/* tx status errors */
+	uint32	txnobuf;	/* tx out of buffers errors */
+	uint32	txnoassoc;	/* tx discard because we're not associated */
+	uint32	txrunt;		/* tx runt frames */
+	uint32	txchit;		/* tx header cache hit (fastpath) */
+	uint32	txcmiss;	/* tx header cache miss (slowpath) */
+
+	/* transmit chip error counters */
+	uint32	txuflo;		/* tx fifo underflows */
+	uint32	txphyerr;	/* tx phy errors (indicated in tx status) */
+	uint32	txphycrs;
+
+	/* receive stat counters */
+	uint32	rxframe;	/* rx data frames */
+	uint32	rxbyte;		/* rx data bytes */
+	uint32	rxerror;	/* rx data errors (derived: sum of others) */
+	uint32	rxctl;		/* rx management frames */
+	uint32	rxnobuf;	/* rx out of buffers errors */
+	uint32	rxnondata;	/* rx non data frames in the data channel errors */
+	uint32	rxbadds;	/* rx bad DS errors */
+	uint32	rxbadcm;	/* rx bad control or management frames */
+	uint32	rxfragerr;	/* rx fragmentation errors */
+	uint32	rxrunt;		/* rx runt frames */
+	uint32	rxgiant;	/* rx giant frames */
+	uint32	rxnoscb;	/* rx no scb error */
+	uint32	rxbadproto;	/* rx invalid frames */
+	uint32	rxbadsrcmac;	/* rx frames with Invalid Src Mac */
+	uint32	rxbadda;	/* rx frames tossed for invalid da */
+	uint32	rxfilter;	/* rx frames filtered out */
+
+	/* receive chip error counters */
+	uint32	rxoflo;		/* rx fifo overflow errors */
+	uint32	rxuflo[NFIFO];	/* rx dma descriptor underflow errors */
+
+	uint32	d11cnt_txrts_off;	/* d11cnt txrts value when reset d11cnt */
+	uint32	d11cnt_rxcrc_off;	/* d11cnt rxcrc value when reset d11cnt */
+	uint32	d11cnt_txnocts_off;	/* d11cnt txnocts value when reset d11cnt */
+
+	/* misc counters */
+	uint32	dmade;		/* tx/rx dma descriptor errors */
+	uint32	dmada;		/* tx/rx dma data errors */
+	uint32	dmape;		/* tx/rx dma descriptor protocol errors */
+	uint32	reset;		/* reset count */
+	uint32	tbtt;		/* counts the TBTT interrupts */
+	uint32	txdmawar;
+	uint32	pkt_callback_reg_fail;	/* callbacks register failure */
+
+	/* MAC counters: 32-bit version of d11.h's macstat_t */
+	uint32	txallfrm;	/* total number of frames sent, incl. Data, ACK, RTS, CTS,
+				 * Control Management (includes retransmissions)
+				 */
+	uint32	txrtsfrm;	/* number of RTS sent out by the MAC */
+	uint32	txctsfrm;	/* number of CTS sent out by the MAC */
+	uint32	txackfrm;	/* number of ACK frames sent out */
+	uint32	txdnlfrm;	/* Not used */
+	uint32	txbcnfrm;	/* beacons transmitted */
+	uint32	txfunfl[6];	/* per-fifo tx underflows */
+	uint32	rxtoolate;	/* receive too late */
+	uint32  txfbw;		/* transmit at fallback bw (dynamic bw) */
+	uint32	txtplunfl;	/* Template underflows (mac was too slow to transmit ACK/CTS
+				 * or BCN)
+				 */
+	uint32	txphyerror;	/* Transmit phy error, type of error is reported in tx-status for
+				 * driver enqueued frames
+				 */
+	uint32	rxfrmtoolong;	/* Received frame longer than legal limit (2346 bytes) */
+	uint32	rxfrmtooshrt;	/* Received frame did not contain enough bytes for its frame type */
+	uint32	rxinvmachdr;	/* Either the protocol version != 0 or frame type not
+				 * data/control/management
+				 */
+	uint32	rxbadfcs;	/* number of frames for which the CRC check failed in the MAC */
+	uint32	rxbadplcp;	/* parity check of the PLCP header failed */
+	uint32	rxcrsglitch;	/* PHY was able to correlate the preamble but not the header */
+	uint32	rxstrt;		/* Number of received frames with a good PLCP
+				 * (i.e. passing parity check)
+				 */
+	uint32	rxdfrmucastmbss; /* Number of received DATA frames with good FCS and matching RA */
+	uint32	rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */
+	uint32	rxcfrmucast;	/* number of received CNTRL frames with good FCS and matching RA */
+	uint32	rxrtsucast;	/* number of unicast RTS addressed to the MAC (good FCS) */
+	uint32	rxctsucast;	/* number of unicast CTS addressed to the MAC (good FCS) */
+	uint32	rxackucast;	/* number of ucast ACKS received (good FCS) */
+	uint32	rxdfrmocast;	/* number of received DATA frames (good FCS and not matching RA) */
+	uint32	rxmfrmocast;	/* number of received MGMT frames (good FCS and not matching RA) */
+	uint32	rxcfrmocast;	/* number of received CNTRL frame (good FCS and not matching RA) */
+	uint32	rxrtsocast;	/* number of received RTS not addressed to the MAC */
+	uint32	rxctsocast;	/* number of received CTS not addressed to the MAC */
+	uint32	rxdfrmmcast;	/* number of RX Data multicast frames received by the MAC */
+	uint32	rxmfrmmcast;	/* number of RX Management multicast frames received by the MAC */
+	uint32	rxcfrmmcast;	/* number of RX Control multicast frames received by the MAC
+				 * (unlikely to see these)
+				 */
+	uint32	rxbeaconmbss;	/* beacons received from member of BSS */
+	uint32	rxdfrmucastobss; /* number of unicast frames addressed to the MAC from
+				  * other BSS (WDS FRAME)
+				  */
+	uint32	rxbeaconobss;	/* beacons received from other BSS */
+	uint32	rxrsptmout;	/* Number of response timeouts for transmitted frames
+				 * expecting a response
+				 */
+	uint32	bcntxcancl;	/* transmit beacons canceled due to receipt of beacon (IBSS) */
+	uint32	rxf0ovfl;	/* Number of receive fifo 0 overflows */
+	uint32	rxf1ovfl;	/* Number of receive fifo 1 overflows (obsolete) */
+	uint32	rxf2ovfl;	/* Number of receive fifo 2 overflows (obsolete) */
+	uint32	txsfovfl;	/* Number of transmit status fifo overflows (obsolete) */
+	uint32	pmqovfl;	/* Number of PMQ overflows */
+	uint32	rxcgprqfrm;	/* Number of received Probe requests that made it into
+				 * the PRQ fifo
+				 */
+	uint32	rxcgprsqovfl;	/* Rx Probe Request Que overflow in the AP */
+	uint32	txcgprsfail;	/* Tx Probe Response Fail. AP sent probe response but did
+				 * not get ACK
+				 */
+	uint32	txcgprssuc;	/* Tx Probe Response Success (ACK was received) */
+	uint32	prs_timeout;	/* Number of probe requests that were dropped from the PRQ
+				 * fifo because a probe response could not be sent out within
+				 * the time limit defined in M_PRS_MAXTIME
+				 */
+	uint32	rxnack;		/* obsolete */
+	uint32	frmscons;	/* obsolete */
+	uint32  txnack;		/* obsolete */
+	uint32	rxback;		/* blockack rxcnt */
+	uint32	txback;		/* blockack txcnt */
+
+	/* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */
+	uint32	txfrag;		/* dot11TransmittedFragmentCount */
+	uint32	txmulti;	/* dot11MulticastTransmittedFrameCount */
+	uint32	txfail;		/* dot11FailedCount */
+	uint32	txretry;	/* dot11RetryCount */
+	uint32	txretrie;	/* dot11MultipleRetryCount */
+	uint32	rxdup;		/* dot11FrameDuplicateCount */
+	uint32	txrts;		/* dot11RTSSuccessCount */
+	uint32	txnocts;	/* dot11RTSFailureCount */
+	uint32	txnoack;	/* dot11ACKFailureCount */
+	uint32	rxfrag;		/* dot11ReceivedFragmentCount */
+	uint32	rxmulti;	/* dot11MulticastReceivedFrameCount */
+	uint32	rxcrc;		/* dot11FCSErrorCount */
+	uint32	txfrmsnt;	/* dot11TransmittedFrameCount (bogus MIB?) */
+	uint32	rxundec;	/* dot11WEPUndecryptableCount */
+
+	/* WPA2 counters (see rxundec for DecryptFailureCount) */
+	uint32	tkipmicfaill;	/* TKIPLocalMICFailures */
+	uint32	tkipcntrmsr;	/* TKIPCounterMeasuresInvoked */
+	uint32	tkipreplay;	/* TKIPReplays */
+	uint32	ccmpfmterr;	/* CCMPFormatErrors */
+	uint32	ccmpreplay;	/* CCMPReplays */
+	uint32	ccmpundec;	/* CCMPDecryptErrors */
+	uint32	fourwayfail;	/* FourWayHandshakeFailures */
+	uint32	wepundec;	/* dot11WEPUndecryptableCount */
+	uint32	wepicverr;	/* dot11WEPICVErrorCount */
+	uint32	decsuccess;	/* DecryptSuccessCount */
+	uint32	tkipicverr;	/* TKIPICVErrorCount */
+	uint32	wepexcluded;	/* dot11WEPExcludedCount */
+
+	uint32	txchanrej;	/* Tx frames suppressed due to channel rejection */
+	uint32	psmwds;		/* Count PSM watchdogs */
+	uint32	phywatchdog;	/* Count Phy watchdogs (triggered by ucode) */
+
+	/* MBSS counters, AP only */
+	uint32	prq_entries_handled;	/* PRQ entries read in */
+	uint32	prq_undirected_entries;	/*    which were bcast bss & ssid */
+	uint32	prq_bad_entries;	/*    which could not be translated to info */
+	uint32	atim_suppress_count;	/* TX suppressions on ATIM fifo */
+	uint32	bcn_template_not_ready;	/* Template marked in use on send bcn ... */
+	uint32	bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */
+	uint32	late_tbtt_dpc;	/* TBTT DPC did not happen in time */
+
+	/* per-rate receive stat counters */
+	uint32  rx1mbps;	/* packets rx at 1Mbps */
+	uint32  rx2mbps;	/* packets rx at 2Mbps */
+	uint32  rx5mbps5;	/* packets rx at 5.5Mbps */
+	uint32  rx6mbps;	/* packets rx at 6Mbps */
+	uint32  rx9mbps;	/* packets rx at 9Mbps */
+	uint32  rx11mbps;	/* packets rx at 11Mbps */
+	uint32  rx12mbps;	/* packets rx at 12Mbps */
+	uint32  rx18mbps;	/* packets rx at 18Mbps */
+	uint32  rx24mbps;	/* packets rx at 24Mbps */
+	uint32  rx36mbps;	/* packets rx at 36Mbps */
+	uint32  rx48mbps;	/* packets rx at 48Mbps */
+	uint32  rx54mbps;	/* packets rx at 54Mbps */
+	uint32  rx108mbps;	/* packets rx at 108 Mbps */
+	uint32  rx162mbps;	/* packets rx at 162 Mbps */
+	uint32  rx216mbps;	/* packets rx at 216 Mbps */
+	uint32  rx270mbps;	/* packets rx at 270 Mbps */
+	uint32  rx324mbps;	/* packets rx at 324 Mbps */
+	uint32  rx378mbps;	/* packets rx at 378 Mbps */
+	uint32  rx432mbps;	/* packets rx at 432 Mbps */
+	uint32  rx486mbps;	/* packets rx at 486 Mbps */
+	uint32  rx540mbps;	/* packets rx at 540 Mbps */
+
+	/* pkteng rx frame stats */
+	uint32	pktengrxducast; /* unicast frames rxed by the pkteng code */
+	uint32	pktengrxdmcast; /* multicast frames rxed by the pkteng code */
+
+	uint32	rfdisable;	/* count of radio disables */
+	uint32	bphy_rxcrsglitch;	/* PHY count of bphy glitches */
+	uint32  bphy_badplcp;
+
+	uint32	txexptime;	/* Tx frames suppressed due to timer expiration */
+
+	uint32	txmpdu_sgi;	/* count for sgi transmit */
+	uint32	rxmpdu_sgi;	/* count for sgi received */
+	uint32	txmpdu_stbc;	/* count for stbc transmit */
+	uint32	rxmpdu_stbc;	/* count for stbc received */
+
+	uint32	rxundec_mcst;	/* dot11WEPUndecryptableCount */
+
+	/* WPA2 counters (see rxundec for DecryptFailureCount) */
+	uint32	tkipmicfaill_mcst;	/* TKIPLocalMICFailures */
+	uint32	tkipcntrmsr_mcst;	/* TKIPCounterMeasuresInvoked */
+	uint32	tkipreplay_mcst;	/* TKIPReplays */
+	uint32	ccmpfmterr_mcst;	/* CCMPFormatErrors */
+	uint32	ccmpreplay_mcst;	/* CCMPReplays */
+	uint32	ccmpundec_mcst;	/* CCMPDecryptErrors */
+	uint32	fourwayfail_mcst;	/* FourWayHandshakeFailures */
+	uint32	wepundec_mcst;	/* dot11WEPUndecryptableCount */
+	uint32	wepicverr_mcst;	/* dot11WEPICVErrorCount */
+	uint32	decsuccess_mcst;	/* DecryptSuccessCount */
+	uint32	tkipicverr_mcst;	/* TKIPICVErrorCount */
+	uint32	wepexcluded_mcst;	/* dot11WEPExcludedCount */
+
+	uint32	dma_hang;	/* count for dma hang */
+	uint32	reinit;		/* count for reinit */
+
+	uint32  pstatxucast;	/* count of ucast frames xmitted on all psta assoc */
+	uint32  pstatxnoassoc;	/* count of txnoassoc frames xmitted on all psta assoc */
+	uint32  pstarxucast;	/* count of ucast frames received on all psta assoc */
+	uint32  pstarxbcmc;	/* count of bcmc frames received on all psta */
+	uint32  pstatxbcmc;	/* count of bcmc frames transmitted on all psta */
+
+	uint32  cso_passthrough; /* hw cso required but passthrough */
+	uint32	cso_normal;	/* hw cso hdr for normal process */
+	uint32	chained;	/* number of frames chained */
+	uint32	chainedsz1;	/* number of chain size 1 frames */
+	uint32	unchained;	/* number of frames not chained */
+	uint32	maxchainsz;	/* max chain size so far */
+	uint32	currchainsz;	/* current chain size */
+	uint32	rxdrop20s;	/* drop secondary cnt */
+	uint32	pciereset;	/* Secondary Bus Reset issued by driver */
+	uint32	cfgrestore;	/* configspace restore by driver */
+	uint32	reinitreason[NREINITREASONCOUNT]; /* reinitreason counters; 0: Unknown reason */
+} wl_cnt_t;
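+
+/* Illustrative sketch, not part of the original header: callers are expected
+ * to validate the version/length header before reading counters, since older
+ * firmware can return a shorter, older layout.  The helper below is an
+ * assumption about how such a check might look, not driver code.
+ */
+static int wl_cnt_buf_ok(const wl_cnt_t *cnt, uint buflen)
+{
+	return (buflen >= 2 * sizeof(uint16)) &&
+	       (cnt->version <= WL_CNT_T_VERSION) &&
+	       (cnt->length <= buflen);
+}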
+
+typedef struct {
+	uint16  version;    /* see definition of WL_CNT_T_VERSION */
+	uint16  length;     /* length of entire structure */
+
+	/* transmit stat counters */
+	uint32  txframe;    /* tx data frames */
+	uint32  txbyte;     /* tx data bytes */
+	uint32  txretrans;  /* tx mac retransmits */
+	uint32  txerror;    /* tx data errors (derived: sum of others) */
+	uint32  txctl;      /* tx management frames */
+	uint32  txprshort;  /* tx short preamble frames */
+	uint32  txserr;     /* tx status errors */
+	uint32  txnobuf;    /* tx out of buffers errors */
+	uint32  txnoassoc;  /* tx discard because we're not associated */
+	uint32  txrunt;     /* tx runt frames */
+	uint32  txchit;     /* tx header cache hit (fastpath) */
+	uint32  txcmiss;    /* tx header cache miss (slowpath) */
+
+	/* transmit chip error counters */
+	uint32  txuflo;     /* tx fifo underflows */
+	uint32  txphyerr;   /* tx phy errors (indicated in tx status) */
+	uint32  txphycrs;
+
+	/* receive stat counters */
+	uint32  rxframe;    /* rx data frames */
+	uint32  rxbyte;     /* rx data bytes */
+	uint32  rxerror;    /* rx data errors (derived: sum of others) */
+	uint32  rxctl;      /* rx management frames */
+	uint32  rxnobuf;    /* rx out of buffers errors */
+	uint32  rxnondata;  /* rx non data frames in the data channel errors */
+	uint32  rxbadds;    /* rx bad DS errors */
+	uint32  rxbadcm;    /* rx bad control or management frames */
+	uint32  rxfragerr;  /* rx fragmentation errors */
+	uint32  rxrunt;     /* rx runt frames */
+	uint32  rxgiant;    /* rx giant frames */
+	uint32  rxnoscb;    /* rx no scb error */
+	uint32  rxbadproto; /* rx invalid frames */
+	uint32  rxbadsrcmac;    /* rx frames with Invalid Src Mac */
+	uint32  rxbadda;    /* rx frames tossed for invalid da */
+	uint32  rxfilter;   /* rx frames filtered out */
+
+	/* receive chip error counters */
+	uint32  rxoflo;     /* rx fifo overflow errors */
+	uint32  rxuflo[NFIFO];  /* rx dma descriptor underflow errors */
+
+	uint32  d11cnt_txrts_off;   /* d11cnt txrts value when reset d11cnt */
+	uint32  d11cnt_rxcrc_off;   /* d11cnt rxcrc value when reset d11cnt */
+	uint32  d11cnt_txnocts_off; /* d11cnt txnocts value when reset d11cnt */
+
+	/* misc counters */
+	uint32  dmade;      /* tx/rx dma descriptor errors */
+	uint32  dmada;      /* tx/rx dma data errors */
+	uint32  dmape;      /* tx/rx dma descriptor protocol errors */
+	uint32  reset;      /* reset count */
+	uint32  tbtt;       /* cnts the TBTT int's */
+	uint32  txdmawar;
+	uint32  pkt_callback_reg_fail;  /* callbacks register failure */
+
+	/* MAC counters: 32-bit version of d11.h's macstat_t */
+	uint32  txallfrm;   /* total number of frames sent, incl. Data, ACK, RTS, CTS,
+			     * Control, Management (includes retransmissions)
+			     */
+	uint32  txrtsfrm;   /* number of RTS sent out by the MAC */
+	uint32  txctsfrm;   /* number of CTS sent out by the MAC */
+	uint32  txackfrm;   /* number of ACK frames sent out */
+	uint32  txdnlfrm;   /* Not used */
+	uint32  txbcnfrm;   /* beacons transmitted */
+	uint32  txfunfl[6]; /* per-fifo tx underflows */
+	uint32	rxtoolate;	/* receive too late */
+	uint32  txfbw;	    /* transmit at fallback bw (dynamic bw) */
+	uint32  txtplunfl;  /* Template underflows (mac was too slow to transmit ACK/CTS
+			     * or BCN)
+			     */
+	uint32  txphyerror; /* Transmit phy error, type of error is reported in tx-status for
+			     * driver enqueued frames
+			     */
+	uint32  rxfrmtoolong;   /* Received frame longer than legal limit (2346 bytes) */
+	uint32  rxfrmtooshrt;   /* Received frame did not contain enough bytes for its frame type */
+	uint32  rxinvmachdr;    /* Either the protocol version != 0 or frame type not
+				 * data/control/management
+				 */
+	uint32  rxbadfcs;   /* number of frames for which the CRC check failed in the MAC */
+	uint32  rxbadplcp;  /* parity check of the PLCP header failed */
+	uint32  rxcrsglitch;    /* PHY was able to correlate the preamble but not the header */
+	uint32  rxstrt;     /* Number of received frames with a good PLCP
+			     * (i.e. passing parity check)
+			     */
+	uint32  rxdfrmucastmbss; /* Number of received DATA frames with good FCS and matching RA */
+	uint32  rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */
+	uint32  rxcfrmucast;    /* number of received CNTRL frames with good FCS and matching RA */
+	uint32  rxrtsucast; /* number of unicast RTS addressed to the MAC (good FCS) */
+	uint32  rxctsucast; /* number of unicast CTS addressed to the MAC (good FCS) */
+	uint32  rxackucast; /* number of ucast ACKS received (good FCS) */
+	uint32  rxdfrmocast;    /* number of received DATA frames (good FCS and not matching RA) */
+	uint32  rxmfrmocast;    /* number of received MGMT frames (good FCS and not matching RA) */
+	uint32  rxcfrmocast;    /* number of received CNTRL frame (good FCS and not matching RA) */
+	uint32  rxrtsocast; /* number of received RTS not addressed to the MAC */
+	uint32  rxctsocast; /* number of received CTS not addressed to the MAC */
+	uint32  rxdfrmmcast;    /* number of RX Data multicast frames received by the MAC */
+	uint32  rxmfrmmcast;    /* number of RX Management multicast frames received by the MAC */
+	uint32  rxcfrmmcast;    /* number of RX Control multicast frames received by the MAC
+				 * (unlikely to see these)
+				 */
+	uint32  rxbeaconmbss;   /* beacons received from member of BSS */
+	uint32  rxdfrmucastobss; /* number of unicast frames addressed to the MAC from
+				  * other BSS (WDS FRAME)
+				  */
+	uint32  rxbeaconobss;   /* beacons received from other BSS */
+	uint32  rxrsptmout; /* Number of response timeouts for transmitted frames
+			     * expecting a response
+			     */
+	uint32  bcntxcancl; /* transmit beacons canceled due to receipt of beacon (IBSS) */
+	uint32  rxf0ovfl;   /* Number of receive fifo 0 overflows */
+	uint32  rxf1ovfl;   /* Number of receive fifo 1 overflows (obsolete) */
+	uint32  rxf2ovfl;   /* Number of receive fifo 2 overflows (obsolete) */
+	uint32  txsfovfl;   /* Number of transmit status fifo overflows (obsolete) */
+	uint32  pmqovfl;    /* Number of PMQ overflows */
+	uint32  rxcgprqfrm; /* Number of received Probe requests that made it into
+			     * the PRQ fifo
+			     */
+	uint32  rxcgprsqovfl;   /* Rx Probe Request Que overflow in the AP */
+	uint32  txcgprsfail;    /* Tx Probe Response Fail. AP sent probe response but did
+				 * not get ACK
+				 */
+	uint32  txcgprssuc; /* Tx Probe Response Success (ACK was received) */
+	uint32  prs_timeout;    /* Number of probe requests that were dropped from the PRQ
+				 * fifo because a probe response could not be sent out within
+				 * the time limit defined in M_PRS_MAXTIME
+				 */
+	uint32  rxnack;		/* obsolete */
+	uint32  frmscons;	/* obsolete */
+	uint32  txnack;		/* obsolete */
+	uint32	rxback;		/* blockack rxcnt */
+	uint32	txback;		/* blockack txcnt */
+
+	/* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */
+	uint32  txfrag;     /* dot11TransmittedFragmentCount */
+	uint32  txmulti;    /* dot11MulticastTransmittedFrameCount */
+	uint32  txfail;     /* dot11FailedCount */
+	uint32  txretry;    /* dot11RetryCount */
+	uint32  txretrie;   /* dot11MultipleRetryCount */
+	uint32  rxdup;      /* dot11FrameDuplicateCount */
+	uint32  txrts;      /* dot11RTSSuccessCount */
+	uint32  txnocts;    /* dot11RTSFailureCount */
+	uint32  txnoack;    /* dot11ACKFailureCount */
+	uint32  rxfrag;     /* dot11ReceivedFragmentCount */
+	uint32  rxmulti;    /* dot11MulticastReceivedFrameCount */
+	uint32  rxcrc;      /* dot11FCSErrorCount */
+	uint32  txfrmsnt;   /* dot11TransmittedFrameCount (bogus MIB?) */
+	uint32  rxundec;    /* dot11WEPUndecryptableCount */
+
+	/* WPA2 counters (see rxundec for DecryptFailureCount) */
+	uint32  tkipmicfaill;   /* TKIPLocalMICFailures */
+	uint32  tkipcntrmsr;    /* TKIPCounterMeasuresInvoked */
+	uint32  tkipreplay; /* TKIPReplays */
+	uint32  ccmpfmterr; /* CCMPFormatErrors */
+	uint32  ccmpreplay; /* CCMPReplays */
+	uint32  ccmpundec;  /* CCMPDecryptErrors */
+	uint32  fourwayfail;    /* FourWayHandshakeFailures */
+	uint32  wepundec;   /* dot11WEPUndecryptableCount */
+	uint32  wepicverr;  /* dot11WEPICVErrorCount */
+	uint32  decsuccess; /* DecryptSuccessCount */
+	uint32  tkipicverr; /* TKIPICVErrorCount */
+	uint32  wepexcluded;    /* dot11WEPExcludedCount */
+
+	uint32  rxundec_mcst;   /* dot11WEPUndecryptableCount */
+
+	/* WPA2 counters (see rxundec for DecryptFailureCount) */
+	uint32  tkipmicfaill_mcst;  /* TKIPLocalMICFailures */
+	uint32  tkipcntrmsr_mcst;   /* TKIPCounterMeasuresInvoked */
+	uint32  tkipreplay_mcst;    /* TKIPReplays */
+	uint32  ccmpfmterr_mcst;    /* CCMPFormatErrors */
+	uint32  ccmpreplay_mcst;    /* CCMPReplays */
+	uint32  ccmpundec_mcst; /* CCMPDecryptErrors */
+	uint32  fourwayfail_mcst;   /* FourWayHandshakeFailures */
+	uint32  wepundec_mcst;  /* dot11WEPUndecryptableCount */
+	uint32  wepicverr_mcst; /* dot11WEPICVErrorCount */
+	uint32  decsuccess_mcst;    /* DecryptSuccessCount */
+	uint32  tkipicverr_mcst;    /* TKIPICVErrorCount */
+	uint32  wepexcluded_mcst;   /* dot11WEPExcludedCount */
+
+	uint32  txchanrej;  /* Tx frames suppressed due to channel rejection */
+	uint32  txexptime;  /* Tx frames suppressed due to timer expiration */
+	uint32  psmwds;     /* Count PSM watchdogs */
+	uint32  phywatchdog;    /* Count Phy watchdogs (triggered by ucode) */
+
+	/* MBSS counters, AP only */
+	uint32  prq_entries_handled;    /* PRQ entries read in */
+	uint32  prq_undirected_entries; /*    which were bcast bss & ssid */
+	uint32  prq_bad_entries;    /*    which could not be translated to info */
+	uint32  atim_suppress_count;    /* TX suppressions on ATIM fifo */
+	uint32  bcn_template_not_ready; /* Template marked in use on send bcn ... */
+	uint32  bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */
+	uint32  late_tbtt_dpc;  /* TBTT DPC did not happen in time */
+
+	/* per-rate receive stat counters */
+	uint32  rx1mbps;    /* packets rx at 1Mbps */
+	uint32  rx2mbps;    /* packets rx at 2Mbps */
+	uint32  rx5mbps5;   /* packets rx at 5.5Mbps */
+	uint32  rx6mbps;    /* packets rx at 6Mbps */
+	uint32  rx9mbps;    /* packets rx at 9Mbps */
+	uint32  rx11mbps;   /* packets rx at 11Mbps */
+	uint32  rx12mbps;   /* packets rx at 12Mbps */
+	uint32  rx18mbps;   /* packets rx at 18Mbps */
+	uint32  rx24mbps;   /* packets rx at 24Mbps */
+	uint32  rx36mbps;   /* packets rx at 36Mbps */
+	uint32  rx48mbps;   /* packets rx at 48Mbps */
+	uint32  rx54mbps;   /* packets rx at 54Mbps */
+	uint32  rx108mbps;  /* packets rx at 108 Mbps */
+	uint32  rx162mbps;  /* packets rx at 162 Mbps */
+	uint32  rx216mbps;  /* packets rx at 216 Mbps */
+	uint32  rx270mbps;  /* packets rx at 270 Mbps */
+	uint32  rx324mbps;  /* packets rx at 324 Mbps */
+	uint32  rx378mbps;  /* packets rx at 378 Mbps */
+	uint32  rx432mbps;  /* packets rx at 432 Mbps */
+	uint32  rx486mbps;  /* packets rx at 486 Mbps */
+	uint32  rx540mbps;  /* packets rx at 540 Mbps */
+
+	/* pkteng rx frame stats */
+	uint32  pktengrxducast; /* unicast frames rxed by the pkteng code */
+	uint32  pktengrxdmcast; /* multicast frames rxed by the pkteng code */
+
+	uint32  rfdisable;  /* count of radio disables */
+	uint32  bphy_rxcrsglitch;   /* PHY count of bphy glitches */
+	uint32  bphy_badplcp;
+
+	uint32  txmpdu_sgi; /* count for sgi transmit */
+	uint32  rxmpdu_sgi; /* count for sgi received */
+	uint32  txmpdu_stbc;    /* count for stbc transmit */
+	uint32  rxmpdu_stbc;    /* count for stbc received */
+
+	uint32	rxdrop20s;	/* drop secondary cnt */
+
+} wl_cnt_ver_six_t;
+
+#define	WL_DELTA_STATS_T_VERSION	2	/* current version of wl_delta_stats_t struct */
+
+typedef struct {
+	uint16 version;     /* see definition of WL_DELTA_STATS_T_VERSION */
+	uint16 length;      /* length of entire structure */
+
+	/* transmit stat counters */
+	uint32 txframe;     /* tx data frames */
+	uint32 txbyte;      /* tx data bytes */
+	uint32 txretrans;   /* tx mac retransmits */
+	uint32 txfail;      /* tx failures */
+
+	/* receive stat counters */
+	uint32 rxframe;     /* rx data frames */
+	uint32 rxbyte;      /* rx data bytes */
+
+	/* per-rate receive stat counters */
+	uint32  rx1mbps;	/* packets rx at 1Mbps */
+	uint32  rx2mbps;	/* packets rx at 2Mbps */
+	uint32  rx5mbps5;	/* packets rx at 5.5Mbps */
+	uint32  rx6mbps;	/* packets rx at 6Mbps */
+	uint32  rx9mbps;	/* packets rx at 9Mbps */
+	uint32  rx11mbps;	/* packets rx at 11Mbps */
+	uint32  rx12mbps;	/* packets rx at 12Mbps */
+	uint32  rx18mbps;	/* packets rx at 18Mbps */
+	uint32  rx24mbps;	/* packets rx at 24Mbps */
+	uint32  rx36mbps;	/* packets rx at 36Mbps */
+	uint32  rx48mbps;	/* packets rx at 48Mbps */
+	uint32  rx54mbps;	/* packets rx at 54Mbps */
+	uint32  rx108mbps;	/* packets rx at 108 Mbps */
+	uint32  rx162mbps;	/* packets rx at 162 Mbps */
+	uint32  rx216mbps;	/* packets rx at 216 Mbps */
+	uint32  rx270mbps;	/* packets rx at 270 Mbps */
+	uint32  rx324mbps;	/* packets rx at 324 Mbps */
+	uint32  rx378mbps;	/* packets rx at 378 Mbps */
+	uint32  rx432mbps;	/* packets rx at 432 Mbps */
+	uint32  rx486mbps;	/* packets rx at 486 Mbps */
+	uint32  rx540mbps;	/* packets rx at 540 Mbps */
+
+	/* phy stats */
+	uint32 rxbadplcp;
+	uint32 rxcrsglitch;
+	uint32 bphy_rxcrsglitch;
+	uint32 bphy_badplcp;
+
+} wl_delta_stats_t;
+
+typedef struct {
+	uint32 packets;
+	uint32 bytes;
+} wl_traffic_stats_t;
+
+typedef struct {
+	uint16	version;	/* see definition of WL_WME_CNT_VERSION */
+	uint16	length;		/* length of entire structure */
+
+	wl_traffic_stats_t tx[AC_COUNT];	/* Packets transmitted */
+	wl_traffic_stats_t tx_failed[AC_COUNT];	/* Packets dropped or failed to transmit */
+	wl_traffic_stats_t rx[AC_COUNT];	/* Packets received */
+	wl_traffic_stats_t rx_failed[AC_COUNT];	/* Packets failed to receive */
+
+	wl_traffic_stats_t forward[AC_COUNT];	/* Packets forwarded by AP */
+
+	wl_traffic_stats_t tx_expired[AC_COUNT];	/* packets dropped due to lifetime expiry */
+
+} wl_wme_cnt_t;
+
+struct wl_msglevel2 {
+	uint32 low;
+	uint32 high;
+};
+
+typedef struct wl_mkeep_alive_pkt {
+	uint16	version; /* Version for mkeep_alive */
+	uint16	length; /* length of fixed parameters in the structure */
+	uint32	period_msec;
+	uint16	len_bytes;
+	uint8	keep_alive_id; /* which of the N keep-alives this is (0 - 3 for N = 4) */
+	uint8	data[1];
+} wl_mkeep_alive_pkt_t;
+
+#define WL_MKEEP_ALIVE_VERSION		1
+#define WL_MKEEP_ALIVE_FIXED_LEN	OFFSETOF(wl_mkeep_alive_pkt_t, data)
+#define WL_MKEEP_ALIVE_PRECISION	500
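+
+/* Illustrative sketch, not part of the original header: data[1] is a
+ * flexible-array placeholder, so the ioctl buffer for a keep-alive is the
+ * fixed header plus the variable payload.  The helper name is hypothetical.
+ */
+static uint wl_mkeep_alive_buf_len(uint16 payload_len)
+{
+	return WL_MKEEP_ALIVE_FIXED_LEN + payload_len;
+}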
+
+/* TCP Keep-Alive conn struct */
+typedef struct wl_mtcpkeep_alive_conn_pkt {
+	struct ether_addr saddr;		/* src mac address */
+	struct ether_addr daddr;		/* dst mac address */
+	struct ipv4_addr sipaddr;		/* source IP addr */
+	struct ipv4_addr dipaddr;		/* dest IP addr */
+	uint16 sport;				/* src port */
+	uint16 dport;				/* dest port */
+	uint32 seq;				/* seq number */
+	uint32 ack;				/* ACK number */
+	uint16 tcpwin;				/* TCP window */
+} wl_mtcpkeep_alive_conn_pkt_t;
+
+/* TCP Keep-Alive interval struct */
+typedef struct wl_mtcpkeep_alive_timers_pkt {
+	uint16 interval;		/* interval timer */
+	uint16 retry_interval;		/* retry_interval timer */
+	uint16 retry_count;		/* retry_count */
+} wl_mtcpkeep_alive_timers_pkt_t;
+
+typedef struct wake_info {
+	uint32 wake_reason;
+	uint32 wake_info_len;		/* size of packet */
+	uchar  packet[1];
+} wake_info_t;
+
+typedef struct wake_pkt {
+	uint32 wake_pkt_len;		/* size of packet */
+	uchar  packet[1];
+} wake_pkt_t;
+
+
+#define WL_MTCPKEEP_ALIVE_VERSION		1
+
+#ifdef WLBA
+
+#define WLC_BA_CNT_VERSION  1   /* current version of wlc_ba_cnt_t */
+
+/* block ack related stats */
+typedef struct wlc_ba_cnt {
+	uint16  version;    /* WLC_BA_CNT_VERSION */
+	uint16  length;     /* length of entire structure */
+
+	/* transmit stat counters */
+	uint32 txpdu;       /* pdus sent */
+	uint32 txsdu;       /* sdus sent */
+	uint32 txfc;        /* tx side flow controlled packets */
+	uint32 txfci;       /* tx side flow control initiated */
+	uint32 txretrans;   /* retransmitted pdus */
+	uint32 txbatimer;   /* ba resend due to timer */
+	uint32 txdrop;      /* dropped packets */
+	uint32 txaddbareq;  /* addba req sent */
+	uint32 txaddbaresp; /* addba resp sent */
+	uint32 txdelba;     /* delba sent */
+	uint32 txba;        /* ba sent */
+	uint32 txbar;       /* bar sent */
+	uint32 txpad[4];    /* future */
+
+	/* receive side counters */
+	uint32 rxpdu;       /* pdus recd */
+	uint32 rxqed;       /* pdus buffered before sending up */
+	uint32 rxdup;       /* duplicate pdus */
+	uint32 rxnobuf;     /* pdus discarded due to no buf */
+	uint32 rxaddbareq;  /* addba req recd */
+	uint32 rxaddbaresp; /* addba resp recd */
+	uint32 rxdelba;     /* delba recd */
+	uint32 rxba;        /* ba recd */
+	uint32 rxbar;       /* bar recd */
+	uint32 rxinvba;     /* invalid ba recd */
+	uint32 rxbaholes;   /* ba recd with holes */
+	uint32 rxunexp;     /* unexpected packets */
+	uint32 rxpad[4];    /* future */
+} wlc_ba_cnt_t;
+#endif /* WLBA */
+
+/* structure for per-tid ampdu control */
+struct ampdu_tid_control {
+	uint8 tid;			/* tid */
+	uint8 enable;			/* enable/disable */
+};
+
+/* struct for per-tid, per-mode ampdu control */
+struct ampdu_tid_control_mode {
+	struct ampdu_tid_control control[NUMPRIO]; /* tid is 0xff for unused elements */
+	char mode_name[8]; /* supported mode : AIBSS */
+};
+
+/* structure for identifying ea/tid for sending addba/delba */
+struct ampdu_ea_tid {
+	struct ether_addr ea;		/* Station address */
+	uint8 tid;			/* tid */
+};
+/* structure for identifying retry/tid for retry_limit_tid/rr_retry_limit_tid */
+struct ampdu_retry_tid {
+	uint8 tid;	/* tid */
+	uint8 retry;	/* retry value */
+};
+
+/* structure for dpt iovars */
+typedef struct dpt_iovar {
+	struct ether_addr ea;		/* Station address */
+	uint8 mode;			/* mode: depends on iovar */
+	uint32 pad;			/* future */
+} dpt_iovar_t;
+
+#define	DPT_FNAME_LEN		48	/* Max length of friendly name */
+
+typedef struct dpt_status {
+	uint8 status;			/* flags to indicate status */
+	uint8 fnlen;			/* length of friendly name */
+	uchar name[DPT_FNAME_LEN];	/* friendly name */
+	uint32 rssi;			/* RSSI of the link */
+	sta_info_t sta;			/* sta info */
+} dpt_status_t;
+
+/* structure for dpt list */
+typedef struct dpt_list {
+	uint32 num;			/* number of entries in struct */
+	dpt_status_t status[1];		/* per station info */
+} dpt_list_t;
+
+/* structure for dpt friendly name */
+typedef struct dpt_fname {
+	uint8 len;			/* length of friendly name */
+	uchar name[DPT_FNAME_LEN];	/* friendly name */
+} dpt_fname_t;
+
+#define BDD_FNAME_LEN       32  /* Max length of friendly name */
+typedef struct bdd_fname {
+	uint8 len;          /* length of friendly name */
+	uchar name[BDD_FNAME_LEN];  /* friendly name */
+} bdd_fname_t;
+
+/* structure for addts arguments */
+/* For ioctls that take a list of TSPEC */
+struct tslist {
+	int count;			/* number of tspecs */
+	struct tsinfo_arg tsinfo[1];	/* variable length array of tsinfo */
+};
+
+#ifdef WLTDLS
+/* structure for tdls iovars */
+typedef struct tdls_iovar {
+	struct ether_addr ea;		/* Station address */
+	uint8 mode;			/* mode: depends on iovar */
+	chanspec_t chanspec;
+	uint32 pad;			/* future */
+} tdls_iovar_t;
+
+#define TDLS_WFD_IE_SIZE		512
+/* structure for tdls wfd ie */
+typedef struct tdls_wfd_ie_iovar {
+	struct ether_addr ea;		/* Station address */
+	uint8 mode;
+	uint16 length;
+	uint8 data[TDLS_WFD_IE_SIZE];
+} tdls_wfd_ie_iovar_t;
+#endif /* WLTDLS */
+
+/* structure for addts/delts arguments */
+typedef struct tspec_arg {
+	uint16 version;			/* see definition of TSPEC_ARG_VERSION */
+	uint16 length;			/* length of entire structure */
+	uint flag;			/* bit field */
+	/* TSPEC Arguments */
+	struct tsinfo_arg tsinfo;	/* TS Info bit field */
+	uint16 nom_msdu_size;		/* (Nominal or fixed) MSDU Size (bytes) */
+	uint16 max_msdu_size;		/* Maximum MSDU Size (bytes) */
+	uint min_srv_interval;		/* Minimum Service Interval (us) */
+	uint max_srv_interval;		/* Maximum Service Interval (us) */
+	uint inactivity_interval;	/* Inactivity Interval (us) */
+	uint suspension_interval;	/* Suspension Interval (us) */
+	uint srv_start_time;		/* Service Start Time (us) */
+	uint min_data_rate;		/* Minimum Data Rate (bps) */
+	uint mean_data_rate;		/* Mean Data Rate (bps) */
+	uint peak_data_rate;		/* Peak Data Rate (bps) */
+	uint max_burst_size;		/* Maximum Burst Size (bytes) */
+	uint delay_bound;		/* Delay Bound (us) */
+	uint min_phy_rate;		/* Minimum PHY Rate (bps) */
+	uint16 surplus_bw;		/* Surplus Bandwidth Allowance (range 1.0 to 8.0) */
+	uint16 medium_time;		/* Medium Time (32 us/s periods) */
+	uint8 dialog_token;		/* dialog token */
+} tspec_arg_t;
+
+/* tspec arg for desired station */
+typedef	struct tspec_per_sta_arg {
+	struct ether_addr ea;
+	struct tspec_arg ts;
+} tspec_per_sta_arg_t;
+
+/* structure for max bandwidth for each access category */
+typedef	struct wme_max_bandwidth {
+	uint32	ac[AC_COUNT];	/* max bandwidth for each access category */
+} wme_max_bandwidth_t;
+
+#define WL_WME_MBW_PARAMS_IO_BYTES (sizeof(wme_max_bandwidth_t))
+
+#define	TSPEC_ARG_VERSION		2	/* current version of wl_tspec_arg_t struct */
+#define TSPEC_ARG_LENGTH		55	/* argument length from tsinfo to medium_time */
+#define TSPEC_DEFAULT_DIALOG_TOKEN	42	/* default dialog token */
+#define TSPEC_DEFAULT_SBW_FACTOR	0x3000	/* default surplus bw */
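+
+/* Illustrative sketch, not part of the original header: surplus_bw looks like
+ * the 802.11 TSPEC fixed-point encoding (3 integer bits, 13 fraction bits),
+ * under which 0x2000 is 1.0 and TSPEC_DEFAULT_SBW_FACTOR (0x3000) is 1.5.
+ * The 3.13 reading is an assumption; the header itself only states the
+ * 1.0 to 8.0 range.
+ */
+static double tspec_sbw_to_double(uint16 surplus_bw)
+{
+	return (double)surplus_bw / 8192.0;	/* 8192 == 1 << 13 */
+}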
+
+
+#define WL_WOWL_KEEPALIVE_MAX_PACKET_SIZE  80
+#define WLC_WOWL_MAX_KEEPALIVE	2
+
+/* Packet lifetime configuration per ac */
+typedef struct wl_lifetime {
+	uint32 ac;	        /* access class */
+	uint32 lifetime;    /* Packet lifetime value in ms */
+} wl_lifetime_t;
+
+/* Channel Switch Announcement param */
+typedef struct wl_chan_switch {
+	uint8 mode;		/* value 0 or 1 */
+	uint8 count;		/* count # of beacons before switching */
+	chanspec_t chspec;	/* chanspec */
+	uint8 reg;		/* regulatory class */
+	uint8 frame_type;		/* csa frame type, unicast or broadcast */
+} wl_chan_switch_t;
+
+enum {
+	PFN_LIST_ORDER,
+	PFN_RSSI
+};
+
+enum {
+	DISABLE,
+	ENABLE
+};
+
+enum {
+	OFF_ADAPT,
+	SMART_ADAPT,
+	STRICT_ADAPT,
+	SLOW_ADAPT
+};
+
+#define SORT_CRITERIA_BIT		0
+#define AUTO_NET_SWITCH_BIT		1
+#define ENABLE_BKGRD_SCAN_BIT		2
+#define IMMEDIATE_SCAN_BIT		3
+#define	AUTO_CONNECT_BIT		4
+#define	ENABLE_BD_SCAN_BIT		5
+#define ENABLE_ADAPTSCAN_BIT		6
+#define IMMEDIATE_EVENT_BIT		8
+#define SUPPRESS_SSID_BIT		9
+#define ENABLE_NET_OFFLOAD_BIT		10
+/* report found/lost events for SSID and BSSID networks separately */
+#define REPORT_SEPERATELY_BIT		11
+
+#define SORT_CRITERIA_MASK	0x0001
+#define AUTO_NET_SWITCH_MASK	0x0002
+#define ENABLE_BKGRD_SCAN_MASK	0x0004
+#define IMMEDIATE_SCAN_MASK	0x0008
+#define AUTO_CONNECT_MASK	0x0010
+
+#define ENABLE_BD_SCAN_MASK	0x0020
+#define ENABLE_ADAPTSCAN_MASK	0x00c0
+#define IMMEDIATE_EVENT_MASK	0x0100
+#define SUPPRESS_SSID_MASK	0x0200
+#define ENABLE_NET_OFFLOAD_MASK	0x0400
+/* report found/lost events for SSID and BSSID networks separately */
+#define REPORT_SEPERATELY_MASK	0x0800
+
+#define PFN_VERSION			2
+#define PFN_SCANRESULT_VERSION		1
+#define MAX_PFN_LIST_COUNT		16
+
+#define PFN_COMPLETE			1
+#define PFN_INCOMPLETE			0
+
+#define DEFAULT_BESTN			2
+#define DEFAULT_MSCAN			0
+#define DEFAULT_REPEAT			10
+#define DEFAULT_EXP			2
+
+#define PFN_PARTIAL_SCAN_BIT		0
+#define PFN_PARTIAL_SCAN_MASK		1
+
+/* PFN network info structure */
+typedef struct wl_pfn_subnet_info {
+	struct ether_addr BSSID;
+	uint8	channel; /* channel number only */
+	uint8	SSID_len;
+	uint8	SSID[32];
+} wl_pfn_subnet_info_t;
+
+typedef struct wl_pfn_net_info {
+	wl_pfn_subnet_info_t pfnsubnet;
+	int16	RSSI; /* receive signal strength (in dBm) */
+	uint16	timestamp; /* age in seconds */
+} wl_pfn_net_info_t;
+
+typedef struct wl_pfn_lnet_info {
+	wl_pfn_subnet_info_t pfnsubnet; /* BSSID + channel + SSID len + SSID */
+	uint16	flags; /* partial scan, etc */
+	int16	RSSI; /* receive signal strength (in dBm) */
+	uint32	timestamp; /* age in milliseconds */
+	uint16	rtt0; /* estimated distance to this AP in centimeters */
+	uint16	rtt1; /* standard deviation of the distance to this AP in centimeters */
+} wl_pfn_lnet_info_t;
+
+typedef struct wl_pfn_lscanresults {
+	uint32 version;
+	uint32 status;
+	uint32 count;
+	wl_pfn_lnet_info_t netinfo[1];
+} wl_pfn_lscanresults_t;
+
+typedef struct wl_pfn_scanresults {
+	uint32 version;
+	uint32 status;
+	uint32 count;
+	wl_pfn_net_info_t netinfo[1];
+} wl_pfn_scanresults_t;
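+
+/* Illustrative sketch, not part of the original header: netinfo[1] is a
+ * variable-length array with 'count' valid entries after the fixed header.
+ * The walker and its callback are hypothetical names for this example.
+ */
+static void wl_pfn_walk_results(const wl_pfn_scanresults_t *res,
+	void (*cb)(const wl_pfn_net_info_t *ni))
+{
+	uint32 i;
+	for (i = 0; i < res->count; i++)
+		cb(&res->netinfo[i]);
+}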
+
+/* PFN data structure */
+typedef struct wl_pfn_param {
+	int32 version;			/* PNO parameters version */
+	int32 scan_freq;		/* Scan frequency */
+	int32 lost_network_timeout;	/* Timeout in sec. to declare
+								* discovered network as lost
+								*/
+	int16 flags;			/* Bit field to control features
+							* of PFN such as sort criteria auto
+							* enable switch and background scan
+							*/
+	int16 rssi_margin;		/* Margin to avoid jitter for choosing a
+							* PFN based on RSSI sort criteria
+							*/
+	uint8 bestn; /* number of best networks in each scan */
+	uint8 mscan; /* number of scans recorded */
+	uint8 repeat; /* Minimum number of scan intervals
+				     * before scan frequency changes in adaptive scan
+				     */
+	uint8 exp; /* Exponent of 2 for maximum scan interval */
+	int32 slow_freq; /* slow scan period */
+} wl_pfn_param_t;
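+
+/* Illustrative sketch, not part of the original header: reading the field
+ * comments above, adaptive scan holds each interval for 'repeat' scans and
+ * 'exp' caps the back-off at scan_freq * 2^exp, e.g. scan_freq = 60 s with
+ * exp = 2 backs off to at most 240 s.  This interpretation is an assumption
+ * based only on the comments.
+ */
+static int32 wl_pfn_max_scan_freq(const wl_pfn_param_t *p)
+{
+	return p->scan_freq << p->exp;	/* scan_freq * 2^exp */
+}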
+
+typedef struct wl_pfn_bssid {
+	struct ether_addr  macaddr;
+	/* Bit4: suppress_lost, Bit3: suppress_found */
+	uint16             flags;
+} wl_pfn_bssid_t;
+#define WL_PFN_SUPPRESSFOUND_MASK	0x08
+#define WL_PFN_SUPPRESSLOST_MASK	0x10
+#define WL_PFN_RSSI_MASK		0xff00
+#define WL_PFN_RSSI_SHIFT		8
+
+typedef struct wl_pfn_cfg {
+	uint32	reporttype;
+	int32	channel_num;
+	uint16	channel_list[WL_NUMCHANNELS];
+	uint32	flags;
+} wl_pfn_cfg_t;
+#define WL_PFN_REPORT_ALLNET    0
+#define WL_PFN_REPORT_SSIDNET   1
+#define WL_PFN_REPORT_BSSIDNET  2
+
+#define WL_PFN_CFG_FLAGS_PROHIBITED	0x00000001	/* Accept and use prohibited channels */
+#define WL_PFN_CFG_FLAGS_RESERVED	0xfffffffe	/* Remaining reserved for future use */
+
+typedef struct wl_pfn {
+	wlc_ssid_t		ssid;			/* ssid name and its length */
+	int32			flags;			/* bit2: hidden */
+	int32			infra;			/* BSS Vs IBSS */
+	int32			auth;			/* Open Vs Closed */
+	int32			wpa_auth;		/* WPA type */
+	int32			wsec;			/* wsec value */
+} wl_pfn_t;
+
+typedef struct wl_pfn_list {
+	uint32		version;
+	uint32		enabled;
+	uint32		count;
+	wl_pfn_t	pfn[1];
+} wl_pfn_list_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct pfn_olmsg_params_t {
+	wlc_ssid_t ssid;
+	uint32	cipher_type;
+	uint32	auth_type;
+	uint8	channels[4];
+} BWL_POST_PACKED_STRUCT pfn_olmsg_params;
+
+#define WL_PFN_HIDDEN_BIT		2
+#define WL_PFN_HIDDEN_MASK		0x4
+
+#ifndef BESTN_MAX
+#define BESTN_MAX			3
+#endif
+
+#ifndef MSCAN_MAX
+#define MSCAN_MAX			90
+#endif
+
+/* Service discovery */
+typedef struct {
+	uint8	transaction_id;	/* Transaction id */
+	uint8	protocol;	/* Service protocol type */
+	uint16	query_len;	/* Length of query */
+	uint16	response_len;	/* Length of response */
+	uint8	qrbuf[1];
+} wl_p2po_qr_t;
+
+typedef struct {
+	uint16			period;			/* extended listen period */
+	uint16			interval;		/* extended listen interval */
+} wl_p2po_listen_t;
+
+/* ANQP offload */
+
+#define ANQPO_MAX_QUERY_SIZE		256
+typedef struct {
+	uint16 max_retransmit;		/* ~0 use default, max retransmit on no ACK from peer */
+	uint16 response_timeout;	/* ~0 use default, msec to wait for resp after tx packet */
+	uint16 max_comeback_delay;	/* ~0 use default, max comeback delay in resp else fail */
+	uint16 max_retries;			/* ~0 use default, max retries on failure */
+	uint16 query_len;			/* length of ANQP query */
+	uint8 query_data[1];		/* ANQP encoded query (max ANQPO_MAX_QUERY_SIZE) */
+} wl_anqpo_set_t;
+
+typedef struct {
+	uint16 channel;				/* channel of the peer */
+	struct ether_addr addr;		/* addr of the peer */
+} wl_anqpo_peer_t;
+
+#define ANQPO_MAX_PEER_LIST			64
+typedef struct {
+	uint16 count;				/* number of peers in list */
+	wl_anqpo_peer_t peer[1];	/* max ANQPO_MAX_PEER_LIST */
+} wl_anqpo_peer_list_t;
+
+#define ANQPO_MAX_IGNORE_SSID		64
+typedef struct {
+	bool is_clear;				/* set to clear list (not used on GET) */
+	uint16 count;				/* number of SSID in list */
+	wlc_ssid_t ssid[1];			/* max ANQPO_MAX_IGNORE_SSID */
+} wl_anqpo_ignore_ssid_list_t;
+
+#define ANQPO_MAX_IGNORE_BSSID		64
+typedef struct {
+	bool is_clear;				/* set to clear list (not used on GET) */
+	uint16 count;				/* number of addr in list */
+	struct ether_addr bssid[1];	/* max ANQPO_MAX_IGNORE_BSSID */
+} wl_anqpo_ignore_bssid_list_t;
+
+
+struct toe_ol_stats_t {
+	/* Num of tx packets that don't need to be checksummed */
+	uint32 tx_summed;
+
+	/* Num of tx packets where checksum is filled by offload engine */
+	uint32 tx_iph_fill;
+	uint32 tx_tcp_fill;
+	uint32 tx_udp_fill;
+	uint32 tx_icmp_fill;
+
+	/*  Num of rx packets where toe finds out if checksum is good or bad */
+	uint32 rx_iph_good;
+	uint32 rx_iph_bad;
+	uint32 rx_tcp_good;
+	uint32 rx_tcp_bad;
+	uint32 rx_udp_good;
+	uint32 rx_udp_bad;
+	uint32 rx_icmp_good;
+	uint32 rx_icmp_bad;
+
+	/* Num of tx packets in which csum error is injected */
+	uint32 tx_tcp_errinj;
+	uint32 tx_udp_errinj;
+	uint32 tx_icmp_errinj;
+
+	/* Num of rx packets in which csum error is injected */
+	uint32 rx_tcp_errinj;
+	uint32 rx_udp_errinj;
+	uint32 rx_icmp_errinj;
+};
+
+/* Arp offload statistic counts */
+struct arp_ol_stats_t {
+	uint32  host_ip_entries;	/* Host IP table addresses (more than one if multihomed) */
+	uint32  host_ip_overflow;	/* Host IP table additions skipped due to overflow */
+
+	uint32  arp_table_entries;	/* ARP table entries */
+	uint32  arp_table_overflow;	/* ARP table additions skipped due to overflow */
+
+	uint32  host_request;		/* ARP requests from host */
+	uint32  host_reply;		/* ARP replies from host */
+	uint32  host_service;		/* ARP requests from host serviced by ARP Agent */
+
+	uint32  peer_request;		/* ARP requests received from network */
+	uint32  peer_request_drop;	/* ARP requests from network that were dropped */
+	uint32  peer_reply;		/* ARP replies received from network */
+	uint32  peer_reply_drop;	/* ARP replies from network that were dropped */
+	uint32  peer_service;		/* ARP requests from network serviced by ARP Agent */
+};
+
+/* NS offload statistic counts */
+struct nd_ol_stats_t {
+	uint32  host_ip_entries;    /* Host IP table addresses (more than one if multihomed) */
+	uint32  host_ip_overflow;   /* Host IP table additions skipped due to overflow */
+	uint32  peer_request;       /* NS requests received from network */
+	uint32  peer_request_drop;  /* NS requests from network that were dropped */
+	uint32  peer_reply_drop;    /* NA replies from network that were dropped */
+	uint32  peer_service;       /* NS requests from network serviced by firmware */
+};
+
+/*
+ * Keep-alive packet offloading.
+ */
+
+/* NAT keep-alive packets format: specifies the re-transmission period, the packet
+ * length, and packet contents.
+ */
+typedef struct wl_keep_alive_pkt {
+	uint32	period_msec;	/* Retransmission period (0 to disable packet re-transmits) */
+	uint16	len_bytes;	/* Size of packet to transmit (0 to disable packet re-transmits) */
+	uint8	data[1];	/* Variable length packet to transmit.  Contents should include
+				 * entire ethernet packet (enet header, IP header, UDP header,
+				 * and UDP payload) in network byte order.
+				 */
+} wl_keep_alive_pkt_t;
+
+#define WL_KEEP_ALIVE_FIXED_LEN		OFFSETOF(wl_keep_alive_pkt_t, data)
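+
+/* Illustrative sketch, not part of the original header: as with the
+ * mkeep-alive structure earlier, the ioctl buffer is the fixed part plus
+ * len_bytes of frame data (a complete ethernet frame in network byte order).
+ * The helper name is hypothetical.
+ */
+static uint wl_keep_alive_buf_len(const wl_keep_alive_pkt_t *ka)
+{
+	return WL_KEEP_ALIVE_FIXED_LEN + ka->len_bytes;
+}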
+
+
+/*
+ * Dongle pattern matching filter.
+ */
+
+#define MAX_WAKE_PACKET_CACHE_BYTES 128 /* Maximum cached wake packet */
+
+#define MAX_WAKE_PACKET_BYTES	    (DOT11_A3_HDR_LEN +			    \
+				     DOT11_QOS_LEN +			    \
+				     sizeof(struct dot11_llc_snap_header) + \
+				     ETHER_MAX_DATA)
+
+typedef struct pm_wake_packet {
+	uint32	status;		/* Is the wake reason a packet (if so, the other fields are valid) */
+	uint32	pattern_id;	/* Pattern ID that matched */
+	uint32	original_packet_size;
+	uint32	saved_packet_size;
+	uchar	packet[MAX_WAKE_PACKET_CACHE_BYTES];
+} pm_wake_packet_t;
+
+/* Packet filter types. Currently, only pattern matching is supported. */
+typedef enum wl_pkt_filter_type {
+	WL_PKT_FILTER_TYPE_PATTERN_MATCH=0,	/* Pattern matching filter */
+	WL_PKT_FILTER_TYPE_MAGIC_PATTERN_MATCH=1, /* Magic packet match */
+	WL_PKT_FILTER_TYPE_PATTERN_LIST_MATCH=2	/* A pattern list (match all to match filter) */
+} wl_pkt_filter_type_t;
+
+#define WL_PKT_FILTER_TYPE wl_pkt_filter_type_t
+
+/* String mapping for types that may be used by applications or debug */
+#define WL_PKT_FILTER_TYPE_NAMES \
+	{ "PATTERN", WL_PKT_FILTER_TYPE_PATTERN_MATCH },       \
+	{ "MAGIC",   WL_PKT_FILTER_TYPE_MAGIC_PATTERN_MATCH }, \
+	{ "PATLIST", WL_PKT_FILTER_TYPE_PATTERN_LIST_MATCH }
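+
+/* Illustrative sketch, not part of the original header: the names macro is
+ * shaped for brace-initializing a {name, type} lookup table for debug output.
+ * The table name is hypothetical.
+ */
+static const struct {
+	const char *name;
+	uint32 type;
+} pkt_filter_type_names[] = { WL_PKT_FILTER_TYPE_NAMES };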
+
+/* Pattern matching filter. Specifies an offset within received packets to
+ * start matching, the pattern to match, the size of the pattern, and a bitmask
+ * that indicates which bits within the pattern should be matched.
+ */
+typedef struct wl_pkt_filter_pattern {
+	uint32	offset;		/* Offset within received packet to start pattern matching.
+				 * Offset '0' is the first byte of the ethernet header.
+				 */
+	uint32	size_bytes;	/* Size of the pattern.  Bitmask must be the same size. */
+	uint8   mask_and_pattern[1]; /* Variable length mask and pattern data.  mask starts
+				      * at offset 0.  Pattern immediately follows mask.
+				      */
+} wl_pkt_filter_pattern_t;
+
+/* A pattern list is a numerically specified list of modified pattern structures. */
+typedef struct wl_pkt_filter_pattern_listel {
+	uint16 rel_offs;	/* Offset to begin match (relative to 'base' below) */
+	uint16 base_offs;	/* Base for offset (defined below) */
+	uint16 size_bytes;	/* Size of mask/pattern */
+	uint16 match_flags;	/* Additional flags controlling the match */
+	uint8  mask_and_data[1]; /* Variable length mask followed by data, each size_bytes */
+} wl_pkt_filter_pattern_listel_t;
+
+typedef struct wl_pkt_filter_pattern_list {
+	uint8 list_cnt;		/* Number of elements in the list */
+	uint8 PAD1[1];		/* Reserved (may become a version field) */
+	uint16 totsize;		/* Total size of this pattern list (includes this struct) */
+	wl_pkt_filter_pattern_listel_t patterns[1]; /* Variable number of list elements */
+} wl_pkt_filter_pattern_list_t;
+
+/* IOVAR "pkt_filter_add" parameter. Used to install packet filters. */
+typedef struct wl_pkt_filter {
+	uint32	id;		/* Unique filter id, specified by app. */
+	uint32	type;		/* Filter type (WL_PKT_FILTER_TYPE_xxx). */
+	uint32	negate_match;	/* Negate the result of filter matches */
+	union {			/* Filter definitions */
+		wl_pkt_filter_pattern_t pattern;	/* Pattern matching filter */
+		wl_pkt_filter_pattern_list_t patlist; /* List of patterns to match */
+	} u;
+} wl_pkt_filter_t;
+
+/* IOVAR "tcp_keep_set" parameter. Used to install tcp keep_alive stuff. */
+typedef struct wl_tcp_keep_set {
+	uint32	val1;
+	uint32	val2;
+} wl_tcp_keep_set_t;
+
+#define WL_PKT_FILTER_FIXED_LEN		  OFFSETOF(wl_pkt_filter_t, u)
+#define WL_PKT_FILTER_PATTERN_FIXED_LEN	  OFFSETOF(wl_pkt_filter_pattern_t, mask_and_pattern)
+#define WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN OFFSETOF(wl_pkt_filter_pattern_list_t, patterns)
+#define WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN	\
+			OFFSETOF(wl_pkt_filter_pattern_listel_t, mask_and_data)
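+
+/* Illustrative sketch, not part of the original header: a pattern filter
+ * carries the mask and then the pattern back to back, each size_bytes long,
+ * so a complete "pkt_filter_add" buffer is the filter header, the pattern
+ * header, then 2 * size_bytes of mask+pattern data.  The helper name is
+ * hypothetical.
+ */
+static uint wl_pkt_filter_pattern_buf_len(uint32 size_bytes)
+{
+	return WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN +
+	       (2 * size_bytes);
+}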
+
+/* IOVAR "pkt_filter_enable" parameter. */
+typedef struct wl_pkt_filter_enable {
+	uint32	id;		/* Unique filter id */
+	uint32	enable;		/* Enable/disable bool */
+} wl_pkt_filter_enable_t;
+
+/* IOVAR "pkt_filter_list" parameter. Used to retrieve a list of installed filters. */
+typedef struct wl_pkt_filter_list {
+	uint32	num;		/* Number of installed packet filters */
+	wl_pkt_filter_t	filter[1];	/* Variable array of packet filters. */
+} wl_pkt_filter_list_t;
+
+#define WL_PKT_FILTER_LIST_FIXED_LEN	  OFFSETOF(wl_pkt_filter_list_t, filter)
+
+/* IOVAR "pkt_filter_stats" parameter. Used to retrieve debug statistics. */
+typedef struct wl_pkt_filter_stats {
+	uint32	num_pkts_matched;	/* # filter matches for specified filter id */
+	uint32	num_pkts_forwarded;	/* # packets fwded from dongle to host for all filters */
+	uint32	num_pkts_discarded;	/* # packets discarded by dongle for all filters */
+} wl_pkt_filter_stats_t;
+
+/* IOVAR "pkt_filter_ports" parameter.  Configure TCP/UDP port filters. */
+typedef struct wl_pkt_filter_ports {
+	uint8 version;		/* structure version */
+	uint8 reserved;		/* reserved; keeps ports[] aligned */
+	uint16 count;		/* Number of ports following */
+	/* End of fixed data */
+	uint16 ports[1];	/* Placeholder for ports[<count>] */
+} wl_pkt_filter_ports_t;
+
+#define WL_PKT_FILTER_PORTS_FIXED_LEN	OFFSETOF(wl_pkt_filter_ports_t, ports)
+
+#define WL_PKT_FILTER_PORTS_VERSION	0
+#define WL_PKT_FILTER_PORTS_MAX		128
+
+#define RSN_KCK_LENGTH 16
+#define RSN_KEK_LENGTH 16
+#define RSN_REPLAY_LEN 8
+typedef struct _gtkrefresh {
+	uchar	KCK[RSN_KCK_LENGTH];
+	uchar	KEK[RSN_KEK_LENGTH];
+	uchar	ReplayCounter[RSN_REPLAY_LEN];
+} gtk_keyinfo_t, *pgtk_keyinfo_t;
+
+/* Sequential Commands ioctl */
+typedef struct wl_seq_cmd_ioctl {
+	uint32 cmd;		/* common ioctl definition */
+	uint32 len;		/* length of user buffer */
+} wl_seq_cmd_ioctl_t;
+
+#define WL_SEQ_CMD_ALIGN_BYTES	4
+
+/* These are the set of get IOCTLs that should be allowed when using
+ * IOCTL sequence commands. These are issued implicitly by wl.exe each time
+ * it is invoked. We never want to buffer these, or else wl.exe will stop working.
+ */
+#define WL_SEQ_CMDS_GET_IOCTL_FILTER(cmd) \
+	(((cmd) == WLC_GET_MAGIC)		|| \
+	 ((cmd) == WLC_GET_VERSION)		|| \
+	 ((cmd) == WLC_GET_AP)			|| \
+	 ((cmd) == WLC_GET_INSTANCE))
+
+typedef struct wl_pkteng {
+	uint32 flags;
+	uint32 delay;			/* Inter-packet delay */
+	uint32 nframes;			/* Number of frames */
+	uint32 length;			/* Packet length */
+	uint8  seqno;			/* Enable/disable sequence no. */
+	struct ether_addr dest;		/* Destination address */
+	struct ether_addr src;		/* Source address */
+} wl_pkteng_t;
+
+typedef struct wl_pkteng_stats {
+	uint32 lostfrmcnt;		/* RX PER test: no of frames lost (skip seqno) */
+	int32 rssi;			/* RSSI */
+	int32 snr;			/* signal to noise ratio */
+	uint16 rxpktcnt[NUM_80211_RATES+1];
+	uint8 rssi_qdb;			/* qdB portion of the computed rssi */
+} wl_pkteng_stats_t;
+
+
+typedef enum {
+	wowl_pattern_type_bitmap = 0,
+	wowl_pattern_type_arp,
+	wowl_pattern_type_na
+} wowl_pattern_type_t;
+
+typedef struct wl_wowl_pattern {
+	uint32		    masksize;		/* Size of the mask in #of bytes */
+	uint32		    offset;		/* Pattern byte offset in packet */
+	uint32		    patternoffset;	/* Offset of start of pattern in the structure */
+	uint32		    patternsize;	/* Size of the pattern itself in #of bytes */
+	uint32		    id;			/* id */
+	uint32		    reasonsize;		/* Size of the wakeup reason code */
+	wowl_pattern_type_t type;		/* Type of pattern */
+	/* Mask follows the structure above */
+	/* Pattern follows the mask is at 'patternoffset' from the start */
+} wl_wowl_pattern_t;
+
+typedef struct wl_wowl_pattern_list {
+	uint			count;
+	wl_wowl_pattern_t	pattern[1];
+} wl_wowl_pattern_list_t;
+
+typedef struct wl_wowl_wakeind {
+	uint8	pci_wakeind;	/* Whether PCI PMECSR PMEStatus bit was set */
+	uint32	ucode_wakeind;	/* What wakeup-event indication was set by ucode */
+} wl_wowl_wakeind_t;
+
+typedef struct {
+	uint32		pktlen;		    /* size of packet */
+	void		*sdu;
+} tcp_keepalive_wake_pkt_infop_t;
+
+/* per AC rate control related data structure */
+typedef struct wl_txrate_class {
+	uint8		init_rate;
+	uint8		min_rate;
+	uint8		max_rate;
+} wl_txrate_class_t;
+
+/* structure for Overlap BSS scan arguments */
+typedef struct wl_obss_scan_arg {
+	int16	passive_dwell;
+	int16	active_dwell;
+	int16	bss_widthscan_interval;
+	int16	passive_total;
+	int16	active_total;
+	int16	chanwidth_transition_delay;
+	int16	activity_threshold;
+} wl_obss_scan_arg_t;
+
+#define WL_OBSS_SCAN_PARAM_LEN	sizeof(wl_obss_scan_arg_t)
+
+/* RSSI event notification configuration. */
+typedef struct wl_rssi_event {
+	uint32 rate_limit_msec;		/* # of events posted to application will be limited to
+					 * one per specified period (0 to disable rate limit).
+					 */
+	uint8 num_rssi_levels;		/* Number of entries in rssi_levels[] below */
+	int8 rssi_levels[MAX_RSSI_LEVELS];	/* Variable number of RSSI levels. An event
+						 * will be posted each time the RSSI of received
+						 * beacons/packets crosses a level.
+						 */
+} wl_rssi_event_t;
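+
+/* Illustrative sketch, not part of the original header: an event fires each
+ * time the RSSI of received beacons/packets crosses a configured level, so
+ * callers typically fill rssi_levels[] in ascending order.  The thresholds
+ * below are example values only and assume MAX_RSSI_LEVELS >= 3.
+ */
+static void wl_rssi_event_example(wl_rssi_event_t *ev)
+{
+	ev->rate_limit_msec = 1000;	/* at most one event per second */
+	ev->num_rssi_levels = 3;
+	ev->rssi_levels[0] = -80;
+	ev->rssi_levels[1] = -70;
+	ev->rssi_levels[2] = -60;
+}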
+
+typedef struct wl_action_obss_coex_req {
+	uint8 info;
+	uint8 num;
+	uint8 ch_list[1];
+} wl_action_obss_coex_req_t;
+
+
+/* IOVar parameter block for small MAC address array with type indicator */
+#define WL_IOV_MAC_PARAM_LEN  4
+
+#define WL_IOV_PKTQ_LOG_PRECS 16
+
+typedef struct {
+	uint32 num_addrs;
+	char   addr_type[WL_IOV_MAC_PARAM_LEN];
+	struct ether_addr ea[WL_IOV_MAC_PARAM_LEN];
+} wl_iov_mac_params_t;
+
+/* This is extra info that follows wl_iov_mac_params_t */
+typedef struct {
+	uint32 addr_info[WL_IOV_MAC_PARAM_LEN];
+} wl_iov_mac_extra_params_t;
+
+/* Combined structure */
+typedef struct {
+	wl_iov_mac_params_t params;
+	wl_iov_mac_extra_params_t extra_params;
+} wl_iov_mac_full_params_t;
+
+/* Parameter block for PKTQ_LOG statistics */
+#define PKTQ_LOG_COUNTERS_V4 \
+	/* packets requested to be stored */ \
+	uint32 requested; \
+	/* packets stored */ \
+	uint32 stored; \
	/* packets saved because a lower-priority queue gave up a packet instead */ \
+	uint32 saved; \
+	/* packets saved, because an older packet from the same queue has been dropped */ \
+	uint32 selfsaved; \
+	/* packets dropped, because pktq is full with higher precedence packets */ \
+	uint32 full_dropped; \
+	 /* packets dropped because pktq per that precedence is full */ \
+	uint32 dropped; \
	/* packets dropped to make room for a higher-priority packet */ \
+	uint32 sacrificed; \
	/* packets dropped because of hardware/transmission error */ \
+	uint32 busy; \
+	/* packets re-sent because they were not received */ \
+	uint32 retry; \
	/* packets retried again (ps pretend) prior to moving to power save mode */ \
+	uint32 ps_retry; \
+	 /* suppressed packet count */ \
+	uint32 suppress; \
+	/* packets finally dropped after retry limit */ \
+	uint32 retry_drop; \
+	/* the high-water mark of the queue capacity for packets - goes to zero as queue fills */ \
+	uint32 max_avail; \
+	/* the high-water mark of the queue utilisation for packets - ('inverse' of max_avail) */ \
+	uint32 max_used; \
+	 /* the maximum capacity of the queue */ \
+	uint32 queue_capacity; \
+	/* count of rts attempts that failed to receive cts */ \
+	uint32 rtsfail; \
+	/* count of packets sent (acked) successfully */ \
+	uint32 acked; \
+	/* running total of phy rate of packets sent successfully */ \
+	uint32 txrate_succ; \
+	/* running total of phy 'main' rate */ \
+	uint32 txrate_main; \
+	/* actual data transferred successfully */ \
+	uint32 throughput; \
+	/* time difference since last pktq_stats */ \
+	uint32 time_delta;
+
+typedef struct {
+	PKTQ_LOG_COUNTERS_V4
+} pktq_log_counters_v04_t;
+
+/* v5 is the same as V4 with extra parameter */
+typedef struct {
+	PKTQ_LOG_COUNTERS_V4
+	/* cumulative time to transmit */
+	uint32 airtime;
+} pktq_log_counters_v05_t;
+
+typedef struct {
+	uint8                num_prec[WL_IOV_MAC_PARAM_LEN];
+	pktq_log_counters_v04_t  counters[WL_IOV_MAC_PARAM_LEN][WL_IOV_PKTQ_LOG_PRECS];
+	uint32               counter_info[WL_IOV_MAC_PARAM_LEN];
+	uint32               pspretend_time_delta[WL_IOV_MAC_PARAM_LEN];
+	char                 headings[1];
+} pktq_log_format_v04_t;
+
+typedef struct {
+	uint8                num_prec[WL_IOV_MAC_PARAM_LEN];
+	pktq_log_counters_v05_t  counters[WL_IOV_MAC_PARAM_LEN][WL_IOV_PKTQ_LOG_PRECS];
+	uint32               counter_info[WL_IOV_MAC_PARAM_LEN];
+	uint32               pspretend_time_delta[WL_IOV_MAC_PARAM_LEN];
+	char                 headings[1];
+} pktq_log_format_v05_t;
+
+
+typedef struct {
+	uint32               version;
+	wl_iov_mac_params_t  params;
+	union {
+		pktq_log_format_v04_t v04;
+		pktq_log_format_v05_t v05;
+	} pktq_log;
+} wl_iov_pktq_log_t;
+
+/* PKTQ_LOG_AUTO and PKTQ_LOG_DEF_PREC flags were introduced in v05; v04 ignores them */
+#define PKTQ_LOG_AUTO     (1 << 31)
+#define PKTQ_LOG_DEF_PREC (1 << 30)
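+
+/* Illustrative sketch, not part of the original header: the reply carries its
+ * format version, so readers pick the matching union arm; v05 is v04 plus the
+ * trailing airtime counter.  Assumes version numbers 4 and 5 correspond to the
+ * v04/v05 arm names and that mac/prec are within the array bounds.
+ */
+static uint32 pktq_log_acked(const wl_iov_pktq_log_t *log, int mac, int prec)
+{
+	return (log->version == 5) ?
+		log->pktq_log.v05.counters[mac][prec].acked :
+		log->pktq_log.v04.counters[mac][prec].acked;
+}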
+
+/*
+ * SCB_BS_DATA iovar definitions start.
+ */
+#define SCB_BS_DATA_STRUCT_VERSION	1
+
+/* The actual counters maintained for each station */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	/* The following counters are a subset of what pktq_stats provides per precedence. */
+	uint32 retry;          /* packets re-sent because they were not received */
+	uint32 retry_drop;     /* packets finally dropped after retry limit */
+	uint32 rtsfail;        /* count of rts attempts that failed to receive cts */
+	uint32 acked;          /* count of packets sent (acked) successfully */
+	uint32 txrate_succ;    /* running total of phy rate of packets sent successfully */
+	uint32 txrate_main;    /* running total of phy 'main' rate */
+	uint32 throughput;     /* actual data transferred successfully */
+	uint32 time_delta;     /* time difference since last pktq_stats */
+	uint32 airtime;        /* cumulative total medium access delay in microseconds */
+} BWL_POST_PACKED_STRUCT iov_bs_data_counters_t;
+
+/* The structure for individual station information. */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	struct ether_addr	station_address;	/* The station MAC address */
+	uint16			station_flags;		/* Bit mask of flags, for future use. */
+	iov_bs_data_counters_t	station_counters;	/* The actual counter values */
+} BWL_POST_PACKED_STRUCT iov_bs_data_record_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint16	structure_version;	/* Structure version number (for wl/wlu matching) */
+	uint16	structure_count;	/* Number of iov_bs_data_record_t records following */
+	iov_bs_data_record_t	structure_record[1];	/* 0 - structure_count records */
+} BWL_POST_PACKED_STRUCT iov_bs_data_struct_t;
+
+/* Bitmask of options that can be passed in to the iovar. */
+enum {
+	SCB_BS_DATA_FLAG_NO_RESET = (1<<0)	/* Do not clear the counters after reading */
+};
+/*
+ * SCB_BS_DATA iovar definitions end.
+ */
+
+typedef struct wlc_extlog_cfg {
+	int max_number;
+	uint16 module;	/* bitmap */
+	uint8 level;
+	uint8 flag;
+	uint16 version;
+} wlc_extlog_cfg_t;
+
+typedef struct log_record {
+	uint32 time;
+	uint16 module;
+	uint16 id;
+	uint8 level;
+	uint8 sub_unit;
+	uint8 seq_num;
+	int32 arg;
+	char str[MAX_ARGSTR_LEN];
+} log_record_t;
+
+typedef struct wlc_extlog_req {
+	uint32 from_last;
+	uint32 num;
+} wlc_extlog_req_t;
+
+typedef struct wlc_extlog_results {
+	uint16 version;
+	uint16 record_len;
+	uint32 num;
+	log_record_t logs[1];
+} wlc_extlog_results_t;
+
+typedef struct log_idstr {
+	uint16	id;
+	uint16	flag;
+	uint8	arg_type;
+	const char	*fmt_str;
+} log_idstr_t;
+
+#define FMTSTRF_USER		1
+
+/* flat ID definitions
+ * New definitions HAVE TO BE ADDED at the end of the table. Otherwise, it will
+ * affect backward compatibility with pre-existing apps
+ */
+typedef enum {
+	FMTSTR_DRIVER_UP_ID = 0,
+	FMTSTR_DRIVER_DOWN_ID = 1,
+	FMTSTR_SUSPEND_MAC_FAIL_ID = 2,
+	FMTSTR_NO_PROGRESS_ID = 3,
+	FMTSTR_RFDISABLE_ID = 4,
+	FMTSTR_REG_PRINT_ID = 5,
+	FMTSTR_EXPTIME_ID = 6,
+	FMTSTR_JOIN_START_ID = 7,
+	FMTSTR_JOIN_COMPLETE_ID = 8,
+	FMTSTR_NO_NETWORKS_ID = 9,
+	FMTSTR_SECURITY_MISMATCH_ID = 10,
+	FMTSTR_RATE_MISMATCH_ID = 11,
+	FMTSTR_AP_PRUNED_ID = 12,
+	FMTSTR_KEY_INSERTED_ID = 13,
+	FMTSTR_DEAUTH_ID = 14,
+	FMTSTR_DISASSOC_ID = 15,
+	FMTSTR_LINK_UP_ID = 16,
+	FMTSTR_LINK_DOWN_ID = 17,
+	FMTSTR_RADIO_HW_OFF_ID = 18,
+	FMTSTR_RADIO_HW_ON_ID = 19,
+	FMTSTR_EVENT_DESC_ID = 20,
+	FMTSTR_PNP_SET_POWER_ID = 21,
+	FMTSTR_RADIO_SW_OFF_ID = 22,
+	FMTSTR_RADIO_SW_ON_ID = 23,
+	FMTSTR_PWD_MISMATCH_ID = 24,
+	FMTSTR_FATAL_ERROR_ID = 25,
+	FMTSTR_AUTH_FAIL_ID = 26,
+	FMTSTR_ASSOC_FAIL_ID = 27,
+	FMTSTR_IBSS_FAIL_ID = 28,
+	FMTSTR_EXTAP_FAIL_ID = 29,
+	FMTSTR_MAX_ID
+} log_fmtstr_id_t;
+
+#ifdef DONGLEOVERLAYS
+typedef struct {
+	uint32 flags_idx;	/* lower 8 bits: overlay index; upper 24 bits: flags */
+	uint32 offset;		/* offset into overlay region to write code */
+	uint32 len;			/* overlay code len */
+	/* overlay code follows this struct */
+} wl_ioctl_overlay_t;
+#endif /* DONGLEOVERLAYS */
+
+/* 11k Neighbor Report element */
+typedef struct nbr_element {
+	uint8 id;
+	uint8 len;
+	struct ether_addr bssid;
+	uint32 bssid_info;
+	uint8 reg;
+	uint8 channel;
+	uint8 phytype;
+	uint8 pad;
+} nbr_element_t;
+
+/* no default structure packing */
+#include <packed_section_end.h>
+
+typedef struct keepalives_max_idle {
+	uint16  keepalive_count;        /* number of keepalives per bss_max_idle period */
+	uint8   mkeepalive_index;       /* mkeepalive_index for keepalive frame to be used */
+	uint8   PAD;			/* to align next field */
+	uint16  max_interval;           /* seconds */
+} keepalives_max_idle_t;
+
+#define PM_IGNORE_BCMC_PROXY_ARP (1 << 0)
+#define PM_IGNORE_BCMC_ALL_DMS_ACCEPTED (1 << 1)
+
+/* require strict packing */
+#include <packed_section_start.h>
+
+
+/* Structures and constants used for "vndr_ie" IOVar interface */
+#define VNDR_IE_CMD_LEN		4	/* length of the set command string:
+					 * "add", "del" (+ NUL)
+					 */
+
+#define VNDR_IE_INFO_HDR_LEN	(sizeof(uint32))
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 pktflag;			/* bitmask indicating which packet(s) contain this IE */
+	vndr_ie_t vndr_ie_data;		/* vendor IE data */
+} BWL_POST_PACKED_STRUCT vndr_ie_info_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	int iecount;			/* number of entries in the vndr_ie_list[] array */
+	vndr_ie_info_t vndr_ie_list[1];	/* variable size list of vndr_ie_info_t structs */
+} BWL_POST_PACKED_STRUCT vndr_ie_buf_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	char cmd[VNDR_IE_CMD_LEN];	/* vndr_ie IOVar set command : "add", "del" + NUL */
+	vndr_ie_buf_t vndr_ie_buffer;	/* buffer containing Vendor IE list information */
+} BWL_POST_PACKED_STRUCT vndr_ie_setbuf_t;
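+
+/*
+ * Composition sketch (illustrative; 'buf' and the pktflag value chosen are
+ * assumptions of the example, not values defined here): a minimal vndr_ie
+ * "add" request.
+ *
+ *	vndr_ie_setbuf_t *b = (vndr_ie_setbuf_t *)buf;
+ *	strncpy(b->cmd, "add", VNDR_IE_CMD_LEN - 1);
+ *	b->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+ *	b->vndr_ie_buffer.iecount = 1;
+ *	b->vndr_ie_buffer.vndr_ie_list[0].pktflag = 0x1;  (assumed: beacons)
+ *	(then fill vndr_ie_list[0].vndr_ie_data: id, len, OUI and payload)
+ */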
+
+/* tag_ID/length/value_buffer tuple */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8	id;
+	uint8	len;
+	uint8	data[1];
+} BWL_POST_PACKED_STRUCT tlv_t;
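+
+/*
+ * Traversal sketch (illustrative; 'buf', 'buflen' and 'use()' are
+ * assumptions of the example): walking a buffer of id/len/value tuples.
+ *
+ *	uint8 *p = buf;
+ *	while (p + 2 <= buf + buflen && p + 2 + p[1] <= buf + buflen) {
+ *		tlv_t *t = (tlv_t *)p;
+ *		use(t->id, t->len, t->data);
+ *		p += 2 + t->len;
+ *	}
+ */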
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 pktflag;			/* bitmask indicating which packet(s) contain this IE */
+	tlv_t ie_data;		/* IE data */
+} BWL_POST_PACKED_STRUCT ie_info_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	int iecount;			/* number of entries in the ie_list[] array */
+	ie_info_t ie_list[1];	/* variable size list of ie_info_t structs */
+} BWL_POST_PACKED_STRUCT ie_buf_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	char cmd[VNDR_IE_CMD_LEN];	/* ie IOVar set command : "add" + NUL */
+	ie_buf_t ie_buffer;	/* buffer containing IE list information */
+} BWL_POST_PACKED_STRUCT ie_setbuf_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 pktflag;		/* bitmask indicating which packet(s) contain this IE */
+	uint8 id;		/* IE type */
+} BWL_POST_PACKED_STRUCT ie_getbuf_t;
+
+/* structures used to define format of wps ie data from probe requests */
+/* passed up to applications via iovar "prbreq_wpsie" */
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_hdr {
+	struct ether_addr staAddr;
+	uint16 ieLen;
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_hdr_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_data {
+	sta_prbreq_wps_ie_hdr_t hdr;
+	uint8 ieData[1];
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_list {
+	uint32 totLen;
+	uint8 ieDataList[1];
+} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_list_t;
+
+
+#ifdef WLMEDIA_TXFAILEVENT
+typedef BWL_PRE_PACKED_STRUCT struct {
+	char   dest[ETHER_ADDR_LEN]; /* destination MAC */
+	uint8  prio;            /* Packet Priority */
+	uint8  flags;           /* Flags           */
+	uint32 tsf_l;           /* TSF timer low   */
+	uint32 tsf_h;           /* TSF timer high  */
+	uint16 rates;           /* Main Rates      */
+	uint16 txstatus;        /* TX Status       */
+} BWL_POST_PACKED_STRUCT txfailinfo_t;
+#endif /* WLMEDIA_TXFAILEVENT */
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 flags;
+	chanspec_t chanspec;			/* txpwr report for this channel */
+	chanspec_t local_chanspec;		/* channel on which we are associated */
+	uint8 local_max;			/* local max according to the AP */
+	uint8 local_constraint;			/* local constraint according to the AP */
+	int8  antgain[2];			/* Ant gain for each band - from SROM */
+	uint8 rf_cores;				/* count of RF Cores being reported */
+	uint8 est_Pout[4];			/* Latest tx power out estimate per RF chain */
+	uint8 est_Pout_act[4]; /* Latest tx power out estimate per RF chain w/o adjustment */
+	uint8 est_Pout_cck;			/* Latest CCK tx power out estimate */
+	uint8 tx_power_max[4];		/* Maximum target power among all rates */
+	uint tx_power_max_rate_ind[4];		/* Index of the rate with the max target power */
+	int8 clm_limits[WL_NUMRATES];		/* regulatory limits - 20, 40 or 80MHz */
+	int8 clm_limits_subchan1[WL_NUMRATES];	/* regulatory limits - 20in40 or 40in80 */
+	int8 clm_limits_subchan2[WL_NUMRATES];	/* regulatory limits - 20in80MHz */
+	int8 sar;					/* SAR limit for display by wl executable */
+	int8 channel_bandwidth;		/* 20, 40 or 80 MHz bandwidth? */
+	uint8 version;				/* Version of the data format wlu <--> driver */
+	uint8 display_core;			/* Displayed curpower core */
+	int8 target_offsets[4];		/* Target power offsets for current rate per core */
+	uint32 last_tx_ratespec;	/* Ratespec for last transmission */
+	uint   user_target;		/* user limit */
+	uint32 board_limit_len;		/* length of board limit buffer */
+	uint32 target_len;		/* length of target power buffer */
+	int8 SARLIMIT[MAX_STREAMS_SUPPORTED];
+	uint8  pprdata[1];		/* ppr serialization buffer */
+} BWL_POST_PACKED_STRUCT tx_pwr_rpt_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	struct ipv4_addr	ipv4_addr;
+	struct ether_addr nexthop;
+} BWL_POST_PACKED_STRUCT ibss_route_entry_t;
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint32 num_entry;
+	ibss_route_entry_t route_entry[1];
+} BWL_POST_PACKED_STRUCT ibss_route_tbl_t;
+
+#define MAX_IBSS_ROUTE_TBL_ENTRY	64
+
+#define TXPWR_TARGET_VERSION  0
+typedef BWL_PRE_PACKED_STRUCT struct {
+	int32 version;		/* version number */
+	chanspec_t chanspec;	/* txpwr report for this channel */
+	int8 txpwr[WL_STA_ANT_MAX]; /* Max tx target power, in qdb */
+	uint8 rf_cores;		/* count of RF Cores being reported */
+} BWL_POST_PACKED_STRUCT txpwr_target_max_t;
+
+#define BSS_PEER_INFO_PARAM_CUR_VER	0
+/* Input structure for IOV_BSS_PEER_INFO */
+typedef BWL_PRE_PACKED_STRUCT	struct {
+	uint16			version;
+	struct	ether_addr ea;	/* peer MAC address */
+} BWL_POST_PACKED_STRUCT bss_peer_info_param_t;
+
+#define BSS_PEER_INFO_CUR_VER		0
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint16			version;
+	struct ether_addr	ea;
+	int32			rssi;
+	uint32			tx_rate;	/* current tx rate */
+	uint32			rx_rate;	/* current rx rate */
+	wl_rateset_t		rateset;	/* rateset in use */
+	uint32			age;		/* age in seconds */
+} BWL_POST_PACKED_STRUCT bss_peer_info_t;
+
+#define BSS_PEER_LIST_INFO_CUR_VER	0
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint16			version;
+	uint16			bss_peer_info_len;	/* length of bss_peer_info_t */
+	uint32			count;			/* number of peer info */
+	bss_peer_info_t		peer_info[1];		/* peer info */
+} BWL_POST_PACKED_STRUCT bss_peer_list_info_t;
+
+#define BSS_PEER_LIST_INFO_FIXED_LEN OFFSETOF(bss_peer_list_info_t, peer_info)
+
+#define AIBSS_BCN_FORCE_CONFIG_VER_0	0
+
+/* structure used to configure AIBSS beacon force xmit */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint16  version;
+	uint16	len;
+	uint32 initial_min_bcn_dur;	/* dur in ms to check a bcn in bcn_flood period */
+	uint32 min_bcn_dur;	/* dur in ms to check a bcn after bcn_flood period */
+	uint32 bcn_flood_dur; /* Initial bcn xmit period in ms */
+} BWL_POST_PACKED_STRUCT aibss_bcn_force_config_t;
+
+#define AIBSS_TXFAIL_CONFIG_VER_0    0
+
+/* structure used to configure aibss tx fail event */
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint16  version;
+	uint16  len;
+	uint32 bcn_timeout;     /* dur in seconds to receive 1 bcn */
+	uint32 max_tx_retry;     /* no of consecutive no acks to send txfail event */
+} BWL_POST_PACKED_STRUCT aibss_txfail_config_t;
+
+/* no strict structure packing */
+#include <packed_section_end.h>
+
+/* Global ASSERT Logging */
+#define ASSERTLOG_CUR_VER	0x0100
+#define MAX_ASSRTSTR_LEN	64
+
+typedef struct assert_record {
+	uint32 time;
+	uint8 seq_num;
+	char str[MAX_ASSRTSTR_LEN];
+} assert_record_t;
+
+typedef struct assertlog_results {
+	uint16 version;
+	uint16 record_len;
+	uint32 num;
+	assert_record_t logs[1];
+} assertlog_results_t;
+
+#define LOGRRC_FIX_LEN	8
+#define IOBUF_ALLOWED_NUM_OF_LOGREC(type, len) ((len - LOGRRC_FIX_LEN)/sizeof(type))
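+
+/* Example (illustrative): with a 512-byte IO buffer,
+ * IOBUF_ALLOWED_NUM_OF_LOGREC(log_record_t, 512) evaluates to
+ * (512 - 8) / sizeof(log_record_t), i.e. how many log_record_t entries
+ * fit after the 8-byte fixed header.
+ */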
+
+#ifdef BCMWAPI_WAI
+#define IV_LEN 16
+struct wapi_sta_msg_t
+{
+	uint16	msg_type;
+	uint16	datalen;
+	uint8	vap_mac[6];
+	uint8	reserve_data1[2];
+	uint8	sta_mac[6];
+	uint8	reserve_data2[2];
+	uint8	gsn[IV_LEN];
+	uint8	wie[256];
+};
+#endif /* BCMWAPI_WAI */
+
+/* chanim acs record */
+typedef struct {
+	bool valid;
+	uint8 trigger;
+	chanspec_t selected_chspc;
+	int8 bgnoise;
+	uint32 glitch_cnt;
+	uint8 ccastats;
+	uint timestamp;
+} chanim_acs_record_t;
+
+typedef struct {
+	chanim_acs_record_t acs_record[CHANIM_ACS_RECORD];
+	uint8 count;
+	uint timestamp;
+} wl_acs_record_t;
+
+typedef struct chanim_stats {
+	uint32 glitchcnt;               /* normalized as per second count */
+	uint32 badplcp;                 /* normalized as per second count */
+	uint8 ccastats[CCASTATS_MAX];   /* normalized as 0-255 */
+	int8 bgnoise;			/* background noise level (in dBm) */
+	chanspec_t chanspec;
+	uint32 timestamp;
+	uint32 bphy_glitchcnt;          /* normalized as per second count */
+	uint32 bphy_badplcp;            /* normalized as per second count */
+	uint8 chan_idle;                /* normalized as 0~255 */
+} chanim_stats_t;
+
+#define WL_CHANIM_STATS_VERSION 2
+
+typedef struct {
+	uint32 buflen;
+	uint32 version;
+	uint32 count;
+	chanim_stats_t stats[1];
+} wl_chanim_stats_t;
+
+#define WL_CHANIM_STATS_FIXED_LEN OFFSETOF(wl_chanim_stats_t, stats)
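+
+/* Sizing sketch (illustrative; 'n' is an assumption of the example): the
+ * FIXED_LEN/OFFSETOF idiom is how callers size buffers for structures that
+ * end in a [1] flexible array:
+ *
+ *	uint32 buflen = WL_CHANIM_STATS_FIXED_LEN + n * sizeof(chanim_stats_t);
+ */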
+
+/* Noise measurement metrics. */
+#define NOISE_MEASURE_KNOISE	0x1
+
+/* scb probe parameter */
+typedef struct {
+	uint32 scb_timeout;
+	uint32 scb_activity_time;
+	uint32 scb_max_probe;
+} wl_scb_probe_t;
+
+/* structure/defines for selective mgmt frame (smf) stats support */
+
+#define SMFS_VERSION 1
+/* selected mgmt frame (smf) stats element */
+typedef struct wl_smfs_elem {
+	uint32 count;
+	uint16 code;  /* SC or RC code */
+} wl_smfs_elem_t;
+
+typedef struct wl_smf_stats {
+	uint32 version;
+	uint16 length;	/* reserved for future usage */
+	uint8 type;
+	uint8 codetype;
+	uint32 ignored_cnt;
+	uint32 malformed_cnt;
+	uint32 count_total; /* count included the interested group */
+	wl_smfs_elem_t elem[1];
+} wl_smf_stats_t;
+
+#define WL_SMFSTATS_FIXED_LEN OFFSETOF(wl_smf_stats_t, elem)
+
+enum {
+	SMFS_CODETYPE_SC,
+	SMFS_CODETYPE_RC
+};
+
+typedef enum smfs_type {
+	SMFS_TYPE_AUTH,
+	SMFS_TYPE_ASSOC,
+	SMFS_TYPE_REASSOC,
+	SMFS_TYPE_DISASSOC_TX,
+	SMFS_TYPE_DISASSOC_RX,
+	SMFS_TYPE_DEAUTH_TX,
+	SMFS_TYPE_DEAUTH_RX,
+	SMFS_TYPE_MAX
+} smfs_type_t;
+
+#ifdef PHYMON
+
+#define PHYMON_VERSION 1
+
+typedef struct wl_phycal_core_state {
+	/* Tx IQ/LO calibration coeffs */
+	int16 tx_iqlocal_a;
+	int16 tx_iqlocal_b;
+	int8 tx_iqlocal_ci;
+	int8 tx_iqlocal_cq;
+	int8 tx_iqlocal_di;
+	int8 tx_iqlocal_dq;
+	int8 tx_iqlocal_ei;
+	int8 tx_iqlocal_eq;
+	int8 tx_iqlocal_fi;
+	int8 tx_iqlocal_fq;
+
+	/* Rx IQ calibration coeffs */
+	int16 rx_iqcal_a;
+	int16 rx_iqcal_b;
+
+	uint8 tx_iqlocal_pwridx; /* Tx Power Index for Tx IQ/LO calibration */
+	uint32 papd_epsilon_table[64]; /* PAPD epsilon table */
+	int16 papd_epsilon_offset; /* PAPD epsilon offset */
+	uint8 curr_tx_pwrindex; /* Tx power index */
+	int8 idle_tssi; /* Idle TSSI */
+	int8 est_tx_pwr; /* Estimated Tx Power (dB) */
+	int8 est_rx_pwr; /* Estimated Rx Power (dB) from RSSI */
+	uint16 rx_gaininfo; /* Rx gain applied on last Rx pkt */
+	uint16 init_gaincode; /* initgain required for ACI */
+	int8 estirr_tx;
+	int8 estirr_rx;
+
+} wl_phycal_core_state_t;
+
+typedef struct wl_phycal_state {
+	int version;
+	int8 num_phy_cores; /* number of cores */
+	int8 curr_temperature; /* on-chip temperature sensor reading */
+	chanspec_t chspec; /* channspec for this state */
+	bool aci_state; /* ACI state: ON/OFF */
+	uint16 crsminpower; /* crsminpower required for ACI */
+	uint16 crsminpowerl; /* crsminpowerl required for ACI */
+	uint16 crsminpoweru; /* crsminpoweru required for ACI */
+	wl_phycal_core_state_t phycal_core[1];
+} wl_phycal_state_t;
+
+#define WL_PHYCAL_STAT_FIXED_LEN OFFSETOF(wl_phycal_state_t, phycal_core)
+#endif /* PHYMON */
+
+/* discovery state */
+typedef struct wl_p2p_disc_st {
+	uint8 state;	/* see state */
+	chanspec_t chspec;	/* valid in listen state */
+	uint16 dwell;	/* valid in listen state, in ms */
+} wl_p2p_disc_st_t;
+
+/* scan request */
+typedef struct wl_p2p_scan {
+	uint8 type;		/* 'S' for WLC_SCAN, 'E' for "escan" */
+	uint8 reserved[3];
+	/* scan or escan parms... */
+} wl_p2p_scan_t;
+
+/* i/f request */
+typedef struct wl_p2p_if {
+	struct ether_addr addr;
+	uint8 type;	/* see i/f type */
+	chanspec_t chspec;	/* for p2p_ifadd GO */
+} wl_p2p_if_t;
+
+/* i/f query */
+typedef struct wl_p2p_ifq {
+	uint bsscfgidx;
+	char ifname[BCM_MSG_IFNAME_MAX];
+} wl_p2p_ifq_t;
+
+/* OppPS & CTWindow */
+typedef struct wl_p2p_ops {
+	uint8 ops;	/* 0: disable 1: enable */
+	uint8 ctw;	/* >= 10 */
+} wl_p2p_ops_t;
+
+/* absence and presence request */
+typedef struct wl_p2p_sched_desc {
+	uint32 start;
+	uint32 interval;
+	uint32 duration;
+	uint32 count;	/* see count */
+} wl_p2p_sched_desc_t;
+
+typedef struct wl_p2p_sched {
+	uint8 type;	/* see schedule type */
+	uint8 action;	/* see schedule action */
+	uint8 option;	/* see schedule option */
+	wl_p2p_sched_desc_t desc[1];
+} wl_p2p_sched_t;
+
+typedef struct wl_bcmdcs_data {
+	uint reason;
+	chanspec_t chspec;
+} wl_bcmdcs_data_t;
+
+
+/* NAT configuration */
+typedef struct {
+	uint32 ipaddr;		/* interface ip address */
+	uint32 ipaddr_mask;	/* interface ip address mask */
+	uint32 ipaddr_gateway;	/* gateway ip address */
+	uint8 mac_gateway[6];	/* gateway mac address */
+	uint32 ipaddr_dns;	/* DNS server ip address, valid only for public if */
+	uint8 mac_dns[6];	/* DNS server mac address,  valid only for public if */
+	uint8 GUID[38];		/* interface GUID */
+} nat_if_info_t;
+
+typedef struct {
+	uint op;		/* operation code */
+	bool pub_if;		/* set for public if, clear for private if */
+	nat_if_info_t if_info;	/* interface info */
+} nat_cfg_t;
+
+typedef struct {
+	int state;	/* NAT state returned */
+} nat_state_t;
+
+
+#define BTA_STATE_LOG_SZ	64
+
+/* BTAMP Statemachine states */
+enum {
+	HCIReset = 1,
+	HCIReadLocalAMPInfo,
+	HCIReadLocalAMPASSOC,
+	HCIWriteRemoteAMPASSOC,
+	HCICreatePhysicalLink,
+	HCIAcceptPhysicalLinkRequest,
+	HCIDisconnectPhysicalLink,
+	HCICreateLogicalLink,
+	HCIAcceptLogicalLink,
+	HCIDisconnectLogicalLink,
+	HCILogicalLinkCancel,
+	HCIAmpStateChange,
+	HCIWriteLogicalLinkAcceptTimeout
+};
+
+typedef struct flush_txfifo {
+	uint32 txfifobmp;
+	uint32 hwtxfifoflush;
+	struct ether_addr ea;
+} flush_txfifo_t;
+
+enum {
+	SPATIAL_MODE_2G_IDX = 0,
+	SPATIAL_MODE_5G_LOW_IDX,
+	SPATIAL_MODE_5G_MID_IDX,
+	SPATIAL_MODE_5G_HIGH_IDX,
+	SPATIAL_MODE_5G_UPPER_IDX,
+	SPATIAL_MODE_MAX_IDX
+};
+
+#define WLC_TXCORE_MAX	4	/* max number of txcore supports */
+#define WLC_SUBBAND_MAX	4	/* max number of sub-band supports */
+typedef struct {
+	uint8	band2g[WLC_TXCORE_MAX];
+	uint8	band5g[WLC_SUBBAND_MAX][WLC_TXCORE_MAX];
+} sar_limit_t;
+
+/* IOVAR "mempool" parameter. Used to retrieve a list of memory pool statistics. */
+typedef struct wl_mempool_stats {
+	int	num;		/* Number of memory pools */
+	bcm_mp_stats_t s[1];	/* Variable array of memory pool stats. */
+} wl_mempool_stats_t;
+
+typedef struct {
+	uint32 ipaddr;
+	uint32 ipaddr_netmask;
+	uint32 ipaddr_gateway;
+} nwoe_ifconfig_t;
+
+/* Traffic management priority classes */
+typedef enum trf_mgmt_priority_class {
+	trf_mgmt_priority_low           = 0,        /* Maps to 802.1p BK */
+	trf_mgmt_priority_medium        = 1,        /* Maps to 802.1p BE */
+	trf_mgmt_priority_high          = 2,        /* Maps to 802.1p VI */
+	trf_mgmt_priority_nochange	= 3,	    /* do not update the priority */
+	trf_mgmt_priority_invalid       = (trf_mgmt_priority_nochange + 1)
+} trf_mgmt_priority_class_t;
+
+/* Traffic management configuration parameters */
+typedef struct trf_mgmt_config {
+	uint32  trf_mgmt_enabled;                           /* 0 - disabled, 1 - enabled */
+	uint32  flags;                                      /* See TRF_MGMT_FLAG_xxx defines */
+	uint32  host_ip_addr;                               /* My IP address to determine subnet */
+	uint32  host_subnet_mask;                           /* My subnet mask */
+	uint32  downlink_bandwidth;                         /* In units of kbps */
+	uint32  uplink_bandwidth;                           /* In units of kbps */
+	uint32  min_tx_bandwidth[TRF_MGMT_MAX_PRIORITIES];  /* Minimum guaranteed tx bandwidth */
+	uint32  min_rx_bandwidth[TRF_MGMT_MAX_PRIORITIES];  /* Minimum guaranteed rx bandwidth */
+} trf_mgmt_config_t;
+
+/* Traffic management filter */
+typedef struct trf_mgmt_filter {
+	struct ether_addr           dst_ether_addr;         /* His L2 address */
+	uint32                      dst_ip_addr;            /* His IP address */
+	uint16                      dst_port;               /* His L4 port */
+	uint16                      src_port;               /* My L4 port */
+	uint16                      prot;                   /* L4 protocol (only TCP or UDP) */
+	uint16                      flags;                  /* TBD. For now, this must be zero. */
+	trf_mgmt_priority_class_t   priority;               /* Priority for filtered packets */
+	uint32                      dscp;                   /* DSCP */
+} trf_mgmt_filter_t;
+
+/* Traffic management filter list (variable length) */
+typedef struct trf_mgmt_filter_list     {
+	uint32              num_filters;
+	trf_mgmt_filter_t   filter[1];
+} trf_mgmt_filter_list_t;
+
+/* Traffic management global info used for all queues */
+typedef struct trf_mgmt_global_info {
+	uint32  maximum_bytes_per_second;
+	uint32  maximum_bytes_per_sampling_period;
+	uint32  total_bytes_consumed_per_second;
+	uint32  total_bytes_consumed_per_sampling_period;
+	uint32  total_unused_bytes_per_sampling_period;
+} trf_mgmt_global_info_t;
+
+/* Traffic management shaping info per priority queue */
+typedef struct trf_mgmt_shaping_info {
+	uint32  gauranteed_bandwidth_percentage;
+	uint32  guaranteed_bytes_per_second;
+	uint32  guaranteed_bytes_per_sampling_period;
+	uint32  num_bytes_produced_per_second;
+	uint32  num_bytes_consumed_per_second;
+	uint32  num_queued_packets;                         /* Number of packets in queue */
+	uint32  num_queued_bytes;                           /* Number of bytes in queue */
+} trf_mgmt_shaping_info_t;
+
+/* Traffic management shaping info array */
+typedef struct trf_mgmt_shaping_info_array {
+	trf_mgmt_global_info_t   tx_global_shaping_info;
+	trf_mgmt_shaping_info_t  tx_queue_shaping_info[TRF_MGMT_MAX_PRIORITIES];
+	trf_mgmt_global_info_t   rx_global_shaping_info;
+	trf_mgmt_shaping_info_t  rx_queue_shaping_info[TRF_MGMT_MAX_PRIORITIES];
+} trf_mgmt_shaping_info_array_t;
+
+
+/* Traffic management statistical counters */
+typedef struct trf_mgmt_stats {
+	uint32  num_processed_packets;      /* Number of packets processed */
+	uint32  num_processed_bytes;        /* Number of bytes processed */
+	uint32  num_discarded_packets;      /* Number of packets discarded from queue */
+} trf_mgmt_stats_t;
+
+/* Traffic management statisics array */
+typedef struct trf_mgmt_stats_array {
+	trf_mgmt_stats_t  tx_queue_stats[TRF_MGMT_MAX_PRIORITIES];
+	trf_mgmt_stats_t  rx_queue_stats[TRF_MGMT_MAX_PRIORITIES];
+} trf_mgmt_stats_array_t;
+
+typedef struct powersel_params {
+	/* LPC Params exposed via IOVAR */
+	int32		tp_ratio_thresh;  /* Throughput ratio threshold */
+	uint8		rate_stab_thresh; /* Thresh for rate stability based on nupd */
+	uint8		pwr_stab_thresh; /* Number of successes before power step down */
+	uint8		pwr_sel_exp_time; /* Time lapse for expiry of database */
+} powersel_params_t;
+
+typedef struct lpc_params {
+	/* LPC Params exposed via IOVAR */
+	uint8		rate_stab_thresh; /* Thresh for rate stability based on nupd */
+	uint8		pwr_stab_thresh; /* Number of successes before power step down */
+	uint8		lpc_exp_time; /* Time lapse for expiry of database */
+	uint8		pwrup_slow_step; /* Step size for slow step up */
+	uint8		pwrup_fast_step; /* Step size for fast step up */
+	uint8		pwrdn_slow_step; /* Step size for slow step down */
+} lpc_params_t;
+
+/* tx pkt delay statistics */
+#define	SCB_RETRY_SHORT_DEF	7	/* Default Short retry Limit */
+#define WLPKTDLY_HIST_NBINS	16	/* number of bins used in the Delay histogram */
+
+/* structure to store per-AC delay statistics */
+typedef struct scb_delay_stats {
+	uint32 txmpdu_lost;	/* number of MPDUs lost */
+	uint32 txmpdu_cnt[SCB_RETRY_SHORT_DEF]; /* retry times histogram */
+	uint32 delay_sum[SCB_RETRY_SHORT_DEF]; /* cumulative packet latency */
+	uint32 delay_min;	/* minimum packet latency observed */
+	uint32 delay_max;	/* maximum packet latency observed */
+	uint32 delay_avg;	/* packet latency average */
+	uint32 delay_hist[WLPKTDLY_HIST_NBINS];	/* delay histogram */
+} scb_delay_stats_t;
+
+/* structure for txdelay event */
+typedef struct txdelay_event {
+	uint8	status;
+	int		rssi;
+	chanim_stats_t		chanim_stats;
+	scb_delay_stats_t	delay_stats[AC_COUNT];
+} txdelay_event_t;
+
+/* structure for txdelay parameters */
+typedef struct txdelay_params {
+	uint16	ratio;	/* Avg Txdelay Delta */
+	uint8	cnt;	/* Sample cnt */
+	uint8	period;	/* Sample period */
+	uint8	tune;	/* Debug */
+} txdelay_params_t;
+
+enum {
+	WNM_SERVICE_DMS = 1,
+	WNM_SERVICE_FMS = 2,
+	WNM_SERVICE_TFS = 3
+};
+
+/* Definitions for WNM/NPS TCLAS */
+typedef struct wl_tclas {
+	uint8 user_priority;
+	uint8 fc_len;
+	dot11_tclas_fc_t fc;
+} wl_tclas_t;
+
+#define WL_TCLAS_FIXED_SIZE	OFFSETOF(wl_tclas_t, fc)
+
+typedef struct wl_tclas_list {
+	uint32 num;
+	wl_tclas_t tclas[1];
+} wl_tclas_list_t;
+
+/* Definitions for WNM/NPS Traffic Filter Service */
+typedef struct wl_tfs_req {
+	uint8 tfs_id;
+	uint8 tfs_actcode;
+	uint8 tfs_subelem_id;
+	uint8 send;
+} wl_tfs_req_t;
+
+typedef struct wl_tfs_filter {
+	uint8 status;			/* Status returned by the AP */
+	uint8 tclas_proc;		/* TCLAS processing value (0:and, 1:or)  */
+	uint8 tclas_cnt;		/* count of all wl_tclas_t in tclas array */
+	uint8 tclas[1];			/* VLA of wl_tclas_t */
+} wl_tfs_filter_t;
+#define WL_TFS_FILTER_FIXED_SIZE	OFFSETOF(wl_tfs_filter_t, tclas)
+
+typedef struct wl_tfs_fset {
+	struct ether_addr ea;		/* Address of AP/STA involved with this filter set */
+	uint8 tfs_id;			/* TFS ID field chosen by STA host */
+	uint8 status;			/* Internal status TFS_STATUS_xxx */
+	uint8 actcode;			/* Action code DOT11_TFS_ACTCODE_xxx */
+	uint8 token;			/* Token used in last request frame */
+	uint8 notify;			/* Notify frame sent/received because of this set */
+	uint8 filter_cnt;		/* count of all wl_tfs_filter_t in filter array */
+	uint8 filter[1];		/* VLA of wl_tfs_filter_t */
+} wl_tfs_fset_t;
+#define WL_TFS_FSET_FIXED_SIZE		OFFSETOF(wl_tfs_fset_t, filter)
+
+enum {
+	TFS_STATUS_DISABLED = 0,	/* TFS filter set disabled by user */
+	TFS_STATUS_DISABLING = 1,	/* Empty request just sent to AP */
+	TFS_STATUS_VALIDATED = 2,	/* Filter set validated by AP (but maybe not enabled!) */
+	TFS_STATUS_VALIDATING = 3,	/* Filter set just sent to AP */
+	TFS_STATUS_NOT_ASSOC = 4,	/* STA not associated */
+	TFS_STATUS_NOT_SUPPORT = 5,	/* TFS not supported by AP */
+	TFS_STATUS_DENIED = 6,		/* Filter set refused by AP (=> all sets are disabled!) */
+};
+
+typedef struct wl_tfs_status {
+	uint8 fset_cnt;			/* count of all wl_tfs_fset_t in fset array */
+	wl_tfs_fset_t fset[1];		/* VLA of wl_tfs_fset_t */
+} wl_tfs_status_t;
+
+typedef struct wl_tfs_set {
+	uint8 send;			/* Immediately register existing sets on the AP side */
+	uint8 tfs_id;			/* ID of a specific set (existing or new), or zero for all */
+	uint8 actcode;			/* Action code for this filter set */
+	uint8 tclas_proc;		/* TCLAS processing operator for this filter set */
+} wl_tfs_set_t;
+
+typedef struct wl_tfs_term {
+	uint8 del;			/* Delete internal set once confirmation received */
+	uint8 tfs_id;			/* ID of a specific set (existing), or zero for all */
+} wl_tfs_term_t;
+
+
+#define DMS_DEP_PROXY_ARP (1 << 0)
+
+/* Definitions for WNM/NPS Directed Multicast Service */
+enum {
+	DMS_STATUS_DISABLED = 0,	/* DMS desc disabled by user */
+	DMS_STATUS_ACCEPTED = 1,	/* Request accepted by AP */
+	DMS_STATUS_NOT_ASSOC = 2,	/* STA not associated */
+	DMS_STATUS_NOT_SUPPORT = 3,	/* DMS not supported by AP */
+	DMS_STATUS_DENIED = 4,		/* Request denied by AP */
+	DMS_STATUS_TERM = 5,		/* Request terminated by AP */
+	DMS_STATUS_REMOVING = 6,	/* Remove request just sent */
+	DMS_STATUS_ADDING = 7,		/* Add request just sent */
+	DMS_STATUS_ERROR = 8,		/* Non-compliant AP behavior */
+	DMS_STATUS_IN_PROGRESS = 9, /* Request just sent */
+	DMS_STATUS_REQ_MISMATCH = 10 /* Conditions for sending DMS req not met */
+};
+
+typedef struct wl_dms_desc {
+	uint8 user_id;
+	uint8 status;
+	uint8 token;
+	uint8 dms_id;
+	uint8 tclas_proc;
+	uint8 mac_len;		/* length of all ether_addr in data array, 0 if STA */
+	uint8 tclas_len;	/* length of all wl_tclas_t in data array */
+	uint8 data[1];		/* VLA of 'ether_addr' and 'wl_tclas_t' (in this order ) */
+} wl_dms_desc_t;
+
+#define WL_DMS_DESC_FIXED_SIZE	OFFSETOF(wl_dms_desc_t, data)
+
+typedef struct wl_dms_status {
+	uint32 cnt;
+	wl_dms_desc_t desc[1];
+} wl_dms_status_t;
+
+typedef struct wl_dms_set {
+	uint8 send;
+	uint8 user_id;
+	uint8 tclas_proc;
+} wl_dms_set_t;
+
+typedef struct wl_dms_term {
+	uint8 del;
+	uint8 user_id;
+} wl_dms_term_t;
+
+typedef struct wl_service_term {
+	uint8 service;
+	union {
+		wl_dms_term_t dms;
+	} u;
+} wl_service_term_t;
+
+/* Definitions for WNM/NPS BSS Transition */
+typedef struct wl_bsstrans_req {
+	uint16 tbtt;			/* time of BSS to end of life, in unit of TBTT */
+	uint16 dur;			/* time of BSS to keep off, in unit of minute */
+	uint8 reqmode;			/* request mode of BSS transition request */
+	uint8 unicast;			/* request by unicast or by broadcast */
+} wl_bsstrans_req_t;
+
+enum {
+	BSSTRANS_RESP_AUTO = 0,		/* Currently equivalent to ENABLE */
+	BSSTRANS_RESP_DISABLE = 1,	/* Never answer BSS Trans Req frames */
+	BSSTRANS_RESP_ENABLE = 2,	/* Always answer Req frames with preset data */
+	BSSTRANS_RESP_WAIT = 3,		/* Send ind, wait and/or send preset data (NOT IMPL) */
+	BSSTRANS_RESP_IMMEDIATE = 4	/* After an ind, set data and send resp (NOT IMPL) */
+};
+
+typedef struct wl_bsstrans_resp {
+	uint8 policy;
+	uint8 status;
+	uint8 delay;
+	struct ether_addr target;
+} wl_bsstrans_resp_t;
+
+/* "wnm_bsstrans_resp" argument programming behavior after BSSTRANS Req reception */
+enum {
+	WL_BSSTRANS_RESP_ROAM_ALWAYS = 0,	/* Roam (or disassociate) in all cases */
+	WL_BSSTRANS_RESP_ROAM_IF_MODE = 1,	/* Roam only if requested by Request Mode field */
+	WL_BSSTRANS_RESP_ROAM_IF_PREF = 2,	/* Roam only if Preferred BSS provided */
+	WL_BSSTRANS_RESP_WAIT = 3		/* Wait for deauth and send Accepted status */
+};
+
+/* Definitions for WNM/NPS TIM Broadcast */
+typedef struct wl_timbc_offset {
+	int16 offset;		/* offset in us */
+	uint16 fix_intv;	/* override interval sent from STA */
+	uint16 rate_override;	/* use rate override to send high rate TIM broadcast frame */
+	uint8 tsf_present;	/* show timestamp in TIM broadcast frame */
+} wl_timbc_offset_t;
+
+typedef struct wl_timbc_set {
+	uint8 interval;		/* Interval in DTIM wished or required. */
+	uint8 flags;		/* Bitfield described below */
+	uint16 rate_min;	/* Minimum rate required for High/Low TIM frames. Optional */
+	uint16 rate_max;	/* Maximum rate required for High/Low TIM frames. Optional */
+} wl_timbc_set_t;
+
+enum {
+	WL_TIMBC_SET_TSF_REQUIRED = 1,	/* Enable TIMBC only if TSF in TIM frames */
+	WL_TIMBC_SET_NO_OVERRIDE = 2,	/* ... if AP does not override interval */
+	WL_TIMBC_SET_PROXY_ARP = 4,	/* ... if AP support Proxy ARP */
+	WL_TIMBC_SET_DMS_ACCEPTED = 8	/* ... if all DMS desc have been accepted */
+};
+
+typedef struct wl_timbc_status {
+	uint8 status_sta;		/* Status from internal state machine (check below) */
+	uint8 status_ap;		/* From AP response frame (check 8.4.2.86 from 802.11) */
+	uint8 interval;
+	uint8 pad;
+	int32 offset;
+	uint16 rate_high;
+	uint16 rate_low;
+} wl_timbc_status_t;
+
+enum {
+	WL_TIMBC_STATUS_DISABLE = 0,		/* TIMBC disabled by user */
+	WL_TIMBC_STATUS_REQ_MISMATCH = 1,	/* AP settings do not match user requirements */
+	WL_TIMBC_STATUS_NOT_ASSOC = 2,		/* STA not associated */
+	WL_TIMBC_STATUS_NOT_SUPPORT = 3,	/* TIMBC not supported by AP */
+	WL_TIMBC_STATUS_DENIED = 4,		/* Req to disable TIMBC sent to AP */
+	WL_TIMBC_STATUS_ENABLE = 5		/* TIMBC enabled */
+};
+
+/* Definitions for PM2 Dynamic Fast Return To Sleep */
+typedef struct wl_pm2_sleep_ret_ext {
+	uint8 logic;			/* DFRTS logic: see WL_DFRTS_LOGIC_* below */
+	uint16 low_ms;			/* Low FRTS timeout */
+	uint16 high_ms;			/* High FRTS timeout */
+	uint16 rx_pkts_threshold;	/* switching threshold: # rx pkts */
+	uint16 tx_pkts_threshold;	/* switching threshold: # tx pkts */
+	uint16 txrx_pkts_threshold;	/* switching threshold: # (tx+rx) pkts */
+	uint32 rx_bytes_threshold;	/* switching threshold: # rx bytes */
+	uint32 tx_bytes_threshold;	/* switching threshold: # tx bytes */
+	uint32 txrx_bytes_threshold;	/* switching threshold: # (tx+rx) bytes */
+} wl_pm2_sleep_ret_ext_t;
+
+#define WL_DFRTS_LOGIC_OFF	0	/* Feature is disabled */
+#define WL_DFRTS_LOGIC_OR	1	/* OR all non-zero threshold conditions */
+#define WL_DFRTS_LOGIC_AND	2	/* AND all non-zero threshold conditions */
+
+/* Values for the passive_on_restricted_mode iovar.  When set to non-zero, this iovar
+ * disables automatic conversions of a channel from passively scanned to
+ * actively scanned.  These values only have an effect for country codes such
+ * as XZ where some 5 GHz channels are defined to be passively scanned.
+ */
+#define WL_PASSACTCONV_DISABLE_NONE	0	/* Enable permanent and temporary conversions */
+#define WL_PASSACTCONV_DISABLE_ALL	1	/* Disable permanent and temporary conversions */
+#define WL_PASSACTCONV_DISABLE_PERM	2	/* Disable only permanent conversions */
+
+/* Definitions for Reliable Multicast */
+#define WL_RMC_CNT_VERSION	   1
+#define WL_RMC_MAX_CLIENT	   32
+#define WL_RMC_FLAG_INBLACKLIST	   1
+#define WL_RMC_FLAG_ACTIVEACKER	   2
+#define WL_RMC_FLAG_RELMCAST	   4
+#define WL_RMC_MAX_TABLE_ENTRY     4
+
+#define WL_RMC_VER		   1
+#define WL_RMC_INDEX_ACK_ALL       255
+#define WL_RMC_NUM_OF_MC_STREAMS   4
+#define WL_RMC_MAX_TRS_PER_GROUP   1
+#define WL_RMC_MAX_TRS_IN_ACKALL   1
+#define WL_RMC_ACK_MCAST0          0x02
+#define WL_RMC_ACK_MCAST_ALL       0x01
+#define WL_RMC_ACTF_TIME_MIN       300	 /* time in ms */
+#define WL_RMC_ACTF_TIME_MAX       20000 /* time in ms */
+#define WL_RMC_ARTMO_MIN           350	 /* time in ms */
+#define WL_RMC_ARTMO_MAX           40000	 /* time in ms */
+
+/* RMC events in action frames */
+enum rmc_opcodes {
+	RELMCAST_ENTRY_OP_DISABLE = 0,   /* Disable multi-cast group */
+	RELMCAST_ENTRY_OP_DELETE  = 1,   /* Delete multi-cast group */
+	RELMCAST_ENTRY_OP_ENABLE  = 2,   /* Enable multi-cast group */
+	RELMCAST_ENTRY_OP_ACK_ALL = 3    /* Enable ACK ALL bit in AMT */
+};
+
+/* RMC operational modes */
+enum rmc_modes {
+	WL_RMC_MODE_RECEIVER    = 0,	 /* Receiver mode by default */
+	WL_RMC_MODE_TRANSMITTER = 1,	 /* Transmitter mode using wl ackreq */
+	WL_RMC_MODE_INITIATOR   = 2	 /* Initiator mode using wl ackreq */
+};
+
+/* Each RMC mcast client info */
+typedef struct wl_relmcast_client {
+	uint8 flag;			/* status of client such as AR, R, or blacklisted */
+	int16 rssi;			/* rssi value of RMC client */
+	struct ether_addr addr;		/* mac address of RMC client */
+} wl_relmcast_client_t;
+
+/* RMC Counters */
+typedef struct wl_rmc_cnts {
+	uint16  version;		/* see definition of WL_CNT_T_VERSION */
+	uint16  length;			/* length of entire structure */
+	uint16	dupcnt;			/* counter for duplicate rmc MPDU */
+	uint16	ackreq_err;		/* counter for wl ackreq error    */
+	uint16	af_tx_err;		/* error count for action frame transmit   */
+	uint16	null_tx_err;		/* error count for rmc null frame transmit */
+	uint16	af_unicast_tx_err;	/* error count for rmc unicast frame transmit */
+	uint16	mc_no_amt_slot;		/* No mcast AMT entry available */
+	uint16	mc_no_glb_slot;		/* No mcast entry available in global table */
+	uint16	mc_not_mirrored;	/* mcast group is not mirrored */
+	uint16	mc_existing_tr;		/* mcast group is already taken by transmitter */
+	uint16	mc_exist_in_amt;	/* mcast group is already programmed in amt */
+	uint16	mc_not_exist_in_gbl;	/* mcast group is not in global table */
+	uint16	mc_not_exist_in_amt;	/* mcast group is not in AMT table */
+	uint16	mc_utilized;		/* mcast address is already taken */
+	uint16	mc_taken_other_tr;	/* mcast address is already taken by another transmitter */
+	uint32	rmc_rx_frames_mac;      /* no of mc frames received from mac */
+	uint32	rmc_tx_frames_mac;      /* no of mc frames transmitted to mac */
+	uint32	mc_null_ar_cnt;         /* no. of times NULL AR is received */
+	uint32	mc_ar_role_selected;	/* no. of times took AR role */
+	uint32	mc_ar_role_deleted;	/* no. of times AR role cancelled */
+	uint32	mc_noacktimer_expired;  /* no. of times noack timer expired */
+} wl_rmc_cnts_t;
+
+/* RMC Status */
+typedef struct wl_relmcast_st {
+	uint8         ver;		/* version of RMC */
+	uint8         num;		/* number of clients detected by transmitter */
+	wl_relmcast_client_t clients[WL_RMC_MAX_CLIENT];
+	uint16        err;		/* error status (used in infra) */
+	uint16        actf_time;	/* action frame time period */
+} wl_relmcast_status_t;
+
+/* Entry for each STA/node */
+typedef struct wl_rmc_entry {
+	/* operation on multi-cast entry such add,
+	 * delete, ack-all
+	 */
+	int8    flag;
+	struct ether_addr addr;		/* multi-cast group mac address */
+} wl_rmc_entry_t;
+
+/* RMC table */
+typedef struct wl_rmc_entry_table {
+	uint8   index;			/* index to a particular mac entry in table */
+	uint8   opcode;			/* opcodes or operation on entry */
+	wl_rmc_entry_t entry[WL_RMC_MAX_TABLE_ENTRY];
+} wl_rmc_entry_table_t;
+
+/* Transmitter Info */
+typedef struct wl_rmc_trans_info {
+	struct ether_addr addr;		/* transmitter mac */
+	uint32 time_val;		/* timer val in case aging of entry is required */
+	uint16 seq;			/* last seq number of packet received from transmitter */
+	uint16 artmo;
+} wl_rmc_trans_info_t;
+
+/* Multicast Group */
+typedef struct wl_rmc_grp_entry {
+	struct ether_addr   mcaddr;	/* multi-cast group mac */
+	struct ether_addr   ar;		/* active receiver for the group */
+	wl_rmc_trans_info_t tr_info[WL_RMC_MAX_TRS_PER_GROUP];
+} wl_rmc_grp_entry_t;
+
+/* RMC ACKALL Table */
+typedef struct wl_rmc_ackall_entry {
+	struct ether_addr   ar;		/* active receiver for the entry */
+	wl_rmc_trans_info_t tr_info[WL_RMC_NUM_OF_MC_STREAMS];
+} wl_rmc_ackall_entry_t;
+
+/* RMC Peers Table */
+typedef struct wl_rmc_gbl_table {
+	uint8     activeMask;		/* mask to denote the entry(s) that are active */
+	wl_rmc_ackall_entry_t ackAll;   /* structure to keep info related to ACK all */
+	wl_rmc_grp_entry_t mc_entry[WL_RMC_NUM_OF_MC_STREAMS];
+} wl_rmc_gbl_table_t;
+
+/* To update vendor specific ie for RMC */
+typedef struct wl_rmc_vsie {
+	uint8	oui[DOT11_OUI_LEN];
+	uint16	payload;	/* IE Data Payload */
+} wl_rmc_vsie_t;
+
+typedef struct wl_proxd_iovar {
+	uint16	method;		/* Proximity Detection method */
+	uint16	mode;		/* Mode (neutral, initiator, target) */
+} wl_proxd_iovar_t;
+
+/* structures for proximity detection parameters */
+typedef struct wl_proxd_params_rssi_method {
+	chanspec_t	chanspec;	/* chanspec for home channel */
+	uint16		interval;	/* interval between neighbor finding attempts (in TU) */
+	uint16		duration;	/* duration of neighbor finding attempts (in ms) */
+	int16		rssi_thresh;	/* RSSI threshold (in dBm) */
+	int16		tx_power;	/* tx power of Proximity Detection frames (in dBm) */
+	uint16		tx_rate;	/* tx rate of Proximity Detection frames
+					 * (in 500kbps units)
+					 */
+	uint16		timeout;	/* state machine wait timeout of the frames (in ms) */
+	uint16		maxconvergtmo;	/* max wait converge timeout (in ms) */
+} wl_proxd_params_rssi_method_t;
+
+typedef struct wl_proxd_params_iovar {
+	uint16	method;			/* Proximity Detection method */
+	union {
+		wl_proxd_params_rssi_method_t rssi_params;
+	} u;				/* Method specific optional parameters */
+} wl_proxd_params_iovar_t;
+
+enum {
+	RSSI_REASON_UNKNOW,
+	RSSI_REASON_LOWRSSI,
+	RSSI_REASON_NSYC,
+	RSSI_REASON_TIMEOUT
+};
+
+enum {
+	RSSI_STATE_POLL,
+	RSSI_STATE_TPAIRING,
+	RSSI_STATE_IPAIRING,
+	RSSI_STATE_THANDSHAKE,
+	RSSI_STATE_IHANDSHAKE,
+	RSSI_STATE_CONFIRMED,
+	RSSI_STATE_PIPELINE,
+	RSSI_STATE_NEGMODE,
+	RSSI_STATE_MONITOR,
+	RSSI_STATE_LAST
+};
+
+typedef struct wl_proxd_status_iovar {
+	uint8			mode;
+	uint8			peermode;
+	uint8			state;
+	uint8			reason;
+	uint32			txcnt;
+	uint32			rxcnt;
+	struct ether_addr	peer;
+	int16			hi_rssi;
+	int16			low_rssi;
+} wl_proxd_status_iovar_t;
+
+#ifdef NET_DETECT
+typedef struct net_detect_adapter_features {
+	bool	wowl_enabled;
+	bool	net_detect_enabled;
+	bool	nlo_enabled;
+} net_detect_adapter_features_t;
+
+typedef enum net_detect_bss_type {
+	nd_bss_any = 0,
+	nd_ibss,
+	nd_ess
+} net_detect_bss_type_t;
+
+typedef struct net_detect_profile {
+	wlc_ssid_t		ssid;
+	net_detect_bss_type_t   bss_type;	/* Ignore for now since Phase 1 is only for ESS */
+	uint32			cipher_type;	/* DOT11_CIPHER_ALGORITHM enumeration values */
+	uint32			auth_type;	/* DOT11_AUTH_ALGORITHM enumeration values */
+} net_detect_profile_t;
+
+typedef struct net_detect_profile_list {
+	uint32			num_nd_profiles;
+	net_detect_profile_t	nd_profile[0];
+} net_detect_profile_list_t;
+
+typedef struct net_detect_config {
+	bool			    nd_enabled;
+	uint32			    scan_interval;
+	uint32			    wait_period;
+	bool			    wake_if_connected;
+	bool			    wake_if_disconnected;
+	net_detect_profile_list_t   nd_profile_list;
+} net_detect_config_t;
+
+typedef enum net_detect_wake_reason {
+	nd_reason_unknown,
+	nd_net_detected,
+	nd_wowl_event,
+	nd_ucode_error
+} net_detect_wake_reason_t;
+
+typedef struct net_detect_wake_data {
+	net_detect_wake_reason_t    nd_wake_reason;
+	uint32			    nd_wake_date_length;
+	uint8			    nd_wake_data[0];	    /* Wake data (currently unused) */
+} net_detect_wake_data_t;
+
+#endif /* NET_DETECT */
+
+typedef struct bcnreq {
+	uint8 bcn_mode;
+	int dur;
+	int channel;
+	struct ether_addr da;
+	uint16 random_int;
+	wlc_ssid_t ssid;
+	uint16 reps;
+} bcnreq_t;
+
+typedef struct rrmreq {
+	struct ether_addr da;
+	uint8 reg;
+	uint8 chan;
+	uint16 random_int;
+	uint16 dur;
+	uint16 reps;
+} rrmreq_t;
+
+typedef struct framereq {
+	struct ether_addr da;
+	uint8 reg;
+	uint8 chan;
+	uint16 random_int;
+	uint16 dur;
+	struct ether_addr ta;
+	uint16 reps;
+} framereq_t;
+
+typedef struct statreq {
+	struct ether_addr da;
+	struct ether_addr peer;
+	uint16 random_int;
+	uint16 dur;
+	uint8 group_id;
+	uint16 reps;
+} statreq_t;
+
+typedef struct wlc_l2keepalive_ol_params {
+	uint8	flags;
+	uint8	prio;
+	uint16	period_ms;
+} wlc_l2keepalive_ol_params_t;
+
+typedef struct wlc_dwds_config {
+	uint32		enable;
+	uint32		mode; /* STA/AP interface */
+	struct ether_addr ea;
+} wlc_dwds_config_t;
+
+typedef struct wl_el_set_params_s {
+	uint8 set;	/* Set number */
+	uint32 size;	/* Size to make/expand */
+} wl_el_set_params_t;
+
+typedef struct wl_el_tag_params_s {
+	uint16 tag;
+	uint8 set;
+	uint8 flags;
+} wl_el_tag_params_t;
+
+/* Video Traffic Interference Monitor config */
+#define INTFER_VERSION		1
+typedef struct wl_intfer_params {
+	uint16 version;			/* version */
+	uint8 period;			/* sample period */
+	uint8 cnt;			/* sample cnt */
+	uint8 txfail_thresh;	/* non-TCP txfail threshold */
+	uint8 tcptxfail_thresh;	/* tcptxfail threshold */
+} wl_intfer_params_t;
+
+typedef struct wl_staprio_cfg {
+	struct ether_addr ea;	/* mac addr */
+	uint8 prio;		/* scb priority */
+} wl_staprio_cfg_t;
+
+typedef enum wl_stamon_cfg_cmd_type {
+	STAMON_CFG_CMD_DEL = 0,
+	STAMON_CFG_CMD_ADD = 1
+} wl_stamon_cfg_cmd_type_t;
+
+typedef struct wlc_stamon_sta_config {
+	wl_stamon_cfg_cmd_type_t cmd; /* 0 - delete, 1 - add */
+	struct ether_addr ea;
+} wlc_stamon_sta_config_t;
+
+/* Received Beacons lengths information */
+#define WL_LAST_BCNS_INFO_FIXED_LEN		OFFSETOF(wlc_bcn_len_hist_t, bcnlen_ring)
+typedef struct wlc_bcn_len_hist {
+	uint16	ver;				/* version field */
+	uint16	cur_index;			/* current pointed index in ring buffer */
+	uint32	max_bcnlen;		/* Max beacon length received */
+	uint32	min_bcnlen;		/* Min beacon length received */
+	uint32	ringbuff_len;		/* Length of the ring buffer 'bcnlen_ring' */
+	uint32	bcnlen_ring[1];	/* ring buffer storing received beacon lengths */
+} wlc_bcn_len_hist_t;
+
+/* WDS net interface types */
+#define WL_WDSIFTYPE_NONE  0x0 /* The interface type is neither WDS nor DWDS. */
+#define WL_WDSIFTYPE_WDS   0x1 /* The interface is WDS type. */
+#define WL_WDSIFTYPE_DWDS  0x2 /* The interface is DWDS type. */
+
+typedef struct wl_bssload_static {
+	bool is_static;
+	uint16 sta_count;
+	uint8 chan_util;
+	uint16 aac;
+} wl_bssload_static_t;
+
+
+#endif /* _wlioctl_h_ */
diff --git a/drivers/staging/edison-bcm43340/linux_osl.c b/drivers/staging/edison-bcm43340/linux_osl.c
new file mode 100644
index 0000000..5d9e862
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/linux_osl.c
@@ -0,0 +1,1520 @@
+/*
+ * Linux OS Independent Layer
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: linux_osl.c 451649 2014-01-27 17:23:38Z $
+ */
+
+#define LINUX_PORT
+
+#include <typedefs.h>
+#include <bcmendian.h>
+#include <linuxver.h>
+#include <bcmdefs.h>
+
+#ifdef __ARM_ARCH_7A__
+#include <asm/cacheflush.h>
+#endif /* __ARM_ARCH_7A__ */
+
+#include <osl.h>
+#include <bcmutils.h>
+#include <linux/delay.h>
+#include <pcicfg.h>
+
+
+
+#include <linux/fs.h>
+
+#define PCI_CFG_RETRY		10
+
+#define OS_HANDLE_MAGIC		0x1234abcd	/* Magic # to recognize osh */
+#define BCM_MEM_FILENAME_LEN	24		/* Mem. filename length */
+#define DUMPBUFSZ 1024
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+#define DHD_SKB_HDRSIZE		336
+#define DHD_SKB_1PAGE_BUFSIZE	((PAGE_SIZE*1)-DHD_SKB_HDRSIZE)
+#define DHD_SKB_2PAGE_BUFSIZE	((PAGE_SIZE*2)-DHD_SKB_HDRSIZE)
+#define DHD_SKB_4PAGE_BUFSIZE	((PAGE_SIZE*4)-DHD_SKB_HDRSIZE)
+
+#define STATIC_BUF_MAX_NUM	16
+#define STATIC_BUF_SIZE	(PAGE_SIZE*2)
+#define STATIC_BUF_TOTAL_LEN	(STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)
+
+typedef struct bcm_static_buf {
+	struct semaphore static_sem;
+	unsigned char *buf_ptr;
+	unsigned char buf_use[STATIC_BUF_MAX_NUM];
+} bcm_static_buf_t;
+
+static bcm_static_buf_t *bcm_static_buf = 0;
+
+#define STATIC_PKT_MAX_NUM	8
+#if defined(ENHANCED_STATIC_BUF)
+#define STATIC_PKT_4PAGE_NUM	1
+#define DHD_SKB_MAX_BUFSIZE	DHD_SKB_4PAGE_BUFSIZE
+#else
+#define STATIC_PKT_4PAGE_NUM	0
+#define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE
+#endif /* ENHANCED_STATIC_BUF */
+
+typedef struct bcm_static_pkt {
+	struct sk_buff *skb_4k[STATIC_PKT_MAX_NUM];
+	struct sk_buff *skb_8k[STATIC_PKT_MAX_NUM];
+#ifdef ENHANCED_STATIC_BUF
+	struct sk_buff *skb_16k;
+#endif
+	struct semaphore osl_pkt_sem;
+	unsigned char pkt_use[STATIC_PKT_MAX_NUM * 2 + STATIC_PKT_4PAGE_NUM];
+} bcm_static_pkt_t;
+
+static bcm_static_pkt_t *bcm_static_skb = 0;
+
+void* wifi_platform_prealloc(void *adapter, int section, unsigned long size);
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+typedef struct bcm_mem_link {
+	struct bcm_mem_link *prev;
+	struct bcm_mem_link *next;
+	uint	size;
+	int	line;
+	void 	*osh;
+	char	file[BCM_MEM_FILENAME_LEN];
+} bcm_mem_link_t;
+
+struct osl_cmn_info {
+	atomic_t malloced;
+	atomic_t pktalloced;    /* Number of allocated packet buffers */
+	spinlock_t dbgmem_lock;
+	bcm_mem_link_t *dbgmem_list;
+	spinlock_t pktalloc_lock;
+	atomic_t refcount; /* Number of references to this shared structure. */
+};
+typedef struct osl_cmn_info osl_cmn_t;
+
+struct osl_info {
+	osl_pubinfo_t pub;
+#ifdef CTFPOOL
+	ctfpool_t *ctfpool;
+#endif /* CTFPOOL */
+	uint magic;
+	void *pdev;
+	uint failed;
+	uint bustype;
+	osl_cmn_t *cmn; /* Common OSL-related data shared between two OSHs */
+
+	void *bus_handle;
+#ifdef BCMDBG_CTRACE
+	spinlock_t ctrace_lock;
+	struct list_head ctrace_list;
+	int ctrace_num;
+#endif /* BCMDBG_CTRACE */
+};
+
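+/* Zero the 32-byte skb->cb control block (used as the packet tag) eight
+ * 32-bit words at a time; the ASSERT guards the assumed 32-byte tag size.
+ */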
+#define OSL_PKTTAG_CLEAR(p) \
+do { \
+	struct sk_buff *s = (struct sk_buff *)(p); \
+	ASSERT(OSL_PKTTAG_SZ == 32); \
+	*(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
+	*(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
+	*(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
+	*(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
+} while (0)
+
+/* PCMCIA attribute space access macros */
+
+/* Global ASSERT type flag */
+uint32 g_assert_type = FALSE;
+
+static int16 linuxbcmerrormap[] =
+{	0, 			/* 0 */
+	-EINVAL,		/* BCME_ERROR */
+	-EINVAL,		/* BCME_BADARG */
+	-EINVAL,		/* BCME_BADOPTION */
+	-EINVAL,		/* BCME_NOTUP */
+	-EINVAL,		/* BCME_NOTDOWN */
+	-EINVAL,		/* BCME_NOTAP */
+	-EINVAL,		/* BCME_NOTSTA */
+	-EINVAL,		/* BCME_BADKEYIDX */
+	-EINVAL,		/* BCME_RADIOOFF */
+	-EINVAL,		/* BCME_NOTBANDLOCKED */
+	-EINVAL, 		/* BCME_NOCLK */
+	-EINVAL, 		/* BCME_BADRATESET */
+	-EINVAL, 		/* BCME_BADBAND */
+	-E2BIG,			/* BCME_BUFTOOSHORT */
+	-E2BIG,			/* BCME_BUFTOOLONG */
+	-EBUSY, 		/* BCME_BUSY */
+	-EINVAL, 		/* BCME_NOTASSOCIATED */
+	-EINVAL, 		/* BCME_BADSSIDLEN */
+	-EINVAL, 		/* BCME_OUTOFRANGECHAN */
+	-EINVAL, 		/* BCME_BADCHAN */
+	-EFAULT, 		/* BCME_BADADDR */
+	-ENOMEM, 		/* BCME_NORESOURCE */
+	-EOPNOTSUPP,		/* BCME_UNSUPPORTED */
+	-EMSGSIZE,		/* BCME_BADLENGTH */
+	-EINVAL,		/* BCME_NOTREADY */
+	-EPERM,			/* BCME_EPERM */
+	-ENOMEM, 		/* BCME_NOMEM */
+	-EINVAL, 		/* BCME_ASSOCIATED */
+	-ERANGE, 		/* BCME_RANGE */
+	-EINVAL, 		/* BCME_NOTFOUND */
+	-EINVAL, 		/* BCME_WME_NOT_ENABLED */
+	-EINVAL, 		/* BCME_TSPEC_NOTFOUND */
+	-EINVAL, 		/* BCME_ACM_NOTSUPPORTED */
+	-EINVAL,		/* BCME_NOT_WME_ASSOCIATION */
+	-EIO,			/* BCME_SDIO_ERROR */
+	-ENODEV,		/* BCME_DONGLE_DOWN */
+	-EINVAL,		/* BCME_VERSION */
+	-EIO,			/* BCME_TXFAIL */
+	-EIO,			/* BCME_RXFAIL */
+	-ENODEV,		/* BCME_NODEVICE */
+	-EINVAL,		/* BCME_NMODE_DISABLED */
+	-ENODATA,		/* BCME_NONRESIDENT */
+	-EINVAL,		/* BCME_SCANREJECT */
+	-EINVAL,		/* BCME_USAGE_ERROR */
+	-EIO,     		/* BCME_IOCTL_ERROR */
+	-EIO,			/* BCME_SERIAL_PORT_ERR */
+	-EOPNOTSUPP,	/* BCME_DISABLED, BCME_NOTENABLED */
+	-EIO,			/* BCME_DECERR */
+	-EIO,			/* BCME_ENCERR */
+	-EIO,			/* BCME_MICERR */
+	-ERANGE,		/* BCME_REPLAY */
+	-EINVAL,		/* BCME_IE_NOTFOUND */
+
+/* When a new error code is added to bcmutils.h, add the OS-specific
+ * error translation here as well.
+ */
+/* check if BCME_LAST changed since the last time this function was updated */
+#if BCME_LAST != -52
+#error "You need to add a OS error translation in the linuxbcmerrormap \
+	for new error code defined in bcmutils.h"
+#endif
+};
+
+/* translate bcmerrors into linux errors */
+int
+osl_error(int bcmerror)
+{
+	if (bcmerror > 0)
+		bcmerror = 0;
+	else if (bcmerror < BCME_LAST)
+		bcmerror = BCME_ERROR;
+
+	/* Array bounds covered by ASSERT in osl_attach */
+	return linuxbcmerrormap[-bcmerror];
+}
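+
+/* Example (illustrative): per the table above, osl_error(BCME_NOMEM)
+ * returns -ENOMEM, osl_error(BCME_UNSUPPORTED) returns -EOPNOTSUPP, and
+ * any positive bcmerror is treated as success (0).
+ */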
+#ifdef SHARED_OSL_CMN
+osl_t *
+osl_attach(void *pdev, uint bustype, bool pkttag, void **osl_cmn)
+{
+#else
+osl_t *
+osl_attach(void *pdev, uint bustype, bool pkttag)
+{
+	void **osl_cmn = NULL;
+#endif /* SHARED_OSL_CMN */
+	osl_t *osh;
+	gfp_t flags;
+
+	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
+	if (!(osh = kmalloc(sizeof(osl_t), flags)))
+		return osh;
+
+	ASSERT(osh);
+
+	bzero(osh, sizeof(osl_t));
+
+	if (osl_cmn == NULL || *osl_cmn == NULL) {
+		if (!(osh->cmn = kmalloc(sizeof(osl_cmn_t), flags))) {
+			kfree(osh);
+			return NULL;
+		}
+		bzero(osh->cmn, sizeof(osl_cmn_t));
+		if (osl_cmn)
+			*osl_cmn = osh->cmn;
+		atomic_set(&osh->cmn->malloced, 0);
+		osh->cmn->dbgmem_list = NULL;
+		spin_lock_init(&(osh->cmn->dbgmem_lock));
+
+		spin_lock_init(&(osh->cmn->pktalloc_lock));
+
+	} else {
+		osh->cmn = *osl_cmn;
+	}
+	atomic_add(1, &osh->cmn->refcount);
+
+	/* Check that error map has the right number of entries in it */
+	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
+
+	osh->failed = 0;
+	osh->pdev = pdev;
+	osh->pub.pkttag = pkttag;
+	osh->bustype = bustype;
+	osh->magic = OS_HANDLE_MAGIC;
+
+	switch (bustype) {
+		case PCI_BUS:
+		case SI_BUS:
+		case PCMCIA_BUS:
+			osh->pub.mmbus = TRUE;
+			break;
+		case JTAG_BUS:
+		case SDIO_BUS:
+		case USB_BUS:
+		case SPI_BUS:
+		case RPC_BUS:
+			osh->pub.mmbus = FALSE;
+			break;
+		default:
+			ASSERT(FALSE);
+			break;
+	}
+
+#ifdef BCMDBG_CTRACE
+	spin_lock_init(&osh->ctrace_lock);
+	INIT_LIST_HEAD(&osh->ctrace_list);
+	osh->ctrace_num = 0;
+#endif /* BCMDBG_CTRACE */
+
+
+	return osh;
+}
+
+int osl_static_mem_init(osl_t *osh, void *adapter)
+{
+#if defined(CONFIG_DHD_USE_STATIC_BUF)
+		if (!bcm_static_buf && adapter) {
+			if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter,
+				3, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) {
+				printk("can not alloc static buf!\n");
+				bcm_static_skb = NULL;
+				ASSERT(osh->magic == OS_HANDLE_MAGIC);
+				kfree(osh);
+				return -ENOMEM;
+			}
+			else
+				printk("alloc static buf at %x!\n", (unsigned int)bcm_static_buf);
+
+
+			sema_init(&bcm_static_buf->static_sem, 1);
+
+			bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
+		}
+
+		if (!bcm_static_skb && adapter) {
+			int i;
+			void *skb_buff_ptr = 0;
+			bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
+			skb_buff_ptr = wifi_platform_prealloc(adapter, 4, 0);
+			if (!skb_buff_ptr) {
+				printk("cannot alloc static buf!\n");
+				bcm_static_buf = NULL;
+				bcm_static_skb = NULL;
+				ASSERT(osh->magic == OS_HANDLE_MAGIC);
+				kfree(osh);
+				return -ENOMEM;
+			}
+
+			bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) *
+				(STATIC_PKT_MAX_NUM * 2 + STATIC_PKT_4PAGE_NUM));
+			for (i = 0; i < STATIC_PKT_MAX_NUM * 2 + STATIC_PKT_4PAGE_NUM; i++)
+				bcm_static_skb->pkt_use[i] = 0;
+
+			sema_init(&bcm_static_skb->osl_pkt_sem, 1);
+		}
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+	return 0;
+}
+
+void osl_set_bus_handle(osl_t *osh, void *bus_handle)
+{
+	osh->bus_handle = bus_handle;
+}
+
+void* osl_get_bus_handle(osl_t *osh)
+{
+	return osh->bus_handle;
+}
+
+void
+osl_detach(osl_t *osh)
+{
+	if (osh == NULL)
+		return;
+
+	ASSERT(osh->magic == OS_HANDLE_MAGIC);
+	atomic_sub(1, &osh->cmn->refcount);
+	if (atomic_read(&osh->cmn->refcount) == 0) {
+		kfree(osh->cmn);
+	}
+	kfree(osh);
+}
+
+int osl_static_mem_deinit(osl_t *osh, void *adapter)
+{
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+	if (bcm_static_buf) {
+		bcm_static_buf = 0;
+	}
+	if (bcm_static_skb) {
+		bcm_static_skb = 0;
+	}
+#endif
+	return 0;
+}
+
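+/* Allocate an skb, picking GFP_ATOMIC when sleeping is unsafe (atomic
+ * context or IRQs disabled) and GFP_KERNEL otherwise. Kernels older than
+ * 2.6.25 fall back to dev_alloc_skb(), which always allocates atomically.
+ */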
+static struct sk_buff *osl_alloc_skb(osl_t *osh, unsigned int len)
+{
+	struct sk_buff *skb;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
+	gfp_t flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
+#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_ZONE_DMA)
+	flags |= GFP_ATOMIC;
+#endif
+	skb = __dev_alloc_skb(len, flags);
+#else
+	skb = dev_alloc_skb(len);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
+	return skb;
+}
+
+#ifdef CTFPOOL
+
+#ifdef CTFPOOL_SPINLOCK
+#define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_irqsave(&(ctfpool)->lock, flags)
+#define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_irqrestore(&(ctfpool)->lock, flags)
+#else
+#define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_bh(&(ctfpool)->lock)
+#define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_bh(&(ctfpool)->lock)
+#endif /* CTFPOOL_SPINLOCK */
+/*
+ * Allocate and add an object to packet pool.
+ */
+void *
+osl_ctfpool_add(osl_t *osh)
+{
+	struct sk_buff *skb;
+#ifdef CTFPOOL_SPINLOCK
+	unsigned long flags;
+#endif /* CTFPOOL_SPINLOCK */
+
+	if ((osh == NULL) || (osh->ctfpool == NULL))
+		return NULL;
+
+	CTFPOOL_LOCK(osh->ctfpool, flags);
+	ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj);
+
+	/* No need to allocate more objects */
+	if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) {
+		CTFPOOL_UNLOCK(osh->ctfpool, flags);
+		return NULL;
+	}
+
+	/* Allocate a new skb and add it to the ctfpool */
+	skb = osl_alloc_skb(osh, osh->ctfpool->obj_size);
+	if (skb == NULL) {
+		printf("%s: skb alloc of len %d failed\n", __FUNCTION__,
+		       osh->ctfpool->obj_size);
+		CTFPOOL_UNLOCK(osh->ctfpool, flags);
+		return NULL;
+	}
+
+	/* Add to ctfpool */
+	skb->next = (struct sk_buff *)osh->ctfpool->head;
+	osh->ctfpool->head = skb;
+	osh->ctfpool->fast_frees++;
+	osh->ctfpool->curr_obj++;
+
+	/* Hijack a skb member to store ptr to ctfpool */
+	CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool;
+
+	/* Use bit flag to indicate skb from fast ctfpool */
+	PKTFAST(osh, skb) = FASTBUF;
+
+	CTFPOOL_UNLOCK(osh->ctfpool, flags);
+
+	return skb;
+}
+
+/*
+ * Add new objects to the pool.
+ */
+void
+osl_ctfpool_replenish(osl_t *osh, uint thresh)
+{
+	if ((osh == NULL) || (osh->ctfpool == NULL))
+		return;
+
+	/* Do nothing if no refills are required */
+	while ((osh->ctfpool->refills > 0) && (thresh--)) {
+		osl_ctfpool_add(osh);
+		osh->ctfpool->refills--;
+	}
+}
+
+/*
+ * Initialize the packet pool with specified number of objects.
+ */
+int32
+osl_ctfpool_init(osl_t *osh, uint numobj, uint size)
+{
+	gfp_t flags;
+
+	flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
+	osh->ctfpool = kzalloc(sizeof(ctfpool_t), flags);
+	ASSERT(osh->ctfpool);
+
+	osh->ctfpool->max_obj = numobj;
+	osh->ctfpool->obj_size = size;
+
+	spin_lock_init(&osh->ctfpool->lock);
+
+	while (numobj--) {
+		if (!osl_ctfpool_add(osh))
+			return -1;
+		osh->ctfpool->fast_frees--;
+	}
+
+	return 0;
+}
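+
+/*
+ * Lifecycle sketch (illustrative; the object count, size and threshold
+ * below are assumptions of the example, not values from this driver):
+ *
+ *	if (osl_ctfpool_init(osh, 512, 2048) == 0) {
+ *		...
+ *		osl_ctfpool_replenish(osh, 16);	(top up, at most 16 objects)
+ *		...
+ *		osl_ctfpool_cleanup(osh);
+ *	}
+ */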
+
+/*
+ * Cleanup the packet pool objects.
+ */
+void
+osl_ctfpool_cleanup(osl_t *osh)
+{
+	struct sk_buff *skb, *nskb;
+#ifdef CTFPOOL_SPINLOCK
+	unsigned long flags;
+#endif /* CTFPOOL_SPINLOCK */
+
+	if ((osh == NULL) || (osh->ctfpool == NULL))
+		return;
+
+	CTFPOOL_LOCK(osh->ctfpool, flags);
+
+	skb = osh->ctfpool->head;
+
+	while (skb != NULL) {
+		nskb = skb->next;
+		dev_kfree_skb(skb);
+		skb = nskb;
+		osh->ctfpool->curr_obj--;
+	}
+
+	ASSERT(osh->ctfpool->curr_obj == 0);
+	osh->ctfpool->head = NULL;
+	CTFPOOL_UNLOCK(osh->ctfpool, flags);
+
+	kfree(osh->ctfpool);
+	osh->ctfpool = NULL;
+}
+
+void
+osl_ctfpool_stats(osl_t *osh, void *b)
+{
+	struct bcmstrbuf *bb;
+
+	if ((osh == NULL) || (osh->ctfpool == NULL))
+		return;
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+	if (bcm_static_buf) {
+		bcm_static_buf = 0;
+	}
+	if (bcm_static_skb) {
+		bcm_static_skb = 0;
+	}
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+	bb = b;
+
+	ASSERT((osh != NULL) && (bb != NULL));
+
+	bcm_bprintf(bb, "max_obj %d obj_size %d curr_obj %d refills %d\n",
+	            osh->ctfpool->max_obj, osh->ctfpool->obj_size,
+	            osh->ctfpool->curr_obj, osh->ctfpool->refills);
+	bcm_bprintf(bb, "fast_allocs %d fast_frees %d slow_allocs %d\n",
+	            osh->ctfpool->fast_allocs, osh->ctfpool->fast_frees,
+	            osh->ctfpool->slow_allocs);
+}
+
+static inline struct sk_buff *
+osl_pktfastget(osl_t *osh, uint len)
+{
+	struct sk_buff *skb;
+#ifdef CTFPOOL_SPINLOCK
+	unsigned long flags;
+#endif /* CTFPOOL_SPINLOCK */
+
+	/* Try a fast allocation. Return NULL if the ctfpool is not in use
+	 * or if there are no items left in it.
+	 */
+	if (osh->ctfpool == NULL)
+		return NULL;
+
+	CTFPOOL_LOCK(osh->ctfpool, flags);
+	if (osh->ctfpool->head == NULL) {
+		ASSERT(osh->ctfpool->curr_obj == 0);
+		osh->ctfpool->slow_allocs++;
+		CTFPOOL_UNLOCK(osh->ctfpool, flags);
+		return NULL;
+	}
+
+	if (len > osh->ctfpool->obj_size) {
+		CTFPOOL_UNLOCK(osh->ctfpool, flags);
+		return NULL;
+	}
+
+	ASSERT(len <= osh->ctfpool->obj_size);
+
+	/* Get an object from ctfpool */
+	skb = (struct sk_buff *)osh->ctfpool->head;
+	osh->ctfpool->head = (void *)skb->next;
+
+	osh->ctfpool->fast_allocs++;
+	osh->ctfpool->curr_obj--;
+	ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head);
+	CTFPOOL_UNLOCK(osh->ctfpool, flags);
+
+	/* Init skb struct */
+	skb->next = skb->prev = NULL;
+#if defined(__ARM_ARCH_7A__)
+	skb->data = skb->head + NET_SKB_PAD;
+	skb->tail = skb->head + NET_SKB_PAD;
+#else
+	skb->data = skb->head + 16;
+	skb->tail = skb->head + 16;
+#endif /* __ARM_ARCH_7A__ */
+	skb->len = 0;
+	skb->cloned = 0;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
+	skb->list = NULL;
+#endif
+	atomic_set(&skb->users, 1);
+
+	PKTSETCLINK(skb, NULL);
+	PKTCCLRATTR(skb);
+	PKTFAST(osh, skb) &= ~(CTFBUF | SKIPCT | CHAINED);
+
+	return skb;
+}
+#endif /* CTFPOOL */
+/* Convert a driver packet to a native (OS) packet.
+ * In the process, the packettag is zeroed out before sending up.
+ * The IP code depends on skb->cb being set up correctly with various options;
+ * in our case, that means it should be 0.
+ */
+struct sk_buff * BCMFASTPATH
+osl_pkt_tonative(osl_t *osh, void *pkt)
+{
+	struct sk_buff *nskb;
+#ifdef BCMDBG_CTRACE
+	struct sk_buff *nskb1, *nskb2;
+#endif
+
+	if (osh->pub.pkttag)
+		OSL_PKTTAG_CLEAR(pkt);
+
+	/* Decrement the packet counter */
+	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
+		atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced);
+
+#ifdef BCMDBG_CTRACE
+		for (nskb1 = nskb; nskb1 != NULL; nskb1 = nskb2) {
+			if (PKTISCHAINED(nskb1)) {
+				nskb2 = PKTCLINK(nskb1);
+			}
+			else
+				nskb2 = NULL;
+
+			DEL_CTRACE(osh, nskb1);
+		}
+#endif /* BCMDBG_CTRACE */
+	}
+	return (struct sk_buff *)pkt;
+}
+
+/* Convert a native (OS) packet to a driver packet.
+ * In the process the native packet is consumed (no copying is done),
+ * and the packettag is zeroed out.
+ */
+#ifdef BCMDBG_CTRACE
+void * BCMFASTPATH
+osl_pkt_frmnative(osl_t *osh, void *pkt, int line, char *file)
+#else
+void * BCMFASTPATH
+osl_pkt_frmnative(osl_t *osh, void *pkt)
+#endif /* BCMDBG_CTRACE */
+{
+	struct sk_buff *nskb;
+#ifdef BCMDBG_CTRACE
+	struct sk_buff *nskb1, *nskb2;
+#endif
+
+	if (osh->pub.pkttag)
+		OSL_PKTTAG_CLEAR(pkt);
+
+	/* Increment the packet counter */
+	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
+		atomic_add(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced);
+
+#ifdef BCMDBG_CTRACE
+		for (nskb1 = nskb; nskb1 != NULL; nskb1 = nskb2) {
+			if (PKTISCHAINED(nskb1)) {
+				nskb2 = PKTCLINK(nskb1);
+			}
+			else
+				nskb2 = NULL;
+
+			ADD_CTRACE(osh, nskb1, file, line);
+		}
+#endif /* BCMDBG_CTRACE */
+	}
+	return (void *)pkt;
+}
+
+/* Return a new packet. zero out pkttag */
+#ifdef BCMDBG_CTRACE
+void * BCMFASTPATH
+osl_pktget(osl_t *osh, uint len, int line, char *file)
+#else
+void * BCMFASTPATH
+osl_pktget(osl_t *osh, uint len)
+#endif /* BCMDBG_CTRACE */
+{
+	struct sk_buff *skb;
+
+#ifdef CTFPOOL
+	/* Allocate from local pool */
+	skb = osl_pktfastget(osh, len);
+	if ((skb != NULL) || ((skb = osl_alloc_skb(osh, len)) != NULL)) {
+#else /* CTFPOOL */
+	if ((skb = osl_alloc_skb(osh, len))) {
+#endif /* CTFPOOL */
+		skb->tail += len;
+		skb->len  += len;
+		skb->priority = 0;
+
+#ifdef BCMDBG_CTRACE
+		ADD_CTRACE(osh, skb, file, line);
+#endif
+		atomic_inc(&osh->cmn->pktalloced);
+	}
+
+	return ((void*) skb);
+}
+
+#ifdef CTFPOOL
+static inline void
+osl_pktfastfree(osl_t *osh, struct sk_buff *skb)
+{
+	ctfpool_t *ctfpool;
+#ifdef CTFPOOL_SPINLOCK
+	unsigned long flags;
+#endif /* CTFPOOL_SPINLOCK */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+	skb->tstamp.tv.sec = 0;
+#else
+	skb->stamp.tv_sec = 0;
+#endif
+
+	/* We only need to init the fields that we change */
+	skb->dev = NULL;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
+	skb->dst = NULL;
+#endif
+	OSL_PKTTAG_CLEAR(skb);
+	skb->ip_summed = 0;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+	skb_orphan(skb);
+#else
+	skb->destructor = NULL;
+#endif
+
+	ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
+	ASSERT(ctfpool != NULL);
+
+	/* Add object to the ctfpool */
+	CTFPOOL_LOCK(ctfpool, flags);
+	skb->next = (struct sk_buff *)ctfpool->head;
+	ctfpool->head = (void *)skb;
+
+	ctfpool->fast_frees++;
+	ctfpool->curr_obj++;
+
+	ASSERT(ctfpool->curr_obj <= ctfpool->max_obj);
+	CTFPOOL_UNLOCK(ctfpool, flags);
+}
+#endif /* CTFPOOL */
+
+/* Free the driver packet. Free the tag if present */
+void BCMFASTPATH
+osl_pktfree(osl_t *osh, void *p, bool send)
+{
+	struct sk_buff *skb, *nskb;
+	if (osh == NULL)
+		return;
+
+	skb = (struct sk_buff*) p;
+
+	if (send && osh->pub.tx_fn)
+		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
+
+	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);
+
+	/* perversion: we use skb->next to chain multi-skb packets */
+	while (skb) {
+		nskb = skb->next;
+		skb->next = NULL;
+
+#ifdef BCMDBG_CTRACE
+		DEL_CTRACE(osh, skb);
+#endif
+
+#ifdef CTFPOOL
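+		/* This mirrors kfree_skb()-style refcounting: recycle into the
+		 * pool only when we drop the last reference; the read barrier
+		 * on the uncontended (users == 1) path pairs with the
+		 * atomic_dec_and_test() on the contended one.
+		 */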
+		if (PKTISFAST(osh, skb)) {
+			if (atomic_read(&skb->users) == 1)
+				smp_rmb();
+			else if (!atomic_dec_and_test(&skb->users))
+				goto next_skb;
+			osl_pktfastfree(osh, skb);
+		} else
+#endif
+		{
+			if (skb->destructor)
+				/* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
+				 * destructor exists
+				 */
+				dev_kfree_skb_any(skb);
+			else
+				/* can free immediately (even in_irq()) if destructor
+				 * does not exist
+				 */
+				dev_kfree_skb(skb);
+		}
+#ifdef CTFPOOL
+next_skb:
+#endif
+		atomic_dec(&osh->cmn->pktalloced);
+		skb = nskb;
+	}
+}
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
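+/* Hand out a preallocated skb from one of the static pools (4K and 8K
+ * buffers, plus a single 16K buffer with ENHANCED_STATIC_BUF), falling
+ * back to the regular allocator when the request is too large or every
+ * static packet is in use.
+ */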
+void*
+osl_pktget_static(osl_t *osh, uint len)
+{
+	int i = 0;
+	struct sk_buff *skb;
+
+	if (len > DHD_SKB_MAX_BUFSIZE) {
+		printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
+		return osl_pktget(osh, len);
+	}
+
+	if (!bcm_static_skb)
+		return osl_pktget(osh, len);
+
+	down(&bcm_static_skb->osl_pkt_sem);
+
+	if (len <= DHD_SKB_1PAGE_BUFSIZE) {
+		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
+			if (bcm_static_skb->pkt_use[i] == 0)
+				break;
+		}
+
+		if (i != STATIC_PKT_MAX_NUM) {
+			bcm_static_skb->pkt_use[i] = 1;
+
+			skb = bcm_static_skb->skb_4k[i];
+			skb->tail = skb->data + len;
+			skb->len = len;
+
+			up(&bcm_static_skb->osl_pkt_sem);
+			return skb;
+		}
+	}
+
+	if (len <= DHD_SKB_2PAGE_BUFSIZE) {
+		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
+			if (bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM]
+				== 0)
+				break;
+		}
+
+		if (i != STATIC_PKT_MAX_NUM) {
+			bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 1;
+			skb = bcm_static_skb->skb_8k[i];
+			skb->tail = skb->data + len;
+			skb->len = len;
+
+			up(&bcm_static_skb->osl_pkt_sem);
+			return skb;
+		}
+	}
+
+#if defined(ENHANCED_STATIC_BUF)
+	if (bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM * 2] == 0) {
+		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM * 2] = 1;
+
+		skb = bcm_static_skb->skb_16k;
+		skb->tail = skb->data + len;
+		skb->len = len;
+
+		up(&bcm_static_skb->osl_pkt_sem);
+		return skb;
+	}
+#endif
+
+	up(&bcm_static_skb->osl_pkt_sem);
+	printk("%s: all static pkt in use!\n", __FUNCTION__);
+	return osl_pktget(osh, len);
+}
+
+void
+osl_pktfree_static(osl_t *osh, void *p, bool send)
+{
+	int i;
+	if (!bcm_static_skb) {
+		osl_pktfree(osh, p, send);
+		return;
+	}
+
+	down(&bcm_static_skb->osl_pkt_sem);
+	for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
+		if (p == bcm_static_skb->skb_4k[i]) {
+			bcm_static_skb->pkt_use[i] = 0;
+			up(&bcm_static_skb->osl_pkt_sem);
+			return;
+		}
+	}
+
+	for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
+		if (p == bcm_static_skb->skb_8k[i]) {
+			bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 0;
+			up(&bcm_static_skb->osl_pkt_sem);
+			return;
+		}
+	}
+#ifdef ENHANCED_STATIC_BUF
+	if (p == bcm_static_skb->skb_16k) {
+		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM * 2] = 0;
+		up(&bcm_static_skb->osl_pkt_sem);
+		return;
+	}
+#endif
+	up(&bcm_static_skb->osl_pkt_sem);
+	osl_pktfree(osh, p, send);
+}
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+uint32
+osl_pci_read_config(osl_t *osh, uint offset, uint size)
+{
+	uint val = 0;
+	uint retry = PCI_CFG_RETRY;
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+	/* only 4byte access supported */
+	ASSERT(size == 4);
+
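+	/* An all-ones readback typically means the config access did not
+	 * complete (device not ready), so retry up to PCI_CFG_RETRY times.
+	 */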
+	do {
+		pci_read_config_dword(osh->pdev, offset, &val);
+		if (val != 0xffffffff)
+			break;
+	} while (retry--);
+
+	return (val);
+}
+
+void
+osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
+{
+	uint retry = PCI_CFG_RETRY;
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+	/* only 4byte access supported */
+	ASSERT(size == 4);
+
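+	/* Only writes to the BAR0 window register are verified by reading
+	 * the value back; writes to any other offset are issued once.
+	 */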
+	do {
+		pci_write_config_dword(osh->pdev, offset, val);
+		if (offset != PCI_BAR0_WIN)
+			break;
+		if (osl_pci_read_config(osh, offset, size) == val)
+			break;
+	} while (retry--);
+}
+
+/* return bus # for the pci device pointed by osh->pdev */
+uint
+osl_pci_bus(osl_t *osh)
+{
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+	return ((struct pci_dev *)osh->pdev)->bus->number;
+}
+
+/* return slot # for the pci device pointed by osh->pdev */
+uint
+osl_pci_slot(osl_t *osh)
+{
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
+	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1;
+#else
+	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
+#endif
+}
+
+/* return the pci device pointed by osh->pdev */
+struct pci_dev *
+osl_pci_device(osl_t *osh)
+{
+	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+	return osh->pdev;
+}
+
+static void
+osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
+{
+}
+
+void
+osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
+{
+	osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
+}
+
+void
+osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
+{
+	osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
+}
+
+void *
+osl_malloc(osl_t *osh, uint size)
+{
+	void *addr;
+	gfp_t flags;
+
+	/* only ASSERT if osh is defined */
+	if (osh)
+		ASSERT(osh->magic == OS_HANDLE_MAGIC);
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+	if (bcm_static_buf)
+	{
+		int i = 0;
+		if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE))
+		{
+			down(&bcm_static_buf->static_sem);
+
+			for (i = 0; i < STATIC_BUF_MAX_NUM; i++)
+			{
+				if (bcm_static_buf->buf_use[i] == 0)
+					break;
+			}
+
+			if (i == STATIC_BUF_MAX_NUM)
+			{
+				up(&bcm_static_buf->static_sem);
+				printk("all static buff in use!\n");
+				goto original;
+			}
+
+			bcm_static_buf->buf_use[i] = 1;
+			up(&bcm_static_buf->static_sem);
+
+			bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size);
+			if (osh)
+				atomic_add(size, &osh->cmn->malloced);
+
+			return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i));
+		}
+	}
+original:
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+	flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
+	if ((addr = kmalloc(size, flags)) == NULL) {
+		if (osh)
+			osh->failed++;
+		return (NULL);
+	}
+	if (osh && osh->cmn)
+		atomic_add(size, &osh->cmn->malloced);
+
+	return (addr);
+}
+
+void *
+osl_mallocz(osl_t *osh, uint size)
+{
+	void *ptr;
+
+	ptr = osl_malloc(osh, size);
+
+	if (ptr != NULL) {
+		bzero(ptr, size);
+	}
+
+	return ptr;
+}
+
+void
+osl_mfree(osl_t *osh, void *addr, uint size)
+{
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+	if (bcm_static_buf)
+	{
+		if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
+			<= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
+		{
+			int buf_idx = 0;
+
+			buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;
+
+			down(&bcm_static_buf->static_sem);
+			bcm_static_buf->buf_use[buf_idx] = 0;
+			up(&bcm_static_buf->static_sem);
+
+			if (osh && osh->cmn) {
+				ASSERT(osh->magic == OS_HANDLE_MAGIC);
+				atomic_sub(size, &osh->cmn->malloced);
+			}
+			return;
+		}
+	}
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+	if (osh && osh->cmn) {
+		ASSERT(osh->magic == OS_HANDLE_MAGIC);
+
+		ASSERT(size <= osl_malloced(osh));
+
+		atomic_sub(size, &osh->cmn->malloced);
+	}
+	kfree(addr);
+}
+
+uint
+osl_check_memleak(osl_t *osh)
+{
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	if (atomic_read(&osh->cmn->refcount) == 1)
+		return (atomic_read(&osh->cmn->malloced));
+	else
+		return 0;
+}
+
+uint
+osl_malloced(osl_t *osh)
+{
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+		return (atomic_read(&osh->cmn->malloced));
+}
+
+uint
+osl_malloc_failed(osl_t *osh)
+{
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	return (osh->failed);
+}
+
+
+uint
+osl_dma_consistent_align(void)
+{
+	return (PAGE_SIZE);
+}
+
+void*
+osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, dmaaddr_t *pap)
+{
+	void *va;
+	uint16 align = (1 << align_bits);
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
+		size += align;
+	*alloced = size;
+
+#ifdef __ARM_ARCH_7A__
+	va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
+	if (va)
+		*pap = (ulong)__virt_to_phys((ulong)va);
+#else
+	{
+		dma_addr_t pap_lin;
+		va = pci_alloc_consistent(osh->pdev, size, &pap_lin);
+		*pap = (dmaaddr_t)pap_lin;
+	}
+#endif
+	return va;
+}
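+/* Note on alignment: the allocation above is padded by 'align' bytes when
+ * the backing allocator's natural alignment (DMA_CONSISTENT_ALIGN) does not
+ * already satisfy the request; the caller is then expected to round the
+ * returned address up itself (e.g. with the ROUNDUP() macro). This is a
+ * reading of the code above, not a documented contract.
+ */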
+
+void
+osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
+{
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+#ifdef __ARM_ARCH_7A__
+	kfree(va);
+#else
+	pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
+#endif
+}
+
+dmaaddr_t BCMFASTPATH
+osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
+{
+	int dir;
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
+
+#if defined(__ARM_ARCH_7A__) && defined(BCMDMASGLISTOSL)
+	if (dmah != NULL) {
+		int32 nsegs, i, totsegs = 0, totlen = 0;
+		struct scatterlist *sg, _sg[MAX_DMA_SEGS * 2];
+		struct sk_buff *skb;
+		for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) {
+			sg = &_sg[totsegs];
+			if (skb_is_nonlinear(skb)) {
+				nsegs = skb_to_sgvec(skb, sg, 0, PKTLEN(osh, skb));
+				ASSERT((nsegs > 0) && (totsegs + nsegs <= MAX_DMA_SEGS));
+				pci_map_sg(osh->pdev, sg, nsegs, dir);
+			} else {
+				nsegs = 1;
+				ASSERT(totsegs + nsegs <= MAX_DMA_SEGS);
+				sg->page_link = 0;
+				sg_set_buf(sg, PKTDATA(osh, skb), PKTLEN(osh, skb));
+				pci_map_single(osh->pdev, PKTDATA(osh, skb), PKTLEN(osh, skb), dir);
+			}
+			totsegs += nsegs;
+			totlen += PKTLEN(osh, skb);
+		}
+		dmah->nsegs = totsegs;
+		dmah->origsize = totlen;
+		for (i = 0, sg = _sg; i < totsegs; i++, sg++) {
+			dmah->segs[i].addr = sg_phys(sg);
+			dmah->segs[i].length = sg->length;
+		}
+		return dmah->segs[0].addr;
+	}
+#endif /* __ARM_ARCH_7A__ && BCMDMASGLISTOSL */
+
+	return (pci_map_single(osh->pdev, va, size, dir));
+}
+
+void BCMFASTPATH
+osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction)
+{
+	int dir;
+
+	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
+	pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
+}
+
+
+#if defined(__ARM_ARCH_7A__)
+
+inline void BCMFASTPATH
+osl_cache_flush(void *va, uint size)
+{
+	dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_TX);
+}
+
+inline void BCMFASTPATH
+osl_cache_inv(void *va, uint size)
+{
+	dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_RX);
+}
+
+inline void osl_prefetch(const void *ptr)
+{
+	/* Borrowed from linux/linux-2.6/include/asm-arm/processor.h */
+	__asm__ __volatile__(
+		"pld\t%0"
+		:
+		: "o" (*(char *)ptr)
+		: "cc");
+}
+#endif /* __ARM_ARCH_7A__ */
+
+#if defined(BCMASSERT_LOG)
+void
+osl_assert(const char *exp, const char *file, int line)
+{
+	char tempbuf[256];
+	const char *basename;
+
+	basename = strrchr(file, '/');
+	/* skip the '/' */
+	if (basename)
+		basename++;
+
+	if (!basename)
+		basename = file;
+
+#ifdef BCMASSERT_LOG
+	snprintf(tempbuf, sizeof(tempbuf), "\"%s\": file \"%s\", line %d\n",
+		exp, basename, line);
+	printk("%s", tempbuf);
+#endif /* BCMASSERT_LOG */
+}
+#endif /* BCMASSERT_LOG */
+
+void
+osl_delay(uint usec)
+{
+	uint d;
+
+	while (usec > 0) {
+		d = MIN(usec, 1000);
+		udelay(d);
+		usec -= d;
+	}
+}
+
+void
+osl_sleep(uint ms)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+	if (ms < 20)
+		usleep_range(ms*1000, ms*1000 + 1000);
+	else
+#endif
+	msleep(ms);
+}
+
+
+
+/* Clone a packet.
+ * The pkttag contents are NOT cloned.
+ */
+#ifdef BCMDBG_CTRACE
+void *
+osl_pktdup(osl_t *osh, void *skb, int line, char *file)
+#else
+void *
+osl_pktdup(osl_t *osh, void *skb)
+#endif /* BCMDBG_CTRACE */
+{
+	void * p;
+
+	ASSERT(!PKTISCHAINED(skb));
+
+	/* clear the CTFBUF flag if set and map the rest of the buffer
+	 * before cloning.
+	 */
+	PKTCTFMAP(osh, skb);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+	if ((p = pskb_copy((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
+#else
+	if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
+#endif
+		return NULL;
+
+#ifdef CTFPOOL
+	if (PKTISFAST(osh, skb)) {
+		ctfpool_t *ctfpool;
+
+		/* If a buffer allocated from the ctfpool is cloned, we can't
+		 * be sure when it will be freed. Since there is a chance that
+		 * we will lose a buffer from our pool, we increment the refill
+		 * count so a replacement object can be allocated later.
+		 */
+		ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
+		ASSERT(ctfpool != NULL);
+		PKTCLRFAST(osh, p);
+		PKTCLRFAST(osh, skb);
+		ctfpool->refills++;
+	}
+#endif /* CTFPOOL */
+
+	/* Clear PKTC  context */
+	PKTSETCLINK(p, NULL);
+	PKTCCLRFLAGS(p);
+	PKTCSETCNT(p, 1);
+	PKTCSETLEN(p, PKTLEN(osh, skb));
+
+	/* skb_clone() copies skb->cb; we don't want that */
+	if (osh->pub.pkttag)
+		OSL_PKTTAG_CLEAR(p);
+
+	/* Increment the packet counter */
+	atomic_inc(&osh->cmn->pktalloced);
+#ifdef BCMDBG_CTRACE
+	ADD_CTRACE(osh, (struct sk_buff *)p, file, line);
+#endif
+	return (p);
+}
+
+#ifdef BCMDBG_CTRACE
+int osl_pkt_is_frmnative(osl_t *osh, struct sk_buff *pkt)
+{
+	unsigned long flags;
+	struct sk_buff *skb;
+	int ck = FALSE;
+
+	spin_lock_irqsave(&osh->ctrace_lock, flags);
+
+	list_for_each_entry(skb, &osh->ctrace_list, ctrace_list) {
+		if (pkt == skb) {
+			ck = TRUE;
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&osh->ctrace_lock, flags);
+	return ck;
+}
+
+void osl_ctrace_dump(osl_t *osh, struct bcmstrbuf *b)
+{
+	unsigned long flags;
+	struct sk_buff *skb;
+	int idx = 0;
+	int i, j;
+
+	spin_lock_irqsave(&osh->ctrace_lock, flags);
+
+	if (b != NULL)
+		bcm_bprintf(b, " Total %d sbk not free\n", osh->ctrace_num);
+	else
+		printk(" Total %d sbk not free\n", osh->ctrace_num);
+
+	list_for_each_entry(skb, &osh->ctrace_list, ctrace_list) {
+		if (b != NULL)
+			bcm_bprintf(b, "[%d] skb %p:\n", ++idx, skb);
+		else
+			printk("[%d] skb %p:\n", ++idx, skb);
+
+		for (i = 0; i < skb->ctrace_count; i++) {
+			j = (skb->ctrace_start + i) % CTRACE_NUM;
+			if (b != NULL)
+				bcm_bprintf(b, "    [%s(%d)]\n", skb->func[j], skb->line[j]);
+			else
+				printk("    [%s(%d)]\n", skb->func[j], skb->line[j]);
+		}
+		if (b != NULL)
+			bcm_bprintf(b, "\n");
+		else
+			printk("\n");
+	}
+
+	spin_unlock_irqrestore(&osh->ctrace_lock, flags);
+
+	return;
+}
+#endif /* BCMDBG_CTRACE */
+
+
+/*
+ * OSLREGOPS specifies the use of osl_XXX routines to be used for register access
+ */
+
+/*
+ * BINOSL selects the slightly slower function-call-based binary compatible osl.
+ */
+
+uint
+osl_pktalloced(osl_t *osh)
+{
+	if (atomic_read(&osh->cmn->refcount) == 1)
+		return (atomic_read(&osh->cmn->pktalloced));
+	else
+		return 0;
+}
+
+/* Linux Kernel: File Operations: start */
+void *
+osl_os_open_image(char *filename)
+{
+	struct file *fp;
+
+	fp = filp_open(filename, O_RDONLY, 0);
+	/*
+	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
+	 * Alternative:
+	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
+	 * ???
+	 */
+	if (IS_ERR(fp))
+		fp = NULL;
+
+	return fp;
+}
+
+int
+osl_os_get_image_block(char *buf, int len, void *image)
+{
+	struct file *fp = (struct file *)image;
+	int rdlen;
+
+	if (!image)
+		return 0;
+
+	rdlen = kernel_read(fp, fp->f_pos, buf, len);
+	if (rdlen > 0)
+		fp->f_pos += rdlen;
+
+	return rdlen;
+}
+
+void
+osl_os_close_image(void *image)
+{
+	if (image)
+		filp_close((struct file *)image, NULL);
+}
+
+int
+osl_os_image_size(void *image)
+{
+	int len = 0, curroffset;
+
+	if (image) {
+		/* store the current offset (whence 1 == SEEK_CUR) */
+		curroffset = generic_file_llseek(image, 0, 1);
+		/* seek to the end of the file to get its length (whence 2 == SEEK_END) */
+		len = generic_file_llseek(image, 0, 2);
+		/* restore the original offset (whence 0 == SEEK_SET) */
+		generic_file_llseek(image, curroffset, 0);
+	}
+	return len;
+}
+
+/* Linux Kernel: File Operations: end */
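+/*
+ * Illustrative usage of the image helpers above (a sketch, not driver
+ * code; consume() is a hypothetical callback): read a firmware image in
+ * fixed-size chunks.
+ *
+ *	void *img = osl_os_open_image("/lib/firmware/fw.bin");
+ *	char buf[2048];
+ *	int n;
+ *
+ *	if (img != NULL) {
+ *		while ((n = osl_os_get_image_block(buf, sizeof(buf), img)) > 0)
+ *			consume(buf, n);
+ *		osl_os_close_image(img);
+ *	}
+ */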
diff --git a/drivers/staging/edison-bcm43340/sbutils.c b/drivers/staging/edison-bcm43340/sbutils.c
new file mode 100644
index 0000000..712f721
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/sbutils.c
@@ -0,0 +1,1063 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: sbutils.c 431423 2013-10-23 16:07:35Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+#include <sbpcmcia.h>
+
+#include "siutils_priv.h"
+
+
+/* local prototypes */
+static uint _sb_coreidx(si_info_t *sii, uint32 sba);
+static uint _sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba,
+                     uint ncores);
+static uint32 _sb_coresba(si_info_t *sii);
+static void *_sb_setcoreidx(si_info_t *sii, uint coreidx);
+#define	SET_SBREG(sii, r, mask, val)	\
+		W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
+#define	REGS2SB(va)	(sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)
+
+/* sonicsrev */
+#define	SONICS_2_2	(SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
+#define	SONICS_2_3	(SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)
+
+#define	R_SBREG(sii, sbr)	sb_read_sbreg((sii), (sbr))
+#define	W_SBREG(sii, sbr, v)	sb_write_sbreg((sii), (sbr), (v))
+#define	AND_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
+#define	OR_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
+
+static uint32
+sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
+{
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint8 tmp;
+	uint32 val, intr_val = 0;
+
+	/*
+	 * Compact flash only has an 11-bit address, while we need a 12-bit
+	 * address. MEM_SEG will be OR'd with the other 11 address bits in
+	 * hardware, so we program MEM_SEG with the 12th bit when necessary
+	 * (i.e. when accessing sb registers). For a normal PCMCIA bus
+	 * (CFTable_regwinsz > 2k), do nothing special.
+	 */
+	if (PCMCIA(sii)) {
+		INTR_OFF(sii, intr_val);
+		tmp = 1;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
+	}
+
+	val = R_REG(sii->osh, sbr);
+
+	if (PCMCIA(sii)) {
+		tmp = 0;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		INTR_RESTORE(sii, intr_val);
+	}
+
+	return (val);
+}
+
+static void
+sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
+{
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint8 tmp;
+	volatile uint32 dummy;
+	uint32 intr_val = 0;
+
+	/*
+	 * Compact flash only has an 11-bit address, while we need a 12-bit
+	 * address. MEM_SEG will be OR'd with the other 11 address bits in
+	 * hardware, so we program MEM_SEG with the 12th bit when necessary
+	 * (i.e. when accessing sb registers). For a normal PCMCIA bus
+	 * (CFTable_regwinsz > 2k), do nothing special.
+	 */
+	if (PCMCIA(sii)) {
+		INTR_OFF(sii, intr_val);
+		tmp = 1;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
+	}
+
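+	/* On PCMCIA the 32-bit write is split into two 16-bit halves,
+	 * presumably a bus-width limitation, with dummy reads in between to
+	 * order the accesses.
+	 */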
+	if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) {
+		dummy = R_REG(sii->osh, sbr);
+		BCM_REFERENCE(dummy);
+		W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
+		dummy = R_REG(sii->osh, sbr);
+		BCM_REFERENCE(dummy);
+		W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
+	} else
+		W_REG(sii->osh, sbr, v);
+
+	if (PCMCIA(sii)) {
+		tmp = 0;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+		INTR_RESTORE(sii, intr_val);
+	}
+}
+
+uint
+sb_coreid(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
+}
+
+uint
+sb_intflag(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	void *corereg;
+	sbconfig_t *sb;
+	uint origidx, intflag, intr_val = 0;
+
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+	corereg = si_setcore(sih, CC_CORE_ID, 0);
+	ASSERT(corereg != NULL);
+	sb = REGS2SB(corereg);
+	intflag = R_SBREG(sii, &sb->sbflagst);
+	sb_setcoreidx(sih, origidx);
+	INTR_RESTORE(sii, intr_val);
+
+	return intflag;
+}
+
+uint
+sb_flag(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
+}
+
+void
+sb_setint(si_t *sih, int siflag)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 vec;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	if (siflag == -1)
+		vec = 0;
+	else
+		vec = 1 << siflag;
+	W_SBREG(sii, &sb->sbintvec, vec);
+}
+
+/* return core index of the core with address 'sba' */
+static uint
+_sb_coreidx(si_info_t *sii, uint32 sba)
+{
+	uint i;
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	for (i = 0; i < sii->numcores; i ++)
+		if (sba == cores_info->coresba[i])
+			return i;
+	return BADIDX;
+}
+
+/* return core address of the current core */
+static uint32
+_sb_coresba(si_info_t *sii)
+{
+	uint32 sbaddr;
+
+	switch (BUSTYPE(sii->pub.bustype)) {
+	case SI_BUS: {
+		sbconfig_t *sb = REGS2SB(sii->curmap);
+		sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0));
+		break;
+	}
+
+	case PCI_BUS:
+		sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+		break;
+
+	case PCMCIA_BUS: {
+		uint8 tmp = 0;
+		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
+		sbaddr  = (uint32)tmp << 12;
+		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
+		sbaddr |= (uint32)tmp << 16;
+		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
+		sbaddr |= (uint32)tmp << 24;
+		break;
+	}
+
+	case SPI_BUS:
+	case SDIO_BUS:
+		sbaddr = (uint32)(uintptr)sii->curmap;
+		break;
+
+	default:
+		sbaddr = BADCOREADDR;
+		break;
+	}
+
+	return sbaddr;
+}
+
+uint
+sb_corevendor(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
+}
+
+uint
+sb_corerev(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint sbidh;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+	sbidh = R_SBREG(sii, &sb->sbidhigh);
+
+	return (SBCOREREV(sbidh));
+}
+
+/* set core-specific control flags */
+void
+sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	ASSERT((val & ~mask) == 0);
+
+	/* mask and set */
+	w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
+	        (val << SBTML_SICF_SHIFT);
+	W_SBREG(sii, &sb->sbtmstatelow, w);
+}
+
+/* set/clear core-specific control flags */
+uint32
+sb_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	ASSERT((val & ~mask) == 0);
+
+	/* mask and set */
+	if (mask || val) {
+		w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
+		        (val << SBTML_SICF_SHIFT);
+		W_SBREG(sii, &sb->sbtmstatelow, w);
+	}
+
+	/* Return the new value.
+	 * For a write operation, the following readback also ensures
+	 * completion of the write.
+	 */
+	return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
+}
+
+/* set/clear core-specific status flags */
+uint32
+sb_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	uint32 w;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	ASSERT((val & ~mask) == 0);
+	ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+	/* mask and set */
+	if (mask || val) {
+		w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
+		        (val << SBTMH_SISF_SHIFT);
+		W_SBREG(sii, &sb->sbtmstatehigh, w);
+	}
+
+	/* return the new value */
+	return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
+}
+
+bool
+sb_iscoreup(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	return ((R_SBREG(sii, &sb->sbtmstatelow) &
+	         (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
+	        (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
+}
+
+/*
+ * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
+ * switch back to the original core, and return the new value.
+ *
+ * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci registers
+ * and (on newer pci cores) chipcommon registers.
+ */
+uint
+sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+	uint origidx = 0;
+	uint32 *r = NULL;
+	uint w;
+	uint intr_val = 0;
+	bool fast = FALSE;
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	ASSERT(GOODIDX(coreidx));
+	ASSERT(regoff < SI_CORE_SIZE);
+	ASSERT((val & ~mask) == 0);
+
+	if (coreidx >= SI_MAXCORES)
+		return 0;
+
+	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
+		/* If internal bus, we can always get at everything */
+		fast = TRUE;
+		/* map if does not exist */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+			                            SI_CORE_SIZE);
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
+	} else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+		/* If pci/pcie, we can get at the pci/pcie regs and, on newer cores, at chipc */
+
+		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+			/* Chipc registers are mapped at 12KB */
+
+			fast = TRUE;
+			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
+		} else if (sii->pub.buscoreidx == coreidx) {
+			/* pci registers are either in the last 2KB of an 8KB window
+			 * or, in pcie and pci rev 13, at 8KB
+			 */
+			fast = TRUE;
+			if (SI_FAST(sii))
+				r = (uint32 *)((char *)sii->curmap +
+				               PCI_16KB0_PCIREGS_OFFSET + regoff);
+			else
+				r = (uint32 *)((char *)sii->curmap +
+				               ((regoff >= SBCONFIGOFF) ?
+				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+				               regoff);
+		}
+	}
+
+	if (!fast) {
+		INTR_OFF(sii, intr_val);
+
+		/* save current core index */
+		origidx = si_coreidx(&sii->pub);
+
+		/* switch core */
+		r = (uint32*) ((uchar*)sb_setcoreidx(&sii->pub, coreidx) + regoff);
+	}
+	ASSERT(r != NULL);
+
+	/* mask and set */
+	if (mask || val) {
+		if (regoff >= SBCONFIGOFF) {
+			w = (R_SBREG(sii, r) & ~mask) | val;
+			W_SBREG(sii, r, w);
+		} else {
+			w = (R_REG(sii->osh, r) & ~mask) | val;
+			W_REG(sii->osh, r, w);
+		}
+	}
+
+	/* readback */
+	if (regoff >= SBCONFIGOFF)
+		w = R_SBREG(sii, r);
+	else {
+		if ((CHIPID(sii->pub.chip) == BCM5354_CHIP_ID) &&
+		    (coreidx == SI_CC_IDX) &&
+		    (regoff == OFFSETOF(chipcregs_t, watchdog))) {
+			w = val;
+		} else
+			w = R_REG(sii->osh, r);
+	}
+
+	if (!fast) {
+		/* restore core index */
+		if (origidx != coreidx)
+			sb_setcoreidx(&sii->pub, origidx);
+
+		INTR_RESTORE(sii, intr_val);
+	}
+
+	return (w);
+}
+
+/*
+ * If there is no need for fiddling with interrupts or core switches (typically silicon
+ * backplane registers, pci registers, and chipcommon registers), this function
+ * translates the register offset on this core to a mapped address. That address can
+ * be used for W_REG/R_REG directly.
+ *
+ * For accessing registers that would need a core switch, this function will return
+ * NULL.
+ */
+uint32 *
+sb_corereg_addr(si_t *sih, uint coreidx, uint regoff)
+{
+	uint32 *r = NULL;
+	bool fast = FALSE;
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	ASSERT(GOODIDX(coreidx));
+	ASSERT(regoff < SI_CORE_SIZE);
+
+	if (coreidx >= SI_MAXCORES)
+		return 0;
+
+	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
+		/* If internal bus, we can always get at everything */
+		fast = TRUE;
+		/* map if does not exist */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+			                            SI_CORE_SIZE);
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
+	} else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+		/* If pci/pcie, we can get at the pci/pcie regs and, on newer cores, at chipc */
+
+		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+			/* Chipc registers are mapped at 12KB */
+
+			fast = TRUE;
+			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
+		} else if (sii->pub.buscoreidx == coreidx) {
+			/* pci registers are either in the last 2KB of an 8KB window
+			 * or, in pcie and pci rev 13, at 8KB
+			 */
+			fast = TRUE;
+			if (SI_FAST(sii))
+				r = (uint32 *)((char *)sii->curmap +
+				               PCI_16KB0_PCIREGS_OFFSET + regoff);
+			else
+				r = (uint32 *)((char *)sii->curmap +
+				               ((regoff >= SBCONFIGOFF) ?
+				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+				               regoff);
+		}
+	}
+
+	if (!fast)
+		return 0;
+
+	return (r);
+}
+
+/* Scan the enumeration space to find all cores starting from the given
+ * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
+ * is the default core address at chip POR time and 'regs' is the virtual
+ * address that the default core is mapped at. 'ncores' is the number of
+ * cores expected on bus 'sbba'. It returns the total number of cores
+ * starting from bus 'sbba', inclusive.
+ */
+#define SB_MAXBUSES	2
+static uint
+_sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, uint numcores)
+{
+	uint next;
+	uint ncc = 0;
+	uint i;
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	if (bus >= SB_MAXBUSES) {
+		SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
+		return 0;
+	}
+	SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));
+
+	/* Scan all cores on the bus starting from core 0.
+	 * Core addresses must be contiguous on each bus.
+	 */
+	for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
+		cores_info->coresba[next] = sbba + (i * SI_CORE_SIZE);
+
+		/* keep and reuse the initial register mapping */
+		if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (cores_info->coresba[next] == sba)) {
+			SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
+			cores_info->regs[next] = regs;
+		}
+
+		/* change core to 'next' and read its coreid */
+		sii->curmap = _sb_setcoreidx(sii, next);
+		sii->curidx = next;
+
+		cores_info->coreid[next] = sb_coreid(&sii->pub);
+
+		/* core specific processing... */
+		/* chipc provides # cores */
+		if (cores_info->coreid[next] == CC_CORE_ID) {
+			chipcregs_t *cc = (chipcregs_t *)sii->curmap;
+			uint32 ccrev = sb_corerev(&sii->pub);
+
+			/* determine numcores - this is the total # cores in the chip */
+			if (((ccrev == 4) || (ccrev >= 6))) {
+				ASSERT(cc);
+				numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >>
+				        CID_CC_SHIFT;
+			} else {
+				/* Older chips */
+				uint chip = CHIPID(sii->pub.chip);
+
+				if (chip == BCM4306_CHIP_ID)	/* < 4306c0 */
+					numcores = 6;
+				else if (chip == BCM4704_CHIP_ID)
+					numcores = 9;
+				else if (chip == BCM5365_CHIP_ID)
+					numcores = 7;
+				else {
+					SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n",
+					          chip));
+					ASSERT(0);
+					numcores = 1;
+				}
+			}
+			SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores,
+				sii->pub.issim ? "QT" : ""));
+		}
+		/* scan bridged SB(s) and add results to the end of the list */
+		else if (cores_info->coreid[next] == OCP_CORE_ID) {
+			sbconfig_t *sb = REGS2SB(sii->curmap);
+			uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
+			uint nsbcc;
+
+			sii->numcores = next + 1;
+
+			if ((nsbba & 0xfff00000) != SI_ENUM_BASE)
+				continue;
+			nsbba &= 0xfffff000;
+			if (_sb_coreidx(sii, nsbba) != BADIDX)
+				continue;
+
+			nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
+			nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc);
+			if (sbba == SI_ENUM_BASE)
+				numcores -= nsbcc;
+			ncc += nsbcc;
+		}
+	}
+
+	SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));
+
+	sii->numcores = i + ncc;
+	return sii->numcores;
+}
+
+/* scan the sb enumerated space to identify all cores */
+void
+sb_scan(si_t *sih, void *regs, uint devid)
+{
+	uint32 origsba;
+	sbconfig_t *sb;
+	si_info_t *sii = SI_INFO(sih);
+
+	sb = REGS2SB(sii->curmap);
+
+	sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;
+
+	/* Save the current core info; validate it later, once we know for
+	 * sure what is good and what is bad.
+	 */
+	origsba = _sb_coresba(sii);
+
+	/* scan all SB(s) starting from SI_ENUM_BASE */
+	sii->numcores = _sb_scan(sii, origsba, regs, 0, SI_ENUM_BASE, 1);
+}
+
+/*
+ * This function changes logical "focus" to the indicated core;
+ * must be called with interrupts off.
+ * Moreover, callers should keep interrupts off while switching out of and back to the d11 core.
+ */
+void *
+sb_setcoreidx(si_t *sih, uint coreidx)
+{
+	si_info_t *sii = SI_INFO(sih);
+
+	if (coreidx >= sii->numcores)
+		return (NULL);
+
+	/*
+	 * If the user has provided an interrupt mask enabled function,
+	 * then assert interrupts are disabled before switching the core.
+	 */
+	ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
+
+	sii->curmap = _sb_setcoreidx(sii, coreidx);
+	sii->curidx = coreidx;
+
+	return (sii->curmap);
+}
+
+/* This function changes the logical "focus" to the indicated core.
+ * Return the current core's virtual address.
+ */
+static void *
+_sb_setcoreidx(si_info_t *sii, uint coreidx)
+{
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint32 sbaddr = cores_info->coresba[coreidx];
+	void *regs;
+
+	switch (BUSTYPE(sii->pub.bustype)) {
+	case SI_BUS:
+		/* map new one */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		regs = cores_info->regs[coreidx];
+		break;
+
+	case PCI_BUS:
+		/* point bar0 window */
+		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr);
+		regs = sii->curmap;
+		break;
+
+	case PCMCIA_BUS: {
+		uint8 tmp = (sbaddr >> 12) & 0x0f;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
+		tmp = (sbaddr >> 16) & 0xff;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
+		tmp = (sbaddr >> 24) & 0xff;
+		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
+		regs = sii->curmap;
+		break;
+	}
+	case SPI_BUS:
+	case SDIO_BUS:
+		/* map new one */
+		if (!cores_info->regs[coreidx]) {
+			cores_info->regs[coreidx] = (void *)(uintptr)sbaddr;
+			ASSERT(GOODREGS(cores_info->regs[coreidx]));
+		}
+		regs = cores_info->regs[coreidx];
+		break;
+
+	default:
+		ASSERT(0);
+		regs = NULL;
+		break;
+	}
+
+	return regs;
+}
+
+/* Return the address of sbadmatch0/1/2/3 register */
+static volatile uint32 *
+sb_admatch(si_info_t *sii, uint asidx)
+{
+	sbconfig_t *sb;
+	volatile uint32 *addrm;
+
+	sb = REGS2SB(sii->curmap);
+
+	switch (asidx) {
+	case 0:
+		addrm =  &sb->sbadmatch0;
+		break;
+
+	case 1:
+		addrm =  &sb->sbadmatch1;
+		break;
+
+	case 2:
+		addrm =  &sb->sbadmatch2;
+		break;
+
+	case 3:
+		addrm =  &sb->sbadmatch3;
+		break;
+
+	default:
+		SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx));
+		return 0;
+	}
+
+	return (addrm);
+}
+
+/* Return the number of address spaces in current core */
+int
+sb_numaddrspaces(si_t *sih)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+	sb = REGS2SB(sii->curmap);
+
+	/* + 1 because of enumeration space */
+	return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1;
+}
+
+/* Return the address of the nth address space in the current core */
+uint32
+sb_addrspace(si_t *sih, uint asidx)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx))));
+}
+
+/* Return the size of the nth address space in the current core */
+uint32
+sb_addrspacesize(si_t *sih, uint asidx)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx))));
+}
+
+
+/* do buffered registers update */
+void
+sb_commit(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+
+	origidx = sii->curidx;
+	ASSERT(GOODIDX(origidx));
+
+	INTR_OFF(sii, intr_val);
+
+	/* switch over to chipcommon core if there is one, else use pci */
+	if (sii->pub.ccrev != NOREV) {
+		chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+		ASSERT(ccregs != NULL);
+
+		/* do the buffer registers update */
+		W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT);
+		W_REG(sii->osh, &ccregs->broadcastdata, 0x0);
+	} else
+		ASSERT(0);
+
+	/* restore core index */
+	sb_setcoreidx(sih, origidx);
+	INTR_RESTORE(sii, intr_val);
+}
+
+void
+sb_core_disable(si_t *sih, uint32 bits)
+{
+	si_info_t *sii;
+	volatile uint32 dummy;
+	sbconfig_t *sb;
+
+	sii = SI_INFO(sih);
+
+	ASSERT(GOODREGS(sii->curmap));
+	sb = REGS2SB(sii->curmap);
+
+	/* if core is already in reset, just return */
+	if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
+		return;
+
+	/* if clocks are not enabled, put into reset and return */
+	if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
+		goto disable;
+
+	/* set target reject and spin until busy is clear (preserve core-specific bits) */
+	OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+	SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
+	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
+		SI_ERROR(("%s: target state still busy\n", __FUNCTION__));
+
+	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
+		OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
+		dummy = R_SBREG(sii, &sb->sbimstate);
+		BCM_REFERENCE(dummy);
+		OSL_DELAY(1);
+		SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
+	}
+
+	/* set reset and reject while enabling the clocks */
+	W_SBREG(sii, &sb->sbtmstatelow,
+	        (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+	         SBTML_REJ | SBTML_RESET));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(10);
+
+	/* don't forget to clear the initiator reject bit */
+	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
+		AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);
+
+disable:
+	/* leave reset and reject asserted */
+	W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
+	OSL_DELAY(1);
+}
+
+/* reset and re-enable a core
+ * inputs:
+ * bits - core specific bits that are set during and after reset sequence
+ * resetbits - core specific bits that are set only during reset sequence
+ */
+void
+sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+	si_info_t *sii;
+	sbconfig_t *sb;
+	volatile uint32 dummy;
+
+	sii = SI_INFO(sih);
+	ASSERT(GOODREGS(sii->curmap));
+	sb = REGS2SB(sii->curmap);
+
+	/*
+	 * Must do the disable sequence first to work for arbitrary current core state.
+	 */
+	sb_core_disable(sih, (bits | resetbits));
+
+	/*
+	 * Now do the initialization sequence.
+	 */
+
+	/* set reset while enabling the clock and forcing them on throughout the core */
+	W_SBREG(sii, &sb->sbtmstatelow,
+	        (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+	         SBTML_RESET));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+
+	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
+		W_SBREG(sii, &sb->sbtmstatehigh, 0);
+	}
+	if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
+		AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
+	}
+
+	/* clear reset and allow it to propagate throughout the core */
+	W_SBREG(sii, &sb->sbtmstatelow,
+	        ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+
+	/* leave clock enabled */
+	W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
+	dummy = R_SBREG(sii, &sb->sbtmstatelow);
+	BCM_REFERENCE(dummy);
+	OSL_DELAY(1);
+}
+
+/*
+ * Set the initiator timeout for the "master core".
+ * The master core is defined to be the core in control
+ * of the chip and so it issues accesses to non-memory
+ * locations (because of DMA, *any* core can access memory).
+ *
+ * The routine uses the bus to decide who is the master:
+ *	SI_BUS => mips
+ *	JTAG_BUS => chipc
+ *	PCI_BUS => pci or pcie
+ *	PCMCIA_BUS => pcmcia
+ *	SDIO_BUS => pcmcia
+ *
+ * This routine exists so callers can disable initiator
+ * timeouts so accesses to very slow devices like otp
+ * won't cause an abort. The routine allows arbitrary
+ * settings of the service and request timeouts, though.
+ *
+ * Returns the timeout state before changing it or -1
+ * on error.
+ */
+
+#define	TO_MASK	(SBIMCL_RTO_MASK | SBIMCL_STO_MASK)
+
+uint32
+sb_set_initiator_to(si_t *sih, uint32 to, uint idx)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+	uint32 tmp, ret = 0xffffffff;
+	sbconfig_t *sb;
+
+	if ((to & ~TO_MASK) != 0)
+		return ret;
+
+	/* Figure out the master core */
+	if (idx == BADIDX) {
+		switch (BUSTYPE(sii->pub.bustype)) {
+		case PCI_BUS:
+			idx = sii->pub.buscoreidx;
+			break;
+		case JTAG_BUS:
+			idx = SI_CC_IDX;
+			break;
+		case PCMCIA_BUS:
+		case SDIO_BUS:
+			idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0);
+			break;
+		case SI_BUS:
+			idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0);
+			break;
+		default:
+			ASSERT(0);
+		}
+		if (idx == BADIDX)
+			return ret;
+	}
+
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	sb = REGS2SB(sb_setcoreidx(sih, idx));
+
+	tmp = R_SBREG(sii, &sb->sbimconfiglow);
+	ret = tmp & TO_MASK;
+	W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);
+
+	sb_commit(sih);
+	sb_setcoreidx(sih, origidx);
+	INTR_RESTORE(sii, intr_val);
+	return ret;
+}
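+/* Usage sketch (illustrative, not part of the driver): a caller that must
+ * touch a very slow device such as OTP would typically clear the timeouts
+ * around the access and restore the previous setting afterwards:
+ *
+ *	uint32 saved = sb_set_initiator_to(sih, 0, BADIDX);
+ *	...slow access...
+ *	sb_set_initiator_to(sih, saved, BADIDX);
+ */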
+
+uint32
+sb_base(uint32 admatch)
+{
+	uint32 base;
+	uint type;
+
+	type = admatch & SBAM_TYPE_MASK;
+	ASSERT(type < 3);
+
+	base = 0;
+
+	if (type == 0) {
+		base = admatch & SBAM_BASE0_MASK;
+	} else if (type == 1) {
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		base = admatch & SBAM_BASE1_MASK;
+	} else if (type == 2) {
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		base = admatch & SBAM_BASE2_MASK;
+	}
+
+	return (base);
+}
+
+uint32
+sb_size(uint32 admatch)
+{
+	uint32 size;
+	uint type;
+
+	type = admatch & SBAM_TYPE_MASK;
+	ASSERT(type < 3);
+
+	size = 0;
+
+	if (type == 0) {
+		size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
+	} else if (type == 1) {
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
+	} else if (type == 2) {
+		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
+		size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
+	}
+
+	return (size);
+}
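+/* Worked example (derived from the code above, illustrative values): for a
+ * type-0 admatch word describing a standard 4KB core register space at
+ * 0x18000000, sb_base() returns the masked base 0x18000000 and sb_size()
+ * returns 1 << (ADINT0 + 1) = 0x1000, i.e. the ADINT field encodes
+ * log2(size) - 1.
+ */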
diff --git a/drivers/staging/edison-bcm43340/siutils.c b/drivers/staging/edison-bcm43340/siutils.c
new file mode 100644
index 0000000..f3f1e55
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/siutils.c
@@ -0,0 +1,2796 @@
+/*
+ * Misc utility routines for accessing chip-specific features
+ * of the SiliconBackplane-based Broadcom chips.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: siutils.c 475454 2014-05-05 20:54:58Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+#include <sbpcmcia.h>
+#include <sbsocram.h>
+#include <bcmsdh.h>
+#include <sdio.h>
+#include <sbsdio.h>
+#include <sbhnddma.h>
+#include <sbsdpcmdev.h>
+#include <bcmsdpcm.h>
+#include <hndpmu.h>
+
+#ifdef BCM_SDRBL
+#include <hndcpu.h>
+#endif /* BCM_SDRBL */
+
+#include "siutils_priv.h"
+
+/* local prototypes */
+static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs,
+                              uint bustype, void *sdh, char **vars, uint *varsz);
+static bool si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh);
+static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
+	uint *origidx, void *regs);
+
+/* global variable to indicate reservation/release of gpio's */
+static uint32 si_gpioreservation = 0;
+
+/* global flag to prevent shared resources from being initialized multiple times in si_attach() */
+
+int do_4360_pcie2_war = 0;
+
+/* global kernel resource */
+static si_info_t ksii;
+static si_cores_info_t ksii_cores_info;
+
+/**
+ * Allocate an si handle. This function may be called multiple times.
+ *
+ * devid - pci device id (used to determine chip#)
+ * osh - opaque OS handle
+ * regs - virtual address of initial core registers
+ * bustype - pci/pcmcia/sb/sdio/etc
+ * vars - pointer to a to-be-created pointer area for "environment" variables.
+ *        Some callers pass NULL for 'vars', so it must not be dereferenced
+ *        unconditionally.
+ * varsz - pointer to int to return the size of the vars
+ */
+si_t *
+si_attach(uint devid, osl_t *osh, void *regs,
+                       uint bustype, void *sdh, char **vars, uint *varsz)
+{
+	si_info_t *sii;
+	si_cores_info_t *cores_info;
+	/* alloc si_info_t */
+	if ((sii = MALLOCZ(osh, sizeof (si_info_t))) == NULL) {
+		SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
+		return (NULL);
+	}
+
+	/* alloc si_cores_info_t */
+	if ((cores_info = (si_cores_info_t *)MALLOCZ(osh, sizeof (si_cores_info_t))) == NULL) {
+		SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
+		MFREE(osh, sii, sizeof(si_info_t));
+		return (NULL);
+	}
+	sii->cores_info = cores_info;
+
+	if (si_doattach(sii, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) {
+		MFREE(osh, sii, sizeof(si_info_t));
+		MFREE(osh, cores_info, sizeof(si_cores_info_t));
+		return (NULL);
+	}
+	sii->vars = vars ? *vars : NULL;
+	sii->varsz = varsz ? *varsz : 0;
+
+	return (si_t *)sii;
+}
+
+
+static uint32	wd_msticks;		/* watchdog timer ticks normalized to ms */
+
+/** generic kernel variant of si_attach() */
+si_t *
+si_kattach(osl_t *osh)
+{
+	static bool ksii_attached = FALSE;
+	si_cores_info_t *cores_info;
+
+	if (!ksii_attached) {
+		void *regs = NULL;
+		regs = REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
+		cores_info = (si_cores_info_t *)&ksii_cores_info;
+		ksii.cores_info = cores_info;
+
+		ASSERT(osh);
+		if (si_doattach(&ksii, BCM4710_DEVICE_ID, osh, regs,
+		                SI_BUS, NULL,
+		                osh != SI_OSH ? &(ksii.vars) : NULL,
+		                osh != SI_OSH ? &(ksii.varsz) : NULL) == NULL) {
+			SI_ERROR(("si_kattach: si_doattach failed\n"));
+			REG_UNMAP(regs);
+			return NULL;
+		}
+		REG_UNMAP(regs);
+
+		/* save ticks normalized to ms for si_watchdog_ms() */
+		if (PMUCTL_ENAB(&ksii.pub)) {
+				/* based on 32KHz ILP clock */
+				wd_msticks = 32;
+		} else {
+			wd_msticks = ALP_CLOCK / 1000;
+		}
+
+		ksii_attached = TRUE;
+		SI_MSG(("si_kattach done. ccrev = %d, wd_msticks = %d\n",
+		        ksii.pub.ccrev, wd_msticks));
+	}
+
+	return &ksii.pub;
+}
+
+
+static bool
+si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh)
+{
+	/* need to set the memseg flag for CF cards before any sb register access */
+	if (BUSTYPE(bustype) == PCMCIA_BUS)
+		sii->memseg = TRUE;
+
+	if (BUSTYPE(bustype) == SDIO_BUS) {
+		int err;
+		uint8 clkset;
+
+		/* Try forcing SDIO core to do ALPAvail request only */
+		clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+		if (!err) {
+			uint8 clkval;
+
+			/* If register supported, wait for ALPAvail and then force ALP */
+			clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+			if ((clkval & ~SBSDIO_AVBITS) == clkset) {
+				SPINWAIT(((clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+					SBSDIO_FUNC1_CHIPCLKCSR, NULL)), !SBSDIO_ALPAV(clkval)),
+					PMU_MAX_TRANSITION_DLY);
+				if (!SBSDIO_ALPAV(clkval)) {
+					SI_ERROR(("timeout on ALPAV wait, clkval 0x%02x\n",
+						clkval));
+					return FALSE;
+				}
+				clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
+				bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+					clkset, &err);
+				OSL_DELAY(65);
+			}
+		}
+
+		/* Also, disable the extra SDIO pull-ups */
+		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+	}
+
+
+	return TRUE;
+}
+
+static bool
+si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
+	uint *origidx, void *regs)
+{
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	bool pci, pcie, pcie_gen2 = FALSE;
+	uint i;
+	uint pciidx, pcieidx, pcirev, pcierev;
+
+	cc = si_setcoreidx(&sii->pub, SI_CC_IDX);
+	ASSERT((uintptr)cc);
+
+	/* get chipcommon rev */
+	sii->pub.ccrev = (int)si_corerev(&sii->pub);
+
+	/* get chipcommon chipstatus */
+	if (sii->pub.ccrev >= 11)
+		sii->pub.chipst = R_REG(sii->osh, &cc->chipstatus);
+
+	/* get chipcommon capabilites */
+	sii->pub.cccaps = R_REG(sii->osh, &cc->capabilities);
+	/* get chipcommon extended capabilities */
+
+	if (sii->pub.ccrev >= 35)
+		sii->pub.cccaps_ext = R_REG(sii->osh, &cc->capabilities_ext);
+
+	/* get pmu rev and caps */
+	if (sii->pub.cccaps & CC_CAP_PMU) {
+		sii->pub.pmucaps = R_REG(sii->osh, &cc->pmucapabilities);
+		sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
+	}
+
+	SI_MSG(("Chipc: rev %d, caps 0x%x, chipst 0x%x pmurev %d, pmucaps 0x%x\n",
+		sii->pub.ccrev, sii->pub.cccaps, sii->pub.chipst, sii->pub.pmurev,
+		sii->pub.pmucaps));
+
+	/* figure out bus/original core idx */
+	sii->pub.buscoretype = NODEV_CORE_ID;
+	sii->pub.buscorerev = (uint)NOREV;
+	sii->pub.buscoreidx = BADIDX;
+
+	pci = pcie = FALSE;
+	pcirev = pcierev = (uint)NOREV;
+	pciidx = pcieidx = BADIDX;
+
+	for (i = 0; i < sii->numcores; i++) {
+		uint cid, crev;
+
+		si_setcoreidx(&sii->pub, i);
+		cid = si_coreid(&sii->pub);
+		crev = si_corerev(&sii->pub);
+
+		/* Display cores found */
+		SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n",
+		        i, cid, crev, cores_info->coresba[i], cores_info->regs[i]));
+
+		if (BUSTYPE(bustype) == SI_BUS) {
+			/* now look at the chipstatus register to figure out the package */
+			/* for SDIO images downloaded onto a PCIE dev */
+			if (cid == PCIE2_CORE_ID) {
+				if ((CHIPID(sii->pub.chip) == BCM43602_CHIP_ID) ||
+					((CHIPID(sii->pub.chip) == BCM4345_CHIP_ID) &&
+					CST4345_CHIPMODE_PCIE(sii->pub.chipst))) {
+					pcieidx = i;
+					pcierev = crev;
+					pcie = TRUE;
+					pcie_gen2 = TRUE;
+				}
+			}
+
+		}
+		else if (BUSTYPE(bustype) == PCI_BUS) {
+			if (cid == PCI_CORE_ID) {
+				pciidx = i;
+				pcirev = crev;
+				pci = TRUE;
+			} else if ((cid == PCIE_CORE_ID) || (cid == PCIE2_CORE_ID)) {
+				pcieidx = i;
+				pcierev = crev;
+				pcie = TRUE;
+				if (cid == PCIE2_CORE_ID)
+					pcie_gen2 = TRUE;
+			}
+		} else if ((BUSTYPE(bustype) == PCMCIA_BUS) &&
+		           (cid == PCMCIA_CORE_ID)) {
+			sii->pub.buscorerev = crev;
+			sii->pub.buscoretype = cid;
+			sii->pub.buscoreidx = i;
+		}
+		else if (((BUSTYPE(bustype) == SDIO_BUS) ||
+		          (BUSTYPE(bustype) == SPI_BUS)) &&
+		         ((cid == PCMCIA_CORE_ID) ||
+		          (cid == SDIOD_CORE_ID))) {
+			sii->pub.buscorerev = crev;
+			sii->pub.buscoretype = cid;
+			sii->pub.buscoreidx = i;
+		}
+
+		/* remember the core idx we were on before entering this function */
+		if ((savewin && (savewin == cores_info->coresba[i])) ||
+		    (regs == cores_info->regs[i]))
+			*origidx = i;
+	}
+
+#if defined(PCIE_FULL_DONGLE)
+	pci = FALSE;
+#endif
+	if (pci) {
+		sii->pub.buscoretype = PCI_CORE_ID;
+		sii->pub.buscorerev = pcirev;
+		sii->pub.buscoreidx = pciidx;
+	} else if (pcie) {
+		if (pcie_gen2)
+			sii->pub.buscoretype = PCIE2_CORE_ID;
+		else
+			sii->pub.buscoretype = PCIE_CORE_ID;
+		sii->pub.buscorerev = pcierev;
+		sii->pub.buscoreidx = pcieidx;
+	}
+
+	SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx, sii->pub.buscoretype,
+	         sii->pub.buscorerev));
+
+	if (BUSTYPE(sii->pub.bustype) == SI_BUS && (CHIPID(sii->pub.chip) == BCM4712_CHIP_ID) &&
+	    (sii->pub.chippkg != BCM4712LARGE_PKG_ID) && (CHIPREV(sii->pub.chiprev) <= 3))
+		OR_REG(sii->osh, &cc->slow_clk_ctl, SCC_SS_XTAL);
+
+
+	/* Make sure any on-chip ARM is off (in case the strapping is wrong, or
+	 * downloaded code was already running).
+	 */
+	if ((BUSTYPE(bustype) == SDIO_BUS) || (BUSTYPE(bustype) == SPI_BUS)) {
+		if (si_setcore(&sii->pub, ARM7S_CORE_ID, 0) ||
+		    si_setcore(&sii->pub, ARMCM3_CORE_ID, 0))
+			si_core_disable(&sii->pub, 0);
+	}
+
+	/* return to the original core */
+	si_setcoreidx(&sii->pub, *origidx);
+
+	return TRUE;
+}
+
+
+
+
+/**
+ * Allocate an si handle. This function may be called multiple times.
+ *
+ * vars - pointer to a to-be created pointer area for "environment" variables. Some callers of this
+ *        function set 'vars' to NULL.
+ */
+static si_info_t *
+si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs,
+                       uint bustype, void *sdh, char **vars, uint *varsz)
+{
+	struct si_pub *sih = &sii->pub;
+	uint32 w, savewin;
+	chipcregs_t *cc;
+	char *pvars = NULL;
+	uint origidx;
+#if !defined(_CFEZ_) || defined(CFG_WL)
+#endif 
+
+	ASSERT(GOODREGS(regs));
+
+	savewin = 0;
+
+	sih->buscoreidx = BADIDX;
+
+	sii->curmap = regs;
+	sii->sdh = sdh;
+	sii->osh = osh;
+
+
+	/* check to see if we are a si core mimicking a pci core */
+	if ((bustype == PCI_BUS) &&
+	    (OSL_PCI_READ_CONFIG(sii->osh, PCI_SPROM_CONTROL, sizeof(uint32)) == 0xffffffff)) {
+		SI_ERROR(("%s: incoming bus is PCI but it's a lie, switching to SI "
+		          "devid:0x%x\n", __FUNCTION__, devid));
+		bustype = SI_BUS;
+	}
+
+	/* find Chipcommon address */
+	if (bustype == PCI_BUS) {
+		savewin = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+		if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
+			savewin = SI_ENUM_BASE;
+		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, SI_ENUM_BASE);
+		if (!regs)
+			return NULL;
+		cc = (chipcregs_t *)regs;
+	} else if ((bustype == SDIO_BUS) || (bustype == SPI_BUS)) {
+		cc = (chipcregs_t *)sii->curmap;
+	} else {
+		cc = (chipcregs_t *)REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
+	}
+
+	sih->bustype = bustype;
+	if (bustype != BUSTYPE(bustype)) {
+		SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n",
+			bustype, BUSTYPE(bustype)));
+		return NULL;
+	}
+
+	/* bus/core/clk setup for register access */
+	if (!si_buscore_prep(sii, bustype, devid, sdh)) {
+		SI_ERROR(("si_doattach: si_buscore_prep failed %d\n", bustype));
+		return NULL;
+	}
+
+	/* ChipID recognition.
+	 *   We assume we can read chipid at offset 0 from the regs arg.
+	 *   If we add other chiptypes (or if we need to support old sdio hosts w/o chipcommon),
+	 *   some way of recognizing them needs to be added here.
+	 */
+	if (!cc) {
+		SI_ERROR(("%s: chipcommon register space is null \n", __FUNCTION__));
+		return NULL;
+	}
+	w = R_REG(osh, &cc->chipid);
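+	/* chipid quirk: a raw reading of 0x24335 (4335 rev 2) is remapped below to
+	 * 0x14339 (4339 rev 1); 148277 == 0x24335 and 65532 == 0xfffc
+	 */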
+	if ((w & 0xfffff) == 148277) w -= 65532;
+	sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+	/* Might as well fill in chip id rev & pkg */
+	sih->chip = w & CID_ID_MASK;
+	sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
+	sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
+
+	if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && (sih->chiprev == 0) &&
+		(sih->chippkg != BCM4329_289PIN_PKG_ID)) {
+		sih->chippkg = BCM4329_182PIN_PKG_ID;
+	}
+	sih->issim = IS_SIM(sih->chippkg);
+
+	/* scan for cores */
+	if (CHIPTYPE(sii->pub.socitype) == SOCI_SB) {
+		SI_MSG(("Found chip type SB (0x%08x)\n", w));
+		sb_scan(&sii->pub, regs, devid);
+	} else if ((CHIPTYPE(sii->pub.socitype) == SOCI_AI) ||
+		(CHIPTYPE(sii->pub.socitype) == SOCI_NAI)) {
+		if (CHIPTYPE(sii->pub.socitype) == SOCI_AI)
+			SI_MSG(("Found chip type AI (0x%08x)\n", w));
+		else
+			SI_MSG(("Found chip type NAI (0x%08x)\n", w));
+		/* pass chipc address instead of original core base */
+		ai_scan(&sii->pub, (void *)(uintptr)cc, devid);
+	} else if (CHIPTYPE(sii->pub.socitype) == SOCI_UBUS) {
+		SI_MSG(("Found chip type UBUS (0x%08x), chip id = 0x%4x\n", w, sih->chip));
+		/* pass chipc address instead of original core base */
+		ub_scan(&sii->pub, (void *)(uintptr)cc, devid);
+	} else {
+		SI_ERROR(("Found chip of unknown type (0x%08x)\n", w));
+		return NULL;
+	}
+	/* no cores found, bail out */
+	if (sii->numcores == 0) {
+		SI_ERROR(("si_doattach: could not find any cores\n"));
+		return NULL;
+	}
+	/* bus/core/clk setup */
+	origidx = SI_CC_IDX;
+	if (!si_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) {
+		SI_ERROR(("si_doattach: si_buscore_setup failed\n"));
+		goto exit;
+	}
+
+#if !defined(_CFEZ_) || defined(CFG_WL)
+	if (CHIPID(sih->chip) == BCM4322_CHIP_ID && (((sih->chipst & CST4322_SPROM_OTP_SEL_MASK)
+		>> CST4322_SPROM_OTP_SEL_SHIFT) == (CST4322_OTP_PRESENT |
+		CST4322_SPROM_PRESENT))) {
+		SI_ERROR(("%s: Invalid setting: both SPROM and OTP strapped.\n", __FUNCTION__));
+		return NULL;
+	}
+
+	/* assume current core is CC */
+	if ((sii->pub.ccrev == 0x25) && ((CHIPID(sih->chip) == BCM43236_CHIP_ID ||
+	                                  CHIPID(sih->chip) == BCM43235_CHIP_ID ||
+	                                  CHIPID(sih->chip) == BCM43234_CHIP_ID ||
+	                                  CHIPID(sih->chip) == BCM43238_CHIP_ID) &&
+	                                 (CHIPREV(sii->pub.chiprev) <= 2))) {
+
+		if ((cc->chipstatus & CST43236_BP_CLK) != 0) {
+			uint clkdiv;
+			clkdiv = R_REG(osh, &cc->clkdiv);
+			/* otp_clk_div must be an even number; 120/14 < 9 MHz */
+			clkdiv = (clkdiv & ~CLKD_OTP) | (14 << CLKD_OTP_SHIFT);
+			W_REG(osh, &cc->clkdiv, clkdiv);
+			SI_ERROR(("%s: set clkdiv to %x\n", __FUNCTION__, clkdiv));
+		}
+		OSL_DELAY(10);
+	}
+
+	if (bustype == PCI_BUS) {
+
+	}
+#endif 
+#ifdef BCM_SDRBL
+	/* 4360 ROM bootloader, PCIE case: if SDR is enabled but protection is
+	 * not turned on, then we want to hold the ARM in reset.
+	 * Bottom line: in the sdrenable case, we allow the ARM to boot only when
+	 * protection is turned on.
+	 */
+	if (CHIP_HOSTIF_PCIE(&(sii->pub))) {
+		uint32 sflags = si_arm_sflags(&(sii->pub));
+
+		/* If SDR is enabled but protection is not turned on
+		* then we want to force arm to WFI.
+		*/
+		if ((sflags & (SISF_SDRENABLE | SISF_TCMPROT)) == SISF_SDRENABLE) {
+			disable_arm_irq();
+			while (1) {
+				hnd_cpu_wait(sih);
+			}
+		}
+	}
+#endif /* BCM_SDRBL */
+
+	pvars = NULL;
+	BCM_REFERENCE(pvars);
+
+
+
+	if (sii->pub.ccrev >= 20) {
+		uint32 gpiopullup = 0, gpiopulldown = 0;
+		cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+		ASSERT(cc != NULL);
+
+		/* 4314/43142 have pin muxing, don't clear gpio bits */
+		if ((CHIPID(sih->chip) == BCM4314_CHIP_ID) ||
+		    (CHIPID(sih->chip) == BCM43142_CHIP_ID)) {
+			gpiopullup |= 0x402e0;
+			gpiopulldown |= 0x20500;
+		}
+
+		W_REG(osh, &cc->gpiopullup, gpiopullup);
+		W_REG(osh, &cc->gpiopulldown, gpiopulldown);
+		si_setcoreidx(sih, origidx);
+	}
+
+
+	/* clear any previous epidiag-induced target abort */
+	ASSERT(!si_taclear(sih, FALSE));
+
+
+	return (sii);
+
+exit:
+
+	return NULL;
+}
+
+/** may be called with core in reset */
+void
+si_detach(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint idx;
+
+
+	if (BUSTYPE(sih->bustype) == SI_BUS)
+		for (idx = 0; idx < SI_MAXCORES; idx++)
+			if (cores_info->regs[idx]) {
+				REG_UNMAP(cores_info->regs[idx]);
+				cores_info->regs[idx] = NULL;
+			}
+
+
+#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS)
+	if (cores_info != &ksii_cores_info)
+#endif	/* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */
+		MFREE(sii->osh, cores_info, sizeof(si_cores_info_t));
+
+#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS)
+	if (sii != &ksii)
+#endif	/* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */
+		MFREE(sii->osh, sii, sizeof(si_info_t));
+}
+
+void *
+si_osh(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	return sii->osh;
+}
+
+void
+si_setosh(si_t *sih, osl_t *osh)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	if (sii->osh != NULL) {
+		SI_ERROR(("osh is already set....\n"));
+		ASSERT(!sii->osh);
+	}
+	sii->osh = osh;
+}
+
+/** register driver interrupt disabling and restoring callback functions */
+void
+si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
+                          void *intrsenabled_fn, void *intr_arg)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	sii->intr_arg = intr_arg;
+	sii->intrsoff_fn = (si_intrsoff_t)intrsoff_fn;
+	sii->intrsrestore_fn = (si_intrsrestore_t)intrsrestore_fn;
+	sii->intrsenabled_fn = (si_intrsenabled_t)intrsenabled_fn;
+	/* save current core id.  when this function is called, the current core
+	 * must be the core which provides driver functions (il, et, wl, etc.)
+	 */
+	sii->dev_coreid = cores_info->coreid[sii->curidx];
+}
+
+void
+si_deregister_intr_callback(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	sii->intrsoff_fn = NULL;
+}
+
+uint
+si_intflag(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_intflag(sih);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return R_REG(sii->osh, ((uint32 *)(uintptr)
+			    (sii->oob_router + OOB_STATUSA)));
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+uint
+si_flag(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_flag(sih);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_flag(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_flag(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+uint
+si_flag_alt(si_t *sih)
+{
+	if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_flag_alt(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+void
+si_setint(si_t *sih, int siflag)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		sb_setint(sih, siflag);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		ai_setint(sih, siflag);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		ub_setint(sih, siflag);
+	else
+		ASSERT(0);
+}
+
+uint
+si_coreid(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	return cores_info->coreid[sii->curidx];
+}
+
+uint
+si_coreidx(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	return sii->curidx;
+}
+
+/** return the core-type instantiation # of the current core */
+uint
+si_coreunit(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint idx;
+	uint coreid;
+	uint coreunit;
+	uint i;
+
+	coreunit = 0;
+
+	idx = sii->curidx;
+
+	ASSERT(GOODREGS(sii->curmap));
+	coreid = si_coreid(sih);
+
+	/* count the cores of our type */
+	for (i = 0; i < idx; i++)
+		if (cores_info->coreid[i] == coreid)
+			coreunit++;
+
+	return (coreunit);
+}
+
+uint
+si_corevendor(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_corevendor(sih);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_corevendor(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_corevendor(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+bool
+si_backplane64(si_t *sih)
+{
+	return ((sih->cccaps & CC_CAP_BKPLN64) != 0);
+}
+
+uint
+si_corerev(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_corerev(sih);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_corerev(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_corerev(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+/** return index of coreid or BADIDX if not found */
+uint
+si_findcoreidx(si_t *sih, uint coreid, uint coreunit)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint found;
+	uint i;
+
+
+	found = 0;
+
+	for (i = 0; i < sii->numcores; i++)
+		if (cores_info->coreid[i] == coreid) {
+			if (found == coreunit)
+				return (i);
+			found++;
+		}
+
+	return (BADIDX);
+}
+
+/** return the number of cores of type coreid, or zero if none are found */
+uint
+si_numcoreunits(si_t *sih, uint coreid)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint found;
+	uint i;
+
+	found = 0;
+
+	for (i = 0; i < sii->numcores; i++)
+		if (cores_info->coreid[i] == coreid) {
+			found++;
+		}
+
+	return (found);
+}
+
+/** return list of found cores */
+uint
+si_corelist(si_t *sih, uint coreid[])
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	bcopy((uchar*)cores_info->coreid, (uchar*)coreid, (sii->numcores * sizeof(uint)));
+	return (sii->numcores);
+}
+
+/** return current wrapper mapping */
+void *
+si_wrapperregs(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	ASSERT(GOODREGS(sii->curwrap));
+
+	return (sii->curwrap);
+}
+
+/** return current register mapping */
+void *
+si_coreregs(si_t *sih)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+	ASSERT(GOODREGS(sii->curmap));
+
+	return (sii->curmap);
+}
+
+/**
+ * This function changes the logical "focus" to the indicated core;
+ * it must be called with interrupts off.
+ * Moreover, callers should keep interrupts off while switching away from and
+ * back to the d11 core.
+ */
+void *
+si_setcore(si_t *sih, uint coreid, uint coreunit)
+{
+	uint idx;
+
+	idx = si_findcoreidx(sih, coreid, coreunit);
+	if (!GOODIDX(idx))
+		return (NULL);
+
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_setcoreidx(sih, idx);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_setcoreidx(sih, idx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_setcoreidx(sih, idx);
+	else {
+		ASSERT(0);
+		return NULL;
+	}
+}
+
+void *
+si_setcoreidx(si_t *sih, uint coreidx)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_setcoreidx(sih, coreidx);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_setcoreidx(sih, coreidx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_setcoreidx(sih, coreidx);
+	else {
+		ASSERT(0);
+		return NULL;
+	}
+}
+
+/** Turn off interrupt as required by sb_setcore, before switch core */
+void *
+si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val)
+{
+	void *cc;
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	if (SI_FAST(sii)) {
+		/* Overload the origidx variable to remember the coreid;
+		 * this works because core ids cannot be confused with
+		 * core indices.
+		 */
+		*origidx = coreid;
+		if (coreid == CC_CORE_ID)
+			return (void *)CCREGS_FAST(sii);
+		else if (coreid == sih->buscoretype)
+			return (void *)PCIEREGS(sii);
+	}
+	INTR_OFF(sii, *intr_val);
+	*origidx = sii->curidx;
+	cc = si_setcore(sih, coreid, 0);
+	ASSERT(cc != NULL);
+
+	return cc;
+}
+
+/* restore coreidx and restore interrupts */
+void
+si_restore_core(si_t *sih, uint coreid, uint intr_val)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+	if (SI_FAST(sii) && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype)))
+		return;
+
+	si_setcoreidx(sih, coreid);
+	INTR_RESTORE(sii, intr_val);
+}
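+
+/*
+ * Illustrative pairing (a sketch, not part of the driver): callers wrap direct
+ * register accesses in si_switch_core()/si_restore_core() so interrupts and the
+ * core index are restored afterwards.
+ *
+ *	uint origidx, intr_val;
+ *	chipcregs_t *cc = si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
+ *	if (cc != NULL) {
+ *		... access chipcommon registers via cc ...
+ *		si_restore_core(sih, origidx, intr_val);
+ *	}
+ */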
+
+int
+si_numaddrspaces(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_numaddrspaces(sih);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_numaddrspaces(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_numaddrspaces(sih);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+uint32
+si_addrspace(si_t *sih, uint asidx)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_addrspace(sih, asidx);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_addrspace(sih, asidx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_addrspace(sih, asidx);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+uint32
+si_addrspacesize(si_t *sih, uint asidx)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_addrspacesize(sih, asidx);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_addrspacesize(sih, asidx);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_addrspacesize(sih, asidx);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+void
+si_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
+{
+	/* Only supported for SOCI_AI */
+	if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		ai_coreaddrspaceX(sih, asidx, addr, size);
+	else
+		*size = 0;
+}
+
+uint32
+si_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_core_cflags(sih, mask, val);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_core_cflags(sih, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_core_cflags(sih, mask, val);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+void
+si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		sb_core_cflags_wo(sih, mask, val);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		ai_core_cflags_wo(sih, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		ub_core_cflags_wo(sih, mask, val);
+	else
+		ASSERT(0);
+}
+
+uint32
+si_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_core_sflags(sih, mask, val);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_core_sflags(sih, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_core_sflags(sih, mask, val);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+bool
+si_iscoreup(si_t *sih)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_iscoreup(sih);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_iscoreup(sih);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_iscoreup(sih);
+	else {
+		ASSERT(0);
+		return FALSE;
+	}
+}
+
+uint
+si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
+{
+	/* only for AI back plane chips */
+	if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return (ai_wrap_reg(sih, offset, mask, val));
+	return 0;
+}
+
+uint
+si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_corereg(sih, coreidx, regoff, mask, val);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_corereg(sih, coreidx, regoff, mask, val);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		return ub_corereg(sih, coreidx, regoff, mask, val);
+	else {
+		ASSERT(0);
+		return 0;
+	}
+}
+
+/*
+ * If there is no need for fiddling with interrupts or core switches (typically for
+ * silicon backplane registers, PCI registers and chipcommon registers), this function
+ * translates the register offset on this core into a mapped address. That address can
+ * be used for W_REG/R_REG directly.
+ *
+ * For accessing registers that would need a core switch, this function returns
+ * NULL.
+ */
+uint32 *
+si_corereg_addr(si_t *sih, uint coreidx, uint regoff)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		return sb_corereg_addr(sih, coreidx, regoff);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		return ai_corereg_addr(sih, coreidx, regoff);
+	else {
+		return 0;
+	}
+}
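+
+/*
+ * Illustrative use (a sketch under the contract above): when no core switch is
+ * needed, the returned address feeds W_REG/R_REG directly ('osh' is assumed to
+ * be the caller's OS handle).
+ *
+ *	uint32 *r = si_corereg_addr(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog));
+ *	if (r != NULL)
+ *		W_REG(osh, r, 0);
+ */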
+
+void
+si_core_disable(si_t *sih, uint32 bits)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		sb_core_disable(sih, bits);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		ai_core_disable(sih, bits);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		ub_core_disable(sih, bits);
+}
+
+void
+si_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+	if (CHIPTYPE(sih->socitype) == SOCI_SB)
+		sb_core_reset(sih, bits, resetbits);
+	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+		ai_core_reset(sih, bits, resetbits);
+	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+		ub_core_reset(sih, bits, resetbits);
+}
+
+/** Run bist on current core. Caller needs to take care of core-specific bist hazards */
+int
+si_corebist(si_t *sih)
+{
+	uint32 cflags;
+	int result = 0;
+
+	/* Read core control flags */
+	cflags = si_core_cflags(sih, 0, 0);
+
+	/* Set bist & fgc */
+	si_core_cflags(sih, ~0, (SICF_BIST_EN | SICF_FGC));
+
+	/* Wait for bist done */
+	SPINWAIT(((si_core_sflags(sih, 0, 0) & SISF_BIST_DONE) == 0), 100000);
+
+	if (si_core_sflags(sih, 0, 0) & SISF_BIST_ERROR)
+		result = BCME_ERROR;
+
+	/* Reset core control flags */
+	si_core_cflags(sih, 0xffff, cflags);
+
+	return result;
+}
+
+static uint32
+factor6(uint32 x)
+{
+	switch (x) {
+	case CC_F6_2:	return 2;
+	case CC_F6_3:	return 3;
+	case CC_F6_4:	return 4;
+	case CC_F6_5:	return 5;
+	case CC_F6_6:	return 6;
+	case CC_F6_7:	return 7;
+	default:	return 0;
+	}
+}
+
+/** calculate the speed the SI would run at given a set of clockcontrol values */
+uint32
+si_clock_rate(uint32 pll_type, uint32 n, uint32 m)
+{
+	uint32 n1, n2, clock, m1, m2, m3, mc;
+
+	n1 = n & CN_N1_MASK;
+	n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT;
+
+	if (pll_type == PLL_TYPE6) {
+		if (m & CC_T6_MMASK)
+			return CC_T6_M1;
+		else
+			return CC_T6_M0;
+	} else if ((pll_type == PLL_TYPE1) ||
+	           (pll_type == PLL_TYPE3) ||
+	           (pll_type == PLL_TYPE4) ||
+	           (pll_type == PLL_TYPE7)) {
+		n1 = factor6(n1);
+		n2 += CC_F5_BIAS;
+	} else if (pll_type == PLL_TYPE2) {
+		n1 += CC_T2_BIAS;
+		n2 += CC_T2_BIAS;
+		ASSERT((n1 >= 2) && (n1 <= 7));
+		ASSERT((n2 >= 5) && (n2 <= 23));
+	} else if (pll_type == PLL_TYPE5) {
+		return (100000000);
+	} else
+		ASSERT(0);
+	/* PLL types 3 and 7 use BASE2 (25 MHz) */
+	if ((pll_type == PLL_TYPE3) ||
+	    (pll_type == PLL_TYPE7)) {
+		clock = CC_CLOCK_BASE2 * n1 * n2;
+	} else
+		clock = CC_CLOCK_BASE1 * n1 * n2;
+
+	if (clock == 0)
+		return 0;
+
+	m1 = m & CC_M1_MASK;
+	m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT;
+	m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT;
+	mc = (m & CC_MC_MASK) >> CC_MC_SHIFT;
+
+	if ((pll_type == PLL_TYPE1) ||
+	    (pll_type == PLL_TYPE3) ||
+	    (pll_type == PLL_TYPE4) ||
+	    (pll_type == PLL_TYPE7)) {
+		m1 = factor6(m1);
+		if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3))
+			m2 += CC_F5_BIAS;
+		else
+			m2 = factor6(m2);
+		m3 = factor6(m3);
+
+		switch (mc) {
+		case CC_MC_BYPASS:	return (clock);
+		case CC_MC_M1:		return (clock / m1);
+		case CC_MC_M1M2:	return (clock / (m1 * m2));
+		case CC_MC_M1M2M3:	return (clock / (m1 * m2 * m3));
+		case CC_MC_M1M3:	return (clock / (m1 * m3));
+		default:		return (0);
+		}
+	} else {
+		ASSERT(pll_type == PLL_TYPE2);
+
+		m1 += CC_T2_BIAS;
+		m2 += CC_T2M2_BIAS;
+		m3 += CC_T2_BIAS;
+		ASSERT((m1 >= 2) && (m1 <= 7));
+		ASSERT((m2 >= 3) && (m2 <= 10));
+		ASSERT((m3 >= 2) && (m3 <= 7));
+
+		if ((mc & CC_T2MC_M1BYP) == 0)
+			clock /= m1;
+		if ((mc & CC_T2MC_M2BYP) == 0)
+			clock /= m2;
+		if ((mc & CC_T2MC_M3BYP) == 0)
+			clock /= m3;
+
+		return (clock);
+	}
+}
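+
+/*
+ * Worked example (illustrative, with made-up field values): for a type-1 PLL
+ * whose n fields decode to n1 == 4 and n2 == 8, the intermediate clock is
+ * CC_CLOCK_BASE1 * 4 * 8; with mc == CC_MC_M1M2, m1 == 2 and m2 == 4, the
+ * returned rate is that product / (2 * 4).
+ */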
+
+/**
+ * Some chips could have multiple host interfaces, however only one will be active
+ * for a given chip. Depending on the package option and the chipcommon chipstatus
+ * register, return the active host interface.
+ */
+uint
+si_chip_hostif(si_t *sih)
+{
+	uint hosti = 0;
+
+	switch (CHIPID(sih->chip)) {
+
+	case BCM43602_CHIP_ID:
+		hosti = CHIP_HOSTIF_PCIEMODE;
+		break;
+
+	case BCM4360_CHIP_ID:
+		/* chippkg bit 0 == 0: PCIE-only packages
+		 * chippkg bit 0 == 1: both PCIE and USB cores enabled
+		 */
+		if ((sih->chippkg & 0x1) && (sih->chipst & CST4360_MODE_USB))
+			hosti = CHIP_HOSTIF_USBMODE;
+		else
+			hosti = CHIP_HOSTIF_PCIEMODE;
+
+		break;
+
+	case BCM4335_CHIP_ID:
+		/* TBD: like in 4360, do we need to check pkg? */
+		if (CST4335_CHIPMODE_USB20D(sih->chipst))
+			hosti = CHIP_HOSTIF_USBMODE;
+		else if (CST4335_CHIPMODE_SDIOD(sih->chipst))
+			hosti = CHIP_HOSTIF_SDIOMODE;
+		else
+			hosti = CHIP_HOSTIF_PCIEMODE;
+		break;
+
+	case BCM4345_CHIP_ID:
+		if (CST4345_CHIPMODE_USB20D(sih->chipst) || CST4345_CHIPMODE_HSIC(sih->chipst))
+			hosti = CHIP_HOSTIF_USBMODE;
+		else if (CST4345_CHIPMODE_SDIOD(sih->chipst))
+			hosti = CHIP_HOSTIF_SDIOMODE;
+		else if (CST4345_CHIPMODE_PCIE(sih->chipst))
+			hosti = CHIP_HOSTIF_PCIEMODE;
+		break;
+
+
+	case BCM4350_CHIP_ID:
+	case BCM4354_CHIP_ID:
+	case BCM4356_CHIP_ID:
+	case BCM43556_CHIP_ID:
+	case BCM43558_CHIP_ID:
+	case BCM43566_CHIP_ID:
+	case BCM43568_CHIP_ID:
+	case BCM43569_CHIP_ID:
+		if (CST4350_CHIPMODE_USB20D(sih->chipst) ||
+		    CST4350_CHIPMODE_HSIC20D(sih->chipst) ||
+		    CST4350_CHIPMODE_USB30D(sih->chipst) ||
+		    CST4350_CHIPMODE_USB30D_WL(sih->chipst) ||
+		    CST4350_CHIPMODE_HSIC30D(sih->chipst))
+			hosti = CHIP_HOSTIF_USBMODE;
+		else if (CST4350_CHIPMODE_SDIOD(sih->chipst))
+			hosti = CHIP_HOSTIF_SDIOMODE;
+		else if (CST4350_CHIPMODE_PCIE(sih->chipst))
+			hosti = CHIP_HOSTIF_PCIEMODE;
+		break;
+
+	default:
+		break;
+	}
+
+	return hosti;
+}
+
+
+/** set chip watchdog reset timer to fire in 'ticks' */
+void
+si_watchdog(si_t *sih, uint ticks)
+{
+	uint nb, maxt;
+
+	if (PMUCTL_ENAB(sih)) {
+
+#if !defined(_CFEZ_) || defined(CFG_WL)
+		if ((CHIPID(sih->chip) == BCM4319_CHIP_ID) &&
+		    (CHIPREV(sih->chiprev) == 0) && (ticks != 0)) {
+			si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), ~0, 0x2);
+			si_setcore(sih, USB20D_CORE_ID, 0);
+			si_core_disable(sih, 1);
+			si_setcore(sih, CC_CORE_ID, 0);
+		}
+#endif 
+
+		nb = (sih->ccrev < 26) ? 16 : ((sih->ccrev >= 37) ? 32 : 24);
+		/* The mips compiler uses the sllv instruction,
+		 * so we specially handle the 32-bit case.
+		 */
+		if (nb == 32)
+			maxt = 0xffffffff;
+		else
+			maxt = ((1 << nb) - 1);
+
+		if (ticks == 1)
+			ticks = 2;
+		else if (ticks > maxt)
+			ticks = maxt;
+
+		si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, pmuwatchdog), ~0, ticks);
+	} else {
+		maxt = (1 << 28) - 1;
+		if (ticks > maxt)
+			ticks = maxt;
+
+		si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
+	}
+}
+
+/** trigger watchdog reset after ms milliseconds */
+void
+si_watchdog_ms(si_t *sih, uint32 ms)
+{
+	si_watchdog(sih, wd_msticks * ms);
+}
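+
+/*
+ * Illustrative: si_watchdog_ms(sih, 1000) arms a reset roughly one second out;
+ * a ticks value of 0 is written through unchanged and leaves the watchdog
+ * disarmed (the usual hardware convention, assumed here).
+ */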
+
+uint32
+si_watchdog_msticks(void)
+{
+	return wd_msticks;
+}
+
+bool
+si_taclear(si_t *sih, bool details)
+{
+	return FALSE;
+}
+
+
+
+/** return the slow clock source - LPO, XTAL, or PCI */
+static uint
+si_slowclk_src(si_info_t *sii)
+{
+	chipcregs_t *cc;
+
+	ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID);
+
+	if (sii->pub.ccrev < 6) {
+		if ((BUSTYPE(sii->pub.bustype) == PCI_BUS) &&
+		    (OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUT, sizeof(uint32)) &
+		     PCI_CFG_GPIO_SCS))
+			return (SCC_SS_PCI);
+		else
+			return (SCC_SS_XTAL);
+	} else if (sii->pub.ccrev < 10) {
+		cc = (chipcregs_t *)si_setcoreidx(&sii->pub, sii->curidx);
+		ASSERT(cc);
+		return (R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_SS_MASK);
+	} else	/* Insta-clock */
+		return (SCC_SS_XTAL);
+}
+
+/** return the ILP (slowclock) min or max frequency */
+static uint
+si_slowclk_freq(si_info_t *sii, bool max_freq, chipcregs_t *cc)
+{
+	uint32 slowclk;
+	uint div;
+
+	ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID);
+
+	/* shouldn't be here unless we've established the chip has dynamic clk control */
+	ASSERT(R_REG(sii->osh, &cc->capabilities) & CC_CAP_PWR_CTL);
+
+	slowclk = si_slowclk_src(sii);
+	if (sii->pub.ccrev < 6) {
+		if (slowclk == SCC_SS_PCI)
+			return (max_freq ? (PCIMAXFREQ / 64) : (PCIMINFREQ / 64));
+		else
+			return (max_freq ? (XTALMAXFREQ / 32) : (XTALMINFREQ / 32));
+	} else if (sii->pub.ccrev < 10) {
+		div = 4 *
+		        (((R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
+		if (slowclk == SCC_SS_LPO)
+			return (max_freq ? LPOMAXFREQ : LPOMINFREQ);
+		else if (slowclk == SCC_SS_XTAL)
+			return (max_freq ? (XTALMAXFREQ / div) : (XTALMINFREQ / div));
+		else if (slowclk == SCC_SS_PCI)
+			return (max_freq ? (PCIMAXFREQ / div) : (PCIMINFREQ / div));
+		else
+			ASSERT(0);
+	} else {
+		/* Chipc rev 10 is InstaClock */
+		div = R_REG(sii->osh, &cc->system_clk_ctl) >> SYCC_CD_SHIFT;
+		div = 4 * (div + 1);
+		return (max_freq ? XTALMAXFREQ : (XTALMINFREQ / div));
+	}
+	return (0);
+}
+
+static void
+si_clkctl_setdelay(si_info_t *sii, void *chipcregs)
+{
+	chipcregs_t *cc = (chipcregs_t *)chipcregs;
+	uint slowmaxfreq, pll_delay, slowclk;
+	uint pll_on_delay, fref_sel_delay;
+
+	pll_delay = PLL_DELAY;
+
+	/* If the slow clock is not sourced by the xtal then add the xtal_on_delay
+	 * since the xtal will also be powered down by dynamic clk control logic.
+	 */
+
+	slowclk = si_slowclk_src(sii);
+	if (slowclk != SCC_SS_XTAL)
+		pll_delay += XTAL_ON_DELAY;
+
+	/* Starting with 4318 it is ILP that is used for the delays */
+	slowmaxfreq = si_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? FALSE : TRUE, cc);
+
+	pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
+	fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
+
+	W_REG(sii->osh, &cc->pll_on_delay, pll_on_delay);
+	W_REG(sii->osh, &cc->fref_sel_delay, fref_sel_delay);
+}
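+
+/*
+ * Worked example (illustrative, assuming PLL_DELAY is 150 us as in typical
+ * headers): with a 32 kHz slow clock, pll_on_delay becomes
+ * (32000 * 150 + 999999) / 1000000 == 5 ticks.
+ */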
+
+/** initialize power control delay registers */
+void
+si_clkctl_init(si_t *sih)
+{
+	si_info_t *sii;
+	uint origidx = 0;
+	chipcregs_t *cc;
+	bool fast;
+
+	if (!CCCTL_ENAB(sih))
+		return;
+
+	sii = SI_INFO(sih);
+	fast = SI_FAST(sii);
+	if (!fast) {
+		origidx = sii->curidx;
+		if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL)
+			return;
+	} else if ((cc = (chipcregs_t *)CCREGS_FAST(sii)) == NULL)
+		return;
+	ASSERT(cc != NULL);
+
+	/* set all Instaclk chip ILP to 1 MHz */
+	if (sih->ccrev >= 10)
+		SET_REG(sii->osh, &cc->system_clk_ctl, SYCC_CD_MASK,
+		        (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
+
+	si_clkctl_setdelay(sii, (void *)(uintptr)cc);
+
+	OSL_DELAY(20000);
+
+	if (!fast)
+		si_setcoreidx(sih, origidx);
+}
+
+
+/** change logical "focus" to the gpio core for optimized access */
+void *
+si_gpiosetcore(si_t *sih)
+{
+	return (si_setcoreidx(sih, SI_CC_IDX));
+}
+
+/**
+ * mask & set gpiocontrol bits.
+ * If a gpiocontrol bit is set to 0, chipcommon controls the corresponding GPIO pin.
+ * If a gpiocontrol bit is set to 1, the GPIO pin is no longer a GPIO and becomes dedicated
+ *   to some chip-specific purpose.
+ */
+uint32
+si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+
+	regoff = 0;
+
+	/* gpios could be shared on router platforms
+	 * ignore reservation if it's high priority (e.g., test apps)
+	 */
+	if ((priority != GPIO_HI_PRIORITY) &&
+	    (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpiocontrol);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/** mask&set gpio output enable bits */
+uint32
+si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+
+	regoff = 0;
+
+	/* gpios could be shared on router platforms
+	 * ignore reservation if it's high priority (e.g., test apps)
+	 */
+	if ((priority != GPIO_HI_PRIORITY) &&
+	    (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpioouten);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/** mask&set gpio output bits */
+uint32
+si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+
+	regoff = 0;
+
+	/* gpios could be shared on router platforms
+	 * ignore reservation if it's high priority (e.g., test apps)
+	 */
+	if ((priority != GPIO_HI_PRIORITY) &&
+	    (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpioout);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/** reserve one gpio */
+uint32
+si_gpioreserve(si_t *sih, uint32 gpio_bitmask, uint8 priority)
+{
+	/* only cores on SI_BUS share GPIOs, and only application users need to
+	 * reserve/release a GPIO
+	 */
+	if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
+		ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
+		return 0xffffffff;
+	}
+	/* make sure only one bit is set */
+	if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
+		ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
+		return 0xffffffff;
+	}
+
+	/* already reserved */
+	if (si_gpioreservation & gpio_bitmask)
+		return 0xffffffff;
+	/* set reservation */
+	si_gpioreservation |= gpio_bitmask;
+
+	return si_gpioreservation;
+}
+
+/**
+ * release one gpio.
+ *
+ * Releasing the gpio doesn't change its current value; the last written value
+ * persists until someone overwrites it.
+ */
+uint32
+si_gpiorelease(si_t *sih, uint32 gpio_bitmask, uint8 priority)
+{
+	/* only cores on SI_BUS share GPIOs, and only application users need to
+	 * reserve/release a GPIO
+	 */
+	if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
+		ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
+		return 0xffffffff;
+	}
+	/* make sure only one bit is set */
+	if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
+		ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
+		return 0xffffffff;
+	}
+
+	/* already released */
+	if (!(si_gpioreservation & gpio_bitmask))
+		return 0xffffffff;
+
+	/* clear reservation */
+	si_gpioreservation &= ~gpio_bitmask;
+
+	return si_gpioreservation;
+}
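+
+/*
+ * Illustrative reserve/drive/release sequence (a sketch; the bit and the
+ * GPIO_APP_PRIORITY level are assumptions):
+ *
+ *	uint32 bit = (1 << 7);
+ *	if (si_gpioreserve(sih, bit, GPIO_APP_PRIORITY) != 0xffffffff) {
+ *		si_gpioouten(sih, bit, bit, GPIO_APP_PRIORITY);
+ *		si_gpioout(sih, bit, bit, GPIO_APP_PRIORITY);
+ *		si_gpiorelease(sih, bit, GPIO_APP_PRIORITY);
+ *	}
+ */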
+
+/* return the current gpioin register value */
+uint32
+si_gpioin(si_t *sih)
+{
+	uint regoff;
+
+	regoff = OFFSETOF(chipcregs_t, gpioin);
+	return (si_corereg(sih, SI_CC_IDX, regoff, 0, 0));
+}
+
+/* mask&set gpio interrupt polarity bits */
+uint32
+si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+
+	/* gpios could be shared on router platforms */
+	if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpiointpolarity);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* mask&set gpio interrupt mask bits */
+uint32
+si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+	uint regoff;
+
+	/* gpios could be shared on router platforms */
+	if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+		mask = priority ? (si_gpioreservation & mask) :
+			((si_gpioreservation | mask) & ~(si_gpioreservation));
+		val &= mask;
+	}
+
+	regoff = OFFSETOF(chipcregs_t, gpiointmask);
+	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* assign the gpio to an led */
+uint32
+si_gpioled(si_t *sih, uint32 mask, uint32 val)
+{
+	if (sih->ccrev < 16)
+		return 0xffffffff;
+
+	/* gpio led powersave reg */
+	return (si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask, val));
+}
+
+/* mask&set gpio timer val */
+uint32
+si_gpiotimerval(si_t *sih, uint32 mask, uint32 gpiotimerval)
+{
+	if (sih->ccrev < 16)
+		return 0xffffffff;
+
+	return (si_corereg(sih, SI_CC_IDX,
+		OFFSETOF(chipcregs_t, gpiotimerval), mask, gpiotimerval));
+}
+
+uint32
+si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val)
+{
+	uint offs;
+
+	if (sih->ccrev < 20)
+		return 0xffffffff;
+
+	offs = (updown ? OFFSETOF(chipcregs_t, gpiopulldown) : OFFSETOF(chipcregs_t, gpiopullup));
+	return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
+}
+
+uint32
+si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val)
+{
+	uint offs;
+
+	if (sih->ccrev < 11)
+		return 0xffffffff;
+
+	if (regtype == GPIO_REGEVT)
+		offs = OFFSETOF(chipcregs_t, gpioevent);
+	else if (regtype == GPIO_REGEVT_INTMSK)
+		offs = OFFSETOF(chipcregs_t, gpioeventintmask);
+	else if (regtype == GPIO_REGEVT_INTPOL)
+		offs = OFFSETOF(chipcregs_t, gpioeventintpolarity);
+	else
+		return 0xffffffff;
+
+	return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
+}
+
+void *
+si_gpio_handler_register(si_t *sih, uint32 event,
+	bool level, gpio_handler_t cb, void *arg)
+{
+	si_info_t *sii = SI_INFO(sih);
+	gpioh_item_t *gi;
+
+	ASSERT(event);
+	ASSERT(cb != NULL);
+
+	if (sih->ccrev < 11)
+		return NULL;
+
+	if ((gi = MALLOC(sii->osh, sizeof(gpioh_item_t))) == NULL)
+		return NULL;
+
+	bzero(gi, sizeof(gpioh_item_t));
+	gi->event = event;
+	gi->handler = cb;
+	gi->arg = arg;
+	gi->level = level;
+
+	gi->next = sii->gpioh_head;
+	sii->gpioh_head = gi;
+
+	return (void *)(gi);
+}
+
+void
+si_gpio_handler_unregister(si_t *sih, void *gpioh)
+{
+	si_info_t *sii = SI_INFO(sih);
+	gpioh_item_t *p, *n;
+
+	if (sih->ccrev < 11)
+		return;
+
+	ASSERT(sii->gpioh_head != NULL);
+	if ((void*)sii->gpioh_head == gpioh) {
+		sii->gpioh_head = sii->gpioh_head->next;
+		MFREE(sii->osh, gpioh, sizeof(gpioh_item_t));
+		return;
+	} else {
+		p = sii->gpioh_head;
+		n = p->next;
+		while (n) {
+			if ((void*)n == gpioh) {
+				p->next = n->next;
+				MFREE(sii->osh, gpioh, sizeof(gpioh_item_t));
+				return;
+			}
+			p = n;
+			n = n->next;
+		}
+	}
+
+	ASSERT(0); /* Not found in list */
+}
+
+void
+si_gpio_handler_process(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	gpioh_item_t *h;
+	uint32 level = si_gpioin(sih);
+	uint32 levelp = si_gpiointpolarity(sih, 0, 0, 0);
+	uint32 edge = si_gpioevent(sih, GPIO_REGEVT, 0, 0);
+	uint32 edgep = si_gpioevent(sih, GPIO_REGEVT_INTPOL, 0, 0);
+
+	for (h = sii->gpioh_head; h != NULL; h = h->next) {
+		if (h->handler) {
+			uint32 status = (h->level ? level : edge) & h->event;
+			uint32 polarity = (h->level ? levelp : edgep) & h->event;
+
+			/* polarity bitval is opposite of status bitval */
+			if (status ^ polarity)
+				h->handler(status, h->arg);
+		}
+	}
+
+	si_gpioevent(sih, GPIO_REGEVT, edge, edge); /* clear edge-trigger status */
+}
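+
+/*
+ * Illustrative registration flow (a sketch; the callback and event mask are
+ * hypothetical). The handler receives the masked status bits and the opaque
+ * arg passed at registration time.
+ *
+ *	static void my_gpio_cb(uint32 status, void *arg) { ... }
+ *
+ *	void *gpioh = si_gpio_handler_register(sih, (1 << 3), FALSE, my_gpio_cb, ctx);
+ *	... from the GPIO interrupt path: si_gpio_handler_process(sih); ...
+ *	si_gpio_handler_unregister(sih, gpioh);
+ */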
+
+uint32
+si_gpio_int_enable(si_t *sih, bool enable)
+{
+	uint offs;
+
+	if (sih->ccrev < 11)
+		return 0xffffffff;
+
+	offs = OFFSETOF(chipcregs_t, intmask);
+	return (si_corereg(sih, SI_CC_IDX, offs, CI_GPIO, (enable ? CI_GPIO : 0)));
+}
+
+
+/** Return the size of the specified SOCRAM bank */
+static uint
+socram_banksize(si_info_t *sii, sbsocramregs_t *regs, uint8 idx, uint8 mem_type)
+{
+	uint banksize, bankinfo;
+	uint bankidx = idx | (mem_type << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+
+	ASSERT(mem_type <= SOCRAM_MEMTYPE_DEVRAM);
+
+	W_REG(sii->osh, &regs->bankidx, bankidx);
+	bankinfo = R_REG(sii->osh, &regs->bankinfo);
+	banksize = SOCRAM_BANKINFO_SZBASE * ((bankinfo & SOCRAM_BANKINFO_SZMASK) + 1);
+	return banksize;
+}
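+
+/*
+ * For example (assuming SOCRAM_BANKINFO_SZBASE is 8192 bytes, as in typical
+ * headers): a bankinfo size field of 3 decodes to 8192 * (3 + 1) == 32 KB for
+ * that bank.
+ */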
+
+void
+si_socdevram(si_t *sih, bool set, uint8 *enable, uint8 *protect, uint8 *remap)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+	sbsocramregs_t *regs;
+	bool wasup;
+	uint corerev;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	if (!set)
+		*enable = *protect = *remap = 0;
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+
+	corerev = si_corerev(sih);
+	if (corerev >= 10) {
+		uint32 extcinfo;
+		uint8 nb;
+		uint8 i;
+		uint32 bankidx, bankinfo;
+
+		extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+		nb = ((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT);
+		for (i = 0; i < nb; i++) {
+			bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+			W_REG(sii->osh, &regs->bankidx, bankidx);
+			bankinfo = R_REG(sii->osh, &regs->bankinfo);
+			if (set) {
+				bankinfo &= ~SOCRAM_BANKINFO_DEVRAMSEL_MASK;
+				bankinfo &= ~SOCRAM_BANKINFO_DEVRAMPRO_MASK;
+				bankinfo &= ~SOCRAM_BANKINFO_DEVRAMREMAP_MASK;
+				if (*enable) {
+					bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMSEL_SHIFT);
+					if (*protect)
+						bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMPRO_SHIFT);
+					if ((corerev >= 16) && *remap)
+						bankinfo |=
+							(1 << SOCRAM_BANKINFO_DEVRAMREMAP_SHIFT);
+				}
+				W_REG(sii->osh, &regs->bankinfo, bankinfo);
+			}
+			else if (i == 0) {
+				if (bankinfo & SOCRAM_BANKINFO_DEVRAMSEL_MASK) {
+					*enable = 1;
+					if (bankinfo & SOCRAM_BANKINFO_DEVRAMPRO_MASK)
+						*protect = 1;
+					if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK)
+						*remap = 1;
+				}
+			}
+		}
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+}
+
+bool
+si_socdevram_remap_isenb(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+	sbsocramregs_t *regs;
+	bool wasup, remap = FALSE;
+	uint corerev;
+	uint32 extcinfo;
+	uint8 nb;
+	uint8 i;
+	uint32 bankidx, bankinfo;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+
+	corerev = si_corerev(sih);
+	if (corerev >= 16) {
+		extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+		nb = ((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT);
+		for (i = 0; i < nb; i++) {
+			bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+			W_REG(sii->osh, &regs->bankidx, bankidx);
+			bankinfo = R_REG(sii->osh, &regs->bankinfo);
+			if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK) {
+				remap = TRUE;
+				break;
+			}
+		}
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+	return remap;
+}
+
+bool
+si_socdevram_pkg(si_t *sih)
+{
+	return (si_socdevram_size(sih) > 0);
+}
+
+uint32
+si_socdevram_size(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+	uint32 memsize = 0;
+	sbsocramregs_t *regs;
+	bool wasup;
+	uint corerev;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+
+	corerev = si_corerev(sih);
+	if (corerev >= 10) {
+		uint32 extcinfo;
+		uint8 nb;
+		uint8 i;
+
+		extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+		nb = (((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT));
+		for (i = 0; i < nb; i++)
+			memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_DEVRAM);
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+
+	return memsize;
+}
+
+uint32
+si_socdevram_remap_size(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+	uint32 memsize = 0, banksz;
+	sbsocramregs_t *regs;
+	bool wasup;
+	uint corerev;
+	uint32 extcinfo;
+	uint8 nb;
+	uint8 i;
+	uint32 bankidx, bankinfo;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+
+	corerev = si_corerev(sih);
+	if (corerev >= 16) {
+		extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+		nb = (((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT));
+
+		/*
+		 * FIX: A0 issue: max addressable is 512KB instead of 640KB;
+		 * only four banks are accessible to the ARM
+		 */
+		if ((corerev == 16) && (nb == 5))
+			nb = 4;
+
+		for (i = 0; i < nb; i++) {
+			bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+			W_REG(sii->osh, &regs->bankidx, bankidx);
+			bankinfo = R_REG(sii->osh, &regs->bankinfo);
+			if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK) {
+				banksz = socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_DEVRAM);
+				memsize += banksz;
+			} else {
+				/* Account only consecutive banks for now */
+				break;
+			}
+		}
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+
+	return memsize;
+}
+
+/** Return the RAM size of the SOCRAM core */
+uint32
+si_socram_size(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+
+	sbsocramregs_t *regs;
+	bool wasup;
+	uint corerev;
+	uint32 coreinfo;
+	uint memsize = 0;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+	corerev = si_corerev(sih);
+	coreinfo = R_REG(sii->osh, &regs->coreinfo);
+
+	/* Calculate size from coreinfo based on rev */
+	if (corerev == 0)
+		memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK));
+	else if (corerev < 3) {
+		memsize = 1 << (SR_BSZ_BASE + (coreinfo & SRCI_SRBSZ_MASK));
+		memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+	} else if ((corerev <= 7) || (corerev == 12)) {
+		uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+		uint bsz = (coreinfo & SRCI_SRBSZ_MASK);
+		uint lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
+		if (lss != 0)
+			nb --;
+		memsize = nb * (1 << (bsz + SR_BSZ_BASE));
+		if (lss != 0)
+			memsize += (1 << ((lss - 1) + SR_BSZ_BASE));
+	} else {
+		uint8 i;
+		uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+		for (i = 0; i < nb; i++)
+			memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM);
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+
+	return memsize;
+}
+
+
+/** Return the TCM-RAM size of the ARMCR4 core. */
+uint32
+si_tcm_size(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+	uint8 *regs;
+	bool wasup;
+	uint32 corecap;
+	uint memsize = 0;
+	uint32 nab = 0;
+	uint32 nbb = 0;
+	uint32 totb = 0;
+	uint32 bxinfo = 0;
+	uint32 idx = 0;
+	uint32 *arm_cap_reg;
+	uint32 *arm_bidx;
+	uint32 *arm_binfo;
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to CR4 core */
+	if (!(regs = si_setcore(sih, ARMCR4_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size. If in reset, come out of reset,
+	 * but remain in halt
+	 */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, SICF_CPUHALT, SICF_CPUHALT);
+
+	arm_cap_reg = (uint32 *)(regs + SI_CR4_CAP);
+	corecap = R_REG(sii->osh, arm_cap_reg);
+
+	nab = (corecap & ARMCR4_TCBANB_MASK) >> ARMCR4_TCBANB_SHIFT;
+	nbb = (corecap & ARMCR4_TCBBNB_MASK) >> ARMCR4_TCBBNB_SHIFT;
+	totb = nab + nbb;
+
+	arm_bidx = (uint32 *)(regs + SI_CR4_BANKIDX);
+	arm_binfo = (uint32 *)(regs + SI_CR4_BANKINFO);
+	for (idx = 0; idx < totb; idx++) {
+		W_REG(sii->osh, arm_bidx, idx);
+
+		bxinfo = R_REG(sii->osh, arm_binfo);
+		memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * ARMCR4_BSZ_MULT;
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+
+	return memsize;
+}
+
+bool
+si_has_flops(si_t *sih)
+{
+	uint origidx, cr4_rev;
+
+	/* Find out CR4 core revision */
+	origidx = si_coreidx(sih);
+	if (si_setcore(sih, ARMCR4_CORE_ID, 0)) {
+		cr4_rev = si_corerev(sih);
+		si_setcoreidx(sih, origidx);
+
+		if (cr4_rev == 1 || cr4_rev >= 3)
+			return TRUE;
+	}
+	return FALSE;
+}
+
+uint32
+si_socram_srmem_size(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+
+	sbsocramregs_t *regs;
+	bool wasup;
+	uint corerev;
+	uint32 coreinfo;
+	uint memsize = 0;
+
+	if ((CHIPID(sih->chip) == BCM4334_CHIP_ID) && (CHIPREV(sih->chiprev) < 2)) {
+		return (32 * 1024);
+	}
+
+	/* Block ints and save current core */
+	INTR_OFF(sii, intr_val);
+	origidx = si_coreidx(sih);
+
+	/* Switch to SOCRAM core */
+	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+		goto done;
+
+	/* Get info for determining size */
+	if (!(wasup = si_iscoreup(sih)))
+		si_core_reset(sih, 0, 0);
+	corerev = si_corerev(sih);
+	coreinfo = R_REG(sii->osh, &regs->coreinfo);
+
+	/* Calculate size from coreinfo based on rev */
+	if (corerev >= 16) {
+		uint8 i;
+		uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+		for (i = 0; i < nb; i++) {
+			W_REG(sii->osh, &regs->bankidx, i);
+			if (R_REG(sii->osh, &regs->bankinfo) & SOCRAM_BANKINFO_RETNTRAM_MASK)
+				memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM);
+		}
+	}
+
+	/* Return to previous state and core */
+	if (!wasup)
+		si_core_disable(sih, 0);
+	si_setcoreidx(sih, origidx);
+
+done:
+	INTR_RESTORE(sii, intr_val);
+
+	return memsize;
+}
+
+
+#if !defined(_CFEZ_) || defined(CFG_WL)
+void
+si_btcgpiowar(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	uint origidx;
+	uint intr_val = 0;
+	chipcregs_t *cc;
+
+	/* Make sure that there is a ChipCommon core present &&
+	 * UART_TX is strapped to 1
+	 */
+	if (!(sih->cccaps & CC_CAP_UARTGPIO))
+		return;
+
+	/* si_corereg cannot be used as we have to guarantee 8-bit read/writes */
+	INTR_OFF(sii, intr_val);
+
+	origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+	ASSERT(cc != NULL);
+
+	W_REG(sii->osh, &cc->uart0mcr, R_REG(sii->osh, &cc->uart0mcr) | 0x04);
+
+	/* restore the original index */
+	si_setcoreidx(sih, origidx);
+
+	INTR_RESTORE(sii, intr_val);
+}
+
+void
+si_chipcontrl_btshd0_4331(si_t *sih, bool on)
+{
+	si_info_t *sii = SI_INFO(sih);
+	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+	chipcregs_t *cc;
+	uint origidx;
+	uint32 val;
+	uint intr_val = 0;
+
+	INTR_OFF(sii, intr_val);
+
+	origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+	val = R_REG(sii->osh, &cc->chipcontrol);
+
+	/* bt_shd0 controls are same for 4331 chiprevs 0 and 1, packages 12x9 and 12x12 */
+	if (on) {
+		/* Enable bt_shd0 on gpio4: */
+		val |= (CCTRL4331_BT_SHD0_ON_GPIO4);
+		W_REG(sii->osh, &cc->chipcontrol, val);
+	} else {
+		val &= ~(CCTRL4331_BT_SHD0_ON_GPIO4);
+		W_REG(sii->osh, &cc->chipcontrol, val);
+	}
+
+	/* restore the original index */
+	si_setcoreidx(sih, origidx);
+
+	INTR_RESTORE(sii, intr_val);
+}
+
+void
+si_chipcontrl_restore(si_t *sih, uint32 val)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+	W_REG(sii->osh, &cc->chipcontrol, val);
+	si_setcoreidx(sih, origidx);
+}
+
+uint32
+si_chipcontrl_read(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+	uint32 val;
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+	val = R_REG(sii->osh, &cc->chipcontrol);
+	si_setcoreidx(sih, origidx);
+	return val;
+}
+
+void
+si_chipcontrl_epa4331(si_t *sih, bool on)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+	uint32 val;
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+	val = R_REG(sii->osh, &cc->chipcontrol);
+
+	if (on) {
+		if (sih->chippkg == 9 || sih->chippkg == 0xb) {
+			val |= (CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
+			/* Ext PA Controls for 4331 12x9 Package */
+			W_REG(sii->osh, &cc->chipcontrol, val);
+		} else {
+			/* Ext PA Controls for 4331 12x12 Package */
+			if (sih->chiprev > 0) {
+				W_REG(sii->osh, &cc->chipcontrol, val |
+				      (CCTRL4331_EXTPA_EN) | (CCTRL4331_EXTPA_EN2));
+			} else {
+				W_REG(sii->osh, &cc->chipcontrol, val | (CCTRL4331_EXTPA_EN));
+			}
+		}
+	} else {
+		val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_EN2 | CCTRL4331_EXTPA_ON_GPIO2_5);
+		W_REG(sii->osh, &cc->chipcontrol, val);
+	}
+
+	si_setcoreidx(sih, origidx);
+}
+
+/** switch muxed pins, on: SROM, off: FEMCTRL. Called for a family of ac chips, not just 4360. */
+void
+si_chipcontrl_srom4360(si_t *sih, bool on)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+	uint32 val;
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+	val = R_REG(sii->osh, &cc->chipcontrol);
+
+	if (on) {
+		val &= ~(CCTRL4360_SECI_MODE |
+			CCTRL4360_BTSWCTRL_MODE |
+			CCTRL4360_EXTRA_FEMCTRL_MODE |
+			CCTRL4360_BT_LGCY_MODE |
+			CCTRL4360_CORE2FEMCTRL4_ON);
+
+		W_REG(sii->osh, &cc->chipcontrol, val);
+	}
+	/* off: leave chipcontrol unchanged (FEMCTRL) */
+
+	si_setcoreidx(sih, origidx);
+}
+
+void
+si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl)
+{
+	si_info_t *sii;
+	chipcregs_t *cc;
+	uint origidx;
+	uint32 val;
+	bool sel_chip;
+
+	sel_chip = (CHIPID(sih->chip) == BCM4331_CHIP_ID) ||
+		(CHIPID(sih->chip) == BCM43431_CHIP_ID);
+	sel_chip &= ((sih->chippkg == 9 || sih->chippkg == 0xb));
+
+	if (!sel_chip)
+		return;
+
+	sii = SI_INFO(sih);
+	origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+	val = R_REG(sii->osh, &cc->chipcontrol);
+
+	if (enter_wowl) {
+		val |= CCTRL4331_EXTPA_EN;
+		W_REG(sii->osh, &cc->chipcontrol, val);
+	} else {
+		val |= (CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
+		W_REG(sii->osh, &cc->chipcontrol, val);
+	}
+	si_setcoreidx(sih, origidx);
+}
+#endif /* !defined(_CFEZ_) || defined(CFG_WL) */
+
+uint
+si_pll_reset(si_t *sih)
+{
+	uint err = 0;
+
+	return (err);
+}
+
+/** Enable BT-COEX & Ex-PA for 4313 */
+void
+si_epa_4313war(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+	/* EPA Fix */
+	W_REG(sii->osh, &cc->gpiocontrol,
+	R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK);
+
+	si_setcoreidx(sih, origidx);
+}
+
+void
+si_clk_pmu_htavail_set(si_t *sih, bool set_clear)
+{
+}
+
+/** Re-enable synth_pwrsw resource in min_res_mask for 4313 */
+void
+si_pmu_synth_pwrsw_4313_war(si_t *sih)
+{
+}
+
+/** WL/BT control for 4313 btcombo boards >= P250 */
+void
+si_btcombo_p250_4313_war(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+	W_REG(sii->osh, &cc->gpiocontrol,
+		R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_5_6_EN_MASK);
+
+	W_REG(sii->osh, &cc->gpioouten,
+		R_REG(sii->osh, &cc->gpioouten) | GPIO_CTRL_5_6_EN_MASK);
+
+	si_setcoreidx(sih, origidx);
+}
+
+void
+si_btc_enable_chipcontrol(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+	/* BT fix */
+	W_REG(sii->osh, &cc->chipcontrol,
+		R_REG(sii->osh, &cc->chipcontrol) | CC_BTCOEX_EN_MASK);
+
+	si_setcoreidx(sih, origidx);
+}
+
+void
+si_btcombo_43228_war(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+
+	W_REG(sii->osh, &cc->gpioouten, GPIO_CTRL_7_6_EN_MASK);
+	W_REG(sii->osh, &cc->gpioout, GPIO_OUT_7_EN_MASK);
+
+	si_setcoreidx(sih, origidx);
+}
+
+/** check if the device is removed */
+bool
+si_deviceremoved(si_t *sih)
+{
+	uint32 w;
+
+	switch (BUSTYPE(sih->bustype)) {
+	case PCI_BUS:
+		ASSERT(SI_INFO(sih)->osh != NULL);
+		w = OSL_PCI_READ_CONFIG(SI_INFO(sih)->osh, PCI_CFG_VID, sizeof(uint32));
+		if ((w & 0xFFFF) != VENDOR_BROADCOM)
+			return TRUE;
+		break;
+	}
+	return FALSE;
+}
+
+bool
+si_is_sprom_available(si_t *sih)
+{
+	if (sih->ccrev >= 31) {
+		si_info_t *sii;
+		uint origidx;
+		chipcregs_t *cc;
+		uint32 sromctrl;
+
+		if ((sih->cccaps & CC_CAP_SROM) == 0)
+			return FALSE;
+
+		sii = SI_INFO(sih);
+		origidx = sii->curidx;
+		cc = si_setcoreidx(sih, SI_CC_IDX);
+		ASSERT(cc);
+		sromctrl = R_REG(sii->osh, &cc->sromcontrol);
+		si_setcoreidx(sih, origidx);
+		return (sromctrl & SRC_PRESENT);
+	}
+
+	switch (CHIPID(sih->chip)) {
+	case BCM4312_CHIP_ID:
+		return ((sih->chipst & CST4312_SPROM_OTP_SEL_MASK) != CST4312_OTP_SEL);
+	case BCM4325_CHIP_ID:
+		return (sih->chipst & CST4325_SPROM_SEL) != 0;
+	case BCM4322_CHIP_ID:	case BCM43221_CHIP_ID:	case BCM43231_CHIP_ID:
+	case BCM43222_CHIP_ID:	case BCM43111_CHIP_ID:	case BCM43112_CHIP_ID:
+	case BCM4342_CHIP_ID: {
+		uint32 spromotp;
+		spromotp = (sih->chipst & CST4322_SPROM_OTP_SEL_MASK) >>
+		        CST4322_SPROM_OTP_SEL_SHIFT;
+		return (spromotp & CST4322_SPROM_PRESENT) != 0;
+	}
+	case BCM4329_CHIP_ID:
+		return (sih->chipst & CST4329_SPROM_SEL) != 0;
+	case BCM4315_CHIP_ID:
+		return (sih->chipst & CST4315_SPROM_SEL) != 0;
+	case BCM4319_CHIP_ID:
+		return (sih->chipst & CST4319_SPROM_SEL) != 0;
+	case BCM4336_CHIP_ID:
+	case BCM43362_CHIP_ID:
+		return (sih->chipst & CST4336_SPROM_PRESENT) != 0;
+	case BCM4330_CHIP_ID:
+		return (sih->chipst & CST4330_SPROM_PRESENT) != 0;
+	case BCM4313_CHIP_ID:
+		return (sih->chipst & CST4313_SPROM_PRESENT) != 0;
+	case BCM4331_CHIP_ID:
+	case BCM43431_CHIP_ID:
+		return (sih->chipst & CST4331_SPROM_PRESENT) != 0;
+	case BCM43239_CHIP_ID:
+		return ((sih->chipst & CST43239_SPROM_MASK) &&
+			!(sih->chipst & CST43239_SFLASH_MASK));
+	case BCM4324_CHIP_ID:
+	case BCM43242_CHIP_ID:
+		return ((sih->chipst & CST4324_SPROM_MASK) &&
+			!(sih->chipst & CST4324_SFLASH_MASK));
+	case BCM4335_CHIP_ID:
+	case BCM4345_CHIP_ID:
+		return ((sih->chipst & CST4335_SPROM_MASK) &&
+			!(sih->chipst & CST4335_SFLASH_MASK));
+	case BCM4350_CHIP_ID:
+	case BCM4354_CHIP_ID:
+	case BCM4356_CHIP_ID:
+	case BCM43556_CHIP_ID:
+	case BCM43558_CHIP_ID:
+	case BCM43566_CHIP_ID:
+	case BCM43568_CHIP_ID:
+	case BCM43569_CHIP_ID:
+		return (sih->chipst & CST4350_SPROM_PRESENT) != 0;
+	case BCM43602_CHIP_ID:
+		return (sih->chipst & CST43602_SPROM_PRESENT) != 0;
+	case BCM43131_CHIP_ID:
+	case BCM43217_CHIP_ID:
+	case BCM43227_CHIP_ID:
+	case BCM43228_CHIP_ID:
+	case BCM43428_CHIP_ID:
+		return (sih->chipst & CST43228_OTP_PRESENT) != CST43228_OTP_PRESENT;
+	default:
+		return TRUE;
+	}
+}
+
+
+uint32 si_get_sromctl(si_t *sih)
+{
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+	uint32 sromctl;
+	osl_t *osh = si_osh(sih);
+
+	cc = si_setcoreidx(sih, SI_CC_IDX);
+	ASSERT((uintptr)cc);
+
+	sromctl = R_REG(osh, &cc->sromcontrol);
+
+	/* return to the original core */
+	si_setcoreidx(sih, origidx);
+	return sromctl;
+}
+
+int si_set_sromctl(si_t *sih, uint32 value)
+{
+	chipcregs_t *cc;
+	uint origidx = si_coreidx(sih);
+	osl_t *osh = si_osh(sih);
+
+	cc = si_setcoreidx(sih, SI_CC_IDX);
+	ASSERT((uintptr)cc);
+
+	/* the write is only supported on chipcommon rev >= 32 */
+	if (si_corerev(sih) < 32) {
+		/* restore the original core before bailing out */
+		si_setcoreidx(sih, origidx);
+		return BCME_UNSUPPORTED;
+	}
+
+	W_REG(osh, &cc->sromcontrol, value);
+
+	/* return to the original core */
+	si_setcoreidx(sih, origidx);
+	return BCME_OK;
+}
+
+uint
+si_core_wrapperreg(si_t *sih, uint32 coreidx, uint32 offset, uint32 mask, uint32 val)
+{
+	uint origidx;
+	uint ret_val;
+
+	origidx = si_coreidx(sih);
+
+	si_setcoreidx(sih, coreidx);
+
+	ret_val = si_wrapperreg(sih, offset, mask, val);
+
+	/* return to the original core */
+	si_setcoreidx(sih, origidx);
+	return ret_val;
+}
+
+
+/* Clean up the hndrte timer from the host when the ARM has been halted
+ * without a chance to clean up its resources.
+ * If it is not cleaned up, an interrupt from a software timer can still
+ * request the HT clock while the ARM is halted.
+ */
+uint32
+si_pmu_res_req_timer_clr(si_t *sih)
+{
+	uint32 mask;
+
+	mask = PRRT_REQ_ACTIVE | PRRT_INTEN;
+	if (CHIPID(sih->chip) != BCM4328_CHIP_ID)
+		mask <<= 14;
+	/* clear mask bits */
+	si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, res_req_timer), mask, 0);
+	/* readback to ensure write completes */
+	return si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, res_req_timer), 0, 0);
+}
+
+/** turn on/off rfldo */
+void
+si_pmu_rfldo(si_t *sih, bool on)
+{
+}
+
+#ifdef SURVIVE_PERST_ENAB
+static uint32
+si_pcie_survive_perst(si_t *sih, uint32 mask, uint32 val)
+{
+	si_info_t *sii;
+
+	sii = SI_INFO(sih);
+
+	if (!PCIE(sii))
+		return (0);
+
+	return pcie_survive_perst(sii->pch, mask, val);
+}
+
+static void
+si_watchdog_reset(si_t *sih)
+{
+	si_info_t *sii = SI_INFO(sih);
+	chipcregs_t *cc;
+	uint32 origidx, i;
+
+	origidx = si_coreidx(sih);
+	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+	/* issue a watchdog reset */
+	W_REG(sii->osh, &cc->pmuwatchdog, 2);
+	/* do busy wait for 20ms */
+	for (i = 0; i < 2000; i++) {
+		OSL_DELAY(10);
+	}
+	si_setcoreidx(sih, origidx);
+}
+#endif /* SURVIVE_PERST_ENAB */
+
+void
+si_survive_perst_war(si_t *sih, bool reset, uint32 sperst_mask, uint32 sperst_val)
+{
+#ifdef SURVIVE_PERST_ENAB
+	if (BUSTYPE(sih->bustype) != PCI_BUS)
+		return;
+
+	if ((CHIPID(sih->chip) != BCM4360_CHIP_ID && CHIPID(sih->chip) != BCM4352_CHIP_ID) ||
+	    (CHIPREV(sih->chiprev) >= 4))
+		return;
+
+	if (reset) {
+		si_info_t *sii = SI_INFO(sih);
+		uint32 bar0win, bar0win_after;
+
+		/* save the bar0win */
+		bar0win = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+
+		si_watchdog_reset(sih);
+
+		bar0win_after = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+		if (bar0win_after != bar0win) {
+			SI_ERROR(("%s: bar0win before %08x, bar0win after %08x\n",
+				__FUNCTION__, bar0win, bar0win_after));
+			OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32), bar0win);
+		}
+	}
+	if (sperst_mask) {
+		/* enable survive perst */
+		si_pcie_survive_perst(sih, sperst_mask, sperst_val);
+	}
+#endif /* SURVIVE_PERST_ENAB */
+}
+
+void
+si_pcie_ltr_war(si_t *sih)
+{
+}
diff --git a/drivers/staging/edison-bcm43340/siutils_priv.h b/drivers/staging/edison-bcm43340/siutils_priv.h
new file mode 100644
index 0000000..a7d8ffc
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/siutils_priv.h
@@ -0,0 +1,272 @@
+/*
+ * Include file private to the SOC Interconnect support files.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: siutils_priv.h 431423 2013-10-23 16:07:35Z $
+ */
+
+#ifndef	_siutils_priv_h_
+#define	_siutils_priv_h_
+
+#define	SI_ERROR(args)
+
+#define	SI_MSG(args)
+
+#ifdef BCMDBG_SI
+#define	SI_VMSG(args)	printf args
+#else
+#define	SI_VMSG(args)
+#endif
+
+#define	IS_SIM(chippkg)	((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
+
+typedef uint32 (*si_intrsoff_t)(void *intr_arg);
+typedef void (*si_intrsrestore_t)(void *intr_arg, uint32 arg);
+typedef bool (*si_intrsenabled_t)(void *intr_arg);
+
+typedef struct gpioh_item {
+	void			*arg;
+	bool			level;
+	gpio_handler_t		handler;
+	uint32			event;
+	struct gpioh_item	*next;
+} gpioh_item_t;
+
+
+#define SI_GPIO_MAX		16
+
+typedef struct gci_gpio_item {
+	void			*arg;
+	uint8			gci_gpio;
+	uint8			status;
+	gci_gpio_handler_t	handler;
+	struct gci_gpio_item	*next;
+} gci_gpio_item_t;
+
+
+typedef struct si_cores_info {
+	void	*regs[SI_MAXCORES];	/* other regs va */
+
+	uint	coreid[SI_MAXCORES];	/* id of each core */
+	uint32	coresba[SI_MAXCORES];	/* backplane address of each core */
+	void	*regs2[SI_MAXCORES];	/* va of each core second register set (usbh20) */
+	uint32	coresba2[SI_MAXCORES];	/* address of each core second register set (usbh20) */
+	uint32	coresba_size[SI_MAXCORES]; /* backplane address space size */
+	uint32	coresba2_size[SI_MAXCORES]; /* second address space size */
+
+	void	*wrappers[SI_MAXCORES];	/* other cores wrapper va */
+	uint32	wrapba[SI_MAXCORES];	/* address of controlling wrapper */
+
+	uint32	cia[SI_MAXCORES];	/* erom cia entry for each core */
+	uint32	cib[SI_MAXCORES];	/* erom cib entry for each core */
+} si_cores_info_t;
+
+/* misc si info needed by some of the routines */
+typedef struct si_info {
+	struct si_pub pub;		/* back plane public state (must be first field) */
+
+	void	*osh;			/* osl os handle */
+	void	*sdh;			/* bcmsdh handle */
+
+	uint	dev_coreid;		/* the core provides driver functions */
+	void	*intr_arg;		/* interrupt callback function arg */
+	si_intrsoff_t intrsoff_fn;	/* turns chip interrupts off */
+	si_intrsrestore_t intrsrestore_fn; /* restore chip interrupts */
+	si_intrsenabled_t intrsenabled_fn; /* check if interrupts are enabled */
+
+	void *pch;			/* PCI/E core handle */
+
+	gpioh_item_t *gpioh_head; 	/* GPIO event handlers list */
+
+	bool	memseg;			/* flag to toggle MEM_SEG register */
+
+	char *vars;
+	uint varsz;
+
+	void	*curmap;		/* current regs va */
+
+	uint	curidx;			/* current core index */
+	uint	numcores;		/* # discovered cores */
+
+	void	*curwrap;		/* current wrapper va */
+
+	uint32	oob_router;		/* oob router registers for axi */
+
+	void *cores_info;
+	gci_gpio_item_t	*gci_gpio_head;	/* gci gpio interrupts head */
+} si_info_t;
+
+
+#define	SI_INFO(sih)	((si_info_t *)(uintptr)sih)
+
+#define	GOODCOREADDR(x, b) (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
+		ISALIGNED((x), SI_CORE_SIZE))
+#define	GOODREGS(regs)	((regs) != NULL && ISALIGNED((uintptr)(regs), SI_CORE_SIZE))
+#define BADCOREADDR	0
+#define	GOODIDX(idx)	(((uint)(idx)) < SI_MAXCORES)
+#define	NOREV		-1		/* Invalid rev */
+
+#define PCI(si)		((BUSTYPE((si)->pub.bustype) == PCI_BUS) &&	\
+			 ((si)->pub.buscoretype == PCI_CORE_ID))
+
+#define PCIE_GEN1(si)	((BUSTYPE((si)->pub.bustype) == PCI_BUS) &&	\
+			 ((si)->pub.buscoretype == PCIE_CORE_ID))
+
+#define PCIE_GEN2(si)	((BUSTYPE((si)->pub.bustype) == PCI_BUS) &&	\
+			 ((si)->pub.buscoretype == PCIE2_CORE_ID))
+
+#define PCIE(si)	(PCIE_GEN1(si) || PCIE_GEN2(si))
+
+#define PCMCIA(si)	((BUSTYPE((si)->pub.bustype) == PCMCIA_BUS) && ((si)->memseg == TRUE))
+
+/* Newer chips can access the PCI/PCIE and CC cores without requiring a
+ * change to the PCI BAR0 WIN
+ */
+#define SI_FAST(si) (PCIE(si) || (PCI(si) && ((si)->pub.buscorerev >= 13)))
+
+#define PCIEREGS(si) (((char *)((si)->curmap) + PCI_16KB0_PCIREGS_OFFSET))
+#define CCREGS_FAST(si) (((char *)((si)->curmap) + PCI_16KB0_CCREGS_OFFSET))
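+
+/* A minimal (hypothetical) sketch of the fast path these macros enable: on
+ * SI_FAST configurations the chipcommon registers are reachable through the
+ * current BAR0 window, otherwise the core must be switched explicitly:
+ *
+ *	chipcregs_t *cc = SI_FAST(sii) ?
+ *		(chipcregs_t *)CCREGS_FAST(sii) :
+ *		(chipcregs_t *)si_setcore((si_t *)sii, CC_CORE_ID, 0);
+ */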
+
+/*
+ * Macros to disable/restore function core (D11, ENET, ILINE20, etc.) interrupts
+ * before/after core switching, to avoid invalid register accesses inside an ISR.
+ */
+#define INTR_OFF(si, intr_val) \
+	if ((si)->intrsoff_fn && (cores_info)->coreid[(si)->curidx] == (si)->dev_coreid) {	\
+		intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); }
+#define INTR_RESTORE(si, intr_val) \
+	if ((si)->intrsrestore_fn && (cores_info)->coreid[(si)->curidx] == (si)->dev_coreid) {	\
+		(*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); }
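+
+/* Note that both macros reference a 'cores_info' variable from the caller's
+ * scope, so callers must declare it first. A minimal (hypothetical) caller
+ * sketch, following the pattern used throughout siutils.c:
+ *
+ *	si_info_t *sii = SI_INFO(sih);
+ *	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ *	uint origidx, intr_val = 0;
+ *
+ *	INTR_OFF(sii, intr_val);		// block interrupts
+ *	origidx = si_coreidx(sih);		// remember the current core
+ *	... switch cores and access registers ...
+ *	si_setcoreidx(sih, origidx);		// switch back
+ *	INTR_RESTORE(sii, intr_val);		// unblock interrupts
+ */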
+
+/* dynamic clock control defines */
+#define	LPOMINFREQ		25000		/* low power oscillator min */
+#define	LPOMAXFREQ		43000		/* low power oscillator max */
+#define	XTALMINFREQ		19800000	/* 20 MHz - 1% */
+#define	XTALMAXFREQ		20200000	/* 20 MHz + 1% */
+#define	PCIMINFREQ		25000000	/* 25 MHz */
+#define	PCIMAXFREQ		34000000	/* 33 MHz + fudge */
+
+#define	ILP_DIV_5MHZ		0		/* ILP = 5 MHz */
+#define	ILP_DIV_1MHZ		4		/* ILP = 1 MHz */
+
+/* Chips/revisions that require forcing the fast (HT) clock while on PCI/PCIe */
+#define PCI_FORCEHT(si)	\
+	(((PCIE_GEN1(si)) && (si->pub.chip == BCM4311_CHIP_ID) && ((si->pub.chiprev <= 1))) || \
+	((PCI(si) || PCIE_GEN1(si)) && (si->pub.chip == BCM4321_CHIP_ID)) || \
+	(PCIE_GEN1(si) && (si->pub.chip == BCM4716_CHIP_ID)) || \
+	(PCIE_GEN1(si) && (si->pub.chip == BCM4748_CHIP_ID)))
+
+/* GPIO Based LED powersave defines */
+#define DEFAULT_GPIO_ONTIME	10		/* Default: 10% on */
+#define DEFAULT_GPIO_OFFTIME	90		/* Default: 90% off */
+
+#ifndef DEFAULT_GPIOTIMERVAL
+#define DEFAULT_GPIOTIMERVAL  ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME)
+#endif
+
+/* Silicon Backplane externs */
+extern void sb_scan(si_t *sih, void *regs, uint devid);
+extern uint sb_coreid(si_t *sih);
+extern uint sb_intflag(si_t *sih);
+extern uint sb_flag(si_t *sih);
+extern void sb_setint(si_t *sih, int siflag);
+extern uint sb_corevendor(si_t *sih);
+extern uint sb_corerev(si_t *sih);
+extern uint sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern uint32 *sb_corereg_addr(si_t *sih, uint coreidx, uint regoff);
+extern bool sb_iscoreup(si_t *sih);
+extern void *sb_setcoreidx(si_t *sih, uint coreidx);
+extern uint32 sb_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 sb_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern void sb_commit(si_t *sih);
+extern uint32 sb_base(uint32 admatch);
+extern uint32 sb_size(uint32 admatch);
+extern void sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void sb_core_disable(si_t *sih, uint32 bits);
+extern uint32 sb_addrspace(si_t *sih, uint asidx);
+extern uint32 sb_addrspacesize(si_t *sih, uint asidx);
+extern int sb_numaddrspaces(si_t *sih);
+
+extern uint32 sb_set_initiator_to(si_t *sih, uint32 to, uint idx);
+
+extern bool sb_taclear(si_t *sih, bool details);
+
+
+/* Wake-on-wireless-LAN (WOWL) */
+extern bool sb_pci_pmecap(si_t *sih);
+struct osl_info;
+extern bool sb_pci_fastpmecap(struct osl_info *osh);
+extern bool sb_pci_pmeclr(si_t *sih);
+extern void sb_pci_pmeen(si_t *sih);
+extern uint sb_pcie_readreg(void *sih, uint addrtype, uint offset);
+
+/* AMBA Interconnect exported externs */
+extern si_t *ai_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
+                       void *sdh, char **vars, uint *varsz);
+extern si_t *ai_kattach(osl_t *osh);
+extern void ai_scan(si_t *sih, void *regs, uint devid);
+
+extern uint ai_flag(si_t *sih);
+extern uint ai_flag_alt(si_t *sih);
+extern void ai_setint(si_t *sih, int siflag);
+extern uint ai_coreidx(si_t *sih);
+extern uint ai_corevendor(si_t *sih);
+extern uint ai_corerev(si_t *sih);
+extern uint32 *ai_corereg_addr(si_t *sih, uint coreidx, uint regoff);
+extern bool ai_iscoreup(si_t *sih);
+extern void *ai_setcoreidx(si_t *sih, uint coreidx);
+extern uint32 ai_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 ai_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern void ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void ai_core_disable(si_t *sih, uint32 bits);
+extern int ai_numaddrspaces(si_t *sih);
+extern uint32 ai_addrspace(si_t *sih, uint asidx);
+extern uint32 ai_addrspacesize(si_t *sih, uint asidx);
+extern void ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size);
+extern uint ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
+
+
+
+#define ub_scan(a, b, c) do {} while (0)
+#define ub_flag(a) (0)
+#define ub_setint(a, b) do {} while (0)
+#define ub_coreidx(a) (0)
+#define ub_corevendor(a) (0)
+#define ub_corerev(a) (0)
+#define ub_iscoreup(a) (0)
+#define ub_setcoreidx(a, b) (0)
+#define ub_core_cflags(a, b, c) (0)
+#define ub_core_cflags_wo(a, b, c) do {} while (0)
+#define ub_core_sflags(a, b, c) (0)
+#define ub_corereg(a, b, c, d, e) (0)
+#define ub_core_reset(a, b, c) do {} while (0)
+#define ub_core_disable(a, b) do {} while (0)
+#define ub_numaddrspaces(a) (0)
+#define ub_addrspace(a, b)  (0)
+#define ub_addrspacesize(a, b) (0)
+#define ub_view(a, b) do {} while (0)
+#define ub_dumpregs(a, b) do {} while (0)
+
+#endif	/* _siutils_priv_h_ */
diff --git a/drivers/staging/edison-bcm43340/uamp_api.h b/drivers/staging/edison-bcm43340/uamp_api.h
new file mode 100644
index 0000000..dde4e1c
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/uamp_api.h
@@ -0,0 +1,176 @@
+/*
+ *  Name:       uamp_api.h
+ *
+ *  Description: Universal AMP API
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: uamp_api.h 294267 2011-11-04 23:41:52Z $
+ *
+ */
+#ifndef UAMP_API_H
+#define UAMP_API_H
+
+
+#include "typedefs.h"
+
+
+/*****************************************************************************
+**  Constant and Type Definitions
+******************************************************************************
+*/
+
+#define BT_API
+
+/* Types. */
+typedef bool	BOOLEAN;
+typedef uint8	UINT8;
+typedef uint16	UINT16;
+
+
+/* UAMP identifiers */
+#define UAMP_ID_1   1
+#define UAMP_ID_2   2
+typedef UINT8 tUAMP_ID;
+
+/* UAMP event ids (used by UAMP_CBACK) */
+#define UAMP_EVT_RX_READY           0   /* Data from AMP controller is ready to be read */
+#define UAMP_EVT_CTLR_REMOVED       1   /* Controller removed */
+#define UAMP_EVT_CTLR_READY         2   /* Controller added/ready */
+typedef UINT8 tUAMP_EVT;
+
+
+/* UAMP Channels */
+#define UAMP_CH_HCI_CMD            0   /* HCI Command channel */
+#define UAMP_CH_HCI_EVT            1   /* HCI Event channel */
+#define UAMP_CH_HCI_DATA           2   /* HCI ACL Data channel */
+typedef UINT8 tUAMP_CH;
+
+/* tUAMP_EVT_DATA: union for event-specific data, used by UAMP_CBACK */
+typedef union {
+    tUAMP_CH channel;       /* UAMP_EVT_RX_READY: channel for which rx occurred */
+} tUAMP_EVT_DATA;
+
+
+/*****************************************************************************
+**
+** Function:    UAMP_CBACK
+**
+** Description: Callback for events. Register callback using UAMP_Init.
+**
+** Parameters   amp_id:         AMP device identifier that generated the event
+**              amp_evt:        event id
+**              p_amp_evt_data: pointer to event-specific data
+**
+******************************************************************************
+*/
+typedef void (*tUAMP_CBACK)(tUAMP_ID amp_id, tUAMP_EVT amp_evt, tUAMP_EVT_DATA *p_amp_evt_data);
+
+/*****************************************************************************
+**  external function declarations
+******************************************************************************
+*/
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/*****************************************************************************
+**
+** Function:    UAMP_Init
+**
+** Description: Initialize UAMP driver
+**
+** Parameters   p_cback:    Callback function for UAMP event notification
+**
+******************************************************************************
+*/
+BT_API BOOLEAN UAMP_Init(tUAMP_CBACK p_cback);
+
+
+/*****************************************************************************
+**
+** Function:    UAMP_Open
+**
+** Description: Open connection to local AMP device.
+**
+** Parameters   amp_id: Application specific AMP identifier. This value
+**                      will be included in AMP messages sent to the
+**                      BTU task, to identify the source of the message
+**
+******************************************************************************
+*/
+BT_API BOOLEAN UAMP_Open(tUAMP_ID amp_id);
+
+/*****************************************************************************
+**
+** Function:    UAMP_Close
+**
+** Description: Close connection to local AMP device.
+**
+** Parameters   amp_id: Application specific AMP identifier.
+**
+******************************************************************************
+*/
+BT_API void UAMP_Close(tUAMP_ID amp_id);
+
+
+/*****************************************************************************
+**
+** Function:    UAMP_Write
+**
+** Description: Send buffer to AMP device. Frees GKI buffer when done.
+**
+**
+** Parameters:  amp_id:     AMP identifier.
+**              p_buf:      pointer to buffer to write
+**              num_bytes:  number of bytes to write
+**              channel:    UAMP_CH_HCI_DATA or UAMP_CH_HCI_CMD
+**
+** Returns:     number of bytes written
+**
+******************************************************************************
+*/
+BT_API UINT16 UAMP_Write(tUAMP_ID amp_id, UINT8 *p_buf, UINT16 num_bytes, tUAMP_CH channel);
+
+/*****************************************************************************
+**
+** Function:    UAMP_Read
+**
+** Description: Read incoming data from AMP. Call after receiving a
+**              UAMP_EVT_RX_READY callback event.
+**
+** Parameters:  amp_id:     AMP identifier.
+**              p_buf:      pointer to buffer for holding incoming AMP data
+**              buf_size:   size of p_buf
+**              channel:    UAMP_CH_HCI_DATA or UAMP_CH_HCI_EVT
+**
+** Returns:     number of bytes read
+**
+******************************************************************************
+*/
+BT_API UINT16 UAMP_Read(tUAMP_ID amp_id, UINT8 *p_buf, UINT16 buf_size, tUAMP_CH channel);
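+
+/* A minimal (hypothetical) usage sketch of this API: register a callback,
+ * open the controller, then drain the signalled channel on each
+ * UAMP_EVT_RX_READY event. Buffer size and flow are illustrative only:
+ *
+ *	static void amp_cback(tUAMP_ID amp_id, tUAMP_EVT amp_evt,
+ *	                      tUAMP_EVT_DATA *p_data)
+ *	{
+ *		UINT8 buf[1024];
+ *		if (amp_evt == UAMP_EVT_RX_READY)
+ *			(void)UAMP_Read(amp_id, buf, sizeof(buf), p_data->channel);
+ *	}
+ *
+ *	UAMP_Init(amp_cback);
+ *	UAMP_Open(UAMP_ID_1);
+ *	...
+ *	UAMP_Close(UAMP_ID_1);
+ */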
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* UAMP_API_H */
diff --git a/drivers/staging/edison-bcm43340/wl_android.c b/drivers/staging/edison-bcm43340/wl_android.c
new file mode 100644
index 0000000..07fcb07
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/wl_android.c
@@ -0,0 +1,1812 @@
+/*
+ * Linux cfg80211 driver - Android related functions
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_android.c 477039 2014-05-12 10:59:46Z $
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <net/netlink.h>
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+#include <asm/intel-mid.h>
+
+#include <wl_android.h>
+#include <wldev_common.h>
+#include <wlioctl.h>
+#include <bcmutils.h>
+#include <linux_osl.h>
+#include <dhd_dbg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <proto/bcmip.h>
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif
+#include <bcmsdbus.h>
+#ifdef WL_CFG80211
+#include <wl_cfg80211.h>
+#endif
+
+#include <linux/acpi.h>
+#include <linux/acpi_gpio.h>
+
+/*
+ * Android private command strings, PLEASE define new private commands here
+ * so they can be updated easily in the future (if needed)
+ */
+
+#define CMD_START		"START"
+#define CMD_STOP		"STOP"
+#define	CMD_SCAN_ACTIVE		"SCAN-ACTIVE"
+#define	CMD_SCAN_PASSIVE	"SCAN-PASSIVE"
+#define CMD_RSSI		"RSSI"
+#define CMD_LINKSPEED		"LINKSPEED"
+#define CMD_RXFILTER_START	"RXFILTER-START"
+#define CMD_RXFILTER_STOP	"RXFILTER-STOP"
+#define CMD_RXFILTER_ADD	"RXFILTER-ADD"
+#define CMD_RXFILTER_REMOVE	"RXFILTER-REMOVE"
+#define CMD_BTCOEXSCAN_START	"BTCOEXSCAN-START"
+#define CMD_BTCOEXSCAN_STOP	"BTCOEXSCAN-STOP"
+#define CMD_BTCOEXMODE		"BTCOEXMODE"
+#define CMD_SETSUSPENDOPT	"SETSUSPENDOPT"
+#define CMD_SETSUSPENDMODE      "SETSUSPENDMODE"
+#define CMD_P2P_DEV_ADDR	"P2P_DEV_ADDR"
+#define CMD_SETFWPATH		"SETFWPATH"
+#define CMD_SETBAND		"SETBAND"
+#define CMD_GETBAND		"GETBAND"
+#define CMD_COUNTRY		"COUNTRY"
+#define CMD_P2P_SET_NOA		"P2P_SET_NOA"
+#if !defined(WL_ENABLE_P2P_IF)
+#define CMD_P2P_GET_NOA			"P2P_GET_NOA"
+#endif /* WL_ENABLE_P2P_IF */
+#define CMD_P2P_SD_OFFLOAD		"P2P_SD_"
+#define CMD_P2P_SET_PS		"P2P_SET_PS"
+#define CMD_SET_AP_WPS_P2P_IE 		"SET_AP_WPS_P2P_IE"
+#define CMD_SETROAMMODE 	"SETROAMMODE"
+#define CMD_SETIBSSBEACONOUIDATA	"SETIBSSBEACONOUIDATA"
+#define CMD_MIRACAST		"MIRACAST"
+
+#if defined(WL_SUPPORT_AUTO_CHANNEL)
+#define CMD_GET_BEST_CHANNELS	"GET_BEST_CHANNELS"
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+
+#define CMD_RTCOEX		"MWS_COEX_BITMAP"
+#define CMD_KEEP_ALIVE		"KEEPALIVE"
+#define CMD_CONFIG_RTCOEX "CONFIG_WLAN_LTE_COEX_RT"
+
+/* CCX Private Commands */
+
+#ifdef PNO_SUPPORT
+#define CMD_PNOSSIDCLR_SET	"PNOSSIDCLR"
+#define CMD_PNOSETUP_SET	"PNOSETUP "
+#define CMD_PNOENABLE_SET	"PNOFORCE"
+#define CMD_PNODEBUG_SET	"PNODEBUG"
+#define CMD_WLS_BATCHING	"WLS_BATCHING"
+#endif /* PNO_SUPPORT */
+
+#define CMD_OKC_SET_PMK		"SET_PMK"
+#define CMD_OKC_ENABLE		"OKC_ENABLE"
+
+#define	CMD_HAPD_MAC_FILTER	"HAPD_MAC_FILTER"
+
+
+
+#define CMD_ROAM_OFFLOAD			"SETROAMOFFLOAD"
+
+/* miracast related definition */
+#define MIRACAST_MODE_OFF	0
+#define MIRACAST_MODE_SOURCE	1
+#define MIRACAST_MODE_SINK	2
+
+#ifndef MIRACAST_AMPDU_SIZE
+#define MIRACAST_AMPDU_SIZE	8
+#endif
+
+#ifndef MIRACAST_MCHAN_ALGO
+#define MIRACAST_MCHAN_ALGO     1
+#endif
+
+#ifndef MIRACAST_MCHAN_BW
+#define MIRACAST_MCHAN_BW       25
+#endif
+
+/* WiFi/LTE Real Time coexistence parameters definition */
+#define MWS_WLANRX_PROT 3   /* WLAN F/W will determine optimal protection mode */
+#define MWS_SCANJOIN_PROT 1 /* 802_RX_PRI enabled to protect WLAN scan and join frame exchanges */
+#define MWS_LTERX_PROT 1    /* Enable Protection mode for LTE RX */
+#define MWS_LTETX_ADV 1500  /* LTE_TX advance duration set to 1500 us */
+
+#ifdef CONNECTION_STATISTICS
+#define CMD_GET_CONNECTION_STATS	"GET_CONNECTION_STATS"
+
+struct connection_stats {
+	u32 txframe;
+	u32 txbyte;
+	u32 txerror;
+	u32 rxframe;
+	u32 rxbyte;
+	u32 txfail;
+	u32 txretry;
+	u32 txretrie;
+	u32 txrts;
+	u32 txnocts;
+	u32 txexptime;
+	u32 txrate;
+	u8	chan_idle;
+};
+#endif /* CONNECTION_STATISTICS */
+
+static LIST_HEAD(miracast_resume_list);
+static u8 miracast_cur_mode;
+
+struct io_cfg {
+	s8 *iovar;
+	s32 param;
+	u32 ioctl;
+	void *arg;
+	u32 len;
+	struct list_head list;
+};
+
+typedef struct _android_wifi_priv_cmd {
+	char *buf;
+	int used_len;
+	int total_len;
+} android_wifi_priv_cmd;
+
+#ifdef CONFIG_COMPAT
+typedef struct _compat_android_wifi_priv_cmd {
+	compat_caddr_t buf;
+	int used_len;
+	int total_len;
+} compat_android_wifi_priv_cmd;
+#endif /* CONFIG_COMPAT */
+
+#if defined(BCMFW_ROAM_ENABLE)
+#define CMD_SET_ROAMPREF	"SET_ROAMPREF"
+
+#define MAX_NUM_SUITES		10
+#define WIDTH_AKM_SUITE		8
+#define JOIN_PREF_RSSI_LEN		0x02
+#define JOIN_PREF_RSSI_SIZE		4	/* RSSI pref header size in bytes */
+#define JOIN_PREF_WPA_HDR_SIZE		4 /* WPA pref header size in bytes */
+#define JOIN_PREF_WPA_TUPLE_SIZE	12	/* Tuple size in bytes */
+#define JOIN_PREF_MAX_WPA_TUPLES	16
+#define MAX_BUF_SIZE		(JOIN_PREF_RSSI_SIZE + JOIN_PREF_WPA_HDR_SIZE +	\
+				           (JOIN_PREF_WPA_TUPLE_SIZE * JOIN_PREF_MAX_WPA_TUPLES))
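+
+/* For reference: the RSSI preference header plus a single WPA tuple takes
+ * JOIN_PREF_RSSI_SIZE + JOIN_PREF_WPA_HDR_SIZE + JOIN_PREF_WPA_TUPLE_SIZE
+ * = 4 + 4 + 12 = 20 bytes, well within MAX_BUF_SIZE.
+ */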
+#endif /* BCMFW_ROAM_ENABLE */
+
+
+/**
+ * Extern function declarations (TODO: move them to dhd_linux.h)
+ */
+int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
+int dhd_dev_init_ioctl(struct net_device *dev);
+#ifdef WL_CFG80211
+int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr);
+int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, dhd_pub_t *dhd, char *command);
+#else
+int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr)
+{ return 0; }
+int wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len)
+{ return 0; }
+int wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len)
+{ return 0; }
+int wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len)
+{ return 0; }
+#endif /* WL_CFG80211 */
+
+
+#ifdef ENABLE_4335BT_WAR
+extern int bcm_bt_lock(int cookie);
+extern void bcm_bt_unlock(int cookie);
+static int lock_cookie_wifi = 'W' | 'i'<<8 | 'F'<<16 | 'i'<<24;	/* cookie is "WiFi" */
+#endif /* ENABLE_4335BT_WAR */
+
+char nv_id[30];
+
+extern bool ap_fw_loaded;
+#if defined(CUSTOMER_HW2)
+extern char iface_name[IFNAMSIZ];
+#endif /* CUSTOMER_HW2 */
+
+/**
+ * Local (static) functions and variables
+ */
+
+/* Initialize g_wifi_on to 1 so dhd_bus_start will be called for the first
+ * time (only) in dhd_open; subsequent wifi-on requests are handled by
+ * wl_android_wifi_on
+ */
+static int g_wifi_on = TRUE;
+
+/**
+ * Local (static) function definitions
+ */
+static int wl_android_get_link_speed(struct net_device *net, char *command, int total_len)
+{
+	int link_speed;
+	int bytes_written;
+	int error;
+
+	error = wldev_get_link_speed(net, &link_speed);
+	if (error)
+		return -1;
+
+	/* Convert Kbps to Android Mbps */
+	link_speed = link_speed / 1000;
+	bytes_written = snprintf(command, total_len, "LinkSpeed %d", link_speed);
+	DHD_INFO(("%s: command result is %s\n", __FUNCTION__, command));
+	return bytes_written;
+}
+
+static int wl_android_get_rssi(struct net_device *net, char *command, int total_len)
+{
+	wlc_ssid_t ssid = {0};
+	int rssi;
+	int bytes_written = 0;
+	int error;
+
+	error = wldev_get_rssi(net, &rssi);
+	if (error)
+		return -1;
+
+	error = wldev_get_ssid(net, &ssid);
+	if (error)
+		return -1;
+	if ((ssid.SSID_len == 0) || (ssid.SSID_len > DOT11_MAX_SSID_LEN)) {
+		DHD_ERROR(("%s: wldev_get_ssid failed\n", __FUNCTION__));
+	} else {
+		memcpy(command, ssid.SSID, ssid.SSID_len);
+		bytes_written = ssid.SSID_len;
+	}
+	bytes_written += snprintf(&command[bytes_written], total_len - bytes_written,
+		" rssi %d", rssi);
+	DHD_INFO(("%s: command result is %s (%d)\n", __FUNCTION__, command, bytes_written));
+	return bytes_written;
+}
+
+static int wl_android_set_suspendopt(struct net_device *dev, char *command, int total_len)
+{
+	int suspend_flag;
+	int ret_now;
+	int ret = 0;
+
+	suspend_flag = *(command + strlen(CMD_SETSUSPENDOPT) + 1) - '0';
+
+	if (suspend_flag != 0)
+		suspend_flag = 1;
+	ret_now = net_os_set_suspend_disable(dev, suspend_flag);
+
+	if (ret_now != suspend_flag) {
+		if (!(ret = net_os_set_suspend(dev, ret_now, 1)))
+			DHD_INFO(("%s: Suspend Flag %d -> %d\n",
+				__FUNCTION__, ret_now, suspend_flag));
+		else
+			DHD_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+	}
+
+	return ret;
+}
+
+static int wl_android_set_suspendmode(struct net_device *dev, char *command, int total_len)
+{
+	int ret = 0;
+
+#if !defined(CONFIG_HAS_EARLYSUSPEND) || !defined(DHD_USE_EARLYSUSPEND)
+	int suspend_flag;
+
+	suspend_flag = *(command + strlen(CMD_SETSUSPENDMODE) + 1) - '0';
+	if (suspend_flag != 0)
+		suspend_flag = 1;
+
+	if (!(ret = net_os_set_suspend(dev, suspend_flag, 0)))
+		DHD_INFO(("%s: Suspend Mode %d\n", __FUNCTION__, suspend_flag));
+	else
+		DHD_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+#endif
+
+	return ret;
+}
+
+static int wl_android_get_band(struct net_device *dev, char *command, int total_len)
+{
+	uint band;
+	int bytes_written;
+	int error;
+
+	error = wldev_get_band(dev, &band);
+	if (error)
+		return -1;
+	bytes_written = snprintf(command, total_len, "Band %d", band);
+	return bytes_written;
+}
+
+
+#ifdef PNO_SUPPORT
+#define PNO_PARAM_SIZE 50
+#define VALUE_SIZE 50
+#define LIMIT_STR_FMT  ("%49s %49s")	/* leave room for the NUL terminator */
+static int
+wls_parse_batching_cmd(struct net_device *dev, char *command, int total_len)
+{
+	int err = BCME_OK;
+	uint i, tokens;
+	char *pos, *pos2, *token, *token2, *delim;
+	char param[PNO_PARAM_SIZE], value[VALUE_SIZE];
+	struct dhd_pno_batch_params batch_params;
+	DHD_PNO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
+	if (total_len < strlen(CMD_WLS_BATCHING)) {
+		DHD_ERROR(("%s: argument=%d less than min size\n", __FUNCTION__, total_len));
+		err = BCME_ERROR;
+		goto exit;
+	}
+	pos = command + strlen(CMD_WLS_BATCHING) + 1;
+	memset(&batch_params, 0, sizeof(struct dhd_pno_batch_params));
+
+	if (!strncmp(pos, PNO_BATCHING_SET, strlen(PNO_BATCHING_SET))) {
+		pos += strlen(PNO_BATCHING_SET) + 1;
+		while ((token = strsep(&pos, PNO_PARAMS_DELIMETER)) != NULL) {
+			memset(param, 0, sizeof(param));
+			memset(value, 0, sizeof(value));
+			if (token == NULL || !*token)
+				break;
+			if (*token == '\0')
+				continue;
+			delim = strchr(token, PNO_PARAM_VALUE_DELLIMETER);
+			if (delim != NULL)
+				*delim = ' ';
+
+			tokens = sscanf(token, LIMIT_STR_FMT, param, value);
+			if (!strncmp(param, PNO_PARAM_SCANFREQ, strlen(PNO_PARAM_SCANFREQ))) {
+				batch_params.scan_fr = simple_strtol(value, NULL, 0);
+				DHD_PNO(("scan_freq : %d\n", batch_params.scan_fr));
+			} else if (!strncmp(param, PNO_PARAM_BESTN, strlen(PNO_PARAM_BESTN))) {
+				batch_params.bestn = simple_strtol(value, NULL, 0);
+				DHD_PNO(("bestn : %d\n", batch_params.bestn));
+			} else if (!strncmp(param, PNO_PARAM_MSCAN, strlen(PNO_PARAM_MSCAN))) {
+				batch_params.mscan = simple_strtol(value, NULL, 0);
+				DHD_PNO(("mscan : %d\n", batch_params.mscan));
+			} else if (!strncmp(param, PNO_PARAM_CHANNEL, strlen(PNO_PARAM_CHANNEL))) {
+				i = 0;
+				pos2 = value;
+				tokens = sscanf(value, "<%s>", value);
+				if (tokens != 1) {
+					err = BCME_ERROR;
+					DHD_ERROR(("%s : invalid format for channel"
+					" <> params\n", __FUNCTION__));
+					goto exit;
+				}
+				while ((token2 = strsep(&pos2,
+						PNO_PARAM_CHANNEL_DELIMETER)) != NULL) {
+					if (token2 == NULL || !*token2)
+						break;
+					if (*token2 == '\0')
+						continue;
+					if (*token2 == 'A' || *token2 == 'B') {
+						batch_params.band = (*token2 == 'A') ?
+							WLC_BAND_5G : WLC_BAND_2G;
+						DHD_PNO(("band : %s\n",
+							(*token2 == 'A') ? "A" : "B"));
+					} else {
+						batch_params.chan_list[i++] =
+							simple_strtol(token2, NULL, 0);
+						batch_params.nchan++;
+						DHD_PNO(("channel : %d\n",
+							batch_params.chan_list[i - 1]));
+					}
+				}
+			} else if (!strncmp(param, PNO_PARAM_RTT, strlen(PNO_PARAM_RTT))) {
+				batch_params.rtt = simple_strtol(value, NULL, 0);
+				DHD_PNO(("rtt : %d\n", batch_params.rtt));
+			} else {
+				DHD_ERROR(("%s : unknown param: %s\n", __FUNCTION__, param));
+				err = BCME_ERROR;
+				goto exit;
+			}
+		}
+		err = dhd_dev_pno_set_for_batch(dev, &batch_params);
+		if (err < 0) {
+			DHD_ERROR(("failed to configure batch scan\n"));
+		} else {
+			memset(command, 0, total_len);
+			err = sprintf(command, "%d", err);
+		}
+	} else if (!strncmp(pos, PNO_BATCHING_GET, strlen(PNO_BATCHING_GET))) {
+		err = dhd_dev_pno_get_for_batch(dev, command, total_len);
+		if (err < 0) {
+			DHD_ERROR(("failed to get batching results\n"));
+		} else {
+			err = strlen(command);
+		}
+	} else if (!strncmp(pos, PNO_BATCHING_STOP, strlen(PNO_BATCHING_STOP))) {
+		err = dhd_dev_pno_stop_for_batch(dev);
+		if (err < 0) {
+			DHD_ERROR(("failed to stop batching scan\n"));
+		} else {
+			memset(command, 0, total_len);
+			err = sprintf(command, "OK");
+		}
+	} else {
+		DHD_ERROR(("%s : unknown command\n", __FUNCTION__));
+		err = BCME_ERROR;
+		goto exit;
+	}
+exit:
+	return err;
+}
+#ifndef WL_SCHED_SCAN
+static int wl_android_set_pno_setup(struct net_device *dev, char *command, int total_len)
+{
+	wlc_ssid_t ssids_local[MAX_PFN_LIST_COUNT];
+	int res = -1;
+	int nssid = 0;
+	cmd_tlv_t *cmd_tlv_temp;
+	char *str_ptr;
+	int tlv_size_left;
+	int pno_time = 0;
+	int pno_repeat = 0;
+	int pno_freq_expo_max = 0;
+
+#ifdef PNO_SET_DEBUG
+	int i;
+	char pno_in_example[] = {
+		'P', 'N', 'O', 'S', 'E', 'T', 'U', 'P', ' ',
+		'S', '1', '2', '0',
+		'S',
+		0x05,
+		'd', 'l', 'i', 'n', 'k',
+		'S',
+		0x04,
+		'G', 'O', 'O', 'G',
+		'T',
+		'0', 'B',
+		'R',
+		'2',
+		'M',
+		'2',
+		0x00
+		};
+#endif /* PNO_SET_DEBUG */
+	DHD_PNO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
+
+	if (total_len < (strlen(CMD_PNOSETUP_SET) + sizeof(cmd_tlv_t))) {
+		DHD_ERROR(("%s: argument=%d less than min size\n", __FUNCTION__, total_len));
+		goto exit_proc;
+	}
+#ifdef PNO_SET_DEBUG
+	memcpy(command, pno_in_example, sizeof(pno_in_example));
+	total_len = sizeof(pno_in_example);
+#endif
+	str_ptr = command + strlen(CMD_PNOSETUP_SET);
+	tlv_size_left = total_len - strlen(CMD_PNOSETUP_SET);
+
+	cmd_tlv_temp = (cmd_tlv_t *)str_ptr;
+	memset(ssids_local, 0, sizeof(ssids_local));
+
+	if ((cmd_tlv_temp->prefix == PNO_TLV_PREFIX) &&
+		(cmd_tlv_temp->version == PNO_TLV_VERSION) &&
+		(cmd_tlv_temp->subtype == PNO_TLV_SUBTYPE_LEGACY_PNO)) {
+
+		str_ptr += sizeof(cmd_tlv_t);
+		tlv_size_left -= sizeof(cmd_tlv_t);
+
+		if ((nssid = wl_iw_parse_ssid_list_tlv(&str_ptr, ssids_local,
+			MAX_PFN_LIST_COUNT, &tlv_size_left)) <= 0) {
+			DHD_ERROR(("SSID not present or corrupted, ret=%d\n", nssid));
+			goto exit_proc;
+		} else {
+			if ((str_ptr[0] != PNO_TLV_TYPE_TIME) || (tlv_size_left <= 1)) {
+				DHD_ERROR(("%s scan duration corrupted field size %d\n",
+					__FUNCTION__, tlv_size_left));
+				goto exit_proc;
+			}
+			str_ptr++;
+			pno_time = simple_strtoul(str_ptr, &str_ptr, 16);
+			DHD_PNO(("%s: pno_time=%d\n", __FUNCTION__, pno_time));
+
+			if (str_ptr[0] != 0) {
+				if ((str_ptr[0] != PNO_TLV_FREQ_REPEAT)) {
+					DHD_ERROR(("%s pno repeat : corrupted field\n",
+						__FUNCTION__));
+					goto exit_proc;
+				}
+				str_ptr++;
+				pno_repeat = simple_strtoul(str_ptr, &str_ptr, 16);
+				DHD_PNO(("%s :got pno_repeat=%d\n", __FUNCTION__, pno_repeat));
+				if (str_ptr[0] != PNO_TLV_FREQ_EXPO_MAX) {
+					DHD_ERROR(("%s FREQ_EXPO_MAX corrupted field size\n",
+						__FUNCTION__));
+					goto exit_proc;
+				}
+				str_ptr++;
+				pno_freq_expo_max = simple_strtoul(str_ptr, &str_ptr, 16);
+				DHD_PNO(("%s: pno_freq_expo_max=%d\n",
+					__FUNCTION__, pno_freq_expo_max));
+			}
+		}
+	} else {
+		DHD_ERROR(("%s get wrong TLV command\n", __FUNCTION__));
+		goto exit_proc;
+	}
+
+	res = dhd_dev_pno_set_for_ssid(dev, ssids_local, nssid, pno_time, pno_repeat,
+		pno_freq_expo_max, NULL, 0);
+exit_proc:
+	return res;
+}
+#endif /* !WL_SCHED_SCAN */
+#endif /* PNO_SUPPORT  */
+
+static int wl_android_get_p2p_dev_addr(struct net_device *ndev, char *command, int total_len)
+{
+	int ret;
+	int bytes_written = 0;
+
+	ret = wl_cfg80211_get_p2p_dev_addr(ndev, (struct ether_addr*)command);
+	if (ret)
+		return 0;
+	bytes_written = sizeof(struct ether_addr);
+	return bytes_written;
+}
+
+
+int
+wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist)
+{
+	int i, j, match;
+	int ret	= 0;
+	char mac_buf[MAX_NUM_OF_ASSOCLIST *
+		sizeof(struct ether_addr) + sizeof(uint)] = {0};
+	struct maclist *assoc_maclist = (struct maclist *)mac_buf;
+
+	/* set filtering mode */
+	if ((ret = wldev_ioctl(dev, WLC_SET_MACMODE, &macmode, sizeof(macmode), true)) != 0) {
+		DHD_ERROR(("%s : WLC_SET_MACMODE error=%d\n", __FUNCTION__, ret));
+		return ret;
+	}
+	if (macmode != MACLIST_MODE_DISABLED) {
+		/* set the MAC filter list */
+		if ((ret = wldev_ioctl(dev, WLC_SET_MACLIST, maclist,
+			sizeof(int) + sizeof(struct ether_addr) * maclist->count, true)) != 0) {
+			DHD_ERROR(("%s : WLC_SET_MACLIST error=%d\n", __FUNCTION__, ret));
+			return ret;
+		}
+		/* get the current list of associated STAs */
+		assoc_maclist->count = MAX_NUM_OF_ASSOCLIST;
+		if ((ret = wldev_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist,
+			sizeof(mac_buf), false)) != 0) {
+			DHD_ERROR(("%s : WLC_GET_ASSOCLIST error=%d\n", __FUNCTION__, ret));
+			return ret;
+		}
+		/* do we have any STA associated?  */
+		if (assoc_maclist->count) {
+			/* iterate each associated STA */
+			for (i = 0; i < assoc_maclist->count; i++) {
+				match = 0;
+				/* compare with each entry */
+				for (j = 0; j < maclist->count; j++) {
+					DHD_INFO(("%s : associated="MACDBG " list="MACDBG "\n",
+					__FUNCTION__, MAC2STRDBG(assoc_maclist->ea[i].octet),
+					MAC2STRDBG(maclist->ea[j].octet)));
+					if (memcmp(assoc_maclist->ea[i].octet,
+						maclist->ea[j].octet, ETHER_ADDR_LEN) == 0) {
+						match = 1;
+						break;
+					}
+				}
+				/* do conditional deauth */
+				/*   "if not in the allow list" or "if in the deny list" */
+				if ((macmode == MACLIST_MODE_ALLOW && !match) ||
+					(macmode == MACLIST_MODE_DENY && match)) {
+					scb_val_t scbval;
+
+					scbval.val = htod32(1);
+					memcpy(&scbval.ea, &assoc_maclist->ea[i],
+						ETHER_ADDR_LEN);
+					if ((ret = wldev_ioctl(dev,
+						WLC_SCB_DEAUTHENTICATE_FOR_REASON,
+						&scbval, sizeof(scb_val_t), true)) != 0)
+						DHD_ERROR(("%s WLC_SCB_DEAUTHENTICATE error=%d\n",
+							__FUNCTION__, ret));
+				}
+			}
+		}
+	}
+	return ret;
+}
+
+/*
+ * HAPD_MAC_FILTER mac_mode mac_cnt mac_addr1 mac_addr2
+ *
+ */
+static int
+wl_android_set_mac_address_filter(struct net_device *dev, const char* str)
+{
+	int i;
+	int ret = 0;
+	int macnum = 0;
+	int macmode = MACLIST_MODE_DISABLED;
+	struct maclist *list;
+	char eabuf[ETHER_ADDR_STR_LEN];
+
+	/* string should look like below (macmode/macnum/maclist) */
+	/*   1 2 00:11:22:33:44:55 00:11:22:33:44:ff  */
+
+	/* get the MAC filter mode */
+	macmode = bcm_atoi(strsep((char**)&str, " "));
+
+	if (macmode < MACLIST_MODE_DISABLED || macmode > MACLIST_MODE_ALLOW) {
+		DHD_ERROR(("%s : invalid macmode %d\n", __FUNCTION__, macmode));
+		return -1;
+	}
+
+	macnum = bcm_atoi(strsep((char**)&str, " "));
+	if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) {
+		DHD_ERROR(("%s : invalid number of MAC address entries %d\n",
+			__FUNCTION__, macnum));
+		return -1;
+	}
+	/* allocate memory for the MAC list */
+	list = (struct maclist*)kmalloc(sizeof(int) +
+		sizeof(struct ether_addr) * macnum, GFP_KERNEL);
+	if (!list) {
+		DHD_ERROR(("%s : failed to allocate memory\n", __FUNCTION__));
+		return -1;
+	}
+	/* prepare the MAC list */
+	list->count = htod32(macnum);
+	bzero((char *)eabuf, ETHER_ADDR_STR_LEN);
+	for (i = 0; i < list->count; i++) {
+		strncpy(eabuf, strsep((char**)&str, " "), ETHER_ADDR_STR_LEN - 1);
+		if (!(ret = bcm_ether_atoe(eabuf, &list->ea[i]))) {
+			DHD_ERROR(("%s : mac parsing err index=%d, addr=%s\n",
+				__FUNCTION__, i, eabuf));
+			list->count--;
+			break;
+		}
+		DHD_INFO(("%s : %d/%d MACADDR=%s", __FUNCTION__, i, list->count, eabuf));
+	}
+	/* set the list */
+	if ((ret = wl_android_set_ap_mac_list(dev, macmode, list)) != 0)
+		DHD_ERROR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
+
+	kfree(list);
+
+	return 0;
+}
+
+/**
+ * Global function definitions (declared in wl_android.h)
+ */
+
+#define WIFI_ON_MAX_RETRY 3
+int wl_android_wifi_on(struct net_device *dev)
+{
+	int ret = 0;
+	int retry = POWERUP_MAX_RETRY;
+	int retry_wifi_on = WIFI_ON_MAX_RETRY;
+
+	DHD_ERROR(("%s in\n", __FUNCTION__));
+	if (!dev) {
+		DHD_ERROR(("%s: dev is null\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+wifi_on:
+	dhd_net_if_lock(dev);
+	if (!g_wifi_on) {
+		do {
+			dhd_net_wifi_platform_set_power(dev, TRUE, WIFI_TURNON_DELAY);
+			ret = dhd_net_bus_resume(dev, 0);
+			if (ret == 0)
+				break;
+			DHD_ERROR(("\nfailed to power up wifi chip, retry again (%d left) **\n\n",
+				retry+1));
+			dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY);
+		} while (retry-- >= 0);
+		if (ret != 0) {
+			DHD_ERROR(("\nfailed to power up wifi chip, max retry reached **\n\n"));
+			goto exit;
+		}
+		ret = dhd_net_bus_devreset(dev, FALSE);
+		dhd_net_bus_resume(dev, 1);
+		if (!ret) {
+			if (dhd_dev_init_ioctl(dev) < 0)
+				ret = -EFAULT;
+		}
+		g_wifi_on = TRUE;
+	}
+
+exit:
+	dhd_net_if_unlock(dev);
+
+	if (ret && --retry_wifi_on > 0) {
+		DHD_ERROR(("\nRetry WIFI ON procedure (%d)\n\n", WIFI_ON_MAX_RETRY - retry_wifi_on));
+		wl_android_wifi_off(dev);
+		goto wifi_on;
+	}
+
+	return ret;
+}
+
+int wl_android_wifi_off(struct net_device *dev)
+{
+	int ret = 0;
+
+	DHD_ERROR(("%s in\n", __FUNCTION__));
+	if (!dev) {
+		DHD_TRACE(("%s: dev is null\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	dhd_net_if_lock(dev);
+	if (g_wifi_on) {
+		ret = dhd_net_bus_devreset(dev, TRUE);
+		dhd_net_bus_suspend(dev);
+		dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY);
+		g_wifi_on = FALSE;
+	}
+	dhd_net_if_unlock(dev);
+
+	return ret;
+}
+
+static int wl_android_set_fwpath(struct net_device *net, char *command, int total_len)
+{
+	if ((strlen(command) - strlen(CMD_SETFWPATH)) > MOD_PARAM_PATHLEN)
+		return -1;
+	return dhd_net_set_fw_path(net, command + strlen(CMD_SETFWPATH) + 1);
+}
+
+#ifdef CONNECTION_STATISTICS
+static int
+wl_chanim_stats(struct net_device *dev, u8 *chan_idle)
+{
+	int err;
+	wl_chanim_stats_t *list;
+	/* Parameter _and_ returned buffer of chanim_stats. */
+	wl_chanim_stats_t param;
+	u8 result[WLC_IOCTL_SMLEN];
+	chanim_stats_t *stats;
+
+	memset(&param, 0, sizeof(param));
+	memset(result, 0, sizeof(result));
+
+	param.buflen = htod32(sizeof(wl_chanim_stats_t));
+	param.count = htod32(WL_CHANIM_COUNT_ONE);
+
+	if ((err = wldev_iovar_getbuf(dev, "chanim_stats", (char*)&param, sizeof(wl_chanim_stats_t),
+		(char*)result, sizeof(result), 0)) < 0) {
+		WL_ERR(("Failed to get chanim results %d \n", err));
+		return err;
+	}
+
+	list = (wl_chanim_stats_t*)result;
+
+	list->buflen = dtoh32(list->buflen);
+	list->version = dtoh32(list->version);
+	list->count = dtoh32(list->count);
+
+	if (list->buflen == 0) {
+		list->version = 0;
+		list->count = 0;
+	} else if (list->version != WL_CHANIM_STATS_VERSION) {
+		WL_ERR(("Sorry, firmware has wl_chanim_stats version %d "
+			"but driver supports only version %d.\n",
+				list->version, WL_CHANIM_STATS_VERSION));
+		list->buflen = 0;
+		list->count = 0;
+	}
+
+	stats = list->stats;
+	stats->glitchcnt = dtoh32(stats->glitchcnt);
+	stats->badplcp = dtoh32(stats->badplcp);
+	stats->chanspec = dtoh16(stats->chanspec);
+	stats->timestamp = dtoh32(stats->timestamp);
+	stats->chan_idle = dtoh32(stats->chan_idle);
+
+	WL_INFO(("chanspec: 0x%4x glitch: %d badplcp: %d idle: %d timestamp: %d\n",
+		stats->chanspec, stats->glitchcnt, stats->badplcp, stats->chan_idle,
+		stats->timestamp));
+
+	*chan_idle = stats->chan_idle;
+
+	return (err);
+}
+
+static int
+wl_android_get_connection_stats(struct net_device *dev, char *command, int total_len)
+{
+	wl_cnt_t* cnt = NULL;
+	int link_speed = 0;
+	struct connection_stats *output;
+	unsigned int bufsize = 0;
+	int bytes_written = 0;
+	int ret = 0;
+
+	WL_INFO(("%s: enter Get Connection Stats\n", __FUNCTION__));
+
+	if (total_len <= 0) {
+		WL_ERR(("%s: invalid buffer size %d\n", __FUNCTION__, total_len));
+		goto error;
+	}
+
+	bufsize = total_len;
+	if (bufsize < sizeof(struct connection_stats)) {
+		WL_ERR(("%s: not enough buffer size, provided=%u, requires=%zu\n",
+			__FUNCTION__, bufsize,
+			sizeof(struct connection_stats)));
+		goto error;
+	}
+
+	if ((cnt = kmalloc(sizeof(*cnt), GFP_KERNEL)) == NULL) {
+		WL_ERR(("kmalloc failed\n"));
+		return -1;
+	}
+	memset(cnt, 0, sizeof(*cnt));
+
+	ret = wldev_iovar_getbuf(dev, "counters", NULL, 0, (char *)cnt, sizeof(wl_cnt_t), NULL);
+	if (ret) {
+		WL_ERR(("%s: wldev_iovar_getbuf() failed, ret=%d\n",
+			__FUNCTION__, ret));
+		goto error;
+	}
+
+	if (dtoh16(cnt->version) > WL_CNT_T_VERSION) {
+		WL_ERR(("%s: incorrect version of wl_cnt_t, expected=%u got=%u\n",
+			__FUNCTION__, WL_CNT_T_VERSION, dtoh16(cnt->version)));
+		goto error;
+	}
+
+	/* link_speed is in kbps */
+	ret = wldev_get_link_speed(dev, &link_speed);
+	if (ret || link_speed < 0) {
+		WL_ERR(("%s: wldev_get_link_speed() failed, ret=%d, speed=%d\n",
+			__FUNCTION__, ret, link_speed));
+		goto error;
+	}
+
+	output = (struct connection_stats *)command;
+	output->txframe   = dtoh32(cnt->txframe);
+	output->txbyte    = dtoh32(cnt->txbyte);
+	output->txerror   = dtoh32(cnt->txerror);
+	output->rxframe   = dtoh32(cnt->rxframe);
+	output->rxbyte    = dtoh32(cnt->rxbyte);
+	output->txfail    = dtoh32(cnt->txfail);
+	output->txretry   = dtoh32(cnt->txretry);
+	output->txretrie  = dtoh32(cnt->txretrie);
+	output->txrts     = dtoh32(cnt->txrts);
+	output->txnocts   = dtoh32(cnt->txnocts);
+	output->txexptime = dtoh32(cnt->txexptime);
+	output->txrate    = link_speed;
+
+	/* Channel idle ratio. */
+	if (wl_chanim_stats(dev, &(output->chan_idle)) < 0) {
+		output->chan_idle = 0;
+	}
+
+	kfree(cnt);
+
+	bytes_written = sizeof(struct connection_stats);
+	return bytes_written;
+
+error:
+	if (cnt) {
+		kfree(cnt);
+	}
+	return -1;
+}
+#endif /* CONNECTION_STATISTICS */
+
+static int
+wl_android_set_pmk(struct net_device *dev, char *command, int total_len)
+{
+	uchar pmk[33];
+	int error = 0;
+	char smbuf[WLC_IOCTL_SMLEN];
+#ifdef OKC_DEBUG
+	int i = 0;
+#endif
+
+	/* require "SET_PMK " plus a full 32-byte PMK in the command buffer */
+	if (total_len < (int)(strlen("SET_PMK ") + 32)) {
+		DHD_ERROR(("%s: buffer too short for PMK\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	bzero(pmk, sizeof(pmk));
+	memcpy((char *)pmk, command + strlen("SET_PMK "), 32);
+	error = wldev_iovar_setbuf(dev, "okc_info_pmk", pmk, 32, smbuf, sizeof(smbuf), NULL);
+	if (error) {
+		DHD_ERROR(("Failed to set PMK for OKC, error = %d\n", error));
+	}
+#ifdef OKC_DEBUG
+	DHD_ERROR(("PMK is "));
+	for (i = 0; i < 32; i++)
+		DHD_ERROR(("%02X ", pmk[i]));
+
+	DHD_ERROR(("\n"));
+#endif
+	return error;
+}
+
+static int
+wl_android_okc_enable(struct net_device *dev, char *command, int total_len)
+{
+	int error = 0;
+	char okc_enable = 0;
+
+	okc_enable = command[strlen(CMD_OKC_ENABLE) + 1] - '0';
+	error = wldev_iovar_setint(dev, "okc_enable", okc_enable);
+	if (error) {
+		DHD_ERROR(("Failed to %s OKC, error = %d\n",
+			okc_enable ? "enable" : "disable", error));
+	}
+
+	wldev_iovar_setint(dev, "ccx_enable", 0);
+
+	return error;
+}
+
+
+
+int wl_android_set_roam_mode(struct net_device *dev, char *command, int total_len)
+{
+	int error = 0;
+	int mode = 0;
+
+	if (sscanf(command, "%*s %d", &mode) != 1) {
+		DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+		return -1;
+	}
+
+	error = wldev_iovar_setint(dev, "roam_off", mode);
+	if (error) {
+		DHD_ERROR(("%s: failed to set roaming mode %d, error = %d\n",
+			__FUNCTION__, mode, error));
+		return -1;
+	}
+	DHD_ERROR(("%s: successfully set roaming mode %d\n",
+		__FUNCTION__, mode));
+	return 0;
+}
+
+int wl_android_set_ibss_beacon_ouidata(struct net_device *dev, char *command, int total_len)
+{
+	char ie_buf[VNDR_IE_MAX_LEN];
+	char *ioctl_buf = NULL;
+	char hex[] = "XX";
+	char *pcmd = NULL;
+	int ielen = 0, datalen = 0, idx = 0, tot_len = 0;
+	vndr_ie_setbuf_t *vndr_ie = NULL;
+	s32 iecount;
+	uint32 pktflag;
+	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+	s32 err = BCME_OK;
+
+	/* Check for a previously added VSIE (Vendor Specific IE).
+	 * If one exists, send an IOVAR to delete it.
+	 */
+	if (wl_cfg80211_ibss_vsie_delete(dev) != BCME_OK) {
+		return -EINVAL;
+	}
+
+	pcmd = command + strlen(CMD_SETIBSSBEACONOUIDATA) + 1;
+	for (idx = 0; idx < DOT11_OUI_LEN; idx++) {
+		hex[0] = *pcmd++;
+		hex[1] = *pcmd++;
+		ie_buf[idx] =  (uint8)simple_strtoul(hex, NULL, 16);
+	}
+	pcmd++;
+	while ((*pcmd != '\0') && (idx < VNDR_IE_MAX_LEN)) {
+		hex[0] = *pcmd++;
+		hex[1] = *pcmd++;
+		ie_buf[idx++] =  (uint8)simple_strtoul(hex, NULL, 16);
+		datalen++;
+	}
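+	/* The '- 1' below assumes vndr_ie_setbuf_t already reserves one byte of
+	 * IE data in its variable-length tail, so only datalen - 1 extra bytes
+	 * are needed.
+	 */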
+	tot_len = sizeof(vndr_ie_setbuf_t) + (datalen - 1);
+	vndr_ie = (vndr_ie_setbuf_t *) kzalloc(tot_len, kflags);
+	if (!vndr_ie) {
+		WL_ERR(("IE memory alloc failed\n"));
+		return -ENOMEM;
+	}
+	/* Copy the vndr_ie SET command ("add"/"del") to the buffer */
+	strncpy(vndr_ie->cmd, "add", VNDR_IE_CMD_LEN - 1);
+	vndr_ie->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+
+	/* Set the IE count - the buffer contains only 1 IE */
+	iecount = htod32(1);
+	memcpy((void *)&vndr_ie->vndr_ie_buffer.iecount, &iecount, sizeof(s32));
+
+	/* Set packet flags to indicate that beacons and probe responses will carry this IE */
+	pktflag = htod32(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG);
+	memcpy((void *)&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag,
+		sizeof(u32));
+	/* Set the IE ID */
+	vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = (uchar) DOT11_MNG_PROPR_ID;
+
+	memcpy(&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui, &ie_buf,
+		DOT11_OUI_LEN);
+	memcpy(&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.data,
+		&ie_buf[DOT11_OUI_LEN], datalen);
+
+	ielen = DOT11_OUI_LEN + datalen;
+	vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len = (uchar) ielen;
+
+	ioctl_buf = kmalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL);
+	if (!ioctl_buf) {
+		WL_ERR(("ioctl memory alloc failed\n"));
+		if (vndr_ie) {
+			kfree(vndr_ie);
+		}
+		return -ENOMEM;
+	}
+	memset(ioctl_buf, 0, WLC_IOCTL_MEDLEN);	/* init the buffer */
+	err = wldev_iovar_setbuf(dev, "ie", vndr_ie, tot_len, ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+
+
+	if (err != BCME_OK) {
+		err = -EINVAL;
+		if (vndr_ie) {
+			kfree(vndr_ie);
+		}
+	}
+	else {
+		/* do NOT free 'vndr_ie'; the buffer is saved for later reuse */
+		wl_cfg80211_ibss_vsie_set_buffer(vndr_ie, tot_len);
+	}
+
+	if (ioctl_buf) {
+		kfree(ioctl_buf);
+	}
+
+	return err;
+}
+
+#if defined(BCMFW_ROAM_ENABLE)
+static int
+wl_android_set_roampref(struct net_device *dev, char *command, int total_len)
+{
+	int error = 0;
+	char smbuf[WLC_IOCTL_SMLEN];
+	uint8 buf[MAX_BUF_SIZE];
+	uint8 *pref = buf;
+	char *pcmd;
+	int num_ucipher_suites = 0;
+	int num_akm_suites = 0;
+	wpa_suite_t ucipher_suites[MAX_NUM_SUITES];
+	wpa_suite_t akm_suites[MAX_NUM_SUITES];
+	int num_tuples = 0;
+	int total_bytes = 0;
+	int total_len_left;
+	int i, j;
+	char hex[] = "XX";
+
+	pcmd = command + strlen(CMD_SET_ROAMPREF) + 1;
+	total_len_left = total_len - (strlen(CMD_SET_ROAMPREF) + 1);
+
+	num_akm_suites = simple_strtoul(pcmd, NULL, 16);
+	/* Increment for number of AKM suites field + space */
+	pcmd += 3;
+	total_len_left -= 3;
+
+	/* check to make sure pcmd does not overrun */
+	if (total_len_left < (num_akm_suites * WIDTH_AKM_SUITE))
+		return -1;
+
+	memset(buf, 0, sizeof(buf));
+	memset(akm_suites, 0, sizeof(akm_suites));
+	memset(ucipher_suites, 0, sizeof(ucipher_suites));
+
+	/* Save the AKM suites passed in the command */
+	for (i = 0; i < num_akm_suites; i++) {
+		/* Store the MSB first, as required by join_pref */
+		for (j = 0; j < 4; j++) {
+			hex[0] = *pcmd++;
+			hex[1] = *pcmd++;
+			buf[j] = (uint8)simple_strtoul(hex, NULL, 16);
+		}
+		memcpy((uint8 *)&akm_suites[i], buf, sizeof(uint32));
+	}
+
+	total_len_left -= (num_akm_suites * WIDTH_AKM_SUITE);
+	num_ucipher_suites = simple_strtoul(pcmd, NULL, 16);
+	/* Increment for number of cipher suites field + space */
+	pcmd += 3;
+	total_len_left -= 3;
+
+	if (total_len_left < (num_ucipher_suites * WIDTH_AKM_SUITE))
+		return -1;
+
+	/* Save the cipher suites passed in the command */
+	for (i = 0; i < num_ucipher_suites; i++) {
+		/* Store the MSB first, as required by join_pref */
+		for (j = 0; j < 4; j++) {
+			hex[0] = *pcmd++;
+			hex[1] = *pcmd++;
+			buf[j] = (uint8)simple_strtoul(hex, NULL, 16);
+		}
+		memcpy((uint8 *)&ucipher_suites[i], buf, sizeof(uint32));
+	}
+
+	/* Join preference for RSSI
+	 * Type	  : 1 byte (0x01)
+	 * Length : 1 byte (0x02)
+	 * Value  : 2 bytes	(reserved)
+	 */
+	*pref++ = WL_JOIN_PREF_RSSI;
+	*pref++ = JOIN_PREF_RSSI_LEN;
+	*pref++ = 0;
+	*pref++ = 0;
+
+	/* Join preference for WPA
+	 * Type	  : 1 byte (0x02)
+	 * Length : 1 byte (not used)
+	 * Value  : (variable length)
+	 *		reserved: 1 byte
+	 *      count	: 1 byte (no of tuples)
+	 *		Tuple1	: 12 bytes
+	 *			akm[4]
+	 *			ucipher[4]
+	 *			mcipher[4]
+	 *		Tuple2	: 12 bytes
+	 *		Tuplen	: 12 bytes
+	 */
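+	/* Worked example for one AKM suite and one unicast cipher
+	 * (suite bytes shown as aa../uu.. placeholders):
+	 *   01 02 00 00                          RSSI TLV (type, len, 2 reserved bytes)
+	 *   02 00 00 01                          WPA TLV (type, unused len, reserved, count)
+	 *   aa aa aa aa uu uu uu uu 00 00 00 00  one akm/ucipher/mcipher tuple
+	 */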
+	num_tuples = num_akm_suites * num_ucipher_suites;
+	if (num_tuples != 0) {
+		if (num_tuples <= JOIN_PREF_MAX_WPA_TUPLES) {
+			*pref++ = WL_JOIN_PREF_WPA;
+			*pref++ = 0;
+			*pref++ = 0;
+			*pref++ = (uint8)num_tuples;
+			total_bytes = JOIN_PREF_RSSI_SIZE + JOIN_PREF_WPA_HDR_SIZE +
+				(JOIN_PREF_WPA_TUPLE_SIZE * num_tuples);
+		} else {
+			DHD_ERROR(("%s: Too many wpa configs for join_pref \n", __FUNCTION__));
+			return -1;
+		}
+	} else {
+		/* No WPA config, configure only RSSI preference */
+		total_bytes = JOIN_PREF_RSSI_SIZE;
+	}
+
+	/* akm-ucipher-mcipher tuples in the format required for join_pref */
+	for (i = 0; i < num_ucipher_suites; i++) {
+		for (j = 0; j < num_akm_suites; j++) {
+			memcpy(pref, (uint8 *)&akm_suites[j], WPA_SUITE_LEN);
+			pref += WPA_SUITE_LEN;
+			memcpy(pref, (uint8 *)&ucipher_suites[i], WPA_SUITE_LEN);
+			pref += WPA_SUITE_LEN;
+			/* Set to 0 to match any available multicast cipher */
+			memset(pref, 0, WPA_SUITE_LEN);
+			pref += WPA_SUITE_LEN;
+		}
+	}
+
+	prhex("join pref", (uint8 *)buf, total_bytes);
+	error = wldev_iovar_setbuf(dev, "join_pref", buf, total_bytes, smbuf, sizeof(smbuf), NULL);
+	if (error) {
+		DHD_ERROR(("Failed to set join_pref, error = %d\n", error));
+	}
+	return error;
+}
+#endif /* defined(BCMFW_ROAM_ENABLE) */
+
+static int
+wl_android_iolist_add(struct net_device *dev, struct list_head *head, struct io_cfg *config)
+{
+	struct io_cfg *resume_cfg;
+	s32 ret;
+
+	resume_cfg = kzalloc(sizeof(struct io_cfg), GFP_KERNEL);
+	if (!resume_cfg)
+		return -ENOMEM;
+
+	if (config->iovar) {
+		ret = wldev_iovar_getint(dev, config->iovar, &resume_cfg->param);
+		if (ret) {
+			DHD_ERROR(("%s: Failed to get current %s value\n",
+				__FUNCTION__, config->iovar));
+			goto error;
+		}
+
+		ret = wldev_iovar_setint(dev, config->iovar, config->param);
+		if (ret) {
+			DHD_ERROR(("%s: Failed to set %s to %d\n", __FUNCTION__,
+				config->iovar, config->param));
+			goto error;
+		}
+
+		resume_cfg->iovar = config->iovar;
+	} else {
+		resume_cfg->arg = kzalloc(config->len, GFP_KERNEL);
+		if (!resume_cfg->arg) {
+			ret = -ENOMEM;
+			goto error;
+		}
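+		/* WLC GET/SET ioctl codes are paired with SET = GET + 1
+		 * (e.g. WLC_GET_PM/WLC_SET_PM), so 'config->ioctl' holds the GET
+		 * code and 'config->ioctl + 1' issues the matching SET.
+		 */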
+		ret = wldev_ioctl(dev, config->ioctl, resume_cfg->arg, config->len, false);
+		if (ret) {
+			DHD_ERROR(("%s: Failed to get ioctl %d\n", __FUNCTION__,
+				config->ioctl));
+			goto error;
+		}
+		ret = wldev_ioctl(dev, config->ioctl + 1, config->arg, config->len, true);
+		if (ret) {
+			DHD_ERROR(("%s: Failed to set %s to %d\n", __FUNCTION__,
+				config->iovar, config->param));
+			goto error;
+		}
+		if (config->ioctl + 1 == WLC_SET_PM)
+			wl_cfg80211_update_power_mode(dev);
+		resume_cfg->ioctl = config->ioctl;
+		resume_cfg->len = config->len;
+	}
+
+	list_add(&resume_cfg->list, head);
+
+	return 0;
+error:
+	kfree(resume_cfg->arg);
+	kfree(resume_cfg);
+	return ret;
+}
+
+static void
+wl_android_iolist_resume(struct net_device *dev, struct list_head *head)
+{
+	struct io_cfg *config;
+	struct list_head *cur, *q;
+	s32 ret = 0;
+
+	list_for_each_safe(cur, q, head) {
+		config = list_entry(cur, struct io_cfg, list);
+		if (config->iovar) {
+			if (!ret)
+				ret = wldev_iovar_setint(dev, config->iovar,
+					config->param);
+		} else {
+			if (!ret)
+				ret = wldev_ioctl(dev, config->ioctl + 1,
+					config->arg, config->len, true);
+			if (config->ioctl + 1 == WLC_SET_PM)
+				wl_cfg80211_update_power_mode(dev);
+			kfree(config->arg);
+		}
+		list_del(cur);
+		kfree(config);
+	}
+}
+
+static int
+wl_android_set_miracast(struct net_device *dev, char *command, int total_len)
+{
+	int mode, val;
+	int ret = 0;
+	struct io_cfg config;
+
+	if (sscanf(command, "%*s %d", &mode) != 1) {
+		DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+		return -1;
+	}
+
+	DHD_INFO(("%s: enter miracast mode %d\n", __FUNCTION__, mode));
+
+	if (miracast_cur_mode == mode)
+		return 0;
+
+	wl_android_iolist_resume(dev, &miracast_resume_list);
+	miracast_cur_mode = MIRACAST_MODE_OFF;
+
+	switch (mode) {
+	case MIRACAST_MODE_SOURCE:
+		/* setting mchan_algo to platform specific value */
+		config.iovar = "mchan_algo";
+
+		ret = wldev_ioctl(dev, WLC_GET_BCNPRD, &val, sizeof(int), false);
+		if (!ret && val > 100) {
+			config.param = 0;
+			DHD_ERROR(("%s: Connected station's beacon interval: "
+				"%d and set mchan_algo to %d \n",
+				__FUNCTION__, val, config.param));
+		}
+		else {
+			config.param = MIRACAST_MCHAN_ALGO;
+		}
+		ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+		if (ret)
+			goto resume;
+
+		/* setting mchan_bw to platform specific value */
+		config.iovar = "mchan_bw";
+		config.param = MIRACAST_MCHAN_BW;
+		ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+		if (ret)
+			goto resume;
+
+		/* setting ampdu_mpdu to platform specific value */
+		config.iovar = "ampdu_mpdu";
+		config.param = MIRACAST_AMPDU_SIZE;
+		ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+		if (ret)
+			goto resume;
+		/* FALLTHROUGH */
+		/* Source mode shares most configurations with sink mode.
+		 * Fall through here to avoid code duplication.
+		 */
+	case MIRACAST_MODE_SINK:
+		/* disable internal roaming */
+		config.iovar = "roam_off";
+		config.param = 1;
+		ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+		if (ret)
+			goto resume;
+		/* turn off PM */
+		val = 0;
+		config.iovar = NULL;
+		config.ioctl = WLC_GET_PM;
+		config.arg = &val;
+		config.len = sizeof(int);
+		ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+		if (ret)
+			goto resume;
+
+		break;
+	case MIRACAST_MODE_OFF:
+	default:
+		break;
+	}
+	miracast_cur_mode = mode;
+
+	return 0;
+
+resume:
+	DHD_ERROR(("%s: turnoff miracast mode because of err%d\n", __FUNCTION__, ret));
+	wl_android_iolist_resume(dev, &miracast_resume_list);
+	return ret;
+}
+
+int wl_keep_alive_set(struct net_device *dev, char* extra, int total_len)
+{
+	char buf[256];
+	const char *str;
+	wl_mkeep_alive_pkt_t mkeep_alive_pkt;
+	wl_mkeep_alive_pkt_t *mkeep_alive_pktp;
+	int buf_len;
+	int str_len;
+	int res = -1;
+	uint period_msec = 0;
+
+	if (extra == NULL)
+	{
+		 DHD_ERROR(("%s: extra is NULL\n", __FUNCTION__));
+		 return -1;
+	}
+	if (sscanf(extra, "%d", &period_msec) != 1)
+	{
+		 DHD_ERROR(("%s: sscanf error. check period_msec value\n", __FUNCTION__));
+		 return -EINVAL;
+	}
+	DHD_ERROR(("%s: period_msec is %d\n", __FUNCTION__, period_msec));
+
+	memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
+
+	str = "mkeep_alive";
+	str_len = strlen(str);
+	strncpy(buf, str, str_len);
+	buf[str_len] = '\0';
+	mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1);
+	mkeep_alive_pkt.period_msec = period_msec;
+	buf_len = str_len + 1;
+	mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
+	mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
+
+	/* Setup keep alive zero for null packet generation */
+	mkeep_alive_pkt.keep_alive_id = 0;
+	mkeep_alive_pkt.len_bytes = 0;
+	buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
+	/* Keep-alive attributes are set in a local variable (mkeep_alive_pkt), and
+	 * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
+	 * guarantee that the buffer is properly aligned.
+	 */
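+	/* Resulting iovar buffer: "mkeep_alive\0" followed by the fixed-length
+	 * wl_mkeep_alive_pkt_t header; no payload follows since len_bytes is 0
+	 * for null keep-alive packets.
+	 */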
+	memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
+
+	if ((res = wldev_ioctl(dev, WLC_SET_VAR, buf, buf_len, TRUE)) < 0)
+	{
+		DHD_ERROR(("%s:keep_alive set failed. res[%d]\n", __FUNCTION__, res));
+	}
+	else
+	{
+		DHD_ERROR(("%s:keep_alive set ok. res[%d]\n", __FUNCTION__, res));
+	}
+
+	return res;
+}
+
+int wl_android_set_rt_coex(struct net_device *dev, char *command, int total_len)
+{
+	int error = 0;
+	int bitmap = 0;
+
+	if (sscanf(command, "%*s %x", &bitmap) != 1) {
+		DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+		return -1;
+	}
+
+	error = wldev_iovar_setint(dev, "mws_coex_bitmap", bitmap);
+	if (error) {
+		DHD_ERROR(("%s: Failed to set rt coex bitmap %x, error = %d\n",
+		__FUNCTION__, bitmap, error));
+		return -1;
+	}
+	else
+		DHD_ERROR(("%s: succeeded to set rt coex bitmap %x, error = %d\n",
+		__FUNCTION__, bitmap, error));
+	return 0;
+}
+
+int wl_android_config_rt_coex(struct net_device *dev, char *command, int total_len)
+{
+	int error = 0;
+
+	/* Set coexistence settings for real-time LTE/WiFi */
+
+	/* mws_wlanrx_prot - Protection mode for WLAN Rx */
+	error = wldev_iovar_setint(dev, "mws_wlanrx_prot", MWS_WLANRX_PROT);
+	if (error) {
+		DHD_ERROR(("%s: Failed to configure RT coex: mws_wlanrx_prot, error = %d\n",
+		__FUNCTION__, error));
+		return -1;
+	}
+	/* mws_ltetx_adv - Set LTE_TX advance duration */
+	error = wldev_iovar_setint(dev, "mws_ltetx_adv" , MWS_LTETX_ADV);
+	if (error) {
+		DHD_ERROR(("%s: Failed to configure RT coex: mws_ltetx_adv, error = %d\n",
+		__FUNCTION__, error));
+		return -1;
+	}
+	/* mws_scanjoin_prot - Enable Protection mode for LTE Rx */
+	error = wldev_iovar_setint(dev, "mws_scanjoin_prot" , MWS_SCANJOIN_PROT);
+	if (error) {
+		DHD_ERROR(("%s: Failed to configure RT coex: mws_scanjoin_prot, error = %d\n",
+		__FUNCTION__, error));
+		return -1;
+	}
+	/* mws_lterx_prot - Enable Protection mode for LTE RX */
+	error = wldev_iovar_setint(dev, "mws_lterx_prot" , MWS_LTERX_PROT);
+	if (error) {
+		DHD_ERROR(("%s: Failed to configure RT coex: mws_lterx_prot, error = %d\n",
+		__FUNCTION__, error));
+		return -1;
+	}
+	DHD_ERROR(("%s: succeeded to configure RT coex", __FUNCTION__));
+	return 0;
+}
+
+int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+#define PRIVATE_COMMAND_MAX_LEN	8192
+	int ret = 0;
+	char *command = NULL;
+	int bytes_written = 0;
+	android_wifi_priv_cmd priv_cmd;
+
+	net_os_wake_lock(net);
+
+	if (!ifr->ifr_data) {
+		ret = -EINVAL;
+		goto exit;
+	}
+#ifdef CONFIG_COMPAT
+	if (is_compat_task()) {
+		compat_android_wifi_priv_cmd compat_priv_cmd;
+		if (copy_from_user(&compat_priv_cmd, ifr->ifr_data,
+			sizeof(compat_android_wifi_priv_cmd))) {
+			ret = -EFAULT;
+			goto exit;
+		}
+		priv_cmd.buf = compat_ptr(compat_priv_cmd.buf);
+		priv_cmd.used_len = compat_priv_cmd.used_len;
+		priv_cmd.total_len = compat_priv_cmd.total_len;
+	} else
+#endif /* CONFIG_COMPAT */
+	{
+		if (copy_from_user(&priv_cmd, ifr->ifr_data, sizeof(android_wifi_priv_cmd))) {
+			ret = -EFAULT;
+			goto exit;
+		}
+	}
+	if ((priv_cmd.total_len > PRIVATE_COMMAND_MAX_LEN) || (priv_cmd.total_len < 0)) {
+		DHD_ERROR(("%s: invalid length of private command : %d\n",
+			__FUNCTION__, priv_cmd.total_len));
+		ret = -EINVAL;
+		goto exit;
+	}
+	command = kmalloc((priv_cmd.total_len + 1), GFP_KERNEL);
+	if (!command)
+	{
+		DHD_ERROR(("%s: failed to allocate memory\n", __FUNCTION__));
+		ret = -ENOMEM;
+		goto exit;
+	}
+	if (copy_from_user(command, priv_cmd.buf, priv_cmd.total_len)) {
+		ret = -EFAULT;
+		goto exit;
+	}
+	command[priv_cmd.total_len] = '\0';
+
+	DHD_INFO(("%s: Android private cmd \"%s\" on %s\n", __FUNCTION__, command, ifr->ifr_name));
+
+	if (strnicmp(command, CMD_START, strlen(CMD_START)) == 0) {
+		DHD_INFO(("%s, Received regular START command\n", __FUNCTION__));
+		bytes_written = wl_android_wifi_on(net);
+	}
+	else if (strnicmp(command, CMD_SETFWPATH, strlen(CMD_SETFWPATH)) == 0) {
+		bytes_written = wl_android_set_fwpath(net, command, priv_cmd.total_len);
+	}
+
+	if (!g_wifi_on) {
+		DHD_ERROR(("%s: Ignore private cmd \"%s\" - iface %s is down\n",
+			__FUNCTION__, command, ifr->ifr_name));
+		ret = 0;
+		goto exit;
+	}
+
+	if (strnicmp(command, CMD_STOP, strlen(CMD_STOP)) == 0) {
+		bytes_written = wl_android_wifi_off(net);
+	}
+	else if (strnicmp(command, CMD_SCAN_ACTIVE, strlen(CMD_SCAN_ACTIVE)) == 0) {
+		/* TBD: SCAN-ACTIVE */
+	}
+	else if (strnicmp(command, CMD_SCAN_PASSIVE, strlen(CMD_SCAN_PASSIVE)) == 0) {
+		/* TBD: SCAN-PASSIVE */
+	}
+	else if (strnicmp(command, CMD_RSSI, strlen(CMD_RSSI)) == 0) {
+		bytes_written = wl_android_get_rssi(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_LINKSPEED, strlen(CMD_LINKSPEED)) == 0) {
+		bytes_written = wl_android_get_link_speed(net, command, priv_cmd.total_len);
+	}
+#ifdef PKT_FILTER_SUPPORT
+	else if (strnicmp(command, CMD_RXFILTER_START, strlen(CMD_RXFILTER_START)) == 0) {
+		bytes_written = net_os_enable_packet_filter(net, 1);
+	}
+	else if (strnicmp(command, CMD_RXFILTER_STOP, strlen(CMD_RXFILTER_STOP)) == 0) {
+		bytes_written = net_os_enable_packet_filter(net, 0);
+	}
+	else if (strnicmp(command, CMD_RXFILTER_ADD, strlen(CMD_RXFILTER_ADD)) == 0) {
+		int filter_num = *(command + strlen(CMD_RXFILTER_ADD) + 1) - '0';
+		bytes_written = net_os_rxfilter_add_remove(net, TRUE, filter_num);
+	}
+	else if (strnicmp(command, CMD_RXFILTER_REMOVE, strlen(CMD_RXFILTER_REMOVE)) == 0) {
+		int filter_num = *(command + strlen(CMD_RXFILTER_REMOVE) + 1) - '0';
+		bytes_written = net_os_rxfilter_add_remove(net, FALSE, filter_num);
+	}
+#endif /* PKT_FILTER_SUPPORT */
+	else if (strnicmp(command, CMD_BTCOEXSCAN_START, strlen(CMD_BTCOEXSCAN_START)) == 0) {
+		/* TBD: BTCOEXSCAN-START */
+	}
+	else if (strnicmp(command, CMD_BTCOEXSCAN_STOP, strlen(CMD_BTCOEXSCAN_STOP)) == 0) {
+		/* TBD: BTCOEXSCAN-STOP */
+	}
+	else if (strnicmp(command, CMD_BTCOEXMODE, strlen(CMD_BTCOEXMODE)) == 0) {
+#ifdef WL_CFG80211
+		void *dhdp = wl_cfg80211_get_dhdp();
+		bytes_written = wl_cfg80211_set_btcoex_dhcp(net, dhdp, command);
+#else
+#ifdef PKT_FILTER_SUPPORT
+		uint mode = *(command + strlen(CMD_BTCOEXMODE) + 1) - '0';
+
+		if (mode == 1)
+			net_os_enable_packet_filter(net, 0); /* DHCP starts */
+		else
+			net_os_enable_packet_filter(net, 1); /* DHCP ends */
+#endif /* PKT_FILTER_SUPPORT */
+#endif /* WL_CFG80211 */
+	}
+	else if (strnicmp(command, CMD_SETSUSPENDOPT, strlen(CMD_SETSUSPENDOPT)) == 0) {
+		bytes_written = wl_android_set_suspendopt(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_SETSUSPENDMODE, strlen(CMD_SETSUSPENDMODE)) == 0) {
+		bytes_written = wl_android_set_suspendmode(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_SETBAND, strlen(CMD_SETBAND)) == 0) {
+		uint band = *(command + strlen(CMD_SETBAND) + 1) - '0';
+		bytes_written = wldev_set_band(net, band);
+	}
+	else if (strnicmp(command, CMD_GETBAND, strlen(CMD_GETBAND)) == 0) {
+		bytes_written = wl_android_get_band(net, command, priv_cmd.total_len);
+	}
+#ifdef WL_CFG80211
+	/* The CUSTOMER_SET_COUNTRY feature is defined only for the GGSM model */
+	else if (strnicmp(command, CMD_COUNTRY, strlen(CMD_COUNTRY)) == 0) {
+		char *country_code = command + strlen(CMD_COUNTRY) + 1;
+		bytes_written = wldev_set_country(net, country_code, true, true);
+	}
+#endif /* WL_CFG80211 */
+
+
+#ifdef PNO_SUPPORT
+	else if (strnicmp(command, CMD_PNOSSIDCLR_SET, strlen(CMD_PNOSSIDCLR_SET)) == 0) {
+		bytes_written = dhd_dev_pno_stop_for_ssid(net);
+	}
+#ifndef WL_SCHED_SCAN
+	else if (strnicmp(command, CMD_PNOSETUP_SET, strlen(CMD_PNOSETUP_SET)) == 0) {
+		bytes_written = wl_android_set_pno_setup(net, command, priv_cmd.total_len);
+	}
+#endif /* !WL_SCHED_SCAN */
+	else if (strnicmp(command, CMD_PNOENABLE_SET, strlen(CMD_PNOENABLE_SET)) == 0) {
+		int enable = *(command + strlen(CMD_PNOENABLE_SET) + 1) - '0';
+		bytes_written = (enable)? 0 : dhd_dev_pno_stop_for_ssid(net);
+	}
+	else if (strnicmp(command, CMD_WLS_BATCHING, strlen(CMD_WLS_BATCHING)) == 0) {
+		bytes_written = wls_parse_batching_cmd(net, command, priv_cmd.total_len);
+	}
+#endif /* PNO_SUPPORT */
+	else if (strnicmp(command, CMD_P2P_DEV_ADDR, strlen(CMD_P2P_DEV_ADDR)) == 0) {
+		bytes_written = wl_android_get_p2p_dev_addr(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_P2P_SET_NOA, strlen(CMD_P2P_SET_NOA)) == 0) {
+		int skip = strlen(CMD_P2P_SET_NOA) + 1;
+		bytes_written = wl_cfg80211_set_p2p_noa(net, command + skip,
+			priv_cmd.total_len - skip);
+	}
+#if !defined WL_ENABLE_P2P_IF
+	else if (strnicmp(command, CMD_P2P_GET_NOA, strlen(CMD_P2P_GET_NOA)) == 0) {
+		bytes_written = wl_cfg80211_get_p2p_noa(net, command, priv_cmd.total_len);
+	}
+#endif /* WL_ENABLE_P2P_IF */
+	else if (strnicmp(command, CMD_P2P_SET_PS, strlen(CMD_P2P_SET_PS)) == 0) {
+		int skip = strlen(CMD_P2P_SET_PS) + 1;
+		bytes_written = wl_cfg80211_set_p2p_ps(net, command + skip,
+			priv_cmd.total_len - skip);
+	}
+#ifdef WL_CFG80211
+	else if (strnicmp(command, CMD_SET_AP_WPS_P2P_IE,
+		strlen(CMD_SET_AP_WPS_P2P_IE)) == 0) {
+		int skip = strlen(CMD_SET_AP_WPS_P2P_IE) + 3;
+		bytes_written = wl_cfg80211_set_wps_p2p_ie(net, command + skip,
+			priv_cmd.total_len - skip, *(command + skip - 2) - '0');
+	}
+#endif /* WL_CFG80211 */
+	else if (strnicmp(command, CMD_OKC_SET_PMK, strlen(CMD_OKC_SET_PMK)) == 0)
+		bytes_written = wl_android_set_pmk(net, command, priv_cmd.total_len);
+	else if (strnicmp(command, CMD_OKC_ENABLE, strlen(CMD_OKC_ENABLE)) == 0)
+		bytes_written = wl_android_okc_enable(net, command, priv_cmd.total_len);
+#if defined(WL_SUPPORT_AUTO_CHANNEL)
+	else if (strnicmp(command, CMD_GET_BEST_CHANNELS,
+		strlen(CMD_GET_BEST_CHANNELS)) == 0) {
+		bytes_written = wl_cfg80211_get_best_channels(net, command,
+			priv_cmd.total_len);
+	}
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+	else if (strnicmp(command, CMD_HAPD_MAC_FILTER, strlen(CMD_HAPD_MAC_FILTER)) == 0) {
+		int skip = strlen(CMD_HAPD_MAC_FILTER) + 1;
+		wl_android_set_mac_address_filter(net, (const char*)command+skip);
+	}
+	else if (strnicmp(command, CMD_SETROAMMODE, strlen(CMD_SETROAMMODE)) == 0)
+		bytes_written = wl_android_set_roam_mode(net, command, priv_cmd.total_len);
+#if defined(BCMFW_ROAM_ENABLE)
+	else if (strnicmp(command, CMD_SET_ROAMPREF, strlen(CMD_SET_ROAMPREF)) == 0) {
+		bytes_written = wl_android_set_roampref(net, command, priv_cmd.total_len);
+	}
+#endif /* BCMFW_ROAM_ENABLE */
+	else if (strnicmp(command, CMD_MIRACAST, strlen(CMD_MIRACAST)) == 0)
+		bytes_written = wl_android_set_miracast(net, command, priv_cmd.total_len);
+	else if (strnicmp(command, CMD_SETIBSSBEACONOUIDATA,
+		strlen(CMD_SETIBSSBEACONOUIDATA)) == 0)
+		bytes_written = wl_android_set_ibss_beacon_ouidata(net, command,
+			priv_cmd.total_len);
+	else if (strnicmp(command, CMD_KEEP_ALIVE, strlen(CMD_KEEP_ALIVE)) == 0) {
+		int skip = strlen(CMD_KEEP_ALIVE) + 1;
+		bytes_written = wl_keep_alive_set(net, command + skip, priv_cmd.total_len - skip);
+	}
+	else if (strnicmp(command, CMD_ROAM_OFFLOAD, strlen(CMD_ROAM_OFFLOAD)) == 0) {
+		int enable = *(command + strlen(CMD_ROAM_OFFLOAD) + 1) - '0';
+		bytes_written = wl_cfg80211_enable_roam_offload(net, enable);
+	}
+#ifdef CONNECTION_STATISTICS
+	else if (strnicmp(command, CMD_GET_CONNECTION_STATS,
+		strlen(CMD_GET_CONNECTION_STATS)) == 0) {
+		bytes_written = wl_android_get_connection_stats(net, command,
+			priv_cmd.total_len);
+	}
+#endif
+	else if (strnicmp(command, CMD_RTCOEX, strlen(CMD_RTCOEX)) == 0) {
+		bytes_written = wl_android_set_rt_coex(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_CONFIG_RTCOEX, strlen(CMD_CONFIG_RTCOEX)) == 0)
+		bytes_written = wl_android_config_rt_coex(net, command, priv_cmd.total_len);
+	else {
+		DHD_ERROR(("Unknown PRIVATE command %s - ignored\n", command));
+		snprintf(command, 3, "OK");
+		bytes_written = strlen("OK");
+	}
+
+	if (bytes_written >= 0) {
+		if ((bytes_written == 0) && (priv_cmd.total_len > 0))
+			command[0] = '\0';
+		if (bytes_written >= priv_cmd.total_len) {
+			DHD_ERROR(("%s: bytes_written = %d\n", __FUNCTION__, bytes_written));
+			bytes_written = priv_cmd.total_len;
+		} else {
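+			/* Reply fits with room to spare; count the trailing NUL
+			 * so user space receives a terminated string.
+			 */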
+			bytes_written++;
+		}
+		priv_cmd.used_len = bytes_written;
+		if (copy_to_user(priv_cmd.buf, command, bytes_written)) {
+			DHD_ERROR(("%s: failed to copy data to user buffer\n", __FUNCTION__));
+			ret = -EFAULT;
+		}
+	}
+	else {
+		ret = bytes_written;
+	}
+
+exit:
+	net_os_wake_unlock(net);
+	if (command) {
+		kfree(command);
+	}
+
+	return ret;
+}
+
+int wl_android_init(void)
+{
+	int ret = 0;
+
+#ifdef ENABLE_INSMOD_NO_FW_LOAD
+	dhd_download_fw_on_driverload = FALSE;
+#endif /* ENABLE_INSMOD_NO_FW_LOAD */
+#if defined(CUSTOMER_HW2)
+	if (!iface_name[0]) {
+		memset(iface_name, 0, IFNAMSIZ);
+		bcm_strncpy_s(iface_name, IFNAMSIZ, "wlan", IFNAMSIZ);
+	}
+#endif /* CUSTOMER_HW2 */
+
+
+	return ret;
+}
+
+int wl_android_exit(void)
+{
+	int ret = 0;
+	struct io_cfg *cur, *q;
+
+
+	list_for_each_entry_safe(cur, q, &miracast_resume_list, list) {
+		list_del(&cur->list);
+		kfree(cur);
+	}
+
+	return ret;
+}
+
+void wl_android_post_init(void)
+{
+
+#ifdef ENABLE_4335BT_WAR
+	bcm_bt_unlock(lock_cookie_wifi);
+	printk("%s: btlock released\n", __FUNCTION__);
+#endif /* ENABLE_4335BT_WAR */
+
+	if (!dhd_download_fw_on_driverload)
+		g_wifi_on = FALSE;
+}
diff --git a/drivers/staging/edison-bcm43340/wl_android.h b/drivers/staging/edison-bcm43340/wl_android.h
new file mode 100644
index 0000000..d58c399
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/wl_android.h
@@ -0,0 +1,68 @@
+/*
+ * Linux cfg80211 driver - Android related functions
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_android.h 459598 2014-03-04 09:14:30Z $
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <wldev_common.h>
+
+/* If any feature uses the Generic Netlink Interface, put it here to enable WL_GENL
+ * automatically
+ */
+
+
+
+/**
+ * Android platform dependent functions; feel free to add Android-specific functions here
+ * (save the macros in dhd). Please do NOT declare functions that are not exposed to dhd
+ * or cfg; define them as static in wl_android.c.
+ */
+
+/**
+ * wl_android_init will be called from module init function (dhd_module_init now), similarly
+ * wl_android_exit will be called from module exit function (dhd_module_cleanup now)
+ */
+int wl_android_init(void);
+int wl_android_exit(void);
+void wl_android_post_init(void);
+int wl_android_wifi_on(struct net_device *dev);
+int wl_android_wifi_off(struct net_device *dev);
+int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd);
+bool wifi_irq_is_fastirq(void);
+
+/* hostap mac mode */
+#define MACLIST_MODE_DISABLED   0
+#define MACLIST_MODE_DENY       1
+#define MACLIST_MODE_ALLOW      2
+
+/* max number of assoc list */
+#define MAX_NUM_OF_ASSOCLIST    64
+
+/* max number of mac filter list
+ * restrict max number to 10 as maximum cmd string size is 255
+ */
+#define MAX_NUM_MAC_FILT        10
+
+int wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist);
diff --git a/drivers/staging/edison-bcm43340/wl_cfg80211.c b/drivers/staging/edison-bcm43340/wl_cfg80211.c
new file mode 100644
index 0000000..a665633
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/wl_cfg80211.c
@@ -0,0 +1,12201 @@
+/*
+ * Linux cfg80211 driver
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfg80211.c 476678 2014-05-09 14:46:37Z $
+ */
+/* */
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <linux/kernel.h>
+
+#include <bcmutils.h>
+#include <bcmwifi_channels.h>
+#include <bcmendian.h>
+#include <proto/ethernet.h>
+#include <proto/802.11.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_linux.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#include <dhd_cfg80211.h>
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif /* PNO_SUPPORT */
+
+#include <proto/ethernet.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <linux/wait.h>
+#include <net/cfg80211.h>
+#include <net/rtnetlink.h>
+
+#include <wlioctl.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+#include <wl_android.h>
+
+//#include <linux/kct.h>
+
+#ifdef PROP_TXSTATUS
+#include <dhd_wlfc.h>
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) && defined(WL_VENDOR_EXT_SUPPORT)
+#include <wl_cfgvendor.h>
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) && defined(WL_VENDOR_EXT_SUPPORT) */
+#ifdef WL11U
+#if !defined(WL_ENABLE_P2P_IF) && !defined(WL_CFG80211_P2P_DEV_IF)
+#error You should enable 'WL_ENABLE_P2P_IF' or 'WL_CFG80211_P2P_DEV_IF' \
+	according to the kernel version; this is supported only in Android-JB
+#endif /* !WL_ENABLE_P2P_IF && !WL_CFG80211_P2P_DEV_IF */
+#endif /* WL11U */
+
+#ifdef BCMWAPI_WPI
+/* these items should eventually go into wireless.h of the linux system header file dir */
+#ifndef IW_ENCODE_ALG_SM4
+#define IW_ENCODE_ALG_SM4 0x20
+#endif
+
+#ifndef IW_AUTH_WAPI_ENABLED
+#define IW_AUTH_WAPI_ENABLED 0x20
+#endif
+
+#ifndef IW_AUTH_WAPI_VERSION_1
+#define IW_AUTH_WAPI_VERSION_1  0x00000008
+#endif
+
+#ifndef IW_AUTH_CIPHER_SMS4
+#define IW_AUTH_CIPHER_SMS4     0x00000020
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_PSK
+#define IW_AUTH_KEY_MGMT_WAPI_PSK 4
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_CERT
+#define IW_AUTH_KEY_MGMT_WAPI_CERT 8
+#endif
+#endif /* BCMWAPI_WPI */
+
+#ifdef BCMWAPI_WPI
+#define IW_WSEC_ENABLED(wsec)   ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | SMS4_ENABLED))
+#else /* BCMWAPI_WPI */
+#define IW_WSEC_ENABLED(wsec)   ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))
+#endif /* BCMWAPI_WPI */
+
+static struct device *cfg80211_parent_dev = NULL;
+/* g_bcm_cfg should be static. Do not change */
+static struct bcm_cfg80211 *g_bcm_cfg = NULL;
+u32 wl_dbg_level = WL_DBG_ERR;
+
+#define MAX_WAIT_TIME 1500
+
+#ifdef VSDB
+/* sleep time to protect an ongoing STA connection or connection attempt during continuous af tx or peer discovery */
+#define DEFAULT_SLEEP_TIME_VSDB		120
+#define OFF_CHAN_TIME_THRESHOLD_MS	200
+#define AF_RETRY_DELAY_TIME			40
+
+/* if sta is connected or connecting, sleep for a while before retrying af tx or finding a peer */
+#define WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg)	\
+	do {	\
+		if (wl_get_drv_status(cfg, CONNECTED, bcmcfg_to_prmry_ndev(cfg)) ||	\
+			wl_get_drv_status(cfg, CONNECTING, bcmcfg_to_prmry_ndev(cfg))) {	\
+			OSL_SLEEP(DEFAULT_SLEEP_TIME_VSDB);			\
+		}	\
+	} while (0)
+#else /* VSDB */
+/* if not VSDB, do nothing */
+#define WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg)
+#endif /* VSDB */
+
+#ifdef WL_CFG80211_SYNC_GON
+#define WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg) \
+	(wl_get_drv_status_all(cfg, SENDING_ACT_FRM) || \
+		wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN))
+#else
+#define WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg) wl_get_drv_status_all(cfg, SENDING_ACT_FRM)
+#endif /* WL_CFG80211_SYNC_GON */
+
+#define DNGL_FUNC(func, parameters) func parameters
+#define COEX_DHCP
+
+#define WLAN_EID_SSID	0
+#define CH_MIN_5G_CHANNEL 34
+#define CH_MIN_2G_CHANNEL 1
+
+/* This is to override regulatory domains defined in the cfg80211 module (reg.c).
+ * By default, the world regulatory domain defined in reg.c sets the flags
+ * NL80211_RRF_PASSIVE_SCAN and NL80211_RRF_NO_IBSS for 5GHz channels (36..48 and 149..165).
+ * Because of these flags, wpa_supplicant doesn't start p2p operations on 5GHz channels.
+ * All changes to the world regulatory domain are to be made here.
+ */
+static const struct ieee80211_regdomain brcm_regdom = {
+	.n_reg_rules = 4,
+	.alpha2 =  "99",
+	.reg_rules = {
+		/* IEEE 802.11b/g, channels 1..11 */
+		REG_RULE(2412-10, 2472+10, 40, 6, 20, 0),
+		/* If any */
+		/* IEEE 802.11 channel 14 - Only JP enables
+		 * this and for 802.11b only
+		 */
+		REG_RULE(2484-10, 2484+10, 20, 6, 20, 0),
+		/* IEEE 802.11a, channel 36..64 */
+		REG_RULE(5150-10, 5350+10, 80, 6, 20, 0),
+		/* IEEE 802.11a, channel 100..165 */
+		REG_RULE(5470-10, 5850+10, 80, 6, 20, 0), }
+};
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \
+	(defined(WL_IFACE_COMB_NUM_CHANNELS) || defined(WL_CFG80211_P2P_DEV_IF))
+/*
+ * Possible interface combinations supported by driver
+ *
+ * ADHOC Mode     - #ADHOC <= 1 on channels = 1
+ * SoftAP Mode    - #AP <= 1 on channels = 1
+ * STA + P2P Mode - #STA <= 2, #{P2P-GO, P2P-client} <= 1, #P2P-device <= 1
+ *                  on channels = 2
+ */
+static const struct ieee80211_iface_limit common_if_limits[] = {
+	{
+	.max = 1,
+	.types = BIT(NL80211_IFTYPE_AP),
+	},
+	{
+	/*
+	 * During P2P-GO removal, the P2P-GO interface is first changed to STA and
+	 * only then removed. So set the maximum possible number of STA interfaces
+	 * according to the kernel version.
+	 *
+	 * less than linux-3.8 - max:3 (wlan0 + p2p0 + group removal of p2p-p2p0-x)
+	 * linux-3.8 and above - max:2 (wlan0 + group removal of p2p-wlan0-x)
+	 */
+#ifdef WL_ENABLE_P2P_IF
+	.max = 3,
+#else
+	.max = 2,
+#endif /* WL_ENABLE_P2P_IF */
+	.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+	.max = 2,
+	.types = BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_CLIENT),
+	},
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	{
+	.max = 1,
+	.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+	},
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	{
+	.max = 1,
+	.types = BIT(NL80211_IFTYPE_ADHOC),
+	},
+};
+#ifdef BCM4330_CHIP
+#define NUM_DIFF_CHANNELS 1
+#else
+#define NUM_DIFF_CHANNELS 2
+#endif
+static const struct ieee80211_iface_combination
+common_iface_combinations[] = {
+	{
+	.num_different_channels = NUM_DIFF_CHANNELS,
+	.max_interfaces = 4,
+	.limits = common_if_limits,
+	.n_limits = ARRAY_SIZE(common_if_limits),
+	},
+};
+#endif /* LINUX_VER >= 3.0 && (WL_IFACE_COMB_NUM_CHANNELS || WL_CFG80211_P2P_DEV_IF) */
+
+/* Data Element Definitions */
+#define WPS_ID_CONFIG_METHODS     0x1008
+#define WPS_ID_REQ_TYPE           0x103A
+#define WPS_ID_DEVICE_NAME        0x1011
+#define WPS_ID_VERSION            0x104A
+#define WPS_ID_DEVICE_PWD_ID      0x1012
+#define WPS_ID_REQ_DEV_TYPE       0x106A
+#define WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS 0x1053
+#define WPS_ID_PRIM_DEV_TYPE      0x1054
+
+/* Device Password ID */
+#define DEV_PW_DEFAULT 0x0000
+#define DEV_PW_USER_SPECIFIED 0x0001
+#define DEV_PW_MACHINE_SPECIFIED 0x0002
+#define DEV_PW_REKEY 0x0003
+#define DEV_PW_PUSHBUTTON 0x0004
+#define DEV_PW_REGISTRAR_SPECIFIED 0x0005
+
+/* Config Methods */
+#define WPS_CONFIG_USBA 0x0001
+#define WPS_CONFIG_ETHERNET 0x0002
+#define WPS_CONFIG_LABEL 0x0004
+#define WPS_CONFIG_DISPLAY 0x0008
+#define WPS_CONFIG_EXT_NFC_TOKEN 0x0010
+#define WPS_CONFIG_INT_NFC_TOKEN 0x0020
+#define WPS_CONFIG_NFC_INTERFACE 0x0040
+#define WPS_CONFIG_PUSHBUTTON 0x0080
+#define WPS_CONFIG_KEYPAD 0x0100
+#define WPS_CONFIG_VIRT_PUSHBUTTON 0x0280
+#define WPS_CONFIG_PHY_PUSHBUTTON 0x0480
+#define WPS_CONFIG_VIRT_DISPLAY 0x2008
+#define WPS_CONFIG_PHY_DISPLAY 0x4008
+
+#define PM_BLOCK 1
+#define PM_ENABLE 0
+
+
+#ifdef MFP
+#define WL_AKM_SUITE_MFP_1X  0x000FAC05
+#define WL_AKM_SUITE_MFP_PSK 0x000FAC06
+#endif /* MFP */
+
+#ifndef RSSI_OFFSET
+#define RSSI_OFFSET	0
+#endif
+
+#ifndef IBSS_COALESCE_ALLOWED
+#define IBSS_COALESCE_ALLOWED 1
+#endif
+
+#ifndef IBSS_INITIAL_SCAN_ALLOWED
+#define IBSS_INITIAL_SCAN_ALLOWED 0
+#endif
+
+#define CUSTOM_RETRY_MASK 0xff000000 /* Mask for retry counter of custom dwell time */
+/*
+ * cfg80211_ops api/callback list
+ */
+static s32 wl_frame_get_mgmt(u16 fc, const struct ether_addr *da,
+	const struct ether_addr *sa, const struct ether_addr *bssid,
+	u8 **pheader, u32 *body_len, u8 *pbody);
+static s32 __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request,
+	struct cfg80211_ssid *this_ssid);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32
+wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request);
+#else
+static s32
+wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed);
+static s32 wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_ibss_params *params);
+static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy,
+	struct net_device *dev);
+static s32 wl_cfg80211_get_station(struct wiphy *wiphy,
+	struct net_device *dev, u8 *mac,
+	struct station_info *sinfo);
+static s32 wl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
+	struct net_device *dev, bool enabled,
+	s32 timeout);
+static int wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
+	u16 reason_code);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32
+wl_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+	enum nl80211_tx_power_setting type, s32 mbm);
+#else
+static s32
+wl_cfg80211_set_tx_power(struct wiphy *wiphy,
+	enum nl80211_tx_power_setting type, s32 dbm);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy,
+	struct wireless_dev *wdev, s32 *dbm);
+#else
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+static s32 wl_cfg80211_config_default_key(struct wiphy *wiphy,
+	struct net_device *dev,
+	u8 key_idx, bool unicast, bool multicast);
+static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr,
+	struct key_params *params);
+static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr);
+static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr,
+	void *cookie, void (*callback) (void *cookie,
+	struct key_params *params));
+static s32 wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
+	struct net_device *dev,	u8 key_idx);
+static s32 wl_cfg80211_resume(struct wiphy *wiphy);
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
+	2, 0))
+static s32 wl_cfg80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
+	bcm_struct_cfgdev *cfgdev, u64 cookie);
+static s32 wl_cfg80211_del_station(struct wiphy *wiphy,
+	struct net_device *ndev, u8* mac_addr);
+static s32 wl_cfg80211_change_station(struct wiphy *wiphy,
+	struct net_device *dev, u8 *mac, struct station_parameters *params);
+#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VER >= KERNEL_VERSION(3, 2, 0) */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
+#else
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy);
+#endif
+static s32 wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_pmksa *pmksa);
+static s32 wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_pmksa *pmksa);
+static s32 wl_cfg80211_flush_pmksa(struct wiphy *wiphy,
+	struct net_device *dev);
+static void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg);
+static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
+	struct net_device *ndev, bool aborted, bool fw_abort);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0))
+static s32 wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+	u8 *peer, enum nl80211_tdls_operation oper);
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0) */
+#ifdef WL_SCHED_SCAN
+static int wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev);
+#endif
+
+/*
+ * event & event Q handlers for cfg80211 interfaces
+ */
+static s32 wl_create_event_handler(struct bcm_cfg80211 *cfg);
+static void wl_destroy_event_handler(struct bcm_cfg80211 *cfg);
+static s32 wl_event_handler(void *data);
+static void wl_init_eq(struct bcm_cfg80211 *cfg);
+static void wl_flush_eq(struct bcm_cfg80211 *cfg);
+static unsigned long wl_lock_eq(struct bcm_cfg80211 *cfg);
+static void wl_unlock_eq(struct bcm_cfg80211 *cfg, unsigned long flags);
+static void wl_init_eq_lock(struct bcm_cfg80211 *cfg);
+static void wl_init_event_handler(struct bcm_cfg80211 *cfg);
+static struct wl_event_q *wl_deq_event(struct bcm_cfg80211 *cfg);
+static s32 wl_enq_event(struct bcm_cfg80211 *cfg, struct net_device *ndev, u32 type,
+	const wl_event_msg_t *msg, void *data);
+static void wl_put_event(struct wl_event_q *e);
+static void wl_wakeup_event(struct bcm_cfg80211 *cfg);
+static s32 wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data);
+static s32 wl_notify_connect_status(struct bcm_cfg80211 *cfg,
+	bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+static s32 wl_notify_roaming_status(struct bcm_cfg80211 *cfg,
+	bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+static s32 wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+static s32 wl_bss_connect_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data, bool completed);
+static s32 wl_bss_roaming_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data);
+static s32 wl_notify_mic_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+#ifdef WL_SCHED_SCAN
+static s32
+wl_notify_sched_scan_results(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data);
+#endif /* WL_SCHED_SCAN */
+#ifdef PNO_SUPPORT
+static s32 wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+#endif /* PNO_SUPPORT */
+static s32 wl_notifier_change_state(struct bcm_cfg80211 *cfg, struct net_info *_net_info,
+	enum wl_status state, bool set);
+
+#ifdef WLTDLS
+static s32 wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+#endif /* WLTDLS */
+/*
+ * register/deregister parent device
+ */
+static void wl_cfg80211_clear_parent_dev(void);
+
+/*
+ * ioctl utilities
+ */
+
+/*
+ * cfg80211 set_wiphy_params utilities
+ */
+static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold);
+static s32 wl_set_rts(struct net_device *dev, u32 rts_threshold);
+static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l);
+
+/*
+ * cfg profile utilities
+ */
+static s32 wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data, s32 item);
+static void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item);
+static void wl_init_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+
+/*
+ * cfg80211 connect utilities
+ */
+static s32 wl_set_wpa_version(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_set_auth_type(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_set_set_cipher(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_set_key_mgmt(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_set_set_sharedkey(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+#ifdef BCMWAPI_WPI
+static s32 wl_set_set_wapi_ie(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+#endif
+static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+static void wl_ch_to_chanspec(int ch,
+	struct wl_join_params *join_params, size_t *join_params_size);
+
+/*
+ * information element utilities
+ */
+static void wl_rst_ie(struct bcm_cfg80211 *cfg);
+static __used s32 wl_add_ie(struct bcm_cfg80211 *cfg, u8 t, u8 l, u8 *v);
+static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, u8 *ie_stream, u32 *ie_size, bool roam);
+static s32 wl_mrg_ie(struct bcm_cfg80211 *cfg, u8 *ie_stream, u16 ie_size);
+static s32 wl_cp_ie(struct bcm_cfg80211 *cfg, u8 *dst, u16 dst_size);
+static u32 wl_get_ielen(struct bcm_cfg80211 *cfg);
+#ifdef MFP
+static int wl_cfg80211_get_rsn_capa(bcm_tlv_t *wpa2ie, u8* capa);
+#endif
+
+#ifdef WL11U
+bcm_tlv_t *
+wl_cfg80211_find_interworking_ie(u8 *parse, u32 len);
+static s32
+wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag,
+            uint8 ie_id, uint8 *data, uint8 data_len);
+#endif /* WL11U */
+
+static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *dev, void *data);
+static void wl_free_wdev(struct bcm_cfg80211 *cfg);
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+static int
+wl_cfg80211_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+
+static s32 wl_inform_bss(struct bcm_cfg80211 *cfg);
+static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, struct wl_bss_info *bi, bool roam);
+static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool roam);
+static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy);
+s32 wl_cfg80211_channel_to_freq(u32 channel);
+
+
+static void wl_cfg80211_work_handler(struct work_struct *work);
+static s32 wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, const u8 *mac_addr,
+	struct key_params *params);
+/*
+ * key endianness swap utilities
+ */
+static void swap_key_from_BE(struct wl_wsec_key *key);
+static void swap_key_to_BE(struct wl_wsec_key *key);
+
+/*
+ * bcm_cfg80211 memory init/deinit utilities
+ */
+static s32 wl_init_priv_mem(struct bcm_cfg80211 *cfg);
+static void wl_deinit_priv_mem(struct bcm_cfg80211 *cfg);
+
+static void wl_delay(u32 ms);
+
+/*
+ * ibss mode utilities
+ */
+static bool wl_is_ibssmode(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+static __used bool wl_is_ibssstarter(struct bcm_cfg80211 *cfg);
+
+/*
+ * link up/down , default configuration utilities
+ */
+static s32 __wl_cfg80211_up(struct bcm_cfg80211 *cfg);
+static s32 __wl_cfg80211_down(struct bcm_cfg80211 *cfg);
+static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e);
+static bool wl_is_linkup(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e,
+	struct net_device *ndev);
+static bool wl_is_nonetwork(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e);
+static void wl_link_up(struct bcm_cfg80211 *cfg);
+static void wl_link_down(struct bcm_cfg80211 *cfg);
+static s32 wl_config_ifmode(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 iftype);
+static void wl_init_conf(struct wl_conf *conf);
+static s32 wl_cfg80211_handle_ifdel(struct bcm_cfg80211 *cfg, wl_if_event_info *if_event_info,
+	struct net_device* ndev);
+
+int wl_cfg80211_get_ioctl_version(void);
+
+/*
+ * find most significant bit set
+ */
+static __used u32 wl_find_msb(u16 bit16);
+
+/*
+ * rfkill support
+ */
+static int wl_setup_rfkill(struct bcm_cfg80211 *cfg, bool setup);
+static int wl_rfkill_set(void *data, bool blocked);
+#ifdef DEBUGFS_CFG80211
+static s32 wl_setup_debugfs(struct bcm_cfg80211 *cfg);
+static s32 wl_free_debugfs(struct bcm_cfg80211 *cfg);
+#endif
+
+static wl_scan_params_t *wl_cfg80211_scan_alloc_params(int channel,
+	int nprobes, int *out_params_size);
+static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role);
+
+#ifdef WL_CFG80211_ACL
+/* ACL */
+static int wl_cfg80211_set_mac_acl(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+	const struct cfg80211_acl_data *acl);
+#endif /* WL_CFG80211_ACL */
+
+/*
+ * Some external functions, TODO: move them to dhd_linux.h
+ */
+int dhd_add_monitor(char *name, struct net_device **new_ndev);
+int dhd_del_monitor(struct net_device *ndev);
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
+int dhd_start_xmit(struct sk_buff *skb, struct net_device *net);
+
+
+static int wl_cfg80211_delayed_roam(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const struct ether_addr *bssid);
+
+
+#define RETURN_EIO_IF_NOT_UP(wlpriv)						\
+do {									\
+	struct net_device *checkSysUpNDev = bcmcfg_to_prmry_ndev(wlpriv);       	\
+	if (unlikely(!wl_get_drv_status(wlpriv, READY, checkSysUpNDev))) {	\
+		WL_INFO(("device is not ready\n"));			\
+		return -EIO;						\
+	}								\
+} while (0)
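+/* The do { ... } while (0) wrapper makes the macro expand to a single
+ * statement, so it composes safely with if/else chains.
+ */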
+
+#ifdef RSSI_OFFSET
+static s32 wl_rssi_offset(s32 rssi)
+{
+	rssi += RSSI_OFFSET;
+	if (rssi > 0)
+		rssi = 0;
+	return rssi;
+}
+#else
+#define wl_rssi_offset(x)	x
+#endif
+
+#define IS_WPA_AKM(akm) ((akm) == RSN_AKM_NONE || 			\
+				 (akm) == RSN_AKM_UNSPECIFIED || 	\
+				 (akm) == RSN_AKM_PSK)
+
+
+extern int dhd_wait_pend8021x(struct net_device *dev);
+#ifdef PROP_TXSTATUS_VSDB
+extern int disable_proptx;
+#endif /* PROP_TXSTATUS_VSDB */
+
+#if (WL_DBG_LEVEL > 0)
+#define WL_DBG_ESTR_MAX	50
+static s8 wl_dbg_estr[][WL_DBG_ESTR_MAX] = {
+	"SET_SSID", "JOIN", "START", "AUTH", "AUTH_IND",
+	"DEAUTH", "DEAUTH_IND", "ASSOC", "ASSOC_IND", "REASSOC",
+	"REASSOC_IND", "DISASSOC", "DISASSOC_IND", "QUIET_START", "QUIET_END",
+	"BEACON_RX", "LINK", "MIC_ERROR", "NDIS_LINK", "ROAM",
+	"TXFAIL", "PMKID_CACHE", "RETROGRADE_TSF", "PRUNE", "AUTOAUTH",
+	"EAPOL_MSG", "SCAN_COMPLETE", "ADDTS_IND", "DELTS_IND", "BCNSENT_IND",
+	"BCNRX_MSG", "BCNLOST_MSG", "ROAM_PREP", "PFN_NET_FOUND",
+	"PFN_NET_LOST",
+	"RESET_COMPLETE", "JOIN_START", "ROAM_START", "ASSOC_START",
+	"IBSS_ASSOC",
+	"RADIO", "PSM_WATCHDOG", "WLC_E_CCX_ASSOC_START", "WLC_E_CCX_ASSOC_ABORT",
+	"PROBREQ_MSG",
+	"SCAN_CONFIRM_IND", "PSK_SUP", "COUNTRY_CODE_CHANGED",
+	"EXCEEDED_MEDIUM_TIME", "ICV_ERROR",
+	"UNICAST_DECODE_ERROR", "MULTICAST_DECODE_ERROR", "TRACE",
+	"WLC_E_BTA_HCI_EVENT", "IF", "WLC_E_P2P_DISC_LISTEN_COMPLETE",
+	"RSSI", "PFN_SCAN_COMPLETE", "WLC_E_EXTLOG_MSG",
+	"ACTION_FRAME", "ACTION_FRAME_COMPLETE", "WLC_E_PRE_ASSOC_IND",
+	"WLC_E_PRE_REASSOC_IND", "WLC_E_CHANNEL_ADOPTED", "WLC_E_AP_STARTED",
+	"WLC_E_DFS_AP_STOP", "WLC_E_DFS_AP_RESUME", "WLC_E_WAI_STA_EVENT",
+	"WLC_E_WAI_MSG", "WLC_E_ESCAN_RESULT", "WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE",
+	"WLC_E_PROBRESP_MSG", "WLC_E_P2P_PROBREQ_MSG", "WLC_E_DCS_REQUEST", "WLC_E_FIFO_CREDIT_MAP",
+	"WLC_E_ACTION_FRAME_RX", "WLC_E_WAKE_EVENT", "WLC_E_RM_COMPLETE"
+};
+#endif				/* WL_DBG_LEVEL */
+
+#define CHAN2G(_channel, _freq, _flags) {			\
+	.band			= IEEE80211_BAND_2GHZ,		\
+	.center_freq		= (_freq),			\
+	.hw_value		= (_channel),			\
+	.flags			= (_flags),			\
+	.max_antenna_gain	= 0,				\
+	.max_power		= 30,				\
+}
+
+#define CHAN5G(_channel, _flags) {				\
+	.band			= IEEE80211_BAND_5GHZ,		\
+	.center_freq		= 5000 + (5 * (_channel)),	\
+	.hw_value		= (_channel),			\
+	.flags			= (_flags),			\
+	.max_antenna_gain	= 0,				\
+	.max_power		= 30,				\
+}
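+/* CHAN5G derives the center frequency from the channel number,
+ * e.g. channel 36 -> 5000 + 5 * 36 = 5180 MHz.
+ */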
+
+#define RATE_TO_BASE100KBPS(rate)   (((rate) * 10) / 2)
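+/* Rate ids appear to be in 500 kbps units (hence rate * 10 / 2 to get
+ * 100 kbps units); e.g. 108 -> 540, i.e. 54 Mbps.
+ */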
+#define RATETAB_ENT(_rateid, _flags) \
+	{								\
+		.bitrate	= RATE_TO_BASE100KBPS(_rateid),     \
+		.hw_value	= (_rateid),			    \
+		.flags	  = (_flags),			     \
+	}
+
+static struct ieee80211_rate __wl_rates[] = {
+	RATETAB_ENT(DOT11_RATE_1M, 0),
+	RATETAB_ENT(DOT11_RATE_2M, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATETAB_ENT(DOT11_RATE_5M5, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATETAB_ENT(DOT11_RATE_11M, IEEE80211_RATE_SHORT_PREAMBLE),
+	RATETAB_ENT(DOT11_RATE_6M, 0),
+	RATETAB_ENT(DOT11_RATE_9M, 0),
+	RATETAB_ENT(DOT11_RATE_12M, 0),
+	RATETAB_ENT(DOT11_RATE_18M, 0),
+	RATETAB_ENT(DOT11_RATE_24M, 0),
+	RATETAB_ENT(DOT11_RATE_36M, 0),
+	RATETAB_ENT(DOT11_RATE_48M, 0),
+	RATETAB_ENT(DOT11_RATE_54M, 0)
+};
+
+#define wl_a_rates		(__wl_rates + 4)
+#define wl_a_rates_size	8
+#define wl_g_rates		(__wl_rates + 0)
+#define wl_g_rates_size	12
+
+static struct ieee80211_channel __wl_2ghz_channels[] = {
+	CHAN2G(1, 2412, 0),
+	CHAN2G(2, 2417, 0),
+	CHAN2G(3, 2422, 0),
+	CHAN2G(4, 2427, 0),
+	CHAN2G(5, 2432, 0),
+	CHAN2G(6, 2437, 0),
+	CHAN2G(7, 2442, 0),
+	CHAN2G(8, 2447, 0),
+	CHAN2G(9, 2452, 0),
+	CHAN2G(10, 2457, 0),
+	CHAN2G(11, 2462, 0),
+	CHAN2G(12, 2467, 0),
+	CHAN2G(13, 2472, 0),
+	CHAN2G(14, 2484, 0)
+};
+
+static struct ieee80211_channel __wl_5ghz_a_channels[] = {
+	CHAN5G(34, 0), CHAN5G(36, 0),
+	CHAN5G(38, 0), CHAN5G(40, 0),
+	CHAN5G(42, 0), CHAN5G(44, 0),
+	CHAN5G(46, 0), CHAN5G(48, 0),
+	CHAN5G(52, 0), CHAN5G(56, 0),
+	CHAN5G(60, 0), CHAN5G(64, 0),
+	CHAN5G(100, 0), CHAN5G(104, 0),
+	CHAN5G(108, 0), CHAN5G(112, 0),
+	CHAN5G(116, 0), CHAN5G(120, 0),
+	CHAN5G(124, 0), CHAN5G(128, 0),
+	CHAN5G(132, 0), CHAN5G(136, 0),
+	CHAN5G(140, 0), CHAN5G(149, 0),
+	CHAN5G(153, 0), CHAN5G(157, 0),
+	CHAN5G(161, 0), CHAN5G(165, 0)
+};
+
+static struct ieee80211_supported_band __wl_band_2ghz = {
+	.band = IEEE80211_BAND_2GHZ,
+	.channels = __wl_2ghz_channels,
+	.n_channels = ARRAY_SIZE(__wl_2ghz_channels),
+	.bitrates = wl_g_rates,
+	.n_bitrates = wl_g_rates_size
+};
+
+static struct ieee80211_supported_band __wl_band_5ghz_a = {
+	.band = IEEE80211_BAND_5GHZ,
+	.channels = __wl_5ghz_a_channels,
+	.n_channels = ARRAY_SIZE(__wl_5ghz_a_channels),
+	.bitrates = wl_a_rates,
+	.n_bitrates = wl_a_rates_size
+};
+
+static const u32 __wl_cipher_suites[] = {
+	WLAN_CIPHER_SUITE_WEP40,
+	WLAN_CIPHER_SUITE_WEP104,
+	WLAN_CIPHER_SUITE_TKIP,
+	WLAN_CIPHER_SUITE_CCMP,
+	WLAN_CIPHER_SUITE_AES_CMAC,
+#ifdef BCMWAPI_WPI
+	WLAN_CIPHER_SUITE_SMS4,
+#endif
+};
+
+
+#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
+static int maxrxpktglom = 0;
+#endif
+
+/* IOCtl version read from targeted driver */
+static int ioctl_version;
+#ifdef DEBUGFS_CFG80211
+#define S_SUBLOGLEVEL 20
+static const struct {
+	u32 log_level;
+	char *sublogname;
+} sublogname_map[] = {
+	{WL_DBG_ERR, "ERR"},
+	{WL_DBG_INFO, "INFO"},
+	{WL_DBG_DBG, "DBG"},
+	{WL_DBG_SCAN, "SCAN"},
+	{WL_DBG_TRACE, "TRACE"},
+	{WL_DBG_P2P_ACTION, "P2PACTION"}
+};
+#endif
+
+
+static void wl_add_remove_pm_enable_work(struct bcm_cfg80211 *cfg, bool add_remove,
+	enum wl_handler_del_type type)
+{
+	if (cfg == NULL)
+		return;
+
+	if (cfg->pm_enable_work_on) {
+		if (add_remove) {
+			schedule_delayed_work(&cfg->pm_enable_work,
+				msecs_to_jiffies(WL_PM_ENABLE_TIMEOUT));
+		} else {
+			cancel_delayed_work_sync(&cfg->pm_enable_work);
+			switch (type) {
+				case WL_HANDLER_MAINTAIN:
+					schedule_delayed_work(&cfg->pm_enable_work,
+						msecs_to_jiffies(WL_PM_ENABLE_TIMEOUT));
+					break;
+				case WL_HANDLER_PEND:
+					schedule_delayed_work(&cfg->pm_enable_work,
+						msecs_to_jiffies(WL_PM_ENABLE_TIMEOUT*2));
+					break;
+				case WL_HANDLER_DEL:
+				default:
+					cfg->pm_enable_work_on = false;
+					break;
+			}
+		}
+	}
+}
+
+/* Return a new chanspec given a legacy chanspec
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_chspec_from_legacy(chanspec_t legacy_chspec)
+{
+	chanspec_t chspec;
+
+	/* get the channel number */
+	chspec = LCHSPEC_CHANNEL(legacy_chspec);
+
+	/* convert the band */
+	if (LCHSPEC_IS2G(legacy_chspec)) {
+		chspec |= WL_CHANSPEC_BAND_2G;
+	} else {
+		chspec |= WL_CHANSPEC_BAND_5G;
+	}
+
+	/* convert the bw and sideband */
+	if (LCHSPEC_IS20(legacy_chspec)) {
+		chspec |= WL_CHANSPEC_BW_20;
+	} else {
+		chspec |= WL_CHANSPEC_BW_40;
+		if (LCHSPEC_CTL_SB(legacy_chspec) == WL_LCHANSPEC_CTL_SB_LOWER) {
+			chspec |= WL_CHANSPEC_CTL_SB_L;
+		} else {
+			chspec |= WL_CHANSPEC_CTL_SB_U;
+		}
+	}
+
+	if (wf_chspec_malformed(chspec)) {
+		WL_ERR(("wl_chspec_from_legacy: output chanspec (0x%04X) malformed\n",
+		        chspec));
+		return INVCHANSPEC;
+	}
+
+	return chspec;
+}
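+
+/*
+ * Conversion sketch (values assumed, for illustration): a legacy 20 MHz
+ * 2.4 GHz chanspec for channel 6 converts to the new format as
+ *
+ *	chspec = 6 | WL_CHANSPEC_BAND_2G | WL_CHANSPEC_BW_20;
+ *
+ * while a 40 MHz legacy chanspec additionally picks up the control
+ * sideband (WL_CHANSPEC_CTL_SB_L or WL_CHANSPEC_CTL_SB_U) derived from
+ * LCHSPEC_CTL_SB().
+ */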
+
+/* Return a legacy chanspec given a new chanspec
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_chspec_to_legacy(chanspec_t chspec)
+{
+	chanspec_t lchspec;
+
+	if (wf_chspec_malformed(chspec)) {
+		WL_ERR(("wl_chspec_to_legacy: input chanspec (0x%04X) malformed\n",
+		        chspec));
+		return INVCHANSPEC;
+	}
+
+	/* get the channel number */
+	lchspec = CHSPEC_CHANNEL(chspec);
+
+	/* convert the band */
+	if (CHSPEC_IS2G(chspec)) {
+		lchspec |= WL_LCHANSPEC_BAND_2G;
+	} else {
+		lchspec |= WL_LCHANSPEC_BAND_5G;
+	}
+
+	/* convert the bw and sideband */
+	if (CHSPEC_IS20(chspec)) {
+		lchspec |= WL_LCHANSPEC_BW_20;
+		lchspec |= WL_LCHANSPEC_CTL_SB_NONE;
+	} else if (CHSPEC_IS40(chspec)) {
+		lchspec |= WL_LCHANSPEC_BW_40;
+		if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_L) {
+			lchspec |= WL_LCHANSPEC_CTL_SB_LOWER;
+		} else {
+			lchspec |= WL_LCHANSPEC_CTL_SB_UPPER;
+		}
+	} else {
+		/* cannot express the bandwidth */
+		char chanbuf[CHANSPEC_STR_LEN];
+		WL_ERR((
+		        "wl_chspec_to_legacy: unable to convert chanspec %s (0x%04X) "
+		        "to pre-11ac format\n",
+		        wf_chspec_ntoa(chspec, chanbuf), chspec));
+		return INVCHANSPEC;
+	}
+
+	return lchspec;
+}
+
+/* given a chanspec value, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_chspec_host_to_driver(chanspec_t chanspec)
+{
+	if (ioctl_version == 1) {
+		chanspec = wl_chspec_to_legacy(chanspec);
+		if (chanspec == INVCHANSPEC) {
+			return chanspec;
+		}
+	}
+	chanspec = htodchanspec(chanspec);
+
+	return chanspec;
+}
+
+/* given a channel value, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ */
+chanspec_t
+wl_ch_host_to_driver(u16 channel)
+{
+
+	chanspec_t chanspec;
+
+	chanspec = channel & WL_CHANSPEC_CHAN_MASK;
+
+	if (channel <= CH_MAX_2G_CHANNEL)
+		chanspec |= WL_CHANSPEC_BAND_2G;
+	else
+		chanspec |= WL_CHANSPEC_BAND_5G;
+
+	chanspec |= WL_CHANSPEC_BW_20;
+	chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+	return wl_chspec_host_to_driver(chanspec);
+}
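+
+/*
+ * Usage sketch (illustrative; the surrounding code feeds such values to
+ * wl_cfgp2p_ifadd() and the escan path): channel 11 becomes a 20 MHz
+ * 2.4 GHz chanspec in driver byte order:
+ *
+ *	chanspec_t cs = wl_ch_host_to_driver(11);
+ *	(cs != INVCHANSPEC holds for any well-formed channel number)
+ */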
+
+/* given a chanspec value from the driver, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ */
+static chanspec_t
+wl_chspec_driver_to_host(chanspec_t chanspec)
+{
+	chanspec = dtohchanspec(chanspec);
+	if (ioctl_version == 1) {
+		chanspec = wl_chspec_from_legacy(chanspec);
+	}
+
+	return chanspec;
+}
+
+/* There isn't a lot of sense in it, but you can transmit anything you like */
+static const struct ieee80211_txrx_stypes
+wl_cfg80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+	[NL80211_IFTYPE_ADHOC] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4)
+	},
+	[NL80211_IFTYPE_STATION] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+	},
+	[NL80211_IFTYPE_AP] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+		BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+		BIT(IEEE80211_STYPE_AUTH >> 4) |
+		BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+		BIT(IEEE80211_STYPE_ACTION >> 4)
+	},
+	[NL80211_IFTYPE_AP_VLAN] = {
+		/* copy AP */
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+		BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+		BIT(IEEE80211_STYPE_AUTH >> 4) |
+		BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+		BIT(IEEE80211_STYPE_ACTION >> 4)
+	},
+	[NL80211_IFTYPE_P2P_CLIENT] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+	},
+	[NL80211_IFTYPE_P2P_GO] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+		BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+		BIT(IEEE80211_STYPE_AUTH >> 4) |
+		BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+		BIT(IEEE80211_STYPE_ACTION >> 4)
+	},
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	[NL80211_IFTYPE_P2P_DEVICE] = {
+		.tx = 0xffff,
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+	},
+#endif /* WL_CFG80211_P2P_DEV_IF */
+};
+
+static void swap_key_from_BE(struct wl_wsec_key *key)
+{
+	key->index = htod32(key->index);
+	key->len = htod32(key->len);
+	key->algo = htod32(key->algo);
+	key->flags = htod32(key->flags);
+	key->rxiv.hi = htod32(key->rxiv.hi);
+	key->rxiv.lo = htod16(key->rxiv.lo);
+	key->iv_initialized = htod32(key->iv_initialized);
+}
+
+static void swap_key_to_BE(struct wl_wsec_key *key)
+{
+	key->index = dtoh32(key->index);
+	key->len = dtoh32(key->len);
+	key->algo = dtoh32(key->algo);
+	key->flags = dtoh32(key->flags);
+	key->rxiv.hi = dtoh32(key->rxiv.hi);
+	key->rxiv.lo = dtoh16(key->rxiv.lo);
+	key->iv_initialized = dtoh32(key->iv_initialized);
+}
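+
+/*
+ * Note: despite the _BE names, these helpers use htod32()/dtoh32() (and
+ * htod16()/dtoh16() for rxiv.lo), i.e. they convert between host and
+ * dongle byte order and are no-ops when both sides are little-endian.
+ */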
+
+/* Dump the contents of the encoded wps ie buffer and get pbc value */
+static void
+wl_validate_wps_ie(char *wps_ie, s32 wps_ie_len, bool *pbc)
+{
+	#define WPS_IE_FIXED_LEN 6
+	u16 len;
+	u8 *subel = NULL;
+	u16 subelt_id;
+	u16 subelt_len;
+	u16 val;
+	u8 *valptr = (uint8*) &val;
+	if (wps_ie == NULL || wps_ie_len < WPS_IE_FIXED_LEN) {
+		WL_ERR(("invalid argument : NULL or too short\n"));
+		return;
+	}
+	len = (u16)wps_ie[TLV_LEN_OFF];
+
+	if (len > wps_ie_len) {
+		WL_ERR(("invalid length len %d, wps ie len %d\n", len, wps_ie_len));
+		return;
+	}
+	WL_DBG(("wps_ie len=%d\n", len));
+	len -= 4;	/* for the WPS IE's OUI, oui_type fields */
+	subel = wps_ie + WPS_IE_FIXED_LEN;
+	while (len >= 4) {		/* must have attr id, attr len fields */
+		valptr[0] = *subel++;
+		valptr[1] = *subel++;
+		subelt_id = HTON16(val);
+
+		valptr[0] = *subel++;
+		valptr[1] = *subel++;
+		subelt_len = HTON16(val);
+
+		len -= 4;			/* for the attr id, attr len fields */
+		len -= subelt_len;	/* for the remaining fields in this attribute */
+		WL_DBG((" subel=%p, subelt_id=0x%x subelt_len=%u\n",
+			subel, subelt_id, subelt_len));
+
+		if (subelt_id == WPS_ID_VERSION) {
+			WL_DBG(("  attr WPS_ID_VERSION: %u\n", *subel));
+		} else if (subelt_id == WPS_ID_REQ_TYPE) {
+			WL_DBG(("  attr WPS_ID_REQ_TYPE: %u\n", *subel));
+		} else if (subelt_id == WPS_ID_CONFIG_METHODS) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_CONFIG_METHODS: %x\n", HTON16(val)));
+		} else if (subelt_id == WPS_ID_DEVICE_NAME) {
+			char devname[100];
+			memcpy(devname, subel, subelt_len);
+			devname[subelt_len] = '\0';
+			WL_DBG(("  attr WPS_ID_DEVICE_NAME: %s (len %u)\n",
+				devname, subelt_len));
+		} else if (subelt_id == WPS_ID_DEVICE_PWD_ID) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_DEVICE_PWD_ID: %u\n", HTON16(val)));
+			*pbc = (HTON16(val) == DEV_PW_PUSHBUTTON) ? true : false;
+		} else if (subelt_id == WPS_ID_PRIM_DEV_TYPE) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_PRIM_DEV_TYPE: cat=%u \n", HTON16(val)));
+			valptr[0] = *(subel + 6);
+			valptr[1] = *(subel + 7);
+			WL_DBG(("  attr WPS_ID_PRIM_DEV_TYPE: subcat=%u\n", HTON16(val)));
+		} else if (subelt_id == WPS_ID_REQ_DEV_TYPE) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_REQ_DEV_TYPE: cat=%u\n", HTON16(val)));
+			valptr[0] = *(subel + 6);
+			valptr[1] = *(subel + 7);
+			WL_DBG(("  attr WPS_ID_REQ_DEV_TYPE: subcat=%u\n", HTON16(val)));
+		} else if (subelt_id == WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS) {
+			valptr[0] = *subel;
+			valptr[1] = *(subel + 1);
+			WL_DBG(("  attr WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS"
+				": cat=%u\n", HTON16(val)));
+		} else {
+			WL_DBG(("  unknown attr 0x%x\n", subelt_id));
+		}
+
+		subel += subelt_len;
+	}
+}
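+
+/*
+ * Attribute layout sketch (per the WSC spec, illustration only): each WPS
+ * attribute is a big-endian 16-bit id, a big-endian 16-bit length, then
+ * the value. A Device Password ID attribute selecting push-button would
+ * appear on the wire as:
+ *
+ *	10 12 00 02 00 04
+ *	 id=0x1012  len=2  value=0x0004 (DEV_PW_PUSHBUTTON)
+ *
+ * which the two-byte HTON16(val) reads above decode one field at a time.
+ */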
+
+s32 wl_set_tx_power(struct net_device *dev,
+	enum nl80211_tx_power_setting type, s32 dbm)
+{
+	s32 err = 0;
+	s32 disable = 0;
+	s32 txpwrqdbm;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+#ifdef TEST_TX_POWER_CONTROL
+	char *tmppwr_str;
+#endif /* TEST_TX_POWER_CONTROL */
+
+	/* Make sure radio is off or on as far as software is concerned */
+	disable = WL_RADIO_SW_DISABLE << 16;
+	disable = htod32(disable);
+	err = wldev_ioctl(dev, WLC_SET_RADIO, &disable, sizeof(disable), true);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_SET_RADIO error (%d)\n", err));
+		return err;
+	}
+
+	if (dbm > 0xffff)
+		dbm = 0xffff;
+	txpwrqdbm = dbm * 4;
+
+	err = wldev_iovar_setbuf_bsscfg(dev, "qtxpower", (void *)&txpwrqdbm,
+		sizeof(txpwrqdbm), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0,
+		&cfg->ioctl_buf_sync);
+
+	if (unlikely(err))
+		WL_ERR(("qtxpower error (%d)\n", err));
+	else
+		WL_ERR(("dBm=%d, txpwrqdbm=0x%x\n", dbm, txpwrqdbm));
+
+	return err;
+}
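+
+/*
+ * Unit note (illustrative): "qtxpower" takes quarter-dBm, hence the
+ * dbm * 4 above; e.g. a request for 20 dBm is sent as txpwrqdbm == 80.
+ */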
+
+s32 wl_get_tx_power(struct net_device *dev, s32 *dbm)
+{
+	s32 err = 0;
+	s32 txpwrdbm;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	err = wldev_iovar_getbuf_bsscfg(dev, "qtxpower",
+		NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		return err;
+	}
+
+	memcpy(&txpwrdbm, cfg->ioctl_buf, sizeof(txpwrdbm));
+	txpwrdbm = dtoh32(txpwrdbm);
+	*dbm = (txpwrdbm & ~WL_TXPWR_OVERRIDE) / 4;
+
+	WL_INFO(("dBm=%d, txpwrdbm=0x%x\n", *dbm, txpwrdbm));
+
+	return err;
+}
+
+static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy)
+{
+	chanspec_t chspec;
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+	struct ether_addr bssid;
+	struct wl_bss_info *bss = NULL;
+
+	if ((err = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, sizeof(bssid), false))) {
+		/* The STA interface is not associated, so start the new interface
+		 * on a temporary channel. The proper channel will be applied later
+		 * by the framework via set_channel (cfg80211 API).
+		 */
+		WL_DBG(("Not associated. Return a temp channel.\n"));
+		return wl_ch_host_to_driver(WL_P2P_TEMP_CHAN);
+	}
+
+
+	*(u32 *) cfg->extra_buf = htod32(WL_EXTRA_BUF_MAX);
+	if ((err = wldev_ioctl(dev, WLC_GET_BSS_INFO, cfg->extra_buf,
+		WL_EXTRA_BUF_MAX, false))) {
+		WL_ERR(("Failed to get associated bss info, use temp channel\n"));
+		chspec = wl_ch_host_to_driver(WL_P2P_TEMP_CHAN);
+	} else {
+		bss = (struct wl_bss_info *) (cfg->extra_buf + 4);
+		chspec = bss->chanspec;
+
+		WL_DBG(("Valid BSS Found. chanspec:%d\n", chspec));
+	}
+	return chspec;
+}
+
+static bcm_struct_cfgdev *
+wl_cfg80211_add_monitor_if(char *name)
+{
+#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
+	WL_INFO(("wl_cfg80211_add_monitor_if: monitor interface is no longer supported\n"));
+	return ERR_PTR(-EOPNOTSUPP);
+#else
+	struct net_device* ndev = NULL;
+
+	dhd_add_monitor(name, &ndev);
+	WL_INFO(("wl_cfg80211_add_monitor_if net device returned: 0x%p\n", ndev));
+	return ndev_to_cfgdev(ndev);
+#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
+}
+
+static bcm_struct_cfgdev *
+wl_cfg80211_add_virtual_iface(struct wiphy *wiphy,
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	const char *name,
+#else
+	char *name,
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	enum nl80211_iftype type, u32 *flags,
+	struct vif_params *params)
+{
+	s32 err;
+	s32 timeout = -1;
+	s32 wlif_type = -1;
+	s32 mode = 0;
+	s32 val = 0;
+	s32 dhd_mode = 0;
+	chanspec_t chspec;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *primary_ndev;
+	struct net_device *new_ndev;
+	struct ether_addr primary_mac;
+#ifdef PROP_TXSTATUS_VSDB
+	s32 up = 1;
+	dhd_pub_t *dhd;
+	bool enabled;
+#endif /* PROP_TXSTATUS_VSDB */
+
+	if (!cfg)
+		return ERR_PTR(-EINVAL);
+
+#ifdef PROP_TXSTATUS_VSDB
+	dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* PROP_TXSTATUS_VSDB */
+
+
+	/* Use primary I/F for sending cmds down to firmware */
+	primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	if (unlikely(!wl_get_drv_status(cfg, READY, primary_ndev))) {
+		WL_ERR(("device is not ready\n"));
+		return ERR_PTR(-ENODEV);
+	}
+
+	WL_DBG(("if name: %s, type: %d\n", name, type));
+	switch (type) {
+	case NL80211_IFTYPE_ADHOC:
+	case NL80211_IFTYPE_AP_VLAN:
+	case NL80211_IFTYPE_WDS:
+	case NL80211_IFTYPE_MESH_POINT:
+		WL_ERR(("Unsupported interface type\n"));
+		mode = WL_MODE_IBSS;
+		return NULL;
+	case NL80211_IFTYPE_MONITOR:
+		return wl_cfg80211_add_monitor_if((char *)name);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	case NL80211_IFTYPE_P2P_DEVICE:
+		return wl_cfgp2p_add_p2p_disc_if(cfg);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	case NL80211_IFTYPE_P2P_CLIENT:
+	case NL80211_IFTYPE_STATION:
+		wlif_type = WL_P2P_IF_CLIENT;
+		mode = WL_MODE_BSS;
+		break;
+	case NL80211_IFTYPE_P2P_GO:
+	case NL80211_IFTYPE_AP:
+		wlif_type = WL_P2P_IF_GO;
+		mode = WL_MODE_AP;
+		break;
+	default:
+		WL_ERR(("Unsupported interface type\n"));
+		return NULL;
+		break;
+	}
+
+	if (!name) {
+		WL_ERR(("name is NULL\n"));
+		return NULL;
+	}
+	if (cfg->p2p_supported && (wlif_type != -1)) {
+		ASSERT(cfg->p2p); /* ensure expectation of p2p initialization */
+
+#ifdef PROP_TXSTATUS_VSDB
+		if (!dhd)
+			return ERR_PTR(-ENODEV);
+#endif /* PROP_TXSTATUS_VSDB */
+		if (!cfg->p2p)
+			return ERR_PTR(-ENODEV);
+
+		if (cfg->p2p && !cfg->p2p->on && strstr(name, WL_P2P_INTERFACE_PREFIX)) {
+			p2p_on(cfg) = true;
+			wl_cfgp2p_set_firm_p2p(cfg);
+			wl_cfgp2p_init_discovery(cfg);
+			get_primary_mac(cfg, &primary_mac);
+			wl_cfgp2p_generate_bss_mac(&primary_mac,
+				&cfg->p2p->dev_addr, &cfg->p2p->int_addr);
+		}
+
+		memset(cfg->p2p->vir_ifname, 0, IFNAMSIZ);
+		strncpy(cfg->p2p->vir_ifname, name, IFNAMSIZ - 1);
+
+		wl_cfg80211_scan_abort(cfg);
+#ifdef PROP_TXSTATUS_VSDB
+		if (!cfg->wlfc_on && !disable_proptx) {
+			dhd_wlfc_get_enable(dhd, &enabled);
+			if (!enabled && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+				dhd->op_mode != DHD_FLAG_IBSS_MODE) {
+				dhd_wlfc_init(dhd);
+				err = wldev_ioctl(primary_ndev, WLC_UP, &up, sizeof(s32), true);
+				if (err < 0)
+					WL_ERR(("WLC_UP return err:%d\n", err));
+			}
+			cfg->wlfc_on = true;
+		}
+#endif /* PROP_TXSTATUS_VSDB */
+
+		/* In the concurrency case, the STA may already be associated on a
+		 * particular channel, so retrieve the current channel of the
+		 * primary interface and start the virtual interface on it.
+		 */
+		chspec = wl_cfg80211_get_shared_freq(wiphy);
+
+		/* For P2P mode, use P2P-specific driver features to create the
+		 * bss: "cfg p2p_ifadd"
+		 */
+		wl_set_p2p_status(cfg, IF_ADDING);
+		memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+		if (wlif_type == WL_P2P_IF_GO)
+			wldev_iovar_setint(primary_ndev, "mpc", 0);
+		err = wl_cfgp2p_ifadd(cfg, &cfg->p2p->int_addr, htod32(wlif_type), chspec);
+		if (unlikely(err)) {
+			wl_clr_p2p_status(cfg, IF_ADDING);
+			WL_ERR((" virtual iface add failed (%d) \n", err));
+			return ERR_PTR(-ENOMEM);
+		}
+
+		timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+			(wl_get_p2p_status(cfg, IF_ADDING) == false),
+			msecs_to_jiffies(MAX_WAIT_TIME));
+
+		if (timeout > 0 && !wl_get_p2p_status(cfg, IF_ADDING) && cfg->if_event_info.valid) {
+			struct wireless_dev *vwdev;
+			int pm_mode = PM_ENABLE;
+			wl_if_event_info *event = &cfg->if_event_info;
+
+			/* The IF_ADD event has come back, so we can proceed to register
+			 * the new interface now; use the interface name provided by the
+			 * caller (thus ignoring the one from wlc).
+			 */
+			strncpy(cfg->if_event_info.name, name, IFNAMSIZ - 1);
+			new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, cfg->p2p->vir_ifname,
+				event->mac, event->bssidx);
+			if (new_ndev == NULL)
+				goto fail;
+
+			wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION) = new_ndev;
+			wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION) = event->bssidx;
+			vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
+			if (unlikely(!vwdev)) {
+				WL_ERR(("Could not allocate wireless device\n"));
+				goto fail;
+			}
+			vwdev->wiphy = cfg->wdev->wiphy;
+			WL_INFO(("virtual interface(%s) is created\n", cfg->p2p->vir_ifname));
+			vwdev->iftype = type;
+			vwdev->netdev = new_ndev;
+			new_ndev->ieee80211_ptr = vwdev;
+			SET_NETDEV_DEV(new_ndev, wiphy_dev(vwdev->wiphy));
+			wl_set_drv_status(cfg, READY, new_ndev);
+			cfg->p2p->vif_created = true;
+			wl_set_mode_by_netdev(cfg, new_ndev, mode);
+
+			if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev) != BCME_OK) {
+				wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev);
+				goto fail;
+			}
+			wl_alloc_netinfo(cfg, new_ndev, vwdev, mode, pm_mode);
+			val = 1;
+			/* Disable firmware roaming for P2P interface  */
+			wldev_iovar_setint(new_ndev, "roam_off", val);
+
+			if (mode != WL_MODE_AP)
+				wldev_iovar_setint(new_ndev, "buf_key_b4_m4", 1);
+
+			WL_ERR((" virtual interface(%s) is "
+				"created net attach done\n", cfg->p2p->vir_ifname));
+			if (mode == WL_MODE_AP)
+				wl_set_drv_status(cfg, CONNECTED, new_ndev);
+			if (type == NL80211_IFTYPE_P2P_CLIENT)
+				dhd_mode = DHD_FLAG_P2P_GC_MODE;
+			else if (type == NL80211_IFTYPE_P2P_GO)
+				dhd_mode = DHD_FLAG_P2P_GO_MODE;
+			DNGL_FUNC(dhd_cfg80211_set_p2p_info, (cfg, dhd_mode));
+			/* reinitialize completion to clear previous count */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+			INIT_COMPLETION(cfg->iface_disable);
+#else
+			init_completion(&cfg->iface_disable);
+#endif
+			return ndev_to_cfgdev(new_ndev);
+		} else {
+			wl_clr_p2p_status(cfg, IF_ADDING);
+			WL_ERR((" virtual interface(%s) is not created \n", cfg->p2p->vir_ifname));
+			memset(cfg->p2p->vir_ifname, '\0', IFNAMSIZ);
+			cfg->p2p->vif_created = false;
+#ifdef PROP_TXSTATUS_VSDB
+			dhd_wlfc_get_enable(dhd, &enabled);
+		if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+			dhd->op_mode != DHD_FLAG_IBSS_MODE) {
+			dhd_wlfc_deinit(dhd);
+			cfg->wlfc_on = false;
+		}
+#endif /* PROP_TXSTATUS_VSDB */
+		}
+	}
+
+fail:
+	if (wlif_type == WL_P2P_IF_GO)
+		wldev_iovar_setint(primary_ndev, "mpc", 1);
+	return ERR_PTR(-ENODEV);
+}
+
+static s32
+wl_cfg80211_del_virtual_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev)
+{
+	struct net_device *dev = NULL;
+	struct ether_addr p2p_mac;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 timeout = -1;
+	s32 ret = 0;
+	s32 index = -1;
+#ifdef CUSTOM_SET_CPUCORE
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* CUSTOM_SET_CPUCORE */
+	WL_DBG(("Enter\n"));
+
+#ifdef CUSTOM_SET_CPUCORE
+	dhd->chan_isvht80 &= ~DHD_FLAG_P2P_MODE;
+	if (!(dhd->chan_isvht80))
+		dhd_set_cpucore(dhd, FALSE);
+#endif /* CUSTOM_SET_CPUCORE */
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	if (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
+		return wl_cfgp2p_del_p2p_disc_if(cfgdev, cfg);
+	}
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	dev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	if (wl_cfgp2p_find_idx(cfg, dev, &index) != BCME_OK) {
+		WL_ERR(("Find p2p index from ndev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	if (cfg->p2p_supported) {
+		memcpy(p2p_mac.octet, cfg->p2p->int_addr.octet, ETHER_ADDR_LEN);
+
+		/* Clear GO_NEG_PHASE bit to take care of GO-NEG-FAIL cases
+		 */
+		WL_DBG(("P2P: GO_NEG_PHASE status cleared "));
+		wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+		if (cfg->p2p->vif_created) {
+			if (wl_get_drv_status(cfg, SCANNING, dev)) {
+				wl_notify_escan_complete(cfg, dev, true, true);
+			}
+			wldev_iovar_setint(dev, "mpc", 1);
+			/* Delete pm_enable_work */
+			wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL);
+
+			/* for GC */
+			if (wl_get_drv_status(cfg, DISCONNECTING, dev) &&
+				(wl_get_mode_by_netdev(cfg, dev) != WL_MODE_AP)) {
+				WL_ERR(("Wait for Link Down event for GC !\n"));
+				wait_for_completion_timeout
+					(&cfg->iface_disable, msecs_to_jiffies(500));
+			}
+
+			memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+			wl_set_p2p_status(cfg, IF_DELETING);
+			DNGL_FUNC(dhd_cfg80211_clean_p2p_info, (cfg));
+
+			/* for GO */
+			if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) {
+				wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, false);
+				/* disable interface before bsscfg free */
+				ret = wl_cfgp2p_ifdisable(cfg, &p2p_mac);
+				/* If the firmware doesn't support "ifdis",
+				 * do not wait for link down of AP mode.
+				 */
+				if (ret == 0) {
+					WL_ERR(("Wait for Link Down event for GO !!!\n"));
+					wait_for_completion_timeout(&cfg->iface_disable,
+						msecs_to_jiffies(500));
+				} else if (ret != BCME_UNSUPPORTED) {
+					msleep(300);
+				}
+			}
+			wl_cfgp2p_clear_management_ie(cfg, index);
+
+			if (wl_get_mode_by_netdev(cfg, dev) != WL_MODE_AP)
+				wldev_iovar_setint(dev, "buf_key_b4_m4", 0);
+
+			/* delete interface after link down */
+			ret = wl_cfgp2p_ifdel(cfg, &p2p_mac);
+
+			if (ret != BCME_OK) {
+				struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+
+				WL_ERR(("p2p_ifdel failed, error %d, sent HANG event to %s\n",
+					ret, ndev->name));
+				#if defined(BCMDONGLEHOST) && defined(OEM_ANDROID)
+				net_os_send_hang_message(ndev);
+				#endif 
+			} else {
+				/* Wait for IF_DEL operation to be finished */
+				timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+					(wl_get_p2p_status(cfg, IF_DELETING) == false),
+					msecs_to_jiffies(MAX_WAIT_TIME));
+				if (timeout > 0 && !wl_get_p2p_status(cfg, IF_DELETING) &&
+					cfg->if_event_info.valid) {
+
+					WL_DBG(("IFDEL operation done\n"));
+					wl_cfg80211_handle_ifdel(cfg, &cfg->if_event_info, dev);
+				} else {
+					WL_ERR(("IFDEL didn't complete properly\n"));
+				}
+			}
+
+			ret = dhd_del_monitor(dev);
+			if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) {
+				DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL((dhd_pub_t *)(cfg->pub));
+			}
+		}
+	}
+	return ret;
+}
+
+static s32
+wl_cfg80211_change_virtual_iface(struct wiphy *wiphy, struct net_device *ndev,
+	enum nl80211_iftype type, u32 *flags,
+	struct vif_params *params)
+{
+	s32 ap = 0;
+	s32 infra = 0;
+	s32 ibss = 0;
+	s32 wlif_type;
+	s32 mode = 0;
+	s32 err = BCME_OK;
+	chanspec_t chspec;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+	WL_DBG(("Enter type %d\n", type));
+	switch (type) {
+	case NL80211_IFTYPE_MONITOR:
+	case NL80211_IFTYPE_WDS:
+	case NL80211_IFTYPE_MESH_POINT:
+		ap = 1;
+		WL_ERR(("type (%d) : currently we do not support this type\n",
+			type));
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		mode = WL_MODE_IBSS;
+		ibss = 1;
+		break;
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_P2P_CLIENT:
+		mode = WL_MODE_BSS;
+		infra = 1;
+		break;
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_AP_VLAN:
+	case NL80211_IFTYPE_P2P_GO:
+		mode = WL_MODE_AP;
+		ap = 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (!dhd)
+		return -EINVAL;
+	if (ap) {
+		wl_set_mode_by_netdev(cfg, ndev, mode);
+		if (cfg->p2p_supported && cfg->p2p->vif_created) {
+			WL_DBG(("p2p_vif_created (%d) p2p_on (%d)\n", cfg->p2p->vif_created,
+			p2p_on(cfg)));
+			wldev_iovar_setint(ndev, "mpc", 0);
+			wl_notify_escan_complete(cfg, ndev, true, true);
+
+			/* In the concurrency case, the STA may already be associated
+			 * on a particular channel, so retrieve the current channel of
+			 * the primary interface and start the virtual interface on it.
+			 */
+			chspec = wl_cfg80211_get_shared_freq(wiphy);
+
+			wlif_type = WL_P2P_IF_GO;
+			WL_ERR(("%s : ap (%d), infra (%d), iftype: (%d)\n",
+				ndev->name, ap, infra, type));
+			wl_set_p2p_status(cfg, IF_CHANGING);
+			wl_clr_p2p_status(cfg, IF_CHANGED);
+			wl_cfgp2p_ifchange(cfg, &cfg->p2p->int_addr, htod32(wlif_type), chspec);
+			wait_event_interruptible_timeout(cfg->netif_change_event,
+				(wl_get_p2p_status(cfg, IF_CHANGED) == true),
+				msecs_to_jiffies(MAX_WAIT_TIME));
+			wl_set_mode_by_netdev(cfg, ndev, mode);
+			dhd->op_mode &= ~DHD_FLAG_P2P_GC_MODE;
+			dhd->op_mode |= DHD_FLAG_P2P_GO_MODE;
+			wl_clr_p2p_status(cfg, IF_CHANGING);
+			wl_clr_p2p_status(cfg, IF_CHANGED);
+			if (mode == WL_MODE_AP)
+				wl_set_drv_status(cfg, CONNECTED, ndev);
+		} else if (ndev == bcmcfg_to_prmry_ndev(cfg) &&
+			!wl_get_drv_status(cfg, AP_CREATED, ndev)) {
+			wl_set_drv_status(cfg, AP_CREATING, ndev);
+			if (!cfg->ap_info &&
+				!(cfg->ap_info = kzalloc(sizeof(struct ap_info), GFP_KERNEL))) {
+				WL_ERR(("struct ap_saved_ie allocation failed\n"));
+				return -ENOMEM;
+			}
+		} else {
+			WL_ERR(("Cannot change the interface for GO or SOFTAP\n"));
+			return -EINVAL;
+		}
+	} else {
+		WL_DBG(("Change_virtual_iface for transition from GO/AP to client/STA"));
+	}
+
+	if (ibss) {
+		infra = 0;
+		wl_set_mode_by_netdev(cfg, ndev, mode);
+		err = wldev_ioctl(ndev, WLC_SET_INFRA, &infra, sizeof(s32), true);
+		if (err < 0) {
+			WL_ERR(("SET Adhoc error %d\n", err));
+			return -EINVAL;
+		}
+	}
+
+	ndev->ieee80211_ptr->iftype = type;
+	return 0;
+}
+
+s32
+wl_cfg80211_notify_ifadd(int ifidx, char *name, uint8 *mac, uint8 bssidx)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	/* P2P may send WLC_E_IF_ADD and/or WLC_E_IF_CHANGE during IF updating
+	 * ("p2p_ifupd"); redirect the IF_ADD event to ifchange, as it is not a
+	 * real "new" interface.
+	 */
+	if (wl_get_p2p_status(cfg, IF_CHANGING))
+		return wl_cfg80211_notify_ifchange(ifidx, name, mac, bssidx);
+
+	/* Okay, we are expecting IF_ADD (as IF_ADDING is true) */
+	if (wl_get_p2p_status(cfg, IF_ADDING)) {
+		wl_if_event_info *if_event_info = &cfg->if_event_info;
+
+		if_event_info->valid = TRUE;
+		if_event_info->ifidx = ifidx;
+		if_event_info->bssidx = bssidx;
+		strncpy(if_event_info->name, name, IFNAMSIZ);
+		if_event_info->name[IFNAMSIZ] = '\0';
+		if (mac)
+			memcpy(if_event_info->mac, mac, ETHER_ADDR_LEN);
+
+		wl_clr_p2p_status(cfg, IF_ADDING);
+		wake_up_interruptible(&cfg->netif_change_event);
+		return BCME_OK;
+	}
+
+	return BCME_ERROR;
+}
+
+s32
+wl_cfg80211_notify_ifdel(int ifidx, char *name, uint8 *mac, uint8 bssidx)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	wl_if_event_info *if_event_info = &cfg->if_event_info;
+
+	if (wl_get_p2p_status(cfg, IF_DELETING)) {
+		if_event_info->valid = TRUE;
+		if_event_info->ifidx = ifidx;
+		if_event_info->bssidx = bssidx;
+		wl_clr_p2p_status(cfg, IF_DELETING);
+		wake_up_interruptible(&cfg->netif_change_event);
+		return BCME_OK;
+	}
+
+	return BCME_ERROR;
+}
+
+s32
+wl_cfg80211_notify_ifchange(int ifidx, char *name, uint8 *mac, uint8 bssidx)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	if (wl_get_p2p_status(cfg, IF_CHANGING)) {
+		wl_set_p2p_status(cfg, IF_CHANGED);
+		wake_up_interruptible(&cfg->netif_change_event);
+		return BCME_OK;
+	}
+
+	return BCME_ERROR;
+}
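+
+/*
+ * Handshake sketch (condensed from the add/del paths above, for
+ * illustration): the initiator sets the IF_ADDING/IF_DELETING/IF_CHANGING
+ * bit, issues the iovar, and blocks on netif_change_event; the notify
+ * hooks above fill in if_event_info, clear the bit, and wake the waiter:
+ *
+ *	wl_set_p2p_status(cfg, IF_ADDING);
+ *	err = wl_cfgp2p_ifadd(cfg, &cfg->p2p->int_addr,
+ *		htod32(wlif_type), chspec);
+ *	wait_event_interruptible_timeout(cfg->netif_change_event,
+ *		!wl_get_p2p_status(cfg, IF_ADDING),
+ *		msecs_to_jiffies(MAX_WAIT_TIME));
+ */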
+
+static s32 wl_cfg80211_handle_ifdel(struct bcm_cfg80211 *cfg, wl_if_event_info *if_event_info,
+	struct net_device* ndev)
+{
+	s32 type = -1;
+	s32 bssidx = -1;
+#ifdef PROP_TXSTATUS_VSDB
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+	bool enabled;
+#endif /* PROP_TXSTATUS_VSDB */
+
+	bssidx = if_event_info->bssidx;
+	if (bssidx != wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION)) {
+		WL_ERR(("got IF_DEL for if %d, not owned by cfg driver\n", bssidx));
+		return BCME_ERROR;
+	}
+
+	if (p2p_is_on(cfg) && cfg->p2p->vif_created) {
+
+		if (cfg->scan_request && (cfg->escan_info.ndev == ndev)) {
+			/* Abort any pending scan requests */
+			cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+			WL_DBG(("ESCAN COMPLETED\n"));
+			wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, false);
+		}
+
+		memset(cfg->p2p->vir_ifname, '\0', IFNAMSIZ);
+		if (wl_cfgp2p_find_type(cfg, bssidx, &type) != BCME_OK) {
+			WL_ERR(("Find p2p type from bssidx(%d) failed\n", bssidx));
+			return BCME_ERROR;
+		}
+		wl_clr_drv_status(cfg, CONNECTED, wl_to_p2p_bss_ndev(cfg, type));
+		wl_to_p2p_bss_ndev(cfg, type) = NULL;
+		wl_to_p2p_bss_bssidx(cfg, type) = WL_INVALID;
+		cfg->p2p->vif_created = false;
+
+#ifdef PROP_TXSTATUS_VSDB
+		dhd_wlfc_get_enable(dhd, &enabled);
+		if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+			dhd->op_mode != DHD_FLAG_IBSS_MODE) {
+			dhd_wlfc_deinit(dhd);
+			cfg->wlfc_on = false;
+		}
+#endif /* PROP_TXSTATUS_VSDB */
+	}
+
+	wl_cfg80211_remove_if(cfg, if_event_info->ifidx, ndev);
+	return BCME_OK;
+}
+
+/* Find listen channel */
+static s32 wl_find_listen_channel(struct bcm_cfg80211 *cfg,
+	const u8 *ie, u32 ie_len)
+{
+	wifi_p2p_ie_t *p2p_ie;
+	u8 *end, *pos;
+	s32 listen_channel;
+
+	pos = (u8 *)ie;
+	p2p_ie = wl_cfgp2p_find_p2pie(pos, ie_len);
+
+	if (p2p_ie == NULL)
+		return 0;
+
+	pos = p2p_ie->subelts;
+	end = p2p_ie->subelts + (p2p_ie->len - 4);
+
+	CFGP2P_DBG((" found p2p ie ! length %d\n",
+		p2p_ie->len));
+
+	while (pos < end) {
+		uint16 attr_len;
+		if (pos + 2 >= end) {
+			CFGP2P_DBG((" -- Invalid P2P attribute"));
+			return 0;
+		}
+		attr_len = ((uint16) (((pos + 1)[1] << 8) | (pos + 1)[0]));
+
+		if (pos + 3 + attr_len > end) {
+			CFGP2P_DBG(("P2P: Attribute underflow "
+				   "(len=%u left=%d)",
+				   attr_len, (int) (end - pos - 3)));
+			return 0;
+		}
+
+		/* If the Listen Channel attribute id is 6 and the value is
+		 * valid, return the listen channel.
+		 */
+		if (pos[0] == 6) {
+			/* listen channel subel length format
+			 * 1(id) + 2(len) + 3(country) + 1(op. class) + 1(chan num)
+			 */
+			listen_channel = pos[1 + 2 + 3 + 1];
+
+			if (listen_channel == SOCIAL_CHAN_1 ||
+				listen_channel == SOCIAL_CHAN_2 ||
+				listen_channel == SOCIAL_CHAN_3) {
+				CFGP2P_DBG((" Found my Listen Channel %d \n", listen_channel));
+				return listen_channel;
+			}
+		}
+		pos += 3 + attr_len;
+	}
+	return 0;
+}
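+
+/*
+ * Attribute layout sketch (P2P spec, illustration only): each P2P
+ * attribute is a 1-byte id, a 2-byte little-endian length, then the body.
+ * The Listen Channel attribute (id 6) carries country(3) + operating
+ * class(1) + channel(1), so pos[1 + 2 + 3 + 1] above lands on the channel
+ * number, and pos advances by 3 + attr_len to the next attribute.
+ */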
+
+static void wl_scan_prep(struct wl_scan_params *params, struct cfg80211_scan_request *request)
+{
+	u32 n_ssids;
+	u32 n_channels;
+	u16 channel;
+	chanspec_t chanspec;
+	s32 i = 0, j = 0, offset;
+	char *ptr;
+	wlc_ssid_t ssid;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+	params->bss_type = DOT11_BSSTYPE_ANY;
+	params->scan_type = 0;
+	params->nprobes = -1;
+	params->active_time = -1;
+	params->passive_time = -1;
+	params->home_time = -1;
+	params->channel_num = 0;
+	memset(&params->ssid, 0, sizeof(wlc_ssid_t));
+
+	WL_SCAN(("Preparing Scan request\n"));
+	WL_SCAN(("nprobes=%d\n", params->nprobes));
+	WL_SCAN(("active_time=%d\n", params->active_time));
+	WL_SCAN(("passive_time=%d\n", params->passive_time));
+	WL_SCAN(("home_time=%d\n", params->home_time));
+	WL_SCAN(("scan_type=%d\n", params->scan_type));
+
+	params->nprobes = htod32(params->nprobes);
+	params->active_time = htod32(params->active_time);
+	params->passive_time = htod32(params->passive_time);
+	params->home_time = htod32(params->home_time);
+
+	/* If the request is NULL, just exit; the result is an all-channel broadcast scan */
+	if (!request)
+		return;
+
+	n_ssids = request->n_ssids;
+	n_channels = request->n_channels;
+
+	/* Copy channel array if applicable */
+	WL_SCAN(("### List of channelspecs to scan ###\n"));
+	if (n_channels > 0) {
+		for (i = 0; i < n_channels; i++) {
+			chanspec = 0;
+			channel = ieee80211_frequency_to_channel(request->channels[i]->center_freq);
+			/* SKIP DFS channels for Secondary interface */
+			if ((cfg->escan_info.ndev != bcmcfg_to_prmry_ndev(cfg)) &&
+				(request->channels[i]->flags &
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+				(IEEE80211_CHAN_RADAR | IEEE80211_CHAN_PASSIVE_SCAN)))
+#else
+				(IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)))
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) */
+				continue;
+
+			if (request->channels[i]->band == IEEE80211_BAND_2GHZ) {
+				chanspec |= WL_CHANSPEC_BAND_2G;
+			} else {
+				chanspec |= WL_CHANSPEC_BAND_5G;
+			}
+
+			chanspec |= WL_CHANSPEC_BW_20;
+			chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+			params->channel_list[j] = channel;
+			params->channel_list[j] &= WL_CHANSPEC_CHAN_MASK;
+			params->channel_list[j] |= chanspec;
+			WL_SCAN(("Chan : %d, Channel spec: %x \n",
+				channel, params->channel_list[j]));
+			params->channel_list[j] = wl_chspec_host_to_driver(params->channel_list[j]);
+			j++;
+		}
+	} else {
+		WL_SCAN(("Scanning all channels\n"));
+	}
+	n_channels = j;
+	/* Copy ssid array if applicable */
+	WL_SCAN(("### List of SSIDs to scan ###\n"));
+	if (n_ssids > 0) {
+		offset = offsetof(wl_scan_params_t, channel_list) + n_channels * sizeof(u16);
+		offset = roundup(offset, sizeof(u32));
+		ptr = (char*)params + offset;
+		for (i = 0; i < n_ssids; i++) {
+			memset(&ssid, 0, sizeof(wlc_ssid_t));
+			ssid.SSID_len = request->ssids[i].ssid_len;
+			memcpy(ssid.SSID, request->ssids[i].ssid, ssid.SSID_len);
+			if (!ssid.SSID_len)
+				WL_SCAN(("%d: Broadcast scan\n", i));
+			else
+				WL_SCAN(("%d: scan  for  %s size =%d\n", i,
+				ssid.SSID, ssid.SSID_len));
+			memcpy(ptr, &ssid, sizeof(wlc_ssid_t));
+			ptr += sizeof(wlc_ssid_t);
+		}
+	} else {
+		WL_SCAN(("Broadcast scan\n"));
+	}
+	/* Adding mask to channel numbers */
+	params->channel_num =
+	        htod32((n_ssids << WL_SCAN_PARAMS_NSSID_SHIFT) |
+	               (n_channels & WL_SCAN_PARAMS_COUNT_MASK));
+
+	if (n_channels == 1) {
+		params->active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
+		params->nprobes = htod32(params->active_time / WL_SCAN_JOIN_PROBE_INTERVAL_MS);
+	}
+}
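+
+/*
+ * Packing sketch (illustrative): channel_num carries both counts in one
+ * u32; e.g. 2 SSIDs and 3 channels encode as
+ *
+ *	htod32((2 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ *	       (3 & WL_SCAN_PARAMS_COUNT_MASK))
+ *
+ * with the SSID array starting at the u32-rounded offset just past the
+ * channel list, exactly as computed with offsetof()/roundup() above.
+ */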
+
+static s32
+wl_get_valid_channels(struct net_device *ndev, u8 *valid_chan_list, s32 size)
+{
+	wl_uint32_list_t *list;
+	s32 err = BCME_OK;
+	if (valid_chan_list == NULL || size <= 0)
+		return -ENOMEM;
+
+	memset(valid_chan_list, 0, size);
+	list = (wl_uint32_list_t *)(void *) valid_chan_list;
+	list->count = htod32(WL_NUMCHANNELS);
+	err = wldev_ioctl(ndev, WLC_GET_VALID_CHANNELS, valid_chan_list, size, false);
+	if (err != 0) {
+		WL_ERR(("get channels failed with %d\n", err));
+	}
+
+	return err;
+}
+
+#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
+#define FIRST_SCAN_ACTIVE_DWELL_TIME_MS 40
+bool g_first_broadcast_scan = TRUE;
+#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
+
+static s32
+wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	struct cfg80211_scan_request *request, uint16 action)
+{
+	s32 err = BCME_OK;
+	u32 n_channels;
+	u32 n_ssids;
+	s32 params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_escan_params_t, params));
+	wl_escan_params_t *params = NULL;
+	u8 chan_buf[sizeof(u32)*(WL_NUMCHANNELS + 1)];
+	u32 num_chans = 0;
+	s32 channel;
+	s32 n_valid_chan;
+	s32 search_state = WL_P2P_DISC_ST_SCAN;
+	u32 i, j, n_nodfs = 0;
+	u16 *default_chan_list = NULL;
+	wl_uint32_list_t *list;
+	struct net_device *dev = NULL;
+#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
+	bool is_first_init_2g_scan = false;
+#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
+	p2p_scan_purpose_t	p2p_scan_purpose = P2P_SCAN_PURPOSE_MIN;
+
+	WL_DBG(("Enter \n"));
+
+	/* a scan request can arrive empty: perform the default all-channel scan */
+	if (!cfg) {
+		err = -EINVAL;
+		goto exit;
+	}
+	if (!cfg->p2p_supported || !p2p_scan(cfg)) {
+		/* LEGACY SCAN TRIGGER */
+		WL_SCAN((" LEGACY E-SCAN START\n"));
+
+#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
+		if (!request) {
+			err = -EINVAL;
+			goto exit;
+		}
+		if (ndev == bcmcfg_to_prmry_ndev(cfg) && g_first_broadcast_scan == true) {
+#ifdef USE_INITIAL_2G_SCAN
+			struct ieee80211_channel tmp_channel_list[CH_MAX_2G_CHANNEL];
+			/* allow one 5G channel to add previous connected channel in 5G */
+			bool allow_one_5g_channel = TRUE;
+			j = 0;
+			for (i = 0; i < request->n_channels; i++) {
+				int tmp_chan = ieee80211_frequency_to_channel
+					(request->channels[i]->center_freq);
+				if (tmp_chan > CH_MAX_2G_CHANNEL) {
+					if (allow_one_5g_channel)
+						allow_one_5g_channel = FALSE;
+					else
+						continue;
+				}
+				if (j > CH_MAX_2G_CHANNEL) {
+					WL_ERR(("Index %d exceeds max 2.4GHz channels %d"
+						" and previous 5G connected channel\n",
+						j, CH_MAX_2G_CHANNEL));
+					break;
+				}
+				bcopy(request->channels[i], &tmp_channel_list[j],
+					sizeof(struct ieee80211_channel));
+				WL_SCAN(("channel of request->channels[%d]=%d\n", i, tmp_chan));
+				j++;
+			}
+			if ((j > 0) && (j <= CH_MAX_2G_CHANNEL)) {
+				for (i = 0; i < j; i++)
+					bcopy(&tmp_channel_list[i], request->channels[i],
+						sizeof(struct ieee80211_channel));
+
+				request->n_channels = j;
+				is_first_init_2g_scan = true;
+			}
+			else
+				WL_ERR(("Invalid number of 2.4GHz channels %d\n", j));
+
+			WL_SCAN(("request->n_channels=%d\n", request->n_channels));
+#else /* USE_INITIAL_SHORT_DWELL_TIME */
+			is_first_init_2g_scan = true;
+#endif /* USE_INITIAL_2G_SCAN */
+			g_first_broadcast_scan = false;
+		}
+#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
+
+		/* if the scan request is not empty, parse the scan request parameters */
+		if (request != NULL) {
+			n_channels = request->n_channels;
+			n_ssids = request->n_ssids;
+			if (n_channels % 2)
+				/* If n_channels is odd, add a u16 pad */
+				params_size += sizeof(u16) * (n_channels + 1);
+			else
+				params_size += sizeof(u16) * n_channels;
+
+			/* Allocate space for populating ssids in wl_escan_params_t struct */
+			params_size += sizeof(struct wlc_ssid) * n_ssids;
+		}
+		params = (wl_escan_params_t *) kzalloc(params_size, GFP_KERNEL);
+		if (params == NULL) {
+			err = -ENOMEM;
+			goto exit;
+		}
+		wl_scan_prep(&params->params, request);
+
+#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
+		/* Override active_time to reduce scan time if it's the first broadcast scan. */
+		if (is_first_init_2g_scan)
+			params->params.active_time = FIRST_SCAN_ACTIVE_DWELL_TIME_MS;
+#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
+
+		params->version = htod32(ESCAN_REQ_VERSION);
+		params->action =  htod16(action);
+		wl_escan_set_sync_id(params->sync_id, cfg);
+		wl_escan_set_type(cfg, WL_SCANTYPE_LEGACY);
+		if (params_size + sizeof("escan") >= WLC_IOCTL_MEDLEN) {
+			WL_ERR(("ioctl buffer length not sufficient\n"));
+			kfree(params);
+			err = -ENOMEM;
+			goto exit;
+		}
+		err = wldev_iovar_setbuf(ndev, "escan", params, params_size,
+			cfg->escan_ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+		if (unlikely(err)) {
+			if (err == BCME_EPERM)
+				/* Scan Not permitted at this point of time */
+				WL_DBG((" Escan not permitted at this time (%d)\n", err));
+			else
+				WL_ERR((" Escan set error (%d)\n", err));
+		}
+		kfree(params);
+	}
+	else if (p2p_is_on(cfg) && p2p_scan(cfg)) {
+		/* P2P SCAN TRIGGER */
+		s32 _freq = 0;
+		n_nodfs = 0;
+		if (request && request->n_channels) {
+			num_chans = request->n_channels;
+			WL_SCAN((" channel number : %d\n", num_chans));
+			default_chan_list = kzalloc(num_chans * sizeof(*default_chan_list),
+				GFP_KERNEL);
+			if (default_chan_list == NULL) {
+				WL_ERR(("channel list allocation failed \n"));
+				err = -ENOMEM;
+				goto exit;
+			}
+			if (!wl_get_valid_channels(ndev, chan_buf, sizeof(chan_buf))) {
+				list = (wl_uint32_list_t *) chan_buf;
+				n_valid_chan = dtoh32(list->count);
+				for (i = 0; i < num_chans; i++)
+				{
+					_freq = request->channels[i]->center_freq;
+					channel = ieee80211_frequency_to_channel(_freq);
+
+					/* ignore DFS channels */
+					if (request->channels[i]->flags &
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+						(IEEE80211_CHAN_NO_IR
+						| IEEE80211_CHAN_RADAR))
+#else
+						(IEEE80211_CHAN_RADAR
+						| IEEE80211_CHAN_PASSIVE_SCAN))
+#endif
+						continue;
+
+					for (j = 0; j < n_valid_chan; j++) {
+						/* allow only channels supported
+						 * under the current regulatory
+						 * domain
+						 */
+						if (channel == (dtoh32(list->element[j])))
+							default_chan_list[n_nodfs++] =
+								channel;
+					}
+
+				}
+			}
+			if (num_chans == SOCIAL_CHAN_CNT && (
+						(default_chan_list[0] == SOCIAL_CHAN_1) &&
+						(default_chan_list[1] == SOCIAL_CHAN_2) &&
+						(default_chan_list[2] == SOCIAL_CHAN_3))) {
+				/* SOCIAL CHANNELS 1, 6, 11 */
+				search_state = WL_P2P_DISC_ST_SEARCH;
+				p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL;
+				WL_INFO(("P2P SEARCH PHASE START \n"));
+			} else if ((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION)) &&
+				(wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP)) {
+				/* If you are already a GO, then do SEARCH only */
+				WL_INFO(("Already a GO. Do SEARCH Only"));
+				search_state = WL_P2P_DISC_ST_SEARCH;
+				num_chans = n_nodfs;
+				p2p_scan_purpose = P2P_SCAN_NORMAL;
+
+			} else if (num_chans == 1) {
+				p2p_scan_purpose = P2P_SCAN_CONNECT_TRY;
+			} else if (num_chans == SOCIAL_CHAN_CNT + 1) {
+				/* SOCIAL_CHAN_CNT + 1 takes care of the progressive
+				 * scan supported by the supplicant.
+				 */
+				p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL;
+			} else {
+				WL_INFO(("P2P SCAN STATE START \n"));
+				num_chans = n_nodfs;
+				p2p_scan_purpose = P2P_SCAN_NORMAL;
+			}
+		} else {
+			err = -EINVAL;
+			goto exit;
+		}
+		err = wl_cfgp2p_escan(cfg, ndev, cfg->active_scan, num_chans, default_chan_list,
+			search_state, action,
+			wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE), NULL,
+			p2p_scan_purpose);
+
+		if (!err)
+			cfg->p2p->search_state = search_state;
+
+		kfree(default_chan_list);
+	}
+exit:
+	if (unlikely(err)) {
+		/* Don't print an error in case of scan suppress */
+		if ((err == BCME_EPERM) && cfg->scan_suppressed)
+			WL_DBG(("Escan failed: Scan Suppressed \n"));
+		else
+			WL_ERR(("error (%d)\n", err));
+	}
+	return err;
+}
+
+
+static s32
+wl_do_escan(struct bcm_cfg80211 *cfg, struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request)
+{
+	s32 err = BCME_OK;
+	s32 passive_scan;
+	wl_scan_results_t *results;
+	WL_SCAN(("Enter \n"));
+	mutex_lock(&cfg->usr_sync);
+
+	results = wl_escan_get_buf(cfg, FALSE);
+	results->version = 0;
+	results->count = 0;
+	results->buflen = WL_SCAN_RESULTS_FIXED_SIZE;
+
+	cfg->escan_info.ndev = ndev;
+	cfg->escan_info.wiphy = wiphy;
+	cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANING;
+	passive_scan = cfg->active_scan ? 0 : 1;
+	err = wldev_ioctl(ndev, WLC_SET_PASSIVE_SCAN,
+		&passive_scan, sizeof(passive_scan), true);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		goto exit;
+	}
+
+	err = wl_run_escan(cfg, ndev, request, WL_SCAN_ACTION_START);
+exit:
+	mutex_unlock(&cfg->usr_sync);
+	return err;
+}
+
+static s32
+__wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request,
+	struct cfg80211_ssid *this_ssid)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct cfg80211_ssid *ssids;
+	struct ether_addr primary_mac;
+	bool p2p_ssid;
+#ifdef WL11U
+	bcm_tlv_t *interworking_ie;
+#endif
+	s32 err = 0;
+	s32 bssidx = -1;
+	s32 i;
+
+	unsigned long flags;
+	static s32 busy_count = 0;
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	struct net_device *remain_on_channel_ndev = NULL;
+#endif
+
+	dhd_pub_t *dhd;
+
+	dhd = (dhd_pub_t *)(cfg->pub);
+	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+		WL_ERR(("Invalid Scan Command at SoftAP mode\n"));
+		return -EINVAL;
+	}
+
+	ndev = ndev_to_wlc_ndev(ndev, cfg);
+
+	if (WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg)) {
+		WL_ERR(("Sending Action Frames. Try it again.\n"));
+		return -EAGAIN;
+	}
+
+	WL_DBG(("Enter wiphy (%p)\n", wiphy));
+	if (wl_get_drv_status_all(cfg, SCANNING)) {
+		if (cfg->scan_request == NULL) {
+			wl_clr_drv_status_all(cfg, SCANNING);
+			WL_DBG(("<<<<<<<<<<<Force Clear Scanning Status>>>>>>>>>>>\n"));
+		} else {
+			WL_ERR(("Scanning already\n"));
+			return -EAGAIN;
+		}
+	}
+	if (wl_get_drv_status(cfg, SCAN_ABORTING, ndev)) {
+		WL_ERR(("Scanning being aborted\n"));
+		return -EAGAIN;
+	}
+	if (request && request->n_ssids > WL_SCAN_PARAMS_SSID_MAX) {
+		WL_ERR(("n_ssids > WL_SCAN_PARAMS_SSID_MAX\n"));
+		return -EOPNOTSUPP;
+	}
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	remain_on_channel_ndev = wl_cfg80211_get_remain_on_channel_ndev(cfg);
+	if (remain_on_channel_ndev) {
+		WL_DBG(("Remain_on_channel bit is set, somehow it didn't get cleared\n"));
+		wl_notify_escan_complete(cfg, remain_on_channel_ndev, true, true);
+	}
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+
+	/* Arm scan timeout timer */
+	mod_timer(&cfg->scan_timeout, jiffies + msecs_to_jiffies(WL_SCAN_TIMER_INTERVAL_MS));
+	if (request) {		/* scan bss */
+		ssids = request->ssids;
+		p2p_ssid = false;
+		for (i = 0; i < request->n_ssids; i++) {
+			if (ssids[i].ssid_len &&
+				IS_P2P_SSID(ssids[i].ssid, ssids[i].ssid_len)) {
+				p2p_ssid = true;
+				break;
+			}
+		}
+		if (p2p_ssid) {
+			if (cfg->p2p_supported) {
+				/* p2p scan trigger */
+				if (p2p_on(cfg) == false) {
+					/* p2p on at the first time */
+					p2p_on(cfg) = true;
+					wl_cfgp2p_set_firm_p2p(cfg);
+					get_primary_mac(cfg, &primary_mac);
+					wl_cfgp2p_generate_bss_mac(&primary_mac,
+						&cfg->p2p->dev_addr, &cfg->p2p->int_addr);
+				}
+				wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+				WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
+				p2p_scan(cfg) = true;
+			}
+		} else {
+			/* Legacy scan trigger: we have to disable p2p discovery
+			 * if p2p discovery is on.
+			 */
+			if (cfg->p2p_supported) {
+				p2p_scan(cfg) = false;
+				/* If the net device is not the primary one and p2p
+				 * is on, we will do a p2p scan using
+				 * P2PAPI_BSSCFG_DEVICE.
+				 */
+
+				if (p2p_scan(cfg) == false) {
+					if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+						err = wl_cfgp2p_discover_enable_search(cfg,
+						false);
+						if (unlikely(err)) {
+							goto scan_out;
+						}
+
+					}
+				}
+			}
+			if (!cfg->p2p_supported || !p2p_scan(cfg)) {
+
+				if (wl_cfgp2p_find_idx(cfg, ndev, &bssidx) != BCME_OK) {
+					WL_ERR(("Find p2p index from ndev(%p) failed\n",
+						ndev));
+					err = BCME_ERROR;
+					goto scan_out;
+				}
+#ifdef WL11U
+				if ((interworking_ie = wl_cfg80211_find_interworking_ie(
+					(u8 *)request->ie, request->ie_len)) != NULL) {
+					err = wl_cfg80211_add_iw_ie(cfg, ndev, bssidx,
+					       VNDR_IE_CUSTOM_FLAG, interworking_ie->id,
+					       interworking_ie->data, interworking_ie->len);
+
+					if (unlikely(err)) {
+						goto scan_out;
+					}
+				} else if (cfg->iw_ie_len != 0) {
+					/* we have to clear the IW IE and disable gratuitous ARP */
+					wl_cfg80211_add_iw_ie(cfg, ndev, bssidx,
+						VNDR_IE_CUSTOM_FLAG,
+						DOT11_MNG_INTERWORKING_ID,
+						0, 0);
+
+					wldev_iovar_setint_bsscfg(ndev, "grat_arp", 0,
+						bssidx);
+					cfg->iw_ie_len = 0;
+					memset(cfg->iw_ie, 0, IW_IES_MAX_BUF_LEN);
+
+					cfg->wl11u = FALSE;
+					/* we don't care about error */
+				}
+#endif /* WL11U */
+				err = wl_cfgp2p_set_management_ie(cfg, ndev, bssidx,
+					VNDR_IE_PRBREQ_FLAG, (u8 *)request->ie,
+					request->ie_len);
+
+				if (unlikely(err)) {
+					goto scan_out;
+				}
+
+			}
+		}
+	} else {		/* scan in ibss */
+		ssids = this_ssid;
+	}
+
+	if (request && !p2p_scan(cfg)) {
+		WL_TRACE_HW4(("START SCAN\n"));
+	}
+
+	cfg->scan_request = request;
+	wl_set_drv_status(cfg, SCANNING, ndev);
+
+	if (cfg->p2p_supported) {
+		if (p2p_on(cfg) && p2p_scan(cfg)) {
+
+			/* find my listen channel */
+			cfg->afx_hdl->my_listen_chan =
+				wl_find_listen_channel(cfg, request->ie,
+				request->ie_len);
+			err = wl_cfgp2p_enable_discovery(cfg, ndev,
+			request->ie, request->ie_len);
+
+			if (unlikely(err)) {
+				goto scan_out;
+			}
+		}
+	}
+	err = wl_do_escan(cfg, wiphy, ndev, request);
+	if (likely(!err))
+		goto scan_success;
+	else
+		goto scan_out;
+
+scan_success:
+	busy_count = 0;
+
+	return 0;
+
+scan_out:
+	if (err == BCME_BUSY || err == BCME_NOTREADY) {
+		WL_ERR(("Scan err = (%d), busy?%d\n", err, -EBUSY));
+		err = -EBUSY;
+	}
+
+#define SCAN_EBUSY_RETRY_LIMIT 10
+	if (err == -EBUSY) {
+		if (busy_count++ > SCAN_EBUSY_RETRY_LIMIT) {
+			struct ether_addr bssid;
+			s32 ret = 0;
+			busy_count = 0;
+			WL_ERR(("Unusual continuous EBUSY error, %d %d %d %d %d %d %d %d %d\n",
+				wl_get_drv_status(cfg, SCANNING, ndev),
+				wl_get_drv_status(cfg, SCAN_ABORTING, ndev),
+				wl_get_drv_status(cfg, CONNECTING, ndev),
+				wl_get_drv_status(cfg, CONNECTED, ndev),
+				wl_get_drv_status(cfg, DISCONNECTING, ndev),
+				wl_get_drv_status(cfg, AP_CREATING, ndev),
+				wl_get_drv_status(cfg, AP_CREATED, ndev),
+				wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev),
+				wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev)));
+
+			bzero(&bssid, sizeof(bssid));
+			if ((ret = wldev_ioctl(ndev, WLC_GET_BSSID,
+				&bssid, ETHER_ADDR_LEN, false)) == 0)
+				WL_ERR(("FW is connected with " MACDBG "\n",
+					MAC2STRDBG(bssid.octet)));
+			else
+				WL_ERR(("GET BSSID failed with %d\n", ret));
+
+			wl_cfg80211_scan_abort(cfg);
+
+		}
+	} else {
+		busy_count = 0;
+	}
+
+	wl_clr_drv_status(cfg, SCANNING, ndev);
+	if (timer_pending(&cfg->scan_timeout))
+		del_timer_sync(&cfg->scan_timeout);
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	cfg->scan_request = NULL;
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+
+	return err;
+}
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32
+wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
+#else
+static s32
+wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+{
+	s32 err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	struct net_device *ndev = wdev_to_wlc_ndev(request->wdev, cfg);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+	WL_DBG(("Enter \n"));
+	RETURN_EIO_IF_NOT_UP(cfg);
+
+	err = __wl_cfg80211_scan(wiphy, ndev, request, NULL);
+	if (unlikely(err)) {
+		if ((err == BCME_EPERM) && cfg->scan_suppressed)
+			WL_DBG(("scan not permitted at this time (%d)\n", err));
+		else
+			WL_ERR(("scan error (%d)\n", err));
+		return err;
+	}
+
+	return err;
+}
+
+static s32 wl_set_rts(struct net_device *dev, u32 rts_threshold)
+{
+	s32 err = 0;
+
+	err = wldev_iovar_setint(dev, "rtsthresh", rts_threshold);
+	if (unlikely(err)) {
+		WL_ERR(("Error (%d)\n", err));
+		return err;
+	}
+	return err;
+}
+
+static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold)
+{
+	s32 err = 0;
+
+	err = wldev_iovar_setint_bsscfg(dev, "fragthresh", frag_threshold, 0);
+	if (unlikely(err)) {
+		WL_ERR(("Error (%d)\n", err));
+		return err;
+	}
+	return err;
+}
+
+static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l)
+{
+	s32 err = 0;
+	u32 cmd = (l ? WLC_SET_LRL : WLC_SET_SRL);
+
+	retry = htod32(retry);
+	err = wldev_ioctl(dev, cmd, &retry, sizeof(retry), true);
+	if (unlikely(err)) {
+		WL_ERR(("cmd (%d) , error (%d)\n", cmd, err));
+		return err;
+	}
+	return err;
+}
+
+static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+	struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(wiphy);
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	s32 err = 0;
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	WL_DBG(("Enter\n"));
+	if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
+		(cfg->conf->rts_threshold != wiphy->rts_threshold)) {
+		cfg->conf->rts_threshold = wiphy->rts_threshold;
+		err = wl_set_rts(ndev, cfg->conf->rts_threshold);
+		if (!err)
+			return err;
+	}
+	if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
+		(cfg->conf->frag_threshold != wiphy->frag_threshold)) {
+		cfg->conf->frag_threshold = wiphy->frag_threshold;
+		err = wl_set_frag(ndev, cfg->conf->frag_threshold);
+		if (!err)
+			return err;
+	}
+	if (changed & WIPHY_PARAM_RETRY_LONG &&
+		(cfg->conf->retry_long != wiphy->retry_long)) {
+		cfg->conf->retry_long = wiphy->retry_long;
+		err = wl_set_retry(ndev, cfg->conf->retry_long, true);
+		if (!err)
+			return err;
+	}
+	if (changed & WIPHY_PARAM_RETRY_SHORT &&
+		(cfg->conf->retry_short != wiphy->retry_short)) {
+		cfg->conf->retry_short = wiphy->retry_short;
+		err = wl_set_retry(ndev, cfg->conf->retry_short, false);
+		if (!err) {
+			return err;
+		}
+	}
+
+	return err;
+}
+static chanspec_t
+channel_to_chanspec(struct wiphy *wiphy, struct net_device *dev, u32 channel, u32 bw_cap)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	u8 *buf = NULL;
+	wl_uint32_list_t *list;
+	int err = BCME_OK;
+	chanspec_t c = 0, ret_c = 0;
+	int bw = 0, tmp_bw = 0;
+	int i;
+	u32 tmp_c, sb;
+	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+#define LOCAL_BUF_SIZE	1024
+	buf = (u8 *) kzalloc(LOCAL_BUF_SIZE, kflags);
+	if (!buf) {
+		WL_ERR(("buf memory alloc failed\n"));
+		goto exit;
+	}
+	list = (wl_uint32_list_t *)(void *)buf;
+	list->count = htod32(WL_NUMCHANSPECS);
+	err = wldev_iovar_getbuf_bsscfg(dev, "chanspecs", NULL,
+		0, buf, LOCAL_BUF_SIZE, 0, &cfg->ioctl_buf_sync);
+	if (err != BCME_OK) {
+		WL_ERR(("get chanspecs failed with %d\n", err));
+		goto exit;
+	}
+	for (i = 0; i < dtoh32(list->count); i++) {
+		c = dtoh32(list->element[i]);
+		c = wl_chspec_driver_to_host(c);
+		if (channel <= CH_MAX_2G_CHANNEL) {
+			if (!CHSPEC_IS20(c))
+				continue;
+			if (channel == CHSPEC_CHANNEL(c)) {
+				ret_c = c;
+				bw = 20;
+				goto exit;
+			}
+		}
+		if (CHSPEC_IS20(c)) {
+			tmp_c = CHSPEC_CHANNEL(c);
+			tmp_bw = WLC_BW_CAP_20MHZ;
+		}
+		else if (CHSPEC_IS40(c)) {
+			tmp_c = CHSPEC_CHANNEL(c);
+			if (CHSPEC_SB_UPPER(c)) {
+				tmp_c += CH_10MHZ_APART;
+			} else {
+				tmp_c -= CH_10MHZ_APART;
+			}
+			tmp_bw = WLC_BW_CAP_40MHZ;
+		}
+		else {
+			tmp_c = CHSPEC_CHANNEL(c);
+			sb = c & WL_CHANSPEC_CTL_SB_MASK;
+			if (sb == WL_CHANSPEC_CTL_SB_LL) {
+				tmp_c -= (CH_10MHZ_APART + CH_20MHZ_APART);
+			} else if (sb == WL_CHANSPEC_CTL_SB_LU) {
+				tmp_c -= CH_10MHZ_APART;
+			} else if (sb == WL_CHANSPEC_CTL_SB_UL) {
+				tmp_c += CH_10MHZ_APART;
+			} else {
+				/* WL_CHANSPEC_CTL_SB_UU */
+				tmp_c += (CH_10MHZ_APART + CH_20MHZ_APART);
+			}
+			tmp_bw = WLC_BW_CAP_80MHZ;
+		}
+		if (tmp_c != channel)
+			continue;
+
+		if ((tmp_bw > bw) && (tmp_bw <= bw_cap)) {
+			bw = tmp_bw;
+			ret_c = c;
+			if (bw == bw_cap)
+				goto exit;
+		}
+	}
+exit:
+	if (buf)
+		kfree(buf);
+#undef LOCAL_BUF_SIZE
+	if (ret_c)
+		ret_c = wl_chspec_host_to_driver(ret_c);
+	WL_INFO(("return chanspec %x %d\n", ret_c, bw));
+	return ret_c;
+}
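+
+/*
+ * Sideband sketch (illustration only): for an 80 MHz chanspec centred on
+ * channel 42 the four 20 MHz control channels are 36/40/44/48, recovered
+ * by the LL/LU/UL/UU arms above as:
+ *
+ *	36 = 42 - (CH_10MHZ_APART + CH_20MHZ_APART)	LL
+ *	40 = 42 - CH_10MHZ_APART			LU
+ *	44 = 42 + CH_10MHZ_APART			UL
+ *	48 = 42 + (CH_10MHZ_APART + CH_20MHZ_APART)	UU
+ */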
+
+void
+wl_cfg80211_ibss_vsie_set_buffer(vndr_ie_setbuf_t *ibss_vsie, int ibss_vsie_len)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	if (cfg != NULL && ibss_vsie != NULL) {
+		if (cfg->ibss_vsie != NULL) {
+			kfree(cfg->ibss_vsie);
+		}
+		cfg->ibss_vsie = ibss_vsie;
+		cfg->ibss_vsie_len = ibss_vsie_len;
+	}
+}
+
+static void
+wl_cfg80211_ibss_vsie_free(struct bcm_cfg80211 *cfg)
+{
+	/* free & reinitialize the VSIE (Vendor Specific IE) */
+	if (cfg->ibss_vsie != NULL) {
+		kfree(cfg->ibss_vsie);
+		cfg->ibss_vsie = NULL;
+		cfg->ibss_vsie_len = 0;
+	}
+}
+
+s32
+wl_cfg80211_ibss_vsie_delete(struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	char *ioctl_buf = NULL;
+	s32 ret = BCME_OK;
+
+	if (cfg != NULL && cfg->ibss_vsie != NULL) {
+		ioctl_buf = kmalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL);
+		if (!ioctl_buf) {
+			WL_ERR(("ioctl memory alloc failed\n"));
+			return -ENOMEM;
+		}
+
+		/* change the command from "add" to "del" */
+		strncpy(cfg->ibss_vsie->cmd, "del", VNDR_IE_CMD_LEN - 1);
+		cfg->ibss_vsie->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+
+		ret = wldev_iovar_setbuf(dev, "ie",
+			cfg->ibss_vsie, cfg->ibss_vsie_len,
+			ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+		WL_ERR(("ret=%d\n", ret));
+
+		if (ret == BCME_OK) {
+			/* free & reinitialize the VSIE */
+			kfree(cfg->ibss_vsie);
+			cfg->ibss_vsie = NULL;
+			cfg->ibss_vsie_len = 0;
+		}
+
+		if (ioctl_buf) {
+			kfree(ioctl_buf);
+		}
+	}
+
+	return ret;
+}
+
+static s32
+wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_ibss_params *params)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct cfg80211_bss *bss;
+	struct ieee80211_channel *chan;
+	struct wl_join_params join_params;
+	int scan_suppress;
+	struct cfg80211_ssid ssid;
+	s32 scan_retry = 0;
+	s32 err = 0;
+	size_t join_params_size;
+	chanspec_t chanspec = 0;
+	struct {
+		u32 band;
+		u32 bw_cap;
+	} param = {0, 0};
+	u32 bw_cap = 0;
+
+	WL_TRACE(("In\n"));
+	RETURN_EIO_IF_NOT_UP(cfg);
+	WL_INFO(("JOIN BSSID:" MACDBG "\n", MAC2STRDBG(params->bssid)));
+	if (!params->ssid || params->ssid_len <= 0) {
+		WL_ERR(("Invalid parameter\n"));
+		return -EINVAL;
+	}
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	chan = params->chandef.chan;
+#else
+	chan = params->channel;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	if (chan)
+		cfg->channel = ieee80211_frequency_to_channel(chan->center_freq);
+	if (wl_get_drv_status(cfg, CONNECTED, dev)) {
+		struct wlc_ssid *ssid = (struct wlc_ssid *)wl_read_prof(cfg, dev, WL_PROF_SSID);
+		u8 *bssid = (u8 *)wl_read_prof(cfg, dev, WL_PROF_BSSID);
+		u32 *channel = (u32 *)wl_read_prof(cfg, dev, WL_PROF_CHAN);
+		if (!params->bssid || ((memcmp(params->bssid, bssid, ETHER_ADDR_LEN) == 0) &&
+			(memcmp(params->ssid, ssid->SSID, ssid->SSID_len) == 0) &&
+			(*channel == cfg->channel))) {
+			WL_ERR(("Connection already existed to " MACDBG "\n",
+				MAC2STRDBG((u8 *)wl_read_prof(cfg, dev, WL_PROF_BSSID))));
+			return -EISCONN;
+		}
+		WL_ERR(("Ignore Previous connecton to %s (" MACDBG ")\n",
+			ssid->SSID, MAC2STRDBG(bssid)));
+	}
+
+	/* remove the VSIE */
+	wl_cfg80211_ibss_vsie_delete(dev);
+
+	bss = cfg80211_get_ibss(wiphy, NULL, params->ssid, params->ssid_len);
+	if (!bss) {
+		if (IBSS_INITIAL_SCAN_ALLOWED == TRUE) {
+			memcpy(ssid.ssid, params->ssid, params->ssid_len);
+			ssid.ssid_len = params->ssid_len;
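+			/* Kick off a directed scan for the requested SSID,
+			 * retrying with a short delay while the scan engine
+			 * reports -EBUSY.
+			 */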
+			do {
+				if (unlikely
+					(__wl_cfg80211_scan(wiphy, dev, NULL, &ssid) ==
+					 -EBUSY)) {
+					wl_delay(150);
+				} else {
+					break;
+				}
+			} while (++scan_retry < WL_SCAN_RETRY_MAX);
+
+			/* The rtnl-lock release code was removed here; there is
+			 * no apparent reason the rtnl lock would need to be
+			 * dropped around this wait.
+			 */
+
+			/* wait up to 4 seconds for the scan to complete */
+			schedule_timeout_interruptible(msecs_to_jiffies(4000));
+
+			bss = cfg80211_get_ibss(wiphy, NULL,
+				params->ssid, params->ssid_len);
+		}
+	}
+	if (bss && ((IBSS_COALESCE_ALLOWED == TRUE) ||
+		((IBSS_COALESCE_ALLOWED == FALSE) && params->bssid &&
+		!memcmp(bss->bssid, params->bssid, ETHER_ADDR_LEN)))) {
+		cfg->ibss_starter = false;
+		WL_DBG(("Found IBSS\n"));
+	} else {
+		cfg->ibss_starter = true;
+	}
+	if (chan) {
+		if (chan->band == IEEE80211_BAND_5GHZ) {
+			param.band = WLC_BAND_5G;
+			err = wldev_iovar_getbuf(dev, "bw_cap", &param, sizeof(param),
+			                         cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+			if (err) {
+				if (err != BCME_UNSUPPORTED) {
+					WL_ERR(("bw_cap failed, %d\n", err));
+					return err;
+				} else {
+					err = wldev_iovar_getint(dev, "mimo_bw_cap", &bw_cap);
+					if (err) {
+						WL_ERR(("error get mimo_bw_cap (%d)\n", err));
+					}
+					if (bw_cap != WLC_N_BW_20ALL)
+						bw_cap = WL_CHANSPEC_BW_40;
+				}
+			} else {
+				if (WL_BW_CAP_80MHZ(cfg->ioctl_buf[0]))
+					bw_cap = WL_CHANSPEC_BW_80;
+				else if (WL_BW_CAP_40MHZ(cfg->ioctl_buf[0]))
+					bw_cap = WL_CHANSPEC_BW_40;
+				else
+					bw_cap = WL_CHANSPEC_BW_20;
+			}
+		} else if (chan->band == IEEE80211_BAND_2GHZ)
+			bw_cap = WL_CHANSPEC_BW_20;
+
+		chanspec = channel_to_chanspec(wiphy, dev, cfg->channel, bw_cap);
+	}
+	/*
+	 * Join with specific BSSID and cached SSID
+	 * If SSID is zero join based on BSSID only
+	 */
+	memset(&join_params, 0, sizeof(join_params));
+	memcpy((void *)join_params.ssid.SSID, (void *)params->ssid,
+		params->ssid_len);
+	join_params.ssid.SSID_len = htod32(params->ssid_len);
+	if (params->bssid) {
+		memcpy(&join_params.params.bssid, params->bssid, ETHER_ADDR_LEN);
+		err = wldev_ioctl(dev, WLC_SET_DESIRED_BSSID, &join_params.params.bssid,
+			ETHER_ADDR_LEN, true);
+		if (unlikely(err)) {
+			WL_ERR(("Error (%d)\n", err));
+			return err;
+		}
+	} else
+		memset(&join_params.params.bssid, 0, ETHER_ADDR_LEN);
+	wldev_iovar_setint(dev, "ibss_coalesce_allowed", IBSS_COALESCE_ALLOWED);
+
+	if (IBSS_INITIAL_SCAN_ALLOWED == FALSE) {
+		scan_suppress = TRUE;
+		/* Set the SCAN SUPPRESS Flag in the firmware to skip join scan */
+		err = wldev_ioctl(dev, WLC_SET_SCANSUPPRESS,
+			&scan_suppress, sizeof(int), true);
+		if (unlikely(err)) {
+			WL_ERR(("Scan Suppress Setting Failed (%d)\n", err));
+			return err;
+		}
+	}
+
+	if (chan) {
+		join_params.params.chanspec_list[0] = chanspec;
+		join_params.params.chanspec_num = 1;
+		wldev_iovar_setint(dev, "chanspec", chanspec);
+	}
+	join_params_size = sizeof(join_params);
+
+	/* Disable authentication; IBSS will add a key later if one is required */
+	wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_DISABLED);
+	wldev_iovar_setint(dev, "wsec", 0);
+
+
+	err = wldev_ioctl(dev, WLC_SET_SSID, &join_params,
+		join_params_size, true);
+	if (unlikely(err)) {
+		WL_ERR(("Error (%d)\n", err));
+		return err;
+	}
+
+	if (IBSS_INITIAL_SCAN_ALLOWED == FALSE) {
+		scan_suppress = FALSE;
+		/* Reset the SCAN SUPPRESS Flag */
+		err = wldev_ioctl(dev, WLC_SET_SCANSUPPRESS,
+			&scan_suppress, sizeof(int), true);
+		if (unlikely(err)) {
+			WL_ERR(("Reset Scan Suppress Flag Failed (%d)\n", err));
+			return err;
+		}
+	}
+	wl_update_prof(cfg, dev, NULL, &join_params.ssid, WL_PROF_SSID);
+	wl_update_prof(cfg, dev, NULL, &cfg->channel, WL_PROF_CHAN);
+	return err;
+}
+
+static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 err = 0;
+	scb_val_t scbval;
+	u8 *curbssid;
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	wl_link_down(cfg);
+
+	WL_ERR(("Leave IBSS\n"));
+	curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID);
+	wl_set_drv_status(cfg, DISCONNECTING, dev);
+	scbval.val = 0;
+	memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
+	err = wldev_ioctl(dev, WLC_DISASSOC, &scbval,
+		sizeof(scb_val_t), true);
+	if (unlikely(err)) {
+		wl_clr_drv_status(cfg, DISCONNECTING, dev);
+		WL_ERR(("error(%d)\n", err));
+		return err;
+	}
+
+	/* remove the VSIE */
+	wl_cfg80211_ibss_vsie_delete(dev);
+
+	return err;
+}
+
+#ifdef MFP
+static int wl_cfg80211_get_rsn_capa(bcm_tlv_t *wpa2ie, u8* capa)
+{
+	u16 suite_count;
+	wpa_suite_mcast_t *mcast;
+	wpa_suite_ucast_t *ucast;
+	u16 len;
+	wpa_suite_auth_key_mgmt_t *mgmt;
+
+	if (!wpa2ie)
+		return -1;
+
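+	/* Walk the RSN IE: after the 2-byte version field come the group
+	 * (multicast) cipher suite, the pairwise suite count and list, the
+	 * AKM suite count and list, and finally the 2-byte RSN capabilities
+	 * field extracted below.
+	 */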
+	len = wpa2ie->len;
+	mcast = (wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
+	if ((len -= WPA_SUITE_LEN) <= 0)
+		return BCME_BADLEN;
+	ucast = (wpa_suite_ucast_t *)&mcast[1];
+	suite_count = ltoh16_ua(&ucast->count);
+	if ((suite_count > NL80211_MAX_NR_CIPHER_SUITES) ||
+		(len -= (WPA_IE_SUITE_COUNT_LEN +
+		(WPA_SUITE_LEN * suite_count))) <= 0)
+		return BCME_BADLEN;
+
+	mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count];
+	suite_count = ltoh16_ua(&mgmt->count);
+
+	if ((suite_count > NL80211_MAX_NR_CIPHER_SUITES) ||
+		(len -= (WPA_IE_SUITE_COUNT_LEN +
+		(WPA_SUITE_LEN * suite_count))) >= RSN_CAP_LEN) {
+		capa[0] = *(u8 *)&mgmt->list[suite_count];
+		capa[1] = *((u8 *)&mgmt->list[suite_count] + 1);
+	} else
+		return BCME_BADLEN;
+
+	return 0;
+}
+#endif /* MFP */
+
+static s32
+wl_set_wpa_version(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct wl_security *sec;
+	s32 val = 0;
+	s32 err = 0;
+	s32 bssidx;
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+
+	if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+		val = WPA_AUTH_PSK |
+			WPA_AUTH_UNSPECIFIED;
+	else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)
+		val = WPA2_AUTH_PSK|
+			WPA2_AUTH_UNSPECIFIED;
+	else
+		val = WPA_AUTH_DISABLED;
+
+	if (is_wps_conn(sme))
+		val = WPA_AUTH_DISABLED;
+
+#ifdef BCMWAPI_WPI
+	if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1) {
+		WL_DBG((" * wl_set_wpa_version, set wpa_auth"
+			" to WPA_AUTH_WAPI 0x400"));
+		val = WAPI_AUTH_PSK | WAPI_AUTH_UNSPECIFIED;
+	}
+#endif
+	WL_DBG(("setting wpa_auth to 0x%0x\n", val));
+	err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("set wpa_auth failed (%d)\n", err));
+		return err;
+	}
+	sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+	sec->wpa_versions = sme->crypto.wpa_versions;
+	return err;
+}
+
+#ifdef BCMWAPI_WPI
+static s32
+wl_set_set_wapi_ie(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	s32 err = 0;
+	s32 bssidx;
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+
+	WL_DBG((" %s \n", __FUNCTION__));
+
+	if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1) {
+		err = wldev_iovar_setbuf_bsscfg(dev, "wapiie", sme->ie, sme->ie_len,
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+		if (unlikely(err)) {
+			WL_ERR(("===> set_wapi_ie Error (%d)\n", err));
+			return err;
+		}
+	} else
+		WL_DBG((" * skip \n"));
+	return err;
+}
+#endif /* BCMWAPI_WPI */
+
+static s32
+wl_set_auth_type(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct wl_security *sec;
+	s32 val = 0;
+	s32 err = 0;
+	s32 bssidx;
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+
+	switch (sme->auth_type) {
+	case NL80211_AUTHTYPE_OPEN_SYSTEM:
+		val = WL_AUTH_OPEN_SYSTEM;
+		WL_DBG(("open system\n"));
+		break;
+	case NL80211_AUTHTYPE_SHARED_KEY:
+		val = WL_AUTH_SHARED_KEY;
+		WL_DBG(("shared key\n"));
+		break;
+	case NL80211_AUTHTYPE_AUTOMATIC:
+		val = WL_AUTH_OPEN_SHARED;
+		WL_DBG(("automatic\n"));
+		break;
+	default:
+		val = 2;
+		WL_ERR(("invalid auth type (%d)\n", sme->auth_type));
+		break;
+	}
+
+	err = wldev_iovar_setint_bsscfg(dev, "auth", val, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("set auth failed (%d)\n", err));
+		return err;
+	}
+	sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+	sec->auth_type = sme->auth_type;
+	return err;
+}
+
+static s32
+wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct wl_security *sec;
+	s32 pval = 0;
+	s32 gval = 0;
+	s32 err = 0;
+	s32 wsec_val = 0;
+#ifdef MFP
+	s32 mfp = 0;
+	bcm_tlv_t *wpa2_ie;
+	u8 rsn_cap[2];
+#endif /* MFP */
+
+#ifdef BCMWAPI_WPI
+	s32 val = 0;
+#endif
+	s32 bssidx;
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+
+	if (sme->crypto.n_ciphers_pairwise) {
+		switch (sme->crypto.ciphers_pairwise[0]) {
+		case WLAN_CIPHER_SUITE_WEP40:
+		case WLAN_CIPHER_SUITE_WEP104:
+			pval = WEP_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+			pval = TKIP_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_CCMP:
+		case WLAN_CIPHER_SUITE_AES_CMAC:
+			pval = AES_ENABLED;
+			break;
+#ifdef BCMWAPI_WPI
+		case WLAN_CIPHER_SUITE_SMS4:
+			val = SMS4_ENABLED;
+			pval = SMS4_ENABLED;
+			break;
+#endif
+		default:
+			WL_ERR(("invalid cipher pairwise (%d)\n",
+				sme->crypto.ciphers_pairwise[0]));
+			return -EINVAL;
+		}
+	}
+	if (sme->crypto.cipher_group) {
+		switch (sme->crypto.cipher_group) {
+		case WLAN_CIPHER_SUITE_WEP40:
+		case WLAN_CIPHER_SUITE_WEP104:
+			gval = WEP_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+			gval = TKIP_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_CCMP:
+			gval = AES_ENABLED;
+			break;
+		case WLAN_CIPHER_SUITE_AES_CMAC:
+			gval = AES_ENABLED;
+			break;
+#ifdef BCMWAPI_WPI
+		case WLAN_CIPHER_SUITE_SMS4:
+			val = SMS4_ENABLED;
+			gval = SMS4_ENABLED;
+			break;
+#endif
+		default:
+			WL_ERR(("invalid cipher group (%d)\n",
+				sme->crypto.cipher_group));
+			return -EINVAL;
+		}
+	}
+
+	WL_DBG(("pval (%d) gval (%d)\n", pval, gval));
+
+	if (is_wps_conn(sme)) {
+		if (sme->privacy)
+			err = wldev_iovar_setint_bsscfg(dev, "wsec", 4, bssidx);
+		else
+			/* WPS-2.0 allows no security */
+			err = wldev_iovar_setint_bsscfg(dev, "wsec", 0, bssidx);
+	} else {
+#ifdef BCMWAPI_WPI
+		if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_SMS4) {
+			WL_DBG((" NO, is_wps_conn, WAPI set to SMS4_ENABLED"));
+			err = wldev_iovar_setint_bsscfg(dev, "wsec", val, bssidx);
+		} else {
+#endif
+			WL_DBG((" NO, is_wps_conn, Set pval | gval to WSEC"));
+			wsec_val = pval | gval;
+
+#ifdef MFP
+			if (pval == AES_ENABLED) {
+				if (((wpa2_ie = bcm_parse_tlvs((u8 *)sme->ie, sme->ie_len,
+					DOT11_MNG_RSN_ID)) != NULL) &&
+					(wl_cfg80211_get_rsn_capa(wpa2_ie, rsn_cap) == 0)) {
+
+					if (rsn_cap[0] & RSN_CAP_MFPC) {
+						/* MFP Capability advertised by supplicant. Check
+						 * whether MFP is supported in the firmware
+						 */
+						if ((err = wldev_iovar_getint_bsscfg(dev,
+								"mfp", &mfp, bssidx)) < 0) {
+							WL_ERR(("Get MFP failed! "
+								"Check MFP support in FW \n"));
+							return -1;
+						}
+
+						if ((sme->crypto.n_akm_suites == 1) &&
+							((sme->crypto.akm_suites[0] ==
+							WL_AKM_SUITE_MFP_PSK) ||
+							(sme->crypto.akm_suites[0] ==
+							WL_AKM_SUITE_MFP_1X))) {
+							wsec_val |= MFP_SHA256;
+						} else if (sme->crypto.n_akm_suites > 1) {
+							WL_ERR(("Multiple AKM Specified \n"));
+							return -EINVAL;
+						}
+
+						wsec_val |= MFP_CAPABLE;
+						if (rsn_cap[0] & RSN_CAP_MFPR)
+							wsec_val |= MFP_REQUIRED;
+					}
+				}
+			}
+#endif /* MFP */
+			WL_DBG((" Set WSEC to fW 0x%x \n", wsec_val));
+			err = wldev_iovar_setint_bsscfg(dev, "wsec",
+				wsec_val, bssidx);
+#ifdef BCMWAPI_WPI
+		}
+#endif
+	}
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		return err;
+	}
+
+	sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+	sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0];
+	sec->cipher_group = sme->crypto.cipher_group;
+
+	return err;
+}
+
+static s32
+wl_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct wl_security *sec;
+	s32 val = 0;
+	s32 err = 0;
+	s32 bssidx;
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+
+	if (sme->crypto.n_akm_suites) {
+		err = wldev_iovar_getint(dev, "wpa_auth", &val);
+		if (unlikely(err)) {
+			WL_ERR(("could not get wpa_auth (%d)\n", err));
+			return err;
+		}
+		if (val & (WPA_AUTH_PSK |
+			WPA_AUTH_UNSPECIFIED)) {
+			switch (sme->crypto.akm_suites[0]) {
+			case WLAN_AKM_SUITE_8021X:
+				val = WPA_AUTH_UNSPECIFIED;
+				break;
+			case WLAN_AKM_SUITE_PSK:
+				val = WPA_AUTH_PSK;
+				break;
+			default:
+				WL_ERR(("invalid cipher group (%d)\n",
+					sme->crypto.cipher_group));
+				return -EINVAL;
+			}
+		} else if (val & (WPA2_AUTH_PSK |
+			WPA2_AUTH_UNSPECIFIED)) {
+			switch (sme->crypto.akm_suites[0]) {
+			case WLAN_AKM_SUITE_8021X:
+				val = WPA2_AUTH_UNSPECIFIED;
+				break;
+#ifdef MFP
+			case WL_AKM_SUITE_MFP_1X:
+				val = WPA2_AUTH_UNSPECIFIED;
+				break;
+			case WL_AKM_SUITE_MFP_PSK:
+				val = WPA2_AUTH_PSK;
+				break;
+#endif
+			case WLAN_AKM_SUITE_PSK:
+				val = WPA2_AUTH_PSK;
+				break;
+			default:
+				WL_ERR(("invalid cipher group (%d)\n",
+					sme->crypto.cipher_group));
+				return -EINVAL;
+			}
+		}
+#ifdef BCMWAPI_WPI
+		else if (val & (WAPI_AUTH_PSK | WAPI_AUTH_UNSPECIFIED)) {
+			switch (sme->crypto.akm_suites[0]) {
+			case WLAN_AKM_SUITE_WAPI_CERT:
+				val = WAPI_AUTH_UNSPECIFIED;
+				break;
+			case WLAN_AKM_SUITE_WAPI_PSK:
+				val = WAPI_AUTH_PSK;
+				break;
+			default:
+				WL_ERR(("invalid cipher group (%d)\n",
+					sme->crypto.cipher_group));
+				return -EINVAL;
+			}
+		}
+#endif
+		WL_DBG(("setting wpa_auth to %d\n", val));
+
+		err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx);
+		if (unlikely(err)) {
+			WL_ERR(("could not set wpa_auth (%d)\n", err));
+			return err;
+		}
+	}
+	sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+	sec->wpa_auth = sme->crypto.akm_suites[0];
+
+	return err;
+}
+
+static s32
+wl_set_set_sharedkey(struct net_device *dev,
+	struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct wl_security *sec;
+	struct wl_wsec_key key;
+	s32 val;
+	s32 err = 0;
+	s32 bssidx;
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+
+	WL_DBG(("key len (%d)\n", sme->key_len));
+	if (sme->key_len) {
+		sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+		WL_DBG(("wpa_versions 0x%x cipher_pairwise 0x%x\n",
+			sec->wpa_versions, sec->cipher_pairwise));
+		if (!(sec->wpa_versions & (NL80211_WPA_VERSION_1 |
+#ifdef BCMWAPI_WPI
+			NL80211_WPA_VERSION_2 | NL80211_WAPI_VERSION_1)) &&
+#else
+			NL80211_WPA_VERSION_2)) &&
+#endif
+			(sec->cipher_pairwise & (WLAN_CIPHER_SUITE_WEP40 |
+#ifdef BCMWAPI_WPI
+		WLAN_CIPHER_SUITE_WEP104 | WLAN_CIPHER_SUITE_SMS4)))
+#else
+		WLAN_CIPHER_SUITE_WEP104)))
+#endif
+		{
+			memset(&key, 0, sizeof(key));
+			key.len = (u32) sme->key_len;
+			key.index = (u32) sme->key_idx;
+			if (unlikely(key.len > sizeof(key.data))) {
+				WL_ERR(("Too long key length (%u)\n", key.len));
+				return -EINVAL;
+			}
+			memcpy(key.data, sme->key, key.len);
+			key.flags = WL_PRIMARY_KEY;
+			switch (sec->cipher_pairwise) {
+			case WLAN_CIPHER_SUITE_WEP40:
+				key.algo = CRYPTO_ALGO_WEP1;
+				break;
+			case WLAN_CIPHER_SUITE_WEP104:
+				key.algo = CRYPTO_ALGO_WEP128;
+				break;
+#ifdef BCMWAPI_WPI
+			case WLAN_CIPHER_SUITE_SMS4:
+				key.algo = CRYPTO_ALGO_SMS4;
+				break;
+#endif
+			default:
+				WL_ERR(("Invalid algorithm (%d)\n",
+					sme->crypto.ciphers_pairwise[0]));
+				return -EINVAL;
+			}
+			/* Set the new key/index */
+			WL_DBG(("key length (%d) key index (%d) algo (%d)\n",
+				key.len, key.index, key.algo));
+			WL_DBG(("key \"%s\"\n", key.data));
+			swap_key_from_BE(&key);
+			err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
+				cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+			if (unlikely(err)) {
+				WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+				return err;
+			}
+			if (sec->auth_type == NL80211_AUTHTYPE_SHARED_KEY) {
+				WL_DBG(("set auth_type to shared key\n"));
+				val = WL_AUTH_SHARED_KEY;	/* shared key */
+				err = wldev_iovar_setint_bsscfg(dev, "auth", val, bssidx);
+				if (unlikely(err)) {
+					WL_ERR(("set auth failed (%d)\n", err));
+					return err;
+				}
+			}
+		}
+	}
+	return err;
+}
+
+#if defined(ESCAN_RESULT_PATCH)
+static u8 connect_req_bssid[6];
+static u8 broad_bssid[6];
+#endif /* ESCAN_RESULT_PATCH */
+
+
+#if defined(CUSTOM_SET_CPUCORE) || defined(CONFIG_TCPACK_FASTTX)
+static bool wl_get_chan_isvht80(struct net_device *net, dhd_pub_t *dhd)
+{
+	u32 chanspec = 0;
+	bool isvht80 = 0;
+
+	if (wldev_iovar_getint(net, "chanspec", (s32 *)&chanspec) == BCME_OK)
+		chanspec = wl_chspec_driver_to_host(chanspec);
+
+	isvht80 = chanspec & WL_CHANSPEC_BW_80;
+	WL_INFO(("%s: chanspec(%x:%d)\n", __FUNCTION__, chanspec, isvht80));
+
+	return isvht80;
+}
+#endif /* CUSTOM_SET_CPUCORE || CONFIG_TCPACK_FASTTX */
+
+static s32
+wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_connect_params *sme)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct ieee80211_channel *chan = sme->channel;
+	wl_extjoin_params_t *ext_join_params;
+	struct wl_join_params join_params;
+	size_t join_params_size;
+	s32 err = 0;
+	wpa_ie_fixed_t *wpa_ie;
+	bcm_tlv_t *wpa2_ie;
+	u8* wpaie  = 0;
+	u32 wpaie_len = 0;
+	u32 chan_cnt = 0;
+	struct ether_addr bssid;
+	s32 bssidx;
+	int ret;
+	int wait_cnt;
+
+	WL_DBG(("In\n"));
+
+	if (unlikely(!sme->ssid)) {
+		WL_ERR(("Invalid ssid\n"));
+		return -EOPNOTSUPP;
+	}
+
+	if (unlikely(sme->ssid_len > DOT11_MAX_SSID_LEN)) {
+		WL_ERR(("Invalid SSID info: SSID=%s, length=%zd\n",
+			sme->ssid, sme->ssid_len));
+		return -EINVAL;
+	}
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+
+	/*
+	 * Cancel ongoing scan to sync up with sme state machine of cfg80211.
+	 */
+#if !defined(ESCAN_RESULT_PATCH)
+	if (cfg->scan_request) {
+		wl_notify_escan_complete(cfg, dev, true, true);
+	}
+#endif
+#ifdef WL_SCHED_SCAN
+	if (cfg->sched_scan_req) {
+		wl_cfg80211_sched_scan_stop(wiphy, bcmcfg_to_prmry_ndev(cfg));
+	}
+#endif
+#if defined(ESCAN_RESULT_PATCH)
+	if (sme->bssid)
+		memcpy(connect_req_bssid, sme->bssid, ETHER_ADDR_LEN);
+	else
+		bzero(connect_req_bssid, ETHER_ADDR_LEN);
+	bzero(broad_bssid, ETHER_ADDR_LEN);
+#endif
+#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
+	maxrxpktglom = 0;
+#endif
+	bzero(&bssid, sizeof(bssid));
+	if (!wl_get_drv_status(cfg, CONNECTED, dev) &&
+		(ret = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false)) == 0) {
+		if (!ETHER_ISNULLADDR(&bssid)) {
+			scb_val_t scbval;
+			wl_set_drv_status(cfg, DISCONNECTING, dev);
+			scbval.val = DOT11_RC_DISASSOC_LEAVING;
+			memcpy(&scbval.ea, &bssid, ETHER_ADDR_LEN);
+			scbval.val = htod32(scbval.val);
+
+			WL_DBG(("drv status CONNECTED is not set, but connected in FW!" MACDBG "/n",
+				MAC2STRDBG(bssid.octet)));
+			err = wldev_ioctl(dev, WLC_DISASSOC, &scbval,
+				sizeof(scb_val_t), true);
+			if (unlikely(err)) {
+				wl_clr_drv_status(cfg, DISCONNECTING, dev);
+				WL_ERR(("error (%d)\n", err));
+				return err;
+			}
+			wait_cnt = 500/10;
+			while (wl_get_drv_status(cfg, DISCONNECTING, dev) && wait_cnt) {
+				WL_DBG(("Waiting for disconnection terminated, wait_cnt: %d\n",
+					wait_cnt));
+				wait_cnt--;
+				OSL_SLEEP(10);
+			}
+		} else
+			WL_DBG(("Currently not associated!\n"));
+	} else {
+		/* if status is DISCONNECTING, wait up to 500 ms for the disconnection to complete */
+		wait_cnt = 500/10;
+		while (wl_get_drv_status(cfg, DISCONNECTING, dev) && wait_cnt) {
+			WL_DBG(("Waiting for disconnection terminated, wait_cnt: %d\n", wait_cnt));
+			wait_cnt--;
+			OSL_SLEEP(10);
+		}
+	}
+
+	/* Clean BSSID */
+	bzero(&bssid, sizeof(bssid));
+	if (!wl_get_drv_status(cfg, DISCONNECTING, dev))
+		wl_update_prof(cfg, dev, NULL, (void *)&bssid, WL_PROF_BSSID);
+
+	if (p2p_is_on(cfg) && (dev != bcmcfg_to_prmry_ndev(cfg))) {
+		/* for P2P we only allow connecting through the virtual interface */
+		if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+			WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+			return BCME_ERROR;
+		}
+		wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
+			VNDR_IE_ASSOCREQ_FLAG, sme->ie, sme->ie_len);
+	} else if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+		/* find the RSN_IE */
+		if ((wpa2_ie = bcm_parse_tlvs((u8 *)sme->ie, sme->ie_len,
+			DOT11_MNG_RSN_ID)) != NULL) {
+			WL_DBG((" WPA2 IE is found\n"));
+		}
+		/* find the WPA_IE */
+		if ((wpa_ie = wl_cfgp2p_find_wpaie((u8 *)sme->ie,
+			sme->ie_len)) != NULL) {
+			WL_DBG((" WPA IE is found\n"));
+		}
+		if (wpa_ie != NULL || wpa2_ie != NULL) {
+			wpaie = (wpa_ie != NULL) ? (u8 *)wpa_ie : (u8 *)wpa2_ie;
+			wpaie_len = (wpa_ie != NULL) ? wpa_ie->length : wpa2_ie->len;
+			wpaie_len += WPA_RSN_IE_TAG_FIXED_LEN;
+			wldev_iovar_setbuf(dev, "wpaie", wpaie, wpaie_len,
+				cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+		} else {
+			wldev_iovar_setbuf(dev, "wpaie", NULL, 0,
+				cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+		}
+
+		if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+			WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+			return BCME_ERROR;
+		}
+		err = wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
+			VNDR_IE_ASSOCREQ_FLAG, (u8 *)sme->ie, sme->ie_len);
+		if (unlikely(err)) {
+			return err;
+		}
+	}
+	if (chan) {
+		cfg->channel = ieee80211_frequency_to_channel(chan->center_freq);
+		chan_cnt = 1;
+		WL_DBG(("channel (%d), center_req (%d), %d channels\n", cfg->channel,
+			chan->center_freq, chan_cnt));
+	} else
+		cfg->channel = 0;
+#ifdef BCMWAPI_WPI
+	WL_DBG(("1. enable wapi auth\n"));
+	if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1) {
+		WL_DBG(("2. set wapi ie  \n"));
+		err = wl_set_set_wapi_ie(dev, sme);
+		if (unlikely(err))
+			return err;
+	} else
+		WL_DBG(("2. Not wapi ie  \n"));
+#endif
+	WL_DBG(("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len));
+	WL_DBG(("3. set wapi version \n"));
+	err = wl_set_wpa_version(dev, sme);
+	if (unlikely(err)) {
+		WL_ERR(("Invalid wpa_version\n"));
+		return err;
+	}
+#ifdef BCMWAPI_WPI
+	if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1)
+		WL_DBG(("4. WAPI Dont Set wl_set_auth_type\n"));
+	else {
+		WL_DBG(("4. wl_set_auth_type\n"));
+#endif
+		err = wl_set_auth_type(dev, sme);
+		if (unlikely(err)) {
+			WL_ERR(("Invalid auth type\n"));
+			return err;
+		}
+#ifdef BCMWAPI_WPI
+	}
+#endif
+
+	err = wl_set_set_cipher(dev, sme);
+	if (unlikely(err)) {
+		WL_ERR(("Invalid ciper\n"));
+		return err;
+	}
+
+	err = wl_set_key_mgmt(dev, sme);
+	if (unlikely(err)) {
+		WL_ERR(("Invalid key mgmt\n"));
+		return err;
+	}
+
+	err = wl_set_set_sharedkey(dev, sme);
+	if (unlikely(err)) {
+		WL_ERR(("Invalid shared key\n"));
+		return err;
+	}
+
+	/*
+	 *  Join with specific BSSID and cached SSID
+	 *  If SSID is zero join based on BSSID only
+	 */
+	join_params_size = WL_EXTJOIN_PARAMS_FIXED_SIZE +
+		chan_cnt * sizeof(chanspec_t);
+	ext_join_params =  (wl_extjoin_params_t*)kzalloc(join_params_size, GFP_KERNEL);
+	if (ext_join_params == NULL) {
+		err = -ENOMEM;
+		wl_clr_drv_status(cfg, CONNECTING, dev);
+		goto exit;
+	}
+	ext_join_params->ssid.SSID_len = min(sizeof(ext_join_params->ssid.SSID), sme->ssid_len);
+	memcpy(&ext_join_params->ssid.SSID, sme->ssid, ext_join_params->ssid.SSID_len);
+	wl_update_prof(cfg, dev, NULL, &ext_join_params->ssid, WL_PROF_SSID);
+	ext_join_params->ssid.SSID_len = htod32(ext_join_params->ssid.SSID_len);
+	/* increase the dwell time to receive a probe response or detect a beacon
+	* from the target AP in a noisy environment, only during the connect command
+	*/
+	ext_join_params->scan.active_time = chan_cnt ? WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS : -1;
+	ext_join_params->scan.passive_time = chan_cnt ? WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS : -1;
+	/* Set up join scan parameters */
+	ext_join_params->scan.scan_type = -1;
+	ext_join_params->scan.nprobes = chan_cnt ?
+		(ext_join_params->scan.active_time/WL_SCAN_JOIN_PROBE_INTERVAL_MS) : -1;
+	ext_join_params->scan.home_time = -1;
+
+	if (sme->bssid)
+		memcpy(&ext_join_params->assoc.bssid, sme->bssid, ETH_ALEN);
+	else
+		memcpy(&ext_join_params->assoc.bssid, &ether_bcast, ETH_ALEN);
+	ext_join_params->assoc.chanspec_num = chan_cnt;
+	if (chan_cnt) {
+		u16 channel, band, bw, ctl_sb;
+		chanspec_t chspec;
+		channel = cfg->channel;
+		band = (channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G
+			: WL_CHANSPEC_BAND_5G;
+		bw = WL_CHANSPEC_BW_20;
+		ctl_sb = WL_CHANSPEC_CTL_SB_NONE;
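+		/* A chanspec packs the channel number, band, bandwidth and
+		 * control sideband into a single 16-bit value; for 20 MHz the
+		 * sideband is "none", so OR-ing the four fields yields the
+		 * complete chanspec.
+		 */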
+		chspec = (channel | band | bw | ctl_sb);
+		ext_join_params->assoc.chanspec_list[0]  &= WL_CHANSPEC_CHAN_MASK;
+		ext_join_params->assoc.chanspec_list[0] |= chspec;
+		ext_join_params->assoc.chanspec_list[0] =
+			wl_chspec_host_to_driver(ext_join_params->assoc.chanspec_list[0]);
+	}
+	ext_join_params->assoc.chanspec_num = htod32(ext_join_params->assoc.chanspec_num);
+	if (ext_join_params->ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+		WL_INFO(("ssid \"%s\", len (%d)\n", ext_join_params->ssid.SSID,
+			ext_join_params->ssid.SSID_len));
+	}
+	wl_set_drv_status(cfg, CONNECTING, dev);
+
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		kfree(ext_join_params);
+		return BCME_ERROR;
+	}
+	err = wldev_iovar_setbuf_bsscfg(dev, "join", ext_join_params, join_params_size,
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+	WL_ERR(("Connectting with" MACDBG " channel (%d) ssid \"%s\", len (%d)\n\n",
+		MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)), cfg->channel,
+		ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len));
+
+	kfree(ext_join_params);
+	if (err) {
+		wl_clr_drv_status(cfg, CONNECTING, dev);
+		if (err == BCME_UNSUPPORTED) {
+			WL_DBG(("join iovar is not supported\n"));
+			goto set_ssid;
+		} else {
+			WL_ERR(("error (%d)\n", err));
+			goto exit;
+		}
+	} else
+		goto exit;
+
+set_ssid:
+	memset(&join_params, 0, sizeof(join_params));
+	join_params_size = sizeof(join_params.ssid);
+
+	join_params.ssid.SSID_len = min(sizeof(join_params.ssid.SSID), sme->ssid_len);
+	memcpy(&join_params.ssid.SSID, sme->ssid, join_params.ssid.SSID_len);
+	join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len);
+	wl_update_prof(cfg, dev, NULL, &join_params.ssid, WL_PROF_SSID);
+	if (sme->bssid)
+		memcpy(&join_params.params.bssid, sme->bssid, ETH_ALEN);
+	else
+		memcpy(&join_params.params.bssid, &ether_bcast, ETH_ALEN);
+
+	wl_ch_to_chanspec(cfg->channel, &join_params, &join_params_size);
+	WL_DBG(("join_param_size %zu\n", join_params_size));
+
+	if (join_params.ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+		WL_INFO(("ssid \"%s\", len (%d)\n", join_params.ssid.SSID,
+			join_params.ssid.SSID_len));
+	}
+	wl_set_drv_status(cfg, CONNECTING, dev);
+	err = wldev_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size, true);
+	if (err) {
+		WL_ERR(("error (%d)\n", err));
+		wl_clr_drv_status(cfg, CONNECTING, dev);
+	}
+exit:
+	return err;
+}
+
+static s32
+wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
+	u16 reason_code)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	scb_val_t scbval;
+	bool act = false;
+	s32 err = 0;
+	u8 *curbssid;
+#ifdef CUSTOM_SET_CPUCORE
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* CUSTOM_SET_CPUCORE */
+	WL_ERR(("Reason %d\n", reason_code));
+	RETURN_EIO_IF_NOT_UP(cfg);
+	act = *(bool *) wl_read_prof(cfg, dev, WL_PROF_ACT);
+	curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID);
+	if (act) {
+		/*
+		* Cancel ongoing scan to sync up with sme state machine of cfg80211.
+		*/
+#if !defined(ESCAN_RESULT_PATCH)
+		/* Let the scan be aborted by the firmware */
+		if (cfg->scan_request) {
+			wl_notify_escan_complete(cfg, dev, true, true);
+		}
+#endif /* ESCAN_RESULT_PATCH */
+		wl_set_drv_status(cfg, DISCONNECTING, dev);
+		scbval.val = reason_code;
+		memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
+		scbval.val = htod32(scbval.val);
+		err = wldev_ioctl(dev, WLC_DISASSOC, &scbval,
+			sizeof(scb_val_t), true);
+		if (unlikely(err)) {
+			wl_clr_drv_status(cfg, DISCONNECTING, dev);
+			WL_ERR(("error (%d)\n", err));
+			return err;
+		}
+	}
+#ifdef CUSTOM_SET_CPUCORE
+	/* set default cpucore */
+	if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+		dhd->chan_isvht80 &= ~DHD_FLAG_STA_MODE;
+		if (!(dhd->chan_isvht80))
+			dhd_set_cpucore(dhd, FALSE);
+	}
+#endif /* CUSTOM_SET_CPUCORE */
+
+	return err;
+}
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32
+wl_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+	enum nl80211_tx_power_setting type, s32 mbm)
+#else
+static s32
+wl_cfg80211_set_tx_power(struct wiphy *wiphy,
+	enum nl80211_tx_power_setting type, s32 dbm)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+{
+
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	s32 err = 0;
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	s32 dbm = MBM_TO_DBM(mbm);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) || \
+	defined(WL_COMPAT_WIRELESS) || defined(WL_SUPPORT_BACKPORTED_KPATCHES)
+	dbm = MBM_TO_DBM(dbm);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	switch (type) {
+	case NL80211_TX_POWER_AUTOMATIC:
+		break;
+	case NL80211_TX_POWER_LIMITED:
+		if (dbm < 0) {
+			WL_ERR(("TX_POWER_LIMITTED - dbm is negative\n"));
+			return -EINVAL;
+		}
+		break;
+	case NL80211_TX_POWER_FIXED:
+		if (dbm < 0) {
+			WL_ERR(("TX_POWER_FIXED - dbm is negative..\n"));
+			return -EINVAL;
+		}
+		break;
+	}
+
+	err = wl_set_tx_power(ndev, type, dbm);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		return err;
+	}
+
+	cfg->conf->tx_power = dbm;
+
+	return err;
+}
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy,
+	struct wireless_dev *wdev, s32 *dbm)
+#else
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	s32 err = 0;
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	err = wl_get_tx_power(ndev, dbm);
+	if (unlikely(err))
+		WL_ERR(("error (%d)\n", err));
+
+	return err;
+}
+
+static s32
+wl_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool unicast, bool multicast)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	u32 index;
+	s32 wsec;
+	s32 err = 0;
+	s32 bssidx;
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+
+	WL_DBG(("key index (%d)\n", key_idx));
+	RETURN_EIO_IF_NOT_UP(cfg);
+	err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_GET_WSEC error (%d)\n", err));
+		return err;
+	}
+	if (wsec == WEP_ENABLED) {
+		/* Just select a new current key */
+		index = (u32) key_idx;
+		index = htod32(index);
+		err = wldev_ioctl(dev, WLC_SET_KEY_PRIMARY, &index,
+			sizeof(index), true);
+		if (unlikely(err)) {
+			WL_ERR(("error (%d)\n", err));
+
+	return err;
+}
+
+static s32
+wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, const u8 *mac_addr, struct key_params *params)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct wl_wsec_key key;
+	s32 err = 0;
+	s32 bssidx;
+	s32 mode = wl_get_mode_by_netdev(cfg, dev);
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	memset(&key, 0, sizeof(key));
+	key.index = (u32) key_idx;
+
+	if (!ETHER_ISMULTI(mac_addr))
+		memcpy((char *)&key.ea, (void *)mac_addr, ETHER_ADDR_LEN);
+	key.len = (u32) params->key_len;
+
+	/* check for key index change */
+	if (key.len == 0) {
+		/* key delete */
+		swap_key_from_BE(&key);
+		err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+		if (unlikely(err)) {
+			WL_ERR(("key delete error (%d)\n", err));
+			return err;
+		}
+	} else {
+		if (key.len > sizeof(key.data)) {
+			WL_ERR(("Invalid key length (%d)\n", key.len));
+			return -EINVAL;
+		}
+		WL_DBG(("Setting the key index %d\n", key.index));
+		memcpy(key.data, params->key, key.len);
+
+		if ((mode == WL_MODE_BSS) &&
+			(params->cipher == WLAN_CIPHER_SUITE_TKIP)) {
+			u8 keybuf[8];
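+			/* wpa_supplicant supplies TKIP keys with the TX and RX
+			 * MIC halves (bytes 16-23 and 24-31) in the reverse of
+			 * the order the firmware expects, so swap them back.
+			 */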
+			memcpy(keybuf, &key.data[24], sizeof(keybuf));
+			memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
+			memcpy(&key.data[16], keybuf, sizeof(keybuf));
+		}
+
+		/* if IW_ENCODE_EXT_RX_SEQ_VALID set */
+		if (params->seq && params->seq_len == 6) {
+			/* rx iv */
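+			/* The 6-byte receive IV from userspace is little-endian:
+			 * bytes 0-1 form the low 16 bits and bytes 2-5 the high
+			 * 32 bits of the replay counter.
+			 */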
+			u8 *ivptr;
+			ivptr = (u8 *) params->seq;
+			key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
+				(ivptr[3] << 8) | ivptr[2];
+			key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
+			key.iv_initialized = true;
+		}
+
+		switch (params->cipher) {
+		case WLAN_CIPHER_SUITE_WEP40:
+			key.algo = CRYPTO_ALGO_WEP1;
+			WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+			break;
+		case WLAN_CIPHER_SUITE_WEP104:
+			key.algo = CRYPTO_ALGO_WEP128;
+			WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+			key.algo = CRYPTO_ALGO_TKIP;
+			WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+			break;
+		case WLAN_CIPHER_SUITE_AES_CMAC:
+			key.algo = CRYPTO_ALGO_AES_CCM;
+			WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+			break;
+		case WLAN_CIPHER_SUITE_CCMP:
+			key.algo = CRYPTO_ALGO_AES_CCM;
+			WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
+			break;
+#ifdef BCMWAPI_WPI
+		case WLAN_CIPHER_SUITE_SMS4:
+			key.algo = CRYPTO_ALGO_SMS4;
+			WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n"));
+			break;
+#endif
+		default:
+			WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
+			return -EINVAL;
+		}
+		swap_key_from_BE(&key);
+		/* need to guarantee EAPOL 4/4 send out before set key */
+		dhd_wait_pend8021x(dev);
+		err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+		if (unlikely(err)) {
+			WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+			return err;
+		}
+	}
+	return err;
+}
+
+int
+wl_cfg80211_enable_roam_offload(struct net_device *dev, bool enable)
+{
+	int err;
+	wl_eventmsg_buf_t ev_buf;
+
+	if (dev != bcmcfg_to_prmry_ndev(g_bcm_cfg)) {
+		/* roam offload is only for the primary device */
+		return -1;
+	}
+	err = wldev_iovar_setint(dev, "roam_offload", (int)enable);
+	if (err)
+		return err;
+
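+	/* With roam offload enabled the firmware handles (re)association and
+	 * roaming itself, so the corresponding host events are masked off
+	 * (!enable); disabling offload re-enables them.
+	 */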
+	bzero(&ev_buf, sizeof(wl_eventmsg_buf_t));
+	wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_PSK_SUP, !enable);
+	wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_REQ_IE, !enable);
+	wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_RESP_IE, !enable);
+	wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_REASSOC, !enable);
+	wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_JOIN, !enable);
+	wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ROAM, !enable);
+	err = wl_cfg80211_apply_eventbuffer(dev, g_bcm_cfg, &ev_buf);
+	if (!err) {
+		g_bcm_cfg->roam_offload = enable;
+	}
+	return err;
+}
+
+static s32
+wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr,
+	struct key_params *params)
+{
+	struct wl_wsec_key key;
+	s32 val = 0;
+	s32 wsec = 0;
+	s32 err = 0;
+	u8 keybuf[8];
+	s32 bssidx = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 mode = wl_get_mode_by_netdev(cfg, dev);
+	WL_DBG(("key index (%d)\n", key_idx));
+	RETURN_EIO_IF_NOT_UP(cfg);
+
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+
+	if (mac_addr &&
+		((params->cipher != WLAN_CIPHER_SUITE_WEP40) &&
+		(params->cipher != WLAN_CIPHER_SUITE_WEP104))) {
+		wl_add_keyext(wiphy, dev, key_idx, mac_addr, params);
+		goto exit;
+	}
+	memset(&key, 0, sizeof(key));
+
+	key.len = (u32) params->key_len;
+	key.index = (u32) key_idx;
+
+	if (unlikely(key.len > sizeof(key.data))) {
+		WL_ERR(("Too long key length (%u)\n", key.len));
+		return -EINVAL;
+	}
+	memcpy(key.data, params->key, key.len);
+
+	key.flags = WL_PRIMARY_KEY;
+	switch (params->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+		key.algo = CRYPTO_ALGO_WEP1;
+		val = WEP_ENABLED;
+		WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+		break;
+	case WLAN_CIPHER_SUITE_WEP104:
+		key.algo = CRYPTO_ALGO_WEP128;
+		val = WEP_ENABLED;
+		WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+		break;
+	case WLAN_CIPHER_SUITE_TKIP:
+		key.algo = CRYPTO_ALGO_TKIP;
+		val = TKIP_ENABLED;
+		/* wpa_supplicant switches the third and fourth quarters of the TKIP key */
+		if (mode == WL_MODE_BSS) {
+			bcopy(&key.data[24], keybuf, sizeof(keybuf));
+			bcopy(&key.data[16], &key.data[24], sizeof(keybuf));
+			bcopy(keybuf, &key.data[16], sizeof(keybuf));
+		}
+		WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+		break;
+	case WLAN_CIPHER_SUITE_AES_CMAC:
+		key.algo = CRYPTO_ALGO_AES_CCM;
+		val = AES_ENABLED;
+		WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+		break;
+	case WLAN_CIPHER_SUITE_CCMP:
+		key.algo = CRYPTO_ALGO_AES_CCM;
+		val = AES_ENABLED;
+		WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
+		break;
+#ifdef BCMWAPI_WPI
+	case WLAN_CIPHER_SUITE_SMS4:
+		key.algo = CRYPTO_ALGO_SMS4;
+		WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n"));
+		val = SMS4_ENABLED;
+		break;
+#endif /* BCMWAPI_WPI */
+	default:
+		WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
+		return -EINVAL;
+	}
+
+	/* Set the new key/index */
+	if ((mode == WL_MODE_IBSS) && (val & (TKIP_ENABLED | AES_ENABLED))) {
+		WL_ERR(("IBSS KEY setted\n"));
+		wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_NONE);
+	}
+	swap_key_from_BE(&key);
+	err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), cfg->ioctl_buf,
+		WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+		return err;
+	}
+
+exit:
+	err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("get wsec error (%d)\n", err));
+		return err;
+	}
+
+	wsec |= val;
+	err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("set wsec error (%d)\n", err));
+		return err;
+	}
+
+	return err;
+}
+
+static s32
+wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr)
+{
+	struct wl_wsec_key key;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 err = 0;
+	s32 bssidx;
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	WL_DBG(("Enter\n"));
+
+#ifndef IEEE80211W
+	if ((key_idx >= DOT11_MAX_DEFAULT_KEYS) && (key_idx < DOT11_MAX_DEFAULT_KEYS+2))
+		return -EINVAL;
+#endif
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	memset(&key, 0, sizeof(key));
+
+	key.flags = WL_PRIMARY_KEY;
+	key.algo = CRYPTO_ALGO_OFF;
+	key.index = (u32) key_idx;
+
+	WL_DBG(("key index (%d)\n", key_idx));
+	/* Set the new key/index */
+	swap_key_from_BE(&key);
+	err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), cfg->ioctl_buf,
+		WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+	if (unlikely(err)) {
+		if (err == -EINVAL) {
+			if (key.index >= DOT11_MAX_DEFAULT_KEYS) {
+				/* we ignore this key index in this case */
+				WL_DBG(("invalid key index (%d)\n", key_idx));
+			}
+		} else {
+			WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+		}
+		return err;
+	}
+	return err;
+}
+
+static s32
+wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie,
+	void (*callback) (void *cookie, struct key_params * params))
+{
+	struct key_params params;
+	struct wl_wsec_key key;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct wl_security *sec;
+	s32 wsec;
+	s32 err = 0;
+	s32 bssidx;
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	WL_DBG(("key index (%d)\n", key_idx));
+	RETURN_EIO_IF_NOT_UP(cfg);
+	memset(&key, 0, sizeof(key));
+	key.index = key_idx;
+	swap_key_to_BE(&key);
+	memset(&params, 0, sizeof(params));
+	params.key_len = (u8) min_t(u8, DOT11_MAX_KEY_SIZE, key.len);
+	memcpy(params.key, key.data, params.key_len);
+
+	err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_GET_WSEC error (%d)\n", err));
+		return err;
+	}
+	switch (wsec & ~SES_OW_ENABLED) {
+		case WEP_ENABLED:
+			sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+			if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
+				params.cipher = WLAN_CIPHER_SUITE_WEP40;
+				WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+			} else if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP104) {
+				params.cipher = WLAN_CIPHER_SUITE_WEP104;
+				WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+			}
+			break;
+		case TKIP_ENABLED:
+			params.cipher = WLAN_CIPHER_SUITE_TKIP;
+			WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+			break;
+		case AES_ENABLED:
+			params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
+			WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+			break;
+#ifdef BCMWAPI_WPI
+		case WLAN_CIPHER_SUITE_SMS4:
+			key.algo = CRYPTO_ALGO_SMS4;
+			WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n"));
+			break;
+#endif
+		default:
+			WL_ERR(("Invalid algo (0x%x)\n", wsec));
+			return -EINVAL;
+	}
+
+	callback(cookie, &params);
+	return err;
+}
+
+static s32
+wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
+	struct net_device *dev, u8 key_idx)
+{
+	WL_INFO(("Not supported\n"));
+	return -EOPNOTSUPP;
+}
+
+static s32
+wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
+	u8 *mac, struct station_info *sinfo)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	scb_val_t scb_val;
+	s32 rssi;
+	s32 rate;
+	s32 err = 0;
+	sta_info_t *sta;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+	s8 eabuf[ETHER_ADDR_STR_LEN];
+#endif
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+	RETURN_EIO_IF_NOT_UP(cfg);
+	if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) {
+		err = wldev_iovar_getbuf(dev, "sta_info", (struct ether_addr *)mac,
+			ETHER_ADDR_LEN, cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+		if (err < 0) {
+			WL_ERR(("GET STA INFO failed, %d\n", err));
+			return err;
+		}
+		sinfo->filled = STATION_INFO_INACTIVE_TIME;
+		sta = (sta_info_t *)cfg->ioctl_buf;
+		sta->len = dtoh16(sta->len);
+		sta->cap = dtoh16(sta->cap);
+		sta->flags = dtoh32(sta->flags);
+		sta->idle = dtoh32(sta->idle);
+		sta->in = dtoh32(sta->in);
+		sinfo->inactive_time = sta->idle * 1000;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+		if (sta->flags & WL_STA_ASSOC) {
+			sinfo->filled |= STATION_INFO_CONNECTED_TIME;
+			sinfo->connected_time = sta->in;
+		}
+		WL_INFO(("STA %s : idle time : %d sec, connected time :%d ms\n",
+			bcm_ether_ntoa((const struct ether_addr *)mac, eabuf), sinfo->inactive_time,
+			sta->idle * 1000));
+#endif
+	} else if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_BSS ||
+		wl_get_mode_by_netdev(cfg, dev) == WL_MODE_IBSS) {
+		get_pktcnt_t pktcnt;
+		u8 *curmacp;
+
+		if (cfg->roam_offload) {
+			struct ether_addr bssid;
+			err = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false);
+			if (err) {
+				WL_ERR(("Failed to get current BSSID\n"));
+			} else {
+				if (memcmp(mac, &bssid.octet, ETHER_ADDR_LEN) != 0) {
+					/* roaming is detected */
+					err = wl_cfg80211_delayed_roam(cfg, dev, &bssid);
+					if (err)
+						WL_ERR(("Failed to handle the delayed roam, "
+							"err=%d", err));
+					mac = (u8 *)bssid.octet;
+				}
+			}
+		}
+		if (!wl_get_drv_status(cfg, CONNECTED, dev) ||
+			(dhd_is_associated(dhd, NULL, &err) == FALSE)) {
+			WL_ERR(("NOT assoc\n"));
+			if (err == -ERESTARTSYS)
+				return err;
+			err = -ENODEV;
+			return err;
+		}
+		curmacp = wl_read_prof(cfg, dev, WL_PROF_BSSID);
+		if (memcmp(mac, curmacp, ETHER_ADDR_LEN)) {
+			WL_ERR(("Wrong Mac address: "MACDBG" != "MACDBG"\n",
+				MAC2STRDBG(mac), MAC2STRDBG(curmacp)));
+		}
+
+		/* Report the current tx rate */
+		err = wldev_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate), false);
+		if (err) {
+			WL_ERR(("Could not get rate (%d)\n", err));
+		} else {
+#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
+			int rxpktglom;
+#endif
+			rate = dtoh32(rate);
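+			/* WLC_GET_RATE reports the rate in 500 kbps units while
+			 * cfg80211's legacy bitrate field expects 100 kbps
+			 * units, hence the multiplication by 5.
+			 */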
+			sinfo->filled |= STATION_INFO_TX_BITRATE;
+			sinfo->txrate.legacy = rate * 5;
+			WL_DBG(("Rate %d Mbps\n", (rate / 2)));
+#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
+			rxpktglom = ((rate/2) > 150) ? 20 : 10;
+
+			if (maxrxpktglom != rxpktglom) {
+				maxrxpktglom = rxpktglom;
+				WL_DBG(("Rate %d Mbps, update bus:maxtxpktglom=%d\n", (rate/2),
+					maxrxpktglom));
+				err = wldev_iovar_setbuf(dev, "bus:maxtxpktglom",
+					(char*)&maxrxpktglom, 4, cfg->ioctl_buf,
+					WLC_IOCTL_MAXLEN, NULL);
+				if (err < 0) {
+					WL_ERR(("set bus:maxtxpktglom failed, %d\n", err));
+				}
+			}
+#endif
+		}
+
+		memset(&scb_val, 0, sizeof(scb_val));
+		scb_val.val = 0;
+		err = wldev_ioctl(dev, WLC_GET_RSSI, &scb_val,
+			sizeof(scb_val_t), false);
+		if (err) {
+			WL_ERR(("Could not get rssi (%d)\n", err));
+			goto get_station_err;
+		}
+		rssi = wl_rssi_offset(dtoh32(scb_val.val));
+		sinfo->filled |= STATION_INFO_SIGNAL;
+		sinfo->signal = rssi;
+		WL_DBG(("RSSI %d dBm\n", rssi));
+		err = wldev_ioctl(dev, WLC_GET_PKTCNTS, &pktcnt,
+			sizeof(pktcnt), false);
+		if (!err) {
+			sinfo->filled |= (STATION_INFO_RX_PACKETS |
+				STATION_INFO_RX_DROP_MISC |
+				STATION_INFO_TX_PACKETS |
+				STATION_INFO_TX_FAILED);
+			sinfo->rx_packets = pktcnt.rx_good_pkt;
+			sinfo->rx_dropped_misc = pktcnt.rx_bad_pkt;
+			sinfo->tx_packets = pktcnt.tx_good_pkt;
+			sinfo->tx_failed  = pktcnt.tx_bad_pkt;
+		}
+get_station_err:
+		if (err && (err != -ERESTARTSYS)) {
+			/* Disconnect due to a zero BSSID or an error getting the RSSI */
+			WL_ERR(("force cfg80211_disconnected: %d\n", err));
+			wl_clr_drv_status(cfg, CONNECTED, dev);
+			cfg80211_disconnected(dev, 0, NULL, 0, GFP_KERNEL);
+			wl_link_down(cfg);
+		}
+	} else {
+		WL_ERR(("Invalid device mode %d\n", wl_get_mode_by_netdev(cfg, dev)));
+	}
+
+	return err;
+}
+
+static s32
+wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
+	bool enabled, s32 timeout)
+{
+	s32 pm;
+	s32 err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_info *_net_info = wl_get_netinfo_by_netdev(cfg, dev);
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	WL_DBG(("Enter\n"));
+	if (cfg->p2p_net == dev || _net_info == NULL || cfg->vsdb_mode ||
+		!wl_get_drv_status(cfg, CONNECTED, dev)) {
+		return err;
+	}
+
+	/* Delete pm_enable_work */
+	wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_PEND);
+
+	pm = enabled ? PM_FAST : PM_OFF;
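+	/* PM_FAST enables the firmware's fast power-save mode (dozing between
+	 * beacons while staying responsive); PM_OFF disables power save
+	 * entirely.
+	 */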
+	if (_net_info->pm_block) {
+		WL_ERR(("%s:Do not enable the power save for pm_block %d\n",
+			dev->name, _net_info->pm_block));
+		pm = PM_OFF;
+	}
+	pm = htod32(pm);
+	WL_DBG(("%s:power save %s\n", dev->name, (pm ? "enabled" : "disabled")));
+	err = wldev_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), true);
+	if (unlikely(err)) {
+		if (err == -ENODEV)
+			WL_DBG(("net_device is not ready yet\n"));
+		else
+			WL_ERR(("error (%d)\n", err));
+		return err;
+	}
+	wl_cfg80211_update_power_mode(dev);
+	return err;
+}
+
+void wl_cfg80211_update_power_mode(struct net_device *dev)
+{
+	int err, pm = -1;
+
+	err = wldev_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm), true);
+	if (err)
+		WL_ERR(("%s:error (%d)\n", __FUNCTION__, err));
+	else if (pm != -1 && dev->ieee80211_ptr)
+		dev->ieee80211_ptr->ps = (pm != PM_OFF);
+}
+
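+/*
+ * Return the 1-based position of the most significant set bit of bit16
+ * (0 when no bit is set); e.g. wl_find_msb(0x0001) == 1 and
+ * wl_find_msb(0x8000) == 16. The halving steps below narrow the position
+ * in a binary-search fashion rather than testing all 16 bits.
+ */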
+static __used u32 wl_find_msb(u16 bit16)
+{
+	u32 ret = 0;
+
+	if (bit16 & 0xff00) {
+		ret += 8;
+		bit16 >>= 8;
+	}
+
+	if (bit16 & 0xf0) {
+		ret += 4;
+		bit16 >>= 4;
+	}
+
+	if (bit16 & 0xc) {
+		ret += 2;
+		bit16 >>= 2;
+	}
+
+	if (bit16 & 2)
+		ret += bit16 & 2;
+	else if (bit16)
+		ret += bit16;
+
+	return ret;
+}
+
+static s32 wl_cfg80211_resume(struct wiphy *wiphy)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	s32 err = 0;
+
+	if (unlikely(!wl_get_drv_status(cfg, READY, ndev))) {
+		WL_INFO(("device is not ready\n"));
+		return 0;
+	}
+
+	return err;
+}
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow)
+#else
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy)
+#endif
+{
+#ifdef DHD_CLEAR_ON_SUSPEND
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_info *iter, *next;
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	unsigned long flags;
+	if (unlikely(!wl_get_drv_status(cfg, READY, ndev))) {
+		WL_INFO(("device is not ready : status (%d)\n",
+			(int)cfg->status));
+		return 0;
+	}
+	for_each_ndev(cfg, iter, next)
+		wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	if (cfg->scan_request) {
+		cfg80211_scan_done(cfg->scan_request, true);
+		cfg->scan_request = NULL;
+	}
+	for_each_ndev(cfg, iter, next) {
+		wl_clr_drv_status(cfg, SCANNING, iter->ndev);
+		wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+	}
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+	for_each_ndev(cfg, iter, next) {
+		if (wl_get_drv_status(cfg, CONNECTING, iter->ndev)) {
+			wl_bss_connect_done(cfg, iter->ndev, NULL, NULL, false);
+		}
+	}
+#endif /* DHD_CLEAR_ON_SUSPEND */
+	return 0;
+}
+
+static s32
+wl_update_pmklist(struct net_device *dev, struct wl_pmk_list *pmk_list,
+	s32 err)
+{
+	int i, j;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg);
+
+	if (!pmk_list) {
+		printk("pmk_list is NULL\n");
+		return -EINVAL;
+	}
+	/* pmk list is supported only for STA interface i.e. primary interface
+	 * Refer code wlc_bsscfg.c->wlc_bsscfg_sta_init
+	 */
+	if (primary_dev != dev) {
+		WL_INFO(("Not supporting Flushing pmklist on virtual"
+			" interfaces than primary interface\n"));
+		return err;
+	}
+
+	WL_DBG(("No of elements %d\n", pmk_list->pmkids.npmkid));
+	for (i = 0; i < pmk_list->pmkids.npmkid; i++) {
+		WL_DBG(("PMKID[%d]: %pM =\n", i,
+			&pmk_list->pmkids.pmkid[i].BSSID));
+		for (j = 0; j < WPA2_PMKID_LEN; j++) {
+			WL_DBG(("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j]));
+		}
+	}
+	if (likely(!err)) {
+		err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmk_list,
+			sizeof(*pmk_list), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+	}
+
+	return err;
+}
+
+static s32
+wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_pmksa *pmksa)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 err = 0;
+	int i;
+
+	RETURN_EIO_IF_NOT_UP(cfg);
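+	/* Look for an existing cache entry for this BSSID; if none matches,
+	 * i ends up at npmkid and the new entry is appended, bounded by
+	 * WL_NUM_PMKIDS_MAX.
+	 */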
+	for (i = 0; i < cfg->pmk_list->pmkids.npmkid; i++)
+		if (!memcmp(pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID,
+			ETHER_ADDR_LEN))
+			break;
+	if (i < WL_NUM_PMKIDS_MAX) {
+		memcpy(&cfg->pmk_list->pmkids.pmkid[i].BSSID, pmksa->bssid,
+			ETHER_ADDR_LEN);
+		memcpy(&cfg->pmk_list->pmkids.pmkid[i].PMKID, pmksa->pmkid,
+			WPA2_PMKID_LEN);
+		if (i == cfg->pmk_list->pmkids.npmkid)
+			cfg->pmk_list->pmkids.npmkid++;
+	} else {
+		err = -EINVAL;
+	}
+	WL_DBG(("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
+		&cfg->pmk_list->pmkids.pmkid[cfg->pmk_list->pmkids.npmkid - 1].BSSID));
+	for (i = 0; i < WPA2_PMKID_LEN; i++) {
+		WL_DBG(("%02x\n",
+			cfg->pmk_list->pmkids.pmkid[cfg->pmk_list->pmkids.npmkid - 1].
+			PMKID[i]));
+	}
+
+	err = wl_update_pmklist(dev, cfg->pmk_list, err);
+
+	return err;
+}
+
+static s32
+wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_pmksa *pmksa)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct _pmkid_list pmkid = {0};
+	s32 err = 0;
+	int i;
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETHER_ADDR_LEN);
+	memcpy(pmkid.pmkid[0].PMKID, pmksa->pmkid, WPA2_PMKID_LEN);
+
+	WL_DBG(("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
+		&pmkid.pmkid[0].BSSID));
+	for (i = 0; i < WPA2_PMKID_LEN; i++) {
+		WL_DBG(("%02x\n", pmkid.pmkid[0].PMKID[i]));
+	}
+
+	for (i = 0; i < cfg->pmk_list->pmkids.npmkid; i++)
+		if (!memcmp
+		    (pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID,
+		     ETHER_ADDR_LEN))
+			break;
+
+	if ((cfg->pmk_list->pmkids.npmkid > 0) &&
+		(i < cfg->pmk_list->pmkids.npmkid)) {
+		memset(&cfg->pmk_list->pmkids.pmkid[i], 0, sizeof(pmkid_t));
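+		/* Compact the list: shift every subsequent entry down one
+		 * slot so the array stays contiguous, then decrement the
+		 * count.
+		 */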
+		for (; i < (cfg->pmk_list->pmkids.npmkid - 1); i++) {
+			memcpy(&cfg->pmk_list->pmkids.pmkid[i].BSSID,
+				&cfg->pmk_list->pmkids.pmkid[i + 1].BSSID,
+				ETHER_ADDR_LEN);
+			memcpy(&cfg->pmk_list->pmkids.pmkid[i].PMKID,
+				&cfg->pmk_list->pmkids.pmkid[i + 1].PMKID,
+				WPA2_PMKID_LEN);
+		}
+		cfg->pmk_list->pmkids.npmkid--;
+	} else {
+		err = -EINVAL;
+	}
+
+	err = wl_update_pmklist(dev, cfg->pmk_list, err);
+
+	return err;
+
+}
+
+static s32
+wl_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 err = 0;
+	RETURN_EIO_IF_NOT_UP(cfg);
+	memset(cfg->pmk_list, 0, sizeof(*cfg->pmk_list));
+	err = wl_update_pmklist(dev, cfg->pmk_list, err);
+	return err;
+
+}
+
+static wl_scan_params_t *
+wl_cfg80211_scan_alloc_params(int channel, int nprobes, int *out_params_size)
+{
+	wl_scan_params_t *params;
+	int params_size;
+	int num_chans;
+
+	*out_params_size = 0;
+
+	/* Our scan params only need space for 1 channel and 0 ssids */
+	params_size = WL_SCAN_PARAMS_FIXED_SIZE + 1 * sizeof(uint16);
+	params = (wl_scan_params_t*) kzalloc(params_size, GFP_KERNEL);
+	if (params == NULL) {
+		WL_ERR(("mem alloc failed (%d bytes)\n", params_size));
+		return params;
+	}
+	/* kzalloc() already zeroed the allocation, so no memset is needed */
+	params->nprobes = nprobes;
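+	/* Note: the nprobes value assigned here is unconditionally
+	 * overwritten with htod32(1) further below.
+	 */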
+
+	num_chans = (channel == 0) ? 0 : 1;
+
+	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+	params->bss_type = DOT11_BSSTYPE_ANY;
+	params->scan_type = DOT11_SCANTYPE_ACTIVE;
+	params->nprobes = htod32(1);
+	params->active_time = htod32(-1);
+	params->passive_time = htod32(-1);
+	params->home_time = htod32(10);
+	if (channel == -1)
+		params->channel_list[0] = htodchanspec(channel);
+	else
+		params->channel_list[0] = wl_ch_host_to_driver(channel);
+
+	/* Our scan params have 1 channel and 0 ssids */
+	params->channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+		(num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+
+	*out_params_size = params_size;	/* rtn size to the caller */
+	return params;
+}
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32
+wl_cfg80211_remain_on_channel(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+	struct ieee80211_channel *channel, unsigned int duration, u64 *cookie)
+#else
+static s32
+wl_cfg80211_remain_on_channel(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+	struct ieee80211_channel * channel,
+	enum nl80211_channel_type channel_type,
+	unsigned int duration, u64 *cookie)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+{
+	s32 target_channel;
+	u32 id;
+	s32 err = BCME_OK;
+	struct ether_addr primary_mac;
+	struct net_device *ndev = NULL;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	WL_DBG(("Enter, channel: %d, duration ms (%d) SCANNING ?? %s \n",
+		ieee80211_frequency_to_channel(channel->center_freq),
+		duration, (wl_get_drv_status(cfg, SCANNING, ndev)) ? "YES":"NO"));
+
+	if (!cfg->p2p) {
+		WL_ERR(("cfg->p2p is not initialized\n"));
+		err = BCME_ERROR;
+		goto exit;
+	}
+
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	if (wl_get_drv_status_all(cfg, SCANNING)) {
+		wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+	}
+#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+	target_channel = ieee80211_frequency_to_channel(channel->center_freq);
+	memcpy(&cfg->remain_on_chan, channel, sizeof(struct ieee80211_channel));
+#if defined(WL_ENABLE_P2P_IF)
+	cfg->remain_on_chan_type = channel_type;
+#endif /* WL_ENABLE_P2P_IF */
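+	/* remain-on-channel cookies must be non-zero, so skip 0 when the
+	 * counter wraps
+	 */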
+	id = ++cfg->last_roc_id;
+	if (id == 0)
+		id = ++cfg->last_roc_id;
+	*cookie = id;
+
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	if (wl_get_drv_status(cfg, SCANNING, ndev)) {
+		struct timer_list *_timer;
+		WL_DBG(("scan is running. go to fake listen state\n"));
+
+		wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
+
+		if (timer_pending(&cfg->p2p->listen_timer)) {
+			WL_DBG(("cancel current listen timer \n"));
+			del_timer_sync(&cfg->p2p->listen_timer);
+		}
+
+		_timer = &cfg->p2p->listen_timer;
+		wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
+
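+		/* arm a host-side timer so wl_cfgp2p_listen_expired() completes
+		 * the (fake) listen after 'duration' ms without disturbing the
+		 * ongoing scan
+		 */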
+		INIT_TIMER(_timer, wl_cfgp2p_listen_expired, duration, 0);
+
+		err = BCME_OK;
+		goto exit;
+	}
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+#ifdef WL_CFG80211_SYNC_GON
+	if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) {
+		/* do not enter listen mode again if we are in listen mode already for next af.
+		 * remain on channel completion will be returned by waiting next af completion.
+		 */
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+		wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
+#else
+		wl_set_drv_status(cfg, REMAINING_ON_CHANNEL, ndev);
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+		goto exit;
+	}
+#endif /* WL_CFG80211_SYNC_GON */
+	if (cfg->p2p && !cfg->p2p->on) {
+		/* In case of p2p_listen command, supplicant send remain_on_channel
+		 * without turning on P2P
+		 */
+		get_primary_mac(cfg, &primary_mac);
+		wl_cfgp2p_generate_bss_mac(&primary_mac, &cfg->p2p->dev_addr, &cfg->p2p->int_addr);
+		p2p_on(cfg) = true;
+	}
+
+	if (p2p_is_on(cfg)) {
+		err = wl_cfgp2p_enable_discovery(cfg, ndev, NULL, 0);
+		if (unlikely(err)) {
+			goto exit;
+		}
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+		wl_set_drv_status(cfg, REMAINING_ON_CHANNEL, ndev);
+#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+		err = wl_cfgp2p_discover_listen(cfg, target_channel, duration);
+
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+		if (err == BCME_OK) {
+			wl_set_drv_status(cfg, REMAINING_ON_CHANNEL, ndev);
+		} else {
+			/* if it failed, the firmware may be in an internal
+			 * scanning state, so other scan requests must not
+			 * abort it
+			 */
+			wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
+		}
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+		/* WAR: force err = BCME_OK to prevent a cookie mismatch in
+		 * wpa_supplicant; the expiry timer will deliver the completion
+		 * to the upper layer
+		 */
+		err = BCME_OK;
+	}
+
+exit:
+	if (err == BCME_OK) {
+		WL_INFO(("Success\n"));
+#if defined(WL_CFG80211_P2P_DEV_IF)
+		cfg80211_ready_on_channel(cfgdev, *cookie, channel,
+			duration, GFP_KERNEL);
+#else
+		cfg80211_ready_on_channel(cfgdev, *cookie, channel,
+			channel_type, duration, GFP_KERNEL);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	} else {
+		WL_ERR(("Fail to Set (err=%d cookie:%llu)\n", err, *cookie));
+	}
+	return err;
+}
+
+static s32
+wl_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
+	bcm_struct_cfgdev *cfgdev, u64 cookie)
+{
+	s32 err = 0;
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	if (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
+		WL_DBG((" enter ) on P2P dedicated discover interface\n"));
+	}
+#else
+	WL_DBG((" enter ) netdev_ifidx: %d \n", cfgdev->ifindex));
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	return err;
+}
+
+static void
+wl_cfg80211_afx_handler(struct work_struct *work)
+{
+	struct afx_hdl *afx_instance;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	s32 ret = BCME_OK;
+
+	afx_instance = container_of(work, struct afx_hdl, work);
+	if (afx_instance != NULL && cfg->afx_hdl->is_active) {
+		if (cfg->afx_hdl->is_listen && cfg->afx_hdl->my_listen_chan) {
+			ret = wl_cfgp2p_discover_listen(cfg, cfg->afx_hdl->my_listen_chan,
+				(100 * (1 + (RANDOM32() % 3)))); /* 100ms ~ 300ms */
+		} else {
+			ret = wl_cfgp2p_act_frm_search(cfg, cfg->afx_hdl->dev,
+				cfg->afx_hdl->bssidx, cfg->afx_hdl->peer_listen_chan,
+				NULL);
+		}
+		if (unlikely(ret != BCME_OK)) {
+			WL_ERR(("ERROR occurred! returned value is (%d)\n", ret));
+			if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL))
+				complete(&cfg->act_frm_scan);
+		}
+	}
+}
+
+static s32
+wl_cfg80211_af_searching_channel(struct bcm_cfg80211 *cfg, struct net_device *dev)
+{
+	u32 max_retry = WL_CHANNEL_SYNC_RETRY;
+
+	if (dev == NULL)
+		return -1;
+
+	WL_DBG((" enter ) \n"));
+
+	wl_set_drv_status(cfg, FINDING_COMMON_CHANNEL, dev);
+	cfg->afx_hdl->is_active = TRUE;
+
+	/* Loop to wait until we find a peer's channel or the
+	 * pending action frame tx is cancelled.
+	 */
+	while ((cfg->afx_hdl->retry < max_retry) &&
+		(cfg->afx_hdl->peer_chan == WL_INVALID)) {
+		cfg->afx_hdl->is_listen = FALSE;
+		wl_set_drv_status(cfg, SCANNING, dev);
+		WL_DBG(("Scheduling the action frame for sending.. retry %d\n",
+			cfg->afx_hdl->retry));
+		/* search peer on peer's listen channel */
+		schedule_work(&cfg->afx_hdl->work);
+		wait_for_completion_timeout(&cfg->act_frm_scan,
+			msecs_to_jiffies(WL_AF_SEARCH_TIME_MAX));
+
+		if ((cfg->afx_hdl->peer_chan != WL_INVALID) ||
+			!(wl_get_drv_status(cfg, FINDING_COMMON_CHANNEL, dev)))
+			break;
+
+		if (cfg->afx_hdl->my_listen_chan) {
+			WL_DBG(("Scheduling Listen peer in my listen channel = %d\n",
+				cfg->afx_hdl->my_listen_chan));
+			/* listen on my listen channel */
+			cfg->afx_hdl->is_listen = TRUE;
+			schedule_work(&cfg->afx_hdl->work);
+			wait_for_completion_timeout(&cfg->act_frm_scan,
+				msecs_to_jiffies(WL_AF_SEARCH_TIME_MAX));
+		}
+		if ((cfg->afx_hdl->peer_chan != WL_INVALID) ||
+			!(wl_get_drv_status(cfg, FINDING_COMMON_CHANNEL, dev)))
+			break;
+
+		cfg->afx_hdl->retry++;
+
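+		/* between retries, let the primary connection get some airtime
+		 * in VSDB scenarios
+		 */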
+		WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg);
+	}
+
+	cfg->afx_hdl->is_active = FALSE;
+
+	wl_clr_drv_status(cfg, SCANNING, dev);
+	wl_clr_drv_status(cfg, FINDING_COMMON_CHANNEL, dev);
+
+	return (cfg->afx_hdl->peer_chan);
+}
+
+struct p2p_config_af_params {
+	s32 max_tx_retry;	/* max tx retry count if tx no ack */
+	/* To make sure the action frame is sent successfully, we may have to
+	 * turn off MPC: 0: off, 1: on, (-1): do nothing
+	 */
+	s32 mpc_onoff;
+#ifdef WL_CFG80211_SYNC_GON
+	bool extra_listen;
+#endif
+	bool search_channel;	/* 1: search peer's channel to send af */
+};
+
+static s32
+wl_cfg80211_config_p2p_pub_af_tx(struct wiphy *wiphy,
+	wl_action_frame_t *action_frame, wl_af_params_t *af_params,
+	struct p2p_config_af_params *config_af_params)
+{
+	s32 err = BCME_OK;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	wifi_p2p_pub_act_frame_t *act_frm =
+		(wifi_p2p_pub_act_frame_t *) (action_frame->data);
+
+	/* initialize default value */
+#ifdef WL_CFG80211_SYNC_GON
+	config_af_params->extra_listen = true;
+#endif
+	config_af_params->search_channel = false;
+	config_af_params->max_tx_retry = WL_AF_TX_MAX_RETRY;
+	config_af_params->mpc_onoff = -1;
+	cfg->next_af_subtype = P2P_PAF_SUBTYPE_INVALID;
+
+	switch (act_frm->subtype) {
+	case P2P_PAF_GON_REQ: {
+		WL_DBG(("P2P: GO_NEG_PHASE status set \n"));
+		wl_set_p2p_status(cfg, GO_NEG_PHASE);
+
+		config_af_params->mpc_onoff = 0;
+		config_af_params->search_channel = true;
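+		/* P2P public action subtypes are numbered REQ, RSP, CONF in
+		 * sequence, so the expected answer is subtype + 1
+		 */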
+		cfg->next_af_subtype = act_frm->subtype + 1;
+
+		/* increase dwell time to wait for RESP frame */
+		af_params->dwell_time = WL_MED_DWELL_TIME;
+
+		break;
+	}
+	case P2P_PAF_GON_RSP: {
+		cfg->next_af_subtype = act_frm->subtype + 1;
+		/* increase dwell time to wait for CONF frame */
+		af_params->dwell_time = WL_MED_DWELL_TIME + 100;
+		break;
+	}
+	case P2P_PAF_GON_CONF: {
+		/* If we have reached GO Negotiation confirmation, reset the filter */
+		WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
+		wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+
+		/* turn MPC back on once GO negotiation is done */
+		config_af_params->mpc_onoff = 1;
+
+		/* minimize dwell time */
+		af_params->dwell_time = WL_MIN_DWELL_TIME;
+
+#ifdef WL_CFG80211_SYNC_GON
+		config_af_params->extra_listen = false;
+#endif /* WL_CFG80211_SYNC_GON */
+		break;
+	}
+	case P2P_PAF_INVITE_REQ: {
+		config_af_params->search_channel = true;
+		cfg->next_af_subtype = act_frm->subtype + 1;
+
+		/* increase dwell time */
+		af_params->dwell_time = WL_MED_DWELL_TIME;
+		break;
+	}
+	case P2P_PAF_INVITE_RSP:
+		/* minimize dwell time */
+		af_params->dwell_time = WL_MIN_DWELL_TIME;
+#ifdef WL_CFG80211_SYNC_GON
+		config_af_params->extra_listen = false;
+#endif /* WL_CFG80211_SYNC_GON */
+		break;
+	case P2P_PAF_DEVDIS_REQ: {
+		if (IS_ACTPUB_WITHOUT_GROUP_ID(&act_frm->elts[0],
+			action_frame->len)) {
+			config_af_params->search_channel = true;
+		}
+
+		cfg->next_af_subtype = act_frm->subtype + 1;
+		/* maximize dwell time to wait for RESP frame */
+		af_params->dwell_time = WL_LONG_DWELL_TIME;
+		break;
+	}
+	case P2P_PAF_DEVDIS_RSP:
+		/* minimize dwell time */
+		af_params->dwell_time = WL_MIN_DWELL_TIME;
+#ifdef WL_CFG80211_SYNC_GON
+		config_af_params->extra_listen = false;
+#endif /* WL_CFG80211_SYNC_GON */
+		break;
+	case P2P_PAF_PROVDIS_REQ: {
+		if (IS_ACTPUB_WITHOUT_GROUP_ID(&act_frm->elts[0],
+			action_frame->len)) {
+			config_af_params->search_channel = true;
+		}
+
+		config_af_params->mpc_onoff = 0;
+		cfg->next_af_subtype = act_frm->subtype + 1;
+		/* increase dwell time to wait for RESP frame */
+		af_params->dwell_time = WL_MED_DWELL_TIME;
+		break;
+	}
+	case P2P_PAF_PROVDIS_RSP: {
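+		/* provision discovery is normally followed by GO negotiation,
+		 * so expect a GON_REQ next instead of subtype + 1
+		 */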
+		cfg->next_af_subtype = P2P_PAF_GON_REQ;
+		af_params->dwell_time = WL_MIN_DWELL_TIME;
+#ifdef WL_CFG80211_SYNC_GON
+		config_af_params->extra_listen = false;
+#endif /* WL_CFG80211_SYNC_GON */
+		break;
+	}
+	default:
+		WL_DBG(("Unknown p2p pub act frame subtype: %d\n",
+			act_frm->subtype));
+		err = BCME_BADARG;
+	}
+	return err;
+}
+
+#ifdef WL11U
+static bool
+wl_cfg80211_check_DFS_channel(struct bcm_cfg80211 *cfg, wl_af_params_t *af_params,
+	void *frame, u16 frame_len)
+{
+	struct wl_scan_results *bss_list;
+	struct wl_bss_info *bi = NULL;
+	bool result = false;
+	s32 i;
+	chanspec_t chanspec;
+
+	/* If the channel is in the DFS range (52~148), decide whether to block the frame */
+	if (af_params &&
+		(af_params->channel >= 52 && af_params->channel <= 148)) {
+		if (!wl_cfgp2p_is_p2p_action(frame, frame_len)) {
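+			/* allow the frame only if the latest scan results show a
+			 * 5 GHz BSS operating on this channel (i.e. the channel
+			 * is already in use)
+			 */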
+			bss_list = cfg->bss_list;
+			bi = next_bss(bss_list, bi);
+			for_each_bss(bss_list, bi, i) {
+				chanspec = wl_chspec_driver_to_host(bi->chanspec);
+				if (CHSPEC_IS5G(chanspec) &&
+					((bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(chanspec))
+					== af_params->channel)) {
+					result = true;	/* do not block the action frame */
+					break;
+				}
+			}
+		}
+	}
+	else {
+		result = true;
+	}
+
+	WL_DBG(("result=%s", result?"true":"false"));
+	return result;
+}
+#endif /* WL11U */
+
+
+static bool
+wl_cfg80211_send_action_frame(struct wiphy *wiphy, struct net_device *dev,
+	bcm_struct_cfgdev *cfgdev, wl_af_params_t *af_params,
+	wl_action_frame_t *action_frame, u16 action_frame_len, s32 bssidx)
+{
+#ifdef WL11U
+	struct net_device *ndev = NULL;
+#endif /* WL11U */
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	bool ack = false;
+	u8 category, action;
+	s32 tx_retry;
+	struct p2p_config_af_params config_af_params;
+#ifdef VSDB
+	ulong off_chan_started_jiffies = 0;
+#endif
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+
+
+	/* Add the default dwell time
+	 * Dwell time to stay off-channel to wait for a response action frame
+	 * after transmitting a GO Negotiation action frame
+	 */
+	af_params->dwell_time = WL_DWELL_TIME;
+
+#ifdef WL11U
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	ndev = dev;
+#else
+	ndev = ndev_to_cfgdev(cfgdev);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#endif /* WL11U */
+
+	category = action_frame->data[DOT11_ACTION_CAT_OFF];
+	action = action_frame->data[DOT11_ACTION_ACT_OFF];
+
+	/* initialize variables */
+	tx_retry = 0;
+	cfg->next_af_subtype = P2P_PAF_SUBTYPE_INVALID;
+	config_af_params.max_tx_retry = WL_AF_TX_MAX_RETRY;
+	config_af_params.mpc_onoff = -1;
+	config_af_params.search_channel = false;
+#ifdef WL_CFG80211_SYNC_GON
+	config_af_params.extra_listen = false;
+#endif
+
+	/* config parameters */
+	/* Public Action Frame Process - DOT11_ACTION_CAT_PUBLIC */
+	if (category == DOT11_ACTION_CAT_PUBLIC) {
+		if ((action == P2P_PUB_AF_ACTION) &&
+			(action_frame_len >= sizeof(wifi_p2p_pub_act_frame_t))) {
+			/* p2p public action frame process */
+			if (BCME_OK != wl_cfg80211_config_p2p_pub_af_tx(wiphy,
+				action_frame, af_params, &config_af_params)) {
+				WL_DBG(("Unknown subtype.\n"));
+			}
+
+		} else if (action_frame_len >= sizeof(wifi_p2psd_gas_pub_act_frame_t)) {
+			/* service discovery process */
+			if (action == P2PSD_ACTION_ID_GAS_IREQ ||
+				action == P2PSD_ACTION_ID_GAS_CREQ) {
+				/* configure service discovery query frame */
+
+				config_af_params.search_channel = true;
+
+				/* save next af subtype to cancel the remaining dwell time */
+				cfg->next_af_subtype = action + 1;
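+				/* GAS request/response action IDs are
+				 * consecutive, hence action + 1
+				 */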
+
+				af_params->dwell_time = WL_MED_DWELL_TIME;
+			} else if (action == P2PSD_ACTION_ID_GAS_IRESP ||
+				action == P2PSD_ACTION_ID_GAS_CRESP) {
+				/* configure service discovery response frame */
+				af_params->dwell_time = WL_MIN_DWELL_TIME;
+			} else {
+				WL_DBG(("Unknown action type: %d\n", action));
+			}
+		} else {
+			WL_DBG(("Unknown Frame: category 0x%x, action 0x%x, length %d\n",
+				category, action, action_frame_len));
+		}
+	} else if (category == P2P_AF_CATEGORY) {
+		/* do not configure anything. it will be sent with a default configuration */
+	} else {
+		WL_DBG(("Unknown Frame: category 0x%x, action 0x%x\n",
+			category, action));
+		if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+			wl_clr_drv_status(cfg, SENDING_ACT_FRM, dev);
+			return false;
+		}
+	}
+
+	/* To make sure the action frame is sent successfully, turn off MPC */
+	if (config_af_params.mpc_onoff == 0) {
+		wldev_iovar_setint(dev, "mpc", 0);
+	}
+
+	/* validate channel and p2p ies */
+	if (config_af_params.search_channel && IS_P2P_SOCIAL(af_params->channel) &&
+		wl_to_p2p_bss_saved_ie(cfg, P2PAPI_BSSCFG_DEVICE).p2p_probe_req_ie_len) {
+		config_af_params.search_channel = true;
+	} else {
+		config_af_params.search_channel = false;
+	}
+#ifdef WL11U
+	if (ndev == bcmcfg_to_prmry_ndev(cfg))
+		config_af_params.search_channel = false;
+#endif /* WL11U */
+
+#ifdef VSDB
+	/* if connecting on primary iface, sleep for a while before sending af tx for VSDB */
+	if (wl_get_drv_status(cfg, CONNECTING, bcmcfg_to_prmry_ndev(cfg))) {
+		OSL_SLEEP(50);
+	}
+#endif
+
+	/* if scan is ongoing, abort current scan. */
+	if (wl_get_drv_status_all(cfg, SCANNING)) {
+		wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+	}
+
+#ifdef WL11U
+	/* handling DFS channel exceptions */
+	if (!wl_cfg80211_check_DFS_channel(cfg, af_params, action_frame->data, action_frame->len)) {
+		return false;	/* the action frame was blocked */
+	}
+#endif /* WL11U */
+
+	/* set status and destination address before sending af */
+	if (cfg->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) {
+		/* set this status to cancel the remained dwell time in rx process */
+		wl_set_drv_status(cfg, WAITING_NEXT_ACT_FRM, dev);
+	}
+	wl_set_drv_status(cfg, SENDING_ACT_FRM, dev);
+	memcpy(cfg->afx_hdl->tx_dst_addr.octet,
+		af_params->action_frame.da.octet,
+		sizeof(cfg->afx_hdl->tx_dst_addr.octet));
+
+	/* save af_params for rx process */
+	cfg->afx_hdl->pending_tx_act_frm = af_params;
+
+	/* search peer's channel */
+	if (config_af_params.search_channel) {
+		/* initialize afx_hdl */
+		if (wl_cfgp2p_find_idx(cfg, dev, &cfg->afx_hdl->bssidx) != BCME_OK) {
+			WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+			goto exit;
+		}
+		cfg->afx_hdl->dev = dev;
+		cfg->afx_hdl->retry = 0;
+		cfg->afx_hdl->peer_chan = WL_INVALID;
+
+		if (wl_cfg80211_af_searching_channel(cfg, dev) == WL_INVALID) {
+			WL_ERR(("couldn't find peer's channel.\n"));
+			wl_cfgp2p_print_actframe(true, action_frame->data, action_frame->len,
+				af_params->channel);
+			goto exit;
+		}
+
+		wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+		/*
+		 * Abort the scan even for VSDB scenarios. The scan does get
+		 * aborted in firmware, but only after the piggyback algorithm
+		 * has run. To take care of the current piggyback algorithm,
+		 * abort the scan here as well.
+		 */
+		wl_notify_escan_complete(cfg, dev, true, true);
+		/* Suspend P2P discovery's search-listen to prevent it from
+		 * starting a scan or changing the channel.
+		 */
+		wl_cfgp2p_discover_enable_search(cfg, false);
+
+		/* update channel */
+		af_params->channel = cfg->afx_hdl->peer_chan;
+	}
+
+#ifdef VSDB
+	off_chan_started_jiffies = jiffies;
+#endif /* VSDB */
+
+	wl_cfgp2p_print_actframe(true, action_frame->data, action_frame->len, af_params->channel);
+
+	/* Now send a tx action frame */
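+	/* wl_cfgp2p_tx_action_frame() returns BCME_OK (0) when the frame was
+	 * acked; any non-zero return is treated as "no ack"
+	 */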
+	ack = wl_cfgp2p_tx_action_frame(cfg, dev, af_params, bssidx) ? false : true;
+
+	/* if it failed, retry; the retry limit is config_af_params.max_tx_retry */
+	while ((ack == false) && (tx_retry++ < config_af_params.max_tx_retry)) {
+#ifdef VSDB
+		if (af_params->channel) {
+			if (jiffies_to_msecs(jiffies - off_chan_started_jiffies) >
+				OFF_CHAN_TIME_THRESHOLD_MS) {
+				WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg);
+				off_chan_started_jiffies = jiffies;
+			} else
+				OSL_SLEEP(AF_RETRY_DELAY_TIME);
+		}
+#endif /* VSDB */
+		ack = wl_cfgp2p_tx_action_frame(cfg, dev, af_params, bssidx) ?
+			false : true;
+	}
+
+	if (ack == false) {
+		WL_ERR(("Failed to send Action Frame(retry %d)\n", tx_retry));
+	}
+	WL_DBG(("Complete to send action frame\n"));
+exit:
+	/* Clear SENDING_ACT_FRM after all sending af is done */
+	wl_clr_drv_status(cfg, SENDING_ACT_FRM, dev);
+
+#ifdef WL_CFG80211_SYNC_GON
+	/* WAR: sometimes the dongle does not keep the dwell time of 'actframe'.
+	 * If we couldn't get the next action response frame and the dongle does
+	 * not keep the dwell time, go to listen state again to get the next
+	 * action response frame.
+	 */
+	if (ack && config_af_params.extra_listen &&
+		wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM) &&
+		cfg->af_sent_channel == cfg->afx_hdl->my_listen_chan) {
+		s32 extra_listen_time;
+
+		extra_listen_time = af_params->dwell_time -
+			jiffies_to_msecs(jiffies - cfg->af_tx_sent_jiffies);
+
+		if (extra_listen_time > 50) {
+			wl_set_drv_status(cfg, WAITING_NEXT_ACT_FRM_LISTEN, dev);
+			WL_DBG(("Wait more time! actual af time:%d, "
+				"calculated extra listen:%d\n",
+				af_params->dwell_time, extra_listen_time));
+			if (wl_cfgp2p_discover_listen(cfg, cfg->af_sent_channel,
+				extra_listen_time + 100) == BCME_OK) {
+				wait_for_completion_timeout(&cfg->wait_next_af,
+					msecs_to_jiffies(extra_listen_time + 100 + 300));
+			}
+			wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM_LISTEN, dev);
+		}
+	}
+#endif /* WL_CFG80211_SYNC_GON */
+	wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, dev);
+
+	if (cfg->afx_hdl->pending_tx_act_frm)
+		cfg->afx_hdl->pending_tx_act_frm = NULL;
+
+	WL_INFO(("-- sending Action Frame is %s, listen chan: %d\n",
+		(ack) ? "Succeeded!!":"Failed!!", cfg->afx_hdl->my_listen_chan));
+
+
+	/* if all done, turn mpc on again */
+	if (config_af_params.mpc_onoff == 1) {
+		wldev_iovar_setint(dev, "mpc", 1);
+	}
+
+	return ack;
+}
+
+#define MAX_NUM_OF_ASSOCIATED_DEV       64
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+static s32
+wl_cfg80211_mgmt_tx(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+	struct cfg80211_mgmt_tx_params *params, u64 *cookie)
+#else
+static s32
+wl_cfg80211_mgmt_tx(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+	struct ieee80211_channel *channel, bool offchan,
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0))
+	enum nl80211_channel_type channel_type,
+	bool channel_type_valid,
+#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0) */
+	unsigned int wait, const u8* buf, size_t len,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+	bool no_cck,
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
+	bool dont_wait_for_ack,
+#endif
+	u64 *cookie)
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
+{
+	wl_action_frame_t *action_frame;
+	wl_af_params_t *af_params;
+	scb_val_t scb_val;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+	struct ieee80211_channel *channel = params->chan;
+	const u8 *buf = params->buf;
+	size_t len = params->len;
+#endif
+	const struct ieee80211_mgmt *mgmt;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *dev = NULL;
+	s32 err = BCME_OK;
+	s32 bssidx = 0;
+	u32 id;
+	bool ack = false;
+	s8 eabuf[ETHER_ADDR_STR_LEN];
+
+	WL_DBG(("Enter \n"));
+
+	dev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	/* set bsscfg idx for iovar (wlan0: P2PAPI_BSSCFG_PRIMARY, p2p: P2PAPI_BSSCFG_DEVICE)	*/
+	if (discover_cfgdev(cfgdev, cfg)) {
+		bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+	}
+	else {
+		if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+			WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+			return BCME_ERROR;
+		}
+	}
+
+	WL_DBG(("TX target bssidx=%d\n", bssidx));
+
+	if (p2p_is_on(cfg)) {
+		/* Suspend P2P discovery search-listen to prevent it from changing the
+		 * channel.
+		 */
+		if ((err = wl_cfgp2p_discover_enable_search(cfg, false)) < 0) {
+			WL_ERR(("Can not disable discovery mode\n"));
+			return -EFAULT;
+		}
+	}
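+	/* mgmt-tx cookies must be non-zero; skip 0 when the counter wraps */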
+	*cookie = 0;
+	id = cfg->send_action_id++;
+	if (id == 0)
+		id = cfg->send_action_id++;
+	*cookie = id;
+	mgmt = (const struct ieee80211_mgmt *)buf;
+	if (ieee80211_is_mgmt(mgmt->frame_control)) {
+		if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+			s32 ie_offset =  DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
+			s32 ie_len = len - ie_offset;
+			if ((dev == bcmcfg_to_prmry_ndev(cfg)) && cfg->p2p)
+				bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+			wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
+				VNDR_IE_PRBRSP_FLAG, (u8 *)(buf + ie_offset), ie_len);
+			cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, true, GFP_KERNEL);
+			goto exit;
+		} else if (ieee80211_is_disassoc(mgmt->frame_control) ||
+			ieee80211_is_deauth(mgmt->frame_control)) {
+			char mac_buf[MAX_NUM_OF_ASSOCIATED_DEV *
+				sizeof(struct ether_addr) + sizeof(uint)] = {0};
+			int num_associated = 0;
+			struct maclist *assoc_maclist = (struct maclist *)mac_buf;
+			if (!bcmp((const uint8 *)BSSID_BROADCAST,
+				(const struct ether_addr *)mgmt->da, ETHER_ADDR_LEN)) {
+				assoc_maclist->count = MAX_NUM_OF_ASSOCIATED_DEV;
+				err = wldev_ioctl(dev, WLC_GET_ASSOCLIST,
+					assoc_maclist, sizeof(mac_buf), false);
+				if (err < 0)
+					WL_ERR(("WLC_GET_ASSOCLIST error %d\n", err));
+				else
+					num_associated = assoc_maclist->count;
+			}
+			memcpy(scb_val.ea.octet, mgmt->da, ETH_ALEN);
+			scb_val.val = mgmt->u.disassoc.reason_code;
+			err = wldev_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scb_val,
+				sizeof(scb_val_t), true);
+			if (err < 0)
+				WL_ERR(("WLC_SCB_DEAUTHENTICATE_FOR_REASON error %d\n", err));
+			WL_ERR(("Disconnect STA : %s scb_val.val %d\n",
+				bcm_ether_ntoa((const struct ether_addr *)mgmt->da, eabuf),
+				scb_val.val));
+
+			if (num_associated > 0 && ETHER_ISBCAST(mgmt->da))
+				wl_delay(400);
+
+			cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, true, GFP_KERNEL);
+			goto exit;
+
+		} else if (ieee80211_is_action(mgmt->frame_control)) {
+			/* Abort the dwell time of any previous off-channel
+			* action frame that may be still in effect.  Sending
+			* off-channel action frames relies on the driver's
+			* scan engine.  If a previous off-channel action frame
+			* tx is still in progress (including the dwell time),
+			* then this new action frame will not be sent out.
+			*/
+/* Do not abort scan for VSDB. Scan will be aborted in firmware if necessary.
+ * And previous off-channel action frame must be ended before new af tx.
+ */
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+			wl_notify_escan_complete(cfg, dev, true, true);
+#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+		}
+
+	} else {
+		WL_ERR(("Driver only allows MGMT packet type\n"));
+		goto exit;
+	}
+
+	af_params = (wl_af_params_t *) kzalloc(WL_WIFI_AF_PARAMS_SIZE, GFP_KERNEL);
+
+	if (af_params == NULL) {
+		WL_ERR(("unable to allocate frame\n"));
+		return -ENOMEM;
+	}
+
+	action_frame = &af_params->action_frame;
+
+	/* Add the packet Id */
+	action_frame->packetId = *cookie;
+	WL_DBG(("action frame %d\n", action_frame->packetId));
+	/* Add BSSID */
+	memcpy(&action_frame->da, &mgmt->da[0], ETHER_ADDR_LEN);
+	memcpy(&af_params->BSSID, &mgmt->bssid[0], ETHER_ADDR_LEN);
+
+	/* Add the length, excluding the 802.11 header */
+	action_frame->len = len - DOT11_MGMT_HDR_LEN;
+	WL_DBG(("action_frame->len: %d\n", action_frame->len));
+
+	/* Add the channel */
+	af_params->channel =
+		ieee80211_frequency_to_channel(channel->center_freq);
+	/* Save listen_chan for searching common channel */
+	cfg->afx_hdl->peer_listen_chan = af_params->channel;
+	WL_DBG(("channel from upper layer %d\n", cfg->afx_hdl->peer_listen_chan));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+	af_params->dwell_time = params->wait;
+#else
+	af_params->dwell_time = wait;
+#endif
+
+	memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN], action_frame->len);
+
+	ack = wl_cfg80211_send_action_frame(wiphy, dev, cfgdev, af_params,
+		action_frame, action_frame->len, bssidx);
+	cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, ack, GFP_KERNEL);
+
+	kfree(af_params);
+exit:
+	return err;
+}
+
+
+static void
+wl_cfg80211_mgmt_frame_register(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+	u16 frame_type, bool reg)
+{
+
+	WL_DBG(("frame_type: %x, reg: %d\n", frame_type, reg));
+
+	/* only probe request registration is relevant; nothing to do for it
+	 * here
+	 */
+	if (frame_type != (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ))
+		return;
+}
+
+
+static s32
+wl_cfg80211_change_bss(struct wiphy *wiphy,
+	struct net_device *dev,
+	struct bss_parameters *params)
+{
+	s32 err = 0;
+	s32 ap_isolate = 0;
+
+	/* cts_prot, preamble, slot time, and basic rates are not handled by
+	 * this driver; they are accepted and ignored
+	 */
+	if (params->use_cts_prot >= 0) {
+	}
+
+	if (params->use_short_preamble >= 0) {
+	}
+
+	if (params->use_short_slot_time >= 0) {
+	}
+
+	if (params->basic_rates) {
+	}
+
+	if (params->ap_isolate >= 0) {
+		ap_isolate = params->ap_isolate;
+		err = wldev_iovar_setint(dev, "ap_isolate", ap_isolate);
+		if (unlikely(err))
+		{
+			WL_ERR(("set ap_isolate Error (%d)\n", err));
+		}
+	}
+
+	if (params->ht_opmode >= 0) {
+	}
+
+
+	return 0;
+}
+
+static s32
+wl_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
+	struct ieee80211_channel *chan,
+	enum nl80211_chan_width channel_width)
+{
+	s32 _chan;
+	chanspec_t chspec = 0;
+	chanspec_t fw_chspec = 0;
+	u32 bw = WL_CHANSPEC_BW_20;
+	u32 dev_role = 0;
+	s32 bssidx = 0;
+	s32 err = BCME_OK;
+	s32 bw_cap = 0;
+	struct {
+		u32 band;
+		u32 bw_cap;
+	} param = {0, 0};
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+#ifdef CUSTOM_SET_CPUCORE
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+#endif /* CUSTOM_SET_CPUCORE */
+
+	dev = ndev_to_wlc_ndev(dev, cfg);
+	_chan = ieee80211_frequency_to_channel(chan->center_freq);
+	WL_ERR(("netdev_ifidx(%d), chan_width(%d) target channel(%d) \n",
+		dev->ifindex, channel_width, _chan));
+
+
+	if (chan->band == IEEE80211_BAND_5GHZ) {
+		param.band = WLC_BAND_5G;
+		err = wldev_iovar_getbuf(dev, "bw_cap", &param, sizeof(param),
+			cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+		if (err) {
+			if (err != BCME_UNSUPPORTED) {
+				WL_ERR(("bw_cap failed, %d\n", err));
+				return err;
+			} else {
+				err = wldev_iovar_getint(dev, "mimo_bw_cap", &bw_cap);
+				if (err) {
+					WL_ERR(("error get mimo_bw_cap (%d)\n", err));
+				}
+				if (bw_cap != WLC_N_BW_20ALL)
+					bw = WL_CHANSPEC_BW_40;
+			}
+		} else {
+			if (WL_BW_CAP_80MHZ(cfg->ioctl_buf[0]))
+				bw = WL_CHANSPEC_BW_80;
+			else if (WL_BW_CAP_40MHZ(cfg->ioctl_buf[0]))
+				bw = WL_CHANSPEC_BW_40;
+			else
+				bw = WL_CHANSPEC_BW_20;
+
+		}
+
+	} else if (chan->band == IEEE80211_BAND_2GHZ)
+		bw = WL_CHANSPEC_BW_20;
+
+	if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+		dev_role = NL80211_IFTYPE_AP;
+	}
+#if defined(WL_ENABLE_P2P_IF)
+	else if (dev == cfg->p2p_net) {
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	}
+#endif /* WL_ENABLE_P2P_IF */
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	if (p2p_is_on(cfg) &&
+		(bssidx == wl_to_p2p_bss_bssidx(cfg,
+		P2PAPI_BSSCFG_CONNECTION))) {
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	}
+
+	/* Do not follow upper layer request in case of P2P GO */
+	if (dev_role != NL80211_IFTYPE_P2P_GO) {
+		switch (channel_width) {
+		case NL80211_CHAN_WIDTH_40:
+			if (bw >= WL_CHANSPEC_BW_40)
+				bw = WL_CHANSPEC_BW_40;
+			else
+				WL_ERR(("Failed to set channel bandwidth to "
+					"40 MHz: hardware does not support it\n"));
+			break;
+		case NL80211_CHAN_WIDTH_80:
+			if (bw >= WL_CHANSPEC_BW_80)
+				bw = WL_CHANSPEC_BW_80;
+			else
+				WL_ERR(("Failed to set channel bandwidth to "
+					"80 MHz: hardware does not support it\n"));
+			break;
+		case NL80211_CHAN_WIDTH_80P80:
+			if (bw >= WL_CHANSPEC_BW_8080)
+				bw = WL_CHANSPEC_BW_8080;
+			else
+				WL_ERR(("Failed to set channel bandwidth to "
+					"80 + 80 MHz: hardware does not support it\n"));
+			break;
+		case NL80211_CHAN_WIDTH_160:
+			if (bw >= WL_CHANSPEC_BW_160)
+				bw = WL_CHANSPEC_BW_160;
+			else
+				WL_ERR(("Failed to set channel bandwidth to "
+					"160 MHz: hardware does not support it\n"));
+			break;
+		case NL80211_CHAN_WIDTH_20_NOHT:
+		case NL80211_CHAN_WIDTH_20:
+		default:
+			bw = WL_CHANSPEC_BW_20;
+			break;
+		}
+	}
+
+set_channel:
+	chspec = wf_channel2chspec(_chan, bw);
+	if (wf_chspec_valid(chspec)) {
+		fw_chspec = wl_chspec_host_to_driver(chspec);
+		if (fw_chspec != INVCHANSPEC) {
+			if ((err = wldev_iovar_setint(dev, "chanspec",
+				fw_chspec)) == BCME_BADCHAN) {
+				if (bw == WL_CHANSPEC_BW_80)
+					goto change_bw;
+				err = wldev_ioctl(dev, WLC_SET_CHANNEL,
+					&_chan, sizeof(_chan), true);
+				if (err < 0) {
+					WL_ERR(("WLC_SET_CHANNEL error %d"
+					"chip may not be supporting this channel\n", err));
+				}
+			} else if (err) {
+				WL_ERR(("failed to set chanspec error %d\n", err));
+			}
+		} else {
+			WL_ERR(("failed to convert host chanspec to fw chanspec\n"));
+			err = BCME_ERROR;
+		}
+	} else {
+change_bw:
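+		/* step the bandwidth down (80 -> 40 -> 20) and retry; give up
+		 * once no narrower width is left
+		 */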
+		if (bw == WL_CHANSPEC_BW_80)
+			bw = WL_CHANSPEC_BW_40;
+		else if (bw == WL_CHANSPEC_BW_40)
+			bw = WL_CHANSPEC_BW_20;
+		else
+			bw = 0;
+		if (bw)
+			goto set_channel;
+		WL_ERR(("Invalid chanspec 0x%x\n", chspec));
+		err = BCME_ERROR;
+	}
+#ifdef CUSTOM_SET_CPUCORE
+	if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE) {
+		WL_DBG(("SoftAP mode do not need to set cpucore\n"));
+	} else if ((dev == wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION)) &&
+		(chspec & WL_CHANSPEC_BW_80)) {
+		/* If GO is vht80 */
+		dhd->chan_isvht80 |= DHD_FLAG_P2P_MODE;
+		dhd_set_cpucore(dhd, TRUE);
+	}
+#endif
+	return err;
+}
+
+
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+struct net_device *
+wl_cfg80211_get_remain_on_channel_ndev(struct bcm_cfg80211 *cfg)
+{
+	struct net_info *_net_info, *next;
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+		if (_net_info->ndev &&
+			test_bit(WL_STATUS_REMAINING_ON_CHANNEL, &_net_info->sme_state))
+			return _net_info->ndev;
+	}
+	return NULL;
+}
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+static s32
+wl_validate_opensecurity(struct net_device *dev, s32 bssidx)
+{
+	s32 err = BCME_OK;
+
+	/* set auth */
+	err = wldev_iovar_setint_bsscfg(dev, "auth", 0, bssidx);
+	if (err < 0) {
+		WL_ERR(("auth error %d\n", err));
+		return BCME_ERROR;
+	}
+	/* set wsec */
+	err = wldev_iovar_setint_bsscfg(dev, "wsec", 0, bssidx);
+	if (err < 0) {
+		WL_ERR(("wsec error %d\n", err));
+		return BCME_ERROR;
+	}
+
+	/* set upper-layer auth */
+	err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", WPA_AUTH_NONE, bssidx);
+	if (err < 0) {
+		WL_ERR(("wpa_auth error %d\n", err));
+		return BCME_ERROR;
+	}
+
+	return 0;
+}
+
+static s32
+wl_validate_wpa2ie(struct net_device *dev, bcm_tlv_t *wpa2ie, s32 bssidx)
+{
+	s32 len = 0;
+	s32 err = BCME_OK;
+	u16 auth = 0; /* d11 open authentication */
+	u32 wsec;
+	u32 pval = 0;
+	u32 gval = 0;
+	u32 wpa_auth = 0;
+	wpa_suite_mcast_t *mcast;
+	wpa_suite_ucast_t *ucast;
+	wpa_suite_auth_key_mgmt_t *mgmt;
+
+	u16 suite_count;
+	u8 rsn_cap[2];
+	u32 wme_bss_disable;
+
+	if (wpa2ie == NULL)
+		goto exit;
+
+	WL_DBG(("Enter \n"));
+	len = wpa2ie->len;
+	/* check the mcast cipher */
+	mcast = (wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
+	switch (mcast->type) {
+		case WPA_CIPHER_NONE:
+			gval = 0;
+			break;
+		case WPA_CIPHER_WEP_40:
+		case WPA_CIPHER_WEP_104:
+			gval = WEP_ENABLED;
+			break;
+		case WPA_CIPHER_TKIP:
+			gval = TKIP_ENABLED;
+			break;
+		case WPA_CIPHER_AES_CCM:
+			gval = AES_ENABLED;
+			break;
+#ifdef BCMWAPI_WPI
+		case WAPI_CIPHER_SMS4:
+			gval = SMS4_ENABLED;
+			break;
+#endif
+		default:
+			WL_ERR(("No Security Info\n"));
+			break;
+	}
+	if ((len -= WPA_SUITE_LEN) <= 0)
+		return BCME_BADLEN;
+
+	/* check the unicast cipher */
+	ucast = (wpa_suite_ucast_t *)&mcast[1];
+	suite_count = ltoh16_ua(&ucast->count);
+	switch (ucast->list[0].type) {
+		case WPA_CIPHER_NONE:
+			pval = 0;
+			break;
+		case WPA_CIPHER_WEP_40:
+		case WPA_CIPHER_WEP_104:
+			pval = WEP_ENABLED;
+			break;
+		case WPA_CIPHER_TKIP:
+			pval = TKIP_ENABLED;
+			break;
+		case WPA_CIPHER_AES_CCM:
+			pval = AES_ENABLED;
+			break;
+#ifdef BCMWAPI_WPI
+		case WAPI_CIPHER_SMS4:
+			pval = SMS4_ENABLED;
+			break;
+#endif
+		default:
+			WL_ERR(("No Security Info\n"));
+	}
+	if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) <= 0)
+		return BCME_BADLEN;
+
+	/* for WPS, set SES_OW_ENABLED */
+	wsec = (pval | gval | SES_OW_ENABLED);
+	/* check the AKM */
+	mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count];
+	suite_count = ltoh16_ua(&mgmt->count);
+	switch (mgmt->list[0].type) {
+		case RSN_AKM_NONE:
+			wpa_auth = WPA_AUTH_NONE;
+			break;
+		case RSN_AKM_UNSPECIFIED:
+			wpa_auth = WPA2_AUTH_UNSPECIFIED;
+			break;
+		case RSN_AKM_PSK:
+			wpa_auth = WPA2_AUTH_PSK;
+			break;
+		default:
+			WL_ERR(("No Key Mgmt Info\n"));
+	}
+
+	if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) >= RSN_CAP_LEN) {
+		rsn_cap[0] = *(u8 *)&mgmt->list[suite_count];
+		rsn_cap[1] = *((u8 *)&mgmt->list[suite_count] + 1);
+
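+		/* multiple PTK replay counters are only meaningful with WMM/QoS,
+		 * so if the IE does not advertise 16 counters, disable WME on
+		 * this BSS to keep the RSN capabilities consistent
+		 */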
+		if (rsn_cap[0] & (RSN_CAP_16_REPLAY_CNTRS << RSN_CAP_PTK_REPLAY_CNTR_SHIFT)) {
+			wme_bss_disable = 0;
+		} else {
+			wme_bss_disable = 1;
+		}
+
+		/* set wme_bss_disable to sync RSN Capabilities */
+		err = wldev_iovar_setint_bsscfg(dev, "wme_bss_disable", wme_bss_disable, bssidx);
+		if (err < 0) {
+			WL_ERR(("wme_bss_disable error %d\n", err));
+			return BCME_ERROR;
+		}
+	} else {
+		WL_DBG(("There is no RSN Capabilities. remained len %d\n", len));
+	}
+
+	/* set auth */
+	err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
+	if (err < 0) {
+		WL_ERR(("auth error %d\n", err));
+		return BCME_ERROR;
+	}
+	/* set wsec */
+	err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+	if (err < 0) {
+		WL_ERR(("wsec error %d\n", err));
+		return BCME_ERROR;
+	}
+	/* set upper-layer auth */
+	err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx);
+	if (err < 0) {
+		WL_ERR(("wpa_auth error %d\n", err));
+		return BCME_ERROR;
+	}
+exit:
+	return 0;
+}
+
+static s32
+wl_validate_wpaie(struct net_device *dev, wpa_ie_fixed_t *wpaie, s32 bssidx)
+{
+	wpa_suite_mcast_t *mcast;
+	wpa_suite_ucast_t *ucast;
+	wpa_suite_auth_key_mgmt_t *mgmt;
+	u16 auth = 0; /* d11 open authentication */
+	u16 count;
+	s32 err = BCME_OK;
+	s32 len = 0;
+	u32 i;
+	u32 wsec;
+	u32 pval = 0;
+	u32 gval = 0;
+	u32 wpa_auth = 0;
+	u32 tmp = 0;
+
+	if (wpaie == NULL)
+		goto exit;
+	WL_DBG(("Enter \n"));
+	len = wpaie->length;    /* value length */
+	len -= WPA_IE_TAG_FIXED_LEN;
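+	/* WPA IE layout after the fixed header: mcast suite (4) |
+	 * ucast suite count (2) + list | AKM suite count (2) + list
+	 */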
+	/* check for multicast cipher suite */
+	if (len < WPA_SUITE_LEN) {
+		WL_INFO(("no multicast cipher suite\n"));
+		goto exit;
+	}
+
+	/* pick up multicast cipher */
+	mcast = (wpa_suite_mcast_t *)&wpaie[1];
+	len -= WPA_SUITE_LEN;
+	if (!bcmp(mcast->oui, WPA_OUI, WPA_OUI_LEN)) {
+		if (IS_WPA_CIPHER(mcast->type)) {
+			tmp = 0;
+			switch (mcast->type) {
+				case WPA_CIPHER_NONE:
+					tmp = 0;
+					break;
+				case WPA_CIPHER_WEP_40:
+				case WPA_CIPHER_WEP_104:
+					tmp = WEP_ENABLED;
+					break;
+				case WPA_CIPHER_TKIP:
+					tmp = TKIP_ENABLED;
+					break;
+				case WPA_CIPHER_AES_CCM:
+					tmp = AES_ENABLED;
+					break;
+				default:
+					WL_ERR(("No Security Info\n"));
+			}
+			gval |= tmp;
+		}
+	}
+	/* Check for unicast suite(s) */
+	if (len < WPA_IE_SUITE_COUNT_LEN) {
+		WL_INFO(("no unicast suite\n"));
+		goto exit;
+	}
+	/* walk thru unicast cipher list and pick up what we recognize */
+	ucast = (wpa_suite_ucast_t *)&mcast[1];
+	count = ltoh16_ua(&ucast->count);
+	len -= WPA_IE_SUITE_COUNT_LEN;
+	for (i = 0; i < count && len >= WPA_SUITE_LEN;
+		i++, len -= WPA_SUITE_LEN) {
+		if (!bcmp(ucast->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
+			if (IS_WPA_CIPHER(ucast->list[i].type)) {
+				tmp = 0;
+				switch (ucast->list[i].type) {
+					case WPA_CIPHER_NONE:
+						tmp = 0;
+						break;
+					case WPA_CIPHER_WEP_40:
+					case WPA_CIPHER_WEP_104:
+						tmp = WEP_ENABLED;
+						break;
+					case WPA_CIPHER_TKIP:
+						tmp = TKIP_ENABLED;
+						break;
+					case WPA_CIPHER_AES_CCM:
+						tmp = AES_ENABLED;
+						break;
+					default:
+						WL_ERR(("No Security Info\n"));
+				}
+				pval |= tmp;
+			}
+		}
+	}
+	len -= (count - i) * WPA_SUITE_LEN;
+	/* Check for auth key management suite(s) */
+	if (len < WPA_IE_SUITE_COUNT_LEN) {
+		WL_INFO((" no auth key mgmt suite\n"));
+		goto exit;
+	}
+	/* walk thru auth management suite list and pick up what we recognize */
+	mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[count];
+	count = ltoh16_ua(&mgmt->count);
+	len -= WPA_IE_SUITE_COUNT_LEN;
+	for (i = 0; i < count && len >= WPA_SUITE_LEN;
+		i++, len -= WPA_SUITE_LEN) {
+		if (!bcmp(mgmt->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
+			if (IS_WPA_AKM(mgmt->list[i].type)) {
+				tmp = 0;
+				switch (mgmt->list[i].type) {
+					case RSN_AKM_NONE:
+						tmp = WPA_AUTH_NONE;
+						break;
+					case RSN_AKM_UNSPECIFIED:
+						tmp = WPA_AUTH_UNSPECIFIED;
+						break;
+					case RSN_AKM_PSK:
+						tmp = WPA_AUTH_PSK;
+						break;
+					default:
+						WL_ERR(("No Key Mgmt Info\n"));
+				}
+				wpa_auth |= tmp;
+			}
+		}
+
+	}
+	/* for WPS, set SES_OW_ENABLED */
+	wsec = (pval | gval | SES_OW_ENABLED);
+	/* set auth */
+	err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
+	if (err < 0) {
+		WL_ERR(("auth error %d\n", err));
+		return BCME_ERROR;
+	}
+	/* set wsec */
+	err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+	if (err < 0) {
+		WL_ERR(("wsec error %d\n", err));
+		return BCME_ERROR;
+	}
+	/* set upper-layer auth */
+	err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx);
+	if (err < 0) {
+		WL_ERR(("wpa_auth error %d\n", err));
+		return BCME_ERROR;
+	}
+exit:
+	return 0;
+}
+
+
+static s32
+wl_cfg80211_bcn_validate_sec(
+	struct net_device *dev,
+	struct parsed_ies *ies,
+	u32 dev_role,
+	s32 bssidx)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	if (dev_role == NL80211_IFTYPE_P2P_GO && (ies->wpa2_ie)) {
+		/* For P2P GO, the sec type is WPA2-PSK */
+		WL_DBG(("P2P GO: validating wpa2_ie"));
+		if (wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx)  < 0)
+			return BCME_ERROR;
+
+	} else if (dev_role == NL80211_IFTYPE_AP) {
+
+		WL_DBG(("SoftAP: validating security"));
+		/* If wpa2_ie or wpa_ie is present validate it */
+
+		if ((ies->wpa2_ie || ies->wpa_ie) &&
+			((wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx)  < 0 ||
+			wl_validate_wpaie(dev, ies->wpa_ie, bssidx) < 0))) {
+			cfg->ap_info->security_mode = false;
+			return BCME_ERROR;
+		}
+
+		cfg->ap_info->security_mode = true;
+		if (cfg->ap_info->rsn_ie) {
+			kfree(cfg->ap_info->rsn_ie);
+			cfg->ap_info->rsn_ie = NULL;
+		}
+		if (cfg->ap_info->wpa_ie) {
+			kfree(cfg->ap_info->wpa_ie);
+			cfg->ap_info->wpa_ie = NULL;
+		}
+		if (cfg->ap_info->wps_ie) {
+			kfree(cfg->ap_info->wps_ie);
+			cfg->ap_info->wps_ie = NULL;
+		}
+		if (ies->wpa_ie != NULL) {
+			/* WPAIE */
+			cfg->ap_info->rsn_ie = NULL;
+			cfg->ap_info->wpa_ie = kmemdup(ies->wpa_ie,
+				ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+				GFP_KERNEL);
+		} else if (ies->wpa2_ie != NULL) {
+			/* RSNIE */
+			cfg->ap_info->wpa_ie = NULL;
+			cfg->ap_info->rsn_ie = kmemdup(ies->wpa2_ie,
+				ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+				GFP_KERNEL);
+		}
+		if (!ies->wpa2_ie && !ies->wpa_ie) {
+			wl_validate_opensecurity(dev, bssidx);
+			cfg->ap_info->security_mode = false;
+		}
+
+		if (ies->wps_ie) {
+			cfg->ap_info->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
+		}
+	}
+
+	return 0;
+
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+static s32 wl_cfg80211_bcn_set_params(
+	struct cfg80211_ap_settings *info,
+	struct net_device *dev,
+	u32 dev_role, s32 bssidx)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	s32 err = BCME_OK;
+
+	WL_DBG(("interval (%d) \ndtim_period (%d) \n",
+		info->beacon_interval, info->dtim_period));
+
+	if (info->beacon_interval) {
+		if ((err = wldev_ioctl(dev, WLC_SET_BCNPRD,
+			&info->beacon_interval, sizeof(s32), true)) < 0) {
+			WL_ERR(("Beacon Interval Set Error, %d\n", err));
+			return err;
+		}
+	}
+
+	if (info->dtim_period) {
+		if ((err = wldev_ioctl(dev, WLC_SET_DTIMPRD,
+			&info->dtim_period, sizeof(s32), true)) < 0) {
+			WL_ERR(("DTIM Interval Set Error, %d\n", err));
+			return err;
+		}
+	}
+
+	if ((info->ssid) && (info->ssid_len > 0) &&
+		(info->ssid_len <= 32)) {
+		WL_DBG(("SSID (%s) len:%zd \n", info->ssid, info->ssid_len));
+		if (dev_role == NL80211_IFTYPE_AP) {
+			/* Store the hostapd SSID */
+			memset(cfg->hostapd_ssid.SSID, 0x00, 32);
+			memcpy(cfg->hostapd_ssid.SSID, info->ssid, info->ssid_len);
+			cfg->hostapd_ssid.SSID_len = info->ssid_len;
+		} else {
+			/* P2P GO */
+			memset(cfg->p2p->ssid.SSID, 0x00, 32);
+			memcpy(cfg->p2p->ssid.SSID, info->ssid, info->ssid_len);
+			cfg->p2p->ssid.SSID_len = info->ssid_len;
+		}
+	}
+
+	if (info->hidden_ssid) {
+		if ((err = wldev_iovar_setint(dev, "closednet", 1)) < 0)
+			WL_ERR(("failed to set hidden : %d\n", err));
+		WL_DBG(("hidden_ssid_enum_val: %d \n", info->hidden_ssid));
+	}
+
+	return err;
+}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) */
+
+static s32
+wl_cfg80211_parse_ies(u8 *ptr, u32 len, struct parsed_ies *ies)
+{
+	s32 err = BCME_OK;
+
+	memset(ies, 0, sizeof(struct parsed_ies));
+
+	/* find the WPSIE */
+	if ((ies->wps_ie = wl_cfgp2p_find_wpsie(ptr, len)) != NULL) {
+		WL_DBG(("WPSIE in beacon \n"));
+		ies->wps_ie_len = ies->wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN;
+	} else {
+		WL_ERR(("No WPSIE in beacon \n"));
+	}
+
+	/* find the RSN_IE */
+	if ((ies->wpa2_ie = bcm_parse_tlvs(ptr, len,
+		DOT11_MNG_RSN_ID)) != NULL) {
+		WL_DBG((" WPA2 IE found\n"));
+		ies->wpa2_ie_len = ies->wpa2_ie->len;
+	}
+
+	/* find the WPA_IE */
+	if ((ies->wpa_ie = wl_cfgp2p_find_wpaie(ptr, len)) != NULL) {
+		WL_DBG((" WPA found\n"));
+		ies->wpa_ie_len = ies->wpa_ie->length;
+	}
+
+	return err;
+
+}
+
+static s32
+wl_cfg80211_bcn_bringup_ap(
+	struct net_device *dev,
+	struct parsed_ies *ies,
+	u32 dev_role, s32 bssidx)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct wl_join_params join_params;
+	bool is_bssup = false;
+	s32 infra = 1;
+	s32 join_params_size = 0;
+	s32 ap = 1;
+	s32 err = BCME_OK;
+
+	WL_DBG(("Enter dev_role: %d\n", dev_role));
+
+	/* Common code for SoftAP and P2P GO */
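+	/* MPC (minimum power consumption) must stay off while hosting an
+	 * AP/GO; it is turned back on when the AP is stopped
+	 */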
+	wldev_iovar_setint(dev, "mpc", 0);
+
+	if (dev_role == NL80211_IFTYPE_P2P_GO) {
+		is_bssup = wl_cfgp2p_bss_isup(dev, bssidx);
+		if (!is_bssup && (ies->wpa2_ie != NULL)) {
+
+			err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true);
+			if (err < 0) {
+				WL_ERR(("SET INFRA error %d\n", err));
+				goto exit;
+			}
+
+			err = wldev_iovar_setbuf_bsscfg(dev, "ssid", &cfg->p2p->ssid,
+				sizeof(cfg->p2p->ssid), cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+				bssidx, &cfg->ioctl_buf_sync);
+			if (err < 0) {
+				WL_ERR(("GO SSID setting error %d\n", err));
+				goto exit;
+			}
+
+			/* Do abort scan before creating GO */
+			wl_cfg80211_scan_abort(cfg);
+
+			if ((err = wl_cfgp2p_bss(cfg, dev, bssidx, 1)) < 0) {
+				WL_ERR(("GO Bring up error %d\n", err));
+				goto exit;
+			}
+		} else
+			WL_DBG(("Bss is already up\n"));
+	} else if ((dev_role == NL80211_IFTYPE_AP) &&
+		(wl_get_drv_status(cfg, AP_CREATING, dev))) {
+		/* Device role SoftAP */
+		err = wldev_ioctl(dev, WLC_DOWN, &ap, sizeof(s32), true);
+		if (err < 0) {
+			WL_ERR(("WLC_DOWN error %d\n", err));
+			goto exit;
+		}
+		err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true);
+		if (err < 0) {
+			WL_ERR(("SET INFRA error %d\n", err));
+			goto exit;
+		}
+		if ((err = wldev_ioctl(dev, WLC_SET_AP, &ap, sizeof(s32), true)) < 0) {
+			WL_ERR(("setting AP mode failed %d \n", err));
+			goto exit;
+		}
+
+		err = wldev_ioctl(dev, WLC_UP, &ap, sizeof(s32), true);
+		if (unlikely(err)) {
+			WL_ERR(("WLC_UP error (%d)\n", err));
+			goto exit;
+		}
+
+		memset(&join_params, 0, sizeof(join_params));
+		/* join parameters starts with ssid */
+		join_params_size = sizeof(join_params.ssid);
+		memcpy(join_params.ssid.SSID, cfg->hostapd_ssid.SSID,
+			cfg->hostapd_ssid.SSID_len);
+		join_params.ssid.SSID_len = htod32(cfg->hostapd_ssid.SSID_len);
+
+		/* create softap */
+		if ((err = wldev_ioctl(dev, WLC_SET_SSID, &join_params,
+			join_params_size, true)) == 0) {
+			WL_DBG(("SoftAP set SSID (%s) success\n", join_params.ssid.SSID));
+			wl_clr_drv_status(cfg, AP_CREATING, dev);
+			wl_set_drv_status(cfg, AP_CREATED, dev);
+		}
+	}
+
+
+exit:
+	return err;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+s32
+wl_cfg80211_parse_ap_ies(
+	struct net_device *dev,
+	struct cfg80211_beacon_data *info,
+	struct parsed_ies *ies)
+{
+	struct parsed_ies prb_ies;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+	u8 *vndr = NULL;
+	u32 vndr_ie_len = 0;
+	s32 err = BCME_OK;
+
+	/* Parse Beacon IEs */
+	if (wl_cfg80211_parse_ies((u8 *)info->tail,
+		info->tail_len, ies) < 0) {
+		WL_ERR(("Beacon get IEs failed \n"));
+		err = -EINVAL;
+		goto fail;
+	}
+
+	vndr = (u8 *)info->proberesp_ies;
+	vndr_ie_len = info->proberesp_ies_len;
+
+	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+		/* SoftAP mode */
+		struct ieee80211_mgmt *mgmt;
+		mgmt = (struct ieee80211_mgmt *)info->probe_resp;
+		if (mgmt != NULL) {
+			vndr = (u8 *)&mgmt->u.probe_resp.variable;
+			vndr_ie_len = info->probe_resp_len -
+				offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+		}
+	}
+
+	/* Parse Probe Response IEs */
+	if (wl_cfg80211_parse_ies(vndr, vndr_ie_len, &prb_ies) < 0) {
+		WL_ERR(("PROBE RESP get IEs failed \n"));
+		err = -EINVAL;
+	}
+
+fail:
+
+	return err;
+}
+
+s32
+wl_cfg80211_set_ies(
+	struct net_device *dev,
+	struct cfg80211_beacon_data *info,
+	s32 bssidx)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+	u8 *vndr = NULL;
+	u32 vndr_ie_len = 0;
+	s32 err = BCME_OK;
+
+	/* Set Beacon IEs to FW */
+	if ((err = wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
+		VNDR_IE_BEACON_FLAG, (u8 *)info->tail,
+		info->tail_len)) < 0) {
+		WL_ERR(("Set Beacon IE Failed \n"));
+	} else {
+		WL_DBG(("Applied Vndr IEs for Beacon \n"));
+	}
+
+	vndr = (u8 *)info->proberesp_ies;
+	vndr_ie_len = info->proberesp_ies_len;
+
+	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+		/* SoftAP mode */
+		struct ieee80211_mgmt *mgmt;
+		mgmt = (struct ieee80211_mgmt *)info->probe_resp;
+		if (mgmt != NULL) {
+			vndr = (u8 *)&mgmt->u.probe_resp.variable;
+			vndr_ie_len = info->probe_resp_len -
+				offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+		}
+	}
+
+	/* Set Probe Response IEs to FW */
+	if ((err = wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
+		VNDR_IE_PRBRSP_FLAG, vndr, vndr_ie_len)) < 0) {
+		WL_ERR(("Set Probe Resp IE Failed \n"));
+	} else {
+		WL_DBG(("Applied Vndr IEs for Probe Resp \n"));
+	}
+
+	return err;
+}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) */
+
+static s32 wl_cfg80211_hostapd_sec(
+	struct net_device *dev,
+	struct parsed_ies *ies,
+	s32 bssidx)
+{
+	bool update_bss = 0;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+
+	if (ies->wps_ie) {
+		if (cfg->ap_info->wps_ie &&
+			memcmp(cfg->ap_info->wps_ie, ies->wps_ie, ies->wps_ie_len)) {
+			WL_DBG((" WPS IE is changed\n"));
+			kfree(cfg->ap_info->wps_ie);
+			cfg->ap_info->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
+		} else if (cfg->ap_info->wps_ie == NULL) {
+			WL_DBG((" WPS IE is added\n"));
+			cfg->ap_info->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
+		}
+
+		if ((ies->wpa_ie != NULL || ies->wpa2_ie != NULL)) {
+			if (!cfg->ap_info->security_mode) {
+				/* change from open mode to security mode */
+				update_bss = true;
+				if (ies->wpa_ie != NULL) {
+					cfg->ap_info->wpa_ie = kmemdup(ies->wpa_ie,
+					ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+					GFP_KERNEL);
+				} else {
+					cfg->ap_info->rsn_ie = kmemdup(ies->wpa2_ie,
+					ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+					GFP_KERNEL);
+				}
+			} else if (cfg->ap_info->wpa_ie) {
+				/* change from WPA2 mode to WPA mode */
+				if (ies->wpa_ie != NULL) {
+					update_bss = true;
+					kfree(cfg->ap_info->rsn_ie);
+					cfg->ap_info->rsn_ie = NULL;
+					cfg->ap_info->wpa_ie = kmemdup(ies->wpa_ie,
+					ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+					GFP_KERNEL);
+				} else if (memcmp(cfg->ap_info->rsn_ie,
+					ies->wpa2_ie, ies->wpa2_ie->len
+					+ WPA_RSN_IE_TAG_FIXED_LEN)) {
+					update_bss = true;
+					kfree(cfg->ap_info->rsn_ie);
+					cfg->ap_info->rsn_ie = kmemdup(ies->wpa2_ie,
+					ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+					GFP_KERNEL);
+					cfg->ap_info->wpa_ie = NULL;
+				}
+			}
+			if (update_bss) {
+				cfg->ap_info->security_mode = true;
+				wl_cfgp2p_bss(cfg, dev, bssidx, 0);
+				if (wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx)  < 0 ||
+					wl_validate_wpaie(dev, ies->wpa_ie, bssidx) < 0) {
+					return BCME_ERROR;
+				}
+				wl_cfgp2p_bss(cfg, dev, bssidx, 1);
+			}
+		}
+	} else {
+		WL_ERR(("No WPSIE in beacon \n"));
+	}
+	return 0;
+}
+
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
+	2, 0))
+static s32
+wl_cfg80211_del_station(
+	struct wiphy *wiphy,
+	struct net_device *ndev,
+	u8* mac_addr)
+{
+	struct net_device *dev;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	scb_val_t scb_val;
+	s8 eabuf[ETHER_ADDR_STR_LEN];
+	int err;
+	char mac_buf[MAX_NUM_OF_ASSOCIATED_DEV *
+		sizeof(struct ether_addr) + sizeof(uint)] = {0};
+	struct maclist *assoc_maclist = (struct maclist *)mac_buf;
+	int num_associated = 0;
+
+	WL_DBG(("Entry\n"));
+	if (mac_addr == NULL) {
+		WL_DBG(("mac_addr is NULL ignore it\n"));
+		return 0;
+	}
+
+	dev = ndev_to_wlc_ndev(ndev, cfg);
+
+	if (p2p_is_on(cfg)) {
+		/* Suspend P2P discovery search-listen to prevent it from changing the
+		 * channel.
+		 */
+		if ((wl_cfgp2p_discover_enable_search(cfg, false)) < 0) {
+			WL_ERR(("Can not disable discovery mode\n"));
+			return -EFAULT;
+		}
+	}
+
+	assoc_maclist->count = MAX_NUM_OF_ASSOCIATED_DEV;
+	err = wldev_ioctl(ndev, WLC_GET_ASSOCLIST,
+		assoc_maclist, sizeof(mac_buf), false);
+	if (err < 0)
+		WL_ERR(("WLC_GET_ASSOCLIST error %d\n", err));
+	else
+		num_associated = assoc_maclist->count;
+
+	memcpy(scb_val.ea.octet, mac_addr, ETHER_ADDR_LEN);
+	scb_val.val = DOT11_RC_DEAUTH_LEAVING;
+	err = wldev_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scb_val,
+		sizeof(scb_val_t), true);
+	if (err < 0)
+		WL_ERR(("WLC_SCB_DEAUTHENTICATE_FOR_REASON err %d\n", err));
+	WL_ERR(("Disconnect STA : %s scb_val.val %d\n",
+		bcm_ether_ntoa((const struct ether_addr *)mac_addr, eabuf),
+		scb_val.val));
+
+	if (num_associated > 0 && ETHER_ISBCAST(mac_addr))
+		wl_delay(400);
+
+	return 0;
+}
+
+static s32
+wl_cfg80211_change_station(
+	struct wiphy *wiphy,
+	struct net_device *dev,
+	u8 *mac,
+	struct station_parameters *params)
+{
+	int err;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	/* Processing only authorize/de-authorize flag for now */
+	if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)))
+		return -ENOTSUPP;
+
+	if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))) {
+		err = wldev_ioctl(primary_ndev, WLC_SCB_DEAUTHORIZE, mac, ETH_ALEN, true);
+		if (err)
+			WL_ERR(("WLC_SCB_DEAUTHORIZE error (%d)\n", err));
+		return err;
+	}
+
+	err = wldev_ioctl(primary_ndev, WLC_SCB_AUTHORIZE, mac, ETH_ALEN, true);
+	if (err)
+		WL_ERR(("WLC_SCB_AUTHORIZE error (%d)\n", err));
+	return err;
+}
+#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VER >= KERNEL_VERSION(3, 2, 0) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+static s32
+wl_cfg80211_start_ap(
+	struct wiphy *wiphy,
+	struct net_device *dev,
+	struct cfg80211_ap_settings *info)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 err = BCME_OK;
+	struct parsed_ies ies;
+	s32 bssidx = 0;
+	u32 dev_role = 0;
+
+	WL_DBG(("Enter \n"));
+	if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+		WL_DBG(("Start AP req on primary iface: Softap\n"));
+		dev_role = NL80211_IFTYPE_AP;
+	}
+#if defined(WL_ENABLE_P2P_IF)
+	else if (dev == cfg->p2p_net) {
+		/* Group Add request on p2p0 */
+		WL_DBG(("Start AP req on P2P iface: GO\n"));
+		dev = bcmcfg_to_prmry_ndev(cfg);
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	}
+#endif /* WL_ENABLE_P2P_IF */
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	if (p2p_is_on(cfg) &&
+		(bssidx == wl_to_p2p_bss_bssidx(cfg,
+		P2PAPI_BSSCFG_CONNECTION))) {
+		dev_role = NL80211_IFTYPE_P2P_GO;
+		WL_DBG(("Start AP req on P2P connection iface\n"));
+	}
+
+	if (!check_dev_role_integrity(cfg, dev_role))
+		goto fail;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+	if ((err = wl_cfg80211_set_channel(wiphy, dev,
+		info->chandef.chan,
+		info->chandef.width)) < 0) {
+		WL_ERR(("Set channel failed \n"));
+		goto fail;
+	}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) */
+
+	if ((err = wl_cfg80211_bcn_set_params(info, dev,
+		dev_role, bssidx)) < 0) {
+		WL_ERR(("Beacon params set failed \n"));
+		goto fail;
+	}
+
+	/* Parse IEs */
+	if ((err = wl_cfg80211_parse_ap_ies(dev, &info->beacon, &ies)) < 0) {
+		WL_ERR(("Set IEs failed \n"));
+		goto fail;
+	}
+
+	if ((err = wl_cfg80211_bcn_validate_sec(dev, &ies,
+		dev_role, bssidx)) < 0)
+	{
+		WL_ERR(("Beacon set security failed \n"));
+		goto fail;
+	}
+
+	if ((err = wl_cfg80211_bcn_bringup_ap(dev, &ies,
+		dev_role, bssidx)) < 0) {
+		WL_ERR(("Beacon bring up AP/GO failed \n"));
+		goto fail;
+	}
+
+//	kct_log(CT_EV_INFO, "cws.wifi", "hotspot_on", EV_FLAGS_PRIORITY_LOW,
+//		cfg->hostapd_ssid.SSID);
+
+	WL_DBG(("** AP/GO Created **\n"));
+
+#ifdef WL_CFG80211_ACL
+	/* Enforce Admission Control. */
+	if ((err = wl_cfg80211_set_mac_acl(wiphy, dev, info->acl)) < 0) {
+		WL_ERR(("Set ACL failed\n"));
+	}
+#endif /* WL_CFG80211_ACL */
+
+	/* Set IEs to FW */
+	if ((err = wl_cfg80211_set_ies(dev, &info->beacon, bssidx)) < 0)
+		WL_ERR(("Set IEs failed \n"));
+
+	/* Enable Probe Req filter, WPS-AP certification 4.2.13 */
+	if ((dev_role == NL80211_IFTYPE_AP) && (ies.wps_ie != NULL)) {
+		bool pbc = 0;
+		wl_validate_wps_ie((char *) ies.wps_ie, ies.wps_ie_len, &pbc);
+		if (pbc) {
+			WL_DBG(("set WLC_E_PROBREQ_MSG\n"));
+			wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, true);
+		}
+	}
+
+fail:
+	if (err) {
+		WL_ERR(("ADD/SET beacon failed\n"));
+		wldev_iovar_setint(dev, "mpc", 1);
+	}
+
+	return err;
+}
+
+static s32
+wl_cfg80211_stop_ap(
+	struct wiphy *wiphy,
+	struct net_device *dev)
+{
+	int err = 0;
+	u32 dev_role = 0;
+	int infra = 0;
+	int ap = 0;
+	s32 bssidx = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+	WL_DBG(("Enter \n"));
+	if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+		dev_role = NL80211_IFTYPE_AP;
+	}
+#if defined(WL_ENABLE_P2P_IF)
+	else if (dev == cfg->p2p_net) {
+		/* Group Add request on p2p0 */
+		dev = bcmcfg_to_prmry_ndev(cfg);
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	}
+#endif /* WL_ENABLE_P2P_IF */
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	if (p2p_is_on(cfg) &&
+		(bssidx == wl_to_p2p_bss_bssidx(cfg,
+		P2PAPI_BSSCFG_CONNECTION))) {
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	}
+
+	if (!check_dev_role_integrity(cfg, dev_role))
+		goto exit;
+
+	if (dev_role == NL80211_IFTYPE_AP) {
+		/* SoftAp on primary Interface.
+		 * Shut down AP and turn on MPC
+		 */
+		if ((err = wldev_ioctl(dev, WLC_SET_AP, &ap, sizeof(s32), true)) < 0) {
+			WL_ERR(("setting AP mode failed %d \n", err));
+			err = -ENOTSUPP;
+			goto exit;
+		}
+		err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true);
+		if (err < 0) {
+			WL_ERR(("SET INFRA error %d\n", err));
+			err = -ENOTSUPP;
+			goto exit;
+		}
+
+		err = wldev_ioctl(dev, WLC_UP, &ap, sizeof(s32), true);
+		if (unlikely(err)) {
+			WL_ERR(("WLC_UP error (%d)\n", err));
+			err = -EINVAL;
+			goto exit;
+		}
+
+		wl_clr_drv_status(cfg, AP_CREATED, dev);
+		/* Turn on the MPC */
+		wldev_iovar_setint(dev, "mpc", 1);
+		if (cfg->ap_info) {
+			kfree(cfg->ap_info->wpa_ie);
+			kfree(cfg->ap_info->rsn_ie);
+			kfree(cfg->ap_info->wps_ie);
+			kfree(cfg->ap_info);
+			cfg->ap_info = NULL;
+		}
+	} else {
+		WL_DBG(("Stopping P2P GO \n"));
+		DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE((dhd_pub_t *)(cfg->pub),
+			DHD_EVENT_TIMEOUT_MS*3);
+		DHD_OS_WAKE_LOCK_TIMEOUT((dhd_pub_t *)(cfg->pub));
+	}
+
+exit:
+//	kct_log(CT_EV_INFO, "cws.wifi", "hotspot_off", EV_FLAGS_PRIORITY_LOW, cfg->hostapd_ssid.SSID);
+	return err;
+}
+
+static s32
+wl_cfg80211_change_beacon(
+	struct wiphy *wiphy,
+	struct net_device *dev,
+	struct cfg80211_beacon_data *info)
+{
+	s32 err = BCME_OK;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct parsed_ies ies;
+	u32 dev_role = 0;
+	s32 bssidx = 0;
+	bool pbc = 0;
+
+	WL_DBG(("Enter \n"));
+
+	if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+		dev_role = NL80211_IFTYPE_AP;
+	}
+#if defined(WL_ENABLE_P2P_IF)
+	else if (dev == cfg->p2p_net) {
+		/* Group Add request on p2p0 */
+		dev = bcmcfg_to_prmry_ndev(cfg);
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	}
+#endif /* WL_ENABLE_P2P_IF */
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	if (p2p_is_on(cfg) &&
+		(bssidx == wl_to_p2p_bss_bssidx(cfg,
+		P2PAPI_BSSCFG_CONNECTION))) {
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	}
+
+	if (!check_dev_role_integrity(cfg, dev_role))
+		goto fail;
+
+	/* Parse IEs */
+	if ((err = wl_cfg80211_parse_ap_ies(dev, info, &ies)) < 0) {
+		WL_ERR(("Parse IEs failed \n"));
+		goto fail;
+	}
+
+	/* Set IEs to FW */
+	if ((err = wl_cfg80211_set_ies(dev, info, bssidx)) < 0) {
+		WL_ERR(("Set IEs failed \n"));
+		goto fail;
+	}
+
+	if (dev_role == NL80211_IFTYPE_AP) {
+		if (wl_cfg80211_hostapd_sec(dev, &ies, bssidx) < 0) {
+			WL_ERR(("Hostapd update sec failed \n"));
+			err = -EINVAL;
+			goto fail;
+		}
+		/* Enable Probe Req filter, WPS-AP certification 4.2.13 */
+		if ((dev_role == NL80211_IFTYPE_AP) && (ies.wps_ie != NULL)) {
+			wl_validate_wps_ie((char *) ies.wps_ie, ies.wps_ie_len, &pbc);
+			WL_DBG((" WPS AP, wps_ie is exists pbc=%d\n", pbc));
+			if (pbc)
+				wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, true);
+			else
+				wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, false);
+		}
+	}
+
+fail:
+	return err;
+}
+#else
+static s32
+wl_cfg80211_add_set_beacon(struct wiphy *wiphy, struct net_device *dev,
+	struct beacon_parameters *info)
+{
+	s32 err = BCME_OK;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 ie_offset = 0;
+	s32 bssidx = 0;
+	u32 dev_role = NL80211_IFTYPE_AP;
+	struct parsed_ies ies;
+	bcm_tlv_t *ssid_ie;
+	bool pbc = 0;
+	WL_DBG(("interval (%d) dtim_period (%d) head_len (%d) tail_len (%d)\n",
+		info->interval, info->dtim_period, info->head_len, info->tail_len));
+
+	if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+		dev_role = NL80211_IFTYPE_AP;
+	}
+#if defined(WL_ENABLE_P2P_IF)
+	else if (dev == cfg->p2p_net) {
+		/* Group Add request on p2p0 */
+		dev = bcmcfg_to_prmry_ndev(cfg);
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	}
+#endif /* WL_ENABLE_P2P_IF */
+	if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+		WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+		return BCME_ERROR;
+	}
+	if (p2p_is_on(cfg) &&
+		(bssidx == wl_to_p2p_bss_bssidx(cfg,
+		P2PAPI_BSSCFG_CONNECTION))) {
+		dev_role = NL80211_IFTYPE_P2P_GO;
+	}
+
+	if (!check_dev_role_integrity(cfg, dev_role))
+		goto fail;
+
+	ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
+	/* find the SSID */
+	if ((ssid_ie = bcm_parse_tlvs((u8 *)&info->head[ie_offset],
+		info->head_len - ie_offset,
+		DOT11_MNG_SSID_ID)) != NULL) {
+		if (dev_role == NL80211_IFTYPE_AP) {
+			/* Store the hostapd SSID */
+			memset(&cfg->hostapd_ssid.SSID[0], 0x00, 32);
+			memcpy(&cfg->hostapd_ssid.SSID[0], ssid_ie->data, ssid_ie->len);
+			cfg->hostapd_ssid.SSID_len = ssid_ie->len;
+		} else {
+			/* P2P GO */
+			memset(&cfg->p2p->ssid.SSID[0], 0x00, 32);
+			memcpy(cfg->p2p->ssid.SSID, ssid_ie->data, ssid_ie->len);
+			cfg->p2p->ssid.SSID_len = ssid_ie->len;
+		}
+	}
+
+	if (wl_cfg80211_parse_ies((u8 *)info->tail,
+		info->tail_len, &ies) < 0) {
+		WL_ERR(("Beacon get IEs failed \n"));
+		err = -EINVAL;
+		goto fail;
+	}
+
+	if (wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
+		VNDR_IE_BEACON_FLAG, (u8 *)info->tail,
+		info->tail_len) < 0) {
+		WL_ERR(("Beacon set IEs failed \n"));
+		goto fail;
+	} else {
+		WL_DBG(("Applied Vndr IEs for Beacon \n"));
+	}
+	if (!wl_cfgp2p_bss_isup(dev, bssidx) &&
+		(wl_cfg80211_bcn_validate_sec(dev, &ies, dev_role, bssidx) < 0)) {
+		WL_ERR(("Beacon set security failed \n"));
+		goto fail;
+	}
+
+	/* Set BI and DTIM period */
+	if (info->interval) {
+		if ((err = wldev_ioctl(dev, WLC_SET_BCNPRD,
+			&info->interval, sizeof(s32), true)) < 0) {
+			WL_ERR(("Beacon Interval Set Error, %d\n", err));
+			return err;
+		}
+	}
+	if (info->dtim_period) {
+		if ((err = wldev_ioctl(dev, WLC_SET_DTIMPRD,
+			&info->dtim_period, sizeof(s32), true)) < 0) {
+			WL_ERR(("DTIM Interval Set Error, %d\n", err));
+			return err;
+		}
+	}
+
+	if (wl_cfg80211_bcn_bringup_ap(dev, &ies, dev_role, bssidx) < 0) {
+		WL_ERR(("Beacon bring up AP/GO failed \n"));
+		goto fail;
+	}
+
+	if (wl_get_drv_status(cfg, AP_CREATED, dev)) {
+		/* Soft AP already running. Update changed params */
+		if (wl_cfg80211_hostapd_sec(dev, &ies, bssidx) < 0) {
+			WL_ERR(("Hostapd update sec failed \n"));
+			err = -EINVAL;
+			goto fail;
+		}
+	}
+
+	/* Enable Probe Req filter */
+	if (((dev_role == NL80211_IFTYPE_P2P_GO) ||
+		(dev_role == NL80211_IFTYPE_AP)) && (ies.wps_ie != NULL)) {
+		wl_validate_wps_ie((char *) ies.wps_ie, ies.wps_ie_len, &pbc);
+		if (pbc)
+			wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, true);
+	}
+
+	WL_DBG(("** ADD/SET beacon done **\n"));
+
+fail:
+	if (err) {
+		WL_ERR(("ADD/SET beacon failed\n"));
+		wldev_iovar_setint(dev, "mpc", 1);
+	}
+	return err;
+
+}
+#endif 
+
+#ifdef WL_SCHED_SCAN
+#define PNO_TIME		19
+#define PNO_REPEAT		1
+#define PNO_FREQ_EXPO_MAX	4
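+/* Defaults handed to dhd_dev_pno_set_for_ssid() below. As commonly
+ * interpreted (an assumption, not verified against firmware docs): scan
+ * every PNO_TIME seconds, repeat PNO_REPEAT times at that base interval,
+ * then back off exponentially up to a factor of 2^PNO_FREQ_EXPO_MAX.
+ */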
+static int
+wl_cfg80211_sched_scan_start(struct wiphy *wiphy,
+                             struct net_device *dev,
+                             struct cfg80211_sched_scan_request *request)
+{
+	ushort pno_time = PNO_TIME;
+	int pno_repeat = PNO_REPEAT;
+	int pno_freq_expo_max = PNO_FREQ_EXPO_MAX;
+	wlc_ssid_t ssids_local[MAX_PFN_LIST_COUNT];
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct cfg80211_ssid *ssid = NULL;
+	int ssid_count = 0;
+	int i;
+	int ret = 0;
+
+	WL_DBG(("Enter \n"));
+	WL_PNO((">>> SCHED SCAN START\n"));
+	WL_PNO(("Enter n_match_sets:%d   n_ssids:%d \n",
+		request->n_match_sets, request->n_ssids));
+	WL_PNO(("ssids:%d pno_time:%d pno_repeat:%d pno_freq:%d \n",
+		request->n_ssids, pno_time, pno_repeat, pno_freq_expo_max));
+
+
+	if (!request || !request->n_ssids || !request->n_match_sets) {
+		if (request)
+			WL_ERR(("Invalid sched scan req!! n_ssids:%d \n", request->n_ssids));
+		else
+			WL_ERR(("Invalid sched scan req!! request is NULL \n"));
+		return -EINVAL;
+	}
+
+	memset(&ssids_local, 0, sizeof(ssids_local));
+
+	if (request->n_match_sets > 0) {
+		for (i = 0; i < request->n_match_sets; i++) {
+			ssid = &request->match_sets[i].ssid;
+			memcpy(ssids_local[i].SSID, ssid->ssid, ssid->ssid_len);
+			ssids_local[i].SSID_len = ssid->ssid_len;
+			WL_PNO((">>> PNO filter set for ssid (%s) \n", ssid->ssid));
+			ssid_count++;
+		}
+	}
+
+	if (request->n_ssids > 0) {
+		for (i = 0; i < request->n_ssids; i++) {
+			/* Active scan req for ssids */
+			WL_PNO((">>> Active scan req for ssid (%s) \n", request->ssids[i].ssid));
+
+			/* match_set SSIDs are a superset of the n_ssids list,
+			 * so these need not be added separately.
+			 */
+		}
+	}
+
+	if (ssid_count) {
+		if ((ret = dhd_dev_pno_set_for_ssid(dev, ssids_local, request->n_match_sets,
+			pno_time, pno_repeat, pno_freq_expo_max, NULL, 0)) < 0) {
+			WL_ERR(("PNO setup failed!! ret=%d \n", ret));
+			return -EINVAL;
+		}
+		cfg->sched_scan_req = request;
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+	WL_DBG(("Enter \n"));
+	WL_PNO((">>> SCHED SCAN STOP\n"));
+
+	if (dhd_dev_pno_stop_for_ssid(dev) < 0)
+		WL_ERR(("PNO Stop for SSID failed"));
+
+	if (cfg->scan_request && cfg->sched_scan_running) {
+		WL_PNO((">>> Sched scan running. Aborting it..\n"));
+		wl_notify_escan_complete(cfg, dev, true, true);
+	}
+
+	cfg->sched_scan_req = NULL;
+	cfg->sched_scan_running = FALSE;
+
+	return 0;
+}
+#endif /* WL_SCHED_SCAN */
+
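+/* cfg80211 callbacks exported to the wireless core. Several members are
+ * gated on kernel version because the API moved over time: set_channel was
+ * dropped in 3.6, add_beacon/set_beacon were replaced by start_ap/stop_ap/
+ * change_beacon in 3.4, and the station ops need 3.2 or backported patches.
+ */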
+static struct cfg80211_ops wl_cfg80211_ops = {
+	.add_virtual_intf = wl_cfg80211_add_virtual_iface,
+	.del_virtual_intf = wl_cfg80211_del_virtual_iface,
+	.change_virtual_intf = wl_cfg80211_change_virtual_iface,
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	.start_p2p_device = wl_cfgp2p_start_p2p_device,
+	.stop_p2p_device = wl_cfgp2p_stop_p2p_device,
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	.scan = wl_cfg80211_scan,
+	.set_wiphy_params = wl_cfg80211_set_wiphy_params,
+	.join_ibss = wl_cfg80211_join_ibss,
+	.leave_ibss = wl_cfg80211_leave_ibss,
+	.get_station = wl_cfg80211_get_station,
+	.set_tx_power = wl_cfg80211_set_tx_power,
+	.get_tx_power = wl_cfg80211_get_tx_power,
+	.add_key = wl_cfg80211_add_key,
+	.del_key = wl_cfg80211_del_key,
+	.get_key = wl_cfg80211_get_key,
+	.set_default_key = wl_cfg80211_config_default_key,
+	.set_default_mgmt_key = wl_cfg80211_config_default_mgmt_key,
+	.set_power_mgmt = wl_cfg80211_set_power_mgmt,
+	.connect = wl_cfg80211_connect,
+	.disconnect = wl_cfg80211_disconnect,
+	.suspend = wl_cfg80211_suspend,
+	.resume = wl_cfg80211_resume,
+	.set_pmksa = wl_cfg80211_set_pmksa,
+	.del_pmksa = wl_cfg80211_del_pmksa,
+	.flush_pmksa = wl_cfg80211_flush_pmksa,
+	.remain_on_channel = wl_cfg80211_remain_on_channel,
+	.cancel_remain_on_channel = wl_cfg80211_cancel_remain_on_channel,
+	.mgmt_tx = wl_cfg80211_mgmt_tx,
+	.mgmt_frame_register = wl_cfg80211_mgmt_frame_register,
+	.change_bss = wl_cfg80211_change_bss,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+	.set_channel = wl_cfg80211_set_channel,
+#endif 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
+	.set_beacon = wl_cfg80211_add_set_beacon,
+	.add_beacon = wl_cfg80211_add_set_beacon,
+#else
+	.change_beacon = wl_cfg80211_change_beacon,
+	.start_ap = wl_cfg80211_start_ap,
+	.stop_ap = wl_cfg80211_stop_ap,
+#endif 
+#ifdef WL_SCHED_SCAN
+	.sched_scan_start = wl_cfg80211_sched_scan_start,
+	.sched_scan_stop = wl_cfg80211_sched_scan_stop,
+#endif /* WL_SCHED_SCAN */
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
+	2, 0))
+	.del_station = wl_cfg80211_del_station,
+	.change_station = wl_cfg80211_change_station,
+	.mgmt_tx_cancel_wait = wl_cfg80211_mgmt_tx_cancel_wait,
+#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VERSION >= (3,2,0) */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0))
+	.tdls_oper = wl_cfg80211_tdls_oper,
+#endif 
+#ifdef WL_CFG80211_ACL
+	.set_mac_acl = wl_cfg80211_set_mac_acl,
+#endif /* WL_CFG80211_ACL */
+	CFG80211_TESTMODE_CMD(dhd_cfg80211_testmode_cmd)
+};
+
+s32 wl_mode_to_nl80211_iftype(s32 mode)
+{
+	switch (mode) {
+	case WL_MODE_BSS:
+		return NL80211_IFTYPE_STATION;
+	case WL_MODE_IBSS:
+		return NL80211_IFTYPE_ADHOC;
+	case WL_MODE_AP:
+		return NL80211_IFTYPE_AP;
+	default:
+		return NL80211_IFTYPE_UNSPECIFIED;
+	}
+}
+
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+static int
+wl_cfg80211_reg_notifier(
+	struct wiphy *wiphy,
+	struct regulatory_request *request)
+{
+	struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(wiphy);
+	int ret = 0;
+
+	if (!request || !cfg) {
+		WL_ERR(("Invalid arg\n"));
+		return -EINVAL;
+	}
+
+	WL_DBG(("ccode: %c%c Initiator: %d\n",
+		request->alpha2[0], request->alpha2[1], request->initiator));
+
+	/* Only user- and country-IE-initiated regdom changes are handled for now */
+	if ((request->initiator != NL80211_REGDOM_SET_BY_USER) &&
+		(request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE)) {
+		WL_ERR(("reg_notifier for initiator:%d not supported : set default\n",
+			request->initiator));
+		/* For countries unsupported by the regdb, let the driver
+		 * set up the platform default locale.
+		 */
+	}
+
+	WL_ERR(("Set country code %c%c from %s\n",
+		request->alpha2[0], request->alpha2[1],
+		((request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) ? "11d AP" : "User")));
+
+	if ((ret = wldev_set_country(bcmcfg_to_prmry_ndev(cfg), request->alpha2,
+		false, (request->initiator == NL80211_REGDOM_SET_BY_USER ? true : false))) < 0) {
+		WL_ERR(("set country Failed :%d\n", ret));
+	}
+
+	return ret;
+}
+#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+
+#ifdef CONFIG_PM
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+static const struct wiphy_wowlan_support brcm_wowlan_support = {
+	.flags = WIPHY_WOWLAN_ANY,
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+static struct cfg80211_wowlan brcm_wowlan_config = {
+	.disconnect = true,
+	.gtk_rekey_failure = true,
+	.eap_identity_req = true,
+	.four_way_handshake = true,
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+#endif /* CONFIG_PM */
+
+static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev, void *context)
+{
+	s32 err = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+	dhd_pub_t *dhd = (dhd_pub_t *)context;
+	BCM_REFERENCE(dhd);
+
+	if (!dhd) {
+		WL_ERR(("DHD is NULL!!"));
+		err = -ENODEV;
+		return err;
+	}
+#endif 
+
+	wdev->wiphy =
+	    wiphy_new(&wl_cfg80211_ops, sizeof(struct bcm_cfg80211));
+	if (unlikely(!wdev->wiphy)) {
+		WL_ERR(("Couldn not allocate wiphy device\n"));
+		err = -ENOMEM;
+		return err;
+	}
+	set_wiphy_dev(wdev->wiphy, sdiofunc_dev);
+	wdev->wiphy->max_scan_ie_len = WL_SCAN_IE_LEN_MAX;
+	/* Report how many SSIDs the driver can support per scan request */
+	wdev->wiphy->max_scan_ssids = WL_SCAN_PARAMS_SSID_MAX;
+	wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
+#ifdef WL_SCHED_SCAN
+	wdev->wiphy->max_sched_scan_ssids = MAX_PFN_LIST_COUNT;
+	wdev->wiphy->max_match_sets = MAX_PFN_LIST_COUNT;
+	wdev->wiphy->max_sched_scan_ie_len = WL_SCAN_IE_LEN_MAX;
+	wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+#endif /* WL_SCHED_SCAN */
+	wdev->wiphy->interface_modes =
+		BIT(NL80211_IFTYPE_STATION)
+		| BIT(NL80211_IFTYPE_ADHOC)
+#if !defined(WL_ENABLE_P2P_IF)
+		| BIT(NL80211_IFTYPE_MONITOR)
+#endif /* !WL_ENABLE_P2P_IF */
+#if defined(WL_IFACE_COMB_NUM_CHANNELS) || defined(WL_CFG80211_P2P_DEV_IF)
+		| BIT(NL80211_IFTYPE_P2P_CLIENT)
+		| BIT(NL80211_IFTYPE_P2P_GO)
+#endif /* WL_IFACE_COMB_NUM_CHANNELS || WL_CFG80211_P2P_DEV_IF */
+#if defined(WL_CFG80211_P2P_DEV_IF)
+		| BIT(NL80211_IFTYPE_P2P_DEVICE)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+		| BIT(NL80211_IFTYPE_AP);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \
+	(defined(WL_IFACE_COMB_NUM_CHANNELS) || defined(WL_CFG80211_P2P_DEV_IF))
+	WL_DBG(("Setting interface combinations for common mode\n"));
+	wdev->wiphy->iface_combinations = common_iface_combinations;
+	wdev->wiphy->n_iface_combinations =
+		ARRAY_SIZE(common_iface_combinations);
+#endif /* LINUX_VER >= 3.0 && (WL_IFACE_COMB_NUM_CHANNELS || WL_CFG80211_P2P_DEV_IF) */
+
+	wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
+
+	wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+	wdev->wiphy->cipher_suites = __wl_cipher_suites;
+	wdev->wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
+	wdev->wiphy->max_remain_on_channel_duration = 5000;
+	wdev->wiphy->mgmt_stypes = wl_cfg80211_default_mgmt_stypes;
+#ifndef WL_POWERSAVE_DISABLED
+	wdev->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+#else
+	wdev->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+#endif				/* !WL_POWERSAVE_DISABLED */
+	wdev->wiphy->flags |= WIPHY_FLAG_NETNS_OK |
+		WIPHY_FLAG_4ADDR_AP |
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39))
+		WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS |
+#endif
+		WIPHY_FLAG_4ADDR_STATION;
+#if (defined(ROAM_ENABLE) || defined(BCMFW_ROAM_ENABLE)) && (LINUX_VERSION_CODE >= \
+	KERNEL_VERSION(3, 2, 0))
+	/* Please use supplicant ver >= 76 if FW_ROAM is enabled.
+	 * If the driver advertises FW_ROAM, an older supplicant won't
+	 * send the BSSID & freq in the connect request, which delays
+	 * the ASSOC as the FW needs a full scan before attempting to
+	 * connect. Supplicant >= 76 has a patch that allows bssid &
+	 * freq to be sent down to the driver even if FW ROAM is
+	 * advertised.
+	 */
+	/* wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM; */
+#endif 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
+	wdev->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+		WIPHY_FLAG_OFFCHAN_TX;
+#endif
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
+	4, 0))
+	/* From kernel 3.4 onwards the AP_SME flag can be advertised,
+	 * removing the need for the supplicant patch.
+	 */
+	wdev->wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME;
+
+#ifdef WL_CFG80211_ACL
+	/* Configure ACL capabilities. */
+	wdev->wiphy->max_acl_mac_addrs = MAX_NUM_MAC_FILT;
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+	/* From Supplicant MR1 and kernel 3.4.0 onwards, the supplicant
+	 * distinguishes SoftAP mode from other modes (e.g. P2P, WPS,
+	 * HS2.0) when it builds the probe response frame. To add a
+	 * vendor-specific IE to the probe response in SoftAP mode, the
+	 * AP_PROBE_RESP_OFFLOAD flag is set in wiphy->flags.
+	 */
+	if (dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) {
+		wdev->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+		wdev->wiphy->probe_resp_offload = 0;
+	}
+#endif 
+#endif /* WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) */
+
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+	wdev->wiphy->reg_notifier = wl_cfg80211_reg_notifier;
+#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0))
+	wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+#endif
+
+#if defined(CONFIG_PM) && defined(WL_CFG80211_P2P_DEV_IF)
+	/*
+	 * From the linux-3.10 kernel, a wowlan packet filter is mandated to avoid
+	 * disconnection of the connected network before suspend. So a dummy wowlan
+	 * filter is configured for kernels linux-3.8 and above.
+	 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+	wdev->wiphy->wowlan = &brcm_wowlan_support;
+
+	/* If this is not provided, the cfg stack will disconnect
+	 * during suspend.
+	 */
+	wdev->wiphy->wowlan_config = &brcm_wowlan_config;
+#else
+	wdev->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+#endif /* CONFIG_PM && WL_CFG80211_P2P_DEV_IF */
+
+	WL_DBG(("Registering custom regulatory)\n"));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+	wdev->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
+#else
+	wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
+#endif
+	wiphy_apply_custom_regulatory(wdev->wiphy, &brcm_regdom);
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) && defined(WL_VENDOR_EXT_SUPPORT)
+	WL_ERR(("Registering Vendor80211)\n"));
+	err = wl_cfgvendor_attach(wdev->wiphy);
+	if (unlikely(err < 0)) {
+		WL_ERR(("Couldn not attach vendor commands (%d)\n", err));
+	}
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) && defined(WL_VENDOR_EXT_SUPPORT) */
+
+
+	/* Now we can register wiphy with cfg80211 module */
+	err = wiphy_register(wdev->wiphy);
+	if (unlikely(err < 0)) {
+		WL_ERR(("Couldn not register wiphy device (%d)\n", err));
+		wiphy_free(wdev->wiphy);
+	}
+
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && (LINUX_VERSION_CODE <= \
+	KERNEL_VERSION(3, 3, 0))) && defined(WL_IFACE_COMB_NUM_CHANNELS)
+	wdev->wiphy->flags &= ~WIPHY_FLAG_ENFORCE_COMBINATIONS;
+#endif
+
+	return err;
+}
+
+static void wl_free_wdev(struct bcm_cfg80211 *cfg)
+{
+	struct wireless_dev *wdev = cfg->wdev;
+	struct wiphy *wiphy;
+	if (!wdev) {
+		WL_ERR(("wdev is invalid\n"));
+		return;
+	}
+	wiphy = wdev->wiphy;
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) && defined(WL_VENDOR_EXT_SUPPORT)
+	wl_cfgvendor_detach(wdev->wiphy);
+#endif /* if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) && defined(WL_VENDOR_EXT_SUPPORT) */
+
+	wiphy_unregister(wdev->wiphy);
+	wdev->wiphy->dev.parent = NULL;
+
+	wl_delete_all_netinfo(cfg);
+	wiphy_free(wiphy);
+	/* Do NOT call any function after wiphy_free: the driver's private
+	 * structure "cfg", which is the private part of wiphy, has been
+	 * freed by wiphy_free.
+	 */
+}
+
+static s32 wl_inform_bss(struct bcm_cfg80211 *cfg)
+{
+	struct wl_scan_results *bss_list;
+	struct wl_bss_info *bi = NULL;	/* must be initialized */
+	s32 err = 0;
+	s32 i;
+
+	bss_list = cfg->bss_list;
+	WL_DBG(("scanned AP count (%d)\n", bss_list->count));
+	bi = next_bss(bss_list, bi);
+	for_each_bss(bss_list, bi, i) {
+		err = wl_inform_single_bss(cfg, bi, false);
+		if (unlikely(err))
+			break;
+	}
+	return err;
+}
+
+static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, struct wl_bss_info *bi, bool roam)
+{
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+	struct ieee80211_mgmt *mgmt;
+	struct ieee80211_channel *channel;
+	struct ieee80211_supported_band *band;
+	struct wl_cfg80211_bss_info *notif_bss_info;
+	struct wl_scan_req *sr = wl_to_sr(cfg);
+	struct beacon_proberesp *beacon_proberesp;
+	struct cfg80211_bss *cbss = NULL;
+	s32 mgmt_type;
+	s32 signal;
+	u32 freq;
+	s32 err = 0;
+	gfp_t aflags;
+
+	if (unlikely(dtoh32(bi->length) > WL_BSS_INFO_MAX)) {
+		WL_DBG(("Beacon is larger than buffer. Discarding\n"));
+		return err;
+	}
+	aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
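+	/* Size the allocation for notif_bss_info plus a full 802.11 mgmt
+	 * header and up to WL_BSS_INFO_MAX bytes of IEs; the subtracted u8
+	 * is presumably the placeholder frame_buf byte at the end of the
+	 * struct.
+	 */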
+	notif_bss_info = kzalloc(sizeof(*notif_bss_info) + sizeof(*mgmt)
+		- sizeof(u8) + WL_BSS_INFO_MAX, aflags);
+	if (unlikely(!notif_bss_info)) {
+		WL_ERR(("notif_bss_info alloc failed\n"));
+		return -ENOMEM;
+	}
+	mgmt = (struct ieee80211_mgmt *)notif_bss_info->frame_buf;
+	notif_bss_info->channel =
+		bi->ctl_ch ? bi->ctl_ch : wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
+
+	if (notif_bss_info->channel <= CH_MAX_2G_CHANNEL)
+		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	else
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+	if (!band) {
+		WL_ERR(("No valid band"));
+		kfree(notif_bss_info);
+		return -EINVAL;
+	}
+	notif_bss_info->rssi = wl_rssi_offset(dtoh16(bi->RSSI));
+	memcpy(mgmt->bssid, &bi->BSSID, ETHER_ADDR_LEN);
+	mgmt_type = cfg->active_scan ?
+		IEEE80211_STYPE_PROBE_RESP : IEEE80211_STYPE_BEACON;
+	if (!memcmp(bi->SSID, sr->ssid.SSID, bi->SSID_len)) {
+		mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | mgmt_type);
+	}
+	beacon_proberesp = cfg->active_scan ?
+		(struct beacon_proberesp *)&mgmt->u.probe_resp :
+		(struct beacon_proberesp *)&mgmt->u.beacon;
+	beacon_proberesp->timestamp = 0;
+	beacon_proberesp->beacon_int = cpu_to_le16(bi->beacon_period);
+	beacon_proberesp->capab_info = cpu_to_le16(bi->capability);
+	wl_rst_ie(cfg);
+	wl_update_hidden_ap_ie(bi, ((u8 *) bi) + bi->ie_offset, &bi->ie_length, roam);
+	wl_mrg_ie(cfg, ((u8 *) bi) + bi->ie_offset, bi->ie_length);
+	wl_cp_ie(cfg, beacon_proberesp->variable, WL_BSS_INFO_MAX -
+		offsetof(struct wl_cfg80211_bss_info, frame_buf));
+	notif_bss_info->frame_len = offsetof(struct ieee80211_mgmt,
+		u.beacon.variable) + wl_get_ielen(cfg);
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38)
+	freq = ieee80211_channel_to_frequency(notif_bss_info->channel);
+	(void)band->band;
+#else
+	freq = ieee80211_channel_to_frequency(notif_bss_info->channel, band->band);
+#endif
+	if (freq == 0) {
+		WL_ERR(("Invalid channel, fail to chcnage channel to freq\n"));
+		kfree(notif_bss_info);
+		return -EINVAL;
+	}
+	channel = ieee80211_get_channel(wiphy, freq);
+	if (unlikely(!channel)) {
+		WL_ERR(("ieee80211_get_channel error\n"));
+		kfree(notif_bss_info);
+		return -EINVAL;
+	}
+	WL_DBG(("SSID : \"%s\", rssi %d, channel %d, capability : 0x04%x, bssid %pM"
+			"mgmt_type %d frame_len %d\n", bi->SSID,
+			notif_bss_info->rssi, notif_bss_info->channel,
+			mgmt->u.beacon.capab_info, &bi->BSSID, mgmt_type,
+			notif_bss_info->frame_len));
+
+	signal = notif_bss_info->rssi * 100;
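+	/* If the reconstructed frame carries no TSF, synthesize a timestamp
+	 * so user space can order scan results; boottime is used where the
+	 * kernel provides it.
+	 */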
+	if (!mgmt->u.probe_resp.timestamp) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+		struct timespec ts;
+		get_monotonic_boottime(&ts);
+		mgmt->u.probe_resp.timestamp = ((u64)ts.tv_sec*1000000)
+				+ ts.tv_nsec / 1000;
+#else
+		struct timeval tv;
+		do_gettimeofday(&tv);
+		mgmt->u.probe_resp.timestamp = ((u64)tv.tv_sec*1000000)
+				+ tv.tv_usec;
+#endif
+	}
+
+
+	cbss = cfg80211_inform_bss_frame(wiphy, channel, mgmt,
+		le16_to_cpu(notif_bss_info->frame_len), signal, aflags);
+	if (unlikely(!cbss)) {
+		WL_ERR(("cfg80211_inform_bss_frame error\n"));
+		kfree(notif_bss_info);
+		return -EINVAL;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+	cfg80211_put_bss(wiphy, cbss);
+#else
+	cfg80211_put_bss(cbss);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
+	kfree(notif_bss_info);
+	return err;
+}
+
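+/* Link-up is a successful WLC_E_SET_SSID in infrastructure mode, or a
+ * WLC_E_LINK event with the LINK flag set; anything else is not (yet) up.
+ */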
+static bool wl_is_linkup(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e, struct net_device *ndev)
+{
+	u32 event = ntoh32(e->event_type);
+	u32 status =  ntoh32(e->status);
+	u16 flags = ntoh16(e->flags);
+
+	WL_DBG(("event %d, status %d flags %x\n", event, status, flags));
+	if (event == WLC_E_SET_SSID) {
+		if (status == WLC_E_STATUS_SUCCESS) {
+			if (!wl_is_ibssmode(cfg, ndev))
+				return true;
+		}
+	} else if (event == WLC_E_LINK) {
+		if (flags & WLC_EVENT_MSG_LINK)
+			return true;
+	}
+
+	WL_DBG(("wl_is_linkup false\n"));
+	return false;
+}
+
+static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e)
+{
+	u32 event = ntoh32(e->event_type);
+	u16 flags = ntoh16(e->flags);
+
+	if (event == WLC_E_DEAUTH_IND ||
+		event == WLC_E_DISASSOC_IND ||
+		event == WLC_E_DISASSOC ||
+		event == WLC_E_DEAUTH) {
+#if (WL_DBG_LEVEL > 0)
+		WL_ERR(("Link down Reason : WLC_E_%s\n", wl_dbg_estr[event]));
+#endif /* (WL_DBG_LEVEL > 0) */
+		return true;
+	} else if (event == WLC_E_LINK) {
+		if (!(flags & WLC_EVENT_MSG_LINK)) {
+#if (WL_DBG_LEVEL > 0)
+			WL_ERR(("Link down Reason : WLC_E_%s\n", wl_dbg_estr[event]));
+#endif /* (WL_DBG_LEVEL > 0) */
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static bool wl_is_nonetwork(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e)
+{
+	u32 event = ntoh32(e->event_type);
+	u32 status = ntoh32(e->status);
+
+	if (event == WLC_E_LINK && status == WLC_E_STATUS_NO_NETWORKS)
+		return true;
+	if (event == WLC_E_SET_SSID && status != WLC_E_STATUS_SUCCESS)
+		return true;
+
+	return false;
+}
+
+/* The mainline kernel >= 3.2.0 has support for indicating new/del station
+ * to AP/P2P GO via events. If this change is backported to kernel for which
+ * this driver is being built, then define WL_CFG80211_STA_EVENT. You
+ * should use this new/del sta event mechanism for BRCM supplicant >= 22.
+ */
+static s32
+wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	s32 err = 0;
+	u32 event = ntoh32(e->event_type);
+	u32 reason = ntoh32(e->reason);
+	u32 len = ntoh32(e->datalen);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT)
+	bool isfree = false;
+	u8 *mgmt_frame;
+	u8 bsscfgidx = e->bsscfgidx;
+	s32 freq;
+	s32 channel;
+	u8 *body = NULL;
+	u16 fc = 0;
+
+	struct ieee80211_supported_band *band;
+	struct ether_addr da;
+	struct ether_addr bssid;
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+	channel_info_t ci;
+#else
+	struct station_info sinfo;
+#endif 
+
+	WL_DBG(("event %d status %d reason %d\n", event, ntoh32(e->status), reason));
+	/* if link down, bsscfg is disabled. */
+	if (event == WLC_E_LINK && reason == WLC_E_LINK_BSSCFG_DIS &&
+		wl_get_p2p_status(cfg, IF_DELETING) && (ndev != bcmcfg_to_prmry_ndev(cfg))) {
+		wl_add_remove_eventmsg(ndev, WLC_E_PROBREQ_MSG, false);
+		WL_INFO(("AP mode link down !! \n"));
+		complete(&cfg->iface_disable);
+		return 0;
+	}
+
+	if (event == WLC_E_DISASSOC_IND || event == WLC_E_DEAUTH_IND || event == WLC_E_DEAUTH) {
+		WL_ERR(("event %s(%d) status %d reason %d\n",
+		bcmevent_names[event].name, event, ntoh32(e->status), reason));
+	}
+
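+	/* On kernels without the new/del station API the association state is
+	 * delivered by reconstructing the management frame from the event and
+	 * feeding it to cfg80211_rx_mgmt(); newer kernels report it directly
+	 * via cfg80211_new_sta()/cfg80211_del_sta() below.
+	 */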
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT)
+	WL_DBG(("Enter \n"));
+	if (!len && (event == WLC_E_DEAUTH)) {
+		len = 2; /* reason code field */
+		data = &reason;
+	}
+	if (len) {
+		body = kzalloc(len, GFP_KERNEL);
+
+		if (body == NULL) {
+			WL_ERR(("wl_notify_connect_status: Failed to allocate body\n"));
+			return WL_INVALID;
+		}
+	}
+	memset(&bssid, 0, ETHER_ADDR_LEN);
+	WL_DBG(("Enter event %d ndev %p\n", event, ndev));
+	if (wl_get_mode_by_netdev(cfg, ndev) == WL_INVALID) {
+		kfree(body);
+		return WL_INVALID;
+	}
+	if (len)
+		memcpy(body, data, len);
+
+	wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
+		NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, bsscfgidx, &cfg->ioctl_buf_sync);
+	memcpy(da.octet, cfg->ioctl_buf, ETHER_ADDR_LEN);
+	err = wldev_ioctl(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false);
+	switch (event) {
+		case WLC_E_ASSOC_IND:
+			fc = FC_ASSOC_REQ;
+			break;
+		case WLC_E_REASSOC_IND:
+			fc = FC_REASSOC_REQ;
+			break;
+		case WLC_E_DISASSOC_IND:
+			fc = FC_DISASSOC;
+			break;
+		case WLC_E_DEAUTH_IND:
+			fc = FC_DISASSOC;
+			break;
+		case WLC_E_DEAUTH:
+			fc = FC_DISASSOC;
+			break;
+		default:
+			fc = 0;
+			goto exit;
+	}
+	if ((err = wldev_ioctl(ndev, WLC_GET_CHANNEL, &ci, sizeof(ci), false))) {
+		kfree(body);
+		return err;
+	}
+
+	channel = dtoh32(ci.hw_channel);
+	if (channel <= CH_MAX_2G_CHANNEL)
+		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	else
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+	if (!band) {
+		WL_ERR(("No valid band"));
+		if (body)
+			kfree(body);
+		return -EINVAL;
+	}
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38)
+	freq = ieee80211_channel_to_frequency(channel);
+	(void)band->band;
+#else
+	freq = ieee80211_channel_to_frequency(channel, band->band);
+#endif
+
+	err = wl_frame_get_mgmt(fc, &da, &e->addr, &bssid,
+		&mgmt_frame, &len, body);
+	if (err < 0)
+		goto exit;
+	isfree = true;
+
+	if (event == WLC_E_ASSOC_IND && reason == DOT11_SC_SUCCESS) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+		cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
+#else
+		cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
+#endif 
+	} else if (event == WLC_E_DISASSOC_IND) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+		cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
+#else
+		cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
+#endif 
+	} else if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH)) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+		cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
+#else
+		cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
+#endif 
+	}
+
+exit:
+	if (isfree)
+		kfree(mgmt_frame);
+	if (body)
+		kfree(body);
+#else /* LINUX_VERSION < VERSION(3,2,0) && !WL_CFG80211_STA_EVENT */
+	sinfo.filled = 0;
+	if (((event == WLC_E_ASSOC_IND) || (event == WLC_E_REASSOC_IND)) &&
+		reason == DOT11_SC_SUCCESS) {
+		sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
+		if (!data) {
+			WL_ERR(("No IEs present in ASSOC/REASSOC_IND"));
+			return -EINVAL;
+		}
+		sinfo.assoc_req_ies = data;
+		sinfo.assoc_req_ies_len = len;
+		cfg80211_new_sta(ndev, e->addr.octet, &sinfo, GFP_ATOMIC);
+	} else if (event == WLC_E_DISASSOC_IND) {
+		cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC);
+	} else if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH)) {
+		cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC);
+	}
+#endif 
+	return err;
+}
+
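+/* Cache the firmware's auth/assoc status code in the security profile so
+ * wl_bss_connect_done() can report a more specific failure status than
+ * WLAN_STATUS_UNSPECIFIED_FAILURE.
+ */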
+static s32
+wl_get_auth_assoc_status(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e)
+{
+	u32 reason = ntoh32(e->reason);
+	u32 event = ntoh32(e->event_type);
+	struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+	WL_DBG(("event type : %d, reason : %d\n", event, reason));
+	if (sec) {
+		switch (event) {
+		case WLC_E_ASSOC:
+		case WLC_E_AUTH:
+			sec->auth_assoc_res_status = reason;
+			break;
+		default:
+			break;
+		}
+	} else
+		WL_ERR(("sec is NULL\n"));
+	return 0;
+}
+
+static s32
+wl_notify_connect_status_ibss(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	s32 err = 0;
+	u32 event = ntoh32(e->event_type);
+	u16 flags = ntoh16(e->flags);
+	u32 status =  ntoh32(e->status);
+	bool active;
+
+	if (event == WLC_E_JOIN) {
+		WL_DBG(("joined in IBSS network\n"));
+	}
+	if (event == WLC_E_START) {
+		WL_DBG(("started IBSS network\n"));
+	}
+	if (event == WLC_E_JOIN || event == WLC_E_START ||
+		(event == WLC_E_LINK && (flags == WLC_EVENT_MSG_LINK))) {
+		if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+			/* ROAM or Redundant */
+			u8 *cur_bssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+			if (memcmp(cur_bssid, &e->addr, ETHER_ADDR_LEN) == 0) {
+				WL_DBG(("IBSS connected event from same BSSID("
+					MACDBG "), ignore it\n", MAC2STRDBG(cur_bssid)));
+				return err;
+			}
+			WL_INFO(("IBSS BSSID is changed from " MACDBG " to " MACDBG "\n",
+				MAC2STRDBG(cur_bssid), MAC2STRDBG((u8 *)&e->addr)));
+			wl_get_assoc_ies(cfg, ndev);
+			wl_update_prof(cfg, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
+			wl_update_bss_info(cfg, ndev, false);
+			cfg80211_ibss_joined(ndev, (s8 *)&e->addr, GFP_KERNEL);
+		}
+		else {
+			/* New connection */
+			WL_INFO(("IBSS connected to " MACDBG "\n", MAC2STRDBG((u8 *)&e->addr)));
+			wl_link_up(cfg);
+			wl_get_assoc_ies(cfg, ndev);
+			wl_update_prof(cfg, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
+			wl_update_bss_info(cfg, ndev, false);
+			cfg80211_ibss_joined(ndev, (s8 *)&e->addr, GFP_KERNEL);
+			wl_set_drv_status(cfg, CONNECTED, ndev);
+			active = true;
+			wl_update_prof(cfg, ndev, NULL, (void *)&active, WL_PROF_ACT);
+		}
+	} else if ((event == WLC_E_LINK && !(flags & WLC_EVENT_MSG_LINK)) ||
+		event == WLC_E_DEAUTH_IND || event == WLC_E_DISASSOC_IND) {
+		wl_clr_drv_status(cfg, CONNECTED, ndev);
+		wl_link_down(cfg);
+		wl_init_prof(cfg, ndev);
+	}
+	else if (event == WLC_E_SET_SSID && status == WLC_E_STATUS_NO_NETWORKS) {
+		WL_DBG(("no action - join fail (IBSS mode)\n"));
+	}
+	else {
+		WL_DBG(("no action (IBSS mode)\n"));
+	}
+	return err;
+}
+
+static s32
+wl_notify_connect_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	bool act;
+	struct net_device *ndev = NULL;
+	s32 err = 0;
+	u32 event = ntoh32(e->event_type);
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
+		err = wl_notify_connect_status_ap(cfg, ndev, e, data);
+	} else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_IBSS) {
+		err = wl_notify_connect_status_ibss(cfg, ndev, e, data);
+	} else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_BSS) {
+		WL_DBG(("wl_notify_connect_status : event %d status : %d ndev %p\n",
+			ntoh32(e->event_type), ntoh32(e->status), ndev));
+		if (event == WLC_E_ASSOC || event == WLC_E_AUTH) {
+			wl_get_auth_assoc_status(cfg, ndev, e);
+			return 0;
+		}
+		if (wl_is_linkup(cfg, e, ndev)) {
+			wl_link_up(cfg);
+			act = true;
+			if (!wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
+					printk("wl_bss_connect_done succeeded with " MACDBG "\n",
+						MAC2STRDBG((u8*)(&e->addr)));
+					wl_bss_connect_done(cfg, ndev, e, data, true);
+					WL_DBG(("joined in BSS network \"%s\"\n",
+					((struct wlc_ssid *)
+					 wl_read_prof(cfg, ndev, WL_PROF_SSID))->SSID));
+				}
+			wl_update_prof(cfg, ndev, e, &act, WL_PROF_ACT);
+			wl_update_prof(cfg, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
+
+		} else if (wl_is_linkdown(cfg, e)) {
+			if (cfg->scan_request)
+				wl_notify_escan_complete(cfg, ndev, true, true);
+			if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+				scb_val_t scbval;
+				u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+				s32 reason = 0;
+				if (event == WLC_E_DEAUTH_IND || event == WLC_E_DISASSOC_IND)
+					reason = ntoh32(e->reason);
+				/* WLAN_REASON_UNSPECIFIED is used for hang up event in Android */
+				reason = (reason == WLAN_REASON_UNSPECIFIED)? 0 : reason;
+
+				printk("link down if %s may call cfg80211_disconnected. "
+					"event : %d, reason=%d from " MACDBG "\n",
+					ndev->name, event, ntoh32(e->reason),
+					MAC2STRDBG((u8*)(&e->addr)));
+				if (!cfg->roam_offload &&
+					memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) != 0) {
+					WL_ERR(("BSSID of event is not the connected BSSID"
+						"(ignore it) cur: " MACDBG " event: " MACDBG"\n",
+						MAC2STRDBG(curbssid), MAC2STRDBG((u8*)(&e->addr))));
+					return 0;
+				}
+				wl_clr_drv_status(cfg, CONNECTED, ndev);
+				if (!wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
+					/* To make sure we disconnect, explicitly send a
+					 * disassoc for the BSSID 00:00:00:00:00:00 issue.
+					 */
+					scbval.val = WLAN_REASON_DEAUTH_LEAVING;
+
+					memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
+					scbval.val = htod32(scbval.val);
+					err = wldev_ioctl(ndev, WLC_DISASSOC, &scbval,
+						sizeof(scb_val_t), true);
+					if (err < 0) {
+						WL_ERR(("WLC_DISASSOC error %d\n", err));
+						err = 0;
+					}
+					cfg80211_disconnected(ndev, reason, NULL, 0, GFP_KERNEL);
+					wl_link_down(cfg);
+					wl_init_prof(cfg, ndev);
+				}
+			}
+			else if (wl_get_drv_status(cfg, CONNECTING, ndev)) {
+				printk("link down, during connecting\n");
+#ifdef ESCAN_RESULT_PATCH
+				if ((memcmp(connect_req_bssid, broad_bssid, ETHER_ADDR_LEN) == 0) ||
+					(memcmp(&e->addr, broad_bssid, ETHER_ADDR_LEN) == 0) ||
+					(memcmp(&e->addr, connect_req_bssid, ETHER_ADDR_LEN) == 0))
+					/* In case this event comes while associating another AP */
+#endif /* ESCAN_RESULT_PATCH */
+					wl_bss_connect_done(cfg, ndev, e, data, false);
+			}
+			wl_clr_drv_status(cfg, DISCONNECTING, ndev);
+
+			/* if link down, bsscfg is disabled */
+			if (ndev != bcmcfg_to_prmry_ndev(cfg))
+				complete(&cfg->iface_disable);
+
+		} else if (wl_is_nonetwork(cfg, e)) {
+			printk("connect failed event=%d e->status %d e->reason %d \n",
+				event, (int)ntoh32(e->status), (int)ntoh32(e->reason));
+			/* Clean up any pending scan request */
+			if (cfg->scan_request)
+				wl_notify_escan_complete(cfg, ndev, true, true);
+			if (wl_get_drv_status(cfg, CONNECTING, ndev))
+				wl_bss_connect_done(cfg, ndev, e, data, false);
+		} else {
+			WL_DBG(("%s nothing\n", __FUNCTION__));
+		}
+	}
+	else {
+		WL_ERR(("Invalid ndev status %d\n", wl_get_mode_by_netdev(cfg, ndev)));
+	}
+	return err;
+}
+
+
+static s32
+wl_notify_roaming_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	bool act;
+	struct net_device *ndev = NULL;
+	s32 err = 0;
+	u32 event = be32_to_cpu(e->event_type);
+	u32 status = be32_to_cpu(e->status);
+	WL_DBG(("Enter \n"));
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
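+	/* Once the firmware reports roam completion via WLC_E_BSSID, drop the
+	 * WLC_E_ROAM subscription so the same roam is not handled twice.
+	 */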
+	if ((!cfg->disable_roam_event) && (event == WLC_E_BSSID)) {
+		wl_add_remove_eventmsg(ndev, WLC_E_ROAM, false);
+		cfg->disable_roam_event = TRUE;
+	}
+
+	if ((cfg->disable_roam_event) && (event == WLC_E_ROAM))
+		return err;
+
+	if ((event == WLC_E_ROAM || event == WLC_E_BSSID) && status == WLC_E_STATUS_SUCCESS) {
+		if (wl_get_drv_status(cfg, CONNECTED, ndev))
+			wl_bss_roaming_done(cfg, ndev, e, data);
+		else
+			wl_bss_connect_done(cfg, ndev, e, data, true);
+		act = true;
+		wl_update_prof(cfg, ndev, e, &act, WL_PROF_ACT);
+		wl_update_prof(cfg, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
+	}
+	return err;
+}
+
+static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	wl_assoc_info_t assoc_info;
+	struct wl_connect_info *conn_info = wl_to_conn(cfg);
+	s32 err = 0;
+
+	WL_DBG(("Enter \n"));
+	err = wldev_iovar_getbuf(ndev, "assoc_info", NULL, 0, cfg->extra_buf,
+		WL_ASSOC_INFO_MAX, NULL);
+	if (unlikely(err)) {
+		WL_ERR(("could not get assoc info (%d)\n", err));
+		return err;
+	}
+	memcpy(&assoc_info, cfg->extra_buf, sizeof(wl_assoc_info_t));
+	assoc_info.req_len = htod32(assoc_info.req_len);
+	assoc_info.resp_len = htod32(assoc_info.resp_len);
+	assoc_info.flags = htod32(assoc_info.flags);
+	if (conn_info->req_ie_len) {
+		conn_info->req_ie_len = 0;
+		bzero(conn_info->req_ie, sizeof(conn_info->req_ie));
+	}
+	if (conn_info->resp_ie_len) {
+		conn_info->resp_ie_len = 0;
+		bzero(conn_info->resp_ie, sizeof(conn_info->resp_ie));
+	}
+	if (assoc_info.req_len) {
+		err = wldev_iovar_getbuf(ndev, "assoc_req_ies", NULL, 0, cfg->extra_buf,
+			WL_ASSOC_INFO_MAX, NULL);
+		if (unlikely(err)) {
+			WL_ERR(("could not get assoc req (%d)\n", err));
+			return err;
+		}
+		conn_info->req_ie_len = assoc_info.req_len - sizeof(struct dot11_assoc_req);
+		if (assoc_info.flags & WLC_ASSOC_REQ_IS_REASSOC) {
+			conn_info->req_ie_len -= ETHER_ADDR_LEN;
+		}
+		if (conn_info->req_ie_len <= MAX_REQ_LINE)
+			memcpy(conn_info->req_ie, cfg->extra_buf, conn_info->req_ie_len);
+		else {
+			WL_ERR(("IE size %d above max %d size \n",
+				conn_info->req_ie_len, MAX_REQ_LINE));
+			return err;
+		}
+	} else {
+		conn_info->req_ie_len = 0;
+	}
+	if (assoc_info.resp_len) {
+		err = wldev_iovar_getbuf(ndev, "assoc_resp_ies", NULL, 0, cfg->extra_buf,
+			WL_ASSOC_INFO_MAX, NULL);
+		if (unlikely(err)) {
+			WL_ERR(("could not get assoc resp (%d)\n", err));
+			return err;
+		}
+		conn_info->resp_ie_len = assoc_info.resp_len - sizeof(struct dot11_assoc_resp);
+		if (conn_info->resp_ie_len <= MAX_REQ_LINE)
+			memcpy(conn_info->resp_ie, cfg->extra_buf, conn_info->resp_ie_len);
+		else {
+			WL_ERR(("IE size %d above max %d size \n",
+				conn_info->resp_ie_len, MAX_REQ_LINE));
+			return err;
+		}
+	} else {
+		conn_info->resp_ie_len = 0;
+	}
+	WL_DBG(("req len (%d) resp len (%d)\n", conn_info->req_ie_len,
+		conn_info->resp_ie_len));
+
+	return err;
+}
+
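+/* Fold the target channel into a chanspec (band, 20 MHz bandwidth, no
+ * control sideband) and append a single-entry chanspec list to the join
+ * params, growing *join_params_size accordingly.
+ */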
+static void wl_ch_to_chanspec(int ch, struct wl_join_params *join_params,
+        size_t *join_params_size)
+{
+	chanspec_t chanspec = 0;
+	if (ch != 0) {
+		join_params->params.chanspec_num = 1;
+		join_params->params.chanspec_list[0] = ch;
+
+		if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL)
+			chanspec |= WL_CHANSPEC_BAND_2G;
+		else
+			chanspec |= WL_CHANSPEC_BAND_5G;
+
+		chanspec |= WL_CHANSPEC_BW_20;
+		chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+		*join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE +
+			join_params->params.chanspec_num * sizeof(chanspec_t);
+
+		join_params->params.chanspec_list[0]  &= WL_CHANSPEC_CHAN_MASK;
+		join_params->params.chanspec_list[0] |= chanspec;
+		join_params->params.chanspec_list[0] =
+			wl_chspec_host_to_driver(join_params->params.chanspec_list[0]);
+
+		join_params->params.chanspec_num =
+			htod32(join_params->params.chanspec_num);
+		WL_DBG(("join_params->params.chanspec_list[0]= %X, %d channels\n",
+			join_params->params.chanspec_list[0],
+			join_params->params.chanspec_num));
+	}
+}
+
+static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool roam)
+{
+	struct cfg80211_bss *bss;
+	struct wl_bss_info *bi;
+	struct wlc_ssid *ssid;
+	struct bcm_tlv *tim;
+	s32 beacon_interval;
+	s32 dtim_period;
+	size_t ie_len;
+	u8 *ie;
+	u8 *curbssid;
+	s32 err = 0;
+	struct wiphy *wiphy;
+	u32 channel;
+
+	wiphy = bcmcfg_to_wiphy(cfg);
+
+	ssid = (struct wlc_ssid *)wl_read_prof(cfg, ndev, WL_PROF_SSID);
+	curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+	bss = cfg80211_get_bss(wiphy, NULL, curbssid,
+		ssid->SSID, ssid->SSID_len, WLAN_CAPABILITY_ESS,
+		WLAN_CAPABILITY_ESS);
+
+	mutex_lock(&cfg->usr_sync);
+
+	*(u32 *) cfg->extra_buf = htod32(WL_EXTRA_BUF_MAX);
+	err = wldev_ioctl(ndev, WLC_GET_BSS_INFO,
+		cfg->extra_buf, WL_EXTRA_BUF_MAX, false);
+	if (unlikely(err)) {
+		WL_ERR(("Could not get bss info %d\n", err));
+		goto update_bss_info_out;
+	}
+	bi = (struct wl_bss_info *)(cfg->extra_buf + 4);
+	channel = bi->ctl_ch ? bi->ctl_ch :
+		CHSPEC_CHANNEL(wl_chspec_driver_to_host(bi->chanspec));
+	wl_update_prof(cfg, ndev, NULL, &channel, WL_PROF_CHAN);
+
+	if (!bss) {
+		WL_DBG(("Could not find the AP\n"));
+		if (memcmp(bi->BSSID.octet, curbssid, ETHER_ADDR_LEN)) {
+			WL_ERR(("Bssid doesn't match\n"));
+			err = -EIO;
+			goto update_bss_info_out;
+		}
+		err = wl_inform_single_bss(cfg, bi, roam);
+		if (unlikely(err))
+			goto update_bss_info_out;
+
+		ie = ((u8 *)bi) + bi->ie_offset;
+		ie_len = bi->ie_length;
+		beacon_interval = cpu_to_le16(bi->beacon_period);
+	} else {
+		WL_DBG(("Found the AP in the list - BSSID %pM\n", bss->bssid));
+#if defined(WL_CFG80211_P2P_DEV_IF)
+		ie = (u8 *)bss->ies->data;
+		ie_len = bss->ies->len;
+#else
+		ie = bss->information_elements;
+		ie_len = bss->len_information_elements;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+		beacon_interval = bss->beacon_interval;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+		cfg80211_put_bss(wiphy, bss);
+#else
+		cfg80211_put_bss(bss);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
+	}
+
+	tim = bcm_parse_tlvs(ie, ie_len, WLAN_EID_TIM);
+	if (tim) {
+		dtim_period = tim->data[1];
+	} else {
+		/*
+		 * An active scan was done, so the DTIM information could not
+		 * be taken from the probe response; query it explicitly.
+		 */
+		err = wldev_ioctl(ndev, WLC_GET_DTIMPRD,
+			&dtim_period, sizeof(dtim_period), false);
+		if (unlikely(err)) {
+			WL_ERR(("WLC_GET_DTIMPRD error (%d)\n", err));
+			goto update_bss_info_out;
+		}
+	}
+
+	wl_update_prof(cfg, ndev, NULL, &beacon_interval, WL_PROF_BEACONINT);
+	wl_update_prof(cfg, ndev, NULL, &dtim_period, WL_PROF_DTIMPERIOD);
+
+update_bss_info_out:
+	if (unlikely(err)) {
+		WL_ERR(("Failed with error %d\n", err));
+	}
+	mutex_unlock(&cfg->usr_sync);
+	return err;
+}
+
+static s32
+wl_bss_roaming_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct wl_connect_info *conn_info = wl_to_conn(cfg);
+	s32 err = 0;
+	u8 *curbssid;
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+	struct ieee80211_supported_band *band;
+	struct ieee80211_channel *notify_channel = NULL;
+	u32 *channel;
+	u32 freq;
+#endif 
+
+
+	wl_get_assoc_ies(cfg, ndev);
+	wl_update_prof(cfg, ndev, NULL, (void *)(e->addr.octet), WL_PROF_BSSID);
+	curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+	wl_update_bss_info(cfg, ndev, true);
+	wl_update_pmklist(ndev, cfg->pmk_list, err);
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
+	/* channel info for cfg80211_roamed introduced in 2.6.39-rc1 */
+	channel = (u32 *)wl_read_prof(cfg, ndev, WL_PROF_CHAN);
+	if (*channel <= CH_MAX_2G_CHANNEL)
+		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	else
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+	freq = ieee80211_channel_to_frequency(*channel, band->band);
+	notify_channel = ieee80211_get_channel(wiphy, freq);
+#endif 
+	printk("wl_bss_roaming_done succeeded to " MACDBG "\n",
+		MAC2STRDBG((u8*)(&e->addr)));
+
+	cfg80211_roamed(ndev,
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
+		notify_channel,
+#endif
+		curbssid,
+		conn_info->req_ie, conn_info->req_ie_len,
+		conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
+	WL_DBG(("Report roaming result\n"));
+
+	wl_set_drv_status(cfg, CONNECTED, ndev);
+
+	return err;
+}
+
+static s32
+wl_bss_connect_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data, bool completed)
+{
+	struct wl_connect_info *conn_info = wl_to_conn(cfg);
+	struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+#if defined(CUSTOM_SET_CPUCORE)
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif 
+	s32 err = 0;
+	u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+	if (!sec) {
+		WL_ERR(("sec is NULL\n"));
+		return -ENODEV;
+	}
+	WL_DBG((" enter\n"));
+#ifdef ESCAN_RESULT_PATCH
+	if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+		if (memcmp(curbssid, connect_req_bssid, ETHER_ADDR_LEN) == 0) {
+			WL_DBG((" Connected event of connected device e=%d s=%d, ignore it\n",
+				ntoh32(e->event_type), ntoh32(e->status)));
+			return err;
+		}
+	}
+	if (memcmp(curbssid, broad_bssid, ETHER_ADDR_LEN) == 0 &&
+		memcmp(broad_bssid, connect_req_bssid, ETHER_ADDR_LEN) != 0) {
+		WL_DBG(("copy bssid\n"));
+		memcpy(curbssid, connect_req_bssid, ETHER_ADDR_LEN);
+	}
+
+#else
+	if (cfg->scan_request) {
+		wl_notify_escan_complete(cfg, ndev, true, true);
+	}
+#endif /* ESCAN_RESULT_PATCH */
+	if (wl_get_drv_status(cfg, CONNECTING, ndev)) {
+		wl_cfg80211_scan_abort(cfg);
+		wl_clr_drv_status(cfg, CONNECTING, ndev);
+		if (completed) {
+			wl_get_assoc_ies(cfg, ndev);
+			wl_update_prof(cfg, ndev, NULL, (void *)(e->addr.octet), WL_PROF_BSSID);
+			curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+			wl_update_bss_info(cfg, ndev, false);
+			wl_update_pmklist(ndev, cfg->pmk_list, err);
+			wl_set_drv_status(cfg, CONNECTED, ndev);
+			if (ndev != bcmcfg_to_prmry_ndev(cfg)) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+				init_completion(&cfg->iface_disable);
+#else
+				/* reinitialize completion to clear previous count */
+				INIT_COMPLETION(cfg->iface_disable);
+#endif
+			}
+#ifdef CUSTOM_SET_CPUCORE
+			if (wl_get_chan_isvht80(ndev, dhd)) {
+				if (ndev == bcmcfg_to_prmry_ndev(cfg))
+					dhd->chan_isvht80 |= DHD_FLAG_STA_MODE; /* STA mode */
+				else if (ndev == wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION))
+					dhd->chan_isvht80 |= DHD_FLAG_P2P_MODE; /* p2p mode */
+				dhd_set_cpucore(dhd, TRUE);
+			}
+#endif /* CUSTOM_SET_CPUCORE */
+
+		}
+		cfg80211_connect_result(ndev,
+			curbssid,
+			conn_info->req_ie,
+			conn_info->req_ie_len,
+			conn_info->resp_ie,
+			conn_info->resp_ie_len,
+			completed ? WLAN_STATUS_SUCCESS :
+			(sec->auth_assoc_res_status) ?
+			sec->auth_assoc_res_status :
+			WLAN_STATUS_UNSPECIFIED_FAILURE,
+			GFP_KERNEL);
+		if (completed)
+			WL_INFO(("Report connect result - connection succeeded\n"));
+		else
+			WL_ERR(("Report connect result - connection failed\n"));
+	}
+#ifdef CONFIG_TCPACK_FASTTX
+	if (wl_get_chan_isvht80(ndev, dhd))
+		wldev_iovar_setint(ndev, "tcpack_fast_tx", 0);
+	else
+		wldev_iovar_setint(ndev, "tcpack_fast_tx", 1);
+#endif /* CONFIG_TCPACK_FASTTX */
+
+	return err;
+}
+
+static s32
+wl_notify_mic_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct net_device *ndev = NULL;
+	u16 flags = ntoh16(e->flags);
+	enum nl80211_key_type key_type;
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	mutex_lock(&cfg->usr_sync);
+	if (flags & WLC_EVENT_MSG_GROUP)
+		key_type = NL80211_KEYTYPE_GROUP;
+	else
+		key_type = NL80211_KEYTYPE_PAIRWISE;
+
+	cfg80211_michael_mic_failure(ndev, (u8 *)&e->addr, key_type, -1,
+		NULL, GFP_KERNEL);
+	mutex_unlock(&cfg->usr_sync);
+
+	return 0;
+}
+
+#ifdef PNO_SUPPORT
+static s32
+wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct net_device *ndev = NULL;
+
+	WL_ERR((">>> PNO Event\n"));
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+#ifndef WL_SCHED_SCAN
+	mutex_lock(&cfg->usr_sync);
+	/* TODO: Use cfg80211_sched_scan_results(wiphy); */
+	cfg80211_disconnected(ndev, 0, NULL, 0, GFP_KERNEL);
+	mutex_unlock(&cfg->usr_sync);
+#else
+	/* If cfg80211 scheduled scan is supported, report the pno results via sched
+	 * scan results
+	 */
+	wl_notify_sched_scan_results(cfg, ndev, e, data);
+#endif /* WL_SCHED_SCAN */
+	return 0;
+}
+#endif /* PNO_SUPPORT */
+
+static s32
+wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct channel_info channel_inform;
+	struct wl_scan_results *bss_list;
+	struct net_device *ndev = NULL;
+	u32 len = WL_SCAN_BUF_MAX;
+	s32 err = 0;
+	unsigned long flags;
+
+	WL_DBG(("Enter \n"));
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+	if (!wl_get_drv_status(cfg, SCANNING, ndev)) {
+		WL_ERR(("scan is not ready \n"));
+		return err;
+	}
+
+	mutex_lock(&cfg->usr_sync);
+	wl_clr_drv_status(cfg, SCANNING, ndev);
+	err = wldev_ioctl(ndev, WLC_GET_CHANNEL, &channel_inform,
+		sizeof(channel_inform), false);
+	if (unlikely(err)) {
+		WL_ERR(("scan busy (%d)\n", err));
+		goto scan_done_out;
+	}
+	channel_inform.scan_channel = dtoh32(channel_inform.scan_channel);
+	if (unlikely(channel_inform.scan_channel)) {
+
+		WL_DBG(("channel_inform.scan_channel (%d)\n",
+			channel_inform.scan_channel));
+	}
+	cfg->bss_list = cfg->scan_results;
+	bss_list = cfg->bss_list;
+	memset(bss_list, 0, len);
+	bss_list->buflen = htod32(len);
+	err = wldev_ioctl(ndev, WLC_SCAN_RESULTS, bss_list, len, false);
+	if (unlikely(err) && unlikely(!cfg->scan_suppressed)) {
+		WL_ERR(("%s Scan_results error (%d)\n", ndev->name, err));
+		err = -EINVAL;
+		goto scan_done_out;
+	}
+	bss_list->buflen = dtoh32(bss_list->buflen);
+	bss_list->version = dtoh32(bss_list->version);
+	bss_list->count = dtoh32(bss_list->count);
+
+	err = wl_inform_bss(cfg);
+
+scan_done_out:
+	del_timer_sync(&cfg->scan_timeout);
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	if (cfg->scan_request) {
+		cfg80211_scan_done(cfg->scan_request, false);
+		cfg->scan_request = NULL;
+	}
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+	WL_DBG(("cfg80211_scan_done\n"));
+	mutex_unlock(&cfg->usr_sync);
+	return err;
+}
+
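+/* Build a complete 802.11 management frame around the supplied body:
+ * allocate a buffer (returned via *pheader, to be freed by the caller),
+ * fill in the dot11 header, reserve the fixed fields for the given frame
+ * class, and update *body_len to the total frame length.
+ */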
+static s32
+wl_frame_get_mgmt(u16 fc, const struct ether_addr *da,
+	const struct ether_addr *sa, const struct ether_addr *bssid,
+	u8 **pheader, u32 *body_len, u8 *pbody)
+{
+	struct dot11_management_header *hdr;
+	u32 totlen = 0;
+	s32 err = 0;
+	u8 *offset;
+	u32 prebody_len = *body_len;
+	switch (fc) {
+		case FC_ASSOC_REQ:
+			/* capability, listen interval */
+			totlen = DOT11_ASSOC_REQ_FIXED_LEN;
+			*body_len += DOT11_ASSOC_REQ_FIXED_LEN;
+			break;
+
+		case FC_REASSOC_REQ:
+			/* capability, listen interval, AP address */
+			totlen = DOT11_REASSOC_REQ_FIXED_LEN;
+			*body_len += DOT11_REASSOC_REQ_FIXED_LEN;
+			break;
+	}
+	totlen += DOT11_MGMT_HDR_LEN + prebody_len;
+	*pheader = kzalloc(totlen, GFP_KERNEL);
+	if (*pheader == NULL) {
+		WL_ERR(("memory alloc failed \n"));
+		return -ENOMEM;
+	}
+	hdr = (struct dot11_management_header *) (*pheader);
+	hdr->fc = htol16(fc);
+	hdr->durid = 0;
+	hdr->seq = 0;
+	offset = (u8*)(hdr + 1) + (totlen - DOT11_MGMT_HDR_LEN - prebody_len);
+	bcopy((const char*)da, (u8*)&hdr->da, ETHER_ADDR_LEN);
+	bcopy((const char*)sa, (u8*)&hdr->sa, ETHER_ADDR_LEN);
+	bcopy((const char*)bssid, (u8*)&hdr->bssid, ETHER_ADDR_LEN);
+	if ((pbody != NULL) && prebody_len)
+		bcopy((const char*)pbody, offset, prebody_len);
+	*body_len = totlen;
+	return err;
+}
+
+
+void
+wl_stop_wait_next_action_frame(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+		if (timer_pending(&cfg->p2p->listen_timer)) {
+			del_timer_sync(&cfg->p2p->listen_timer);
+		}
+		if (cfg->afx_hdl != NULL) {
+			if (cfg->afx_hdl->dev != NULL) {
+				wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+				wl_clr_drv_status(cfg, FINDING_COMMON_CHANNEL, cfg->afx_hdl->dev);
+			}
+			cfg->afx_hdl->peer_chan = WL_INVALID;
+		}
+		complete(&cfg->act_frm_scan);
+		WL_DBG(("*** Wake UP ** Working afx searching is cleared\n"));
+	} else if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM)) {
+		if (!(wl_get_p2p_status(cfg, ACTION_TX_COMPLETED) ||
+			wl_get_p2p_status(cfg, ACTION_TX_NOACK)))
+			wl_set_p2p_status(cfg, ACTION_TX_COMPLETED);
+
+		WL_DBG(("*** Wake UP ** abort actframe iovar\n"));
+		/* if channel is not zero, "actfame" uses off channel scan.
+		 * So abort scan for off channel completion.
+		 */
+		if (cfg->af_sent_channel)
+			wl_cfg80211_scan_abort(cfg);
+	}
+#ifdef WL_CFG80211_SYNC_GON
+	else if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) {
+		WL_DBG(("*** Wake UP ** abort listen for next af frame\n"));
+		/* So abort scan to cancel listen */
+		wl_cfg80211_scan_abort(cfg);
+	}
+#endif /* WL_CFG80211_SYNC_GON */
+}
+
+
+int wl_cfg80211_get_ioctl_version(void)
+{
+	return ioctl_version;
+}
+
+static s32
+wl_notify_rx_mgmt_frame(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct ieee80211_supported_band *band;
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+	struct ether_addr da;
+	struct ether_addr bssid;
+	bool isfree = false;
+	s32 err = 0;
+	s32 freq;
+	struct net_device *ndev = NULL;
+	wifi_p2p_pub_act_frame_t *act_frm = NULL;
+	wifi_p2p_action_frame_t *p2p_act_frm = NULL;
+	wifi_p2psd_gas_pub_act_frame_t *sd_act_frm = NULL;
+	wl_event_rx_frame_data_t *rxframe =
+		(wl_event_rx_frame_data_t*)data;
+	u32 event = ntoh32(e->event_type);
+	u8 *mgmt_frame;
+	u8 bsscfgidx = e->bsscfgidx;
+	u32 mgmt_frame_len = ntoh32(e->datalen) - sizeof(wl_event_rx_frame_data_t);
+	u16 channel = ((ntoh16(rxframe->channel) & WL_CHANSPEC_CHAN_MASK));
+
+	memset(&bssid, 0, ETHER_ADDR_LEN);
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	if (channel <= CH_MAX_2G_CHANNEL)
+		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	else
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+	if (!band) {
+		WL_ERR(("No valid band"));
+		return -EINVAL;
+	}
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38)
+	freq = ieee80211_channel_to_frequency(channel);
+	(void)band->band;
+#else
+	freq = ieee80211_channel_to_frequency(channel, band->band);
+#endif
+	if (event == WLC_E_ACTION_FRAME_RX) {
+		wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
+			NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, bsscfgidx, &cfg->ioctl_buf_sync);
+
+		err = wldev_ioctl(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false);
+		if (err < 0)
+			WL_ERR(("WLC_GET_BSSID error %d\n", err));
+		memcpy(da.octet, cfg->ioctl_buf, ETHER_ADDR_LEN);
+		err = wl_frame_get_mgmt(FC_ACTION, &da, &e->addr, &bssid,
+			&mgmt_frame, &mgmt_frame_len,
+			(u8 *)((wl_event_rx_frame_data_t *)rxframe + 1));
+		if (err < 0) {
+			WL_ERR(("Error in receiving action frame len %d channel %d freq %d\n",
+				mgmt_frame_len, channel, freq));
+			goto exit;
+		}
+		isfree = true;
+		if (wl_cfgp2p_is_pub_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+			mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
+			act_frm = (wifi_p2p_pub_act_frame_t *)
+					(&mgmt_frame[DOT11_MGMT_HDR_LEN]);
+		} else if (wl_cfgp2p_is_p2p_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+			mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
+			p2p_act_frm = (wifi_p2p_action_frame_t *)
+					(&mgmt_frame[DOT11_MGMT_HDR_LEN]);
+			(void) p2p_act_frm;
+		} else if (wl_cfgp2p_is_gas_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+			mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
+
+			sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)
+					(&mgmt_frame[DOT11_MGMT_HDR_LEN]);
+			if (sd_act_frm && wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM)) {
+				if (cfg->next_af_subtype == sd_act_frm->action) {
+					WL_DBG(("We got a right next frame of SD!(%d)\n",
+						sd_act_frm->action));
+					wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+					/* Stop waiting for next AF. */
+					wl_stop_wait_next_action_frame(cfg, ndev);
+				}
+			}
+			(void) sd_act_frm;
+		} else {
+			/*
+			 * If we received a normal action frame and ndev is p2p0,
+			 * we have to change ndev from p2p0 to wlan0.
+			 */
+
+			/* use primary device instead of p2p's */
+			if (discover_cfgdev(cfgdev, cfg))
+				cfgdev = bcmcfg_to_prmry_cfgdev(cfgdev, cfg);
+
+			if (cfg->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) {
+				u8 action = 0;
+				if (wl_get_public_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+					mgmt_frame_len - DOT11_MGMT_HDR_LEN, &action) != BCME_OK) {
+					WL_DBG(("Received action is not a public action frame\n"));
+				} else if (cfg->next_af_subtype == action) {
+					WL_DBG(("Received action is the awaited action(%d)\n",
+						action));
+					wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+					/* Stop waiting for next AF. */
+					wl_stop_wait_next_action_frame(cfg, ndev);
+				}
+			}
+		}
+
+		if (act_frm) {
+
+			if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM)) {
+				if (cfg->next_af_subtype == act_frm->subtype) {
+					WL_DBG(("We got a right next frame!(%d)\n",
+						act_frm->subtype));
+					wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+					if (cfg->next_af_subtype == P2P_PAF_GON_CONF) {
+						OSL_SLEEP(20);
+					}
+
+					/* Stop waiting for next AF. */
+					wl_stop_wait_next_action_frame(cfg, ndev);
+				}
+			}
+		}
+
+		wl_cfgp2p_print_actframe(false, &mgmt_frame[DOT11_MGMT_HDR_LEN],
+			mgmt_frame_len - DOT11_MGMT_HDR_LEN, channel);
+		/*
+		 * After complete GO Negotiation, roll back to mpc mode
+		 */
+		if (act_frm && ((act_frm->subtype == P2P_PAF_GON_CONF) ||
+			(act_frm->subtype == P2P_PAF_PROVDIS_RSP))) {
+			wldev_iovar_setint(ndev, "mpc", 1);
+		}
+		if (act_frm && (act_frm->subtype == P2P_PAF_GON_CONF)) {
+			WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
+			wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+		}
+	} else if (event == WLC_E_PROBREQ_MSG) {
+
+		/* Handle probe request frames
+		 * (WPS-AP certification 4.2.13)
+		 */
+		struct parsed_ies prbreq_ies;
+		u32 prbreq_ie_len = 0;
+		bool pbc = 0;
+
+		WL_DBG((" Event WLC_E_PROBREQ_MSG received\n"));
+		mgmt_frame = (u8 *)(data);
+		mgmt_frame_len = ntoh32(e->datalen);
+
+		prbreq_ie_len = mgmt_frame_len - DOT11_MGMT_HDR_LEN;
+
+		/* Parse probe request IEs */
+		if (wl_cfg80211_parse_ies(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+			prbreq_ie_len, &prbreq_ies) < 0) {
+			WL_ERR(("Probe req IE parsing failed\n"));
+			return 0;
+		}
+		if (prbreq_ies.wps_ie != NULL) {
+			wl_validate_wps_ie((char *)prbreq_ies.wps_ie, prbreq_ies.wps_ie_len, &pbc);
+			WL_DBG((" wps_ie exist pbc = %d\n", pbc));
+			/* if PBC method, forward the probe request frame to the upper layer */
+			if (!pbc)
+				return 0;
+		} else
+			return 0;
+	} else {
+		mgmt_frame = (u8 *)((wl_event_rx_frame_data_t *)rxframe + 1);
+
+		/* wpa_supplicant uses the probe request event to restart another GON
+		 * request, which causes GON request repetition. So if the source
+		 * address of the probe request matches our target device, do not
+		 * report the probe request event while an action frame is being sent.
+		 */
+		if (event == WLC_E_P2P_PROBREQ_MSG) {
+			WL_DBG((" Event %s\n", (event == WLC_E_P2P_PROBREQ_MSG) ?
+				"WLC_E_P2P_PROBREQ_MSG":"WLC_E_PROBREQ_MSG"));
+
+
+			/* Filter any P2P probe reqs arriving during the
+			 * GO-NEG Phase
+			 */
+			if (cfg->p2p &&
+				wl_get_p2p_status(cfg, GO_NEG_PHASE)) {
+				WL_DBG(("Filtering P2P probe_req while "
+					"being in GO-Neg state\n"));
+				return 0;
+			}
+		}
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+	cfg80211_rx_mgmt(cfgdev, freq, 0,  mgmt_frame, mgmt_frame_len, 0, GFP_ATOMIC);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
+	defined(WL_COMPAT_WIRELESS)
+	cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
+#else
+	cfg80211_rx_mgmt(cfgdev, freq, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
+#endif /* LINUX_VERSION >= VERSION(3, 14, 0) */
+
+	WL_DBG(("mgmt_frame_len (%d) , e->datalen (%d), channel (%d), freq (%d)\n",
+		mgmt_frame_len, ntoh32(e->datalen), channel, freq));
+exit:
+	if (isfree)
+		kfree(mgmt_frame);
+	return 0;
+}
+
+#ifdef WL_SCHED_SCAN
+/* If target scan is not reliable, set the below define to "1" to do a
+ * full escan
+ */
+#define FULL_ESCAN_ON_PFN_NET_FOUND		0
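+/*
+ * PNO/PFN "network found" handler: translate the firmware PFN results into
+ * a temporary cfg80211_scan_request and kick off a targeted (or full) escan
+ * so the results reach cfg80211 with complete IEs.
+ */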
+static s32
+wl_notify_sched_scan_results(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data)
+{
+	wl_pfn_net_info_t *netinfo, *pnetinfo;
+	struct wiphy *wiphy	= bcmcfg_to_wiphy(cfg);
+	int err = 0;
+	struct cfg80211_scan_request *request = NULL;
+	struct cfg80211_ssid ssid[MAX_PFN_LIST_COUNT];
+	struct ieee80211_channel *channel = NULL;
+	int channel_req = 0;
+	int band = 0;
+	struct wl_pfn_scanresults *pfn_result = (struct wl_pfn_scanresults *)data;
+	int n_pfn_results = pfn_result->count;
+
+	WL_DBG(("Enter\n"));
+
+	if (e->event_type == WLC_E_PFN_NET_LOST) {
+		WL_PNO(("PFN NET LOST event. Do Nothing \n"));
+		return 0;
+	}
+	WL_PNO((">>> PFN NET FOUND event. count:%d \n", n_pfn_results));
+	if (n_pfn_results > 0) {
+		int i;
+
+		if (n_pfn_results > MAX_PFN_LIST_COUNT)
+			n_pfn_results = MAX_PFN_LIST_COUNT;
+		pnetinfo = (wl_pfn_net_info_t *)(data + sizeof(wl_pfn_scanresults_t)
+				- sizeof(wl_pfn_net_info_t));
+
+		memset(&ssid, 0x00, sizeof(ssid));
+
+		request = kzalloc(sizeof(*request)
+			+ sizeof(*request->channels) * n_pfn_results,
+			GFP_KERNEL);
+		channel = (struct ieee80211_channel *)kzalloc(
+			(sizeof(struct ieee80211_channel) * n_pfn_results),
+			GFP_KERNEL);
+		if (!request || !channel) {
+			WL_ERR(("No memory"));
+			err = -ENOMEM;
+			goto out_err;
+		}
+
+		request->wiphy = wiphy;
+
+		for (i = 0; i < n_pfn_results; i++) {
+			netinfo = &pnetinfo[i];
+			if (!netinfo) {
+				WL_ERR(("Invalid netinfo ptr. index:%d", i));
+				err = -EINVAL;
+				goto out_err;
+			}
+			WL_PNO((">>> SSID:%s Channel:%d \n",
+				netinfo->pfnsubnet.SSID, netinfo->pfnsubnet.channel));
+			/* A PFN result doesn't have all the info required by the
+			 * supplicant (e.g. IEs). Do a targeted escan so that sched scan
+			 * results are reported via wl_inform_single_bss in the required
+			 * format. Escan needs the scan request in the form of a
+			 * cfg80211_scan_request, so for the time being build one from
+			 * the received PNO event.
+			 */
+			memcpy(ssid[i].ssid, netinfo->pfnsubnet.SSID,
+				netinfo->pfnsubnet.SSID_len);
+			ssid[i].ssid_len = netinfo->pfnsubnet.SSID_len;
+			request->n_ssids++;
+
+			channel_req = netinfo->pfnsubnet.channel;
+			band = (channel_req <= CH_MAX_2G_CHANNEL) ? NL80211_BAND_2GHZ
+				: NL80211_BAND_5GHZ;
+			channel[i].center_freq = ieee80211_channel_to_frequency(channel_req, band);
+			channel[i].band = band;
+			channel[i].flags |= IEEE80211_CHAN_NO_HT40;
+			request->channels[i] = &channel[i];
+			request->n_channels++;
+		}
+
+		/* assign parsed ssid array */
+		if (request->n_ssids)
+			request->ssids = &ssid[0];
+
+		if (wl_get_drv_status_all(cfg, SCANNING)) {
+			/* Abort any on-going scan */
+			wl_notify_escan_complete(cfg, ndev, true, true);
+		}
+
+		if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+			WL_PNO((">>> P2P discovery was ON. Disabling it\n"));
+			err = wl_cfgp2p_discover_enable_search(cfg, false);
+			if (unlikely(err)) {
+				wl_clr_drv_status(cfg, SCANNING, ndev);
+				goto out_err;
+			}
+			p2p_scan(cfg) = false;
+		}
+
+		wl_set_drv_status(cfg, SCANNING, ndev);
+#if FULL_ESCAN_ON_PFN_NET_FOUND
+		WL_PNO((">>> Doing Full ESCAN on PNO event\n"));
+		err = wl_do_escan(cfg, wiphy, ndev, NULL);
+#else
+		WL_PNO((">>> Doing targeted ESCAN on PNO event\n"));
+		err = wl_do_escan(cfg, wiphy, ndev, request);
+#endif
+		if (err) {
+			wl_clr_drv_status(cfg, SCANNING, ndev);
+			goto out_err;
+		}
+		cfg->sched_scan_running = TRUE;
+	}
+	else {
+		WL_ERR(("FALSE PNO Event. (pfn_count == 0) \n"));
+	}
+out_err:
+	if (request)
+		kfree(request);
+	if (channel)
+		kfree(channel);
+	return err;
+}
+#endif /* WL_SCHED_SCAN */
+
+static void wl_init_conf(struct wl_conf *conf)
+{
+	WL_DBG(("Enter \n"));
+	conf->frag_threshold = (u32)-1;
+	conf->rts_threshold = (u32)-1;
+	conf->retry_short = (u32)-1;
+	conf->retry_long = (u32)-1;
+	conf->tx_power = -1;
+}
+
+static void wl_init_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	unsigned long flags;
+	struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
+
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	memset(profile, 0, sizeof(struct wl_profile));
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+}
+
+static void wl_init_event_handler(struct bcm_cfg80211 *cfg)
+{
+	memset(cfg->evt_handler, 0, sizeof(cfg->evt_handler));
+
+	cfg->evt_handler[WLC_E_SCAN_COMPLETE] = wl_notify_scan_status;
+	cfg->evt_handler[WLC_E_AUTH] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_ASSOC] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_LINK] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_DEAUTH_IND] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_DEAUTH] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_DISASSOC_IND] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_ASSOC_IND] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_REASSOC_IND] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_ROAM] = wl_notify_roaming_status;
+	cfg->evt_handler[WLC_E_MIC_ERROR] = wl_notify_mic_status;
+	cfg->evt_handler[WLC_E_SET_SSID] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_ACTION_FRAME_RX] = wl_notify_rx_mgmt_frame;
+	cfg->evt_handler[WLC_E_PROBREQ_MSG] = wl_notify_rx_mgmt_frame;
+	cfg->evt_handler[WLC_E_P2P_PROBREQ_MSG] = wl_notify_rx_mgmt_frame;
+	cfg->evt_handler[WLC_E_P2P_DISC_LISTEN_COMPLETE] = wl_cfgp2p_listen_complete;
+	cfg->evt_handler[WLC_E_ACTION_FRAME_COMPLETE] = wl_cfgp2p_action_tx_complete;
+	cfg->evt_handler[WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE] = wl_cfgp2p_action_tx_complete;
+	cfg->evt_handler[WLC_E_JOIN] = wl_notify_connect_status;
+	cfg->evt_handler[WLC_E_START] = wl_notify_connect_status;
+#ifdef PNO_SUPPORT
+	cfg->evt_handler[WLC_E_PFN_NET_FOUND] = wl_notify_pfn_status;
+#endif /* PNO_SUPPORT */
+#ifdef WLTDLS
+	cfg->evt_handler[WLC_E_TDLS_PEER_EVENT] = wl_tdls_event_handler;
+#endif /* WLTDLS */
+	cfg->evt_handler[WLC_E_BSSID] = wl_notify_roaming_status;
+}
+
+#if defined(STATIC_WL_PRIV_STRUCT)
+static void
+wl_init_escan_result_buf(struct bcm_cfg80211 *cfg)
+{
+	cfg->escan_info.escan_buf = DHD_OS_PREALLOC(cfg->pub, DHD_PREALLOC_WIPHY_ESCAN0, 0);
+	bzero(cfg->escan_info.escan_buf, ESCAN_BUF_SIZE);
+}
+
+static void
+wl_deinit_escan_result_buf(struct bcm_cfg80211 *cfg)
+{
+	cfg->escan_info.escan_buf = NULL;
+
+}
+#endif /* STATIC_WL_PRIV_STRUCT */
+
+static s32 wl_init_priv_mem(struct bcm_cfg80211 *cfg)
+{
+	WL_DBG(("Enter \n"));
+	cfg->scan_results = (void *)kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
+	if (unlikely(!cfg->scan_results)) {
+		WL_ERR(("Scan results alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->conf = (void *)kzalloc(sizeof(*cfg->conf), GFP_KERNEL);
+	if (unlikely(!cfg->conf)) {
+		WL_ERR(("wl_conf alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->scan_req_int =
+	    (void *)kzalloc(sizeof(*cfg->scan_req_int), GFP_KERNEL);
+	if (unlikely(!cfg->scan_req_int)) {
+		WL_ERR(("Scan req alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+	if (unlikely(!cfg->ioctl_buf)) {
+		WL_ERR(("Ioctl buf alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->escan_ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+	if (unlikely(!cfg->escan_ioctl_buf)) {
+		WL_ERR(("Ioctl buf alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->extra_buf = (void *)kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
+	if (unlikely(!cfg->extra_buf)) {
+		WL_ERR(("Extra buf alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->pmk_list = (void *)kzalloc(sizeof(*cfg->pmk_list), GFP_KERNEL);
+	if (unlikely(!cfg->pmk_list)) {
+		WL_ERR(("pmk list alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->sta_info = (void *)kzalloc(sizeof(*cfg->sta_info), GFP_KERNEL);
+	if (unlikely(!cfg->sta_info)) {
+		WL_ERR(("sta info alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+
+#if defined(STATIC_WL_PRIV_STRUCT)
+	cfg->conn_info = (void *)kzalloc(sizeof(*cfg->conn_info), GFP_KERNEL);
+	if (unlikely(!cfg->conn_info)) {
+		WL_ERR(("cfg->conn_info alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	cfg->ie = (void *)kzalloc(sizeof(*cfg->ie), GFP_KERNEL);
+	if (unlikely(!cfg->ie)) {
+		WL_ERR(("cfg->ie alloc failed\n"));
+		goto init_priv_mem_out;
+	}
+	wl_init_escan_result_buf(cfg);
+#endif /* STATIC_WL_PRIV_STRUCT */
+	cfg->afx_hdl = (void *)kzalloc(sizeof(*cfg->afx_hdl), GFP_KERNEL);
+	if (unlikely(!cfg->afx_hdl)) {
+		WL_ERR(("afx hdl alloc failed\n"));
+		goto init_priv_mem_out;
+	} else {
+		init_completion(&cfg->act_frm_scan);
+		init_completion(&cfg->wait_next_af);
+
+		INIT_WORK(&cfg->afx_hdl->work, wl_cfg80211_afx_handler);
+	}
+	return 0;
+
+init_priv_mem_out:
+	wl_deinit_priv_mem(cfg);
+
+	return -ENOMEM;
+}
+
+static void wl_deinit_priv_mem(struct bcm_cfg80211 *cfg)
+{
+	kfree(cfg->scan_results);
+	cfg->scan_results = NULL;
+	kfree(cfg->conf);
+	cfg->conf = NULL;
+	kfree(cfg->scan_req_int);
+	cfg->scan_req_int = NULL;
+	kfree(cfg->ioctl_buf);
+	cfg->ioctl_buf = NULL;
+	kfree(cfg->escan_ioctl_buf);
+	cfg->escan_ioctl_buf = NULL;
+	kfree(cfg->extra_buf);
+	cfg->extra_buf = NULL;
+	kfree(cfg->pmk_list);
+	cfg->pmk_list = NULL;
+	kfree(cfg->sta_info);
+	cfg->sta_info = NULL;
+#if defined(STATIC_WL_PRIV_STRUCT)
+	kfree(cfg->conn_info);
+	cfg->conn_info = NULL;
+	kfree(cfg->ie);
+	cfg->ie = NULL;
+	wl_deinit_escan_result_buf(cfg);
+#endif /* STATIC_WL_PRIV_STRUCT */
+	if (cfg->afx_hdl) {
+		cancel_work_sync(&cfg->afx_hdl->work);
+		kfree(cfg->afx_hdl);
+		cfg->afx_hdl = NULL;
+	}
+
+	if (cfg->ap_info) {
+		kfree(cfg->ap_info->wpa_ie);
+		kfree(cfg->ap_info->rsn_ie);
+		kfree(cfg->ap_info->wps_ie);
+		kfree(cfg->ap_info);
+		cfg->ap_info = NULL;
+	}
+}
+
+static s32 wl_create_event_handler(struct bcm_cfg80211 *cfg)
+{
+	int ret = 0;
+	WL_DBG(("Enter \n"));
+
+	/* Do not use DHD in cfg driver */
+	cfg->event_tsk.thr_pid = -1;
+
+	PROC_START(wl_event_handler, cfg, &cfg->event_tsk, 0, "wl_event_handler");
+	if (cfg->event_tsk.thr_pid < 0)
+		ret = -ENOMEM;
+	return ret;
+}
+
+static void wl_destroy_event_handler(struct bcm_cfg80211 *cfg)
+{
+	if (cfg->event_tsk.thr_pid >= 0)
+		PROC_STOP(&cfg->event_tsk);
+}
+
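+/*
+ * Scan watchdog: if a scan request is still outstanding when the timer
+ * fires, inject a synthetic WLC_E_ESCAN_RESULT event with status TIMEOUT
+ * (reason 0xFFFFFFFF) so the escan handler tears the scan down.
+ */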
+static void wl_scan_timeout(unsigned long data)
+{
+	wl_event_msg_t msg;
+	struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
+
+	if (!(cfg->scan_request)) {
+		WL_ERR(("timer expired but no scan request\n"));
+		return;
+	}
+	bzero(&msg, sizeof(wl_event_msg_t));
+	WL_ERR(("timer expired\n"));
+	msg.event_type = hton32(WLC_E_ESCAN_RESULT);
+	msg.status = hton32(WLC_E_STATUS_TIMEOUT);
+	msg.reason = 0xFFFFFFFF;
+	wl_cfg80211_event(bcmcfg_to_prmry_ndev(cfg), &msg, NULL);
+}
+
+static s32
+wl_cfg80211_netdev_notifier_call(struct notifier_block * nb,
+	unsigned long state,
+	void *ndev)
+{
+	struct net_device *dev = ndev;
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	WL_DBG(("Enter \n"));
+
+	if (!wdev || !cfg || dev == bcmcfg_to_prmry_ndev(cfg))
+		return NOTIFY_DONE;
+
+	switch (state) {
+		case NETDEV_DOWN:
+		{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+			int max_wait_timeout = 2;
+			int max_wait_count = 100;
+			int refcnt = 0;
+			unsigned long limit = jiffies + max_wait_timeout * HZ;
+			while (work_pending(&wdev->cleanup_work)) {
+				if (refcnt%5 == 0) {
+					WL_ERR(("[NETDEV_DOWN] wait for "
+						"complete of cleanup_work"
+						" (%d th)\n", refcnt));
+				}
+				if (!time_before(jiffies, limit)) {
+					WL_ERR(("[NETDEV_DOWN] cleanup_work"
+						" of CFG80211 is not"
+						" completed in %d sec\n",
+						max_wait_timeout));
+					break;
+				}
+				if (refcnt >= max_wait_count) {
+					WL_ERR(("[NETDEV_DOWN] cleanup_work"
+						" of CFG80211 is not"
+						" completed in %d loop\n",
+						max_wait_count));
+					break;
+				}
+				set_current_state(TASK_INTERRUPTIBLE);
+				schedule_timeout(100);
+				set_current_state(TASK_RUNNING);
+				refcnt++;
+			}
+#endif /* LINUX_VERSION <  VERSION(3, 14, 0) */
+			break;
+		}
+
+		case NETDEV_UNREGISTER:
+			/* after calling list_del_rcu(&wdev->list) */
+			wl_dealloc_netinfo(cfg, ndev);
+			break;
+		case NETDEV_GOING_DOWN:
+			/* At NETDEV_DOWN, wdev_cleanup_work will be called. Before that
+			 * happens, check whether a scan is still in progress; if one is,
+			 * wdev_cleanup_work triggers a WARN_ON and forces the scan to
+			 * complete, so finish it here first.
+			 */
+			if (wl_get_drv_status(cfg, SCANNING, dev))
+				wl_notify_escan_complete(cfg, dev, true, true);
+			break;
+	}
+	return NOTIFY_DONE;
+}
+static struct notifier_block wl_cfg80211_netdev_notifier = {
+	.notifier_call = wl_cfg80211_netdev_notifier_call,
+};
+/* Make sure we don't register the same notifier twice; otherwise a loop is
+ * likely to be created in the kernel notifier linked list (with 'next'
+ * pointing to itself).
+ */
+static bool wl_cfg80211_netdev_notifier_registered = FALSE;
+
+static void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg)
+{
+	wl_scan_params_t *params = NULL;
+	s32 params_size = 0;
+	s32 err = BCME_OK;
+	struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+	if (!in_atomic()) {
+		/* Our scan params only need space for 1 channel and 0 ssids */
+		params = wl_cfg80211_scan_alloc_params(-1, 0, &params_size);
+		if (params == NULL) {
+			WL_ERR(("scan params allocation failed \n"));
+			err = -ENOMEM;
+		} else {
+			/* Do a scan abort to stop the driver's scan engine */
+			err = wldev_ioctl(dev, WLC_SCAN, params, params_size, true);
+			if (err < 0) {
+				WL_ERR(("scan abort failed \n"));
+			}
+			kfree(params);
+		}
+	}
+}
+
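+/*
+ * Finish an escan: optionally abort the firmware scan, stop the scan
+ * timeout timer, report the collected BSS entries, and complete any
+ * pending cfg80211 scan or sched-scan request under cfgdrv_lock.
+ */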
+static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
+	struct net_device *ndev,
+	bool aborted, bool fw_abort)
+{
+	s32 err = BCME_OK;
+	unsigned long flags;
+	struct net_device *dev;
+
+	WL_DBG(("Enter \n"));
+	if (!ndev) {
+		WL_ERR(("ndev is null\n"));
+		err = BCME_ERROR;
+		return err;
+	}
+
+	if (cfg->escan_info.ndev != ndev) {
+		WL_ERR(("ndev is different %p %p\n", cfg->escan_info.ndev, ndev));
+		err = BCME_ERROR;
+		return err;
+	}
+
+	if (cfg->scan_request) {
+		dev = bcmcfg_to_prmry_ndev(cfg);
+#if defined(WL_ENABLE_P2P_IF)
+		if (cfg->scan_request->dev != cfg->p2p_net)
+			dev = cfg->scan_request->dev;
+#endif /* WL_ENABLE_P2P_IF */
+	}
+	else {
+		WL_DBG(("cfg->scan_request is NULL (may be an internal scan);"
+			" doing scan_abort for ndev %p primary %p",
+				ndev, bcmcfg_to_prmry_ndev(cfg)));
+		dev = ndev;
+	}
+	if (fw_abort && !in_atomic())
+		wl_cfg80211_scan_abort(cfg);
+	if (timer_pending(&cfg->scan_timeout))
+		del_timer_sync(&cfg->scan_timeout);
+#if defined(ESCAN_RESULT_PATCH)
+	if (likely(cfg->scan_request)) {
+		cfg->bss_list = wl_escan_get_buf(cfg, aborted);
+		wl_inform_bss(cfg);
+	}
+#endif /* ESCAN_RESULT_PATCH */
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+#ifdef WL_SCHED_SCAN
+	if (cfg->sched_scan_req && !cfg->scan_request) {
+		WL_PNO((">>> REPORTING SCHED SCAN RESULTS \n"));
+		if (!aborted)
+			cfg80211_sched_scan_results(cfg->sched_scan_req->wiphy);
+		cfg->sched_scan_running = FALSE;
+		cfg->sched_scan_req = NULL;
+	}
+#endif /* WL_SCHED_SCAN */
+	if (likely(cfg->scan_request)) {
+		cfg80211_scan_done(cfg->scan_request, aborted);
+		cfg->scan_request = NULL;
+	}
+	if (p2p_is_on(cfg))
+		wl_clr_p2p_status(cfg, SCANNING);
+	wl_clr_drv_status(cfg, SCANNING, dev);
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+
+	return err;
+}
+
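+/*
+ * Escan event handler. WLC_E_STATUS_PARTIAL events carry one BSS each and
+ * are merged into the escan buffer (deduplicating by BSSID/band/SSID and
+ * preferring on-channel RSSI and probe-response data); SUCCESS, ABORT and
+ * error statuses complete the scan and report the accumulated results.
+ */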
+static s32 wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	s32 err = BCME_OK;
+	s32 status = ntoh32(e->status);
+	wl_bss_info_t *bi;
+	wl_escan_result_t *escan_result;
+	wl_bss_info_t *bss = NULL;
+	wl_scan_results_t *list;
+	wifi_p2p_ie_t * p2p_ie;
+	struct net_device *ndev = NULL;
+	u32 bi_length;
+	u32 i;
+	u8 *p2p_dev_addr = NULL;
+
+	WL_DBG((" enter event type : %d, status : %d \n",
+		ntoh32(e->event_type), ntoh32(e->status)));
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	mutex_lock(&cfg->usr_sync);
+	/* P2P SCAN is coming from primary interface */
+	if (wl_get_p2p_status(cfg, SCANNING)) {
+		if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM))
+			ndev = cfg->afx_hdl->dev;
+		else
+			ndev = cfg->escan_info.ndev;
+
+	}
+	if (!ndev || (!wl_get_drv_status(cfg, SCANNING, ndev) && !cfg->sched_scan_running)) {
+		WL_ERR(("escan is not ready ndev %p drv_status 0x%x e_type %d e_states %d\n",
+			ndev, wl_get_drv_status(cfg, SCANNING, ndev),
+			ntoh32(e->event_type), ntoh32(e->status)));
+		goto exit;
+	}
+	escan_result = (wl_escan_result_t *)data;
+
+	if (status == WLC_E_STATUS_PARTIAL) {
+		WL_INFO(("WLC_E_STATUS_PARTIAL \n"));
+		if (!escan_result) {
+			WL_ERR(("Invalid escan result (NULL pointer)\n"));
+			goto exit;
+		}
+		if (dtoh16(escan_result->bss_count) != 1) {
+			WL_ERR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count));
+			goto exit;
+		}
+		bi = escan_result->bss_info;
+		if (!bi) {
+			WL_ERR(("Invalid escan bss info (NULL pointer)\n"));
+			goto exit;
+		}
+		bi_length = dtoh32(bi->length);
+		if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) {
+			WL_ERR(("Invalid bss_info length %d: ignoring\n", bi_length));
+			goto exit;
+		}
+		if (wl_escan_check_sync_id(status, escan_result->sync_id,
+			cfg->escan_info.cur_sync_id) < 0)
+			goto exit;
+
+		if (!(bcmcfg_to_wiphy(cfg)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) {
+			if (dtoh16(bi->capability) & DOT11_CAP_IBSS) {
+				WL_DBG(("Ignoring IBSS result\n"));
+				goto exit;
+			}
+		}
+
+		if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+			p2p_dev_addr = wl_cfgp2p_retreive_p2p_dev_addr(bi, bi_length);
+			if (p2p_dev_addr && !memcmp(p2p_dev_addr,
+				cfg->afx_hdl->tx_dst_addr.octet, ETHER_ADDR_LEN)) {
+				s32 channel = wf_chspec_ctlchan(
+					wl_chspec_driver_to_host(bi->chanspec));
+
+				if ((channel > MAXCHANNEL) || (channel <= 0))
+					channel = WL_INVALID;
+				else
+					WL_ERR(("ACTION FRAME SCAN : Peer " MACDBG " found,"
+						" channel : %d\n",
+						MAC2STRDBG(cfg->afx_hdl->tx_dst_addr.octet),
+						channel));
+
+				wl_clr_p2p_status(cfg, SCANNING);
+				cfg->afx_hdl->peer_chan = channel;
+				complete(&cfg->act_frm_scan);
+				goto exit;
+			}
+
+		} else {
+			int cur_len = WL_SCAN_RESULTS_FIXED_SIZE;
+			list = wl_escan_get_buf(cfg, FALSE);
+			if (scan_req_match(cfg)) {
+				/* p2p scan: allow only probe responses */
+				if ((cfg->p2p->search_state != WL_P2P_DISC_ST_SCAN) &&
+					(bi->flags & WL_BSS_FLAGS_FROM_BEACON))
+					goto exit;
+				if ((p2p_ie = wl_cfgp2p_find_p2pie(((u8 *) bi) + bi->ie_offset,
+					bi->ie_length)) == NULL) {
+						WL_ERR(("Couldn't find P2PIE in probe"
+							" response/beacon\n"));
+						goto exit;
+				}
+			}
+			for (i = 0; i < list->count; i++) {
+				bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length))
+					: list->bss_info;
+
+				if (!bcmp(&bi->BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
+					(CHSPEC_BAND(wl_chspec_driver_to_host(bi->chanspec))
+					== CHSPEC_BAND(wl_chspec_driver_to_host(bss->chanspec))) &&
+					bi->SSID_len == bss->SSID_len &&
+					!bcmp(bi->SSID, bss->SSID, bi->SSID_len)) {
+
+					/* do not allow beacon data to update
+					 * the data received from a probe response
+					 */
+					if (!(bss->flags & WL_BSS_FLAGS_FROM_BEACON) &&
+						(bi->flags & WL_BSS_FLAGS_FROM_BEACON))
+						goto exit;
+
+					WL_DBG(("%s("MACDBG"), i=%d prev: RSSI %d"
+						" flags 0x%x, new: RSSI %d flags 0x%x\n",
+						bss->SSID, MAC2STRDBG(bi->BSSID.octet), i,
+						bss->RSSI, bss->flags, bi->RSSI, bi->flags));
+
+					if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) ==
+						(bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL)) {
+						/* preserve max RSSI if the measurements are
+						 * both on-channel or both off-channel
+						 */
+						WL_SCAN(("%s("MACDBG"), same onchan"
+						", RSSI: prev %d new %d\n",
+						bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+						bss->RSSI, bi->RSSI));
+						bi->RSSI = MAX(bss->RSSI, bi->RSSI);
+					} else if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) &&
+						(bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) == 0) {
+						/* preserve the on-channel rssi measurement
+						 * if the new measurement is off channel
+						 */
+						WL_SCAN(("%s("MACDBG"), prev onchan"
+						", RSSI: prev %d new %d\n",
+						bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+						bss->RSSI, bi->RSSI));
+						bi->RSSI = bss->RSSI;
+						bi->flags |= WL_BSS_FLAGS_RSSI_ONCHANNEL;
+					}
+					if (dtoh32(bss->length) != bi_length) {
+						u32 prev_len = dtoh32(bss->length);
+
+						WL_SCAN(("bss info replacement"
+							" occurred (bcast:%d->probresp:%d)\n",
+							bss->ie_length, bi->ie_length));
+						WL_DBG(("%s("MACDBG"), replacement!(%d -> %d)\n",
+						bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+						prev_len, bi_length));
+
+						if (list->buflen - prev_len + bi_length
+							> ESCAN_BUF_SIZE) {
+							WL_ERR(("Buffer is too small: keep the"
+								" previous result of this AP\n"));
+							/* Only update RSSI */
+							bss->RSSI = bi->RSSI;
+							bss->flags |= (bi->flags
+								& WL_BSS_FLAGS_RSSI_ONCHANNEL);
+							goto exit;
+						}
+
+						if (i < list->count - 1) {
+							/* memory copy required by this case only */
+							memmove((u8 *)bss + bi_length,
+								(u8 *)bss + prev_len,
+								list->buflen - cur_len - prev_len);
+						}
+						list->buflen -= prev_len;
+						list->buflen += bi_length;
+					}
+					list->version = dtoh32(bi->version);
+					memcpy((u8 *)bss, (u8 *)bi, bi_length);
+					goto exit;
+				}
+				cur_len += dtoh32(bss->length);
+			}
+			if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
+				WL_ERR(("Buffer is too small: ignoring\n"));
+				goto exit;
+			}
+
+			memcpy(&(((char *)list)[list->buflen]), bi, bi_length);
+			list->version = dtoh32(bi->version);
+			list->buflen += bi_length;
+			list->count++;
+
+		}
+
+	}
+	else if (status == WLC_E_STATUS_SUCCESS) {
+		cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+		wl_escan_print_sync_id(status, cfg->escan_info.cur_sync_id,
+			escan_result->sync_id);
+
+		if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+			WL_INFO(("ACTION FRAME SCAN DONE\n"));
+			wl_clr_p2p_status(cfg, SCANNING);
+			wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+			if (cfg->afx_hdl->peer_chan == WL_INVALID)
+				complete(&cfg->act_frm_scan);
+		} else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+			WL_INFO(("ESCAN COMPLETED\n"));
+			cfg->bss_list = wl_escan_get_buf(cfg, FALSE);
+			if (!scan_req_match(cfg)) {
+				WL_TRACE_HW4(("SCAN COMPLETED: scanned AP count=%d\n",
+					cfg->bss_list->count));
+			}
+			wl_inform_bss(cfg);
+			wl_notify_escan_complete(cfg, ndev, false, false);
+		}
+		wl_escan_increment_sync_id(cfg, SCAN_BUF_NEXT);
+	}
+	else if (status == WLC_E_STATUS_ABORT) {
+		cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+		wl_escan_print_sync_id(status, escan_result->sync_id,
+			cfg->escan_info.cur_sync_id);
+		if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+			WL_INFO(("ACTION FRAME SCAN DONE\n"));
+			wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+			wl_clr_p2p_status(cfg, SCANNING);
+			if (cfg->afx_hdl->peer_chan == WL_INVALID)
+				complete(&cfg->act_frm_scan);
+		} else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+			WL_INFO(("ESCAN ABORTED\n"));
+			cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
+			if (!scan_req_match(cfg)) {
+				WL_TRACE_HW4(("SCAN ABORTED: scanned AP count=%d\n",
+					cfg->bss_list->count));
+			}
+			wl_inform_bss(cfg);
+			wl_notify_escan_complete(cfg, ndev, true, false);
+		}
+		wl_escan_increment_sync_id(cfg, SCAN_BUF_CNT);
+	} else if (status == WLC_E_STATUS_NEWSCAN) {
+		WL_ERR(("WLC_E_STATUS_NEWSCAN : scan_request[%p]\n", cfg->scan_request));
+		WL_ERR(("sync_id[%d], bss_count[%d]\n", escan_result->sync_id,
+			escan_result->bss_count));
+	} else if (status == WLC_E_STATUS_TIMEOUT) {
+		WL_ERR(("WLC_E_STATUS_TIMEOUT : scan_request[%p]\n", cfg->scan_request));
+		WL_ERR(("reason[0x%x]\n", e->reason));
+		if (e->reason == 0xFFFFFFFF) {
+			wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+		}
+	} else {
+		WL_ERR(("unexpected Escan Event %d : abort\n", status));
+		cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+		wl_escan_print_sync_id(status, escan_result->sync_id,
+			cfg->escan_info.cur_sync_id);
+		if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+			WL_INFO(("ACTION FRAME SCAN DONE\n"));
+			wl_clr_p2p_status(cfg, SCANNING);
+			wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+			if (cfg->afx_hdl->peer_chan == WL_INVALID)
+				complete(&cfg->act_frm_scan);
+		} else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+			cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
+			if (!scan_req_match(cfg)) {
+				WL_TRACE_HW4(("SCAN ABORTED(UNEXPECTED): "
+					"scanned AP count=%d\n",
+					cfg->bss_list->count));
+			}
+			wl_inform_bss(cfg);
+			wl_notify_escan_complete(cfg, ndev, true, false);
+		}
+		wl_escan_increment_sync_id(cfg, 2);
+	}
+exit:
+	mutex_unlock(&cfg->usr_sync);
+	return err;
+}
+
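+/*
+ * With more than one connected interface, disable firmware roaming on all
+ * interfaces (saving each previous "roam_off" value); on disable, restore
+ * the saved settings.
+ */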
+static void wl_cfg80211_concurrent_roam(struct bcm_cfg80211 *cfg, int enable)
+{
+	u32 connected_cnt = wl_get_drv_status_all(cfg, CONNECTED);
+	struct net_info *iter, *next;
+	int err;
+
+	if (!cfg->roamoff_on_concurrent)
+		return;
+	if (enable && connected_cnt > 1) {
+		for_each_ndev(cfg, iter, next) {
+			/* Save the current roam setting */
+			if ((err = wldev_iovar_getint(iter->ndev, "roam_off",
+				(s32 *)&iter->roam_off)) != BCME_OK) {
+				WL_ERR(("%s:Failed to get current roam setting err %d\n",
+					iter->ndev->name, err));
+				continue;
+			}
+			if ((err = wldev_iovar_setint(iter->ndev, "roam_off", 1)) != BCME_OK) {
+				WL_ERR((" %s:failed to set roam_off : %d\n",
+					iter->ndev->name, err));
+			}
+		}
+	}
+	else if (!enable) {
+		for_each_ndev(cfg, iter, next) {
+			if (iter->roam_off != WL_INVALID) {
+				if ((err = wldev_iovar_setint(iter->ndev, "roam_off",
+					iter->roam_off)) == BCME_OK)
+					iter->roam_off = WL_INVALID;
+				else {
+					WL_ERR((" %s:failed to set roam_off : %d\n",
+						iter->ndev->name, err));
+				}
+			}
+		}
+	}
+	return;
+}
+
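+/*
+ * Detect VSDB (multi-channel concurrency): if two or more connected
+ * interfaces sit on different control channels, set cfg->vsdb_mode.
+ */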
+static void wl_cfg80211_determine_vsdb_mode(struct bcm_cfg80211 *cfg)
+{
+	struct net_info *iter, *next;
+	u32 ctl_chan = 0;
+	u32 chanspec = 0;
+	u32 pre_ctl_chan = 0;
+	u32 connected_cnt = wl_get_drv_status_all(cfg, CONNECTED);
+	cfg->vsdb_mode = false;
+
+	if (connected_cnt <= 1) {
+		return;
+	}
+	for_each_ndev(cfg, iter, next) {
+		chanspec = 0;
+		ctl_chan = 0;
+		if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
+			if (wldev_iovar_getint(iter->ndev, "chanspec",
+				(s32 *)&chanspec) == BCME_OK) {
+				chanspec = wl_chspec_driver_to_host(chanspec);
+				ctl_chan = wf_chspec_ctlchan(chanspec);
+				wl_update_prof(cfg, iter->ndev, NULL,
+					&ctl_chan, WL_PROF_CHAN);
+			}
+			if (!cfg->vsdb_mode) {
+				if (!pre_ctl_chan && ctl_chan)
+					pre_ctl_chan = ctl_chan;
+				else if (pre_ctl_chan && (pre_ctl_chan != ctl_chan)) {
+					cfg->vsdb_mode = true;
+				}
+			}
+		}
+	}
+	WL_ERR(("%s concurrency is enabled\n", cfg->vsdb_mode ? "Multi Channel" : "Same Channel"));
+	return;
+}
+
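+/*
+ * Connection-state notifier: on connect, update concurrent roaming, VSDB
+ * detection and per-interface power management (PM off in VSDB mode, the
+ * PM enable timer otherwise, TDLS off under VSDB); on disconnect, restore
+ * the saved PM settings and re-enable roaming/TDLS.
+ */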
+static s32 wl_notifier_change_state(struct bcm_cfg80211 *cfg, struct net_info *_net_info,
+	enum wl_status state, bool set)
+{
+	s32 pm = PM_FAST;
+	s32 err = BCME_OK;
+	u32 mode;
+	u32 chan = 0;
+	struct net_info *iter, *next;
+	struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg);
+	WL_DBG(("Enter state %d set %d _net_info->pm_restore %d iface %s\n",
+		state, set, _net_info->pm_restore, _net_info->ndev->name));
+
+	if (state != WL_STATUS_CONNECTED)
+		return 0;
+	mode = wl_get_mode_by_netdev(cfg, _net_info->ndev);
+	if (set) {
+		wl_cfg80211_concurrent_roam(cfg, 1);
+
+		if (mode == WL_MODE_AP) {
+
+			if (wl_add_remove_eventmsg(primary_dev, WLC_E_P2P_PROBREQ_MSG, false))
+				WL_ERR((" failed to unset WLC_E_P2P_PROBREQ_MSG\n"));
+		}
+		wl_cfg80211_determine_vsdb_mode(cfg);
+		if (cfg->vsdb_mode || _net_info->pm_block) {
+			/* Delete pm_enable_work */
+			wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_MAINTAIN);
+			/* save PM_FAST in _net_info to restore this
+			 * if _net_info->pm_block is false
+			 */
+			if (!_net_info->pm_block && (mode == WL_MODE_BSS)) {
+				_net_info->pm = PM_FAST;
+				_net_info->pm_restore = true;
+			}
+			pm = PM_OFF;
+			for_each_ndev(cfg, iter, next) {
+				if (iter->pm_restore)
+					continue;
+				/* Save the current power mode */
+				err = wldev_ioctl(iter->ndev, WLC_GET_PM, &iter->pm,
+					sizeof(iter->pm), false);
+				WL_DBG(("%s:power save %s\n", iter->ndev->name,
+					iter->pm ? "enabled" : "disabled"));
+				if (!err && iter->pm) {
+					iter->pm_restore = true;
+				}
+
+			}
+			for_each_ndev(cfg, iter, next) {
+				if ((err = wldev_ioctl(iter->ndev, WLC_SET_PM, &pm,
+					sizeof(pm), true)) != 0) {
+					if (err == -ENODEV)
+						WL_DBG(("%s:netdev not ready\n", iter->ndev->name));
+					else
+						WL_ERR(("%s:error (%d)\n", iter->ndev->name, err));
+					wl_cfg80211_update_power_mode(iter->ndev);
+				}
+			}
+		} else {
+			/* Add the PM enable timer to go into power save mode. If the
+			 * supplicant controls the PM mode, the timer will be cleared or
+			 * updated by wl_cfg80211_set_power_mgmt(); if not (static IP &
+			 * HW4 P2P), PM will be configured when the timer expires.
+			 */
+
+			/*
+			 * Before arming the PM enable timer, turn PM off for every ndev.
+			 */
+			pm = PM_OFF;
+
+			for_each_ndev(cfg, iter, next) {
+				if ((err = wldev_ioctl(iter->ndev, WLC_SET_PM, &pm,
+					sizeof(pm), true)) != 0) {
+					if (err == -ENODEV)
+						WL_DBG(("%s:netdev not ready\n", iter->ndev->name));
+					else
+						WL_ERR(("%s:error (%d)\n", iter->ndev->name, err));
+				}
+			}
+
+			if (cfg->pm_enable_work_on) {
+				wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL);
+			}
+
+			cfg->pm_enable_work_on = true;
+			wl_add_remove_pm_enable_work(cfg, TRUE, WL_HANDLER_NOTUSE);
+		}
+#if defined(WLTDLS)
+#if defined(DISABLE_TDLS_IN_P2P)
+		if (cfg->vsdb_mode || p2p_is_on(cfg))
+#else
+		if (cfg->vsdb_mode)
+#endif /* defined(DISABLE_TDLS_IN_P2P) */
+		{
+
+			err = wldev_iovar_setint(primary_dev, "tdls_enable", 0);
+		}
+#endif /* defined(WLTDLS) */
+	}
+	else { /* clear */
+		chan = 0;
+		/* clear chan information when the net device is disconnected */
+		wl_update_prof(cfg, _net_info->ndev, NULL, &chan, WL_PROF_CHAN);
+		wl_cfg80211_determine_vsdb_mode(cfg);
+		for_each_ndev(cfg, iter, next) {
+			if (iter->pm_restore && iter->pm) {
+				WL_DBG(("%s:restoring power save %s\n",
+					iter->ndev->name, (iter->pm ? "enabled" : "disabled")));
+				err = wldev_ioctl(iter->ndev,
+					WLC_SET_PM, &iter->pm, sizeof(iter->pm), true);
+				if (unlikely(err)) {
+					if (err == -ENODEV)
+						WL_DBG(("%s:netdev not ready\n", iter->ndev->name));
+					else
+						WL_ERR(("%s:error(%d)\n", iter->ndev->name, err));
+					break;
+				}
+				iter->pm_restore = 0;
+				wl_cfg80211_update_power_mode(iter->ndev);
+			}
+		}
+		wl_cfg80211_concurrent_roam(cfg, 0);
+#if defined(WLTDLS)
+		if (!cfg->vsdb_mode) {
+			err = wldev_iovar_setint(primary_dev, "tdls_enable", 1);
+		}
+#endif /* defined(WLTDLS) */
+	}
+	return err;
+}
+static s32 wl_init_scan(struct bcm_cfg80211 *cfg)
+{
+	int err = 0;
+
+	cfg->evt_handler[WLC_E_ESCAN_RESULT] = wl_escan_handler;
+	cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+	wl_escan_init_sync_id(cfg);
+
+	/* Init scan_timeout timer */
+	init_timer(&cfg->scan_timeout);
+	cfg->scan_timeout.data = (unsigned long) cfg;
+	cfg->scan_timeout.function = wl_scan_timeout;
+
+	return err;
+}
+
+static s32 wl_init_priv(struct bcm_cfg80211 *cfg)
+{
+	struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	s32 err = 0;
+
+	cfg->scan_request = NULL;
+	cfg->pwr_save = !!(wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT);
+	cfg->roam_on = false;
+	cfg->active_scan = true;
+	cfg->rf_blocked = false;
+	cfg->vsdb_mode = false;
+	cfg->wlfc_on = false;
+	cfg->roamoff_on_concurrent = true;
+	cfg->disable_roam_event = false;
+	/* register interested state */
+	set_bit(WL_STATUS_CONNECTED, &cfg->interrested_state);
+	spin_lock_init(&cfg->cfgdrv_lock);
+	mutex_init(&cfg->ioctl_buf_sync);
+	init_waitqueue_head(&cfg->netif_change_event);
+	init_completion(&cfg->send_af_done);
+	init_completion(&cfg->iface_disable);
+	wl_init_eq(cfg);
+	err = wl_init_priv_mem(cfg);
+	if (err)
+		return err;
+	if (wl_create_event_handler(cfg))
+		return -ENOMEM;
+	wl_init_event_handler(cfg);
+	mutex_init(&cfg->usr_sync);
+	mutex_init(&cfg->event_sync);
+	mutex_init(&cfg->p2p_wdev_sync);
+	err = wl_init_scan(cfg);
+	if (err)
+		return err;
+	wl_init_conf(cfg->conf);
+	wl_init_prof(cfg, ndev);
+	wl_link_down(cfg);
+	DNGL_FUNC(dhd_cfg80211_init, (cfg));
+
+	return err;
+}
+
+static void wl_deinit_priv(struct bcm_cfg80211 *cfg)
+{
+	DNGL_FUNC(dhd_cfg80211_deinit, (cfg));
+	wl_destroy_event_handler(cfg);
+	wl_flush_eq(cfg);
+	wl_link_down(cfg);
+	del_timer_sync(&cfg->scan_timeout);
+	wl_deinit_priv_mem(cfg);
+	if (wl_cfg80211_netdev_notifier_registered) {
+		wl_cfg80211_netdev_notifier_registered = FALSE;
+		unregister_netdevice_notifier(&wl_cfg80211_netdev_notifier);
+	}
+}
+
+#if defined(WL_ENABLE_P2P_IF)
+static s32 wl_cfg80211_attach_p2p(void)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	WL_TRACE(("Enter \n"));
+
+	if (wl_cfgp2p_register_ndev(cfg) < 0) {
+		WL_ERR(("P2P attach failed. \n"));
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static s32  wl_cfg80211_detach_p2p(void)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct wireless_dev *wdev;
+
+	WL_DBG(("Enter \n"));
+	if (!cfg) {
+		WL_ERR(("Invalid Ptr\n"));
+		return -EINVAL;
+	} else
+		wdev = cfg->p2p_wdev;
+
+	if (!wdev) {
+		WL_ERR(("Invalid Ptr\n"));
+		return -EINVAL;
+	}
+
+	wl_cfgp2p_unregister_ndev(cfg);
+
+	cfg->p2p_wdev = NULL;
+	cfg->p2p_net = NULL;
+	WL_DBG(("Freeing %p \n", wdev));
+	kfree(wdev);
+
+	return 0;
+}
+#endif /* WL_ENABLE_P2P_IF */
+
+s32 wl_cfg80211_attach_post(struct net_device *ndev)
+{
+	struct bcm_cfg80211 * cfg = NULL;
+	s32 err = 0;
+	s32 ret = 0;
+	WL_TRACE(("In\n"));
+	if (unlikely(!ndev)) {
+		WL_ERR(("ndev is invalid\n"));
+		return -ENODEV;
+	}
+	cfg = g_bcm_cfg;
+	if (unlikely(!cfg)) {
+		WL_ERR(("cfg is invalid\n"));
+		return -EINVAL;
+	}
+	if (!wl_get_drv_status(cfg, READY, ndev)) {
+		if (cfg->wdev) {
+			ret = wl_cfgp2p_supported(cfg, ndev);
+			if (ret > 0) {
+#if !defined(WL_ENABLE_P2P_IF)
+				cfg->wdev->wiphy->interface_modes |=
+					(BIT(NL80211_IFTYPE_P2P_CLIENT)|
+					BIT(NL80211_IFTYPE_P2P_GO));
+#endif /* !WL_ENABLE_P2P_IF */
+				if ((err = wl_cfgp2p_init_priv(cfg)) != 0)
+					goto fail;
+
+#if defined(WL_ENABLE_P2P_IF)
+				if (cfg->p2p_net) {
+					/* Update MAC addr for p2p0 interface here. */
+					memcpy(cfg->p2p_net->dev_addr, ndev->dev_addr, ETH_ALEN);
+					cfg->p2p_net->dev_addr[0] |= 0x02;
+					WL_ERR(("%s: p2p_dev_addr="MACDBG "\n",
+						cfg->p2p_net->name,
+						MAC2STRDBG(cfg->p2p_net->dev_addr)));
+				} else {
+					WL_ERR(("p2p_net not yet populated."
+					" Couldn't update the MAC Address for p2p0 \n"));
+					return -ENODEV;
+				}
+#endif /* WL_ENABLE_P2P_IF */
+				cfg->p2p_supported = true;
+			} else if (ret == 0) {
+				if ((err = wl_cfgp2p_init_priv(cfg)) != 0)
+					goto fail;
+			} else {
+				/* SDIO bus timeout */
+				err = -ENODEV;
+				goto fail;
+			}
+		}
+	}
+	wl_set_drv_status(cfg, READY, ndev);
+fail:
+	return err;
+}
+
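+/*
+ * Top-level attach: allocate and set up the wiphy/wireless_dev, initialize
+ * the driver private state, rfkill, debugfs and the netdev notifier, and,
+ * when enabled, BT-coex and the P2P interface.
+ */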
+s32 wl_cfg80211_attach(struct net_device *ndev, void *context)
+{
+	struct wireless_dev *wdev;
+	struct bcm_cfg80211 *cfg;
+	s32 err = 0;
+	struct device *dev;
+
+	WL_TRACE(("In\n"));
+	if (!ndev) {
+		WL_ERR(("ndev is invalid\n"));
+		return -ENODEV;
+	}
+	WL_DBG(("func %p\n", wl_cfg80211_get_parent_dev()));
+	dev = wl_cfg80211_get_parent_dev();
+
+	wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+	if (unlikely(!wdev)) {
+		WL_ERR(("Could not allocate wireless device\n"));
+		return -ENOMEM;
+	}
+	err = wl_setup_wiphy(wdev, dev, context);
+	if (unlikely(err)) {
+		kfree(wdev);
+		return -ENOMEM;
+	}
+	wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS);
+	cfg = (struct bcm_cfg80211 *)wiphy_priv(wdev->wiphy);
+	cfg->wdev = wdev;
+	cfg->pub = context;
+	INIT_LIST_HEAD(&cfg->net_list);
+	ndev->ieee80211_ptr = wdev;
+	SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
+	wdev->netdev = ndev;
+	cfg->state_notifier = wl_notifier_change_state;
+	err = wl_alloc_netinfo(cfg, ndev, wdev, WL_MODE_BSS, PM_ENABLE);
+	if (err) {
+		WL_ERR(("Failed to alloc net_info (%d)\n", err));
+		goto cfg80211_attach_out;
+	}
+	err = wl_init_priv(cfg);
+	if (err) {
+		WL_ERR(("Failed to init wl_init_priv (%d)\n", err));
+		goto cfg80211_attach_out;
+	}
+	if (timer_pending(&cfg->scan_timeout))
+		del_timer_sync(&cfg->scan_timeout);
+
+	err = wl_setup_rfkill(cfg, TRUE);
+	if (err) {
+		WL_ERR(("Failed to setup rfkill %d\n", err));
+		goto cfg80211_attach_out;
+	}
+#ifdef DEBUGFS_CFG80211
+	err = wl_setup_debugfs(cfg);
+	if (err) {
+		WL_ERR(("Failed to setup debugfs %d\n", err));
+		goto cfg80211_attach_out;
+	}
+#endif
+	if (!wl_cfg80211_netdev_notifier_registered) {
+		wl_cfg80211_netdev_notifier_registered = TRUE;
+		err = register_netdevice_notifier(&wl_cfg80211_netdev_notifier);
+		if (err) {
+			wl_cfg80211_netdev_notifier_registered = FALSE;
+			WL_ERR(("Failed to register notifier %d\n", err));
+			goto cfg80211_attach_out;
+		}
+	}
+#if defined(COEX_DHCP)
+	cfg->btcoex_info = wl_cfg80211_btcoex_init(cfg->wdev->netdev);
+	if (!cfg->btcoex_info)
+		goto cfg80211_attach_out;
+#endif /* COEX_DHCP */
+
+	g_bcm_cfg = cfg;
+
+#if defined(WL_ENABLE_P2P_IF)
+	err = wl_cfg80211_attach_p2p();
+	if (err)
+		goto cfg80211_attach_out;
+#endif /* WL_ENABLE_P2P_IF */
+
+	return err;
+
+cfg80211_attach_out:
+	wl_setup_rfkill(cfg, FALSE);
+	wl_free_wdev(cfg);
+	return err;
+}
+
+void wl_cfg80211_detach(void *para)
+{
+	struct bcm_cfg80211 *cfg;
+
+	(void)para;
+	cfg = g_bcm_cfg;
+
+	WL_TRACE(("In\n"));
+
+	wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL);
+
+#if defined(COEX_DHCP)
+	wl_cfg80211_btcoex_deinit();
+	cfg->btcoex_info = NULL;
+#endif /* COEX_DHCP */
+
+	wl_setup_rfkill(cfg, FALSE);
+#ifdef DEBUGFS_CFG80211
+	wl_free_debugfs(cfg);
+#endif
+	if (cfg->p2p_supported)
+		wl_cfgp2p_down(cfg);
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg);
+#endif /* WL_CFG80211_P2P_DEV_IF  */
+#if defined(WL_ENABLE_P2P_IF)
+	wl_cfg80211_detach_p2p();
+#endif /* WL_ENABLE_P2P_IF */
+
+	wl_cfg80211_ibss_vsie_free(cfg);
+	wl_deinit_priv(cfg);
+	g_bcm_cfg = NULL;
+	wl_cfg80211_clear_parent_dev();
+	wl_free_wdev(cfg);
+	/* Do NOT call any function after wl_free_wdev(): the driver's private
+	 * structure "cfg", which is the private part of the wiphy, has been
+	 * freed by wl_free_wdev().
+	 */
+}
+
+static void wl_wakeup_event(struct bcm_cfg80211 *cfg)
+{
+	if (cfg->event_tsk.thr_pid >= 0) {
+		DHD_OS_WAKE_LOCK(cfg->pub);
+		up(&cfg->event_tsk.sema);
+	}
+}
+
+#if (defined(WL_CFG80211_P2P_DEV_IF) || defined(WL_ENABLE_P2P_IF))
+static int wl_is_p2p_event(struct wl_event_q *e)
+{
+	switch (e->etype) {
+	/* We have to separate out the P2P events received
+	 * on the primary interface so that they can be sent up
+	 * via the p2p0 interface.
+	 */
+	case WLC_E_P2P_PROBREQ_MSG:
+	case WLC_E_P2P_DISC_LISTEN_COMPLETE:
+	case WLC_E_ACTION_FRAME_RX:
+	case WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE:
+	case WLC_E_ACTION_FRAME_COMPLETE:
+
+		if (e->emsg.ifidx != 0) {
+			WL_TRACE(("P2P event(%d) on virtual interface(ifidx:%d)\n",
+				e->etype, e->emsg.ifidx));
+			/* We only care about P2P events received on the primary
+			 * interface. For the rest, return FALSE so the event is
+			 * sent over the interface corresponding to ifidx.
+			 */
+			return FALSE;
+#if defined(WL_CFG80211_P2P_DEV_IF)
+		} else if (e->emsg.bsscfgidx == P2PAPI_BSSCFG_PRIMARY) {
+			WL_TRACE(("P2P event(%d) on STA interface\n",
+				  e->etype));
+			return FALSE;
+#endif
+		} else {
+			WL_TRACE(("P2P event(%d) on interface(ifidx:%d)\n",
+				e->etype, e->emsg.ifidx));
+			return TRUE;
+		}
+		break;
+
+	default:
+		WL_TRACE(("NON-P2P event(%d) on interface(ifidx:%d)\n",
+			e->etype, e->emsg.ifidx));
+		return FALSE;
+	}
+}
+#endif /* WL_CFG80211_P2P_DEV_IF || WL_ENABLE_P2P_IF */
+
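+/*
+ * Event dispatch thread: wait on the task semaphore, drain the event
+ * queue, map each event to the right cfgdev (p2p0, primary or per-ifidx)
+ * and invoke the registered handler, then drop the wake lock taken by
+ * wl_wakeup_event().
+ */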
+static s32 wl_event_handler(void *data)
+{
+	struct bcm_cfg80211 *cfg = NULL;
+	struct wl_event_q *e;
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	bcm_struct_cfgdev *cfgdev = NULL;
+
+	cfg = (struct bcm_cfg80211 *)tsk->parent;
+
+	WL_ERR(("tsk Enter, tsk = 0x%p\n", tsk));
+
+	while (down_interruptible (&tsk->sema) == 0) {
+		SMP_RD_BARRIER_DEPENDS();
+		if (tsk->terminated)
+			break;
+		while ((e = wl_deq_event(cfg))) {
+			WL_DBG(("event type (%d), if idx: %d\n", e->etype, e->emsg.ifidx));
+			/* All P2P device address related events come on the primary
+			 * interface, since there is no corresponding bsscfg for the P2P
+			 * interface. Map them to the p2p0 interface.
+			 */
+#if defined(WL_CFG80211_P2P_DEV_IF)
+			if ((wl_is_p2p_event(e) == TRUE) && (cfg->p2p_wdev)) {
+				cfgdev = bcmcfg_to_p2p_wdev(cfg);
+			} else {
+				struct net_device *ndev = NULL;
+
+				ndev = dhd_idx2net((struct dhd_pub *)(cfg->pub), e->emsg.ifidx);
+				if (ndev)
+					cfgdev = ndev_to_wdev(ndev);
+			}
+#elif defined(WL_ENABLE_P2P_IF)
+			if ((wl_is_p2p_event(e) == TRUE) && (cfg->p2p_net)) {
+				cfgdev = cfg->p2p_net;
+			} else {
+				cfgdev = dhd_idx2net((struct dhd_pub *)(cfg->pub),
+					e->emsg.ifidx);
+			}
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+			if (!cfgdev) {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+				cfgdev = bcmcfg_to_prmry_wdev(cfg);
+#elif defined(WL_ENABLE_P2P_IF)
+				cfgdev = bcmcfg_to_prmry_ndev(cfg);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+			}
+			if (e->etype < WLC_E_LAST && cfg->evt_handler[e->etype]) {
+				cfg->evt_handler[e->etype] (cfg, cfgdev, &e->emsg, e->edata);
+			} else {
+				WL_DBG(("Unknown Event (%d): ignoring\n", e->etype));
+			}
+			wl_put_event(e);
+		}
+		DHD_OS_WAKE_UNLOCK(cfg->pub);
+	}
+	WL_ERR(("was terminated\n"));
+	complete_and_exit(&tsk->completed, 0);
+	return 0;
+}
+
+void
+wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t * e, void *data)
+{
+	u32 event_type = ntoh32(e->event_type);
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+#if (WL_DBG_LEVEL > 0)
+	s8 *estr = (event_type <= sizeof(wl_dbg_estr) / WL_DBG_ESTR_MAX - 1) ?
+	    wl_dbg_estr[event_type] : (s8 *) "Unknown";
+	WL_DBG(("event_type (%d):" "WLC_E_" "%s\n", event_type, estr));
+#endif /* (WL_DBG_LEVEL > 0) */
+
+	if (wl_get_p2p_status(cfg, IF_CHANGING) || wl_get_p2p_status(cfg, IF_ADDING)) {
+		WL_ERR(("during IF change, ignore event %d\n", event_type));
+		return;
+	}
+
+	if (ndev != bcmcfg_to_prmry_ndev(cfg) && cfg->p2p_supported) {
+		if (ndev != wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION) &&
+#if defined(WL_ENABLE_P2P_IF)
+			(ndev != (cfg->p2p_net ? cfg->p2p_net :
+			wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE))) &&
+#else
+			(ndev != wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE)) &&
+#endif /* WL_ENABLE_P2P_IF */
+			TRUE) {
+			WL_ERR(("ignore event %d, not interested\n", event_type));
+			return;
+		}
+	}
+
+	if (event_type == WLC_E_PFN_NET_FOUND) {
+		WL_DBG((" PNOEVENT: PNO_NET_FOUND\n"));
+	}
+	else if (event_type == WLC_E_PFN_NET_LOST) {
+		WL_DBG((" PNOEVENT: PNO_NET_LOST\n"));
+	}
+
+	if (likely(!wl_enq_event(cfg, ndev, event_type, e, data)))
+		wl_wakeup_event(cfg);
+}
+
+static void wl_init_eq(struct bcm_cfg80211 *cfg)
+{
+	wl_init_eq_lock(cfg);
+	INIT_LIST_HEAD(&cfg->eq_list);
+}
+
+static void wl_flush_eq(struct bcm_cfg80211 *cfg)
+{
+	struct wl_event_q *e;
+	unsigned long flags;
+
+	flags = wl_lock_eq(cfg);
+	while (!list_empty(&cfg->eq_list)) {
+		e = list_first_entry(&cfg->eq_list, struct wl_event_q, eq_list);
+		list_del(&e->eq_list);
+		kfree(e);
+	}
+	wl_unlock_eq(cfg, flags);
+}
+
+/*
+ * retrieve the first queued event from the head of the queue
+ */
+
+static struct wl_event_q *wl_deq_event(struct bcm_cfg80211 *cfg)
+{
+	struct wl_event_q *e = NULL;
+	unsigned long flags;
+
+	flags = wl_lock_eq(cfg);
+	if (likely(!list_empty(&cfg->eq_list))) {
+		e = list_first_entry(&cfg->eq_list, struct wl_event_q, eq_list);
+		list_del(&e->eq_list);
+	}
+	wl_unlock_eq(cfg, flags);
+
+	return e;
+}
+
+/*
+ * push event to tail of the queue
+ */
+
+static s32
+wl_enq_event(struct bcm_cfg80211 *cfg, struct net_device *ndev, u32 event,
+	const wl_event_msg_t *msg, void *data)
+{
+	struct wl_event_q *e;
+	s32 err = 0;
+	uint32 evtq_size;
+	uint32 data_len;
+	unsigned long flags;
+	gfp_t aflags;
+
+	data_len = 0;
+	if (data)
+		data_len = ntoh32(msg->datalen);
+	evtq_size = sizeof(struct wl_event_q) + data_len;
+	aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+	e = kzalloc(evtq_size, aflags);
+	if (unlikely(!e)) {
+		WL_ERR(("event alloc failed\n"));
+		return -ENOMEM;
+	}
+	e->etype = event;
+	memcpy(&e->emsg, msg, sizeof(wl_event_msg_t));
+	if (data)
+		memcpy(e->edata, data, data_len);
+	flags = wl_lock_eq(cfg);
+	list_add_tail(&e->eq_list, &cfg->eq_list);
+	wl_unlock_eq(cfg, flags);
+
+	return err;
+}
+
+static void wl_put_event(struct wl_event_q *e)
+{
+	kfree(e);
+}
+
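+/*
+ * Map an nl80211 interface type to the driver mode (IBSS/BSS/AP), program
+ * the firmware infra setting accordingly, and record the mode for ndev.
+ */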
+static s32 wl_config_ifmode(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 iftype)
+{
+	s32 infra = 0;
+	s32 err = 0;
+	s32 mode = 0;
+	switch (iftype) {
+	case NL80211_IFTYPE_MONITOR:
+	case NL80211_IFTYPE_WDS:
+		WL_ERR(("type (%d) : currently we do not support this mode\n",
+			iftype));
+		err = -EINVAL;
+		return err;
+	case NL80211_IFTYPE_ADHOC:
+		mode = WL_MODE_IBSS;
+		break;
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_P2P_CLIENT:
+		mode = WL_MODE_BSS;
+		infra = 1;
+		break;
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_P2P_GO:
+		mode = WL_MODE_AP;
+		infra = 1;
+		break;
+	default:
+		err = -EINVAL;
+		WL_ERR(("invalid type (%d)\n", iftype));
+		return err;
+	}
+	infra = htod32(infra);
+	err = wldev_ioctl(ndev, WLC_SET_INFRA, &infra, sizeof(infra), true);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_SET_INFRA error (%d)\n", err));
+		return err;
+	}
+
+	wl_set_mode_by_netdev(cfg, ndev, mode);
+
+	return 0;
+}
+
+void wl_cfg80211_add_to_eventbuffer(struct wl_eventmsg_buf *ev, u16 event, bool set)
+{
+	if (!ev || (event > WLC_E_LAST))
+		return;
+
+	if (ev->num < MAX_EVENT_BUF_NUM) {
+		ev->event[ev->num].type = event;
+		ev->event[ev->num].set = set;
+		ev->num++;
+	} else {
+		WL_ERR(("eventbuffer doesn't support > %u events. Update"
+			" the define MAX_EVENT_BUF_NUM \n", MAX_EVENT_BUF_NUM));
+		ASSERT(0);
+	}
+}
+
+s32 wl_cfg80211_apply_eventbuffer(
+	struct net_device *ndev,
+	struct bcm_cfg80211 *cfg,
+	wl_eventmsg_buf_t *ev)
+{
+	char eventmask[WL_EVENTING_MASK_LEN];
+	int i, ret = 0;
+	s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
+
+	if (!ev || (!ev->num))
+		return -EINVAL;
+
+	mutex_lock(&cfg->event_sync);
+
+	/* Read event_msgs mask */
+	bcm_mkiovar("event_msgs", NULL, 0, iovbuf,
+		sizeof(iovbuf));
+	ret = wldev_ioctl(ndev, WLC_GET_VAR, iovbuf, sizeof(iovbuf), false);
+	if (unlikely(ret)) {
+		WL_ERR(("Get event_msgs error (%d)\n", ret));
+		goto exit;
+	}
+	memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN);
+
+	/* apply the set bits */
+	for (i = 0; i < ev->num; i++) {
+		if (ev->event[i].set)
+			setbit(eventmask, ev->event[i].type);
+		else
+			clrbit(eventmask, ev->event[i].type);
+	}
+
+	/* Write updated Event mask */
+	bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
+		sizeof(iovbuf));
+	ret = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), true);
+	if (unlikely(ret)) {
+		WL_ERR(("Set event_msgs error (%d)\n", ret));
+	}
+
+exit:
+	mutex_unlock(&cfg->event_sync);
+	return ret;
+}
+
+s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add)
+{
+	s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
+	s8 eventmask[WL_EVENTING_MASK_LEN];
+	s32 err = 0;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	if (!ndev || !cfg)
+		return -ENODEV;
+
+	mutex_lock(&cfg->event_sync);
+
+	/* Setup event_msgs */
+	bcm_mkiovar("event_msgs", NULL, 0, iovbuf,
+		sizeof(iovbuf));
+	err = wldev_ioctl(ndev, WLC_GET_VAR, iovbuf, sizeof(iovbuf), false);
+	if (unlikely(err)) {
+		WL_ERR(("Get event_msgs error (%d)\n", err));
+		goto eventmsg_out;
+	}
+	memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN);
+	if (add) {
+		setbit(eventmask, event);
+	} else {
+		clrbit(eventmask, event);
+	}
+	bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
+		sizeof(iovbuf));
+	err = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), true);
+	if (unlikely(err)) {
+		WL_ERR(("Set event_msgs error (%d)\n", err));
+		goto eventmsg_out;
+	}
+
+eventmsg_out:
+	mutex_unlock(&cfg->event_sync);
+	return err;
+}
+
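+/*
+ * Build the 2.4/5 GHz channel tables from the firmware "chanspecs" list,
+ * setting HT40 flags and, via "per_chan_info", radar/passive flags per
+ * channel.
+ */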
+static int wl_construct_reginfo(struct bcm_cfg80211 *cfg, s32 bw_cap)
+{
+	struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+	struct ieee80211_channel *band_chan_arr = NULL;
+	wl_uint32_list_t *list;
+	u32 i, j, index, n_2g, n_5g, band, channel, array_size;
+	u32 *n_cnt = NULL;
+	chanspec_t c = 0;
+	s32 err = BCME_OK;
+	bool update;
+	bool ht40_allowed;
+	u8 *pbuf = NULL;
+	bool dfs_radar_disabled = FALSE;
+
+#define LOCAL_BUF_LEN 1024
+	pbuf = kzalloc(LOCAL_BUF_LEN, GFP_KERNEL);
+
+	if (pbuf == NULL) {
+		WL_ERR(("failed to allocate local buf\n"));
+		return -ENOMEM;
+	}
+	list = (wl_uint32_list_t *)(void *)pbuf;
+	list->count = htod32(WL_NUMCHANSPECS);
+
+
+	err = wldev_iovar_getbuf_bsscfg(dev, "chanspecs", NULL,
+		0, pbuf, LOCAL_BUF_LEN, 0, &cfg->ioctl_buf_sync);
+	if (err != 0) {
+		WL_ERR(("get chanspecs failed with %d\n", err));
+		kfree(pbuf);
+		return err;
+	}
+#undef LOCAL_BUF_LEN
+
+	list = (wl_uint32_list_t *)(void *)pbuf;
+	band = array_size = n_2g = n_5g = 0;
+	for (i = 0; i < dtoh32(list->count); i++) {
+		index = 0;
+		update = false;
+		ht40_allowed = false;
+		c = (chanspec_t)dtoh32(list->element[i]);
+		c = wl_chspec_driver_to_host(c);
+		channel = CHSPEC_CHANNEL(c);
+		if (CHSPEC_IS40(c)) {
+			if (CHSPEC_SB_UPPER(c))
+				channel += CH_10MHZ_APART;
+			else
+				channel -= CH_10MHZ_APART;
+		} else if (CHSPEC_IS80(c)) {
+			WL_DBG(("HT80 center channel : %d\n", channel));
+			continue;
+		}
+		if (CHSPEC_IS2G(c) && (channel >= CH_MIN_2G_CHANNEL) &&
+			(channel <= CH_MAX_2G_CHANNEL)) {
+			band_chan_arr = __wl_2ghz_channels;
+			array_size = ARRAYSIZE(__wl_2ghz_channels);
+			n_cnt = &n_2g;
+			band = IEEE80211_BAND_2GHZ;
+			ht40_allowed = (bw_cap == WLC_N_BW_40ALL) ? true : false;
+		} else if (CHSPEC_IS5G(c) && channel >= CH_MIN_5G_CHANNEL) {
+			band_chan_arr = __wl_5ghz_a_channels;
+			array_size = ARRAYSIZE(__wl_5ghz_a_channels);
+			n_cnt = &n_5g;
+			band = IEEE80211_BAND_5GHZ;
+			ht40_allowed = (bw_cap != WLC_N_BW_20ALL);
+		} else {
+			WL_ERR(("Invalid channel Sepc. 0x%x.\n", c));
+			continue;
+		}
+		if (!ht40_allowed && CHSPEC_IS40(c))
+			continue;
+		for (j = 0; (j < *n_cnt && (*n_cnt < array_size)); j++) {
+			if (band_chan_arr[j].hw_value == channel) {
+				update = true;
+				break;
+			}
+		}
+		if (update)
+			index = j;
+		else
+			index = *n_cnt;
+		if (index < array_size) {
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
+			band_chan_arr[index].center_freq =
+				ieee80211_channel_to_frequency(channel);
+#else
+			band_chan_arr[index].center_freq =
+				ieee80211_channel_to_frequency(channel, band);
+#endif
+			band_chan_arr[index].hw_value = channel;
+
+			if (CHSPEC_IS40(c) && ht40_allowed) {
+				/* assuming the order is HT20, HT40 Upper,
+				 *  HT40 lower from chanspecs
+				 */
+				u32 ht40_flag = band_chan_arr[index].flags & IEEE80211_CHAN_NO_HT40;
+				if (CHSPEC_SB_UPPER(c)) {
+					if (ht40_flag == IEEE80211_CHAN_NO_HT40)
+						band_chan_arr[index].flags &=
+							~IEEE80211_CHAN_NO_HT40;
+					band_chan_arr[index].flags |= IEEE80211_CHAN_NO_HT40PLUS;
+				} else {
+					/* It should be one of
+					 * IEEE80211_CHAN_NO_HT40 or IEEE80211_CHAN_NO_HT40PLUS
+					 */
+					band_chan_arr[index].flags &= ~IEEE80211_CHAN_NO_HT40;
+					if (ht40_flag == IEEE80211_CHAN_NO_HT40)
+						band_chan_arr[index].flags |=
+							IEEE80211_CHAN_NO_HT40MINUS;
+				}
+			} else {
+				band_chan_arr[index].flags = IEEE80211_CHAN_NO_HT40;
+				if (!dfs_radar_disabled) {
+					if (band == IEEE80211_BAND_2GHZ)
+						channel |= WL_CHANSPEC_BAND_2G;
+					else
+						channel |= WL_CHANSPEC_BAND_5G;
+					channel |= WL_CHANSPEC_BW_20;
+					channel = wl_chspec_host_to_driver(channel);
+					err = wldev_iovar_getint(dev, "per_chan_info", &channel);
+					if (!err) {
+						if (channel & WL_CHAN_RADAR) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+							band_chan_arr[index].flags |=
+								(IEEE80211_CHAN_RADAR
+								| IEEE80211_CHAN_NO_IBSS);
+#else
+							band_chan_arr[index].flags |=
+								IEEE80211_CHAN_RADAR;
+#endif
+						}
+
+						if (channel & WL_CHAN_PASSIVE)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+							band_chan_arr[index].flags |=
+								IEEE80211_CHAN_PASSIVE_SCAN;
+#else
+							band_chan_arr[index].flags |=
+								IEEE80211_CHAN_NO_IR;
+#endif
+					} else if (err == BCME_UNSUPPORTED) {
+						dfs_radar_disabled = TRUE;
+						WL_ERR(("does not support per_chan_info\n"));
+					}
+				}
+			}
+			if (!update)
+				(*n_cnt)++;
+		}
+
+	}
+	__wl_band_2ghz.n_channels = n_2g;
+	__wl_band_5ghz_a.n_channels = n_5g;
+	kfree(pbuf);
+	return err;
+}
+
+s32 wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify)
+{
+	struct wiphy *wiphy;
+	struct net_device *dev;
+	u32 bandlist[3];
+	u32 nband = 0;
+	u32 i = 0;
+	s32 err = 0;
+	s32 index = 0;
+	s32 nmode = 0;
+	bool rollback_lock = false;
+	s32 mimo_bw_cap = 0;
+	s32 cur_band = -1;
+	struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS] = {NULL, };
+	u32 bw = WL_CHANSPEC_BW_20;
+	struct {
+		u32 band;
+		u32 bw_cap;
+	} param = {0, 0};
+
+	if (cfg == NULL) {
+		cfg = g_bcm_cfg;
+		mutex_lock(&cfg->usr_sync);
+		rollback_lock = true;
+	}
+	dev = bcmcfg_to_prmry_ndev(cfg);
+
+	memset(bandlist, 0, sizeof(bandlist));
+	err = wldev_ioctl(dev, WLC_GET_BANDLIST, bandlist,
+		sizeof(bandlist), false);
+	if (unlikely(err)) {
+		WL_ERR(("error read bandlist (%d)\n", err));
+		goto end_bands;
+	}
+	err = wldev_ioctl(dev, WLC_GET_BAND, &cur_band,
+		sizeof(s32), false);
+	if (unlikely(err)) {
+		WL_ERR(("error (%d)\n", err));
+		goto end_bands;
+	}
+
+	err = wldev_iovar_getint(dev, "nmode", &nmode);
+	if (unlikely(err)) {
+		WL_ERR(("error reading nmode (%d)\n", err));
+	} else {
+		/* For nmode only, check bw cap */
+		err = wldev_iovar_getint(dev, "mimo_bw_cap", &mimo_bw_cap);
+		if (unlikely(err)) {
+			WL_ERR(("error get mimo_bw_cap (%d)\n", err));
+		}
+
+		/* Check 5GHz BW capability (20MHz / 40MHz or 80MHz...) */
+		param.band = WLC_BAND_5G;
+		err = wldev_iovar_getbuf(dev, "bw_cap", &param, sizeof(param),
+			cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+		if (err) {
+			WL_ERR(("bw_cap failed, %d\n", err));
+		} else {
+			if (WL_BW_CAP_80MHZ(cfg->ioctl_buf[0])) {
+				bw = WL_CHANSPEC_BW_80;
+				mimo_bw_cap = WLC_N_BW_20IN2G_40IN5G;
+				nmode = 1;
+			} else if (WL_BW_CAP_40MHZ(cfg->ioctl_buf[0])) {
+				bw = WL_CHANSPEC_BW_40;
+				mimo_bw_cap = WLC_N_BW_20IN2G_40IN5G;
+				nmode = 1;
+			} else {
+				bw = WL_CHANSPEC_BW_20;
+			}
+		}
+	}
+
+	err = wl_construct_reginfo(cfg, mimo_bw_cap);
+	if (err) {
+		WL_ERR(("wl_construct_reginfo() fails err=%d\n", err));
+		if (err != BCME_UNSUPPORTED)
+			goto end_bands;
+		err = 0;
+	}
+	wiphy = bcmcfg_to_wiphy(cfg);
+	nband = bandlist[0];
+
+	for (i = 1; i <= nband && i < ARRAYSIZE(bandlist); i++) {
+		index = -1;
+		if (bandlist[i] == WLC_BAND_5G && __wl_band_5ghz_a.n_channels > 0) {
+			bands[IEEE80211_BAND_5GHZ] =
+				&__wl_band_5ghz_a;
+			index = IEEE80211_BAND_5GHZ;
+			if (mimo_bw_cap == WLC_N_BW_40ALL || mimo_bw_cap == WLC_N_BW_20IN2G_40IN5G ||
+			    bw == WL_CHANSPEC_BW_40 || bw == WL_CHANSPEC_BW_80) {
+				bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+				bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+				/* Set VHT capabilities */
+				if (bw == WL_CHANSPEC_BW_80) {
+					bands[index]->vht_cap.cap |= IEEE80211_VHT_CAP_SHORT_GI_80;
+					bands[index]->vht_cap.vht_supported = TRUE;
+				}
+			}
+		}
+		else if (bandlist[i] == WLC_BAND_2G && __wl_band_2ghz.n_channels > 0) {
+			bands[IEEE80211_BAND_2GHZ] =
+				&__wl_band_2ghz;
+			index = IEEE80211_BAND_2GHZ;
+			if (mimo_bw_cap == WLC_N_BW_40ALL)
+				bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+		}
+
+		if ((index >= 0) && nmode) {
+			bands[index]->ht_cap.cap |=
+				(IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_DSSSCCK40);
+			bands[index]->ht_cap.ht_supported = TRUE;
+			bands[index]->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+			bands[index]->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+			/* An HT shall support all EQM rates for one spatial stream */
+			bands[index]->ht_cap.mcs.rx_mask[0] = 0xff;
+		}
+
+	}
+
+	wiphy->bands[IEEE80211_BAND_2GHZ] = bands[IEEE80211_BAND_2GHZ];
+	wiphy->bands[IEEE80211_BAND_5GHZ] = bands[IEEE80211_BAND_5GHZ];
+
+	/* If no band was populated, fall back to 2.4GHz as the default */
+	if (wiphy->bands[IEEE80211_BAND_2GHZ] == NULL &&
+		wiphy->bands[IEEE80211_BAND_5GHZ] == NULL) {
+		/* Setup 2Ghz band as default */
+		wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
+	}
+
+	if (notify)
+		wiphy_apply_custom_regulatory(wiphy, &brcm_regdom);
+
+end_bands:
+	if (rollback_lock)
+		mutex_unlock(&cfg->usr_sync);
+	return err;
+}
+
+static s32 __wl_cfg80211_up(struct bcm_cfg80211 *cfg)
+{
+	s32 err = 0;
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	struct wireless_dev *wdev = ndev->ieee80211_ptr;
+
+	WL_DBG(("In\n"));
+
+	err = dhd_config_dongle(cfg);
+	if (unlikely(err))
+		return err;
+
+	err = wl_config_ifmode(cfg, ndev, wdev->iftype);
+	if (unlikely(err && err != -EINPROGRESS)) {
+		WL_ERR(("wl_config_ifmode failed\n"));
+		if (err == -1) {
+			WL_ERR(("return error %d\n", err));
+			return err;
+		}
+	}
+	err = wl_update_wiphybands(cfg, true);
+	if (unlikely(err)) {
+		WL_ERR(("wl_update_wiphybands failed\n"));
+		if (err == -1) {
+			WL_ERR(("return error %d\n", err));
+			return err;
+		}
+	}
+
+	err = dhd_monitor_init(cfg->pub);
+
+	INIT_DELAYED_WORK(&cfg->pm_enable_work, wl_cfg80211_work_handler);
+	wl_set_drv_status(cfg, READY, ndev);
+	return err;
+}
+
+static s32 __wl_cfg80211_down(struct bcm_cfg80211 *cfg)
+{
+	s32 err = 0;
+	unsigned long flags;
+	struct net_info *iter, *next;
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+#if defined(WL_CFG80211) && defined(WL_ENABLE_P2P_IF)
+	struct net_device *p2p_net = cfg->p2p_net;
+#endif /* WL_CFG80211 && WL_ENABLE_P2P_IF */
+	u32 bssidx = 0;
+#ifdef PROP_TXSTATUS_VSDB
+	dhd_pub_t *dhd =  (dhd_pub_t *)(cfg->pub);
+#endif /* PROP_TXSTATUS_VSDB */
+	WL_DBG(("In\n"));
+	/* Delete pm_enable_work */
+	wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL);
+
+	if (cfg->p2p_supported) {
+		wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+#ifdef PROP_TXSTATUS_VSDB
+		if (cfg->p2p->vif_created) {
+			bool enabled = false;
+			dhd_wlfc_get_enable(dhd, &enabled);
+			if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+				dhd->op_mode != DHD_FLAG_IBSS_MODE) {
+				dhd_wlfc_deinit(dhd);
+				cfg->wlfc_on = false;
+			}
+		}
+#endif /* PROP_TXSTATUS_VSDB */
+	}
+
+	/* If the primary BSS is operational (e.g. SoftAP), bring it down */
+	if (!(wl_cfgp2p_find_idx(cfg, ndev, &bssidx)) &&
+		wl_cfgp2p_bss_isup(ndev, bssidx)) {
+		if (wl_cfgp2p_bss(cfg, ndev, bssidx, 0) < 0)
+			WL_ERR(("BSS down failed \n"));
+	}
+
+	/* Check if cfg80211 interface is already down */
+	if (!wl_get_drv_status(cfg, READY, ndev))
+		return err;	/* it is not even ready */
+	for_each_ndev(cfg, iter, next)
+		wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	if (cfg->scan_request) {
+		cfg80211_scan_done(cfg->scan_request, true);
+		cfg->scan_request = NULL;
+	}
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+
+	for_each_ndev(cfg, iter, next) {
+		wl_clr_drv_status(cfg, READY, iter->ndev);
+		wl_clr_drv_status(cfg, SCANNING, iter->ndev);
+		wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+		wl_clr_drv_status(cfg, CONNECTING, iter->ndev);
+		wl_clr_drv_status(cfg, CONNECTED, iter->ndev);
+		wl_clr_drv_status(cfg, DISCONNECTING, iter->ndev);
+		wl_clr_drv_status(cfg, AP_CREATED, iter->ndev);
+		wl_clr_drv_status(cfg, AP_CREATING, iter->ndev);
+	}
+	bcmcfg_to_prmry_ndev(cfg)->ieee80211_ptr->iftype =
+		NL80211_IFTYPE_STATION;
+#if defined(WL_CFG80211) && defined(WL_ENABLE_P2P_IF)
+	if (p2p_net)
+		dev_close(p2p_net);
+#endif /* WL_CFG80211 && WL_ENABLE_P2P_IF */
+	DNGL_FUNC(dhd_cfg80211_down, (cfg));
+	wl_flush_eq(cfg);
+	wl_link_down(cfg);
+	if (cfg->p2p_supported)
+		wl_cfgp2p_down(cfg);
+	dhd_monitor_uninit();
+
+#ifdef WL11U
+	/* Clear interworking element. */
+	if (cfg->wl11u) {
+		cfg->wl11u = FALSE;
+		cfg->iw_ie_len = 0;
+		memset(cfg->iw_ie, 0, IW_IES_MAX_BUF_LEN);
+	}
+#endif /* WL11U */
+
+	return err;
+}
+
+extern char fwversion[];
+
+s32 wl_cfg80211_up(void *para)
+{
+	struct bcm_cfg80211 *cfg;
+	s32 err = 0;
+	int val = 1;
+	dhd_pub_t *dhd;
+
+	(void)para;
+	WL_DBG(("In\n"));
+	cfg = g_bcm_cfg;
+
+	if ((err = wldev_ioctl(bcmcfg_to_prmry_ndev(cfg), WLC_GET_VERSION, &val,
+		sizeof(int), false)) < 0) {
+		WL_ERR(("WLC_GET_VERSION failed, err=%d\n", err));
+		return err;
+	}
+	val = dtoh32(val);
+	if (val != WLC_IOCTL_VERSION && val != 1) {
+		WL_ERR(("Version mismatch, please upgrade. Got %d, expected %d or 1\n",
+			val, WLC_IOCTL_VERSION));
+		return BCME_VERSION;
+	}
+	ioctl_version = val;
+	WL_TRACE(("WLC_GET_VERSION=%d\n", ioctl_version));
+
+	mutex_lock(&cfg->usr_sync);
+	dhd = (dhd_pub_t *)(cfg->pub);
+	if (!(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+		err = wl_cfg80211_attach_post(bcmcfg_to_prmry_ndev(cfg));
+		if (unlikely(err)) {
+			/* don't leak usr_sync on the error path */
+			mutex_unlock(&cfg->usr_sync);
+			return err;
+		}
+	}
+	err = __wl_cfg80211_up(cfg);
+	if (unlikely(err))
+		WL_ERR(("__wl_cfg80211_up failed\n"));
+	mutex_unlock(&cfg->usr_sync);
+//	kct_log(CT_EV_INFO, "cws.wifi", "ON", EV_FLAGS_PRIORITY_LOW, fwversion);
+	return err;
+}
+
+void log_hang(u16 reason)
+{
+	char buf[32];
+
+	buf[sizeof(buf)-1] = '\0';
+	snprintf(buf, sizeof(buf)-1, "%d", reason);
+//	kct_log(CT_EV_CRASH, "cws.wifi", "crash_eventing", 0, buf);
+}
+
+/* Private event to the supplicant indicating that the chip has hung */
+int wl_cfg80211_hang(struct net_device *dev, u16 reason)
+{
+	struct bcm_cfg80211 *cfg;
+	cfg = g_bcm_cfg;
+
+	WL_ERR(("In : chip crash eventing\n"));
+	if (cfg != NULL) {
+		wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL);
+	}
+	cfg80211_disconnected(dev, reason, NULL, 0, GFP_KERNEL);
+	if (cfg != NULL) {
+		wl_link_down(cfg);
+	}
+
+	log_hang(reason);
+
+	return 0;
+}
+
+s32 wl_cfg80211_down(void *para)
+{
+	struct bcm_cfg80211 *cfg;
+	s32 err = 0;
+
+	(void)para;
+	WL_DBG(("In\n"));
+	cfg = g_bcm_cfg;
+	mutex_lock(&cfg->usr_sync);
+	err = __wl_cfg80211_down(cfg);
+	mutex_unlock(&cfg->usr_sync);
+
+//	 kct_log(CT_EV_INFO, "cws.wifi", "OFF", EV_FLAGS_PRIORITY_LOW);
+
+	return err;
+}
+
+static void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item)
+{
+	unsigned long flags;
+	void *rptr = NULL;
+	struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
+
+	if (!profile)
+		return NULL;
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	switch (item) {
+	case WL_PROF_SEC:
+		rptr = &profile->sec;
+		break;
+	case WL_PROF_ACT:
+		rptr = &profile->active;
+		break;
+	case WL_PROF_BSSID:
+		rptr = profile->bssid;
+		break;
+	case WL_PROF_SSID:
+		rptr = &profile->ssid;
+		break;
+	case WL_PROF_CHAN:
+		rptr = &profile->channel;
+		break;
+	}
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+	if (!rptr)
+		WL_ERR(("invalid item (%d)\n", item));
+	return rptr;
+}
+
+static s32
+wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data, s32 item)
+{
+	s32 err = 0;
+	struct wlc_ssid *ssid;
+	unsigned long flags;
+	struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
+
+	if (!profile)
+		return WL_INVALID;
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	switch (item) {
+	case WL_PROF_SSID:
+		ssid = (wlc_ssid_t *) data;
+		memset(profile->ssid.SSID, 0,
+			sizeof(profile->ssid.SSID));
+		memcpy(profile->ssid.SSID, ssid->SSID, ssid->SSID_len);
+		profile->ssid.SSID_len = ssid->SSID_len;
+		break;
+	case WL_PROF_BSSID:
+		if (data)
+			memcpy(profile->bssid, data, ETHER_ADDR_LEN);
+		else
+			memset(profile->bssid, 0, ETHER_ADDR_LEN);
+		break;
+	case WL_PROF_SEC:
+		memcpy(&profile->sec, data, sizeof(profile->sec));
+		break;
+	case WL_PROF_ACT:
+		profile->active = *(bool *)data;
+		break;
+	case WL_PROF_BEACONINT:
+		profile->beacon_interval = *(u16 *)data;
+		break;
+	case WL_PROF_DTIMPERIOD:
+		profile->dtim_period = *(u8 *)data;
+		break;
+	case WL_PROF_CHAN:
+		profile->channel = *(u32*)data;
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+
+	if (err == -EOPNOTSUPP)
+		WL_ERR(("unsupported item (%d)\n", item));
+
+	return err;
+}
+
+void wl_cfg80211_dbg_level(u32 level)
+{
+	/*
+	 * Changing the debug level via an insmod parameter is prohibited;
+	 * eventually the debug level will be configured at compile time
+	 * through a CONFIG_XXX option.
+	 */
+	/* wl_dbg_level = level; */
+}
+
+static bool wl_is_ibssmode(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	return wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_IBSS;
+}
+
+static __used bool wl_is_ibssstarter(struct bcm_cfg80211 *cfg)
+{
+	return cfg->ibss_starter;
+}
+
+static void wl_rst_ie(struct bcm_cfg80211 *cfg)
+{
+	struct wl_ie *ie = wl_to_ie(cfg);
+
+	ie->offset = 0;
+}
+
+static __used s32 wl_add_ie(struct bcm_cfg80211 *cfg, u8 t, u8 l, u8 *v)
+{
+	struct wl_ie *ie = wl_to_ie(cfg);
+	s32 err = 0;
+
+	if (unlikely(ie->offset + l + 2 > WL_TLV_INFO_MAX)) {
+		WL_ERR(("ei crosses buffer boundary\n"));
+		return -ENOSPC;
+	}
+	ie->buf[ie->offset] = t;
+	ie->buf[ie->offset + 1] = l;
+	memcpy(&ie->buf[ie->offset + 2], v, l);
+	ie->offset += l + 2;
+
+	return err;
+}
+
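+/*
+ * Patch a hidden (zero-length or wrong-length) SSID IE in a beacon/probe
+ * IE stream with the SSID from bss_info. On roam the stream is shifted
+ * with memmove() to make room for the real SSID before copying it in.
+ */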
+static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, u8 *ie_stream, u32 *ie_size, bool roam)
+{
+	u8 *ssidie;
+	ssidie = (u8 *)cfg80211_find_ie(WLAN_EID_SSID, ie_stream, *ie_size);
+	if (!ssidie)
+		return;
+	if (ssidie[1] != bi->SSID_len) {
+		if (ssidie[1]) {
+			WL_ERR(("%s: Wrong SSID len: %d != %d\n",
+				__FUNCTION__, ssidie[1], bi->SSID_len));
+		}
+		if (roam) {
+			WL_ERR(("Changing the SSID Info.\n"));
+			memmove(ssidie + bi->SSID_len + 2,
+				(ssidie + 2) + ssidie[1],
+				*ie_size - (ssidie + 2 + ssidie[1] - ie_stream));
+			memcpy(ssidie + 2, bi->SSID, bi->SSID_len);
+			*ie_size = *ie_size + bi->SSID_len - ssidie[1];
+			ssidie[1] = bi->SSID_len;
+		}
+		return;
+	}
+	if (*(ssidie + 2) == '\0')
+		 memcpy(ssidie + 2, bi->SSID, bi->SSID_len);
+	return;
+}
+
+static s32 wl_mrg_ie(struct bcm_cfg80211 *cfg, u8 *ie_stream, u16 ie_size)
+{
+	struct wl_ie *ie = wl_to_ie(cfg);
+	s32 err = 0;
+
+	if (unlikely(ie->offset + ie_size > WL_TLV_INFO_MAX)) {
+		WL_ERR(("ei_stream crosses buffer boundary\n"));
+		return -ENOSPC;
+	}
+	memcpy(&ie->buf[ie->offset], ie_stream, ie_size);
+	ie->offset += ie_size;
+
+	return err;
+}
+
+static s32 wl_cp_ie(struct bcm_cfg80211 *cfg, u8 *dst, u16 dst_size)
+{
+	struct wl_ie *ie = wl_to_ie(cfg);
+	s32 err = 0;
+
+	if (unlikely(ie->offset > dst_size)) {
+		WL_ERR(("dst_size is not enough\n"));
+		return -ENOSPC;
+	}
+	memcpy(dst, &ie->buf[0], ie->offset);
+
+	return err;
+}
+
+static u32 wl_get_ielen(struct bcm_cfg80211 *cfg)
+{
+	struct wl_ie *ie = wl_to_ie(cfg);
+
+	return ie->offset;
+}
+
+static void wl_link_up(struct bcm_cfg80211 *cfg)
+{
+	cfg->link_up = true;
+}
+
+static void wl_link_down(struct bcm_cfg80211 *cfg)
+{
+	struct wl_connect_info *conn_info = wl_to_conn(cfg);
+
+	WL_DBG(("In\n"));
+	cfg->link_up = false;
+	conn_info->req_ie_len = 0;
+	conn_info->resp_ie_len = 0;
+}
+
+static unsigned long wl_lock_eq(struct bcm_cfg80211 *cfg)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cfg->eq_lock, flags);
+	return flags;
+}
+
+static void wl_unlock_eq(struct bcm_cfg80211 *cfg, unsigned long flags)
+{
+	spin_unlock_irqrestore(&cfg->eq_lock, flags);
+}
+
+static void wl_init_eq_lock(struct bcm_cfg80211 *cfg)
+{
+	spin_lock_init(&cfg->eq_lock);
+}
+
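+/*
+ * Delay helper: busy-waits when sleeping is not allowed (atomic context)
+ * or when the requested delay is shorter than one jiffy; sleeps otherwise.
+ */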
+static void wl_delay(u32 ms)
+{
+	if (in_atomic() || (ms < jiffies_to_msecs(1))) {
+		OSL_DELAY(ms*1000);
+	} else {
+		OSL_SLEEP(ms);
+	}
+}
+
+s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct ether_addr p2pif_addr;
+	struct ether_addr primary_mac;
+	if (!cfg->p2p)
+		return -1;
+	if (!p2p_is_on(cfg)) {
+		get_primary_mac(cfg, &primary_mac);
+		wl_cfgp2p_generate_bss_mac(&primary_mac, p2pdev_addr, &p2pif_addr);
+	} else {
+		memcpy(p2pdev_addr->octet,
+			cfg->p2p->dev_addr.octet, ETHER_ADDR_LEN);
+	}
+
+	return 0;
+}
+
+s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len)
+{
+	struct bcm_cfg80211 *cfg;
+
+	cfg = g_bcm_cfg;
+
+	return wl_cfgp2p_set_p2p_noa(cfg, net, buf, len);
+}
+
+s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len)
+{
+	struct bcm_cfg80211 *cfg;
+	cfg = g_bcm_cfg;
+
+	return wl_cfgp2p_get_p2p_noa(cfg, net, buf, len);
+}
+
+s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len)
+{
+	struct bcm_cfg80211 *cfg;
+	cfg = g_bcm_cfg;
+
+	return wl_cfgp2p_set_p2p_ps(cfg, net, buf, len);
+}
+
+s32 wl_cfg80211_channel_to_freq(u32 channel)
+{
+	int freq = 0;
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38)
+	freq = ieee80211_channel_to_frequency(channel);
+#else
+	{
+		u16 band = 0;
+		if (channel <= CH_MAX_2G_CHANNEL)
+			band = IEEE80211_BAND_2GHZ;
+		else
+			band = IEEE80211_BAND_5GHZ;
+		freq = ieee80211_channel_to_frequency(channel, band);
+	}
+#endif
+	return freq;
+}
+
+
+#ifdef WLTDLS
+static s32
+wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	struct net_device *ndev = NULL;
+	u32 reason = ntoh32(e->reason);
+	s8 *msg = NULL;
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	switch (reason) {
+	case WLC_E_TDLS_PEER_DISCOVERED:
+		msg = " TDLS PEER DISCOVERED ";
+		break;
+	case WLC_E_TDLS_PEER_CONNECTED:
+		msg = " TDLS PEER CONNECTED ";
+		break;
+	case WLC_E_TDLS_PEER_DISCONNECTED:
+		msg = " TDLS PEER DISCONNECTED ";
+		break;
+	}
+	if (msg) {
+		WL_ERR(("%s: " MACDBG " on %s ndev\n", msg, MAC2STRDBG((u8*)(&e->addr)),
+			(bcmcfg_to_prmry_ndev(cfg) == ndev) ? "primary" : "secondary"));
+	}
+	return 0;
+}
+#endif  /* WLTDLS */
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0))
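+/*
+ * Map nl80211 TDLS operations onto the firmware "tdls_endpoint" iovar:
+ * DISCOVERY_REQ -> manual endpoint discovery, SETUP -> auto mode on,
+ * TEARDOWN -> manual endpoint delete with auto mode off.
+ */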
+static s32
+wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+	u8 *peer, enum nl80211_tdls_operation oper)
+{
+	s32 ret = 0;
+#ifdef WLTDLS
+	struct bcm_cfg80211 *cfg;
+	tdls_iovar_t info;
+	cfg = g_bcm_cfg;
+	memset(&info, 0, sizeof(tdls_iovar_t));
+	if (peer)
+		memcpy(&info.ea, peer, ETHER_ADDR_LEN);
+	switch (oper) {
+	case NL80211_TDLS_DISCOVERY_REQ:
+		/* turn on TDLS */
+		ret = dhd_tdls_enable(dev, true, false, NULL);
+		if (ret < 0)
+			return ret;
+		info.mode = TDLS_MANUAL_EP_DISCOVERY;
+		break;
+	case NL80211_TDLS_SETUP:
+		/* auto mode on */
+		ret = dhd_tdls_enable(dev, true, true, (struct ether_addr *)peer);
+		if (ret < 0)
+			return ret;
+		break;
+	case NL80211_TDLS_TEARDOWN:
+		info.mode = TDLS_MANUAL_EP_DELETE;
+		/* auto mode off */
+		ret = dhd_tdls_enable(dev, true, false, (struct ether_addr *)peer);
+		if (ret < 0)
+			return ret;
+		break;
+	default:
+		WL_ERR(("Unsupported operation : %d\n", oper));
+		goto out;
+	}
+	if (info.mode) {
+		ret = wldev_iovar_setbuf(dev, "tdls_endpoint", &info, sizeof(info),
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+		if (ret) {
+			WL_ERR(("tdls_endpoint error %d\n", ret));
+		}
+	}
+out:
+#endif /* WLTDLS */
+	return ret;
+}
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0) */
+
+s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *net, char *buf, int len,
+	enum wl_management_type type)
+{
+	struct bcm_cfg80211 *cfg;
+	struct net_device *ndev = NULL;
+	struct ether_addr primary_mac;
+	s32 ret = 0;
+	s32 bssidx = 0;
+	s32 pktflag = 0;
+	cfg = g_bcm_cfg;
+
+	if (wl_get_drv_status(cfg, AP_CREATING, net)) {
+		/* Vendor IEs should be set to FW
+		 * after SoftAP interface is brought up
+		 */
+		goto exit;
+	} else if (wl_get_drv_status(cfg, AP_CREATED, net)) {
+		ndev = net;
+		bssidx = 0;
+	} else if (cfg->p2p) {
+		net = ndev_to_wlc_ndev(net, cfg);
+		if (!cfg->p2p->on) {
+			get_primary_mac(cfg, &primary_mac);
+			wl_cfgp2p_generate_bss_mac(&primary_mac, &cfg->p2p->dev_addr,
+				&cfg->p2p->int_addr);
+			/* In the case of the p2p_listen command, the supplicant
+			 * sends remain_on_channel without turning on P2P
+			 */
+
+			p2p_on(cfg) = true;
+			ret = wl_cfgp2p_enable_discovery(cfg, net, NULL, 0);
+
+			if (unlikely(ret)) {
+				goto exit;
+			}
+		}
+		if (net != bcmcfg_to_prmry_ndev(cfg)) {
+			if (wl_get_mode_by_netdev(cfg, net) == WL_MODE_AP) {
+				ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION);
+				bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION);
+			}
+		} else {
+			ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+			bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+		}
+	}
+	if (ndev != NULL) {
+		switch (type) {
+			case WL_BEACON:
+				pktflag = VNDR_IE_BEACON_FLAG;
+				break;
+			case WL_PROBE_RESP:
+				pktflag = VNDR_IE_PRBRSP_FLAG;
+				break;
+			case WL_ASSOC_RESP:
+				pktflag = VNDR_IE_ASSOCRSP_FLAG;
+				break;
+		}
+		if (pktflag)
+			ret = wl_cfgp2p_set_management_ie(cfg, ndev, bssidx, pktflag, buf, len);
+	}
+exit:
+	return ret;
+}
+
+#ifdef WL_SUPPORT_AUTO_CHANNEL
+static s32
+wl_cfg80211_set_auto_channel_scan_state(struct net_device *ndev)
+{
+	u32 val = 0;
+	s32 ret = BCME_ERROR;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	/* Disable mpc, to avoid automatic interface down. */
+	val = 0;
+
+	ret = wldev_iovar_setbuf_bsscfg(ndev, "mpc", (void *)&val,
+		sizeof(val), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0,
+		&cfg->ioctl_buf_sync);
+	if (ret < 0) {
+		WL_ERR(("set 'mpc' failed, error = %d\n", ret));
+		goto done;
+	}
+
+	/* Set interface up, explicitly. */
+	val = 1;
+
+	ret = wldev_ioctl(ndev, WLC_UP, (void *)&val, sizeof(val), true);
+	if (ret < 0) {
+		WL_ERR(("set interface up failed, error = %d\n", ret));
+		goto done;
+	}
+
+	/* Stop all scan explicitly, till auto channel selection complete. */
+	wl_set_drv_status(cfg, SCANNING, ndev);
+	if (cfg->escan_info.ndev == NULL) {
+		ret = BCME_OK;
+		goto done;
+	}
+	ret = wl_notify_escan_complete(cfg, ndev, true, true);
+	if (ret < 0) {
+		WL_ERR(("set scan abort failed, error = %d\n", ret));
+		goto done;
+	}
+
+done:
+	return ret;
+}
+
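+/*
+ * The hex bounds below are 20MHz chanspecs in the pre-11ac layout
+ * (band/BW/sideband bits in the upper byte, channel number in the lower
+ * byte); e.g. 0x2b01 is 2.4GHz/20MHz channel 1 and 0x1b24 is 5GHz/20MHz
+ * channel 36. A sketch of the encoding, derived from the WL_CHANSPEC_*
+ * masks used elsewhere in this file.
+ */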
+static bool
+wl_cfg80211_valid_chanspec_p2p(chanspec_t chanspec)
+{
+	bool valid = false;
+
+	/* channel 1 to 14 */
+	if ((chanspec >= 0x2b01) && (chanspec <= 0x2b0e)) {
+		valid = true;
+	}
+	/* channel 36 to 48 */
+	else if ((chanspec >= 0x1b24) && (chanspec <= 0x1b30)) {
+		valid = true;
+	}
+	/* channel 149 to 161 */
+	else if ((chanspec >= 0x1b95) && (chanspec <= 0x1ba1)) {
+		valid = true;
+	}
+	else {
+		valid = false;
+		WL_INFO(("invalid P2P chanspec, channel = %d, chanspec = %04x\n",
+			CHSPEC_CHANNEL(chanspec), chanspec));
+	}
+
+	return valid;
+}
+
+static s32
+wl_cfg80211_get_chanspecs_2g(struct net_device *ndev, void *buf, s32 buflen)
+{
+	s32 ret = BCME_ERROR;
+	struct bcm_cfg80211 *cfg = NULL;
+	wl_uint32_list_t *list = NULL;
+	chanspec_t chanspec = 0;
+
+	memset(buf, 0, buflen);
+
+	cfg = g_bcm_cfg;
+	list = (wl_uint32_list_t *)buf;
+	list->count = htod32(WL_NUMCHANSPECS);
+
+	/* Restrict channels to 2.4GHz, 20MHz BW, no SB. */
+	chanspec |= (WL_CHANSPEC_BAND_2G | WL_CHANSPEC_BW_20 |
+		WL_CHANSPEC_CTL_SB_NONE);
+	chanspec = wl_chspec_host_to_driver(chanspec);
+
+	ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec,
+		sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync);
+	if (ret < 0) {
+		WL_ERR(("get 'chanspecs' failed, error = %d\n", ret));
+	}
+
+	return ret;
+}
+
+static s32
+wl_cfg80211_get_chanspecs_5g(struct net_device *ndev, void *buf, s32 buflen)
+{
+	u32 channel = 0;
+	s32 ret = BCME_ERROR;
+	s32 i = 0;
+	s32 j = 0;
+	struct bcm_cfg80211 *cfg = NULL;
+	wl_uint32_list_t *list = NULL;
+	chanspec_t chanspec = 0;
+
+	memset(buf, 0, buflen);
+
+	cfg = g_bcm_cfg;
+	list = (wl_uint32_list_t *)buf;
+	list->count = htod32(WL_NUMCHANSPECS);
+
+	/* Restrict channels to 5GHz, 20MHz BW, no SB. */
+	chanspec |= (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_20 |
+		WL_CHANSPEC_CTL_SB_NONE);
+	chanspec = wl_chspec_host_to_driver(chanspec);
+
+	ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec,
+		sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync);
+	if (ret < 0) {
+		WL_ERR(("get 'chanspecs' failed, error = %d\n", ret));
+		goto done;
+	}
+
+	/* Skip DFS and invalid P2P channels. */
+	for (i = 0, j = 0; i < dtoh32(list->count); i++) {
+		chanspec = (chanspec_t) dtoh32(list->element[i]);
+		channel = CHSPEC_CHANNEL(chanspec);
+
+		ret = wldev_iovar_getint(ndev, "per_chan_info", &channel);
+		if (ret < 0) {
+			WL_ERR(("get 'per_chan_info' failed, error = %d\n", ret));
+			goto done;
+		}
+
+		if (CHANNEL_IS_RADAR(channel) ||
+			!(wl_cfg80211_valid_chanspec_p2p(chanspec))) {
+			continue;
+		} else {
+			list->element[j] = list->element[i];
+		}
+
+		j++;
+	}
+
+	list->count = j;
+
+done:
+	return ret;
+}
+
+static s32
+wl_cfg80211_get_best_channel(struct net_device *ndev, void *buf, int buflen,
+	int *channel)
+{
+	s32 ret = BCME_ERROR;
+	int chosen = 0;
+	int retry = 0;
+
+	/* Start auto channel selection scan. */
+	ret = wldev_ioctl(ndev, WLC_START_CHANNEL_SEL, buf, buflen, true);
+	if (ret < 0) {
+		WL_ERR(("can't start auto channel scan, error = %d\n", ret));
+		*channel = 0;
+		goto done;
+	}
+
+	/* Wait for auto channel selection, worst case possible delay is 5250ms. */
+	retry = CHAN_SEL_RETRY_COUNT;
+
+	while (retry--) {
+		OSL_SLEEP(CHAN_SEL_IOCTL_DELAY);
+
+		ret = wldev_ioctl(ndev, WLC_GET_CHANNEL_SEL, &chosen, sizeof(chosen),
+			false);
+		if ((ret == 0) && (dtoh32(chosen) != 0)) {
+			*channel = (u16)(chosen & 0x00FF);
+			WL_INFO(("selected channel = %d\n", *channel));
+			break;
+		}
+		WL_INFO(("attempt = %d, ret = %d, chosen = %d\n",
+			(CHAN_SEL_RETRY_COUNT - retry), ret, dtoh32(chosen)));
+	}
+
+	if (retry <= 0) {
+		WL_ERR(("failure, auto channel selection timed out\n"));
+		*channel = 0;
+		ret = BCME_ERROR;
+	}
+
+done:
+	return ret;
+}
+
+static s32
+wl_cfg80211_restore_auto_channel_scan_state(struct net_device *ndev)
+{
+	u32 val = 0;
+	s32 ret = BCME_ERROR;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	/* Clear scan stop driver status. */
+	wl_clr_drv_status(cfg, SCANNING, ndev);
+
+	/* Enable mpc back to 1, irrespective of initial state. */
+	val = 1;
+
+	ret = wldev_iovar_setbuf_bsscfg(ndev, "mpc", (void *)&val,
+		sizeof(val), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0,
+		&cfg->ioctl_buf_sync);
+	if (ret < 0) {
+		WL_ERR(("set 'mpc' failed, error = %d\n", ret));
+	}
+
+	return ret;
+}
+
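+/*
+ * ANDROID "best channels" handler: writes three zero-padded fields
+ * ("<2.4GHz freq> <5GHz freq> <overall freq> ") into cmd and returns the
+ * number of bytes written; the overall pick simply mirrors the 5GHz one.
+ */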
+s32
+wl_cfg80211_get_best_channels(struct net_device *dev, char* cmd, int total_len)
+{
+	int channel = 0;
+	s32 ret = BCME_ERROR;
+	u8 *buf = NULL;
+	char *pos = cmd;
+	struct bcm_cfg80211 *cfg = NULL;
+	struct net_device *ndev = NULL;
+
+	memset(cmd, 0, total_len);
+
+	buf = kmalloc(CHANSPEC_BUF_SIZE, GFP_KERNEL);
+	if (buf == NULL) {
+		WL_ERR(("failed to allocate chanspec buffer\n"));
+		return -ENOMEM;
+	}
+
+	/*
+	 * Always use primary interface, irrespective of interface on which
+	 * command came.
+	 */
+	cfg = g_bcm_cfg;
+	ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	/*
+	 * Make sure that FW and driver are in right state to do auto channel
+	 * selection scan.
+	 */
+	ret = wl_cfg80211_set_auto_channel_scan_state(ndev);
+	if (ret < 0) {
+		WL_ERR(("can't set auto channel scan state, error = %d\n", ret));
+		goto done;
+	}
+
+	/* Best channel selection in 2.4GHz band. */
+	ret = wl_cfg80211_get_chanspecs_2g(ndev, (void *)buf, CHANSPEC_BUF_SIZE);
+	if (ret < 0) {
+		WL_ERR(("can't get chanspecs in 2.4GHz, error = %d\n", ret));
+		goto done;
+	}
+
+	ret = wl_cfg80211_get_best_channel(ndev, (void *)buf, CHANSPEC_BUF_SIZE,
+		&channel);
+	if (ret < 0) {
+		WL_ERR(("can't select best channel scan in 2.4GHz, error = %d\n", ret));
+		goto done;
+	}
+
+	if (CHANNEL_IS_2G(channel)) {
+		channel = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
+	} else {
+		WL_ERR(("invalid 2.4GHz channel, channel = %d\n", channel));
+		channel = 0;
+	}
+
+	sprintf(pos, "%04d ", channel);
+	pos += 5;
+
+	/* Best channel selection in 5GHz band. */
+	ret = wl_cfg80211_get_chanspecs_5g(ndev, (void *)buf, CHANSPEC_BUF_SIZE);
+	if (ret < 0) {
+		WL_ERR(("can't get chanspecs in 5GHz, error = %d\n", ret));
+		goto done;
+	}
+
+	ret = wl_cfg80211_get_best_channel(ndev, (void *)buf, CHANSPEC_BUF_SIZE,
+		&channel);
+	if (ret < 0) {
+		WL_ERR(("can't select best channel scan in 5GHz, error = %d\n", ret));
+		goto done;
+	}
+
+	if (CHANNEL_IS_5G(channel)) {
+		channel = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ);
+	} else {
+		WL_ERR(("invalid 5GHz channel, channel = %d\n", channel));
+		channel = 0;
+	}
+
+	sprintf(pos, "%04d ", channel);
+	pos += 5;
+
+	/* Set overall best channel same as 5GHz best channel. */
+	sprintf(pos, "%04d ", channel);
+	pos += 5;
+
+done:
+	kfree(buf);	/* kfree(NULL) is a no-op */
+
+	/* Restore FW and driver back to normal state. */
+	ret = wl_cfg80211_restore_auto_channel_scan_state(ndev);
+	if (ret < 0) {
+		WL_ERR(("can't restore auto channel scan state, error = %d\n", ret));
+	}
+
+	return (pos - cmd);
+}
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+
+static const struct rfkill_ops wl_rfkill_ops = {
+	.set_block = wl_rfkill_set
+};
+
+static int wl_rfkill_set(void *data, bool blocked)
+{
+	struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
+
+	WL_DBG(("Enter \n"));
+	WL_DBG(("RF %s\n", blocked ? "blocked" : "unblocked"));
+
+	if (!cfg)
+		return -EINVAL;
+
+	cfg->rf_blocked = blocked;
+
+	return 0;
+}
+
+static int wl_setup_rfkill(struct bcm_cfg80211 *cfg, bool setup)
+{
+	s32 err = 0;
+
+	WL_DBG(("Enter \n"));
+	if (!cfg)
+		return -EINVAL;
+	if (setup) {
+		cfg->rfkill = rfkill_alloc("brcmfmac-wifi",
+			wl_cfg80211_get_parent_dev(),
+			RFKILL_TYPE_WLAN, &wl_rfkill_ops, (void *)cfg);
+
+		if (!cfg->rfkill) {
+			err = -ENOMEM;
+			goto err_out;
+		}
+
+		err = rfkill_register(cfg->rfkill);
+
+		if (err)
+			rfkill_destroy(cfg->rfkill);
+	} else {
+		if (!cfg->rfkill) {
+			err = -ENOMEM;
+			goto err_out;
+		}
+
+		rfkill_unregister(cfg->rfkill);
+		rfkill_destroy(cfg->rfkill);
+	}
+
+err_out:
+	return err;
+}
+
+#ifdef DEBUGFS_CFG80211
+/**
+* Format : echo "SCAN:1 DBG:1" > /sys/kernel/debug/dhd/debug_level
+* to turn on SCAN and DBG log.
+* To turn off only the SCAN log, echo "SCAN:0" > /sys/kernel/debug/dhd/debug_level
+* To see current setting of debug level,
+* cat /sys/kernel/debug/dhd/debug_level
+*/
+static ssize_t
+wl_debuglevel_write(struct file *file, const char __user *userbuf,
+	size_t count, loff_t *ppos)
+{
+	char tbuf[S_SUBLOGLEVEL * ARRAYSIZE(sublogname_map)], sublog[S_SUBLOGLEVEL];
+	char *params, *token, *colon;
+	uint i, tokens, log_on = 0;
+	memset(tbuf, 0, sizeof(tbuf));
+	memset(sublog, 0, sizeof(sublog));
+	if (copy_from_user(&tbuf, userbuf, min_t(size_t, sizeof(tbuf), count)))
+		return -EFAULT;
+
+	params = &tbuf[0];
+	colon = strchr(params, '\n');
+	if (colon != NULL)
+		*colon = '\0';
+	while ((token = strsep(&params, " ")) != NULL) {
+		memset(sublog, 0, sizeof(sublog));
+		/* strsep() already guarantees token != NULL here */
+		if (!*token)
+			break;
+		colon = strchr(token, ':');
+		if (colon != NULL) {
+			*colon = ' ';
+		}
+		tokens = sscanf(token, "%s %u", sublog, &log_on);
+		if (colon != NULL)
+			*colon = ':';
+
+		if (tokens == 2) {
+				for (i = 0; i < ARRAYSIZE(sublogname_map); i++) {
+					if (!strncmp(sublog, sublogname_map[i].sublogname,
+						strlen(sublogname_map[i].sublogname))) {
+						if (log_on)
+							wl_dbg_level |=
+							(sublogname_map[i].log_level);
+						else
+							wl_dbg_level &=
+							~(sublogname_map[i].log_level);
+					}
+				}
+		} else
+			WL_ERR(("%s: can't parse '%s' as a "
+			       "SUBMODULE:LEVEL (%d tokens)\n",
+			       tbuf, token, tokens));
+	}
+	return count;
+}
+
+static ssize_t
+wl_debuglevel_read(struct file *file, char __user *user_buf,
+	size_t count, loff_t *ppos)
+{
+	char *param;
+	char tbuf[S_SUBLOGLEVEL * ARRAYSIZE(sublogname_map)];
+	uint i;
+	memset(tbuf, 0, sizeof(tbuf));
+	param = &tbuf[0];
+	for (i = 0; i < ARRAYSIZE(sublogname_map); i++) {
+		param += snprintf(param, sizeof(tbuf) - (param - tbuf), "%s:%d ",
+			sublogname_map[i].sublogname,
+			(wl_dbg_level & sublogname_map[i].log_level) ? 1 : 0);
+	}
+	*param = '\n';
+	return simple_read_from_buffer(user_buf, count, ppos, tbuf, strlen(&tbuf[0]));
+
+}
+static const struct file_operations fops_debuglevel = {
+	.open = NULL,
+	.write = wl_debuglevel_write,
+	.read = wl_debuglevel_read,
+	.owner = THIS_MODULE,
+	.llseek = NULL,
+};
+
+static s32 wl_setup_debugfs(struct bcm_cfg80211 *cfg)
+{
+	s32 err = 0;
+	struct dentry *_dentry;
+	if (!cfg)
+		return -EINVAL;
+	cfg->debugfs = debugfs_create_dir(KBUILD_MODNAME, NULL);
+	if (!cfg->debugfs || IS_ERR(cfg->debugfs)) {
+		if (cfg->debugfs == ERR_PTR(-ENODEV))
+			WL_ERR(("Debugfs is not enabled on this kernel\n"));
+		else
+			WL_ERR(("Can not create debugfs directory\n"));
+		cfg->debugfs = NULL;
+		goto exit;
+
+	}
+	_dentry = debugfs_create_file("debug_level", S_IRUSR | S_IWUSR,
+		cfg->debugfs, cfg, &fops_debuglevel);
+	if (!_dentry || IS_ERR(_dentry)) {
+		WL_ERR(("failed to create debug_level debug file\n"));
+		wl_free_debugfs(cfg);
+	}
+exit:
+	return err;
+}
+static s32 wl_free_debugfs(struct bcm_cfg80211 *cfg)
+{
+	if (!cfg)
+		return -EINVAL;
+	if (cfg->debugfs)
+		debugfs_remove_recursive(cfg->debugfs);
+	cfg->debugfs = NULL;
+	return 0;
+}
+#endif /* DEBUGFS_CFG80211 */
+
+struct device *wl_cfg80211_get_parent_dev(void)
+{
+	return cfg80211_parent_dev;
+}
+
+void wl_cfg80211_set_parent_dev(void *dev)
+{
+	cfg80211_parent_dev = dev;
+}
+
+static void wl_cfg80211_clear_parent_dev(void)
+{
+	cfg80211_parent_dev = NULL;
+}
+
+void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac)
+{
+	wldev_iovar_getbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), "cur_etheraddr", NULL,
+		0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+	memcpy(mac->octet, cfg->ioctl_buf, ETHER_ADDR_LEN);
+}
+static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role)
+{
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+	if (((dev_role == NL80211_IFTYPE_AP) &&
+		!(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) ||
+		((dev_role == NL80211_IFTYPE_P2P_GO) &&
+		!(dhd->op_mode & DHD_FLAG_P2P_GO_MODE)))
+	{
+		WL_ERR(("device role select failed\n"));
+		return false;
+	}
+	return true;
+}
+
+int wl_cfg80211_do_driver_init(struct net_device *net)
+{
+	struct bcm_cfg80211 *cfg = *(struct bcm_cfg80211 **)netdev_priv(net);
+
+	if (!cfg || !cfg->wdev)
+		return -EINVAL;
+
+	if (dhd_do_driver_init(cfg->wdev->netdev) < 0)
+		return -1;
+
+	return 0;
+}
+
+void wl_cfg80211_enable_trace(bool set, u32 level)
+{
+	if (set)
+		wl_dbg_level = level & WL_DBG_LEVEL;
+	else
+		wl_dbg_level |= (WL_DBG_LEVEL & level);
+}
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
+	2, 0))
+static s32
+wl_cfg80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
+	bcm_struct_cfgdev *cfgdev, u64 cookie)
+{
+	/* CFG80211 checks for tx_cancel_wait callback when ATTR_DURATION
+	 * is passed with CMD_FRAME. This callback is supposed to cancel
+	 * the OFFCHANNEL Wait. Since we are already taking care of that
+	 *  with the tx_mgmt logic, do nothing here.
+	 */
+
+	return 0;
+}
+#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL >= 3.2.0 */
+
+#ifdef WL11U
+bcm_tlv_t *
+wl_cfg80211_find_interworking_ie(u8 *parse, u32 len)
+{
+	bcm_tlv_t *ie;
+
+	/* bcm_parse_tlvs() returns the first matching TLV, or NULL */
+	ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_INTERWORKING_ID);
+	return ie;
+}
+
+
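+/*
+ * Install an 802.11u Interworking element via the "ie" iovar. If an IW IE
+ * with different contents was set earlier, it is deleted first (an "add"
+ * with length 0), then the new element is pushed and cached in cfg->iw_ie.
+ */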
+static s32
+wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag,
+            uint8 ie_id, uint8 *data, uint8 data_len)
+{
+	s32 err = BCME_OK;
+	s32 buf_len;
+	s32 iecount;
+	ie_setbuf_t *ie_setbuf;
+
+	if (ie_id != DOT11_MNG_INTERWORKING_ID)
+		return BCME_UNSUPPORTED;
+
+	/* Validate the pktflag parameter */
+	if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG |
+	            VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG |
+	            VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG|
+	            VNDR_IE_CUSTOM_FLAG))) {
+		WL_ERR(("cfg80211 Add IE: Invalid packet flag 0x%x\n", pktflag));
+		return -1;
+	}
+
+	/* use VNDR_IE_CUSTOM_FLAG for non-vendor IEs; currently a fixed value */
+	pktflag = htod32(pktflag);
+
+	buf_len = sizeof(ie_setbuf_t) + data_len - 1;
+	ie_setbuf = (ie_setbuf_t *) kzalloc(buf_len, GFP_KERNEL);
+
+	if (!ie_setbuf) {
+		WL_ERR(("Error allocating buffer for IE\n"));
+		return -ENOMEM;
+	}
+
+	if (cfg->iw_ie_len == data_len && !memcmp(cfg->iw_ie, data, data_len)) {
+		WL_ERR(("Previous IW IE is equals to current IE\n"));
+		err = BCME_OK;
+		goto exit;
+	}
+
+	strncpy(ie_setbuf->cmd, "add", VNDR_IE_CMD_LEN - 1);
+	ie_setbuf->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+
+	/* Buffer contains only 1 IE */
+	iecount = htod32(1);
+	memcpy((void *)&ie_setbuf->ie_buffer.iecount, &iecount, sizeof(int));
+	memcpy((void *)&ie_setbuf->ie_buffer.ie_list[0].pktflag, &pktflag, sizeof(uint32));
+
+	/* Now, add the IE to the buffer */
+	ie_setbuf->ie_buffer.ie_list[0].ie_data.id = ie_id;
+
+	/* if already set with previous values, delete it first */
+	if (cfg->iw_ie_len != 0) {
+		WL_DBG(("Different IW_IE was already set. clear first\n"));
+
+		ie_setbuf->ie_buffer.ie_list[0].ie_data.len = 0;
+
+		err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len,
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+		if (err != BCME_OK)
+			goto exit;
+	}
+
+	ie_setbuf->ie_buffer.ie_list[0].ie_data.len = data_len;
+	memcpy((uchar *)&ie_setbuf->ie_buffer.ie_list[0].ie_data.data[0], data, data_len);
+
+	err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len,
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+	if (err == BCME_OK) {
+		/* data_len is a uint8, so the old ">= 0" check was always true */
+		if (data_len < IW_IES_MAX_BUF_LEN) {
+			memcpy(cfg->iw_ie, data, data_len);
+		} else {
+			WL_ERR(("DBG: Out of range index\n"));
+			err = -ENOMEM;
+			goto exit;
+		}
+		cfg->iw_ie_len = data_len;
+		cfg->wl11u = TRUE;
+
+		err = wldev_iovar_setint_bsscfg(ndev, "grat_arp", 1, bssidx);
+	}
+
+exit:
+	kfree(ie_setbuf);	/* kfree(NULL) is a no-op */
+	return err;
+}
+#endif /* WL11U */
+
+int wl_cfg80211_scan_stop(bcm_struct_cfgdev *cfgdev)
+{
+	struct bcm_cfg80211 *cfg = NULL;
+	struct net_device *ndev = NULL;
+	unsigned long flags;
+	int clear_flag = 0;
+	int ret = 0;
+
+	WL_TRACE(("Enter\n"));
+
+	cfg = g_bcm_cfg;
+	if (!cfg)
+		return -EINVAL;
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+#ifdef WL_CFG80211_P2P_DEV_IF
+	if (cfg->scan_request && cfg->scan_request->wdev == cfgdev) {
+#else
+	if (cfg->scan_request && cfg->scan_request->dev == cfgdev) {
+#endif
+		cfg80211_scan_done(cfg->scan_request, true);
+		cfg->scan_request = NULL;
+		clear_flag = 1;
+	}
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+
+	if (clear_flag)
+		wl_clr_drv_status(cfg, SCANNING, ndev);
+
+	return ret;
+}
+
+bool wl_cfg80211_is_vsdb_mode(void)
+{
+	return (g_bcm_cfg && g_bcm_cfg->vsdb_mode);
+}
+
+void *wl_cfg80211_get_dhdp(void)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	return cfg->pub;
+}
+
+bool wl_cfg80211_is_p2p_active(void)
+{
+	return (g_bcm_cfg && g_bcm_cfg->p2p);
+}
+
+static void wl_cfg80211_work_handler(struct work_struct * work)
+{
+	struct bcm_cfg80211 *cfg = NULL;
+	struct net_info *iter, *next;
+	s32 err = BCME_OK;
+	s32 pm = PM_FAST;
+
+	cfg = container_of(work, struct bcm_cfg80211, pm_enable_work.work);
+	WL_DBG(("Enter \n"));
+	if (cfg->pm_enable_work_on) {
+		cfg->pm_enable_work_on = false;
+		for_each_ndev(cfg, iter, next) {
+			if (!wl_get_drv_status(cfg, CONNECTED, iter->ndev) ||
+				(wl_get_mode_by_netdev(cfg, iter->ndev) != WL_MODE_BSS))
+				continue;
+			if (iter->ndev) {
+				if ((err = wldev_ioctl(iter->ndev, WLC_SET_PM,
+					&pm, sizeof(pm), true)) != 0) {
+					if (err == -ENODEV)
+						WL_DBG(("%s:netdev not ready\n", iter->ndev->name));
+					else
+						WL_ERR(("%s:error (%d)\n", iter->ndev->name, err));
+				} else
+					wl_cfg80211_update_power_mode(iter->ndev);
+			}
+		}
+	}
+}
+
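+/*
+ * Extract the category byte from an 802.11 action frame; returns
+ * DOT11_ACTION_CAT_ERR_MASK for a NULL or too-short frame.
+ */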
+u8
+wl_get_action_category(void *frame, u32 frame_len)
+{
+	u8 category;
+	u8 *ptr = (u8 *)frame;
+	if (frame == NULL)
+		return DOT11_ACTION_CAT_ERR_MASK;
+	if (frame_len < DOT11_ACTION_HDR_LEN)
+		return DOT11_ACTION_CAT_ERR_MASK;
+	category = ptr[DOT11_ACTION_CAT_OFF];
+	WL_INFO(("Action Category: %d\n", category));
+	return category;
+}
+
+int
+wl_get_public_action(void *frame, u32 frame_len, u8 *ret_action)
+{
+	u8 *ptr = (u8 *)frame;
+	if (frame == NULL || ret_action == NULL)
+		return BCME_ERROR;
+	if (frame_len < DOT11_ACTION_HDR_LEN)
+		return BCME_ERROR;
+	if (DOT11_ACTION_CAT_PUBLIC != wl_get_action_category(frame, frame_len))
+		return BCME_ERROR;
+	*ret_action = ptr[DOT11_ACTION_ACT_OFF];
+	WL_INFO(("Public Action : %d\n", *ret_action));
+	return BCME_OK;
+}
+
+static int
+wl_cfg80211_delayed_roam(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const struct ether_addr *bssid)
+{
+	s32 err;
+	wl_event_msg_t e;
+
+	bzero(&e, sizeof(e));
+	e.event_type = cpu_to_be32(WLC_E_ROAM);
+	memcpy(&e.addr, bssid, ETHER_ADDR_LEN);
+	/* trigger the roam event handler */
+	err = wl_notify_roaming_status(cfg, ndev_to_cfgdev(ndev), &e, NULL);
+
+	return err;
+}
+
+
+#ifdef WL_CFG80211_ACL
+static int
+wl_cfg80211_set_mac_acl(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+	const struct cfg80211_acl_data *acl)
+{
+	int i;
+	int ret = 0;
+	int macnum = 0;
+	int macmode = MACLIST_MODE_DISABLED;
+	struct maclist *list;
+
+	/* get the MAC filter mode */
+	if (acl && acl->acl_policy == NL80211_ACL_POLICY_DENY_UNLESS_LISTED) {
+		macmode = MACLIST_MODE_ALLOW;
+	} else if (acl && acl->acl_policy == NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED &&
+		acl->n_acl_entries) {
+		macmode = MACLIST_MODE_DENY;
+	}
+
+	/* if acl == NULL, macmode stays disabled. */
+	if (macmode == MACLIST_MODE_DISABLED) {
+		if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, NULL)) != 0)
+			WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
+
+		return ret;
+	}
+
+	macnum = acl->n_acl_entries;
+	if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) {
+		WL_ERR(("%s : invalid number of MAC address entries %d\n",
+			__FUNCTION__, macnum));
+		return -1;
+	}
+
+	/* allocate memory for the MAC list */
+	list = (struct maclist*)kmalloc(sizeof(int) +
+		sizeof(struct ether_addr) * macnum, GFP_KERNEL);
+	if (!list) {
+		WL_ERR(("%s : failed to allocate memory\n", __FUNCTION__));
+		return -1;
+	}
+
+	/* prepare the MAC list */
+	list->count = htod32(macnum);
+	for (i = 0; i < macnum; i++) {
+		memcpy(&list->ea[i], &acl->mac_addrs[i], ETHER_ADDR_LEN);
+	}
+	/* set the list */
+	if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, list)) != 0)
+		WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
+
+	kfree(list);
+
+	return ret;
+}
+#endif /* WL_CFG80211_ACL */
diff --git a/drivers/staging/edison-bcm43340/wl_cfg80211.h b/drivers/staging/edison-bcm43340/wl_cfg80211.h
new file mode 100644
index 0000000..2a33925
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/wl_cfg80211.h
@@ -0,0 +1,937 @@
+/*
+ * Linux cfg80211 driver
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfg80211.h 470107 2014-04-13 13:08:18Z $
+ */
+
+#ifndef _wl_cfg80211_h_
+#define _wl_cfg80211_h_
+
+#include <linux/wireless.h>
+#include <typedefs.h>
+#include <proto/ethernet.h>
+#include <wlioctl.h>
+#include <net/cfg80211.h>
+#include <linux/rfkill.h>
+
+#include <wl_cfgp2p.h>
+
+struct wl_conf;
+struct wl_iface;
+struct bcm_cfg80211;
+struct wl_security;
+struct wl_ibss;
+
+
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+
+#define WL_DBG_NONE	0
+#define WL_DBG_P2P_ACTION (1 << 5)
+#define WL_DBG_TRACE	(1 << 4)
+#define WL_DBG_SCAN 	(1 << 3)
+#define WL_DBG_DBG 	(1 << 2)
+#define WL_DBG_INFO	(1 << 1)
+#define WL_DBG_ERR	(1 << 0)
+
+/* 0 disables all debug messages; the default, 0xFF, enables all levels */
+#define WL_DBG_LEVEL 0xFF
+
+#define CFG80211_ERROR_TEXT		"CFG80211-ERROR) "
+
+#if defined(DHD_DEBUG)
+#define	WL_ERR(args)									\
+do {										\
+	if (wl_dbg_level & WL_DBG_ERR) {				\
+			printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__);	\
+			printk args;						\
+		}								\
+} while (0)
+#else /* defined(DHD_DEBUG) */
+#define	WL_ERR(args)									\
+do {										\
+	if ((wl_dbg_level & WL_DBG_ERR) && net_ratelimit()) {				\
+			printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__);	\
+			printk args;						\
+		}								\
+} while (0)
+#endif /* defined(DHD_DEBUG) */
+
+#ifdef WL_INFO
+#undef WL_INFO
+#endif
+#define	WL_INFO(args)									\
+do {										\
+	if (wl_dbg_level & WL_DBG_INFO) {				\
+			printk(KERN_INFO "CFG80211-INFO) %s : ", __func__);	\
+			printk args;						\
+		}								\
+} while (0)
+#ifdef WL_SCAN
+#undef WL_SCAN
+#endif
+#define	WL_SCAN(args)								\
+do {									\
+	if (wl_dbg_level & WL_DBG_SCAN) {			\
+		printk(KERN_INFO "CFG80211-SCAN) %s :", __func__);	\
+		printk args;							\
+	}									\
+} while (0)
+#ifdef WL_TRACE
+#undef WL_TRACE
+#endif
+#define	WL_TRACE(args)								\
+do {									\
+	if (wl_dbg_level & WL_DBG_TRACE) {			\
+		printk(KERN_INFO "CFG80211-TRACE) %s :", __func__);	\
+		printk args;							\
+	}									\
+} while (0)
+#ifdef WL_TRACE_HW4
+#undef WL_TRACE_HW4
+#endif
+#define	WL_TRACE_HW4			WL_TRACE
+#if (WL_DBG_LEVEL > 0)
+#define	WL_DBG(args)								\
+do {									\
+	if (wl_dbg_level & WL_DBG_DBG) {			\
+		printk(KERN_DEBUG "CFG80211-DEBUG) %s :", __func__);	\
+		printk args;							\
+	}									\
+} while (0)
+#else				/* !(WL_DBG_LEVEL > 0) */
+#define	WL_DBG(args)
+#endif				/* (WL_DBG_LEVEL > 0) */
+#define WL_PNO(x)
+#define WL_SD(x)
+
+
+#define WL_SCAN_RETRY_MAX	3
+#define WL_NUM_PMKIDS_MAX	MAXPMKID
+#define WL_SCAN_BUF_MAX 	(1024 * 8)
+#define WL_TLV_INFO_MAX 	1500
+#define WL_SCAN_IE_LEN_MAX      2048
+#define WL_BSS_INFO_MAX		2048
+#define WL_ASSOC_INFO_MAX	512
+#define WL_IOCTL_LEN_MAX	2048
+#define WL_EXTRA_BUF_MAX	2048
+#define WL_SCAN_ERSULTS_LAST 	(WL_SCAN_RESULTS_NO_MEM+1)
+#define WL_AP_MAX		256
+#define WL_FILE_NAME_MAX	256
+#define WL_DWELL_TIME 		200
+#define WL_MED_DWELL_TIME       400
+#define WL_MIN_DWELL_TIME	100
+#define WL_LONG_DWELL_TIME 	1000
+#define IFACE_MAX_CNT 		2
+#define WL_SCAN_CONNECT_DWELL_TIME_MS 		200
+#define WL_SCAN_JOIN_PROBE_INTERVAL_MS 		20
+#define WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS 	320
+#define WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS 	400
+#define WL_AF_TX_MAX_RETRY 	5
+
+#define WL_AF_SEARCH_TIME_MAX           450
+#define WL_AF_TX_EXTRA_TIME_MAX         200
+
+#define WL_SCAN_TIMER_INTERVAL_MS	10000 /* Scan timeout */
+#define WL_CHANNEL_SYNC_RETRY 	5
+#define WL_INVALID 		-1
+
+/* Bring down SCB Timeout to 20secs from 60secs default */
+#ifndef WL_SCB_TIMEOUT
+#define WL_SCB_TIMEOUT 20
+#endif
+
+/* SCAN_SUPPRESS timer values in ms */
+#define WL_SCAN_SUPPRESS_TIMEOUT 31000 /* default Framework DHCP timeout is 30 sec */
+#define WL_SCAN_SUPPRESS_RETRY 3000
+
+#define WL_PM_ENABLE_TIMEOUT 10000
+
+
+/* driver status */
+enum wl_status {
+	WL_STATUS_READY = 0,
+	WL_STATUS_SCANNING,
+	WL_STATUS_SCAN_ABORTING,
+	WL_STATUS_CONNECTING,
+	WL_STATUS_CONNECTED,
+	WL_STATUS_DISCONNECTING,
+	WL_STATUS_AP_CREATING,
+	WL_STATUS_AP_CREATED,
+	/* whole sending action frame procedure:
+	 * includes a) 'finding common channel' for public action request frame
+	 * and b) 'sending af via 'actframe' iovar'
+	 */
+	WL_STATUS_SENDING_ACT_FRM,
+	/* find a peer to go to a common channel before sending public action req frame */
+	WL_STATUS_FINDING_COMMON_CHANNEL,
+	/* waiting for the next af to sync with the supplicant's timing.
+	 * it includes SENDING_ACT_FRM and WAITING_NEXT_ACT_FRM_LISTEN
+	 */
+	WL_STATUS_WAITING_NEXT_ACT_FRM,
+#ifdef WL_CFG80211_SYNC_GON
+	/* go to listen state to wait for next af after SENDING_ACT_FRM */
+	WL_STATUS_WAITING_NEXT_ACT_FRM_LISTEN,
+#endif /* WL_CFG80211_SYNC_GON */
+	/* set when the upper layer requests listen and we succeed in setting
+	 * listen mode. if set, another scan request can abort the current
+	 * listen state
+	 */
+	WL_STATUS_REMAINING_ON_CHANNEL,
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	/* a fake listen state used to keep the current scan state.
+	 * set when the upper layer requests listen while a scan is running;
+	 * an expiry timer then runs without an actual listen state.
+	 * if set, other scan requests do not need to abort the scan.
+	 */
+	WL_STATUS_FAKE_REMAINING_ON_CHANNEL
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+};
+
+/* wi-fi mode */
+enum wl_mode {
+	WL_MODE_BSS,
+	WL_MODE_IBSS,
+	WL_MODE_AP
+};
+
+/* driver profile list */
+enum wl_prof_list {
+	WL_PROF_MODE,
+	WL_PROF_SSID,
+	WL_PROF_SEC,
+	WL_PROF_IBSS,
+	WL_PROF_BAND,
+	WL_PROF_CHAN,
+	WL_PROF_BSSID,
+	WL_PROF_ACT,
+	WL_PROF_BEACONINT,
+	WL_PROF_DTIMPERIOD
+};
+
+/* dongle escan state */
+enum wl_escan_state {
+	WL_ESCAN_STATE_IDLE,
+	WL_ESCAN_STATE_SCANING
+};
+/* fw downloading status */
+enum wl_fw_status {
+	WL_FW_LOADING_DONE,
+	WL_NVRAM_LOADING_DONE
+};
+
+enum wl_management_type {
+	WL_BEACON = 0x1,
+	WL_PROBE_RESP = 0x2,
+	WL_ASSOC_RESP = 0x4
+};
+
+enum wl_handler_del_type {
+	WL_HANDLER_NOTUSE,
+	WL_HANDLER_DEL,
+	WL_HANDLER_MAINTAIN,
+	WL_HANDLER_PEND
+};
+
+/* beacon / probe_response */
+struct beacon_proberesp {
+	__le64 timestamp;
+	__le16 beacon_int;
+	__le16 capab_info;
+	u8 variable[0];
+} __attribute__ ((packed));
+
+/* driver configuration */
+struct wl_conf {
+	u32 frag_threshold;
+	u32 rts_threshold;
+	u32 retry_short;
+	u32 retry_long;
+	s32 tx_power;
+	struct ieee80211_channel channel;
+};
+
+typedef s32(*EVENT_HANDLER) (struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+                            const wl_event_msg_t *e, void *data);
+
+/* bss inform structure for cfg80211 interface */
+struct wl_cfg80211_bss_info {
+	u16 band;
+	u16 channel;
+	s16 rssi;
+	u16 frame_len;
+	u8 frame_buf[1];
+};
+
+/* basic structure of scan request */
+struct wl_scan_req {
+	struct wlc_ssid ssid;
+};
+
+/* basic structure of information element */
+struct wl_ie {
+	u16 offset;
+	u8 buf[WL_TLV_INFO_MAX];
+};
+
+/* event queue for cfg80211 main event */
+struct wl_event_q {
+	struct list_head eq_list;
+	u32 etype;
+	wl_event_msg_t emsg;
+	s8 edata[1];
+};
+
+/* security information with currently associated ap */
+struct wl_security {
+	u32 wpa_versions;
+	u32 auth_type;
+	u32 cipher_pairwise;
+	u32 cipher_group;
+	u32 wpa_auth;
+	u32 auth_assoc_res_status;
+};
+
+/* ibss information for currently joined ibss network */
+struct wl_ibss {
+	u8 beacon_interval;	/* in millisecond */
+	u8 atim;		/* in millisecond */
+	s8 join_only;
+	u8 band;
+	u8 channel;
+};
+
+/* cfg driver profile */
+struct wl_profile {
+	u32 mode;
+	s32 band;
+	u32 channel;
+	struct wlc_ssid ssid;
+	struct wl_security sec;
+	struct wl_ibss ibss;
+	u8 bssid[ETHER_ADDR_LEN];
+	u16 beacon_interval;
+	u8 dtim_period;
+	bool active;
+};
+
+struct net_info {
+	struct net_device *ndev;
+	struct wireless_dev *wdev;
+	struct wl_profile profile;
+	s32 mode;
+	s32 roam_off;
+	unsigned long sme_state;
+	bool pm_restore;
+	bool pm_block;
+	s32 pm;
+	struct list_head list; /* list of all net_info structure */
+};
+
+/* association inform */
+#define MAX_REQ_LINE 1024
+struct wl_connect_info {
+	u8 req_ie[MAX_REQ_LINE];
+	s32 req_ie_len;
+	u8 resp_ie[MAX_REQ_LINE];
+	s32 resp_ie_len;
+};
+
+/* firmware /nvram downloading controller */
+struct wl_fw_ctrl {
+	const struct firmware *fw_entry;
+	unsigned long status;
+	u32 ptr;
+	s8 fw_name[WL_FILE_NAME_MAX];
+	s8 nvram_name[WL_FILE_NAME_MAX];
+};
+
+/* assoc ie length */
+struct wl_assoc_ielen {
+	u32 req_len;
+	u32 resp_len;
+};
+
+/* wpa2 pmk list */
+struct wl_pmk_list {
+	pmkid_list_t pmkids;
+	pmkid_t foo[MAXPMKID - 1];
+};
+
+
+#define ESCAN_BUF_SIZE (64 * 1024)
+
+struct escan_info {
+	u32 escan_state;
+#if defined(STATIC_WL_PRIV_STRUCT)
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+#error STATIC_WL_PRIV_STRUCT should be used with CONFIG_DHD_USE_STATIC_BUF
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+	u8 *escan_buf;
+#else
+	u8 escan_buf[ESCAN_BUF_SIZE];
+#endif /* STATIC_WL_PRIV_STRUCT */
+	struct wiphy *wiphy;
+	struct net_device *ndev;
+};
+
+struct ap_info {
+/* Structure to hold WPS, WPA IEs for an AP */
+	u8   probe_res_ie[VNDR_IES_MAX_BUF_LEN];
+	u8   beacon_ie[VNDR_IES_MAX_BUF_LEN];
+	u8   assoc_res_ie[VNDR_IES_MAX_BUF_LEN];
+	u32 probe_res_ie_len;
+	u32 beacon_ie_len;
+	u32 assoc_res_ie_len;
+	u8 *wpa_ie;
+	u8 *rsn_ie;
+	u8 *wps_ie;
+	bool security_mode;
+};
+
+struct sta_info {
+	/* Structure to hold WPS IE for a STA */
+	u8  probe_req_ie[VNDR_IES_BUF_LEN];
+	u8  assoc_req_ie[VNDR_IES_BUF_LEN];
+	u32 probe_req_ie_len;
+	u32 assoc_req_ie_len;
+};
+
+struct afx_hdl {
+	wl_af_params_t *pending_tx_act_frm;
+	struct ether_addr	tx_dst_addr;
+	struct net_device *dev;
+	struct work_struct work;
+	u32 bssidx;
+	u32 retry;
+	s32 peer_chan;
+	s32 peer_listen_chan; /* search channel: configured by upper layer */
+	s32 my_listen_chan;	/* listen channel: extracted from prb req or gon req */
+	bool is_listen;
+	bool ack_recv;
+	bool is_active;
+};
+
+struct parsed_ies {
+	wpa_ie_fixed_t *wps_ie;
+	u32 wps_ie_len;
+	wpa_ie_fixed_t *wpa_ie;
+	u32 wpa_ie_len;
+	bcm_tlv_t *wpa2_ie;
+	u32 wpa2_ie_len;
+};
+
+
+#ifdef WL11U
+/* Max length of Interworking element */
+#define IW_IES_MAX_BUF_LEN 		9
+#endif
+#define MAX_EVENT_BUF_NUM 16
+typedef struct wl_eventmsg_buf {
+	u16 num;
+	struct {
+		u16 type;
+		bool set;
+	} event[MAX_EVENT_BUF_NUM];
+} wl_eventmsg_buf_t;
+
+typedef struct wl_if_event_info {
+	bool valid;
+	int ifidx;
+	int bssidx;
+	uint8 mac[ETHER_ADDR_LEN];
+	char name[IFNAMSIZ+1];
+} wl_if_event_info;
+
+/* private data of cfg80211 interface */
+struct bcm_cfg80211 {
+	struct wireless_dev *wdev;	/* representing the cfg80211 device */
+
+	struct wireless_dev *p2p_wdev;	/* representing wl cfg80211 device for P2P */
+	struct mutex p2p_wdev_sync; /* protect p2p_wdev from race conditions */
+
+	struct net_device *p2p_net;    /* reference to p2p0 interface */
+
+	struct wl_conf *conf;
+	struct cfg80211_scan_request *scan_request;	/* scan request object */
+	EVENT_HANDLER evt_handler[WLC_E_LAST];
+	struct list_head eq_list;	/* used for event queue */
+	struct list_head net_list;     /* used for struct net_info */
+	spinlock_t eq_lock;	/* for event queue synchronization */
+	spinlock_t cfgdrv_lock;	/* to protect scan status (and others if needed) */
+	struct completion act_frm_scan;
+	struct completion iface_disable;
+	struct completion wait_next_af;
+	struct mutex usr_sync;	/* mainly for up/down synchronization */
+	struct wl_scan_results *bss_list;
+	struct wl_scan_results *scan_results;
+
+	/* scan request object for internal purpose */
+	struct wl_scan_req *scan_req_int;
+	/* information element object for internal purpose */
+#if defined(STATIC_WL_PRIV_STRUCT)
+	struct wl_ie *ie;
+#else
+	struct wl_ie ie;
+#endif
+
+	/* association information container */
+#if defined(STATIC_WL_PRIV_STRUCT)
+	struct wl_connect_info *conn_info;
+#else
+	struct wl_connect_info conn_info;
+#endif
+#ifdef DEBUGFS_CFG80211
+	struct dentry		*debugfs;
+#endif /* DEBUGFS_CFG80211 */
+	struct wl_pmk_list *pmk_list;	/* wpa2 pmk list */
+	tsk_ctl_t event_tsk;  		/* task of main event handler thread */
+	void *pub;
+	u32 iface_cnt;
+	u32 channel;		/* current channel */
+	u32 af_sent_channel;	/* channel on which the action frame was sent */
+	/* next af subtype to cancel the remaining dwell time in the rx process */
+	u8 next_af_subtype;
+#ifdef WL_CFG80211_SYNC_GON
+	ulong af_tx_sent_jiffies;
+#endif /* WL_CFG80211_SYNC_GON */
+	struct escan_info escan_info;   /* escan information */
+	bool active_scan;	/* current scan mode */
+	bool ibss_starter;	/* indicates this sta is ibss starter */
+	bool link_up;		/* link/connection up flag */
+
+	/* indicates whether the chip supports power save mode */
+	bool pwr_save;
+	bool roam_on;		/* on/off switch for self-roaming */
+	bool scan_tried;	/* indicates if first scan attempted */
+	bool wlfc_on;
+	bool vsdb_mode;
+	bool roamoff_on_concurrent;
+	u8 *ioctl_buf;		/* ioctl buffer */
+	struct mutex ioctl_buf_sync;
+	u8 *escan_ioctl_buf;
+	u8 *extra_buf;	/* mainly to grab assoc information */
+	struct dentry *debugfsdir;
+	struct rfkill *rfkill;
+	bool rf_blocked;
+	struct ieee80211_channel remain_on_chan;
+	enum nl80211_channel_type remain_on_chan_type;
+	u64 send_action_id;
+	u64 last_roc_id;
+	wait_queue_head_t netif_change_event;
+	wl_if_event_info if_event_info;
+	struct completion send_af_done;
+	struct afx_hdl *afx_hdl;
+	struct ap_info *ap_info;
+	struct sta_info *sta_info;
+	struct p2p_info *p2p;
+	bool p2p_supported;
+	void *btcoex_info;
+	struct timer_list scan_timeout;   /* timer to catch scan event timeouts */
+	s32(*state_notifier) (struct bcm_cfg80211 *cfg,
+		struct net_info *_net_info, enum wl_status state, bool set);
+	unsigned long interrested_state;
+	wlc_ssid_t hostapd_ssid;
+#ifdef WL11U
+	bool wl11u;
+	u8 iw_ie[IW_IES_MAX_BUF_LEN];
+	u32 iw_ie_len;
+#endif /* WL11U */
+	bool sched_scan_running;	/* scheduled scan req status */
+#ifdef WL_SCHED_SCAN
+	struct cfg80211_sched_scan_request *sched_scan_req;	/* scheduled scan req */
+#endif /* WL_SCHED_SCAN */
+	bool scan_suppressed;
+	struct timer_list scan_supp_timer;
+	struct work_struct wlan_work;
+	struct mutex event_sync;	/* mainly for up/down synchronization */
+	bool disable_roam_event;
+	bool pm_enable_work_on;
+	struct delayed_work pm_enable_work;
+	vndr_ie_setbuf_t *ibss_vsie;	/* keep the VSIE for IBSS */
+	int ibss_vsie_len;
+	bool roam_offload;
+};
+
+
+static inline struct wl_bss_info *next_bss(struct wl_scan_results *list, struct wl_bss_info *bss)
+{
+	return bss = bss ?
+		(struct wl_bss_info *)((uintptr) bss + dtoh32(bss->length)) : list->bss_info;
+}
+static inline s32
+wl_alloc_netinfo(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	struct wireless_dev * wdev, s32 mode, bool pm_block)
+{
+	struct net_info *_net_info;
+	s32 err = 0;
+	if (cfg->iface_cnt == IFACE_MAX_CNT)
+		return -ENOMEM;
+	_net_info = kzalloc(sizeof(struct net_info), GFP_KERNEL);
+	if (!_net_info)
+		err = -ENOMEM;
+	else {
+		_net_info->mode = mode;
+		_net_info->ndev = ndev;
+		_net_info->wdev = wdev;
+		_net_info->pm_restore = 0;
+		_net_info->pm = 0;
+		_net_info->pm_block = pm_block;
+		_net_info->roam_off = WL_INVALID;
+		cfg->iface_cnt++;
+		list_add(&_net_info->list, &cfg->net_list);
+	}
+	return err;
+}
+static inline void
+wl_dealloc_netinfo(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+		if (ndev && (_net_info->ndev == ndev)) {
+			list_del(&_net_info->list);
+			cfg->iface_cnt--;
+			kfree(_net_info);
+		}
+	}
+
+}
+static inline void
+wl_delete_all_netinfo(struct bcm_cfg80211 *cfg)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+		list_del(&_net_info->list);
+		if (_net_info->wdev)
+			kfree(_net_info->wdev);
+		kfree(_net_info);
+	}
+	cfg->iface_cnt = 0;
+}
+static inline u32
+wl_get_status_all(struct bcm_cfg80211 *cfg, s32 status)
+{
+	struct net_info *_net_info, *next;
+	u32 cnt = 0;
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+		if (_net_info->ndev &&
+			test_bit(status, &_net_info->sme_state))
+			cnt++;
+	}
+	return cnt;
+}
+static inline void
+wl_set_status_all(struct bcm_cfg80211 *cfg, s32 status, u32 op)
+{
+	struct net_info *_net_info, *next;
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+		switch (op) {
+			case 1:
+				return; /* set all status is not allowed */
+			case 2:
+				clear_bit(status, &_net_info->sme_state);
+				if (cfg->state_notifier &&
+					test_bit(status, &(cfg->interrested_state)))
+					cfg->state_notifier(cfg, _net_info, status, false);
+				break;
+			case 4:
+				return; /* change all status is not allowed */
+			default:
+				return; /* unknown operation */
+		}
+	}
+}
+static inline void
+wl_set_status_by_netdev(struct bcm_cfg80211 *cfg, s32 status,
+	struct net_device *ndev, u32 op)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+		if (ndev && (_net_info->ndev == ndev)) {
+			switch (op) {
+				case 1:
+					set_bit(status, &_net_info->sme_state);
+					if (cfg->state_notifier &&
+						test_bit(status, &(cfg->interrested_state)))
+						cfg->state_notifier(cfg, _net_info, status, true);
+					break;
+				case 2:
+					clear_bit(status, &_net_info->sme_state);
+					if (cfg->state_notifier &&
+						test_bit(status, &(cfg->interrested_state)))
+						cfg->state_notifier(cfg, _net_info, status, false);
+					break;
+				case 4:
+					change_bit(status, &_net_info->sme_state);
+					break;
+			}
+		}
+	}
+}
+
+static inline u32
+wl_get_status_by_netdev(struct bcm_cfg80211 *cfg, s32 status,
+	struct net_device *ndev)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+		if (ndev && (_net_info->ndev == ndev))
+			return test_bit(status, &_net_info->sme_state);
+	}
+	return 0;
+}
+
+static inline s32
+wl_get_mode_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+		if (ndev && (_net_info->ndev == ndev))
+			return _net_info->mode;
+	}
+	return -1;
+}
+
+
+static inline void
+wl_set_mode_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	s32 mode)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+		if (ndev && (_net_info->ndev == ndev))
+			_net_info->mode = mode;
+	}
+}
+static inline struct wl_profile *
+wl_get_profile_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+		if (ndev && (_net_info->ndev == ndev))
+			return &_net_info->profile;
+	}
+	return NULL;
+}
+static inline struct net_info *
+wl_get_netinfo_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	struct net_info *_net_info, *next;
+
+	list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+		if (ndev && (_net_info->ndev == ndev))
+			return _net_info;
+	}
+	return NULL;
+}
+#define bcmcfg_to_wiphy(cfg) (cfg->wdev->wiphy)
+#define bcmcfg_to_prmry_ndev(cfg) (cfg->wdev->netdev)
+#define bcmcfg_to_prmry_wdev(cfg) (cfg->wdev)
+#define bcmcfg_to_p2p_wdev(cfg) (cfg->p2p_wdev)
+#define ndev_to_wl(n) (wdev_to_wl(n->ieee80211_ptr))
+#define ndev_to_wdev(ndev) (ndev->ieee80211_ptr)
+#define wdev_to_ndev(wdev) (wdev->netdev)
+
+#if defined(WL_ENABLE_P2P_IF)
+#define ndev_to_wlc_ndev(ndev, cfg)	((ndev == cfg->p2p_net) ? \
+	bcmcfg_to_prmry_ndev(cfg) : ndev)
+#else
+#define ndev_to_wlc_ndev(ndev, cfg)	(ndev)
+#endif /* WL_ENABLE_P2P_IF */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+#define wdev_to_wlc_ndev(wdev, cfg)	\
+	((wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) ? \
+	bcmcfg_to_prmry_ndev(cfg) : wdev_to_ndev(wdev))
+#define cfgdev_to_wlc_ndev(cfgdev, cfg)	wdev_to_wlc_ndev(cfgdev, cfg)
+#define bcmcfg_to_prmry_cfgdev(cfgdev, cfg) bcmcfg_to_prmry_wdev(cfg)
+#elif defined(WL_ENABLE_P2P_IF)
+#define cfgdev_to_wlc_ndev(cfgdev, cfg)	ndev_to_wlc_ndev(cfgdev, cfg)
+#define bcmcfg_to_prmry_cfgdev(cfgdev, cfg) bcmcfg_to_prmry_ndev(cfg)
+#else
+#define cfgdev_to_wlc_ndev(cfgdev, cfg)	(cfgdev)
+#define bcmcfg_to_prmry_cfgdev(cfgdev, cfg) (cfgdev)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+#define ndev_to_cfgdev(ndev)	ndev_to_wdev(ndev)
+#define discover_cfgdev(cfgdev, cfg) (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE)
+#else
+#define ndev_to_cfgdev(ndev)	(ndev)
+#define discover_cfgdev(cfgdev, cfg) (cfgdev == cfg->p2p_net)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+#define scan_req_match(cfg)	(((cfg) && (cfg->scan_request) && \
+	(cfg->scan_request->wdev == cfg->p2p_wdev)) ? true : false)
+#elif defined(WL_ENABLE_P2P_IF)
+#define scan_req_match(cfg)	(((cfg) && (cfg->scan_request) && \
+	(cfg->scan_request->dev == cfg->p2p_net)) ? true : false)
+#else
+#define scan_req_match(cfg)	(((cfg) && p2p_is_on(cfg) && p2p_scan(cfg)) ? \
+	true : false)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+#define wl_to_sr(w) (w->scan_req_int)
+#if defined(STATIC_WL_PRIV_STRUCT)
+#define wl_to_ie(w) (w->ie)
+#define wl_to_conn(w) (w->conn_info)
+#else
+#define wl_to_ie(w) (&w->ie)
+#define wl_to_conn(w) (&w->conn_info)
+#endif
+#define wiphy_from_scan(w) (w->escan_info.wiphy)
+#define wl_get_drv_status_all(cfg, stat) \
+	(wl_get_status_all(cfg, WL_STATUS_ ## stat))
+#define wl_get_drv_status(cfg, stat, ndev)  \
+	(wl_get_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev))
+#define wl_set_drv_status(cfg, stat, ndev)  \
+	(wl_set_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev, 1))
+#define wl_clr_drv_status(cfg, stat, ndev)  \
+	(wl_set_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev, 2))
+#define wl_clr_drv_status_all(cfg, stat)  \
+	(wl_set_status_all(cfg, WL_STATUS_ ## stat, 2))
+#define wl_chg_drv_status(cfg, stat, ndev)  \
+	(wl_set_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev, 4))
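+
+/* Illustrative usage of the drv_status helpers above; CONNECTED is pasted
+ * into WL_STATUS_CONNECTED by the ## stat token concatenation, and is
+ * assumed to be one of the driver's wl_status bits:
+ *
+ *	wl_set_drv_status(cfg, CONNECTED, ndev);
+ *	if (wl_get_drv_status(cfg, CONNECTED, ndev))
+ *		...;
+ *	wl_clr_drv_status(cfg, CONNECTED, ndev);
+ */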
+
+#define for_each_bss(list, bss, __i)	\
+	for (__i = 0; __i < list->count && __i < WL_AP_MAX; __i++, bss = next_bss(list, bss))
+
+#define for_each_ndev(cfg, iter, next) \
+	list_for_each_entry_safe(iter, next, &cfg->net_list, list)
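+
+/* Sketch of walking a scan result list with next_bss() and for_each_bss();
+ * handle_bss() is a hypothetical consumer. for_each_bss() only advances the
+ * cursor in its continuation expression, so the pointer must be seeded with
+ * the first entry before the loop:
+ *
+ *	struct wl_bss_info *bss = NULL;
+ *	s32 i;
+ *	bss = next_bss(cfg->bss_list, bss);
+ *	for_each_bss(cfg->bss_list, bss, i)
+ *		handle_bss(bss);
+ */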
+
+
+/* In case of WPS from wpa_supplicant, the pairwise suite and group suite are 0.
+ * In addition to that, wpa_version is WPA_VERSION_1
+ */
+#define is_wps_conn(_sme) \
+	((wl_cfgp2p_find_wpsie((u8 *)_sme->ie, _sme->ie_len) != NULL) && \
+	 (!_sme->crypto.n_ciphers_pairwise) && \
+	 (!_sme->crypto.cipher_group))
+extern s32 wl_cfg80211_attach(struct net_device *ndev, void *context);
+extern s32 wl_cfg80211_attach_post(struct net_device *ndev);
+extern void wl_cfg80211_detach(void *para);
+
+extern void wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t *e,
+            void *data);
+void wl_cfg80211_set_parent_dev(void *dev);
+struct device *wl_cfg80211_get_parent_dev(void);
+
+extern s32 wl_cfg80211_up(void *para);
+extern s32 wl_cfg80211_down(void *para);
+extern s32 wl_cfg80211_notify_ifadd(int ifidx, char *name, uint8 *mac, uint8 bssidx);
+extern s32 wl_cfg80211_notify_ifdel(int ifidx, char *name, uint8 *mac, uint8 bssidx);
+extern s32 wl_cfg80211_notify_ifchange(int ifidx, char *name, uint8 *mac, uint8 bssidx);
+extern struct net_device* wl_cfg80211_allocate_if(struct bcm_cfg80211 *cfg, int ifidx, char *name,
+	uint8 *mac, uint8 bssidx);
+extern int wl_cfg80211_register_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev);
+extern int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev);
+extern int wl_cfg80211_scan_stop(bcm_struct_cfgdev *cfgdev);
+extern bool wl_cfg80211_is_vsdb_mode(void);
+extern void* wl_cfg80211_get_dhdp(void);
+extern bool wl_cfg80211_is_p2p_active(void);
+extern void wl_cfg80211_dbg_level(u32 level);
+extern s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr);
+extern s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len);
+extern s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len);
+extern s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *net, char *buf, int len,
+	enum wl_management_type type);
+extern s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len);
+
+/* btcoex functions */
+void* wl_cfg80211_btcoex_init(struct net_device *ndev);
+void wl_cfg80211_btcoex_deinit(void);
+
+#ifdef WL_SUPPORT_AUTO_CHANNEL
+#define CHANSPEC_BUF_SIZE	1024
+#define CHAN_SEL_IOCTL_DELAY	300
+#define CHAN_SEL_RETRY_COUNT	15
+#define CHANNEL_IS_RADAR(channel)	(((channel & WL_CHAN_RADAR) || \
+	(channel & WL_CHAN_PASSIVE)) ? true : false)
+#define CHANNEL_IS_2G(channel)	(((channel >= 1) && (channel <= 14)) ? \
+	true : false)
+#define CHANNEL_IS_5G(channel)	(((channel >= 36) && (channel <= 165)) ? \
+	true : false)
+extern s32 wl_cfg80211_get_best_channels(struct net_device *dev, char* command,
+	int total_len);
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+extern int wl_cfg80211_hang(struct net_device *dev, u16 reason);
+extern s32 wl_mode_to_nl80211_iftype(s32 mode);
+int wl_cfg80211_do_driver_init(struct net_device *net);
+void wl_cfg80211_enable_trace(bool set, u32 level);
+extern s32 wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify);
+extern s32 wl_cfg80211_if_is_group_owner(void);
+extern chanspec_t wl_ch_host_to_driver(u16 channel);
+extern s32 wl_set_tx_power(struct net_device *dev,
+	enum nl80211_tx_power_setting type, s32 dbm);
+extern s32 wl_get_tx_power(struct net_device *dev, s32 *dbm);
+extern s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add);
+extern void wl_stop_wait_next_action_frame(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+extern void wl_cfg80211_add_to_eventbuffer(wl_eventmsg_buf_t *ev, u16 event, bool set);
+extern s32 wl_cfg80211_apply_eventbuffer(struct net_device *ndev,
+	struct bcm_cfg80211 *cfg, wl_eventmsg_buf_t *ev);
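+/* Typical batching pattern (a sketch; WLC_E_ESCAN_RESULT is assumed to be
+ * one of the firmware event codes): collect several event mask changes in a
+ * wl_eventmsg_buf_t and push them to the firmware in one pass:
+ *
+ *	wl_eventmsg_buf_t ev;
+ *	memset(&ev, 0, sizeof(ev));
+ *	wl_cfg80211_add_to_eventbuffer(&ev, WLC_E_ESCAN_RESULT, true);
+ *	wl_cfg80211_apply_eventbuffer(ndev, cfg, &ev);
+ */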
+extern void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac);
+extern void wl_cfg80211_update_power_mode(struct net_device *dev);
+#define SCAN_BUF_CNT	2
+#define SCAN_BUF_NEXT	1
+#define WL_SCANTYPE_LEGACY	0x1
+#define WL_SCANTYPE_P2P		0x2
+#define wl_escan_set_sync_id(a, b) ((a) = htod16(0x1234))
+#define wl_escan_set_type(a, b)
+#define wl_escan_get_buf(a, b) ((wl_scan_results_t *) (a)->escan_info.escan_buf)
+#define wl_escan_check_sync_id(a, b, c) 0
+#define wl_escan_print_sync_id(a, b, c)
+#define wl_escan_increment_sync_id(a, b)
+#define wl_escan_init_sync_id(a)
+
+extern void wl_cfg80211_ibss_vsie_set_buffer(vndr_ie_setbuf_t *ibss_vsie, int ibss_vsie_len);
+extern s32 wl_cfg80211_ibss_vsie_delete(struct net_device *dev);
+
+/* Action frame specific functions */
+extern u8 wl_get_action_category(void *frame, u32 frame_len);
+extern int wl_get_public_action(void *frame, u32 frame_len, u8 *ret_action);
+
+extern int wl_cfg80211_enable_roam_offload(struct net_device *dev, bool enable);
+
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+struct net_device *wl_cfg80211_get_remain_on_channel_ndev(struct bcm_cfg80211 *cfg);
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+extern int wl_cfg80211_get_ioctl_version(void);
+
+#endif				/* _wl_cfg80211_h_ */
diff --git a/drivers/staging/edison-bcm43340/wl_cfg_btcoex.c b/drivers/staging/edison-bcm43340/wl_cfg_btcoex.c
new file mode 100644
index 0000000..a0b0bf4
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/wl_cfg_btcoex.c
@@ -0,0 +1,549 @@
+/*
+ * Linux cfg80211 driver - Dongle Host Driver (DHD) related
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfg_btcoex.c 427707 2013-10-04 10:28:29Z $
+ */
+
+#include <net/rtnetlink.h>
+
+#include <bcmutils.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <dhd_cfg80211.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+
+#ifdef PKT_FILTER_SUPPORT
+extern uint dhd_pkt_filter_enable;
+extern uint dhd_master_mode;
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+#endif
+
+struct btcoex_info {
+	struct timer_list timer;
+	u32 timer_ms;
+	u32 timer_on;
+	u32 ts_dhcp_start;	/* ms timestamp to record time stats */
+	u32 ts_dhcp_ok;		/* ms timestamp to record time stats */
+	bool dhcp_done;	/* flag, indicates that the host finished
+					 * dhcp before t1/t2 expiration
+					 */
+	s32 bt_state;
+	struct work_struct work;
+	struct net_device *dev;
+};
+
+static struct btcoex_info *btcoex_info_loc = NULL;
+
+/* TODO: clean up the BT-Coex code, it still has some legacy ioctl/iovar functions */
+
+/* use new SCO/eSCO smart YG suppression */
+#define BT_DHCP_eSCO_FIX
+/* this flag boosts wifi pkt priority to max; caution: not fair to SCO */
+#define BT_DHCP_USE_FLAGS
+/* T1: start SCO/eSCO priority suppression */
+#define BT_DHCP_OPPR_WIN_TIME	2500
+/* T2: timeout for turning off SCO/eSCO suppression */
+#define BT_DHCP_FLAG_FORCE_TIME 5500
+
+enum wl_cfg80211_btcoex_status {
+	BT_DHCP_IDLE,
+	BT_DHCP_START,
+	BT_DHCP_OPPR_WIN,
+	BT_DHCP_FLAG_FORCE_TIMEOUT
+};
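+
+/* State walk implemented by wl_cfg80211_bt_handler() below: BT_DHCP_IDLE ->
+ * BT_DHCP_START when a DHCP session begins, BT_DHCP_OPPR_WIN for the T1
+ * opportunity window, BT_DHCP_FLAG_FORCE_TIMEOUT while wifi priority is
+ * forced for up to T2, then back to BT_DHCP_IDLE once the flags are restored.
+ */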
+
+/*
+ * get named driver variable to uint register value and return error indication
+ * calling example: dev_wlc_intvar_get_reg(dev, "btc_params", 66, &reg_value)
+ */
+static int
+dev_wlc_intvar_get_reg(struct net_device *dev, char *name,
+	uint reg, int *retval)
+{
+	union {
+		char buf[WLC_IOCTL_SMLEN];
+		int val;
+	} var;
+	int error;
+
+	bcm_mkiovar(name, (char *)(&reg), sizeof(reg),
+		(char *)(&var), sizeof(var.buf));
+	error = wldev_ioctl(dev, WLC_GET_VAR, (char *)(&var), sizeof(var.buf), false);
+
+	*retval = dtoh32(var.val);
+	return (error);
+}
+
+static int
+dev_wlc_bufvar_set(struct net_device *dev, char *name, char *buf, int len)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+	char ioctlbuf_local[1024];
+#else
+	static char ioctlbuf_local[1024];
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
+
+	bcm_mkiovar(name, buf, len, ioctlbuf_local, sizeof(ioctlbuf_local));
+
+	return (wldev_ioctl(dev, WLC_SET_VAR, ioctlbuf_local, sizeof(ioctlbuf_local), true));
+}
+/*
+set named driver variable to a uint register value and return error indication
+calling example: dev_wlc_intvar_set_reg(dev, "btc_params", 66, value)
+*/
+static int
+dev_wlc_intvar_set_reg(struct net_device *dev, char *name, char *addr, char * val)
+{
+	char reg_addr[8];
+
+	memset(reg_addr, 0, sizeof(reg_addr));
+	memcpy((char *)&reg_addr[0], (char *)addr, 4);
+	memcpy((char *)&reg_addr[4], (char *)val, 4);
+
+	return (dev_wlc_bufvar_set(dev, name, (char *)&reg_addr[0], sizeof(reg_addr)));
+}
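+
+/* The 8-byte reg_addr payload is <u32 register><u32 value>; for example the
+ * restore path in set_btc_esco_params() below writes btc_params register 50
+ * back with:
+ *
+ *	regaddr = 50;
+ *	dev_wlc_intvar_set_reg(dev, "btc_params",
+ *		(char *)&regaddr, (char *)&saved_reg50);
+ */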
+
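+/* Poll btc_params register 27 up to a dozen times, 5 ms apart: a bits[2:1]
+ * pattern of 01 marks an SCO/eSCO packet type, and more than two such hits
+ * across the samples is treated as an active SCO/eSCO link.
+ */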
+static bool btcoex_is_sco_active(struct net_device *dev)
+{
+	int ioc_res = 0;
+	bool res = FALSE;
+	int sco_id_cnt = 0;
+	int param27;
+	int i;
+
+	for (i = 0; i < 12; i++) {
+
+		ioc_res = dev_wlc_intvar_get_reg(dev, "btc_params", 27, &param27);
+
+		WL_TRACE(("sample[%d], btc params: 27:%x\n", i, param27));
+
+		if (ioc_res < 0) {
+			WL_ERR(("ioc read btc params error\n"));
+			break;
+		}
+
+		if ((param27 & 0x6) == 2) { /* count both sco & esco  */
+			sco_id_cnt++;
+		}
+
+		if (sco_id_cnt > 2) {
+			WL_TRACE(("sco/esco detected, pkt id_cnt:%d  samples:%d\n",
+				sco_id_cnt, i));
+			res = TRUE;
+			break;
+		}
+
+		OSL_SLEEP(5);
+	}
+
+	return res;
+}
+
+#if defined(BT_DHCP_eSCO_FIX)
+/* Enhanced BT COEX settings for eSCO compatibility during DHCP window */
+static int set_btc_esco_params(struct net_device *dev, bool trump_sco)
+{
+	static bool saved_status = FALSE;
+
+	char buf_reg50va_dhcp_on[8] =
+		{ 50, 00, 00, 00, 0x22, 0x80, 0x00, 0x00 };
+	char buf_reg51va_dhcp_on[8] =
+		{ 51, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+	char buf_reg64va_dhcp_on[8] =
+		{ 64, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+	char buf_reg65va_dhcp_on[8] =
+		{ 65, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+	char buf_reg71va_dhcp_on[8] =
+		{ 71, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+	uint32 regaddr;
+	static uint32 saved_reg50;
+	static uint32 saved_reg51;
+	static uint32 saved_reg64;
+	static uint32 saved_reg65;
+	static uint32 saved_reg71;
+
+	if (trump_sco) {
+		/* this should reduce eSCO aggressive retransmit
+		 * w/o breaking it
+		 */
+
+		/* 1st save current */
+		WL_TRACE(("Do new SCO/eSCO coex algo {save &"
+			  "override}\n"));
+		if ((!dev_wlc_intvar_get_reg(dev, "btc_params", 50, &saved_reg50)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 51, &saved_reg51)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 64, &saved_reg64)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 65, &saved_reg65)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 71, &saved_reg71))) {
+			saved_status = TRUE;
+			WL_TRACE(("saved bt_params[50,51,64,65,71]:"
+				  "0x%x 0x%x 0x%x 0x%x 0x%x\n",
+				  saved_reg50, saved_reg51,
+				  saved_reg64, saved_reg65, saved_reg71));
+		} else {
+			WL_ERR((":%s: save btc_params failed\n",
+				__FUNCTION__));
+			saved_status = FALSE;
+			return -1;
+		}
+
+		WL_TRACE(("override with [50,51,64,65,71]:"
+			  "0x%x 0x%x 0x%x 0x%x 0x%x\n",
+			  *(u32 *)(buf_reg50va_dhcp_on+4),
+			  *(u32 *)(buf_reg51va_dhcp_on+4),
+			  *(u32 *)(buf_reg64va_dhcp_on+4),
+			  *(u32 *)(buf_reg65va_dhcp_on+4),
+			  *(u32 *)(buf_reg71va_dhcp_on+4)));
+
+		dev_wlc_bufvar_set(dev, "btc_params",
+			(char *)&buf_reg50va_dhcp_on[0], 8);
+		dev_wlc_bufvar_set(dev, "btc_params",
+			(char *)&buf_reg51va_dhcp_on[0], 8);
+		dev_wlc_bufvar_set(dev, "btc_params",
+			(char *)&buf_reg64va_dhcp_on[0], 8);
+		dev_wlc_bufvar_set(dev, "btc_params",
+			(char *)&buf_reg65va_dhcp_on[0], 8);
+		dev_wlc_bufvar_set(dev, "btc_params",
+			(char *)&buf_reg71va_dhcp_on[0], 8);
+
+		saved_status = TRUE;
+	} else if (saved_status) {
+		/* restore previously saved bt params */
+		WL_TRACE(("Do new SCO/eSCO coex algo {restore}\n"));
+
+		regaddr = 50;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg50);
+		regaddr = 51;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg51);
+		regaddr = 64;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg64);
+		regaddr = 65;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg65);
+		regaddr = 71;
+		dev_wlc_intvar_set_reg(dev, "btc_params",
+			(char *)&regaddr, (char *)&saved_reg71);
+
+		WL_TRACE(("restore bt_params[50,51,64,65,71]:"
+			"0x%x 0x%x 0x%x 0x%x 0x%x\n",
+			saved_reg50, saved_reg51, saved_reg64,
+			saved_reg65, saved_reg71));
+
+		saved_status = FALSE;
+	} else {
+		WL_ERR((":%s att to restore not saved BTCOEX params\n",
+			__FUNCTION__));
+		return -1;
+	}
+	return 0;
+}
+#endif /* BT_DHCP_eSCO_FIX */
+
+static void
+wl_cfg80211_bt_setflag(struct net_device *dev, bool set)
+{
+#if defined(BT_DHCP_USE_FLAGS)
+	char buf_flag7_dhcp_on[8] = { 7, 00, 00, 00, 0x1, 0x0, 0x00, 0x00 };
+	char buf_flag7_default[8]   = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+#endif
+
+
+#if defined(BT_DHCP_eSCO_FIX)
+	/* set = 1: save & turn on; set = 0: turn off & restore previous settings */
+	set_btc_esco_params(dev, set);
+#endif
+
+#if defined(BT_DHCP_USE_FLAGS)
+	WL_TRACE(("WI-FI priority boost via bt flags, set:%d\n", set));
+	if (set == TRUE)
+		/* Forcing bt_flag7  */
+		dev_wlc_bufvar_set(dev, "btc_flags",
+			(char *)&buf_flag7_dhcp_on[0],
+			sizeof(buf_flag7_dhcp_on));
+	else
+		/* Restoring default bt flag7 */
+		dev_wlc_bufvar_set(dev, "btc_flags",
+			(char *)&buf_flag7_default[0],
+			sizeof(buf_flag7_default));
+#endif
+}
+
+static void wl_cfg80211_bt_timerfunc(ulong data)
+{
+	struct btcoex_info *bt_local = (struct btcoex_info *)data;
+	WL_TRACE(("Enter\n"));
+	bt_local->timer_on = 0;
+	schedule_work(&bt_local->work);
+}
+
+static void wl_cfg80211_bt_handler(struct work_struct *work)
+{
+	struct btcoex_info *btcx_inf;
+
+	btcx_inf = container_of(work, struct btcoex_info, work);
+
+	if (btcx_inf->timer_on) {
+		btcx_inf->timer_on = 0;
+		del_timer_sync(&btcx_inf->timer);
+	}
+
+	switch (btcx_inf->bt_state) {
+		case BT_DHCP_START:
+			/* DHCP started
+			 * provide OPPORTUNITY window to get DHCP address
+			 */
+			WL_TRACE(("bt_dhcp stm: started \n"));
+
+			btcx_inf->bt_state = BT_DHCP_OPPR_WIN;
+			mod_timer(&btcx_inf->timer,
+				jiffies + msecs_to_jiffies(BT_DHCP_OPPR_WIN_TIME));
+			btcx_inf->timer_on = 1;
+			break;
+
+		case BT_DHCP_OPPR_WIN:
+			if (btcx_inf->dhcp_done) {
+				WL_TRACE(("DHCP Done before T1 expiration\n"));
+				goto btc_coex_idle;
+			}
+
+			/* DHCP is not over yet, start lowering BT priority;
+			 * enforce btc_params + flags if necessary
+			 */
+			WL_TRACE(("DHCP T1:%d expired\n", BT_DHCP_OPPR_WIN_TIME));
+			if (btcx_inf->dev)
+				wl_cfg80211_bt_setflag(btcx_inf->dev, TRUE);
+			btcx_inf->bt_state = BT_DHCP_FLAG_FORCE_TIMEOUT;
+			mod_timer(&btcx_inf->timer,
+				jiffies + msecs_to_jiffies(BT_DHCP_FLAG_FORCE_TIME));
+			btcx_inf->timer_on = 1;
+			break;
+
+		case BT_DHCP_FLAG_FORCE_TIMEOUT:
+			if (btcx_inf->dhcp_done) {
+				WL_TRACE(("DHCP Done before T2 expiration\n"));
+			} else {
+				/* No DHCP during T1+T2, restore BT priority */
+				WL_TRACE(("DHCP wait interval T2:%d msec expired\n",
+					BT_DHCP_FLAG_FORCE_TIME));
+			}
+
+			/* Restoring default bt priority */
+			if (btcx_inf->dev)
+				wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE);
+btc_coex_idle:
+			btcx_inf->bt_state = BT_DHCP_IDLE;
+			btcx_inf->timer_on = 0;
+			break;
+
+		default:
+			WL_ERR(("error g_status=%d !!!\n",	btcx_inf->bt_state));
+			if (btcx_inf->dev)
+				wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE);
+			btcx_inf->bt_state = BT_DHCP_IDLE;
+			btcx_inf->timer_on = 0;
+			break;
+	}
+
+	net_os_wake_unlock(btcx_inf->dev);
+}
+
+void* wl_cfg80211_btcoex_init(struct net_device *ndev)
+{
+	struct btcoex_info *btco_inf = NULL;
+
+	btco_inf = kmalloc(sizeof(struct btcoex_info), GFP_KERNEL);
+	if (!btco_inf)
+		return NULL;
+
+	btco_inf->bt_state = BT_DHCP_IDLE;
+	btco_inf->ts_dhcp_start = 0;
+	btco_inf->ts_dhcp_ok = 0;
+	/* Set up timer for BT  */
+	btco_inf->timer_ms = 10;
+	init_timer(&btco_inf->timer);
+	btco_inf->timer.data = (ulong)btco_inf;
+	btco_inf->timer.function = wl_cfg80211_bt_timerfunc;
+
+	btco_inf->dev = ndev;
+
+	INIT_WORK(&btco_inf->work, wl_cfg80211_bt_handler);
+
+	btcoex_info_loc = btco_inf;
+	return btco_inf;
+}
+
+void wl_cfg80211_btcoex_deinit(void)
+{
+	if (!btcoex_info_loc)
+		return;
+
+	if (btcoex_info_loc->timer_on) {
+		btcoex_info_loc->timer_on = 0;
+		del_timer_sync(&btcoex_info_loc->timer);
+	}
+
+	cancel_work_sync(&btcoex_info_loc->work);
+
+	kfree(btcoex_info_loc);
+}
+
+int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, dhd_pub_t *dhd, char *command)
+{
+
+	struct btcoex_info *btco_inf = btcoex_info_loc;
+	char powermode_val = 0;
+	char buf_reg66va_dhcp_on[8] = { 66, 00, 00, 00, 0x10, 0x27, 0x00, 0x00 };
+	char buf_reg41va_dhcp_on[8] = { 41, 00, 00, 00, 0x33, 0x00, 0x00, 0x00 };
+	char buf_reg68va_dhcp_on[8] = { 68, 00, 00, 00, 0x90, 0x01, 0x00, 0x00 };
+
+	uint32 regaddr;
+	static uint32 saved_reg66;
+	static uint32 saved_reg41;
+	static uint32 saved_reg68;
+	static bool saved_status = FALSE;
+
+	char buf_flag7_default[8] =   { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+
+	/* Figure out the powermode command argument: "1" = DHCP start, "2" = DHCP done */
+	strncpy((char *)&powermode_val, command + strlen("BTCOEXMODE") +1, 1);
+
+	if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) {
+		WL_TRACE_HW4(("DHCP session starts\n"));
+
+
+#ifdef PKT_FILTER_SUPPORT
+		dhd->dhcp_in_progress = 1;
+
+		if (dhd->early_suspended) {
+			WL_TRACE_HW4(("DHCP in progressing , disable packet filter!!!\n"));
+			dhd_enable_packet_filter(0, dhd);
+		}
+#endif
+
+		/* Retrieve and save original register values */
+		if ((saved_status == FALSE) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 66,  &saved_reg66)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 41,  &saved_reg41)) &&
+			(!dev_wlc_intvar_get_reg(dev, "btc_params", 68,  &saved_reg68)))   {
+				saved_status = TRUE;
+				WL_TRACE(("Saved 0x%x 0x%x 0x%x\n",
+					saved_reg66, saved_reg41, saved_reg68));
+
+				/* Disable PM mode during the DHCP session */
+
+				/* Start BT timer only for an SCO connection */
+				if (btcoex_is_sco_active(dev)) {
+					/* btc_params 66 */
+					dev_wlc_bufvar_set(dev, "btc_params",
+						(char *)&buf_reg66va_dhcp_on[0],
+						sizeof(buf_reg66va_dhcp_on));
+					/* btc_params 41 0x33 */
+					dev_wlc_bufvar_set(dev, "btc_params",
+						(char *)&buf_reg41va_dhcp_on[0],
+						sizeof(buf_reg41va_dhcp_on));
+					/* btc_params 68 0x190 */
+					dev_wlc_bufvar_set(dev, "btc_params",
+						(char *)&buf_reg68va_dhcp_on[0],
+						sizeof(buf_reg68va_dhcp_on));
+					saved_status = TRUE;
+
+					btco_inf->bt_state = BT_DHCP_START;
+					btco_inf->timer_on = 1;
+					mod_timer(&btco_inf->timer, btco_inf->timer.expires);
+					WL_TRACE(("enable BT DHCP Timer\n"));
+				}
+		}
+		else if (saved_status == TRUE) {
+			WL_ERR(("was called w/o DHCP OFF. Continue\n"));
+		}
+	}
+	else if (strnicmp((char *)&powermode_val, "2", strlen("2")) == 0) {
+
+#ifdef PKT_FILTER_SUPPORT
+		dhd->dhcp_in_progress = 0;
+		WL_TRACE_HW4(("DHCP is complete \n"));
+
+		/* Enable packet filtering */
+		if (dhd->early_suspended) {
+			WL_TRACE_HW4(("DHCP is complete , enable packet filter!!!\n"));
+			dhd_enable_packet_filter(1, dhd);
+		}
+#endif /* PKT_FILTER_SUPPORT */
+
+		/* Restoring PM mode */
+
+		/* Stop any bt timer because DHCP session is done */
+		WL_TRACE(("disable BT DHCP Timer\n"));
+		if (btco_inf->timer_on) {
+			btco_inf->timer_on = 0;
+			del_timer_sync(&btco_inf->timer);
+
+			if (btco_inf->bt_state != BT_DHCP_IDLE) {
+			/* need to restore original btc flags & extra btc params */
+				WL_TRACE(("bt->bt_state:%d\n", btco_inf->bt_state));
+				/* wake up btcoex thread to restore bt flags + params */
+				schedule_work(&btco_inf->work);
+			}
+		}
+
+		/* Restoring btc_flags parameter anyway */
+		if (saved_status == TRUE)
+			dev_wlc_bufvar_set(dev, "btc_flags",
+				(char *)&buf_flag7_default[0], sizeof(buf_flag7_default));
+
+		/* Restore original values */
+		if (saved_status == TRUE) {
+			regaddr = 66;
+			dev_wlc_intvar_set_reg(dev, "btc_params",
+				(char *)&regaddr, (char *)&saved_reg66);
+			regaddr = 41;
+			dev_wlc_intvar_set_reg(dev, "btc_params",
+				(char *)&regaddr, (char *)&saved_reg41);
+			regaddr = 68;
+			dev_wlc_intvar_set_reg(dev, "btc_params",
+				(char *)&regaddr, (char *)&saved_reg68);
+
+			WL_TRACE(("restore regs {66,41,68} <- 0x%x 0x%x 0x%x\n",
+				saved_reg66, saved_reg41, saved_reg68));
+		}
+		saved_status = FALSE;
+
+	}
+	else {
+		WL_ERR(("Unkwown yet power setting, ignored\n"));
+	}
+
+	snprintf(command, 3, "OK");
+
+	return (strlen("OK"));
+}
diff --git a/drivers/staging/edison-bcm43340/wl_cfgp2p.c b/drivers/staging/edison-bcm43340/wl_cfgp2p.c
new file mode 100644
index 0000000..11dee7b
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/wl_cfgp2p.c
@@ -0,0 +1,2675 @@
+/*
+ * Linux cfgp2p driver
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfgp2p.c 470093 2014-04-13 05:05:57Z $
+ *
+ */
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <proto/ethernet.h>
+#include <proto/802.11.h>
+#include <net/rtnetlink.h>
+
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+#include <wldev_common.h>
+#include <wl_android.h>
+
+static s8 scanparambuf[WLC_IOCTL_SMLEN];
+static s8 g_mgmt_ie_buf[2048];
+static bool
+wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type);
+
+static u32
+wl_cfgp2p_vndr_ie(struct bcm_cfg80211 *cfg, u8 *iebuf, s32 pktflag,
+            s8 *oui, s32 ie_id, s8 *data, s32 datalen, const s8* add_del_cmd);
+static s32 wl_cfgp2p_cancel_listen(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	struct wireless_dev *wdev, bool notify);
+
+#if defined(WL_ENABLE_P2P_IF)
+static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd);
+static int wl_cfgp2p_if_open(struct net_device *net);
+static int wl_cfgp2p_if_stop(struct net_device *net);
+
+static const struct net_device_ops wl_cfgp2p_if_ops = {
+	.ndo_open       = wl_cfgp2p_if_open,
+	.ndo_stop       = wl_cfgp2p_if_stop,
+	.ndo_do_ioctl   = wl_cfgp2p_do_ioctl,
+	.ndo_start_xmit = wl_cfgp2p_start_xmit,
+};
+#endif /* WL_ENABLE_P2P_IF */
+
+
+bool wl_cfgp2p_is_pub_action(void *frame, u32 frame_len)
+{
+	wifi_p2p_pub_act_frame_t *pact_frm;
+
+	if (frame == NULL)
+		return false;
+	pact_frm = (wifi_p2p_pub_act_frame_t *)frame;
+	if (frame_len < sizeof(wifi_p2p_pub_act_frame_t) -1)
+		return false;
+
+	if (pact_frm->category == P2P_PUB_AF_CATEGORY &&
+		pact_frm->action == P2P_PUB_AF_ACTION &&
+		pact_frm->oui_type == P2P_VER &&
+		memcmp(pact_frm->oui, P2P_OUI, sizeof(pact_frm->oui)) == 0) {
+		return true;
+	}
+
+	return false;
+}
+
+bool wl_cfgp2p_is_p2p_action(void *frame, u32 frame_len)
+{
+	wifi_p2p_action_frame_t *act_frm;
+
+	if (frame == NULL)
+		return false;
+	act_frm = (wifi_p2p_action_frame_t *)frame;
+	if (frame_len < sizeof(wifi_p2p_action_frame_t) -1)
+		return false;
+
+	if (act_frm->category == P2P_AF_CATEGORY &&
+		act_frm->type  == P2P_VER &&
+		memcmp(act_frm->OUI, P2P_OUI, DOT11_OUI_LEN) == 0) {
+		return true;
+	}
+
+	return false;
+}
+
+#define GAS_RESP_LEN		2
+#define DOUBLE_TLV_BODY_OFF	4
+#define GAS_RESP_OFFSET		4
+#define GAS_CRESP_OFFSET	5
+
+bool wl_cfgp2p_find_gas_subtype(u8 subtype, u8* data, u32 len)
+{
+	bcm_tlv_t *ie = (bcm_tlv_t *)data;
+	u8 *frame = NULL;
+	u16 id, flen;
+
+	/* Skip the first ANQP element, if the frame has one */
+	ie = bcm_parse_tlvs(ie, (int)len, DOT11_MNG_ADVERTISEMENT_ID);
+
+	if (ie == NULL)
+		return false;
+
+	frame = (uint8 *)ie + ie->len + TLV_HDR_LEN + GAS_RESP_LEN;
+	id = ((u16) (((frame)[1] << 8) | (frame)[0]));
+	flen = ((u16) (((frame)[3] << 8) | (frame)[2]));
+
+	/* If the contents match the OUI and the type */
+	if (flen >= WFA_OUI_LEN + 1 &&
+		id ==  P2PSD_GAS_NQP_INFOID &&
+		!bcmp(&frame[DOUBLE_TLV_BODY_OFF], (const uint8*)WFA_OUI, WFA_OUI_LEN) &&
+		subtype == frame[DOUBLE_TLV_BODY_OFF+WFA_OUI_LEN]) {
+		return true;
+	}
+
+	return false;
+}
+
+bool wl_cfgp2p_is_gas_action(void *frame, u32 frame_len)
+{
+
+	wifi_p2psd_gas_pub_act_frame_t *sd_act_frm;
+
+	if (frame == NULL)
+		return false;
+
+	sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame;
+	if (frame_len < (sizeof(wifi_p2psd_gas_pub_act_frame_t) - 1))
+		return false;
+	if (sd_act_frm->category != P2PSD_ACTION_CATEGORY)
+		return false;
+
+#ifdef WL11U
+	if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP)
+		return wl_cfgp2p_find_gas_subtype(P2PSD_GAS_OUI_SUBTYPE,
+			(u8 *)sd_act_frm->query_data + GAS_RESP_OFFSET,
+			frame_len);
+
+	else if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_CRESP)
+		return wl_cfgp2p_find_gas_subtype(P2PSD_GAS_OUI_SUBTYPE,
+			(u8 *)sd_act_frm->query_data + GAS_CRESP_OFFSET,
+			frame_len);
+	else if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ ||
+		sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ)
+		return true;
+	else
+		return false;
+#else
+	if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ ||
+		sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP ||
+		sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ ||
+		sd_act_frm->action == P2PSD_ACTION_ID_GAS_CRESP)
+		return true;
+	else
+		return false;
+#endif /* WL11U */
+}
+void wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len, u32 channel)
+{
+	wifi_p2p_pub_act_frame_t *pact_frm;
+	wifi_p2p_action_frame_t *act_frm;
+	wifi_p2psd_gas_pub_act_frame_t *sd_act_frm;
+	if (!frame || frame_len <= 2)
+		return;
+
+	if (wl_cfgp2p_is_pub_action(frame, frame_len)) {
+		pact_frm = (wifi_p2p_pub_act_frame_t *)frame;
+		switch (pact_frm->subtype) {
+			case P2P_PAF_GON_REQ:
+				CFGP2P_ACTION(("%s P2P Group Owner Negotiation Req Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_GON_RSP:
+				CFGP2P_ACTION(("%s P2P Group Owner Negotiation Rsp Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_GON_CONF:
+				CFGP2P_ACTION(("%s P2P Group Owner Negotiation Confirm Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_INVITE_REQ:
+				CFGP2P_ACTION(("%s P2P Invitation Request  Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_INVITE_RSP:
+				CFGP2P_ACTION(("%s P2P Invitation Response Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_DEVDIS_REQ:
+				CFGP2P_ACTION(("%s P2P Device Discoverability Request Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_DEVDIS_RSP:
+				CFGP2P_ACTION(("%s P2P Device Discoverability Response Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_PROVDIS_REQ:
+				CFGP2P_ACTION(("%s P2P Provision Discovery Request Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_PAF_PROVDIS_RSP:
+				CFGP2P_ACTION(("%s P2P Provision Discovery Response Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			default:
+				CFGP2P_ACTION(("%s Unknown P2P Public Action Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+
+		}
+
+	} else if (wl_cfgp2p_is_p2p_action(frame, frame_len)) {
+		act_frm = (wifi_p2p_action_frame_t *)frame;
+		switch (act_frm->subtype) {
+			case P2P_AF_NOTICE_OF_ABSENCE:
+				CFGP2P_ACTION(("%s P2P Notice of Absence Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_AF_PRESENCE_REQ:
+				CFGP2P_ACTION(("%s P2P Presence Request Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_AF_PRESENCE_RSP:
+				CFGP2P_ACTION(("%s P2P Presence Response Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			case P2P_AF_GO_DISC_REQ:
+				CFGP2P_ACTION(("%s P2P Discoverability Request Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+				break;
+			default:
+				CFGP2P_ACTION(("%s Unknown P2P Action Frame,"
+					" channel=%d\n", (tx)? "TX": "RX", channel));
+		}
+
+	} else if (wl_cfgp2p_is_gas_action(frame, frame_len)) {
+		sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame;
+		switch (sd_act_frm->action) {
+			case P2PSD_ACTION_ID_GAS_IREQ:
+				CFGP2P_ACTION(("%s P2P GAS Initial Request,"
+					" channel=%d\n", (tx)? "TX" : "RX", channel));
+				break;
+			case P2PSD_ACTION_ID_GAS_IRESP:
+				CFGP2P_ACTION(("%s P2P GAS Initial Response,"
+					" channel=%d\n", (tx)? "TX" : "RX", channel));
+				break;
+			case P2PSD_ACTION_ID_GAS_CREQ:
+				CFGP2P_ACTION(("%s P2P GAS Comback Request,"
+					" channel=%d\n", (tx)? "TX" : "RX", channel));
+				break;
+			case P2PSD_ACTION_ID_GAS_CRESP:
+				CFGP2P_ACTION(("%s P2P GAS Comback Response,"
+					" channel=%d\n", (tx)? "TX" : "RX", channel));
+				break;
+			default:
+				CFGP2P_ACTION(("%s Unknown P2P GAS Frame,"
+					" channel=%d\n", (tx)? "TX" : "RX", channel));
+		}
+
+
+	}
+}
+
+/*
+ *  Initialize variables related to P2P
+ *
+ */
+s32
+wl_cfgp2p_init_priv(struct bcm_cfg80211 *cfg)
+{
+	if (!(cfg->p2p = kzalloc(sizeof(struct p2p_info), GFP_KERNEL))) {
+		CFGP2P_ERR(("struct p2p_info allocation failed\n"));
+		return -ENOMEM;
+	}
+#define INIT_IE(IE_TYPE, BSS_TYPE)		\
+	do {							\
+		memset(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \
+		   sizeof(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \
+		wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \
+	} while (0);
+
+	INIT_IE(probe_req, P2PAPI_BSSCFG_PRIMARY);
+	INIT_IE(probe_res, P2PAPI_BSSCFG_PRIMARY);
+	INIT_IE(assoc_req, P2PAPI_BSSCFG_PRIMARY);
+	INIT_IE(assoc_res, P2PAPI_BSSCFG_PRIMARY);
+	INIT_IE(beacon,    P2PAPI_BSSCFG_PRIMARY);
+	INIT_IE(probe_req, P2PAPI_BSSCFG_DEVICE);
+	INIT_IE(probe_res, P2PAPI_BSSCFG_DEVICE);
+	INIT_IE(assoc_req, P2PAPI_BSSCFG_DEVICE);
+	INIT_IE(assoc_res, P2PAPI_BSSCFG_DEVICE);
+	INIT_IE(beacon,    P2PAPI_BSSCFG_DEVICE);
+	INIT_IE(probe_req, P2PAPI_BSSCFG_CONNECTION);
+	INIT_IE(probe_res, P2PAPI_BSSCFG_CONNECTION);
+	INIT_IE(assoc_req, P2PAPI_BSSCFG_CONNECTION);
+	INIT_IE(assoc_res, P2PAPI_BSSCFG_CONNECTION);
+	INIT_IE(beacon,    P2PAPI_BSSCFG_CONNECTION);
+#undef INIT_IE
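+/* For reference, INIT_IE(probe_req, P2PAPI_BSSCFG_PRIMARY) above expands to
+ * roughly:
+ *
+ *	memset(wl_to_p2p_bss_saved_ie(cfg, P2PAPI_BSSCFG_PRIMARY).p2p_probe_req_ie,
+ *		0, sizeof(...));
+ *	wl_to_p2p_bss_saved_ie(cfg, P2PAPI_BSSCFG_PRIMARY).p2p_probe_req_ie_len = 0;
+ */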
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY) = bcmcfg_to_prmry_ndev(cfg);
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY) = 0;
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL;
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0;
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION) = NULL;
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION) = 0;
+	return BCME_OK;
+
+}
+/*
+ *  Deinitialize variables related to P2P
+ *
+ */
+void
+wl_cfgp2p_deinit_priv(struct bcm_cfg80211 *cfg)
+{
+	CFGP2P_DBG(("In\n"));
+	if (cfg->p2p) {
+		kfree(cfg->p2p);
+		cfg->p2p = NULL;
+	}
+	cfg->p2p_supported = 0;
+}
+/*
+ * Set P2P functions into firmware
+ */
+s32
+wl_cfgp2p_set_firm_p2p(struct bcm_cfg80211 *cfg)
+{
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	struct ether_addr null_eth_addr = { { 0, 0, 0, 0, 0, 0 } };
+	s32 ret = BCME_OK;
+	s32 val = 0;
+	/* Do we have to check whether APSTA is enabled or not ? */
+	ret = wldev_iovar_getint(ndev, "apsta", &val);
+	if (ret < 0) {
+		CFGP2P_ERR(("get apsta error %d\n", ret));
+		return ret;
+	}
+	if (val == 0) {
+		val = 1;
+		ret = wldev_ioctl(ndev, WLC_DOWN, &val, sizeof(s32), true);
+		if (ret < 0) {
+			CFGP2P_ERR(("WLC_DOWN error %d\n", ret));
+			return ret;
+		}
+		wldev_iovar_setint(ndev, "apsta", val);
+		ret = wldev_ioctl(ndev, WLC_UP, &val, sizeof(s32), true);
+		if (ret < 0) {
+			CFGP2P_ERR(("WLC_UP error %d\n", ret));
+			return ret;
+		}
+	}
+
+	/* In case of COB type, the firmware has a default mac address.
+	 * After initializing the firmware, we have to set the current mac
+	 * address in the firmware as the P2P device address
+	 */
+	ret = wldev_iovar_setbuf_bsscfg(ndev, "p2p_da_override", &null_eth_addr,
+		sizeof(null_eth_addr), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, 0, &cfg->ioctl_buf_sync);
+	if (ret && ret != BCME_UNSUPPORTED) {
+		CFGP2P_ERR(("failed to update device address ret %d\n", ret));
+	}
+	return ret;
+}
+
+/* Create a new P2P BSS.
+ * Parameters:
+ * @mac      : MAC address of the BSS to create
+ * @if_type  : interface type: WL_P2P_IF_GO or WL_P2P_IF_CLIENT
+ * @chspec   : chspec to use if creating a GO BSS.
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifadd(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type,
+            chanspec_t chspec)
+{
+	wl_p2p_if_t ifreq;
+	s32 err;
+	u32 scb_timeout = WL_SCB_TIMEOUT;
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	ifreq.type = if_type;
+	ifreq.chspec = chspec;
+	memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet));
+
+	CFGP2P_DBG(("---cfg p2p_ifadd "MACDBG" %s %u\n",
+		MAC2STRDBG(ifreq.addr.octet),
+		(if_type == WL_P2P_IF_GO) ? "go" : "client",
+	        (chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT));
+
+	err = wldev_iovar_setbuf(ndev, "p2p_ifadd", &ifreq, sizeof(ifreq),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+
+	if (unlikely(err < 0))
+		printk("'cfg p2p_ifadd' error %d\n", err);
+	else if (if_type == WL_P2P_IF_GO) {
+		err = wldev_ioctl(ndev, WLC_SET_SCB_TIMEOUT, &scb_timeout, sizeof(u32), true);
+		if (unlikely(err < 0))
+			printk("'cfg scb_timeout' error %d\n", err);
+	}
+	return err;
+}
+
+/* Disable a P2P BSS.
+ * Parameters:
+ * @mac      : MAC address of the BSS to disable
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifdisable(struct bcm_cfg80211 *cfg, struct ether_addr *mac)
+{
+	s32 ret;
+	struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg);
+
+	CFGP2P_INFO(("------primary idx %d : cfg p2p_ifdis "MACDBG"\n",
+		netdev->ifindex, MAC2STRDBG(mac->octet)));
+	ret = wldev_iovar_setbuf(netdev, "p2p_ifdis", mac, sizeof(*mac),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+	if (unlikely(ret < 0)) {
+		printk("'cfg p2p_ifdis' error %d\n", ret);
+	}
+	return ret;
+}
+
+/* Delete a P2P BSS.
+ * Parameters:
+ * @mac      : MAC address of the BSS to delete
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifdel(struct bcm_cfg80211 *cfg, struct ether_addr *mac)
+{
+	s32 ret;
+	struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg);
+
+	CFGP2P_INFO(("------primary idx %d : cfg p2p_ifdel "MACDBG"\n",
+	    netdev->ifindex, MAC2STRDBG(mac->octet)));
+	ret = wldev_iovar_setbuf(netdev, "p2p_ifdel", mac, sizeof(*mac),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+	if (unlikely(ret < 0)) {
+		printk("'cfg p2p_ifdel' error %d\n", ret);
+	}
+	return ret;
+}
+
+/* Change a P2P Role.
+ * Parameters:
+ * @mac      : MAC address of the BSS to change a role
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifchange(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type,
+            chanspec_t chspec)
+{
+	wl_p2p_if_t ifreq;
+	s32 err;
+	u32 scb_timeout = WL_SCB_TIMEOUT;
+
+	struct net_device *netdev =  wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION);
+
+	ifreq.type = if_type;
+	ifreq.chspec = chspec;
+	memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet));
+
+	CFGP2P_INFO(("---cfg p2p_ifchange "MACDBG" %s %u"
+		" chanspec 0x%04x\n", MAC2STRDBG(ifreq.addr.octet),
+		(if_type == WL_P2P_IF_GO) ? "go" : "client",
+		(chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT,
+		ifreq.chspec));
+
+	err = wldev_iovar_setbuf(netdev, "p2p_ifupd", &ifreq, sizeof(ifreq),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+
+	if (unlikely(err < 0)) {
+		printk("'cfg p2p_ifupd' error %d\n", err);
+	} else if (if_type == WL_P2P_IF_GO) {
+		err = wldev_ioctl(netdev, WLC_SET_SCB_TIMEOUT, &scb_timeout, sizeof(u32), true);
+		if (unlikely(err < 0))
+			printk("'cfg scb_timeout' error %d\n", err);
+	}
+	return err;
+}
+
+
+/* Get the index of a created P2P BSS.
+ * Parameters:
+ * @mac      : MAC address of the created BSS
+ * @index    : output: index of created BSS
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_ifidx(struct bcm_cfg80211 *cfg, struct ether_addr *mac, s32 *index)
+{
+	s32 ret;
+	u8 getbuf[64];
+	struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+
+	CFGP2P_INFO(("---cfg p2p_if "MACDBG"\n", MAC2STRDBG(mac->octet)));
+
+	ret = wldev_iovar_getbuf_bsscfg(dev, "p2p_if", mac, sizeof(*mac), getbuf,
+		sizeof(getbuf), wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY), NULL);
+
+	if (ret == 0) {
+		memcpy(index, getbuf, sizeof(s32));
+		CFGP2P_INFO(("---cfg p2p_if   ==> %d\n", *index));
+	}
+
+	return ret;
+}
+
+static s32
+wl_cfgp2p_set_discovery(struct bcm_cfg80211 *cfg, s32 on)
+{
+	s32 ret = BCME_OK;
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	CFGP2P_DBG(("enter\n"));
+
+	ret = wldev_iovar_setint(ndev, "p2p_disc", on);
+
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("p2p_disc %d error %d\n", on, ret));
+	}
+
+	return ret;
+}
+
+/* Set the WL driver's P2P mode.
+ * Parameters :
+ * @mode      : is one of WL_P2P_DISC_ST_{SCAN,LISTEN,SEARCH}.
+ * @channel   : the channel to listen on
+ * @listen_ms : the time (in milliseconds) to wait
+ * @bssidx    : bss index for BSSCFG
+ * Returns 0 if success
+ */
+
+s32
+wl_cfgp2p_set_p2p_mode(struct bcm_cfg80211 *cfg, u8 mode, u32 channel, u16 listen_ms, int bssidx)
+{
+	wl_p2p_disc_st_t discovery_mode;
+	s32 ret;
+	struct net_device *dev;
+	CFGP2P_DBG(("enter\n"));
+
+	if (unlikely(bssidx == WL_INVALID)) {
+		CFGP2P_ERR((" %d index out of range\n", bssidx));
+		return -1;
+	}
+
+	dev = wl_cfgp2p_find_ndev(cfg, bssidx);
+	if (unlikely(dev == NULL)) {
+		CFGP2P_ERR(("bssidx %d is not assigned\n", bssidx));
+		return BCME_NOTFOUND;
+	}
+
+	/* Put the WL driver into P2P Listen Mode to respond to P2P probe reqs */
+	discovery_mode.state = mode;
+	discovery_mode.chspec = wl_ch_host_to_driver(channel);
+	discovery_mode.dwell = listen_ms;
+	ret = wldev_iovar_setbuf_bsscfg(dev, "p2p_state", &discovery_mode,
+		sizeof(discovery_mode), cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+		bssidx, &cfg->ioctl_buf_sync);
+
+	return ret;
+}
+
+/* Get the index of the P2P Discovery BSS */
+static s32
+wl_cfgp2p_get_disc_idx(struct bcm_cfg80211 *cfg, s32 *index)
+{
+	s32 ret;
+	struct net_device *dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+
+	ret = wldev_iovar_getint(dev, "p2p_dev", index);
+	CFGP2P_INFO(("p2p_dev bsscfg_idx=%d ret=%d\n", *index, ret));
+
+	if (unlikely(ret <  0)) {
+	    CFGP2P_ERR(("'p2p_dev' error %d\n", ret));
+		return ret;
+	}
+	return ret;
+}
+
+s32
+wl_cfgp2p_init_discovery(struct bcm_cfg80211 *cfg)
+{
+
+	s32 index = 0;
+	s32 ret = BCME_OK;
+
+	CFGP2P_DBG(("enter\n"));
+
+	if (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) != 0) {
+		CFGP2P_ERR(("do nothing, already initialized\n"));
+		return ret;
+	}
+
+	ret = wl_cfgp2p_set_discovery(cfg, 1);
+	if (ret < 0) {
+		CFGP2P_ERR(("set discover error\n"));
+		return ret;
+	}
+	/* Enable P2P Discovery in the WL Driver */
+	ret = wl_cfgp2p_get_disc_idx(cfg, &index);
+
+	if (ret < 0) {
+		return ret;
+	}
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) =
+	    wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = index;
+
+	/* Set the initial discovery state to SCAN */
+	ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+		wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+
+	if (unlikely(ret != 0)) {
+		CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n"));
+		wl_cfgp2p_set_discovery(cfg, 0);
+		wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0;
+		wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL;
+		return 0;
+	}
+	return ret;
+}
+
+/* Deinitialize P2P Discovery
+ * Parameters :
+ * @cfg        : wl_private data
+ * Returns 0 if success
+ */
+static s32
+wl_cfgp2p_deinit_discovery(struct bcm_cfg80211 *cfg)
+{
+	s32 ret = BCME_OK;
+	CFGP2P_DBG(("enter\n"));
+
+	if (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) == 0) {
+		CFGP2P_ERR(("do nothing, not initialized\n"));
+		return -1;
+	}
+	/* Set the discovery state to SCAN */
+	ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+	            wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+	/* Disable P2P discovery in the WL driver (deletes the discovery BSSCFG) */
+	ret = wl_cfgp2p_set_discovery(cfg, 0);
+
+	/* Clear our saved WPS and P2P IEs for the discovery BSS.  The driver
+	 * deleted these IEs when wl_cfgp2p_set_discovery() deleted the discovery
+	 * BSS.
+	 */
+
+	/* Clear the saved bsscfg index of the discovery BSSCFG to indicate we
+	 * have no discovery BSS.
+	 */
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = WL_INVALID;
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL;
+
+	return ret;
+
+}
+/* Enable P2P Discovery
+ * Parameters:
+ * @cfg	: wl_private data
+ * @ie  : probe request ie (WPS IE + P2P IE)
+ * @ie_len   : probe request ie length
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_enable_discovery(struct bcm_cfg80211 *cfg, struct net_device *dev,
+	const u8 *ie, u32 ie_len)
+{
+	s32 ret = BCME_OK;
+	s32 bssidx;
+
+	if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+		CFGP2P_INFO((" DISCOVERY is already initialized, we have nothing to do\n"));
+		goto set_ie;
+	}
+
+	wl_set_p2p_status(cfg, DISCOVERY_ON);
+
+	CFGP2P_DBG(("enter\n"));
+
+	ret = wl_cfgp2p_init_discovery(cfg);
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR((" init discovery error %d\n", ret));
+		goto exit;
+	}
+	/* Set wsec to any non-zero value in the discovery bsscfg to ensure our
+	 * P2P probe responses have the privacy bit set in the 802.11 WPA IE.
+	 * Some peer devices may not initiate WPS with us if this bit is not set.
+	 */
+	ret = wldev_iovar_setint_bsscfg(wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE),
+			"wsec", AES_ENABLED, wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR((" wsec error %d\n", ret));
+	}
+set_ie:
+	if (ie_len) {
+		if (bcmcfg_to_prmry_ndev(cfg) == dev) {
+			bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+		} else if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+			WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+			return BCME_ERROR;
+		}
+
+		ret = wl_cfgp2p_set_management_ie(cfg, dev,
+			bssidx,
+			VNDR_IE_PRBREQ_FLAG, ie, ie_len);
+
+		if (unlikely(ret < 0)) {
+			CFGP2P_ERR(("set probreq ie occurs error %d\n", ret));
+			goto exit;
+		}
+	}
+exit:
+	return ret;
+}
+
+/* Disable P2P Discovery
+ * Parameters:
+ * @cfg       : wl_private_data
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_disable_discovery(struct bcm_cfg80211 *cfg)
+{
+	s32 ret = BCME_OK;
+	CFGP2P_DBG((" enter\n"));
+
+	if (!cfg || !(cfg->p2p_supported)) {
+		ret = BCME_NOTUP;
+		goto exit;
+	}
+
+	wl_clr_p2p_status(cfg, DISCOVERY_ON);
+
+	if (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) == 0) {
+		CFGP2P_ERR((" do nothing, not initialized\n"));
+		goto exit;
+	}
+
+	ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+	            wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+
+	if (unlikely(ret < 0)) {
+
+		CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n"));
+	}
+	/* Do a scan abort to stop the driver's scan engine in case it is still
+	 * waiting out an action frame tx dwell time.
+	 */
+	wl_clr_p2p_status(cfg, DISCOVERY_ON);
+	ret = wl_cfgp2p_deinit_discovery(cfg);
+
+exit:
+	return ret;
+}
+
+s32
+wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active,
+	u32 num_chans, u16 *channels,
+	s32 search_state, u16 action, u32 bssidx, struct ether_addr *tx_dst_addr,
+	p2p_scan_purpose_t p2p_scan_purpose)
+{
+	s32 ret = BCME_OK;
+	s32 memsize;
+	s32 eparams_size;
+	u32 i;
+	s8 *memblk;
+	wl_p2p_scan_t *p2p_params;
+	wl_escan_params_t *eparams;
+	wlc_ssid_t ssid;
+	/* Scan parameters */
+#define P2PAPI_SCAN_NPROBES 1
+#define P2PAPI_SCAN_DWELL_TIME_MS 80
+#define P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS 40
+#define P2PAPI_SCAN_HOME_TIME_MS 60
+#define P2PAPI_SCAN_NPROBS_TIME_MS 30
+#define P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS 100
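+/* Note: the dwell/home times above are in ms; nprobes is later derived as
+ * active_time / P2PAPI_SCAN_NPROBS_TIME_MS (with a floor of one probe).
+ */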
+
+	struct net_device *pri_dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+	/* Allocate scan params with space for num_chans channels and 0 SSIDs */
+	eparams_size = (WL_SCAN_PARAMS_FIXED_SIZE +
+	    OFFSETOF(wl_escan_params_t, params)) +
+		num_chans * sizeof(eparams->params.channel_list[0]);
+
+	memsize = sizeof(wl_p2p_scan_t) + eparams_size;
+	memblk = scanparambuf;
+	if (memsize > sizeof(scanparambuf)) {
+		CFGP2P_ERR((" scanpar buf too small (%u > %zu)\n",
+		    memsize, sizeof(scanparambuf)));
+		return -1;
+	}
+	memset(memblk, 0, memsize);
+	memset(cfg->ioctl_buf, 0, WLC_IOCTL_MAXLEN);
+	if (search_state == WL_P2P_DISC_ST_SEARCH) {
+		/*
+		 * If we are in SEARCH STATE, we don't need to set the SSID
+		 * explicitly because the dongle uses the P2P WILDCARD SSID
+		 * internally by default.
+		 */
+		wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SEARCH, 0, 0, bssidx);
+		/* use null ssid */
+		ssid.SSID_len = 0;
+		memset(&ssid.SSID, 0, sizeof(ssid.SSID));
+	} else if (search_state == WL_P2P_DISC_ST_SCAN) {
+		/* SCAN STATE: 802.11 scan.
+		 * The WFD supplicant issues p2p_find with type=progressive or
+		 * type=full. For a progressive p2p_find we have to set the SSID
+		 * to the P2P WILDCARD SSID, because otherwise the scan is just
+		 * a broadcast scan.
+		 */
+		wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, bssidx);
+		/* use wild card ssid */
+		ssid.SSID_len = WL_P2P_WILDCARD_SSID_LEN;
+		memset(&ssid.SSID, 0, sizeof(ssid.SSID));
+		memcpy(&ssid.SSID, WL_P2P_WILDCARD_SSID, WL_P2P_WILDCARD_SSID_LEN);
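+		/* Note: WL_P2P_WILDCARD_SSID is assumed to be the 7-byte
+		 * "DIRECT-" prefix, so a progressive p2p_find effectively
+		 * scans for "DIRECT-*" networks.
+		 */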
+	} else {
+		CFGP2P_ERR((" invalid search state %d\n", search_state));
+		return -1;
+	}
+
+
+	/* Fill in the P2P scan structure at the start of the iovar param block */
+	p2p_params = (wl_p2p_scan_t*) memblk;
+	p2p_params->type = 'E';
+	/* Fill in the Scan structure that follows the P2P scan structure */
+	eparams = (wl_escan_params_t*) (p2p_params + 1);
+	eparams->params.bss_type = DOT11_BSSTYPE_ANY;
+	if (active)
+		eparams->params.scan_type = DOT11_SCANTYPE_ACTIVE;
+	else
+		eparams->params.scan_type = DOT11_SCANTYPE_PASSIVE;
+
+	if (tx_dst_addr == NULL)
+		memcpy(&eparams->params.bssid, &ether_bcast, ETHER_ADDR_LEN);
+	else
+		memcpy(&eparams->params.bssid, tx_dst_addr, ETHER_ADDR_LEN);
+
+	if (ssid.SSID_len)
+		memcpy(&eparams->params.ssid, &ssid, sizeof(wlc_ssid_t));
+
+	eparams->params.home_time = htod32(P2PAPI_SCAN_HOME_TIME_MS);
+
+	switch (p2p_scan_purpose) {
+		case P2P_SCAN_SOCIAL_CHANNEL:
+			eparams->params.active_time = htod32(P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS);
+			break;
+		case P2P_SCAN_AFX_PEER_NORMAL:
+		case P2P_SCAN_AFX_PEER_REDUCED:
+			eparams->params.active_time = htod32(P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS);
+			break;
+		case P2P_SCAN_CONNECT_TRY:
+			eparams->params.active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
+			break;
+		default:
+			if (wl_get_drv_status_all(cfg, CONNECTED))
+				eparams->params.active_time = -1;
+			else
+				eparams->params.active_time = htod32(P2PAPI_SCAN_DWELL_TIME_MS);
+			break;
+	}
+
+	if (p2p_scan_purpose == P2P_SCAN_CONNECT_TRY)
+		eparams->params.nprobes = htod32(eparams->params.active_time /
+			WL_SCAN_JOIN_PROBE_INTERVAL_MS);
+	else
+		eparams->params.nprobes = htod32((eparams->params.active_time /
+			P2PAPI_SCAN_NPROBS_TIME_MS));
+
+
+	if (eparams->params.nprobes <= 0)
+		eparams->params.nprobes = 1;
+	CFGP2P_DBG(("nprobes # %d, active_time %d\n",
+		eparams->params.nprobes, eparams->params.active_time));
+	eparams->params.passive_time = htod32(-1);
+	eparams->params.channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+	    (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
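+	/* channel_num packs the SSID count into the upper bits
+	 * (WL_SCAN_PARAMS_NSSID_SHIFT) and the channel count into the low
+	 * mask bits; e.g. 0 SSIDs and 3 channels encode as 0x0003.
+	 */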
+
+	for (i = 0; i < num_chans; i++) {
+		eparams->params.channel_list[i] = wl_ch_host_to_driver(channels[i]);
+	}
+	eparams->version = htod32(ESCAN_REQ_VERSION);
+	eparams->action =  htod16(action);
+	wl_escan_set_sync_id(eparams->sync_id, cfg);
+	wl_escan_set_type(cfg, WL_SCANTYPE_P2P);
+	CFGP2P_INFO(("SCAN CHANNELS : "));
+
+	for (i = 0; i < num_chans; i++) {
+		if (i == 0) CFGP2P_INFO(("%d", channels[i]));
+		else CFGP2P_INFO((",%d", channels[i]));
+	}
+
+	CFGP2P_INFO(("\n"));
+
+	ret = wldev_iovar_setbuf_bsscfg(pri_dev, "p2p_scan",
+		memblk, memsize, cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+	if (ret == BCME_OK)
+		wl_set_p2p_status(cfg, SCANNING);
+	return ret;
+}
+
+/* search function to reach at common channel to send action frame
+ * Parameters:
+ * @cfg       : wl_private data
+ * @ndev     : net device for bssidx
+ * @bssidx   : bssidx for BSS
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_act_frm_search(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	s32 bssidx, s32 channel, struct ether_addr *tx_dst_addr)
+{
+	s32 ret = 0;
+	u32 chan_cnt = 0;
+	u16 *default_chan_list = NULL;
+	p2p_scan_purpose_t p2p_scan_purpose = P2P_SCAN_AFX_PEER_NORMAL;
+	if (!p2p_is_on(cfg) || ndev == NULL || bssidx == WL_INVALID)
+		return BCME_ERROR;
+	WL_TRACE_HW4((" Enter\n"));
+	if (bssidx == wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY))
+		bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+	if (channel)
+		chan_cnt = AF_PEER_SEARCH_CNT;
+	else
+		chan_cnt = SOCIAL_CHAN_CNT;
+	default_chan_list = kzalloc(chan_cnt * sizeof(*default_chan_list), GFP_KERNEL);
+	if (default_chan_list == NULL) {
+		CFGP2P_ERR(("channel list allocation failed \n"));
+		ret = -ENOMEM;
+		goto exit;
+	}
+	if (channel) {
+		u32 i;
+		/* insert same channel to the chan_list */
+		for (i = 0; i < chan_cnt; i++) {
+			default_chan_list[i] = channel;
+		}
+	} else {
+		default_chan_list[0] = SOCIAL_CHAN_1;
+		default_chan_list[1] = SOCIAL_CHAN_2;
+		default_chan_list[2] = SOCIAL_CHAN_3;
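+		/* Assuming the usual SOCIAL_CHAN_* definitions, these are the
+		 * P2P social channels 1, 6 and 11 in the 2.4 GHz band.
+		 */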
+	}
+	ret = wl_cfgp2p_escan(cfg, ndev, true, chan_cnt,
+		default_chan_list, WL_P2P_DISC_ST_SEARCH,
+		WL_SCAN_ACTION_START, bssidx, NULL, p2p_scan_purpose);
+	kfree(default_chan_list);
+exit:
+	return ret;
+}
+
+/* Check whether pointed-to IE looks like WPA. */
+#define wl_cfgp2p_is_wpa_ie(ie, tlvs, len)	wl_cfgp2p_has_ie(ie, tlvs, len, \
+		(const uint8 *)WPS_OUI, WPS_OUI_LEN, WPA_OUI_TYPE)
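+/* Note: WPA and WPS vendor IEs share the Microsoft OUI 00:50:F2 (WPS_OUI
+ * here) and are told apart only by the OUI-type byte that follows it
+ * (WPA_OUI_TYPE vs WPS_OUI_TYPE).
+ */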
+/* Check whether pointed-to IE looks like WPS. */
+#define wl_cfgp2p_is_wps_ie(ie, tlvs, len)	wl_cfgp2p_has_ie(ie, tlvs, len, \
+		(const uint8 *)WPS_OUI, WPS_OUI_LEN, WPS_OUI_TYPE)
+/* Check whether the given IE looks like WFA P2P IE. */
+#define wl_cfgp2p_is_p2p_ie(ie, tlvs, len)	wl_cfgp2p_has_ie(ie, tlvs, len, \
+		(const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_P2P)
+/* Check whether the given IE looks like WFA WFDisplay IE. */
+#ifndef WFA_OUI_TYPE_WFD
+#define WFA_OUI_TYPE_WFD	0x0a			/* WiFi Display OUI TYPE */
+#endif
+#define wl_cfgp2p_is_wfd_ie(ie, tlvs, len)	wl_cfgp2p_has_ie(ie, tlvs, len, \
+		(const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_WFD)
+
+static s32
+wl_cfgp2p_parse_vndr_ies(u8 *parse, u32 len,
+	struct parsed_vndr_ies *vndr_ies)
+{
+	s32 err = BCME_OK;
+	vndr_ie_t *vndrie;
+	bcm_tlv_t *ie;
+	struct parsed_vndr_ie_info *parsed_info;
+	u32	count = 0;
+	s32 remained_len;
+
+	remained_len = (s32)len;
+	memset(vndr_ies, 0, sizeof(*vndr_ies));
+
+	WL_INFO(("---> len %d\n", len));
+	ie = (bcm_tlv_t *) parse;
+	if (!bcm_valid_tlv(ie, remained_len))
+		ie = NULL;
+	while (ie) {
+		if (count >= MAX_VNDR_IE_NUMBER)
+			break;
+		if (ie->id == DOT11_MNG_VS_ID) {
+			vndrie = (vndr_ie_t *) ie;
+			/* len must be at least the OUI length plus one type/data byte */
+			if (vndrie->len < (VNDR_IE_MIN_LEN + 1)) {
+				CFGP2P_ERR(("%s: invalid vndr ie. length is too small %d\n",
+					__FUNCTION__, vndrie->len));
+				goto end;
+			}
+			/* if wpa or wme ie, do not add ie */
+			if (!bcmp(vndrie->oui, (u8*)WPA_OUI, WPA_OUI_LEN) &&
+				((vndrie->data[0] == WPA_OUI_TYPE) ||
+				(vndrie->data[0] == WME_OUI_TYPE))) {
+				CFGP2P_DBG(("Found WPA/WME oui. Do not add it\n"));
+				goto end;
+			}
+
+			parsed_info = &vndr_ies->ie_info[count++];
+
+			/* save vndr ie information */
+			parsed_info->ie_ptr = (char *)vndrie;
+			parsed_info->ie_len = (vndrie->len + TLV_HDR_LEN);
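+			/* ie_len spans the whole element: TLV_HDR_LEN (assumed
+			 * 1-byte id + 1-byte length) plus the vendor body.
+			 */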
+			memcpy(&parsed_info->vndrie, vndrie, sizeof(vndr_ie_t));
+
+			vndr_ies->count = count;
+
+			CFGP2P_DBG(("\t ** OUI %02x %02x %02x, type 0x%02x \n",
+				parsed_info->vndrie.oui[0], parsed_info->vndrie.oui[1],
+				parsed_info->vndrie.oui[2], parsed_info->vndrie.data[0]));
+		}
+end:
+		ie = bcm_next_tlv(ie, &remained_len);
+	}
+	return err;
+}
+
+
+/* Delete and Set a management vndr ie to firmware
+ * Parameters:
+ * @cfg       : wl_private data
+ * @ndev     : net device for bssidx
+ * @bssidx   : bssidx for BSS
+ * @pktflag  : packet flag for IE (VNDR_IE_PRBREQ_FLAG, VNDR_IE_PRBRSP_FLAG,
+ *                                 VNDR_IE_ASSOCRSP_FLAG, VNDR_IE_ASSOCREQ_FLAG,
+ *                                 VNDR_IE_BEACON_FLAG)
+ * @ie       : VNDR IE (such as P2P IE, WPS IE)
+ * @ie_len   : VNDR IE Length
+ * Returns 0 if success.
+ */
+
+s32
+wl_cfgp2p_set_management_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx,
+    s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len)
+{
+	s32 ret = BCME_OK;
+	u8  *curr_ie_buf = NULL;
+	u8  *mgmt_ie_buf = NULL;
+	u32 mgmt_ie_buf_len = 0;
+	u32 *mgmt_ie_len = NULL;
+	u32 del_add_ie_buf_len = 0;
+	u32 total_ie_buf_len = 0;
+	u32 parsed_ie_buf_len = 0;
+	struct parsed_vndr_ies old_vndr_ies;
+	struct parsed_vndr_ies new_vndr_ies;
+	s32 i;
+	u8 *ptr;
+	s32 type = -1;
+	s32 remained_buf_len;
+#define IE_TYPE(type, bsstype) (wl_to_p2p_bss_saved_ie(cfg, bsstype).p2p_ ## type ## _ie)
+#define IE_TYPE_LEN(type, bsstype) (wl_to_p2p_bss_saved_ie(cfg, bsstype).p2p_ ## type ## _ie_len)
+	memset(g_mgmt_ie_buf, 0, sizeof(g_mgmt_ie_buf));
+	curr_ie_buf = g_mgmt_ie_buf;
+	CFGP2P_DBG((" bssidx %d, pktflag : 0x%02X\n", bssidx, pktflag));
+	if (cfg->p2p != NULL) {
+		if (wl_cfgp2p_find_type(cfg, bssidx, &type)) {
+			CFGP2P_ERR(("cannot find type from bssidx : %d\n", bssidx));
+			return BCME_ERROR;
+		}
+
+		switch (pktflag) {
+			case VNDR_IE_PRBREQ_FLAG :
+				mgmt_ie_buf = IE_TYPE(probe_req, type);
+				mgmt_ie_len = &IE_TYPE_LEN(probe_req, type);
+				mgmt_ie_buf_len = sizeof(IE_TYPE(probe_req, type));
+				break;
+			case VNDR_IE_PRBRSP_FLAG :
+				mgmt_ie_buf = IE_TYPE(probe_res, type);
+				mgmt_ie_len = &IE_TYPE_LEN(probe_res, type);
+				mgmt_ie_buf_len = sizeof(IE_TYPE(probe_res, type));
+				break;
+			case VNDR_IE_ASSOCREQ_FLAG :
+				mgmt_ie_buf = IE_TYPE(assoc_req, type);
+				mgmt_ie_len = &IE_TYPE_LEN(assoc_req, type);
+				mgmt_ie_buf_len = sizeof(IE_TYPE(assoc_req, type));
+				break;
+			case VNDR_IE_ASSOCRSP_FLAG :
+				mgmt_ie_buf = IE_TYPE(assoc_res, type);
+				mgmt_ie_len = &IE_TYPE_LEN(assoc_res, type);
+				mgmt_ie_buf_len = sizeof(IE_TYPE(assoc_res, type));
+				break;
+			case VNDR_IE_BEACON_FLAG :
+				mgmt_ie_buf = IE_TYPE(beacon, type);
+				mgmt_ie_len = &IE_TYPE_LEN(beacon, type);
+				mgmt_ie_buf_len = sizeof(IE_TYPE(beacon, type));
+				break;
+			default:
+				mgmt_ie_buf = NULL;
+				mgmt_ie_len = NULL;
+				CFGP2P_ERR(("not suitable type\n"));
+				return BCME_ERROR;
+		}
+	} else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
+		switch (pktflag) {
+			case VNDR_IE_PRBRSP_FLAG :
+				mgmt_ie_buf = cfg->ap_info->probe_res_ie;
+				mgmt_ie_len = &cfg->ap_info->probe_res_ie_len;
+				mgmt_ie_buf_len = sizeof(cfg->ap_info->probe_res_ie);
+				break;
+			case VNDR_IE_BEACON_FLAG :
+				mgmt_ie_buf = cfg->ap_info->beacon_ie;
+				mgmt_ie_len = &cfg->ap_info->beacon_ie_len;
+				mgmt_ie_buf_len = sizeof(cfg->ap_info->beacon_ie);
+				break;
+			case VNDR_IE_ASSOCRSP_FLAG :
+				/* WPS-AP WSC2.0 assoc res includes wps_ie */
+				mgmt_ie_buf = cfg->ap_info->assoc_res_ie;
+				mgmt_ie_len = &cfg->ap_info->assoc_res_ie_len;
+				mgmt_ie_buf_len = sizeof(cfg->ap_info->assoc_res_ie);
+				break;
+			default:
+				mgmt_ie_buf = NULL;
+				mgmt_ie_len = NULL;
+				CFGP2P_ERR(("not suitable type\n"));
+				return BCME_ERROR;
+		}
+		bssidx = 0;
+	} else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_BSS) {
+		switch (pktflag) {
+			case VNDR_IE_PRBREQ_FLAG :
+				mgmt_ie_buf = cfg->sta_info->probe_req_ie;
+				mgmt_ie_len = &cfg->sta_info->probe_req_ie_len;
+				mgmt_ie_buf_len = sizeof(cfg->sta_info->probe_req_ie);
+				break;
+			case VNDR_IE_ASSOCREQ_FLAG :
+				mgmt_ie_buf = cfg->sta_info->assoc_req_ie;
+				mgmt_ie_len = &cfg->sta_info->assoc_req_ie_len;
+				mgmt_ie_buf_len = sizeof(cfg->sta_info->assoc_req_ie);
+				break;
+			default:
+				mgmt_ie_buf = NULL;
+				mgmt_ie_len = NULL;
+				CFGP2P_ERR(("not suitable type\n"));
+				return BCME_ERROR;
+		}
+		bssidx = 0;
+	} else {
+		CFGP2P_ERR(("not suitable type\n"));
+		return BCME_ERROR;
+	}
+
+	if (vndr_ie_len > mgmt_ie_buf_len) {
+		CFGP2P_ERR(("extra IE size too big\n"));
+		ret = -ENOMEM;
+	} else {
+		/* parse and save new vndr_ie in curr_ie_buff before comparing it */
+		if (vndr_ie && vndr_ie_len && curr_ie_buf) {
+			ptr = curr_ie_buf;
+
+			wl_cfgp2p_parse_vndr_ies((u8*)vndr_ie,
+				vndr_ie_len, &new_vndr_ies);
+
+			for (i = 0; i < new_vndr_ies.count; i++) {
+				struct parsed_vndr_ie_info *vndrie_info =
+					&new_vndr_ies.ie_info[i];
+
+				memcpy(ptr + parsed_ie_buf_len, vndrie_info->ie_ptr,
+					vndrie_info->ie_len);
+				parsed_ie_buf_len += vndrie_info->ie_len;
+			}
+		}
+
+		if (mgmt_ie_buf != NULL) {
+			if (parsed_ie_buf_len && (parsed_ie_buf_len == *mgmt_ie_len) &&
+			     (memcmp(mgmt_ie_buf, curr_ie_buf, parsed_ie_buf_len) == 0)) {
+				CFGP2P_INFO(("Previous mgmt IE equals current IE"));
+				goto exit;
+			}
+
+			/* parse old vndr_ie */
+			wl_cfgp2p_parse_vndr_ies(mgmt_ie_buf, *mgmt_ie_len,
+				&old_vndr_ies);
+
+			/* make a command to delete old ie */
+			for (i = 0; i < old_vndr_ies.count; i++) {
+				struct parsed_vndr_ie_info *vndrie_info =
+					&old_vndr_ies.ie_info[i];
+
+				CFGP2P_INFO(("DELETED ID : %d, Len: %d , OUI:%02x:%02x:%02x\n",
+					vndrie_info->vndrie.id, vndrie_info->vndrie.len,
+					vndrie_info->vndrie.oui[0], vndrie_info->vndrie.oui[1],
+					vndrie_info->vndrie.oui[2]));
+
+				del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf,
+					pktflag, vndrie_info->vndrie.oui,
+					vndrie_info->vndrie.id,
+					vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN,
+					vndrie_info->ie_len - VNDR_IE_FIXED_LEN,
+					"del");
+
+				curr_ie_buf += del_add_ie_buf_len;
+				total_ie_buf_len += del_add_ie_buf_len;
+			}
+		}
+
+		*mgmt_ie_len = 0;
+		/* Add if there is any extra IE */
+		if (mgmt_ie_buf && parsed_ie_buf_len) {
+			ptr = mgmt_ie_buf;
+
+			remained_buf_len = mgmt_ie_buf_len;
+
+			/* make a command to add new ie */
+			for (i = 0; i < new_vndr_ies.count; i++) {
+				struct parsed_vndr_ie_info *vndrie_info =
+					&new_vndr_ies.ie_info[i];
+
+				CFGP2P_INFO(("ADDED ID : %d, Len: %d(%d), OUI:%02x:%02x:%02x\n",
+					vndrie_info->vndrie.id, vndrie_info->vndrie.len,
+					vndrie_info->ie_len - 2,
+					vndrie_info->vndrie.oui[0], vndrie_info->vndrie.oui[1],
+					vndrie_info->vndrie.oui[2]));
+
+				del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf,
+					pktflag, vndrie_info->vndrie.oui,
+					vndrie_info->vndrie.id,
+					vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN,
+					vndrie_info->ie_len - VNDR_IE_FIXED_LEN,
+					"add");
+
+				/* verify remained buf size before copy data */
+				if (remained_buf_len >= vndrie_info->ie_len) {
+					remained_buf_len -= vndrie_info->ie_len;
+				} else {
+					CFGP2P_ERR(("no space in mgmt_ie_buf: pktflag = %d, "
+						"found vndr ies # = %d(cur %d), remained len %d, "
+						"cur mgmt_ie_len %d, new ie len = %d\n",
+						pktflag, new_vndr_ies.count, i, remained_buf_len,
+						*mgmt_ie_len, vndrie_info->ie_len));
+					break;
+				}
+
+				/* save the parsed IE in cfg struct */
+				memcpy(ptr + (*mgmt_ie_len), vndrie_info->ie_ptr,
+					vndrie_info->ie_len);
+				*mgmt_ie_len += vndrie_info->ie_len;
+
+				curr_ie_buf += del_add_ie_buf_len;
+				total_ie_buf_len += del_add_ie_buf_len;
+			}
+		}
+		if (total_ie_buf_len) {
+			ret  = wldev_iovar_setbuf_bsscfg(ndev, "vndr_ie", g_mgmt_ie_buf,
+				total_ie_buf_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+				bssidx, &cfg->ioctl_buf_sync);
+			if (ret)
+				CFGP2P_ERR(("vndr ie set error : %d\n", ret));
+		}
+	}
+#undef IE_TYPE
+#undef IE_TYPE_LEN
+exit:
+	return ret;
+}
+
+/* Clear the management IE buffer of BSSCFG
+ * Parameters:
+ * @cfg       : wl_private data
+ * @bssidx   : bssidx for BSS
+ *
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_clear_management_ie(struct bcm_cfg80211 *cfg, s32 bssidx)
+{
+
+	s32 vndrie_flag[] = {VNDR_IE_BEACON_FLAG, VNDR_IE_PRBRSP_FLAG, VNDR_IE_ASSOCRSP_FLAG,
+		VNDR_IE_PRBREQ_FLAG, VNDR_IE_ASSOCREQ_FLAG};
+	s32 index = -1;
+	s32 type = -1;
+	struct net_device *ndev = wl_cfgp2p_find_ndev(cfg, bssidx);
+#define INIT_IE(IE_TYPE, BSS_TYPE)		\
+	do {							\
+		memset(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \
+		   sizeof(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \
+		wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \
+	} while (0);
+
+	if (bssidx < 0 || ndev == NULL) {
+		CFGP2P_ERR(("invalid %s\n", (bssidx < 0) ? "bssidx" : "ndev"));
+		return BCME_BADARG;
+	}
+
+	if (wl_cfgp2p_find_type(cfg, bssidx, &type)) {
+		CFGP2P_ERR(("invalid argument\n"));
+		return BCME_BADARG;
+	}
+	for (index = 0; index < ARRAYSIZE(vndrie_flag); index++) {
+		/* clean up vndr ies in dongle */
+		wl_cfgp2p_set_management_ie(cfg, ndev, bssidx, vndrie_flag[index], NULL, 0);
+	}
+	INIT_IE(probe_req, type);
+	INIT_IE(probe_res, type);
+	INIT_IE(assoc_req, type);
+	INIT_IE(assoc_res, type);
+	INIT_IE(beacon, type);
+	return BCME_OK;
+}
+
+
+/* Is any of the tlvs the expected entry? If
+ * not update the tlvs buffer pointer/length.
+ */
+static bool
+wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type)
+{
+	/* If the contents match the OUI and the type */
+	if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
+		!bcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
+		type == ie[TLV_BODY_OFF + oui_len]) {
+		return TRUE;
+	}
+
+	if (tlvs == NULL)
+		return FALSE;
+	/* point to the next ie */
+	ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
+	/* calculate the length of the rest of the buffer */
+	*tlvs_len -= (int)(ie - *tlvs);
+	/* update the pointer to the start of the buffer */
+	*tlvs = ie;
+
+	return FALSE;
+}
+
+wpa_ie_fixed_t *
+wl_cfgp2p_find_wpaie(u8 *parse, u32 len)
+{
+	bcm_tlv_t *ie;
+
+	while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID))) {
+		if (wl_cfgp2p_is_wpa_ie((u8*)ie, &parse, &len)) {
+			return (wpa_ie_fixed_t *)ie;
+		}
+	}
+	return NULL;
+}
+
+wpa_ie_fixed_t *
+wl_cfgp2p_find_wpsie(u8 *parse, u32 len)
+{
+	bcm_tlv_t *ie;
+
+	while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID))) {
+		if (wl_cfgp2p_is_wps_ie((u8*)ie, &parse, &len)) {
+			return (wpa_ie_fixed_t *)ie;
+		}
+	}
+	return NULL;
+}
+
+wifi_p2p_ie_t *
+wl_cfgp2p_find_p2pie(u8 *parse, u32 len)
+{
+	bcm_tlv_t *ie;
+
+	while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) {
+		if (wl_cfgp2p_is_p2p_ie((uint8*)ie, &parse, &len)) {
+			return (wifi_p2p_ie_t *)ie;
+		}
+	}
+	return NULL;
+}
+
+wifi_wfd_ie_t *
+wl_cfgp2p_find_wfdie(u8 *parse, u32 len)
+{
+	bcm_tlv_t *ie;
+
+	while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) {
+		if (wl_cfgp2p_is_wfd_ie((uint8*)ie, &parse, &len)) {
+			return (wifi_wfd_ie_t *)ie;
+		}
+	}
+	return NULL;
+}
+static u32
+wl_cfgp2p_vndr_ie(struct bcm_cfg80211 *cfg, u8 *iebuf, s32 pktflag,
+            s8 *oui, s32 ie_id, s8 *data, s32 datalen, const s8* add_del_cmd)
+{
+	vndr_ie_setbuf_t hdr;	/* aligned temporary vndr_ie buffer header */
+	s32 iecount;
+	u32 data_offset;
+
+	/* Validate the pktflag parameter */
+	if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG |
+	            VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG |
+	            VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG))) {
+		CFGP2P_ERR(("p2pwl_vndr_ie: Invalid packet flag 0x%x\n", pktflag));
+		return -1;
+	}
+
+	/* Copy the vndr_ie SET command ("add"/"del") to the buffer */
+	strncpy(hdr.cmd, add_del_cmd, VNDR_IE_CMD_LEN - 1);
+	hdr.cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+
+	/* Set the IE count - the buffer contains only 1 IE */
+	iecount = htod32(1);
+	memcpy((void *)&hdr.vndr_ie_buffer.iecount, &iecount, sizeof(s32));
+
+	/* Copy packet flags that indicate which packets will contain this IE */
+	pktflag = htod32(pktflag);
+	memcpy((void *)&hdr.vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag,
+		sizeof(u32));
+
+	/* Add the IE ID to the buffer */
+	hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = ie_id;
+
+	/* Add the IE length to the buffer */
+	hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len =
+		(uint8) VNDR_IE_MIN_LEN + datalen;
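+	/* The encoded length covers the OUI plus the IE body; VNDR_IE_MIN_LEN
+	 * is assumed to be the 3-byte OUI size, so len = 3 + datalen.
+	 */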
+
+	/* Add the IE OUI to the buffer */
+	hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui[0] = oui[0];
+	hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui[1] = oui[1];
+	hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui[2] = oui[2];
+
+	/* Copy the aligned temporary vndr_ie buffer header to the IE buffer */
+	memcpy(iebuf, &hdr, sizeof(hdr) - 1);
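+	/* sizeof(hdr) - 1 is assumed to exclude the one-byte data[]
+	 * placeholder at the tail of vndr_ie_setbuf_t, so the copy below
+	 * places the real IE body exactly at data_offset.
+	 */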
+
+	/* Copy the IE data to the IE buffer */
+	data_offset =
+		(u8*)&hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.data[0] -
+		(u8*)&hdr;
+	memcpy(iebuf + data_offset, data, datalen);
+	return data_offset + datalen;
+
+}
+
+/*
+ * Search the bssidx based on dev argument
+ * Parameters:
+ * @cfg       : wl_private data
+ * @ndev     : net device to search bssidx
+ * @bssidx  : output arg to store bssidx of the bsscfg of firmware.
+ * Returns BCME_OK on success, BCME_BADARG on failure.
+ */
+s32
+wl_cfgp2p_find_idx(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 *bssidx)
+{
+	u32 i;
+	if (ndev == NULL || bssidx == NULL) {
+		CFGP2P_ERR((" argument is invalid\n"));
+		return BCME_BADARG;
+	}
+	if (!cfg->p2p_supported) {
+		*bssidx = P2PAPI_BSSCFG_PRIMARY;
+		return BCME_OK;
+	}
+	/* We cannot find the bssidx of the DISCOVERY BSS
+	 * because its ndev is the same as the ndev of the PRIMARY BSS.
+	 */
+	for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
+		if (ndev == wl_to_p2p_bss_ndev(cfg, i)) {
+			*bssidx = wl_to_p2p_bss_bssidx(cfg, i);
+			return BCME_OK;
+		}
+	}
+	return BCME_BADARG;
+}
+struct net_device *
+wl_cfgp2p_find_ndev(struct bcm_cfg80211 *cfg, s32 bssidx)
+{
+	u32 i;
+	struct net_device *ndev = NULL;
+	if (bssidx < 0) {
+		CFGP2P_ERR((" bsscfg idx is invalid\n"));
+		goto exit;
+	}
+
+	for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
+		if (bssidx == wl_to_p2p_bss_bssidx(cfg, i)) {
+			ndev = wl_to_p2p_bss_ndev(cfg, i);
+			break;
+		}
+	}
+
+exit:
+	return ndev;
+}
+/*
+ * Search the driver array idx based on bssidx argument
+ * Parameters:
+ * @cfg     : wl_private data
+ * @bssidx : bssidx which indicate bsscfg->idx of firmware.
+ * @type   : output arg to store array idx of p2p->bss.
+ * Returns BCME_OK on success, BCME_BADARG on failure.
+ */
+
+s32
+wl_cfgp2p_find_type(struct bcm_cfg80211 *cfg, s32 bssidx, s32 *type)
+{
+	u32 i;
+	if (bssidx < 0 || type == NULL) {
+		CFGP2P_ERR((" argument is invalid\n"));
+		goto exit;
+	}
+
+	for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
+		if (bssidx == wl_to_p2p_bss_bssidx(cfg, i)) {
+			*type = i;
+			return BCME_OK;
+		}
+	}
+
+exit:
+	return BCME_BADARG;
+}
+
+/*
+ * Callback function for WLC_E_P2P_DISC_LISTEN_COMPLETE
+ */
+s32
+wl_cfgp2p_listen_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data)
+{
+	s32 ret = BCME_OK;
+	struct net_device *ndev = NULL;
+
+	if (!cfg || !cfg->p2p)
+		return BCME_ERROR;
+
+	CFGP2P_DBG((" Enter\n"));
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	if (wl_get_p2p_status(cfg, LISTEN_EXPIRED) == 0) {
+		wl_set_p2p_status(cfg, LISTEN_EXPIRED);
+		if (timer_pending(&cfg->p2p->listen_timer)) {
+			del_timer_sync(&cfg->p2p->listen_timer);
+		}
+
+		if (cfg->afx_hdl->is_listen == TRUE &&
+			wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+			WL_DBG(("Listen DONE for action frame\n"));
+			complete(&cfg->act_frm_scan);
+		}
+#ifdef WL_CFG80211_SYNC_GON
+		else if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) {
+			wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM_LISTEN, ndev);
+			WL_DBG(("Listen DONE and wake up wait_next_af !!(%d)\n",
+				jiffies_to_msecs(jiffies - cfg->af_tx_sent_jiffies)));
+
+			if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM))
+				wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+			complete(&cfg->wait_next_af);
+		}
+#endif /* WL_CFG80211_SYNC_GON */
+
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+		if (wl_get_drv_status_all(cfg, REMAINING_ON_CHANNEL)) {
+#else
+		if (wl_get_drv_status_all(cfg, REMAINING_ON_CHANNEL) ||
+			wl_get_drv_status_all(cfg, FAKE_REMAINING_ON_CHANNEL)) {
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+			WL_DBG(("Listen DONE for remain on channel expired\n"));
+			wl_clr_drv_status(cfg, REMAINING_ON_CHANNEL, ndev);
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+			wl_clr_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+			if (ndev && (ndev->ieee80211_ptr != NULL)) {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+				cfg80211_remain_on_channel_expired(cfgdev, cfg->last_roc_id,
+					&cfg->remain_on_chan, GFP_KERNEL);
+#else
+				cfg80211_remain_on_channel_expired(cfgdev, cfg->last_roc_id,
+					&cfg->remain_on_chan, cfg->remain_on_chan_type, GFP_KERNEL);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+			}
+		}
+		if (wl_add_remove_eventmsg(bcmcfg_to_prmry_ndev(cfg),
+			WLC_E_P2P_PROBREQ_MSG, false) != BCME_OK) {
+			CFGP2P_ERR((" failed to unset WLC_E_P2P_PROBREQ_MSG\n"));
+		}
+	} else
+		wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
+
+	return ret;
+
+}
+
+/*
+ *  Timer expire callback function for LISTEN
+ *  We can't report cfg80211_remain_on_channel_expired from Timer ISR context,
+ *  so let's do it from thread context.
+ */
+void
+wl_cfgp2p_listen_expired(unsigned long data)
+{
+	wl_event_msg_t msg;
+	struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *) data;
+	CFGP2P_DBG((" Enter\n"));
+	bzero(&msg, sizeof(wl_event_msg_t));
+	msg.event_type =  hton32(WLC_E_P2P_DISC_LISTEN_COMPLETE);
+	msg.bsscfgidx = P2PAPI_BSSCFG_DEVICE;
+#if defined(WL_ENABLE_P2P_IF)
+	wl_cfg80211_event(cfg->p2p_net ? cfg->p2p_net :
+		wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE), &msg, NULL);
+#else
+	wl_cfg80211_event(wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE), &msg,
+		NULL);
+#endif /* WL_ENABLE_P2P_IF */
+}
+/*
+ *  Routine for cancelling the P2P LISTEN
+ */
+static s32
+wl_cfgp2p_cancel_listen(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+                         struct wireless_dev *wdev, bool notify)
+{
+	WL_DBG(("Enter \n"));
+	/* Irrespective of whether timer is running or not, reset
+	 * the LISTEN state.
+	 */
+	if (timer_pending(&cfg->p2p->listen_timer)) {
+		del_timer_sync(&cfg->p2p->listen_timer);
+		if (notify)
+			if (ndev && ndev->ieee80211_ptr) {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+				cfg80211_remain_on_channel_expired(wdev, cfg->last_roc_id,
+					&cfg->remain_on_chan, GFP_KERNEL);
+#else
+				cfg80211_remain_on_channel_expired(ndev, cfg->last_roc_id,
+					&cfg->remain_on_chan, cfg->remain_on_chan_type, GFP_KERNEL);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+			}
+	}
+	return 0;
+}
+/*
+ * Do a P2P Listen on the given channel for the given duration.
+ * A listen consists of sitting idle and responding to P2P probe requests
+ * with a P2P probe response.
+ *
+ * This fn assumes dongle p2p device discovery is already enabled.
+ * Parameters   :
+ * @cfg          : wl_private data
+ * @channel     : channel to listen
+ * @duration_ms : the time (milliseconds) to wait
+ */
+s32
+wl_cfgp2p_discover_listen(struct bcm_cfg80211 *cfg, s32 channel, u32 duration_ms)
+{
+#define EXTRA_DELAY_TIME	100
+	s32 ret = BCME_OK;
+	struct timer_list *_timer;
+	s32 extra_delay;
+	struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg);
+
+	CFGP2P_DBG((" Enter Listen Channel : %d, Duration : %d\n", channel, duration_ms));
+	if (unlikely(wl_get_p2p_status(cfg, DISCOVERY_ON) == 0)) {
+		CFGP2P_ERR((" Discovery is not set, so we have nothing to do\n"));
+		ret = BCME_NOTREADY;
+		goto exit;
+	}
+	if (timer_pending(&cfg->p2p->listen_timer)) {
+		CFGP2P_DBG(("previous LISTEN is not completed yet\n"));
+		goto exit;
+	}
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	else
+		wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
+#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+	if (wl_add_remove_eventmsg(netdev, WLC_E_P2P_PROBREQ_MSG, true) != BCME_OK) {
+		CFGP2P_ERR((" failed to set WLC_E_P2P_PROBREQ_MSG\n"));
+	}
+
+	ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_LISTEN, channel, (u16) duration_ms,
+	            wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+	_timer = &cfg->p2p->listen_timer;
+
+	/*  We will wait to receive WLC_E_P2P_DISC_LISTEN_COMPLETE from dongle ,
+	 *  otherwise we will wait up to duration_ms + 100ms + duration / 10
+	 */
+	if (ret == BCME_OK) {
+		extra_delay = EXTRA_DELAY_TIME + (duration_ms / 10);
+	} else {
+		/* If setting listen failed, we don't need to wait the whole duration. */
+		duration_ms = 100 + duration_ms / 20;
+		extra_delay = 0;
+	}
+
+	INIT_TIMER(_timer, wl_cfgp2p_listen_expired, duration_ms, extra_delay);
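+	/* Worked example (illustrative, assuming INIT_TIMER arms the timer for
+	 * duration_ms + extra_delay): a successful 200 ms listen arms the
+	 * fallback timer for 200 + 100 + 200/10 = 320 ms.
+	 */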
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+#undef EXTRA_DELAY_TIME
+exit:
+	return ret;
+}
+
+
+s32
+wl_cfgp2p_discover_enable_search(struct bcm_cfg80211 *cfg, u8 enable)
+{
+	s32 ret = BCME_OK;
+	CFGP2P_DBG((" Enter\n"));
+	if (!wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+		CFGP2P_DBG((" do nothing, discovery is off\n"));
+		return ret;
+	}
+	if (wl_get_p2p_status(cfg, SEARCH_ENABLED) == enable) {
+		CFGP2P_DBG(("already : %d\n", enable));
+		return ret;
+	}
+
+	wl_chg_p2p_status(cfg, SEARCH_ENABLED);
+	/* When disabling Search, reset the WL driver's p2p discovery state to
+	 * WL_P2P_DISC_ST_SCAN.
+	 */
+	if (!enable) {
+		wl_clr_p2p_status(cfg, SCANNING);
+		ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+		            wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+	}
+
+	return ret;
+}
+
+/*
+ * Callback function for WLC_E_ACTION_FRAME_COMPLETE, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE
+ */
+s32
+wl_cfgp2p_action_tx_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+            const wl_event_msg_t *e, void *data)
+{
+	s32 ret = BCME_OK;
+	u32 event_type = ntoh32(e->event_type);
+	u32 status = ntoh32(e->status);
+	struct net_device *ndev = NULL;
+	CFGP2P_DBG((" Enter\n"));
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM)) {
+		if (event_type == WLC_E_ACTION_FRAME_COMPLETE) {
+
+			CFGP2P_INFO((" WLC_E_ACTION_FRAME_COMPLETE is received : %d\n", status));
+			if (status == WLC_E_STATUS_SUCCESS) {
+				wl_set_p2p_status(cfg, ACTION_TX_COMPLETED);
+				CFGP2P_DBG(("WLC_E_ACTION_FRAME_COMPLETE : ACK\n"));
+			}
+			else if (!wl_get_p2p_status(cfg, ACTION_TX_COMPLETED)) {
+				wl_set_p2p_status(cfg, ACTION_TX_NOACK);
+				CFGP2P_INFO(("WLC_E_ACTION_FRAME_COMPLETE : NO ACK\n"));
+				wl_stop_wait_next_action_frame(cfg, ndev);
+			}
+		} else {
+			CFGP2P_INFO((" WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE is received, "
+						"status : %d\n", status));
+
+			if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM))
+				complete(&cfg->send_af_done);
+		}
+	}
+	return ret;
+}
+/* Send an action frame immediately without doing channel synchronization.
+ *
+ * This function does not wait for a completion event before returning.
+ * The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action
+ * frame is transmitted.
+ * The WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE event will be received when an
+ * 802.11 ack has been received for the sent action frame.
+ */
+s32
+wl_cfgp2p_tx_action_frame(struct bcm_cfg80211 *cfg, struct net_device *dev,
+	wl_af_params_t *af_params, s32 bssidx)
+{
+	s32 ret = BCME_OK;
+	s32 evt_ret = BCME_OK;
+	s32 timeout = 0;
+	wl_eventmsg_buf_t buf;
+
+
+	CFGP2P_INFO(("\n"));
+	CFGP2P_INFO(("channel : %u , dwell time : %u\n",
+	    af_params->channel, af_params->dwell_time));
+
+	wl_clr_p2p_status(cfg, ACTION_TX_COMPLETED);
+	wl_clr_p2p_status(cfg, ACTION_TX_NOACK);
+
+	bzero(&buf, sizeof(wl_eventmsg_buf_t));
+	wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE, true);
+	wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_COMPLETE, true);
+	if ((evt_ret = wl_cfg80211_apply_eventbuffer(bcmcfg_to_prmry_ndev(cfg), cfg, &buf)) < 0)
+		return evt_ret;
+
+	cfg->af_sent_channel  = af_params->channel;
+#ifdef WL_CFG80211_SYNC_GON
+	cfg->af_tx_sent_jiffies = jiffies;
+#endif /* WL_CFG80211_SYNC_GON */
+
+	ret = wldev_iovar_setbuf_bsscfg(dev, "actframe", af_params, sizeof(*af_params),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+	if (ret < 0) {
+		CFGP2P_ERR((" sending action frame failed\n"));
+		goto exit;
+	}
+
+	timeout = wait_for_completion_timeout(&cfg->send_af_done,
+		msecs_to_jiffies(af_params->dwell_time + WL_AF_TX_EXTRA_TIME_MAX));
+
+	if (timeout >= 0 && wl_get_p2p_status(cfg, ACTION_TX_COMPLETED)) {
+		CFGP2P_INFO(("tx action frame operation is completed\n"));
+		ret = BCME_OK;
+	} else if (ETHER_ISBCAST(&cfg->afx_hdl->tx_dst_addr)) {
+		CFGP2P_INFO(("bcast tx action frame operation is completed\n"));
+		ret = BCME_OK;
+	} else {
+		ret = BCME_ERROR;
+		CFGP2P_INFO(("tx action frame operation failed\n"));
+	}
+	/* clear status bit for action tx */
+	wl_clr_p2p_status(cfg, ACTION_TX_COMPLETED);
+	wl_clr_p2p_status(cfg, ACTION_TX_NOACK);
+
+exit:
+	CFGP2P_INFO((" via act frame iovar : status = %d\n", ret));
+
+	bzero(&buf, sizeof(wl_eventmsg_buf_t));
+	wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE, false);
+	wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_COMPLETE, false);
+	if ((evt_ret = wl_cfg80211_apply_eventbuffer(bcmcfg_to_prmry_ndev(cfg), cfg, &buf)) < 0) {
+		WL_ERR(("reverting TX frame events failed\n"));
+		return evt_ret;
+	}
+
+	return ret;
+}
+
+/* Generate our P2P Device Address and P2P Interface Address from our primary
+ * MAC address.
+ */
+void
+wl_cfgp2p_generate_bss_mac(struct ether_addr *primary_addr,
+            struct ether_addr *out_dev_addr, struct ether_addr *out_int_addr)
+{
+	memset(out_dev_addr, 0, sizeof(*out_dev_addr));
+	memset(out_int_addr, 0, sizeof(*out_int_addr));
+
+	/* Generate the P2P Device Address.  This consists of the device's
+	 * primary MAC address with the locally administered bit set.
+	 */
+	memcpy(out_dev_addr, primary_addr, sizeof(*out_dev_addr));
+	out_dev_addr->octet[0] |= 0x02;
+
+	/* Generate the P2P Interface Address.  If the discovery and connection
+	 * BSSCFGs need to simultaneously co-exist, then this address must be
+	 * different from the P2P Device Address.
+	 */
+	memcpy(out_int_addr, out_dev_addr, sizeof(*out_int_addr));
+	out_int_addr->octet[4] ^= 0x80;
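+	/* Illustrative example (hypothetical MAC): a primary address of
+	 * 00:90:4c:aa:bb:cc yields the dev addr 02:90:4c:aa:bb:cc (locally
+	 * administered bit set) and the int addr 02:90:4c:aa:3b:cc (bit 7 of
+	 * octet[4] flipped).
+	 */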
+
+}
+
+/* Change the P2P IF address in the IE to the virtual interface MAC address */
+void
+wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id)
+{
+	wifi_p2p_ie_t *ie = (wifi_p2p_ie_t*) buf;
+	u16 len = ie->len;
+	u8 *subel;
+	u8 subelt_id;
+	u16 subelt_len;
+	CFGP2P_DBG((" Enter\n"));
+
+	/* Point subel to the P2P IE's subelt field.
+	 * Subtract the preceding fields (id, len, OUI, oui_type) from the length.
+	 */
+	subel = ie->subelts;
+	len -= 4;	/* exclude OUI + OUI_TYPE */
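+	/* Each subelement (P2P attribute) is laid out as
+	 *   1-byte attribute id | 2-byte little-endian length | body
+	 * which is exactly what the loop below walks.
+	 */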
+
+	while (len >= 3) {
+		/* attribute id */
+		subelt_id = *subel;
+		subel += 1;
+		len -= 1;
+
+		/* 2-byte little endian */
+		subelt_len = *subel++;
+		subelt_len |= *subel++ << 8;
+
+		len -= 2;
+		len -= subelt_len;	/* for the remaining subelt fields */
+
+		if (subelt_id == element_id) {
+			if (subelt_id == P2P_SEID_INTINTADDR) {
+				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+				CFGP2P_INFO(("Intended P2P Interface Address ATTR FOUND\n"));
+			} else if (subelt_id == P2P_SEID_DEV_ID) {
+				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+				CFGP2P_INFO(("Device ID ATTR FOUND\n"));
+			} else if (subelt_id == P2P_SEID_DEV_INFO) {
+				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+				CFGP2P_INFO(("Device INFO ATTR FOUND\n"));
+			} else if (subelt_id == P2P_SEID_GROUP_ID) {
+				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+				CFGP2P_INFO(("GROUP ID ATTR FOUND\n"));
+			}
+			return;
+		} else {
+			CFGP2P_DBG(("OTHER id : %d\n", subelt_id));
+		}
+		subel += subelt_len;
+	}
+}
+/*
+ * Check if a BSS is up.
+ * This is a common implementation called by most OSL implementations of
+ * p2posl_bss_isup().  DO NOT call this function directly from the
+ * common code -- call p2posl_bss_isup() instead to allow the OSL to
+ * override the common implementation if necessary.
+ */
+bool
+wl_cfgp2p_bss_isup(struct net_device *ndev, int bsscfg_idx)
+{
+	s32 result, val;
+	bool isup = false;
+	s8 getbuf[64];
+
+	/* Check if the BSS is up */
+	*(int*)getbuf = -1;
+	result = wldev_iovar_getbuf_bsscfg(ndev, "bss", &bsscfg_idx,
+		sizeof(bsscfg_idx), getbuf, sizeof(getbuf), 0, NULL);
+	if (result != 0) {
+		CFGP2P_ERR(("'cfg bss -C %d' failed: %d\n", bsscfg_idx, result));
+		CFGP2P_ERR(("NOTE: this ioctl error is normal "
+					"when the BSS has not been created yet.\n"));
+	} else {
+		val = *(int*)getbuf;
+		val = dtoh32(val);
+		CFGP2P_INFO(("---cfg bss -C %d   ==> %d\n", bsscfg_idx, val));
+		isup = (val ? TRUE : FALSE);
+	}
+	return isup;
+}
+
+
+/* Bring up or down a BSS */
+s32
+wl_cfgp2p_bss(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bsscfg_idx, s32 up)
+{
+	s32 ret = BCME_OK;
+	s32 val = up ? 1 : 0;
+
+	struct {
+		s32 cfg;
+		s32 val;
+	} bss_setbuf;
+
+	bss_setbuf.cfg = htod32(bsscfg_idx);
+	bss_setbuf.val = htod32(val);
+	CFGP2P_INFO(("---cfg bss -C %d %s\n", bsscfg_idx, up ? "up" : "down"));
+	ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+
+	if (ret != 0) {
+		CFGP2P_ERR(("'bss %d' failed with %d\n", up, ret));
+	}
+
+	return ret;
+}
+
+/* Check if 'p2p' is supported in the driver */
+s32
+wl_cfgp2p_supported(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	s32 ret = BCME_OK;
+	s32 p2p_supported = 0;
+	ret = wldev_iovar_getint(ndev, "p2p",
+	               &p2p_supported);
+	if (ret < 0) {
+		if (ret == BCME_UNSUPPORTED) {
+			CFGP2P_INFO(("p2p is unsupported\n"));
+			return 0;
+		} else {
+			CFGP2P_ERR(("cfg p2p error %d\n", ret));
+			return ret;
+		}
+	}
+	if (p2p_supported == 1) {
+		CFGP2P_INFO(("p2p is supported\n"));
+	} else {
+		CFGP2P_INFO(("p2p is unsupported\n"));
+		p2p_supported = 0;
+	}
+	return p2p_supported;
+}
+/* Cleanup P2P resources */
+s32
+wl_cfgp2p_down(struct bcm_cfg80211 *cfg)
+{
+	struct net_device *ndev = NULL;
+	struct wireless_dev *wdev = NULL;
+	s32 i = 0, index = -1;
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	ndev = bcmcfg_to_prmry_ndev(cfg);
+	wdev = bcmcfg_to_p2p_wdev(cfg);
+#elif defined(WL_ENABLE_P2P_IF)
+	ndev = cfg->p2p_net ? cfg->p2p_net : bcmcfg_to_prmry_ndev(cfg);
+	wdev = ndev_to_wdev(ndev);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+	wl_cfgp2p_cancel_listen(cfg, ndev, wdev, TRUE);
+	for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
+			index = wl_to_p2p_bss_bssidx(cfg, i);
+			if (index != WL_INVALID)
+				wl_cfgp2p_clear_management_ie(cfg, index);
+	}
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	wl_cfgp2p_del_p2p_disc_if(wdev, cfg);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	wl_cfgp2p_deinit_priv(cfg);
+	return 0;
+}
+s32
+wl_cfgp2p_set_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len)
+{
+	s32 ret = -1;
+	int count, start, duration;
+	wl_p2p_sched_t dongle_noa;
+
+	CFGP2P_DBG((" Enter\n"));
+
+	memset(&dongle_noa, 0, sizeof(dongle_noa));
+
+	if (cfg->p2p && cfg->p2p->vif_created) {
+
+		cfg->p2p->noa.desc[0].start = 0;
+
+		sscanf(buf, "%10d %10d %10d", &count, &start, &duration);
+		CFGP2P_DBG(("set_p2p_noa count %d start %d duration %d\n",
+			count, start, duration));
+		if (count != -1)
+			cfg->p2p->noa.desc[0].count = count;
+
+		/* supplicant gives interval as start */
+		if (start != -1)
+			cfg->p2p->noa.desc[0].interval = start;
+
+		if (duration != -1)
+			cfg->p2p->noa.desc[0].duration = duration;
+
+		if (cfg->p2p->noa.desc[0].count < 255 && cfg->p2p->noa.desc[0].count > 1) {
+			cfg->p2p->noa.desc[0].start = 0;
+			dongle_noa.type = WL_P2P_SCHED_TYPE_ABS;
+			dongle_noa.action = WL_P2P_SCHED_ACTION_NONE;
+			dongle_noa.option = WL_P2P_SCHED_OPTION_TSFOFS;
+		}
+		else if (cfg->p2p->noa.desc[0].count == 1) {
+			cfg->p2p->noa.desc[0].start = 200;
+			dongle_noa.type = WL_P2P_SCHED_TYPE_REQ_ABS;
+			dongle_noa.action = WL_P2P_SCHED_ACTION_GOOFF;
+			dongle_noa.option = WL_P2P_SCHED_OPTION_TSFOFS;
+		}
+		else if (cfg->p2p->noa.desc[0].count == 0) {
+			cfg->p2p->noa.desc[0].start = 0;
+			dongle_noa.action = WL_P2P_SCHED_ACTION_RESET;
+		}
+		else {
+			/* Continuous NoA interval. */
+			dongle_noa.action = WL_P2P_SCHED_ACTION_NONE;
+			dongle_noa.type = WL_P2P_SCHED_TYPE_ABS;
+			if ((cfg->p2p->noa.desc[0].interval == 102) ||
+				(cfg->p2p->noa.desc[0].interval == 100)) {
+				cfg->p2p->noa.desc[0].start = 100 -
+					cfg->p2p->noa.desc[0].duration;
+				dongle_noa.option = WL_P2P_SCHED_OPTION_BCNPCT;
+			}
+			else {
+				dongle_noa.option = WL_P2P_SCHED_OPTION_NORMAL;
+			}
+		}
+		/* Put the noa descriptor in dongle format for dongle */
+		dongle_noa.desc[0].count = htod32(cfg->p2p->noa.desc[0].count);
+		if (dongle_noa.option == WL_P2P_SCHED_OPTION_BCNPCT) {
+			dongle_noa.desc[0].start = htod32(cfg->p2p->noa.desc[0].start);
+			dongle_noa.desc[0].duration = htod32(cfg->p2p->noa.desc[0].duration);
+		}
+		else {
+			dongle_noa.desc[0].start = htod32(cfg->p2p->noa.desc[0].start*1000);
+			dongle_noa.desc[0].duration = htod32(cfg->p2p->noa.desc[0].duration*1000);
+		}
+		dongle_noa.desc[0].interval = htod32(cfg->p2p->noa.desc[0].interval*1000);
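+		/* The x1000 scaling here assumes the host keeps NoA start/
+		 * duration/interval in ms while the dongle expects
+		 * microseconds; the BCNPCT option above passes start/duration
+		 * through unscaled, apparently as beacon-interval percentages.
+		 */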
+
+		ret = wldev_iovar_setbuf(wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION),
+			"p2p_noa", &dongle_noa, sizeof(dongle_noa), cfg->ioctl_buf,
+			WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+
+		if (ret < 0) {
+			CFGP2P_ERR(("fw set p2p_noa failed %d\n", ret));
+		}
+	}
+	else {
+		CFGP2P_ERR(("ERROR: set_noa in non-p2p mode\n"));
+	}
+	return ret;
+}
+s32
+wl_cfgp2p_get_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int buf_len)
+{
+
+	wifi_p2p_noa_desc_t *noa_desc;
+	int len = 0, i;
+	char _buf[200];
+
+	CFGP2P_DBG((" Enter\n"));
+	buf[0] = '\0';
+	if (cfg->p2p && cfg->p2p->vif_created) {
+		if (cfg->p2p->noa.desc[0].count || cfg->p2p->ops.ops) {
+			_buf[0] = 1; /* noa index */
+			_buf[1] = (cfg->p2p->ops.ops ? 0x80: 0) |
+				(cfg->p2p->ops.ctw & 0x7f); /* ops + ctw */
+			len += 2;
+			if (cfg->p2p->noa.desc[0].count) {
+				noa_desc = (wifi_p2p_noa_desc_t*)&_buf[len];
+				noa_desc->cnt_type = cfg->p2p->noa.desc[0].count;
+				noa_desc->duration = cfg->p2p->noa.desc[0].duration;
+				noa_desc->interval = cfg->p2p->noa.desc[0].interval;
+				noa_desc->start = cfg->p2p->noa.desc[0].start;
+				len += sizeof(wifi_p2p_noa_desc_t);
+			}
+			if (buf_len <= len * 2) {
+				CFGP2P_ERR(("ERROR: buf_len %d is not enough for "
+					"returning noa in string format\n", buf_len));
+				return -1;
+			}
+			/* We have to convert the buffer data into ASCII strings */
+			for (i = 0; i < len; i++) {
+				snprintf(buf, 3, "%02x", _buf[i]);
+				buf += 2;
+			}
+			*buf = '\0';	/* buf already advanced past the hex digits */
+		}
+	}
+	else {
+		CFGP2P_ERR(("ERROR: get_noa in non-p2p mode\n"));
+		return -1;
+	}
+	return len * 2;
+}
+s32
+wl_cfgp2p_set_p2p_ps(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len)
+{
+	int ps, ctw;
+	int ret = -1;
+	s32 legacy_ps;
+	struct net_device *dev;
+
+	CFGP2P_DBG((" Enter\n"));
+	if (cfg->p2p && cfg->p2p->vif_created) {
+		sscanf(buf, "%10d %10d %10d", &legacy_ps, &ps, &ctw);
+		CFGP2P_DBG((" Enter legacy_ps %d ps %d ctw %d\n", legacy_ps, ps, ctw));
+		dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION);
+		if (ctw != -1) {
+			cfg->p2p->ops.ctw = ctw;
+			ret = 0;
+		}
+		if (ps != -1) {
+			cfg->p2p->ops.ops = ps;
+			ret = wldev_iovar_setbuf(dev,
+				"p2p_ops", &cfg->p2p->ops, sizeof(cfg->p2p->ops),
+				cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+			if (ret < 0) {
+				CFGP2P_ERR(("fw set p2p_ops failed %d\n", ret));
+			}
+		}
+
+		if ((legacy_ps != -1) && ((legacy_ps == PM_MAX) || (legacy_ps == PM_OFF))) {
+			ret = wldev_ioctl(dev,
+				WLC_SET_PM, &legacy_ps, sizeof(legacy_ps), true);
+			if (unlikely(ret))
+				CFGP2P_ERR(("error (%d)\n", ret));
+			wl_cfg80211_update_power_mode(dev);
+		}
+		else
+			CFGP2P_ERR(("illegal setting\n"));
+	}
+	else {
+		CFGP2P_ERR(("ERROR: set_p2p_ps in non-p2p mode\n"));
+		ret = -1;
+	}
+	return ret;
+}
+
+u8 *
+wl_cfgp2p_retreive_p2pattrib(void *buf, u8 element_id)
+{
+	wifi_p2p_ie_t *ie = NULL;
+	u16 len = 0;
+	u8 *subel;
+	u8 subelt_id;
+	u16 subelt_len;
+
+	if (!buf) {
+		WL_ERR(("P2P IE not present"));
+		return 0;
+	}
+
+	ie = (wifi_p2p_ie_t*) buf;
+	len = ie->len;
+
+	/* Point subel to the P2P IE's subelt field.
+	 * Subtract the preceding fields (id, len, OUI, oui_type) from the length.
+	 */
+	subel = ie->subelts;
+	len -= 4;	/* exclude OUI + OUI_TYPE */
+
+	while (len >= 3) {
+		/* attribute id */
+		subelt_id = *subel;
+		subel += 1;
+		len -= 1;
+
+		/* 2-byte little endian */
+		subelt_len = *subel++;
+		subelt_len |= *subel++ << 8;
+
+		len -= 2;
+		len -= subelt_len;	/* for the remaining subelt fields */
+
+		if (subelt_id == element_id) {
+			/* This will point to start of subelement attrib after
+			 * attribute id & len
+			 */
+			return subel;
+		}
+
+		/* Go to next subelement */
+		subel += subelt_len;
+	}
+
+	/* Not Found */
+	return NULL;
+}
+
+#define P2P_GROUP_CAPAB_GO_BIT	0x01
+
+u8*
+wl_cfgp2p_find_attrib_in_all_p2p_Ies(u8 *parse, u32 len, u32 attrib)
+{
+	bcm_tlv_t *ie;
+	u8* pAttrib;
+
+	CFGP2P_INFO(("Start parsing: parse %p attrib %d remaining len %d ", parse, attrib, len));
+	while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) {
+		if (wl_cfgp2p_is_p2p_ie((uint8*)ie, &parse, &len) == TRUE) {
+			/* Have the P2p ie. Now check for attribute */
+			if ((pAttrib = wl_cfgp2p_retreive_p2pattrib(parse, attrib)) != NULL) {
+				CFGP2P_INFO(("P2P attribute %d was found at parse %p",
+					attrib, parse));
+				return pAttrib;
+			}
+			else {
+				parse += (ie->len + TLV_HDR_LEN);
+				len -= (ie->len + TLV_HDR_LEN);
+				CFGP2P_INFO(("P2P Attribute %d not found Moving parse"
+					" to %p len to %d", attrib, parse, len));
+			}
+		}
+		else {
+			/* It was not p2p IE. parse will get updated automatically to next TLV */
+			CFGP2P_INFO(("It was not a P2P IE. parse %p len %d", parse, len));
+		}
+	}
+	CFGP2P_ERR(("P2P attribute %d was NOT found", attrib));
+	return NULL;
+}
+
+u8 *
+wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length)
+{
+	u8 *capability = NULL;
+	bool p2p_go	= 0;
+	u8 *ptr = NULL;
+
+	if ((capability = wl_cfgp2p_find_attrib_in_all_p2p_Ies(((u8 *) bi) + bi->ie_offset,
+	bi->ie_length, P2P_SEID_P2P_INFO)) == NULL) {
+		WL_ERR(("P2P Capability attribute not found"));
+		return NULL;
+	}
+
+	/* Check Group capability for Group Owner bit */
+	p2p_go = capability[1] & P2P_GROUP_CAPAB_GO_BIT;
+	if (!p2p_go) {
+		return bi->BSSID.octet;
+	}
+
+	/* In probe responses, DEVICE INFO attribute will be present */
+	if (!(ptr = wl_cfgp2p_find_attrib_in_all_p2p_Ies(((u8 *) bi) + bi->ie_offset,
+	bi->ie_length,  P2P_SEID_DEV_INFO))) {
+		/* If DEVICE_INFO is not found, this might be a beacon frame.
+		 * check for DEVICE_ID in the beacon frame.
+		 */
+		ptr = wl_cfgp2p_find_attrib_in_all_p2p_Ies(((u8 *) bi) + bi->ie_offset,
+		bi->ie_length,  P2P_SEID_DEV_ID);
+	}
+
+	if (!ptr)
+		WL_ERR((" Neither DEVICE_ID nor DEVICE_INFO attribute present in P2P IE "));
+
+	return ptr;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+static void
+wl_cfgp2p_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
+{
+	snprintf(info->driver, sizeof(info->driver), "p2p");
+	snprintf(info->version, sizeof(info->version), "%lu", (unsigned long)(0));
+}
+
+struct ethtool_ops cfgp2p_ethtool_ops = {
+	.get_drvinfo = wl_cfgp2p_ethtool_get_drvinfo
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+#if defined(WL_ENABLE_P2P_IF)
+s32
+wl_cfgp2p_register_ndev(struct bcm_cfg80211 *cfg)
+{
+	int ret = 0;
+	struct net_device* net = NULL;
+	struct wireless_dev *wdev = NULL;
+	uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x33, 0x22, 0x11 };
+
+	if (cfg->p2p_net) {
+		CFGP2P_ERR(("p2p_net defined already.\n"));
+		return -EINVAL;
+	}
+
+	/* Allocate etherdev, including space for private structure */
+	if (!(net = alloc_etherdev(sizeof(struct bcm_cfg80211 *)))) {
+		CFGP2P_ERR(("%s: OOM - alloc_etherdev\n", __FUNCTION__));
+		return -ENODEV;
+	}
+
+	wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+	if (unlikely(!wdev)) {
+		WL_ERR(("Could not allocate wireless device\n"));
+		free_netdev(net);
+		return -ENOMEM;
+	}
+
+	strncpy(net->name, "p2p%d", sizeof(net->name) - 1);
+	net->name[IFNAMSIZ - 1] = '\0';
+
+	/* Copy the reference to bcm_cfg80211 */
+	memcpy((void *)netdev_priv(net), &cfg, sizeof(struct bcm_cfg80211 *));
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+	ASSERT(!net->open);
+	net->do_ioctl = wl_cfgp2p_do_ioctl;
+	net->hard_start_xmit = wl_cfgp2p_start_xmit;
+	net->open = wl_cfgp2p_if_open;
+	net->stop = wl_cfgp2p_if_stop;
+#else
+	ASSERT(!net->netdev_ops);
+	net->netdev_ops = &wl_cfgp2p_if_ops;
+#endif
+
+	/* Register with a dummy MAC addr */
+	memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
+
+	wdev->wiphy = cfg->wdev->wiphy;
+
+	wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS);
+
+	net->ieee80211_ptr = wdev;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+	net->ethtool_ops = &cfgp2p_ethtool_ops;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+	SET_NETDEV_DEV(net, wiphy_dev(wdev->wiphy));
+
+	/* Associate p2p0 network interface with new wdev */
+	wdev->netdev = net;
+
+	ret = register_netdev(net);
+	if (ret) {
+		CFGP2P_ERR((" register_netdev failed (%d)\n", ret));
+		free_netdev(net);
+		kfree(wdev);
+		return -ENODEV;
+	}
+
+	/* Store the p2p net ptr for further reference. Note that iflist won't
+	 * have this entry, as the corresponding firmware interface is a
+	 * "hidden" interface.
+	 */
+	cfg->p2p_wdev = wdev;
+	cfg->p2p_net = net;
+
+	printk("%s: P2P Interface Registered\n", net->name);
+
+	return ret;
+}
+
+s32
+wl_cfgp2p_unregister_ndev(struct bcm_cfg80211 *cfg)
+{
+
+	if (!cfg || !cfg->p2p_net) {
+		CFGP2P_ERR(("Invalid Ptr\n"));
+		return -EINVAL;
+	}
+
+	unregister_netdev(cfg->p2p_net);
+	free_netdev(cfg->p2p_net);
+
+	return 0;
+}
+
+static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+
+	if (skb)
+	{
+		CFGP2P_DBG(("(%s) is not used for data operations. Dropping the packet.\n",
+			ndev->name));
+		dev_kfree_skb_any(skb);
+	}
+
+	return 0;
+}
+
+static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+	int ret = 0;
+	struct bcm_cfg80211 *cfg = *(struct bcm_cfg80211 **)netdev_priv(net);
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	/* There is no ifidx corresponding to p2p0 in our firmware. So we should
+	 * not Handle any IOCTL cmds on p2p0 other than ANDROID PRIVATE CMDs.
+	 * For Android PRIV CMD handling map it to primary I/F
+	 */
+	if (cmd == SIOCDEVPRIVATE+1) {
+		ret = wl_android_priv_cmd(ndev, ifr, cmd);
+
+	} else {
+		CFGP2P_ERR(("%s: IOCTL req 0x%x on p2p0 I/F. Ignoring.\n",
+			__FUNCTION__, cmd));
+		return -1;
+	}
+
+	return ret;
+}
+#endif /* WL_ENABLE_P2P_IF */
+
+#if defined(WL_ENABLE_P2P_IF)
+static int wl_cfgp2p_if_open(struct net_device *net)
+{
+	struct wireless_dev *wdev = net->ieee80211_ptr;
+
+	if (!wdev || !wl_cfg80211_is_p2p_active())
+		return -EINVAL;
+	WL_TRACE(("Enter\n"));
+#if !defined(WL_IFACE_COMB_NUM_CHANNELS)
+	/* If suppose F/W download (ifconfig wlan0 up) hasn't been done by now,
+	 * do it here. This will make sure that in concurrent mode, supplicant
+	 * is not dependent on a particular order of interface initialization.
+	 * i.e you may give wpa_supp -iwlan0 -N -ip2p0 or wpa_supp -ip2p0 -N
+	 * -iwlan0.
+	 */
+	wdev->wiphy->interface_modes |= (BIT(NL80211_IFTYPE_P2P_CLIENT)
+		| BIT(NL80211_IFTYPE_P2P_GO));
+#endif /* !WL_IFACE_COMB_NUM_CHANNELS */
+	wl_cfg80211_do_driver_init(net);
+
+	return 0;
+}
+
+static int wl_cfgp2p_if_stop(struct net_device *net)
+{
+	struct wireless_dev *wdev = net->ieee80211_ptr;
+
+	if (!wdev)
+		return -EINVAL;
+
+	wl_cfg80211_scan_stop(net);
+
+#if !defined(WL_IFACE_COMB_NUM_CHANNELS)
+	wdev->wiphy->interface_modes = (wdev->wiphy->interface_modes)
+					& (~(BIT(NL80211_IFTYPE_P2P_CLIENT)|
+					BIT(NL80211_IFTYPE_P2P_GO)));
+#endif /* !WL_IFACE_COMB_NUM_CHANNELS */
+	return 0;
+}
+
+bool wl_cfgp2p_is_ifops(const struct net_device_ops *if_ops)
+{
+	return (if_ops == &wl_cfgp2p_if_ops);
+}
+#endif /* WL_ENABLE_P2P_IF */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+struct wireless_dev *
+wl_cfgp2p_add_p2p_disc_if(struct bcm_cfg80211 *cfg)
+{
+	struct wireless_dev *wdev = NULL;
+	struct ether_addr primary_mac;
+
+	if (!cfg || !cfg->p2p) {
+		CFGP2P_ERR(("%s: Bad values\n", __FUNCTION__));
+		return ERR_PTR(-EINVAL);
+	}
+
+	mutex_lock(&cfg->p2p_wdev_sync);
+	WL_TRACE(("Enter\n"));
+
+	if (cfg->p2p_wdev) {
+		CFGP2P_ERR(("p2p_wdev defined already.\n"));
+		mutex_unlock(&cfg->p2p_wdev_sync);
+		return ERR_PTR(-EEXIST);
+	}
+
+	wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+	if (unlikely(!wdev)) {
+		WL_ERR(("Could not allocate wireless device\n"));
+		mutex_unlock(&cfg->p2p_wdev_sync);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	memset(&primary_mac, 0, sizeof(primary_mac));
+	get_primary_mac(cfg, &primary_mac);
+	wl_cfgp2p_generate_bss_mac(&primary_mac,
+		&cfg->p2p->dev_addr, &cfg->p2p->int_addr);
+
+	wdev->wiphy = cfg->wdev->wiphy;
+	wdev->iftype = NL80211_IFTYPE_P2P_DEVICE;
+	memcpy(wdev->address, &cfg->p2p->dev_addr, ETHER_ADDR_LEN);
+
+
+	/* store p2p wdev ptr for further reference. */
+	cfg->p2p_wdev = wdev;
+
+	CFGP2P_ERR(("P2P interface registered\n"));
+	mutex_unlock(&cfg->p2p_wdev_sync);
+
+	return wdev;
+}
+
+int
+wl_cfgp2p_start_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+	int ret = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+	if (!cfg)
+		return -EINVAL;
+
+	WL_TRACE(("Enter\n"));
+
+	ret = wl_cfgp2p_set_firm_p2p(cfg);
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("Set P2P in firmware failed, ret=%d\n", ret));
+		goto exit;
+	}
+
+	ret = wl_cfgp2p_enable_discovery(cfg, bcmcfg_to_prmry_ndev(cfg), NULL, 0);
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("P2P enable discovery failed, ret=%d\n", ret));
+		goto exit;
+	}
+
+	p2p_on(cfg) = true;
+
+	CFGP2P_DBG(("P2P interface started\n"));
+
+exit:
+	return ret;
+}
+
+void
+wl_cfgp2p_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+	int ret = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+	if (!cfg)
+		return;
+
+	WL_TRACE(("Enter\n"));
+
+	ret = wl_cfg80211_scan_stop(wdev);
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("P2P scan stop failed, ret=%d\n", ret));
+	}
+
+	if (!cfg->p2p)
+		return;
+
+	ret = wl_cfgp2p_disable_discovery(cfg);
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("P2P disable discovery failed, ret=%d\n", ret));
+	}
+
+	p2p_on(cfg) = false;
+
+	CFGP2P_DBG(("P2P interface stopped\n"));
+
+	return;
+}
+
+int
+wl_cfgp2p_del_p2p_disc_if(struct wireless_dev *wdev, struct bcm_cfg80211 *cfg)
+{
+	bool rollback_lock = false;
+
+	if (!wdev)
+		return -EINVAL;
+
+	mutex_lock(&cfg->p2p_wdev_sync);
+	WL_TRACE(("Enter\n"));
+
+	if (!rtnl_is_locked()) {
+		rtnl_lock();
+		rollback_lock = true;
+	}
+
+	cfg80211_unregister_wdev(wdev);
+
+	if (rollback_lock)
+		rtnl_unlock();
+
+	kfree(wdev);
+
+	if (cfg)
+		cfg->p2p_wdev = NULL;
+
+	CFGP2P_ERR(("P2P interface unregistered\n"));
+	mutex_unlock(&cfg->p2p_wdev_sync);
+
+	return 0;
+}
+#endif /* WL_CFG80211_P2P_DEV_IF */
diff --git a/drivers/staging/edison-bcm43340/wl_cfgp2p.h b/drivers/staging/edison-bcm43340/wl_cfgp2p.h
new file mode 100644
index 0000000..f999ddc
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/wl_cfgp2p.h
@@ -0,0 +1,406 @@
+/*
+ * Linux cfgp2p driver
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_cfgp2p.h 476678 2014-05-09 14:46:37Z $
+ */
+#ifndef _wl_cfgp2p_h_
+#define _wl_cfgp2p_h_
+#include <proto/802.11.h>
+#include <proto/p2p.h>
+
+struct bcm_cfg80211;
+extern u32 wl_dbg_level;
+
+typedef struct wifi_p2p_ie wifi_wfd_ie_t;
+/* Enumeration of the usages of the BSSCFGs used by the P2P Library.  Do not
+ * confuse this with a bsscfg index.  This value is an index into the
+ * bss[] array of structures, each of which in turn contains a bsscfg
+ * index field.
+ */
+typedef enum {
+	P2PAPI_BSSCFG_PRIMARY, /* maps to driver's primary bsscfg */
+	P2PAPI_BSSCFG_DEVICE, /* maps to driver's P2P device discovery bsscfg */
+	P2PAPI_BSSCFG_CONNECTION, /* maps to driver's P2P connection bsscfg */
+	P2PAPI_BSSCFG_MAX
+} p2p_bsscfg_type_t;
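+
+/*
+ * Illustrative sketch (not part of the original source): these enum values
+ * index the p2p_info bss[] array declared below, normally through the
+ * wl_to_p2p_bss_*() accessor macros, e.g.:
+ *
+ *	s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ *
+ * where 'cfg' is assumed to be an initialized struct bcm_cfg80211 *.
+ */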
+
+typedef enum {
+	P2P_SCAN_PURPOSE_MIN,
+	P2P_SCAN_SOCIAL_CHANNEL, /* scan for social channel */
+	P2P_SCAN_AFX_PEER_NORMAL, /* scan for action frame search */
+	P2P_SCAN_AFX_PEER_REDUCED, /* scan for action frame search with short time */
+	P2P_SCAN_DURING_CONNECTED, /* scan during connected status */
+	P2P_SCAN_CONNECT_TRY, /* scan for connecting */
+	P2P_SCAN_NORMAL, /* scan during not-connected status */
+	P2P_SCAN_PURPOSE_MAX
+} p2p_scan_purpose_t;
+
+/* vendor ies max buffer length for probe response or beacon */
+#define VNDR_IES_MAX_BUF_LEN	1400
+/* normal vendor ies buffer length */
+#define VNDR_IES_BUF_LEN 		512
+
+/* Structure to hold all saved P2P and WPS IEs for a BSSCFG */
+struct p2p_saved_ie {
+	u8  p2p_probe_req_ie[VNDR_IES_BUF_LEN];
+	u8  p2p_probe_res_ie[VNDR_IES_MAX_BUF_LEN];
+	u8  p2p_assoc_req_ie[VNDR_IES_BUF_LEN];
+	u8  p2p_assoc_res_ie[VNDR_IES_BUF_LEN];
+	u8  p2p_beacon_ie[VNDR_IES_MAX_BUF_LEN];
+	u32 p2p_probe_req_ie_len;
+	u32 p2p_probe_res_ie_len;
+	u32 p2p_assoc_req_ie_len;
+	u32 p2p_assoc_res_ie_len;
+	u32 p2p_beacon_ie_len;
+};
+
+struct p2p_bss {
+	u32 bssidx;
+	struct net_device *dev;
+	struct p2p_saved_ie saved_ie;
+	void *private_data;
+};
+
+struct p2p_info {
+	bool on;    /* p2p on/off switch */
+	bool scan;
+	int16 search_state;
+	bool vif_created;
+	s8 vir_ifname[IFNAMSIZ];
+	unsigned long status;
+	struct ether_addr dev_addr;
+	struct ether_addr int_addr;
+	struct p2p_bss bss[P2PAPI_BSSCFG_MAX];
+	struct timer_list listen_timer;
+	wl_p2p_sched_t noa;
+	wl_p2p_ops_t ops;
+	wlc_ssid_t ssid;
+};
+
+#define MAX_VNDR_IE_NUMBER	5
+
+struct parsed_vndr_ie_info {
+	char *ie_ptr;
+	u32 ie_len;	/* total length including id & length field */
+	vndr_ie_t vndrie;
+};
+
+struct parsed_vndr_ies {
+	u32 count;
+	struct parsed_vndr_ie_info ie_info[MAX_VNDR_IE_NUMBER];
+};
+
+/* dongle status */
+enum wl_cfgp2p_status {
+	WLP2P_STATUS_DISCOVERY_ON = 0,
+	WLP2P_STATUS_SEARCH_ENABLED,
+	WLP2P_STATUS_IF_ADDING,
+	WLP2P_STATUS_IF_DELETING,
+	WLP2P_STATUS_IF_CHANGING,
+	WLP2P_STATUS_IF_CHANGED,
+	WLP2P_STATUS_LISTEN_EXPIRED,
+	WLP2P_STATUS_ACTION_TX_COMPLETED,
+	WLP2P_STATUS_ACTION_TX_NOACK,
+	WLP2P_STATUS_SCANNING,
+	WLP2P_STATUS_GO_NEG_PHASE,
+	WLP2P_STATUS_DISC_IN_PROGRESS
+};
+
+
+#define wl_to_p2p_bss_ndev(cfg, type)		((cfg)->p2p->bss[type].dev)
+#define wl_to_p2p_bss_bssidx(cfg, type)		((cfg)->p2p->bss[type].bssidx)
+#define wl_to_p2p_bss_saved_ie(cfg, type)	((cfg)->p2p->bss[type].saved_ie)
+#define wl_to_p2p_bss_private(cfg, type)		((cfg)->p2p->bss[type].private_data)
+#define wl_to_p2p_bss(cfg, type)			((cfg)->p2p->bss[type])
+#define wl_get_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \
+		test_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status))
+#define wl_set_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \
+		set_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status))
+#define wl_clr_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \
+		clear_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status))
+#define wl_chg_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \
+	change_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status))
+#define p2p_on(cfg) ((cfg)->p2p->on)
+#define p2p_scan(cfg) ((cfg)->p2p->scan)
+#define p2p_is_on(cfg) ((cfg)->p2p && (cfg)->p2p->on)
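+
+/*
+ * Usage sketch (illustrative): the status helpers above take the
+ * WLP2P_STATUS_ suffix as a bare token and paste it onto the enum name:
+ *
+ *	wl_set_p2p_status(cfg, SCANNING);	sets WLP2P_STATUS_SCANNING
+ *	if (wl_get_p2p_status(cfg, SCANNING))
+ *		wl_clr_p2p_status(cfg, SCANNING);
+ */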
+
+/* maximum ioctl buffer length */
+#define WLC_IOCTL_MAXLEN 8192
+
+#define CFGP2P_ERROR_TEXT		"CFGP2P-ERROR) "
+
+
+#define CFGP2P_ERR(args)									\
+	do {										\
+		if (wl_dbg_level & WL_DBG_ERR) {				\
+			printk(KERN_INFO CFGP2P_ERROR_TEXT "%s : ", __func__);	\
+			printk args;						\
+		}									\
+	} while (0)
+#define	CFGP2P_INFO(args)									\
+	do {										\
+		if (wl_dbg_level & WL_DBG_INFO) {				\
+			printk(KERN_INFO "CFGP2P-INFO) %s : ", __func__);	\
+			printk args;						\
+		}									\
+	} while (0)
+#define	CFGP2P_DBG(args)								\
+	do {									\
+		if (wl_dbg_level & WL_DBG_DBG) {			\
+			printk(KERN_DEBUG "CFGP2P-DEBUG) %s :", __func__);	\
+			printk args;							\
+		}									\
+	} while (0)
+
+#define	CFGP2P_ACTION(args)								\
+	do {									\
+		if (wl_dbg_level & WL_DBG_P2P_ACTION) {			\
+			printk(KERN_DEBUG "CFGP2P-ACTION) %s :", __func__);	\
+			printk args;							\
+		}									\
+	} while (0)
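+
+/*
+ * Note (illustrative): the logging macros above take a fully parenthesized
+ * printf-style argument list, hence the double parentheses at call sites:
+ *
+ *	CFGP2P_ERR(("enable discovery failed, ret=%d\n", ret));
+ */
+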
+#define INIT_TIMER(timer, func, duration, extra_delay)	\
+	do {				   \
+		init_timer(timer); \
+		timer->function = func; \
+		timer->expires = jiffies + msecs_to_jiffies(duration + extra_delay); \
+		timer->data = (unsigned long) cfg; \
+		add_timer(timer); \
+	} while (0)
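+/* NB: INIT_TIMER stores the calling scope's local variable 'cfg' in
+ * timer->data, so it can only be used where such a variable exists.
+ */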
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) && !defined(WL_CFG80211_P2P_DEV_IF)
+#define WL_CFG80211_P2P_DEV_IF
+
+#ifdef WL_ENABLE_P2P_IF
+#undef WL_ENABLE_P2P_IF
+#endif
+
+#ifdef WL_SUPPORT_BACKPORTED_KPATCHES
+#undef WL_SUPPORT_BACKPORTED_KPATCHES
+#endif
+#endif /* (LINUX_VERSION >= VERSION(3, 8, 0)) */
+
+#if defined(WL_ENABLE_P2P_IF) && (defined(WL_CFG80211_P2P_DEV_IF) || \
+	(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)))
+#error Disable 'WL_ENABLE_P2P_IF' if 'WL_CFG80211_P2P_DEV_IF' is enabled \
+	or kernel version is 3.8.0 or above
+#endif /* WL_ENABLE_P2P_IF && (WL_CFG80211_P2P_DEV_IF || (LINUX_VERSION >= VERSION(3, 8, 0))) */
+
+#if !defined(WLP2P) && (defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF))
+#error WLP2P not defined
+#endif /* !WLP2P && (WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF) */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+#define bcm_struct_cfgdev	struct wireless_dev
+#else
+#define bcm_struct_cfgdev	struct net_device
+#endif /* WL_CFG80211_P2P_DEV_IF */
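+
+/*
+ * Illustrative note: with WL_CFG80211_P2P_DEV_IF set, cfg80211 hands the
+ * driver a struct wireless_dev for the P2P discovery interface; otherwise a
+ * struct net_device is used. Code that must work either way takes a
+ * bcm_struct_cfgdev *, as several prototypes below do.
+ */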
+
+extern void
+wl_cfgp2p_listen_expired(unsigned long data);
+extern bool
+wl_cfgp2p_is_pub_action(void *frame, u32 frame_len);
+extern bool
+wl_cfgp2p_is_p2p_action(void *frame, u32 frame_len);
+extern bool
+wl_cfgp2p_is_gas_action(void *frame, u32 frame_len);
+extern bool
+wl_cfgp2p_find_gas_subtype(u8 subtype, u8* data, u32 len);
+extern void
+wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len, u32 channel);
+extern s32
+wl_cfgp2p_init_priv(struct bcm_cfg80211 *cfg);
+extern void
+wl_cfgp2p_deinit_priv(struct bcm_cfg80211 *cfg);
+extern s32
+wl_cfgp2p_set_firm_p2p(struct bcm_cfg80211 *cfg);
+extern s32
+wl_cfgp2p_set_p2p_mode(struct bcm_cfg80211 *cfg, u8 mode,
+            u32 channel, u16 listen_ms, int bssidx);
+extern s32
+wl_cfgp2p_ifadd(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type,
+            chanspec_t chspec);
+extern s32
+wl_cfgp2p_ifdisable(struct bcm_cfg80211 *cfg, struct ether_addr *mac);
+extern s32
+wl_cfgp2p_ifdel(struct bcm_cfg80211 *cfg, struct ether_addr *mac);
+extern s32
+wl_cfgp2p_ifchange(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type, chanspec_t chspec);
+
+extern s32
+wl_cfgp2p_ifidx(struct bcm_cfg80211 *cfg, struct ether_addr *mac, s32 *index);
+
+extern s32
+wl_cfgp2p_init_discovery(struct bcm_cfg80211 *cfg);
+extern s32
+wl_cfgp2p_enable_discovery(struct bcm_cfg80211 *cfg, struct net_device *dev, const u8 *ie,
+	u32 ie_len);
+extern s32
+wl_cfgp2p_disable_discovery(struct bcm_cfg80211 *cfg);
+extern s32
+wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active, u32 num_chans,
+	u16 *channels,
+	s32 search_state, u16 action, u32 bssidx, struct ether_addr *tx_dst_addr,
+	p2p_scan_purpose_t p2p_scan_purpose);
+
+extern s32
+wl_cfgp2p_act_frm_search(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	s32 bssidx, s32 channel, struct ether_addr *tx_dst_addr);
+
+extern wpa_ie_fixed_t *
+wl_cfgp2p_find_wpaie(u8 *parse, u32 len);
+
+extern wpa_ie_fixed_t *
+wl_cfgp2p_find_wpsie(u8 *parse, u32 len);
+
+extern wifi_p2p_ie_t *
+wl_cfgp2p_find_p2pie(u8 *parse, u32 len);
+
+extern wifi_wfd_ie_t *
+wl_cfgp2p_find_wfdie(u8 *parse, u32 len);
+extern s32
+wl_cfgp2p_set_management_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx,
+            s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len);
+extern s32
+wl_cfgp2p_clear_management_ie(struct bcm_cfg80211 *cfg, s32 bssidx);
+
+extern s32
+wl_cfgp2p_find_idx(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 *index);
+extern struct net_device *
+wl_cfgp2p_find_ndev(struct bcm_cfg80211 *cfg, s32 bssidx);
+extern s32
+wl_cfgp2p_find_type(struct bcm_cfg80211 *cfg, s32 bssidx, s32 *type);
+
+
+extern s32
+wl_cfgp2p_listen_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+extern s32
+wl_cfgp2p_discover_listen(struct bcm_cfg80211 *cfg, s32 channel, u32 duration_ms);
+
+extern s32
+wl_cfgp2p_discover_enable_search(struct bcm_cfg80211 *cfg, u8 enable);
+
+extern s32
+wl_cfgp2p_action_tx_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+
+extern s32
+wl_cfgp2p_tx_action_frame(struct bcm_cfg80211 *cfg, struct net_device *dev,
+	wl_af_params_t *af_params, s32 bssidx);
+
+extern void
+wl_cfgp2p_generate_bss_mac(struct ether_addr *primary_addr, struct ether_addr *out_dev_addr,
+            struct ether_addr *out_int_addr);
+
+extern void
+wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id);
+extern bool
+wl_cfgp2p_bss_isup(struct net_device *ndev, int bsscfg_idx);
+
+extern s32
+wl_cfgp2p_bss(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bsscfg_idx, s32 up);
+
+
+extern s32
+wl_cfgp2p_supported(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+
+extern s32
+wl_cfgp2p_down(struct bcm_cfg80211 *cfg);
+
+extern s32
+wl_cfgp2p_set_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len);
+
+extern s32
+wl_cfgp2p_get_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len);
+
+extern s32
+wl_cfgp2p_set_p2p_ps(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len);
+
+extern u8 *
+wl_cfgp2p_retreive_p2pattrib(void *buf, u8 element_id);
+
+extern u8*
+wl_cfgp2p_find_attrib_in_all_p2p_Ies(u8 *parse, u32 len, u32 attrib);
+
+extern u8 *
+wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length);
+
+extern s32
+wl_cfgp2p_register_ndev(struct bcm_cfg80211 *cfg);
+
+extern s32
+wl_cfgp2p_unregister_ndev(struct bcm_cfg80211 *cfg);
+
+extern bool
+wl_cfgp2p_is_ifops(const struct net_device_ops *if_ops);
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+extern struct wireless_dev *
+wl_cfgp2p_add_p2p_disc_if(struct bcm_cfg80211 *cfg);
+
+extern int
+wl_cfgp2p_start_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev);
+
+extern void
+wl_cfgp2p_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev);
+
+extern int
+wl_cfgp2p_del_p2p_disc_if(struct wireless_dev *wdev, struct bcm_cfg80211 *cfg);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+/* WiFi Direct */
+#define SOCIAL_CHAN_1 1
+#define SOCIAL_CHAN_2 6
+#define SOCIAL_CHAN_3 11
+#define IS_P2P_SOCIAL_CHANNEL(channel) ((channel == SOCIAL_CHAN_1) || \
+					(channel == SOCIAL_CHAN_2) || \
+					(channel == SOCIAL_CHAN_3))
+#define SOCIAL_CHAN_CNT 3
+#define AF_PEER_SEARCH_CNT 2
+#define WL_P2P_WILDCARD_SSID "DIRECT-"
+#define WL_P2P_WILDCARD_SSID_LEN 7
+#define WL_P2P_INTERFACE_PREFIX "p2p"
+#define WL_P2P_TEMP_CHAN 11
+
+/* If the provision discovery is for JOIN operations,
+ * or the device discoverablity frame is destined to GO
+ * then we need not do an internal scan to find GO.
+ */
+#define IS_ACTPUB_WITHOUT_GROUP_ID(p2p_ie, len) \
+	(wl_cfgp2p_retreive_p2pattrib(p2p_ie, P2P_SEID_GROUP_ID) == NULL)
+
+#define IS_GAS_REQ(frame, len) (wl_cfgp2p_is_gas_action(frame, len) && \
+					((frame->action == P2PSD_ACTION_ID_GAS_IREQ) || \
+					(frame->action == P2PSD_ACTION_ID_GAS_CREQ)))
+
+#define IS_P2P_PUB_ACT_RSP_SUBTYPE(subtype) ((subtype == P2P_PAF_GON_RSP) || \
+							(subtype == P2P_PAF_GON_CONF) || \
+							(subtype == P2P_PAF_INVITE_RSP) || \
+							(subtype == P2P_PAF_PROVDIS_RSP))
+#define IS_P2P_SOCIAL(ch) ((ch == SOCIAL_CHAN_1) || (ch == SOCIAL_CHAN_2) || (ch == SOCIAL_CHAN_3))
+#define IS_P2P_SSID(ssid, len) (!memcmp(ssid, WL_P2P_WILDCARD_SSID, WL_P2P_WILDCARD_SSID_LEN) && \
+					(len == WL_P2P_WILDCARD_SSID_LEN))
+#endif				/* _wl_cfgp2p_h_ */
diff --git a/drivers/staging/edison-bcm43340/wl_dbg.h b/drivers/staging/edison-bcm43340/wl_dbg.h
new file mode 100644
index 0000000..67349a1
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/wl_dbg.h
@@ -0,0 +1,67 @@
+/*
+ * Minimal debug/trace/assert driver definitions for
+ * Broadcom 802.11 Networking Adapter.
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_dbg.h 430628 2013-10-19 04:07:25Z $
+ */
+
+
+#ifndef _wl_dbg_h_
+#define _wl_dbg_h_
+
+/* wl_msg_level is a bit vector with defs in wlioctl.h */
+extern uint32 wl_msg_level;
+extern uint32 wl_msg_level2;
+
+#define WL_TIMESTAMP()
+
+#define WL_PRINT(args)		do { WL_TIMESTAMP(); printf args; } while (0)
+
+#if defined(EVENT_LOG_COMPILE) && defined(WLMSG_SRSCAN)
+#define _WL_SRSCAN(fmt, ...)	EVENT_LOG(EVENT_LOG_TAG_SRSCAN, fmt, ##__VA_ARGS__)
+#define WL_SRSCAN(args)		_WL_SRSCAN args
+#else
+#define WL_SRSCAN(args)
+#endif
+
+
+/* To disable a message completely ... until you need it again */
+#define WL_NONE(args)
+
+#define	WL_ERROR(args)
+#define	WL_TRACE(args)
+#define WL_APSTA_UPDN(args)
+#define WL_APSTA_RX(args)
+#ifdef WLMSG_WSEC
+#define WL_WSEC(args)		WL_PRINT(args)
+#define WL_WSEC_DUMP(args)	WL_PRINT(args)
+#else
+#define WL_WSEC(args)
+#define WL_WSEC_DUMP(args)
+#endif
+#define WL_PCIE(args)		do {if (wl_msg_level2 & WL_PCIE_VAL) WL_PRINT(args);} while (0)
+#define WL_PCIE_ON()		(wl_msg_level2 & WL_PCIE_VAL)
+
+
+#endif /* _wl_dbg_h_ */
diff --git a/drivers/staging/edison-bcm43340/wl_iw.c b/drivers/staging/edison-bcm43340/wl_iw.c
new file mode 100644
index 0000000..c78f279
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/wl_iw.c
@@ -0,0 +1,3846 @@
+/*
+ * Linux Wireless Extensions support
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_iw.c 425990 2013-09-26 07:28:16Z $
+ */
+
+#if defined(USE_IW)
+#define LINUX_PORT
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <proto/ethernet.h>
+
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+
+typedef const struct si_pub	si_t;
+#include <wlioctl.h>
+
+
+#include <wl_dbg.h>
+#include <wl_iw.h>
+
+#ifdef BCMWAPI_WPI
+/* these items should eventually go into wireless.h of the linux system header file dir */
+#ifndef IW_ENCODE_ALG_SM4
+#define IW_ENCODE_ALG_SM4 0x20
+#endif
+
+#ifndef IW_AUTH_WAPI_ENABLED
+#define IW_AUTH_WAPI_ENABLED 0x20
+#endif
+
+#ifndef IW_AUTH_WAPI_VERSION_1
+#define IW_AUTH_WAPI_VERSION_1	0x00000008
+#endif
+
+#ifndef IW_AUTH_CIPHER_SMS4
+#define IW_AUTH_CIPHER_SMS4	0x00000020
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_PSK
+#define IW_AUTH_KEY_MGMT_WAPI_PSK 4
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_WAPI_CERT
+#define IW_AUTH_KEY_MGMT_WAPI_CERT 8
+#endif
+#endif /* BCMWAPI_WPI */
+
+/* Broadcom extensions to WEXT; Linux upstream has obsoleted WEXT */
+#ifndef IW_AUTH_KEY_MGMT_FT_802_1X
+#define IW_AUTH_KEY_MGMT_FT_802_1X 0x04
+#endif
+
+#ifndef IW_AUTH_KEY_MGMT_FT_PSK
+#define IW_AUTH_KEY_MGMT_FT_PSK 0x08
+#endif
+
+#ifndef IW_ENC_CAPA_FW_ROAM_ENABLE
+#define IW_ENC_CAPA_FW_ROAM_ENABLE	0x00000020
+#endif
+
+
+/* FC9: wireless.h 2.6.25-14.fc9.i686 is missing these, even though WIRELESS_EXT is set to latest
+ * version 22.
+ */
+#ifndef IW_ENCODE_ALG_PMK
+#define IW_ENCODE_ALG_PMK 4
+#endif
+#ifndef IW_ENC_CAPA_4WAY_HANDSHAKE
+#define IW_ENC_CAPA_4WAY_HANDSHAKE 0x00000010
+#endif
+/* End FC9. */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+#include <linux/rtnetlink.h>
+#endif
+#if defined(SOFTAP)
+struct net_device *ap_net_dev = NULL;
+tsk_ctl_t ap_eth_ctl;  /* apsta AP netdev waiter thread */
+#endif /* SOFTAP */
+
+extern bool wl_iw_conn_status_str(uint32 event_type, uint32 status,
+	uint32 reason, char* stringBuf, uint buflen);
+
+uint wl_msg_level = WL_ERROR_VAL;
+
+#define MAX_WLIW_IOCTL_LEN 1024
+
+/* IOCTL swapping mode for Big Endian host with Little Endian dongle.  Default to off */
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
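+
+/*
+ * Illustrative only: on a big endian host driving a little endian dongle
+ * these would byte-swap (e.g. a 32-bit swap for htod32); here the byte
+ * orders match, so they compile away to the identity.
+ */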
+
+extern struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
+extern int dhd_wait_pend8021x(struct net_device *dev);
+
+#if WIRELESS_EXT < 19
+#define IW_IOCTL_IDX(cmd)	((cmd) - SIOCIWFIRST)
+#define IW_EVENT_IDX(cmd)	((cmd) - IWEVFIRST)
+#endif /* WIRELESS_EXT < 19 */
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+#define DAEMONIZE(a)	do { \
+		allow_signal(SIGKILL);	\
+		allow_signal(SIGTERM);	\
+	} while (0)
+#elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \
+	(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)))
+#define DAEMONIZE(a)	do { \
+		daemonize(a); \
+		allow_signal(SIGKILL); \
+		allow_signal(SIGTERM); \
+	} while (0)
+#else /* Linux 2.4 (w/o preemption patch) */
+#define RAISE_RX_SOFTIRQ() \
+	cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
+#define DAEMONIZE(a)	do { \
+		daemonize(); \
+		if (a) \
+			strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \
+	} while (0)
+#endif /* LINUX_VERSION_CODE  */
+
+#define ISCAN_STATE_IDLE   0
+#define ISCAN_STATE_SCANING 1
+
+/* the buf length can be WLC_IOCTL_MAXLEN (8K) to reduce iteration */
+#define WLC_IW_ISCAN_MAXLEN   2048
+typedef struct iscan_buf {
+	struct iscan_buf * next;
+	char   iscan_buf[WLC_IW_ISCAN_MAXLEN];
+} iscan_buf_t;
+
+typedef struct iscan_info {
+	struct net_device *dev;
+	struct timer_list timer;
+	uint32 timer_ms;
+	uint32 timer_on;
+	int    iscan_state;
+	iscan_buf_t * list_hdr;
+	iscan_buf_t * list_cur;
+
+	/* Thread to work on iscan */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+	struct task_struct *kthread;
+#endif
+	long sysioc_pid;
+	struct semaphore sysioc_sem;
+	struct completion sysioc_exited;
+
+
+	char ioctlbuf[WLC_IOCTL_SMLEN];
+} iscan_info_t;
+iscan_info_t *g_iscan = NULL;
+static void wl_iw_timerfunc(ulong data);
+static void wl_iw_set_event_mask(struct net_device *dev);
+static int wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action);
+
+/* priv_link becomes netdev->priv and is the link between netdev and wlif struct */
+typedef struct priv_link {
+	wl_iw_t *wliw;
+} priv_link_t;
+
+/* dev to priv_link */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#define WL_DEV_LINK(dev)       (priv_link_t*)(dev->priv)
+#else
+#define WL_DEV_LINK(dev)       (priv_link_t*)netdev_priv(dev)
+#endif
+
+/* dev to wl_iw_t */
+#define IW_DEV_IF(dev)          ((wl_iw_t*)(WL_DEV_LINK(dev))->wliw)
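+
+/*
+ * Usage sketch (illustrative): the WEXT handlers below recover their
+ * per-device state through this pair of macros, e.g.:
+ *
+ *	wl_iw_t *iw = IW_DEV_IF(dev);
+ */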
+
+static void swap_key_from_BE(
+	        wl_wsec_key_t *key
+)
+{
+	key->index = htod32(key->index);
+	key->len = htod32(key->len);
+	key->algo = htod32(key->algo);
+	key->flags = htod32(key->flags);
+	key->rxiv.hi = htod32(key->rxiv.hi);
+	key->rxiv.lo = htod16(key->rxiv.lo);
+	key->iv_initialized = htod32(key->iv_initialized);
+}
+
+static void swap_key_to_BE(
+	        wl_wsec_key_t *key
+)
+{
+	key->index = dtoh32(key->index);
+	key->len = dtoh32(key->len);
+	key->algo = dtoh32(key->algo);
+	key->flags = dtoh32(key->flags);
+	key->rxiv.hi = dtoh32(key->rxiv.hi);
+	key->rxiv.lo = dtoh16(key->rxiv.lo);
+	key->iv_initialized = dtoh32(key->iv_initialized);
+}
+
+static int
+dev_wlc_ioctl(
+	struct net_device *dev,
+	int cmd,
+	void *arg,
+	int len
+)
+{
+	struct ifreq ifr;
+	wl_ioctl_t ioc;
+	mm_segment_t fs;
+	int ret;
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = cmd;
+	ioc.buf = arg;
+	ioc.len = len;
+
+	strcpy(ifr.ifr_name, dev->name);
+	ifr.ifr_data = (caddr_t) &ioc;
+
+	fs = get_fs();
+	set_fs(get_ds());
+#if defined(WL_USE_NETDEV_OPS)
+	ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
+#else
+	ret = dev->do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
+#endif
+	set_fs(fs);
+
+	return ret;
+}
+
+/*
+set named driver variable to int value and return error indication
+calling example: dev_wlc_intvar_set(dev, "arate", rate)
+*/
+
+static int
+dev_wlc_intvar_set(
+	struct net_device *dev,
+	char *name,
+	int val)
+{
+	char buf[WLC_IOCTL_SMLEN];
+	uint len;
+
+	val = htod32(val);
+	len = bcm_mkiovar(name, (char *)(&val), sizeof(val), buf, sizeof(buf));
+	ASSERT(len);
+
+	return (dev_wlc_ioctl(dev, WLC_SET_VAR, buf, len));
+}
+
+static int
+dev_iw_iovar_setbuf(
+	struct net_device *dev,
+	char *iovar,
+	void *param,
+	int paramlen,
+	void *bufptr,
+	int buflen)
+{
+	int iolen;
+
+	iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen);
+	ASSERT(iolen);
+	BCM_REFERENCE(iolen);
+
+	return (dev_wlc_ioctl(dev, WLC_SET_VAR, bufptr, iolen));
+}
+
+static int
+dev_iw_iovar_getbuf(
+	struct net_device *dev,
+	char *iovar,
+	void *param,
+	int paramlen,
+	void *bufptr,
+	int buflen)
+{
+	int iolen;
+
+	iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen);
+	ASSERT(iolen);
+	BCM_REFERENCE(iolen);
+
+	return (dev_wlc_ioctl(dev, WLC_GET_VAR, bufptr, buflen));
+}
+
+#if WIRELESS_EXT > 17
+static int
+dev_wlc_bufvar_set(
+	struct net_device *dev,
+	char *name,
+	char *buf, int len)
+{
+	char *ioctlbuf;
+	uint buflen;
+	int error;
+
+	ioctlbuf = kmalloc(MAX_WLIW_IOCTL_LEN, GFP_KERNEL);
+	if (!ioctlbuf)
+		return -ENOMEM;
+
+	buflen = bcm_mkiovar(name, buf, len, ioctlbuf, MAX_WLIW_IOCTL_LEN);
+	ASSERT(buflen);
+	error = dev_wlc_ioctl(dev, WLC_SET_VAR, ioctlbuf, buflen);
+
+	kfree(ioctlbuf);
+	return error;
+}
+#endif /* WIRELESS_EXT > 17 */
+
+/*
+get named driver variable into a caller-supplied buffer and return error indication
+calling example: dev_wlc_bufvar_get(dev, "arate", buf, buflen)
+*/
+
+static int
+dev_wlc_bufvar_get(
+	struct net_device *dev,
+	char *name,
+	char *buf, int buflen)
+{
+	char *ioctlbuf;
+	int error;
+
+	uint len;
+
+	ioctlbuf = kmalloc(MAX_WLIW_IOCTL_LEN, GFP_KERNEL);
+	if (!ioctlbuf)
+		return -ENOMEM;
+	len = bcm_mkiovar(name, NULL, 0, ioctlbuf, MAX_WLIW_IOCTL_LEN);
+	ASSERT(len);
+	BCM_REFERENCE(len);
+	error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)ioctlbuf, MAX_WLIW_IOCTL_LEN);
+	if (!error)
+		bcopy(ioctlbuf, buf, buflen);
+
+	kfree(ioctlbuf);
+	return (error);
+}
+
+/*
+get named driver variable to int value and return error indication
+calling example: dev_wlc_intvar_get(dev, "arate", &rate)
+*/
+
+static int
+dev_wlc_intvar_get(
+	struct net_device *dev,
+	char *name,
+	int *retval)
+{
+	union {
+		char buf[WLC_IOCTL_SMLEN];
+		int val;
+	} var;
+	int error;
+
+	uint len;
+	uint data_null;
+
+	len = bcm_mkiovar(name, (char *)(&data_null), 0, (char *)(&var), sizeof(var.buf));
+	ASSERT(len);
+	error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)&var, len);
+
+	*retval = dtoh32(var.val);
+
+	return (error);
+}
+
+/* Maintain backward compatibility */
+#if WIRELESS_EXT < 13
+struct iw_request_info
+{
+	__u16		cmd;		/* Wireless Extension command */
+	__u16		flags;		/* More to come ;-) */
+};
+
+typedef int (*iw_handler)(struct net_device *dev, struct iw_request_info *info,
+	void *wrqu, char *extra);
+#endif /* WIRELESS_EXT < 13 */
+
+#if WIRELESS_EXT > 12
+static int
+wl_iw_set_leddc(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int dc = *(int *)extra;
+	int error;
+
+	error = dev_wlc_intvar_set(dev, "leddc", dc);
+	return error;
+}
+
+static int
+wl_iw_set_vlanmode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int mode = *(int *)extra;
+	int error;
+
+	mode = htod32(mode);
+	error = dev_wlc_intvar_set(dev, "vlan_mode", mode);
+	return error;
+}
+
+static int
+wl_iw_set_pm(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	int pm = *(int *)extra;
+	int error;
+
+	pm = htod32(pm);
+	error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm));
+	return error;
+}
+
+#endif /* WIRELESS_EXT > 12 */
+
+int
+wl_iw_send_priv_event(
+	struct net_device *dev,
+	char *flag
+)
+{
+	union iwreq_data wrqu;
+	char extra[IW_CUSTOM_MAX + 1];
+	int cmd;
+
+	cmd = IWEVCUSTOM;
+	memset(&wrqu, 0, sizeof(wrqu));
+	if (strlen(flag) >= sizeof(extra))
+		return -1;
+
+	strcpy(extra, flag);
+	wrqu.data.length = strlen(extra);
+	wireless_send_event(dev, cmd, &wrqu, extra);
+	WL_TRACE(("Send IWEVCUSTOM Event as %s\n", extra));
+
+	return 0;
+}
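+
+/*
+ * Usage sketch (illustrative; the string is a made-up example): forward a
+ * driver status string to user space as an IWEVCUSTOM wireless event:
+ *
+ *	wl_iw_send_priv_event(dev, "SCAN_DONE");
+ */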
+
+static int
+wl_iw_config_commit(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	void *zwrq,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+	int error;
+	struct sockaddr bssid;
+
+	WL_TRACE(("%s: SIOCSIWCOMMIT\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid))))
+		return error;
+
+	ssid.SSID_len = dtoh32(ssid.SSID_len);
+
+	if (!ssid.SSID_len)
+		return 0;
+
+	bzero(&bssid, sizeof(struct sockaddr));
+	if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, &bssid, ETHER_ADDR_LEN))) {
+		WL_ERROR(("%s: WLC_REASSOC failed (%d)\n", __FUNCTION__, error));
+		return error;
+	}
+
+	return 0;
+}
+
+static int
+wl_iw_get_name(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *cwrq,
+	char *extra
+)
+{
+	int phytype, err;
+	uint band[3];
+	char cap[5];
+
+	WL_TRACE(("%s: SIOCGIWNAME\n", dev->name));
+
+	cap[0] = 0;
+	if ((err = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype))) < 0)
+		goto done;
+	if ((err = dev_wlc_ioctl(dev, WLC_GET_BANDLIST, band, sizeof(band))) < 0)
+		goto done;
+
+	band[0] = dtoh32(band[0]);
+	switch (phytype) {
+		case WLC_PHY_TYPE_A:
+			strcpy(cap, "a");
+			break;
+		case WLC_PHY_TYPE_B:
+			strcpy(cap, "b");
+			break;
+		case WLC_PHY_TYPE_LP:
+		case WLC_PHY_TYPE_G:
+			if (band[0] >= 2)
+				strcpy(cap, "abg");
+			else
+				strcpy(cap, "bg");
+			break;
+		case WLC_PHY_TYPE_N:
+			if (band[0] >= 2)
+				strcpy(cap, "abgn");
+			else
+				strcpy(cap, "bgn");
+			break;
+	}
+done:
+	snprintf(cwrq->name, IFNAMSIZ, "IEEE 802.11%s", cap);
+	return 0;
+}
+
+static int
+wl_iw_set_freq(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_freq *fwrq,
+	char *extra
+)
+{
+	int error, chan;
+	uint sf = 0;
+
+	WL_TRACE(("%s: SIOCSIWFREQ\n", dev->name));
+
+	/* Setting by channel number */
+	if (fwrq->e == 0 && fwrq->m < MAXCHANNEL) {
+		chan = fwrq->m;
+	}
+
+	/* Setting by frequency */
+	else {
+		/* Convert to MHz as best we can */
+		if (fwrq->e >= 6) {
+			fwrq->e -= 6;
+			while (fwrq->e--)
+				fwrq->m *= 10;
+		} else if (fwrq->e < 6) {
+			while (fwrq->e++ < 6)
+				fwrq->m /= 10;
+		}
+		/* handle 4.9GHz frequencies as Japan 4 GHz based channelization */
+		if (fwrq->m > 4000 && fwrq->m < 5000)
+			sf = WF_CHAN_FACTOR_4_G; /* start factor for 4 GHz */
+
+		chan = wf_mhz2channel(fwrq->m, sf);
+	}
+	chan = htod32(chan);
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &chan, sizeof(chan))))
+		return error;
+
+	/* -EINPROGRESS: Call commit handler */
+	return -EINPROGRESS;
+}
+
+static int
+wl_iw_get_freq(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_freq *fwrq,
+	char *extra
+)
+{
+	channel_info_t ci;
+	int error;
+
+	WL_TRACE(("%s: SIOCGIWFREQ\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci))))
+		return error;
+
+	/* Return radio channel in channel form */
+	fwrq->m = dtoh32(ci.hw_channel);
+	fwrq->e = dtoh32(0);
+	return 0;
+}
+
+static int
+wl_iw_set_mode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	__u32 *uwrq,
+	char *extra
+)
+{
+	int infra = 0, ap = 0, error = 0;
+
+	WL_TRACE(("%s: SIOCSIWMODE\n", dev->name));
+
+	switch (*uwrq) {
+	case IW_MODE_MASTER:
+		infra = ap = 1;
+		break;
+	case IW_MODE_ADHOC:
+	case IW_MODE_AUTO:
+		break;
+	case IW_MODE_INFRA:
+		infra = 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+	infra = htod32(infra);
+	ap = htod32(ap);
+
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(infra))) ||
+	    (error = dev_wlc_ioctl(dev, WLC_SET_AP, &ap, sizeof(ap))))
+		return error;
+
+	/* -EINPROGRESS: Call commit handler */
+	return -EINPROGRESS;
+}
+
+static int
+wl_iw_get_mode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	__u32 *uwrq,
+	char *extra
+)
+{
+	int error, infra = 0, ap = 0;
+
+	WL_TRACE(("%s: SIOCGIWMODE\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_INFRA, &infra, sizeof(infra))) ||
+	    (error = dev_wlc_ioctl(dev, WLC_GET_AP, &ap, sizeof(ap))))
+		return error;
+
+	infra = dtoh32(infra);
+	ap = dtoh32(ap);
+	*uwrq = infra ? ap ? IW_MODE_MASTER : IW_MODE_INFRA : IW_MODE_ADHOC;
+
+	return 0;
+}
+
+static int
+wl_iw_get_range(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	struct iw_range *range = (struct iw_range *) extra;
+	static int channels[MAXCHANNEL+1];
+	wl_uint32_list_t *list = (wl_uint32_list_t *) channels;
+	wl_rateset_t rateset;
+	int error, i, k;
+	uint sf, ch;
+
+	int phytype;
+	int bw_cap = 0, sgi_tx = 0, nmode = 0;
+	channel_info_t ci;
+	uint8 nrate_list2copy = 0;
+	uint16 nrate_list[4][8] = { {13, 26, 39, 52, 78, 104, 117, 130},
+		{14, 29, 43, 58, 87, 116, 130, 144},
+		{27, 54, 81, 108, 162, 216, 243, 270},
+		{30, 60, 90, 120, 180, 240, 270, 300}};
+	int fbt_cap = 0;
+
+	WL_TRACE(("%s: SIOCGIWRANGE\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	dwrq->length = sizeof(struct iw_range);
+	memset(range, 0, sizeof(*range));
+
+	/* We don't use nwids */
+	range->min_nwid = range->max_nwid = 0;
+
+	/* Set available channels/frequencies */
+	list->count = htod32(MAXCHANNEL);
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_VALID_CHANNELS, channels, sizeof(channels))))
+		return error;
+	for (i = 0; i < dtoh32(list->count) && i < IW_MAX_FREQUENCIES; i++) {
+		range->freq[i].i = dtoh32(list->element[i]);
+
+		ch = dtoh32(list->element[i]);
+		if (ch <= CH_MAX_2G_CHANNEL)
+			sf = WF_CHAN_FACTOR_2_4_G;
+		else
+			sf = WF_CHAN_FACTOR_5_G;
+
+		range->freq[i].m = wf_channel2mhz(ch, sf);
+		range->freq[i].e = 6;
+	}
+	range->num_frequency = range->num_channels = i;
+
+	/* Link quality (use NDIS cutoffs) */
+	range->max_qual.qual = 5;
+	/* Signal level (use RSSI) */
+	range->max_qual.level = 0x100 - 200;	/* -200 dBm */
+	/* Noise level (use noise) */
+	range->max_qual.noise = 0x100 - 200;	/* -200 dBm */
+	/* Signal level threshold range (?) */
+	range->sensitivity = 65535;
+
+#if WIRELESS_EXT > 11
+	/* Link quality (use NDIS cutoffs) */
+	range->avg_qual.qual = 3;
+	/* Signal level (use RSSI) */
+	range->avg_qual.level = 0x100 + WL_IW_RSSI_GOOD;
+	/* Noise level (use noise) */
+	range->avg_qual.noise = 0x100 - 75;	/* -75 dBm */
+#endif /* WIRELESS_EXT > 11 */
+
+	/* Set available bitrates */
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset))))
+		return error;
+	rateset.count = dtoh32(rateset.count);
+	range->num_bitrates = rateset.count;
+	for (i = 0; i < rateset.count && i < IW_MAX_BITRATES; i++)
+		range->bitrate[i] = (rateset.rates[i] & 0x7f) * 500000; /* convert to bps */
+	if ((error = dev_wlc_intvar_get(dev, "nmode", &nmode)))
+		return error;
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype))))
+		return error;
+	if (nmode == 1 && ((phytype == WLC_PHY_TYPE_SSN) || (phytype == WLC_PHY_TYPE_LCN) ||
+		(phytype == WLC_PHY_TYPE_LCN40))) {
+		if ((error = dev_wlc_intvar_get(dev, "mimo_bw_cap", &bw_cap)))
+			return error;
+		if ((error = dev_wlc_intvar_get(dev, "sgi_tx", &sgi_tx)))
+			return error;
+		if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(channel_info_t))))
+			return error;
+		ci.hw_channel = dtoh32(ci.hw_channel);
+
+		if (bw_cap == 0 ||
+			(bw_cap == 2 && ci.hw_channel <= 14)) {
+			if (sgi_tx == 0)
+				nrate_list2copy = 0;
+			else
+				nrate_list2copy = 1;
+		}
+		if (bw_cap == 1 ||
+			(bw_cap == 2 && ci.hw_channel >= 36)) {
+			if (sgi_tx == 0)
+				nrate_list2copy = 2;
+			else
+				nrate_list2copy = 3;
+		}
+		range->num_bitrates += 8;
+		ASSERT(range->num_bitrates < IW_MAX_BITRATES);
+		for (k = 0; i < range->num_bitrates; k++, i++) {
+			/* convert to bps */
+			range->bitrate[i] = (nrate_list[nrate_list2copy][k]) * 500000;
+		}
+	}
+
+	/* Set an indication of the max TCP throughput
+	 * in bit/s that we can expect using this interface.
+	 * May be used for QoS stuff... Jean II
+	 */
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &i, sizeof(i))))
+		return error;
+	i = dtoh32(i);
+	if (i == WLC_PHY_TYPE_A)
+		range->throughput = 24000000;	/* 24 Mbits/s */
+	else
+		range->throughput = 1500000;	/* 1.5 Mbits/s */
+
+	/* RTS and fragmentation thresholds */
+	range->min_rts = 0;
+	range->max_rts = 2347;
+	range->min_frag = 256;
+	range->max_frag = 2346;
+
+	range->max_encoding_tokens = DOT11_MAX_DEFAULT_KEYS;
+	range->num_encoding_sizes = 4;
+	range->encoding_size[0] = WEP1_KEY_SIZE;
+	range->encoding_size[1] = WEP128_KEY_SIZE;
+#if WIRELESS_EXT > 17
+	range->encoding_size[2] = TKIP_KEY_SIZE;
+#else
+	range->encoding_size[2] = 0;
+#endif
+	range->encoding_size[3] = AES_KEY_SIZE;
+
+	/* Do not support power micro-management */
+	range->min_pmp = 0;
+	range->max_pmp = 0;
+	range->min_pmt = 0;
+	range->max_pmt = 0;
+	range->pmp_flags = 0;
+	range->pm_capa = 0;
+
+	/* Transmit Power - values are in mW */
+	range->num_txpower = 2;
+	range->txpower[0] = 1;
+	range->txpower[1] = 255;
+	range->txpower_capa = IW_TXPOW_MWATT;
+
+#if WIRELESS_EXT > 10
+	range->we_version_compiled = WIRELESS_EXT;
+	range->we_version_source = 19;
+
+	/* Only support retry limits */
+	range->retry_capa = IW_RETRY_LIMIT;
+	range->retry_flags = IW_RETRY_LIMIT;
+	range->r_time_flags = 0;
+	/* SRL and LRL limits */
+	range->min_retry = 1;
+	range->max_retry = 255;
+	/* Retry lifetime limits unsupported */
+	range->min_r_time = 0;
+	range->max_r_time = 0;
+#endif /* WIRELESS_EXT > 10 */
+
+#if WIRELESS_EXT > 17
+	range->enc_capa = IW_ENC_CAPA_WPA;
+	range->enc_capa |= IW_ENC_CAPA_CIPHER_TKIP;
+	range->enc_capa |= IW_ENC_CAPA_CIPHER_CCMP;
+	range->enc_capa |= IW_ENC_CAPA_WPA2;
+
+	/* Determine driver FBT capability. */
+	if (dev_wlc_intvar_get(dev, "fbt_cap", &fbt_cap) == 0) {
+		if (fbt_cap == WLC_FBT_CAP_DRV_4WAY_AND_REASSOC) {
+			/* Tell the host (e.g. wpa_supplicant) to let driver do the handshake */
+			range->enc_capa |= IW_ENC_CAPA_4WAY_HANDSHAKE;
+		}
+	}
+
+#ifdef BCMFW_ROAM_ENABLE_WEXT
+	/* Advertise firmware roam capability to the external supplicant */
+	range->enc_capa |= IW_ENC_CAPA_FW_ROAM_ENABLE;
+#endif /* BCMFW_ROAM_ENABLE_WEXT */
+
+	/* Event capability (kernel) */
+	IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
+	/* Event capability (driver) */
+	IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
+	IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVMICHAELMICFAILURE);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVASSOCREQIE);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVASSOCRESPIE);
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVPMKIDCAND);
+
+#if WIRELESS_EXT >= 22 && defined(IW_SCAN_CAPA_ESSID)
+	/* FC7 wireless.h defines EXT 22 but doesn't define scan_capa bits */
+	range->scan_capa = IW_SCAN_CAPA_ESSID;
+#endif
+#endif /* WIRELESS_EXT > 17 */
+
+	return 0;
+}
+
+static int
+rssi_to_qual(int rssi)
+{
+	if (rssi <= WL_IW_RSSI_NO_SIGNAL)
+		return 0;
+	else if (rssi <= WL_IW_RSSI_VERY_LOW)
+		return 1;
+	else if (rssi <= WL_IW_RSSI_LOW)
+		return 2;
+	else if (rssi <= WL_IW_RSSI_GOOD)
+		return 3;
+	else if (rssi <= WL_IW_RSSI_VERY_GOOD)
+		return 4;
+	else
+		return 5;
+}
+
+static int
+wl_iw_set_spy(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_iw_t *iw = IW_DEV_IF(dev);
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	int i;
+
+	WL_TRACE(("%s: SIOCSIWSPY\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	iw->spy_num = MIN(ARRAYSIZE(iw->spy_addr), dwrq->length);
+	for (i = 0; i < iw->spy_num; i++)
+		memcpy(&iw->spy_addr[i], addr[i].sa_data, ETHER_ADDR_LEN);
+	memset(iw->spy_qual, 0, sizeof(iw->spy_qual));
+
+	return 0;
+}
+
+static int
+wl_iw_get_spy(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_iw_t *iw = IW_DEV_IF(dev);
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	struct iw_quality *qual = (struct iw_quality *) &addr[iw->spy_num];
+	int i;
+
+	WL_TRACE(("%s: SIOCGIWSPY\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	dwrq->length = iw->spy_num;
+	for (i = 0; i < iw->spy_num; i++) {
+		memcpy(addr[i].sa_data, &iw->spy_addr[i], ETHER_ADDR_LEN);
+		addr[i].sa_family = AF_UNIX;
+		memcpy(&qual[i], &iw->spy_qual[i], sizeof(struct iw_quality));
+		iw->spy_qual[i].updated = 0;
+	}
+
+	return 0;
+}
+
+static int
+wl_iw_set_wap(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct sockaddr *awrq,
+	char *extra
+)
+{
+	int error = -EINVAL;
+
+	WL_TRACE(("%s: SIOCSIWAP\n", dev->name));
+
+	if (awrq->sa_family != ARPHRD_ETHER) {
+		WL_ERROR(("%s: Invalid Header...sa_family\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	/* Ignore "auto" or "off" */
+	if (ETHER_ISBCAST(awrq->sa_data) || ETHER_ISNULLADDR(awrq->sa_data)) {
+		scb_val_t scbval;
+		bzero(&scbval, sizeof(scb_val_t));
+		if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)))) {
+			WL_ERROR(("%s: WLC_DISASSOC failed (%d).\n", __FUNCTION__, error));
+		}
+		return 0;
+	}
+	/* WL_ASSOC(("Assoc to %s\n", bcm_ether_ntoa((struct ether_addr *)&(awrq->sa_data),
+	 * eabuf)));
+	 */
+	/* Reassociate to the specified AP */
+	if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, awrq->sa_data, ETHER_ADDR_LEN))) {
+		WL_ERROR(("%s: WLC_REASSOC failed (%d).\n", __FUNCTION__, error));
+		return error;
+	}
+
+	return 0;
+}
+
+static int
+wl_iw_get_wap(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct sockaddr *awrq,
+	char *extra
+)
+{
+	WL_TRACE(("%s: SIOCGIWAP\n", dev->name));
+
+	awrq->sa_family = ARPHRD_ETHER;
+	memset(awrq->sa_data, 0, ETHER_ADDR_LEN);
+
+	/* Ignore error (may be down or disassociated) */
+	(void) dev_wlc_ioctl(dev, WLC_GET_BSSID, awrq->sa_data, ETHER_ADDR_LEN);
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 17
+static int
+wl_iw_mlme(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct sockaddr *awrq,
+	char *extra
+)
+{
+	struct iw_mlme *mlme;
+	scb_val_t scbval;
+	int error  = -EINVAL;
+
+	WL_TRACE(("%s: SIOCSIWMLME\n", dev->name));
+
+	mlme = (struct iw_mlme *)extra;
+	if (mlme == NULL) {
+		WL_ERROR(("Invalid ioctl data.\n"));
+		return error;
+	}
+
+	scbval.val = mlme->reason_code;
+	bcopy(&mlme->addr.sa_data, &scbval.ea, ETHER_ADDR_LEN);
+
+	if (mlme->cmd == IW_MLME_DISASSOC) {
+		scbval.val = htod32(scbval.val);
+		error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
+	}
+	else if (mlme->cmd == IW_MLME_DEAUTH) {
+		scbval.val = htod32(scbval.val);
+		error = dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scbval,
+			sizeof(scb_val_t));
+	}
+	else {
+		WL_ERROR(("%s: Invalid ioctl data.\n", __FUNCTION__));
+		return error;
+	}
+
+	return error;
+}
+#endif /* WIRELESS_EXT > 17 */
+
+static int
+wl_iw_get_aplist(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_scan_results_t *list;
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	struct iw_quality qual[IW_MAX_AP];
+	wl_bss_info_t *bi = NULL;
+	int error, i;
+	uint buflen = dwrq->length;
+
+	WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	/* Get scan results (too large to put on the stack) */
+	list = kmalloc(buflen, GFP_KERNEL);
+	if (!list)
+		return -ENOMEM;
+	memset(list, 0, buflen);
+	list->buflen = htod32(buflen);
+	if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) {
+		WL_ERROR(("%d: Scan results error %d\n", __LINE__, error));
+		kfree(list);
+		return error;
+	}
+	list->buflen = dtoh32(list->buflen);
+	list->version = dtoh32(list->version);
+	list->count = dtoh32(list->count);
+	ASSERT(list->version == WL_BSS_INFO_VERSION);
+
+	for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
+		bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+		ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+			buflen));
+
+		/* Infrastructure only */
+		if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
+			continue;
+
+		/* BSSID */
+		memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+		addr[dwrq->length].sa_family = ARPHRD_ETHER;
+		qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI));
+		qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI);
+		qual[dwrq->length].noise = 0x100 + bi->phy_noise;
+
+		/* Updated qual, level, and noise */
+#if WIRELESS_EXT > 18
+		qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+#else
+		qual[dwrq->length].updated = 7;
+#endif /* WIRELESS_EXT > 18 */
+
+		dwrq->length++;
+	}
+
+	kfree(list);
+
+	if (dwrq->length) {
+		memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length);
+		/* Provided qual */
+		dwrq->flags = 1;
+	}
+
+	return 0;
+}
+
+static int
+wl_iw_iscan_get_aplist(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_scan_results_t *list;
+	iscan_buf_t * buf;
+	iscan_info_t *iscan = g_iscan;
+
+	struct sockaddr *addr = (struct sockaddr *) extra;
+	struct iw_quality qual[IW_MAX_AP];
+	wl_bss_info_t *bi = NULL;
+	int i;
+
+	WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	if ((!iscan) || (iscan->sysioc_pid < 0)) {
+		return wl_iw_get_aplist(dev, info, dwrq, extra);
+	}
+
+	buf = iscan->list_hdr;
+	/* Walk the accumulated iscan result buffers */
+	while (buf) {
+		list = &((wl_iscan_results_t *)buf->iscan_buf)->results;
+		ASSERT(list->version == WL_BSS_INFO_VERSION);
+
+		bi = NULL;
+		for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) {
+			bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+			ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+				WLC_IW_ISCAN_MAXLEN));
+
+			/* Infrastructure only */
+			if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
+				continue;
+
+			/* BSSID */
+			memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+			addr[dwrq->length].sa_family = ARPHRD_ETHER;
+			qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI));
+			qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI);
+			qual[dwrq->length].noise = 0x100 + bi->phy_noise;
+
+			/* Updated qual, level, and noise */
+#if WIRELESS_EXT > 18
+			qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+#else
+			qual[dwrq->length].updated = 7;
+#endif /* WIRELESS_EXT > 18 */
+
+			dwrq->length++;
+		}
+		buf = buf->next;
+	}
+	if (dwrq->length) {
+		memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length);
+		/* Provided qual */
+		dwrq->flags = 1;
+	}
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 13
+static int
+wl_iw_set_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+
+	WL_TRACE(("%s: SIOCSIWSCAN\n", dev->name));
+
+	/* default Broadcast scan */
+	memset(&ssid, 0, sizeof(ssid));
+
+#if WIRELESS_EXT > 17
+	/* check for given essid */
+	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+			struct iw_scan_req *req = (struct iw_scan_req *)extra;
+			ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
+			memcpy(ssid.SSID, req->essid, ssid.SSID_len);
+			ssid.SSID_len = htod32(ssid.SSID_len);
+		}
+	}
+#endif
+	/* Ignore error (most likely scan in progress) */
+	(void) dev_wlc_ioctl(dev, WLC_SCAN, &ssid, sizeof(ssid));
+
+	return 0;
+}
+
+static int
+wl_iw_iscan_set_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	union iwreq_data *wrqu,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+	iscan_info_t *iscan = g_iscan;
+
+	WL_TRACE(("%s: SIOCSIWSCAN\n", dev->name));
+
+	/* use backup if our thread is not successful */
+	if ((!iscan) || (iscan->sysioc_pid < 0)) {
+		return wl_iw_set_scan(dev, info, wrqu, extra);
+	}
+	if (iscan->iscan_state == ISCAN_STATE_SCANING) {
+		return 0;
+	}
+
+	/* default Broadcast scan */
+	memset(&ssid, 0, sizeof(ssid));
+
+#if WIRELESS_EXT > 17
+	/* check for given essid */
+	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+			struct iw_scan_req *req = (struct iw_scan_req *)extra;
+			ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
+			memcpy(ssid.SSID, req->essid, ssid.SSID_len);
+			ssid.SSID_len = htod32(ssid.SSID_len);
+		}
+	}
+#endif
+
+	iscan->list_cur = iscan->list_hdr;
+	iscan->iscan_state = ISCAN_STATE_SCANING;
+
+
+	wl_iw_set_event_mask(dev);
+	wl_iw_iscan(iscan, &ssid, WL_SCAN_ACTION_START);
+
+	iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms);
+	add_timer(&iscan->timer);
+	iscan->timer_on = 1;
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 17
+static bool
+ie_is_wpa_ie(uint8 **wpaie, uint8 **tlvs, int *tlvs_len)
+{
+/* Is the body of this TLV entry a WPA entry? If */
+/* not, update the tlvs buffer pointer/length. */
+	uint8 *ie = *wpaie;
+
+	/* If the contents match the WPA_OUI and type=1 */
+	if ((ie[1] >= 6) &&
+		!bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x01"), 4)) {
+		return TRUE;
+	}
+
+	/* point to the next ie */
+	ie += ie[1] + 2;
+	/* calculate the length of the rest of the buffer */
+	*tlvs_len -= (int)(ie - *tlvs);
+	/* update the pointer to the start of the buffer */
+	*tlvs = ie;
+	return FALSE;
+}
+
+static bool
+ie_is_wps_ie(uint8 **wpsie, uint8 **tlvs, int *tlvs_len)
+{
+/* Is the body of this TLV entry a WPS entry? If */
+/* not, update the tlvs buffer pointer/length. */
+	uint8 *ie = *wpsie;
+
+	/* If the contents match the WPA_OUI and type=4 */
+	if ((ie[1] >= 4) &&
+		!bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x04"), 4)) {
+		return TRUE;
+	}
+
+	/* point to the next ie */
+	ie += ie[1] + 2;
+	/* calculate the length of the rest of the buffer */
+	*tlvs_len -= (int)(ie - *tlvs);
+	/* update the pointer to the start of the buffer */
+	*tlvs = ie;
+	return FALSE;
+}
+#endif /* WIRELESS_EXT > 17 */
+
+#ifdef BCMWAPI_WPI
+static inline int _wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data,
+	size_t len, int uppercase)
+{
+	size_t i;
+	char *pos = buf, *end = buf + buf_size;
+	int ret;
+	if (buf_size == 0)
+		return 0;
+	for (i = 0; i < len; i++) {
+		ret = snprintf(pos, end - pos, uppercase ? "%02X" : "%02x",
+			data[i]);
+		if (ret < 0 || ret >= end - pos) {
+			end[-1] = '\0';
+			return pos - buf;
+		}
+		pos += ret;
+	}
+	end[-1] = '\0';
+	return pos - buf;
+}
+
+/**
+ * wpa_snprintf_hex - Print data as a hex string into a buffer
+ * @buf: Memory area to use as the output buffer
+ * @buf_size: Maximum buffer size in bytes (should be at least 2 * len + 1)
+ * @data: Data to be printed
+ * @len: Length of data in bytes
+ * Returns: Number of bytes written
+ */
+static int
+wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data, size_t len)
+{
+	return _wpa_snprintf_hex(buf, buf_size, data, len, 0);
+}
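+
+/*
+ * Usage sketch (illustrative; 'ea' is an assumed u8 address buffer): the
+ * output buffer must hold at least 2 * len + 1 bytes:
+ *
+ *	char hex[2 * ETHER_ADDR_LEN + 1];
+ *	wpa_snprintf_hex(hex, sizeof(hex), ea, ETHER_ADDR_LEN);
+ */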
+#endif /* BCMWAPI_WPI */
+
+static int
+wl_iw_handle_scanresults_ies(char **event_p, char *end,
+	struct iw_request_info *info, wl_bss_info_t *bi)
+{
+#if WIRELESS_EXT > 17
+	struct iw_event	iwe;
+	char *event;
+#ifdef BCMWAPI_WPI
+	char *buf;
+	int custom_event_len;
+#endif
+
+	event = *event_p;
+	if (bi->ie_length) {
+		/* look for wpa/rsn ies in the ie list... */
+		bcm_tlv_t *ie;
+		uint8 *ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+		int ptr_len = bi->ie_length;
+
+		if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_RSN_ID))) {
+			iwe.cmd = IWEVGENIE;
+			iwe.u.data.length = ie->len + 2;
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+		}
+		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+
+		if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_MDIE_ID))) {
+			iwe.cmd = IWEVGENIE;
+			iwe.u.data.length = ie->len + 2;
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+		}
+		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+
+		while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
+			/* look for WPS IE */
+			if (ie_is_wps_ie(((uint8 **)&ie), &ptr, &ptr_len)) {
+				iwe.cmd = IWEVGENIE;
+				iwe.u.data.length = ie->len + 2;
+				event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+				break;
+			}
+		}
+
+		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+		ptr_len = bi->ie_length;
+		while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
+			if (ie_is_wpa_ie(((uint8 **)&ie), &ptr, &ptr_len)) {
+				iwe.cmd = IWEVGENIE;
+				iwe.u.data.length = ie->len + 2;
+				event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+				break;
+			}
+		}
+
+#ifdef BCMWAPI_WPI
+		ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+		ptr_len = bi->ie_length;
+
+		while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WAPI_ID))) {
+			WL_TRACE(("%s: found a WAPI IE...\n", __FUNCTION__));
+#ifdef WAPI_IE_USE_GENIE
+			iwe.cmd = IWEVGENIE;
+			iwe.u.data.length = ie->len + 2;
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+#else /* using CUSTOM event */
+			iwe.cmd = IWEVCUSTOM;
+			custom_event_len = strlen("wapi_ie=") + 2*(ie->len + 2);
+			iwe.u.data.length = custom_event_len;
+
+			buf = kmalloc(custom_event_len+1, GFP_KERNEL);
+			if (buf == NULL)
+			{
+				WL_ERROR(("malloc(%d) returned NULL...\n", custom_event_len));
+				break;
+			}
+
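+			/* buffer layout: "wapi_ie=" (8 chars), then 2 hex chars
+			 * each for the IE id and len, then 2 hex chars per data
+			 * byte, NUL terminated by wpa_snprintf_hex().
+			 */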
+			memcpy(buf, "wapi_ie=", 8);
+			wpa_snprintf_hex(buf + 8, 2+1, &(ie->id), 1);
+			wpa_snprintf_hex(buf + 10, 2+1, &(ie->len), 1);
+			wpa_snprintf_hex(buf + 12, 2*ie->len+1, ie->data, ie->len);
+			event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, buf);
+			kfree(buf);
+#endif /* WAPI_IE_USE_GENIE */
+			break;
+		}
+#endif /* BCMWAPI_WPI */
+		*event_p = event;
+	}
+
+#endif /* WIRELESS_EXT > 17 */
+	return 0;
+}
+static int
+wl_iw_get_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	channel_info_t ci;
+	wl_scan_results_t *list;
+	struct iw_event	iwe;
+	wl_bss_info_t *bi = NULL;
+	int error, i, j;
+	char *event = extra, *end = extra + dwrq->length, *value;
+	uint buflen = dwrq->length;
+
+	WL_TRACE(("%s: SIOCGIWSCAN\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	/* Check for scan in progress */
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci))))
+		return error;
+	ci.scan_channel = dtoh32(ci.scan_channel);
+	if (ci.scan_channel)
+		return -EAGAIN;
+
+	/* Get scan results (too large to put on the stack) */
+	list = kmalloc(buflen, GFP_KERNEL);
+	if (!list)
+		return -ENOMEM;
+	memset(list, 0, buflen);
+	list->buflen = htod32(buflen);
+	if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) {
+		kfree(list);
+		return error;
+	}
+	list->buflen = dtoh32(list->buflen);
+	list->version = dtoh32(list->version);
+	list->count = dtoh32(list->count);
+
+	ASSERT(list->version == WL_BSS_INFO_VERSION);
+
+	for (i = 0; i < list->count && i < IW_MAX_AP; i++) {
+		bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+		ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+			buflen));
+
+		/* First entry must be the BSSID */
+		iwe.cmd = SIOCGIWAP;
+		iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+		memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+
+		/* SSID */
+		iwe.u.data.length = dtoh32(bi->SSID_len);
+		iwe.cmd = SIOCGIWESSID;
+		iwe.u.data.flags = 1;
+		event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
+
+		/* Mode */
+		if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
+			iwe.cmd = SIOCGIWMODE;
+			if (dtoh16(bi->capability) & DOT11_CAP_ESS)
+				iwe.u.mode = IW_MODE_INFRA;
+			else
+				iwe.u.mode = IW_MODE_ADHOC;
+			event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
+		}
+
+		/* Channel */
+		iwe.cmd = SIOCGIWFREQ;
+		iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec),
+			CHSPEC_CHANNEL(bi->chanspec) <= CH_MAX_2G_CHANNEL ?
+			WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+		iwe.u.freq.e = 6;
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
+
+		/* Channel quality */
+		iwe.cmd = IWEVQUAL;
+		iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI));
+		iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI);
+		iwe.u.qual.noise = 0x100 + bi->phy_noise;
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
+
+		/* WPA, WPA2, WPS, WAPI IEs */
+		wl_iw_handle_scanresults_ies(&event, end, info, bi);
+
+		/* Encryption */
+		iwe.cmd = SIOCGIWENCODE;
+		if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
+			iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+		else
+			iwe.u.data.flags = IW_ENCODE_DISABLED;
+		iwe.u.data.length = 0;
+		event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
+
+		/* Rates */
+		if (bi->rateset.count) {
+			value = event + IW_EV_LCP_LEN;
+			iwe.cmd = SIOCGIWRATE;
+			/* Those two flags are ignored... */
+			iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+			for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
+				iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000;
+				value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
+					IW_EV_PARAM_LEN);
+			}
+			event = value;
+		}
+	}
+
+	kfree(list);
+
+	dwrq->length = event - extra;
+	dwrq->flags = 0;	/* todo */
+
+	return 0;
+}
+
+static int
+wl_iw_iscan_get_scan(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_scan_results_t *list;
+	struct iw_event	iwe;
+	wl_bss_info_t *bi = NULL;
+	int ii, j;
+	int apcnt;
+	char *event = extra, *end = extra + dwrq->length, *value;
+	iscan_info_t *iscan = g_iscan;
+	iscan_buf_t * p_buf;
+
+	WL_TRACE(("%s: SIOCGIWSCAN\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	/* Fall back to the one-shot ioctl scan if the iscan thread is not running */
+	if ((!iscan) || (iscan->sysioc_pid < 0)) {
+		return wl_iw_get_scan(dev, info, dwrq, extra);
+	}
+
+	/* Check for scan in progress */
+	if (iscan->iscan_state == ISCAN_STATE_SCANING)
+		return -EAGAIN;
+
+	apcnt = 0;
+	p_buf = iscan->list_hdr;
+	/* Get scan results */
+	while (p_buf != iscan->list_cur) {
+	    list = &((wl_iscan_results_t*)p_buf->iscan_buf)->results;
+
+	    if (list->version != WL_BSS_INFO_VERSION) {
+		WL_ERROR(("list->version %d != WL_BSS_INFO_VERSION\n", list->version));
+	    }
+
+	    bi = NULL;
+	    for (ii = 0; ii < list->count && apcnt < IW_MAX_AP; apcnt++, ii++) {
+		bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info;
+		ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+			WLC_IW_ISCAN_MAXLEN));
+
+		/* Overflow check covering the fixed-size fields that precede the WPA IEs */
+		if (event + ETHER_ADDR_LEN + bi->SSID_len + IW_EV_UINT_LEN + IW_EV_FREQ_LEN +
+			IW_EV_QUAL_LEN >= end)
+			return -E2BIG;
+		/* First entry must be the BSSID */
+		iwe.cmd = SIOCGIWAP;
+		iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+		memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+
+		/* SSID */
+		iwe.u.data.length = dtoh32(bi->SSID_len);
+		iwe.cmd = SIOCGIWESSID;
+		iwe.u.data.flags = 1;
+		event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
+
+		/* Mode */
+		if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
+			iwe.cmd = SIOCGIWMODE;
+			if (dtoh16(bi->capability) & DOT11_CAP_ESS)
+				iwe.u.mode = IW_MODE_INFRA;
+			else
+				iwe.u.mode = IW_MODE_ADHOC;
+			event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
+		}
+
+		/* Channel */
+		iwe.cmd = SIOCGIWFREQ;
+		iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec),
+			CHSPEC_CHANNEL(bi->chanspec) <= CH_MAX_2G_CHANNEL ?
+			WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+		iwe.u.freq.e = 6;
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
+
+		/* Channel quality */
+		iwe.cmd = IWEVQUAL;
+		iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI));
+		iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI);
+		iwe.u.qual.noise = 0x100 + bi->phy_noise;
+		event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
+
+		/* WPA, WPA2, WPS, WAPI IEs */
+		wl_iw_handle_scanresults_ies(&event, end, info, bi);
+
+		/* Encryption */
+		iwe.cmd = SIOCGIWENCODE;
+		if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
+			iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+		else
+			iwe.u.data.flags = IW_ENCODE_DISABLED;
+		iwe.u.data.length = 0;
+		event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
+
+		/* Rates */
+		if (bi->rateset.count <= sizeof(bi->rateset.rates)) {
+			if (event + IW_MAX_BITRATES*IW_EV_PARAM_LEN >= end)
+				return -E2BIG;
+
+			value = event + IW_EV_LCP_LEN;
+			iwe.cmd = SIOCGIWRATE;
+			/* Those two flags are ignored... */
+			iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+			for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
+				iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000;
+				value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
+					IW_EV_PARAM_LEN);
+			}
+			event = value;
+		}
+	    }
+	    p_buf = p_buf->next;
+	} /* while (p_buf) */
+
+	dwrq->length = event - extra;
+	dwrq->flags = 0;	/* todo */
+
+	return 0;
+}
+
+#endif /* WIRELESS_EXT > 13 */
+
+static int
+wl_iw_set_essid(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+	int error;
+
+	WL_TRACE(("%s: SIOCSIWESSID\n", dev->name));
+
+	/* default Broadcast SSID */
+	memset(&ssid, 0, sizeof(ssid));
+	if (dwrq->length && extra) {
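+		/* WE-21 passes the ESSID length without a trailing NUL;
+		 * earlier versions include it, hence the -1 below.
+		 */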
+#if WIRELESS_EXT > 20
+		ssid.SSID_len = MIN(sizeof(ssid.SSID), dwrq->length);
+#else
+		ssid.SSID_len = MIN(sizeof(ssid.SSID), dwrq->length-1);
+#endif
+		memcpy(ssid.SSID, extra, ssid.SSID_len);
+		ssid.SSID_len = htod32(ssid.SSID_len);
+
+		if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID, &ssid, sizeof(ssid))))
+			return error;
+	}
+	/* An empty ESSID means "iwconfig <interface> essid off", so disassociate */
+	else {
+		scb_val_t scbval;
+		bzero(&scbval, sizeof(scb_val_t));
+		if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t))))
+			return error;
+	}
+	return 0;
+}
+
+static int
+wl_iw_get_essid(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wlc_ssid_t ssid;
+	int error;
+
+	WL_TRACE(("%s: SIOCGIWESSID\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)))) {
+		WL_ERROR(("Error getting the SSID\n"));
+		return error;
+	}
+
+	ssid.SSID_len = dtoh32(ssid.SSID_len);
+
+	/* Get the current SSID */
+	memcpy(extra, ssid.SSID, ssid.SSID_len);
+
+	dwrq->length = ssid.SSID_len;
+
+	dwrq->flags = 1; /* active */
+
+	return 0;
+}
+
+static int
+wl_iw_set_nick(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_iw_t *iw = IW_DEV_IF(dev);
+	WL_TRACE(("%s: SIOCSIWNICKN\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	/* Check the size of the string (reject empty to avoid a negative index below) */
+	if (!dwrq->length || dwrq->length > sizeof(iw->nickname))
+		return -E2BIG;
+
+	memcpy(iw->nickname, extra, dwrq->length);
+	iw->nickname[dwrq->length - 1] = '\0';
+
+	return 0;
+}
+
+static int
+wl_iw_get_nick(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_iw_t *iw = IW_DEV_IF(dev);
+	WL_TRACE(("%s: SIOCGIWNICKN\n", dev->name));
+
+	if (!extra)
+		return -EINVAL;
+
+	strcpy(extra, iw->nickname);
+	dwrq->length = strlen(extra) + 1;
+
+	return 0;
+}
+
+static int wl_iw_set_rate(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	wl_rateset_t rateset;
+	int error, rate, i, error_bg, error_a;
+
+	WL_TRACE(("%s: SIOCSIWRATE\n", dev->name));
+
+	/* Get current rateset */
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset))))
+		return error;
+
+	rateset.count = dtoh32(rateset.count);
+
+	if (vwrq->value < 0) {
+		/* Select maximum rate */
+		rate = rateset.rates[rateset.count - 1] & 0x7f;
+	} else if (vwrq->value < rateset.count) {
+		/* Select rate by rateset index */
+		rate = rateset.rates[vwrq->value] & 0x7f;
+	} else {
+		/* Specified rate in bps */
+		rate = vwrq->value / 500000;
+	}
+
+	if (vwrq->fixed) {
+		/*
+			Set rate override.
+			Since this is a/b/g-blind, both a_rate and bg_rate are set.
+		*/
+		error_bg = dev_wlc_intvar_set(dev, "bg_rate", rate);
+		error_a = dev_wlc_intvar_set(dev, "a_rate", rate);
+
+		if (error_bg && error_a)
+			return (error_bg | error_a);
+	} else {
+		/*
+			Clear rate override.
+			Since this is a/b/g-blind, both a_rate and bg_rate are cleared.
+		*/
+		/* 0 is for clearing rate override */
+		error_bg = dev_wlc_intvar_set(dev, "bg_rate", 0);
+		/* 0 is for clearing rate override */
+		error_a = dev_wlc_intvar_set(dev, "a_rate", 0);
+
+		if (error_bg && error_a)
+			return (error_bg | error_a);
+
+		/* Remove rates above selected rate */
+		for (i = 0; i < rateset.count; i++)
+			if ((rateset.rates[i] & 0x7f) > rate)
+				break;
+		rateset.count = htod32(i);
+
+		/* Set current rateset */
+		if ((error = dev_wlc_ioctl(dev, WLC_SET_RATESET, &rateset, sizeof(rateset))))
+			return error;
+	}
+
+	return 0;
+}
+
+static int wl_iw_get_rate(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, rate;
+
+	WL_TRACE(("%s: SIOCGIWRATE\n", dev->name));
+
+	/* Report the current tx rate */
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate))))
+		return error;
+	rate = dtoh32(rate);
+	vwrq->value = rate * 500000;
+
+	return 0;
+}
+
+static int
+wl_iw_set_rts(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, rts;
+
+	WL_TRACE(("%s: SIOCSIWRTS\n", dev->name));
+
+	if (vwrq->disabled)
+		rts = DOT11_DEFAULT_RTS_LEN;
+	else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_RTS_LEN)
+		return -EINVAL;
+	else
+		rts = vwrq->value;
+
+	if ((error = dev_wlc_intvar_set(dev, "rtsthresh", rts)))
+		return error;
+
+	return 0;
+}
+
+static int
+wl_iw_get_rts(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, rts;
+
+	WL_TRACE(("%s: SIOCGIWRTS\n", dev->name));
+
+	if ((error = dev_wlc_intvar_get(dev, "rtsthresh", &rts)))
+		return error;
+
+	vwrq->value = rts;
+	vwrq->disabled = (rts >= DOT11_DEFAULT_RTS_LEN);
+	vwrq->fixed = 1;
+
+	return 0;
+}
+
+static int
+wl_iw_set_frag(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, frag;
+
+	WL_TRACE(("%s: SIOCSIWFRAG\n", dev->name));
+
+	if (vwrq->disabled)
+		frag = DOT11_DEFAULT_FRAG_LEN;
+	else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_FRAG_LEN)
+		return -EINVAL;
+	else
+		frag = vwrq->value;
+
+	if ((error = dev_wlc_intvar_set(dev, "fragthresh", frag)))
+		return error;
+
+	return 0;
+}
+
+static int
+wl_iw_get_frag(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, fragthreshold;
+
+	WL_TRACE(("%s: SIOCGIWFRAG\n", dev->name));
+
+	if ((error = dev_wlc_intvar_get(dev, "fragthresh", &fragthreshold)))
+		return error;
+
+	vwrq->value = fragthreshold;
+	vwrq->disabled = (fragthreshold >= DOT11_DEFAULT_FRAG_LEN);
+	vwrq->fixed = 1;
+
+	return 0;
+}
+
+static int
+wl_iw_set_txpow(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, disable;
+	uint16 txpwrmw;
+	WL_TRACE(("%s: SIOCSIWTXPOW\n", dev->name));
+
+	/* Make sure radio is off or on as far as software is concerned */
+	disable = vwrq->disabled ? WL_RADIO_SW_DISABLE : 0;
+	disable += WL_RADIO_SW_DISABLE << 16;
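+	/* The upper 16 bits select which radio-disable bits to change; the
+	 * lower 16 carry their new values.
+	 */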
+
+	disable = htod32(disable);
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_RADIO, &disable, sizeof(disable))))
+		return error;
+
+	/* If Radio is off, nothing more to do */
+	if (disable & WL_RADIO_SW_DISABLE)
+		return 0;
+
+	/* Only handle mW */
+	if (!(vwrq->flags & IW_TXPOW_MWATT))
+		return -EINVAL;
+
+	/* Value < 0 means just "on" or "off" */
+	if (vwrq->value < 0)
+		return 0;
+
+	if (vwrq->value > 0xffff)
+		txpwrmw = 0xffff;
+	else
+		txpwrmw = (uint16)vwrq->value;
+
+	error = dev_wlc_intvar_set(dev, "qtxpower", (int)(bcm_mw_to_qdbm(txpwrmw)));
+	return error;
+}
+
+static int
+wl_iw_get_txpow(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, disable, txpwrdbm;
+	uint8 result;
+
+	WL_TRACE(("%s: SIOCGIWTXPOW\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_RADIO, &disable, sizeof(disable))) ||
+	    (error = dev_wlc_intvar_get(dev, "qtxpower", &txpwrdbm)))
+		return error;
+
+	disable = dtoh32(disable);
+	result = (uint8)(txpwrdbm & ~WL_TXPWR_OVERRIDE);
+	vwrq->value = (int32)bcm_qdbm_to_mw(result);
+	vwrq->fixed = 0;
+	vwrq->disabled = (disable & (WL_RADIO_SW_DISABLE | WL_RADIO_HW_DISABLE)) ? 1 : 0;
+	vwrq->flags = IW_TXPOW_MWATT;
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 10
+static int
+wl_iw_set_retry(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, lrl, srl;
+
+	WL_TRACE(("%s: SIOCSIWRETRY\n", dev->name));
+
+	/* Do not handle "off" or "lifetime" */
+	if (vwrq->disabled || (vwrq->flags & IW_RETRY_LIFETIME))
+		return -EINVAL;
+
+	/* Handle "[min|max] limit" */
+	if (vwrq->flags & IW_RETRY_LIMIT) {
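+		/* WE-21 renamed IW_RETRY_MIN/MAX to IW_RETRY_SHORT/LONG, so
+		 * accept either spelling below.
+		 */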
+		/* "max limit" or just "limit" */
+#if WIRELESS_EXT > 20
+		if ((vwrq->flags & IW_RETRY_LONG) ||(vwrq->flags & IW_RETRY_MAX) ||
+			!((vwrq->flags & IW_RETRY_SHORT) || (vwrq->flags & IW_RETRY_MIN))) {
+#else
+		if ((vwrq->flags & IW_RETRY_MAX) || !(vwrq->flags & IW_RETRY_MIN)) {
+#endif /* WIRELESS_EXT > 20 */
+
+			lrl = htod32(vwrq->value);
+			if ((error = dev_wlc_ioctl(dev, WLC_SET_LRL, &lrl, sizeof(lrl))))
+				return error;
+		}
+		/* "min limit" or just "limit" */
+#if WIRELESS_EXT > 20
+		if ((vwrq->flags & IW_RETRY_SHORT) ||(vwrq->flags & IW_RETRY_MIN) ||
+			!((vwrq->flags & IW_RETRY_LONG) || (vwrq->flags & IW_RETRY_MAX))) {
+#else
+		if ((vwrq->flags & IW_RETRY_MIN) || !(vwrq->flags & IW_RETRY_MAX)) {
+#endif /* WIRELESS_EXT > 20 */
+
+			srl = htod32(vwrq->value);
+			if ((error = dev_wlc_ioctl(dev, WLC_SET_SRL, &srl, sizeof(srl))))
+				return error;
+		}
+	}
+
+	return 0;
+}
+
+static int
+wl_iw_get_retry(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, lrl, srl;
+
+	WL_TRACE(("%s: SIOCGIWRETRY\n", dev->name));
+
+	vwrq->disabled = 0;      /* Can't be disabled */
+
+	/* Do not handle lifetime queries */
+	if ((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME)
+		return -EINVAL;
+
+	/* Get retry limits */
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_LRL, &lrl, sizeof(lrl))) ||
+	    (error = dev_wlc_ioctl(dev, WLC_GET_SRL, &srl, sizeof(srl))))
+		return error;
+
+	lrl = dtoh32(lrl);
+	srl = dtoh32(srl);
+
+	/* Note : by default, display the min retry number */
+	if (vwrq->flags & IW_RETRY_MAX) {
+		vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
+		vwrq->value = lrl;
+	} else {
+		vwrq->flags = IW_RETRY_LIMIT;
+		vwrq->value = srl;
+		if (srl != lrl)
+			vwrq->flags |= IW_RETRY_MIN;
+	}
+
+	return 0;
+}
+#endif /* WIRELESS_EXT > 10 */
+
+static int
+wl_iw_set_encode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_wsec_key_t key;
+	int error, val, wsec;
+
+	WL_TRACE(("%s: SIOCSIWENCODE\n", dev->name));
+
+	memset(&key, 0, sizeof(key));
+
+	if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
+		/* Find the current key */
+		for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) {
+			val = htod32(key.index);
+			if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val))))
+				return error;
+			val = dtoh32(val);
+			if (val)
+				break;
+		}
+		/* Default to 0 */
+		if (key.index == DOT11_MAX_DEFAULT_KEYS)
+			key.index = 0;
+	} else {
+		key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+		if (key.index >= DOT11_MAX_DEFAULT_KEYS)
+			return -EINVAL;
+	}
+
+	/* Interpret "off" to mean no encryption */
+	wsec = (dwrq->flags & IW_ENCODE_DISABLED) ? 0 : WEP_ENABLED;
+
+	if ((error = dev_wlc_intvar_set(dev, "wsec", wsec)))
+		return error;
+
+	/* Old API used to pass a NULL pointer instead of IW_ENCODE_NOKEY */
+	if (!extra || !dwrq->length || (dwrq->flags & IW_ENCODE_NOKEY)) {
+		/* Just select a new current key */
+		val = htod32(key.index);
+		if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY, &val, sizeof(val))))
+			return error;
+	} else {
+		key.len = dwrq->length;
+
+		if (dwrq->length > sizeof(key.data))
+			return -EINVAL;
+
+		memcpy(key.data, extra, dwrq->length);
+
+		key.flags = WL_PRIMARY_KEY;
+		switch (key.len) {
+		case WEP1_KEY_SIZE:
+			key.algo = CRYPTO_ALGO_WEP1;
+			break;
+		case WEP128_KEY_SIZE:
+			key.algo = CRYPTO_ALGO_WEP128;
+			break;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+		case TKIP_KEY_SIZE:
+			key.algo = CRYPTO_ALGO_TKIP;
+			break;
+#endif
+		case AES_KEY_SIZE:
+			key.algo = CRYPTO_ALGO_AES_CCM;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		/* Set the new key/index */
+		swap_key_from_BE(&key);
+		if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key))))
+			return error;
+	}
+
+	/* Interpret "restricted" to mean shared key authentication */
+	val = (dwrq->flags & IW_ENCODE_RESTRICTED) ? 1 : 0;
+	val = htod32(val);
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val))))
+		return error;
+
+	return 0;
+}
+
+static int
+wl_iw_get_encode(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_wsec_key_t key;
+	int error, val, wsec, auth;
+
+	WL_TRACE(("%s: SIOCGIWENCODE\n", dev->name));
+
+	/* ensure default values of zero for fields we don't touch */
+	bzero(&key, sizeof(wl_wsec_key_t));
+
+	if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
+		/* Find the current key */
+		for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) {
+			val = key.index;
+			if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val))))
+				return error;
+			val = dtoh32(val);
+			if (val)
+				break;
+		}
+	} else
+		key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+	if (key.index >= DOT11_MAX_DEFAULT_KEYS)
+		key.index = 0;
+
+	/* Get info */
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_WSEC, &wsec, sizeof(wsec))) ||
+	    (error = dev_wlc_ioctl(dev, WLC_GET_AUTH, &auth, sizeof(auth))))
+		return error;
+
+	swap_key_to_BE(&key);
+
+	wsec = dtoh32(wsec);
+	auth = dtoh32(auth);
+	/* Get key length */
+	dwrq->length = MIN(IW_ENCODING_TOKEN_MAX, key.len);
+
+	/* Get flags */
+	dwrq->flags = key.index + 1;
+	if (!(wsec & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))) {
+		/* Interpret "off" to mean no encryption */
+		dwrq->flags |= IW_ENCODE_DISABLED;
+	}
+	if (auth) {
+		/* Interpret "restricted" to mean shared key authentication */
+		dwrq->flags |= IW_ENCODE_RESTRICTED;
+	}
+
+	/* Get key */
+	if (dwrq->length && extra)
+		memcpy(extra, key.data, dwrq->length);
+
+	return 0;
+}
+
+static int
+wl_iw_set_power(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, pm;
+
+	WL_TRACE(("%s: SIOCSIWPOWER\n", dev->name));
+
+	pm = vwrq->disabled ? PM_OFF : PM_MAX;
+
+	pm = htod32(pm);
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm))))
+		return error;
+
+	return 0;
+}
+
+static int
+wl_iw_get_power(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error, pm;
+
+	WL_TRACE(("%s: SIOCGIWPOWER\n", dev->name));
+
+	if ((error = dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm))))
+		return error;
+
+	pm = dtoh32(pm);
+	vwrq->disabled = pm ? 0 : 1;
+	vwrq->flags = IW_POWER_ALL_R;
+
+	return 0;
+}
+
+#if WIRELESS_EXT > 17
+static int
+wl_iw_set_wpaie(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *iwp,
+	char *extra
+)
+{
+#if defined(BCMWAPI_WPI)
+	uchar buf[WLC_IOCTL_SMLEN] = {0};
+	uchar *p = buf;
+	int wapi_ie_size;
+
+	WL_TRACE(("%s: SIOCSIWGENIE\n", dev->name));
+
+	if (extra[0] == DOT11_MNG_WAPI_ID)
+	{
+		wapi_ie_size = iwp->length;
+		memcpy(p, extra, iwp->length);
+		dev_wlc_bufvar_set(dev, "wapiie", buf, wapi_ie_size);
+	}
+	else
+#endif
+		dev_wlc_bufvar_set(dev, "wpaie", extra, iwp->length);
+
+	return 0;
+}
+
+static int
+wl_iw_get_wpaie(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *iwp,
+	char *extra
+)
+{
+	WL_TRACE(("%s: SIOCGIWGENIE\n", dev->name));
+	iwp->length = 64;
+	dev_wlc_bufvar_get(dev, "wpaie", extra, iwp->length);
+	return 0;
+}
+
+static int
+wl_iw_set_encodeext(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	wl_wsec_key_t key;
+	int error;
+	struct iw_encode_ext *iwe;
+
+	WL_TRACE(("%s: SIOCSIWENCODEEXT\n", dev->name));
+
+	memset(&key, 0, sizeof(key));
+	iwe = (struct iw_encode_ext *)extra;
+
+	/* IW_ENCODE_DISABLED is intentionally a no-op here; the cipher is
+	 * cleared through the SIOCSIWAUTH path instead.
+	 */
+
+	/* get the key index */
+	key.index = 0;
+	if (dwrq->flags & IW_ENCODE_INDEX)
+		key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+	key.len = iwe->key_len;
+
+	/* For default WEP keys the driver expects a null ea address rather than the broadcast address */
+	if (!ETHER_ISMULTI(iwe->addr.sa_data))
+		bcopy((void *)&iwe->addr.sa_data, (char *)&key.ea, ETHER_ADDR_LEN);
+
+	/* check for key index change */
+	if (key.len == 0) {
+		if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+			WL_WSEC(("Changing the primary key to %d\n", key.index));
+			/* change the key index .... */
+			key.index = htod32(key.index);
+			error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY,
+				&key.index, sizeof(key.index));
+			if (error)
+				return error;
+		}
+		/* key delete */
+		else {
+			swap_key_from_BE(&key);
+			error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+			if (error)
+				return error;
+		}
+	}
+	/* This case is used to allow an external 802.1x supplicant
+	 * to pass the PMK to the in-driver supplicant for use in
+	 * the 4-way handshake.
+	 */
+	else if (iwe->alg == IW_ENCODE_ALG_PMK) {
+		int j;
+		wsec_pmk_t pmk;
+		char keystring[WSEC_MAX_PSK_LEN + 1];
+		char* charptr = keystring;
+		uint len;
+
+		/* Re-encode the raw PMK bytes as an ASCII hex passphrase string */
+		for (j = 0; j < (WSEC_MAX_PSK_LEN / 2); j++) {
+			sprintf(charptr, "%02x", iwe->key[j]);
+			charptr += 2;
+		}
+		len = strlen(keystring);
+		pmk.key_len = htod16(len);
+		bcopy(keystring, pmk.key, len);
+		pmk.flags = htod16(WSEC_PASSPHRASE);
+
+		error = dev_wlc_ioctl(dev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
+		if (error)
+			return error;
+	}
+
+	else {
+		if (iwe->key_len > sizeof(key.data))
+			return -EINVAL;
+
+		WL_WSEC(("Setting the key index %d\n", key.index));
+		if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+			WL_WSEC(("key is a Primary Key\n"));
+			key.flags = WL_PRIMARY_KEY;
+		}
+
+		bcopy((void *)iwe->key, key.data, iwe->key_len);
+
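+		/* TKIP: the supplicant supplies <temporal key><TX MIC><RX MIC>,
+		 * but the firmware expects the two 8-byte MIC keys in the
+		 * opposite order, so exchange bytes 16..23 with 24..31.
+		 */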
+		if (iwe->alg == IW_ENCODE_ALG_TKIP) {
+			uint8 keybuf[8];
+			bcopy(&key.data[24], keybuf, sizeof(keybuf));
+			bcopy(&key.data[16], &key.data[24], sizeof(keybuf));
+			bcopy(keybuf, &key.data[16], sizeof(keybuf));
+		}
+
+		/* rx iv: unpack the 6-byte receive sequence counter (little-endian) into the 32-bit hi / 16-bit lo halves */
+		if (iwe->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
+			uchar *ivptr;
+			ivptr = (uchar *)iwe->rx_seq;
+			key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
+				(ivptr[3] << 8) | ivptr[2];
+			key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
+			key.iv_initialized = TRUE;
+		}
+
+		switch (iwe->alg) {
+			case IW_ENCODE_ALG_NONE:
+				key.algo = CRYPTO_ALGO_OFF;
+				break;
+			case IW_ENCODE_ALG_WEP:
+				if (iwe->key_len == WEP1_KEY_SIZE)
+					key.algo = CRYPTO_ALGO_WEP1;
+				else
+					key.algo = CRYPTO_ALGO_WEP128;
+				break;
+			case IW_ENCODE_ALG_TKIP:
+				key.algo = CRYPTO_ALGO_TKIP;
+				break;
+			case IW_ENCODE_ALG_CCMP:
+				key.algo = CRYPTO_ALGO_AES_CCM;
+				break;
+#ifdef BCMWAPI_WPI
+			case IW_ENCODE_ALG_SM4:
+				key.algo = CRYPTO_ALGO_SMS4;
+				if (iwe->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
+					key.flags &= ~WL_PRIMARY_KEY;
+				}
+				break;
+#endif
+			default:
+				break;
+		}
+		swap_key_from_BE(&key);
+
+		dhd_wait_pend8021x(dev);
+
+		error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+		if (error)
+			return error;
+	}
+	return 0;
+}
+
+
+#if WIRELESS_EXT > 17
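+/* Driver-side PMKID cache; the whole table is pushed to the firmware
+ * through the "pmkid_info" iovar whenever it changes.
+ */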
+struct {
+	pmkid_list_t pmkids;
+	pmkid_t foo[MAXPMKID-1];
+} pmkid_list;
+static int
+wl_iw_set_pmksa(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	struct iw_pmksa *iwpmksa;
+	uint i;
+	char eabuf[ETHER_ADDR_STR_LEN];
+	pmkid_t * pmkid_array = pmkid_list.pmkids.pmkid;
+
+	WL_TRACE(("%s: SIOCSIWPMKSA\n", dev->name));
+	iwpmksa = (struct iw_pmksa *)extra;
+	bzero((char *)eabuf, ETHER_ADDR_STR_LEN);
+	if (iwpmksa->cmd == IW_PMKSA_FLUSH) {
+		WL_TRACE(("wl_iw_set_pmksa - IW_PMKSA_FLUSH\n"));
+		bzero((char *)&pmkid_list, sizeof(pmkid_list));
+	}
+	if (iwpmksa->cmd == IW_PMKSA_REMOVE) {
+		pmkid_list_t pmkid, *pmkidptr;
+		pmkidptr = &pmkid;
+		bcopy(&iwpmksa->bssid.sa_data[0], &pmkidptr->pmkid[0].BSSID, ETHER_ADDR_LEN);
+		bcopy(&iwpmksa->pmkid[0], &pmkidptr->pmkid[0].PMKID, WPA2_PMKID_LEN);
+		{
+			uint j;
+			WL_TRACE(("wl_iw_set_pmksa,IW_PMKSA_REMOVE - PMKID: %s = ",
+				bcm_ether_ntoa(&pmkidptr->pmkid[0].BSSID,
+				eabuf)));
+			for (j = 0; j < WPA2_PMKID_LEN; j++)
+				WL_TRACE(("%02x ", pmkidptr->pmkid[0].PMKID[j]));
+			WL_TRACE(("\n"));
+		}
+		for (i = 0; i < pmkid_list.pmkids.npmkid; i++)
+			if (!bcmp(&iwpmksa->bssid.sa_data[0], &pmkid_array[i].BSSID,
+				ETHER_ADDR_LEN))
+				break;
+		/* Compact the list over the removed entry, but only if the
+		 * BSSID was actually found.
+		 */
+		if (i < pmkid_list.pmkids.npmkid) {
+			for (; i < pmkid_list.pmkids.npmkid - 1; i++) {
+				bcopy(&pmkid_array[i+1].BSSID,
+					&pmkid_array[i].BSSID,
+					ETHER_ADDR_LEN);
+				bcopy(&pmkid_array[i+1].PMKID,
+					&pmkid_array[i].PMKID,
+					WPA2_PMKID_LEN);
+			}
+			pmkid_list.pmkids.npmkid--;
+		}
+	}
+	/* Refuse to add beyond the fixed cache size. */
+	if ((iwpmksa->cmd == IW_PMKSA_ADD) && (pmkid_list.pmkids.npmkid < MAXPMKID)) {
+		bcopy(&iwpmksa->bssid.sa_data[0],
+			&pmkid_array[pmkid_list.pmkids.npmkid].BSSID,
+			ETHER_ADDR_LEN);
+		bcopy(&iwpmksa->pmkid[0], &pmkid_array[pmkid_list.pmkids.npmkid].PMKID,
+			WPA2_PMKID_LEN);
+		{
+			uint j;
+			uint k;
+			k = pmkid_list.pmkids.npmkid;
+			BCM_REFERENCE(k);
+			WL_TRACE(("wl_iw_set_pmksa,IW_PMKSA_ADD - PMKID: %s = ",
+				bcm_ether_ntoa(&pmkid_array[k].BSSID,
+				eabuf)));
+			for (j = 0; j < WPA2_PMKID_LEN; j++)
+				WL_TRACE(("%02x ", pmkid_array[k].PMKID[j]));
+			WL_TRACE(("\n"));
+		}
+		pmkid_list.pmkids.npmkid++;
+	}
+	WL_TRACE(("PRINTING pmkid LIST - No of elements %d\n", pmkid_list.pmkids.npmkid));
+	for (i = 0; i < pmkid_list.pmkids.npmkid; i++) {
+		uint j;
+		WL_TRACE(("PMKID[%d]: %s = ", i,
+			bcm_ether_ntoa(&pmkid_array[i].BSSID,
+			eabuf)));
+		for (j = 0; j < WPA2_PMKID_LEN; j++)
+			WL_TRACE(("%02x ", pmkid_array[i].PMKID[j]));
+		WL_TRACE(("\n"));
+	}
+	WL_TRACE(("\n"));
+	dev_wlc_bufvar_set(dev, "pmkid_info", (char *)&pmkid_list, sizeof(pmkid_list));
+	return 0;
+}
+#endif /* WIRELESS_EXT > 17 */
+
+static int
+wl_iw_get_encodeext(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	WL_TRACE(("%s: SIOCGIWENCODEEXT\n", dev->name));
+	return 0;
+}
+
+static int
+wl_iw_set_wpaauth(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error = 0;
+	int paramid;
+	int paramval;
+	uint32 cipher_combined;
+	int val = 0;
+	wl_iw_t *iw = IW_DEV_IF(dev);
+
+	WL_TRACE(("%s: SIOCSIWAUTH\n", dev->name));
+
+	paramid = vwrq->flags & IW_AUTH_INDEX;
+	paramval = vwrq->value;
+
+	WL_TRACE(("%s: SIOCSIWAUTH, paramid = 0x%0x, paramval = 0x%0x\n",
+		dev->name, paramid, paramval));
+
+	switch (paramid) {
+
+	case IW_AUTH_WPA_VERSION:
+		/* supported wpa version disabled or wpa or wpa2 */
+		if (paramval & IW_AUTH_WPA_VERSION_DISABLED)
+			val = WPA_AUTH_DISABLED;
+		else if (paramval & (IW_AUTH_WPA_VERSION_WPA))
+			val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
+		else if (paramval & IW_AUTH_WPA_VERSION_WPA2)
+			val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
+#ifdef BCMWAPI_WPI
+		else if (paramval & IW_AUTH_WAPI_VERSION_1)
+			val = WAPI_AUTH_UNSPECIFIED;
+#endif
+		WL_TRACE(("%s: %d: setting wpa_auth to 0x%0x\n", __FUNCTION__, __LINE__, val));
+		if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
+			return error;
+		break;
+
+	case IW_AUTH_CIPHER_PAIRWISE:
+	case IW_AUTH_CIPHER_GROUP: {
+		int fbt_cap = 0;
+
+		if (paramid == IW_AUTH_CIPHER_PAIRWISE) {
+			iw->pwsec = paramval;
+		}
+		else {
+			iw->gwsec = paramval;
+		}
+
+		if ((error = dev_wlc_intvar_get(dev, "wsec", &val)))
+			return error;
+
+		cipher_combined = iw->gwsec | iw->pwsec;
+		val &= ~(WEP_ENABLED | TKIP_ENABLED | AES_ENABLED);
+		if (cipher_combined & (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104))
+			val |= WEP_ENABLED;
+		if (cipher_combined & IW_AUTH_CIPHER_TKIP)
+			val |= TKIP_ENABLED;
+		if (cipher_combined & IW_AUTH_CIPHER_CCMP)
+			val |= AES_ENABLED;
+#ifdef BCMWAPI_WPI
+		val &= ~SMS4_ENABLED;
+		if (cipher_combined & IW_AUTH_CIPHER_SMS4)
+			val |= SMS4_ENABLED;
+#endif
+
+		if (iw->privacy_invoked && !val) {
+			WL_WSEC(("%s: %s: 'Privacy invoked' TRUE but clearing wsec, assuming "
+			         "we're a WPS enrollee\n", dev->name, __FUNCTION__));
+			if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) {
+				WL_WSEC(("Failed to set iovar is_WPS_enrollee\n"));
+				return error;
+			}
+		} else if (val) {
+			if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+				WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+				return error;
+			}
+		}
+
+		if ((error = dev_wlc_intvar_set(dev, "wsec", val)))
+			return error;
+
+		/* Ensure in-dongle supplicant is turned on when FBT wants to do the 4-way
+		 * handshake.
+		 */
+		if (dev_wlc_intvar_get(dev, "fbt_cap", &fbt_cap) == 0) {
+			if (fbt_cap == WLC_FBT_CAP_DRV_4WAY_AND_REASSOC) {
+				if ((paramid == IW_AUTH_CIPHER_PAIRWISE) && (val & AES_ENABLED)) {
+					if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 1)))
+						return error;
+				}
+				else if (val == 0) {
+					if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 0)))
+						return error;
+				}
+			}
+		}
+		break;
+	}
+
+	case IW_AUTH_KEY_MGMT:
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+			return error;
+
+		if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
+			if (paramval & (IW_AUTH_KEY_MGMT_FT_PSK | IW_AUTH_KEY_MGMT_PSK))
+				val = WPA_AUTH_PSK;
+			else
+				val = WPA_AUTH_UNSPECIFIED;
+			if (paramval & (IW_AUTH_KEY_MGMT_FT_802_1X | IW_AUTH_KEY_MGMT_FT_PSK))
+				val |= WPA2_AUTH_FT;
+		}
+		else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
+			if (paramval & (IW_AUTH_KEY_MGMT_FT_PSK | IW_AUTH_KEY_MGMT_PSK))
+				val = WPA2_AUTH_PSK;
+			else
+				val = WPA2_AUTH_UNSPECIFIED;
+			if (paramval & (IW_AUTH_KEY_MGMT_FT_802_1X | IW_AUTH_KEY_MGMT_FT_PSK))
+				val |= WPA2_AUTH_FT;
+		}
+#ifdef BCMWAPI_WPI
+		if (paramval & (IW_AUTH_KEY_MGMT_WAPI_PSK | IW_AUTH_KEY_MGMT_WAPI_CERT))
+			val = WAPI_AUTH_UNSPECIFIED;
+#endif
+		WL_TRACE(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val));
+		if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
+			return error;
+		break;
+
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		dev_wlc_bufvar_set(dev, "tkip_countermeasures", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_80211_AUTH_ALG:
+		/* open shared */
+		WL_ERROR(("Setting the D11auth %d\n", paramval));
+		if (paramval & IW_AUTH_ALG_OPEN_SYSTEM)
+			val = 0;
+		else if (paramval & IW_AUTH_ALG_SHARED_KEY)
+			val = 1;
+		else
+			error = 1;
+		if (!error && (error = dev_wlc_intvar_set(dev, "auth", val)))
+			return error;
+		break;
+
+	case IW_AUTH_WPA_ENABLED:
+		if (paramval == 0) {
+			val = 0;
+			WL_TRACE(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val));
+			error = dev_wlc_intvar_set(dev, "wpa_auth", val);
+			return error;
+		}
+		else {
+			/* If WPA is enabled, wpa_auth is set elsewhere */
+		}
+		break;
+
+	case IW_AUTH_DROP_UNENCRYPTED:
+		dev_wlc_bufvar_set(dev, "wsec_restrict", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		dev_wlc_bufvar_set(dev, "rx_unencrypted_eapol", (char *)&paramval, 1);
+		break;
+
+#if WIRELESS_EXT > 17
+
+	case IW_AUTH_ROAMING_CONTROL:
+		WL_TRACE(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
+		/* driver control or user space app control */
+		break;
+
+	case IW_AUTH_PRIVACY_INVOKED: {
+		int wsec;
+
+		if (paramval == 0) {
+			iw->privacy_invoked = FALSE;
+			if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+				WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+				return error;
+			}
+		} else {
+			iw->privacy_invoked = TRUE;
+			if ((error = dev_wlc_intvar_get(dev, "wsec", &wsec)))
+				return error;
+
+			if (!WSEC_ENABLED(wsec)) {
+				/* if privacy is true, but wsec is false, we are a WPS enrollee */
+				if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) {
+					WL_WSEC(("Failed to set iovar is_WPS_enrollee\n"));
+					return error;
+				}
+			} else {
+				if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+					WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+					return error;
+				}
+			}
+		}
+		break;
+	}
+
+
+#endif /* WIRELESS_EXT > 17 */
+
+#ifdef BCMWAPI_WPI
+
+	case IW_AUTH_WAPI_ENABLED:
+		if ((error = dev_wlc_intvar_get(dev, "wsec", &val)))
+			return error;
+		if (paramval) {
+			val |= SMS4_ENABLED;
+			if ((error = dev_wlc_intvar_set(dev, "wsec", val))) {
+				WL_ERROR(("%s: setting wsec to 0x%0x returned error %d\n",
+					__FUNCTION__, val, error));
+				return error;
+			}
+			if ((error = dev_wlc_intvar_set(dev, "wpa_auth", WAPI_AUTH_UNSPECIFIED))) {
+				WL_ERROR(("%s: setting wpa_auth(%d) returned %d\n",
+					__FUNCTION__, WAPI_AUTH_UNSPECIFIED,
+					error));
+				return error;
+			}
+		}
+
+		break;
+
+#endif /* BCMWAPI_WPI */
+
+	default:
+		break;
+	}
+	return 0;
+}
+#define VAL_PSK(_val) (((_val) & WPA_AUTH_PSK) || ((_val) & WPA2_AUTH_PSK))
+
+static int
+wl_iw_get_wpaauth(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error;
+	int paramid;
+	int paramval = 0;
+	int val;
+	wl_iw_t *iw = IW_DEV_IF(dev);
+
+	WL_TRACE(("%s: SIOCGIWAUTH\n", dev->name));
+
+	paramid = vwrq->flags & IW_AUTH_INDEX;
+
+	switch (paramid) {
+	case IW_AUTH_WPA_VERSION:
+		/* supported wpa version disabled or wpa or wpa2 */
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+			return error;
+		if (val & (WPA_AUTH_NONE | WPA_AUTH_DISABLED))
+			paramval = IW_AUTH_WPA_VERSION_DISABLED;
+		else if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED))
+			paramval = IW_AUTH_WPA_VERSION_WPA;
+		else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED))
+			paramval = IW_AUTH_WPA_VERSION_WPA2;
+		break;
+
+	case IW_AUTH_CIPHER_PAIRWISE:
+		paramval = iw->pwsec;
+		break;
+
+	case IW_AUTH_CIPHER_GROUP:
+		paramval = iw->gwsec;
+		break;
+
+	case IW_AUTH_KEY_MGMT:
+		/* psk, 1x */
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+			return error;
+		if (VAL_PSK(val))
+			paramval = IW_AUTH_KEY_MGMT_PSK;
+		else
+			paramval = IW_AUTH_KEY_MGMT_802_1X;
+
+		break;
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		dev_wlc_bufvar_get(dev, "tkip_countermeasures", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_DROP_UNENCRYPTED:
+		dev_wlc_bufvar_get(dev, "wsec_restrict", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		dev_wlc_bufvar_get(dev, "rx_unencrypted_eapol", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_80211_AUTH_ALG:
+		/* open, shared, leap */
+		if ((error = dev_wlc_intvar_get(dev, "auth", &val)))
+			return error;
+		if (!val)
+			paramval = IW_AUTH_ALG_OPEN_SYSTEM;
+		else
+			paramval = IW_AUTH_ALG_SHARED_KEY;
+		break;
+	case IW_AUTH_WPA_ENABLED:
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+			return error;
+		if (val)
+			paramval = TRUE;
+		else
+			paramval = FALSE;
+		break;
+
+#if WIRELESS_EXT > 17
+
+	case IW_AUTH_ROAMING_CONTROL:
+		WL_ERROR(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
+		/* driver control or user space app control */
+		break;
+
+	case IW_AUTH_PRIVACY_INVOKED:
+		paramval = iw->privacy_invoked;
+		break;
+
+#endif /* WIRELESS_EXT > 17 */
+	}
+	vwrq->value = paramval;
+	return 0;
+}
+#endif /* WIRELESS_EXT > 17 */
+
+static const iw_handler wl_iw_handler[] =
+{
+	(iw_handler) wl_iw_config_commit,	/* SIOCSIWCOMMIT */
+	(iw_handler) wl_iw_get_name,		/* SIOCGIWNAME */
+	(iw_handler) NULL,			/* SIOCSIWNWID */
+	(iw_handler) NULL,			/* SIOCGIWNWID */
+	(iw_handler) wl_iw_set_freq,		/* SIOCSIWFREQ */
+	(iw_handler) wl_iw_get_freq,		/* SIOCGIWFREQ */
+	(iw_handler) wl_iw_set_mode,		/* SIOCSIWMODE */
+	(iw_handler) wl_iw_get_mode,		/* SIOCGIWMODE */
+	(iw_handler) NULL,			/* SIOCSIWSENS */
+	(iw_handler) NULL,			/* SIOCGIWSENS */
+	(iw_handler) NULL,			/* SIOCSIWRANGE */
+	(iw_handler) wl_iw_get_range,		/* SIOCGIWRANGE */
+	(iw_handler) NULL,			/* SIOCSIWPRIV */
+	(iw_handler) NULL,			/* SIOCGIWPRIV */
+	(iw_handler) NULL,			/* SIOCSIWSTATS */
+	(iw_handler) NULL,			/* SIOCGIWSTATS */
+	(iw_handler) wl_iw_set_spy,		/* SIOCSIWSPY */
+	(iw_handler) wl_iw_get_spy,		/* SIOCGIWSPY */
+	(iw_handler) NULL,			/* -- hole -- */
+	(iw_handler) NULL,			/* -- hole -- */
+	(iw_handler) wl_iw_set_wap,		/* SIOCSIWAP */
+	(iw_handler) wl_iw_get_wap,		/* SIOCGIWAP */
+#if WIRELESS_EXT > 17
+	(iw_handler) wl_iw_mlme,		/* SIOCSIWMLME */
+#else
+	(iw_handler) NULL,			/* -- hole -- */
+#endif
+	(iw_handler) wl_iw_iscan_get_aplist,	/* SIOCGIWAPLIST */
+#if WIRELESS_EXT > 13
+	(iw_handler) wl_iw_iscan_set_scan,	/* SIOCSIWSCAN */
+	(iw_handler) wl_iw_iscan_get_scan,	/* SIOCGIWSCAN */
+#else	/* WIRELESS_EXT > 13 */
+	(iw_handler) NULL,			/* SIOCSIWSCAN */
+	(iw_handler) NULL,			/* SIOCGIWSCAN */
+#endif	/* WIRELESS_EXT > 13 */
+	(iw_handler) wl_iw_set_essid,		/* SIOCSIWESSID */
+	(iw_handler) wl_iw_get_essid,		/* SIOCGIWESSID */
+	(iw_handler) wl_iw_set_nick,		/* SIOCSIWNICKN */
+	(iw_handler) wl_iw_get_nick,		/* SIOCGIWNICKN */
+	(iw_handler) NULL,			/* -- hole -- */
+	(iw_handler) NULL,			/* -- hole -- */
+	(iw_handler) wl_iw_set_rate,		/* SIOCSIWRATE */
+	(iw_handler) wl_iw_get_rate,		/* SIOCGIWRATE */
+	(iw_handler) wl_iw_set_rts,		/* SIOCSIWRTS */
+	(iw_handler) wl_iw_get_rts,		/* SIOCGIWRTS */
+	(iw_handler) wl_iw_set_frag,		/* SIOCSIWFRAG */
+	(iw_handler) wl_iw_get_frag,		/* SIOCGIWFRAG */
+	(iw_handler) wl_iw_set_txpow,		/* SIOCSIWTXPOW */
+	(iw_handler) wl_iw_get_txpow,		/* SIOCGIWTXPOW */
+#if WIRELESS_EXT > 10
+	(iw_handler) wl_iw_set_retry,		/* SIOCSIWRETRY */
+	(iw_handler) wl_iw_get_retry,		/* SIOCGIWRETRY */
+#endif /* WIRELESS_EXT > 10 */
+	(iw_handler) wl_iw_set_encode,		/* SIOCSIWENCODE */
+	(iw_handler) wl_iw_get_encode,		/* SIOCGIWENCODE */
+	(iw_handler) wl_iw_set_power,		/* SIOCSIWPOWER */
+	(iw_handler) wl_iw_get_power,		/* SIOCGIWPOWER */
+#if WIRELESS_EXT > 17
+	(iw_handler) NULL,			/* -- hole -- */
+	(iw_handler) NULL,			/* -- hole -- */
+	(iw_handler) wl_iw_set_wpaie,		/* SIOCSIWGENIE */
+	(iw_handler) wl_iw_get_wpaie,		/* SIOCGIWGENIE */
+	(iw_handler) wl_iw_set_wpaauth,		/* SIOCSIWAUTH */
+	(iw_handler) wl_iw_get_wpaauth,		/* SIOCGIWAUTH */
+	(iw_handler) wl_iw_set_encodeext,	/* SIOCSIWENCODEEXT */
+	(iw_handler) wl_iw_get_encodeext,	/* SIOCGIWENCODEEXT */
+	(iw_handler) wl_iw_set_pmksa,		/* SIOCSIWPMKSA */
+#endif /* WIRELESS_EXT > 17 */
+};
+
+#if WIRELESS_EXT > 12
+enum {
+	WL_IW_SET_LEDDC = SIOCIWFIRSTPRIV,
+	WL_IW_SET_VLANMODE,
+	WL_IW_SET_PM,
+#if WIRELESS_EXT > 17
+#endif /* WIRELESS_EXT > 17 */
+	WL_IW_SET_LAST
+};
+
+static iw_handler wl_iw_priv_handler[] = {
+	wl_iw_set_leddc,
+	wl_iw_set_vlanmode,
+	wl_iw_set_pm,
+#if WIRELESS_EXT > 17
+#endif /* WIRELESS_EXT > 17 */
+	NULL
+};
+
+static struct iw_priv_args wl_iw_priv_args[] = {
+	{
+		WL_IW_SET_LEDDC,
+		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+		0,
+		"set_leddc"
+	},
+	{
+		WL_IW_SET_VLANMODE,
+		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+		0,
+		"set_vlanmode"
+	},
+	{
+		WL_IW_SET_PM,
+		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+		0,
+		"set_pm"
+	},
+#if WIRELESS_EXT > 17
+#endif /* WIRELESS_EXT > 17 */
+	{ 0, 0, 0, { 0 } }
+};
+
+const struct iw_handler_def wl_iw_handler_def =
+{
+	.num_standard = ARRAYSIZE(wl_iw_handler),
+	.num_private = ARRAY_SIZE(wl_iw_priv_handler),
+	.num_private_args = ARRAY_SIZE(wl_iw_priv_args),
+	.standard = (iw_handler *) wl_iw_handler,
+	.private = wl_iw_priv_handler,
+	.private_args = wl_iw_priv_args,
+#if WIRELESS_EXT >= 19
+	.get_wireless_stats = dhd_get_wireless_stats,
+#endif /* WIRELESS_EXT >= 19 */
+};
+#endif /* WIRELESS_EXT > 12 */
+
+int
+wl_iw_ioctl(
+	struct net_device *dev,
+	struct ifreq *rq,
+	int cmd
+)
+{
+	struct iwreq *wrq = (struct iwreq *) rq;
+	struct iw_request_info info;
+	iw_handler handler;
+	char *extra = NULL;
+	size_t token_size = 1;
+	int max_tokens = 0, ret = 0;
+
+	if (cmd < SIOCIWFIRST ||
+		IW_IOCTL_IDX(cmd) >= ARRAYSIZE(wl_iw_handler) ||
+		!(handler = wl_iw_handler[IW_IOCTL_IDX(cmd)]))
+		return -EOPNOTSUPP;
+
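+	/* Pick a per-command bound for the user payload so a bounce buffer
+	 * of at most max_tokens * token_size is allocated below.
+	 */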
+	switch (cmd) {
+
+	case SIOCSIWESSID:
+	case SIOCGIWESSID:
+	case SIOCSIWNICKN:
+	case SIOCGIWNICKN:
+		max_tokens = IW_ESSID_MAX_SIZE + 1;
+		break;
+
+	case SIOCSIWENCODE:
+	case SIOCGIWENCODE:
+#if WIRELESS_EXT > 17
+	case SIOCSIWENCODEEXT:
+	case SIOCGIWENCODEEXT:
+#endif
+		max_tokens = IW_ENCODING_TOKEN_MAX;
+		break;
+
+	case SIOCGIWRANGE:
+		max_tokens = sizeof(struct iw_range);
+		break;
+
+	case SIOCGIWAPLIST:
+		token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality);
+		max_tokens = IW_MAX_AP;
+		break;
+
+#if WIRELESS_EXT > 13
+	case SIOCGIWSCAN:
+		if (g_iscan)
+			max_tokens = wrq->u.data.length;
+		else
+			max_tokens = IW_SCAN_MAX_DATA;
+		break;
+#endif /* WIRELESS_EXT > 13 */
+
+	case SIOCSIWSPY:
+		token_size = sizeof(struct sockaddr);
+		max_tokens = IW_MAX_SPY;
+		break;
+
+	case SIOCGIWSPY:
+		token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality);
+		max_tokens = IW_MAX_SPY;
+		break;
+	default:
+		break;
+	}
+
+	if (max_tokens && wrq->u.data.pointer) {
+		if (wrq->u.data.length > max_tokens)
+			return -E2BIG;
+
+		if (!(extra = kmalloc(max_tokens * token_size, GFP_KERNEL)))
+			return -ENOMEM;
+
+		if (copy_from_user(extra, wrq->u.data.pointer, wrq->u.data.length * token_size)) {
+			kfree(extra);
+			return -EFAULT;
+		}
+	}
+
+	info.cmd = cmd;
+	info.flags = 0;
+
+	ret = handler(dev, &info, &wrq->u, extra);
+
+	if (extra) {
+		if (copy_to_user(wrq->u.data.pointer, extra, wrq->u.data.length * token_size)) {
+			kfree(extra);
+			return -EFAULT;
+		}
+
+		kfree(extra);
+	}
+
+	return ret;
+}
+
+/* Convert a connection status event into a connection status string.
+ * Returns TRUE if a matching connection status string was found.
+ */
+bool
+wl_iw_conn_status_str(uint32 event_type, uint32 status, uint32 reason,
+	char* stringBuf, uint buflen)
+{
+	typedef struct conn_fail_event_map_t {
+		uint32 inEvent;			/* input: event type to match */
+		uint32 inStatus;		/* input: event status code to match */
+		uint32 inReason;		/* input: event reason code to match */
+		const char* outName;	/* output: failure type */
+		const char* outCause;	/* output: failure cause */
+	} conn_fail_event_map_t;
+
+	/* Map of WLC_E events to connection failure strings; WL_IW_DONT_CARE matches any status or reason */
+#	define WL_IW_DONT_CARE	9999
+	const conn_fail_event_map_t event_map [] = {
+		/* inEvent           inStatus                inReason         */
+		/* outName outCause                                           */
+		{WLC_E_SET_SSID,     WLC_E_STATUS_SUCCESS,   WL_IW_DONT_CARE,
+		"Conn", "Success"},
+		{WLC_E_SET_SSID,     WLC_E_STATUS_NO_NETWORKS, WL_IW_DONT_CARE,
+		"Conn", "NoNetworks"},
+		{WLC_E_SET_SSID,     WLC_E_STATUS_FAIL,      WL_IW_DONT_CARE,
+		"Conn", "ConfigMismatch"},
+		{WLC_E_PRUNE,        WL_IW_DONT_CARE,        WLC_E_PRUNE_ENCR_MISMATCH,
+		"Conn", "EncrypMismatch"},
+		{WLC_E_PRUNE,        WL_IW_DONT_CARE,        WLC_E_RSN_MISMATCH,
+		"Conn", "RsnMismatch"},
+		{WLC_E_AUTH,         WLC_E_STATUS_TIMEOUT,   WL_IW_DONT_CARE,
+		"Conn", "AuthTimeout"},
+		{WLC_E_AUTH,         WLC_E_STATUS_FAIL,      WL_IW_DONT_CARE,
+		"Conn", "AuthFail"},
+		{WLC_E_AUTH,         WLC_E_STATUS_NO_ACK,    WL_IW_DONT_CARE,
+		"Conn", "AuthNoAck"},
+		{WLC_E_REASSOC,      WLC_E_STATUS_FAIL,      WL_IW_DONT_CARE,
+		"Conn", "ReassocFail"},
+		{WLC_E_REASSOC,      WLC_E_STATUS_TIMEOUT,   WL_IW_DONT_CARE,
+		"Conn", "ReassocTimeout"},
+		{WLC_E_REASSOC,      WLC_E_STATUS_ABORT,     WL_IW_DONT_CARE,
+		"Conn", "ReassocAbort"},
+		{WLC_E_PSK_SUP,      WLC_SUP_KEYED,          WL_IW_DONT_CARE,
+		"Sup", "ConnSuccess"},
+		{WLC_E_PSK_SUP,      WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Sup", "WpaHandshakeFail"},
+		{WLC_E_DEAUTH_IND,   WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Conn", "Deauth"},
+		{WLC_E_DISASSOC_IND, WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Conn", "DisassocInd"},
+		{WLC_E_DISASSOC,     WL_IW_DONT_CARE,        WL_IW_DONT_CARE,
+		"Conn", "Disassoc"}
+	};
+
+	const char* name = "";
+	const char* cause = NULL;
+	int i;
+
+	/* Search the event map table for a matching event */
+	for (i = 0;  i < sizeof(event_map)/sizeof(event_map[0]);  i++) {
+		const conn_fail_event_map_t* row = &event_map[i];
+		if (row->inEvent == event_type &&
+		    (row->inStatus == status || row->inStatus == WL_IW_DONT_CARE) &&
+		    (row->inReason == reason || row->inReason == WL_IW_DONT_CARE)) {
+			name = row->outName;
+			cause = row->outCause;
+			break;
+		}
+	}
+
+	/* If found, generate a connection failure string and return TRUE */
+	if (cause) {
+		memset(stringBuf, 0, buflen);
+		snprintf(stringBuf, buflen, "%s %s %02u %02u",
+			name, cause, status, reason);
+		WL_TRACE(("Connection status: %s\n", stringBuf));
+		return TRUE;
+	} else {
+		return FALSE;
+	}
+}
+
+#if (WIRELESS_EXT > 14)
+/* Check if we have received an event that indicates connection failure
+ * If so, generate a connection failure report string.
+ * The caller supplies a buffer to hold the generated string.
+ */
+static bool
+wl_iw_check_conn_fail(wl_event_msg_t *e, char* stringBuf, uint buflen)
+{
+	uint32 event = ntoh32(e->event_type);
+	uint32 status =  ntoh32(e->status);
+	uint32 reason =  ntoh32(e->reason);
+
+	if (wl_iw_conn_status_str(event, status, reason, stringBuf, buflen)) {
+		return TRUE;
+	} else {
+		return FALSE;
+	}
+}
+#endif /* WIRELESS_EXT > 14 */
+
+#ifndef IW_CUSTOM_MAX
+#define IW_CUSTOM_MAX 256 /* size of extra buffer used for translation of events */
+#endif /* IW_CUSTOM_MAX */
+
+void
+wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data)
+{
+#if WIRELESS_EXT > 13
+	union iwreq_data wrqu;
+	char extra[IW_CUSTOM_MAX + 1];
+	int cmd = 0;
+	uint32 event_type = ntoh32(e->event_type);
+	uint16 flags =  ntoh16(e->flags);
+	uint32 datalen = ntoh32(e->datalen);
+	uint32 status =  ntoh32(e->status);
+
+	memset(&wrqu, 0, sizeof(wrqu));
+	memset(extra, 0, sizeof(extra));
+
+	memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+	wrqu.addr.sa_family = ARPHRD_ETHER;
+
+	switch (event_type) {
+	case WLC_E_TXFAIL:
+		cmd = IWEVTXDROP;
+		break;
+#if WIRELESS_EXT > 14
+	case WLC_E_JOIN:
+	case WLC_E_ASSOC_IND:
+	case WLC_E_REASSOC_IND:
+		cmd = IWEVREGISTERED;
+		break;
+	case WLC_E_DEAUTH_IND:
+	case WLC_E_DISASSOC_IND:
+		cmd = SIOCGIWAP;
+		wrqu.data.length = strlen(extra);
+		bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+		bzero(&extra, ETHER_ADDR_LEN);
+		break;
+
+	case WLC_E_LINK:
+	case WLC_E_NDIS_LINK:
+		cmd = SIOCGIWAP;
+		wrqu.data.length = strlen(extra);
+		if (!(flags & WLC_EVENT_MSG_LINK)) {
+			bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+			bzero(&extra, ETHER_ADDR_LEN);
+		}
+		break;
+	case WLC_E_ACTION_FRAME:
+		cmd = IWEVCUSTOM;
+		if (datalen + 1 <= sizeof(extra)) {
+			wrqu.data.length = datalen + 1;
+			extra[0] = WLC_E_ACTION_FRAME;
+			memcpy(&extra[1], data, datalen);
+			WL_TRACE(("WLC_E_ACTION_FRAME len %d \n", wrqu.data.length));
+		}
+		break;
+
+	case WLC_E_ACTION_FRAME_COMPLETE:
+		cmd = IWEVCUSTOM;
+		if (sizeof(status) + 1 <= sizeof(extra)) {
+			wrqu.data.length = sizeof(status) + 1;
+			extra[0] = WLC_E_ACTION_FRAME_COMPLETE;
+			memcpy(&extra[1], &status, sizeof(status));
+			WL_TRACE(("wl_iw_event status %d  \n", status));
+		}
+		break;
+#endif /* WIRELESS_EXT > 14 */
+#if WIRELESS_EXT > 17
+	case WLC_E_MIC_ERROR: {
+		struct	iw_michaelmicfailure  *micerrevt = (struct  iw_michaelmicfailure  *)&extra;
+		cmd = IWEVMICHAELMICFAILURE;
+		wrqu.data.length = sizeof(struct iw_michaelmicfailure);
+		if (flags & WLC_EVENT_MSG_GROUP)
+			micerrevt->flags |= IW_MICFAILURE_GROUP;
+		else
+			micerrevt->flags |= IW_MICFAILURE_PAIRWISE;
+		memcpy(micerrevt->src_addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+		micerrevt->src_addr.sa_family = ARPHRD_ETHER;
+
+		break;
+	}
+
+	case WLC_E_ASSOC_REQ_IE:
+		cmd = IWEVASSOCREQIE;
+		wrqu.data.length = datalen;
+		if (datalen < sizeof(extra))
+			memcpy(extra, data, datalen);
+		break;
+
+	case WLC_E_ASSOC_RESP_IE:
+		cmd = IWEVASSOCRESPIE;
+		wrqu.data.length = datalen;
+		if (datalen < sizeof(extra))
+			memcpy(extra, data, datalen);
+		break;
+
+	case WLC_E_PMKID_CACHE: {
+		struct iw_pmkid_cand *iwpmkidcand = (struct iw_pmkid_cand *)&extra;
+		pmkid_cand_list_t *pmkcandlist;
+		pmkid_cand_t	*pmkidcand;
+		int count;
+
+		if (data == NULL)
+			break;
+
+		cmd = IWEVPMKIDCAND;
+		pmkcandlist = data;
+		count = ntoh32_ua((uint8 *)&pmkcandlist->npmkid_cand);
+		wrqu.data.length = sizeof(struct iw_pmkid_cand);
+		pmkidcand = pmkcandlist->pmkid_cand;
+		while (count) {
+			bzero(iwpmkidcand, sizeof(struct iw_pmkid_cand));
+			if (pmkidcand->preauth)
+				iwpmkidcand->flags |= IW_PMKID_CAND_PREAUTH;
+			bcopy(&pmkidcand->BSSID, &iwpmkidcand->bssid.sa_data,
+			      ETHER_ADDR_LEN);
+			wireless_send_event(dev, cmd, &wrqu, extra);
+			pmkidcand++;
+			count--;
+		}
+		break;
+	}
+#endif /* WIRELESS_EXT > 17 */
+
+	case WLC_E_SCAN_COMPLETE:
+#if WIRELESS_EXT > 14
+		cmd = SIOCGIWSCAN;
+#endif
+		WL_TRACE(("event WLC_E_SCAN_COMPLETE\n"));
+		if ((g_iscan) && (g_iscan->sysioc_pid >= 0) &&
+			(g_iscan->iscan_state != ISCAN_STATE_IDLE))
+			up(&g_iscan->sysioc_sem);
+		break;
+
+	default:
+		/* Cannot translate event */
+		break;
+	}
+
+	if (cmd) {
+		if (cmd == SIOCGIWSCAN)
+			wireless_send_event(dev, cmd, &wrqu, NULL);
+		else
+			wireless_send_event(dev, cmd, &wrqu, extra);
+	}
+
+#if WIRELESS_EXT > 14
+	/* Look for WLC events that indicate a connection failure.
+	 * If found, generate an IWEVCUSTOM event.
+	 */
+	memset(extra, 0, sizeof(extra));
+	if (wl_iw_check_conn_fail(e, extra, sizeof(extra))) {
+		cmd = IWEVCUSTOM;
+		wrqu.data.length = strlen(extra);
+		wireless_send_event(dev, cmd, &wrqu, extra);
+	}
+#endif /* WIRELESS_EXT > 14 */
+
+#endif /* WIRELESS_EXT > 13 */
+}
+
+int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats)
+{
+	int res = 0;
+	wl_cnt_t cnt;
+	int phy_noise;
+	int rssi;
+	scb_val_t scb_val;
+
+	phy_noise = 0;
+	if ((res = dev_wlc_ioctl(dev, WLC_GET_PHY_NOISE, &phy_noise, sizeof(phy_noise))))
+		goto done;
+
+	phy_noise = dtoh32(phy_noise);
+	WL_TRACE(("wl_iw_get_wireless_stats phy noise=%d\n *****", phy_noise));
+
+	scb_val.val = 0;
+	if ((res = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t))))
+		goto done;
+
+	rssi = dtoh32(scb_val.val);
+	WL_TRACE(("wl_iw_get_wireless_stats rssi=%d ****** \n", rssi));
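+	/* Bucket the RSSI into the 0..5 quality scale reported through WEXT. */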
+	if (rssi <= WL_IW_RSSI_NO_SIGNAL)
+		wstats->qual.qual = 0;
+	else if (rssi <= WL_IW_RSSI_VERY_LOW)
+		wstats->qual.qual = 1;
+	else if (rssi <= WL_IW_RSSI_LOW)
+		wstats->qual.qual = 2;
+	else if (rssi <= WL_IW_RSSI_GOOD)
+		wstats->qual.qual = 3;
+	else if (rssi <= WL_IW_RSSI_VERY_GOOD)
+		wstats->qual.qual = 4;
+	else
+		wstats->qual.qual = 5;
+
+	/* Encode dBm in the 8-bit WEXT level/noise fields; 0x100 + value wraps (level reads 0 when RSSI is 0) */
+	wstats->qual.level = 0x100 + rssi;
+	wstats->qual.noise = 0x100 + phy_noise;
+#if WIRELESS_EXT > 18
+	wstats->qual.updated |= (IW_QUAL_ALL_UPDATED | IW_QUAL_DBM);
+#else
+	wstats->qual.updated |= 7;
+#endif /* WIRELESS_EXT > 18 */
+
+#if WIRELESS_EXT > 11
+	WL_TRACE(("wl_iw_get_wireless_stats counters=%d\n *****", (int)sizeof(wl_cnt_t)));
+
+	memset(&cnt, 0, sizeof(wl_cnt_t));
+	res = dev_wlc_bufvar_get(dev, "counters", (char *)&cnt, sizeof(wl_cnt_t));
+	if (res)
+	{
+		WL_ERROR(("wl_iw_get_wireless_stats counters failed error=%d ****** \n", res));
+		goto done;
+	}
+
+	cnt.version = dtoh16(cnt.version);
+	if (cnt.version != WL_CNT_T_VERSION) {
+		WL_TRACE(("\tIncorrect version of counters struct: expected %d; got %d\n",
+			WL_CNT_T_VERSION, cnt.version));
+		goto done;
+	}
+
+	wstats->discard.nwid = 0;
+	wstats->discard.code = dtoh32(cnt.rxundec);
+	wstats->discard.fragment = dtoh32(cnt.rxfragerr);
+	wstats->discard.retries = dtoh32(cnt.txfail);
+	wstats->discard.misc = dtoh32(cnt.rxrunt) + dtoh32(cnt.rxgiant);
+	wstats->miss.beacon = 0;
+
+	WL_TRACE(("wl_iw_get_wireless_stats counters txframe=%d txbyte=%d\n",
+		dtoh32(cnt.txframe), dtoh32(cnt.txbyte)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxfrmtoolong=%d\n", dtoh32(cnt.rxfrmtoolong)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxbadplcp=%d\n", dtoh32(cnt.rxbadplcp)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxundec=%d\n", dtoh32(cnt.rxundec)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxfragerr=%d\n", dtoh32(cnt.rxfragerr)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters txfail=%d\n", dtoh32(cnt.txfail)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxrunt=%d\n", dtoh32(cnt.rxrunt)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters rxgiant=%d\n", dtoh32(cnt.rxgiant)));
+
+#endif /* WIRELESS_EXT > 11 */
+
+done:
+	return res;
+}
+
+static void
+wl_iw_timerfunc(ulong data)
+{
+	iscan_info_t *iscan = (iscan_info_t *)data;
+	iscan->timer_on = 0;
+	if (iscan->iscan_state != ISCAN_STATE_IDLE) {
+		WL_TRACE(("timer trigger\n"));
+		up(&iscan->sysioc_sem);
+	}
+}
+
+static void
+wl_iw_set_event_mask(struct net_device *dev)
+{
+	char eventmask[WL_EVENTING_MASK_LEN];
+	char iovbuf[WL_EVENTING_MASK_LEN + 12];	/* Room for "event_msgs" + '\0' + bitvec */
+
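+	/* Read-modify-write the firmware event mask: fetch it, set the
+	 * WLC_E_SCAN_COMPLETE bit, and write it back.
+	 */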
+	dev_iw_iovar_getbuf(dev, "event_msgs", "", 0, iovbuf, sizeof(iovbuf));
+	bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
+	setbit(eventmask, WLC_E_SCAN_COMPLETE);
+	dev_iw_iovar_setbuf(dev, "event_msgs", eventmask, WL_EVENTING_MASK_LEN,
+		iovbuf, sizeof(iovbuf));
+}
+
+static int
+wl_iw_iscan_prep(wl_scan_params_t *params, wlc_ssid_t *ssid)
+{
+	int err = 0;
+
+	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+	params->bss_type = DOT11_BSSTYPE_ANY;
+	params->scan_type = 0;
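+	/* -1 asks the firmware to use its built-in default for that field. */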
+	params->nprobes = -1;
+	params->active_time = -1;
+	params->passive_time = -1;
+	params->home_time = -1;
+	params->channel_num = 0;
+
+	params->nprobes = htod32(params->nprobes);
+	params->active_time = htod32(params->active_time);
+	params->passive_time = htod32(params->passive_time);
+	params->home_time = htod32(params->home_time);
+	if (ssid && ssid->SSID_len)
+		memcpy(&params->ssid, ssid, sizeof(wlc_ssid_t));
+
+	return err;
+}
+
+static int
+wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action)
+{
+	int params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_iscan_params_t, params));
+	wl_iscan_params_t *params;
+	int err = 0;
+
+	if (ssid && ssid->SSID_len) {
+		params_size += sizeof(wlc_ssid_t);
+	}
+	params = (wl_iscan_params_t*)kmalloc(params_size, GFP_KERNEL);
+	if (params == NULL) {
+		return -ENOMEM;
+	}
+	memset(params, 0, params_size);
+	ASSERT(params_size < WLC_IOCTL_SMLEN);
+
+	err = wl_iw_iscan_prep(&params->params, ssid);
+
+	if (!err) {
+		params->version = htod32(ISCAN_REQ_VERSION);
+		params->action = htod16(action);
+		params->scan_duration = htod16(0);
+
+		/* params_size += OFFSETOF(wl_iscan_params_t, params); */
+		(void) dev_iw_iovar_setbuf(iscan->dev, "iscan", params, params_size,
+			iscan->ioctlbuf, WLC_IOCTL_SMLEN);
+	}
+
+	kfree(params);
+	return err;
+}
+
+static uint32
+wl_iw_iscan_get(iscan_info_t *iscan)
+{
+	iscan_buf_t * buf;
+	iscan_buf_t * ptr;
+	wl_iscan_results_t * list_buf;
+	wl_iscan_results_t list;
+	wl_scan_results_t *results;
+	uint32 status;
+
+	/* buffers are allocated on demand */
+	if (iscan->list_cur) {
+		buf = iscan->list_cur;
+		iscan->list_cur = buf->next;
+	}
+	else {
+		buf = kmalloc(sizeof(iscan_buf_t), GFP_KERNEL);
+		if (!buf)
+			return WL_SCAN_RESULTS_ABORTED;
+		buf->next = NULL;
+		if (!iscan->list_hdr)
+			iscan->list_hdr = buf;
+		else {
+			ptr = iscan->list_hdr;
+			while (ptr->next) {
+				ptr = ptr->next;
+			}
+			ptr->next = buf;
+		}
+	}
+	memset(buf->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
+	list_buf = (wl_iscan_results_t*)buf->iscan_buf;
+	results = &list_buf->results;
+	results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
+	results->version = 0;
+	results->count = 0;
+
+	memset(&list, 0, sizeof(list));
+	list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
+	(void) dev_iw_iovar_getbuf(
+		iscan->dev,
+		"iscanresults",
+		&list,
+		WL_ISCAN_RESULTS_FIXED_SIZE,
+		buf->iscan_buf,
+		WLC_IW_ISCAN_MAXLEN);
+	results->buflen = dtoh32(results->buflen);
+	results->version = dtoh32(results->version);
+	results->count = dtoh32(results->count);
+	WL_TRACE(("results->count = %d\n", results->count));
+
+	WL_TRACE(("results->buflen = %d\n", results->buflen));
+	status = dtoh32(list_buf->status);
+	return status;
+}
+
+static void wl_iw_send_scan_complete(iscan_info_t *iscan)
+{
+	union iwreq_data wrqu;
+
+	memset(&wrqu, 0, sizeof(wrqu));
+
+	/* WEXT expects no payload with the SIOCGIWSCAN event */
+	wireless_send_event(iscan->dev, SIOCGIWSCAN, &wrqu, NULL);
+}
+
+static int
+_iscan_sysioc_thread(void *data)
+{
+	uint32 status;
+	iscan_info_t *iscan = (iscan_info_t *)data;
+
+	DAEMONIZE("iscan_sysioc");
+
+	status = WL_SCAN_RESULTS_PARTIAL;
+	while (down_interruptible(&iscan->sysioc_sem) == 0) {
+		if (iscan->timer_on) {
+			del_timer(&iscan->timer);
+			iscan->timer_on = 0;
+		}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+		rtnl_lock();
+#endif
+		status = wl_iw_iscan_get(iscan);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+		rtnl_unlock();
+#endif
+
+		switch (status) {
+			case WL_SCAN_RESULTS_PARTIAL:
+				WL_TRACE(("iscanresults incomplete\n"));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+				rtnl_lock();
+#endif
+				/* make sure our buffer size is enough before going to the next round */
+				wl_iw_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+				rtnl_unlock();
+#endif
+				/* Reschedule the timer */
+				iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms);
+				add_timer(&iscan->timer);
+				iscan->timer_on = 1;
+				break;
+			case WL_SCAN_RESULTS_SUCCESS:
+				WL_TRACE(("iscanresults complete\n"));
+				iscan->iscan_state = ISCAN_STATE_IDLE;
+				wl_iw_send_scan_complete(iscan);
+				break;
+			case WL_SCAN_RESULTS_PENDING:
+				WL_TRACE(("iscanresults pending\n"));
+				/* Reschedule the timer */
+				iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms);
+				add_timer(&iscan->timer);
+				iscan->timer_on = 1;
+				break;
+			case WL_SCAN_RESULTS_ABORTED:
+				WL_TRACE(("iscanresults aborted\n"));
+				iscan->iscan_state = ISCAN_STATE_IDLE;
+				wl_iw_send_scan_complete(iscan);
+				break;
+			default:
+				WL_TRACE(("iscanresults returned unknown status %d\n", status));
+				break;
+		 }
+	}
+	complete_and_exit(&iscan->sysioc_exited, 0);
+}
+
+int
+wl_iw_attach(struct net_device *dev, void * dhdp)
+{
+	iscan_info_t *iscan = NULL;
+
+	if (!dev)
+		return 0;
+
+	iscan = kmalloc(sizeof(iscan_info_t), GFP_KERNEL);
+	if (!iscan)
+		return -ENOMEM;
+	memset(iscan, 0, sizeof(iscan_info_t));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+	iscan->kthread = NULL;
+#endif
+	iscan->sysioc_pid = -1;
+	/* we only care about the main interface, so save it in a global here */
+	g_iscan = iscan;
+	iscan->dev = dev;
+	iscan->iscan_state = ISCAN_STATE_IDLE;
+
+
+	/* Set up the timer */
+	iscan->timer_ms    = 2000;
+	init_timer(&iscan->timer);
+	iscan->timer.data = (ulong)iscan;
+	iscan->timer.function = wl_iw_timerfunc;
+
+	sema_init(&iscan->sysioc_sem, 0);
+	init_completion(&iscan->sysioc_exited);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+	iscan->kthread = kthread_run(_iscan_sysioc_thread, iscan, "iscan_sysioc");
+	if (IS_ERR(iscan->kthread)) {
+		iscan->kthread = NULL;
+		iscan->sysioc_pid = -1;
+	} else {
+		iscan->sysioc_pid = iscan->kthread->pid;
+	}
+#else
+	iscan->sysioc_pid = kernel_thread(_iscan_sysioc_thread, iscan, 0);
+#endif
+	if (iscan->sysioc_pid < 0) {
+		kfree(iscan);
+		g_iscan = NULL;
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+void wl_iw_detach(void)
+{
+	iscan_buf_t  *buf;
+	iscan_info_t *iscan = g_iscan;
+	if (!iscan)
+		return;
+	if (iscan->sysioc_pid >= 0) {
+		KILL_PROC(iscan->sysioc_pid, SIGTERM);
+		wait_for_completion(&iscan->sysioc_exited);
+	}
+
+	while (iscan->list_hdr) {
+		buf = iscan->list_hdr->next;
+		kfree(iscan->list_hdr);
+		iscan->list_hdr = buf;
+	}
+	kfree(iscan);
+	g_iscan = NULL;
+}
+
+#endif /* USE_IW */
diff --git a/drivers/staging/edison-bcm43340/wl_iw.h b/drivers/staging/edison-bcm43340/wl_iw.h
new file mode 100644
index 0000000..d86042a
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/wl_iw.h
@@ -0,0 +1,164 @@
+/*
+ * Linux Wireless Extensions support
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_iw.h 291086 2011-10-21 01:17:24Z $
+ */
+
+#ifndef _wl_iw_h_
+#define _wl_iw_h_
+
+#include <linux/wireless.h>
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+#include <wlioctl.h>
+
+#define WL_SCAN_PARAMS_SSID_MAX 	10
+#define GET_SSID			"SSID="
+#define GET_CHANNEL			"CH="
+#define GET_NPROBE 			"NPROBE="
+#define GET_ACTIVE_ASSOC_DWELL  	"ACTIVE="
+#define GET_PASSIVE_ASSOC_DWELL  	"PASSIVE="
+#define GET_HOME_DWELL  		"HOME="
+#define GET_SCAN_TYPE			"TYPE="
+
+#define BAND_GET_CMD				"GETBAND"
+#define BAND_SET_CMD				"SETBAND"
+#define DTIM_SKIP_GET_CMD			"DTIMSKIPGET"
+#define DTIM_SKIP_SET_CMD			"DTIMSKIPSET"
+#define SETSUSPEND_CMD				"SETSUSPENDOPT"
+#define PNOSSIDCLR_SET_CMD			"PNOSSIDCLR"
+#define PNOSETUP_SET_CMD			"PNOSETUP " /* trailing space is intentional; the TLV command expects it */
+#define PNOENABLE_SET_CMD			"PNOFORCE"
+#define PNODEBUG_SET_CMD			"PNODEBUG"
+#define TXPOWER_SET_CMD			"TXPOWER"
+
+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
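+/* Illustrative use of the helpers above (sketch only; "ea" stands for any
+ * struct ether_addr instance):
+ *
+ *	printk("BSSID " MACSTR "\n", MAC2STR(ea.octet));
+ */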
+
+/* Structure to keep global parameters */
+typedef struct wl_iw_extra_params {
+	int 	target_channel; /* target channel */
+} wl_iw_extra_params_t;
+
+struct cntry_locales_custom {
+#if defined(SUPPORT_MULTIPLE_REVISION)
+	uint chip_id;
+#endif
+	char iso_abbrev[WLC_CNTRY_BUF_SZ];	/* ISO 3166-1 country abbreviation */
+	char custom_locale[WLC_CNTRY_BUF_SZ];	/* Custom firmware locale */
+	int32 custom_locale_rev;		/* Custom locale revision, default -1 */
+};
+/* ============================================== */
+/* Defines from wlc_pub.h */
+#define	WL_IW_RSSI_MINVAL		-200	/* Low value, e.g. for forcing roam */
+#define	WL_IW_RSSI_NO_SIGNAL	-91	/* NDIS RSSI link quality cutoffs */
+#define	WL_IW_RSSI_VERY_LOW	-80	/* Very low quality cutoffs */
+#define	WL_IW_RSSI_LOW		-70	/* Low quality cutoffs */
+#define	WL_IW_RSSI_GOOD		-68	/* Good quality cutoffs */
+#define	WL_IW_RSSI_VERY_GOOD	-58	/* Very good quality cutoffs */
+#define	WL_IW_RSSI_EXCELLENT	-57	/* Excellent quality cutoffs */
+#define	WL_IW_RSSI_INVALID	 0	/* invalid RSSI value */
+#define MAX_WX_STRING 80
+#define SSID_FMT_BUF_LEN	((4 * 32) + 1)
+#define isprint(c) bcm_isprint(c)
+#define WL_IW_SET_ACTIVE_SCAN	(SIOCIWFIRSTPRIV+1)
+#define WL_IW_GET_RSSI			(SIOCIWFIRSTPRIV+3)
+#define WL_IW_SET_PASSIVE_SCAN	(SIOCIWFIRSTPRIV+5)
+#define WL_IW_GET_LINK_SPEED	(SIOCIWFIRSTPRIV+7)
+#define WL_IW_GET_CURR_MACADDR	(SIOCIWFIRSTPRIV+9)
+#define WL_IW_SET_STOP				(SIOCIWFIRSTPRIV+11)
+#define WL_IW_SET_START			(SIOCIWFIRSTPRIV+13)
+
+#define		G_SCAN_RESULTS		(8 * 1024)
+#define		WE_ADD_EVENT_FIX	0x80
+#define		G_WLAN_SET_ON		0
+#define		G_WLAN_SET_OFF		1
+
+
+typedef struct wl_iw {
+	char nickname[IW_ESSID_MAX_SIZE];
+
+	struct iw_statistics wstats;
+
+	int spy_num;
+	uint32 pwsec;			/* pairwise wsec setting */
+	uint32 gwsec;			/* group wsec setting  */
+	bool privacy_invoked; 		/* IW_AUTH_PRIVACY_INVOKED setting */
+	struct ether_addr spy_addr[IW_MAX_SPY];
+	struct iw_quality spy_qual[IW_MAX_SPY];
+	void  *wlinfo;
+} wl_iw_t;
+
+struct wl_ctrl {
+	struct timer_list *timer;
+	struct net_device *dev;
+	long sysioc_pid;
+	struct semaphore sysioc_sem;
+	struct completion sysioc_exited;
+};
+
+
+#if WIRELESS_EXT > 12
+#include <net/iw_handler.h>
+extern const struct iw_handler_def wl_iw_handler_def;
+#endif /* WIRELESS_EXT > 12 */
+
+extern int wl_iw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+extern void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data);
+extern int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats);
+int wl_iw_attach(struct net_device *dev, void * dhdp);
+int wl_iw_send_priv_event(struct net_device *dev, char *flag);
+
+void wl_iw_detach(void);
+
+#define CSCAN_COMMAND				"CSCAN "
+#define CSCAN_TLV_PREFIX 			'S'
+#define CSCAN_TLV_VERSION			1
+#define CSCAN_TLV_SUBVERSION			0
+#define CSCAN_TLV_TYPE_SSID_IE          'S'
+#define CSCAN_TLV_TYPE_CHANNEL_IE   'C'
+#define CSCAN_TLV_TYPE_NPROBE_IE     'N'
+#define CSCAN_TLV_TYPE_ACTIVE_IE      'A'
+#define CSCAN_TLV_TYPE_PASSIVE_IE    'P'
+#define CSCAN_TLV_TYPE_HOME_IE         'H'
+#define CSCAN_TLV_TYPE_STYPE_IE        'T'
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \
+	iwe_stream_add_event(info, stream, ends, iwe, extra)
+#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \
+	iwe_stream_add_value(info, event, value, ends, iwe, event_len)
+#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \
+	iwe_stream_add_point(info, stream, ends, iwe, extra)
+#else
+#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \
+	iwe_stream_add_event(stream, ends, iwe, extra)
+#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \
+	iwe_stream_add_value(event, value, ends, iwe, event_len)
+#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \
+	iwe_stream_add_point(stream, ends, iwe, extra)
+#endif
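+/* Usage sketch for the compatibility wrappers above (illustrative only;
+ * "stream", "ends" and "iwe" are hypothetical locals of a scan-result
+ * formatter):
+ *
+ *	stream = IWE_STREAM_ADD_EVENT(info, stream, ends, &iwe, IW_EV_ADDR_LEN);
+ *
+ * On kernels >= 2.6.27 the extra "info" argument is passed through; on older
+ * kernels it is dropped.
+ */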
+
+#endif /* _wl_iw_h_ */
diff --git a/drivers/staging/edison-bcm43340/wl_linux_mon.c b/drivers/staging/edison-bcm43340/wl_linux_mon.c
new file mode 100644
index 0000000..210d171
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/wl_linux_mon.c
@@ -0,0 +1,403 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), Linux monitor network interface
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wl_linux_mon.c 425343 2013-09-23 23:04:47Z $
+ */
+
+#include <osl.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/ieee80211.h>
+#include <linux/rtnetlink.h>
+#include <net/ieee80211_radiotap.h>
+
+#include <wlioctl.h>
+#include <bcmutils.h>
+#include <dhd_dbg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+
+typedef enum monitor_states
+{
+	MONITOR_STATE_DEINIT = 0x0,
+	MONITOR_STATE_INIT = 0x1,
+	MONITOR_STATE_INTERFACE_ADDED = 0x2,
+	MONITOR_STATE_INTERFACE_DELETED = 0x4
+} monitor_states_t;
+int dhd_add_monitor(char *name, struct net_device **new_ndev);
+extern int dhd_start_xmit(struct sk_buff *skb, struct net_device *net);
+int dhd_del_monitor(struct net_device *ndev);
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
+
+/**
+ * Local declarations and definitions (not exposed)
+ */
+#ifndef DHD_MAX_IFS
+#define DHD_MAX_IFS 16
+#endif
+#define MON_PRINT(format, ...) printk("DHD-MON: %s " format, __func__, ##__VA_ARGS__)
+#define MON_TRACE MON_PRINT
+
+typedef struct monitor_interface {
+	int radiotap_enabled;
+	struct net_device* real_ndev;	/* The real interface that the monitor is on */
+	struct net_device* mon_ndev;
+} monitor_interface;
+
+typedef struct dhd_linux_monitor {
+	void *dhd_pub;
+	monitor_states_t monitor_state;
+	monitor_interface mon_if[DHD_MAX_IFS];
+	struct mutex lock;		/* lock to protect mon_if */
+} dhd_linux_monitor_t;
+
+static dhd_linux_monitor_t g_monitor;
+
+static struct net_device* lookup_real_netdev(char *name);
+static monitor_interface* ndev_to_monif(struct net_device *ndev);
+static int dhd_mon_if_open(struct net_device *ndev);
+static int dhd_mon_if_stop(struct net_device *ndev);
+static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+static void dhd_mon_if_set_multicast_list(struct net_device *ndev);
+static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr);
+
+static const struct net_device_ops dhd_mon_if_ops = {
+	.ndo_open		= dhd_mon_if_open,
+	.ndo_stop		= dhd_mon_if_stop,
+	.ndo_start_xmit		= dhd_mon_if_subif_start_xmit,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+	.ndo_set_rx_mode = dhd_mon_if_set_multicast_list,
+#else
+	.ndo_set_multicast_list = dhd_mon_if_set_multicast_list,
+#endif
+	.ndo_set_mac_address 	= dhd_mon_if_change_mac,
+};
+
+/**
+ * Local static function definitions
+ */
+
+/* Look up dhd's net device table to find a match (e.g. interface "eth0" is a
+ * match for "mon.eth0"; "p2p-eth0-0" is a match for "mon.p2p-eth0-0")
+ */
+static struct net_device* lookup_real_netdev(char *name)
+{
+	struct net_device *ndev_found = NULL;
+
+	int i;
+	int len = 0;
+	int last_name_len = 0;
+	struct net_device *ndev;
+
+	/* We need to find the interface "p2p-p2p-0" corresponding to the monitor
+	 * interface "mon-p2p-0". Once the mon iface name reaches IFNAMSIZ it is
+	 * reset to "p2p0-0", and the corresponding mon iface becomes "mon-p2p0-0".
+	 */
+	for (i = 0; i < DHD_MAX_IFS; i++) {
+		ndev = dhd_idx2net(g_monitor.dhd_pub, i);
+
+		/* Skip "p2p" and look for "-p2p0-x" in the monitor interface name.
+		 * If it matches, this netdev is the corresponding real_netdev.
+		 */
+		if (ndev && strstr(ndev->name, "p2p-p2p0")) {
+			len = strlen("p2p");
+		} else {
+		/* if "p2p-" is not present, the IFNAMSIZ limit was reached and the
+		 * name was reset. In this case, look for p2p0-x in mon-p2p0-x
+		 */
+			len = 0;
+		}
+		if (ndev && strstr(name, (ndev->name + len))) {
+			if (strlen(ndev->name) > last_name_len) {
+				ndev_found = ndev;
+				last_name_len = strlen(ndev->name);
+			}
+		}
+	}
+
+	return ndev_found;
+}
+
+static monitor_interface* ndev_to_monif(struct net_device *ndev)
+{
+	int i;
+
+	for (i = 0; i < DHD_MAX_IFS; i++) {
+		if (g_monitor.mon_if[i].mon_ndev == ndev)
+			return &g_monitor.mon_if[i];
+	}
+
+	return NULL;
+}
+
+static int dhd_mon_if_open(struct net_device *ndev)
+{
+	int ret = 0;
+
+	MON_PRINT("enter\n");
+	return ret;
+}
+
+static int dhd_mon_if_stop(struct net_device *ndev)
+{
+	int ret = 0;
+
+	MON_PRINT("enter\n");
+	return ret;
+}
+
+static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	int ret = 0;
+	int rtap_len;
+	int qos_len = 0;
+	int dot11_hdr_len = 24;
+	int snap_len = 6;
+	unsigned char *pdata;
+	unsigned short frame_ctl;
+	unsigned char src_mac_addr[6];
+	unsigned char dst_mac_addr[6];
+	struct ieee80211_hdr *dot11_hdr;
+	struct ieee80211_radiotap_header *rtap_hdr;
+	monitor_interface* mon_if;
+
+	MON_PRINT("enter\n");
+
+	mon_if = ndev_to_monif(ndev);
+	if (mon_if == NULL || mon_if->real_ndev == NULL) {
+		MON_PRINT(" cannot find matched net dev, skip the packet\n");
+		goto fail;
+	}
+
+	if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
+		goto fail;
+
+	rtap_hdr = (struct ieee80211_radiotap_header *)skb->data;
+	if (unlikely(rtap_hdr->it_version))
+		goto fail;
+
+	rtap_len = ieee80211_get_radiotap_len(skb->data);
+	if (unlikely(skb->len < rtap_len))
+		goto fail;
+
+	MON_PRINT("radiotap len (should be 14): %d\n", rtap_len);
+
+	/* Skip the radiotap header */
+	skb_pull(skb, rtap_len);
+
+	dot11_hdr = (struct ieee80211_hdr *)skb->data;
+	frame_ctl = le16_to_cpu(dot11_hdr->frame_control);
+	/* Only data frames are forwarded */
+	if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) {
+		/* Check if the QoS bit is set (QoS data subtype) */
+		if (dot11_hdr->frame_control & 0x0080)
+			qos_len = 2;
+		/* A Wireless Distribution System (WDS) frame (ToDS and FromDS
+		 * both set) has 4 MAC addresses
+		 */
+		if ((dot11_hdr->frame_control & 0x0300) == 0x0300)
+			dot11_hdr_len += 6;
+
+		memcpy(dst_mac_addr, dot11_hdr->addr1, sizeof(dst_mac_addr));
+		memcpy(src_mac_addr, dot11_hdr->addr2, sizeof(src_mac_addr));
+
+		/* Skip the 802.11 header, QoS (if any) and SNAP, but leave space
+		 * for the two MAC addresses
+		 */
+		skb_pull(skb, dot11_hdr_len + qos_len + snap_len - sizeof(src_mac_addr) * 2);
+		pdata = (unsigned char*)skb->data;
+		memcpy(pdata, dst_mac_addr, sizeof(dst_mac_addr));
+		memcpy(pdata + sizeof(dst_mac_addr), src_mac_addr, sizeof(src_mac_addr));
+		PKTSETPRIO(skb, 0);
+
+		MON_PRINT("if name: %s, matched if name %s\n", ndev->name, mon_if->real_ndev->name);
+
+		/* Use the real net device to transmit the packet */
+		ret = dhd_start_xmit(skb, mon_if->real_ndev);
+
+		return ret;
+	}
+fail:
+	dev_kfree_skb(skb);
+	return 0;
+}
+
+static void dhd_mon_if_set_multicast_list(struct net_device *ndev)
+{
+	monitor_interface* mon_if;
+
+	mon_if = ndev_to_monif(ndev);
+	if (mon_if == NULL || mon_if->real_ndev == NULL) {
+		MON_PRINT(" cannot find matched net dev\n");
+	} else {
+		MON_PRINT("enter, if name: %s, matched if name %s\n",
+		ndev->name, mon_if->real_ndev->name);
+	}
+}
+
+static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr)
+{
+	int ret = 0;
+	monitor_interface* mon_if;
+
+	mon_if = ndev_to_monif(ndev);
+	if (mon_if == NULL || mon_if->real_ndev == NULL) {
+		MON_PRINT(" cannot find matched net dev\n");
+	} else {
+		MON_PRINT("enter, if name: %s, matched if name %s\n",
+		ndev->name, mon_if->real_ndev->name);
+	}
+	return ret;
+}
+
+/**
+ * Global function definitions (declared in dhd_linux_mon.h)
+ */
+
+int dhd_add_monitor(char *name, struct net_device **new_ndev)
+{
+	int i;
+	int idx = -1;
+	int ret = 0;
+	struct net_device* ndev = NULL;
+	dhd_linux_monitor_t **dhd_mon;
+
+	mutex_lock(&g_monitor.lock);
+
+	MON_TRACE("enter, if name: %s\n", name);
+	if (!name || !new_ndev) {
+		MON_PRINT("invalid parameters\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Find a vacancy
+	 */
+	for (i = 0; i < DHD_MAX_IFS; i++)
+		if (g_monitor.mon_if[i].mon_ndev == NULL) {
+			idx = i;
+			break;
+		}
+	if (idx == -1) {
+		MON_PRINT("exceeds maximum interfaces\n");
+		ret = -EFAULT;
+		goto out;
+	}
+
+	ndev = alloc_etherdev(sizeof(dhd_linux_monitor_t*));
+	if (!ndev) {
+		MON_PRINT("failed to allocate memory\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ndev->type = ARPHRD_IEEE80211_RADIOTAP;
+	strncpy(ndev->name, name, IFNAMSIZ);
+	ndev->name[IFNAMSIZ - 1] = 0;
+	ndev->netdev_ops = &dhd_mon_if_ops;
+
+	ret = register_netdevice(ndev);
+	if (ret) {
+		MON_PRINT(" register_netdevice failed (%d)\n", ret);
+		goto out;
+	}
+
+	*new_ndev = ndev;
+	g_monitor.mon_if[idx].radiotap_enabled = TRUE;
+	g_monitor.mon_if[idx].mon_ndev = ndev;
+	g_monitor.mon_if[idx].real_ndev = lookup_real_netdev(name);
+	dhd_mon = (dhd_linux_monitor_t **)netdev_priv(ndev);
+	*dhd_mon = &g_monitor;
+	g_monitor.monitor_state = MONITOR_STATE_INTERFACE_ADDED;
+	MON_PRINT("net device returned: 0x%p\n", ndev);
+	if (g_monitor.mon_if[idx].real_ndev)
+		MON_PRINT("found a matched net device, name %s\n",
+			g_monitor.mon_if[idx].real_ndev->name);
+	else
+		MON_PRINT("no matching real net device found for %s\n", name);
+
+out:
+	if (ret && ndev)
+		free_netdev(ndev);
+
+	mutex_unlock(&g_monitor.lock);
+	return ret;
+
+}
+
+int dhd_del_monitor(struct net_device *ndev)
+{
+	int i;
+	if (!ndev)
+		return -EINVAL;
+	mutex_lock(&g_monitor.lock);
+	for (i = 0; i < DHD_MAX_IFS; i++) {
+		if (g_monitor.mon_if[i].mon_ndev == ndev ||
+			g_monitor.mon_if[i].real_ndev == ndev) {
+
+			g_monitor.mon_if[i].real_ndev = NULL;
+			unregister_netdevice(g_monitor.mon_if[i].mon_ndev);
+			free_netdev(g_monitor.mon_if[i].mon_ndev);
+			g_monitor.mon_if[i].mon_ndev = NULL;
+			g_monitor.monitor_state = MONITOR_STATE_INTERFACE_DELETED;
+			break;
+		}
+	}
+
+	if (g_monitor.monitor_state != MONITOR_STATE_INTERFACE_DELETED)
+		MON_PRINT("IF not found in monitor array, is this a monitor IF? 0x%p\n", ndev);
+	mutex_unlock(&g_monitor.lock);
+
+	return 0;
+}
+
+int dhd_monitor_init(void *dhd_pub)
+{
+	if (g_monitor.monitor_state == MONITOR_STATE_DEINIT) {
+		g_monitor.dhd_pub = dhd_pub;
+		mutex_init(&g_monitor.lock);
+		g_monitor.monitor_state = MONITOR_STATE_INIT;
+	}
+	return 0;
+}
+
+int dhd_monitor_uninit(void)
+{
+	int i;
+	struct net_device *ndev;
+	mutex_lock(&g_monitor.lock);
+	if (g_monitor.monitor_state != MONITOR_STATE_DEINIT) {
+		for (i = 0; i < DHD_MAX_IFS; i++) {
+			ndev = g_monitor.mon_if[i].mon_ndev;
+			if (ndev) {
+				unregister_netdevice(ndev);
+				free_netdev(ndev);
+				g_monitor.mon_if[i].real_ndev = NULL;
+				g_monitor.mon_if[i].mon_ndev = NULL;
+			}
+		}
+		g_monitor.monitor_state = MONITOR_STATE_DEINIT;
+	}
+	mutex_unlock(&g_monitor.lock);
+	return 0;
+}
diff --git a/drivers/staging/edison-bcm43340/wldev_common.c b/drivers/staging/edison-bcm43340/wldev_common.c
new file mode 100644
index 0000000..2d4831b
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/wldev_common.c
@@ -0,0 +1,385 @@
+/*
+ * Common function shared by Linux WEXT, cfg80211 and p2p drivers
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wldev_common.c 432642 2013-10-29 04:23:40Z $
+ */
+
+#include <osl.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+
+#include <wldev_common.h>
+#include <bcmutils.h>
+
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
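+/* Host<->dongle byte-order helpers; defined as identity here on the
+ * assumption that the host is little-endian like the dongle.
+ */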
+
+#define	WLDEV_ERROR(args)						\
+	do {										\
+		printk(KERN_ERR "WLDEV-ERROR) %s : ", __func__);	\
+		printk args;							\
+	} while (0)
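+/* Note the double parentheses required by the macro above, e.g.:
+ *	WLDEV_ERROR(("get country failed = %d\n", error));
+ */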
+
+extern int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd);
+
+s32 wldev_ioctl(
+	struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set)
+{
+	s32 ret = 0;
+	struct wl_ioctl ioc;
+
+
+	memset(&ioc, 0, sizeof(ioc));
+	ioc.cmd = cmd;
+	ioc.buf = arg;
+	ioc.len = len;
+	ioc.set = set;
+
+	ret = dhd_ioctl_entry_local(dev, &ioc, cmd);
+
+	return ret;
+}
+
+/* Format an iovar buffer, not bsscfg indexed. The bsscfg index will be
+ * taken care of in dhd_ioctl_entry. Internal use only, not exposed to
+ * wl_iw, wl_cfg80211 and wl_cfgp2p
+ */
+static s32 wldev_mkiovar(
+	s8 *iovar_name, s8 *param, s32 paramlen,
+	s8 *iovar_buf, u32 buflen)
+{
+	s32 iolen = 0;
+
+	iolen = bcm_mkiovar(iovar_name, param, paramlen, iovar_buf, buflen);
+	return iolen;
+}
+
+s32 wldev_iovar_getbuf(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
+{
+	s32 ret = 0;
+	if (buf_sync) {
+		mutex_lock(buf_sync);
+	}
+	wldev_mkiovar(iovar_name, param, paramlen, buf, buflen);
+	ret = wldev_ioctl(dev, WLC_GET_VAR, buf, buflen, FALSE);
+	if (buf_sync)
+		mutex_unlock(buf_sync);
+	return ret;
+}
+
+
+s32 wldev_iovar_setbuf(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
+{
+	s32 ret = 0;
+	s32 iovar_len;
+	if (buf_sync) {
+		mutex_lock(buf_sync);
+	}
+	iovar_len = wldev_mkiovar(iovar_name, param, paramlen, buf, buflen);
+	if (iovar_len > 0)
+		ret = wldev_ioctl(dev, WLC_SET_VAR, buf, iovar_len, TRUE);
+	else
+		ret = BCME_BUFTOOSHORT;
+
+	if (buf_sync)
+		mutex_unlock(buf_sync);
+	return ret;
+}
+
+s32 wldev_iovar_setint(
+	struct net_device *dev, s8 *iovar, s32 val)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+
+	val = htod32(val);
+	memset(iovar_buf, 0, sizeof(iovar_buf));
+	return wldev_iovar_setbuf(dev, iovar, &val, sizeof(val), iovar_buf,
+		sizeof(iovar_buf), NULL);
+}
+
+
+s32 wldev_iovar_getint(
+	struct net_device *dev, s8 *iovar, s32 *pval)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+	s32 err;
+
+	memset(iovar_buf, 0, sizeof(iovar_buf));
+	err = wldev_iovar_getbuf(dev, iovar, pval, sizeof(*pval), iovar_buf,
+		sizeof(iovar_buf), NULL);
+	if (err == 0)
+	{
+		memcpy(pval, iovar_buf, sizeof(*pval));
+		*pval = dtoh32(*pval);
+	}
+	return err;
+}
+
+/** Format a bsscfg indexed iovar buffer. The bsscfg index will be
+ *  taken care of in dhd_ioctl_entry. Internal use only, not exposed to
+ *  wl_iw, wl_cfg80211 and wl_cfgp2p
+ */
+s32 wldev_mkiovar_bsscfg(
+	const s8 *iovar_name, s8 *param, s32 paramlen,
+	s8 *iovar_buf, s32 buflen, s32 bssidx)
+{
+	const s8 *prefix = "bsscfg:";
+	s8 *p;
+	u32 prefixlen;
+	u32 namelen;
+	u32 iolen;
+
+	if (bssidx == 0) {
+		return wldev_mkiovar((s8*)iovar_name, (s8 *)param, paramlen,
+			(s8 *) iovar_buf, buflen);
+	}
+
+	prefixlen = (u32) strlen(prefix); /* length of bsscfg prefix */
+	namelen = (u32) strlen(iovar_name) + 1; /* length of iovar name + NUL */
+	iolen = prefixlen + namelen + sizeof(u32) + paramlen;
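+	/* Buffer layout being assembled:
+	 *   "bsscfg:" | iovar_name + NUL | bssidx (u32, dongle order) | params
+	 */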
+
+	if (buflen < 0 || iolen > (u32)buflen)
+	{
+		WLDEV_ERROR(("%s: buffer is too short\n", __FUNCTION__));
+		return BCME_BUFTOOSHORT;
+	}
+
+	p = (s8 *)iovar_buf;
+
+	/* copy prefix, no null */
+	memcpy(p, prefix, prefixlen);
+	p += prefixlen;
+
+	/* copy iovar name including null */
+	memcpy(p, iovar_name, namelen);
+	p += namelen;
+
+	/* bss config index as first param */
+	bssidx = htod32(bssidx);
+	memcpy(p, &bssidx, sizeof(u32));
+	p += sizeof(u32);
+
+	/* parameter buffer follows */
+	if (paramlen)
+		memcpy(p, param, paramlen);
+
+	return iolen;
+
+}
+
+s32 wldev_iovar_getbuf_bsscfg(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync)
+{
+	s32 ret = 0;
+	if (buf_sync) {
+		mutex_lock(buf_sync);
+	}
+
+	wldev_mkiovar_bsscfg(iovar_name, param, paramlen, buf, buflen, bsscfg_idx);
+	ret = wldev_ioctl(dev, WLC_GET_VAR, buf, buflen, FALSE);
+	if (buf_sync) {
+		mutex_unlock(buf_sync);
+	}
+	return ret;
+
+}
+
+s32 wldev_iovar_setbuf_bsscfg(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync)
+{
+	s32 ret = 0;
+	s32 iovar_len;
+	if (buf_sync) {
+		mutex_lock(buf_sync);
+	}
+	iovar_len = wldev_mkiovar_bsscfg(iovar_name, param, paramlen, buf, buflen, bsscfg_idx);
+	if (iovar_len > 0)
+		ret = wldev_ioctl(dev, WLC_SET_VAR, buf, iovar_len, TRUE);
+	else {
+		ret = BCME_BUFTOOSHORT;
+	}
+
+	if (buf_sync) {
+		mutex_unlock(buf_sync);
+	}
+	return ret;
+}
+
+s32 wldev_iovar_setint_bsscfg(
+	struct net_device *dev, s8 *iovar, s32 val, s32 bssidx)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+
+	val = htod32(val);
+	memset(iovar_buf, 0, sizeof(iovar_buf));
+	return wldev_iovar_setbuf_bsscfg(dev, iovar, &val, sizeof(val), iovar_buf,
+		sizeof(iovar_buf), bssidx, NULL);
+}
+
+
+s32 wldev_iovar_getint_bsscfg(
+	struct net_device *dev, s8 *iovar, s32 *pval, s32 bssidx)
+{
+	s8 iovar_buf[WLC_IOCTL_SMLEN];
+	s32 err;
+
+	memset(iovar_buf, 0, sizeof(iovar_buf));
+	err = wldev_iovar_getbuf_bsscfg(dev, iovar, pval, sizeof(*pval), iovar_buf,
+		sizeof(iovar_buf), bssidx, NULL);
+	if (err == 0)
+	{
+		memcpy(pval, iovar_buf, sizeof(*pval));
+		*pval = dtoh32(*pval);
+	}
+	return err;
+}
+
+int wldev_get_link_speed(
+	struct net_device *dev, int *plink_speed)
+{
+	int error;
+
+	if (!plink_speed)
+		return -ENOMEM;
+	error = wldev_ioctl(dev, WLC_GET_RATE, plink_speed, sizeof(int), 0);
+	if (unlikely(error))
+		return error;
+
+	/* Convert internal units of 500 kbps to kbps */
+	*plink_speed *= 500;
+	return error;
+}
+
+int wldev_get_rssi(
+	struct net_device *dev, int *prssi)
+{
+	scb_val_t scb_val;
+	int error;
+
+	if (!prssi)
+		return -ENOMEM;
+	bzero(&scb_val, sizeof(scb_val_t));
+
+	error = wldev_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t), 0);
+	if (unlikely(error))
+		return error;
+
+	*prssi = dtoh32(scb_val.val);
+	return error;
+}
+
+int wldev_get_ssid(
+	struct net_device *dev, wlc_ssid_t *pssid)
+{
+	int error;
+
+	if (!pssid)
+		return -ENOMEM;
+	error = wldev_ioctl(dev, WLC_GET_SSID, pssid, sizeof(wlc_ssid_t), 0);
+	if (unlikely(error))
+		return error;
+	pssid->SSID_len = dtoh32(pssid->SSID_len);
+	return error;
+}
+
+int wldev_get_band(
+	struct net_device *dev, uint *pband)
+{
+	int error;
+
+	error = wldev_ioctl(dev, WLC_GET_BAND, pband, sizeof(uint), 0);
+	return error;
+}
+
+int wldev_set_band(
+	struct net_device *dev, uint band)
+{
+	int error = -1;
+
+	if ((band == WLC_BAND_AUTO) || (band == WLC_BAND_5G) || (band == WLC_BAND_2G)) {
+		error = wldev_ioctl(dev, WLC_SET_BAND, &band, sizeof(band), true);
+		if (!error)
+			dhd_bus_band_set(dev, band);
+	}
+	return error;
+}
+
+int wldev_set_country(
+	struct net_device *dev, char *country_code, bool notify, bool user_enforced)
+{
+	int error = -1;
+	wl_country_t cspec = {{0}, 0, {0}};
+	scb_val_t scbval;
+	char smbuf[WLC_IOCTL_SMLEN];
+
+	if (!country_code)
+		return error;
+
+	bzero(&scbval, sizeof(scb_val_t));
+	error = wldev_iovar_getbuf(dev, "country", NULL, 0, &cspec, sizeof(cspec), NULL);
+	if (error < 0) {
+		WLDEV_ERROR(("%s: get country failed = %d\n", __FUNCTION__, error));
+		return error;
+	}
+
+	if ((error < 0) ||
+	    (strncmp(country_code, cspec.country_abbrev, WLC_CNTRY_BUF_SZ) != 0)) {
+
+		if (user_enforced) {
+			bzero(&scbval, sizeof(scb_val_t));
+			error = wldev_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t), true);
+			if (error < 0) {
+				WLDEV_ERROR(("%s: set country failed due to Disassoc error %d\n",
+					__FUNCTION__, error));
+				return error;
+			}
+		}
+
+		cspec.rev = -1;
+		memcpy(cspec.country_abbrev, country_code, WLC_CNTRY_BUF_SZ);
+		memcpy(cspec.ccode, country_code, WLC_CNTRY_BUF_SZ);
+		dhd_get_customized_country_code(dev, (char *)&cspec.country_abbrev, &cspec);
+		error = wldev_iovar_setbuf(dev, "country", &cspec, sizeof(cspec),
+			smbuf, sizeof(smbuf), NULL);
+		if (error < 0) {
+			WLDEV_ERROR(("%s: set country for %s as %s rev %d failed\n",
+				__FUNCTION__, country_code, cspec.ccode, cspec.rev));
+			return error;
+		}
+		dhd_bus_country_set(dev, &cspec, notify);
+		WLDEV_ERROR(("%s: set country for %s as %s rev %d\n",
+			__FUNCTION__, country_code, cspec.ccode, cspec.rev));
+	}
+	return 0;
+}
diff --git a/drivers/staging/edison-bcm43340/wldev_common.h b/drivers/staging/edison-bcm43340/wldev_common.h
new file mode 100644
index 0000000..378d7bd
--- /dev/null
+++ b/drivers/staging/edison-bcm43340/wldev_common.h
@@ -0,0 +1,118 @@
+/*
+ * Common function shared by Linux WEXT, cfg80211 and p2p drivers
+ *
+ * Copyright (C) 1999-2014, Broadcom Corporation
+ * 
+ *      Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ * 
+ *      As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ * 
+ *      Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: wldev_common.h 434085 2013-11-05 06:09:49Z $
+ */
+#ifndef __WLDEV_COMMON_H__
+#define __WLDEV_COMMON_H__
+
+#include <wlioctl.h>
+
+/* wldev_ioctl - get/set IOCTLs, will call the net_device's do_ioctl (or
+ *  netdev_ops->ndo_do_ioctl in newer kernels)
+ *  @dev: the net_device handle
+ */
+s32 wldev_ioctl(
+	struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set);
+
+/** Retrieve named IOVARs; this function calls wldev_ioctl with the
+ *  WLC_GET_VAR IOCTL code
+ */
+s32 wldev_iovar_getbuf(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync);
+
+/** Set named IOVARs; this function calls wldev_ioctl with the
+ *  WLC_SET_VAR IOCTL code
+ */
+s32 wldev_iovar_setbuf(
+	struct net_device *dev, s8 *iovar_name,
+	void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync);
+
+s32 wldev_iovar_setint(
+	struct net_device *dev, s8 *iovar, s32 val);
+
+s32 wldev_iovar_getint(
+	struct net_device *dev, s8 *iovar, s32 *pval);
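+/* Typical use of the int helpers (sketch; "mpc" is just an example iovar):
+ *
+ *	s32 mpc;
+ *	if (wldev_iovar_getint(ndev, "mpc", &mpc) == 0)
+ *		(void) wldev_iovar_setint(ndev, "mpc", 0);
+ */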
+
+/** The following functions are provided for bsscfg-indexed IOVARs
+ */
+
+s32 wldev_mkiovar_bsscfg(
+	const s8 *iovar_name, s8 *param, s32 paramlen,
+	s8 *iovar_buf, s32 buflen, s32 bssidx);
+
+/** Retrieve named and bsscfg-indexed IOVARs; this function calls wldev_ioctl
+ *  with the WLC_GET_VAR IOCTL code
+ */
+s32 wldev_iovar_getbuf_bsscfg(
+	struct net_device *dev, s8 *iovar_name, void *param, s32 paramlen,
+	void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync);
+
+/** Set named and bsscfg-indexed IOVARs; this function calls wldev_ioctl
+ *  with the WLC_SET_VAR IOCTL code
+ */
+s32 wldev_iovar_setbuf_bsscfg(
+	struct net_device *dev, s8 *iovar_name, void *param, s32 paramlen,
+	void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync);
+
+s32 wldev_iovar_getint_bsscfg(
+	struct net_device *dev, s8 *iovar, s32 *pval, s32 bssidx);
+
+s32 wldev_iovar_setint_bsscfg(
+	struct net_device *dev, s8 *iovar, s32 val, s32 bssidx);
+
+extern int dhd_net_set_fw_path(struct net_device *dev, char *fw);
+extern int dhd_net_bus_suspend(struct net_device *dev);
+extern int dhd_net_bus_resume(struct net_device *dev, uint8 stage);
+extern int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on,
+	unsigned long delay_msec);
+extern void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
+	wl_country_t *cspec);
+extern void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify);
+extern void dhd_bus_band_set(struct net_device *dev, uint band);
+extern int wldev_set_country(struct net_device *dev, char *country_code, bool notify,
+	bool user_enforced);
+extern int net_os_wake_lock(struct net_device *dev);
+extern int net_os_wake_unlock(struct net_device *dev);
+extern int net_os_wake_lock_timeout(struct net_device *dev);
+extern int net_os_wake_lock_timeout_enable(struct net_device *dev, int val);
+extern int net_os_set_dtim_skip(struct net_device *dev, int val);
+extern int net_os_set_suspend_disable(struct net_device *dev, int val);
+extern int net_os_set_suspend(struct net_device *dev, int val, int force);
+extern int wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid,
+	int max, int *bytes_left);
+
+/* Get the link speed from the dongle; speed is in kbps */
+int wldev_get_link_speed(struct net_device *dev, int *plink_speed);
+
+int wldev_get_rssi(struct net_device *dev, int *prssi);
+
+int wldev_get_ssid(struct net_device *dev, wlc_ssid_t *pssid);
+
+int wldev_get_band(struct net_device *dev, uint *pband);
+
+int wldev_set_band(struct net_device *dev, uint band);
+
+#endif /* __WLDEV_COMMON_H__ */
diff --git a/drivers/staging/sep54/Kconfig b/drivers/staging/sep54/Kconfig
new file mode 100644
index 0000000..d502943
--- /dev/null
+++ b/drivers/staging/sep54/Kconfig
@@ -0,0 +1,13 @@
+config DX_SEP54
+	tristate "Discretix SEP driver (CC54)"
+	depends on PCI && MMC
+	select CRYPTO_BLKCIPHER
+	help
+	  Discretix SEP driver for CC54. It drives the security processor
+	  subsystem on board the Intel Mobile Internet Device and adds SEP
+	  availability to the kernel crypto infrastructure.
+
+	  The driver's name is sep_driver.
+
+	  If unsure, say N.
+
diff --git a/drivers/staging/sep54/Makefile b/drivers/staging/sep54/Makefile
new file mode 100644
index 0000000..bd556d0
--- /dev/null
+++ b/drivers/staging/sep54/Makefile
@@ -0,0 +1,14 @@
+obj-$(CONFIG_DX_SEP54) += sep54.o
+sep54-objs := dx_driver.o sep_init.o crypto_ctx_mgr.o sep_sysfs.o \
+                   desc_mgr.o lli_mgr.o crypto_api.o sep_request_mgr.o \
+                   sep_power.o sepapp.o crypto_hwk.o
+
+ifeq ($(CONFIG_COMPAT),y)
+	sep54-objs += sep_compat_ioctl.o
+endif
+
+ccflags-y += -DSEP_SUPPORT_SHA=256 -DCONFIG_NOT_COHERENT_CACHE -DSEP_HWK_UNIT_TEST -DDEBUG
+
+ifeq ($(CONFIG_PM_RUNTIME),y)
+	ccflags-y += -DSEP_RUNTIME_PM
+endif
diff --git a/drivers/staging/sep54/crypto_api.c b/drivers/staging/sep54/crypto_api.c
new file mode 100644
index 0000000..6b23fa8
--- /dev/null
+++ b/drivers/staging/sep54/crypto_api.c
@@ -0,0 +1,1526 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+/* \file crypto_api.c - Implementation of wrappers for the Linux Crypto API */
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_CRYPTO_API
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/workqueue.h>
+#include "dx_driver_abi.h"
+#include "dx_driver.h"
+#include "sep_power.h"
+#include "crypto_ctx_mgr.h"
+#include "crypto_api.h"
+#include "sepapp.h"
+#include "sep_applets.h"
+#include "dx_sepapp_kapi.h"
+
+#include <linux/sched.h>
+
+#define CRYPTO_API_QID 0
+/* Priority assigned to our algorithms implementation */
+#define DX_CRYPTO_PRIO (300 + (100 * CRYPTO_API_QID))
+
+#define SYMCIPHER_ALG_NAME_LEN 8	/* Format: "mod(alg)" */
+#define SYMCIPHER_ALG_NAME_MODE_OFFSET 0
+#define SYMCIPHER_ALG_NAME_MODE_SIZE 3
+#define SYMCIPHER_ALG_NAME_ALG_OFFSET 4
+#define SYMCIPHER_ALG_NAME_ALG_SIZE 3
+
+#define CMD_DO_CRYPTO 7
+#define DISK_ENC_APP_UUID "INTEL DISK ENC01"
+
+/**
+ * struct async_digest_req_ctx - Context for async. digest algorithms requests
+ * @host_ctx:	Host crypto context allocated per request
+ * @result:	Where to copy the digest result.
+ *		When NULL the result is retained in the sep_ctx until "final"
+ *		and this field holds the pointer to its location.
+ * @async_req:	The generic async request context for completion notification
+ */
+struct async_digest_req_ctx {
+	union {
+		struct host_crypto_ctx_hash hash_ctx;
+		struct host_crypto_ctx_mac mac_ctx;
+	} host_ctx;
+	u8 *result;
+	struct async_req_ctx async_req;
+};
+
+/* Client context for the Crypto API operations */
+/* To be initialized by sep_setup */
+static struct sep_client_ctx crypto_api_ctx;
+static struct dma_pool *sep_ctx_pool;
+
+/* Functions from the main driver code that are shared with this module */
+int prepare_data_for_sep(struct sep_op_ctx *op_ctx,
+			 u8 __user *data_in,
+			 struct scatterlist *sgl_in,
+			 u8 __user *data_out,
+			 struct scatterlist *sgl_out,
+			 u32 data_in_size,
+			 enum crypto_data_intent data_intent);
+
+/* Local (static) functions */
+static void release_symcipher_ctx(struct sep_op_ctx *op_ctx,
+				  u8 *iv_crypto);
+
+/****************************************/
+/* Block cipher algorithms declarations */
+/****************************************/
+static int symcipher_set_key(struct crypto_ablkcipher *tfm,
+			     const u8 *key, unsigned int keylen);
+static int symcipher_encrypt(struct ablkcipher_request *req);
+static int symcipher_decrypt(struct ablkcipher_request *req);
+static int symcipher_ctx_init(struct crypto_tfm *tfm);
+static void crypto_ctx_cleanup(struct crypto_tfm *tfm);
+
+/* Template for block ciphers */
+static struct crypto_alg blkcipher_algs_base = {
+	.cra_priority = DX_CRYPTO_PRIO,
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_ctxsize = sizeof(struct host_crypto_ctx_sym_cipher),
+	.cra_alignmask = 0,	/* Cannot use this due to bug in kernel */
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_u = {
+		  .ablkcipher = {
+				 .setkey = symcipher_set_key,
+				 .encrypt = symcipher_encrypt,
+				 .decrypt = symcipher_decrypt}
+		  },
+	.cra_init = symcipher_ctx_init,
+	.cra_exit = crypto_ctx_cleanup
+};
+
+/* Block cipher specific attributes */
+static struct crypto_alg dx_ablkcipher_algs[] = {
+	{			/* xxx(aes) */
+	 .cra_name = "xxx(aes)",
+	 .cra_driver_name = MODULE_NAME "-aes-xxx",
+	 .cra_blocksize = SEP_AES_BLOCK_SIZE,
+	 .cra_u = {
+		   .ablkcipher = {
+				  .min_keysize = SEP_AES_128_BIT_KEY_SIZE,
+				  .max_keysize = SEP_AES_256_BIT_KEY_SIZE,
+				  .ivsize = SEP_AES_IV_SIZE}
+		   }
+	 },
+#ifdef USE_SEP54_AES
+	{			/* ecb(aes) */
+	 .cra_name = "ecb(aes)",
+	 .cra_driver_name = MODULE_NAME "-aes-ecb",
+	 .cra_blocksize = SEP_AES_BLOCK_SIZE,
+	 .cra_u = {
+		   .ablkcipher = {
+				  .min_keysize = SEP_AES_128_BIT_KEY_SIZE,
+				  .max_keysize = SEP_AES_256_BIT_KEY_SIZE,
+				  .ivsize = SEP_AES_IV_SIZE}
+		   }
+	 },
+	{			/* cbc(aes) */
+	 .cra_name = "cbc(aes)",
+	 .cra_driver_name = MODULE_NAME "-aes-cbc",
+	 .cra_blocksize = SEP_AES_BLOCK_SIZE,
+	 .cra_u = {
+		   .ablkcipher = {
+				  .min_keysize = SEP_AES_128_BIT_KEY_SIZE,
+				  .max_keysize = SEP_AES_256_BIT_KEY_SIZE,
+				  .ivsize = SEP_AES_IV_SIZE}
+		   }
+	 },
+	{			/* ctr(aes) */
+	 .cra_name = "ctr(aes)",
+	 .cra_driver_name = MODULE_NAME "-aes-ctr",
+	 .cra_blocksize = SEP_AES_BLOCK_SIZE,
+	 .cra_u = {
+		   .ablkcipher = {
+				  .min_keysize = SEP_AES_128_BIT_KEY_SIZE,
+				  .max_keysize = SEP_AES_256_BIT_KEY_SIZE,
+				  .ivsize = SEP_AES_IV_SIZE}
+		   }
+	 },
+	{			/* xts(aes) */
+	 .cra_name = "xts(aes)",
+	 .cra_driver_name = MODULE_NAME "-aes-xts",
+	 .cra_blocksize = SEP_AES_BLOCK_SIZE,
+	 .cra_u = {
+			/* AES-XTS uses two keys, so the key size is doubled */
+		   .ablkcipher = {
+				  .min_keysize = SEP_AES_128_BIT_KEY_SIZE * 2,
+				  .max_keysize = SEP_AES_256_BIT_KEY_SIZE * 2,
+				  .ivsize = SEP_AES_IV_SIZE}
+		   }
+	 },
+#endif /* USE_SEP54_AES */
+	{			/* ecb(des) */
+	 .cra_name = "ecb(des)",
+	 .cra_driver_name = MODULE_NAME "-des-ecb",
+	 .cra_blocksize = SEP_DES_BLOCK_SIZE,
+	 .cra_u = {
+		   .ablkcipher = {
+				  .min_keysize = SEP_DES_ONE_KEY_SIZE,
+				  .max_keysize = SEP_DES_ONE_KEY_SIZE,
+				  .ivsize = SEP_DES_IV_SIZE}
+		   }
+	 },
+	{			/* cbc(des) */
+	 .cra_name = "cbc(des)",
+	 .cra_driver_name = MODULE_NAME "-des-cbc",
+	 .cra_blocksize = SEP_DES_BLOCK_SIZE,
+	 .cra_u = {
+		   .ablkcipher = {
+				  .min_keysize = SEP_DES_ONE_KEY_SIZE,
+				  .max_keysize = SEP_DES_ONE_KEY_SIZE,
+				  .ivsize = SEP_DES_IV_SIZE}
+		   }
+	 },
+	{			/* ecb(des3_ede) */
+	 .cra_name = "ecb(des3_ede)",
+	 .cra_driver_name = MODULE_NAME "-des3-ecb",
+	 .cra_blocksize = SEP_DES_BLOCK_SIZE,
+	 .cra_u = {
+		   .ablkcipher = {
+				  .min_keysize = SEP_DES_TRIPLE_KEY_SIZE,
+				  .max_keysize = SEP_DES_TRIPLE_KEY_SIZE,
+				  .ivsize = SEP_DES_IV_SIZE}
+		   }
+	 },
+	{			/* cbc(des3_ede) */
+	 .cra_name = "cbc(des3_ede)",
+	 .cra_driver_name = MODULE_NAME "-des3-cbc",
+	 .cra_blocksize = SEP_DES_BLOCK_SIZE,
+	 .cra_u = {
+		   .ablkcipher = {
+				  .min_keysize = SEP_DES_TRIPLE_KEY_SIZE,
+				  .max_keysize = SEP_DES_TRIPLE_KEY_SIZE,
+				  .ivsize = SEP_DES_IV_SIZE}
+		   }
+	 }
+};				/* ablkcipher_algs[] */
+
+#define DX_ABLKCIPHER_NUM \
+	(sizeof(dx_ablkcipher_algs) / sizeof(struct crypto_alg))
+
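+/* Cipher type per algorithm; must stay in the same order as
+ * dx_ablkcipher_algs[] above, since get_symcipher_tfm_cipher_type() maps a
+ * transform to its type by array index.
+ */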
+static const enum dxdi_sym_cipher_type dx_algs_cipher_types[] = {
+	DXDI_SYMCIPHER_AES_XXX,
+#ifdef USE_SEP54_AES
+	DXDI_SYMCIPHER_AES_ECB,
+	DXDI_SYMCIPHER_AES_CBC,
+	DXDI_SYMCIPHER_AES_CTR,
+	DXDI_SYMCIPHER_AES_XTS,
+#endif
+	DXDI_SYMCIPHER_DES_ECB,
+	DXDI_SYMCIPHER_DES_CBC,
+	DXDI_SYMCIPHER_DES_ECB,
+	DXDI_SYMCIPHER_DES_CBC,
+};
+
+/*********************************************/
+/* Digest (hash/MAC) algorithms declarations */
+/*********************************************/
+static int digest_tfm_init(struct crypto_tfm *tfm);
+static int digest_init(struct ahash_request *req);
+static int digest_update(struct ahash_request *req);
+static int digest_final(struct ahash_request *req);
+static int digest_finup(struct ahash_request *req);
+static int digest_integrated(struct ahash_request *req);
+static int mac_setkey(struct crypto_ahash *tfm,
+		      const u8 *key, unsigned int keylen);
+
+/* Save set key in tfm ctx */
+struct mac_key_data {
+	u32 key_size;	/* In octets */
+	u8 key[DXDI_MAC_KEY_SIZE_MAX];
+};
+
+/* Description of a digest (hash/MAC) algorithm */
+struct dx_digest_alg {
+	enum dxdi_hash_type hash_type;
+	enum dxdi_mac_type mac_type;
+	struct ahash_alg ahash;
+};
+
+/* Common attributes for all the digest (hash/MAC) algorithms */
+static struct ahash_alg digest_algs_base = {
+	.init = digest_init,
+	.update = digest_update,
+	.final = digest_final,
+	.finup = digest_finup,
+	.digest = digest_integrated,
+	.halg.base = {
+		      .cra_type = &crypto_ahash_type,
+		      .cra_priority = DX_CRYPTO_PRIO,
+		      .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+		      .cra_alignmask = 0,
+		      .cra_module = THIS_MODULE,
+		      .cra_init = digest_tfm_init}
+};
+
+/* Algorithm specific attributes */
+static struct dx_digest_alg dx_digest_algs[] = {
+#ifdef USE_SEP54_AHASH
+	{			/* sha1 */
+	 .hash_type = DXDI_HASH_SHA1,
+	 .mac_type = DXDI_MAC_NONE,
+	 .ahash = {
+		   .halg.base = {
+				 .cra_name = "sha1",
+				 .cra_driver_name = MODULE_NAME "-sha1",
+				 .cra_blocksize = SHA1_BLOCK_SIZE},
+		   .halg.digestsize = SHA1_DIGEST_SIZE,
+		   .halg.statesize = SHA1_BLOCK_SIZE}
+	 },
+	{			/* sha224 */
+	 .hash_type = DXDI_HASH_SHA224,
+	 .mac_type = DXDI_MAC_NONE,
+	 .ahash = {
+		   .halg.base = {
+				 .cra_name = "sha224",
+				 .cra_driver_name = MODULE_NAME "-sha224",
+				 .cra_blocksize = SHA224_BLOCK_SIZE},
+		   .halg.digestsize = SHA224_DIGEST_SIZE,
+		   .halg.statesize = SHA224_BLOCK_SIZE}
+	 },
+	{			/* sha256 */
+	 .hash_type = DXDI_HASH_SHA256,
+	 .mac_type = DXDI_MAC_NONE,
+	 .ahash = {
+		   .halg.base = {
+				 .cra_name = "sha256",
+				 .cra_driver_name = MODULE_NAME "-sha256",
+				 .cra_blocksize = SHA256_BLOCK_SIZE},
+		   .halg.digestsize = SHA256_DIGEST_SIZE,
+		   .halg.statesize = SHA256_BLOCK_SIZE}
+	 },
+	{			/* hmac(sha1) */
+	 .hash_type = DXDI_HASH_SHA1,
+	 .mac_type = DXDI_MAC_HMAC,
+	 .ahash = {
+		   .setkey = mac_setkey,
+		   .halg.base = {
+				 .cra_name = "hmac(sha1)",
+				 .cra_driver_name = MODULE_NAME "-hmac-sha1",
+				 .cra_blocksize = SHA1_BLOCK_SIZE,
+				 .cra_ctxsize = sizeof(struct mac_key_data)
+				 },
+		   .halg.digestsize = SHA1_DIGEST_SIZE,
+		   .halg.statesize = SHA1_BLOCK_SIZE}
+	 },
+	{			/* hmac(sha224) */
+	 .hash_type = DXDI_HASH_SHA224,
+	 .mac_type = DXDI_MAC_HMAC,
+	 .ahash = {
+		   .setkey = mac_setkey,
+		   .halg.base = {
+				 .cra_name = "hmac(sha224)",
+				 .cra_driver_name = MODULE_NAME "-hmac-sha224",
+				 .cra_blocksize = SHA224_BLOCK_SIZE,
+				 .cra_ctxsize = sizeof(struct mac_key_data)
+				 },
+		   .halg.digestsize = SHA224_DIGEST_SIZE,
+		   .halg.statesize = SHA224_BLOCK_SIZE}
+	 },
+	{			/* hmac(sha256) */
+	 .hash_type = DXDI_HASH_SHA256,
+	 .mac_type = DXDI_MAC_HMAC,
+	 .ahash = {
+		   .setkey = mac_setkey,
+		   .halg.base = {
+				 .cra_name = "hmac(sha256)",
+				 .cra_driver_name = MODULE_NAME "-hmac-sha256",
+				 .cra_blocksize = SHA256_BLOCK_SIZE,
+				 .cra_ctxsize = sizeof(struct mac_key_data)
+				 },
+		   .halg.digestsize = SHA256_DIGEST_SIZE,
+		   .halg.statesize = SHA256_BLOCK_SIZE}
+	 },
+#ifdef USE_SEP54_AES
+	{			/* xcbc(aes) */
+	 .hash_type = DXDI_HASH_NONE,
+	 .mac_type = DXDI_MAC_AES_XCBC_MAC,
+	 .ahash = {
+		   .setkey = mac_setkey,
+		   .halg.base = {
+				 .cra_name = "xcbc(aes)",
+				 .cra_driver_name = MODULE_NAME "-aes-xcbc",
+				 .cra_blocksize = SEP_AES_BLOCK_SIZE,
+				 .cra_ctxsize = sizeof(struct mac_key_data)
+				 },
+		   .halg.digestsize = SEP_AES_BLOCK_SIZE,
+		   .halg.statesize = SEP_AES_BLOCK_SIZE}
+	 },
+	{			/* cmac(aes) */
+	 .hash_type = DXDI_HASH_NONE,
+	 .mac_type = DXDI_MAC_AES_CMAC,
+	 .ahash = {
+		   .setkey = mac_setkey,
+		   .halg.base = {
+				 .cra_name = "cmac(aes)",
+				 .cra_driver_name = MODULE_NAME "-aes-cmac",
+				 .cra_blocksize = SEP_AES_BLOCK_SIZE,
+				 .cra_ctxsize = sizeof(struct mac_key_data)
+				 },
+		   .halg.digestsize = SEP_AES_BLOCK_SIZE,
+		   .halg.statesize = SEP_AES_BLOCK_SIZE}
+	 }
+#endif /* USE_SEP54_AES */
+#endif /* USE_SEP54_AHASH */
+};				/* dx_digest_algs[] */
+
+#define DX_DIGEST_NUM \
+	(sizeof(dx_digest_algs) / sizeof(struct dx_digest_alg))
+
+static void crypto_ctx_cleanup(struct crypto_tfm *tfm)
+{
+	struct host_crypto_ctx *host_ctx_p = crypto_tfm_ctx(tfm);
+	struct device *mydev = crypto_api_ctx.drv_data->sep_data->dev;
+	struct client_crypto_ctx_info _ctx_info;
+	struct client_crypto_ctx_info *ctx_info = &_ctx_info;
+	int rc;
+
+	pr_debug("Cleaning context @%p for %s\n",
+		      host_ctx_p, crypto_tfm_alg_name(tfm));
+
+	rc = ctxmgr_map_kernel_ctx(ctx_info, mydev, host_ctx_p->alg_class,
+				   (struct host_crypto_ctx *)host_ctx_p, NULL,
+				   0);
+	if (rc != 0) {
+		pr_err("Failed mapping context @%p (rc=%d)\n",
+			    host_ctx_p, rc);
+		return;
+	}
+
+	/* New TEE method */
+	if (!memcmp(crypto_tfm_alg_name(tfm), "xxx(aes)", 8)) {
+		if (dx_sepapp_session_close(host_ctx_p->sctx,
+						host_ctx_p->sess_id))
+			BUG(); /* TODO */
+		dx_sepapp_context_free(host_ctx_p->sctx);
+	}
+
+	ctxmgr_set_ctx_state(ctx_info, CTX_STATE_UNINITIALIZED);
+
+	ctxmgr_unmap_kernel_ctx(ctx_info);
+}
+
+/**
+ * dispatch_crypto_op() - Dispatch (async.) CRYPTO_OP descriptor operation
+ * @op_ctx:		Operation context
+ * @may_backlog:	If software queue is full, may be put in backlog queue
+ * @do_init:		Initialize given crypto context
+ * @proc_mode:		Processing mode code
+ * @keep_in_cache:	Retain crypto context in cache after dispatching req.
+ *
+ * Returns -EINPROGRESS on successful dispatch into the SW descriptor queue.
+ * Returns -EBUSY if may_backlog==true and the descriptor was enqueued in
+ * the backlog queue.
+ * Returns -ENOMEM if the queue is full and the request cannot be enqueued
+ * in the backlog queue
+ */
+static int dispatch_crypto_op(struct sep_op_ctx *op_ctx, bool may_backlog,
+			      bool do_init, enum sep_proc_mode proc_mode,
+			      bool keep_in_cache)
+{
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct client_crypto_ctx_info *ctx_info = &op_ctx->ctx_info;
+	int sep_ctx_load_req;
+	struct crypto_ctx_uid ctx_id = ctxmgr_get_ctx_id(ctx_info);
+	int rc;
+	struct sep_sw_desc desc;
+
+	/* Start critical section -
+	   cache allocation must be coupled to descriptor enqueue */
+	mutex_lock(&drvdata->desc_queue_sequencer);
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_get();
+#endif
+	ctxmgr_set_sep_cache_idx(ctx_info,
+				 ctxmgr_sep_cache_alloc(drvdata->sep_cache,
+							ctx_id,
+							&sep_ctx_load_req));
+	desc_q_pack_crypto_op_desc(&desc, op_ctx, sep_ctx_load_req, do_init,
+				   proc_mode);
+	/* op_state must be updated before dispatching descriptor */
+	rc = desc_q_enqueue(drvdata->desc_queue, &desc, may_backlog);
+	if (unlikely(IS_DESCQ_ENQUEUE_ERR(rc)))
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	if ((!keep_in_cache) || unlikely(IS_DESCQ_ENQUEUE_ERR(rc)))
+		ctxmgr_sep_cache_invalidate(drvdata->sep_cache, ctx_id,
+					    CRYPTO_CTX_ID_SINGLE_MASK);
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_put();
+#endif
+	mutex_unlock(&drvdata->desc_queue_sequencer);
+	return rc;
+}
+
+/**
+ * process_digest_fin() - Process the finalization event for a hash operation
+ *
+ * @digest_req:	The async. digest request context
+ *
+ */
+static void process_digest_fin(struct async_digest_req_ctx *digest_req)
+{
+	u8 digest_size;
+	u8 *digest_ptr;
+	struct sep_op_ctx *op_ctx = &digest_req->async_req.op_ctx;
+#ifdef DEBUG
+	struct crypto_ahash *ahash_tfm;
+#endif
+
+	if (op_ctx->op_type == SEP_OP_CRYPTO_FINI) {
+		/* Handle digest copy back to "result" */
+		digest_size =
+		    ctxmgr_get_digest_or_mac_ptr(&op_ctx->ctx_info,
+						 &digest_ptr);
+		if (unlikely(digest_ptr == NULL)) {
+			pr_err("Failed fetching digest/MAC\n");
+			return;
+		}
+		if (digest_req->result != NULL)
+			memcpy(digest_req->result, digest_ptr, digest_size);
+		else	/* Save pointer to result (to be copied on "final") */
+			digest_req->result = digest_ptr;
+#ifdef DEBUG
+		dump_byte_array("digest", digest_req->result, digest_size);
+		ahash_tfm =
+		    crypto_ahash_reqtfm(container_of
+					(digest_req->async_req.initiating_req,
+					 struct ahash_request, base));
+		if (digest_size != crypto_ahash_digestsize(ahash_tfm))
+			pr_err("Read digest of %u B. Expected %u B.\n",
+				    digest_size,
+				    crypto_ahash_digestsize(ahash_tfm));
+#endif
+	}
+	crypto_op_completion_cleanup(op_ctx);
+	ctxmgr_unmap_kernel_ctx(&op_ctx->ctx_info);
+}
+
+static void dx_crypto_api_handle_op_completion(struct work_struct *work)
+{
+	struct async_req_ctx *areq_ctx =
+	    container_of(work, struct async_req_ctx, comp_work);
+	struct sep_op_ctx *op_ctx = &areq_ctx->op_ctx;
+	struct ablkcipher_request *ablkcipher_req;
+	struct crypto_async_request *initiating_req = areq_ctx->initiating_req;
+	int err = 0;
+	u8 *req_info_p;/* For state persistency in caller's context (IV) */
+
+	pr_debug("req=%p op_ctx=%p\n", initiating_req, op_ctx);
+	if (op_ctx == NULL) {
+		pr_err("Invalid work context (%p)\n", work);
+		return;
+	}
+
+	if (op_ctx->op_state == USER_OP_COMPLETED) {
+
+		if (unlikely(op_ctx->error_info != 0)) {
+			pr_err("SeP crypto-op failed (sep_rc=0x%08X)\n",
+				    op_ctx->error_info);
+		}
+		switch (crypto_tfm_alg_type(initiating_req->tfm)) {
+		case CRYPTO_ALG_TYPE_ABLKCIPHER:
+			/* Resolve to "info" (IV, etc.) for given alg_type */
+			crypto_op_completion_cleanup(op_ctx);
+			ablkcipher_req = (struct ablkcipher_request *)
+			    container_of(initiating_req,
+					 struct ablkcipher_request, base);
+			req_info_p = ablkcipher_req->info;
+			release_symcipher_ctx(op_ctx, req_info_p);
+			break;
+		case CRYPTO_ALG_TYPE_AHASH:
+			process_digest_fin(container_of(areq_ctx,
+					struct async_digest_req_ctx,
+					async_req));
+			break;
+		default:
+			pr_err("Unsupported alg_type (%d)\n",
+				    crypto_tfm_alg_type(initiating_req->tfm));
+		}
+		/* Save ret_code info before cleaning op_ctx */
+		err = -(op_ctx->error_info);
+		if (unlikely(err == -EINPROGRESS)) {
+			/* SeP error code collides with EINPROGRESS */
+			pr_err("Invalid SeP error code 0x%08X\n",
+				    op_ctx->error_info);
+			err = -EINVAL;	/* fallback */
+		}
+		op_ctx_fini(op_ctx);
+	} else if (op_ctx->op_state == USER_OP_INPROC) {
+		/* Report with the callback the dispatch from backlog to
+		   the actual processing in the SW descriptors queue
+		   (Returned -EBUSY when the request was dispatched) */
+		err = -EINPROGRESS;
+	} else {
+		pr_err("Invalid state (%d) for op_ctx %p\n",
+			    op_ctx->op_state, op_ctx);
+		BUG();
+	}
+	if (likely(initiating_req->complete != NULL))
+		initiating_req->complete(initiating_req, err);
+	else
+		pr_err("Async. operation has no completion callback.\n");
+}
+
+/****************************************************/
+/* Block cipher algorithms                          */
+/****************************************************/
+
+/**
+ * get_symcipher_tfm_cipher_type() - Get cipher type of given symcipher
+ *					transform
+ * @tfm:
+ *
+ * Returns enum dxdi_sym_cipher_type (DXDI_SYMCIPHER_NONE for invalid)
+ */
+static enum dxdi_sym_cipher_type get_symcipher_tfm_cipher_type(
+						struct crypto_tfm *tfm)
+{
+	const int alg_index = tfm->__crt_alg - dx_ablkcipher_algs;
+
+	if ((alg_index < 0) || (alg_index >= DX_ABLKCIPHER_NUM)) {
+		pr_err("Unknown alg: %s\n", crypto_tfm_alg_name(tfm));
+		return DXDI_SYMCIPHER_NONE;
+	}
+
+	return dx_algs_cipher_types[alg_index];
+}
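+
+/*
+ * Editor's note: the lookup above relies on tfm->__crt_alg pointing into
+ * the static dx_ablkcipher_algs[] array, so subtracting the array base
+ * yields the element index (pointer subtraction counts elements, not
+ * bytes). A self-contained sketch of the same bounds-checked pattern;
+ * table[] and lookup_index() are illustrative only:
+ */
+#if 0
+static const int table[4] = { 10, 20, 30, 40 };
+
+static int lookup_index(const int *elem)
+{
+	const ptrdiff_t idx = elem - table;	/* element-wise distance */
+
+	if (idx < 0 || idx >= ARRAY_SIZE(table))
+		return -1;	/* pointer does not reference table[] */
+	return (int)idx;
+}
+#endif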
+
+static int symcipher_ctx_init(struct crypto_tfm *tfm)
+{
+	struct ablkcipher_tfm *ablktfm = &tfm->crt_ablkcipher;
+	struct host_crypto_ctx_sym_cipher *host_ctx_p = crypto_tfm_ctx(tfm);
+	struct device *mydev = crypto_api_ctx.drv_data->sep_data->dev;
+	struct client_crypto_ctx_info _ctx_info;
+	struct client_crypto_ctx_info *ctx_info = &_ctx_info;
+	enum dxdi_sym_cipher_type cipher_type =
+	    get_symcipher_tfm_cipher_type(tfm);
+	int rc;
+
+	pr_debug("Initializing context @%p for %s (%d)\n",
+		      host_ctx_p, crypto_tfm_alg_name(tfm), cipher_type);
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_get();
+#endif
+	ablktfm->reqsize += sizeof(struct async_req_ctx);
+	rc = ctxmgr_map_kernel_ctx(ctx_info, mydev, ALG_CLASS_SYM_CIPHER,
+				   (struct host_crypto_ctx *)host_ctx_p, NULL,
+				   0);
+	if (rc != 0) {
+		pr_err("Failed mapping context (rc=%d)\n", rc);
+#ifdef SEP_RUNTIME_PM
+		dx_sep_pm_runtime_put();
+#endif
+		return rc;
+	}
+	/* Allocate a new Crypto context ID */
+	ctxmgr_set_ctx_id(ctx_info, alloc_crypto_ctx_id(&crypto_api_ctx));
+	rc = ctxmgr_init_symcipher_ctx_no_props(ctx_info, cipher_type);
+	if (unlikely(rc != 0)) {
+		pr_err("Failed initializing context\n");
+		ctxmgr_set_ctx_state(ctx_info, CTX_STATE_UNINITIALIZED);
+	} else {
+		ctxmgr_set_ctx_state(ctx_info, CTX_STATE_PARTIAL_INIT);
+	}
+	ctxmgr_unmap_kernel_ctx(ctx_info);
+
+	/* New TEE method */
+	if (!memcmp(crypto_tfm_alg_name(tfm), "xxx(aes)", 8)) {
+		u8 uuid[16] = DISK_ENC_APP_UUID;
+		enum dxdi_sep_module ret_origin;
+
+		host_ctx_p->sctx = dx_sepapp_context_alloc();
+		if (unlikely(!host_ctx_p->sctx)) {
+			rc = -ENOMEM;
+			goto init_end;
+		}
+
+		rc = dx_sepapp_session_open(host_ctx_p->sctx,
+				uuid, 0, NULL, NULL, &host_ctx_p->sess_id,
+				&ret_origin);
+		if (unlikely(rc != 0))
+			dx_sepapp_context_free(host_ctx_p->sctx);
+	}
+
+init_end:
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_put();
+#endif
+	return rc;
+}
+
+/**
+ * symcipher_set_key() - Set key for given symmetric cipher context
+ * @tfm:
+ * @key:
+ * @keylen:
+ *
+ * Set key for given symmetric cipher context
+ * Setting a key implies initialization of the context
+ * Returns int
+ */
+static int symcipher_set_key(struct crypto_ablkcipher *tfm,
+			     const u8 *key, unsigned int keylen)
+{
+	struct host_crypto_ctx_sym_cipher *host_ctx_p =
+	    crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+	struct device *mydev = crypto_api_ctx.drv_data->sep_data->dev;
+	struct client_crypto_ctx_info _ctx_info;
+	struct client_crypto_ctx_info *ctx_info = &_ctx_info;
+	enum dxdi_sym_cipher_type cipher_type =
+	    get_symcipher_tfm_cipher_type(crypto_ablkcipher_tfm(tfm));
+	u32 tfm_flags = crypto_ablkcipher_get_flags(tfm);
+	int rc;
+
+	if (cipher_type == DXDI_SYMCIPHER_NONE)
+		return -EINVAL;
+
+	if (keylen > DXDI_SYM_KEY_SIZE_MAX) {
+		pr_err("keylen=%u > %u\n", keylen, DXDI_SYM_KEY_SIZE_MAX);
+		tfm_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		crypto_ablkcipher_set_flags(tfm, tfm_flags);
+		return -EINVAL;
+	}
+
+	pr_debug("alg=%s (%d) , keylen=%u\n",
+		      crypto_tfm_alg_name(crypto_ablkcipher_tfm(tfm)),
+		      cipher_type, keylen);
+
+	rc = ctxmgr_map_kernel_ctx(ctx_info, mydev, ALG_CLASS_SYM_CIPHER,
+				   (struct host_crypto_ctx *)host_ctx_p, NULL,
+				   0);
+	if (unlikely(rc != 0)) {
+		pr_err("Failed mapping context (rc=%d)\n", rc);
+		return rc;
+	}
+
+	if (ctxmgr_get_ctx_state(ctx_info) == CTX_STATE_UNINITIALIZED) {
+		pr_err("Invoked for uninitialized context @%p\n",
+			    host_ctx_p);
+		rc = -EINVAL;
+	} else {		/* Modify algorithm key */
+		rc = ctxmgr_set_symcipher_key(ctx_info, keylen, key);
+		if (rc != 0) {
+			if (rc == -EINVAL) {
+				pr_info("Invalid keylen=%u\n", keylen);
+				tfm_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+			} else if (rc == -EPERM) {
+				pr_info("Invalid/weak key\n");
+				tfm_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+			} else {
+				pr_err("Unknown key setting error (%d)\n",
+					    rc);
+			}
+		}
+	}
+
+	ctxmgr_unmap_kernel_ctx(ctx_info);
+	crypto_ablkcipher_set_flags(tfm, tfm_flags);
+	return rc;
+}
+
+/**
+ * prepare_symcipher_ctx_for_processing() - Prepare crypto context resources
+ *						before dispatching an operation
+ *
+ * @op_ctx:	The associate operation context (from async req ctx)
+ * @host_ctx_p:	The host context to use (from tfm)
+ * @iv_crypto:	A pointer to IV (from req->info)
+ * @direction:	Requested cipher direction
+ */
+static int prepare_symcipher_ctx_for_processing(struct sep_op_ctx *op_ctx,
+				struct host_crypto_ctx_sym_cipher *host_ctx_p,
+				u8 *iv_crypto,
+				enum dxdi_cipher_direction direction)
+{
+	struct client_crypto_ctx_info *ctx_info = &op_ctx->ctx_info;
+	struct device *mydev = crypto_api_ctx.drv_data->sep_data->dev;
+	struct sep_ctx_cache_entry *sep_ctx_p;
+	dma_addr_t sep_ctx_dma_addr;
+	int rc;
+
+	sep_ctx_p = dma_pool_alloc(sep_ctx_pool, GFP_KERNEL, &sep_ctx_dma_addr);
+	if (sep_ctx_p == NULL) {
+		pr_err("Failed allocating SeP context buffer\n");
+		return -ENOMEM;
+	}
+	rc = ctxmgr_map_kernel_ctx(ctx_info, mydev,
+				   ALG_CLASS_SYM_CIPHER,
+				   (struct host_crypto_ctx *)host_ctx_p,
+				   sep_ctx_p, sep_ctx_dma_addr);
+	if (unlikely(rc != 0)) {
+		pr_err("Failed mapping context (rc=%d)\n", rc);
+	} else {
+		ctxmgr_set_symcipher_iv(ctx_info, iv_crypto);
+		rc = ctxmgr_set_symcipher_direction(ctx_info, direction);
+		if (unlikely(rc != 0)) {
+			pr_err("Failed setting direction %d (rc=%d)\n",
+				    direction, rc);
+		}
+	}
+
+	if (unlikely(rc != 0)) {
+		/* Invalidate context on error */
+		ctxmgr_set_ctx_state(ctx_info, CTX_STATE_UNINITIALIZED);
+		ctxmgr_unmap_kernel_ctx(ctx_info);
+		dma_pool_free(sep_ctx_pool, sep_ctx_p, sep_ctx_dma_addr);
+#ifdef DEBUG
+	} else {		/* Context was changed by host */
+		ctxmgr_dump_sep_ctx(ctx_info);
+#endif
+		/* No need to dma_sync - sep_ctx is dma coherent mem. */
+	}
+
+	return rc;
+}
+
+/**
+ * release_symcipher_ctx() - Sync the crypto context (IV) back to req->info
+ *				to be able to track IV changes
+ * @op_ctx:	The associated operation context (from async req ctx)
+ * @iv_crypto:	The Crypto API IV buffer (req->info)
+ *
+ * Sync the crypto context (IV) back to req->info to be able to track
+ * IV changes, then unmap the context.
+ */
+static void release_symcipher_ctx(struct sep_op_ctx *op_ctx,
+				  u8 *iv_crypto)
+{
+	struct client_crypto_ctx_info *ctx_info = &op_ctx->ctx_info;
+	u8 iv_sep_ctx[DXDI_AES_BLOCK_SIZE];
+	u8 iv_size = DXDI_AES_BLOCK_SIZE;	/* Init. to max. */
+	struct sep_ctx_cache_entry *sep_ctx_p = ctx_info->sep_ctx_kptr;
+	dma_addr_t sep_ctx_dma_addr = ctx_info->sep_ctx_dma_addr;
+	int rc;
+
+	if (iv_crypto != NULL) {	/* Save IV (block state) */
+		rc = ctxmgr_get_symcipher_iv(ctx_info, NULL, iv_sep_ctx,
+					     &iv_size);
+		if (likely(rc == 0)) {
+			if (iv_size > 0)
+				memcpy(iv_crypto, iv_sep_ctx, iv_size);
+		} else {
+			pr_err("Fail: getting IV information for ctx@%p\n",
+				    ctx_info->ctx_kptr);
+		}
+	}
+
+	ctxmgr_unmap_kernel_ctx(ctx_info);
+	dma_pool_free(sep_ctx_pool, sep_ctx_p, sep_ctx_dma_addr);
+}
+
+/**
+ * symcipher_process() - Process (encrypt/decrypt) data for given block-cipher
+ *			algorithm
+ * @req:	The async. request structure
+ * @direction:	Cipher operation direction
+ *
+ */
+static int symcipher_process(struct ablkcipher_request *req,
+			     enum dxdi_cipher_direction direction)
+{
+	struct async_req_ctx *areq_ctx = ablkcipher_request_ctx(req);
+	struct sep_op_ctx *op_ctx = &areq_ctx->op_ctx;
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct host_crypto_ctx_sym_cipher *host_ctx_p =
+	    crypto_ablkcipher_ctx(tfm);
+	int rc;
+
+	pr_debug("alg=%s %scrypt (req=%p, op_ctx=%p, host_ctx=%p)\n",
+		      crypto_tfm_alg_name(crypto_ablkcipher_tfm(tfm)),
+		      direction == DXDI_CDIR_ENC ? "en" : "de",
+		      req, op_ctx, host_ctx_p);
+
+	/* Initialize async. req. context */
+	areq_ctx->initiating_req = &req->base;
+
+	/* Old method */
+	if (memcmp(crypto_tfm_alg_name(crypto_ablkcipher_tfm(tfm)),
+			"xxx(aes)", 8)) {
+		INIT_WORK(&areq_ctx->comp_work,
+				dx_crypto_api_handle_op_completion);
+		op_ctx_init(op_ctx, &crypto_api_ctx);
+		op_ctx->op_type = SEP_OP_CRYPTO_PROC;
+		op_ctx->comp_work = &areq_ctx->comp_work;
+
+		rc = prepare_symcipher_ctx_for_processing(op_ctx,
+				host_ctx_p, req->info,
+				direction);
+		if (unlikely(rc != 0)) {
+			op_ctx_fini(op_ctx);
+			return rc;
+		}
+
+		rc = prepare_data_for_sep(op_ctx, NULL, req->src,
+					  NULL, req->dst,
+					  req->nbytes, CRYPTO_DATA_TEXT);
+		if (unlikely(rc != 0)) {
+			SEP_LOG_ERR(
+				    "Failed preparing DMA buffers (rc=%d, err_info=0x%08X)\n",
+				    rc, op_ctx->error_info);
+			if (op_ctx->error_info == DXDI_ERROR_INVAL_DATA_SIZE) {
+				SEP_LOG_ERR("Invalid data unit size %u\n",
+						req->nbytes);
+				req->base.flags |=
+						CRYPTO_TFM_RES_BAD_BLOCK_LEN;
+			}
+		} else {		/* Initiate processing */
+			/* Async. block cipher op. cannot reuse a cache entry
+			   because the IV is set on every operation. Invalidate
+			   before releasing the sequencer (that's "async"
+			   invalidation) */
+			rc = dispatch_crypto_op(op_ctx,
+					req->base.
+					flags & CRYPTO_TFM_REQ_MAY_BACKLOG,
+					true /*init. */ , SEP_PROC_MODE_PROC_T,
+					false /*cache */);
+		}
+		if (unlikely(IS_DESCQ_ENQUEUE_ERR(rc))) { /* Dispatch failure */
+			crypto_op_completion_cleanup(op_ctx);
+			release_symcipher_ctx(op_ctx, req->info);
+			op_ctx_fini(op_ctx);
+		}
+	} else {
+		/* New method with TEE API */
+		struct dxdi_sepapp_kparams *cmd_params =
+			kzalloc(sizeof(struct dxdi_sepapp_kparams), GFP_KERNEL);
+		enum dxdi_sep_module ret_origin;
+		struct scatterlist sg_iv;
+		u8 iv[SEP_AES_IV_SIZE];
+
+		if (cmd_params == NULL)
+			return -ENOMEM;
+
+		memcpy(iv, req->info, SEP_AES_IV_SIZE);
+		sg_init_one(&sg_iv, iv, SEP_AES_IV_SIZE);
+
+		cmd_params->params_types[0] = DXDI_SEPAPP_PARAM_VAL;
+		cmd_params->params[0].val.data[0] = direction;
+		cmd_params->params[0].val.data[1] = 0;
+		cmd_params->params[0].val.copy_dir = DXDI_DATA_TO_DEVICE;
+
+		cmd_params->params_types[1] = DXDI_SEPAPP_PARAM_MEMREF;
+		cmd_params->params[1].kmemref.dma_direction =
+					DXDI_DATA_TO_DEVICE;
+		cmd_params->params[1].kmemref.sgl = &sg_iv;
+		cmd_params->params[1].kmemref.nbytes = SEP_AES_IV_SIZE;
+
+		cmd_params->params_types[2] = DXDI_SEPAPP_PARAM_MEMREF;
+		cmd_params->params[2].kmemref.dma_direction =
+					DXDI_DATA_TO_DEVICE;
+		cmd_params->params[2].kmemref.sgl = req->src;
+		cmd_params->params[2].kmemref.nbytes = req->nbytes;
+
+		cmd_params->params_types[3] = DXDI_SEPAPP_PARAM_MEMREF;
+		cmd_params->params[3].kmemref.dma_direction =
+					DXDI_DATA_FROM_DEVICE;
+		cmd_params->params[3].kmemref.sgl = req->dst;
+		cmd_params->params[3].kmemref.nbytes = req->nbytes;
+
+		rc = async_sepapp_command_invoke(host_ctx_p->sctx,
+					host_ctx_p->sess_id, CMD_DO_CRYPTO,
+					cmd_params, &ret_origin, areq_ctx);
+	}
+
+	return rc;
+}
+
+/**
+ * symcipher_encrypt() - Encrypt for given sym-cipher context
+ * @req: The async. request structure
+ *
+ */
+static int symcipher_encrypt(struct ablkcipher_request *req)
+{
+	return symcipher_process(req, DXDI_CDIR_ENC);
+}
+
+/**
+ * symcipher_decrypt() - Decrypt for given sym-cipher context
+ * @req: The async. request structure
+ *
+ */
+static int symcipher_decrypt(struct ablkcipher_request *req)
+{
+	return symcipher_process(req, DXDI_CDIR_DEC);
+}
+
+static int ablkcipher_algs_init(void)
+{
+	int i, rc;
+	/* scratchpad to build crypto_alg from template + alg.specific data */
+	struct crypto_alg alg_spad;
+
+	/* Create block cipher algorithms from base + specs via scratchpad */
+	for (i = 0; i < DX_ABLKCIPHER_NUM; i++) {
+		/* Get base template */
+		memcpy(&alg_spad, &blkcipher_algs_base,
+		       sizeof(struct crypto_alg));
+		/* Get alg. specific attributes over base */
+		strcpy(alg_spad.cra_name, dx_ablkcipher_algs[i].cra_name);
+		strcpy(alg_spad.cra_driver_name,
+		       dx_ablkcipher_algs[i].cra_driver_name);
+		alg_spad.cra_blocksize = dx_ablkcipher_algs[i].cra_blocksize;
+		alg_spad.cra_u.ablkcipher.min_keysize =
+		    dx_ablkcipher_algs[i].cra_u.ablkcipher.min_keysize;
+		alg_spad.cra_u.ablkcipher.max_keysize =
+		    dx_ablkcipher_algs[i].cra_u.ablkcipher.max_keysize;
+		alg_spad.cra_u.ablkcipher.ivsize =
+		    dx_ablkcipher_algs[i].cra_u.ablkcipher.ivsize;
+		/* Copy scratchpad to real entry */
+		memcpy(&dx_ablkcipher_algs[i], &alg_spad,
+		       sizeof(struct crypto_alg));
+		/* The list must be initialized in place (pointers based) */
+		INIT_LIST_HEAD(&dx_ablkcipher_algs[i].cra_list);
+	}
+
+	/* Register algs */
+	pr_debug("Registering CryptoAPI blkciphers:\n");
+	for (i = 0, rc = 0; (i < DX_ABLKCIPHER_NUM) && (rc == 0); i++) {
+		pr_debug("%d. %s (__crt_alg=%p)\n", i,
+			      dx_ablkcipher_algs[i].cra_name,
+			      &dx_ablkcipher_algs[i]);
+		rc = crypto_register_alg(&dx_ablkcipher_algs[i]);
+		if (rc != 0)
+			break;
+	}
+	/* Failure: cleanup algorithms that were already registered */
+	if (rc != 0) {
+		pr_err("Failed registering %s\n",
+			    dx_ablkcipher_algs[i].cra_name);
+		while (--i >= 0)
+			crypto_unregister_alg(
+					&dx_ablkcipher_algs[i]);
+	}
+	return rc;
+}
+
+static void ablkcipher_algs_exit(void)
+{
+	int i;
+
+	for (i = 0; i < DX_ABLKCIPHER_NUM; i++)
+		crypto_unregister_alg(&dx_ablkcipher_algs[i]);
+}
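+
+/*
+ * Editor's note: the registration loop in ablkcipher_algs_init() must only
+ * unwind entries that registered successfully; unregistering the entry
+ * whose registration just failed would be an off-by-one. A sketch of the
+ * canonical unwind pattern; register_one()/unregister_one() are
+ * hypothetical helpers:
+ */
+#if 0
+static int register_all(int n)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < n; i++) {
+		rc = register_one(i);
+		if (rc != 0)
+			break;
+	}
+	if (rc != 0)
+		while (--i >= 0)	/* entry i itself never registered */
+			unregister_one(i);
+	return rc;
+}
+#endif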
+
+/****************************************************/
+/* Digest (hash/MAC) algorithms                     */
+/****************************************************/
+
+static struct dx_digest_alg *get_digest_alg(struct crypto_tfm *tfm)
+{
+	struct hash_alg_common *halg_common =
+	    container_of(tfm->__crt_alg, struct hash_alg_common, base);
+	struct ahash_alg *this_ahash =
+	    container_of(halg_common, struct ahash_alg, halg);
+	struct dx_digest_alg *this_digest_alg =
+	    container_of(this_ahash, struct dx_digest_alg, ahash);
+	int alg_index = this_digest_alg - dx_digest_algs;
+
+	/* Verify that the tfm is valid (inside our dx_digest_algs array) */
+	if ((alg_index < 0) || (alg_index >= DX_DIGEST_NUM)) {
+		pr_err("Invalid digest tfm @%p\n", tfm);
+		return NULL;
+	}
+	return this_digest_alg;
+}
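+
+/*
+ * Editor's note: get_digest_alg() climbs from the generic struct crypto_alg
+ * back to the driver wrapper with three container_of() steps; each step
+ * simply subtracts the member's offset from the member pointer. A
+ * self-contained sketch with an illustrative nesting:
+ */
+#if 0
+struct inner { int x; };
+struct outer { int tag; struct inner member; };
+
+static struct outer *outer_of(struct inner *p)
+{
+	/* same as (struct outer *)((char *)p - offsetof(struct outer, member)) */
+	return container_of(p, struct outer, member);
+}
+#endif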
+
+/**
+ * prepare_digest_context_for_processing() - Prepare the crypto context of
+ *	async. hash/mac operation. Initialize context if requested.
+ *
+ * @req:		Crypto request context
+ * @do_init:		When "true" the given context is initialized
+ */
+static int prepare_digest_context_for_processing(struct ahash_request *req,
+						 bool do_init)
+{
+	struct crypto_ahash *ahash_tfm = crypto_ahash_reqtfm(req);
+	struct crypto_tfm *tfm = &ahash_tfm->base;
+	struct dx_digest_alg *digest_alg = get_digest_alg(tfm);
+	struct mac_key_data *key_data = crypto_tfm_ctx(tfm);/* For MACS only */
+	struct async_digest_req_ctx *req_ctx = ahash_request_ctx(req);
+	struct device *mydev = crypto_api_ctx.drv_data->sep_data->dev;
+	struct sep_op_ctx *op_ctx = &req_ctx->async_req.op_ctx;
+	struct client_crypto_ctx_info *ctx_info;
+	enum dxdi_mac_type mac_type;
+	struct dxdi_mac_props mac_props;	/* For MAC init. */
+#ifdef DEBUG
+	enum host_ctx_state ctx_state;
+#endif
+	int error_info;
+	int rc;
+
+	if (unlikely(digest_alg == NULL))
+		return -EINVAL;
+
+	pr_debug("op_ctx=%p op_state=%d\n", op_ctx, op_ctx->op_state);
+	ctx_info = &op_ctx->ctx_info;
+	mac_type = digest_alg->mac_type;
+
+	if (!do_init) {
+		/* Verify given request context was initialized */
+		if (req_ctx->async_req.initiating_req == NULL) {
+			pr_err(
+				    "Invoked for uninitialized async. req. context\n");
+			return -EINVAL;
+		}
+		/* Verify that this request context is not already in use */
+		if (op_ctx->op_state != USER_OP_NOP) {
+			pr_err("Invoked for context in use!\n");
+			/*
+			 * We do not return -EBUSY because -EBUSY is a valid
+			 * return code for async crypto operations that
+			 * indicates the given request was actually dispatched.
+			 */
+			return -EINVAL;
+		}
+	}
+	op_ctx_init(op_ctx, &crypto_api_ctx);
+	op_ctx->comp_work = &req_ctx->async_req.comp_work;
+	rc = ctxmgr_map_kernel_ctx(ctx_info, mydev,
+				   (mac_type != DXDI_MAC_NONE) ?
+				   ALG_CLASS_MAC : ALG_CLASS_HASH,
+				   (struct host_crypto_ctx *)&req_ctx->host_ctx,
+				   NULL, 0);
+	if (rc != 0) {
+		pr_err("Failed mapping context (rc=%d)\n", rc);
+		return rc;
+	}
+	if (do_init) {
+		/* Allocate a new Crypto context ID */
+		ctxmgr_set_ctx_id(ctx_info,
+				  alloc_crypto_ctx_id(&crypto_api_ctx));
+		if (mac_type == DXDI_MAC_NONE) {	/* Hash alg. */
+			rc = ctxmgr_init_hash_ctx(ctx_info,
+						  digest_alg->hash_type,
+						  &error_info);
+		} else {	/* MAC */
+			mac_props.mac_type = mac_type;
+			mac_props.key_size = key_data->key_size;
+			memcpy(mac_props.key, key_data->key,
+			       key_data->key_size);
+			if (mac_type == DXDI_MAC_HMAC)
+				mac_props.alg_specific.hmac.hash_type =
+				    digest_alg->hash_type;
+			rc = ctxmgr_init_mac_ctx(ctx_info, &mac_props,
+						 &error_info);
+		}
+		if (unlikely(rc != 0)) {
+			pr_err("Failed initializing context\n");
+			ctxmgr_set_ctx_state(ctx_info, CTX_STATE_UNINITIALIZED);
+		} else {
+			ctxmgr_set_ctx_state(ctx_info, CTX_STATE_PARTIAL_INIT);
+			/* Init. the async. request context */
+			req_ctx->async_req.initiating_req = &req->base;
+			INIT_WORK(&req_ctx->async_req.comp_work,
+				  dx_crypto_api_handle_op_completion);
+			req_ctx->result = NULL;
+		}
+#ifdef DEBUG
+	} else {		/* Should have been initialized before */
+		ctx_state = ctxmgr_get_ctx_state(ctx_info);
+		if (ctx_state != CTX_STATE_INITIALIZED) {
+			pr_err("Invoked for context in state %d!\n",
+				    ctx_state);
+			rc = -EINVAL;
+		}
+#endif		 /*DEBUG*/
+	}
+	if (likely(rc == 0)) {
+#ifdef DEBUG
+		ctxmgr_dump_sep_ctx(ctx_info);
+#endif
+		/* Flush sep_ctx out of host cache */
+		ctxmgr_sync_sep_ctx(ctx_info, mydev);
+	}
+	return rc;
+}
+
+static int digest_req_dispatch(struct ahash_request *req,
+			       bool do_init, bool is_last,
+			       struct scatterlist *src, unsigned int nbytes)
+{
+	struct crypto_ahash *ahash_tfm = crypto_ahash_reqtfm(req);
+	struct dx_digest_alg *digest_alg = get_digest_alg(&ahash_tfm->base);
+	struct async_digest_req_ctx *req_ctx =
+	    (struct async_digest_req_ctx *)ahash_request_ctx(req);
+	struct sep_op_ctx *op_ctx = &req_ctx->async_req.op_ctx;
+	struct client_crypto_ctx_info *ctx_info = &op_ctx->ctx_info;
+	int rc;
+
+	if (digest_alg == NULL)
+		return -EINVAL;
+	if ((!do_init) && (req_ctx->result != NULL)) {
+		/* already finalized (AES based MACs) */
+		if (unlikely(nbytes > 0)) {
+			pr_err("Invoked with %u B after finalized\n",
+				    nbytes);
+			return -EINVAL;
+		}
+		if (is_last) {
+			/* Fetch saved result */
+			memcpy(req->result, req_ctx->result,
+			       SEP_AES_BLOCK_SIZE);
+			return 0;
+		}
+	}
+	rc = prepare_digest_context_for_processing(req, do_init);
+	if (unlikely(rc != 0))
+		return rc;
+	/* Prepare req_ctx->result */
+	if (is_last) {		/* Plain finalization */
+		req_ctx->result = req->result;
+	} else if (((digest_alg->mac_type == DXDI_MAC_AES_XCBC_MAC) ||
+		    (digest_alg->mac_type == DXDI_MAC_AES_CMAC)) &&
+		   (!IS_MULT_OF(nbytes, SEP_AES_BLOCK_SIZE))) {
+		/* Handle the special case of an AES based MAC update that is
+		   not an AES block multiple --> dispatch as final update */
+		is_last = true;
+		/* req_ctx->result remains NULL. This causes it to be set
+		   to the result location in the SeP context upon
+		   completion */
+	}
+
+	op_ctx->op_type = is_last ? SEP_OP_CRYPTO_FINI :
+	    do_init ? SEP_OP_CRYPTO_INIT : SEP_OP_CRYPTO_PROC;
+	if (op_ctx->op_type != SEP_OP_CRYPTO_INIT) {
+		rc = prepare_data_for_sep(op_ctx, NULL, src, NULL, NULL, nbytes,
+					  is_last ? CRYPTO_DATA_TEXT_FINALIZE :
+					  CRYPTO_DATA_TEXT);
+		if (rc == -ENOTBLK) {
+			/* Data was accumulated but less than a hash block */
+			/* Complete operation immediately */
+			rc = 0;
+			goto digest_proc_exit;
+		}
+		if (unlikely(rc != 0)) {
+			pr_err("Failed mapping client DMA buffer.\n");
+			goto digest_proc_exit;
+		}
+	}
+	rc = dispatch_crypto_op(op_ctx,
+				req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG,
+				do_init,
+				is_last ? SEP_PROC_MODE_FIN : do_init ?
+				SEP_PROC_MODE_NOP : SEP_PROC_MODE_PROC_T,
+				true /*cache */);
+	if (unlikely(IS_DESCQ_ENQUEUE_ERR(rc))) {
+		pr_err("Failed dispatching CRYPTO_OP (rc=%d)\n", rc);
+		crypto_op_completion_cleanup(op_ctx);
+		ctxmgr_unmap_kernel_ctx(ctx_info);
+		op_ctx_fini(op_ctx);
+	}
+	return rc;
+/* Exit when there is no pending request (error or not enough data) */
+ digest_proc_exit:
+	ctxmgr_unmap_kernel_ctx(ctx_info);
+	op_ctx_fini(op_ctx);
+	return rc;
+}
+
+static int digest_init(struct ahash_request *req)
+{
+	pr_debug("\n");
+	return digest_req_dispatch(req, true, false, NULL, 0);
+}
+
+static int digest_update(struct ahash_request *req)
+{
+	pr_debug("nbytes=%u\n", req->nbytes);
+	if (req->nbytes == 0)
+		return 0;	/* Nothing to do (but valid for 0-data MACs) */
+
+	return digest_req_dispatch(req, false, false, req->src, req->nbytes);
+}
+
+static int digest_final(struct ahash_request *req)
+{
+	pr_debug("\n");
+	return digest_req_dispatch(req, false, true, NULL, 0);
+}
+
+static int digest_finup(struct ahash_request *req)
+{
+	pr_debug("nbytes=%u\n", req->nbytes);
+	return digest_req_dispatch(req, false, true, req->src, req->nbytes);
+}
+
+static int digest_integrated(struct ahash_request *req)
+{
+	pr_debug("nbytes=%u\n", req->nbytes);
+	return digest_req_dispatch(req, true, true, req->src, req->nbytes);
+}
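+
+/*
+ * Editor's note: the five ahash entry points above differ only in the
+ * (do_init, is_last, data) triple passed to digest_req_dispatch():
+ *
+ *	init:    do_init=true,  is_last=false, no data
+ *	update:  do_init=false, is_last=false, req->src
+ *	final:   do_init=false, is_last=true,  no data
+ *	finup:   do_init=false, is_last=true,  req->src
+ *	digest:  do_init=true,  is_last=true,  req->src (integrated op)
+ */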
+
+/**
+ * do_hash_sync() - Do integrated hash operation synchronously
+ *
+ * @hash_type:		The hash type used for this HMAC
+ * @data_in:		The input data
+ * @data_len:		Size data_in in bytes
+ * @digest:		The hash result
+ * @digest_len_p:	Returned digest size
+ *
+ * This function is used to shorten long HMAC keys.
+ */
+static int do_hash_sync(enum dxdi_hash_type hash_type,
+			const u8 *data_in, unsigned int data_len,
+			u8 *digest, unsigned int *digest_len_p)
+{
+	int rc;
+	struct queue_drvdata *drvdata = crypto_api_ctx.drv_data;
+	struct host_crypto_ctx_hash host_ctx;
+	struct sep_op_ctx op_ctx;
+	struct client_crypto_ctx_info *ctx_info_p = &op_ctx.ctx_info;
+	struct scatterlist din_sgl;
+
+	op_ctx_init(&op_ctx, &crypto_api_ctx);
+	rc = ctxmgr_map_kernel_ctx(ctx_info_p, drvdata->sep_data->dev,
+				   ALG_CLASS_HASH,
+				   (struct host_crypto_ctx *)&host_ctx, NULL,
+				   0);
+	if (rc != 0) {
+		pr_err("Failed mapping crypto context (rc=%d)\n", rc);
+		op_ctx_fini(&op_ctx);
+		return rc;
+	}
+	/* Allocate a new Crypto context ID */
+	ctxmgr_set_ctx_id(ctx_info_p, alloc_crypto_ctx_id(op_ctx.client_ctx));
+	/* Algorithm class specific initialization */
+	rc = ctxmgr_init_hash_ctx(ctx_info_p, hash_type, &op_ctx.error_info);
+	if (rc != 0)
+		goto unmap_ctx_and_exit;
+	ctxmgr_set_ctx_state(ctx_info_p, CTX_STATE_PARTIAL_INIT);
+	op_ctx.op_type = SEP_OP_CRYPTO_FINI;	/* Integrated is also fin. */
+	sg_init_one(&din_sgl, data_in, data_len);
+	rc = prepare_data_for_sep(&op_ctx, NULL, &din_sgl, NULL, NULL,
+				  data_len, CRYPTO_DATA_TEXT_FINALIZE);
+	if (rc != 0)
+		goto unmap_ctx_and_exit;
+
+#ifdef DEBUG
+	ctxmgr_dump_sep_ctx(ctx_info_p);
+#endif
+	/* Flush sep_ctx out of host cache */
+	ctxmgr_sync_sep_ctx(ctx_info_p, drvdata->sep_data->dev);
+	rc = dispatch_crypto_op(&op_ctx, true, true, SEP_PROC_MODE_FIN, false);
+	if (likely(!IS_DESCQ_ENQUEUE_ERR(rc))) {
+		rc = 0;	/* Clear valid return code from dispatch_crypto_op */
+		wait_for_completion(&op_ctx.ioctl_op_compl);
+		if (likely(op_ctx.error_info == 0)) {
+			*digest_len_p =
+			    ctxmgr_get_digest_or_mac(ctx_info_p, digest);
+		}
+	}
+	crypto_op_completion_cleanup(&op_ctx);
+
+ unmap_ctx_and_exit:
+	ctxmgr_unmap_kernel_ctx(ctx_info_p);
+	return rc;
+}
+
+static int mac_setkey(struct crypto_ahash *tfm,
+		      const u8 *key, unsigned int keylen)
+{
+	struct dx_digest_alg *digest_alg = get_digest_alg(&tfm->base);
+	u32 tfm_flags = crypto_ahash_get_flags(tfm);
+	struct mac_key_data *key_data = crypto_tfm_ctx(&tfm->base);
+	int rc = 0;
+
+	if (unlikely(digest_alg == NULL))
+		return -EINVAL;
+	if (unlikely(digest_alg->mac_type == DXDI_MAC_NONE)) {
+		pr_err("Given algorithm which is not MAC\n");
+		return -EINVAL;
+	}
+	/* Pre-process HMAC key if larger than hash block size */
+	if ((digest_alg->hash_type != DXDI_HASH_NONE) &&
+	    (keylen > digest_alg->ahash.halg.base.cra_blocksize)) {
+		rc = do_hash_sync(digest_alg->hash_type, key, keylen,
+				  key_data->key, &key_data->key_size);
+		if (unlikely(rc != 0))
+			pr_err("Failed digesting key of %u bytes\n",
+			       keylen);
+		if (key_data->key_size != digest_alg->ahash.halg.digestsize)
+			pr_err("Returned digest size is %u != %u (expected)\n",
+			       key_data->key_size,
+			       digest_alg->ahash.halg.digestsize);
+	} else {		/* No need to digest the key */
+		/* Verify that the key size for AES based MACs is not too
+		   large. */
+		if ((digest_alg->hash_type == DXDI_HASH_NONE) &&
+		    (keylen > SEP_AES_KEY_SIZE_MAX)) {
+			pr_err("Invalid key size %u for %s\n",
+			       keylen,
+			       digest_alg->ahash.halg.base.cra_name);
+			tfm_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+			crypto_ahash_set_flags(tfm, tfm_flags);
+			rc = -EINVAL;
+		} else {
+			key_data->key_size = keylen;
+			memcpy(&key_data->key, key, keylen);
+		}
+	}
+
+	return rc;
+}
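+
+/*
+ * Editor's note: mac_setkey() above applies the standard HMAC rule
+ * (RFC 2104): a key longer than the hash block size is first replaced by
+ * its digest. A sketch of the rule; hash() is a hypothetical one-shot
+ * digest helper returning the digest length:
+ */
+#if 0
+static size_t hmac_effective_key(const u8 *key, size_t keylen,
+				 size_t block_size, u8 *out)
+{
+	if (keylen > block_size)
+		return hash(key, keylen, out);	/* K' = H(K) */
+	memcpy(out, key, keylen);		/* K' = K, used as-is */
+	return keylen;
+}
+#endif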
+
+/**
+ * digest_tfm_init() - Initialize tfm with our reqsize (to accommodate context)
+ *	(cra_init entry point)
+ * @tfm:
+ */
+static int digest_tfm_init(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash_tfm =
+	    container_of(tfm, struct crypto_ahash, base);
+
+	ahash_tfm->reqsize = sizeof(struct async_digest_req_ctx);
+	return 0;
+}
+
+static int digest_algs_init(void)
+{
+	int i, rc;
+
+	/* Create hash algorithms from base + specs via scratchpad */
+	for (i = 0; i < DX_DIGEST_NUM; i++) {
+		/* Apply template values into given algorithms */
+		dx_digest_algs[i].ahash.init = digest_algs_base.init;
+		dx_digest_algs[i].ahash.update = digest_algs_base.update;
+		dx_digest_algs[i].ahash.final = digest_algs_base.final;
+		dx_digest_algs[i].ahash.finup = digest_algs_base.finup;
+		dx_digest_algs[i].ahash.digest = digest_algs_base.digest;
+		dx_digest_algs[i].ahash.halg.base.cra_type =
+		    digest_algs_base.halg.base.cra_type;
+		dx_digest_algs[i].ahash.halg.base.cra_priority =
+		    digest_algs_base.halg.base.cra_priority;
+		dx_digest_algs[i].ahash.halg.base.cra_flags =
+		    digest_algs_base.halg.base.cra_flags;
+		dx_digest_algs[i].ahash.halg.base.cra_module =
+		    digest_algs_base.halg.base.cra_module;
+		dx_digest_algs[i].ahash.halg.base.cra_init =
+		    digest_algs_base.halg.base.cra_init;
+		INIT_LIST_HEAD(&dx_digest_algs[i].ahash.halg.base.cra_list);
+	}
+
+	/* Register algs */
+	pr_debug("Registering CryptoAPI digest algorithms:\n");
+	for (i = 0, rc = 0; (i < DX_DIGEST_NUM) && (rc == 0); i++) {
+		pr_debug("%d. %s (__crt_alg=%p)\n", i,
+			      dx_digest_algs[i].ahash.halg.base.cra_name,
+			      &dx_digest_algs[i].ahash);
+		rc = crypto_register_ahash(&dx_digest_algs[i].ahash);
+		if (rc != 0)
+			break;
+	}
+	if (unlikely(rc != 0)) {
+		/* Failure: cleanup algorithms that were already registered */
+		pr_err("Failed registering %s\n",
+			    dx_digest_algs[i].ahash.halg.base.cra_name);
+		while (--i >= 0)
+			crypto_unregister_ahash(
+					&dx_digest_algs[i].ahash);
+	}
+	return rc;
+}
+
+static void digest_algs_exit(void)
+{
+	int i;
+
+	for (i = 0; i < DX_DIGEST_NUM; i++)
+		crypto_unregister_ahash(&dx_digest_algs[i].ahash);
+}
+
+/****************************************************/
+int dx_crypto_api_init(struct sep_drvdata *drvdata)
+{
+	/* Init. return code of each init. function to know which one to
+	   cleanup (only those with rc==0) */
+	int rc_ablkcipher_init = -EINVAL;
+	int rc_digest_init = -EINVAL;
+	int rc;
+
+	init_client_ctx(drvdata->queue + CRYPTO_API_QID, &crypto_api_ctx);
+
+	sep_ctx_pool = dma_pool_create("dx_sep_ctx",
+				       crypto_api_ctx.drv_data->sep_data->dev,
+				       sizeof(struct sep_ctx_cache_entry),
+				       L1_CACHE_BYTES, 0);
+	if (sep_ctx_pool == NULL) {
+		pr_err("Failed allocating pool for SeP contexts\n");
+		rc = -ENOMEM;
+		goto init_error;
+	}
+	rc_ablkcipher_init = ablkcipher_algs_init();
+	if (unlikely(rc_ablkcipher_init != 0)) {
+		rc = rc_ablkcipher_init;
+		goto init_error;
+	}
+	rc_digest_init = digest_algs_init();
+	if (unlikely(rc_digest_init != 0)) {
+		rc = rc_digest_init;
+		goto init_error;
+	}
+
+	return 0;
+
+ init_error:
+	if (rc_ablkcipher_init == 0)
+		ablkcipher_algs_exit();
+	if (sep_ctx_pool != NULL)
+		dma_pool_destroy(sep_ctx_pool);
+	cleanup_client_ctx(drvdata->queue + CRYPTO_API_QID, &crypto_api_ctx);
+	return rc;
+}
+
+void dx_crypto_api_fini(void)
+{
+	digest_algs_exit();
+	ablkcipher_algs_exit();
+	dma_pool_destroy(sep_ctx_pool);
+	cleanup_client_ctx(crypto_api_ctx.drv_data, &crypto_api_ctx);
+}
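+
+/*
+ * Editor's note: dx_crypto_api_init() records a separate return code per
+ * init stage so that the error path only unwinds stages that actually
+ * succeeded. The same effect is more commonly written with cascading error
+ * labels; a sketch with hypothetical stage1/stage2 helpers:
+ */
+#if 0
+static int init_all(void)
+{
+	int rc;
+
+	rc = stage1_init();
+	if (rc != 0)
+		return rc;
+	rc = stage2_init();
+	if (rc != 0)
+		goto undo_stage1;
+	return 0;
+
+undo_stage1:
+	stage1_exit();
+	return rc;
+}
+#endif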
diff --git a/drivers/staging/sep54/crypto_api.h b/drivers/staging/sep54/crypto_api.h
new file mode 100644
index 0000000..67a546c
--- /dev/null
+++ b/drivers/staging/sep54/crypto_api.h
@@ -0,0 +1,64 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+
+/* \file crypto_api.h
+   Definitions of the Linux Crypto API shared with the main driver code
+ */
+
+#ifndef __CRYPTO_API_H__
+#define __CRYPTO_API_H__
+
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/workqueue.h>
+
+/**
+ * struct async_req_ctx - Context for async. request (__ctx of request)
+ * @op_ctx:		SeP operation context
+ * @initiating_req:	The initiating crypto request
+ * @comp_work:		Completion work handler
+ */
+struct async_req_ctx {
+	struct sep_op_ctx op_ctx;
+	struct crypto_async_request *initiating_req;
+	struct work_struct comp_work;
+};
+
+/* Crypto-API init. entry point (to be used by sep_setup) */
+int dx_crypto_api_init(struct sep_drvdata *drvdata);
+void dx_crypto_api_fini(void);
+
+int hwk_init(void);
+void hwk_fini(void);
+
+#endif /*__CRYPTO_API_H__*/
diff --git a/drivers/staging/sep54/crypto_ctx_mgr.c b/drivers/staging/sep54/crypto_ctx_mgr.c
new file mode 100644
index 0000000..bd82f2f
--- /dev/null
+++ b/drivers/staging/sep54/crypto_ctx_mgr.c
@@ -0,0 +1,2559 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+/*! \file
+ * This source file implements crypto context management services.
+ */
+
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_CTX_MGR
+#include "sep_log.h"
+#include "dx_driver.h"
+#include "crypto_ctx_mgr.h"
+
+struct ctxmgr_cache_entry {
+	struct crypto_ctx_uid ctx_id;/* Allocated context ID or CTX_INVALID_ID */
+	unsigned long lru_time;	/* Monotonically incrementing counter for LRU */
+};
+
+struct sep_ctx_cache {
+	unsigned long lru_clk;	/* Virtual clock counter */
+	/* The virtual clock counter is incremented for each entry
+	   allocation/reuse in order to provide LRU info */
+	int cache_size;		/* Num. of cache entries */
+	struct ctxmgr_cache_entry entries[1];	/* embedded entries */
+	/* The "entries" element is only the start of an array with
+	   cache_size entries that begins at this location */
+};
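+
+/*
+ * Editor's note: "entries[1]" above is the pre-C99 idiom for a trailing
+ * variable-length array, so the allocation (outside this hunk) has to
+ * oversize the struct by cache_size - 1 extra entries. A sketch of the
+ * sizing arithmetic; cache_alloc_sketch() is illustrative only:
+ */
+#if 0
+static struct sep_ctx_cache *cache_alloc_sketch(int cache_size)
+{
+	return kzalloc(sizeof(struct sep_ctx_cache) +
+		       (cache_size - 1) * sizeof(struct ctxmgr_cache_entry),
+		       GFP_KERNEL);
+}
+#endif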
+
+/* DES weak keys checking data */
+static const u8 des_key_parity[] = {
+	8, 1, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 2, 8,
+	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 3,
+	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+	8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
+	0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+	4, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
+	8, 5, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 6, 8,
+};
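+
+/*
+ * Editor's note: every DES key octet must have odd parity (bit 0 is the
+ * parity bit), and the table above is a per-byte lookup used for
+ * parity/weak-key screening. For reference, the parity property itself can
+ * be computed directly; des_byte_has_odd_parity() is illustrative only:
+ */
+#if 0
+static bool des_byte_has_odd_parity(u8 b)
+{
+	b ^= b >> 4;	/* fold high nibble into low nibble */
+	b ^= b >> 2;
+	b ^= b >> 1;
+	return b & 1;	/* 1 when the byte has an odd number of set bits */
+}
+#endif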
+
+/**
+ * ctxmgr_get_ctx_size() - Get host context size for given algorithm class
+ * @alg_class:	 Queries algorithm class
+ *
+ * Returns size_t Size in bytes of host context
+ */
+size_t ctxmgr_get_ctx_size(enum crypto_alg_class alg_class)
+{
+	switch (alg_class) {
+	case ALG_CLASS_SYM_CIPHER:
+		return sizeof(struct host_crypto_ctx_sym_cipher);
+	case ALG_CLASS_AUTH_ENC:
+		return sizeof(struct host_crypto_ctx_auth_enc);
+	case ALG_CLASS_MAC:
+		return sizeof(struct host_crypto_ctx_mac);
+	case ALG_CLASS_HASH:
+		return sizeof(struct host_crypto_ctx_hash);
+	default:
+		return 0;
+	}
+}
+
+static size_t get_sep_ctx_offset(enum crypto_alg_class alg_class)
+{
+	switch (alg_class) {
+	case ALG_CLASS_SYM_CIPHER:
+		return offsetof(struct host_crypto_ctx_sym_cipher, sep_ctx);
+	case ALG_CLASS_AUTH_ENC:
+		return offsetof(struct host_crypto_ctx_auth_enc, sep_ctx);
+	case ALG_CLASS_MAC:
+		return offsetof(struct host_crypto_ctx_mac, sep_ctx);
+	case ALG_CLASS_HASH:
+		return offsetof(struct host_crypto_ctx_hash, sep_ctx);
+	default:
+		pr_err("Invalid algorithm class = %d\n", alg_class);
+		return 0;
+	}
+}
+
+/**
+ * get_hash_digest_size() - Get hash digest size (in octets) of given (SeP)
+ *				hash mode
+ * @hash_mode:
+ *
+ * Returns u32 Digest size in octets (0 if unknown hash mode)
+ */
+static u32 get_hash_digest_size(enum sep_hash_mode hash_mode)
+{
+	switch (hash_mode) {
+	case SEP_HASH_SHA1:
+		return 160 >> 3;	/* 160 bit */
+	case SEP_HASH_SHA224:
+		return 224 >> 3;
+	case SEP_HASH_SHA256:
+		return 256 >> 3;
+	case SEP_HASH_SHA384:
+		return 384 >> 3;
+	case SEP_HASH_SHA512:
+		return 512 >> 3;
+	default:
+		pr_err("Unknown hash mode %d\n", hash_mode);
+	}
+	return 0;
+}
+
+static u16 get_hash_block_size(enum dxdi_hash_type hash_type)
+{
+
+	switch (hash_type) {
+	case DXDI_HASH_MD5:
+		pr_err("MD5 not supported\n");
+		break;
+	case DXDI_HASH_SHA1:
+	case DXDI_HASH_SHA224:
+	case DXDI_HASH_SHA256:
+		return 512 >> 3;
+	case DXDI_HASH_SHA384:
+	case DXDI_HASH_SHA512:
+		return 1024 >> 3;
+	default:
+		pr_err("Invalid hash type %d\n", hash_type);
+	}
+	return 0;
+}
+
+enum sep_hash_mode get_sep_hash_mode(enum dxdi_hash_type hash_type)
+{
+	switch (hash_type) {
+	case DXDI_HASH_MD5:
+		pr_err("MD5 not supported\n");
+		return SEP_HASH_NULL;
+	case DXDI_HASH_SHA1:
+		return SEP_HASH_SHA1;
+	case DXDI_HASH_SHA224:
+		return SEP_HASH_SHA224;
+	case DXDI_HASH_SHA256:
+		return SEP_HASH_SHA256;
+	case DXDI_HASH_SHA384:
+		return SEP_HASH_SHA384;
+	case DXDI_HASH_SHA512:
+		return SEP_HASH_SHA512;
+	default:
+		pr_err("Invalid hash type=%d\n", hash_type);
+		return SEP_HASH_NULL;
+	}
+}
+
+/**
+ * ctxmgr_map_user_ctx() - Map given user context to kernel space + DMA
+ * @ctx_info:	 User context info structure
+ * @mydev:	 Associated device context
+ * @alg_class:	If !ALG_CLASS_NONE, consider context of given class for
+ *		size validation (used in uninitialized context mapping)
+ *		When set to ALG_CLASS_NONE, the alg_class field of the
+ *		host_crypto_ctx is used to verify mapped buffer size.
+ * @user_ctx_ptr: Pointer to user space crypto context
+ *
+ * Returns int 0 for success
+ */
+int ctxmgr_map_user_ctx(struct client_crypto_ctx_info *ctx_info,
+			struct device *mydev,
+			enum crypto_alg_class alg_class,
+			u32 __user *user_ctx_ptr)
+{
+	const unsigned long offset_in_page =
+	    ((unsigned long)user_ctx_ptr & ~PAGE_MASK);
+	const unsigned long dist_from_page_end = PAGE_SIZE - offset_in_page;
+	size_t ctx_size = ctxmgr_get_ctx_size(alg_class);
+	int pages_mapped;
+	int rc;
+
+#ifdef DEBUG
+	if (ctx_info->user_ptr != NULL) {
+		pr_err("User context already mapped to 0x%p\n",
+			    ctx_info->user_ptr);
+		return -EINVAL;
+	}
+#endif
+	ctx_info->dev = mydev;
+
+	/* If unknown class, verify that it is at least host_ctx size */
+	/* (so we can access the alg_class field in it) */
+	ctx_size = (ctx_size == 0) ? sizeof(struct host_crypto_ctx) : ctx_size;
+	if (dist_from_page_end < ctx_size) {
+		pr_err("Given user context that crosses a page (0x%p)\n",
+			    user_ctx_ptr);
+		return -EINVAL;
+	}
+
+	down_read(&current->mm->mmap_sem);
+	pages_mapped = get_user_pages(current, current->mm,
+				      (unsigned long)user_ctx_ptr, 1, 1, 0,
+				      &ctx_info->ctx_page, 0);
+	up_read(&current->mm->mmap_sem);
+	if (pages_mapped < 1) {
+		pr_err("Failed getting user page\n");
+		return -ENOMEM;
+	}
+
+	/* Allocate a kernel bounce buffer for the context; it is synced
+	   with the pinned user page via copy_from_user/copy_to_user */
+	ctx_info->ctx_kptr = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (ctx_info->ctx_kptr == NULL) {
+		SEP_LOG_ERR("Memory allocation failed\n");
+		page_cache_release(ctx_info->ctx_page);
+		ctx_info->ctx_page = NULL;
+		return -ENOMEM;
+	}
+
+	if (alg_class == ALG_CLASS_NONE) {
+		size_t host_ctx_size = sizeof(struct host_crypto_ctx);
+		/* Copy common header to get the alg class */
+		if (copy_from_user(ctx_info->ctx_kptr,
+				user_ctx_ptr, host_ctx_size)) {
+			pr_err("Copy from user failed\n");
+			rc = -EINVAL;
+			goto copy_from_user_failed;
+		}
+		/* Verify actual context size with class saved in context */
+		alg_class = ctx_info->ctx_kptr->alg_class;
+		ctx_size = ctxmgr_get_ctx_size(alg_class);
+		if (ctx_size == 0) {	/* Unknown class */
+			pr_err("Unknown alg class\n");
+			rc = -EINVAL;
+			goto unknown_alg_class;
+		}
+		if (dist_from_page_end < ctx_size) {
+			pr_err(
+				    "Given user context that crosses a page (0x%p)\n",
+				    user_ctx_ptr);
+			rc = -EINVAL;
+			goto ctx_cross_page;
+		}
+		/* Copy rest of the context when we know the actual size */
+		if (copy_from_user((u8 *)ctx_info->ctx_kptr + host_ctx_size,
+				(u8 *)user_ctx_ptr + host_ctx_size,
+				ctx_size - host_ctx_size)) {
+			pr_err("Copy from user failed\n");
+			rc = -EINVAL;
+			goto copy_from_user_failed;
+		}
+	}
+
+	/* Map sep_ctx */
+	ctx_info->sep_ctx_kptr = (struct sep_ctx_cache_entry *)
+	    (((unsigned long)ctx_info->ctx_kptr) +
+	     get_sep_ctx_offset(alg_class));
+	ctx_info->sep_ctx_dma_addr = dma_map_single(mydev,
+					(void *)ctx_info->sep_ctx_kptr,
+					sizeof(struct sep_ctx_cache_entry),
+					DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(mydev, ctx_info->sep_ctx_dma_addr)) {
+		pr_err("Mapping sep_ctx for DMA failed\n");
+		rc = -ENOMEM;
+		goto sep_ctx_dma_map_failed;
+	}
+
+	ctx_info->sep_cache_idx = -1;
+
+	ctx_info->user_ptr = user_ctx_ptr;
+
+	return 0;
+
+ sep_ctx_dma_map_failed:
+ copy_from_user_failed:
+ ctx_cross_page:
+ unknown_alg_class:
+	kfree(ctx_info->ctx_kptr);
+	ctx_info->ctx_kptr = NULL;
+	page_cache_release(ctx_info->ctx_page);
+	ctx_info->ctx_page = NULL;
+	return rc;
+}
+
+/**
+ * ctxmgr_unmap_user_ctx() - Unmap given currently mapped user context
+ * @ctx_info:	 User context info structure
+ */
+void ctxmgr_unmap_user_ctx(struct client_crypto_ctx_info *ctx_info)
+{
+	size_t ctx_size;
+
+	if (ctx_info->ctx_kptr == NULL) {
+		/* This is a valid case since we invoke this function in some
+		   error cases without knowing if context was mapped or not */
+		pr_debug("Context not mapped\n");
+		return;
+	}
+
+	ctx_size = ctxmgr_get_ctx_size(ctx_info->ctx_kptr->alg_class);
+
+	dma_unmap_single(ctx_info->dev, ctx_info->sep_ctx_dma_addr,
+			 sizeof(struct sep_ctx_cache_entry), DMA_BIDIRECTIONAL);
+	ctx_info->sep_ctx_dma_addr = 0;
+
+	if (copy_to_user(ctx_info->user_ptr, ctx_info->ctx_kptr, ctx_size))
+		pr_err("Copy to user failed\n");
+
+	kfree(ctx_info->ctx_kptr);
+	ctx_info->ctx_kptr = NULL;
+
+	if (!PageReserved(ctx_info->ctx_page))
+		SetPageDirty(ctx_info->ctx_page);
+	page_cache_release(ctx_info->ctx_page);
+	ctx_info->ctx_page = NULL;
+
+	ctx_info->sep_cache_idx = -1;
+
+	ctx_info->user_ptr = NULL;
+
+}
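+
+/*
+ * Editor's note: taken together, ctxmgr_map_user_ctx() and
+ * ctxmgr_unmap_user_ctx() implement a bounce-buffer round trip: pin the
+ * user page, copy the context into a kernel buffer, DMA-map the embedded
+ * SeP context, and on unmap reverse each step (DMA-unmap, copy_to_user,
+ * kfree, release the page). Every successful map must therefore be paired
+ * with exactly one unmap.
+ */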
+
+/**
+ * ctxmgr_map_kernel_ctx() - Map given kernel context + clone SeP context into
+ *				Privately allocated DMA buffer
+ *				(required for async. ops. on the same context)
+ * @ctx_info:	Client crypto context info structure
+ * @mydev:	Associated device context
+ * @alg_class:	If !ALG_CLASS_NONE, consider context of given class for
+ *		size validation (used in uninitialized context mapping)
+ *		When set to ALG_CLASS_NONE, the alg_class field of the
+ *		host_crypto_ctx is used to verify mapped buffer size.
+ * @kernel_ctx_p:	Pointer to kernel space crypto context
+ * @sep_ctx_p:	Pointer to (private) SeP context. If !NULL the embedded sep
+ *		context is copied into this buffer.
+ *		Set to NULL to use the one embedded in host_crypto_ctx.
+ * @sep_ctx_dma_addr:	DMA address of private SeP context (if sep_ctx_p!=NULL)
+ *
+ * Returns int 0 for success
+ */
+int ctxmgr_map_kernel_ctx(struct client_crypto_ctx_info *ctx_info,
+			  struct device *mydev,
+			  enum crypto_alg_class alg_class,
+			  struct host_crypto_ctx *kernel_ctx_p,
+			  struct sep_ctx_cache_entry *sep_ctx_p,
+			  dma_addr_t sep_ctx_dma_addr)
+{
+	int rc = 0;
+	size_t embedded_sep_ctx_offset = get_sep_ctx_offset(alg_class);
+	struct sep_ctx_cache_entry *embedded_sep_ctx_p =
+	    (struct sep_ctx_cache_entry *)(((unsigned long)kernel_ctx_p) +
+					   embedded_sep_ctx_offset);
+
+	if (embedded_sep_ctx_offset == 0)
+		return -EINVAL;
+	if (sep_ctx_p == NULL)	/* Default context is the one inside */
+		sep_ctx_p = embedded_sep_ctx_p;
+
+	pr_debug("kernel_ctx_p=%p\n", kernel_ctx_p);
+
+	ctx_info->dev = mydev;
+	/* These fields are only relevant for user space context mapping */
+	ctx_info->user_ptr = NULL;
+	ctx_info->ctx_page = NULL;
+	ctx_info->ctx_kptr = kernel_ctx_p;
+	ctx_info->sep_ctx_kptr = sep_ctx_p;
+	/* We assume the kernel CryptoAPI context is allocated from the slab
+	   pools, thus aligned to one of the standard block sizes and not
+	   crossing a page boundary (it must be physically contiguous for
+	   SeP DMA access) */
+	if ((((unsigned long)sep_ctx_p + sizeof(struct sep_ctx_cache_entry) - 1)
+	     >> PAGE_SHIFT) != ((unsigned long)sep_ctx_p >> PAGE_SHIFT)) {
+		pr_err("SeP context cross page boundary start=0x%lx len=0x%zX\n",
+		       (unsigned long)sep_ctx_p,
+		       sizeof(struct sep_ctx_cache_entry));
+		return -EINVAL;
+	}
+
+	/* Map sep_ctx if embedded in given host context */
+	/* (otherwise, assumed to be cache coherent DMA buffer) */
+	if (sep_ctx_p == embedded_sep_ctx_p) {
+		ctx_info->sep_ctx_dma_addr =
+				dma_map_single(mydev, sep_ctx_p,
+					       sizeof(struct sep_ctx_cache_entry),
+					       DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(mydev, ctx_info->sep_ctx_dma_addr)) {
+			pr_err("Mapping sep_ctx for DMA failed\n");
+			rc = -ENOMEM;
+		}
+	} else {
+		ctx_info->sep_ctx_dma_addr = sep_ctx_dma_addr;
+		/* Clone base context into external SeP context buffer */
+		memcpy(sep_ctx_p, embedded_sep_ctx_p,
+		       sizeof(struct sep_ctx_cache_entry));
+	}
+
+	ctx_info->sep_cache_idx = -1;
+
+	return rc;
+}
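+
+/*
+ * Editor's note: the contiguity test in ctxmgr_map_kernel_ctx() only needs
+ * the first and last byte of the buffer to share a page frame. A
+ * self-contained form of the predicate; buf_crosses_page() is illustrative
+ * only:
+ */
+#if 0
+static bool buf_crosses_page(unsigned long addr, size_t len)
+{
+	return (addr >> PAGE_SHIFT) != ((addr + len - 1) >> PAGE_SHIFT);
+}
+#endif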
+
+/**
+ * ctxmgr_unmap_kernel_ctx() - Unmap given currently mapped kernel context
+ * @ctx_info:	 User context info structure
+ */
+void ctxmgr_unmap_kernel_ctx(struct client_crypto_ctx_info *ctx_info)
+{
+	struct sep_ctx_cache_entry *embedded_sep_ctx_p;
+	size_t embedded_sep_ctx_offset;
+
+	if (ctx_info == NULL) {
+		pr_err("Context not mapped\n");
+		return;
+	}
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		/* This is a valid case since we invoke this function in some
+		   error cases without knowing if context was mapped or not */
+		pr_debug("Context not mapped\n");
+		return;
+	}
+#endif
+
+	embedded_sep_ctx_offset =
+	    get_sep_ctx_offset(ctx_info->ctx_kptr->alg_class);
+
+#ifdef DEBUG
+	if (embedded_sep_ctx_offset == 0) {
+		pr_err("Invalid algorithm class\n");
+		return;
+	}
+#endif
+
+	pr_debug("kernel_ctx_ptr=%p\n", ctx_info->ctx_kptr);
+	embedded_sep_ctx_p = (struct sep_ctx_cache_entry *)
+	    (((unsigned long)ctx_info->ctx_kptr) + embedded_sep_ctx_offset);
+
+	if (embedded_sep_ctx_p == ctx_info->sep_ctx_kptr) {
+		dma_unmap_single(ctx_info->dev, ctx_info->sep_ctx_dma_addr,
+				 sizeof(struct sep_ctx_cache_entry),
+				 DMA_BIDIRECTIONAL);
+	}
+
+	ctx_info->sep_ctx_kptr = NULL;
+	ctx_info->sep_ctx_dma_addr = 0;
+	ctx_info->ctx_kptr = NULL;
+	ctx_info->sep_cache_idx = -1;
+	ctx_info->dev = NULL;
+}
+
+/**
+ * get_blk_rem_buf() - Get a pointer to the (hash) block remainder buffer
+ *			structure
+ * @ctx_info:
+ *
+ * Returns struct hash_block_remainder*
+ */
+static struct hash_block_remainder *get_blk_rem_buf(
+				struct client_crypto_ctx_info *ctx_info)
+{
+	struct host_crypto_ctx_hash *hash_ctx_p =
+	    (struct host_crypto_ctx_hash *)ctx_info->ctx_kptr;
+	struct host_crypto_ctx_mac *mac_ctx_p =
+	    (struct host_crypto_ctx_mac *)ctx_info->ctx_kptr;
+	struct hash_block_remainder *blk_rem_p = NULL;
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+	if ((hash_ctx_p->alg_class != ALG_CLASS_HASH) &&
+	    ctxmgr_get_mac_type(ctx_info) != DXDI_MAC_HMAC) {
+		pr_err("Not a hash/HMAC context\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	/* Get the correct block remainder buffer structure */
+	if (hash_ctx_p->alg_class == ALG_CLASS_HASH)
+		blk_rem_p = &hash_ctx_p->hash_tail;
+	else			/* HMAC */
+		blk_rem_p = &mac_ctx_p->hmac_tail;
+
+	return blk_rem_p;
+}
+
+/**
+ * ctxmgr_map2dev_hash_tail() - Map hash data tail buffer in the host context
+ *				for DMA to device
+ * @ctx_info:
+ * @mydev:
+ *
+ * Returns int 0 on success
+ */
+int ctxmgr_map2dev_hash_tail(struct client_crypto_ctx_info *ctx_info,
+			     struct device *mydev)
+{
+	struct hash_block_remainder *blk_rem_p = get_blk_rem_buf(ctx_info);
+
+	if (blk_rem_p->size > 0) {
+		ctx_info->hash_tail_dma_addr = dma_map_single(mydev,
+					(void *)blk_rem_p->data,
+					blk_rem_p->size, DMA_TO_DEVICE);
+		if (dma_mapping_error(mydev, ctx_info->hash_tail_dma_addr)) {
+			pr_err("Mapping hash_tail for DMA failed\n");
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * ctxmgr_unmap2dev_hash_tail() - Unmap hash data tail buffer from DMA to device
+ * @ctx_info:
+ * @mydev:
+ *
+ */
+void ctxmgr_unmap2dev_hash_tail(struct client_crypto_ctx_info *ctx_info,
+				struct device *mydev)
+{
+	struct hash_block_remainder *blk_rem_p = get_blk_rem_buf(ctx_info);
+
+	if (blk_rem_p->size > 0) {
+		dma_unmap_single(mydev, ctx_info->hash_tail_dma_addr,
+				 blk_rem_p->size, DMA_TO_DEVICE);
+	}
+}
+
+/**
+ * ctxmgr_set_ctx_state() - Set context state
+ * @ctx_info:	User context info structure
+ * @state:	State to set in context
+ *
+ * Returns void
+ */
+void ctxmgr_set_ctx_state(struct client_crypto_ctx_info *ctx_info,
+			  const enum host_ctx_state state)
+{
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	ctx_info->ctx_kptr->state = state;
+}
+
+/**
+ * ctxmgr_get_ctx_state() - Get context state
+ * @ctx_info:	 User context info structure
+ *
+ * Returns Current context state
+ */
+enum host_ctx_state ctxmgr_get_ctx_state(const struct client_crypto_ctx_info
+					 *ctx_info)
+{
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	return ctx_info->ctx_kptr->state;
+}
+
+/**
+ * ctxmgr_set_ctx_id() - Allocate unique ID for (initialized) user context
+ * @ctx_info:	 Client crypto context info structure
+ * @ctx_id:	 The unique ID allocated for given context
+ *
+ * Allocate unique ID for (initialized) user context
+ * (Assumes invoked within session mutex so no need for counter protection)
+ */
+void ctxmgr_set_ctx_id(struct client_crypto_ctx_info *ctx_info,
+		       const struct crypto_ctx_uid ctx_id)
+{
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+	pr_debug("ctx_id=0x%16llX for ctx@0x%p\n",
+		      ctx_id.addr, (ctx_info->user_ptr == NULL) ?
+		      (void *)ctx_info->ctx_kptr : (void *)ctx_info->user_ptr);
+#endif
+	memcpy(&ctx_info->ctx_kptr->uid, &ctx_id,
+		sizeof(struct crypto_ctx_uid));
+}
+
+/**
+ * ctxmgr_get_ctx_id() - Return the unique ID for current user context
+ * @ctx_info:	 User context info structure
+ *
+ * Returns Allocated ID (or CTX_ID_INVALID if none)
+ */
+struct crypto_ctx_uid ctxmgr_get_ctx_id(struct client_crypto_ctx_info *ctx_info)
+{
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	return ctx_info->ctx_kptr->uid;
+}
+
+/**
+ * ctxmgr_get_session_id() - Return the session ID of given context ID
+ * @ctx_info:
+ *
+ * Return the session ID of given context ID
+ * This may be used to validate the ID and verify that it was not tampered
+ * with in a manner that could allow access to a session of another process
+ * Returns u64
+ */
+u64 ctxmgr_get_session_id(struct client_crypto_ctx_info *ctx_info)
+{
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	/* The session ID is the address part of the context UID */
+	return ctx_info->ctx_kptr->uid.addr;
+}
+
+/**
+ * ctxmgr_get_alg_class() - Get algorithm class of context
+ * @ctx_info:	 User context info structure
+ *
+ * Returns Current algorithm class of context
+ */
+enum crypto_alg_class ctxmgr_get_alg_class(const struct client_crypto_ctx_info
+					   *ctx_info)
+{
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	return ctx_info->ctx_kptr->alg_class;
+}
+
+/**
+ * ctxmgr_get_crypto_blk_size() - Get the crypto-block length of given context
+ *					in octets
+ * @ctx_info:	 User context info structure
+ *
+ * Returns u32 Crypto-block size in bytes, 0 if invalid/unsupported alg.
+ */
+u32 ctxmgr_get_crypto_blk_size(struct client_crypto_ctx_info *ctx_info)
+{
+	u32 cblk_size = 0;
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	switch (ctx_info->ctx_kptr->alg_class) {
+
+	case ALG_CLASS_SYM_CIPHER:{
+			enum dxdi_sym_cipher_type cipher_type =
+			    ((struct host_crypto_ctx_sym_cipher *)
+			     ctx_info->ctx_kptr)->props.cipher_type;
+			if ((cipher_type >= _DXDI_SYMCIPHER_AES_FIRST) &&
+			    (cipher_type <= _DXDI_SYMCIPHER_AES_LAST)) {
+				cblk_size = SEP_AES_BLOCK_SIZE;
+			} else
+			    if (((cipher_type >= _DXDI_SYMCIPHER_DES_FIRST) &&
+				 (cipher_type <= _DXDI_SYMCIPHER_DES_LAST)) ||
+					    ((cipher_type >=
+					      _DXDI_SYMCIPHER_C2_FIRST) &&
+					     (cipher_type <=
+					      _DXDI_SYMCIPHER_C2_LAST))) {
+				/* DES and C2 have the same block size */
+				cblk_size = SEP_DES_BLOCK_SIZE;
+			} else {
+				pr_err("Invalid sym.cipher type %d\n",
+					    cipher_type);
+			}
+			break;	/*ALG_CLASS_SYM_CIPHER */
+		}
+
+	case ALG_CLASS_AUTH_ENC:{
+			enum dxdi_auth_enc_type ae_type =
+			    ((struct host_crypto_ctx_auth_enc *)
+			     ctx_info->ctx_kptr)->props.ae_type;
+			if (ae_type == DXDI_AUTHENC_AES_CCM)
+				cblk_size = SEP_AES_BLOCK_SIZE;
+			else
+				pr_err("Invalid auth.enc. type %d\n",
+					    ae_type);
+			break;
+		}
+
+	case ALG_CLASS_MAC:{
+			struct host_crypto_ctx_mac *ctx_p =
+			    ((struct host_crypto_ctx_mac *)ctx_info->ctx_kptr);
+			const enum dxdi_mac_type mac_type =
+			    ctx_p->props.mac_type;
+			switch (mac_type) {	/* switch for block size */
+			case DXDI_MAC_HMAC:
+				cblk_size =
+				    get_hash_block_size(ctx_p->props.
+							alg_specific.hmac.
+							hash_type);
+				break;
+			case DXDI_MAC_AES_CMAC:
+			case DXDI_MAC_AES_XCBC_MAC:
+			case DXDI_MAC_AES_MAC:
+				cblk_size = SEP_AES_BLOCK_SIZE;
+				break;
+			default:
+				pr_err("Invalid MAC type %d\n",
+					    ctx_p->props.mac_type);
+			}
+			break;	/* ALG_CLASS_MAC */
+		}
+
+	case ALG_CLASS_HASH:{
+			enum dxdi_hash_type hash_type =
+			    ((struct host_crypto_ctx_hash *)
+			     ctx_info->ctx_kptr)->hash_type;
+			cblk_size = get_hash_block_size(hash_type);
+			break;
+		}
+
+	default:
+		pr_err("Invalid algorithm class %d\n",
+			    ctx_info->ctx_kptr->alg_class);
+
+	}			/*switch alg_class */
+
+	return cblk_size;
+}
+
+/**
+ * ctxmgr_is_valid_adata_size() - Validate additional/associated data size
+ *					for auth/enc algorithms
+ * @ctx_info:	 User context info structure
+ * @adata_size:	 Size in bytes of the additional/associated data
+ *
+ * Returns bool "true" if the given size is valid for this context
+ */
+bool ctxmgr_is_valid_adata_size(struct client_crypto_ctx_info *ctx_info,
+				unsigned long adata_size)
+{
+	struct host_crypto_ctx_auth_enc *host_ctx_p =
+	    (struct host_crypto_ctx_auth_enc *)ctx_info->ctx_kptr;
+
+	if (ctx_info->ctx_kptr->alg_class != ALG_CLASS_AUTH_ENC)
+		return false;
+
+	if ((adata_size != host_ctx_p->props.adata_size) || (adata_size == 0))
+		return false;
+
+	return true;
+}
+
+/**
+ * ctxmgr_is_valid_size() - Validate given data unit size for given
+ *					alg./mode
+ * @ctx_info:		User context info structure
+ * @data_unit_size:	Size of the data unit in bytes
+ * @is_finalize:	"true" when this is the final data unit
+ *
+ * Returns bool "true" if valid.
+ */
+bool ctxmgr_is_valid_size(struct client_crypto_ctx_info *ctx_info,
+				    unsigned long data_unit_size,
+				    bool is_finalize)
+{
+	if (!is_finalize && (data_unit_size == 0)) {
+		/* No mode allows 0 B data for intermediate processing blocks */
+		pr_err("Given 0 B for intermediate processing!\n");
+		return false;
+	}
+
+	switch (ctx_info->ctx_kptr->alg_class) {
+
+	case ALG_CLASS_SYM_CIPHER:{
+		struct host_crypto_ctx_sym_cipher *host_ctx_p =
+		    (struct host_crypto_ctx_sym_cipher *)ctx_info->
+		    ctx_kptr;
+		struct sep_ctx_cipher *aes_ctx_p;
+
+		switch (host_ctx_p->props.cipher_type) {
+		case DXDI_SYMCIPHER_AES_XTS:
+			if (host_ctx_p->props.alg_specific.aes_xts.
+			    data_unit_size == 0) {
+				/* Initialize on first data unit if not
+				   provided by the user */
+				host_ctx_p->props.alg_specific.aes_xts.
+				    data_unit_size = data_unit_size;
+				aes_ctx_p = (struct sep_ctx_cipher *)
+				    &(host_ctx_p->sep_ctx);
+				aes_ctx_p->data_unit_size =
+				    cpu_to_le32(host_ctx_p->
+						props.
+						alg_specific.aes_xts.
+						data_unit_size);
+				break;
+			} else if (((is_finalize) &&
+				    (data_unit_size > 0)) ||
+					    (!is_finalize)) {
+				/* finalize which is not empty must be
+				   consistent with prev. data unit */
+				if (host_ctx_p->props.
+				    alg_specific.aes_xts.
+				    data_unit_size != data_unit_size) {
+					pr_err("Data unit mismatch. was %u. now %lu.\n",
+					       host_ctx_p->props.alg_specific.
+					       aes_xts.data_unit_size,
+					       data_unit_size);
+					return false;
+				}
+			}
+			break;
+		case DXDI_SYMCIPHER_AES_CTR:
+			if (!is_finalize) {	/* !finalize */
+				if (!IS_MULT_OF(data_unit_size,
+						SEP_AES_BLOCK_SIZE)) {
+					pr_err(
+						"Data unit size (%lu) is not AES block multiple\n",
+						data_unit_size);
+					return false;
+				}
+			}
+			break;
+		case DXDI_SYMCIPHER_AES_ECB:
+		case DXDI_SYMCIPHER_AES_CBC:
+			if (!IS_MULT_OF
+			    (data_unit_size, SEP_AES_BLOCK_SIZE)) {
+				pr_err(
+					"Data unit size (%lu) is not AES block multiple\n",
+					data_unit_size);
+				return false;
+			}
+			break;
+		case DXDI_SYMCIPHER_DES_ECB:
+		case DXDI_SYMCIPHER_DES_CBC:
+			if (!IS_MULT_OF
+			    (data_unit_size, SEP_DES_BLOCK_SIZE)) {
+				pr_err(
+					"Data unit size (%lu) is not DES block multiple\n",
+					data_unit_size);
+				return false;
+			}
+			break;
+		case DXDI_SYMCIPHER_C2_ECB:
+		case DXDI_SYMCIPHER_C2_CBC:
+			if (!IS_MULT_OF
+			    (data_unit_size, SEP_C2_BLOCK_SIZE)) {
+				pr_err(
+					"Data unit size (%lu) is not C2 block multiple\n",
+					data_unit_size);
+				return false;
+			}
+			break;
+		default:
+			pr_err("Invalid cipher type %d\n",
+				    host_ctx_p->props.cipher_type);
+			return false;
+		}
+
+		break;	/*ALG_CLASS_SYM_CIPHER */
+	}
+
+	case ALG_CLASS_AUTH_ENC:{
+		enum dxdi_auth_enc_type ae_type =
+		    ((struct host_crypto_ctx_auth_enc *)
+		     ctx_info->ctx_kptr)->props.ae_type;
+		if (ae_type == DXDI_AUTHENC_AES_CCM) {
+			if (!is_finalize) {	/* !finalize */
+				if (!IS_MULT_OF(data_unit_size,
+						SEP_AES_BLOCK_SIZE)) {
+					pr_err(
+						    "Data unit size (%lu) is not AES block multiple\n",
+						    data_unit_size);
+					return false;
+				}
+			}
+		} else {
+			pr_err("Invalid auth.enc. type %d",
+				    ae_type);
+			return false;
+		}
+		break;
+	}
+
+	case ALG_CLASS_MAC:{
+		struct host_crypto_ctx_mac *ctx_p =
+		    ((struct host_crypto_ctx_mac *)ctx_info->ctx_kptr);
+		const enum dxdi_mac_type mac_type =
+		    ctx_p->props.mac_type;
+		switch (mac_type) {	/* switch for block size */
+		case DXDI_MAC_HMAC:
+			break;	/* Any data unit size is allowed */
+		case DXDI_MAC_AES_CMAC:
+		case DXDI_MAC_AES_XCBC_MAC:
+			if (!is_finalize) {
+				if (!IS_MULT_OF(data_unit_size,
+						SEP_AES_BLOCK_SIZE)) {
+					pr_err(
+						    "Data unit size (%lu) is not AES block multiple\n",
+						    data_unit_size);
+					return false;
+				}
+			}
+			break;
+		case DXDI_MAC_AES_MAC:
+			if (!IS_MULT_OF
+			    (data_unit_size, SEP_AES_BLOCK_SIZE)) {
+				pr_err(
+					    "Data unit size (%lu) is not AES block multiple\n",
+					    data_unit_size);
+				return false;
+			}
+			break;
+		default:
+			pr_err("Invalid MAC type %d\n",
+				    ctx_p->props.mac_type);
+		}
+
+		ctx_p->client_data_size += data_unit_size;
+		break;	/* ALG_CLASS_MAC */
+	}
+
+	case ALG_CLASS_HASH:{
+		break;	/* Any data unit size is allowed for hash */
+	}
+
+	default:
+		pr_err("Invalid algorithm class %d\n",
+			    ctx_info->ctx_kptr->alg_class);
+
+	}			/*switch alg_class */
+
+	return true;		/* passed validations */
+}
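+
+/*
+ * Illustration of the AES-XTS data-unit latching above (hypothetical call
+ * sequence; the sizes are examples only):
+ *
+ *	ctxmgr_is_valid_size(ctx_info, 512, false);   first unit: latches 512
+ *	ctxmgr_is_valid_size(ctx_info, 512, false);   true - size matches
+ *	ctxmgr_is_valid_size(ctx_info, 4096, true);   false - size mismatch
+ */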
+
+/**
+ * ctxmgr_get_sym_cipher_type() - Returns the sym cipher specific type.
+ * @ctx_info:	 The context info object of the sym cipher alg.
+ *
+ * Returns enum dxdi_sym_cipher_type The sym cipher type.
+ */
+enum dxdi_sym_cipher_type ctxmgr_get_sym_cipher_type(const struct
+						     client_crypto_ctx_info
+						     *ctx_info)
+{
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	return ((struct host_crypto_ctx_sym_cipher *)
+		ctx_info->ctx_kptr)->props.cipher_type;
+}
+
+/**
+ * ctxmgr_get_mac_type() - Returns the mac specific type.
+ * @ctx_info:	 The context info object of the mac alg.
+ *
+ * Returns enum dxdi_mac_type The mac type.
+ */
+enum dxdi_mac_type ctxmgr_get_mac_type(const struct client_crypto_ctx_info
+				       *ctx_info)
+{
+	struct host_crypto_ctx_mac *mac_ctx =
+	    (struct host_crypto_ctx_mac *)ctx_info->ctx_kptr;
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	if (mac_ctx->alg_class == ALG_CLASS_MAC)
+		return mac_ctx->props.mac_type;
+	else
+		return DXDI_MAC_NONE;
+}
+
+/**
+ * ctxmgr_get_hash_type() - Returns the hash specific type.
+ * @ctx_info:	 The context info object of the hash alg.
+ *
+ * Returns dxdi_hash_type The hash type.
+ */
+enum dxdi_hash_type ctxmgr_get_hash_type(const struct client_crypto_ctx_info
+					 *ctx_info)
+{
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	return ((struct host_crypto_ctx_hash *)ctx_info->ctx_kptr)->hash_type;
+}
+
+/**
+ * ctxmgr_save_hash_blk_remainder() - Save hash block tail data in given
+ *	context. The data is taken from the save4next chunk of given client
+ *	buffer.
+ * @ctx_info:	 Client context info structure (HASH's or HMAC's)
+ * @client_dma_buf_p:	A client DMA buffer object. Data is taken from the
+ *			save4next chunk of this buffer.
+ * @append_data:	When true, given data is appended to existing data
+ *
+ * Returns 0 on success
+ */
+int ctxmgr_save_hash_blk_remainder(struct client_crypto_ctx_info *ctx_info,
+				   struct client_dma_buffer *client_dma_buf_p,
+				   bool append_data)
+{
+	u16 copy_offset;
+	int rc;
+	struct hash_block_remainder *blk_rem_p = get_blk_rem_buf(ctx_info);
+
+	copy_offset = append_data ? blk_rem_p->size : 0;
+	rc = llimgr_copy_from_client_buf_save4next(client_dma_buf_p,
+						   blk_rem_p->data +
+						   copy_offset,
+						   HASH_BLK_SIZE_MAX -
+						   copy_offset);
+	if (likely(rc >= 0)) {	/* rc is the num. of bytes copied */
+		blk_rem_p->size = copy_offset + rc;
+		pr_debug("Accumalted %u B at offset %u\n",
+			      rc, copy_offset);
+		rc = 0;	/* Caller of this function expects 0 on success */
+	} else {
+		pr_err("Failed copying hash block tail from user\n");
+	}
+	return rc;
+}
+
+/**
+ * ctxmgr_get_hash_blk_remainder_buf() - Get DMA info for hash block remainder
+ *	buffer from given context
+ * @ctx_info:			User context info structure
+ * @hash_blk_remainder_dma_p:	Returned tail buffer DMA address
+ *
+ * Note: This function must be invoked only when tail_buf is mapped2dev
+ *	(using ctxmgr_map2dev_hash_tail)
+ * Returns u16 Number of valid bytes/octets in tail buffer
+ */
+u16 ctxmgr_get_hash_blk_remainder_buf(struct client_crypto_ctx_info *
+					   ctx_info,
+					   dma_addr_t *
+					   hash_blk_remainder_dma_p)
+{
+	struct hash_block_remainder *blk_rem_p = get_blk_rem_buf(ctx_info);
+
+	*hash_blk_remainder_dma_p = ctx_info->hash_tail_dma_addr;
+	return blk_rem_p->size;
+}
+
+/**
+ * ctxmgr_get_digest_or_mac() - Get the digest/MAC result when applicable
+ * @ctx_info:		User context info structure
+ * @digest_or_mac:	Pointer to digest/MAC buffer
+ *
+ * Returns the digest/MAC size in bytes
+ */
+u32 ctxmgr_get_digest_or_mac(struct client_crypto_ctx_info *ctx_info,
+				  u8 *digest_or_mac)
+{
+	u8 *digest_mac_source;
+	u32 digest_or_mac_size;
+
+	digest_or_mac_size = ctxmgr_get_digest_or_mac_ptr(ctx_info,
+							  &digest_mac_source);
+	if (digest_mac_source != NULL)
+		memcpy(digest_or_mac, digest_mac_source, digest_or_mac_size);
+	return digest_or_mac_size;
+}
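+
+/*
+ * A caller's result buffer must be large enough for the largest digest/MAC
+ * the context may produce; a hash block (HASH_BLK_SIZE_MAX) is a safe upper
+ * bound, since every supported digest/MAC fits within one hash block
+ * (hypothetical caller, illustration only):
+ *
+ *	u8 result[HASH_BLK_SIZE_MAX];
+ *	u32 size = ctxmgr_get_digest_or_mac(ctx_info, result);
+ */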
+
+/**
+ * ctxmgr_get_digest_or_mac_ptr() - Get the digest/MAC pointer in SeP context
+ * @ctx_info:		User context info structure
+ * @digest_or_mac_pp:	Returned pointer to digest/MAC buffer
+ *
+ * Returns the digest/MAC size in bytes
+ * This function may be used by the caller to reference the result instead
+ * of always copying it
+ */
+u32 ctxmgr_get_digest_or_mac_ptr(struct client_crypto_ctx_info *ctx_info,
+				      u8 **digest_or_mac_pp)
+{
+	struct sep_ctx_cache_entry *sep_ctx_p;
+	u32 digest_or_mac_size = 0;
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	*digest_or_mac_pp = NULL;	/* default */
+	sep_ctx_p = ctx_info->sep_ctx_kptr;
+	switch (le32_to_cpu(sep_ctx_p->alg)) {
+	case SEP_CRYPTO_ALG_HMAC:
+	/* HMAC context holds the MAC (digest) in the same place as
+	   HASH context */
+	case SEP_CRYPTO_ALG_HASH:{
+		struct sep_ctx_hash *hash_ctx_p =
+		    (struct sep_ctx_hash *)sep_ctx_p;
+		digest_or_mac_size =
+		    get_hash_digest_size(le32_to_cpu(hash_ctx_p->mode));
+		*digest_or_mac_pp = hash_ctx_p->digest;
+		break;
+	}
+	case SEP_CRYPTO_ALG_AES:{
+		struct sep_ctx_cipher *aes_ctx_p =
+		    (struct sep_ctx_cipher *)sep_ctx_p;
+		switch (le32_to_cpu(aes_ctx_p->mode)) {
+		case SEP_CIPHER_CBC_MAC:
+		case SEP_CIPHER_XCBC_MAC:
+		case SEP_CIPHER_CMAC:
+			digest_or_mac_size = SEP_AES_BLOCK_SIZE;
+		/* The AES MACs are returned in the block_state field */
+			*digest_or_mac_pp = aes_ctx_p->block_state;
+			break;
+		default:
+			break;	/* No MAC for others */
+		}
+		break;
+	}
+	case SEP_CRYPTO_ALG_AEAD:{
+		struct sep_ctx_aead *aead_ctx_p =
+		    (struct sep_ctx_aead *)sep_ctx_p;
+
+		if (le32_to_cpu(aead_ctx_p->mode) == SEP_CIPHER_CCM) {
+			digest_or_mac_size =
+			    le32_to_cpu(aead_ctx_p->tag_size);
+			*digest_or_mac_pp = aead_ctx_p->mac_state;
+		} else {
+			pr_err(
+				    "Invalid mode (%d) for SEP_CRYPTO_ALG_AEAD\n",
+				    le32_to_cpu(aead_ctx_p->mode));
+		}
+		break;
+	}
+	default:
+		;		/* No MAC/digest for the other algorithms */
+	}
+
+	return digest_or_mac_size;
+}
+
+static int set_sep_ctx_alg_mode(struct client_crypto_ctx_info *ctx_info,
+				const enum dxdi_sym_cipher_type cipher_type)
+{
+	struct sep_ctx_cipher *aes_ctx_p;
+	struct sep_ctx_cipher *des_ctx_p;
+	struct sep_ctx_c2 *c2_ctx_p;
+
+	if (ctx_info == NULL) {
+		pr_err("Context not mapped\n");
+		return -EINVAL;
+	}
+
+	aes_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+	des_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+	c2_ctx_p = (struct sep_ctx_c2 *)ctx_info->sep_ctx_kptr;
+
+	switch (cipher_type) {
+	case DXDI_SYMCIPHER_AES_ECB:
+	case DXDI_SYMCIPHER_AES_CBC:
+	case DXDI_SYMCIPHER_AES_CTR:
+	case DXDI_SYMCIPHER_AES_XTS:
+		aes_ctx_p->alg = cpu_to_le32(SEP_CRYPTO_ALG_AES);
+		break;
+	case DXDI_SYMCIPHER_DES_ECB:
+	case DXDI_SYMCIPHER_DES_CBC:
+		des_ctx_p->alg = cpu_to_le32(SEP_CRYPTO_ALG_DES);
+		break;
+	case DXDI_SYMCIPHER_C2_ECB:
+	case DXDI_SYMCIPHER_C2_CBC:
+		c2_ctx_p->alg = cpu_to_le32(SEP_CRYPTO_ALG_C2);
+		break;
+	case DXDI_SYMCIPHER_RC4:
+		return -ENOSYS;	/* Not supported via this API (only MSGs) */
+	default:
+		return -EINVAL;
+	}
+
+	/* mode specific initializations */
+	switch (cipher_type) {
+	case DXDI_SYMCIPHER_AES_ECB:
+		aes_ctx_p->mode = cpu_to_le32(SEP_CIPHER_ECB);
+		break;
+	case DXDI_SYMCIPHER_AES_CBC:
+		aes_ctx_p->mode = cpu_to_le32(SEP_CIPHER_CBC);
+		break;
+	case DXDI_SYMCIPHER_AES_CTR:
+		aes_ctx_p->mode = cpu_to_le32(SEP_CIPHER_CTR);
+		break;
+	case DXDI_SYMCIPHER_AES_XTS:
+		aes_ctx_p->mode = cpu_to_le32(SEP_CIPHER_XTS);
+		break;
+	case DXDI_SYMCIPHER_DES_ECB:
+		des_ctx_p->mode = cpu_to_le32(SEP_CIPHER_ECB);
+		break;
+	case DXDI_SYMCIPHER_DES_CBC:
+		des_ctx_p->mode = cpu_to_le32(SEP_CIPHER_CBC);
+		break;
+	case DXDI_SYMCIPHER_C2_ECB:
+		c2_ctx_p->mode = cpu_to_le32(SEP_C2_ECB);
+		break;
+	case DXDI_SYMCIPHER_C2_CBC:
+		c2_ctx_p->mode = cpu_to_le32(SEP_C2_CBC);
+		break;
+	case DXDI_SYMCIPHER_RC4:
+		return -ENOSYS;	/* Not supported via this API (only RPC) */
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * ctxmgr_set_symcipher_iv_user() - Set IV of symcipher context given in
+ *					user space pointer
+ * @user_ctx_ptr:	 A user space pointer to the host context
+ * @iv_ptr:	 The IV to set
+ *
+ * Returns int 0 on success, -EINVAL or -EFAULT on error
+ */
+int ctxmgr_set_symcipher_iv_user(u32 __user *user_ctx_ptr,
+				 u8 *iv_ptr)
+{
+	struct host_crypto_ctx_sym_cipher __user *host_ctx_p =
+	    (struct host_crypto_ctx_sym_cipher __user *)user_ctx_ptr;
+	struct sep_ctx_cipher *aes_ctx_p =
+	    (struct sep_ctx_cipher *)&host_ctx_p->sep_ctx;
+	enum dxdi_sym_cipher_type cipher_type;
+
+	/* Copy cipher type from user context */
+	if (copy_from_user(&cipher_type, &host_ctx_p->props.cipher_type,
+			   sizeof(enum dxdi_sym_cipher_type))) {
+		pr_err("Failed reading input parameters\n");
+		return -EFAULT;
+	}
+
+	if ((cipher_type != DXDI_SYMCIPHER_AES_CBC) &&
+	    (cipher_type != DXDI_SYMCIPHER_AES_CTR) &&
+	    (cipher_type != DXDI_SYMCIPHER_AES_XTS)) {
+		return -EINVAL;
+	}
+
+	if (copy_to_user(aes_ctx_p->block_state, iv_ptr, SEP_AES_IV_SIZE) ||
+	    copy_to_user(host_ctx_p->props.alg_specific.aes_cbc.iv, iv_ptr,
+		     SEP_AES_IV_SIZE)) {
+		pr_err("Failed writing input parameters");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * ctxmgr_get_symcipher_iv_user() - Read current "IV"
+ * @user_ctx_ptr:	 A user space pointer to the host context
+ * @iv_ptr:	 Where to return the read IV
+ *
+ * Read current "IV" (block state - not the actual set IV during initialization)
+ * This function works directly over a user space context
+ * Returns int 0 on success, -EINVAL or -EFAULT on error
+ */
+int ctxmgr_get_symcipher_iv_user(u32 __user *user_ctx_ptr,
+				 u8 *iv_ptr)
+{
+	struct host_crypto_ctx_sym_cipher __user *host_ctx_p =
+	    (struct host_crypto_ctx_sym_cipher __user *)user_ctx_ptr;
+	struct sep_ctx_cipher *aes_ctx_p =
+	    (struct sep_ctx_cipher *)&host_ctx_p->sep_ctx;
+	enum dxdi_sym_cipher_type cipher_type;
+
+	/* Copy cipher type from user context */
+	if (copy_from_user(&cipher_type, &host_ctx_p->props.cipher_type,
+			   sizeof(enum dxdi_sym_cipher_type))) {
+		pr_err("Failed reading input parameters\n");
+		return -EFAULT;
+	}
+
+	if ((cipher_type != DXDI_SYMCIPHER_AES_CBC) &&
+	    (cipher_type != DXDI_SYMCIPHER_AES_CTR) &&
+	    (cipher_type != DXDI_SYMCIPHER_AES_XTS)) {
+		return -EINVAL;
+	}
+
+	if (copy_from_user(iv_ptr, aes_ctx_p->block_state, SEP_AES_IV_SIZE)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * get_symcipher_iv_info() - Return the IV location in given context
+ *				(based on symcipher type)
+ * @ctx_info:	User context info structure
+ * @host_ctx_iv:	Returned IV pointer in host props field
+ * @sep_ctx_iv:		Returned IV state block in sep context
+ * @iv_size:	Size of IV in bytes (0 if IV is not applicable for this alg.)
+ *
+ */
+static void get_symcipher_iv_info(struct client_crypto_ctx_info *ctx_info,
+				  u8 **host_ctx_iv, u8 **sep_ctx_iv,
+				  unsigned long *iv_size)
+{
+	struct host_crypto_ctx_sym_cipher *host_ctx_p =
+	    (struct host_crypto_ctx_sym_cipher *)ctx_info->ctx_kptr;
+
+	switch (host_ctx_p->props.cipher_type) {
+	case DXDI_SYMCIPHER_AES_CBC:
+	case DXDI_SYMCIPHER_AES_CTR:
+	case DXDI_SYMCIPHER_AES_XTS:
+		*sep_ctx_iv = ((struct sep_ctx_cipher *)
+			       ctx_info->sep_ctx_kptr)->block_state;
+		*iv_size = SEP_AES_IV_SIZE;
+		break;
+	case DXDI_SYMCIPHER_DES_CBC:
+		*sep_ctx_iv = ((struct sep_ctx_cipher *)
+			       ctx_info->sep_ctx_kptr)->block_state;
+		*iv_size = SEP_DES_IV_SIZE;
+		break;
+	default:
+		*sep_ctx_iv = NULL;
+		*iv_size = 0;
+	}
+
+	switch (host_ctx_p->props.cipher_type) {
+	case DXDI_SYMCIPHER_AES_CBC:
+		*host_ctx_iv = host_ctx_p->props.alg_specific.aes_cbc.iv;
+		break;
+	case DXDI_SYMCIPHER_AES_CTR:
+		*host_ctx_iv = host_ctx_p->props.alg_specific.aes_ctr.cntr;
+		break;
+	case DXDI_SYMCIPHER_AES_XTS:
+		*host_ctx_iv =
+		    host_ctx_p->props.alg_specific.aes_xts.init_tweak;
+		break;
+	case DXDI_SYMCIPHER_DES_CBC:
+		*host_ctx_iv = host_ctx_p->props.alg_specific.des_cbc.iv;
+		break;
+	default:
+		*host_ctx_iv = NULL;
+	}
+
+}
+
+/**
+ * ctxmgr_set_symcipher_iv() - Set IV for given block symcipher algorithm
+ * @ctx_info:	Context to update
+ * @iv:		New IV
+ *
+ * Returns int 0 if changed IV, -EINVAL for error
+ * (given cipher type does not have IV)
+ */
+int ctxmgr_set_symcipher_iv(struct client_crypto_ctx_info *ctx_info,
+			    u8 *iv)
+{
+	u8 *host_ctx_iv;
+	u8 *sep_ctx_iv;
+	unsigned long iv_size = 0;
+
+	get_symcipher_iv_info(ctx_info, &host_ctx_iv, &sep_ctx_iv, &iv_size);
+	if (iv_size > 0 && host_ctx_iv != NULL && sep_ctx_iv != NULL) {
+		memcpy(sep_ctx_iv, iv, iv_size);
+		memcpy(host_ctx_iv, iv, iv_size);
+	}
+
+	return (iv_size > 0) ? 0 : -EINVAL;
+}
+
+/**
+ * ctxmgr_get_symcipher_iv() - Return given cipher IV
+ * @ctx_info:	Context to query
+ * @iv_user:	The IV given by the user on last ctxmgr_set_symcipher_iv
+ * @iv_current:	The current IV state block
+ * @iv_size_p:	[I/O] The given buffers size and returns actual IV size
+ *
+ * Return given cipher IV - Original IV given by user and current state "IV"
+ * The given IV buffers must be large enough to accommodate the IVs
+ * Returns int 0 on success, -ENOMEM if given iv_size is too small
+ */
+int ctxmgr_get_symcipher_iv(struct client_crypto_ctx_info *ctx_info,
+			    u8 *iv_user, u8 *iv_current,
+			    u8 *iv_size_p)
+{
+	u8 *host_ctx_iv;
+	u8 *sep_ctx_iv;
+	unsigned long iv_size;
+	int rc = 0;
+
+	get_symcipher_iv_info(ctx_info, &host_ctx_iv, &sep_ctx_iv, &iv_size);
+	if (iv_size > 0) {
+		if (*iv_size_p < iv_size) {
+			rc = -ENOMEM;
+		} else {
+			if (iv_current != NULL && sep_ctx_iv != NULL)
+				memcpy(iv_current, sep_ctx_iv, iv_size);
+			if (iv_user != NULL && host_ctx_iv != NULL)
+				memcpy(iv_user, host_ctx_iv, iv_size);
+		}
+	}
+
+	/* Always return IV size for informational purposes */
+	*iv_size_p = iv_size;
+	return rc;
+}
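+
+/*
+ * Example of the in/out @iv_size_p contract (hypothetical caller,
+ * illustration only):
+ *
+ *	u8 iv_user[SEP_AES_IV_SIZE], iv_cur[SEP_AES_IV_SIZE];
+ *	u8 iv_size = sizeof(iv_cur);	(in: the buffers' capacity)
+ *	int rc = ctxmgr_get_symcipher_iv(ctx_info, iv_user, iv_cur, &iv_size);
+ *	(out: iv_size holds the actual IV size; 0 if the alg. has no IV)
+ */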
+
+/**
+ * ctxmgr_set_symcipher_direction() - Set the operation direction for given
+ *					symcipher context
+ * @ctx_info:		Context to update
+ * @dxdi_direction:	Requested cipher direction
+ *
+ * Returns int 0 on success
+ */
+int ctxmgr_set_symcipher_direction(struct client_crypto_ctx_info *ctx_info,
+				   enum dxdi_cipher_direction dxdi_direction)
+{
+	struct host_crypto_ctx_sym_cipher *host_ctx_p =
+	    (struct host_crypto_ctx_sym_cipher *)ctx_info->ctx_kptr;
+	enum sep_crypto_direction sep_direction;
+	struct sep_ctx_cipher *aes_ctx_p;
+	struct sep_ctx_cipher *des_ctx_p;
+	struct sep_ctx_c2 *c2_ctx_p;
+
+	/* Translate direction from driver ABI to SeP ABI */
+	if (dxdi_direction == DXDI_CDIR_ENC) {
+		sep_direction = SEP_CRYPTO_DIRECTION_ENCRYPT;
+	} else if (dxdi_direction == DXDI_CDIR_DEC) {
+		sep_direction = SEP_CRYPTO_DIRECTION_DECRYPT;
+	} else {
+		pr_err("Invalid direction=%d\n", dxdi_direction);
+		return -EINVAL;
+	}
+
+	switch (host_ctx_p->props.cipher_type) {
+	case DXDI_SYMCIPHER_AES_ECB:
+	case DXDI_SYMCIPHER_AES_CBC:
+	case DXDI_SYMCIPHER_AES_CTR:
+	case DXDI_SYMCIPHER_AES_XTS:
+		aes_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		aes_ctx_p->direction = cpu_to_le32(sep_direction);
+		break;
+
+	case DXDI_SYMCIPHER_DES_ECB:
+	case DXDI_SYMCIPHER_DES_CBC:
+		des_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		des_ctx_p->direction = cpu_to_le32(sep_direction);
+		break;
+
+	case DXDI_SYMCIPHER_C2_ECB:
+	case DXDI_SYMCIPHER_C2_CBC:
+		c2_ctx_p = (struct sep_ctx_c2 *)ctx_info->sep_ctx_kptr;
+		c2_ctx_p->direction = cpu_to_le32(sep_direction);
+		break;
+
+	case DXDI_SYMCIPHER_RC4:
+		pr_err("Invoked for RC4!\n");
+		return -ENOSYS;	/* Not supported via this API (only RPC) */
+	default:
+		pr_err("Invalid symcipher type %d\n",
+			    host_ctx_p->props.cipher_type);
+		return -EINVAL;
+	}
+
+	host_ctx_p->props.direction = dxdi_direction;
+
+	return 0;
+}
+
+/**
+ * ctxmgr_get_symcipher_direction() - Return the operation direction of given
+ *					symcipher context
+ * @ctx_info:	Context to query
+ *
+ * Returns enum dxdi_cipher_direction (<0 on error)
+ */
+enum dxdi_cipher_direction ctxmgr_get_symcipher_direction(struct
+							  client_crypto_ctx_info
+							  *ctx_info)
+{
+	struct host_crypto_ctx_sym_cipher *host_ctx_p =
+	    (struct host_crypto_ctx_sym_cipher *)ctx_info->ctx_kptr;
+	enum sep_crypto_direction sep_direction;
+	enum dxdi_cipher_direction dxdi_direction;
+	struct sep_ctx_cipher *aes_ctx_p;
+	struct sep_ctx_cipher *des_ctx_p;
+	struct sep_ctx_c2 *c2_ctx_p;
+
+	switch (host_ctx_p->props.cipher_type) {
+	case DXDI_SYMCIPHER_AES_ECB:
+	case DXDI_SYMCIPHER_AES_CBC:
+	case DXDI_SYMCIPHER_AES_CTR:
+	case DXDI_SYMCIPHER_AES_XTS:
+		aes_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		sep_direction = le32_to_cpu(aes_ctx_p->direction);
+		break;
+
+	case DXDI_SYMCIPHER_DES_ECB:
+	case DXDI_SYMCIPHER_DES_CBC:
+		des_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		sep_direction = le32_to_cpu(des_ctx_p->direction);
+		break;
+
+	case DXDI_SYMCIPHER_C2_ECB:
+	case DXDI_SYMCIPHER_C2_CBC:
+		c2_ctx_p = (struct sep_ctx_c2 *)ctx_info->sep_ctx_kptr;
+		sep_direction = le32_to_cpu(c2_ctx_p->direction);
+		break;
+
+	case DXDI_SYMCIPHER_RC4:
+		pr_err("Invoked for RC4!\n");
+		return -ENOSYS;	/* Not supported via this API (only RPC) */
+	default:
+		pr_err("Invalid symcipher type %d\n",
+			    host_ctx_p->props.cipher_type);
+		return -EINVAL;
+	}
+
+	/* Translate direction from driver ABI to SeP ABI */
+	if (sep_direction == SEP_CRYPTO_DIRECTION_ENCRYPT) {
+		dxdi_direction = DXDI_CDIR_ENC;
+	} else if (sep_direction == SEP_CRYPTO_DIRECTION_DECRYPT) {
+		dxdi_direction = DXDI_CDIR_DEC;
+	} else {
+		pr_err("Invalid (sep) direction=%d\n", sep_direction);
+		dxdi_direction = -EINVAL;
+	}
+
+	return dxdi_direction;
+}
+
+/**
+ * is_weak_des_key() - Check for weak DES keys per RFC2451 (section 2.3)
+ * @key:	The DES key material
+ * @keylen:	Key length in bytes (8, 16 or 24)
+ *
+ * Weak key validation based on DX_CRYPTO_DES.c of cc52_crypto.
+ * Returns bool "true" for weak keys
+ */
+static bool is_weak_des_key(const u8 *key, unsigned int keylen)
+{
+	u32 n, w;
+	u64 *k1, *k2, *k3;	/* For 3DES/2DES checks */
+
+	if (keylen > 8) {/* For 3DES/2DES only validate no key repetition */
+		k1 = (u64 *)key;
+		k2 = k1 + 1;
+		if (*k1 == *k2)
+			return true;
+		if (keylen > 16) {	/* 3DES */
+			k3 = k2 + 1;
+			if (*k2 == *k3)
+				return true;
+		}
+	}
+
+	/* Only for single-DES, check weak keys */
+	n = des_key_parity[key[0]];
+	n <<= 4;
+	n |= des_key_parity[key[1]];
+	n <<= 4;
+	n |= des_key_parity[key[2]];
+	n <<= 4;
+	n |= des_key_parity[key[3]];
+	n <<= 4;
+	n |= des_key_parity[key[4]];
+	n <<= 4;
+	n |= des_key_parity[key[5]];
+	n <<= 4;
+	n |= des_key_parity[key[6]];
+	n <<= 4;
+	n |= des_key_parity[key[7]];
+	w = 0x88888888L;
+
+	/* 1 in 10^10 keys passes this test */
+	if (!((n - (w >> 3)) & w)) {
+		switch (n) {
+		case 0x11111111:
+		case 0x13131212:
+		case 0x14141515:
+		case 0x16161616:
+		case 0x31312121:
+		case 0x33332222:
+		case 0x34342525:
+		case 0x36362626:
+		case 0x41415151:
+		case 0x43435252:
+		case 0x44445555:
+		case 0x46465656:
+		case 0x61616161:
+		case 0x63636262:
+		case 0x64646565:
+		case 0x66666666:
+			return true;
+		}
+	}
+	return false;
+}
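+
+/*
+ * Example: the well-known weak key 01 01 01 01 01 01 01 01 from RFC2451
+ * section 2.3 is rejected by the parity-pattern test above, i.e.
+ * is_weak_des_key(key, 8) returns true and ctxmgr_set_symcipher_key()
+ * below then fails with -EPERM.
+ */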
+
+/**
+ * ctxmgr_set_symcipher_key() - Set a symcipher context key
+ * @ctx_info:	Context to update
+ * @key_size:	Size of key in bytes
+ * @key:	New key pointer
+ *
+ * Set a symcipher context key
+ * After invoking this function the context should be reinitialized by SeP
+ * (set its state to "partial init" if not done in this sequence)
+ * Returns int 0 on success, -EINVAL Invalid key len, -EPERM Forbidden/weak key
+ */
+int ctxmgr_set_symcipher_key(struct client_crypto_ctx_info *ctx_info,
+			     u8 key_size, const u8 *key)
+{
+	struct sep_ctx_cipher *aes_ctx_p = NULL;
+	struct sep_ctx_cipher *des_ctx_p = NULL;
+	struct sep_ctx_c2 *c2_ctx_p = NULL;
+	struct dxdi_sym_cipher_props *props =
+	    &((struct host_crypto_ctx_sym_cipher *)ctx_info->ctx_kptr)->props;
+
+	/* Update the respective sep context fields if valid */
+	switch (props->cipher_type) {
+	case DXDI_SYMCIPHER_AES_ECB:
+	case DXDI_SYMCIPHER_AES_CBC:
+	case DXDI_SYMCIPHER_AES_CTR:
+	case DXDI_SYMCIPHER_AES_XTS:
+		aes_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		/* Validate key size before copying */
+		if (props->cipher_type == DXDI_SYMCIPHER_AES_XTS) {
+			/* XTS has two keys of either 128b or 256b */
+			if ((key_size != 32) && (key_size != 64)) {
+				pr_err(
+					"Invalid key size for AES-XTS (%u bits)\n",
+					key_size * 8);
+				return -EINVAL;
+			}
+			/* Divide by two (we have two keys of the same size) */
+			key_size >>= 1;
+			/* copy second half of the double-key as XEX-key */
+			memcpy(aes_ctx_p->xex_key, key + key_size, key_size);
+			/* Always clear data_unit_size on key change
+			   (Assumes new data source with possibly different
+			   data unit size). The actual data unit size
+			   would be determined on the next data processing. */
+			props->alg_specific.aes_xts.data_unit_size = 0;
+			aes_ctx_p->data_unit_size = cpu_to_le32(0);
+		} else {	/* AES engine support 128b/192b/256b keys */
+			if ((key_size != 16) &&
+			    (key_size != 24) && (key_size != 32)) {
+				pr_err(
+					"Invalid key size for AES (%u bits)\n",
+					key_size * 8);
+				return -EINVAL;
+			}
+		}
+		memcpy(aes_ctx_p->key, key, key_size);
+		aes_ctx_p->key_size = cpu_to_le32(key_size);
+		break;
+
+	case DXDI_SYMCIPHER_DES_ECB:
+	case DXDI_SYMCIPHER_DES_CBC:
+		if (is_weak_des_key(key, key_size)) {
+			pr_info("Weak DES key.\n");
+			return -EPERM;
+		}
+		des_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		des_ctx_p->key_size = cpu_to_le32(key_size);
+		if ((key_size != 8) && (key_size != 16) && (key_size != 24)) {
+			/* Avoid copying a key too large */
+			pr_err("Invalid key size for DES (%u bits)\n",
+				    key_size * 8);
+			return -EINVAL;
+		}
+		memcpy(des_ctx_p->key, key, key_size);
+		break;
+
+	case DXDI_SYMCIPHER_C2_ECB:
+	case DXDI_SYMCIPHER_C2_CBC:
+		c2_ctx_p = (struct sep_ctx_c2 *)ctx_info->sep_ctx_kptr;
+		c2_ctx_p->key_size = cpu_to_le32(key_size);
+		if (key_size != SEP_C2_KEY_SIZE_MAX) {
+			/* Avoid copying a key too large */
+			pr_err("Invalid key size for C2 (%u bits)\n",
+				    key_size * 8);
+			return -EINVAL;
+		}
+		memcpy(c2_ctx_p->key, key, key_size);
+		break;
+
+	case DXDI_SYMCIPHER_RC4:
+		return -ENOSYS;	/* Not supported via this API (only MSGs) */
+	default:
+		return -EINVAL;
+	}
+
+	/* If reached here then all validations passed */
+	/* Update in props of host context */
+	memcpy(props->key, key, key_size);
+	props->key_size = key_size;
+
+	return 0;
+}
+
+/**
+ * ctxmgr_init_symcipher_ctx_no_props() - Initialize symcipher context when full
+ *					props are not available, yet.
+ * @ctx_info:		Context to init.
+ * @cipher_type:	Cipher type for context
+ *
+ * Initialize symcipher context when full props are not available, yet.
+ * Later set_key and set_iv may update the context.
+ * Returns int 0 on success
+ */
+int ctxmgr_init_symcipher_ctx_no_props(struct client_crypto_ctx_info *ctx_info,
+				       enum dxdi_sym_cipher_type cipher_type)
+{
+	struct host_crypto_ctx_sym_cipher *host_ctx_p =
+	    (struct host_crypto_ctx_sym_cipher *)ctx_info->ctx_kptr;
+
+	/* Initialize host context part with just cipher type */
+	host_ctx_p->alg_class = ALG_CLASS_SYM_CIPHER;
+	memset(&(host_ctx_p->props), 0, sizeof(struct dxdi_sym_cipher_props));
+	host_ctx_p->props.cipher_type = cipher_type;
+	host_ctx_p->is_encrypted = false;
+
+	/* Clear SeP context before setting specific fields */
+	/* This helps in case SeP-FW assumes zero on uninitialized fields */
+	memset(ctx_info->sep_ctx_kptr, 0, sizeof(struct sep_ctx_cache_entry));
+
+	/* with only cipher_type we can initialize just the alg/mode fields */
+	return set_sep_ctx_alg_mode(ctx_info, cipher_type);
+}
+
+/**
+ * ctxmgr_init_symcipher_ctx() - Initialize symCipher context based on given
+ *				properties.
+ * @ctx_info:	 User context mapping info.
+ * @props:	 The initialization properties
+ * @postpone_init:	Return "true" if INIT on SeP should be postponed
+ *			to first processing (e.g, in AES-XTS)
+ * @error_info:	Error info
+ *
+ * Returns 0 on success, error code otherwise
+ */
+int ctxmgr_init_symcipher_ctx(struct client_crypto_ctx_info *ctx_info,
+			      struct dxdi_sym_cipher_props *props,
+			      bool *postpone_init, u32 *error_info)
+{
+	struct host_crypto_ctx_sym_cipher *host_ctx_p =
+	    (struct host_crypto_ctx_sym_cipher *)ctx_info->ctx_kptr;
+	struct sep_ctx_cipher *aes_ctx_p = NULL;
+	struct sep_ctx_cipher *des_ctx_p = NULL;
+	int rc;
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	*postpone_init = false;	/* default */
+	*error_info = DXDI_ERROR_NULL;	/* assume no error */
+
+	/* Initialize host context part */
+	host_ctx_p->alg_class = ALG_CLASS_SYM_CIPHER;
+	memcpy(&(host_ctx_p->props), props,
+	       sizeof(struct dxdi_sym_cipher_props));
+	host_ctx_p->is_encrypted = false;
+
+	/* Clear SeP context before setting specific fields */
+	/* This helps in case SeP-FW assumes zero on uninitialized fields */
+	memset(&(host_ctx_p->sep_ctx), 0, sizeof(host_ctx_p->sep_ctx));
+
+	rc = set_sep_ctx_alg_mode(ctx_info, props->cipher_type);
+	if (rc != 0) {
+		*error_info = DXDI_ERROR_INVAL_MODE;
+		return rc;
+	}
+
+	rc = ctxmgr_set_symcipher_direction(ctx_info, props->direction);
+	if (rc != 0) {
+		*error_info = DXDI_ERROR_INVAL_DIRECTION;
+		return rc;
+	}
+
+	rc = ctxmgr_set_symcipher_key(ctx_info, props->key_size, props->key);
+	if (rc != 0) {
+		*error_info = DXDI_ERROR_INVAL_KEY_SIZE;
+		return rc;
+	}
+
+	/* mode specific initializations */
+	switch (props->cipher_type) {
+	case DXDI_SYMCIPHER_AES_CBC:
+		aes_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		memcpy(aes_ctx_p->block_state, props->alg_specific.aes_cbc.iv,
+		       SEP_AES_IV_SIZE);
+		break;
+	case DXDI_SYMCIPHER_AES_CTR:
+		aes_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		memcpy(aes_ctx_p->block_state, props->alg_specific.aes_ctr.cntr,
+		       SEP_AES_IV_SIZE);
+		break;
+	case DXDI_SYMCIPHER_AES_XTS:
+		aes_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		memcpy(aes_ctx_p->block_state,
+		       props->alg_specific.aes_xts.init_tweak, SEP_AES_IV_SIZE);
+		aes_ctx_p->data_unit_size =
+		    cpu_to_le32(props->alg_specific.aes_xts.data_unit_size);
+		/* update in context because was cleared by
+		   ctxmgr_set_symcipher_key */
+		host_ctx_p->props.alg_specific.aes_xts.data_unit_size =
+		    props->alg_specific.aes_xts.data_unit_size;
+		if (props->alg_specific.aes_xts.data_unit_size == 0)
+			*postpone_init = true;
+		break;
+	case DXDI_SYMCIPHER_DES_CBC:
+		des_ctx_p = (struct sep_ctx_cipher *)ctx_info->sep_ctx_kptr;
+		memcpy(des_ctx_p->block_state, props->alg_specific.des_cbc.iv,
+		       SEP_DES_IV_SIZE);
+		break;
+	case DXDI_SYMCIPHER_C2_CBC:
+		/*C2 reset interval is not supported, yet */
+		*error_info = DXDI_ERROR_UNSUP;
+		return -ENOSYS;
+	case DXDI_SYMCIPHER_RC4:
+		*error_info = DXDI_ERROR_UNSUP;
+		return -ENOSYS;	/* Not supported via this API (only MSGs) */
+	default:
+		break;	/* No specific initializations for other modes */
+	}
+
+	return 0;
+}
+
+/**
+ * ctxmgr_init_auth_enc_ctx() - Initialize Authenticated Encryption class
+ *				context
+ * @ctx_info:	User context mapping info.
+ * @props:	The initialization properties
+ * @error_info:	Error info
+ *
+ * Returns int 0 on success, -EINVAL or -ENOSYS on error
+ */
+int ctxmgr_init_auth_enc_ctx(struct client_crypto_ctx_info *ctx_info,
+			     struct dxdi_auth_enc_props *props,
+			     u32 *error_info)
+{
+	struct host_crypto_ctx_auth_enc *host_ctx_p =
+	    (struct host_crypto_ctx_auth_enc *)ctx_info->ctx_kptr;
+	struct sep_ctx_aead *aead_ctx_p = NULL;
+	enum sep_crypto_direction direction;
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	*error_info = DXDI_ERROR_NULL;
+
+	/* Initialize host context part */
+	host_ctx_p->alg_class = ALG_CLASS_AUTH_ENC;
+	host_ctx_p->is_adata_processed = 0;
+	memcpy(&(host_ctx_p->props), props, sizeof(struct dxdi_auth_enc_props));
+	host_ctx_p->is_encrypted = false;
+
+	/* Clear SeP context before setting specific fields */
+	/* This helps in case SeP-FW assumes zero on uninitialized fields */
+	memset(&(host_ctx_p->sep_ctx), 0, sizeof(host_ctx_p->sep_ctx));
+
+	/* Translate direction from driver ABI to SeP ABI */
+	if (props->direction == DXDI_CDIR_ENC) {
+		direction = SEP_CRYPTO_DIRECTION_ENCRYPT;
+	} else if (props->direction == DXDI_CDIR_DEC) {
+		direction = SEP_CRYPTO_DIRECTION_DECRYPT;
+	} else {
+		pr_err("Invalid direction=%d\n", props->direction);
+		*error_info = DXDI_ERROR_INVAL_DIRECTION;
+		return -EINVAL;
+	}
+
+	/* initialize SEP context */
+	aead_ctx_p = (struct sep_ctx_aead *)&(host_ctx_p->sep_ctx);
+	aead_ctx_p->alg = cpu_to_le32(SEP_CRYPTO_ALG_AEAD);
+	aead_ctx_p->direction = cpu_to_le32(direction);
+	aead_ctx_p->header_size = cpu_to_le32(props->adata_size);
+	if (props->nonce_size > SEP_AES_BLOCK_SIZE) {
+		pr_err("Invalid nonce size=%u\n", aead_ctx_p->nonce_size);
+		*error_info = DXDI_ERROR_INVAL_NONCE_SIZE;
+		return -EINVAL;
+	}
+	aead_ctx_p->nonce_size = cpu_to_le32(props->nonce_size);
+	memcpy(aead_ctx_p->nonce, props->nonce, props->nonce_size);
+	if (props->tag_size > SEP_AES_BLOCK_SIZE) {
+		pr_err("Invalid tag_size size=%u\n", aead_ctx_p->tag_size);
+		*error_info = DXDI_ERROR_INVAL_TAG_SIZE;
+		return -EINVAL;
+	}
+	aead_ctx_p->tag_size = cpu_to_le32(props->tag_size);
+	aead_ctx_p->text_size = cpu_to_le32(props->text_size);
+	if ((props->key_size != 16) &&
+	    (props->key_size != 24) && (props->key_size != 32)) {
+		pr_err("Invalid key size for AEAD (%u bits)\n",
+			    props->key_size * 8);
+		*error_info = DXDI_ERROR_INVAL_KEY_SIZE;
+		return -EINVAL;
+	}
+	memcpy(aead_ctx_p->key, props->key, props->key_size);
+	aead_ctx_p->key_size = cpu_to_le32(props->key_size);
+
+	/* mode specific initializations */
+	switch (props->ae_type) {
+	case DXDI_AUTHENC_AES_CCM:
+		aead_ctx_p->mode = cpu_to_le32(SEP_CIPHER_CCM);
+		break;
+	case DXDI_AUTHENC_AES_GCM:
+		*error_info = DXDI_ERROR_UNSUP;
+		return -ENOSYS;	/* Not supported */
+	default:
+		*error_info = DXDI_ERROR_UNSUP;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * ctxmgr_init_mac_ctx() - Initialize context for MAC algorithm
+ * @ctx_info:	User context mapping info.
+ * @props:	The initialization properties
+ * @error_info:	Error info
+ *
+ * Returns int 0 on success, -EINVAL on error
+ */
+int ctxmgr_init_mac_ctx(struct client_crypto_ctx_info *ctx_info,
+			struct dxdi_mac_props *props, u32 *error_info)
+{
+	struct host_crypto_ctx_mac *host_ctx_p =
+	    (struct host_crypto_ctx_mac *)ctx_info->ctx_kptr;
+	struct sep_ctx_cipher *aes_ctx_p = NULL;
+	struct sep_ctx_hmac *hmac_ctx_p = NULL;
+	enum dxdi_hash_type hash_type;
+	enum sep_hash_mode hash_mode;
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	*error_info = DXDI_ERROR_NULL;
+
+	/* Initialize host context part */
+	host_ctx_p->alg_class = ALG_CLASS_MAC;
+	host_ctx_p->client_data_size = 0;
+	memcpy(&(host_ctx_p->props), props, sizeof(struct dxdi_mac_props));
+	host_ctx_p->hmac_tail.size = 0;
+	host_ctx_p->is_encrypted = false;
+
+	/* Clear SeP context before setting specific fields */
+	/* This helps in case SeP-FW assumes zero on uninitialized fields */
+	memset(&(host_ctx_p->sep_ctx), 0, sizeof(host_ctx_p->sep_ctx));
+
+	switch (props->mac_type) {
+	case DXDI_MAC_HMAC:
+		hmac_ctx_p = (struct sep_ctx_hmac *)&(host_ctx_p->sep_ctx);
+		hmac_ctx_p->alg = cpu_to_le32(SEP_CRYPTO_ALG_HMAC);
+		hash_type = props->alg_specific.hmac.hash_type;
+		hash_mode = get_sep_hash_mode(hash_type);
+		if (hash_mode == SEP_HASH_NULL) {
+			*error_info = DXDI_ERROR_INVAL_MODE;
+			return -EINVAL;
+		}
+		if (get_hash_block_size(hash_type) > SEP_HMAC_BLOCK_SIZE_MAX) {
+			pr_err(
+				    "Given hash type (%d) is not supported for HMAC\n",
+				    hash_type);
+			*error_info = DXDI_ERROR_UNSUP;
+			return -EINVAL;
+		}
+		hmac_ctx_p->mode = cpu_to_le32(hash_mode);
+		hmac_ctx_p->k0_size = cpu_to_le32(props->key_size);
+		if (props->key_size > SEP_HMAC_BLOCK_SIZE_MAX) {
+			pr_err("Invalid key size %u bits\n",
+				    props->key_size * 8);
+			*error_info = DXDI_ERROR_INVAL_KEY_SIZE;
+			return -EINVAL;
+		}
+		memcpy(&(hmac_ctx_p->k0), props->key, props->key_size);
+		break;
+
+	case DXDI_MAC_AES_MAC:
+	case DXDI_MAC_AES_CMAC:
+	case DXDI_MAC_AES_XCBC_MAC:
+		aes_ctx_p = (struct sep_ctx_cipher *)&(host_ctx_p->sep_ctx);
+		aes_ctx_p->alg = cpu_to_le32(SEP_CRYPTO_ALG_AES);
+		aes_ctx_p->direction =
+		    cpu_to_le32(SEP_CRYPTO_DIRECTION_ENCRYPT);
+		aes_ctx_p->key_size = cpu_to_le32(props->key_size);
+		if ((props->key_size > SEP_AES_KEY_SIZE_MAX) ||
+		    ((props->mac_type == DXDI_MAC_AES_XCBC_MAC) &&
+		     (props->key_size != SEP_AES_128_BIT_KEY_SIZE))) {
+			/* Avoid copying a key too large */
+			pr_err("Invalid key size for MAC (%u bits)\n",
+				    props->key_size * 8);
+			*error_info = DXDI_ERROR_INVAL_KEY_SIZE;
+			return -EINVAL;
+		}
+		memcpy(aes_ctx_p->key, props->key, props->key_size);
+		break;
+
+	default:
+		*error_info = DXDI_ERROR_UNSUP;
+		return -EINVAL;
+	}
+
+	/* AES mode specific initializations */
+	switch (props->mac_type) {
+	case DXDI_MAC_AES_MAC:
+		aes_ctx_p->mode = cpu_to_le32(SEP_CIPHER_CBC_MAC);
+		memcpy(aes_ctx_p->block_state, props->alg_specific.aes_mac.iv,
+		       SEP_AES_IV_SIZE);
+		break;
+	case DXDI_MAC_AES_CMAC:
+		aes_ctx_p->mode = cpu_to_le32(SEP_CIPHER_CMAC);
+		break;
+	case DXDI_MAC_AES_XCBC_MAC:
+		aes_ctx_p->mode = cpu_to_le32(SEP_CIPHER_XCBC_MAC);
+		break;
+	default:
+		/* Invalid type was already handled in previous "switch" */
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * ctxmgr_init_hash_ctx() - Initialize hash context
+ * @ctx_info:	 User context mapping info.
+ * @hash_type:	 Assigned hash type
+ * @error_info:	Error info
+ *
+ * Returns int 0 on success, -EINVAL, -ENOSYS
+ */
+int ctxmgr_init_hash_ctx(struct client_crypto_ctx_info *ctx_info,
+			 enum dxdi_hash_type hash_type, u32 *error_info)
+{
+	struct host_crypto_ctx_hash *host_ctx_p =
+	    (struct host_crypto_ctx_hash *)ctx_info->ctx_kptr;
+	struct sep_ctx_hash *sep_ctx_p;
+	enum sep_hash_mode hash_mode;
+
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	*error_info = DXDI_ERROR_NULL;
+
+	/* Limit to hash types supported by SeP */
+	if ((hash_type != DXDI_HASH_SHA1) &&
+	    (hash_type != DXDI_HASH_SHA224) &&
+	    (hash_type != DXDI_HASH_SHA256)) {
+		pr_err("Unsupported hash type %d\n", hash_type);
+		*error_info = DXDI_ERROR_UNSUP;
+		return -ENOSYS;
+	}
+
+	/* Initialize host context part */
+	host_ctx_p->alg_class = ALG_CLASS_HASH;
+	host_ctx_p->hash_type = hash_type;
+	host_ctx_p->hash_tail.size = 0;
+	host_ctx_p->is_encrypted = false;
+
+	/* Initialize SeP/FW context part */
+	sep_ctx_p = (struct sep_ctx_hash *)&(host_ctx_p->sep_ctx);
+	/* Clear SeP context before setting specific fields */
+	/* This helps in case SeP-FW assumes zero on uninitialized fields */
+	memset(sep_ctx_p, 0, sizeof(struct sep_ctx_hash));
+	sep_ctx_p->alg = cpu_to_le32(SEP_CRYPTO_ALG_HASH);
+	hash_mode = get_sep_hash_mode(hash_type);
+	if (hash_mode == SEP_HASH_NULL) {
+		*error_info = DXDI_ERROR_INVAL_MODE;
+		return -EINVAL;
+	}
+	sep_ctx_p->mode = cpu_to_le32(hash_mode);
+
+	return 0;
+}
+
+/**
+ * ctxmgr_set_sep_cache_idx() - Set the index of this context in the sep_cache
+ * @ctx_info:	 User context info structure
+ * @sep_cache_idx:	 The allocated index in SeP cache for this context
+ *
+ * Returns void
+ */
+void ctxmgr_set_sep_cache_idx(struct client_crypto_ctx_info *ctx_info,
+			      int sep_cache_idx)
+{
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	ctx_info->sep_cache_idx = sep_cache_idx;
+}
+
+/**
+ * ctxmgr_get_sep_cache_idx() - Get the index of this context in the sep_cache
+ * @ctx_info:	 User context info structure
+ *
+ * Returns The allocated index in SeP cache for this context
+ */
+int ctxmgr_get_sep_cache_idx(struct client_crypto_ctx_info *ctx_info)
+{
+#ifdef DEBUG
+	if (ctx_info->ctx_kptr == NULL) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	return ctx_info->sep_cache_idx;
+}
+
+#ifdef DEBUG
+static void dump_sep_aes_ctx(struct sep_ctx_cipher *ctx_p)
+{
+	pr_debug("Alg.=AES , Mode=%d , Direction=%d , Key size=%d\n",
+		      le32_to_cpu(ctx_p->mode),
+		      le32_to_cpu(ctx_p->direction),
+		      le32_to_cpu(ctx_p->key_size));
+	dump_byte_array("Key", ctx_p->key, le32_to_cpu(ctx_p->key_size));
+	dump_byte_array("block_state",
+			ctx_p->block_state, sizeof(ctx_p->block_state));
+	if (le32_to_cpu(ctx_p->mode) == SEP_CIPHER_XTS) {
+		pr_debug("data_unit_size=%u\n",
+			      le32_to_cpu(ctx_p->data_unit_size));
+		dump_byte_array("XEX-Key",
+				ctx_p->xex_key, le32_to_cpu(ctx_p->key_size));
+	}
+}
+
+static void dump_sep_aead_ctx(struct sep_ctx_aead *ctx_p)
+{
+	pr_debug(
+		      "Alg.=AEAD, Mode=%d, Direction=%d, Key size=%d, header size=%d, nonce size=%d, tag size=%d, text size=%d\n",
+		      le32_to_cpu(ctx_p->mode),
+		      le32_to_cpu(ctx_p->direction),
+		      le32_to_cpu(ctx_p->key_size),
+		      le32_to_cpu(ctx_p->header_size),
+		      le32_to_cpu(ctx_p->nonce_size),
+		      le32_to_cpu(ctx_p->tag_size),
+		      le32_to_cpu(ctx_p->text_size));
+	dump_byte_array("Key", ctx_p->key, le32_to_cpu(ctx_p->key_size));
+	dump_byte_array("block_state",
+			ctx_p->block_state, sizeof(ctx_p->block_state));
+	dump_byte_array("mac_state",
+			ctx_p->mac_state, sizeof(ctx_p->mac_state));
+	dump_byte_array("nonce", ctx_p->nonce, le32_to_cpu(ctx_p->nonce_size));
+}
+
+static void dump_sep_des_ctx(struct sep_ctx_cipher *ctx_p)
+{
+	pr_debug("Alg.=DES, Mode=%d, Direction=%d, Key size=%d\n",
+		      le32_to_cpu(ctx_p->mode),
+		      le32_to_cpu(ctx_p->direction),
+		      le32_to_cpu(ctx_p->key_size));
+	dump_byte_array("Key", ctx_p->key, le32_to_cpu(ctx_p->key_size));
+	dump_byte_array("IV", ctx_p->block_state, SEP_DES_IV_SIZE);
+}
+
+static void dump_sep_c2_ctx(struct sep_ctx_c2 *ctx_p)
+{
+	pr_debug("Alg.=C2, Mode=%d, Direction=%d, KeySz=%d, ResetInt.=%d",
+		      le32_to_cpu(ctx_p->mode),
+		      le32_to_cpu(ctx_p->direction),
+		      le32_to_cpu(ctx_p->key_size),
+		      0 /* reset_interval (CBC) not implemented, yet */);
+	dump_byte_array("Key", ctx_p->key, le32_to_cpu(ctx_p->key_size));
+}
+
+static const char *hash_mode_str(enum sep_hash_mode mode)
+{
+	switch (mode) {
+	case SEP_HASH_SHA1:
+		return "SHA1";
+	case SEP_HASH_SHA224:
+		return "SHA224";
+	case SEP_HASH_SHA256:
+		return "SHA256";
+	case SEP_HASH_SHA384:
+		return "SHA384";
+	case SEP_HASH_SHA512:
+		return "SHA512";
+	default:
+		return "(unknown)";
+	}
+}
+
+static void dump_sep_hash_ctx(struct sep_ctx_hash *ctx_p)
+{
+	pr_debug("Alg.=Hash , Mode=%s\n",
+		      hash_mode_str(le32_to_cpu(ctx_p->mode)));
+}
+
+static void dump_sep_hmac_ctx(struct sep_ctx_hmac *ctx_p)
+{
+	/* Alg./Mode of HMAC is identical to HASH */
+	pr_debug("Alg.=HMAC , Mode=%s\n",
+		      hash_mode_str(le32_to_cpu(ctx_p->mode)));
+	pr_debug("K0 size = %u B\n", le32_to_cpu(ctx_p->k0_size));
+	dump_byte_array("K0", ctx_p->k0, le32_to_cpu(ctx_p->k0_size));
+}
+
+/**
+ * ctxmgr_dump_sep_ctx() - Dump SeP context data
+ * @ctx_info:	User context info structure
+ *
+ */
+void ctxmgr_dump_sep_ctx(const struct client_crypto_ctx_info *ctx_info)
+{
+	struct sep_ctx_cache_entry *sep_ctx_p = ctx_info->sep_ctx_kptr;
+	enum sep_crypto_alg alg;
+	int ctx_idx;
+
+	/* For combined mode call recursively for each sub-context */
+	if (ctx_info->ctx_kptr->alg_class == ALG_CLASS_COMBINED) {
+		for (ctx_idx = 0; (ctx_idx < DXDI_COMBINED_NODES_MAX) &&
+		     (ctx_info[ctx_idx].ctx_kptr != NULL); ctx_idx++) {
+			ctxmgr_dump_sep_ctx(&ctx_info[ctx_idx]);
+		}
+		return;
+	}
+
+	alg = (enum sep_crypto_alg)le32_to_cpu(sep_ctx_p->alg);
+
+	pr_debug("SeP crypto context at %p: Algorithm=%d\n",
+		      sep_ctx_p, alg);
+	switch (alg) {
+	case SEP_CRYPTO_ALG_NULL:
+		break;		/* Nothing to dump */
+	case SEP_CRYPTO_ALG_AES:
+		dump_sep_aes_ctx((struct sep_ctx_cipher *)sep_ctx_p);
+		break;
+	case SEP_CRYPTO_ALG_AEAD:
+		dump_sep_aead_ctx((struct sep_ctx_aead *)sep_ctx_p);
+		break;
+	case SEP_CRYPTO_ALG_DES:
+		dump_sep_des_ctx((struct sep_ctx_cipher *)sep_ctx_p);
+		break;
+	case SEP_CRYPTO_ALG_C2:
+		dump_sep_c2_ctx((struct sep_ctx_c2 *)sep_ctx_p);
+		break;
+	case SEP_CRYPTO_ALG_HASH:
+		dump_sep_hash_ctx((struct sep_ctx_hash *)sep_ctx_p);
+		break;
+	case SEP_CRYPTO_ALG_HMAC:
+		dump_sep_hmac_ctx((struct sep_ctx_hmac *)sep_ctx_p);
+		break;
+	default:
+		pr_debug("(Unsupported algorithm dump - %d)\n", alg);
+	}
+
+}
+#endif
+
+/**
+ * ctxmgr_sync_sep_ctx() - Sync. SeP context to device (flush from cache...)
+ * @ctx_info:	 User context info structure
+ * @mydev:	 Associated device context
+ *
+ * Returns void
+ */
+void ctxmgr_sync_sep_ctx(const struct client_crypto_ctx_info *ctx_info,
+			 struct device *mydev)
+{
+	size_t embedded_sep_ctx_offset =
+	    get_sep_ctx_offset(ctx_info->ctx_kptr->alg_class);
+	struct sep_ctx_cache_entry *embedded_sep_ctx_p =
+	    (struct sep_ctx_cache_entry *)
+	    (((unsigned long)ctx_info->ctx_kptr) + embedded_sep_ctx_offset);
+
+#ifdef DEBUG
+	if (ctx_info->sep_ctx_dma_addr == 0) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+	if (embedded_sep_ctx_offset == 0)
+		pr_err("Invalid alg. class for algorithm\n");
+#endif
+
+	/* Only the embedded SeP context requires sync (it is in user memory)
+	   Otherwise, it is a cache coherent DMA buffer.                      */
+	if (ctx_info->sep_ctx_kptr == embedded_sep_ctx_p) {
+		dma_sync_single_for_device(mydev, ctx_info->sep_ctx_dma_addr,
+					   SEP_CTX_SIZE, DMA_BIDIRECTIONAL);
+	}
+}
+
+/**
+ * ctxmgr_get_sep_ctx_dma_addr() - Return DMA address of SeP (FW) area of
+ *					the context
+ * @ctx_info:	 User context info structure
+ *
+ * Returns DMA address of SeP (FW) area of the context
+ */
+dma_addr_t ctxmgr_get_sep_ctx_dma_addr(const struct client_crypto_ctx_info
+				       *ctx_info)
+{
+#ifdef DEBUG
+	if (ctx_info->sep_ctx_dma_addr == 0) {
+		pr_err("Context not mapped\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+	return ctx_info->sep_ctx_dma_addr;
+}
+
+/**
+ * ctxmgr_sep_cache_create() - Create a SeP (FW) cache manager of given num.
+ *				of entries
+ * @num_of_entries:	 = Number of entries available in cache
+ *
+ * Returns void * handle (NULL on failure)
+ */
+void *ctxmgr_sep_cache_create(int num_of_entries)
+{
+	struct sep_ctx_cache *new_cache;
+	int i;
+
+	/* Allocate the sep_ctx_cache + additional entries beyond the one
+	 * that is included in the sep_ctx_cache structure */
+	new_cache = kmalloc(sizeof(struct sep_ctx_cache) +
+			    (num_of_entries - 1) *
+			    sizeof(struct ctxmgr_cache_entry), GFP_KERNEL);
+	if (new_cache == NULL) {
+		pr_err("Failed allocating SeP cache of %d entries\n",
+			    num_of_entries);
+		return NULL;
+	}
+
+	/* Initialize */
+	for (i = 0; i < num_of_entries; i++)
+		new_cache->entries[i].ctx_id.addr = CTX_ID_INVALID;
+
+	new_cache->cache_size = num_of_entries;
+
+	new_cache->lru_clk = 0;
+
+	return (void *)new_cache;
+}
+
+/**
+ * ctxmgr_sep_cache_destroy() - Destroy SeP (FW) cache manager object
+ * @sep_cache:	 = The cache object
+ *
+ * Returns void
+ */
+void ctxmgr_sep_cache_destroy(void *sep_cache)
+{
+	struct sep_ctx_cache *this_cache = (struct sep_ctx_cache *)sep_cache;
+
+	kfree(this_cache);
+}
+
+/**
+ * ctxmgr_sep_cache_get_size() - Get cache size (entries count)
+ * @sep_cache:	The cache object
+ *
+ * Returns int Number of cache entries available
+ */
+int ctxmgr_sep_cache_get_size(void *sep_cache)
+{
+	struct sep_ctx_cache *this_cache = sep_cache;
+
+	return this_cache->cache_size;
+}
+
+/**
+ * ctxmgr_sep_cache_alloc() - Allocate a cache entry of given SeP context cache
+ * @sep_cache:	 The cache object
+ * @ctx_id:	 The host crypto. context ID
+ * @load_required_p:	Pointed int is set to !0 if a cache load is required
+ *			(i.e., if item already loaded in cache it would be 0)
+ *
+ * Returns cache index
+ */
+int ctxmgr_sep_cache_alloc(void *sep_cache,
+			   struct crypto_ctx_uid ctx_id, int *load_required_p)
+{
+	struct sep_ctx_cache *this_cache = (struct sep_ctx_cache *)sep_cache;
+	int i;
+	int chosen_idx = 0;	/* first candidate... */
+
+	*load_required_p = 1;	/* until found assume a load is required */
+
+	/* First search for given ID or free/older entry  */
+	for (i = 0; i < this_cache->cache_size; i++) {
+		if (this_cache->entries[i].ctx_id.addr == ctx_id.addr
+		    && this_cache->entries[i].ctx_id.cntr == ctx_id.cntr) {
+			/* if found */
+			chosen_idx = i;
+			*load_required_p = 0;
+			break;
+		}
+		/* else... if no free entry, replace candidate with invalid
+		   or older entry */
+		if (this_cache->entries[chosen_idx].ctx_id.addr
+			!= CTX_ID_INVALID) {
+			if ((this_cache->entries[i].ctx_id.addr
+				== CTX_ID_INVALID) ||
+			    (this_cache->entries[chosen_idx].lru_time >
+			     this_cache->entries[i].lru_time)) {
+				/* Found free OR older entry */
+				chosen_idx = i;
+			}
+		}
+	}
+
+	/* Record allocation + update LRU "timestamp" */
+	this_cache->entries[chosen_idx].ctx_id.addr = ctx_id.addr;
+	this_cache->entries[chosen_idx].ctx_id.cntr = ctx_id.cntr;
+	this_cache->entries[chosen_idx].lru_time = this_cache->lru_clk++;
+
+#ifdef DEBUG
+	if (this_cache->lru_clk == 0xFFFFFFFF) {
+		pr_err("Reached lru_clk limit!\n");
+		SEP_DRIVER_BUG();
+		/* If this limit is found to be a practical real life
+		   case, a few workarounds may be used:
+		   1. Use a larger (64b) lru_clk
+		   2. Invalidate the whole cache before wrapping to 0
+		   3. Ignore this case - old contexts would persist over newer
+		   until they are all FINALIZEd and invalidated "manually".
+		   4. "shift down" existing timestamps so the lowest would be 0
+		   5. "pack" timestamps to be 0,1,2,... based on exisitng order
+		   and set lru_clk to the largest (which is the num. of
+		   entries)
+		 */
+	}
+#endif
+
+	return chosen_idx;
+}
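+
+/*
+ * Typical allocation flow (hypothetical caller, illustration only):
+ *
+ *	int load_required;
+ *	int idx = ctxmgr_sep_cache_alloc(cache, uid, &load_required);
+ *	if (load_required)
+ *		(instruct SeP to reload the context into cache slot idx)
+ *	ctxmgr_set_sep_cache_idx(ctx_info, idx);
+ */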
+
+/**
+ * ctxmgr_sep_cache_invalidate() - Invalidate cache entry for given context ID
+ * @sep_cache:	 The cache object
+ * @ctx_id:	 The host crypto. context ID
+ * @id_mask:	 A bit mask to be used when comparing the ID
+ *                (to be used for a set of entries from the same client)
+ *
+ * Returns void
+ */
+void ctxmgr_sep_cache_invalidate(void *sep_cache,
+				 struct crypto_ctx_uid ctx_id,
+				 u64 id_mask)
+{
+	struct sep_ctx_cache *this_cache = (struct sep_ctx_cache *)sep_cache;
+	int i;
+
+	/* Search for given ID */
+	for (i = 0; i < this_cache->cache_size; i++) {
+		if ((this_cache->entries[i].ctx_id.addr) == ctx_id.addr) {
+			/* When invalidating single, check also counter */
+			if (id_mask == CRYPTO_CTX_ID_SINGLE_MASK
+			    && this_cache->entries[i].ctx_id.cntr
+			       != ctx_id.cntr)
+				continue;
+			this_cache->entries[i].ctx_id.addr = CTX_ID_INVALID;
+		}
+	}
+
+}
diff --git a/drivers/staging/sep54/crypto_ctx_mgr.h b/drivers/staging/sep54/crypto_ctx_mgr.h
new file mode 100644
index 0000000..a65eaf2
--- /dev/null
+++ b/drivers/staging/sep54/crypto_ctx_mgr.h
@@ -0,0 +1,694 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _CRYPTO_CTX_MGR_H_
+#define _CRYPTO_CTX_MGR_H_
+
+#include "sep_ctx.h"
+#include "dx_driver_abi.h"
+#include "lli_mgr.h"
+
+/* The largest hash block size is for SHA512 - 1024 bits */
+#define HASH_BLK_SIZE_MAX (1024>>3)	/*octets */
+
+/* Unique ID for a user context */
+/* This value is made unique by concatenating session ptr (in kernel)
+   with global counter incremented on each INIT phase                */
+#define CTX_ID_INVALID ((u64)0)
+
+enum host_ctx_state {
+	CTX_STATE_UNINITIALIZED = 0,
+	/* When a context is uninitialized it can be any "garbage" since
+	   the context buffer is given from the user... */
+	CTX_STATE_INITIALIZED = 0x10000001,
+	/* INITIALIZED = Initialized */
+	CTX_STATE_PARTIAL_INIT = 0x10101011,
+	/* PARTIAL_INIT = Init. was done only on host. INIT on SeP was postponed
+	   - Requires INIT on next SeP operations. */
+};
+
+struct crypto_ctx_uid {
+	u64 addr;
+	u32 cntr;
+};
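+
+/* A UID is compared as an (addr, cntr) pair. ctxmgr_sep_cache_invalidate()
+   may mask the comparison with CRYPTO_CTX_ID_SINGLE_MASK to invalidate
+   either a single context or all contexts sharing the same addr. */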
+
+/* Algorithm family/class enumeration */
+enum crypto_alg_class {
+	ALG_CLASS_NONE = 0,
+	ALG_CLASS_SYM_CIPHER,
+	ALG_CLASS_AUTH_ENC,
+	ALG_CLASS_MAC,
+	ALG_CLASS_HASH,
+	ALG_CLASS_COMBINED,
+	ALG_CLASS_MAX = ALG_CLASS_HASH
+};
+
+/* The common fields at the start of a user context structure */
+#define HOST_CTX_COMMON						\
+		struct crypto_ctx_uid uid;					\
+		/* Context state (one of enum host_ctx_state) */	\
+		enum host_ctx_state state;			\
+		/* Determines the context specialization */	\
+		enum crypto_alg_class alg_class;		\
+		/* Cast the whole struct to the matching user_ctx_* struct */ \
+	/* When is_encrypted==true the algorithm properties are not */\
+	/* initialized and the contained sep_ctx is encrypted (as   */\
+	/* created in SeP)                                           */\
+		bool is_encrypted; \
+		u32 sess_id; \
+		struct sep_client_ctx *sctx
+
+/* SeP context segment of a context */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+/* Allocate cache line bytes before/after sep context to assure
+   its cache line does not enter the cache during its mapping to SeP */
+#define SEP_CTX_SEGMENT                                                        \
+	u8 reserved_before[L1_CACHE_BYTES];			       \
+	struct sep_ctx_cache_entry sep_ctx;                                    \
+	u8 reserved_after[L1_CACHE_BYTES]
+#else
+/* Cache is coherent - no need for "protection" margins */
+#define SEP_CTX_SEGMENT \
+	struct sep_ctx_cache_entry sep_ctx
+#endif
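+/* Illustrative layout for the non-coherent case (assuming 64B cache lines):
+ *
+ *	| 64B reserved_before | sep_ctx ... | 64B reserved_after |
+ *
+ * so a cache flush/invalidate on sep_ctx cannot touch neighboring fields. */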
+
+/* Generic host context*/
+struct host_crypto_ctx {
+	HOST_CTX_COMMON;
+};
+
+/* user_ctx specification for symmetric ciphers */
+struct host_crypto_ctx_sym_cipher {
+	HOST_CTX_COMMON;
+	struct dxdi_sym_cipher_props props;
+	SEP_CTX_SEGMENT;
+};
+
+/* user_ctx specification for authenticated encryption */
+struct host_crypto_ctx_auth_enc {
+	HOST_CTX_COMMON;
+	bool is_adata_processed;	/* Indicates whether adata was processed */
+	struct dxdi_auth_enc_props props;
+	SEP_CTX_SEGMENT;
+};
+
+/* Host data for hash block remainders */
+struct hash_block_remainder {
+	u16 size;		/* Octets available in data_blk_tail */
+	u8 data[HASH_BLK_SIZE_MAX] __aligned(8);
+	/* data_blk_tail holds the remainder of user data because the HW
+	   requires integral hash blocks for all but the last data block.
+	   It is aligned to 8 bytes in order to optimize HW access to it via
+	   the 64-bit AXI bus */
+};
+
+/* user_ctx specification for MAC algorithms */
+struct host_crypto_ctx_mac {
+	HOST_CTX_COMMON;
+	u64 client_data_size;	/* Sums up the data processed so far */
+	struct dxdi_mac_props props;
+	struct hash_block_remainder hmac_tail;
+	SEP_CTX_SEGMENT;
+};
+
+/* user_ctx specification for Hash algorithms */
+struct host_crypto_ctx_hash {
+	HOST_CTX_COMMON;
+	enum dxdi_hash_type hash_type;
+	struct hash_block_remainder hash_tail;
+	SEP_CTX_SEGMENT;
+};
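+/* Dispatch sketch (illustrative, not from the driver): the HOST_CTX_COMMON
+ * header allows inspecting alg_class first and then casting to the specific
+ * context type:
+ *
+ *	struct host_crypto_ctx *hctx = ...; (a mapped context)
+ *
+ *	if (hctx->alg_class == ALG_CLASS_HASH) {
+ *		struct host_crypto_ctx_hash *hash_ctx =
+ *			(struct host_crypto_ctx_hash *)hctx;
+ *		... use hash_ctx->hash_type ...
+ *	}
+ */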
+
+/**
+ * struct client_crypto_ctx_info - Meta data on the client application crypto
+ *					context buffer and its mapping
+ * @dev:	Device context of the context (DMA) mapping.
+ * @user_ptr:	address of current context in user space (if no kernel op.)
+ * @ctx_page:	Mapped user page where user_ptr is located
+ * @ctx_kptr:	Mapping context to kernel VA
+ * @sep_ctx_kptr:	Kernel VA of SeP context portion of the host context
+ *			(for async. operations, this may be outside of host
+ *			 context)
+ * @sep_ctx_dma_addr:	DMA address of SeP context
+ * @hash_tail_dma_addr:	DMA of host_ctx_hash:data_blk_tail
+ * @sep_cache_idx:	if >=0, saves the allocated sep cache entry index
+ */
+struct client_crypto_ctx_info {
+	struct device *dev;
+	u32 __user *user_ptr;	/* Context address in user space */
+	struct page *ctx_page;
+	struct host_crypto_ctx *ctx_kptr;
+	struct sep_ctx_cache_entry *sep_ctx_kptr;
+	dma_addr_t sep_ctx_dma_addr;
+	dma_addr_t hash_tail_dma_addr;
+	int sep_cache_idx;
+
+	int sess_id;
+	struct sep_client_ctx *sctx;
+};
+/* Macro to initialize the context info structure */
+#define USER_CTX_INFO_INIT(ctx_info_p)	\
+do {					\
+	memset((ctx_info_p), 0, sizeof(struct client_crypto_ctx_info)); \
+	(ctx_info_p)->sep_cache_idx = -1; /* 0 is a valid entry idx */ \
+} while (0)
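+/* Typical usage (a sketch; ctxmgr_map_user_ctx() is declared below):
+ *
+ *	struct client_crypto_ctx_info ctx_info;
+ *
+ *	USER_CTX_INFO_INIT(&ctx_info);
+ *	rc = ctxmgr_map_user_ctx(&ctx_info, mydev, alg_class, user_ctx_ptr);
+ */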
+
+#define SEP_CTX_CACHE_NULL_HANDLE NULL
+
+/**
+ * ctxmgr_get_ctx_size() - Get host context size for given algorithm class
+ * @alg_class:	The queried algorithm class
+ *
+ * Returns size_t Size in bytes of host context
+ */
+size_t ctxmgr_get_ctx_size(enum crypto_alg_class alg_class);
+
+/**
+ * ctxmgr_map_user_ctx() - Map given user context to kernel space + DMA
+ * @ctx_info:	 User context info structure
+ * @mydev:	 Associated device context
+ * @alg_class:	If !ALG_CLASS_NONE, consider context of given class for
+ *		size validation (used in uninitialized context mapping)
+ *		When set to ALG_CLASS_NONE, the alg_class field of the
+ *		host_ctx is used to verify mapped buffer size.
+ * @user_ctx_ptr: Pointer to user space context
+ *
+ * Returns int 0 for success
+ */
+int ctxmgr_map_user_ctx(struct client_crypto_ctx_info *ctx_info,
+			struct device *mydev,
+			enum crypto_alg_class alg_class,
+			u32 __user *user_ctx_ptr);
+
+/**
+ * ctxmgr_unmap_user_ctx() - Unmap given currently mapped user context
+ * @ctx_info:	 User context info structure
+ */
+void ctxmgr_unmap_user_ctx(struct client_crypto_ctx_info *ctx_info);
+
+/**
+ * ctxmgr_map_kernel_ctx() - Map given kernel context + clone SeP context into
+ *				Privately allocated DMA buffer
+ *				(required for async. ops. on the same context)
+ * @ctx_info:	Client crypto context info structure
+ * @mydev:	Associated device context
+ * @alg_class:	If !ALG_CLASS_NONE, consider context of given class for
+ *		size validation (used in uninitialized context mapping)
+ *		When set to ALG_CLASS_NONE, the alg_class field of the
+ *		host_crypto_ctx is used to verify mapped buffer size.
+ * @kernel_ctx_p:	Pointer to kernel space crypto context
+ * @sep_ctx_p:	Pointer to (private) SeP context. If !NULL the embedded sep
+ *		context is copied into this buffer.
+ *		Set to NULL to use the one embedded in host_crypto_ctx.
+ * @sep_ctx_dma_addr:	DMA address of private SeP context (if sep_ctx_p!=NULL)
+ *
+ * Returns int 0 for success
+ */
+int ctxmgr_map_kernel_ctx(struct client_crypto_ctx_info *ctx_info,
+			  struct device *mydev,
+			  enum crypto_alg_class alg_class,
+			  struct host_crypto_ctx *kernel_ctx_p,
+			  struct sep_ctx_cache_entry *sep_ctx_p,
+			  dma_addr_t sep_ctx_dma_addr);
+
+/**
+ * ctxmgr_unmap_kernel_ctx() - Unmap given currently mapped kernel context
+ *				(was mapped with map_kernel_ctx)
+ * @ctx_info:	 User context info structure
+ */
+void ctxmgr_unmap_kernel_ctx(struct client_crypto_ctx_info *ctx_info);
+
+/**
+ * ctxmgr_map2dev_hash_tail() - Map hash data tail buffer in the host context
+ *				for DMA to device
+ * @ctx_info:	User context info structure
+ * @mydev:	Associated device context
+ *
+ * Returns int 0 on success
+ */
+int ctxmgr_map2dev_hash_tail(struct client_crypto_ctx_info *ctx_info,
+			     struct device *mydev);
+/**
+ * ctxmgr_unmap2dev_hash_tail() - Unmap hash data tail buffer from DMA to device
+ * @ctx_info:	User context info structure
+ * @mydev:	Associated device context
+ *
+ */
+void ctxmgr_unmap2dev_hash_tail(struct client_crypto_ctx_info *ctx_info,
+				struct device *mydev);
+
+/**
+ * ctxmgr_set_ctx_state() - Set context state
+ * @ctx_info:	 User context info structure
+ * @state:	    State to set in context
+ *
+ * Returns void
+ */
+void ctxmgr_set_ctx_state(struct client_crypto_ctx_info *ctx_info,
+			  const enum host_ctx_state state);
+
+/**
+ * ctxmgr_get_ctx_state() - Get context state
+ * @ctx_info:	 User context info structure
+ *
+ * Returns Current context state
+ */
+enum host_ctx_state ctxmgr_get_ctx_state(const struct client_crypto_ctx_info
+					 *ctx_info);
+
+/**
+ * ctxmgr_set_ctx_id() - Allocate unique ID for (initialized) user context
+ * @ctx_info:	 Client crypto context info structure
+ * @ctx_id:	 The unique ID allocated for given context
+ *
+ * Allocate unique ID for (initialized) user context
+ * (Assumes invoked within session mutex so no need for counter protection)
+ */
+void ctxmgr_set_ctx_id(struct client_crypto_ctx_info *ctx_info,
+		       const struct crypto_ctx_uid ctx_id);
+
+/**
+ * ctxmgr_get_ctx_id() - Return the unique ID for current user context
+ * @ctx_info:	 User context info structure
+ *
+ * Returns Allocated ID (or CTX_ID_INVALID if none)
+ */
+struct crypto_ctx_uid ctxmgr_get_ctx_id(struct client_crypto_ctx_info
+					*ctx_info);
+
+/**
+ * ctxmgr_get_session_id() - Return the session ID of given context ID
+ * @ctx_info:	User context info structure
+ *
+ * Return the session ID of the given context ID.
+ * This may be used to validate the ID and verify that it was not tampered
+ * with in a manner that could allow access to a session of another process.
+ * Returns u64
+ */
+u64 ctxmgr_get_session_id(struct client_crypto_ctx_info *ctx_info);
+
+/**
+ * ctxmgr_get_alg_class() - Get algorithm class of context
+ *				(set during _init_ of a context)
+ * @ctx_info:	 User context info structure
+ *
+ * Returns Current algorithm class of context
+ */
+enum crypto_alg_class ctxmgr_get_alg_class(const struct client_crypto_ctx_info
+					   *ctx_info);
+
+/**
+ * ctxmgr_get_crypto_blk_size() - Get the crypto-block length of given context
+ *					in octets
+ * @ctx_info:	 User context info structure
+ *
+ * Returns u32 Crypto-block size in bytes, 0 if invalid/unsupported alg.
+ */
+u32 ctxmgr_get_crypto_blk_size(struct client_crypto_ctx_info *ctx_info);
+
+/**
+ * ctxmgr_is_valid_adata_size() - Validate additional/associated data size
+ * @ctx_info:	User context info structure
+ * @adata_size:	Size of the additional/associated data
+ *
+ * Validate the additional/associated data size for auth/enc algorithms.
+ * Returns bool
+ */
+bool ctxmgr_is_valid_adata_size(struct client_crypto_ctx_info *ctx_info,
+				unsigned long adata_size);
+
+/**
+ * ctxmgr_is_valid_size() - Validate given data unit size for given alg./mode
+ * @ctx_info:		User context info structure
+ * @data_unit_size:	Size of the given data unit
+ * @is_finalize:	True if this is the finalizing data unit
+ *
+ * Returns true if the size is valid.
+ */
+bool ctxmgr_is_valid_size(struct client_crypto_ctx_info *ctx_info,
+				    unsigned long data_unit_size,
+				    bool is_finalize);
+
+/**
+ * ctxmgr_get_sym_cipher_type() - Returns the sym cipher specific type.
+ * @ctx_info:	 The context info object of the sym cipher alg.
+ *
+ * Returns enum dxdi_sym_cipher_type The sym cipher type.
+ */
+enum dxdi_sym_cipher_type ctxmgr_get_sym_cipher_type(const struct
+						     client_crypto_ctx_info
+						     *ctx_info);
+
+/**
+ * ctxmgr_get_mac_type() - Returns the mac specific type.
+ * @ctx_info:	 The context info object of the mac alg.
+ *
+ * Returns enum dxdi_mac_type The mac type.
+ */
+enum dxdi_mac_type ctxmgr_get_mac_type(const struct client_crypto_ctx_info
+				       *ctx_info);
+
+/**
+ * ctxmgr_get_hash_type() - Returns the hash specific type.
+ * @ctx_info:	 The context info object of the hash alg.
+ *
+ * Returns enum dxdi_hash_type The hash type.
+ */
+enum dxdi_hash_type ctxmgr_get_hash_type(const struct client_crypto_ctx_info
+					 *ctx_info);
+
+/**
+ * ctxmgr_save_hash_blk_remainder() - Save hash block tail data in given
+ *	context. The data is taken from the save4next chunk of given client
+ *	buffer.
+ * @ctx_info:	 Client context info structure (HASH's or HMAC's)
+ * @client_dma_buf_p:	A client DMA buffer object. Data is taken from the
+ *			save4next chunk of this buffer.
+ * @append_data:	When true, given data is appended to existing
+ *
+ * Returns 0 on success
+ */
+int ctxmgr_save_hash_blk_remainder(struct client_crypto_ctx_info *ctx_info,
+				   struct client_dma_buffer *client_dma_buf_p,
+				   bool append_data);
+
+/**
+ * ctxmgr_get_hash_blk_remainder_buf() - Get DMA info for hash block remainder
+ *	buffer from given context
+ * @ctx_info:			User context info structure
+ * @hash_blk_remainder_dma_p:	Returned tail buffer DMA address
+ *
+ * Note: This function must be invoked only when tail_buf is mapped2dev
+ *	(using ctxmgr_map2dev_hash_tail)
+ * Returns u16 Number of valid bytes/octets in tail buffer
+ */
+u16 ctxmgr_get_hash_blk_remainder_buf(struct client_crypto_ctx_info
+					   *ctx_info,
+					   dma_addr_t *
+					   hash_blk_remainder_dma_p);
+
+/**
+ * ctxmgr_get_digest_or_mac() - Get the digest/MAC result when applicable
+ * @ctx_info:		User context info structure
+ * @digest_or_mac:	Pointer to digest/MAC buffer
+ *
+ * Returns the digest/MAC size
+ */
+u32 ctxmgr_get_digest_or_mac(struct client_crypto_ctx_info *ctx_info,
+				  u8 *digest_or_mac);
+
+/**
+ * ctxmgr_get_digest_or_mac_ptr() - Get the digest/MAC pointer in SeP context
+ * @ctx_info:		User context info structure
+ * @digest_or_mac_pp:	Returned pointer to digest/MAC buffer
+ *
+ * Returns the digest/MAC size.
+ * This function may be used by the caller to reference the result instead
+ * of always copying it.
+ */
+u32 ctxmgr_get_digest_or_mac_ptr(struct client_crypto_ctx_info *ctx_info,
+				      u8 **digest_or_mac_pp);
+
+/**
+ * ctxmgr_set_symcipher_iv_user() - Set IV of symcipher context given in
+ *					user space pointer
+ * @user_ctx_ptr:	 A user space pointer to the host context
+ * @iv_ptr:	 The IV to set
+ *
+ * Returns int
+ */
+int ctxmgr_set_symcipher_iv_user(u32 __user *user_ctx_ptr,
+				 u8 *iv_ptr);
+
+/**
+ * ctxmgr_get_symcipher_iv_user() - Read current "IV"
+ * @user_ctx_ptr:	A user space pointer to the host context
+ * @iv_ptr:		Where to return the read IV
+ *
+ * Read the current "IV" (the running block state - not the IV originally set
+ * at initialization).
+ * This function works directly over a user space context
+ * Returns int
+ */
+int ctxmgr_get_symcipher_iv_user(u32 __user *user_ctx_ptr,
+				 u8 *iv_ptr);
+
+/**
+ * ctxmgr_set_symcipher_iv() - Set IV for given block symcipher algorithm
+ * @ctx_info:	Context to update
+ * @iv:		New IV
+ *
+ * Returns int 0 if changed IV, -EINVAL for error
+ * (given cipher type does not have IV)
+ */
+int ctxmgr_set_symcipher_iv(struct client_crypto_ctx_info *ctx_info,
+			    u8 *iv);
+
+/**
+ * ctxmgr_get_symcipher_iv() - Return given cipher IV
+ * @ctx_info:	Context to query
+ * @iv_user:	The IV given by the user on last ctxmgr_set_symcipher_iv
+ * @iv_current:	The current IV state block
+ * @iv_size_p:	[I/O] The given buffers' size on input; actual IV size on return
+ *
+ * Return given cipher IV - the original IV given by the user and the current
+ * state "IV". The given IV buffers must be large enough to accommodate them.
+ * Returns int 0 on success, -ENOMEM if given iv_size is too small
+ */
+int ctxmgr_get_symcipher_iv(struct client_crypto_ctx_info *ctx_info,
+			    u8 *iv_user, u8 *iv_current,
+			    u8 *iv_size_p);
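+/* Caller sketch (hypothetical 16-byte buffers): both buffers must be at
+ * least as large as the algorithm IV:
+ *
+ *	u8 iv_user[16], iv_current[16];
+ *	u8 iv_size = sizeof(iv_user);
+ *
+ *	rc = ctxmgr_get_symcipher_iv(ctx_info, iv_user, iv_current, &iv_size);
+ *	(on success, iv_size holds the actual IV size)
+ */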
+
+/**
+ * ctxmgr_set_symcipher_direction() - Set the operation direction for given
+ *					symcipher context
+ * @ctx_info:		Context to update
+ * @dxdi_direction:	Requested cipher direction
+ *
+ * Returns int 0 on success
+ */
+int ctxmgr_set_symcipher_direction(struct client_crypto_ctx_info *ctx_info,
+				   enum dxdi_cipher_direction dxdi_direction);
+
+/**
+ * ctxmgr_get_symcipher_direction() - Return the operation direction of given
+ *					symcipher context
+ * @ctx_info:	Context to query
+ *
+ * Returns enum dxdi_cipher_direction (<0 on error)
+ */
+enum dxdi_cipher_direction ctxmgr_get_symcipher_direction(struct
+							  client_crypto_ctx_info
+							  *ctx_info);
+
+/**
+ * ctxmgr_set_symcipher_key() - Set a symcipher context key
+ * @ctx_info:	Context to update
+ * @key_size:	Size of key in bytes
+ * @key:	New key pointer
+ *
+ * Set a symcipher context key
+ * After invoking this function the context should be reinitialized by SeP
+ * (set its state to "partial init" if not done in this sequence)
+ * Returns int 0 on success, -EINVAL Invalid key len, -EPERM Forbidden/weak key
+ */
+int ctxmgr_set_symcipher_key(struct client_crypto_ctx_info *ctx_info,
+			     u8 key_size, const u8 *key);
+
+/**
+ * ctxmgr_init_symcipher_ctx_no_props() - Initialize symcipher context when full
+ *					props are not available, yet.
+ * @ctx_info:		Context to init.
+ * @cipher_type:	Cipher type for context
+ *
+ * Initialize symcipher context when full props are not available, yet.
+ * Later set_key and set_iv may update the context.
+ * Returns int 0 on success
+ */
+int ctxmgr_init_symcipher_ctx_no_props(struct client_crypto_ctx_info *ctx_info,
+				       enum dxdi_sym_cipher_type cipher_type);
+
+/**
+ * ctxmgr_init_symcipher_ctx() - Initialize symCipher context based on given
+ *				properties.
+ * @ctx_info:	 User context mapping info.
+ * @props:	 The initialization properties
+ * @postpone_init:	Return "true" if INIT on SeP should be postponed
+ *			to first processing (e.g, in AES-XTS)
+ * @error_info:	Error info
+ *
+ * Returns 0 on success, otherwise on error
+ */
+int ctxmgr_init_symcipher_ctx(struct client_crypto_ctx_info *ctx_info,
+			      struct dxdi_sym_cipher_props *props,
+			      bool *postpone_init, u32 *error_info);
+
+/**
+ * ctxmgr_init_auth_enc_ctx() - Initialize Authenticated Encryption class
+ *				context
+ * @ctx_info:	User context mapping info
+ * @props:	The initialization properties
+ * @error_info:	Error info
+ *
+ * Returns int
+ */
+int ctxmgr_init_auth_enc_ctx(struct client_crypto_ctx_info *ctx_info,
+			     struct dxdi_auth_enc_props *props,
+			     u32 *error_info);
+
+/**
+ * ctxmgr_init_mac_ctx() - Initialize context for MAC algorithm
+ * @ctx_info:	User context mapping info
+ * @props:	The MAC initialization properties
+ * @error_info:	Error info
+ *
+ * Returns int
+ */
+int ctxmgr_init_mac_ctx(struct client_crypto_ctx_info *ctx_info,
+			struct dxdi_mac_props *props, u32 *error_info);
+
+/**
+ * ctxmgr_init_hash_ctx() - Initialize hash context
+ * @ctx_info:	 User context mapping info.
+ * @hash_type:	 Assigned hash type
+ * @error_info:	Error info
+ *
+ * Returns int 0 on success, -EINVAL, -ENOSYS
+ */
+int ctxmgr_init_hash_ctx(struct client_crypto_ctx_info *ctx_info,
+			 enum dxdi_hash_type hash_type, u32 *error_info);
+
+/**
+ * ctxmgr_set_sep_cache_idx() - Set the index of this context in the sep_cache
+ * @ctx_info:	 User context info structure
+ * @sep_cache_idx:	 The allocated index in SeP cache for this context
+ *
+ * Returns void
+ */
+void ctxmgr_set_sep_cache_idx(struct client_crypto_ctx_info *ctx_info,
+			      int sep_cache_idx);
+
+/**
+ * ctxmgr_get_sep_cache_idx() - Get the index of this context in the sep_cache
+ * @ctx_info:	 User context info structure
+ *
+ * Returns The allocated index in SeP cache for this context
+ */
+int ctxmgr_get_sep_cache_idx(struct client_crypto_ctx_info *ctx_info);
+
+#ifdef DEBUG
+/**
+ * ctxmgr_dump_sep_ctx() - Dump SeP context data
+ * @ctx_info:	User context info structure
+ *
+ */
+void ctxmgr_dump_sep_ctx(const struct client_crypto_ctx_info *ctx_info);
+#else
+#define ctxmgr_dump_sep_ctx(ctx_info) do {} while (0)
+#endif /*DEBUG*/
+/**
+ * ctxmgr_sync_sep_ctx() - Sync. SeP context to device (flush from cache...)
+ * @ctx_info:	 User context info structure
+ * @mydev:	 Associated device context
+ *
+ * Returns void
+ */
+void ctxmgr_sync_sep_ctx(const struct client_crypto_ctx_info *ctx_info,
+			 struct device *mydev);
+
+/**
+ * ctxmgr_get_sep_ctx_dma_addr() - Return DMA address of SeP (FW) area of the context
+ * @ctx_info:	 User context info structure
+ *
+ * Returns DMA address of SeP (FW) area of the context
+ */
+dma_addr_t ctxmgr_get_sep_ctx_dma_addr(const struct client_crypto_ctx_info
+				       *ctx_info);
+
+/******************************
+ * SeP context cache functions
+ ******************************/
+
+/**
+ * ctxmgr_sep_cache_create() - Create a SeP (FW) cache manager of given num. of
+ *				entries
+ * @num_of_entries:	Number of entries available in the cache
+ *
+ * Returns void * handle (NULL on failure)
+ */
+void *ctxmgr_sep_cache_create(int num_of_entries);
+
+/**
+ * ctxmgr_sep_cache_destroy() - Destroy SeP (FW) cache manager object
+ * @sep_cache:	The cache object
+ *
+ * Returns void
+ */
+void ctxmgr_sep_cache_destroy(void *sep_cache);
+
+/**
+ * ctxmgr_sep_cache_get_size() - Get cache size (entries count)
+ * @sep_cache:	The cache object
+ *
+ * Returns int Number of cache entries available
+ */
+int ctxmgr_sep_cache_get_size(void *sep_cache);
+
+/**
+ * ctxmgr_sep_cache_alloc() - Allocate a cache entry of given SeP context cache
+ * @sep_cache:	 The cache object
+ * @ctx_id:	 The user context ID
+ * @load_required_p:	Pointed int is set to !0 if a cache load is required
+ *			(i.e., if item already loaded in cache it would be 0)
+ *
+ * Returns cache index
+ */
+int ctxmgr_sep_cache_alloc(void *sep_cache,
+			   struct crypto_ctx_uid ctx_id, int *load_required_p);
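+/* Allocation sketch: callers would load the context into SeP only when it is
+ * not already resident, per the @load_required_p semantics above:
+ *
+ *	int load_required;
+ *	int idx = ctxmgr_sep_cache_alloc(sep_cache, ctx_id, &load_required);
+ *	if (load_required)
+ *		... dispatch a context load for this entry ...
+ */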
+
+/**
+ * ctxmgr_sep_cache_invalidate() - Invalidate cache entry for given context ID
+ * @sep_cache:	The cache object
+ * @ctx_id:	The host crypto. context ID
+ * @id_mask:	A bit mask to be used when comparing the ID
+ *		(to be used for a set of entries from the same client)
+ *
+ * Returns void
+ */
+void ctxmgr_sep_cache_invalidate(void *sep_cache,
+				 struct crypto_ctx_uid ctx_id,
+				 u64 id_mask);
+
+#endif /*_CRYPTO_CTX_MGR_H_*/
diff --git a/drivers/staging/sep54/crypto_hwk.c b/drivers/staging/sep54/crypto_hwk.c
new file mode 100644
index 0000000..32d999c
--- /dev/null
+++ b/drivers/staging/sep54/crypto_hwk.c
@@ -0,0 +1,419 @@
+/*
+ *  Copyright(c) 2012-2013 Intel Corporation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ *
+ *  You should have received a copy of the GNU General Public License along with
+ *  this program; if not, write to the Free Software Foundation, Inc., 59
+ *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) "sep_hwk: " fmt
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <linux/highmem.h>
+
+#include "dx_driver.h"
+#include "sep_sysfs.h"
+#include "sep_power.h"
+#include "dx_sepapp_kapi.h"
+
+#define HWK_APP_UUID "INTEL HWK 000001"
+#define HWK_CMD_CRYPTO 8
+
+struct hwk_context {
+	struct sep_client_ctx *sctx;
+	u32 sess_id, key_id;
+};
+
+static inline void hwk_pm_runtime_get(void)
+{
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_get();
+#endif
+}
+
+static inline void hwk_pm_runtime_put(void)
+{
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_put();
+#endif
+}
+
+static int hwk_ctx_init(struct crypto_tfm *tfm)
+{
+	struct hwk_context *hctx = crypto_tfm_ctx(tfm);
+	u8 uuid[16] = HWK_APP_UUID;
+	enum dxdi_sep_module ret;
+	int rc;
+
+	hwk_pm_runtime_get();
+
+	hctx->sctx = dx_sepapp_context_alloc();
+	if (!hctx->sctx) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	pr_debug("%s: opening session\n", __func__);
+	rc = dx_sepapp_session_open(hctx->sctx, uuid, 0, NULL, NULL,
+				    &hctx->sess_id, &ret);
+	pr_debug("%s: %d: %p %d\n", __func__, rc, hctx->sctx, hctx->sess_id);
+	if (rc != 0)
+		dx_sepapp_context_free(hctx->sctx);
+
+out:
+	hwk_pm_runtime_put();
+
+	return rc;
+}
+
+static void hwk_ctx_cleanup(struct crypto_tfm *tfm)
+{
+	struct hwk_context *hctx = crypto_tfm_ctx(tfm);
+
+	pr_debug("%s: %p %d\n", __func__, hctx->sctx, hctx->sess_id);
+	if (dx_sepapp_session_close(hctx->sctx, hctx->sess_id))
+		BUG();
+	dx_sepapp_context_free(hctx->sctx);
+	pr_debug("%s: session closed\n", __func__);
+}
+
+static int hwk_set_key(struct crypto_ablkcipher *tfm,
+			     const u8 *key, unsigned int keylen)
+{
+	struct hwk_context *hctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+
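+	/* The "key" buffer carries a 4-byte hardware key ID rather than
+	   raw key material; keylen is not inspected */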
+	hctx->key_id = *((u32 *)key);
+	pr_debug("%s: key_id=%d\n", __func__, hctx->key_id);
+	return 0;
+}
+
+#if defined(HWK_ST_DUMP_BUF) || defined(HWK_DUMP_BUF)
+static void hwk_dump_buf(u8 *buf, const char *buf_name, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		if (i % 64 == 0)
+			printk("\n%s: ", buf_name);
+		printk("%02x", buf[i]);
+	}
+	printk("\n");
+}
+#endif
+
+#ifdef HWK_DUMP_BUF
+static void hwk_dump_sg(struct scatterlist *sg, const char *buf_name)
+{
+	u8 *buf = kmap(sg_page(sg));
+	hwk_dump_buf(buf + sg->offset, buf_name, sg->length);
+	kunmap(sg_page(sg));
+}
+#endif
+
+static int hwk_process(struct ablkcipher_request *req, bool encrypt)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct hwk_context *hctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+	struct dxdi_sepapp_kparams p;
+	enum dxdi_sep_module ret_origin;
+	struct scatterlist iv_sg;
+	struct page *iv_page;
+	int rc;
+
+	iv_page = virt_to_page(req->info);
+	sg_init_table(&iv_sg, 1);
+	sg_set_page(&iv_sg, iv_page, SEP_AES_IV_SIZE,
+		    (unsigned long)req->info % PAGE_SIZE);
+
+#ifdef HWK_DUMP_BUF
+	hwk_dump_buf(req->info, "iv", SEP_AES_IV_SIZE);
+	hwk_dump_sg(&iv_sg, "iv");
+	hwk_dump_sg(req->src, "src");
+#endif
+
+	memset(&p, 0, sizeof(p));
+
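+	/* HWK_CMD_CRYPTO parameters: [0] key ID + packed size/direction,
+	   [1] IV (to device), [2] source data, [3] destination buffer */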
+	p.params_types[0] = DXDI_SEPAPP_PARAM_VAL;
+	p.params[0].val.data[0] = hctx->key_id;
+	p.params[0].val.data[1] = req->nbytes | (encrypt << 16);
+	p.params[0].val.copy_dir = DXDI_DATA_TO_DEVICE;
+
+	p.params_types[1] = DXDI_SEPAPP_PARAM_MEMREF;
+	p.params[1].kmemref.dma_direction = DXDI_DATA_TO_DEVICE;
+	p.params[1].kmemref.sgl = &iv_sg;
+	p.params[1].kmemref.nbytes = SEP_AES_IV_SIZE;
+
+	p.params_types[2] = DXDI_SEPAPP_PARAM_MEMREF;
+	p.params[2].kmemref.dma_direction = DXDI_DATA_TO_DEVICE;
+	p.params[2].kmemref.sgl = req->src;
+	p.params[2].kmemref.nbytes = req->nbytes;
+
+	p.params_types[3] = DXDI_SEPAPP_PARAM_MEMREF;
+	p.params[3].kmemref.dma_direction = DXDI_DATA_FROM_DEVICE;
+	p.params[3].kmemref.sgl = req->dst;
+	p.params[3].kmemref.nbytes = req->nbytes;
+
+	pr_debug("%s: size=%d dir=%d\n", __func__, req->nbytes, encrypt);
+	rc = dx_sepapp_command_invoke(hctx->sctx, hctx->sess_id,
+				      HWK_CMD_CRYPTO, &p, &ret_origin);
+	pr_debug("%s: done: %d\n", __func__, rc);
+
+	if (rc != 0) {
+		pr_err("%s: error invoking command %d: %x (ret_origin= %x)\n",
+			__func__, HWK_CMD_CRYPTO, rc, ret_origin);
+		return -EINVAL;
+	}
+
+#ifdef HWK_DUMP_BUF
+	hwk_dump_sg(req->dst, "dst");
+#endif
+
+	return rc;
+}
+
+static int hwk_encrypt(struct ablkcipher_request *req)
+{
+	return hwk_process(req, true);
+}
+
+static int hwk_decrypt(struct ablkcipher_request *req)
+{
+	return hwk_process(req, false);
+}
+
+static struct crypto_alg hwk_alg = {
+	.cra_priority = 0,
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_ctxsize = sizeof(struct hwk_context),
+	.cra_alignmask = 0, /* Cannot use this due to bug in kernel */
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_name = "cbchk(aes)",
+	.cra_driver_name = MODULE_NAME "-aes-cbchk",
+	.cra_blocksize = SEP_AES_BLOCK_SIZE,
+	.cra_u = {
+		.ablkcipher = {
+			.min_keysize = SEP_AES_256_BIT_KEY_SIZE,
+			.max_keysize = SEP_AES_256_BIT_KEY_SIZE,
+			.ivsize = SEP_AES_IV_SIZE,
+			.setkey = hwk_set_key,
+			.encrypt = hwk_encrypt,
+			.decrypt = hwk_decrypt,
+		},
+	},
+	.cra_init = hwk_ctx_init,
+	.cra_exit = hwk_ctx_cleanup
+};
+
+int hwk_init(void)
+{
+	int rc = crypto_register_alg(&hwk_alg);
+	if (rc != 0)
+		pr_err("failed to register %s\n", hwk_alg.cra_name);
+	return rc;
+}
+
+void hwk_fini(void)
+{
+	crypto_unregister_alg(&hwk_alg);
+}
+
+#ifdef SEP_HWK_UNIT_TEST
+enum hwk_self_test {
+	HWK_ST_NOT_STARTED = 0,
+	HWK_ST_RUNNING,
+	HWK_ST_SUCCESS,
+	HWK_ST_ERROR
+};
+
+static enum hwk_self_test hwk_st_status = HWK_ST_NOT_STARTED;
+static const char * const hwk_st_strings[] = {
+	"not started",
+	"running",
+	"success",
+	"error"
+};
+
+ssize_t sys_hwk_st_show(struct kobject *kobj, struct kobj_attribute *attr,
+			char *buf)
+{
+	return sprintf(buf, "%s\n", hwk_st_strings[hwk_st_status]);
+}
+
+struct hwk_st_op_result {
+	struct completion completion;
+	int rc;
+};
+
+static void hwk_st_op_complete(struct crypto_async_request *req, int rc)
+{
+	struct hwk_st_op_result *hr = req->data;
+
+	if (rc == -EINPROGRESS)
+		return;
+
+	hr->rc = rc;
+	complete(&hr->completion);
+}
+
+static int hwk_st_do_op(struct ablkcipher_request *req, struct page *src,
+		struct page *dst, bool enc)
+{
+	struct scatterlist src_sg, dst_sg;
+	struct hwk_st_op_result hr = { .rc = 0 };
+	char iv[SEP_AES_IV_SIZE] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+				     12, 13, 14, 15 };
+	int ret;
+
+	init_completion(&hr.completion);
+	ablkcipher_request_set_callback(req,
+			CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+			hwk_st_op_complete, &hr);
+
+	sg_init_table(&src_sg, 1);
+	sg_set_page(&src_sg, src, PAGE_SIZE, 0);
+	sg_init_table(&dst_sg, 1);
+	sg_set_page(&dst_sg, dst, PAGE_SIZE, 0);
+	ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, PAGE_SIZE, iv);
+
+	pr_info("%s: submiting %s op..\n", __func__, enc ? "enc" : "dec");
+	if (enc)
+		ret = crypto_ablkcipher_encrypt(req);
+	else
+		ret = crypto_ablkcipher_decrypt(req);
+	pr_info("%s: op submitted\n", __func__);
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		wait_for_completion(&hr.completion);
+		ret = hr.rc;
+	}
+	pr_info("%s: op completed\n", __func__);
+
+	return ret;
+}
+
+ssize_t sys_hwk_st_start(struct kobject *kobj, struct kobj_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct ablkcipher_request *req;
+	struct page *src, *enc, *dec;
+	struct crypto_ablkcipher *acipher;
+	char *tmp, *tmp2, *tmp3;
+	int ret = -EINVAL, i;
+	u32 hwk_id;
+
+	if (hwk_st_status == HWK_ST_RUNNING)
+		return count;
+
+	hwk_st_status = HWK_ST_RUNNING;
+
+	ret = kstrtouint(buf, 10, &hwk_id);
+	if (ret) {
+		pr_err("bad hardware key id: %d\n", ret);
+		goto out;
+	}
+
+	ret = -ENOMEM;
+	src = alloc_page(GFP_KERNEL);
+	if (!src) {
+		pr_err("failed to allocate src page\n");
+		goto out;
+	}
+	enc = alloc_page(GFP_KERNEL);
+	if (!enc) {
+		pr_err("failed to allocate enc page\n");
+		goto out_free_src;
+	}
+	dec = alloc_page(GFP_KERNEL);
+	if (!dec) {
+		pr_err("failed to allocate dec page\n");
+		goto out_free_enc;
+	}
+
+	acipher = crypto_alloc_ablkcipher("cbchk(aes)", 0, 0);
+	if (IS_ERR(acipher)) {
+		pr_err("error allocating cipher: %ld\n", PTR_ERR(acipher));
+		ret = -EINVAL;
+		goto out_free_dec;
+	}
+
+	tmp = kmap(src);
+	for (i = 0; i < PAGE_SIZE; i++)
+		tmp[i] = i;
+	kunmap(src);
+
+	crypto_ablkcipher_set_flags(acipher, CRYPTO_TFM_REQ_WEAK_KEY);
+
+	pr_debug("setting hardware key %d\n", hwk_id);
+	ret = crypto_ablkcipher_setkey(acipher, (u8 *)&hwk_id, sizeof(hwk_id));
+	if (ret) {
+		pr_err("error setting hardware key: %d\n", ret);
+		goto out_free_cipher;
+	}
+
+	req = ablkcipher_request_alloc(acipher, GFP_NOFS);
+	if (!req) {
+		ret = -EINVAL;
+		pr_err("failed to allocate cipher request\n");
+		goto out_free_cipher;
+	}
+
+	ret = hwk_st_do_op(req, src, enc, true);
+	if (ret) {
+		pr_err("encryption failed: %d\n", ret);
+		goto out_free_req;
+	}
+
+	ret = hwk_st_do_op(req, enc, dec, false);
+	if (ret) {
+		pr_err("decryption failed: %d\n", ret);
+		goto out_free_req;
+	}
+
+	tmp = kmap(src); tmp2 = kmap(enc); tmp3 = kmap(dec);
+#ifdef HWK_ST_DUMP_BUF
+	hwk_dump_buf(tmp, "src", PAGE_SIZE);
+	hwk_dump_buf(tmp2, "enc", PAGE_SIZE);
+	hwk_dump_buf(tmp3, "dec", PAGE_SIZE);
+#endif
+	for (i = 0; i < PAGE_SIZE; i++) {
+		if (tmp[i] != tmp3[i]) {
+			ret = -EINVAL;
+			break;
+		}
+	}
+	kunmap(src); kunmap(enc); kunmap(dec);
+
+	if (ret)
+		pr_err("dec != src\n");
+
+out_free_req:
+	ablkcipher_request_free(req);
+out_free_cipher:
+	crypto_free_ablkcipher(acipher);
+out_free_dec:
+	__free_pages(dec, 0);
+out_free_enc:
+	__free_pages(enc, 0);
+out_free_src:
+	__free_pages(src, 0);
+out:
+	if (ret)
+		hwk_st_status = HWK_ST_ERROR;
+	else
+		hwk_st_status = HWK_ST_SUCCESS;
+	return count;
+}
+#endif
diff --git a/drivers/staging/sep54/desc_mgr.c b/drivers/staging/sep54/desc_mgr.c
new file mode 100644
index 0000000..1aac086
--- /dev/null
+++ b/drivers/staging/sep54/desc_mgr.c
@@ -0,0 +1,1234 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_DESC_MGR
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include "dx_driver.h"
+#include "dx_bitops.h"
+#include "sep_log.h"
+#include "sep_sw_desc.h"
+#include "crypto_ctx_mgr.h"
+#include "sep_sysfs.h"
+/* Registers definitions from shared/hw/include */
+#include "dx_reg_base_host.h"
+#include "dx_host.h"
+#define DX_CC_HOST_VIRT	/* must be defined before including dx_cc_regs.h */
+#include "dx_cc_regs.h"
+#include "sep_power.h"
+#include "desc_mgr.h"
+#include "dx_init_cc_abi.h"
+
+/* Queue buffer log(size in bytes) */
+#define SEP_SW_DESC_Q_MEM_SIZE_LOG 12	/*4KB */
+#define SEP_SW_DESC_Q_MEM_SIZE (1 << SEP_SW_DESC_Q_MEM_SIZE_LOG)
+#define WORD_SIZE_LOG 2		/*32b=4B=2^2 */
+/* Number of entries (descriptors in a queue) */
+#define SEP_DESC_Q_ENTRIES_NUM_LOG \
+	(SEP_SW_DESC_Q_MEM_SIZE_LOG - WORD_SIZE_LOG - SEP_SW_DESC_WORD_SIZE_LOG)
+#define SEP_DESC_Q_ENTRIES_NUM (1 << SEP_DESC_Q_ENTRIES_NUM_LOG)
+#define SEP_DESC_Q_ENTRIES_MASK BITMASK(SEP_DESC_Q_ENTRIES_NUM_LOG)
+
+/* This watermark is used to initiate dispatching after the queue entered
+   the FULL state, in order to avoid interrupt flooding at SeP */
+#define SEP_DESC_Q_WATERMARK_MARGIN ((SEP_DESC_Q_ENTRIES_NUM)/4)
+
+/* Convert from a descriptor counter to an index in the descriptors array */
+#define GET_DESC_IDX(cntr) ((cntr) & SEP_DESC_Q_ENTRIES_MASK)
+#define GET_DESC_PTR(q_p, idx)				\
+	((struct sep_sw_desc *)((q_p)->q_base_p +	\
+				(idx << SEP_SW_DESC_WORD_SIZE_LOG)))
+#define GET_Q_PENDING_DESCS(q_p) ((q_p)->sent_cntr - (q_p)->completed_cntr)
+#define GET_Q_FREE_DESCS(q_p) \
+	 (SEP_DESC_Q_ENTRIES_NUM - GET_Q_PENDING_DESCS(q_p))
+#define IS_Q_FULL(q_p) (GET_Q_FREE_DESCS(q_p) == 0)
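+/* The sent/completed counters are free-running u32 values, so the macros
+   above rely on modular arithmetic, e.g. (illustrative values):
+	sent_cntr = 0x00000005, completed_cntr = 0xFFFFFFFE
+	GET_Q_PENDING_DESCS() == 0x00000005 - 0xFFFFFFFE == 7 (mod 2^32)
+   hence the pending count remains correct across counter wrap-around. */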
+
+/* LUT for GPRs registers offsets (to be added to cc_regs_base) */
+static const unsigned long host_to_sep_gpr_offset[] = {
+	DX_CC_REG_OFFSET(HOST, HOST_SEP_GPR0),
+	DX_CC_REG_OFFSET(HOST, HOST_SEP_GPR1),
+	DX_CC_REG_OFFSET(HOST, HOST_SEP_GPR2),
+	DX_CC_REG_OFFSET(HOST, HOST_SEP_GPR3),
+};
+
+static const unsigned long sep_to_host_gpr_offset[] = {
+	DX_CC_REG_OFFSET(HOST, SEP_HOST_GPR0),
+	DX_CC_REG_OFFSET(HOST, SEP_HOST_GPR1),
+	DX_CC_REG_OFFSET(HOST, SEP_HOST_GPR2),
+	DX_CC_REG_OFFSET(HOST, SEP_HOST_GPR3),
+};
+
+/**
+ * struct descs_backlog_item - Item of the queue descs_backlog_queue
+ */
+struct descs_backlog_item {
+	struct list_head list;
+	struct sep_sw_desc desc;
+};
+
+/**
+ * struct descs_backlog_queue - Queue of backlog descriptors
+ * @list:		List head item
+ * @cur_q_len:		Current number of entries in the backlog_q
+ * @backlog_items_pool:	Memory pool for allocating elements of this queue
+ * @backlog_items_pool_name:	Pool name string for kmem_cache object
+ */
+struct descs_backlog_queue {
+	struct list_head list;
+	unsigned int cur_q_len;
+	struct kmem_cache *backlog_items_pool;
+	char backlog_items_pool_name[24];
+};
+
+/**
+ * struct desc_q - Descriptor queue object
+ * @qid:		The associated software queue ID
+ * @qstate:		Operational state of the queue
+ * @gpr_to_sep:		Pointer to host-to-sep GPR for this queue (requests)
+ * @gpr_from_sep:	Pointer to sep-to-host GPR for this queue (completion)
+ * @qlock:		Protect data structure in non-interrupt context
+ * @q_base_p:		The base address of the descriptors cyclic queue buffer
+ * @q_base_dma:		The DMA address for q_base_p
+ * @sent_cntr:		Sent descriptors counter
+ * @completed_cntr:	Completed descriptors counter as reported by SeP
+ * @idle_jiffies:	jiffies value when the queue became idle (empty)
+ * @backlog_q:		Queue of backlog descriptors - pending to be dispatched
+ *			into the descriptors queue (were not dispatched because
+ *			it was full or in "sleep" state)
+ * @backlog_work:	Work task for handling/enqueuing backlog descriptors
+ * @enqueue_time:	Array to save descriptor start [ns] per descriptor
+ */
+struct desc_q {
+	int qid;
+	enum desc_q_state qstate;
+	void __iomem *gpr_to_sep;
+	void __iomem *gpr_from_sep;
+	struct queue_drvdata *drvdata;
+	struct mutex qlock;
+	u32 *q_base_p;
+	dma_addr_t q_base_dma;
+	u32 sent_cntr;
+	u32 completed_cntr;
+	unsigned long idle_jiffies;
+	struct descs_backlog_queue backlog_q;
+	struct work_struct backlog_work;
+	unsigned long long *enqueue_time;
+};
+
+static uintptr_t cookies[SEP_DESC_Q_ENTRIES_NUM];
+DEFINE_MUTEX(cookie_lock);
+
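+/* Store op_ctx in a free slot of the cookies table and return its index;
+ * when no slot is free, SEP_DESC_Q_ENTRIES_NUM is returned (an out-of-range
+ * index which callers presumably treat as a failure). */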
+u32 add_cookie(uintptr_t op_ctx)
+{
+	u32 i;
+
+	mutex_lock(&cookie_lock);
+	for (i = 0; i < SEP_DESC_Q_ENTRIES_NUM; i++) {
+		if (cookies[i] == 0) {
+			cookies[i] = op_ctx;
+			break;
+		}
+	}
+	mutex_unlock(&cookie_lock);
+
+	return i;
+}
+
+void delete_cookie(u32 index)
+{
+	mutex_lock(&cookie_lock);
+	cookies[index] = 0;
+	mutex_unlock(&cookie_lock);
+}
+
+void delete_context(uintptr_t op_ctx)
+{
+	u32 i;
+
+	mutex_lock(&cookie_lock);
+	for (i = 0; i < SEP_DESC_Q_ENTRIES_NUM; i++) {
+		if (cookies[i] == op_ctx) {
+			cookies[i] = 0;
+			break;
+		}
+	}
+	mutex_unlock(&cookie_lock);
+}
+
+uintptr_t get_cookie(u32 index)
+{
+	return cookies[index];
+}
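+/* Round-trip sketch for the cookie table (illustrative):
+ *
+ *	u32 idx = add_cookie((uintptr_t)op_ctx);
+ *	... dispatch a descriptor carrying idx; later, on completion: ...
+ *	struct sep_op_ctx *ctx = (struct sep_op_ctx *)get_cookie(idx);
+ *	delete_cookie(idx);
+ */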
+
+#ifdef DEBUG
+static void dump_desc(const struct sep_sw_desc *desc_p);
+#else
+#define dump_desc(desc_p) do {} while (0)
+#endif /*DEBUG*/
+static int backlog_q_init(struct desc_q *q_p);
+static void backlog_q_cleanup(struct desc_q *q_p);
+static void backlog_q_process(struct work_struct *work);
+
+/**
+ * desc_q_create() - Create descriptors queue object
+ * @qid:	 The queue ID (index)
+ * @drvdata:	 The associated queue driver data
+ * @state: Current state of sep
+ *
+ * Returns Allocated queue object handle (DESC_Q_INVALID_HANDLE for failure)
+ */
+void *desc_q_create(int qid, struct queue_drvdata *drvdata, int state)
+{
+	struct device *dev = drvdata->sep_data->dev;
+	void __iomem *cc_regs_base = drvdata->sep_data->cc_base;
+	struct desc_q *new_q_p;
+
+	new_q_p = kzalloc(sizeof(struct desc_q), GFP_KERNEL);
+	if (unlikely(new_q_p == NULL)) {
+		pr_err("Q%d: Failed allocating %zu B for new_q\n",
+			    qid, sizeof(struct desc_q));
+		goto desc_q_create_failed;
+	}
+
+	/* Initialize fields */
+	mutex_init(&new_q_p->qlock);
+	new_q_p->drvdata = drvdata;
+	new_q_p->qid = qid;
+	new_q_p->gpr_to_sep = cc_regs_base + host_to_sep_gpr_offset[qid];
+	new_q_p->gpr_from_sep = cc_regs_base + sep_to_host_gpr_offset[qid];
+	new_q_p->sent_cntr = 0;
+	new_q_p->completed_cntr = 0;
+	new_q_p->idle_jiffies = jiffies;
+
+	new_q_p->q_base_p = dma_alloc_coherent(dev, SEP_SW_DESC_Q_MEM_SIZE,
+					       &new_q_p->q_base_dma,
+					       GFP_KERNEL);
+	if (unlikely(new_q_p->q_base_p == NULL)) {
+		pr_err("Q%d: Failed allocating %d B for desc buffer\n",
+			    qid, SEP_SW_DESC_Q_MEM_SIZE);
+		goto desc_q_create_failed;
+	}
+
+	new_q_p->enqueue_time = kmalloc(SEP_DESC_Q_ENTRIES_NUM *
+					sizeof(u64), GFP_KERNEL);
+	if (new_q_p->enqueue_time == NULL) {
+		pr_err("Q%d: Failed allocating time stats array\n", qid);
+		goto desc_q_create_failed;
+	}
+
+	if (backlog_q_init(new_q_p) != 0) {
+		pr_err("Q%d: Failed creating backlog queue\n", qid);
+		goto desc_q_create_failed;
+	}
+	INIT_WORK(&new_q_p->backlog_work, backlog_q_process);
+
+	/* Initialize the respective GPR before SeP is initialized.
+	   Required because the GPR may be non-zero as a result of CC-init
+	   sequence leftovers */
+	if (state != DX_SEP_STATE_DONE_FW_INIT)
+		WRITE_REGISTER(new_q_p->gpr_to_sep, new_q_p->sent_cntr);
+
+	new_q_p->qstate = DESC_Q_ACTIVE;
+	return (void *)new_q_p;
+
+	/* Error cases cleanup */
+ desc_q_create_failed:
+	if (new_q_p != NULL) {
+		kfree(new_q_p->enqueue_time);
+		if (new_q_p->q_base_p != NULL)
+			dma_free_coherent(dev, SEP_SW_DESC_Q_MEM_SIZE,
+					  new_q_p->q_base_p,
+					  new_q_p->q_base_dma);
+		mutex_destroy(&new_q_p->qlock);
+		kfree(new_q_p);
+	}
+	return DESC_Q_INVALID_HANDLE;
+}
+
+/**
+ * desc_q_destroy() - Destroy descriptors queue object (free resources)
+ * @q_h:	 The queue object handle
+ *
+ */
+void desc_q_destroy(void *q_h)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	struct device *dev = q_p->drvdata->sep_data->dev;
+
+	if (q_p->sent_cntr != q_p->completed_cntr) {
+		pr_err(
+			    "Q%d: destroyed while there are outstanding descriptors\n",
+			    q_p->qid);
+	}
+	backlog_q_cleanup(q_p);
+	kfree(q_p->enqueue_time);
+	dma_free_coherent(dev, SEP_SW_DESC_Q_MEM_SIZE,
+			  q_p->q_base_p, q_p->q_base_dma);
+	mutex_destroy(&q_p->qlock);
+	kfree(q_p);
+}
+
+/**
+ * desc_q_set_state() - Set queue state (SLEEP or ACTIVE)
+ * @q_h:	The queue object handle
+ * @state:	The requested state
+ */
+int desc_q_set_state(void *q_h, enum desc_q_state state)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	int rc = 0;
+
+#ifdef DEBUG
+	if ((q_p->qstate != DESC_Q_ACTIVE) && (q_p->qstate != DESC_Q_ASLEEP)) {
+		pr_err("Q%d is in invalid state: %d\n",
+			    q_p->qid, q_p->qstate);
+		return -EINVAL;
+	}
+#endif
+	mutex_lock(&q_p->qlock);
+	switch (state) {
+	case DESC_Q_ASLEEP:
+		if (q_p->qstate != DESC_Q_ASLEEP) {
+			/* If not already in this state */
+			if (desc_q_is_idle(q_h, NULL))
+				q_p->qstate = DESC_Q_ASLEEP;
+			else
+				rc = -EBUSY;
+		}		/* else: already asleep */
+		break;
+	case DESC_Q_ACTIVE:
+		if (q_p->qstate != DESC_Q_ACTIVE) {
+			/* Initiate enqueue from backlog if any is pending */
+			if (q_p->backlog_q.cur_q_len > 0)
+				(void)schedule_work(&q_p->backlog_work);
+			else	/* Empty --> Back to idle state */
+				q_p->idle_jiffies = jiffies;
+			q_p->qstate = DESC_Q_ACTIVE;
+		}		/* else: already active */
+		break;
+	default:
+		pr_err("Invalid requested state: %d\n", state);
+		rc = -EINVAL;
+	}
+	mutex_unlock(&q_p->qlock);
+	return rc;
+}
+
+/**
+ * desc_q_get_state() - Get queue state
+ * @q_h:	The queue object handle
+ */
+enum desc_q_state desc_q_get_state(void *q_h)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	return q_p->qstate;
+}
+
+/**
+ * desc_q_is_idle() - Report if given queue is active but empty/idle.
+ * @q_h:		The queue object handle
+ * @idle_jiffies_p:	Return jiffies at which the queue became idle
+ */
+bool desc_q_is_idle(void *q_h, unsigned long *idle_jiffies_p)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	if (idle_jiffies_p != NULL)
+		*idle_jiffies_p = q_p->idle_jiffies;
+	/* No need to lock the queue - returned information is "fluid" anyway */
+	return ((q_p->qstate == DESC_Q_ACTIVE) &&
+		(GET_Q_PENDING_DESCS(q_p) == 0) &&
+		(q_p->backlog_q.cur_q_len == 0));
+}
+
+/**
+ * desc_q_cntr_set() - Set queue counters from the SeP completion GPR
+ * @q_h:	The queue object handle
+ */
+int desc_q_cntr_set(void *q_h)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	u32 val = READ_REGISTER(q_p->gpr_from_sep);
+
+	q_p->sent_cntr = val;
+	q_p->completed_cntr = val;
+
+	return 0;
+}
+
+/**
+ * desc_q_reset() - Reset sent/completed counters of queue
+ * @q_h:	The queue object handle
+ *
+ * This function should be invoked only when the queue is in ASLEEP state
+ * after the transition of SeP to sleep state completed.
+ * Returns -EBUSY if the queue is not in the correct state for reset.
+ */
+int desc_q_reset(void *q_h)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	int rc = 0;
+
+	mutex_lock(&q_p->qlock);
+	if ((q_p->qstate == DESC_Q_ASLEEP) && (GET_Q_PENDING_DESCS(q_p) == 0)) {
+		q_p->sent_cntr = 0;
+		q_p->completed_cntr = 0;
+	} else {
+		pr_err("Invoked when queue is not ASLEEP\n");
+		rc = -EBUSY;
+	}
+	mutex_unlock(&q_p->qlock);
+	return rc;
+}
+
+/**
+ * dispatch_sw_desc() - Copy given descriptor into next free entry in the
+ *			descriptors queue and signal SeP.
+ *
+ * @q_p:	Desc. queue context
+ * @desc_p:	The descriptor to dispatch
+ *
+ * This function should be called with qlock locked (non-interrupt context)
+ * and only if queue is not full (i.e., this function does not validate
+ * queue utilization)
+ */
+static inline void dispatch_sw_desc(struct desc_q *q_p,
+				    struct sep_sw_desc *desc_p)
+{
+	const u32 desc_idx = GET_DESC_IDX(q_p->sent_cntr);
+
+	dump_desc(desc_p);
+	preempt_disable_notrace();
+	q_p->enqueue_time[desc_idx] = sched_clock();	/* Save start time */
+	preempt_enable_notrace();
+	/* copy descriptor to free entry in queue */
+	SEP_SW_DESC_COPY_TO_SEP(GET_DESC_PTR(q_p, desc_idx), desc_p);
+	q_p->sent_cntr++;
+}
+
+/**
+ * desc_q_enqueue_sleep_req() - Enqueue SLEEP_REQ descriptor
+ * @q_h:	The queue object handle
+ * @op_ctx:	The operation context for this descriptor
+ * This function may be invoked only when the queue is in ASLEEP state
+ * (assuming SeP is still active).
+ * If the queue is not in ASLEEP state this function would return -EBUSY.
+ */
+int desc_q_enqueue_sleep_req(void *q_h, struct sep_op_ctx *op_ctx)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	struct sep_sw_desc desc;
+	int rc = 0;
+
+	SEP_SW_DESC_INIT(&desc);
+	SEP_SW_DESC_SET(&desc, TYPE, SEP_SW_DESC_TYPE_SLEEP_REQ);
+	SEP_SW_DESC_SET_COOKIE(&desc, op_ctx);
+
+	mutex_lock(&q_p->qlock);
+	if (q_p->qstate == DESC_Q_ASLEEP) {
+		op_ctx->op_state = USER_OP_INPROC;
+		/* In ASLEEP state the queue is assumed to be empty... */
+		dispatch_sw_desc(q_p, &desc);
+		WRITE_REGISTER(q_p->gpr_to_sep, q_p->sent_cntr);
+		pr_debug("Sent SLEEP_REQ\n");
+	} else {
+		rc = -EBUSY;
+	}
+	mutex_unlock(&q_p->qlock);
+	return rc;
+}
+
+static int backlog_q_init(struct desc_q *q_p)
+{
+	struct descs_backlog_queue *backlog_q_p = &q_p->backlog_q;
+	int rc = 0;
+
+	snprintf(backlog_q_p->backlog_items_pool_name,
+		 sizeof(backlog_q_p->backlog_items_pool_name),
+		 "dx_sep_backlog%d", q_p->qid);
+	backlog_q_p->backlog_items_pool =
+	    kmem_cache_create(backlog_q_p->backlog_items_pool_name,
+			      sizeof(struct descs_backlog_item),
+			      sizeof(u32), 0, NULL);
+	if (unlikely(backlog_q_p->backlog_items_pool == NULL)) {
+		pr_err("Q%d: Failed allocating backlog_items_pool\n",
+			    q_p->qid);
+		rc = -ENOMEM;
+	} else {
+		INIT_LIST_HEAD(&backlog_q_p->list);
+		backlog_q_p->cur_q_len = 0;
+	}
+	return rc;
+}
+
+static void backlog_q_cleanup(struct desc_q *q_p)
+{
+	struct descs_backlog_queue *backlog_q_p = &q_p->backlog_q;
+
+	if (backlog_q_p->cur_q_len > 0) {
+		pr_err("Q%d: Cleanup while have %u pending items!",
+			    q_p->qid, backlog_q_p->cur_q_len);
+		/* TODO: Handle freeing of pending items? */
+	}
+	kmem_cache_destroy(backlog_q_p->backlog_items_pool);
+}
+
+/**
+ * backlog_q_enqueue() - Enqueue given descriptor for postponed processing
+ *				(e.g., in case of full desc_q)
+ *
+ * @q_p:	Desc. queue object
+ * @desc_p:	Descriptor to enqueue
+ *
+ * Caller must call this function with the qlock locked (non-interrupt context
+ * only)
+ */
+static int backlog_q_enqueue(struct desc_q *q_p, struct sep_sw_desc *desc_p)
+{
+	struct descs_backlog_queue *backlog_q_p = &q_p->backlog_q;
+	struct sep_op_ctx *op_ctx = SEP_SW_DESC_GET_COOKIE(desc_p);
+	struct descs_backlog_item *new_q_item;
+
+	pr_debug("->backlog(op_ctx=%p):\n", op_ctx);
+	dump_desc(desc_p);
+
+	new_q_item =
+	    kmem_cache_alloc(backlog_q_p->backlog_items_pool, GFP_KERNEL);
+	if (unlikely(new_q_item == NULL)) {
+		pr_err("Failed allocating descs_queue_item");
+		op_ctx->op_state = USER_OP_NOP;
+		return -ENOMEM;
+	}
+	op_ctx->op_state = USER_OP_PENDING;
+	memcpy(&new_q_item->desc, desc_p, sizeof(struct sep_sw_desc));
+	list_add_tail(&new_q_item->list, &backlog_q_p->list);
+	op_ctx->backlog_descs_cntr++;
+	backlog_q_p->cur_q_len++;
+	return 0;
+}
+
+/**
+ * backlog_q_dequeue() - Dequeue from pending descriptors queue and dispatch
+ *			into the SW-q the first pending descriptor
+ *
+ * @q_p:	Desc. queue object
+ *
+ * This function must be called with qlock locked and only if there is free
+ * space in the given descriptor queue.
+ * It returns 0 on success, -ENOMEM if there is no pending request and
+ * -EINVAL if a pending descriptor carries a NULL cookie
+ */
+static int backlog_q_dequeue(struct desc_q *q_p)
+{
+	struct descs_backlog_queue *backlog_q_p = &q_p->backlog_q;
+	struct descs_backlog_item *first_item;
+	struct sep_sw_desc *desc_p;
+	struct sep_op_ctx *op_ctx;
+
+	if (list_empty(&backlog_q_p->list))
+		return -ENOMEM;
+	/* Remove the first item from the list but do not free it yet */
+	first_item = list_first_entry(&backlog_q_p->list,
+				      struct descs_backlog_item, list);
+	list_del(&first_item->list);
+	backlog_q_p->cur_q_len--;
+	/* Process/dispatch the descriptor to the SW-q. */
+	desc_p = &first_item->desc;
+	dump_desc(desc_p);
+	op_ctx = SEP_SW_DESC_GET_COOKIE(desc_p);
+	if (unlikely(op_ctx == NULL)) {
+		pr_err("Invalid desc - COOKIE is NULL\n");
+		return -EINVAL;
+	}
+	pr_debug("backlog(op_ctx=%p)->descQ:\n", op_ctx);
+	dispatch_sw_desc(q_p, desc_p);
+	op_ctx->backlog_descs_cntr--;
+	/* Now we can free the list item */
+	kmem_cache_free(backlog_q_p->backlog_items_pool, first_item);
+	if (op_ctx->backlog_descs_cntr == 0) {
+		/* All the operation descriptors reached the SW-q. */
+		op_ctx->op_state = USER_OP_INPROC;
+		if (op_ctx->comp_work != NULL)
+			/* Async. (CryptoAPI) */
+			/* Invoke completion callback directly because
+			   we are already in work_queue context and we wish
+			   to assure this state update (EINPROGRESS)
+			   is delivered before the request is completed */
+			op_ctx->comp_work->func(op_ctx->comp_work);
+	}
+	return 0;
+}
+
+/**
+ * backlog_q_process() - Handler for dispatching backlog descriptors
+ *			into the SW desc.Q when possible (dispatched from
+ *			the completion interrupt handler)
+ *
+ * @work:	The work context
+ */
+static void backlog_q_process(struct work_struct *work)
+{
+	int descs_to_enqueue;
+	struct desc_q *q_p = container_of(work, struct desc_q, backlog_work);
+
+	mutex_lock(&q_p->qlock);
+	if (q_p->qstate == DESC_Q_ACTIVE) {	/* Avoid on ASLEEP state */
+		descs_to_enqueue = GET_Q_FREE_DESCS(q_p);
+		/* Not more than pending descriptors */
+		if (descs_to_enqueue > q_p->backlog_q.cur_q_len)
+			descs_to_enqueue = q_p->backlog_q.cur_q_len;
+		pr_debug("Q%d: Dispatching %d descs. from pendQ\n",
+			      q_p->qid, descs_to_enqueue);
+		while (descs_to_enqueue > 0) {
+			/* From backlog queue to SW descriptors queue */
+			if (!backlog_q_dequeue(q_p))
+				descs_to_enqueue--;
+			else
+				break;
+		}
+		/* Signal SeP once for all new descriptors
+		   (interrupt coalescing) */
+		WRITE_REGISTER(q_p->gpr_to_sep, q_p->sent_cntr);
+	}
+	mutex_unlock(&q_p->qlock);
+}
+
+/**
+ * desc_q_get_info4sep() - Get queue address and size to be used in FW init
+ *				phase
+ * @q_h:	 The queue object handle
+ * @base_addr_p:	 Base address return parameter
+ * @size_p:	 Queue size (in bytes) return parameter
+ *
+ */
+void desc_q_get_info4sep(void *q_h,
+			 dma_addr_t *base_addr_p, unsigned long *size_p)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+
+	*base_addr_p = q_p->q_base_dma;
+	*size_p = SEP_SW_DESC_Q_MEM_SIZE;
+}
+
+/**
+ * desc_q_enqueue() - Enqueue given descriptor in given queue
+ * @q_h:		The queue object handle
+ * @desc_p:		Pointer to descriptor
+ * @may_backlog:	When "true" and descQ is full or ASLEEP, may enqueue
+ *			the given desc. in the backlog queue.
+ *			When "false", any of the above cases would cause
+ *			returning -ENOMEM.
+ *
+ * The function updates op_ctx->op_state according to its results.
+ * Returns -EINPROGRESS on successful dispatch into the SW desc. queue.
+ * Returns -EBUSY if may_backlog==true and the descriptor was enqueued in
+ * the backlog queue.
+ * Returns -ENOMEM if the queue is full and the descriptor cannot be enqueued
+ * in the backlog queue
+ */
+int desc_q_enqueue(void *q_h, struct sep_sw_desc *desc_p, bool may_backlog)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	struct sep_op_ctx *op_ctx = SEP_SW_DESC_GET_COOKIE(desc_p);
+	int rc;
+
+	mutex_lock(&q_p->qlock);
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_get();
+#endif
+
+	if (IS_Q_FULL(q_p) ||	/* Queue is full */
+	    (q_p->backlog_q.cur_q_len > 0) ||	/* or already have pending d. */
+	    (q_p->qstate == DESC_Q_ASLEEP)) {	/* or in sleep state */
+		if (may_backlog) {
+			pr_debug("Enqueuing desc. to queue@%s\n",
+				 q_p->qstate == DESC_Q_ASLEEP ?
+					 "ASLEEP" : "FULL");
+			rc = backlog_q_enqueue(q_p, desc_p);
+			if (unlikely(rc != 0)) {
+				pr_err("Failed enqueuing desc. to queue@%s\n",
+				       q_p->qstate == DESC_Q_ASLEEP ?
+					       "ASLEEP" : "FULL");
+			} else {
+				rc = -EBUSY;
+			}
+		} else {
+			pr_debug("Q%d: %s and may not backlog.\n",
+				 q_p->qid,
+				 q_p->qstate == DESC_Q_ASLEEP ?
+					 "ASLEEP" : "FULL");
+			rc = -ENOMEM;
+		}
+
+	} else {		/* Can dispatch to actual descriptors queue */
+		op_ctx->op_state = USER_OP_INPROC;
+		dispatch_sw_desc(q_p, desc_p);
+		/* Signal SeP of new descriptors */
+		WRITE_REGISTER(q_p->gpr_to_sep, q_p->sent_cntr);
+		pr_debug("Q#%d: Sent SwDesc #%u (op_ctx=%p)\n",
+			      q_p->qid, q_p->sent_cntr, op_ctx);
+		rc = -EINPROGRESS;
+	}
+
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_put();
+#endif
+	mutex_unlock(&q_p->qlock);
+
+	return rc;
+}
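+/* Caller sketch for the return convention above (assuming a prepared desc):
+ *
+ *	rc = desc_q_enqueue(q_h, &desc, true);
+ *	if (rc == -EINPROGRESS || rc == -EBUSY)
+ *		rc = 0;	(dispatched or backlogged; wait for completion)
+ *	else if (rc == -ENOMEM)
+ *		(queue full and backlog not allowed; propagate the error)
+ */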
+
+/**
+ * desc_q_mark_invalid_cookie() - Mark outstanding descriptors of given cookie
+ *					as invalid (e.g., after a timeout)
+ * @q_h:	 Descriptor queue handle
+ * @cookie:	 Invalidate descriptors with this cookie
+ *
+ * Mark descriptors carrying the given cookie as invalid, so that a late
+ * completion (after a timeout) is ignored.
+ * Invoke this before releasing the op_ctx object.
+ * There is no race with the interrupt because the op_ctx (cookie) is still
+ * valid when invoking this function.
+ */
+void desc_q_mark_invalid_cookie(void *q_h, void *cookie)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	struct sep_sw_desc desc;
+	u32 cur_desc_cntr, cur_desc_idx;
+	unsigned int drop_cnt = 0;
+
+	mutex_lock(&q_p->qlock);
+
+	for (cur_desc_cntr = q_p->completed_cntr;
+	     cur_desc_cntr < q_p->sent_cntr; cur_desc_cntr++) {
+		/* Mark all outstanding of given cookie as invalid with NULL */
+		cur_desc_idx = GET_DESC_IDX(cur_desc_cntr);
+		/* Copy descriptor to spad (endianness fix-up) */
+		/* TODO: Optimize to avoid full copy back...  */
+		/* (we only need the cookie) */
+		SEP_SW_DESC_COPY_FROM_SEP(&desc,
+					  GET_DESC_PTR(q_p, cur_desc_idx));
+		if (SEP_SW_DESC_GET_COOKIE(&desc) == cookie) {
+			SEP_SW_DESC_SET_COOKIE(&desc, (uintptr_t *)0);	/* Invalidate */
+			SEP_SW_DESC_COPY_TO_SEP(GET_DESC_PTR(q_p, cur_desc_idx),
+						&desc);
+			pr_debug("Invalidated desc at desc_cnt=%u\n",
+				      cur_desc_idx);
+			drop_cnt++;
+		}
+	}
+
+	mutex_unlock(&q_p->qlock);
+
+	if (drop_cnt > 0)
+		pr_warn("Invalidated %u descriptors of cookie=0x%p\n",
+			drop_cnt, cookie);
+
+}
+
+/**
+ * desc_q_process_completed() - Dequeue and process any completed descriptors in
+ *				the queue
+ * @q_h:	 The queue object handle
+ *
+ * Dequeue and process any completed descriptors in the queue
+ * (This function assumes non-reentrancy since it is invoked from
+ *  either interrupt handler or in workqueue context)
+ */
+void desc_q_process_completed(void *q_h)
+{
+	struct desc_q *q_p = (struct desc_q *)q_h;
+	struct sep_op_ctx *op_ctx;
+	struct sep_sw_desc desc;
+	enum sep_sw_desc_type desc_type;
+	struct sep_op_ctx *cookie;
+	u32 ret_code;
+	u32 desc_idx;
+	u32 new_completed_cntr;
+
+	new_completed_cntr = READ_REGISTER(q_p->gpr_from_sep);
+	/* Sanity check for read GPR value (must be between sent and completed).
+	   This arithmetic is cyclic so should work even after the counter
+	   wraps around. */
+	if ((q_p->sent_cntr - new_completed_cntr) >
+	    (q_p->sent_cntr - q_p->completed_cntr)) {
+		/* More completions than outstanding descriptors ?! */
+		pr_err(
+			    "sent_cntr=0x%08X completed_cntr=0x%08X gpr=0x%08X\n",
+			    q_p->sent_cntr, q_p->completed_cntr,
+			    new_completed_cntr);
+		return;		/*SEP_DRIVER_BUG() */
+		/* This is a (SeP) bug case that is not supposed to happen,
+		   but we must check for it to avoid accessing stale
+		   descriptor data (which may cause system memory corruption).
+		   Returning to the caller may result in interrupt loss, but
+		   we prefer losing a completion and blocking the caller
+		   forever over invoking BUG(), which would crash the whole
+		   system and might even lose the error log message. This
+		   gives a subsequent pending descriptor completion a chance
+		   to recover this case, or in the worst case lets the system
+		   administrator understand what is going on and perform a
+		   graceful reboot. */
+	}
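+	/*
+	 * Worked example of the cyclic check above (illustrative values):
+	 * after a counter wrap, sent_cntr=0x00000002 with
+	 * completed_cntr=0xFFFFFFFE means 4 descriptors are outstanding.
+	 * A GPR value of 0x00000001 passes, since (0x2 - 0x1) = 1 is not
+	 * greater than (0x2 - 0xFFFFFFFE) = 4 in modulo-2^32 arithmetic,
+	 * whereas a GPR value of 0x00000003 would be rejected.
+	 */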
+
+	while (new_completed_cntr > q_p->completed_cntr) {
+
+		desc_idx = GET_DESC_IDX(q_p->completed_cntr);
+		/* Copy descriptor to spad (endianness fix-up) */
+		/* TODO: Optimize to avoid full copy back...  */
+		/* (we only need the fields: type, retcode, cookie) */
+		SEP_SW_DESC_COPY_FROM_SEP(&desc, GET_DESC_PTR(q_p, desc_idx));
+		desc_type = SEP_SW_DESC_GET(&desc, TYPE);
+		cookie = SEP_SW_DESC_GET_COOKIE(&desc);
+		ret_code = SEP_SW_DESC_GET(&desc, RET_CODE);
+		sysfs_update_sep_stats(q_p->qid, desc_type,
+				       q_p->enqueue_time[desc_idx],
+				       sched_clock());
+		q_p->completed_cntr++;	/* prepare for next */
+		pr_debug("type=%u retcode=0x%08X cookie=0x%p",
+			 desc_type, ret_code, cookie);
+		if (cookie == NULL) {
+			/* Probably late completion on invalidated cookie */
+			pr_err("Got completion with NULL cookie\n");
+			continue;
+		}
+
+		op_ctx = (struct sep_op_ctx *)cookie;
+		if (desc_type == SEP_SW_DESC_TYPE_APP_REQ) {/* Applet Req. */
+			/* "internal error" flag is currently available only
+			   in this descriptor type. */
+			op_ctx->internal_error =
+			    SEP_SW_DESC_GET4TYPE(&desc, APP_REQ, INTERNAL_ERR);
+			/* Get session ID for SESSION_OPEN case */
+			op_ctx->session_ctx->sep_session_id =
+			    SEP_SW_DESC_GET4TYPE(&desc, APP_REQ, SESSION_ID);
+		}
+
+#ifdef DEBUG
+		if (op_ctx->pending_descs_cntr > MAX_PENDING_DESCS)
+			pr_err("Invalid num of pending descs %d\n",
+				    op_ctx->pending_descs_cntr);
+#endif
+		/* pending descriptors counter (apply for transactions composed
+		   of more than a single descriptor) */
+		op_ctx->pending_descs_cntr--;
+		/* Update associated operation context and notify it */
+		op_ctx->error_info |= ret_code;
+		if (op_ctx->pending_descs_cntr == 0) {
+			op_ctx->op_state = USER_OP_COMPLETED;
+			if (op_ctx->comp_work != NULL)	/* Async. (CryptoAPI) */
+				(void)schedule_work(op_ctx->comp_work);
+			else	/* Sync. (IOCTL or dx_sepapp_ API) */
+				complete(&(op_ctx->ioctl_op_compl));
+		}
+	}			/* while(new_completed_cntr) */
+
+	/* Dispatch pending requests */
+	/* if any pending descs. & utilization is below watermark & !ASLEEP */
+	if ((q_p->backlog_q.cur_q_len > 0) &&
+	    (GET_Q_FREE_DESCS(q_p) > SEP_DESC_Q_WATERMARK_MARGIN) &&
+	    (q_p->qstate != DESC_Q_ASLEEP)) {
+		(void)schedule_work(&q_p->backlog_work);
+	} else if (desc_q_is_idle(q_h, NULL)) {
+		q_p->idle_jiffies = jiffies;
+	}
+
+}
+
+/**
+ * desq_q_pack_debug_desc() - Create a debug descriptor in given buffer
+ * @desc_p:	 The descriptor buffer
+ * @op_ctx:	 The operation context
+ *
+ * TODO: Get additional debug descriptors (in addition to loopback)
+ *
+ */
+void desq_q_pack_debug_desc(struct sep_sw_desc *desc_p,
+			    struct sep_op_ctx *op_ctx)
+{
+	SEP_SW_DESC_INIT(desc_p);
+	SEP_SW_DESC_SET(desc_p, TYPE, SEP_SW_DESC_TYPE_DEBUG);
+	SEP_SW_DESC_SET_COOKIE(desc_p, op_ctx);
+}
+
+/**
+ * desc_q_pack_crypto_op_desc() - Pack a CRYPTO_OP descriptor in given
+ *				descriptor buffer
+ * @desc_p:	 The descriptor buffer
+ * @op_ctx:	 The operation context
+ * @sep_ctx_load_req:	 Context load request flag
+ * @sep_ctx_init_req:	 Context initialize request flag
+ * @proc_mode:	 Descriptor processing mode
+ *
+ */
+void desc_q_pack_crypto_op_desc(struct sep_sw_desc *desc_p,
+				struct sep_op_ctx *op_ctx,
+				int sep_ctx_load_req, int sep_ctx_init_req,
+				enum sep_proc_mode proc_mode)
+{
+	u32 xlli_addr;
+	u16 xlli_size;
+	u16 table_count;
+
+	SEP_SW_DESC_INIT(desc_p);
+	SEP_SW_DESC_SET(desc_p, TYPE, SEP_SW_DESC_TYPE_CRYPTO_OP);
+	SEP_SW_DESC_SET_COOKIE(desc_p, op_ctx);
+
+	SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, FW_CACHE_IDX,
+			     ctxmgr_get_sep_cache_idx(&op_ctx->ctx_info));
+	SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, L, sep_ctx_load_req);
+	SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, HCB_ADDR,
+			     ctxmgr_get_sep_ctx_dma_addr(&op_ctx->ctx_info));
+	SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, I, sep_ctx_init_req);
+	SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, PROC_MODE, proc_mode);
+
+	if (proc_mode != SEP_PROC_MODE_NOP) {	/* no need for IFT/OFT in NOP */
+		/* IFT details */
+		llimgr_get_mlli_desc_info(&op_ctx->ift,
+					  &xlli_addr, &xlli_size, &table_count);
+		SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, IFT_ADDR, xlli_addr);
+		SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, IFT_SIZE, xlli_size);
+		SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, IFT_NUM, table_count);
+
+		/* OFT details */
+		llimgr_get_mlli_desc_info(&op_ctx->oft,
+					  &xlli_addr, &xlli_size, &table_count);
+		SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, OFT_ADDR, xlli_addr);
+		SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, OFT_SIZE, xlli_size);
+		SEP_SW_DESC_SET4TYPE(desc_p, CRYPTO_OP, OFT_NUM, table_count);
+	}
+}
+
+/**
+ * desc_q_pack_combined_op_desc() - Pack a COMBINED_OP descriptor in given
+ *					descriptor buffer
+ * @desc_p:	 The descriptor buffer
+ * @op_ctx:	 The operation context
+ * @sep_ctx_load_req:	 Context load request flag
+ * @sep_ctx_init_req:	 Context initialize request flag
+ * @proc_mode:	 Descriptor processing mode
+ * @cfg_scheme:	 The SEP format configuration scheme claimed by the user
+ *
+ */
+void desc_q_pack_combined_op_desc(struct sep_sw_desc *desc_p,
+				  struct sep_op_ctx *op_ctx,
+				  int sep_ctx_load_req, int sep_ctx_init_req,
+				  enum sep_proc_mode proc_mode,
+				  u32 cfg_scheme)
+{
+	u32 xlli_addr;
+	u16 xlli_size;
+	u16 table_count;
+
+	SEP_SW_DESC_INIT(desc_p);
+	SEP_SW_DESC_SET(desc_p, TYPE, SEP_SW_DESC_TYPE_COMBINED_OP);
+	SEP_SW_DESC_SET_COOKIE(desc_p, op_ctx);
+
+	SEP_SW_DESC_SET4TYPE(desc_p, COMBINED_OP, L, sep_ctx_load_req);
+	SEP_SW_DESC_SET4TYPE(desc_p, COMBINED_OP, CONFIG_SCHEME, cfg_scheme);
+	SEP_SW_DESC_SET4TYPE(desc_p, COMBINED_OP, I, sep_ctx_init_req);
+	SEP_SW_DESC_SET4TYPE(desc_p, COMBINED_OP, PROC_MODE, proc_mode);
+
+	if (proc_mode != SEP_PROC_MODE_NOP) {	/* no need for IFT/OFT in NOP */
+		/* IFT details */
+		llimgr_get_mlli_desc_info(&op_ctx->ift,
+					  &xlli_addr, &xlli_size, &table_count);
+		SEP_SW_DESC_SET4TYPE(desc_p, COMBINED_OP, IFT_ADDR, xlli_addr);
+		SEP_SW_DESC_SET4TYPE(desc_p, COMBINED_OP, IFT_SIZE, xlli_size);
+		SEP_SW_DESC_SET4TYPE(desc_p, COMBINED_OP, IFT_NUM, table_count);
+
+		/* OFT details */
+		llimgr_get_mlli_desc_info(&op_ctx->oft,
+					  &xlli_addr, &xlli_size, &table_count);
+		SEP_SW_DESC_SET4TYPE(desc_p, COMBINED_OP, OFT_ADDR, xlli_addr);
+		SEP_SW_DESC_SET4TYPE(desc_p, COMBINED_OP, OFT_SIZE, xlli_size);
+		SEP_SW_DESC_SET4TYPE(desc_p, COMBINED_OP, OFT_NUM, table_count);
+	}
+}
+
+/**
+ * desc_q_pack_load_op_desc() - Pack a LOAD_OP descriptor in given descriptor
+ *				buffer
+ * @desc_p:	 The descriptor buffer
+ * @op_ctx:	 The operation context
+ * @sep_ctx_load_req:	 Context load request flag
+ *
+ */
+void desc_q_pack_load_op_desc(struct sep_sw_desc *desc_p,
+			      struct sep_op_ctx *op_ctx, int *sep_ctx_load_req)
+{
+	struct client_crypto_ctx_info *ctx_info_p = &(op_ctx->ctx_info);
+	u32 *p = (u32 *)desc_p;
+	int idx;
+
+	SEP_SW_DESC_INIT(desc_p);
+	SEP_SW_DESC_SET(desc_p, TYPE, SEP_SW_DESC_TYPE_LOAD_OP);
+	SEP_SW_DESC_SET_COOKIE(desc_p, op_ctx);
+
+	for (idx = 0; idx < SEP_MAX_COMBINED_ENGINES; idx++, ctx_info_p++) {
+		BITFIELD_SET(p[SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_WORD_OFFSET],
+			     SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_BIT_OFFSET(idx),
+			     SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_BIT_SIZE,
+			     (ctx_info_p->ctx_kptr == NULL) ? (-1) :
+			     ctxmgr_get_sep_cache_idx(ctx_info_p));
+
+		BITFIELD_SET(p[SEP_SW_DESC_LOAD_OP_HCB_ADDR_WORD_OFFSET(idx)],
+			     SEP_SW_DESC_LOAD_OP_HCB_ADDR_BIT_OFFSET,
+			     SEP_SW_DESC_LOAD_OP_HCB_ADDR_BIT_SIZE,
+			     (ctx_info_p->ctx_kptr == NULL) ? 0 :
+			     (ctxmgr_get_sep_ctx_dma_addr(ctx_info_p) >>
+			      SEP_SW_DESC_LOAD_OP_HCB_ADDR_BIT_OFFSET));
+		/* The DMA address is shifted right because the "L" (load)
+		   bit occupies the LS bit of this word */
+
+		BITFIELD_SET(p[SEP_SW_DESC_LOAD_OP_L_WORD_OFFSET(idx)],
+			     SEP_SW_DESC_LOAD_OP_L_BIT_OFFSET,
+			     SEP_SW_DESC_LOAD_OP_L_BIT_SIZE,
+			     sep_ctx_load_req[idx]);
+	}
+}
+
+/**
+ * desc_q_pack_rpc_desc() - Pack the RPC (message) descriptor type
+ * @desc_p:	 The descriptor buffer
+ * @op_ctx:	 The operation context
+ * @agent_id:	 RPC agent (API) ID
+ * @func_id:	 Function ID (index)
+ * @rpc_msg_size:	 Size of RPC parameters message buffer
+ * @rpc_msg_dma_addr:	 DMA address of RPC parameters message buffer
+ *
+ */
+void desc_q_pack_rpc_desc(struct sep_sw_desc *desc_p,
+			  struct sep_op_ctx *op_ctx,
+			  u16 agent_id,
+			  u16 func_id,
+			  unsigned long rpc_msg_size,
+			  dma_addr_t rpc_msg_dma_addr)
+{
+	SEP_SW_DESC_INIT(desc_p);
+	SEP_SW_DESC_SET(desc_p, TYPE, SEP_SW_DESC_TYPE_RPC_MSG);
+	SEP_SW_DESC_SET_COOKIE(desc_p, op_ctx);
+
+#ifdef DEBUG
+	/* Verify that given agent_id is not too large for AGENT_ID field */
+	if (agent_id >= (1 << SEP_SW_DESC_RPC_MSG_AGENT_ID_BIT_SIZE)) {
+		pr_err(
+			    "Given agent_id=%d is too large for AGENT_ID field. Value truncated!",
+			    agent_id);
+	}
+#endif
+	SEP_SW_DESC_SET4TYPE(desc_p, RPC_MSG, AGENT_ID, agent_id);
+	SEP_SW_DESC_SET4TYPE(desc_p, RPC_MSG, FUNC_ID, func_id);
+	SEP_SW_DESC_SET4TYPE(desc_p, RPC_MSG, HMB_SIZE, rpc_msg_size);
+	SEP_SW_DESC_SET4TYPE(desc_p, RPC_MSG, HMB_ADDR, rpc_msg_dma_addr);
+}
+
+/**
+ * desc_q_pack_app_req_desc() - Pack the Applet Request descriptor
+ * @desc_p:	The descriptor buffer
+ * @op_ctx:	The operation context
+ * @req_type:	The Applet request type
+ * @session_id:	Session ID - Required only for SESSION_CLOSE and
+ *		COMMAND_INVOKE requests
+ * @inparams_addr:	DMA address of the "In Params." structure for the
+ *			request.
+ *
+ */
+void desc_q_pack_app_req_desc(struct sep_sw_desc *desc_p,
+			      struct sep_op_ctx *op_ctx,
+			      enum sepapp_req_type req_type,
+			      u16 session_id, dma_addr_t inparams_addr)
+{
+	SEP_SW_DESC_INIT(desc_p);
+	SEP_SW_DESC_SET(desc_p, TYPE, SEP_SW_DESC_TYPE_APP_REQ);
+	SEP_SW_DESC_SET_COOKIE(desc_p, op_ctx);
+
+	SEP_SW_DESC_SET4TYPE(desc_p, APP_REQ, REQ_TYPE, req_type);
+	SEP_SW_DESC_SET4TYPE(desc_p, APP_REQ, SESSION_ID, session_id);
+	SEP_SW_DESC_SET4TYPE(desc_p, APP_REQ, IN_PARAMS_ADDR, inparams_addr);
+}
+
+/**
+ * crypto_proc_mode_to_str() - Convert from crypto_proc_mode to string
+ * @proc_mode:	 The proc_mode enumeration value
+ *
+ * Returns A string description of the processing mode ("?" if invalid mode)
+ */
+const char *crypto_proc_mode_to_str(enum sep_proc_mode proc_mode)
+{
+	switch (proc_mode) {
+	case SEP_PROC_MODE_NOP:
+		return "NOP";
+	case SEP_PROC_MODE_PROC_T:
+		return "PROC_T";
+	case SEP_PROC_MODE_FIN:
+		return "FIN";
+	case SEP_PROC_MODE_PROC_A:
+		return "PROC_A";
+	default:
+		return "?";
+	}
+}
+
+#ifdef DEBUG
+static void dump_crypto_op_desc(const struct sep_sw_desc *desc_p)
+{
+	pr_debug("CRYPTO_OP::%s (type=%lu,cookie=0x%08lX)\n",
+		crypto_proc_mode_to_str(SEP_SW_DESC_GET4TYPE
+				(desc_p, CRYPTO_OP, PROC_MODE)),
+				SEP_SW_DESC_GET(desc_p, TYPE),
+				(uintptr_t)SEP_SW_DESC_GET_COOKIE(desc_p));
+
+	pr_debug("HCB=0x%08lX @ FwIdx=%lu %s%s\n",
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, HCB_ADDR),
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, FW_CACHE_IDX),
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, L) ? "(load)" : "",
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, I) ? "(init)" : "");
+
+	pr_debug("IFT: addr=0x%08lX , size=0x%08lX , tbl_num=%lu\n",
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, IFT_ADDR),
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, IFT_SIZE),
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, IFT_NUM));
+
+	pr_debug("OFT: addr=0x%08lX , size=0x%08lX , tbl_num=%lu\n",
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, OFT_ADDR),
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, OFT_SIZE),
+		 SEP_SW_DESC_GET4TYPE(desc_p, CRYPTO_OP, OFT_NUM));
+
+	pr_debug("0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+		 ((u32 *)desc_p)[0], ((u32 *)desc_p)[1],
+		 ((u32 *)desc_p)[2], ((u32 *)desc_p)[3],
+		 ((u32 *)desc_p)[4], ((u32 *)desc_p)[5],
+		 ((u32 *)desc_p)[6], ((u32 *)desc_p)[7]);
+}
+
+static void dump_load_op_desc(const struct sep_sw_desc *desc_p)
+{
+	u32 *p = (u32 *)desc_p;
+	u32 hcb, cache_idx, is_load;
+	int idx;
+
+	pr_debug("LOAD_OP (type=%lu,cookie=0x%08lX)\n",
+		SEP_SW_DESC_GET(desc_p, TYPE),
+		(uintptr_t)SEP_SW_DESC_GET_COOKIE(desc_p));
+
+	for (idx = 0; idx < SEP_MAX_COMBINED_ENGINES; idx++) {
+		cache_idx =
+		    BITFIELD_GET(p
+				 [SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_WORD_OFFSET],
+				 SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_BIT_OFFSET
+				 (idx),
+				 SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_BIT_SIZE);
+
+		hcb =
+		    BITFIELD_GET(p
+				 [SEP_SW_DESC_LOAD_OP_HCB_ADDR_WORD_OFFSET
+				 (idx)],
+				 SEP_SW_DESC_LOAD_OP_HCB_ADDR_BIT_OFFSET,
+				 SEP_SW_DESC_LOAD_OP_HCB_ADDR_BIT_SIZE);
+
+		is_load =
+		    BITFIELD_GET(p[SEP_SW_DESC_LOAD_OP_L_WORD_OFFSET(idx)],
+				 SEP_SW_DESC_LOAD_OP_L_BIT_OFFSET,
+				 SEP_SW_DESC_LOAD_OP_L_BIT_SIZE);
+
+		pr_debug("[%d] HCB=0x%08X FwIdx=%u %s\n",
+			      idx, hcb, cache_idx,
+			      is_load ? "(load)" : "(do not load)");
+	}
+}
+
+static void dump_combined_op_desc(const struct sep_sw_desc *desc_p)
+{
+	pr_debug("COMBINED_OP::%s (type=%lu,cookie=0x%08lX)\n",
+		crypto_proc_mode_to_str(SEP_SW_DESC_GET4TYPE
+				(desc_p, COMBINED_OP, PROC_MODE)),
+				SEP_SW_DESC_GET(desc_p, TYPE),
+				(uintptr_t)SEP_SW_DESC_GET_COOKIE(desc_p));
+
+	pr_debug("SCHEME=0x%08lX %s%s\n",
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, CONFIG_SCHEME),
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, L) ? "(load)" : "",
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, I) ? "(init)" : "");
+
+	pr_debug("IFT: addr=0x%08lX , size=0x%08lX , tbl_num=%lu\n",
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, IFT_ADDR),
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, IFT_SIZE),
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, IFT_NUM));
+
+	pr_debug("OFT: addr=0x%08lX , size=0x%08lX , tbl_num=%lu\n",
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, OFT_ADDR),
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, OFT_SIZE),
+		 SEP_SW_DESC_GET4TYPE(desc_p, COMBINED_OP, OFT_NUM));
+}
+
+static void dump_rpc_msg_desc(const struct sep_sw_desc *desc_p)
+{
+	pr_debug(
+		      "RPC_MSG: agentId=%lu, funcId=%lu, HmbAddr=0x%08lX, HmbSize=%lu\n",
+		      SEP_SW_DESC_GET4TYPE(desc_p, RPC_MSG, AGENT_ID),
+		      SEP_SW_DESC_GET4TYPE(desc_p, RPC_MSG, FUNC_ID),
+		      SEP_SW_DESC_GET4TYPE(desc_p, RPC_MSG, HMB_ADDR),
+		      SEP_SW_DESC_GET4TYPE(desc_p, RPC_MSG, HMB_SIZE));
+}
+
+static void dump_app_req_desc(const struct sep_sw_desc *desc_p)
+{
+	pr_debug(
+		      "APP_REQ: reqType=%lu, sessionId=%lu, InParamsAddr=0x%08lX\n",
+		      SEP_SW_DESC_GET4TYPE(desc_p, APP_REQ, REQ_TYPE),
+		      SEP_SW_DESC_GET4TYPE(desc_p, APP_REQ, SESSION_ID),
+		      SEP_SW_DESC_GET4TYPE(desc_p, APP_REQ, IN_PARAMS_ADDR));
+}
+
+static void dump_desc(const struct sep_sw_desc *desc_p)
+{				/* dump descriptor based on its type */
+	switch (SEP_SW_DESC_GET(desc_p, TYPE)) {
+	case SEP_SW_DESC_TYPE_NULL:
+		pr_debug("NULL descriptor type.\n");
+		break;
+	case SEP_SW_DESC_TYPE_CRYPTO_OP:
+		dump_crypto_op_desc(desc_p);
+		break;
+	case SEP_SW_DESC_TYPE_LOAD_OP:
+		dump_load_op_desc(desc_p);
+		break;
+	case SEP_SW_DESC_TYPE_COMBINED_OP:
+		dump_combined_op_desc(desc_p);
+		break;
+	case SEP_SW_DESC_TYPE_RPC_MSG:
+		dump_rpc_msg_desc(desc_p);
+		break;
+	case SEP_SW_DESC_TYPE_APP_REQ:
+		dump_app_req_desc(desc_p);
+		break;
+	case SEP_SW_DESC_TYPE_DEBUG:
+		pr_debug("DEBUG descriptor type.\n");
+		break;
+	default:
+		pr_warn("Unknown descriptor type = %lu\n",
+			     SEP_SW_DESC_GET(desc_p, TYPE));
+	}
+}
+#endif /*DEBUG*/
diff --git a/drivers/staging/sep54/desc_mgr.h b/drivers/staging/sep54/desc_mgr.h
new file mode 100644
index 0000000..11405b0
--- /dev/null
+++ b/drivers/staging/sep54/desc_mgr.h
@@ -0,0 +1,282 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+/*! \file desc_mgr.h
+    Descriptor manager API and associated data structures
+*/
+
+#ifndef _DESC_MGR_H_
+#define _DESC_MGR_H_
+
+#include "sep_sw_desc.h"
+
+/* Max. from Combined mode operation */
+#define MAX_PENDING_DESCS 2
+
+/* Evaluate desc_q_enqueue() return code (EINPROGRESS and EBUSY are ok) */
+#define IS_DESCQ_ENQUEUE_ERR(_rc) \
+	(((_rc) != -EINPROGRESS) && ((_rc) != -EBUSY))
+
+#define DESC_Q_INVALID_HANDLE NULL
+
+/* Opaque structure - accessed using SEP_SW_DESC_* macros */
+struct sep_sw_desc {
+	u32 data[SEP_SW_DESC_WORD_SIZE];
+};
+
+enum desc_q_state {
+	DESC_Q_UNINITIALIZED,	/* Before initializing the queue */
+	DESC_Q_ACTIVE,		/* Queue has pending requests    */
+	DESC_Q_ASLEEP		/* Queue is in "sleep" state (cannot accept
+				   additional requests - descs should be
+				   enqueued in descs backlog queue) */
+};
+
+/* Declare like this to avoid cyclic inclusion with dx_cc54_driver.h */
+struct sep_op_ctx;
+struct queue_drvdata;
+struct sep_app_session;
+
+/**
+ * desc_q_create() - Create descriptors queue object
+ * @qid:	 The queue ID (index)
+ * @drvdata:	 The associated queue driver data
+ * @state:	 Initial queue state
+ *
+ * Returns Allocated queue object handle (DESC_Q_INVALID_HANDLE for failure)
+ */
+void *desc_q_create(int qid, struct queue_drvdata *drvdata, int state);
+
+/**
+ * desc_q_destroy() - Destroy descriptors queue object (free resources)
+ * @q_h: The queue object handle
+ */
+void desc_q_destroy(void *q_h);
+
+/**
+ * desc_q_set_state() - Set queue state (SLEEP or ACTIVE)
+ * @q_h:	The queue object handle
+ * @state:	The requested state
+ */
+int desc_q_set_state(void *q_h, enum desc_q_state state);
+
+/**
+ * desc_q_cntr_set() - Set counters for the queue
+ * @q_h:	The queue object handle
+ */
+int desc_q_cntr_set(void *q_h);
+
+/**
+ * desc_q_get_state() - Get queue state
+ * @q_h:	The queue object handle
+ */
+enum desc_q_state desc_q_get_state(void *q_h);
+
+/**
+ * desc_q_is_idle() - Report if given queue is active but empty/idle.
+ * @q_h:		The queue object handle
+ * @idle_jiffies_p:	Return jiffies at which the queue became idle
+ */
+bool desc_q_is_idle(void *q_h, unsigned long *idle_jiffies_p);
+
+/**
+ * desc_q_reset() - Reset sent/completed counters of queue
+ * @q_h:	The queue object handle
+ *
+ * This function should be invoked only when the queue is in ASLEEP state,
+ * after the transition of SeP to sleep state has completed.
+ * Returns -EBUSY if the queue is not in the correct state for reset.
+ */
+int desc_q_reset(void *q_h);
+
+/**
+ * desc_q_enqueue_sleep_req() - Enqueue SLEEP_REQ descriptor
+ * @q_h:	The queue object handle
+ * @op_ctx:	The operation context for this descriptor
+ */
+int desc_q_enqueue_sleep_req(void *q_h, struct sep_op_ctx *op_ctx);
+
+/**
+ * desc_q_get_info4sep() - Get queue address and size for use in FW init phase
+ * @q_h:		The queue object handle
+ * @base_addr_p:	Base address return parameter
+ * @size_p:		Queue size (in bytes) return parameter
+ */
+void desc_q_get_info4sep(void *q_h,
+			 dma_addr_t *base_addr_p, unsigned long *size_p);
+
+/**
+ * desc_q_enqueue() - Enqueue given descriptor in given queue
+ * @q_h:		The queue object handle
+ * @desc_p:		Pointer to descriptor
+ * @may_backlog:	When "true" and descQ is full or ASLEEP, may enqueue
+ *			the given desc. in the backlog queue.
+ *			When "false", any of the above cases would cause
+ *			returning -ENOMEM.
+ *
+ * The function updates op_ctx->op_state according to its results.
+ * Returns -EINPROGRESS on success to dispatch into the SW desc. queue.
+ * Returns -EBUSY if may_backlog==true and the descriptor was enqueued in
+ * the backlog queue.
+ * Returns -ENOMEM if the queue is full and cannot enqueue in the backlog queue
+ */
+int desc_q_enqueue(void *q_h, struct sep_sw_desc *desc_p, bool may_backlog);
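+
+/*
+ * Caller sketch (this mirrors send_crypto_op_desc() in dx_driver.c):
+ * dispatch a packed descriptor and fold the two "accepted" codes into 0.
+ *
+ *	rc = desc_q_enqueue(q_h, &desc, true);
+ *	if (IS_DESCQ_ENQUEUE_ERR(rc))
+ *		op_ctx->error_info = DXDI_ERROR_INTERNAL;
+ *	else
+ *		rc = 0;
+ */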
+
+/*!
+ * Mark given cookie as invalid in case marked as completed after a timeout
+ * Invoke this before releasing the op_ctx object.
+ * There is no race with the interrupt because the client_ctx (cookie) is still
+ * valid when invoking this function.
+ *
+ * \param q_h Descriptor queue handle
+ * \param cookie Invalidate descriptors with this cookie
+ */
+void desc_q_mark_invalid_cookie(void *q_h, void *cookie);
+
+/*!
+ * Dequeue and process any completed descriptors in the queue
+ * (This function assumes non-reentrancy since it is invoked from
+ *  either interrupt handler or in workqueue context)
+ *
+ * \param q_h The queue object handle
+ *
+ */
+void desc_q_process_completed(void *q_h);
+
+/*!
+ * Create a debug descriptor in given buffer
+ *
+ * \param desc_p The descriptor buffer
+ * \param op_ctx The operation context
+ * TODO: Get additional debug descriptors (in addition to loopback)
+ */
+void desq_q_pack_debug_desc(struct sep_sw_desc *desc_p,
+			    struct sep_op_ctx *op_ctx);
+
+/*!
+ * Pack a CRYPTO_OP descriptor in given descriptor buffer
+ *
+ * \param desc_p The descriptor buffer
+ * \param op_ctx The operation context
+ * \param sep_ctx_load_req Context load request flag
+ * \param sep_ctx_init_req Context initialize request flag
+ * \param proc_mode Descriptor processing mode
+ */
+void desc_q_pack_crypto_op_desc(struct sep_sw_desc *desc_p,
+				struct sep_op_ctx *op_ctx,
+				int sep_ctx_load_req, int sep_ctx_init_req,
+				enum sep_proc_mode proc_mode);
+
+/*!
+ * Pack a COMBINED_OP descriptor in given descriptor buffer
+ *
+ * \param desc_p The descriptor buffer
+ * \param op_ctx The operation context
+ * \param sep_ctx_load_req Context load request flag
+ * \param sep_ctx_init_req Context initialize request flag
+ * \param proc_mode Descriptor processing mode
+ * \param cfg_scheme The SEP format configuration scheme claimed by the user
+ */
+void desc_q_pack_combined_op_desc(struct sep_sw_desc *desc_p,
+				  struct sep_op_ctx *op_ctx,
+				  int sep_ctx_load_req, int sep_ctx_init_req,
+				  enum sep_proc_mode proc_mode,
+				  u32 cfg_scheme);
+/*!
+ * Pack a LOAD_OP descriptor in given descriptor buffer
+ *
+ * \param desc_p The descriptor buffer
+ * \param op_ctx The operation context
+ * \param sep_ctx_load_req Context load request flag
+ */
+void desc_q_pack_load_op_desc(struct sep_sw_desc *desc_p,
+			      struct sep_op_ctx *op_ctx, int *sep_ctx_load_req);
+
+/*!
+ * Pack the RPC (message) descriptor type
+ *
+ *
+ * \param desc_p The descriptor buffer
+ * \param op_ctx The operation context
+ * \param agent_id RPC agent (API) ID
+ * \param func_id Function ID (index)
+ * \param rpc_msg_size Size of RPC parameters message buffer
+ * \param rpc_msg_dma_addr DMA address of RPC parameters message buffer
+ */
+void desc_q_pack_rpc_desc(struct sep_sw_desc *desc_p,
+			  struct sep_op_ctx *op_ctx,
+			  u16 agent_id,
+			  u16 func_id,
+			  unsigned long rpc_msg_size,
+			  dma_addr_t rpc_msg_dma_addr);
+
+/*!
+ * Pack the Applet Request descriptor
+ *
+ * \param desc_p The descriptor buffer
+ * \param op_ctx The operation context
+ * \param req_type The Applet request type
+ * \param session_id Session ID - Required only for SESSION_CLOSE and
+ *                   COMMAND_INVOKE requests
+ * \param inparams_addr DMA address of the "In Params." structure for the
+ *                      request.
+ */
+void desc_q_pack_app_req_desc(struct sep_sw_desc *desc_p,
+			      struct sep_op_ctx *op_ctx,
+			      enum sepapp_req_type req_type,
+			      u16 session_id, dma_addr_t inparams_addr);
+
+/*!
+ * Convert from crypto_proc_mode to string
+ *
+ * \param proc_mode The proc_mode enumeration value
+ *
+ * \return A string description of the processing mode
+ */
+const char *crypto_proc_mode_to_str(enum sep_proc_mode proc_mode);
+
+inline u32 add_cookie(uintptr_t op_ctx);
+
+inline void delete_cookie(u32 index);
+
+inline void delete_context(uintptr_t op_ctx);
+
+inline uintptr_t get_cookie(u32 index);
+
+#define SEP_SW_DESC_GET_COOKIE(desc_p) \
+	((struct sep_op_ctx *)get_cookie( \
+		((u32 *)(desc_p))[SEP_SW_DESC_COOKIE_WORD_OFFSET]))
+
+#define SEP_SW_DESC_SET_COOKIE(desc_p, op_ctx) \
+do {\
+	u32 __ctx_ptr__ = 0;\
+	if ((op_ctx) == NULL) {\
+		delete_cookie(\
+			((u32 *)(desc_p))[SEP_SW_DESC_COOKIE_WORD_OFFSET]);\
+	} else {\
+		__ctx_ptr__ = add_cookie((uintptr_t)(op_ctx));\
+	} \
+	memcpy(((u32 *)(desc_p)) + SEP_SW_DESC_COOKIE_WORD_OFFSET,\
+	       &__ctx_ptr__, sizeof(u32));\
+} while (0)
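+
+/*
+ * Cookie indirection sketch: the 32-bit descriptor word holds a small
+ * index obtained from add_cookie() rather than a raw kernel pointer
+ * (presumably so the fixed-width field can reference an op_ctx even on
+ * 64-bit hosts):
+ *
+ *	SEP_SW_DESC_SET_COOKIE(&desc, op_ctx);	 index = add_cookie(op_ctx)
+ *	op_ctx = SEP_SW_DESC_GET_COOKIE(&desc);	 get_cookie(index)
+ *	SEP_SW_DESC_SET_COOKIE(&desc, NULL);	 delete_cookie(index)
+ */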
+
+#endif /*_DESC_MGR_H_*/
diff --git a/drivers/staging/sep54/dx_bitops.h b/drivers/staging/sep54/dx_bitops.h
new file mode 100644
index 0000000..fd9524c
--- /dev/null
+++ b/drivers/staging/sep54/dx_bitops.h
@@ -0,0 +1,58 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+/*!
+ * \file dx_bitops.h
+ * Bit fields operations macros.
+ */
+#ifndef _DX_BITOPS_H_
+#define _DX_BITOPS_H_
+
+#define BITMASK(mask_size) (((mask_size) < 32) ?	\
+	((1UL << (mask_size)) - 1) : 0xFFFFFFFFUL)
+#define BITMASK_AT(mask_size, mask_offset) (BITMASK(mask_size) << (mask_offset))
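+/* Note: the "< 32" guard in BITMASK() avoids shifting a 32-bit 1UL by 32
+ * bits (undefined behaviour on 32-bit hosts) when mask_size == 32. */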
+
+#define BITFIELD_GET(word, bit_offset, bit_size)	\
+	(((word) >> (bit_offset)) & BITMASK(bit_size))
+#define BITFIELD_SET(word, bit_offset, bit_size, new_val) \
+	(word = ((word) & ~BITMASK_AT(bit_size, bit_offset)) |		\
+		(((new_val) & BITMASK(bit_size)) << (bit_offset)))
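+
+/*
+ * Example: with u32 word = 0x00000A50, BITFIELD_GET(word, 4, 8) extracts
+ * bits [11:4] and yields 0xA5, while BITFIELD_SET(word, 4, 8, 0x3C)
+ * rewrites the same field, leaving word == 0x000003C0.
+ */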
+
+/* Is val aligned to "align" ("align" must be power of 2) */
+#ifndef IS_ALIGNED
+#define IS_ALIGNED(val, align)	(((u32)(val) & ((align) - 1)) == 0)
+#endif
+
+#define SWAP_ENDIAN(word)		\
+	(((word) >> 24) | (((word) & 0x00FF0000) >> 8) | \
+	(((word) & 0x0000FF00) << 8) | (((word) & 0x000000FF) << 24))
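+/* e.g. SWAP_ENDIAN(0x12345678) evaluates to 0x78563412 */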
+
+/* Is val a multiple of "mult" ("mult" must be power of 2) */
+#define IS_MULT(val, mult)	(((val) & ((mult) - 1)) == 0)
+
+#define IS_NULL_ADDR(adr)	(!(adr))
+
+#endif /*_DX_BITOPS_H_*/
diff --git a/drivers/staging/sep54/dx_cc_defs.h b/drivers/staging/sep54/dx_cc_defs.h
new file mode 100644
index 0000000..0bdd38d
--- /dev/null
+++ b/drivers/staging/sep54/dx_cc_defs.h
@@ -0,0 +1,42 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _DX_CC_DEFS_H_
+#define _DX_CC_DEFS_H_
+
+#define DX_INT32_MAX 0x7FFFFFFFL
+
+enum dx_crypto_key_type {
+	DX_USER_KEY = 0,
+	DX_ROOT_KEY = 1,
+	DX_PROVISIONING_KEY = 2,
+	DX_XOR_HDCP_KEY = 3,
+	DX_APPLET_KEY = 4,
+	DX_SESSION_KEY = 5,
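+	/* End-of-list sentinel; also keeps the enum at least 32 bits wide */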
+	DX_END_OF_KEYS = DX_INT32_MAX
+};
+
+#endif
diff --git a/drivers/staging/sep54/dx_cc_regs.h b/drivers/staging/sep54/dx_cc_regs.h
new file mode 100644
index 0000000..2f91bd6
--- /dev/null
+++ b/drivers/staging/sep54/dx_cc_regs.h
@@ -0,0 +1,162 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+/*!
+ * \file dx_cc_regs.h
+ * \brief Macro definitions for accessing Dx CryptoCell register space
+ *
+ * For SeP code define DX_CC_SEP
+ * For Host physical/direct registers access define DX_CC_HOST
+ * For Host virtual mapping of registers define DX_CC_HOST_VIRT
+ */
+
+#ifndef _DX_CC_REGS_H_
+#define _DX_CC_REGS_H_
+
+#include "dx_bitops.h"
+
+/* Include register base addresses data */
+#if defined(DX_CC_SEP)
+#include "dx_reg_base_sep.h"
+
+#elif defined(DX_CC_HOST) || defined(DX_CC_HOST_VIRT) || defined(DX_CC_TEE)
+#include "dx_reg_base_host.h"
+
+#else
+#error Define one of DX_CC_SEP, DX_CC_HOST, DX_CC_HOST_VIRT or DX_CC_TEE
+#endif
+
+/* CC registers address calculation */
+#if defined(DX_CC_SEP)
+#define DX_CC_REG_ADDR(unit_name, reg_name)            \
+	 (DX_BASE_CC_PERIF + DX_BASE_ ## unit_name + \
+	  DX_ ## reg_name ## _REG_OFFSET)
+
+/* In host macros we ignore the unit_name because all offsets are from base */
+#elif defined(DX_CC_HOST)
+#define DX_CC_REG_ADDR(unit_name, reg_name)            \
+	(DX_BASE_CC + DX_ ## reg_name ## _REG_OFFSET)
+
+#elif defined(DX_CC_TEE)
+#define DX_CC_REG_ADDR(unit_name, reg_name)            \
+	(DX_BASE_CC + DX_ ## reg_name ## _REG_OFFSET)
+
+#elif defined(DX_CC_HOST_VIRT)
+#define DX_CC_REG_ADDR(cc_base_virt, unit_name, reg_name) \
+	(((unsigned long)(cc_base_virt)) + DX_ ## reg_name ## _REG_OFFSET)
+
+#endif
+
+/* Register Offset macros (from registers base address in host) */
+#if defined(DX_CC_HOST) || defined(DX_CC_HOST_VIRT)
+
+#define DX_CC_REG_OFFSET(reg_domain, reg_name)               \
+	(DX_ ## reg_domain ## _ ## reg_name ## _REG_OFFSET)
+
+/* Indexed GPR offset macros - note the two-level macro expansion trick */
+/* (Using the macro without the "_" prefix allows passing another macro *
+ *  as the gpr_idx) */
+#define _SEP_HOST_GPR_REG_OFFSET(gpr_idx) \
+	DX_CC_REG_OFFSET(HOST, SEP_HOST_GPR ## gpr_idx)
+#define SEP_HOST_GPR_REG_OFFSET(gpr_idx) _SEP_HOST_GPR_REG_OFFSET(gpr_idx)
+#define _HOST_SEP_GPR_REG_OFFSET(gpr_idx) \
+	DX_CC_REG_OFFSET(HOST, HOST_SEP_GPR ## gpr_idx)
+#define HOST_SEP_GPR_REG_OFFSET(gpr_idx) _HOST_SEP_GPR_REG_OFFSET(gpr_idx)
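+/*
+ * Expansion example: given #define MY_GPR 3 (a hypothetical name),
+ * SEP_HOST_GPR_REG_OFFSET(MY_GPR) first expands MY_GPR to 3 and only then
+ * pastes tokens, yielding DX_CC_REG_OFFSET(HOST, SEP_HOST_GPR3). Pasting
+ * directly in a single level would produce the bogus token
+ * SEP_HOST_GPRMY_GPR.
+ */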
+
+/* GPR IRQ bit mask by GPR index */
+#define _SEP_HOST_GPR_IRQ_MASK(gpr_idx) \
+	(1 << DX_HOST_IRR_SEP_HOST_GPR ## gpr_idx ## _INT_BIT_SHIFT)
+#define SEP_HOST_GPR_IRQ_MASK(gpr_idx) _SEP_HOST_GPR_IRQ_MASK(gpr_idx)
+
+#elif defined(DX_CC_SEP)
+
+#define DX_CC_REG_OFFSET(unit_name, reg_name)               \
+	(DX_BASE_ ## unit_name + DX_ ## reg_name ## _REG_OFFSET)
+
+/* Indexed GPR address macros - note the two-level macro expansion trick */
+/* (Using the macro without the "_" prefix allows passing another macro *
+ *  as the gpr_idx) */
+#define _SEP_HOST_GPR_REG_ADDR(gpr_idx) \
+	DX_CC_REG_ADDR(SEP_RGF, SEP_SEP_HOST_GPR ## gpr_idx)
+#define SEP_HOST_GPR_REG_ADDR(gpr_idx) _SEP_HOST_GPR_REG_ADDR(gpr_idx)
+#define _HOST_SEP_GPR_REG_ADDR(gpr_idx) \
+	DX_CC_REG_ADDR(SEP_RGF, SEP_HOST_SEP_GPR ## gpr_idx)
+#define HOST_SEP_GPR_REG_ADDR(gpr_idx) _HOST_SEP_GPR_REG_ADDR(gpr_idx)
+
+#elif defined(DX_CC_TEE)
+
+#define DX_CC_REG_OFFSET(unit_name, reg_name) \
+	(DX_BASE_ ## unit_name + DX_ ## reg_name ## _REG_OFFSET)
+
+#else
+#error "Undef exec domain,not DX_CC_SEP, DX_CC_HOST, DX_CC_HOST_VIRT, DX_CC_TEE"
+#endif
+
+/* Registers address macros for ENV registers (development FPGA only) */
+#ifdef DX_BASE_ENV_REGS
+
+#if defined(DX_CC_HOST)
+#define DX_ENV_REG_ADDR(reg_name) \
+	(DX_BASE_ENV_REGS + DX_ENV_ ## reg_name ## _REG_OFFSET)
+
+#elif defined(DX_CC_HOST_VIRT)
+/* The OS driver resource address space covers the ENV registers, too. */
+/* DX_BASE_ENV_REGS is an absolute address, so we add its offset from
+ * DX_BASE_CC to the virtual base. */
+#define DX_ENV_REG_ADDR(cc_base_virt, reg_name) \
+	(((cc_base_virt) + (DX_BASE_ENV_REGS - DX_BASE_CC)) + \
+	 DX_ENV_ ## reg_name ## _REG_OFFSET)
+
+#endif
+
+#endif				/*DX_BASE_ENV_REGS */
+
+/* Bit fields access */
+#define DX_CC_REG_FLD_GET(unit_name, reg_name, fld_name, reg_val)	      \
+	(DX_ ## reg_name ## _ ## fld_name ## _BIT_SIZE == 0x20 ?	      \
+	reg_val /* Optimization for 32b fields */ :			      \
+	BITFIELD_GET(reg_val, DX_ ## reg_name ## _ ## fld_name ## _BIT_SHIFT, \
+		     DX_ ## reg_name ## _ ## fld_name ## _BIT_SIZE))
+
+#define DX_CC_REG_FLD_SET(                                               \
+	unit_name, reg_name, fld_name, reg_shadow_var, new_fld_val)      \
+do {                                                                     \
+	if (DX_ ## reg_name ## _ ## fld_name ## _BIT_SIZE == 0x20)       \
+		reg_shadow_var = new_fld_val; /* Optimization for 32b fields */\
+	else                                                             \
+		BITFIELD_SET(reg_shadow_var,                             \
+			DX_ ## reg_name ## _ ## fld_name ## _BIT_SHIFT,  \
+			DX_ ## reg_name ## _ ## fld_name ## _BIT_SIZE,   \
+			new_fld_val);                                    \
+} while (0)
+
+/* Usage example:
+   u32 reg_shadow = READ_REGISTER(DX_CC_REG_ADDR(CRY_KERNEL,AES_CONTROL));
+   DX_CC_REG_FLD_SET(CRY_KERNEL,AES_CONTROL,NK_KEY0,reg_shadow, 3);
+   DX_CC_REG_FLD_SET(CRY_KERNEL,AES_CONTROL,NK_KEY1,reg_shadow, 1);
+   WRITE_REGISTER(DX_CC_REG_ADDR(CRY_KERNEL,AES_CONTROL), reg_shadow);
+ */
+
+#endif /*_DX_CC_REGS_H_*/
diff --git a/drivers/staging/sep54/dx_dev_defs.h b/drivers/staging/sep54/dx_dev_defs.h
new file mode 100644
index 0000000..1626df8
--- /dev/null
+++ b/drivers/staging/sep54/dx_dev_defs.h
@@ -0,0 +1,67 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+/** @file: dx_dev_defs.h
+ * Device specific definitions for the CC device driver */
+
+#ifndef _DX_DEV_DEFS_H_
+#define _DX_DEV_DEFS_H_
+
+#define DRIVER_NAME MODULE_NAME
+
+/* OpenFirmware matches */
+#define DX_DEV_OF_MATCH  {                        \
+	{.name = "dx_cc54" },                     \
+	{.compatible = "xlnx,plbv46-cc-1.00.c",}, \
+	{}                                        \
+}
+
+/* Firmware images file names (for request_firmware) */
+#define RESIDENT_IMAGE_NAME DRIVER_NAME "-resident.bin"
+#define CACHE_IMAGE_NAME DRIVER_NAME "-cache.bin"
+#define VRL_IMAGE_NAME DRIVER_NAME "-Primary_VRL.bin"
+
+/* OTP index of verification hash for key in VRL */
+#define VRL_KEY_INDEX 0
+
+#define ICACHE_SIZE_LOG2_DEFAULT 20	/* 1MB */
+#define DCACHE_SIZE_LOG2_DEFAULT 20	/* 1MB */
+
+#define EXPECTED_FW_VER 0x01000000
+
+/* The known SeP clock frequency in MHz (30 MHz on Virtex-5 FPGA) */
+/* Comment this line if SeP frequency is already initialized in CC_INIT ext. */
+/*#define SEP_FREQ_MHZ 30*/
+
+/* Number of SEP descriptor queues */
+#define SEP_MAX_NUM_OF_DESC_Q  2
+
+/* Maximum number of registered memory buffers per user context */
+#define MAX_REG_MEMREF_PER_CLIENT_CTX 16
+
+/* Maximum number of SeP Applets session per client context */
+#define MAX_SEPAPP_SESSION_PER_CLIENT_CTX 16
+
+#endif				/* _DX_DEV_DEFS_H_ */
diff --git a/drivers/staging/sep54/dx_driver.c b/drivers/staging/sep54/dx_driver.c
new file mode 100644
index 0000000..5a97c29
--- /dev/null
+++ b/drivers/staging/sep54/dx_driver.c
@@ -0,0 +1,4935 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_MAIN
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/fcntl.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <linux/mutex.h>
+#include <linux/sysctl.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+/* cache.h required for L1_CACHE_ALIGN() and cache_line_size() */
+#include <linux/cache.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <linux/pagemap.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/kthread.h>
+#include <linux/genhd.h>
+#include <linux/mmc/card.h>
+
+#include <generated/autoconf.h>
+#if defined(DEBUG) && defined(CONFIG_KGDB)
+/* For setup_break option */
+#include <linux/kgdb.h>
+#endif
+
+#include "sep_log.h"
+#include "sep_init.h"
+#include "desc_mgr.h"
+#include "lli_mgr.h"
+#include "sep_sysfs.h"
+#include "dx_driver_abi.h"
+#include "crypto_ctx_mgr.h"
+#include "crypto_api.h"
+#include "sep_request.h"
+#include "dx_sep_kapi.h"
+#if MAX_SEPAPP_SESSION_PER_CLIENT_CTX > 0
+#include "sepapp.h"
+#endif
+#include "sep_power.h"
+#include "sep_request_mgr.h"
+#include "sep_applets.h"
+
+/* Registers definitions from shared/hw/include */
+#include "dx_reg_base_host.h"
+#include "dx_host.h"
+#ifdef DX_BASE_ENV_REGS
+#include "dx_env.h"
+#endif
+#define DX_CC_HOST_VIRT	/* must be defined before including dx_cc_regs.h */
+#include "dx_cc_regs.h"
+#include "dx_init_cc_abi.h"
+#include "dx_driver.h"
+
+#ifdef CONFIG_COMPAT
+#include "sep_compat_ioctl.h"
+#endif
+
+#if SEPAPP_UUID_SIZE != DXDI_SEPAPP_UUID_SIZE
+#error Size mismatch of SEPAPP_UUID_SIZE and DXDI_SEPAPP_UUID_SIZE
+#endif
+
+#define DEVICE_NAME_PREFIX  "dx_sep_q"
+#define SEP_DEVICES         SEP_MAX_NUM_OF_DESC_Q
+
+#define DRIVER_NAME     MODULE_NAME
+
+#ifdef SEP_PRINTF
+#define SEP_PRINTF_H2S_GPR_OFFSET \
+	HOST_SEP_GPR_REG_OFFSET(DX_SEP_HOST_PRINTF_GPR_IDX)
+#define SEP_PRINTF_S2H_GPR_OFFSET \
+	SEP_HOST_GPR_REG_OFFSET(DX_SEP_HOST_PRINTF_GPR_IDX)
+/* Only the upper 24 bits are allocated for the Ack */
+#define SEP_PRINTF_ACK_MAX 0xFFFFFF
+/* Sync. host-SeP value */
+#define SEP_PRINTF_ACK_SYNC_VAL SEP_PRINTF_ACK_MAX
+#endif
+
+static struct class *sep_class;
+
+int q_num;			/* Initialized to 0 */
+module_param(q_num, int, 0444);
+MODULE_PARM_DESC(q_num, "Num. of active queues 1-2");
+
+int sep_log_level = SEP_BASE_LOG_LEVEL;
+module_param(sep_log_level, int, 0644);
+MODULE_PARM_DESC(sep_log_level, "Log level: min ERR = 0, max TRACE = 4");
+
+int sep_log_mask = SEP_LOG_MASK_ALL;
+module_param(sep_log_mask, int, 0644);
+MODULE_PARM_DESC(sep_log_mask, "Log components mask");
+
+int disable_linux_crypto;
+module_param(disable_linux_crypto, int, 0444);
+MODULE_PARM_DESC(disable_linux_crypto,
+		 "Set to !0 to disable registration with Linux CryptoAPI");
+
+/* Parameters to override default sep cache memories reserved pages */
+#ifdef ICACHE_SIZE_LOG2_DEFAULT
+#include "dx_init_cc_defs.h"
+int icache_size_log2 = ICACHE_SIZE_LOG2_DEFAULT;
+module_param(icache_size_log2, int, 0444);
+MODULE_PARM_DESC(icache_size_log2, "Size of Icache memory in log2(bytes)");
+
+int dcache_size_log2 = DCACHE_SIZE_LOG2_DEFAULT;
+module_param(dcache_size_log2, int, 0444);
+MODULE_PARM_DESC(icache_size_log2, "Size of Dcache memory in log2(bytes)");
+#endif
+
+#ifdef SEP_BACKUP_BUF_SIZE
+int sep_backup_buf_size = SEP_BACKUP_BUF_SIZE;
+module_param(sep_backup_buf_size, int, 0444);
+MODULE_PARM_DESC(sep_backup_buf_size,
+		 "Size of backup buffer of SeP context (for warm-boot)");
+#endif
+
+/* Interrupt mask assigned to GPRs */
+/* Used for run time lookup, where SEP_HOST_GPR_IRQ_MASK() cannot be used */
+static const u32 gpr_interrupt_mask[] = {
+	SEP_HOST_GPR_IRQ_MASK(0),
+	SEP_HOST_GPR_IRQ_MASK(1),
+	SEP_HOST_GPR_IRQ_MASK(2),
+	SEP_HOST_GPR_IRQ_MASK(3),
+	SEP_HOST_GPR_IRQ_MASK(4),
+	SEP_HOST_GPR_IRQ_MASK(5),
+	SEP_HOST_GPR_IRQ_MASK(6),
+	SEP_HOST_GPR_IRQ_MASK(7)
+};
+
+u32 __iomem *security_cfg_reg;
+
+#ifdef DEBUG
+void dump_byte_array(const char *name, const u8 *the_array,
+		     unsigned long size)
+{
+	int i, line_offset = 0;
+	const u8 *cur_byte;
+	char line_buf[80];
+
+	line_offset = snprintf(line_buf, sizeof(line_buf), "%s[%lu]: ",
+			       name, size);
+
+	for (i = 0, cur_byte = the_array;
+	     (i < size) && (line_offset < sizeof(line_buf)); i++, cur_byte++) {
+		line_offset += snprintf(line_buf + line_offset,
+					sizeof(line_buf) - line_offset,
+					"%02X ", *cur_byte);
+		if (line_offset > 75) {	/* Cut before line end */
+			pr_debug("%s\n", line_buf);
+			line_offset = 0;
+		}
+	}
+
+	if (line_offset > 0)	/* Dump remaining line */
+		pr_debug("%s\n", line_buf);
+
+}
+
+void dump_word_array(const char *name, const u32 *the_array,
+		     unsigned long size_in_words)
+{
+	int i, line_offset = 0;
+	const u32 *cur_word;
+	char line_buf[80];
+
+	line_offset = snprintf(line_buf, sizeof(line_buf), "%s[%lu]: ",
+			       name, size_in_words);
+
+	for (i = 0, cur_word = the_array;
+	     (i < size_in_words) && (line_offset < sizeof(line_buf));
+	     i++, cur_word++) {
+		line_offset += snprintf(line_buf + line_offset,
+					sizeof(line_buf) - line_offset,
+					"%08X ", *cur_word);
+		if (line_offset > 70) {	/* Cut before line end */
+			pr_debug("%s\n", line_buf);
+			line_offset = 0;
+		}
+	}
+
+	if (line_offset > 0)	/* Dump remaining line */
+		pr_debug("%s\n", line_buf);
+}
+
+#endif /*DEBUG*/
+/**** SeP descriptor operations implementation functions *****/
+/* (send descriptor, wait for completion, process result)    */
+/**
+ * send_crypto_op_desc() - Pack crypto op. descriptor and send
+ * @op_ctx:		The operation context
+ * @sep_ctx_load_req:	Flag if context loading is required
+ * @sep_ctx_init_req:	Flag if context init is required
+ * @proc_mode:		Processing mode
+ *
+ * On failure desc_type is retained so process_desc_completion cleans up
+ * resources anyway (error_info denotes failure to send/complete)
+ * Returns int 0 on success
+ */
+static int send_crypto_op_desc(struct sep_op_ctx *op_ctx,
+			       int sep_ctx_load_req, int sep_ctx_init_req,
+			       enum sep_proc_mode proc_mode)
+{
+	const struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct sep_sw_desc desc;
+	int rc;
+
+	desc_q_pack_crypto_op_desc(&desc, op_ctx,
+				   sep_ctx_load_req, sep_ctx_init_req,
+				   proc_mode);
+	/* op_state must be updated before dispatching descriptor */
+	rc = desc_q_enqueue(drvdata->desc_queue, &desc, true);
+	if (unlikely(IS_DESCQ_ENQUEUE_ERR(rc))) {
+		/* Desc. sending failed - "signal" process_desc_completion */
+		op_ctx->error_info = DXDI_ERROR_INTERNAL;
+	} else {
+		rc = 0;
+	}
+
+	return rc;
+}
+
+/**
+ * send_combined_op_desc() - Pack combined or/and load operation descriptor(s)
+ * @op_ctx: The operation context
+ * @sep_ctx_load_req: Flag if context loading is required
+ * @sep_ctx_init_req: Flag if context init is required
+ * @proc_mode: Processing mode
+ * @cfg_scheme: The SEP format configuration scheme claimed by the user
+ *
+ * On failure desc_type is retained so process_desc_completion cleans up
+ * resources anyway (error_info denotes failure to send/complete)
+ * Returns int 0 on success
+ */
+static int send_combined_op_desc(struct sep_op_ctx *op_ctx,
+				 int *sep_ctx_load_req, int sep_ctx_init_req,
+				 enum sep_proc_mode proc_mode,
+				 u32 cfg_scheme)
+{
+	const struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct sep_sw_desc desc;
+	int rc;
+
+	/* transaction of two descriptors */
+	op_ctx->pending_descs_cntr = 2;
+
+	/* prepare load descriptor of combined associated contexts */
+	desc_q_pack_load_op_desc(&desc, op_ctx, sep_ctx_load_req);
+
+	/* op_state must be updated before dispatching descriptor */
+	rc = desc_q_enqueue(drvdata->desc_queue, &desc, true);
+	if (unlikely(IS_DESCQ_ENQUEUE_ERR(rc))) {
+		/* Desc. sending failed - "signal" process_desc_completion */
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+		goto send_combined_op_exit;
+	}
+
+	/* prepare crypto descriptor for the combined scheme operation;
+	 * contexts were already loaded by the prior LOAD_OP descriptor */
+	desc_q_pack_combined_op_desc(&desc, op_ctx, 0 /* sep_ctx_load_req */,
+				     sep_ctx_init_req, proc_mode, cfg_scheme);
+	rc = desc_q_enqueue(drvdata->desc_queue, &desc, true);
+	if (unlikely(IS_DESCQ_ENQUEUE_ERR(rc))) {
+		/*invalidate first descriptor (if still pending) */
+		desc_q_mark_invalid_cookie(drvdata->desc_queue, (void *)op_ctx);
+		/* Desc. sending failed - "signal" process_desc_completion */
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	} else {
+		rc = 0;
+	}
+
+ send_combined_op_exit:
+	return rc;
+}
+
+/**
+ * register_client_memref() - Register given client memory buffer reference
+ * @client_ctx:		User context data
+ * @user_buf_ptr:	Buffer address in user space. NULL if sgl!=NULL.
+ * @sgl:		Scatter/gather list (for kernel buffers)
+ *			NULL if user_buf_ptr!=NULL.
+ * @buf_size:		Buffer size in bytes
+ * @dma_direction:	DMA direction
+ *
+ * Returns the registered memory reference ID (>=0), or <0 on error
+ */
+int register_client_memref(struct sep_client_ctx *client_ctx,
+			   u8 __user *user_buf_ptr,
+			   struct scatterlist *sgl,
+			   const unsigned long buf_size,
+			   const enum dma_data_direction dma_direction)
+{
+	int free_memref_idx, rc;
+	struct registered_memref *regmem_p;
+
+	if (unlikely((user_buf_ptr != NULL) && (sgl != NULL))) {
+		pr_err("Both user_buf_ptr and sgl are given!\n");
+		return -EINVAL;
+	}
+
+	/* Find free entry in user_memref */
+	for (free_memref_idx = 0, regmem_p = &client_ctx->reg_memrefs[0];
+	     free_memref_idx < MAX_REG_MEMREF_PER_CLIENT_CTX;
+	     free_memref_idx++, regmem_p++) {
+		mutex_lock(&regmem_p->buf_lock);
+		if (regmem_p->ref_cnt == 0)
+			break;	/* found free entry */
+		mutex_unlock(&regmem_p->buf_lock);
+	}
+	if (unlikely(free_memref_idx == MAX_REG_MEMREF_PER_CLIENT_CTX)) {
+		pr_warn("No free entry for user memory registration\n");
+		free_memref_idx = -ENOMEM;/* Negative error code as index */
+	} else {
+		pr_debug("Allocated memref_idx=%d (regmem_p=%p)\n",
+			      free_memref_idx, regmem_p);
+		regmem_p->ref_cnt = 1;	/* Capture entry */
+		/* Lock user pages for DMA and save pages info.
+		 * (prepare for MLLI) */
+		rc = llimgr_register_client_dma_buf(client_ctx->drv_data->
+						    sep_data->llimgr,
+						    user_buf_ptr, sgl, buf_size,
+						    0, dma_direction,
+						    &regmem_p->dma_obj);
+		if (unlikely(rc < 0)) {
+			/* Release entry */
+			regmem_p->ref_cnt = 0;
+			free_memref_idx = rc;
+		}
+		mutex_unlock(&regmem_p->buf_lock);
+	}
+
+	/* Return user_memref[] entry index as the memory reference ID */
+	return free_memref_idx;
+}
+
+/**
+ * free_client_memref() - Free resources associated with a user memory
+ *			  reference
+ * @client_ctx:	 User context data
+ * @memref_idx:	 Index of the user memory reference
+ *
+ * Free resources associated with a user memory reference
+ * (The referenced memory may be locked user pages or allocated DMA-coherent
+ *  memory mmap'ed to the user space)
+ * Returns int !0 on failure (memref still in use or unknown)
+ */
+int free_client_memref(struct sep_client_ctx *client_ctx,
+		       int memref_idx)
+{
+	struct registered_memref *regmem_p;
+	int rc = 0;
+
+	if (!IS_VALID_MEMREF_IDX(memref_idx)) {
+		pr_err("Invalid memref ID %d\n", memref_idx);
+		return -EINVAL;
+	}
+	/* Index validated above, safe to use now */
+	regmem_p = &client_ctx->reg_memrefs[memref_idx];
+
+	mutex_lock(&regmem_p->buf_lock);
+
+	if (likely(regmem_p->ref_cnt == 1)) {
+		/* TODO: support case of allocated DMA-coherent buffer */
+		llimgr_deregister_client_dma_buf(client_ctx->drv_data->
+						 sep_data->llimgr,
+						 &regmem_p->dma_obj);
+		regmem_p->ref_cnt = 0;
+	} else if (unlikely(regmem_p->ref_cnt == 0)) {
+		pr_err("Invoked for free memref ID=%d\n", memref_idx);
+		rc = -EINVAL;
+	} else {		/* ref_cnt > 1 */
+		pr_err(
+			    "BUSY/Invalid memref to release: ref_cnt=%d, user_buf_ptr=%p\n",
+			    regmem_p->ref_cnt, regmem_p->dma_obj.user_buf_ptr);
+		rc = -EBUSY;
+	}
+
+	mutex_unlock(&regmem_p->buf_lock);
+
+	return rc;
+}
+
+/**
+ * acquire_dma_obj() - Get the memref object of given memref_idx and increment
+ *			its reference count
+ * @client_ctx:	Associated client context
+ * @memref_idx:	Required registered memory reference ID (index)
+ *
+ * The returned object must be released by invoking release_dma_obj() before
+ * the object (memref) may be freed.
+ * Returns struct client_dma_buffer The memref object, or NULL if invalid
+ */
+struct client_dma_buffer *acquire_dma_obj(struct sep_client_ctx *client_ctx,
+					  int memref_idx)
+{
+	struct registered_memref *regmem_p;
+	struct client_dma_buffer *rc;
+
+	if (!IS_VALID_MEMREF_IDX(memref_idx)) {
+		pr_err("Invalid memref ID %d\n", memref_idx);
+		return NULL;
+	}
+	regmem_p = &client_ctx->reg_memrefs[memref_idx];
+
+	mutex_lock(&regmem_p->buf_lock);
+	if (regmem_p->ref_cnt < 1) {
+		pr_err("Invalid memref (ID=%d, ref_cnt=%d)\n",
+			    memref_idx, regmem_p->ref_cnt);
+		rc = NULL;
+	} else {
+		regmem_p->ref_cnt++;
+		rc = &regmem_p->dma_obj;
+	}
+	mutex_unlock(&regmem_p->buf_lock);
+
+	return rc;
+}
+
+/**
+ * release_dma_obj() - Release memref object taken with acquire_dma_obj()
+ *			(Does not free!)
+ * @client_ctx:	Associated client context
+ * @dma_obj:	The DMA object returned from acquire_dma_obj()
+ *
+ * Returns void
+ */
+void release_dma_obj(struct sep_client_ctx *client_ctx,
+		     struct client_dma_buffer *dma_obj)
+{
+	struct registered_memref *regmem_p;
+	int memref_idx;
+
+	if (dma_obj == NULL)	/* Probably failed on acquire_dma_obj */
+		return;
+	/* Verify valid container */
+	memref_idx = DMA_OBJ_TO_MEMREF_IDX(client_ctx, dma_obj);
+	if (!IS_VALID_MEMREF_IDX(memref_idx)) {
+		pr_err("Given DMA object is not registered\n");
+		return;
+	}
+	/* Get container */
+	regmem_p = &client_ctx->reg_memrefs[memref_idx];
+	mutex_lock(&regmem_p->buf_lock);
+	if (regmem_p->ref_cnt < 2) {
+		pr_err("Invalid memref (ref_cnt=%d, user_buf_ptr=%p)\n",
+			    regmem_p->ref_cnt, regmem_p->dma_obj.user_buf_ptr);
+	} else {
+		regmem_p->ref_cnt--;
+	}
+	mutex_unlock(&regmem_p->buf_lock);
+}
+
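+/* Note on the ref_cnt state machine used above: 0 marks a free
+ * reg_memrefs[] slot, 1 marks a registered memref with no active users,
+ * and ref_cnt > 1 means there are (ref_cnt - 1) outstanding
+ * acquire_dma_obj() holders. Hence free_client_memref() succeeds only at
+ * ref_cnt == 1 and release_dma_obj() refuses to drop the count below 1. */
+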
+/**
+ * crypto_op_completion_cleanup() - Cleanup CRYPTO_OP descriptor operation
+ *					resources after completion
+ * @op_ctx:	Operation context
+ *
+ * Returns int (always 0)
+ */
+int crypto_op_completion_cleanup(struct sep_op_ctx *op_ctx)
+{
+	struct sep_client_ctx *client_ctx = op_ctx->client_ctx;
+	struct queue_drvdata *drvdata = client_ctx->drv_data;
+	void *llimgr = drvdata->sep_data->llimgr;
+	struct client_crypto_ctx_info *ctx_info_p = &(op_ctx->ctx_info);
+	enum sep_op_type op_type = op_ctx->op_type;
+	u32 error_info = op_ctx->error_info;
+	u32 ctx_info_idx;
+	bool data_in_place;
+	const enum crypto_alg_class alg_class =
+	    ctxmgr_get_alg_class(&op_ctx->ctx_info);
+
+	/* Resources cleanup on data processing operations (PROC/FINI) */
+	if (op_type & (SEP_OP_CRYPTO_PROC | SEP_OP_CRYPTO_FINI)) {
+		if ((alg_class == ALG_CLASS_HASH) ||
+		    (ctxmgr_get_mac_type(ctx_info_p) == DXDI_MAC_HMAC)) {
+			/* Unmap what was mapped in prepare_data_for_sep() */
+			ctxmgr_unmap2dev_hash_tail(ctx_info_p,
+						   drvdata->sep_data->dev);
+			/* Save last data block tail (remainder of crypto-block)
+			 * or clear that buffer after it was used if there is
+			 * no new block remainder data */
+			ctxmgr_save_hash_blk_remainder(ctx_info_p,
+						       &op_ctx->din_dma_obj,
+						       false);
+		}
+		data_in_place = llimgr_is_same_mlli_tables(llimgr,
+							   &op_ctx->ift,
+							   &op_ctx->oft);
+		/* First free IFT resources */
+		llimgr_destroy_mlli(llimgr, &op_ctx->ift);
+		llimgr_deregister_client_dma_buf(llimgr, &op_ctx->din_dma_obj);
+		/* Free OFT resources */
+		if (data_in_place) {
+			/* OFT already destroyed as IFT. Just clean it. */
+			MLLI_TABLES_LIST_INIT(&op_ctx->oft);
+			CLEAN_DMA_BUFFER_INFO(&op_ctx->din_dma_obj);
+		} else {	/* OFT resources cleanup */
+			llimgr_destroy_mlli(llimgr, &op_ctx->oft);
+			llimgr_deregister_client_dma_buf(llimgr,
+							 &op_ctx->dout_dma_obj);
+		}
+	}
+
+	for (ctx_info_idx = 0;
+	     ctx_info_idx < op_ctx->ctx_info_num;
+	     ctx_info_idx++, ctx_info_p++) {
+		if ((op_type & SEP_OP_CRYPTO_FINI) || (error_info != 0)) {
+			/* If this was a finalizing descriptor, or any error,
+			 * invalidate from cache */
+			ctxmgr_sep_cache_invalidate(drvdata->sep_cache,
+					ctxmgr_get_ctx_id(ctx_info_p),
+					CRYPTO_CTX_ID_SINGLE_MASK);
+		}
+		/* Update context state */
+		if ((op_type & SEP_OP_CRYPTO_FINI) ||
+		    ((op_type & SEP_OP_CRYPTO_INIT) && (error_info != 0))) {
+			/* If this was a finalizing descriptor,
+			 * or a failing initializing descriptor: */
+			ctxmgr_set_ctx_state(ctx_info_p,
+					CTX_STATE_UNINITIALIZED);
+		} else if (op_type & SEP_OP_CRYPTO_INIT)
+			ctxmgr_set_ctx_state(ctx_info_p,
+					CTX_STATE_INITIALIZED);
+	}
+
+	return 0;
+}
+
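+/* Note on the cleanup order in crypto_op_completion_cleanup(): the IFT is
+ * always destroyed first; for in-place operations (detected with
+ * llimgr_is_same_mlli_tables()) the OFT shares the IFT's tables, so it is
+ * only re-initialized rather than destroyed again, which avoids a double
+ * free of the shared MLLI resources. */
+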
+/**
+ * wait_for_sep_op_result() - Wait for an outstanding SeP operation to
+ *				complete and fetch the SeP return code
+ * @op_ctx:	Operation context
+ *
+ * Wait for the outstanding SeP operation to complete and fetch the SeP
+ * return code into op_ctx->sep_ret_code.
+ * Returns int
+ */
+int wait_for_sep_op_result(struct sep_op_ctx *op_ctx)
+{
+#ifdef DEBUG
+	if (unlikely(op_ctx->op_state == USER_OP_NOP)) {
+		pr_err("Operation context is inactive!\n");
+		op_ctx->error_info = DXDI_ERROR_FATAL;
+		return -EINVAL;
+	}
+#endif
+
+	/* Wait until the crypto operation completes.
+	 * We cannot time out this operation because hardware operations may
+	 * still be pending on the associated data buffers.
+	 * Only a system reboot can take us out of this abnormal state in a
+	 * safe manner (avoiding data corruption). */
+	wait_for_completion(&(op_ctx->ioctl_op_compl));
+#ifdef DEBUG
+	if (unlikely(op_ctx->op_state != USER_OP_COMPLETED)) {
+		pr_err(
+			    "Op. state is not COMPLETED after getting completion event (op_ctx=0x%p, op_state=%d)\n",
+			    op_ctx, op_ctx->op_state);
+		dump_stack();	/*SEP_DRIVER_BUG(); */
+		op_ctx->error_info = DXDI_ERROR_FATAL;
+		return -EINVAL;
+	}
+#endif
+
+	return 0;
+}
+
+/**
+ * get_num_of_ctx_info() - Count the number of valid contexts assigned for the
+ *				combined operation.
+ * @config:	 The user configuration scheme array
+ *
+ * Returns int
+ */
+static int get_num_of_ctx_info(struct dxdi_combined_props *config)
+{
+	int valid_ctx_n;
+
+	for (valid_ctx_n = 0;
+	     (valid_ctx_n < DXDI_COMBINED_NODES_MAX) &&
+	     (config->node_props[valid_ctx_n].context != NULL);
+	     valid_ctx_n++) {
+		/* NOOP */
+	}
+
+	return valid_ctx_n;
+}
+
+/***** Driver Interface implementation functions *****/
+
+/**
+ * format_sep_combined_cfg_scheme() - Encode the user configuration scheme to
+ *					SeP format.
+ * @config:	 The user configuration scheme array
+ * @op_ctx:	 Operation context
+ *
+ * Returns u32
+ */
+static u32 format_sep_combined_cfg_scheme(struct dxdi_combined_props *config,
+					  struct sep_op_ctx *op_ctx)
+{
+	enum dxdi_input_engine_type engine_src = DXDI_INPUT_NULL;
+	enum sep_engine_type engine_type = SEP_ENGINE_NULL;
+	enum crypto_alg_class alg_class;
+	u32 sep_cfg_scheme = 0;	/* the encoded config scheme */
+	struct client_crypto_ctx_info *ctx_info_p = &op_ctx->ctx_info;
+	enum dxdi_sym_cipher_type symc_type;
+	int eng_idx, done = 0;
+	int prev_direction = -1;
+
+	/* encode engines connections into SEP format */
+	for (eng_idx = 0;
+	     (eng_idx < DXDI_COMBINED_NODES_MAX) && (!done);
+	     eng_idx++, ctx_info_p++) {
+
+		/* set engine source */
+		engine_src = config->node_props[eng_idx].eng_input;
+
+		/* set engine type */
+		if (config->node_props[eng_idx].context != NULL) {
+			int dir;
+			alg_class = ctxmgr_get_alg_class(ctx_info_p);
+			switch (alg_class) {
+			case ALG_CLASS_HASH:
+				engine_type = SEP_ENGINE_HASH;
+				break;
+			case ALG_CLASS_SYM_CIPHER:
+				symc_type =
+				    ctxmgr_get_sym_cipher_type(ctx_info_p);
+				if ((symc_type == DXDI_SYMCIPHER_AES_ECB) ||
+				    (symc_type == DXDI_SYMCIPHER_AES_CBC) ||
+				    (symc_type == DXDI_SYMCIPHER_AES_CTR))
+					engine_type = SEP_ENGINE_AES;
+				else
+					engine_type = SEP_ENGINE_NULL;
+
+				dir = (int)
+				    ctxmgr_get_symcipher_direction(ctx_info_p);
+				if (prev_direction == -1) {
+					prev_direction = dir;
+				} else if (!(prev_direction ==
+					     SEP_CRYPTO_DIRECTION_DECRYPT &&
+					     dir ==
+					     SEP_CRYPTO_DIRECTION_ENCRYPT)) {
+					/* Only a decrypt->encrypt chain
+					 * is allowed */
+					pr_err(
+					    "Invalid direction combination %s->%s\n",
+					    prev_direction ==
+					    SEP_CRYPTO_DIRECTION_DECRYPT
+					    ? "DEC" : "ENC",
+					    dir ==
+					    SEP_CRYPTO_DIRECTION_DECRYPT
+					    ? "DEC" : "ENC");
+					op_ctx->error_info =
+					    DXDI_ERROR_INVAL_DIRECTION;
+				}
+				break;
+			default:
+				engine_type = SEP_ENGINE_NULL;
+				break;	/*unsupported alg class */
+			}
+		} else if (engine_src != DXDI_INPUT_NULL) {
+			/* In case the engine source is not NULL but a NULL
+			 * sub-context is given, this node is a DOUT node */
+			engine_type = SEP_ENGINE_DOUT;
+			/* exit after props set */
+			done = 1;
+		} else {
+			/* Both context pointer and input type are
+			 * NULL - we're done */
+			break;
+		}
+
+		sep_comb_eng_props_set(&sep_cfg_scheme, eng_idx,
+					  engine_src, engine_type);
+	}
+
+	return sep_cfg_scheme;
+}
+
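+/* The loop above packs, per combined-operation node, an (engine source,
+ * engine type) pair into the 32-bit scheme word via
+ * sep_comb_eng_props_set(). A node carrying a context with an unsupported
+ * cipher maps to SEP_ENGINE_NULL, a context-less node with a non-NULL
+ * input terminates the chain as a DOUT node, and for chained ciphers only
+ * the decrypt->encrypt direction combination is accepted. */
+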
+/**
+ * init_crypto_context() - Initialize host crypto context
+ * @op_ctx:
+ * @context_buf:
+ * @alg_class:
+ * @props:	Pointer to configuration properties which match given alg_class:
+ *		ALG_CLASS_SYM_CIPHER: struct dxdi_sym_cipher_props
+ *		ALG_CLASS_AUTH_ENC: struct dxdi_auth_enc_props
+ *		ALG_CLASS_MAC: struct dxdi_mac_props
+ *		ALG_CLASS_HASH: enum dxdi_hash_type
+ *
+ * Returns int 0 if operation executed in SeP.
+ * See error_info for actual results.
+ */
+static int init_crypto_context(struct sep_op_ctx *op_ctx,
+			       u32 __user *context_buf,
+			       enum crypto_alg_class alg_class, void *props)
+{
+	int rc;
+	int sep_cache_load_req;
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	bool postpone_init = false;
+
+	rc = ctxmgr_map_user_ctx(&op_ctx->ctx_info, drvdata->sep_data->dev,
+				 alg_class, context_buf);
+	if (rc != 0) {
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		return rc;
+	}
+	op_ctx->op_type = SEP_OP_CRYPTO_INIT;
+	ctxmgr_set_ctx_state(&op_ctx->ctx_info, CTX_STATE_UNINITIALIZED);
+	/* Allocate a new Crypto context ID */
+	ctxmgr_set_ctx_id(&op_ctx->ctx_info,
+			  alloc_crypto_ctx_id(op_ctx->client_ctx));
+
+	switch (alg_class) {
+	case ALG_CLASS_SYM_CIPHER:
+		rc = ctxmgr_init_symcipher_ctx(&op_ctx->ctx_info,
+					       (struct dxdi_sym_cipher_props *)
+					       props, &postpone_init,
+					       &op_ctx->error_info);
+		break;
+	case ALG_CLASS_AUTH_ENC:
+		rc = ctxmgr_init_auth_enc_ctx(&op_ctx->ctx_info,
+					      (struct dxdi_auth_enc_props *)
+					      props, &op_ctx->error_info);
+		break;
+	case ALG_CLASS_MAC:
+		rc = ctxmgr_init_mac_ctx(&op_ctx->ctx_info,
+					 (struct dxdi_mac_props *)props,
+					 &op_ctx->error_info);
+		break;
+	case ALG_CLASS_HASH:
+		rc = ctxmgr_init_hash_ctx(&op_ctx->ctx_info,
+					  *((enum dxdi_hash_type *)props),
+					  &op_ctx->error_info);
+		break;
+	default:
+		pr_err("Invalid algorithm class %d\n", alg_class);
+		op_ctx->error_info = DXDI_ERROR_UNSUP;
+		rc = -EINVAL;
+	}
+	if (rc != 0)
+		goto ctx_init_exit;
+
+	/* After the initialization above we are only partially initialized;
+	 * the SeP-side init. is still missing. */
+	ctxmgr_set_ctx_state(&op_ctx->ctx_info, CTX_STATE_PARTIAL_INIT);
+#ifdef DEBUG
+	ctxmgr_dump_sep_ctx(&op_ctx->ctx_info);
+#endif
+
+	/* If not all the init. information is available at this time
+	 * we postpone INIT in SeP to processing phase */
+	if (postpone_init) {
+		pr_debug("Init. postponed to processing phase\n");
+		ctxmgr_unmap_user_ctx(&op_ctx->ctx_info);
+		/* must be valid on "success" */
+		op_ctx->error_info = DXDI_ERROR_NULL;
+		return 0;
+	}
+
+	/* Flush out of host cache */
+	ctxmgr_sync_sep_ctx(&op_ctx->ctx_info, drvdata->sep_data->dev);
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc != 0) {
+		pr_err("Failed locking descQ sequencer[%u]\n",
+		       op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+		goto ctx_init_exit;
+	}
+
+	ctxmgr_set_sep_cache_idx(&op_ctx->ctx_info,
+				 ctxmgr_sep_cache_alloc(drvdata->sep_cache,
+							ctxmgr_get_ctx_id
+							(&op_ctx->ctx_info),
+							&sep_cache_load_req));
+	if (!sep_cache_load_req)
+		pr_err("New context already in SeP cache?!");
+
+	rc = send_crypto_op_desc(op_ctx,
+				 1 /*always load on init*/, 1 /*INIT*/,
+				 SEP_PROC_MODE_NOP);
+
+	mutex_unlock(&drvdata->desc_queue_sequencer);
+	if (likely(rc == 0))
+		rc = wait_for_sep_op_result(op_ctx);
+
+ ctx_init_exit:
+	/* Cleanup resources and update context state */
+	crypto_op_completion_cleanup(op_ctx);
+	ctxmgr_unmap_user_ctx(&op_ctx->ctx_info);
+
+	return rc;
+}
+
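+/* Summary of the init flow above: map the user context, run the
+ * class-specific host-side initialization, and - unless init. is postponed
+ * to the first processing call - flush the context from the host cache,
+ * then allocate a SeP cache slot and enqueue the INIT descriptor as one
+ * coupled sequence under desc_queue_sequencer, wait for the SeP result and
+ * clean up. */
+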
+/**
+ * map_ctx_for_proc() - Map previously initialized crypto context before data
+ *			processing
+ * @client_ctx:	The associated client (session) context
+ * @ctx_info:	Crypto context info object to map into
+ * @context_buf:	Pointer to the user context buffer
+ * @ctx_state_p:	Returned current context state
+ *
+ * Returns int
+ */
+static int map_ctx_for_proc(struct sep_client_ctx *client_ctx,
+			    struct client_crypto_ctx_info *ctx_info,
+			    u32 __user *context_buf,
+			    enum host_ctx_state *ctx_state_p)
+{
+	int rc;
+	struct queue_drvdata *drvdata = client_ctx->drv_data;
+
+	/* default state in case of error */
+	*ctx_state_p = CTX_STATE_UNINITIALIZED;
+
+	rc = ctxmgr_map_user_ctx(ctx_info, drvdata->sep_data->dev,
+				 ALG_CLASS_NONE, context_buf);
+	if (rc != 0) {
+		pr_err("Failed mapping context\n");
+		return rc;
+	}
+	if (ctxmgr_get_session_id(ctx_info) != (uintptr_t) client_ctx) {
+		pr_err("Context ID is not associated with this session\n");
+		rc = -EINVAL;
+	}
+	if (rc == 0)
+		*ctx_state_p = ctx_info->ctx_kptr->state;
+
+#ifdef DEBUG
+	ctxmgr_dump_sep_ctx(ctx_info);
+	/* Flush out of host cache */
+	ctxmgr_sync_sep_ctx(ctx_info, drvdata->sep_data->dev);
+#endif
+	if (rc != 0)
+		ctxmgr_unmap_user_ctx(ctx_info);
+
+	return rc;
+}
+
+/**
+ * init_combined_context() - Initialize Combined context
+ * @op_ctx:
+ * @config: Pointer to configuration scheme to be validated by SeP
+ *
+ * Returns int 0 if operation executed in SeP.
+ * See error_info for actual results.
+ */
+static int init_combined_context(struct sep_op_ctx *op_ctx,
+				 struct dxdi_combined_props *config)
+{
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct client_crypto_ctx_info *ctx_info_p = &op_ctx->ctx_info;
+	int sep_ctx_load_req[DXDI_COMBINED_NODES_MAX];
+	enum host_ctx_state ctx_state;
+	int rc, ctx_idx, ctx_mapped_n = 0;
+
+	op_ctx->op_type = SEP_OP_CRYPTO_INIT;
+
+	/* No context to load - clear buffer */
+	memset(sep_ctx_load_req, 0, sizeof(sep_ctx_load_req));
+
+	/* set the number of active contexts */
+	op_ctx->ctx_info_num = get_num_of_ctx_info(config);
+
+	/* map each context in the configuration scheme */
+	for (ctx_idx = 0; ctx_idx < op_ctx->ctx_info_num;
+	     ctx_idx++, ctx_info_p++) {
+		/* context already initialized by the user */
+		rc = map_ctx_for_proc(op_ctx->client_ctx, ctx_info_p,
+				      config->node_props[ctx_idx].context,
+				      /*user ctx */ &ctx_state);
+		if (rc != 0) {
+			op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+			goto ctx_init_exit;
+		}
+
+		ctx_mapped_n++;	/*ctx mapped successfully */
+
+		/* context must be initialized */
+		if (ctx_state != CTX_STATE_INITIALIZED) {
+			pr_err(
+				    "Given context [%d] in invalid state for processing -%d\n",
+				    ctx_idx, ctx_state);
+			op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+			rc = -EINVAL;
+			goto ctx_init_exit;
+		}
+	}
+
+	ctx_info_p = &op_ctx->ctx_info;
+
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc == 0) {		/* Coupled sequence */
+		for (ctx_idx = 0; ctx_idx < op_ctx->ctx_info_num;
+		     ctx_idx++, ctx_info_p++) {
+			ctxmgr_set_sep_cache_idx(ctx_info_p,
+						 ctxmgr_sep_cache_alloc
+						 (drvdata->sep_cache,
+						  ctxmgr_get_ctx_id(ctx_info_p),
+						  &sep_ctx_load_req[ctx_idx]));
+		}
+
+		rc = send_combined_op_desc(op_ctx,
+					   sep_ctx_load_req /*nothing to load*/,
+					   1 /*INIT*/, SEP_PROC_MODE_NOP,
+					   0 /*no scheme in init*/);
+
+		mutex_unlock(&drvdata->desc_queue_sequencer);
+	} else {
+		pr_err("Failed locking descQ sequencer[%u]\n",
+			    op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	}
+	if (rc == 0)
+		rc = wait_for_sep_op_result(op_ctx);
+
+	crypto_op_completion_cleanup(op_ctx);
+
+ ctx_init_exit:
+	ctx_info_p = &op_ctx->ctx_info;
+
+	for (ctx_idx = 0; ctx_idx < ctx_mapped_n; ctx_idx++, ctx_info_p++)
+		ctxmgr_unmap_user_ctx(ctx_info_p);
+
+	return rc;
+}
+
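+/* Error handling note for init_combined_context(): ctx_mapped_n counts
+ * only the contexts that were mapped successfully, so the unmap loop at
+ * the exit label never touches entries whose mapping failed. */
+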
+/**
+ * prepare_adata_for_sep() - Generate MLLI tables for additional/associated data
+ *				(input only)
+ * @op_ctx:	 Operation context
+ * @adata_in:
+ * @adata_in_size:
+ *
+ * Returns int
+ */
+static inline int prepare_adata_for_sep(struct sep_op_ctx *op_ctx,
+					u8 __user *adata_in,
+					u32 adata_in_size)
+{
+	struct sep_client_ctx *client_ctx = op_ctx->client_ctx;
+	void *llimgr = client_ctx->drv_data->sep_data->llimgr;
+	struct dma_pool *spad_buf_pool =
+	    client_ctx->drv_data->sep_data->spad_buf_pool;
+	struct mlli_tables_list *ift_p = &op_ctx->ift;
+	unsigned long a0_buf_size;
+	u8 *a0_buf_p;
+	int rc = 0;
+
+	if (adata_in == NULL) {	/* adata_in is required for this alg_class */
+		pr_err("adata_in==NULL for authentication\n");
+		op_ctx->error_info = DXDI_ERROR_INVAL_DIN_PTR;
+		return -EINVAL;
+	}
+
+	op_ctx->spad_buf_p = dma_pool_alloc(spad_buf_pool,
+					    GFP_KERNEL,
+					    &op_ctx->spad_buf_dma_addr);
+	if (unlikely(op_ctx->spad_buf_p == NULL)) {
+		pr_err("Failed allocating from spad_buf_pool for A0\n");
+		return -ENOMEM;
+	}
+	a0_buf_p = op_ctx->spad_buf_p;
+
+	/* Format A0 (the first 2 words in the first block).
+	 * This matches the CCM associated-data length encoding (RFC 3610):
+	 * a 2-byte length for sizes below 2^16 - 2^8, otherwise the
+	 * 0xFF 0xFE marker followed by a 4-byte big-endian length. */
+	if (adata_in_size < ((1UL << 16) - (1UL << 8))) {
+		a0_buf_size = 2;
+
+		a0_buf_p[0] = (adata_in_size >> 8) & 0xFF;
+		a0_buf_p[1] = adata_in_size & 0xFF;
+	} else {
+		a0_buf_size = 6;
+
+		a0_buf_p[0] = 0xFF;
+		a0_buf_p[1] = 0xFE;
+		a0_buf_p[2] = (adata_in_size >> 24) & 0xFF;
+		a0_buf_p[3] = (adata_in_size >> 16) & 0xFF;
+		a0_buf_p[4] = (adata_in_size >> 8) & 0xFF;
+		a0_buf_p[5] = adata_in_size & 0xFF;
+	}
+
+	/* Create IFT (MLLI table) */
+	rc = llimgr_register_client_dma_buf(llimgr,
+					    adata_in, NULL, adata_in_size, 0,
+					    DMA_TO_DEVICE,
+					    &op_ctx->din_dma_obj);
+	if (likely(rc == 0)) {
+		rc = llimgr_create_mlli(llimgr, ift_p, DMA_TO_DEVICE,
+					&op_ctx->din_dma_obj,
+					op_ctx->spad_buf_dma_addr, a0_buf_size);
+		if (unlikely(rc != 0)) {
+			llimgr_deregister_client_dma_buf(llimgr,
+							 &op_ctx->din_dma_obj);
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * prepare_cipher_data_for_sep() - Generate MLLI tables for cipher algorithms
+ *				(input + output)
+ * @op_ctx:	 Operation context
+ * @data_in:	User space pointer for input data (NULL for kernel data)
+ * @sgl_in:	Kernel buffers s/g list for input data (NULL for user data)
+ * @data_out:	User space pointer for output data (NULL for kernel data)
+ * @sgl_out:	Kernel buffers s/g list for output data (NULL for user data)
+ * @data_in_size:
+ *
+ * Returns int
+ */
+static inline int prepare_cipher_data_for_sep(struct sep_op_ctx *op_ctx,
+					      u8 __user *data_in,
+					      struct scatterlist *sgl_in,
+					      u8 __user *data_out,
+					      struct scatterlist *sgl_out,
+					      u32 data_in_size)
+{
+	struct sep_client_ctx *client_ctx = op_ctx->client_ctx;
+	void *llimgr = client_ctx->drv_data->sep_data->llimgr;
+	const bool is_data_inplace =
+	    data_in != NULL ? (data_in == data_out) : (sgl_in == sgl_out);
+	const enum dma_data_direction din_dma_direction =
+	    is_data_inplace ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+	int rc;
+
+	/* Check parameters */
+	if (data_in_size == 0) {	/* No data to prepare */
+		return 0;
+	}
+	if ((data_out == NULL) && (sgl_out == NULL)) {
+		/* data_out required for this alg_class */
+		pr_err("data_out/sgl_out==NULL for enc/decryption\n");
+		op_ctx->error_info = DXDI_ERROR_INVAL_DOUT_PTR;
+		return -EINVAL;
+	}
+
+	/* Avoid partial overlapping of data_in with data_out */
+	if (!is_data_inplace && (data_in != NULL)) {	/* User space buffers */
+		if (((data_in < data_out) &&
+		     ((data_in + data_in_size) > data_out)) ||
+		    ((data_out < data_in) &&
+		     ((data_out + data_in_size) > data_in))) {
+			pr_err("Buffers partially overlap!\n");
+			op_ctx->error_info = DXDI_ERROR_DIN_DOUT_OVERLAP;
+			return -EINVAL;
+		}
+	}
+	/* else: TODO - scan s/g lists for overlapping */
+
+	/* Create IFT + OFT (MLLI tables) */
+	rc = llimgr_register_client_dma_buf(llimgr,
+					    data_in, sgl_in, data_in_size, 0,
+					    din_dma_direction,
+					    &op_ctx->din_dma_obj);
+	if (likely(rc == 0)) {
+		rc = llimgr_create_mlli(llimgr, &op_ctx->ift, din_dma_direction,
+					&op_ctx->din_dma_obj, 0, 0);
+	}
+	if (likely(rc == 0)) {
+		if (is_data_inplace) {
+			/* Mirror IFT data in OFT */
+			op_ctx->dout_dma_obj = op_ctx->din_dma_obj;
+			op_ctx->oft = op_ctx->ift;
+		} else {	/* Create OFT */
+			rc = llimgr_register_client_dma_buf(
+					llimgr, data_out, sgl_out,
+					data_in_size, 0, DMA_FROM_DEVICE,
+					&op_ctx->dout_dma_obj);
+			if (likely(rc == 0)) {
+				rc = llimgr_create_mlli(llimgr, &op_ctx->oft,
+							DMA_FROM_DEVICE,
+							&op_ctx->dout_dma_obj,
+							0, 0);
+			}
+		}
+	}
+
+	if (unlikely(rc != 0)) {	/* Failure cleanup */
+		/* No output MLLI to free in error case */
+		if (!is_data_inplace) {
+			llimgr_deregister_client_dma_buf(llimgr,
+							 &op_ctx->dout_dma_obj);
+		}
+		llimgr_destroy_mlli(llimgr, &op_ctx->ift);
+		llimgr_deregister_client_dma_buf(llimgr, &op_ctx->din_dma_obj);
+	}
+
+	return rc;
+}
+
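+/* In-place note for prepare_cipher_data_for_sep(): when data_in == data_out
+ * (or sgl_in == sgl_out) the buffer is mapped once as DMA_BIDIRECTIONAL and
+ * its DMA object and IFT are mirrored into the output side, which is why
+ * the failure path must not deregister the output object separately. */
+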
+/**
+ * prepare_hash_data_for_sep() - Prepare data for hash operation
+ * @op_ctx:		Operation context
+ * @is_finalize:	Is this the hash finalize operation (last)
+ * @data_in:		Pointer to user buffer OR...
+ * @sgl_in:		Gather list for kernel buffers
+ * @data_in_size:	Data size in bytes
+ *
+ * Returns 0 on success
+ */
+static int prepare_hash_data_for_sep(struct sep_op_ctx *op_ctx,
+				     bool is_finalize,
+				     u8 __user *data_in,
+				     struct scatterlist *sgl_in,
+				     u32 data_in_size)
+{
+	struct sep_client_ctx *client_ctx = op_ctx->client_ctx;
+	struct queue_drvdata *drvdata = client_ctx->drv_data;
+	void *llimgr = drvdata->sep_data->llimgr;
+	u32 crypto_blk_size;
+	/* data size for processing this op. (incl. prev. block remainder) */
+	u32 data_size4hash;
+	/* amount of data_in to process in this op. */
+	u32 data_in_save4next;
+	u16 last_hash_blk_tail_size;
+	dma_addr_t last_hash_blk_tail_dma;
+	int rc;
+
+	if ((data_in != NULL) && (sgl_in != NULL)) {
+		pr_err("Given valid data_in+sgl_in!\n");
+		return -EINVAL;
+	}
+
+	/* Hash block size is required in order to buffer block remainders */
+	crypto_blk_size = ctxmgr_get_crypto_blk_size(&op_ctx->ctx_info);
+	if (crypto_blk_size == 0) {	/* Unsupported algorithm?... */
+		op_ctx->error_info = DXDI_ERROR_UNSUP;
+		return -ENOSYS;
+	}
+
+	/* Map for DMA the last block tail, if any */
+	rc = ctxmgr_map2dev_hash_tail(&op_ctx->ctx_info,
+				      drvdata->sep_data->dev);
+	if (rc != 0) {
+		pr_err("Failed mapping hash data tail buffer\n");
+		return rc;
+	}
+	last_hash_blk_tail_size =
+	    ctxmgr_get_hash_blk_remainder_buf(&op_ctx->ctx_info,
+					      &last_hash_blk_tail_dma);
+	data_size4hash = data_in_size + last_hash_blk_tail_size;
+	if (!is_finalize) {
+		/* Not last. Round to hash block size. */
+		data_size4hash = (data_size4hash & ~(crypto_blk_size - 1));
+		/* On the last hash op. take all that's left */
+	}
+	data_in_save4next = (data_size4hash > 0) ?
+	    data_in_size - (data_size4hash - last_hash_blk_tail_size) :
+	    data_in_size;
+	rc = llimgr_register_client_dma_buf(llimgr,
+					    data_in, sgl_in, data_in_size,
+					    data_in_save4next, DMA_TO_DEVICE,
+					    &op_ctx->din_dma_obj);
+	if (unlikely(rc != 0)) {
+		pr_err("Failed registering client buffer (rc=%d)\n", rc);
+	} else {
+		if ((!is_finalize) && (data_size4hash == 0)) {
+			/* Not enough for even one hash block
+			 * (all saved for next) */
+			ctxmgr_unmap2dev_hash_tail(&op_ctx->ctx_info,
+						   drvdata->sep_data->dev);
+			/* Append to existing tail buffer */
+			rc = ctxmgr_save_hash_blk_remainder(
+					&op_ctx->ctx_info,
+					&op_ctx->din_dma_obj,
+					true /*append*/);
+			if (rc == 0)	/* signal: not even one block */
+				rc = -ENOTBLK;
+		} else {
+			rc = llimgr_create_mlli(llimgr, &op_ctx->ift,
+						DMA_TO_DEVICE,
+						&op_ctx->din_dma_obj,
+						last_hash_blk_tail_dma,
+						last_hash_blk_tail_size);
+		}
+	}
+
+	if (unlikely(rc != 0)) {	/* Failed (or -ENOTBLK) */
+		/* No harm if we invoke deregister if it was not registered */
+		llimgr_deregister_client_dma_buf(llimgr, &op_ctx->din_dma_obj);
+		/* Unmap hash block tail buffer */
+		ctxmgr_unmap2dev_hash_tail(&op_ctx->ctx_info,
+					   drvdata->sep_data->dev);
+	}
+	/* Hash block remainder would be copied into tail buffer only after
+	 * operation completion, because this buffer is still in use for
+	 * current operation */
+
+	return rc;
+}
+
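+/* Worked example for the size bookkeeping above (a sketch, assuming a
+ * 64 B hash block, a 10 B saved tail and data_in_size = 100):
+ * data_size4hash = 100 + 10 = 110, rounded down to 64 for a non-final
+ * update, so data_in_save4next = 100 - (64 - 10) = 46. That is, 54 B of
+ * the new data are hashed together with the 10 B tail, and the remaining
+ * 46 B are buffered for the next update. */
+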
+/**
+ * prepare_mac_data_for_sep() - Prepare data memory objects for (AES) MAC
+ *				operations
+ * @op_ctx:
+ * @data_in:		Pointer to user buffer OR...
+ * @sgl_in:		Gather list for kernel buffers
+ * @data_in_size:
+ *
+ * Returns int
+ */
+static inline int prepare_mac_data_for_sep(struct sep_op_ctx *op_ctx,
+					   u8 __user *data_in,
+					   struct scatterlist *sgl_in,
+					   u32 data_in_size)
+{
+	struct sep_client_ctx *client_ctx = op_ctx->client_ctx;
+	void *llimgr = client_ctx->drv_data->sep_data->llimgr;
+	int rc;
+
+	rc = llimgr_register_client_dma_buf(llimgr,
+					    data_in, sgl_in, data_in_size, 0,
+					    DMA_TO_DEVICE,
+					    &op_ctx->din_dma_obj);
+	if (likely(rc == 0)) {
+		rc = llimgr_create_mlli(llimgr, &op_ctx->ift,
+					DMA_TO_DEVICE, &op_ctx->din_dma_obj, 0,
+					0);
+		if (rc != 0) {
+			llimgr_deregister_client_dma_buf(llimgr,
+							 &op_ctx->din_dma_obj);
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * prepare_data_for_sep() - Prepare data for processing by SeP
+ * @op_ctx:
+ * @data_in:	User space pointer for input data (NULL for kernel data)
+ * @sgl_in:	Kernel buffers s/g list for input data (NULL for user data)
+ * @data_out:	User space pointer for output data (NULL for kernel data)
+ * @sgl_out:	Kernel buffers s/g list for output data (NULL for user data)
+ * @data_in_size:	 data_in buffer size (and data_out's if not NULL)
+ * @data_intent:	 the purpose of the given data
+ *
+ * Prepare data for processing by SeP
+ * (common flow for sep_proc_dblk and sep_fin_proc).
+ * Returns int
+ */
+int prepare_data_for_sep(struct sep_op_ctx *op_ctx,
+			 u8 __user *data_in,
+			 struct scatterlist *sgl_in,
+			 u8 __user *data_out,
+			 struct scatterlist *sgl_out,
+			 u32 data_in_size,
+			 enum crypto_data_intent data_intent)
+{
+	int rc;
+	enum crypto_alg_class alg_class;
+
+	if (data_intent == CRYPTO_DATA_ADATA) {
+		/* additional/associated data */
+		if (!ctxmgr_is_valid_adata_size(&op_ctx->ctx_info,
+						data_in_size)) {
+			op_ctx->error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+			return -EINVAL;
+		}
+	} else {
+		/* cipher/text data */
+		if (!ctxmgr_is_valid_size(&op_ctx->ctx_info,
+					  data_in_size,
+					  (data_intent ==
+					   CRYPTO_DATA_TEXT_FINALIZE))) {
+			op_ctx->error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+			return -EINVAL;
+		}
+	}
+
+	pr_debug("data_in=0x%p/0x%p data_out=0x%p/0x%p data_in_size=%uB\n",
+		      data_in, sgl_in, data_out, sgl_out, data_in_size);
+
+	alg_class = ctxmgr_get_alg_class(&op_ctx->ctx_info);
+	pr_debug("alg_class = %d\n", alg_class);
+	switch (alg_class) {
+	case ALG_CLASS_SYM_CIPHER:
+		rc = prepare_cipher_data_for_sep(op_ctx,
+						 data_in, sgl_in, data_out,
+						 sgl_out, data_in_size);
+		break;
+	case ALG_CLASS_AUTH_ENC:
+		if (data_intent == CRYPTO_DATA_ADATA) {
+			struct host_crypto_ctx_auth_enc *aead_ctx_p =
+			    (struct host_crypto_ctx_auth_enc *)
+			    op_ctx->ctx_info.ctx_kptr;
+
+			if (!aead_ctx_p->is_adata_processed) {
+				rc = prepare_adata_for_sep(op_ctx,
+							   data_in,
+							   data_in_size);
+				/* no more invocation to adata process
+				 * is allowed */
+				aead_ctx_p->is_adata_processed = 1;
+			} else {
+				/* additional data may be processed
+				 * only once */
+				return -EPERM;
+			}
+		} else {
+			rc = prepare_cipher_data_for_sep(op_ctx,
+							 data_in, sgl_in,
+							 data_out, sgl_out,
+							 data_in_size);
+		}
+		break;
+	case ALG_CLASS_MAC:
+	case ALG_CLASS_HASH:
+		if ((alg_class == ALG_CLASS_MAC) &&
+		    (ctxmgr_get_mac_type(&op_ctx->ctx_info) != DXDI_MAC_HMAC)) {
+			/* Handle all MACs but HMAC */
+			if (data_in_size == 0) {	/* No data to prepare */
+				rc = 0;
+				break;
+			}
+#if 0
+			/* ignore checking the user out pointer due to crys api
+			 * limitation */
+			if (data_out != NULL) {
+				pr_err("data_out!=NULL for MAC\n");
+				return -EINVAL;
+			}
+#endif
+			rc = prepare_mac_data_for_sep(op_ctx, data_in, sgl_in,
+						      data_in_size);
+			break;
+		}
+
+		/* else: HASH or HMAC require the same handling */
+		rc = prepare_hash_data_for_sep(op_ctx,
+					       (data_intent ==
+						CRYPTO_DATA_TEXT_FINALIZE),
+					       data_in, sgl_in, data_in_size);
+		break;
+
+	default:
+		pr_err("Invalid algorithm class %d in context\n",
+			    alg_class);
+		/* probably context was corrupted since init. phase */
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
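+/* Dispatch summary for prepare_data_for_sep(): ADATA goes through
+ * prepare_adata_for_sep() (at most once per AEAD context), text data for
+ * symmetric ciphers and AEAD goes through prepare_cipher_data_for_sep(),
+ * non-HMAC MACs go through prepare_mac_data_for_sep(), and HASH/HMAC go
+ * through prepare_hash_data_for_sep(). */
+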
+/**
+ * prepare_combined_data_for_sep() - Prepare combined data for processing by SeP
+ * @op_ctx:
+ * @data_in:
+ * @data_out:
+ * @data_in_size:	 data_in buffer size (and data_out's if not NULL)
+ * @data_intent:	 the purpose of the given data
+ *
+ * Prepare combined data for processing by SeP
+ * (common flow for sep_proc_dblk and sep_fin_proc).
+ * Returns int
+ */
+static int prepare_combined_data_for_sep(struct sep_op_ctx *op_ctx,
+					 u8 __user *data_in,
+					 u8 __user *data_out,
+					 u32 data_in_size,
+					 enum crypto_data_intent data_intent)
+{
+	int rc;
+
+	if (data_intent == CRYPTO_DATA_TEXT) {
+		/* restrict data unit size to the max block size multiple */
+		if (!IS_MULT_OF(data_in_size, SEP_HASH_BLOCK_SIZE_MAX)) {
+			pr_err(
+				    "Data unit size (%u) is not HASH block multiple\n",
+				    data_in_size);
+			op_ctx->error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+			return -EINVAL;
+		}
+	} else if (data_intent == CRYPTO_DATA_TEXT_FINALIZE) {
+		/* user may finalize with zero or AES block size multiple */
+		if (!IS_MULT_OF(data_in_size, SEP_AES_BLOCK_SIZE)) {
+			pr_err("Data size (%u), not AES block multiple\n",
+				    data_in_size);
+			op_ctx->error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+			return -EINVAL;
+		}
+	}
+
+	pr_debug("data_in=0x%08lX data_out=0x%08lX data_in_size=%uB\n",
+		      (unsigned long)data_in, (unsigned long)data_out,
+		      data_in_size);
+
+	pr_debug("alg_class = COMBINED\n");
+	if (data_out == NULL)
+		rc = prepare_mac_data_for_sep(op_ctx,
+					      data_in, NULL, data_in_size);
+	else
+		rc = prepare_cipher_data_for_sep(op_ctx,
+						 data_in, NULL, data_out, NULL,
+						 data_in_size);
+
+	return rc;
+}
+
+/**
+ * sep_proc_dblk() - Process data block
+ * @op_ctx:
+ * @context_buf:
+ * @data_block_type:
+ * @data_in:
+ * @data_out:
+ * @data_in_size:
+ *
+ * Returns int
+ */
+static int sep_proc_dblk(struct sep_op_ctx *op_ctx,
+			 u32 __user *context_buf,
+			 enum dxdi_data_block_type data_block_type,
+			 u8 __user *data_in,
+			 u8 __user *data_out, u32 data_in_size)
+{
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	int rc;
+	int sep_cache_load_required;
+	int sep_ctx_init_required = 0;
+	enum crypto_alg_class alg_class;
+	enum host_ctx_state ctx_state;
+
+	if (data_in_size == 0) {
+		pr_err("Got empty data_in\n");
+		op_ctx->error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+		return -EINVAL;
+	}
+
+	rc = map_ctx_for_proc(op_ctx->client_ctx, &op_ctx->ctx_info,
+			      context_buf, &ctx_state);
+	if (rc != 0) {
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		return rc;
+	}
+	op_ctx->op_type = SEP_OP_CRYPTO_PROC;
+	if (ctx_state == CTX_STATE_PARTIAL_INIT) {
+		/* case of postponed sep context init. */
+		sep_ctx_init_required = 1;
+		op_ctx->op_type |= SEP_OP_CRYPTO_INIT;
+	} else if (ctx_state != CTX_STATE_INITIALIZED) {
+		pr_err("Context in invalid state for processing %d\n",
+			    ctx_state);
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		rc = -EINVAL;
+		goto unmap_ctx_and_return;
+	}
+	alg_class = ctxmgr_get_alg_class(&op_ctx->ctx_info);
+
+	rc = prepare_data_for_sep(op_ctx, data_in, NULL, data_out, NULL,
+				  data_in_size,
+				  (data_block_type ==
+				   DXDI_DATA_TYPE_TEXT ? CRYPTO_DATA_TEXT :
+				   CRYPTO_DATA_ADATA));
+	if (rc != 0) {
+		if (rc == -ENOTBLK) {
+			/* Did not accumulate even a single hash block */
+			/* The data_in already copied to context, in addition
+			 * to existing data. Report as success with no op. */
+			op_ctx->error_info = DXDI_ERROR_NULL;
+			rc = 0;
+		}
+		goto unmap_ctx_and_return;
+	}
+
+	if (sep_ctx_init_required) {
+		/* If this flag is set it implies that we have updated
+		 * parts of the sep_ctx structure during data preparation -
+		 * need to sync. context to memory (from host cache...) */
+#ifdef DEBUG
+		ctxmgr_dump_sep_ctx(&op_ctx->ctx_info);
+#endif
+		ctxmgr_sync_sep_ctx(&op_ctx->ctx_info, drvdata->sep_data->dev);
+	}
+
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc == 0) {		/* coupled sequence */
+		ctxmgr_set_sep_cache_idx(&op_ctx->ctx_info,
+			 ctxmgr_sep_cache_alloc(drvdata->
+						sep_cache,
+						ctxmgr_get_ctx_id
+						(&op_ctx->ctx_info),
+						&sep_cache_load_required));
+		rc = send_crypto_op_desc(op_ctx, sep_cache_load_required,
+					 sep_ctx_init_required,
+					 (data_block_type ==
+					  DXDI_DATA_TYPE_TEXT ?
+					  SEP_PROC_MODE_PROC_T :
+					  SEP_PROC_MODE_PROC_A));
+		mutex_unlock(&drvdata->desc_queue_sequencer);
+	} else {
+		pr_err("Failed locking descQ sequencer[%u]\n",
+			    op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	}
+	if (rc == 0)
+		rc = wait_for_sep_op_result(op_ctx);
+
+	crypto_op_completion_cleanup(op_ctx);
+
+ unmap_ctx_and_return:
+	ctxmgr_unmap_user_ctx(&op_ctx->ctx_info);
+
+	return rc;
+}
+
+/**
+ * sep_fin_proc() - Finalize processing of given context with given (optional)
+ *			data
+ * @op_ctx:
+ * @context_buf:
+ * @data_in:
+ * @data_out:
+ * @data_in_size:
+ * @digest_or_mac_p:
+ * @digest_or_mac_size_p:
+ *
+ * Returns int
+ */
+static int sep_fin_proc(struct sep_op_ctx *op_ctx,
+			u32 __user *context_buf,
+			u8 __user *data_in,
+			u8 __user *data_out,
+			u32 data_in_size,
+			u8 *digest_or_mac_p,
+			u8 *digest_or_mac_size_p)
+{
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	int rc;
+	int sep_cache_load_required;
+	int sep_ctx_init_required = 0;
+	enum host_ctx_state ctx_state;
+
+	rc = map_ctx_for_proc(op_ctx->client_ctx, &op_ctx->ctx_info,
+			      context_buf, &ctx_state);
+	if (rc != 0) {
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		return rc;
+	}
+	if (ctx_state == CTX_STATE_PARTIAL_INIT) {
+		/* case of postponed sep context init. */
+		sep_ctx_init_required = 1;
+	} else if (ctx_state != CTX_STATE_INITIALIZED) {
+		pr_err("Context in invalid state for finalizing %d\n",
+			    ctx_state);
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		rc = -EINVAL;
+		goto data_prepare_err;
+	}
+
+	op_ctx->op_type = SEP_OP_CRYPTO_FINI;
+
+	rc = prepare_data_for_sep(op_ctx, data_in, NULL, data_out, NULL,
+				  data_in_size, CRYPTO_DATA_TEXT_FINALIZE);
+	if (rc != 0)
+		goto data_prepare_err;
+
+	if (sep_ctx_init_required) {
+		/* If this flag is set it implies that we have updated
+		 * parts of the sep_ctx structure during data preparation -
+		 * need to sync. context to memory (from host cache...) */
+#ifdef DEBUG
+		ctxmgr_dump_sep_ctx(&op_ctx->ctx_info);
+#endif
+		ctxmgr_sync_sep_ctx(&op_ctx->ctx_info, drvdata->sep_data->dev);
+	}
+
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc == 0) {		/* Coupled sequence */
+		ctxmgr_set_sep_cache_idx(&op_ctx->ctx_info,
+				 ctxmgr_sep_cache_alloc(drvdata->sep_cache,
+					ctxmgr_get_ctx_id(&op_ctx->ctx_info),
+					&sep_cache_load_required));
+		rc = send_crypto_op_desc(op_ctx, sep_cache_load_required,
+					 sep_ctx_init_required,
+					 SEP_PROC_MODE_FIN);
+		mutex_unlock(&drvdata->desc_queue_sequencer);
+	} else {
+		pr_err("Failed locking descQ sequencer[%u]\n",
+			    op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	}
+	if (rc == 0)
+		rc = wait_for_sep_op_result(op_ctx);
+
+	if ((rc == 0) && (op_ctx->error_info == 0)) {
+		/* Digest or MAC are embedded in the SeP/FW context */
+		*digest_or_mac_size_p =
+		    ctxmgr_get_digest_or_mac(&op_ctx->ctx_info,
+					     digest_or_mac_p);
+		/* If the above is not applicable for the given algorithm,
+		 * *digest_or_mac_size_p would be set to 0 */
+	} else {
+		/* Nothing valid in digest_or_mac_p */
+		*digest_or_mac_size_p = 0;
+	}
+
+	crypto_op_completion_cleanup(op_ctx);
+
+ data_prepare_err:
+	ctxmgr_unmap_user_ctx(&op_ctx->ctx_info);
+
+	return rc;
+}
+
+/**
+ * sep_combined_proc_dblk() - Process Combined operation block
+ * @op_ctx:
+ * @config:
+ * @data_in:
+ * @data_out:
+ * @data_in_size:
+ *
+ * Returns int
+ */
+static int sep_combined_proc_dblk(struct sep_op_ctx *op_ctx,
+				  struct dxdi_combined_props *config,
+				  u8 __user *data_in,
+				  u8 __user *data_out,
+				  u32 data_in_size)
+{
+	int rc;
+	int ctx_idx, ctx_mapped_n = 0;
+	int sep_cache_load_required[DXDI_COMBINED_NODES_MAX];
+	enum host_ctx_state ctx_state;
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct client_crypto_ctx_info *ctx_info_p = &(op_ctx->ctx_info);
+	u32 cfg_scheme;
+
+	if (data_in_size == 0) {
+		pr_err("Got empty data_in\n");
+		op_ctx->error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+		return -EINVAL;
+	}
+
+	/* assume nothing to load */
+	memset(sep_cache_load_required, 0, sizeof(sep_cache_load_required));
+
+	/* set the number of active contexts */
+	op_ctx->ctx_info_num = get_num_of_ctx_info(config);
+
+	/* map each context in the configuration scheme */
+	for (ctx_idx = 0; ctx_idx < op_ctx->ctx_info_num;
+	     ctx_idx++, ctx_info_p++) {
+		/* context already initialized by the user */
+		rc = map_ctx_for_proc(op_ctx->client_ctx,
+				      ctx_info_p,
+				      config->node_props[ctx_idx].context,
+				      /*user ctx */ &ctx_state);
+		if (rc != 0) {
+			op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+			goto unmap_ctx_and_return;
+		}
+
+		ctx_mapped_n++;	/*ctx mapped successfully */
+
+		/* context must be initialized */
+		if (ctx_state != CTX_STATE_INITIALIZED) {
+			pr_err(
+				    "Given context [%d] in invalid state for processing -%d\n",
+				    ctx_idx, ctx_state);
+			op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+			rc = -EINVAL;
+			goto unmap_ctx_and_return;
+		}
+	}
+
+	op_ctx->op_type = SEP_OP_CRYPTO_PROC;
+
+	/* Construct SeP combined scheme */
+	cfg_scheme = format_sep_combined_cfg_scheme(config, op_ctx);
+	pr_debug("SeP Config. Scheme: 0x%08X\n", cfg_scheme);
+
+	rc = prepare_combined_data_for_sep(op_ctx, data_in, data_out,
+					   data_in_size, CRYPTO_DATA_TEXT);
+	if (unlikely(rc != 0)) {
+		pr_err(
+			    "Failed preparing DMA buffers (rc=%d, err_info=0x%08X)\n",
+			    rc, op_ctx->error_info);
+		goto unmap_ctx_and_return;
+	}
+
+	ctx_info_p = &(op_ctx->ctx_info);
+
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc == 0) {		/* coupled sequence */
+		for (ctx_idx = 0; ctx_idx < op_ctx->ctx_info_num;
+		     ctx_idx++, ctx_info_p++) {
+			ctxmgr_set_sep_cache_idx(ctx_info_p,
+						 ctxmgr_sep_cache_alloc
+						 (drvdata->sep_cache,
+						  ctxmgr_get_ctx_id(ctx_info_p),
+						  &sep_cache_load_required
+						  [ctx_idx]));
+		}
+
+		rc = send_combined_op_desc(op_ctx,
+					   sep_cache_load_required, 0 /*INIT*/,
+					   SEP_PROC_MODE_PROC_T, cfg_scheme);
+		mutex_unlock(&drvdata->desc_queue_sequencer);
+	} else {
+		pr_err("Failed locking descQ sequencer[%u]\n",
+			    op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	}
+	if (rc == 0)
+		rc = wait_for_sep_op_result(op_ctx);
+
+	crypto_op_completion_cleanup(op_ctx);
+
+ unmap_ctx_and_return:
+	ctx_info_p = &(op_ctx->ctx_info);
+
+	for (ctx_idx = 0; ctx_idx < ctx_mapped_n; ctx_idx++, ctx_info_p++)
+		ctxmgr_unmap_user_ctx(ctx_info_p);
+
+	return rc;
+}
+
+/**
+ * sep_combined_fin_proc() - Finalize Combined processing
+ *			     with given (optional) data
+ * @op_ctx:
+ * @config:
+ * @data_in:
+ * @data_out:
+ * @data_in_size:
+ * @auth_data_p:
+ * @auth_data_size_p:
+ *
+ * Returns int
+ */
+static int sep_combined_fin_proc(struct sep_op_ctx *op_ctx,
+				 struct dxdi_combined_props *config,
+				 u8 __user *data_in,
+				 u8 __user *data_out,
+				 u32 data_in_size,
+				 u8 *auth_data_p,
+				 u8 *auth_data_size_p)
+{
+	int rc;
+	int ctx_idx, ctx_mapped_n = 0;
+	int sep_cache_load_required[DXDI_COMBINED_NODES_MAX];
+	enum host_ctx_state ctx_state;
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct client_crypto_ctx_info *ctx_info_p = &(op_ctx->ctx_info);
+	u32 cfg_scheme;
+
+	/* assume nothing to load */
+	memset(sep_cache_load_required, 0, sizeof(sep_cache_load_required));
+
+	/* set the number of active contexts */
+	op_ctx->ctx_info_num = get_num_of_ctx_info(config);
+
+	/* map each context in the configuration scheme */
+	for (ctx_idx = 0; ctx_idx < op_ctx->ctx_info_num;
+	     ctx_idx++, ctx_info_p++) {
+		/* context already initialized by the user */
+		rc = map_ctx_for_proc(op_ctx->client_ctx,
+				      ctx_info_p,
+				      config->node_props[ctx_idx].context,
+				      /*user ctx */ &ctx_state);
+		if (rc != 0) {
+			op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+			goto unmap_ctx_and_return;
+		}
+
+		ctx_mapped_n++;	/*ctx mapped successfully */
+
+		/* context must be initialized */
+		if (ctx_state != CTX_STATE_INITIALIZED) {
+			pr_err(
+				    "Given context [%d] in invalid state for processing -%d\n",
+				    ctx_idx, ctx_state);
+			rc = -EINVAL;
+			op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+			goto unmap_ctx_and_return;
+		}
+	}
+
+	op_ctx->op_type = SEP_OP_CRYPTO_FINI;
+
+	/* Construct SeP combined scheme */
+	cfg_scheme = format_sep_combined_cfg_scheme(config, op_ctx);
+	pr_debug("SeP Config. Scheme: 0x%08X\n", cfg_scheme);
+
+	rc = prepare_combined_data_for_sep(op_ctx, data_in, data_out,
+					   data_in_size,
+					   CRYPTO_DATA_TEXT_FINALIZE);
+	if (rc != 0)
+		goto unmap_ctx_and_return;
+
+	ctx_info_p = &(op_ctx->ctx_info);
+
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc == 0) {		/* Coupled sequence */
+		for (ctx_idx = 0; ctx_idx < op_ctx->ctx_info_num;
+		     ctx_idx++, ctx_info_p++) {
+			ctxmgr_set_sep_cache_idx(ctx_info_p,
+						 ctxmgr_sep_cache_alloc
+						 (drvdata->sep_cache,
+						  ctxmgr_get_ctx_id(ctx_info_p),
+						  &sep_cache_load_required
+						  [ctx_idx]));
+		}
+		rc = send_combined_op_desc(op_ctx,
+					   sep_cache_load_required, 0 /*INIT*/,
+					   SEP_PROC_MODE_FIN, cfg_scheme);
+
+		mutex_unlock(&drvdata->desc_queue_sequencer);
+	} else {
+		pr_err("Failed locking descQ sequencer[%u]\n",
+			    op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	}
+	if (rc == 0)
+		rc = wait_for_sep_op_result(op_ctx);
+
+	if (auth_data_size_p != NULL) {
+		if ((rc == 0) && (op_ctx->error_info == 0)) {
+			ctx_info_p = &(op_ctx->ctx_info);
+			ctx_info_p += op_ctx->ctx_info_num - 1;
+
+			/* Auth data embedded in the last SeP/FW context */
+			*auth_data_size_p =
+			    ctxmgr_get_digest_or_mac(ctx_info_p,
+						     auth_data_p);
+		} else {	/* Failure */
+			*auth_data_size_p = 0;	/* Nothing valid */
+		}
+	}
+
+	crypto_op_completion_cleanup(op_ctx);
+
+ unmap_ctx_and_return:
+	ctx_info_p = &(op_ctx->ctx_info);
+
+	for (ctx_idx = 0; ctx_idx < ctx_mapped_n; ctx_idx++, ctx_info_p++)
+		ctxmgr_unmap_user_ctx(ctx_info_p);
+
+	return rc;
+}
+
+/**
+ * process_combined_integrated() - Integrated processing of
+ *				   Combined data (init+proc+fin)
+ * @op_ctx:
+ * @config:	 The user configuration scheme array
+ * @data_in:
+ * @data_out:
+ * @data_in_size:
+ * @auth_data_p:
+ * @auth_data_size_p:
+ *
+ * Returns int
+ */
+static int process_combined_integrated(struct sep_op_ctx *op_ctx,
+				       struct dxdi_combined_props *config,
+				       u8 __user *data_in,
+				       u8 __user *data_out,
+				       u32 data_in_size,
+				       u8 *auth_data_p,
+				       u8 *auth_data_size_p)
+{
+	int rc;
+	int ctx_idx, ctx_mapped_n = 0;
+	int sep_cache_load_required[DXDI_COMBINED_NODES_MAX];
+	enum host_ctx_state ctx_state;
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct client_crypto_ctx_info *ctx_info_p = &(op_ctx->ctx_info);
+	u32 cfg_scheme;
+
+	/* assume nothing to load */
+	memset(sep_cache_load_required, 0, sizeof(sep_cache_load_required));
+
+	/* set the number of active contexts */
+	op_ctx->ctx_info_num = get_num_of_ctx_info(config);
+
+	/* map each context in the configuration scheme */
+	for (ctx_idx = 0; ctx_idx < op_ctx->ctx_info_num;
+	     ctx_idx++, ctx_info_p++) {
+		/* context already initialized by the user */
+		rc = map_ctx_for_proc(op_ctx->client_ctx,
+				      ctx_info_p,
+				      config->node_props[ctx_idx].context,
+				      &ctx_state);
+		if (rc != 0) {
+			op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+			goto unmap_ctx_and_return;
+		}
+
+		ctx_mapped_n++;	/*ctx mapped successfully */
+
+		if (ctx_state != CTX_STATE_INITIALIZED) {
+			pr_err(
+				    "Given context [%d] in invalid state for processing 0x%08X\n",
+				    ctx_idx, ctx_state);
+			op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+			rc = -EINVAL;
+			goto unmap_ctx_and_return;
+		}
+	}
+
+	op_ctx->op_type = SEP_OP_CRYPTO_FINI;
+	/* reconstruct combined scheme */
+	cfg_scheme = format_sep_combined_cfg_scheme(config, op_ctx);
+	pr_debug("SeP Config. Scheme: 0x%08X\n", cfg_scheme);
+
+	rc = prepare_combined_data_for_sep(op_ctx, data_in, data_out,
+					   data_in_size,
+					   CRYPTO_DATA_TEXT_FINALIZE /*last */
+					   );
+	if (rc != 0)
+		goto unmap_ctx_and_return;
+
+	ctx_info_p = &(op_ctx->ctx_info);
+
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc == 0) {		/* Coupled sequence */
+		for (ctx_idx = 0; ctx_idx < op_ctx->ctx_info_num;
+		     ctx_idx++, ctx_info_p++) {
+			ctxmgr_set_sep_cache_idx(ctx_info_p,
+						 ctxmgr_sep_cache_alloc
+						 (drvdata->sep_cache,
+						  ctxmgr_get_ctx_id(ctx_info_p),
+						  &sep_cache_load_required
+						  [ctx_idx]));
+		}
+		rc = send_combined_op_desc(op_ctx,
+					   sep_cache_load_required, 1 /*INIT*/,
+					   SEP_PROC_MODE_FIN, cfg_scheme);
+
+		mutex_unlock(&drvdata->desc_queue_sequencer);
+	} else {
+		pr_err("Failed locking descQ sequencer[%u]\n",
+			    op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	}
+	if (rc == 0)
+		rc = wait_for_sep_op_result(op_ctx);
+
+	if (auth_data_size_p != NULL) {
+		if ((rc == 0) && (op_ctx->error_info == 0)) {
+			ctx_info_p = &(op_ctx->ctx_info);
+			ctx_info_p += op_ctx->ctx_info_num - 1;
+
+			/* Auth data embedded in the last SeP/FW context */
+			*auth_data_size_p =
+			    ctxmgr_get_digest_or_mac(ctx_info_p,
+						     auth_data_p);
+		} else {	/* Failure */
+			*auth_data_size_p = 0;	/* Nothing valid */
+		}
+	}
+
+	crypto_op_completion_cleanup(op_ctx);
+
+ unmap_ctx_and_return:
+	ctx_info_p = &(op_ctx->ctx_info);
+
+	for (ctx_idx = 0; ctx_idx < ctx_mapped_n; ctx_idx++, ctx_info_p++)
+		ctxmgr_unmap_user_ctx(ctx_info_p);
+
+	return rc;
+}
+
+/**
+ * process_integrated() - Integrated processing of data (init+proc+fin)
+ * @op_ctx:
+ * @context_buf:
+ * @alg_class:
+ * @props:	 Initialization properties (see init_context)
+ * @data_in:
+ * @data_out:
+ * @data_in_size:
+ * @digest_or_mac_p:
+ * @digest_or_mac_size_p:
+ *
+ * Returns int
+ */
+static int process_integrated(struct sep_op_ctx *op_ctx,
+			      u32 __user *context_buf,
+			      enum crypto_alg_class alg_class,
+			      void *props,
+			      u8 __user *data_in,
+			      u8 __user *data_out,
+			      u32 data_in_size,
+			      u8 *digest_or_mac_p,
+			      u8 *digest_or_mac_size_p)
+{
+	int rc;
+	int sep_cache_load_required;
+	bool postpone_init;
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+
+	rc = ctxmgr_map_user_ctx(&op_ctx->ctx_info, drvdata->sep_data->dev,
+				 alg_class, context_buf);
+	if (rc != 0) {
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		return rc;
+	}
+	ctxmgr_set_ctx_state(&op_ctx->ctx_info, CTX_STATE_UNINITIALIZED);
+	/* Allocate a new Crypto context ID */
+	ctxmgr_set_ctx_id(&op_ctx->ctx_info,
+			  alloc_crypto_ctx_id(op_ctx->client_ctx));
+
+	/* Algorithm class specific initialization */
+	switch (alg_class) {
+	case ALG_CLASS_SYM_CIPHER:
+		rc = ctxmgr_init_symcipher_ctx(&op_ctx->ctx_info,
+					       (struct dxdi_sym_cipher_props *)
+					       props, &postpone_init,
+					       &op_ctx->error_info);
+		/* postpone_init would be ignored because this is an integrated
+		 * operation - all required data would be updated in the
+		 * context before the descriptor is sent */
+		break;
+	case ALG_CLASS_AUTH_ENC:
+		rc = ctxmgr_init_auth_enc_ctx(&op_ctx->ctx_info,
+					      (struct dxdi_auth_enc_props *)
+					      props, &op_ctx->error_info);
+		break;
+	case ALG_CLASS_MAC:
+		rc = ctxmgr_init_mac_ctx(&op_ctx->ctx_info,
+					 (struct dxdi_mac_props *)props,
+					 &op_ctx->error_info);
+		break;
+	case ALG_CLASS_HASH:
+		rc = ctxmgr_init_hash_ctx(&op_ctx->ctx_info,
+					  *((enum dxdi_hash_type *)props),
+					  &op_ctx->error_info);
+		break;
+	default:
+		pr_err("Invalid algorithm class %d\n", alg_class);
+		op_ctx->error_info = DXDI_ERROR_UNSUP;
+		rc = -EINVAL;
+	}
+	if (rc != 0)
+		goto unmap_ctx_and_exit;
+
+	ctxmgr_set_ctx_state(&op_ctx->ctx_info, CTX_STATE_PARTIAL_INIT);
+	op_ctx->op_type = SEP_OP_CRYPTO_FINI;	/* Integrated is also fin. */
+	rc = prepare_data_for_sep(op_ctx, data_in, NULL, data_out, NULL,
+				  data_in_size,
+				  CRYPTO_DATA_TEXT_FINALIZE /*last */);
+	if (rc != 0)
+		goto unmap_ctx_and_exit;
+
+#ifdef DEBUG
+	ctxmgr_dump_sep_ctx(&op_ctx->ctx_info);
+#endif
+	/* Flush sep_ctx out of host cache */
+	ctxmgr_sync_sep_ctx(&op_ctx->ctx_info, drvdata->sep_data->dev);
+
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc == 0) {
+		/* Allocate SeP context cache entry */
+		ctxmgr_set_sep_cache_idx(&op_ctx->ctx_info,
+				 ctxmgr_sep_cache_alloc(drvdata->sep_cache,
+					ctxmgr_get_ctx_id(&op_ctx->ctx_info),
+					&sep_cache_load_required));
+		if (!sep_cache_load_required)
+			pr_err("New context already in SeP cache?!");
+		/* Send descriptor with combined load+init+fin */
+		rc = send_crypto_op_desc(op_ctx, 1 /*load*/, 1 /*INIT*/,
+					 SEP_PROC_MODE_FIN);
+		mutex_unlock(&drvdata->desc_queue_sequencer);
+
+	} else {		/* failed acquiring mutex */
+		pr_err("Failed locking descQ sequencer[%u]\n",
+			    op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	}
+
+	if (rc == 0)
+		rc = wait_for_sep_op_result(op_ctx);
+
+	if (digest_or_mac_size_p != NULL) {
+		if ((rc == 0) && (op_ctx->error_info == 0)) {
+			/* Digest or MAC are embedded in the SeP/FW context */
+			*digest_or_mac_size_p =
+			    ctxmgr_get_digest_or_mac(&op_ctx->ctx_info,
+						     digest_or_mac_p);
+		} else {	/* Failure */
+			*digest_or_mac_size_p = 0;	/* Nothing valid */
+		}
+	}
+
+	/* Hash tail buffer is never used/mapped in integrated op -->
+	 * no need to unmap */
+
+	crypto_op_completion_cleanup(op_ctx);
+
+ unmap_ctx_and_exit:
+	ctxmgr_unmap_user_ctx(&op_ctx->ctx_info);
+
+	return rc;
+}
+
+/**
+ * process_integrated_auth_enc() - Integrated processing of aead
+ * @op_ctx:
+ * @context_buf:
+ * @alg_class:
+ * @props:
+ * @data_header:
+ * @data_in:
+ * @data_out:
+ * @data_header_size:
+ * @data_in_size:
+ * @mac_p:
+ * @mac_size_p:
+ *
+ * Integrated processing of authenticate & encryption of data
+ * (init+proc_a+proc_t+fin)
+ * Returns int
+ */
+static int process_integrated_auth_enc(struct sep_op_ctx *op_ctx,
+				       u32 __user *context_buf,
+				       enum crypto_alg_class alg_class,
+				       void *props,
+				       u8 __user *data_header,
+				       u8 __user *data_in,
+				       u8 __user *data_out,
+				       u32 data_header_size,
+				       u32 data_in_size,
+				       u8 *mac_p, u8 *mac_size_p)
+{
+	int rc;
+	int sep_cache_load_required;
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+
+	rc = ctxmgr_map_user_ctx(&op_ctx->ctx_info, drvdata->sep_data->dev,
+				 alg_class, context_buf);
+	if (rc != 0) {
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		goto integ_ae_exit;
+	}
+
+	ctxmgr_set_ctx_state(&op_ctx->ctx_info, CTX_STATE_UNINITIALIZED);
+	/* Allocate a new Crypto context ID */
+	ctxmgr_set_ctx_id(&op_ctx->ctx_info,
+			  alloc_crypto_ctx_id(op_ctx->client_ctx));
+
+	/* initialization */
+	rc = ctxmgr_init_auth_enc_ctx(&op_ctx->ctx_info,
+				      (struct dxdi_auth_enc_props *)props,
+				      &op_ctx->error_info);
+	if (rc != 0) {
+		ctxmgr_unmap_user_ctx(&op_ctx->ctx_info);
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		goto integ_ae_exit;
+	}
+
+	ctxmgr_set_ctx_state(&op_ctx->ctx_info, CTX_STATE_PARTIAL_INIT);
+	/* Op. type is to init. the context and process Adata */
+	op_ctx->op_type = SEP_OP_CRYPTO_INIT | SEP_OP_CRYPTO_PROC;
+	/* prepare additional/assoc data */
+	rc = prepare_data_for_sep(op_ctx, data_header, NULL, NULL, NULL,
+				  data_header_size, CRYPTO_DATA_ADATA);
+	if (rc != 0) {
+		ctxmgr_unmap_user_ctx(&op_ctx->ctx_info);
+		goto integ_ae_exit;
+	}
+#ifdef DEBUG
+	ctxmgr_dump_sep_ctx(&op_ctx->ctx_info);
+#endif
+	/* Flush sep_ctx out of host cache */
+	ctxmgr_sync_sep_ctx(&op_ctx->ctx_info, drvdata->sep_data->dev);
+
+	/* Start critical section -
+	 * cache allocation must be coupled to descriptor enqueue */
+	rc = mutex_lock_interruptible(&drvdata->desc_queue_sequencer);
+	if (rc == 0) {
+		/* Allocate SeP context cache entry */
+		ctxmgr_set_sep_cache_idx(&op_ctx->ctx_info,
+			 ctxmgr_sep_cache_alloc(drvdata->sep_cache,
+					ctxmgr_get_ctx_id(&op_ctx->ctx_info),
+					&sep_cache_load_required));
+		if (!sep_cache_load_required)
+			pr_err("New context already in SeP cache?!");
+		/* Send descriptor with combined load+init+fin */
+		rc = send_crypto_op_desc(op_ctx, 1 /*load*/, 1 /*INIT*/,
+					 SEP_PROC_MODE_PROC_A);
+		mutex_unlock(&drvdata->desc_queue_sequencer);
+
+	} else {		/* failed acquiring mutex */
+		pr_err("Failed locking descQ sequencer[%u]\n",
+			    op_ctx->client_ctx->qid);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+	}
+
+	/* set status and cleanup last descriptor */
+	if (rc == 0)
+		rc = wait_for_sep_op_result(op_ctx);
+	crypto_op_completion_cleanup(op_ctx);
+	ctxmgr_unmap_user_ctx(&op_ctx->ctx_info);
+
+	/* process text data only on adata success */
+	if ((rc == 0) && (op_ctx->error_info == 0)) {/* Init+Adata succeeded */
+		/* reset pending descriptor and preserve operation
+		 * context for the finalize phase */
+		op_ctx->pending_descs_cntr = 1;
+		/* process & finalize operation with entire user data */
+		rc = sep_fin_proc(op_ctx, context_buf, data_in,
+				  data_out, data_in_size, mac_p, mac_size_p);
+	}
+
+ integ_ae_exit:
+	return rc;
+}
+
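+/* The AEAD flow above is split into two descriptors: the first carries
+ * load+INIT+PROC_A for the associated data; only if it succeeds is
+ * pending_descs_cntr rearmed and sep_fin_proc() invoked to process and
+ * finalize the text data, retrieving the MAC from the SeP/FW context. */
+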
+/**
+ * dxdi_data_dir_to_dma_data_dir() - Convert from DxDI DMA direction type to
+ *					Linux kernel DMA direction type
+ * @dxdi_dir:	 DMA direction in DxDI encoding
+ *
+ * Returns enum dma_data_direction
+ */
+enum dma_data_direction dxdi_data_dir_to_dma_data_dir(enum dxdi_data_direction
+						      dxdi_dir)
+{
+	switch (dxdi_dir) {
+	case DXDI_DATA_BIDIR:
+		return DMA_BIDIRECTIONAL;
+	case DXDI_DATA_TO_DEVICE:
+		return DMA_TO_DEVICE;
+	case DXDI_DATA_FROM_DEVICE:
+		return DMA_FROM_DEVICE;
+	default:
+		return DMA_NONE;
+	}
+}
+
+/**
+ * dispatch_sep_rpc() - Dispatch a SeP RPC descriptor and process results
+ * @op_ctx:
+ * @agent_id:
+ * @func_id:
+ * @mem_refs:
+ * @rpc_params_size:
+ * @rpc_params:
+ *
+ * Returns int
+ */
+static int dispatch_sep_rpc(struct sep_op_ctx *op_ctx,
+			    u16 agent_id,
+			    u16 func_id,
+			    struct dxdi_memref mem_refs[],
+			    unsigned long rpc_params_size,
+			    struct seprpc_params __user *rpc_params)
+{
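+	/*
+	 * Flow, as implemented below: verify the RPC message size, allocate
+	 * a DMA-able scratchpad buffer for the message, copy the message
+	 * from user space, map each user memory reference into an MLLI
+	 * table, enqueue the RPC descriptor, wait for SeP's result, copy
+	 * the (possibly updated) message back to user space, then clean up
+	 * the MLLI tables and any temporarily registered memrefs.
+	 */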
+	int i, rc = 0;
+	struct sep_client_ctx *client_ctx = op_ctx->client_ctx;
+	struct queue_drvdata *drvdata = client_ctx->drv_data;
+	enum dma_data_direction dma_dir;
+	unsigned int num_of_mem_refs;
+	int memref_idx;
+	struct client_dma_buffer *local_dma_objs[SEP_RPC_MAX_MEMREF_PER_FUNC];
+	struct mlli_tables_list mlli_tables[SEP_RPC_MAX_MEMREF_PER_FUNC];
+	struct sep_sw_desc desc;
+	struct seprpc_params *rpc_msg_p;
+
+	/* Verify RPC message size */
+	if (unlikely(SEP_RPC_MAX_MSG_SIZE < rpc_params_size)) {
+		pr_err("Given rpc_params is too big (%lu B)\n",
+		       rpc_params_size);
+		return -EINVAL;
+	}
+
+	op_ctx->spad_buf_p = dma_pool_alloc(drvdata->sep_data->spad_buf_pool,
+					    GFP_KERNEL,
+					    &op_ctx->spad_buf_dma_addr);
+	if (unlikely(op_ctx->spad_buf_p == NULL)) {
+		pr_err("Fail: alloc from spad_buf_pool for RPC message\n");
+		return -ENOMEM;
+	}
+	rpc_msg_p = (struct seprpc_params *)op_ctx->spad_buf_p;
+
+	/* Copy params to DMA buffer of message */
+	rc = copy_from_user(rpc_msg_p, rpc_params, rpc_params_size);
+	if (rc) {
+		pr_err("Fail: copy RPC message from user at 0x%p, rc=%d\n",
+			    rpc_params, rc);
+		return -EFAULT;
+	}
+	/* Get num. of memory references in host endianness */
+	num_of_mem_refs = le32_to_cpu(rpc_msg_p->num_of_memrefs);
+
+	/* Handle user memory references - prepare DMA buffers */
+	if (unlikely(num_of_mem_refs > SEP_RPC_MAX_MEMREF_PER_FUNC)) {
+		pr_err("agent_id=%d func_id=%d: Invalid # of memref %u\n",
+			    agent_id, func_id, num_of_mem_refs);
+		return -EINVAL;
+	}
+	for (i = 0; i < num_of_mem_refs; i++) {
+		/* Init. tables lists for proper cleanup */
+		MLLI_TABLES_LIST_INIT(mlli_tables + i);
+		local_dma_objs[i] = NULL;
+	}
+	for (i = 0; i < num_of_mem_refs; i++) {
+		pr_debug(
+			"memref[%d]: id=%d dma_dir=%d start/offset 0x%08x size %u\n",
+			i, mem_refs[i].ref_id, mem_refs[i].dma_direction,
+			mem_refs[i].start_or_offset, mem_refs[i].size);
+
+		/* convert DMA direction to enum dma_data_direction */
+		dma_dir =
+		    dxdi_data_dir_to_dma_data_dir(mem_refs[i].dma_direction);
+		if (unlikely(dma_dir == DMA_NONE)) {
+			pr_err(
+				    "agent_id=%d func_id=%d: Invalid DMA direction (%d) for memref %d\n",
+				    agent_id, func_id,
+				    mem_refs[i].dma_direction, i);
+			rc = -EINVAL;
+			break;
+		}
+		/* Temporary memory registration if needed */
+		if (IS_VALID_MEMREF_IDX(mem_refs[i].ref_id)) {
+			memref_idx = mem_refs[i].ref_id;
+			if (unlikely(mem_refs[i].start_or_offset != 0)) {
+				pr_err(
+					    "Offset in memref is not supported for RPC.\n");
+				rc = -EINVAL;
+				break;
+			}
+		} else {
+			memref_idx = register_client_memref(client_ctx,
+					(u8 __user *)mem_refs[i].start_or_offset,
+					NULL, mem_refs[i].size, dma_dir);
+			if (unlikely(!IS_VALID_MEMREF_IDX(memref_idx))) {
+				pr_err("Fail: temp memory registration\n");
+				rc = -ENOMEM;
+				break;
+			}
+		}
+		/* MLLI table creation */
+		local_dma_objs[i] = acquire_dma_obj(client_ctx, memref_idx);
+		if (unlikely(local_dma_objs[i] == NULL)) {
+			pr_err("Failed acquiring DMA objects.\n");
+			rc = -ENOMEM;
+			break;
+		}
+		if (unlikely(local_dma_objs[i]->buf_size != mem_refs[i].size)) {
+			pr_err("RPC: Partial memory ref not supported.\n");
+			rc = -EINVAL;
+			break;
+		}
+		rc = llimgr_create_mlli(drvdata->sep_data->llimgr,
+					mlli_tables + i, dma_dir,
+					local_dma_objs[i], 0, 0);
+		if (unlikely(rc != 0))
+			break;
+		llimgr_mlli_to_seprpc_memref(&(mlli_tables[i]),
+					     &(rpc_msg_p->memref[i]));
+	}
+
+	op_ctx->op_type = SEP_OP_RPC;
+	if (rc == 0) {
+		/* Pack SW descriptor */
+		desc_q_pack_rpc_desc(&desc, op_ctx, agent_id, func_id,
+				     rpc_params_size,
+				     op_ctx->spad_buf_dma_addr);
+		op_ctx->op_state = USER_OP_INPROC;
+		/* Enqueue descriptor */
+		rc = desc_q_enqueue(drvdata->desc_queue, &desc, true);
+		if (likely(!IS_DESCQ_ENQUEUE_ERR(rc)))
+			rc = 0;
+	}
+
+	if (likely(rc == 0))
+		rc = wait_for_sep_op_result(op_ctx);
+	else
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+
+	/* Process descriptor completion */
+	if ((rc == 0) && (op_ctx->error_info == 0)) {
+		/* Copy back RPC message buffer */
+		rc = copy_to_user(rpc_params, rpc_msg_p, rpc_params_size);
+		if (rc) {
+			pr_err(
+				    "Failed copying back RPC parameters/message to user at 0x%p (rc=%d)\n",
+				    rpc_params, rc);
+			rc = -EFAULT;
+		}
+	}
+	op_ctx->op_state = USER_OP_NOP;
+	for (i = 0; i < num_of_mem_refs; i++) {
+		/* Can call for all - uninitialized mlli tables would have
+		 * table_count == 0 */
+		llimgr_destroy_mlli(drvdata->sep_data->llimgr, mlli_tables + i);
+		if (local_dma_objs[i] != NULL) {
+			release_dma_obj(client_ctx, local_dma_objs[i]);
+			memref_idx =
+			    DMA_OBJ_TO_MEMREF_IDX(client_ctx,
+						  local_dma_objs[i]);
+			if (memref_idx != mem_refs[i].ref_id) {
+				/* Memory reference was temp. registered */
+				(void)free_client_memref(client_ctx,
+							 memref_idx);
+			}
+		}
+	}
+
+	return rc;
+}
+
+#if defined(SEP_PRINTF) && defined(DEBUG)
+/* Replace component mask */
+#undef SEP_LOG_CUR_COMPONENT
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_SEP_PRINTF
+void sep_printf_handler(struct sep_drvdata *drvdata)
+{
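+	/*
+	 * Protocol (per the register accesses below): SeP passes one printf
+	 * character per GPR update - bits 7:0 hold the character and bits
+	 * 31:8 hold a running ack counter. The host echoes the counter back
+	 * through the H2S GPR as acknowledgment and accumulates characters
+	 * into line_buf until an end-of-line is seen.
+	 */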
+	int cur_ack_cntr;
+	u32 gpr_val;
+	int i;
+
+	/* Reduce interrupts by polling until no more characters */
+	/* Loop for at most a line - to avoid infinite looping in wq ctx */
+	for (i = 0; i < SEP_PRINTF_LINE_SIZE; i++) {
+
+		gpr_val = READ_REGISTER(drvdata->cc_base +
+					SEP_PRINTF_S2H_GPR_OFFSET);
+		cur_ack_cntr = gpr_val >> 8;
+		/*
+		 * ack as soon as possible (data is already in local variable)
+		 * let SeP push one more character until we finish processing
+		 */
+		WRITE_REGISTER(drvdata->cc_base + SEP_PRINTF_H2S_GPR_OFFSET,
+			       cur_ack_cntr);
+#if 0
+		pr_debug("%d. GPR=0x%08X (cur_ack=0x%08X , last=0x%08X)\n",
+			      i, gpr_val, cur_ack_cntr, drvdata->last_ack_cntr);
+#endif
+		if (cur_ack_cntr == drvdata->last_ack_cntr)
+			break;
+
+		/* Identify lost characters case */
+		if (cur_ack_cntr >
+		    ((drvdata->last_ack_cntr + 1) & SEP_PRINTF_ACK_MAX)) {
+			/* NULL terminate */
+			drvdata->line_buf[drvdata->cur_line_buf_offset] = 0;
+			if (sep_log_mask & SEP_LOG_CUR_COMPONENT)
+				pr_info("SeP(lost %d): %s",
+				       cur_ack_cntr - drvdata->last_ack_cntr
+				       - 1, drvdata->line_buf);
+			drvdata->cur_line_buf_offset = 0;
+		}
+		drvdata->last_ack_cntr = cur_ack_cntr;
+
+		drvdata->line_buf[drvdata->cur_line_buf_offset] =
+		    gpr_val & 0xFF;
+
+		/* Is end of line? */
+		if ((drvdata->line_buf[drvdata->cur_line_buf_offset] == '\n') ||
+		    (drvdata->line_buf[drvdata->cur_line_buf_offset] == 0) ||
+		    (drvdata->cur_line_buf_offset == (SEP_PRINTF_LINE_SIZE - 1))
+		    ) {
+			/* NULL terminate */
+			drvdata->line_buf[drvdata->cur_line_buf_offset + 1] = 0;
+			if (sep_log_mask & SEP_LOG_CUR_COMPONENT)
+				pr_info("SeP: %s", drvdata->line_buf);
+			drvdata->cur_line_buf_offset = 0;
+		} else {
+			drvdata->cur_line_buf_offset++;
+		}
+
+	}
+
+}
+
+/* Restore component mask */
+#undef SEP_LOG_CUR_COMPONENT
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_MAIN
+#endif				/*SEP_PRINTF */
+
+static int sep_interrupt_process(struct sep_drvdata *drvdata)
+{
+	u32 cause_reg = 0;
+	int i;
+
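+	/*
+	 * Causes are dispatched below in a fixed order: SeP printf GPR
+	 * (debug console) first, then SeP state change, then SeP-to-host
+	 * requests, and finally the per-queue descriptor completion GPRs.
+	 */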
+	/* read the interrupt status */
+	cause_reg = READ_REGISTER(drvdata->cc_base +
+				  DX_CC_REG_OFFSET(HOST, IRR));
+
+	if (cause_reg == 0) {
+		/* pr_debug("Got interrupt with empty cause_reg\n"); */
+		return IRQ_NONE;
+	}
+#if 0
+	pr_debug("cause_reg=0x%08X gpr5=0x%08X\n", cause_reg,
+		      READ_REGISTER(drvdata->cc_base +
+				    SEP_PRINTF_S2H_GPR_OFFSET));
+#endif
+	/* clear interrupt */
+	WRITE_REGISTER(drvdata->cc_base + DX_CC_REG_OFFSET(HOST, ICR),
+		       cause_reg);
+
+#ifdef SEP_PRINTF
+	if (cause_reg & SEP_HOST_GPR_IRQ_MASK(DX_SEP_HOST_PRINTF_GPR_IDX)) {
+#ifdef DEBUG
+		sep_printf_handler(drvdata);
+#else				/* Just ack to SeP so it does not stall */
+		WRITE_REGISTER(drvdata->cc_base + SEP_PRINTF_H2S_GPR_OFFSET,
+			       READ_REGISTER(drvdata->cc_base +
+					     SEP_PRINTF_S2H_GPR_OFFSET) >> 8);
+#endif
+		/* handled */
+		cause_reg &= ~SEP_HOST_GPR_IRQ_MASK(DX_SEP_HOST_PRINTF_GPR_IDX);
+	}
+#endif
+
+	if (cause_reg & SEP_HOST_GPR_IRQ_MASK(DX_SEP_STATE_GPR_IDX)) {
+		dx_sep_state_change_handler(drvdata);
+		cause_reg &= ~SEP_HOST_GPR_IRQ_MASK(DX_SEP_STATE_GPR_IDX);
+	}
+
+	if (cause_reg & SEP_HOST_GPR_IRQ_MASK(DX_SEP_REQUEST_GPR_IDX)) {
+		if (drvdata->irq_mask &
+		    SEP_HOST_GPR_IRQ_MASK(DX_SEP_REQUEST_GPR_IDX)) {
+			dx_sep_req_handler(drvdata);
+		}
+		cause_reg &= ~SEP_HOST_GPR_IRQ_MASK(DX_SEP_REQUEST_GPR_IDX);
+	}
+
+	/* Check interrupt flag for each queue */
+	for (i = 0; cause_reg && i < drvdata->num_of_desc_queues; i++) {
+		if (cause_reg & gpr_interrupt_mask[i]) {
+			desc_q_process_completed(drvdata->queue[i].desc_queue);
+			cause_reg &= ~gpr_interrupt_mask[i];
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+#ifdef SEP_INTERRUPT_BY_TIMER
+static void sep_timer(unsigned long arg)
+{
+	struct sep_drvdata *drvdata = (struct sep_drvdata *)arg;
+
+	(void)sep_interrupt_process(drvdata);
+
+	mod_timer(&drvdata->delegate, jiffies + msecs_to_jiffies(10));
+}
+#else
+irqreturn_t sep_interrupt(int irq, void *dev_id)
+{
+	struct sep_drvdata *drvdata =
+	    (struct sep_drvdata *)dev_get_drvdata((struct device *)dev_id);
+
+	if (drvdata->sep_suspended) {
+		WARN(1, "sep_interrupt raised during suspend!");
+		return IRQ_HANDLED;
+	}
+
+	return sep_interrupt_process(drvdata);
+}
+#endif
+
+/***** IOCTL commands handlers *****/
+
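+/*
+ * Common pattern for the handlers below: copy in only the input portion of
+ * the user parameters structure (up to the first output field, computed
+ * with offsetof), perform the operation within a local sep_op_ctx, then
+ * copy the output fields - including error_info - back to user space.
+ * A return value of 0 with a non-zero error_info means the host side
+ * completed successfully but SeP reported an error.
+ */
+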
+static int sep_ioctl_get_ver_major(unsigned long arg)
+{
+	u32 __user *ver_p = (u32 __user *)arg;
+	const u32 ver_major = DXDI_VER_MAJOR;
+
+	return put_user(ver_major, ver_p);
+}
+
+static int sep_ioctl_get_ver_minor(unsigned long arg)
+{
+	u32 __user *ver_p = (u32 __user *)arg;
+	const u32 ver_minor = DXDI_VER_MINOR;
+
+	return put_user(ver_minor, ver_p);
+}
+
+static int sep_ioctl_get_sym_cipher_ctx_size(unsigned long arg)
+{
+	struct dxdi_get_sym_cipher_ctx_size_params __user *user_params =
+	    (struct dxdi_get_sym_cipher_ctx_size_params __user *)arg;
+	enum dxdi_sym_cipher_type sym_cipher_type;
+	const u32 ctx_size = ctxmgr_get_ctx_size(ALG_CLASS_SYM_CIPHER);
+	int err;
+
+	err = __get_user(sym_cipher_type, &(user_params->sym_cipher_type));
+	if (err)
+		return err;
+
+	if (((sym_cipher_type >= _DXDI_SYMCIPHER_AES_FIRST) &&
+	     (sym_cipher_type <= _DXDI_SYMCIPHER_AES_LAST)) ||
+	    ((sym_cipher_type >= _DXDI_SYMCIPHER_DES_FIRST) &&
+	     (sym_cipher_type <= _DXDI_SYMCIPHER_DES_LAST)) ||
+	    ((sym_cipher_type >= _DXDI_SYMCIPHER_C2_FIRST) &&
+	     (sym_cipher_type <= _DXDI_SYMCIPHER_C2_LAST))
+	    ) {
+		pr_debug("sym_cipher_type=%u\n", sym_cipher_type);
+		return put_user(ctx_size, &(user_params->ctx_size));
+	} else {
+		pr_err("Invalid cipher type=%u\n", sym_cipher_type);
+		return -EINVAL;
+	}
+}
+
+static int sep_ioctl_get_auth_enc_ctx_size(unsigned long arg)
+{
+	struct dxdi_get_auth_enc_ctx_size_params __user *user_params =
+	    (struct dxdi_get_auth_enc_ctx_size_params __user *)arg;
+	enum dxdi_auth_enc_type ae_type;
+	const u32 ctx_size = ctxmgr_get_ctx_size(ALG_CLASS_AUTH_ENC);
+	int err;
+
+	err = __get_user(ae_type, &(user_params->ae_type));
+	if (err)
+		return err;
+
+	if ((ae_type == DXDI_AUTHENC_NONE) || (ae_type > DXDI_AUTHENC_MAX)) {
+		pr_err("Invalid auth-enc. type=%u\n", ae_type);
+		return -EINVAL;
+	}
+
+	pr_debug("A.E. type=%u\n", ae_type);
+	return put_user(ctx_size, &(user_params->ctx_size));
+}
+
+static int sep_ioctl_get_mac_ctx_size(unsigned long arg)
+{
+	struct dxdi_get_mac_ctx_size_params __user *user_params =
+	    (struct dxdi_get_mac_ctx_size_params __user *)arg;
+	enum dxdi_mac_type mac_type;
+	const u32 ctx_size = ctxmgr_get_ctx_size(ALG_CLASS_MAC);
+	int err;
+
+	err = __get_user(mac_type, &(user_params->mac_type));
+	if (err)
+		return err;
+
+	if ((mac_type == DXDI_MAC_NONE) || (mac_type > DXDI_MAC_MAX)) {
+		pr_err("Invalid MAC type=%u\n", mac_type);
+		return -EINVAL;
+	}
+
+	pr_debug("MAC type=%u\n", mac_type);
+	return put_user(ctx_size, &(user_params->ctx_size));
+}
+
+static int sep_ioctl_get_hash_ctx_size(unsigned long arg)
+{
+	struct dxdi_get_hash_ctx_size_params __user *user_params =
+	    (struct dxdi_get_hash_ctx_size_params __user *)arg;
+	enum dxdi_hash_type hash_type;
+	const u32 ctx_size = ctxmgr_get_ctx_size(ALG_CLASS_HASH);
+	int err;
+
+	err = __get_user(hash_type, &(user_params->hash_type));
+	if (err)
+		return err;
+
+	if ((hash_type == DXDI_HASH_NONE) || (hash_type > DXDI_HASH_MAX)) {
+		pr_err("Invalid hash type=%u\n", hash_type);
+		return -EINVAL;
+	}
+
+	pr_debug("hash type=%u\n", hash_type);
+	return put_user(ctx_size, &(user_params->ctx_size));
+}
+
+static int sep_ioctl_sym_cipher_init(struct sep_client_ctx *client_ctx,
+				     unsigned long arg)
+{
+	struct dxdi_sym_cipher_init_params __user *user_init_params =
+			(struct dxdi_sym_cipher_init_params __user *)arg;
+	struct dxdi_sym_cipher_init_params init_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+		offsetof(struct dxdi_sym_cipher_init_params, error_info);
+	int rc;
+
+	/* access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&init_params, user_init_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+
+	rc = init_crypto_context(&op_ctx, init_params.context_buf,
+				 ALG_CLASS_SYM_CIPHER, &(init_params.props));
+
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_init_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_auth_enc_init(struct sep_client_ctx *client_ctx,
+				   unsigned long arg)
+{
+	struct dxdi_auth_enc_init_params __user *user_init_params =
+	    (struct dxdi_auth_enc_init_params __user *)arg;
+	struct dxdi_auth_enc_init_params init_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_auth_enc_init_params, error_info);
+	int rc;
+
+	/* access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&init_params, user_init_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = init_crypto_context(&op_ctx, init_params.context_buf,
+				 ALG_CLASS_AUTH_ENC, &(init_params.props));
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_init_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_mac_init(struct sep_client_ctx *client_ctx,
+			      unsigned long arg)
+{
+	struct dxdi_mac_init_params __user *user_init_params =
+	    (struct dxdi_mac_init_params __user *)arg;
+	struct dxdi_mac_init_params init_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_mac_init_params, error_info);
+	int rc;
+
+	/* access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&init_params, user_init_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = init_crypto_context(&op_ctx, init_params.context_buf,
+				 ALG_CLASS_MAC, &(init_params.props));
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_init_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_hash_init(struct sep_client_ctx *client_ctx,
+			       unsigned long arg)
+{
+	struct dxdi_hash_init_params __user *user_init_params =
+	    (struct dxdi_hash_init_params __user *)arg;
+	struct dxdi_hash_init_params init_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_hash_init_params, error_info);
+	int rc;
+
+	/* access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&init_params, user_init_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = init_crypto_context(&op_ctx, init_params.context_buf,
+				 ALG_CLASS_HASH, &(init_params.hash_type));
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_init_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_proc_dblk(struct sep_client_ctx *client_ctx,
+			       unsigned long arg)
+{
+	struct dxdi_process_dblk_params __user *user_dblk_params =
+	    (struct dxdi_process_dblk_params __user *)arg;
+	struct dxdi_process_dblk_params dblk_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_process_dblk_params, error_info);
+	int rc;
+
+	/* access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&dblk_params, user_dblk_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = sep_proc_dblk(&op_ctx, dblk_params.context_buf,
+			   dblk_params.data_block_type,
+			   dblk_params.data_in, dblk_params.data_out,
+			   dblk_params.data_in_size);
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_dblk_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_fin_proc(struct sep_client_ctx *client_ctx,
+			      unsigned long arg)
+{
+	struct dxdi_fin_process_params __user *user_fin_params =
+	    (struct dxdi_fin_process_params __user *)arg;
+	struct dxdi_fin_process_params fin_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_fin_process_params, digest_or_mac);
+	int rc;
+
+	/* access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&fin_params, user_fin_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = sep_fin_proc(&op_ctx, fin_params.context_buf,
+			  fin_params.data_in, fin_params.data_out,
+			  fin_params.data_in_size,
+			  fin_params.digest_or_mac,
+			  &(fin_params.digest_or_mac_size));
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	if (rc == 0) {
+		/* Always copy back digest/mac size + error_info */
+		/* (that's the reason for keeping them together)  */
+		rc = put_user(fin_params.digest_or_mac_size,
+			      &user_fin_params->digest_or_mac_size);
+		rc += put_user(fin_params.error_info,
+				 &user_fin_params->error_info);
+
+		/* We always need to copy back the digest/mac size (even if 0)
+		 * in order to indicate validity of digest_or_mac buffer */
+	}
+	if ((rc == 0) && (op_ctx.error_info == 0) &&
+	    (fin_params.digest_or_mac_size > 0)) {
+		if (likely(fin_params.digest_or_mac_size <=
+			   DXDI_DIGEST_SIZE_MAX)) {
+			/* Copy back digest/mac if valid */
+			rc = copy_to_user(&(user_fin_params->digest_or_mac),
+					    fin_params.digest_or_mac,
+					    fin_params.digest_or_mac_size);
+		} else {	/* Invalid digest/mac size! */
+			pr_err("Got invalid digest/MAC size = %u",
+				    fin_params.digest_or_mac_size);
+			op_ctx.error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+			rc = -EINVAL;
+		}
+	}
+
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_fin_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_combined_init(struct sep_client_ctx *client_ctx,
+				   unsigned long arg)
+{
+	struct dxdi_combined_init_params __user *user_init_params =
+	    (struct dxdi_combined_init_params __user *)arg;
+	struct dxdi_combined_init_params init_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_combined_init_params, error_info);
+	int rc;
+
+	/* access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&init_params, user_init_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = init_combined_context(&op_ctx, &(init_params.props));
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_init_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_combined_proc_dblk(struct sep_client_ctx *client_ctx,
+					unsigned long arg)
+{
+	struct dxdi_combined_proc_dblk_params __user *user_dblk_params =
+	    (struct dxdi_combined_proc_dblk_params __user *)arg;
+	struct dxdi_combined_proc_dblk_params dblk_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_combined_proc_dblk_params, error_info);
+	int rc;
+
+	/* access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&dblk_params, user_dblk_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = sep_combined_proc_dblk(&op_ctx, &dblk_params.props,
+				    dblk_params.data_in, dblk_params.data_out,
+				    dblk_params.data_in_size);
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_dblk_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_combined_fin_proc(struct sep_client_ctx *client_ctx,
+				       unsigned long arg)
+{
+	struct dxdi_combined_proc_params __user *user_fin_params =
+	    (struct dxdi_combined_proc_params __user *)arg;
+	struct dxdi_combined_proc_params fin_params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_combined_proc_params, error_info);
+	int rc;
+
+	/* access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&fin_params, user_fin_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = sep_combined_fin_proc(&op_ctx, &fin_params.props,
+				   fin_params.data_in, fin_params.data_out,
+				   fin_params.data_in_size,
+				   fin_params.auth_data,
+				   &(fin_params.auth_data_size));
+
+	if (rc == 0) {
+		/* Always copy back digest size + error_info */
+		/* (that's the reason for keeping them together)  */
+		rc = put_user(fin_params.auth_data_size,
+			      &user_fin_params->auth_data_size);
+		rc += put_user(fin_params.error_info,
+				 &user_fin_params->error_info);
+
+		/* We always need to copy back the digest size (even if 0)
+		 * in order to indicate validity of digest buffer */
+	}
+
+	if ((rc == 0) && (op_ctx.error_info == 0)) {
+		if (likely((fin_params.auth_data_size > 0) &&
+			   (fin_params.auth_data_size <=
+			    DXDI_DIGEST_SIZE_MAX))) {
+			/* Copy back auth if valid */
+			rc = copy_to_user(&(user_fin_params->auth_data),
+					    fin_params.auth_data,
+					    fin_params.auth_data_size);
+		}
+	}
+
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_fin_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_combined_proc(struct sep_client_ctx *client_ctx,
+				   unsigned long arg)
+{
+	struct dxdi_combined_proc_params __user *user_params =
+	    (struct dxdi_combined_proc_params __user *)arg;
+	struct dxdi_combined_proc_params params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_combined_proc_params, error_info);
+	int rc;
+
+	/* access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = process_combined_integrated(&op_ctx, &(params.props),
+					 params.data_in, params.data_out,
+					 params.data_in_size, params.auth_data,
+					 &(params.auth_data_size));
+
+	if (rc == 0) {
+		/* Always copy back digest size + error_info */
+		/* (that's the reason for keeping them together)  */
+		rc = put_user(params.auth_data_size,
+			      &user_params->auth_data_size);
+		rc += put_user(params.error_info, &user_params->error_info);
+
+		/* We always need to copy back the digest size (even if 0)
+		 * in order to indicate validity of digest buffer */
+	}
+
+	if ((rc == 0) && (op_ctx.error_info == 0)) {
+		if (likely((params.auth_data_size > 0) &&
+			   (params.auth_data_size <= DXDI_DIGEST_SIZE_MAX))) {
+			/* Copy back auth if valid */
+			rc = copy_to_user(&(user_params->auth_data),
+					  params.auth_data,
+					  params.auth_data_size);
+		}
+	}
+
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_sym_cipher_proc(struct sep_client_ctx *client_ctx,
+				     unsigned long arg)
+{
+	struct dxdi_sym_cipher_proc_params __user *user_params =
+	    (struct dxdi_sym_cipher_proc_params __user *)arg;
+	struct dxdi_sym_cipher_proc_params params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_sym_cipher_proc_params, error_info);
+	int rc;
+
+	/* access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = process_integrated(&op_ctx, params.context_buf,
+				ALG_CLASS_SYM_CIPHER, &(params.props),
+				params.data_in, params.data_out,
+				params.data_in_size, NULL, NULL);
+
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_auth_enc_proc(struct sep_client_ctx *client_ctx,
+				   unsigned long arg)
+{
+	struct dxdi_auth_enc_proc_params __user *user_params =
+	    (struct dxdi_auth_enc_proc_params __user *)arg;
+	struct dxdi_auth_enc_proc_params params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_auth_enc_proc_params, tag);
+	int rc;
+	u8 tag_size;
+
+	/* access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+
+	if (params.props.adata_size == 0) {
+		/* without assoc data we can optimize for one descriptor
+		 * sequence */
+		rc = process_integrated(&op_ctx, params.context_buf,
+					ALG_CLASS_AUTH_ENC, &(params.props),
+					params.text_data, params.data_out,
+					params.props.text_size, params.tag,
+					&tag_size);
+	} else {
+		/* Integrated processing with auth. enc. algorithms with
+		 * Additional-Data requires special two-descriptors flow */
+		rc = process_integrated_auth_enc(&op_ctx, params.context_buf,
+						 ALG_CLASS_AUTH_ENC,
+						 &(params.props), params.adata,
+						 params.text_data,
+						 params.data_out,
+						 params.props.adata_size,
+						 params.props.text_size,
+						 params.tag, &tag_size);
+
+	}
+
+	if ((rc == 0) && (tag_size != params.props.tag_size)) {
+		pr_warn(
+			"Tag result size different than requested (%u != %u)\n",
+			tag_size, params.props.tag_size);
+	}
+
+	if ((rc == 0) && (op_ctx.error_info == 0) && (tag_size > 0)) {
+		if (likely(tag_size <= DXDI_DIGEST_SIZE_MAX)) {
+			/* Copy back digest/mac if valid */
+			rc = __copy_to_user(&(user_params->tag), params.tag,
+					    tag_size);
+		} else {	/* Invalid digest/mac size! */
+			pr_err("Got invalid tag size = %u", tag_size);
+			op_ctx.error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+			rc = -EINVAL;
+		}
+	}
+
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_mac_proc(struct sep_client_ctx *client_ctx,
+			      unsigned long arg)
+{
+	struct dxdi_mac_proc_params __user *user_params =
+	    (struct dxdi_mac_proc_params __user *)arg;
+	struct dxdi_mac_proc_params params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_mac_proc_params, mac);
+	int rc;
+
+	/* access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = process_integrated(&op_ctx, params.context_buf,
+				ALG_CLASS_MAC, &(params.props),
+				params.data_in, NULL, params.data_in_size,
+				params.mac, &(params.mac_size));
+
+	if (rc == 0) {
+		/* Always copy back mac size + error_info */
+		/* (that's the reason for keeping them together)  */
+		rc = put_user(params.mac_size, &user_params->mac_size);
+		rc += put_user(params.error_info, &user_params->error_info);
+
+		/* We always need to copy back the mac size (even if 0)
+		 * in order to indicate validity of mac buffer */
+	}
+
+	if ((rc == 0) && (op_ctx.error_info == 0)) {
+		if (likely((params.mac_size > 0) &&
+			   (params.mac_size <= DXDI_DIGEST_SIZE_MAX))) {
+			/* Copy back mac if valid */
+			rc = copy_to_user(&(user_params->mac), params.mac,
+					  params.mac_size);
+		} else {	/* Invalid mac size! */
+			pr_err("Got invalid MAC size = %u",
+				    params.mac_size);
+			op_ctx.error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+			rc = -EINVAL;
+		}
+	}
+
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_hash_proc(struct sep_client_ctx *client_ctx,
+			       unsigned long arg)
+{
+	struct dxdi_hash_proc_params __user *user_params =
+	    (struct dxdi_hash_proc_params __user *)arg;
+	struct dxdi_hash_proc_params params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_hash_proc_params, digest);
+	int rc;
+
+	/* access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = process_integrated(&op_ctx, params.context_buf,
+				ALG_CLASS_HASH, &(params.hash_type),
+				params.data_in, NULL, params.data_in_size,
+				params.digest, &(params.digest_size));
+
+	if (rc == 0) {
+		/* Always copy back digest size + error_info */
+		/* (that's the reason for keeping them together)  */
+		rc = put_user(params.digest_size, &user_params->digest_size);
+		rc += put_user(params.error_info, &user_params->error_info);
+
+		/* We always need to copy back the digest size (even if 0)
+		 * in order to indicate validity of digest buffer */
+	}
+
+	if ((rc == 0) && (op_ctx.error_info == 0)) {
+		if (likely((params.digest_size > 0) &&
+			   (params.digest_size <= DXDI_DIGEST_SIZE_MAX))) {
+			/* Copy back mac if valid */
+			rc = copy_to_user(&(user_params->digest),
+					  params.digest, params.digest_size);
+		} else {	/* Invalid digest size! */
+			pr_err("Got invalid digest size = %u",
+				    params.digest_size);
+			op_ctx.error_info = DXDI_ERROR_INVAL_DATA_SIZE;
+			rc = -EINVAL;
+		}
+	}
+
+	/* Even on SeP error the function above
+	 * returns 0 (operation completed with no host side errors) */
+	put_user(op_ctx.error_info, &(user_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+static int sep_ioctl_sep_rpc(struct sep_client_ctx *client_ctx,
+			     unsigned long arg)
+{
+
+	struct dxdi_sep_rpc_params __user *user_params =
+	    (struct dxdi_sep_rpc_params __user *)arg;
+	struct dxdi_sep_rpc_params params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_sep_rpc_params, error_info);
+	int rc;
+
+	/* access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = dispatch_sep_rpc(&op_ctx, params.agent_id, params.func_id,
+			      params.mem_refs, params.rpc_params_size,
+			      params.rpc_params);
+
+	put_user(op_ctx.error_info, &(user_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+#if MAX_SEPAPP_SESSION_PER_CLIENT_CTX > 0
+static int sep_ioctl_register_mem4dma(struct sep_client_ctx *client_ctx,
+				      unsigned long arg)
+{
+
+	struct dxdi_register_mem4dma_params __user *user_params =
+	    (struct dxdi_register_mem4dma_params __user *)arg;
+	struct dxdi_register_mem4dma_params params;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_register_mem4dma_params, memref_id);
+	enum dma_data_direction dma_dir;
+	int rc;
+
+	/* access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	/* convert DMA direction to enum dma_data_direction */
+	dma_dir = dxdi_data_dir_to_dma_data_dir(params.memref.dma_direction);
+	if (unlikely(dma_dir == DMA_NONE)) {
+		pr_err("Invalid DMA direction (%d)\n",
+			    params.memref.dma_direction);
+		rc = -EINVAL;
+	} else {
+		params.memref_id = register_client_memref(client_ctx,
+				(u8 __user *)params.memref.start_or_offset,
+				NULL, params.memref.size, dma_dir);
+		if (unlikely(!IS_VALID_MEMREF_IDX(params.memref_id))) {
+			rc = -ENOMEM;
+		} else {
+			rc = put_user(params.memref_id,
+				      &(user_params->memref_id));
+			if (rc != 0)	/* revert if put_user failed */
+				(void)free_client_memref(client_ctx,
+							 params.memref_id);
+		}
+	}
+
+	return rc;
+}
+
+static int sep_ioctl_free_mem4dma(struct sep_client_ctx *client_ctx,
+				  unsigned long arg)
+{
+	struct dxdi_free_mem4dma_params __user *user_params =
+	    (struct dxdi_free_mem4dma_params __user *)arg;
+	int memref_id;
+	int err;
+
+	/* access permissions to arg were already checked in sep_ioctl */
+	err = __get_user(memref_id, &user_params->memref_id);
+	if (err) {
+		pr_err("Failed reading input parameter\n");
+		return -EFAULT;
+	}
+
+	return free_client_memref(client_ctx, memref_id);
+}
+#endif
+
+static int sep_ioctl_set_iv(struct sep_client_ctx *client_ctx,
+			    unsigned long arg)
+{
+	struct dxdi_aes_iv_params __user *user_params =
+	    (struct dxdi_aes_iv_params __user *)arg;
+	struct dxdi_aes_iv_params params;
+	struct host_crypto_ctx_sym_cipher __user *host_context;
+	struct crypto_ctx_uid uid;
+	int err;
+
+	/* Copy IV parameters from user context */
+	if (__copy_from_user(&params, user_params,
+			     sizeof(struct dxdi_aes_iv_params))) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+	host_context =
+	    (struct host_crypto_ctx_sym_cipher __user *)params.context_buf;
+
+	/* Copy ctx uid from user context */
+	if (copy_from_user(&uid, &host_context->uid,
+			   sizeof(struct crypto_ctx_uid))) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	err = ctxmgr_set_symcipher_iv_user(params.context_buf, params.iv_ptr);
+	if (err != 0)
+		return err;
+
+	ctxmgr_sep_cache_invalidate(client_ctx->drv_data->sep_cache,
+				    uid, CRYPTO_CTX_ID_SINGLE_MASK);
+
+	return 0;
+}
+
+static int sep_ioctl_get_iv(struct sep_client_ctx *client_ctx,
+			    unsigned long arg)
+{
+	struct dxdi_aes_iv_params __user *user_params =
+	    (struct dxdi_aes_iv_params __user *)arg;
+	struct dxdi_aes_iv_params params;
+	int err;
+
+	/* copy context ptr from user */
+	if (__copy_from_user(&params, user_params, sizeof(u32))) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	err = ctxmgr_get_symcipher_iv_user(params.context_buf,
+					   params.iv_ptr);
+	if (err != 0)
+		return err;
+
+	if (copy_to_user(user_params, &params,
+	    sizeof(struct dxdi_aes_iv_params))) {
+		pr_err("Failed writing input parameters");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/****** Driver entry points (open, release, ioctl, etc.) ******/
+
+/**
+ * init_client_ctx() - Initialize a client context object for the given queue
+ * @drvdata:	Queue driver context
+ * @client_ctx:	The client context to initialize
+ *
+ * Returns void
+ */
+void init_client_ctx(struct queue_drvdata *drvdata,
+		     struct sep_client_ctx *client_ctx)
+{
+	int i;
+	const unsigned int qid = drvdata - (drvdata->sep_data->queue);
+
+	/* Initialize user data structure */
+	client_ctx->qid = qid;
+	client_ctx->drv_data = drvdata;
+	atomic_set(&client_ctx->uid_cntr, 0);
+#if MAX_SEPAPP_SESSION_PER_CLIENT_CTX > 0
+	/* Initialize sessions */
+	for (i = 0; i < MAX_SEPAPP_SESSION_PER_CLIENT_CTX; i++) {
+		mutex_init(&client_ctx->sepapp_sessions[i].session_lock);
+		client_ctx->sepapp_sessions[i].sep_session_id =
+		    SEP_SESSION_ID_INVALID;
+		/* The rest of the fields are 0/NULL from kzalloc */
+	}
+#endif
+	/* Initialize memrefs */
+	for (i = 0; i < MAX_REG_MEMREF_PER_CLIENT_CTX; i++) {
+		mutex_init(&client_ctx->reg_memrefs[i].buf_lock);
+		/* The rest of the fields are 0/NULL from kzalloc */
+	}
+
+	init_waitqueue_head(&client_ctx->memref_wq);
+	client_ctx->memref_cnt = 0;
+}
+
+/**
+ * sep_open() - "open" device file entry point.
+ * @inode:	Device inode (its minor number selects the queue)
+ * @file:	File object to attach the new client context to
+ *
+ * Returns int
+ */
+static int sep_open(struct inode *inode, struct file *file)
+{
+	struct queue_drvdata *drvdata;
+	struct sep_client_ctx *client_ctx;
+	unsigned int qid;
+
+	drvdata = container_of(inode->i_cdev, struct queue_drvdata, cdev);
+
+	if (imajor(inode) != MAJOR(drvdata->sep_data->devt_base)) {
+		pr_err("Invalid major device num=%d\n", imajor(inode));
+		return -ENOENT;
+	}
+	qid = iminor(inode) - MINOR(drvdata->sep_data->devt_base);
+	if (qid >= drvdata->sep_data->num_of_desc_queues) {
+		pr_err("Invalid minor device num=%d\n", iminor(inode));
+		return -ENOENT;
+	}
+#ifdef DEBUG
+	/* The qid based on the minor device number must match the offset
+	 * of given drvdata in the queues array of the sep_data context */
+	if (qid != (drvdata - (drvdata->sep_data->queue))) {
+		pr_err("qid=%d but drvdata index is %d\n",
+			    qid, (int)(drvdata - (drvdata->sep_data->queue)));
+		return -EINVAL;
+	}
+#endif
+	pr_debug("qid=%d\n", qid);
+	pr_debug("calling kzalloc\n");
+	client_ctx = kzalloc(sizeof(*client_ctx), GFP_KERNEL);
+	if (client_ctx == NULL)
+		return -ENOMEM;
+	pr_debug("calling init_client_ctx\n");
+	init_client_ctx(drvdata, client_ctx);
+	pr_debug("after init_client_ctx\n");
+	file->private_data = client_ctx;
+	pr_debug("after private_data\n");
+	return 0;
+}
+
+void cleanup_client_ctx(struct queue_drvdata *drvdata,
+			struct sep_client_ctx *client_ctx)
+{
+	int memref_id;
+#if MAX_SEPAPP_SESSION_PER_CLIENT_CTX > 0
+	struct sep_op_ctx op_ctx;
+	int session_id;
+	struct crypto_ctx_uid uid;
+
+	/* Free any Applet session left open */
+	for (session_id = 0; session_id < MAX_SEPAPP_SESSION_PER_CLIENT_CTX;
+	     session_id++) {
+		if (IS_VALID_SESSION_CTX
+		    (&client_ctx->sepapp_sessions[session_id])) {
+			pr_debug("Closing session ID=%d\n", session_id);
+			op_ctx_init(&op_ctx, client_ctx);
+			sepapp_session_close(&op_ctx, session_id);
+			/* Note: There is never a problem with the session's
+			 * ref_cnt because when "release" is invoked there
+			 * are no pending IOCTLs, so ref_cnt is at most 1 */
+			op_ctx_fini(&op_ctx);
+		}
+		mutex_destroy(&client_ctx->sepapp_sessions[session_id].
+			      session_lock);
+	}
+#endif				/*MAX_SEPAPP_SESSION_PER_CLIENT_CTX > 0 */
+
+	/* Free registered user memory references */
+	for (memref_id = 0; memref_id < MAX_REG_MEMREF_PER_CLIENT_CTX;
+	     memref_id++) {
+		if (client_ctx->reg_memrefs[memref_id].ref_cnt > 0) {
+			pr_debug("Freeing user memref ID=%d\n", memref_id);
+			(void)free_client_memref(client_ctx, memref_id);
+		}
+		/* There is no problem with memref ref_cnt because when
+		 * "release" is invoked there are no pending IOCTLs,
+		 * so ref_cnt is at most 1                             */
+		mutex_destroy(&client_ctx->reg_memrefs[memref_id].buf_lock);
+	}
+
+	/* Invalidate any outstanding descriptors associated with this
+	 * client_ctx */
+	desc_q_mark_invalid_cookie(drvdata->desc_queue, (void *)client_ctx);
+
+	uid.addr = ((u64) (unsigned long)client_ctx);
+	uid.cntr = 0;
+
+	/* Invalidate any crypto context cache entry associated with this
+	 * client context before freeing the context data object, which may be
+	 * reused. This preserves UID uniqueness (and makes sense since all
+	 * associated contexts no longer exist) */
+	ctxmgr_sep_cache_invalidate(drvdata->sep_cache, uid,
+				    CRYPTO_CTX_ID_CLIENT_MASK);
+}
+
+static int sep_release(struct inode *inode, struct file *file)
+{
+	struct sep_client_ctx *client_ctx = file->private_data;
+	struct queue_drvdata *drvdata = client_ctx->drv_data;
+
+	cleanup_client_ctx(drvdata, client_ctx);
+
+	kfree(client_ctx);
+
+	return 0;
+}
+
+static ssize_t
+sep_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
+{
+	pr_debug("Invoked for %zu bytes", count);
+	return -ENOSYS;		/* nothing to read... IOCTL only */
+}
+
+/*!
+ * The SeP device does not support read/write.
+ * We use write for debug purposes: currently a loopback descriptor is sent
+ * the given number of times. Usage example: echo 10 > /dev/dx_sep_q0
+ * TODO: Move this functionality to sysfs?
+ *
+ * \param filp
+ * \param buf
+ * \param count
+ * \param ppos
+ *
+ * \return ssize_t
+ */
+static ssize_t
+sep_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos)
+{
+#ifdef DEBUG
+	struct sep_sw_desc desc;
+	struct sep_client_ctx *client_ctx = filp->private_data;
+	struct sep_op_ctx op_ctx;
+	unsigned int loop_times = 1;
+	unsigned int i;
+	int rc = 0;
+	char tmp_buf[80];
+
+	if (count > sizeof(tmp_buf) - 1)
+		return -ENOMEM;	/* Avoid buffer overflow */
+	/* Copy to a local buffer we can NULL terminate */
+	if (copy_from_user(tmp_buf, buf, count))
+		return -EFAULT;
+	tmp_buf[count] = 0;	/* NULL terminate */
+
+	/*pr_debug("Invoked for %zu bytes", count); */
+	if (sscanf(tmp_buf, "%u", &loop_times) != 1)
+		loop_times = 1;
+	pr_debug("Loopback X %u...\n", loop_times);
+
+	op_ctx_init(&op_ctx, client_ctx);
+	/* prepare loopback descriptor */
+	desq_q_pack_debug_desc(&desc, &op_ctx);
+
+	/* Perform loopback for given times */
+	for (i = 0; i < loop_times; i++) {
+		op_ctx.op_state = USER_OP_INPROC;
+		rc = desc_q_enqueue(client_ctx->drv_data->desc_queue, &desc,
+				    true);
+		if (unlikely(IS_DESCQ_ENQUEUE_ERR(rc))) {
+			pr_err("Failed sending desc. %u\n", i);
+			break;
+		}
+		rc = wait_for_sep_op_result(&op_ctx);
+		if (rc != 0) {
+			pr_err("Failed completion of desc. %u\n", i);
+			break;
+		}
+		op_ctx.op_state = USER_OP_NOP;
+	}
+
+	op_ctx_fini(&op_ctx);
+
+	pr_debug("Completed loopback of %u desc.\n", i);
+
+	return count;		/* Nothing to write for this device... */
+#else /* DEBUG */
+	pr_debug("Invoked for %zu bytes", count);
+	return -ENOSYS;		/* nothing to write... IOCTL only */
+#endif /* DEBUG */
+}
+
+/*!
+ * IOCTL entry point
+ *
+ * \param filp
+ * \param cmd
+ * \param arg
+ *
+ * \return int
+ * \retval 0 Operation succeeded (but SeP return code may indicate an error)
+ * \retval -ENOTTY  : Unknown IOCTL command
+ * \retval -ENOSYS  : Unsupported/not-implemented (known) operation
+ * \retval -EINVAL  : Invalid parameters
+ * \retval -EFAULT  : Bad pointers for given user memory space
+ * \retval -EPERM   : Not enough permissions for given command
+ * \retval -ENOMEM,-EAGAIN: when not enough resources available for given op.
+ * \retval -EIO     : SeP HW error or another internal error
+ *                    (probably operation timed out or unexpected behavior)
+ */
+long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	struct sep_client_ctx *client_ctx = filp->private_data;
+	unsigned long long ioctl_start, ioctl_end;
+	int err = 0;
+
+	preempt_disable_notrace();
+	ioctl_start = sched_clock();
+	preempt_enable_notrace();
+
+	/* Verify IOCTL command: magic + number */
+	if (_IOC_TYPE(cmd) != DXDI_IOC_MAGIC) {
+		pr_err("Invalid IOCTL type=%u", _IOC_TYPE(cmd));
+		return -ENOTTY;
+	}
+	if (_IOC_NR(cmd) > DXDI_IOC_NR_MAX) {
+		pr_err("IOCTL NR=%u out of range for ABI ver.=%u.%u",
+			    _IOC_NR(cmd), DXDI_VER_MAJOR, DXDI_VER_MINOR);
+		return -ENOTTY;
+	}
+
+	/* Verify permissions on parameters pointer (arg) */
+	if (_IOC_DIR(cmd) & _IOC_READ)
+		err = !access_ok(ACCESS_WRITE,
+				 (void __user *)arg, _IOC_SIZE(cmd));
+	else if (_IOC_DIR(cmd) & _IOC_WRITE)
+		err = !access_ok(ACCESS_READ,
+				 (void __user *)arg, _IOC_SIZE(cmd));
+	if (err)
+		return -EFAULT;
+
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_get();
+#endif
+
+	switch (_IOC_NR(cmd)) {
+		/* Version info. commands */
+	case DXDI_IOC_NR_GET_VER_MAJOR:
+		pr_debug("DXDI_IOC_NR_GET_VER_MAJOR\n");
+		err = sep_ioctl_get_ver_major(arg);
+		break;
+	case DXDI_IOC_NR_GET_VER_MINOR:
+		pr_debug("DXDI_IOC_NR_GET_VER_MINOR\n");
+		err = sep_ioctl_get_ver_minor(arg);
+		break;
+		/* Context size queries */
+	case DXDI_IOC_NR_GET_SYMCIPHER_CTX_SIZE:
+		pr_debug("DXDI_IOC_NR_GET_SYMCIPHER_CTX_SIZE\n");
+		err = sep_ioctl_get_sym_cipher_ctx_size(arg);
+		break;
+	case DXDI_IOC_NR_GET_AUTH_ENC_CTX_SIZE:
+		pr_debug("DXDI_IOC_NR_GET_AUTH_ENC_CTX_SIZE\n");
+		err = sep_ioctl_get_auth_enc_ctx_size(arg);
+		break;
+	case DXDI_IOC_NR_GET_MAC_CTX_SIZE:
+		pr_debug("DXDI_IOC_NR_GET_MAC_CTX_SIZE\n");
+		err = sep_ioctl_get_mac_ctx_size(arg);
+		break;
+	case DXDI_IOC_NR_GET_HASH_CTX_SIZE:
+		pr_debug("DXDI_IOC_NR_GET_HASH_CTX_SIZE\n");
+		err = sep_ioctl_get_hash_ctx_size(arg);
+		break;
+		/* Init context commands */
+	case DXDI_IOC_NR_SYMCIPHER_INIT:
+		pr_debug("DXDI_IOC_NR_SYMCIPHER_INIT\n");
+		err = sep_ioctl_sym_cipher_init(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_AUTH_ENC_INIT:
+		pr_debug("DXDI_IOC_NR_AUTH_ENC_INIT\n");
+		err = sep_ioctl_auth_enc_init(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_MAC_INIT:
+		pr_debug("DXDI_IOC_NR_MAC_INIT\n");
+		err = sep_ioctl_mac_init(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_HASH_INIT:
+		pr_debug("DXDI_IOC_NR_HASH_INIT\n");
+		err = sep_ioctl_hash_init(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_COMBINED_INIT:
+		pr_debug("DXDI_IOC_NR_COMBINED_INIT\n");
+		err = sep_ioctl_combined_init(client_ctx, arg);
+		break;
+		/* Processing commands */
+	case DXDI_IOC_NR_PROC_DBLK:
+		pr_debug("DXDI_IOC_NR_PROC_DBLK\n");
+		err = sep_ioctl_proc_dblk(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_COMBINED_PROC_DBLK:
+		pr_debug("DXDI_IOC_NR_COMBINED_PROC_DBLK\n");
+		err = sep_ioctl_combined_proc_dblk(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_FIN_PROC:
+		pr_debug("DXDI_IOC_NR_FIN_PROC\n");
+		err = sep_ioctl_fin_proc(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_COMBINED_PROC_FIN:
+		pr_debug("DXDI_IOC_NR_COMBINED_PROC_FIN\n");
+		err = sep_ioctl_combined_fin_proc(client_ctx, arg);
+		break;
+		/* "Integrated" processing operations */
+	case DXDI_IOC_NR_SYMCIPHER_PROC:
+		pr_debug("DXDI_IOC_NR_SYMCIPHER_PROC\n");
+		err = sep_ioctl_sym_cipher_proc(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_AUTH_ENC_PROC:
+		pr_debug("DXDI_IOC_NR_AUTH_ENC_PROC\n");
+		err = sep_ioctl_auth_enc_proc(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_MAC_PROC:
+		pr_debug("DXDI_IOC_NR_MAC_PROC\n");
+		err = sep_ioctl_mac_proc(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_HASH_PROC:
+		pr_debug("DXDI_IOC_NR_HASH_PROC\n");
+		err = sep_ioctl_hash_proc(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_COMBINED_PROC:
+		pr_debug("DXDI_IOC_NR_COMBINED_PROC\n");
+		err = sep_ioctl_combined_proc(client_ctx, arg);
+		break;
+		/* SeP RPC */
+	case DXDI_IOC_NR_SEP_RPC:
+		err = sep_ioctl_sep_rpc(client_ctx, arg);
+		break;
+#if MAX_SEPAPP_SESSION_PER_CLIENT_CTX > 0
+		/* Memory registration */
+	case DXDI_IOC_NR_REGISTER_MEM4DMA:
+		pr_debug("DXDI_IOC_NR_REGISTER_MEM4DMA\n");
+		err = sep_ioctl_register_mem4dma(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_ALLOC_MEM4DMA:
+		pr_err("DXDI_IOC_NR_ALLOC_MEM4DMA: Not supported, yet");
+		err = -ENOTTY;
+		break;
+	case DXDI_IOC_NR_FREE_MEM4DMA:
+		pr_debug("DXDI_IOC_NR_FREE_MEM4DMA\n");
+		err = sep_ioctl_free_mem4dma(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_SEPAPP_SESSION_OPEN:
+		pr_debug("DXDI_IOC_NR_SEPAPP_SESSION_OPEN\n");
+		err = sep_ioctl_sepapp_session_open(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_SEPAPP_SESSION_CLOSE:
+		pr_debug("DXDI_IOC_NR_SEPAPP_SESSION_CLOSE\n");
+		err = sep_ioctl_sepapp_session_close(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_SEPAPP_COMMAND_INVOKE:
+		pr_debug("DXDI_IOC_NR_SEPAPP_COMMAND_INVOKE\n");
+		err = sep_ioctl_sepapp_command_invoke(client_ctx, arg);
+		break;
+#endif
+	case DXDI_IOC_NR_SET_IV:
+		pr_debug("DXDI_IOC_NR_SET_IV\n");
+		err = sep_ioctl_set_iv(client_ctx, arg);
+		break;
+	case DXDI_IOC_NR_GET_IV:
+		pr_debug("DXDI_IOC_NR_GET_IV\n");
+		err = sep_ioctl_get_iv(client_ctx, arg);
+		break;
+	default:/* Not supposed to happen - we already tested for NR range */
+		pr_err("bad IOCTL cmd 0x%08X\n", cmd);
+		err = -ENOTTY;
+	}
+	pr_debug("sep_ioctl error: %d\n", err); 
+	/* Update stats per IOCTL command */
+	if (err == 0) {
+		preempt_disable_notrace();
+		ioctl_end = sched_clock();
+		preempt_enable_notrace();
+		sysfs_update_drv_stats(client_ctx->qid, _IOC_NR(cmd),
+				       ioctl_start, ioctl_end);
+	}
+
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_put();
+#endif
+
+	return err;
+}
+
+static const struct file_operations sep_fops = {
+	.owner = THIS_MODULE,
+	.open = sep_open,
+	.release = sep_release,
+	.read = sep_read,
+	.write = sep_write,
+	.unlocked_ioctl = sep_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = sep_compat_ioctl,
+#endif
+};
+
+/**
+ * get_q_cache_size() - Get the number of entries to allocate for the SeP/FW
+ *			cache of given queue
+ * @drvdata:	 Driver context
+ * @qid:	 The queue to allocate for
+ *
+ * Get the number of entries to allocate for the SeP/FW cache of given queue
+ * The function assumes that num_of_desc_queues and num_of_sep_cache_entries
+ * are already initialized in drvdata.
+ * Returns Number of cache entries to allocate
+ */
+static int get_q_cache_size(struct sep_drvdata *drvdata, int qid)
+{
+	/* Simple allocation - divide evenly among queues */
+	/* consider preferring higher priority queues...   */
+	return drvdata->num_of_sep_cache_entries / drvdata->num_of_desc_queues;
+}
+
+/**
+ * enable_descq_interrupt() - Enable interrupt for given queue (GPR)
+ * @drvdata:	Driver context
+ * @qid:	The queue to enable interrupts for
+ *
+ * Returns void
+ */
+static void enable_descq_interrupt(struct sep_drvdata *drvdata, int qid)
+{
+
+	/* clear pending interrupts in GPRs of SW-queues
+	 * (leftovers from init writes to GPRs..) */
+	WRITE_REGISTER(drvdata->cc_base + DX_CC_REG_OFFSET(HOST, ICR),
+		       gpr_interrupt_mask[qid]);
+
+	drvdata->irq_mask |= gpr_interrupt_mask[qid];
+	/* update IMR - a set bit masks its interrupt, so write the complement */
+	WRITE_REGISTER(drvdata->cc_base + DX_CC_REG_OFFSET(HOST, IMR),
+		       ~drvdata->irq_mask);
+}
+
+/**
+ * alloc_host_mem_for_sep() - Allocate memory pages for sep icache/dcache
+ *	or for SEP backup memory in case there is no SEP cache memory.
+ *
+ * @drvdata:
+ *
+ * Currently using alloc_pages to allocate the pages.
+ * Consider using CMA feature for the memory allocation
+ */
+static int alloc_host_mem_for_sep(struct sep_drvdata *drvdata)
+{
+#ifdef CACHE_IMAGE_NAME
+	int i;
+	const int icache_sizes_enum2log[] = DX_CC_ICACHE_SIZE_ENUM2LOG;
+
+	pr_debug("icache_size=%uKB dcache_size=%uKB\n",
+		 1 << (icache_size_log2 - 10),
+		 1 << (dcache_size_log2 - 10));
+
+	/* Verify validity of chosen cache memory sizes */
+	if ((dcache_size_log2 > DX_CC_INIT_D_CACHE_MAX_SIZE_LOG2) ||
+	    (dcache_size_log2 < DX_CC_INIT_D_CACHE_MIN_SIZE_LOG2)) {
+		pr_err("Requested Dcache size (%uKB) is invalid\n",
+		       1 << (dcache_size_log2 - 10));
+		return -EINVAL;
+	}
+	/* Icache size must be one of values defined for this device */
+	for (i = 0; i < ARRAY_SIZE(icache_sizes_enum2log); i++)
+		if ((icache_size_log2 == icache_sizes_enum2log[i]) &&
+		    (icache_sizes_enum2log[i] >= 0))
+			/* Found valid value */
+			break;
+	if (unlikely(i == ARRAY_SIZE(icache_sizes_enum2log))) {
+		pr_err("Requested Icache size (%uKB) is invalid\n",
+		       1 << (icache_size_log2 - 10));
+		return -EINVAL;
+	}
+	drvdata->icache_size_log2 = icache_size_log2;
+	/* Allocate pages suitable for 32bit DMA and out of cache (cold) */
+	drvdata->icache_pages = alloc_pages(GFP_KERNEL | GFP_DMA32 | __GFP_COLD,
+					    icache_size_log2 - PAGE_SHIFT);
+	if (drvdata->icache_pages == NULL) {
+		pr_err("Failed allocating %uKB for Icache\n",
+			    1 << (icache_size_log2 - 10));
+		return -ENOMEM;
+	}
+	drvdata->dcache_size_log2 = dcache_size_log2;
+	/* same as for icache */
+	drvdata->dcache_pages = alloc_pages(GFP_KERNEL | GFP_DMA32 | __GFP_COLD,
+					    dcache_size_log2 - PAGE_SHIFT);
+	if (drvdata->dcache_pages == NULL) {
+		pr_err("Failed allocating %uKB for Dcache\n",
+		       1 << (dcache_size_log2 - 10));
+		__free_pages(drvdata->icache_pages,
+			     drvdata->icache_size_log2 - PAGE_SHIFT);
+		return -ENOMEM;
+	}
+#elif defined(SEP_BACKUP_BUF_SIZE)
+	/* This size is not enforced by power of two, so we use
+	 * alloc_pages_exact() */
+	drvdata->sep_backup_buf = alloc_pages_exact(SEP_BACKUP_BUF_SIZE,
+						    GFP_KERNEL | GFP_DMA32 |
+						    __GFP_COLD);
+	if (unlikely(drvdata->sep_backup_buf == NULL)) {
+		pr_err("Failed allocating %d B for SEP backup buffer\n",
+		       SEP_BACKUP_BUF_SIZE);
+		return -ENOMEM;
+	}
+	drvdata->sep_backup_buf_size = SEP_BACKUP_BUF_SIZE;
+#endif
+	return 0;
+}
+
+/**
+ * free_host_mem_for_sep() - Free the memory resources allocated by
+ *	alloc_host_mem_for_sep()
+ *
+ * @drvdata:
+ */
+static void free_host_mem_for_sep(struct sep_drvdata *drvdata)
+{
+#ifdef CACHE_IMAGE_NAME
+	if (drvdata->dcache_pages != NULL) {
+		__free_pages(drvdata->dcache_pages,
+			     drvdata->dcache_size_log2 - PAGE_SHIFT);
+		drvdata->dcache_pages = NULL;
+	}
+	if (drvdata->icache_pages != NULL) {
+		__free_pages(drvdata->icache_pages,
+			     drvdata->icache_size_log2 - PAGE_SHIFT);
+		drvdata->icache_pages = NULL;
+	}
+#elif defined(SEP_BACKUP_BUF_SIZE)
+	if (drvdata->sep_backup_buf != NULL) {
+		free_pages_exact(drvdata->sep_backup_buf,
+				 drvdata->sep_backup_buf_size);
+		drvdata->sep_backup_buf_size = 0;
+		drvdata->sep_backup_buf = NULL;
+	}
+#endif
+}
+
+static int emmc_match(struct device *dev, const void *data)
+{
+	if (strcmp(dev_name(dev), data) == 0)
+		return 1;
+	return 0;
+}
+
+static int mmc_blk_rpmb_req_handle(struct mmc_ioc_rpmb_req *req)
+{
+#define EMMC_BLK_NAME   "mmcblk0rpmb"
+
+	struct device *emmc = NULL;
+
+	if (!req)
+		return -EINVAL;
+
+	emmc = class_find_device(&block_class, NULL, EMMC_BLK_NAME, emmc_match);
+	if (!emmc) {
+		pr_err("eMMC reg failed\n");
+		return -ENODEV;
+	}
+
+	return mmc_rpmb_req_handle(emmc, req);
+}
+
+static int rpmb_agent(void *unused)
+{
+#define AGENT_TIMEOUT_MS (1000 * 60 * 5) /* 5 minutes */
+
+#define AUTH_DAT_WR_REQ 0x0003
+#define AUTH_DAT_RD_REQ 0x0004
+
+#define RPMB_FRAME_LENGTH      512
+#define RPMB_MAC_KEY_LENGTH     32
+#define RPMB_NONCE_LENGTH       16
+#define RPMB_DATA_LENGTH       256
+#define RPMB_STUFFBYTES_LENGTH 196
+#define RPMB_COUNTER_LENGTH      4
+#define RPMB_ADDR_LENGTH         2
+#define RPMB_BLKCNT_LENGTH       2
+#define RPMB_RESULT_LENGTH       2
+#define RPMB_RSPREQ_LENGTH       2
+
+#define RPMB_STUFFBYTES_OFFSET 0
+#define RPMB_MAC_KEY_OFFSET   (RPMB_STUFFBYTES_OFFSET + RPMB_STUFFBYTES_LENGTH)
+#define RPMB_DATA_OFFSET      (RPMB_MAC_KEY_OFFSET + RPMB_MAC_KEY_LENGTH)
+#define RPMB_NONCE_OFFSET     (RPMB_DATA_OFFSET + RPMB_DATA_LENGTH)
+#define RPMB_COUNTER_OFFSET   (RPMB_NONCE_OFFSET + RPMB_NONCE_LENGTH)
+#define RPMB_ADDR_OFFSET      (RPMB_COUNTER_OFFSET + RPMB_COUNTER_LENGTH)
+#define RPMB_BLKCNT_OFFSET    (RPMB_ADDR_OFFSET + RPMB_ADDR_LENGTH)
+#define RPMB_RESULT_OFFSET    (RPMB_BLKCNT_OFFSET + RPMB_BLKCNT_LENGTH)
+#define RPMB_RSPREQ_OFFSET    (RPMB_RESULT_OFFSET + RPMB_RESULT_LENGTH)
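+
+	/*
+	 * For reference, the offset macros above yield the 512-byte RPMB
+	 * frame layout (each range derived directly from the length macros):
+	 *   [  0..195] stuff bytes   [196..227] key/MAC     [228..483] data
+	 *   [484..499] nonce         [500..503] write cntr  [504..505] address
+	 *   [506..507] block count   [508..509] result      [510..511] req/resp
+	 */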
+
+	int ret = 0;
+	u32 tmp = 0;
+	u32 max_buf_size = 0;
+	u8 in_buf[RPMB_FRAME_LENGTH];
+	u8 *out_buf = NULL;
+	u32 in_buf_size = RPMB_FRAME_LENGTH;
+	u32 timeout = INT_MAX;
+	/* structure to pass to the eMMC driver's RPMB API */
+	struct mmc_ioc_rpmb_req req2emmc;
+
+	ret = dx_sep_req_register_agent(RPMB_AGENT_ID, &max_buf_size);
+	if (ret) {
+		pr_err("Failed registering RPMB agent (%d)\n", ret);
+		return -EINVAL;
+	}
+
+	out_buf = kmalloc(RPMB_FRAME_LENGTH, GFP_KERNEL);
+	if (!out_buf) {
+		pr_err("Failed allocating RPMB response buffer\n");
+		return -ENOMEM;
+	}
+
+	while (1) {
+		/* Block until called by SEP */
+		do {
+			pr_info("RPMB AGENT BLOCKED\n");
+			ret = dx_sep_req_wait_for_request(RPMB_AGENT_ID,
+					in_buf, &in_buf_size, timeout);
+		} while (ret == -EAGAIN);
+
+		if (ret) {
+			pr_err("WAIT FAILED %d\n", ret);
+			break;
+		}
+
+		pr_info("RPMB AGENT UNBLOCKED\n");
+
+		/* Process request */
+		memset(&req2emmc, 0x00, sizeof(struct mmc_ioc_rpmb_req));
+
+		/* Copy from the incoming buffer into variables, swapping
+		 * endianness where needed */
+		req2emmc.addr = *((u16 *)(in_buf+RPMB_ADDR_OFFSET));
+		req2emmc.addr = be16_to_cpu(req2emmc.addr);
+		/* Only single-block transfers are supported */
+		req2emmc.blk_cnt = 1;
+		req2emmc.data = in_buf+RPMB_DATA_OFFSET;
+		req2emmc.mac = in_buf+RPMB_MAC_KEY_OFFSET;
+		req2emmc.nonce = in_buf+RPMB_NONCE_OFFSET;
+		req2emmc.result = (u16 *)(in_buf+RPMB_RESULT_OFFSET);
+		req2emmc.type = *((u16 *)(in_buf+RPMB_RSPREQ_OFFSET));
+		req2emmc.type = be16_to_cpu(req2emmc.type);
+		req2emmc.wc = (u32 *)(in_buf+RPMB_COUNTER_OFFSET);
+		*req2emmc.wc = be32_to_cpu(*req2emmc.wc);
+
+		/* Send request to eMMC driver */
+		ret = mmc_blk_rpmb_req_handle(&req2emmc);
+		if (ret) {
+			pr_err("mmc_blk_rpmb_req_handle failed %d\n", ret);
+			/* If access to the eMMC driver failed, send back
+			 * an artificial error */
+			req2emmc.type = 0x0008;
+		}
+
+		/* Rebuild RPMB from response */
+		memset(out_buf, 0, RPMB_FRAME_LENGTH);
+
+		if (req2emmc.type == AUTH_DAT_RD_REQ) {
+			pr_info("READ OPERATION RETURN\n");
+			memcpy(out_buf+RPMB_DATA_OFFSET,
+					req2emmc.data,  RPMB_DATA_LENGTH);
+			memcpy(out_buf+RPMB_NONCE_OFFSET,
+					req2emmc.nonce, RPMB_NONCE_LENGTH);
+
+			out_buf[RPMB_BLKCNT_OFFSET]   = req2emmc.blk_cnt >> 8;
+			out_buf[RPMB_BLKCNT_OFFSET+1] = req2emmc.blk_cnt;
+		} else {
+			pr_info("WRITE OPERATION RETURN\n");
+			memcpy(&tmp, req2emmc.wc, RPMB_COUNTER_LENGTH);
+			tmp = cpu_to_be32(tmp);
+			memcpy(out_buf+RPMB_COUNTER_OFFSET,
+					&tmp, RPMB_COUNTER_LENGTH);
+		}
+
+		memcpy(out_buf+RPMB_MAC_KEY_OFFSET,
+				req2emmc.mac,    RPMB_MAC_KEY_LENGTH);
+		memcpy(out_buf+RPMB_RESULT_OFFSET,
+				req2emmc.result, RPMB_RESULT_LENGTH);
+
+		memcpy(out_buf+RPMB_RSPREQ_OFFSET,
+				&req2emmc.type, RPMB_RSPREQ_LENGTH);
+		out_buf[RPMB_ADDR_OFFSET]   = req2emmc.addr >> 8;
+		out_buf[RPMB_ADDR_OFFSET+1] = req2emmc.addr;
+
+		/* Send response */
+		ret = dx_sep_req_send_response(RPMB_AGENT_ID,
+				out_buf, RPMB_FRAME_LENGTH);
+		if (ret) {
+			pr_err("dx_sep_req_send_response failed %d\n", ret);
+			break;
+		}
+	}
+
+	kfree(out_buf);
+
+	return ret;
+}
+
+static int sep_setup(struct device *dev,
+		     const struct resource *regs_res,
+		     struct resource *r_irq)
+{
+	dev_t devt;
+	struct sep_drvdata *drvdata = NULL;
+	enum dx_sep_state sep_state;
+	int rc = 0;
+	int i, init_flag = INIT_FW_FLAG;
+	/* Create kernel thread for RPMB agent */
+	static struct task_struct *rpmb_thread;
+	char thread_name[] = "rpmb_agent";
+	int sess_id = 0;
+	enum dxdi_sep_module ret_origin;
+	struct sep_client_ctx *sctx = NULL;
+	u8 uuid[16] = DEFAULT_APP_UUID;
+
+	pr_info("Discretix %s Driver initializing...\n", DRIVER_NAME);
+
+	drvdata = kzalloc(sizeof(struct sep_drvdata), GFP_KERNEL);
+	if (unlikely(drvdata == NULL)) {
+		pr_err("Unable to allocate device private record\n");
+		rc = -ENOMEM;
+		goto failed0;
+	}
+	dev_set_drvdata(dev, (void *)drvdata);
+
+	if (!regs_res) {
+		pr_err("Couldn't get registers resource\n");
+		rc = -EFAULT;
+		goto failed1;
+	}
+
+	if (q_num > SEP_MAX_NUM_OF_DESC_Q) {
+		pr_err("Requested number of queues (%u) is out of range; must be no more than %u\n",
+		       q_num, SEP_MAX_NUM_OF_DESC_Q);
+		rc = -EINVAL;
+		goto failed1;
+	}
+
+	/* TODO: Verify number of queues also with SeP capabilities */
+	/* Initialize objects arrays for proper cleanup in case of error */
+	for (i = 0; i < SEP_MAX_NUM_OF_DESC_Q; i++) {
+		drvdata->queue[i].desc_queue = DESC_Q_INVALID_HANDLE;
+		drvdata->queue[i].sep_cache = SEP_CTX_CACHE_NULL_HANDLE;
+	}
+
+	drvdata->mem_start = regs_res->start;
+	drvdata->mem_end = regs_res->end;
+	drvdata->mem_size = regs_res->end - regs_res->start + 1;
+
+	if (!request_mem_region(drvdata->mem_start,
+				drvdata->mem_size, DRIVER_NAME)) {
+		pr_err("Couldn't lock memory region at %Lx\n",
+			    (unsigned long long)regs_res->start);
+		rc = -EBUSY;
+		goto failed1;
+	}
+
+	/* Restrict DMA to the lower 4 GB of memory (32-bit mask) */
+	if (!dma_set_mask(dev, DMA_BIT_MASK(32)))
+		dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+	else
+		pr_warn("sep54: No suitable DMA available\n");
+
+	drvdata->dev = dev;
+
+	drvdata->cc_base = ioremap(drvdata->mem_start, drvdata->mem_size);
+	if (drvdata->cc_base == NULL) {
+		pr_err("ioremap() failed\n");
+		rc = -ENOMEM;
+		goto failed2;
+	}
+
+	pr_info("regbase_phys=0x%llx..0x%llx\n",
+		(unsigned long long)drvdata->mem_start,
+		(unsigned long long)drvdata->mem_end);
+	pr_info("regbase_virt=0x%p\n", drvdata->cc_base);
+
+#ifdef DX_BASE_ENV_REGS
+	pr_info("FPGA ver. = UNKNOWN\n");
+	/* TODO: verify FPGA version against expected version */
+#endif
+
+#ifdef SEP_PRINTF
+	/* Sync. host to SeP initialization counter */
+	/* After setting the interrupt mask, the interrupt from the GPR would
+	 * trigger host_printf_handler to ack this value */
+	drvdata->last_ack_cntr = SEP_PRINTF_ACK_SYNC_VAL;
+#endif
+
+	dx_sep_power_init(drvdata);
+
+	/* Interrupt handler setup */
+#ifdef SEP_INTERRUPT_BY_TIMER
+	init_timer(&drvdata->delegate);
+	drvdata->delegate.function = sep_timer;
+	drvdata->delegate.data = (unsigned long)drvdata;
+	mod_timer(&drvdata->delegate, jiffies);
+
+#else				/* IRQ handler setup */
+	/* Initialize IMR (mask) before registering interrupt handler */
+	/* Enable only state register interrupt */
+	drvdata->irq_mask = SEP_HOST_GPR_IRQ_MASK(DX_SEP_STATE_GPR_IDX);
+#ifdef SEP_PRINTF
+	/* Enable interrupt from host_printf GPR */
+	drvdata->irq_mask |= SEP_HOST_GPR_IRQ_MASK(DX_SEP_HOST_PRINTF_GPR_IDX);
+#endif
+	WRITE_REGISTER(drvdata->cc_base + DX_CC_REG_OFFSET(HOST, IMR),
+		       ~drvdata->irq_mask);
+	/* The GPRs interrupts are set only after sep_init is done to avoid
+	 * "garbage" interrupts as a result of the FW init process */
+	drvdata->irq = r_irq->start;
+	rc = request_irq(drvdata->irq, sep_interrupt,
+			 IRQF_SHARED, DRIVER_NAME, drvdata->dev);
+	if (unlikely(rc != 0)) {
+		pr_err("Could not allocate interrupt %d\n", drvdata->irq);
+		goto failed3;
+	}
+	pr_info("%s at 0x%p mapped to interrupt %d\n",
+		DRIVER_NAME, drvdata->cc_base, drvdata->irq);
+
+#endif				/*SEP_INTERRUPT_BY_TIMER */
+
+	/* SeP FW initialization sequence */
+	/* Cold boot before creating descQ objects */
+	sep_state = GET_SEP_STATE(drvdata);
+	if (sep_state != DX_SEP_STATE_DONE_COLD_BOOT) {
+		pr_debug("sep_state=0x%08X\n", sep_state);
+		/* If INIT_CC was not done externally, take care of it here */
+		rc = alloc_host_mem_for_sep(drvdata);
+		if (unlikely(rc != 0))
+			goto failed4;
+		rc = sepinit_do_cc_init(drvdata);
+		if (unlikely(rc != 0))
+			goto failed5;
+	}
+
+	if (sep_state == DX_SEP_STATE_DONE_FW_INIT) {
+		/* If FW init was already done, switch to the reload-driver
+		 * state */
+		rc = sepinit_reload_driver_state(drvdata);
+		if (unlikely(rc != 0))
+			goto failed5;
+	}
+
+	sepinit_get_fw_props(drvdata);
+	if (drvdata->fw_ver != EXPECTED_FW_VER) {
+		pr_warn("Expected FW version %u.%u.%u but got %u.%u.%u\n",
+			     VER_MAJOR(EXPECTED_FW_VER),
+			     VER_MINOR(EXPECTED_FW_VER),
+			     VER_PATCH(EXPECTED_FW_VER),
+			     VER_MAJOR(drvdata->fw_ver),
+			     VER_MINOR(drvdata->fw_ver),
+			     VER_PATCH(drvdata->fw_ver));
+	}
+
+	if (q_num > drvdata->num_of_desc_queues) {
+		pr_err("Requested number of queues (%u) exceeds what SEP can support (%u)\n",
+		       q_num, drvdata->num_of_desc_queues);
+		rc = -EINVAL;
+		goto failed5;
+	}
+
+	if (q_num == 0) {
+		if (drvdata->num_of_desc_queues > SEP_MAX_NUM_OF_DESC_Q) {
+			pr_info("SEP reports more queues (%u) than the driver supports (%u)\n",
+				drvdata->num_of_desc_queues,
+				SEP_MAX_NUM_OF_DESC_Q);
+			q_num = SEP_MAX_NUM_OF_DESC_Q;
+		} else {
+			q_num = drvdata->num_of_desc_queues;
+		}
+	}
+	drvdata->num_of_desc_queues = q_num;
+
+	pr_info("q_num=%d\n", drvdata->num_of_desc_queues);
+
+	rc = dx_sep_req_init(drvdata);
+	if (unlikely(rc != 0))
+		goto failed6;
+
+	/* Create descriptor queues objects - must be before
+	 *  sepinit_set_fw_init_params to assure GPRs from host are reset */
+	for (i = 0; i < drvdata->num_of_desc_queues; i++) {
+		drvdata->queue[i].sep_data = drvdata;
+		mutex_init(&drvdata->queue[i].desc_queue_sequencer);
+		drvdata->queue[i].desc_queue =
+		    desc_q_create(i, &drvdata->queue[i], sep_state);
+		if (drvdata->queue[i].desc_queue == DESC_Q_INVALID_HANDLE) {
+			pr_err("Unable to allocate desc_q object (%d)\n", i);
+			rc = -ENOMEM;
+			goto failed7;
+		}
+	}
+
+	/* Create context cache management objects */
+	for (i = 0; i < drvdata->num_of_desc_queues; i++) {
+		const int num_of_cache_entries = get_q_cache_size(drvdata, i);
+		if (num_of_cache_entries < 1) {
+			pr_err("No SeP cache entries were assigned for qid=%d",
+			       i);
+			rc = -ENOMEM;
+			goto failed7;
+		}
+		drvdata->queue[i].sep_cache =
+		    ctxmgr_sep_cache_create(num_of_cache_entries);
+		if (drvdata->queue[i].sep_cache == SEP_CTX_CACHE_NULL_HANDLE) {
+			pr_err("Unable to allocate SeP cache object (%d)\n", i);
+			rc = -ENOMEM;
+			goto failed7;
+		}
+	}
+
+	if (sep_state != DX_SEP_STATE_DONE_FW_INIT) {
+		rc = sepinit_do_fw_init(drvdata, init_flag);
+	} else {
+		init_flag = INIT_SEP_SWQ_FLAG;
+		/* FW init was already done; update the descriptor counters
+		 * of the queues */
+		for (i = 0; i < drvdata->num_of_desc_queues; i++)
+			desc_q_cntr_set(drvdata->queue[i].desc_queue);
+
+		rc = sepinit_do_fw_init(drvdata, init_flag);
+	}
+
+	if (unlikely(rc != 0))
+		goto failed7;
+
+	drvdata->llimgr = llimgr_create(drvdata->dev, drvdata->mlli_table_size);
+	if (drvdata->llimgr == LLIMGR_NULL_HANDLE) {
+		pr_err("Failed creating LLI-manager object\n");
+		rc = -ENOMEM;
+		goto failed7;
+	}
+
+	drvdata->spad_buf_pool = dma_pool_create("dx_sep_rpc_msg", drvdata->dev,
+						 USER_SPAD_SIZE,
+						 L1_CACHE_BYTES, 0);
+	if (drvdata->spad_buf_pool == NULL) {
+		pr_err("Failed allocating DMA pool for RPC messages\n");
+		rc = -ENOMEM;
+		goto failed8;
+	}
+
+	/* Add character device nodes */
+	rc = alloc_chrdev_region(&drvdata->devt_base, 0, SEP_DEVICES,
+				 DRIVER_NAME);
+	if (unlikely(rc != 0))
+		goto failed9;
+	pr_debug("Allocated %u chrdevs at %u:%u\n", SEP_DEVICES,
+		 MAJOR(drvdata->devt_base), MINOR(drvdata->devt_base));
+	for (i = 0; i < drvdata->num_of_desc_queues; i++) {
+		devt = MKDEV(MAJOR(drvdata->devt_base),
+			     MINOR(drvdata->devt_base) + i);
+		cdev_init(&drvdata->queue[i].cdev, &sep_fops);
+		drvdata->queue[i].cdev.owner = THIS_MODULE;
+		rc = cdev_add(&drvdata->queue[i].cdev, devt, 1);
+		if (unlikely(rc != 0)) {
+			pr_err("cdev_add() failed for q%d\n", i);
+			goto failed9;
+		}
+		drvdata->queue[i].dev = device_create(sep_class, dev, devt,
+						      &drvdata->queue[i],
+						      "%s%d",
+						      DEVICE_NAME_PREFIX, i);
+		drvdata->queue[i].devt = devt;
+	}
+
+	rc = sep_setup_sysfs(&(dev->kobj), drvdata);
+	if (unlikely(rc != 0))
+		goto failed9;
+
+#ifndef SEP_INTERRUPT_BY_TIMER
+	/* Everything is ready - enable interrupts of desc-Qs */
+	for (i = 0; i < drvdata->num_of_desc_queues; i++)
+		enable_descq_interrupt(drvdata, i);
+#endif
+
+	/* Enable sep request interrupt handling */
+	dx_sep_req_enable(drvdata);
+
+	/* Init DX Linux crypto module */
+	if (!disable_linux_crypto) {
+		rc = dx_crypto_api_init(drvdata);
+		if (unlikely(rc != 0))
+			goto failed10;
+		rc = hwk_init();
+		if (unlikely(rc != 0))
+			goto failed10;
+	}
+#if MAX_SEPAPP_SESSION_PER_CLIENT_CTX > 0
+	dx_sepapp_init(drvdata);
+#endif
+
+	rpmb_thread = kthread_create(rpmb_agent, NULL, thread_name);
+	if (IS_ERR(rpmb_thread)) {
+		/* kthread_create() returns ERR_PTR(), never NULL, on failure */
+		pr_err("RPMB agent thread create failed\n");
+		rc = PTR_ERR(rpmb_thread);
+		goto failed10;
+	}
+	wake_up_process(rpmb_thread);
+
+	/* Inform SEP RPMB driver that it can enable RPMB access again */
+	sctx = dx_sepapp_context_alloc();
+	if (unlikely(!sctx)) {
+		rc = -ENOMEM;
+		goto failed10;
+	}
+
+	rc = dx_sepapp_session_open(sctx, uuid, 0, NULL, NULL, &sess_id,
+				    &ret_origin);
+	if (unlikely(rc != 0))
+		goto failed11;
+	rc = dx_sepapp_command_invoke(sctx, sess_id, CMD_RPMB_ENABLE, NULL,
+				      &ret_origin);
+	if (unlikely(rc != 0))
+		goto failed11;
+
+	rc = dx_sepapp_session_close(sctx, sess_id);
+	if (unlikely(rc != 0))
+		goto failed11;
+
+	dx_sepapp_context_free(sctx);
+
+	return 0;
+
+/* Error cases cleanup */
+ failed11:
+	dx_sepapp_context_free(sctx);
+ failed10:
+	/* Disable interrupts */
+	WRITE_REGISTER(drvdata->cc_base + DX_CC_REG_OFFSET(HOST, IMR), ~0);
+	sep_free_sysfs();
+ failed9:
+	dma_pool_destroy(drvdata->spad_buf_pool);
+ failed8:
+	llimgr_destroy(drvdata->llimgr);
+ failed7:
+	for (i = 0; i < drvdata->num_of_desc_queues; i++) {
+		if (drvdata->queue[i].devt) {
+			cdev_del(&drvdata->queue[i].cdev);
+			device_destroy(sep_class, drvdata->queue[i].devt);
+		}
+
+		if (drvdata->queue[i].sep_cache != SEP_CTX_CACHE_NULL_HANDLE) {
+			ctxmgr_sep_cache_destroy(drvdata->queue[i].sep_cache);
+			drvdata->queue[i].sep_cache = SEP_CTX_CACHE_NULL_HANDLE;
+		}
+
+		if (drvdata->queue[i].desc_queue != DESC_Q_INVALID_HANDLE) {
+			desc_q_destroy(drvdata->queue[i].desc_queue);
+			drvdata->queue[i].desc_queue = DESC_Q_INVALID_HANDLE;
+			mutex_destroy(&drvdata->queue[i].desc_queue_sequencer);
+		}
+	}
+
+ failed6:
+	dx_sep_req_fini(drvdata);
+ failed5:
+	free_host_mem_for_sep(drvdata);
+ failed4:
+#ifdef SEP_INTERRUPT_BY_TIMER
+	del_timer_sync(&drvdata->delegate);
+#else
+	free_irq(drvdata->irq, dev);
+#endif
+ failed3:
+	dx_sep_power_exit();
+	iounmap(drvdata->cc_base);
+ failed2:
+	release_mem_region(regs_res->start, drvdata->mem_size);
+ failed1:
+	kfree(drvdata);
+ failed0:
+
+	return rc;
+}
+
+static void sep_pci_remove(struct pci_dev *pdev)
+{
+	struct sep_drvdata *drvdata =
+	    (struct sep_drvdata *)dev_get_drvdata(&pdev->dev);
+	int i;
+
+	if (!drvdata)
+		return;
+	dx_sep_req_fini(drvdata);
+	if (!disable_linux_crypto) {
+		dx_crypto_api_fini();
+		hwk_fini();
+	}
+	/* Disable interrupts */
+	WRITE_REGISTER(drvdata->cc_base + DX_CC_REG_OFFSET(HOST, IMR), ~0);
+
+#ifdef SEP_RUNTIME_PM
+	pm_runtime_get_noresume(&pdev->dev);
+	pm_runtime_forbid(&pdev->dev);
+#endif
+
+	for (i = 0; i < drvdata->num_of_desc_queues; i++) {
+		if (drvdata->queue[i].desc_queue != DESC_Q_INVALID_HANDLE) {
+			desc_q_destroy(drvdata->queue[i].desc_queue);
+			mutex_destroy(&drvdata->queue[i].desc_queue_sequencer);
+		}
+		if (drvdata->queue[i].sep_cache != SEP_CTX_CACHE_NULL_HANDLE)
+			ctxmgr_sep_cache_destroy(drvdata->queue[i].sep_cache);
+		cdev_del(&drvdata->queue[i].cdev);
+		device_destroy(sep_class, drvdata->queue[i].devt);
+	}
+
+	dma_pool_destroy(drvdata->spad_buf_pool);
+	drvdata->spad_buf_pool = NULL;
+	llimgr_destroy(drvdata->llimgr);
+	drvdata->llimgr = LLIMGR_NULL_HANDLE;
+	free_host_mem_for_sep(drvdata);
+#ifdef SEP_INTERRUPT_BY_TIMER
+	del_timer_sync(&drvdata->delegate);
+#else
+	free_irq(drvdata->irq, &pdev->dev);
+#endif
+	dx_sep_power_exit();
+	iounmap(drvdata->cc_base);
+	release_mem_region(drvdata->mem_start, drvdata->mem_size);
+	sep_free_sysfs();
+	kfree(drvdata);
+	dev_set_drvdata(&pdev->dev, NULL);
+	pci_dev_put(pdev);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, MRLD_SEP_PCI_DEVICE_ID) },
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
+
+/**
+ *	sep_pci_probe - probe a matching PCI device
+ *	@pdev: pci_device
+ *	@ent: matching pci_device_id entry
+ *
+ *	Attempt to set up and configure a SEP device that has been
+ *	discovered by the PCI layer.
+ */
+static int sep_pci_probe(struct pci_dev *pdev,
+			 const struct pci_device_id *ent)
+{
+	int error;
+	struct resource res;
+	struct resource r_irq;
+
+	security_cfg_reg = ioremap_nocache(SECURITY_CFG_ADDR, 4);
+	if (security_cfg_reg == NULL) {
+		dev_err(&pdev->dev, "ioremap of security_cfg_reg failed\n");
+		return -ENOMEM;
+	}
+
+	/* Enable Chaabi */
+	error = pci_enable_device(pdev);
+	if (error) {
+		dev_err(&pdev->dev, "error enabling SEP device\n");
+		goto end;
+	}
+
+	/* Fill resource variables */
+	res.start = pci_resource_start(pdev, 0);
+
+#ifdef PCI_REGION_BUG		/* TODO for wrong sep address bug */
+	res.start += 0x8000;
+#endif
+
+	if (!res.start) {
+		dev_warn(&pdev->dev, "Error getting register start\n");
+		error = -ENODEV;
+		goto disable_pci;
+	}
+
+	res.end = pci_resource_end(pdev, 0);
+	if (!res.end) {
+		dev_warn(&pdev->dev, "Error getting register end\n");
+		error = -ENODEV;
+		goto disable_pci;
+	}
+
+	/* Fill irq resource variable */
+	r_irq.start = pdev->irq;
+
+	/* Use resource variables */
+	error = sep_setup(&pdev->dev, &res, &r_irq);
+	if (error)
+		goto disable_pci;
+
+	pdev = pci_dev_get(pdev);
+
+#ifdef SEP_RUNTIME_PM
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, SEP_AUTOSUSPEND_DELAY);
+	pm_runtime_mark_last_busy(&pdev->dev);
+	pm_runtime_use_autosuspend(&pdev->dev);
+#endif
+
+	return 0;
+
+ disable_pci:
+	pci_disable_device(pdev);
+ end:
+	iounmap(security_cfg_reg);
+
+	return error;
+}
+
+#if defined(CONFIG_PM_RUNTIME) && defined(SEP_RUNTIME_PM)
+static int sep_runtime_suspend(struct device *dev)
+{
+	int ret;
+	int count = 0;
+	u32 val;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct sep_drvdata *drvdata =
+	    (struct sep_drvdata *)dev_get_drvdata(dev);
+
+	ret = dx_sep_power_state_set(DX_SEP_POWER_HIBERNATED);
+	if (ret) {
+		pr_err("%s failed! ret = %d\n", __func__, ret);
+		return ret;
+	}
+
+	/*poll for chaabi_powerdown_en bit in SECURITY_CFG*/
+	while (count < SEP_TIMEOUT) {
+		val = readl(security_cfg_reg);
+		if (val & PWR_DWN_ENB_MASK)
+			break;
+		usleep_range(40, 60);
+		count++;
+	}
+	if (count >= SEP_TIMEOUT) {
+		dev_err(&pdev->dev,
+			"SEP: timed out waiting for chaabi_powerdown_en\n");
+		WARN_ON(1);
+		/*Let's continue to suspend as chaabi is not stable*/
+	}
+
+	disable_irq(pdev->irq);
+	drvdata->sep_suspended = 1;
+
+	return ret;
+}
+
+static int sep_runtime_resume(struct device *dev)
+{
+	int ret;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct sep_drvdata *drvdata =
+	    (struct sep_drvdata *)dev_get_drvdata(dev);
+
+	drvdata->sep_suspended = 0;
+	enable_irq(pdev->irq);
+	ret = dx_sep_power_state_set(DX_SEP_POWER_ACTIVE);
+	WARN(ret, "%s failed! ret = %d\n", __func__, ret);
+
+	/*
+	 * The SEP device might not return to ACTIVE in time.
+	 * As the SEP device is not stable, we choose to return 0
+	 * so that it does not block S3.
+	 */
+	return 0;
+}
+
+static int sep_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct sep_drvdata *drvdata =
+	    (struct sep_drvdata *)dev_get_drvdata(dev);
+	int ret = 0;
+	int count = 0;
+	u32 val;
+
+	ret = dx_sep_power_state_set(DX_SEP_POWER_HIBERNATED);
+	if (ret) {
+		pr_err("%s failed! ret = %d\n", __func__, ret);
+		return ret;
+	}
+
+	/*poll for chaabi_powerdown_en bit in SECURITY_CFG*/
+	while (count < SEP_TIMEOUT) {
+		val = readl(security_cfg_reg);
+		if (val & PWR_DWN_ENB_MASK)
+			break;
+		usleep_range(40, 60);
+		count++;
+	}
+	if (count >= SEP_TIMEOUT) {
+		dev_err(dev,
+			"SEP: timed out waiting for chaabi_powerdown_en\n");
+		WARN_ON(1);
+		/*Let's continue to suspend as chaabi is not stable*/
+	}
+
+	disable_irq(pdev->irq);
+	drvdata->sep_suspended = 1;
+
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return ret;
+}
+
+static int sep_resume(struct device *dev)
+{
+	int ret = 0;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct sep_drvdata *drvdata =
+	    (struct sep_drvdata *)dev_get_drvdata(dev);
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	ret = pci_enable_device(pdev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "SEP: pci_enable_device failed\n");
+		return ret;
+	}
+
+	drvdata->sep_suspended = 0;
+	enable_irq(pdev->irq);
+
+	ret = dx_sep_power_state_set(DX_SEP_POWER_ACTIVE);
+	WARN(ret, "%s failed! ret = %d\n", __func__, ret);
+
+	/*
+	 * The SEP device might not return to ACTIVE in time.
+	 * As the SEP device is not stable, we choose to return 0
+	 * so that it does not block S3.
+	 */
+	return 0;
+}
+
+static const struct dev_pm_ops sep_pm_ops = {
+	.runtime_suspend = sep_runtime_suspend,
+	.runtime_resume = sep_runtime_resume,
+	.suspend = sep_suspend,
+	.resume = sep_resume,
+};
+#endif /* CONFIG_PM_RUNTIME && SEP_RUNTIME_PM */
+
+/* Field for registering driver to PCI device */
+static struct pci_driver sep_pci_driver = {
+#if defined(CONFIG_PM_RUNTIME) && defined(SEP_RUNTIME_PM)
+	.driver = {
+		.pm = &sep_pm_ops,
+	},
+#endif /* CONFIG_PM_RUNTIME && SEP_RUNTIME_PM */
+	.name = DRIVER_NAME,
+	.id_table = sep_pci_id_tbl,
+	.probe = sep_pci_probe,
+	.remove = sep_pci_remove
+};
+
+static int __init sep_module_init(void)
+{
+	int rc;
+
+	sep_class = class_create(THIS_MODULE, "sep_ctl");
+
+	/* Register PCI device */
+	rc = pci_register_driver(&sep_pci_driver);
+	if (rc) {
+		class_destroy(sep_class);
+		return rc;
+	}
+
+	return 0;		/*success */
+}
+
+static void __exit sep_module_cleanup(void)
+{
+	pci_unregister_driver(&sep_pci_driver);
+	class_destroy(sep_class);
+}
+
+/* Entry points  */
+module_init(sep_module_init);
+module_exit(sep_module_cleanup);
+/* Module description */
+MODULE_DESCRIPTION("Discretix " DRIVER_NAME " Driver");
+MODULE_VERSION("0.7");
+MODULE_AUTHOR("Discretix");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/sep54/dx_driver.h b/drivers/staging/sep54/dx_driver.h
new file mode 100644
index 0000000..9325d72
--- /dev/null
+++ b/drivers/staging/sep54/dx_driver.h
@@ -0,0 +1,603 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _DX_DRIVER_H_
+#define _DX_DRIVER_H_
+
+#include <generated/autoconf.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/completion.h>
+#include <linux/export.h>
+#include <linux/semaphore.h>
+
+#define DX_CC_HOST_VIRT	/* must be defined before including dx_cc_regs.h */
+#include "dx_cc_regs.h"
+#include "dx_reg_common.h"
+#include "dx_host.h"
+#include "sep_log.h"
+#include "sep_rpc.h"
+#include "crypto_ctx_mgr.h"
+#include "desc_mgr.h"
+#include "lli_mgr.h"
+#include "dx_driver_abi.h"
+#include "dx_dev_defs.h"
+
+/* Control printf's from SeP via GPR.
+ * Keep this macro defined as long as SeP code uses host_printf
+ * (otherwise, SeP would stall waiting for host to ack characters)
+ */
+#define SEP_PRINTF
+/* Note: If the DEBUG macro is undefined, SeP prints are not emitted to the
+ * log, but the host driver still acks the characters. */
+
+#define MODULE_NAME "sep54"
+
+/* PCI ID's */
+#define MRLD_SEP_PCI_DEVICE_ID 0x1198
+
+#define VER_MAJOR(ver)  ((ver) >> 24)
+#define VER_MINOR(ver)  (((ver) >> 16) & 0xFF)
+#define VER_PATCH(ver)  (((ver) >> 8) & 0xFF)
+#define VER_INTERNAL(ver) ((ver) & 0xFF)
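+/* Example: a version word of 0x01020304 decodes as major 1, minor 2,
+ * patch 3, internal 4. */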
+
+#define SECURITY_CFG_ADDR	0xFF03A01C
+#define PWR_DWN_ENB_MASK	0x20
+#define SEP_TIMEOUT		50000
+#define SEP_POWERON_TIMEOUT     10000
+#define SEP_SLEEP_ENABLE 5
+
+#define SEP_AUTOSUSPEND_DELAY 5000
+
+#define INIT_FW_FLAG 0
+#define INIT_SEP_SWQ_FLAG 1
+
+/* GPR that holds SeP state */
+#define SEP_STATE_GPR_OFFSET SEP_HOST_GPR_REG_OFFSET(DX_SEP_STATE_GPR_IDX)
+/* In case of a change in GPR7 (state) we dump also GPR6 */
+#define SEP_STATUS_GPR_OFFSET SEP_HOST_GPR_REG_OFFSET(DX_SEP_STATUS_GPR_IDX)
+
+/* User memref index access macros */
+#define IS_VALID_MEMREF_IDX(idx) \
+	(((idx) >= 0) && ((idx) < MAX_REG_MEMREF_PER_CLIENT_CTX))
+#define INVALIDATE_MEMREF_IDX(idx) ((idx) = DXDI_MEMREF_ID_NULL)
+
+/* Session context access macros - must be invoked with mutex acquired */
+#define SEP_SESSION_ID_INVALID 0xFFFF
+#define IS_VALID_SESSION_CTX(session_ctx_p) \
+	 (((session_ctx_p)->ref_cnt > 0) && \
+	 ((session_ctx_p)->sep_session_id != SEP_SESSION_ID_INVALID))
+#define INVALIDATE_SESSION_CTX(session_ctx_p) do {              \
+	(session_ctx_p)->sep_session_id = SEP_SESSION_ID_INVALID; \
+	(session_ctx_p)->ref_cnt = 0;                             \
+} while (0)
+/* Session index access macros */
+/* An index is considered valid even if the pointed-to session context is not.
+   Use IS_VALID_SESSION_CTX to verify the validity of the context itself. */
+#define IS_VALID_SESSION_IDX(idx) \
+	 (((idx) >= 0) && ((idx) < MAX_SEPAPP_SESSION_PER_CLIENT_CTX))
+#define INVALIDATE_SESSION_IDX(idx) ((idx) = DXDI_SEPAPP_SESSION_INVALID)
+
+/*
+   Size of DMA-coherent scratchpad buffer allocated per client_ctx context
+   Currently, this buffer is used for 3 purposes:
+   1. SeP RPC messages.
+   2. SeP Applets messages.
+   3. AES-CCM A0 (prepend) data.
+*/
+#define USER_SPAD_SIZE SEP_RPC_MAX_MSG_SIZE
+#if (SEP_RPC_MAX_MSG_SIZE >= (1 << SEP_SW_DESC_RPC_MSG_HMB_SIZE_BIT_SIZE))
+#error SEP_RPC_MAX_MSG_SIZE too large for HMB_SIZE field
+#endif
+
+/* Get the memref ID/index for given dma_obj (struct user_dma_buffer) */
+#define DMA_OBJ_TO_MEMREF_IDX(client_ctx, the_dma_obj)                 \
+	(container_of(the_dma_obj, struct registered_memref, dma_obj) - \
+		(client_ctx)->reg_memrefs)
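+/* Example: if the_dma_obj is embedded in (client_ctx)->reg_memrefs[3].dma_obj,
+ * the container_of() pointer arithmetic above evaluates to index 3. */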
+
+/* Crypto context IDs masks (to be used with ctxmgr_sep_cache_invalidate) */
+#define CRYPTO_CTX_ID_SINGLE_MASK 0xFFFFFFFFFFFFFFFFULL
+#define CRYPTO_CTX_ID_CLIENT_SHIFT 32
+#define CRYPTO_CTX_ID_CLIENT_MASK (0xFFFFFFFFULL << CRYPTO_CTX_ID_CLIENT_SHIFT)
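+/* In its packed 64-bit form (as used with ctxmgr_sep_cache_invalidate), the
+ * upper 32 bits of a crypto context UID identify the owning client context
+ * and the lower 32 bits are a per-client counter (see alloc_crypto_ctx_id()
+ * below), so CRYPTO_CTX_ID_CLIENT_MASK selects all contexts of one client. */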
+
+/* Return 'true' if val is a multiple of given blk_size */
+/* blk_size must be a power of 2 */
+#define IS_MULT_OF(val, blk_size)  ((val & (blk_size - 1)) == 0)
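+/* Example: IS_MULT_OF(48, 16) is 1 and IS_MULT_OF(50, 16) is 0; for a
+ * power-of-2 blk_size the bitwise test is equivalent to
+ * (val % blk_size == 0). */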
+
+struct sep_drvdata;
+
+/**
+ * struct queue_drvdata - Data for a specific SeP queue
+ * @desc_queue:	The associated descriptor queue object
+ * @desc_queue_sequencer: Mutex to assure sequence of operations associated
+ *			  with desc. queue enqueue
+ * @sep_cache:	SeP context cache management object
+ * @dev:	Associated device object
+ * @cdev:	Associated character device
+ * @devt:	Associated character device number (major/minor)
+ * @sep_data:	Associated SeP driver context
+ */
+struct queue_drvdata {
+	void *desc_queue;
+	struct mutex desc_queue_sequencer;
+	void *sep_cache;
+	struct device *dev;
+	struct cdev cdev;
+	dev_t devt;
+	struct sep_drvdata *sep_data;
+};
+
+/**
+ * struct sep_drvdata - SeP driver private data context
+ * @mem_start:	phys. address of the control registers
+ * @mem_end:	end phys. address of the control registers range
+ * @mem_size:	Control registers memory range size (mem_end - mem_start)
+ * @cc_base:	virt address of the CC registers
+ * @irq:	device IRQ number
+ * @irq_mask:	Interrupt mask
+ * @rom_ver:	SeP ROM version
+ * @fw_ver:	SeP loaded firmware version
+ * @icache_size_log2:	log2 of the Icache memory size in bytes
+ * @dcache_size_log2:	log2 of the Dcache memory size in bytes
+ * @icache_pages:	Pages allocated for Icache
+ * @dcache_pages:	Pages allocated for Dcache
+ * @sep_backup_buf_size:	Size of host memory allocated for sep context
+ * @sep_backup_buf:		Buffer allocated for sep context backup
+ * @sep_suspended:	Non-zero while the device is suspended
+ * @spad_buf_pool:	DMA pool for RPC messages or scratch pad use
+ * @mlli_table_size:	bytes as set by sepinit_get_fw_props
+ * @llimgr:	LLI-manager object handle
+ * @num_of_desc_queues:	Actual number of (active) queues
+ * @num_of_sep_cache_entries:	Available SeP/FW cache entries
+ * @devt_base:	Allocated char.dev. major/minor (with alloc_chrdev_region)
+ * @dev:	Device context
+ * @queue:	Array of objects for each SeP SW-queue
+ * @last_ack_cntr:	The last counter value ACK'ed over the SEP_PRINTF GPR
+ * @cur_line_buf_offset:	Offset in line_buf
+ * @line_buf:	A buffer to accumulate SEP_PRINTF characters up to EOL
+ * @delegate:	Timer to initiate GPRs polling for interrupt-less system
+ */
+struct sep_drvdata {
+	resource_size_t mem_start;
+	resource_size_t mem_end;
+	resource_size_t mem_size;
+	void __iomem *cc_base;
+	unsigned int irq;
+	u32 irq_mask;
+	u32 rom_ver;
+	u32 fw_ver;
+#ifdef CACHE_IMAGE_NAME
+	u8 icache_size_log2;
+	u8 dcache_size_log2;
+	struct page *icache_pages;
+	struct page *dcache_pages;
+#elif defined(SEP_BACKUP_BUF_SIZE)
+	unsigned long sep_backup_buf_size;
+	void *sep_backup_buf;
+#endif
+	int sep_suspended;
+	struct dma_pool *spad_buf_pool;
+	unsigned long mlli_table_size;
+	void *llimgr;
+	unsigned int num_of_desc_queues;
+	int num_of_sep_cache_entries;
+	dev_t devt_base;
+	struct device *dev;
+	struct queue_drvdata queue[SEP_MAX_NUM_OF_DESC_Q];
+
+#ifdef SEP_PRINTF
+	int last_ack_cntr;
+	int cur_line_buf_offset;
+#define SEP_PRINTF_LINE_SIZE 100
+	char line_buf[SEP_PRINTF_LINE_SIZE + 1];
+#endif
+
+#ifdef SEP_INTERRUPT_BY_TIMER
+	struct timer_list delegate;
+#endif
+};
+
+/* Enumerate the session operational state */
+enum user_op_state {
+	USER_OP_NOP = 0,	/* No operation is in processing */
+	USER_OP_PENDING,	/* Operation waiting to enter the desc. queue */
+	USER_OP_INPROC,		/* Operation is in process       */
+	USER_OP_COMPLETED	/* Operation completed, waiting for "read" */
+};
+
+/* Enumerate the data operation types */
+enum crypto_data_intent {
+	CRYPTO_DATA_NULL = 0,
+	CRYPTO_DATA_TEXT,	/* plain/cipher text */
+	CRYPTO_DATA_TEXT_FINALIZE,
+	CRYPTO_DATA_ADATA,	/* Additional/Associated data for AEAD */
+	CRYPTO_DATA_MAX = CRYPTO_DATA_ADATA,
+};
+
+/* SeP Applet session data */
+struct sep_app_session {
+	struct mutex session_lock;	/* Protect updates in entry */
+	/* Reference count on session (initialized to 1 on opening) */
+	u16 ref_cnt;
+	u16 sep_session_id;
+};
+
+/**
+ * struct registered_memref - Management information for registered memory
+ * @buf_lock:	Mutex on buffer state changes (ref. count, etc.)
+ * @ref_cnt:	Reference count for protecting freeing while in use.
+ * @dma_obj:	The client DMA object container for the registered mem.
+ */
+struct registered_memref {
+	struct mutex buf_lock;
+	unsigned int ref_cnt;
+	struct client_dma_buffer dma_obj;
+};
+
+struct async_ctx_info {
+	struct dxdi_sepapp_params *dxdi_params;
+	struct dxdi_sepapp_kparams *dxdi_kparams;
+	struct sepapp_client_params *sw_desc_params;
+	struct client_dma_buffer *local_dma_objs[SEPAPP_MAX_PARAMS];
+	struct mlli_tables_list mlli_tables[SEPAPP_MAX_PARAMS];
+	int session_id;
+};
+
+/*
+ * struct sep_client_ctx - SeP client application context allocated per each
+ *                         open()
+ * @drv_data:	Associated queue driver context
+ * @qid:	Priority queue ID
+ * @uid_cntr:	Persistent unique ID counter to be used for crypto context UIDs
+ *		allocation
+ * @reg_memrefs:	Registered user DMA memory buffers
+ * @sepapp_sessions:	SeP Applet client sessions
+ */
+struct sep_client_ctx {
+	struct queue_drvdata *drv_data;
+	unsigned int qid;
+	atomic_t uid_cntr;
+	struct registered_memref reg_memrefs[MAX_REG_MEMREF_PER_CLIENT_CTX];
+	struct sep_app_session
+	    sepapp_sessions[MAX_SEPAPP_SESSION_PER_CLIENT_CTX];
+
+	wait_queue_head_t memref_wq;
+	int memref_cnt;
+	struct mutex memref_lock;
+};
+
+/**
+ * sep_op_type - Flags to describe dispatched type of sep_op_ctx
+ * The flags may be combined when applicable (primarily for CRYPTO_OP desc.).
+ * Because operations may be asynchronous, we cannot set the operation type in
+ * the crypto context.
+ */
+enum sep_op_type {
+	SEP_OP_NULL = 0,
+	SEP_OP_CRYPTO_INIT = 1,	/* CRYPTO_OP::Init. */
+	SEP_OP_CRYPTO_PROC = (1 << 1),	/* CRYPTO_OP::Process */
+	SEP_OP_CRYPTO_FINI = (1 << 2),	/* CRYPTO_OP::Finalize (integrated) */
+	SEP_OP_RPC = (1 << 3),	/* RPC_MSG */
+	SEP_OP_APP = (1 << 4),	/* APP_REQ */
+	SEP_OP_SLEEP = (1 << 7)	/* SLEEP_REQ */
+};
+
+/*
+ * struct sep_op_ctx - A SeP operation context.
+ * @client_ctx:	The client context associated with this operation
+ * @session_ctx:	For SEP_SW_DESC_TYPE_APP_REQ we need the session context
+ *			(otherwise NULL)
+ * @op_type:	The dispatched operation type (enum sep_op_type flags)
+ * @op_state:	Operation progress state indicator
+ * @ioctl_op_compl:	Operation completion signaling object for IOCTLs
+ *			(updated by desc_mgr on completion)
+ * @comp_work:	Async. completion work. NULL for IOCTLs.
+ * @error_info:	The operation return code from SeP (valid on desc. completion)
+ * @internal_error:	Mark that return code (error) is from the SeP FW
+ *			infrastructure and not from the requested operation.
+ *			Currently, this is used only for Applet Manager errors.
+ * @ctx_info:	Current Context. If there are more than one context (such as
+ *		in combined alg.) use (&ctx_info)[] into _ctx_info[].
+ * @_ctx_info:	Extension of ctx_info for additional contexts associated with
+ *		current operation (combined op.)
+ * @ctx_info_num:	number of active ctx_info in (&ctx_info)[]
+ * @pending_descs_cntr:	Pending SW descriptor associated with this operation.
+ *			(Number of descriptor completions required to complete
+ *			this operation)
+ * @backlog_descs_cntr:	Descriptors of this operation enqueued in the backlog q.
+ * @ift:	Input data MLLI table object
+ * @oft:	Output data MLLI table object
+ * @din_dma_obj:	Temporary user memory registration for input data (IFT)
+ * @dout_dma_obj:	Temporary user memory registration for output data (OFT)
+ * @spad_buf_p:	Scratchpad DMA buffer for different temp. buffers required
+ *		during a specific operation: (allocated from rpc_msg_pool)
+ *		- SeP RPC message buffer         or
+ *		- AES-CCM A0 scratchpad buffers  or
+ *		- Next IV for AES-CBC, AES-CTR, DES-CBC
+ * @spad_buf_dma_addr:	DMA address of spad_buf_p
+ * @next_hash_blk_tail_size:	The tail of data_in which is a remainder of a
+ *				block size. We use this info to copy the
+ *				remainder data to the context after block
+ *				processing completion (saved from
+ *				prepare_data_for_sep).
+ * @async_info:	Bookkeeping for asynchronous operations (params, DMA objects)
+ *
+ * Retains the operation status and associated resources while operation in
+ * progress. This object provides threads concurrency support since each thread
+ * may work on different instance of this object, within the scope of the same
+ * client (process) context.
+*/
+struct sep_op_ctx {
+	struct sep_client_ctx *client_ctx;
+	struct sep_app_session *session_ctx;
+	enum sep_op_type op_type;
+	enum user_op_state op_state;
+	struct completion ioctl_op_compl;
+	struct work_struct *comp_work;
+	u32 error_info;
+	bool internal_error;
+	struct client_crypto_ctx_info ctx_info;
+	struct client_crypto_ctx_info _ctx_info[DXDI_COMBINED_NODES_MAX - 1];
+	u8 ctx_info_num;
+	u8 pending_descs_cntr;
+	u8 backlog_descs_cntr;
+	/* Client memory resources for (sym.) crypto-ops */
+	struct mlli_tables_list ift;
+	struct mlli_tables_list oft;
+	struct client_dma_buffer din_dma_obj;
+	struct client_dma_buffer dout_dma_obj;
+	void *spad_buf_p;
+	dma_addr_t spad_buf_dma_addr;
+
+	struct async_ctx_info async_info;
+};
+
+/***************************/
+/* SeP registers access    */
+/***************************/
+/* "Raw" read version to be used in IS_CC_BUS_OPEN and in READ_REGISTER */
+#define _READ_REGISTER(_addr) __raw_readl(        \
+	(const volatile void __iomem *)(_addr))
+/* The device register space is considered accessible when expected
+   device signature value is read from HOST_CC_SIGNATURE register   */
+#define IS_CC_BUS_OPEN(drvdata)                                           \
+	(_READ_REGISTER(DX_CC_REG_ADDR(drvdata->cc_base,                  \
+		CRY_KERNEL, HOST_CC_SIGNATURE)) == DX_DEV_SIGNATURE)
+/* FIXME: Temporary workaround for a HW problem with register reading -
+ * double-read */
+#define READ_REGISTER(_addr) \
+	({(void)_READ_REGISTER(_addr); _READ_REGISTER(_addr); })
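+/* Note: the statement expression above performs two bus reads and evaluates
+ * to the value of the second read. */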
+#define WRITE_REGISTER(_addr, _data)  __raw_writel(_data, \
+	(volatile void __iomem *)(_addr))
+#define GET_SEP_STATE(drvdata)                                           \
+	(IS_CC_BUS_OPEN(drvdata) ?                                       \
+		READ_REGISTER(drvdata->cc_base + SEP_STATE_GPR_OFFSET) : \
+		DX_SEP_STATE_OFF)
+
+#ifdef DEBUG
+void dump_byte_array(const char *name, const u8 *the_array,
+		     unsigned long size);
+void dump_word_array(const char *name, const u32 *the_array,
+		     unsigned long size_in_words);
+#else
+#define dump_byte_array(name, the_array, size) do {} while (0)
+#define dump_word_array(name, the_array, size_in_words) do {} while (0)
+#endif
+
+
+/**
+ * alloc_crypto_ctx_id() - Allocate unique ID for crypto context
+ * @client_ctx:	 The client context object
+ *
+ */
+static inline struct crypto_ctx_uid alloc_crypto_ctx_id(
+	struct sep_client_ctx *client_ctx)
+{
+	struct crypto_ctx_uid uid;
+
+	/* Assuming 32 bit atomic counter is large enough to never wrap
+	 * during a lifetime of a process...
+	 * Someone would laugh (or cry) on this one day */
+#ifdef DEBUG
+	if (atomic_read(&client_ctx->uid_cntr) == 0xFFFFFFFF) {
+		pr_err("uid_cntr overflow for client_ctx=%p\n",
+			    client_ctx);
+		BUG();
+	}
+#endif
+
+	uid.addr = (uintptr_t)client_ctx;
+	uid.cntr = (u32)atomic_inc_return(&client_ctx->uid_cntr);
+
+	return uid;
+}
+
+/**
+ * op_ctx_init() - Initialize an operation context
+ * @op_ctx:	 The allocated struct sep_op_ctx (may be on caller's stack)
+ * @client_ctx:	 The "parent" client context
+ *
+ */
+static inline void op_ctx_init(struct sep_op_ctx *op_ctx,
+			       struct sep_client_ctx *client_ctx)
+{
+	int i;
+	struct client_crypto_ctx_info *ctx_info_p = &(op_ctx->ctx_info);
+
+	pr_debug("op_ctx=%p\n", op_ctx);
+	memset(op_ctx, 0, sizeof(struct sep_op_ctx));
+	op_ctx->client_ctx = client_ctx;
+	op_ctx->ctx_info_num = 1;	/* assume a single context operation */
+	op_ctx->pending_descs_cntr = 1;	/* assume a single desc. transaction */
+	init_completion(&(op_ctx->ioctl_op_compl));
+	pr_debug("after init_completion\n");
+	MLLI_TABLES_LIST_INIT(&(op_ctx->ift));
+	MLLI_TABLES_LIST_INIT(&(op_ctx->oft));
+	pr_debug("MLLI_TABLES_LIST_INIT\n");
+	for (i = 0; i < DXDI_COMBINED_NODES_MAX; i++, ctx_info_p++)
+		USER_CTX_INFO_INIT(ctx_info_p);
+	pr_debug("USER_CTX_INFO_INIT(ctx_info_p)\n");
+}
+
+/**
+ * op_ctx_fini() - Finalize op_ctx (free associated resources before freeing
+ *		memory)
+ * @op_ctx:	The op_ctx initialized with op_ctx_init
+ *
+ * Returns void
+ */
+static inline void op_ctx_fini(struct sep_op_ctx *op_ctx)
+{
+	pr_debug("op_ctx=%p\n", op_ctx);
+	if (op_ctx->spad_buf_p != NULL)
+		dma_pool_free(op_ctx->client_ctx->drv_data->sep_data->
+			      spad_buf_pool, op_ctx->spad_buf_p,
+			      op_ctx->spad_buf_dma_addr);
+
+	delete_context((uintptr_t)op_ctx);
+	memset(op_ctx, 0, sizeof(struct sep_op_ctx));
+}
+
+/**
+ * init_client_ctx() - Initialize a given client context object
+ * @drvdata:	Queue driver context
+ * @client_ctx:	The client context to initialize
+ *
+ * Returns void
+ */
+void init_client_ctx(struct queue_drvdata *drvdata,
+		     struct sep_client_ctx *client_ctx);
+
+void cleanup_client_ctx(struct queue_drvdata *drvdata,
+			struct sep_client_ctx *client_ctx);
+
+/**
+ * register_client_memref() - Register given client memory buffer reference
+ * @client_ctx:		User context data
+ * @user_buf_ptr:	Buffer address in user space. NULL if sgl!=NULL.
+ * @sgl:		Scatter/gather list (for kernel buffers)
+ *			NULL if user_buf_ptr!=NULL.
+ * @buf_size:		Buffer size in bytes
+ * @dma_direction:	DMA direction
+ *
+ * Returns int >=0 - the registered memory reference ID; <0 on error
+ */
+int register_client_memref(struct sep_client_ctx *client_ctx,
+			   u8 __user *user_buf_ptr,
+			   struct scatterlist *sgl,
+			   const unsigned long buf_size,
+			   const enum dma_data_direction dma_direction);
+
+/**
+ * free_client_memref() - Free resources associated with a client mem. reference
+ * @client_ctx:	 User context data
+ * @memref_idx:	 Index of the user memory reference
+ *
+ * Free resources associated with a user memory reference
+ * (The referenced memory may be locked user pages or allocated DMA-coherent
+ *  memory mmap'ed to the user space)
+ * Returns int !0 on failure (memref still in use or unknown)
+ */
+int free_client_memref(struct sep_client_ctx *client_ctx,
+		       int memref_idx);
+
+/**
+ * acquire_dma_obj() - Get the memref object of given memref_idx and increment
+ *			reference count of it
+ * @client_ctx:	Associated client context
+ * @memref_idx:	Required registered memory reference ID (index)
+ *
+ * Get the memref object of given memref_idx and increment reference count of it
+ * The returned object must be released by invoking release_dma_obj() before
+ * the object (memref) may be freed.
+ * Returns struct client_dma_buffer * - the memref object, or NULL if invalid
+ */
+struct client_dma_buffer *acquire_dma_obj(struct sep_client_ctx *client_ctx,
+					  int memref_idx);
+
+/**
+ * release_dma_obj() - Release a memref object taken with acquire_dma_obj()
+ *			(Does not free!)
+ * @client_ctx:	Associated client context
+ * @dma_obj:	The DMA object returned from acquire_dma_obj()
+ *
+ * Returns void
+ */
+void release_dma_obj(struct sep_client_ctx *client_ctx,
+		     struct client_dma_buffer *dma_obj);
+
+/**
+ * dxdi_data_dir_to_dma_data_dir() - Convert from DxDI DMA direction type to
+ *					Linux kernel DMA direction type
+ * @dxdi_dir:	 DMA direction in DxDI encoding
+ *
+ * Returns enum dma_data_direction
+ */
+enum dma_data_direction dxdi_data_dir_to_dma_data_dir(enum dxdi_data_direction
+						      dxdi_dir);
+
+/**
+ * wait_for_sep_op_result() - Wait for outstanding SeP operation to complete and
+ *				fetch SeP ret-code
+ * @op_ctx:
+ *
+ * Wait for an outstanding SeP operation to complete and fetch the SeP
+ * return code into op_ctx->error_info
+ * Returns int
+ */
+int wait_for_sep_op_result(struct sep_op_ctx *op_ctx);
+
+/**
+ * crypto_op_completion_cleanup() - Cleanup CRYPTO_OP descriptor operation
+ *					resources after completion
+ * @op_ctx:
+ *
+ * Returns int
+ */
+int crypto_op_completion_cleanup(struct sep_op_ctx *op_ctx);
+
+
+/*!
+ * IOCTL entry point
+ *
+ * \param filp
+ * \param cmd
+ * \param arg
+ *
+ * \return int
+ * \retval 0 Operation succeeded (but SeP return code may indicate an error)
+ * \retval -ENOTTY  : Unknown IOCTL command
+ * \retval -ENOSYS  : Unsupported/not-implemented (known) operation
+ * \retval -EINVAL  : Invalid parameters
+ * \retval -EFAULT  : Bad pointers for given user memory space
+ * \retval -EPERM   : Not enough permissions for given command
+ * \retval -ENOMEM,-EAGAIN: when not enough resources available for given op.
+ * \retval -EIO     : SeP HW error or another internal error
+ *                    (probably operation timed out or unexpected behavior)
+ */
+long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+
+#endif				/* _DX_DRIVER_H_ */
diff --git a/drivers/staging/sep54/dx_driver_abi.h b/drivers/staging/sep54/dx_driver_abi.h
new file mode 100644
index 0000000..3996375
--- /dev/null
+++ b/drivers/staging/sep54/dx_driver_abi.h
@@ -0,0 +1,656 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef __SEP_DRIVER_ABI_H__
+#define __SEP_DRIVER_ABI_H__
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#ifndef INT32_MAX
+#define INT32_MAX 0x7FFFFFFFL
+#endif
+#else
+/* For inclusion in user space library */
+#include <stdint.h>
+#endif
+
+#include <linux/ioctl.h>
+#include <linux/errno.h>
+#include "sep_rpc.h"
+
+/* Proprietary error code for unexpected internal errors */
+#define EBUG 999
+
+/****************************/
+/**** IOCTL return codes ****/
+/*****************************************************************************
+ ENOTTY  : Unknown IOCTL command					     *
+ ENOSYS  : Unsupported/not-implemented (known) operation		     *
+ EINVAL  : Invalid parameters                                                *
+ EFAULT  : Bad pointers for given user memory space                          *
+ EPERM   : Not enough permissions for given command                          *
+ ENOMEM,EAGAIN: when not enough resources available for given op.            *
+ EIO     : SeP HW error or another internal error (probably operation timed  *
+	   out or unexpected behavior)                                       *
+ EBUG    : Driver bug found ("assertion") - see system log                   *
+*****************************************************************************/
+
+/****** IOCTL commands ********/
+/* The magic number appears free in Documentation/ioctl/ioctl-number.txt */
+#define DXDI_IOC_MAGIC 0xD1
+
+/* IOCTL ordinal numbers */
+/*(for backward compatibility, add new ones only at end of list!) */
+enum dxdi_ioc_nr {
+	/* Version info. commands */
+	DXDI_IOC_NR_GET_VER_MAJOR = 0,
+	DXDI_IOC_NR_GET_VER_MINOR = 1,
+	/* Context size queries */
+	DXDI_IOC_NR_GET_SYMCIPHER_CTX_SIZE = 2,
+	DXDI_IOC_NR_GET_AUTH_ENC_CTX_SIZE = 3,
+	DXDI_IOC_NR_GET_MAC_CTX_SIZE = 4,
+	DXDI_IOC_NR_GET_HASH_CTX_SIZE = 5,
+	/* Init context commands */
+	DXDI_IOC_NR_SYMCIPHER_INIT = 7,
+	DXDI_IOC_NR_AUTH_ENC_INIT = 8,
+	DXDI_IOC_NR_MAC_INIT = 9,
+	DXDI_IOC_NR_HASH_INIT = 10,
+	/* Processing commands */
+	DXDI_IOC_NR_PROC_DBLK = 12,
+	DXDI_IOC_NR_FIN_PROC = 13,
+	/* "Integrated" processing operations */
+	DXDI_IOC_NR_SYMCIPHER_PROC = 14,
+	DXDI_IOC_NR_AUTH_ENC_PROC = 15,
+	DXDI_IOC_NR_MAC_PROC = 16,
+	DXDI_IOC_NR_HASH_PROC = 17,
+	/* SeP RPC */
+	DXDI_IOC_NR_SEP_RPC = 19,
+	/* Memory registration */
+	DXDI_IOC_NR_REGISTER_MEM4DMA = 20,
+	DXDI_IOC_NR_ALLOC_MEM4DMA = 21,
+	DXDI_IOC_NR_FREE_MEM4DMA = 22,
+	/* SeP Applets API */
+	DXDI_IOC_NR_SEPAPP_SESSION_OPEN = 23,
+	DXDI_IOC_NR_SEPAPP_SESSION_CLOSE = 24,
+	DXDI_IOC_NR_SEPAPP_COMMAND_INVOKE = 25,
+	/* Combined mode */
+	DXDI_IOC_NR_COMBINED_INIT = 26,
+	DXDI_IOC_NR_COMBINED_PROC_DBLK = 27,
+	DXDI_IOC_NR_COMBINED_PROC_FIN = 28,
+	DXDI_IOC_NR_COMBINED_PROC = 29,
+
+	/* AES IV set/get API */
+	DXDI_IOC_NR_SET_IV = 30,
+	DXDI_IOC_NR_GET_IV = 31,
+	DXDI_IOC_NR_MAX = DXDI_IOC_NR_GET_IV
+};
+
+/* In case the error is not DXDI_RET_ESEP, these are the
+ * errors embedded in the "error_info" field */
+enum dxdi_error_info {
+	DXDI_ERROR_NULL = 0,
+	DXDI_ERROR_BAD_CTX = 1,
+	DXDI_ERROR_UNSUP = 2,
+	DXDI_ERROR_INVAL_MODE = 3,
+	DXDI_ERROR_INVAL_DIRECTION = 4,
+	DXDI_ERROR_INVAL_KEY_SIZE = 5,
+	DXDI_ERROR_INVAL_NONCE_SIZE = 6,
+	DXDI_ERROR_INVAL_TAG_SIZE = 7,
+	DXDI_ERROR_INVAL_DIN_PTR = 8,
+	DXDI_ERROR_INVAL_DOUT_PTR = 9,
+	DXDI_ERROR_INVAL_DATA_SIZE = 10,
+	DXDI_ERROR_DIN_DOUT_OVERLAP = 11,
+	DXDI_ERROR_INTERNAL = 12,
+	DXDI_ERROR_NO_RESOURCE = 13,
+	DXDI_ERROR_FATAL = 14,
+	DXDI_ERROR_INFO_RESERVE32B = INT32_MAX
+};
+
+/* ABI Version info. */
+#define DXDI_VER_MAJOR 1
+#define DXDI_VER_MINOR DXDI_IOC_NR_MAX
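+/* Example: with DXDI_IOC_NR_MAX currently 31 (DXDI_IOC_NR_GET_IV), the
+ * advertised ABI version is 1.31; the minor version grows as IOCTLs are
+ * appended to the list. */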
+
+/******************************/
+/* IOCTL commands definitions */
+/******************************/
+/* Version info. commands */
+#define DXDI_IOC_GET_VER_MAJOR _IOR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_GET_VER_MAJOR, u32)
+#define DXDI_IOC_GET_VER_MINOR _IOR(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_GET_VER_MINOR, u32)
+/* Context size queries */
+#define DXDI_IOC_GET_SYMCIPHER_CTX_SIZE _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_GET_SYMCIPHER_CTX_SIZE,\
+		struct dxdi_get_sym_cipher_ctx_size_params)
+#define DXDI_IOC_GET_AUTH_ENC_CTX_SIZE _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_GET_AUTH_ENC_CTX_SIZE,\
+		struct dxdi_get_auth_enc_ctx_size_params)
+#define DXDI_IOC_GET_MAC_CTX_SIZE _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_GET_MAC_CTX_SIZE,\
+		struct dxdi_get_mac_ctx_size_params)
+#define DXDI_IOC_GET_HASH_CTX_SIZE _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_GET_HASH_CTX_SIZE,\
+		struct dxdi_get_hash_ctx_size_params)
+/* Init. Sym. Crypto. */
+#define DXDI_IOC_SYMCIPHER_INIT _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_SYMCIPHER_INIT, struct dxdi_sym_cipher_init_params)
+#define DXDI_IOC_AUTH_ENC_INIT _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_AUTH_ENC_INIT, struct dxdi_auth_enc_init_params)
+#define DXDI_IOC_MAC_INIT _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_MAC_INIT, struct dxdi_mac_init_params)
+#define DXDI_IOC_HASH_INIT _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_HASH_INIT, struct dxdi_hash_init_params)
+
+/* Sym. Crypto. Processing commands */
+#define DXDI_IOC_PROC_DBLK _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_PROC_DBLK, struct dxdi_process_dblk_params)
+#define DXDI_IOC_FIN_PROC _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_FIN_PROC, struct dxdi_fin_process_params)
+
+/* Integrated Sym. Crypto. */
+#define DXDI_IOC_SYMCIPHER_PROC _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_SYMCIPHER_PROC, struct dxdi_sym_cipher_proc_params)
+#define DXDI_IOC_AUTH_ENC_PROC _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_AUTH_ENC_PROC, struct dxdi_auth_enc_proc_params)
+#define DXDI_IOC_MAC_PROC _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_MAC_PROC, struct dxdi_mac_proc_params)
+#define DXDI_IOC_HASH_PROC _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_HASH_PROC, struct dxdi_hash_proc_params)
+
+/* AES Initial Vector set/get */
+#define DXDI_IOC_SET_IV _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_SET_IV, struct dxdi_aes_iv_params)
+#define DXDI_IOC_GET_IV _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_GET_IV, struct dxdi_aes_iv_params)
+
+/* Combined mode  */
+#define DXDI_IOC_COMBINED_INIT _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_COMBINED_INIT,\
+		struct dxdi_combined_init_params)
+#define DXDI_IOC_COMBINED_PROC_DBLK _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_COMBINED_PROC_DBLK,\
+		struct dxdi_combined_proc_dblk_params)
+#define DXDI_IOC_COMBINED_PROC_FIN _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_COMBINED_PROC_FIN,\
+		struct dxdi_combined_proc_params)
+#define DXDI_IOC_COMBINED_PROC _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_COMBINED_PROC,\
+		struct dxdi_combined_proc_params)
+
+/* SeP RPC */
+#define DXDI_IOC_SEP_RPC _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_SEP_RPC, struct dxdi_sep_rpc_params)
+/* Memory registration */
+#define DXDI_IOC_REGISTER_MEM4DMA _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_REGISTER_MEM4DMA, \
+		struct dxdi_register_mem4dma_params)
+#define DXDI_IOC_ALLOC_MEM4DMA _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_ALLOC_MEM4DMA, \
+		struct dxdi_alloc_mem4dma_params)
+#define DXDI_IOC_FREE_MEM4DMA _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_FREE_MEM4DMA, \
+		struct dxdi_free_mem4dma_params)
+/* SeP Applets API */
+#define DXDI_IOC_SEPAPP_SESSION_OPEN _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_SEPAPP_SESSION_OPEN, \
+		struct dxdi_sepapp_session_open_params)
+#define DXDI_IOC_SEPAPP_SESSION_CLOSE _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_SEPAPP_SESSION_CLOSE, \
+		struct dxdi_sepapp_session_close_params)
+#define DXDI_IOC_SEPAPP_COMMAND_INVOKE _IORW(DXDI_IOC_MAGIC,\
+		DXDI_IOC_NR_SEPAPP_COMMAND_INVOKE, \
+		struct dxdi_sepapp_command_invoke_params)
+
+/*** ABI constants ***/
+/* Max. symmetric crypto key size (512b) */
+#define DXDI_SYM_KEY_SIZE_MAX 64	/*octets */
+/* Max. MAC key size (applicable to HMAC-SHA512) */
+#define DXDI_MAC_KEY_SIZE_MAX 128	/*octets */
+/* AES IV/Counter size (128b) */
+#define DXDI_AES_BLOCK_SIZE 16	/*octets */
+/* DES IV size (64b) */
+#define DXDI_DES_BLOCK_SIZE 8	/*octets */
+/* Max. Nonce size */
+#define DXDI_NONCE_SIZE_MAX 16	/*octets */
+/* Max. digest size */
+#define DXDI_DIGEST_SIZE_MAX 64	/*octets */
+/* Max. nodes */
+#define DXDI_COMBINED_NODES_MAX 4
+#define DXDI_AES_IV_SIZE DXDI_AES_BLOCK_SIZE
+
+/*** ABI data types ***/
+
+enum dxdi_cipher_direction {
+	DXDI_CDIR_ENC = 0,
+	DXDI_CDIR_DEC = 1,
+	DXDI_CDIR_MAX = DXDI_CDIR_DEC,
+	DXDI_CDIR_RESERVE32B = INT32_MAX
+};
+
+enum dxdi_sym_cipher_type {
+	DXDI_SYMCIPHER_NONE = 0,
+	_DXDI_SYMCIPHER_AES_FIRST = 1,
+	DXDI_SYMCIPHER_AES_XXX = _DXDI_SYMCIPHER_AES_FIRST,
+	DXDI_SYMCIPHER_AES_ECB = _DXDI_SYMCIPHER_AES_FIRST + 1,
+	DXDI_SYMCIPHER_AES_CBC = _DXDI_SYMCIPHER_AES_FIRST + 2,
+	DXDI_SYMCIPHER_AES_CTR = _DXDI_SYMCIPHER_AES_FIRST + 3,
+	DXDI_SYMCIPHER_AES_XTS = _DXDI_SYMCIPHER_AES_FIRST + 4,
+	_DXDI_SYMCIPHER_AES_LAST = DXDI_SYMCIPHER_AES_XTS,
+	_DXDI_SYMCIPHER_DES_FIRST = 0x11,
+	DXDI_SYMCIPHER_DES_ECB = _DXDI_SYMCIPHER_DES_FIRST,
+	DXDI_SYMCIPHER_DES_CBC = _DXDI_SYMCIPHER_DES_FIRST + 1,
+	_DXDI_SYMCIPHER_DES_LAST = DXDI_SYMCIPHER_DES_CBC,
+	_DXDI_SYMCIPHER_C2_FIRST = 0x21,
+	DXDI_SYMCIPHER_C2_ECB = _DXDI_SYMCIPHER_C2_FIRST,
+	DXDI_SYMCIPHER_C2_CBC = _DXDI_SYMCIPHER_C2_FIRST + 1,
+	_DXDI_SYMCIPHER_C2_LAST = DXDI_SYMCIPHER_C2_CBC,
+	DXDI_SYMCIPHER_RC4 = 0x31,	/* Supported in message API only */
+	DXDI_SYMCIPHER_RESERVE32B = INT32_MAX
+};
+
+enum dxdi_auth_enc_type {
+	DXDI_AUTHENC_NONE = 0,
+	DXDI_AUTHENC_AES_CCM = 1,
+	DXDI_AUTHENC_AES_GCM = 2,
+	DXDI_AUTHENC_MAX = DXDI_AUTHENC_AES_GCM,
+	DXDI_AUTHENC_RESERVE32B = INT32_MAX
+};
+
+enum dxdi_mac_type {
+	DXDI_MAC_NONE = 0,
+	DXDI_MAC_HMAC = 1,
+	DXDI_MAC_AES_MAC = 2,
+	DXDI_MAC_AES_CMAC = 3,
+	DXDI_MAC_AES_XCBC_MAC = 4,
+	DXDI_MAC_MAX = DXDI_MAC_AES_XCBC_MAC,
+	DXDI_MAC_RESERVE32B = INT32_MAX
+};
+
+enum dxdi_hash_type {
+	DXDI_HASH_NONE = 0,
+	DXDI_HASH_MD5 = 1,
+	DXDI_HASH_SHA1 = 2,
+	DXDI_HASH_SHA224 = 3,
+	DXDI_HASH_SHA256 = 4,
+	DXDI_HASH_SHA384 = 5,
+	DXDI_HASH_SHA512 = 6,
+	DXDI_HASH_MAX = DXDI_HASH_SHA512,
+	DXDI_HASH_RESERVE32B = INT32_MAX
+};
+
+enum dxdi_data_block_type {
+	DXDI_DATA_TYPE_NULL = 0,
+	DXDI_DATA_TYPE_TEXT = 1,	/* Plain/cipher text */
+	DXDI_DATA_TYPE_ADATA = 2,	/* Additional/Associated data for AEAD */
+	DXDI_DATA_TYPE_MAX = DXDI_DATA_TYPE_ADATA,
+	DXDI_DATA_TYPE_RESERVE32B = INT32_MAX
+};
+
+enum dxdi_input_engine_type {
+	DXDI_INPUT_NULL = 0,	/* no input */
+	DXDI_INPUT_ENGINE_1 = 1,
+	DXID_INPUT_ENGINE_2 = 2,
+	DXDI_INPUT_DIN = 15,	/* input from DIN */
+	DXDI_INPUT_ENGINE_RESERVE32B = INT32_MAX,
+};
+
+#pragma pack(push)
+#pragma pack(4) /* Force to 32 bit alignment */
+/* Properties of specific ciphers */
+/* (for use in alg_specific union of dxdi_cipher_props) */
+struct dxdi_des_cbc_props {
+	u8 iv[DXDI_DES_BLOCK_SIZE];
+};
+struct dxdi_aes_cbc_props {
+	u8 iv[DXDI_AES_BLOCK_SIZE];
+};
+struct dxdi_aes_ctr_props {
+	u8 cntr[DXDI_AES_BLOCK_SIZE];
+};
+struct dxdi_aes_xts_props {
+	u8 init_tweak[DXDI_AES_BLOCK_SIZE];
+	u32 data_unit_size;
+};
+struct dxdi_c2_cbc_props {
+	u32 reset_interval;
+};
+
+struct dxdi_sym_cipher_props {
+	enum dxdi_sym_cipher_type cipher_type;
+	enum dxdi_cipher_direction direction;
+	u8 key_size;	/* In octets */
+	u8 key[DXDI_SYM_KEY_SIZE_MAX];
+	union {			/* cipher specific properties */
+		struct dxdi_des_cbc_props des_cbc;
+		struct dxdi_aes_cbc_props aes_cbc;
+		struct dxdi_aes_ctr_props aes_ctr;
+		struct dxdi_aes_xts_props aes_xts;
+		struct dxdi_c2_cbc_props c2_cbc;
+		u32 __assure_32b_union_alignment;
+		/* Reserve space for future extension? */
+	} alg_specific;
+};
+
+struct dxdi_auth_enc_props {
+	enum dxdi_auth_enc_type ae_type;
+	enum dxdi_cipher_direction direction;
+	u32 adata_size;	/* In octets */
+	u32 text_size;	/* In octets */
+	u8 key_size;	/* In octets */
+	u8 nonce_size;	/* In octets */
+	u8 tag_size;	/* In octets */
+	u8 key[DXDI_SYM_KEY_SIZE_MAX];
+	u8 nonce[DXDI_NONCE_SIZE_MAX];
+};
+
+/* Properties specific for HMAC */
+/* (for use in properties union of dxdi_mac_props) */
+struct dxdi_hmac_props {
+	enum dxdi_hash_type hash_type;
+};
+
+struct dxdi_aes_mac_props {
+	u8 iv[DXDI_AES_BLOCK_SIZE];
+};
+
+struct dxdi_mac_props {
+	enum dxdi_mac_type mac_type;
+	u32 key_size;	/* In octets */
+	u8 key[DXDI_MAC_KEY_SIZE_MAX];
+	union {			/* Union of algorithm specific properties */
+		struct dxdi_hmac_props hmac;
+		struct dxdi_aes_mac_props aes_mac;
+		u32 __assure_32b_union_alignment;
+		/* Reserve space for future extension? */
+	} alg_specific;
+};
+
+/* Combined mode props */
+struct dxdi_combined_node_props {
+	u32 *context;
+	enum dxdi_input_engine_type eng_input;
+};
+
+struct dxdi_combined_props {
+	struct dxdi_combined_node_props node_props[DXDI_COMBINED_NODES_MAX];
+};
+
+/*** IOCTL commands parameters structures ***/
+
+struct dxdi_get_sym_cipher_ctx_size_params {
+	enum dxdi_sym_cipher_type sym_cipher_type;	/*[in] */
+	u32 ctx_size;	/*[out] */
+};
+
+struct dxdi_get_auth_enc_ctx_size_params {
+	enum dxdi_auth_enc_type ae_type;	/*[in] */
+	u32 ctx_size;	/*[out] */
+};
+
+struct dxdi_get_mac_ctx_size_params {
+	enum dxdi_mac_type mac_type;	/*[in] */
+	u32 ctx_size;	/*[out] */
+};
+
+struct dxdi_get_hash_ctx_size_params {
+	enum dxdi_hash_type hash_type;	/*[in] */
+	u32 ctx_size;	/*[out] */
+};
+
+/* Init params */
+struct dxdi_sym_cipher_init_params {
+	u32 *context_buf;	/*[in] */
+	struct dxdi_sym_cipher_props props;	/*[in] */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_auth_enc_init_params {
+	u32 *context_buf;	/*[in] */
+	struct dxdi_auth_enc_props props;	/*[in] */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_mac_init_params {
+	u32 *context_buf;	/*[in] */
+	struct dxdi_mac_props props;	/*[in] */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_hash_init_params {
+	u32 *context_buf;	/*[in] */
+	enum dxdi_hash_type hash_type;	/*[in] */
+	u32 error_info;	/*[out] */
+};
+
+/* Processing params */
+struct dxdi_process_dblk_params {
+	u32 *context_buf;	/*[in] */
+	u8 *data_in;	/*[in] */
+	u8 *data_out;	/*[in] */
+	enum dxdi_data_block_type data_block_type;	/*[in] */
+	u32 data_in_size;	/*[in] */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_fin_process_params {
+	u32 *context_buf;	/*[in] */
+	u8 *data_in;	/*[in] */
+	u8 *data_out;	/*[in] */
+	u32 data_in_size;	/*[in] (octets) */
+	u8 digest_or_mac[DXDI_DIGEST_SIZE_MAX];	/*[out] */
+	u8 digest_or_mac_size;	/*[out] (octets) */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_sym_cipher_proc_params {
+	u32 *context_buf;	/*[in] */
+	struct dxdi_sym_cipher_props props;	/*[in] */
+	u8 *data_in;	/*[in] */
+	u8 *data_out;	/*[in] */
+	u32 data_in_size;	/*[in] (octets) */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_auth_enc_proc_params {
+	u32 *context_buf;	/*[in] */
+	struct dxdi_auth_enc_props props;	/*[in] */
+	u8 *adata;		/*[in] */
+	u8 *text_data;	/*[in] */
+	u8 *data_out;	/*[in] */
+	u8 tag[DXDI_DIGEST_SIZE_MAX];	/*[out] */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_mac_proc_params {
+	u32 *context_buf;	/*[in] */
+	struct dxdi_mac_props props;	/*[in] */
+	u8 *data_in;	/*[in] */
+	u32 data_in_size;	/*[in] (octets) */
+	u8 mac[DXDI_DIGEST_SIZE_MAX];	/*[out] */
+	u8 mac_size;	/*[out] (octets) */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_hash_proc_params {
+	u32 *context_buf;	/*[in] */
+	enum dxdi_hash_type hash_type;	/*[in] */
+	u8 *data_in;	/*[in] */
+	u32 data_in_size;	/*[in] (octets) */
+	u8 digest[DXDI_DIGEST_SIZE_MAX];	/*[out] */
+	u8 digest_size;	/*[out] (octets) */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_aes_iv_params {
+	u32 *context_buf;	/*[in] */
+	u8 iv_ptr[DXDI_AES_IV_SIZE];	/*[in/out] */
+};
+
+/* Combined params */
+struct dxdi_combined_init_params {
+	struct dxdi_combined_props props;	/*[in] */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_combined_proc_dblk_params {
+	struct dxdi_combined_props props;	/*[in] */
+	u8 *data_in;	/*[in] */
+	u8 *data_out;	/*[out] */
+	u32 data_in_size;	/*[in] (octets) */
+	u32 error_info;	/*[out] */
+};
+
+/* The structure used for both finalize and integrated combined processing */
+struct dxdi_combined_proc_params {
+	struct dxdi_combined_props props;	/*[in] */
+	u8 *data_in;	/*[in] */
+	u8 *data_out;	/*[out] */
+	u32 data_in_size;	/*[in] (octets) */
+	u8 auth_data[DXDI_DIGEST_SIZE_MAX];	/*[out] */
+	u8 auth_data_size;	/*[out] (octets) */
+	u32 error_info;	/*[out] */
+};
+
+/**************************************/
+/* Memory references and registration */
+/**************************************/
+
+enum dxdi_data_direction {
+	DXDI_DATA_NULL = 0,
+	DXDI_DATA_TO_DEVICE = 1,
+	DXDI_DATA_FROM_DEVICE = (1 << 1),
+	DXDI_DATA_BIDIR = (DXDI_DATA_TO_DEVICE | DXDI_DATA_FROM_DEVICE)
+};
+
+/* Reference to pre-registered memory */
+#define DXDI_MEMREF_ID_NULL (-1)
+
+struct dxdi_memref {
+	enum dxdi_data_direction dma_direction;
+	/* Memory reference ID - DXDI_MEMREF_ID_NULL if not registered */
+	int ref_id;
+	/* Start address of a non-registered memory or offset within a
+	 * registered memory */
+	u32 start_or_offset;
+	/* Size in bytes of non-registered buffer or size of chunk within a
+	 * registered buffer */
+	u32 size;
+};
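+
+/*
+ * Illustrative sketch (not part of the ABI): describing a plain,
+ * non-registered user buffer for input DMA. For a pre-registered buffer,
+ * ref_id would instead hold the ID returned by DXDI_IOC_REGISTER_MEM4DMA
+ * and start_or_offset an offset within that registration. "buf" and
+ * "buf_len" are assumed caller variables.
+ *
+ *	struct dxdi_memref mref = {
+ *		.dma_direction = DXDI_DATA_TO_DEVICE,
+ *		.ref_id = DXDI_MEMREF_ID_NULL,
+ *		.start_or_offset = (u32)(unsigned long)buf,
+ *		.size = buf_len,
+ *	};
+ */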
+
+struct dxdi_register_mem4dma_params {
+	struct dxdi_memref memref;	/*[in] */
+	int memref_id;	/*[out] */
+};
+
+struct dxdi_alloc_mem4dma_params {
+	u32 size;	/*[in] */
+	int memref_id;	/*[out] */
+};
+
+struct dxdi_free_mem4dma_params {
+	int memref_id;	/*[in] */
+};
+
+/***********/
+/* SeP-RPC */
+/***********/
+struct dxdi_sep_rpc_params {
+	u16 agent_id;	/*[in] */
+	u16 func_id;	/*[in] */
+	struct dxdi_memref mem_refs[SEP_RPC_MAX_MEMREF_PER_FUNC]; /*[in] */
+	u32 rpc_params_size;	/*[in] */
+	struct seprpc_params *rpc_params;	/*[in] */
+	/* rpc_params to be copied into kernel DMA buffer */
+	enum seprpc_retcode error_info;	/*[out] */
+};
+
+/***************/
+/* SeP Applets */
+/***************/
+
+enum dxdi_sepapp_param_type {
+	DXDI_SEPAPP_PARAM_NULL = 0,
+	DXDI_SEPAPP_PARAM_VAL = 1,
+	DXDI_SEPAPP_PARAM_MEMREF = 2,
+	DXDI_SEPAPP_PARAM_RESERVE32B = 0x7FFFFFFF
+};
+
+struct dxdi_val_param {
+	enum dxdi_data_direction copy_dir;	/* Copy direction */
+	u32 data[2];
+};
+
+#define SEP_APP_PARAMS_MAX 4
+
+struct dxdi_sepapp_params {
+	enum dxdi_sepapp_param_type params_types[SEP_APP_PARAMS_MAX];
+	union {
+		struct dxdi_val_param val;
+		struct dxdi_memref memref;
+	} params[SEP_APP_PARAMS_MAX];
+};
+
+/* SeP modules ID for returnOrigin */
+enum dxdi_sep_module {
+	DXDI_SEP_MODULE_NULL = 0,
+	DXDI_SEP_MODULE_HOST_DRIVER = 1,
+	DXDI_SEP_MODULE_SW_QUEUE = 2,	/* SW-queue task: Inc. desc. parsers */
+	DXDI_SEP_MODULE_APP_MGR = 3,	/* Applet Manager */
+	DXDI_SEP_MODULE_APP = 4,	/* Applet */
+	DXDI_SEP_MODULE_RPC_AGENT = 5,	/* Down to RPC parsers */
+	DXDI_SEP_MODULE_SYM_CRYPTO = 6,	/* Symmetric crypto driver */
+	DXDI_SEP_MODULE_RESERVE32B = 0x7FFFFFFF
+};
+
+#define DXDI_SEPAPP_UUID_SIZE 16
+
+#define DXDI_SEPAPP_SESSION_INVALID (-1)
+
+struct dxdi_sepapp_session_open_params {
+	u8 app_uuid[DXDI_SEPAPP_UUID_SIZE];	/*[in] */
+	u32 auth_method;	/*[in] */
+	u32 auth_data[3];	/*[in] */
+	struct dxdi_sepapp_params app_auth_data;	/*[in/out] */
+	int session_id;	/*[out] */
+	enum dxdi_sep_module sep_ret_origin;	/*[out] */
+	u32 error_info;	/*[out] */
+};
+
+struct dxdi_sepapp_session_close_params {
+	int session_id;	/*[in] */
+};
+
+struct dxdi_sepapp_command_invoke_params {
+	int session_id;	/*[in] */
+	u32 command_id;	/*[in] */
+	struct dxdi_sepapp_params command_params;	/*[in/out] */
+	enum dxdi_sep_module sep_ret_origin;	/*[out] */
+	u32 error_info;	/*[out] */
+};
+
+#pragma pack(pop)
+
+#endif /*__SEP_DRIVER_ABI_H__*/
diff --git a/drivers/staging/sep54/dx_env.h b/drivers/staging/sep54/dx_env.h
new file mode 100644
index 0000000..299e3c7
--- /dev/null
+++ b/drivers/staging/sep54/dx_env.h
@@ -0,0 +1,230 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+#ifndef __DX_ENV_H__
+#define __DX_ENV_H__
+
+/*--------------------------------------*/
+/* BLOCK: ENV_REGS                      */
+/*--------------------------------------*/
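+/*
+ * Convention note with a hedged example: each register field below is
+ * described by a *_REG_OFFSET (byte offset of the register), a *_BIT_SHIFT
+ * (LSB position of the field) and a *_BIT_SIZE (field width in bits). A
+ * field read might look like the following, where "cc_regs" (an ioremap'ed
+ * base pointer) is an assumption:
+ *
+ *	u32 reg = readl(cc_regs + DX_ENV_PKA_DEBUG_MODE_REG_OFFSET);
+ *	u32 dbg = (reg >> DX_ENV_PKA_DEBUG_MODE_VALUE_BIT_SHIFT) &
+ *		((1UL << DX_ENV_PKA_DEBUG_MODE_VALUE_BIT_SIZE) - 1);
+ */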
+#define DX_ENV_CC_GPI_REG_OFFSET     0x18UL
+#define DX_ENV_CC_GPI_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_GPI_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CC_GPO_REG_OFFSET     0x1cUL
+#define DX_ENV_CC_GPO_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_GPO_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_PKA_DEBUG_MODE_REG_OFFSET     0x24UL
+#define DX_ENV_PKA_DEBUG_MODE_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_PKA_DEBUG_MODE_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_SCAN_MODE_REG_OFFSET     0x30UL
+#define DX_ENV_SCAN_MODE_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_SCAN_MODE_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_ALLOW_SCAN_REG_OFFSET     0x34UL
+#define DX_ENV_CC_ALLOW_SCAN_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_ALLOW_SCAN_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_HOST_CC_EXT_INT_REG_OFFSET     0x38UL
+#define DX_ENV_HOST_CC_EXT_INT_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_HOST_CC_EXT_INT_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_SW_MONITOR_ADDR_REG_OFFSET     0x60UL
+#define DX_ENV_CC_SW_MONITOR_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_SW_MONITOR_ADDR_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CC_HOST_INT_REG_OFFSET     0x0A0UL
+#define DX_ENV_CC_HOST_INT_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_HOST_INT_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_RST_N_REG_OFFSET     0x0A8UL
+#define DX_ENV_CC_RST_N_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_RST_N_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_RST_OVERRIDE_REG_OFFSET     0x0ACUL
+#define DX_ENV_RST_OVERRIDE_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_RST_OVERRIDE_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_HOST_EXT_ACK_REG_OFFSET     0x0B0UL
+#define DX_ENV_CC_HOST_EXT_ACK_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_HOST_EXT_ACK_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_POR_N_ADDR_REG_OFFSET     0x0E0UL
+#define DX_ENV_CC_POR_N_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_POR_N_ADDR_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_WARM_BOOT_REG_OFFSET     0x0E4UL
+#define DX_ENV_CC_WARM_BOOT_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_WARM_BOOT_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_COLD_BOOT_REG_OFFSET     0x0E8UL
+#define DX_ENV_CC_COLD_BOOT_CC_COLD_BOOT_FULL_BIT_SHIFT  0x0UL
+#define DX_ENV_CC_COLD_BOOT_CC_COLD_BOOT_FULL_BIT_SIZE   0x1UL
+#define DX_ENV_CC_COLD_BOOT_CC_COLD_BOOT_SEMI_BIT_SHIFT  0x1UL
+#define DX_ENV_CC_COLD_BOOT_CC_COLD_BOOT_SEMI_BIT_SIZE   0x1UL
+#define DX_ENV_CC_BM_LOWER_BOUND_ADDR_REG_OFFSET     0x0F0UL
+#define DX_ENV_CC_BM_LOWER_BOUND_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_BM_LOWER_BOUND_ADDR_VALUE_BIT_SIZE    0x14UL
+#define DX_ENV_CC_BM_UPPER_BOUND_ADDR_REG_OFFSET     0x0F4UL
+#define DX_ENV_CC_BM_UPPER_BOUND_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_BM_UPPER_BOUND_ADDR_VALUE_BIT_SIZE    0x14UL
+#define DX_ENV_CC_BM_ENB_ADDR_REG_OFFSET     0x0F8UL
+#define DX_ENV_CC_BM_ENB_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_BM_ENB_ADDR_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_COLD_RST_REG_OFFSET     0x0FCUL
+#define DX_ENV_CC_COLD_RST_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_COLD_RST_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_BM_ERR_ACK_ADDR_REG_OFFSET     0x100UL
+#define DX_ENV_CC_BM_ERR_ACK_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_BM_ERR_ACK_ADDR_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_BM_CC_ERR_ADDR_REG_OFFSET     0x104UL
+#define DX_ENV_BM_CC_ERR_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_BM_CC_ERR_ADDR_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_DUMMY_ADDR_REG_OFFSET     0x108UL
+#define DX_ENV_DUMMY_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_DUMMY_ADDR_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CLK_STATUS_REG_OFFSET     0x10CUL
+#define DX_ENV_CLK_STATUS_AES_CLK_STATUS_BIT_SHIFT  0x0UL
+#define DX_ENV_CLK_STATUS_AES_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_DES_CLK_STATUS_BIT_SHIFT  0x1UL
+#define DX_ENV_CLK_STATUS_DES_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_HASH_CLK_STATUS_BIT_SHIFT  0x2UL
+#define DX_ENV_CLK_STATUS_HASH_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_PKA_CLK_STATUS_BIT_SHIFT  0x3UL
+#define DX_ENV_CLK_STATUS_PKA_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_RC4_CLK_STATUS_BIT_SHIFT  0x4UL
+#define DX_ENV_CLK_STATUS_RC4_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_AHB_CLK_STATUS_BIT_SHIFT  0x5UL
+#define DX_ENV_CLK_STATUS_AHB_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_RNG_CLK_STATUS_BIT_SHIFT  0x6UL
+#define DX_ENV_CLK_STATUS_RNG_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_C2_CLK_STATUS_BIT_SHIFT  0x7UL
+#define DX_ENV_CLK_STATUS_C2_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_SEP_CLK_STATUS_BIT_SHIFT  0x8UL
+#define DX_ENV_CLK_STATUS_SEP_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_CLK_STATUS_COMM_CLK_STATUS_BIT_SHIFT  0x9UL
+#define DX_ENV_CLK_STATUS_COMM_CLK_STATUS_BIT_SIZE   0x1UL
+#define DX_ENV_COUNTER_CLR_REG_OFFSET     0x118UL
+#define DX_ENV_COUNTER_CLR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_COUNTER_CLR_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_COUNTER_RD_REG_OFFSET     0x11CUL
+#define DX_ENV_COUNTER_RD_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_COUNTER_RD_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CC_SECOND_BM_LOWER_BOUND_ADDR_REG_OFFSET     0x120UL
+#define DX_ENV_CC_SECOND_BM_LOWER_BOUND_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_SECOND_BM_LOWER_BOUND_ADDR_VALUE_BIT_SIZE    0x14UL
+#define DX_ENV_CC_SECOND_BM_UPPER_BOUND_ADDR_REG_OFFSET     0x124UL
+#define DX_ENV_CC_SECOND_BM_UPPER_BOUND_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_SECOND_BM_UPPER_BOUND_ADDR_VALUE_BIT_SIZE    0x14UL
+#define DX_ENV_CC_SECOND_BM_ENB_ADDR_REG_OFFSET     0x128UL
+#define DX_ENV_CC_SECOND_BM_ENB_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_SECOND_BM_ENB_ADDR_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_SECOND_BM_ERR_ACK_ADDR_REG_OFFSET     0x12CUL
+#define DX_ENV_CC_SECOND_BM_ERR_ACK_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_SECOND_BM_ERR_ACK_ADDR_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_SECOND_BM_CC_ERR_ADDR_REG_OFFSET     0x130UL
+#define DX_ENV_SECOND_BM_CC_ERR_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_SECOND_BM_CC_ERR_ADDR_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_RNG_DEBUG_ENABLE_REG_OFFSET     0x430UL
+#define DX_ENV_RNG_DEBUG_ENABLE_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_RNG_DEBUG_ENABLE_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_WARM_BOOT_FINISHED_REG_OFFSET     0x434UL
+#define DX_ENV_CC_WARM_BOOT_FINISHED_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_WARM_BOOT_FINISHED_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CC_LCS_REG_OFFSET     0x43CUL
+#define DX_ENV_CC_LCS_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_LCS_VALUE_BIT_SIZE    0x8UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_REG_OFFSET     0x440UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_CM_BIT_SHIFT  0x0UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_CM_BIT_SIZE   0x1UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_DM_BIT_SHIFT  0x1UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_DM_BIT_SIZE   0x1UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_SECURE_BIT_SHIFT  0x2UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_SECURE_BIT_SIZE   0x1UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_RMA_BIT_SHIFT  0x3UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_RMA_BIT_SIZE   0x1UL
+#define DX_ENV_DCU_EN_REG_OFFSET     0x444UL
+#define DX_ENV_DCU_EN_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_DCU_EN_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CC_LCS_IS_VALID_REG_OFFSET     0x448UL
+#define DX_ENV_CC_LCS_IS_VALID_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CC_LCS_IS_VALID_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_CRYPTOKEY_0_REG_OFFSET     0x450UL
+#define DX_ENV_CRYPTOKEY_0_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CRYPTOKEY_0_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CRYPTOKEY_1_REG_OFFSET     0x454UL
+#define DX_ENV_CRYPTOKEY_1_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CRYPTOKEY_1_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CRYPTOKEY_2_REG_OFFSET     0x458UL
+#define DX_ENV_CRYPTOKEY_2_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CRYPTOKEY_2_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CRYPTOKEY_3_REG_OFFSET     0x45CUL
+#define DX_ENV_CRYPTOKEY_3_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CRYPTOKEY_3_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CRYPTOKEY_4_REG_OFFSET     0x460UL
+#define DX_ENV_CRYPTOKEY_4_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CRYPTOKEY_4_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CRYPTOKEY_5_REG_OFFSET     0x464UL
+#define DX_ENV_CRYPTOKEY_5_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CRYPTOKEY_5_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CRYPTOKEY_6_REG_OFFSET     0x468UL
+#define DX_ENV_CRYPTOKEY_6_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CRYPTOKEY_6_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_CRYPTOKEY_7_REG_OFFSET     0x46CUL
+#define DX_ENV_CRYPTOKEY_7_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_CRYPTOKEY_7_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_POWER_DOWN_REG_OFFSET     0x478UL
+#define DX_ENV_POWER_DOWN_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_POWER_DOWN_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_POWER_DOWN_EN_REG_OFFSET     0x47CUL
+#define DX_ENV_POWER_DOWN_EN_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_POWER_DOWN_EN_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_OTF_SECURE_BOOT_DONE_REG_OFFSET     0x480UL
+#define DX_ENV_OTF_SECURE_BOOT_DONE_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_OTF_SECURE_BOOT_DONE_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_DCU_H_EN_REG_OFFSET     0x484UL
+#define DX_ENV_DCU_H_EN_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_DCU_H_EN_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_VERSION_REG_OFFSET     0x488UL
+#define DX_ENV_VERSION_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_VERSION_VALUE_BIT_SIZE    0x20UL
+#define DX_ENV_FUSE_AIB_1K_OFFSET_REG_OFFSET     0x48CUL
+#define DX_ENV_FUSE_AIB_1K_OFFSET_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_FUSE_AIB_1K_OFFSET_VALUE_BIT_SIZE    0x2UL
+/* --------------------------------------*/
+/* BLOCK: ENV_CC_MEMORIES                */
+/* --------------------------------------*/
+#define DX_ENV_FUSE_READY_REG_OFFSET     0x414UL
+#define DX_ENV_FUSE_READY_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_FUSE_READY_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_ROM_BANK_REG_OFFSET     0x420UL
+#define DX_ENV_ROM_BANK_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_ROM_BANK_VALUE_BIT_SIZE    0x2UL
+#define DX_ENV_PERF_RAM_MASTER_REG_OFFSET     0x500UL
+#define DX_ENV_PERF_RAM_MASTER_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_PERF_RAM_MASTER_VALUE_BIT_SIZE    0x1UL
+#define DX_ENV_PERF_RAM_ADDR_HIGH4_REG_OFFSET     0x504UL
+#define DX_ENV_PERF_RAM_ADDR_HIGH4_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_PERF_RAM_ADDR_HIGH4_VALUE_BIT_SIZE    0x2UL
+#define DX_ENV_FUSES_RAM_REG_OFFSET     0x800UL
+#define DX_ENV_FUSES_RAM_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_FUSES_RAM_VALUE_BIT_SIZE    0x20UL
+/* --------------------------------------*/
+/* BLOCK: ENV_PERF_RAM_BASE              */
+/* --------------------------------------*/
+#define DX_ENV_PERF_RAM_BASE_REG_OFFSET     0x0UL
+#define DX_ENV_PERF_RAM_BASE_VALUE_BIT_SHIFT   0x0UL
+#define DX_ENV_PERF_RAM_BASE_VALUE_BIT_SIZE    0x20UL
+
+#endif /*__DX_ENV_H__*/
diff --git a/drivers/staging/sep54/dx_host.h b/drivers/staging/sep54/dx_host.h
new file mode 100644
index 0000000..0ae53b2
--- /dev/null
+++ b/drivers/staging/sep54/dx_host.h
@@ -0,0 +1,398 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+#ifndef __DX_HOST_H__
+#define __DX_HOST_H__
+/* -------------------------------------- */
+/* BLOCK: HOST                            */
+/* -------------------------------------- */
+#define DX_HOST_IRR_REG_OFFSET     0xA00UL
+#define DX_HOST_IRR_SEP_WATCHDOG_BIT_SHIFT  0x0UL
+#define DX_HOST_IRR_SEP_WATCHDOG_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_DSCRPTR_DONE_LOW_INT_BIT_SHIFT  0x2UL
+#define DX_HOST_IRR_DSCRPTR_DONE_LOW_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_HOST_SRAM_VIO_BIT_SHIFT  0x3UL
+#define DX_HOST_IRR_HOST_SRAM_VIO_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SRAM_TO_DIN_INT_BIT_SHIFT  0x4UL
+#define DX_HOST_IRR_SRAM_TO_DIN_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_DOUT_TO_SRAM_INT_BIT_SHIFT  0x5UL
+#define DX_HOST_IRR_DOUT_TO_SRAM_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_MEM_TO_DIN_INT_BIT_SHIFT  0x6UL
+#define DX_HOST_IRR_MEM_TO_DIN_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_DOUT_TO_MEM_INT_BIT_SHIFT  0x7UL
+#define DX_HOST_IRR_DOUT_TO_MEM_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_AXI_ERR_INT_BIT_SHIFT  0x8UL
+#define DX_HOST_IRR_AXI_ERR_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_PKA_EXP_INT_BIT_SHIFT  0x9UL
+#define DX_HOST_IRR_PKA_EXP_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_RNG_INT_BIT_SHIFT  0xAUL
+#define DX_HOST_IRR_RNG_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SEP_HOST_GPR0_INT_BIT_SHIFT  0xBUL
+#define DX_HOST_IRR_SEP_HOST_GPR0_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SEP_HOST_GPR1_INT_BIT_SHIFT  0xCUL
+#define DX_HOST_IRR_SEP_HOST_GPR1_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SEP_HOST_GPR2_INT_BIT_SHIFT  0xDUL
+#define DX_HOST_IRR_SEP_HOST_GPR2_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SEP_HOST_GPR3_INT_BIT_SHIFT  0xEUL
+#define DX_HOST_IRR_SEP_HOST_GPR3_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SEP_HOST_GPR4_INT_BIT_SHIFT  0xFUL
+#define DX_HOST_IRR_SEP_HOST_GPR4_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SEP_HOST_GPR5_INT_BIT_SHIFT  0x10UL
+#define DX_HOST_IRR_SEP_HOST_GPR5_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SEP_HOST_GPR6_INT_BIT_SHIFT  0x11UL
+#define DX_HOST_IRR_SEP_HOST_GPR6_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_SEP_HOST_GPR7_INT_BIT_SHIFT  0x12UL
+#define DX_HOST_IRR_SEP_HOST_GPR7_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SHIFT  0x13UL
+#define DX_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_REG_OFFSET     0xA04UL
+#define DX_HOST_IMR_SEP_WATCHDOG_MASK_BIT_SHIFT  0x0UL
+#define DX_HOST_IMR_SEP_WATCHDOG_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_NOT_USED_MASK_BIT_SHIFT  0x1UL
+#define DX_HOST_IMR_NOT_USED_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT  0x2UL
+#define DX_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_HOST_SRAM_VIO_MASK_BIT_SHIFT  0x3UL
+#define DX_HOST_IMR_HOST_SRAM_VIO_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SRAM_TO_DIN_MASK_BIT_SHIFT  0x4UL
+#define DX_HOST_IMR_SRAM_TO_DIN_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_DOUT_TO_SRAM_MASK_BIT_SHIFT  0x5UL
+#define DX_HOST_IMR_DOUT_TO_SRAM_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_MEM_TO_DIN_MASK_BIT_SHIFT  0x6UL
+#define DX_HOST_IMR_MEM_TO_DIN_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_DOUT_TO_MEM_MASK_BIT_SHIFT  0x7UL
+#define DX_HOST_IMR_DOUT_TO_MEM_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_AXI_ERR_MASK_BIT_SHIFT  0x8UL
+#define DX_HOST_IMR_AXI_ERR_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_PKA_EXP_MASK_BIT_SHIFT  0x9UL
+#define DX_HOST_IMR_PKA_EXP_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_RNG_INT_MASK_BIT_SHIFT  0xAUL
+#define DX_HOST_IMR_RNG_INT_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SEP_HOST_GPR0_MASK_BIT_SHIFT  0xBUL
+#define DX_HOST_IMR_SEP_HOST_GPR0_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SEP_HOST_GPR1_MASK_BIT_SHIFT  0xCUL
+#define DX_HOST_IMR_SEP_HOST_GPR1_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SEP_HOST_GPR2_MASK_BIT_SHIFT  0xDUL
+#define DX_HOST_IMR_SEP_HOST_GPR2_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SEP_HOST_GPR3_MASK_BIT_SHIFT  0xEUL
+#define DX_HOST_IMR_SEP_HOST_GPR3_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SEP_HOST_GPR4_MASK_BIT_SHIFT  0xFUL
+#define DX_HOST_IMR_SEP_HOST_GPR4_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SEP_HOST_GPR5_MASK_BIT_SHIFT  0x10UL
+#define DX_HOST_IMR_SEP_HOST_GPR5_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SEP_HOST_GPR6_MASK_BIT_SHIFT  0x11UL
+#define DX_HOST_IMR_SEP_HOST_GPR6_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_SEP_HOST_GPR7_MASK_BIT_SHIFT  0x12UL
+#define DX_HOST_IMR_SEP_HOST_GPR7_MASK_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SHIFT  0x13UL
+#define DX_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_DSCRPTR_WATERMARK_MASK1_BIT_SHIFT  0x14UL
+#define DX_HOST_IMR_DSCRPTR_WATERMARK_MASK1_BIT_SIZE   0x1UL
+#define DX_HOST_IMR_CNTX_SWITCH_CNTR_EXPIRED_BIT_SHIFT  0x15UL
+#define DX_HOST_IMR_CNTX_SWITCH_CNTR_EXPIRED_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_REG_OFFSET     0xA08UL
+#define DX_HOST_ICR_SEP_WATCHDOG_CLEAR_BIT_SHIFT  0x0UL
+#define DX_HOST_ICR_SEP_WATCHDOG_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT  0x2UL
+#define DX_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_HOST_SRAM_VIO_CLEAR_BIT_SHIFT  0x3UL
+#define DX_HOST_ICR_HOST_SRAM_VIO_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SRAM_TO_DIN_CLEAR_BIT_SHIFT  0x4UL
+#define DX_HOST_ICR_SRAM_TO_DIN_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_DOUT_TO_SRAM_CLEAR_BIT_SHIFT  0x5UL
+#define DX_HOST_ICR_DOUT_TO_SRAM_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_MEM_TO_DIN_CLEAR_BIT_SHIFT  0x6UL
+#define DX_HOST_ICR_MEM_TO_DIN_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_DOUT_TO_MEM_CLEAR_BIT_SHIFT  0x7UL
+#define DX_HOST_ICR_DOUT_TO_MEM_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_AXI_ERR_CLEAR_BIT_SHIFT  0x8UL
+#define DX_HOST_ICR_AXI_ERR_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_PKA_EXP_CLEAR_BIT_SHIFT  0x9UL
+#define DX_HOST_ICR_PKA_EXP_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_RNG_INT_CLEAR_BIT_SHIFT  0xAUL
+#define DX_HOST_ICR_RNG_INT_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SEP_HOST_GPR0_CLEAR_BIT_SHIFT  0xBUL
+#define DX_HOST_ICR_SEP_HOST_GPR0_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SEP_HOST_GPR1_CLEAR_BIT_SHIFT  0xCUL
+#define DX_HOST_ICR_SEP_HOST_GPR1_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SEP_HOST_GPR2_CLEAR_BIT_SHIFT  0xDUL
+#define DX_HOST_ICR_SEP_HOST_GPR2_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SEP_HOST_GPR3_CLEAR_BIT_SHIFT  0xEUL
+#define DX_HOST_ICR_SEP_HOST_GPR3_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SEP_HOST_GPR4_CLEAR_BIT_SHIFT  0xFUL
+#define DX_HOST_ICR_SEP_HOST_GPR4_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SEP_HOST_GPR5_CLEAR_BIT_SHIFT  0x10UL
+#define DX_HOST_ICR_SEP_HOST_GPR5_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SEP_HOST_GPR6_CLEAR_BIT_SHIFT  0x11UL
+#define DX_HOST_ICR_SEP_HOST_GPR6_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_SEP_HOST_GPR7_CLEAR_BIT_SHIFT  0x12UL
+#define DX_HOST_ICR_SEP_HOST_GPR7_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SHIFT  0x13UL
+#define DX_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE   0x1UL
+#define DX_HOST_SEP_SRAM_THRESHOLD_REG_OFFSET     0xA10UL
+#define DX_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SIZE    0x10UL
+#define DX_HOST_SEP_BUSY_REG_OFFSET     0xA14UL
+#define DX_HOST_SEP_BUSY_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_BUSY_VALUE_BIT_SIZE    0x1UL
+#define DX_HOST_SEP_SW_MONITOR_REG_OFFSET     0xA20UL
+#define DX_HOST_SEP_SW_MONITOR_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_SW_MONITOR_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_CC_SW_RST_REG_OFFSET     0xA40UL
+#define DX_HOST_CC_SW_RST_CC_SW_RST_REQ_BIT_SHIFT  0x0UL
+#define DX_HOST_CC_SW_RST_CC_SW_RST_REQ_BIT_SIZE   0x1UL
+#define DX_HOST_CC_SW_RST_CC_SW_RST_FORCE_BIT_SHIFT  0x1UL
+#define DX_HOST_CC_SW_RST_CC_SW_RST_FORCE_BIT_SIZE   0x1UL
+#define DX_HOST_CC_SW_RST_AXIS_SYSREQ_BIT_SHIFT  0x2UL
+#define DX_HOST_CC_SW_RST_AXIS_SYSREQ_BIT_SIZE   0x1UL
+#define DX_HOST_CC_SW_RST_AXIM_SYSREQ_BIT_SHIFT  0x3UL
+#define DX_HOST_CC_SW_RST_AXIM_SYSREQ_BIT_SIZE   0x1UL
+#define DX_HOST_SEP_HOST_GPR0_REG_OFFSET     0xA80UL
+#define DX_HOST_SEP_HOST_GPR0_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_HOST_GPR0_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_SEP_HOST_GPR1_REG_OFFSET     0xA88UL
+#define DX_HOST_SEP_HOST_GPR1_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_HOST_GPR1_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_SEP_HOST_GPR2_REG_OFFSET     0xA90UL
+#define DX_HOST_SEP_HOST_GPR2_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_HOST_GPR2_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_SEP_HOST_GPR3_REG_OFFSET     0xA98UL
+#define DX_HOST_SEP_HOST_GPR3_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_HOST_GPR3_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_SEP_HOST_GPR4_REG_OFFSET     0xAA0UL
+#define DX_HOST_SEP_HOST_GPR4_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_HOST_GPR4_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_SEP_HOST_GPR5_REG_OFFSET     0xAA8UL
+#define DX_HOST_SEP_HOST_GPR5_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_HOST_GPR5_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_SEP_HOST_GPR6_REG_OFFSET     0xAB0UL
+#define DX_HOST_SEP_HOST_GPR6_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_HOST_GPR6_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_SEP_HOST_GPR7_REG_OFFSET     0xAB8UL
+#define DX_HOST_SEP_HOST_GPR7_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_SEP_HOST_GPR7_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_SEP_GPR0_REG_OFFSET     0xA84UL
+#define DX_HOST_HOST_SEP_GPR0_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_HOST_SEP_GPR0_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_SEP_GPR1_REG_OFFSET     0xA8CUL
+#define DX_HOST_HOST_SEP_GPR1_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_HOST_SEP_GPR1_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_SEP_GPR2_REG_OFFSET     0xA94UL
+#define DX_HOST_HOST_SEP_GPR2_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_HOST_SEP_GPR2_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_SEP_GPR3_REG_OFFSET     0xA9CUL
+#define DX_HOST_HOST_SEP_GPR3_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_HOST_SEP_GPR3_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_SEP_GPR4_REG_OFFSET     0xAA4UL
+#define DX_HOST_HOST_SEP_GPR4_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_HOST_SEP_GPR4_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_SEP_GPR5_REG_OFFSET     0xAACUL
+#define DX_HOST_HOST_SEP_GPR5_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_HOST_SEP_GPR5_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_SEP_GPR6_REG_OFFSET     0xAB4UL
+#define DX_HOST_HOST_SEP_GPR6_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_HOST_SEP_GPR6_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_SEP_GPR7_REG_OFFSET     0xABCUL
+#define DX_HOST_HOST_SEP_GPR7_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_HOST_SEP_GPR7_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_HOST_ENDIAN_REG_OFFSET     0xAD0UL
+#define DX_HOST_HOST_ENDIAN_DIN_ICACHE_END_BIT_SHIFT  0x0UL
+#define DX_HOST_HOST_ENDIAN_DIN_ICACHE_END_BIT_SIZE   0x1UL
+#define DX_HOST_HOST_ENDIAN_DIN_DCAHE_END_BIT_SHIFT  0x1UL
+#define DX_HOST_HOST_ENDIAN_DIN_DCAHE_END_BIT_SIZE   0x1UL
+#define DX_HOST_HOST_ENDIAN_DIN_DD_END_BIT_SHIFT  0x2UL
+#define DX_HOST_HOST_ENDIAN_DIN_DD_END_BIT_SIZE   0x1UL
+#define DX_HOST_HOST_ENDIAN_DIN_DMA_END_BIT_SHIFT  0x3UL
+#define DX_HOST_HOST_ENDIAN_DIN_DMA_END_BIT_SIZE   0x1UL
+#define DX_HOST_HOST_ENDIAN_DOUT_ICACHE_END_BIT_SHIFT  0x4UL
+#define DX_HOST_HOST_ENDIAN_DOUT_ICACHE_END_BIT_SIZE   0x1UL
+#define DX_HOST_HOST_ENDIAN_DOUT_DCACHE_END_BIT_SHIFT  0x5UL
+#define DX_HOST_HOST_ENDIAN_DOUT_DCACHE_END_BIT_SIZE   0x1UL
+#define DX_HOST_HOST_ENDIAN_DOUT_DD_END_BIT_SHIFT  0x6UL
+#define DX_HOST_HOST_ENDIAN_DOUT_DD_END_BIT_SIZE   0x1UL
+#define DX_HOST_HOST_ENDIAN_DOUT_DMA_END_BIT_SHIFT  0x7UL
+#define DX_HOST_HOST_ENDIAN_DOUT_DMA_END_BIT_SIZE   0x1UL
+#define DX_HOST_HOST_ENDIAN_INTENAL_WORD_END_BIT_SHIFT  0x8UL
+#define DX_HOST_HOST_ENDIAN_INTENAL_WORD_END_BIT_SIZE   0x8UL
+#define DX_SRAM_DATA_REG_OFFSET     0xF00UL
+#define DX_SRAM_DATA_VALUE_BIT_SHIFT   0x0UL
+#define DX_SRAM_DATA_VALUE_BIT_SIZE    0x20UL
+#define DX_SRAM_ADDR_REG_OFFSET     0xF04UL
+#define DX_SRAM_ADDR_VALUE_BIT_SHIFT   0x0UL
+#define DX_SRAM_ADDR_VALUE_BIT_SIZE    0xFUL
+#define DX_SRAM_DATA_READY_REG_OFFSET     0xF08UL
+#define DX_SRAM_DATA_READY_VALUE_BIT_SHIFT   0x0UL
+#define DX_SRAM_DATA_READY_VALUE_BIT_SIZE    0x1UL
+#define DX_HOST_RKEK1_0_REG_OFFSET     0xA00UL
+#define DX_HOST_RKEK1_0_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_0_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK1_1_REG_OFFSET     0xA04UL
+#define DX_HOST_RKEK1_1_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_1_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK1_2_REG_OFFSET     0xA08UL
+#define DX_HOST_RKEK1_2_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_2_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK1_3_REG_OFFSET     0xA0CUL
+#define DX_HOST_RKEK1_3_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_3_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK1_4_REG_OFFSET     0xA10UL
+#define DX_HOST_RKEK1_4_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_4_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK1_5_REG_OFFSET     0xA14UL
+#define DX_HOST_RKEK1_5_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_5_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK1_6_REG_OFFSET     0xA18UL
+#define DX_HOST_RKEK1_6_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_6_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK1_7_REG_OFFSET     0xA1CUL
+#define DX_HOST_RKEK1_7_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_7_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK1_ECC_REG_OFFSET     0xA20UL
+#define DX_HOST_RKEK1_ECC_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK1_ECC_VALUE_BIT_SIZE    0x20UL
+#define DX_LCS_REG_REG_OFFSET     0xA24UL
+#define DX_LCS_REG_VALUE_BIT_SHIFT   0x0UL
+#define DX_LCS_REG_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK2_0_REG_OFFSET     0xA2CUL
+#define DX_HOST_RKEK2_0_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK2_0_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK2_1_REG_OFFSET     0xA30UL
+#define DX_HOST_RKEK2_1_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK2_1_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK2_2_REG_OFFSET     0xA34UL
+#define DX_HOST_RKEK2_2_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK2_2_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK2_3_REG_OFFSET     0xA38UL
+#define DX_HOST_RKEK2_3_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK2_3_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK2_4_REG_OFFSET     0xA3CUL
+#define DX_HOST_RKEK2_4_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK2_4_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK2_5_REG_OFFSET     0xA40UL
+#define DX_HOST_RKEK2_5_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK2_5_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK2_6_REG_OFFSET     0xA44UL
+#define DX_HOST_RKEK2_6_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK2_6_VALUE_BIT_SIZE    0x20UL
+#define DX_HOST_RKEK2_7_REG_OFFSET     0xA48UL
+#define DX_HOST_RKEK2_7_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_RKEK2_7_VALUE_BIT_SIZE    0x20UL
+#define DX_NVM_CC_BOOT_REG_OFFSET     0xAA4UL
+#define DX_NVM_CC_BOOT_LARGE_RKEK_LOCAL_BIT_SHIFT  0x1UL
+#define DX_NVM_CC_BOOT_LARGE_RKEK_LOCAL_BIT_SIZE   0x1UL
+#define DX_NVM_CC_BOOT_HASH_IN_FUSES_LOCAL_BIT_SHIFT  0x2UL
+#define DX_NVM_CC_BOOT_HASH_IN_FUSES_LOCAL_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CNTR0_REG_OFFSET     0xA50UL
+#define DX_PAU_HOST_CNTR0_VALUE_BIT_SHIFT   0x0UL
+#define DX_PAU_HOST_CNTR0_VALUE_BIT_SIZE    0x20UL
+#define DX_PAU_HOST_CNTR1_REG_OFFSET     0xA54UL
+#define DX_PAU_HOST_CNTR1_VALUE_BIT_SHIFT   0x0UL
+#define DX_PAU_HOST_CNTR1_VALUE_BIT_SIZE    0x20UL
+#define DX_PAU_HOST_CNTR2_REG_OFFSET     0xA58UL
+#define DX_PAU_HOST_CNTR2_VALUE_BIT_SHIFT   0x0UL
+#define DX_PAU_HOST_CNTR2_VALUE_BIT_SIZE    0x20UL
+#define DX_PAU_HOST_CNTR3_REG_OFFSET     0xA5CUL
+#define DX_PAU_HOST_CNTR3_VALUE_BIT_SHIFT   0x0UL
+#define DX_PAU_HOST_CNTR3_VALUE_BIT_SIZE    0x20UL
+#define DX_PAU_HOST_CNTR4_REG_OFFSET     0xA60UL
+#define DX_PAU_HOST_CNTR4_VALUE_BIT_SHIFT   0x0UL
+#define DX_PAU_HOST_CNTR4_VALUE_BIT_SIZE    0x20UL
+#define DX_PAU_HOST_XOR_REG_OFFSET     0xA64UL
+#define DX_PAU_HOST_XOR_VALUE_BIT_SHIFT   0x0UL
+#define DX_PAU_HOST_XOR_VALUE_BIT_SIZE    0x20UL
+#define DX_PAU_HOST_MASK0_REG_OFFSET     0xA68UL
+#define DX_PAU_HOST_MASK0_PAU_HOST_MASK0_BIT_SHIFT  0x0UL
+#define DX_PAU_HOST_MASK0_PAU_HOST_MASK0_BIT_SIZE   0xDUL
+#define DX_PAU_HOST_MASK0_UN_USED_BIT_SHIFT  0xDUL
+#define DX_PAU_HOST_MASK0_UN_USED_BIT_SIZE   0x13UL
+#define DX_PAU_HOST_MASK1_REG_OFFSET     0xA6CUL
+#define DX_PAU_HOST_MASK1_PAU_HOST_MASK1_BIT_SHIFT  0x0UL
+#define DX_PAU_HOST_MASK1_PAU_HOST_MASK1_BIT_SIZE   0x19UL
+#define DX_PAU_HOST_MASK1_UN_USED_BIT_SHIFT  0x19UL
+#define DX_PAU_HOST_MASK1_UN_USED_BIT_SIZE   0x7UL
+#define DX_PAU_HOST_MASK2_REG_OFFSET     0xA70UL
+#define DX_PAU_HOST_MASK2_PAU_HOST_MASK2_BIT_SHIFT  0x0UL
+#define DX_PAU_HOST_MASK2_PAU_HOST_MASK2_BIT_SIZE   0x19UL
+#define DX_PAU_HOST_MASK2_UN_USED_BIT_SHIFT  0x19UL
+#define DX_PAU_HOST_MASK2_UN_USED_BIT_SIZE   0x7UL
+#define DX_PAU_HOST_MASK3_REG_OFFSET     0xA74UL
+#define DX_PAU_HOST_MASK3_PAU_HOST_MASK3_BIT_SHIFT  0x0UL
+#define DX_PAU_HOST_MASK3_PAU_HOST_MASK3_BIT_SIZE   0x1EUL
+#define DX_PAU_HOST_MASK3_UN_USED_BIT_SHIFT  0x1EUL
+#define DX_PAU_HOST_MASK3_UN_USED_BIT_SIZE   0x2UL
+#define DX_PAU_HOST_MASK4_REG_OFFSET     0xA78UL
+#define DX_PAU_HOST_MASK4_PAU_HOST_MASK4_BIT_SHIFT  0x0UL
+#define DX_PAU_HOST_MASK4_PAU_HOST_MASK4_BIT_SIZE   0x1EUL
+#define DX_PAU_HOST_MASK4_UN_USED_BIT_SHIFT  0x1EUL
+#define DX_PAU_HOST_MASK4_UN_USED_BIT_SIZE   0x2UL
+#define DX_PAU_HOST_CONFIG_REG_OFFSET     0xA7CUL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND0_BIT_SHIFT  0x0UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND0_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND1_BIT_SHIFT  0x1UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND1_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND2_BIT_SHIFT  0x2UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND2_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND3_BIT_SHIFT  0x3UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND3_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND4_BIT_SHIFT  0x4UL
+#define DX_PAU_HOST_CONFIG_WRAPAROUND4_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL0_BIT_SHIFT  0x5UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL0_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL1_BIT_SHIFT  0x6UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL1_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL2_BIT_SHIFT  0x7UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL2_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL3_BIT_SHIFT  0x8UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL3_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL4_BIT_SHIFT  0x9UL
+#define DX_PAU_HOST_CONFIG_OVERFLOW_SIGNAL4_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING_BIT_SHIFT  0xAUL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_AND_COUNETER_EVENT_BIT_SHIFT  0xBUL
+#define DX_PAU_HOST_CONFIG_AND_COUNETER_EVENT_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING0_BIT_SHIFT  0xCUL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING0_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING1_BIT_SHIFT  0xDUL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING1_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING2_BIT_SHIFT  0xEUL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING2_BIT_SIZE   0x1UL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING3_BIT_SHIFT  0xFUL
+#define DX_PAU_HOST_CONFIG_ENABLE_COUNTING3_BIT_SIZE   0x1UL
+#define DX_HOST_REGION_MASK_REG_OFFSET     0xAC4UL
+#define DX_HOST_REGION_MASK_HOST_REGION_SECURED_MASK_BIT_SHIFT  0x0UL
+#define DX_HOST_REGION_MASK_HOST_REGION_SECURED_MASK_BIT_SIZE   0x10UL
+#define DX_HOST_REGION_MASK_HOST_REGION_NON_SECURED_MASK_BIT_SHIFT  0x10UL
+#define DX_HOST_REGION_MASK_HOST_REGION_NON_SECURED_MASK_BIT_SIZE   0x10UL
+#define DX_HOST_REGION_GPRS_MASK_REG_OFFSET     0xAC0UL
+#define DX_HOST_REGION_GPRS_MASK_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_REGION_GPRS_MASK_VALUE_BIT_SIZE    0x8UL
+#define DX_HOST_CC_SW_RESET_ALLOWED_REG_OFFSET     0xA48UL
+#define DX_HOST_CC_SW_RESET_ALLOWED_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_CC_SW_RESET_ALLOWED_VALUE_BIT_SIZE    0x1UL
+#define DX_HOST_CC_SIGNATURE_REG_OFFSET     0xAC8UL
+#define DX_HOST_CC_SIGNATURE_VALUE_BIT_SHIFT   0x0UL
+#define DX_HOST_CC_SIGNATURE_VALUE_BIT_SIZE    0x20UL
+
+#endif /*__DX_HOST_H__*/
diff --git a/drivers/staging/sep54/dx_init_cc_abi.h b/drivers/staging/sep54/dx_init_cc_abi.h
new file mode 100644
index 0000000..ba52531
--- /dev/null
+++ b/drivers/staging/sep54/dx_init_cc_abi.h
@@ -0,0 +1,207 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+/***************************************************************************
+ *  This file provides the CC-init ABI (SeP-Host binary interface)         *
+ ***************************************************************************/
+
+#ifndef __DX_INIT_CC_ABI_H__
+#define __DX_INIT_CC_ABI_H__
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+/* For SeP code environment */
+#include <stdint.h>
+#endif
+#include "dx_bitops.h"
+
+#ifndef INT16_MAX
+#define INT16_MAX		(32767)
+#endif
+#ifndef INT32_MAX
+#define INT32_MAX		(2147483647)
+#endif
+
+/***********************************/
+/* SeP to host communication       */
+/***********************************/
+/* GPRs for CC-init state/status from SeP */
+#define DX_SEP_STATE_GPR_IDX               7	/* SeP state */
+#define DX_SEP_STATUS_GPR_IDX              6	/* SeP status */
+/* GPRs used for passing driver init. parameters */
+/* (Valid while in DX_SEP_STATE_DONE_COLD_BOOT) */
+#define DX_SEP_INIT_SEP_PROPS_GPR_IDX      3	/* SEP properties passed to the
+						 * driver (see fields below) */
+#define DX_SEP_INIT_FW_PROPS_GPR_IDX      2	/* FW properties passed to the
+						 * driver (see fields below) */
+#define DX_SEP_INIT_FW_VER_GPR_IDX         1	/* SeP FW images version */
+#define DX_SEP_INIT_ROM_VER_GPR_IDX        0	/* SeP ROM image version */
+
+/* Debugging "stdout" tunnel via GPR5 - see sep_driver_cc54.c for details */
+#define DX_SEP_HOST_PRINTF_GPR_IDX         5
+
+/* Fields in DX_SEP_INIT_FW_PROPS_GPR_IDX */
+/* MLLI table size in bytes */
+#define DX_SEP_INIT_MLLI_TBL_SIZE_BIT_OFFSET	0
+#define DX_SEP_INIT_MLLI_TBL_SIZE_BIT_SIZE	12
+/* Maximum number of work queues supported */
+#define DX_SEP_INIT_NUM_OF_QUEUES_BIT_OFFSET	12
+#define DX_SEP_INIT_NUM_OF_QUEUES_BIT_SIZE	4
+/* Number of available context cache entries */
+#define DX_SEP_INIT_CACHE_CTX_SIZE_BIT_OFFSET	16
+#define DX_SEP_INIT_CACHE_CTX_SIZE_BIT_SIZE     8
+
+/* Fields in DX_SEP_INIT_SEP_PROPS_GPR_IDX */
+/* SEP frequency */
+#define DX_SEP_INIT_SEP_FREQUENCY_BIT_OFFSET	0
+#define DX_SEP_INIT_SEP_FREQUENCY_BIT_SIZE	12
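+
+/*
+ * Hedged decoding sketch (not part of the ABI): after cold boot the driver
+ * can unpack the FW properties from the GPR value using the offset/size
+ * pairs above; "fw_props" is assumed to hold the raw GPR contents.
+ *
+ *	u32 mlli_tbl_size = (fw_props >> DX_SEP_INIT_MLLI_TBL_SIZE_BIT_OFFSET)
+ *		& ((1UL << DX_SEP_INIT_MLLI_TBL_SIZE_BIT_SIZE) - 1);
+ */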
+
+/***********************************/
+/* Host to SeP communication       */
+/***********************************/
+/* GPRs for requests from host to SeP */
+#define DX_HOST_REQ_GPR_IDX 7	/* Host-to-SeP requests */
+#define DX_HOST_REQ_PARAM_GPR_IDX 6	/* Host request parameters */
+/* The parameters in GPR6 must be ready before writing the request to GPR7 */
+
+/* MAGIC value of TLV "FIRST" parameter */
+#define DX_FW_INIT_PARAM_FIRST_MAGIC	0x3243F6A8
+
+/* Type-Length word manipulation macros */
+/* Note that all TLV communication assumes little-endian words */
+/* (i.e., responsibility of host driver to convert to LE before passing) */
+#define DX_TL_WORD(type, length)  ((length) << 16 | (type))
+#define DX_TL_GET_TYPE(tl_word)   ((tl_word) & 0xFFFF)
+#define DX_TL_GET_LENGTH(tl_word) ((tl_word) >> 16)
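+/*
+ * Worked example (follows directly from the macros above):
+ * DX_TL_WORD(DX_FW_INIT_PARAM_NUM_OF_DESC_QS, 1) == (1 << 16) | 0x101
+ * == 0x00010101; DX_TL_GET_TYPE() then recovers 0x101 and
+ * DX_TL_GET_LENGTH() recovers 1.
+ */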
+
+/* Macros for Assembly code */
+#define ASM_DX_SEP_STATE_ILLEGAL_INST   0x100
+#define ASM_DX_SEP_STATE_STACK_OVERFLOW 0x200
+/* SeP states over (SeP-to-host) GPR7 */
+enum dx_sep_state {
+	DX_SEP_STATE_OFF = 0x0,
+	DX_SEP_STATE_FATAL_ERROR = 0x1,
+	DX_SEP_STATE_START_SECURE_BOOT = 0x2,
+	DX_SEP_STATE_PROC_COLD_BOOT = 0x4,
+	DX_SEP_STATE_PROC_WARM_BOOT = 0x8,
+	DX_SEP_STATE_DONE_COLD_BOOT = 0x10,
+	DX_SEP_STATE_DONE_WARM_BOOT = 0x20,
+	/*DX_SEP_STATE_BM_ERR           = 0x40, */
+	/*DX_SEP_STATE_SECOND_BM_ERR    = 0x80, */
+	DX_SEP_STATE_ILLEGAL_INST = ASM_DX_SEP_STATE_ILLEGAL_INST,
+	DX_SEP_STATE_STACK_OVERFLOW = ASM_DX_SEP_STATE_STACK_OVERFLOW,
+	/* Response to DX_HOST_REQ_FW_INIT: */
+	DX_SEP_STATE_RELOAD_DRIVER	= 0x300,
+	DX_SEP_STATE_DONE_FW_INIT	= 0x400,
+	DX_SEP_STATE_PROC_SLEEP_MODE	= 0x800,
+	DX_SEP_STATE_DONE_SLEEP_MODE	= 0x1000,
+	DX_SEP_STATE_FW_ABORT		= 0xBAD0BAD0,
+	DX_SEP_STATE_ROM_ABORT		= 0xBAD1BAD1,
+	DX_SEP_STATE_RESERVE32B		= INT32_MAX
+};
+
+/* Host requests over (host-to-SeP) GPR7 */
+enum dx_host_req {
+	DX_HOST_REQ_RELEASE_CRYPTO_ENGINES        = 0x0,
+	DX_HOST_REQ_ACQUIRE_CRYPTO_ENGINES        = 0x2,
+	DX_HOST_REQ_CC_INIT                       = 0x1,
+	DX_HOST_REQ_FW_INIT                       = 0x4,
+	DX_HOST_REQ_UPDATE_SWQ_ADDR               = 0x6,
+	DX_HOST_REQ_CHANGE_TO_RELOAD_DRIVER_STATE = 0x7,
+	DX_HOST_REQ_SEP_SLEEP                     = 0x8,
+	DX_HOST_REQ_RESERVE32B                    = INT32_MAX
+};
+
+/* Init. TLV parameters from host to SeP */
+/* Some parameters are used by CC-init flow with DX_HOST_REQ_CC_INIT          *
+ * and the others by the host driver initialization with DX_HOST_REQ_FW_INIT  */
+enum dx_fw_init_tlv_params {
+	DX_FW_INIT_PARAM_NULL = 0,
+	/* Common parameters */
+	DX_FW_INIT_PARAM_FIRST = 1,	/* Param.=FIRST_MAGIC */
+	DX_FW_INIT_PARAM_LAST = 2,	/* Param.=checksum 32b */
+
+	/* CC-init. parameters */
+	DX_FW_INIT_PARAM_DISABLE_MODULES = 3,
+	DX_FW_INIT_PARAM_HOST_AXI_CONFIG = 4,
+	DX_FW_INIT_PARAM_HOST_DEF_APPLET_CONFIG = 5,
+	DX_FW_INIT_PARAM_SEP_FREQ = 6,
+
+	/* Host driver (post-cold-boot) parameters */
+	/* Number of descriptor queues: Length = 1 */
+	DX_FW_INIT_PARAM_NUM_OF_DESC_QS = 0x101,
+	/* The following parameters provide an array with value per queue: */
+	/* Length = num. of queues */
+	DX_FW_INIT_PARAM_DESC_QS_ADDR = 0x102,	/* Queue base addr. */
+	DX_FW_INIT_PARAM_DESC_QS_SIZE = 0x103,	/* Queue size(byte) */
+	/* FW context cache partition (num. of entries) per queue */
+	DX_FW_INIT_PARAM_CTX_CACHE_PART = 0x104,
+	/* SeP request module parameters (msg and response buffers and their sizes) */
+	DX_FW_INIT_PARAM_SEP_REQUEST_PARAMS = 0x105,
+	DX_FW_INIT_PARAM_RESERVE16B = INT16_MAX
+};
+
+/* FW-init. error code encoding - GPR6 contents in DX_SEP_STATE_DONE_FW_INIT  */
+/* | 0xE | Param. Type | Error code |  */
+/* |--4--|-----16------|----12------|  */
+#define DX_FW_INIT_ERR_CODE_SIZE 12
+#define DX_FW_INIT_ERR_PARAM_TYPE_SHIFT DX_FW_INIT_ERR_CODE_SIZE
+#define DX_FW_INIT_ERR_PARAM_TYPE_SIZE 16
+#define DX_FW_INIT_ERR_TYPE_SHIFT \
+	(DX_FW_INIT_ERR_PARAM_TYPE_SHIFT + DX_FW_INIT_ERR_PARAM_TYPE_SIZE)
+#define DX_FW_INIT_ERR_TYPE_SIZE 4
+#define DX_FW_INIT_ERR_TYPE_VAL 0xE
+
+#define DX_SEP_REQUEST_PARAM_MSG_LEN		3
+
+/* Build error word to put in status GPR6 */
+#define DX_FW_INIT_ERR_WORD(err_code, param_type)                   \
+	((DX_FW_INIT_ERR_TYPE_VAL << DX_FW_INIT_ERR_TYPE_SHIFT) |   \
+	 (((param_type) & BITMASK(DX_FW_INIT_ERR_PARAM_TYPE_SIZE)) << \
+	  DX_FW_INIT_ERR_PARAM_TYPE_SHIFT) |                        \
+	 (err_code))
+/* Parse status of DX_SEP_STATE_DONE_FW_INIT */
+#define DX_FW_INIT_IS_SUCCESS(status_word) ((status_word) == 0)
+#define DX_FW_INIT_GET_ERR_CODE(status_word) \
+	 ((status_word) & BITMASK(DX_FW_INIT_ERR_CODE_SIZE))
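+/*
+ * Worked example (follows directly from the encoding above): for err_code
+ * 0x002 (INVALID_LENGTH) on param. type 0x103 (DESC_QS_SIZE),
+ * DX_FW_INIT_ERR_WORD() builds (0xE << 28) | (0x103 << 12) | 0x002
+ * == 0xE0103002, and DX_FW_INIT_GET_ERR_CODE(0xE0103002) == 0x002.
+ */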
+
+/* FW INIT Error codes */
+/* extract from the status word - GPR6 - using DX_FW_INIT_GET_ERR_CODE() */
+enum dx_fw_init_error_code {
+	DX_FW_INIT_ERR_INVALID_TYPE = 0x001,
+	DX_FW_INIT_ERR_INVALID_LENGTH = 0x002,
+	DX_FW_INIT_ERR_INVALID_VALUE = 0x003,
+	DX_FW_INIT_ERR_PARAM_MISSING = 0x004,
+	DX_FW_INIT_ERR_NOT_SUPPORTED = 0x005,
+	DX_FW_INIT_ERR_RNG_FAILURE = 0x00F,
+	DX_FW_INIT_ERR_MALLOC_FAILURE = 0x0FC,
+	DX_FW_INIT_ERR_INIT_FAILURE = 0x0FD,
+	DX_FW_INIT_ERR_TIMEOUT = 0x0FE,
+	DX_FW_INIT_ERR_GENERAL_FAILURE = 0x0FF
+};
+
+#endif /*__DX_INIT_CC_ABI_H__*/
diff --git a/drivers/staging/sep54/dx_init_cc_defs.h b/drivers/staging/sep54/dx_init_cc_defs.h
new file mode 100644
index 0000000..4524c35
--- /dev/null
+++ b/drivers/staging/sep54/dx_init_cc_defs.h
@@ -0,0 +1,163 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef __DX_INIT_CC_DEFS__H__
+#define __DX_INIT_CC_DEFS__H__
+
+/** @file dx_init_cc_defs.h
+*  \brief definitions for the CC54 initialization API
+*
+*  \version
+*  \author avis
+*/
+
+/* Message token to SeP */
+/* CC_INIT definitions */
+#define DX_CC_INIT_HEAD_MSG_TOKEN		0X544B2FBAUL
+
+/* The enum below lists the word offsets inside the CC_Init message. */
+/* The last entry gives the total message length (in words). */
+enum dx_cc_init_msg_offset {
+	DX_CC_INIT_MSG_TOKEN_OFFSET,
+	DX_CC_INIT_MSG_LENGTH_OFFSET,
+	DX_CC_INIT_MSG_OP_CODE_OFFSET,
+	DX_CC_INIT_MSG_FLAGS_OFFSET,
+	DX_CC_INIT_MSG_RESIDENT_IMAGE_OFFSET,
+	DX_CC_INIT_MSG_I_CACHE_IMAGE_OFFSET,
+	DX_CC_INIT_MSG_I_CACHE_DEST_OFFSET,
+	DX_CC_INIT_MSG_I_CACHE_SIZE_OFFSET,
+	DX_CC_INIT_MSG_D_CACHE_ADDR_OFFSET,
+	DX_CC_INIT_MSG_D_CACHE_SIZE_OFFSET,
+	DX_CC_INIT_MSG_CC_INIT_EXT_ADDR_OFFSET,
+	DX_CC_INIT_MSG_USER_CONFIG_OFFSET,
+	DX_CC_INIT_MSG_VRL_ADDR_OFFSET,
+	DX_CC_INIT_MSG_MAGIC_NUM_OFFSET,
+	DX_CC_INIT_MSG_KEY_INDEX_OFFSET,
+	DX_CC_INIT_MSG_KEY_HASH_0_OFFSET,
+	DX_CC_INIT_MSG_KEY_HASH_1_OFFSET,
+	DX_CC_INIT_MSG_KEY_HASH_2_OFFSET,
+	DX_CC_INIT_MSG_KEY_HASH_3_OFFSET,
+	DX_CC_INIT_MSG_CHECK_SUM_OFFSET,
+	DX_CC_INIT_MSG_LENGTH
+};
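+
+/*
+ * A minimal sketch (illustrative) of how these offsets are used: the
+ * message is a flat u32 array sized by the last enum value, with the
+ * header words filled from the constants above.
+ *
+ *	u32 msg[DX_CC_INIT_MSG_LENGTH];
+ *
+ *	msg[DX_CC_INIT_MSG_TOKEN_OFFSET] = DX_CC_INIT_HEAD_MSG_TOKEN;
+ *	msg[DX_CC_INIT_MSG_LENGTH_OFFSET] = DX_CC_INIT_MSG_LENGTH;
+ */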
+
+/* Set this value if key used in the VRL is to be verified against KEY_HASH
+   fields in the CC_INIT message */
+#define DX_CC_INIT_MSG_VRL_KEY_INDEX_INVALID 0xFFFFFFFF
+
+enum dx_cc_init_msg_icache_size {
+	DX_CC_INIT_MSG_ICACHE_SCR_DISABLE_SIZE,
+	DX_CC_INIT_MSG_ICACHE_SCR_256K_SIZE,
+	DX_CC_INIT_MSG_ICACHE_SCR_1M_SIZE,
+	DX_CC_INIT_MSG_ICACHE_SCR_2M_SIZE,
+	DX_CC_INIT_MSG_ICACHE_SCR_4M_SIZE,
+	DX_CC_INIT_MSG_ICACHE_SCR_INVALID_SIZE
+};
+/* Icache sizes enum to log2 -
+ * Map enum of dx_cc_init_msg_icache_size to log2(size)
+ * (-1) for invalid value. */
+#define DX_CC_ICACHE_SIZE_ENUM2LOG { -1, 18, 20, 21, 22, -1 }
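+
+/*
+ * Conversion sketch (illustrative): turning an icache-size enum value
+ * into a size in bytes via the log2 lookup table above; log2 == -1
+ * (disable/invalid) maps to 0 bytes.
+ *
+ *	static const int icache_size_log2[] = DX_CC_ICACHE_SIZE_ENUM2LOG;
+ *	int log2sz = icache_size_log2[icache_size];
+ *	unsigned long size_in_bytes = (log2sz < 0) ? 0 : (1UL << log2sz);
+ */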
+
+#define DX_CC_INIT_D_CACHE_MIN_SIZE_LOG2 17	/* 128KB */
+#define DX_CC_INIT_D_CACHE_MIN_SIZE (1 << DX_CC_INIT_D_CACHE_MIN_SIZE_LOG2)
+#define DX_CC_INIT_D_CACHE_MAX_SIZE_LOG2 27	/* 128MB */
+#define DX_CC_INIT_D_CACHE_MAX_SIZE (1 << DX_CC_INIT_D_CACHE_MAX_SIZE_LOG2)
+
+/* Bit flags for the CC_Init flags word*/
+/* The CC_Init resident image address is valid (it might be passed via VRL) */
+#define DX_CC_INIT_FLAGS_RESIDENT_ADDR_FLAG		0x00000001
+/* The CC_Init I$ address is valid (it might be passed via VRL) */
+#define DX_CC_INIT_FLAGS_I_CACHE_ADDR_FLAG		0x00000002
+/* The CC_Init D$ address is valid (the first CC_Init does not configure D$) */
+#define DX_CC_INIT_FLAGS_D_CACHE_EXIST_FLAG		0x00000004
+/* The CC_Init extension address is valid and should be used */
+#define DX_CC_INIT_FLAGS_INIT_EXT_FLAG			0x00000008
+/* The I$ (and applets) should be encrypted */
+#define DX_CC_INIT_FLAGS_CACHE_ENC_FLAG			0x00000010
+/* The I$ (and applets) should be scrambled */
+#define DX_CC_INIT_FLAGS_CACHE_SCRAMBLE_FLAG		0x00000020
+/* The I$ (and applets) should be copied to new address (Icache address) */
+#define DX_CC_INIT_FLAGS_CACHE_COPY_FLAG		0x00000040
+/* Use the magic number in the CC_Init to verify the VRL */
+#define DX_CC_INIT_FLAGS_MAGIC_NUMBER_FLAG		0x00000080
+
+#define DX_CC_INIT_FLAGS_CACHE_COPY_MASK_FLAG \
+	(DX_CC_INIT_FLAGS_CACHE_ENC_FLAG | \
+	DX_CC_INIT_FLAGS_CACHE_SCRAMBLE_FLAG | \
+	DX_CC_INIT_FLAGS_CACHE_COPY_FLAG)
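+
+/*
+ * Composition sketch (illustrative): a host loading a scrambled I$
+ * image that must be copied to its destination might build the flags
+ * word as:
+ *
+ *	u32 cc_flags = DX_CC_INIT_FLAGS_RESIDENT_ADDR_FLAG |
+ *		       DX_CC_INIT_FLAGS_I_CACHE_ADDR_FLAG |
+ *		       DX_CC_INIT_FLAGS_CACHE_SCRAMBLE_FLAG |
+ *		       DX_CC_INIT_FLAGS_CACHE_COPY_FLAG;
+ */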
+
+/*-------------------------------
+  STRUCTURES
+---------------------------------*/
+struct dx_cc_def_applet_msg {
+	u32 cc_flags;
+	u32 icache_image_addr;
+	u32 vrl_addr;
+	u32 magic_num;
+	u32 ver_key_index;
+	u32 hashed_key_val[4];
+};
+
+/**
+ * struct dx_cc_init_msg - used for passing the parameters to the CC_Init API.
+ * The structure is converted into the CC_Init message
+ * @cc_flags:		Bit flags for the different fields in the message
+ * @res_image_addr:	Resident image address in the HOST memory
+ * @icache_image_addr:	I$ image address in the HOST memory
+ * @icache_addr:	I$ memory allocation in case the I$ is not in place
+ *			(scrambled or encrypted I$)
+ * @icache_size:	I$ size (the total I$ for the Dx image and all
+ *			applets). The size is limited to 256K, 1M, 2M and 4M.
+ * @dcache_addr:	D$ memory allocation in the HOST memory
+ * @dcache_size:	D$ memory allocation size
+ * @init_ext_addr:	Address of the CC_Init extension message in the HOST
+ * @user_config:	User configuration word of the CC_Init message
+ * @vrl_addr:		The address of the VRL in the HOST memory
+ * @magic_num:		Requested VRL magic number
+ * @ver_key_index:	The index of the verification key
+ * @hashed_key_val:	The truncated hash value of the verification key in
+ *			case the OTP keys are not in use
+ */
+struct dx_cc_init_msg {
+	u32 cc_flags;
+	u32 res_image_addr;
+	u32 icache_image_addr;
+	u32 icache_addr;
+	enum dx_cc_init_msg_icache_size icache_size;
+	u32 dcache_addr;
+	u32 dcache_size;
+	u32 init_ext_addr;
+	u32 user_config;
+	u32 vrl_addr;
+	u32 magic_num;
+	u32 ver_key_index;
+	u32 hashed_key_val[4];
+};
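+
+/*
+ * Packing sketch (illustrative; the real conversion is performed by the
+ * driver elsewhere): each struct field is written to the message word
+ * given by enum dx_cc_init_msg_offset, e.g.:
+ *
+ *	msg[DX_CC_INIT_MSG_FLAGS_OFFSET] = init_p->cc_flags;
+ *	msg[DX_CC_INIT_MSG_RESIDENT_IMAGE_OFFSET] = init_p->res_image_addr;
+ *	msg[DX_CC_INIT_MSG_USER_CONFIG_OFFSET] = init_p->user_config;
+ *	(remaining fields follow the same pattern)
+ */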
+
+#endif /*__DX_INIT_CC_DEFS__H__*/
diff --git a/drivers/staging/sep54/dx_reg_base_host.h b/drivers/staging/sep54/dx_reg_base_host.h
new file mode 100644
index 0000000..afe6ea6
--- /dev/null
+++ b/drivers/staging/sep54/dx_reg_base_host.h
@@ -0,0 +1,37 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+#ifndef __DX_REG_BASE_HOST_H__
+#define __DX_REG_BASE_HOST_H__
+
+#define DX_BASE_CC 0x83F00000
+#define DX_BASE_ENV_REGS 0x83F88000
+#define DX_BASE_ENV_CC_MEMORIES 0x83F88000
+#define DX_BASE_ENV_PERF_RAM 0x83F89000
+
+#define DX_BASE_CRY_KERNEL     0x0UL
+#define DX_BASE_ROM     0x83F80000
+
+#endif /*__DX_REG_BASE_HOST_H__*/
diff --git a/drivers/staging/sep54/dx_reg_common.h b/drivers/staging/sep54/dx_reg_common.h
new file mode 100644
index 0000000..dd43153
--- /dev/null
+++ b/drivers/staging/sep54/dx_reg_common.h
@@ -0,0 +1,39 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+#ifndef __DX_REG_COMMON_H__
+#define __DX_REG_COMMON_H__
+
+/* \file dx_reg_common.h
+   This file includes additions to the HW-specific information that is missing
+   from the header files provided by the HW team */
+
+#define DX_ICACHE_SIZE 0x08000000UL
+#define DX_DCACHE_SIZE 0x08000000UL
+#define DX_DEV_SIGNATURE 0xDCC54000UL
+
+#define DX_DD_REGION_MASK_SIZE 25	/* Number of bits in direct-access region */
+
+#endif /*__DX_REG_COMMON_H__*/
diff --git a/drivers/staging/sep54/dx_sep_kapi.h b/drivers/staging/sep54/dx_sep_kapi.h
new file mode 100644
index 0000000..cb71f6a
--- /dev/null
+++ b/drivers/staging/sep54/dx_sep_kapi.h
@@ -0,0 +1,39 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+/**
+ * Kernel services API of CC driver:
+ * 1. Host-to-SeP Applet request API.
+ * 2. SeP Request agent API.
+ * 3. Power state control (sleep, warm-boot)
+ */
+#ifndef __DX_SEP_KAPI_H__
+#define __DX_SEP_KAPI_H__
+
+#include "dx_sepapp_kapi.h"
+#include "dx_sep_req_kapi.h"
+#include "dx_sep_power_kapi.h"
+
+#endif /*__DX_SEP_KAPI_H__*/
diff --git a/drivers/staging/sep54/dx_sep_power_kapi.h b/drivers/staging/sep54/dx_sep_power_kapi.h
new file mode 100644
index 0000000..43cb59b
--- /dev/null
+++ b/drivers/staging/sep54/dx_sep_power_kapi.h
@@ -0,0 +1,78 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+/**
+ * Kernel services API of power state control (sleep, warm-boot)
+ */
+#ifndef __DX_SEP_POWER_KAPI_H__
+#define __DX_SEP_POWER_KAPI_H__
+
+#include <linux/types.h>
+
+/******************************************/
+/* Power state control (sleep, warm-boot) */
+/******************************************/
+
+/**
+ * Power states of SeP
+ */
+enum dx_sep_power_state {
+	DX_SEP_POWER_INVALID = -1,	/* SeP is in unexpected (error) state */
+	DX_SEP_POWER_OFF = 0,	/* SeP is assumed to be off (unreachable) */
+	DX_SEP_POWER_BOOT,	/* SeP is in (warm) boot process */
+	DX_SEP_POWER_IDLE,	/* SeP is running but no request is pending */
+	DX_SEP_POWER_ACTIVE,	/* SeP is running and processing */
+	DX_SEP_POWER_HIBERNATED	/* SeP is in hibernated (sleep) state */
+};
+
+/* Prototype for callback on sep state change */
+typedef void (*dx_sep_state_change_callback_t) (unsigned long cookie);
+
+/**
+ * dx_sep_power_state_set() - Change power state of SeP (CC)
+ *
+ * @req_state:	The requested power state (_HIBERNATED or _ACTIVE)
+ *
+ * Request changing of power state to given state and block until transition
+ * is completed.
+ * Requesting _HIBERNATED is allowed only from _ACTIVE state.
+ * Requesting _ACTIVE is allowed only after CC was powered back on (warm boot).
+ * Return codes:
+ * 0 -	Power state change completed.
+ * -EINVAL -	This request is not allowed in current SeP state or req_state
+ *		value is invalid.
+ * -EBUSY -	State change request ignored because SeP is busy (primarily,
+ *		when requesting hibernation while SeP is processing something).
+ * -ETIME -	Request timed out (primarily, when asking for _ACTIVE)
+ */
+int dx_sep_power_state_set(enum dx_sep_power_state req_state);
+
+/**
+ * dx_sep_power_state_get() - Get the current power state of SeP (CC)
+ * @state_jiffies_p:	The "jiffies" value at which given state was detected.
+ */
+enum dx_sep_power_state dx_sep_power_state_get(unsigned long *state_jiffies_p);
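+
+/*
+ * Usage sketch (illustrative; error handling elided): hibernate SeP
+ * before a platform suspend and reactivate it after warm boot.
+ *
+ *	rc = dx_sep_power_state_set(DX_SEP_POWER_HIBERNATED);
+ *	if (rc == -EBUSY)
+ *		(SeP is still processing - retry or abort the suspend)
+ *
+ *	(... platform resume / SeP warm boot ...)
+ *
+ *	rc = dx_sep_power_state_set(DX_SEP_POWER_ACTIVE);
+ */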
+
+#endif /*__DX_SEP_POWER_KAPI_H__*/
diff --git a/drivers/staging/sep54/dx_sep_req_kapi.h b/drivers/staging/sep54/dx_sep_req_kapi.h
new file mode 100644
index 0000000..b4ff56f
--- /dev/null
+++ b/drivers/staging/sep54/dx_sep_req_kapi.h
@@ -0,0 +1,78 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+/**
+ * Kernel services API of SeP Request agent API.
+ */
+#ifndef __DX_SEP_REQ_KAPI_H__
+#define __DX_SEP_REQ_KAPI_H__
+
+#include <linux/types.h>
+
+/*******************************/
+/* SeP-to-Host request agents  */
+/*******************************/
+
+/**
+ * dx_sep_req_register_agent() - Register an agent
+ * @agent_id: The agent ID
+ * @max_buf_size: A pointer to the max buffer size
+ *
+ * Returns int 0 on success
+ */
+int dx_sep_req_register_agent(u8 agent_id, u32 *max_buf_size);
+
+/**
+ * dx_sep_req_unregister_agent() - Unregister an agent
+ * @agent_id: The agent ID
+ *
+ * Returns int 0 on success
+ */
+int dx_sep_req_unregister_agent(u8 agent_id);
+
+/**
+ * dx_sep_req_wait_for_request() - Wait for an incoming SeP request
+ * @agent_id: The agent ID
+ * @sep_req_buf_p: Pointer to the incoming request buffer
+ * @req_buf_size: Pointer to the incoming request size
+ * @timeout: Time to wait for an incoming request in jiffies
+ *
+ * Returns int 0 on success
+ */
+int dx_sep_req_wait_for_request(u8 agent_id, u8 *sep_req_buf_p,
+				u32 *req_buf_size, u32 timeout);
+
+/**
+ * dx_sep_req_send_response() - Send a response to the sep
+ * @agent_id: The agent ID
+ * @host_resp_buf_p: Pointer to the outgoing response buffer
+ * @resp_buf_size: Pointer to the outgoing response size
+ *
+ * Returns int 0 on success
+ */
+int dx_sep_req_send_response(u8 agent_id, u8 *host_resp_buf_p,
+			     u32 resp_buf_size);
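+
+/*
+ * Usage sketch of the request-agent flow (illustrative; MY_AGENT_ID,
+ * the buffers and the timeout are hypothetical):
+ *
+ *	u32 max_buf_size;
+ *
+ *	rc = dx_sep_req_register_agent(MY_AGENT_ID, &max_buf_size);
+ *	while (running) {
+ *		rc = dx_sep_req_wait_for_request(MY_AGENT_ID, req_buf,
+ *						 &req_buf_size, timeout);
+ *		if (rc == 0)
+ *			rc = dx_sep_req_send_response(MY_AGENT_ID, resp_buf,
+ *						      resp_buf_size);
+ *	}
+ *	dx_sep_req_unregister_agent(MY_AGENT_ID);
+ */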
+
+#endif /*__DX_SEP_REQ_KAPI_H__*/
diff --git a/drivers/staging/sep54/dx_sepapp_kapi.h b/drivers/staging/sep54/dx_sepapp_kapi.h
new file mode 100644
index 0000000..c85f6d3
--- /dev/null
+++ b/drivers/staging/sep54/dx_sepapp_kapi.h
@@ -0,0 +1,135 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+/**
+ * Kernel API for Host-to-SeP Applet request API.
+ */
+#ifndef __DX_SEPAPP_KAPI_H__
+#define __DX_SEPAPP_KAPI_H__
+
+#include <linux/types.h>
+#include "dx_driver_abi.h"
+#include "crypto_api.h"
+
+/**
+ * struct dxdi_kmemref - Kernel memory reference
+ * @dma_direction:	Planned DMA direction
+ * @sgl:	Scatter/Gather list of the given buffer
+ * @nbytes:	Size in bytes of data referenced by "sgl"
+ */
+struct dxdi_kmemref {
+	enum dxdi_data_direction dma_direction;
+	struct scatterlist *sgl;
+	unsigned long nbytes;	/* data size */
+};
+
+/**
+ * dxdi_sepapp_kparams - Kernel parameters description for dx_sepapp_* func.
+ * @params_types:	The type of each parameter in the params[] array
+ * @params:		The given parameters description
+ */
+struct dxdi_sepapp_kparams {
+	enum dxdi_sepapp_param_type params_types[SEP_APP_PARAMS_MAX];
+	union {
+		struct dxdi_val_param val;	/* DXDI_SEPAPP_PARAM_VAL */
+		struct dxdi_kmemref kmemref;	/* DXDI_SEPAPP_PARAM_MEMREF */
+	} params[SEP_APP_PARAMS_MAX];
+};
+
+#define DX_SEPAPP_CLIENT_CTX_NULL NULL
+
+/*******************************/
+/* Host-to-SeP Applet requests */
+/*******************************/
+
+/**
+ * dx_sepapp_context_alloc() - Allocate client context for SeP applets ops.
+ * Returns DX_SEPAPP_CLIENT_CTX_NULL on failure.
+ */
+void *dx_sepapp_context_alloc(void);
+
+/**
+ * dx_sepapp_context_free() - Free client context.
+ *
+ * @ctx: Client context to free.
+ */
+void dx_sepapp_context_free(void *ctx);
+
+/**
+ * dx_sepapp_session_open() - Open a session with a SeP applet
+ *
+ * @ctx:		SeP client context
+ * @sepapp_uuid:	Target applet UUID
+ * @auth_method:	Session connection authentication method
+ *			(Currently only 0/Public is supported)
+ * @auth_data:		Pointer to authentication data - Should be NULL
+ * @open_params:	Parameters for session opening
+ * @session_id:		Returned session ID (on success)
+ * @ret_origin:		Return code origin
+ *
+ * If ret_origin is not DXDI_SEP_MODULE_APP (i.e., the error is above the
+ * applet), the return code must be 0 on success. For DXDI_SEP_MODULE_APP it
+ * is an applet-specific return code.
+ */
+int dx_sepapp_session_open(void *ctx,
+			   u8 *sepapp_uuid,
+			   u32 auth_method,
+			   void *auth_data,
+			   struct dxdi_sepapp_kparams *open_params,
+			   int *session_id, enum dxdi_sep_module *ret_origin);
+
+/**
+ * dx_sepapp_session_close() - Close a session with an applet
+ *
+ * @ctx:	SeP client context
+ * @session_id: Session ID as returned from dx_sepapp_session_open()
+ *
+ * Returns 0 on success
+ */
+int dx_sepapp_session_close(void *ctx, int session_id);
+
+/**
+ * dx_sepapp_command_invoke() - Initiate command in the applet associated with
+ *				given session ID
+ *
+ * @ctx:	SeP client context
+ * @session_id:	The target session ID
+ * @command_id:	The ID of the command to initiate (applet-specific)
+ * @command_params:	The command parameters
+ * @ret_origin:	The origin of the return code
+ */
+int dx_sepapp_command_invoke(void *ctx,
+			     int session_id,
+			     u32 command_id,
+			     struct dxdi_sepapp_kparams *command_params,
+			     enum dxdi_sep_module *ret_origin);
+
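+/*
+ * async_sepapp_command_invoke() - Asynchronous variant of
+ * dx_sepapp_command_invoke(); takes an additional @areq_ctx (see
+ * crypto_api.h) for completion notification.
+ */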
+int async_sepapp_command_invoke(void *ctx,
+			     int session_id,
+			     u32 command_id,
+			     struct dxdi_sepapp_kparams *command_params,
+			     enum dxdi_sep_module *ret_origin,
+			     struct async_req_ctx *areq_ctx);
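+
+/*
+ * Usage sketch of a full applet session (illustrative; my_uuid,
+ * MY_CMD_ID and my_params are hypothetical):
+ *
+ *	void *ctx = dx_sepapp_context_alloc();
+ *	enum dxdi_sep_module origin;
+ *	int session_id;
+ *
+ *	if (ctx == DX_SEPAPP_CLIENT_CTX_NULL)
+ *		return -ENOMEM;
+ *	rc = dx_sepapp_session_open(ctx, my_uuid, 0, NULL, NULL,
+ *				    &session_id, &origin);
+ *	if (rc == 0) {
+ *		rc = dx_sepapp_command_invoke(ctx, session_id, MY_CMD_ID,
+ *					      &my_params, &origin);
+ *		dx_sepapp_session_close(ctx, session_id);
+ *	}
+ *	dx_sepapp_context_free(ctx);
+ */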
+
+#endif /*__DX_SEPAPP_KAPI_H__*/
diff --git a/drivers/staging/sep54/lli_mgr.c b/drivers/staging/sep54/lli_mgr.c
new file mode 100644
index 0000000..414157e
--- /dev/null
+++ b/drivers/staging/sep54/lli_mgr.c
@@ -0,0 +1,2110 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+ /*!
+  * \file lli_mgr.c
+  * \brief LLI logic: Building MLLI tables from user virtual memory buffers
+  */
+
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_LLI_MGR
+
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/pagemap.h>
+#include "sep_ctx.h"
+#include "dx_driver.h"
+#include "sep_log.h"
+#include "lli_mgr.h"
+
+/* Limitation of DLLI buffer size due to size of "SIZE" field */
+/* Set this to 0 in order to disable DLLI support */
+/*#define DLLI_BUF_LIMIT \
+	((1UL << SEP_SW_DESC_CRYPTO_OP_IFT_SIZE_BIT_SIZE) - 1)*/
+/* For now, limit to the size planned for DLLI_AUX_BUF_LIMIT because we
+   must always have both din and dout as DLLI or both as MLLI. When mixing
+   is supported, this limit can be raised to the one commented out above */
+#define DLLI_BUF_LIMIT 2048
+
+/* The following define sets the size up to which we optimize for a DLLI
+   buffer descriptor even for non-contiguous buffers. If the buffer is not
+   physically contiguous (or not cache aligned on platforms with no cache
+   coherency), an auxiliary buffer is allocated and the data copied
+   to/from it. */
+#define DLLI_AUX_BUF_LIMIT 2048
+/* Note: The value of DLLI_AUX_BUF_LIMIT is tuned based on empirical tests
+   on our system so the memcpy overhead does not "consume" the performance
+   benefit of using DLLI */
+
+#if DLLI_AUX_BUF_LIMIT > DLLI_BUF_LIMIT
+#error DLLI_AUX_BUF_LIMIT is too large. It must not exceed DLLI_BUF_LIMIT
+#endif
+
+#if (SEP_SUPPORT_SHA > 256)
+#define MAX_CRYPTO_BLOCK_LOG2 7
+#else
+#define MAX_CRYPTO_BLOCK_LOG2 6
+#endif
+#define MAX_CRYPTO_BLOCK_SIZE (1 << MAX_CRYPTO_BLOCK_LOG2)
+#define MAX_CRYPTO_BLOCK_MASK (MAX_CRYPTO_BLOCK_SIZE - 1)
+
+#define SEP_LLI_ENTRY_BYTE_SIZE (SEP_LLI_ENTRY_WORD_SIZE * sizeof(u32))
+
+/* Index of first LLI which encodes data buffer
+   (after "next VA" and "next DMA") */
+#define FIRST_DATA_LLI_INDEX 2
+
+/* Overhead for tables linked list:
+ * one entry for FW linked list + one for host/kernel linked list
+ * (required due to difference between dma_addr and kernel virt. addr.) +
+ * last entry is reserved to the protected entry with the stop bit */
+#define SEP_MLLI_LINK_TO_NEXT_OVERHEAD 3
+
+/* Macros to set/get the link-to-next virtual address for the next MLLI.
+ * These macros rely on the availability of an extra LLI entry per MLLI
+ * table. They use the space of the first entry so the "SeP" table starts
+ * after it.
+ */
+#define SEP_MLLI_SET_NEXT_VA(cur_mlli_p, next_mlli_p) \
+do { \
+	u32 __phys_ptr_ = virt_to_phys(next_mlli_p) & (DMA_BIT_MASK(32));\
+	SEP_LLI_SET(cur_mlli_p, ADDR, __phys_ptr_);\
+} while (0)
+#define SEP_MLLI_SET_NEXT_VA_NULL(cur_mlli_start) \
+		SEP_MLLI_SET_NEXT_VA(cur_mlli_start, 0)
+#define SEP_MLLI_GET_NEXT_VA(cur_mlli_start) \
+	(SEP_LLI_GET((cur_mlli_start), ADDR) == 0 ? NULL : \
+	 ((u32 *)phys_to_virt(SEP_LLI_GET((cur_mlli_start), ADDR))))
+
+
+#define CACHE_LINE_MASK (L1_CACHE_BYTES - 1)
+
+/* Number of data bytes to gather at the last LLI of the Din buffer */
+#define DIN_LAST_LLI_GATHER_SIZE 32
+
+/* Select the client buffer amount to copy into aux. buffers (head/tail) */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+#if (DIN_LAST_LLI_GATHER_SIZE > L1_CACHE_BYTES)
+#define EDGE_BUFS_POOL_ITEM_SIZE DIN_LAST_LLI_GATHER_SIZE
+#else
+#define EDGE_BUFS_POOL_ITEM_SIZE L1_CACHE_BYTES
+#endif
+#else	/* Coherent cache - only tail buffer required per CC requirements */
+#define EDGE_BUFS_POOL_ITEM_SIZE DIN_LAST_LLI_GATHER_SIZE
+#endif
+
+/* Similar to for_each_sg but no need for nents - runs until NULL */
+#define for_each_valid_sg(sglist, cur_sge)	\
+	for (cur_sge = (sglist); cur_sge != NULL; cur_sge = sg_next(cur_sge))
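+
+/*
+ * Illustrative use (a sketch of what a helper such as the
+ * get_sgl_nents() declared below might do):
+ *
+ *	struct scatterlist *cur_sge;
+ *	unsigned int nents = 0;
+ *
+ *	for_each_valid_sg(sgl, cur_sge)
+ *		nents++;
+ */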
+
+/**
+ * struct llimgr_obj - The LLI manager object (exposed as llimgr_h)
+ * @dev:	The associated device context (for DMA operations)
+ * @mlli_cache:	DMA coherent memory pool for the MLLI tables
+ * @edge_bufs_pool:	Pool for auxiliary buffers used instead of user buffer
+ *			start/end. Used for last LLI data mirroring to fulfil
+ *			requirement of 32B on last LLI.
+ *			In case of a non-coherent cache and a data buffer
+ *			which starts/ends unaligned to cache line we should
+ *			allocate external buffer to be used as the first/last
+ *			LLI entry instead of the "tail". This is required to
+ *			avoid cache incoherency due to access by other process
+ *			entities to the cache lines where the data is. This is
+ *			required only for the output buffer where the same cache
+ *			line is accessed by the host processor while SeP should
+ *			DMA output data into it.
+ * @dlli_bufs_pool:	Pool for client buffers up to DLLI_AUX_BUF_LIMIT which
+ *			are not phys. contig. and copied into it in order to be
+ *			physically contiguous, thus suitable for DLLI access.
+ * @max_lli_num:	Maximum LLI entries number in MLLI table.
+ * @max_data_per_mlli:	Maximum bytes of data mapped by each MLLI table.
+ *
+ */
+struct llimgr_obj {
+	struct device *dev;
+	struct dma_pool *mlli_cache;
+	struct dma_pool *edge_bufs_pool;
+	struct dma_pool *dlli_bufs_pool;
+	unsigned int max_lli_num;
+	unsigned long max_data_per_mlli;
+};
+
+/* Iterator state for building the MLLI tables list */
+struct mlli_tables_list_iterator {
+	u32 *prev_mlli_table_p;
+	u32 *cur_mlli_table_p;
+	dma_addr_t cur_mlli_dma_addr;
+	unsigned int next_lli_idx; /* Data LLI (After FIRST_DATA_LLI_INDEX) */
+	unsigned long cur_mlli_accum_data; /* Accumulated in current MLLI */
+};
+
+static void cleanup_mlli_tables_list(struct llimgr_obj *llimgr_p,
+				     struct mlli_tables_list *mlli_tables_ptr,
+				     int is_data_dirty);
+static inline unsigned int get_sgl_nents(struct scatterlist *sgl);
+
+/**
+ * llimgr_create() - Create LLI-manager object
+ * @dev:	 Device context
+ * @mlli_table_size:	 The maximum size of an MLLI table in bytes
+ *
+ * Returns llimgr_h Created object handle or LLIMGR_NULL_HANDLE if failed
+ */
+void *llimgr_create(struct device *dev, unsigned long mlli_table_size)
+{
+	struct llimgr_obj *new_llimgr_p;
+	unsigned int num_of_full_page_llis;
+
+	new_llimgr_p = kmalloc(sizeof(struct llimgr_obj), GFP_KERNEL);
+	if (new_llimgr_p == NULL)
+		return LLIMGR_NULL_HANDLE;
+	new_llimgr_p->dev = dev;
+	/* create dma "coherent" memory pool for MLLI tables */
+	new_llimgr_p->mlli_cache = dma_pool_create("dx_sep_mlli_tables", dev,
+						   mlli_table_size,
+						   L1_CACHE_BYTES, 0);
+	if (new_llimgr_p->mlli_cache == NULL) {
+		pr_err("Failed creating DMA pool for MLLI tables\n");
+		goto create_failed_mlli_pool;
+	}
+
+	/* Create pool for holding buffer "tails" which share cache lines with
+	 * other data buffers */
+	new_llimgr_p->edge_bufs_pool = dma_pool_create("dx_sep_edge_bufs", dev,
+						       EDGE_BUFS_POOL_ITEM_SIZE,
+						       EDGE_BUFS_POOL_ITEM_SIZE,
+						       0);
+	if (new_llimgr_p->edge_bufs_pool == NULL) {
+		pr_err("Failed creating DMA pool for edge buffers\n");
+		goto create_failed_edge_bufs_pool;
+	}
+
+	new_llimgr_p->max_lli_num =
+	    ((mlli_table_size / SEP_LLI_ENTRY_BYTE_SIZE) -
+	     SEP_MLLI_LINK_TO_NEXT_OVERHEAD);
+	num_of_full_page_llis = new_llimgr_p->max_lli_num;
+	num_of_full_page_llis -= 2;/*First and last entries are partial pages */
+	num_of_full_page_llis -= 1;	/* One less for end aux. buffer */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	num_of_full_page_llis -= 1;	/* One less for start aux. buffer */
+#endif
+	/* Always a multiple of PAGE_SIZE - assures that it is also a
+	 * crypto block multiple. */
+	new_llimgr_p->max_data_per_mlli = num_of_full_page_llis * PAGE_SIZE;
+
+#if DLLI_AUX_BUF_LIMIT > 0
+	new_llimgr_p->dlli_bufs_pool = dma_pool_create("dx_sep_dlli_bufs", dev,
+						       DLLI_AUX_BUF_LIMIT,
+						       DLLI_AUX_BUF_LIMIT, 0);
+	if (new_llimgr_p->dlli_bufs_pool == NULL) {
+		pr_err("Failed creating DMA pool for DLLI buffers\n");
+		goto create_failed_dlli_bufs_pool;
+	}
+#endif
+
+	return new_llimgr_p;
+
+ create_failed_dlli_bufs_pool:
+	dma_pool_destroy(new_llimgr_p->edge_bufs_pool);
+ create_failed_edge_bufs_pool:
+	dma_pool_destroy(new_llimgr_p->mlli_cache);
+ create_failed_mlli_pool:
+	kfree(new_llimgr_p);
+	return LLIMGR_NULL_HANDLE;
+}
+
+/**
+ * llimgr_destroy() - Destroy (free resources of) given LLI-manager object
+ * @llimgr:	 LLI-manager object handle
+ *
+ */
+void llimgr_destroy(void *llimgr)
+{
+	struct llimgr_obj *llimgr_p = (struct llimgr_obj *)llimgr;
+
+#if DLLI_AUX_BUF_LIMIT > 0
+	dma_pool_destroy(llimgr_p->dlli_bufs_pool);
+#endif
+	dma_pool_destroy(llimgr_p->edge_bufs_pool);
+	dma_pool_destroy(llimgr_p->mlli_cache);
+	kfree(llimgr_p);
+}
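+
+/*
+ * Usage sketch (illustrative; PAGE_SIZE is just an example table size):
+ *
+ *	void *llimgr = llimgr_create(dev, PAGE_SIZE);
+ *
+ *	if (llimgr == LLIMGR_NULL_HANDLE)
+ *		return -ENOMEM;
+ *	(... map client buffers to MLLI/DLLI ...)
+ *	llimgr_destroy(llimgr);
+ */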
+
+/*****************************************/
+/* Auxiliary buffers handling functions  */
+/*****************************************/
+
+/**
+ * calc_aux_bufs_size() - Calculate required aux. buffers for given user buffer
+ * @buf_start:	A pointer value at buffer start (used to calculate alignment)
+ * @buf_size:	User buffer size in bytes
+ * @data_direction:	DMA direction
+ * @start_aux_buf_size_p:	Returned required aux. buffer size at start
+ * @end_aux_buf_size_p:	Returned required aux. buffers size at end
+ *
+ * Returns void
+ */
+static void calc_aux_bufs_size(const unsigned long buf_start,
+			       unsigned long buf_size,
+			       enum dma_data_direction data_direction,
+			       unsigned long *start_aux_buf_size_p,
+			       unsigned long *end_aux_buf_size_p)
+{
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	const bool is_dout = ((data_direction == DMA_BIDIRECTIONAL) ||
+			      (data_direction == DMA_FROM_DEVICE));
+#endif				/*CONFIG_NOT_COHERENT_CACHE */
+
+	/* Calculate required aux. buffers: cache line tails + last w/prelast */
+	*start_aux_buf_size_p = 0;
+	*end_aux_buf_size_p = 0;
+
+	if (buf_size == 0)
+		return;
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	/* start of buffer unaligned to cache line... */
+	if ((is_dout) && (buf_start & CACHE_LINE_MASK)) {
+		*start_aux_buf_size_p =	/* Remainder to end of cache line */
+		    L1_CACHE_BYTES - (buf_start & CACHE_LINE_MASK);
+		/* But not more than buffer size */
+		if (*start_aux_buf_size_p > buf_size)
+			*start_aux_buf_size_p = buf_size;
+	}
+#endif				/*CONFIG_NOT_COHERENT_CACHE */
+
+	/* last 32 B must always be on last LLI entry */
+	/* Put 32 B or the whole buffer if smaller than 32 B */
+	*end_aux_buf_size_p = buf_size >= DIN_LAST_LLI_GATHER_SIZE ?
+	    DIN_LAST_LLI_GATHER_SIZE : buf_size;
+	if ((*end_aux_buf_size_p + *start_aux_buf_size_p) > buf_size) {
+		/* End aux. buffer covers part of
+		 * start aux. buffer - leave only remainder in
+		 * start aux. buffer. */
+		*start_aux_buf_size_p = buf_size - *end_aux_buf_size_p;
+	}
+#ifdef DEBUG
+	if (((*end_aux_buf_size_p + *start_aux_buf_size_p) > buf_size) ||
+	    (*start_aux_buf_size_p > EDGE_BUFS_POOL_ITEM_SIZE) ||
+	    (*end_aux_buf_size_p > EDGE_BUFS_POOL_ITEM_SIZE)) {
+		pr_err(
+			    "Invalid aux. buffer sizes: buf_size=%lu B, start_aux=%lu B, end_aux=%lu B\n",
+			    buf_size, *start_aux_buf_size_p,
+			    *end_aux_buf_size_p);
+	} else {
+		pr_debug
+		    ("buf_size=%lu B, start_aux=%lu B, end_aux=%lu B\n",
+		     buf_size, *start_aux_buf_size_p, *end_aux_buf_size_p);
+	}
+#endif
+}
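+
+/*
+ * Worked example (assuming L1_CACHE_BYTES == 32 and a non-coherent
+ * cache): a 100 B DMA_FROM_DEVICE buffer starting 7 B into a cache
+ * line gets start_aux = 32 - 7 = 25 B and end_aux = 32 B
+ * (DIN_LAST_LLI_GATHER_SIZE), leaving 100 - 57 = 43 B for the main
+ * S/G mapping.
+ */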
+
+#ifdef DEBUG
+static void dump_client_buf_pages(const struct client_dma_buffer
+				  *client_dma_buf_p)
+{
+	int i;
+	struct scatterlist *sgentry;
+
+	if (client_dma_buf_p->user_buf_ptr != NULL) {
+		pr_debug(
+			      "Client DMA buffer %p maps %lu B over %d pages at user_ptr=0x%p (dma_dir=%d):\n",
+			      client_dma_buf_p, client_dma_buf_p->buf_size,
+			      client_dma_buf_p->num_of_pages,
+			      client_dma_buf_p->user_buf_ptr,
+			      client_dma_buf_p->dma_direction);
+	} else {
+		pr_debug("Client DMA buffer %p maps %lu B (dma_dir=%d):\n",
+			      client_dma_buf_p, client_dma_buf_p->buf_size,
+			      client_dma_buf_p->dma_direction);
+	}
+
+	if (client_dma_buf_p->user_pages != NULL) {
+		pr_debug("%d user_pages:\n",
+			      client_dma_buf_p->num_of_pages);
+		for (i = 0; i < client_dma_buf_p->num_of_pages; i++) {
+			pr_debug("%d. phys_addr=0x%08lX\n", i,
+				      page_to_pfn(client_dma_buf_p->
+						  user_pages[i]) << PAGE_SHIFT);
+		}
+	}
+#if 0
+	pr_debug("sg_head:\n");
+	i = 0;
+	for_each_valid_sg(client_dma_buf_p->sg_head, sgentry) {
+		pr_debug("%d. phys_addr=0x%08llX len=0x%08X\n", i,
+			      sg_phys(sgentry), sgentry->length);
+		i++;
+	}
+#endif
+	pr_debug("sg_main:\n");
+	i = 0;
+	for_each_valid_sg(client_dma_buf_p->sg_main, sgentry) {
+		pr_debug("%d. dma_addr=0x%08llX len=0x%08X\n", i,
+			(long long unsigned int)sg_dma_address(sgentry),
+			sg_dma_len(sgentry));
+		i++;
+	}
+	pr_debug("sg_tail:\n");
+	i = 0;
+	for_each_valid_sg(client_dma_buf_p->sg_tail, sgentry) {
+		pr_debug("%d. phys_addr=0x%08llX len=0x%08X\n", i,
+				(long long unsigned int)sg_phys(sgentry),
+				sgentry->length);
+		i++;
+	}
+	pr_debug("sg_save4next:\n");
+	i = 0;
+	for_each_valid_sg(client_dma_buf_p->sg_save4next, sgentry) {
+		pr_debug("%d. phys_addr=0x%08llX len=0x%08X\n", i,
+				(long long unsigned int)sg_phys(sgentry),
+				sgentry->length);
+		i++;
+	}
+
+}
+#endif /*DEBUG*/
+
+/**
+ * create_sg_list() - Allocate/create S/G list for given page array,
+ * @page_array:	 The source pages array
+ * @offset_in_first_page:	 Offset in bytes in the first page of page_array
+ * @sg_data_size:	 Number of bytes to include in the created S/G list
+ * @new_sg_list_p:	 The allocated S/G list buffer
+ * @next_page_p:	 The next page to map (for incremental list creation)
+ * @next_page_offset_p:	 The offset to start in next page to map
+ *	(The list is allocated by this func. and should be freed by the caller)
+ *
+ * Allocate/create S/G list for given page array,
+ * starting at given offset in the first page spanning across given sg_data_size
+ * Returns int 0 for success
+ */
+static int create_sg_list(struct page **pages_array,
+			  unsigned long offset_in_first_page,
+			  unsigned long sg_data_size,
+			  struct scatterlist **new_sg_list_p,
+			  struct page ***next_page_p,
+			  unsigned long *next_page_offset_p)
+{
+	const unsigned long end_offset =
+	    offset_in_first_page + sg_data_size - 1;
+	const unsigned long num_of_sg_ents = (end_offset >> PAGE_SHIFT) + 1;
+	const unsigned long size_of_first_page = (num_of_sg_ents == 1) ?
+	    sg_data_size : (PAGE_SIZE - offset_in_first_page);
+	const unsigned long size_of_last_page = (end_offset & ~PAGE_MASK) + 1;
+	struct scatterlist *cur_sge;
+	int i;
+
+	if (sg_data_size == 0) {	/* Empty S/G list */
+		*new_sg_list_p = NULL;
+		*next_page_p = pages_array;
+		*next_page_offset_p = offset_in_first_page;
+		return 0;
+	}
+
+	*new_sg_list_p =
+	    kmalloc(sizeof(struct scatterlist) * num_of_sg_ents, GFP_KERNEL);
+	if (unlikely(*new_sg_list_p == NULL)) {
+		pr_err("Failed allocating sglist array for %lu entries\n",
+			    num_of_sg_ents);
+		return -ENOMEM;
+	}
+
+	/* Set default for next table assuming full pages */
+	*next_page_p = pages_array + num_of_sg_ents;
+	*next_page_offset_p = 0;
+
+	sg_init_table(*new_sg_list_p, num_of_sg_ents);
+	cur_sge = *new_sg_list_p;
+	/* First page is partial
+	 * - May start in middle of page
+	 * - May end in middle of page if single page */
+	sg_set_page(cur_sge, pages_array[0],
+		    size_of_first_page, offset_in_first_page);
+	/* Handle following (whole) pages, but last (which may be partial) */
+	for (i = 1; i < (num_of_sg_ents - 1); i++) {
+		cur_sge = sg_next(cur_sge);
+		if (unlikely(cur_sge == NULL)) {
+			pr_err(
+				    "Reached end of sgl before (%d) num_of_sg_ents (%lu)\n",
+				    i, num_of_sg_ents);
+			kfree(*new_sg_list_p);
+			*new_sg_list_p = NULL;
+			return -EINVAL;
+		}
+		sg_set_page(cur_sge, pages_array[i], PAGE_SIZE, 0);
+	}
+	/* Handle last (partial?) page */
+	if (num_of_sg_ents > 1) {
+		/* only if was not handled already as first */
+		cur_sge = sg_next(cur_sge);
+		if (unlikely(cur_sge == NULL)) {
+			pr_err(
+				    "Cannot put last page in given num_of_sg_ents (%lu)\n",
+				    num_of_sg_ents);
+			kfree(*new_sg_list_p);
+			*new_sg_list_p = NULL;
+			return -EINVAL;
+		}
+		sg_set_page(cur_sge,
+			    pages_array[num_of_sg_ents - 1],
+			    size_of_last_page, 0);
+		if (size_of_last_page < PAGE_SIZE) {
+			(*next_page_p)--; /* Last page was not fully consumed */
+			*next_page_offset_p = size_of_last_page;
+		}
+	} else {		/* First was last */
+		if ((offset_in_first_page + size_of_first_page) < PAGE_SIZE) {
+			(*next_page_p)--; /* Page was not fully consumed */
+			*next_page_offset_p =
+			    (offset_in_first_page + size_of_first_page);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * split_sg_list() - Split SG list at given offset.
+ * @sgl_to_split:	The SG list to split
+ * @split_sg_list:	Returned new SG list which starts with second half
+ *			of split entry.
+ * @split_offset:	Size in bytes of entry part to leave on original list.
+ *
+ * Split SG list at given offset.
+ * The entry at the split point is shortened to the given length and the
+ * remainder is moved to a new SG entry that is chained to the rest of the
+ * original list.
+ * Returns int 0 on success
+ */
+static int split_sg_list(struct scatterlist *sgl_to_split,
+			 struct scatterlist **split_sg_list,
+			 unsigned long split_offset)
+{
+	struct scatterlist *cur_sge = sgl_to_split;
+
+	/* Scan list until consuming enough for first part to fit in cur_sge */
+	while ((cur_sge != NULL) && (split_offset > cur_sge->length)) {
+		split_offset -= cur_sge->length;
+		cur_sge = sg_next(cur_sge);
+	}
+	/* After the loop above, split_offset is actually the offset within
+	 * cur_sge */
+
+	if (cur_sge == NULL)
+		return -ENOMEM;	/* SG list too short for given split_offset */
+
+	if (split_offset < cur_sge->length) {
+		/* Split entry */
+		*split_sg_list =
+		    kmalloc(sizeof(struct scatterlist) * 2, GFP_KERNEL);
+		if (*split_sg_list == NULL) {
+			pr_err("Failed allocating SGE for split entry\n");
+			return -ENOMEM;
+		}
+		sg_init_table(*split_sg_list, 2);
+		sg_set_page(*split_sg_list, sg_page(cur_sge),
+			    cur_sge->length - split_offset,
+			    cur_sge->offset + split_offset);
+		/* Link to second SGE */
+		sg_chain(*split_sg_list, 2, sg_next(cur_sge));
+		cur_sge->length = split_offset;
+	} else {		/* Split at entry boundary */
+		*split_sg_list = sg_next(cur_sge);
+		sg_mark_end(cur_sge);
+	}
+
+	return 0;
+}
+
+/**
+ * link_sg_lists() - Link back split S/G list
+ * @first_sgl:	The first chunk s/g list
+ * @second_sgl:	The second chunk s/g list
+ *
+ * Returns the unified list head
+ */
+static struct scatterlist *link_sg_lists(struct scatterlist *first_sgl,
+					 struct scatterlist *second_sgl)
+{
+	struct scatterlist *second_sgl_second = NULL;
+	struct scatterlist *first_sgl_last;
+	struct scatterlist *cur_sge;
+
+	if (first_sgl == NULL)
+		return second_sgl;	/* Second list is the "unified" list */
+	if (second_sgl == NULL)
+		return first_sgl;	/* Nothing to link back */
+	/* Seek end of first s/g list */
+	first_sgl_last = NULL;	/* To save last s/g entry */
+	for_each_valid_sg(first_sgl, cur_sge)
+	    first_sgl_last = cur_sge;
+	if ((sg_page(first_sgl_last) == sg_page(second_sgl)) &&
+	    ((first_sgl_last->offset + first_sgl_last->length) ==
+	     second_sgl->offset)) {
+		/* Case of entry split */
+		/* Restore first entry length */
+		first_sgl_last->length += second_sgl->length;
+		/* Save before freeing */
+		second_sgl_second = sg_next(second_sgl);
+		kfree(second_sgl);
+	}
+	/* This entry was allocated by split_sg_list */
+	/* else, list was split on entry boundary */
+	if (second_sgl_second != NULL) {
+		/*
+		 * Restore link to following entries
+		 * Clear chain termination flag to link back to next sge
+		 * Unfortunately there is no direct function to do this
+		 * so we rely on implementation detail (all flags cleared)
+		 */
+		first_sgl_last->page_link =
+		    (unsigned long)sg_page(first_sgl_last);
+	}
+	return first_sgl;
+}
+
+/**
+ * cleanup_client_dma_buf() - Cleanup client_dma_buf resources (S/G lists,
+ *				pages array, aux. bufs)
+ * @llimgr_p:		The LLI manager object
+ * @client_dma_buf_p:	The client DMA buffer to clean up
+ *
+ * Returns void
+ */
+static void cleanup_client_dma_buf(struct llimgr_obj *llimgr_p,
+				   struct client_dma_buffer *client_dma_buf_p)
+{
+	struct page *cur_page;
+	int i;
+	const bool is_outbuf =
+	    (client_dma_buf_p->dma_direction == DMA_FROM_DEVICE) ||
+	    (client_dma_buf_p->dma_direction == DMA_BIDIRECTIONAL);
+
+	/* User space buffer */
+	if (client_dma_buf_p->user_buf_ptr != NULL) {
+#ifdef CONFIG_NOT_COHERENT_CACHE
+		if (client_dma_buf_p->sg_head != NULL)
+			kfree(client_dma_buf_p->sg_head);
+#endif
+		if (client_dma_buf_p->sg_main != NULL)
+			kfree(client_dma_buf_p->sg_main);
+		if (client_dma_buf_p->sg_tail != NULL)
+			kfree(client_dma_buf_p->sg_tail);
+		if (client_dma_buf_p->sg_save4next != NULL)
+			kfree(client_dma_buf_p->sg_save4next);
+		/* Unmap pages that were mapped/locked */
+		if (client_dma_buf_p->user_pages != NULL) {
+			for (i = 0; i < client_dma_buf_p->num_of_pages; i++) {
+				cur_page = client_dma_buf_p->user_pages[i];
+				/* Mark dirty for pages written by HW/DMA */
+				if (is_outbuf && !PageReserved(cur_page))
+					SetPageDirty(cur_page);
+				page_cache_release(cur_page);	/* Unlock */
+			}
+			kfree(client_dma_buf_p->user_pages);
+		}
+
+	} else {
+		/* (kernel) given s/g list */
+		/* Fix S/G list back to what was given */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+		if (client_dma_buf_p->sg_head != NULL) {
+			if (client_dma_buf_p->sg_main != NULL) {
+				client_dma_buf_p->sg_main =
+				    link_sg_lists(client_dma_buf_p->sg_head,
+						  client_dma_buf_p->sg_main);
+			} else {
+				client_dma_buf_p->sg_tail =
+				    link_sg_lists(client_dma_buf_p->sg_head,
+						  client_dma_buf_p->sg_tail);
+			}
+			/* Linked to next */
+			client_dma_buf_p->sg_head = NULL;
+		}
+#endif
+		client_dma_buf_p->sg_tail =
+		    link_sg_lists(client_dma_buf_p->sg_main,
+				  client_dma_buf_p->sg_tail);
+		client_dma_buf_p->sg_save4next =
+		    link_sg_lists(client_dma_buf_p->sg_tail,
+				  client_dma_buf_p->sg_save4next);
+	}
+
+	/* Free aux. buffers */
+	if (client_dma_buf_p->buf_end_aux_buf_va != NULL) {
+		if (client_dma_buf_p->buf_end_aux_buf_size <=
+		    EDGE_BUFS_POOL_ITEM_SIZE) {
+			dma_pool_free(llimgr_p->edge_bufs_pool,
+				      client_dma_buf_p->buf_end_aux_buf_va,
+				      client_dma_buf_p->buf_end_aux_buf_dma);
+		} else {	/* From DLLI buffers pool */
+			dma_pool_free(llimgr_p->dlli_bufs_pool,
+				      client_dma_buf_p->buf_end_aux_buf_va,
+				      client_dma_buf_p->buf_end_aux_buf_dma);
+		}
+	}
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	if (client_dma_buf_p->buf_start_aux_buf_va != NULL)
+		dma_pool_free(llimgr_p->edge_bufs_pool,
+			      client_dma_buf_p->buf_start_aux_buf_va,
+			      client_dma_buf_p->buf_start_aux_buf_dma);
+#endif
+	CLEAN_DMA_BUFFER_INFO(client_dma_buf_p);
+}
+
+/**
+ * is_sgl_phys_contig() - Check if given scatterlist is physically contig.
+ *
+ * @sgl:		Checked scatter/gather list
+ * @data_size_p:	Size of phys. contig. portion of given sgl
+ */
+static bool is_sgl_phys_contig(struct scatterlist *sgl,
+			       unsigned long *data_size_p)
+{
+	struct scatterlist *cur_sge = sgl;
+	struct scatterlist *next_sge;
+
+	*data_size_p = 0;
+	for (cur_sge = sgl; cur_sge != NULL; cur_sge = next_sge) {
+		(*data_size_p) += cur_sge->length;
+		next_sge = sg_next(cur_sge);
+		if ((next_sge != NULL) &&
+		    /* Check proximity of current entry to next entry */
+		    ((page_to_phys(sg_page(cur_sge)) + cur_sge->length) !=
+		     (page_to_phys(sg_page(next_sge)) + next_sge->offset))) {
+			/* End of cur_sge does not reach start of next_sge */
+			return false;
+		}
+	}
+	/* If we passed the loop then data is phys. contig. */
+	return true;
+}
+
+/**
+ * is_pages_phys_contig() - Check if given pages are phys. contig.
+ *
+ * @pages_list:		Array of pages
+ * @num_of_pages:	Number of pages in provided pages_list
+ */
+static bool is_pages_phys_contig(struct page *pages_list[],
+				 unsigned int num_of_pages)
+{
+	int i;
+
+	for (i = 0; i < (num_of_pages - 1); i++) {
+		if ((page_to_phys(pages_list[i]) + PAGE_SIZE) !=
+		    page_to_phys(pages_list[i + 1]))
+			return false;
+	}
+	/* If reached here then all pages are following each other */
+	return true;
+}
+
+/**
+ * user_buf_to_client_dma_buf() - Map the given user_buf_ptr (user space
+ *				buffer) into the client_dma_buffer
+ * @llimgr_p:		The LLI manager object
+ * @client_dma_buf_p:	Client DMA object
+ *
+ * This function should be invoked after the caller has set user_buf_ptr,
+ * buf_size and dma_direction in the given client_dma_buf_p object.
+ * Returns int 0 for success
+ */
+static int user_buf_to_client_dma_buf(struct llimgr_obj *llimgr_p,
+				      struct client_dma_buffer
+				      *client_dma_buf_p)
+{
+	u8 __user const *user_buf_ptr = client_dma_buf_p->user_buf_ptr;
+	unsigned long buf_size = client_dma_buf_p->buf_size;
+	const enum dma_data_direction dma_direction =
+	    client_dma_buf_p->dma_direction;
+	unsigned long buf_end = (unsigned long)user_buf_ptr + buf_size - 1;
+	const int num_of_pages =
+	    (buf_end >> PAGE_SHIFT) -
+	    ((unsigned long)user_buf_ptr >> PAGE_SHIFT) + 1;
+	const unsigned long offset_in_first_page =
+	    (unsigned long)user_buf_ptr & ~PAGE_MASK;
+	const bool is_inbuf = (dma_direction == DMA_TO_DEVICE) ||
+	    (dma_direction == DMA_BIDIRECTIONAL);
+	const bool is_outbuf = (dma_direction == DMA_FROM_DEVICE) ||
+	    (dma_direction == DMA_BIDIRECTIONAL);
+	unsigned long head_buf_size = 0, tail_buf_size = 0, main_buf_size = 0;
+	struct page **cur_page_p;
+	unsigned long cur_page_offset;
+	int rc = 0;
+
+	/* Verify permissions */
+	if (is_inbuf && !access_ok(ACCESS_READ, user_buf_ptr, buf_size)) {
+		pr_err("No read access to data buffer at %p\n",
+			    user_buf_ptr);
+		return -EFAULT;
+	}
+	if (is_outbuf && !access_ok(ACCESS_WRITE, user_buf_ptr, buf_size)) {
+		pr_err("No write access to data buffer at %p\n",
+			    user_buf_ptr);
+		return -EFAULT;
+	}
+	client_dma_buf_p->user_pages =
+	    kmalloc(sizeof(struct page *)*num_of_pages, GFP_KERNEL);
+	if (unlikely(client_dma_buf_p->user_pages == NULL)) {
+		pr_err("Failed allocating user_pages array for %d pages\n",
+			    num_of_pages);
+		return -ENOMEM;
+	}
+	/* Get user pages structure (also increment ref. count... lock) */
+	client_dma_buf_p->num_of_pages = get_user_pages_fast((unsigned long)
+							     user_buf_ptr,
+							     num_of_pages,
+							     is_outbuf,
+							     client_dma_buf_p->
+							     user_pages);
+	if (client_dma_buf_p->num_of_pages != num_of_pages) {
+		pr_warn(
+			     "Failed to lock all user pages (locked %d, requested lock = %d)\n",
+			     client_dma_buf_p->num_of_pages, num_of_pages);
+		rc = -ENOMEM;
+	}
+	/* Leave only currently processed data (remainder in sg_save4next) */
+	buf_size -= client_dma_buf_p->save4next_size;
+	buf_end -= client_dma_buf_p->save4next_size;
+	/* Decide on type of mapping: MLLI, DLLI or DLLI after copy */
+	if (buf_size <= DLLI_BUF_LIMIT) {
+		/* Check if possible to map buffer directly as DLLI */
+		if (
+#ifdef CONFIG_NOT_COHERENT_CACHE
+			   /* For systems with incoherent cache the buffer
+			    * must be cache line aligned to be considered
+			    * for this case
+			    */
+			   (((unsigned long)user_buf_ptr & CACHE_LINE_MASK) ==
+			    0) &&
+				((buf_end & CACHE_LINE_MASK) ==
+				 CACHE_LINE_MASK) &&
+#endif
+			   is_pages_phys_contig(client_dma_buf_p->user_pages,
+						client_dma_buf_p->
+						num_of_pages)) {
+			pr_debug(
+				      "Mapping user buffer @%p (0x%08lX B) to DLLI directly\n",
+				      client_dma_buf_p->user_buf_ptr, buf_size);
+			main_buf_size = buf_size;/* Leave 0 for tail_buf_size */
+			/* 0 for the tail buffer indicates that we use this
+			 * optimization, because in any other case there must
+			 * be some data in the tail buffer (if buf_size>0) */
+		} else if (buf_size <= DLLI_AUX_BUF_LIMIT) {
+			pr_debug(
+				      "Mapping user buffer @%p (0x%08lX B) to DLLI via aux. buffer\n",
+				      client_dma_buf_p->user_buf_ptr, buf_size);
+			tail_buf_size = buf_size;
+			/* All data goes to "tail" in order to piggy-back over
+			 * the aux. buffers logic for copying data in/out of the
+			 * temp. DLLI DMA buffer */
+		}
+	}
+	if ((main_buf_size + tail_buf_size) == 0) {
+		/* If none of the optimizations was applied... */
+		calc_aux_bufs_size((unsigned long)user_buf_ptr, buf_size,
+				   dma_direction, &head_buf_size,
+				   &tail_buf_size);
+		main_buf_size = buf_size - head_buf_size - tail_buf_size;
+	}
+
+	/* Create S/G list */
+	cur_page_p = client_dma_buf_p->user_pages;
+	cur_page_offset = offset_in_first_page;
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	if (likely(rc == 0)) {
+		/* Create S/G list for head (aux.) buffer */
+		rc = create_sg_list(cur_page_p, cur_page_offset,
+				    head_buf_size, &client_dma_buf_p->sg_head,
+				    &cur_page_p, &cur_page_offset);
+	}
+#endif
+	/* Create S/G list for buffer "body" - to be used for DMA */
+	if (likely(rc == 0)) {
+		rc = create_sg_list(cur_page_p, cur_page_offset,
+				    main_buf_size, &client_dma_buf_p->sg_main,
+				    &cur_page_p, &cur_page_offset);
+	}
+	/* Create S/G list for tail (aux.) buffer */
+	if (likely(rc == 0)) {
+		rc = create_sg_list(cur_page_p, cur_page_offset,
+				    tail_buf_size, &client_dma_buf_p->sg_tail,
+				    &cur_page_p, &cur_page_offset);
+	}
+	/* Create S/G list for save4next buffer */
+	if (likely(rc == 0)) {
+		rc = create_sg_list(cur_page_p, cur_page_offset,
+				    client_dma_buf_p->save4next_size,
+				    &client_dma_buf_p->sg_save4next,
+				    &cur_page_p, &cur_page_offset);
+	}
+
+	if (unlikely(rc != 0)) {
+		cleanup_client_dma_buf(llimgr_p, client_dma_buf_p);
+	} else {
+		/* Save head/tail sizes */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+		client_dma_buf_p->buf_start_aux_buf_size = head_buf_size;
+#endif
+		client_dma_buf_p->buf_end_aux_buf_size = tail_buf_size;
+	}
+
+	return rc;
+}
+
+/**
+ * client_sgl_to_client_dma_buf() - Create head/main/tail sg lists from given
+ *					sg list
+ * @llimgr_p:		The LLI manager object
+ * @sgl:		The given scatter/gather list
+ * @client_dma_buf_p:	Client DMA object
+ *
+ * Returns int 0 for success
+ */
+static int client_sgl_to_client_dma_buf(struct llimgr_obj *llimgr_p,
+					struct scatterlist *sgl,
+					struct client_dma_buffer
+					*client_dma_buf_p)
+{
+	const unsigned long buf_size = client_dma_buf_p->buf_size -
+	    client_dma_buf_p->save4next_size;
+	const enum dma_data_direction dma_direction =
+	    client_dma_buf_p->dma_direction;
+	unsigned long sgl_phys_contig_size;	/* Phys. contig. part size */
+	unsigned long head_buf_size = 0, tail_buf_size = 0;
+	unsigned long main_buf_size = 0;
+	unsigned long last_sgl_size = 0;
+	struct scatterlist *last_sgl = NULL;	/* sgl to split of save4next */
+	int rc;
+
+	pr_debug("sgl=%p nbytes=%lu save4next=%lu client_dma_buf=%p\n",
+		      sgl, client_dma_buf_p->buf_size,
+		      client_dma_buf_p->save4next_size, client_dma_buf_p);
+
+	if (buf_size == 0) {	/* all goes to save4next (if anything) */
+		client_dma_buf_p->sg_save4next = sgl;
+		return 0;
+	}
+
+	/* Decide on type of mapping: MLLI, DLLI or DLLI after copy */
+	if (buf_size <= DLLI_BUF_LIMIT) {
+		/* Check if possible to map buffer directly as DLLI */
+		if (
+#ifdef CONFIG_NOT_COHERENT_CACHE
+			   /*
+			    * For systems with incoherent cache the
+			    * buffer must be cache line aligned to be
+			    * considered for this case
+			    */
+			   ((sgl->offset & CACHE_LINE_MASK) == 0) &&
+			   (((sgl->offset + buf_size) &
+			     CACHE_LINE_MASK) == 0) &&
+#endif
+			   is_sgl_phys_contig(sgl, &sgl_phys_contig_size)) {
+			pr_debug(
+				      "Mapping sgl buffer (0x%08lX B) to DLLI directly\n",
+				      buf_size);
+			main_buf_size = buf_size;
+			/* Leave 0 for tail_buf_size
+			 * 0 for the tail buffer indicates that we use this
+			 * optimization, because in any other case there must
+			 * be some data in the tail buffer (if buf_size>0)
+			 */
+		} else if (buf_size <= DLLI_AUX_BUF_LIMIT) {
+			pr_debug(
+				      "Mapping sgl buffer (0x%08lX B) to DLLI via aux. buffer\n",
+				      buf_size);
+			tail_buf_size = buf_size;
+			/* All data goes to "tail" in order to piggy-back
+			 * over the aux. buffers logic for copying data
+			 * in/out of the temp. DLLI DMA buffer
+			 */
+		}
+	}
+	if ((main_buf_size + tail_buf_size) == 0) {
+		/* If none of the optimizations was applied... */
+		/* Use first SG entry for start alignment */
+		calc_aux_bufs_size((unsigned long)sgl->offset, buf_size,
+				   dma_direction, &head_buf_size,
+				   &tail_buf_size);
+		main_buf_size = buf_size - head_buf_size - tail_buf_size;
+	}
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	if (head_buf_size > 0) {
+		client_dma_buf_p->sg_head = sgl;
+		rc = split_sg_list(client_dma_buf_p->sg_head,
+				   &client_dma_buf_p->sg_main, head_buf_size);
+		if (unlikely(rc != 0)) {
+			pr_err("Failed splitting sg_head-sg_main\n");
+			cleanup_client_dma_buf(llimgr_p, client_dma_buf_p);
+			return rc;
+		}
+		last_sgl_size = head_buf_size;
+		last_sgl = client_dma_buf_p->sg_head;
+	} else
+#endif
+		/* Initialize sg_main to given sgl */
+		client_dma_buf_p->sg_main = sgl;
+
+	if (tail_buf_size > 0) {
+		if (main_buf_size > 0) {
+			rc = split_sg_list(client_dma_buf_p->sg_main,
+					   &client_dma_buf_p->sg_tail,
+					   main_buf_size);
+			if (unlikely(rc != 0)) {
+				pr_err("Fail:splitting sg_main-sg_tail\n");
+				cleanup_client_dma_buf(llimgr_p,
+						       client_dma_buf_p);
+				return rc;
+			}
+		} else {	/* All data moved to sg_tail */
+			client_dma_buf_p->sg_tail = client_dma_buf_p->sg_main;
+			client_dma_buf_p->sg_main = NULL;
+		}
+		last_sgl_size = tail_buf_size;
+		last_sgl = client_dma_buf_p->sg_tail;
+	} else if (main_buf_size > 0) {	/* main only */
+		last_sgl_size = main_buf_size;
+		last_sgl = client_dma_buf_p->sg_main;
+	}
+
+	/* Save head/tail sizes */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	client_dma_buf_p->buf_start_aux_buf_size = head_buf_size;
+#endif
+	client_dma_buf_p->buf_end_aux_buf_size = tail_buf_size;
+
+	if (client_dma_buf_p->save4next_size > 0) {
+		if (last_sgl != NULL) {
+			rc = split_sg_list(last_sgl,
+					   &client_dma_buf_p->sg_save4next,
+					   last_sgl_size);
+			if (unlikely(rc != 0)) {
+				pr_err("Failed splitting sg_save4next\n");
+				cleanup_client_dma_buf(llimgr_p,
+						       client_dma_buf_p);
+				return rc;
+			}
+		} else {	/* Whole buffer goes to save4next */
+			client_dma_buf_p->sg_save4next = sgl;
+		}
+	}
+	return 0;
+}
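+
+/*
+ * Note on the splitting sequence above: as used in this function,
+ * split_sg_list(first, &rest, size) appears to truncate "first" after
+ * "size" bytes and return the remainder in "rest". The list is therefore
+ * carved up in order: head | main, then main | tail, and finally the last
+ * non-empty fragment is split once more to peel off sg_save4next.
+ */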
+
+/**
+ * llimgr_register_client_dma_buf() - Register given client buffer for DMA
+ *					operation.
+ * @llimgr:	 The LLI manager object handle
+ * @user_buf_ptr:	 Pointer in user space of the user buffer
+ * @sgl:	Client provided s/g list. user_buf_ptr is assumed NULL if this
+ *		list is given (!NULL).
+ * @buf_size:	 The user buffer size in bytes (incl. save4next). May be 0.
+ * @save4next_size:	Amount from buffer end to save for next op.
+ *			(split into separate sgl). May be 0.
+ * @dma_direction:	The DMA direction this buffer would be used for
+ * @client_dma_buf_p:	Pointer to the user DMA buffer "object"
+ *
+ * Register given client buffer for DMA operation.
+ * If user_buf_ptr!=NULL and sgl==NULL it locks the user pages and creates
+ * head/main/tail s/g lists. If sgl!=NULL, it splits it into head/main/tail
+ * s/g lists.
+ * Returns 0 for success
+ */
+int llimgr_register_client_dma_buf(void *llimgr,
+				   u8 __user *user_buf_ptr,
+				   struct scatterlist *sgl,
+				   const unsigned long buf_size,
+				   const unsigned long save4next_size,
+				   const enum dma_data_direction dma_direction,
+				   struct client_dma_buffer *client_dma_buf_p)
+{
+	struct llimgr_obj *llimgr_p = (struct llimgr_obj *)llimgr;
+	int rc, tmp;
+
+	CLEAN_DMA_BUFFER_INFO(client_dma_buf_p);
+
+	if (buf_size == 0) {	/* Handle empty buffer */
+		pr_debug("buf_size == 0\n");
+		return 0;
+	}
+
+	if ((user_buf_ptr == NULL) && (sgl == NULL)) {
+		pr_err("NULL user_buf_ptr/sgl\n");
+		return -EINVAL;
+	}
+	if ((user_buf_ptr != NULL) && (sgl != NULL)) {
+		pr_err("Provided with dual buffer info (both user+sgl)\n");
+		return -EINVAL;
+	}
+
+	/* Init. basic/common attributes */
+	client_dma_buf_p->user_buf_ptr = user_buf_ptr;
+	client_dma_buf_p->buf_size = buf_size;
+	client_dma_buf_p->save4next_size = save4next_size;
+	client_dma_buf_p->dma_direction = dma_direction;
+
+	if (user_buf_ptr != NULL) {
+		rc = user_buf_to_client_dma_buf(llimgr_p, client_dma_buf_p);
+	} else {
+		rc = client_sgl_to_client_dma_buf(llimgr_p, sgl,
+						  client_dma_buf_p);
+	}
+	if (unlikely(rc != 0))
+		return rc;
+	/* Since sg_main may be large and we need its nents for each
+	 * dma_map_sg/dma_unmap_sg operation, we count its nents once and
+	 * save the result.
+	 * (for the other sgl's in the object we can count when accessed) */
+	client_dma_buf_p->sg_main_nents =
+	    get_sgl_nents(client_dma_buf_p->sg_main);
+
+	/* Allocate auxiliary buffers for sg_head/sg_tail copies */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	if ((likely(rc == 0)) &&
+	    (client_dma_buf_p->buf_start_aux_buf_size > 0)) {
+		client_dma_buf_p->buf_start_aux_buf_va =
+		    dma_pool_alloc(llimgr_p->edge_bufs_pool, GFP_KERNEL,
+				   &client_dma_buf_p->buf_start_aux_buf_dma);
+		if (unlikely(client_dma_buf_p->buf_start_aux_buf_va == NULL)) {
+			pr_err("Fail alloc from edge_bufs_pool, head\n");
+			rc = -ENOMEM;
+		} else {
+			pr_debug("start_aux: va=%p dma=0x%08llX\n",
+				      client_dma_buf_p->buf_start_aux_buf_va,
+				      client_dma_buf_p->buf_start_aux_buf_dma);
+		}
+	}
+#endif
+	if ((likely(rc == 0)) && (client_dma_buf_p->buf_end_aux_buf_size > 0)) {
+#ifdef DEBUG
+		if (client_dma_buf_p->buf_end_aux_buf_size >
+				DLLI_AUX_BUF_LIMIT) {
+			pr_err("end_aux_buf size too large = 0x%08lX\n",
+				    client_dma_buf_p->buf_end_aux_buf_size);
+			return -EINVAL;
+		}
+#endif
+		if (client_dma_buf_p->buf_end_aux_buf_size <=
+		    EDGE_BUFS_POOL_ITEM_SIZE) {
+			client_dma_buf_p->buf_end_aux_buf_va =
+			    dma_pool_alloc(llimgr_p->edge_bufs_pool, GFP_KERNEL,
+					   &client_dma_buf_p->
+					   buf_end_aux_buf_dma);
+		} else {
+			/* Allocate from the dedicated DLLI buffers pool */
+			client_dma_buf_p->buf_end_aux_buf_va =
+			    dma_pool_alloc(llimgr_p->dlli_bufs_pool, GFP_KERNEL,
+					   &client_dma_buf_p->
+					   buf_end_aux_buf_dma);
+		}
+		if (unlikely(client_dma_buf_p->buf_end_aux_buf_va == NULL)) {
+			pr_err("Fail:allocating from aux. buf for tail\n");
+			rc = -ENOMEM;
+		} else {
+			pr_debug("end_aux: va=%p dma=0x%08llX\n",
+				client_dma_buf_p->buf_end_aux_buf_va,
+				(long long unsigned int)
+				client_dma_buf_p->buf_end_aux_buf_dma);
+		}
+	}
+
+	/* Map the main sglist (head+tail would not be used for DMA) */
+	if (likely(rc == 0) && (client_dma_buf_p->sg_main != NULL)) {
+		tmp = dma_map_sg(llimgr_p->dev, client_dma_buf_p->sg_main,
+				 client_dma_buf_p->sg_main_nents,
+				 dma_direction);
+		if (unlikely(tmp == 0)) {
+			pr_err("dma_map_sg failed\n");
+			rc = -ENOMEM;
+		}
+	}
+
+#ifdef DEBUG
+	if (likely(rc == 0))
+		dump_client_buf_pages(client_dma_buf_p);
+#endif
+
+	if (unlikely(rc != 0)) {	/* Error cases cleanup */
+		cleanup_client_dma_buf(llimgr_p, client_dma_buf_p);
+	}
+
+	return rc;
+}
+
+/**
+ * llimgr_deregister_client_dma_buf() - Unmap given user DMA buffer
+ * @llimgr:	The LLI manager object handle
+ * @client_dma_buf_p:	User DMA buffer object
+ *
+ * Unmap given user DMA buffer (flush and unlock pages)
+ * (this function can handle client_dma_buffer of size 0)
+ */
+void llimgr_deregister_client_dma_buf(void *llimgr,
+				      struct client_dma_buffer
+				      *client_dma_buf_p)
+{
+	struct llimgr_obj *llimgr_p = (struct llimgr_obj *)llimgr;
+
+	/* Cleanup DMA mappings */
+	if (client_dma_buf_p->sg_main != NULL) {
+		dma_unmap_sg(llimgr_p->dev, client_dma_buf_p->sg_main,
+			     client_dma_buf_p->sg_main_nents,
+			     client_dma_buf_p->dma_direction);
+	}
+	cleanup_client_dma_buf(llimgr_p, client_dma_buf_p);
+}
+
+/**
+ * get_sgl_nents() - Get (count) the number of entries in given s/g list
+ *	(used in order to be able to invoke sg_copy_to/from_buffer)
+ * @sgl:	Counted s/g list entries
+ */
+static inline unsigned int get_sgl_nents(struct scatterlist *sgl)
+{
+	int cnt = 0;
+	struct scatterlist *cur_sge;
+
+	for_each_valid_sg(sgl, cur_sge)
+		cnt++;
+	return cnt;
+}
+
+/**
+ * copy_to_from_aux_buf() - Copy to/from given aux.buffer from/to given s/g list
+ * @to_buf:	 "TRUE" for copying to the given buffer from sgl
+ * @sgl:	 The S/G list data source/target
+ * @buf_p:	 Target/source buffer
+ * @buf_len:	 Buffer length
+ *
+ * Returns int 0 on success
+ */
+static inline int copy_to_from_aux_buf(bool to_buf,
+				       struct scatterlist *sgl, void *buf_p,
+				       size_t buf_len)
+{
+	size_t copied_cnt;
+	unsigned int nents = get_sgl_nents(sgl);
+
+	if (to_buf)
+		copied_cnt = sg_copy_to_buffer(sgl, nents, buf_p, buf_len);
+	else
+		copied_cnt = sg_copy_from_buffer(sgl, nents, buf_p, buf_len);
+
+	if (copied_cnt < buf_len) {
+		pr_err("Failed copying %s buf of %zu B\n",
+			    to_buf ? "to" : "from", buf_len);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * sync_client_dma_buf() - Sync. pages before DMA to device at given offset in
+ *				the user buffer
+ * @dev:	 Associated device structure
+ * @client_dma_buf_p:	 The user DMA buffer object
+ * @for_device:	 Set to "true" to sync before device DMA op.
+ * @dma_direction:	 DMA direction for sync.
+ *
+ * Returns int 0 for success
+ */
+static int sync_client_dma_buf(struct device *dev,
+			       struct client_dma_buffer *client_dma_buf_p,
+			       const bool for_device,
+			       const enum dma_data_direction dma_direction)
+{
+	const bool is_from_device = ((dma_direction == DMA_BIDIRECTIONAL) ||
+				     (dma_direction == DMA_FROM_DEVICE));
+	const bool is_to_device = ((dma_direction == DMA_BIDIRECTIONAL) ||
+				   (dma_direction == DMA_TO_DEVICE));
+	int rc;
+
+	pr_debug("DMA buf %p (0x%08lX B) for %s\n",
+		      client_dma_buf_p, client_dma_buf_p->buf_size,
+		      for_device ? "device" : "cpu");
+
+	if (for_device) {
+		/* Copy out aux. buffers if required */
+		/* We should copy before dma_sync_sg_for_device */
+		if (is_to_device) {
+#ifdef CONFIG_NOT_COHERENT_CACHE
+			if (client_dma_buf_p->sg_head != NULL) {
+				rc = copy_to_from_aux_buf(true,
+						  client_dma_buf_p->sg_head,
+						  client_dma_buf_p->
+						  buf_start_aux_buf_va,
+						  client_dma_buf_p->
+						  buf_start_aux_buf_size);
+				if (rc != 0)
+					return rc;
+			}
+#endif
+			if (client_dma_buf_p->sg_tail != NULL) {
+				rc = copy_to_from_aux_buf(true,
+						  client_dma_buf_p->sg_tail,
+						  client_dma_buf_p->
+						  buf_end_aux_buf_va,
+						  client_dma_buf_p->
+						  buf_end_aux_buf_size);
+				if (rc != 0)
+					return rc;
+			}
+		}
+		if (client_dma_buf_p->sg_main != NULL) {
+			dma_sync_sg_for_device(dev, client_dma_buf_p->sg_main,
+					       client_dma_buf_p->sg_main_nents,
+					       dma_direction);
+		}
+
+	} else {		/* for CPU */
+		if (client_dma_buf_p->sg_main != NULL) {
+			dma_sync_sg_for_cpu(dev, client_dma_buf_p->sg_main,
+					    client_dma_buf_p->sg_main_nents,
+					    dma_direction);
+		}
+		/* Copy back from aux. buffers */
+		/* We should copy after dma_sync_sg_for_cpu */
+		if (is_from_device) {
+#ifdef CONFIG_NOT_COHERENT_CACHE
+			if (client_dma_buf_p->sg_head != NULL) {
+				rc = copy_to_from_aux_buf(false,
+						  client_dma_buf_p->sg_head,
+						  client_dma_buf_p->
+						  buf_start_aux_buf_va,
+						  client_dma_buf_p->
+						  buf_start_aux_buf_size);
+				if (rc != 0)
+					return rc;
+			}
+#endif
+			if (client_dma_buf_p->sg_tail != NULL) {
+				rc = copy_to_from_aux_buf(false,
+						  client_dma_buf_p->sg_tail,
+						  client_dma_buf_p->
+						  buf_end_aux_buf_va,
+						  client_dma_buf_p->
+						  buf_end_aux_buf_size);
+				if (rc != 0)
+					return rc;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * llimgr_copy_from_client_buf_save4next() - Copy from the sg_save4next chunk
+ *	of the client DMA buffer to given buffer.
+ *	Used to save hash block remainder.
+ *
+ * @client_dma_buf_p:	The client DMA buffer with the save4next chunk
+ * @to_buf:		Target buffer to copy to.
+ * @buf_len:		Given buffer length (to avoid buffer overflow)
+ *
+ * Returns number of bytes copied or -ENOMEM if given buffer is too small
+ */
+int llimgr_copy_from_client_buf_save4next(struct client_dma_buffer
+					  *client_dma_buf_p, u8 *to_buf,
+					  unsigned long buf_len)
+{
+	int copied_cnt;
+	struct scatterlist *sgl = client_dma_buf_p->sg_save4next;
+	unsigned int nents;
+
+	if (buf_len < client_dma_buf_p->save4next_size) {
+		pr_err("Invoked for copying %lu B to a buffer of %lu B\n",
+			    client_dma_buf_p->save4next_size, buf_len);
+		copied_cnt = -ENOMEM;
+	} else {
+		nents = get_sgl_nents(sgl);
+		if (nents > 0)
+			copied_cnt = sg_copy_to_buffer(sgl, nents,
+						       to_buf, buf_len);
+		else		/* empty */
+			copied_cnt = 0;
+	}
+	return copied_cnt;
+}
+
+#ifdef DEBUG
+/**
+ * dump_mlli_table() - Dump given MLLI table
+ * @table_start_p:	 Pointer to allocated table buffer
+ * @dma_addr:	 The table's DMA address as given to SeP
+ * @table_size:	 The table size in bytes
+ *
+ */
+static void dump_mlli_table(u32 *table_start_p, dma_addr_t dma_addr,
+			    unsigned long table_size)
+{
+	u32 lli_spad[SEP_LLI_ENTRY_WORD_SIZE];
+	int i;
+	u32 *cur_entry_p;
+	unsigned int num_of_entries = table_size / SEP_LLI_ENTRY_BYTE_SIZE;
+
+	pr_debug("MLLI table at %p (dma_addr=0x%08X) with %u ent.:\n",
+		      table_start_p, (unsigned int)dma_addr, num_of_entries);
+
+	for (i = 0, cur_entry_p = table_start_p + SEP_LLI_ENTRY_WORD_SIZE;
+	     i < num_of_entries; cur_entry_p += SEP_LLI_ENTRY_WORD_SIZE, i++) {
+		/* LE to BE... */
+		SEP_LLI_COPY_FROM_SEP(lli_spad, cur_entry_p);
+		/*
+		 * pr_debug("%02d: addr=0x%08lX , size=0x%08lX\n  %s\n", i,
+		 * SEP_LLI_GET(lli_spad, ADDR),
+		 * SEP_LLI_GET(lli_spad, SIZE),
+		 */
+		pr_debug("%02d: [0x%08X,0x%08X] %s\n", i,
+			      lli_spad[0], lli_spad[1],
+			      i == 0 ? "(next table)" : "");
+	}
+}
+
+/**
+ * llimgr_dump_mlli_tables_list() - Dump all the MLLI tables in a given tables
+ *					list
+ * @mlli_tables_list_p:	 Pointer to tables list structure
+ *
+ */
+void llimgr_dump_mlli_tables_list(struct mlli_tables_list *mlli_tables_list_p)
+{
+	u32 lli_spad[SEP_LLI_ENTRY_WORD_SIZE];
+	u32 *cur_table_p;
+	u32 *next_table_p;
+	u32 *link_entry_p;
+	unsigned long next_table_size;
+	dma_addr_t next_table_dma;
+	u16 table_count = 0;
+
+	/* This loop uses "cur_table_p" as the previous table that
+	 * was already dumped */
+	for (cur_table_p = mlli_tables_list_p->link_to_first_table;
+	     cur_table_p != NULL; cur_table_p = next_table_p, table_count++) {
+
+		if (table_count > mlli_tables_list_p->table_count) {
+			pr_err(
+				"MLLI tables list has more tables than table_cnt=%u. Stopping dump.\n",
+				mlli_tables_list_p->table_count);
+			break;
+		}
+
+		/* The SeP link entry is second in the buffer */
+		link_entry_p = cur_table_p + SEP_LLI_ENTRY_WORD_SIZE;
+		SEP_LLI_COPY_FROM_SEP(lli_spad, link_entry_p);/* LE to BE... */
+		next_table_p = SEP_MLLI_GET_NEXT_VA(cur_table_p);
+		next_table_dma = SEP_LLI_GET(lli_spad, ADDR);
+		next_table_size = SEP_LLI_GET(lli_spad, SIZE);
+		if (next_table_p != NULL)
+			dump_mlli_table(next_table_p,
+					next_table_dma, next_table_size);
+
+	}
+
+}
+#endif /*DEBUG*/
+/*****************************************/
+/* MLLI tables construction functions    */
+/*****************************************/
+/**
+ * set_dlli() - Set given mlli object to function as DLLI descriptor
+ *
+ * @mlli_tables_list_p:	The associated "MLLI" tables list
+ * @dlli_addr:	The DMA address for the DLLI
+ * @dlli_size:	The size in bytes of referenced data
+ *
+ * The MLLI tables list object represents DLLI when its table_count is 0
+ * and the "link_to_first_table" actually points to the data.
+ */
+static inline void set_dlli(struct mlli_tables_list *mlli_tables_list_p,
+			    u32 dlli_addr, u16 dlli_size)
+{
+	u32 lli_spad[SEP_LLI_ENTRY_WORD_SIZE];	/* LLI scratchpad */
+
+	SEP_LLI_INIT(lli_spad);
+	SEP_LLI_SET(lli_spad, ADDR, dlli_addr);
+	SEP_LLI_SET(lli_spad, SIZE, dlli_size);
+	SEP_LLI_COPY_TO_SEP(mlli_tables_list_p->link_to_first_table +
+			    SEP_LLI_ENTRY_WORD_SIZE, lli_spad);
+	mlli_tables_list_p->table_count = 0;
+}
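+
+/*
+ * Illustrative use (hypothetical values): a small physically contiguous
+ * client buffer of 64 B could be described to SeP without any MLLI table
+ * by a single call such as:
+ *
+ *	set_dlli(mlli_tables_p, sg_dma_address(memref->sg_main), 64);
+ *
+ * which leaves table_count == 0, so llimgr_mlli_is_dlli() reports DLLI.
+ */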
+
+/**
+ * set_last_lli() - Set "Last" bit on last LLI data entry
+ * @last_lli_p:	Pointer to the last LLI data entry in the table
+ *
+ */
+static inline void set_last_lli(u32 *last_lli_p)
+{
+	u32 lli_spad[SEP_LLI_ENTRY_WORD_SIZE];	/* LLI scratchpad */
+
+	SEP_LLI_COPY_FROM_SEP(lli_spad, last_lli_p);
+	SEP_LLI_SET(lli_spad, LAST, 1);
+	SEP_LLI_COPY_TO_SEP(last_lli_p, lli_spad);
+}
+
+/**
+ * set_last_table() - Set table link to next as NULL (last table).
+ * @mlli_table_p:	Pointer to the MLLI table to terminate
+ *
+ */
+static inline void set_last_table(u32 *mlli_table_p)
+{
+	u32 lli_spad[SEP_LLI_ENTRY_WORD_SIZE];	/* LLI scratchpad */
+
+	/* Set SeP link entry */
+	SEP_LLI_INIT(lli_spad);
+	SEP_LLI_SET(lli_spad, FIRST, 1);
+	SEP_LLI_SET(lli_spad, LAST, 1);
+	/* The rest of the fields are zero from SEP_LLI_INIT */
+	SEP_LLI_COPY_TO_SEP(mlli_table_p, lli_spad);
+	/* Set NULL for next VA */
+	SEP_MLLI_SET_NEXT_VA_NULL(mlli_table_p);
+}
+
+static inline void link_to_prev_mlli(struct mlli_tables_list_iterator
+				     *mlli_iter_p)
+{
+	u32 lli_spad[SEP_LLI_ENTRY_WORD_SIZE];	/* LLI scratchpad */
+	const u32 cur_mlli_table_size =
+	    (mlli_iter_p->next_lli_idx + 1) * SEP_LLI_ENTRY_BYTE_SIZE;
+	/* +1 for link entry at table start */
+
+	SEP_LLI_INIT(lli_spad);
+	SEP_LLI_SET(lli_spad, ADDR, mlli_iter_p->cur_mlli_dma_addr);
+	SEP_LLI_SET(lli_spad, SIZE, cur_mlli_table_size);
+	SEP_LLI_SET(lli_spad, FIRST, 1);
+	SEP_LLI_SET(lli_spad, LAST, 1);
+	SEP_LLI_COPY_TO_SEP(mlli_iter_p->prev_mlli_table_p +
+			    SEP_LLI_ENTRY_WORD_SIZE, lli_spad);
+	SEP_MLLI_SET_NEXT_VA(mlli_iter_p->prev_mlli_table_p,
+			     mlli_iter_p->cur_mlli_table_p);
+}
+
+/**
+ * terminate_mlli_tables_list() - "NULL" terminate the MLLI tables list and link
+ *				to previous table if any.
+ * @mlli_iter_p:	 MLLI tables list iterator
+ *
+ */
+static inline void terminate_mlli_tables_list(struct mlli_tables_list_iterator
+					      *mlli_iter_p)
+{
+	u32 *last_lli_p = mlli_iter_p->cur_mlli_table_p +
+	    ((FIRST_DATA_LLI_INDEX + mlli_iter_p->next_lli_idx - 1) *
+	     SEP_LLI_ENTRY_WORD_SIZE);
+
+	if (mlli_iter_p->prev_mlli_table_p != NULL)
+		link_to_prev_mlli(mlli_iter_p);
+
+	if (mlli_iter_p->cur_mlli_table_p != NULL) {
+		set_last_lli(last_lli_p);
+		set_last_table(mlli_iter_p->cur_mlli_table_p);
+	}
+
+}
+
+static int alloc_next_mlli(struct llimgr_obj *llimgr_p,
+			   struct mlli_tables_list *mlli_tables_list_p,
+			   struct mlli_tables_list_iterator *mlli_iter_p)
+{
+	u32 *last_lli_p = mlli_iter_p->cur_mlli_table_p +
+	    ((FIRST_DATA_LLI_INDEX + mlli_iter_p->next_lli_idx - 1) *
+	     SEP_LLI_ENTRY_WORD_SIZE);
+
+	if (mlli_iter_p->prev_mlli_table_p != NULL) {
+		/* "prev == NULL" means that we are on the stub link entry. */
+		/* If we have "prev" it means that we already have one table */
+		link_to_prev_mlli(mlli_iter_p);
+		set_last_lli(last_lli_p);
+	}
+
+	mlli_iter_p->prev_mlli_table_p = mlli_iter_p->cur_mlli_table_p;
+
+	/* Allocate MLLI table buffer from the pool */
+	mlli_iter_p->cur_mlli_table_p =
+	    dma_pool_alloc(llimgr_p->mlli_cache, GFP_KERNEL,
+			   &mlli_iter_p->cur_mlli_dma_addr);
+	if (mlli_iter_p->cur_mlli_table_p == NULL) {
+		pr_err("Failed allocating MLLI table\n");
+		return -ENOMEM;
+	}
+
+	/* Set DMA addr to the table start from SeP perspective */
+	mlli_iter_p->cur_mlli_dma_addr += SEP_LLI_ENTRY_BYTE_SIZE;
+	mlli_iter_p->next_lli_idx = 0;
+	mlli_iter_p->cur_mlli_accum_data = 0;
+	/* Set as last until linked to next (for keeping a valid tables list) */
+	set_last_table(mlli_iter_p->cur_mlli_table_p);
+
+	mlli_tables_list_p->table_count++;
+
+	return 0;
+}
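+
+/*
+ * Layout note: each buffer from mlli_cache starts with a host-only entry
+ * holding the next table's virtual address, followed by the SeP link entry
+ * and then the data LLIs. cur_mlli_dma_addr is advanced past the host-only
+ * entry above so that, from SeP's perspective, the table starts at its
+ * link entry.
+ */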
+
+/**
+ * append_lli_to_mlli() - Set current LLI info and progress to next entry
+ * @mlli_iter_p:	MLLI tables list iterator
+ * @dma_addr:	LLI entry ADDR
+ * @data_size:	LLI entry SIZE
+ *
+ * Returns void
+ */
+static inline void append_lli_to_mlli(struct mlli_tables_list_iterator
+				      *mlli_iter_p, dma_addr_t dma_addr,
+				      u32 data_size)
+{
+	u32 lli_spad[SEP_LLI_ENTRY_WORD_SIZE];	/* LLI scratchpad */
+	u32 *next_lli_p;
+
+	next_lli_p = mlli_iter_p->cur_mlli_table_p +
+	    ((FIRST_DATA_LLI_INDEX + mlli_iter_p->next_lli_idx) *
+	     SEP_LLI_ENTRY_WORD_SIZE);
+	/* calc. includes first link entry */
+	/* Create LLI entry */
+	SEP_LLI_INIT(lli_spad);
+	SEP_LLI_SET(lli_spad, ADDR, dma_addr);
+	SEP_LLI_SET(lli_spad, SIZE, data_size);
+	SEP_LLI_COPY_TO_SEP(next_lli_p, lli_spad);
+
+	mlli_iter_p->next_lli_idx++;
+	mlli_iter_p->cur_mlli_accum_data += data_size;
+}
+
+/**
+ * append_data_to_mlli() - Append given DMA data chunk to given MLLI tables list
+ * @llimgr_p:	LLI-manager pointer
+ * @mlli_tables_list_p:	The MLLI tables list to append to
+ * @mlli_iter_p:	MLLI tables list iterator
+ * @data_dma_addr:	 LLI entry ADDR
+ * @data_size:	 LLI entry SIZE
+ *
+ * Append given DMA data chunk to given MLLI tables list.
+ * Based on given iterator the LLI entry may be added in the current table
+ * or if table end reached, a new table would be allocated.
+ * In the latter case the previous MLLI table would be linked to current.
+ * If not all of the data fits within the current table limit, it is split
+ * between the last LLI in this table and the first LLI in the next table
+ * (assuming the overall DMA data is not more than max_data_per_mlli).
+ * Returns int 0 for success
+ */
+static int append_data_to_mlli(struct llimgr_obj *llimgr_p,
+			       struct mlli_tables_list *mlli_tables_list_p,
+			       struct mlli_tables_list_iterator *mlli_iter_p,
+			       dma_addr_t data_dma_addr, u32 data_size)
+{
+	u32 remaining_data_for_mlli;
+	int rc;
+
+#ifdef DEBUG
+	if (data_size > llimgr_p->max_data_per_mlli) {
+		pr_err(
+			    "Given data size (%uB) is too large for MLLI (%luB)\n",
+			    data_size, llimgr_p->max_data_per_mlli);
+		return -EINVAL;
+	}
+#endif
+
+	if (mlli_iter_p->next_lli_idx >= llimgr_p->max_lli_num) {
+		/* Reached end of current MLLI table */
+		rc = alloc_next_mlli(llimgr_p, mlli_tables_list_p, mlli_iter_p);
+		if (rc != 0)
+			return rc;
+	}
+
+	remaining_data_for_mlli =
+	    llimgr_p->max_data_per_mlli - mlli_iter_p->cur_mlli_accum_data;
+
+	if (data_size > remaining_data_for_mlli) {
+		/* This chunk does not fit in this table */
+		if (remaining_data_for_mlli > 0) {/* Space left in this MLLI */
+			/* Add to this table first "half" of the chunk */
+			append_lli_to_mlli(mlli_iter_p, data_dma_addr,
+					   remaining_data_for_mlli);
+			pr_debug("Splitting SG of %uB to %uB+%uB\n",
+				      data_size, remaining_data_for_mlli,
+				      data_size - remaining_data_for_mlli);
+			/* Set the remainder to be pushed in the new table */
+			data_dma_addr += remaining_data_for_mlli;
+			data_size -= remaining_data_for_mlli;
+		}
+		rc = alloc_next_mlli(llimgr_p, mlli_tables_list_p, mlli_iter_p);
+		if (rc != 0)
+			return rc;
+	}
+
+	append_lli_to_mlli(mlli_iter_p, data_dma_addr, data_size);
+
+	return 0;
+}
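+
+/*
+ * Worked example (hypothetical numbers): with max_data_per_mlli of 64 KiB
+ * and 60 KiB already accumulated in the current table, appending an 8 KiB
+ * chunk emits a 4 KiB LLI into the current table, allocates the next
+ * table, and emits the remaining 4 KiB as that table's first LLI.
+ */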
+
+/**
+ * init_mlli_tables_list() - Initialize MLLI tables list object with user buffer
+ *				information
+ * @llimgr_p:	LLI-manager pointer
+ * @mlli_tables_list_p:	The MLLI tables list object to initialize
+ * @client_memref:	Referenced client DMA buffer
+ * @dma_direction:	This operation's DMA direction
+ *
+ * Returns int 0 on success
+ */
+static int init_mlli_tables_list(struct llimgr_obj *llimgr_p,
+				 struct mlli_tables_list *mlli_tables_list_p,
+				 struct client_dma_buffer *client_memref,
+				 enum dma_data_direction dma_direction)
+{
+	const bool is_inbuf = (dma_direction == DMA_TO_DEVICE) ||
+	    (dma_direction == DMA_BIDIRECTIONAL);
+	const bool is_outbuf = (dma_direction == DMA_FROM_DEVICE) ||
+	    (dma_direction == DMA_BIDIRECTIONAL);
+	const bool is_memref_inbuf =
+	    (client_memref->dma_direction == DMA_TO_DEVICE) ||
+	    (client_memref->dma_direction == DMA_BIDIRECTIONAL);
+	const bool is_memref_outbuf =
+	    (client_memref->dma_direction == DMA_FROM_DEVICE) ||
+	    (client_memref->dma_direction == DMA_BIDIRECTIONAL);
+	int rc;
+
+#ifdef DEBUG
+	/* Verify that given MLLI tables list is "clean" */
+	if (mlli_tables_list_p->user_memref != NULL) {
+		pr_err("Got \"dirty\" MLLI tables list!\n");
+		return -EINVAL;
+	}
+#endif /*DEBUG*/
+	MLLI_TABLES_LIST_INIT(mlli_tables_list_p);
+	if (client_memref->buf_size > 0) {
+		/* Validate buffer access permissions */
+		if (is_inbuf && !is_memref_inbuf) {
+			pr_err("No read access (%d) to user buffer @ %p\n",
+				    client_memref->dma_direction,
+				    client_memref->user_buf_ptr);
+			return -EFAULT;
+		}
+		if (is_outbuf && !is_memref_outbuf) {
+			pr_err("No write access (%d), data buffer @ %p\n",
+				    client_memref->dma_direction,
+				    client_memref->user_buf_ptr);
+			return -EFAULT;
+		}
+
+	}
+	rc = sync_client_dma_buf(llimgr_p->dev,
+				 client_memref, true /*for device*/,
+				 dma_direction);
+	if (likely(rc == 0)) {
+		/* Init. these fields only if the operations above succeeded */
+		mlli_tables_list_p->user_memref = client_memref;
+		mlli_tables_list_p->data_direction = dma_direction;
+	}
+	return rc;
+}
+
+/**
+ * cleanup_mlli_tables_list() - Cleanup MLLI tables resources
+ * @llimgr_p:	 LLI-manager pointer
+ * @mlli_tables_list_p:	The MLLI tables list object
+ * @is_data_dirty:	If true (!0) the (output) data pages are marked as dirty
+ *
+ * Cleanup MLLI tables resources
+ * This function may be invoked for a partially constructed MLLI tables list,
+ * as it tests for the existence of each resource before trying to release it.
+ */
+static void cleanup_mlli_tables_list(struct llimgr_obj *llimgr_p,
+				     struct mlli_tables_list
+				     *mlli_tables_list_p, int is_data_dirty)
+{
+	dma_addr_t cur_mlli_dma_addr;
+	dma_addr_t next_mlli_dma_addr;
+	u32 *cur_mlli_p;
+	u32 *next_mlli_p;
+	u32 *link_entry_p;
+	u32 lli_spad[SEP_LLI_ENTRY_WORD_SIZE];
+
+	pr_debug("mlli_tables_list_p=%p user_memref=%p table_count=%u\n",
+		      mlli_tables_list_p, mlli_tables_list_p->user_memref,
+		      mlli_tables_list_p->table_count);
+	/* Initialize to the first MLLI table */
+	if (mlli_tables_list_p->table_count > 0) {
+		cur_mlli_p =
+		    SEP_MLLI_GET_NEXT_VA(mlli_tables_list_p->
+					 link_to_first_table);
+		link_entry_p =
+		    mlli_tables_list_p->link_to_first_table +
+		    SEP_LLI_ENTRY_WORD_SIZE;
+		/* LE to BE... */
+		SEP_LLI_COPY_FROM_SEP(lli_spad, link_entry_p);
+		/* Actual allocation DMA address is one entry before
+		 * saved address */
+		cur_mlli_dma_addr =
+		    SEP_LLI_GET(lli_spad, ADDR) - SEP_LLI_ENTRY_BYTE_SIZE;
+	} else {
+		/* DLLI case: skip the cleanup loop below */
+		cur_mlli_p = NULL;
+	}
+
+	/* Cleanup MLLI tables */
+	while (cur_mlli_p != NULL) {
+		pr_debug("Freeing MLLI table buffer at %p (%08llX)\n",
+			cur_mlli_p, (long long unsigned int)cur_mlli_dma_addr);
+		/* The link entry follows the first entry that holds next VA */
+		link_entry_p = cur_mlli_p + SEP_LLI_ENTRY_WORD_SIZE;
+		SEP_LLI_COPY_FROM_SEP(lli_spad, link_entry_p);/* LE to BE... */
+		/* Save link pointers before freeing the table */
+		next_mlli_p = SEP_MLLI_GET_NEXT_VA(cur_mlli_p);
+		/* Actual allocation DMA address is one entry before
+		 * saved address */
+		next_mlli_dma_addr =
+		    SEP_LLI_GET(lli_spad, ADDR) - SEP_LLI_ENTRY_BYTE_SIZE;
+		dma_pool_free(llimgr_p->mlli_cache,
+			      cur_mlli_p, cur_mlli_dma_addr);
+
+		cur_mlli_p = next_mlli_p;
+		cur_mlli_dma_addr = next_mlli_dma_addr;
+	}
+
+	if ((is_data_dirty) && (mlli_tables_list_p->user_memref != NULL))
+		sync_client_dma_buf(llimgr_p->dev,
+				    mlli_tables_list_p->user_memref,
+				    false /*for CPU*/,
+				    mlli_tables_list_p->data_direction);
+
+	/* Clear traces (pointers) of released resources */
+	MLLI_TABLES_LIST_INIT(mlli_tables_list_p);
+
+}
+
+/**
+ * process_as_dlli() - Consider given MLLI request as DLLI and update the MLLI
+ *			object if possible. Otherwise return error.
+ *
+ * @mlli_tables_p:	Associated MLLI object with client memref set.
+ * @prepend_data:	Optional prepend data
+ * @prepend_data_size:	Optional prepend data size
+ *
+ * Returns 0 if the request was mapped as DLLI, -EINVAL otherwise.
+ */
+static int process_as_dlli(struct mlli_tables_list *mlli_tables_p,
+			   dma_addr_t prepend_data,
+			   unsigned long prepend_data_size)
+{
+	struct client_dma_buffer *memref = mlli_tables_p->user_memref;
+	u32 dma_size = memref->buf_size - memref->save4next_size;
+
+	/* Prepend data only (or 0 data) case */
+	if (memref->buf_size == 0) {
+		/* Handle 0-sized buffer or prepend_data only */
+		set_dlli(mlli_tables_p, prepend_data, prepend_data_size);
+		return 0;
+	}
+
+	/* Cannot concatenate prepend_data to client data with DLLI */
+	if (prepend_data_size > 0)
+		return -EINVAL;
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	/* None of the DLLI cases is possible with cache line alignment buf. */
+	if (memref->sg_head != NULL)
+		return -EINVAL;
+#endif
+
+	/* Physically contiguous buffer case */
+	if (memref->sg_tail == NULL) {
+		/* If no sg_tail it is an indication that sg_main is phys.
+		 * contiguous - DLLI directly to client buffer */
+		set_dlli(mlli_tables_p, sg_dma_address(memref->sg_main),
+			 dma_size);
+		return 0;
+	}
+
+	/* Small buffer copied to aux. buffer */
+	if (memref->sg_main == NULL) {
+		/* If not sg_main (i.e., only sg_tail) we can
+		 * DLLI to the aux. buf. */
+		set_dlli(mlli_tables_p, memref->buf_end_aux_buf_dma, dma_size);
+		return 0;
+	}
+
+	return -EINVAL;		/* Not suitable for DLLI */
+}
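+
+/*
+ * Summary of the DLLI-able cases handled above: (1) prepend data only
+ * (including a zero-sized buffer), (2) a physically contiguous client
+ * buffer (no sg_tail), and (3) a small buffer copied entirely into the
+ * tail aux. buffer (no sg_main). Any other layout falls back to MLLI
+ * construction in llimgr_create_mlli().
+ */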
+
+/**
+ * llimgr_create_mlli() - Create MLLI tables list for given user buffer
+ * @llimgr:	The LLI manager object handle
+ * @mlli_tables_p:	 A pointer to MLLI tables list object
+ * @dma_direction:	 The DMA direction of data flow
+ * @client_memref:	 User DMA memory reference (locked pages, etc.)
+ * @prepend_data:	 DMA address of data buffer to prepend before user data
+ * @prepend_data_size:	 Size of prepend_data (0 if none)
+ *
+ * Returns int 0 on success
+ */
+int llimgr_create_mlli(void *llimgr,
+		       struct mlli_tables_list *mlli_tables_p,
+		       enum dma_data_direction dma_direction,
+		       struct client_dma_buffer *client_memref,
+		       dma_addr_t prepend_data, unsigned long prepend_data_size)
+{
+	struct llimgr_obj *llimgr_p = (struct llimgr_obj *)llimgr;
+	unsigned long remaining_main_data;
+	unsigned int remaining_main_sg_ents;
+	struct mlli_tables_list_iterator mlli_iter;
+	unsigned long client_dma_size;
+	unsigned long cur_sge_len = 0;
+	dma_addr_t cur_sge_addr;
+	/* cur_lli_index initialized to end of "virtual" link table */
+	struct scatterlist *cur_sg_entry;
+	int rc;
+
+	/* client_memref must exist even if no user data (buf_size == 0), i.e.,
+	 * just prepend_data. */
+	if (client_memref == NULL) {
+		pr_err("Client memref is NULL.\n");
+		return -EINVAL;
+	}
+
+	client_dma_size =
+		client_memref->buf_size - client_memref->save4next_size;
+
+	SEP_LOG_TRACE(
+		      "buf @ 0x%08lX, size=0x%08lX B, prepend_size=0x%08lX B, dma_dir=%d\n",
+		      (unsigned long)client_memref->user_buf_ptr,
+		      client_memref->buf_size, prepend_data_size,
+		      dma_direction);
+
+	rc = init_mlli_tables_list(llimgr_p,
+				   mlli_tables_p, client_memref, dma_direction);
+	if (unlikely(rc != 0))
+		return rc;	/* No resources to cleanup */
+
+	rc = process_as_dlli(mlli_tables_p, prepend_data, prepend_data_size);
+	if (rc == 0)		/* Mapped as DLLI */
+		return 0;
+	rc = 0;			/* Reset rc; it is checked below before being set again */
+
+	/* Initialize local state to empty list */
+	mlli_iter.prev_mlli_table_p = NULL;
+	mlli_iter.cur_mlli_table_p = mlli_tables_p->link_to_first_table;
+	/* "First" table is the stub in struct mlli_tables_list, so we
+	 * mark it as "full" by setting next_lli_idx to maximum */
+	mlli_iter.next_lli_idx = llimgr_p->max_lli_num;
+	mlli_iter.cur_mlli_accum_data = 0;
+
+	if (prepend_data_size > 0) {
+		rc = append_data_to_mlli(llimgr_p, mlli_tables_p, &mlli_iter,
+					 prepend_data, prepend_data_size);
+		if (unlikely(rc != 0)) {
+			pr_err("Fail: add LLI entry for prepend_data\n");
+			goto mlli_create_exit;	/* do cleanup */
+		}
+	}
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	if (client_memref->buf_start_aux_buf_size > 0) {
+		rc = append_data_to_mlli(llimgr_p, mlli_tables_p, &mlli_iter,
+					 client_memref->buf_start_aux_buf_dma,
+					 client_memref->buf_start_aux_buf_size);
+		if (unlikely(rc != 0)) {
+			pr_err("Fail: add LLI entry for start_aux_buf\n");
+			goto mlli_create_exit;	/* do cleanup */
+		}
+	}
+#endif
+
+	/* Calculate amount of "main" data before last LLI */
+	/* (rounded down to a crypto block multiple, to avoid a non-last
+	 * MLLI table holding a non-crypto-block multiple of data) */
+	remaining_main_data =
+	    (prepend_data_size + client_dma_size -
+	     client_memref->buf_end_aux_buf_size) & ~MAX_CRYPTO_BLOCK_MASK;
+	/* Now remove the data outside of the main buffer */
+	remaining_main_data -= prepend_data_size;
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	remaining_main_data -= client_memref->buf_start_aux_buf_size;
+#endif
+	cur_sg_entry = client_memref->sg_main;
+	remaining_main_sg_ents = client_memref->sg_main_nents;
+
+	/* construct MLLI tables for sg_main list */
+	for (cur_sg_entry = client_memref->sg_main,
+	     remaining_main_sg_ents = client_memref->sg_main_nents;
+	     cur_sg_entry != NULL;
+	     cur_sg_entry = sg_next(cur_sg_entry), remaining_main_sg_ents--) {
+		/* Get current S/G entry length */
+		cur_sge_len = sg_dma_len(cur_sg_entry);
+		cur_sge_addr = sg_dma_address(cur_sg_entry);
+
+		/* Reached end of "main" data which is multiple of largest
+		 * crypto block? (consider split to next/last table).
+		 * (Check if needs to skip to next table for 2nd half) */
+		if ((remaining_main_data > 0) &&
+		    (cur_sge_len >=
+		     remaining_main_data) /*last "main" data */ &&
+		    /* NOT at end of table (i.e., starting a new table anyway) */
+		    (llimgr_p->max_lli_num > mlli_iter.next_lli_idx) &&
+		    /* Check if the remaining entries don't fit into this table */
+		    (remaining_main_sg_ents -
+		     ((cur_sge_len == remaining_main_data) ? 1 : 0) +
+		     ((client_memref->buf_end_aux_buf_size > 0) ? 1 : 0)) >
+		    (llimgr_p->max_lli_num - mlli_iter.next_lli_idx)) {
+			/* "tail" would be in next/last table */
+			/* Add last LLI for "main" data */
+			rc = append_data_to_mlli(llimgr_p, mlli_tables_p,
+						 &mlli_iter, cur_sge_addr,
+						 remaining_main_data);
+			if (unlikely(rc != 0)) {
+				pr_err(
+					    "Failed adding LLI entry for sg_main (last).\n");
+				goto mlli_create_exit;	/* do cleanup */
+			}
+			cur_sge_len -= remaining_main_data;
+			cur_sge_addr += remaining_main_data;
+			/* Skip to next MLLI for tail data */
+			rc = alloc_next_mlli(llimgr_p, mlli_tables_p,
+					     &mlli_iter);
+			if (unlikely(rc != 0)) {
+				pr_err("Fail add MLLI table for tail.\n");
+				goto mlli_create_exit;	/* do cleanup */
+			}
+		}
+
+		if (likely(cur_sge_len > 0)) {
+			/* When entry is split to next table, this would append
+			 * the second half of it. */
+			rc = append_data_to_mlli(llimgr_p, mlli_tables_p,
+						 &mlli_iter, cur_sge_addr,
+						 cur_sge_len);
+			if (unlikely(rc != 0)) {
+				pr_err("Fail add LLI entry for sg_main\n");
+				goto mlli_create_exit;	/* do cleanup */
+			}
+		}
+	}			/*for */
+
+	if (remaining_main_sg_ents > 0) {
+		pr_err("Remaining sg_ents>0 after end of S/G list!\n");
+		rc = -EINVAL;
+		goto mlli_create_exit;	/* do cleanup */
+	}
+
+	/* Append end aux. buffer */
+	if (client_memref->buf_end_aux_buf_size > 0) {
+		rc = append_data_to_mlli(llimgr_p, mlli_tables_p, &mlli_iter,
+					 client_memref->buf_end_aux_buf_dma,
+					 client_memref->buf_end_aux_buf_size);
+		if (unlikely(rc != 0)) {
+			pr_err("Fail: add LLI entry for end_aux_buf\n");
+			goto mlli_create_exit;	/* do cleanup */
+		}
+	}
+
+	terminate_mlli_tables_list(&mlli_iter);
+	pr_debug("MLLI %u tables (rc=%d):\n",
+		      mlli_tables_p->table_count, rc);
+	llimgr_dump_mlli_tables_list(mlli_tables_p);
+
+ mlli_create_exit:
+	if (rc != 0) {
+		/* The MLLI tables list is always consistent at bail-out
+		 * points, so we can use the simple cleanup function. */
+		cleanup_mlli_tables_list(llimgr_p, mlli_tables_p, 0);
+	}
+
+	return rc;
+
+}
+
+/**
+ * llimgr_destroy_mlli() - Cleanup resources of given MLLI tables list object
+ *				(if has any tables)
+ * @llimgr:	LLI-manager object handle
+ * @mlli_tables_p:	The MLLI tables list object to clean up
+ *
+ */
+void llimgr_destroy_mlli(void *llimgr,
+			 struct mlli_tables_list *mlli_tables_p)
+{
+	struct llimgr_obj *llimgr_p = (struct llimgr_obj *)llimgr;
+	const bool is_dirty =
+	    (mlli_tables_p->data_direction == DMA_BIDIRECTIONAL) ||
+	    (mlli_tables_p->data_direction == DMA_FROM_DEVICE);
+
+	cleanup_mlli_tables_list(llimgr_p, mlli_tables_p, is_dirty);
+}
+
+/**
+ * llimgr_mlli_to_seprpc_memref() - Convert given MLLI tables list into a SeP
+ *					RPC memory reference format
+ * @mlli_tables_p:	 The source MLLI table
+ * @memref_p:	 The destination RPC memory reference
+ *
+ */
+void llimgr_mlli_to_seprpc_memref(struct mlli_tables_list *mlli_tables_p,
+				  struct seprpc_memref *memref_p)
+{
+	u32 xlli_addr;
+	u16 xlli_size;
+	u16 table_count;
+
+	llimgr_get_mlli_desc_info(mlli_tables_p,
+				  &xlli_addr, &xlli_size, &table_count);
+
+	memref_p->ref_type = cpu_to_le32(table_count > 0 ?
+					 SEPRPC_MEMREF_MLLI :
+					 SEPRPC_MEMREF_DLLI);
+	memref_p->location = cpu_to_le32(xlli_addr);
+	memref_p->size = cpu_to_le32(xlli_size);
+	memref_p->count = cpu_to_le32(table_count);
+}
+
+/**
+ * llimgr_get_mlli_desc_info() - Get the MLLI info required for a descriptor.
+ *
+ * @mlli_tables_p:	The source MLLI table
+ * @first_table_addr_p:	First table DMA address or data DMA address for DLLI
+ * @first_table_size_p:	First table size in bytes or data size for DLLI
+ * @num_of_tables_p:	Number of MLLI tables in the list (0 for DLLI)
+ *
+ * In case of DLLI, first_table_* refers to the client DMA buffer (DLLI info.)
+ */
+void llimgr_get_mlli_desc_info(struct mlli_tables_list *mlli_tables_p,
+			       u32 *first_table_addr_p,
+			       u16 *first_table_size_p,
+			       u16 *num_of_tables_p)
+{
+	u32 link_lli_spad[SEP_LLI_ENTRY_WORD_SIZE];
+	u32 *first_mlli_link_p;
+
+	first_mlli_link_p = mlli_tables_p->link_to_first_table +
+	    SEP_LLI_ENTRY_WORD_SIZE;
+	SEP_LLI_COPY_FROM_SEP(link_lli_spad, first_mlli_link_p);
+	/* Descriptors are read by direct access, which takes care of
+	 * swapping from host endianness to SeP endianness, so we need
+	 * to revert the endianness in the link LLI entry */
+	*first_table_addr_p = SEP_LLI_GET(link_lli_spad, ADDR);
+	*first_table_size_p = SEP_LLI_GET(link_lli_spad, SIZE);
+	*num_of_tables_p = mlli_tables_p->table_count;
+}
diff --git a/drivers/staging/sep54/lli_mgr.h b/drivers/staging/sep54/lli_mgr.h
new file mode 100644
index 0000000..ab4bec9
--- /dev/null
+++ b/drivers/staging/sep54/lli_mgr.h
@@ -0,0 +1,291 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+/*!                                                                           *
+ * \file lli_mgr.h                                                            *
+ * \brief LLI logic: API definition                                           *
+ *                                                                            */
+
+#ifndef __LLI_MGR_H__
+#define __LLI_MGR_H__
+
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include "sep_lli.h"
+
+#define LLIMGR_NULL_HANDLE NULL
+
+/* Using this macro assures correct initialization in case of future changes */
+#define MLLI_TABLES_LIST_INIT(mlli_tables_list_ptr) do {		\
+	memset((mlli_tables_list_ptr), 0,				\
+	       sizeof(struct mlli_tables_list));			\
+} while (0)		/* Executed once */
+
+/**
+ * llimgr_is_same_mlli_tables() - Tell if the given tables lists point to the
+ *					same tables list
+ * @llimgr:
+ * @mlli_tables1:	 First MLLI tables list object
+ * @mlli_tables2:	 Second MLLI tables list object
+ *
+ * Tell if the given tables lists point to the same tables list
+ * (used to identify in-place operation)
+ */
+#define llimgr_is_same_mlli_tables(llimgr, mlli_tables1, mlli_tables2)   \
+	((mlli_tables1)->user_memref == (mlli_tables2)->user_memref)
+
+/* Clean struct client_dma_buffer buffer info */
+#define CLEAN_DMA_BUFFER_INFO(_client_dma_buf_p) \
+	memset(_client_dma_buf_p, 0, sizeof(struct client_dma_buffer))
+
+/**
+ * struct client_dma_buffer - Client DMA buffer object
+ * @buf_size: buffer size in bytes
+ * @user_buf_ptr:	Pointer to start of buffer in user space. May be NULL
+ *			for buf_size==0 or if the user is the kernel (given
+ *			scatterlist)
+ * @num_of_pages:	Number of pages in user_pages array
+ * @user_pages:		Locked user pages (for user space buffer)
+ * @dma_direction:	DMA direction over given buffer mapping
+ * @sg_head:		S/G list of buffer header (memcopied)
+ * @sg_main:		S/G list of buffer body (for DMA)
+ * @sg_tail:		S/G list of buffer tail (memcopied)
+ * @sg_main_nents:	Num. of S/G entries for sg_main
+ * @sg_save4next:	S/G list of buffer chunk past "tail" for copying to
+ *			side buffer for next operation (for hash block remains)
+ * @sg_save4next_nents:	Num. of S/G entries for sg_save4next
+ * @save4next_size:	Size of data in sg_save4next
+ * @buf_end_aux_buf_va:	Tail aux. buffer virtual address
+ * @buf_end_aux_buf_dma:	DMA address of buf_end_aux_buf_va
+ * @buf_end_aux_buf_size: Number of bytes copied in tail aux. buffer
+ * @buf_start_aux_buf_va:	Header aux. buffer virtual address
+ * @buf_start_aux_buf_dma:	DMA address of buf_start_aux_buf_va
+ * @buf_start_aux_buf_size: Number of bytes copied in header aux. buffer
+ */
+struct client_dma_buffer {
+	unsigned long buf_size;
+	/* User buffer info. */
+	u8 __user *user_buf_ptr;
+	int num_of_pages;
+	struct page **user_pages;
+
+	/*
+	 * DMA mapping info (either created for user space pages
+	 * or retrieved from kernel client)
+	 */
+	enum dma_data_direction dma_direction;
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	struct scatterlist *sg_head;
+#endif
+	struct scatterlist *sg_main;
+	struct scatterlist *sg_tail;
+	struct scatterlist *sg_save4next;
+
+	unsigned int sg_main_nents;
+	unsigned long save4next_size;
+	/* Auxiliary driver buffer for sg_head and sg_tail copies */
+	void *buf_end_aux_buf_va;
+	dma_addr_t buf_end_aux_buf_dma;
+	unsigned long buf_end_aux_buf_size;
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	/* Currently only cache line at buffer start requires aux. buffers */
+	void *buf_start_aux_buf_va;
+	dma_addr_t buf_start_aux_buf_dma;
+	unsigned long buf_start_aux_buf_size;
+#endif				/*CONFIG_NOT_COHERENT_CACHE */
+
+};
+
+/**
+ * struct mlli_tables_list - MLLI tables list object
+ * @link_to_first_table:	"link" to the first table. One extra LLI entry
+ *				is used to implement the list head with the
+ *				same data structure used to link subsequent
+ *				tables, which avoids special code for linking
+ *				the first table.
+ * @table_count:	The total number of fragmented tables. table_count is
+ *			only 16 bits, matching the size allocated for it in
+ *			the SW descriptor.
+ * @user_memref:	Referenced client DMA buffer
+ * @data_direction:	This operation DMA direction
+ *
+ */
+struct mlli_tables_list {
+	u32 link_to_first_table[2 * SEP_LLI_ENTRY_WORD_SIZE];
+	u16 table_count;
+	struct client_dma_buffer *user_memref;
+	enum dma_data_direction data_direction;
+};
+
+/**
+ * llimgr_mlli_is_dlli() - Check if the given MLLI tables list object points
+ *				to a Direct LLI buffer.
+ * @mlli_tables_p:	A pointer to a tables list
+ *
+ * Returns true for a DLLI buffer, false otherwise. If mlli_tables_p points to
+ * an empty data buffer this function will return true.
+ */
+static inline bool llimgr_mlli_is_dlli(struct mlli_tables_list *mlli_tables_p)
+{
+	return (mlli_tables_p->user_memref != NULL) &&
+	    (mlli_tables_p->table_count == 0);
+}
+
+/**
+ * llimgr_create() - Create LLI-manager object
+ * @dev:	 Device context
+ * @mlli_table_size:	 The maximum size of an MLLI table in bytes
+ *
+ * Returns llimgr_h Created object handle or LLIMGR_NULL_HANDLE if failed
+ */
+void *llimgr_create(struct device *dev, unsigned long mlli_table_size);
+
+/**
+ * llimgr_destroy() - Destroy (free resources of) given LLI-manager object
+ * @llimgr:	 LLI-manager object handle
+ *
+ */
+void llimgr_destroy(void *llimgr);
+
+/**
+ * llimgr_register_client_dma_buf() - Register given client buffer for DMA
+ *					operation.
+ * @llimgr:	 The LLI manager object handle
+ * @user_buf_ptr:	 Pointer in user space of the user buffer
+ * @sgl:	Client provided s/g list. user_buf_ptr is assumed NULL if this
+ *		list is given (!NULL).
+ * @buf_size:	 The user buffer size in bytes (incl. save4next). May be 0.
+ * @save4next_size:	Amount from buffer end to save for next op.
+ *			(split into separate sgl). May be 0.
+ * @dma_direction:	The DMA direction this buffer would be used for
+ * @client_dma_buf_p:	Pointer to the user DMA buffer "object"
+ *
+ * Register given client buffer for DMA operation.
+ * If user_buf_ptr!=NULL and sgl==NULL it locks the user pages and creates
+ * head/main/tail s/g lists. If sgl!=NULL, it splits it into head/main/tail
+ * s/g lists.
+ * Returns 0 for success
+ */
+int llimgr_register_client_dma_buf(void *llimgr,
+				   u8 __user *user_buf_ptr,
+				   struct scatterlist *sgl,
+				   const unsigned long buf_size,
+				   const unsigned long save4next_size,
+				   const enum dma_data_direction dma_direction,
+				   struct client_dma_buffer *client_dma_buf_p);
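+
+/*
+ * Minimal usage sketch (illustrative only; error handling elided, and the
+ * llimgr handle is assumed to come from llimgr_create()):
+ *
+ *	struct client_dma_buffer memref;
+ *	int rc = llimgr_register_client_dma_buf(llimgr, user_ptr, NULL,
+ *						buf_size, 0, DMA_TO_DEVICE,
+ *						&memref);
+ *	if (rc == 0) {
+ *		... DMA operation using &memref ...
+ *		llimgr_deregister_client_dma_buf(llimgr, &memref);
+ *	}
+ */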
+
+/**
+ * llimgr_deregister_client_dma_buf() - Unmap given user DMA buffer
+ *					(flush and unlock pages)
+ * @llimgr:	LLI-manager object handle
+ * @client_dma_buf_p:	User DMA buffer object
+ *
+ */
+void llimgr_deregister_client_dma_buf(void *llimgr,
+				      struct client_dma_buffer
+				      *client_dma_buf_p);
+
+/**
+ * llimgr_copy_from_client_buf_save4next() - Copy from the sg_save4next chunk
+ *	of the client DMA buffer to given buffer.
+ *	Used to save hash block remainder.
+ *
+ * @client_dma_buf_p:	The client DMA buffer with the save4next chunk
+ * @to_buf:		Target buffer to copy to.
+ * @buf_len:		Given buffer length (to avoid buffer overflow)
+ *
+ * Returns number of bytes copied or -ENOMEM if given buffer is too small
+ */
+int llimgr_copy_from_client_buf_save4next(struct client_dma_buffer
+					  *client_dma_buf_p, u8 *to_buf,
+					  unsigned long buf_len);
+
+/**
+ * llimgr_create_mlli() - Create MLLI tables list for given user buffer
+ * @llimgr:	LLI-manager object handle
+ * @mlli_tables_p:	 A pointer to MLLI tables list object
+ * @dma_direction:	 The DMA direction of data flow
+ * @user_memref:	 User DMA memory reference (locked pages, etc.)
+ * @prepend_data:	 DMA address of data buffer to prepend before user data
+ * @prepend_data_size:	 Size of prepend_data (0 if none)
+ *
+ * Returns int 0 on success
+ */
+int llimgr_create_mlli(void *llimgr,
+		       struct mlli_tables_list *mlli_tables_p,
+		       enum dma_data_direction dma_direction,
+		       struct client_dma_buffer *user_memref,
+		       dma_addr_t prepend_data,
+		       unsigned long prepend_data_size);
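+
+/*
+ * Typical flow sketch (illustrative; assumes a client buffer "memref"
+ * already registered via llimgr_register_client_dma_buf() and no prepend
+ * data):
+ *
+ *	struct mlli_tables_list mlli;
+ *	MLLI_TABLES_LIST_INIT(&mlli);
+ *	rc = llimgr_create_mlli(llimgr, &mlli, DMA_TO_DEVICE, &memref, 0, 0);
+ *	... use llimgr_get_mlli_desc_info() to fill the SW descriptor ...
+ *	llimgr_destroy_mlli(llimgr, &mlli);
+ */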
+
+/**
+ * llimgr_destroy_mlli() - Cleanup resources of given MLLI tables list object
+ *				(if has any tables)
+ * @llimgr:	LLI-manager object handle
+ * @mlli_tables_p:	The MLLI tables list object to clean up
+ *
+ */
+void llimgr_destroy_mlli(void *llimgr,
+			 struct mlli_tables_list *mlli_tables_p);
+
+/**
+ * llimgr_mlli_to_seprpc_memref() - Convert given MLLI tables list into a
+ *					SeP RPC memory reference format
+ * @mlli_tables_p:	 The source MLLI table
+ * @memref_p:	 The destination RPC memory reference
+ *
+ */
+void llimgr_mlli_to_seprpc_memref(struct mlli_tables_list *mlli_tables_p,
+				  struct seprpc_memref *memref_p);
+
+/**
+ * llimgr_get_mlli_desc_info() - Get the MLLI info required for a descriptor.
+ *
+ * @mlli_tables_p:	The source MLLI table
+ * @first_table_addr_p:	First table DMA address or data DMA address for DLLI
+ * @first_table_size_p:	First table size in bytes or data size for DLLI
+ * @num_of_tables_p:	Number of MLLI tables in the list (0 for DLLI)
+ *
+ * In case of DLLI, first_table_* refers to the client DMA buffer (DLLI info.)
+ */
+void llimgr_get_mlli_desc_info(struct mlli_tables_list *mlli_tables_p,
+			       u32 *first_table_addr_p,
+			       u16 *first_table_size_p,
+			       u16 *num_of_tables_p);
+
+#ifdef DEBUG
+/**
+ * llimgr_dump_mlli_tables_list() - Dump all the MLLI tables in a given tables
+ *					list
+ * @mlli_tables_list_p:	 Pointer to tables list structure
+ *
+ */
+void llimgr_dump_mlli_tables_list(struct mlli_tables_list *mlli_tables_list_p);
+#else
+#define llimgr_dump_mlli_tables_list(mlli_tables_list_p) do {} while (0)
+#endif /*DEBUG*/
+#endif /*__LLI_MGR_H__*/
diff --git a/drivers/staging/sep54/sep_applets.h b/drivers/staging/sep54/sep_applets.h
new file mode 100644
index 0000000..0b65a9f
--- /dev/null
+++ b/drivers/staging/sep54/sep_applets.h
@@ -0,0 +1,37 @@
+/*
+ *  sep_applets.h - Security Processor applet definitions
+ *
+ *  Copyright(c) 2012-2013 Intel Corporation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ *
+ *  You should have received a copy of the GNU General Public License along with
+ *  this program; if not, write to the Free Software Foundation, Inc., 59
+ *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+#ifndef _SEP_APPLETS_H
+#define _SEP_APPLETS_H
+
+/* Kernel side threads (Agents as DX calls them) */
+#define RPMB_AGENT_ID            0
+
+/* Applet UUIDs */
+#define DEFAULT_APP_UUID { 0x00, 0xDE, 0xFA, 0x01, 0xDE, 0xFA, 0x02, 0xDE, \
+	0xFA, 0x03, 0xDE, 0xFA, 0x04, 0xDE, 0xFA, 0xFF }
+
+#define HDCP_APP_UUID { 0x10, 0x21, 0x32, 0x43, 0x54, 0x65, 0x76, 0x87,     \
+	0x98, 0xA9, 0xBA, 0xCB, 0xDC, 0xED, 0xFE, 0x0F }
+
+#define CMD_RPMB_ENABLE          1
+#define CMD_IMAGE_VERIFY         3
+#define CMD_KEYPOLICY_CHECK      7
+#define HDCP_RX_HDMI_STATUS 0x80000080
+
+#endif /* _SEP_APPLETS_H */
diff --git a/drivers/staging/sep54/sep_compat_ioctl.c b/drivers/staging/sep54/sep_compat_ioctl.c
new file mode 100644
index 0000000..26fbb7e
--- /dev/null
+++ b/drivers/staging/sep54/sep_compat_ioctl.c
@@ -0,0 +1,1068 @@
+/*
+ * Copyright (C) 2013 Intel Finland Oy
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/compat.h>
+#include <linux/compiler.h>
+#include <linux/uaccess.h>
+#include <linux/printk.h>
+#include "sep_compat_ioctl.h"
+#include "dx_driver_abi.h"
+#include "dx_dev_defs.h"
+#include "sepapp.h"
+
+typedef int sep_ioctl_compat_t(struct file *filp, unsigned int cmd,
+			       unsigned long arg);
+
+static int compat_sep_ioctl_get_ver_major(struct file *filp, unsigned int cmd,
+					  unsigned long arg)
+{
+	return sep_ioctl(filp, cmd, arg);
+}
+
+static int compat_sep_ioctl_get_ver_minor(struct file *filp, unsigned int cmd,
+					  unsigned long arg)
+{
+	return sep_ioctl(filp, cmd, arg);
+}
+
+static int compat_sep_ioctl_get_sym_cipher_ctx_size(struct file *filp,
+						    unsigned int cmd,
+						    unsigned long arg)
+{
+	return sep_ioctl(filp, cmd, arg);
+}
+
+static int compat_sep_ioctl_get_auth_enc_ctx_size(struct file *filp,
+						  unsigned int cmd,
+						  unsigned long arg)
+{
+	return sep_ioctl(filp, cmd, arg);
+}
+
+static int compat_sep_ioctl_get_mac_ctx_size(struct file *filp,
+					     unsigned int cmd,
+					     unsigned long arg)
+{
+	return sep_ioctl(filp, cmd, arg);
+}
+
+static int compat_sep_ioctl_get_hash_ctx_size(struct file *filp,
+					      unsigned int cmd,
+					      unsigned long arg)
+{
+	return sep_ioctl(filp, cmd, arg);
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct sym_cipher_init_params_32 {
+	u32 context_buf;	/*[in] */
+	struct dxdi_sym_cipher_props props;	/*[in] */
+	u32 error_info;	/*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_sym_cipher_init(struct file *filp, unsigned int cmd,
+					    unsigned long arg)
+{
+	struct sym_cipher_init_params_32 init_32;
+	struct dxdi_sym_cipher_init_params __user *init_params;
+	int ret;
+
+	if (copy_from_user(&init_32, (void __user *)arg, sizeof(init_32)))
+		return -EFAULT;
+
+	init_params = compat_alloc_user_space(sizeof(*init_params));
+	if (!access_ok(VERIFY_WRITE, init_params, sizeof(*init_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)init_32.context_buf,
+		       &init_params->context_buf)
+	    || copy_to_user(&init_params->props, &init_32.props,
+			    sizeof(init_32.props)))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)init_params);
+
+	if (__get_user(init_32.error_info, &init_params->error_info))
+		return -EFAULT;
+
+	if (put_user(init_32.error_info,
+		       &((struct sym_cipher_init_params_32 __user *)arg)->error_info))
+		return -EFAULT;
+
+	return ret;
+}
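+
+/*
+ * The *_init compat handlers in this file all follow the same pattern:
+ * copy the packed 32-bit layout in, rebuild the native layout in
+ * compat-allocated user space (widening the 32-bit context_buf pointer),
+ * forward to sep_ioctl(), and finally copy error_info back into the
+ * caller's 32-bit struct.
+ */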
+
+#pragma pack(push)
+#pragma pack(4)
+struct auth_enc_init_params_32 {
+	u32 context_buf; /*[in] */
+	struct dxdi_auth_enc_props props; /*[in] */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_auth_enc_init(struct file *filp, unsigned int cmd,
+					  unsigned long arg)
+{
+	struct auth_enc_init_params_32 up32;
+	struct dxdi_auth_enc_init_params __user *init_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	init_params = compat_alloc_user_space(sizeof(*init_params));
+	if (!access_ok(VERIFY_WRITE, init_params, sizeof(*init_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &init_params->context_buf)
+	    || copy_to_user(&init_params->props, &up32.props,
+			    sizeof(up32.props)))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)init_params);
+
+	if (__get_user(up32.error_info, &init_params->error_info))
+		return -EFAULT;
+
+	if (put_user(up32.error_info,
+		       &((struct auth_enc_init_params_32 *)arg)->error_info))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct mac_init_params_32 {
+	u32 context_buf; /*[in] */
+	struct dxdi_mac_props props;  /*[in] */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_mac_init(struct file *filp, unsigned int cmd,
+				     unsigned long arg)
+{
+	struct mac_init_params_32 up32;
+	struct dxdi_mac_init_params __user *init_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	init_params = compat_alloc_user_space(sizeof(*init_params));
+	if (!access_ok(VERIFY_WRITE, init_params, sizeof(*init_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &init_params->context_buf)
+	    || copy_to_user(&init_params->props, &up32.props,
+			    sizeof(up32.props)))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)init_params);
+
+	if (__get_user(up32.error_info, &init_params->error_info))
+		return -EFAULT;
+
+	if (put_user(up32.error_info,
+		       &((struct mac_init_params_32 *)arg)->error_info))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct hash_init_params_32 {
+	u32 context_buf; /*[in] */
+	enum dxdi_hash_type hash_type;  /*[in] */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_hash_init(struct file *filp, unsigned int cmd,
+				      unsigned long arg)
+{
+	struct hash_init_params_32 up32;
+	struct dxdi_hash_init_params __user *init_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	init_params = compat_alloc_user_space(sizeof(*init_params));
+	if (!access_ok(VERIFY_WRITE, init_params, sizeof(*init_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &init_params->context_buf)
+	    || __put_user(up32.hash_type, &init_params->hash_type))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)init_params);
+
+	if (__get_user(up32.error_info, &init_params->error_info))
+		return -EFAULT;
+
+	if (put_user(up32.error_info,
+		       &((struct hash_init_params_32 *)arg)->error_info))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct process_dblk_params_32 {
+	u32 context_buf; /*[in] */
+	u32 data_in;  /*[in] */
+	u32 data_out; /*[in] */
+	enum dxdi_data_block_type data_block_type;  /*[in] */
+	u32 data_in_size; /*[in] */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_proc_dblk(struct file *filp, unsigned int cmd,
+				      unsigned long arg)
+{
+	struct process_dblk_params_32 up32;
+	struct dxdi_process_dblk_params __user *dblk_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	dblk_params = compat_alloc_user_space(sizeof(*dblk_params));
+	if (!access_ok(VERIFY_WRITE, dblk_params, sizeof(*dblk_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &dblk_params->context_buf)
+	    || __put_user((void __user *)(unsigned long)up32.data_in,
+			  &dblk_params->data_in)
+	    || __put_user((void __user *)(unsigned long)up32.data_out,
+			  &dblk_params->data_out)
+	    || __put_user(up32.data_block_type, &dblk_params->data_block_type)
+	    || __put_user(up32.data_in_size, &dblk_params->data_in_size))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)dblk_params);
+
+	if (__get_user(up32.error_info, &dblk_params->error_info))
+		return -EFAULT;
+
+	if (put_user(up32.error_info,
+		       &((struct process_dblk_params_32 *)arg)->error_info))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct fin_process_params_32 {
+	u32 context_buf; /*[in] */
+	u32 data_in;  /*[in] */
+	u32 data_out; /*[in] */
+	u32 data_in_size; /*[in] (octets) */
+	u8 digest_or_mac[DXDI_DIGEST_SIZE_MAX]; /*[out] */
+	u8 digest_or_mac_size;  /*[out] (octets) */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_fin_proc(struct file *filp, unsigned int cmd,
+				     unsigned long arg)
+{
+	struct fin_process_params_32 up32;
+	struct dxdi_fin_process_params __user *fin_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	fin_params = compat_alloc_user_space(sizeof(*fin_params));
+	if (!access_ok(VERIFY_WRITE, fin_params, sizeof(*fin_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &fin_params->context_buf)
+	    || __put_user((void __user *)(unsigned long)up32.data_in,
+			  &fin_params->data_in)
+	    || __put_user((void __user *)(unsigned long)up32.data_out,
+			  &fin_params->data_out)
+	    || __put_user(up32.data_in_size, &fin_params->data_in_size))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)fin_params);
+
+	if (copy_from_user(up32.digest_or_mac, fin_params->digest_or_mac,
+			   DXDI_DIGEST_SIZE_MAX)
+	    || __get_user(up32.digest_or_mac_size,
+			  &fin_params->digest_or_mac_size)
+	    || __get_user(up32.error_info, &fin_params->error_info))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct combined_init_params_32 {
+	struct dxdi_combined_props props; /*[in] */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_combined_init(struct file *filp, unsigned int cmd,
+					  unsigned long arg)
+{
+	struct combined_init_params_32 up32;
+	struct dxdi_combined_init_params __user *init_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	init_params = compat_alloc_user_space(sizeof(*init_params));
+	if (!access_ok(VERIFY_WRITE, init_params, sizeof(*init_params)))
+		return -EFAULT;
+
+	if (copy_to_user(&init_params->props, &up32.props, sizeof(up32.props)))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)init_params);
+
+	if (__get_user(up32.error_info, &init_params->error_info))
+		return -EFAULT;
+
+	if (put_user(up32.error_info,
+		       &((struct combined_init_params_32 *)arg)->error_info))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct combined_proc_dblk_params_32 {
+	struct dxdi_combined_props props; /*[in] */
+	u32 data_in;  /*[in] */
+	u32 data_out; /*[out] */
+	u32 data_in_size; /*[in] (octets) */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_combined_proc_dblk(struct file *filp,
+					       unsigned int cmd,
+					       unsigned long arg)
+{
+	struct combined_proc_dblk_params_32 up32;
+	struct dxdi_combined_proc_dblk_params __user *blk_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	blk_params = compat_alloc_user_space(sizeof(*blk_params));
+	if (!access_ok(VERIFY_WRITE, blk_params, sizeof(*blk_params)))
+		return -EFAULT;
+
+	if (copy_to_user(&blk_params->props, &up32.props,
+			 sizeof(up32.props))
+	    || __put_user((void __user *)(unsigned long)up32.data_in,
+			  &blk_params->data_in)
+	    || __put_user((void __user *)(unsigned long)up32.data_out,
+			  &blk_params->data_out)
+	    || __put_user(up32.data_in_size, &blk_params->data_in_size))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)blk_params);
+
+	if (__get_user(up32.error_info, &blk_params->error_info))
+		return -EFAULT;
+
+	if (put_user(up32.error_info,
+		       &((struct combined_proc_dblk_params_32 *)arg)->error_info))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct combined_proc_params_32 {
+	struct dxdi_combined_props props; /*[in] */
+	u32 data_in;  /*[in] */
+	u32 data_out; /*[out] */
+	u32 data_in_size; /*[in] (octets) */
+	u8 auth_data[DXDI_DIGEST_SIZE_MAX]; /*[out] */
+	u8 auth_data_size;  /*[out] (octets) */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_combined_fin_proc(struct file *filp,
+					      unsigned int cmd,
+					      unsigned long arg)
+{
+	struct combined_proc_params_32 up32;
+	struct dxdi_combined_proc_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (copy_to_user(&user_params->props, &up32.props,
+			 sizeof(up32.props))
+	    || __put_user((void __user *)(unsigned long)up32.data_in,
+			  &user_params->data_in)
+	    || __put_user((void __user *)(unsigned long)up32.data_out,
+			  &user_params->data_out)
+	    || __put_user(up32.data_in_size, &user_params->data_in_size))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+
+	if (copy_from_user(up32.auth_data, user_params->auth_data,
+			   DXDI_DIGEST_SIZE_MAX)
+	    || __get_user(up32.auth_data_size,
+			  &user_params->auth_data_size)
+	    || __get_user(up32.error_info, &user_params->error_info))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return ret;
+}
+
+static int compat_sep_ioctl_combined_proc(struct file *filp,
+					  unsigned int cmd, unsigned long arg)
+{
+	return compat_sep_ioctl_combined_fin_proc(filp, cmd, arg);
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct sym_cipher_proc_params_32 {
+	u32 context_buf; /*[in] */
+	struct dxdi_sym_cipher_props props; /*[in] */
+	u32 data_in;  /*[in] */
+	u32 data_out; /*[in] */
+	u32 data_in_size; /*[in] (octets) */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_sym_cipher_proc(struct file *filp, unsigned int cmd,
+					    unsigned long arg)
+{
+	struct sym_cipher_proc_params_32 up32;
+	struct dxdi_sym_cipher_proc_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &user_params->context_buf)
+	    || copy_to_user(&user_params->props, &up32.props,
+			    sizeof(up32.props))
+	    || __put_user((void __user *)(unsigned long)up32.data_in,
+			  &user_params->data_in)
+	    || __put_user((void __user *)(unsigned long)up32.data_out,
+			  &user_params->data_out)
+	    || __put_user(up32.data_in_size, &user_params->data_in_size))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+
+	if (__get_user(up32.error_info, &user_params->error_info))
+		return -EFAULT;
+
+	if (put_user(up32.error_info,
+		       &((struct sym_cipher_proc_params_32 *)arg)->error_info))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct auth_enc_proc_params_32 {
+	u32 context_buf; /*[in] */
+	struct dxdi_auth_enc_props props; /*[in] */
+	u32 adata;    /*[in] */
+	u32 text_data;  /*[in] */
+	u32 data_out; /*[in] */
+	u8 tag[DXDI_DIGEST_SIZE_MAX]; /*[out] */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_auth_enc_proc(struct file *filp, unsigned int cmd,
+					  unsigned long arg)
+{
+	struct auth_enc_proc_params_32 up32;
+	struct dxdi_auth_enc_proc_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &user_params->context_buf)
+	    || copy_to_user(&user_params->props, &up32.props,
+			    sizeof(up32.props))
+	    || __put_user((void __user *)(unsigned long)up32.adata,
+			  &user_params->adata)
+	    || __put_user((void __user *)(unsigned long)up32.text_data,
+			  &user_params->text_data)
+	    || __put_user((void __user *)(unsigned long)up32.data_out,
+			  &user_params->data_out))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+
+	if (copy_from_user(up32.tag, user_params->tag,
+			   DXDI_DIGEST_SIZE_MAX)
+	    || __get_user(up32.error_info, &user_params->error_info))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct mac_proc_params_32 {
+	u32 context_buf; /*[in] */
+	struct dxdi_mac_props props;  /*[in] */
+	u32 data_in;  /*[in] */
+	u32 data_in_size; /*[in] (octets) */
+	u8 mac[DXDI_DIGEST_SIZE_MAX]; /*[out] */
+	u8 mac_size;  /*[out] (octets) */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_mac_proc(struct file *filp, unsigned int cmd,
+				     unsigned long arg)
+{
+	struct mac_proc_params_32 up32;
+	struct dxdi_mac_proc_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &user_params->context_buf)
+	    || copy_to_user(&user_params->props, &up32.props,
+			 sizeof(up32.props))
+	    || __put_user((void __user *)(unsigned long)up32.data_in,
+			  &user_params->data_in)
+	    || __put_user(up32.data_in_size, &user_params->data_in_size))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+
+	if (copy_from_user(up32.mac, user_params->mac, DXDI_DIGEST_SIZE_MAX)
+	    || __get_user(up32.mac_size, &user_params->mac_size)
+	    || __get_user(up32.error_info, &user_params->error_info))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct hash_proc_params_32 {
+	u32 context_buf; /*[in] */
+	enum dxdi_hash_type hash_type;  /*[in] */
+	u32 data_in;  /*[in] */
+	u32 data_in_size; /*[in] (octets) */
+	u8 digest[DXDI_DIGEST_SIZE_MAX];  /*[out] */
+	u8 digest_size; /*[out] (octets) */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_hash_proc(struct file *filp, unsigned int cmd,
+				      unsigned long arg)
+{
+	struct hash_proc_params_32 up32;
+	struct dxdi_hash_proc_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &user_params->context_buf)
+	    || __put_user(up32.hash_type, &user_params->hash_type)
+	    || __put_user((void __user *)(unsigned long)up32.data_in,
+			  &user_params->data_in)
+	    || __put_user(up32.data_in_size, &user_params->data_in_size))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+
+	if (copy_from_user(up32.digest, user_params->digest,
+			   DXDI_DIGEST_SIZE_MAX)
+	    || __get_user(up32.digest_size, &user_params->digest_size)
+	    || __get_user(up32.error_info, &user_params->error_info))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct sep_rpc_params_32 {
+	u16 agent_id; /*[in] */
+	u16 func_id;  /*[in] */
+	struct dxdi_memref mem_refs[SEP_RPC_MAX_MEMREF_PER_FUNC]; /*[in] */
+	u32 rpc_params_size;  /*[in] */
+	u32 rpc_params; /*[in] */
+	/* rpc_params to be copied into kernel DMA buffer */
+	enum seprpc_retcode error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_sep_rpc(struct file *filp, unsigned int cmd,
+				    unsigned long arg)
+{
+	struct sep_rpc_params_32 up32;
+	struct dxdi_sep_rpc_params __user *user_params;
+
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (__put_user(up32.agent_id, &user_params->agent_id)
+	    || __put_user(up32.func_id, &user_params->func_id)
+	    || copy_to_user(user_params->mem_refs, up32.mem_refs,
+			    sizeof(struct dxdi_memref)
+			    * SEP_RPC_MAX_MEMREF_PER_FUNC)
+	    || __put_user(up32.rpc_params_size, &user_params->rpc_params_size)
+	    || __put_user((void __user *)(unsigned long)up32.rpc_params,
+			 &user_params->rpc_params))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+	if (ret)
+		return ret;
+
+	if (__get_user(up32.error_info, &user_params->error_info))
+		return -EFAULT;
+
+	if (put_user(up32.error_info,
+		       &((struct sep_rpc_params_32 *)arg)->error_info))
+		return -EFAULT;
+
+	return 0;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct register_mem4dma_params_32 {
+	struct dxdi_memref memref;  /*[in] */
+	int memref_id;  /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_register_mem4dma(struct file *filp,
+					     unsigned int cmd,
+					     unsigned long arg)
+{
+	struct register_mem4dma_params_32 up32;
+	struct dxdi_register_mem4dma_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (copy_to_user(&user_params->memref, &up32.memref,
+		sizeof(struct dxdi_memref)))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+	if (ret)
+		return ret;
+
+	if (__get_user(up32.memref_id, &user_params->memref_id))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct alloc_mem4dma_params_32 {
+	u32 size; /*[in] */
+	int memref_id;  /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_alloc_mem4dma(struct file *filp, unsigned int cmd,
+					  unsigned long arg)
+{
+	struct alloc_mem4dma_params_32 up32;
+	struct dxdi_alloc_mem4dma_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (__put_user(up32.size, &user_params->size))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+
+	if (__get_user(up32.memref_id, &user_params->memref_id))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return ret;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct free_mem4dma_params_32 {
+	int memref_id;  /*[in] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_free_mem4dma(struct file *filp, unsigned int cmd,
+					 unsigned long arg)
+{
+	return sep_ioctl(filp, cmd, arg);
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct aes_iv_params_32 {
+	u32 context_buf; /*[in] */
+	u8 iv_ptr[DXDI_AES_IV_SIZE];  /*[in]/[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_set_iv(struct file *filp, unsigned int cmd,
+				   unsigned long arg)
+{
+	struct aes_iv_params_32 up32;
+	struct dxdi_aes_iv_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		      &user_params->context_buf)
+	    || __copy_to_user(user_params->iv_ptr, up32.iv_ptr,
+			      DXDI_AES_IV_SIZE))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+
+	return ret;
+}
+
+static int compat_sep_ioctl_get_iv(struct file *filp, unsigned int cmd,
+				   unsigned long arg)
+{
+	struct aes_iv_params_32 up32;
+	struct dxdi_aes_iv_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (__put_user((void __user *)(unsigned long)up32.context_buf,
+		       &user_params->context_buf)
+	    || __copy_to_user(user_params->iv_ptr, up32.iv_ptr,
+			      DXDI_AES_IV_SIZE))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+
+	if (ret)
+		return ret;
+
+	if (__copy_from_user(up32.iv_ptr, user_params->iv_ptr,
+			     DXDI_AES_IV_SIZE))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return 0;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct sepapp_session_open_params_32 {
+	u8 app_uuid[DXDI_SEPAPP_UUID_SIZE]; /*[in] */
+	u32 auth_method;  /*[in] */
+	u32 auth_data[3]; /*[in] */
+	struct dxdi_sepapp_params app_auth_data;  /*[in/out] */
+	int session_id; /*[out] */
+	enum dxdi_sep_module sep_ret_origin;  /*[out] */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_sepapp_session_open(struct file *filp,
+						unsigned int cmd,
+						unsigned long arg)
+{
+	struct sepapp_session_open_params_32 up32;
+	struct dxdi_sepapp_session_open_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (copy_to_user(&user_params->app_uuid[0], &up32.app_uuid[0],
+			 DXDI_SEPAPP_UUID_SIZE)
+	    || __put_user(up32.auth_method, &user_params->auth_method)
+	    || copy_to_user(user_params->auth_data, &up32.auth_data,
+			    3 * sizeof(u32))
+	    || copy_to_user(&user_params->app_auth_data, &up32.app_auth_data,
+			    sizeof(struct dxdi_sepapp_params)))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+	if (ret)
+		return ret;
+
+	if (__copy_from_user(&up32.app_auth_data, &user_params->app_auth_data,
+			     sizeof(struct dxdi_sepapp_params))
+	    || __get_user(up32.session_id, &user_params->session_id)
+	    || __get_user(up32.sep_ret_origin, &user_params->sep_ret_origin)
+	    || __get_user(up32.error_info, &user_params->error_info))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return 0;
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct sepapp_session_close_params_32 {
+	int session_id; /*[in] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_sepapp_session_close(struct file *filp,
+						 unsigned int cmd,
+						 unsigned long arg)
+{
+	return sep_ioctl(filp, cmd, arg);
+}
+
+#pragma pack(push)
+#pragma pack(4)
+struct sepapp_command_invoke_params_32 {
+	int session_id; /*[in] */
+	u32 command_id; /*[in] */
+	struct dxdi_sepapp_params command_params; /*[in/out] */
+	enum dxdi_sep_module sep_ret_origin;  /*[out] */
+	u32 error_info; /*[out] */
+};
+#pragma pack(pop)
+
+static int compat_sep_ioctl_sepapp_command_invoke(struct file *filp,
+						  unsigned int cmd,
+						  unsigned long arg)
+{
+	struct sepapp_command_invoke_params_32 up32;
+	struct dxdi_sepapp_command_invoke_params __user *user_params;
+	int ret;
+
+	if (copy_from_user(&up32, (void __user *)arg, sizeof(up32)))
+		return -EFAULT;
+
+	user_params = compat_alloc_user_space(sizeof(*user_params));
+	if (!access_ok(VERIFY_WRITE, user_params, sizeof(*user_params)))
+		return -EFAULT;
+
+	if (__put_user(up32.session_id, &user_params->session_id)
+	    || __put_user(up32.command_id, &user_params->command_id)
+	    || copy_to_user(&user_params->command_params,
+			    &up32.command_params,
+			    sizeof(struct dxdi_sepapp_params)))
+		return -EFAULT;
+
+	ret = sep_ioctl(filp, cmd, (unsigned long)user_params);
+
+	if (__get_user(up32.sep_ret_origin, &user_params->sep_ret_origin)
+	    || __copy_from_user(&up32.command_params,
+				&user_params->command_params,
+				sizeof(struct dxdi_sepapp_params))
+	    || __get_user(up32.error_info, &user_params->error_info))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &up32, sizeof(up32)))
+		return -EFAULT;
+
+	return ret;
+}
+
+static sep_ioctl_compat_t *sep_compat_ioctls[] = {
+	/* Version info. commands */
+	[DXDI_IOC_NR_GET_VER_MAJOR] = compat_sep_ioctl_get_ver_major,
+	[DXDI_IOC_NR_GET_VER_MINOR] = compat_sep_ioctl_get_ver_minor,
+	/* Context size queries */
+	[DXDI_IOC_NR_GET_SYMCIPHER_CTX_SIZE] = compat_sep_ioctl_get_sym_cipher_ctx_size,
+	[DXDI_IOC_NR_GET_AUTH_ENC_CTX_SIZE] = compat_sep_ioctl_get_auth_enc_ctx_size,
+	[DXDI_IOC_NR_GET_MAC_CTX_SIZE] = compat_sep_ioctl_get_mac_ctx_size,
+	[DXDI_IOC_NR_GET_HASH_CTX_SIZE] = compat_sep_ioctl_get_hash_ctx_size,
+	/* Init context commands */
+	[DXDI_IOC_NR_SYMCIPHER_INIT] = compat_sep_ioctl_sym_cipher_init,
+	[DXDI_IOC_NR_AUTH_ENC_INIT] = compat_sep_ioctl_auth_enc_init,
+	[DXDI_IOC_NR_MAC_INIT] = compat_sep_ioctl_mac_init,
+	[DXDI_IOC_NR_HASH_INIT] = compat_sep_ioctl_hash_init,
+	/* Processing commands */
+	[DXDI_IOC_NR_PROC_DBLK] = compat_sep_ioctl_proc_dblk,
+	[DXDI_IOC_NR_FIN_PROC] = compat_sep_ioctl_fin_proc,
+	/* "Integrated" processing operations */
+	[DXDI_IOC_NR_SYMCIPHER_PROC] = compat_sep_ioctl_sym_cipher_proc,
+	[DXDI_IOC_NR_AUTH_ENC_PROC] = compat_sep_ioctl_auth_enc_proc,
+	[DXDI_IOC_NR_MAC_PROC] = compat_sep_ioctl_mac_proc,
+	[DXDI_IOC_NR_HASH_PROC] = compat_sep_ioctl_hash_proc,
+	/* SeP RPC */
+	[DXDI_IOC_NR_SEP_RPC] = compat_sep_ioctl_sep_rpc,
+	/* Memory registration */
+	[DXDI_IOC_NR_REGISTER_MEM4DMA] = compat_sep_ioctl_register_mem4dma,
+	[DXDI_IOC_NR_ALLOC_MEM4DMA] = compat_sep_ioctl_alloc_mem4dma,
+	[DXDI_IOC_NR_FREE_MEM4DMA] = compat_sep_ioctl_free_mem4dma,
+	/* SeP Applets API */
+	[DXDI_IOC_NR_SEPAPP_SESSION_OPEN] = compat_sep_ioctl_sepapp_session_open,
+	[DXDI_IOC_NR_SEPAPP_SESSION_CLOSE] = compat_sep_ioctl_sepapp_session_close,
+	[DXDI_IOC_NR_SEPAPP_COMMAND_INVOKE] = compat_sep_ioctl_sepapp_command_invoke,
+	/* Combined mode */
+	[DXDI_IOC_NR_COMBINED_INIT] = compat_sep_ioctl_combined_init,
+	[DXDI_IOC_NR_COMBINED_PROC_DBLK] = compat_sep_ioctl_combined_proc_dblk,
+	[DXDI_IOC_NR_COMBINED_PROC_FIN] = compat_sep_ioctl_combined_fin_proc,
+	[DXDI_IOC_NR_COMBINED_PROC] = compat_sep_ioctl_combined_proc,
+
+	/* AES IV set/get API */
+	[DXDI_IOC_NR_SET_IV] = compat_sep_ioctl_set_iv,
+	[DXDI_IOC_NR_GET_IV] = compat_sep_ioctl_get_iv,
+};
+
+long sep_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	unsigned int nr = _IOC_NR(cmd);
+	sep_ioctl_compat_t *callback = NULL;
+	int ret;
+
+	pr_debug("Calling compat ioctl %u\n", nr);
+
+	if (nr < ARRAY_SIZE(sep_compat_ioctls))
+		callback = sep_compat_ioctls[nr];
+
+	if (callback != NULL)
+		ret = (*callback) (filp, cmd, arg);
+	else
+		ret = sep_ioctl(filp, cmd, arg);
+
+	return ret;
+}
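+
+/*
+ * Illustrative wiring (a sketch, not part of this file): drivers
+ * normally install the compat entry point next to the native one in
+ * their file_operations so that 32-bit callers on a 64-bit kernel are
+ * routed here automatically:
+ *
+ *	static const struct file_operations sep_fops = {
+ *		.owner          = THIS_MODULE,
+ *		.unlocked_ioctl = sep_ioctl,
+ *	#ifdef CONFIG_COMPAT
+ *		.compat_ioctl   = sep_compat_ioctl,
+ *	#endif
+ *	};
+ */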
diff --git a/drivers/staging/sep54/sep_compat_ioctl.h b/drivers/staging/sep54/sep_compat_ioctl.h
new file mode 100644
index 0000000..5188269
--- /dev/null
+++ b/drivers/staging/sep54/sep_compat_ioctl.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2013 Intel Finland Oy
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __SEP_COMPAT_IOCTL_H__
+#define __SEP_COMPAT_IOCTL_H__
+
+#include <linux/fs.h>
+
+/**
+ * \brief sep_compat_ioctl
+ *
+ * \param filp
+ * \param cmd
+ * \param arg
+ * \return 0 on success or a negative number on failure
+ */
+long sep_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+
+#endif
diff --git a/drivers/staging/sep54/sep_ctx.h b/drivers/staging/sep54/sep_ctx.h
new file mode 100644
index 0000000..0fa0259
--- /dev/null
+++ b/drivers/staging/sep54/sep_ctx.h
@@ -0,0 +1,297 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _SEP_CTX_H_
+#define _SEP_CTX_H_
+#ifdef __KERNEL__
+#include <linux/types.h>
+#define INT32_MAX 0x7FFFFFFFL
+#else
+#include <stdint.h>
+#endif
+
+#include "dx_cc_defs.h"
+
+#ifndef max
+#define max(a, b) ((a) > (b) ? (a) : (b))
+#define min(a, b) ((a) < (b) ? (a) : (b))
+#endif
+
+/* SeP context size */
+#define SEP_CTX_SIZE_LOG2 7
+#define SEP_CTX_SIZE (1<<SEP_CTX_SIZE_LOG2)
+#define SEP_CTX_SIZE_WORDS (SEP_CTX_SIZE >> 2)
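+/* i.e., SEP_CTX_SIZE = 1 << 7 = 128 bytes = 32 words */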
+
+#define SEP_DES_IV_SIZE 8
+#define SEP_DES_BLOCK_SIZE 8
+
+#define SEP_DES_ONE_KEY_SIZE 8
+#define SEP_DES_DOUBLE_KEY_SIZE 16
+#define SEP_DES_TRIPLE_KEY_SIZE 24
+#define SEP_DES_KEY_SIZE_MAX SEP_DES_TRIPLE_KEY_SIZE
+
+#define SEP_AES_IV_SIZE 16
+#define SEP_AES_IV_SIZE_WORDS (SEP_AES_IV_SIZE >> 2)
+
+#define SEP_AES_BLOCK_SIZE 16
+#define SEP_AES_BLOCK_SIZE_WORDS 4
+
+#define SEP_AES_128_BIT_KEY_SIZE 16
+#define SEP_AES_128_BIT_KEY_SIZE_WORDS	(SEP_AES_128_BIT_KEY_SIZE >> 2)
+#define SEP_AES_192_BIT_KEY_SIZE 24
+#define SEP_AES_192_BIT_KEY_SIZE_WORDS	(SEP_AES_192_BIT_KEY_SIZE >> 2)
+#define SEP_AES_256_BIT_KEY_SIZE 32
+#define SEP_AES_256_BIT_KEY_SIZE_WORDS	(SEP_AES_256_BIT_KEY_SIZE >> 2)
+#define SEP_AES_KEY_SIZE_MAX			SEP_AES_256_BIT_KEY_SIZE
+#define SEP_AES_KEY_SIZE_WORDS_MAX		(SEP_AES_KEY_SIZE_MAX >> 2)
+
+#define SEP_SHA1_DIGEST_SIZE 20
+#define SEP_SHA224_DIGEST_SIZE 28
+#define SEP_SHA256_DIGEST_SIZE 32
+#define SEP_SHA384_DIGEST_SIZE 48
+#define SEP_SHA512_DIGEST_SIZE 64
+#define SEP_SHA1024_DIGEST_SIZE 128
+
+#define SEP_SHA1_BLOCK_SIZE 64
+#define SEP_SHA224_BLOCK_SIZE 64
+#define SEP_SHA256_BLOCK_SIZE 64
+#define SEP_SHA1_224_256_BLOCK_SIZE 64
+#define SEP_SHA384_BLOCK_SIZE 128
+#define SEP_SHA512_BLOCK_SIZE 128
+#define SEP_SHA1024_BLOCK_SIZE 128
+
+#if (SEP_SUPPORT_SHA > 256)
+#define SEP_DIGEST_SIZE_MAX SEP_SHA512_DIGEST_SIZE
+#define SEP_HASH_BLOCK_SIZE_MAX SEP_SHA512_BLOCK_SIZE	/* 1024-bit block */
+#else				/* Only up to SHA256 */
+#define SEP_DIGEST_SIZE_MAX SEP_SHA256_DIGEST_SIZE
+#define SEP_HASH_BLOCK_SIZE_MAX SEP_SHA256_BLOCK_SIZE	/* 512-bit block */
+#endif
+
+#define SEP_HMAC_BLOCK_SIZE_MAX SEP_HASH_BLOCK_SIZE_MAX
+
+#define SEP_RC4_KEY_SIZE_MIN 1
+#define SEP_RC4_KEY_SIZE_MAX 20
+#define SEP_RC4_STATE_SIZE 264
+
+#define SEP_C2_KEY_SIZE_MAX 16
+#define SEP_C2_BLOCK_SIZE 8
+
+#define SEP_ALG_MAX_BLOCK_SIZE SEP_HASH_BLOCK_SIZE_MAX
+
+#define SEP_MAX_COMBINED_ENGINES 4
+
+#define SEP_MAX_CTX_SIZE (max(sizeof(struct sep_ctx_rc4), \
+				sizeof(struct sep_ctx_cache_entry)))
+
+enum sep_engine_type {
+	SEP_ENGINE_NULL = 0,
+	SEP_ENGINE_AES = 1,
+	SEP_ENGINE_DES = 2,
+	SEP_ENGINE_HASH = 3,
+	SEP_ENGINE_RC4 = 4,
+	SEP_ENGINE_DOUT = 5,
+	SEP_ENGINE_RESERVE32B = INT32_MAX,
+};
+
+enum sep_crypto_alg {
+	SEP_CRYPTO_ALG_NULL = -1,
+	SEP_CRYPTO_ALG_AES = 0,
+	SEP_CRYPTO_ALG_DES = 1,
+	SEP_CRYPTO_ALG_HASH = 2,
+	SEP_CRYPTO_ALG_RC4 = 3,
+	SEP_CRYPTO_ALG_C2 = 4,
+	SEP_CRYPTO_ALG_HMAC = 5,
+	SEP_CRYPTO_ALG_AEAD = 6,
+	SEP_CRYPTO_ALG_BYPASS = 7,
+	SEP_CRYPTO_ALG_COMBINED = 8,
+	SEP_CRYPTO_ALG_NUM = 9,
+	SEP_CRYPTO_ALG_RESERVE32B = INT32_MAX
+};
+
+enum sep_crypto_direction {
+	SEP_CRYPTO_DIRECTION_NULL = -1,
+	SEP_CRYPTO_DIRECTION_ENCRYPT = 0,
+	SEP_CRYPTO_DIRECTION_DECRYPT = 1,
+	SEP_CRYPTO_DIRECTION_DECRYPT_ENCRYPT = 3,
+	SEP_CRYPTO_DIRECTION_RESERVE32B = INT32_MAX
+};
+
+enum sep_cipher_mode {
+	SEP_CIPHER_NULL_MODE = -1,
+	SEP_CIPHER_ECB = 0,
+	SEP_CIPHER_CBC = 1,
+	SEP_CIPHER_CTR = 2,
+	SEP_CIPHER_CBC_MAC = 3,
+	SEP_CIPHER_XTS = 4,
+	SEP_CIPHER_XCBC_MAC = 5,
+	SEP_CIPHER_CMAC = 7,
+	SEP_CIPHER_CCM = 8,
+	SEP_CIPHER_RESERVE32B = INT32_MAX
+};
+
+enum sep_hash_mode {
+	SEP_HASH_NULL = -1,
+	SEP_HASH_SHA1 = 0,
+	SEP_HASH_SHA256 = 1,
+	SEP_HASH_SHA224 = 2,
+	SEP_HASH_MODE_NUM = 3,
+
+	/* Unsupported */
+	SEP_HASH_SHA512 = 3,
+	SEP_HASH_SHA384 = 4,
+	SEP_HASH_RESERVE32B = INT32_MAX
+};
+
+enum sep_hash_hw_mode {
+	SEP_HASH_HW_SHA1 = 1,
+	SEP_HASH_HW_SHA256 = 2,
+	SEP_HASH_HW_SHA224 = 10,
+	SEP_HASH_HW_SHA512 = 4,
+	SEP_HASH_HW_SHA384 = 12,
+	SEP_HASH_HW_RESERVE32B = INT32_MAX
+};
+
+enum sep_c2_mode {
+	SEP_C2_NULL = -1,
+	SEP_C2_ECB = 0,
+	SEP_C2_CBC = 1,
+	SEP_C2_RESERVE32B = INT32_MAX
+};
+
+/*******************************************************************/
+/***************** DESCRIPTOR BASED CONTEXTS ***********************/
+/*******************************************************************/
+
+/* Generic context ("super-class") */
+struct sep_ctx_generic {
+	enum sep_crypto_alg alg;
+} __attribute__ ((__may_alias__));
+
+/* Cache context entry ("sub-class") */
+struct sep_ctx_cache_entry {
+	enum sep_crypto_alg alg;
+	u32 reserved[SEP_CTX_SIZE_WORDS - 1];
+};
+
+struct sep_ctx_c2 {
+	enum sep_crypto_alg alg;	/* SEP_CRYPTO_ALG_C2 */
+	enum sep_c2_mode mode;
+	enum sep_crypto_direction direction;
+	u32 key_size;	/* numeric value in bytes */
+	u8 key[SEP_C2_KEY_SIZE_MAX];
+	/* reserve to end of allocated context size */
+	u8 reserved[SEP_CTX_SIZE - 4 * sizeof(u32) -
+			 SEP_C2_KEY_SIZE_MAX];
+};
+
+struct sep_ctx_hash {
+	enum sep_crypto_alg alg;	/* SEP_CRYPTO_ALG_HASH */
+	enum sep_hash_mode mode;
+	u8 digest[SEP_DIGEST_SIZE_MAX];
+	/* reserve to end of allocated context size */
+	u8 reserved[SEP_CTX_SIZE - 2 * sizeof(u32) -
+			 SEP_DIGEST_SIZE_MAX];
+};
+
+/* NOTE: sep_ctx_hmac must keep the same layout as sep_ctx_hash except
+   for the k0 and k0_size fields */
+struct sep_ctx_hmac {
+	enum sep_crypto_alg alg;	/* SEP_CRYPTO_ALG_HMAC */
+	enum sep_hash_mode mode;
+	u8 digest[SEP_DIGEST_SIZE_MAX];
+	u8 k0[SEP_HMAC_BLOCK_SIZE_MAX];
+	u32 k0_size;
+	/* reserve to end of allocated context size */
+	u8 reserved[SEP_CTX_SIZE - 3 * sizeof(u32) -
+			 SEP_DIGEST_SIZE_MAX - SEP_HMAC_BLOCK_SIZE_MAX];
+};
+
+struct sep_ctx_cipher {
+	enum sep_crypto_alg alg;	/* SEP_CRYPTO_ALG_AES */
+	enum sep_cipher_mode mode;
+	enum sep_crypto_direction direction;
+	enum dx_crypto_key_type crypto_key_type;
+	u32 key_size;	/* numeric value in bytes   */
+	u32 data_unit_size;	/* required for XTS */
+	/* block_state is the AES engine block state.
+	 * It is used by the host to pass the IV or counter at initialization,
+	 * and by SeP for intermediate block chaining state and for
+	 * returning MAC algorithm results. */
+	u8 block_state[SEP_AES_BLOCK_SIZE];
+	u8 key[SEP_AES_KEY_SIZE_MAX];
+	u8 xex_key[SEP_AES_KEY_SIZE_MAX];
+	/* reserve to end of allocated context size */
+	u32 reserved[SEP_CTX_SIZE_WORDS - 6 -
+			  SEP_AES_BLOCK_SIZE / sizeof(u32) - 2 *
+			  (SEP_AES_KEY_SIZE_MAX / sizeof(u32))];
+};
+
+/* authentication and encryption with associated data class */
+struct sep_ctx_aead {
+	enum sep_crypto_alg alg;	/* SEP_CRYPTO_ALG_AES */
+	enum sep_cipher_mode mode;
+	enum sep_crypto_direction direction;
+	enum dx_crypto_key_type crypto_key_type;
+	u32 key_size;	/* numeric value in bytes   */
+	u32 nonce_size;	/* nonce size (octets) */
+	u32 header_size;	/* finite additional data size (octets) */
+	u32 text_size;	/* finite text data size (octets) */
+	/* mac size, element of {4, 6, 8, 10, 12, 14, 16} */
+	u32 tag_size;
+	/* block_state1/2 is the AES engine block state */
+	u8 block_state[SEP_AES_BLOCK_SIZE];
+	u8 mac_state[SEP_AES_BLOCK_SIZE];	/* MAC result */
+	u8 nonce[SEP_AES_BLOCK_SIZE];	/* nonce buffer */
+	u8 key[SEP_AES_KEY_SIZE_MAX];
+	/* reserve to end of allocated context size */
+	u32 reserved[SEP_CTX_SIZE_WORDS - 9 -
+			  3 * (SEP_AES_BLOCK_SIZE / sizeof(u32)) -
+			  SEP_AES_KEY_SIZE_MAX / sizeof(u32)];
+};
+
+/* crys combined context */
+struct sep_ctx_combined {
+	enum sep_crypto_alg alg;
+	u32 mode;
+	/* array of sub contexts used for the combined operation
+	 * according to the given mode */
+	struct sep_ctx_cache_entry *sub_ctx[SEP_MAX_COMBINED_ENGINES];
+	/* store the host contexts addresses (optimization) */
+	u32 host_addr[SEP_MAX_COMBINED_ENGINES];
+};
+
+/*******************************************************************/
+/***************** MESSAGE BASED CONTEXTS **************************/
+/*******************************************************************/
+
+struct sep_ctx_rc4 {
+	enum sep_crypto_alg alg;	/* SEP_CRYPTO_ALG_RC4 */
+	u32 key_size;	/* numeric value in bytes */
+	u8 key[SEP_RC4_KEY_SIZE_MAX];
+	u8 state[SEP_RC4_STATE_SIZE];
+};
+
+#endif				/* _SEP_CTX_H_ */
diff --git a/drivers/staging/sep54/sep_init.c b/drivers/staging/sep54/sep_init.c
new file mode 100644
index 0000000..c513512
--- /dev/null
+++ b/drivers/staging/sep54/sep_init.c
@@ -0,0 +1,848 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+/* \file
+   This file implements the SeP FW initialization sequence.
+   This is part of the Discretix CC initialization specifications. */
+
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_SEP_INIT
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+/* Registers definitions from shared/hw/include */
+#include "dx_reg_base_host.h"
+#include "dx_host.h"
+#define DX_CC_HOST_VIRT	/* must be defined before including dx_cc_regs.h */
+#include "dx_cc_regs.h"
+#include "dx_bitops.h"
+#include "sep_log.h"
+#include "dx_driver.h"
+#include "dx_init_cc_abi.h"
+#include "dx_init_cc_defs.h"
+#include "sep_sram_map.h"
+#include "sep_init.h"
+#include "sep_request_mgr.h"
+#include "sep_power.h"
+
+#ifdef DEBUG
+#define FW_INIT_TIMEOUT_SEC     10
+#define COLD_BOOT_TIMEOUT_SEC	10
+#else
+#define FW_INIT_TIMEOUT_SEC     3
+#define COLD_BOOT_TIMEOUT_SEC	3
+#endif
+#define FW_INIT_TIMEOUT_MSEC	(FW_INIT_TIMEOUT_SEC * 1000)
+#define COLD_BOOT_TIMEOUT_MSEC  (COLD_BOOT_TIMEOUT_SEC * 1000)
+
+#define FW_INIT_PARAMS_BUF_LEN		1024
+
+/*** CC_INIT handlers ***/
+
+/**
+ * struct cc_init_ctx - CC init. context
+ * @drvdata:		Associated device driver
+ * @resident_p:		Pointer to resident image buffer
+ * @resident_dma_addr:	DMA address of resident image buffer
+ * @resident_size:	Size in bytes of the resident image
+ * @cache_p:	Pointer to (i)cache image buffer
+ * @cache_dma_addr:	DMA address of the (i)cache image
+ * @cache_size:		Size in bytes of the (i)cache image
+ * @vrl_p:		Pointer to VRL image buffer
+ * @vrl_dma_addr:	DMA address of the VRL
+ * @vrl_size:		Size in bytes of the VRL
+ * @msg_buf:		A buffer for building the CC-Init message
+ */
+struct cc_init_ctx {
+	struct sep_drvdata *drvdata;
+	void *resident_p;
+	dma_addr_t resident_dma_addr;
+	size_t resident_size;
+	void *cache_p;
+	dma_addr_t cache_dma_addr;
+	size_t cache_size;
+	void *vrl_p;
+	dma_addr_t vrl_dma_addr;
+	size_t vrl_size;
+	u32 msg_buf[DX_CC_INIT_MSG_LENGTH];
+};
+
+static void destroy_cc_init_ctx(struct cc_init_ctx *ctx)
+{
+	struct device *mydev = ctx->drvdata->dev;
+
+	if (ctx->vrl_p != NULL)
+		dma_free_coherent(mydev, ctx->vrl_size,
+				  ctx->vrl_p, ctx->vrl_dma_addr);
+	if (ctx->cache_p != NULL)
+		dma_free_coherent(mydev, ctx->cache_size,
+				  ctx->cache_p, ctx->cache_dma_addr);
+	if (ctx->resident_p != NULL)
+		dma_free_coherent(mydev, ctx->resident_size,
+				  ctx->resident_p, ctx->resident_dma_addr);
+	kfree(ctx);
+}
+
+/**
+ * fetch_image() - Fetch CC image using request_firmware mechanism and
+ *	locate it in a DMA coherent buffer.
+ *
+ * @mydev:
+ * @image_name:		Image file name (from /lib/firmware/)
+ * @image_pp:		Allocated image buffer
+ * @image_dma_addr_p:	Allocated image DMA addr
+ * @image_size_p:	Loaded image size
+ */
+static int fetch_image(struct device *mydev, const char *image_name,
+		       void **image_pp, dma_addr_t *image_dma_addr_p,
+		       size_t *image_size_p)
+{
+	const struct firmware *image;
+	int rc;
+
+	rc = request_firmware(&image, image_name, mydev);
+	if (unlikely(rc != 0)) {
+		pr_err("Failed loading image %s (%d)\n", image_name, rc);
+		return -ENODEV;
+	}
+	*image_pp = dma_alloc_coherent(mydev,
+				       image->size, image_dma_addr_p,
+				       GFP_KERNEL);
+	if (unlikely(*image_pp == NULL)) {
+		pr_err("Failed allocating DMA mem. for image %s\n",
+		       image_name);
+		rc = -ENOMEM;
+	} else {
+		memcpy(*image_pp, image->data, image->size);
+		*image_size_p = image->size;
+	}
+	/* Image copied into the DMA coherent buffer. No need for "firmware" */
+	release_firmware(image);
+	if (likely(rc == 0))
+		pr_debug("%s: %zu Bytes\n", image_name, *image_size_p);
+	return rc;
+}
+
+#ifdef CACHE_IMAGE_NAME
+static enum dx_cc_init_msg_icache_size icache_size_to_enum(u8
+							   icache_size_log2)
+{
+	int i;
+	const int icache_sizes_enum2log[] = DX_CC_ICACHE_SIZE_ENUM2LOG;
+
+	for (i = 0; i < sizeof(icache_sizes_enum2log) / sizeof(int); i++)
+		if ((icache_size_log2 == icache_sizes_enum2log[i]) &&
+		    (icache_sizes_enum2log[i] >= 0))
+			return (enum dx_cc_init_msg_icache_size)i;
+	pr_err("Requested Icache size (%uKB) is invalid\n",
+	       1 << (icache_size_log2 - 10));
+	return DX_CC_INIT_MSG_ICACHE_SCR_INVALID_SIZE;
+}
+#endif
+
+/**
+ * get_cc_init_checksum() - Calculate CC_INIT message checksum
+ *
+ * @msg_p:	Pointer to the message buffer
+ *		(fixed size: DX_CC_INIT_MSG_LENGTH words)
+ */
+static u32 get_cc_init_checksum(u32 *msg_p)
+{
+	int bytes_remain;
+	u32 sum = 0;
+	u16 *tdata = (u16 *)msg_p;
+
+	for (bytes_remain = DX_CC_INIT_MSG_LENGTH * sizeof(u32);
+	     bytes_remain > 1; bytes_remain -= 2)
+		sum += *tdata++;
+	/*  Add left-over byte, if any */
+	if (bytes_remain > 0)
+		sum += *(u8 *)tdata;
+	/*  Fold 32-bit sum to 16 bits */
+	while ((sum >> 16) != 0)
+		sum = (sum & 0xFFFF) + (sum >> 16);
+	return ~sum & 0xFFFF;
+}
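+
+/*
+ * Worked example (for illustration, a hypothetical one-word buffer
+ * 0x00010002; the real message is DX_CC_INIT_MSG_LENGTH words): the
+ * two 16-bit halves sum to 0x0001 + 0x0002 = 0x0003; folding is a
+ * no-op, so the result is ~0x0003 & 0xFFFF = 0xFFFC. This is the same
+ * one's-complement scheme as the Internet checksum (cf. RFC 1071).
+ */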
+
+static void build_cc_init_msg(struct cc_init_ctx *init_ctx)
+{
+	u32 *const msg_p = init_ctx->msg_buf;
+	struct sep_drvdata *drvdata = init_ctx->drvdata;
+#ifndef VRL_KEY_INDEX
+	/* Verify VRL key against this truncated hash value */
+	u32 const vrl_key_hash[] = VRL_KEY_HASH;
+#endif
+#ifdef CACHE_IMAGE_NAME
+	enum dx_cc_init_msg_icache_size icache_size_code;
+#endif
+
+	memset(msg_p, 0, DX_CC_INIT_MSG_LENGTH * sizeof(u32));
+	msg_p[DX_CC_INIT_MSG_TOKEN_OFFSET] = DX_CC_INIT_HEAD_MSG_TOKEN;
+	msg_p[DX_CC_INIT_MSG_LENGTH_OFFSET] = DX_CC_INIT_MSG_LENGTH;
+	msg_p[DX_CC_INIT_MSG_OP_CODE_OFFSET] = DX_HOST_REQ_CC_INIT;
+	msg_p[DX_CC_INIT_MSG_FLAGS_OFFSET] =
+	    DX_CC_INIT_FLAGS_RESIDENT_ADDR_FLAG;
+	msg_p[DX_CC_INIT_MSG_RESIDENT_IMAGE_OFFSET] =
+	    init_ctx->resident_dma_addr;
+
+#ifdef CACHE_IMAGE_NAME
+	icache_size_code = icache_size_to_enum(drvdata->icache_size_log2);
+	msg_p[DX_CC_INIT_MSG_FLAGS_OFFSET] |=
+	    DX_CC_INIT_FLAGS_I_CACHE_ADDR_FLAG |
+	    DX_CC_INIT_FLAGS_D_CACHE_EXIST_FLAG |
+	    DX_CC_INIT_FLAGS_CACHE_ENC_FLAG | DX_CC_INIT_FLAGS_CACHE_COPY_FLAG;
+#ifdef DX_CC_INIT_FLAGS_CACHE_SCRAMBLE_FLAG
+	/* Enable scrambling if available */
+	msg_p[DX_CC_INIT_MSG_FLAGS_OFFSET] |=
+	    DX_CC_INIT_FLAGS_CACHE_SCRAMBLE_FLAG;
+#endif
+	msg_p[DX_CC_INIT_MSG_I_CACHE_IMAGE_OFFSET] = init_ctx->cache_dma_addr;
+	msg_p[DX_CC_INIT_MSG_I_CACHE_DEST_OFFSET] =
+	    page_to_phys(drvdata->icache_pages);
+	msg_p[DX_CC_INIT_MSG_I_CACHE_SIZE_OFFSET] = icache_size_code;
+	msg_p[DX_CC_INIT_MSG_D_CACHE_ADDR_OFFSET] =
+	    page_to_phys(drvdata->dcache_pages);
+	msg_p[DX_CC_INIT_MSG_D_CACHE_SIZE_OFFSET] =
+	    1 << drvdata->dcache_size_log2;
+#elif defined(SEP_BACKUP_BUF_SIZE)
+	/* Declare SEP backup buffer resources */
+	msg_p[DX_CC_INIT_MSG_HOST_BUFF_ADDR_OFFSET] =
+	    virt_to_phys(drvdata->sep_backup_buf);
+	msg_p[DX_CC_INIT_MSG_HOST_BUFF_SIZE_OFFSET] =
+	    drvdata->sep_backup_buf_size;
+#endif				/*CACHE_IMAGE_NAME */
+
+	msg_p[DX_CC_INIT_MSG_VRL_ADDR_OFFSET] = init_ctx->vrl_dma_addr;
+	/* Handle VRL key hash */
+#ifdef VRL_KEY_INDEX
+	msg_p[DX_CC_INIT_MSG_KEY_INDEX_OFFSET] = VRL_KEY_INDEX;
+#else	/* Key should be validated against VRL_KEY_HASH */
+	msg_p[DX_CC_INIT_MSG_KEY_INDEX_OFFSET] =
+	    DX_CC_INIT_MSG_VRL_KEY_INDEX_INVALID;
+	msg_p[DX_CC_INIT_MSG_KEY_HASH_0_OFFSET] = vrl_key_hash[0];
+	msg_p[DX_CC_INIT_MSG_KEY_HASH_1_OFFSET] = vrl_key_hash[1];
+	msg_p[DX_CC_INIT_MSG_KEY_HASH_2_OFFSET] = vrl_key_hash[2];
+	msg_p[DX_CC_INIT_MSG_KEY_HASH_3_OFFSET] = vrl_key_hash[3];
+#endif
+
+	msg_p[DX_CC_INIT_MSG_CHECK_SUM_OFFSET] = get_cc_init_checksum(msg_p);
+
+	dump_word_array("CC_INIT", msg_p, DX_CC_INIT_MSG_LENGTH);
+}
+
+/**
+ * create_cc_init_ctx() - Create CC-INIT message and allocate associated
+ *	resources (load FW images, etc.)
+ *
+ * @drvdata:		Device context
+ *
+ * Returns the allocated message context or NULL on failure.
+ */
+struct cc_init_ctx *create_cc_init_ctx(struct sep_drvdata *drvdata)
+{
+	struct cc_init_ctx *init_ctx;
+	struct device *const mydev = drvdata->dev;
+	int rc;
+
+	init_ctx = kzalloc(sizeof(struct cc_init_ctx), GFP_KERNEL);
+	if (unlikely(init_ctx == NULL)) {
+		pr_err("Failed allocating CC-Init. context\n");
+		rc = -ENOMEM;
+		goto create_err;
+	}
+	init_ctx->drvdata = drvdata;
+	rc = fetch_image(mydev, RESIDENT_IMAGE_NAME, &init_ctx->resident_p,
+			 &init_ctx->resident_dma_addr,
+			 &init_ctx->resident_size);
+	if (unlikely(rc != 0))
+		goto create_err;
+#ifdef CACHE_IMAGE_NAME
+	rc = fetch_image(mydev, CACHE_IMAGE_NAME, &init_ctx->cache_p,
+			 &init_ctx->cache_dma_addr, &init_ctx->cache_size);
+	if (unlikely(rc != 0))
+		goto create_err;
+#endif				/*CACHE_IMAGE_NAME */
+	rc = fetch_image(mydev, VRL_IMAGE_NAME, &init_ctx->vrl_p,
+			 &init_ctx->vrl_dma_addr, &init_ctx->vrl_size);
+	if (unlikely(rc != 0))
+		goto create_err;
+	build_cc_init_msg(init_ctx);
+	return init_ctx;
+
+ create_err:
+
+	if (init_ctx != NULL)
+		destroy_cc_init_ctx(init_ctx);
+	return NULL;
+}
+
+/**
+ * sepinit_wait_for_cold_boot_finish() - Wait for SeP to reach cold-boot-finish
+ *					state (i.e., ready for driver-init)
+ * @drvdata:
+ *
+ * Returns int 0 for success, !0 on timeout while waiting for cold-boot-finish
+ */
+static int sepinit_wait_for_cold_boot_finish(struct sep_drvdata *drvdata)
+{
+	enum dx_sep_state cur_state;
+	u32 cur_status;
+	int rc = 0;
+
+	cur_state =
+	    dx_sep_wait_for_state(DX_SEP_STATE_DONE_COLD_BOOT |
+				  DX_SEP_STATE_FATAL_ERROR,
+				  COLD_BOOT_TIMEOUT_MSEC);
+	if (cur_state != DX_SEP_STATE_DONE_COLD_BOOT) {
+		rc = -EIO;
+		cur_status =
+		    READ_REGISTER(drvdata->cc_base + SEP_STATUS_GPR_OFFSET);
+		pr_err("Failed waiting for DONE_COLD_BOOT from SeP (state=0x%08X status=0x%08X)\n",
+		       cur_state, cur_status);
+	}
+
+	return rc;
+}
+
+/**
+ * dispatch_cc_init_msg() - Push given CC_INIT message into SRAM and signal
+ *	SeP to start cold boot sequence
+ *
+ * @drvdata:
+ * @cc_init_ctx_p:	A pointer to the CC-Init message context
+ */
+static int dispatch_cc_init_msg(struct sep_drvdata *drvdata,
+				struct cc_init_ctx *cc_init_ctx_p)
+{
+	int i;
+	u32 is_data_ready;
+	/*
+	 * get the base address of the SRAM and add the offset
+	 * for the CC_Init message
+	 */
+	const u32 msg_target_addr =
+	    READ_REGISTER(DX_CC_REG_ADDR(drvdata->cc_base,
+					 HOST, HOST_SEP_SRAM_THRESHOLD)) +
+	    DX_CC_INIT_MSG_OFFSET_IN_SRAM;
+
+	/* Initialize SRAM access address register for message location */
+	WRITE_REGISTER(DX_CC_REG_ADDR(drvdata->cc_base, SRAM, SRAM_ADDR),
+		       msg_target_addr);
+	/* Write the message word by word to the SeP internal SRAM offset */
+	for (i = 0; i < sizeof(cc_init_ctx_p->msg_buf) / sizeof(u32);
+		i++) {
+		/* write data to SRAM */
+		WRITE_REGISTER(DX_CC_REG_ADDR(drvdata->cc_base,
+					      SRAM, SRAM_DATA),
+			       cc_init_ctx_p->msg_buf[i]);
+		/* wait for write complete */
+		do {
+			is_data_ready = 1 &
+			    READ_REGISTER(DX_CC_REG_ADDR(drvdata->cc_base,
+							 SRAM,
+							 SRAM_DATA_READY));
+		} while (!is_data_ready);
+		/* TODO: Timeout in case something gets broken */
+	}
+	/* Signal SeP: Request CC_INIT */
+	WRITE_REGISTER(drvdata->cc_base +
+		       HOST_SEP_GPR_REG_OFFSET(DX_HOST_REQ_GPR_IDX),
+		       DX_HOST_REQ_CC_INIT);
+	return 0;
+}
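+
+/*
+ * A bounded variant of the SRAM-ready poll above could look like the
+ * sketch below; the retry budget is an illustrative assumption (the
+ * code as posted spins indefinitely, per the TODO):
+ *
+ *	int retries = 1000;
+ *	do {
+ *		is_data_ready = 1 & READ_REGISTER(
+ *		    DX_CC_REG_ADDR(drvdata->cc_base, SRAM, SRAM_DATA_READY));
+ *	} while (!is_data_ready && --retries > 0);
+ *	if (!is_data_ready)
+ *		return -ETIMEDOUT;
+ */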
+
+/**
+ * sepinit_do_cc_init() - Initiate SeP cold boot sequence and wait for
+ *	its completion.
+ *
+ * @drvdata:
+ *
+ * This function loads the CC firmware and dispatches a CC_INIT request message
+ * Returns int 0 for success
+ */
+int sepinit_do_cc_init(struct sep_drvdata *drvdata)
+{
+	u32 cur_state;
+	struct cc_init_ctx *cc_init_ctx_p;
+	int rc;
+
+	/* Sep was already initialized */
+	if (GET_SEP_STATE(drvdata) == DX_SEP_STATE_DONE_FW_INIT)
+		return 0;
+
+	cur_state = dx_sep_wait_for_state(DX_SEP_STATE_START_SECURE_BOOT,
+					  COLD_BOOT_TIMEOUT_MSEC);
+	if (cur_state != DX_SEP_STATE_START_SECURE_BOOT) {
+		pr_err("Bad SeP state = 0x%08X\n", cur_state);
+		return -EIO;
+	}
+#ifdef __BIG_ENDIAN
+	/* Enable byte swapping in DMA operations */
+	WRITE_REGISTER(DX_CC_REG_ADDR(drvdata->cc_base, HOST, HOST_HOST_ENDIAN),
+		       0xCCUL);
+	/* TODO: Define value in device specific header files? */
+#endif
+	cc_init_ctx_p = create_cc_init_ctx(drvdata);
+	if (likely(cc_init_ctx_p != NULL))
+		rc = dispatch_cc_init_msg(drvdata, cc_init_ctx_p);
+	else
+		rc = -ENOMEM;
+	if (likely(rc == 0))
+		rc = sepinit_wait_for_cold_boot_finish(drvdata);
+	if (cc_init_ctx_p != NULL)
+		destroy_cc_init_ctx(cc_init_ctx_p);
+	return rc;
+}
+
+/*** FW_INIT handlers ***/
+
+#ifdef DEBUG
+#define ENUM_CASE_RETURN_STR(enum_name) case enum_name: return #enum_name
+
+static const char *param2str(enum dx_fw_init_tlv_params param_type)
+{
+	switch (param_type) {
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_NULL);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_FIRST);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_LAST);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_DISABLE_MODULES);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_HOST_AXI_CONFIG);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_HOST_DEF_APPLET_CONFIG);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_NUM_OF_DESC_QS);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_DESC_QS_ADDR);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_DESC_QS_SIZE);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_CTX_CACHE_PART);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_SEP_REQUEST_PARAMS);
+		ENUM_CASE_RETURN_STR(DX_FW_INIT_PARAM_SEP_FREQ);
+	default:
+		return "(unknown param.)";
+	}
+}
+
+static void dump_fwinit_params(struct sep_drvdata *drvdata,
+			       u32 *fw_init_params_buf_p)
+{
+#define LINE_BUF_LEN 90		/* increased to hold values for 2 queues */
+	const u32 last_tl_word = DX_TL_WORD(DX_FW_INIT_PARAM_LAST, 1);
+	u32 tl_word;
+	u16 type, len;
+	u32 *cur_buf_p;
+	unsigned int i = 0;
+	char line_buf[LINE_BUF_LEN];
+	unsigned int line_offset;
+
+	pr_debug("Dx SeP fw_init params dump:\n");
+	cur_buf_p = fw_init_params_buf_p;
+	do {
+		tl_word = le32_to_cpu(*cur_buf_p);
+		cur_buf_p++;
+		type = DX_TL_GET_TYPE(tl_word);
+		len = DX_TL_GET_LENGTH(tl_word);
+
+		if ((cur_buf_p + len - fw_init_params_buf_p) >
+		    (FW_INIT_PARAMS_BUF_LEN / sizeof(u32))) {
+			pr_err("LAST parameter not found up to buffer end\n");
+			break;
+		}
+
+		line_offset = snprintf(line_buf, LINE_BUF_LEN,
+				       "Type=0x%04X (%s), Length=%u , Val={",
+				       type, param2str(type), len);
+		for (i = 0; i < len; i++) {
+			/*
+			 * 11 is the length of a printed value
+			 * (formatted with 0x%08X in the
+			 * next call to snprintf)
+			 */
+			if (line_offset + 11 >= LINE_BUF_LEN) {
+				pr_debug("%s\n", line_buf);
+				line_offset = 0;
+			}
+			line_offset += snprintf(line_buf + line_offset,
+						LINE_BUF_LEN - line_offset,
+						"0x%08X,",
+						le32_to_cpu(*cur_buf_p));
+			cur_buf_p++;
+		}
+		pr_debug("%s}\n", line_buf);
+	} while (tl_word != last_tl_word);
+}
+#else
+#define dump_fwinit_params(drvdata, fw_init_params_buf_p) do {} while (0)
+#endif /*DEBUG*/
+/**
+ * add_fwinit_param() - Add TLV parameter for FW-init.
+ * @tlv_buf:	 The base of the TLV parameters buffers
+ * @idx_p:	 (in/out): Current tlv_buf word index
+ * @checksum_p:	 (in/out): 32bit checksum for TLV array
+ * @type:	 Parameter type
+ * @length:	 Parameter length (size in words of "values")
+ * @values:	 Values array ("length" values)
+ *
+ * Returns void
+ */
+static void add_fwinit_param(u32 *tlv_buf, u32 *idx_p,
+			     u32 *checksum_p,
+			     enum dx_fw_init_tlv_params type, u16 length,
+			     const u32 *values)
+{
+	const u32 tl_word = DX_TL_WORD(type, length);
+	int i;
+
+#ifdef DEBUG
+	/* Verify that we have enough space for LAST param. after this param. */
+	if ((*idx_p + 1 + length + 2) > (FW_INIT_PARAMS_BUF_LEN / 4)) {
+		pr_err("tlv_buf size limit reached!\n");
+		SEP_DRIVER_BUG();
+	}
+#endif
+
+	/* Add type-length word */
+	tlv_buf[(*idx_p)++] = cpu_to_le32(tl_word);
+	*checksum_p += tl_word;
+
+	/* Add values */
+	for (i = 0; i < length; i++) {
+		/* Add value words if any. TL-word is counted as first... */
+		tlv_buf[(*idx_p)++] = cpu_to_le32(values[i]);
+		*checksum_p += values[i];
+	}
+}
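+
+/*
+ * Illustrative encoding sketch (not driver code): each parameter occupies
+ * one type-length word followed by "length" value words, all little-endian,
+ * while the raw CPU-endian words feed a running checksum. E.g., a
+ * single-value parameter of hypothetical type 0x7 with value 0x12345678:
+ *
+ *	tlv_buf[idx + 0] = cpu_to_le32(DX_TL_WORD(0x7, 1));
+ *	tlv_buf[idx + 1] = cpu_to_le32(0x12345678);
+ *	checksum += DX_TL_WORD(0x7, 1) + 0x12345678;
+ */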
+
+/**
+ * init_fwinit_param_list() - Initialize TLV parameters list
+ * @tlv_buf:	The pointer to the TLV list array buffer
+ * @idx_p:	The pointer to the variable that would maintain current
+ *		position in the tlv_array
+ * @checksum_p:	The pointer to the variable that would accumulate the
+ *		TLV array checksum
+ *
+ * Returns void
+ */
+static void init_fwinit_param_list(u32 *tlv_buf, u32 *idx_p,
+				   u32 *checksum_p)
+{
+	const u32 magic = DX_FW_INIT_PARAM_FIRST_MAGIC;
+	/* Initialize index and checksum variables */
+	*idx_p = 0;
+	*checksum_p = 0;
+	/* Start with FIRST_MAGIC parameter */
+	add_fwinit_param(tlv_buf, idx_p, checksum_p,
+			 DX_FW_INIT_PARAM_FIRST, 1, &magic);
+}
+
+/**
+ * terminate_fwinit_param_list() - Terminate the TLV parameters list with
+ *				LAST/checksum parameter
+ * @tlv_buf:	The pointer to the TLV list array buffer
+ * @idx_p:	The pointer to the variable that would maintain current
+ *		position in the tlv_array
+ * @checksum_p:	The pointer to the variable that would accumulate the
+ *		TLV array checksum
+ *
+ * Returns void
+ */
+static void terminate_fwinit_param_list(u32 *tlv_buf, u32 *idx_p,
+					u32 *checksum_p)
+{
+	const u32 tl_word = DX_TL_WORD(DX_FW_INIT_PARAM_LAST, 1);
+
+	tlv_buf[(*idx_p)++] = cpu_to_le32(tl_word);
+	*checksum_p += tl_word;	/* Last TL word is included in checksum */
+	tlv_buf[(*idx_p)++] = cpu_to_le32(~(*checksum_p));
+}
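+
+/*
+ * Illustrative usage sketch (not driver code; a real buffer must be
+ * DMA-coherent, as in create_fwinit_command() below): a minimal valid
+ * TLV list brackets add_fwinit_param() calls between init and terminate:
+ *
+ *	u32 buf[FW_INIT_PARAMS_BUF_LEN / sizeof(u32)];
+ *	u32 idx, checksum;
+ *	u32 qs_num = 1;
+ *
+ *	init_fwinit_param_list(buf, &idx, &checksum);
+ *	add_fwinit_param(buf, &idx, &checksum,
+ *			 DX_FW_INIT_PARAM_NUM_OF_DESC_QS, 1, &qs_num);
+ *	terminate_fwinit_param_list(buf, &idx, &checksum);
+ *
+ * The terminated buffer holds FIRST_MAGIC, the parameters, the LAST
+ * type-length word and the inverted checksum, ready to be handed to SeP.
+ */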
+
+static int create_fwinit_command(struct sep_drvdata *drvdata,
+				 u32 **fw_init_params_buf_pp,
+				 dma_addr_t *fw_init_params_dma_p)
+{
+	u32 idx;
+	u32 checksum = 0;
+#ifdef SEP_FREQ_MHZ
+	u32 sep_freq = SEP_FREQ_MHZ;
+#endif
+	dma_addr_t q_base_dma;
+	unsigned long q_size;
+	/* arrays for queue parameters values */
+	u32 qs_base_dma[SEP_MAX_NUM_OF_DESC_Q];
+	u32 qs_size[SEP_MAX_NUM_OF_DESC_Q];
+	u32 qs_ctx_size[SEP_MAX_NUM_OF_DESC_Q];
+	u32 qs_ctx_size_total;
+	u32 qs_num = drvdata->num_of_desc_queues;
+	u32 sep_request_params[DX_SEP_REQUEST_PARAM_MSG_LEN];
+	int i;
+	int rc;
+
+	/* For klocwork, add extra check */
+	if (qs_num > SEP_MAX_NUM_OF_DESC_Q) {
+		pr_err("Max number of desc queues (%d) exceeded (%d)\n");
+		return -EINVAL;
+	}
+
+	/* allocate coherent working buffer */
+	*fw_init_params_buf_pp = dma_alloc_coherent(drvdata->dev,
+						    FW_INIT_PARAMS_BUF_LEN,
+						    fw_init_params_dma_p,
+						    GFP_KERNEL);
+	pr_debug("fw_init_params_dma=0x%08lX fw_init_params_va=0x%p\n",
+		      (unsigned long)*fw_init_params_dma_p,
+		      *fw_init_params_buf_pp);
+	if (*fw_init_params_buf_pp == NULL) {
+		pr_err("Unable to allocate coherent workspace buffer\n");
+		return -ENOMEM;
+	}
+
+	init_fwinit_param_list(*fw_init_params_buf_pp, &idx, &checksum);
+
+#ifdef SEP_FREQ_MHZ
+	add_fwinit_param(*fw_init_params_buf_pp, &idx, &checksum,
+			 DX_FW_INIT_PARAM_SEP_FREQ, 1, &sep_freq);
+#endif
+
+	/* No need to validate number of queues - already validated in
+	 * sep_setup() */
+
+	add_fwinit_param(*fw_init_params_buf_pp, &idx, &checksum,
+			 DX_FW_INIT_PARAM_NUM_OF_DESC_QS, 1, &qs_num);
+
+	/* Fetch per-queue information */
+	qs_ctx_size_total = 0;
+	for (i = 0; i < qs_num; i++) {
+		desc_q_get_info4sep(drvdata->queue[i].desc_queue,
+				    &q_base_dma, &q_size);
+		/* Data is first fetched into q_base_dma and q_size because
+		 * return value is of different type than u32 */
+		qs_base_dma[i] = q_base_dma;
+		qs_size[i] = q_size;
+		qs_ctx_size[i] =
+		    ctxmgr_sep_cache_get_size(drvdata->queue[i].sep_cache);
+		if ((qs_base_dma[i] == 0) || (qs_size[i] == 0) ||
+		    (qs_ctx_size[i] == 0)) {
+			pr_err(
+				    "Invalid queue %d resources: base=0x%08X size=%u ctx_cache_size=%u\n",
+				    i, qs_base_dma[i], qs_size[i],
+				    qs_ctx_size[i]);
+			rc = -EINVAL;
+			goto fwinit_error;
+		}
+		qs_ctx_size_total += qs_ctx_size[i];
+	}
+
+	if (qs_ctx_size_total > drvdata->num_of_sep_cache_entries) {
+		pr_err("Too many context cache entries allocated(%u>%u)\n",
+			    qs_ctx_size_total,
+			    drvdata->num_of_sep_cache_entries);
+		rc = -EINVAL;
+		goto fwinit_error;
+	}
+
+	add_fwinit_param(*fw_init_params_buf_pp, &idx, &checksum,
+			 DX_FW_INIT_PARAM_DESC_QS_ADDR, qs_num, qs_base_dma);
+	add_fwinit_param(*fw_init_params_buf_pp, &idx, &checksum,
+			 DX_FW_INIT_PARAM_DESC_QS_SIZE, qs_num, qs_size);
+	add_fwinit_param(*fw_init_params_buf_pp, &idx, &checksum,
+			 DX_FW_INIT_PARAM_CTX_CACHE_PART, qs_num, qs_ctx_size);
+
+	/* Prepare sep request params */
+	dx_sep_req_get_sep_init_params(sep_request_params);
+	add_fwinit_param(*fw_init_params_buf_pp, &idx, &checksum,
+			 DX_FW_INIT_PARAM_SEP_REQUEST_PARAMS,
+			 DX_SEP_REQUEST_PARAM_MSG_LEN, sep_request_params);
+
+	terminate_fwinit_param_list(*fw_init_params_buf_pp, &idx, &checksum);
+
+	return 0;
+
+ fwinit_error:
+	dma_free_coherent(drvdata->dev, FW_INIT_PARAMS_BUF_LEN,
+			  *fw_init_params_buf_pp, *fw_init_params_dma_p);
+	return rc;
+}
+
+static void destroy_fwinit_command(struct sep_drvdata *drvdata,
+				   u32 *fw_init_params_buf_p,
+				   dma_addr_t fw_init_params_dma)
+{
+	/* release TLV parameters buffer */
+	dma_free_coherent(drvdata->dev, FW_INIT_PARAMS_BUF_LEN,
+			  fw_init_params_buf_p, fw_init_params_dma);
+}
+
+/**
+ * sepinit_get_fw_props() - Get the FW properties (version, cache size, etc.)
+ *	as given in the respective GPRs
+ * @drvdata:	Context where to fill retrieved data
+ *
+ * Get the FW properties (version, cache size, etc.) as given in the
+ * respective GPRs.
+ * This function should be called only after sepinit_wait_for_cold_boot_finish.
+ */
+void sepinit_get_fw_props(struct sep_drvdata *drvdata)
+{
+	u32 init_fw_props;
+
+	/* SeP ROM version */
+	drvdata->rom_ver = READ_REGISTER(drvdata->cc_base +
+					 SEP_HOST_GPR_REG_OFFSET
+					 (DX_SEP_INIT_ROM_VER_GPR_IDX));
+	drvdata->fw_ver =
+	    READ_REGISTER(drvdata->cc_base +
+			  SEP_HOST_GPR_REG_OFFSET(DX_SEP_INIT_FW_VER_GPR_IDX));
+	mdelay(100);		/* TODO for kernel hang bug */
+	init_fw_props = READ_REGISTER(drvdata->cc_base +
+				      SEP_HOST_GPR_REG_OFFSET
+				      (DX_SEP_INIT_FW_PROPS_GPR_IDX));
+
+	drvdata->num_of_desc_queues =
+	    BITFIELD_GET(init_fw_props,
+			 DX_SEP_INIT_NUM_OF_QUEUES_BIT_OFFSET,
+			 DX_SEP_INIT_NUM_OF_QUEUES_BIT_SIZE);
+	drvdata->num_of_sep_cache_entries =
+	    BITFIELD_GET(init_fw_props,
+			 DX_SEP_INIT_CACHE_CTX_SIZE_BIT_OFFSET,
+			 DX_SEP_INIT_CACHE_CTX_SIZE_BIT_SIZE);
+	drvdata->mlli_table_size =
+	    BITFIELD_GET(init_fw_props,
+			 DX_SEP_INIT_MLLI_TBL_SIZE_BIT_OFFSET,
+			 DX_SEP_INIT_MLLI_TBL_SIZE_BIT_SIZE);
+
+	pr_info("ROM Ver.=0x%08X , FW Ver.=0x%08X\n"
+		     "SEP queues=%u, Ctx.Cache#ent.=%u , MLLIsize=%lu B\n",
+		     drvdata->rom_ver, drvdata->fw_ver,
+		     drvdata->num_of_desc_queues,
+		     drvdata->num_of_sep_cache_entries,
+		     drvdata->mlli_table_size);
+}
+
+/**
+ * sepinit_wait_for_fw_init_done() - Wait for FW initialization to complete
+ * @drvdata:	The driver private context
+ *
+ * This function should be invoked after sepinit_set_fw_init_params.
+ * Returns int 0 on success
+ */
+static int sepinit_wait_for_fw_init_done(struct sep_drvdata *drvdata)
+{
+	enum dx_sep_state cur_state;
+	u32 cur_status;
+	int rc = 0;
+
+	cur_state =
+	    dx_sep_wait_for_state(DX_SEP_STATE_DONE_FW_INIT |
+				  DX_SEP_STATE_FATAL_ERROR,
+				  FW_INIT_TIMEOUT_MSEC);
+	if (cur_state != DX_SEP_STATE_DONE_FW_INIT) {
+		rc = -EIO;
+		cur_status =
+		    READ_REGISTER(drvdata->cc_base + SEP_STATUS_GPR_OFFSET);
+		pr_err(
+			    "Failed waiting for DONE_FW_INIT from SeP (state=0x%08X status=0x%08X)\n",
+			    cur_state, cur_status);
+	} else {
+		pr_info("DONE_FW_INIT\n");
+	}
+
+	return rc;
+}
+
+/**
+ * sepinit_reload_driver_state() - Request SeP FW to enter the RELOAD_DRIVER
+ *	state and wait for the transition
+ * @drvdata:	The driver private context
+ *
+ * Returns int 0 on success
+ */
+int sepinit_reload_driver_state(struct sep_drvdata *drvdata)
+{
+	int rc = 0;
+	enum dx_sep_state cur_state;
+	uint32_t cur_status;
+
+	WRITE_REGISTER(drvdata->cc_base +
+			HOST_SEP_GPR_REG_OFFSET(DX_HOST_REQ_GPR_IDX),
+			DX_HOST_REQ_CHANGE_TO_RELOAD_DRIVER_STATE);
+
+	cur_state = dx_sep_wait_for_state(
+		DX_SEP_STATE_RELOAD_DRIVER | DX_SEP_STATE_FATAL_ERROR,
+		FW_INIT_TIMEOUT_MSEC);
+	pr_debug("cur_state is 0x%x", cur_state);
+
+	if (cur_state != DX_SEP_STATE_RELOAD_DRIVER) {
+		rc = -EIO;
+		cur_status =
+			READ_REGISTER(drvdata->cc_base + SEP_STATUS_GPR_OFFSET);
+		pr_err("Failed waiting for DX_SEP_STATE_RELOAD_DRIVER from SeP (state=0x%08X status=0x%08X)\n",
+			cur_state, cur_status);
+	} else {
+		pr_info("DONE_FW_INIT\n");
+	}
+
+	return rc;
+}
+
+/**
+ * sepinit_do_fw_init() - Initialize SeP FW
+ * @drvdata:	The driver private context
+ * @init_flag:	INIT_FW_FLAG to request a full FW init; otherwise only the
+ *		SW descriptor queue address is updated
+ *
+ * Provide SeP FW with initialization parameters and wait for DONE_FW_INIT.
+ *
+ * Returns int 0 on success
+ */
+int sepinit_do_fw_init(struct sep_drvdata *drvdata, int init_flag)
+{
+	int rc;
+	u32 *fw_init_params_buf_p;
+	dma_addr_t fw_init_params_dma;
+
+	rc = create_fwinit_command(drvdata,
+				   &fw_init_params_buf_p, &fw_init_params_dma);
+	if (rc != 0)
+		return rc;
+	dump_fwinit_params(drvdata, fw_init_params_buf_p);
+	/* Write the physical address of the FW init parameters buffer */
+	WRITE_REGISTER(drvdata->cc_base +
+		       HOST_SEP_GPR_REG_OFFSET(DX_HOST_REQ_PARAM_GPR_IDX),
+		       fw_init_params_dma);
+	/* Initiate FW-init */
+	if (init_flag == INIT_FW_FLAG)
+		WRITE_REGISTER(drvdata->cc_base +
+			HOST_SEP_GPR_REG_OFFSET(DX_HOST_REQ_GPR_IDX),
+			DX_HOST_REQ_FW_INIT);
+	else
+		WRITE_REGISTER(drvdata->cc_base +
+			HOST_SEP_GPR_REG_OFFSET(DX_HOST_REQ_GPR_IDX),
+			DX_HOST_REQ_UPDATE_SWQ_ADDR);
+
+	rc = sepinit_wait_for_fw_init_done(drvdata);
+	destroy_fwinit_command(drvdata,
+		fw_init_params_buf_p, fw_init_params_dma);
+
+	WRITE_REGISTER(drvdata->cc_base +
+			HOST_SEP_GPR_REG_OFFSET(DX_HOST_REQ_GPR_IDX),
+			0);
+
+	return rc;
+}
diff --git a/drivers/staging/sep54/sep_init.h b/drivers/staging/sep54/sep_init.h
new file mode 100644
index 0000000..1e16e19
--- /dev/null
+++ b/drivers/staging/sep54/sep_init.h
@@ -0,0 +1,72 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef __SEP_INIT_H__
+#define __SEP_INIT_H__
+
+#include "dx_driver.h"
+
+/**
+ * sepinit_do_cc_init() - Initiate SeP cold boot sequence and wait for
+ *	its completion.
+ *
+ * @drvdata:	The driver private context
+ *
+ * This function loads the CC firmware and dispatches a CC_INIT request
+ * message.
+ * Returns int 0 on success
+ */
+int sepinit_do_cc_init(struct sep_drvdata *drvdata);
+
+/**
+ * sepinit_get_fw_props() - Get the FW properties (version, cache size, etc.)
+ *	after completing cold boot
+ * @drvdata:	 Context where to fill retrieved data
+ *
+ * This function should be called only after sepinit_do_cc_init completes
+ * successfully.
+ */
+void sepinit_get_fw_props(struct sep_drvdata *drvdata);
+
+/**
+ * sepinit_do_fw_init() - Initialize SeP FW
+ * @drvdata:	The driver private context
+ * @init_flag:	INIT_FW_FLAG to request a full FW init; otherwise only the
+ *		SW descriptor queue address is updated
+ *
+ * Provide SeP FW with initialization parameters and wait for DONE_FW_INIT.
+ *
+ * Returns int 0 on success
+ */
+int sepinit_do_fw_init(struct sep_drvdata *drvdata, int init_flag);
+
+/**
+ * sepinit_reload_driver_state() - Request SeP FW to enter the RELOAD_DRIVER
+ *	state and wait for the transition
+ * @drvdata:	The driver private context
+ *
+ * Returns int 0 on success
+ */
+int sepinit_reload_driver_state(struct sep_drvdata *drvdata);
+
+#endif /*__SEP_INIT_H__*/
diff --git a/drivers/staging/sep54/sep_init_cc_errors.h b/drivers/staging/sep54/sep_init_cc_errors.h
new file mode 100644
index 0000000..3c16d16
--- /dev/null
+++ b/drivers/staging/sep54/sep_init_cc_errors.h
@@ -0,0 +1,84 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef SEP_INIT_CC_ERROR_H
+#define SEP_INIT_CC_ERROR_H
+
+#include "sep_error.h"
+
+/*! \file sep_init_cc_errors.h
+ * \brief This file contains the SeP CC-init error code definitions
+ */
+#define DX_INIT_CC_OK DX_SEP_OK
+/* DX_INIT_CC_MODULE_ERROR_BASE - 0xE0004000 */
+#define DX_CC_INIT_MSG_CS_ERROR	\
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x1)
+#define DX_CC_INIT_MSG_WRONG_TOKEN_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x2)
+#define DX_CC_INIT_MSG_WRONG_OP_CODE_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x3)
+#define DX_CC_INIT_MSG_WRONG_RESIDENT_ADDR_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x4)
+#define DX_CC_INIT_MSG_WRONG_I_CACHE_ADDR_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x5)
+#define DX_CC_INIT_MSG_WRONG_I_CACHE_DEST_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x6)
+#define DX_CC_INIT_MSG_WRONG_D_CACHE_ADDR_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x7)
+#define DX_CC_INIT_MSG_WRONG_D_CACHE_SIZE_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x8)
+#define DX_CC_INIT_MSG_WRONG_INIT_EXT_ADDR_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0x9)
+#define DX_CC_INIT_MSG_WRONG_VRL_ADDR_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0xA)
+#define DX_CC_INIT_MSG_WRONG_MAGIC_NUM_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0xB)
+#define DX_CC_INIT_MSG_WRONG_OUTPUT_BUFF_ADDR_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0xC)
+#define DX_CC_INIT_MSG_WRONG_OUTPUT_BUFF_SIZE_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0xD)
+#define DX_RESERVED_0_ERROR \
+	(DX_INIT_CC_MODULE_ERROR_BASE + 0xE)
+
+/* DX_INIT_CC_EXT_MODULE_ERROR_BASE - 0xE0005000 */
+#define DX_CC_INIT_EXT_FIRST_PARAM_ERRR \
+	(DX_INIT_CC_EXT_MODULE_ERROR_BASE + 0x1)
+#define DX_CC_INIT_EXT_WRONG_LAST_PARAM_LENGTH_ERRR \
+	(DX_INIT_CC_EXT_MODULE_ERROR_BASE + 0x2)
+#define DX_CC_INIT_EXT_WRONG_CHECKSUM_VALUE_ERRR \
+	(DX_INIT_CC_EXT_MODULE_ERROR_BASE + 0x3)
+#define DX_CC_INIT_EXT_WRONG_DISABLE_MODULE_LENGTH_ERRR \
+	(DX_INIT_CC_EXT_MODULE_ERROR_BASE + 0x4)
+#define DX_CC_INIT_EXT_WRONG_AXI_CONFIG_LENGTH_ERRR \
+	(DX_INIT_CC_EXT_MODULE_ERROR_BASE + 0x5)
+#define DX_CC_INIT_EXT_WRONG_PARAM_ERRR \
+	(DX_INIT_CC_EXT_MODULE_ERROR_BASE + 0x6)
+#define DX_CC_INIT_EXT_EXCEED_MAX_PARAM_PARAM_ERRR \
+	(DX_INIT_CC_EXT_MODULE_ERROR_BASE + 0x7)
+#define DX_CC_INIT_EXT_WRONG_SEP_FREQ_LENGTH_ERRR \
+	(DX_INIT_CC_EXT_MODULE_ERROR_BASE + 0x8)
+
+#endif
diff --git a/drivers/staging/sep54/sep_lli.h b/drivers/staging/sep54/sep_lli.h
new file mode 100644
index 0000000..b778f4f
--- /dev/null
+++ b/drivers/staging/sep54/sep_lli.h
@@ -0,0 +1,86 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _SEP_LLI_H_
+#define _SEP_LLI_H_
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+#include "dx_bitops.h"
+
+#define SEP_LLI_GET(lli_p, lli_field) BITFIELD_GET(                            \
+		((u32 *)(lli_p))[SEP_LLI_ ## lli_field ## _WORD_OFFSET],  \
+		SEP_LLI_ ## lli_field ## _BIT_OFFSET,			       \
+		SEP_LLI_ ## lli_field ## _BIT_SIZE)
+#define SEP_LLI_SET(lli_p, lli_field, new_val) BITFIELD_SET(                   \
+		((u32 *)(lli_p))[SEP_LLI_ ## lli_field ## _WORD_OFFSET],  \
+		SEP_LLI_ ## lli_field ## _BIT_OFFSET,			       \
+		SEP_LLI_ ## lli_field ## _BIT_SIZE,			       \
+		new_val)
+
+#define SEP_LLI_INIT(lli_p)  do { \
+	((u32 *)(lli_p))[0] = 0; \
+	((u32 *)(lli_p))[1] = 0; \
+} while (0)
+
+/* Copy local LLI scratchpad to SeP LLI buffer */
+#define SEP_LLI_COPY_TO_SEP(sep_lli_p, host_lli_p) do {             \
+	int i;                                                      \
+	for (i = 0; i < SEP_LLI_ENTRY_WORD_SIZE; i++)               \
+		((u32 *)(sep_lli_p))[i] =                      \
+			cpu_to_le32(((u32 *)(host_lli_p))[i]); \
+} while (0)
+/* and vice-versa */
+#define SEP_LLI_COPY_FROM_SEP(host_lli_p, sep_lli_p) do {           \
+	int i;                                                       \
+	for (i = 0; i < SEP_LLI_ENTRY_WORD_SIZE; i++)                \
+		((u32 *)(host_lli_p))[i] =                           \
+			le32_to_cpu(((u32 *)(sep_lli_p))[i]);        \
+} while (0)
+
+/* Size of entry */
+#define SEP_LLI_ENTRY_WORD_SIZE 2
+#define SEP_LLI_ENTRY_BYTE_SIZE (SEP_LLI_ENTRY_WORD_SIZE * sizeof(u32))
+
+/* (DMA) Address: ADDR */
+#define SEP_LLI_ADDR_WORD_OFFSET 0
+#define SEP_LLI_ADDR_BIT_OFFSET 0
+#define SEP_LLI_ADDR_BIT_SIZE 32
+/* Size: SIZE */
+#define SEP_LLI_SIZE_WORD_OFFSET 1
+#define SEP_LLI_SIZE_BIT_OFFSET 0
+#define SEP_LLI_SIZE_BIT_SIZE 30
+/* First/Last LLI entries bit marks: FIRST, LAST */
+#define SEP_LLI_FIRST_WORD_OFFSET 1
+#define SEP_LLI_FIRST_BIT_OFFSET 30
+#define SEP_LLI_FIRST_BIT_SIZE 1
+#define SEP_LLI_LAST_WORD_OFFSET 1
+#define SEP_LLI_LAST_BIT_OFFSET 31
+#define SEP_LLI_LAST_BIT_SIZE 1
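+
+/*
+ * Illustrative sketch (not part of this header): filling a single LLI
+ * entry that is both first and last in its list, where dma_addr and
+ * buf_size stand for the caller's DMA address and byte count:
+ *
+ *	u32 lli[SEP_LLI_ENTRY_WORD_SIZE];
+ *
+ *	SEP_LLI_INIT(lli);
+ *	SEP_LLI_SET(lli, ADDR, dma_addr);
+ *	SEP_LLI_SET(lli, SIZE, buf_size);
+ *	SEP_LLI_SET(lli, FIRST, 1);
+ *	SEP_LLI_SET(lli, LAST, 1);
+ *	SEP_LLI_COPY_TO_SEP(sep_lli_p, lli);
+ */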
+
+#endif /*_SEP_LLI_H_*/
diff --git a/drivers/staging/sep54/sep_log.h b/drivers/staging/sep54/sep_log.h
new file mode 100644
index 0000000..fd7a9ae
--- /dev/null
+++ b/drivers/staging/sep54/sep_log.h
@@ -0,0 +1,131 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+
+
+#ifndef _SEP_LOG__H_
+#define _SEP_LOG__H_
+
+/* Define different "BUG()" behavior in DEBUG mode */
+#ifdef DEBUG
+/* It is easier to attach a debugger without causing the exception of "BUG()" */
+#define SEP_DRIVER_BUG() do {dump_stack(); while (1); } while (0)
+#else
+#define SEP_DRIVER_BUG() BUG()
+#endif
+
+/* SeP log levels (to be used in sep_log_level and SEP_BASE_LOG_LEVEL) */
+#define SEP_LOG_LEVEL_ERR       0
+#define SEP_LOG_LEVEL_WARN      1
+#define SEP_LOG_LEVEL_INFO      2
+#define SEP_LOG_LEVEL_DEBUG     3
+#define SEP_LOG_LEVEL_TRACE     4
+
+/* SeP log components (to be used in sep_log_mask and SEP_LOG_CUR_COMPONENT) */
+#define SEP_LOG_MASK_MAIN        1 /* dx_driver.c */
+#define SEP_LOG_MASK_LLI_MGR     (1<<1)
+#define SEP_LOG_MASK_CTX_MGR     (1<<2)
+#define SEP_LOG_MASK_DESC_MGR    (1<<3)
+#define SEP_LOG_MASK_SYSFS       (1<<4)
+#define SEP_LOG_MASK_SEP_INIT    (1<<5)
+#define SEP_LOG_MASK_CRYPTO_API  (1<<6)
+#define SEP_LOG_MASK_SEP_REQUEST (1<<7)
+#define SEP_LOG_MASK_SEP_POWER   (1<<8)
+#define SEP_LOG_MASK_SEP_APP     (1<<9)
+#define SEP_LOG_MASK_SEP_PRINTF  (1<<31)
+#define SEP_LOG_MASK_ALL        (SEP_LOG_MASK_MAIN | SEP_LOG_MASK_SEP_INIT |\
+	SEP_LOG_MASK_LLI_MGR | SEP_LOG_MASK_CTX_MGR | SEP_LOG_MASK_DESC_MGR |\
+	SEP_LOG_MASK_SYSFS | SEP_LOG_MASK_CRYPTO_API |\
+	SEP_LOG_MASK_SEP_REQUEST | SEP_LOG_MASK_SEP_POWER |\
+	SEP_LOG_MASK_SEP_APP | SEP_LOG_MASK_SEP_PRINTF)
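+
+/*
+ * Illustrative usage (not part of this header): a component defines its
+ * mask before including this header, then uses the level macros:
+ *
+ *	#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_LLI_MGR
+ *	#include "sep_log.h"
+ *	...
+ *	SEP_LOG_DEBUG("mapped %u entries\n", num_entries);
+ *
+ * The print is emitted only if both sep_log_level and the component's bit
+ * in sep_log_mask allow it.
+ */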
+
+
+/* This printk wrapper maps the log level to KERN_* levels and masks prints  *
+ * from specific components at run time based on SEP_LOG_CUR_COMPONENT and   *
+ * sep_log_mask.                                                             */
+#define MODULE_PRINTK(level, format, ...) do {			\
+	if (sep_log_mask & SEP_LOG_CUR_COMPONENT)		\
+		printk(level MODULE_NAME ":%s: " format,	\
+			__func__, ##__VA_ARGS__);		\
+} while (0)
+
+extern int sep_log_level;
+extern int sep_log_mask;
+
+
+/* change this to set the preferred log level */
+#ifdef DEBUG
+#define SEP_BASE_LOG_LEVEL SEP_LOG_LEVEL_TRACE
+#else
+#define SEP_BASE_LOG_LEVEL SEP_LOG_LEVEL_WARN
+#endif
+
+#define SEP_LOG_ERR(format, ...) \
+	MODULE_PRINTK(KERN_ERR, format, ##__VA_ARGS__)
+
+#if (SEP_BASE_LOG_LEVEL >= SEP_LOG_LEVEL_WARN)
+#define SEP_LOG_WARN(format, ...) do {				\
+	if (sep_log_level >= SEP_LOG_LEVEL_WARN)		\
+		MODULE_PRINTK(KERN_WARNING, format,		\
+		##__VA_ARGS__);					\
+} while (0)
+#else
+#define SEP_LOG_WARN(format, arg...) do {} while (0)
+#endif
+
+#if (SEP_BASE_LOG_LEVEL >= SEP_LOG_LEVEL_INFO)
+#define SEP_LOG_INFO(format, ...) do {				\
+	if (sep_log_level >= SEP_LOG_LEVEL_INFO)		\
+		MODULE_PRINTK(KERN_INFO, format, ##__VA_ARGS__); \
+} while (0)
+#else
+#define SEP_LOG_INFO(format, arg...) do {} while (0)
+#endif
+
+#if (SEP_BASE_LOG_LEVEL >= SEP_LOG_LEVEL_DEBUG)
+#define SEP_LOG_DEBUG(format, ...) do {				\
+	if (sep_log_level >= SEP_LOG_LEVEL_DEBUG)		\
+		MODULE_PRINTK(KERN_DEBUG, format, ##__VA_ARGS__);\
+} while (0)
+#else
+#define SEP_LOG_DEBUG(format, arg...) do {} while (0)
+#endif
+
+#if (SEP_BASE_LOG_LEVEL >= SEP_LOG_LEVEL_TRACE)
+#define SEP_LOG_TRACE(format, ...) do {				\
+	if (sep_log_level >= SEP_LOG_LEVEL_TRACE)		\
+		MODULE_PRINTK(KERN_DEBUG, "<trace> " format,	\
+			      ##__VA_ARGS__);			\
+} while (0)
+#else
+#define SEP_LOG_TRACE(format, arg...) do {} while (0)
+#endif
+
+#undef pr_fmt
+#define pr_fmt(fmt)     KBUILD_MODNAME ": %s:%d: " fmt, __func__, __LINE__
+
+#endif
+
diff --git a/drivers/staging/sep54/sep_power.c b/drivers/staging/sep54/sep_power.c
new file mode 100644
index 0000000..d007907
--- /dev/null
+++ b/drivers/staging/sep54/sep_power.c
@@ -0,0 +1,431 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+/**
+ * This file implements the power state control functions for SeP/CC
+ */
+
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_SEP_POWER
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+/*#include <linux/export.h>*/
+#define DX_CC_HOST_VIRT	/* must be defined before including dx_cc_regs.h */
+#include "dx_driver.h"
+#include "dx_cc_regs.h"
+#include "dx_init_cc_abi.h"
+#include "sep_sw_desc.h"
+#include "dx_sep_kapi.h"
+#include <linux/delay.h>
+
+#define SEP_STATE_CHANGE_TIMEOUT_MSEC 2500
+/**
+ * struct sep_power_control - Control data for power state change operations
+ * @drvdata:		The associated driver context
+ * @state_changed:	Completion object to signal state change
+ * @last_state:		Recorded last state
+ * @state_jiffies:	jiffies at recorded last state
+ *
+ * last_state and state_jiffies are volatile because they may be updated in
+ * the interrupt context while being tested in the _state_get function.
+ */
+struct sep_power_control {
+	struct sep_drvdata *drvdata;
+	struct completion state_changed;
+	volatile enum dx_sep_state last_state;
+	volatile unsigned long state_jiffies;
+};
+
+/* Global context for power management */
+static struct sep_power_control power_control;
+
+static const char *power_state_str(enum dx_sep_power_state pstate)
+{
+	switch (pstate) {
+	case DX_SEP_POWER_INVALID:
+		return "INVALID";
+	case DX_SEP_POWER_OFF:
+		return "OFF";
+	case DX_SEP_POWER_BOOT:
+		return "BOOT";
+	case DX_SEP_POWER_IDLE:
+		return "IDLE";
+	case DX_SEP_POWER_ACTIVE:
+		return "ACTIVE";
+	case DX_SEP_POWER_HIBERNATED:
+		return "HIBERNATED";
+	}
+	return "(unknown)";
+}
+
+/**
+ * dx_sep_state_change_handler() - Interrupt handler for SeP state changes
+ * @drvdata:	Associated driver context
+ */
+void dx_sep_state_change_handler(struct sep_drvdata *drvdata)
+{
+	pr_warn("State=0x%08X Status/RetCode=0x%08X\n",
+		     READ_REGISTER(drvdata->cc_base + SEP_STATE_GPR_OFFSET),
+		     READ_REGISTER(drvdata->cc_base + SEP_STATUS_GPR_OFFSET));
+	power_control.state_jiffies = jiffies;
+	power_control.last_state = GET_SEP_STATE(drvdata);
+	complete(&power_control.state_changed);
+}
+
+/**
+ * dx_sep_wait_for_state() - Wait for SeP to reach one of the states reflected
+ *				with given state mask
+ * @state_mask:		The OR of expected SeP states
+ * @timeout_msec:	Timeout of waiting for the state (in millisec.)
+ *
+ * Returns the state reached. In case of wait timeout the returned state
+ * may not be one of the expected states.
+ */
+enum dx_sep_state dx_sep_wait_for_state(u32 state_mask, int timeout_msec)
+{
+	int wait_jiffies = msecs_to_jiffies(timeout_msec);
+	enum dx_sep_state sep_state;
+
+	do {
+		/* Poll for state transition completion or failure */
+		/* Arm for next state change before reading current state */
+		INIT_COMPLETION(power_control.state_changed);
+		sep_state = GET_SEP_STATE(power_control.drvdata);
+		if ((sep_state & state_mask) || (wait_jiffies == 0))
+			/* It's a match or wait timed out */
+			break;
+		wait_jiffies =
+		    wait_for_completion_timeout(&power_control.state_changed,
+						wait_jiffies);
+	} while (1);
+
+	return sep_state;
+}
+
+void dx_sep_pm_runtime_get(void)
+{
+	pm_runtime_get_sync(power_control.drvdata->dev);
+}
+
+void dx_sep_pm_runtime_put(void)
+{
+	pm_runtime_mark_last_busy(power_control.drvdata->dev);
+	pm_runtime_put_autosuspend(power_control.drvdata->dev);
+}
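+
+/*
+ * Illustrative usage (not driver code): callers bracket SeP work with
+ * these helpers so the device can autosuspend when idle:
+ *
+ *	dx_sep_pm_runtime_get();
+ *	... enqueue descriptors and wait for completion ...
+ *	dx_sep_pm_runtime_put();
+ */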
+
+/**
+ * set_desc_qs_state() - Modify states of all Desc. queues
+ *
+ * @state:	Requested new state
+ */
+static int set_desc_qs_state(enum desc_q_state state)
+{
+	int i, rc;
+
+	for (i = 0, rc = 0;
+	     (i < power_control.drvdata->num_of_desc_queues) && (rc == 0); i++)
+		rc = desc_q_set_state(power_control.drvdata->queue[i].
+				      desc_queue, state);
+	if (unlikely(rc != 0))
+		/* Error - revert state of queues that were already changed */
+		for (i--; i >= 0; i--)
+			desc_q_set_state(power_control.drvdata->queue[i].
+					 desc_queue,
+					 (state ==
+					  DESC_Q_ASLEEP) ? DESC_Q_ACTIVE :
+					 DESC_Q_ASLEEP);
+	return rc;
+}
+
+static bool is_desc_qs_active(void)
+{
+	int i;
+	enum desc_q_state qstate;
+	bool is_all_qs_active = true;
+
+	for (i = 0; i < power_control.drvdata->num_of_desc_queues; i++) {
+		qstate =
+		    desc_q_get_state(power_control.drvdata->queue[i].
+				     desc_queue);
+		if (qstate != DESC_Q_ACTIVE) {
+			is_all_qs_active = false;
+			break;
+		}
+	}
+	return is_all_qs_active;
+}
+
+static bool is_desc_qs_idle(unsigned long *idle_jiffies_p)
+{
+	int i;
+	unsigned long this_q_idle_jiffies;
+	bool is_all_qs_idle = true;
+
+	*idle_jiffies_p = 0;	/* Max. across all queues if all are idle */
+
+	for (i = 0; i < power_control.drvdata->num_of_desc_queues; i++) {
+		if (!desc_q_is_idle(power_control.drvdata->queue[i].desc_queue,
+				    &this_q_idle_jiffies)) {
+			is_all_qs_idle = false;
+			break;
+		}
+		if (this_q_idle_jiffies > *idle_jiffies_p)
+			*idle_jiffies_p = this_q_idle_jiffies;
+	}
+	return is_all_qs_idle;
+}
+
+/**
+ * reset_desc_qs() - Initiate clearing of desc. queue counters
+ *
+ * This function must be called only when the transition to hibernation state
+ * has completed successfully, i.e., the desc. queues are empty and asleep.
+ */
+static void reset_desc_qs(void)
+{
+	int i;
+
+	for (i = 0; i < power_control.drvdata->num_of_desc_queues; i++)
+		(void)desc_q_reset(power_control.drvdata->queue[i].desc_queue);
+}
+
+static int process_hibernation_req(void)
+{
+	enum dx_sep_state sep_state;
+	int rc;
+
+	sep_state = GET_SEP_STATE(power_control.drvdata);
+	/* Already off, no need to send the sleep descriptor */
+	if (sep_state == DX_SEP_STATE_OFF ||
+		sep_state == DX_SEP_STATE_DONE_SLEEP_MODE)
+		return 0;
+
+	if (sep_state != DX_SEP_STATE_DONE_FW_INIT ||
+		!(is_desc_qs_active())) {
+		pr_err("Requested hibernation while SeP state=0x%08X\n",
+			    sep_state);
+		return -EINVAL;
+	}
+	rc = set_desc_qs_state(DESC_Q_ASLEEP);
+	if (unlikely(rc != 0)) {
+		pr_err("Failed moving queues to SLEEP state (%d)\n", rc);
+		return rc;
+	}
+	/* Write the SEP_SLEEP_ENABLE command to GPR7 to initiate the
+	 * sleep sequence */
+	WRITE_REGISTER(power_control.drvdata->cc_base +
+			DX_CC_REG_OFFSET(HOST, HOST_SEP_GPR7),
+			SEP_SLEEP_ENABLE);
+	/* Process state change */
+	sep_state = dx_sep_wait_for_state(DX_SEP_STATE_DONE_SLEEP_MODE,
+					SEP_STATE_CHANGE_TIMEOUT_MSEC);
+	switch (sep_state) {
+	case DX_SEP_STATE_DONE_SLEEP_MODE:
+		break;
+	case DX_SEP_STATE_DONE_FW_INIT:
+		pr_err("Transition to SLEEP mode aborted.\n");
+		rc = -EBUSY;
+		break;
+	case DX_SEP_STATE_PROC_SLEEP_MODE:
+		pr_err("Stuck in processing of SLEEP req.\n");
+		rc = -ETIME;
+		break;
+	default:
+		pr_err(
+			"Unexpected SeP state after SLEEP request: 0x%08X\n",
+			sep_state);
+		rc = -EINVAL;
+	}
+	if (unlikely(rc != 0)) {
+		sep_state = GET_SEP_STATE(power_control.drvdata);
+		if (sep_state == DX_SEP_STATE_DONE_FW_INIT)
+			/* Revert queues state on failure */
+			/* (if remained on active state)  */
+			set_desc_qs_state(DESC_Q_ACTIVE);
+	} else {
+		reset_desc_qs();
+	}
+
+	return rc;
+}
+
+static int process_activate_req(void)
+{
+	enum dx_sep_state sep_state;
+	int rc;
+	int count = 0;
+
+	sep_state = GET_SEP_STATE(power_control.drvdata);
+	if ((sep_state == DX_SEP_STATE_DONE_FW_INIT) && is_desc_qs_active()) {
+		pr_info("Requested activation when in active state\n");
+		return 0;	/* Already in this state */
+	}
+
+	/* make sure Sep is not off before restore IMR */
+	if (sep_state == DX_SEP_STATE_OFF) {
+		while (count < SEP_POWERON_TIMEOUT) {
+			sep_state = GET_SEP_STATE(power_control.drvdata);
+			if (sep_state != DX_SEP_STATE_OFF)
+				break;
+			usleep_range(50, 150);
+			count++;
+		}
+		if (count >= SEP_POWERON_TIMEOUT) {
+			pr_info("Timed out waiting for SeP power-on\n");
+			return -ETIME;
+		}
+
+	}
+	/* SeP may have been reset - restore IMR if SeP is not off */
+	/* This must be done before dx_sep_wait_for_state() */
+	WRITE_REGISTER(power_control.drvdata->cc_base +
+		       DX_CC_REG_OFFSET(HOST, IMR),
+		       ~power_control.drvdata->irq_mask);
+	/* Nothing to initiate - just wait for FW_INIT_DONE */
+	sep_state = dx_sep_wait_for_state(DX_SEP_STATE_DONE_FW_INIT,
+					  SEP_STATE_CHANGE_TIMEOUT_MSEC);
+	if (sep_state == DX_SEP_STATE_DONE_FW_INIT)
+		rc = set_desc_qs_state(DESC_Q_ACTIVE);
+	else {
+		pr_info("Timeout while waiting SEP wakeup\n");
+		rc = -ETIME;	/* Timed out waiting */
+	}
+
+	return rc;
+}
+
+/**
+ * dx_sep_power_state_set() - Change power state of SeP (CC)
+ *
+ * @req_state:	The requested power state (_HIBERNATED or _ACTIVE)
+ *
+ * Request changing of power state to given state and block until transition
+ * is completed.
+ * Requesting _HIBERNATED is allowed only from _ACTIVE state.
+ * Requesting _ACTIVE is allowed only after CC was powered back on (warm boot).
+ * Return codes:
+ * 0 -	Power state change completed.
+ * -EINVAL -	This request is not allowed in current SeP state or req_state
+ *		value is invalid.
+ * -EBUSY -	State change request ignored because SeP is busy (primarily,
+ *		when requesting hibernation while SeP is processing something).
+ * -ETIME -	Request timed out (primarily, when asking for _ACTIVE)
+ */
+int dx_sep_power_state_set(enum dx_sep_power_state req_state)
+{
+	int rc = 0;
+
+	switch (req_state) {
+	case DX_SEP_POWER_HIBERNATED:
+		rc = process_hibernation_req();
+		break;
+	case DX_SEP_POWER_IDLE:
+	case DX_SEP_POWER_ACTIVE:
+		rc = process_activate_req();
+		break;
+	default:
+		pr_err("Invalid state to request (%s)\n",
+			    power_state_str(req_state));
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(dx_sep_power_state_set);
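+
+/*
+ * Illustrative usage (not driver code): a platform suspend/resume hook
+ * could drive the SeP power state as follows:
+ *
+ *	rc = dx_sep_power_state_set(DX_SEP_POWER_HIBERNATED);
+ *	... system sleeps, then resumes ...
+ *	rc = dx_sep_power_state_set(DX_SEP_POWER_ACTIVE);
+ */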
+
+/**
+ * dx_sep_power_state_get() - Get the current power state of SeP (CC)
+ * @state_jiffies_p:	The "jiffies" value at which given state was detected.
+ */
+enum dx_sep_power_state dx_sep_power_state_get(unsigned long *state_jiffies_p)
+{
+	enum dx_sep_state sep_state;
+	enum dx_sep_power_state rc;
+	unsigned long idle_jiffies;
+
+	sep_state = GET_SEP_STATE(power_control.drvdata);
+	if (sep_state != power_control.last_state) {
+		/* Probably off or after warm-boot with lost IMR */
+		/* Recover last_state */
+		power_control.last_state = sep_state;
+		power_control.state_jiffies = jiffies;
+	}
+	if (state_jiffies_p != NULL)
+		*state_jiffies_p = power_control.state_jiffies;
+	switch (sep_state) {
+	case DX_SEP_STATE_PROC_WARM_BOOT:	/*FALLTHROUGH*/
+	case DX_SEP_STATE_DONE_WARM_BOOT:
+		rc = DX_SEP_POWER_BOOT;
+		break;
+	case DX_SEP_STATE_DONE_FW_INIT:
+		if (is_desc_qs_active()) {
+			if (is_desc_qs_idle(&idle_jiffies)) {
+				rc = DX_SEP_POWER_IDLE;
+				if (state_jiffies_p != NULL)
+					*state_jiffies_p = idle_jiffies;
+			} else {
+				rc = DX_SEP_POWER_ACTIVE;
+			}
+		} else {
+			/* SeP was woken up but dx_sep_power_state_set was not
+			 * invoked to activate the queues */
+			rc = DX_SEP_POWER_BOOT;
+		}
+		break;
+	case DX_SEP_STATE_PROC_SLEEP_MODE:
+		/* Report as active until actually asleep */
+		rc = DX_SEP_POWER_ACTIVE;
+		break;
+	case DX_SEP_STATE_DONE_SLEEP_MODE:
+		rc = DX_SEP_POWER_HIBERNATED;
+		break;
+	case DX_SEP_STATE_OFF:
+		rc = DX_SEP_POWER_OFF;
+		break;
+	case DX_SEP_STATE_FATAL_ERROR:
+	default:/* Any state not supposed to happen for the driver */
+		rc = DX_SEP_POWER_INVALID;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(dx_sep_power_state_get);
+
+/**
+ * dx_sep_power_init() - Init resources for this module
+ */
+void dx_sep_power_init(struct sep_drvdata *drvdata)
+{
+	power_control.drvdata = drvdata;
+	init_completion(&power_control.state_changed);
+	/* Init. recorded last state */
+	power_control.last_state = GET_SEP_STATE(drvdata);
+	power_control.state_jiffies = jiffies;
+}
+
+/**
+ * dx_sep_power_exit() - Cleanup resources for this module
+ */
+void dx_sep_power_exit(void)
+{
+}
diff --git a/drivers/staging/sep54/sep_power.h b/drivers/staging/sep54/sep_power.h
new file mode 100644
index 0000000..b92bd65
--- /dev/null
+++ b/drivers/staging/sep54/sep_power.h
@@ -0,0 +1,60 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+#ifndef __SEP_POWER_H__
+#define __SEP_POWER_H__
+
+/**
+ * dx_sep_power_init() - Init resources for this module
+ */
+void dx_sep_power_init(struct sep_drvdata *drvdata);
+
+/**
+ * dx_sep_power_exit() - Cleanup resources for this module
+ */
+void dx_sep_power_exit(void);
+
+/**
+ * dx_sep_state_change_handler() - Interrupt handler for SeP state changes
+ * @drvdata:	Associated driver context
+ */
+void dx_sep_state_change_handler(struct sep_drvdata *drvdata);
+
+/**
+ * dx_sep_wait_for_state() - Wait for SeP to reach one of the states reflected
+ *				with given state mask
+ * @state_mask:		The OR of expected SeP states
+ * @timeout_msec:	Timeout of waiting for the state (in millisec.)
+ *
+ * Returns the state reached. In case of wait timeout the returned state
+ * may not be one of the expected states.
+ */
+enum dx_sep_state dx_sep_wait_for_state(u32 state_mask, int timeout_msec);
+
+void dx_sep_pm_runtime_get(void);
+
+void dx_sep_pm_runtime_put(void);
+
+#endif				/* __SEP_POWER_H__ */
diff --git a/drivers/staging/sep54/sep_request.h b/drivers/staging/sep54/sep_request.h
new file mode 100644
index 0000000..dc1f5f0
--- /dev/null
+++ b/drivers/staging/sep54/sep_request.h
@@ -0,0 +1,96 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _SEP_REQUEST_H_
+#define _SEP_REQUEST_H_
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+#include "dx_bitops.h"
+
+#define DX_SEP_REQUEST_GPR_IDX 3
+
+#define DX_SEP_REQUEST_4KB_MASK 0xFFF
+#define DX_SEP_REQUEST_MIN_BUF_SIZE (4*1024)
+#define DX_SEP_REQUEST_MAX_BUF_SIZE (32*1024)
+
+/* Protocol error codes */
+#define DX_SEP_REQUEST_SUCCESS 0x00
+#define DX_SEP_REQUEST_OUT_OF_SYNC_ERR 0x01
+#define DX_SEP_REQUEST_INVALID_REQ_SIZE_ERR 0x02
+#define DX_SEP_REQUEST_INVALID_AGENT_ID_ERR 0x03
+
+/* Sep Request GPR3 format (Sep to Host) */
+#define DX_SEP_REQUEST_AGENT_ID_BIT_OFFSET 0
+#define DX_SEP_REQUEST_AGENT_ID_BIT_SIZE 8
+#define DX_SEP_REQUEST_COUNTER_BIT_OFFSET 8
+#define DX_SEP_REQUEST_COUNTER_BIT_SIZE 8
+#define DX_SEP_REQUEST_REQ_LEN_BIT_OFFSET 16
+#define DX_SEP_REQUEST_REQ_LEN_BIT_SIZE 16
+
+/* Sep Request GPR3 format (Host to Sep) */
+#define DX_SEP_REQUEST_RETURN_CODE_BIT_OFFSET 0
+#define DX_SEP_REQUEST_RETURN_CODE_BIT_SIZE 8
+/* #define DX_SEP_REQUEST_COUNTER_BIT_OFFSET 8 */
+/* #define DX_SEP_REQUEST_COUNTER_BIT_SIZE 8 */
+#define DX_SEP_REQUEST_RESP_LEN_BIT_OFFSET 16
+#define DX_SEP_REQUEST_RESP_LEN_BIT_SIZE 16
+
+/* Get/Set macros */
+#define SEP_REQUEST_GET_AGENT_ID(gpr) BITFIELD_GET(                           \
+	(gpr), DX_SEP_REQUEST_AGENT_ID_BIT_OFFSET,                            \
+	DX_SEP_REQUEST_AGENT_ID_BIT_SIZE)
+#define SEP_REQUEST_SET_AGENT_ID(gpr, val) BITFIELD_SET(                      \
+	(gpr), DX_SEP_REQUEST_AGENT_ID_BIT_OFFSET,                            \
+	DX_SEP_REQUEST_AGENT_ID_BIT_SIZE, (val))
+#define SEP_REQUEST_GET_RETURN_CODE(gpr) BITFIELD_GET(                        \
+	(gpr), DX_SEP_REQUEST_RETURN_CODE_BIT_OFFSET,                         \
+	DX_SEP_REQUEST_RETURN_CODE_BIT_SIZE)
+#define SEP_REQUEST_SET_RETURN_CODE(gpr, val) BITFIELD_SET(                   \
+	(gpr), DX_SEP_REQUEST_RETURN_CODE_BIT_OFFSET,                         \
+	DX_SEP_REQUEST_RETURN_CODE_BIT_SIZE, (val))
+#define SEP_REQUEST_GET_COUNTER(gpr) BITFIELD_GET(                            \
+	(gpr), DX_SEP_REQUEST_COUNTER_BIT_OFFSET,                             \
+	DX_SEP_REQUEST_COUNTER_BIT_SIZE)
+#define SEP_REQUEST_SET_COUNTER(gpr, val) BITFIELD_SET(                       \
+	(gpr), DX_SEP_REQUEST_COUNTER_BIT_OFFSET,                             \
+	DX_SEP_REQUEST_COUNTER_BIT_SIZE, (val))
+#define SEP_REQUEST_GET_REQ_LEN(gpr) BITFIELD_GET(                            \
+	(gpr), DX_SEP_REQUEST_REQ_LEN_BIT_OFFSET,                             \
+	DX_SEP_REQUEST_REQ_LEN_BIT_SIZE)
+#define SEP_REQUEST_SET_REQ_LEN(gpr, val) BITFIELD_SET(                       \
+	(gpr), DX_SEP_REQUEST_REQ_LEN_BIT_OFFSET,                             \
+	DX_SEP_REQUEST_REQ_LEN_BIT_SIZE, (val))
+#define SEP_REQUEST_GET_RESP_LEN(gpr) BITFIELD_GET(                           \
+	(gpr), DX_SEP_REQUEST_RESP_LEN_BIT_OFFSET,                            \
+	DX_SEP_REQUEST_RESP_LEN_BIT_SIZE)
+#define SEP_REQUEST_SET_RESP_LEN(gpr, val) BITFIELD_SET(                      \
+	(gpr), DX_SEP_REQUEST_RESP_LEN_BIT_OFFSET,                            \
+	DX_SEP_REQUEST_RESP_LEN_BIT_SIZE, (val))
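+
+/*
+ * Illustrative sketch (not part of this header): composing the host-to-SeP
+ * GPR word for a successful response of resp_len bytes, echoing the
+ * request counter:
+ *
+ *	u32 gpr = 0;
+ *
+ *	SEP_REQUEST_SET_COUNTER(gpr, req_counter);
+ *	SEP_REQUEST_SET_RESP_LEN(gpr, resp_len);
+ *	SEP_REQUEST_SET_RETURN_CODE(gpr, DX_SEP_REQUEST_SUCCESS);
+ */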
+
+#endif /*_SEP_REQUEST_H_*/
diff --git a/drivers/staging/sep54/sep_request_mgr.c b/drivers/staging/sep54/sep_request_mgr.c
new file mode 100644
index 0000000..681ddd0
--- /dev/null
+++ b/drivers/staging/sep54/sep_request_mgr.c
@@ -0,0 +1,515 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_SEP_REQUEST
+
+#include <linux/sched.h>
+/*#include <linux/export.h>*/
+#include "dx_driver.h"
+#include "dx_bitops.h"
+#include "sep_log.h"
+#include "dx_reg_base_host.h"
+#include "dx_host.h"
+#define DX_CC_HOST_VIRT	/* must be defined before including dx_cc_regs.h */
+#include "dx_cc_regs.h"
+#include "sep_request.h"
+#include "sep_request_mgr.h"
+#include "dx_sep_kapi.h"
+
+/* The request/response coherent buffer size */
+#define DX_SEP_REQUEST_BUF_SIZE (4*1024)
+#if (DX_SEP_REQUEST_BUF_SIZE < DX_SEP_REQUEST_MIN_BUF_SIZE)
+#error DX_SEP_REQUEST_BUF_SIZE too small
+#endif
+#if (DX_SEP_REQUEST_BUF_SIZE > DX_SEP_REQUEST_MAX_BUF_SIZE)
+#error DX_SEP_REQUEST_BUF_SIZE too big
+#endif
+#if (DX_SEP_REQUEST_BUF_SIZE & DX_SEP_REQUEST_4KB_MASK)
+#error DX_SEP_REQUEST_BUF_SIZE must be a 4KB multiple
+#endif
+
+/* The maximum number of sep request agents */
+/* Valid IDs are 0 to (DX_SEP_REQUEST_MAX_AGENTS-1) */
+#define DX_SEP_REQUEST_MAX_AGENTS 4
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+/* Sep Request state object */
+static struct {
+	u8 *sep_req_buf_p;
+	dma_addr_t sep_req_buf_dma;
+	u8 *host_resp_buf_p;
+	dma_addr_t host_resp_buf_dma;
+	u8 req_counter;
+	wait_queue_head_t agent_event[DX_SEP_REQUEST_MAX_AGENTS];
+	bool agent_valid[DX_SEP_REQUEST_MAX_AGENTS];
+	bool agent_busy[DX_SEP_REQUEST_MAX_AGENTS];
+	bool request_pending;
+	u32 *sep_host_gpr_adr;
+	u32 *host_sep_gpr_adr;
+} sep_req_state;
+
+/* TODO:
+   1) request_pending should use agent id instead of a global flag
+   2) agent id 0 should be changed to non-valid
+   3) Change sep request params for sep init to [] array instead pointer
+   4) Consider usage of a mutex for syncing all access to the state
+*/
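+
+/*
+ * Protocol sketch (an illustrative summary of the handlers below):
+ *  1. SeP places a request in sep_req_buf_p and writes its GPR with
+ *     {agent_id, req_counter + 1, req_len}.
+ *  2. dx_sep_req_handler() validates the counter, agent ID and length,
+ *     then wakes the agent blocked in dx_sep_req_wait_for_request().
+ *  3. The agent copies the request, builds a response in host_resp_buf_p
+ *     and acknowledges via the host-to-SeP GPR with
+ *     {return_code, req_counter, resp_len}.
+ */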
+
+/**
+ * dx_sep_req_handler() - SeP request interrupt handler
+ * @drvdata: The driver private info
+ */
+void dx_sep_req_handler(struct sep_drvdata *drvdata)
+{
+	u8 agent_id;
+	u32 gpr_val;
+	u32 sep_req_error = DX_SEP_REQUEST_SUCCESS;
+	u32 counter_val;
+	u32 req_len;
+
+	/* Read GPR3 value */
+	gpr_val = READ_REGISTER(drvdata->cc_base +
+				SEP_HOST_GPR_REG_OFFSET
+				(DX_SEP_REQUEST_GPR_IDX));
+
+	/* Parse the new gpr value */
+	counter_val = SEP_REQUEST_GET_COUNTER(gpr_val);
+	agent_id = SEP_REQUEST_GET_AGENT_ID(gpr_val);
+	req_len = SEP_REQUEST_GET_REQ_LEN(gpr_val);
+
+	/* Increase the req_counter value in the state structure */
+	sep_req_state.req_counter++;
+
+	if (unlikely(counter_val != sep_req_state.req_counter))
+		/* Verify new req_counter value is equal to the req_counter
+		 * value from the state. If not, proceed to critical error flow
+		 * below with error code SEP_REQUEST_OUT_OF_SYNC_ERR. */
+		sep_req_error = DX_SEP_REQUEST_OUT_OF_SYNC_ERR;
+	else if (unlikely((agent_id >= DX_SEP_REQUEST_MAX_AGENTS) ||
+			  (!sep_req_state.agent_valid[agent_id])))
+		/* Verify that the SeP Req Agent ID is registered in the LUT, if
+		 * not proceed to critical error flow below with error code
+		 * SEP_REQUEST_INVALID_AGENT_ID_ERR. */
+		sep_req_error = DX_SEP_REQUEST_INVALID_AGENT_ID_ERR;
+	else if (unlikely(req_len > DX_SEP_REQUEST_BUF_SIZE))
+		/* Verify the request length is not bigger than the maximum
+		 * allocated request buffer size. If bigger, proceed to critical
+		 * error flow below with the SEP_REQUEST_INVALID_REQ_SIZE_ERR
+		 * error code. */
+		sep_req_error = DX_SEP_REQUEST_INVALID_REQ_SIZE_ERR;
+
+	if (likely(sep_req_error == DX_SEP_REQUEST_SUCCESS)) {
+		/* Signal the wake up event according to the LUT */
+		sep_req_state.agent_busy[agent_id] = true;
+		wake_up_interruptible(&sep_req_state.agent_event[agent_id]);
+	} else {
+		/* Critical error flow */
+
+		/* Build the new GPR3 value out of the req_counter from the
+		 * state, the error condition and zero response length value */
+		gpr_val = 0;
+		SEP_REQUEST_SET_COUNTER(gpr_val, sep_req_state.req_counter);
+		SEP_REQUEST_SET_RESP_LEN(gpr_val, 0);
+		SEP_REQUEST_SET_RETURN_CODE(gpr_val, sep_req_error);
+		WRITE_REGISTER(drvdata->cc_base +
+			       HOST_SEP_GPR_REG_OFFSET(DX_SEP_REQUEST_GPR_IDX),
+			       gpr_val);
+	}
+}
+
+/**
+ * dx_sep_req_register_agent() - Register an agent
+ * @agent_id: The agent ID
+ * @max_buf_size: A pointer to the max buffer size
+ *
+ * Returns int 0 on success
+ */
+int dx_sep_req_register_agent(u8 agent_id, u32 *max_buf_size)
+{
+	pr_debug("Regsiter SeP Request agent (id=%d)\n", agent_id);
+
+	/* Validate agent ID is in range */
+	if (agent_id >= DX_SEP_REQUEST_MAX_AGENTS) {
+		pr_err("Invalid agent ID\n");
+		return -EINVAL;
+	}
+
+	/* Verify SeP Req Agent ID is not valid */
+	if (sep_req_state.agent_valid[agent_id] == true) {
+		pr_err("Agent already registered\n");
+		return -EINVAL;
+	}
+
+	/* Verify max_buf_size pointer is not NULL */
+	if (max_buf_size == NULL) {
+		pr_err("max_buf_size is NULL\n");
+		return -EINVAL;
+	}
+
+	/* Set "agent_valid" field to TRUE */
+	sep_req_state.agent_valid[agent_id] = true;
+
+	/* Return the request/response max buffer size */
+	*max_buf_size = DX_SEP_REQUEST_BUF_SIZE;
+
+	return 0;
+}
+EXPORT_SYMBOL(dx_sep_req_register_agent);
+
+/**
+ * dx_sep_req_unregister_agent() - Unregister an agent
+ * @agent_id: The agent ID
+ *
+ * Returns int 0 on success
+ */
+int dx_sep_req_unregister_agent(u8 agent_id)
+{
+	pr_debug("Unregsiter SeP Request agent (id=%d)\n", agent_id);
+
+	/* Validate agent ID is in range */
+	if (agent_id >= DX_SEP_REQUEST_MAX_AGENTS) {
+		pr_err("Invalid agent ID\n");
+		return -EINVAL;
+	}
+
+	/* Verify SeP Req Agent ID is valid */
+	if (sep_req_state.agent_valid[agent_id] == false) {
+		pr_err("Agent not registered\n");
+		return -EINVAL;
+	}
+
+	/* Verify SeP agent is not busy */
+	if (sep_req_state.agent_busy[agent_id] == true) {
+		pr_err("Agent is busy\n");
+		return -EBUSY;
+	}
+
+	/* Set "agent_valid" field to FALSE */
+	sep_req_state.agent_valid[agent_id] = false;
+
+	return 0;
+}
+EXPORT_SYMBOL(dx_sep_req_unregister_agent);
+
+/**
+ * dx_sep_req_wait_for_request() - Wait for an incoming SeP request
+ * @agent_id: The agent ID
+ * @sep_req_buf_p: Pointer to the buffer that receives the incoming request
+ * @req_buf_size: Pointer holding the size of the caller's request buffer
+ * @timeout: Time to wait for an incoming request, in jiffies
+ *
+ * Returns int 0 on success
+ */
+int dx_sep_req_wait_for_request(u8 agent_id, u8 *sep_req_buf_p,
+				u32 *req_buf_size, u32 timeout)
+{
+	u32 gpr_val;
+
+	pr_debug("Wait for sep request\n");
+	pr_debug("agent_id=%d sep_req_buf_p=0x%p timeout=%d\n",
+		 agent_id, sep_req_buf_p, timeout);
+
+	/* Validate agent ID is in range */
+	if (agent_id >= DX_SEP_REQUEST_MAX_AGENTS) {
+		pr_err("Invalid agent ID\n");
+		return -EINVAL;
+	}
+
+	/* Verify SeP Req Agent ID is valid */
+	if (sep_req_state.agent_valid[agent_id] == false) {
+		pr_err("Agent not registered\n");
+		return -EINVAL;
+	}
+
+	/* Verify SeP agent is not busy */
+	if (sep_req_state.agent_busy[agent_id] == true) {
+		pr_err("Agent is busy\n");
+		return -EBUSY;
+	}
+
+	/* Verify that another sep request is not pending */
+	if (sep_req_state.request_pending == true) {
+		pr_err("Another request is already pending\n");
+		return -EBUSY;
+	}
+
+	/* Verify sep_req_buf_p pointer is not NULL */
+	if (sep_req_buf_p == NULL) {
+		pr_err("sep_req_buf_p is NULL\n");
+		return -EINVAL;
+	}
+
+	/* Verify req_buf_size pointer is not NULL */
+	if (req_buf_size == NULL) {
+		pr_err("req_buf_size is NULL\n");
+		return -EINVAL;
+	}
+
+	/* Verify *req_buf_size is not zero and not bigger than the
+	 * allocated request buffer */
+	if ((*req_buf_size == 0) || (*req_buf_size > DX_SEP_REQUEST_BUF_SIZE)) {
+		pr_err("Invalid request buffer size\n");
+		return -EINVAL;
+	}
+
+	/* Wait for an incoming request. wait_event_interruptible_timeout()
+	 * returns 0 on timeout and a negative value when interrupted by a
+	 * signal; treat both cases as failure. */
+	if (wait_event_interruptible_timeout(
+			sep_req_state.agent_event[agent_id],
+			sep_req_state.agent_busy[agent_id] == true,
+			timeout) <= 0) {
+		sep_req_state.agent_busy[agent_id] = false;
+
+		pr_err("Request wait timed-out or was interrupted\n");
+		return -EAGAIN;
+	}
+
+	/* Set "request_pending" field to TRUE */
+	sep_req_state.request_pending = true;
+
+	gpr_val = READ_REGISTER(sep_req_state.sep_host_gpr_adr);
+
+	/* If the request length is bigger than the caller's specified
+	 * buffer size, the request is only partially copied to the caller's
+	 * buffer (just the first relevant bytes) and the caller is not
+	 * notified of an error; the remaining bytes in the caller's request
+	 * buffer are left as is, without clearing.
+	 * If the request length is smaller than the caller's specified buffer
+	 * size, only the relevant bytes from the allocated kernel request
+	 * buffer are copied to the caller's request buffer */
+	memcpy(sep_req_buf_p, sep_req_state.sep_req_buf_p,
+	       MIN(*req_buf_size, SEP_REQUEST_GET_REQ_LEN(gpr_val)));
+
+	return 0;
+}
+EXPORT_SYMBOL(dx_sep_req_wait_for_request);
+
+/**
+ * dx_sep_req_send_response() - Send a response to the sep
+ * @agent_id: The agent ID
+ * @host_resp_buf_p: Pointer to the outgoing response buffer
+ * @resp_buf_size: The outgoing response size in bytes
+ *
+ * Returns int 0 on success
+ */
+int dx_sep_req_send_response(u8 agent_id, u8 *host_resp_buf_p,
+			     u32 resp_buf_size)
+{
+	u32 gpr_val;
+
+	pr_debug("Send host response\n");
+	pr_debug("agent_id=%d host_resp_buf_p=0x%p resp_buf_size=%d\n",
+		 agent_id, host_resp_buf_p, resp_buf_size);
+
+	/* Validate agent ID is in range */
+	if (agent_id >= DX_SEP_REQUEST_MAX_AGENTS) {
+		pr_err("Invalid agent ID\n");
+		return -EINVAL;
+	}
+
+	/* Verify SeP Req Agent ID is valid */
+	if (sep_req_state.agent_valid[agent_id] == false) {
+		pr_err("Agent not registered\n");
+		return -EINVAL;
+	}
+
+	/* Verify SeP agent is busy */
+	if (sep_req_state.agent_busy[agent_id] == false) {
+		pr_err("Agent is not busy\n");
+		return -EBUSY;
+	}
+
+	/* Verify that a sep request is pending */
+	if (sep_req_state.request_pending == false) {
+		pr_err("No requests are pending\n");
+		return -EBUSY;
+	}
+
+	/* Verify host_resp_buf_p pointer is not NULL */
+	if (host_resp_buf_p == NULL) {
+		pr_err("host_resp_buf_p is NULL\n");
+		return -EINVAL;
+	}
+
+	/* Verify resp_buf_size is not zero and not bigger than the allocated
+	 * request buffer */
+	if ((resp_buf_size == 0) || (resp_buf_size > DX_SEP_REQUEST_BUF_SIZE)) {
+		pr_err("Invalid response buffer size\n");
+		return -EINVAL;
+	}
+
+	/* The response is copied from the caller's buffer to the global
+	 * response buffer, only up to the caller's response length */
+	memcpy(sep_req_state.host_resp_buf_p, host_resp_buf_p, resp_buf_size);
+
+	/* Clear the request message buffer */
+	memset(sep_req_state.sep_req_buf_p, 0, DX_SEP_REQUEST_BUF_SIZE);
+
+	/* Clear the response message buffer for all remaining bytes
+	 * beyond the response buffer actual size */
+	memset(sep_req_state.host_resp_buf_p + resp_buf_size, 0,
+	       DX_SEP_REQUEST_BUF_SIZE - resp_buf_size);
+
+	/* Build the new GPR3 value out of the req_counter from the state,
+	 * the response length and the DX_SEP_REQUEST_SUCCESS return code.
+	 * Place the value in GPR3. */
+	gpr_val = 0;
+	SEP_REQUEST_SET_COUNTER(gpr_val, sep_req_state.req_counter);
+	SEP_REQUEST_SET_RESP_LEN(gpr_val, resp_buf_size);
+	SEP_REQUEST_SET_RETURN_CODE(gpr_val, DX_SEP_REQUEST_SUCCESS);
+	WRITE_REGISTER(sep_req_state.host_sep_gpr_adr, gpr_val);
+
+	/* Set "agent_busy" field to TRUE */
+	sep_req_state.agent_busy[agent_id] = false;
+
+	/* Set "request_pending" field to TRUE */
+	sep_req_state.request_pending = false;
+
+	return 0;
+}
+EXPORT_SYMBOL(dx_sep_req_send_response);
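+
+/*
+ * Usage sketch (illustrative only): a kernel-side agent would typically
+ * drive this API in the order below. MY_AGENT_ID and my_handle_request()
+ * are hypothetical placeholders, not part of this driver, and allocation
+ * error handling is elided for brevity.
+ *
+ *	u32 max_size, req_size, resp_size;
+ *	u8 *req_buf, *resp_buf;
+ *	int rc;
+ *
+ *	rc = dx_sep_req_register_agent(MY_AGENT_ID, &max_size);
+ *	if (rc != 0)
+ *		return rc;
+ *	req_buf = kmalloc(max_size, GFP_KERNEL);
+ *	resp_buf = kmalloc(max_size, GFP_KERNEL);
+ *	req_size = max_size;
+ *	rc = dx_sep_req_wait_for_request(MY_AGENT_ID, req_buf, &req_size,
+ *					 msecs_to_jiffies(1000));
+ *	if (rc == 0) {
+ *		resp_size = my_handle_request(req_buf, resp_buf, max_size);
+ *		rc = dx_sep_req_send_response(MY_AGENT_ID, resp_buf,
+ *					      resp_size);
+ *	}
+ *	kfree(resp_buf);
+ *	kfree(req_buf);
+ *	dx_sep_req_unregister_agent(MY_AGENT_ID);
+ */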
+
+/**
+ * dx_sep_req_get_sep_init_params() - Setup sep init params
+ * @sep_request_params: The sep init parameters array
+ */
+void dx_sep_req_get_sep_init_params(u32 *sep_request_params)
+{
+	sep_request_params[0] = (u32) sep_req_state.sep_req_buf_dma;
+	sep_request_params[1] = (u32) sep_req_state.host_resp_buf_dma;
+	sep_request_params[2] = DX_SEP_REQUEST_BUF_SIZE;
+}
+
+/**
+ * dx_sep_req_enable() - Enable the sep request interrupt handling
+ * @drvdata: Driver private data
+ */
+void dx_sep_req_enable(struct sep_drvdata *drvdata)
+{
+	/* Clear pending interrupts in the SeP request GPRs
+	 * (leftovers from SeP init. writes to these GPRs) */
+	WRITE_REGISTER(drvdata->cc_base + DX_CC_REG_OFFSET(HOST, ICR),
+		       SEP_HOST_GPR_IRQ_MASK(DX_SEP_REQUEST_GPR_IDX));
+
+	/* set IMR register */
+	drvdata->irq_mask |= SEP_HOST_GPR_IRQ_MASK(DX_SEP_REQUEST_GPR_IDX);
+	WRITE_REGISTER(drvdata->cc_base + DX_CC_REG_OFFSET(HOST, IMR),
+		       ~drvdata->irq_mask);
+}
+
+/**
+ * dx_sep_req_init() - Initialize the sep request state
+ * @drvdata: Driver private data
+ */
+int dx_sep_req_init(struct sep_drvdata *drvdata)
+{
+	int i;
+
+	pr_debug("Initialize SeP Request state\n");
+
+	sep_req_state.request_pending = false;
+	sep_req_state.req_counter = 0;
+
+	for (i = 0; i < DX_SEP_REQUEST_MAX_AGENTS; i++) {
+		sep_req_state.agent_valid[i] = false;
+		sep_req_state.agent_busy[i] = false;
+		init_waitqueue_head(&sep_req_state.agent_event[i]);
+	}
+
+	/* allocate coherent request buffer */
+	sep_req_state.sep_req_buf_p = dma_alloc_coherent(drvdata->dev,
+						 DX_SEP_REQUEST_BUF_SIZE,
+						 &sep_req_state.
+						 sep_req_buf_dma,
+						 GFP_KERNEL);
+	pr_debug("sep_req_buf_dma=0x%08X sep_req_buf_p=0x%p size=0x%08X\n",
+		      (u32)sep_req_state.sep_req_buf_dma,
+		      sep_req_state.sep_req_buf_p, DX_SEP_REQUEST_BUF_SIZE);
+
+	if (sep_req_state.sep_req_buf_p == NULL) {
+		pr_err("Unable to allocate coherent request buffer\n");
+		return -ENOMEM;
+	}
+
+	/* Clear the request buffer */
+	memset(sep_req_state.sep_req_buf_p, 0, DX_SEP_REQUEST_BUF_SIZE);
+
+	/* allocate coherent response buffer */
+	sep_req_state.host_resp_buf_p = dma_alloc_coherent(drvdata->dev,
+						   DX_SEP_REQUEST_BUF_SIZE,
+						   &sep_req_state.
+						   host_resp_buf_dma,
+						   GFP_KERNEL);
+	pr_debug(
+		      "host_resp_buf_dma=0x%08X host_resp_buf_p=0x%p size=0x%08X\n",
+		      (u32)sep_req_state.host_resp_buf_dma,
+		      sep_req_state.host_resp_buf_p, DX_SEP_REQUEST_BUF_SIZE);
+
+	if (sep_req_state.host_resp_buf_p == NULL) {
+		pr_err("Unable to allocate coherent response buffer\n");
+		/* Avoid leaking the request buffer allocated above */
+		dma_free_coherent(drvdata->dev, DX_SEP_REQUEST_BUF_SIZE,
+				  sep_req_state.sep_req_buf_p,
+				  sep_req_state.sep_req_buf_dma);
+		return -ENOMEM;
+	}
+
+	/* Clear the response buffer */
+	memset(sep_req_state.host_resp_buf_p, 0, DX_SEP_REQUEST_BUF_SIZE);
+
+	/* Setup the GPR address */
+	sep_req_state.sep_host_gpr_adr = drvdata->cc_base +
+	    SEP_HOST_GPR_REG_OFFSET(DX_SEP_REQUEST_GPR_IDX);
+
+	sep_req_state.host_sep_gpr_adr = drvdata->cc_base +
+	    HOST_SEP_GPR_REG_OFFSET(DX_SEP_REQUEST_GPR_IDX);
+
+	return 0;
+}
+
+/**
+ * dx_sep_req_fini() - Finalize the sep request state
+ * @drvdata: Driver private data
+ */
+void dx_sep_req_fini(struct sep_drvdata *drvdata)
+{
+	int i;
+
+	pr_debug("Finalize SeP Request state\n");
+
+	sep_req_state.request_pending = false;
+	sep_req_state.req_counter = 0;
+	for (i = 0; i < DX_SEP_REQUEST_MAX_AGENTS; i++) {
+		sep_req_state.agent_valid[i] = false;
+		sep_req_state.agent_busy[i] = false;
+	}
+
+	dma_free_coherent(drvdata->dev, DX_SEP_REQUEST_BUF_SIZE,
+			  sep_req_state.sep_req_buf_p,
+			  sep_req_state.sep_req_buf_dma);
+
+	dma_free_coherent(drvdata->dev, DX_SEP_REQUEST_BUF_SIZE,
+			  sep_req_state.host_resp_buf_p,
+			  sep_req_state.host_resp_buf_dma);
+}
diff --git a/drivers/staging/sep54/sep_request_mgr.h b/drivers/staging/sep54/sep_request_mgr.h
new file mode 100644
index 0000000..6d79651
--- /dev/null
+++ b/drivers/staging/sep54/sep_request_mgr.h
@@ -0,0 +1,63 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _SEP_REQUEST_KERNEL_API_H_
+#define _SEP_REQUEST_KERNEL_API_H_
+
+#include <linux/types.h>
+#include "dx_driver.h"
+
+/**
+ * dx_sep_req_handler() - SeP request interrupt handler
+ * @drvdata: The driver private info
+ */
+void dx_sep_req_handler(struct sep_drvdata *drvdata);
+
+/**
+ * dx_sep_req_get_sep_init_params() - Setup sep init params
+ * @sep_request_params: The sep init parameters array
+ */
+void dx_sep_req_get_sep_init_params(u32 *sep_request_params);
+
+/**
+ * dx_sep_req_enable() - Enable the sep request interrupt handling
+ * @drvdata: Driver private data
+ */
+void dx_sep_req_enable(struct sep_drvdata *drvdata);
+
+/**
+ * dx_sep_req_init() - Initialize the sep request state
+ * @drvdata: Driver private data
+ */
+int dx_sep_req_init(struct sep_drvdata *drvdata);
+
+/**
+ * dx_sep_req_fini() - Finalize the sep request state
+ * @drvdata: Driver private data
+ */
+void dx_sep_req_fini(struct sep_drvdata *drvdata);
+
+#endif /*_SEP_REQUEST_KERNEL_API_H_*/
diff --git a/drivers/staging/sep54/sep_rpc.h b/drivers/staging/sep54/sep_rpc.h
new file mode 100644
index 0000000..3199a14
--- /dev/null
+++ b/drivers/staging/sep54/sep_rpc.h
@@ -0,0 +1,109 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef __SEP_RPC_H__
+#define __SEP_RPC_H__
+
+/* SeP RPC infrastructure API */
+#ifdef __KERNEL__
+#include <linux/types.h>
+
+#else
+#include "dx_pal_types.h"
+
+#endif /*__KERNEL__*/
+
+/* Maximum size of SeP RPC message in bytes */
+#define SEP_RPC_MAX_MSG_SIZE 8191
+#define SEP_RPC_MAX_WORKSPACE_SIZE 8191
+
+/* The maximum allowed user memory references per function
+   (CRYS requires only 2, but GPAPI/TEE needs up to 4) */
+#define SEP_RPC_MAX_MEMREF_PER_FUNC 4
+
+/* If this macro is not provided by the includer of this file,
+   log messages are silently dropped */
+#ifndef SEP_RPC_LOG
+#define SEP_RPC_LOG(format, ...) do {} while (0)
+#endif
+
+#define SEP_RPC_ASSERT(cond, inval_param_retcode) {\
+	if (!(cond)) {\
+		SEP_RPC_LOG("SEP_RPC_ASSERT: %s\n", #cond);\
+		return inval_param_retcode;\
+	} \
+}
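+
+/*
+ * Usage sketch (hypothetical, for illustration only): an RPC function
+ * handler validating its inputs with SEP_RPC_ASSERT before doing work.
+ *
+ *	enum seprpc_retcode my_rpc_func(struct seprpc_params *params)
+ *	{
+ *		SEP_RPC_ASSERT(params != NULL, SEPRPC_RET_EINVAL);
+ *		SEP_RPC_ASSERT(le32_to_cpu(params->num_of_memrefs) <=
+ *			       SEP_RPC_MAX_MEMREF_PER_FUNC, SEPRPC_RET_EINVAL);
+ *		return SEPRPC_RET_OK;
+ *	}
+ */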
+
+/* NOTE:
+   All data must be in little-endian (SeP) byte order */
+
+enum seprpc_retcode {
+	SEPRPC_RET_OK = 0,
+	SEPRPC_RET_ERROR,	/*Generic error code (not one of the others) */
+	SEPRPC_RET_EINVAL_AGENT,	/* Unknown agent ID */
+	SEPRPC_RET_EINVAL_FUNC,	/* Unknown function ID for given agent */
+	SEPRPC_RET_EINVAL,	/* Invalid parameter */
+	SEPRPC_RET_ENORSC,	/* Not enough resources to complete request */
+	SEPRPC_RET_RESERVE32 = 0x7FFFFFFF	/* assure this enum is 32b */
+};
+
+enum seprpc_memref_type {
+	SEPRPC_MEMREF_NULL = 0,	/* Invalid memory reference */
+	SEPRPC_MEMREF_EMBED = 1,/* Data embedded in parameters message */
+	SEPRPC_MEMREF_DLLI = 2,
+	SEPRPC_MEMREF_MLLI = 3,
+	SEPRPC_MEMREF_MAX = SEPRPC_MEMREF_MLLI,
+	SEPRPC_MEMREF_RESERVE32 = 0x7FFFFFFF	/* assure this enum is 32b */
+};
+
+#pragma pack(push)
+#pragma pack(4)
+/* A structure to pass a host memory reference */
+struct seprpc_memref {
+	enum seprpc_memref_type ref_type;
+	u32 location;
+	u32 size;
+	u32 count;
+	/* SEPRPC_MEMREF_EMBED: location= offset in seprpc_params.
+	 * size= data size in bytes. count= N/A */
+	/* SEPRPC_MEMREF_DLLI: location= DMA address of data in host memory.
+	 * size= data size in bytes. count= N/A. */
+	/* SEPRPC_MEMREF_MLLI: location= DMA address of first MLLI table.
+	 * size= size in bytes of first table.
+	 * count= Num. of MLLI tables. */
+};
+
+struct seprpc_params {
+	u32 num_of_memrefs;/* Number of elements in the memRef array */
+	struct seprpc_memref memref[1];
+	/* This array is actually of length num_of_memrefs
+	 * (memref[1] above is just a placeholder) */
+	/* Following this array come the function-specific parameters */
+} __attribute__ ((__may_alias__));
+
+#pragma pack(pop)
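+
+/*
+ * Layout sketch (illustrative assumption, not a mandated format): building
+ * a message with a single embedded (SEPRPC_MEMREF_EMBED) parameter, where
+ * the embedded data is placed right after the memref array in the same
+ * message buffer. msg_buf and data_len are hypothetical.
+ *
+ *	struct seprpc_params *p = (struct seprpc_params *)msg_buf;
+ *
+ *	p->num_of_memrefs = cpu_to_le32(1);
+ *	p->memref[0].ref_type = SEPRPC_MEMREF_EMBED;
+ *	p->memref[0].location = cpu_to_le32(sizeof(*p));
+ *	p->memref[0].size = cpu_to_le32(data_len);
+ *	p->memref[0].count = 0;
+ */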
+
+#endif /*__SEP_RPC_H__*/
diff --git a/drivers/staging/sep54/sep_sram_map.h b/drivers/staging/sep54/sep_sram_map.h
new file mode 100644
index 0000000..2c7db45
--- /dev/null
+++ b/drivers/staging/sep54/sep_sram_map.h
@@ -0,0 +1,43 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+
+/* This file contains the definitions of the OTP data that the SEP copies
+into the SRAM during the first boot process */
+
+
+#ifndef _SEP_SRAM_MAP_
+#define _SEP_SRAM_MAP_
+
+#define DX_FIRST_OEM_KEY_OFFSET_IN_SRAM         0x0
+#define DX_SECOND_OEM_KEY_OFFSET_IN_SRAM        0x4
+#define DX_THIRD_OEM_KEY_OFFSET_IN_SRAM         0x8
+#define DX_LCS_OFFSET_IN_SRAM                   0xC
+#define DX_MISC_OFFSET_IN_SRAM                  0xD
+#define DX_CC_INIT_MSG_OFFSET_IN_SRAM		0x100
+#define DX_PKA_MEMORY_OFFSET_IN_SRAM		0x200
+
+#endif /*_SEP_SRAM_MAP_*/
diff --git a/drivers/staging/sep54/sep_sw_desc.h b/drivers/staging/sep54/sep_sw_desc.h
new file mode 100644
index 0000000..8538c4f
--- /dev/null
+++ b/drivers/staging/sep54/sep_sw_desc.h
@@ -0,0 +1,468 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _SEP_SW_DESC_H_
+#define _SEP_SW_DESC_H_
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+#include "dx_bitops.h"
+#include "sep_rpc.h"
+
+/* Common descriptor fields access (type independent) */
+/* To be used with fields: TYPE, RET_CODE, COOKIE     */
+#define SEP_SW_DESC_GET(desc_p, desc_field) BITFIELD_GET(                     \
+	((u32 *)(desc_p))[SEP_SW_DESC_ ## desc_field ## _WORD_OFFSET],   \
+	SEP_SW_DESC_ ## desc_field ## _BIT_OFFSET,			      \
+	SEP_SW_DESC_ ## desc_field ## _BIT_SIZE)
+#define SEP_SW_DESC_SET(desc_p, desc_field, new_val) BITFIELD_SET(            \
+	((u32 *)(desc_p))[SEP_SW_DESC_ ## desc_field ## _WORD_OFFSET],   \
+	SEP_SW_DESC_ ## desc_field ## _BIT_OFFSET,			      \
+	SEP_SW_DESC_ ## desc_field ## _BIT_SIZE,			      \
+	new_val)
+
+/* Type specific descriptor fields access */
+#define SEP_SW_DESC_GET4TYPE(desc_p, desc_type, desc_field) BITFIELD_GET(     \
+	((u32 *)(desc_p))                                                \
+	[SEP_SW_DESC_ ## desc_type ## _ ## desc_field ## _WORD_OFFSET],	      \
+	SEP_SW_DESC_ ## desc_type ## _ ## desc_field ##  _BIT_OFFSET,	      \
+	SEP_SW_DESC_ ## desc_type ## _ ## desc_field ## _BIT_SIZE)
+#define SEP_SW_DESC_SET4TYPE(desc_p, desc_type, desc_field, new_val)          \
+	BITFIELD_SET(							      \
+	((u32 *)(desc_p))                                                \
+	[SEP_SW_DESC_ ## desc_type ## _ ## desc_field ## _WORD_OFFSET],       \
+	SEP_SW_DESC_ ## desc_type ## _ ## desc_field ##  _BIT_OFFSET,	      \
+	SEP_SW_DESC_ ## desc_type ## _ ## desc_field ## _BIT_SIZE, new_val)
+
+#define SEP_SW_DESC_INIT(desc_p) \
+	memset(desc_p, 0, SEP_SW_DESC_WORD_SIZE * sizeof(u32))
+
+/* Total descriptor size in 32b words */
+#define SEP_SW_DESC_WORD_SIZE 8
+#define SEP_SW_DESC_WORD_SIZE_LOG 3
+
+/***********************************/
+/* Common bit fields definitions   */
+/***********************************/
+ /* Descriptor type: TYPE */
+#define SEP_SW_DESC_TYPE_WORD_OFFSET 0
+#define SEP_SW_DESC_TYPE_BIT_OFFSET 0
+#define SEP_SW_DESC_TYPE_BIT_SIZE 4
+/* Descriptor type encoding */
+enum sep_sw_desc_type {
+	SEP_SW_DESC_TYPE_NULL = 0,
+	SEP_SW_DESC_TYPE_CRYPTO_OP = 0x1,
+	SEP_SW_DESC_TYPE_RPC_MSG = 0x2,
+	SEP_SW_DESC_TYPE_APP_REQ = 0x3,
+	SEP_SW_DESC_TYPE_LOAD_OP = 0x4,
+	SEP_SW_DESC_TYPE_COMBINED_OP = 0x5,
+	SEP_SW_DESC_TYPE_SLEEP_REQ = 0x6,
+	SEP_SW_DESC_TYPE_DEBUG = 0xF
+	    /* Only 4 bits - do not extend to 32b */
+};
+
+enum sep_sw_desc_retcode {
+	SEP_SW_DESC_RET_OK = 0,
+	SEP_SW_DESC_RET_EINVAL_DESC_TYPE	/* Invalid descriptor type */
+};
+
+/* Return code: RET_CODE */
+#define SEP_SW_DESC_RET_CODE_WORD_OFFSET 6
+#define SEP_SW_DESC_RET_CODE_BIT_OFFSET 0
+#define SEP_SW_DESC_RET_CODE_BIT_SIZE 32
+
+/* Descriptor cookie: COOKIE */
+#define SEP_SW_DESC_COOKIE_WORD_OFFSET 7
+#define SEP_SW_DESC_COOKIE_BIT_OFFSET 0
+#define SEP_SW_DESC_COOKIE_BIT_SIZE 32
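+
+/*
+ * Usage sketch of the common-field accessors (my_cookie is a hypothetical
+ * host-side handle): a descriptor is zeroed, stamped with its type and
+ * cookie, and the return code is examined on completion.
+ *
+ *	u32 desc[SEP_SW_DESC_WORD_SIZE];
+ *
+ *	SEP_SW_DESC_INIT(desc);
+ *	SEP_SW_DESC_SET(desc, TYPE, SEP_SW_DESC_TYPE_RPC_MSG);
+ *	SEP_SW_DESC_SET(desc, COOKIE, (u32)my_cookie);
+ *	... enqueue and wait for completion ...
+ *	if (SEP_SW_DESC_GET(desc, RET_CODE) != SEP_SW_DESC_RET_OK)
+ *		... handle error ...
+ */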
+
+/****************************************/
+/* Crypto-Op descriptor type: CRYPTO_OP */
+/****************************************/
+
+/* L bit: Load cache: L */
+#define SEP_SW_DESC_CRYPTO_OP_L_WORD_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_L_BIT_OFFSET 31
+#define SEP_SW_DESC_CRYPTO_OP_L_BIT_SIZE 1
+
+/* I bit: Initialize context: I */
+#define SEP_SW_DESC_CRYPTO_OP_I_WORD_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_I_BIT_OFFSET 30
+#define SEP_SW_DESC_CRYPTO_OP_I_BIT_SIZE 1
+
+/* Process mode: PROC_MODE */
+#define SEP_SW_DESC_CRYPTO_OP_PROC_MODE_WORD_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_PROC_MODE_BIT_OFFSET 28
+#define SEP_SW_DESC_CRYPTO_OP_PROC_MODE_BIT_SIZE 2
+/* Process mode field options */
+enum sep_proc_mode {
+	SEP_PROC_MODE_NOP = 0,
+	SEP_PROC_MODE_PROC_T = 1,	/* Process (Text data) */
+	SEP_PROC_MODE_FIN = 2,	/* Finalize (optional: with text data) */
+	SEP_PROC_MODE_PROC_A = 3	/* Process (Additional/Auth. data) */
+	    /* Only 2b - do not extend to 32b */
+};
+
+/* SeP/FW Cache Index: FW_CACHE_IDX */
+#define SEP_SW_DESC_CRYPTO_OP_FW_CACHE_IDX_WORD_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_FW_CACHE_IDX_BIT_OFFSET 4
+#define SEP_SW_DESC_CRYPTO_OP_FW_CACHE_IDX_BIT_SIZE 8
+
+/* HCB address: HCB_ADDR */
+#define SEP_SW_DESC_CRYPTO_OP_HCB_ADDR_WORD_OFFSET 3
+#define SEP_SW_DESC_CRYPTO_OP_HCB_ADDR_BIT_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_HCB_ADDR_BIT_SIZE 32
+
+/* IFT: IFT_ADDR, IFT_SIZE, IFT_NUM */
+#define SEP_SW_DESC_CRYPTO_OP_IFT_ADDR_WORD_OFFSET 1
+#define SEP_SW_DESC_CRYPTO_OP_IFT_ADDR_BIT_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_IFT_ADDR_BIT_SIZE 32
+#define SEP_SW_DESC_CRYPTO_OP_IFT_SIZE_WORD_OFFSET 4
+#define SEP_SW_DESC_CRYPTO_OP_IFT_SIZE_BIT_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_IFT_SIZE_BIT_SIZE 16
+#define SEP_SW_DESC_CRYPTO_OP_IFT_NUM_WORD_OFFSET 5
+#define SEP_SW_DESC_CRYPTO_OP_IFT_NUM_BIT_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_IFT_NUM_BIT_SIZE 16
+
+/* OFT: OFT_ADDR, OFT_SIZE, OFT_NUM */
+#define SEP_SW_DESC_CRYPTO_OP_OFT_ADDR_WORD_OFFSET 2
+#define SEP_SW_DESC_CRYPTO_OP_OFT_ADDR_BIT_OFFSET 0
+#define SEP_SW_DESC_CRYPTO_OP_OFT_ADDR_BIT_SIZE 32
+#define SEP_SW_DESC_CRYPTO_OP_OFT_SIZE_WORD_OFFSET 4
+#define SEP_SW_DESC_CRYPTO_OP_OFT_SIZE_BIT_OFFSET 16
+#define SEP_SW_DESC_CRYPTO_OP_OFT_SIZE_BIT_SIZE 16
+#define SEP_SW_DESC_CRYPTO_OP_OFT_NUM_WORD_OFFSET 5
+#define SEP_SW_DESC_CRYPTO_OP_OFT_NUM_BIT_OFFSET 16
+#define SEP_SW_DESC_CRYPTO_OP_OFT_NUM_BIT_SIZE 16
+
+/********************************************/
+/* Combined-Op descriptor type: COMBINED_OP */
+/********************************************/
+
+/* L bit: Load cache: L */
+#define SEP_SW_DESC_COMBINED_OP_L_WORD_OFFSET \
+	SEP_SW_DESC_CRYPTO_OP_L_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_L_BIT_OFFSET SEP_SW_DESC_CRYPTO_OP_L_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_L_BIT_SIZE SEP_SW_DESC_CRYPTO_OP_L_BIT_SIZE
+
+/* I bit: Initialize context: I */
+#define SEP_SW_DESC_COMBINED_OP_I_WORD_OFFSET \
+	SEP_SW_DESC_CRYPTO_OP_I_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_I_BIT_OFFSET SEP_SW_DESC_CRYPTO_OP_I_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_I_BIT_SIZE SEP_SW_DESC_CRYPTO_OP_I_BIT_SIZE
+
+/* Process mode: PROC_MODE */
+#define SEP_SW_DESC_COMBINED_OP_PROC_MODE_WORD_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_PROC_MODE_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_PROC_MODE_BIT_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_PROC_MODE_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_PROC_MODE_BIT_SIZE \
+			SEP_SW_DESC_CRYPTO_OP_PROC_MODE_BIT_SIZE
+
+/* Configuration scheme: CONFIG_SCHEME */
+#define SEP_SW_DESC_COMBINED_OP_CONFIG_SCHEME_WORD_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_HCB_ADDR_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_CONFIG_SCHEME_BIT_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_HCB_ADDR_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_CONFIG_SCHEME_BIT_SIZE \
+			SEP_SW_DESC_CRYPTO_OP_HCB_ADDR_BIT_SIZE
+
+/* IFT: IFT_ADDR, IFT_SIZE, IFT_NUM */
+#define SEP_SW_DESC_COMBINED_OP_IFT_ADDR_WORD_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_IFT_ADDR_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_IFT_ADDR_BIT_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_IFT_ADDR_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_IFT_ADDR_BIT_SIZE \
+			SEP_SW_DESC_CRYPTO_OP_IFT_ADDR_BIT_SIZE
+#define SEP_SW_DESC_COMBINED_OP_IFT_SIZE_WORD_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_IFT_SIZE_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_IFT_SIZE_BIT_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_IFT_SIZE_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_IFT_SIZE_BIT_SIZE \
+			SEP_SW_DESC_CRYPTO_OP_IFT_SIZE_BIT_SIZE
+#define SEP_SW_DESC_COMBINED_OP_IFT_NUM_WORD_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_IFT_NUM_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_IFT_NUM_BIT_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_IFT_NUM_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_IFT_NUM_BIT_SIZE \
+			SEP_SW_DESC_CRYPTO_OP_IFT_NUM_BIT_SIZE
+
+/* OFT: OFT_ADDR, OFT_SIZE, OFT_NUM */
+#define SEP_SW_DESC_COMBINED_OP_OFT_ADDR_WORD_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_OFT_ADDR_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_OFT_ADDR_BIT_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_OFT_ADDR_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_OFT_ADDR_BIT_SIZE \
+			SEP_SW_DESC_CRYPTO_OP_OFT_ADDR_BIT_SIZE
+#define SEP_SW_DESC_COMBINED_OP_OFT_SIZE_WORD_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_OFT_SIZE_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_OFT_SIZE_BIT_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_OFT_SIZE_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_OFT_SIZE_BIT_SIZE \
+			SEP_SW_DESC_CRYPTO_OP_OFT_SIZE_BIT_SIZE
+#define SEP_SW_DESC_COMBINED_OP_OFT_NUM_WORD_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_OFT_NUM_WORD_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_OFT_NUM_BIT_OFFSET \
+			SEP_SW_DESC_CRYPTO_OP_OFT_NUM_BIT_OFFSET
+#define SEP_SW_DESC_COMBINED_OP_OFT_NUM_BIT_SIZE \
+			SEP_SW_DESC_CRYPTO_OP_OFT_NUM_BIT_SIZE
+
+/* Combined scheme macros:
+   This set of macros is meant for configuration scheme encoding
+   from the user-level interface to the SeP combined driver.
+*/
+#define SEP_ENGINE_TYPE_BIT_SHIFT 0
+#define SEP_ENGINE_TYPE_BIT_SIZE 4
+#define SEP_ENGINE_SRC_BIT_SHIFT 4
+#define SEP_ENGINE_SRC_BIT_SIZE 4
+#define SEP_ENGINE_SLOT_BIT_SIZE \
+		(SEP_ENGINE_SRC_BIT_SIZE + SEP_ENGINE_TYPE_BIT_SIZE)
+
+/******************************* MACROS ***********************************/
+#define _sep_comb_eng_pack_item(eng_src, eng_type) \
+		(((eng_src) << SEP_ENGINE_SRC_BIT_SHIFT) | \
+		 ((eng_type) << SEP_ENGINE_TYPE_BIT_SHIFT))
+
+#define _sep_comb_eng_pack_n_shift(src, type, slot) \
+		(_sep_comb_eng_pack_item(src, type) << \
+		(slot * SEP_ENGINE_SLOT_BIT_SIZE))
+
+#define sep_comb_eng_props_set(cfg_p, eng_idx, eng_src, eng_type) do { \
+	BITFIELD_SET(*cfg_p, \
+		(eng_idx * SEP_ENGINE_SLOT_BIT_SIZE), \
+		SEP_ENGINE_SLOT_BIT_SIZE, 0); \
+	BITFIELD_SET(*cfg_p, \
+		(eng_idx * SEP_ENGINE_SLOT_BIT_SIZE), \
+		SEP_ENGINE_SLOT_BIT_SIZE, \
+		_sep_comb_eng_pack_item(eng_src, eng_type)); \
+} while (0)
+
+#define sep_comb_eng_props_get(cfg_p, eng_idx, eng_src, eng_type) do { \
+	*(eng_type) = BITFIELD_GET(*cfg_p, \
+				(eng_idx * SEP_ENGINE_SLOT_BIT_SIZE),\
+				SEP_ENGINE_TYPE_BIT_SIZE); \
+	*(eng_src) = BITFIELD_GET(*cfg_p, \
+				(eng_idx * SEP_ENGINE_SLOT_BIT_SIZE) \
+				+ SEP_ENGINE_TYPE_BIT_SIZE, \
+				SEP_ENGINE_SRC_BIT_SIZE); \
+} while (0)
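+
+/*
+ * Usage sketch (illustrative): encoding a two-engine combined chain into a
+ * configuration scheme word and placing it in a COMBINED_OP descriptor.
+ * The ENG_SRC_x and ENG_TYPE_x values are hypothetical placeholders, and
+ * desc is a descriptor buffer as in the sketch for the common accessors.
+ *
+ *	u32 cfg = 0;
+ *
+ *	sep_comb_eng_props_set(&cfg, 0, ENG_SRC_DIN, ENG_TYPE_AES);
+ *	sep_comb_eng_props_set(&cfg, 1, ENG_SRC_ENGINE0, ENG_TYPE_HASH);
+ *	SEP_SW_DESC_SET4TYPE(desc, COMBINED_OP, CONFIG_SCHEME, cfg);
+ */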
+
+/******************************************/
+/* Message-Op descriptor type: RPC_MSG    */
+/******************************************/
+
+/* Agent ID: AGENT_ID */
+#define SEP_SW_DESC_RPC_MSG_AGENT_ID_WORD_OFFSET 1
+#define SEP_SW_DESC_RPC_MSG_AGENT_ID_BIT_OFFSET 0
+#define SEP_SW_DESC_RPC_MSG_AGENT_ID_BIT_SIZE 8
+
+/* Function ID: FUNC_ID */
+#define SEP_SW_DESC_RPC_MSG_FUNC_ID_WORD_OFFSET 1
+#define SEP_SW_DESC_RPC_MSG_FUNC_ID_BIT_OFFSET 16
+#define SEP_SW_DESC_RPC_MSG_FUNC_ID_BIT_SIZE 16
+
+/* HMB: HMB_ADDR , HMB_SIZE */
+#define SEP_SW_DESC_RPC_MSG_HMB_ADDR_WORD_OFFSET 2
+#define SEP_SW_DESC_RPC_MSG_HMB_ADDR_BIT_OFFSET 0
+#define SEP_SW_DESC_RPC_MSG_HMB_ADDR_BIT_SIZE 32
+#define SEP_SW_DESC_RPC_MSG_HMB_SIZE_WORD_OFFSET 3
+#define SEP_SW_DESC_RPC_MSG_HMB_SIZE_BIT_OFFSET 0
+#define SEP_SW_DESC_RPC_MSG_HMB_SIZE_BIT_SIZE 13
+
+/************************************************/
+/* SeP Applet Request descriptor type: APP_REQ  */
+/************************************************/
+
+/* Request Type: REQ_TYPE */
+#define SEP_SW_DESC_APP_REQ_REQ_TYPE_WORD_OFFSET 0
+#define SEP_SW_DESC_APP_REQ_REQ_TYPE_BIT_OFFSET 4
+#define SEP_SW_DESC_APP_REQ_REQ_TYPE_BIT_SIZE 2
+
+/* Session ID: SESSION_ID */
+#define SEP_SW_DESC_APP_REQ_SESSION_ID_WORD_OFFSET 0
+#define SEP_SW_DESC_APP_REQ_SESSION_ID_BIT_OFFSET 16
+#define SEP_SW_DESC_APP_REQ_SESSION_ID_BIT_SIZE 12
+
+/* Internal error: INTERNAL_ERR */
+#define SEP_SW_DESC_APP_REQ_INTERNAL_ERR_WORD_OFFSET 0
+#define SEP_SW_DESC_APP_REQ_INTERNAL_ERR_BIT_OFFSET 31
+#define SEP_SW_DESC_APP_REQ_INTERNAL_ERR_BIT_SIZE 1
+
+/* In-Params. Buffer Address: IN_PARAMS_ADDR */
+#define SEP_SW_DESC_APP_REQ_IN_PARAMS_ADDR_WORD_OFFSET 1
+#define SEP_SW_DESC_APP_REQ_IN_PARAMS_ADDR_BIT_OFFSET 0
+#define SEP_SW_DESC_APP_REQ_IN_PARAMS_ADDR_BIT_SIZE 32
+
+/* Return codes for APP_REQ descriptor */
+enum sepapp_retcode {
+	SEPAPP_RET_OK = 0,
+	SEPAPP_RET_EINVAL_QUEUE,	/* Request sent on the wrong SW queue */
+	SEPAPP_RET_EINVAL,	/* Invalid parameters in descriptor */
+};
+
+/* REQ_TYPE field encoding */
+enum sepapp_req_type {
+	SEPAPP_REQ_TYPE_SESSION_OPEN = 1,
+	SEPAPP_REQ_TYPE_SESSION_CLOSE = 2,
+	SEPAPP_REQ_TYPE_COMMAND_INVOKE = 3
+};
+
+/** in-params. data types **/
+
+#define SEPAPP_UUID_SIZE 16
+#define SEPAPP_MAX_PARAMS 4
+#define SEPAPP_MAX_AUTH_DATA_SIZE 16	/* For Application UUID case */
+
+enum sepapp_param_type {
+	SEPAPP_PARAM_TYPE_NULL = 0,
+	SEPAPP_PARAM_TYPE_VAL = 1,
+	SEPAPP_PARAM_TYPE_MEMREF = 2
+};
+
+enum sepapp_data_direction {
+	SEPAPP_DIR_NULL = 0,
+	SEPAPP_DIR_IN = 1,
+	SEPAPP_DIR_OUT = (1 << 1),
+	SEPAPP_DIR_INOUT = SEPAPP_DIR_IN | SEPAPP_DIR_OUT
+};
+
+/* Descriptor for "by value" parameter */
+struct sepapp_val_param {
+	/*enum sepapp_data_direction */ u8 dir;
+	u32 data[2];
+};
+
+/* Depends on seprpc data type defined in sep_rpc.h */
+union sepapp_client_param {
+	struct sepapp_val_param val;
+	struct seprpc_memref memref;
+};
+
+struct sepapp_client_params {
+	u8 params_types[SEPAPP_MAX_PARAMS];
+	union sepapp_client_param params[SEPAPP_MAX_PARAMS];
+};
+
+/* In-params. for SESSION_OPEN request type */
+struct sepapp_in_params_session_open {
+	u8 app_uuid[SEPAPP_UUID_SIZE];
+	u32 auth_method;
+	u8 auth_data[SEPAPP_MAX_AUTH_DATA_SIZE];
+	struct sepapp_client_params client_params;
+};
+
+struct sepapp_in_params_command_invoke {
+	u32 command_id;
+	struct sepapp_client_params client_params;
+};
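+
+/*
+ * Population sketch (illustrative): a COMMAND_INVOKE in-params block with a
+ * single by-value input parameter. MY_CMD_ID is a hypothetical command ID.
+ *
+ *	struct sepapp_in_params_command_invoke in_params;
+ *
+ *	memset(&in_params, 0, sizeof(in_params));
+ *	in_params.command_id = MY_CMD_ID;
+ *	in_params.client_params.params_types[0] = SEPAPP_PARAM_TYPE_VAL;
+ *	in_params.client_params.params[0].val.dir = SEPAPP_DIR_IN;
+ *	in_params.client_params.params[0].val.data[0] = 42;
+ */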
+
+/* Return codes for SLEEP_REQ descriptor */
+enum sepslp_mode_req_retcode {
+	SEPSLP_MODE_REQ_RET_OK = 0,
+	SEPSLP_MODE_REQ_EGEN,	/* general error */
+	SEPSLP_MODE_REQ_EINVAL_QUEUE,	/* Request sent on the wrong SW queue */
+	SEPSLP_MODE_REQ_EBUSY,/* Request sent while desc. queue is not empty */
+	SEPSLP_MODE_REQ_EABORT	/* Applet requested aborting this request */
+};
+
+/****************************************/
+/* Load-Op descriptor type: LOAD_OP */
+/****************************************/
+
+/* SeP/FW Cache Index: FW_CACHE_IDX */
+#define SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_WORD_OFFSET 1
+#define SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_BIT_OFFSET(slot) ((slot) * 8)
+#define SEP_SW_DESC_LOAD_OP_FW_CACHE_IDX_BIT_SIZE 8
+
+/* HCB address: HCB_ADDR */
+#define SEP_SW_DESC_LOAD_OP_HCB_ADDR_WORD_OFFSET(slot) ((slot) + 2)
+#define SEP_SW_DESC_LOAD_OP_HCB_ADDR_BIT_OFFSET 1
+#define SEP_SW_DESC_LOAD_OP_HCB_ADDR_BIT_SIZE 31
+
+/* L bit: Load cache: L */
+#define SEP_SW_DESC_LOAD_OP_L_WORD_OFFSET(slot) ((slot) + 2)
+#define SEP_SW_DESC_LOAD_OP_L_BIT_OFFSET 0
+#define SEP_SW_DESC_LOAD_OP_L_BIT_SIZE 1
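+
+/*
+ * The LOAD_OP fields are parameterized by cache slot, so the generic
+ * SET4TYPE macro does not apply; slots are filled with BITFIELD_SET
+ * directly. A sketch for marking one slot's HCB for loading (desc and
+ * slot are assumed to come from the surrounding flow):
+ *
+ *	BITFIELD_SET(desc[SEP_SW_DESC_LOAD_OP_L_WORD_OFFSET(slot)],
+ *		     SEP_SW_DESC_LOAD_OP_L_BIT_OFFSET,
+ *		     SEP_SW_DESC_LOAD_OP_L_BIT_SIZE, 1);
+ */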
+
+/*****************************/
+/*** Descriptor copy flows ***/
+/*****************************/
+/* Copy host descriptor scratchpad to descriptor queue buffer */
+#ifdef __BIG_ENDIAN
+
+/* Verify descriptor copy flows assumptions at compile time:
+   assumes "retcode" and "cookie" are the last words */
+#if (SEP_SW_DESC_RET_CODE_WORD_OFFSET != 6)
+#error SW_DESC_RET_CODE location assumption is broken!
+#endif
+#if (SEP_SW_DESC_COOKIE_WORD_OFFSET != 7)
+#error SW_DESC_COOKIE location assumption is broken!
+#endif
+
+#define SEP_SW_DESC_COPY_TO_SEP(queue_desc_p, spad_desc_p) do {	               \
+	u32 *cur_q_desc_word_p = (u32 *)queue_desc_p;                \
+	u32 *cur_spad_desc_word_p = (u32 *)spad_desc_p;              \
+	int i;	                                                               \
+	/* First 6 words are input data to SeP-FW; must be in SeP endianness */\
+	/* Copy the 7th word too in order to init./clear the retcode field */  \
+	for (i = 0; i <= SEP_SW_DESC_RET_CODE_WORD_OFFSET; i++) {              \
+		*cur_q_desc_word_p = cpu_to_le32(*cur_spad_desc_word_p);       \
+		cur_spad_desc_word_p++;                                        \
+		cur_q_desc_word_p++;                                           \
+	}                                                                      \
+	/* Word 8 is the cookie which is referenced only by the host */        \
+	/* No need to swap endianness */                                       \
+	*cur_q_desc_word_p = *cur_spad_desc_word_p;                            \
+} while (0)
+
+/* and vice-versa */
+#define SEP_SW_DESC_COPY_FROM_SEP(spad_desc_p, queue_desc_p) do {              \
+	u32 *cur_q_desc_word_p = (u32 *)queue_desc_p;                \
+	u32 *cur_spad_desc_word_p = (u32 *)spad_desc_p;              \
+	int i;	                                                               \
+	/* First 6 words are input data to SeP-FW in SeP endianness */         \
+	/* Copy 7th word too in order to get retcode field	   */          \
+	for (i = 0; i <= SEP_SW_DESC_RET_CODE_WORD_OFFSET; i++) {              \
+		*cur_spad_desc_word_p = le32_to_cpu(*cur_q_desc_word_p);       \
+		cur_spad_desc_word_p++;                                        \
+		cur_q_desc_word_p++;                                           \
+	}                                                                      \
+	/* Word 8 is the cookie which is referenced only by the host */        \
+	/* No need to swap endianness */                                       \
+	*cur_spad_desc_word_p = *cur_q_desc_word_p;                            \
+} while (0)
+
+#else				/* __LITTLE_ENDIAN - simple memcpy */
+#define SEP_SW_DESC_COPY_TO_SEP(queue_desc_p, spad_desc_p)                     \
+	memcpy(queue_desc_p, spad_desc_p, SEP_SW_DESC_WORD_SIZE<<2)
+
+#define SEP_SW_DESC_COPY_FROM_SEP(spad_desc_p, queue_desc_p)                 \
+	memcpy(spad_desc_p, queue_desc_p, SEP_SW_DESC_WORD_SIZE<<2)
+#endif
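+
+/*
+ * Usage sketch (illustrative): a descriptor is composed in a host-endian
+ * scratchpad and then copied into the shared queue in SeP (little-endian)
+ * byte order; queue_slot_p is a hypothetical pointer into the queue buffer.
+ *
+ *	u32 spad[SEP_SW_DESC_WORD_SIZE];
+ *
+ *	SEP_SW_DESC_INIT(spad);
+ *	SEP_SW_DESC_SET(spad, TYPE, SEP_SW_DESC_TYPE_NULL);
+ *	SEP_SW_DESC_COPY_TO_SEP(queue_slot_p, spad);
+ */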
+
+#endif /*_SEP_SW_DESC_H_*/
diff --git a/drivers/staging/sep54/sep_sysfs.c b/drivers/staging/sep54/sep_sysfs.c
new file mode 100644
index 0000000..9e6448b
--- /dev/null
+++ b/drivers/staging/sep54/sep_sysfs.c
@@ -0,0 +1,468 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_SYSFS
+
+#include "dx_driver.h"
+#include "dx_driver_abi.h"
+#include "desc_mgr.h"
+#include "sep_log.h"
+#include "sep_sysfs.h"
+
+#define MAX_QUEUE_NAME_LEN 50
+
+struct sep_stats {
+	spinlock_t stat_lock;
+	unsigned long samples_cnt;	/* Total number of samples */
+	unsigned long long accu_time;/* Accum. samples time (for avg. calc.) */
+	unsigned long long min_time;
+	unsigned long long max_time;
+	/* all times in nano-sec. */
+};
+
+#define DESC_TYPE_NUM (1<<SEP_SW_DESC_TYPE_BIT_SIZE)
+static const char *desc_names[DESC_TYPE_NUM] = {
+	"NULL",			/*SEP_SW_DESC_TYPE_NULL */
+	"CRYPTO",		/*SEP_SW_DESC_TYPE_CRYPTO_OP */
+	"MSG",			/*SEP_SW_DESC_TYPE_MSG_OP */
+	/* Next 12 types are invalid */
+	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+	"DEBUG"			/*SEP_SW_DESC_TYPE_DEBUG */
+};
+
+struct kobj_attribute *queue_size[SEP_MAX_NUM_OF_DESC_Q];
+struct attribute *queue_attrs[SEP_MAX_NUM_OF_DESC_Q];
+
+struct sep_stats drv_lat_stats[SEP_MAX_NUM_OF_DESC_Q][DXDI_IOC_NR_MAX + 1];
+struct sep_stats sep_lat_stats[SEP_MAX_NUM_OF_DESC_Q][DESC_TYPE_NUM];
+
+/*
+ * Structure used to create a directory and its attributes in sysfs
+ */
+struct sys_dir {
+	struct kobject *sys_dir_kobj;
+	struct attribute_group sys_dir_attr_group;
+	struct attribute **sys_dir_attr_list;
+	int num_of_attrs;
+	struct sep_drvdata *drvdata;	/* Associated driver context */
+};
+
+/* directory initialization*/
+static int sys_init_dir(struct sys_dir *sys_dir, struct sep_drvdata *drvdata,
+			struct kobject *parent_dir_kobj,
+			const char *dir_name,
+			struct kobj_attribute *attrs, int num_of_attrs);
+
+/* directory deinitialization */
+static void sys_free_dir(struct sys_dir *sys_dir);
+
+/* top level directory structure */
+struct sys_dir sys_top_dir;
+
+/* queue level directory structures array */
+struct sys_dir sys_queue_dirs[SEP_MAX_NUM_OF_DESC_Q];
+
+/**************************************
+ * Statistics functions section       *
+ **************************************/
+
+static void update_stats(struct sep_stats *stats,
+			 unsigned long long start_ns, unsigned long long end_ns)
+{
+	unsigned long long delta;
+	unsigned long flags;
+
+	spin_lock_irqsave(&(stats->stat_lock), flags);
+
+	delta = end_ns - start_ns;
+	stats->samples_cnt++;
+	stats->accu_time += delta;
+	stats->min_time = min(delta, stats->min_time);
+	stats->max_time = max(delta, stats->max_time);
+
+	spin_unlock_irqrestore(&(stats->stat_lock), flags);
+}
+
+void sysfs_update_drv_stats(unsigned int qid, unsigned int ioctl_cmd_type,
+			    unsigned long long start_ns,
+			    unsigned long long end_ns)
+{
+	if ((qid >= SEP_MAX_NUM_OF_DESC_Q) ||
+	    (ioctl_cmd_type > DXDI_IOC_NR_MAX)) {
+		pr_err("IDs out of range: qid=%d , ioctl_cmd=%d\n",
+			    qid, ioctl_cmd_type);
+		return;
+	}
+
+	update_stats(&(drv_lat_stats[qid][ioctl_cmd_type]), start_ns, end_ns);
+}
+
+void sysfs_update_sep_stats(unsigned int qid, enum sep_sw_desc_type desc_type,
+			    unsigned long long start_ns,
+			    unsigned long long end_ns)
+{
+	if ((qid >= SEP_MAX_NUM_OF_DESC_Q) || (desc_type >= DESC_TYPE_NUM)) {
+		pr_err("IDs out of range: qid=%d , descriptor_type=%d\n",
+			    qid, desc_type);
+		return;
+	}
+	update_stats(&(sep_lat_stats[qid][desc_type]), start_ns, end_ns);
+}
+
+/* compute queue number based by kobject passed to attribute show function */
+static int sys_get_queue_num(struct kobject *kobj, struct sys_dir *dirs)
+{
+	int i;
+
+	for (i = 0; i < SEP_MAX_NUM_OF_DESC_Q; ++i) {
+		if (dirs[i].sys_dir_kobj == kobj)
+			break;
+	}
+
+	return i;
+}
+
+static struct sep_drvdata *sys_get_drvdata(struct kobject *kobj)
+{
+	/* TODO: supporting multiple SeP devices would require avoiding
+	 * global "top_dir" and finding associated "top_dir" by traversing
+	 * up the tree to the kobject which matches one of the top_dir's */
+	return sys_top_dir.drvdata;
+}
+
+/**************************************
+ * Attributes show functions section  *
+ **************************************/
+
+static ssize_t sys_fw_ver_show(struct kobject *kobj,
+			       struct kobj_attribute *attr, char *buf)
+{
+	struct sep_drvdata *drvdata = sys_get_drvdata(kobj);
+	return sprintf(buf,
+		       "ROM_VER=0x%08X\nFW_VER=0x%08X\n",
+		       drvdata->rom_ver, drvdata->fw_ver);
+}
+
+static ssize_t sys_queue_size_show(struct kobject *kobj,
+				   struct kobj_attribute *attr, char *buf)
+{
+
+	return sprintf(buf, "<not supported>\n");
+}
+
+static ssize_t sys_queue_dump_show(struct kobject *kobj,
+				   struct kobj_attribute *attr, char *buf)
+{
+#ifdef DESCQ_DUMP_SUPPORT
+	int i;
+
+	i = sys_get_queue_num(kobj, (struct sys_dir *)&sys_queue_dirs);
+#endif
+
+	return sprintf(buf, "DescQ dump not supported, yet.\n");
+}
+
+/* time from write to read is measured */
+static ssize_t sys_queue_stats_drv_lat_show(struct kobject *kobj,
+					    struct kobj_attribute *attr,
+					    char *buf)
+{
+	u64 min_usec, max_usec, avg_usec;
+	int qid, i;
+	char *cur_buf_pos = buf;
+
+	qid = sys_get_queue_num(kobj, (struct sys_dir *)&sys_queue_dirs);
+
+	cur_buf_pos += sprintf(cur_buf_pos,
+			       "ioctl#\tmin[us]\tavg[us]\tmax[us]\t#samples\n");
+
+	if (qid >= SEP_MAX_NUM_OF_DESC_Q) {
+		pr_err("ID out of range: qid=%d\n", qid);
+		return 0;
+	}
+
+	for (i = 0; i < DXDI_IOC_NR_MAX + 1; i++) {
+		/* Because we are doing 64 bit (long long) division we
+		 * need to explicitly invoke do_div() */
+		if (drv_lat_stats[qid][i].samples_cnt > 0) {
+			min_usec = drv_lat_stats[qid][i].min_time;
+			do_div(min_usec, 1000);	/* result goes into dividend */
+			max_usec = drv_lat_stats[qid][i].max_time;
+			do_div(max_usec, 1000);
+			avg_usec = drv_lat_stats[qid][i].accu_time;
+			do_div(avg_usec, drv_lat_stats[qid][i].samples_cnt);
+			do_div(avg_usec, 1000);
+		} else {
+			min_usec = 0;
+			max_usec = 0;
+			avg_usec = 0;
+		}
+
+		cur_buf_pos += sprintf(cur_buf_pos,
+				       "%u:\t%6llu\t%6llu\t%6llu\t%7lu\n", i,
+				       min_usec, avg_usec, max_usec,
+				       drv_lat_stats[qid][i].samples_cnt);
+	}
+	return cur_buf_pos - buf;
+}
+
+/* time from descriptor enqueue to interrupt is measured */
+static ssize_t sys_queue_stats_sep_lat_show(struct kobject *kobj,
+					    struct kobj_attribute *attr,
+					    char *buf)
+{
+	u64 min_usec, max_usec, avg_usec;
+	int qid, i, buf_len;
+	char *line;
+
+	buf_len = sprintf(buf,
+			  "desc-type\tmin[us]\tavg[us]\tmax[us]\t#samples\n");
+
+	qid = sys_get_queue_num(kobj, (struct sys_dir *)&sys_queue_dirs);
+
+	if (qid >= SEP_MAX_NUM_OF_DESC_Q) {
+		pr_err("ID out of range: qid=%d\n", qid);
+		return 0;
+	}
+
+	line = kzalloc(256 * sizeof(char), GFP_KERNEL);
+
+	if (line == NULL) {
+		pr_err("Memory allocation failed\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < DESC_TYPE_NUM; ++i) {
+		if (desc_names[i] != NULL) {	/*Only if valid desc. type */
+			/* Because we are doing 64 bit (long long) division we
+			 * need to explicitly invoke do_div() */
+			if (sep_lat_stats[qid][i].samples_cnt > 0) {
+				min_usec = sep_lat_stats[qid][i].min_time;
+				/* result goes into dividend */
+				do_div(min_usec, 1000);
+				max_usec = sep_lat_stats[qid][i].max_time;
+				do_div(max_usec, 1000);
+				avg_usec = sep_lat_stats[qid][i].accu_time;
+				do_div(avg_usec,
+				       sep_lat_stats[qid][i].samples_cnt);
+				do_div(avg_usec, 1000);
+			} else {
+				min_usec = 0;
+				max_usec = 0;
+				avg_usec = 0;
+			}
+
+			buf_len += sprintf(line,
+					   "%s\t\t%6llu\t%6llu\t%6llu\t%7lu\n",
+					   desc_names[i], min_usec, avg_usec,
+					   max_usec,
+					   sep_lat_stats[qid][i].samples_cnt);
+			strcat(buf, line);
+		}
+	}
+
+	kfree(line);
+
+	return buf_len;
+}
+
+/********************************************************
+ *		SYSFS objects				*
+ ********************************************************/
+
+/* TOP LEVEL ATTRIBUTES */
+
+static struct kobj_attribute sys_top_level_attrs[] = {
+	__ATTR(fw_ver, 0444, sys_fw_ver_show, NULL),
+#ifdef SEP_HWK_UNIT_TEST
+	__ATTR(hwk_self_test, 0664, sys_hwk_st_show, sys_hwk_st_start)
+#endif
+};
+
+struct kobj_attribute sys_queue_level_attrs[] = {
+
+	__ATTR(size, 0444, sys_queue_size_show, NULL),
+	__ATTR(dump, 0444, sys_queue_dump_show, NULL),
+	__ATTR(drv_lat, 0444, sys_queue_stats_drv_lat_show, NULL),
+	__ATTR(sep_lat, 0444, sys_queue_stats_sep_lat_show, NULL)
+};
+
+int sys_init_dir(struct sys_dir *sys_dir, struct sep_drvdata *drvdata,
+		 struct kobject *parent_dir_kobj, const char *dir_name,
+		 struct kobj_attribute *attrs, int num_of_attrs)
+{
+	int i;
+
+	memset(sys_dir, 0, sizeof(struct sys_dir));
+
+	sys_dir->drvdata = drvdata;
+
+	/* initialize directory kobject */
+	sys_dir->sys_dir_kobj =
+	    kobject_create_and_add(dir_name, parent_dir_kobj);
+
+	if (!(sys_dir->sys_dir_kobj))
+		return -ENOMEM;
+	/* allocate memory for directory's attributes list */
+	sys_dir->sys_dir_attr_list =
+	    kzalloc(sizeof(struct attribute *)*(num_of_attrs + 1),
+		    GFP_KERNEL);
+
+	if (!(sys_dir->sys_dir_attr_list)) {
+		kobject_put(sys_dir->sys_dir_kobj);
+		return -ENOMEM;
+	}
+
+	sys_dir->num_of_attrs = num_of_attrs;
+
+	/* initialize attributes list */
+	for (i = 0; i < num_of_attrs; ++i)
+		sys_dir->sys_dir_attr_list[i] = &(attrs[i].attr);
+
+	/* last list entry should be NULL */
+	sys_dir->sys_dir_attr_list[num_of_attrs] = NULL;
+
+	sys_dir->sys_dir_attr_group.attrs = sys_dir->sys_dir_attr_list;
+
+	return sysfs_create_group(sys_dir->sys_dir_kobj,
+				  &(sys_dir->sys_dir_attr_group));
+}
+
+void sys_free_dir(struct sys_dir *sys_dir)
+{
+	if (!sys_dir)
+		return;
+
+	kfree(sys_dir->sys_dir_attr_list);
+
+	if (sys_dir->sys_dir_kobj)
+		kobject_put(sys_dir->sys_dir_kobj);
+}
+
+/* free sysfs directory structures */
+void sep_free_sysfs(void)
+{
+	int j;
+
+	for (j = 0; (j < SEP_MAX_NUM_OF_DESC_Q) &&
+	     (sys_queue_dirs[j].sys_dir_kobj != NULL); ++j) {
+		sys_free_dir(&(sys_queue_dirs[j]));
+	}
+
+	if (sys_top_dir.sys_dir_kobj != NULL)
+		sys_free_dir(&sys_top_dir);
+
+}
+
+/* initialize sysfs directories structures */
+int sep_setup_sysfs(struct kobject *sys_dev_kobj, struct sep_drvdata *drvdata)
+{
+	int retval = 0, i, j;
+	char queue_name[MAX_QUEUE_NAME_LEN];
+
+	pr_debug("setup sysfs under %s\n", sys_dev_kobj->name);
+	/* reset statistics */
+	memset(drv_lat_stats, 0, sizeof(drv_lat_stats));
+	memset(sep_lat_stats, 0, sizeof(sep_lat_stats));
+	for (i = 0; i < SEP_MAX_NUM_OF_DESC_Q; i++) {
+		for (j = 0; j < DXDI_IOC_NR_MAX + 1; j++) {
+			spin_lock_init(&drv_lat_stats[i][j].stat_lock);
+			/* set min_time to largest ULL value so first sample
+			 * becomes the minimum. */
+			drv_lat_stats[i][j].min_time = (unsigned long long)-1;
+		}
+		for (j = 0; j < DESC_TYPE_NUM; j++) {
+			spin_lock_init(&sep_lat_stats[i][j].stat_lock);
+			/* set min_time to largest ULL value so first sample
+			 * becomes the minimum. */
+			sep_lat_stats[i][j].min_time = (unsigned long long)-1;
+		}
+	}
+
+	/* zero all directories structures */
+	memset(&sys_top_dir, 0, sizeof(struct sys_dir));
+	memset(&sys_queue_dirs, 0,
+	       sizeof(struct sys_dir) * SEP_MAX_NUM_OF_DESC_Q);
+
+	/* initialize the top directory */
+	retval =
+	    sys_init_dir(&sys_top_dir, drvdata, sys_dev_kobj,
+			 "sep_info", sys_top_level_attrs,
+			 sizeof(sys_top_level_attrs) /
+			 sizeof(struct kobj_attribute));
+
+	if (retval)
+		return retval;
+
+	/* initialize decriptor queues directories structures */
+	for (i = 0; i < SEP_MAX_NUM_OF_DESC_Q; ++i) {
+
+		sprintf(queue_name, "queue%d", i);
+
+		retval = sys_init_dir(&(sys_queue_dirs[i]), drvdata,
+				      sys_top_dir.sys_dir_kobj, queue_name,
+				      sys_queue_level_attrs,
+				      sizeof(sys_queue_level_attrs) /
+				      sizeof(struct kobj_attribute));
+
+		if (retval)
+			break;
+
+	}
+
+	if (retval)
+		sep_free_sysfs();
+
+	return retval;
+}
+
+#ifdef SEP_SYSFS_UNIT_TEST
+
+static int __init sep_init(void)
+{
+	int retval;
+
+	pr_info("i am loading...\n");
+
+	/* drvdata is not available in this standalone unit test */
+	retval = sep_setup_sysfs(kernel_kobj, NULL);
+
+	return retval;
+}
+
+static void __exit sep_exit(void)
+{
+	sep_free_sysfs();
+	pr_info("i am unloading...\n");
+}
+
+module_init(sep_init);
+module_exit(sep_exit);
+
+#endif
diff --git a/drivers/staging/sep54/sep_sysfs.h b/drivers/staging/sep54/sep_sysfs.h
new file mode 100644
index 0000000..f8e1959
--- /dev/null
+++ b/drivers/staging/sep54/sep_sysfs.h
@@ -0,0 +1,49 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#ifndef _SEP_SYSFS_H_
+#define _SEP_SYSFS_H_
+
+int sep_setup_sysfs(struct kobject *sys_dev_kobj, struct sep_drvdata *drvdata);
+void sep_free_sysfs(void);
+
+void sysfs_update_drv_stats(unsigned int qid, unsigned int ioctl_cmd_type,
+			    unsigned long long start_ns,
+			    unsigned long long end_ns);
+
+void sysfs_update_sep_stats(unsigned int qid, enum sep_sw_desc_type desc_type,
+			    unsigned long long start_ns,
+			    unsigned long long end_ns);
+
+#ifdef SEP_HWK_UNIT_TEST
+ssize_t sys_hwk_st_show(struct kobject *kobj, struct kobj_attribute *attr,
+			char *buf);
+ssize_t sys_hwk_st_start(struct kobject *kobj, struct kobj_attribute *attr,
+			 const char *buf, size_t count);
+#endif
+
+
+#endif /*_SEP_SYSFS_H_*/
diff --git a/drivers/staging/sep54/sepapp.c b/drivers/staging/sep54/sepapp.c
new file mode 100644
index 0000000..f30a646
--- /dev/null
+++ b/drivers/staging/sep54/sepapp.c
@@ -0,0 +1,1160 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+
+#define SEP_LOG_CUR_COMPONENT SEP_LOG_MASK_SEP_APP
+
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+/*#include <linux/export.h>*/
+#include "dx_driver.h"
+#include "dx_sepapp_kapi.h"
+#include "sep_applets.h"
+#include "sep_power.h"
+#include "crypto_api.h"
+
+/* Global drvdata to be used by kernel clients via dx_sepapp_ API */
+static struct sep_drvdata *kapps_drvdata;
+
+/**
+ * sepapp_params_cleanup() - Clean resources allocated for SeP Applet parameters
+ * @client_ctx:	 The associated client context for this operation
+ * @dxdi_params:	The user-space client parameters as passed from the
+ *			DriverInterface (NULL for kernel clients)
+ * @dxdi_kparams:	The kernel client parameters (NULL for user-space clients)
+ * @sw_desc_params:	The client parameters DMA information for SeP
+ *			(required for value copy back)
+ * @local_dma_objs:	Reference to array of SEPAPP_MAX_PARAMS
+ *			DMA buffer objects.
+ *			Set for parameters of MEMREF type.
+ * @mlli_tables:	Reference to array of SEPAPP_MAX_PARAMS MLLI tables
+ *		objects to be used for MEMREF parameters.
+ *
+ * Clean resources allocated for SeP Applet parameters
+ * (primarily, MLLI tables and temporarily registered user memory)
+ * Returns void
+ */
+static void sepapp_params_cleanup(struct sep_client_ctx *client_ctx,
+				  struct dxdi_sepapp_params *dxdi_params,
+				  struct dxdi_sepapp_kparams *dxdi_kparams,
+				  struct sepapp_client_params *sw_desc_params,
+				  struct client_dma_buffer *local_dma_objs[],
+				  struct mlli_tables_list mlli_tables[])
+{
+	int i;
+	int memref_idx;
+	struct queue_drvdata *drvdata = client_ctx->drv_data;
+	enum dxdi_sepapp_param_type *params_types;
+	struct dxdi_val_param *cur_val;
+
+	if (dxdi_params != NULL)
+		params_types = dxdi_params->params_types;
+	else if (dxdi_kparams != NULL)	/* kernel parameters */
+		params_types = dxdi_kparams->params_types;
+	else			/* No parameters - nothing to clean */
+		return;
+
+	for (i = 0; (i < SEPAPP_MAX_PARAMS); i++) {
+		if (params_types[i] == DXDI_SEPAPP_PARAM_MEMREF) {
+			/* Can call for all (uninitialized MLLI ignored) */
+			llimgr_destroy_mlli(drvdata->sep_data->llimgr,
+					    mlli_tables + i);
+			memref_idx =
+			    DMA_OBJ_TO_MEMREF_IDX(client_ctx,
+						  local_dma_objs[i]);
+			release_dma_obj(client_ctx, local_dma_objs[i]);
+			if ((local_dma_objs[i] != NULL) &&
+			    (((dxdi_params != NULL) && (memref_idx !=
+							dxdi_params->params[i].
+							memref.ref_id)) ||
+			     (dxdi_kparams != NULL))) {
+				/* There is a DMA object to free
+				 * (either a temporarily registered user
+				 * param, or a kernel param - always temp.) */
+				(void)free_client_memref(client_ctx,
+							 memref_idx);
+			}
+			local_dma_objs[i] = NULL;
+		} else if (params_types[i] == DXDI_SEPAPP_PARAM_VAL) {
+			if (dxdi_params != NULL)
+				cur_val = &dxdi_params->params[i].val;
+			else	/* kernel parameters */
+				cur_val = &dxdi_kparams->params[i].val;
+			if (cur_val->copy_dir & DXDI_DATA_FROM_DEVICE) {
+				/* Copy back output values */
+				cur_val->data[0] =
+				    sw_desc_params->params[i].val.data[0];
+				cur_val->data[1] =
+				    sw_desc_params->params[i].val.data[1];
+			}
+		}
+	}
+}
+
+static int kernel_memref_to_sw_desc_memref(struct dxdi_kmemref *cur_memref,
+					   struct sep_client_ctx *client_ctx,
+					   u8 *sep_memref_type_p,
+					   struct seprpc_memref *sep_memref_p,
+					   struct client_dma_buffer
+					   **local_dma_obj_pp,
+					   struct mlli_tables_list
+					   *mlli_table_p)
+{
+	void *llimgr = client_ctx->drv_data->sep_data->llimgr;
+	enum dma_data_direction dma_dir;
+	int memref_idx;
+	int rc = 0;
+
+	/* convert DMA direction to enum dma_data_direction */
+	dma_dir = dxdi_data_dir_to_dma_data_dir(cur_memref->dma_direction);
+	if (unlikely(dma_dir == DMA_NONE)) {
+		pr_err("Invalid DMA direction (%d) for param.\n",
+			    cur_memref->dma_direction);
+		return -EINVAL;
+	}
+
+	/* For kernel parameters always temp. registration */
+	memref_idx = register_client_memref(client_ctx,
+					    NULL, cur_memref->sgl,
+					    cur_memref->nbytes, dma_dir);
+	if (unlikely(!IS_VALID_MEMREF_IDX(memref_idx))) {
+		pr_err("Failed temp. memory registration (rc=%d)\n",
+			    memref_idx);
+		return -ENOMEM;
+	}
+	*local_dma_obj_pp = acquire_dma_obj(client_ctx, memref_idx);
+	if (*local_dma_obj_pp == NULL)
+		rc = -EINVAL;
+	else
+		/* MLLI table creation */
+		rc = llimgr_create_mlli(llimgr,
+				mlli_table_p, dma_dir, *local_dma_obj_pp, 0, 0);
+
+	if (likely(rc == 0)) {
+		llimgr_mlli_to_seprpc_memref(mlli_table_p, sep_memref_p);
+		*sep_memref_type_p = SEPAPP_PARAM_TYPE_MEMREF;
+	}
+
+	return rc;		/* Cleanup on error in caller */
+}
+
+static int user_memref_to_sw_desc_memref(struct dxdi_memref *cur_memref,
+					 struct sep_client_ctx *client_ctx,
+					 u8 *sep_memref_type_p,
+					 struct seprpc_memref *sep_memref_p,
+					 struct client_dma_buffer
+					 **local_dma_obj_pp,
+					 struct mlli_tables_list *mlli_table_p)
+{
+	void *llimgr = client_ctx->drv_data->sep_data->llimgr;
+	enum dma_data_direction dma_dir;
+	int memref_idx;
+	int rc = 0;
+
+	/* convert DMA direction to enum dma_data_direction */
+	dma_dir = dxdi_data_dir_to_dma_data_dir(cur_memref->dma_direction);
+	if (unlikely(dma_dir == DMA_NONE)) {
+		pr_err("Invalid DMA direction (%d) for param.\n",
+			    cur_memref->dma_direction);
+		return -EINVAL;
+	}
+
+	if (IS_VALID_MEMREF_IDX(cur_memref->ref_id)) {
+		/* Registered mem. */
+		*local_dma_obj_pp =
+		    acquire_dma_obj(client_ctx, cur_memref->ref_id);
+		if (unlikely(*local_dma_obj_pp == NULL)) {
+			pr_err("Failed to acquire DMA obj. at ref_id=%d\n",
+				    cur_memref->ref_id);
+			return -EINVAL;
+		}
+		if ((cur_memref->start_or_offset == 0) &&
+		    (cur_memref->size == (*local_dma_obj_pp)->buf_size)) {
+			/* Whole registered mem. */
+			memref_idx = cur_memref->ref_id;
+		} else {	/* Partial reference */
+			/* Handle as unregistered memory at
+			 * different address/len. */
+			INVALIDATE_MEMREF_IDX(cur_memref->ref_id);
+			cur_memref->start_or_offset += (unsigned long)
+			    (*local_dma_obj_pp)->user_buf_ptr;
+			/* Release base memref - not used */
+			release_dma_obj(client_ctx, *local_dma_obj_pp);
+			*local_dma_obj_pp = NULL;
+		}
+	}
+	/* The following is not an "else" of the previous block, because that
+	 * block may invalidate ref_id for a partial reference, forcing this
+	 * temporary registration. */
+	if (!IS_VALID_MEMREF_IDX(cur_memref->ref_id)) {
+		/* Temp. registration */
+		memref_idx =
+		    register_client_memref(client_ctx,
+					   (u8 __user *)cur_memref->
+					   start_or_offset, NULL,
+					   cur_memref->size, dma_dir);
+		if (unlikely(!IS_VALID_MEMREF_IDX(memref_idx))) {
+			pr_err("Failed temp. memory " "registration\n");
+			return -ENOMEM;
+		}
+		*local_dma_obj_pp = acquire_dma_obj(client_ctx, memref_idx);
+	}
+
+	if (*local_dma_obj_pp == NULL)
+		rc = -EINVAL;
+	else
+		/* MLLI table creation */
+		rc = llimgr_create_mlli(llimgr,
+				mlli_table_p, dma_dir, *local_dma_obj_pp, 0, 0);
+
+	if (likely(rc == 0)) {
+		llimgr_mlli_to_seprpc_memref(mlli_table_p, sep_memref_p);
+		*sep_memref_type_p = SEPAPP_PARAM_TYPE_MEMREF;
+	}
+
+	return rc;		/* Cleanup on error in caller */
+}
+
+/**
+ * dxdi_sepapp_params_to_sw_desc_params() - Convert the client input parameters
+ * @client_ctx:	 The associated client context for this operation
+ * @dxdi_params:	The user-space client parameters as passed from the
+ *			DriverInterface (NULL for kernel clients)
+ * @dxdi_kparams:	The kernel client parameters (NULL for user-space clients)
+ * @sw_desc_params:	The returned client parameters DMA information for SeP
+ * @local_dma_objs:	Reference to array of SEPAPP_MAX_PARAMS
+ *			DMA buffer objects.
+ *			Set for parameters of MEMREF type.
+ * @mlli_tables:	Reference to array of SEPAPP_MAX_PARAMS MLLI tables
+ *		objects to be used for MEMREF parameters.
+ *
+ * Convert the client input parameters array from dxdi format to the
+ * SW descriptor format while creating the required MLLI tables.
+ * Returns int
+ */
+static int dxdi_sepapp_params_to_sw_desc_params(struct sep_client_ctx
+						*client_ctx,
+						struct dxdi_sepapp_params
+						*dxdi_params,
+						struct dxdi_sepapp_kparams
+						*dxdi_kparams,
+						struct sepapp_client_params
+						*sw_desc_params,
+						struct client_dma_buffer
+						*local_dma_objs[],
+						struct mlli_tables_list
+						mlli_tables[])
+{
+	enum dxdi_sepapp_param_type *params_types;
+	struct dxdi_val_param *cur_val;
+	int i;
+	int rc = 0;
+
+	/* Init./clean arrays for proper cleanup in case of failure */
+	for (i = 0; i < SEPAPP_MAX_PARAMS; i++) {
+		MLLI_TABLES_LIST_INIT(mlli_tables + i);
+		local_dma_objs[i] = NULL;
+		sw_desc_params->params_types[i] = SEPAPP_PARAM_TYPE_NULL;
+	}
+
+	if (dxdi_params != NULL)
+		params_types = dxdi_params->params_types;
+	else if (dxdi_kparams != NULL)	/* kernel parameters */
+		params_types = dxdi_kparams->params_types;
+	else	/* No parameters - nothing to do beyond initialization (above) */
+		return 0;
+
+	/* Convert each parameter based on its type */
+	for (i = 0; (i < SEPAPP_MAX_PARAMS) && (rc == 0); i++) {
+		switch (params_types[i]) {
+
+		case DXDI_SEPAPP_PARAM_MEMREF:
+			if (dxdi_params != NULL)
+				rc = user_memref_to_sw_desc_memref
+				    (&dxdi_params->params[i].memref, client_ctx,
+				     &sw_desc_params->params_types[i],
+				     &(sw_desc_params->params[i].memref),
+				     local_dma_objs + i, mlli_tables + i);
+			else
+				rc = kernel_memref_to_sw_desc_memref
+				    (&dxdi_kparams->params[i].kmemref,
+				     client_ctx,
+				     &sw_desc_params->params_types[i],
+				     &(sw_desc_params->params[i].memref),
+				     local_dma_objs + i, mlli_tables + i);
+			break;	/* from switch */
+
+		case DXDI_SEPAPP_PARAM_VAL:
+			if (dxdi_params != NULL)
+				cur_val = &dxdi_params->params[i].val;
+			else	/* kernel parameters */
+				cur_val = &dxdi_kparams->params[i].val;
+
+			sw_desc_params->params[i].val.dir = SEPAPP_DIR_NULL;
+			if (cur_val->copy_dir & DXDI_DATA_TO_DEVICE) {
+				sw_desc_params->params[i].val.dir |=
+				    SEPAPP_DIR_IN;
+				sw_desc_params->params[i].val.data[0] =
+				    cur_val->data[0];
+				sw_desc_params->params[i].val.data[1] =
+				    cur_val->data[1];
+			}
+			if (cur_val->copy_dir & DXDI_DATA_FROM_DEVICE) {
+				sw_desc_params->params[i].val.dir |=
+				    SEPAPP_DIR_OUT;
+			}
+			sw_desc_params->params_types[i] = SEPAPP_PARAM_TYPE_VAL;
+			break;	/* from switch */
+
+		case DXDI_SEPAPP_PARAM_NULL:
+			sw_desc_params->params_types[i] =
+			    SEPAPP_PARAM_TYPE_NULL;
+			break;
+
+		default:
+			pr_err(
+				"Invalid parameter type (%d) for #%d\n",
+				params_types[i], i);
+			rc = -EINVAL;
+		}		/*switch */
+	}			/*for parameters */
+
+	/* Cleanup in case of error */
+	if (rc != 0)
+		sepapp_params_cleanup(client_ctx, dxdi_params, dxdi_kparams,
+				      sw_desc_params, local_dma_objs,
+				      mlli_tables);
+	return rc;
+}
+
+/**
+ * sepapp_session_open() - Open a session with given SeP Applet
+ * @op_ctx:		The operation context for this request
+ * @sepapp_uuid:	 Applet UUID
+ * @auth_method:	 Client authentication method
+ * @auth_data:	 Authentication data
+ * @app_auth_data:	 Applet specific authentication data (user space)
+ * @kapp_auth_data:	 Applet specific authentication data (kernel)
+ * @session_id:	 Returned allocated session ID
+ * @sep_ret_origin:	 Origin in SeP of error code
+ *
+ * Returns int
+ */
+static int sepapp_session_open(struct sep_op_ctx *op_ctx,
+			       u8 *sepapp_uuid,
+			       u32 auth_method,
+			       void *auth_data,
+			       struct dxdi_sepapp_params *app_auth_data,
+			       struct dxdi_sepapp_kparams *kapp_auth_data,
+			       int *session_id,
+			       enum dxdi_sep_module *sep_ret_origin)
+{
+	int rc;
+	struct sep_app_session *new_session;
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct client_dma_buffer *local_dma_objs[SEPAPP_MAX_PARAMS];
+	struct mlli_tables_list mlli_tables[SEPAPP_MAX_PARAMS];
+	struct sep_sw_desc desc;
+	struct sepapp_in_params_session_open *sepapp_msg_p;
+
+	/* Verify that given spad_buf size can accommodate the in_params */
+	BUILD_BUG_ON(sizeof(struct sepapp_in_params_session_open) >
+		     USER_SPAD_SIZE);
+
+	op_ctx->op_type = SEP_OP_APP;
+	*sep_ret_origin = DXDI_SEP_MODULE_HOST_DRIVER;
+
+	op_ctx->spad_buf_p = dma_pool_alloc(drvdata->sep_data->spad_buf_pool,
+					    GFP_KERNEL,
+					    &op_ctx->spad_buf_dma_addr);
+	if (unlikely(op_ctx->spad_buf_p == NULL)) {
+		pr_err(
+			    "Failed allocating from spad_buf_pool for SeP Applet Request message\n");
+		INVALIDATE_SESSION_IDX(*session_id);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+		return -ENOMEM;
+	}
+	sepapp_msg_p =
+	    (struct sepapp_in_params_session_open *)op_ctx->spad_buf_p;
+
+	/* Find free session entry */
+	for ((*session_id) = 0,
+	     new_session = &op_ctx->client_ctx->sepapp_sessions[0];
+	     (*session_id < MAX_SEPAPP_SESSION_PER_CLIENT_CTX);
+	     new_session++, (*session_id)++) {
+		mutex_lock(&new_session->session_lock);
+		if (new_session->ref_cnt == 0)
+			break;
+		mutex_unlock(&new_session->session_lock);
+	}
+	if (*session_id == MAX_SEPAPP_SESSION_PER_CLIENT_CTX) {
+		pr_err(
+			    "Could not allocate session entry. all %u are in use.\n",
+			    MAX_SEPAPP_SESSION_PER_CLIENT_CTX);
+		INVALIDATE_SESSION_IDX(*session_id);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+		return -ENOMEM;
+	}
+
+	new_session->ref_cnt = 1;	/* To be decremented by close_session */
+	/* Invalidate the SeP session ID so it cannot be used until the
+	 * session is actually opened */
+	new_session->sep_session_id = SEP_SESSION_ID_INVALID;
+	mutex_unlock(&new_session->session_lock);
+
+	/* Convert parameters to SeP Applet format */
+	rc = dxdi_sepapp_params_to_sw_desc_params(op_ctx->client_ctx,
+						  app_auth_data, kapp_auth_data,
+						  &sepapp_msg_p->client_params,
+						  local_dma_objs, mlli_tables);
+
+	if (likely(rc == 0)) {
+		memcpy(&sepapp_msg_p->app_uuid, sepapp_uuid, SEPAPP_UUID_SIZE);
+		sepapp_msg_p->auth_method = cpu_to_le32(auth_method);
+		/* TODO: Fill msg.auth_data as required for supported methods,
+		 * e.g. client application ID */
+
+		/* Pack SW descriptor */
+		/* Set invalid session ID so in case of error the ID set
+		 * in the session context remains invalid. */
+		desc_q_pack_app_req_desc(&desc, op_ctx,
+					 SEPAPP_REQ_TYPE_SESSION_OPEN,
+					 SEP_SESSION_ID_INVALID,
+					 op_ctx->spad_buf_dma_addr);
+		/* Associate operation with the session */
+		op_ctx->session_ctx = new_session;
+		op_ctx->internal_error = false;
+		rc = desc_q_enqueue(drvdata->desc_queue, &desc, true);
+		if (likely(!IS_DESCQ_ENQUEUE_ERR(rc)))
+			rc = 0;
+	}
+
+	if (likely(rc == 0)) {
+		rc = wait_for_sep_op_result(op_ctx);
+		/* Process descriptor completion */
+		if (likely(rc == 0)) {
+			if ((op_ctx->error_info != 0) &&
+			    (op_ctx->internal_error)) {
+				*sep_ret_origin = DXDI_SEP_MODULE_APP_MGR;
+			} else {	/* Success or error from applet */
+				*sep_ret_origin = DXDI_SEP_MODULE_APP;
+			}
+		} else {	/* Descriptor processing failed */
+			*sep_ret_origin = DXDI_SEP_MODULE_SW_QUEUE;
+			op_ctx->error_info = DXDI_ERROR_INTERNAL;
+		}
+	}
+
+	if (unlikely((rc != 0) || (op_ctx->error_info != 0))) {
+		mutex_lock(&new_session->session_lock);
+		new_session->ref_cnt = 0;
+		mutex_unlock(&new_session->session_lock);
+		INVALIDATE_SESSION_IDX(*session_id);
+	}
+	op_ctx->op_state = USER_OP_NOP;
+	sepapp_params_cleanup(op_ctx->client_ctx, app_auth_data, kapp_auth_data,
+			      &sepapp_msg_p->client_params, local_dma_objs,
+			      mlli_tables);
+
+	return rc;
+}
+
+/**
+ * sepapp_session_close() - Close given SeP Applet context
+ * @op_ctx:
+ * @session_id:
+ *
+ * Returns int
+ */
+int sepapp_session_close(struct sep_op_ctx *op_ctx, int session_id)
+{
+	struct sep_client_ctx *client_ctx = op_ctx->client_ctx;
+	struct sep_app_session *session_ctx =
+	    &client_ctx->sepapp_sessions[session_id];
+	struct queue_drvdata *drvdata = client_ctx->drv_data;
+	struct sep_sw_desc desc;
+	int rc;
+	u16 sep_session_id;
+
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_get();
+#endif
+	if (!IS_VALID_SESSION_IDX(session_id)) {
+		pr_err("Invalid session_id=%d\n", session_id);
+		rc = -EINVAL;
+		goto end;
+	}
+	op_ctx->op_type = SEP_OP_APP;
+
+	mutex_lock(&session_ctx->session_lock);
+
+	if (!IS_VALID_SESSION_CTX(session_ctx)) {
+		mutex_unlock(&session_ctx->session_lock);
+		pr_err("Invalid session ID %d for user %p\n",
+			    session_id, client_ctx);
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (session_ctx->ref_cnt > 1) {
+		mutex_unlock(&session_ctx->session_lock);
+		pr_err("Invoked while still has pending commands!\n");
+		op_ctx->error_info = DXDI_ERROR_FATAL;
+		rc = -EBUSY;
+		goto end;
+	}
+
+	sep_session_id = session_ctx->sep_session_id;/* save before release */
+	/* Release host resources anyway... */
+	INVALIDATE_SESSION_CTX(session_ctx);
+	mutex_unlock(&session_ctx->session_lock);
+
+	/* Now release session resources on SeP */
+	/* Pack SW descriptor */
+	desc_q_pack_app_req_desc(&desc, op_ctx,
+				 SEPAPP_REQ_TYPE_SESSION_CLOSE, sep_session_id,
+				 0);
+	/* Associate operation with the session */
+	op_ctx->session_ctx = session_ctx;
+	op_ctx->internal_error = false;
+	rc = desc_q_enqueue(drvdata->desc_queue, &desc, true);
+	if (likely(!IS_DESCQ_ENQUEUE_ERR(rc)))
+		rc = wait_for_sep_op_result(op_ctx);
+
+	if (unlikely(rc != 0)) {	/* Not supposed to happen */
+		pr_err(
+			    "Failure in SESSION_CLOSE operation for sep_session_id=%u\n",
+			    sep_session_id);
+		op_ctx->error_info = DXDI_ERROR_FATAL;
+	}
+
+end:
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_put();
+#endif
+	return rc;
+}
+
+static int sepapp_command_invoke(struct sep_op_ctx *op_ctx,
+				 int session_id,
+				 u32 command_id,
+				 struct dxdi_sepapp_params *command_params,
+				 struct dxdi_sepapp_kparams *command_kparams,
+				 enum dxdi_sep_module *sep_ret_origin,
+				 int async)
+{
+	int rc;
+	struct sep_app_session *session_ctx =
+	    &op_ctx->client_ctx->sepapp_sessions[session_id];
+	struct queue_drvdata *drvdata = op_ctx->client_ctx->drv_data;
+	struct sep_sw_desc desc;
+	struct sepapp_in_params_command_invoke *sepapp_msg_p;
+
+	op_ctx->op_type = SEP_OP_APP;
+	/* Verify that given spad_buf size can accommodate the in_params */
+	BUILD_BUG_ON(sizeof(struct sepapp_in_params_command_invoke) >
+		     USER_SPAD_SIZE);
+
+	if (!IS_VALID_SESSION_IDX(session_id)) {
+		pr_err("Invalid session_id=%d\n", session_id);
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+		return -EINVAL;
+	}
+	mutex_lock(&session_ctx->session_lock);
+	if (!IS_VALID_SESSION_CTX(session_ctx)) {
+		mutex_unlock(&session_ctx->session_lock);
+		pr_err("Invalid session ID %d for user %p\n",
+			    session_id, op_ctx->client_ctx);
+		op_ctx->error_info = DXDI_ERROR_BAD_CTX;
+		return -EINVAL;
+	}
+	session_ctx->ref_cnt++;	/* Prevent deletion while in use */
+	/* Unlock to allow concurrent session use from different threads */
+	mutex_unlock(&session_ctx->session_lock);
+
+	op_ctx->spad_buf_p = dma_pool_alloc(drvdata->sep_data->spad_buf_pool,
+					    GFP_KERNEL,
+					    &op_ctx->spad_buf_dma_addr);
+	if (unlikely(op_ctx->spad_buf_p == NULL)) {
+		pr_err(
+			    "Failed allocating from spad_buf_pool for SeP Applet Request message\n");
+		op_ctx->error_info = DXDI_ERROR_NO_RESOURCE;
+		rc = -ENOMEM;
+		goto sepapp_command_exit;
+	}
+	sepapp_msg_p =
+	    (struct sepapp_in_params_command_invoke *)op_ctx->spad_buf_p;
+
+	op_ctx->async_info.dxdi_params = command_params;
+	op_ctx->async_info.dxdi_kparams = command_kparams;
+	op_ctx->async_info.sw_desc_params = &sepapp_msg_p->client_params;
+	op_ctx->async_info.session_id = session_id;
+
+	if (async) {
+		wait_event_interruptible(op_ctx->client_ctx->memref_wq,
+			op_ctx->client_ctx->memref_cnt < 1);
+		mutex_lock(&session_ctx->session_lock);
+		op_ctx->client_ctx->memref_cnt++;
+		mutex_unlock(&session_ctx->session_lock);
+	}
+
+	mutex_lock(&drvdata->desc_queue_sequencer);
+	/* Convert parameters to SeP Applet format */
+	rc = dxdi_sepapp_params_to_sw_desc_params(op_ctx->client_ctx,
+					  command_params,
+					  command_kparams,
+					  &sepapp_msg_p->client_params,
+					  op_ctx->async_info.local_dma_objs,
+					  op_ctx->async_info.mlli_tables);
+
+	if (likely(rc == 0)) {
+		sepapp_msg_p->command_id = cpu_to_le32(command_id);
+		desc_q_pack_app_req_desc(&desc, op_ctx,
+					 SEPAPP_REQ_TYPE_COMMAND_INVOKE,
+					 session_ctx->sep_session_id,
+					 op_ctx->spad_buf_dma_addr);
+		/* Associate operation with the session */
+		op_ctx->session_ctx = session_ctx;
+		op_ctx->internal_error = false;
+		op_ctx->op_state = USER_OP_INPROC;
+		rc = desc_q_enqueue(drvdata->desc_queue, &desc, true);
+		if (likely(!IS_DESCQ_ENQUEUE_ERR(rc)))
+			rc = 0;
+
+		if (async && rc != 0) {
+			mutex_lock(&session_ctx->session_lock);
+			op_ctx->client_ctx->memref_cnt--;
+			mutex_unlock(&session_ctx->session_lock);
+		}
+	}
+	mutex_unlock(&drvdata->desc_queue_sequencer);
+
+	if (likely(rc == 0) && !async)
+		rc = wait_for_sep_op_result(op_ctx);
+
+	/* Process descriptor completion */
+	if (likely(rc == 0)) {
+		if ((op_ctx->error_info != 0) && (op_ctx->internal_error)) {
+			*sep_ret_origin = DXDI_SEP_MODULE_APP_MGR;
+		} else {	/* Success or error from applet */
+			*sep_ret_origin = DXDI_SEP_MODULE_APP;
+		}
+	} else {		/* Descriptor processing failed */
+		*sep_ret_origin = DXDI_SEP_MODULE_SW_QUEUE;
+		op_ctx->error_info = DXDI_ERROR_INTERNAL;
+	}
+	if (!async) {
+		op_ctx->op_state = USER_OP_NOP;
+		sepapp_params_cleanup(op_ctx->client_ctx,
+				command_params, command_kparams,
+				&sepapp_msg_p->client_params,
+				op_ctx->async_info.local_dma_objs,
+				op_ctx->async_info.mlli_tables);
+	}
+
+sepapp_command_exit:
+	if (!async) {
+		/* Release session */
+		mutex_lock(&session_ctx->session_lock);
+		session_ctx->ref_cnt--;
+		mutex_unlock(&session_ctx->session_lock);
+	}
+
+	return rc;
+
+}
+
+int sep_ioctl_sepapp_session_open(struct sep_client_ctx *client_ctx,
+				  unsigned long arg)
+{
+	struct dxdi_sepapp_session_open_params __user *user_params =
+	    (struct dxdi_sepapp_session_open_params __user *)arg;
+	struct dxdi_sepapp_session_open_params params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_sepapp_session_open_params, session_id);
+	int rc;
+
+	/* access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = sepapp_session_open(&op_ctx,
+				 params.app_uuid, params.auth_method,
+				 &params.auth_data, &params.app_auth_data, NULL,
+				 &params.session_id, &params.sep_ret_origin);
+
+	/* Copy back app_auth_data for any "by value" output parameters */
+	if (copy_to_user(&user_params->app_auth_data, &params.app_auth_data,
+			   sizeof(struct dxdi_sepapp_params))
+	    || put_user(params.session_id, &user_params->session_id)
+	    || put_user(params.sep_ret_origin,
+			  &user_params->sep_ret_origin)) {
+		pr_err("Failed writing input parameters");
+		return -EFAULT;
+	}
+
+	put_user(op_ctx.error_info, &(user_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+/**
+ * dx_sepapp_session_open() - Open a session with a SeP applet
+ *
+ * @ctx:		SeP client context
+ * @sepapp_uuid:	Target applet UUID
+ * @auth_method:	Session connection authentication method
+ *			(Currently only 0/Public is supported)
+ * @auth_data:		Pointer to authentication data - Should be NULL
+ * @open_params:	Parameters for session opening
+ * @session_id:		Returned session ID (on success)
+ * @ret_origin:		Return code origin
+ *
+ * On success the return code is 0 when ret_origin is not DXDI_SEP_MODULE_APP
+ * (i.e., the code originates above the applet); for DXDI_SEP_MODULE_APP it is
+ * an applet-specific return code.
+ */
+int dx_sepapp_session_open(void *ctx,
+			   u8 *sepapp_uuid,
+			   u32 auth_method,
+			   void *auth_data,
+			   struct dxdi_sepapp_kparams *open_params,
+			   int *session_id, enum dxdi_sep_module *ret_origin)
+{
+	struct sep_client_ctx *client_ctx = (struct sep_client_ctx *)ctx;
+	struct sep_op_ctx op_ctx;
+	int rc;
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = sepapp_session_open(&op_ctx,
+				 sepapp_uuid, auth_method, auth_data,
+				 NULL, open_params, session_id, ret_origin);
+	/* If the request operation succeeded, return the return code from SeP */
+	if (likely(rc == 0))
+		rc = op_ctx.error_info;
+	op_ctx_fini(&op_ctx);
+	return rc;
+}
+EXPORT_SYMBOL(dx_sepapp_session_open);
+
+int sep_ioctl_sepapp_session_close(struct sep_client_ctx *client_ctx,
+				   unsigned long arg)
+{
+	struct dxdi_sepapp_session_close_params __user *user_params =
+	    (struct dxdi_sepapp_session_close_params __user *)arg;
+	int session_id;
+	struct sep_op_ctx op_ctx;
+	int rc;
+	pr_debug("sep_ioctl_sepapp_session_close user_params->session_id:%ld", arg);
+	/* access permissions to arg were already checked in sep_ioctl */
+	rc = __get_user(session_id, &user_params->session_id);
+	if (rc) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	pr_debug("sep_ioctl_sepapp_session_close session_id:%d", session_id);
+	rc = sepapp_session_close(&op_ctx, session_id);
+	pr_debug("sep_ioctl_sepapp_session_close return:%d", rc);
+	op_ctx_fini(&op_ctx);
+
+	return rc;
+}
+
+/**
+ * dx_sepapp_session_close() - Close a session with an applet
+ *
+ * @ctx:	SeP client context
+ * @session_id: Session ID as returned from dx_sepapp_open_session()
+ *
+ * Returns 0 on success
+ */
+int dx_sepapp_session_close(void *ctx, int session_id)
+{
+	struct sep_client_ctx *client_ctx = (struct sep_client_ctx *)ctx;
+	struct sep_op_ctx op_ctx;
+	int rc;
+
+	op_ctx_init(&op_ctx, client_ctx);
+	pr_debug("dx_sepapp_session_close session_id:%d", session_id);
+	rc = sepapp_session_close(&op_ctx, session_id);
+	pr_debug("dx_sepapp_session_close rc:%d", rc);
+	op_ctx_fini(&op_ctx);
+	return rc;
+}
+EXPORT_SYMBOL(dx_sepapp_session_close);
+
+int sep_ioctl_sepapp_command_invoke(struct sep_client_ctx *client_ctx,
+				    unsigned long arg)
+{
+	struct dxdi_sepapp_command_invoke_params __user *user_params =
+	    (struct dxdi_sepapp_command_invoke_params __user *)arg;
+	struct dxdi_sepapp_command_invoke_params params;
+	struct sep_op_ctx op_ctx;
+	/* Calculate size of input parameters part */
+	const unsigned long input_size =
+	    offsetof(struct dxdi_sepapp_command_invoke_params,
+		     sep_ret_origin);
+	int rc;
+
+	/* access permissions to arg were already checked in sep_ioctl */
+	if (__copy_from_user(&params, user_params, input_size)) {
+		pr_err("Failed reading input parameters");
+		return -EFAULT;
+	}
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = sepapp_command_invoke(&op_ctx,
+				   params.session_id, params.command_id,
+				   &params.command_params, NULL,
+				   &params.sep_ret_origin, 0);
+
+	/* Copy back command_params for any "by value" output parameters */
+	if (copy_to_user(&user_params->command_params,
+			   &params.command_params,
+			   sizeof(struct dxdi_sepapp_params))
+	    || put_user(params.sep_ret_origin,
+			  &user_params->sep_ret_origin)) {
+		pr_err("Failed writing input parameters");
+		return -EFAULT;
+	}
+
+	put_user(op_ctx.error_info, &(user_params->error_info));
+
+	op_ctx_fini(&op_ctx);
+	return rc;
+}
+
+/**
+ * dx_sepapp_command_invoke() - Initiate command in the applet associated with
+ *				given session ID
+ *
+ * @ctx:	SeP client context
+ * @session_id:	The target session ID
+ * @command_id:	The ID of the command to initiate (applet-specific)
+ * @command_params:	The command parameters
+ * @ret_origin:	The origin of the return code
+ */
+int dx_sepapp_command_invoke(void *ctx,
+			     int session_id,
+			     u32 command_id,
+			     struct dxdi_sepapp_kparams *command_params,
+			     enum dxdi_sep_module *ret_origin)
+{
+	struct sep_client_ctx *client_ctx = (struct sep_client_ctx *)ctx;
+	struct sep_op_ctx op_ctx;
+	int rc;
+
+	op_ctx_init(&op_ctx, client_ctx);
+	rc = sepapp_command_invoke(&op_ctx, session_id, command_id,
+				   NULL, command_params, ret_origin, 0);
+	/* If the request operation succeeded, return the return code from SeP */
+	if (likely(rc == 0))
+		rc = op_ctx.error_info;
+	op_ctx_fini(&op_ctx);
+	return rc;
+}
+EXPORT_SYMBOL(dx_sepapp_command_invoke);
+
+static void async_app_handle_op_completion(struct work_struct *work)
+{
+	struct async_req_ctx *areq_ctx =
+	    container_of(work, struct async_req_ctx, comp_work);
+	struct sep_op_ctx *op_ctx = &areq_ctx->op_ctx;
+	struct crypto_async_request *initiating_req = areq_ctx->initiating_req;
+	int err = 0;
+	struct sep_app_session *session_ctx =
+	  &op_ctx->client_ctx->sepapp_sessions[op_ctx->async_info.session_id];
+
+	SEP_LOG_DEBUG("req=%p op_ctx=%p\n", initiating_req, op_ctx);
+	if (op_ctx == NULL) {
+		SEP_LOG_ERR("Invalid work context (%p)\n", work);
+		return;
+	}
+
+	if (op_ctx->op_state == USER_OP_COMPLETED) {
+
+		if (unlikely(op_ctx->error_info != 0)) {
+			SEP_LOG_ERR("SeP crypto-op failed (sep_rc=0x%08X)\n",
+				    op_ctx->error_info);
+		}
+		/* Save ret_code info before cleaning op_ctx */
+		err = -(op_ctx->error_info);
+		if (unlikely(err == -EINPROGRESS)) {
+			/* SeP error code collides with EINPROGRESS */
+			SEP_LOG_ERR("Invalid SeP error code 0x%08X\n",
+				    op_ctx->error_info);
+			err = -EINVAL;	/* fallback */
+		}
+		sepapp_params_cleanup(op_ctx->client_ctx,
+					op_ctx->async_info.dxdi_params,
+					op_ctx->async_info.dxdi_kparams,
+					op_ctx->async_info.sw_desc_params,
+					op_ctx->async_info.local_dma_objs,
+					op_ctx->async_info.mlli_tables);
+
+		mutex_lock(&session_ctx->session_lock);
+		session_ctx->ref_cnt--;
+		mutex_unlock(&session_ctx->session_lock);
+
+		op_ctx->client_ctx->memref_cnt--;
+		wake_up_interruptible(&op_ctx->client_ctx->memref_wq);
+
+		if (op_ctx->async_info.dxdi_kparams != NULL)
+			kfree(op_ctx->async_info.dxdi_kparams);
+		op_ctx_fini(op_ctx);
+	} else if (op_ctx->op_state == USER_OP_INPROC) {
+		/* Report with the callback the dispatch from backlog to
+		   the actual processing in the SW descriptors queue
+		   (Returned -EBUSY when the request was dispatched) */
+		err = -EINPROGRESS;
+	} else {
+		SEP_LOG_ERR("Invalid state (%d) for op_ctx %p\n",
+			    op_ctx->op_state, op_ctx);
+		BUG();
+	}
+
+	if (likely(initiating_req->complete != NULL))
+		initiating_req->complete(initiating_req, err);
+	else
+		SEP_LOG_ERR("Async. operation has no completion callback.\n");
+}
+
+int async_sepapp_command_invoke(void *ctx,
+			     int session_id,
+			     u32 command_id,
+			     struct dxdi_sepapp_kparams *command_params,
+			     enum dxdi_sep_module *ret_origin,
+			     struct async_req_ctx *areq_ctx)
+{
+	struct sep_client_ctx *client_ctx = (struct sep_client_ctx *)ctx;
+	struct sep_op_ctx *op_ctx = &areq_ctx->op_ctx;
+	int rc;
+
+	INIT_WORK(&areq_ctx->comp_work, async_app_handle_op_completion);
+	op_ctx_init(op_ctx, client_ctx);
+	op_ctx->comp_work = &areq_ctx->comp_work;
+	rc = sepapp_command_invoke(op_ctx, session_id, command_id,
+				   NULL, command_params, ret_origin, 1);
+
+	if (rc == 0)
+		return -EINPROGRESS;
+	else
+		return rc;
+}
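+
+/*
+ * Illustrative sketch (assumption, not part of the driver): a kernel client
+ * would invoke an applet command asynchronously roughly as below. The
+ * completion callback name and the areq_ctx setup are hypothetical.
+ *
+ *	static void my_complete(struct crypto_async_request *req, int err);
+ *
+ *	areq_ctx->initiating_req->complete = my_complete;
+ *	rc = async_sepapp_command_invoke(ctx, sess_id, cmd_id,
+ *					 kparams, &origin, areq_ctx);
+ *	if (rc == -EINPROGRESS)
+ *		;	/* result delivered later through my_complete() */
+ */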
+
+/**
+ * dx_sepapp_context_alloc() - Allocate client context for SeP applets ops.
+ * Returns DX_SEPAPP_CLIENT_CTX_NULL on failure.
+ */
+void *dx_sepapp_context_alloc(void)
+{
+	struct sep_client_ctx *client_ctx;
+
+	client_ctx = kzalloc(sizeof(struct sep_client_ctx), GFP_KERNEL);
+	if (client_ctx == NULL)
+		return DX_SEPAPP_CLIENT_CTX_NULL;
+
+	/* Always use queue 0 */
+	init_client_ctx(&kapps_drvdata->queue[0], client_ctx);
+
+	return (void *)client_ctx;
+}
+EXPORT_SYMBOL(dx_sepapp_context_alloc);
+
+/**
+ * dx_sepapp_context_free() - Free client context.
+ *
+ * @ctx: Client context to free.
+ *
+ * All sessions must be closed before the context is freed.
+ */
+void dx_sepapp_context_free(void *ctx)
+{
+	struct sep_client_ctx *client_ctx = (struct sep_client_ctx *)ctx;
+	struct queue_drvdata *drvdata = client_ctx->drv_data;
+
+	cleanup_client_ctx(drvdata, client_ctx);
+	kfree(client_ctx);
+}
+EXPORT_SYMBOL(dx_sepapp_context_free);
+
+void dx_sepapp_init(struct sep_drvdata *drvdata)
+{
+	kapps_drvdata = drvdata;	/* Save for dx_sepapp_ API */
+}
+
+/**
+ * execute_sep() - Execute command in SEP.
+ *
+ * @command: Command ID to execute in SEP.
+ * @addr: Address of buffer to be passed to SEP.
+ * @size: Size of the buffer.
+ * @data1: Arbitrary data to be passed to SEP.
+ * @data2: Arbitrary data to be passed to SEP.
+ *
+ * Returns 0 on success, negative error code otherwise
+ */
+static int execute_sep(u32 command, u8 *addr, u32 size, u32 data1, u32 data2)
+{
+	int sess_id = 0;
+	enum dxdi_sep_module ret_origin;
+	struct sep_client_ctx *sctx = NULL;
+	u8 uuid[16] = DEFAULT_APP_UUID;
+	struct dxdi_sepapp_kparams cmd_params;
+	int rc = 0;
+
+	cmd_params.params_types[0] = DXDI_SEPAPP_PARAM_VAL;
+	/* addr is already a physical address, so this works on
+	 * a system with <= 4GB RAM.
+	 * TODO revisit this if the physical address of IMR can be higher
+	 */
+	cmd_params.params[0].val.data[0] = (unsigned long)addr & (DMA_BIT_MASK(32));
+	cmd_params.params[0].val.data[1] = 0;
+	cmd_params.params[0].val.copy_dir = DXDI_DATA_TO_DEVICE;
+
+	cmd_params.params_types[1] = DXDI_SEPAPP_PARAM_VAL;
+	cmd_params.params[1].val.data[0] = (u32)size;
+	cmd_params.params[1].val.data[1] = 0;
+	cmd_params.params[1].val.copy_dir = DXDI_DATA_TO_DEVICE;
+
+	cmd_params.params_types[2] = DXDI_SEPAPP_PARAM_VAL;
+	cmd_params.params[2].val.data[0] = data1;
+	cmd_params.params[2].val.data[1] = 0;
+	cmd_params.params[2].val.copy_dir = DXDI_DATA_TO_DEVICE;
+
+	cmd_params.params_types[3] = DXDI_SEPAPP_PARAM_VAL;
+	cmd_params.params[3].val.data[0] = data2;
+	cmd_params.params[3].val.data[1] = 0;
+	cmd_params.params[3].val.copy_dir = DXDI_DATA_TO_DEVICE;
+
+	sctx = dx_sepapp_context_alloc();
+	if (unlikely(!sctx))
+		return -ENOMEM;
+
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_get();
+#endif
+
+	rc = dx_sepapp_session_open(sctx, uuid, 0, NULL, NULL, &sess_id,
+					&ret_origin);
+	if (unlikely(rc != 0))
+		goto failed;
+
+	rc = dx_sepapp_command_invoke(sctx, sess_id, command,
+					&cmd_params, &ret_origin);
+
+	dx_sepapp_session_close(sctx, sess_id);
+
+failed:
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_put();
+#endif
+
+	dx_sepapp_context_free(sctx);
+	return rc;
+}
+
+int sepapp_image_verify(u8 *addr, ssize_t size, u32 key_index, u32 magic_num)
+{
+	pr_info("image verify: addr 0x%p size: %zd key_index: 0x%08X magic_num: 0x%08X\n",
+		addr, size, key_index, magic_num);
+	return execute_sep(CMD_IMAGE_VERIFY, addr, size, key_index, magic_num);
+}
+EXPORT_SYMBOL(sepapp_image_verify);
+
+/**
+ * sepapp_key_validity_check() - Check VRL header against active key policy.
+ *
+ * @addr: Address of buffer containing the VRL header.
+ * @size: Size of the buffer. (normal VRL 728 bytes)
+ * @flags: VRL_ALLOW_ALL_KEY_INDEXES
+ *          - Ignore key policy and allow all keys found in the device.
+ *
+ * Returns 0 on success
+ */
+int sepapp_key_validity_check(u8 *addr, ssize_t size, u32 flags)
+{
+	pr_info("validity: addr 0x%p size: %zd flags: 0x%08X\n",
+		addr, size, flags);
+	return execute_sep(CMD_KEYPOLICY_CHECK, addr, size, flags, 0);
+}
+EXPORT_SYMBOL(sepapp_key_validity_check);
+
+int sepapp_hdmi_status(u8 status, u8 bksv[5])
+{
+	int sess_id = 0;
+	enum dxdi_sep_module ret_origin;
+	struct sep_client_ctx *sctx = NULL;
+	u8 uuid[16] = HDCP_APP_UUID;
+	struct dxdi_sepapp_kparams cmd_params;
+	int rc = 0;
+
+	pr_info("Hdmi status: status 0x%02x\n", status);
+
+	memset(&cmd_params, 0, sizeof(struct dxdi_sepapp_kparams));
+
+	cmd_params.params_types[0] = DXDI_SEPAPP_PARAM_VAL;
+	cmd_params.params[0].val.data[0] = status;
+	cmd_params.params[0].val.data[1] = 0;
+	cmd_params.params[0].val.copy_dir = DXDI_DATA_TO_DEVICE;
+
+	cmd_params.params_types[1] = DXDI_SEPAPP_PARAM_VAL;
+	memcpy(&cmd_params.params[1].val.data[0], bksv, sizeof(u32));
+	memcpy((uint8_t *)&cmd_params.params[1].val.data[1],
+		&bksv[4], sizeof(u8));
+	cmd_params.params[1].val.copy_dir = DXDI_DATA_TO_DEVICE;
+
+	sctx = dx_sepapp_context_alloc();
+	if (unlikely(!sctx))
+		return -ENOMEM;
+
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_get();
+#endif
+
+	rc = dx_sepapp_session_open(sctx, uuid, 0, NULL, NULL, &sess_id,
+				    &ret_origin);
+	if (unlikely(rc != 0))
+		goto failed;
+
+	rc = dx_sepapp_command_invoke(sctx, sess_id, HDCP_RX_HDMI_STATUS,
+				      &cmd_params, &ret_origin);
+
+	dx_sepapp_session_close(sctx, sess_id);
+
+failed:
+#ifdef SEP_RUNTIME_PM
+	dx_sep_pm_runtime_put();
+#endif
+
+	dx_sepapp_context_free(sctx);
+	return rc;
+}
+EXPORT_SYMBOL(sepapp_hdmi_status);
diff --git a/drivers/staging/sep54/sepapp.h b/drivers/staging/sep54/sepapp.h
new file mode 100644
index 0000000..ebc24d7
--- /dev/null
+++ b/drivers/staging/sep54/sepapp.h
@@ -0,0 +1,52 @@
+/*******************************************************************
+* (c) Copyright 2011-2012 Discretix Technologies Ltd.              *
+* This software is protected by copyright, international           *
+* treaties and patents, and distributed under multiple licenses.   *
+* Any use of this Software as part of the Discretix CryptoCell or  *
+* Packet Engine products requires a commercial license.            *
+* Copies of this Software that are distributed with the Discretix  *
+* CryptoCell or Packet Engine product drivers, may be used in      *
+* accordance with a commercial license, or at the user's option,   *
+* used and redistributed under the terms and conditions of the GNU *
+* General Public License ("GPL") version 2, as published by the    *
+* Free Software Foundation.                                        *
+* This program is distributed in the hope that it will be useful,  *
+* but WITHOUT ANY LIABILITY AND WARRANTY; without even the implied *
+* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU General Public License version 2 for more details.   *
+* You should have received a copy of the GNU General Public        *
+* License version 2 along with this Software; if not, please write *
+* to the Free Software Foundation, Inc., 59 Temple Place - Suite   *
+* 330, Boston, MA 02111-1307, USA.                                 *
+* Any copy or reproduction of this Software, as permitted under    *
+* the GNU General Public License version 2, must include this      *
+* Copyright Notice as well as any other notices provided under     *
+* the said license.                                                *
+********************************************************************/
+/** SeP Applets support module */
+#ifndef _SEPAPP_H_
+#define _SEPAPP_H_
+
+#include "dx_driver_abi.h"
+#include "dx_driver.h"
+
+/* Global drvdata to be used by kernel clients via dx_sepapp_ API */
+int sep_ioctl_sepapp_session_open(struct sep_client_ctx *client_ctx,
+				  unsigned long arg);
+
+int sep_ioctl_sepapp_session_close(struct sep_client_ctx *client_ctx,
+				   unsigned long arg);
+
+int sep_ioctl_sepapp_command_invoke(struct sep_client_ctx *client_ctx,
+				    unsigned long arg);
+
+void dx_sepapp_init(struct sep_drvdata *drvdata);
+
+int sepapp_session_close(struct sep_op_ctx *op_ctx, int session_id);
+
+int sepapp_image_verify(u8 *addr, ssize_t size, u32 key_index, u32 magic_num);
+
+int sepapp_key_validity_check(u8 *addr, ssize_t size, u32 flags);
+
+int sepapp_hdmi_status(u8 status, u8 bksv[5]);
+#endif /*_SEPAPP_H_*/
diff --git a/drivers/switch/Kconfig b/drivers/switch/Kconfig
new file mode 100644
index 0000000..19404b6
--- /dev/null
+++ b/drivers/switch/Kconfig
@@ -0,0 +1,15 @@
+menuconfig SWITCH
+	tristate "Switch class support"
+	help
+	  Say Y here to enable switch class support. This allows
+	  userspace to monitor switches via sysfs and uevent.
+
+if SWITCH
+
+config SWITCH_GPIO
+	tristate "GPIO Swith support"
+	depends on GPIOLIB
+	help
+	  Say Y here to enable GPIO based switch support.
+
+endif # SWITCH
diff --git a/drivers/switch/Makefile b/drivers/switch/Makefile
new file mode 100644
index 0000000..f7606ed
--- /dev/null
+++ b/drivers/switch/Makefile
@@ -0,0 +1,4 @@
+# Switch Class Driver
+obj-$(CONFIG_SWITCH)		+= switch_class.o
+obj-$(CONFIG_SWITCH_GPIO)	+= switch_gpio.o
+
diff --git a/drivers/switch/switch_class.c b/drivers/switch/switch_class.c
new file mode 100644
index 0000000..e373b62
--- /dev/null
+++ b/drivers/switch/switch_class.c
@@ -0,0 +1,174 @@
+/*
+ *  drivers/switch/switch_class.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/switch.h>
+
+struct class *switch_class;
+static atomic_t device_count;
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct switch_dev *sdev = (struct switch_dev *)
+		dev_get_drvdata(dev);
+
+	if (sdev->print_state) {
+		int ret = sdev->print_state(sdev, buf);
+		if (ret >= 0)
+			return ret;
+	}
+	return sprintf(buf, "%d\n", sdev->state);
+}
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct switch_dev *sdev = (struct switch_dev *)
+		dev_get_drvdata(dev);
+
+	if (sdev->print_name) {
+		int ret = sdev->print_name(sdev, buf);
+		if (ret >= 0)
+			return ret;
+	}
+	return sprintf(buf, "%s\n", sdev->name);
+}
+
+static DEVICE_ATTR(state, S_IRUGO, state_show, NULL);
+static DEVICE_ATTR(name, S_IRUGO, name_show, NULL);
+
+void switch_set_state(struct switch_dev *sdev, int state)
+{
+	char name_buf[120];
+	char state_buf[120];
+	char *prop_buf;
+	char *envp[3];
+	int env_offset = 0;
+	int length;
+
+	if (sdev->state != state) {
+		sdev->state = state;
+
+		prop_buf = (char *)get_zeroed_page(GFP_KERNEL);
+		if (prop_buf) {
+			length = name_show(sdev->dev, NULL, prop_buf);
+			if (length > 0) {
+				if (prop_buf[length - 1] == '\n')
+					prop_buf[length - 1] = 0;
+				snprintf(name_buf, sizeof(name_buf),
+					"SWITCH_NAME=%s", prop_buf);
+				envp[env_offset++] = name_buf;
+			}
+			length = state_show(sdev->dev, NULL, prop_buf);
+			if (length > 0) {
+				if (prop_buf[length - 1] == '\n')
+					prop_buf[length - 1] = 0;
+				snprintf(state_buf, sizeof(state_buf),
+					"SWITCH_STATE=%s", prop_buf);
+				envp[env_offset++] = state_buf;
+			}
+			envp[env_offset] = NULL;
+			kobject_uevent_env(&sdev->dev->kobj, KOBJ_CHANGE, envp);
+			free_page((unsigned long)prop_buf);
+		} else {
+			printk(KERN_ERR "out of memory in switch_set_state\n");
+			kobject_uevent(&sdev->dev->kobj, KOBJ_CHANGE);
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(switch_set_state);
+
+static int create_switch_class(void)
+{
+	if (!switch_class) {
+		switch_class = class_create(THIS_MODULE, "switch");
+		if (IS_ERR(switch_class))
+			return PTR_ERR(switch_class);
+		atomic_set(&device_count, 0);
+	}
+
+	return 0;
+}
+
+int switch_dev_register(struct switch_dev *sdev)
+{
+	int ret;
+
+	if (!switch_class) {
+		ret = create_switch_class();
+		if (ret < 0)
+			return ret;
+	}
+
+	sdev->index = atomic_inc_return(&device_count);
+	sdev->dev = device_create(switch_class, NULL,
+		MKDEV(0, sdev->index), NULL, sdev->name);
+	if (IS_ERR(sdev->dev))
+		return PTR_ERR(sdev->dev);
+
+	ret = device_create_file(sdev->dev, &dev_attr_state);
+	if (ret < 0)
+		goto err_create_file_1;
+	ret = device_create_file(sdev->dev, &dev_attr_name);
+	if (ret < 0)
+		goto err_create_file_2;
+
+	dev_set_drvdata(sdev->dev, sdev);
+	sdev->state = 0;
+	return 0;
+
+err_create_file_2:
+	device_remove_file(sdev->dev, &dev_attr_state);
+err_create_file_1:
+	device_destroy(switch_class, MKDEV(0, sdev->index));
+	printk(KERN_ERR "switch: Failed to register driver %s\n", sdev->name);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(switch_dev_register);
+
+void switch_dev_unregister(struct switch_dev *sdev)
+{
+	device_remove_file(sdev->dev, &dev_attr_name);
+	device_remove_file(sdev->dev, &dev_attr_state);
+	device_destroy(switch_class, MKDEV(0, sdev->index));
+	dev_set_drvdata(sdev->dev, NULL);
+}
+EXPORT_SYMBOL_GPL(switch_dev_unregister);
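+
+/*
+ * Usage sketch (assumption, for illustration only): a driver embeds a
+ * struct switch_dev, registers it, and reports state changes; userspace
+ * then sees SWITCH_NAME/SWITCH_STATE uevents plus the name/state sysfs
+ * attributes created above. The "headset" switch here is hypothetical.
+ *
+ *	static struct switch_dev my_switch = { .name = "headset" };
+ *
+ *	ret = switch_dev_register(&my_switch);
+ *	...
+ *	switch_set_state(&my_switch, 1);	/* plugged: emits KOBJ_CHANGE */
+ *	switch_set_state(&my_switch, 0);	/* unplugged */
+ *	switch_dev_unregister(&my_switch);
+ */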
+
+static int __init switch_class_init(void)
+{
+	return create_switch_class();
+}
+
+static void __exit switch_class_exit(void)
+{
+	class_destroy(switch_class);
+}
+
+module_init(switch_class_init);
+module_exit(switch_class_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("Switch class driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/switch/switch_gpio.c b/drivers/switch/switch_gpio.c
new file mode 100644
index 0000000..621d62d
--- /dev/null
+++ b/drivers/switch/switch_gpio.c
@@ -0,0 +1,172 @@
+/*
+ *  drivers/switch/switch_gpio.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/switch.h>
+#include <linux/workqueue.h>
+#include <linux/gpio.h>
+
+struct gpio_switch_data {
+	struct switch_dev sdev;
+	unsigned gpio;
+	const char *name_on;
+	const char *name_off;
+	const char *state_on;
+	const char *state_off;
+	int irq;
+	struct work_struct work;
+};
+
+static void gpio_switch_work(struct work_struct *work)
+{
+	int state;
+	struct gpio_switch_data	*data =
+		container_of(work, struct gpio_switch_data, work);
+
+	state = gpio_get_value(data->gpio);
+	switch_set_state(&data->sdev, state);
+}
+
+static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
+{
+	struct gpio_switch_data *switch_data =
+	    (struct gpio_switch_data *)dev_id;
+
+	schedule_work(&switch_data->work);
+	return IRQ_HANDLED;
+}
+
+static ssize_t switch_gpio_print_state(struct switch_dev *sdev, char *buf)
+{
+	struct gpio_switch_data	*switch_data =
+		container_of(sdev, struct gpio_switch_data, sdev);
+	const char *state;
+	if (switch_get_state(sdev))
+		state = switch_data->state_on;
+	else
+		state = switch_data->state_off;
+
+	if (state)
+		return sprintf(buf, "%s\n", state);
+	return -1;
+}
+
+static int gpio_switch_probe(struct platform_device *pdev)
+{
+	struct gpio_switch_platform_data *pdata = pdev->dev.platform_data;
+	struct gpio_switch_data *switch_data;
+	int ret = 0;
+
+	if (!pdata)
+		return -EBUSY;
+
+	switch_data = kzalloc(sizeof(struct gpio_switch_data), GFP_KERNEL);
+	if (!switch_data)
+		return -ENOMEM;
+
+	switch_data->sdev.name = pdata->name;
+	switch_data->gpio = pdata->gpio;
+	switch_data->name_on = pdata->name_on;
+	switch_data->name_off = pdata->name_off;
+	switch_data->state_on = pdata->state_on;
+	switch_data->state_off = pdata->state_off;
+	switch_data->sdev.print_state = switch_gpio_print_state;
+
+	ret = switch_dev_register(&switch_data->sdev);
+	if (ret < 0)
+		goto err_switch_dev_register;
+
+	ret = gpio_request(switch_data->gpio, pdev->name);
+	if (ret < 0)
+		goto err_request_gpio;
+
+	ret = gpio_direction_input(switch_data->gpio);
+	if (ret < 0)
+		goto err_set_gpio_input;
+
+	INIT_WORK(&switch_data->work, gpio_switch_work);
+
+	switch_data->irq = gpio_to_irq(switch_data->gpio);
+	if (switch_data->irq < 0) {
+		ret = switch_data->irq;
+		goto err_detect_irq_num_failed;
+	}
+
+	ret = request_irq(switch_data->irq, gpio_irq_handler,
+			  IRQF_TRIGGER_LOW, pdev->name, switch_data);
+	if (ret < 0)
+		goto err_request_irq;
+
+	/* Perform initial detection */
+	gpio_switch_work(&switch_data->work);
+
+	return 0;
+
+err_request_irq:
+err_detect_irq_num_failed:
+err_set_gpio_input:
+	gpio_free(switch_data->gpio);
+err_request_gpio:
+	switch_dev_unregister(&switch_data->sdev);
+err_switch_dev_register:
+	kfree(switch_data);
+
+	return ret;
+}
+
+static int gpio_switch_remove(struct platform_device *pdev)
+{
+	struct gpio_switch_data *switch_data = platform_get_drvdata(pdev);
+
+	cancel_work_sync(&switch_data->work);
+	gpio_free(switch_data->gpio);
+	switch_dev_unregister(&switch_data->sdev);
+	kfree(switch_data);
+
+	return 0;
+}
+
+static struct platform_driver gpio_switch_driver = {
+	.probe		= gpio_switch_probe,
+	.remove		= gpio_switch_remove,
+	.driver		= {
+		.name	= "switch-gpio",
+		.owner	= THIS_MODULE,
+	},
+};
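+
+/*
+ * Board-code sketch (assumption, for illustration): this driver binds to a
+ * "switch-gpio" platform device carrying struct gpio_switch_platform_data.
+ * The switch name and GPIO number below are hypothetical.
+ *
+ *	static struct gpio_switch_platform_data hp_switch_pdata = {
+ *		.name = "h2w",
+ *		.gpio = 42,
+ *	};
+ *
+ *	static struct platform_device hp_switch_device = {
+ *		.name = "switch-gpio",
+ *		.dev  = { .platform_data = &hp_switch_pdata },
+ *	};
+ */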
+
+static int __init gpio_switch_init(void)
+{
+	return platform_driver_register(&gpio_switch_driver);
+}
+
+static void __exit gpio_switch_exit(void)
+{
+	platform_driver_unregister(&gpio_switch_driver);
+}
+
+module_init(gpio_switch_init);
+module_exit(gpio_switch_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("GPIO Switch driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 5e3c025..e0fb348 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -169,4 +169,32 @@
 	  enforce idle time which results in more package C-state residency. The
 	  user interface is exposed via generic thermal framework.
 
+config SENSORS_THERMAL_MRFLD
+	tristate "Thermal driver for Intel Merrifield platform"
+	depends on THERMAL && IIO && IIO_BASINCOVE_GPADC
+	help
+	  Say Y here to enable thermal driver on Intel Merrifield platform.
+
+	  To load this driver as a module, select M here. The module
+	  will be called "mrfl_thermal"
+
+config INTEL_BYT_EC_THERMAL
+	tristate "Thermal driver for Intel Baytrail platform"
+	depends on THERMAL && INTEL_BYT_EC
+	help
+	  Say Y here to enable thermal driver on Intel Baytrail-M platform.
+
+	  To load this driver as a module, select M here. The module
+	  will be called "byt_ec_thermal"
+
+config SOC_THERMAL
+	tristate "SoC Thermal driver"
+	depends on THERMAL
+	help
+	  The SoC Thermal driver registers with the generic thermal framework
+	  and exposes SoC DTS and aux trip point values through the framework.
+
+	  Say Y here to enable thermal driver on Intel Merrifield
+	  platform. To load this driver as a module, select M here.
+
 endif
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index c054d41..46c9aa5 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -2,9 +2,18 @@
 # Makefile for sensor chip drivers.
 #
 
+CFLAGS_intel_mrfl_thermal.o := -Werror
+CFLAGS_intel_soc_thermal.o := -Werror
+CFLAGS_thermal_core.o := -Werror
+
 obj-$(CONFIG_THERMAL)		+= thermal_sys.o
 thermal_sys-y			+= thermal_core.o
 
+obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o
+obj-$(CONFIG_INTEL_BYT_THERMAL)  += intel_byt_thermal.o
+obj-$(CONFIG_SENSORS_THERMAL_MRFLD)     += intel_mrfl_thermal.o
+obj-$(CONFIG_SOC_THERMAL)     += intel_soc_thermal.o
+
 # governors
 thermal_sys-$(CONFIG_THERMAL_GOV_FAIR_SHARE)	+= fair_share.o
 thermal_sys-$(CONFIG_THERMAL_GOV_STEP_WISE)	+= step_wise.o
diff --git a/drivers/thermal/intel_mid_thermal.c b/drivers/thermal/intel_mid_thermal.c
new file mode 100644
index 0000000..d577046
--- /dev/null
+++ b/drivers/thermal/intel_mid_thermal.c
@@ -0,0 +1,754 @@
+/*
+ * intel_mid_thermal.c - Intel MID platform thermal driver
+ *
+ * Copyright (C) 2010 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Ananth Krishna <ananth.krishna.r@intel.com>
+ * Author: Durgadoss <durgadoss.r@intel.com>
+ */
+
+#define pr_fmt(fmt)  "intel_mid_thermal: " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/param.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/rpmsg.h>
+
+#include <linux/slab.h>
+#include <linux/pm.h>
+#include <linux/thermal.h>
+
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel_mid_gpadc.h>
+#include <asm/intel_mid_thermal.h>
+#include <asm/intel_mid_rpmsg.h>
+
+#define DRIVER_NAME "msic_thermal"
+
+/* Cooling device attributes */
+#define SOC_IPC_COMMAND		0xCF
+
+enum {
+	NORMAL = 0,
+	WARNING,
+	ALERT,
+	CRITICAL
+} thermal_state;
+
+enum {
+	SOC_SKIN_NORMAL = 0,
+	SOC_SKIN_WARM = 2,
+	SOC_SKIN_PROCHOT,
+	SOC_MAX_STATES
+} soc_skin_state;
+
+/* MSIC die attributes */
+#define MSIC_DIE_ADC_MIN	488
+#define MSIC_DIE_ADC_MAX	1004
+
+#define TABLE_LENGTH 24
+/*
+ * ADC code vs Temperature table
+ * This table will be different for different thermistors
+ * Row 0: ADC code
+ * Row 1: Temperature (in degrees Celsius)
+ */
+static const int adc_code[2][TABLE_LENGTH] = {
+	{977, 961, 941, 917, 887, 853, 813, 769, 720, 669, 615, 561, 508, 456,
+		407, 357, 315, 277, 243, 212, 186, 162, 140, 107},
+	{-20, -15, -10, -5, 0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60,
+		65, 70, 75, 80, 85, 90, 100},
+	};
+
+struct ts_cache_info {
+	bool is_cached_data_initialized;
+	struct mutex lock;
+	int *cached_values;
+	unsigned long last_updated;
+};
+
+struct soc_cooling_device_info {
+	unsigned long soc_state;
+	struct mutex lock_cool_state;
+};
+
+static struct soc_cooling_device_info soc_cdev_info;
+
+struct platform_info {
+	struct platform_device *pdev;
+	struct thermal_zone_device **tzd;
+	struct ts_cache_info cacheinfo;
+	/* ADC handle used to read sensor temperature values */
+	void *therm_adc_handle;
+	struct thermal_cooling_device *soc_cdev;
+	int num_sensors;
+	int soc_cooling;
+	struct intel_mid_thermal_sensor *sensors;
+};
+
+static struct platform_info *platforminfo;
+
+struct thermal_device_info {
+	struct intel_mid_thermal_sensor *sensor;
+};
+
+/* SoC cooling device callbacks */
+static int soc_get_max_state(struct thermal_cooling_device *cdev,
+				unsigned long *state)
+{
+	/* SoC has 4 levels of throttling from 0 to 3 */
+	*state = SOC_MAX_STATES - 1;
+	return 0;
+}
+
+static int soc_get_cur_state(struct thermal_cooling_device *cdev,
+				unsigned long *state)
+{
+	mutex_lock(&soc_cdev_info.lock_cool_state);
+	*state = soc_cdev_info.soc_state;
+	mutex_unlock(&soc_cdev_info.lock_cool_state);
+	return 0;
+}
+
+static int soc_set_cur_state(struct thermal_cooling_device *cdev,
+				unsigned long state)
+{
+	int ret;
+	if (state > SOC_MAX_STATES - 1) {
+		pr_err("Invalid SoC throttle state:%ld\n", state);
+		return -EINVAL;
+	}
+
+	switch (state) {
+	/* SoC De-Throttle */
+	case NORMAL:
+		state = SOC_SKIN_NORMAL;
+		break;
+	case WARNING:
+		/*
+		 * New state is assigned based on present state.
+		 * State 1 can be reached from state 0 or 2.
+		 * State 0 to 1 means skin WARM.
+		 * State 2 to 1 means skin is no longer PROCHOT, but WARM.
+		 */
+		state = SOC_SKIN_WARM;
+		break;
+	/* SoC Throttle, PROCHOT */
+	case ALERT:
+	case CRITICAL:
+		state = SOC_SKIN_PROCHOT;
+		break;
+	}
+	/* Send IPC command to throttle SoC */
+	mutex_lock(&soc_cdev_info.lock_cool_state);
+	ret = rpmsg_send_generic_command(SOC_IPC_COMMAND, 0,
+			(u8 *) &state, 4, NULL, 0);
+	if (ret)
+		pr_err("IPC_COMMAND failed: %d\n", ret);
+	else
+		soc_cdev_info.soc_state = state;
+
+	mutex_unlock(&soc_cdev_info.lock_cool_state);
+	return ret;
+}
+
+static struct thermal_cooling_device_ops soc_cooling_ops = {
+	.get_max_state = soc_get_max_state,
+	.get_cur_state = soc_get_cur_state,
+	.set_cur_state = soc_set_cur_state,
+};
+
+static int register_soc_as_cdev(void)
+{
+	int ret = 0;
+	platforminfo->soc_cdev = thermal_cooling_device_register("SoC", NULL,
+						&soc_cooling_ops);
+	if (IS_ERR(platforminfo->soc_cdev)) {
+		ret = PTR_ERR(platforminfo->soc_cdev);
+		platforminfo->soc_cdev = NULL;
+	}
+	return ret;
+}
+
+static void unregister_soc_as_cdev(void)
+{
+	thermal_cooling_device_unregister(platforminfo->soc_cdev);
+}
+
+/**
+ * is_valid_adc - checks whether the adc code is within the defined range
+ * @adc_val: ADC value to check
+ * @min: minimum value for the sensor
+ * @max: maximum value for the sensor
+ */
+static int is_valid_adc(uint16_t adc_val, uint16_t min, uint16_t max)
+{
+	return (adc_val >= min) && (adc_val <= max);
+}
+
+/**
+ * find_adc_code - searches the ADC code using binary search
+ * @val: value to find in the array
+ *
+ * This function does binary search on an array sorted in 'descending' order
+ * Can sleep
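+ *
+ * Example: val = 800 returns index 7, because 769 < 800 < 813 in the
+ * adc_code table above; the caller then interpolates between the
+ * entries at indices 6 and 7.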
+ */
+static int find_adc_code(uint16_t val)
+{
+	int left = 0;
+	int right = TABLE_LENGTH - 1;
+	int mid;
+	while (left <= right) {
+		mid = (left + right)/2;
+		if (val == adc_code[0][mid] ||
+			(mid > 0 &&
+			val > adc_code[0][mid] && val < adc_code[0][mid-1]))
+			return mid;
+		else if (val > adc_code[0][mid])
+			right = mid - 1;
+		else if (val < adc_code[0][mid])
+			left = mid + 1;
+	}
+	return -1;
+}
+
+/**
+ * linear_interpolate - interpolates between table entries to find temperature
+ * @indx: index of the minimum(x0) of the two ADC codes
+ * @adc_val: ADC code(x) at which temperature(y) should be found
+ *
+ * Returns the temperature in milli degrees Celsius.
+ * Can sleep
+ */
+static int linear_interpolate(int indx, uint16_t adc_val)
+{
+	int x = adc_val;
+	int x0 = adc_code[0][indx];
+	int x1 = adc_code[0][indx - 1];
+	int y0 = adc_code[1][indx];
+	int y1 = adc_code[1][indx - 1];
+
+	/*
+	 * Find y:
+	 * Of course, we can avoid these variables, but keep them
+	 * for readability and maintainability.
+	 */
+	int numerator = (x-x0)*y1 + (x1-x)*y0;
+	int denominator = x1-x0;
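+
+	/*
+	 * Worked example with the adc_code table above: adc_val = 700
+	 * gives indx = 9, so x0 = 669 (25 C) and x1 = 720 (20 C);
+	 * numerator = 31*20 + 20*25 = 1120, denominator = 51, and the
+	 * function returns 1120 * 1000 / 51 = 21960 mC (~22 C).
+	 */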
+
+	/*
+	 * We have to report the temperature in milli degree celsius.
+	 * So, to reduce the loss of precision, do (Nr*1000)/Dr, instead
+	 * of (Nr/Dr)*1000.
+	 */
+	 return (numerator * 1000)/denominator;
+}
+
+/**
+ * adc_to_temp - converts the ADC code to temperature in milli degrees C
+ * @sensor: sensor whose ADC reading is being converted
+ * @adc_val: the adc_val that needs to be converted
+ * @tp: temperature return value
+ *
+ * Can sleep
+ */
+static int adc_to_temp(struct intel_mid_thermal_sensor *sensor,
+		uint16_t adc_val, long *tp)
+{
+	int indx;
+
+	/* Direct conversion for msic die temperature */
+	if (sensor->direct) {
+		if (is_valid_adc(adc_val, MSIC_DIE_ADC_MIN, MSIC_DIE_ADC_MAX)) {
+			*tp = sensor->slope * adc_val - sensor->intercept;
+			return 0;
+		}
+		return -ERANGE;
+	}
+
+	indx = find_adc_code(adc_val);
+	if (indx < 0)
+		return -ERANGE;
+
+	if (adc_code[0][indx] == adc_val) {
+		/* Convert temperature in celsius to milli degree celsius */
+		*tp = adc_code[1][indx] * 1000;
+		return 0;
+	}
+
+	/*
+	 * The ADC code is in between two values directly defined in the
+	 * table. So, do linear interpolation to calculate the temperature.
+	 */
+	*tp = linear_interpolate(indx, adc_val);
+	return 0;
+}
+
+int skin0_temp_correlation(void *info, long temp, long *res)
+{
+	struct intel_mid_thermal_sensor *sensor = info;
+
+	*res = ((temp * sensor->slope) / 1000) + sensor->intercept;
+
+	return 0;
+}
+
+int bptherm_temp_correlation(void *info, long temp, long *res)
+{
+	struct intel_mid_thermal_sensor *sensor = info;
+
+	*res = ((temp * sensor->slope) / 1000) + sensor->intercept;
+
+	return 0;
+}
+
+int skin1_temp_correlation(void *info, long temp, long *res)
+{
+	struct intel_mid_thermal_sensor *sensor = info;
+	struct intel_mid_thermal_sensor *dsensor; /* dependent sensor */
+	struct skin1_private_info *skin_info;
+	long sensor_temp = 0, curr_temp;
+	int ret, index;
+
+	skin_info = sensor->priv;
+
+	*res = ((temp * sensor->slope) / 1000) + sensor->intercept;
+
+	/* If we do not have dependent sensors, just return. Not an error */
+	if (!skin_info || !skin_info->dependent || !skin_info->sensors)
+		return 0;
+
+	for (index = 0; index < skin_info->dependent; index++) {
+		if (!skin_info->sensors[index])
+			continue;
+
+		dsensor = skin_info->sensors[index];
+
+		ret = adc_to_temp(dsensor,
+			platforminfo->cacheinfo.cached_values[dsensor->index],
+			&curr_temp);
+		if (ret)
+			return ret;
+
+		if (dsensor->temp_correlation)
+			dsensor->temp_correlation(dsensor, curr_temp,
+						&sensor_temp);
+
+		if (sensor_temp > *res)
+			*res = sensor_temp;
+	}
+
+	return 0;
+}
+
+/**
+ * mid_read_temp - read sensors for temperature
+ * @tzd: thermal zone whose sensor is being read
+ * @temp: holds the current temperature for the sensor after reading
+ *
+ * Reads the adc_code from the channel and converts it to real
+ * temperature. The converted value is stored in temp.
+ *
+ * Can sleep
+ */
+static int mid_read_temp(struct thermal_zone_device *tzd, long *temp)
+{
+	struct thermal_device_info *td_info = tzd->devdata;
+	int ret;
+	long curr_temp;
+	int indx = td_info->sensor->index; /* Required Index */
+
+	mutex_lock(&platforminfo->cacheinfo.lock);
+
+	if (!platforminfo->cacheinfo.is_cached_data_initialized ||
+	time_after(jiffies, platforminfo->cacheinfo.last_updated + HZ)) {
+		ret = get_gpadc_sample(platforminfo->therm_adc_handle, 1,
+					platforminfo->cacheinfo.cached_values);
+		if (ret)
+			goto exit;
+		platforminfo->cacheinfo.last_updated = jiffies;
+		platforminfo->cacheinfo.is_cached_data_initialized = true;
+	}
+
+	/* Convert ADC value to temperature */
+	ret = adc_to_temp(td_info->sensor,
+		platforminfo->cacheinfo.cached_values[indx], &curr_temp);
+	if (ret)
+		goto exit;
+
+	if (td_info->sensor->temp_correlation)
+		ret = td_info->sensor->temp_correlation(td_info->sensor,
+							curr_temp, temp);
+	else
+		*temp = curr_temp;
+
+exit:
+	mutex_unlock(&platforminfo->cacheinfo.lock);
+	return ret;
+}
+
+/**
+ * initialize_sensor - initializes ADC information for each sensor
+ * @sensor: sensor to attach as the thermal zone's private data
+ *
+ * Context: can sleep
+ */
+static struct thermal_device_info *initialize_sensor(
+			struct intel_mid_thermal_sensor *sensor)
+{
+	struct thermal_device_info *td_info =
+		kzalloc(sizeof(struct thermal_device_info), GFP_KERNEL);
+
+	if (!td_info)
+		return NULL;
+
+	td_info->sensor = sensor;
+
+	return td_info;
+}
+
+/**
+ * mid_thermal_resume - resume routine
+ * @dev: device structure
+ */
+static int mid_thermal_resume(struct device *dev)
+{
+	return 0;
+}
+
+/**
+ * mid_thermal_suspend - suspend routine
+ * @dev: device structure
+ */
+static int mid_thermal_suspend(struct device *dev)
+{
+	return 0;
+}
+
+#ifdef CONFIG_DEBUG_THERMAL
+static int read_slope(struct thermal_zone_device *tzd, long *slope)
+{
+	struct thermal_device_info *td_info = tzd->devdata;
+
+	*slope = td_info->sensor->slope;
+
+	return 0;
+}
+
+static int update_slope(struct thermal_zone_device *tzd, long slope)
+{
+	struct thermal_device_info *td_info = tzd->devdata;
+
+	td_info->sensor->slope = slope;
+
+	return 0;
+}
+
+static int read_intercept(struct thermal_zone_device *tzd, long *intercept)
+{
+	struct thermal_device_info *td_info = tzd->devdata;
+
+	*intercept = td_info->sensor->intercept;
+
+	return 0;
+}
+
+static int update_intercept(struct thermal_zone_device *tzd, long intercept)
+{
+	struct thermal_device_info *td_info = tzd->devdata;
+
+	td_info->sensor->intercept = intercept;
+
+	return 0;
+}
+#endif
+
+/**
+ * read_curr_temp - reads the current temperature and stores in temp
+ * @tzd: thermal zone being queried
+ * @temp: holds the current temperature value after reading
+ *
+ * Can sleep
+ */
+static int read_curr_temp(struct thermal_zone_device *tzd, long *temp)
+{
+	return (tzd) ? mid_read_temp(tzd, temp) : -EINVAL;
+}
+
+/* Can't be const */
+static struct thermal_zone_device_ops tzd_ops = {
+	.get_temp = read_curr_temp,
+#ifdef CONFIG_DEBUG_THERMAL
+	.get_slope = read_slope,
+	.set_slope = update_slope,
+	.get_intercept = read_intercept,
+	.set_intercept = update_intercept,
+#endif
+};
+
+/**
+ * mid_thermal_probe - Medfield thermal initialize
+ * @pdev: platform device structure
+ *
+ * mid thermal probe initializes the hardware and registers
+ * all the sensors with the generic thermal framework. Can sleep.
+ */
+static int mid_thermal_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	int i;
+	int *adc_channel_info;
+	struct intel_mid_thermal_platform_data *pdata;
+
+	pdata = pdev->dev.platform_data;
+
+	if (!pdata)
+		return -EINVAL;
+
+	platforminfo = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
+
+	if (!platforminfo)
+		return -ENOMEM;
+
+	platforminfo->num_sensors = pdata->num_sensors;
+	platforminfo->soc_cooling = pdata->soc_cooling;
+	platforminfo->sensors = pdata->sensors;
+
+	platforminfo->tzd = kzalloc(
+	(sizeof(struct thermal_zone_device *) * platforminfo->num_sensors),
+		 GFP_KERNEL);
+
+	if (!platforminfo->tzd) {
+		ret = -ENOMEM;
+		goto platforminfo_alloc_fail;
+	}
+
+	platforminfo->cacheinfo.cached_values =
+		kzalloc((sizeof(int) * platforminfo->num_sensors), GFP_KERNEL);
+
+	if (!platforminfo->cacheinfo.cached_values) {
+		ret = -ENOMEM;
+		goto tzd_alloc_fail;
+	}
+
+	adc_channel_info = kzalloc((sizeof(int) * platforminfo->num_sensors),
+			GFP_KERNEL);
+
+	if (!adc_channel_info) {
+		ret = -ENOMEM;
+		goto cachedinfo_alloc_fail;
+	}
+
+	/* initialize mutex locks */
+	mutex_init(&platforminfo->cacheinfo.lock);
+
+
+	if (platforminfo->soc_cooling)
+		mutex_init(&soc_cdev_info.lock_cool_state);
+
+	for (i = 0; i < platforminfo->num_sensors; i++)
+		adc_channel_info[i] = platforminfo->sensors[i].adc_channel;
+
+	/* Allocate ADC channels for all sensors */
+	platforminfo->therm_adc_handle = gpadc_alloc_channels(
+				platforminfo->num_sensors, adc_channel_info);
+
+	if (!platforminfo->therm_adc_handle) {
+		ret = -ENOMEM;
+		goto adc_channel_alloc_fail;
+	}
+
+	/* Register each sensor with the generic thermal framework*/
+	for (i = 0; i < platforminfo->num_sensors; i++) {
+		platforminfo->tzd[i] = thermal_zone_device_register(
+				platforminfo->sensors[i].name, 0, 0,
+				initialize_sensor(&platforminfo->sensors[i]),
+				&tzd_ops, NULL, 0, 0);
+		if (IS_ERR(platforminfo->tzd[i]))
+			goto reg_fail;
+	}
+
+	platforminfo->pdev = pdev;
+
+	platform_set_drvdata(pdev, platforminfo);
+
+	/* Register SoC as a cooling device */
+	if (platforminfo->soc_cooling) {
+		ret = register_soc_as_cdev();
+		/* Log this, but keep the driver loaded */
+		if (ret) {
+			dev_err(&pdev->dev,
+				"register_soc_as_cdev failed:%d\n", ret);
+		}
+	}
+
+	kfree(adc_channel_info);
+
+	return 0;
+
+reg_fail:
+	ret = PTR_ERR(platforminfo->tzd[i]);
+	while (--i >= 0)
+		thermal_zone_device_unregister(platforminfo->tzd[i]);
+	intel_mid_gpadc_free(platforminfo->therm_adc_handle);
+adc_channel_alloc_fail:
+	kfree(adc_channel_info);
+cachedinfo_alloc_fail:
+	kfree(platforminfo->cacheinfo.cached_values);
+tzd_alloc_fail:
+	kfree(platforminfo->tzd);
+platforminfo_alloc_fail:
+	kfree(platforminfo);
+	return ret;
+}
+
+/**
+ * mid_thermal_remove - Medfield thermal finalize
+ * @pdev: platform device structure
+ *
+ * Medfield thermal remove unregisters all the sensors from the generic
+ * thermal framework. Can sleep.
+ */
+static int mid_thermal_remove(struct platform_device *pdev)
+{
+	int i;
+
+	for (i = 0; i < platforminfo->num_sensors; i++)
+		thermal_zone_device_unregister(platforminfo->tzd[i]);
+
+	/* Unregister SoC as cooling device */
+	if (platforminfo->soc_cooling)
+		unregister_soc_as_cdev();
+
+	/* Free the allocated ADC channels */
+	if (platforminfo->therm_adc_handle)
+		intel_mid_gpadc_free(platforminfo->therm_adc_handle);
+
+	kfree(platforminfo->cacheinfo.cached_values);
+	kfree(platforminfo->tzd);
+	kfree(platforminfo);
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+/*********************************************************************
+ *		Driver initialization and finalization
+ *********************************************************************/
+
+
+/* Platform device functionality */
+
+static const struct dev_pm_ops msic_thermal_pm_ops = {
+	.suspend = mid_thermal_suspend,
+	.resume = mid_thermal_resume,
+};
+
+static const struct platform_device_id mid_therm_table[] = {
+	{ DRIVER_NAME, 1 },
+	{ },
+};
+
+static struct platform_driver mid_therm_driver = {
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.pm = &msic_thermal_pm_ops,
+	},
+	.probe = mid_thermal_probe,
+	.remove = mid_thermal_remove,
+	.id_table = mid_therm_table,
+};
+
+static int mid_therm_module_init(void)
+{
+	return platform_driver_register(&mid_therm_driver);
+}
+
+static void mid_therm_module_exit(void)
+{
+	platform_driver_unregister(&mid_therm_driver);
+}
+
+
+/* RPMSG related functionality */
+
+static int mid_therm_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed mid_therm rpmsg device\n");
+
+	ret = mid_therm_module_init();
+out:
+	return ret;
+}
+
+static void mid_therm_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	mid_therm_module_exit();
+	dev_info(&rpdev->dev, "Removed mid_therm rpmsg device\n");
+}
+
+static void mid_therm_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+			int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+				data, len, true);
+}
+
+static struct rpmsg_device_id mid_therm_id_table[] = {
+	{ .name = "rpmsg_mid_thermal" },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(rpmsg, mid_therm_id_table);
+
+static struct rpmsg_driver mid_therm_rpmsg_driver = {
+	.drv.name	= DRIVER_NAME,
+	.drv.owner	= THIS_MODULE,
+	.probe		= mid_therm_rpmsg_probe,
+	.callback	= mid_therm_rpmsg_cb,
+	.remove		= mid_therm_rpmsg_remove,
+	.id_table	= mid_therm_id_table,
+};
+
+static int __init mid_therm_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&mid_therm_rpmsg_driver);
+}
+
+static void __exit mid_therm_rpmsg_exit(void)
+{
+	unregister_rpmsg_driver(&mid_therm_rpmsg_driver);
+}
+
+
+/*
+ * Use late_initcall() rather than module_init(mid_therm_rpmsg_init)
+ * so that this thermal driver loads _after_ the GPADC driver it
+ * depends on.
+ */
+late_initcall(mid_therm_rpmsg_init);
+module_exit(mid_therm_rpmsg_exit);
+
+MODULE_AUTHOR("Durgadoss R <durgadoss.r@intel.com>");
+MODULE_DESCRIPTION("Intel Medfield Platform Thermal Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/thermal/intel_mrfl_thermal.c b/drivers/thermal/intel_mrfl_thermal.c
new file mode 100644
index 0000000..ab34846
--- /dev/null
+++ b/drivers/thermal/intel_mrfl_thermal.c
@@ -0,0 +1,909 @@
+/*
+ * intel_mrfl_thermal.c - Intel Merrifield Platform Thermal Driver
+ *
+ *
+ * Copyright (C) 2011 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Durgadoss R <durgadoss.r@intel.com>
+ *
+ * DEVICE_NAME: Intel Merrifield platform - PMIC: Thermal Monitor
+ */
+
+#define pr_fmt(fmt)  "intel_mrfl_thermal: " fmt
+
+#include <linux/pm.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/rpmsg.h>
+#include <linux/module.h>
+#include <linux/thermal.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_basincove_gpadc.h>
+#include <asm/intel_mid_thermal.h>
+#include <linux/iio/consumer.h>
+
+#define DRIVER_NAME "bcove_thrm"
+
+/* Number of Thermal sensors on the PMIC */
+#define PMIC_THERMAL_SENSORS	4
+
+/* Registers that govern Thermal Monitoring */
+#define THRMMONCFG	0xB3
+#define THRMMONCTL	0xB4
+#define THRMIRQ		0x04
+#define MTHRMIRQ	0x0F
+#define STHRMIRQ	0xB2
+#define IRQLVL1		0x01
+#define MIRQLVL1	0x0C
+#define IRQ_MASK_ALL	0x0F
+
+/* PMIC SRAM base address and offset for Thermal register */
+#define PMIC_SRAM_BASE_ADDR	0xFFFFF610
+#define PMIC_SRAM_THRM_OFFSET	0x03
+#define IOMAP_SIZE		0x04
+
+/* NVM BANK REGISTER */
+#define EEPROM_CTRL		0x1FE
+#define EEPROM_REG15		0x1EE
+#define EEPROM_BANK1_SELECT	0x02
+#define EEPROM_BANK1_UNSELECT	0x00
+
+#define PMICALRT	(1 << 3)
+#define SYS2ALRT	(1 << 2)
+#define SYS1ALRT	(1 << 1)
+#define SYS0ALRT	(1 << 0)
+#define THERM_EN	(1 << 0)
+#define THERM_ALRT	(1 << 2)
+
+/* ADC to Temperature conversion table length */
+#define TABLE_LENGTH	34
+#define TEMP_INTERVAL	5
+
+/* Default _max 85 C */
+#define DEFAULT_MAX_TEMP	85
+
+/* Constants defined in BasinCove PMIC spec */
+#define PMIC_DIE_ADC_MIN	395
+#define PMIC_DIE_ADC_MAX	661
+#define PMIC_DIE_TEMP_MIN	-40
+#define PMIC_DIE_TEMP_MAX	125
+#define ADC_VAL_27C		470
+#define ADC_COEFFICIENT		675
+#define TEMP_OFFSET		27000
+
+/* 'enum' of Thermal sensors */
+enum thermal_sensors { SYS0, SYS1, SYS2, PMIC_DIE, _COUNT };
+
+/*
+ * Alert registers store the 'alert' temperature for each sensor,
+ * as 10 bit ADC code. The higher two bits are stored in bits[0:1] of
+ * alert_regs_h. The lower eight bits are stored in alert_regs_l.
+ * The hysteresis value is stored in bits[2:6] of alert_regs_h.
+ * Order: SYS0 SYS1 SYS2 PMIC_DIE
+ *
+ * static const int alert_regs_l[] = { 0xB7, 0xB9, 0xBB, 0xC1 };
+ */
+static const int alert_regs_h[] = { 0xB6, 0xB8, 0xBA, 0xC0 };
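+
+/*
+ * Example: a 10-bit code of 0x2A5 (677) for SYS0 is stored as
+ * bits[0:1] of register 0xB6 = 0x2 plus the low byte 0xA5 in 0xB7,
+ * which is exactly how set_tmax() programs it below.
+ */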
+
+/*
+ * ADC code vs Temperature table
+ * This table will be different for different thermistors
+ * Row 0: ADC code
+ * Row 1: Temperature (in degrees Celsius)
+ */
+static const int adc_code[2][TABLE_LENGTH] = {
+	{952, 932, 906, 877, 843, 804, 761, 714, 665, 614,
+	563, 512, 462, 415, 370, 329, 291, 257, 226, 199,
+	174, 153, 135, 119, 104, 92, 81, 72, 64, 56,
+	50, 45, 40, 36},
+	{-40, -35, -30, -25, -20, -15, -10, -5, 0, 5,
+	10, 15, 20, 25, 30, 35, 40, 45, 50, 55,
+	60, 65, 70, 75, 80, 85, 90, 95, 100, 105,
+	110, 115, 120, 125},
+	};
+
+static DEFINE_MUTEX(thrm_update_lock);
+
+struct thermal_device_info {
+	struct intel_mid_thermal_sensor *sensor;
+};
+
+struct thermal_data {
+	struct platform_device *pdev;
+	struct iio_channel *iio_chan;
+	struct thermal_zone_device **tzd;
+	void *thrm_addr;
+	unsigned int irq;
+	/* Caching information */
+	bool is_initialized;
+	unsigned long last_updated;
+	int cached_vals[PMIC_THERMAL_SENSORS];
+	int num_sensors;
+	struct intel_mid_thermal_sensor *sensors;
+};
+static struct thermal_data *tdata;
+
+static inline int adc_to_pmic_die_temp(unsigned int val)
+{
+	/* return temperature in mC */
+	return (val - ADC_VAL_27C) * ADC_COEFFICIENT + TEMP_OFFSET;
+}
+
+static inline int pmic_die_temp_to_adc(int temp)
+{
+	/* 'temp' is in C, convert to mC and then do calculations */
+	return ((temp * 1000) - TEMP_OFFSET) / ADC_COEFFICIENT + ADC_VAL_27C;
+}
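+
+/*
+ * Worked example: ADC code 510 converts to
+ * (510 - 470) * 675 + 27000 = 54000 mC (54 C), and
+ * pmic_die_temp_to_adc(54) maps 54 C back to code 510.
+ */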
+
+/**
+ * find_adc_code - searches the ADC code using binary search
+ * @val: value to find in the array
+ *
+ * This function does binary search on an array sorted in 'descending' order
+ * Can sleep
+ */
+static int find_adc_code(uint16_t val)
+{
+	int left = 0;
+	int right = TABLE_LENGTH - 1;
+	int mid;
+	while (left <= right) {
+		mid = (left + right)/2;
+		if (val == adc_code[0][mid] ||
+			(mid > 0 &&
+			val > adc_code[0][mid] && val < adc_code[0][mid-1]))
+			return mid;
+		else if (val > adc_code[0][mid])
+			right = mid - 1;
+		else if (val < adc_code[0][mid])
+			left = mid + 1;
+	}
+	return -EINVAL;
+}
+
+/**
+ * adc_to_temp - converts the ADC code to temperature in mC
+ * @direct: true if the sensor uses direct conversion
+ * @adc_val: the ADC code to be converted
+ * @tp: temperature return value
+ *
+ * Can sleep
+ */
+static int adc_to_temp(int direct, uint16_t adc_val, unsigned long *tp)
+{
+	int x0, x1, y0, y1;
+	int nr, dr;		/* Numerator & Denominator */
+	int indx;
+	int x = adc_val;
+	int8_t pmic_temp_offset;
+
+	/* Direct conversion for pmic die temperature */
+	if (direct) {
+		if (adc_val < PMIC_DIE_ADC_MIN || adc_val > PMIC_DIE_ADC_MAX)
+			return -EINVAL;
+
+		/* An offset added for pmic temp from NVM in TNG B0 */
+		intel_scu_ipc_iowrite8(EEPROM_CTRL, EEPROM_BANK1_SELECT);
+		intel_scu_ipc_ioread8(EEPROM_REG15,
+					(uint8_t *)&pmic_temp_offset);
+		intel_scu_ipc_iowrite8(EEPROM_CTRL, EEPROM_BANK1_UNSELECT);
+
+		adc_val = adc_val + pmic_temp_offset;
+
+		*tp = adc_to_pmic_die_temp(adc_val);
+		return 0;
+	}
+
+	indx = find_adc_code(adc_val);
+	if (indx < 0)
+		return -EINVAL;
+
+	if (adc_code[0][indx] == adc_val) {
+		*tp = adc_code[1][indx] * 1000;
+		return 0;
+	}
+
+	/*
+	 * The ADC code is in between two values directly defined in the
+	 * table. So, do linear interpolation to calculate the temperature.
+	 */
+	x0 = adc_code[0][indx];
+	x1 = adc_code[0][indx - 1];
+	y0 = adc_code[1][indx];
+	y1 = adc_code[1][indx - 1];
+
+	/*
+	 * Find y:
+	 * Of course, we can avoid these variables, but keep them
+	 * for readability and maintainability.
+	 */
+	nr = (x-x0)*y1 + (x1-x)*y0;
+	dr =  x1-x0;
+
+	if (!dr)
+		return -EINVAL;
+	/*
+	 * We have to report the temperature in milli degree celsius.
+	 * So, to reduce the loss of precision, do (Nr*1000)/Dr, instead
+	 * of (Nr/Dr)*1000.
+	 */
+	*tp = (nr * 1000)/dr;
+
+	return 0;
+}
+
+/**
+ * temp_to_adc - converts the temperature(in C) to ADC code
+ * @direct: true if the sensor uses direct conversion
+ * @temp: the temperature to be converted
+ * @adc_val: ADC code return value
+ *
+ * Can sleep
+ */
+static int temp_to_adc(int direct, int temp, int *adc_val)
+{
+	int indx;
+	int x0, x1, y0, y1;
+	int nr, dr;		/* Numerator & Denominator */
+	int x = temp;
+
+	/* Direct conversion for pmic die temperature */
+	if (direct) {
+		if (temp < PMIC_DIE_TEMP_MIN || temp > PMIC_DIE_TEMP_MAX)
+			return -EINVAL;
+
+		*adc_val = pmic_die_temp_to_adc(temp);
+		return 0;
+	}
+
+	if (temp < adc_code[1][0] || temp > adc_code[1][TABLE_LENGTH - 1])
+		return -EINVAL;
+
+
+	/* Find the 'indx' of this 'temp' in the table */
+	indx = (temp - adc_code[1][0]) / TEMP_INTERVAL;
+
+	if (temp == adc_code[1][indx]) {
+		*adc_val = adc_code[0][indx];
+		return 0;
+	}
+
+	/*
+	 * Temperature is not a multiple of 'TEMP_INTERVAL'. So,
+	 * do linear interpolation to obtain a better ADC code.
+	 */
+	x0 = adc_code[1][indx];
+	x1 = adc_code[1][indx + 1];
+	y0 = adc_code[0][indx];
+	y1 = adc_code[0][indx + 1];
+
+	nr = (x-x0)*y1 + (x1-x)*y0;
+	dr =  x1-x0;
+
+	if (!dr)
+		return -EINVAL;
+
+	*adc_val = nr/dr;
+
+	return 0;
+}
+
+/**
+ * set_tmax - sets the given 'adc_val' to the 'alert_reg'
+ * @alert_reg: register address
+ * @adc_val: ADC value to be programmed
+ *
+ * Not protected. Calling function should handle synchronization.
+ * Can sleep
+ */
+static int set_tmax(int alert_reg, int adc_val)
+{
+	int ret;
+
+	/* Set bits[0:1] of alert_reg_h to bits[8:9] of 'adc_val' */
+	ret = intel_scu_ipc_update_register(alert_reg, (adc_val >> 8), 0x03);
+	if (ret)
+		return ret;
+
+	/* Extract bits[0:7] of 'adc_val' and write them into alert_reg_l */
+	return intel_scu_ipc_iowrite8(alert_reg + 1, adc_val & 0xFF);
+}
+
+/**
+ * program_tmax - programs a default _max value for each sensor
+ * @dev: device pointer
+ *
+ * Can sleep
+ */
+static int program_tmax(struct device *dev)
+{
+	int i, ret;
+	int pmic_die_val, adc_val;
+
+	ret = temp_to_adc(0, DEFAULT_MAX_TEMP, &adc_val);
+	if (ret)
+		return ret;
+
+	ret = temp_to_adc(1, DEFAULT_MAX_TEMP, &pmic_die_val);
+	if (ret)
+		return ret;
+	/*
+	 * Since this function sets max value, do for all sensors even if
+	 * the sensor does not register as a thermal zone.
+	 */
+	for (i = 0; i < PMIC_THERMAL_SENSORS - 1; i++) {
+		ret = set_tmax(alert_regs_h[i], adc_val);
+		if (ret)
+			goto exit_err;
+	}
+
+	/* Set _max for pmic die sensor */
+	ret = set_tmax(alert_regs_h[i], pmic_die_val);
+	if (ret)
+		goto exit_err;
+
+	return ret;
+
+exit_err:
+	dev_err(dev, "set_tmax for channel %d failed:%d\n", i, ret);
+	return ret;
+}
+
+static int store_trip_hyst(struct thermal_zone_device *tzd,
+				int trip, long hyst)
+{
+	int ret;
+	uint8_t data;
+	struct thermal_device_info *td_info = tzd->devdata;
+	int alert_reg = alert_regs_h[td_info->sensor->index];
+
+	/* Hysteresis value is 5 bits wide */
+	if (hyst > 31)
+		return -EINVAL;
+
+	mutex_lock(&thrm_update_lock);
+
+	ret = intel_scu_ipc_ioread8(alert_reg, &data);
+	if (ret)
+		goto ipc_fail;
+
+	/* Set bits [2:6] to value of hyst */
+	data = (data & 0x83) | (hyst << 2);
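+	/* e.g. hyst = 5 is shifted to 0x14, i.e. the value 5 in bits[2:6] */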
+
+	ret = intel_scu_ipc_iowrite8(alert_reg, data);
+
+ipc_fail:
+	mutex_unlock(&thrm_update_lock);
+	return ret;
+}
+
+static int show_trip_hyst(struct thermal_zone_device *tzd,
+				int trip, long *hyst)
+{
+	int ret;
+	uint8_t data;
+	struct thermal_device_info *td_info = tzd->devdata;
+	int alert_reg = alert_regs_h[td_info->sensor->index];
+
+	mutex_lock(&thrm_update_lock);
+
+	ret = intel_scu_ipc_ioread8(alert_reg, &data);
+	if (!ret)
+		*hyst = (data >> 2) & 0x1F; /* Extract bits[2:6] of data */
+
+	mutex_unlock(&thrm_update_lock);
+
+	return ret;
+}
+
+static int store_trip_temp(struct thermal_zone_device *tzd,
+				int trip, long trip_temp)
+{
+	int ret, adc_val;
+	struct thermal_device_info *td_info = tzd->devdata;
+	int alert_reg = alert_regs_h[td_info->sensor->index];
+
+	if (trip_temp < 1000) {
+		dev_err(&tzd->device, "Temperature should be in mC\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&thrm_update_lock);
+
+	/* Convert from mC to C */
+	trip_temp /= 1000;
+
+	ret = temp_to_adc(td_info->sensor->direct, (int)trip_temp, &adc_val);
+	if (ret)
+		goto exit;
+
+	ret =  set_tmax(alert_reg, adc_val);
+exit:
+	mutex_unlock(&thrm_update_lock);
+	return ret;
+}
+
+static int show_trip_temp(struct thermal_zone_device *tzd,
+				int trip, long *trip_temp)
+{
+	int ret, adc_val;
+	uint8_t l, h;
+	struct thermal_device_info *td_info = tzd->devdata;
+	int alert_reg = alert_regs_h[td_info->sensor->index];
+
+	mutex_lock(&thrm_update_lock);
+
+	ret = intel_scu_ipc_ioread8(alert_reg, &h);
+	if (ret)
+		goto exit;
+
+	ret = intel_scu_ipc_ioread8(alert_reg + 1, &l);
+	if (ret)
+		goto exit;
+
+	/* Concatenate 'h' and 'l' to get 10-bit ADC code */
+	adc_val = ((h & 0x03) << 8) | l;
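+	/* e.g. h = 0x02 and l = 0xA5 recombine into the 10-bit code 0x2A5 (677) */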
+
+	ret = adc_to_temp(td_info->sensor->direct, adc_val, trip_temp);
+exit:
+	mutex_unlock(&thrm_update_lock);
+	return ret;
+}
+
+static int show_trip_type(struct thermal_zone_device *tzd,
+			int trip, enum thermal_trip_type *trip_type)
+{
+	/* All are passive trip points */
+	*trip_type = THERMAL_TRIP_PASSIVE;
+
+	return 0;
+}
+
+static int show_temp(struct thermal_zone_device *tzd, long *temp)
+{
+	int ret;
+	struct thermal_device_info *td_info = tzd->devdata;
+	int indx = td_info->sensor->index;
+
+	if (!tdata->iio_chan)
+		return -EINVAL;
+
+	mutex_lock(&thrm_update_lock);
+
+	if (!tdata->is_initialized ||
+			time_after(jiffies, tdata->last_updated + HZ)) {
+		ret = iio_read_channel_all_raw(tdata->iio_chan,
+						tdata->cached_vals);
+		if (ret) {
+			dev_err(&tzd->device, "ADC sampling failed:%d\n", ret);
+			goto exit;
+		}
+		tdata->last_updated = jiffies;
+		tdata->is_initialized = true;
+	}
+
+	ret = adc_to_temp(td_info->sensor->direct, tdata->cached_vals[indx],
+								temp);
+	if (ret)
+		goto exit;
+
+	if (td_info->sensor->temp_correlation)
+		ret = td_info->sensor->temp_correlation(td_info->sensor,
+							*temp, temp);
+exit:
+	mutex_unlock(&thrm_update_lock);
+	return ret;
+}
+
+#ifdef CONFIG_DEBUG_THERMAL
+static int read_slope(struct thermal_zone_device *tzd, long *slope)
+{
+	struct thermal_device_info *td_info = tzd->devdata;
+
+	*slope = td_info->sensor->slope;
+
+	return 0;
+}
+
+static int update_slope(struct thermal_zone_device *tzd, long slope)
+{
+	struct thermal_device_info *td_info = tzd->devdata;
+
+	td_info->sensor->slope = slope;
+
+	return 0;
+}
+
+static int read_intercept(struct thermal_zone_device *tzd, long *intercept)
+{
+	struct thermal_device_info *td_info = tzd->devdata;
+
+	*intercept = td_info->sensor->intercept;
+
+	return 0;
+}
+
+static int update_intercept(struct thermal_zone_device *tzd, long intercept)
+{
+	struct thermal_device_info *td_info = tzd->devdata;
+
+	td_info->sensor->intercept = intercept;
+
+	return 0;
+}
+#endif
+
+static int enable_tm(void)
+{
+	int ret;
+	uint8_t data;
+
+	mutex_lock(&thrm_update_lock);
+
+	ret = intel_scu_ipc_ioread8(THRMMONCTL, &data);
+	if (ret)
+		goto ipc_fail;
+
+	ret = intel_scu_ipc_iowrite8(THRMMONCTL, data | THERM_EN);
+
+ipc_fail:
+	mutex_unlock(&thrm_update_lock);
+	return ret;
+}
+
+static struct thermal_device_info *initialize_sensor(
+				struct intel_mid_thermal_sensor *sensor)
+{
+	struct thermal_device_info *td_info =
+		kzalloc(sizeof(struct thermal_device_info), GFP_KERNEL);
+
+	if (!td_info)
+		return NULL;
+
+	td_info->sensor = sensor;
+
+	return td_info;
+}
+
+static irqreturn_t thermal_intrpt(int irq, void *dev_data)
+{
+	int ret, sensor, event_type;
+	uint8_t irq_status;
+	unsigned int irq_data;
+	struct thermal_data *tdata = (struct thermal_data *)dev_data;
+
+	if (!tdata)
+		return IRQ_NONE;
+
+	mutex_lock(&thrm_update_lock);
+
+	irq_data = ioread8(tdata->thrm_addr + PMIC_SRAM_THRM_OFFSET);
+
+	ret = intel_scu_ipc_ioread8(STHRMIRQ, &irq_status);
+	if (ret)
+		goto ipc_fail;
+
+	dev_dbg(&tdata->pdev->dev, "STHRMIRQ: %.2x\n", irq_status);
+
+	/*
+	 * -1 for invalid interrupt
+	 * 1 for LOW to HIGH temperature alert
+	 * 0 for HIGH to LOW temperature alert
+	 */
+	event_type = -1;
+
+	/* Check which interrupt occurred and for what event */
+	if (irq_data & PMICALRT) {
+		event_type = !!(irq_status & PMICALRT);
+		sensor = PMIC_DIE;
+	} else if (irq_data & SYS2ALRT) {
+		event_type = !!(irq_status & SYS2ALRT);
+		sensor = SYS2;
+	} else if (irq_data & SYS1ALRT) {
+		event_type = !!(irq_status & SYS1ALRT);
+		sensor = SYS1;
+	} else if (irq_data & SYS0ALRT) {
+		event_type = !!(irq_status & SYS0ALRT);
+		sensor = SYS0;
+	} else {
+		dev_err(&tdata->pdev->dev, "Invalid Interrupt\n");
+		ret = IRQ_HANDLED;
+		goto ipc_fail;
+	}
+
+	if (event_type != -1) {
+		dev_info(&tdata->pdev->dev,
+				"%s interrupt for thermal sensor %d\n",
+				event_type ? "HIGH" : "LOW", sensor);
+	}
+
+	/* Notify using UEvent */
+	kobject_uevent(&tdata->pdev->dev.kobj, KOBJ_CHANGE);
+
+	/* Unmask Thermal Interrupt in the mask register */
+	ret = intel_scu_ipc_update_register(MIRQLVL1, 0xFF, THERM_ALRT);
+	if (ret)
+		goto ipc_fail;
+
+	ret = IRQ_HANDLED;
+
+ipc_fail:
+	mutex_unlock(&thrm_update_lock);
+	/* An IPC failure above leaves a negative errno in 'ret' */
+	return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
+}
+
+static struct thermal_zone_device_ops tzd_ops = {
+	.get_temp = show_temp,
+	.get_trip_type = show_trip_type,
+	.get_trip_temp = show_trip_temp,
+	.set_trip_temp = store_trip_temp,
+	.get_trip_hyst = show_trip_hyst,
+	.set_trip_hyst = store_trip_hyst,
+#ifdef CONFIG_DEBUG_THERMAL
+	.get_slope = read_slope,
+	.set_slope = update_slope,
+	.get_intercept = read_intercept,
+	.set_intercept = update_intercept,
+#endif
+};
+
+static irqreturn_t mrfl_thermal_intrpt_handler(int irq, void *dev_data)
+{
+	return IRQ_WAKE_THREAD;
+}
+
+static int mrfl_thermal_probe(struct platform_device *pdev)
+{
+	int ret, i;
+	struct intel_mid_thermal_platform_data *pdata;
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata) {
+		dev_err(&pdev->dev, "platform data not found\n");
+		return -EINVAL;
+	}
+
+	tdata = kzalloc(sizeof(struct thermal_data), GFP_KERNEL);
+	if (!tdata) {
+		dev_err(&pdev->dev, "kzalloc failed\n");
+		return -ENOMEM;
+	}
+
+	tdata->pdev = pdev;
+	tdata->num_sensors = pdata->num_sensors;
+	tdata->sensors = pdata->sensors;
+	tdata->irq = platform_get_irq(pdev, 0);
+	platform_set_drvdata(pdev, tdata);
+
+	tdata->tzd = kzalloc(
+		(sizeof(struct thermal_zone_device *) * tdata->num_sensors),
+								GFP_KERNEL);
+	if (!tdata->tzd) {
+		dev_err(&pdev->dev, "kzalloc failed\n");
+		ret = -ENOMEM;
+		goto exit_free;
+	}
+
+	/* Program a default _max value for each sensor */
+	ret = program_tmax(&pdev->dev);
+	if (ret) {
+		dev_err(&pdev->dev, "Programming _max failed:%d\n", ret);
+		goto exit_tzd;
+	}
+
+	/*
+	 * Register with IIO to sample temperature values
+	 *
+	 * Order of the channels obtained from adc:
+	 * "SYSTHERM0", "SYSTHERM1", "SYSTHERM2", "PMICDIE"
+	 */
+	tdata->iio_chan = iio_channel_get_all(&pdev->dev);
+	if (IS_ERR(tdata->iio_chan)) {
+		ret = PTR_ERR(tdata->iio_chan);
+		dev_err(&pdev->dev, "iio_channel_get_all failed:%d\n", ret);
+		goto exit_tzd;
+	}
+
+	/* Check whether we got all the four channels */
+	ret = iio_channel_get_num(tdata->iio_chan);
+	if (ret != PMIC_THERMAL_SENSORS) {
+		dev_err(&pdev->dev, "incorrect number of channels:%d\n", ret);
+		ret = -EFAULT;
+		goto exit_iio;
+	}
+
+	/* Register each sensor with the generic thermal framework */
+	for (i = 0; i < tdata->num_sensors; i++) {
+		tdata->tzd[i] = thermal_zone_device_register(
+				tdata->sensors[i].name,	1, 1,
+		initialize_sensor(&tdata->sensors[i]), &tzd_ops, NULL, 0, 0);
+
+		if (IS_ERR(tdata->tzd[i])) {
+			ret = PTR_ERR(tdata->tzd[i]);
+			dev_err(&pdev->dev,
+				"registering thermal sensor %s failed: %d\n",
+				tdata->sensors[i].name, ret);
+			goto exit_reg;
+		}
+	}
+
+	tdata->thrm_addr = ioremap_nocache(PMIC_SRAM_BASE_ADDR, IOMAP_SIZE);
+	if (!tdata->thrm_addr) {
+		ret = -ENOMEM;
+		dev_err(&pdev->dev, "ioremap_nocache failed\n");
+		goto exit_reg;
+	}
+
+	/* Register for Interrupt Handler */
+	ret = request_threaded_irq(tdata->irq, mrfl_thermal_intrpt_handler,
+						thermal_intrpt,
+						IRQF_TRIGGER_RISING,
+						DRIVER_NAME, tdata);
+	if (ret) {
+		dev_err(&pdev->dev, "request_threaded_irq failed:%d\n", ret);
+		goto exit_ioremap;
+	}
+
+	/* Enable Thermal Monitoring */
+	ret = enable_tm();
+	if (ret) {
+		dev_err(&pdev->dev, "Enabling TM failed:%d\n", ret);
+		goto exit_irq;
+	}
+
+	return 0;
+
+exit_irq:
+	free_irq(tdata->irq, tdata);
+exit_ioremap:
+	iounmap(tdata->thrm_addr);
+exit_reg:
+	while (--i >= 0)
+		thermal_zone_device_unregister(tdata->tzd[i]);
+exit_iio:
+	iio_channel_release_all(tdata->iio_chan);
+exit_tzd:
+	kfree(tdata->tzd);
+exit_free:
+	kfree(tdata);
+	return ret;
+}
+
+static int mrfl_thermal_resume(struct device *dev)
+{
+	dev_info(dev, "resume called.\n");
+	return 0;
+}
+
+static int mrfl_thermal_suspend(struct device *dev)
+{
+	dev_info(dev, "suspend called.\n");
+	return 0;
+}
+
+static int mrfl_thermal_remove(struct platform_device *pdev)
+{
+	int i;
+	struct thermal_data *tdata = platform_get_drvdata(pdev);
+
+	if (!tdata)
+		return 0;
+
+	for (i = 0; i < tdata->num_sensors; i++)
+		thermal_zone_device_unregister(tdata->tzd[i]);
+
+	free_irq(tdata->irq, tdata);
+	iounmap(tdata->thrm_addr);
+	iio_channel_release_all(tdata->iio_chan);
+	kfree(tdata->tzd);
+	kfree(tdata);
+	return 0;
+}
+
+/*********************************************************************
+ *		Driver initialization and finalization
+ *********************************************************************/
+
+static const struct dev_pm_ops thermal_pm_ops = {
+	.suspend = mrfl_thermal_suspend,
+	.resume = mrfl_thermal_resume,
+};
+
+static struct platform_driver mrfl_thermal_driver = {
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.pm = &thermal_pm_ops,
+		},
+	.probe = mrfl_thermal_probe,
+	.remove = mrfl_thermal_remove,
+};
+
+static int mrfl_thermal_module_init(void)
+{
+	return platform_driver_register(&mrfl_thermal_driver);
+}
+
+static void mrfl_thermal_module_exit(void)
+{
+	platform_driver_unregister(&mrfl_thermal_driver);
+}
+
+/* RPMSG related functionality */
+static int mrfl_thermal_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	if (!rpdev) {
+		pr_err("rpmsg channel not created\n");
+		return -ENODEV;
+	}
+
+	dev_info(&rpdev->dev, "Probed mrfl_thermal rpmsg device\n");
+
+	return mrfl_thermal_module_init();
+}
+
+static void mrfl_thermal_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	mrfl_thermal_module_exit();
+	dev_info(&rpdev->dev, "Removed mrfl_thermal rpmsg device\n");
+}
+
+static void mrfl_thermal_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+			int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+				data, len, true);
+}
+
+static struct rpmsg_device_id mrfl_thermal_id_table[] = {
+	{ .name = "rpmsg_mrfl_thermal" },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(rpmsg, mrfl_thermal_id_table);
+
+static struct rpmsg_driver mrfl_thermal_rpmsg = {
+	.drv.name	= DRIVER_NAME,
+	.drv.owner	= THIS_MODULE,
+	.probe		= mrfl_thermal_rpmsg_probe,
+	.callback	= mrfl_thermal_rpmsg_cb,
+	.remove		= mrfl_thermal_rpmsg_remove,
+	.id_table	= mrfl_thermal_id_table,
+};
+
+static int __init mrfl_thermal_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&mrfl_thermal_rpmsg);
+}
+
+static void __exit mrfl_thermal_rpmsg_exit(void)
+{
+	unregister_rpmsg_driver(&mrfl_thermal_rpmsg);
+}
+
+module_init(mrfl_thermal_rpmsg_init);
+module_exit(mrfl_thermal_rpmsg_exit);
+
+MODULE_AUTHOR("Durgadoss R <durgadoss.r@intel.com>");
+MODULE_DESCRIPTION("Intel Merrifield Platform Thermal Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index b40b37c..cb604a1 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -675,6 +675,7 @@
 	{ X86_VENDOR_INTEL, 6, 0x2e},
 	{ X86_VENDOR_INTEL, 6, 0x2f},
 	{ X86_VENDOR_INTEL, 6, 0x3a},
+	{ X86_VENDOR_INTEL, 6, 0x4a},
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
diff --git a/drivers/thermal/intel_soc_thermal.c b/drivers/thermal/intel_soc_thermal.c
new file mode 100644
index 0000000..f30b5371
--- /dev/null
+++ b/drivers/thermal/intel_soc_thermal.c
@@ -0,0 +1,834 @@
+/*
+ * intel_soc_thermal.c - Intel SoC Platform Thermal Driver
+ *
+ * Copyright (C) 2012 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Author: Shravan B M <shravan.k.b.m@intel.com>
+ *
+ * This driver registers with the thermal framework as an SoC zone. It
+ * exposes two SoC DTS temperatures with two writable trip points.
+ */
+
+#define pr_fmt(fmt)  "intel_soc_thermal: " fmt
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/thermal.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <asm/msr.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_thermal.h>
+
+#define DRIVER_NAME	"soc_thrm"
+
+/* SOC DTS Registers */
+#define SOC_THERMAL_SENSORS	2
+#define SOC_THERMAL_TRIPS	2
+#define SOC_MAX_STATES		4
+#define DTS_ENABLE_REG		0xB0
+#define DTS_ENABLE		0x03
+#define DTS_TRIP_RW		0x03
+
+#define PUNIT_PORT		0x04
+#define PUNIT_TEMP_REG		0xB1
+#define PUNIT_AUX_REG		0xB2
+
+#define TJMAX_TEMP		90
+#define TJMAX_CODE		0x7F
+
+/* Default hysteresis values in C */
+#define DEFAULT_H2C_HYST	3
+#define MAX_HYST		7
+
+/* Power Limit registers */
+#define PKG_TURBO_POWER_LIMIT	0x610
+#define PKG_TURBO_CFG		0x670
+#define MSR_THERM_CFG1		0x673
+#define CPU_PWR_BUDGET_CTL	0x02
+
+/* PKG_TURBO_PL1 holds PL1 in terms of 32mW */
+#define PL_UNIT_MW		32
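+/* e.g. a PL1 field value of 219 corresponds to 219 * 32 = 7008 mW */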
+
+/* Magic number symbolising Dynamic Turbo OFF */
+#define DISABLE_DYNAMIC_TURBO	0xB0FF
+
+/* IRQ details */
+#define SOC_DTS_CONTROL		0x80
+#define TRIP_STATUS_RO		0xB3
+#define TRIP_STATUS_RW		0xB4
+/* TE stands for THERMAL_EVENT */
+#define TE_AUX0			0xB5
+#define ENABLE_AUX_INTRPT	0x0F
+#define ENABLE_CPU0		(1 << 16)
+#define RTE_ENABLE		(1 << 9)
+
+static int tjmax_temp;
+
+static DEFINE_MUTEX(thrm_update_lock);
+
+struct platform_soc_data {
+	struct thermal_zone_device *tzd[SOC_THERMAL_SENSORS];
+	struct thermal_cooling_device *soc_cdev; /* PL1 control */
+	int irq;
+};
+
+struct cooling_device_info {
+	struct soc_throttle_data *soc_data;
+	/* Lock protecting the soc_cur_state variable */
+	struct mutex lock_state;
+	unsigned long soc_cur_state;
+};
+
+struct thermal_device_info {
+	int sensor_index;
+	struct mutex lock_aux;
+};
+
+static inline u32 read_soc_reg(unsigned int addr)
+{
+	return intel_mid_msgbus_read32(PUNIT_PORT, addr);
+}
+
+static inline void write_soc_reg(unsigned int addr, u32 val)
+{
+	intel_mid_msgbus_write32(PUNIT_PORT, addr, val);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct dts_regs {
+	char *name;
+	u32 addr;
+} dts_regs[] = {
+	/* Thermal Management Registers */
+	{"PTMC",	0x80},
+	{"TRR0",	0x81},
+	{"TRR1",	0x82},
+	{"TTS",		0x83},
+	{"TELB",	0x84},
+	{"TELT",	0x85},
+	{"GFXT",	0x88},
+	{"VEDT",	0x89},
+	{"VECT",	0x8A},
+	{"VSPT",	0x8B},
+	{"ISPT",	0x8C},
+	{"SWT",		0x8D},
+	/* Trip Event Registers */
+	{"DTSC",	0xB0},
+	{"TRR",		0xB1},
+	{"PTPS",	0xB2},
+	{"PTTS",	0xB3},
+	{"PTTSS",	0xB4},
+	{"TE_AUX0",	0xB5},
+	{"TE_AUX1",	0xB6},
+	{"TE_AUX2",	0xB7},
+	{"TE_AUX3",	0xB8},
+	{"TTE_VRIcc",	0xB9},
+	{"TTE_VRHOT",	0xBA},
+	{"TTE_PROCHOT",	0xBB},
+	{"TTE_SLM0",	0xBC},
+	{"TTE_SLM1",	0xBD},
+	{"BWTE",	0xBE},
+	{"TTE_SWT",	0xBF},
+	/* MSI Message Registers */
+	{"TMA",		0xC0},
+	{"TMD",		0xC1},
+};
+
+/* /sys/kernel/debug/soc_thermal/soc_dts */
+static struct dentry *soc_dts_dent;
+static struct dentry *soc_thermal_dir;
+
+static int soc_dts_debugfs_show(struct seq_file *s, void *unused)
+{
+	int i;
+	u32 val;
+
+	for (i = 0; i < ARRAY_SIZE(dts_regs); i++) {
+		val = read_soc_reg(dts_regs[i].addr);
+		seq_printf(s,
+			"%s[0x%X]	Val: 0x%X\n",
+			dts_regs[i].name, dts_regs[i].addr, val);
+	}
+	return 0;
+}
+
+static int debugfs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, soc_dts_debugfs_show, NULL);
+}
+
+static const struct file_operations soc_dts_debugfs_fops = {
+	.open           = debugfs_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = single_release,
+};
+
+static void create_soc_dts_debugfs(void)
+{
+	int err;
+
+	/* /sys/kernel/debug/soc_thermal/ */
+	soc_thermal_dir = debugfs_create_dir("soc_thermal", NULL);
+	if (IS_ERR(soc_thermal_dir)) {
+		err = PTR_ERR(soc_thermal_dir);
+		pr_err("debugfs_create_dir failed:%d\n", err);
+		return;
+	}
+
+	/* /sys/kernel/debug/soc_thermal/soc_dts */
+	soc_dts_dent = debugfs_create_file("soc_dts", S_IFREG | S_IRUGO,
+					soc_thermal_dir, NULL,
+					&soc_dts_debugfs_fops);
+	if (IS_ERR(soc_dts_dent)) {
+		err = PTR_ERR(soc_dts_dent);
+		debugfs_remove_recursive(soc_thermal_dir);
+		pr_err("debugfs_create_file failed:%d\n", err);
+	}
+}
+
+static void remove_soc_dts_debugfs(void)
+{
+	debugfs_remove_recursive(soc_thermal_dir);
+}
+#else
+static inline void create_soc_dts_debugfs(void) { }
+static inline void remove_soc_dts_debugfs(void) { }
+#endif
+
+static
+struct cooling_device_info *initialize_cdev(struct platform_device *pdev)
+{
+	struct cooling_device_info *cdev_info =
+		kzalloc(sizeof(struct cooling_device_info), GFP_KERNEL);
+	if (!cdev_info)
+		return NULL;
+
+	cdev_info->soc_data = pdev->dev.platform_data;
+	mutex_init(&cdev_info->lock_state);
+	return cdev_info;
+}
+
+static struct thermal_device_info *initialize_sensor(int index)
+{
+	struct thermal_device_info *td_info =
+		kzalloc(sizeof(struct thermal_device_info), GFP_KERNEL);
+
+	if (!td_info)
+		return NULL;
+	td_info->sensor_index = index;
+	mutex_init(&td_info->lock_aux);
+
+	return td_info;
+}
+
+static void enable_soc_dts(void)
+{
+	int i;
+	u32 val, eax, edx;
+
+	rdmsr_on_cpu(0, MSR_THERM_CFG1, &eax, &edx);
+
+	/* B[8:10] H2C Hyst */
+	eax = (eax & ~(0x7 << 8)) | (DEFAULT_H2C_HYST << 8);
+
+	/* Set the Hysteresis value */
+	wrmsr_on_cpu(0, MSR_THERM_CFG1, eax, edx);
+
+	/* Enable the DTS */
+	write_soc_reg(DTS_ENABLE_REG, DTS_ENABLE);
+
+	val = read_soc_reg(SOC_DTS_CONTROL);
+	write_soc_reg(SOC_DTS_CONTROL, val | ENABLE_AUX_INTRPT | ENABLE_CPU0);
+
+	/* Enable Interrupts for all the AUX trips for the DTS */
+	for (i = 0; i < SOC_THERMAL_TRIPS; i++) {
+		val = read_soc_reg(TE_AUX0 + i);
+		write_soc_reg(TE_AUX0 + i, (val | RTE_ENABLE));
+	}
+}
+
+static int show_trip_hyst(struct thermal_zone_device *tzd,
+				int trip, long *hyst)
+{
+	u32 eax, edx;
+	struct thermal_device_info *td_info = tzd->devdata;
+
+	/* Hysteresis is only supported for trip point 0 */
+	if (trip != 0) {
+		*hyst = 0;
+		return 0;
+	}
+
+	mutex_lock(&td_info->lock_aux);
+
+	rdmsr_on_cpu(0, MSR_THERM_CFG1, &eax, &edx);
+
+	/* B[8:10] H2C Hyst, for trip 0. Report hysteresis in mC */
+	*hyst = ((eax >> 8) & 0x7) * 1000;
+
+	mutex_unlock(&td_info->lock_aux);
+	return 0;
+}
+
+static int store_trip_hyst(struct thermal_zone_device *tzd,
+				int trip, long hyst)
+{
+	u32 eax, edx;
+	struct thermal_device_info *td_info = tzd->devdata;
+
+	/* Convert from mC to C */
+	hyst /= 1000;
+
+	if (trip != 0 || hyst < 0 || hyst > MAX_HYST)
+		return -EINVAL;
+
+	mutex_lock(&td_info->lock_aux);
+
+	rdmsr_on_cpu(0, MSR_THERM_CFG1, &eax, &edx);
+
+	/* B[8:10] H2C Hyst */
+	eax = (eax & ~(0x7 << 8)) | (hyst << 8);
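+	/* e.g. hyst = 3 (from 3000 mC) programs 0b011 into bits[8:10] */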
+
+	wrmsr_on_cpu(0, MSR_THERM_CFG1, eax, edx);
+
+	mutex_unlock(&td_info->lock_aux);
+	return 0;
+}
+
+static int show_temp(struct thermal_zone_device *tzd, long *temp)
+{
+	struct thermal_device_info *td_info = tzd->devdata;
+	u32 val = read_soc_reg(PUNIT_TEMP_REG);
+
+	/* Extract bits[0:7] or [8:15] using sensor_index */
+	*temp =  (val >> (8 * td_info->sensor_index)) & 0xFF;
+
+	if (*temp == 0)
+		return 0;
+
+	/* Calibrate the temperature */
+	*temp = TJMAX_CODE - *temp + tjmax_temp;
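+	/* e.g. with tjmax_temp = 90, raw code 0x7F reads as exactly 90 C */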
+
+	/* Convert to mC */
+	*temp *= 1000;
+
+	return 0;
+}
+
+static int show_trip_type(struct thermal_zone_device *tzd,
+			int trip, enum thermal_trip_type *trip_type)
+{
+	/* All are passive trip points */
+	*trip_type = THERMAL_TRIP_PASSIVE;
+
+	return 0;
+}
+
+static int show_trip_temp(struct thermal_zone_device *tzd,
+				int trip, long *trip_temp)
+{
+	u32 aux_value = read_soc_reg(PUNIT_AUX_REG);
+
+	/* aux0 b[0:7], aux1 b[8:15], aux2 b[16:23], aux3 b[24:31] */
+	*trip_temp = (aux_value >> (8 * trip)) & 0xFF;
+
+	/* Calibrate the trip point temperature */
+	*trip_temp = tjmax_temp - *trip_temp;
+
+	/* Convert to mC and report */
+	*trip_temp *= 1000;
+
+	return 0;
+}
+
+static int store_trip_temp(struct thermal_zone_device *tzd,
+				int trip, long trip_temp)
+{
+	u32 aux_trip, aux = 0;
+	struct thermal_device_info *td_info = tzd->devdata;
+
+	/* Convert from mC to C */
+	trip_temp /= 1000;
+
+	/* The trip temp is 8 bits wide (unsigned) */
+	if (trip_temp > 255)
+		return -EINVAL;
+
+	/* Assign last byte to unsigned 32 */
+	aux_trip = trip_temp & 0xFF;
+
+	/* Calibrate w.r.t TJMAX_TEMP */
+	aux_trip = tjmax_temp - aux_trip;
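+	/* e.g. with tjmax_temp = 90, an 80000 mC trip is stored as 90 - 80 = 10 */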
+
+	mutex_lock(&td_info->lock_aux);
+	aux = read_soc_reg(PUNIT_AUX_REG);
+	switch (trip) {
+	case 0:
+		/* aux0 bits 0:7 */
+		aux = (aux & 0xFFFFFF00) | (aux_trip << (8 * trip));
+		break;
+	case 1:
+		/* aux1 bits 8:15 */
+		aux = (aux & 0xFFFF00FF) | (aux_trip << (8 * trip));
+		break;
+	}
+	write_soc_reg(PUNIT_AUX_REG, aux);
+
+	mutex_unlock(&td_info->lock_aux);
+
+	return 0;
+}
+
+/* SoC cooling device callbacks */
+static int soc_get_max_state(struct thermal_cooling_device *cdev,
+				unsigned long *state)
+{
+	/* SoC has 4 levels of throttling from 0 to 3 */
+	*state = SOC_MAX_STATES - 1;
+	return 0;
+}
+
+static int soc_get_cur_state(struct thermal_cooling_device *cdev,
+				unsigned long *state)
+{
+	struct cooling_device_info *cdev_info =
+			(struct cooling_device_info *)cdev->devdata;
+
+	mutex_lock(&cdev_info->lock_state);
+	*state = cdev_info->soc_cur_state;
+	mutex_unlock(&cdev_info->lock_state);
+
+	return 0;
+}
+
+static void set_floor_freq(int val)
+{
+	u32 eax;
+
+	eax = read_soc_reg(CPU_PWR_BUDGET_CTL);
+
+	/* Set bits[8:14] of eax to val */
+	eax = (eax & ~(0x7F << 8)) | (val << 8);
+
+	write_soc_reg(CPU_PWR_BUDGET_CTL, eax);
+}
+
+static int disable_dynamic_turbo(struct cooling_device_info *cdev_info)
+{
+	u32 eax, edx;
+
+	mutex_lock(&cdev_info->lock_state);
+
+	rdmsr_on_cpu(0, PKG_TURBO_CFG, &eax, &edx);
+
+	/* Set bits[0:2] to 0 to enable TjMax Turbo mode */
+	eax = eax & ~0x07;
+
+	/* Set bit[8] to 0 to disable Dynamic Turbo */
+	eax = eax & ~(1 << 8);
+
+	/* Set bits[9:11] to 0 to disable Dynamic Turbo Policy */
+	eax = eax & ~(0x07 << 9);
+
+	wrmsr_on_cpu(0, PKG_TURBO_CFG, eax, edx);
+
+	/*
+	 * Now that we disabled Dynamic Turbo, we can
+	 * make the floor frequency ratio also 0.
+	 */
+	set_floor_freq(0);
+
+	cdev_info->soc_cur_state = DISABLE_DYNAMIC_TURBO;
+
+	mutex_unlock(&cdev_info->lock_state);
+	return 0;
+}
+
+static int soc_set_cur_state(struct thermal_cooling_device *cdev,
+				unsigned long state)
+{
+	u32 eax, edx;
+	struct soc_throttle_data *data;
+	struct cooling_device_info *cdev_info =
+			(struct cooling_device_info *)cdev->devdata;
+
+	if (state == DISABLE_DYNAMIC_TURBO)
+		return disable_dynamic_turbo(cdev_info);
+
+	if (state >= SOC_MAX_STATES) {
+		pr_err("Invalid SoC throttle state:%ld\n", state);
+		return -EINVAL;
+	}
+
+	mutex_lock(&cdev_info->lock_state);
+
+	data = &cdev_info->soc_data[state];
+
+	rdmsr_on_cpu(0, PKG_TURBO_POWER_LIMIT, &eax, &edx);
+
+	/* Set bits[0:14] of eax to 'data->power_limit' */
+	eax = (eax & ~0x7FFF) | data->power_limit;
+
+	wrmsr_on_cpu(0, PKG_TURBO_POWER_LIMIT, eax, edx);
+
+	set_floor_freq(data->floor_freq);
+
+	cdev_info->soc_cur_state = state;
+
+	mutex_unlock(&cdev_info->lock_state);
+	return 0;
+}
+
+#ifdef CONFIG_DEBUG_THERMAL
+static int soc_get_force_state_override(struct thermal_cooling_device *cdev,
+					char *buf)
+{
+	int i;
+	int pl1_vals_mw[SOC_MAX_STATES];
+	struct cooling_device_info *cdev_info =
+			(struct cooling_device_info *)cdev->devdata;
+
+	mutex_lock(&cdev_info->lock_state);
+
+	/* PKG_TURBO_PL1 holds PL1 in terms of 32mW. So, multiply by 32 */
+	for (i = 0; i < SOC_MAX_STATES; i++) {
+		pl1_vals_mw[i] =
+			cdev_info->soc_data[i].power_limit * PL_UNIT_MW;
+	}
+
+	mutex_unlock(&cdev_info->lock_state);
+
+	return sprintf(buf, "%d %d %d %d\n", pl1_vals_mw[0], pl1_vals_mw[1],
+					pl1_vals_mw[2], pl1_vals_mw[3]);
+}
+
+static int soc_set_force_state_override(struct thermal_cooling_device *cdev,
+					char *buf)
+{
+	int i, ret;
+	int pl1_vals_mw[SOC_MAX_STATES];
+	unsigned long cur_state;
+	struct cooling_device_info *cdev_info =
+				(struct cooling_device_info *)cdev->devdata;
+
+	/*
+	 * The four space separated values entered via the sysfs node
+	 * override the default values configured through platform data.
+	 */
+	ret = sscanf(buf, "%d %d %d %d", &pl1_vals_mw[0], &pl1_vals_mw[1],
+					&pl1_vals_mw[2], &pl1_vals_mw[3]);
+	if (ret != SOC_MAX_STATES) {
+		pr_err("Invalid values in soc_set_force_state_override\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&cdev_info->lock_state);
+
+	/* PKG_TURBO_PL1 takes PL1 in terms of 32mW. So, divide by 32 */
+	for (i = 0; i < SOC_MAX_STATES; i++) {
+		cdev_info->soc_data[i].power_limit =
+					pl1_vals_mw[i] / PL_UNIT_MW;
+	}
+
+	/* Update the cur_state value of this cooling device */
+	cur_state = cdev_info->soc_cur_state;
+
+	mutex_unlock(&cdev_info->lock_state);
+
+	return soc_set_cur_state(cdev, cur_state);
+}
+#endif
+
+static void notify_thermal_event(struct thermal_zone_device *tzd,
+				long temp, int event, int level)
+{
+	char *thermal_event[5];
+
+	pr_info("Thermal Event: sensor: %s, cur_temp: %ld, event: %d, level: %d\n",
+				tzd->type, temp, event, level);
+
+	thermal_event[0] = kasprintf(GFP_KERNEL, "NAME=%s", tzd->type);
+	thermal_event[1] = kasprintf(GFP_KERNEL, "TEMP=%ld", temp);
+	thermal_event[2] = kasprintf(GFP_KERNEL, "EVENT=%d", event);
+	thermal_event[3] = kasprintf(GFP_KERNEL, "LEVEL=%d", level);
+	thermal_event[4] = NULL;
+
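+	/* e.g. the resulting env: NAME=SoC_DTS0 TEMP=85000 EVENT=1 LEVEL=0 */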
+	kobject_uevent_env(&tzd->device.kobj, KOBJ_CHANGE, thermal_event);
+
+	kfree(thermal_event[3]);
+	kfree(thermal_event[2]);
+	kfree(thermal_event[1]);
+	kfree(thermal_event[0]);
+}
+
+static int get_max_temp(struct platform_soc_data *pdata, long *cur_temp)
+{
+	int i, ret;
+	long temp;
+
+	/*
+	 * The SoC has two or more DTSes whose readings determine the
+	 * SoC temperature. The hardware actions are taken using
+	 * T(DTS) = MAX(T(DTS0), T(DTS1), ... T(DTSn)).
+	 *
+	 * Do not report an error as long as at least one DTS can be
+	 * read correctly.
+	 */
+	ret = show_temp(pdata->tzd[0], cur_temp);
+	if (ret)
+		return ret;
+
+	for (i = 1; i < SOC_THERMAL_SENSORS; i++) {
+		ret = show_temp(pdata->tzd[i], &temp);
+		if (ret)
+			goto fail_safe;
+
+		if (temp > *cur_temp)
+			*cur_temp = temp;
+	}
+
+fail_safe:
+	/*
+	 * We have at least one valid DTS temperature; use that
+	 * instead of reporting an error.
+	 */
+	return 0;
+}
+
+static irqreturn_t soc_dts_intrpt(int irq, void *dev_data)
+{
+	u32 irq_sts, cur_sts;
+	int i, ret, event, level = -1;
+	long cur_temp;
+	struct thermal_zone_device *tzd;
+	struct platform_soc_data *pdata = (struct platform_soc_data *)dev_data;
+
+	if (!pdata || !pdata->tzd[0])
+		return IRQ_NONE;
+
+	mutex_lock(&thrm_update_lock);
+
+	tzd = pdata->tzd[0];
+
+	irq_sts = read_soc_reg(TRIP_STATUS_RW);
+	cur_sts = read_soc_reg(TRIP_STATUS_RO);
+
+	for (i = 0; i < SOC_THERMAL_TRIPS; i++) {
+		if (irq_sts & (1 << i)) {
+			level = i;
+			event = !!(cur_sts & (1 << i));
+			/* Clear the status bit by writing 1 */
+			irq_sts |= (1 << i);
+			break;
+		}
+	}
+
+	/* level == -1, indicates an invalid event */
+	if (level == -1) {
+		dev_err(&tzd->device, "Invalid event from SoC DTS\n");
+		goto exit;
+	}
+
+	ret = get_max_temp(pdata, &cur_temp);
+	if (ret) {
+		dev_err(&tzd->device, "Cannot read SoC DTS temperature\n");
+		goto exit;
+	}
+
+	/* Notify using UEvent */
+	notify_thermal_event(tzd, cur_temp, event, level);
+
+	/* Clear the status bits */
+	write_soc_reg(TRIP_STATUS_RW, irq_sts);
+
+exit:
+	mutex_unlock(&thrm_update_lock);
+	return IRQ_HANDLED;
+}
+
+static struct thermal_zone_device_ops tzd_ops = {
+	.get_temp = show_temp,
+	.get_trip_type = show_trip_type,
+	.get_trip_temp = show_trip_temp,
+	.set_trip_temp = store_trip_temp,
+	.get_trip_hyst = show_trip_hyst,
+	.set_trip_hyst = store_trip_hyst,
+};
+
+static struct thermal_cooling_device_ops soc_cooling_ops = {
+	.get_max_state = soc_get_max_state,
+	.get_cur_state = soc_get_cur_state,
+	.set_cur_state = soc_set_cur_state,
+#ifdef CONFIG_DEBUG_THERMAL
+	.get_force_state_override = soc_get_force_state_override,
+	.set_force_state_override = soc_set_force_state_override,
+#endif
+};
+
+/*********************************************************************
+ *		Driver initialization and finalization
+ *********************************************************************/
+
+static irqreturn_t soc_dts_intrpt_handler(int irq, void *dev_data)
+{
+	return IRQ_WAKE_THREAD;
+}
+
+static int soc_thermal_probe(struct platform_device *pdev)
+{
+	struct platform_soc_data *pdata;
+	int i, ret;
+	u32 eax, edx;
+	static char *name[SOC_THERMAL_SENSORS] = {"SoC_DTS0", "SoC_DTS1"};
+
+	pdata = kzalloc(sizeof(struct platform_soc_data), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	ret = rdmsr_safe_on_cpu(0, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
+	if (ret) {
+		tjmax_temp = TJMAX_TEMP;
+		dev_err(&pdev->dev, "TjMax read from MSR %x failed, error:%d\n",
+				MSR_IA32_TEMPERATURE_TARGET, ret);
+	} else {
+		tjmax_temp = (eax >> 16) & 0xff;
+		dev_dbg(&pdev->dev, "TjMax is %d degrees C\n", tjmax_temp);
+	}
+
+	/* Register each sensor with the generic thermal framework */
+	for (i = 0; i < SOC_THERMAL_SENSORS; i++) {
+		pdata->tzd[i] = thermal_zone_device_register(name[i],
+					SOC_THERMAL_TRIPS, DTS_TRIP_RW,
+					initialize_sensor(i),
+					&tzd_ops, NULL, 0, 0);
+		if (IS_ERR(pdata->tzd[i])) {
+			ret = PTR_ERR(pdata->tzd[i]);
+			dev_err(&pdev->dev, "tzd register failed: %d\n", ret);
+			goto exit_reg;
+		}
+	}
+
+	/* Register a cooling device for PL1 (power limit) control */
+	pdata->soc_cdev = thermal_cooling_device_register("SoC",
+						initialize_cdev(pdev),
+						&soc_cooling_ops);
+	if (IS_ERR(pdata->soc_cdev)) {
+		ret = PTR_ERR(pdata->soc_cdev);
+		pdata->soc_cdev = NULL;
+		goto exit_reg;
+	}
+
+	platform_set_drvdata(pdev, pdata);
+
+	ret = platform_get_irq(pdev, 0);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "platform_get_irq failed:%d\n", ret);
+		goto exit_cdev;
+	}
+
+	pdata->irq = ret;
+
+	/* Register for Interrupt Handler */
+	ret = request_threaded_irq(pdata->irq, soc_dts_intrpt_handler,
+						soc_dts_intrpt,
+						IRQF_TRIGGER_RISING,
+						DRIVER_NAME, pdata);
+	if (ret) {
+		dev_err(&pdev->dev, "request_threaded_irq failed:%d\n", ret);
+		goto exit_cdev;
+	}
+
+	/* Enable DTS0 and DTS1 */
+	enable_soc_dts();
+
+	create_soc_dts_debugfs();
+
+	return 0;
+
+exit_cdev:
+	thermal_cooling_device_unregister(pdata->soc_cdev);
+exit_reg:
+	while (--i >= 0) {
+		struct thermal_device_info *td_info = pdata->tzd[i]->devdata;
+
+		thermal_zone_device_unregister(pdata->tzd[i]);
+		kfree(td_info);
+	}
+	platform_set_drvdata(pdev, NULL);
+	kfree(pdata);
+	return ret;
+}
+
+static int soc_thermal_remove(struct platform_device *pdev)
+{
+	int i;
+	struct platform_soc_data *pdata = platform_get_drvdata(pdev);
+
+	/* Quiesce the interrupt before tearing down the zones it uses */
+	free_irq(pdata->irq, pdata);
+
+	/* Unregister each sensor from the generic thermal framework */
+	for (i = 0; i < SOC_THERMAL_SENSORS; i++) {
+		struct thermal_device_info *td_info = pdata->tzd[i]->devdata;
+
+		thermal_zone_device_unregister(pdata->tzd[i]);
+		kfree(td_info);
+	}
+	thermal_cooling_device_unregister(pdata->soc_cdev);
+	platform_set_drvdata(pdev, NULL);
+	kfree(pdata);
+
+	remove_soc_dts_debugfs();
+
+	return 0;
+}
+
+static const struct platform_device_id therm_id_table[] = {
+	{ DRIVER_NAME, 1 },
+	{ }
+};
+
+static struct platform_driver soc_thermal_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = DRIVER_NAME,
+	},
+	.probe = soc_thermal_probe,
+	.remove = soc_thermal_remove,
+	.id_table = therm_id_table,
+};
+
+static int __init soc_thermal_module_init(void)
+{
+	return platform_driver_register(&soc_thermal_driver);
+}
+
+static void __exit soc_thermal_module_exit(void)
+{
+	platform_driver_unregister(&soc_thermal_driver);
+}
+
+module_init(soc_thermal_module_init);
+module_exit(soc_thermal_module_exit);
+
+MODULE_AUTHOR("Shravan B M <shravan.k.b.m@intel.com>");
+MODULE_DESCRIPTION("Intel SoC Thermal Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index f9bf597..eb617bd 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -563,7 +563,7 @@
 {
 	struct thermal_zone_device *tz = to_thermal_zone(dev);
 	int trip, ret;
-	unsigned long temperature;
+	long temperature;
 
 	if (!tz->ops->set_trip_temp)
 		return -EPERM;
@@ -707,6 +707,82 @@
 }
 
 static ssize_t
+slope_store(struct device *dev, struct device_attribute *attr,
+		    const char *buf, size_t count)
+{
+	int ret;
+	long slope;
+	struct thermal_zone_device *tz = to_thermal_zone(dev);
+
+	if (!tz->ops->set_slope)
+		return -EPERM;
+
+	if (kstrtol(buf, 10, &slope))
+		return -EINVAL;
+
+	ret = tz->ops->set_slope(tz, slope);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static ssize_t
+slope_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	int ret;
+	long slope;
+	struct thermal_zone_device *tz = to_thermal_zone(dev);
+
+	if (!tz->ops->get_slope)
+		return -EINVAL;
+
+	ret = tz->ops->get_slope(tz, &slope);
+	if (ret)
+		return ret;
+
+	return sprintf(buf, "%ld\n", slope);
+}
+
+static ssize_t
+intercept_store(struct device *dev, struct device_attribute *attr,
+		    const char *buf, size_t count)
+{
+	int ret;
+	long intercept;
+	struct thermal_zone_device *tz = to_thermal_zone(dev);
+
+	if (!tz->ops->set_intercept)
+		return -EPERM;
+
+	if (kstrtol(buf, 10, &intercept))
+		return -EINVAL;
+
+	ret = tz->ops->set_intercept(tz, intercept);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static ssize_t
+intercept_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	int ret;
+	long intercept;
+	struct thermal_zone_device *tz = to_thermal_zone(dev);
+
+	if (!tz->ops->get_intercept)
+		return -EINVAL;
+
+	ret = tz->ops->get_intercept(tz, &intercept);
+	if (ret)
+		return ret;
+
+	return sprintf(buf, "%ld\n", intercept);
+}
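+
+/*
+ * Slope and intercept let a platform driver linearly calibrate a raw
+ * sensor reading, e.g. temp = slope * raw + intercept. Illustrative
+ * sysfs usage (zone numbering is platform dependent):
+ *   cat /sys/class/thermal/thermal_zone0/slope
+ *   echo 721 > /sys/class/thermal/thermal_zone0/intercept
+ */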
+
+static ssize_t
 policy_store(struct device *dev, struct device_attribute *attr,
 		    const char *buf, size_t count)
 {
@@ -765,6 +841,9 @@
 static DEVICE_ATTR(temp, 0444, temp_show, NULL);
 static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
 static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, passive_store);
+static DEVICE_ATTR(slope, S_IRUGO | S_IWUSR, slope_show, slope_store);
+static DEVICE_ATTR(intercept,
+		S_IRUGO | S_IWUSR, intercept_show, intercept_store);
 static DEVICE_ATTR(policy, S_IRUGO | S_IWUSR, policy_show, policy_store);
 
 /* sys I/F for cooling device */
@@ -794,6 +873,43 @@
 	return sprintf(buf, "%ld\n", state);
 }
 
+/*
+ * Sysfs to read the mapped values and to override
+ * the default values mapped to each state during runtime.
+ */
+static ssize_t
+thermal_cooling_device_force_state_override_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct thermal_cooling_device *cdev = to_cooling_device(dev);
+
+	return cdev->ops->get_force_state_override(cdev, buf);
+}
+
+static ssize_t
+thermal_cooling_device_force_state_override_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	int ret;
+	struct thermal_cooling_device *cdev = to_cooling_device(dev);
+
+	ret = cdev->ops->set_force_state_override(cdev, (char *) buf);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static ssize_t
+thermal_cooling_device_available_states_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct thermal_cooling_device *cdev = to_cooling_device(dev);
+
+	return cdev->ops->get_available_states(cdev, buf);
+}
+
 static ssize_t
 thermal_cooling_device_cur_state_show(struct device *dev,
 				      struct device_attribute *attr, char *buf)
@@ -836,6 +952,11 @@
 static DEVICE_ATTR(cur_state, 0644,
 		   thermal_cooling_device_cur_state_show,
 		   thermal_cooling_device_cur_state_store);
+static DEVICE_ATTR(force_state_override, 0644,
+		thermal_cooling_device_force_state_override_show,
+		thermal_cooling_device_force_state_override_store);
+static DEVICE_ATTR(available_states, 0444,
+		thermal_cooling_device_available_states_show, NULL);
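+
+/*
+ * Illustrative usage (cooling device numbering is platform dependent;
+ * the override string format is defined by the cooling driver's
+ * set_force_state_override() hook):
+ *   cat /sys/class/thermal/cooling_device0/available_states
+ *   echo "<driver defined>" > /sys/class/thermal/cooling_device0/force_state_override
+ */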
 
 static ssize_t
 thermal_cooling_device_trip_point_show(struct device *dev,
@@ -1017,7 +1138,7 @@
 		goto free_temp_mem;
 
 	if (tz->ops->get_crit_temp) {
-		unsigned long temperature;
+		long temperature;
 		if (!tz->ops->get_crit_temp(tz, &temperature)) {
 			snprintf(temp->temp_crit.name,
 				 sizeof(temp->temp_crit.name),
@@ -1347,13 +1468,26 @@
 
 	result = device_create_file(&cdev->device, &dev_attr_max_state);
 	if (result)
-		goto unregister;
+		goto remove_type;
 
 	result = device_create_file(&cdev->device, &dev_attr_cur_state);
 	if (result)
-		goto unregister;
+		goto remove_max_state;
+
+	if (ops->get_force_state_override) {
+		result = device_create_file(&cdev->device,
+					&dev_attr_force_state_override);
+		if (result)
+			goto remove_cur_state;
+	}
 
+	if (ops->get_available_states) {
+		result = device_create_file(&cdev->device,
+						&dev_attr_available_states);
+		if (result)
+			goto remove_force_override;
+	}
+
 	/* Add 'this' new cdev to the global cdev list */
 	mutex_lock(&thermal_list_lock);
 	list_add(&cdev->node, &thermal_cdev_list);
 	mutex_unlock(&thermal_list_lock);
@@ -1363,6 +1497,16 @@
 
 	return cdev;
 
+remove_force_override:
+	if (cdev->ops->get_force_state_override)
+		device_remove_file(&cdev->device,
+				&dev_attr_force_state_override);
+remove_cur_state:
+	device_remove_file(&cdev->device, &dev_attr_cur_state);
+remove_max_state:
+	device_remove_file(&cdev->device, &dev_attr_max_state);
+remove_type:
+	device_remove_file(&cdev->device, &dev_attr_cdev_type);
 unregister:
 	release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
 	device_unregister(&cdev->device);
@@ -1423,7 +1567,12 @@
 		device_remove_file(&cdev->device, &dev_attr_cdev_type);
 	device_remove_file(&cdev->device, &dev_attr_max_state);
 	device_remove_file(&cdev->device, &dev_attr_cur_state);
-
+	if (cdev->ops->get_force_state_override)
+		device_remove_file(&cdev->device,
+					&dev_attr_force_state_override);
+	if (cdev->ops->get_available_states)
+		device_remove_file(&cdev->device,
+					&dev_attr_available_states);
 	release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
 	device_unregister(&cdev->device);
 	return;
@@ -1690,6 +1839,19 @@
 			goto unregister;
 	}
 
+	/* Create Sysfs for slope/intercept values */
+	if (tz->ops->get_slope) {
+		result = device_create_file(&tz->device, &dev_attr_slope);
+		if (result)
+			goto unregister;
+	}
+
+	if (tz->ops->get_intercept) {
+		result = device_create_file(&tz->device, &dev_attr_intercept);
+		if (result)
+			goto unregister;
+	}
+
 #ifdef CONFIG_THERMAL_EMULATION
 	result = device_create_file(&tz->device, &dev_attr_emul_temp);
 	if (result)
@@ -1789,6 +1951,11 @@
 	device_remove_file(&tz->device, &dev_attr_temp);
 	if (tz->ops->get_mode)
 		device_remove_file(&tz->device, &dev_attr_mode);
+	if (tz->ops->get_slope)
+		device_remove_file(&tz->device, &dev_attr_slope);
+	if (tz->ops->get_intercept)
+		device_remove_file(&tz->device, &dev_attr_intercept);
 	device_remove_file(&tz->device, &dev_attr_policy);
 	remove_trip_attrs(tz);
 	tz->governor = NULL;
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 7e7006f..d30915d 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -455,14 +455,14 @@
 	  driver.
 
 config SERIAL_MFD_HSU
 	tristate "Medfield High Speed UART support"
 	depends on PCI
 	select SERIAL_CORE
 
 config SERIAL_MFD_HSU_CONSOLE
-	boolean "Medfile HSU serial console support"
+	boolean "Medfield HSU serial console support"
 	depends on SERIAL_MFD_HSU=y
 	select SERIAL_CORE_CONSOLE
 
 config SERIAL_BFIN
 	tristate "Blackfin serial port support"
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index eedfec4..948eb05 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -2,6 +2,8 @@
 # Makefile for the kernel serial device drivers.
 #
 
+ccflags-y += -I$(src)                   # needed for trace events
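+# (define_trace.h re-includes the local trace header (mfd_trace.h) by
+# file name, so the serial source directory itself must be on the
+# include path.)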
+
 obj-$(CONFIG_SERIAL_CORE) += serial_core.o
 obj-$(CONFIG_SERIAL_21285) += 21285.o
 
@@ -72,7 +74,7 @@
 obj-$(CONFIG_SERIAL_ALTERA_JTAGUART) += altera_jtaguart.o
 obj-$(CONFIG_SERIAL_VT8500) += vt8500_serial.o
 obj-$(CONFIG_SERIAL_MRST_MAX3110)	+= mrst_max3110.o
-obj-$(CONFIG_SERIAL_MFD_HSU)	+= mfd.o
+obj-$(CONFIG_SERIAL_MFD_HSU)	+= mfd_core.o mfd_dma.o mfd_pci.o mfd_plat.o
 obj-$(CONFIG_SERIAL_IFX6X60)  	+= ifx6x60.o
 obj-$(CONFIG_SERIAL_PCH_UART)	+= pch_uart.o
 obj-$(CONFIG_SERIAL_MSM_SMD)	+= msm_smd_tty.o
diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
deleted file mode 100644
index 5f4765a..0000000
--- a/drivers/tty/serial/mfd.c
+++ /dev/null
@@ -1,1502 +0,0 @@
-/*
- * mfd.c: driver for High Speed UART device of Intel Medfield platform
- *
- * Refer pxa.c, 8250.c and some other drivers in drivers/serial/
- *
- * (C) Copyright 2010 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-/* Notes:
- * 1. DMA channel allocation: 0/1 channel are assigned to port 0,
- *    2/3 chan to port 1, 4/5 chan to port 3. Even number chans
- *    are used for RX, odd chans for TX
- *
- * 2. The RI/DSR/DCD/DTR are not pinned out, DCD & DSR are always
- *    asserted, only when the HW is reset the DDCD and DDSR will
- *    be triggered
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/console.h>
-#include <linux/sysrq.h>
-#include <linux/slab.h>
-#include <linux/serial_reg.h>
-#include <linux/circ_buf.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial_core.h>
-#include <linux/serial_mfd.h>
-#include <linux/dma-mapping.h>
-#include <linux/pci.h>
-#include <linux/nmi.h>
-#include <linux/io.h>
-#include <linux/debugfs.h>
-#include <linux/pm_runtime.h>
-
-#define HSU_DMA_BUF_SIZE	2048
-
-#define chan_readl(chan, offset)	readl(chan->reg + offset)
-#define chan_writel(chan, offset, val)	writel(val, chan->reg + offset)
-
-#define mfd_readl(obj, offset)		readl(obj->reg + offset)
-#define mfd_writel(obj, offset, val)	writel(val, obj->reg + offset)
-
-static int hsu_dma_enable;
-module_param(hsu_dma_enable, int, 0);
-MODULE_PARM_DESC(hsu_dma_enable,
-		 "It is a bitmap to set working mode, if bit[x] is 1, then port[x] will work in DMA mode, otherwise in PIO mode.");
-
-struct hsu_dma_buffer {
-	u8		*buf;
-	dma_addr_t	dma_addr;
-	u32		dma_size;
-	u32		ofs;
-};
-
-struct hsu_dma_chan {
-	u32	id;
-	enum dma_data_direction	dirt;
-	struct uart_hsu_port	*uport;
-	void __iomem		*reg;
-};
-
-struct uart_hsu_port {
-	struct uart_port        port;
-	unsigned char           ier;
-	unsigned char           lcr;
-	unsigned char           mcr;
-	unsigned int            lsr_break_flag;
-	char			name[12];
-	int			index;
-	struct device		*dev;
-
-	struct hsu_dma_chan	*txc;
-	struct hsu_dma_chan	*rxc;
-	struct hsu_dma_buffer	txbuf;
-	struct hsu_dma_buffer	rxbuf;
-	int			use_dma;	/* flag for DMA/PIO */
-	int			running;
-	int			dma_tx_on;
-};
-
-/* Top level data structure of HSU */
-struct hsu_port {
-	void __iomem	*reg;
-	unsigned long	paddr;
-	unsigned long	iolen;
-	u32		irq;
-
-	struct uart_hsu_port	port[3];
-	struct hsu_dma_chan	chans[10];
-
-	struct dentry *debugfs;
-};
-
-static inline unsigned int serial_in(struct uart_hsu_port *up, int offset)
-{
-	unsigned int val;
-
-	if (offset > UART_MSR) {
-		offset <<= 2;
-		val = readl(up->port.membase + offset);
-	} else
-		val = (unsigned int)readb(up->port.membase + offset);
-
-	return val;
-}
-
-static inline void serial_out(struct uart_hsu_port *up, int offset, int value)
-{
-	if (offset > UART_MSR) {
-		offset <<= 2;
-		writel(value, up->port.membase + offset);
-	} else {
-		unsigned char val = value & 0xff;
-		writeb(val, up->port.membase + offset);
-	}
-}
-
-#ifdef CONFIG_DEBUG_FS
-
-#define HSU_REGS_BUFSIZE	1024
-
-
-static ssize_t port_show_regs(struct file *file, char __user *user_buf,
-				size_t count, loff_t *ppos)
-{
-	struct uart_hsu_port *up = file->private_data;
-	char *buf;
-	u32 len = 0;
-	ssize_t ret;
-
-	buf = kzalloc(HSU_REGS_BUFSIZE, GFP_KERNEL);
-	if (!buf)
-		return 0;
-
-	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
-			"MFD HSU port[%d] regs:\n", up->index);
-
-	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
-			"=================================\n");
-	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
-			"IER: \t\t0x%08x\n", serial_in(up, UART_IER));
-	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
-			"IIR: \t\t0x%08x\n", serial_in(up, UART_IIR));
-	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
-			"LCR: \t\t0x%08x\n", serial_in(up, UART_LCR));
-	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
-			"MCR: \t\t0x%08x\n", serial_in(up, UART_MCR));
-	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
-			"LSR: \t\t0x%08x\n", serial_in(up, UART_LSR));
-	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
-			"MSR: \t\t0x%08x\n", serial_in(up, UART_MSR));
-	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
-			"FOR: \t\t0x%08x\n", serial_in(up, UART_FOR));
-	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
-			"PS: \t\t0x%08x\n", serial_in(up, UART_PS));
-	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
-			"MUL: \t\t0x%08x\n", serial_in(up, UART_MUL));
-	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
-			"DIV: \t\t0x%08x\n", serial_in(up, UART_DIV));
-
-	if (len > HSU_REGS_BUFSIZE)
-		len = HSU_REGS_BUFSIZE;
-
-	ret =  simple_read_from_buffer(user_buf, count, ppos, buf, len);
-	kfree(buf);
-	return ret;
-}
-
-static ssize_t dma_show_regs(struct file *file, char __user *user_buf,
-				size_t count, loff_t *ppos)
-{
-	struct hsu_dma_chan *chan = file->private_data;
-	char *buf;
-	u32 len = 0;
-	ssize_t ret;
-
-	buf = kzalloc(HSU_REGS_BUFSIZE, GFP_KERNEL);
-	if (!buf)
-		return 0;
-
-	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
-			"MFD HSU DMA channel [%d] regs:\n", chan->id);
-
-	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
-			"=================================\n");
-	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
-			"CR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_CR));
-	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
-			"DCR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_DCR));
-	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
-			"BSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_BSR));
-	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
-			"MOTSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_MOTSR));
-	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
-			"D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D0SAR));
-	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
-			"D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D0TSR));
-	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
-			"D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D1SAR));
-	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
-			"D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D1TSR));
-	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
-			"D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D2SAR));
-	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
-			"D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D2TSR));
-	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
-			"D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D3SAR));
-	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
-			"D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D3TSR));
-
-	if (len > HSU_REGS_BUFSIZE)
-		len = HSU_REGS_BUFSIZE;
-
-	ret =  simple_read_from_buffer(user_buf, count, ppos, buf, len);
-	kfree(buf);
-	return ret;
-}
-
-static const struct file_operations port_regs_ops = {
-	.owner		= THIS_MODULE,
-	.open		= simple_open,
-	.read		= port_show_regs,
-	.llseek		= default_llseek,
-};
-
-static const struct file_operations dma_regs_ops = {
-	.owner		= THIS_MODULE,
-	.open		= simple_open,
-	.read		= dma_show_regs,
-	.llseek		= default_llseek,
-};
-
-static int hsu_debugfs_init(struct hsu_port *hsu)
-{
-	int i;
-	char name[32];
-
-	hsu->debugfs = debugfs_create_dir("hsu", NULL);
-	if (!hsu->debugfs)
-		return -ENOMEM;
-
-	for (i = 0; i < 3; i++) {
-		snprintf(name, sizeof(name), "port_%d_regs", i);
-		debugfs_create_file(name, S_IFREG | S_IRUGO,
-			hsu->debugfs, (void *)(&hsu->port[i]), &port_regs_ops);
-	}
-
-	for (i = 0; i < 6; i++) {
-		snprintf(name, sizeof(name), "dma_chan_%d_regs", i);
-		debugfs_create_file(name, S_IFREG | S_IRUGO,
-			hsu->debugfs, (void *)&hsu->chans[i], &dma_regs_ops);
-	}
-
-	return 0;
-}
-
-static void hsu_debugfs_remove(struct hsu_port *hsu)
-{
-	if (hsu->debugfs)
-		debugfs_remove_recursive(hsu->debugfs);
-}
-
-#else
-static inline int hsu_debugfs_init(struct hsu_port *hsu)
-{
-	return 0;
-}
-
-static inline void hsu_debugfs_remove(struct hsu_port *hsu)
-{
-}
-#endif /* CONFIG_DEBUG_FS */
-
-static void serial_hsu_enable_ms(struct uart_port *port)
-{
-	struct uart_hsu_port *up =
-		container_of(port, struct uart_hsu_port, port);
-
-	up->ier |= UART_IER_MSI;
-	serial_out(up, UART_IER, up->ier);
-}
-
-void hsu_dma_tx(struct uart_hsu_port *up)
-{
-	struct circ_buf *xmit = &up->port.state->xmit;
-	struct hsu_dma_buffer *dbuf = &up->txbuf;
-	int count;
-
-	/* test_and_set_bit may be better, but anyway it's in lock protected mode */
-	if (up->dma_tx_on)
-		return;
-
-	/* Update the circ buf info */
-	xmit->tail += dbuf->ofs;
-	xmit->tail &= UART_XMIT_SIZE - 1;
-
-	up->port.icount.tx += dbuf->ofs;
-	dbuf->ofs = 0;
-
-	/* Disable the channel */
-	chan_writel(up->txc, HSU_CH_CR, 0x0);
-
-	if (!uart_circ_empty(xmit) && !uart_tx_stopped(&up->port)) {
-		dma_sync_single_for_device(up->port.dev,
-					   dbuf->dma_addr,
-					   dbuf->dma_size,
-					   DMA_TO_DEVICE);
-
-		count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
-		dbuf->ofs = count;
-
-		/* Reprogram the channel */
-		chan_writel(up->txc, HSU_CH_D0SAR, dbuf->dma_addr + xmit->tail);
-		chan_writel(up->txc, HSU_CH_D0TSR, count);
-
-		/* Reenable the channel */
-		chan_writel(up->txc, HSU_CH_DCR, 0x1
-						 | (0x1 << 8)
-						 | (0x1 << 16)
-						 | (0x1 << 24));
-		up->dma_tx_on = 1;
-		chan_writel(up->txc, HSU_CH_CR, 0x1);
-	}
-
-	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
-		uart_write_wakeup(&up->port);
-}
-
-/* The buffer is already cache coherent */
-void hsu_dma_start_rx_chan(struct hsu_dma_chan *rxc, struct hsu_dma_buffer *dbuf)
-{
-	dbuf->ofs = 0;
-
-	chan_writel(rxc, HSU_CH_BSR, 32);
-	chan_writel(rxc, HSU_CH_MOTSR, 4);
-
-	chan_writel(rxc, HSU_CH_D0SAR, dbuf->dma_addr);
-	chan_writel(rxc, HSU_CH_D0TSR, dbuf->dma_size);
-	chan_writel(rxc, HSU_CH_DCR, 0x1 | (0x1 << 8)
-					 | (0x1 << 16)
-					 | (0x1 << 24)	/* timeout bit, see HSU Errata 1 */
-					 );
-	chan_writel(rxc, HSU_CH_CR, 0x3);
-}
-
-/* Protected by spin_lock_irqsave(port->lock) */
-static void serial_hsu_start_tx(struct uart_port *port)
-{
-	struct uart_hsu_port *up =
-		container_of(port, struct uart_hsu_port, port);
-
-	if (up->use_dma) {
-		hsu_dma_tx(up);
-	} else if (!(up->ier & UART_IER_THRI)) {
-		up->ier |= UART_IER_THRI;
-		serial_out(up, UART_IER, up->ier);
-	}
-}
-
-static void serial_hsu_stop_tx(struct uart_port *port)
-{
-	struct uart_hsu_port *up =
-		container_of(port, struct uart_hsu_port, port);
-	struct hsu_dma_chan *txc = up->txc;
-
-	if (up->use_dma)
-		chan_writel(txc, HSU_CH_CR, 0x0);
-	else if (up->ier & UART_IER_THRI) {
-		up->ier &= ~UART_IER_THRI;
-		serial_out(up, UART_IER, up->ier);
-	}
-}
-
-/* This is always called in spinlock protected mode, so
- * modify timeout timer is safe here */
-void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts)
-{
-	struct hsu_dma_buffer *dbuf = &up->rxbuf;
-	struct hsu_dma_chan *chan = up->rxc;
-	struct uart_port *port = &up->port;
-	struct tty_port *tport = &port->state->port;
-	int count;
-
-	/*
-	 * First need to know how many is already transferred,
-	 * then check if its a timeout DMA irq, and return
-	 * the trail bytes out, push them up and reenable the
-	 * channel
-	 */
-
-	/* Timeout IRQ, need wait some time, see Errata 2 */
-	if (int_sts & 0xf00)
-		udelay(2);
-
-	/* Stop the channel */
-	chan_writel(chan, HSU_CH_CR, 0x0);
-
-	count = chan_readl(chan, HSU_CH_D0SAR) - dbuf->dma_addr;
-	if (!count) {
-		/* Restart the channel before we leave */
-		chan_writel(chan, HSU_CH_CR, 0x3);
-		return;
-	}
-
-	dma_sync_single_for_cpu(port->dev, dbuf->dma_addr,
-			dbuf->dma_size, DMA_FROM_DEVICE);
-
-	/*
-	 * Head will only wrap around when we recycle
-	 * the DMA buffer, and when that happens, we
-	 * explicitly set tail to 0. So head will
-	 * always be greater than tail.
-	 */
-	tty_insert_flip_string(tport, dbuf->buf, count);
-	port->icount.rx += count;
-
-	dma_sync_single_for_device(up->port.dev, dbuf->dma_addr,
-			dbuf->dma_size, DMA_FROM_DEVICE);
-
-	/* Reprogram the channel */
-	chan_writel(chan, HSU_CH_D0SAR, dbuf->dma_addr);
-	chan_writel(chan, HSU_CH_D0TSR, dbuf->dma_size);
-	chan_writel(chan, HSU_CH_DCR, 0x1
-					 | (0x1 << 8)
-					 | (0x1 << 16)
-					 | (0x1 << 24)	/* timeout bit, see HSU Errata 1 */
-					 );
-	tty_flip_buffer_push(tport);
-
-	chan_writel(chan, HSU_CH_CR, 0x3);
-
-}
-
-static void serial_hsu_stop_rx(struct uart_port *port)
-{
-	struct uart_hsu_port *up =
-		container_of(port, struct uart_hsu_port, port);
-	struct hsu_dma_chan *chan = up->rxc;
-
-	if (up->use_dma)
-		chan_writel(chan, HSU_CH_CR, 0x2);
-	else {
-		up->ier &= ~UART_IER_RLSI;
-		up->port.read_status_mask &= ~UART_LSR_DR;
-		serial_out(up, UART_IER, up->ier);
-	}
-}
-
-static inline void receive_chars(struct uart_hsu_port *up, int *status)
-{
-	unsigned int ch, flag;
-	unsigned int max_count = 256;
-
-	do {
-		ch = serial_in(up, UART_RX);
-		flag = TTY_NORMAL;
-		up->port.icount.rx++;
-
-		if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
-				       UART_LSR_FE | UART_LSR_OE))) {
-
-			dev_warn(up->dev, "We really rush into ERR/BI case"
-				"status = 0x%02x", *status);
-			/* For statistics only */
-			if (*status & UART_LSR_BI) {
-				*status &= ~(UART_LSR_FE | UART_LSR_PE);
-				up->port.icount.brk++;
-				/*
-				 * We do the SysRQ and SAK checking
-				 * here because otherwise the break
-				 * may get masked by ignore_status_mask
-				 * or read_status_mask.
-				 */
-				if (uart_handle_break(&up->port))
-					goto ignore_char;
-			} else if (*status & UART_LSR_PE)
-				up->port.icount.parity++;
-			else if (*status & UART_LSR_FE)
-				up->port.icount.frame++;
-			if (*status & UART_LSR_OE)
-				up->port.icount.overrun++;
-
-			/* Mask off conditions which should be ignored. */
-			*status &= up->port.read_status_mask;
-
-#ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE
-			if (up->port.cons &&
-				up->port.cons->index == up->port.line) {
-				/* Recover the break flag from console xmit */
-				*status |= up->lsr_break_flag;
-				up->lsr_break_flag = 0;
-			}
-#endif
-			if (*status & UART_LSR_BI) {
-				flag = TTY_BREAK;
-			} else if (*status & UART_LSR_PE)
-				flag = TTY_PARITY;
-			else if (*status & UART_LSR_FE)
-				flag = TTY_FRAME;
-		}
-
-		if (uart_handle_sysrq_char(&up->port, ch))
-			goto ignore_char;
-
-		uart_insert_char(&up->port, *status, UART_LSR_OE, ch, flag);
-	ignore_char:
-		*status = serial_in(up, UART_LSR);
-	} while ((*status & UART_LSR_DR) && max_count--);
-	tty_flip_buffer_push(&up->port.state->port);
-}
-
-static void transmit_chars(struct uart_hsu_port *up)
-{
-	struct circ_buf *xmit = &up->port.state->xmit;
-	int count;
-
-	if (up->port.x_char) {
-		serial_out(up, UART_TX, up->port.x_char);
-		up->port.icount.tx++;
-		up->port.x_char = 0;
-		return;
-	}
-	if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
-		serial_hsu_stop_tx(&up->port);
-		return;
-	}
-
-	/* The IRQ is for TX FIFO half-empty */
-	count = up->port.fifosize / 2;
-
-	do {
-		serial_out(up, UART_TX, xmit->buf[xmit->tail]);
-		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
-
-		up->port.icount.tx++;
-		if (uart_circ_empty(xmit))
-			break;
-	} while (--count > 0);
-
-	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
-		uart_write_wakeup(&up->port);
-
-	if (uart_circ_empty(xmit))
-		serial_hsu_stop_tx(&up->port);
-}
-
-static inline void check_modem_status(struct uart_hsu_port *up)
-{
-	int status;
-
-	status = serial_in(up, UART_MSR);
-
-	if ((status & UART_MSR_ANY_DELTA) == 0)
-		return;
-
-	if (status & UART_MSR_TERI)
-		up->port.icount.rng++;
-	if (status & UART_MSR_DDSR)
-		up->port.icount.dsr++;
-	/* We may only get DDCD when HW init and reset */
-	if (status & UART_MSR_DDCD)
-		uart_handle_dcd_change(&up->port, status & UART_MSR_DCD);
-	/* Will start/stop_tx accordingly */
-	if (status & UART_MSR_DCTS)
-		uart_handle_cts_change(&up->port, status & UART_MSR_CTS);
-
-	wake_up_interruptible(&up->port.state->port.delta_msr_wait);
-}
-
-/*
- * This handles the interrupt from one port.
- */
-static irqreturn_t port_irq(int irq, void *dev_id)
-{
-	struct uart_hsu_port *up = dev_id;
-	unsigned int iir, lsr;
-	unsigned long flags;
-
-	if (unlikely(!up->running))
-		return IRQ_NONE;
-
-	spin_lock_irqsave(&up->port.lock, flags);
-	if (up->use_dma) {
-		lsr = serial_in(up, UART_LSR);
-		if (unlikely(lsr & (UART_LSR_BI | UART_LSR_PE |
-				       UART_LSR_FE | UART_LSR_OE)))
-			dev_warn(up->dev,
-				"Got lsr irq while using DMA, lsr = 0x%2x\n",
-				lsr);
-		check_modem_status(up);
-		spin_unlock_irqrestore(&up->port.lock, flags);
-		return IRQ_HANDLED;
-	}
-
-	iir = serial_in(up, UART_IIR);
-	if (iir & UART_IIR_NO_INT) {
-		spin_unlock_irqrestore(&up->port.lock, flags);
-		return IRQ_NONE;
-	}
-
-	lsr = serial_in(up, UART_LSR);
-	if (lsr & UART_LSR_DR)
-		receive_chars(up, &lsr);
-	check_modem_status(up);
-
-	/* lsr will be renewed during the receive_chars */
-	if (lsr & UART_LSR_THRE)
-		transmit_chars(up);
-
-	spin_unlock_irqrestore(&up->port.lock, flags);
-	return IRQ_HANDLED;
-}
-
-static inline void dma_chan_irq(struct hsu_dma_chan *chan)
-{
-	struct uart_hsu_port *up = chan->uport;
-	unsigned long flags;
-	u32 int_sts;
-
-	spin_lock_irqsave(&up->port.lock, flags);
-
-	if (!up->use_dma || !up->running)
-		goto exit;
-
-	/*
-	 * No matter what situation, need read clear the IRQ status
-	 * There is a bug, see Errata 5, HSD 2900918
-	 */
-	int_sts = chan_readl(chan, HSU_CH_SR);
-
-	/* Rx channel */
-	if (chan->dirt == DMA_FROM_DEVICE)
-		hsu_dma_rx(up, int_sts);
-
-	/* Tx channel */
-	if (chan->dirt == DMA_TO_DEVICE) {
-		chan_writel(chan, HSU_CH_CR, 0x0);
-		up->dma_tx_on = 0;
-		hsu_dma_tx(up);
-	}
-
-exit:
-	spin_unlock_irqrestore(&up->port.lock, flags);
-	return;
-}
-
-static irqreturn_t dma_irq(int irq, void *dev_id)
-{
-	struct hsu_port *hsu = dev_id;
-	u32 int_sts, i;
-
-	int_sts = mfd_readl(hsu, HSU_GBL_DMAISR);
-
-	/* Currently we only have 6 channels may be used */
-	for (i = 0; i < 6; i++) {
-		if (int_sts & 0x1)
-			dma_chan_irq(&hsu->chans[i]);
-		int_sts >>= 1;
-	}
-
-	return IRQ_HANDLED;
-}
-
-static unsigned int serial_hsu_tx_empty(struct uart_port *port)
-{
-	struct uart_hsu_port *up =
-		container_of(port, struct uart_hsu_port, port);
-	unsigned long flags;
-	unsigned int ret;
-
-	spin_lock_irqsave(&up->port.lock, flags);
-	ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
-	spin_unlock_irqrestore(&up->port.lock, flags);
-
-	return ret;
-}
-
-static unsigned int serial_hsu_get_mctrl(struct uart_port *port)
-{
-	struct uart_hsu_port *up =
-		container_of(port, struct uart_hsu_port, port);
-	unsigned char status;
-	unsigned int ret;
-
-	status = serial_in(up, UART_MSR);
-
-	ret = 0;
-	if (status & UART_MSR_DCD)
-		ret |= TIOCM_CAR;
-	if (status & UART_MSR_RI)
-		ret |= TIOCM_RNG;
-	if (status & UART_MSR_DSR)
-		ret |= TIOCM_DSR;
-	if (status & UART_MSR_CTS)
-		ret |= TIOCM_CTS;
-	return ret;
-}
-
-static void serial_hsu_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
-	struct uart_hsu_port *up =
-		container_of(port, struct uart_hsu_port, port);
-	unsigned char mcr = 0;
-
-	if (mctrl & TIOCM_RTS)
-		mcr |= UART_MCR_RTS;
-	if (mctrl & TIOCM_DTR)
-		mcr |= UART_MCR_DTR;
-	if (mctrl & TIOCM_OUT1)
-		mcr |= UART_MCR_OUT1;
-	if (mctrl & TIOCM_OUT2)
-		mcr |= UART_MCR_OUT2;
-	if (mctrl & TIOCM_LOOP)
-		mcr |= UART_MCR_LOOP;
-
-	mcr |= up->mcr;
-
-	serial_out(up, UART_MCR, mcr);
-}
-
-static void serial_hsu_break_ctl(struct uart_port *port, int break_state)
-{
-	struct uart_hsu_port *up =
-		container_of(port, struct uart_hsu_port, port);
-	unsigned long flags;
-
-	spin_lock_irqsave(&up->port.lock, flags);
-	if (break_state == -1)
-		up->lcr |= UART_LCR_SBC;
-	else
-		up->lcr &= ~UART_LCR_SBC;
-	serial_out(up, UART_LCR, up->lcr);
-	spin_unlock_irqrestore(&up->port.lock, flags);
-}
-
-/*
- * What special to do:
- * 1. chose the 64B fifo mode
- * 2. start dma or pio depends on configuration
- * 3. we only allocate dma memory when needed
- */
-static int serial_hsu_startup(struct uart_port *port)
-{
-	struct uart_hsu_port *up =
-		container_of(port, struct uart_hsu_port, port);
-	unsigned long flags;
-
-	pm_runtime_get_sync(up->dev);
-
-	/*
-	 * Clear the FIFO buffers and disable them.
-	 * (they will be reenabled in set_termios())
-	 */
-	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
-	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
-			UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
-	serial_out(up, UART_FCR, 0);
-
-	/* Clear the interrupt registers. */
-	(void) serial_in(up, UART_LSR);
-	(void) serial_in(up, UART_RX);
-	(void) serial_in(up, UART_IIR);
-	(void) serial_in(up, UART_MSR);
-
-	/* Now, initialize the UART, default is 8n1 */
-	serial_out(up, UART_LCR, UART_LCR_WLEN8);
-
-	spin_lock_irqsave(&up->port.lock, flags);
-
-	up->port.mctrl |= TIOCM_OUT2;
-	serial_hsu_set_mctrl(&up->port, up->port.mctrl);
-
-	/*
-	 * Finally, enable interrupts.  Note: Modem status interrupts
-	 * are set via set_termios(), which will be occurring imminently
-	 * anyway, so we don't enable them here.
-	 */
-	if (!up->use_dma)
-		up->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE;
-	else
-		up->ier = 0;
-	serial_out(up, UART_IER, up->ier);
-
-	spin_unlock_irqrestore(&up->port.lock, flags);
-
-	/* DMA init */
-	if (up->use_dma) {
-		struct hsu_dma_buffer *dbuf;
-		struct circ_buf *xmit = &port->state->xmit;
-
-		up->dma_tx_on = 0;
-
-		/* First allocate the RX buffer */
-		dbuf = &up->rxbuf;
-		dbuf->buf = kzalloc(HSU_DMA_BUF_SIZE, GFP_KERNEL);
-		if (!dbuf->buf) {
-			up->use_dma = 0;
-			goto exit;
-		}
-		dbuf->dma_addr = dma_map_single(port->dev,
-						dbuf->buf,
-						HSU_DMA_BUF_SIZE,
-						DMA_FROM_DEVICE);
-		dbuf->dma_size = HSU_DMA_BUF_SIZE;
-
-		/* Start the RX channel right now */
-		hsu_dma_start_rx_chan(up->rxc, dbuf);
-
-		/* Next init the TX DMA */
-		dbuf = &up->txbuf;
-		dbuf->buf = xmit->buf;
-		dbuf->dma_addr = dma_map_single(port->dev,
-					       dbuf->buf,
-					       UART_XMIT_SIZE,
-					       DMA_TO_DEVICE);
-		dbuf->dma_size = UART_XMIT_SIZE;
-
-		/* This should not be changed all around */
-		chan_writel(up->txc, HSU_CH_BSR, 32);
-		chan_writel(up->txc, HSU_CH_MOTSR, 4);
-		dbuf->ofs = 0;
-	}
-
-exit:
-	 /* And clear the interrupt registers again for luck. */
-	(void) serial_in(up, UART_LSR);
-	(void) serial_in(up, UART_RX);
-	(void) serial_in(up, UART_IIR);
-	(void) serial_in(up, UART_MSR);
-
-	up->running = 1;
-	return 0;
-}
-
-static void serial_hsu_shutdown(struct uart_port *port)
-{
-	struct uart_hsu_port *up =
-		container_of(port, struct uart_hsu_port, port);
-	unsigned long flags;
-
-	/* Disable interrupts from this port */
-	up->ier = 0;
-	serial_out(up, UART_IER, 0);
-	up->running = 0;
-
-	spin_lock_irqsave(&up->port.lock, flags);
-	up->port.mctrl &= ~TIOCM_OUT2;
-	serial_hsu_set_mctrl(&up->port, up->port.mctrl);
-	spin_unlock_irqrestore(&up->port.lock, flags);
-
-	/* Disable break condition and FIFOs */
-	serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC);
-	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
-				  UART_FCR_CLEAR_RCVR |
-				  UART_FCR_CLEAR_XMIT);
-	serial_out(up, UART_FCR, 0);
-
-	pm_runtime_put(up->dev);
-}
-
-static void
-serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
-		       struct ktermios *old)
-{
-	struct uart_hsu_port *up =
-			container_of(port, struct uart_hsu_port, port);
-	unsigned char cval, fcr = 0;
-	unsigned long flags;
-	unsigned int baud, quot;
-	u32 ps, mul;
-
-	switch (termios->c_cflag & CSIZE) {
-	case CS5:
-		cval = UART_LCR_WLEN5;
-		break;
-	case CS6:
-		cval = UART_LCR_WLEN6;
-		break;
-	case CS7:
-		cval = UART_LCR_WLEN7;
-		break;
-	default:
-	case CS8:
-		cval = UART_LCR_WLEN8;
-		break;
-	}
-
-	/* CMSPAR isn't supported by this driver */
-	termios->c_cflag &= ~CMSPAR;
-
-	if (termios->c_cflag & CSTOPB)
-		cval |= UART_LCR_STOP;
-	if (termios->c_cflag & PARENB)
-		cval |= UART_LCR_PARITY;
-	if (!(termios->c_cflag & PARODD))
-		cval |= UART_LCR_EPAR;
-
-	/*
-	 * The base clk is 50Mhz, and the baud rate come from:
-	 *	baud = 50M * MUL / (DIV * PS * DLAB)
-	 *
-	 * For those basic low baud rate we can get the direct
-	 * scalar from 2746800, like 115200 = 2746800/24. For those
-	 * higher baud rate, we handle them case by case, mainly by
-	 * adjusting the MUL/PS registers, and DIV register is kept
-	 * as default value 0x3d09 to make things simple
-	 */
-	baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
-
-	quot = 1;
-	ps = 0x10;
-	mul = 0x3600;
-	switch (baud) {
-	case 3500000:
-		mul = 0x3345;
-		ps = 0xC;
-		break;
-	case 1843200:
-		mul = 0x2400;
-		break;
-	case 3000000:
-	case 2500000:
-	case 2000000:
-	case 1500000:
-	case 1000000:
-	case 500000:
-		/* mul/ps/quot = 0x9C4/0x10/0x1 will make a 500000 bps */
-		mul = baud / 500000 * 0x9C4;
-		break;
-	default:
-		/* Use uart_get_divisor to get quot for other baud rates */
-		quot = 0;
-	}
-
-	if (!quot)
-		quot = uart_get_divisor(port, baud);
-
-	if ((up->port.uartclk / quot) < (2400 * 16))
-		fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_1B;
-	else if ((up->port.uartclk / quot) < (230400 * 16))
-		fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_16B;
-	else
-		fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_32B;
-
-	fcr |= UART_FCR_HSU_64B_FIFO;
-
-	/*
-	 * Ok, we're now changing the port state.  Do it with
-	 * interrupts disabled.
-	 */
-	spin_lock_irqsave(&up->port.lock, flags);
-
-	/* Update the per-port timeout */
-	uart_update_timeout(port, termios->c_cflag, baud);
-
-	up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
-	if (termios->c_iflag & INPCK)
-		up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
-	if (termios->c_iflag & (BRKINT | PARMRK))
-		up->port.read_status_mask |= UART_LSR_BI;
-
-	/* Characters to ignore */
-	up->port.ignore_status_mask = 0;
-	if (termios->c_iflag & IGNPAR)
-		up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
-	if (termios->c_iflag & IGNBRK) {
-		up->port.ignore_status_mask |= UART_LSR_BI;
-		/*
-		 * If we're ignoring parity and break indicators,
-		 * ignore overruns too (for real raw support).
-		 */
-		if (termios->c_iflag & IGNPAR)
-			up->port.ignore_status_mask |= UART_LSR_OE;
-	}
-
-	/* Ignore all characters if CREAD is not set */
-	if ((termios->c_cflag & CREAD) == 0)
-		up->port.ignore_status_mask |= UART_LSR_DR;
-
-	/*
-	 * CTS flow control flag and modem status interrupts, disable
-	 * MSI by default
-	 */
-	up->ier &= ~UART_IER_MSI;
-	if (UART_ENABLE_MS(&up->port, termios->c_cflag))
-		up->ier |= UART_IER_MSI;
-
-	serial_out(up, UART_IER, up->ier);
-
-	if (termios->c_cflag & CRTSCTS)
-		up->mcr |= UART_MCR_AFE | UART_MCR_RTS;
-	else
-		up->mcr &= ~UART_MCR_AFE;
-
-	serial_out(up, UART_LCR, cval | UART_LCR_DLAB);	/* set DLAB */
-	serial_out(up, UART_DLL, quot & 0xff);		/* LS of divisor */
-	serial_out(up, UART_DLM, quot >> 8);		/* MS of divisor */
-	serial_out(up, UART_LCR, cval);			/* reset DLAB */
-	serial_out(up, UART_MUL, mul);			/* set MUL */
-	serial_out(up, UART_PS, ps);			/* set PS */
-	up->lcr = cval;					/* Save LCR */
-	serial_hsu_set_mctrl(&up->port, up->port.mctrl);
-	serial_out(up, UART_FCR, fcr);
-	spin_unlock_irqrestore(&up->port.lock, flags);
-}
-
-static void
-serial_hsu_pm(struct uart_port *port, unsigned int state,
-	      unsigned int oldstate)
-{
-}
-
-static void serial_hsu_release_port(struct uart_port *port)
-{
-}
-
-static int serial_hsu_request_port(struct uart_port *port)
-{
-	return 0;
-}
-
-static void serial_hsu_config_port(struct uart_port *port, int flags)
-{
-	struct uart_hsu_port *up =
-		container_of(port, struct uart_hsu_port, port);
-	up->port.type = PORT_MFD;
-}
-
-static int
-serial_hsu_verify_port(struct uart_port *port, struct serial_struct *ser)
-{
-	/* We don't want the core code to modify any port params */
-	return -EINVAL;
-}
-
-static const char *
-serial_hsu_type(struct uart_port *port)
-{
-	struct uart_hsu_port *up =
-		container_of(port, struct uart_hsu_port, port);
-	return up->name;
-}
-
-/* Mainly for uart console use */
-static struct uart_hsu_port *serial_hsu_ports[3];
-static struct uart_driver serial_hsu_reg;
-
-#ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE
-
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
-/* Wait for transmitter & holding register to empty */
-static inline void wait_for_xmitr(struct uart_hsu_port *up)
-{
-	unsigned int status, tmout = 1000;
-
-	/* Wait up to 1ms for the character to be sent. */
-	do {
-		status = serial_in(up, UART_LSR);
-
-		if (status & UART_LSR_BI)
-			up->lsr_break_flag = UART_LSR_BI;
-
-		if (--tmout == 0)
-			break;
-		udelay(1);
-	} while (!(status & BOTH_EMPTY));
-
-	/* Wait up to 1s for flow control if necessary */
-	if (up->port.flags & UPF_CONS_FLOW) {
-		tmout = 1000000;
-		while (--tmout &&
-		       ((serial_in(up, UART_MSR) & UART_MSR_CTS) == 0))
-			udelay(1);
-	}
-}
-
-static void serial_hsu_console_putchar(struct uart_port *port, int ch)
-{
-	struct uart_hsu_port *up =
-		container_of(port, struct uart_hsu_port, port);
-
-	wait_for_xmitr(up);
-	serial_out(up, UART_TX, ch);
-}
-
-/*
- * Print a string to the serial port trying not to disturb
- * any possible real use of the port...
- *
- *	The console_lock must be held when we get here.
- */
-static void
-serial_hsu_console_write(struct console *co, const char *s, unsigned int count)
-{
-	struct uart_hsu_port *up = serial_hsu_ports[co->index];
-	unsigned long flags;
-	unsigned int ier;
-	int locked = 1;
-
-	touch_nmi_watchdog();
-
-	local_irq_save(flags);
-	if (up->port.sysrq)
-		locked = 0;
-	else if (oops_in_progress) {
-		locked = spin_trylock(&up->port.lock);
-	} else
-		spin_lock(&up->port.lock);
-
-	/* First save the IER then disable the interrupts */
-	ier = serial_in(up, UART_IER);
-	serial_out(up, UART_IER, 0);
-
-	uart_console_write(&up->port, s, count, serial_hsu_console_putchar);
-
-	/*
-	 * Finally, wait for transmitter to become empty
-	 * and restore the IER
-	 */
-	wait_for_xmitr(up);
-	serial_out(up, UART_IER, ier);
-
-	if (locked)
-		spin_unlock(&up->port.lock);
-	local_irq_restore(flags);
-}
-
-static struct console serial_hsu_console;
-
-static int __init
-serial_hsu_console_setup(struct console *co, char *options)
-{
-	struct uart_hsu_port *up;
-	int baud = 115200;
-	int bits = 8;
-	int parity = 'n';
-	int flow = 'n';
-
-	if (co->index == -1 || co->index >= serial_hsu_reg.nr)
-		co->index = 0;
-	up = serial_hsu_ports[co->index];
-	if (!up)
-		return -ENODEV;
-
-	if (options)
-		uart_parse_options(options, &baud, &parity, &bits, &flow);
-
-	return uart_set_options(&up->port, co, baud, parity, bits, flow);
-}
-
-static struct console serial_hsu_console = {
-	.name		= "ttyMFD",
-	.write		= serial_hsu_console_write,
-	.device		= uart_console_device,
-	.setup		= serial_hsu_console_setup,
-	.flags		= CON_PRINTBUFFER,
-	.index		= -1,
-	.data		= &serial_hsu_reg,
-};
-
-#define SERIAL_HSU_CONSOLE	(&serial_hsu_console)
-#else
-#define SERIAL_HSU_CONSOLE	NULL
-#endif
-
-struct uart_ops serial_hsu_pops = {
-	.tx_empty	= serial_hsu_tx_empty,
-	.set_mctrl	= serial_hsu_set_mctrl,
-	.get_mctrl	= serial_hsu_get_mctrl,
-	.stop_tx	= serial_hsu_stop_tx,
-	.start_tx	= serial_hsu_start_tx,
-	.stop_rx	= serial_hsu_stop_rx,
-	.enable_ms	= serial_hsu_enable_ms,
-	.break_ctl	= serial_hsu_break_ctl,
-	.startup	= serial_hsu_startup,
-	.shutdown	= serial_hsu_shutdown,
-	.set_termios	= serial_hsu_set_termios,
-	.pm		= serial_hsu_pm,
-	.type		= serial_hsu_type,
-	.release_port	= serial_hsu_release_port,
-	.request_port	= serial_hsu_request_port,
-	.config_port	= serial_hsu_config_port,
-	.verify_port	= serial_hsu_verify_port,
-};
-
-static struct uart_driver serial_hsu_reg = {
-	.owner		= THIS_MODULE,
-	.driver_name	= "MFD serial",
-	.dev_name	= "ttyMFD",
-	.major		= TTY_MAJOR,
-	.minor		= 128,
-	.nr		= 3,
-	.cons		= SERIAL_HSU_CONSOLE,
-};
-
-#ifdef CONFIG_PM
-static int serial_hsu_suspend(struct pci_dev *pdev, pm_message_t state)
-{
-	void *priv = pci_get_drvdata(pdev);
-	struct uart_hsu_port *up;
-
-	/* Make sure this is not the internal dma controller */
-	if (priv && (pdev->device != 0x081E)) {
-		up = priv;
-		uart_suspend_port(&serial_hsu_reg, &up->port);
-	}
-
-	pci_save_state(pdev);
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));
-        return 0;
-}
-
-static int serial_hsu_resume(struct pci_dev *pdev)
-{
-	void *priv = pci_get_drvdata(pdev);
-	struct uart_hsu_port *up;
-	int ret;
-
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
-
-	ret = pci_enable_device(pdev);
-	if (ret)
-		dev_warn(&pdev->dev,
-			"HSU: can't re-enable device, try to continue\n");
-
-	if (priv && (pdev->device != 0x081E)) {
-		up = priv;
-		uart_resume_port(&serial_hsu_reg, &up->port);
-	}
-	return 0;
-}
-#else
-#define serial_hsu_suspend	NULL
-#define serial_hsu_resume	NULL
-#endif
-
-#ifdef CONFIG_PM_RUNTIME
-static int serial_hsu_runtime_idle(struct device *dev)
-{
-	int err;
-
-	err = pm_schedule_suspend(dev, 500);
-	if (err)
-		return -EBUSY;
-
-	return 0;
-}
-
-static int serial_hsu_runtime_suspend(struct device *dev)
-{
-	return 0;
-}
-
-static int serial_hsu_runtime_resume(struct device *dev)
-{
-	return 0;
-}
-#else
-#define serial_hsu_runtime_idle		NULL
-#define serial_hsu_runtime_suspend	NULL
-#define serial_hsu_runtime_resume	NULL
-#endif
-
-static const struct dev_pm_ops serial_hsu_pm_ops = {
-	.runtime_suspend = serial_hsu_runtime_suspend,
-	.runtime_resume = serial_hsu_runtime_resume,
-	.runtime_idle = serial_hsu_runtime_idle,
-};
-
-/* temp global pointer before we settle down on using one or four PCI dev */
-static struct hsu_port *phsu;
-
-static int serial_hsu_probe(struct pci_dev *pdev,
-				const struct pci_device_id *ent)
-{
-	struct uart_hsu_port *uport;
-	int index, ret;
-
-	printk(KERN_INFO "HSU: found PCI Serial controller(ID: %04x:%04x)\n",
-		pdev->vendor, pdev->device);
-
-	switch (pdev->device) {
-	case 0x081B:
-		index = 0;
-		break;
-	case 0x081C:
-		index = 1;
-		break;
-	case 0x081D:
-		index = 2;
-		break;
-	case 0x081E:
-		/* internal DMA controller */
-		index = 3;
-		break;
-	default:
-		dev_err(&pdev->dev, "HSU: out of index!");
-		return -ENODEV;
-	}
-
-	ret = pci_enable_device(pdev);
-	if (ret)
-		return ret;
-
-	if (index == 3) {
-		/* DMA controller */
-		ret = request_irq(pdev->irq, dma_irq, 0, "hsu_dma", phsu);
-		if (ret) {
-			dev_err(&pdev->dev, "can not get IRQ\n");
-			goto err_disable;
-		}
-		pci_set_drvdata(pdev, phsu);
-	} else {
-		/* UART port 0~2 */
-		uport = &phsu->port[index];
-		uport->port.irq = pdev->irq;
-		uport->port.dev = &pdev->dev;
-		uport->dev = &pdev->dev;
-
-		ret = request_irq(pdev->irq, port_irq, 0, uport->name, uport);
-		if (ret) {
-			dev_err(&pdev->dev, "can not get IRQ\n");
-			goto err_disable;
-		}
-		uart_add_one_port(&serial_hsu_reg, &uport->port);
-
-		pci_set_drvdata(pdev, uport);
-	}
-
-	pm_runtime_put_noidle(&pdev->dev);
-	pm_runtime_allow(&pdev->dev);
-
-	return 0;
-
-err_disable:
-	pci_disable_device(pdev);
-	return ret;
-}
-
-static void hsu_global_init(void)
-{
-	struct hsu_port *hsu;
-	struct uart_hsu_port *uport;
-	struct hsu_dma_chan *dchan;
-	int i, ret;
-
-	hsu = kzalloc(sizeof(struct hsu_port), GFP_KERNEL);
-	if (!hsu)
-		return;
-
-	/* Get basic io resource and map it */
-	hsu->paddr = 0xffa28000;
-	hsu->iolen = 0x1000;
-
-	if (!(request_mem_region(hsu->paddr, hsu->iolen, "HSU global")))
-		pr_warning("HSU: error in request mem region\n");
-
-	hsu->reg = ioremap_nocache((unsigned long)hsu->paddr, hsu->iolen);
-	if (!hsu->reg) {
-		pr_err("HSU: error in ioremap\n");
-		ret = -ENOMEM;
-		goto err_free_region;
-	}
-
-	/* Initialise the 3 UART ports */
-	uport = hsu->port;
-	for (i = 0; i < 3; i++) {
-		uport->port.type = PORT_MFD;
-		uport->port.iotype = UPIO_MEM;
-		uport->port.mapbase = (resource_size_t)hsu->paddr
-					+ HSU_PORT_REG_OFFSET
-					+ i * HSU_PORT_REG_LENGTH;
-		uport->port.membase = hsu->reg + HSU_PORT_REG_OFFSET
-					+ i * HSU_PORT_REG_LENGTH;
-
-		sprintf(uport->name, "hsu_port%d", i);
-		uport->port.fifosize = 64;
-		uport->port.ops = &serial_hsu_pops;
-		uport->port.line = i;
-		uport->port.flags = UPF_IOREMAP;
-		/* set the scalable maxim support rate to 2746800 bps */
-		uport->port.uartclk = 115200 * 24 * 16;
-
-		uport->running = 0;
-		uport->txc = &hsu->chans[i * 2];
-		uport->rxc = &hsu->chans[i * 2 + 1];
-
-		serial_hsu_ports[i] = uport;
-		uport->index = i;
-
-		if (hsu_dma_enable & (1<<i))
-			uport->use_dma = 1;
-		else
-			uport->use_dma = 0;
-
-		uport++;
-	}
-
-	/* Initialise 6 dma channels */
-	dchan = hsu->chans;
-	for (i = 0; i < 6; i++) {
-		dchan->id = i;
-		dchan->dirt = (i & 0x1) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-		dchan->uport = &hsu->port[i/2];
-		dchan->reg = hsu->reg + HSU_DMA_CHANS_REG_OFFSET +
-				i * HSU_DMA_CHANS_REG_LENGTH;
-
-		dchan++;
-	}
-
-	phsu = hsu;
-	hsu_debugfs_init(hsu);
-	return;
-
-err_free_region:
-	release_mem_region(hsu->paddr, hsu->iolen);
-	kfree(hsu);
-	return;
-}
-
-static void serial_hsu_remove(struct pci_dev *pdev)
-{
-	void *priv = pci_get_drvdata(pdev);
-	struct uart_hsu_port *up;
-
-	if (!priv)
-		return;
-
-	pm_runtime_forbid(&pdev->dev);
-	pm_runtime_get_noresume(&pdev->dev);
-
-	/* For port 0/1/2, priv is the address of uart_hsu_port */
-	if (pdev->device != 0x081E) {
-		up = priv;
-		uart_remove_one_port(&serial_hsu_reg, &up->port);
-	}
-
-	pci_set_drvdata(pdev, NULL);
-	free_irq(pdev->irq, priv);
-	pci_disable_device(pdev);
-}
-
-/* First 3 are UART ports, and the 4th is the DMA */
-static const struct pci_device_id pci_ids[] = {
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081B) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081C) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081D) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081E) },
-	{},
-};
-
-static struct pci_driver hsu_pci_driver = {
-	.name =		"HSU serial",
-	.id_table =	pci_ids,
-	.probe =	serial_hsu_probe,
-	.remove =	serial_hsu_remove,
-	.suspend =	serial_hsu_suspend,
-	.resume	=	serial_hsu_resume,
-	.driver = {
-		.pm = &serial_hsu_pm_ops,
-	},
-};
-
-static int __init hsu_pci_init(void)
-{
-	int ret;
-
-	hsu_global_init();
-
-	ret = uart_register_driver(&serial_hsu_reg);
-	if (ret)
-		return ret;
-
-	return pci_register_driver(&hsu_pci_driver);
-}
-
-static void __exit hsu_pci_exit(void)
-{
-	pci_unregister_driver(&hsu_pci_driver);
-	uart_unregister_driver(&serial_hsu_reg);
-
-	hsu_debugfs_remove(phsu);
-
-	kfree(phsu);
-}
-
-module_init(hsu_pci_init);
-module_exit(hsu_pci_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:medfield-hsu");
diff --git a/drivers/tty/serial/mfd.h b/drivers/tty/serial/mfd.h
new file mode 100644
index 0000000..22cb3f3
--- /dev/null
+++ b/drivers/tty/serial/mfd.h
@@ -0,0 +1,252 @@
+#ifndef _MFD_H
+#define _MFD_H
+
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+#include <linux/serial_mfd.h>
+#include <linux/intel_mid_dma.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/dma-direction.h>
+#include <asm/intel_mid_hsu.h>
+
+#define HSU_PORT_MAX		8
+#define HSU_DMA_BUF_SIZE	2048
+#define HSU_Q_MAX		4096
+#define HSU_CL_BUF_LEN		(1 << CONFIG_LOG_BUF_SHIFT)
+#define HSU_DMA_BSR		32
+#define HSU_DMA_MOTSR		4
+#define HSU_PIO_RX_ERR		0x06
+#define HSU_PIO_RX_AVB		0x04
+#define HSU_PIO_RX_TMO		0x0C
+#define HSU_PIO_TX_REQ		0x02
+
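+/* Bit numbers for the uart_hsu_port->flags field (atomic bitops). */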
+enum {
+	flag_console = 0,
+	flag_reopen,
+	flag_suspend,
+	flag_active,
+	flag_set_alt,
+	flag_tx_on,
+	flag_rx_on,
+	flag_rx_pending,
+	flag_startup,
+	flag_cmd_on,
+	flag_cmd_off,
+};
+
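+/*
+ * One-byte commands queued on uart_hsu_port->qcirc for deferred
+ * execution; see insert_qcmd()/get_qcmd() in mfd_core.c.
+ */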
+enum {
+	qcmd_overflow = 0,
+	qcmd_get_msr,
+	qcmd_set_mcr,
+	qcmd_set_ier,
+	qcmd_start_rx,
+	qcmd_stop_rx,
+	qcmd_start_tx,
+	qcmd_stop_tx,
+	qcmd_cl,
+	qcmd_port_irq,
+	qcmd_dma_irq,
+	qcmd_enable_irq,
+	qcmd_cmd_off,
+	qcmd_max,
+};
+
+enum {
+	context_save,
+	context_load,
+};
+
+struct hsu_dma_buffer {
+	u8		*buf;
+	dma_addr_t	dma_addr;
+	u32		dma_size;
+	u32		ofs;
+};
+
+struct hsu_dma_chan {
+	u32	id;
+	enum dma_data_direction	dirt;
+	struct uart_hsu_port	*uport;
+	void __iomem		*reg;
+	u32	cr;
+	u32	dcr;
+	u32	sar;
+	u32	tsr;
+};
+
+struct dw_dma_priv {
+	struct intel_mid_dma_slave	txs;
+	struct intel_mid_dma_slave	rxs;
+
+	struct uart_hsu_port	*up;
+
+	struct dma_chan		*txchan;
+	struct dma_chan		*rxchan;
+
+	/* physical address of the data register */
+	dma_addr_t		dma_addr;
+	struct pci_dev		*dmac;
+};
+
+struct intel_dma_priv {
+	unsigned int		tx_addr;
+	struct hsu_dma_chan	*txc;
+	struct hsu_dma_chan	*rxc;
+};
+
+struct hsu_dma_ops {
+	int (*init)(struct uart_hsu_port *up);
+	int (*exit)(struct uart_hsu_port *up);
+	int (*suspend)(struct uart_hsu_port *up);
+	int (*resume)(struct uart_hsu_port *up);
+	void (*start_tx)(struct uart_hsu_port *up);
+	void (*stop_tx)(struct uart_hsu_port *up);
+	void (*start_rx)(struct uart_hsu_port *up);
+	void (*stop_rx)(struct uart_hsu_port *up);
+	/* op will be context_save or context_load */
+	void (*context_op)(struct uart_hsu_port *up, int op);
+};
+
+struct uart_hsu_port {
+	struct uart_port        port;
+	struct mutex		q_mutex;
+	int			q_start;
+	struct workqueue_struct *workqueue;
+	struct work_struct	work;
+	struct tasklet_struct	tasklet;
+	struct circ_buf		qcirc;
+	int			qbuf[HSU_Q_MAX];
+	struct circ_buf		cl_circ;
+	spinlock_t		cl_lock;
+
+	/* Intel HSU or Designware */
+	int			hw_type;
+
+	unsigned char           msr;
+	unsigned char           ier;
+	unsigned char           lcr;
+	unsigned char           mcr;
+	unsigned char           lsr;
+	unsigned char           dll;
+	unsigned char           dlm;
+	unsigned char		fcr;
+	/* intel_hsu's clk param */
+	unsigned int		mul;
+	unsigned int		div;
+	unsigned int		ps;
+
+	/* Buffered value due to runtime PM and sharing IRQ */
+	unsigned char		iir;
+
+	/* intel_dw's clk param */
+	unsigned int		m;
+	unsigned int		n;
+
+	unsigned int            lsr_break_flag;
+	char			name[24];
+	int			index;
+	struct device		*dev;
+
+	unsigned int		tx_addr;
+	struct hsu_dma_chan	*txc;
+	struct hsu_dma_chan	*rxc;
+	struct hsu_dma_buffer	txbuf;
+	struct hsu_dma_buffer	rxbuf;
+
+	unsigned char		rxc_chcr_save;
+
+	unsigned long		flags;
+
+	unsigned int		qcmd_num;
+	unsigned int		qcmd_done;
+	unsigned int		port_irq_num;
+	unsigned int		port_irq_cmddone;
+	unsigned int		port_irq_no_alt;
+	unsigned int		port_irq_no_startup;
+	unsigned int		port_irq_pio_no_irq_pend;
+	unsigned int		port_irq_pio_tx_req;
+	unsigned int		port_irq_pio_rx_avb;
+	unsigned int		port_irq_pio_rx_err;
+	unsigned int		port_irq_pio_rx_timeout;
+	unsigned int		cts_status;
+	unsigned int		dma_irq_num;
+	unsigned int		dma_invalid_irq_num;
+	unsigned int		dma_irq_cmddone;
+	unsigned int		dma_tx_irq_cmddone;
+	unsigned int		dma_rx_irq_cmddone;
+	unsigned int		dma_rx_tmt_irq_cmddone;
+	unsigned int		tasklet_done;
+	unsigned int		workq_done;
+	unsigned int		in_workq;
+	unsigned int		in_tasklet;
+
+	unsigned int		byte_delay;
+
+	int			use_dma;	/* flag for DMA/PIO */
+	unsigned int		dma_irq;
+	unsigned int		port_dma_sts;
+
+	void			*dma_priv;
+	struct hsu_dma_ops	*dma_ops;
+	struct pm_qos_request   qos;
+	int			dma_inited;
+};
+
+struct hsu_port {
+	int dma_irq;
+	int port_num;
+	int irq_port_and_dma;
+	struct hsu_port_cfg	*configs[HSU_PORT_MAX];
+	void __iomem	*reg;
+	struct uart_hsu_port	port[HSU_PORT_MAX];
+	struct hsu_dma_chan	chans[HSU_PORT_MAX * 2];
+	spinlock_t		dma_lock;
+	struct dentry *debugfs;
+};
+
+#define chan_readl(chan, offset)	readl(chan->reg + offset)
+#define chan_writel(chan, offset, val)	writel(val, chan->reg + offset)
+
+#define mfd_readl(obj, offset)		readl(obj->reg + offset)
+#define mfd_writel(obj, offset, val)	writel(val, obj->reg + offset)
+
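+/*
+ * Registers above UART_MSR are HSU-specific 32-bit registers at a
+ * 4-byte stride (hence offset << 2); the Designware variant maps even
+ * the 8250-compatible set as 32-bit registers, so it always shifts.
+ */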
+static inline unsigned int serial_in(struct uart_hsu_port *up, int offset)
+{
+	unsigned int val;
+
+	if (offset > UART_MSR || up->hw_type == hsu_dw) {
+		offset <<= 2;
+		val = readl(up->port.membase + offset);
+	} else
+		val = (unsigned int)readb(up->port.membase + offset);
+
+	return val;
+}
+
+static inline void serial_out(struct uart_hsu_port *up, int offset, int value)
+{
+	if (offset > UART_MSR || up->hw_type == hsu_dw) {
+		offset <<= 2;
+		writel(value, up->port.membase + offset);
+	} else {
+		unsigned char val = value & 0xff;
+		writeb(val, up->port.membase + offset);
+	}
+}
+void serial_sched_cmd(struct uart_hsu_port *up, char cmd);
+extern struct hsu_dma_ops *pdw_dma_ops;
+extern struct hsu_dma_ops intel_dma_ops;
+
+struct uart_hsu_port *serial_hsu_port_setup(struct device *pdev, int port,
+	resource_size_t start, resource_size_t len, int irq);
+void serial_hsu_port_free(struct uart_hsu_port *up);
+void serial_hsu_port_shutdown(struct uart_hsu_port *up);
+int serial_hsu_dma_setup(struct device *pdev,
+	resource_size_t start, resource_size_t len, unsigned int irq, int share);
+void serial_hsu_dma_free(void);
+int serial_hsu_do_suspend(struct uart_hsu_port *up);
+int serial_hsu_do_resume(struct uart_hsu_port *up);
+int serial_hsu_do_runtime_idle(struct uart_hsu_port *up);
+
+#include "mfd_trace.h"
+#endif
diff --git a/drivers/tty/serial/mfd_core.c b/drivers/tty/serial/mfd_core.c
new file mode 100644
index 0000000..39f2522
--- /dev/null
+++ b/drivers/tty/serial/mfd_core.c
@@ -0,0 +1,2482 @@
+/*
+ * mfd_core.c: driver core for High Speed UART device of Intel Medfield platform
+ *
+ * Refer to pxa.c, 8250.c and some other drivers in drivers/serial/
+ *
+ * (C) Copyright 2010 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+/* Notes:
+ * 1. DMA channel allocation: channels 0/1 are assigned to port 0,
+ *    channels 2/3 to port 1, and channels 4/5 to port 2. Even-numbered
+ *    channels are used for RX, odd-numbered ones for TX.
+ *
+ * 2. The RI/DSR/DCD/DTR lines are not pinned out; DCD & DSR are always
+ *    asserted, and only when the HW is reset will DDCD and DDSR be
+ *    triggered.
+ */
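+/* In code terms: port i receives on phsu->chans[i * 2] and transmits
+ * on phsu->chans[i * 2 + 1] (cf. hsu_dma_irq() below). */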
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/slab.h>
+#include <linux/circ_buf.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+#include <linux/irq.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/pm_qos.h>
+
+#define CREATE_TRACE_POINTS
+#include "mfd.h"
+
+static int hsu_dma_enable = 0xff;
+module_param(hsu_dma_enable, int, 0);
+MODULE_PARM_DESC(hsu_dma_enable,
+		 "It is a bitmap to set working mode, if bit[x] is 1, then port[x] will work in DMA mode, otherwise in PIO mode.");
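+/* For example, loading with hsu_dma_enable=0x5 selects DMA for ports 0
+ * and 2 and PIO for the rest; the default 0xff enables DMA on all
+ * ports. */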
+
+static struct hsu_port hsu;
+static struct hsu_port *phsu = &hsu;
+static struct uart_driver serial_hsu_reg;
+static struct hsu_port_cfg *hsu_port_func_cfg;
+
+static void serial_hsu_command(struct uart_hsu_port *up);
+
+int hsu_register_board_info(void *inf)
+{
+	hsu_port_func_cfg = inf;
+	return 0;
+}
+
+static inline int check_qcmd(struct uart_hsu_port *up, char *cmd)
+{
+	struct circ_buf *circ = &up->qcirc;
+	char *buf;
+
+	buf = circ->buf + circ->tail;
+	*cmd = *buf;
+	return CIRC_CNT(circ->head, circ->tail, HSU_Q_MAX);
+}
+
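+/*
+ * Append a command to the queue. A command identical to the last one
+ * queued is dropped, except for the enable_irq/port_irq/dma_irq
+ * commands, each of which must reach the consumer.
+ */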
+static inline void insert_qcmd(struct uart_hsu_port *up, char cmd)
+{
+	struct circ_buf *circ = &up->qcirc;
+	char *buf;
+	char last_cmd;
+
+	trace_hsu_cmd_insert(up->index, cmd);
+	if (check_qcmd(up, &last_cmd) && last_cmd == cmd &&
+		cmd != qcmd_enable_irq && cmd != qcmd_port_irq &&
+		cmd != qcmd_dma_irq)
+		return;
+	trace_hsu_cmd_add(up->index, cmd);
+	up->qcmd_num++;
+	buf = circ->buf + circ->head;
+	if (CIRC_SPACE(circ->head, circ->tail, HSU_Q_MAX) < 1)
+		*buf = qcmd_overflow;
+	else {
+		*buf = cmd;
+		circ->head++;
+		if (circ->head == HSU_Q_MAX)
+			circ->head = 0;
+	}
+}
+
+static inline int get_qcmd(struct uart_hsu_port *up, char *cmd)
+{
+	struct circ_buf *circ = &up->qcirc;
+	char *buf;
+
+	if (!CIRC_CNT(circ->head, circ->tail, HSU_Q_MAX))
+		return 0;
+	buf = circ->buf + circ->tail;
+	*cmd = *buf;
+	circ->tail++;
+	if (circ->tail == HSU_Q_MAX)
+		circ->tail = 0;
+	up->qcmd_done++;
+	return 1;
+}
+
+static inline void cl_put_char(struct uart_hsu_port *up, char c)
+{
+	struct circ_buf *circ = &up->cl_circ;
+	char *buf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&up->cl_lock, flags);
+	buf = circ->buf + circ->head;
+	if (CIRC_SPACE(circ->head, circ->tail, HSU_CL_BUF_LEN) > 1) {
+		*buf = c;
+		circ->head++;
+		if (circ->head == HSU_CL_BUF_LEN)
+			circ->head = 0;
+	}
+	spin_unlock_irqrestore(&up->cl_lock, flags);
+}
+
+static inline int cl_get_char(struct uart_hsu_port *up, char *c)
+{
+	struct circ_buf *circ = &up->cl_circ;
+	char *buf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&up->cl_lock, flags);
+	if (!CIRC_CNT(circ->head, circ->tail, HSU_CL_BUF_LEN)) {
+		spin_unlock_irqrestore(&up->cl_lock, flags);
+		return 0;
+	}
+	buf = circ->buf + circ->tail;
+	*c = *buf;
+	circ->tail++;
+	if (circ->tail == HSU_CL_BUF_LEN)
+		circ->tail = 0;
+	spin_unlock_irqrestore(&up->cl_lock, flags);
+	return 1;
+}
+
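+/*
+ * Queue a command and kick the consumer: the tasklet when in DMA mode,
+ * the port workqueue when in PIO mode.
+ */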
+void serial_sched_cmd(struct uart_hsu_port *up, char cmd)
+{
+	pm_runtime_get(up->dev);
+	insert_qcmd(up, cmd);
+	if (test_bit(flag_cmd_on, &up->flags)) {
+		if (up->use_dma)
+			tasklet_schedule(&up->tasklet);
+		else
+			queue_work(up->workqueue, &up->work);
+	}
+	pm_runtime_put(up->dev);
+}
+
+static inline void serial_sched_sync(struct uart_hsu_port *up)
+{
+	mutex_lock(&up->q_mutex);
+	if (up->q_start > 0) {
+		if (up->use_dma) {
+			tasklet_disable(&up->tasklet);
+			serial_hsu_command(up);
+			tasklet_enable(&up->tasklet);
+		} else {
+			flush_workqueue(up->workqueue);
+		}
+	}
+	mutex_unlock(&up->q_mutex);
+}
+
+static inline void serial_sched_start(struct uart_hsu_port *up)
+{
+	unsigned long flags;
+
+	mutex_lock(&up->q_mutex);
+	up->q_start++;
+	if (up->q_start == 1) {
+		clear_bit(flag_cmd_off, &up->flags);
+		spin_lock_irqsave(&up->port.lock, flags);
+		set_bit(flag_cmd_on, &up->flags);
+		spin_unlock_irqrestore(&up->port.lock, flags);
+		if (up->use_dma)
+			tasklet_schedule(&up->tasklet);
+		else
+			queue_work(up->workqueue, &up->work);
+	}
+	mutex_unlock(&up->q_mutex);
+}
+
+static inline void serial_sched_stop(struct uart_hsu_port *up)
+{
+	unsigned long flags;
+
+	mutex_lock(&up->q_mutex);
+	up->q_start--;
+	if (up->q_start == 0) {
+		spin_lock_irqsave(&up->port.lock, flags);
+		clear_bit(flag_cmd_on, &up->flags);
+		insert_qcmd(up, qcmd_cmd_off);
+		spin_unlock_irqrestore(&up->port.lock, flags);
+		if (up->use_dma) {
+			tasklet_schedule(&up->tasklet);
+			while (!test_bit(flag_cmd_off, &up->flags))
+				cpu_relax();
+		} else {
+			queue_work(up->workqueue, &up->work);
+			flush_workqueue(up->workqueue);
+		}
+	}
+	mutex_unlock(&up->q_mutex);
+}
+
+static void serial_set_alt(int index)
+{
+	struct uart_hsu_port *up = phsu->port + index;
+	struct hsu_dma_chan *txc = up->txc;
+	struct hsu_dma_chan *rxc = up->rxc;
+	struct hsu_port_cfg *cfg = phsu->configs[index];
+
+	if (test_bit(flag_set_alt, &up->flags))
+		return;
+
+	trace_hsu_func_start(up->index, __func__);
+	pm_runtime_get_sync(up->dev);
+	disable_irq(up->port.irq);
+	disable_irq(up->dma_irq);
+	serial_sched_stop(up);
+	if (up->use_dma && up->hw_type == hsu_intel) {
+		txc->uport = up;
+		rxc->uport = up;
+	}
+	dev_set_drvdata(up->dev, up);
+	if (cfg->hw_set_alt)
+		cfg->hw_set_alt(index);
+	if (cfg->hw_set_rts)
+		cfg->hw_set_rts(up->index, 0);
+	set_bit(flag_set_alt, &up->flags);
+	serial_sched_start(up);
+	enable_irq(up->dma_irq);
+	enable_irq(up->port.irq);
+	pm_runtime_put(up->dev);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+static void serial_clear_alt(int index)
+{
+	struct uart_hsu_port *up = phsu->port + index;
+	struct hsu_port_cfg *cfg = phsu->configs[index];
+
+	if (!test_bit(flag_set_alt, &up->flags))
+		return;
+
+	pm_runtime_get_sync(up->dev);
+	disable_irq(up->port.irq);
+	disable_irq(up->dma_irq);
+	serial_sched_stop(up);
+	if (cfg->hw_set_rts)
+		cfg->hw_set_rts(up->index, 1);
+	clear_bit(flag_set_alt, &up->flags);
+	serial_sched_start(up);
+	enable_irq(up->dma_irq);
+	enable_irq(up->port.irq);
+	pm_runtime_put(up->dev);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#define HSU_DBGFS_BUFSIZE	8192
+
+static int hsu_show_regs_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t port_show_regs(struct file *file, char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	struct uart_hsu_port *up = file->private_data;
+	char *buf;
+	u32 len = 0;
+	ssize_t ret;
+
+	buf = kzalloc(HSU_DBGFS_BUFSIZE, GFP_KERNEL);
+	if (!buf)
+		return 0;
+
+	pm_runtime_get_sync(up->dev);
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"MFD HSU port[%d] regs:\n", up->index);
+
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"=================================\n");
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"IER: \t\t0x%08x\n", serial_in(up, UART_IER));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"IIR: \t\t0x%08x\n", serial_in(up, UART_IIR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"LCR: \t\t0x%08x\n", serial_in(up, UART_LCR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"MCR: \t\t0x%08x\n", serial_in(up, UART_MCR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"LSR: \t\t0x%08x\n", serial_in(up, UART_LSR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"MSR: \t\t0x%08x\n", serial_in(up, UART_MSR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"FOR: \t\t0x%08x\n", serial_in(up, UART_FOR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"PS: \t\t0x%08x\n", serial_in(up, UART_PS));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"MUL: \t\t0x%08x\n", serial_in(up, UART_MUL));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"DIV: \t\t0x%08x\n", serial_in(up, UART_DIV));
+	pm_runtime_put(up->dev);
+
+	if (len > HSU_DBGFS_BUFSIZE)
+		len = HSU_DBGFS_BUFSIZE;
+
+	ret =  simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t dma_show_regs(struct file *file, char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	struct hsu_dma_chan *chan = file->private_data;
+	char *buf;
+	u32 len = 0;
+	ssize_t ret;
+
+	buf = kzalloc(HSU_DBGFS_BUFSIZE, GFP_KERNEL);
+	if (!buf)
+		return 0;
+
+	pm_runtime_get_sync(chan->uport->dev);
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"MFD HSU DMA channel [%d] regs:\n", chan->id);
+
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"=================================\n");
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"CR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_CR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"DCR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_DCR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"BSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_BSR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"MOTSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_MOTSR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D0SAR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D0TSR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"D1SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D1SAR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"D1TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D1TSR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"D2SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D2SAR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"D2TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D2TSR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"D3SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D3SAR));
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"D3TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D3TSR));
+	pm_runtime_put(chan->uport->dev);
+
+	if (len > HSU_DBGFS_BUFSIZE)
+		len = HSU_DBGFS_BUFSIZE;
+
+	ret =  simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t hsu_dump_show(struct file *file, char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	struct uart_hsu_port *up;
+	struct hsu_port_cfg *cfg;
+	char *buf;
+	char cmd;
+	int i;
+	u32 len = 0;
+	ssize_t ret;
+	struct irq_desc *dma_irqdesc = irq_to_desc(phsu->dma_irq);
+	struct irq_desc *port_irqdesc;
+	struct circ_buf *xmit;
+
+	buf = kzalloc(HSU_DBGFS_BUFSIZE, GFP_KERNEL);
+	if (!buf)
+		return 0;
+
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+		"HSU status dump:\n");
+	len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+		"\tdma irq (>0: disable): %d\n",
+		dma_irqdesc ? dma_irqdesc->depth : 0);
+	for (i = 0; i < phsu->port_num; i++) {
+		up = phsu->port + i;
+		cfg = hsu_port_func_cfg + i;
+		port_irqdesc = irq_to_desc(up->port.irq);
+		xmit = &up->port.state->xmit;
+
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"HSU port[%d] %s:\n", up->index, cfg->name);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"xmit empty[%d] xmit pending[%d]\n",
+			uart_circ_empty(xmit),
+			(int)uart_circ_chars_pending(xmit));
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tsuspend idle: %d\n", cfg->idle);
+		if (cfg->has_alt)
+			len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\talt port: %d\n", cfg->alt);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tforce_suspend: %d\n", cfg->force_suspend);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tcts status: %d\n", up->cts_status);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tuse_dma: %s\n",
+			up->use_dma ? "yes" : "no");
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tflag_console: %s\n",
+			test_bit(flag_console, &up->flags) ? "yes" : "no");
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tflag_suspend: %s\n",
+			test_bit(flag_suspend, &up->flags) ? "yes" : "no");
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tflag_active: %s\n",
+			test_bit(flag_active, &up->flags) ? "yes" : "no");
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tflag_set_alt: %s\n",
+			test_bit(flag_set_alt, &up->flags) ? "yes" : "no");
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tflag_startup: %s\n",
+			test_bit(flag_startup, &up->flags) ? "yes" : "no");
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tqcmd q_start: %d\n", up->q_start);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tqcmd total count: %d\n", up->qcmd_num);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tqcmd done count: %d\n", up->qcmd_done);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport irq count: %d\n", up->port_irq_num);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport irq cmddone: %d\n", up->port_irq_cmddone);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport irq cts: %d\n", up->port.icount.cts);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport irq rng: %d\n", up->port.icount.rng);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport irq dsr: %d\n", up->port.icount.dsr);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport irq no irq pending: %d\n",
+			up->port_irq_pio_no_irq_pend);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport irq no alt: %d\n",
+			up->port_irq_no_alt);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport irq no startup: %d\n",
+			up->port_irq_no_startup);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport irq pio rx error: %d\n",
+			up->port_irq_pio_rx_err);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport irq pio rx available: %d\n",
+			up->port_irq_pio_rx_avb);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport irq pio rx fifo timeout: %d\n",
+			up->port_irq_pio_rx_timeout);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport irq pio tx request: %d\n",
+			up->port_irq_pio_tx_req);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tdma invalid irq count: %d\n",
+			up->dma_invalid_irq_num);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tdma irq count: %d\n", up->dma_irq_num);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tdma irq cmddone: %d\n", up->dma_irq_cmddone);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tdma tx irq cmddone: %d\n",
+			up->dma_tx_irq_cmddone);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport&dma rx irq cmddone: %d\n",
+			up->dma_rx_irq_cmddone);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport&dma rx timeout irq cmddone: %d\n",
+			up->dma_rx_tmt_irq_cmddone);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\ttasklet done: %d\n", up->tasklet_done);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tworkq done: %d\n", up->workq_done);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tqcmd pending count: %d\n", check_qcmd(up, &cmd));
+		if (check_qcmd(up, &cmd))
+			len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+				"\tqcmd pending next: %d\n", cmd);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tin tasklet: %d\n", up->in_tasklet);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tin workq: %d\n", up->in_workq);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tport irq (>0: disable): %d\n",
+			port_irqdesc ? port_irqdesc->depth : 0);
+		len += snprintf(buf + len, HSU_DBGFS_BUFSIZE - len,
+			"\tbyte delay: %d\n", up->byte_delay);
+	}
+	if (len > HSU_DBGFS_BUFSIZE)
+		len = HSU_DBGFS_BUFSIZE;
+
+	ret =  simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+	return ret;
+}
+
+
+static const struct file_operations port_regs_ops = {
+	.owner		= THIS_MODULE,
+	.open		= hsu_show_regs_open,
+	.read		= port_show_regs,
+	.llseek		= default_llseek,
+};
+
+static const struct file_operations dma_regs_ops = {
+	.owner		= THIS_MODULE,
+	.open		= hsu_show_regs_open,
+	.read		= dma_show_regs,
+	.llseek		= default_llseek,
+};
+
+static const struct file_operations hsu_dump_ops = {
+	.owner		= THIS_MODULE,
+	.read		= hsu_dump_show,
+	.llseek		= default_llseek,
+};
+
+static int hsu_debugfs_init(struct hsu_port *hsu)
+{
+	int i;
+	char name[32];
+
+	hsu->debugfs = debugfs_create_dir("hsu", NULL);
+	if (!hsu->debugfs)
+		return -ENOMEM;
+
+	for (i = 0; i < 3; i++) {
+		snprintf(name, sizeof(name), "port_%d_regs", i);
+		debugfs_create_file(name, S_IFREG | S_IRUGO,
+			hsu->debugfs, (void *)(&hsu->port[i]), &port_regs_ops);
+	}
+
+	for (i = 0; i < 6; i++) {
+		snprintf(name, sizeof(name), "dma_chan_%d_regs", i);
+		debugfs_create_file(name, S_IFREG | S_IRUGO,
+			hsu->debugfs, (void *)&hsu->chans[i], &dma_regs_ops);
+	}
+
+	snprintf(name, sizeof(name), "dump_status");
+	debugfs_create_file(name, S_IFREG | S_IRUGO,
+		hsu->debugfs, NULL, &hsu_dump_ops);
+
+	return 0;
+}
+
+static void hsu_debugfs_remove(struct hsu_port *hsu)
+{
+	if (hsu->debugfs)
+		debugfs_remove_recursive(hsu->debugfs);
+}
+
+#else
+static inline int hsu_debugfs_init(struct hsu_port *hsu)
+{
+	return 0;
+}
+
+static inline void hsu_debugfs_remove(struct hsu_port *hsu)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static void serial_hsu_enable_ms(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+
+	trace_hsu_func_start(up->index, __func__);
+	up->ier |= UART_IER_MSI;
+	serial_sched_cmd(up, qcmd_set_ier);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+/* Protected by spin_lock_irqsave(port->lock) */
+static void serial_hsu_start_tx(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+
+	trace_hsu_func_start(up->index, __func__);
+	serial_sched_cmd(up, qcmd_start_tx);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+static void serial_hsu_stop_tx(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+
+	trace_hsu_func_start(up->index, __func__);
+	serial_sched_cmd(up, qcmd_stop_tx);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+static void hsu_stop_tx(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+
+	trace_hsu_func_start(up->index, __func__);
+	serial_sched_cmd(up, qcmd_stop_tx);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+/* This is always called with the port spinlock held, so modifying
+ * the timeout timer is safe here */
+void intel_dma_do_rx(struct uart_hsu_port *up, u32 int_sts)
+{
+	struct hsu_dma_buffer *dbuf = &up->rxbuf;
+	struct hsu_dma_chan *chan = up->rxc;
+	struct uart_port *port = &up->port;
+	struct tty_struct *tty;
+	struct tty_port *tport = &port->state->port;
+	int count;
+
+	trace_hsu_func_start(up->index, __func__);
+	tty = tty_port_tty_get(&up->port.state->port);
+	if (!tty) {
+		trace_hsu_func_end(up->index, __func__, "notty");
+		return;
+	}
+
+	/*
+	 * First we need to know how much has already been transferred;
+	 * then, if this is a timeout DMA irq, take the trailing bytes
+	 * out, push them up and re-enable the channel.
+	 */
+
+	/* Timeout IRQ, need to wait some time, see Errata 2 */
+	if (int_sts & 0xf00) {
+		up->dma_rx_tmt_irq_cmddone++;
+		udelay(2);
+	} else
+		up->dma_rx_irq_cmddone++;
+
+	/* Stop the channel */
+	chan_writel(chan, HSU_CH_CR, 0x0);
+
+	count = chan_readl(chan, HSU_CH_D0SAR) - dbuf->dma_addr;
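+	/*
+	 * D0SAR advances as the DMA engine fills the buffer, so its delta
+	 * from the buffer base is the number of bytes received so far.
+	 */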
+	if (!count) {
+		/* Restart the channel before we leave */
+		chan_writel(chan, HSU_CH_CR, 0x3);
+		tty_kref_put(tty);
+		trace_hsu_func_end(up->index, __func__, "nodata");
+		return;
+	}
+
+	dma_sync_single_for_cpu(port->dev, dbuf->dma_addr,
+			dbuf->dma_size, DMA_FROM_DEVICE);
+
+	/*
+	 * Head will only wrap around when we recycle
+	 * the DMA buffer, and when that happens, we
+	 * explicitly set tail to 0. So head will
+	 * always be greater than tail.
+	 */
+	tty_insert_flip_string(tport, dbuf->buf, count);
+	port->icount.rx += count;
+
+	dma_sync_single_for_device(up->port.dev, dbuf->dma_addr,
+			dbuf->dma_size, DMA_FROM_DEVICE);
+
+	/* Reprogram the channel */
+	chan_writel(chan, HSU_CH_D0SAR, dbuf->dma_addr);
+	chan_writel(chan, HSU_CH_D0TSR, dbuf->dma_size);
+	chan_writel(chan, HSU_CH_DCR, 0x1
+					 | (0x1 << 8)
+					 | (0x1 << 16)
+					 | (0x1 << 24)	/* timeout bit, see HSU Errata 1 */
+					 );
+	tty_flip_buffer_push(tport);
+
+	chan_writel(chan, HSU_CH_CR, 0x3);
+	tty_kref_put(tty);
+	trace_hsu_func_end(up->index, __func__, "");
+
+}
+
+static void serial_hsu_stop_rx(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+
+	trace_hsu_func_start(up->index, __func__);
+	serial_sched_cmd(up, qcmd_stop_rx);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+static inline void receive_chars(struct uart_hsu_port *up, int *status)
+{
+	struct tty_struct *tty = up->port.state->port.tty;
+	struct tty_port *tport = &up->port.state->port;
+	unsigned int ch, flag;
+	unsigned int max_count = 256;
+
+	if (!tty)
+		return;
+
+	trace_hsu_func_start(up->index, __func__);
+	do {
+		ch = serial_in(up, UART_RX);
+		flag = TTY_NORMAL;
+		up->port.icount.rx++;
+
+		if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
+				       UART_LSR_FE | UART_LSR_OE))) {
+
+			dev_warn(up->dev,
+				"We really rush into ERR/BI case, "
+				"status = 0x%02x\n", *status);
+			/* For statistics only */
+			if (*status & UART_LSR_BI) {
+				*status &= ~(UART_LSR_FE | UART_LSR_PE);
+				up->port.icount.brk++;
+				/*
+				 * We do the SysRQ and SAK checking
+				 * here because otherwise the break
+				 * may get masked by ignore_status_mask
+				 * or read_status_mask.
+				 */
+				if (uart_handle_break(&up->port))
+					goto ignore_char;
+			} else if (*status & UART_LSR_PE)
+				up->port.icount.parity++;
+			else if (*status & UART_LSR_FE)
+				up->port.icount.frame++;
+			if (*status & UART_LSR_OE)
+				up->port.icount.overrun++;
+
+			/* Mask off conditions which should be ignored. */
+			*status &= up->port.read_status_mask;
+
+#ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE
+			if (up->port.cons &&
+				up->port.cons->index == up->port.line) {
+				/* Recover the break flag from console xmit */
+				*status |= up->lsr_break_flag;
+				up->lsr_break_flag = 0;
+			}
+#endif
+			if (*status & UART_LSR_BI) {
+				flag = TTY_BREAK;
+			} else if (*status & UART_LSR_PE)
+				flag = TTY_PARITY;
+			else if (*status & UART_LSR_FE)
+				flag = TTY_FRAME;
+		}
+
+		if (uart_handle_sysrq_char(&up->port, ch))
+			goto ignore_char;
+
+		uart_insert_char(&up->port, *status, UART_LSR_OE, ch, flag);
+	ignore_char:
+		*status = serial_in(up, UART_LSR);
+	} while ((*status & UART_LSR_DR) && max_count--);
+
+	tty_flip_buffer_push(tport);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+static void transmit_chars(struct uart_hsu_port *up)
+{
+	struct circ_buf *xmit = &up->port.state->xmit;
+	unsigned long flags;
+	int count;
+
+	spin_lock_irqsave(&up->port.lock, flags);
+	trace_hsu_func_start(up->index, __func__);
+	if (up->port.x_char) {
+		serial_out(up, UART_TX, up->port.x_char);
+		up->port.icount.tx++;
+		up->port.x_char = 0;
+		trace_hsu_func_end(up->index, __func__, "x_char");
+		goto out;
+	}
+	if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
+		hsu_stop_tx(&up->port);
+		if (uart_circ_empty(xmit))
+			trace_hsu_func_end(up->index, __func__, "empty");
+		else
+			trace_hsu_func_end(up->index, __func__, "stop");
+		goto out;
+	}
+
+	/* The IRQ is for TX FIFO half-empty */
+	count = up->port.fifosize / 2;
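+	/*
+	 * A sketch of the pacing: assuming port.fifosize is set to the
+	 * 64-byte HSU FIFO configured in set_termios(), this refills up
+	 * to 32 bytes per half-empty interrupt.
+	 */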
+
+	do {
+		if (uart_tx_stopped(&up->port)) {
+			hsu_stop_tx(&up->port);
+			break;
+		}
+		serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+
+		up->port.icount.tx++;
+		if (uart_circ_empty(xmit))
+			break;
+	} while (--count > 0);
+
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(&up->port);
+
+	if (uart_circ_empty(xmit)) {
+		hsu_stop_tx(&up->port);
+		trace_hsu_func_end(up->index, __func__, "tx_complete");
+	} else
+		trace_hsu_func_end(up->index, __func__, "");
+
+out:
+	spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static void check_modem_status(struct uart_hsu_port *up)
+{
+	struct uart_port *uport = &up->port;
+	struct tty_port *port = &uport->state->port;
+	struct tty_struct *tty = port->tty;
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+	int status;
+	int delta_msr = 0;
+
+	trace_hsu_func_start(up->index, __func__);
+	status = serial_in(up, UART_MSR);
+	trace_hsu_mctrl(up->index, status);
+	if (port->flags & ASYNC_CTS_FLOW && !cfg->hw_ctrl_cts) {
+		if (tty->hw_stopped) {
+			if (status & UART_MSR_CTS) {
+				serial_sched_cmd(up, qcmd_start_tx);
+				tty->hw_stopped = 0;
+				up->cts_status = 0;
+				uport->icount.cts++;
+				delta_msr = 1;
+				uart_write_wakeup(uport);
+			}
+		} else {
+			if (!(status & UART_MSR_CTS)) {
+				/* Is this automatically controlled? */
+				if (up->use_dma)
+					up->dma_ops->stop_tx(up);
+				clear_bit(flag_tx_on, &up->flags);
+				tty->hw_stopped = 1;
+				up->cts_status = 1;
+				delta_msr = 1;
+				uport->icount.cts++;
+			}
+		}
+	}
+
+	if ((status & UART_MSR_ANY_DELTA)) {
+		if (status & UART_MSR_TERI)
+			up->port.icount.rng++;
+		if (status & UART_MSR_DDSR)
+			up->port.icount.dsr++;
+		/* We may only get DDCD when HW init and reset */
+		if (status & UART_MSR_DDCD)
+			uart_handle_dcd_change(&up->port,
+					status & UART_MSR_DCD);
+		delta_msr = 1;
+	}
+
+	if (delta_msr)
+		wake_up_interruptible(&up->port.state->port.delta_msr_wait);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+static void hsu_dma_chan_handler(struct hsu_port *hsu, int index)
+{
+	unsigned long flags;
+	struct uart_hsu_port *up = hsu->chans[index * 2].uport;
+	struct hsu_dma_chan *txc = up->txc;
+	struct hsu_dma_chan *rxc = up->rxc;
+
+	up->dma_irq_num++;
+	if (unlikely(!up->use_dma
+		|| !test_bit(flag_startup, &up->flags))) {
+		up->dma_invalid_irq_num++;
+		chan_readl(txc, HSU_CH_SR);
+		chan_readl(rxc, HSU_CH_SR);
+		return;
+	}
+	disable_irq_nosync(up->dma_irq);
+	spin_lock_irqsave(&up->port.lock, flags);
+	serial_sched_cmd(up, qcmd_dma_irq);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+/*
+ * This handles the interrupt from one port.
+ */
+static irqreturn_t hsu_port_irq(int irq, void *dev_id)
+{
+	struct uart_hsu_port *up = dev_id;
+	unsigned long flags;
+	u8 lsr;
+
+	trace_hsu_func_start(up->index, __func__);
+	up->port_irq_num++;
+
+	if (up->hw_type == hsu_intel) {
+		if (unlikely(!test_bit(flag_set_alt, &up->flags))) {
+			up->port_irq_no_alt++;
+			trace_hsu_func_end(up->index, __func__, "noalt");
+			return IRQ_NONE;
+		}
+	} else {
+		if (unlikely(test_bit(flag_suspend, &up->flags))) {
+			trace_hsu_func_end(up->index, __func__, "suspend");
+			return IRQ_NONE;
+		}
+
+		/* On BYT, this IRQ may be shared with other HW */
+		up->iir = serial_in(up, UART_IIR);
+		if (unlikely(up->iir & 0x1)) {
+			/*
+			 * Read the UART_BYTE_COUNT and UART_OVERFLOW
+			 * registers to clear the overrun error on
+			 * Tx. This is a HW issue on VLV2 B0; see
+			 * HSD 4683358 for more information.
+			 */
+			serial_in(up, 0x818 / 4);
+			serial_in(up, 0x820 / 4);
+			trace_hsu_func_end(up->index, __func__, "workaround");
+			return IRQ_NONE;
+		}
+	}
+
+	if (unlikely(!test_bit(flag_startup, &up->flags))) {
+		pr_err("received IRQ before the port has started up\n");
+		/* The SCU might forward it too late, when the port is
+		 * already closed */
+		serial_in(up, UART_LSR);
+		up->port_irq_no_startup++;
+		trace_hsu_func_end(up->index, __func__, "nostart");
+		return IRQ_HANDLED;
+	}
+
+	/* DesignWare HW's DMA mode still needs the port irq */
+	if (up->use_dma && up->hw_type == hsu_intel) {
+		lsr = serial_in(up, UART_LSR);
+		spin_lock_irqsave(&up->port.lock, flags);
+		check_modem_status(up);
+		spin_unlock_irqrestore(&up->port.lock, flags);
+		if (unlikely(lsr & (UART_LSR_BI | UART_LSR_PE |
+				UART_LSR_FE | UART_LSR_OE)))
+			dev_warn(up->dev,
+				"Got LSR irq(0x%02x) while using DMA", lsr);
+		trace_hsu_func_end(up->index, __func__, "lsr");
+		return IRQ_HANDLED;
+	}
+
+	disable_irq_nosync(up->port.irq);
+	spin_lock_irqsave(&up->port.lock, flags);
+	serial_sched_cmd(up, qcmd_port_irq);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+
+	trace_hsu_func_end(up->index, __func__, "");
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t hsu_dma_irq(int irq, void *dev_id)
+{
+	struct uart_hsu_port *up;
+	unsigned long flags;
+	unsigned int dmairq;
+	int i;
+
+	spin_lock_irqsave(&phsu->dma_lock, flags);
+	dmairq = mfd_readl(phsu, HSU_GBL_DMAISR);
+	if (phsu->irq_port_and_dma) {
+		up = dev_id;
+		up->port_dma_sts = dmairq;
+		if (up->port_dma_sts & (3 << (up->index * 2)))
+			hsu_dma_chan_handler(phsu, up->index);
+	} else {
+		for (i = 0; i < 3; i++)
+			if (dmairq & (3 << (i * 2))) {
+				up = phsu->chans[i * 2].uport;
+				up->port_dma_sts = dmairq;
+				hsu_dma_chan_handler(phsu, i);
+			}
+	}
+	spin_unlock_irqrestore(&phsu->dma_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+static unsigned int serial_hsu_tx_empty(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+	int ret = 1;
+
+	trace_hsu_func_start(up->index, __func__);
+	pm_runtime_get_sync(up->dev);
+	serial_sched_stop(up);
+
+	if (up->use_dma && test_bit(flag_tx_on, &up->flags))
+		ret = 0;
+	ret = ret &&
+		(serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0);
+	serial_sched_start(up);
+	pm_runtime_put(up->dev);
+	trace_hsu_func_end(up->index, __func__, "");
+	return ret;
+}
+
+static unsigned int serial_hsu_get_mctrl(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+	unsigned char status = up->msr;
+	unsigned int ret = 0;
+
+	trace_hsu_func_start(up->index, __func__);
+	if (status & UART_MSR_DCD)
+		ret |= TIOCM_CAR;
+	if (status & UART_MSR_RI)
+		ret |= TIOCM_RNG;
+	if (status & UART_MSR_DSR)
+		ret |= TIOCM_DSR;
+	if (status & UART_MSR_CTS)
+		ret |= TIOCM_CTS;
+	trace_hsu_func_end(up->index, __func__, "");
+	return ret;
+}
+
+static void set_mctrl(struct uart_hsu_port *up, unsigned int mctrl)
+{
+	trace_hsu_func_start(up->index, __func__);
+	if (mctrl & TIOCM_RTS)
+		up->mcr |= UART_MCR_RTS;
+	if (mctrl & TIOCM_DTR)
+		up->mcr |= UART_MCR_DTR;
+	if (mctrl & TIOCM_OUT1)
+		up->mcr |= UART_MCR_OUT1;
+	if (mctrl & TIOCM_OUT2)
+		up->mcr |= UART_MCR_OUT2;
+	if (mctrl & TIOCM_LOOP)
+		up->mcr |= UART_MCR_LOOP;
+	trace_hsu_mctrl(up->index, mctrl);
+	serial_out(up, UART_MCR, up->mcr);
+	udelay(100);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+static void serial_hsu_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+
+	trace_hsu_func_start(up->index, __func__);
+	if (mctrl & TIOCM_RTS)
+		up->mcr |= UART_MCR_RTS;
+	if (mctrl & TIOCM_DTR)
+		up->mcr |= UART_MCR_DTR;
+	if (mctrl & TIOCM_OUT1)
+		up->mcr |= UART_MCR_OUT1;
+	if (mctrl & TIOCM_OUT2)
+		up->mcr |= UART_MCR_OUT2;
+	if (mctrl & TIOCM_LOOP)
+		up->mcr |= UART_MCR_LOOP;
+	serial_sched_cmd(up, qcmd_set_mcr);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+static void serial_hsu_break_ctl(struct uart_port *port, int break_state)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+
+	trace_hsu_func_start(up->index, __func__);
+	pm_runtime_get_sync(up->dev);
+	serial_sched_stop(up);
+	if (break_state == -1)
+		up->lcr |= UART_LCR_SBC;
+	else
+		up->lcr &= ~UART_LCR_SBC;
+	serial_out(up, UART_LCR, up->lcr);
+	serial_sched_start(up);
+	pm_runtime_put(up->dev);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+/*
+ * What is special here:
+ * 1. choose the 64B FIFO mode
+ * 2. start DMA or PIO depending on the configuration
+ * 3. DMA memory is only allocated when needed
+ */
+static int serial_hsu_startup(struct uart_port *port)
+{
+	static int console_first_init = 1;
+	int ret = 0;
+	unsigned long flags;
+	static DEFINE_MUTEX(lock);
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+
+	trace_hsu_func_start(up->index, __func__);
+	mutex_lock(&lock);
+
+	pm_runtime_get_sync(up->dev);
+
+	/* HW start it */
+	if (cfg->hw_reset)
+		cfg->hw_reset(up->port.membase);
+
+	if (console_first_init && test_bit(flag_console, &up->flags)) {
+		serial_sched_stop(up);
+		console_first_init = 0;
+	}
+	clear_bit(flag_reopen, &up->flags);
+	if (cfg->has_alt) {
+		struct hsu_port_cfg *alt_cfg = hsu_port_func_cfg + cfg->alt;
+		struct uart_hsu_port *alt_up = phsu->port + alt_cfg->index;
+
+		if (test_bit(flag_startup, &alt_up->flags) &&
+			alt_up->port.state->port.tty) {
+			if (alt_cfg->force_suspend) {
+				uart_suspend_port(&serial_hsu_reg,
+							&alt_up->port);
+				serial_clear_alt(alt_up->index);
+				set_bit(flag_reopen, &alt_up->flags);
+			} else {
+				int loop = 50;
+
+				while (test_bit(flag_startup,
+						&alt_up->flags) && --loop)
+					msleep(20);
+				if (test_bit(flag_startup, &alt_up->flags)) {
+					WARN(1, "Share port open timeout\n");
+					ret = -EBUSY;
+					goto out;
+				}
+			}
+		}
+	}
+	serial_set_alt(up->index);
+	serial_sched_start(up);
+	serial_sched_stop(up);
+
+	/*
+	 * Clear the FIFO buffers and disable them.
+	 * (they will be reenabled in set_termios())
+	 */
+	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+			UART_FCR_CLEAR_RCVR |
+			UART_FCR_CLEAR_XMIT);
+	serial_out(up, UART_FCR, 0);
+
+	/* Clear the interrupt registers. */
+	(void) serial_in(up, UART_LSR);
+	(void) serial_in(up, UART_RX);
+	(void) serial_in(up, UART_IIR);
+	(void) serial_in(up, UART_MSR);
+
+	/* Now, initialize the UART, default is 8n1 */
+	serial_out(up, UART_LCR, UART_LCR_WLEN8);
+	up->port.mctrl |= TIOCM_OUT2;
+	set_mctrl(up, up->port.mctrl);
+
+	/* DMA init */
+	if (up->use_dma) {
+		ret = up->dma_ops->init ? up->dma_ops->init(up) : -ENODEV;
+		if (ret) {
+			dev_warn(up->dev, "Fail to init DMA, will use PIO\n");
+			up->use_dma = 0;
+		}
+	}
+
+	/*
+	 * Finally, enable interrupts. Note: modem status interrupts
+	 * are set via set_termios(), which will be occurring imminently
+	 * anyway, so we don't enable them here.
+	 */
+	/* Bit 4 is reserved on DW, but SEG needs it to be set */
+	if (!up->use_dma || up->hw_type == hsu_dw)
+		up->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE;
+	else
+		up->ier = 0;
+	serial_out(up, UART_IER, up->ier);
+
+	/* And clear the interrupt registers again for luck. */
+	(void) serial_in(up, UART_LSR);
+	(void) serial_in(up, UART_RX);
+	(void) serial_in(up, UART_IIR);
+	(void) serial_in(up, UART_MSR);
+
+	set_bit(flag_startup, &up->flags);
+	serial_sched_start(up);
+	spin_lock_irqsave(&up->port.lock, flags);
+	serial_sched_cmd(up, qcmd_get_msr);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+	serial_sched_sync(up);
+
+out:
+	pm_runtime_put(up->dev);
+	mutex_unlock(&lock);
+	trace_hsu_func_end(up->index, __func__, "");
+	return ret;
+}
+
+static void serial_hsu_shutdown(struct uart_port *port)
+{
+	static DEFINE_MUTEX(lock);
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+
+	trace_hsu_func_start(up->index, __func__);
+	mutex_lock(&lock);
+	pm_runtime_get_sync(up->dev);
+	serial_sched_stop(up);
+	clear_bit(flag_startup, &up->flags);
+
+	/* Disable interrupts from this port */
+	up->ier = 0;
+	serial_out(up, UART_IER, 0);
+
+	clear_bit(flag_tx_on, &up->flags);
+
+	up->port.mctrl &= ~TIOCM_OUT2;
+	set_mctrl(up, up->port.mctrl);
+
+	/* Disable break condition and FIFOs */
+	serial_out(up, UART_LCR,
+			serial_in(up, UART_LCR) & ~UART_LCR_SBC);
+	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+			UART_FCR_CLEAR_RCVR |
+			UART_FCR_CLEAR_XMIT);
+	serial_out(up, UART_FCR, 0);
+
+	/* Free allocated dma buffer */
+	if (up->use_dma)
+		up->dma_ops->exit(up);
+
+	if (cfg->has_alt) {
+		struct hsu_port_cfg *alt_cfg = hsu_port_func_cfg + cfg->alt;
+		struct uart_hsu_port *alt_up = phsu->port + alt_cfg->index;
+
+		if (test_bit(flag_reopen, &alt_up->flags)) {
+			serial_clear_alt(up->index);
+			uart_resume_port(&serial_hsu_reg, &alt_up->port);
+		}
+	}
+
+	pm_runtime_put_sync(up->dev);
+	mutex_unlock(&lock);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+/*
+ * Calculate mul/div for a low fref, e.g. TNG B0's 38.4M;
+ * eventually the fref will switch to a high fref, e.g. 100M.
+ */
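+/*
+ * Sanity check for the table below (illustrative arithmetic): with
+ * quot = 1 the resulting rate is fref * mul / (div * ps), e.g.
+ * 38.4MHz * 350 / (384 * 10) = 3.5Mbaud.
+ */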
+static bool calc_for_low_fref(u32 clock, u32 baud, u32 *mul, u32 *div)
+{
+	if (clock == 38400) {
+		switch (baud) {
+		case 3500000:
+			/* ps: 10 */
+			*mul = 350;
+			*div = 384;
+			break;
+		case 3000000:
+			/* ps: 12 */
+			*mul = 360;
+			*div = 384;
+			break;
+		case 2500000:
+			/* ps: 12 */
+			*mul = 300;
+			*div = 384;
+			break;
+		case 2000000:
+			/* ps: 16 */
+			*mul = 320;
+			*div = 384;
+			break;
+		case 1843200:
+			/* ps: 16 */
+			*mul = 294912;
+			*div = 384000;
+			break;
+		case 1500000:
+			/* ps: 16 */
+			*mul = 240;
+			*div = 384;
+			break;
+		case 1000000:
+			/* ps: 16 */
+			*mul = 160;
+			*div = 384;
+			break;
+		case 500000:
+			/* ps: 16 */
+			*mul = 80;
+			*div = 384;
+			break;
+		}
+		return true;
+	} else
+		return false;
+}
+
+static void
+serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
+		       struct ktermios *old)
+{
+	struct uart_hsu_port *up =
+			container_of(port, struct uart_hsu_port, port);
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+	unsigned char cval, fcr = 0;
+	unsigned long flags;
+	unsigned int baud, quot, clock, bits;
+	/* 0x3d09 is the default divisor value; for details
+	 * please refer to the high speed UART HAS documents.
+	 */
+	u32 ps = 0, mul = 0, div = 0x3D09, m = 0, n = 0;
+
+	trace_hsu_func_start(up->index, __func__);
+	switch (termios->c_cflag & CSIZE) {
+	case CS5:
+		cval = UART_LCR_WLEN5;
+		bits = 7;
+		break;
+	case CS6:
+		cval = UART_LCR_WLEN6;
+		bits = 8;
+		break;
+	case CS7:
+		cval = UART_LCR_WLEN7;
+		bits = 9;
+		break;
+	default:
+	case CS8:
+		cval = UART_LCR_WLEN8;
+		bits = 10;
+		break;
+	}
+
+	/* CMSPAR isn't supported by this driver */
+	termios->c_cflag &= ~CMSPAR;
+
+	if (termios->c_cflag & CSTOPB) {
+		cval |= UART_LCR_STOP;
+		bits++;
+	}
+	if (termios->c_cflag & PARENB) {
+		cval |= UART_LCR_PARITY;
+		bits++;
+	}
+	if (!(termios->c_cflag & PARODD))
+		cval |= UART_LCR_EPAR;
+
+	baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
+	trace_hsu_set_termios(up->index, baud, termios->c_cflag & CRTSCTS ? 1 : 0);
+
+	if (up->hw_type == hsu_intel) {
+		/*
+		 * If the base clk is 50MHz, the baud rate comes from:
+		 *	baud = 50M * MUL / (DIV * PS * DLAB)
+		 *
+		 * For the basic low baud rates we can get the divisor
+		 * directly from 2764800, e.g. 115200 = 2764800/24. The
+		 * higher baud rates are handled case by case, mainly by
+		 * adjusting the MUL/PS registers, while the DIV register
+		 * is kept at its default value 0x3d09 to keep things
+		 * simple.
+		 */
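+		/*
+		 * Illustrative arithmetic: with the default DIV = 0x3d09
+		 * (i.e. 15625) and PS = 16, each MUL step is worth
+		 * 50e6 / (15625 * 16) = 200 Hz before DLAB; MUL = 13824
+		 * would then give 2764800 / DLAB, i.e. 115200 baud at
+		 * DLAB = 24.
+		 */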
+
+		if (cfg->hw_get_clk)
+			clock = cfg->hw_get_clk();
+		else
+			clock = 50000;
+		/* ps = 16 is preferred; if not possible, we have to use 12 or 10 */
+		if (baud * 16 <= clock * 1000)
+			ps = 16;
+		else if (baud * 12 <= clock * 1000)
+			ps = 12;
+		else if (baud * 10 <= clock * 1000)
+			ps = 10;
+		else
+			pr_err("port:%d baud:%d is too high for clock:%u M\n",
+				up->index, baud, clock / 1000);
+
+		switch (baud) {
+		case 3500000:
+		case 3000000:
+		case 2500000:
+		case 2000000:
+		case 1843200:
+		case 1500000:
+		case 1000000:
+		case 500000:
+			quot = 1;
+			if (!calc_for_low_fref(clock, baud, &mul, &div))
+				/*
+				 * mul = baud * 0x3d09 * ps / 1000 / clock
+				 * change the formula order to avoid overflow
+				 */
+				mul = (0x3d09 * ps / 100) * (baud / 100)
+					* 10 / clock;
+			break;
+		default:
+			/* Use uart_get_divisor to get quot for other baud
+			 * rates; avoid overflow in
+			 * mul = uartclk * 0x3d09 / clock / 1000, since
+			 * uartclk is a multiple of 115200 * n * 16 */
+			mul = (up->port.uartclk / 1600) * 0x3d09 /
+				clock * 16 / 10;
+			quot = 0;
+		}
+
+		if (!quot)
+			quot = uart_get_divisor(port, baud);
+
+		if ((up->port.uartclk / quot) < (2400 * 16))
+			fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_1B;
+		else if ((up->port.uartclk / quot) < (230400 * 16))
+			fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_16B;
+		else
+			fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_32B;
+
+		fcr |= UART_FCR_HSU_64B_FIFO;
+	} else {
+		/* need calc quot here */
+		switch (baud) {
+		case 3000000:
+		case 1500000:
+		case 1000000:
+		case 500000:
+			m = 48;
+			n = 100;
+			quot = 3000000 / baud;
+			break;
+		default:
+			m = 9216;
+			n = 15625;
+			quot = 0;
+		}
+		if (!quot)
+			quot = uart_get_divisor(port, baud);
+
+		fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
+			UART_FCR_T_TRIG_11;
+		if (baud < 2400) {
+			fcr &= ~UART_FCR_TRIGGER_MASK;
+			fcr |= UART_FCR_TRIGGER_1;
+		}
+	}
+
+	/* duration of one byte transfer, in microseconds */
+	up->byte_delay = (bits * 1000000 + baud - 1) / baud;
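+	/* e.g. 8N1 at 115200 has bits = 10, so the delay rounds up to
+	 * 10000000 / 115200 -> 87us per byte */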
+
+	pm_runtime_get_sync(up->dev);
+	serial_sched_stop(up);
+	/*
+	 * Ok, we're now changing the port state.  Do it with
+	 * interrupts disabled.
+	 */
+	spin_lock_irqsave(&up->port.lock, flags);
+
+	/* Update the per-port timeout */
+	uart_update_timeout(port, termios->c_cflag, baud);
+
+	up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+	if (termios->c_iflag & INPCK)
+		up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+	if (termios->c_iflag & (BRKINT | PARMRK))
+		up->port.read_status_mask |= UART_LSR_BI;
+
+	/* Characters to ignore */
+	up->port.ignore_status_mask = 0;
+	if (termios->c_iflag & IGNPAR)
+		up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
+	if (termios->c_iflag & IGNBRK) {
+		up->port.ignore_status_mask |= UART_LSR_BI;
+		/*
+		 * If we're ignoring parity and break indicators,
+		 * ignore overruns too (for real raw support).
+		 */
+		if (termios->c_iflag & IGNPAR)
+			up->port.ignore_status_mask |= UART_LSR_OE;
+	}
+
+	/* Ignore all characters if CREAD is not set */
+	if ((termios->c_cflag & CREAD) == 0)
+		up->port.ignore_status_mask |= UART_LSR_DR;
+
+	/*
+	 * CTS flow control flag and modem status interrupts, disable
+	 * MSI by default
+	 */
+	up->ier &= ~UART_IER_MSI;
+	if (UART_ENABLE_MS(&up->port, termios->c_cflag))
+		up->ier |= UART_IER_MSI;
+
+	serial_out(up, UART_IER, up->ier);
+
+	if (termios->c_cflag & CRTSCTS)
+		up->mcr |= UART_MCR_AFE | UART_MCR_RTS;
+	else
+		up->mcr &= ~UART_MCR_AFE;
+
+	up->dll	= quot & 0xff;
+	up->dlm	= quot >> 8;
+	up->fcr	= fcr;
+	up->lcr = cval;					/* Save LCR */
+
+	serial_out(up, UART_LCR, cval | UART_LCR_DLAB);	/* set DLAB */
+	serial_out(up, UART_DLL, up->dll);		/* LS of divisor */
+	serial_out(up, UART_DLM, up->dlm);		/* MS of divisor */
+	serial_out(up, UART_LCR, cval);			/* reset DLAB */
+
+	if (up->hw_type == hsu_intel) {
+		up->mul	= mul;
+		up->div = div;
+		up->ps	= ps;
+		serial_out(up, UART_MUL, up->mul);	/* set MUL */
+		serial_out(up, UART_DIV, up->div);	/* set DIV */
+		serial_out(up, UART_PS, up->ps);	/* set PS */
+	} else {
+		if (m != up->m || n != up->n) {
+			if (cfg->set_clk)
+				cfg->set_clk(m, n, up->port.membase);
+			up->m = m;
+			up->n = n;
+		}
+	}
+
+	serial_out(up, UART_FCR, fcr);
+	set_mctrl(up, up->port.mctrl);
+	serial_sched_cmd(up, qcmd_get_msr);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+	serial_sched_start(up);
+	serial_sched_sync(up);
+	pm_runtime_put(up->dev);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+static void
+serial_hsu_pm(struct uart_port *port, unsigned int state,
+	      unsigned int oldstate)
+{
+}
+
+static void serial_hsu_release_port(struct uart_port *port)
+{
+}
+
+static int serial_hsu_request_port(struct uart_port *port)
+{
+	return 0;
+}
+
+static void serial_hsu_config_port(struct uart_port *port, int flags)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+	up->port.type = PORT_MFD;
+}
+
+static int
+serial_hsu_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+	/* We don't want the core code to modify any port params */
+	return -EINVAL;
+}
+
+static const char *
+serial_hsu_type(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+	return up->name;
+}
+
+struct device *intel_mid_hsu_set_wake_peer(int port,
+	void (*wake_peer)(struct device *))
+{
+	struct hsu_port_cfg *cfg = phsu->configs[port];
+
+	cfg->wake_peer = wake_peer;
+	return cfg->dev;
+}
+EXPORT_SYMBOL(intel_mid_hsu_set_wake_peer);
+
+static void serial_hsu_wake_peer(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+			container_of(port, struct uart_hsu_port, port);
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+
+	trace_hsu_func_start(up->index, __func__);
+	if (cfg->wake_peer)
+		cfg->wake_peer(cfg->dev);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+/* Wait for transmitter & holding register to empty */
+static inline int wait_for_xmitr(struct uart_hsu_port *up)
+{
+	unsigned int status, tmout = 10000;
+
+	while (--tmout) {
+		status = serial_in(up, UART_LSR);
+		if (status & UART_LSR_BI)
+			up->lsr_break_flag = UART_LSR_BI;
+		udelay(1);
+		if (status & BOTH_EMPTY)
+			break;
+	}
+	if (tmout == 0)
+		return 0;
+
+	if (up->port.flags & UPF_CONS_FLOW) {
+		tmout = 10000;
+		while (--tmout &&
+		       ((serial_in(up, UART_MSR) & UART_MSR_CTS) == 0))
+			udelay(1);
+		if (tmout == 0)
+			return 0;
+	}
+	return 1;
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static int serial_hsu_get_poll_char(struct uart_port *port)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+	u8 lsr;
+
+	lsr = serial_in(up, UART_LSR);
+	if (!(lsr & UART_LSR_DR))
+		return NO_POLL_CHAR;
+	return serial_in(up, UART_RX);
+}
+
+static void serial_hsu_put_poll_char(struct uart_port *port,
+			unsigned char c)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+
+	serial_out(up, UART_IER, 0);
+	while (!wait_for_xmitr(up))
+		cpu_relax();
+	serial_out(up, UART_TX, c);
+	while (!wait_for_xmitr(up))
+		cpu_relax();
+	serial_out(up, UART_IER, up->ier);
+}
+#endif
+
+#ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE
+static void serial_hsu_console_putchar(struct uart_port *port, int ch)
+{
+	struct uart_hsu_port *up =
+		container_of(port, struct uart_hsu_port, port);
+	cl_put_char(up, ch);
+}
+
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ *
+ *	The console_lock must be held when we get here.
+ */
+static void
+serial_hsu_console_write(struct console *co, const char *s, unsigned int count)
+{
+	struct uart_hsu_port *up = phsu->port + co->index;
+	unsigned long flags;
+
+	uart_console_write(&up->port, s, count, serial_hsu_console_putchar);
+	spin_lock_irqsave(&up->cl_lock, flags);
+	serial_sched_cmd(up, qcmd_cl);
+	spin_unlock_irqrestore(&up->cl_lock, flags);
+}
+
+static struct console serial_hsu_console;
+
+static int __init
+serial_hsu_console_setup(struct console *co, char *options)
+{
+	struct uart_hsu_port *up = phsu->port + co->index;
+	int baud = 115200;
+	int bits = 8;
+	int parity = 'n';
+	int flow = 'n';
+	unsigned long flags;
+
+	if (co->index < 0 || co->index >= hsu_port_max)
+		return -ENODEV;
+
+	if (options)
+		uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+	pm_runtime_get_sync(up->dev);
+	set_bit(flag_console, &up->flags);
+	set_bit(flag_startup, &up->flags);
+	serial_set_alt(up->index);
+	serial_sched_start(up);
+	spin_lock_irqsave(&up->port.lock, flags);
+	serial_sched_cmd(up, qcmd_get_msr);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+	serial_sched_sync(up);
+	pm_runtime_put(up->dev);
+	up->cl_circ.buf = kzalloc(HSU_CL_BUF_LEN, GFP_KERNEL);
+	if (up->cl_circ.buf == NULL)
+		return -ENOMEM;
+	return uart_set_options(&up->port, co, baud, parity, bits, flow);
+}
+
+static struct console serial_hsu_console = {
+	.name		= "ttyMFD",
+	.write		= serial_hsu_console_write,
+	.device		= uart_console_device,
+	.setup		= serial_hsu_console_setup,
+	.flags		= CON_PRINTBUFFER,
+	.index		= -1,
+	.data		= &serial_hsu_reg,
+};
+
+#define SERIAL_HSU_CONSOLE	(&serial_hsu_console)
+#else
+#define SERIAL_HSU_CONSOLE	NULL
+#endif
+
+struct uart_ops serial_hsu_pops = {
+	.tx_empty	= serial_hsu_tx_empty,
+	.set_mctrl	= serial_hsu_set_mctrl,
+	.get_mctrl	= serial_hsu_get_mctrl,
+	.stop_tx	= serial_hsu_stop_tx,
+	.start_tx	= serial_hsu_start_tx,
+	.stop_rx	= serial_hsu_stop_rx,
+	.enable_ms	= serial_hsu_enable_ms,
+	.break_ctl	= serial_hsu_break_ctl,
+	.startup	= serial_hsu_startup,
+	.shutdown	= serial_hsu_shutdown,
+	.set_termios	= serial_hsu_set_termios,
+	.pm		= serial_hsu_pm,
+	.type		= serial_hsu_type,
+	.release_port	= serial_hsu_release_port,
+	.request_port	= serial_hsu_request_port,
+	.config_port	= serial_hsu_config_port,
+	.verify_port	= serial_hsu_verify_port,
+	.wake_peer	= serial_hsu_wake_peer,
+#ifdef CONFIG_CONSOLE_POLL
+	.poll_get_char = serial_hsu_get_poll_char,
+	.poll_put_char = serial_hsu_put_poll_char,
+#endif
+};
+
+static struct uart_driver serial_hsu_reg = {
+	.owner		= THIS_MODULE,
+	.driver_name	= "MFD serial",
+	.dev_name	= "ttyMFD",
+	.major		= TTY_MAJOR,
+	.minor		= 128,
+	.nr		= HSU_PORT_MAX,
+};
+
+static irqreturn_t wakeup_irq(int irq, void *dev)
+{
+	struct uart_hsu_port *up = dev_get_drvdata(dev);
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+
+	trace_hsu_func_start(up->index, __func__);
+	set_bit(flag_active, &up->flags);
+	if (cfg->preamble && cfg->hw_set_rts)
+		cfg->hw_set_rts(up->index, 1);
+	pm_runtime_get(dev);
+	pm_runtime_put(dev);
+	trace_hsu_func_end(up->index, __func__, "");
+	return IRQ_HANDLED;
+}
+
+#if defined(CONFIG_PM) || defined(CONFIG_PM_RUNTIME)
+static void hsu_flush_rxfifo(struct uart_hsu_port *up)
+{
+	unsigned int lsr, cnt;
+
+	if (up->hw_type == hsu_intel) {
+		cnt = serial_in(up, UART_FOR) & 0x7F;
+		if (cnt)
+			dev_dbg(up->dev,
+				"Warning: %d bytes are received"
+				" in RX fifo after RTS active for %d us\n",
+				cnt, up->byte_delay);
+		lsr = serial_in(up, UART_LSR);
+		if (lsr & UART_LSR_DR && cnt)
+			dev_dbg(up->dev,
+				"flush abnormal data in rx fifo\n");
+		while (cnt) {
+			serial_in(up, UART_RX);
+			cnt--;
+		}
+	}
+}
+
+static void hsu_regs_context(struct uart_hsu_port *up, int op)
+{
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+
+	if (op == context_load) {
+		/*
+		 * Delay a while for the HW to become stable. Without this
+		 * the resume will just fail, as the value you write to the
+		 * HW register will not be really written.
+		 *
+		 * This is only needed for Tangier, which really power-gates
+		 * the HSU HW in runtime suspend, while on Penwell/CLV it is
+		 * only clock-gated.
+		 */
+		usleep_range(500, 510);
+
+		if (cfg->hw_reset)
+			cfg->hw_reset(up->port.membase);
+
+		serial_out(up, UART_LCR, up->lcr);
+		serial_out(up, UART_LCR, up->lcr | UART_LCR_DLAB);
+		serial_out(up, UART_DLL, up->dll);
+		serial_out(up, UART_DLM, up->dlm);
+		serial_out(up, UART_LCR, up->lcr);
+
+		if (up->hw_type == hsu_intel) {
+			serial_out(up, UART_MUL, up->mul);
+			serial_out(up, UART_DIV, up->div);
+			serial_out(up, UART_PS, up->ps);
+		} else {
+			if (cfg->set_clk)
+				cfg->set_clk(up->m, up->n, up->port.membase);
+		}
+
+		serial_out(up, UART_MCR, up->mcr);
+		serial_out(up, UART_FCR, up->fcr);
+		serial_out(up, UART_IER, up->ier);
+	}
+
+	if (up->use_dma && up->dma_ops->context_op)
+		up->dma_ops->context_op(up, op);
+}
+
+int serial_hsu_do_suspend(struct uart_hsu_port *up)
+{
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+	struct uart_port *uport = &up->port;
+	struct tty_port *tport = &uport->state->port;
+	struct tty_struct *tty = tport->tty;
+	struct circ_buf *xmit = &up->port.state->xmit;
+	char cmd;
+	unsigned long flags;
+
+	trace_hsu_func_start(up->index, __func__);
+
+	if (test_bit(flag_startup, &up->flags)) {
+		if (up->hw_type == hsu_intel &&
+			serial_in(up, UART_FOR) & 0x7F)
+			goto busy;
+		else if (up->hw_type == hsu_dw &&
+			serial_in(up, 0x7c / 4) & BIT(3))
+			goto busy;
+	}
+
+	if (up->use_dma) {
+		if (up->hw_type == hsu_intel) {
+			if (chan_readl(up->rxc, HSU_CH_D0SAR) >
+					up->rxbuf.dma_addr)
+				goto busy;
+		}
+	}
+
+	if (cfg->hw_set_rts)
+		cfg->hw_set_rts(up->index, 1);
+
+	disable_irq(up->port.irq);
+	disable_irq(up->dma_irq);
+
+	if (cfg->hw_set_rts)
+		usleep_range(up->byte_delay, up->byte_delay + 1);
+
+	serial_sched_stop(up);
+	set_bit(flag_suspend, &up->flags);
+
+	if (test_bit(flag_startup, &up->flags) && check_qcmd(up, &cmd)) {
+		dev_info(up->dev, "ignore suspend cmd: %d\n", cmd);
+		goto err;
+	}
+
+	if (test_bit(flag_tx_on, &up->flags)) {
+		dev_info(up->dev, "ignore suspend for tx on\n");
+		dev_info(up->dev,
+			"xmit pending:%d, stopped:%d, hw_stopped:%d, MSR:%x\n",
+			(int)uart_circ_chars_pending(xmit), tty->stopped,
+			tty->hw_stopped, serial_in(up, UART_MSR));
+		goto err;
+	}
+
+	if (test_bit(flag_startup, &up->flags) && !uart_circ_empty(xmit) &&
+		!uart_tx_stopped(&up->port)) {
+		dev_info(up->dev, "ignore suspend for xmit\n");
+		dev_info(up->dev,
+			"xmit pending:%d, stopped:%d, hw_stopped:%d, MSR:%x\n",
+			(int)uart_circ_chars_pending(xmit),
+			tty->stopped,
+			tty->hw_stopped,
+			serial_in(up, UART_MSR));
+		goto err;
+	}
+
+	if (up->use_dma) {
+		if (up->dma_ops->suspend(up))
+			goto err;
+	} else if (test_bit(flag_startup, &up->flags)) {
+		if (up->hw_type == hsu_intel &&
+			serial_in(up, UART_FOR) & 0x7F)
+			goto err;
+		else if (up->hw_type == hsu_dw &&
+			serial_in(up, 0x7c / 4) & BIT(3))
+			goto err;
+	}
+
+	if (cfg->hw_suspend)
+		cfg->hw_suspend(up->index, up->dev, wakeup_irq);
+	if (cfg->hw_context_save)
+		hsu_regs_context(up, context_save);
+	if (cfg->preamble && cfg->hw_suspend_post)
+		cfg->hw_suspend_post(up->index);
+	enable_irq(up->dma_irq);
+	if (up->hw_type == hsu_dw)
+		enable_irq(up->port.irq);
+
+	trace_hsu_func_end(up->index, __func__, "");
+	return 0;
+err:
+	if (cfg->hw_set_rts)
+		cfg->hw_set_rts(up->index, 0);
+	clear_bit(flag_suspend, &up->flags);
+	enable_irq(up->port.irq);
+	if (up->use_dma && up->hw_type == hsu_intel)
+		intel_dma_do_rx(up, 0);
+	enable_irq(up->dma_irq);
+	serial_sched_start(up);
+	spin_lock_irqsave(&up->port.lock, flags);
+	serial_sched_cmd(up, qcmd_get_msr);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+	serial_sched_sync(up);
+busy:
+	pm_schedule_suspend(up->dev, cfg->idle);
+	trace_hsu_func_end(up->index, __func__, "busy");
+	return -EBUSY;
+}
+EXPORT_SYMBOL(serial_hsu_do_suspend);
+
+int serial_hsu_do_resume(struct uart_hsu_port *up)
+{
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+	unsigned long flags;
+
+	trace_hsu_func_start(up->index, __func__);
+	if (!test_and_clear_bit(flag_suspend, &up->flags)) {
+		trace_hsu_func_end(up->index, __func__, "ignore");
+		return 0;
+	}
+	if (up->hw_type == hsu_dw)
+		disable_irq(up->port.irq);
+	if (cfg->hw_context_save)
+		hsu_regs_context(up, context_load);
+	if (cfg->hw_resume)
+		cfg->hw_resume(up->index, up->dev);
+	if (test_bit(flag_startup, &up->flags))
+		hsu_flush_rxfifo(up);
+	if (up->use_dma)
+		up->dma_ops->resume(up);
+	if (cfg->hw_set_rts)
+		cfg->hw_set_rts(up->index, 0);
+	enable_irq(up->port.irq);
+
+	serial_sched_start(up);
+	spin_lock_irqsave(&up->port.lock, flags);
+	serial_sched_cmd(up, qcmd_get_msr);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+	serial_sched_sync(up);
+	trace_hsu_func_end(up->index, __func__, "");
+	return 0;
+}
+EXPORT_SYMBOL(serial_hsu_do_resume);
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+int serial_hsu_do_runtime_idle(struct uart_hsu_port *up)
+{
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+
+	trace_hsu_func_start(up->index, __func__);
+	if (cfg->type == debug_port
+			&& system_state == SYSTEM_BOOTING)
+		/* If the HSU is the default console but earlyprintk does not
+		 * use it, the port would suspend during boot and never come
+		 * back, since nothing resumes it while the system is still
+		 * booting. Postpone the suspend retry by 30 seconds; boot
+		 * should have finished by then.
+		 */
+		pm_schedule_suspend(up->dev, 30000);
+	else
+		pm_schedule_suspend(up->dev, cfg->idle);
+	trace_hsu_func_end(up->index, __func__, "");
+	return -EBUSY;
+}
+EXPORT_SYMBOL(serial_hsu_do_runtime_idle);
+#endif
+
+static void serial_hsu_command(struct uart_hsu_port *up)
+{
+	char cmd, c;
+	unsigned long flags;
+	unsigned int iir, lsr;
+	int status;
+	struct hsu_dma_chan *txc = up->txc;
+	struct hsu_dma_chan *rxc = up->rxc;
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+
+	trace_hsu_func_start(up->index, __func__);
+	if (unlikely(test_bit(flag_cmd_off, &up->flags))) {
+		trace_hsu_func_end(up->index, __func__, "cmd_off");
+		return;
+	}
+	if (unlikely(test_bit(flag_suspend, &up->flags))) {
+		dev_err(up->dev,
+			"cannot handle cmd while port is suspended\n");
+		if (check_qcmd(up, &cmd))
+			dev_err(up->dev, "Command pending: %d\n", cmd);
+		trace_hsu_func_end(up->index, __func__, "suspend");
+		return;
+	}
+	set_bit(flag_active, &up->flags);
+	spin_lock_irqsave(&up->port.lock, flags);
+	while (get_qcmd(up, &cmd)) {
+		spin_unlock_irqrestore(&up->port.lock, flags);
+		trace_hsu_cmd_start(up->index, cmd);
+		switch (cmd) {
+		case qcmd_overflow:
+			dev_err(up->dev, "queue overflow!!\n");
+			break;
+		case qcmd_set_mcr:
+			serial_out(up, UART_MCR, up->mcr);
+			break;
+		case qcmd_set_ier:
+			serial_out(up, UART_IER, up->ier);
+			break;
+		case qcmd_start_rx:
+			/* use for DW DMA RX only */
+			if (test_and_clear_bit(flag_rx_pending, &up->flags)) {
+				if (up->use_dma)
+					up->dma_ops->start_rx(up);
+			}
+			break;
+		case qcmd_stop_rx:
+			if (!up->use_dma || up->hw_type == hsu_dw) {
+				up->ier &= ~UART_IER_RLSI;
+				up->port.read_status_mask &= ~UART_LSR_DR;
+				serial_out(up, UART_IER, up->ier);
+			}
+
+			if (up->use_dma)
+				up->dma_ops->stop_rx(up);
+			break;
+		case qcmd_start_tx:
+			if (up->use_dma) {
+				if (!test_bit(flag_tx_on, &up->flags))
+					up->dma_ops->start_tx(up);
+			} else if (!(up->ier & UART_IER_THRI)) {
+				up->ier |= UART_IER_THRI;
+				serial_out(up, UART_IER, up->ier);
+			}
+			break;
+		case qcmd_stop_tx:
+			if (up->use_dma) {
+				spin_lock_irqsave(&up->port.lock, flags);
+				up->dma_ops->stop_tx(up);
+				clear_bit(flag_tx_on, &up->flags);
+				spin_unlock_irqrestore(&up->port.lock, flags);
+			} else if (up->ier & UART_IER_THRI) {
+				up->ier &= ~UART_IER_THRI;
+				serial_out(up, UART_IER, up->ier);
+			}
+			break;
+		case qcmd_cl:
+			serial_out(up, UART_IER, 0);
+			while (cl_get_char(up, &c)) {
+				while (!wait_for_xmitr(up))
+					if (up->in_tasklet)
+						cpu_relax();
+					else
+						schedule();
+				serial_out(up, UART_TX, c);
+			}
+			serial_out(up, UART_IER, up->ier);
+			break;
+		case qcmd_port_irq:
+			up->port_irq_cmddone++;
+
+			/* The Baytrail platform uses a shared IRQ and needs more care */
+			if (up->hw_type == hsu_intel) {
+				iir = serial_in(up, UART_IIR);
+			} else {
+				if (up->iir & 0x1)
+					up->iir = serial_in(up, UART_IIR);
+				iir = up->iir;
+				up->iir = 1;
+			}
+
+			if (iir & UART_IIR_NO_INT) {
+				enable_irq(up->port.irq);
+				up->port_irq_pio_no_irq_pend++;
+				break;
+			}
+
+			if (iir & HSU_PIO_RX_ERR)
+				up->port_irq_pio_rx_err++;
+			if (iir & HSU_PIO_RX_AVB)
+				up->port_irq_pio_rx_avb++;
+			if (iir & HSU_PIO_RX_TMO)
+				up->port_irq_pio_rx_timeout++;
+			if (iir & HSU_PIO_TX_REQ)
+				up->port_irq_pio_tx_req++;
+
+			lsr = serial_in(up, UART_LSR);
+
+			/* Determine whether this is a timeout or data available */
+			if (lsr & UART_LSR_DR) {
+				if (!up->use_dma) {
+					receive_chars(up, &lsr);
+				} else if (up->hw_type == hsu_dw) {
+					if ((iir & 0xf) == 0xc) {
+						/*
+						 * RX timeout IRQ, the DMA
+						 * channel may be stalled
+						 */
+						up->dma_ops->stop_rx(up);
+						receive_chars(up, &lsr);
+					} else
+						up->dma_ops->start_rx(up);
+				}
+			}
+
+			/* lsr is refreshed inside receive_chars() */
+			if (!up->use_dma && (lsr & UART_LSR_THRE))
+				transmit_chars(up);
+
+			spin_lock_irqsave(&up->port.lock, flags);
+			serial_sched_cmd(up, qcmd_enable_irq);
+			spin_unlock_irqrestore(&up->port.lock, flags);
+			break;
+		case qcmd_enable_irq:
+			enable_irq(up->port.irq);
+			break;
+		case qcmd_dma_irq:
+			/* Only hsu_intel has this irq */
+			up->dma_irq_cmddone++;
+			if (up->port_dma_sts & (1 << txc->id)) {
+				up->dma_tx_irq_cmddone++;
+				status = chan_readl(txc, HSU_CH_SR);
+				up->dma_ops->start_tx(up);
+			}
+
+			if (up->port_dma_sts & (1 << rxc->id)) {
+				status = chan_readl(rxc, HSU_CH_SR);
+				intel_dma_do_rx(up, status);
+			}
+			enable_irq(up->dma_irq);
+			break;
+		case qcmd_cmd_off:
+			set_bit(flag_cmd_off, &up->flags);
+			break;
+		case qcmd_get_msr:
+			break;
+		default:
+			dev_err(up->dev, "invalid command!!\n");
+			break;
+		}
+		trace_hsu_cmd_end(up->index, cmd);
+		spin_lock_irqsave(&up->port.lock, flags);
+		if (unlikely(test_bit(flag_cmd_off, &up->flags)))
+			break;
+	}
+	up->msr = serial_in(up, UART_MSR);
+	if (cfg->hw_ctrl_cts)
+		up->msr |= UART_MSR_CTS;
+	check_modem_status(up);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+	trace_hsu_func_end(up->index, __func__, "");
+}
+
+static void serial_hsu_tasklet(unsigned long data)
+{
+	struct uart_hsu_port *up = (struct uart_hsu_port *)data;
+
+	up->in_tasklet = 1;
+	serial_hsu_command(up);
+	up->tasklet_done++;
+	up->in_tasklet = 0;
+}
+
+static void serial_hsu_work(struct work_struct *work)
+{
+	struct uart_hsu_port *uport =
+		container_of(work, struct uart_hsu_port, work);
+
+	uport->in_workq = 1;
+	serial_hsu_command(uport);
+	uport->workq_done++;
+	uport->in_workq = 0;
+}
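+
+/*
+ * Both entry points above funnel into serial_hsu_command(); the
+ * in_tasklet/in_workq markers only record the calling context so that
+ * qcmd_cl can choose cpu_relax() (atomic context) over schedule()
+ * while waiting for the transmitter.
+ */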
+
+static int serial_port_setup(struct uart_hsu_port *up,
+		struct hsu_port_cfg *cfg)
+{
+	int ret;
+	int index = cfg->index;
+
+	phsu->configs[index] = cfg;
+	up->port.line = index;
+	snprintf(up->name, sizeof(up->name) - 1, "%s_p", cfg->name);
+	up->index = index;
+
+	if ((hsu_dma_enable & (1 << index)) && up->dma_ops)
+		up->use_dma = 1;
+	else
+		up->use_dma = 0;
+
+	if (cfg->hw_init)
+		cfg->hw_init(up->dev, index);
+	mutex_init(&up->q_mutex);
+	tasklet_init(&up->tasklet, serial_hsu_tasklet,
+				(unsigned long)up);
+	up->workqueue =
+		create_singlethread_workqueue(up->name);
+	INIT_WORK(&up->work, serial_hsu_work);
+	up->qcirc.buf = (char *)up->qbuf;
+	spin_lock_init(&up->cl_lock);
+	set_bit(flag_cmd_off, &up->flags);
+
+	if (cfg->type == debug_port) {
+		serial_hsu_reg.cons = SERIAL_HSU_CONSOLE;
+		if (serial_hsu_reg.cons)
+			serial_hsu_reg.cons->index = index;
+	} else
+		serial_hsu_reg.cons = NULL;
+
+	uart_add_one_port(&serial_hsu_reg, &up->port);
+
+	if (phsu->irq_port_and_dma) {
+		up->dma_irq = up->port.irq;
+		ret = request_irq(up->dma_irq, hsu_dma_irq, IRQF_SHARED,
+				"hsu dma", up);
+		if (ret) {
+			dev_err(up->dev, "can not get dma IRQ\n");
+			return ret;
+		}
+		ret = request_irq(up->port.irq, hsu_port_irq, IRQF_SHARED,
+				up->name, up);
+		if (ret) {
+			dev_err(up->dev, "can not get port IRQ\n");
+			return ret;
+		}
+	} else {
+		up->dma_irq = phsu->dma_irq;
+		ret = request_irq(up->port.irq, hsu_port_irq, IRQF_SHARED,
+				up->name, up);
+		if (ret) {
+			dev_err(up->dev, "can not get port IRQ\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+struct uart_hsu_port *serial_hsu_port_setup(struct device *pdev, int port,
+	resource_size_t start, resource_size_t len, int irq)
+{
+	struct uart_hsu_port *up;
+	int index;
+	unsigned int uclk, clock;
+	struct hsu_port_cfg *cfg;
+
+	cfg = hsu_port_func_cfg + port;
+	if (!cfg)
+		return ERR_PTR(-EINVAL);
+
+	pr_info("Found a %s HSU\n", cfg->hw_ip ? "Designware" : "Intel");
+
+	index = cfg->index;
+	up = phsu->port + index;
+
+	up->dev = pdev;
+	up->port.type = PORT_MFD;
+	up->port.iotype = UPIO_MEM;
+	up->port.mapbase = start;
+	up->port.membase = ioremap_nocache(up->port.mapbase, len);
+	up->port.fifosize = 64;
+	up->port.ops = &serial_hsu_pops;
+	up->port.flags = UPF_IOREMAP;
+	up->hw_type = cfg->hw_ip;
+	/* calculate if DLAB=1, the ideal uartclk */
+	if (cfg->hw_get_clk)
+		clock = cfg->hw_get_clk();
+	else
+		clock = 50000;
+	uclk = clock * 1000 / (115200 * 16); /* 16 is default ps */
+	if (uclk >= 24)
+		uclk = 24;
+	else if (uclk >= 16)
+		uclk = 16;
+	else if (uclk >= 8)
+		uclk = 8;
+	else
+		uclk = 1;
+
+	if (up->hw_type == hsu_intel)
+		up->port.uartclk = 115200 * uclk * 16;
+	else
+		up->port.uartclk = 115200 * 32 * 16;
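+	/*
+	 * Worked example of the prescaler math above: with the default
+	 * 50000 kHz clock, uclk = 50000 * 1000 / (115200 * 16) = 27,
+	 * clamped down to 24, so an hsu_intel port gets
+	 * uartclk = 115200 * 24 * 16 = 44236800.
+	 */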
+
+	up->port.irq = irq;
+	up->port.dev = pdev;
+
+	if (up->hw_type == hsu_intel) {
+		up->txc = &phsu->chans[index * 2];
+		up->rxc = &phsu->chans[index * 2 + 1];
+		up->dma_ops = &intel_dma_ops;
+	} else {
+		up->dma_ops = pdw_dma_ops;
+	}
+
+	if (cfg->has_alt) {
+		struct hsu_port_cfg *alt_cfg =
+			hsu_port_func_cfg + cfg->alt;
+		struct uart_hsu_port *alt_up =
+			phsu->port + alt_cfg->index;
+
+		memcpy(alt_up, up, sizeof(*up));
+		serial_port_setup(alt_up, alt_cfg);
+		phsu->port_num++;
+	}
+
+	serial_port_setup(up, cfg);
+	phsu->port_num++;
+
+	return up;
+}
+EXPORT_SYMBOL(serial_hsu_port_setup);
+
+void serial_hsu_port_free(struct uart_hsu_port *up)
+{
+	struct hsu_port_cfg *cfg = phsu->configs[up->index];
+
+	uart_remove_one_port(&serial_hsu_reg, &up->port);
+	free_irq(up->port.irq, up);
+	if (cfg->has_alt) {
+		struct hsu_port_cfg *alt_cfg = phsu->configs[cfg->alt];
+		struct uart_hsu_port *alt_up =
+			phsu->port + alt_cfg->index;
+		uart_remove_one_port(&serial_hsu_reg, &alt_up->port);
+		free_irq(up->port.irq, alt_up);
+	}
+}
+EXPORT_SYMBOL(serial_hsu_port_free);
+
+void serial_hsu_port_shutdown(struct uart_hsu_port *up)
+{
+	uart_suspend_port(&serial_hsu_reg, &up->port);
+}
+EXPORT_SYMBOL(serial_hsu_port_shutdown);
+
+int serial_hsu_dma_setup(struct device *pdev,
+	resource_size_t start, resource_size_t len, unsigned int irq, int share)
+{
+	struct hsu_dma_chan *dchan;
+	int i, ret;
+
+	phsu->reg = ioremap_nocache(start, len);
+	dchan = phsu->chans;
+	for (i = 0; i < 6; i++) {
+		dchan->id = i;
+		dchan->dirt = (i & 0x1) ? DMA_FROM_DEVICE :
+			DMA_TO_DEVICE;
+		dchan->uport = &phsu->port[i/2];
+		dchan->reg = phsu->reg + HSU_DMA_CHANS_REG_OFFSET +
+			i * HSU_DMA_CHANS_REG_LENGTH;
+
+		dchan++;
+	}
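+	/*
+	 * Resulting mapping: channels 2n/2n+1 serve port n, the even one
+	 * for TX (DMA_TO_DEVICE) and the odd one for RX (DMA_FROM_DEVICE),
+	 * e.g. port 1 gets chan 2 for TX and chan 3 for RX.
+	 */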
+
+	/* share the IRQ line with the port when the caller requests it */
+	if (share)
+		phsu->irq_port_and_dma = 1;
+	else {
+		phsu->dma_irq = irq;
+		ret = request_irq(irq, hsu_dma_irq, 0, "hsu dma", phsu);
+		if (ret) {
+			dev_err(pdev, "can not get dma IRQ\n");
+			goto err;
+		}
+	}
+
+	dev_set_drvdata(pdev, phsu);
+
+	return 0;
+err:
+	iounmap(phsu->reg);
+	return ret;
+}
+EXPORT_SYMBOL(serial_hsu_dma_setup);
+
+void serial_hsu_dma_free(void)
+{
+	free_irq(phsu->dma_irq, phsu);
+}
+EXPORT_SYMBOL(serial_hsu_dma_free);
+
+static int __init hsu_init(void)
+{
+	int ret;
+
+	ret = uart_register_driver(&serial_hsu_reg);
+	if (ret)
+		return ret;
+
+	spin_lock_init(&phsu->dma_lock);
+	return hsu_debugfs_init(phsu);
+}
+
+static void __exit hsu_exit(void)
+{
+	uart_unregister_driver(&serial_hsu_reg);
+	hsu_debugfs_remove(phsu);
+}
+
+module_init(hsu_init);
+module_exit(hsu_exit);
+
+MODULE_AUTHOR("Yang Bin <bin.yang@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:medfield-hsu");
diff --git a/drivers/tty/serial/mfd_dma.c b/drivers/tty/serial/mfd_dma.c
new file mode 100644
index 0000000..a9371fb
--- /dev/null
+++ b/drivers/tty/serial/mfd_dma.c
@@ -0,0 +1,753 @@
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/slab.h>
+#include <linux/serial_reg.h>
+#include <linux/circ_buf.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial_mfd.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/intel_mid_dma.h>
+#include <linux/irq.h>
+#include <linux/acpi.h>
+#include <asm/intel_mid_hsu.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/pm_qos.h>
+
+#include "mfd.h"
+
+static int dma_init_common(struct uart_hsu_port *up)
+{
+	struct hsu_dma_buffer *dbuf;
+	struct circ_buf *xmit = &up->port.state->xmit;
+
+	/* 1. Allocate the RX buffer */
+	dbuf = &up->rxbuf;
+	dbuf->buf = kzalloc(HSU_DMA_BUF_SIZE, GFP_KERNEL);
+	if (!dbuf->buf) {
+		up->use_dma = 0;
+		dev_err(up->dev, "allocate DMA buffer failed!!\n");
+		return -ENOMEM;
+	}
+
+	dbuf->dma_addr = dma_map_single(up->dev,
+			dbuf->buf,
+			HSU_DMA_BUF_SIZE,
+			DMA_FROM_DEVICE);
+	dbuf->dma_size = HSU_DMA_BUF_SIZE;
+
+	/* 2. Prepare the TX buffer */
+	dbuf = &up->txbuf;
+	dbuf->buf = xmit->buf;
+	dbuf->dma_addr = dma_map_single(up->dev,
+			dbuf->buf,
+			UART_XMIT_SIZE,
+			DMA_TO_DEVICE);
+	dbuf->dma_size = UART_XMIT_SIZE;
+	dbuf->ofs = 0;
+	return 0;
+}
+
+static void dma_exit_common(struct uart_hsu_port *up)
+{
+	struct hsu_dma_buffer *dbuf;
+	struct uart_port *port = &up->port;
+
+	/* Free and unmap rx dma buffer */
+	dbuf = &up->rxbuf;
+	dma_unmap_single(port->dev,
+			dbuf->dma_addr,
+			dbuf->dma_size,
+			DMA_FROM_DEVICE);
+	kfree(dbuf->buf);
+
+	/* Next unmap tx dma buffer*/
+	dbuf = &up->txbuf;
+	dma_unmap_single(port->dev,
+			dbuf->dma_addr,
+			dbuf->dma_size,
+			DMA_TO_DEVICE);
+}
+
+#ifdef CONFIG_INTEL_MID_DMAC
+static bool dw_dma_chan_filter(struct dma_chan *chan, void *param)
+{
+	struct dw_dma_priv *dw_dma = param;
+
+	if (dw_dma->dmac && (&dw_dma->dmac->dev == chan->device->dev))
+		return true;
+	else {
+#ifdef CONFIG_ACPI
+		acpi_handle handle = ACPI_HANDLE(chan->device->dev);
+		struct acpi_device *device;
+		int ret;
+		const char *hid;
+		ret = acpi_bus_get_device(handle, &device);
+		if (ret) {
+			pr_warn("DW HSU: no acpi entry\n");
+			return false;
+		}
+		hid = acpi_device_hid(device);
+		if (!strncmp(hid, "INTL9C60", strlen(hid))) {
+			acpi_status status;
+			unsigned long long tmp;
+			status = acpi_evaluate_integer(handle,
+					"_UID", NULL, &tmp);
+			if (!ACPI_FAILURE(status) && (tmp == 1))
+				return true;
+		}
+		if (!strncmp(hid, "80862286", strlen(hid))) {
+			return true;
+		}
+
+#endif
+		return false;
+	}
+}
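+
+/*
+ * The filter above accepts a channel either because its device is the
+ * DMA controller located via PCI id 0x0f06, or, on ACPI-enumerated
+ * systems, because the parent device matches HID "INTL9C60" with
+ * _UID 1 or HID "80862286".
+ */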
+
+/* the RX/TX buffer init should be a common stuff */
+static int dw_dma_init(struct uart_hsu_port *up)
+{
+	struct dw_dma_priv *dw_dma;
+	struct intel_mid_dma_slave *rxs, *txs;
+	dma_cap_mask_t mask;
+	int ret = 0;
+
+	dw_dma = kzalloc(sizeof(*dw_dma), GFP_KERNEL);
+	if (!dw_dma) {
+		pr_warn("DW HSU: Can't alloc memory for dw_dm_priv\n");
+		return -1;
+	}
+
+	up->dma_priv = dw_dma;
+
+	/*
+	 * Get pci device for DMA controller, currently it could only
+	 * be the DMA controller of baytrail
+	 */
+	dw_dma->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0f06, NULL);
+	if (!dw_dma->dmac) {
+		/* still have chance to get from ACPI dev */
+		pr_warn("DW HSU: Can't find LPIO1 DMA controller by PCI, try ACPI\n");
+	}
+
+	ret = dma_init_common(up);
+	if (ret)
+		return ret;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	/* 1. Init rx channel */
+	dw_dma->rxchan = dma_request_channel(mask, dw_dma_chan_filter, dw_dma);
+	if (!dw_dma->rxchan)
+		goto err_exit;
+	rxs = &dw_dma->rxs;
+	rxs->dma_slave.direction = DMA_FROM_DEVICE;
+	rxs->hs_mode = LNW_DMA_HW_HS;
+	rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
+	rxs->dma_slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+
+	/* These are fixed HW info from Baytrail datasheet */
+	if (up->index == 0)
+		rxs->device_instance = 3;
+	else
+		rxs->device_instance = 5;
+	dw_dma->rxchan->private = rxs;
+
+	/* 2. Init tx channel */
+	dw_dma->txchan = dma_request_channel(mask, dw_dma_chan_filter, dw_dma);
+	if (!dw_dma->txchan)
+		goto free_rxchan;
+
+	txs = &dw_dma->txs;
+	txs->dma_slave.direction = DMA_TO_DEVICE;
+	txs->hs_mode = LNW_DMA_HW_HS;
+	txs->cfg_mode = LNW_DMA_MEM_TO_PER;
+	txs->dma_slave.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	if (up->index == 0)
+		txs->device_instance = 2;
+	else
+		txs->device_instance = 4;
+	dw_dma->txchan->private = txs;
+
+	/* TX/RX reg share the same addr */
+	dw_dma->dma_addr = up->port.mapbase + UART_RX;
+
+	pm_qos_add_request(&up->qos, PM_QOS_CPU_DMA_LATENCY,
+			PM_QOS_DEFAULT_VALUE);
+
+	dw_dma->up = up;
+	up->dma_inited = 1;
+	return 0;
+
+free_rxchan:
+	dma_release_channel(dw_dma->rxchan);
+err_exit:
+	return -1;
+
+}
+
+static int dw_dma_suspend(struct uart_hsu_port *up)
+{
+	struct dw_dma_priv *dw_dma = up->dma_priv;
+	struct dma_chan *txchan;
+	struct dma_chan *rxchan;
+
+	if (!up->dma_inited)
+		return 0;
+
+	txchan = dw_dma->txchan;
+	rxchan = dw_dma->rxchan;
+
+	if (test_bit(flag_rx_on, &up->flags) ||
+		test_bit(flag_rx_pending, &up->flags)) {
+		dev_warn(up->dev, "ignore suspend for rx dma is running\n");
+		return -1;
+	}
+
+	txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0);
+	rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0);
+
+	txchan->device->device_control(txchan, DMA_PAUSE, 0);
+	rxchan->device->device_control(rxchan, DMA_PAUSE, 0);
+	pm_qos_update_request(&up->qos, PM_QOS_DEFAULT_VALUE);
+	return 0;
+}
+
+static int dw_dma_resume(struct uart_hsu_port *up)
+{
+	struct dw_dma_priv *dw_dma = up->dma_priv;
+	struct dma_chan *txchan;
+	struct dma_chan *rxchan;
+
+	if (!up->dma_inited)
+		return 0;
+
+	txchan = dw_dma->txchan;
+	rxchan = dw_dma->rxchan;
+
+	rxchan->device->device_control(rxchan, DMA_RESUME, 0);
+	txchan->device->device_control(txchan, DMA_RESUME, 0);
+	pm_qos_update_request(&up->qos, CSTATE_EXIT_LATENCY_C2);
+	return 0;
+}
+
+
+static int dw_dma_exit(struct uart_hsu_port *up)
+{
+	struct dw_dma_priv *dw_dma = up->dma_priv;
+	struct dma_chan *txchan = dw_dma->txchan;
+	struct dma_chan *rxchan = dw_dma->rxchan;
+
+	pm_qos_remove_request(&up->qos);
+	txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0);
+	rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0);
+	dma_release_channel(dw_dma->txchan);
+	dma_release_channel(dw_dma->rxchan);
+
+	dma_exit_common(up);
+
+	kfree(dw_dma);
+
+	up->dma_inited = 0;
+	up->dma_priv = NULL;
+	return 0;
+}
+
+static void dw_dma_tx_done(void *arg)
+{
+	struct dw_dma_priv *dw_dma = arg;
+	struct uart_hsu_port *up = dw_dma->up;
+	struct circ_buf *xmit = &up->port.state->xmit;
+	struct hsu_dma_buffer *dbuf = &up->txbuf;
+	unsigned long flags;
+	int count = 0;
+
+	count = intel_dma_get_src_addr(dw_dma->txchan) - dbuf->dma_addr
+			- xmit->tail;
+
+	/* Update the circ buf info */
+	xmit->tail += dbuf->ofs;
+	xmit->tail &= UART_XMIT_SIZE - 1;
+	up->port.icount.tx += dbuf->ofs;
+
+	dbuf->ofs = 0;
+
+	clear_bit(flag_tx_on, &up->flags);
+
+	if (!uart_circ_empty(xmit) && !uart_tx_stopped(&up->port)) {
+		spin_lock_irqsave(&up->port.lock, flags);
+		serial_sched_cmd(up, qcmd_start_tx);
+		spin_unlock_irqrestore(&up->port.lock, flags);
+	}
+
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(&up->port);
+}
+
+static void dw_dma_start_tx(struct uart_hsu_port *up)
+{
+	struct dw_dma_priv *dw_dma = up->dma_priv;
+	struct dma_async_tx_descriptor *txdesc = NULL;
+	struct dma_chan *txchan;
+	struct dma_slave_config *txconf;
+	struct hsu_dma_buffer *dbuf = &up->txbuf;
+	struct circ_buf *xmit = &up->port.state->xmit;
+	int count;
+	enum dma_ctrl_flags		flag;
+
+	txchan = dw_dma->txchan;
+	txconf = &dw_dma->txs.dma_slave;
+
+	if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
+		if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+			uart_write_wakeup(&up->port);
+		return;
+	}
+
+	/*
+	 * Need to check if FCR is set, better to be set only once when
+	 * use_dma == 1
+	 */
+
+	set_bit(flag_tx_on, &up->flags);
+	count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+
+	if (count >= 2000)
+		count = 2000;
+
+	dbuf->ofs = count;
+
+
+	if (!count) {
+		pr_err("we see a case of TX Len == 0!!!\n\n");
+		dump_stack();
+		clear_bit(flag_tx_on, &up->flags);
+		return;
+	}
+
+	/* 2. Prepare the TX dma transfer */
+	txconf->direction = DMA_TO_DEVICE;
+	txconf->dst_addr = dw_dma->dma_addr;
+	txconf->src_maxburst = LNW_DMA_MSIZE_8;
+	txconf->dst_maxburst = LNW_DMA_MSIZE_8;
+	txconf->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	txconf->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+
+	txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
+				       (unsigned long) txconf);
+
+	dma_sync_single_for_device(up->port.dev,
+					   dbuf->dma_addr,
+					   dbuf->dma_size,
+					   DMA_TO_DEVICE);
+
+	flag = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP | DMA_CTRL_ACK;
+
+	txdesc = txchan->device->device_prep_dma_memcpy(
+					txchan,		/* DMA Channel */
+					dw_dma->dma_addr,	/* DAR */
+					dbuf->dma_addr + xmit->tail, /* SAR */
+					count,		/* Data len */
+					flag);		/* Flag */
+	if (!txdesc) {
+		pr_warn("DW HSU: fail to prepare TX DMA operation\n");
+		return;
+	}
+
+	txdesc->callback = dw_dma_tx_done;
+	txdesc->callback_param = dw_dma;
+	txdesc->tx_submit(txdesc);
+}
+
+static void dw_dma_stop_tx(struct uart_hsu_port *up)
+{
+	struct dw_dma_priv *dw_dma = up->dma_priv;
+	struct dma_chan *txchan = dw_dma->txchan;
+	struct hsu_dma_buffer *dbuf = &up->txbuf;
+	int ret;
+	int count;
+
+	if (!test_bit(flag_tx_on, &up->flags))
+		return;
+
+	count = intel_dma_get_src_addr(dw_dma->txchan) - dbuf->dma_addr;
+
+	/* Note: this call may sleep */
+	ret = txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0);
+	if (ret)
+		dev_warn(up->dev, "failed to stop DMA TX channel\n");
+}
+
+static void dw_dma_rx_done(void *arg)
+{
+	struct dw_dma_priv *dw_dma = arg;
+	struct uart_hsu_port *up = dw_dma->up;
+	struct hsu_dma_buffer *dbuf = &up->rxbuf;
+	struct uart_port *port = &up->port;
+	struct tty_struct *tty;
+	struct tty_port *tport = &port->state->port;
+	int count;
+	unsigned long flags;
+
+	tty = tty_port_tty_get(&up->port.state->port);
+	if (!tty)
+		return;
+
+	dma_sync_single_for_cpu(port->dev, dbuf->dma_addr,
+			dbuf->dma_size, DMA_FROM_DEVICE);
+
+	count = dbuf->ofs;
+	tty_insert_flip_string(tport, dbuf->buf, count);
+	port->icount.rx += count;
+
+	/* Sync the buffer back for the device; may be unneeded on cache-coherent x86 */
+	dma_sync_single_for_device(up->port.dev, dbuf->dma_addr,
+			dbuf->dma_size, DMA_FROM_DEVICE);
+
+	tty_flip_buffer_push(tport);
+	tty_kref_put(tty);
+
+	clear_bit(flag_rx_on, &up->flags);
+
+	spin_lock_irqsave(&up->port.lock, flags);
+	if (test_bit(flag_rx_pending, &up->flags))
+		serial_sched_cmd(up, qcmd_start_rx);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+
+}
+
+
+static void dw_dma_start_rx(struct uart_hsu_port *up)
+{
+	struct dma_async_tx_descriptor *rxdesc = NULL;
+	struct dw_dma_priv *dw_dma = up->dma_priv;
+	struct hsu_dma_buffer *dbuf = &up->rxbuf;
+	struct dma_chan *rxchan = dw_dma->rxchan;
+	struct dma_slave_config *rxconf = &dw_dma->rxs.dma_slave;
+	enum dma_ctrl_flags flag;
+
+	if (test_and_set_bit(flag_rx_on, &up->flags)) {
+		set_bit(flag_rx_pending, &up->flags);
+		return;
+	}
+
+	dbuf->ofs = 2048 - 64;
+
+	/* Prepare the RX dma transfer */
+	rxconf->direction = DMA_FROM_DEVICE;
+	rxconf->src_addr = dw_dma->dma_addr;
+	rxconf->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	rxconf->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+
+	/* TODO: calculate an optimal burst size */
+	rxconf->src_maxburst = LNW_DMA_MSIZE_8;
+	rxconf->dst_maxburst = LNW_DMA_MSIZE_8;
+
+	rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
+				       (unsigned long) rxconf);
+	flag = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP | DMA_CTRL_ACK;
+	rxdesc = rxchan->device->device_prep_dma_memcpy(
+					rxchan,			/* DMA chan */
+					dbuf->dma_addr,		/* DAR */
+					dw_dma->dma_addr,	/* SAR */
+					dbuf->ofs,		/* data len */
+					flag);
+	if (!rxdesc) {
+		pr_warn("DW HSU: fail to prepare TX DMA operation\n");
+		return;
+	}
+
+	rxdesc->callback = dw_dma_rx_done;
+	rxdesc->callback_param = dw_dma;
+	rxdesc->tx_submit(rxdesc);
+}
+
+static void dw_dma_stop_rx(struct uart_hsu_port *up)
+{
+	struct dw_dma_priv *dw_dma = up->dma_priv;
+	struct hsu_dma_buffer *dbuf = &up->rxbuf;
+	struct dma_chan *rxchan = dw_dma->rxchan;
+	int count, ret;
+	struct uart_port *port = &up->port;
+	struct tty_struct *tty;
+	struct tty_port *tport = &port->state->port;
+
+	if (!test_bit(flag_rx_on, &up->flags)) {
+		clear_bit(flag_rx_pending, &up->flags);
+		return;
+	}
+
+	ret = rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0);
+	if (ret) {
+		WARN(1, "DMA TERMINATE of TX returns error\n");
+		return;
+	}
+
+	tty = tty_port_tty_get(&up->port.state->port);
+	if (!tty)
+		return;
+
+	count = intel_dma_get_dst_addr(rxchan) - dbuf->dma_addr;
+	if (!count)
+		goto exit;
+
+	dma_sync_single_for_cpu(port->dev, dbuf->dma_addr,
+		dbuf->dma_size, DMA_FROM_DEVICE);
+
+	tty_insert_flip_string(tport, dbuf->buf, count);
+	port->icount.rx += count;
+
+	/* Sync the buffer back for the device; may be unneeded on cache-coherent x86 */
+	dma_sync_single_for_device(up->port.dev, dbuf->dma_addr,
+			dbuf->dma_size, DMA_FROM_DEVICE);
+
+	tty_flip_buffer_push(tport);
+
+exit:
+	tty_kref_put(tty);
+	clear_bit(flag_rx_on, &up->flags);
+	clear_bit(flag_rx_pending, &up->flags);
+}
+
+struct hsu_dma_ops dw_dma_ops = {
+	.init =		dw_dma_init,
+	.exit =		dw_dma_exit,
+	.suspend =	dw_dma_suspend,
+	.resume	=	dw_dma_resume,
+	.start_tx =	dw_dma_start_tx,
+	.stop_tx =	dw_dma_stop_tx,
+	.start_rx =	dw_dma_start_rx,
+	.stop_rx =	dw_dma_stop_rx,
+};
+
+struct hsu_dma_ops *pdw_dma_ops = &dw_dma_ops;
+
+#else
+struct hsu_dma_ops *pdw_dma_ops = NULL;
+#endif
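+
+/*
+ * With CONFIG_INTEL_MID_DMAC disabled, pdw_dma_ops stays NULL, so
+ * serial_port_setup() leaves use_dma at 0 for DesignWare ports and
+ * they fall back to PIO.
+ */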
+
+/* Intel DMA ops */
+
+/* The buffer is already cache coherent */
+void hsu_dma_start_rx_chan(struct hsu_dma_chan *rxc,
+			struct hsu_dma_buffer *dbuf)
+{
+	dbuf->ofs = 0;
+
+	chan_writel(rxc, HSU_CH_BSR, HSU_DMA_BSR);
+	chan_writel(rxc, HSU_CH_MOTSR, HSU_DMA_MOTSR);
+
+	chan_writel(rxc, HSU_CH_D0SAR, dbuf->dma_addr);
+	chan_writel(rxc, HSU_CH_D0TSR, dbuf->dma_size);
+	chan_writel(rxc, HSU_CH_DCR, 0x1 | (0x1 << 8)
+					 | (0x1 << 16)
+					 | (0x1 << 24)	/* timeout, Errata 1 */
+					 );
+	chan_writel(rxc, HSU_CH_CR, 0x3);
+}
+
+static int intel_dma_init(struct uart_hsu_port *up)
+{
+	int ret;
+
+	clear_bit(flag_tx_on, &up->flags);
+
+	ret = dma_init_common(up);
+	if (ret)
+		return ret;
+
+	/* This should not be changed all around */
+	chan_writel(up->txc, HSU_CH_BSR, HSU_DMA_BSR);
+	chan_writel(up->txc, HSU_CH_MOTSR, HSU_DMA_MOTSR);
+
+	/* Start the RX channel right now */
+	hsu_dma_start_rx_chan(up->rxc, &up->rxbuf);
+
+	up->dma_inited = 1;
+	return 0;
+}
+
+static int intel_dma_exit(struct uart_hsu_port *up)
+{
+	chan_writel(up->txc, HSU_CH_CR, 0x0);
+	clear_bit(flag_tx_on, &up->flags);
+	chan_writel(up->rxc, HSU_CH_CR, 0x2);
+	dma_exit_common(up);
+
+	up->dma_inited = 0;
+	return 0;
+}
+
+
+static void intel_dma_start_tx(struct uart_hsu_port *up)
+{
+	struct circ_buf *xmit = &up->port.state->xmit;
+	struct hsu_dma_buffer *dbuf = &up->txbuf;
+	unsigned long flags;
+	int count;
+
+	spin_lock_irqsave(&up->port.lock, flags);
+	chan_writel(up->txc, HSU_CH_CR, 0x0);
+	while (chan_readl(up->txc, HSU_CH_CR))
+		cpu_relax();
+	clear_bit(flag_tx_on, &up->flags);
+	if (dbuf->ofs) {
+		u32 real = chan_readl(up->txc, HSU_CH_D0SAR) - up->tx_addr;
+
+		/* Under flow control the TX irq can arrive before the whole
+		 * TX buffer has been sent
+		 */
+		if (real < dbuf->ofs)
+			dbuf->ofs = real; /* adjust to real chars sent */
+
+		/* Update the circ buf info */
+		xmit->tail += dbuf->ofs;
+		xmit->tail &= UART_XMIT_SIZE - 1;
+
+		up->port.icount.tx += dbuf->ofs;
+		dbuf->ofs = 0;
+	}
+
+	if (!uart_circ_empty(xmit) && !uart_tx_stopped(&up->port)) {
+		set_bit(flag_tx_on, &up->flags);
+		dma_sync_single_for_device(up->port.dev,
+					   dbuf->dma_addr,
+					   dbuf->dma_size,
+					   DMA_TO_DEVICE);
+
+		count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+		dbuf->ofs = count;
+
+		/* Reprogram the channel */
+		up->tx_addr = dbuf->dma_addr + xmit->tail;
+		chan_writel(up->txc, HSU_CH_D0SAR, up->tx_addr);
+		chan_writel(up->txc, HSU_CH_D0TSR, count);
+
+		/* Reenable the channel */
+		chan_writel(up->txc, HSU_CH_DCR, 0x1
+						 | (0x1 << 8)
+						 | (0x1 << 16));
+		chan_writel(up->txc, HSU_CH_CR, 0x1);
+	}
+
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(&up->port);
+
+	spin_unlock_irqrestore(&up->port.lock, flags);
+	return;
+}
+
+static void intel_dma_stop_tx(struct uart_hsu_port *up)
+{
+	chan_writel(up->txc, HSU_CH_CR, 0x0);
+	return;
+}
+
+static void intel_dma_start_rx(struct uart_hsu_port *up)
+{
+	return;
+}
+
+static void intel_dma_stop_rx(struct uart_hsu_port *up)
+{
+	chan_writel(up->rxc, HSU_CH_CR, 0x2);
+	return;
+}
+
+static void intel_dma_context_op(struct uart_hsu_port *up, int op)
+{
+	if (op == context_save) {
+		up->txc->cr  = chan_readl(up->txc, HSU_CH_CR);
+		up->txc->dcr = chan_readl(up->txc, HSU_CH_DCR);
+		up->txc->sar = chan_readl(up->txc, HSU_CH_D0SAR);
+		up->txc->tsr = chan_readl(up->txc, HSU_CH_D0TSR);
+
+		up->rxc->cr  = chan_readl(up->rxc, HSU_CH_CR);
+		up->rxc->dcr = chan_readl(up->rxc, HSU_CH_DCR);
+		up->rxc->sar = chan_readl(up->rxc, HSU_CH_D0SAR);
+		up->rxc->tsr = chan_readl(up->rxc, HSU_CH_D0TSR);
+	} else {
+		chan_writel(up->txc, HSU_CH_DCR, up->txc->dcr);
+		chan_writel(up->txc, HSU_CH_D0SAR, up->txc->sar);
+		chan_writel(up->txc, HSU_CH_D0TSR, up->txc->tsr);
+		chan_writel(up->txc, HSU_CH_BSR, HSU_DMA_BSR);
+		chan_writel(up->txc, HSU_CH_MOTSR, HSU_DMA_MOTSR);
+
+		chan_writel(up->rxc, HSU_CH_DCR, up->rxc->dcr);
+		chan_writel(up->rxc, HSU_CH_D0SAR, up->rxc->sar);
+		chan_writel(up->rxc, HSU_CH_D0TSR, up->rxc->tsr);
+		chan_writel(up->rxc, HSU_CH_BSR, HSU_DMA_BSR);
+		chan_writel(up->rxc, HSU_CH_MOTSR, HSU_DMA_MOTSR);
+	}
+}
+
+
+static int intel_dma_resume(struct uart_hsu_port *up)
+{
+	chan_writel(up->rxc, HSU_CH_CR, up->rxc_chcr_save);
+	return 0;
+}
+
+static int intel_dma_suspend(struct uart_hsu_port *up)
+{
+	int loop = 100000;
+	struct hsu_dma_chan *chan = up->rxc;
+
+	up->rxc_chcr_save = chan_readl(up->rxc, HSU_CH_CR);
+
+	if (test_bit(flag_startup, &up->flags)
+			&& serial_in(up, UART_FOR) & 0x7F) {
+		dev_err(up->dev, "ignore suspend for rx fifo\n");
+		return -1;
+	}
+
+	if (chan_readl(up->txc, HSU_CH_CR)) {
+		dev_info(up->dev, "ignore suspend for tx dma\n");
+		return -1;
+	}
+
+	chan_writel(up->rxc, HSU_CH_CR, 0x2);
+	while (--loop) {
+		if (chan_readl(up->rxc, HSU_CH_CR) == 0x2)
+			break;
+		cpu_relax();
+	}
+
+	if (!loop) {
+		dev_err(up->dev, "Can't stop rx dma\n");
+		return -1;
+	}
+
+	if (chan_readl(chan, HSU_CH_D0SAR) - up->rxbuf.dma_addr) {
+		dev_err(up->dev, "ignore suspend for dma pointer\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+struct hsu_dma_ops intel_dma_ops = {
+	.init =		intel_dma_init,
+	.exit =		intel_dma_exit,
+	.suspend =	intel_dma_suspend,
+	.resume	=	intel_dma_resume,
+	.start_tx =	intel_dma_start_tx,
+	.stop_tx =	intel_dma_stop_tx,
+	.start_rx =	intel_dma_start_rx,
+	.stop_rx =	intel_dma_stop_rx,
+	.context_op =	intel_dma_context_op,
+};
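+
+/*
+ * Ports consume these ops through the vtable chosen in
+ * serial_hsu_port_setup() (intel_dma_ops or pdw_dma_ops) and invoked
+ * as e.g. up->dma_ops->start_tx(up) from the qcmd handlers, so a new
+ * DMA backend only needs to fill in this structure.
+ */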
+
+
diff --git a/drivers/tty/serial/mfd_pci.c b/drivers/tty/serial/mfd_pci.c
new file mode 100644
index 0000000..66c7138
--- /dev/null
+++ b/drivers/tty/serial/mfd_pci.c
@@ -0,0 +1,298 @@
+/*
+ * mfd_pci.c: driver for High Speed UART device of Intel Medfield platform
+ *
+ * Refer pxa.c, 8250.c and some other drivers in drivers/serial/
+ *
+ * (C) Copyright 2010 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+/* Notes:
+ * 1. DMA channel allocation: channels 0/1 are assigned to port 0,
+ *    2/3 to port 1 and 4/5 to port 2. Even-numbered channels are
+ *    used for TX, odd-numbered ones for RX (matching
+ *    serial_hsu_dma_setup())
+ *
+ * 2. RI/DSR/DCD/DTR are not pinned out; DCD & DSR are always
+ *    asserted. DDCD and DDSR are triggered only when the HW is
+ *    reset
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/pm_qos.h>
+
+#include "mfd.h"
+
+#ifdef CONFIG_PM
+static int serial_hsu_pci_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+	struct uart_hsu_port *up = pci_get_drvdata(pdev);
+	int ret = 0;
+
+	if (up) {
+		trace_hsu_func_start(up->index, __func__);
+		ret = serial_hsu_do_suspend(up);
+		trace_hsu_func_end(up->index, __func__, "");
+	}
+	return ret;
+}
+
+static int serial_hsu_pci_resume(struct device *dev)
+{
+	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+	struct uart_hsu_port *up = pci_get_drvdata(pdev);
+	int ret = 0;
+
+	if (up) {
+		trace_hsu_func_start(up->index, __func__);
+		ret = serial_hsu_do_resume(up);
+		trace_hsu_func_end(up->index, __func__, "");
+	}
+	return ret;
+}
+#else
+#define serial_hsu_pci_suspend	NULL
+#define serial_hsu_pci_resume	NULL
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int serial_hsu_pci_runtime_idle(struct device *dev)
+{
+	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+	struct uart_hsu_port *up = pci_get_drvdata(pdev);
+
+	return serial_hsu_do_runtime_idle(up);
+}
+
+static int serial_hsu_pci_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+	struct uart_hsu_port *up = pci_get_drvdata(pdev);
+	int ret = 0;
+
+	trace_hsu_func_start(up->index, __func__);
+	ret = serial_hsu_do_suspend(up);
+	trace_hsu_func_end(up->index, __func__, "");
+	return ret;
+}
+
+static int serial_hsu_pci_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+	struct uart_hsu_port *up = pci_get_drvdata(pdev);
+	int ret = 0;
+
+	trace_hsu_func_start(up->index, __func__);
+	ret = serial_hsu_do_resume(up);
+	trace_hsu_func_end(up->index, __func__, "");
+	return ret;
+}
+#else
+#define serial_hsu_pci_runtime_idle		NULL
+#define serial_hsu_pci_runtime_suspend	NULL
+#define serial_hsu_pci_runtime_resume	NULL
+#endif
+
+static const struct dev_pm_ops serial_hsu_pci_pm_ops = {
+
+	SET_SYSTEM_SLEEP_PM_OPS(serial_hsu_pci_suspend,
+				serial_hsu_pci_resume)
+	SET_RUNTIME_PM_OPS(serial_hsu_pci_runtime_suspend,
+				serial_hsu_pci_runtime_resume,
+				serial_hsu_pci_runtime_idle)
+};
+
+DEFINE_PCI_DEVICE_TABLE(hsuart_port_pci_ids) = {
+	{ PCI_VDEVICE(INTEL, 0x081B), hsu_port0 },
+	{ PCI_VDEVICE(INTEL, 0x081C), hsu_port1 },
+	{ PCI_VDEVICE(INTEL, 0x081D), hsu_port2 },
+	/* Cloverview support */
+	{ PCI_VDEVICE(INTEL, 0x08FC), hsu_port0 },
+	{ PCI_VDEVICE(INTEL, 0x08FD), hsu_port1 },
+	{ PCI_VDEVICE(INTEL, 0x08FE), hsu_port2 },
+	/* Tangier support */
+	{ PCI_VDEVICE(INTEL, 0x1191), hsu_port0 },
+	/* VLV2 support */
+	{ PCI_VDEVICE(INTEL, 0x0F0A), hsu_port0 },
+	{ PCI_VDEVICE(INTEL, 0x0F0C), hsu_port1 },
+	/* CHV support */
+	{ PCI_VDEVICE(INTEL, 0x228A), hsu_port0 },
+	{ PCI_VDEVICE(INTEL, 0x228C), hsu_port1 },
+	{},
+};
+
+DEFINE_PCI_DEVICE_TABLE(hsuart_dma_pci_ids) = {
+	{ PCI_VDEVICE(INTEL, 0x081E), hsu_dma },
+	/* Cloverview support */
+	{ PCI_VDEVICE(INTEL, 0x08FF), hsu_dma },
+	/* Tangier support */
+	{ PCI_VDEVICE(INTEL, 0x1192), hsu_dma },
+	{},
+};
+
+static int serial_hsu_pci_port_probe(struct pci_dev *pdev,
+				const struct pci_device_id *ent)
+{
+	struct uart_hsu_port *up;
+	int ret, port, hw_type;
+	resource_size_t start, len;
+
+	start = pci_resource_start(pdev, 0);
+	len = pci_resource_len(pdev, 0);
+
+	dev_info(&pdev->dev,
+		"FUNC: %d driver: %ld addr:%lx len:%lx\n",
+		PCI_FUNC(pdev->devfn), ent->driver_data,
+		(unsigned long) start, (unsigned long) len);
+
+	port = intel_mid_hsu_func_to_port(PCI_FUNC(pdev->devfn));
+	if (port == -1)
+		return 0;
+
+	ret = pci_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	ret = pci_request_region(pdev, 0, "hsu");
+	if (ret)
+		goto err;
+
+	up = serial_hsu_port_setup(&pdev->dev, port, start, len,
+			pdev->irq);
+	if (IS_ERR(up)) {
+		ret = PTR_ERR(up);
+		goto err;
+	}
+
+	pci_set_drvdata(pdev, up);
+
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_forbid(&pdev->dev);
+	return 0;
+err:
+	pci_disable_device(pdev);
+	return ret;
+}
+
+static void serial_hsu_pci_port_remove(struct pci_dev *pdev)
+{
+	struct uart_hsu_port *up = pci_get_drvdata(pdev);
+
+	pm_runtime_forbid(&pdev->dev);
+	pm_runtime_get_noresume(&pdev->dev);
+	serial_hsu_port_free(up);
+	pci_set_drvdata(pdev, NULL);
+	pci_disable_device(pdev);
+}
+
+static void serial_hsu_pci_port_shutdown(struct pci_dev *pdev)
+{
+	struct uart_hsu_port *up = pci_get_drvdata(pdev);
+
+	if (!up)
+		return;
+
+	serial_hsu_port_shutdown(up);
+}
+
+static struct pci_driver hsu_port_pci_driver = {
+	.name =		"HSU serial",
+	.id_table =	hsuart_port_pci_ids,
+	.probe =	serial_hsu_pci_port_probe,
+	.remove =	serial_hsu_pci_port_remove,
+	.shutdown =	serial_hsu_pci_port_shutdown,
+/* Disable PM only when kgdb (poll-mode UART) is enabled */
+#if defined(CONFIG_PM) && !defined(CONFIG_CONSOLE_POLL)
+	.driver = {
+		.pm = &serial_hsu_pci_pm_ops,
+	},
+#endif
+};
+
+static int serial_hsu_pci_dma_probe(struct pci_dev *pdev,
+				const struct pci_device_id *ent)
+{
+	struct hsu_dma_chan *dchan;
+	int ret, share_irq = 0;
+	resource_size_t start, len;
+
+	start = pci_resource_start(pdev, 0);
+	len = pci_resource_len(pdev, 0);
+
+	dev_info(&pdev->dev,
+		"FUNC: %d driver: %ld addr:%lx len:%lx\n",
+		PCI_FUNC(pdev->devfn), ent->driver_data,
+		(unsigned long) pci_resource_start(pdev, 0),
+		(unsigned long) pci_resource_len(pdev, 0));
+
+	ret = pci_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	ret = pci_request_region(pdev, 0, "hsu dma");
+	if (ret)
+		goto err;
+
+	/* share irq with port? ANN all and TNG chip from B0 stepping */
+	if ((intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER &&
+		pdev->revision >= 0x1) ||
+		intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_ANNIEDALE)
+		share_irq = 1;
+
+	ret = serial_hsu_dma_setup(&pdev->dev, start, len, pdev->irq, share_irq);
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	pci_disable_device(pdev);
+	return ret;
+}
+
+static void serial_hsu_pci_dma_remove(struct pci_dev *pdev)
+{
+	serial_hsu_dma_free();
+	pci_disable_device(pdev);
+	pci_unregister_driver(&hsu_port_pci_driver);
+}
+
+static struct pci_driver hsu_dma_pci_driver = {
+	.name =		"HSU DMA",
+	.id_table =	hsuart_dma_pci_ids,
+	.probe =	serial_hsu_pci_dma_probe,
+	.remove =	serial_hsu_pci_dma_remove,
+};
+
+static int __init hsu_pci_init(void)
+{
+	int ret;
+
+	ret = pci_register_driver(&hsu_dma_pci_driver);
+	if (!ret) {
+		ret = pci_register_driver(&hsu_port_pci_driver);
+		if (ret)
+			pci_unregister_driver(&hsu_dma_pci_driver);
+	}
+
+	return ret;
+}
+
+static void __exit hsu_pci_exit(void)
+{
+	pci_unregister_driver(&hsu_port_pci_driver);
+	pci_unregister_driver(&hsu_dma_pci_driver);
+}
+
+module_init(hsu_pci_init);
+module_exit(hsu_pci_exit);
+
+MODULE_AUTHOR("Yang Bin <bin.yang@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:medfield-hsu");
diff --git a/drivers/tty/serial/mfd_plat.c b/drivers/tty/serial/mfd_plat.c
new file mode 100644
index 0000000..261acd5
--- /dev/null
+++ b/drivers/tty/serial/mfd_plat.c
@@ -0,0 +1,244 @@
+/*
+ * mfd_plat.c: driver for High Speed UART device of Intel Medfield platform
+ *
+ * (C) Copyright 2013 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/acpi.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/pm_qos.h>
+#include <linux/pci.h>
+
+#include "mfd.h"
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id hsu_acpi_ids[] = {
+	{ "80860F0A", 0 },
+	{ "8086228A", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, hsu_acpi_ids);
+#endif
+
+#ifdef CONFIG_PM
+static int serial_hsu_plat_suspend(struct device *dev)
+{
+	struct uart_hsu_port *up = dev_get_drvdata(dev);
+	int ret = 0;
+
+	if (up) {
+		trace_hsu_func_start(up->index, __func__);
+		ret = serial_hsu_do_suspend(up);
+		trace_hsu_func_end(up->index, __func__, "");
+	}
+	return ret;
+}
+
+static int serial_hsu_plat_resume(struct device *dev)
+{
+	struct uart_hsu_port *up = dev_get_drvdata(dev);
+	int ret = 0;
+
+	if (up) {
+		trace_hsu_func_start(up->index, __func__);
+		ret = serial_hsu_do_resume(up);
+		trace_hsu_func_end(up->index, __func__, "");
+	}
+	return ret;
+}
+#else
+#define serial_hsu_plat_suspend	NULL
+#define serial_hsu_plat_resume	NULL
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int serial_hsu_plat_runtime_idle(struct device *dev)
+{
+	struct uart_hsu_port *up = dev_get_drvdata(dev);
+
+	return serial_hsu_do_runtime_idle(up);
+}
+
+static int serial_hsu_plat_runtime_suspend(struct device *dev)
+{
+	struct uart_hsu_port *up = dev_get_drvdata(dev);
+	int ret = 0;
+
+	trace_hsu_func_start(up->index, __func__);
+	ret = serial_hsu_do_suspend(up);
+	trace_hsu_func_end(up->index, __func__, "");
+	return ret;
+}
+
+static int serial_hsu_plat_runtime_resume(struct device *dev)
+{
+	struct uart_hsu_port *up = dev_get_drvdata(dev);
+	int ret = 0;
+
+	trace_hsu_func_start(up->index, __func__);
+	ret = serial_hsu_do_resume(up);
+	trace_hsu_func_end(up->index, __func__, "");
+	return ret;
+}
+#else
+#define serial_hsu_plat_runtime_idle		NULL
+#define serial_hsu_plat_runtime_suspend	NULL
+#define serial_hsu_plat_runtime_resume	NULL
+#endif
+
+static const struct dev_pm_ops serial_hsu_plat_pm_ops = {
+
+	SET_SYSTEM_SLEEP_PM_OPS(serial_hsu_plat_suspend,
+				serial_hsu_plat_resume)
+	SET_RUNTIME_PM_OPS(serial_hsu_plat_runtime_suspend,
+				serial_hsu_plat_runtime_resume,
+				serial_hsu_plat_runtime_idle)
+};
+
+static int serial_hsu_plat_port_probe(struct platform_device *pdev)
+{
+	struct uart_hsu_port *up;
+	int port = pdev->id, irq;
+	struct resource *mem, *ioarea;
+	const struct acpi_device_id *id;
+	resource_size_t start, len;
+
+#ifdef CONFIG_ACPI
+	for (id = hsu_acpi_ids; id->id[0]; id++)
+		if (!strncmp(id->id, dev_name(&pdev->dev), strlen(id->id))) {
+			acpi_status status;
+			unsigned long long tmp;
+
+			status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
+					"_UID", NULL, &tmp);
+			if (ACPI_FAILURE(status))
+				return -ENODEV;
+			port = tmp - 1;
+		}
+#endif
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem) {
+		dev_err(&pdev->dev, "no mem resource?\n");
+		return -EINVAL;
+	}
+	start = mem->start;
+	len = resource_size(mem);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "no irq resource?\n");
+		return irq; /* -ENXIO */
+	}
+
+	ioarea = request_mem_region(mem->start, resource_size(mem),
+			pdev->name);
+	if (!ioarea) {
+		dev_err(&pdev->dev, "HSU region already claimed\n");
+		return -EBUSY;
+	}
+
+	up = serial_hsu_port_setup(&pdev->dev, port, start, len,
+			irq);
+	if (IS_ERR(up)) {
+		release_mem_region(mem->start, resource_size(mem));
+		dev_err(&pdev->dev, "failed to setup HSU\n");
+		return -EINVAL;
+	}
+
+	platform_set_drvdata(pdev, up);
+
+	if (!pdev->dev.dma_mask) {
+		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	}
+	dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
+
+	return 0;
+}
+
+static int serial_hsu_plat_port_remove(struct platform_device *pdev)
+{
+	struct uart_hsu_port *up = platform_get_drvdata(pdev);
+	struct resource *mem;
+
+	pm_runtime_forbid(&pdev->dev);
+	serial_hsu_port_free(up);
+	platform_set_drvdata(pdev, NULL);
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (mem)
+		release_mem_region(mem->start, resource_size(mem));
+
+	return 0;
+}
+
+static void serial_hsu_plat_port_shutdown(struct platform_device *pdev)
+{
+	struct uart_hsu_port *up = platform_get_drvdata(pdev);
+
+	if (!up)
+		return;
+
+	serial_hsu_port_shutdown(up);
+}
+
+static struct platform_driver hsu_plat_driver = {
+	.remove		= serial_hsu_plat_port_remove,
+	.shutdown	= serial_hsu_plat_port_shutdown,
+	.driver		= {
+		.name	= "HSU serial",
+		.owner	= THIS_MODULE,
+/* Disable PM only when kgdb (poll-mode UART) is enabled */
+#if defined(CONFIG_PM) && !defined(CONFIG_CONSOLE_POLL)
+		.pm     = &serial_hsu_plat_pm_ops,
+#endif
+#ifdef CONFIG_ACPI
+		.acpi_match_table = ACPI_PTR(hsu_acpi_ids),
+#endif
+	},
+};
+
+static int __init hsu_plat_init(void)
+{
+	struct pci_dev *hsu_pci;
+
+	/*
+	 * If the controller is enumerated as a PCI device, skip the ACPI
+	 * platform registration. The BYT FDK exposes the HSU via both
+	 * enumeration modes (PCI and ACPI); prefer PCI and ignore ACPI.
+	 */
+	hsu_pci = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0F0A, NULL);
+	if (hsu_pci) {
+		pr_info("HSU serial: Find HSU controller in PCI device, "
+			"exit ACPI platform register!\n");
+		return 0;
+	}
+
+	return platform_driver_probe(&hsu_plat_driver, serial_hsu_plat_port_probe);
+}
+
+static void __exit hsu_plat_exit(void)
+{
+	platform_driver_unregister(&hsu_plat_driver);
+}
+
+module_init(hsu_plat_init);
+module_exit(hsu_plat_exit);
+
+MODULE_AUTHOR("Jason Chen <jason.cj.chen@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:medfield-hsu-plat");
diff --git a/drivers/tty/serial/mfd_trace.h b/drivers/tty/serial/mfd_trace.h
new file mode 100644
index 0000000..49afd9b
--- /dev/null
+++ b/drivers/tty/serial/mfd_trace.h
@@ -0,0 +1,197 @@
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE mfd_trace
+
+#define TRACE_SYSTEM hsu
+
+#if !defined(_TRACE_HSU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HSU_H
+
+#include <linux/tracepoint.h>
+
+#define hsucmd_name(cmd) { cmd, #cmd }
+#define show_hsucmd_name(val)			\
+	__print_symbolic(val,			\
+		hsucmd_name(qcmd_overflow),	\
+		hsucmd_name(qcmd_get_msr),	\
+		hsucmd_name(qcmd_set_mcr),	\
+		hsucmd_name(qcmd_set_ier),	\
+		hsucmd_name(qcmd_start_rx),	\
+		hsucmd_name(qcmd_stop_rx),	\
+		hsucmd_name(qcmd_start_tx),	\
+		hsucmd_name(qcmd_stop_tx),	\
+		hsucmd_name(qcmd_cl),		\
+		hsucmd_name(qcmd_port_irq),	\
+		hsucmd_name(qcmd_dma_irq),	\
+		hsucmd_name(qcmd_enable_irq),   \
+		hsucmd_name(qcmd_cmd_off))
+
+
+TRACE_EVENT(hsu_cmd_insert,
+
+	TP_PROTO(unsigned port, char cmd),
+
+	TP_ARGS(port, cmd),
+
+	TP_STRUCT__entry(
+		__field(unsigned, port)
+		__field(char, cmd)
+	),
+
+	TP_fast_assign(
+		__entry->port = port;
+		__entry->cmd = cmd;
+	),
+
+	TP_printk("port=%u cmd=%s", __entry->port,
+		show_hsucmd_name(__entry->cmd))
+);
+
+TRACE_EVENT(hsu_cmd_add,
+
+	TP_PROTO(unsigned port, char cmd),
+
+	TP_ARGS(port, cmd),
+
+	TP_STRUCT__entry(
+		__field(unsigned, port)
+		__field(char, cmd)
+	),
+
+	TP_fast_assign(
+		__entry->port = port;
+		__entry->cmd = cmd;
+	),
+
+	TP_printk("port=%u cmd=%s", __entry->port,
+		show_hsucmd_name(__entry->cmd))
+);
+
+TRACE_EVENT(hsu_cmd_start,
+
+	TP_PROTO(unsigned port, char cmd),
+
+	TP_ARGS(port, cmd),
+
+	TP_STRUCT__entry(
+		__field(unsigned, port)
+		__field(char, cmd)
+	),
+
+	TP_fast_assign(
+		__entry->port = port;
+		__entry->cmd = cmd;
+	),
+
+	TP_printk("port=%u cmd=%s", __entry->port,
+		show_hsucmd_name(__entry->cmd))
+);
+
+TRACE_EVENT(hsu_cmd_end,
+
+	TP_PROTO(unsigned port, char cmd),
+
+	TP_ARGS(port, cmd),
+
+	TP_STRUCT__entry(
+		__field(unsigned, port)
+		__field(char, cmd)
+	),
+
+	TP_fast_assign(
+		__entry->port = port;
+		__entry->cmd = cmd;
+	),
+
+	TP_printk("port=%u cmd=%s", __entry->port,
+		show_hsucmd_name(__entry->cmd))
+);
+
+TRACE_EVENT(hsu_func_start,
+
+	TP_PROTO(unsigned port, const char *func),
+
+	TP_ARGS(port, func),
+
+	TP_STRUCT__entry(
+		__field(unsigned, port)
+		__string(name, func)
+	),
+
+	TP_fast_assign(
+		__entry->port = port;
+		__assign_str(name, func);
+	),
+
+	TP_printk("port=%u func=%s", __entry->port,
+		__get_str(name))
+);
+
+TRACE_EVENT(hsu_func_end,
+
+	TP_PROTO(unsigned port, const char *func, char *err),
+
+	TP_ARGS(port, func, err),
+
+	TP_STRUCT__entry(
+		__field(unsigned, port)
+		__string(name, func)
+		__string(ret, err)
+	),
+
+	TP_fast_assign(
+		__entry->port = port;
+		__assign_str(name, func);
+		__assign_str(ret, err);
+	),
+
+	TP_printk("port=%u func=%s err=%s", __entry->port,
+		__get_str(name), __get_str(ret))
+);
+
+TRACE_EVENT(hsu_mctrl,
+
+	TP_PROTO(unsigned port, unsigned mctrl),
+
+	TP_ARGS(port, mctrl),
+
+	TP_STRUCT__entry(
+		__field(unsigned, port)
+		__field(unsigned, mctrl)
+	),
+
+	TP_fast_assign(
+		__entry->port = port;
+		__entry->mctrl = mctrl;
+	),
+
+	TP_printk("port=%u mctrl=%d", __entry->port, __entry->mctrl)
+);
+
+TRACE_EVENT(hsu_set_termios,
+
+	TP_PROTO(unsigned port, unsigned int baud, int ctsrts),
+
+	TP_ARGS(port, baud, ctsrts),
+
+	TP_STRUCT__entry(
+		__field(unsigned, port)
+		__field(unsigned int, baud)
+		__field(int, ctsrts)
+	),
+
+	TP_fast_assign(
+		__entry->port = port;
+		__entry->baud = baud;
+		__entry->ctsrts = ctsrts;
+	),
+
+	TP_printk("port=%u baud=%d ctsrts=%d", __entry->port,
+		__entry->baud, __entry->ctsrts)
+);
+
+#endif /* if !defined(_TRACE_HSU_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
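+
+/*
+ * These tracepoints are grouped under the "hsu" trace system, so --
+ * assuming the standard ftrace debugfs layout -- they can be enabled
+ * at runtime with e.g.:
+ *
+ *	echo 1 > /sys/kernel/debug/tracing/events/hsu/enable
+ *	cat /sys/kernel/debug/tracing/trace_pipe
+ */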
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
index 9b6ef20..09f437c 100644
--- a/drivers/tty/serial/mrst_max3110.c
+++ b/drivers/tty/serial/mrst_max3110.c
@@ -40,9 +40,11 @@
 #include <linux/tty_flip.h>
 #include <linux/serial_core.h>
 #include <linux/serial_reg.h>
+#include <linux/serial_max3110.h>
 
 #include <linux/kthread.h>
 #include <linux/spi/spi.h>
+#include <linux/pm.h>
 
 #include "mrst_max3110.h"
 
@@ -61,12 +63,14 @@
 	struct task_struct *main_thread;
 	struct task_struct *read_thread;
 	struct mutex thread_mutex;
+	struct mutex io_mutex;
 
 	u32 baud;
 	u16 cur_conf;
 	u8 clock;
 	u8 parity, word_7bits;
 	u16 irq;
+	u16 irq_edge_triggered;
 
 	unsigned long uart_flags;
 
@@ -90,6 +94,7 @@
 	struct spi_transfer	x;
 	int ret;
 
+	mutex_lock(&max->io_mutex);
 	spi_message_init(&message);
 	memset(&x, 0, sizeof x);
 	x.len = len;
@@ -104,6 +109,7 @@
 
 	/* Do the i/o */
 	ret = spi_sync(spi, &message);
+	mutex_unlock(&max->io_mutex);
 	return ret;
 }
 
@@ -259,7 +265,6 @@
 	return;
 }
 
-#define WORDS_PER_XFER	128
 static void send_circ_buf(struct uart_max3110 *max,
 				struct circ_buf *xmit)
 {
@@ -268,7 +273,7 @@
 	int i, len, blen, dma_size, left, ret = 0;
 
 
-	dma_size = WORDS_PER_XFER * sizeof(u16) * 2;
+	dma_size = M3110_RX_FIFO_DEPTH * sizeof(u16) * 2;
 	buf = kzalloc(dma_size, GFP_KERNEL | GFP_DMA);
 	if (!buf)
 		return;
@@ -278,7 +283,7 @@
 	while (!uart_circ_empty(xmit)) {
 		left = uart_circ_chars_pending(xmit);
 		while (left) {
-			len = min(left, WORDS_PER_XFER);
+			len = min(left, M3110_RX_FIFO_DEPTH);
 			blen = len * sizeof(u16);
 			memset(ibuf, 0, blen);
 
@@ -414,8 +419,8 @@
 				max->uart_flags || kthread_should_stop());
 
 		mutex_lock(&max->thread_mutex);
-
-		if (test_and_clear_bit(BIT_IRQ_PENDING, &max->uart_flags))
+		if (max->irq_edge_triggered &&
+			test_and_clear_bit(BIT_IRQ_PENDING, &max->uart_flags))
 			max3110_con_receive(max);
 
 		/* first handle console output */
@@ -437,11 +442,15 @@
 {
 	struct uart_max3110 *max = dev_id;
 
-	/* max3110's irq is a falling edge, not level triggered,
-	 * so no need to disable the irq */
+	if (max->irq_edge_triggered) {
+		/* max3110's irq is a falling edge, not level triggered,
+		 * so no need to disable the irq */
 
-	if (!test_and_set_bit(BIT_IRQ_PENDING, &max->uart_flags))
-		wake_up(&max->wq);
+		if (!test_and_set_bit(BIT_IRQ_PENDING, &max->uart_flags))
+			wake_up(&max->wq);
+	} else {
+		max3110_con_receive(max);
+	}
 
 	return IRQ_HANDLED;
 }
@@ -490,20 +499,10 @@
 	/* as we use thread to handle tx/rx, need set low latency */
 	port->state->port.low_latency = 1;
 
-	if (max->irq) {
-		max->read_thread = NULL;
-		ret = request_irq(max->irq, serial_m3110_irq,
-				IRQ_TYPE_EDGE_FALLING, "max3110", max);
-		if (ret) {
-			max->irq = 0;
-			pr_err(PR_FMT "unable to allocate IRQ, polling\n");
-		}  else {
-			/* Enable RX IRQ only */
-			config |= WC_RXA_IRQ_ENABLE;
-		}
-	}
-
-	if (max->irq == 0) {
+	if (max->irq > 0) {
+		/* Enable RX IRQ only */
+		config |= WC_RXA_IRQ_ENABLE;
+	} else {
 		/* If IRQ is disabled, start a read thread for input data */
 		max->read_thread =
 			kthread_run(max3110_read_thread, max, "max3110_read");
@@ -517,8 +516,6 @@
 
 	ret = max3110_out(max, config);
 	if (ret) {
-		if (max->irq)
-			free_irq(max->irq, max);
 		if (max->read_thread)
 			kthread_stop(max->read_thread);
 		max->read_thread = NULL;
@@ -540,9 +537,6 @@
 		max->read_thread = NULL;
 	}
 
-	if (max->irq)
-		free_irq(max->irq, max);
-
 	/* Disable interrupts from this port */
 	config = WC_TAG | WC_SW_SHDI;
 	max3110_out(max, config);
@@ -749,7 +743,8 @@
 	struct spi_device *spi = to_spi_device(dev);
 	struct uart_max3110 *max = spi_get_drvdata(spi);
 
-	disable_irq(max->irq);
+	if (max->irq > 0)
+		disable_irq(max->irq);
 	uart_suspend_port(&serial_m3110_reg, &max->port);
 	max3110_out(max, max->cur_conf | WC_SW_SHDI);
 	return 0;
@@ -762,7 +757,8 @@
 
 	max3110_out(max, max->cur_conf);
 	uart_resume_port(&serial_m3110_reg, &max->port);
-	enable_irq(max->irq);
+	if (max->irq > 0)
+		enable_irq(max->irq);
 	return 0;
 }
 
@@ -780,6 +776,10 @@
 	void *buffer;
 	u16 res;
 	int ret = 0;
+	struct plat_max3110 *pdata = spi->dev.platform_data;
+
+	if (!pdata)
+		return -EINVAL;
 
 	max = kzalloc(sizeof(*max), GFP_KERNEL);
 	if (!max)
@@ -803,6 +803,7 @@
 	max->irq = (u16)spi->irq;
 
 	mutex_init(&max->thread_mutex);
+	mutex_init(&max->io_mutex);
 
 	max->word_7bits = 0;
 	max->parity = 0;
@@ -840,6 +841,25 @@
 		goto err_kthread;
 	}
 
+	max->irq_edge_triggered = pdata->irq_edge_triggered;
+
+	if (max->irq > 0) {
+		if (max->irq_edge_triggered) {
+			ret = request_irq(max->irq, serial_m3110_irq,
+					IRQ_TYPE_EDGE_FALLING, "max3110", max);
+		} else {
+			ret = request_threaded_irq(max->irq, NULL,
+					serial_m3110_irq,
+					IRQF_ONESHOT, "max3110", max);
+		}
+
+		if (ret) {
+			max->irq = 0;
+			dev_warn(&spi->dev,
+			"unable to allocate IRQ, will use polling method\n");
+		}
+	}
+
 	spi_set_drvdata(spi, max);
 	pmax = max;
 
@@ -867,6 +887,9 @@
 
 	free_page((unsigned long)max->con_xmit.buf);
 
+	if (max->irq)
+		free_irq(max->irq, max);
+
 	if (max->main_thread)
 		kthread_stop(max->main_thread);
 
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 20e4c94..2c5a3e4 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -232,7 +232,7 @@
 	unsigned int iobase;
 	struct pci_dev *pdev;
 	int fifo_size;
-	int uartclk;
+	unsigned int uartclk;
 	int start_tx;
 	int start_rx;
 	int tx_empty;
@@ -373,35 +373,62 @@
 };
 #endif	/* CONFIG_DEBUG_FS */
 
+static struct dmi_system_id pch_uart_dmi_table[] = {
+	{
+		.ident = "CM-iTC",
+		{
+			DMI_MATCH(DMI_BOARD_NAME, "CM-iTC"),
+		},
+		(void *)CMITC_UARTCLK,
+	},
+	{
+		.ident = "FRI2",
+		{
+			DMI_MATCH(DMI_BIOS_VERSION, "FRI2"),
+		},
+		(void *)FRI2_64_UARTCLK,
+	},
+	{
+		.ident = "Fish River Island II",
+		{
+			DMI_MATCH(DMI_PRODUCT_NAME, "Fish River Island II"),
+		},
+		(void *)FRI2_48_UARTCLK,
+	},
+	{
+		.ident = "COMe-mTT",
+		{
+			DMI_MATCH(DMI_BOARD_NAME, "COMe-mTT"),
+		},
+		(void *)NTC1_UARTCLK,
+	},
+	{
+		.ident = "nanoETXexpress-TT",
+		{
+			DMI_MATCH(DMI_BOARD_NAME, "nanoETXexpress-TT"),
+		},
+		(void *)NTC1_UARTCLK,
+	},
+	{
+		.ident = "MinnowBoard",
+		{
+			DMI_MATCH(DMI_BOARD_NAME, "MinnowBoard"),
+		},
+		(void *)MINNOW_UARTCLK,
+	},
+	{ }	/* Terminator; dmi_first_match() stops at an empty entry */
+};
+
 /* Return UART clock, checking for board specific clocks. */
-static int pch_uart_get_uartclk(void)
+static unsigned int pch_uart_get_uartclk(void)
 {
-	const char *cmp;
+	const struct dmi_system_id *d;
 
 	if (user_uartclk)
 		return user_uartclk;
 
-	cmp = dmi_get_system_info(DMI_BOARD_NAME);
-	if (cmp && strstr(cmp, "CM-iTC"))
-		return CMITC_UARTCLK;
-
-	cmp = dmi_get_system_info(DMI_BIOS_VERSION);
-	if (cmp && strnstr(cmp, "FRI2", 4))
-		return FRI2_64_UARTCLK;
-
-	cmp = dmi_get_system_info(DMI_PRODUCT_NAME);
-	if (cmp && strstr(cmp, "Fish River Island II"))
-		return FRI2_48_UARTCLK;
-
-	/* Kontron COMe-mTT10 (nanoETXexpress-TT) */
-	cmp = dmi_get_system_info(DMI_BOARD_NAME);
-	if (cmp && (strstr(cmp, "COMe-mTT") ||
-		    strstr(cmp, "nanoETXexpress-TT")))
-		return NTC1_UARTCLK;
-
-	cmp = dmi_get_system_info(DMI_BOARD_NAME);
-	if (cmp && strstr(cmp, "MinnowBoard"))
-		return MINNOW_UARTCLK;
+	d = dmi_first_match(pch_uart_dmi_table);
+	if (d)
+		return (unsigned long)d->driver_data;
 
 	return DEFAULT_UARTCLK;
 }
@@ -422,7 +449,7 @@
 	iowrite8(ier, priv->membase + UART_IER);
 }
 
-static int pch_uart_hal_set_line(struct eg20t_port *priv, int baud,
+static int pch_uart_hal_set_line(struct eg20t_port *priv, unsigned int baud,
 				 unsigned int parity, unsigned int bits,
 				 unsigned int stb)
 {
@@ -457,7 +484,7 @@
 	lcr |= bits;
 	lcr |= stb;
 
-	dev_dbg(priv->port.dev, "%s:baud = %d, div = %04x, lcr = %02x (%lu)\n",
+	dev_dbg(priv->port.dev, "%s:baud = %u, div = %04x, lcr = %02x (%lu)\n",
 		 __func__, baud, div, lcr, jiffies);
 	iowrite8(PCH_UART_LCR_DLAB, priv->membase + UART_LCR);
 	iowrite8(dll, priv->membase + PCH_UART_DLL);
@@ -1366,9 +1393,8 @@
 static void pch_uart_set_termios(struct uart_port *port,
 				 struct ktermios *termios, struct ktermios *old)
 {
-	int baud;
 	int rtn;
-	unsigned int parity, bits, stb;
+	unsigned int baud, parity, bits, stb;
 	struct eg20t_port *priv;
 	unsigned long flags;
 
@@ -1501,6 +1527,7 @@
 	return 0;
 }
 
+#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_PCH_UART_CONSOLE)
 /*
  *	Wait for transmitter & holding register to empty
  */
@@ -1531,6 +1558,7 @@
 		}
 	}
 }
+#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_PCH_UART_CONSOLE */
 
 #ifdef CONFIG_CONSOLE_POLL
 /*
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 1fabb22..57977ec 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -95,6 +95,9 @@
 	struct uart_state *state = tty->driver_data;
 	struct uart_port *port = state->uart_port;
 
+	if (port->ops->wake_peer)
+		port->ops->wake_peer(port);
+
 	if (!uart_circ_empty(&state->xmit) && state->xmit.buf &&
 	    !tty->stopped && !tty->hw_stopped)
 		port->ops->start_tx(port);
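/*
 * A minimal sketch of a driver-side wake_peer() implementation; the
 * foo_uart structure and its bt_wake_gpio field are hypothetical, and
 * the hook itself is assumed to exist in struct uart_ops as added by
 * this series.
 */
static void foo_uart_wake_peer(struct uart_port *port)
{
	struct foo_uart *fp = container_of(port, struct foo_uart, port);

	/* assert the peer's wake line before the first TX byte goes out */
	gpio_set_value(fp->bt_wake_gpio, 1);
}

static struct uart_ops foo_uart_ops = {
	/* ...the usual start_tx/stop_tx/etc. callbacks... */
	.wake_peer	= foo_uart_wake_peer,
};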
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index cbfd3d1..59a2178 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2138,6 +2138,11 @@
 	usb_lock_device(udev);
 	usb_remote_wakeup(udev);
 	usb_unlock_device(udev);
+	if (HCD_IRQ_DISABLED(hcd)) {
+		/* We can now process IRQs so enable IRQ */
+		clear_bit(HCD_FLAG_IRQ_DISABLED, &hcd->flags);
+		enable_irq(hcd->irq);
+	}
 }
 
 /**
@@ -2225,9 +2230,23 @@
 	 */
 	local_irq_save(flags);
 
-	if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd)))
+	if (unlikely(HCD_DEAD(hcd)))
 		rc = IRQ_NONE;
-	else if (hcd->driver->irq(hcd) == IRQ_NONE)
+	else if (unlikely(!HCD_HW_ACCESSIBLE(hcd))) {
+		if (hcd->has_wakeup_irq) {
+			/*
+			 * We got a wakeup interrupt while the controller was
+			 * suspending or suspended. We can't handle it now, so
+			 * disable the IRQ and resume the root hub (and hence
+			 * the controller too).
+			 */
+			disable_irq_nosync(hcd->irq);
+			set_bit(HCD_FLAG_IRQ_DISABLED, &hcd->flags);
+			usb_hcd_resume_root_hub(hcd);
+			rc = IRQ_HANDLED;
+		} else
+			rc = IRQ_NONE;
+	} else if (hcd->driver->irq(hcd) == IRQ_NONE)
 		rc = IRQ_NONE;
 	else
 		rc = IRQ_HANDLED;
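/*
 * A minimal sketch of the opt-in side of this contract: an HCD whose
 * IRQ line can fire while the controller is suspended sets
 * has_wakeup_irq at probe time (as the DWC3 host glue later in this
 * merge does). foo_hc_driver and the probe function are hypothetical.
 */
static int foo_hcd_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd = usb_create_hcd(&foo_hc_driver, &pdev->dev,
					     dev_name(&pdev->dev));
	if (!hcd)
		return -ENOMEM;

	/* wakeup IRQs may arrive while suspended; let usb_hcd_irq() defer */
	hcd->has_wakeup_irq = 1;

	return usb_add_hcd(hcd, platform_get_irq(pdev, 0), IRQF_SHARED);
}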
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 757aa18..7aaa51f 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -40,6 +40,72 @@
 
 endchoice
 
+comment "Platform Glue Driver Support"
+
+config USB_DWC3_PCI
+	tristate "PCIe-based Platforms"
+	depends on PCI
+	default USB_DWC3
+	help
+	  If you're using the DesignWare Core IP on a PCIe-based
+	  platform, please say 'Y' or 'M' here.
+
+	  One such PCIe-based platform is Synopsys' PCIe HAPS model of
+	  this IP.
+
+config USB_DWC3_OTG
+	tristate "DWC3 OTG mode support"
+	depends on USB && PCI
+	select USB_OTG
+	help
+	  Say Y here to enable the DWC3 OTG driver.
+	  This driver implements the OTG framework for the DWC3 OTG
+	  controller, supporting role switching and charger detection and
+	  maintaining one OTG state machine. It must be used together with
+	  a platform-specific glue driver, because every platform has its
+	  own hardware design.
+
+config USB_DWC3_INTEL_MRFL
+	tristate "DWC OTG 3.0 for Intel Merrifield platforms"
+	depends on USB && USB_DWC3_OTG
+	select USB_DWC3_DEVICE_INTEL
+	help
+	  Say Y here to enable the DWC3 OTG driver for Intel Merrifield
+	  platforms. It implements the OTG feature on the DWC3 OTG
+	  controller and supports role switching and charger detection.
+	  This option must be set if you want to enable host mode on
+	  Intel Merrifield platforms.
+
+config USB_DWC3_INTEL_BYT
+	tristate "DWC OTG 3.0 for Intel Baytrail platforms"
+	depends on USB && USB_DWC3_OTG
+	select USB_DWC3_DEVICE_INTEL
+	help
+	  Say Y here to enable the DWC3 OTG driver for Intel Baytrail
+	  platforms. It implements the OTG feature on the DWC3 OTG
+	  controller and supports role switching and charger detection.
+	  This option must be set if you want to enable device mode on
+	  Intel Baytrail platforms.
+
+config USB_DWC3_DEVICE_INTEL
+	bool "DWC3 Device Mode support on Intel platform"
+	depends on USB_DWC3_OTG
+	help
+	  Support device mode of the DWC3 controller on Intel platforms.
+	  It implements the device mode feature on the DWC3 OTG controller.
+	  This option must be set if you want to enable device mode on
+	  Intel platforms (e.g. Baytrail and Merrifield).
+
+config USB_DWC3_HOST_INTEL
+	bool "DWC3 Host Mode support on Intel Merrifield platform"
+	depends on USB_ARCH_HAS_XHCI && USB_DWC3_INTEL_MRFL
+	help
+	  Support host mode of the DWC3 controller on the Intel Merrifield
+	  platform. It should be enabled together with the DWC3 Intel glue
+	  driver, because Intel platforms use a different design from the
+	  standard USB_DWC3_HOST. If you want to enable host mode on an
+	  Intel platform, you have to enable this option.
+
+comment "Debugging features"
+
 config USB_DWC3_DEBUG
 	bool "Enable Debugging Messages"
 	help
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index 0c7ac92..1bc7013 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -1,6 +1,13 @@
 ccflags-$(CONFIG_USB_DWC3_DEBUG)	:= -DDEBUG
 ccflags-$(CONFIG_USB_DWC3_VERBOSE)	+= -DVERBOSE_DEBUG
 
+obj-$(CONFIG_USB_DWC3_DEVICE_INTEL)		+= dwc3-device-intel.o
+obj-$(CONFIG_USB_DWC3_INTEL_MRFL)		+= dwc3-intel-mrfl.o
+ifneq ($(CONFIG_DEBUG_FS),)
+	obj-$(CONFIG_USB_DWC3_DEVICE_INTEL)	+= debugfs.o
+endif
+
+ifeq ($(CONFIG_USB_DWC3_DEVICE_INTEL),)
 obj-$(CONFIG_USB_DWC3)			+= dwc3.o
 
 dwc3-y					:= core.o
@@ -16,6 +23,7 @@
 ifneq ($(CONFIG_DEBUG_FS),)
 	dwc3-y				+= debugfs.o
 endif
+endif
 
 ##
 # Platform-specific glue layers go here
@@ -36,6 +44,6 @@
 obj-$(CONFIG_USB_DWC3)		+= dwc3-exynos.o
 
 ifneq ($(CONFIG_PCI),)
+	obj-$(CONFIG_USB_DWC3_OTG)	+= otg.o
 	obj-$(CONFIG_USB_DWC3)		+= dwc3-pci.o
 endif
-
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 1d38603..6b8a1e0 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -53,6 +53,7 @@
 #include <linux/usb/otg.h>
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
+#include <linux/usb/ulpi.h>
 
 #include "core.h"
 #include "gadget.h"
@@ -256,7 +257,8 @@
 		dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(n), 0);
 		dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(n), 0);
 		dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(n), 0);
-		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(n), 0);
+		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(n),
+			dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(n)));
 	}
 }
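/*
 * GEVNTCOUNT decrements by the value written to it rather than being
 * overwritten, which is why the cleanup above writes back what it just
 * read. A minimal sketch of that acknowledge idiom in isolation;
 * dwc3_ack_all_events is a hypothetical helper.
 */
static void dwc3_ack_all_events(struct dwc3 *dwc, int buf)
{
	u32 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf)) &
			DWC3_GEVNTCOUNT_MASK;

	if (count)
		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), count);
}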
 
@@ -297,6 +299,7 @@
 	unsigned long		timeout;
 	u32			reg;
 	int			ret;
+	struct usb_phy		*usb_phy;
 
 	reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
 	/* This should read as U3 followed by revision number */
@@ -307,6 +310,13 @@
 	}
 	dwc->revision = reg;
 
+	dwc3_core_soft_reset(dwc);
+
+	/* Delay 1 ms before the DCTL soft reset to make it safer from
+	 * hitting the Tx-CMD PHY hang issue.
+	 */
+	mdelay(1);
+
 	/* issue device SoftReset too */
 	timeout = jiffies + msecs_to_jiffies(500);
 	dwc3_writel(dwc->regs, DWC3_DCTL, DWC3_DCTL_CSFTRST);
@@ -324,7 +334,14 @@
 		cpu_relax();
 	} while (true);
 
-	dwc3_core_soft_reset(dwc);
+	/* The DCTL soft reset may cause a PHY hang; delay 1 ms and check
+	 * that the ULPI interface still responds.
+	 */
+	mdelay(1);
+	usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
+	if (!IS_ERR_OR_NULL(usb_phy)) {
+		if (usb_phy_io_read(usb_phy, ULPI_VENDOR_ID_LOW) < 0)
+			dev_err(dwc->dev,
+				"ULPI not working after DCTL soft reset\n");
+		usb_put_phy(usb_phy);
+	}
 
 	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
 	reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 7ab3c99..2610417 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -51,6 +51,7 @@
 #include <linux/usb/gadget.h>
 
 /* Global constants */
+#define DWC3_SCRATCH_BUF_SIZE	4096
 #define DWC3_EP0_BOUNCE_SIZE	512
 #define DWC3_ENDPOINTS_NUM	32
 #define DWC3_XHCI_RESOURCES_NUM	2
@@ -194,6 +195,10 @@
 #define DWC3_GTXFIFOSIZ_TXFDEF(n)	((n) & 0xffff)
 #define DWC3_GTXFIFOSIZ_TXFSTADDR(n)	((n) & 0xffff0000)
 
+/* Global Event Size Registers */
+#define DWC3_GEVNTSIZ_INTMASK		(1 << 31)
+#define DWC3_GEVNTSIZ_SIZE(n)		((n) & 0xffff)
+
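/*
 * A minimal sketch of how these bits are typically used: the buffer
 * size sits in the low half while bit 31 gates the event interrupt.
 * dwc3_mask_event_buf is a hypothetical helper.
 */
static void dwc3_mask_event_buf(struct dwc3 *dwc, int n, u32 length)
{
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(n),
		    DWC3_GEVNTSIZ_INTMASK | DWC3_GEVNTSIZ_SIZE(length));
}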
 /* Global HWPARAMS1 Register */
 #define DWC3_GHWPARAMS1_EN_PWROPT(n)	(((n) & (3 << 24)) >> 24)
 #define DWC3_GHWPARAMS1_EN_PWROPT_NO	0
@@ -309,6 +314,7 @@
 #define DWC3_DGCMD_SET_LMP		0x01
 #define DWC3_DGCMD_SET_PERIODIC_PAR	0x02
 #define DWC3_DGCMD_XMIT_FUNCTION	0x03
+#define DWC3_DGCMD_SET_SCRATCH_ADDR_LO	0x04
 
 /* These apply for core versions 1.94a and later */
 #define DWC3_DGCMD_SET_SCRATCHPAD_ADDR_LO	0x04
@@ -408,9 +414,11 @@
  * @trb_pool_dma: dma address of @trb_pool
  * @free_slot: next slot which is going to be used
  * @busy_slot: first slot which is owned by HW
+ * @ep_state: endpoint state
  * @desc: usb_endpoint_descriptor pointer
  * @dwc: pointer to DWC controller
  * @flags: endpoint flags (wedged, stalled, ...)
+ * @flags_backup: backup endpoint flags
  * @current_trb: index of current used trb
  * @number: endpoint number (1 - 15)
  * @type: set to bmAttributes & USB_ENDPOINT_XFERTYPE_MASK
@@ -429,16 +437,23 @@
 	dma_addr_t		trb_pool_dma;
 	u32			free_slot;
 	u32			busy_slot;
+	u32			ep_state;
 	const struct usb_ss_ep_comp_descriptor *comp_desc;
 	struct dwc3		*dwc;
 
+	struct ebc_io		*ebc;
+#define DWC3_EP_EBC_OUT_NB	16
+#define DWC3_EP_EBC_IN_NB	17
+
 	unsigned		flags;
+	unsigned		flags_backup;
 #define DWC3_EP_ENABLED		(1 << 0)
 #define DWC3_EP_STALL		(1 << 1)
 #define DWC3_EP_WEDGE		(1 << 2)
 #define DWC3_EP_BUSY		(1 << 4)
 #define DWC3_EP_PENDING_REQUEST	(1 << 5)
 #define DWC3_EP_MISSED_ISOC	(1 << 6)
+#define DWC3_EP_HIBERNATION	(1 << 7)
 
 	/* This last one is specific to EP0 */
 #define DWC3_EP0_DIR_IN		(1 << 31)
@@ -495,6 +510,13 @@
 	DWC3_LINK_STATE_MASK		= 0x0f,
 };
 
+enum dwc3_pm_state {
+	PM_DISCONNECTED = 0,
+	PM_ACTIVE,
+	PM_SUSPENDED,
+	PM_RESUMING,
+};
+
 /* TRB Length, PCM and Status */
 #define DWC3_TRB_SIZE_MASK	(0x00ffffff)
 #define DWC3_TRB_SIZE_LENGTH(n)	((n) & DWC3_TRB_SIZE_MASK)
@@ -600,6 +622,7 @@
 	unsigned		direction:1;
 	unsigned		mapped:1;
 	unsigned		queued:1;
+	unsigned		short_packet:1;
 };
 
 /*
@@ -611,6 +634,22 @@
 };
 
 /**
+ * struct dwc3_hwregs - registers saved when entering hibernation
+ */
+struct dwc3_hwregs {
+	u32	guctl;
+	u32	dcfg;
+	u32	devten;
+	u32	gctl;
+	u32	gusb3pipectl0;
+	u32	gusb2phycfg0;
+	u32	gevntadrlo;
+	u32	gevntadrhi;
+	u32	gevntsiz;
+	u32	grxthrcfg;
+};
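/*
 * A minimal sketch of how these fields pair with register reads on the
 * way into hibernation; dwc3_save_hwregs is a hypothetical helper and
 * only a few of the fields are shown.
 */
static void dwc3_save_hwregs(struct dwc3 *dwc)
{
	struct dwc3_hwregs *r = &dwc->hwregs;

	r->gctl   = dwc3_readl(dwc->regs, DWC3_GCTL);
	r->dcfg   = dwc3_readl(dwc->regs, DWC3_DCFG);
	r->devten = dwc3_readl(dwc->regs, DWC3_DEVTEN);
	/* ...the remaining fields follow the same pattern... */
}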
+
+/**
  * struct dwc3 - representation of our controller
  * @ctrl_req: usb control request which is used for ep0
  * @ep0_trb: trb which is used for the ctrl_req
@@ -727,6 +766,7 @@
 	unsigned		needs_fifo_resize:1;
 	unsigned		resize_fifos:1;
 	unsigned		pullups_connected:1;
+	unsigned		quirks_disable_irqthread:1;
 
 	enum dwc3_ep0_next	ep0_next_event;
 	enum dwc3_ep0_state	ep0state;
@@ -748,9 +788,23 @@
 	struct dwc3_hwparams	hwparams;
 	struct dentry		*root;
 	struct debugfs_regset32	*regset;
+	enum dwc3_pm_state	pm_state;
+	u8			is_otg;
+	u8			soft_connected;
 
 	u8			test_mode;
 	u8			test_mode_nr;
+
+	/* delayed work for handling Link State Change */
+	struct delayed_work	link_work;
+
+	u8			is_ebc;
+
+	struct dwc3_scratchpad_array	*scratch_array;
+	dma_addr_t		scratch_array_dma;
+	void			*scratch_buffer[DWC3_MAX_HIBER_SCRATCHBUFS];
+	struct dwc3_hwregs	hwregs;
+	bool			hiber_enabled;
 };
 
 /* -------------------------------------------------------------------------- */
@@ -877,6 +931,23 @@
 	struct dwc3_event_gevt		gevt;
 };
 
+struct ebc_io {
+	const char	*name;
+	const char	*epname;
+	u8		epnum;
+	u8		is_ondemand;
+	u8		static_trb_pool_size;
+	struct list_head	list;
+	int		(*init) (void);
+	void		*(*alloc_static_trb_pool) (dma_addr_t *dma_addr);
+	void		(*free_static_trb_pool) (void);
+	int		(*xfer_start) (void);
+	int		(*xfer_stop) (void);
+};
+
+void dwc3_register_io_ebc(struct ebc_io *ebc);
+void dwc3_unregister_io_ebc(struct ebc_io *ebc);
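/*
 * A minimal sketch of an ebc_io client using the two hooks above; the
 * modem_* names and transfer callbacks are hypothetical.
 */
static struct ebc_io modem_ebc_in = {
	.name			= "modem-in",
	.epname			= "ep17in",
	.epnum			= DWC3_EP_EBC_IN_NB,
	.is_ondemand		= 1,
	.xfer_start		= modem_xfer_start,
	.xfer_stop		= modem_xfer_stop,
};

static int __init modem_ebc_init(void)
{
	dwc3_register_io_ebc(&modem_ebc_in);
	return 0;
}

static void __exit modem_ebc_exit(void)
{
	dwc3_unregister_io_ebc(&modem_ebc_in);
}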
+
 /*
  * DWC3 Features to be used as Driver Data
  */
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 9e9f122..ea38e22 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -638,6 +638,48 @@
 	.release		= single_release,
 };
 
+static int dwc3_hiber_enabled_show(struct seq_file *s, void *unused)
+{
+	struct dwc3		*dwc = s->private;
+
+	if (dwc->hiber_enabled)
+		seq_puts(s, "hibernation enabled\n");
+	else
+		seq_puts(s, "hibernation disabled\n");
+
+	return 0;
+}
+
+static int dwc3_hiber_enabled_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dwc3_hiber_enabled_show, inode->i_private);
+}
+
+static ssize_t dwc3_hiber_enabled_write(struct file *file,
+		const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct seq_file		*s = file->private_data;
+	struct dwc3		*dwc = s->private;
+	char			buf[32];
+	size_t			len = min_t(size_t, sizeof(buf) - 1, count);
+	int			enabled = 0;
+
+	if (copy_from_user(buf, ubuf, len))
+		return -EFAULT;
+	buf[len] = '\0';	/* sscanf() needs a NUL-terminated string */
+
+	sscanf(buf, "%d", &enabled);
+	dwc->hiber_enabled = enabled;
+
+	return count;
+}
+
+static const struct file_operations dwc3_hiber_enabled_fops = {
+	.open			= dwc3_hiber_enabled_open,
+	.write			= dwc3_hiber_enabled_write,
+	.read			= seq_read,
+	.llseek			= seq_lseek,
+	.release		= single_release,
+};
+
 int dwc3_debugfs_init(struct dwc3 *dwc)
 {
 	struct dentry		*root;
@@ -692,6 +734,13 @@
 			ret = -ENOMEM;
 			goto err1;
 		}
+
+		file = debugfs_create_file("hiber_enabled", S_IRUGO | S_IWUSR,
+				root, dwc, &dwc3_hiber_enabled_fops);
+		if (!file) {
+			ret = -ENOMEM;
+			goto err1;
+		}
 	}
 
 	return 0;
diff --git a/drivers/usb/dwc3/dwc3-device-intel.c b/drivers/usb/dwc3/dwc3-device-intel.c
new file mode 100644
index 0000000..0ffde5c
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-device-intel.c
@@ -0,0 +1,648 @@
+/**
+ * Copyright (C) 2012 Intel Corp.
+ * Author: Jiebing Li
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <linux/usb/dwc3-intel-mid.h>
+#include <linux/usb/phy.h>
+
+#include "core.h"
+#include "gadget.h"
+#include "io.h"
+#include "otg.h"
+
+#include "debug.h"
+
+#include "core.c"
+#include "ep0.c"
+#include "gadget.c"
+
+/* FLIS register */
+#define APBFC_EXIOTG3_MISC0_REG		0xF90FF85C
+
+/* Global User Control Register Auto Retry bit */
+#define DWC3_GUCTL_USB_HST_IN_AUTO_RETRY_EN	(1 << 14)
+
+/* Global Configuration Register */
+#define DWC3_GRXTHRCFG_USBRXPKTCNTSEL		(1 << 29)
+#define DWC3_GRXTHRCFG_USBRXPKTCNT(n)		(n << 24)
+#define DWC3_GRXTHRCFG_USBRXPKTCNT_MASK		(0xf << 24)
+#define DWC3_GRXTHRCFG_USBMAXRXBURSTSIZE(n)	(n << 19)
+#define DWC3_GRXTHRCFG_USBMAXRXBURSTSIZE_MASK	(0x1f << 19)
+
+/**
+ * struct dwc3_dev_data - Structure holding platform related
+ *			information
+ * @flis_reg:		FLIS register
+ * @grxthrcfg:		DWC3 GRXTHCFG register
+ */
+struct dwc3_dev_data {
+	struct dwc3		*dwc;
+	void __iomem		*flis_reg;
+	u32			grxthrcfg;
+	struct mutex		mutex;
+};
+
+static struct dwc3_dev_data	*_dev_data;
+
+/*
+ * dwc3_set_flis_reg - set FLIS register
+ *
+ * This is a workaround for OTG3 IP bug of using EP #8 for host mode
+ */
+static void dwc3_set_flis_reg(void)
+{
+	u32			reg;
+	void __iomem		*flis_reg;
+
+	flis_reg = _dev_data->flis_reg;
+
+	reg = dwc3_readl(flis_reg, DWC3_GLOBALS_REGS_START);
+	reg &= ~(1 << 3);
+	dwc3_writel(flis_reg, DWC3_GLOBALS_REGS_START, reg);
+}
+
+/*
+ * dwc3_disable_multi_packet - set the GRXTHRCFG register to disable
+ * reception multi-packet thresholding for core revision 2.50a.
+ */
+static void dwc3_disable_multi_packet(struct dwc3 *dwc)
+{
+	u32			reg;
+
+	reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
+	_dev_data->grxthrcfg = reg;
+	if (reg) {
+		reg &= ~DWC3_GRXTHRCFG_USBRXPKTCNTSEL;
+		reg &= ~DWC3_GRXTHRCFG_USBRXPKTCNT_MASK;
+		reg &= ~DWC3_GRXTHRCFG_USBMAXRXBURSTSIZE_MASK;
+
+		dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
+	}
+}
+
+/*
+ * dwc3_enable_host_auto_retry - clear Auto Retry Enable bit
+ * for device mode
+ */
+static void dwc3_enable_host_auto_retry(struct dwc3 *dwc, bool enable)
+{
+	u32			reg;
+
+	reg = dwc3_readl(dwc->regs, DWC3_GUCTL);
+
+	if (enable)
+		reg |= DWC3_GUCTL_USB_HST_IN_AUTO_RETRY_EN;
+	else
+		reg &= ~DWC3_GUCTL_USB_HST_IN_AUTO_RETRY_EN;
+
+	dwc3_writel(dwc->regs, DWC3_GUCTL, reg);
+}
+
+static void dwc3_do_extra_change(struct dwc3 *dwc)
+{
+	dwc3_set_flis_reg();
+
+	if (dwc->revision == DWC3_REVISION_250A)
+		dwc3_disable_multi_packet(dwc);
+
+	dwc3_enable_host_auto_retry(dwc, false);
+}
+
+static void dwc3_enable_hibernation(struct dwc3 *dwc)
+{
+	u32 num, reg;
+
+	if (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1)
+		!= DWC3_GHWPARAMS1_EN_PWROPT_HIB) {
+		dev_err(dwc->dev, "Device Mode Hibernation is not supported\n");
+		return;
+	}
+
+	num = DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(
+		 dwc->hwparams.hwparams4);
+	if (num != 1)
+		dev_err(dwc->dev, "number of scratchpad buffer: %d\n", num);
+
+	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+	reg |= DWC3_GCTL_GBLHIBERNATIONEN;
+	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+
+	dwc3_send_gadget_generic_command(dwc, DWC3_DGCMD_SET_SCRATCH_ADDR_LO,
+		dwc->scratch_array_dma & 0xffffffffU);
+}
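/*
 * A minimal sketch of providing the scratchpad that the generic command
 * above points the controller at; dwc3_alloc_scratch is a hypothetical
 * helper built on the standard coherent-DMA API.
 */
static int dwc3_alloc_scratch(struct dwc3 *dwc)
{
	dwc->scratch_array = dma_alloc_coherent(dwc->dev,
			DWC3_SCRATCH_BUF_SIZE, &dwc->scratch_array_dma,
			GFP_KERNEL);

	return dwc->scratch_array ? 0 : -ENOMEM;
}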
+
+/*
+ * Re-written IRQ handling that does not use an IRQ thread, because the
+ * IRQ thread hurts USB performance, especially USB networking: USB3 UDP
+ * download throughput drops from 80 MB/s to 40 MB/s with it enabled.
+ */
+static irqreturn_t dwc3_quirks_process_event_buf(struct dwc3 *dwc, u32 buf)
+{
+	struct dwc3_event_buffer *evt;
+	u32 count;
+	u32 reg;
+	int left;
+
+	evt = dwc->ev_buffs[buf];
+
+	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
+	count &= DWC3_GEVNTCOUNT_MASK;
+	if (!count)
+		return IRQ_NONE;
+
+	evt->count = count;
+
+	/* WORKAROUND: add a 4 us delay for an A-unit issue in the A0
+	 * stepping; can be removed after B0.
+	 */
+	if (dwc->is_otg && dwc->revision == DWC3_REVISION_210A)
+		udelay(4);
+
+	left = evt->count;
+
+	while (left > 0) {
+		union dwc3_event event;
+
+		event.raw = *(u32 *) (evt->buf + evt->lpos);
+
+		dwc3_process_event_entry(dwc, &event);
+
+		/*
+		 * FIXME we wrap around correctly to the next entry as
+		 * almost all entries are 4 bytes in size. There is one
+		 * entry which has 12 bytes, which is a regular entry
+		 * followed by 8 bytes of data. ATM I don't know how
+		 * things are organized if we get close to a buffer
+		 * boundary, so I worry about that once we try to
+		 * handle that.
+		 */
+		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
+		left -= 4;
+
+		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
+	}
+
+	evt->count = 0;
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t dwc3_quirks_interrupt(int irq, void *_dwc)
+{
+	struct dwc3	*dwc = _dwc;
+	int		i;
+	irqreturn_t	ret = IRQ_NONE;
+
+	spin_lock(&dwc->lock);
+	if (dwc->pm_state != PM_ACTIVE) {
+		if (dwc->pm_state == PM_SUSPENDED) {
+			dev_info(dwc->dev, "u2/u3 pmu is received\n");
+			pm_runtime_get(dwc->dev);
+			dwc->pm_state = PM_RESUMING;
+			ret = IRQ_HANDLED;
+		}
+		goto out;
+	}
+
+	for (i = 0; i < dwc->num_event_buffers; i++) {
+		irqreturn_t status;
+
+		status = dwc3_quirks_process_event_buf(dwc, i);
+		if (status == IRQ_HANDLED)
+			ret = status;
+	}
+
+out:
+	spin_unlock(&dwc->lock);
+
+	return ret;
+}
+
+int dwc3_start_peripheral(struct usb_gadget *g)
+{
+	struct dwc3		*dwc = gadget_to_dwc(g);
+	unsigned long		flags;
+	int			irq;
+	int			ret = 0;
+
+	pm_runtime_get_sync(dwc->dev);
+
+	mutex_lock(&_dev_data->mutex);
+	spin_lock_irqsave(&dwc->lock, flags);
+
+	if (dwc->gadget_driver && dwc->soft_connected) {
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		dwc3_core_init(dwc);
+		spin_lock_irqsave(&dwc->lock, flags);
+
+		if (dwc->hiber_enabled)
+			dwc3_enable_hibernation(dwc);
+		dwc3_do_extra_change(dwc);
+		dwc3_event_buffers_setup(dwc);
+		ret = dwc3_init_for_enumeration(dwc);
+		if (ret)
+			goto err1;
+
+		if (dwc->soft_connected)
+			dwc3_gadget_run_stop(dwc, 1);
+	}
+
+	dwc->pm_state = PM_ACTIVE;
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
+	if (dwc->quirks_disable_irqthread)
+		ret = request_irq(irq, dwc3_quirks_interrupt,
+				IRQF_SHARED, "dwc3", dwc);
+	else
+		ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
+				IRQF_SHARED, "dwc3", dwc);
+	if (ret) {
+		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
+				irq, ret);
+		goto err0;
+	}
+	mutex_unlock(&_dev_data->mutex);
+
+	return 0;
+
+err1:
+	spin_unlock_irqrestore(&dwc->lock, flags);
+err0:
+	mutex_unlock(&_dev_data->mutex);
+
+	return ret;
+}
+
+int dwc3_stop_peripheral(struct usb_gadget *g)
+{
+	struct dwc3		*dwc = gadget_to_dwc(g);
+	unsigned long		flags;
+	u8			epnum;
+	int			irq;
+
+	mutex_lock(&_dev_data->mutex);
+	spin_lock_irqsave(&dwc->lock, flags);
+
+	dwc3_stop_active_transfers(dwc);
+
+	if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
+		dwc3_disconnect_gadget(dwc);
+
+		dwc->gadget.speed = USB_SPEED_UNKNOWN;
+	}
+
+	dwc->start_config_issued = false;
+
+	/* Clear Run/Stop bit */
+	dwc3_gadget_run_stop(dwc, 0);
+	dwc3_gadget_keep_conn(dwc, 0);
+
+	for (epnum = 0; epnum < 2; epnum++) {
+		struct dwc3_ep  *dep;
+
+		dep = dwc->eps[epnum];
+
+		if (dep->flags & DWC3_EP_ENABLED)
+			__dwc3_gadget_ep_disable(dep);
+	}
+
+	dwc3_gadget_disable_irq(dwc);
+
+	dwc3_event_buffers_cleanup(dwc);
+
+	if (_dev_data->grxthrcfg && dwc->revision == DWC3_REVISION_250A) {
+		dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, _dev_data->grxthrcfg);
+		_dev_data->grxthrcfg = 0;
+	}
+
+	dwc3_enable_host_auto_retry(dwc, true);
+
+	if (dwc->pm_state != PM_SUSPENDED)
+		pm_runtime_put(dwc->dev);
+
+	dwc->pm_state = PM_DISCONNECTED;
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
+	free_irq(irq, dwc);
+
+	mutex_unlock(&_dev_data->mutex);
+
+	cancel_delayed_work_sync(&dwc->link_work);
+
+	return 0;
+}
+
+static int dwc3_device_gadget_pullup(struct usb_gadget *g, int is_on)
+{
+	struct dwc3		*dwc = gadget_to_dwc(g);
+	unsigned long		flags;
+	int			ret;
+
+	/*
+	 * FIXME If pm_state is PM_RESUMING, we should wait for it to
+	 * become PM_ACTIVE before continuing. The chance of hitting
+	 * PM_RESUMING is rare, but if it happens, we return directly.
+	 *
+	 * If some gadget reaches here in atomic context,
+	 * pm_runtime_get_sync will cause a sleep problem.
+	 */
+	if (dwc->pm_state == PM_RESUMING) {
+		dev_err(dwc->dev, "%s: PM_RESUMING, return -EIO\n", __func__);
+		return -EIO;
+	}
+
+	if (dwc->pm_state == PM_SUSPENDED)
+		pm_runtime_get_sync(dwc->dev);
+
+	is_on = !!is_on;
+
+	mutex_lock(&_dev_data->mutex);
+
+	if (dwc->soft_connected == is_on)
+		goto done;
+
+	dwc->soft_connected = is_on;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	if (dwc->pm_state == PM_DISCONNECTED) {
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		goto done;
+	}
+
+	if (is_on) {
+		/* Per the dwc3 databook 2.40a section 8.1.9, re-connection
+		 * should follow the steps described in section 8.1.1
+		 * (power on or soft reset).
+		 */
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		dwc3_core_init(dwc);
+		spin_lock_irqsave(&dwc->lock, flags);
+
+		if (dwc->hiber_enabled)
+			dwc3_enable_hibernation(dwc);
+		dwc3_do_extra_change(dwc);
+		dwc3_event_buffers_setup(dwc);
+		dwc3_init_for_enumeration(dwc);
+		ret = dwc3_gadget_run_stop(dwc, 1);
+		if (dwc->hiber_enabled)
+			dwc3_gadget_keep_conn(dwc, 1);
+	} else {
+		u8 epnum;
+
+		for (epnum = 0; epnum < 2; epnum++) {
+			struct dwc3_ep  *dep;
+
+			dep = dwc->eps[epnum];
+
+			if (dep->flags & DWC3_EP_ENABLED)
+				__dwc3_gadget_ep_disable(dep);
+		}
+
+		dwc3_stop_active_transfers(dwc);
+		dwc3_gadget_keep_conn(dwc, 0);
+		ret = dwc3_gadget_run_stop(dwc, 0);
+		dwc3_gadget_disable_irq(dwc);
+	}
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+	mutex_unlock(&_dev_data->mutex);
+
+	return ret;
+
+done:
+	mutex_unlock(&_dev_data->mutex);
+
+	return 0;
+}
+
+static const struct usb_gadget_ops dwc3_device_gadget_ops = {
+	.get_frame		= dwc3_gadget_get_frame,
+	.wakeup			= dwc3_gadget_wakeup,
+	.set_selfpowered	= dwc3_gadget_set_selfpowered,
+	.pullup			= dwc3_device_gadget_pullup,
+	.udc_start		= dwc3_gadget_start,
+	.udc_stop		= dwc3_gadget_stop,
+	.vbus_draw		= dwc3_vbus_draw,
+};
+
+static int dwc3_device_intel_probe(struct platform_device *pdev)
+{
+	struct device_node	*node = pdev->dev.of_node;
+	struct dwc3		*dwc;
+	struct device		*dev = &pdev->dev;
+	int			ret = -ENOMEM;
+	void			*mem;
+
+	struct dwc_device_par	*pdata;
+	struct usb_phy		*usb_phy;
+	struct dwc_otg2		*otg;
+
+	mem = devm_kzalloc(dev, sizeof(*dwc) + DWC3_ALIGN_MASK, GFP_KERNEL);
+	if (!mem) {
+		dev_err(dev, "not enough memory\n");
+		return -ENOMEM;
+	}
+	dwc = PTR_ALIGN(mem, DWC3_ALIGN_MASK + 1);
+	dwc->mem = mem;
+
+	_dev_data = kzalloc(sizeof(*_dev_data), GFP_KERNEL);
+	if (!_dev_data) {
+		dev_err(dev, "not enough memory\n");
+		return -ENOMEM;
+	}
+
+	_dev_data->dwc = dwc;
+
+	pdata = (struct dwc_device_par *)pdev->dev.platform_data;
+	if (!pdata) {
+		dev_err(&pdev->dev, "No platform data for %s.\n",
+				dev_name(&pdev->dev));
+		kfree(_dev_data);
+		_dev_data = NULL;
+		return -ENODEV;
+	}
+
+	if (node) {
+		dwc->usb2_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
+		dwc->usb3_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 1);
+	} else {
+		dwc->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+		dwc->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3);
+	}
+
+	if (IS_ERR(dwc->usb2_phy)) {
+		ret = PTR_ERR(dwc->usb2_phy);
+
+		/*
+		 * if -ENXIO is returned, it means PHY layer wasn't
+		 * enabled, so it makes no sense to return -EPROBE_DEFER
+		 * in that case, since no PHY driver will ever probe.
+		 */
+		if (ret == -ENXIO)
+			return ret;
+
+		dev_err(dev, "no usb2 phy configured\n");
+		return -EPROBE_DEFER;
+	}
+
+	if (IS_ERR(dwc->usb3_phy)) {
+		ret = PTR_ERR(dwc->usb3_phy);
+
+		/*
+		 * if -ENXIO is returned, it means PHY layer wasn't
+		 * enabled, so it makes no sense to return -EPROBE_DEFER
+		 * in that case, since no PHY driver will ever probe.
+		 */
+		if (ret == -ENXIO)
+			return ret;
+
+		dev_err(dev, "no usb3 phy configured\n");
+		return -EPROBE_DEFER;
+	}
+
+	mutex_init(&_dev_data->mutex);
+	spin_lock_init(&dwc->lock);
+	platform_set_drvdata(pdev, dwc);
+
+	dwc->regs   = pdata->io_addr + DWC3_GLOBALS_REGS_START;
+	dwc->regs_size  = pdata->len - DWC3_GLOBALS_REGS_START;
+	dwc->dev	= dev;
+
+	dev->dma_mask	= dev->parent->dma_mask;
+	dev->dma_parms	= dev->parent->dma_parms;
+	dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask);
+
+	if (!strncmp("super", maximum_speed, 5))
+		dwc->maximum_speed = DWC3_DCFG_SUPERSPEED;
+	else if (!strncmp("high", maximum_speed, 4))
+		dwc->maximum_speed = DWC3_DCFG_HIGHSPEED;
+	else if (!strncmp("full", maximum_speed, 4))
+		dwc->maximum_speed = DWC3_DCFG_FULLSPEED1;
+	else if (!strncmp("low", maximum_speed, 3))
+		dwc->maximum_speed = DWC3_DCFG_LOWSPEED;
+	else
+		dwc->maximum_speed = DWC3_DCFG_SUPERSPEED;
+
+	dwc->needs_fifo_resize = of_property_read_bool(node, "tx-fifo-resize");
+
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(dev);
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_get_sync(dev);
+	pm_runtime_forbid(dev);
+
+	dwc3_cache_hwparams(dwc);
+	dwc3_core_num_eps(dwc);
+
+	_dev_data->flis_reg =
+		ioremap_nocache(APBFC_EXIOTG3_MISC0_REG, 4);
+
+	ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
+	if (ret) {
+		dev_err(dwc->dev, "failed to allocate event buffers\n");
+		ret = -ENOMEM;
+		goto err0;
+	}
+
+	/*
+	 * Do not use an IRQ thread, because the IRQ thread has a negative
+	 * impact on USB performance, especially USB network throughput.
+	 */
+	dwc->quirks_disable_irqthread = 1;
+
+	usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
+	otg = container_of(usb_phy, struct dwc_otg2, usb2_phy);
+	otg->start_device = dwc3_start_peripheral;
+	otg->stop_device = dwc3_stop_peripheral;
+	otg->vbus_draw = dwc3_vbus_draw;
+	usb_put_phy(usb_phy);
+	dwc->is_otg = 1;
+
+	dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+	ret = dwc3_gadget_init(dwc);
+	if (ret) {
+		dev_err(dev, "failed to initialize gadget\n");
+		goto err0;
+	}
+	dwc->gadget.ops = &dwc3_device_gadget_ops;
+	dwc->gadget.is_otg = 1;
+
+	dwc->mode = DWC3_MODE_DEVICE;
+
+	ret = dwc3_debugfs_init(dwc);
+	if (ret) {
+		dev_err(dev, "failed to initialize debugfs\n");
+		goto err1;
+	}
+
+	pm_runtime_allow(dev);
+	pm_runtime_put(dev);
+
+	return 0;
+
+err1:
+	dwc3_gadget_exit(dwc);
+
+err0:
+	dwc3_free_event_buffers(dwc);
+
+	return ret;
+}
+
+static int dwc3_device_intel_remove(struct platform_device *pdev)
+{
+	iounmap(_dev_data->flis_reg);
+
+	dwc3_remove(pdev);
+
+	kfree(_dev_data);
+	_dev_data = NULL;
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static const struct dev_pm_ops dwc3_device_pm_ops = {
+	.runtime_suspend	= dwc3_runtime_suspend,
+	.runtime_resume		= dwc3_runtime_resume,
+};
+#define DWC3_DEVICE_PM_OPS	(&dwc3_device_pm_ops)
+#else
+#define DWC3_DEVICE_PM_OPS	NULL
+#endif
+
+static struct platform_driver dwc3_device_intel_driver = {
+	.probe		= dwc3_device_intel_probe,
+	.remove		= dwc3_device_intel_remove,
+	.driver		= {
+		.name	= "dwc3-device",
+		.of_match_table	= of_match_ptr(of_dwc3_match),
+		.pm	= DWC3_DEVICE_PM_OPS,
+	},
+};
+
+module_platform_driver(dwc3_device_intel_driver);
+
+MODULE_ALIAS("platform:dwc3");
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver");
diff --git a/drivers/usb/dwc3/dwc3-host-intel.c b/drivers/usb/dwc3/dwc3-host-intel.c
new file mode 100644
index 0000000..558547b
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-host-intel.c
@@ -0,0 +1,724 @@
+/*
+ * Copyright (C) 2012 Intel Corp.
+ * Author: Yu Wang
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/usb/otg.h>
+#include <linux/platform_device.h>
+#include <linux/usb/dwc3-intel-mid.h>
+#include "../host/xhci.h"
+#include "core.h"
+#include "otg.h"
+
+#define WAIT_DISC_EVENT_COMPLETE_TIMEOUT 5 /* 100ms */
+
+static int otg_irqnum;
+
+static int dwc3_start_host(struct usb_hcd *hcd);
+static int dwc3_stop_host(struct usb_hcd *hcd);
+static struct platform_driver dwc3_xhci_driver;
+
+static void xhci_dwc3_quirks(struct device *dev, struct xhci_hcd *xhci)
+{
+	/*
+	 * As of now platform drivers don't provide MSI support, so we ensure
+	 * here that the generic code does not try to make a pci_dev from our
+	 * dev struct in order to set up MSI.
+	 *
+	 * Also, the Synopsys DWC3 controller generates PLC when the link
+	 * transitions to compliance/loopback mode.
+	 */
+	xhci->quirks |= XHCI_PLAT;
+}
+
+/* called during probe() after chip reset completes */
+static int xhci_dwc3_setup(struct usb_hcd *hcd)
+{
+	return xhci_gen_setup(hcd, xhci_dwc3_quirks);
+}
+
+static int xhci_dwc_bus_resume(struct usb_hcd *hcd)
+{
+	int ret;
+
+	/* before resuming the bus, delay 1 ms to wait for the core to stabilize */
+	mdelay(1);
+
+	ret = xhci_bus_resume(hcd);
+	return ret;
+}
+
+static const struct hc_driver xhci_dwc_hc_driver = {
+	.description =		"dwc-xhci",
+	.product_desc =		"xHCI Host Controller",
+	.hcd_priv_size =	sizeof(struct xhci_hcd *),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq =			xhci_irq,
+	.flags =		HCD_MEMORY | HCD_USB3 | HCD_SHARED,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.reset =		xhci_dwc3_setup,
+	.start =		xhci_run,
+	.stop =			xhci_stop,
+	.shutdown =		xhci_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue =		xhci_urb_enqueue,
+	.urb_dequeue =		xhci_urb_dequeue,
+	.alloc_dev =		xhci_alloc_dev,
+	.free_dev =		xhci_free_dev,
+	.alloc_streams =	xhci_alloc_streams,
+	.free_streams =		xhci_free_streams,
+	.add_endpoint =		xhci_add_endpoint,
+	.drop_endpoint =	xhci_drop_endpoint,
+	.endpoint_reset =	xhci_endpoint_reset,
+	.check_bandwidth =	xhci_check_bandwidth,
+	.reset_bandwidth =	xhci_reset_bandwidth,
+	.address_device =	xhci_address_device,
+	.update_hub_device =	xhci_update_hub_device,
+	.reset_device =		xhci_discover_or_reset_device,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number =	xhci_get_frame,
+
+	/* Root hub support */
+	.hub_control =		xhci_hub_control,
+	.hub_status_data =	xhci_hub_status_data,
+	.bus_suspend =		xhci_bus_suspend,
+	.bus_resume =		xhci_dwc_bus_resume,
+};
+
+static int if_usb_devices_connected(struct xhci_hcd *xhci)
+{
+	struct usb_device		*usb_dev;
+	int i, connected_devices = 0;
+
+	if (!xhci)
+		return -EINVAL;
+
+	usb_dev = xhci->main_hcd->self.root_hub;
+	for (i = 1; i <= usb_dev->maxchild; ++i) {
+		if (usb_hub_find_child(usb_dev, i))
+			connected_devices++;
+	}
+
+	usb_dev = xhci->shared_hcd->self.root_hub;
+	for (i = 1; i <= usb_dev->maxchild; ++i) {
+		if (usb_hub_find_child(usb_dev, i))
+			connected_devices++;
+	}
+
+	if (connected_devices)
+		return 1;
+
+	return 0;
+}
+
+static void dwc_xhci_enable_phy_auto_resume(struct usb_hcd *hcd, bool enable)
+{
+	u32 val;
+
+	val = readl(hcd->regs + GUSB2PHYCFG0);
+	val |= GUSB2PHYCFG_ULPI_EXT_VBUS_DRV;
+	if (enable)
+		val |= GUSB2PHYCFG_ULPI_AUTO_RESUME;
+	else
+		val &= ~GUSB2PHYCFG_ULPI_AUTO_RESUME;
+	writel(val, hcd->regs + GUSB2PHYCFG0);
+}
+
+static void dwc_xhci_enable_phy_suspend(struct usb_hcd *hcd, bool enable)
+{
+	u32 val;
+
+	val = readl(hcd->regs + GUSB3PIPECTL0);
+	if (enable)
+		val |= GUSB3PIPECTL_SUS_EN;
+	else
+		val &= ~GUSB3PIPECTL_SUS_EN;
+	writel(val, hcd->regs + GUSB3PIPECTL0);
+
+	val = readl(hcd->regs + GUSB2PHYCFG0);
+	if (enable)
+		val |= GUSB2PHYCFG_SUS_PHY;
+	else
+		val &= ~GUSB2PHYCFG_SUS_PHY;
+	writel(val, hcd->regs + GUSB2PHYCFG0);
+}
+
+static void dwc_silicon_wa(struct usb_hcd *hcd)
+{
+	void __iomem *addr;
+	u32 val;
+
+	/* Clear GUCTL bit 15 as a workaround for a DWC controller bug
+	 * that makes the xHCI driver miss transfer-complete events
+	 * for certain EPs after exiting hibernation mode. */
+	val = readl(hcd->regs + GUCTL);
+	val &= ~GUCTL_CMDEVADDR;
+	writel(val, hcd->regs + GUCTL);
+
+	/* Disable the OTG3-EXI interface by default. This is a
+	 * workaround for a silicon bug that causes transfer
+	 * failures on EP#8 of any USB device.
+	 */
+	addr = ioremap_nocache(APBFC_EXIOTG3_MISC0_REG, 4);
+	val = readl(addr);
+	val |= (1 << 3);
+	writel(val, addr);
+	iounmap(addr);
+}
+
+static void dwc_core_reset(struct usb_hcd *hcd)
+{
+	u32 val;
+
+	val = readl(hcd->regs + GCTL);
+	val |= GCTL_CORESOFTRESET;
+	writel(val, hcd->regs + GCTL);
+
+	val = readl(hcd->regs + GUSB3PIPECTL0);
+	val |= GUSB3PIPECTL_PHYSOFTRST;
+	writel(val, hcd->regs + GUSB3PIPECTL0);
+
+	val = readl(hcd->regs + GUSB2PHYCFG0);
+	val |= GUSB2PHYCFG_PHYSOFTRST;
+	writel(val, hcd->regs + GUSB2PHYCFG0);
+
+	msleep(100);
+
+	val = readl(hcd->regs + GUSB3PIPECTL0);
+	val &= ~GUSB3PIPECTL_PHYSOFTRST;
+	writel(val, hcd->regs + GUSB3PIPECTL0);
+
+	val = readl(hcd->regs + GUSB2PHYCFG0);
+	val &= ~GUSB2PHYCFG_PHYSOFTRST;
+	writel(val, hcd->regs + GUSB2PHYCFG0);
+
+	msleep(20);
+
+	val = readl(hcd->regs + GCTL);
+	val &= ~GCTL_CORESOFTRESET;
+	writel(val, hcd->regs + GCTL);
+}
+
+/*
+ * On the MERR platform the suspend clock is 19.2 MHz,
+ * hence PwrDnScale = 19200 / 16 = 1200 (= 0x4B0).
+ * To account for possible jitter of the suspend clock and to leave
+ * some margin, it is recommended to set it to 1250 (= 0x4E2).
+ */
+static void dwc_set_ssphy_p3_clockrate(struct usb_hcd *hcd)
+{
+	u32 gctl;
+
+	gctl = readl(hcd->regs + GCTL);
+	gctl &= ~GCTL_PWRDNSCALE_MASK;
+	gctl |= GCTL_PWRDNSCALE(0x4E2);
+	writel(gctl, hcd->regs + GCTL);
+}
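/*
 * The same arithmetic in code form, for other suspend clocks; a minimal
 * sketch, with suspend_clk_khz_to_pwrdnscale as a hypothetical helper
 * (the margin term mirrors the 1200 -> 1250 adjustment above).
 */
static u32 suspend_clk_khz_to_pwrdnscale(u32 khz)
{
	u32 scale = khz / 16;		/* e.g. 19200 / 16 = 1200 */

	return scale + scale / 25;	/* add ~4% margin: 1200 -> 1248 */
}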
+
+static ssize_t
+show_pm_get(struct device *_dev, struct device_attribute *attr, char *buf)
+{
+	struct platform_device		*pdev = to_platform_device(_dev);
+	struct usb_hcd		*hcd = platform_get_drvdata(pdev);
+
+	pm_runtime_put(hcd->self.controller);
+	return 0;
+}
+static ssize_t store_pm_get(struct device *_dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct platform_device		*pdev = to_platform_device(_dev);
+	struct usb_hcd		*hcd = platform_get_drvdata(pdev);
+
+	pm_runtime_get(hcd->self.controller);
+	return count;
+}
+static DEVICE_ATTR(pm_get, S_IRUGO|S_IWUSR|S_IWGRP,
+			show_pm_get, store_pm_get);
+
+static void dwc_set_host_mode(struct usb_hcd *hcd)
+{
+	writel(0x45801000, hcd->regs + GCTL);
+
+	msleep(20);
+}
+
+static int dwc3_start_host(struct usb_hcd *hcd)
+{
+	int ret = -EINVAL;
+	struct xhci_hcd *xhci;
+	struct usb_hcd *xhci_shared_hcd;
+
+	if (!hcd)
+		return ret;
+
+	if (hcd->rh_registered) {
+		dev_dbg(hcd->self.controller,
+				"%s() - Already registered", __func__);
+		return 0;
+	}
+
+	pm_runtime_get_sync(hcd->self.controller);
+
+	dwc_core_reset(hcd);
+	dwc_silicon_wa(hcd);
+	dwc_set_host_mode(hcd);
+	dwc_set_ssphy_p3_clockrate(hcd);
+
+	/* Clear hcd->flags to prevent stale flags from the
+	 * previous start/stop cycle from leaking into this one. */
+	hcd->flags = 0;
+
+	ret = usb_add_hcd(hcd, otg_irqnum, IRQF_SHARED);
+	if (ret)
+		return -EINVAL;
+
+	xhci = hcd_to_xhci(hcd);
+	xhci->shared_hcd = usb_create_shared_hcd(&xhci_dwc_hc_driver,
+		   hcd->self.controller, dev_name(hcd->self.controller), hcd);
+	if (!xhci->shared_hcd) {
+		ret = -ENOMEM;
+		goto dealloc_usb2_hcd;
+	}
+
+	xhci->quirks |= XHCI_PLAT;
+
+	/* Set the xHCI pointer before xhci_pci_setup() (aka hcd_driver.reset)
+	 * is called by usb_add_hcd().
+	 */
+	*((struct xhci_hcd **) xhci->shared_hcd->hcd_priv) = xhci;
+
+	xhci->shared_hcd->regs = hcd->regs;
+
+	xhci->shared_hcd->rsrc_start = hcd->rsrc_start;
+	xhci->shared_hcd->rsrc_len = hcd->rsrc_len;
+
+	ret = usb_add_hcd(xhci->shared_hcd, otg_irqnum, IRQF_SHARED);
+	if (ret)
+		goto put_usb3_hcd;
+
+	pm_runtime_put(hcd->self.controller);
+
+	ret = device_create_file(hcd->self.controller, &dev_attr_pm_get);
+	if (ret < 0)
+		dev_err(hcd->self.controller,
+			"Can't register sysfs attribute: %d\n", ret);
+
+	dwc3_xhci_driver.shutdown = usb_hcd_platform_shutdown;
+
+	return ret;
+
+put_usb3_hcd:
+	if (xhci->shared_hcd) {
+		xhci_shared_hcd = xhci->shared_hcd;
+		usb_remove_hcd(xhci_shared_hcd);
+		usb_put_hcd(xhci_shared_hcd);
+	}
+
+dealloc_usb2_hcd:
+	local_irq_disable();
+	usb_hcd_irq(0, hcd);
+	local_irq_enable();
+	usb_remove_hcd(hcd);
+
+	kfree(xhci);
+	*((struct xhci_hcd **) hcd->hcd_priv) = NULL;
+
+	pm_runtime_put(hcd->self.controller);
+	return ret;
+}
+
+static int dwc3_stop_host(struct usb_hcd *hcd)
+{
+	int count = 0;
+	struct xhci_hcd *xhci;
+	struct usb_hcd *xhci_shared_hcd;
+
+	if (!hcd)
+		return -EINVAL;
+
+	xhci = hcd_to_xhci(hcd);
+
+	pm_runtime_get_sync(hcd->self.controller);
+
+	/* When the micro-A cable is unplugged, two flows execute: the
+	 * xHCI controller gets a disconnect event, and the PMIC gets an
+	 * ID change event. While handling these events, both paths try
+	 * to call usb_disconnect, which conflicts and causes a kernel
+	 * panic. So treat the disconnect event as first priority and
+	 * only handle the ID change event once the disconnect event
+	 * handling is done. */
+	while (if_usb_devices_connected(xhci)) {
+		msleep(20);
+		if (count++ > WAIT_DISC_EVENT_COMPLETE_TIMEOUT)
+			break;
+	}
+	dwc3_xhci_driver.shutdown = NULL;
+
+	if (xhci->shared_hcd) {
+		xhci_shared_hcd = xhci->shared_hcd;
+		usb_remove_hcd(xhci_shared_hcd);
+		usb_put_hcd(xhci_shared_hcd);
+	}
+
+	usb_remove_hcd(hcd);
+
+	kfree(xhci);
+	*((struct xhci_hcd **) hcd->hcd_priv) = NULL;
+
+	dwc_xhci_enable_phy_suspend(hcd, false);
+
+	pm_runtime_put(hcd->self.controller);
+	device_remove_file(hcd->self.controller, &dev_attr_pm_get);
+	return 0;
+}
+static int xhci_dwc_drv_probe(struct platform_device *pdev)
+{
+	struct dwc_otg2 *otg;
+	struct usb_phy *usb_phy;
+	struct dwc_device_par *pdata;
+	struct usb_hcd *hcd;
+	struct resource *res;
+	int retval = 0;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_debug("initializing DWC3 xHCI host controller\n");
+
+	/* Need platform data for setup */
+	pdata = (struct dwc_device_par *)pdev->dev.platform_data;
+	if (!pdata) {
+		dev_err(&pdev->dev,
+			"No platform data for %s.\n", dev_name(&pdev->dev));
+		return -ENODEV;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Found HC with no IRQ. Check %s setup!\n",
+			dev_name(&pdev->dev));
+		return -ENODEV;
+	}
+	otg_irqnum = res->start;
+
+	hcd = usb_create_hcd(&xhci_dwc_hc_driver,
+			&pdev->dev, dev_name(&pdev->dev));
+	if (!hcd) {
+		retval = -ENOMEM;
+		return retval;
+	}
+
+	hcd->regs = pdata->io_addr;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Found HC with no register space. Check %s setup!\n",
+			dev_name(&pdev->dev));
+		usb_put_hcd(hcd);
+		return -ENODEV;
+	}
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+
+	usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
+	if (!IS_ERR_OR_NULL(usb_phy)) {
+		otg_set_host(usb_phy->otg, &hcd->self);
+
+		otg = container_of(usb_phy->otg, struct dwc_otg2, otg);
+		otg->start_host = dwc3_start_host;
+		otg->stop_host = dwc3_stop_host;
+
+		usb_put_phy(usb_phy);
+	}
+
+	/* Enable wakeup irq */
+	hcd->has_wakeup_irq = 1;
+
+	platform_set_drvdata(pdev, hcd);
+	pm_runtime_enable(hcd->self.controller);
+
+	return retval;
+}
+
+static int xhci_dwc_drv_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct usb_phy *usb_phy;
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
+	otg_set_host(usb_phy->otg, NULL);
+	usb_put_phy(usb_phy);
+
+	if (xhci)
+		dwc3_stop_host(hcd);
+	usb_put_hcd(hcd);
+
+	pm_runtime_disable(hcd->self.controller);
+	pm_runtime_set_suspended(hcd->self.controller);
+	return 0;
+}
+
+
+#ifdef CONFIG_PM
+
+#ifdef CONFIG_PM_RUNTIME
+/*
+ * Do nothing in the runtime PM idle callback.
+ * On the HVP platform, once the controller goes into hibernation mode
+ * it will not raise an IRQ until its state is restored, which happens
+ * in the runtime-resume callback. So nothing can trigger pm_runtime_get
+ * to resume the USB3 device. This issue needs further investigation;
+ * for now just implement the SW logic here.
+ */
+static int dwc_hcd_runtime_idle(struct device *dev)
+{
+	return 0;
+}
+
+/* dwc_hcd_suspend_common and dwc_hcd_resume_common mirror
+ * suspend_common and resume_common in the USB core.
+ * The USB core versions only support PCI devices,
+ * so they are re-written here to support platform devices.
+ */
+static int dwc_hcd_suspend_common(struct device *dev)
+{
+	struct platform_device		*pdev = to_platform_device(dev);
+	struct usb_hcd		*hcd = platform_get_drvdata(pdev);
+	struct xhci_hcd		*xhci = hcd_to_xhci(hcd);
+	int			retval = 0;
+	u32 data = 0;
+
+	if (!xhci) {
+		dev_dbg(dev, "%s: host already stop!\n", __func__);
+		return 0;
+	}
+
+	/* Root hub suspend should have stopped all downstream traffic,
+	 * and all bus master traffic.  And done so for both the interface
+	 * and the stub usb_device (which we check here).  But maybe it
+	 * didn't; writing sysfs power/state files ignores such rules...
+	 */
+	if (HCD_RH_RUNNING(hcd)) {
+		dev_warn(dev, "Root hub is not suspended\n");
+		return -EBUSY;
+	}
+	if (hcd->shared_hcd) {
+		hcd = hcd->shared_hcd;
+		if (HCD_RH_RUNNING(hcd)) {
+			dev_warn(dev, "Secondary root hub is not suspended\n");
+			return -EBUSY;
+		}
+	}
+
+	if (!HCD_DEAD(hcd)) {
+		/* Optimization: Don't suspend if a root-hub wakeup is
+		 * pending and it would cause the HCD to wake up anyway.
+		 */
+		if (HCD_WAKEUP_PENDING(hcd))
+			return -EBUSY;
+		if (hcd->shared_hcd &&
+				HCD_WAKEUP_PENDING(hcd->shared_hcd))
+			return -EBUSY;
+		if (hcd->state != HC_STATE_SUSPENDED ||
+				xhci->shared_hcd->state != HC_STATE_SUSPENDED)
+			retval = -EINVAL;
+
+		if (!retval) {
+			/* Auto-resume is disabled by default. Enable it
+			 * when there is a valid connection, to ensure that
+			 * when a device resumes, the host reflects the
+			 * resume within 900 usec as required by the USB
+			 * spec.
+			 */
+			if (if_usb_devices_connected(xhci) == 1)
+				dwc_xhci_enable_phy_auto_resume(
+						xhci->main_hcd, true);
+
+			/* Ensure that suspend enable are set for
+			 * USB2 and USB3 PHY
+			 */
+			dwc_xhci_enable_phy_suspend(hcd, true);
+
+			data = readl(hcd->regs + GCTL);
+			data |= GCTL_GBL_HIBERNATION_EN;
+			writel(data, hcd->regs + GCTL);
+			dev_dbg(hcd->self.controller, "set xhci hibernation enable!\n");
+			retval = xhci_suspend(xhci);
+		}
+
+		/* Check again in case wakeup raced with pci_suspend */
+		if ((retval == 0 && HCD_WAKEUP_PENDING(hcd)) ||
+				(retval == 0 && hcd->shared_hcd &&
+				 HCD_WAKEUP_PENDING(hcd->shared_hcd))) {
+			xhci_resume(xhci, false);
+			retval = -EBUSY;
+		}
+		if (retval)
+			return retval;
+	}
+
+	synchronize_irq(otg_irqnum);
+
+	return retval;
+
+}
+
+static int dwc_hcd_resume_common(struct device *dev)
+{
+	struct platform_device		*pdev = to_platform_device(dev);
+	struct usb_hcd		*hcd = platform_get_drvdata(pdev);
+	struct xhci_hcd		*xhci = hcd_to_xhci(hcd);
+	int			retval = 0;
+
+	if (!xhci)
+		return 0;
+
+	if (HCD_RH_RUNNING(hcd) ||
+			(hcd->shared_hcd &&
+			 HCD_RH_RUNNING(hcd->shared_hcd))) {
+		dev_dbg(dev, "can't resume, not suspended!\n");
+		return 0;
+	}
+
+	if (!HCD_DEAD(hcd)) {
+		retval = xhci_resume(xhci, false);
+		if (retval) {
+			dev_err(dev, "PCI post-resume error %d!\n", retval);
+			if (hcd->shared_hcd)
+				usb_hc_died(hcd->shared_hcd);
+			usb_hc_died(hcd);
+		}
+	}
+
+	dev_dbg(dev, "hcd_pci_runtime_resume: %d\n", retval);
+
+	return retval;
+}
+
+static int dwc_hcd_runtime_suspend(struct device *dev)
+{
+	int retval;
+	struct platform_device      *pdev = to_platform_device(dev);
+	struct usb_hcd      *hcd = platform_get_drvdata(pdev);
+
+	retval = dwc_hcd_suspend_common(dev);
+
+	if (retval)
+		dwc_xhci_enable_phy_auto_resume(
+			hcd, false);
+
+	dev_dbg(dev, "hcd_pci_runtime_suspend: %d\n", retval);
+	return retval;
+}
+
+static int dwc_hcd_runtime_resume(struct device *dev)
+{
+	int retval;
+	struct platform_device      *pdev = to_platform_device(dev);
+	struct usb_hcd      *hcd = platform_get_drvdata(pdev);
+
+	dwc_xhci_enable_phy_auto_resume(
+			hcd, false);
+
+	retval = dwc_hcd_resume_common(dev);
+	dev_dbg(dev, "hcd_pci_runtime_resume: %d\n", retval);
+
+	return retval;
+}
+#else
+#define dwc_hcd_runtime_idle NULL
+#define dwc_hcd_runtime_suspend NULL
+#define dwc_hcd_runtime_resume NULL
+#endif
+
+
+static int dwc_hcd_suspend(struct device *dev)
+{
+	int retval;
+	struct platform_device      *pdev = to_platform_device(dev);
+	struct usb_hcd      *hcd = platform_get_drvdata(pdev);
+
+	retval = dwc_hcd_suspend_common(dev);
+
+	if (retval)
+		dwc_xhci_enable_phy_auto_resume(
+			hcd, false);
+
+	dev_dbg(dev, "hcd_pci_runtime_suspend: %d\n", retval);
+	return retval;
+}
+
+static int dwc_hcd_resume(struct device *dev)
+{
+	int retval;
+	struct platform_device      *pdev = to_platform_device(dev);
+	struct usb_hcd      *hcd = platform_get_drvdata(pdev);
+
+	dwc_xhci_enable_phy_auto_resume(
+			hcd, false);
+
+	retval = dwc_hcd_resume_common(dev);
+	dev_dbg(dev, "hcd_pci_runtime_resume: %d\n", retval);
+
+	return retval;
+}
+
+static const struct dev_pm_ops dwc_usb_hcd_pm_ops = {
+	.runtime_suspend = dwc_hcd_runtime_suspend,
+	.runtime_resume	= dwc_hcd_runtime_resume,
+	.runtime_idle	= dwc_hcd_runtime_idle,
+	.suspend	=	dwc_hcd_suspend,
+	.resume		=	dwc_hcd_resume,
+};
+#endif
+
+static struct platform_driver dwc3_xhci_driver = {
+	.probe = xhci_dwc_drv_probe,
+	.remove = xhci_dwc_drv_remove,
+	.driver = {
+		.name = "dwc3-host",
+#ifdef CONFIG_PM
+		.pm = &dwc_usb_hcd_pm_ops,
+#endif
+	},
+};
diff --git a/drivers/usb/dwc3/dwc3-intel-byt.c b/drivers/usb/dwc3/dwc3-intel-byt.c
new file mode 100644
index 0000000..adae9ef
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-intel-byt.c
@@ -0,0 +1,970 @@
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb/otg.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/freezer.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/version.h>
+#include <linux/gpio.h>
+
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/dwc3-intel-mid.h>
+#include "otg.h"
+
+#define VERSION "2.10a"
+
+static int otg_id = -1;
+static int enable_usb_phy(struct dwc_otg2 *otg, bool on_off);
+static int dwc3_intel_byt_notify_charger_type(struct dwc_otg2 *otg,
+		enum power_supply_charger_event event);
+
+static int charger_detect_enable(struct dwc_otg2 *otg)
+{
+	struct intel_dwc_otg_pdata *data;
+
+	if (!otg || !otg->otg_data)
+		return 0;
+
+	data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+	return data->charger_detect_enable;
+}
+
+static int sdp_charging(struct dwc_otg2 *otg)
+{
+	struct intel_dwc_otg_pdata *data;
+
+	if (!otg || !otg->otg_data)
+		return 0;
+
+	data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+	return data->sdp_charging;
+}
+
+static void usb2phy_eye_optimization(struct dwc_otg2 *otg)
+{
+	struct usb_phy *phy;
+
+	phy = usb_get_phy(USB_PHY_TYPE_USB2);
+	if (!phy)
+		return;
+
+	/* Set 0x7f for better quality in the eye diagram;
+	 * it means ZHSDRV = 0b11 and IHSTX = 0b1111 */
+	usb_phy_io_write(phy, 0x4f, TUSB1211_VENDOR_SPECIFIC1_SET);
+
+	usb_put_phy(phy);
+}
+
+static int dwc_otg_charger_hwdet(bool enable)
+{
+	int				retval;
+	struct usb_phy *phy;
+	struct dwc_otg2 *otg = dwc3_get_otg();
+
+	/* Just return if charger detection is not enabled */
+	if (!charger_detect_enable(otg))
+		return 0;
+
+	phy = usb_get_phy(USB_PHY_TYPE_USB2);
+	if (!phy)
+		return -ENODEV;
+
+	if (enable) {
+		retval = usb_phy_io_write(phy, PWCTRL_HWDETECT,
+				TUSB1211_POWER_CONTROL_SET);
+		if (retval)
+			return retval;
+		otg_dbg(otg, "set HWDETECT\n");
+	} else {
+		retval = usb_phy_io_write(phy, PWCTRL_HWDETECT,
+				TUSB1211_POWER_CONTROL_CLR);
+		if (retval)
+			return retval;
+		otg_dbg(otg, "clear HWDETECT\n");
+	}
+	usb_put_phy(phy);
+
+	return 0;
+}
+
+static ssize_t store_vbus_evt(struct device *_dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long flags;
+	struct dwc_otg2	*otg = dwc3_get_otg();
+
+	if (count != 2) {
+		otg_err(otg, "return EINVAL\n");
+		return -EINVAL;
+	}
+
+	if (count > 0 && buf[count-1] == '\n')
+		((char *) buf)[count-1] = 0;
+
+	switch (buf[0]) {
+	case '1':
+		otg_dbg(otg, "Change the VBUS to High\n");
+		otg->otg_events |= OEVT_B_DEV_SES_VLD_DET_EVNT;
+		spin_lock_irqsave(&otg->lock, flags);
+		dwc3_wakeup_otg_thread(otg);
+		spin_unlock_irqrestore(&otg->lock, flags);
+		return count;
+	case '0':
+		otg_dbg(otg, "Change the VBUS to Low\n");
+		otg->otg_events |= OEVT_A_DEV_SESS_END_DET_EVNT;
+		spin_lock_irqsave(&otg->lock, flags);
+		dwc3_wakeup_otg_thread(otg);
+		spin_unlock_irqrestore(&otg->lock, flags);
+		return count;
+	default:
+		return -EINVAL;
+	}
+
+	return count;
+}
+static DEVICE_ATTR(vbus_evt, S_IWUSR|S_IWGRP,
+			NULL, store_vbus_evt);
+
+
+static ssize_t store_otg_id(struct device *_dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long flags;
+	struct dwc_otg2 *otg = dwc3_get_otg();
+
+	if (!otg)
+		return 0;
+	if (count != 2) {
+		otg_err(otg, "return EINVAL\n");
+		return -EINVAL;
+	}
+
+	if (count > 0 && buf[count-1] == '\n')
+		((char *) buf)[count-1] = 0;
+
+	switch (buf[0]) {
+	case 'a':
+	case 'A':
+		otg_dbg(otg, "Change ID to A\n");
+		otg->user_events |= USER_ID_A_CHANGE_EVENT;
+		spin_lock_irqsave(&otg->lock, flags);
+		dwc3_wakeup_otg_thread(otg);
+		otg_id = 0;
+		spin_unlock_irqrestore(&otg->lock, flags);
+		return count;
+	case 'b':
+	case 'B':
+		otg_dbg(otg, "Change ID to B\n");
+		otg->user_events |= USER_ID_B_CHANGE_EVENT;
+		spin_lock_irqsave(&otg->lock, flags);
+		dwc3_wakeup_otg_thread(otg);
+		otg_id = 1;
+		spin_unlock_irqrestore(&otg->lock, flags);
+		return count;
+	default:
+		otg_err(otg, "Just support change ID to A!\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t
+show_otg_id(struct device *_dev, struct device_attribute *attr, char *buf)
+{
+	char				*next;
+	unsigned			size, t;
+
+	next = buf;
+	size = PAGE_SIZE;
+
+	t = scnprintf(next, size,
+		"USB OTG ID: %s\n",
+		(otg_id ? "B" : "A")
+		);
+	size -= t;
+	next += t;
+
+	return PAGE_SIZE - size;
+}
+
+static DEVICE_ATTR(otg_id, S_IRUGO|S_IWUSR|S_IWGRP,
+			show_otg_id, store_otg_id);
+
+static void set_sus_phy(struct dwc_otg2 *otg, int bit)
+{
+	u32 data = 0;
+
+	data = otg_read(otg, GUSB2PHYCFG0);
+	if (bit)
+		data |= GUSB2PHYCFG_SUS_PHY;
+	else
+		data &= ~GUSB2PHYCFG_SUS_PHY;
+
+	otg_write(otg, GUSB2PHYCFG0, data);
+
+	data = otg_read(otg, GUSB3PIPECTL0);
+	if (bit)
+		data |= GUSB3PIPECTL_SUS_EN;
+	else
+		data &= ~GUSB3PIPECTL_SUS_EN;
+	otg_write(otg, GUSB3PIPECTL0, data);
+}
+
+static int dwc3_check_gpio_id(struct dwc_otg2 *otg2)
+{
+	struct dwc_otg2 *otg = dwc3_get_otg();
+	struct intel_dwc_otg_pdata *data;
+	int id = 0;
+	int next = 0;
+	int count = 0;
+	unsigned long timeout;
+
+	otg_dbg(otg, "start check gpio id\n");
+
+	data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+	/* Poll the ID GPIO pin value for SW debounce, as the HW debounce
+	 * chip is not connected on the BYT CR board */
+	if (data && data->gpio_id) {
+		id = gpio_get_value(data->gpio_id);
+
+		/* If the GPIO read returns the same value 20 times in a
+		 * row, end the SW debounce and return the ID value.
+		 * The total debounce time is 80 ms~100 ms for 20 GPIO
+		 * reads on BYT CR, which is longer than the normal
+		 * debounce time handled by a HW chip.
+		 * Also set a 200 ms timeout to avoid being stuck on an
+		 * unstable pin */
+		timeout = jiffies + msecs_to_jiffies(200);
+		while ((count < 20) && (!time_after(jiffies, timeout))) {
+			next = gpio_get_value(data->gpio_id);
+			otg_dbg(otg, "id value pin %d = %d\n",
+				data->gpio_id, next);
+			if (next < 0)
+				return -EINVAL;
+			else if (id == next)
+				count++;
+			else {
+				id = next;
+				count = 0;
+			}
+		}
+		if (count >= 20) {
+			otg_dbg(otg, "id debounce done = %d\n", id);
+			return id;
+		}
+	}
+
+	return -ENODEV;
+}
+
+static irqreturn_t dwc3_gpio_id_irq(int irq, void *dev)
+{
+	struct dwc_otg2 *otg = dwc3_get_otg();
+	struct intel_dwc_otg_pdata *data;
+	int id;
+
+	data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+	id = dwc3_check_gpio_id(otg);
+	if (id == 0 || id == 1) {
+		if (data->id != id) {
+			data->id = id;
+			dev_info(otg->dev, "ID notification (id = %d)\n",
+					data->id);
+			atomic_notifier_call_chain(&otg->usb2_phy.notifier,
+				USB_EVENT_ID, &id);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void dwc_otg_suspend_discon_work(struct work_struct *work)
+{
+	struct dwc_otg2 *otg = dwc3_get_otg();
+	unsigned long flags;
+
+	otg_dbg(otg, "start suspend_disconn work\n");
+
+	spin_lock_irqsave(&otg->lock, flags);
+	otg->otg_events |= OEVT_A_DEV_SESS_END_DET_EVNT;
+	otg->otg_events &= ~OEVT_B_DEV_SES_VLD_DET_EVNT;
+	dwc3_wakeup_otg_thread(otg);
+	spin_unlock_irqrestore(&otg->lock, flags);
+}
+
+int dwc3_intel_byt_platform_init(struct dwc_otg2 *otg)
+{
+	struct intel_dwc_otg_pdata *data;
+	u32 gctl;
+	int id_value;
+	int retval;
+
+	data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+	if (data)
+		INIT_DELAYED_WORK(&data->suspend_discon_work,
+			dwc_otg_suspend_discon_work);
+
+	if (data && data->gpio_cs && data->gpio_reset) {
+		retval = gpio_request(data->gpio_cs, "phy_cs");
+		if (retval < 0) {
+			otg_err(otg, "failed to request CS pin %d\n",
+					data->gpio_cs);
+			return retval;
+		}
+
+		retval = gpio_request(data->gpio_reset, "phy_reset");
+		if (retval < 0) {
+			otg_err(otg, "failed to request RESET pin %d\n",
+					data->gpio_reset);
+			return retval;
+		}
+	}
+
+	if (data && data->gpio_id) {
+		dev_info(otg->dev,  "USB ID detection - Enabled - GPIO\n");
+
+		/* Set ID default value to 1 Floating */
+		data->id = 1;
+
+		retval = gpio_request(data->gpio_id, "gpio_id");
+		if (retval < 0) {
+			otg_err(otg, "failed to request ID pin %d\n",
+					data->gpio_id);
+			return retval;
+		}
+
+		retval = gpio_direction_input(data->gpio_id);
+		if (retval < 0) {
+			otg_err(otg, "failed to set ID pin %d as input\n",
+					data->gpio_id);
+			return retval;
+		}
+
+		retval = request_threaded_irq(gpio_to_irq(data->gpio_id),
+				NULL, dwc3_gpio_id_irq,
+				IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
+				IRQF_ONESHOT, "dwc-gpio-id", otg->dev);
+
+		if (retval < 0) {
+			otg_err(otg, "failed to request interrupt gpio ID\n");
+			return retval;
+		}
+
+		otg_dbg(otg, "GPIO ID request/interrupt request done\n");
+
+		id_value = dwc3_check_gpio_id(otg);
+		if ((id_value == 0 || id_value == 1) &&
+					(data->id != id_value)) {
+			data->id = id_value;
+			dev_info(otg->dev, "ID notification (id = %d)\n",
+						data->id);
+
+			atomic_notifier_call_chain(&otg->usb2_phy.notifier,
+					USB_EVENT_ID, &id_value);
+		} else
+			otg_dbg(otg, "ID unchanged or invalid (%d)\n", id_value);
+	}
+
+	/* Don't let phy go to suspend mode, which
+	 * will cause FS/LS devices enum failed in host mode.
+	 */
+	set_sus_phy(otg, 0);
+
+	retval = device_create_file(otg->dev, &dev_attr_otg_id);
+	if (retval < 0) {
+		otg_dbg(otg,
+			"Can't register sysfs attribute: %d\n", retval);
+		return -ENOMEM;
+	}
+
+	retval = device_create_file(otg->dev, &dev_attr_vbus_evt);
+	if (retval < 0) {
+		otg_dbg(otg,
+			"Can't register sysfs attribute: %d\n", retval);
+		return -ENOMEM;
+	}
+
+	otg_dbg(otg, "\n");
+	otg_write(otg, OEVTEN, 0);
+	otg_write(otg, OCTL, 0);
+	gctl = otg_read(otg, GCTL);
+	gctl |= GCTL_PRT_CAP_DIR_OTG << GCTL_PRT_CAP_DIR_SHIFT;
+	otg_write(otg, GCTL, gctl);
+
+	return 0;
+}
+
+/* Disable the auto-resume feature of the USB2 PHY. This is a
+ * silicon workaround: auto-resume causes fabric timeout errors
+ * in the LS case after resume from hibernation */
+static void disable_phy_auto_resume(struct dwc_otg2 *otg)
+{
+	u32 data = 0;
+
+	data = otg_read(otg, GUSB2PHYCFG0);
+	data &= ~GUSB2PHYCFG_ULPI_AUTO_RESUME;
+	otg_write(otg, GUSB2PHYCFG0, data);
+}
+
+/* This function will control VUSBPHY to power gate/ungate USBPHY */
+static int enable_usb_phy(struct dwc_otg2 *otg, bool on_off)
+{
+	struct intel_dwc_otg_pdata *data;
+
+	data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+	if (data && data->gpio_cs && data->gpio_reset) {
+		if (on_off) {
+			/* Turn ON phy via CS pin */
+			gpio_direction_output(data->gpio_cs, 1);
+			usleep_range(200, 300);
+
+			/* Do PHY reset after enable the PHY */
+			gpio_direction_output(data->gpio_reset, 0);
+			usleep_range(200, 500);
+			gpio_set_value(data->gpio_reset, 1);
+			msleep(30);
+		} else {
+			/* Turn OFF phy via CS pin */
+			gpio_direction_output(data->gpio_cs, 0);
+		}
+	}
+	return 0;
+}
+
+int dwc3_intel_byt_get_id(struct dwc_otg2 *otg)
+{
+	/* On BYT the ID pin is not connected to USB, so always report FLOAT */
+	return RID_FLOAT;
+}
+
+int dwc3_intel_byt_b_idle(struct dwc_otg2 *otg)
+{
+	u32 gctl, tmp;
+
+	enable_usb_phy(otg, false);
+	dwc_otg_charger_hwdet(false);
+
+	/* Disable hibernation mode by default */
+	gctl = otg_read(otg, GCTL);
+	gctl &= ~GCTL_GBL_HIBERNATION_EN;
+	otg_write(otg, GCTL, gctl);
+
+	/* Reset ADP related registers */
+	otg_write(otg, ADPCFG, 0);
+	otg_write(otg, ADPCTL, 0);
+	otg_write(otg, ADPEVTEN, 0);
+	tmp = otg_read(otg, ADPEVT);
+	otg_write(otg, ADPEVT, tmp);
+
+	otg_write(otg, OCFG, 0);
+	otg_write(otg, OEVTEN, 0);
+	tmp = otg_read(otg, OEVT);
+	otg_write(otg, OEVT, tmp);
+	otg_write(otg, OCTL, OCTL_PERI_MODE);
+
+	/* Force config to device mode as default */
+	gctl = otg_read(otg, GCTL);
+	gctl &= ~GCTL_PRT_CAP_DIR;
+	gctl |= GCTL_PRT_CAP_DIR_DEV << GCTL_PRT_CAP_DIR_SHIFT;
+	otg_write(otg, GCTL, gctl);
+
+	mdelay(100);
+
+	return 0;
+}
+
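+/* Gadget set_power hook: translates the current value reported by the
+ * composite layer (or the OTG_DEVICE_SUSPEND/RESUME pseudo-values) into
+ * power_supply charger notifications, honoring the SMIP
+ * charging_compliance override. */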
+static int dwc3_intel_byt_set_power(struct usb_phy *_otg,
+		unsigned ma)
+{
+	unsigned long flags;
+	struct dwc_otg2 *otg = dwc3_get_otg();
+	struct power_supply_cable_props cap;
+	struct intel_dwc_otg_pdata *data;
+
+	data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+	/* No need to notify charger capability if both charger detection
+	 * and SDP charging are disabled */
+	if (!charger_detect_enable(otg) && !sdp_charging(otg))
+		return 0;
+	else if (otg->charging_cap.chrg_type !=
+			POWER_SUPPLY_CHARGER_TYPE_USB_SDP) {
+		otg_err(otg, "%s: currently, chrg type is not SDP!\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (ma == OTG_DEVICE_SUSPEND) {
+		spin_lock_irqsave(&otg->lock, flags);
+		cap.chrg_type = otg->charging_cap.chrg_type;
+		cap.ma = otg->charging_cap.ma;
+		cap.chrg_evt = POWER_SUPPLY_CHARGER_EVENT_SUSPEND;
+		spin_unlock_irqrestore(&otg->lock, flags);
+
+		/* ma == 0 means a cable with D+/D- open.
+		 * If SMIP is set, notify 500mA.
+		 * Otherwise, notify 0mA.
+		 */
+		if (!cap.ma) {
+			if (data->charging_compliance) {
+				cap.ma = 500;
+				cap.chrg_evt =
+					POWER_SUPPLY_CHARGER_EVENT_CONNECT;
+			}
+		/* For standard SDP, if SMIP set, then ignore suspend */
+		} else if (data->charging_compliance)
+			return 0;
+		/* Standard SDP (cap.ma != 0) and SMIP not set.
+		 * Send 2mA with the SUSPEND event
+		 */
+		else
+			cap.ma = 2;
+
+		if (sdp_charging(otg))
+			atomic_notifier_call_chain(&otg->usb2_phy.notifier,
+					USB_EVENT_ENUMERATED, &cap.ma);
+		else
+			atomic_notifier_call_chain(&otg->usb2_phy.notifier,
+					USB_EVENT_CHARGER, &cap);
+		otg_dbg(otg, "Notify EM CHARGER_EVENT_SUSPEND\n");
+
+		return 0;
+	} else if (ma == OTG_DEVICE_RESUME) {
+		otg_dbg(otg, "Notify EM CHARGER_EVENT_CONNECT\n");
+		dwc3_intel_byt_notify_charger_type(otg,
+				POWER_SUPPLY_CHARGER_EVENT_CONNECT);
+
+		return 0;
+	}
+
+	/* For SMIP set case, only need to report 500/900ma */
+	if (data->charging_compliance) {
+		if ((ma != OTG_USB2_500MA) &&
+				(ma != OTG_USB3_900MA))
+			return 0;
+	}
+
+	/* Convert macro to integer value */
+	switch (ma) {
+	case OTG_USB2_100MA:
+		ma = 100;
+		break;
+	case OTG_USB3_150MA:
+		ma = 150;
+		break;
+	case OTG_USB2_500MA:
+		ma = 500;
+		break;
+	case OTG_USB3_900MA:
+		ma = 900;
+		break;
+	default:
+		otg_err(otg, "Device driver set invalid SDP current value!\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&otg->lock, flags);
+	otg->charging_cap.ma = ma;
+	spin_unlock_irqrestore(&otg->lock, flags);
+
+	dwc3_intel_byt_notify_charger_type(otg,
+			POWER_SUPPLY_CHARGER_EVENT_CONNECT);
+
+	return 0;
+}
+
+int dwc3_intel_byt_enable_vbus(struct dwc_otg2 *otg, int enable)
+{
+	/* Return 0, as VBUS is controlled by FSA in BYT */
+	return 0;
+}
+
+static int dwc3_intel_byt_notify_charger_type(struct dwc_otg2 *otg,
+		enum power_supply_charger_event event)
+{
+	struct power_supply_cable_props cap;
+	unsigned long flags;
+
+	/* Just return if charger detection is not enabled */
+	if (!charger_detect_enable(otg) && !sdp_charging(otg))
+		return 0;
+
+	if (event > POWER_SUPPLY_CHARGER_EVENT_DISCONNECT) {
+		otg_err(otg,
+		"%s: Invalid power_supply_charger_event!\n", __func__);
+		return -EINVAL;
+	}
+
+	if ((otg->charging_cap.chrg_type ==
+			POWER_SUPPLY_CHARGER_TYPE_USB_SDP) &&
+			((otg->charging_cap.ma != 100) &&
+			 (otg->charging_cap.ma != 150) &&
+			 (otg->charging_cap.ma != 500) &&
+			 (otg->charging_cap.ma != 900))) {
+		otg_err(otg, "%s: invalid SDP current!\n", __func__);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&otg->lock, flags);
+	cap.chrg_type = otg->charging_cap.chrg_type;
+	cap.ma = otg->charging_cap.ma;
+	cap.chrg_evt = event;
+	spin_unlock_irqrestore(&otg->lock, flags);
+
+	if (sdp_charging(otg))
+		atomic_notifier_call_chain(&otg->usb2_phy.notifier,
+				USB_EVENT_ENUMERATED, &cap.ma);
+	else
+		atomic_notifier_call_chain(&otg->usb2_phy.notifier,
+				USB_EVENT_CHARGER, &cap);
+
+	return 0;
+}
+
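+/* SW charger detection on the TUSB1211 ULPI PHY, loosely following the
+ * USB Battery Charging (BC1.2) flow: data contact detect first, then an
+ * SE1 check, then primary detection (SDP vs. CDP/DCP) and, if needed,
+ * secondary detection (CDP vs. DCP). */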
+static enum power_supply_charger_cable_type
+			dwc3_intel_byt_get_charger_type(struct dwc_otg2 *otg)
+{
+	struct usb_phy *phy;
+	/* usb_phy_io_read() returns a negative errno on failure, so these
+	 * must be signed for the error checks below to work */
+	int val, vdat_det, chgd_serx_dm;
+	unsigned long timeout, interval;
+	enum power_supply_charger_cable_type type =
+		POWER_SUPPLY_CHARGER_TYPE_NONE;
+
+	/* No need to do charger detection if not enabled */
+	if (!charger_detect_enable(otg))
+		return POWER_SUPPLY_CHARGER_TYPE_USB_SDP;
+
+	phy = usb_get_phy(USB_PHY_TYPE_USB2);
+	if (!phy) {
+		otg_err(otg, "Get USB2 PHY failed\n");
+		return POWER_SUPPLY_CHARGER_TYPE_NONE;
+	}
+
+	/* PHY Enable:
+	 * Power on PHY
+	 */
+	enable_usb_phy(otg, true);
+
+	/* Wait at least 10ms (~5ms before PHY de-asserts DIR,
+	 * XXus for initial Link reg sync-up). */
+	msleep(20);
+
+	/* DCD Enable: Change OPMODE to 01 (Non-driving),
+	 * TermSel to 0, &
+	 * XcvrSel to 01 (enable FS xcvr)
+	 */
+	usb_phy_io_write(phy, FUNCCTRL_OPMODE(1) | FUNCCTRL_XCVRSELECT(1),
+					TUSB1211_FUNC_CTRL_SET);
+
+	usb_phy_io_write(phy, FUNCCTRL_OPMODE(2) | FUNCCTRL_XCVRSELECT(2)
+					| FUNCCTRL_TERMSELECT,
+					TUSB1211_FUNC_CTRL_CLR);
+
+	/*Enable SW control*/
+	usb_phy_io_write(phy, PWCTRL_SW_CONTROL, TUSB1211_POWER_CONTROL_SET);
+
+	/* Enable IDPSRC */
+	usb_phy_io_write(phy, VS3_CHGD_IDP_SRC_EN,
+			TUSB1211_VENDOR_SPECIFIC3_SET);
+
+	/* Check DCD result, use same polling parameter */
+	timeout = jiffies + msecs_to_jiffies(DATACON_TIMEOUT);
+	interval = DATACON_INTERVAL * 1000; /* us */
+
+	/* DCD Check:
+	 * Delay 66.5 ms. (Note:
+	 * TIDP_SRC_ON + TCHGD_SERX_DEB =
+	 * 347.8us + 66.1ms).
+	 */
+	usleep_range(66500, 67000);
+
+	while (!time_after(jiffies, timeout)) {
+		/* Read DP logic level. */
+		val = usb_phy_io_read(phy, TUSB1211_VENDOR_SPECIFIC4);
+		if (val < 0) {
+			otg_err(otg, "ULPI read error! try again\n");
+			continue;
+		}
+
+		if (!(val & VS4_CHGD_SERX_DP)) {
+			otg_info(otg, "Data contact detected!\n");
+			break;
+		}
+
+		/* Polling interval */
+		usleep_range(interval, interval + 2000);
+	}
+
+	/* Disable DP pullup (Idp_src) */
+	usb_phy_io_write(phy, VS3_CHGD_IDP_SRC_EN,
+			TUSB1211_VENDOR_SPECIFIC3_CLR);
+
+	/* SE1 Det Enable:
+	 * Read DP/DM logic level. Note: use DEBUG
+	 * because VS4 isn’t enabled in this situation.
+	 */
+	val = usb_phy_io_read(phy, TUSB1211_DEBUG);
+	if (val < 0)
+		otg_err(otg, "ULPI read error!\n");
+
+	val &= DEBUG_LINESTATE;
+
+	/* If '11': SE1 detected; goto 'Cleanup'.
+	 * Else: goto 'Pri Det Enable'.
+	 */
+	if (val == 3) {
+		type = POWER_SUPPLY_CHARGER_TYPE_SE1;
+		goto cleanup;
+	}
+
+	/* Pri Det Enable:
+	 * Enable VDPSRC.
+	 */
+	usb_phy_io_write(phy, PWCTRL_DP_VSRC_EN, TUSB1211_POWER_CONTROL_SET);
+
+	/* Wait >106.1ms (40ms for BC
+	 * Tvdpsrc_on, 66.1ms for TI CHGD_SERX_DEB).
+	 */
+	msleep(107);
+
+	/* Pri Det Check:
+	 * Check if DM > VDATREF.
+	 */
+	vdat_det = usb_phy_io_read(phy, TUSB1211_POWER_CONTROL);
+	if (vdat_det < 0)
+		otg_err(otg, "ULPI read error!\n");
+
+	vdat_det &= PWCTRL_VDAT_DET;
+
+	/* Check if DM<VLGC */
+	chgd_serx_dm = usb_phy_io_read(phy, TUSB1211_VENDOR_SPECIFIC4);
+	if (chgd_serx_dm < 0)
+		otg_err(otg, "ULPI read error!\n");
+
+	chgd_serx_dm &= VS4_CHGD_SERX_DM;
+
+	/* If VDAT_DET==0 || CHGD_SERX_DM==1: SDP detected
+	 * If VDAT_DET==1 && CHGD_SERX_DM==0: CDP/DCP
+	 */
+	if (vdat_det == 0 || chgd_serx_dm == 1)
+		type = POWER_SUPPLY_CHARGER_TYPE_USB_SDP;
+
+	/* Disable VDPSRC. */
+	usb_phy_io_write(phy, PWCTRL_DP_VSRC_EN, TUSB1211_POWER_CONTROL_CLR);
+
+	/* If SDP, goto “Cleanup”.
+	 * Else, goto “Sec Det Enable”
+	 */
+	if (type == POWER_SUPPLY_CHARGER_TYPE_USB_SDP)
+		goto cleanup;
+
+	/* Sec Det Enable:
+	 * delay 1ms.
+	 */
+	usleep_range(1000, 1500);
+
+	/* Swap DP & DM */
+	usb_phy_io_write(phy, VS1_DATAPOLARITY, TUSB1211_VENDOR_SPECIFIC1_CLR);
+
+	/* Enable 'VDMSRC'. */
+	usb_phy_io_write(phy, PWCTRL_DP_VSRC_EN, TUSB1211_POWER_CONTROL_SET);
+
+	/* Wait >73ms (40ms for BC Tvdmsrc_on, 33ms for TI TVDPSRC_DEB) */
+	msleep(80);
+
+	/* Sec Det Check:
+	 * Check if DP>VDATREF.
+	 */
+	val = usb_phy_io_read(phy, TUSB1211_POWER_CONTROL);
+	if (val < 0)
+		otg_err(otg, "ULPI read error!\n");
+
+	val &= PWCTRL_VDAT_DET;
+
+	/* If VDAT_DET==0: CDP detected.
+	 * If VDAT_DET==1: DCP detected.
+	 */
+	if (!val)
+		type = POWER_SUPPLY_CHARGER_TYPE_USB_CDP;
+	else
+		type = POWER_SUPPLY_CHARGER_TYPE_USB_DCP;
+
+	/* Disable VDMSRC. */
+	usb_phy_io_write(phy, PWCTRL_DP_VSRC_EN, TUSB1211_POWER_CONTROL_CLR);
+
+	/* Swap DP & DM. */
+	usb_phy_io_write(phy, VS1_DATAPOLARITY, TUSB1211_VENDOR_SPECIFIC1_SET);
+
+cleanup:
+
+	/* If DCP detected, assert VDPSRC. */
+	if (type == POWER_SUPPLY_CHARGER_TYPE_USB_DCP)
+		usb_phy_io_write(phy, PWCTRL_SW_CONTROL | PWCTRL_DP_VSRC_EN,
+				TUSB1211_POWER_CONTROL_SET);
+
+	usb_put_phy(phy);
+
+	switch (type) {
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_A:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_B:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_C:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_DCP:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_CDP:
+	case POWER_SUPPLY_CHARGER_TYPE_SE1:
+		dwc_otg_charger_hwdet(true);
+		break;
+	default:
+		break;
+	}
+
+	return type;
+}
+
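+/* OTG notifier callback: maps USB_EVENT_VBUS rise/fall onto the
+ * session-valid/session-end OTG events and wakes the OTG thread. */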
+static int dwc3_intel_byt_handle_notification(struct notifier_block *nb,
+		unsigned long event, void *data)
+{
+	struct dwc_otg2 *otg = dwc3_get_otg();
+	int state, val;
+	unsigned long flags;
+
+	if (!otg)
+		return NOTIFY_BAD;
+
+	val = *(int *)data;
+
+	spin_lock_irqsave(&otg->lock, flags);
+	switch (event) {
+	case USB_EVENT_VBUS:
+		if (val) {
+			otg->otg_events |= OEVT_B_DEV_SES_VLD_DET_EVNT;
+			otg->otg_events &= ~OEVT_A_DEV_SESS_END_DET_EVNT;
+		} else {
+			otg->otg_events |= OEVT_A_DEV_SESS_END_DET_EVNT;
+			otg->otg_events &= ~OEVT_B_DEV_SES_VLD_DET_EVNT;
+		}
+		state = NOTIFY_OK;
+		break;
+	default:
+		otg_dbg(otg, "DWC OTG notifier: unknown message\n");
+		state = NOTIFY_DONE;
+	}
+	dwc3_wakeup_otg_thread(otg);
+	spin_unlock_irqrestore(&otg->lock, flags);
+
+	return state;
+}
+
+int dwc3_intel_byt_prepare_start_host(struct dwc_otg2 *otg)
+{
+	return 0;
+}
+
+int dwc3_intel_byt_prepare_start_peripheral(struct dwc_otg2 *otg)
+{
+	enable_usb_phy(otg, true);
+	usb2phy_eye_optimization(otg);
+	disable_phy_auto_resume(otg);
+
+	return 0;
+}
+
+int dwc3_intel_byt_suspend(struct dwc_otg2 *otg)
+{
+	struct pci_dev *pci_dev;
+	pci_power_t state = PCI_D3hot;
+
+	if (!otg)
+		return 0;
+
+	pci_dev = to_pci_dev(otg->dev);
+
+	set_sus_phy(otg, 1);
+
+	if (pci_save_state(pci_dev)) {
+		otg_err(otg, "pci_save_state failed!\n");
+		return -EIO;
+	}
+
+	pci_disable_device(pci_dev);
+	pci_set_power_state(pci_dev, state);
+
+	return 0;
+}
+
+int dwc3_intel_byt_resume(struct dwc_otg2 *otg)
+{
+	struct pci_dev *pci_dev;
+
+	if (!otg)
+		return 0;
+
+	pci_dev = to_pci_dev(otg->dev);
+
+	/* From synopsys spec 12.2.11.
+	 * Software cannot access memory-mapped I/O space
+	 * for 10ms.
+	 */
+	mdelay(10);
+
+	pci_restore_state(pci_dev);
+	if (pci_enable_device(pci_dev) < 0) {
+		otg_err(otg, "pci_enable_device failed.\n");
+		return -EIO;
+	}
+
+	set_sus_phy(otg, 0);
+
+	return 0;
+}
+
+struct dwc3_otg_hw_ops dwc3_intel_byt_otg_pdata = {
+	.mode = DWC3_DEVICE_ONLY,
+	.bus = DWC3_PCI,
+	.get_id = dwc3_intel_byt_get_id,
+	.b_idle = dwc3_intel_byt_b_idle,
+	.set_power = dwc3_intel_byt_set_power,
+	.enable_vbus = dwc3_intel_byt_enable_vbus,
+	.platform_init = dwc3_intel_byt_platform_init,
+	.get_charger_type = dwc3_intel_byt_get_charger_type,
+	.otg_notifier_handler = dwc3_intel_byt_handle_notification,
+	.prepare_start_peripheral = dwc3_intel_byt_prepare_start_peripheral,
+	.prepare_start_host = dwc3_intel_byt_prepare_start_host,
+	.notify_charger_type = dwc3_intel_byt_notify_charger_type,
+
+	.suspend = dwc3_intel_byt_suspend,
+	.resume = dwc3_intel_byt_resume,
+};
+
+static int __init dwc3_intel_byt_init(void)
+{
+	return dwc3_otg_register(&dwc3_intel_byt_otg_pdata);
+}
+module_init(dwc3_intel_byt_init);
+
+static void __exit dwc3_intel_byt_exit(void)
+{
+	dwc3_otg_unregister(&dwc3_intel_byt_otg_pdata);
+}
+module_exit(dwc3_intel_byt_exit);
+
+MODULE_AUTHOR("Wang Yu <yu.y.wang@intel.com>");
+MODULE_AUTHOR("Wu, Hao <hao.wu@intel.com>");
+MODULE_DESCRIPTION("DWC3 Intel BYT OTG Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(VERSION);
diff --git a/drivers/usb/dwc3/dwc3-intel-mrfl.c b/drivers/usb/dwc3/dwc3-intel-mrfl.c
new file mode 100644
index 0000000..058647c
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-intel-mrfl.c
@@ -0,0 +1,995 @@
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb/otg.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/freezer.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/version.h>
+
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/dwc3-intel-mid.h>
+#include <asm/intel_scu_pmic.h>
+#include "otg.h"
+
+#define VERSION "2.10a"
+
+static int otg_id = -1;
+static int reboot_flag = 1;
+static int enable_usb_phy(struct dwc_otg2 *otg, bool on_off);
+static int dwc3_intel_notify_charger_type(struct dwc_otg2 *otg,
+		enum power_supply_charger_event event);
+static struct power_supply_cable_props cap_record;
+
+static int charger_detect_enable(struct dwc_otg2 *otg)
+{
+	struct intel_dwc_otg_pdata *data;
+
+	if (!otg || !otg->otg_data)
+		return 0;
+
+	data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+	return data->charger_detect_enable;
+}
+
+static int is_basin_cove(struct dwc_otg2 *otg)
+{
+	struct intel_dwc_otg_pdata *data;
+	if (!otg || !otg->otg_data)
+		return -EINVAL;
+
+	data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+	return data->pmic_type == BASIN_COVE;
+}
+
+static int is_hybridvp(struct dwc_otg2 *otg)
+{
+	struct intel_dwc_otg_pdata *data;
+	if (!otg || !otg->otg_data)
+		return -EINVAL;
+
+	data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+	return data->is_hvp;
+}
+
+static void usb2phy_eye_optimization(struct dwc_otg2 *otg)
+{
+	struct usb_phy *phy;
+
+	phy = usb_get_phy(USB_PHY_TYPE_USB2);
+	if (!phy)
+		return;
+
+	/* Set 0x7f for better eye-diagram quality.
+	 * It means ZHSDRV = 0b11 and IHSTX = 0b1111 */
+	usb_phy_io_write(phy, 0x7f, TUSB1211_VENDOR_SPECIFIC1_SET);
+
+	usb_put_phy(phy);
+}
+
+
+/* As we use SW mode for charger detection, we need to notify HW
+ * of the result SW got: charging port or not */
+static int dwc_otg_charger_hwdet(bool enable)
+{
+	int				retval;
+	struct usb_phy *phy;
+	struct dwc_otg2 *otg = dwc3_get_otg();
+
+	/* Just return if charger detection is not enabled */
+	if (!charger_detect_enable(otg))
+		return 0;
+
+	phy = usb_get_phy(USB_PHY_TYPE_USB2);
+	if (!phy)
+		return -ENODEV;
+
+	if (enable) {
+		retval = usb_phy_io_write(phy, PWCTRL_HWDETECT,
+				TUSB1211_POWER_CONTROL_SET);
+		if (retval) {
+			/* don't leak the PHY reference on error */
+			usb_put_phy(phy);
+			return retval;
+		}
+		otg_dbg(otg, "set HWDETECT\n");
+	} else {
+		retval = usb_phy_io_write(phy, PWCTRL_HWDETECT,
+				TUSB1211_POWER_CONTROL_CLR);
+		if (retval) {
+			usb_put_phy(phy);
+			return retval;
+		}
+		otg_dbg(otg, "clear HWDETECT\n");
+	}
+	usb_put_phy(phy);
+
+	return 0;
+}
+
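+/* Query the Basin Cove PMIC ACA/ID detection logic and translate the
+ * decoded RID result into a charger cable type (ACA dock vs. MHL). */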
+static enum power_supply_charger_cable_type
+			basin_cove_aca_check(struct dwc_otg2 *otg)
+{
+	u8 rarbrc;
+	int ret;
+	enum power_supply_charger_cable_type type =
+		POWER_SUPPLY_CHARGER_TYPE_NONE;
+
+	ret = intel_scu_ipc_update_register(PMIC_USBIDCTRL,
+			USBIDCTRL_ACA_DETEN_D1,
+			USBIDCTRL_ACA_DETEN_D1);
+	if (ret)
+		otg_err(otg, "Fail to enable ACA&ID detection logic\n");
+
+	/* Wait >66.1ms (for TCHGD_SERX_DEB) */
+	msleep(66);
+
+	/* Read decoded RID value */
+	ret = intel_scu_ipc_ioread8(PMIC_USBIDSTS, &rarbrc);
+	if (ret)
+		otg_err(otg, "Fail to read decoded RID value\n");
+	rarbrc &= USBIDSTS_ID_RARBRC_STS(3);
+	rarbrc >>= 1;
+
+	/* If ID_RARBRC_STS==01: ACA-Dock detected
+	 * If ID_RARBRC_STS==00: MHL detected
+	 */
+	if (rarbrc == 1) {
+		/* ACA-Dock */
+		type = POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK;
+	} else if (!rarbrc) {
+		/* MHL */
+		type = POWER_SUPPLY_CHARGER_TYPE_MHL;
+	}
+
+	ret = intel_scu_ipc_update_register(PMIC_USBIDCTRL,
+			USBIDCTRL_ACA_DETEN_D1,
+			0);
+	if (ret)
+		otg_err(otg, "Fail to disable ACA&ID detection logic\n");
+
+	return type;
+}
+
+static enum power_supply_charger_cable_type
+		dwc3_intel_aca_check(struct dwc_otg2 *otg)
+{
+	return basin_cove_aca_check(otg);
+}
+
+static ssize_t store_otg_id(struct device *_dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long flags;
+	struct dwc_otg2 *otg = dwc3_get_otg();
+
+	if (!otg)
+		return 0;
+	if (count != 2) {
+		otg_err(otg, "invalid input length, expect one character\n");
+		return -EINVAL;
+	}
+
+	if (count > 0 && buf[count-1] == '\n')
+		((char *) buf)[count-1] = 0;
+
+	switch (buf[0]) {
+	case 'a':
+	case 'A':
+		otg_dbg(otg, "Change ID to A\n");
+		otg->user_events |= USER_ID_A_CHANGE_EVENT;
+		spin_lock_irqsave(&otg->lock, flags);
+		dwc3_wakeup_otg_thread(otg);
+		otg_id = 0;
+		spin_unlock_irqrestore(&otg->lock, flags);
+		return count;
+	case 'b':
+	case 'B':
+		otg_dbg(otg, "Change ID to B\n");
+		otg->user_events |= USER_ID_B_CHANGE_EVENT;
+		spin_lock_irqsave(&otg->lock, flags);
+		dwc3_wakeup_otg_thread(otg);
+		otg_id = 1;
+		spin_unlock_irqrestore(&otg->lock, flags);
+		return count;
+	default:
+		otg_err(otg, "Only changing ID to A or B is supported!\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t
+show_otg_id(struct device *_dev, struct device_attribute *attr, char *buf)
+{
+	char				*next;
+	unsigned			size, t;
+
+	next = buf;
+	size = PAGE_SIZE;
+
+	t = scnprintf(next, size,
+		"USB OTG ID: %s\n",
+		(otg_id ? "B" : "A")
+		);
+	size -= t;
+	next += t;
+
+	return PAGE_SIZE - size;
+}
+
+static DEVICE_ATTR(otg_id, S_IRUGO|S_IWUSR|S_IWGRP,
+			show_otg_id, store_otg_id);
+
+static void set_sus_phy(struct dwc_otg2 *otg, int bit)
+{
+	u32 data = 0;
+
+	data = otg_read(otg, GUSB2PHYCFG0);
+	if (bit)
+		data |= GUSB2PHYCFG_SUS_PHY;
+	else
+		data &= ~GUSB2PHYCFG_SUS_PHY;
+
+	otg_write(otg, GUSB2PHYCFG0, data);
+
+	data = otg_read(otg, GUSB3PIPECTL0);
+	if (bit)
+		data |= GUSB3PIPECTL_SUS_EN;
+	else
+		data &= ~GUSB3PIPECTL_SUS_EN;
+	otg_write(otg, GUSB3PIPECTL0, data);
+}
+
+int dwc3_intel_platform_init(struct dwc_otg2 *otg)
+{
+	u32 gctl;
+	int retval;
+
+	otg_info(otg, "De-assert USBRST# to enable PHY\n");
+	retval = intel_scu_ipc_iowrite8(PMIC_USBPHYCTRL,
+			PMIC_USBPHYCTRL_D0);
+	if (retval)
+		otg_err(otg, "Fail to de-assert USBRST#\n");
+
+	/* Don't let phy go to suspend mode, which
+	 * will cause FS/LS devices enum failed in host mode.
+	 */
+	set_sus_phy(otg, 0);
+
+	retval = device_create_file(otg->dev, &dev_attr_otg_id);
+	if (retval < 0) {
+		otg_dbg(otg,
+			"Can't register sysfs attribute: %d\n", retval);
+		return -ENOMEM;
+	}
+
+	otg_dbg(otg, "\n");
+	otg_write(otg, OEVTEN, 0);
+	otg_write(otg, OCTL, 0);
+	gctl = otg_read(otg, GCTL);
+	gctl |= GCTL_PRT_CAP_DIR_OTG << GCTL_PRT_CAP_DIR_SHIFT;
+	otg_write(otg, GCTL, gctl);
+
+	return 0;
+}
+
+/* Disable the auto-resume feature of the USB2 PHY. This is a
+ * silicon workaround: auto-resume causes fabric timeout errors
+ * in the LS case after resume from hibernation */
+static void disable_phy_auto_resume(struct dwc_otg2 *otg)
+{
+	u32 data = 0;
+
+	data = otg_read(otg, GUSB2PHYCFG0);
+	data &= ~GUSB2PHYCFG_ULPI_AUTO_RESUME;
+	otg_write(otg, GUSB2PHYCFG0, data);
+}
+
+/* This function will control VUSBPHY to power gate/ungate USBPHY */
+static int enable_usb_phy(struct dwc_otg2 *otg, bool on_off)
+{
+	int ret;
+
+	if (on_off) {
+		ret = intel_scu_ipc_update_register(PMIC_VLDOCNT,
+				0xff, PMIC_VLDOCNT_VUSBPHYEN);
+		if (ret)
+			otg_err(otg, "Fail to enable VUSBPHY\n");
+
+		msleep(20);
+	} else {
+		ret = intel_scu_ipc_update_register(PMIC_VLDOCNT,
+				0x00, PMIC_VLDOCNT_VUSBPHYEN);
+		if (ret)
+			otg_err(otg, "Fail to disable VUSBPHY\n");
+	}
+
+	return 0;
+}
+
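+/* Read the decoded RID state from the PMIC USBIDSTS register. The
+ * RID_GND decode differs between PMIC A0 and B0 steppings, hence the
+ * extra vendor/revision check below. */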
+int basin_cove_get_id(struct dwc_otg2 *otg)
+{
+	int ret, id = RID_UNKNOWN;
+	u8 idsts, pmic_id;
+
+	ret = intel_scu_ipc_update_register(PMIC_USBIDCTRL,
+			USBIDCTRL_ACA_DETEN_D1 | PMIC_USBPHYCTRL_D0,
+			USBIDCTRL_ACA_DETEN_D1 | PMIC_USBPHYCTRL_D0);
+	if (ret)
+		otg_err(otg, "Fail to enable ACA&ID detection logic\n");
+
+	mdelay(50);
+
+	ret = intel_scu_ipc_ioread8(PMIC_USBIDSTS, &idsts);
+	if (ret) {
+		otg_err(otg, "Fail to read id\n");
+		return id;
+	}
+
+	if (idsts & USBIDSTS_ID_FLOAT_STS)
+		id = RID_FLOAT;
+	else if (idsts & USBIDSTS_ID_RARBRC_STS(1))
+		id = RID_A;
+	else if (idsts & USBIDSTS_ID_RARBRC_STS(2))
+		id = RID_B;
+	else if (idsts & USBIDSTS_ID_RARBRC_STS(3))
+		id = RID_C;
+	else {
+		/* PMIC A0 reports ID_GND = 0 for RID_GND but PMIC B0
+		 * reports ID_GND = 1 for RID_GND
+		 */
+		ret = intel_scu_ipc_ioread8(0x00, &pmic_id);
+		if (ret) {
+			otg_err(otg, "Fail to read PMIC ID register\n");
+		} else if (((pmic_id & VENDOR_ID_MASK) == BASIN_COVE_PMIC_ID) &&
+			((pmic_id & PMIC_MAJOR_REV) == PMIC_A0_MAJOR_REV)) {
+				if (idsts & USBIDSTS_ID_GND)
+					id = RID_GND;
+		} else {
+			if (!(idsts & USBIDSTS_ID_GND))
+				id = RID_GND;
+		}
+	}
+
+	ret = intel_scu_ipc_update_register(PMIC_USBIDCTRL,
+			USBIDCTRL_ACA_DETEN_D1 | PMIC_USBPHYCTRL_D0,
+			0);
+	if (ret)
+		otg_err(otg, "Fail to disable ACA&ID detection logic\n");
+
+	return id;
+}
+
+int dwc3_intel_get_id(struct dwc_otg2 *otg)
+{
+	return basin_cove_get_id(otg);
+}
+
+int dwc3_intel_b_idle(struct dwc_otg2 *otg)
+{
+	u32 gctl, tmp;
+
+	/* Disable hibernation mode by default */
+	gctl = otg_read(otg, GCTL);
+	gctl &= ~GCTL_GBL_HIBERNATION_EN;
+	otg_write(otg, GCTL, gctl);
+
+	/* Reset ADP related registers */
+	otg_write(otg, ADPCFG, 0);
+	otg_write(otg, ADPCTL, 0);
+	otg_write(otg, ADPEVTEN, 0);
+	tmp = otg_read(otg, ADPEVT);
+	otg_write(otg, ADPEVT, tmp);
+
+	otg_write(otg, OCFG, 0);
+	otg_write(otg, OEVTEN, 0);
+	tmp = otg_read(otg, OEVT);
+	otg_write(otg, OEVT, tmp);
+	otg_write(otg, OCTL, OCTL_PERI_MODE);
+
+	/* Force config to device mode as default */
+	gctl = otg_read(otg, GCTL);
+	gctl &= ~GCTL_PRT_CAP_DIR;
+	gctl |= GCTL_PRT_CAP_DIR_DEV << GCTL_PRT_CAP_DIR_SHIFT;
+	otg_write(otg, GCTL, gctl);
+
+	if (!is_hybridvp(otg)) {
+		dwc_otg_charger_hwdet(false);
+		enable_usb_phy(otg, false);
+	}
+
+	mdelay(100);
+
+	return 0;
+}
+
+static int dwc3_intel_set_power(struct usb_phy *_otg,
+		unsigned ma)
+{
+	unsigned long flags;
+	struct dwc_otg2 *otg = dwc3_get_otg();
+	struct power_supply_cable_props cap;
+	struct intel_dwc_otg_pdata *data;
+
+	data = (struct intel_dwc_otg_pdata *)otg->otg_data;
+
+	if (otg->charging_cap.chrg_type ==
+			POWER_SUPPLY_CHARGER_TYPE_USB_CDP)
+		return 0;
+	else if (otg->charging_cap.chrg_type !=
+			POWER_SUPPLY_CHARGER_TYPE_USB_SDP) {
+		otg_err(otg, "%s: currently, chrg type is not SDP!\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (ma == OTG_DEVICE_SUSPEND) {
+		spin_lock_irqsave(&otg->lock, flags);
+		cap.chrg_type = otg->charging_cap.chrg_type;
+		cap.ma = otg->charging_cap.ma;
+		cap.chrg_evt = POWER_SUPPLY_CHARGER_EVENT_SUSPEND;
+		spin_unlock_irqrestore(&otg->lock, flags);
+
+		/* ma == 0 means a cable with D+/D- open.
+		 * If SMIP is set, notify 500mA.
+		 * Otherwise, notify 0mA.
+		 */
+		if (!cap.ma) {
+			if (data->charging_compliance) {
+				cap.ma = 500;
+				cap.chrg_evt =
+					POWER_SUPPLY_CHARGER_EVENT_CONNECT;
+			}
+		/* For standard SDP, if SMIP set, then ignore suspend */
+		} else if (data->charging_compliance)
+			return 0;
+		/* Standard SDP (cap.ma != 0) and SMIP not set.
+		 * Should send 0mA with SUSPEND event
+		 */
+		else
+			cap.ma = 0;
+
+		atomic_notifier_call_chain(&otg->usb2_phy.notifier,
+				USB_EVENT_CHARGER, &cap);
+		otg_dbg(otg, "Notify EM POWER_SUPPLY_CHARGER_EVENT_SUSPEND\n");
+
+		return 0;
+	} else if (ma == OTG_DEVICE_RESUME) {
+		otg_dbg(otg, "Notify EM POWER_SUPPLY_CHARGER_EVENT_CONNECT\n");
+		dwc3_intel_notify_charger_type(otg,
+				POWER_SUPPLY_CHARGER_EVENT_CONNECT);
+
+		return 0;
+	}
+
+	/* For SMIP set case, only need to report 500/900mA */
+	if (data->charging_compliance) {
+		if ((ma != OTG_USB2_500MA) &&
+				(ma != OTG_USB3_900MA))
+			return 0;
+	}
+
+	/* Convert macro to integer value */
+	switch (ma) {
+	case OTG_USB2_100MA:
+		ma = 100;
+		break;
+	case OTG_USB3_150MA:
+		ma = 150;
+		break;
+	case OTG_USB2_500MA:
+		ma = 500;
+		break;
+	case OTG_USB3_900MA:
+		ma = 900;
+		break;
+	default:
+		otg_err(otg, "Device driver set invalid SDP current value!\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&otg->lock, flags);
+	otg->charging_cap.ma = ma;
+	spin_unlock_irqrestore(&otg->lock, flags);
+
+	dwc3_intel_notify_charger_type(otg,
+			POWER_SUPPLY_CHARGER_EVENT_CONNECT);
+
+	return 0;
+}
+
+int dwc3_intel_enable_vbus(struct dwc_otg2 *otg, int enable)
+{
+	if (reboot_flag) {
+		atomic_notifier_call_chain(&otg->usb2_phy.notifier,
+				USB_EVENT_VBUS, &enable);
+		reboot_flag = 0;
+	}
+	return 0;
+}
+
+static int dwc3_intel_notify_charger_type(struct dwc_otg2 *otg,
+		enum power_supply_charger_event event)
+{
+	struct power_supply_cable_props cap;
+	int ret = 0;
+	unsigned long flags;
+
+	if (!charger_detect_enable(otg) &&
+		(otg->charging_cap.chrg_type !=
+		POWER_SUPPLY_CHARGER_TYPE_USB_SDP))
+		return 0;
+
+	if (event > POWER_SUPPLY_CHARGER_EVENT_DISCONNECT) {
+		otg_err(otg,
+		"%s: Invalid power_supply_charger_event!\n", __func__);
+		return -EINVAL;
+	}
+
+	if ((otg->charging_cap.chrg_type ==
+			POWER_SUPPLY_CHARGER_TYPE_USB_SDP) &&
+			((otg->charging_cap.ma != 0) &&
+			 (otg->charging_cap.ma != 100) &&
+			 (otg->charging_cap.ma != 150) &&
+			 (otg->charging_cap.ma != 500) &&
+			 (otg->charging_cap.ma != 900))) {
+		otg_err(otg, "%s: invalid SDP current!\n", __func__);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&otg->lock, flags);
+	cap.chrg_type = otg->charging_cap.chrg_type;
+	cap.ma = otg->charging_cap.ma;
+	cap.chrg_evt = event;
+	spin_unlock_irqrestore(&otg->lock, flags);
+
+	atomic_notifier_call_chain(&otg->usb2_phy.notifier, USB_EVENT_CHARGER,
+			&cap);
+
+	return ret;
+}
+
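+/* Soft-reset both PHY interfaces while the core is held in soft reset:
+ * assert GCTL.CoreSoftReset, pulse the USB3 PIPE and USB2 UTMI PHY
+ * reset bits with settle delays, then release the core reset. */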
+static void dwc3_phy_soft_reset(struct dwc_otg2 *otg)
+{
+	u32 val;
+
+	val = otg_read(otg, GCTL);
+	val |= GCTL_CORESOFTRESET;
+	otg_write(otg, GCTL, val);
+
+	val = otg_read(otg, GUSB3PIPECTL0);
+	val |= GUSB3PIPECTL_PHYSOFTRST;
+	otg_write(otg, GUSB3PIPECTL0, val);
+
+	val = otg_read(otg, GUSB2PHYCFG0);
+	val |= GUSB2PHYCFG_PHYSOFTRST;
+	otg_write(otg, GUSB2PHYCFG0, val);
+
+	msleep(50);
+
+	val = otg_read(otg, GUSB3PIPECTL0);
+	val &= ~GUSB3PIPECTL_PHYSOFTRST;
+	otg_write(otg, GUSB3PIPECTL0, val);
+
+	val = otg_read(otg, GUSB2PHYCFG0);
+	val &= ~GUSB2PHYCFG_PHYSOFTRST;
+	otg_write(otg, GUSB2PHYCFG0, val);
+
+	msleep(100);
+
+	val = otg_read(otg, GCTL);
+	val &= ~GCTL_CORESOFTRESET;
+	otg_write(otg, GCTL, val);
+}
+
+static enum power_supply_charger_cable_type
+			dwc3_intel_get_charger_type(struct dwc_otg2 *otg)
+{
+	int ret;
+	struct usb_phy *phy;
+	/* usb_phy_io_read() returns a negative errno on failure, so these
+	 * must be signed for the error checks below to work */
+	int val, vdat_det, chgd_serx_dm;
+	unsigned long timeout, interval;
+	enum power_supply_charger_cable_type type =
+		POWER_SUPPLY_CHARGER_TYPE_NONE;
+
+	if (!charger_detect_enable(otg))
+		return cap_record.chrg_type;
+
+	phy = usb_get_phy(USB_PHY_TYPE_USB2);
+	if (!phy) {
+		otg_err(otg, "Get USB2 PHY failed\n");
+		return POWER_SUPPLY_CHARGER_TYPE_NONE;
+	}
+
+	/* PHY Enable:
+	 * Power on PHY
+	 */
+	enable_usb_phy(otg, true);
+	dwc3_phy_soft_reset(otg);
+
+	/* Enable ACA:
+	 * Enable ACA & ID detection logic.
+	 */
+	ret = intel_scu_ipc_update_register(PMIC_USBIDCTRL,
+			USBIDCTRL_ACA_DETEN_D1 | PMIC_USBPHYCTRL_D0,
+			USBIDCTRL_ACA_DETEN_D1 | PMIC_USBPHYCTRL_D0);
+	if (ret)
+		otg_err(otg, "Fail to enable ACA&ID detection logic\n");
+
+	/* DCD Enable: Change OPMODE to 01 (Non-driving),
+	 * TermSel to 0, &
+	 * XcvrSel to 01 (enable FS xcvr)
+	 */
+	usb_phy_io_write(phy, FUNCCTRL_OPMODE(1) | FUNCCTRL_XCVRSELECT(1),
+					TUSB1211_FUNC_CTRL_SET);
+
+	usb_phy_io_write(phy, FUNCCTRL_OPMODE(2) | FUNCCTRL_XCVRSELECT(2)
+					| FUNCCTRL_TERMSELECT,
+					TUSB1211_FUNC_CTRL_CLR);
+
+	/*Enable SW control*/
+	usb_phy_io_write(phy, PWCTRL_SW_CONTROL, TUSB1211_POWER_CONTROL_SET);
+
+	/* Enable IDPSRC */
+	usb_phy_io_write(phy, VS3_CHGD_IDP_SRC_EN,
+			TUSB1211_VENDOR_SPECIFIC3_SET);
+
+	/* Check DCD result, use same polling parameter */
+	timeout = jiffies + msecs_to_jiffies(DATACON_TIMEOUT);
+	interval = DATACON_INTERVAL * 1000; /* us */
+
+	/* DCD Check:
+	 * Delay 66.5 ms. (Note:
+	 * TIDP_SRC_ON + TCHGD_SERX_DEB =
+	 * 347.8us + 66.1ms).
+	 */
+	usleep_range(66500, 67000);
+
+	while (!time_after(jiffies, timeout)) {
+		/* Read DP logic level. */
+		val = usb_phy_io_read(phy, TUSB1211_VENDOR_SPECIFIC4);
+		if (val < 0) {
+			otg_err(otg, "ULPI read error! try again\n");
+			continue;
+		}
+
+		if (!(val & VS4_CHGD_SERX_DP)) {
+			otg_info(otg, "Data contact detected!\n");
+			break;
+		}
+
+		/* Polling interval */
+		usleep_range(interval, interval + 2000);
+	}
+
+	/* Disable DP pullup (Idp_src) */
+	usb_phy_io_write(phy, VS3_CHGD_IDP_SRC_EN,
+			TUSB1211_VENDOR_SPECIFIC3_CLR);
+
+	/* ID Check:
+	 * Check ID pin state.
+	 */
+	val = dwc3_intel_get_id(otg);
+	if (val != RID_FLOAT) {
+		type = dwc3_intel_aca_check(otg);
+		goto cleanup;
+	}
+
+	/* SE1 Det Enable:
+	 * Read DP/DM logic level. Note: use DEBUG
+	 * because VS4 isn't enabled in this situation.
+	 */
+	val = usb_phy_io_read(phy, TUSB1211_DEBUG);
+	if (val < 0)
+		otg_err(otg, "ULPI read error!\n");
+
+	val &= DEBUG_LINESTATE;
+
+	/* If '11': SE1 detected; goto 'Cleanup'.
+	 * Else: goto 'Pri Det Enable'.
+	 */
+	if (val == 3) {
+		type = POWER_SUPPLY_CHARGER_TYPE_SE1;
+		goto cleanup;
+	}
+
+	/* Pri Det Enable:
+	 * Enable VDPSRC.
+	 */
+	usb_phy_io_write(phy, PWCTRL_DP_VSRC_EN, TUSB1211_POWER_CONTROL_SET);
+
+	/* Wait >106.1ms (40ms for BC
+	 * Tvdpsrc_on, 66.1ms for TI CHGD_SERX_DEB).
+	 */
+	msleep(107);
+
+	/* Pri Det Check:
+	 * Check if DM > VDATREF.
+	 */
+	vdat_det = usb_phy_io_read(phy, TUSB1211_POWER_CONTROL);
+	if (vdat_det < 0)
+		otg_err(otg, "ULPI read error!\n");
+
+	vdat_det &= PWCTRL_VDAT_DET;
+
+	/* Check if DM<VLGC */
+	chgd_serx_dm = usb_phy_io_read(phy, TUSB1211_VENDOR_SPECIFIC4);
+	if (chgd_serx_dm < 0)
+		otg_err(otg, "ULPI read error!\n");
+
+	chgd_serx_dm &= VS4_CHGD_SERX_DM;
+
+	/* If VDAT_DET==0 || CHGD_SERX_DM==1: SDP detected
+	 * If VDAT_DET==1 && CHGD_SERX_DM==0: CDP/DCP
+	 */
+	if (vdat_det == 0 || chgd_serx_dm == 1)
+		type = POWER_SUPPLY_CHARGER_TYPE_USB_SDP;
+
+	/* Disable VDPSRC. */
+	usb_phy_io_write(phy, PWCTRL_DP_VSRC_EN, TUSB1211_POWER_CONTROL_CLR);
+
+	/* If SDP, goto “Cleanup”.
+	 * Else, goto “Sec Det Enable”
+	 */
+	if (type == POWER_SUPPLY_CHARGER_TYPE_USB_SDP)
+		goto cleanup;
+
+	/* Sec Det Enable:
+	 * delay 1ms.
+	 */
+	usleep_range(1000, 1500);
+
+	/* Swap DP & DM */
+	usb_phy_io_write(phy, VS1_DATAPOLARITY, TUSB1211_VENDOR_SPECIFIC1_CLR);
+
+	/* Enable 'VDMSRC'. */
+	usb_phy_io_write(phy, PWCTRL_DP_VSRC_EN, TUSB1211_POWER_CONTROL_SET);
+
+	/* Wait >73ms (40ms for BC Tvdmsrc_on, 33ms for TI TVDPSRC_DEB) */
+	msleep(80);
+
+	/* Sec Det Check:
+	 * Check if DP>VDATREF.
+	 */
+	val = usb_phy_io_read(phy, TUSB1211_POWER_CONTROL);
+	if (val < 0)
+		otg_err(otg, "ULPI read error!\n");
+
+	val &= PWCTRL_VDAT_DET;
+
+	/* If VDAT_DET==0: CDP detected.
+	 * If VDAT_DET==1: DCP detected.
+	 */
+	if (!val)
+		type = POWER_SUPPLY_CHARGER_TYPE_USB_CDP;
+	else
+		type = POWER_SUPPLY_CHARGER_TYPE_USB_DCP;
+
+	/* Disable VDMSRC. */
+	usb_phy_io_write(phy, PWCTRL_DP_VSRC_EN, TUSB1211_POWER_CONTROL_CLR);
+
+	/* Swap DP & DM. */
+	usb_phy_io_write(phy, VS1_DATAPOLARITY, TUSB1211_VENDOR_SPECIFIC1_SET);
+
+cleanup:
+
+	/* If DCP detected, assert VDPSRC. */
+	if (type == POWER_SUPPLY_CHARGER_TYPE_USB_DCP)
+		usb_phy_io_write(phy, PWCTRL_SW_CONTROL | PWCTRL_DP_VSRC_EN,
+				TUSB1211_POWER_CONTROL_SET);
+
+	usb_put_phy(phy);
+
+	switch (type) {
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_A:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_B:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_C:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_DCP:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_CDP:
+	case POWER_SUPPLY_CHARGER_TYPE_SE1:
+		dwc_otg_charger_hwdet(true);
+		break;
+	default:
+		break;
+	}
+
+	return type;
+}
+
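+/* OTG notifier callback for MRFL: besides ID and VBUS events, it accepts
+ * externally detected USB_EVENT_CHARGER notifications (when SW charger
+ * detection is disabled) and caches them in cap_record so that
+ * dwc3_intel_get_charger_type() can report the cached type later. */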
+static int dwc3_intel_handle_notification(struct notifier_block *nb,
+		unsigned long event, void *data)
+{
+	int state;
+	unsigned long flags, valid_chrg_type;
+	struct dwc_otg2 *otg = dwc3_get_otg();
+	struct power_supply_cable_props *cap;
+
+	if (!otg)
+		return NOTIFY_BAD;
+
+	valid_chrg_type = POWER_SUPPLY_CHARGER_TYPE_USB_SDP |
+		POWER_SUPPLY_CHARGER_TYPE_USB_CDP |
+		POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK;
+
+	spin_lock_irqsave(&otg->lock, flags);
+	switch (event) {
+	case USB_EVENT_ID:
+		otg->otg_events |= OEVT_CONN_ID_STS_CHNG_EVNT;
+		state = NOTIFY_OK;
+		break;
+	case USB_EVENT_VBUS:
+		if (*(int *)data) {
+			otg->otg_events |= OEVT_B_DEV_SES_VLD_DET_EVNT;
+			otg->otg_events &= ~OEVT_A_DEV_SESS_END_DET_EVNT;
+		} else {
+			otg->otg_events |= OEVT_A_DEV_SESS_END_DET_EVNT;
+			otg->otg_events &= ~OEVT_B_DEV_SES_VLD_DET_EVNT;
+		}
+		state = NOTIFY_OK;
+		break;
+	case USB_EVENT_CHARGER:
+		if (charger_detect_enable(otg)) {
+			state = NOTIFY_DONE;
+			goto done;
+		}
+		cap = (struct power_supply_cable_props *)data;
+		if (!(cap->chrg_type & valid_chrg_type)) {
+			otg_err(otg, "Invalid charger type!\n");
+			state = NOTIFY_BAD;
+			goto done;
+		}
+		if (cap->chrg_evt == POWER_SUPPLY_CHARGER_EVENT_CONNECT) {
+			otg->otg_events |= OEVT_B_DEV_SES_VLD_DET_EVNT;
+			otg->otg_events &= ~OEVT_A_DEV_SESS_END_DET_EVNT;
+
+			cap_record.chrg_type = cap->chrg_type;
+			cap_record.ma = cap->ma;
+			cap_record.chrg_evt = cap->chrg_evt;
+		} else if (cap->chrg_evt ==
+				POWER_SUPPLY_CHARGER_EVENT_DISCONNECT) {
+			otg->otg_events |= OEVT_A_DEV_SESS_END_DET_EVNT;
+			otg->otg_events &= ~OEVT_B_DEV_SES_VLD_DET_EVNT;
+
+			cap_record.chrg_type = POWER_SUPPLY_CHARGER_TYPE_NONE;
+			cap_record.ma = 0;
+			cap_record.chrg_evt =
+				POWER_SUPPLY_CHARGER_EVENT_DISCONNECT;
+		}
+		state = NOTIFY_OK;
+		break;
+	default:
+		otg_dbg(otg, "DWC OTG notifier: unknown message\n");
+		state = NOTIFY_DONE;
+	}
+
+done:
+	dwc3_wakeup_otg_thread(otg);
+	spin_unlock_irqrestore(&otg->lock, flags);
+
+	return state;
+}
+
+int dwc3_intel_prepare_start_host(struct dwc_otg2 *otg)
+{
+	if (!is_hybridvp(otg)) {
+		enable_usb_phy(otg, true);
+		usb2phy_eye_optimization(otg);
+		disable_phy_auto_resume(otg);
+	}
+
+	return 0;
+}
+
+int dwc3_intel_prepare_start_peripheral(struct dwc_otg2 *otg)
+{
+	if (!is_hybridvp(otg)) {
+		enable_usb_phy(otg, true);
+		usb2phy_eye_optimization(otg);
+		disable_phy_auto_resume(otg);
+	}
+
+	return 0;
+}
+
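+/* PCI suspend: default to D3cold, but only go to D3hot when a session
+ * is active (B peripheral or A host) so the controller context
+ * survives; the PHY is suspended via set_sus_phy() before saving
+ * state. */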
+int dwc3_intel_suspend(struct dwc_otg2 *otg)
+{
+	struct pci_dev *pci_dev;
+	pci_power_t state = PCI_D3cold;
+
+	if (!otg)
+		return 0;
+
+	pci_dev = to_pci_dev(otg->dev);
+
+	if (otg->state == DWC_STATE_B_PERIPHERAL ||
+			otg->state == DWC_STATE_A_HOST)
+		state = PCI_D3hot;
+
+	set_sus_phy(otg, 1);
+
+	if (pci_save_state(pci_dev)) {
+		otg_err(otg, "pci_save_state failed!\n");
+		return -EIO;
+	}
+
+	pci_disable_device(pci_dev);
+	pci_set_power_state(pci_dev, state);
+
+	return 0;
+}
+
+int dwc3_intel_resume(struct dwc_otg2 *otg)
+{
+	struct pci_dev *pci_dev;
+
+	if (!otg)
+		return 0;
+
+	pci_dev = to_pci_dev(otg->dev);
+
+	/* From synopsys spec 12.2.11.
+	 * Software cannot access memory-mapped I/O space
+	 * for 10ms. Delay 5 ms here should be enough. Too
+	 * long a delay causes hibernation exit failure.
+	 */
+	mdelay(5);
+
+	pci_restore_state(pci_dev);
+	if (pci_enable_device(pci_dev) < 0) {
+		otg_err(otg, "pci_enable_device failed.\n");
+		return -EIO;
+	}
+
+	set_sus_phy(otg, 0);
+
+	/* Delay 1ms to wait for the PHY clock to debounce.
+	 * Without this delay, fabric errors occur randomly.
+	 */
+	mdelay(1);
+
+	return 0;
+}
+
+struct dwc3_otg_hw_ops dwc3_intel_otg_pdata = {
+	.mode = DWC3_DRD,
+	.bus = DWC3_PCI,
+	.get_id = dwc3_intel_get_id,
+	.b_idle = dwc3_intel_b_idle,
+	.set_power = dwc3_intel_set_power,
+	.enable_vbus = dwc3_intel_enable_vbus,
+	.platform_init = dwc3_intel_platform_init,
+	.get_charger_type = dwc3_intel_get_charger_type,
+	.otg_notifier_handler = dwc3_intel_handle_notification,
+	.prepare_start_peripheral = dwc3_intel_prepare_start_peripheral,
+	.prepare_start_host = dwc3_intel_prepare_start_host,
+	.notify_charger_type = dwc3_intel_notify_charger_type,
+
+	.suspend = dwc3_intel_suspend,
+	.resume = dwc3_intel_resume,
+};
+
+static int __init dwc3_intel_init(void)
+{
+	return dwc3_otg_register(&dwc3_intel_otg_pdata);
+}
+module_init(dwc3_intel_init);
+
+static void __exit dwc3_intel_exit(void)
+{
+	dwc3_otg_unregister(&dwc3_intel_otg_pdata);
+}
+module_exit(dwc3_intel_exit);
+
+MODULE_AUTHOR("Wang Yu <yu.y.wang@intel.com>");
+MODULE_DESCRIPTION("DWC3 Intel OTG Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(VERSION);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 2357c4e..2a36798 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -209,6 +209,10 @@
 	{
 		PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
 				PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3),
+		/* FIXME: move to pci_ids.h */
+	}, {
+#define PCI_DEVICE_ID_INTEL_DWC_TNG	0x119e
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DWC_TNG),
 	},
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 27040a6..8999958 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -417,6 +417,9 @@
 			if (dwc->speed != DWC3_DSTS_SUPERSPEED)
 				return -EINVAL;
 
+			if (dwc->is_ebc)
+				break;
+
 			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
 			if (set)
 				reg |= DWC3_DCTL_INITU1ENA;
@@ -431,6 +434,9 @@
 			if (dwc->speed != DWC3_DSTS_SUPERSPEED)
 				return -EINVAL;
 
+			if (dwc->is_ebc)
+				break;
+
 			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
 			if (set)
 				reg |= DWC3_DCTL_INITU2ENA;
@@ -562,9 +568,12 @@
 			 * Enable transition to U1/U2 state when
 			 * nothing is pending from application.
 			 */
-			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
-			reg |= (DWC3_DCTL_ACCEPTU1ENA | DWC3_DCTL_ACCEPTU2ENA);
-			dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+			if (!dwc->is_ebc) {
+				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+				reg |= (DWC3_DCTL_ACCEPTU1ENA
+					| DWC3_DCTL_ACCEPTU2ENA);
+				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+			}
 
 			dwc->resize_fifos = true;
 			dev_dbg(dwc->dev, "resize fifos flag SET\n");
@@ -782,6 +791,9 @@
 
 	dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
 
+	if (list_empty(&ep0->request_list))
+		return;
+
 	r = next_request(&ep0->request_list);
 	ur = &r->request;
 
@@ -1030,6 +1042,25 @@
 			return;
 		}
 
+		/*
+		 * Per databook, if an XferNotready(Data) is received after
+		 * XferComplete(Data), one possible reason is that the host is
+		 * trying to complete the data stage by moving a 0-length
+		 * packet.
+		 *
+		 * REVISIT: handle other possible causes
+		 */
+		if (dwc->ep0_next_event == DWC3_EP0_NRDY_STATUS) {
+			u32		size = 0;
+			struct dwc3_ep *dep = dwc->eps[event->endpoint_number];
+
+			if (dep->number == 0)
+				size = dep->endpoint.maxpacket;
+
+			dwc3_ep0_start_trans(dwc, dep->number,
+				dwc->ctrl_req_addr, size,
+				DWC3_TRBCTL_CONTROL_DATA);
+		}
+
 		break;
 
 	case DEPEVT_STATUS_CONTROL_STATUS:
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 0dfee61..eb7558f 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -49,10 +49,16 @@
 
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
+#include <linux/usb/phy.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/ulpi.h>
 
 #include "core.h"
 #include "gadget.h"
 #include "io.h"
+#include "otg.h"
+
+static LIST_HEAD(ebc_io_ops);
 
 /**
  * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
@@ -267,7 +273,7 @@
 
 	if (dwc->ep0_bounced && dep->number == 0)
 		dwc->ep0_bounced = false;
-	else
+	else if (!dep->ebc)
 		usb_gadget_unmap_request(&dwc->gadget, &req->request,
 				req->direction);
 
@@ -339,7 +345,7 @@
 		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
 {
 	struct dwc3_ep		*dep = dwc->eps[ep];
-	u32			timeout = 500;
+	u32			timeout = 5000;
 	u32			reg;
 
 	dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
@@ -392,9 +398,13 @@
 	if (dep->number == 0 || dep->number == 1)
 		return 0;
 
-	dep->trb_pool = dma_alloc_coherent(dwc->dev,
-			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
-			&dep->trb_pool_dma, GFP_KERNEL);
+	if (dep->ebc)
+		dep->trb_pool = dep->ebc->alloc_static_trb_pool(
+				&dep->trb_pool_dma);
+	else
+		dep->trb_pool = dma_alloc_coherent(dwc->dev,
+				sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
+				&dep->trb_pool_dma, GFP_KERNEL);
 	if (!dep->trb_pool) {
 		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
 				dep->name);
@@ -408,7 +418,11 @@
 {
 	struct dwc3		*dwc = dep->dwc;
 
-	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
+	if (dep->ebc)
+		dep->ebc->free_static_trb_pool();
+	else
+		dma_free_coherent(dwc->dev,
+			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
 			dep->trb_pool, dep->trb_pool_dma);
 
 	dep->trb_pool = NULL;
@@ -441,35 +455,58 @@
 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
 		const struct usb_endpoint_descriptor *desc,
 		const struct usb_ss_ep_comp_descriptor *comp_desc,
-		bool ignore)
+		bool ignore, u32 cfg_action)
 {
 	struct dwc3_gadget_ep_cmd_params params;
 
 	memset(&params, 0x00, sizeof(params));
 
 	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
-		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
+		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc))
+		| cfg_action;
 
-	/* Burst size is only needed in SuperSpeed mode */
-	if (dwc->gadget.speed == USB_SPEED_SUPER) {
-		u32 burst = dep->endpoint.maxburst - 1;
+	if (dep->ebc) {
+		if (dwc->gadget.speed == USB_SPEED_SUPER) {
+			u32 burst = 0;
 
-		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
-	}
+			params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
+		}
 
-	if (ignore)
 		params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
 
-	params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
-		| DWC3_DEPCFG_XFER_NOT_READY_EN;
+		params.param1 = DWC3_DEPCFG_EBC_MODE_EN;
 
-	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
-		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
-			| DWC3_DEPCFG_STREAM_EVENT_EN;
-		dep->stream_capable = true;
+		if (dep->ebc->is_ondemand)
+			params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;
+
+		dep->stream_capable = false;
+	} else {
+		/* Burst size is only needed in SuperSpeed mode */
+		if (dwc->gadget.speed == USB_SPEED_SUPER) {
+			/* In case a function forgets to set maxburst, maxburst
+			 * may still be 0, and we shouldn't subtract 1 from it.
+			 */
+			u32 burst = dep->endpoint.maxburst ?
+					dep->endpoint.maxburst - 1 : 0;
+
+			params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
+		}
+
+		if (ignore)
+			params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
+
+		params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
+			| DWC3_DEPCFG_XFER_NOT_READY_EN;
+
+		if (usb_ss_max_streams(comp_desc) &&
+				usb_endpoint_xfer_bulk(desc)) {
+			params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
+				| DWC3_DEPCFG_STREAM_EVENT_EN;
+			dep->stream_capable = true;
+		}
 	}
 
-	if (usb_endpoint_xfer_isoc(desc))
+	if (usb_endpoint_xfer_isoc(desc) || usb_endpoint_is_bulk_out(desc))
 		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
 
 	/*
@@ -492,6 +529,56 @@
 		dep->interval = 1 << (desc->bInterval - 1);
 	}
 
+	if (cfg_action == DWC3_DEPCFG_ACTION_RESTORE)
+		params.param2 = dep->ep_state;
+
+	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
+			DWC3_DEPCMD_SETEPCONFIG, &params);
+}
+
+static int dwc3_gadget_update_ebc_ep_config(struct dwc3 *dwc,
+		struct dwc3_ep *dep,
+		const struct usb_endpoint_descriptor *desc,
+		const struct usb_ss_ep_comp_descriptor *comp_desc,
+		bool ignore_nrdy)
+{
+	u16	maxp;
+	struct dwc3_gadget_ep_cmd_params params;
+
+	if (!dep->ebc)
+		return -EINVAL;
+
+	memset(&params, 0x00, sizeof(params));
+
+	maxp = usb_endpoint_maxp(desc);
+
+	params.param0 =	DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
+		| DWC3_DEPCFG_MAX_PACKET_SIZE(maxp)
+		| DWC3_DEPCFG_ACTION_MODIFY;
+
+	if (dwc->gadget.speed == USB_SPEED_SUPER) {
+		u32 burst = 0;
+
+		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
+	}
+	params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
+	params.param1 = DWC3_DEPCFG_EBC_MODE_EN;
+
+	if (!ignore_nrdy)
+		params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;
+
+	dep->stream_capable = false;
+
+	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
+
+	if (dep->direction)
+		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
+
+	if (desc->bInterval) {
+		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
+		dep->interval = 1 << (desc->bInterval - 1);
+	}
+
 	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
 			DWC3_DEPCMD_SETEPCONFIG, &params);
 }
@@ -530,7 +617,8 @@
 			return ret;
 	}
 
-	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore);
+	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
+				DWC3_DEPCFG_ACTION_INIT);
 	if (ret)
 		return ret;
 
@@ -551,6 +639,9 @@
 		reg |= DWC3_DALEPENA_EP(dep->number);
 		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
 
+		if (dep->ebc)
+			dwc->is_ebc = 1;
+
 		if (!usb_endpoint_xfer_isoc(desc))
 			return 0;
 
@@ -569,13 +660,13 @@
 	return 0;
 }
 
-static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);
+static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, int forcerm);
 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
 {
 	struct dwc3_request		*req;
 
 	if (!list_empty(&dep->req_queued)) {
-		dwc3_stop_active_transfer(dwc, dep->number);
+		dwc3_stop_active_transfer(dwc, dep->number, 1);
 
 		/* - giveback all requests to gadget driver */
 		while (!list_empty(&dep->req_queued)) {
@@ -603,8 +694,16 @@
 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
 {
 	struct dwc3		*dwc = dep->dwc;
+	struct ebc_io		*ebc = dep->ebc;
 	u32			reg;
 
+	if (ebc) {
+		dwc->is_ebc = 0;
+
+		if (ebc->is_ondemand && ebc->xfer_stop)
+			ebc->xfer_stop();
+	}
+
 	dwc3_remove_requests(dwc, dep);
 
 	/* make sure HW endpoint isn't stalled */
@@ -621,6 +720,10 @@
 	dep->type = 0;
 	dep->flags = 0;
 
+	/* set normal endpoint maxpacket to default value */
+	if (dep->number > 1)
+		dep->endpoint.maxpacket = 1024;
+
 	return 0;
 }
 
@@ -707,7 +810,8 @@
 	dep = to_dwc3_ep(ep);
 	dwc = dep->dwc;
 
-	if (!(dep->flags & DWC3_EP_ENABLED)) {
+	if (!(dep->flags & DWC3_EP_ENABLED) &&
+		dep->flags != DWC3_EP_HIBERNATION) {
 		dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
 				dep->name);
 		return 0;
@@ -758,7 +862,8 @@
  */
 static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
 		struct dwc3_request *req, dma_addr_t dma,
-		unsigned length, unsigned last, unsigned chain, unsigned node)
+		unsigned length, unsigned last, unsigned chain,
+		unsigned node, unsigned csp)
 {
 	struct dwc3		*dwc = dep->dwc;
 	struct dwc3_trb		*trb;
@@ -825,6 +930,12 @@
 	if (chain)
 		trb->ctrl |= DWC3_TRB_CTRL_CHN;
 
+	if (csp) {
+		trb->ctrl |= DWC3_TRB_CTRL_CSP;
+		trb->ctrl |= DWC3_TRB_CTRL_IOC;
+	}
+
 	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
 		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
 
@@ -889,7 +1000,8 @@
 	}
 
 	/* The last TRB is a link TRB, not used for xfer */
-	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
+	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+		(dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1)
 		return;
 
 	list_for_each_entry_safe(req, n, &dep->request_list, list) {
@@ -924,7 +1036,7 @@
 					chain = false;
 
 				dwc3_prepare_one_trb(dep, req, dma, length,
-						last_one, chain, i);
+						last_one, chain, i, false);
 
 				if (last_one)
 					break;
@@ -933,6 +1045,8 @@
 			if (last_one)
 				break;
 		} else {
+			unsigned csp = false;
+
 			dma = req->request.dma;
 			length = req->request.length;
 			trbs_left--;
@@ -944,8 +1058,13 @@
 			if (list_is_last(&req->list, &dep->request_list))
 				last_one = 1;
 
+			/* For a bulk-out ep, if the request is a short packet
+			 * and not the last one, enable CSP. */
+			if (req->short_packet && !last_one)
+				csp = true;
+
 			dwc3_prepare_one_trb(dep, req, dma, length,
-					last_one, false, 0);
+					last_one, false, 0, csp);
 
 			if (last_one)
 				break;
@@ -953,6 +1072,115 @@
 	}
 }
 
+/*
+ * dwc3_prepare_ebc_trbs - setup TRBs from DvC endpoint requests
+ * @dep: endpoint for which requests are being prepared
+ * @starting: true if the endpoint is idle and no requests are queued.
+ *
+ * The function goes through the request list and sets up TRBs for the
+ * transfers.
+ */
+static void dwc3_prepare_ebc_trbs(struct dwc3_ep *dep,
+		bool starting)
+{
+	struct dwc3_request	*req, *n;
+	struct dwc3_trb		*trb_st_hw;
+	struct dwc3_trb		*trb_link;
+	struct dwc3_trb		*trb;
+	u32			trbs_left;
+	u32			trbs_num;
+	u32			trbs_mask;
+
+	/* BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);*/
+	trbs_num = dep->ebc->static_trb_pool_size;
+	trbs_mask = trbs_num - 1;
+
+	/* the first request must not be queued */
+	trbs_left = (dep->busy_slot - dep->free_slot) & trbs_mask;
+	/*
+	 * If busy and free slot are equal then the ring is either full or
+	 * empty. If we are starting to process requests then we are empty.
+	 * Otherwise we are full and don't do anything.
+	 */
+	if (!trbs_left) {
+		if (!starting)
+			return;
+		trbs_left = trbs_num;
+		dep->busy_slot = 0;
+		dep->free_slot = 0;
+	}
+
+	/* The tail TRB is a link TRB, not used for xfer */
+	if (trbs_left <= 1)
+		return;
+
+	list_for_each_entry_safe(req, n, &dep->request_list, list) {
+		unsigned int last_one = 0;
+		unsigned int cur_slot;
+
+		/* revisit: don't use a specific TRB buffer for the Debug class? */
+		trb = &dep->trb_pool[dep->free_slot & trbs_mask];
+		cur_slot = dep->free_slot;
+		dep->free_slot++;
+
+		/* Skip the LINK-TRB */
+		if (((cur_slot & trbs_mask) == trbs_num - 1))
+			continue;
+
+		dwc3_gadget_move_request_queued(req);
+		trbs_left--;
+
+		/* Is our TRB pool empty? */
+		if (!trbs_left)
+			last_one = 1;
+		/* Is this the last request? */
+		if (list_empty(&dep->request_list))
+			last_one = 1;
+
+		req->trb = trb;
+		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
+
+		trb->size = DWC3_TRB_SIZE_LENGTH(req->request.length);
+		trb->bpl = lower_32_bits(req->request.dma);
+		trb->bph = upper_32_bits(req->request.dma);
+
+		switch (usb_endpoint_type(dep->endpoint.desc)) {
+		case USB_ENDPOINT_XFER_BULK:
+			trb->ctrl = DWC3_TRBCTL_NORMAL;
+			break;
+
+		case USB_ENDPOINT_XFER_CONTROL:
+		case USB_ENDPOINT_XFER_ISOC:
+		case USB_ENDPOINT_XFER_INT:
+		default:
+			/*
+			 * This is only possible with faulty memory because we
+			 * checked it already :)
+			 */
+			BUG();
+		}
+
+		trb->ctrl |= DWC3_TRB_CTRL_HWO | DWC3_TRB_CTRL_CHN;
+
+		if (last_one) {
+			if (trbs_left >= 1) {
+				trb_st_hw = &dep->trb_pool[0];
+
+				trb_link = &dep->trb_pool[dep->free_slot &
+						trbs_mask];
+				trb_link->bpl = lower_32_bits(
+					dwc3_trb_dma_offset(dep, trb_st_hw));
+				trb_link->bph = upper_32_bits(
+					dwc3_trb_dma_offset(dep, trb_st_hw));
+				trb_link->ctrl = DWC3_TRBCTL_LINK_TRB;
+				trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
+				trb_link->size = 0;
+			}
+			break;
+		}
+	}
+}
+
 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
 		int start_new)
 {
@@ -973,8 +1201,11 @@
 	 * new requests as we try to set the IOC bit only on the last request.
 	 */
 	if (start_new) {
-		if (list_empty(&dep->req_queued))
-			dwc3_prepare_trbs(dep, start_new);
+		if (dep->ebc)
+			dwc3_prepare_ebc_trbs(dep, start_new);
+		else if (list_empty(&dep->req_queued))
+			dwc3_prepare_trbs(dep, start_new);
 
 		/* req points to the first request which will be sent */
 		req = next_request(&dep->req_queued);
@@ -1011,8 +1242,9 @@
 		 * here and stop, unmap, free and del each of the linked
 		 * requests instead of what we do now.
 		 */
-		usb_gadget_unmap_request(&dwc->gadget, &req->request,
-				req->direction);
+		if (!dep->ebc)
+			usb_gadget_unmap_request(&dwc->gadget, &req->request,
+					req->direction);
 		list_del(&req->list);
 		return ret;
 	}
@@ -1025,6 +1257,26 @@
 		WARN_ON_ONCE(!dep->resource_index);
 	}
 
+	if (dep->ebc) {
+		if (dep->ebc->is_ondemand == 1) {
+			ret = dwc3_gadget_update_ebc_ep_config(dwc, dep,
+				dep->endpoint.desc, dep->comp_desc, true);
+
+			if (ret < 0) {
+				dev_dbg(dwc->dev,
+					"DEPCFG command failed on %s\n",
+					dep->name);
+				return ret;
+			}
+			dev_dbg(dwc->dev,
+				"successfully updated DEPCFG command on %s\n",
+				dep->name);
+		}
+
+		if (dep->ebc->xfer_start)
+			dep->ebc->xfer_start();
+	}
+
 	return 0;
 }
 
@@ -1067,6 +1319,29 @@
 	req->direction		= dep->direction;
 	req->epnum		= dep->number;
 
+	/* specific handling for debug class */
+	if (dep->ebc) {
+		list_add_tail(&req->list, &dep->request_list);
+
+		if ((dep->ebc->is_ondemand == 1) &&
+			(!(dep->flags & DWC3_EP_PENDING_REQUEST))) {
+			dev_dbg(dwc->dev, "%s: delayed to kick ebc transfers\n",
+				dep->name);
+			return 0;
+		}
+
+		if (dep->flags & DWC3_EP_BUSY) {
+			dwc3_stop_active_transfer(dwc, dep->number, 1);
+			dep->flags = DWC3_EP_ENABLED;
+		}
+
+		ret = __dwc3_gadget_kick_transfer(dep, 0, true);
+		if (ret)
+			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
+					dep->name);
+		return ret;
+	}
+
 	/*
 	 * We only add to our list of requests now and
 	 * start consuming the list once we get XferNotReady
@@ -1106,7 +1381,7 @@
 		 */
 		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
 			if (list_empty(&dep->req_queued)) {
-				dwc3_stop_active_transfer(dwc, dep->number);
+				dwc3_stop_active_transfer(dwc, dep->number, 1);
 				dep->flags = DWC3_EP_ENABLED;
 			}
 			return 0;
@@ -1150,16 +1425,28 @@
 
 	int				ret;
 
+	spin_lock_irqsave(&dwc->lock, flags);
 	if (!dep->endpoint.desc) {
 		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
 				request, ep->name);
+		spin_unlock_irqrestore(&dwc->lock, flags);
 		return -ESHUTDOWN;
 	}
 
 	dev_vdbg(dwc->dev, "queueing request %p to %s length %d\n",
 			request, ep->name, request->length);
 
-	spin_lock_irqsave(&dwc->lock, flags);
+	/* pad OUT endpoint buffer to MaxPacketSize per databook requirement */
+	req->short_packet = false;
+	if (!IS_ALIGNED(request->length, usb_endpoint_maxp(ep->desc))
+		&& !(dep->number & 1) && (dep->number != DWC3_EP_EBC_OUT_NB)) {
+		request->length = roundup(request->length,
+					(u32) usb_endpoint_maxp(ep->desc));
+		/* set flag for bulk-out short request */
+		if (usb_endpoint_is_bulk_out(dep->endpoint.desc))
+			req->short_packet = true;
+	}
+
 	ret = __dwc3_gadget_ep_queue(dep, req);
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
@@ -1192,7 +1479,7 @@
 		}
 		if (r == req) {
 			/* wait until it is processed */
-			dwc3_stop_active_transfer(dwc, dep->number);
+			dwc3_stop_active_transfer(dwc, dep->number, 1);
 			goto out1;
 		}
 		dev_err(dwc->dev, "request %p was not queued to %s\n",
@@ -1424,13 +1711,44 @@
 	return 0;
 }
 
-static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
+static int __dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
 {
 	u32			reg;
 	u32			timeout = 500;
 
 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
-	if (is_on) {
+	if (is_on)
+		reg |= DWC3_DCTL_RUN_STOP;
+	else
+		reg &= ~DWC3_DCTL_RUN_STOP;
+	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+	do {
+		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+		if (is_on) {
+			if (!(reg & DWC3_DSTS_DEVCTRLHLT))
+				break;
+		} else {
+			if (reg & DWC3_DSTS_DEVCTRLHLT)
+				break;
+		}
+		timeout--;
+		if (!timeout)
+			return -ETIMEDOUT;
+		udelay(1);
+	} while (1);
+
+	return 0;
+}
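__dwc3_gadget_run_stop() sets or clears RUN/STOP and then polls DSTS until the halted bit reflects the new state, giving up after roughly 500 us. The same bounded-poll idiom, reduced to a self-contained sketch (the register is simulated; the bit position follows the dwc3 headers but is an assumption here):

	#include <errno.h>
	#include <stdint.h>

	#define DSTS_DEVCTRLHLT	(1u << 22)	/* assumed, per dwc3 core.h */

	static volatile uint32_t fake_dsts = DSTS_DEVCTRLHLT;	/* simulated DSTS */

	/* Poll until the halted bit equals 'want'; bail out after ~500 tries. */
	static int poll_halted(int want)
	{
		unsigned int timeout = 500;

		do {
			if (!!(fake_dsts & DSTS_DEVCTRLHLT) == want)
				return 0;
			if (!--timeout)
				return -ETIMEDOUT;
			/* the driver delays 1 us per iteration here */
		} while (1);
	}

	int main(void)
	{
		fake_dsts = 0;			/* controller reports "running" */
		return poll_halted(0);		/* succeeds immediately */
	}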
+
+static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
+{
+	u32			reg;
+	u32			timeout = 500;
+	struct usb_phy		*usb_phy;
+
+	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+	if (is_on && !dwc->pullups_connected) {
 		if (dwc->revision <= DWC3_REVISION_187A) {
 			reg &= ~DWC3_DCTL_TRGTULST_MASK;
 			reg |= DWC3_DCTL_TRGTULST_RX_DET;
@@ -1440,10 +1758,20 @@
 			reg &= ~DWC3_DCTL_KEEP_CONNECT;
 		reg |= DWC3_DCTL_RUN_STOP;
 		dwc->pullups_connected = true;
-	} else {
+	} else if (!is_on && dwc->pullups_connected) {
 		reg &= ~DWC3_DCTL_RUN_STOP;
 		dwc->pullups_connected = false;
-	}
+
+		/* WORKAROUND: reset PHY via FUNC_CTRL before disconnect
+		 * to avoid PHY hang
+		 */
+		usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
+		if (!IS_ERR(usb_phy)) {
+			/* usb_get_phy() returns ERR_PTR() on failure, so
+			 * only write to and put a valid phy
+			 */
+			usb_phy_io_write(usb_phy,
+					0x6D, ULPI_FUNC_CTRL);
+			usb_put_phy(usb_phy);
+		}
+	} else {
+		return 0;
+	}
 
 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
 
@@ -1478,6 +1806,11 @@
 
 	is_on = !!is_on;
 
+	if (dwc->soft_connected == is_on)
+		return 0;
+
+	dwc->soft_connected = is_on;
+
 	spin_lock_irqsave(&dwc->lock, flags);
 	ret = dwc3_gadget_run_stop(dwc, is_on);
 	spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1494,6 +1827,7 @@
 			DWC3_DEVTEN_EVNTOVERFLOWEN |
 			DWC3_DEVTEN_CMDCMPLTEN |
 			DWC3_DEVTEN_ERRTICERREN |
+			DWC3_DEVTEN_HIBERNATIONREQEVTEN |
 			DWC3_DEVTEN_WKUPEVTEN |
 			DWC3_DEVTEN_ULSTCNGEN |
 			DWC3_DEVTEN_CONNECTDONEEN |
@@ -1512,37 +1846,12 @@
 static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
 
-static int dwc3_gadget_start(struct usb_gadget *g,
-		struct usb_gadget_driver *driver)
+static int dwc3_init_for_enumeration(struct dwc3 *dwc)
 {
-	struct dwc3		*dwc = gadget_to_dwc(g);
 	struct dwc3_ep		*dep;
-	unsigned long		flags;
 	int			ret = 0;
-	int			irq;
 	u32			reg;
 
-	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
-	ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
-			IRQF_SHARED | IRQF_ONESHOT, "dwc3", dwc);
-	if (ret) {
-		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
-				irq, ret);
-		goto err0;
-	}
-
-	spin_lock_irqsave(&dwc->lock, flags);
-
-	if (dwc->gadget_driver) {
-		dev_err(dwc->dev, "%s is already bound to %s\n",
-				dwc->gadget.name,
-				dwc->gadget_driver->driver.name);
-		ret = -EBUSY;
-		goto err1;
-	}
-
-	dwc->gadget_driver	= driver;
-
 	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
 	reg &= ~(DWC3_DCFG_SPEED_MASK);
 
@@ -1565,6 +1874,7 @@
 		reg |= dwc->maximum_speed;
 	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
 
+	dwc->is_ebc = 0;
 	dwc->start_config_issued = false;
 
 	/* Start with SuperSpeed Default */
@@ -1574,14 +1884,14 @@
 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
 	if (ret) {
 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
-		goto err2;
+		return ret;
 	}
 
 	dep = dwc->eps[1];
 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
 	if (ret) {
 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
-		goto err3;
+		goto err0;
 	}
 
 	/* begin to receive SETUP packets */
@@ -1590,20 +1900,74 @@
 
 	dwc3_gadget_enable_irq(dwc);
 
+	return 0;
+err0:
+	__dwc3_gadget_ep_disable(dwc->eps[0]);
+
+	return ret;
+}
+
+static int dwc3_gadget_start(struct usb_gadget *g,
+		struct usb_gadget_driver *driver)
+{
+	struct dwc3		*dwc = gadget_to_dwc(g);
+	unsigned long		flags;
+	int			ret = 0;
+	int			irq = 0;
+	struct usb_phy		*usb_phy;
+
+	if (dwc->is_otg) {
+		usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
+		if (IS_ERR(usb_phy)) {
+			dev_err(dwc->dev, "OTG driver not available\n");
+			ret = -ENODEV;
+			goto err0;
+		}
+
+		otg_set_peripheral(usb_phy->otg, &dwc->gadget);
+		usb_put_phy(usb_phy);
+	} else {
+		irq = platform_get_irq(to_platform_device(dwc->dev), 0);
+		ret = request_threaded_irq(irq, dwc3_interrupt,
+				dwc3_thread_interrupt, IRQF_SHARED,
+				"dwc3", dwc);
+		if (ret) {
+			dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
+					irq, ret);
+			goto err0;
+		}
+	}
+
+	spin_lock_irqsave(&dwc->lock, flags);
+
+	if (dwc->gadget_driver) {
+		dev_err(dwc->dev, "%s is already bound to %s\n",
+				dwc->gadget.name,
+				dwc->gadget_driver->driver.name);
+		ret = -EBUSY;
+		goto err1;
+	}
+
+	dwc->gadget_driver	= driver;
+
+	if (!dwc->is_otg) {
+		ret = dwc3_init_for_enumeration(dwc);
+		if (ret)
+			goto err2;
+	}
+
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	return 0;
 
-err3:
-	__dwc3_gadget_ep_disable(dwc->eps[0]);
-
 err2:
 	dwc->gadget_driver = NULL;
 
 err1:
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
-	free_irq(irq, dwc);
+	if (!dwc->is_otg)
+		free_irq(irq, dwc);
 
 err0:
 	return ret;
@@ -1618,20 +1982,69 @@
 
 	spin_lock_irqsave(&dwc->lock, flags);
 
+#if 0
 	dwc3_gadget_disable_irq(dwc);
 	__dwc3_gadget_ep_disable(dwc->eps[0]);
 	__dwc3_gadget_ep_disable(dwc->eps[1]);
+#endif
 
 	dwc->gadget_driver	= NULL;
 
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
+#if 0
 	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
 	free_irq(irq, dwc);
+#endif
 
 	return 0;
 }
 
+static int __dwc3_vbus_draw(struct dwc3 *dwc, unsigned ma)
+{
+	int		ret;
+	struct usb_phy	*usb_phy;
+
+	usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
+	if (IS_ERR(usb_phy)) {
+		dev_err(dwc->dev, "OTG driver not available\n");
+		return -ENODEV;
+	}
+
+	ret = usb_phy_set_power(usb_phy, ma);
+	usb_put_phy(usb_phy);
+
+	return ret;
+}
+
+static int dwc3_vbus_draw(struct usb_gadget *g, unsigned ma)
+{
+	unsigned	ma_otg = 0;
+	struct dwc3	*dwc = gadget_to_dwc(g);
+
+	dev_dbg(dwc->dev, "vbus_draw: %d mA\n", ma);
+
+	switch (ma) {
+	case USB3_I_MAX_OTG:
+		ma_otg = OTG_USB3_900MA;
+		break;
+	case USB3_I_UNIT_OTG:
+		ma_otg = OTG_USB3_150MA;
+		break;
+	case USB2_I_MAX_OTG:
+		ma_otg = OTG_USB2_500MA;
+		break;
+	case USB2_I_UNIT_OTG:
+		ma_otg = OTG_USB2_100MA;
+		break;
+	default:
+		dev_err(dwc->dev,
+			"wrong charging current reported: %dmA\n", ma);
+	}
+
+	return __dwc3_vbus_draw(dwc, ma_otg);
+}
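dwc3_vbus_draw() is reached through the gadget framework: once a configuration is selected, the composite core reports the bMaxPower-derived budget via usb_gadget_vbus_draw(), which dispatches to the UDC's .vbus_draw op (presumably wired into dwc3_gadget_ops; the hunk does not show the assignment). A sketch of the caller's side, assuming a struct usb_gadget bound to this UDC:

	#include <linux/usb/gadget.h>

	/* Report a 500 mA high-speed budget from the gadget side; this
	 * lands in dwc3_vbus_draw(), which maps 500 (USB2_I_MAX_OTG)
	 * onto OTG_USB2_500MA.
	 */
	static int example_report_budget(struct usb_gadget *g)
	{
		return usb_gadget_vbus_draw(g, 500);
	}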
+
 static const struct usb_gadget_ops dwc3_gadget_ops = {
 	.get_frame		= dwc3_gadget_get_frame,
 	.wakeup			= dwc3_gadget_wakeup,
@@ -1647,6 +2060,7 @@
 		u8 num, u32 direction)
 {
 	struct dwc3_ep			*dep;
+	struct ebc_io			*ebc, *n;
 	u8				i;
 
 	for (i = 0; i < num; i++) {
@@ -1669,6 +2083,17 @@
 		dep->endpoint.name = dep->name;
 		dep->direction = (epnum & 1);
 
+		list_for_each_entry_safe(ebc, n, &ebc_io_ops, list) {
+			if (epnum == ebc->epnum) {
+				dep->ebc = ebc;
+				if (ebc->init && ebc->init() == -ENODEV)
+					dev_err(dwc->dev,
+						"debug class init fail %d\n",
+						epnum);
+			}
+		}
+
 		if (epnum == 0 || epnum == 1) {
 			dep->endpoint.maxpacket = 512;
 			dep->endpoint.maxburst = 1;
@@ -1872,7 +2297,7 @@
 			 */
 			dep->flags = DWC3_EP_PENDING_REQUEST;
 		} else {
-			dwc3_stop_active_transfer(dwc, dep->number);
+			dwc3_stop_active_transfer(dwc, dep->number, 1);
 			dep->flags = DWC3_EP_ENABLED;
 		}
 		return 1;
@@ -1956,8 +2381,9 @@
 		dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
 		break;
 	case DWC3_DEPEVT_XFERINPROGRESS:
-		if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
-			dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
+		if (!usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+		    !usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
+			dev_dbg(dwc->dev, "%s is not an isochronous/bulk endpoint\n",
 					dep->name);
 			return;
 		}
@@ -2022,7 +2448,7 @@
 	}
 }
 
-static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
+static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, int forcerm)
 {
 	struct dwc3_ep *dep;
 	struct dwc3_gadget_ep_cmd_params params;
@@ -2031,6 +2457,31 @@
 
 	dep = dwc->eps[epnum];
 
+	if (dep->ebc) {
+		if (dep->ebc->is_ondemand == 1) {
+			ret = dwc3_gadget_update_ebc_ep_config(dwc, dep,
+				dep->endpoint.desc, dep->comp_desc, false);
+			if (ret < 0) {
+				dev_dbg(dwc->dev,
+					"DEPCFG failed on %s\n",
+					dep->name);
+				return;
+			}
+			dev_dbg(dwc->dev,
+				"successfully updated DEPCFG command on %s\n",
+				dep->name);
+		}
+
+		if (dep->ebc->xfer_stop)
+			dep->ebc->xfer_stop();
+		else
+			dev_dbg(dwc->dev, "%s xfer_stop() NULL\n", dep->name);
+	}
+
+	if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
+		dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
+				dep->number);
+
 	if (!dep->resource_index)
 		return;
 
@@ -2054,8 +2505,10 @@
 	 */
 
 	cmd = DWC3_DEPCMD_ENDTRANSFER;
-	cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
+	cmd |= DWC3_DEPCMD_CMDIOC;
 	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
+	if (forcerm)
+		cmd |= DWC3_DEPCMD_HIPRI_FORCERM;
 	memset(&params, 0, sizeof(params));
 	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
 	WARN_ON_ONCE(ret);
@@ -2155,6 +2608,17 @@
 	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
 }
 
+static void link_state_change_work(struct work_struct *data)
+{
+	struct dwc3 *dwc = container_of((struct delayed_work *)data,
+			struct dwc3, link_work);
+
+	if (dwc->link_state == DWC3_LINK_STATE_U3) {
+		dev_info(dwc->dev, "device suspended; notify OTG\n");
+		__dwc3_vbus_draw(dwc, OTG_DEVICE_SUSPEND);
+	}
+}
+
 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
 {
 	u32			reg;
@@ -2295,22 +2759,26 @@
 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
 		dwc->gadget.ep0->maxpacket = 512;
 		dwc->gadget.speed = USB_SPEED_SUPER;
+		__dwc3_vbus_draw(dwc, OTG_USB3_150MA);
 		break;
 	case DWC3_DCFG_HIGHSPEED:
 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
 		dwc->gadget.ep0->maxpacket = 64;
 		dwc->gadget.speed = USB_SPEED_HIGH;
+		__dwc3_vbus_draw(dwc, OTG_USB2_100MA);
 		break;
 	case DWC3_DCFG_FULLSPEED2:
 	case DWC3_DCFG_FULLSPEED1:
 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
 		dwc->gadget.ep0->maxpacket = 64;
 		dwc->gadget.speed = USB_SPEED_FULL;
+		__dwc3_vbus_draw(dwc, OTG_USB2_100MA);
 		break;
 	case DWC3_DCFG_LOWSPEED:
 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
 		dwc->gadget.ep0->maxpacket = 8;
 		dwc->gadget.speed = USB_SPEED_LOW;
+		__dwc3_vbus_draw(dwc, OTG_USB2_100MA);
 		break;
 	}
 
@@ -2341,16 +2809,20 @@
 	}
 
 	dep = dwc->eps[0];
-	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
+	ret = dwc3_gadget_set_ep_config(dwc, dep,
+			&dwc3_gadget_ep0_desc, NULL, false,
+			DWC3_DEPCFG_ACTION_MODIFY);
 	if (ret) {
-		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
+		dev_err(dwc->dev, "failed to update %s\n", dep->name);
 		return;
 	}
 
 	dep = dwc->eps[1];
-	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
+	ret = dwc3_gadget_set_ep_config(dwc, dep,
+			&dwc3_gadget_ep0_desc, NULL, false,
+			DWC3_DEPCFG_ACTION_MODIFY);
 	if (ret) {
-		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
+		dev_err(dwc->dev, "failed to update %s\n", dep->name);
 		return;
 	}
 
@@ -2367,6 +2839,9 @@
 {
 	dev_vdbg(dwc->dev, "%s\n", __func__);
 
+	dev_info(dwc->dev, "device resumed; notify OTG\n");
+	__dwc3_vbus_draw(dwc, OTG_DEVICE_RESUME);
+
 	/*
 	 * TODO take core out of low power mode when that's
 	 * implemented.
@@ -2456,12 +2931,26 @@
 
 	dwc->link_state = next;
 
+	if (next == DWC3_LINK_STATE_U3)
+		schedule_delayed_work(&dwc->link_work,
+				msecs_to_jiffies(1000));
+
 	dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
 }
 
+static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc)
+{
+	dev_vdbg(dwc->dev, "%s\n", __func__);
+
+	if (dwc->hiber_enabled)
+		pm_runtime_put(dwc->dev);
+}
+
 static void dwc3_gadget_interrupt(struct dwc3 *dwc,
 		const struct dwc3_event_devt *event)
 {
+	u32	reg;
+
 	switch (event->type) {
 	case DWC3_DEVICE_EVENT_DISCONNECT:
 		dwc3_gadget_disconnect_interrupt(dwc);
@@ -2478,6 +2967,9 @@
 	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
 		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
 		break;
+	case DWC3_DEVICE_EVENT_HIBER_REQ:
+		dwc3_gadget_hibernation_interrupt(dwc);
+		break;
 	case DWC3_DEVICE_EVENT_EOPF:
 		dev_vdbg(dwc->dev, "End of Periodic Frame\n");
 		break;
@@ -2486,6 +2978,14 @@
 		break;
 	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
 		dev_vdbg(dwc->dev, "Erratic Error\n");
+
+		/* The controller may generate too many Erratic Error
+		 * messages; disable the event until we find a way to
+		 * recover from the failure.
+		 */
+		reg = dwc3_readl(dwc->regs, DWC3_DEVTEN);
+		reg &= ~DWC3_DEVTEN_ERRTICERREN;
+		dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
+		dev_info(dwc->dev, "Erratic Error Event disabled\n");
 		break;
 	case DWC3_DEVICE_EVENT_CMD_CMPL:
 		dev_vdbg(dwc->dev, "Command Complete\n");
@@ -2522,6 +3022,7 @@
 	struct dwc3 *dwc = _dwc;
 	unsigned long flags;
 	irqreturn_t ret = IRQ_NONE;
+	u32 reg;
 	int i;
 
 	spin_lock_irqsave(&dwc->lock, flags);
@@ -2561,6 +3062,11 @@
 		evt->count = 0;
 		evt->flags &= ~DWC3_EVENT_PENDING;
 		ret = IRQ_HANDLED;
+
+		/* Unmask interrupt */
+		reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(i));
+		reg &= ~DWC3_GEVNTSIZ_INTMASK;
+		dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(i), reg);
 	}
 
 	spin_unlock_irqrestore(&dwc->lock, flags);
@@ -2572,6 +3078,7 @@
 {
 	struct dwc3_event_buffer *evt;
 	u32 count;
+	u32 reg;
 
 	evt = dwc->ev_buffs[buf];
 
@@ -2583,6 +3090,11 @@
 	evt->count = count;
 	evt->flags |= DWC3_EVENT_PENDING;
 
+	/* Mask interrupt */
+	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
+	reg |= DWC3_GEVNTSIZ_INTMASK;
+	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
+
 	return IRQ_WAKE_THREAD;
 }
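Taken together, the two GEVNTSIZ hunks above implement the standard mask-in-hard-IRQ / unmask-in-thread discipline for threaded interrupts: the hard handler latches the event count, masks the event buffer's interrupt and returns IRQ_WAKE_THREAD; the threaded handler drains the buffer and unmasks. A generic sketch of the same split, with a hypothetical device in place of the dwc3 registers:

	#include <linux/interrupt.h>
	#include <linux/spinlock.h>

	/* Hypothetical device with a maskable event-buffer interrupt. */
	struct mydev {
		spinlock_t lock;
		void (*mask_events)(struct mydev *d);
		void (*unmask_events)(struct mydev *d);
		unsigned int pending;
	};

	static irqreturn_t mydev_hardirq(int irq, void *data)
	{
		struct mydev *d = data;

		if (!d->pending)
			return IRQ_NONE;

		d->mask_events(d);	/* quiet until the thread has drained */
		return IRQ_WAKE_THREAD;
	}

	static irqreturn_t mydev_thread(int irq, void *data)
	{
		struct mydev *d = data;
		unsigned long flags;

		spin_lock_irqsave(&d->lock, flags);
		d->pending = 0;		/* process the latched events here */
		d->unmask_events(d);	/* re-arm the interrupt source */
		spin_unlock_irqrestore(&d->lock, flags);

		return IRQ_HANDLED;
	}

It would be registered the same way as in dwc3_gadget_start(): request_threaded_irq(irq, mydev_hardirq, mydev_thread, IRQF_SHARED, "mydev", d).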
 
@@ -2593,6 +3105,15 @@
 	irqreturn_t			ret = IRQ_NONE;
 
 	spin_lock(&dwc->lock);
+	if (dwc->pm_state != PM_ACTIVE) {
+		if (dwc->pm_state == PM_SUSPENDED) {
+			dev_info(dwc->dev, "u2/u3 pmu is received\n");
+			pm_runtime_get(dwc->dev);
+			dwc->pm_state = PM_RESUMING;
+			ret = IRQ_HANDLED;
+		}
+		goto out;
+	}
 
 	for (i = 0; i < dwc->num_event_buffers; i++) {
 		irqreturn_t status;
@@ -2602,6 +3123,7 @@
 			ret = status;
 	}
 
+out:
 	spin_unlock(&dwc->lock);
 
 	return ret;
@@ -2618,6 +3140,23 @@
 	u32					reg;
 	int					ret;
 
+	dwc->scratch_array = dma_alloc_coherent(dwc->dev,
+			sizeof(*dwc->scratch_array),
+			&dwc->scratch_array_dma, GFP_KERNEL);
+	if (!dwc->scratch_array) {
+		dev_err(dwc->dev, "failed to allocate scratch_array\n");
+		return -ENOMEM;
+	}
+
+	dwc->scratch_buffer[0] = dma_alloc_coherent(dwc->dev,
+			DWC3_SCRATCH_BUF_SIZE,
+			&dwc->scratch_array->dma_adr[0], GFP_KERNEL);
+	if (!dwc->scratch_buffer[0]) {
+		dev_err(dwc->dev, "failed to allocate scratch_buffer\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
 	dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
 			&dwc->ctrl_req_addr, GFP_KERNEL);
 	if (!dwc->ctrl_req) {
@@ -2656,6 +3195,8 @@
 	dwc->gadget.sg_supported	= true;
 	dwc->gadget.name		= "dwc3-gadget";
 
+	INIT_DELAYED_WORK(&dwc->link_work, link_state_change_work);
+
 	/*
 	 * REVISIT: Here we should clear all pending IRQs to be
 	 * sure we're starting from a well known location.
@@ -2702,6 +3243,13 @@
 			dwc->ctrl_req, dwc->ctrl_req_addr);
 
 err0:
+	dma_free_coherent(dwc->dev,
+			DWC3_SCRATCH_BUF_SIZE, dwc->scratch_buffer[0],
+			(dma_addr_t)dwc->scratch_array->dma_adr[0]);
+
+err:
+	dma_free_coherent(dwc->dev, sizeof(*dwc->scratch_array),
+			dwc->scratch_array, dwc->scratch_array_dma);
 	return ret;
 }
 
@@ -2723,6 +3271,13 @@
 
 	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
 			dwc->ctrl_req, dwc->ctrl_req_addr);
+
+	dma_free_coherent(dwc->dev,
+			DWC3_SCRATCH_BUF_SIZE, dwc->scratch_buffer[0],
+			(dma_addr_t)dwc->scratch_array->dma_adr[0]);
+
+	dma_free_coherent(dwc->dev, sizeof(*dwc->scratch_array),
+			dwc->scratch_array, dwc->scratch_array_dma);
 }
 
 int dwc3_gadget_prepare(struct dwc3 *dwc)
@@ -2783,3 +3338,359 @@
 err0:
 	return ret;
 }
+
+void dwc3_register_io_ebc(struct ebc_io *ebc)
+{
+	list_add_tail(&ebc->list, &ebc_io_ops);
+}
+
+void dwc3_unregister_io_ebc(struct ebc_io *ebc)
+{
+	list_del(&ebc->list);
+}
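dwc3_register_io_ebc() is the hook an EBC (debug-class) client uses to attach its ops to a fixed endpoint number before the endpoints are created; dwc3_gadget_init_hw_endpoints() then matches ebc->epnum and stores the ops in dep->ebc. A hedged sketch of such a client, with every field value hypothetical (the real struct ebc_io layout lives in the dwc3 headers):

	/* Hypothetical EBC client; endpoint number, pool size and
	 * callbacks are illustrative only.
	 */
	static int my_ebc_init(void)
	{
		return 0;			/* probe the debug-class hardware */
	}

	static void my_ebc_xfer_start(void)
	{
		/* kick the sideband transfer engine */
	}

	static void my_ebc_xfer_stop(void)
	{
		/* quiesce the sideband transfer engine */
	}

	static struct ebc_io my_ebc = {
		.epnum			= 8,	/* must name the EBC endpoint */
		.is_ondemand		= 1,	/* DEPCFG toggled around each xfer */
		.static_trb_pool_size	= 8,	/* a power of two, see the TRB code */
		.init			= my_ebc_init,
		.xfer_start		= my_ebc_xfer_start,
		.xfer_stop		= my_ebc_xfer_stop,
	};

	/* Client probe:  dwc3_register_io_ebc(&my_ebc);   */
	/* Client remove: dwc3_unregister_io_ebc(&my_ebc); */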
+
+#ifdef CONFIG_PM_RUNTIME
+static void dwc3_gadget_get_ep_state(struct dwc3 *dwc, struct dwc3_ep *dep)
+{
+	struct	dwc3_gadget_ep_cmd_params params;
+	int	ret;
+
+	dev_vdbg(dwc->dev, "%s\n", __func__);
+
+	memset(&params, 0, sizeof(params));
+	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
+			DWC3_DEPCMD_GETEPSTATE, &params);
+	WARN_ON_ONCE(ret);
+
+	dep->ep_state = dwc3_readl(dwc->regs, DWC3_DEPCMDPAR2(dep->number));
+}
+
+static void dwc3_cache_hwregs(struct dwc3 *dwc)
+{
+	struct dwc3_hwregs	*regs = &dwc->hwregs;
+
+	dev_vdbg(dwc->dev, "%s\n", __func__);
+
+	regs->guctl = dwc3_readl(dwc->regs, DWC3_GUCTL);
+	regs->grxthrcfg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
+	regs->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);
+	regs->devten = dwc3_readl(dwc->regs, DWC3_DEVTEN);
+	regs->gctl = dwc3_readl(dwc->regs, DWC3_GCTL);
+	regs->gusb3pipectl0 = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+	regs->gusb2phycfg0 = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+	regs->gevntadrlo = dwc3_readl(dwc->regs, DWC3_GEVNTADRLO(0));
+	regs->gevntadrhi = dwc3_readl(dwc->regs, DWC3_GEVNTADRHI(0));
+	regs->gevntsiz = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
+}
+
+static void dwc3_restore_hwregs(struct dwc3 *dwc)
+{
+	struct dwc3_hwregs	*regs = &dwc->hwregs;
+
+	dev_vdbg(dwc->dev, "%s\n", __func__);
+
+	dwc3_writel(dwc->regs, DWC3_GUCTL, regs->guctl);
+	dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, regs->grxthrcfg);
+	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), regs->gusb3pipectl0);
+	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), regs->gusb2phycfg0);
+	dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0), regs->gevntadrlo);
+	dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0), regs->gevntadrhi);
+	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), regs->gevntsiz);
+	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 0);
+	dwc3_writel(dwc->regs, DWC3_DCFG, regs->dcfg);
+	dwc3_writel(dwc->regs, DWC3_DEVTEN, regs->devten);
+	dwc3_writel(dwc->regs, DWC3_GCTL, regs->gctl);
+}
+
+static int dwc3_gadget_controller_save_state(struct dwc3 *dwc)
+{
+	u32			reg;
+	u32			timeout = 1000;
+
+	dev_vdbg(dwc->dev, "---> %s()\n", __func__);
+
+	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+
+	reg |= DWC3_DCTL_CSS;
+	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+	do {
+		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+		if (!(reg & DWC3_DSTS_SSS))
+			break;
+
+		timeout--;
+		if (!timeout)
+			return -ETIMEDOUT;
+		udelay(1);
+	} while (1);
+
+	dev_vdbg(dwc->dev, "<--- %s()\n", __func__);
+
+	return 0;
+}
+
+static int dwc3_gadget_controller_restore_state(struct dwc3 *dwc)
+{
+	u32			reg;
+	u32			timeout = 1000;
+
+	dev_vdbg(dwc->dev, "---> %s()\n", __func__);
+
+	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+
+	reg |= DWC3_DCTL_CRS;
+	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+	do {
+		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+		if (!(reg & DWC3_DSTS_RSS))
+			break;
+
+		timeout--;
+		if (!timeout)
+			return -ETIMEDOUT;
+		udelay(1);
+	} while (1);
+
+	dev_vdbg(dwc->dev, "<--- %s()\n", __func__);
+
+	return 0;
+}
+
+void dwc3_gadget_keep_conn(struct dwc3 *dwc, int is_on)
+{
+	u32			reg;
+
+	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+	if (is_on)
+		reg |= DWC3_DCTL_KEEP_CONNECT;
+	else
+		reg &= ~DWC3_DCTL_KEEP_CONNECT;
+	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+}
+
+int dwc3_runtime_suspend(struct device *device)
+{
+	struct dwc3			*dwc;
+	struct platform_device		*pdev;
+	unsigned long			flags;
+	u32				epnum;
+	struct dwc3_ep			*dep;
+
+	pdev = to_platform_device(device);
+	dwc = platform_get_drvdata(pdev);
+
+	if (!dwc || !dwc->hiber_enabled)
+		return 0;
+
+	dev_vdbg(dwc->dev, "---> %s()\n", __func__);
+
+	spin_lock_irqsave(&dwc->lock, flags);
+
+	if (dwc->pm_state != PM_ACTIVE) {
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return 0;
+	}
+
+	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+		dep = dwc->eps[epnum];
+		if (!(dep->flags & DWC3_EP_ENABLED))
+			continue;
+
+		dep->flags_backup = dep->flags;
+		if (dep->flags & DWC3_EP_BUSY)
+			dwc3_stop_active_transfer(dwc, epnum, 0);
+
+		dwc3_gadget_get_ep_state(dwc, dep);
+
+		dep->flags = DWC3_EP_HIBERNATION;
+	}
+
+	__dwc3_gadget_run_stop(dwc, 0);
+	dwc3_gadget_keep_conn(dwc, 1);
+
+	dwc3_cache_hwregs(dwc);
+
+	dwc3_gadget_disable_irq(dwc);
+	dwc3_event_buffers_cleanup(dwc);
+
+	dwc3_gadget_controller_save_state(dwc);
+
+	dwc->pm_state = PM_SUSPENDED;
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	schedule_delayed_work(&dwc->link_work, msecs_to_jiffies(1000));
+	dev_info(dwc->dev, "suspended\n");
+	dev_vdbg(dwc->dev, "<--- %s()\n", __func__);
+
+	return 0;
+}
+
+int dwc3_runtime_resume(struct device *device)
+{
+	struct dwc3			*dwc;
+	struct platform_device		*pdev;
+	unsigned long			flags;
+	int				ret;
+	u32				epnum;
+	u32				timeout = 500;
+	u32				reg;
+	u8				link_state;
+	struct dwc3_ep			*dep;
+
+	pdev = to_platform_device(device);
+	dwc = platform_get_drvdata(pdev);
+
+	if (!dwc || !dwc->hiber_enabled)
+		return 0;
+
+	dev_vdbg(dwc->dev, "---> %s()\n", __func__);
+
+	spin_lock_irqsave(&dwc->lock, flags);
+
+	if (dwc->pm_state == PM_ACTIVE ||
+		dwc->pm_state == PM_DISCONNECTED) {
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return 0;
+	}
+
+	dwc3_send_gadget_generic_command(dwc, DWC3_DGCMD_SET_SCRATCH_ADDR_LO,
+		dwc->scratch_array_dma & 0xffffffffU);
+
+	dwc3_gadget_controller_restore_state(dwc);
+
+	dwc3_restore_hwregs(dwc);
+
+	dep = dwc->eps[0];
+	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
+	if (ret) {
+		dev_err(dwc->dev, "failed to enable %s during runtime resume\n",
+			dep->name);
+		goto err0;
+	}
+
+	dep = dwc->eps[1];
+	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
+	if (ret) {
+		dev_err(dwc->dev, "failed to enable %s during runtime resume\n",
+			dep->name);
+		goto err1;
+	}
+
+	for (epnum = 0; epnum < 2; epnum++) {
+		struct dwc3_gadget_ep_cmd_params params;
+
+		dep = dwc->eps[epnum];
+		if (dep->flags_backup & DWC3_EP_BUSY) {
+			dwc->ep0_trb->ctrl |= DWC3_TRB_CTRL_HWO;
+
+			memset(&params, 0, sizeof(params));
+			params.param0 = upper_32_bits(dwc->ep0_trb_addr);
+			params.param1 = lower_32_bits(dwc->ep0_trb_addr);
+
+			ret = dwc3_send_gadget_ep_cmd(dwc, epnum,
+					DWC3_DEPCMD_STARTTRANSFER, &params);
+			WARN_ON_ONCE(ret);
+		}
+
+		dep->flags = dep->flags_backup;
+		dep->flags_backup = 0;
+	}
+
+	__dwc3_gadget_run_stop(dwc, 1);
+	dwc3_gadget_keep_conn(dwc, 1);
+
+	do {
+		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+		if (!(reg & DWC3_DSTS_DCNRD))
+			break;
+
+		timeout--;
+		if (!timeout)
+			break;
+		udelay(1);
+	} while (1);
+
+	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+	link_state = DWC3_DSTS_USBLNKST(reg);
+	switch (link_state) {
+	case DWC3_LINK_STATE_U3:
+	case DWC3_LINK_STATE_RESUME:
+		dwc3_gadget_conndone_interrupt(dwc);
+
+		for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+			dep = dwc->eps[epnum];
+			if (!(dep->flags_backup & DWC3_EP_ENABLED))
+				continue;
+			if (dep->endpoint.desc)
+				dwc3_gadget_set_ep_config(dwc,
+					dep, dep->endpoint.desc, dep->comp_desc,
+					false, DWC3_DEPCFG_ACTION_RESTORE);
+
+			dwc3_gadget_set_xfer_resource(dwc, dep);
+
+			reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
+			reg |= DWC3_DALEPENA_EP(epnum);
+			dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
+
+			if (dep->flags_backup & DWC3_EP_STALL)
+				__dwc3_gadget_ep_set_halt(dep, 1, false);
+
+			if (dep->flags_backup & DWC3_EP_BUSY) {
+				struct dwc3_request			*req;
+				struct dwc3_gadget_ep_cmd_params	params;
+
+				req = next_request(&dep->req_queued);
+				if (!req)
+					break;
+				req->trb->ctrl |= DWC3_TRB_CTRL_HWO;
+				memset(&params, 0, sizeof(params));
+				params.param0 = upper_32_bits(req->trb_dma);
+				params.param1 = lower_32_bits(req->trb_dma);
+
+				ret = dwc3_send_gadget_ep_cmd(dwc, epnum,
+						DWC3_DEPCMD_STARTTRANSFER,
+						&params);
+				WARN_ON_ONCE(ret);
+
+			}
+
+			dep->flags = dep->flags_backup;
+			dep->flags_backup = 0;
+		}
+
+		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+		reg |= DWC3_DCTL_ULSTCHNG_RECOVERY;
+		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+		break;
+	case DWC3_LINK_STATE_RESET:
+		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+		reg |= DWC3_DCTL_ULSTCHNG_RECOVERY;
+		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+		break;
+	default:
+		/* wait for USB Reset or Connect Done event */
+		break;
+	}
+
+	dwc->pm_state = PM_ACTIVE;
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	__dwc3_vbus_draw(dwc, OTG_DEVICE_RESUME);
+	dev_info(dwc->dev, "resumed\n");
+	dev_vdbg(dwc->dev, "<--- %s()\n", __func__);
+	return 0;
+
+err1:
+	__dwc3_gadget_ep_disable(dwc->eps[0]);
+
+err0:
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	return ret;
+}
+#else
+void dwc3_gadget_keep_conn(struct dwc3 *dwc, int is_on) {}
+#endif
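For dwc3_runtime_suspend()/dwc3_runtime_resume() to run at all, they have to be referenced from the driver's dev_pm_ops and runtime PM must be enabled on the device; this hunk does not show that glue. A minimal sketch of the expected wiring, assuming it mirrors other dwc3 platform code (names hypothetical):

	#include <linux/pm.h>
	#include <linux/pm_runtime.h>

	static const struct dev_pm_ops dwc3_example_pm_ops = {
		SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
				   NULL)
	};

	/* In the platform driver's probe(), once the controller is up: */
	static void dwc3_example_enable_rpm(struct device *dev)
	{
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_runtime_put(dev);	/* allow hibernation entry when idle */
	}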
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index b3f25c3..a374653 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -47,6 +47,12 @@
 #define to_dwc3_ep(ep)		(container_of(ep, struct dwc3_ep, endpoint))
 #define gadget_to_dwc(g)	(container_of(g, struct dwc3, gadget))
 
+/* max power consumption of the device from the bus */
+#define USB3_I_MAX_OTG		896
+#define USB3_I_UNIT_OTG		144
+#define USB2_I_MAX_OTG		500
+#define USB2_I_UNIT_OTG		100
+
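The values read as the USB bus-power budgets rounded to descriptor granularity: SuperSpeed bMaxPower counts in 8 mA units, so 896 = 112 x 8 and 144 = 18 x 8 are the largest encodable currents at or below the 900 mA maximum and 150 mA unit load, while the USB2 figures (500 mA max, 100 mA unit) need no rounding. dwc3_vbus_draw() in gadget.c matches them back to OTG_USB3_900MA/OTG_USB3_150MA and the USB2 equivalents. (This reading is inferred from the values; the patch itself does not spell it out.)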
 /* DEPCFG parameter 1 */
 #define DWC3_DEPCFG_INT_NUM(n)		((n) << 0)
 #define DWC3_DEPCFG_XFER_COMPLETE_EN	(1 << 8)
@@ -54,6 +60,7 @@
 #define DWC3_DEPCFG_XFER_NOT_READY_EN	(1 << 10)
 #define DWC3_DEPCFG_FIFO_ERROR_EN	(1 << 11)
 #define DWC3_DEPCFG_STREAM_EVENT_EN	(1 << 13)
+#define DWC3_DEPCFG_EBC_MODE_EN		(1 << 15)
 #define DWC3_DEPCFG_BINTERVAL_M1(n)	((n) << 16)
 #define DWC3_DEPCFG_STREAM_CAPABLE	(1 << 24)
 #define DWC3_DEPCFG_EP_NUMBER(n)	((n) << 25)
diff --git a/drivers/usb/dwc3/otg.c b/drivers/usb/dwc3/otg.c
new file mode 100644
index 0000000..31ee4b9
--- /dev/null
+++ b/drivers/usb/dwc3/otg.c
@@ -0,0 +1,1509 @@
+/*
+ * otg.c - Designware USB3 DRD Controller OTG driver
+ *
+ * Authors: Wang Yu <yu.y.wang@intel.com>
+ *		Synopsys inc
+ *
+ * Description:
+ *
+ * This driver is based on the dwc_otg3.c driver provided by Synopsys.
+ * Some unused features (HNP/SRP/ADP support) have been removed, since
+ * they have not been needed so far, and charger detection support has
+ * been added to the state machine. The SDP/CDP/DCP/Micro-ACA/ACA-Dock
+ * and SE1 USB charger types are supported.
+ *
+ * Besides that, all hardware dependencies are factored out as
+ * callbacks which vendors have to implement themselves, for example
+ * driving VBus, reading the USB ID pin value and so on.
+ *
+ * To enable this OTG driver, the user has to call the dwc3_otg_register
+ * API to register a dwc3_otg_hw_ops object which includes all
+ * hardware-dependent code.
+ *
+ * License:
+ * The declaration below is copied from the README.txt of the Synopsys
+ * DWC3 SW 2.10a release.
+ *
+ * IMPORTANT:
+ *
+ * Synopsys SS USB3 Linux Driver Software and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb/otg.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/freezer.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/version.h>
+
+#include "otg.h"
+
+#define VERSION "2.10a"
+
+struct dwc3_otg_hw_ops *dwc3_otg_pdata;
+struct dwc_device_par *platform_par;
+
+static struct mutex lock;
+static const char driver_name[] = "dwc3_otg";
+static struct dwc_otg2 *the_transceiver;
+static void dwc_otg_remove(struct pci_dev *pdev);
+
+
+static inline struct dwc_otg2 *xceiv_to_dwc_otg2(struct usb_otg *x)
+{
+	return container_of(x, struct dwc_otg2, otg);
+}
+
+struct dwc_otg2 *dwc3_get_otg(void)
+{
+	return the_transceiver;
+}
+EXPORT_SYMBOL_GPL(dwc3_get_otg);
+
+/* Caller must hold otg->lock */
+void dwc3_wakeup_otg_thread(struct dwc_otg2 *otg)
+{
+	if (!otg->main_thread)
+		return;
+
+	otg_dbg(otg, "\n");
+	/* Tell the main thread that something has happened */
+	otg->main_wakeup_needed = 1;
+	wake_up_interruptible(&otg->main_wq);
+}
+EXPORT_SYMBOL_GPL(dwc3_wakeup_otg_thread);
+
+static int sleep_main_thread_timeout(struct dwc_otg2 *otg, int msecs)
+{
+	signed long timeout_jiffies;	/* don't shadow the global jiffies */
+	int rc = msecs;
+
+	if (otg->state == DWC_STATE_EXIT) {
+		otg_dbg(otg, "Main thread exiting\n");
+		rc = -EINTR;
+		goto done;
+	}
+
+	if (signal_pending(current)) {
+		otg_dbg(otg, "Main thread signal pending\n");
+		rc = -EINTR;
+		goto done;
+	}
+	if (otg->main_wakeup_needed) {
+		otg_dbg(otg, "Main thread wakeup needed\n");
+		rc = msecs;
+		goto done;
+	}
+
+	timeout_jiffies = msecs_to_jiffies(msecs);
+	rc = wait_event_freezable_timeout(otg->main_wq,
+					otg->main_wakeup_needed,
+					timeout_jiffies);
+
+	if (otg->state == DWC_STATE_EXIT) {
+		otg_dbg(otg, "Main thread exiting\n");
+		rc = -EINTR;
+		goto done;
+	}
+
+	if (rc > 0)
+		rc = jiffies_to_msecs(rc);
+
+done:
+	otg->main_wakeup_needed = 0;
+	return rc;
+}
+
+static int sleep_main_thread(struct dwc_otg2 *otg)
+{
+	int rc = 0;
+
+	do {
+		rc = sleep_main_thread_timeout(otg, 5000);
+	} while (rc == 0);
+
+	return rc;
+}
+
+static void get_and_clear_events(struct dwc_otg2 *otg,
+				u32 otg_mask,
+				u32 user_mask,
+				u32 *otg_events,
+				u32 *user_events)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&otg->lock, flags);
+
+	if (otg_events) {
+		if (otg->otg_events & otg_mask) {
+			*otg_events = otg->otg_events;
+			otg->otg_events &= ~otg_mask;
+		} else
+			*otg_events = 0;
+	}
+
+	if (user_events) {
+		if (otg->user_events & user_mask) {
+			*user_events = otg->user_events;
+			otg->user_events &= ~user_mask;
+		} else
+			*user_events = 0;
+	}
+
+	spin_unlock_irqrestore(&otg->lock, flags);
+}
+
+static int check_event(struct dwc_otg2 *otg,
+		u32 otg_mask,
+		u32 user_mask,
+		u32 *otg_events,
+		u32 *user_events)
+{
+	get_and_clear_events(otg, otg_mask, user_mask,
+			otg_events, user_events);
+
+	otg_dbg(otg, "Event occurred:");
+
+	if (otg_events && (*otg_events & otg_mask)) {
+		otg_dbg(otg, "otg_events=0x%x, otg_mask=0x%x",
+				*otg_events, otg_mask);
+		return 1;
+	}
+
+	if (user_events && (*user_events & user_mask)) {
+		otg_dbg(otg, "user_events=0x%x, user_mask=0x%x",
+				*user_events, user_mask);
+		return 1;
+	}
+
+	return 0;
+}
+
+static int sleep_until_event(struct dwc_otg2 *otg,
+			u32 otg_mask, u32 user_mask,
+			u32 *otg_events, u32 *user_events,
+			int timeout)
+{
+	int rc = 0;
+
+	pm_runtime_mark_last_busy(otg->dev);
+	pm_runtime_put_autosuspend(otg->dev);
+	/* Wait until it occurs, or timeout, or interrupt. */
+	if (timeout) {
+		otg_dbg(otg, "Waiting for event (timeout=%d)...\n", timeout);
+		rc = sleep_main_thread_until_condition_timeout(otg,
+				check_event(otg, otg_mask,
+				user_mask, otg_events, user_events), timeout);
+	} else {
+		otg_dbg(otg, "Waiting for event (no timeout)...\n");
+		rc = sleep_main_thread_until_condition(otg,
+				check_event(otg, otg_mask,
+					user_mask, otg_events, user_events));
+	}
+	pm_runtime_get_sync(otg->dev);
+
+	/* Disable the events */
+	otg_write(otg, OEVTEN, 0);
+	otg_write(otg, ADPEVTEN, 0);
+
+	otg_dbg(otg, "Woke up rc=%d\n", rc);
+
+	return rc;
+}
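sleep_until_event() relies on sleep_main_thread_until_condition{,_timeout}(), which this file does not define (they presumably come from otg.h). Assuming they follow the usual re-test loop around the sleep helpers above, they would look roughly like this sketch:

	/* Assumed shape of the otg.h helpers: re-evaluate the condition
	 * after every wakeup, since sleep_main_thread_timeout() can
	 * return before the full timeout has elapsed.
	 */
	#define sleep_main_thread_until_condition_timeout(otg, cond, ms)	\
	({									\
		int __rc = (ms);						\
		while (!(cond) && __rc > 0)					\
			__rc = sleep_main_thread_timeout(otg, __rc);		\
		__rc;								\
	})

	#define sleep_main_thread_until_condition(otg, cond)			\
	({									\
		int __rc;							\
		do {								\
			__rc = sleep_main_thread(otg);				\
		} while (!(cond) && __rc >= 0);					\
		__rc;								\
	})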
+
+
+static int start_host(struct dwc_otg2 *otg)
+{
+	int ret = 0;
+	struct usb_hcd *hcd = NULL;
+
+	otg_dbg(otg, "\n");
+
+	if (!otg->otg.host) {
+		otg_err(otg, "Haven't set host yet!\n");
+		return -ENODEV;
+	}
+
+	if (dwc3_otg_pdata->prepare_start_host)
+		ret = dwc3_otg_pdata->prepare_start_host(otg);
+
+	/* Start host driver */
+	hcd = container_of(otg->otg.host, struct usb_hcd, self);
+	ret = otg->start_host(hcd);
+
+	return ret;
+}
+
+static int stop_host(struct dwc_otg2 *otg)
+{
+	int ret = -1;
+	struct usb_hcd *hcd = NULL;
+
+	otg_dbg(otg, "\n");
+
+	hcd = container_of(otg->otg.host, struct usb_hcd, self);
+	if (otg->otg.host)
+		ret = otg->stop_host(hcd);
+
+	if (dwc3_otg_pdata->after_stop_host)
+		ret = dwc3_otg_pdata->after_stop_host(otg);
+
+	return ret;
+}
+
+static void start_peripheral(struct dwc_otg2 *otg)
+{
+	struct usb_gadget *gadget;
+	int ret;
+
+	if (dwc3_otg_pdata->prepare_start_peripheral)
+		ret = dwc3_otg_pdata->prepare_start_peripheral(otg);
+
+	gadget = otg->otg.gadget;
+	if (!gadget) {
+		otg_err(otg, "Haven't set gadget yet!\n");
+		return;
+	}
+
+	otg->start_device(gadget);
+}
+
+static void stop_peripheral(struct dwc_otg2 *otg)
+{
+	struct usb_gadget *gadget = otg->otg.gadget;
+	int ret;
+
+	if (!gadget)
+		return;
+
+	otg->stop_device(gadget);
+
+	if (dwc3_otg_pdata->after_stop_peripheral)
+		ret = dwc3_otg_pdata->after_stop_peripheral(otg);
+}
+
+static int get_id(struct dwc_otg2 *otg)
+{
+	if (dwc3_otg_pdata->get_id)
+		return dwc3_otg_pdata->get_id(otg);
+	return RID_UNKNOWN;
+}
+
+static int dwc_otg_notify_charger_type(struct dwc_otg2 *otg,
+		enum power_supply_charger_event event)
+{
+	if (dwc3_otg_pdata->notify_charger_type)
+		return dwc3_otg_pdata->notify_charger_type(otg, event);
+
+	return 0;
+}
+
+static int dwc_otg_get_chrg_status(struct usb_phy *x, void *data)
+{
+	unsigned long flags;
+	struct power_supply_cable_props *cap =
+		(struct power_supply_cable_props *)data;
+	struct dwc_otg2 *otg = the_transceiver;
+
+	if (!x)
+		return -ENODEV;
+
+	if (!data)
+		return -EINVAL;
+
+	spin_lock_irqsave(&otg->lock, flags);
+	cap->chrg_type = otg->charging_cap.chrg_type;
+	cap->chrg_evt = otg->charging_cap.chrg_evt;
+	cap->ma = otg->charging_cap.ma;
+	spin_unlock_irqrestore(&otg->lock, flags);
+
+	return 0;
+}
+
+static int dwc_otg_enable_vbus(struct dwc_otg2 *otg, int enable)
+{
+	if (dwc3_otg_pdata->enable_vbus)
+		return dwc3_otg_pdata->enable_vbus(otg, enable);
+
+	return -EINVAL;
+}
+
+static int is_self_powered_b_device(struct dwc_otg2 *otg)
+{
+	return get_id(otg) == RID_GND;
+}
+
+static enum dwc_otg_state do_wait_vbus_raise(struct dwc_otg2 *otg)
+{
+	int ret;
+	u32 otg_events = 0;
+	u32 user_events = 0;
+	u32 otg_mask = 0;
+	u32 user_mask = 0;
+
+	otg_mask = OEVT_B_DEV_SES_VLD_DET_EVNT;
+
+	ret = sleep_until_event(otg, otg_mask,
+			user_mask, &otg_events,
+			&user_events, VBUS_TIMEOUT);
+	if (ret < 0)
+		return DWC_STATE_EXIT;
+
+	if (otg_events & OEVT_B_DEV_SES_VLD_DET_EVNT) {
+		otg_dbg(otg, "OEVT_B_SES_VLD_EVT\n");
+		return DWC_STATE_CHARGER_DETECTION;
+	}
+
+	/* timeout */
+	if (!ret)
+		return DWC_STATE_A_HOST;
+
+	return DWC_STATE_B_IDLE;
+}
+
+static enum dwc_otg_state do_wait_vbus_fall(struct dwc_otg2 *otg)
+{
+	int ret;
+
+	u32 otg_events = 0;
+	u32 user_events = 0;
+	u32 otg_mask = 0;
+	u32 user_mask = 0;
+
+	otg_mask = OEVT_A_DEV_SESS_END_DET_EVNT;
+
+	ret = sleep_until_event(otg, otg_mask,
+			user_mask, &otg_events,
+			&user_events, VBUS_TIMEOUT);
+	if (ret < 0)
+		return DWC_STATE_EXIT;
+
+	if (otg_events & OEVT_A_DEV_SESS_END_DET_EVNT) {
+		otg_dbg(otg, "OEVT_A_DEV_SESS_END_DET_EVNT\n");
+		if (otg->charging_cap.chrg_type ==
+				POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK)
+			dwc_otg_notify_charger_type(otg,
+				POWER_SUPPLY_CHARGER_EVENT_DISCONNECT);
+		return DWC_STATE_B_IDLE;
+	}
+
+	/* timeout */
+	if (!ret) {
+		otg_err(otg, "VBus drop event not received! Maybe something is wrong\n");
+		return DWC_STATE_B_IDLE;
+	}
+
+	return DWC_STATE_INVALID;
+}
+
+static enum dwc_otg_state do_charging(struct dwc_otg2 *otg)
+{
+	int ret;
+	u32 otg_events = 0;
+	u32 user_events = 0;
+	u32 otg_mask = 0;
+	u32 user_mask = 0;
+
+	otg_mask = OEVT_A_DEV_SESS_END_DET_EVNT;
+
+	if (dwc3_otg_pdata->do_charging)
+		dwc3_otg_pdata->do_charging(otg);
+
+	ret = sleep_until_event(otg, otg_mask,
+			user_mask, &otg_events,
+			&user_events, 0);
+	if (ret < 0)
+		return DWC_STATE_EXIT;
+
+	if (otg_events & OEVT_A_DEV_SESS_END_DET_EVNT) {
+		otg_dbg(otg, "OEVT_A_DEV_SESS_END_DET_EVNT\n");
+		dwc_otg_notify_charger_type(otg,
+				POWER_SUPPLY_CHARGER_EVENT_DISCONNECT);
+		return DWC_STATE_B_IDLE;
+	}
+
+	return DWC_STATE_INVALID;
+}
+
+static enum power_supply_charger_cable_type
+		get_charger_type(struct dwc_otg2 *otg)
+{
+	if (dwc3_otg_pdata->get_charger_type)
+		return dwc3_otg_pdata->get_charger_type(otg);
+
+	return POWER_SUPPLY_CHARGER_TYPE_NONE;
+}
+
+static enum dwc_otg_state do_charger_detection(struct dwc_otg2 *otg)
+{
+	enum dwc_otg_state state = DWC_STATE_INVALID;
+	enum power_supply_charger_cable_type charger =
+			POWER_SUPPLY_CHARGER_TYPE_NONE;
+	unsigned long flags, ma = 0;
+
+	charger = get_charger_type(otg);
+	switch (charger) {
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_A:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_B:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_C:
+		otg_err(otg, "Ignore micro ACA charger.\n");
+		charger = POWER_SUPPLY_CHARGER_TYPE_NONE;
+		break;
+	case POWER_SUPPLY_CHARGER_TYPE_USB_SDP:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_CDP:
+		state = DWC_STATE_B_PERIPHERAL;
+		break;
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK:
+		state = DWC_STATE_A_HOST;
+		break;
+	case POWER_SUPPLY_CHARGER_TYPE_USB_DCP:
+	case POWER_SUPPLY_CHARGER_TYPE_SE1:
+		state = DWC_STATE_CHARGING;
+		break;
+	case POWER_SUPPLY_CHARGER_TYPE_NONE:
+	default:
+		if (is_self_powered_b_device(otg)) {
+			state = DWC_STATE_A_HOST;
+			charger = POWER_SUPPLY_CHARGER_TYPE_B_DEVICE;
+			break;
+		}
+	}
+
+	switch (charger) {
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_A:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_B:
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_C:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_DCP:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_CDP:
+	case POWER_SUPPLY_CHARGER_TYPE_SE1:
+		ma = 1500;
+		break;
+	case POWER_SUPPLY_CHARGER_TYPE_USB_SDP:
+		/* Notify that the SDP current is 100 mA before enumeration. */
+		ma = 100;
+		break;
+	default:
+		otg_err(otg, "Charger type is not valid to notify battery\n");
+		/* the function returns a state, not an errno */
+		return DWC_STATE_INVALID;
+	}
+
+	spin_lock_irqsave(&otg->lock, flags);
+	otg->charging_cap.chrg_type = charger;
+	otg->charging_cap.ma = ma;
+	spin_unlock_irqrestore(&otg->lock, flags);
+
+	switch (charger) {
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_DCP:
+	case POWER_SUPPLY_CHARGER_TYPE_USB_CDP:
+	case POWER_SUPPLY_CHARGER_TYPE_SE1:
+		if (dwc_otg_notify_charger_type(otg,
+					POWER_SUPPLY_CHARGER_EVENT_CONNECT) < 0)
+			otg_err(otg, "Notify battery type failed!\n");
+		break;
+	case POWER_SUPPLY_CHARGER_TYPE_USB_SDP:
+	/* SDP is complicated; it will be handled in set_power */
+	default:
+		break;
+	}
+
+	return state;
+}
+
+static enum dwc_otg_state do_connector_id_status(struct dwc_otg2 *otg)
+{
+	int ret;
+	unsigned long flags;
+	u32 events = 0, user_events = 0;
+	u32 otg_mask = 0, user_mask = 0;
+
+	otg_dbg(otg, "\n");
+	spin_lock_irqsave(&otg->lock, flags);
+	otg->charging_cap.chrg_type = POWER_SUPPLY_CHARGER_TYPE_NONE;
+	otg->charging_cap.ma = 0;
+	otg->charging_cap.chrg_evt = POWER_SUPPLY_CHARGER_EVENT_DISCONNECT;
+	spin_unlock_irqrestore(&otg->lock, flags);
+
+stay_b_idle:
+	otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+			OEVT_B_DEV_SES_VLD_DET_EVNT;
+
+	user_mask = USER_ID_B_CHANGE_EVENT |
+				USER_ID_A_CHANGE_EVENT;
+
+	if (dwc3_otg_pdata->b_idle)
+		dwc3_otg_pdata->b_idle(otg);
+
+	ret = sleep_until_event(otg, otg_mask,
+			user_mask, &events,
+			&user_events, 0);
+	if (ret < 0)
+		return DWC_STATE_EXIT;
+
+	if (events & OEVT_B_DEV_SES_VLD_DET_EVNT) {
+		otg_dbg(otg, "OEVT_B_DEV_SES_VLD_DET_EVNT\n");
+		return DWC_STATE_CHARGER_DETECTION;
+	}
+
+	if (events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+		otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+
+		/* A fast plug-out right after plug-in can cause the
+		 * first ID change event to be lost, so check the real
+		 * ID state here.
+		 */
+		if (get_id(otg) == RID_FLOAT) {
+			otg_dbg(otg, "Stay DWC_STATE_INIT\n");
+			goto stay_b_idle;
+		}
+		return DWC_STATE_WAIT_VBUS_RAISE;
+	}
+
+	if (user_events & USER_ID_A_CHANGE_EVENT) {
+		otg_dbg(otg, "events is user id A change\n");
+		return DWC_STATE_A_HOST;
+	}
+
+	if (user_events & USER_ID_B_CHANGE_EVENT) {
+		otg_dbg(otg, "events is user id B change\n");
+		return DWC_STATE_B_PERIPHERAL;
+	}
+
+	return DWC_STATE_B_IDLE;
+}
+
+static enum dwc_otg_state do_a_host(struct dwc_otg2 *otg)
+{
+	int rc = 0;
+	u32 otg_events, user_events, otg_mask, user_mask;
+	int id = RID_UNKNOWN;
+	unsigned long flags;
+
+	if (otg->charging_cap.chrg_type !=
+			POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK) {
+		dwc_otg_enable_vbus(otg, 1);
+
+		/* wait to receive the VBus valid event */
+		if (do_wait_vbus_raise(otg) == DWC_STATE_A_HOST)
+			otg_err(otg, "Driving VBus may have failed!\n");
+	}
+
+	rc = start_host(otg);
+	if (rc < 0) {
+		stop_host(otg);
+		otg_err(otg, "start_host failed!");
+		return DWC_STATE_INVALID;
+	}
+
+stay_host:
+	otg_events = 0;
+	user_events = 0;
+
+	user_mask = USER_A_BUS_DROP |
+				USER_ID_B_CHANGE_EVENT;
+	otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+			OEVT_A_DEV_SESS_END_DET_EVNT;
+
+	rc = sleep_until_event(otg,
+			otg_mask, user_mask,
+			&otg_events, &user_events, 0);
+	if (rc < 0) {
+		stop_host(otg);
+		return DWC_STATE_EXIT;
+	}
+
+	/* Higher priority first */
+	if (otg_events & OEVT_A_DEV_SESS_END_DET_EVNT) {
+		otg_dbg(otg, "OEVT_A_DEV_SESS_END_DET_EVNT\n");
+
+		/* ACA-Dock plug out */
+		if (otg->charging_cap.chrg_type ==
+				POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK)
+			dwc_otg_notify_charger_type(otg,
+					POWER_SUPPLY_CHARGER_EVENT_DISCONNECT);
+		else
+			dwc_otg_enable_vbus(otg, 0);
+
+		stop_host(otg);
+		return DWC_STATE_B_IDLE;
+	}
+
+	if (user_events & USER_A_BUS_DROP) {
+		/* Due to high current draw by the DUT, battery capacity
+		 * may still decrease even with an ACA-Dock connected. In
+		 * that case stay in host mode, since the DUT is not
+		 * driving VBus.
+		 */
+		if (otg->charging_cap.chrg_type ==
+				POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK)
+			goto stay_host;
+
+		dwc_otg_enable_vbus(otg, 0);
+		stop_host(otg);
+		return DWC_STATE_B_IDLE;
+	}
+
+	if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+		otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+		id = get_id(otg);
+
+		/* Plug out ACA_DOCK/USB device */
+		if (id == RID_FLOAT) {
+			if (otg->charging_cap.chrg_type ==
+					POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK) {
+				/* ACA_DOCK plugged out; the ID change
+				 * arrives prior to the VBus change
+				 */
+				stop_host(otg);
+			} else {
+				/* Normal USB device plug out */
+				spin_lock_irqsave(&otg->lock, flags);
+				otg->charging_cap.chrg_type =
+					POWER_SUPPLY_CHARGER_TYPE_NONE;
+				spin_unlock_irqrestore(&otg->lock, flags);
+
+				stop_host(otg);
+				dwc_otg_enable_vbus(otg, 0);
+			}
+		} else if (id == RID_GND || id == RID_A) {
+			otg_dbg(otg, "Stay DWC_STATE_A_HOST!!\n");
+			/* A fast plug-in right after plug-out can cause
+			 * the first ID change event to be lost, so check
+			 * the real ID state here.
+			 */
+			goto stay_host;
+		} else {
+			otg_err(otg, "Hit an invalid charger case!");
+			spin_lock_irqsave(&otg->lock, flags);
+			otg->charging_cap.chrg_type =
+				POWER_SUPPLY_CHARGER_TYPE_NONE;
+			spin_unlock_irqrestore(&otg->lock, flags);
+
+			stop_host(otg);
+		}
+		return DWC_STATE_WAIT_VBUS_FALL;
+	}
+
+	/* Higher priority first */
+	if (user_events & USER_ID_B_CHANGE_EVENT) {
+		otg_dbg(otg, "USER_ID_B_CHANGE_EVENT\n");
+		stop_host(otg);
+		otg->user_events |= USER_ID_B_CHANGE_EVENT;
+		return DWC_STATE_B_IDLE;
+	}
+
+	/* Invalid state */
+	return DWC_STATE_INVALID;
+}
+
+static enum dwc_otg_state do_b_peripheral(struct dwc_otg2 *otg)
+{
+	int rc = 0;
+	u32 otg_mask, user_mask, otg_events, user_events;
+
+	otg_mask = 0;
+	user_mask = 0;
+	otg_events = 0;
+	user_events = 0;
+
+	otg_mask = OEVT_A_DEV_SESS_END_DET_EVNT;
+	user_mask = USER_ID_A_CHANGE_EVENT;
+
+	rc = sleep_until_event(otg,
+			otg_mask, user_mask,
+			&otg_events, &user_events, 0);
+	if (rc < 0)
+		return DWC_STATE_EXIT;
+
+	if (otg_events & OEVT_A_DEV_SESS_END_DET_EVNT) {
+		otg_dbg(otg, "OEVT_A_DEV_SESS_END_DET_EVNT\n");
+		dwc_otg_notify_charger_type(otg,
+				POWER_SUPPLY_CHARGER_EVENT_DISCONNECT);
+		return DWC_STATE_B_IDLE;
+	}
+
+	if (user_events & USER_ID_A_CHANGE_EVENT) {
+		otg_dbg(otg, "USER_ID_A_CHANGE_EVENT\n");
+		otg->user_events |= USER_ID_A_CHANGE_EVENT;
+		return DWC_STATE_B_IDLE;
+	}
+
+	return DWC_STATE_INVALID;
+}
+
+/* The charger driver may send ID change and VBus change events to the
+ * OTG driver. This works like an IRQ handler, except that the event
+ * source is the charger driver: on the Merrifield platform the ID and
+ * VBus lines are connected to the PMIC, which allows the USB controller
+ * and PHY to be powered off to save power.
+ */
+static int dwc_otg_handle_notification(struct notifier_block *nb,
+		unsigned long event, void *data)
+{
+	if (dwc3_otg_pdata->otg_notifier_handler)
+		return dwc3_otg_pdata->otg_notifier_handler(nb, event, data);
+
+	return NOTIFY_DONE;
+}
+
+int otg_main_thread(void *data)
+{
+	struct dwc_otg2 *otg = (struct dwc_otg2 *)data;
+
+	/* Allow the thread to be killed by a signal, but set the signal mask
+	 * to block everything but INT, TERM, KILL, and USR1. */
+	allow_signal(SIGINT);
+	allow_signal(SIGTERM);
+	allow_signal(SIGKILL);
+	allow_signal(SIGUSR1);
+
+	pm_runtime_get_sync(otg->dev);
+
+	/* Allow the thread to be frozen */
+	set_freezable();
+
+	otg_dbg(otg, "Thread running\n");
+	while (otg->state != DWC_STATE_TERMINATED) {
+		int next = DWC_STATE_B_IDLE;
+		otg_dbg(otg, "\n\n\nMain thread entering state\n");
+
+		switch (otg->state) {
+		case DWC_STATE_B_IDLE:
+			otg_dbg(otg, "DWC_STATE_B_IDLE\n");
+			next = do_connector_id_status(otg);
+			break;
+		case DWC_STATE_CHARGER_DETECTION:
+			otg_dbg(otg, "DWC_STATE_CHARGER_DETECTION\n");
+			next = do_charger_detection(otg);
+			break;
+		case DWC_STATE_WAIT_VBUS_RAISE:
+			otg_dbg(otg, "DWC_STATE_WAIT_VBUS_RAISE\n");
+			next = do_wait_vbus_raise(otg);
+			break;
+		case DWC_STATE_WAIT_VBUS_FALL:
+			otg_dbg(otg, "DWC_STATE_WAIT_VBUS_FALL\n");
+			next = do_wait_vbus_fall(otg);
+			break;
+		case DWC_STATE_CHARGING:
+			otg_dbg(otg, "DWC_STATE_CHARGING\n");
+			next = do_charging(otg);
+			break;
+		case DWC_STATE_A_HOST:
+			otg_dbg(otg, "DWC_STATE_A_HOST\n");
+			next = do_a_host(otg);
+			break;
+		case DWC_STATE_B_PERIPHERAL:
+			otg_dbg(otg, "DWC_STATE_B_PERIPHERAL\n");
+			start_peripheral(otg);
+			next = do_b_peripheral(otg);
+
+			stop_peripheral(otg);
+			break;
+		case DWC_STATE_EXIT:
+			otg_dbg(otg, "DWC_STATE_EXIT\n");
+			next = DWC_STATE_TERMINATED;
+			break;
+		case DWC_STATE_INVALID:
+			otg_dbg(otg, "DWC_STATE_INVALID!!!\n");
+		default:
+			otg_dbg(otg, "Unknown State %d, sleeping...\n",
+					otg->state);
+			sleep_main_thread(otg);
+			break;
+		}
+
+		otg->prev = otg->state;
+		otg->state = next;
+	}
+
+	pm_runtime_mark_last_busy(otg->dev);
+	pm_runtime_put_autosuspend(otg->dev);
+	otg->main_thread = NULL;
+	otg_dbg(otg, "OTG main thread exiting....\n");
+
+	return 0;
+}
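The loop above is a table-less state machine: each do_*() handler blocks in sleep_until_event() and hands back the next state. Collecting the return values of the handlers in this file, the reachable transitions are:

	B_IDLE            -> CHARGER_DETECTION, WAIT_VBUS_RAISE, A_HOST,
	                     B_PERIPHERAL
	CHARGER_DETECTION -> B_PERIPHERAL, A_HOST, CHARGING, B_IDLE
	WAIT_VBUS_RAISE   -> CHARGER_DETECTION, A_HOST (timeout), B_IDLE
	WAIT_VBUS_FALL    -> B_IDLE
	CHARGING          -> B_IDLE
	A_HOST            -> B_IDLE, WAIT_VBUS_FALL (stays on ACA-Dock)
	B_PERIPHERAL      -> B_IDLE
	any state         -> EXIT -> TERMINATED

INVALID and unknown states simply put the thread back to sleep.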
+
+static void start_main_thread(struct dwc_otg2 *otg)
+{
+	enum dwc3_otg_mode mode = dwc3_otg_pdata->mode;
+	bool children_ready = false;
+
+	mutex_lock(&lock);
+
+	if ((mode == DWC3_DEVICE_ONLY) &&
+			otg->otg.gadget)
+		children_ready = true;
+
+	if ((mode == DWC3_HOST_ONLY) &&
+			otg->otg.host)
+		children_ready = true;
+
+	if ((mode == DWC3_DRD) &&
+			otg->otg.host && otg->otg.gadget)
+		children_ready = true;
+
+	if (!otg->main_thread && children_ready) {
+		otg_dbg(otg, "Starting OTG main thread\n");
+		otg->main_thread = kthread_create(otg_main_thread, otg, "otg");
+		wake_up_process(otg->main_thread);
+	}
+	mutex_unlock(&lock);
+}
+
+static void stop_main_thread(struct dwc_otg2 *otg)
+{
+	mutex_lock(&lock);
+	if (otg->main_thread) {
+		otg_dbg(otg, "Stopping OTG main thread\n");
+		otg->state = DWC_STATE_EXIT;
+		dwc3_wakeup_otg_thread(otg);
+	}
+	mutex_unlock(&lock);
+}
+
+static int dwc_otg2_set_peripheral(struct usb_otg *x,
+		struct usb_gadget *gadget)
+{
+	struct dwc_otg2 *otg;
+
+	if (!x) {
+		/* otg is not initialized here, so don't pass it to otg_err() */
+		pr_err("%s: otg is NULL!\n", __func__);
+		return -ENODEV;
+	}
+
+	otg = xceiv_to_dwc_otg2(x);
+	otg_dbg(otg, "\n");
+
+	if (!gadget) {
+		otg->otg.gadget = NULL;
+		stop_main_thread(otg);
+		return -ENODEV;
+	}
+
+	otg->otg.gadget = gadget;
+	otg->usb2_phy.state = OTG_STATE_B_IDLE;
+	start_main_thread(otg);
+	return 0;
+}
+
+static int dwc_otg2_set_host(struct usb_otg *x, struct usb_bus *host)
+{
+	struct dwc_otg2 *otg;
+
+	if (!x) {
+		/* otg is not initialized here, so don't pass it to otg_dbg() */
+		pr_err("%s: otg is NULL!\n", __func__);
+		return -ENODEV;
+	}
+
+	otg = xceiv_to_dwc_otg2(x);
+	otg_dbg(otg, "\n");
+
+	if (!host) {
+		otg->otg.host = NULL;
+		stop_main_thread(otg);
+		return -ENODEV;
+	}
+
+	otg->otg.host = host;
+	start_main_thread(otg);
+	return 0;
+}
+
+static int ulpi_read(struct usb_phy *phy, u32 reg)
+{
+	struct dwc_otg2 *otg = container_of(phy, struct dwc_otg2, usb2_phy);
+	u32 val32 = 0, count = 200;
+	u8 val, tmp;
+
+	reg &= 0xFF;
+
+	while (count) {
+		if (otg_read(otg, GUSB2PHYACC0) & GUSB2PHYACC0_VSTSBSY)
+			udelay(5);
+		else
+			break;
+
+		count--;
+	}
+
+	if (!count) {
+		otg_err(otg, "USB2 PHY always busy!!\n");
+		return -EBUSY;
+	}
+
+	count = 200;
+	/* Determine whether to use extended register access */
+	if (reg & EXTEND_ULPI_REGISTER_ACCESS_MASK) {
+		otg_dbg(otg, "Access extended registers 0x%x\n", reg);
+		val32 = GUSB2PHYACC0_NEWREGREQ
+			| GUSB2PHYACC0_REGADDR(ULPI_ACCESS_EXTENDED)
+			| GUSB2PHYACC0_VCTRL(reg);
+	} else {
+		otg_dbg(otg, "Access normal registers 0x%x\n", reg);
+		val32 = GUSB2PHYACC0_NEWREGREQ | GUSB2PHYACC0_REGADDR(reg)
+			| GUSB2PHYACC0_VCTRL(0x00);
+	}
+	otg_write(otg, GUSB2PHYACC0, val32);
+
+	while (count) {
+		if (otg_read(otg, GUSB2PHYACC0) & GUSB2PHYACC0_VSTSDONE) {
+			val = otg_read(otg, GUSB2PHYACC0) &
+				  GUSB2PHYACC0_REGDATA_MASK;
+			otg_dbg(otg, "%s - reg 0x%x data 0x%x\n",
+					__func__, reg, val);
+			goto cleanup;
+		}
+
+		count--;
+	}
+
+	otg_err(otg, "%s read PHY data failed.\n", __func__);
+
+	return -ETIMEDOUT;
+
+cleanup:
+	/* Clear GUSB2PHYACC0[16:21] before returning. Otherwise the PHY
+	 * cannot reach a workable state. This is a dwc3 controller
+	 * silicon bug.
+	 */
+	tmp = otg_read(otg, GUSB2PHYACC0);
+	otg_write(otg, GUSB2PHYACC0, tmp &
+			~GUSB2PHYACC0_REGADDR(0x3F));
+	return val;
+}
+
+static int ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
+{
+	struct dwc_otg2 *otg = container_of(phy, struct dwc_otg2, usb2_phy);
+	u32 val32 = 0, count = 200;
+	u8 tmp;
+
+	val &= 0xFF;
+	reg &= 0xFF;
+
+	while (count) {
+		if (otg_read(otg, GUSB2PHYACC0) & GUSB2PHYACC0_VSTSBSY)
+			udelay(5);
+		else
+			break;
+
+		count--;
+	}
+
+	if (!count) {
+		otg_err(otg, "USB2 PHY always busy!!\n");
+		return -EBUSY;
+	}
+
+	count = 200;
+
+	if (reg & EXTEND_ULPI_REGISTER_ACCESS_MASK) {
+		otg_dbg(otg, "Access extended registers 0x%x\n", reg);
+		val32 = GUSB2PHYACC0_NEWREGREQ
+			| GUSB2PHYACC0_REGADDR(ULPI_ACCESS_EXTENDED)
+			| GUSB2PHYACC0_VCTRL(reg)
+			| GUSB2PHYACC0_REGWR | GUSB2PHYACC0_REGDATA(val);
+	} else {
+		otg_dbg(otg, "Access normal registers 0x%x\n", reg);
+		val32 = GUSB2PHYACC0_NEWREGREQ
+			| GUSB2PHYACC0_REGADDR(reg)
+			| GUSB2PHYACC0_REGWR
+			| GUSB2PHYACC0_REGDATA(val);
+	}
+	otg_write(otg, GUSB2PHYACC0, val32);
+
+	while (count) {
+		if (otg_read(otg, GUSB2PHYACC0) & GUSB2PHYACC0_VSTSDONE) {
+			otg_dbg(otg, "%s - reg 0x%x data 0x%x write done\n",
+					__func__, reg, val);
+			goto cleanup;
+		}
+
+		count--;
+	}
+
+	otg_err(otg, "%s write PHY data failed.\n", __func__);
+
+	return -ETIMEDOUT;
+
+cleanup:
+	/* Clear GUSB2PHYACC0[16:21] before returning. Otherwise the PHY
+	 * cannot reach a workable state. This is a dwc3 controller
+	 * silicon bug.
+	 */
+	tmp = otg_read(otg, GUSB2PHYACC0);
+	otg_write(otg, GUSB2PHYACC0, tmp &
+			~GUSB2PHYACC0_REGADDR(0x3F));
+	return 0;
+}
+
+static struct usb_phy_io_ops dwc_otg_io_ops = {
+	.read = ulpi_read,
+	.write = ulpi_write,
+};
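Once the PHY is registered with these io_ops, any code holding the usb_phy can reach ulpi_read()/ulpi_write() through usb_phy_io_read()/usb_phy_io_write(); the FUNC_CTRL workaround in dwc3_gadget_run_stop() is exactly such a caller. A small consumer sketch (read-modify-write of a ULPI register; the helper name is hypothetical):

	#include <linux/err.h>
	#include <linux/usb/phy.h>
	#include <linux/usb/ulpi.h>

	/* Set bits in ULPI FUNC_CTRL via the registered PHY io_ops. */
	static int example_ulpi_set_bits(u8 set_bits)
	{
		struct usb_phy *phy = usb_get_phy(USB_PHY_TYPE_USB2);
		int val;

		if (IS_ERR(phy))	/* usb_get_phy() returns ERR_PTR */
			return PTR_ERR(phy);

		val = usb_phy_io_read(phy, ULPI_FUNC_CTRL);	/* -> ulpi_read() */
		if (val >= 0)
			val = usb_phy_io_write(phy, val | set_bits,
					       ULPI_FUNC_CTRL);	/* -> ulpi_write() */

		usb_put_phy(phy);
		return val;
	}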
+
+static struct dwc_otg2 *dwc3_otg_alloc(struct device *dev)
+{
+	struct dwc_otg2 *otg = NULL;
+	struct usb_phy *usb_phy;
+	int retval;
+
+	otg = kzalloc(sizeof(*otg), GFP_KERNEL);
+	if (!otg) {
+		/* otg is NULL here, so otg_err() must not be used */
+		pr_err("%s: alloc otg failed\n", __func__);
+		return NULL;
+	}
+
+	the_transceiver = otg;
+	otg->otg_data = dev->platform_data;
+
+	usb_phy = &otg->usb2_phy;
+	otg->otg.phy = usb_phy;
+	otg->usb2_phy.otg = &otg->otg;
+
+	otg->dev		= dev;
+	otg->usb3_phy.dev		= otg->dev;
+	otg->usb3_phy.label		= "dwc-usb3-phy";
+	otg->usb3_phy.state		= OTG_STATE_UNDEFINED;
+	otg->usb3_phy.otg	= &otg->otg;
+	otg->usb2_phy.dev		= otg->dev;
+	otg->usb2_phy.label		= "dwc-usb2-phy";
+	otg->usb2_phy.state		= OTG_STATE_UNDEFINED;
+	otg->usb2_phy.set_power	= dwc3_otg_pdata->set_power;
+	otg->usb2_phy.get_chrg_status	= dwc_otg_get_chrg_status;
+	otg->usb2_phy.io_ops = &dwc_otg_io_ops;
+	otg->usb2_phy.otg	= &otg->otg;
+	otg->otg.set_host	= dwc_otg2_set_host;
+	otg->otg.set_peripheral	= dwc_otg2_set_peripheral;
+	ATOMIC_INIT_NOTIFIER_HEAD(&otg->usb2_phy.notifier);
+	ATOMIC_INIT_NOTIFIER_HEAD(&otg->usb3_phy.notifier);
+
+	otg->state = DWC_STATE_B_IDLE;
+	spin_lock_init(&otg->lock);
+	init_waitqueue_head(&otg->main_wq);
+
+	/* Register otg notifier to monitor ID and VBus change events */
+	otg->nb.notifier_call = dwc_otg_handle_notification;
+	usb_register_notifier(&otg->usb2_phy, &otg->nb);
+
+	otg_dbg(otg, "Version: %s\n", VERSION);
+	retval = usb_add_phy(&otg->usb2_phy, USB_PHY_TYPE_USB2);
+	if (retval) {
+		otg_err(otg, "can't register transceiver, err: %d\n",
+			retval);
+		goto err1;
+	}
+
+	retval = usb_add_phy(&otg->usb3_phy, USB_PHY_TYPE_USB3);
+	if (retval) {
+		otg_err(otg, "can't register transceiver, err: %d\n",
+			retval);
+		goto err2;
+	}
+
+	return otg;
+
+err2:
+	usb_remove_phy(&otg->usb2_phy);
+
+err1:
+	kfree(otg);
+	the_transceiver = NULL;
+
+	return NULL;
+}
+
+static int dwc3_otg_create_children(struct dwc_otg2 *otg,
+		struct resource *res, int num)
+{
+	struct platform_device *dwc_host, *dwc_gadget;
+	enum dwc3_otg_mode mode = dwc3_otg_pdata->mode;
+	int retval = 0, i;
+
+	if (!otg || !res)
+		return -EINVAL;
+
+	if (num != 2)
+		return -EINVAL;
+
+	dwc_host = dwc_gadget = NULL;
+
+	for (i = 0; i < 2; i++) {
+		if (res[i].flags == IORESOURCE_MEM) {
+			otg->usb2_phy.io_priv = ioremap_nocache(
+				res[i].start, resource_size(&res[i]));
+			if (!otg->usb2_phy.io_priv) {
+				otg_err(otg, "dwc3 otg ioremap failed\n");
+				return -ENOMEM;
+			}
+			break;
+		}
+	}
+
+	/* the resource array contained no memory I/O resource */
+	if (!otg->usb2_phy.io_priv)
+		return -EINVAL;
+
+	platform_par = kzalloc(sizeof(*platform_par), GFP_KERNEL);
+	if (!platform_par) {
+		otg_err(otg, "alloc dwc_device_par failed\n");
+		retval = -ENOMEM;
+		goto err1;
+	}
+
+	platform_par->io_addr = otg->usb2_phy.io_priv;
+	platform_par->len = resource_size(&res[i]);
+
+	if (mode == DWC3_DEVICE_ONLY)
+		goto device_only;
+
+	dwc_host = platform_device_alloc(DWC3_HOST_NAME,
+			HOST_DEVID);
+	if (!dwc_host) {
+		otg_err(otg, "couldn't allocate dwc3 host device\n");
+		retval = -ENOMEM;
+		goto err2;
+	}
+
+	retval = platform_device_add_resources(dwc_host, res, num);
+	if (retval) {
+		otg_err(otg, "couldn't add resources to dwc3 host device\n");
+		platform_device_put(dwc_host);
+		goto err2;
+	}
+
+	platform_device_add_data(dwc_host, platform_par,
+			sizeof(struct dwc_device_par));
+
+	dwc_host->dev.dma_mask = otg->dev->dma_mask;
+	dwc_host->dev.dma_parms = otg->dev->dma_parms;
+	dwc_host->dev.parent = otg->dev;
+
+	retval = platform_device_add(dwc_host);
+	if (retval) {
+		otg_err(otg, "failed to register dwc3 host\n");
+		platform_device_put(dwc_host);
+		goto err2;
+	}
+
+	otg->host = dwc_host;
+
+	if (mode != DWC3_DRD)
+		return 0;
+
+device_only:
+	dwc_gadget = platform_device_alloc(DWC3_DEVICE_NAME,
+			GADGET_DEVID);
+	if (!dwc_gadget) {
+		otg_err(otg, "couldn't allocate dwc3 gadget device\n");
+		retval = -ENOMEM;
+		goto err3;
+	}
+
+	retval = platform_device_add_resources(dwc_gadget,
+				res, num);
+	if (retval) {
+		otg_err(otg, "couldn't add resources to dwc3 gadget device\n");
+		platform_device_put(dwc_gadget);
+		goto err3;
+	}
+
+	dwc_gadget->dev.dma_mask = otg->dev->dma_mask;
+	dwc_gadget->dev.dma_parms = otg->dev->dma_parms;
+	dwc_gadget->dev.parent = otg->dev;
+
+	platform_device_add_data(dwc_gadget, platform_par,
+			sizeof(struct dwc_device_par));
+	retval = platform_device_add(dwc_gadget);
+	if (retval) {
+		otg_err(otg, "failed to register dwc3 gadget\n");
+		platform_device_put(dwc_gadget);
+		goto err3;
+	}
+	otg->gadget = dwc_gadget;
+
+	return 0;
+
+err3:
+	if (mode == DWC3_DRD)
+		platform_device_unregister(dwc_host);
+
+err2:
+	kfree(platform_par);
+	platform_par = NULL;
+
+err1:
+	iounmap(otg->usb2_phy.io_priv);
+	otg->usb2_phy.io_priv = NULL;
+
+	return retval;
+}
+
+#ifdef CONFIG_PCI
+
+static int dwc_otg_probe(struct pci_dev *pdev,
+			const struct pci_device_id *id)
+{
+	int retval = 0;
+	struct resource		res[2];
+	struct dwc_otg2 *otg = NULL;
+	unsigned long resource, len;
+
+	if (!dwc3_otg_pdata)
+		return -ENODEV;
+
+	if (pci_enable_device(pdev) < 0) {
+		dev_err(&pdev->dev, "pci device enable failed\n");
+		return -ENODEV;
+	}
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_set_master(pdev);
+
+	otg = dwc3_otg_alloc(&pdev->dev);
+	if (!otg) {
+		/* otg is NULL here, so otg_err() must not be used */
+		dev_err(&pdev->dev, "dwc3 otg init failed\n");
+		retval = -ENOMEM;
+		goto err;
+	}
+
+	/* control register: BAR 0 */
+	resource = pci_resource_start(pdev, 0);
+	len = pci_resource_len(pdev, 0);
+	if (!request_mem_region(resource, len, driver_name)) {
+		otg_err(otg, "Request memory region failed\n");
+		retval = -EBUSY;
+		goto err;
+	}
+
+	otg_dbg(otg, "dwc otg pci resouce: 0x%lu, len: 0x%lu\n",
+			resource, len);
+	otg_dbg(otg, "vendor: 0x%x, device: 0x%x\n",
+			pdev->vendor, pdev->device);
+
+	memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
+
+	res[0].start	= pci_resource_start(pdev, 0);
+	res[0].end	= pci_resource_end(pdev, 0);
+	res[0].name	= "dwc_usb3_io";
+	res[0].flags	= IORESOURCE_MEM;
+
+	res[1].start	= pdev->irq;
+	res[1].end	= pdev->irq;
+	res[1].name	= "dwc_usb3_irq";
+	res[1].flags	= IORESOURCE_IRQ;
+
+	retval = dwc3_otg_create_children(otg, res, ARRAY_SIZE(res));
+	if (retval) {
+		otg_err(otg, "dwc3 otg create alloc children failed\n");
+		goto err;
+	}
+
+	otg->irqnum = pdev->irq;
+
+	if (dwc3_otg_pdata->platform_init) {
+		retval = dwc3_otg_pdata->platform_init(otg);
+		if (retval)
+			goto err;
+	}
+
+	pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
+	pm_runtime_mark_last_busy(otg->dev);
+	pm_runtime_put_autosuspend(&pdev->dev);
+
+	return 0;
+
+err:
+	if (the_transceiver)
+		dwc_otg_remove(pdev);
+
+	return retval;
+}
+
+static void dwc_otg_remove(struct pci_dev *pdev)
+{
+	struct dwc_otg2 *otg = the_transceiver;
+	unsigned long resource, len;
+
+	if (otg->gadget)
+		platform_device_unregister(otg->gadget);
+	if (otg->host)
+		platform_device_unregister(otg->host);
+
+	pm_runtime_forbid(&pdev->dev);
+	pm_runtime_set_suspended(&pdev->dev);
+
+	kfree(platform_par);
+	if (otg->usb2_phy.io_priv)
+		iounmap(otg->usb2_phy.io_priv);
+
+	usb_remove_phy(&otg->usb2_phy);
+	usb_remove_phy(&otg->usb3_phy);
+	kfree(otg);
+
+	resource = pci_resource_start(pdev, 0);
+	len = pci_resource_len(pdev, 0);
+	release_mem_region(resource, len);
+
+	pci_disable_device(pdev);
+
+	the_transceiver = NULL;
+}
+
+static void dwc_otg_shutdown(struct pci_dev *pdev)
+{
+	struct dwc_otg2 *otg = the_transceiver;
+
+	/* stop main thread, ignore notification events */
+	stop_main_thread(otg);
+
+	pci_disable_device(pdev);
+}
+
+static int dwc_otg_runtime_idle(struct device *dev)
+{
+	if (dwc3_otg_pdata->idle)
+		return dwc3_otg_pdata->idle(the_transceiver);
+
+	return 0;
+}
+
+static int dwc_otg_runtime_suspend(struct device *dev)
+{
+	if (dwc3_otg_pdata->suspend)
+		return dwc3_otg_pdata->suspend(the_transceiver);
+
+	return 0;
+}
+
+static int dwc_otg_runtime_resume(struct device *dev)
+{
+	if (dwc3_otg_pdata->resume)
+		return dwc3_otg_pdata->resume(the_transceiver);
+	return 0;
+}
+
+static int dwc_otg_suspend(struct device *dev)
+{
+	if (dwc3_otg_pdata->suspend)
+		return dwc3_otg_pdata->suspend(the_transceiver);
+	return 0;
+}
+
+static int dwc_otg_resume(struct device *dev)
+{
+	if (dwc3_otg_pdata->resume)
+		return dwc3_otg_pdata->resume(the_transceiver);
+	return 0;
+}
+
+static const struct dev_pm_ops dwc_usb_otg_pm_ops = {
+	.runtime_suspend = dwc_otg_runtime_suspend,
+	.runtime_resume	= dwc_otg_runtime_resume,
+	.runtime_idle = dwc_otg_runtime_idle,
+	.suspend = dwc_otg_suspend,
+	.resume	= dwc_otg_resume
+};
+
+static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
+	{ PCI_DEVICE_CLASS(((PCI_CLASS_SERIAL_USB << 8) | 0x20), ~0),
+		.vendor = PCI_VENDOR_ID_INTEL,
+		.device = PCI_DEVICE_ID_DWC,
+	},
+	{ PCI_DEVICE_CLASS(((PCI_CLASS_SERIAL_USB << 8) | 0x80), ~0),
+		.vendor = PCI_VENDOR_ID_INTEL,
+		.device = PCI_DEVICE_ID_DWC,
+	},
+	{ /* end: all zeroes */ }
+};
+
+static struct pci_driver dwc_otg_pci_driver = {
+	.name =		(char *) driver_name,
+	.id_table =	pci_ids,
+	.probe =	dwc_otg_probe,
+	.remove =	dwc_otg_remove,
+	.shutdown = dwc_otg_shutdown,
+	.driver = {
+		.name = (char *) driver_name,
+		.pm = &dwc_usb_otg_pm_ops,
+		.owner = THIS_MODULE,
+	},
+};
+#endif
+
+int dwc3_otg_register(struct dwc3_otg_hw_ops *pdata)
+{
+	int retval = 0;
+
+	if (!pdata)
+		return -EINVAL;
+
+	if (dwc3_otg_pdata)
+		return -EBUSY;
+
+	dwc3_otg_pdata = pdata;
+
+#ifdef CONFIG_PCI
+	retval = pci_register_driver(&dwc_otg_pci_driver);
+#endif
+	mutex_init(&lock);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(dwc3_otg_register);
+
+int dwc3_otg_unregister(struct dwc3_otg_hw_ops *pdata)
+{
+	if (!pdata)
+		return -EINVAL;
+
+	if (dwc3_otg_pdata != pdata)
+		return -EINVAL;
+
+	dwc3_otg_pdata = NULL;
+
+#ifdef CONFIG_PCI
+	pci_unregister_driver(&dwc_otg_pci_driver);
+#endif
+	mutex_destroy(&lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dwc3_otg_unregister);
+
+static int __init dwc_otg_init(void)
+{
+	return 0;
+}
+module_init(dwc_otg_init);
+
+static void __exit dwc_otg_exit(void)
+{
+}
+module_exit(dwc_otg_exit);
+
+MODULE_AUTHOR("Synopsys, Inc and Wang Yu <yu.y.wang@intel.com>");
+MODULE_DESCRIPTION("DWC3 OTG Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION("1.0");
diff --git a/drivers/usb/dwc3/otg.h b/drivers/usb/dwc3/otg.h
new file mode 100644
index 0000000..a1bcd97
--- /dev/null
+++ b/drivers/usb/dwc3/otg.h
@@ -0,0 +1,432 @@
+/*
+ * Intel Penwell USB OTG transceiver driver
+ * Copyright (C) 2009 - 2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __DWC3_OTG_H
+#define __DWC3_OTG_H
+
+#include <linux/usb.h>
+#include <linux/device.h>
+#include <linux/compiler.h>
+#include <linux/power_supply.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/ulpi.h>
+
+
+struct dwc_device_par {
+	void __iomem *io_addr;
+	int len;
+};
+
+#define DWC3_DEVICE_NAME "dwc3-device"
+#define DWC3_HOST_NAME "dwc3-host"
+#define GADGET_DEVID 1
+#define HOST_DEVID 2
+#define DRIVER_VERSION "0.1"
+
+#ifdef CONFIG_USB_DWC3_OTG_DEBUG
+#define DWC_OTG_DEBUG 1
+#else
+#define DWC_OTG_DEBUG 0
+#endif
+
+#define otg_dbg(d, fmt, args...)  \
+	do { if (DWC_OTG_DEBUG) dev_dbg((d)->dev, \
+			"%s(): " fmt , __func__, ## args); } while (0)
+#define otg_vdbg(d, fmt, args...)  \
+	do { if (DWC_OTG_DEBUG) dev_dbg((d)->dev, \
+			"%s(): " fmt , __func__, ## args); } while (0)
+#define otg_err(d, fmt, args...)  \
+	do { if (DWC_OTG_DEBUG) dev_err((d)->dev, \
+			"%s(): " fmt , __func__, ## args); } while (0)
+#define otg_warn(d, fmt, args...)  \
+	do { if (DWC_OTG_DEBUG) dev_warn((d)->dev, \
+			"%s(): " fmt , __func__, ## args); } while (0)
+#define otg_info(d, fmt, args...)  \
+	do { if (DWC_OTG_DEBUG) dev_info((d)->dev, \
+			"%s(): " fmt , __func__, ## args); } while (0)
+
+#ifdef DEBUG
+#define otg_write(o, reg, val)	do { \
+		otg_dbg(o, "OTG_WRITE: reg=0x%05x, val=0x%08x\n", reg, val); \
+		writel(val, ((void __iomem *)((o)->usb2_phy.io_priv)) + reg); \
+	} while (0)
+
+#define otg_read(o, reg) ({ \
+		u32 __r; \
+		__r = readl(((void __iomem *)((o)->usb2_phy.io_priv)) + reg); \
+		otg_dbg(o, "OTG_READ: reg=0x%05x, val=0x%08x\n", reg, __r); \
+		__r; \
+	})
+#else
+#define otg_write(o, reg, val) \
+		writel(val, ((void __iomem *)((o)->usb2_phy.io_priv)) + reg)
+
+#define otg_read(o, reg)	({ \
+		readl(((void __iomem *)((o)->usb2_phy.io_priv)) + reg); \
+	})
+#endif
+
+#define GUSB2PHYCFG0				0xc200
+#define GUSB2PHYCFG_SUS_PHY                     0x40
+#define GUSB2PHYCFG_PHYSOFTRST (1 << 31)
+#define GUSB2PHYCFG_ULPI_AUTO_RESUME (1 << 15)
+#define GUSB2PHYCFG_ULPI_EXT_VBUS_DRV (1 << 17)
+
+#define EXTEND_ULPI_REGISTER_ACCESS_MASK	0xC0
+#define GUSB2PHYACC0	0xc280
+#define GUSB2PHYACC0_DISULPIDRVR  (1 << 26)
+#define GUSB2PHYACC0_NEWREGREQ  (1 << 25)
+#define GUSB2PHYACC0_VSTSDONE  (1 << 24)
+#define GUSB2PHYACC0_VSTSBSY  (1 << 23)
+#define GUSB2PHYACC0_REGWR  (1 << 22)
+#define GUSB2PHYACC0_REGADDR(v)  ((v & 0x3F) << 16)
+#define GUSB2PHYACC0_EXTREGADDR(v)  ((v & 0x3F) << 8)
+#define GUSB2PHYACC0_VCTRL(v)  ((v & 0xFF) << 8)
+#define GUSB2PHYACC0_REGDATA(v)  (v & 0xFF)
+#define GUSB2PHYACC0_REGDATA_MASK  0xFF
+
+#define GUSB3PIPECTL0                           0xc2c0
+#define GUSB3PIPECTL_SUS_EN                     0x20000
+#define GUSB3PIPE_DISRXDETP3                    (1 << 28)
+#define GUSB3PIPECTL_PHYSOFTRST (1 << 31)
+
+#define GHWPARAMS6				0xc158
+#define GHWPARAMS6_SRP_SUPPORT_ENABLED		0x0400
+#define GHWPARAMS6_HNP_SUPPORT_ENABLED		0x0800
+#define GHWPARAMS6_ADP_SUPPORT_ENABLED		0x1000
+
+#define GUCTL 0xC12C
+#define GUCTL_CMDEVADDR		(1 << 15)
+
+#define GCTL 0xc110
+#define GCTL_PRT_CAP_DIR 0x3000
+#define GCTL_PRT_CAP_DIR_SHIFT 12
+#define GCTL_PRT_CAP_DIR_HOST 1
+#define GCTL_PRT_CAP_DIR_DEV 2
+#define GCTL_PRT_CAP_DIR_OTG 3
+#define GCTL_GBL_HIBERNATION_EN 0x2
+#define GCTL_CORESOFTRESET (1 << 11)
+#define GCTL_PWRDNSCALE(x) (x << 19)
+#define GCTL_PWRDNSCALE_MASK (0x1fff << 19)
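+
+/*
+ * Sketch of a typical read-modify-write through the otg_read()/otg_write()
+ * helpers above, switching the port capability to host (not a complete
+ * role-switch sequence):
+ *
+ *	u32 gctl = otg_read(otg, GCTL);
+ *
+ *	gctl &= ~GCTL_PRT_CAP_DIR;
+ *	gctl |= GCTL_PRT_CAP_DIR_HOST << GCTL_PRT_CAP_DIR_SHIFT;
+ *	otg_write(otg, GCTL, gctl);
+ */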
+
+#define OCFG					0xcc00
+#define OCFG_SRP_CAP				0x01
+#define OCFG_SRP_CAP_SHIFT			0
+#define OCFG_HNP_CAP				0x02
+#define OCFG_HNP_CAP_SHIFT			1
+#define OCFG_OTG_VERSION			0x04
+#define OCFG_OTG_VERSION_SHIFT			2
+
+#define OCTL					0xcc04
+#define OCTL_HST_SET_HNP_EN			0x01
+#define OCTL_HST_SET_HNP_EN_SHIFT		0
+#define OCTL_DEV_SET_HNP_EN			0x02
+#define OCTL_DEV_SET_HNP_EN_SHIFT		1
+#define OCTL_TERM_SEL_DL_PULSE			0x04
+#define OCTL_TERM_SEL_DL_PULSE_SHIFT		2
+#define OCTL_SES_REQ				0x08
+#define OCTL_SES_REQ_SHIFT			3
+#define OCTL_HNP_REQ				0x10
+#define OCTL_HNP_REQ_SHIFT			4
+#define OCTL_PRT_PWR_CTL			0x20
+#define OCTL_PRT_PWR_CTL_SHIFT			5
+#define OCTL_PERI_MODE				0x40
+#define OCTL_PERI_MODE_SHIFT			6
+
+#define OEVT					0xcc08
+#define OEVT_ERR				0x00000001
+#define OEVT_ERR_SHIFT				0
+#define OEVT_SES_REQ_SCS			0x00000002
+#define OEVT_SES_REQ_SCS_SHIFT			1
+#define OEVT_HST_NEG_SCS			0x00000004
+#define OEVT_HST_NEG_SCS_SHIFT			2
+#define OEVT_B_SES_VLD_EVT			0x00000008
+#define OEVT_B_SES_VLD_EVT_SHIFT		3
+#define OEVT_B_DEV_VBUS_CHNG_EVNT		0x00000100
+#define OEVT_B_DEV_VBUS_CHNG_EVNT_SHIFT		8
+#define OEVT_B_DEV_SES_VLD_DET_EVNT		0x00000200
+#define OEVT_B_DEV_SES_VLD_DET_EVNT_SHIFT	9
+#define OEVT_B_DEV_HNP_CHNG_EVNT		0x00000400
+#define OEVT_B_DEV_HNP_CHNG_EVNT_SHIFT		10
+#define OEVT_B_DEV_B_HOST_END_EVNT		0x00000800
+#define OEVT_B_DEV_B_HOST_END_EVNT_SHIFT	11
+#define OEVT_A_DEV_SESS_END_DET_EVNT		0x00010000
+#define OEVT_A_DEV_SESS_END_DET_EVNT_SHIFT	16
+#define OEVT_A_DEV_SRP_DET_EVNT			0x00020000
+#define OEVT_A_DEV_SRP_DET_EVNT_SHIFT		17
+#define OEVT_A_DEV_HNP_CHNG_EVNT		0x00040000
+#define OEVT_A_DEV_HNP_CHNG_EVNT_SHIFT		18
+#define OEVT_A_DEV_HOST_EVNT			0x00080000
+#define OEVT_A_DEV_HOST_EVNT_SHIFT		19
+#define OEVT_A_DEV_B_DEV_HOST_END_EVNT		0x00100000
+#define OEVT_A_DEV_B_DEV_HOST_END_EVNT_SHIFT	20
+#define OEVT_HOST_ROLE_REQ_INIT_EVNT            0x00400000
+#define OEVT_HOST_ROLE_REQ_INIT_EVNT_SHIFT      22
+#define OEVT_HOST_ROLE_REQ_CONFIRM_EVNT         0x00800000
+#define OEVT_HOST_ROLE_REQ_CONFIRM_EVNT_SHIFT   23
+#define OEVT_CONN_ID_STS_CHNG_EVNT		0x01000000
+#define OEVT_CONN_ID_STS_CHNG_EVNT_SHIFT	24
+#define OEVT_DEV_MOD_EVNT			0x80000000
+#define OEVT_DEV_MOD_EVNT_SHIFT			31
+
+#define OEVTEN					0xcc0c
+
+#define OEVT_ALL (OEVT_CONN_ID_STS_CHNG_EVNT | \
+		OEVT_HOST_ROLE_REQ_INIT_EVNT | \
+		OEVT_HOST_ROLE_REQ_CONFIRM_EVNT | \
+		OEVT_A_DEV_B_DEV_HOST_END_EVNT | \
+		OEVT_A_DEV_HOST_EVNT | \
+		OEVT_A_DEV_HNP_CHNG_EVNT | \
+		OEVT_A_DEV_SRP_DET_EVNT | \
+		OEVT_A_DEV_SESS_END_DET_EVNT | \
+		OEVT_B_DEV_B_HOST_END_EVNT | \
+		OEVT_B_DEV_HNP_CHNG_EVNT | \
+		OEVT_B_DEV_SES_VLD_DET_EVNT | \
+		OEVT_B_DEV_VBUS_CHNG_EVNT)
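+
+/*
+ * Sketch: acknowledge anything pending and then unmask every OTG event
+ * (OEVT status bits are assumed here to be write-one-to-clear, as is
+ * usual for DWC3 event registers):
+ *
+ *	otg_write(otg, OEVT, otg_read(otg, OEVT));
+ *	otg_write(otg, OEVTEN, OEVT_ALL);
+ */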
+
+#define OSTS					0xcc10
+#define OSTS_CONN_ID_STS			0x0001
+#define OSTS_CONN_ID_STS_SHIFT			0
+#define OSTS_A_SES_VLD				0x0002
+#define OSTS_A_SES_VLD_SHIFT			1
+#define OSTS_B_SES_VLD				0x0004
+#define OSTS_B_SES_VLD_SHIFT			2
+#define OSTS_XHCI_PRT_PWR			0x0008
+#define OSTS_XHCI_PRT_PWR_SHIFT			3
+#define OSTS_PERIP_MODE				0x0010
+#define OSTS_PERIP_MODE_SHIFT			4
+#define OSTS_OTG_STATES				0x0f00
+#define OSTS_OTG_STATE_SHIFT			8
+
+#define ADPCFG					0xcc20
+#define ADPCFG_PRB_DSCHGS			0x0c000000
+#define ADPCFG_PRB_DSCHG_SHIFT			26
+#define ADPCFG_PRB_DELTAS			0x30000000
+#define ADPCFG_PRB_DELTA_SHIFT			28
+#define ADPCFG_PRB_PERS				0xc0000000
+#define ADPCFG_PRB_PER_SHIFT			30
+
+#define ADPCTL					0xcc24
+#define ADPCTL_WB				0x01000000
+#define ADPCTL_WB_SHIFT				24
+#define ADPCTL_ADP_RES				0x02000000
+#define ADPCTL_ADP_RES_SHIFT			25
+#define ADPCTL_ADP_EN				0x04000000
+#define ADPCTL_ADP_EN_SHIFT			26
+#define ADPCTL_ENA_SNS				0x08000000
+#define ADPCTL_ENA_SNS_SHIFT			27
+#define ADPCTL_ENA_PRB				0x10000000
+#define ADPCTL_ENA_PRB_SHIFT			28
+
+#define ADPEVT					0xcc28
+#define ADPEVT_RTIM_EVNTS			0x000007ff
+#define ADPEVT_RTIM_EVNT_SHIFT			0
+#define ADPEVT_ADP_RST_CMPLT_EVNT		0x02000000
+#define ADPEVT_ADP_RST_CMPLT_EVNT_SHIFT		25
+#define ADPEVT_ADP_TMOUT_EVNT			0x04000000
+#define ADPEVT_ADP_TMOUT_EVNT_SHIFT		26
+#define ADPEVT_ADP_SNS_EVNT			0x08000000
+#define ADPEVT_ADP_SNS_EVNT_SHIFT		27
+#define ADPEVT_ADP_PRB_EVNT			0x10000000
+#define ADPEVT_ADP_PRB_EVNT_SHIFT		28
+
+#define ADPEVTEN				0xcc2c
+#define ADPEVTEN_ACC_DONE_EN			0x01000000
+#define ADPEVTEN_ACC_DONE_EN_SHIFT		24
+#define ADPEVTEN_ADP_RST_CMPLT_EVNT_EN		0x02000000
+#define ADPEVTEN_ADP_RST_CMPLT_EVNT_EN_SHIFT	25
+#define ADPEVTEN_ADP_TMOUT_EVNT_EN		0x04000000
+#define ADPEVTEN_ADP_TMOUT_EVNT_EN_SHIFT	26
+#define ADPEVTEN_ADP_SNS_EVNT_EN		0x08000000
+#define ADPEVTEN_ADP_SNS_EVNT_EN_SHIFT		27
+#define ADPEVTEN_ADP_PRB_EVNT_EN		0x10000000
+#define ADPEVTEN_ADP_PRB_EVNT_EN_SHIFT		28
+
+#define RID_A		0x01
+#define RID_B		0x02
+#define RID_C		0x03
+#define RID_FLOAT	0x04
+#define RID_GND		0x05
+#define RID_UNKNOWN	0x00
+
+/** The states for the OTG driver */
+enum dwc_otg_state {
+	DWC_STATE_INVALID = -1,
+
+	/** The initial state, check the connector
+	 * id status and determine what mode
+	 * (A-device or B-device) to operate in. */
+	DWC_STATE_B_IDLE = 0,
+
+	/* A-Host states */
+	DWC_STATE_A_PROBE,
+	DWC_STATE_A_HOST,
+	DWC_STATE_A_HNP_INIT,
+
+	/* A-Peripheral states */
+	DWC_STATE_A_PERIPHERAL,
+
+	/* B-Peripheral states */
+	DWC_STATE_B_SENSE,
+	DWC_STATE_B_PROBE,
+	DWC_STATE_B_PERIPHERAL,
+	DWC_STATE_B_HNP_INIT,
+
+	/* B-Host states */
+	DWC_STATE_B_HOST,
+
+	/* RSP */
+	DWC_STATE_B_RSP_INIT,
+
+	/* USB charger detection */
+	DWC_STATE_CHARGER_DETECTION,
+
+	/* VBUS */
+	DWC_STATE_WAIT_VBUS_RAISE,
+	DWC_STATE_WAIT_VBUS_FALL,
+
+	/* Charging*/
+	DWC_STATE_CHARGING,
+
+	/* Exit */
+	DWC_STATE_EXIT,
+	DWC_STATE_TERMINATED
+};
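+
+/*
+ * A typical B-device session is expected to walk DWC_STATE_B_IDLE ->
+ * DWC_STATE_CHARGER_DETECTION -> DWC_STATE_B_PERIPHERAL (or
+ * DWC_STATE_CHARGING for a dedicated charger) and to return to
+ * DWC_STATE_B_IDLE when the cable is removed; the main thread drives
+ * these transitions from the OTG and user events below.
+ */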
+
+/** The main structure to keep track of OTG driver state. */
+struct dwc_otg2 {
+	/** OTG transceiver */
+	struct usb_otg	otg;
+	struct usb_phy	usb2_phy;
+	struct usb_phy	usb3_phy;
+	struct device		*dev;
+	int irqnum;
+
+	int main_wakeup_needed;
+	struct task_struct *main_thread;
+	wait_queue_head_t main_wq;
+
+	spinlock_t lock;
+
+	/* Events */
+	u32 otg_events;
+	u32 user_events;
+
+	/** User space ID switch event */
+#define USER_ID_A_CHANGE_EVENT 0x01
+#define USER_ID_B_CHANGE_EVENT 0x02
+	/** a_bus_drop event from userspace */
+#define USER_A_BUS_DROP 0x40
+
+	/* States */
+	enum dwc_otg_state prev;
+	enum dwc_otg_state state;
+	struct platform_device *host;
+	struct platform_device *gadget;
+
+	/* Charger detection */
+	struct power_supply_cable_props charging_cap;
+	struct notifier_block nb;
+
+	/* Interfaces between host/device driver */
+	int (*start_host) (struct usb_hcd *hcd);
+	int (*stop_host) (struct usb_hcd *hcd);
+	int (*start_device)(struct usb_gadget *);
+	int (*stop_device)(struct usb_gadget *);
+	int (*vbus_draw) (struct usb_gadget *, unsigned ma);
+
+	/* Vendor driver private date */
+	void *otg_data;
+};
+
+#define sleep_main_thread_until_condition_timeout(otg, condition, msecs) ({ \
+		int __timeout = msecs;				\
+		while (!(condition)) {				\
+			otg_dbg(otg, "  ... sleeping for %d\n", __timeout); \
+			__timeout = sleep_main_thread_timeout(otg, __timeout); \
+			if (__timeout <= 0) {			\
+				break;				\
+			}					\
+		}						\
+		__timeout;					\
+	})
+
+#define sleep_main_thread_until_condition(otg, condition) ({	\
+		int __rc = 0;					\
+		do {						\
+			__rc = sleep_main_thread_until_condition_timeout(otg, \
+			condition, 50000); \
+		} while (__rc == 0);				\
+		__rc;						\
+	})
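+
+/*
+ * Usage sketch: park the main thread until an event bit shows up,
+ * bounded by a timeout in milliseconds (OEVT_B_SES_VLD_EVT is just an
+ * example condition):
+ *
+ *	rc = sleep_main_thread_until_condition_timeout(otg,
+ *			otg->otg_events & OEVT_B_SES_VLD_EVT, VBUS_TIMEOUT);
+ *	if (rc <= 0)
+ *		... handle the timeout ...
+ */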
+
+#define VBUS_TIMEOUT	300
+#define PCI_DEVICE_ID_DWC 0x119E
+
+enum dwc3_otg_mode {
+	DWC3_DEVICE_ONLY,
+	DWC3_HOST_ONLY,
+	DWC3_DRD,
+};
+
+enum driver_bus_type {
+	DWC3_PLAT,
+	DWC3_PCI,
+};
+
+struct dwc3_otg_hw_ops {
+	enum dwc3_otg_mode mode;
+	enum driver_bus_type bus;
+
+	int (*set_power)(struct usb_phy *_otg, unsigned ma);
+	int (*platform_init)(struct dwc_otg2 *otg);
+	int (*otg_notifier_handler)(struct notifier_block *nb,
+			unsigned long event, void *data);
+	int (*prepare_start_peripheral)(struct dwc_otg2 *otg);
+	int (*prepare_start_host)(struct dwc_otg2 *otg);
+	int (*after_stop_peripheral)(struct dwc_otg2 *otg);
+	int (*after_stop_host)(struct dwc_otg2 *otg);
+	int (*b_idle)(struct dwc_otg2 *otg);
+	int (*do_charging)(struct dwc_otg2 *otg);
+	int (*notify_charger_type)(struct dwc_otg2 *otg,
+			enum power_supply_charger_event event);
+	enum power_supply_charger_cable_type
+		(*get_charger_type)(struct dwc_otg2 *otg);
+	int (*enable_vbus)(struct dwc_otg2 *otg, int enable);
+	int (*get_id)(struct dwc_otg2 *otg);
+
+	int (*idle)(struct dwc_otg2 *otg);
+	int (*suspend)(struct dwc_otg2 *otg);
+	int (*resume)(struct dwc_otg2 *otg);
+};
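+
+/*
+ * A vendor glue driver fills in the callbacks it supports and hands the
+ * ops to dwc3_otg_register(); a minimal device-only sketch, where
+ * my_platform_init is a hypothetical callback:
+ *
+ *	static struct dwc3_otg_hw_ops my_hw_ops = {
+ *		.mode		= DWC3_DEVICE_ONLY,
+ *		.bus		= DWC3_PCI,
+ *		.platform_init	= my_platform_init,
+ *	};
+ *
+ *	ret = dwc3_otg_register(&my_hw_ops);
+ */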
+
+#define OTG_USB2_100MA				0xfff1
+#define OTG_USB3_150MA				0xfff2
+#define OTG_USB2_500MA				0xfff3
+#define OTG_USB3_900MA				0xfff4
+#define OTG_DEVICE_SUSPEND			0xfffe
+#define OTG_DEVICE_RESUME			0xffff
+
+void dwc3_wakeup_otg_thread(struct dwc_otg2 *otg);
+struct dwc_otg2 *dwc3_get_otg(void);
+int dwc3_otg_register(struct dwc3_otg_hw_ops *pdata);
+int dwc3_otg_unregister(struct dwc3_otg_hw_ops *pdata);
+#endif /* __DWC3_OTG_H */
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index f41aa0d..9de2eb2 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -818,6 +818,27 @@
 	  For more information, see Documentation/usb/gadget_printer.txt
 	  which includes sample code for accessing the device file.
 
+config USB_G_ANDROID
+	boolean "Android Composite Gadget"
+	depends on SND
+	select SND_PCM
+	select SND_RAWMIDI
+	select USB_F_ACM
+	select USB_LIBCOMPOSITE
+	select USB_U_SERIAL
+	help
+	  The Android Composite Gadget supports multiple USB
+	  functions: acm, accessory, audio source, ffs (used for adb),
+	  mass storage, midi, mtp, ptp and rndis.
+	  Each function can be configured and enabled/disabled
+	  dynamically from userspace through a sysfs interface.
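+
+	  A typical control sequence from userspace looks like the
+	  following ("android0" is an example node name):
+	    echo 0 > /sys/class/android_usb/android0/enable
+	    echo mtp,acm > /sys/class/android_usb/android0/functions
+	    echo 1 > /sys/class/android_usb/android0/enable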
+
+config USB_ANDROID_RNDIS_DWORD_ALIGNED
+	boolean "Use double word aligned"
+	depends on USB_G_ANDROID
+	help
+		Provides dword aligned for DMA controller.
+
 if TTY
 
 config USB_CDC_COMPOSITE
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 6afd166..0ec50ae 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -65,6 +65,7 @@
 g_webcam-y			:= webcam.o
 g_ncm-y				:= ncm.o
 g_acm_ms-y			:= acm_ms.o
+g_android-y			:= android.o
 g_tcm_usb_gadget-y		:= tcm_usb_gadget.o
 
 obj-$(CONFIG_USB_ZERO)		+= g_zero.o
@@ -84,4 +85,5 @@
 obj-$(CONFIG_USB_G_WEBCAM)	+= g_webcam.o
 obj-$(CONFIG_USB_G_NCM)		+= g_ncm.o
 obj-$(CONFIG_USB_G_ACM_MS)	+= g_acm_ms.o
+obj-$(CONFIG_USB_G_ANDROID)	+= g_android.o
 obj-$(CONFIG_USB_GADGET_TARGET)	+= tcm_usb_gadget.o
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
new file mode 100644
index 0000000..61c5e58
--- /dev/null
+++ b/drivers/usb/gadget/android.c
@@ -0,0 +1,1587 @@
+/*
+ * Gadget Driver for Android
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *         Benoit Goby <benoit@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+
+#include "gadget_chips.h"
+
+#include "f_fs.c"
+#include "f_audio_source.c"
+#include "f_midi.c"
+#include "f_mass_storage.c"
+#include "f_mtp.c"
+#include "f_accessory.c"
+#define USB_ETH_RNDIS y
+#include "f_rndis.c"
+#include "rndis.c"
+#include "u_ether.c"
+
+MODULE_AUTHOR("Mike Lockwood");
+MODULE_DESCRIPTION("Android Composite USB Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+
+static const char longname[] = "Gadget Android";
+
+/* Default vendor and product IDs, overridden by userspace */
+#define VENDOR_ID		0x18D1
+#define PRODUCT_ID		0x0001
+
+/* f_midi configuration */
+#define MIDI_INPUT_PORTS    1
+#define MIDI_OUTPUT_PORTS   1
+#define MIDI_BUFFER_SIZE    256
+#define MIDI_QUEUE_LENGTH   32
+
+struct android_usb_function {
+	char *name;
+	void *config;
+
+	struct device *dev;
+	char *dev_name;
+	struct device_attribute **attributes;
+
+	/* for android_dev.enabled_functions */
+	struct list_head enabled_list;
+
+	/* Optional: initialization during gadget bind */
+	int (*init)(struct android_usb_function *, struct usb_composite_dev *);
+	/* Optional: cleanup during gadget unbind */
+	void (*cleanup)(struct android_usb_function *);
+	/* Optional: called when the function is added to the list of
+	 *		enabled functions */
+	void (*enable)(struct android_usb_function *);
+	/* Optional: called when it is removed */
+	void (*disable)(struct android_usb_function *);
+
+	int (*bind_config)(struct android_usb_function *,
+			   struct usb_configuration *);
+
+	/* Optional: called when the configuration is removed */
+	void (*unbind_config)(struct android_usb_function *,
+			      struct usb_configuration *);
+	/* Optional: handle ctrl requests before the device is configured */
+	int (*ctrlrequest)(struct android_usb_function *,
+					struct usb_composite_dev *,
+					const struct usb_ctrlrequest *);
+};
+
+struct android_dev {
+	struct android_usb_function **functions;
+	struct list_head enabled_functions;
+	struct usb_composite_dev *cdev;
+	struct device *dev;
+
+	void (*setup_complete)(struct usb_ep *ep,
+				struct usb_request *req);
+
+	bool enabled;
+	int disable_depth;
+	struct mutex mutex;
+	bool connected;
+	bool sw_connected;
+	struct work_struct work;
+	char ffs_aliases[256];
+};
+
+static struct class *android_class;
+static struct android_dev *_android_dev;
+static int android_bind_config(struct usb_configuration *c);
+static void android_unbind_config(struct usb_configuration *c);
+
+/* string IDs are assigned dynamically */
+#define STRING_MANUFACTURER_IDX		0
+#define STRING_PRODUCT_IDX		1
+#define STRING_SERIAL_IDX		2
+
+static char manufacturer_string[256];
+static char product_string[256];
+static char serial_string[256];
+
+/* String Table */
+static struct usb_string strings_dev[] = {
+	[STRING_MANUFACTURER_IDX].s = manufacturer_string,
+	[STRING_PRODUCT_IDX].s = product_string,
+	[STRING_SERIAL_IDX].s = serial_string,
+	{  }			/* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+	.language	= 0x0409,	/* en-us */
+	.strings	= strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+	&stringtab_dev,
+	NULL,
+};
+
+static struct usb_device_descriptor device_desc = {
+	.bLength              = sizeof(device_desc),
+	.bDescriptorType      = USB_DT_DEVICE,
+	.bcdUSB               = __constant_cpu_to_le16(0x0200),
+	.bDeviceClass         = USB_CLASS_PER_INTERFACE,
+	.idVendor             = __constant_cpu_to_le16(VENDOR_ID),
+	.idProduct            = __constant_cpu_to_le16(PRODUCT_ID),
+	.bcdDevice            = __constant_cpu_to_le16(0xffff),
+	.bNumConfigurations   = 1,
+};
+
+static struct usb_configuration android_config_driver = {
+	.label		= "android",
+	.unbind		= android_unbind_config,
+	.bConfigurationValue = 1,
+	.bmAttributes	= USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
+	.MaxPower	= 500, /* 500 mA */
+};
+
+static void android_work(struct work_struct *data)
+{
+	struct android_dev *dev = container_of(data, struct android_dev, work);
+	struct usb_composite_dev *cdev = dev->cdev;
+	char *disconnected[2] = { "USB_STATE=DISCONNECTED", NULL };
+	char *connected[2]    = { "USB_STATE=CONNECTED", NULL };
+	char *configured[2]   = { "USB_STATE=CONFIGURED", NULL };
+	char **uevent_envp = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cdev->lock, flags);
+	if (cdev->config)
+		uevent_envp = configured;
+	else if (dev->connected != dev->sw_connected)
+		uevent_envp = dev->connected ? connected : disconnected;
+	dev->sw_connected = dev->connected;
+	spin_unlock_irqrestore(&cdev->lock, flags);
+
+	if (uevent_envp) {
+		kobject_uevent_env(&dev->dev->kobj, KOBJ_CHANGE, uevent_envp);
+		pr_info("%s: sent uevent %s\n", __func__, uevent_envp[0]);
+	} else {
+		pr_info("%s: did not send uevent (%d %d %p)\n", __func__,
+			 dev->connected, dev->sw_connected, cdev->config);
+	}
+}
+
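+/*
+ * dev->disable_depth is a disable refcount: android_disable() detaches the
+ * configuration and disconnects the gadget on the 0 -> 1 transition, and
+ * android_enable() re-adds the configuration and reconnects on the
+ * 1 -> 0 transition, so nested disable/enable pairs balance out.
+ */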
+static void android_enable(struct android_dev *dev)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+
+	if (WARN_ON(!dev->disable_depth))
+		return;
+
+	if (--dev->disable_depth == 0) {
+		usb_add_config(cdev, &android_config_driver,
+					android_bind_config);
+		usb_gadget_connect(cdev->gadget);
+	}
+}
+
+static void android_disable(struct android_dev *dev)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+
+	if (dev->disable_depth++ == 0) {
+		usb_gadget_disconnect(cdev->gadget);
+		/* Cancel pending control requests */
+		usb_ep_dequeue(cdev->gadget->ep0, cdev->req);
+		usb_remove_config(cdev, &android_config_driver);
+	}
+}
+
+/*-------------------------------------------------------------------------*/
+/* Supported functions initialization */
+
+struct functionfs_config {
+	bool opened;
+	bool enabled;
+	struct ffs_data *data;
+};
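+
+/*
+ * The gadget is meant to be pulled up only while both flags are set:
+ * "enabled" tracks the sysfs function selection, and "opened" tracks
+ * whether the userspace FunctionFS daemon has opened and bound this
+ * instance. The enable/disable handlers and the ready/closed callbacks
+ * below pair android_enable()/android_disable() calls around that
+ * invariant.
+ */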
+
+static int ffs_function_init(struct android_usb_function *f,
+			     struct usb_composite_dev *cdev)
+{
+	f->config = kzalloc(sizeof(struct functionfs_config), GFP_KERNEL);
+	if (!f->config)
+		return -ENOMEM;
+
+	return functionfs_init();
+}
+
+static void ffs_function_cleanup(struct android_usb_function *f)
+{
+	functionfs_cleanup();
+	kfree(f->config);
+}
+
+static void ffs_function_enable(struct android_usb_function *f)
+{
+	struct android_dev *dev = _android_dev;
+	struct functionfs_config *config = f->config;
+
+	config->enabled = true;
+
+	/* Disable the gadget until the function is ready */
+	if (!config->opened)
+		android_disable(dev);
+}
+
+static void ffs_function_disable(struct android_usb_function *f)
+{
+	struct android_dev *dev = _android_dev;
+	struct functionfs_config *config = f->config;
+
+	config->enabled = false;
+
+	/* Balance the disable that was called in closed_callback */
+	if (!config->opened)
+		android_enable(dev);
+}
+
+static int ffs_function_bind_config(struct android_usb_function *f,
+				    struct usb_configuration *c)
+{
+	struct functionfs_config *config = f->config;
+	return functionfs_bind_config(c->cdev, c, config->data);
+}
+
+static ssize_t
+ffs_aliases_show(struct device *pdev, struct device_attribute *attr, char *buf)
+{
+	struct android_dev *dev = _android_dev;
+	int ret;
+
+	mutex_lock(&dev->mutex);
+	ret = sprintf(buf, "%s\n", dev->ffs_aliases);
+	mutex_unlock(&dev->mutex);
+
+	return ret;
+}
+
+static ssize_t
+ffs_aliases_store(struct device *pdev, struct device_attribute *attr,
+					const char *buf, size_t size)
+{
+	struct android_dev *dev = _android_dev;
+	char buff[256];
+
+	mutex_lock(&dev->mutex);
+
+	if (dev->enabled) {
+		mutex_unlock(&dev->mutex);
+		return -EBUSY;
+	}
+
+	strlcpy(buff, buf, sizeof(buff));
+	strlcpy(dev->ffs_aliases, strim(buff), sizeof(dev->ffs_aliases));
+
+	mutex_unlock(&dev->mutex);
+
+	return size;
+}
+
+static DEVICE_ATTR(aliases, S_IRUGO | S_IWUSR, ffs_aliases_show,
+					       ffs_aliases_store);
+static struct device_attribute *ffs_function_attributes[] = {
+	&dev_attr_aliases,
+	NULL
+};
+
+static struct android_usb_function ffs_function = {
+	.name		= "ffs",
+	.init		= ffs_function_init,
+	.enable		= ffs_function_enable,
+	.disable	= ffs_function_disable,
+	.cleanup	= ffs_function_cleanup,
+	.bind_config	= ffs_function_bind_config,
+	.attributes	= ffs_function_attributes,
+};
+
+static int functionfs_ready_callback(struct ffs_data *ffs)
+{
+	struct android_dev *dev = _android_dev;
+	struct functionfs_config *config = ffs_function.config;
+	int ret = 0;
+
+	mutex_lock(&dev->mutex);
+
+	ret = functionfs_bind(ffs, dev->cdev);
+	if (ret)
+		goto err;
+
+	config->data = ffs;
+	config->opened = true;
+
+	if (config->enabled)
+		android_enable(dev);
+
+err:
+	mutex_unlock(&dev->mutex);
+	return ret;
+}
+
+static void functionfs_closed_callback(struct ffs_data *ffs)
+{
+	struct android_dev *dev = _android_dev;
+	struct functionfs_config *config = ffs_function.config;
+
+	mutex_lock(&dev->mutex);
+
+	if (config->enabled)
+		android_disable(dev);
+
+	config->opened = false;
+	config->data = NULL;
+
+	functionfs_unbind(ffs);
+
+	mutex_unlock(&dev->mutex);
+}
+
+static void *functionfs_acquire_dev_callback(const char *dev_name)
+{
+	return NULL;
+}
+
+static void functionfs_release_dev_callback(struct ffs_data *ffs_data)
+{
+}
+
+#define MAX_ACM_INSTANCES 4
+struct acm_function_config {
+	int instances;
+	int instances_on;
+	struct usb_function *f_acm[MAX_ACM_INSTANCES];
+	struct usb_function_instance *f_acm_inst[MAX_ACM_INSTANCES];
+};
+
+static int
+acm_function_init(struct android_usb_function *f,
+		struct usb_composite_dev *cdev)
+{
+	int i;
+	int ret;
+	struct acm_function_config *config;
+
+	config = kzalloc(sizeof(struct acm_function_config), GFP_KERNEL);
+	if (!config)
+		return -ENOMEM;
+	f->config = config;
+
+	for (i = 0; i < MAX_ACM_INSTANCES; i++) {
+		config->f_acm_inst[i] = usb_get_function_instance("acm");
+		if (IS_ERR(config->f_acm_inst[i])) {
+			ret = PTR_ERR(config->f_acm_inst[i]);
+			goto err_usb_get_function_instance;
+		}
+		config->f_acm[i] = usb_get_function(config->f_acm_inst[i]);
+		if (IS_ERR(config->f_acm[i])) {
+			ret = PTR_ERR(config->f_acm[i]);
+			goto err_usb_get_function;
+		}
+	}
+	return 0;
+err_usb_get_function_instance:
+	while (i-- > 0) {
+		usb_put_function(config->f_acm[i]);
+err_usb_get_function:
+		usb_put_function_instance(config->f_acm_inst[i]);
+	}
+	return ret;
+}
+
+static void acm_function_cleanup(struct android_usb_function *f)
+{
+	int i;
+	struct acm_function_config *config = f->config;
+
+	for (i = 0; i < MAX_ACM_INSTANCES; i++) {
+		usb_put_function(config->f_acm[i]);
+		usb_put_function_instance(config->f_acm_inst[i]);
+	}
+	kfree(f->config);
+	f->config = NULL;
+}
+
+static int
+acm_function_bind_config(struct android_usb_function *f,
+		struct usb_configuration *c)
+{
+	int i;
+	int ret = 0;
+	struct acm_function_config *config = f->config;
+
+	config->instances_on = config->instances;
+	for (i = 0; i < config->instances_on; i++) {
+		ret = usb_add_function(c, config->f_acm[i]);
+		if (ret) {
+			pr_err("Could not bind acm%u config\n", i);
+			goto err_usb_add_function;
+		}
+	}
+
+	return 0;
+
+err_usb_add_function:
+	while (i-- > 0)
+		usb_remove_function(c, config->f_acm[i]);
+	return ret;
+}
+
+static void acm_function_unbind_config(struct android_usb_function *f,
+				       struct usb_configuration *c)
+{
+	int i;
+	struct acm_function_config *config = f->config;
+
+	for (i = 0; i < config->instances_on; i++)
+		usb_remove_function(c, config->f_acm[i]);
+}
+
+static ssize_t acm_instances_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct acm_function_config *config = f->config;
+	return sprintf(buf, "%d\n", config->instances);
+}
+
+static ssize_t acm_instances_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct acm_function_config *config = f->config;
+	int value;
+
+	sscanf(buf, "%d", &value);
+	if (value > MAX_ACM_INSTANCES)
+		value = MAX_ACM_INSTANCES;
+	config->instances = value;
+	return size;
+}
+
+static DEVICE_ATTR(instances, S_IRUGO | S_IWUSR, acm_instances_show,
+						 acm_instances_store);
+static struct device_attribute *acm_function_attributes[] = {
+	&dev_attr_instances,
+	NULL
+};
+
+static struct android_usb_function acm_function = {
+	.name		= "acm",
+	.init		= acm_function_init,
+	.cleanup	= acm_function_cleanup,
+	.bind_config	= acm_function_bind_config,
+	.unbind_config	= acm_function_unbind_config,
+	.attributes	= acm_function_attributes,
+};
+
+
+static int
+mtp_function_init(struct android_usb_function *f,
+		struct usb_composite_dev *cdev)
+{
+	return mtp_setup();
+}
+
+static void mtp_function_cleanup(struct android_usb_function *f)
+{
+	mtp_cleanup();
+}
+
+static int
+mtp_function_bind_config(struct android_usb_function *f,
+		struct usb_configuration *c)
+{
+	return mtp_bind_config(c, false);
+}
+
+static int
+ptp_function_init(struct android_usb_function *f,
+		struct usb_composite_dev *cdev)
+{
+	/* nothing to do - initialization is handled by mtp_function_init */
+	return 0;
+}
+
+static void ptp_function_cleanup(struct android_usb_function *f)
+{
+	/* nothing to do - cleanup is handled by mtp_function_cleanup */
+}
+
+static int
+ptp_function_bind_config(struct android_usb_function *f,
+		struct usb_configuration *c)
+{
+	return mtp_bind_config(c, true);
+}
+
+static int mtp_function_ctrlrequest(struct android_usb_function *f,
+					struct usb_composite_dev *cdev,
+					const struct usb_ctrlrequest *c)
+{
+	return mtp_ctrlrequest(cdev, c);
+}
+
+static struct android_usb_function mtp_function = {
+	.name		= "mtp",
+	.init		= mtp_function_init,
+	.cleanup	= mtp_function_cleanup,
+	.bind_config	= mtp_function_bind_config,
+	.ctrlrequest	= mtp_function_ctrlrequest,
+};
+
+/* The PTP function is the same as MTP, with a slightly different interface descriptor */
+static struct android_usb_function ptp_function = {
+	.name		= "ptp",
+	.init		= ptp_function_init,
+	.cleanup	= ptp_function_cleanup,
+	.bind_config	= ptp_function_bind_config,
+};
+
+
+struct rndis_function_config {
+	u8      ethaddr[ETH_ALEN];
+	u32     vendorID;
+	char	manufacturer[256];
+	/* "Wireless" RNDIS; auto-detected by Windows */
+	bool	wceis;
+	struct eth_dev *dev;
+};
+
+static int
+rndis_function_init(struct android_usb_function *f,
+		struct usb_composite_dev *cdev)
+{
+	f->config = kzalloc(sizeof(struct rndis_function_config), GFP_KERNEL);
+	if (!f->config)
+		return -ENOMEM;
+	return 0;
+}
+
+static void rndis_function_cleanup(struct android_usb_function *f)
+{
+	kfree(f->config);
+	f->config = NULL;
+}
+
+static int
+rndis_function_bind_config(struct android_usb_function *f,
+		struct usb_configuration *c)
+{
+	int ret;
+	struct eth_dev *dev;
+	struct rndis_function_config *rndis = f->config;
+
+	if (!rndis) {
+		pr_err("%s: rndis_pdata\n", __func__);
+		return -1;
+	}
+
+	pr_info("%s MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", __func__,
+		rndis->ethaddr[0], rndis->ethaddr[1], rndis->ethaddr[2],
+		rndis->ethaddr[3], rndis->ethaddr[4], rndis->ethaddr[5]);
+
+	dev = gether_setup_name(c->cdev->gadget, rndis->ethaddr, "rndis");
+	if (IS_ERR(dev)) {
+		ret = PTR_ERR(dev);
+		pr_err("%s: gether_setup failed\n", __func__);
+		return ret;
+	}
+	rndis->dev = dev;
+
+	if (rndis->wceis) {
+		/* "Wireless" RNDIS; auto-detected by Windows */
+		rndis_iad_descriptor.bFunctionClass =
+						USB_CLASS_WIRELESS_CONTROLLER;
+		rndis_iad_descriptor.bFunctionSubClass = 0x01;
+		rndis_iad_descriptor.bFunctionProtocol = 0x03;
+		rndis_control_intf.bInterfaceClass =
+						USB_CLASS_WIRELESS_CONTROLLER;
+		rndis_control_intf.bInterfaceSubClass =	 0x01;
+		rndis_control_intf.bInterfaceProtocol =	 0x03;
+	}
+
+	return rndis_bind_config_vendor(c, rndis->ethaddr, rndis->vendorID,
+					   rndis->manufacturer, rndis->dev);
+}
+
+static void rndis_function_unbind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	struct rndis_function_config *rndis = f->config;
+	gether_cleanup(rndis->dev);
+}
+
+static ssize_t rndis_manufacturer_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *config = f->config;
+	return sprintf(buf, "%s\n", config->manufacturer);
+}
+
+static ssize_t rndis_manufacturer_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *config = f->config;
+
+	if (size >= sizeof(config->manufacturer))
+		return -EINVAL;
+	if (sscanf(buf, "%s", config->manufacturer) == 1)
+		return size;
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(manufacturer, S_IRUGO | S_IWUSR, rndis_manufacturer_show,
+						    rndis_manufacturer_store);
+
+static ssize_t rndis_wceis_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *config = f->config;
+	return sprintf(buf, "%d\n", config->wceis);
+}
+
+static ssize_t rndis_wceis_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *config = f->config;
+	int value;
+
+	if (sscanf(buf, "%d", &value) == 1) {
+		config->wceis = value;
+		return size;
+	}
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(wceis, S_IRUGO | S_IWUSR, rndis_wceis_show,
+					     rndis_wceis_store);
+
+static ssize_t rndis_ethaddr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *rndis = f->config;
+	return sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+		rndis->ethaddr[0], rndis->ethaddr[1], rndis->ethaddr[2],
+		rndis->ethaddr[3], rndis->ethaddr[4], rndis->ethaddr[5]);
+}
+
+static ssize_t rndis_ethaddr_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *rndis = f->config;
+
+	if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+		    (int *)&rndis->ethaddr[0], (int *)&rndis->ethaddr[1],
+		    (int *)&rndis->ethaddr[2], (int *)&rndis->ethaddr[3],
+		    (int *)&rndis->ethaddr[4], (int *)&rndis->ethaddr[5]) == 6)
+		return size;
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(ethaddr, S_IRUGO | S_IWUSR, rndis_ethaddr_show,
+					       rndis_ethaddr_store);
+
+static ssize_t rndis_vendorID_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *config = f->config;
+	return sprintf(buf, "%04x\n", config->vendorID);
+}
+
+static ssize_t rndis_vendorID_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *config = f->config;
+	int value;
+
+	if (sscanf(buf, "%04x", &value) == 1) {
+		config->vendorID = value;
+		return size;
+	}
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(vendorID, S_IRUGO | S_IWUSR, rndis_vendorID_show,
+						rndis_vendorID_store);
+
+static struct device_attribute *rndis_function_attributes[] = {
+	&dev_attr_manufacturer,
+	&dev_attr_wceis,
+	&dev_attr_ethaddr,
+	&dev_attr_vendorID,
+	NULL
+};
+
+static struct android_usb_function rndis_function = {
+	.name		= "rndis",
+	.init		= rndis_function_init,
+	.cleanup	= rndis_function_cleanup,
+	.bind_config	= rndis_function_bind_config,
+	.unbind_config	= rndis_function_unbind_config,
+	.attributes	= rndis_function_attributes,
+};
+
+
+struct mass_storage_function_config {
+	struct fsg_config fsg;
+	struct fsg_common *common;
+};
+
+static int mass_storage_function_init(struct android_usb_function *f,
+					struct usb_composite_dev *cdev)
+{
+	struct mass_storage_function_config *config;
+	struct fsg_common *common;
+	int err;
+
+	config = kzalloc(sizeof(struct mass_storage_function_config),
+								GFP_KERNEL);
+	if (!config)
+		return -ENOMEM;
+
+	config->fsg.nluns = 1;
+	config->fsg.luns[0].removable = 1;
+
+	common = fsg_common_init(NULL, cdev, &config->fsg);
+	if (IS_ERR(common)) {
+		kfree(config);
+		return PTR_ERR(common);
+	}
+
+	err = sysfs_create_link(&f->dev->kobj,
+				&common->luns[0].dev.kobj,
+				"lun");
+	if (err) {
+		kfree(config);
+		return err;
+	}
+
+	config->common = common;
+	f->config = config;
+	return 0;
+}
+
+static void mass_storage_function_cleanup(struct android_usb_function *f)
+{
+	kfree(f->config);
+	f->config = NULL;
+}
+
+static int mass_storage_function_bind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	struct mass_storage_function_config *config = f->config;
+	return fsg_bind_config(c->cdev, c, config->common);
+}
+
+static ssize_t mass_storage_inquiry_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct mass_storage_function_config *config = f->config;
+	return sprintf(buf, "%s\n", config->common->inquiry_string);
+}
+
+static ssize_t mass_storage_inquiry_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct mass_storage_function_config *config = f->config;
+	if (size >= sizeof(config->common->inquiry_string))
+		return -EINVAL;
+	if (sscanf(buf, "%s", config->common->inquiry_string) != 1)
+		return -EINVAL;
+	return size;
+}
+
+static DEVICE_ATTR(inquiry_string, S_IRUGO | S_IWUSR,
+					mass_storage_inquiry_show,
+					mass_storage_inquiry_store);
+
+static struct device_attribute *mass_storage_function_attributes[] = {
+	&dev_attr_inquiry_string,
+	NULL
+};
+
+static struct android_usb_function mass_storage_function = {
+	.name		= "mass_storage",
+	.init		= mass_storage_function_init,
+	.cleanup	= mass_storage_function_cleanup,
+	.bind_config	= mass_storage_function_bind_config,
+	.attributes	= mass_storage_function_attributes,
+};
+
+
+static int accessory_function_init(struct android_usb_function *f,
+					struct usb_composite_dev *cdev)
+{
+	return acc_setup();
+}
+
+static void accessory_function_cleanup(struct android_usb_function *f)
+{
+	acc_cleanup();
+}
+
+static int accessory_function_bind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	return acc_bind_config(c);
+}
+
+static int accessory_function_ctrlrequest(struct android_usb_function *f,
+						struct usb_composite_dev *cdev,
+						const struct usb_ctrlrequest *c)
+{
+	return acc_ctrlrequest(cdev, c);
+}
+
+static struct android_usb_function accessory_function = {
+	.name		= "accessory",
+	.init		= accessory_function_init,
+	.cleanup	= accessory_function_cleanup,
+	.bind_config	= accessory_function_bind_config,
+	.ctrlrequest	= accessory_function_ctrlrequest,
+};
+
+static int audio_source_function_init(struct android_usb_function *f,
+			struct usb_composite_dev *cdev)
+{
+	struct audio_source_config *config;
+
+	config = kzalloc(sizeof(struct audio_source_config), GFP_KERNEL);
+	if (!config)
+		return -ENOMEM;
+	config->card = -1;
+	config->device = -1;
+	f->config = config;
+	return 0;
+}
+
+static void audio_source_function_cleanup(struct android_usb_function *f)
+{
+	kfree(f->config);
+}
+
+static int audio_source_function_bind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	struct audio_source_config *config = f->config;
+
+	return audio_source_bind_config(c, config);
+}
+
+static void audio_source_function_unbind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	struct audio_source_config *config = f->config;
+
+	config->card = -1;
+	config->device = -1;
+}
+
+static ssize_t audio_source_pcm_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct audio_source_config *config = f->config;
+
+	/* print PCM card and device numbers */
+	return sprintf(buf, "%d %d\n", config->card, config->device);
+}
+
+static DEVICE_ATTR(pcm, S_IRUGO, audio_source_pcm_show, NULL);
+
+static struct device_attribute *audio_source_function_attributes[] = {
+	&dev_attr_pcm,
+	NULL
+};
+
+static struct android_usb_function audio_source_function = {
+	.name		= "audio_source",
+	.init		= audio_source_function_init,
+	.cleanup	= audio_source_function_cleanup,
+	.bind_config	= audio_source_function_bind_config,
+	.unbind_config	= audio_source_function_unbind_config,
+	.attributes	= audio_source_function_attributes,
+};
+
+static int midi_function_init(struct android_usb_function *f,
+					struct usb_composite_dev *cdev)
+{
+	struct midi_alsa_config *config;
+
+	config = kzalloc(sizeof(struct midi_alsa_config), GFP_KERNEL);
+	f->config = config;
+	if (!config)
+		return -ENOMEM;
+	config->card = -1;
+	config->device = -1;
+	return 0;
+}
+
+static void midi_function_cleanup(struct android_usb_function *f)
+{
+	kfree(f->config);
+}
+
+static int midi_function_bind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	struct midi_alsa_config *config = f->config;
+
+	return f_midi_bind_config(c, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+			MIDI_INPUT_PORTS, MIDI_OUTPUT_PORTS, MIDI_BUFFER_SIZE,
+			MIDI_QUEUE_LENGTH, config);
+}
+
+static ssize_t midi_alsa_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct midi_alsa_config *config = f->config;
+
+	/* print ALSA card and device numbers */
+	return sprintf(buf, "%d %d\n", config->card, config->device);
+}
+
+static DEVICE_ATTR(alsa, S_IRUGO, midi_alsa_show, NULL);
+
+static struct device_attribute *midi_function_attributes[] = {
+	&dev_attr_alsa,
+	NULL
+};
+
+static struct android_usb_function midi_function = {
+	.name		= "midi",
+	.init		= midi_function_init,
+	.cleanup	= midi_function_cleanup,
+	.bind_config	= midi_function_bind_config,
+	.attributes	= midi_function_attributes,
+};
+
+static struct android_usb_function *supported_functions[] = {
+	&ffs_function,
+	&acm_function,
+	&mtp_function,
+	&ptp_function,
+	&rndis_function,
+	&mass_storage_function,
+	&accessory_function,
+	&audio_source_function,
+	&midi_function,
+	NULL
+};
+
+static int android_init_functions(struct android_usb_function **functions,
+				  struct usb_composite_dev *cdev)
+{
+	struct android_dev *dev = _android_dev;
+	struct android_usb_function *f;
+	struct device_attribute **attrs;
+	struct device_attribute *attr;
+	int err = 0;
+	int index = 0;
+
+	for (; (f = *functions++); index++) {
+		f->dev_name = kasprintf(GFP_KERNEL, "f_%s", f->name);
+		f->dev = device_create(android_class, dev->dev,
+				MKDEV(0, index), f, f->dev_name);
+		if (IS_ERR(f->dev)) {
+			pr_err("%s: Failed to create dev %s", __func__,
+							f->dev_name);
+			err = PTR_ERR(f->dev);
+			goto err_create;
+		}
+
+		if (f->init) {
+			err = f->init(f, cdev);
+			if (err) {
+				pr_err("%s: Failed to init %s", __func__,
+								f->name);
+				goto err_out;
+			}
+		}
+
+		attrs = f->attributes;
+		if (attrs) {
+			while ((attr = *attrs++) && !err)
+				err = device_create_file(f->dev, attr);
+		}
+		if (err) {
+			pr_err("%s: Failed to create function %s attributes",
+					__func__, f->name);
+			goto err_out;
+		}
+	}
+	return 0;
+
+err_out:
+	device_destroy(android_class, f->dev->devt);
+err_create:
+	kfree(f->dev_name);
+	return err;
+}
+
+static void android_cleanup_functions(struct android_usb_function **functions)
+{
+	struct android_usb_function *f;
+
+	while (*functions) {
+		f = *functions++;
+
+		if (f->dev) {
+			device_destroy(android_class, f->dev->devt);
+			kfree(f->dev_name);
+		}
+
+		if (f->cleanup)
+			f->cleanup(f);
+	}
+}
+
+static int
+android_bind_enabled_functions(struct android_dev *dev,
+			       struct usb_configuration *c)
+{
+	struct android_usb_function *f;
+	int ret;
+
+	list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+		ret = f->bind_config(f, c);
+		if (ret) {
+			pr_err("%s: %s failed", __func__, f->name);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+static void
+android_unbind_enabled_functions(struct android_dev *dev,
+			       struct usb_configuration *c)
+{
+	struct android_usb_function *f;
+
+	list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+		if (f->unbind_config)
+			f->unbind_config(f, c);
+	}
+}
+
+static int android_enable_function(struct android_dev *dev, char *name)
+{
+	struct android_usb_function **functions = dev->functions;
+	struct android_usb_function *f;
+
+	while ((f = *functions++)) {
+		if (!strcmp(name, f->name)) {
+			list_add_tail(&f->enabled_list,
+						&dev->enabled_functions);
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+/*-------------------------------------------------------------------------*/
+/* /sys/class/android_usb/android%d/ interface */
+
+static ssize_t
+functions_show(struct device *pdev, struct device_attribute *attr, char *buf)
+{
+	struct android_dev *dev = dev_get_drvdata(pdev);
+	struct android_usb_function *f;
+	char *buff = buf;
+
+	mutex_lock(&dev->mutex);
+
+	list_for_each_entry(f, &dev->enabled_functions, enabled_list)
+		buff += sprintf(buff, "%s,", f->name);
+
+	mutex_unlock(&dev->mutex);
+
+	if (buff != buf)
+		*(buff-1) = '\n';
+	return buff - buf;
+}
+
+static ssize_t
+functions_store(struct device *pdev, struct device_attribute *attr,
+			       const char *buff, size_t size)
+{
+	struct android_dev *dev = dev_get_drvdata(pdev);
+	char *name;
+	char buf[256], *b;
+	char aliases[256], *a;
+	int err;
+	int is_ffs;
+	int ffs_enabled = 0;
+
+	mutex_lock(&dev->mutex);
+
+	if (dev->enabled) {
+		mutex_unlock(&dev->mutex);
+		return -EBUSY;
+	}
+
+	INIT_LIST_HEAD(&dev->enabled_functions);
+
+	strlcpy(buf, buff, sizeof(buf));
+	b = strim(buf);
+
+	while (b) {
+		name = strsep(&b, ",");
+		if (!name)
+			continue;
+
+		is_ffs = 0;
+		strlcpy(aliases, dev->ffs_aliases, sizeof(aliases));
+		a = aliases;
+
+		while (a) {
+			char *alias = strsep(&a, ",");
+			if (alias && !strcmp(name, alias)) {
+				is_ffs = 1;
+				break;
+			}
+		}
+
+		if (is_ffs) {
+			if (ffs_enabled)
+				continue;
+			err = android_enable_function(dev, "ffs");
+			if (err)
+				pr_err("android_usb: Cannot enable ffs (%d)",
+									err);
+			else
+				ffs_enabled = 1;
+			continue;
+		}
+
+		err = android_enable_function(dev, name);
+		if (err)
+			pr_err("android_usb: Cannot enable '%s' (%d)",
+							   name, err);
+	}
+
+	mutex_unlock(&dev->mutex);
+
+	return size;
+}
+
+static ssize_t enable_show(struct device *pdev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct android_dev *dev = dev_get_drvdata(pdev);
+	return sprintf(buf, "%d\n", dev->enabled);
+}
+
+static ssize_t enable_store(struct device *pdev, struct device_attribute *attr,
+			    const char *buff, size_t size)
+{
+	struct android_dev *dev = dev_get_drvdata(pdev);
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct android_usb_function *f;
+	int enabled = 0;
+
+	if (!cdev)
+		return -ENODEV;
+
+	mutex_lock(&dev->mutex);
+
+	sscanf(buff, "%d", &enabled);
+	if (enabled && !dev->enabled) {
+		/*
+		 * Update values in composite driver's copy of
+		 * device descriptor.
+		 */
+		cdev->desc.idVendor = device_desc.idVendor;
+		cdev->desc.idProduct = device_desc.idProduct;
+		cdev->desc.bcdDevice = device_desc.bcdDevice;
+		cdev->desc.bDeviceClass = device_desc.bDeviceClass;
+		cdev->desc.bDeviceSubClass = device_desc.bDeviceSubClass;
+		cdev->desc.bDeviceProtocol = device_desc.bDeviceProtocol;
+		list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+			if (f->enable)
+				f->enable(f);
+		}
+		android_enable(dev);
+		dev->enabled = true;
+	} else if (!enabled && dev->enabled) {
+		android_disable(dev);
+		list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+			if (f->disable)
+				f->disable(f);
+		}
+		dev->enabled = false;
+	} else {
+		pr_err("android_usb: already %s\n",
+				dev->enabled ? "enabled" : "disabled");
+	}
+
+	mutex_unlock(&dev->mutex);
+	return size;
+}
+
+static ssize_t state_show(struct device *pdev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct android_dev *dev = dev_get_drvdata(pdev);
+	struct usb_composite_dev *cdev = dev->cdev;
+	char *state = "DISCONNECTED";
+	unsigned long flags;
+
+	if (!cdev)
+		goto out;
+
+	spin_lock_irqsave(&cdev->lock, flags);
+	if (cdev->config)
+		state = "CONFIGURED";
+	else if (dev->connected)
+		state = "CONNECTED";
+	spin_unlock_irqrestore(&cdev->lock, flags);
+out:
+	return sprintf(buf, "%s\n", state);
+}
+
+#define DESCRIPTOR_ATTR(field, format_string)				\
+static ssize_t								\
+field ## _show(struct device *dev, struct device_attribute *attr,	\
+		char *buf)						\
+{									\
+	return sprintf(buf, format_string, device_desc.field);		\
+}									\
+static ssize_t								\
+field ## _store(struct device *dev, struct device_attribute *attr,	\
+		const char *buf, size_t size)				\
+{									\
+	int value;							\
+	if (sscanf(buf, format_string, &value) == 1) {			\
+		device_desc.field = value;				\
+		return size;						\
+	}								\
+	return -EINVAL;						\
+}									\
+static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, field ## _show, field ## _store);
+
+#define DESCRIPTOR_STRING_ATTR(field, buffer)				\
+static ssize_t								\
+field ## _show(struct device *dev, struct device_attribute *attr,	\
+		char *buf)						\
+{									\
+	return sprintf(buf, "%s", buffer);				\
+}									\
+static ssize_t								\
+field ## _store(struct device *dev, struct device_attribute *attr,	\
+		const char *buf, size_t size)				\
+{									\
+	if (size >= sizeof(buffer))					\
+		return -EINVAL;						\
+	return strlcpy(buffer, buf, sizeof(buffer));			\
+}									\
+static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, field ## _show, field ## _store);
+
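+/*
+ * One sysfs attribute is generated per descriptor field:
+ * DESCRIPTOR_ATTR(idVendor, "%04x\n") expands to idVendor_show(),
+ * idVendor_store() and dev_attr_idVendor; the string variant works
+ * the same way on the fixed-size string buffers.
+ */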
+DESCRIPTOR_ATTR(idVendor, "%04x\n")
+DESCRIPTOR_ATTR(idProduct, "%04x\n")
+DESCRIPTOR_ATTR(bcdDevice, "%04x\n")
+DESCRIPTOR_ATTR(bDeviceClass, "%d\n")
+DESCRIPTOR_ATTR(bDeviceSubClass, "%d\n")
+DESCRIPTOR_ATTR(bDeviceProtocol, "%d\n")
+DESCRIPTOR_STRING_ATTR(iManufacturer, manufacturer_string)
+DESCRIPTOR_STRING_ATTR(iProduct, product_string)
+DESCRIPTOR_STRING_ATTR(iSerial, serial_string)
+
+static DEVICE_ATTR(functions, S_IRUGO | S_IWUSR, functions_show,
+						 functions_store);
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, enable_show, enable_store);
+static DEVICE_ATTR(state, S_IRUGO, state_show, NULL);
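+
+/*
+ * Typical userspace sequence (e.g. from init.usb.rc):
+ *	echo 0       > /sys/class/android_usb/android0/enable
+ *	echo mtp,adb > /sys/class/android_usb/android0/functions
+ *	echo 1       > /sys/class/android_usb/android0/enable
+ */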
+
+static struct device_attribute *android_usb_attributes[] = {
+	&dev_attr_idVendor,
+	&dev_attr_idProduct,
+	&dev_attr_bcdDevice,
+	&dev_attr_bDeviceClass,
+	&dev_attr_bDeviceSubClass,
+	&dev_attr_bDeviceProtocol,
+	&dev_attr_iManufacturer,
+	&dev_attr_iProduct,
+	&dev_attr_iSerial,
+	&dev_attr_functions,
+	&dev_attr_enable,
+	&dev_attr_state,
+	NULL
+};
+
+/*-------------------------------------------------------------------------*/
+/* Composite driver */
+
+static int android_bind_config(struct usb_configuration *c)
+{
+	struct android_dev *dev = _android_dev;
+	int ret = 0;
+
+	ret = android_bind_enabled_functions(dev, c);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void android_unbind_config(struct usb_configuration *c)
+{
+	struct android_dev *dev = _android_dev;
+
+	android_unbind_enabled_functions(dev, c);
+}
+
+static int android_bind(struct usb_composite_dev *cdev)
+{
+	struct android_dev *dev = _android_dev;
+	struct usb_gadget	*gadget = cdev->gadget;
+	int			id, ret;
+
+	/* Save the default handler */
+	dev->setup_complete = cdev->req->complete;
+
+	/*
+	 * Start disconnected. Userspace will connect the gadget once
+	 * it is done configuring the functions.
+	 */
+	usb_gadget_disconnect(gadget);
+
+	ret = android_init_functions(dev->functions, cdev);
+	if (ret)
+		return ret;
+
+	/* Allocate string descriptor numbers ... note that string
+	 * contents can be overridden by the composite_dev glue.
+	 */
+	id = usb_string_id(cdev);
+	if (id < 0)
+		return id;
+	strings_dev[STRING_MANUFACTURER_IDX].id = id;
+	device_desc.iManufacturer = id;
+
+	id = usb_string_id(cdev);
+	if (id < 0)
+		return id;
+	strings_dev[STRING_PRODUCT_IDX].id = id;
+	device_desc.iProduct = id;
+
+	/* Default strings - should be updated by userspace */
+	strncpy(manufacturer_string, "Android", sizeof(manufacturer_string)-1);
+	strncpy(product_string, "Android", sizeof(product_string) - 1);
+	strncpy(serial_string, "0123456789ABCDEF", sizeof(serial_string) - 1);
+
+	id = usb_string_id(cdev);
+	if (id < 0)
+		return id;
+	strings_dev[STRING_SERIAL_IDX].id = id;
+	device_desc.iSerialNumber = id;
+
+	usb_gadget_set_selfpowered(gadget);
+	dev->cdev = cdev;
+
+	return 0;
+}
+
+static int android_usb_unbind(struct usb_composite_dev *cdev)
+{
+	struct android_dev *dev = _android_dev;
+
+	cancel_work_sync(&dev->work);
+	android_cleanup_functions(dev->functions);
+	return 0;
+}
+
+/* HACK: android needs to override setup for accessory to work */
+static int (*composite_setup_func)(struct usb_gadget *gadget, const struct usb_ctrlrequest *c);
+
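+/*
+ * android_setup() is swapped in for the composite driver's setup
+ * callback at init time; control requests not claimed by an enabled
+ * function or by the accessory function fall through to the saved
+ * composite_setup_func().
+ */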
+static int
+android_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *c)
+{
+	struct android_dev		*dev = _android_dev;
+	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
+	struct usb_request		*req = cdev->req;
+	struct android_usb_function	*f;
+	int value = -EOPNOTSUPP;
+	unsigned long flags;
+
+	req->zero = 0;
+	req->length = 0;
+	req->complete = dev->setup_complete;
+	gadget->ep0->driver_data = cdev;
+
+	list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+		if (f->ctrlrequest) {
+			value = f->ctrlrequest(f, cdev, c);
+			if (value >= 0)
+				break;
+		}
+	}
+
+	/* Special case the accessory function.
+	 * It needs to handle control requests before it is enabled.
+	 */
+	if (value < 0)
+		value = acc_ctrlrequest(cdev, c);
+
+	if (value < 0)
+		value = composite_setup_func(gadget, c);
+
+	spin_lock_irqsave(&cdev->lock, flags);
+	if (!dev->connected) {
+		dev->connected = 1;
+		schedule_work(&dev->work);
+	} else if (c->bRequest == USB_REQ_SET_CONFIGURATION &&
+						cdev->config) {
+		schedule_work(&dev->work);
+	}
+	spin_unlock_irqrestore(&cdev->lock, flags);
+
+	return value;
+}
+
+static void android_disconnect(struct usb_composite_dev *cdev)
+{
+	struct android_dev *dev = _android_dev;
+
+	/*
+	 * Accessory HID support can be active while the accessory
+	 * function is not actually enabled, so we need to inform it
+	 * when we are disconnected.
+	 */
+	acc_disconnect();
+
+	dev->connected = 0;
+	schedule_work(&dev->work);
+}
+
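+/*
+ * Note: .setup is intentionally absent here; init() swaps
+ * android_setup() into composite's gadget_driver once
+ * usb_composite_probe() has returned.
+ */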
+static struct usb_composite_driver android_usb_driver = {
+	.name		= "android_usb",
+	.dev		= &device_desc,
+	.strings	= dev_strings,
+	.bind		= android_bind,
+	.unbind		= android_usb_unbind,
+	.disconnect	= android_disconnect,
+	.max_speed	= USB_SPEED_HIGH,
+};
+
+static int android_create_device(struct android_dev *dev)
+{
+	struct device_attribute **attrs = android_usb_attributes;
+	struct device_attribute *attr;
+	int err;
+
+	dev->dev = device_create(android_class, NULL,
+					MKDEV(0, 0), NULL, "android0");
+	if (IS_ERR(dev->dev))
+		return PTR_ERR(dev->dev);
+
+	dev_set_drvdata(dev->dev, dev);
+
+	while ((attr = *attrs++)) {
+		err = device_create_file(dev->dev, attr);
+		if (err) {
+			device_destroy(android_class, dev->dev->devt);
+			return err;
+		}
+	}
+	return 0;
+}
+
+static int __init init(void)
+{
+	struct android_dev *dev;
+	int err;
+
+	android_class = class_create(THIS_MODULE, "android_usb");
+	if (IS_ERR(android_class))
+		return PTR_ERR(android_class);
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev) {
+		err = -ENOMEM;
+		goto err_dev;
+	}
+
+	dev->disable_depth = 1;
+	dev->functions = supported_functions;
+	INIT_LIST_HEAD(&dev->enabled_functions);
+	INIT_WORK(&dev->work, android_work);
+	mutex_init(&dev->mutex);
+
+	err = android_create_device(dev);
+	if (err) {
+		pr_err("%s: failed to create android device %d", __func__, err);
+		goto err_create;
+	}
+
+	_android_dev = dev;
+
+	err = usb_composite_probe(&android_usb_driver);
+	if (err) {
+		pr_err("%s: failed to probe driver %d", __func__, err);
+		goto err_probe;
+	}
+
+	/* HACK: exchange composite's setup with ours */
+	composite_setup_func = android_usb_driver.gadget_driver.setup;
+	android_usb_driver.gadget_driver.setup = android_setup;
+
+	return 0;
+
+err_probe:
+	device_destroy(android_class, dev->dev->devt);
+err_create:
+	kfree(dev);
+err_dev:
+	class_destroy(android_class);
+	return err;
+}
+late_initcall(init);
+
+static void __exit cleanup(void)
+{
+	usb_composite_unregister(&android_usb_driver);
+	class_destroy(android_class);
+	kfree(_android_dev);
+	_android_dev = NULL;
+}
+module_exit(cleanup);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index a660716..6a87860 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -812,7 +812,7 @@
 }
 EXPORT_SYMBOL_GPL(usb_add_config);
 
-static void remove_config(struct usb_composite_dev *cdev,
+static void unbind_config(struct usb_composite_dev *cdev,
 			      struct usb_configuration *config)
 {
 	while (!list_empty(&config->functions)) {
@@ -827,7 +827,6 @@
 			/* may free memory for "f" */
 		}
 	}
-	list_del(&config->list);
 	if (config->unbind) {
 		DBG(cdev, "unbind config '%s'/%p\n", config->label, config);
 		config->unbind(config);
@@ -854,9 +853,11 @@
 	if (cdev->config == config)
 		reset_config(cdev);
 
+	list_del(&config->list);
+
 	spin_unlock_irqrestore(&cdev->lock, flags);
 
-	remove_config(cdev, config);
+	unbind_config(cdev, config);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -1525,7 +1526,8 @@
 		struct usb_configuration	*c;
 		c = list_first_entry(&cdev->configs,
 				struct usb_configuration, list);
-		remove_config(cdev, c);
+		list_del(&c->list);
+		unbind_config(cdev, c);
 	}
 	if (cdev->driver->unbind && unbind_driver)
 		cdev->driver->unbind(cdev);
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index a777f7b..3f10698 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -188,6 +188,8 @@
 	ep->address = desc->bEndpointAddress;
 	return 1;
 }
+EXPORT_SYMBOL_GPL(ep_matches);
+
 
 static struct usb_ep *
 find_ep (struct usb_gadget *gadget, const char *name)
@@ -200,6 +202,7 @@
 	}
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(find_ep);
 
 /**
  * usb_ep_autoconfig_ss() - choose an endpoint matching the ep
@@ -253,7 +256,11 @@
 {
 	struct usb_ep	*ep;
 	u8		type;
+#ifdef CONFIG_USB_DWC3_GADGET
+	u8	       addr;
 
+	addr = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+#endif
 	type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
 
 	/* First, apply chip-specific "best usage" knowledge.
@@ -301,10 +308,45 @@
 		if (ep && ep_matches(gadget, ep, desc, ep_comp))
 			goto found_ep;
 #endif
+
+#ifdef CONFIG_USB_DWC3_GADGET
+	} else if (gadget_is_middwc3tng(gadget)) {
+		if (addr == 0x1) {
+			/* statically assigned ebc-ep1 in/out  */
+			if ((desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+			    & USB_DIR_IN)
+				ep = find_ep(gadget, "ep1in");
+			else
+				ep = NULL;
+		} else if (addr == 0x8) {
+			/* statically assigned ebc-ep8 in/out */
+			if ((desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+			    & USB_DIR_IN)
+				ep = find_ep (gadget, "ep8in");
+			else
+				ep = find_ep (gadget, "ep8out");
+		} else
+			ep = NULL;
+		if (ep && ep_matches(gadget, ep, desc, ep_comp))
+			goto found_ep;
+#endif
+
 	}
 
 	/* Second, look at endpoints until an unclaimed one looks usable */
 	list_for_each_entry (ep, &gadget->ep_list, ep_list) {
+
+#ifdef CONFIG_USB_DWC3_GADGET
+		/* ep1in and ep8in are reserved for DWC3 device controller */
+		if (!strncmp(ep->name, "ep1in", 5) ||
+		    !strncmp(ep->name, "ep8in", 5))
+			continue;
+		if (gadget_is_middwc3tng(gadget))
+			/* ep1out and ep8out are also reserved */
+			if (!strncmp(ep->name, "ep1out", 6) ||
+			    !strncmp(ep->name, "ep8out", 6))
+				continue;
+#endif
 		if (ep_matches(gadget, ep, desc, ep_comp))
 			goto found_ep;
 	}
diff --git a/drivers/usb/gadget/f_accessory.c b/drivers/usb/gadget/f_accessory.c
new file mode 100644
index 0000000..0237f1e
--- /dev/null
+++ b/drivers/usb/gadget/f_accessory.c
@@ -0,0 +1,1215 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include <linux/hid.h>
+#include <linux/hiddev.h>
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/f_accessory.h>
+
+#define BULK_BUFFER_SIZE    16384
+#define ACC_STRING_SIZE     256
+
+#define PROTOCOL_VERSION    2
+
+/* String IDs */
+#define INTERFACE_STRING_INDEX	0
+
+/* number of tx and rx requests to allocate */
+#define TX_REQ_MAX 4
+#define RX_REQ_MAX 2
+
+struct acc_hid_dev {
+	struct list_head	list;
+	struct hid_device *hid;
+	struct acc_dev *dev;
+	/* accessory defined ID */
+	int id;
+	/* HID report descriptor */
+	u8 *report_desc;
+	/* length of HID report descriptor */
+	int report_desc_len;
+	/* number of bytes of report_desc we have received so far */
+	int report_desc_offset;
+};
+
+struct acc_dev {
+	struct usb_function function;
+	struct usb_composite_dev *cdev;
+	spinlock_t lock;
+
+	struct usb_ep *ep_in;
+	struct usb_ep *ep_out;
+
+	/* set to 1 when we connect */
+	int online:1;
+	/* Set to 1 when we disconnect.
+	 * Not cleared until our file is closed.
+	 */
+	int disconnected:1;
+
+	/* strings sent by the host */
+	char manufacturer[ACC_STRING_SIZE];
+	char model[ACC_STRING_SIZE];
+	char description[ACC_STRING_SIZE];
+	char version[ACC_STRING_SIZE];
+	char uri[ACC_STRING_SIZE];
+	char serial[ACC_STRING_SIZE];
+
+	/* for acc_complete_set_string */
+	int string_index;
+
+	/* set to 1 if we have a pending start request */
+	int start_requested;
+
+	int audio_mode;
+
+	/* synchronize access to our device file */
+	atomic_t open_excl;
+
+	struct list_head tx_idle;
+
+	wait_queue_head_t read_wq;
+	wait_queue_head_t write_wq;
+	struct usb_request *rx_req[RX_REQ_MAX];
+	int rx_done;
+
+	/* delayed work for handling ACCESSORY_START */
+	struct delayed_work start_work;
+
+	/* worker for registering and unregistering hid devices */
+	struct work_struct hid_work;
+
+	/* list of active HID devices */
+	struct list_head	hid_list;
+
+	/* list of new HID devices to register */
+	struct list_head	new_hid_list;
+
+	/* list of dead HID devices to unregister */
+	struct list_head	dead_hid_list;
+};
+
+static struct usb_interface_descriptor acc_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bInterfaceNumber       = 0,
+	.bNumEndpoints          = 2,
+	.bInterfaceClass        = USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass     = USB_SUBCLASS_VENDOR_SPEC,
+	.bInterfaceProtocol     = 0,
+};
+
+static struct usb_endpoint_descriptor acc_highspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor acc_highspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor acc_fullspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor acc_fullspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *fs_acc_descs[] = {
+	(struct usb_descriptor_header *) &acc_interface_desc,
+	(struct usb_descriptor_header *) &acc_fullspeed_in_desc,
+	(struct usb_descriptor_header *) &acc_fullspeed_out_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *hs_acc_descs[] = {
+	(struct usb_descriptor_header *) &acc_interface_desc,
+	(struct usb_descriptor_header *) &acc_highspeed_in_desc,
+	(struct usb_descriptor_header *) &acc_highspeed_out_desc,
+	NULL,
+};
+
+static struct usb_string acc_string_defs[] = {
+	[INTERFACE_STRING_INDEX].s	= "Android Accessory Interface",
+	{  },	/* end of list */
+};
+
+static struct usb_gadget_strings acc_string_table = {
+	.language		= 0x0409,	/* en-US */
+	.strings		= acc_string_defs,
+};
+
+static struct usb_gadget_strings *acc_strings[] = {
+	&acc_string_table,
+	NULL,
+};
+
+/* temporary variable used between acc_open() and acc_gadget_bind() */
+static struct acc_dev *_acc_dev;
+
+static inline struct acc_dev *func_to_dev(struct usb_function *f)
+{
+	return container_of(f, struct acc_dev, function);
+}
+
+static struct usb_request *acc_request_new(struct usb_ep *ep, int buffer_size)
+{
+	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!req)
+		return NULL;
+
+	/* now allocate buffers for the requests */
+	req->buf = kmalloc(buffer_size, GFP_KERNEL);
+	if (!req->buf) {
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+
+	return req;
+}
+
+static void acc_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+	if (req) {
+		kfree(req->buf);
+		usb_ep_free_request(ep, req);
+	}
+}
+
+/* add a request to the tail of a list */
+static void req_put(struct acc_dev *dev, struct list_head *head,
+		struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add_tail(&req->list, head);
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request *req_get(struct acc_dev *dev, struct list_head *head)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (list_empty(head)) {
+		req = NULL;
+	} else {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return req;
+}
+
+static void acc_set_disconnected(struct acc_dev *dev)
+{
+	dev->online = 0;
+	dev->disconnected = 1;
+}
+
+static void acc_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+	struct acc_dev *dev = _acc_dev;
+
+	if (req->status == -ESHUTDOWN) {
+		pr_debug("acc_complete_in set disconnected");
+		acc_set_disconnected(dev);
+	}
+
+	req_put(dev, &dev->tx_idle, req);
+
+	wake_up(&dev->write_wq);
+}
+
+static void acc_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+	struct acc_dev *dev = _acc_dev;
+
+	dev->rx_done = 1;
+	if (req->status == -ESHUTDOWN) {
+		pr_debug("acc_complete_out set disconnected");
+		acc_set_disconnected(dev);
+	}
+
+	wake_up(&dev->read_wq);
+}
+
+static void acc_complete_set_string(struct usb_ep *ep, struct usb_request *req)
+{
+	struct acc_dev	*dev = ep->driver_data;
+	char *string_dest = NULL;
+	int length = req->actual;
+
+	if (req->status != 0) {
+		pr_err("acc_complete_set_string, err %d\n", req->status);
+		return;
+	}
+
+	switch (dev->string_index) {
+	case ACCESSORY_STRING_MANUFACTURER:
+		string_dest = dev->manufacturer;
+		break;
+	case ACCESSORY_STRING_MODEL:
+		string_dest = dev->model;
+		break;
+	case ACCESSORY_STRING_DESCRIPTION:
+		string_dest = dev->description;
+		break;
+	case ACCESSORY_STRING_VERSION:
+		string_dest = dev->version;
+		break;
+	case ACCESSORY_STRING_URI:
+		string_dest = dev->uri;
+		break;
+	case ACCESSORY_STRING_SERIAL:
+		string_dest = dev->serial;
+		break;
+	}
+	if (string_dest) {
+		unsigned long flags;
+
+		if (length >= ACC_STRING_SIZE)
+			length = ACC_STRING_SIZE - 1;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		memcpy(string_dest, req->buf, length);
+		/* ensure zero termination */
+		string_dest[length] = 0;
+		spin_unlock_irqrestore(&dev->lock, flags);
+	} else {
+		pr_err("unknown accessory string index %d\n",
+			dev->string_index);
+	}
+}
+
+static void acc_complete_set_hid_report_desc(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct acc_hid_dev *hid = req->context;
+	struct acc_dev *dev = hid->dev;
+	int length = req->actual;
+
+	if (req->status != 0) {
+		pr_err("acc_complete_set_hid_report_desc, err %d\n",
+			req->status);
+		return;
+	}
+
+	memcpy(hid->report_desc + hid->report_desc_offset, req->buf, length);
+	hid->report_desc_offset += length;
+	if (hid->report_desc_offset == hid->report_desc_len) {
+		/* After we have received the entire report descriptor
+		 * we schedule work to initialize the HID device
+		 */
+		schedule_work(&dev->hid_work);
+	}
+}
+
+static void acc_complete_send_hid_event(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct acc_hid_dev *hid = req->context;
+	int length = req->actual;
+
+	if (req->status != 0) {
+		pr_err("acc_complete_send_hid_event, err %d\n", req->status);
+		return;
+	}
+
+	hid_report_raw_event(hid->hid, HID_INPUT_REPORT, req->buf, length, 1);
+}
+
+static int acc_hid_parse(struct hid_device *hid)
+{
+	struct acc_hid_dev *hdev = hid->driver_data;
+
+	hid_parse_report(hid, hdev->report_desc, hdev->report_desc_len);
+	return 0;
+}
+
+static int acc_hid_start(struct hid_device *hid)
+{
+	return 0;
+}
+
+static void acc_hid_stop(struct hid_device *hid)
+{
+}
+
+static int acc_hid_open(struct hid_device *hid)
+{
+	return 0;
+}
+
+static void acc_hid_close(struct hid_device *hid)
+{
+}
+
+static struct hid_ll_driver acc_hid_ll_driver = {
+	.parse = acc_hid_parse,
+	.start = acc_hid_start,
+	.stop = acc_hid_stop,
+	.open = acc_hid_open,
+	.close = acc_hid_close,
+};
+
+static struct acc_hid_dev *acc_hid_new(struct acc_dev *dev,
+		int id, int desc_len)
+{
+	struct acc_hid_dev *hdev;
+
+	hdev = kzalloc(sizeof(*hdev), GFP_ATOMIC);
+	if (!hdev)
+		return NULL;
+	hdev->report_desc = kzalloc(desc_len, GFP_ATOMIC);
+	if (!hdev->report_desc) {
+		kfree(hdev);
+		return NULL;
+	}
+	hdev->dev = dev;
+	hdev->id = id;
+	hdev->report_desc_len = desc_len;
+
+	return hdev;
+}
+
+static struct acc_hid_dev *acc_hid_get(struct list_head *list, int id)
+{
+	struct acc_hid_dev *hid;
+
+	list_for_each_entry(hid, list, list) {
+		if (hid->id == id)
+			return hid;
+	}
+	return NULL;
+}
+
+static int acc_register_hid(struct acc_dev *dev, int id, int desc_length)
+{
+	struct acc_hid_dev *hid;
+	unsigned long flags;
+
+	/* report descriptor length must be > 0 */
+	if (desc_length <= 0)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	/* replace HID if one already exists with this ID */
+	hid = acc_hid_get(&dev->hid_list, id);
+	if (!hid)
+		hid = acc_hid_get(&dev->new_hid_list, id);
+	if (hid)
+		list_move(&hid->list, &dev->dead_hid_list);
+
+	hid = acc_hid_new(dev, id, desc_length);
+	if (!hid) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return -ENOMEM;
+	}
+
+	list_add(&hid->list, &dev->new_hid_list);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* schedule work to register the HID device */
+	schedule_work(&dev->hid_work);
+	return 0;
+}
+
+static int acc_unregister_hid(struct acc_dev *dev, int id)
+{
+	struct acc_hid_dev *hid;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	hid = acc_hid_get(&dev->hid_list, id);
+	if (!hid)
+		hid = acc_hid_get(&dev->new_hid_list, id);
+	if (!hid) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return -EINVAL;
+	}
+
+	list_move(&hid->list, &dev->dead_hid_list);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	schedule_work(&dev->hid_work);
+	return 0;
+}
+
+static int create_bulk_endpoints(struct acc_dev *dev,
+				struct usb_endpoint_descriptor *in_desc,
+				struct usb_endpoint_descriptor *out_desc)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	struct usb_ep *ep;
+	int i;
+
+	DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_in = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for ep_out got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_out = ep;
+
+	/* now allocate requests for our endpoints */
+	for (i = 0; i < TX_REQ_MAX; i++) {
+		req = acc_request_new(dev->ep_in, BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = acc_complete_in;
+		req_put(dev, &dev->tx_idle, req);
+	}
+	for (i = 0; i < RX_REQ_MAX; i++) {
+		req = acc_request_new(dev->ep_out, BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = acc_complete_out;
+		dev->rx_req[i] = req;
+	}
+
+	return 0;
+
+fail:
+	pr_err("acc_bind() could not allocate requests\n");
+	while ((req = req_get(dev, &dev->tx_idle)))
+		acc_request_free(req, dev->ep_in);
+	for (i = 0; i < RX_REQ_MAX; i++)
+		acc_request_free(dev->rx_req[i], dev->ep_out);
+	return -1;
+}
+
+static ssize_t acc_read(struct file *fp, char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct acc_dev *dev = fp->private_data;
+	struct usb_request *req;
+	ssize_t r = count;
+	unsigned xfer;
+	int ret = 0;
+
+	pr_debug("acc_read(%zu)\n", count);
+
+	if (dev->disconnected) {
+		pr_debug("acc_read disconnected");
+		return -ENODEV;
+	}
+
+	if (count > BULK_BUFFER_SIZE)
+		count = BULK_BUFFER_SIZE;
+
+	/* we will block until we're online */
+	pr_debug("acc_read: waiting for online\n");
+	ret = wait_event_interruptible(dev->read_wq, dev->online);
+	if (ret < 0) {
+		r = ret;
+		goto done;
+	}
+
+	if (dev->rx_done) {
+		/* last request was cancelled; data may already be here */
+		req = dev->rx_req[0];
+		goto copy_data;
+	}
+
+requeue_req:
+	/* queue a request */
+	req = dev->rx_req[0];
+	req->length = count;
+	dev->rx_done = 0;
+	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
+	if (ret < 0) {
+		r = -EIO;
+		goto done;
+	} else {
+		pr_debug("rx %p queue\n", req);
+	}
+
+	/* wait for a request to complete */
+	ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+	if (ret < 0) {
+		r = ret;
+		ret = usb_ep_dequeue(dev->ep_out, req);
+		if (ret != 0) {
+			/*
+			 * Cancel failed: data may already have been
+			 * received; it will be retrieved on the next read.
+			 */
+			pr_debug("acc_read: cancelling failed %d\n", ret);
+		}
+		goto done;
+	}
+
+copy_data:
+	dev->rx_done = 0;
+	if (dev->online) {
+		/* If we got a 0-len packet, throw it back and try again. */
+		if (req->actual == 0)
+			goto requeue_req;
+
+		pr_debug("rx %p %u\n", req, req->actual);
+		xfer = (req->actual < count) ? req->actual : count;
+		r = xfer;
+		if (copy_to_user(buf, req->buf, xfer))
+			r = -EFAULT;
+	} else
+		r = -EIO;
+
+done:
+	pr_debug("acc_read returning %zd\n", r);
+	return r;
+}
+
+static ssize_t acc_write(struct file *fp, const char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct acc_dev *dev = fp->private_data;
+	struct usb_request *req = NULL;
+	ssize_t r = count;
+	unsigned xfer;
+	int ret;
+
+	pr_debug("acc_write(%zu)\n", count);
+
+	if (!dev->online || dev->disconnected) {
+		pr_debug("acc_write disconnected or not online");
+		return -ENODEV;
+	}
+
+	while (count > 0) {
+		if (!dev->online) {
+			pr_debug("acc_write dev->error\n");
+			r = -EIO;
+			break;
+		}
+
+		/* get an idle tx request to use */
+		req = NULL;
+		ret = wait_event_interruptible(dev->write_wq,
+			((req = req_get(dev, &dev->tx_idle)) || !dev->online));
+		if (!req) {
+			r = ret;
+			break;
+		}
+
+		if (count > BULK_BUFFER_SIZE) {
+			xfer = BULK_BUFFER_SIZE;
+			/* there will be more TX requests, so no ZLP yet */
+			req->zero = 0;
+		} else {
+			xfer = count;
+			/* If the data length is a multiple of the
+			 * maxpacket size then send a zero length
+			 * packet (ZLP).
+			 */
+			req->zero = ((xfer % dev->ep_in->maxpacket) == 0);
+		}
+		if (copy_from_user(req->buf, buf, xfer)) {
+			r = -EFAULT;
+			break;
+		}
+
+		req->length = xfer;
+		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+		if (ret < 0) {
+			pr_debug("acc_write: xfer error %d\n", ret);
+			r = -EIO;
+			break;
+		}
+
+		buf += xfer;
+		count -= xfer;
+
+		/* zero this so we don't try to free it on error exit */
+		req = NULL;
+	}
+
+	if (req)
+		req_put(dev, &dev->tx_idle, req);
+
+	pr_debug("acc_write returning %zd\n", r);
+	return r;
+}
+
+static long acc_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+	struct acc_dev *dev = fp->private_data;
+	char *src = NULL;
+	int ret;
+
+	switch (code) {
+	case ACCESSORY_GET_STRING_MANUFACTURER:
+		src = dev->manufacturer;
+		break;
+	case ACCESSORY_GET_STRING_MODEL:
+		src = dev->model;
+		break;
+	case ACCESSORY_GET_STRING_DESCRIPTION:
+		src = dev->description;
+		break;
+	case ACCESSORY_GET_STRING_VERSION:
+		src = dev->version;
+		break;
+	case ACCESSORY_GET_STRING_URI:
+		src = dev->uri;
+		break;
+	case ACCESSORY_GET_STRING_SERIAL:
+		src = dev->serial;
+		break;
+	case ACCESSORY_IS_START_REQUESTED:
+		return dev->start_requested;
+	case ACCESSORY_GET_AUDIO_MODE:
+		return dev->audio_mode;
+	}
+	if (!src)
+		return -EINVAL;
+
+	ret = strlen(src) + 1;
+	if (copy_to_user((void __user *)value, src, ret))
+		ret = -EFAULT;
+	return ret;
+}
+
+static int acc_open(struct inode *ip, struct file *fp)
+{
+	printk(KERN_INFO "acc_open\n");
+	if (atomic_xchg(&_acc_dev->open_excl, 1))
+		return -EBUSY;
+
+	_acc_dev->disconnected = 0;
+	fp->private_data = _acc_dev;
+	return 0;
+}
+
+static int acc_release(struct inode *ip, struct file *fp)
+{
+	printk(KERN_INFO "acc_release\n");
+
+	WARN_ON(!atomic_xchg(&_acc_dev->open_excl, 0));
+	_acc_dev->disconnected = 0;
+	return 0;
+}
+
+/* file operations for /dev/usb_accessory */
+static const struct file_operations acc_fops = {
+	.owner = THIS_MODULE,
+	.read = acc_read,
+	.write = acc_write,
+	.unlocked_ioctl = acc_ioctl,
+	.open = acc_open,
+	.release = acc_release,
+};
+
+static int acc_hid_probe(struct hid_device *hdev,
+		const struct hid_device_id *id)
+{
+	int ret;
+
+	ret = hid_parse(hdev);
+	if (ret)
+		return ret;
+	return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+}
+
+static struct miscdevice acc_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "usb_accessory",
+	.fops = &acc_fops,
+};
+
+static const struct hid_device_id acc_hid_table[] = {
+	{ HID_USB_DEVICE(HID_ANY_ID, HID_ANY_ID) },
+	{ }
+};
+
+static struct hid_driver acc_hid_driver = {
+	.name = "USB accessory",
+	.id_table = acc_hid_table,
+	.probe = acc_hid_probe,
+};
+
+static int acc_ctrlrequest(struct usb_composite_dev *cdev,
+				const struct usb_ctrlrequest *ctrl)
+{
+	struct acc_dev	*dev = _acc_dev;
+	int	value = -EOPNOTSUPP;
+	struct acc_hid_dev *hid;
+	int offset;
+	u8 b_requestType = ctrl->bRequestType;
+	u8 b_request = ctrl->bRequest;
+	u16	w_index = le16_to_cpu(ctrl->wIndex);
+	u16	w_value = le16_to_cpu(ctrl->wValue);
+	u16	w_length = le16_to_cpu(ctrl->wLength);
+	unsigned long flags;
+
+/*
+	printk(KERN_INFO "acc_ctrlrequest "
+			"%02x.%02x v%04x i%04x l%u\n",
+			b_requestType, b_request,
+			w_value, w_index, w_length);
+*/
+
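+	/*
+	 * AOA vendor requests: OUT|VENDOR carries accessory strings,
+	 * audio mode and HID traffic from the host; IN|VENDOR lets the
+	 * host read the protocol version.
+	 */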
+	if (b_requestType == (USB_DIR_OUT | USB_TYPE_VENDOR)) {
+		if (b_request == ACCESSORY_START) {
+			dev->start_requested = 1;
+			schedule_delayed_work(
+				&dev->start_work, msecs_to_jiffies(10));
+			value = 0;
+		} else if (b_request == ACCESSORY_SEND_STRING) {
+			dev->string_index = w_index;
+			cdev->gadget->ep0->driver_data = dev;
+			cdev->req->complete = acc_complete_set_string;
+			value = w_length;
+		} else if (b_request == ACCESSORY_SET_AUDIO_MODE &&
+				w_index == 0 && w_length == 0) {
+			dev->audio_mode = w_value;
+			value = 0;
+		} else if (b_request == ACCESSORY_REGISTER_HID) {
+			value = acc_register_hid(dev, w_value, w_index);
+		} else if (b_request == ACCESSORY_UNREGISTER_HID) {
+			value = acc_unregister_hid(dev, w_value);
+		} else if (b_request == ACCESSORY_SET_HID_REPORT_DESC) {
+			spin_lock_irqsave(&dev->lock, flags);
+			hid = acc_hid_get(&dev->new_hid_list, w_value);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (!hid) {
+				value = -EINVAL;
+				goto err;
+			}
+			offset = w_index;
+			if (offset != hid->report_desc_offset
+				|| offset + w_length > hid->report_desc_len) {
+				value = -EINVAL;
+				goto err;
+			}
+			cdev->req->context = hid;
+			cdev->req->complete = acc_complete_set_hid_report_desc;
+			value = w_length;
+		} else if (b_request == ACCESSORY_SEND_HID_EVENT) {
+			spin_lock_irqsave(&dev->lock, flags);
+			hid = acc_hid_get(&dev->hid_list, w_value);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (!hid) {
+				value = -EINVAL;
+				goto err;
+			}
+			cdev->req->context = hid;
+			cdev->req->complete = acc_complete_send_hid_event;
+			value = w_length;
+		}
+	} else if (b_requestType == (USB_DIR_IN | USB_TYPE_VENDOR)) {
+		if (b_request == ACCESSORY_GET_PROTOCOL) {
+			*((u16 *)cdev->req->buf) = PROTOCOL_VERSION;
+			value = sizeof(u16);
+
+			/* clear any strings left over from a previous session */
+			memset(dev->manufacturer, 0, sizeof(dev->manufacturer));
+			memset(dev->model, 0, sizeof(dev->model));
+			memset(dev->description, 0, sizeof(dev->description));
+			memset(dev->version, 0, sizeof(dev->version));
+			memset(dev->uri, 0, sizeof(dev->uri));
+			memset(dev->serial, 0, sizeof(dev->serial));
+			dev->start_requested = 0;
+			dev->audio_mode = 0;
+		}
+	}
+
+	if (value >= 0) {
+		cdev->req->zero = 0;
+		cdev->req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+		if (value < 0)
+			ERROR(cdev, "%s setup response queue error\n",
+				__func__);
+	}
+
+err:
+	if (value == -EOPNOTSUPP)
+		VDBG(cdev,
+			"unknown class-specific control req "
+			"%02x.%02x v%04x i%04x l%u\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	return value;
+}
+
+static int
+acc_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct acc_dev	*dev = func_to_dev(f);
+	int			id;
+	int			ret;
+
+	DBG(cdev, "acc_function_bind dev: %p\n", dev);
+
+	ret = hid_register_driver(&acc_hid_driver);
+	if (ret)
+		return ret;
+
+	dev->start_requested = 0;
+
+	/* allocate interface ID(s) */
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	acc_interface_desc.bInterfaceNumber = id;
+
+	/* allocate endpoints */
+	ret = create_bulk_endpoints(dev, &acc_fullspeed_in_desc,
+			&acc_fullspeed_out_desc);
+	if (ret)
+		return ret;
+
+	/* support high speed hardware */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		acc_highspeed_in_desc.bEndpointAddress =
+			acc_fullspeed_in_desc.bEndpointAddress;
+		acc_highspeed_out_desc.bEndpointAddress =
+			acc_fullspeed_out_desc.bEndpointAddress;
+	}
+
+	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			f->name, dev->ep_in->name, dev->ep_out->name);
+	return 0;
+}
+
+static void
+kill_all_hid_devices(struct acc_dev *dev)
+{
+	struct acc_hid_dev *hid;
+	struct list_head *entry, *temp;
+	unsigned long flags;
+
+	/* do nothing if usb accessory device doesn't exist */
+	if (!dev)
+		return;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_for_each_safe(entry, temp, &dev->hid_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		list_del(&hid->list);
+		list_add(&hid->list, &dev->dead_hid_list);
+	}
+	list_for_each_safe(entry, temp, &dev->new_hid_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		list_del(&hid->list);
+		list_add(&hid->list, &dev->dead_hid_list);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	schedule_work(&dev->hid_work);
+}
+
+static void
+acc_hid_unbind(struct acc_dev *dev)
+{
+	hid_unregister_driver(&acc_hid_driver);
+	kill_all_hid_devices(dev);
+}
+
+static void
+acc_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct acc_dev	*dev = func_to_dev(f);
+	struct usb_request *req;
+	int i;
+
+	while ((req = req_get(dev, &dev->tx_idle)))
+		acc_request_free(req, dev->ep_in);
+	for (i = 0; i < RX_REQ_MAX; i++)
+		acc_request_free(dev->rx_req[i], dev->ep_out);
+
+	acc_hid_unbind(dev);
+}
+
+static void acc_start_work(struct work_struct *data)
+{
+	char *envp[2] = { "ACCESSORY=START", NULL };
+	kobject_uevent_env(&acc_device.this_device->kobj, KOBJ_CHANGE, envp);
+}
+
+static int acc_hid_init(struct acc_hid_dev *hdev)
+{
+	struct hid_device *hid;
+	int ret;
+
+	hid = hid_allocate_device();
+	if (IS_ERR(hid))
+		return PTR_ERR(hid);
+
+	hid->ll_driver = &acc_hid_ll_driver;
+	hid->dev.parent = acc_device.this_device;
+
+	hid->bus = BUS_USB;
+	hid->vendor = HID_ANY_ID;
+	hid->product = HID_ANY_ID;
+	hid->driver_data = hdev;
+	ret = hid_add_device(hid);
+	if (ret) {
+		pr_err("can't add hid device: %d\n", ret);
+		hid_destroy_device(hid);
+		return ret;
+	}
+
+	hdev->hid = hid;
+	return 0;
+}
+
+static void acc_hid_delete(struct acc_hid_dev *hid)
+{
+	kfree(hid->report_desc);
+	kfree(hid);
+}
+
+static void acc_hid_work(struct work_struct *data)
+{
+	struct acc_dev *dev = _acc_dev;
+	struct list_head	*entry, *temp;
+	struct acc_hid_dev *hid;
+	struct list_head	new_list, dead_list;
+	unsigned long flags;
+
+	INIT_LIST_HEAD(&new_list);
+
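+	/*
+	 * Splice pending entries onto local lists under the spinlock,
+	 * then register/destroy the hid devices outside it, since
+	 * hid_add_device() and hid_destroy_device() may sleep.
+	 */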
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* copy hids that are ready for initialization to new_list */
+	list_for_each_safe(entry, temp, &dev->new_hid_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		if (hid->report_desc_offset == hid->report_desc_len)
+			list_move(&hid->list, &new_list);
+	}
+
+	if (list_empty(&dev->dead_hid_list)) {
+		INIT_LIST_HEAD(&dead_list);
+	} else {
+		/* move all of dev->dead_hid_list to dead_list
+		 * (an open-coded list_replace_init())
+		 */
+		dead_list.prev = dev->dead_hid_list.prev;
+		dead_list.next = dev->dead_hid_list.next;
+		dead_list.next->prev = &dead_list;
+		dead_list.prev->next = &dead_list;
+		INIT_LIST_HEAD(&dev->dead_hid_list);
+	}
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* register new HID devices */
+	list_for_each_safe(entry, temp, &new_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		if (acc_hid_init(hid)) {
+			pr_err("can't add HID device %p\n", hid);
+			acc_hid_delete(hid);
+		} else {
+			spin_lock_irqsave(&dev->lock, flags);
+			list_move(&hid->list, &dev->hid_list);
+			spin_unlock_irqrestore(&dev->lock, flags);
+		}
+	}
+
+	/* remove dead HID devices */
+	list_for_each_safe(entry, temp, &dead_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		list_del(&hid->list);
+		if (hid->hid)
+			hid_destroy_device(hid->hid);
+		acc_hid_delete(hid);
+	}
+}
+
+static int acc_function_set_alt(struct usb_function *f,
+		unsigned intf, unsigned alt)
+{
+	struct acc_dev	*dev = func_to_dev(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret;
+
+	DBG(cdev, "acc_function_set_alt intf: %d alt: %d\n", intf, alt);
+
+	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+	if (ret)
+		return ret;
+
+	ret = usb_ep_enable(dev->ep_in);
+	if (ret)
+		return ret;
+
+	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+	if (ret)
+		return ret;
+
+	ret = usb_ep_enable(dev->ep_out);
+	if (ret) {
+		usb_ep_disable(dev->ep_in);
+		return ret;
+	}
+
+	dev->online = 1;
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+	return 0;
+}
+
+static void acc_function_disable(struct usb_function *f)
+{
+	struct acc_dev	*dev = func_to_dev(f);
+	struct usb_composite_dev	*cdev = dev->cdev;
+
+	DBG(cdev, "acc_function_disable\n");
+	acc_set_disconnected(dev);
+	usb_ep_disable(dev->ep_in);
+	usb_ep_disable(dev->ep_out);
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+
+	VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+
+static int acc_bind_config(struct usb_configuration *c)
+{
+	struct acc_dev *dev = _acc_dev;
+	int ret;
+
+	printk(KERN_INFO "acc_bind_config\n");
+
+	/* allocate a string ID for our interface */
+	if (acc_string_defs[INTERFACE_STRING_INDEX].id == 0) {
+		ret = usb_string_id(c->cdev);
+		if (ret < 0)
+			return ret;
+		acc_string_defs[INTERFACE_STRING_INDEX].id = ret;
+		acc_interface_desc.iInterface = ret;
+	}
+
+	dev->cdev = c->cdev;
+	dev->function.name = "accessory";
+	dev->function.strings = acc_strings;
+	dev->function.fs_descriptors = fs_acc_descs;
+	dev->function.hs_descriptors = hs_acc_descs;
+	dev->function.bind = acc_function_bind;
+	dev->function.unbind = acc_function_unbind;
+	dev->function.set_alt = acc_function_set_alt;
+	dev->function.disable = acc_function_disable;
+
+	return usb_add_function(c, &dev->function);
+}
+
+static int acc_setup(void)
+{
+	struct acc_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	spin_lock_init(&dev->lock);
+	init_waitqueue_head(&dev->read_wq);
+	init_waitqueue_head(&dev->write_wq);
+	atomic_set(&dev->open_excl, 0);
+	INIT_LIST_HEAD(&dev->tx_idle);
+	INIT_LIST_HEAD(&dev->hid_list);
+	INIT_LIST_HEAD(&dev->new_hid_list);
+	INIT_LIST_HEAD(&dev->dead_hid_list);
+	INIT_DELAYED_WORK(&dev->start_work, acc_start_work);
+	INIT_WORK(&dev->hid_work, acc_hid_work);
+
+	/* _acc_dev must be set before calling usb_gadget_register_driver */
+	_acc_dev = dev;
+
+	ret = misc_register(&acc_device);
+	if (ret)
+		goto err;
+
+	return 0;
+
+err:
+	kfree(dev);
+	pr_err("USB accessory gadget driver failed to initialize\n");
+	return ret;
+}
+
+static void acc_disconnect(void)
+{
+	/* unregister all HID devices if USB is disconnected */
+	kill_all_hid_devices(_acc_dev);
+}
+
+static void acc_cleanup(void)
+{
+	misc_deregister(&acc_device);
+	kfree(_acc_dev);
+	_acc_dev = NULL;
+}
diff --git a/drivers/usb/gadget/f_audio_source.c b/drivers/usb/gadget/f_audio_source.c
new file mode 100644
index 0000000..07a1a3c
--- /dev/null
+++ b/drivers/usb/gadget/f_audio_source.c
@@ -0,0 +1,834 @@
+/*
+ * Gadget Function Driver for USB audio source device
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/usb/audio.h>
+#include <linux/wait.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+
+#define SAMPLE_RATE 44100
+#define FRAMES_PER_MSEC (SAMPLE_RATE / 1000)
+
+#define IN_EP_MAX_PACKET_SIZE 256
+
+/* Number of requests to allocate */
+#define IN_EP_REQ_COUNT 4
+
+#define AUDIO_AC_INTERFACE	0
+#define AUDIO_AS_INTERFACE	1
+#define AUDIO_NUM_INTERFACES	2
+
+/* B.3.1  Standard AC Interface Descriptor */
+static struct usb_interface_descriptor audio_ac_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bNumEndpoints =	0,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOCONTROL,
+};
+
+DECLARE_UAC_AC_HEADER_DESCRIPTOR(2);
+
+#define UAC_DT_AC_HEADER_LENGTH	UAC_DT_AC_HEADER_SIZE(AUDIO_NUM_INTERFACES)
+/* 1 input terminal, 1 output terminal and 1 feature unit */
+#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH \
+	+ UAC_DT_INPUT_TERMINAL_SIZE + UAC_DT_OUTPUT_TERMINAL_SIZE \
+	+ UAC_DT_FEATURE_UNIT_SIZE(0))
+/* B.3.2  Class-Specific AC Interface Descriptor */
+static struct uac1_ac_header_descriptor_2 audio_ac_header_desc = {
+	.bLength =		UAC_DT_AC_HEADER_LENGTH,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_HEADER,
+	.bcdADC =		__constant_cpu_to_le16(0x0100),
+	.wTotalLength =		__constant_cpu_to_le16(UAC_DT_TOTAL_LENGTH),
+	.bInCollection =	AUDIO_NUM_INTERFACES,
+	.baInterfaceNr = {
+		[0] =		AUDIO_AC_INTERFACE,
+		[1] =		AUDIO_AS_INTERFACE,
+	}
+};
+
+#define INPUT_TERMINAL_ID	1
+static struct uac_input_terminal_descriptor audio_input_terminal_desc = {
+	.bLength =		UAC_DT_INPUT_TERMINAL_SIZE,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_INPUT_TERMINAL,
+	.bTerminalID =		INPUT_TERMINAL_ID,
+	.wTerminalType =	UAC_INPUT_TERMINAL_MICROPHONE,
+	.bAssocTerminal =	0,
+	.wChannelConfig =	0x3,
+};
+
+DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0);
+
+#define FEATURE_UNIT_ID		2
+static struct uac_feature_unit_descriptor_0 audio_feature_unit_desc = {
+	.bLength		= UAC_DT_FEATURE_UNIT_SIZE(0),
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= UAC_FEATURE_UNIT,
+	.bUnitID		= FEATURE_UNIT_ID,
+	.bSourceID		= INPUT_TERMINAL_ID,
+	.bControlSize		= 2,
+};
+
+#define OUTPUT_TERMINAL_ID	3
+static struct uac1_output_terminal_descriptor audio_output_terminal_desc = {
+	.bLength		= UAC_DT_OUTPUT_TERMINAL_SIZE,
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= UAC_OUTPUT_TERMINAL,
+	.bTerminalID		= OUTPUT_TERMINAL_ID,
+	.wTerminalType		= UAC_TERMINAL_STREAMING,
+	.bAssocTerminal		= FEATURE_UNIT_ID,
+	.bSourceID		= FEATURE_UNIT_ID,
+};
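+
+/*
+ * UAC capture topology as seen by the host: microphone INPUT_TERMINAL
+ * -> FEATURE_UNIT -> streaming OUTPUT_TERMINAL.
+ */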
+
+/* B.4.1  Standard AS Interface Descriptor */
+static struct usb_interface_descriptor audio_as_interface_alt_0_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bAlternateSetting =	0,
+	.bNumEndpoints =	0,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+static struct usb_interface_descriptor audio_as_interface_alt_1_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bAlternateSetting =	1,
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+/* B.4.2  Class-Specific AS Interface Descriptor */
+static struct uac1_as_header_descriptor audio_as_header_desc = {
+	.bLength =		UAC_DT_AS_HEADER_SIZE,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_AS_GENERAL,
+	.bTerminalLink =	INPUT_TERMINAL_ID,
+	.bDelay =		1,
+	.wFormatTag =		UAC_FORMAT_TYPE_I_PCM,
+};
+
+DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
+
+static struct uac_format_type_i_discrete_descriptor_1 audio_as_type_i_desc = {
+	.bLength =		UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_FORMAT_TYPE,
+	.bFormatType =		UAC_FORMAT_TYPE_I,
+	.bSubframeSize =	2,
+	.bBitResolution =	16,
+	.bSamFreqType =		1,
+};
+
+/* Standard ISO IN Endpoint Descriptor for highspeed */
+static struct usb_endpoint_descriptor audio_hs_as_in_ep_desc  = {
+	.bLength =		USB_DT_ENDPOINT_AUDIO_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_SYNC_SYNC
+				| USB_ENDPOINT_XFER_ISOC,
+	.wMaxPacketSize =	__constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
+	.bInterval =		4, /* poll 1 per millisecond */
+};
+
+/* Standard ISO IN Endpoint Descriptor for fullspeed */
+static struct usb_endpoint_descriptor audio_fs_as_in_ep_desc  = {
+	.bLength =		USB_DT_ENDPOINT_AUDIO_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_SYNC_SYNC
+				| USB_ENDPOINT_XFER_ISOC,
+	.wMaxPacketSize =	__constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
+	.bInterval =		1, /* poll 1 per millisecond */
+};
+
+/* Class-specific AS ISO IN Endpoint Descriptor */
+static struct uac_iso_endpoint_descriptor audio_as_iso_in_desc = {
+	.bLength =		UAC_ISO_ENDPOINT_DESC_SIZE,
+	.bDescriptorType =	USB_DT_CS_ENDPOINT,
+	.bDescriptorSubtype =	UAC_EP_GENERAL,
+	.bmAttributes =		1,
+	.bLockDelayUnits =	1,
+	.wLockDelay =		__constant_cpu_to_le16(1),
+};
+
+static struct usb_descriptor_header *hs_audio_desc[] = {
+	(struct usb_descriptor_header *)&audio_ac_interface_desc,
+	(struct usb_descriptor_header *)&audio_ac_header_desc,
+
+	(struct usb_descriptor_header *)&audio_input_terminal_desc,
+	(struct usb_descriptor_header *)&audio_output_terminal_desc,
+	(struct usb_descriptor_header *)&audio_feature_unit_desc,
+
+	(struct usb_descriptor_header *)&audio_as_interface_alt_0_desc,
+	(struct usb_descriptor_header *)&audio_as_interface_alt_1_desc,
+	(struct usb_descriptor_header *)&audio_as_header_desc,
+
+	(struct usb_descriptor_header *)&audio_as_type_i_desc,
+
+	(struct usb_descriptor_header *)&audio_hs_as_in_ep_desc,
+	(struct usb_descriptor_header *)&audio_as_iso_in_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *fs_audio_desc[] = {
+	(struct usb_descriptor_header *)&audio_ac_interface_desc,
+	(struct usb_descriptor_header *)&audio_ac_header_desc,
+
+	(struct usb_descriptor_header *)&audio_input_terminal_desc,
+	(struct usb_descriptor_header *)&audio_output_terminal_desc,
+	(struct usb_descriptor_header *)&audio_feature_unit_desc,
+
+	(struct usb_descriptor_header *)&audio_as_interface_alt_0_desc,
+	(struct usb_descriptor_header *)&audio_as_interface_alt_1_desc,
+	(struct usb_descriptor_header *)&audio_as_header_desc,
+
+	(struct usb_descriptor_header *)&audio_as_type_i_desc,
+
+	(struct usb_descriptor_header *)&audio_fs_as_in_ep_desc,
+	(struct usb_descriptor_header *)&audio_as_iso_in_desc,
+	NULL,
+};
+
+static struct snd_pcm_hardware audio_hw_info = {
+	.info =			SNDRV_PCM_INFO_MMAP |
+				SNDRV_PCM_INFO_MMAP_VALID |
+				SNDRV_PCM_INFO_BATCH |
+				SNDRV_PCM_INFO_INTERLEAVED |
+				SNDRV_PCM_INFO_BLOCK_TRANSFER,
+
+	.formats		= SNDRV_PCM_FMTBIT_S16_LE,
+	.channels_min		= 2,
+	.channels_max		= 2,
+	.rate_min		= SAMPLE_RATE,
+	.rate_max		= SAMPLE_RATE,
+
+	.buffer_bytes_max =	1024 * 1024,
+	.period_bytes_min =	64,
+	.period_bytes_max =	512 * 1024,
+	.periods_min =		2,
+	.periods_max =		1024,
+};
+
+/*-------------------------------------------------------------------------*/
+
+struct audio_source_config {
+	int	card;
+	int	device;
+};
+
+struct audio_dev {
+	struct usb_function		func;
+	struct snd_card			*card;
+	struct snd_pcm			*pcm;
+	struct snd_pcm_substream *substream;
+
+	struct list_head		idle_reqs;
+	struct usb_ep			*in_ep;
+
+	spinlock_t			lock;
+
+	/* beginning, end and current position in our buffer */
+	void				*buffer_start;
+	void				*buffer_end;
+	void				*buffer_pos;
+
+	/* byte size of a "period" */
+	unsigned int			period;
+	/* bytes sent since last call to snd_pcm_period_elapsed */
+	unsigned int			period_offset;
+	/* time we started playing */
+	ktime_t				start_time;
+	/* number of frames sent since start_time */
+	s64				frames_sent;
+};
+
+static inline struct audio_dev *func_to_audio(struct usb_function *f)
+{
+	return container_of(f, struct audio_dev, func);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_request *audio_request_new(struct usb_ep *ep, int buffer_size)
+{
+	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!req)
+		return NULL;
+
+	req->buf = kmalloc(buffer_size, GFP_KERNEL);
+	if (!req->buf) {
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+	req->length = buffer_size;
+	return req;
+}
+
+static void audio_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+	if (req) {
+		kfree(req->buf);
+		usb_ep_free_request(ep, req);
+	}
+}
+
+static void audio_req_put(struct audio_dev *audio, struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&audio->lock, flags);
+	list_add_tail(&req->list, &audio->idle_reqs);
+	spin_unlock_irqrestore(&audio->lock, flags);
+}
+
+static struct usb_request *audio_req_get(struct audio_dev *audio)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&audio->lock, flags);
+	if (list_empty(&audio->idle_reqs)) {
+		req = NULL;
+	} else {
+		req = list_first_entry(&audio->idle_reqs, struct usb_request,
+				list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&audio->lock, flags);
+	return req;
+}
+
+/* send the appropriate number of packets to match our bitrate */
+static void audio_send(struct audio_dev *audio)
+{
+	struct snd_pcm_runtime *runtime;
+	struct usb_request *req;
+	int length, length1, length2, ret;
+	s64 msecs;
+	s64 frames;
+	ktime_t now;
+
+	/* audio->substream will be null if we have been closed */
+	if (!audio->substream)
+		return;
+	/* audio->buffer_pos will be null if we have been stopped */
+	if (!audio->buffer_pos)
+		return;
+
+	runtime = audio->substream->runtime;
+
+	/* compute number of frames to send */
+	now = ktime_get();
+	msecs = ktime_to_ns(now) - ktime_to_ns(audio->start_time);
+	do_div(msecs, 1000000);
+	frames = msecs * SAMPLE_RATE;
+	do_div(frames, 1000);
+
+	/* Readjust our frames_sent if we fall too far behind.
+	 * If we get too far behind it is better to drop some frames than
+	 * to keep sending data too fast in an attempt to catch up.
+	 */
+	if (frames - audio->frames_sent > 10 * FRAMES_PER_MSEC)
+		audio->frames_sent = frames - FRAMES_PER_MSEC;
+
+	frames -= audio->frames_sent;
+
+	/* We need to send something to keep the pipeline going */
+	if (frames <= 0)
+		frames = FRAMES_PER_MSEC;
+
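+	/*
+	 * Drain idle requests, copying out of the circular PCM buffer
+	 * (wrapping at buffer_end) until this interval's frame budget
+	 * has been queued.
+	 */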
+	while (frames > 0) {
+		req = audio_req_get(audio);
+		if (!req)
+			break;
+
+		length = frames_to_bytes(runtime, frames);
+		if (length > IN_EP_MAX_PACKET_SIZE)
+			length = IN_EP_MAX_PACKET_SIZE;
+
+		if (audio->buffer_pos + length > audio->buffer_end)
+			length1 = audio->buffer_end - audio->buffer_pos;
+		else
+			length1 = length;
+		memcpy(req->buf, audio->buffer_pos, length1);
+		if (length1 < length) {
+			/* Wrap around and copy remaining length
+			 * at beginning of buffer.
+			 */
+			length2 = length - length1;
+			memcpy(req->buf + length1, audio->buffer_start,
+					length2);
+			audio->buffer_pos = audio->buffer_start + length2;
+		} else {
+			audio->buffer_pos += length1;
+			if (audio->buffer_pos >= audio->buffer_end)
+				audio->buffer_pos = audio->buffer_start;
+		}
+
+		req->length = length;
+		ret = usb_ep_queue(audio->in_ep, req, GFP_ATOMIC);
+		if (ret < 0) {
+			pr_err("usb_ep_queue failed ret: %d\n", ret);
+			audio_req_put(audio, req);
+			break;
+		}
+
+		frames -= bytes_to_frames(runtime, length);
+		audio->frames_sent += bytes_to_frames(runtime, length);
+	}
+}
+
+static void audio_control_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	/* nothing to do here */
+}
+
+static void audio_data_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct audio_dev *audio = req->context;
+
+	pr_debug("audio_data_complete req->status %d req->actual %d\n",
+		req->status, req->actual);
+
+	audio_req_put(audio, req);
+
+	if (!audio->buffer_start || req->status)
+		return;
+
+	audio->period_offset += req->actual;
+	if (audio->period_offset >= audio->period) {
+		snd_pcm_period_elapsed(audio->substream);
+		audio->period_offset = 0;
+	}
+	audio_send(audio);
+}
+
+static int audio_set_endpoint_req(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	int value = -EOPNOTSUPP;
+	u16 ep = le16_to_cpu(ctrl->wIndex);
+	u16 len = le16_to_cpu(ctrl->wLength);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+
+	pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+			ctrl->bRequest, w_value, len, ep);
+
+	switch (ctrl->bRequest) {
+	case UAC_SET_CUR:
+	case UAC_SET_MIN:
+	case UAC_SET_MAX:
+	case UAC_SET_RES:
+		value = len;
+		break;
+	default:
+		break;
+	}
+
+	return value;
+}
+
+static int audio_get_endpoint_req(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int value = -EOPNOTSUPP;
+	u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+	u16 len = le16_to_cpu(ctrl->wLength);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+	u8 *buf = cdev->req->buf;
+
+	pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+			ctrl->bRequest, w_value, len, ep);
+
+	if (w_value == UAC_EP_CS_ATTR_SAMPLE_RATE << 8) {
+		switch (ctrl->bRequest) {
+		case UAC_GET_CUR:
+		case UAC_GET_MIN:
+		case UAC_GET_MAX:
+		case UAC_GET_RES:
+			/* return our sample rate */
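+			/* 24-bit little-endian, per UAC; e.g. assuming
+			 * SAMPLE_RATE = 44100 (0x00AC44), the bytes sent
+			 * are 0x44, 0xAC, 0x00 */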
+			buf[0] = (u8)SAMPLE_RATE;
+			buf[1] = (u8)(SAMPLE_RATE >> 8);
+			buf[2] = (u8)(SAMPLE_RATE >> 16);
+			value = 3;
+			break;
+		default:
+			break;
+		}
+	}
+
+	return value;
+}
+
+static int
+audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request *req = cdev->req;
+	int value = -EOPNOTSUPP;
+	u16 w_index = le16_to_cpu(ctrl->wIndex);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+	u16 w_length = le16_to_cpu(ctrl->wLength);
+
+	/* composite driver infrastructure handles everything; interface
+	 * activation uses set_alt().
+	 */
+	switch (ctrl->bRequestType) {
+	case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+		value = audio_set_endpoint_req(f, ctrl);
+		break;
+
+	case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+		value = audio_get_endpoint_req(f, ctrl);
+		break;
+	}
+
+	/* respond with data transfer or status phase? */
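+	/* e.g. a UAC_GET_CUR of the sample rate sets value = 3 above, so a
+	 * 3-byte data stage is queued on ep0; value < 0 leaves the request
+	 * unhandled and the composite core stalls ep0 */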
+	if (value >= 0) {
+		pr_debug("audio req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = 0;
+		req->length = value;
+		req->complete = audio_control_complete;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			pr_err("audio response queue error %d\n", value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+static int audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct audio_dev *audio = func_to_audio(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret;
+
+	pr_debug("audio_set_alt intf %d, alt %d\n", intf, alt);
+
+	ret = config_ep_by_speed(cdev->gadget, f, audio->in_ep);
+	if (ret)
+		return ret;
+
+	usb_ep_enable(audio->in_ep);
+	return 0;
+}
+
+static void audio_disable(struct usb_function *f)
+{
+	struct audio_dev	*audio = func_to_audio(f);
+
+	pr_debug("audio_disable\n");
+	usb_ep_disable(audio->in_ep);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void audio_build_desc(struct audio_dev *audio)
+{
+	u8 *sam_freq;
+	int rate;
+
+	/* Set channel numbers */
+	audio_input_terminal_desc.bNrChannels = 2;
+	audio_as_type_i_desc.bNrChannels = 2;
+
+	/* Set sample rates */
+	rate = SAMPLE_RATE;
+	sam_freq = audio_as_type_i_desc.tSamFreq[0];
+	memcpy(sam_freq, &rate, 3);
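+	/* note: this copies the low 3 bytes of 'rate' only on a
+	 * little-endian host, matching the 24-bit little-endian tSamFreq
+	 * field the descriptor expects */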
+}
+
+/* audio function driver setup/binding */
+static int
+audio_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct audio_dev *audio = func_to_audio(f);
+	int status;
+	struct usb_ep *ep;
+	struct usb_request *req;
+	int i;
+
+	audio_build_desc(audio);
+
+	/* allocate instance-specific interface IDs, and patch descriptors */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	audio_ac_interface_desc.bInterfaceNumber = status;
+
+	/* AUDIO_AC_INTERFACE */
+	audio_ac_header_desc.baInterfaceNr[0] = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	audio_as_interface_alt_0_desc.bInterfaceNumber = status;
+	audio_as_interface_alt_1_desc.bInterfaceNumber = status;
+
+	/* AUDIO_AS_INTERFACE */
+	audio_ac_header_desc.baInterfaceNr[1] = status;
+
+	status = -ENODEV;
+
+	/* allocate our endpoint */
+	ep = usb_ep_autoconfig(cdev->gadget, &audio_fs_as_in_ep_desc);
+	if (!ep)
+		goto fail;
+	audio->in_ep = ep;
+	ep->driver_data = audio; /* claim */
+
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		audio_hs_as_in_ep_desc.bEndpointAddress =
+			audio_fs_as_in_ep_desc.bEndpointAddress;
+
+	f->fs_descriptors = fs_audio_desc;
+	f->hs_descriptors = hs_audio_desc;
+
+	for (i = 0, status = 0; i < IN_EP_REQ_COUNT && status == 0; i++) {
+		req = audio_request_new(ep, IN_EP_MAX_PACKET_SIZE);
+		if (req) {
+			req->context = audio;
+			req->complete = audio_data_complete;
+			audio_req_put(audio, req);
+		} else {
+			status = -ENOMEM;
+		}
+	}
+
+fail:
+	return status;
+}
+
+static void
+audio_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct audio_dev *audio = func_to_audio(f);
+	struct usb_request *req;
+
+	while ((req = audio_req_get(audio)))
+		audio_request_free(req, audio->in_ep);
+
+	snd_card_free_when_closed(audio->card);
+	audio->card = NULL;
+	audio->pcm = NULL;
+	audio->substream = NULL;
+	audio->in_ep = NULL;
+}
+
+static void audio_pcm_playback_start(struct audio_dev *audio)
+{
+	audio->start_time = ktime_get();
+	audio->frames_sent = 0;
+	audio_send(audio);
+}
+
+static void audio_pcm_playback_stop(struct audio_dev *audio)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&audio->lock, flags);
+	audio->buffer_start = 0;
+	audio->buffer_end = 0;
+	audio->buffer_pos = 0;
+	spin_unlock_irqrestore(&audio->lock, flags);
+}
+
+static int audio_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct audio_dev *audio = substream->private_data;
+
+	runtime->private_data = audio;
+	runtime->hw = audio_hw_info;
+	snd_pcm_limit_hw_rates(runtime);
+	runtime->hw.channels_max = 2;
+
+	audio->substream = substream;
+	return 0;
+}
+
+static int audio_pcm_close(struct snd_pcm_substream *substream)
+{
+	struct audio_dev *audio = substream->private_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&audio->lock, flags);
+	audio->substream = NULL;
+	spin_unlock_irqrestore(&audio->lock, flags);
+
+	return 0;
+}
+
+static int audio_pcm_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	unsigned int channels = params_channels(params);
+	unsigned int rate = params_rate(params);
+
+	if (rate != SAMPLE_RATE)
+		return -EINVAL;
+	if (channels != 2)
+		return -EINVAL;
+
+	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
+		params_buffer_bytes(params));
+}
+
+static int audio_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+	return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+
+static int audio_pcm_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct audio_dev *audio = runtime->private_data;
+
+	audio->period = snd_pcm_lib_period_bytes(substream);
+	audio->period_offset = 0;
+	audio->buffer_start = runtime->dma_area;
+	audio->buffer_end = audio->buffer_start
+		+ snd_pcm_lib_buffer_bytes(substream);
+	audio->buffer_pos = audio->buffer_start;
+
+	return 0;
+}
+
+static snd_pcm_uframes_t audio_pcm_pointer(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct audio_dev *audio = runtime->private_data;
+	ssize_t bytes = audio->buffer_pos - audio->buffer_start;
+
+	/* return offset of next frame to fill in our buffer */
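+	/* e.g. for 16-bit stereo (4 bytes per frame, an assumption for
+	 * illustration), a byte offset of 4096 reports frame 1024 */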
+	return bytes_to_frames(runtime, bytes);
+}
+
+static int audio_pcm_playback_trigger(struct snd_pcm_substream *substream,
+					int cmd)
+{
+	struct audio_dev *audio = substream->runtime->private_data;
+	int ret = 0;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		audio_pcm_playback_start(audio);
+		break;
+
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		audio_pcm_playback_stop(audio);
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static struct audio_dev _audio_dev = {
+	.func = {
+		.name = "audio_source",
+		.bind = audio_bind,
+		.unbind = audio_unbind,
+		.set_alt = audio_set_alt,
+		.setup = audio_setup,
+		.disable = audio_disable,
+	},
+	.lock = __SPIN_LOCK_UNLOCKED(_audio_dev.lock),
+	.idle_reqs = LIST_HEAD_INIT(_audio_dev.idle_reqs),
+};
+
+static struct snd_pcm_ops audio_playback_ops = {
+	.open		= audio_pcm_open,
+	.close		= audio_pcm_close,
+	.ioctl		= snd_pcm_lib_ioctl,
+	.hw_params	= audio_pcm_hw_params,
+	.hw_free	= audio_pcm_hw_free,
+	.prepare	= audio_pcm_prepare,
+	.trigger	= audio_pcm_playback_trigger,
+	.pointer	= audio_pcm_pointer,
+};
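+
+/*
+ * The PCM created in audio_source_bind_config() below appears to userspace
+ * as an ordinary ALSA playback device; a hypothetical invocation (card and
+ * device numbers are reported via struct audio_source_config, and the rate
+ * is assumed to be 44100 here):
+ *
+ *	aplay -D hw:<card>,<device> -r 44100 -c 2 -f S16_LE test.wav
+ */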
+
+int audio_source_bind_config(struct usb_configuration *c,
+		struct audio_source_config *config)
+{
+	struct audio_dev *audio;
+	struct snd_card *card;
+	struct snd_pcm *pcm;
+	int err;
+
+	config->card = -1;
+	config->device = -1;
+
+	audio = &_audio_dev;
+
+	err = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+			THIS_MODULE, 0, &card);
+	if (err)
+		return err;
+
+	snd_card_set_dev(card, &c->cdev->gadget->dev);
+
+	err = snd_pcm_new(card, "USB audio source", 0, 1, 0, &pcm);
+	if (err)
+		goto pcm_fail;
+	pcm->private_data = audio;
+	pcm->info_flags = 0;
+	audio->pcm = pcm;
+
+	strlcpy(pcm->name, "USB gadget audio", sizeof(pcm->name));
+
+	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &audio_playback_ops);
+	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+				NULL, 0, 64 * 1024);
+
+	strlcpy(card->driver, "audio_source", sizeof(card->driver));
+	strlcpy(card->shortname, card->driver, sizeof(card->shortname));
+	strlcpy(card->longname, "USB accessory audio source",
+		sizeof(card->longname));
+
+	err = snd_card_register(card);
+	if (err)
+		goto register_fail;
+
+	err = usb_add_function(c, &audio->func);
+	if (err)
+		goto add_fail;
+
+	config->card = pcm->card->number;
+	config->device = pcm->device;
+	audio->card = card;
+	return 0;
+
+add_fail:
+register_fail:
+pcm_fail:
+	snd_card_free(card);
+	return err;
+}
diff --git a/drivers/usb/gadget/f_dvc_dfx.c b/drivers/usb/gadget/f_dvc_dfx.c
new file mode 100644
index 0000000..6137e11
--- /dev/null
+++ b/drivers/usb/gadget/f_dvc_dfx.c
@@ -0,0 +1,922 @@
+/*
+ * Gadget Driver for Android DvC.Dfx Debug Capability
+ *
+ * Copyright (C) 2008-2010, Intel Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/usb/debug.h>
+#include <linux/sdm.h>
+#include <asm/intel_soc_debug.h>
+
+#define DFX_RX_REQ_MAX 1
+#define DFX_TX_REQ_MAX 2
+#define DFX_BULK_REQ_SIZE 64
+
+#define CONFIG_BOARD_MRFLD_VV
+
+struct dvc_dfx_dev {
+	struct usb_function function;
+	struct usb_composite_dev *cdev;
+	spinlock_t lock;
+	u8	ctrl_id, data_id;
+
+	struct usb_ep *ep_in;
+	struct usb_ep *ep_out;
+
+	int transfering;
+	int online;
+	int online_ctrl;
+	int online_data;
+	int error;
+
+	atomic_t read_excl;
+	atomic_t write_excl;
+	atomic_t open_excl;
+
+	wait_queue_head_t read_wq;
+	wait_queue_head_t write_wq;
+
+	struct usb_request *rx_req[DFX_RX_REQ_MAX];
+
+	struct list_head tx_idle;
+	struct list_head tx_xfer;
+};
+
+static struct usb_interface_assoc_descriptor dfx_iad_desc = {
+	.bLength		= sizeof(dfx_iad_desc),
+	.bDescriptorType	= USB_DT_INTERFACE_ASSOCIATION,
+	/* .bFirstInterface	= DYNAMIC, */
+	.bInterfaceCount	= 2, /* debug control + data */
+	.bFunctionClass		= USB_CLASS_DEBUG,
+	.bFunctionSubClass	= USB_SUBCLASS_DVC_DFX,
+	/* .bFunctionProtocol	= DC_PROTOCOL_VENDOR, */
+	/* .iFunction		= 0, */
+};
+
+static struct usb_interface_descriptor dfx_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bNumEndpoints          = 0,
+	.bInterfaceClass        = USB_CLASS_DEBUG,
+	.bInterfaceSubClass     = USB_SUBCLASS_DEBUG_CONTROL,
+	/* .bInterfaceProtocol     = DC_PROTOCOL_VENDOR, */
+};
+
+#define DC_DBG_ATTRI_LENGTH	DC_DBG_ATTRI_SIZE(2, 32)
+/* 1 input terminal, 1 output terminal and 1 feature unit */
+#define DC_DBG_TOTAL_LENGTH (DC_DBG_ATTRI_LENGTH)
+
+DECLARE_DC_DEBUG_ATTR_DESCR(DVCD, 2, 32);
+
+static struct DC_DEBUG_ATTR_DESCR(DVCD) dfx_debug_attri_desc = {
+	.bLength		= DC_DBG_ATTRI_LENGTH,
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= DC_DEBUG_ATTRIBUTES,
+	.bcdDC			= __constant_cpu_to_le16(0x0100),
+	.wTotalLength		= __constant_cpu_to_le16(DC_DBG_TOTAL_LENGTH),
+	.bmSupportedFeatures	= 0, /* Debug Event Supported, per SAS */
+	.bControlSize		= 2,
+	.bmControl		= {	/* per SAS */
+		[0]		= 0xFF,
+		[1]		= 0x3F,
+	},
+	.wAuxDataSize		= __constant_cpu_to_le16(0x20),
+/* per SAS v0.3 */
+	.dInputBufferSize	= __constant_cpu_to_le32(0x40),
+	.dOutputBufferSize	= __constant_cpu_to_le32(0x80),
+	.qBaseAddress		= 0, /* revision */
+	.hGlobalID		= { /* revision */
+		[0]		= 0,
+		[1]		= 0,
+	}
+};
+
+static struct usb_interface_descriptor dfx_data_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bAlternateSetting	= 0,
+	.bNumEndpoints          = 2,
+	.bInterfaceClass        = USB_CLASS_DEBUG,
+	.bInterfaceSubClass     = USB_SUBCLASS_DVC_DFX,
+	/* .bInterfaceProtocol     = DC_PROTOCOL_VENDOR, */
+};
+
+static struct usb_endpoint_descriptor dfx_fullspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor dfx_fullspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor dfx_highspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor dfx_highspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor dfx_superspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor dfx_superspeed_in_comp_desc = {
+	.bLength		= USB_DT_SS_EP_COMP_SIZE,
+	.bDescriptorType	= USB_DT_SS_ENDPOINT_COMP,
+	.bMaxBurst		= 0,
+	.bmAttributes		= 0,
+};
+
+static struct usb_endpoint_descriptor dfx_superspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor dfx_superspeed_out_comp_desc = {
+	.bLength		= USB_DT_SS_EP_COMP_SIZE,
+	.bDescriptorType	= USB_DT_SS_ENDPOINT_COMP,
+	.bMaxBurst		= 0,
+	.bmAttributes		= 0,
+};
+
+/* no INPUT/OUTPUT CONNECTION and UNIT descriptors for DvC.DFx */
+static struct usb_descriptor_header *fs_dfx_descs[] = {
+	(struct usb_descriptor_header *) &dfx_iad_desc,
+	(struct usb_descriptor_header *) &dfx_data_interface_desc,
+	(struct usb_descriptor_header *) &dfx_fullspeed_in_desc,
+	(struct usb_descriptor_header *) &dfx_fullspeed_out_desc,
+
+	(struct usb_descriptor_header *) &dfx_interface_desc,
+	(struct usb_descriptor_header *) &dfx_debug_attri_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *hs_dfx_descs[] = {
+	(struct usb_descriptor_header *) &dfx_iad_desc,
+	(struct usb_descriptor_header *) &dfx_data_interface_desc,
+	(struct usb_descriptor_header *) &dfx_highspeed_in_desc,
+	(struct usb_descriptor_header *) &dfx_highspeed_out_desc,
+
+	(struct usb_descriptor_header *) &dfx_interface_desc,
+	(struct usb_descriptor_header *) &dfx_debug_attri_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *ss_dfx_descs[] = {
+	(struct usb_descriptor_header *) &dfx_iad_desc,
+	(struct usb_descriptor_header *) &dfx_data_interface_desc,
+	(struct usb_descriptor_header *) &dfx_superspeed_in_desc,
+	(struct usb_descriptor_header *) &dfx_superspeed_in_comp_desc,
+	(struct usb_descriptor_header *) &dfx_superspeed_out_desc,
+	(struct usb_descriptor_header *) &dfx_superspeed_out_comp_desc,
+
+	(struct usb_descriptor_header *) &dfx_interface_desc,
+	(struct usb_descriptor_header *) &dfx_debug_attri_desc,
+	NULL,
+};
+
+/* string descriptors: */
+
+#define DVCDFX_CTRL_IDX	0
+#define DVCDFX_DATA_IDX	1
+#define DVCDFX_IAD_IDX	2
+
+/* static strings, in UTF-8 */
+static struct usb_string dfx_string_defs[] = {
+	[DVCDFX_CTRL_IDX].s = "Debug Sub-Class DvC.DFx (Control)",
+	[DVCDFX_DATA_IDX].s = "Debug Sub-Class DvC.DFx (Data)",
+	[DVCDFX_IAD_IDX].s = "Debug Sub-Class DvC.DFx",
+	{  /* ZEROES END LIST */ },
+};
+
+static struct usb_gadget_strings dfx_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		dfx_string_defs,
+};
+
+static struct usb_gadget_strings *dfx_strings[] = {
+	&dfx_string_table,
+	NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* temporary variable used between dvc_dfx_open() and dvc_dfx_function_bind() */
+static struct dvc_dfx_dev *_dvc_dfx_dev;
+
+static inline struct dvc_dfx_dev *func_to_dvc_dfx(struct usb_function *f)
+{
+	return container_of(f, struct dvc_dfx_dev, function);
+}
+
+static int dvc_dfx_is_enabled(void)
+{
+	if ((!cpu_has_debug_feature(DEBUG_FEATURE_USB3DFX)) ||
+	    (!stm_is_enabled())) {
+		pr_info("%s STM and/or USB3DFX is not enabled\n", __func__);
+		return 0;
+	}
+	return 1;
+}
+
+static struct usb_request *dvc_dfx_request_new(struct usb_ep *ep,
+					       int buffer_size, dma_addr_t dma)
+{
+	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!req)
+		return NULL;
+
+	req->dma = dma;
+	/* now allocate buffers for the requests */
+	req->buf = kmalloc(buffer_size, GFP_KERNEL);
+	if (!req->buf) {
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+
+	return req;
+}
+
+static void dvc_dfx_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+	if (req) {
+		kfree(req->buf);
+		usb_ep_free_request(ep, req);
+	}
+}
+
+/* add a request to the tail of a list */
+static void dvc_dfx_req_put(struct dvc_dfx_dev *dev, struct list_head *head,
+		struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add_tail(&req->list, head);
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request *dvc_dfx_req_get(struct dvc_dfx_dev *dev,
+					   struct list_head *head)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (list_empty(head)) {
+		req = NULL;
+	} else {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return req;
+}
+
+static void dvc_dfx_set_disconnected(struct dvc_dfx_dev *dev)
+{
+	dev->transfering = 0;
+}
+
+static void dvc_dfx_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+	struct dvc_dfx_dev *dev = _dvc_dfx_dev;
+
+	if (req->status != 0)
+		dvc_dfx_set_disconnected(dev);
+
+	dvc_dfx_req_put(dev, &dev->tx_idle, req);
+
+	wake_up(&dev->write_wq);
+}
+
+static void dvc_dfx_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+	struct dvc_dfx_dev *dev = _dvc_dfx_dev;
+
+	if (req->status != 0)
+		dvc_dfx_set_disconnected(dev);
+	wake_up(&dev->read_wq);
+}
+
+
+static inline int dvc_dfx_lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) == 1) {
+		return 0;
+	} else {
+		atomic_dec(excl);
+		return -1;
+	}
+}
+
+static inline void dvc_dfx_unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
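+
+/*
+ * Usage sketch for the try-lock pair above (mirrors the calls in
+ * dvc_dfx_open() and dvc_dfx_start_transfer() below):
+ *
+ *	if (dvc_dfx_lock(&dev->open_excl))
+ *		return -EBUSY;
+ *	...critical section...
+ *	dvc_dfx_unlock(&dev->open_excl);
+ *
+ * atomic_inc_return() == 1 means we were the only taker; any other value
+ * backs out the increment and reports the lock as busy.
+ */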
+
+static int dfx_create_bulk_endpoints(struct dvc_dfx_dev *dev,
+				     struct usb_endpoint_descriptor *in_desc,
+				     struct usb_endpoint_descriptor *out_desc,
+				     struct usb_ss_ep_comp_descriptor *in_comp_desc,
+				     struct usb_ss_ep_comp_descriptor *out_comp_desc
+	)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	struct usb_ep *ep;
+	int i;
+
+	pr_debug("%s dev: %p\n", __func__, dev);
+
+	in_desc->bEndpointAddress |= 0x8;
+	ep = usb_ep_autoconfig_ss(cdev->gadget, in_desc, in_comp_desc);
+	if (!ep) {
+		pr_debug("%s for ep_in failed\n", __func__);
+		return -ENODEV;
+	}
+	pr_debug("%s for ep_in got %s\n", __func__, ep->name);
+
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_in = ep;
+
+	out_desc->bEndpointAddress |= 0x8;
+	ep = usb_ep_autoconfig_ss(cdev->gadget, out_desc, out_comp_desc);
+	if (!ep) {
+		pr_debug("%s for ep_out failed\n", __func__);
+		return -ENODEV;
+	}
+	pr_debug("%s for ep_out got %s\n", __func__, ep->name);
+
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_out = ep;
+
+	/* now allocate requests for our endpoints */
+	for (i = 0; i < DFX_TX_REQ_MAX; i++) {
+		if (!(i % 2))
+			req = dvc_dfx_request_new(dev->ep_in,
+				DFX_BULK_BUFFER_SIZE,
+				(dma_addr_t)DFX_BULK_IN_BUFFER_ADDR);
+		else
+			req = dvc_dfx_request_new(dev->ep_in,
+				DFX_BULK_BUFFER_SIZE,
+				(dma_addr_t)DFX_BULK_IN_BUFFER_ADDR_2);
+		if (!req)
+			goto fail;
+		req->complete = dvc_dfx_complete_in;
+		dvc_dfx_req_put(dev, &dev->tx_idle, req);
+	}
+	for (i = 0; i < DFX_RX_REQ_MAX; i++) {
+		req = dvc_dfx_request_new(dev->ep_out, DFX_BULK_BUFFER_SIZE,
+			(dma_addr_t)DFX_BULK_OUT_BUFFER_ADDR);
+		if (!req)
+			goto fail;
+		req->complete = dvc_dfx_complete_out;
+		dev->rx_req[i] = req;
+	}
+
+	return 0;
+
+fail:
+	pr_err("%s could not allocate requests\n", __func__);
+	while ((req = dvc_dfx_req_get(dev, &dev->tx_idle)))
+		dvc_dfx_request_free(req, dev->ep_in);
+	for (i = 0; i < DFX_RX_REQ_MAX; i++)
+		dvc_dfx_request_free(dev->rx_req[i], dev->ep_out);
+	return -1;
+}
+
+static ssize_t dvc_dfx_start_transfer(size_t count)
+{
+	struct dvc_dfx_dev *dev = _dvc_dfx_dev;
+	struct usb_request *req;
+	int r = count, xfer;
+	int ret = -ENODEV;
+
+
+	pr_info("%s start\n", __func__);
+	if (!_dvc_dfx_dev)
+		return ret;
+
+	if (dvc_dfx_lock(&dev->read_excl))
+		return -EBUSY;
+	if (dvc_dfx_lock(&dev->write_excl)) {
+		dvc_dfx_unlock(&dev->read_excl);
+		return -EBUSY;
+	}
+
+	/* we will block until enumeration completes */
+	while (!(dev->online || dev->error)) {
+		pr_debug("%s waiting for online state\n", __func__);
+		ret = wait_event_interruptible(dev->read_wq,
+				(dev->online || dev->error));
+
+		if (ret < 0) {
+			/* not at CONFIGURED state */
+			pr_info("%s USB not at CONFIGURED\n", __func__);
+			dvc_dfx_unlock(&dev->read_excl);
+			dvc_dfx_unlock(&dev->write_excl);
+			return ret;
+		}
+	}
+
+	/* queue an ep_in endless request */
+	while (r > 0) {
+		if (dev->error) {
+			pr_debug("%s dev->error\n", __func__);
+			r = -EIO;
+			break;
+		}
+
+		if (!dev->online) {
+			pr_debug("%s !dev->online issue\n", __func__);
+			r = -EIO;
+			break;
+		}
+
+		/* get an idle tx request to use */
+		req = NULL;
+		ret = wait_event_interruptible(dev->write_wq,
+				dev->error || !dev->online ||
+				(req = dvc_dfx_req_get(dev, &dev->tx_idle)));
+
+		if (ret < 0) {
+			r = ret;
+			break;
+		}
+
+		if (req != 0) {
+			if (count > DFX_BULK_BUFFER_SIZE)
+				xfer = DFX_BULK_BUFFER_SIZE;
+			else
+				xfer = count;
+
+			req->no_interrupt = 1;
+			req->context = &dev->function;
+			req->length = xfer;
+			pr_debug("%s queue tx_idle list req to dev->ep_in\n",
+				__func__);
+			ret = usb_ep_queue(dev->ep_in, req, GFP_ATOMIC);
+			if (ret < 0) {
+				pr_err("%s xfer error %d\n", __func__, ret);
+				dev->error = 1;
+				r = -EIO;
+				break;
+			}
+			pr_debug("%s xfer=%d/%d  queued req/%p\n", __func__,
+				xfer, r, req);
+			dvc_dfx_req_put(dev, &dev->tx_xfer, req);
+			r -= xfer;
+
+			/* zero this so we don't try to free it on error exit */
+			req = NULL;
+		}
+	}
+	if (req) {
+		pr_debug("%s req re-added to tx_idle on error\n", __func__);
+		dvc_dfx_req_put(dev, &dev->tx_idle, req);
+	}
+
+	pr_debug("%s rx_req to dev->ep_out\n", __func__);
+	/* queue an ep_out endless request */
+	req = dev->rx_req[0];
+	req->length = DFX_BULK_BUFFER_SIZE;
+	req->no_interrupt = 1;
+	req->context = &dev->function;
+	ret = usb_ep_queue(dev->ep_out, req, GFP_ATOMIC);
+	if (ret < 0) {
+		pr_err("%s failed to queue out req %p (%d)\n",
+		       __func__, req, req->length);
+		r = -EIO;
+	} else {
+		dev->transfering = 1;
+	}
+
+	dvc_dfx_unlock(&dev->read_excl);
+	dvc_dfx_unlock(&dev->write_excl);
+	pr_debug("%s returning\n", __func__);
+	return r;
+}
+
+static int dvc_dfx_disable_transfer(void)
+{
+	struct dvc_dfx_dev *dev = _dvc_dfx_dev;
+	struct usb_request *req;
+	int r = 1;
+	int ret;
+
+
+	pr_info("%s start\n", __func__);
+	if (!_dvc_dfx_dev)
+		return -ENODEV;
+
+	if (dvc_dfx_lock(&dev->read_excl))
+		return -EBUSY;
+	if (dvc_dfx_lock(&dev->write_excl)) {
+		dvc_dfx_unlock(&dev->read_excl);
+		return -EBUSY;
+	}
+
+	if (dev->error) {
+		pr_debug("%s dev->error\n", __func__);
+		r = -EIO;
+		goto end;
+	}
+
+	if ((!dev->online) || (!dev->transfering)) {
+		pr_debug("%s !dev->online OR !dev->transfering\n", __func__);
+		r = -EIO;
+		goto end;
+	}
+
+	/* get an xfer tx request to use */
+	while ((req = dvc_dfx_req_get(dev, &dev->tx_xfer))) {
+		ret = usb_ep_dequeue(dev->ep_in, req);
+		if (ret < 0) {
+			pr_err("%s dequeue error %d\n", __func__, ret);
+			dev->error = 1;
+			r = -EIO;
+			goto end;
+		}
+		pr_debug("%s dequeued tx req/%p\n", __func__, req);
+	}
+	ret = usb_ep_dequeue(dev->ep_out, dev->rx_req[0]);
+	if (ret < 0) {
+		pr_err("%s dequeue rx error %d\n", __func__, ret);
+		dev->error = 1;
+		r = -EIO;
+		goto end;
+	}
+
+end:
+	dvc_dfx_unlock(&dev->read_excl);
+	dvc_dfx_unlock(&dev->write_excl);
+	return r;
+}
+
+static int dvc_dfx_open(struct inode *ip, struct file *fp)
+{
+	pr_info("%s\n", __func__);
+	if (!_dvc_dfx_dev)
+		return -ENODEV;
+
+	if (dvc_dfx_lock(&_dvc_dfx_dev->open_excl))
+		return -EBUSY;
+
+	fp->private_data = _dvc_dfx_dev;
+
+	/* clear the error latch */
+	_dvc_dfx_dev->error = 0;
+	_dvc_dfx_dev->transfering = 0;
+
+	return 0;
+}
+
+static int dvc_dfx_release(struct inode *ip, struct file *fp)
+{
+	pr_info("%s\n", __func__);
+
+	dvc_dfx_unlock(&_dvc_dfx_dev->open_excl);
+	return 0;
+}
+
+/* file operations for DvC.Dfx device /dev/usb_dvc_dfx */
+static const struct file_operations dvc_dfx_fops = {
+	.owner = THIS_MODULE,
+	.open = dvc_dfx_open,
+	.release = dvc_dfx_release,
+};
+
+static struct miscdevice dvc_dfx_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "usb_dvc_dfx",
+	.fops = &dvc_dfx_fops,
+};
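+
+/*
+ * A minimal user-space sketch (hypothetical): opening the node arms the
+ * function and clears the error latch; a second open fails with -EBUSY.
+ *
+ *	int fd = open("/dev/usb_dvc_dfx", O_RDWR);
+ *	if (fd < 0)
+ *		return -1;
+ *	...
+ *	close(fd);
+ */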
+
+static int dvc_dfx_ctrlrequest(struct usb_composite_dev *cdev,
+				const struct usb_ctrlrequest *ctrl)
+{
+	struct dvc_dfx_dev *dev = _dvc_dfx_dev;
+	int	value = -EOPNOTSUPP;
+	u16	w_index = le16_to_cpu(ctrl->wIndex);
+	u16	w_value = le16_to_cpu(ctrl->wValue);
+	u16	w_length = le16_to_cpu(ctrl->wLength);
+
+	pr_debug("%s %02x.%02x v%04x i%04x l%u\n", __func__,
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+
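+	/* The case keys below pack bRequestType and bRequest into one
+	 * value; (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) is
+	 * 0x21, so DC_REQUEST_SET_RESET matches setup packets of the form
+	 * 0x21rr, where rr is the request code. */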
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	/* DC_REQUEST_SET_RESET ... stop active transfer */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+		| DC_REQUEST_SET_RESET:
+		if (w_index != dev->data_id)
+			goto invalid;
+
+		pr_info("%s DC_REQUEST_SET_RESET v%04x i%04x l%u\n", __func__,
+			w_value, w_index, w_length);
+
+		dvc_dfx_disable_transfer();
+		value = 0;
+		break;
+
+	default:
+invalid:
+		pr_debug("unknown class-specific control req "
+			 "%02x.%02x v%04x i%04x l%u\n",
+			 ctrl->bRequestType, ctrl->bRequest,
+			 w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		cdev->req->zero = 0;
+		cdev->req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+		if (value < 0)
+			pr_err("%s setup response queue error\n", __func__);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+static int
+dvc_dfx_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev	*cdev = c->cdev;
+	struct dvc_dfx_dev		*dev = func_to_dvc_dfx(f);
+	int			id;
+	int			ret;
+
+	dev->cdev = cdev;
+	pr_info("%s dev: %p\n", __func__, dev);
+
+	/* allocate interface ID(s) */
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	dev->data_id = id;
+	dfx_data_interface_desc.bInterfaceNumber = id;
+	dfx_iad_desc.bFirstInterface = id;
+
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	dev->ctrl_id = id;
+	dfx_interface_desc.bInterfaceNumber = id;
+
+	/* allocate endpoints */
+	ret = dfx_create_bulk_endpoints(dev, &dfx_fullspeed_in_desc,
+					&dfx_fullspeed_out_desc,
+					&dfx_superspeed_in_comp_desc,
+					&dfx_superspeed_out_comp_desc
+		);
+	if (ret)
+		return ret;
+
+	/* support high speed hardware */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		dfx_highspeed_in_desc.bEndpointAddress =
+			dfx_fullspeed_in_desc.bEndpointAddress;
+		dfx_highspeed_out_desc.bEndpointAddress =
+			dfx_fullspeed_out_desc.bEndpointAddress;
+	}
+
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		dfx_superspeed_in_desc.bEndpointAddress =
+			dfx_fullspeed_in_desc.bEndpointAddress;
+
+		dfx_superspeed_out_desc.bEndpointAddress =
+			dfx_fullspeed_out_desc.bEndpointAddress;
+	}
+
+	pr_info("%s speed %s: IN/%s, OUT/%s\n",
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			f->name, dev->ep_in->name, dev->ep_out->name);
+	return 0;
+}
+
+static void
+dvc_dfx_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct dvc_dfx_dev	*dev = func_to_dvc_dfx(f);
+	struct usb_request *req;
+	int i;
+
+	dev->online = 0;
+	dev->online_ctrl = 0;
+	dev->online_data = 0;
+	dev->transfering = 0;
+	dev->error = 0;
+
+	dfx_string_defs[DVCDFX_CTRL_IDX].id = 0;
+
+	wake_up(&dev->read_wq);
+
+	for (i = 0; i < DFX_RX_REQ_MAX; i++)
+		dvc_dfx_request_free(dev->rx_req[i], dev->ep_out);
+	while ((req = dvc_dfx_req_get(dev, &dev->tx_idle)))
+		dvc_dfx_request_free(req, dev->ep_in);
+}
+
+static int dvc_dfx_function_set_alt(struct usb_function *f,
+		unsigned intf, unsigned alt)
+{
+	struct dvc_dfx_dev	*dev = func_to_dvc_dfx(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret;
+
+	pr_info("%s intf: %d alt: %d\n", __func__, intf, alt);
+	if (intf == dfx_data_interface_desc.bInterfaceNumber) {
+		ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+		if (ret) {
+			pr_err("%s intf: %d alt: %d ep_by_speed in error %d\n",
+				__func__, intf, alt, ret);
+			return ret;
+		}
+		ret = usb_ep_enable(dev->ep_in);
+		if (ret) {
+			pr_err("%s intf: %d alt: %d ep_enable in err %d\n",
+				__func__, intf, alt, ret);
+			return ret;
+		}
+
+		ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+		if (ret) {
+			pr_err("%s intf: %d alt: %d ep_enable out error %d\n",
+				__func__, intf, alt, ret);
+			return ret;
+		}
+
+		ret = usb_ep_enable(dev->ep_out);
+		if (ret) {
+			pr_err("%s intf: %d alt: %d ep_enable out err %d\n",
+				__func__, intf, alt, ret);
+			usb_ep_disable(dev->ep_in);
+			return ret;
+		}
+		dev->online_data = 1;
+	}
+	if (intf == dfx_interface_desc.bInterfaceNumber)
+		dev->online_ctrl = 1;
+
+	if (dev->online_data && dev->online_ctrl) {
+		dev->online = 1;
+		dev->error = 0;
+	}
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+	return 0;
+}
+
+static void dvc_dfx_function_disable(struct usb_function *f)
+{
+	struct dvc_dfx_dev	*dev = func_to_dvc_dfx(f);
+	struct usb_composite_dev	*cdev = dev->cdev;
+
+	pr_info("%s cdev %p\n", __func__, cdev);
+
+	if (dev->transfering)
+		dvc_dfx_disable_transfer();
+
+	dev->online = 0;
+	dev->online_ctrl = 0;
+	dev->online_data = 0;
+	dev->error = 0;
+	usb_ep_disable(dev->ep_in);
+	usb_ep_disable(dev->ep_out);
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+
+	pr_debug("%s disabled\n", dev->function.name);
+}
+
+static int dvc_dfx_bind_config(struct usb_configuration *c)
+{
+	struct dvc_dfx_dev *dev = _dvc_dfx_dev;
+	int status;
+
+	pr_info("%s\n", __func__);
+
+	if (dfx_string_defs[DVCDFX_CTRL_IDX].id == 0) {
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		dfx_string_defs[DVCDFX_CTRL_IDX].id = status;
+
+		dfx_interface_desc.iInterface = status;
+
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		dfx_string_defs[DVCDFX_DATA_IDX].id = status;
+
+		dfx_data_interface_desc.iInterface = status;
+
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		dfx_string_defs[DVCDFX_IAD_IDX].id = status;
+
+		dfx_iad_desc.iFunction = status;
+	}
+
+	dev->cdev = c->cdev;
+	dev->function.name = "dvcdfx";
+	dev->function.fs_descriptors = fs_dfx_descs;
+	dev->function.hs_descriptors = hs_dfx_descs;
+	dev->function.ss_descriptors = ss_dfx_descs;
+	dev->function.strings = dfx_strings;
+	dev->function.bind = dvc_dfx_function_bind;
+	dev->function.unbind = dvc_dfx_function_unbind;
+	dev->function.set_alt = dvc_dfx_function_set_alt;
+	dev->function.disable = dvc_dfx_function_disable;
+
+	return usb_add_function(c, &dev->function);
+}
+
+static int dvc_dfx_setup(void)
+{
+	struct dvc_dfx_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	spin_lock_init(&dev->lock);
+
+	init_waitqueue_head(&dev->read_wq);
+	init_waitqueue_head(&dev->write_wq);
+
+	INIT_LIST_HEAD(&dev->tx_idle);
+	INIT_LIST_HEAD(&dev->tx_xfer);
+
+	atomic_set(&dev->open_excl, 0);
+	atomic_set(&dev->read_excl, 0);
+	atomic_set(&dev->write_excl, 0);
+
+	_dvc_dfx_dev = dev;
+
+	ret = misc_register(&dvc_dfx_device);
+	if (ret)
+		goto err;
+
+	return 0;
+
+err:
+	_dvc_dfx_dev = NULL;
+	kfree(dev);
+	pr_err("DvC.Dfx gadget driver failed to initialize\n");
+	return ret;
+}
+
+static void dvc_dfx_cleanup(void)
+{
+	misc_deregister(&dvc_dfx_device);
+
+	kfree(_dvc_dfx_dev);
+	_dvc_dfx_dev = NULL;
+}
diff --git a/drivers/usb/gadget/f_dvc_trace.c b/drivers/usb/gadget/f_dvc_trace.c
new file mode 100644
index 0000000..4837bf1
--- /dev/null
+++ b/drivers/usb/gadget/f_dvc_trace.c
@@ -0,0 +1,887 @@
+/*
+ * Gadget Driver for Android DvC.Trace Debug Capability
+ *
+ * Copyright (C) 2008-2010, Intel Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/usb/debug.h>
+#include <linux/sdm.h>
+
+#define TRACE_TX_REQ_MAX 3
+
+#define CONFIG_BOARD_MRFLD_VV
+
+struct dvc_trace_dev {
+	struct usb_function function;
+	struct usb_composite_dev *cdev;
+	spinlock_t lock;
+	u8	ctrl_id, data_id;
+	u8	class_id, subclass_id;
+
+	struct usb_ep *ep_in;
+
+	int online;
+	int online_data;
+	int online_ctrl;
+	int transfering;
+	int error;
+
+	atomic_t write_excl;
+	atomic_t open_excl;
+
+	wait_queue_head_t write_wq;
+
+	struct list_head tx_idle;
+	struct list_head tx_xfer;
+};
+
+static struct usb_interface_assoc_descriptor trace_iad_desc = {
+	.bLength		= sizeof(trace_iad_desc),
+	.bDescriptorType	= USB_DT_INTERFACE_ASSOCIATION,
+	/* .bFirstInterface	= DYNAMIC, */
+	.bInterfaceCount	= 2, /* debug control + data */
+	.bFunctionClass		= USB_CLASS_DEBUG,
+	.bFunctionSubClass	= USB_SUBCLASS_DVC_TRACE,
+	/* .bFunctionProtocol	= 0, */
+	/* .iFunction		= DYNAMIC, */
+};
+
+static struct usb_interface_descriptor trace_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bNumEndpoints          = 0,
+	.bInterfaceClass        = USB_CLASS_DEBUG,
+	.bInterfaceSubClass     = USB_SUBCLASS_DEBUG_CONTROL,
+	/* .bInterfaceProtocol     = 0, */
+};
+
+#define DC_DVCTRACE_ATTRI_LENGTH	DC_DBG_ATTRI_SIZE(2, 32)
+/* 1 input terminal, 1 output terminal and 1 feature unit */
+#define DC_DVCTRACE_TOTAL_LENGTH (DC_DVCTRACE_ATTRI_LENGTH \
+	+ DC_OUTPUT_CONNECTION_SIZE \
+	+ DC_OUTPUT_CONNECTION_SIZE \
+	+ DC_DBG_UNIT_SIZE(STM_NB_IN_PINS, 2, 2, 24))
+
+DECLARE_DC_DEBUG_ATTR_DESCR(DVCT, 2, 32);
+
+static struct DC_DEBUG_ATTR_DESCR(DVCT) trace_debug_attri_desc = {
+	.bLength		= DC_DVCTRACE_ATTRI_LENGTH,
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= DC_DEBUG_ATTRIBUTES,
+	.bcdDC			= __constant_cpu_to_le16(0x0100),
+	.wTotalLength	= __constant_cpu_to_le16(DC_DVCTRACE_TOTAL_LENGTH),
+	.bmSupportedFeatures	= 0, /* Debug Event Supported, per SAS */
+	.bControlSize		= 2,
+	.bmControl		= {	/* per SAS */
+		[0]		= 0xFF,
+		[1]		= 0x3F,
+	},
+	.wAuxDataSize		= __constant_cpu_to_le16(0x20),
+	.dInputBufferSize	= __constant_cpu_to_le32(0x00), /* per SAS */
+	.dOutputBufferSize = __constant_cpu_to_le32(TRACE_BULK_BUFFER_SIZE),
+	.qBaseAddress		= 0, /* revision */
+	.hGlobalID		= { /* revision */
+		[0]		= 0,
+		[1]		= 0,
+	}
+};
+
+static struct dc_output_connection_descriptor trace_output_conn_usb_desc = {
+	.bLength		= DC_OUTPUT_CONNECTION_SIZE,
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= DC_OUTPUT_CONNECTION,
+	.bConnectionID		= 0x01, /* USB */
+	.bConnectionType	= DC_CONNECTION_USB,
+	.bAssocConnection	= 0, /* No related input-connection */
+	.wSourceID		= __constant_cpu_to_le16(0x01),
+	/* .iConnection		= DYNAMIC, */
+};
+
+static struct dc_output_connection_descriptor trace_output_conn_pti_desc = {
+	.bLength		= DC_OUTPUT_CONNECTION_SIZE,
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= DC_OUTPUT_CONNECTION,
+	.bConnectionID		= 0, /* PTI */
+	.bConnectionType	= DC_CONNECTION_DEBUG_DATA,
+	.bAssocConnection	= 0, /* No related input-connection */
+	.wSourceID		= __constant_cpu_to_le16(0x01),
+	/* .iConnection		= DYNAMIC, */
+};
+
+#define DC_DVCTRACE_UNIT_LENGTH	DC_DBG_UNIT_SIZE(STM_NB_IN_PINS, 2, 2, 24)
+
+DECLARE_DC_DEBUG_UNIT_DESCRIPTOR(STM_NB_IN_PINS, 2, 2, 24);
+
+static struct DC_DEBUG_UNIT_DESCRIPTOR(STM_NB_IN_PINS, 2, 2, 24)
+		trace_debug_unit_stm_desc = {
+	.bLength		= DC_DVCTRACE_UNIT_LENGTH,
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= DC_DEBUG_UNIT,
+	.bUnitID		= 0x01, /* per SAS */
+/* STM Trace Unit processor: revision */
+	.bDebugUnitType		= DC_UNIT_TYPE_TRACE_PROC,
+ /* STM: Trace compressor controller */
+	.bDebugSubUnitType	= DC_UNIT_SUBTYPE_TRACEZIP,
+	.bAliasUnitID		= 0, /* no associated debug unit */
+	.bNrInPins		= STM_NB_IN_PINS, /* p */
+/* wSourceID  contains STM_NB_IN_PINS elements */
+/*	.wSourceID		= {0}, */
+	.bNrOutPins		= 0x02,	/* q */
+	.dTraceFormat		= {
+		[0]	= __constant_cpu_to_le32(DC_TRACE_MIPI_FORMATED_STPV1),
+		[1]	= __constant_cpu_to_le32(DC_TRACE_MIPI_FORMATED_STPV1),
+	},
+	.dStreamID		= __constant_cpu_to_le32(0xFFFFFFFF),
+	.bControlSize		= 0x02,	/* n */
+	.bmControl		= {
+		[0]		= 0xFF,
+		[1]		= 0x3F,
+	},
+	.wAuxDataSize		= __constant_cpu_to_le16(24), /* m */
+	.qBaseAddress		= 0, /* revision */
+	.hIPID			= {
+		[0]		= 0,
+		[1]		= 0,
+	},
+	/* .iDebugUnitType		= DYNAMIC, */
+};
+
+static struct usb_interface_descriptor trace_data_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bAlternateSetting	= 0,
+	.bNumEndpoints          = 1,
+	.bInterfaceClass        = USB_CLASS_DEBUG,
+	.bInterfaceSubClass     = USB_SUBCLASS_DVC_TRACE,
+	/* .bInterfaceProtocol     = 0, */
+};
+
+static struct usb_endpoint_descriptor trace_fullspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor trace_highspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor trace_superspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor trace_superspeed_in_comp_desc = {
+	.bLength		= USB_DT_SS_EP_COMP_SIZE,
+	.bDescriptorType	= USB_DT_SS_ENDPOINT_COMP,
+	.bMaxBurst		= 0,
+	.bmAttributes		= 0,
+};
+
+static struct usb_descriptor_header *fs_trace_descs[] = {
+	(struct usb_descriptor_header *) &trace_iad_desc,
+	(struct usb_descriptor_header *) &trace_data_interface_desc,
+	(struct usb_descriptor_header *) &trace_fullspeed_in_desc,
+	(struct usb_descriptor_header *) &trace_interface_desc,
+	(struct usb_descriptor_header *) &trace_debug_attri_desc,
+	(struct usb_descriptor_header *) &trace_output_conn_pti_desc,
+	(struct usb_descriptor_header *) &trace_output_conn_usb_desc,
+	(struct usb_descriptor_header *) &trace_debug_unit_stm_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *hs_trace_descs[] = {
+	(struct usb_descriptor_header *) &trace_iad_desc,
+	(struct usb_descriptor_header *) &trace_data_interface_desc,
+	(struct usb_descriptor_header *) &trace_highspeed_in_desc,
+	(struct usb_descriptor_header *) &trace_interface_desc,
+	(struct usb_descriptor_header *) &trace_debug_attri_desc,
+	(struct usb_descriptor_header *) &trace_output_conn_pti_desc,
+	(struct usb_descriptor_header *) &trace_output_conn_usb_desc,
+	(struct usb_descriptor_header *) &trace_debug_unit_stm_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *ss_trace_descs[] = {
+	(struct usb_descriptor_header *) &trace_iad_desc,
+	(struct usb_descriptor_header *) &trace_data_interface_desc,
+	(struct usb_descriptor_header *) &trace_superspeed_in_desc,
+	(struct usb_descriptor_header *) &trace_superspeed_in_comp_desc,
+	(struct usb_descriptor_header *) &trace_interface_desc,
+	(struct usb_descriptor_header *) &trace_debug_attri_desc,
+	(struct usb_descriptor_header *) &trace_output_conn_pti_desc,
+	(struct usb_descriptor_header *) &trace_output_conn_usb_desc,
+	(struct usb_descriptor_header *) &trace_debug_unit_stm_desc,
+	NULL,
+};
+
+/* string descriptors: */
+#define DVCTRACE_CTRL_IDX	0
+#define DVCTRACE_DATA_IDX	1
+#define DVCTRACE_IAD_IDX	2
+#define DVCTRACE_CONN_PTI_IDX	3
+#define DVCTRACE_CONN_USB_IDX	4
+#define DVCTRACE_UNIT_STM_IDX	5
+
+/* static strings, in UTF-8 */
+static struct usb_string trace_string_defs[] = {
+	[DVCTRACE_CTRL_IDX].s = "Debug Sub-Class DvC.Trace (Control)",
+	[DVCTRACE_DATA_IDX].s = "Debug Sub-Class DvC.Trace (Data)",
+	[DVCTRACE_IAD_IDX].s = "Debug Sub-Class DvC.Trace",
+	[DVCTRACE_CONN_PTI_IDX].s = "MIPI PTIv1 output connector",
+	[DVCTRACE_CONN_USB_IDX].s = "USB Device output connector",
+	[DVCTRACE_UNIT_STM_IDX].s = "MIPI STM Debug Unit",
+	{  /* ZEROES END LIST */ },
+};
+
+static struct usb_gadget_strings trace_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		trace_string_defs,
+};
+
+static struct usb_gadget_strings *trace_strings[] = {
+	&trace_string_table,
+	NULL,
+};
+
+/* temporary var used between dvc_trace_open() and dvc_trace_function_bind() */
+static struct dvc_trace_dev *_dvc_trace_dev;
+
+static inline struct dvc_trace_dev *func_to_dvc_trace(struct usb_function *f)
+{
+	return container_of(f, struct dvc_trace_dev, function);
+}
+
+static int dvc_trace_is_enabled(void)
+{
+	if (!stm_is_enabled()) {
+		pr_info("%s STM/PTI block is not enabled\n", __func__);
+		return 0;
+	}
+	return 1;
+}
+
+static struct usb_request *dvc_trace_request_new(struct usb_ep *ep,
+					 int buffer_size, dma_addr_t dma)
+{
+	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!req)
+		return NULL;
+
+	req->dma = dma;
+	/* now allocate buffers for the requests */
+	req->buf = kmalloc(buffer_size, GFP_KERNEL);
+	if (!req->buf) {
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+
+	return req;
+}
+
+static void dvc_trace_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+	if (req) {
+		kfree(req->buf);
+		usb_ep_free_request(ep, req);
+	}
+}
+
+/* add a request to the tail of a list */
+static void dvc_trace_req_put(struct dvc_trace_dev *dev, struct list_head *head,
+		struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add_tail(&req->list, head);
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request *dvc_trace_req_get(struct dvc_trace_dev *dev,
+			struct list_head *head)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (list_empty(head)) {
+		req = NULL;
+	} else {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return req;
+}
+
+static void dvc_trace_set_disconnected(struct dvc_trace_dev *dev)
+{
+	dev->transfering = 0;
+}
+
+static void dvc_trace_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+	struct dvc_trace_dev *dev = _dvc_trace_dev;
+
+	if (req->status != 0)
+		dvc_trace_set_disconnected(dev);
+
+	dvc_trace_req_put(dev, &dev->tx_idle, req);
+
+	wake_up(&dev->write_wq);
+}
+
+static inline int dvc_trace_lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) == 1) {
+		return 0;
+	} else {
+		atomic_dec(excl);
+		return -1;
+	}
+}
+
+static inline void dvc_trace_unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+static int trace_create_bulk_endpoints(struct dvc_trace_dev *dev,
+				       struct usb_endpoint_descriptor *in_desc,
+				       struct usb_ss_ep_comp_descriptor *in_comp_desc
+	)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	struct usb_ep *ep;
+	int i;
+
+	pr_debug("%s dev: %p\n", __func__, dev);
+
+	in_desc->bEndpointAddress |= 0x1;
+	ep = usb_ep_autoconfig_ss(cdev->gadget, in_desc, in_comp_desc);
+	if (!ep) {
+		pr_err("%s usb_ep_autoconfig for ep_in failed\n", __func__);
+		return -ENODEV;
+	}
+	pr_debug("%s usb_ep_autoconfig for ep_in got %s\n", __func__, ep->name);
+
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_in = ep;
+
+	for (i = 0; i < TRACE_TX_REQ_MAX; i++) {
+		req = dvc_trace_request_new(dev->ep_in, TRACE_BULK_BUFFER_SIZE,
+			(dma_addr_t)TRACE_BULK_IN_BUFFER_ADDR);
+		if (!req)
+			goto fail;
+		req->complete = dvc_trace_complete_in;
+		dvc_trace_req_put(dev, &dev->tx_idle, req);
+		pr_debug("%s req= %p : for %s predefined TRB\n", __func__,
+			req, ep->name);
+	}
+
+	return 0;
+
+fail:
+	pr_err("%s could not allocate requests\n", __func__);
+	return -1;
+}
+
+static ssize_t dvc_trace_start_transfer(size_t count)
+{
+	struct dvc_trace_dev *dev = _dvc_trace_dev;
+	struct usb_request *req = NULL;
+	int r = count, xfer;
+	int ret;
+
+	pr_debug("%s\n", __func__);
+	if (!_dvc_trace_dev)
+		return -ENODEV;
+
+	if (dvc_trace_lock(&dev->write_excl))
+		return -EBUSY;
+
+	/* we will block until enumeration completes */
+	while (!(dev->online || dev->error)) {
+		pr_debug("%s: waiting for online state\n", __func__);
+		ret = wait_event_interruptible(dev->write_wq,
+				(dev->online || dev->error));
+
+		if (ret < 0) {
+			/* not at CONFIGURED state */
+			pr_info("%s !dev->online already\n", __func__);
+			dvc_trace_unlock(&dev->write_excl);
+			return ret;
+		}
+	}
+
+	/* queue an ep_in endless request */
+	while (r > 0) {
+		if (dev->error) {
+			pr_debug("%s dev->error\n", __func__);
+			r = -EIO;
+			break;
+		}
+
+		if (!dev->online) {
+			pr_debug("%s !dev->online\n", __func__);
+			r = -EIO;
+			break;
+		}
+
+		/* get an idle tx request to use */
+		req = NULL;
+		ret = wait_event_interruptible(dev->write_wq,
+				dev->error || !dev->online ||
+				(req = dvc_trace_req_get(dev, &dev->tx_idle)));
+
+		if (ret < 0) {
+			r = ret;
+			break;
+		}
+
+		if (req != 0) {
+			if (count > TRACE_BULK_BUFFER_SIZE)
+				xfer = TRACE_BULK_BUFFER_SIZE;
+			else
+				xfer = count;
+			pr_debug("%s queue tx_idle list req to dev->ep_in\n",
+				__func__);
+			req->no_interrupt = 1;
+			req->context = &dev->function;
+			req->length = xfer;
+			ret = usb_ep_queue(dev->ep_in, req, GFP_ATOMIC);
+			if (ret < 0) {
+				pr_err("%s: xfer error %d\n", __func__, ret);
+				dev->error = 1;
+				dev->transfering = 0;
+				r = -EIO;
+				break;
+			}
+			pr_debug("%s: xfer=%d/%d  queued req/%p\n", __func__,
+				xfer, r, req);
+			dvc_trace_req_put(dev, &dev->tx_xfer, req);
+			r -= xfer;
+
+			/* zero this so we don't try to free it on error exit */
+			req = NULL;
+		}
+	}
+	if (req) {
+		pr_debug("%s req re-added to tx_idle on error\n", __func__);
+		dvc_trace_req_put(dev, &dev->tx_idle, req);
+	} else {
+		dev->transfering = 1;
+	}
+	dvc_trace_unlock(&dev->write_excl);
+	pr_debug("%s end\n", __func__);
+	return r;
+}
+
+static int dvc_trace_disable_transfer(void)
+{
+	struct dvc_trace_dev *dev = _dvc_trace_dev;
+	struct usb_request *req = NULL;
+	int ret;
+
+	pr_debug("%s\n", __func__);
+	if (!_dvc_trace_dev)
+		return -ENODEV;
+
+	if (dvc_trace_lock(&dev->write_excl))
+		return -EBUSY;
+
+	if (dev->error) {
+		pr_debug("%s dev->error\n", __func__);
+		dvc_trace_unlock(&dev->write_excl);
+		return -EIO;
+	}
+
+	if ((!dev->online) || (!dev->transfering)) {
+		pr_debug("%s !dev->online OR !dev->transfering\n", __func__);
+		dvc_trace_unlock(&dev->write_excl);
+		return -EIO;
+	}
+
+	/* get an xfer tx request to use */
+	while ((req = dvc_trace_req_get(dev, &dev->tx_xfer))) {
+		ret = usb_ep_dequeue(dev->ep_in, req);
+		if (ret < 0) {
+			pr_err("%s: dequeue error %d\n", __func__, ret);
+			dev->error = 1;
+			dvc_trace_unlock(&dev->write_excl);
+			return -EIO;
+		}
+		pr_debug("%s: dequeued req/%p\n", __func__, req);
+	}
+
+	dvc_trace_unlock(&dev->write_excl);
+	return 1;
+}
+
+static int dvc_trace_open(struct inode *ip, struct file *fp)
+{
+	pr_debug("%s\n", __func__);
+	if (!_dvc_trace_dev)
+		return -ENODEV;
+
+	if (dvc_trace_lock(&_dvc_trace_dev->open_excl))
+		return -EBUSY;
+
+	fp->private_data = _dvc_trace_dev;
+
+	/* clear the error latch */
+	_dvc_trace_dev->error = 0;
+	_dvc_trace_dev->transfering = 0;
+
+	return 0;
+}
+
+static int dvc_trace_release(struct inode *ip, struct file *fp)
+{
+	pr_debug("%s\n", __func__);
+
+	dvc_trace_unlock(&_dvc_trace_dev->open_excl);
+	return 0;
+}
+
+/* file operations for DvC.Trace device /dev/usb_dvc_trace */
+static const struct file_operations dvc_trace_fops = {
+	.owner = THIS_MODULE,
+	.open = dvc_trace_open,
+	.release = dvc_trace_release,
+};
+
+static struct miscdevice dvc_trace_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "usb_dvc_trace",
+	.fops = &dvc_trace_fops,
+};
+
+static int dvc_trace_ctrlrequest(struct usb_composite_dev *cdev,
+				const struct usb_ctrlrequest *ctrl)
+{
+	struct dvc_trace_dev *dev = _dvc_trace_dev;
+	int	value = -EOPNOTSUPP;
+	int	ret;
+	u16	w_index = le16_to_cpu(ctrl->wIndex);
+	u16	w_value = le16_to_cpu(ctrl->wValue);
+	u16	w_length = le16_to_cpu(ctrl->wLength);
+
+	pr_debug("%s %02x.%02x v%04x i%04x l%u\n", __func__,
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	/* DC_REQUEST_SET_RESET ... stop active transfer */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+		| DC_REQUEST_SET_RESET:
+		if (w_index != dev->data_id)
+			goto invalid;
+
+		pr_info("%s DC_REQUEST_SET_RESET v%04x i%04x l%u\n", __func__,
+			 w_value, w_index, w_length);
+
+		dvc_trace_disable_transfer();
+		value = 0;
+		break;
+
+	/* DC_REQUEST_SET_TRACE ... start trace transfer */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+		| DC_REQUEST_SET_TRACE:
+
+		pr_info("%s DC_REQUEST_SET_TRACE v%04x i%04x l%u\n", __func__,
+			 w_value, w_index, w_length);
+
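+		/* w_index selects the action here: zero tears down any
+		 * active transfer, non-zero arms a 4096-byte endless
+		 * transfer on the trace IN endpoint */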
+		if (!w_index)
+			ret = dvc_trace_disable_transfer();
+		else
+			ret = dvc_trace_start_transfer(4096);
+
+		if (ret < 0)
+			value = -EINVAL;
+		else
+			value = (int) w_index;
+		break;
+
+	default:
+invalid:
+		pr_debug("unknown class-specific control req "
+			 "%02x.%02x v%04x i%04x l%u\n",
+			 ctrl->bRequestType, ctrl->bRequest,
+			 w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		cdev->req->zero = 0;
+		cdev->req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+		if (value < 0)
+			pr_err("%s setup response queue error\n", __func__);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+static int
+dvc_trace_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev	*cdev = c->cdev;
+	struct dvc_trace_dev		*dev = func_to_dvc_trace(f);
+	int			id;
+	int			ret;
+	int status;
+
+	dev->cdev = cdev;
+	pr_debug("%s dev: %p\n", __func__, dev);
+
+	/* maybe allocate device-global string IDs, and patch descriptors */
+	if (trace_string_defs[DVCTRACE_CTRL_IDX].id == 0) {
+		status = usb_string_id(cdev);
+		if (status < 0)
+			return status;
+		trace_string_defs[DVCTRACE_DATA_IDX].id = status;
+		trace_data_interface_desc.iInterface = status;
+
+		status = usb_string_id(cdev);
+		if (status < 0)
+			return status;
+		trace_string_defs[DVCTRACE_CTRL_IDX].id = status;
+		trace_interface_desc.iInterface = status;
+
+		status = usb_string_id(cdev);
+		if (status < 0)
+			return status;
+		trace_string_defs[DVCTRACE_IAD_IDX].id = status;
+		trace_iad_desc.iFunction = status;
+
+		status = usb_string_id(cdev);
+		if (status < 0)
+			return status;
+		trace_string_defs[DVCTRACE_CONN_PTI_IDX].id = status;
+		trace_output_conn_pti_desc.iConnection = status;
+
+		status = usb_string_id(cdev);
+		if (status < 0)
+			return status;
+		trace_string_defs[DVCTRACE_CONN_USB_IDX].id = status;
+		trace_output_conn_usb_desc.iConnection = status;
+
+		status = usb_string_id(cdev);
+		if (status < 0)
+			return status;
+		trace_string_defs[DVCTRACE_UNIT_STM_IDX].id = status;
+		trace_debug_unit_stm_desc.iDebugUnitType = status;
+	}
+
+	/* allocate interface ID(s) */
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	dev->data_id = id;
+	trace_data_interface_desc.bInterfaceNumber = id;
+	trace_iad_desc.bFirstInterface = id;
+
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	dev->ctrl_id = id;
+	trace_interface_desc.bInterfaceNumber = id;
+
+	/* allocate endpoints */
+	ret = trace_create_bulk_endpoints(dev, &trace_fullspeed_in_desc,
+					  &trace_superspeed_in_comp_desc);
+	if (ret)
+		return ret;
+
+	/* support high speed hardware */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		trace_highspeed_in_desc.bEndpointAddress =
+			trace_fullspeed_in_desc.bEndpointAddress;
+	}
+
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		trace_superspeed_in_desc.bEndpointAddress =
+			trace_fullspeed_in_desc.bEndpointAddress;
+	}
+
+	pr_debug("%s speed %s: IN/%s\n",
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			f->name, dev->ep_in->name);
+	return 0;
+}
+
+static void
+dvc_trace_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct dvc_trace_dev	*dev = func_to_dvc_trace(f);
+	struct usb_request *req;
+
+	dev->online = 0;
+	dev->online_data = 0;
+	dev->online_ctrl = 0;
+	dev->error = 0;
+	trace_string_defs[DVCTRACE_CTRL_IDX].id = 0;
+
+	wake_up(&dev->write_wq);
+
+	while ((req = dvc_trace_req_get(dev, &dev->tx_idle)))
+		dvc_trace_request_free(req, dev->ep_in);
+}
+
+static int dvc_trace_function_set_alt(struct usb_function *f,
+		unsigned intf, unsigned alt)
+{
+	struct dvc_trace_dev	*dev = func_to_dvc_trace(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret;
+
+	pr_debug("%s intf: %d alt: %d\n", __func__, intf, alt);
+
+	if (intf == trace_interface_desc.bInterfaceNumber)
+		dev->online_ctrl = 1;
+
+	if (intf == trace_data_interface_desc.bInterfaceNumber) {
+		ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+		if (ret) {
+			pr_err("%s intf: %d alt: %d ep_by_speed in err %d\n",
+				__func__, intf, alt, ret);
+			return ret;
+		}
+
+		ret = usb_ep_enable(dev->ep_in);
+		if (ret) {
+			pr_err("%s intf: %d alt: %d ep_enable in err %d\n",
+				__func__, intf, alt, ret);
+			return ret;
+		}
+		dev->online_data = 1;
+	}
+
+	if (dev->online_data && dev->online_ctrl) {
+		dev->online = 1;
+		dev->transfering = 0;
+	}
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->write_wq);
+	return 0;
+}
+
+static void dvc_trace_function_disable(struct usb_function *f)
+{
+	struct dvc_trace_dev	*dev = func_to_dvc_trace(f);
+	struct usb_composite_dev	*cdev = dev->cdev;
+
+	pr_debug("%s dev %p\n", __func__, cdev);
+
+	if (dev->transfering)
+		dvc_trace_disable_transfer();
+
+	dev->online = 0;
+	dev->online_data = 0;
+	dev->online_ctrl = 0;
+	dev->error = 0;
+	usb_ep_disable(dev->ep_in);
+
+	/* writer may be blocked waiting for us to go online */
+	wake_up(&dev->write_wq);
+
+	pr_debug("%s : %s disabled\n", __func__, dev->function.name);
+}
+
+static int dvc_trace_bind_config(struct usb_configuration *c)
+{
+	struct dvc_trace_dev *dev = _dvc_trace_dev;
+
+	pr_debug("%s\n", __func__);
+
+	dev->cdev = c->cdev;
+	dev->function.name = "dvctrace";
+	dev->function.strings = trace_strings;
+	dev->function.fs_descriptors = fs_trace_descs;
+	dev->function.hs_descriptors = hs_trace_descs;
+	dev->function.ss_descriptors = ss_trace_descs;
+	dev->function.bind = dvc_trace_function_bind;
+	dev->function.unbind = dvc_trace_function_unbind;
+	dev->function.set_alt = dvc_trace_function_set_alt;
+	dev->function.disable = dvc_trace_function_disable;
+
+	return usb_add_function(c, &dev->function);
+}
+
+static int dvc_trace_setup(void)
+{
+	struct dvc_trace_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	spin_lock_init(&dev->lock);
+
+	INIT_LIST_HEAD(&dev->tx_idle);
+	INIT_LIST_HEAD(&dev->tx_xfer);
+
+	init_waitqueue_head(&dev->write_wq);
+
+	atomic_set(&dev->open_excl, 0);
+	atomic_set(&dev->write_excl, 0);
+
+	_dvc_trace_dev = dev;
+
+	ret = misc_register(&dvc_trace_device);
+	if (ret)
+		goto err;
+
+	return 0;
+
+err:
+	kfree(dev);
+	pr_err("DvC.Trace gadget driver failed to initialize\n");
+	return ret;
+}
+
+static void dvc_trace_cleanup(void)
+{
+	misc_deregister(&dvc_trace_device);
+
+	kfree(_dvc_trace_dev);
+	_dvc_trace_dev = NULL;
+}
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 84219f6..c69598f 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1541,7 +1541,7 @@
 	spin_lock_irqsave(&func->ffs->eps_lock, flags);
 	do {
 		/* pending requests get nuked */
-		if (likely(ep->ep))
+		if (ep->ep && epfile->ep)
 			usb_ep_disable(ep->ep);
 		epfile->ep = NULL;
 
@@ -1563,7 +1563,12 @@
 	spin_lock_irqsave(&func->ffs->eps_lock, flags);
 	do {
 		struct usb_endpoint_descriptor *ds;
-		ds = ep->descs[ep->descs[1] ? 1 : 0];
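+		/* descs[0] holds the full-speed descriptor, descs[1] the
+		 * high-speed one; pick by the actual connection speed
+		 * rather than by whichever entry happens to be non-NULL
+		 */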
+		int desc_idx = ffs->gadget->speed == USB_SPEED_HIGH ? 1 : 0;
+		ds = ep->descs[desc_idx];
+		if (!ds) {
+			ret = -EINVAL;
+			break;
+		}
 
 		ep->ep->driver_data = ep;
 		ep->ep->desc = ds;
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index c35a9ec..1a847b7 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -2871,7 +2871,7 @@
 		fsg->common->new_fsg = NULL;
 		raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
 		/* FIXME: make interruptible or killable somehow? */
-		wait_event(common->fsg_wait, common->fsg != fsg);
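+		/* bound the wait so a stalled config change cannot block
+		 * unbind forever; the 1s timeout is an arbitrary safety net
+		 */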
+		wait_event_timeout(common->fsg_wait, common->fsg != fsg,
+				   msecs_to_jiffies(1000));
 	}
 
 	fsg_common_put(common);
diff --git a/drivers/usb/gadget/f_midi.c b/drivers/usb/gadget/f_midi.c
index 263e721..b0bce7e 100644
--- a/drivers/usb/gadget/f_midi.c
+++ b/drivers/usb/gadget/f_midi.c
@@ -65,6 +65,11 @@
 	uint8_t data[2];
 };
 
+struct midi_alsa_config {
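+	/* ALSA card/device numbers allocated for this function, reported
+	 * back to the caller of f_midi_bind_config(); both are -1 until
+	 * the function is successfully added
+	 */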
+	int	card;
+	int	device;
+};
+
 struct f_midi {
 	struct usb_function	func;
 	struct usb_gadget	*gadget;
@@ -97,7 +102,7 @@
 DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(16);
 
 /* B.3.1  Standard AC Interface Descriptor */
-static struct usb_interface_descriptor ac_interface_desc __initdata = {
+static struct usb_interface_descriptor ac_interface_desc /* __initdata */ = {
 	.bLength =		USB_DT_INTERFACE_SIZE,
 	.bDescriptorType =	USB_DT_INTERFACE,
 	/* .bInterfaceNumber =	DYNAMIC */
@@ -108,7 +113,7 @@
 };
 
 /* B.3.2  Class-Specific AC Interface Descriptor */
-static struct uac1_ac_header_descriptor_1 ac_header_desc __initdata = {
+static struct uac1_ac_header_descriptor_1 ac_header_desc /* __initdata */ = {
 	.bLength =		UAC_DT_AC_HEADER_SIZE(1),
 	.bDescriptorType =	USB_DT_CS_INTERFACE,
 	.bDescriptorSubtype =	USB_MS_HEADER,
@@ -119,7 +124,7 @@
 };
 
 /* B.4.1  Standard MS Interface Descriptor */
-static struct usb_interface_descriptor ms_interface_desc __initdata = {
+static struct usb_interface_descriptor ms_interface_desc /* __initdata */ = {
 	.bLength =		USB_DT_INTERFACE_SIZE,
 	.bDescriptorType =	USB_DT_INTERFACE,
 	/* .bInterfaceNumber =	DYNAMIC */
@@ -130,7 +135,7 @@
 };
 
 /* B.4.2  Class-Specific MS Interface Descriptor */
-static struct usb_ms_header_descriptor ms_header_desc __initdata = {
+static struct usb_ms_header_descriptor ms_header_desc /* __initdata */ = {
 	.bLength =		USB_DT_MS_HEADER_SIZE,
 	.bDescriptorType =	USB_DT_CS_INTERFACE,
 	.bDescriptorSubtype =	USB_MS_HEADER,
@@ -191,7 +196,7 @@
 	NULL,
 };
 
-static struct usb_request *alloc_ep_req(struct usb_ep *ep, unsigned length)
+static struct usb_request *midi_alloc_ep_req(struct usb_ep *ep, unsigned length)
 {
 	struct usb_request *req;
 
@@ -207,7 +212,7 @@
 	return req;
 }
 
-static void free_ep_req(struct usb_ep *ep, struct usb_request *req)
+static void midi_free_ep_req(struct usb_ep *ep, struct usb_request *req)
 {
 	kfree(req->buf);
 	usb_ep_free_request(ep, req);
@@ -278,7 +283,7 @@
 		if (ep == midi->out_ep)
 			f_midi_handle_out_data(ep, req);
 
-		free_ep_req(ep, req);
+		midi_free_ep_req(ep, req);
 		return;
 
 	case -EOVERFLOW:	/* buffer overrun on read means that
@@ -365,7 +370,7 @@
 	/* allocate a bunch of read buffers and queue them all at once. */
 	for (i = 0; i < midi->qlen && err == 0; i++) {
 		struct usb_request *req =
-			alloc_ep_req(midi->out_ep, midi->buflen);
+			midi_alloc_ep_req(midi->out_ep, midi->buflen);
 		if (req == NULL)
 			return -ENOMEM;
 
@@ -409,7 +414,7 @@
 	card = midi->card;
 	midi->card = NULL;
 	if (card)
-		snd_card_free(card);
+		snd_card_free_when_closed(card);
 
 	kfree(midi->id);
 	midi->id = NULL;
@@ -546,10 +551,10 @@
 		return;
 
 	if (!req)
-		req = alloc_ep_req(ep, midi->buflen);
+		req = midi_alloc_ep_req(ep, midi->buflen);
 
 	if (!req) {
-		ERROR(midi, "gmidi_transmit: alloc_ep_request failed\n");
+		ERROR(midi, "gmidi_transmit: midi_alloc_ep_request failed\n");
 		return;
 	}
 	req->length = 0;
@@ -575,7 +580,7 @@
 	if (req->length > 0)
 		usb_ep_queue(ep, req, GFP_ATOMIC);
 	else
-		free_ep_req(ep, req);
+		midi_free_ep_req(ep, req);
 }
 
 static void f_midi_in_tasklet(unsigned long data)
@@ -733,7 +738,7 @@
 
 /* MIDI function driver setup/binding */
 
-static int __init
+static int /* __init */
 f_midi_bind(struct usb_configuration *c, struct usb_function *f)
 {
 	struct usb_descriptor_header **midi_function;
@@ -923,16 +928,22 @@
  *
  * Returns zero on success, else negative errno.
  */
-int __init f_midi_bind_config(struct usb_configuration *c,
+int /* __init */ f_midi_bind_config(struct usb_configuration *c,
 			      int index, char *id,
 			      unsigned int in_ports,
 			      unsigned int out_ports,
 			      unsigned int buflen,
-			      unsigned int qlen)
+			      unsigned int qlen,
+			      struct midi_alsa_config *config)
 {
 	struct f_midi *midi;
 	int status, i;
 
+	if (config) {
+		config->card = -1;
+		config->device = -1;
+	}
+
 	/* sanity check */
 	if (in_ports > MAX_PORTS || out_ports > MAX_PORTS)
 		return -EINVAL;
@@ -961,6 +972,10 @@
 	tasklet_init(&midi->tasklet, f_midi_in_tasklet, (unsigned long) midi);
 
 	/* set up ALSA midi devices */
+	midi->id = kstrdup(id, GFP_KERNEL);
+	midi->index = index;
+	midi->buflen = buflen;
+	midi->qlen = qlen;
 	midi->in_ports = in_ports;
 	midi->out_ports = out_ports;
 	status = f_midi_register_card(midi);
@@ -974,15 +989,16 @@
 	midi->func.set_alt     = f_midi_set_alt;
 	midi->func.disable     = f_midi_disable;
 
-	midi->id = kstrdup(id, GFP_KERNEL);
-	midi->index = index;
-	midi->buflen = buflen;
-	midi->qlen = qlen;
-
 	status = usb_add_function(c, &midi->func);
 	if (status)
 		goto setup_fail;
 
+
+	if (config) {
+		config->card = midi->rmidi->card->number;
+		config->device = midi->rmidi->device;
+	}
+
 	return 0;
 
 setup_fail:
diff --git a/drivers/usb/gadget/f_mtp.c b/drivers/usb/gadget/f_mtp.c
new file mode 100644
index 0000000..620aeaa
--- /dev/null
+++ b/drivers/usb/gadget/f_mtp.c
@@ -0,0 +1,1287 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include <linux/usb.h>
+#include <linux/usb_usual.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/f_mtp.h>
+
+#define MTP_BULK_BUFFER_SIZE       16384
+#define INTR_BUFFER_SIZE           28
+
+/* String IDs */
+#define INTERFACE_STRING_INDEX	0
+
+/* values for mtp_dev.state */
+#define STATE_OFFLINE               0   /* initial state, disconnected */
+#define STATE_READY                 1   /* ready for userspace calls */
+#define STATE_BUSY                  2   /* processing userspace calls */
+#define STATE_CANCELED              3   /* transaction canceled by host */
+#define STATE_ERROR                 4   /* error from completion routine */
+
+/* number of tx and rx requests to allocate */
+#define TX_REQ_MAX 4
+#define RX_REQ_MAX 2
+#define INTR_REQ_MAX 5
+
+/* ID for Microsoft MTP OS String */
+#define MTP_OS_STRING_ID   0xEE
+
+/* MTP class requests */
+#define MTP_REQ_CANCEL              0x64
+#define MTP_REQ_GET_EXT_EVENT_DATA  0x65
+#define MTP_REQ_RESET               0x66
+#define MTP_REQ_GET_DEVICE_STATUS   0x67
+
+/* constants for device status */
+#define MTP_RESPONSE_OK             0x2001
+#define MTP_RESPONSE_DEVICE_BUSY    0x2019
+
+static const char mtp_shortname[] = "mtp_usb";
+
+struct mtp_dev {
+	struct usb_function function;
+	struct usb_composite_dev *cdev;
+	spinlock_t lock;
+
+	struct usb_ep *ep_in;
+	struct usb_ep *ep_out;
+	struct usb_ep *ep_intr;
+
+	int state;
+
+	/* synchronize access to our device file */
+	atomic_t open_excl;
+	/* to enforce only one ioctl at a time */
+	atomic_t ioctl_excl;
+
+	struct list_head tx_idle;
+	struct list_head intr_idle;
+
+	wait_queue_head_t read_wq;
+	wait_queue_head_t write_wq;
+	wait_queue_head_t intr_wq;
+	struct usb_request *rx_req[RX_REQ_MAX];
+	int rx_done;
+
+	/* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and
+	 * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue
+	 */
+	struct workqueue_struct *wq;
+	struct work_struct send_file_work;
+	struct work_struct receive_file_work;
+	struct file *xfer_file;
+	loff_t xfer_file_offset;
+	int64_t xfer_file_length;
+	unsigned xfer_send_header;
+	uint16_t xfer_command;
+	uint32_t xfer_transaction_id;
+	int xfer_result;
+};
+
+static struct usb_interface_descriptor mtp_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bInterfaceNumber       = 0,
+	.bNumEndpoints          = 3,
+	.bInterfaceClass        = USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass     = USB_SUBCLASS_VENDOR_SPEC,
+	.bInterfaceProtocol     = 0,
+};
+
+static struct usb_interface_descriptor ptp_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bInterfaceNumber       = 0,
+	.bNumEndpoints          = 3,
+	.bInterfaceClass        = USB_CLASS_STILL_IMAGE,
+	.bInterfaceSubClass     = 1,
+	.bInterfaceProtocol     = 1,
+};
+
+static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor mtp_intr_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize         = __constant_cpu_to_le16(INTR_BUFFER_SIZE),
+	.bInterval              = 6,
+};
+
+static struct usb_descriptor_header *fs_mtp_descs[] = {
+	(struct usb_descriptor_header *) &mtp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *hs_mtp_descs[] = {
+	(struct usb_descriptor_header *) &mtp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *fs_ptp_descs[] = {
+	(struct usb_descriptor_header *) &ptp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *hs_ptp_descs[] = {
+	(struct usb_descriptor_header *) &ptp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	NULL,
+};
+
+static struct usb_string mtp_string_defs[] = {
+	/* Naming interface "MTP" so libmtp will recognize us */
+	[INTERFACE_STRING_INDEX].s	= "MTP",
+	{  },	/* end of list */
+};
+
+static struct usb_gadget_strings mtp_string_table = {
+	.language		= 0x0409,	/* en-US */
+	.strings		= mtp_string_defs,
+};
+
+static struct usb_gadget_strings *mtp_strings[] = {
+	&mtp_string_table,
+	NULL,
+};
+
+/* Microsoft MTP OS String */
+static u8 mtp_os_string[] = {
+	18, /* sizeof(mtp_os_string) */
+	USB_DT_STRING,
+	/* Signature field: "MSFT100" */
+	'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
+	/* vendor code */
+	1,
+	/* padding */
+	0
+};
+
+/* Microsoft Extended Configuration Descriptor Header Section */
+struct mtp_ext_config_desc_header {
+	__le32	dwLength;
+	__le16	bcdVersion;
+	__le16	wIndex;
+	__u8	bCount;
+	__u8	reserved[7];
+};
+
+/* Microsoft Extended Configuration Descriptor Function Section */
+struct mtp_ext_config_desc_function {
+	__u8	bFirstInterfaceNumber;
+	__u8	bInterfaceCount;
+	__u8	compatibleID[8];
+	__u8	subCompatibleID[8];
+	__u8	reserved[6];
+};
+
+/* MTP Extended Configuration Descriptor */
+struct {
+	struct mtp_ext_config_desc_header	header;
+	struct mtp_ext_config_desc_function    function;
+} mtp_ext_config_desc = {
+	.header = {
+		.dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
+		.bcdVersion = __constant_cpu_to_le16(0x0100),
+		.wIndex = __constant_cpu_to_le16(4),
+		.bCount = 1,	/* bCount is a single byte; no byte-swap */
+	},
+	.function = {
+		.bFirstInterfaceNumber = 0,
+		.bInterfaceCount = 1,
+		.compatibleID = { 'M', 'T', 'P' },
+	},
+};
+
+struct mtp_device_status {
+	__le16	wLength;
+	__le16	wCode;
+};
+
+struct mtp_data_header {
+	/* length of packet, including this header */
+	__le32	length;
+	/* container type (2 for data packet) */
+	__le16	type;
+	/* MTP command code */
+	__le16	command;
+	/* MTP transaction ID */
+	__le32	transaction_id;
+};
+
+/* temporary variable used between mtp_open() and mtp_gadget_bind() */
+static struct mtp_dev *_mtp_dev;
+
+static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
+{
+	return container_of(f, struct mtp_dev, function);
+}
+
+static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
+{
+	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!req)
+		return NULL;
+
+	/* now allocate buffers for the requests */
+	req->buf = kmalloc(buffer_size, GFP_KERNEL);
+	if (!req->buf) {
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+
+	return req;
+}
+
+static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+	if (req) {
+		kfree(req->buf);
+		usb_ep_free_request(ep, req);
+	}
+}
+
+static inline int mtp_lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) == 1) {
+		return 0;
+	} else {
+		atomic_dec(excl);
+		return -1;
+	}
+}
+
+static inline void mtp_unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+/* add a request to the tail of a list */
+static void mtp_req_put(struct mtp_dev *dev, struct list_head *head,
+		struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add_tail(&req->list, head);
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request
+*mtp_req_get(struct mtp_dev *dev, struct list_head *head)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (list_empty(head)) {
+		req = 0;
+	} else {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return req;
+}
+
+static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+	struct mtp_dev *dev = _mtp_dev;
+
+	if (req->status != 0)
+		dev->state = STATE_ERROR;
+
+	mtp_req_put(dev, &dev->tx_idle, req);
+
+	wake_up(&dev->write_wq);
+}
+
+static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+	struct mtp_dev *dev = _mtp_dev;
+
+	dev->rx_done = 1;
+	if (req->status != 0)
+		dev->state = STATE_ERROR;
+
+	wake_up(&dev->read_wq);
+}
+
+static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
+{
+	struct mtp_dev *dev = _mtp_dev;
+
+	if (req->status != 0)
+		dev->state = STATE_ERROR;
+
+	mtp_req_put(dev, &dev->intr_idle, req);
+
+	wake_up(&dev->intr_wq);
+}
+
+static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
+				struct usb_endpoint_descriptor *in_desc,
+				struct usb_endpoint_descriptor *out_desc,
+				struct usb_endpoint_descriptor *intr_desc)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	struct usb_ep *ep;
+	int i;
+
+	DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_in = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_out = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_intr = ep;
+
+	/* now allocate requests for our endpoints */
+	for (i = 0; i < TX_REQ_MAX; i++) {
+		req = mtp_request_new(dev->ep_in, MTP_BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = mtp_complete_in;
+		mtp_req_put(dev, &dev->tx_idle, req);
+	}
+	for (i = 0; i < RX_REQ_MAX; i++) {
+		req = mtp_request_new(dev->ep_out, MTP_BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = mtp_complete_out;
+		dev->rx_req[i] = req;
+	}
+	for (i = 0; i < INTR_REQ_MAX; i++) {
+		req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = mtp_complete_intr;
+		mtp_req_put(dev, &dev->intr_idle, req);
+	}
+
+	return 0;
+
+fail:
+	printk(KERN_ERR "mtp_bind() could not allocate requests\n");
+	return -ENOMEM;
+}
+
+static ssize_t mtp_read(struct file *fp, char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct mtp_dev *dev = fp->private_data;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	ssize_t r = count;
+	unsigned xfer;
+	int ret = 0;
+
+	DBG(cdev, "mtp_read(%zu)\n", count);
+
+	if (count > MTP_BULK_BUFFER_SIZE)
+		return -EINVAL;
+
+	/* we will block until we're online */
+	DBG(cdev, "mtp_read: waiting for online state\n");
+	ret = wait_event_interruptible(dev->read_wq,
+		dev->state != STATE_OFFLINE);
+	if (ret < 0) {
+		r = ret;
+		goto done;
+	}
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED) {
+		/* report cancelation to userspace */
+		dev->state = STATE_READY;
+		spin_unlock_irq(&dev->lock);
+		return -ECANCELED;
+	}
+	dev->state = STATE_BUSY;
+	spin_unlock_irq(&dev->lock);
+
+requeue_req:
+	/* queue a request */
+	req = dev->rx_req[0];
+	req->length = count;
+	dev->rx_done = 0;
+	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
+	if (ret < 0) {
+		r = -EIO;
+		goto done;
+	} else {
+		DBG(cdev, "rx %p queue\n", req);
+	}
+
+	/* wait for a request to complete */
+	ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+	if (ret < 0) {
+		r = ret;
+		usb_ep_dequeue(dev->ep_out, req);
+		goto done;
+	}
+	if (dev->state == STATE_BUSY) {
+		/* If we got a 0-len packet, throw it back and try again. */
+		if (req->actual == 0)
+			goto requeue_req;
+
+		DBG(cdev, "rx %p %d\n", req, req->actual);
+		xfer = (req->actual < count) ? req->actual : count;
+		r = xfer;
+		if (copy_to_user(buf, req->buf, xfer))
+			r = -EFAULT;
+	} else
+		r = -EIO;
+
+done:
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED)
+		r = -ECANCELED;
+	else if (dev->state != STATE_OFFLINE)
+		dev->state = STATE_READY;
+	spin_unlock_irq(&dev->lock);
+
+	DBG(cdev, "mtp_read returning %zd\n", r);
+	return r;
+}
+
+static ssize_t mtp_write(struct file *fp, const char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct mtp_dev *dev = fp->private_data;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req = 0;
+	ssize_t r = count;
+	unsigned xfer;
+	int sendZLP = 0;
+	int ret;
+
+	DBG(cdev, "mtp_write(%zu)\n", count);
+
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED) {
+		/* report cancelation to userspace */
+		dev->state = STATE_READY;
+		spin_unlock_irq(&dev->lock);
+		return -ECANCELED;
+	}
+	if (dev->state == STATE_OFFLINE) {
+		spin_unlock_irq(&dev->lock);
+		return -ENODEV;
+	}
+	dev->state = STATE_BUSY;
+	spin_unlock_irq(&dev->lock);
+
+	/* we need to send a zero length packet to signal the end of transfer
+	 * if the transfer size is aligned to a packet boundary.
+	 */
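+	/* e.g. with a 512-byte bulk maxpacket, a 1024-byte write satisfies
+	 * (1024 & 511) == 0 and ends exactly on a packet boundary, so only
+	 * a trailing ZLP tells the host the transfer is over; a 1000-byte
+	 * write already ends in a short packet and needs none.
+	 */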
+	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
+		sendZLP = 1;
+
+	while (count > 0 || sendZLP) {
+		/* so we exit after sending ZLP */
+		if (count == 0)
+			sendZLP = 0;
+
+		if (dev->state != STATE_BUSY) {
+			DBG(cdev, "mtp_write dev->error\n");
+			r = -EIO;
+			break;
+		}
+
+		/* get an idle tx request to use */
+		req = 0;
+		ret = wait_event_interruptible(dev->write_wq,
+			((req = mtp_req_get(dev, &dev->tx_idle))
+				|| dev->state != STATE_BUSY));
+		if (!req) {
+			r = ret;
+			break;
+		}
+
+		if (count > MTP_BULK_BUFFER_SIZE)
+			xfer = MTP_BULK_BUFFER_SIZE;
+		else
+			xfer = count;
+		if (xfer && copy_from_user(req->buf, buf, xfer)) {
+			r = -EFAULT;
+			break;
+		}
+
+		req->length = xfer;
+		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+		if (ret < 0) {
+			DBG(cdev, "mtp_write: xfer error %d\n", ret);
+			r = -EIO;
+			break;
+		}
+
+		buf += xfer;
+		count -= xfer;
+
+		/* zero this so we don't try to free it on error exit */
+		req = 0;
+	}
+
+	if (req)
+		mtp_req_put(dev, &dev->tx_idle, req);
+
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED)
+		r = -ECANCELED;
+	else if (dev->state != STATE_OFFLINE)
+		dev->state = STATE_READY;
+	spin_unlock_irq(&dev->lock);
+
+	DBG(cdev, "mtp_write returning %zd\n", r);
+	return r;
+}
+
+/* read from a local file and write to USB */
+static void send_file_work(struct work_struct *data)
+{
+	struct mtp_dev *dev = container_of(data, struct mtp_dev,
+						send_file_work);
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req = 0;
+	struct mtp_data_header *header;
+	struct file *filp;
+	loff_t offset;
+	int64_t count;
+	int xfer, ret, hdr_size;
+	int r = 0;
+	int sendZLP = 0;
+
+	/* read our parameters */
+	smp_rmb();
+	filp = dev->xfer_file;
+	offset = dev->xfer_file_offset;
+	count = dev->xfer_file_length;
+
+	DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);
+
+	if (dev->xfer_send_header) {
+		hdr_size = sizeof(struct mtp_data_header);
+		count += hdr_size;
+	} else {
+		hdr_size = 0;
+	}
+
+	/* we need to send a zero length packet to signal the end of transfer
+	 * if the transfer size is aligned to a packet boundary.
+	 */
+	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
+		sendZLP = 1;
+
+	while (count > 0 || sendZLP) {
+		/* so we exit after sending ZLP */
+		if (count == 0)
+			sendZLP = 0;
+
+		/* get an idle tx request to use */
+		req = 0;
+		ret = wait_event_interruptible(dev->write_wq,
+			(req = mtp_req_get(dev, &dev->tx_idle))
+			|| dev->state != STATE_BUSY);
+		if (dev->state == STATE_CANCELED) {
+			r = -ECANCELED;
+			break;
+		}
+		if (!req) {
+			r = ret;
+			break;
+		}
+
+		if (count > MTP_BULK_BUFFER_SIZE)
+			xfer = MTP_BULK_BUFFER_SIZE;
+		else
+			xfer = count;
+
+		if (hdr_size) {
+			/* prepend MTP data header */
+			header = (struct mtp_data_header *)req->buf;
+			header->length = __cpu_to_le32(count);
+			header->type = __cpu_to_le16(2); /* data packet */
+			header->command = __cpu_to_le16(dev->xfer_command);
+			header->transaction_id =
+					__cpu_to_le32(dev->xfer_transaction_id);
+		}
+
+		ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
+								&offset);
+		if (ret < 0) {
+			r = ret;
+			break;
+		}
+		xfer = ret + hdr_size;
+		hdr_size = 0;
+
+		req->length = xfer;
+		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+		if (ret < 0) {
+			DBG(cdev, "send_file_work: xfer error %d\n", ret);
+			dev->state = STATE_ERROR;
+			r = -EIO;
+			break;
+		}
+
+		count -= xfer;
+
+		/* zero this so we don't try to free it on error exit */
+		req = 0;
+	}
+
+	if (req)
+		mtp_req_put(dev, &dev->tx_idle, req);
+
+	DBG(cdev, "send_file_work returning %d\n", r);
+	/* write the result */
+	dev->xfer_result = r;
+	smp_wmb();
+}
+
+/* read from USB and write to a local file */
+static void receive_file_work(struct work_struct *data)
+{
+	struct mtp_dev *dev = container_of(data, struct mtp_dev,
+						receive_file_work);
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *read_req = NULL, *write_req = NULL;
+	struct file *filp;
+	loff_t offset;
+	int64_t count;
+	int ret, cur_buf = 0;
+	int r = 0;
+
+	/* read our parameters */
+	smp_rmb();
+	filp = dev->xfer_file;
+	offset = dev->xfer_file_offset;
+	count = dev->xfer_file_length;
+
+	DBG(cdev, "receive_file_work(%lld)\n", count);
+
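+	/* Pipeline the transfer: while the data from the previous request
+	 * is written out with vfs_write(), the next USB read is already
+	 * queued on another buffer (cur_buf cycles through the RX_REQ_MAX
+	 * preallocated requests).
+	 */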
+	while (count > 0 || write_req) {
+		if (count > 0) {
+			/* queue a request */
+			read_req = dev->rx_req[cur_buf];
+			cur_buf = (cur_buf + 1) % RX_REQ_MAX;
+
+			read_req->length = (count > MTP_BULK_BUFFER_SIZE
+					? MTP_BULK_BUFFER_SIZE : count);
+			dev->rx_done = 0;
+			ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
+			if (ret < 0) {
+				r = -EIO;
+				dev->state = STATE_ERROR;
+				break;
+			}
+		}
+
+		if (write_req) {
+			DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
+			ret = vfs_write(filp, write_req->buf, write_req->actual,
+				&offset);
+			DBG(cdev, "vfs_write %d\n", ret);
+			if (ret != write_req->actual) {
+				r = -EIO;
+				dev->state = STATE_ERROR;
+				break;
+			}
+			write_req = NULL;
+		}
+
+		if (read_req) {
+			/* wait for our last read to complete */
+			ret = wait_event_interruptible(dev->read_wq,
+				dev->rx_done || dev->state != STATE_BUSY);
+			if (dev->state == STATE_CANCELED) {
+				r = -ECANCELED;
+				if (!dev->rx_done)
+					usb_ep_dequeue(dev->ep_out, read_req);
+				break;
+			}
+			/* if xfer_file_length is 0xFFFFFFFF, then we read until
+			 * we get a zero length packet
+			 */
+			if (count != 0xFFFFFFFF)
+				count -= read_req->actual;
+			if (read_req->actual < read_req->length) {
+				/*
+				 * short packet is used to signal EOF for
+				 * sizes > 4 gig
+				 */
+				DBG(cdev, "got short packet\n");
+				count = 0;
+			}
+
+			write_req = read_req;
+			read_req = NULL;
+		}
+	}
+
+	DBG(cdev, "receive_file_work returning %d\n", r);
+	/* write the result */
+	dev->xfer_result = r;
+	smp_wmb();
+}
+
+static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
+{
+	struct usb_request *req = NULL;
+	int ret;
+	int length = event->length;
+
+	DBG(dev->cdev, "mtp_send_event(%zu)\n", event->length);
+
+	if (length < 0 || length > INTR_BUFFER_SIZE)
+		return -EINVAL;
+	if (dev->state == STATE_OFFLINE)
+		return -ENODEV;
+
+	ret = wait_event_interruptible_timeout(dev->intr_wq,
+			(req = mtp_req_get(dev, &dev->intr_idle)),
+			msecs_to_jiffies(1000));
+	if (!req)
+		return -ETIME;
+
+	if (copy_from_user(req->buf, (void __user *)event->data, length)) {
+		mtp_req_put(dev, &dev->intr_idle, req);
+		return -EFAULT;
+	}
+	req->length = length;
+	ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
+	if (ret)
+		mtp_req_put(dev, &dev->intr_idle, req);
+
+	return ret;
+}
+
+static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+	struct mtp_dev *dev = fp->private_data;
+	struct file *filp = NULL;
+	int ret = -EINVAL;
+
+	if (mtp_lock(&dev->ioctl_excl))
+		return -EBUSY;
+
+	switch (code) {
+	case MTP_SEND_FILE:
+	case MTP_RECEIVE_FILE:
+	case MTP_SEND_FILE_WITH_HEADER:
+	{
+		struct mtp_file_range	mfr;
+		struct work_struct *work;
+
+		spin_lock_irq(&dev->lock);
+		if (dev->state == STATE_CANCELED) {
+			/* report cancelation to userspace */
+			dev->state = STATE_READY;
+			spin_unlock_irq(&dev->lock);
+			ret = -ECANCELED;
+			goto out;
+		}
+		if (dev->state == STATE_OFFLINE) {
+			spin_unlock_irq(&dev->lock);
+			ret = -ENODEV;
+			goto out;
+		}
+		dev->state = STATE_BUSY;
+		spin_unlock_irq(&dev->lock);
+
+		if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
+			ret = -EFAULT;
+			goto fail;
+		}
+		/* hold a reference to the file while we are working with it */
+		filp = fget(mfr.fd);
+		if (!filp) {
+			ret = -EBADF;
+			goto fail;
+		}
+
+		/* write the parameters */
+		dev->xfer_file = filp;
+		dev->xfer_file_offset = mfr.offset;
+		dev->xfer_file_length = mfr.length;
+		smp_wmb();
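+		/* the smp_wmb() pairs with the smp_rmb() at the top of
+		 * send_file_work()/receive_file_work(); queue_work() plus
+		 * flush_workqueue() below order the hand-off itself
+		 */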
+
+		if (code == MTP_SEND_FILE_WITH_HEADER) {
+			work = &dev->send_file_work;
+			dev->xfer_send_header = 1;
+			dev->xfer_command = mfr.command;
+			dev->xfer_transaction_id = mfr.transaction_id;
+		} else if (code == MTP_SEND_FILE) {
+			work = &dev->send_file_work;
+			dev->xfer_send_header = 0;
+		} else {
+			work = &dev->receive_file_work;
+		}
+
+		/* We do the file transfer on a work queue so it will run
+		 * in kernel context, which is necessary for vfs_read and
+		 * vfs_write to use our buffers in the kernel address space.
+		 */
+		queue_work(dev->wq, work);
+		/* wait for operation to complete */
+		flush_workqueue(dev->wq);
+		fput(filp);
+
+		/* read the result */
+		smp_rmb();
+		ret = dev->xfer_result;
+		break;
+	}
+	case MTP_SEND_EVENT:
+	{
+		struct mtp_event	event;
+		/* return here so we don't change dev->state below,
+		 * which would interfere with bulk transfer state.
+		 */
+		if (copy_from_user(&event, (void __user *)value, sizeof(event)))
+			ret = -EFAULT;
+		else
+			ret = mtp_send_event(dev, &event);
+		goto out;
+	}
+	}
+
+fail:
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED)
+		ret = -ECANCELED;
+	else if (dev->state != STATE_OFFLINE)
+		dev->state = STATE_READY;
+	spin_unlock_irq(&dev->lock);
+out:
+	mtp_unlock(&dev->ioctl_excl);
+	DBG(dev->cdev, "ioctl returning %d\n", ret);
+	return ret;
+}
+
+static int mtp_open(struct inode *ip, struct file *fp)
+{
+	printk(KERN_INFO "mtp_open\n");
+	if (mtp_lock(&_mtp_dev->open_excl))
+		return -EBUSY;
+
+	/* clear any error condition */
+	if (_mtp_dev->state != STATE_OFFLINE)
+		_mtp_dev->state = STATE_READY;
+
+	fp->private_data = _mtp_dev;
+	return 0;
+}
+
+static int mtp_release(struct inode *ip, struct file *fp)
+{
+	printk(KERN_INFO "mtp_release\n");
+
+	mtp_unlock(&_mtp_dev->open_excl);
+	return 0;
+}
+
+/* file operations for /dev/mtp_usb */
+static const struct file_operations mtp_fops = {
+	.owner = THIS_MODULE,
+	.read = mtp_read,
+	.write = mtp_write,
+	.unlocked_ioctl = mtp_ioctl,
+	.open = mtp_open,
+	.release = mtp_release,
+};
+
+static struct miscdevice mtp_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = mtp_shortname,
+	.fops = &mtp_fops,
+};
+
+static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
+				const struct usb_ctrlrequest *ctrl)
+{
+	struct mtp_dev *dev = _mtp_dev;
+	int	value = -EOPNOTSUPP;
+	u16	w_index = le16_to_cpu(ctrl->wIndex);
+	u16	w_value = le16_to_cpu(ctrl->wValue);
+	u16	w_length = le16_to_cpu(ctrl->wLength);
+	unsigned long	flags;
+
+	VDBG(cdev, "mtp_ctrlrequest "
+			"%02x.%02x v%04x i%04x l%u\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+
+	/* Handle MTP OS string */
+	if (ctrl->bRequestType ==
+			(USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
+			&& ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
+			&& (w_value >> 8) == USB_DT_STRING
+			&& (w_value & 0xFF) == MTP_OS_STRING_ID) {
+		value = (w_length < sizeof(mtp_os_string)
+				? w_length : sizeof(mtp_os_string));
+		memcpy(cdev->req->buf, mtp_os_string, value);
+	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
+		/* Handle MTP OS descriptor */
+		DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
+			ctrl->bRequest, w_index, w_value, w_length);
+
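+		/* wIndex 4 asks for the MS extended compat ID descriptor,
+		 * wIndex 5 for extended properties; both are answered with
+		 * the compat ID blob above, which marks the interface as
+		 * "MTP" for Windows hosts
+		 */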
+		if (ctrl->bRequest == 1
+				&& (ctrl->bRequestType & USB_DIR_IN)
+				&& (w_index == 4 || w_index == 5)) {
+			value = (w_length < sizeof(mtp_ext_config_desc) ?
+					w_length : sizeof(mtp_ext_config_desc));
+			memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
+		}
+	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
+		DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
+			ctrl->bRequest, w_index, w_value, w_length);
+
+		if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
+				&& w_value == 0) {
+			DBG(cdev, "MTP_REQ_CANCEL\n");
+
+			spin_lock_irqsave(&dev->lock, flags);
+			if (dev->state == STATE_BUSY) {
+				dev->state = STATE_CANCELED;
+				wake_up(&dev->read_wq);
+				wake_up(&dev->write_wq);
+			}
+			spin_unlock_irqrestore(&dev->lock, flags);
+
+			/* We need to queue a request to read the remaining
+			 *  bytes, but we don't actually need to look at
+			 * the contents.
+			 */
+			value = w_length;
+		} else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
+				&& w_index == 0 && w_value == 0) {
+			struct mtp_device_status *status = cdev->req->buf;
+			status->wLength =
+				__constant_cpu_to_le16(sizeof(*status));
+
+			DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
+			spin_lock_irqsave(&dev->lock, flags);
+			/* device status is "busy" until we report
+			 * the cancelation to userspace
+			 */
+			if (dev->state == STATE_CANCELED)
+				status->wCode =
+					__cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
+			else
+				status->wCode =
+					__cpu_to_le16(MTP_RESPONSE_OK);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			value = sizeof(*status);
+		}
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		int rc;
+		cdev->req->zero = value < w_length;
+		cdev->req->length = value;
+		rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+		if (rc < 0)
+			ERROR(cdev, "%s: response queue error\n", __func__);
+	}
+	return value;
+}
+
+static int
+mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct mtp_dev	*dev = func_to_mtp(f);
+	int			id;
+	int			ret;
+
+	dev->cdev = cdev;
+	DBG(cdev, "mtp_function_bind dev: %p\n", dev);
+
+	/* allocate interface ID(s) */
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	mtp_interface_desc.bInterfaceNumber = id;
+
+	/* allocate endpoints */
+	ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
+			&mtp_fullspeed_out_desc, &mtp_intr_desc);
+	if (ret)
+		return ret;
+
+	/* support high speed hardware */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		mtp_highspeed_in_desc.bEndpointAddress =
+			mtp_fullspeed_in_desc.bEndpointAddress;
+		mtp_highspeed_out_desc.bEndpointAddress =
+			mtp_fullspeed_out_desc.bEndpointAddress;
+	}
+
+	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			f->name, dev->ep_in->name, dev->ep_out->name);
+	return 0;
+}
+
+static void
+mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct mtp_dev	*dev = func_to_mtp(f);
+	struct usb_request *req;
+	int i;
+
+	while ((req = mtp_req_get(dev, &dev->tx_idle)))
+		mtp_request_free(req, dev->ep_in);
+	for (i = 0; i < RX_REQ_MAX; i++)
+		mtp_request_free(dev->rx_req[i], dev->ep_out);
+	while ((req = mtp_req_get(dev, &dev->intr_idle)))
+		mtp_request_free(req, dev->ep_intr);
+	dev->state = STATE_OFFLINE;
+}
+
+static int mtp_function_set_alt(struct usb_function *f,
+		unsigned intf, unsigned alt)
+{
+	struct mtp_dev	*dev = func_to_mtp(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret;
+
+	DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
+
+	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+	if (ret)
+		return ret;
+
+	ret = usb_ep_enable(dev->ep_in);
+	if (ret)
+		return ret;
+
+	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+	if (ret)
+		return ret;
+
+	ret = usb_ep_enable(dev->ep_out);
+	if (ret) {
+		usb_ep_disable(dev->ep_in);
+		return ret;
+	}
+
+	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_intr);
+	if (ret)
+		return ret;
+
+	ret = usb_ep_enable(dev->ep_intr);
+	if (ret) {
+		usb_ep_disable(dev->ep_out);
+		usb_ep_disable(dev->ep_in);
+		return ret;
+	}
+	dev->state = STATE_READY;
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+	return 0;
+}
+
+static void mtp_function_disable(struct usb_function *f)
+{
+	struct mtp_dev	*dev = func_to_mtp(f);
+	struct usb_composite_dev	*cdev = dev->cdev;
+
+	DBG(cdev, "mtp_function_disable\n");
+	dev->state = STATE_OFFLINE;
+	usb_ep_disable(dev->ep_in);
+	usb_ep_disable(dev->ep_out);
+	usb_ep_disable(dev->ep_intr);
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+
+	VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+
+static int mtp_bind_config(struct usb_configuration *c, bool ptp_config)
+{
+	struct mtp_dev *dev = _mtp_dev;
+	int ret = 0;
+
+	printk(KERN_INFO "mtp_bind_config\n");
+
+	/* allocate a string ID for our interface */
+	if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
+		ret = usb_string_id(c->cdev);
+		if (ret < 0)
+			return ret;
+		mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
+		mtp_interface_desc.iInterface = ret;
+	}
+
+	dev->cdev = c->cdev;
+	dev->function.name = "mtp";
+	dev->function.strings = mtp_strings;
+	if (ptp_config) {
+		dev->function.fs_descriptors = fs_ptp_descs;
+		dev->function.hs_descriptors = hs_ptp_descs;
+	} else {
+		dev->function.fs_descriptors = fs_mtp_descs;
+		dev->function.hs_descriptors = hs_mtp_descs;
+	}
+	dev->function.bind = mtp_function_bind;
+	dev->function.unbind = mtp_function_unbind;
+	dev->function.set_alt = mtp_function_set_alt;
+	dev->function.disable = mtp_function_disable;
+
+	return usb_add_function(c, &dev->function);
+}
+
+static int mtp_setup(void)
+{
+	struct mtp_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	spin_lock_init(&dev->lock);
+	init_waitqueue_head(&dev->read_wq);
+	init_waitqueue_head(&dev->write_wq);
+	init_waitqueue_head(&dev->intr_wq);
+	atomic_set(&dev->open_excl, 0);
+	atomic_set(&dev->ioctl_excl, 0);
+	INIT_LIST_HEAD(&dev->tx_idle);
+	INIT_LIST_HEAD(&dev->intr_idle);
+
+	dev->wq = create_singlethread_workqueue("f_mtp");
+	if (!dev->wq) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+	INIT_WORK(&dev->send_file_work, send_file_work);
+	INIT_WORK(&dev->receive_file_work, receive_file_work);
+
+	_mtp_dev = dev;
+
+	ret = misc_register(&mtp_device);
+	if (ret)
+		goto err2;
+
+	return 0;
+
+err2:
+	destroy_workqueue(dev->wq);
+err1:
+	_mtp_dev = NULL;
+	kfree(dev);
+	printk(KERN_ERR "mtp gadget driver failed to initialize\n");
+	return ret;
+}
+
+static void mtp_cleanup(void)
+{
+	struct mtp_dev *dev = _mtp_dev;
+
+	if (!dev)
+		return;
+
+	misc_deregister(&mtp_device);
+	destroy_workqueue(dev->wq);
+	_mtp_dev = NULL;
+	kfree(dev);
+}
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 36e8c44..5c274f1 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -25,7 +25,6 @@
 #include "u_ether.h"
 #include "rndis.h"
 
-
 /*
  * This function is an RNDIS Ethernet port -- a Microsoft protocol that's
  * been promoted instead of the standard CDC Ethernet.  The published RNDIS
@@ -67,6 +66,16 @@
  *   - MS-Windows drivers sometimes emit undocumented requests.
  */
 
+static unsigned int rndis_dl_max_pkt_per_xfer = 3;
+module_param(rndis_dl_max_pkt_per_xfer, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(rndis_dl_max_pkt_per_xfer,
+	"Maximum packets per transfer for DL aggregation");
+
+static unsigned int rndis_ul_max_pkt_per_xfer = 3;
+module_param(rndis_ul_max_pkt_per_xfer, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(rndis_ul_max_pkt_per_xfer,
+       "Maximum packets per transfer for UL aggregation");
+
 struct f_rndis {
 	struct gether			port;
 	u8				ctrl_id, data_id;
@@ -448,6 +457,7 @@
 {
 	struct f_rndis			*rndis = req->context;
 	int				status;
+	rndis_init_msg_type		*buf;
 
 	/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
 //	spin_lock(&dev->lock);
@@ -455,6 +465,21 @@
 	if (status < 0)
 		pr_err("RNDIS command error %d, %d/%d\n",
 			status, req->actual, req->length);
+
+	buf = (rndis_init_msg_type *)req->buf;
+
+	if (le32_to_cpu(buf->MessageType) == RNDIS_MSG_INIT) {
+		if (le32_to_cpu(buf->MaxTransferSize) > 2048)
+			rndis->port.multi_pkt_xfer = 1;
+		else
+			rndis->port.multi_pkt_xfer = 0;
+		pr_info("%s: MaxTransferSize: %u : Multi_pkt_txr: %s\n",
+				__func__, le32_to_cpu(buf->MaxTransferSize),
+				rndis->port.multi_pkt_xfer ? "enabled" :
+							    "disabled");
+		if (rndis_dl_max_pkt_per_xfer <= 1)
+			rndis->port.multi_pkt_xfer = 0;
+	}
 //	spin_unlock(&dev->lock);
 }
 
@@ -748,6 +773,7 @@
 
 	rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3, 0);
 	rndis_set_host_mac(rndis->config, rndis->ethaddr);
+	rndis_set_max_pkt_xfer(rndis->config, rndis_ul_max_pkt_per_xfer);
 
 	if (rndis->manufacturer && rndis->vendorID &&
 			rndis_set_param_vendor(rndis->config, rndis->vendorID,
@@ -821,12 +847,12 @@
 	if (!can_support_rndis(c) || !ethaddr)
 		return -EINVAL;
 
-	if (rndis_string_defs[0].id == 0) {
-		/* ... and setup RNDIS itself */
-		status = rndis_init();
-		if (status < 0)
-			return status;
+	/* setup RNDIS itself */
+	status = rndis_init();
+	if (status < 0)
+		return status;
 
+	if (rndis_string_defs[0].id == 0) {
 		status = usb_string_ids_tab(c->cdev, rndis_string_defs);
 		if (status)
 			return status;
@@ -854,6 +880,8 @@
 	rndis->port.header_len = sizeof(struct rndis_packet_msg_type);
 	rndis->port.wrap = rndis_add_header;
 	rndis->port.unwrap = rndis_rm_hdr;
+	rndis->port.ul_max_pkts_per_xfer = rndis_ul_max_pkt_per_xfer;
+	rndis->port.dl_max_pkts_per_xfer = rndis_dl_max_pkt_per_xfer;
 
 	rndis->port.func.name = "rndis";
 	rndis->port.func.strings = rndis_strings;
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index bcd04bc..a3b44f2 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -16,6 +16,7 @@
 #define __GADGET_CHIPS_H
 
 #include <linux/usb/gadget.h>
+#include <asm/intel-mid.h>
 
 /*
  * NOTICE: the entries below are alphabetical and should be kept
@@ -29,11 +30,13 @@
  */
 #define gadget_is_at91(g)		(!strcmp("at91_udc", (g)->name))
 #define gadget_is_goku(g)		(!strcmp("goku_udc", (g)->name))
+#define gadget_is_middwc3tng(g)		((!strcmp("dwc3-gadget", (g)->name)) && \
+					 (intel_mid_identify_cpu() ==	\
+					  INTEL_MID_CPU_CHIP_TANGIER))
 #define gadget_is_musbhdrc(g)		(!strcmp("musb-hdrc", (g)->name))
 #define gadget_is_net2280(g)		(!strcmp("net2280", (g)->name))
 #define gadget_is_pxa(g)		(!strcmp("pxa25x_udc", (g)->name))
 #define gadget_is_pxa27x(g)		(!strcmp("pxa27x_udc", (g)->name))
-
 /**
  * gadget_supports_altsettings - return true if altsettings work
  * @gadget: the gadget in question
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c
index 4a45e80..22f3eba 100644
--- a/drivers/usb/gadget/multi.c
+++ b/drivers/usb/gadget/multi.c
@@ -15,6 +15,7 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/moduleparam.h>
 
 #include "u_serial.h"
 #if defined USB_ETH_RNDIS
@@ -53,6 +54,12 @@
 
 USB_GADGET_COMPOSITE_OPTIONS();
 
+static char ethernet_config[6];
+module_param_string(ethernet_config, ethernet_config, sizeof(ethernet_config),
+	0444);
+MODULE_PARM_DESC(ethernet_config,
+	"ethernet configuration : should be cdc or rndis");
+
 /***************************** Device Descriptor ****************************/
 
 #define MULTI_VENDOR_NUM	0x1d6b	/* Linux Foundation */
@@ -308,13 +315,31 @@
 	device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
 
 	/* register configurations */
-	status = rndis_config_register(cdev);
-	if (unlikely(status < 0))
-		goto fail2;
+	/* RNDIS configuration */
+	if (strncmp(ethernet_config, "rndis", 5) == 0) {
+		status = rndis_config_register(cdev);
 
-	status = cdc_config_register(cdev);
-	if (unlikely(status < 0))
-		goto fail2;
+		if (unlikely(status < 0))
+			goto fail2;
+	} else if (strncmp(ethernet_config, "cdc", 3) == 0) {
+		/* CDC ECM configuration  */
+		status = cdc_config_register(cdev);
+
+		if (unlikely(status < 0))
+			goto fail2;
+	} else {
+		status = rndis_config_register(cdev);
+
+		if (unlikely(status < 0))
+			goto fail2;
+
+		status = cdc_config_register(cdev);
+
+		if (unlikely(status < 0))
+			goto fail2;
+	}
+
 	usb_composite_overwrite_options(cdev, &coverwrite);
 
 	/* we're done */
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 1e4cfb0..cb2767d 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -59,6 +59,16 @@
 
 #define RNDIS_MAX_CONFIGS	1
 
+int rndis_ul_max_pkt_per_xfer_rcvd;
+module_param(rndis_ul_max_pkt_per_xfer_rcvd, int, S_IRUGO);
+MODULE_PARM_DESC(rndis_ul_max_pkt_per_xfer_rcvd,
+		"Max num of REMOTE_NDIS_PACKET_MSGs received in a single transfer");
+
+int rndis_ul_max_xfer_size_rcvd;
+module_param(rndis_ul_max_xfer_size_rcvd, int, S_IRUGO);
+MODULE_PARM_DESC(rndis_ul_max_xfer_size_rcvd,
+		"Max size of bus transfer received");
+
 
 static rndis_params rndis_per_dev_params[RNDIS_MAX_CONFIGS];
 
@@ -585,12 +595,12 @@
 	resp->MinorVersion = cpu_to_le32(RNDIS_MINOR_VERSION);
 	resp->DeviceFlags = cpu_to_le32(RNDIS_DF_CONNECTIONLESS);
 	resp->Medium = cpu_to_le32(RNDIS_MEDIUM_802_3);
-	resp->MaxPacketsPerTransfer = cpu_to_le32(1);
-	resp->MaxTransferSize = cpu_to_le32(
-		  params->dev->mtu
+	resp->MaxPacketsPerTransfer = cpu_to_le32(params->max_pkt_per_xfer);
+	resp->MaxTransferSize = cpu_to_le32(params->max_pkt_per_xfer *
+		(params->dev->mtu
 		+ sizeof(struct ethhdr)
 		+ sizeof(struct rndis_packet_msg_type)
-		+ 22);
+		+ 22));
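+	/* worst case per packet (MTU + Ethernet header + RNDIS packet
+	 * header + the 22 bytes of slack the single-packet computation
+	 * already allowed), scaled by the aggregation factor
+	 */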
 	resp->PacketAlignmentFactor = cpu_to_le32(0);
 	resp->AFListOffset = cpu_to_le32(0);
 	resp->AFListSize = cpu_to_le32(0);
@@ -686,6 +696,12 @@
 	rndis_reset_cmplt_type *resp;
 	rndis_resp_t *r;
 	struct rndis_params *params = rndis_per_dev_params + configNr;
+	u32 length;
+	u8 *xbuf;
+
+	/* drain the response queue: responses queued before the reset
+	 * must not reach the host after the reset completes
+	 */
+	while ((xbuf = rndis_get_next_response(configNr, &length)))
+		rndis_free_response(configNr, xbuf);
 
 	r = rndis_add_response(configNr, sizeof(rndis_reset_cmplt_type));
 	if (!r)
@@ -910,6 +926,8 @@
 	rndis_per_dev_params[configNr].dev = dev;
 	rndis_per_dev_params[configNr].filter = cdc_filter;
 
+	rndis_ul_max_xfer_size_rcvd = 0;
+	rndis_ul_max_pkt_per_xfer_rcvd = 0;
 	return 0;
 }
 
@@ -936,6 +954,13 @@
 	return 0;
 }
 
+void rndis_set_max_pkt_xfer(u8 configNr, u8 max_pkt_per_xfer)
+{
+	pr_debug("%s:\n", __func__);
+
+	rndis_per_dev_params[configNr].max_pkt_per_xfer = max_pkt_per_xfer;
+}
+
 void rndis_add_hdr(struct sk_buff *skb)
 {
 	struct rndis_packet_msg_type *header;
@@ -1008,23 +1033,73 @@
 			struct sk_buff *skb,
 			struct sk_buff_head *list)
 {
-	/* tmp points to a struct rndis_packet_msg_type */
-	__le32 *tmp = (void *)skb->data;
+	int num_pkts = 1;
 
-	/* MessageType, MessageLength */
-	if (cpu_to_le32(RNDIS_MSG_PACKET)
-			!= get_unaligned(tmp++)) {
-		dev_kfree_skb_any(skb);
-		return -EINVAL;
-	}
-	tmp++;
+	if (skb->len > rndis_ul_max_xfer_size_rcvd)
+		rndis_ul_max_xfer_size_rcvd = skb->len;
 
-	/* DataOffset, DataLength */
-	if (!skb_pull(skb, get_unaligned_le32(tmp++) + 8)) {
-		dev_kfree_skb_any(skb);
-		return -EOVERFLOW;
+	while (skb->len) {
+		struct rndis_packet_msg_type *hdr;
+		struct sk_buff          *skb2;
+		u32             msg_len, data_offset, data_len;
+
+		/* some rndis hosts send extra byte to avoid zlp, ignore it */
+		if (skb->len == 1) {
+			dev_kfree_skb_any(skb);
+			return 0;
+		}
+
+		if (skb->len < sizeof *hdr) {
+			pr_err("invalid rndis pkt: skblen:%u hdr_len:%u",
+					skb->len, sizeof *hdr);
+			dev_kfree_skb_any(skb);
+			return -EINVAL;
+		}
+
+		hdr = (void *)skb->data;
+		msg_len = le32_to_cpu(hdr->MessageLength);
+		data_offset = le32_to_cpu(hdr->DataOffset);
+		data_len = le32_to_cpu(hdr->DataLength);
+
+		if (skb->len < msg_len ||
+				((data_offset + data_len + 8) > msg_len)) {
+			pr_err("invalid rndis message: %d/%d/%d/%d, len:%d\n",
+					le32_to_cpu(hdr->MessageType),
+					msg_len, data_offset, data_len, skb->len);
+			dev_kfree_skb_any(skb);
+			return -EOVERFLOW;
+		}
+		if (le32_to_cpu(hdr->MessageType) != RNDIS_MSG_PACKET) {
+			pr_err("invalid rndis message: %d/%d/%d/%d, len:%d\n",
+					le32_to_cpu(hdr->MessageType),
+					msg_len, data_offset, data_len, skb->len);
+			dev_kfree_skb_any(skb);
+			return -EINVAL;
+		}
+
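+		/* RNDIS counts DataOffset from the start of the DataOffset
+		 * field itself, i.e. from byte 8 of the message, hence the
+		 * "+ 8" when skipping to the Ethernet frame
+		 */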
+		skb_pull(skb, data_offset + 8);
+
+		if (msg_len == skb->len) {
+			skb_trim(skb, data_len);
+			break;
+		}
+
+		skb2 = skb_clone(skb, GFP_ATOMIC);
+		if (!skb2) {
+			pr_err("%s:skb clone failed\n", __func__);
+			dev_kfree_skb_any(skb);
+			return -ENOMEM;
+		}
+
+		skb_pull(skb, msg_len - sizeof *hdr);
+		skb_trim(skb2, data_len);
+		skb_queue_tail(list, skb2);
+
+		num_pkts++;
 	}
-	skb_trim(skb, get_unaligned_le32(tmp++));
+
+	if (num_pkts > rndis_ul_max_pkt_per_xfer_rcvd)
+		rndis_ul_max_pkt_per_xfer_rcvd = num_pkts;
 
 	skb_queue_tail(list, skb);
 	return 0;
@@ -1127,11 +1202,15 @@
 
 #endif /* CONFIG_USB_GADGET_DEBUG_FILES */
 
+static bool rndis_initialized;
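+/* set by rndis_init(), which f_rndis now calls on every bind, so both
+ * rndis_init() and rndis_exit() must be idempotent
+ */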
 
 int rndis_init(void)
 {
 	u8 i;
 
+	if (rndis_initialized)
+		return 0;
+
 	for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
 #ifdef	CONFIG_USB_GADGET_DEBUG_FILES
 		char name [20];
@@ -1158,6 +1237,7 @@
 		INIT_LIST_HEAD(&(rndis_per_dev_params[i].resp_queue));
 	}
 
+	rndis_initialized = true;
 	return 0;
 }
 
@@ -1166,7 +1246,13 @@
 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
 	u8 i;
 	char name[20];
+#endif
 
+	if (!rndis_initialized)
+		return;
+	rndis_initialized = false;
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
 	for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
 		sprintf(name, NAME_TEMPLATE, i);
 		remove_proc_entry(name, NULL);
diff --git a/drivers/usb/gadget/rndis.h b/drivers/usb/gadget/rndis.h
index 0647f2f..12045b3 100644
--- a/drivers/usb/gadget/rndis.h
+++ b/drivers/usb/gadget/rndis.h
@@ -189,6 +189,7 @@
 	struct net_device	*dev;
 
 	u32			vendorID;
+	u8			max_pkt_per_xfer;
 	const char		*vendorDescr;
 	void			(*resp_avail)(void *v);
 	void			*v;
@@ -204,6 +205,7 @@
 int  rndis_set_param_vendor (u8 configNr, u32 vendorID,
 			    const char *vendorDescr);
 int  rndis_set_param_medium (u8 configNr, u32 medium, u32 speed);
+void rndis_set_max_pkt_xfer(u8 configNr, u8 max_pkt_per_xfer);
 void rndis_add_hdr (struct sk_buff *skb);
 int rndis_rm_hdr(struct gether *port, struct sk_buff *skb,
 			struct sk_buff_head *list);
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index ef5c623..eecb416 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -48,6 +48,8 @@
 
 #define UETH__VERSION	"29-May-2008"
 
+static struct workqueue_struct	*uether_wq;
+
 struct eth_dev {
 	/* lock is held while accessing port_usb
 	 */
@@ -59,17 +61,25 @@
 
 	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
 	struct list_head	tx_reqs, rx_reqs;
-	atomic_t		tx_qlen;
+	unsigned		tx_qlen;
+/* Minimum number of TX USB request queued to UDC */
+#define TX_REQ_THRESHOLD	5
+	int			no_tx_req_used;
+	int			tx_skb_hold_count;
+	u32			tx_req_bufsize;
 
 	struct sk_buff_head	rx_frames;
 
 	unsigned		header_len;
+	unsigned		ul_max_pkts_per_xfer;
+	unsigned		dl_max_pkts_per_xfer;
 	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
 	int			(*unwrap)(struct gether *,
 						struct sk_buff *skb,
 						struct sk_buff_head *list);
 
 	struct work_struct	work;
+	struct work_struct	rx_work;
 
 	unsigned long		todo;
 #define	WORK_RX_MEMORY		0
@@ -84,7 +94,7 @@
 
 #define DEFAULT_QLEN	2	/* double buffering by default */
 
-static unsigned qmult = 5;
+static unsigned qmult = 10;
 module_param(qmult, uint, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed");
 
@@ -223,12 +233,14 @@
 	 */
 	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
 	size += dev->port_usb->header_len;
-	size += out->maxpacket - 1;
-	size -= size % out->maxpacket;
+
+	if (dev->ul_max_pkts_per_xfer)
+		size *= dev->ul_max_pkts_per_xfer;
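+	/* e.g. for a 1500-byte MTU and the RNDIS default of 3 packets per
+	 * transfer, each rx buffer is roughly 3 * (1500 + ethhdr + RX_EXTRA
+	 * + header_len) bytes (illustrative figures only)
+	 */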
 
 	if (dev->port_usb->is_fixed)
 		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
 
+	DBG(dev, "%s: size: %d\n", __func__, size);
 	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
 	if (skb == NULL) {
 		DBG(dev, "no rx skb\n");
@@ -254,18 +266,16 @@
 		DBG(dev, "rx submit --> %d\n", retval);
 		if (skb)
 			dev_kfree_skb_any(skb);
-		spin_lock_irqsave(&dev->req_lock, flags);
-		list_add(&req->list, &dev->rx_reqs);
-		spin_unlock_irqrestore(&dev->req_lock, flags);
 	}
 	return retval;
 }
 
 static void rx_complete(struct usb_ep *ep, struct usb_request *req)
 {
-	struct sk_buff	*skb = req->context, *skb2;
+	struct sk_buff	*skb = req->context;
 	struct eth_dev	*dev = ep->driver_data;
 	int		status = req->status;
+	bool		queue = false;
 
 	switch (status) {
 
@@ -281,6 +291,10 @@
 				status = dev->unwrap(dev->port_usb,
 							skb,
 							&dev->rx_frames);
+				if (status == -EINVAL)
+					dev->net->stats.rx_errors++;
+				else if (status == -EOVERFLOW)
+					dev->net->stats.rx_over_errors++;
 			} else {
 				dev_kfree_skb_any(skb);
 				status = -ENOTCONN;
@@ -289,30 +303,9 @@
 		} else {
 			skb_queue_tail(&dev->rx_frames, skb);
 		}
-		skb = NULL;
 
-		skb2 = skb_dequeue(&dev->rx_frames);
-		while (skb2) {
-			if (status < 0
-					|| ETH_HLEN > skb2->len
-					|| skb2->len > VLAN_ETH_FRAME_LEN) {
-				dev->net->stats.rx_errors++;
-				dev->net->stats.rx_length_errors++;
-				DBG(dev, "rx length %d\n", skb2->len);
-				dev_kfree_skb_any(skb2);
-				goto next_frame;
-			}
-			skb2->protocol = eth_type_trans(skb2, dev->net);
-			dev->net->stats.rx_packets++;
-			dev->net->stats.rx_bytes += skb2->len;
-
-			/* no buffer copies needed, unless hardware can't
-			 * use skb buffers.
-			 */
-			status = netif_rx(skb2);
-next_frame:
-			skb2 = skb_dequeue(&dev->rx_frames);
-		}
+		if (!status)
+			queue = 1;
 		break;
 
 	/* software-driven interface shutdown */
@@ -335,22 +328,20 @@
 		/* FALLTHROUGH */
 
 	default:
+		queue = 1;
+		dev_kfree_skb_any(skb);
 		dev->net->stats.rx_errors++;
 		DBG(dev, "rx status %d\n", status);
 		break;
 	}
 
-	if (skb)
-		dev_kfree_skb_any(skb);
-	if (!netif_running(dev->net)) {
 clean:
-		spin_lock(&dev->req_lock);
-		list_add(&req->list, &dev->rx_reqs);
-		spin_unlock(&dev->req_lock);
-		req = NULL;
-	}
-	if (req)
-		rx_submit(dev, req, GFP_ATOMIC);
+	spin_lock(&dev->req_lock);
+	list_add(&req->list, &dev->rx_reqs);
+	spin_unlock(&dev->req_lock);
+
+	if (queue)
+		queue_work(uether_wq, &dev->rx_work);
 }
 
 static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
@@ -415,16 +406,24 @@
 {
 	struct usb_request	*req;
 	unsigned long		flags;
+	int			req_cnt = 0;
 
 	/* fill unused rxq slots with some skb */
 	spin_lock_irqsave(&dev->req_lock, flags);
 	while (!list_empty(&dev->rx_reqs)) {
+		/* break the nexus of continuous completion and re-submission */
+		if (++req_cnt > qlen(dev->gadget))
+			break;
+
 		req = container_of(dev->rx_reqs.next,
 				struct usb_request, list);
 		list_del_init(&req->list);
 		spin_unlock_irqrestore(&dev->req_lock, flags);
 
 		if (rx_submit(dev, req, gfp_flags) < 0) {
+			spin_lock_irqsave(&dev->req_lock, flags);
+			list_add(&req->list, &dev->rx_reqs);
+			spin_unlock_irqrestore(&dev->req_lock, flags);
 			defer_kevent(dev, WORK_RX_MEMORY);
 			return;
 		}
@@ -434,6 +433,36 @@
 	spin_unlock_irqrestore(&dev->req_lock, flags);
 }
 
+static void process_rx_w(struct work_struct *work)
+{
+	struct eth_dev	*dev = container_of(work, struct eth_dev, rx_work);
+	struct sk_buff	*skb;
+	int		status = 0;
+
+	if (!dev->port_usb)
+		return;
+
+	while ((skb = skb_dequeue(&dev->rx_frames))) {
+		if (status < 0
+				|| ETH_HLEN > skb->len
+				|| skb->len > ETH_FRAME_LEN) {
+			dev->net->stats.rx_errors++;
+			dev->net->stats.rx_length_errors++;
+			DBG(dev, "rx length %d\n", skb->len);
+			dev_kfree_skb_any(skb);
+			continue;
+		}
+		skb->protocol = eth_type_trans(skb, dev->net);
+		dev->net->stats.rx_packets++;
+		dev->net->stats.rx_bytes += skb->len;
+
+		status = netif_rx_ni(skb);
+	}
+
+	if (netif_running(dev->net))
+		rx_fill(dev, GFP_KERNEL);
+}
+
 static void eth_work(struct work_struct *work)
 {
 	struct eth_dev	*dev = container_of(work, struct eth_dev, work);
@@ -451,6 +480,11 @@
 {
 	struct sk_buff	*skb = req->context;
 	struct eth_dev	*dev = ep->driver_data;
+	struct net_device *net = dev->net;
+	struct usb_request *new_req;
+	struct usb_ep *in;
+	int length;
+	int retval;
 
 	switch (req->status) {
 	default:
@@ -461,16 +495,74 @@
 	case -ESHUTDOWN:		/* disconnect etc */
 		break;
 	case 0:
-		dev->net->stats.tx_bytes += skb->len;
+		if (!req->zero)
+			dev->net->stats.tx_bytes += req->length - 1;
+		else
+			dev->net->stats.tx_bytes += req->length;
 	}
 	dev->net->stats.tx_packets++;
 
 	spin_lock(&dev->req_lock);
-	list_add(&req->list, &dev->tx_reqs);
-	spin_unlock(&dev->req_lock);
-	dev_kfree_skb_any(skb);
+	list_add_tail(&req->list, &dev->tx_reqs);
 
-	atomic_dec(&dev->tx_qlen);
+	if (dev->port_usb->multi_pkt_xfer) {
+		dev->no_tx_req_used--;
+		req->length = 0;
+		in = dev->port_usb->in_ep;
+
+		if (!list_empty(&dev->tx_reqs)) {
+			new_req = container_of(dev->tx_reqs.next,
+					struct usb_request, list);
+			list_del(&new_req->list);
+			spin_unlock(&dev->req_lock);
+			if (new_req->length > 0) {
+				length = new_req->length;
+
+				/* NCM requires no zlp if transfer is
+				 * dwNtbInMaxSize */
+				if (dev->port_usb->is_fixed &&
+					length == dev->port_usb->fixed_in_len &&
+					(length % in->maxpacket) == 0)
+					new_req->zero = 0;
+				else
+					new_req->zero = 1;
+
+				/* use zlp framing on tx for strict CDC-Ether
+				 * conformance, though any robust network rx
+				 * path ignores extra padding, and some
+				 * hardware doesn't like to write zlps.
+				 */
+				if (new_req->zero && !dev->zlp &&
+						(length % in->maxpacket) == 0) {
+					new_req->zero = 0;
+					length++;
+				}
+
+				new_req->length = length;
+				retval = usb_ep_queue(in, new_req, GFP_ATOMIC);
+				switch (retval) {
+				default:
+					DBG(dev, "tx queue err %d\n", retval);
+					break;
+				case 0:
+					spin_lock(&dev->req_lock);
+					dev->no_tx_req_used++;
+					spin_unlock(&dev->req_lock);
+					net->trans_start = jiffies;
+				}
+			} else {
+				spin_lock(&dev->req_lock);
+				list_add(&new_req->list, &dev->tx_reqs);
+				spin_unlock(&dev->req_lock);
+			}
+		} else {
+			spin_unlock(&dev->req_lock);
+		}
+	} else {
+		spin_unlock(&dev->req_lock);
+		dev_kfree_skb_any(skb);
+	}
+
 	if (netif_carrier_ok(dev->net))
 		netif_wake_queue(dev->net);
 }
@@ -480,6 +572,26 @@
 	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
 }
 
+static void alloc_tx_buffer(struct eth_dev *dev)
+{
+	struct list_head	*act;
+	struct usb_request	*req;
+
+	dev->tx_req_bufsize = (dev->dl_max_pkts_per_xfer *
+				(dev->net->mtu
+				+ sizeof(struct ethhdr)
+				/* size of rndis_packet_msg_type */
+				+ 44
+				+ 22));
+
+	list_for_each(act, &dev->tx_reqs) {
+		req = container_of(act, struct usb_request, list);
+		if (!req->buf)
+			req->buf = kmalloc(dev->tx_req_bufsize,
+						GFP_ATOMIC);
+	}
+}
+
 static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
 					struct net_device *net)
 {
@@ -506,6 +618,10 @@
 		return NETDEV_TX_OK;
 	}
 
+	/* Allocate memory for tx_reqs to support multi packet transfer */
+	if (dev->port_usb->multi_pkt_xfer && !dev->tx_req_bufsize)
+		alloc_tx_buffer(dev);
+
 	/* apply outgoing CDC or RNDIS filters */
 	if (!is_promisc(cdc_filter)) {
 		u8		*dest = skb->data;
@@ -560,11 +676,39 @@
 		spin_unlock_irqrestore(&dev->lock, flags);
 		if (!skb)
 			goto drop;
-
-		length = skb->len;
 	}
-	req->buf = skb->data;
-	req->context = skb;
+
+	spin_lock_irqsave(&dev->req_lock, flags);
+	dev->tx_skb_hold_count++;
+	spin_unlock_irqrestore(&dev->req_lock, flags);
+
+	if (dev->port_usb->multi_pkt_xfer) {
+		memcpy(req->buf + req->length, skb->data, skb->len);
+		req->length = req->length + skb->len;
+		length = req->length;
+		dev_kfree_skb_any(skb);
+
+		spin_lock_irqsave(&dev->req_lock, flags);
+		if (dev->tx_skb_hold_count < dev->dl_max_pkts_per_xfer) {
+			if (dev->no_tx_req_used > TX_REQ_THRESHOLD) {
+				list_add(&req->list, &dev->tx_reqs);
+				spin_unlock_irqrestore(&dev->req_lock, flags);
+				goto success;
+			}
+		}
+
+		dev->no_tx_req_used++;
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+
+		spin_lock_irqsave(&dev->lock, flags);
+		dev->tx_skb_hold_count = 0;
+		spin_unlock_irqrestore(&dev->lock, flags);
+	} else {
+		length = skb->len;
+		req->buf = skb->data;
+		req->context = skb;
+	}
+
 	req->complete = tx_complete;
 
 	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
@@ -579,11 +723,27 @@
 	 * though any robust network rx path ignores extra padding.
 	 * and some hardware doesn't like to write zlps.
 	 */
-	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
+	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) {
+		req->zero = 0;
 		length++;
+	}
 
 	req->length = length;
 
+	/* throttle highspeed IRQ rate back slightly */
+	if (gadget_is_dualspeed(dev->gadget) &&
+			 (dev->gadget->speed == USB_SPEED_HIGH)) {
+		dev->tx_qlen++;
+		if (dev->tx_qlen == (qmult/2)) {
+			req->no_interrupt = 0;
+			dev->tx_qlen = 0;
+		} else {
+			req->no_interrupt = 1;
+		}
+	} else {
+		req->no_interrupt = 0;
+	}
+
 	retval = usb_ep_queue(in, req, GFP_ATOMIC);
 	switch (retval) {
 	default:
@@ -591,11 +751,11 @@
 		break;
 	case 0:
 		net->trans_start = jiffies;
-		atomic_inc(&dev->tx_qlen);
 	}
 
 	if (retval) {
-		dev_kfree_skb_any(skb);
+		if (!dev->port_usb->multi_pkt_xfer)
+			dev_kfree_skb_any(skb);
 drop:
 		dev->net->stats.tx_dropped++;
 		spin_lock_irqsave(&dev->req_lock, flags);
@@ -604,6 +764,7 @@
 		list_add(&req->list, &dev->tx_reqs);
 		spin_unlock_irqrestore(&dev->req_lock, flags);
 	}
+success:
 	return NETDEV_TX_OK;
 }
 
@@ -617,7 +778,7 @@
 	rx_fill(dev, gfp_flags);
 
 	/* and open the tx floodgates */
-	atomic_set(&dev->tx_qlen, 0);
+	dev->tx_qlen = 0;
 	netif_wake_queue(dev->net);
 }
 
@@ -690,6 +851,8 @@
 
 /*-------------------------------------------------------------------------*/
 
+static u8 host_ethaddr[ETH_ALEN];
+
 /* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
 static char *dev_addr;
 module_param(dev_addr, charp, S_IRUGO);
@@ -721,6 +884,17 @@
 	return 1;
 }
 
+static int get_host_ether_addr(u8 *str, u8 *dev_addr)
+{
+	memcpy(dev_addr, str, ETH_ALEN);
+	if (is_valid_ether_addr(dev_addr))
+		return 0;
+
+	random_ether_addr(dev_addr);
+	memcpy(str, dev_addr, ETH_ALEN);
+	return 1;
+}
+
 static const struct net_device_ops eth_netdev_ops = {
 	.ndo_open		= eth_open,
 	.ndo_stop		= eth_stop,
@@ -763,6 +937,7 @@
 	spin_lock_init(&dev->lock);
 	spin_lock_init(&dev->req_lock);
 	INIT_WORK(&dev->work, eth_work);
+	INIT_WORK(&dev->rx_work, process_rx_w);
 	INIT_LIST_HEAD(&dev->tx_reqs);
 	INIT_LIST_HEAD(&dev->rx_reqs);
 
@@ -775,9 +950,11 @@
 	if (get_ether_addr(dev_addr, net->dev_addr))
 		dev_warn(&g->dev,
 			"using random %s ethernet address\n", "self");
-	if (get_ether_addr(host_addr, dev->host_mac))
-		dev_warn(&g->dev,
-			"using random %s ethernet address\n", "host");
+
+	if (get_host_ether_addr(host_ethaddr, dev->host_mac))
+		dev_warn(&g->dev, "using random %s ethernet address\n", "host");
+	else
+		dev_warn(&g->dev, "using previous %s ethernet address\n", "host");
 
 	if (ethaddr)
 		memcpy(ethaddr, dev->host_mac, ETH_ALEN);
@@ -875,8 +1052,13 @@
 		dev->header_len = link->header_len;
 		dev->unwrap = link->unwrap;
 		dev->wrap = link->wrap;
+		dev->ul_max_pkts_per_xfer = link->ul_max_pkts_per_xfer;
+		dev->dl_max_pkts_per_xfer = link->dl_max_pkts_per_xfer;
 
 		spin_lock(&dev->lock);
+		dev->tx_skb_hold_count = 0;
+		dev->no_tx_req_used = 0;
+		dev->tx_req_bufsize = 0;
 		dev->port_usb = link;
 		if (netif_running(dev->net)) {
 			if (link->open)
@@ -920,6 +1102,7 @@
 {
 	struct eth_dev		*dev = link->ioport;
 	struct usb_request	*req;
+	struct sk_buff		*skb;
 
 	WARN_ON(!dev);
 	if (!dev)
@@ -942,6 +1125,8 @@
 		list_del(&req->list);
 
 		spin_unlock(&dev->req_lock);
+		if (link->multi_pkt_xfer)
+			kfree(req->buf);
 		usb_ep_free_request(link->in_ep, req);
 		spin_lock(&dev->req_lock);
 	}
@@ -961,6 +1146,12 @@
 		spin_lock(&dev->req_lock);
 	}
 	spin_unlock(&dev->req_lock);
+
+	spin_lock(&dev->rx_frames.lock);
+	while ((skb = __skb_dequeue(&dev->rx_frames)))
+		dev_kfree_skb_any(skb);
+	spin_unlock(&dev->rx_frames.lock);
+
 	link->out_ep->driver_data = NULL;
 	link->out_ep->desc = NULL;
 
@@ -973,3 +1164,23 @@
 	dev->port_usb = NULL;
 	spin_unlock(&dev->lock);
 }
+
+static int __init gether_init(void)
+{
+	uether_wq  = create_singlethread_workqueue("uether");
+	if (!uether_wq) {
+		pr_err("%s: Unable to create workqueue: uether\n", __func__);
+		return -ENOMEM;
+	}
+	return 0;
+}
+module_init(gether_init);
+
+static void __exit gether_exit(void)
+{
+	destroy_workqueue(uether_wq);
+}
+module_exit(gether_exit);
+MODULE_DESCRIPTION("ethernet over USB driver");
+MODULE_LICENSE("GPL v2");
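
The u_ether.c rework above moves receive processing off the USB completion path: rx_complete() now just parks the request on rx_reqs, queues the skb on rx_frames, and schedules rx_work on the dedicated uether workqueue; process_rx_w() then validates frames, delivers them with netif_rx_ni(), and refills the ring in process context. A minimal, self-contained sketch of that defer-to-workqueue pattern (the demo_* names are illustrative, not the driver's):

	#include <linux/skbuff.h>
	#include <linux/workqueue.h>
	#include <linux/netdevice.h>

	struct demo_dev {
		struct sk_buff_head	frames;	/* filled from IRQ context */
		struct work_struct	rx_work;
	};

	/* Completion/IRQ context: queue and defer, never process here. */
	static void demo_complete(struct demo_dev *d, struct sk_buff *skb)
	{
		skb_queue_tail(&d->frames, skb);
		schedule_work(&d->rx_work);
	}

	/* Process context: safe to sleep; netif_rx_ni() is allowed here. */
	static void demo_rx_work(struct work_struct *work)
	{
		struct demo_dev *d = container_of(work, struct demo_dev,
						  rx_work);
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&d->frames)))
			netif_rx_ni(skb);
	}
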
diff --git a/drivers/usb/gadget/u_ether.h b/drivers/usb/gadget/u_ether.h
index 0252233..67eda50 100644
--- a/drivers/usb/gadget/u_ether.h
+++ b/drivers/usb/gadget/u_ether.h
@@ -54,6 +54,9 @@
 	bool				is_fixed;
 	u32				fixed_out_len;
 	u32				fixed_in_len;
+	unsigned		ul_max_pkts_per_xfer;
+	unsigned		dl_max_pkts_per_xfer;
+	bool				multi_pkt_xfer;
 	struct sk_buff			*(*wrap)(struct gether *port,
 						struct sk_buff *skb);
 	int				(*unwrap)(struct gether *port,
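
These struct gether additions are the opt-in knobs for the aggregated paths above: a function driver fills them before gether_connect(). A hedged sketch of that wiring (the numeric limits are assumptions for illustration):

	/* Hypothetical helper; the limits 3 and 10 are assumed values. */
	static void example_enable_aggregation(struct gether *link)
	{
		link->ul_max_pkts_per_xfer = 3;		/* scales rx buffer size */
		link->dl_max_pkts_per_xfer = 10;	/* tx packets per request */
		link->multi_pkt_xfer = true;		/* enable copy-and-batch tx */
	}
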
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index b369292..5559a15 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -1075,7 +1075,7 @@
 {
 	tasklet_kill(&port->push);
 	/* wait for old opens to finish */
-	wait_event(port->port.close_wait, gs_closed(port));
+	wait_event_timeout(port->port.close_wait, gs_closed(port),
+			msecs_to_jiffies(1000));
 	WARN_ON(port->port_usb != NULL);
 	tty_port_destroy(&port->port);
 	kfree(port);
@@ -1126,6 +1126,7 @@
 
 	tty_dev = tty_port_register_device(&ports[port_num].port->port,
 			gs_tty_driver, port_num, NULL);
+
 	if (IS_ERR(tty_dev)) {
 		struct gs_port	*port;
 		pr_err("%s: failed to register tty for port %d, err %ld\n",
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index 817a26c..7530190 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -23,6 +23,7 @@
 #include <linux/list.h>
 #include <linux/err.h>
 #include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
 
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
@@ -101,11 +102,18 @@
 
 /* ------------------------------------------------------------------------- */
 
+static void usb_gadget_state_work(struct work_struct *work)
+{
+	struct usb_gadget	*gadget = work_to_gadget(work);
+
+	sysfs_notify(&gadget->dev.kobj, NULL, "state");
+}
+
 void usb_gadget_set_state(struct usb_gadget *gadget,
 		enum usb_device_state state)
 {
 	gadget->state = state;
-	sysfs_notify(&gadget->dev.kobj, NULL, "state");
+	schedule_work(&gadget->work);
 }
 EXPORT_SYMBOL_GPL(usb_gadget_set_state);
 
@@ -192,6 +200,7 @@
 		goto err1;
 
 	dev_set_name(&gadget->dev, "gadget");
+	INIT_WORK(&gadget->work, usb_gadget_state_work);
 	gadget->dev.parent = parent;
 
 	dma_set_coherent_mask(&gadget->dev, parent->coherent_dma_mask);
@@ -309,6 +318,7 @@
 		usb_gadget_remove_driver(udc);
 
 	kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE);
+	flush_work(&gadget->work);
 	device_unregister(&udc->dev);
 	device_unregister(&gadget->dev);
 }
@@ -335,7 +345,15 @@
 		driver->unbind(udc->gadget);
 		goto err1;
 	}
-	usb_gadget_connect(udc->gadget);
+	/*
+	 * HACK: The Android gadget driver disconnects the gadget
+	 * on bind and expects the gadget to stay disconnected until
+	 * it calls usb_gadget_connect when userspace is ready. Remove
+	 * the call to usb_gadget_connect below to avoid enabling the
+	 * pullup before userspace is ready.
+	 *
+	 * usb_gadget_connect(udc->gadget);
+	 */
 
 	kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
 	return 0;
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index fe13156..5e29949 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -437,7 +437,7 @@
 
 static int __init ehci_pci_init(void)
 {
-	if (usb_disabled())
+	//if (usb_disabled())
 		return -ENODEV;
 
 	pr_info("%s: " DRIVER_DESC "\n", hcd_name);
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 66c9058..1dbed1c 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -359,6 +359,9 @@
  */
 void uhci_reset_hc(struct pci_dev *pdev, unsigned long base)
 {
+#ifdef CONFIG_MIPS_MALTA
+	int timeout = 10;
+#endif
 	/* Turn off PIRQ enable and SMI enable.  (This also turns off the
 	 * BIOS's USB Legacy Support.)  Turn off all the R/WC bits too.
 	 */
@@ -372,9 +375,16 @@
 	outw(UHCI_USBCMD_HCRESET, base + UHCI_USBCMD);
 	mb();
 	udelay(5);
-	if (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET)
-		dev_warn(&pdev->dev, "HCRESET not completed yet!\n");
 
+#ifdef CONFIG_MIPS_MALTA
+	while (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET) {
+		if (--timeout < 0) {
+			dev_warn(&pdev->dev, "HCRESET timed out!\n");
+			break;
+		}
+		udelay(5);
+	}
+#endif
 	/* Just to be safe, disable interrupt requests and
 	 * make sure the controller is stopped.
 	 */
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index da6f56d..6c55d88 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -71,7 +71,9 @@
 static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
 {
 	struct urb_priv *urbp = urb->hcpriv;
-
+#ifdef CONFIG_MIPS_MALTA
+	return;
+#endif
 	if (!(urb->transfer_flags & URB_NO_FSBR))
 		urbp->fsbr = 1;
 }
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 6e70ce9..37003c8 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -195,12 +195,23 @@
 };
 MODULE_ALIAS("platform:xhci-hcd");
 
+#ifdef CONFIG_USB_DWC3_HOST_INTEL
+#include "../dwc3/dwc3-host-intel.c"
+#endif
+
 int xhci_register_plat(void)
 {
+#ifdef CONFIG_USB_DWC3_HOST_INTEL
+	return platform_driver_register(&dwc3_xhci_driver);
+#endif
 	return platform_driver_register(&usb_xhci_driver);
 }
 
 void xhci_unregister_plat(void)
 {
+#ifdef CONFIG_USB_DWC3_HOST_INTEL
+	platform_driver_unregister(&dwc3_xhci_driver);
+	return;
+#endif
 	platform_driver_unregister(&usb_xhci_driver);
 }
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 507677b..4090b55 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -321,6 +321,9 @@
 
 	xhci_free_irq(xhci);
 
+	if (xhci->quirks & XHCI_PLAT)
+		return;
+
 	if (xhci->msix_entries) {
 		pci_disable_msix(pdev);
 		kfree(xhci->msix_entries);
@@ -2635,7 +2638,7 @@
 	spin_unlock_irqrestore(&xhci->lock, flags);
 
 	/* Wait for the configure endpoint command to complete */
-	timeleft = wait_for_completion_interruptible_timeout(
+	timeleft = wait_for_completion_timeout(
 			cmd_completion,
 			XHCI_CMD_DEFAULT_TIMEOUT);
 	if (timeleft <= 0) {
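
Dropping the _interruptible variant matters here because a pending signal makes wait_for_completion_interruptible_timeout() return -ERESTARTSYS immediately, which the timeleft <= 0 check would then misreport as a timed-out command. A sketch of the contrast (done is an assumed, already-initialized completion):

	/* May return -ERESTARTSYS if the task catches a signal. */
	long t1 = wait_for_completion_interruptible_timeout(&done, HZ);

	/* Returns 0 on timeout or the remaining jiffies; signals ignored. */
	long t2 = wait_for_completion_timeout(&done, HZ);
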
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 2311b1e..700d572 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -16,6 +16,14 @@
 	  If you're not sure if this applies to you, it probably doesn't;
 	  say N here.
 
+config USB_OTG_WAKELOCK
+	bool "Hold a wakelock when USB connected"
+	depends on WAKELOCK
+	select USB_OTG_UTILS
+	help
+	  Select this to automatically hold a wakelock when USB is
+	  connected, preventing suspend.
+
 if USB_PHY
 
 #
@@ -210,4 +218,13 @@
 	  Provides read/write operations to the ULPI phy register set for
 	  controllers with a viewport register (e.g. Chipidea/ARC controllers).
 
+config DUAL_ROLE_USB_INTF
+	bool "Generic DUAL ROLE sysfs interface"
+	depends on SYSFS && USB_PHY
+	help
+	  A generic sysfs interface to track and change the state of
+	  dual role usb phys. The usb phy drivers can register to
+	  this interface to expose its capabilities to userspace,
+	  thereby allowing userspace to change the port mode.
+
 endif # USB_PHY
diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile
index a9169cb..c70afde 100644
--- a/drivers/usb/phy/Makefile
+++ b/drivers/usb/phy/Makefile
@@ -5,7 +5,9 @@
 ccflags-$(CONFIG_USB_DEBUG) := -DDEBUG
 
 obj-$(CONFIG_USB_PHY)			+= phy.o
+obj-$(CONFIG_USB_OTG_WAKELOCK)		+= otg-wakelock.o
 
+obj-$(CONFIG_DUAL_ROLE_USB_INTF)	+= class-dual-role.o
 # transceiver drivers, keep the list sorted
 
 obj-$(CONFIG_AB8500_USB)		+= phy-ab8500-usb.o
diff --git a/drivers/usb/phy/class-dual-role.c b/drivers/usb/phy/class-dual-role.c
new file mode 100644
index 0000000..51fcb54
--- /dev/null
+++ b/drivers/usb/phy/class-dual-role.c
@@ -0,0 +1,529 @@
+/*
+ * class-dual-role.c
+ *
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/usb/class-dual-role.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+
+#define DUAL_ROLE_NOTIFICATION_TIMEOUT 2000
+
+static ssize_t dual_role_store_property(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count);
+static ssize_t dual_role_show_property(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf);
+
+#define DUAL_ROLE_ATTR(_name)				\
+{							\
+	.attr = { .name = #_name },			\
+	.show = dual_role_show_property,		\
+	.store = dual_role_store_property,		\
+}
+
+static struct device_attribute dual_role_attrs[] = {
+	DUAL_ROLE_ATTR(supported_modes),
+	DUAL_ROLE_ATTR(mode),
+	DUAL_ROLE_ATTR(power_role),
+	DUAL_ROLE_ATTR(data_role),
+	DUAL_ROLE_ATTR(powers_vconn),
+};
+
+struct class *dual_role_class;
+EXPORT_SYMBOL_GPL(dual_role_class);
+
+static struct device_type dual_role_dev_type;
+
+static char *kstrdupcase(const char *str, gfp_t gfp, bool to_upper)
+{
+	char *ret, *ustr;
+
+	ustr = ret = kmalloc(strlen(str) + 1, gfp);
+
+	if (!ret)
+		return NULL;
+
+	while (*str)
+		*ustr++ = to_upper ? toupper(*str++) : tolower(*str++);
+
+	*ustr = 0;
+
+	return ret;
+}
+
+static void dual_role_changed_work(struct work_struct *work)
+{
+	struct dual_role_phy_instance *dual_role =
+	    container_of(work, struct dual_role_phy_instance,
+			 changed_work);
+
+	dev_dbg(&dual_role->dev, "%s\n", __func__);
+	kobject_uevent(&dual_role->dev.kobj, KOBJ_CHANGE);
+}
+
+void dual_role_instance_changed(struct dual_role_phy_instance *dual_role)
+{
+	dev_dbg(&dual_role->dev, "%s\n", __func__);
+	pm_wakeup_event(&dual_role->dev, DUAL_ROLE_NOTIFICATION_TIMEOUT);
+	schedule_work(&dual_role->changed_work);
+}
+EXPORT_SYMBOL_GPL(dual_role_instance_changed);
+
+int dual_role_get_property(struct dual_role_phy_instance *dual_role,
+			   enum dual_role_property prop,
+			   unsigned int *val)
+{
+	return dual_role->desc->get_property(dual_role, prop, val);
+}
+EXPORT_SYMBOL_GPL(dual_role_get_property);
+
+int dual_role_set_property(struct dual_role_phy_instance *dual_role,
+			   enum dual_role_property prop,
+			   const unsigned int *val)
+{
+	if (!dual_role->desc->set_property)
+		return -ENODEV;
+
+	return dual_role->desc->set_property(dual_role, prop, val);
+}
+EXPORT_SYMBOL_GPL(dual_role_set_property);
+
+int dual_role_property_is_writeable(struct dual_role_phy_instance *dual_role,
+				    enum dual_role_property prop)
+{
+	if (!dual_role->desc->property_is_writeable)
+		return -ENODEV;
+
+	return dual_role->desc->property_is_writeable(dual_role, prop);
+}
+EXPORT_SYMBOL_GPL(dual_role_property_is_writeable);
+
+static void dual_role_dev_release(struct device *dev)
+{
+	struct dual_role_phy_instance *dual_role =
+	    container_of(dev, struct dual_role_phy_instance, dev);
+	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
+	kfree(dual_role);
+}
+
+static struct dual_role_phy_instance *__must_check
+__dual_role_register(struct device *parent,
+		     const struct dual_role_phy_desc *desc)
+{
+	struct device *dev;
+	struct dual_role_phy_instance *dual_role;
+	int rc;
+
+	dual_role = kzalloc(sizeof(*dual_role), GFP_KERNEL);
+	if (!dual_role)
+		return ERR_PTR(-ENOMEM);
+
+	dev = &dual_role->dev;
+
+	device_initialize(dev);
+
+	dev->class = dual_role_class;
+	dev->type = &dual_role_dev_type;
+	dev->parent = parent;
+	dev->release = dual_role_dev_release;
+	dev_set_drvdata(dev, dual_role);
+	dual_role->desc = desc;
+
+	rc = dev_set_name(dev, "%s", desc->name);
+	if (rc)
+		goto dev_set_name_failed;
+
+	INIT_WORK(&dual_role->changed_work, dual_role_changed_work);
+
+	rc = device_init_wakeup(dev, true);
+	if (rc)
+		goto wakeup_init_failed;
+
+	rc = device_add(dev);
+	if (rc)
+		goto device_add_failed;
+
+	dual_role_instance_changed(dual_role);
+
+	return dual_role;
+
+device_add_failed:
+	device_init_wakeup(dev, false);
+wakeup_init_failed:
+dev_set_name_failed:
+	/* put_device() drops the last reference, and the release
+	 * callback (dual_role_dev_release) frees dual_role, so an
+	 * explicit kfree() here would be a double free.
+	 */
+	put_device(dev);
+
+	return ERR_PTR(rc);
+}
+
+static void dual_role_instance_unregister(struct dual_role_phy_instance
+					  *dual_role)
+{
+	cancel_work_sync(&dual_role->changed_work);
+	device_init_wakeup(&dual_role->dev, false);
+	device_unregister(&dual_role->dev);
+}
+
+static void devm_dual_role_release(struct device *dev, void *res)
+{
+	struct dual_role_phy_instance **dual_role = res;
+
+	dual_role_instance_unregister(*dual_role);
+}
+
+struct dual_role_phy_instance *__must_check
+devm_dual_role_instance_register(struct device *parent,
+				 const struct dual_role_phy_desc *desc)
+{
+	struct dual_role_phy_instance **ptr, *dual_role;
+
+	ptr = devres_alloc(devm_dual_role_release, sizeof(*ptr), GFP_KERNEL);
+
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+	dual_role = __dual_role_register(parent, desc);
+	if (IS_ERR(dual_role)) {
+		devres_free(ptr);
+	} else {
+		*ptr = dual_role;
+		devres_add(parent, ptr);
+	}
+	return dual_role;
+}
+EXPORT_SYMBOL_GPL(devm_dual_role_instance_register);
+
+static int devm_dual_role_match(struct device *dev, void *res, void *data)
+{
+	struct dual_role_phy_instance **r = res;
+
+	if (WARN_ON(!r || !*r))
+		return 0;
+
+	return *r == data;
+}
+
+void devm_dual_role_instance_unregister(struct device *dev,
+					struct dual_role_phy_instance
+					*dual_role)
+{
+	int rc;
+
+	rc = devres_release(dev, devm_dual_role_release,
+			    devm_dual_role_match, dual_role);
+	WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_dual_role_instance_unregister);
+
+void *dual_role_get_drvdata(struct dual_role_phy_instance *dual_role)
+{
+	return dual_role->drv_data;
+}
+EXPORT_SYMBOL_GPL(dual_role_get_drvdata);
+
+/***************** Device attribute functions **************************/
+
+/* port type */
+static char *supported_modes_text[] = {
+	"ufp dfp", "dfp", "ufp"
+};
+
+/* current mode */
+static char *mode_text[] = {
+	"ufp", "dfp", "none"
+};
+
+/* Power role */
+static char *pr_text[] = {
+	"source", "sink", "none"
+};
+
+/* Data role */
+static char *dr_text[] = {
+	"host", "device", "none"
+};
+
+/* Vconn supply */
+static char *vconn_supply_text[] = {
+	"n", "y"
+};
+
+static ssize_t dual_role_show_property(struct device *dev,
+				       struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = 0;
+	struct dual_role_phy_instance *dual_role = dev_get_drvdata(dev);
+	const ptrdiff_t off = attr - dual_role_attrs;
+	unsigned int value;
+
+	if (off == DUAL_ROLE_PROP_SUPPORTED_MODES) {
+		value = dual_role->desc->supported_modes;
+	} else {
+		ret = dual_role_get_property(dual_role, off, &value);
+
+		if (ret < 0) {
+			if (ret == -ENODATA)
+				dev_dbg(dev,
+					"driver has no data for `%s' property\n",
+					attr->attr.name);
+			else if (ret != -ENODEV)
+				dev_err(dev,
+					"driver failed to report `%s' property: %zd\n",
+					attr->attr.name, ret);
+			return ret;
+		}
+	}
+
+	if (off == DUAL_ROLE_PROP_SUPPORTED_MODES) {
+		BUILD_BUG_ON(DUAL_ROLE_PROP_SUPPORTED_MODES_TOTAL !=
+			ARRAY_SIZE(supported_modes_text));
+		if (value < DUAL_ROLE_PROP_SUPPORTED_MODES_TOTAL)
+			return snprintf(buf, PAGE_SIZE, "%s\n",
+					supported_modes_text[value]);
+		else
+			return -EIO;
+	} else if (off == DUAL_ROLE_PROP_MODE) {
+		BUILD_BUG_ON(DUAL_ROLE_PROP_MODE_TOTAL !=
+			ARRAY_SIZE(mode_text));
+		if (value < DUAL_ROLE_PROP_MODE_TOTAL)
+			return snprintf(buf, PAGE_SIZE, "%s\n",
+					mode_text[value]);
+		else
+			return -EIO;
+	} else if (off == DUAL_ROLE_PROP_PR) {
+		BUILD_BUG_ON(DUAL_ROLE_PROP_PR_TOTAL != ARRAY_SIZE(pr_text));
+		if (value < DUAL_ROLE_PROP_PR_TOTAL)
+			return snprintf(buf, PAGE_SIZE, "%s\n",
+					pr_text[value]);
+		else
+			return -EIO;
+	} else if (off == DUAL_ROLE_PROP_DR) {
+		BUILD_BUG_ON(DUAL_ROLE_PROP_DR_TOTAL != ARRAY_SIZE(dr_text));
+		if (value < DUAL_ROLE_PROP_DR_TOTAL)
+			return snprintf(buf, PAGE_SIZE, "%s\n",
+					dr_text[value]);
+		else
+			return -EIO;
+	} else if (off == DUAL_ROLE_PROP_VCONN_SUPPLY) {
+		BUILD_BUG_ON(DUAL_ROLE_PROP_VCONN_SUPPLY_TOTAL !=
+				ARRAY_SIZE(vconn_supply_text));
+		if (value < DUAL_ROLE_PROP_VCONN_SUPPLY_TOTAL)
+			return snprintf(buf, PAGE_SIZE, "%s\n",
+					vconn_supply_text[value]);
+		else
+			return -EIO;
+	} else
+		return -EIO;
+}
+
+static ssize_t dual_role_store_property(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	ssize_t ret;
+	struct dual_role_phy_instance *dual_role = dev_get_drvdata(dev);
+	const ptrdiff_t off = attr - dual_role_attrs;
+	unsigned int value;
+	int total, i;
+	char *dup_buf, **text_array;
+	bool result = false;
+
+	dup_buf = kstrdupcase(buf, GFP_KERNEL, false);
+	if (!dup_buf)
+		return -ENOMEM;
+
+	switch (off) {
+	case DUAL_ROLE_PROP_MODE:
+		total = DUAL_ROLE_PROP_MODE_TOTAL;
+		text_array = mode_text;
+		break;
+	case DUAL_ROLE_PROP_PR:
+		total = DUAL_ROLE_PROP_PR_TOTAL;
+		text_array = pr_text;
+		break;
+	case DUAL_ROLE_PROP_DR:
+		total = DUAL_ROLE_PROP_DR_TOTAL;
+		text_array = dr_text;
+		break;
+	case DUAL_ROLE_PROP_VCONN_SUPPLY:
+		ret = strtobool(dup_buf, &result);
+		value = result;
+		if (!ret)
+			goto setprop;
+		/* fall through on parse error */
+	default:
+		ret = -EINVAL;
+		goto error;
+	}
+
+	for (i = 0; i <= total; i++) {
+		if (i == total) {
+			ret = -ENOTSUPP;
+			goto error;
+		}
+		if (!strncmp(*(text_array + i), dup_buf,
+			     strlen(*(text_array + i)))) {
+			value = i;
+			break;
+		}
+	}
+
+setprop:
+	ret = dual_role->desc->set_property(dual_role, off, &value);
+
+error:
+	kfree(dup_buf);
+
+	if (ret < 0)
+		return ret;
+
+	return count;
+}
+
+static umode_t dual_role_attr_is_visible(struct kobject *kobj,
+					 struct attribute *attr, int attrno)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct dual_role_phy_instance *dual_role = dev_get_drvdata(dev);
+	umode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
+	int i;
+
+	if (attrno == DUAL_ROLE_PROP_SUPPORTED_MODES)
+		return mode;
+
+	for (i = 0; i < dual_role->desc->num_properties; i++) {
+		int property = dual_role->desc->properties[i];
+
+		if (property == attrno) {
+			if (dual_role->desc->property_is_writeable &&
+			    dual_role_property_is_writeable(dual_role, property)
+			    > 0)
+				mode |= S_IWUSR;
+
+			return mode;
+		}
+	}
+
+	return 0;
+}
+
+static struct attribute *__dual_role_attrs[ARRAY_SIZE(dual_role_attrs) + 1];
+
+static struct attribute_group dual_role_attr_group = {
+	.attrs = __dual_role_attrs,
+	.is_visible = dual_role_attr_is_visible,
+};
+
+static const struct attribute_group *dual_role_attr_groups[] = {
+	&dual_role_attr_group,
+	NULL,
+};
+
+void dual_role_init_attrs(struct device_type *dev_type)
+{
+	int i;
+
+	dev_type->groups = dual_role_attr_groups;
+
+	for (i = 0; i < ARRAY_SIZE(dual_role_attrs); i++)
+		__dual_role_attrs[i] = &dual_role_attrs[i].attr;
+}
+
+int dual_role_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct dual_role_phy_instance *dual_role = dev_get_drvdata(dev);
+	int ret = 0, j;
+	char *prop_buf;
+	char *attrname;
+
+	dev_dbg(dev, "uevent\n");
+
+	if (!dual_role || !dual_role->desc) {
+		dev_dbg(dev, "No dual_role phy yet\n");
+		return ret;
+	}
+
+	dev_dbg(dev, "DUAL_ROLE_NAME=%s\n", dual_role->desc->name);
+
+	ret = add_uevent_var(env, "DUAL_ROLE_NAME=%s", dual_role->desc->name);
+	if (ret)
+		return ret;
+
+	prop_buf = (char *)get_zeroed_page(GFP_KERNEL);
+	if (!prop_buf)
+		return -ENOMEM;
+
+	for (j = 0; j < dual_role->desc->num_properties; j++) {
+		struct device_attribute *attr;
+		char *line;
+
+		attr = &dual_role_attrs[dual_role->desc->properties[j]];
+
+		ret = dual_role_show_property(dev, attr, prop_buf);
+		if (ret == -ENODEV || ret == -ENODATA) {
+			ret = 0;
+			continue;
+		}
+
+		if (ret < 0)
+			goto out;
+		line = strnchr(prop_buf, PAGE_SIZE, '\n');
+		if (line)
+			*line = 0;
+
+		attrname = kstrdupcase(attr->attr.name, GFP_KERNEL, true);
+		if (!attrname) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		dev_dbg(dev, "prop %s=%s\n", attrname, prop_buf);
+
+		ret = add_uevent_var(env, "DUAL_ROLE_%s=%s", attrname,
+				     prop_buf);
+		kfree(attrname);
+		if (ret)
+			goto out;
+	}
+
+out:
+	free_page((unsigned long)prop_buf);
+
+	return ret;
+}
+
+/******************* Module Init ***********************************/
+
+static int __init dual_role_class_init(void)
+{
+	dual_role_class = class_create(THIS_MODULE, "dual_role_usb");
+
+	if (IS_ERR(dual_role_class))
+		return PTR_ERR(dual_role_class);
+
+	dual_role_class->dev_uevent = dual_role_uevent;
+	dual_role_init_attrs(&dual_role_dev_type);
+
+	return 0;
+}
+
+static void __exit dual_role_class_exit(void)
+{
+	class_destroy(dual_role_class);
+}
+
+subsys_initcall(dual_role_class_init);
+module_exit(dual_role_class_exit);
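
A phy driver consumes this class by filling a dual_role_phy_desc and registering an instance. A hedged sketch of the producer side, using the properties and helpers defined above (the demo_* names, the "otg_demo" name, and the reported values are placeholders; field names follow the class-dual-role.h header this file includes):

	#include <linux/kernel.h>
	#include <linux/usb/class-dual-role.h>

	static enum dual_role_property demo_props[] = {
		DUAL_ROLE_PROP_MODE,
		DUAL_ROLE_PROP_PR,
		DUAL_ROLE_PROP_DR,
	};

	/* Placeholder: always reports the first enum value. */
	static int demo_get_prop(struct dual_role_phy_instance *inst,
				 enum dual_role_property prop,
				 unsigned int *val)
	{
		*val = 0;
		return 0;
	}

	static const struct dual_role_phy_desc demo_desc = {
		.name		 = "otg_demo",
		.supported_modes = DUAL_ROLE_SUPPORTED_MODES_DFP_AND_UFP,
		.properties	 = demo_props,
		.num_properties	 = ARRAY_SIZE(demo_props),
		.get_property	 = demo_get_prop,
	};

	/* In the phy driver's probe():
	 *	inst = devm_dual_role_instance_register(dev, &demo_desc);
	 */
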
diff --git a/drivers/usb/phy/otg-wakelock.c b/drivers/usb/phy/otg-wakelock.c
new file mode 100644
index 0000000..479376b
--- /dev/null
+++ b/drivers/usb/phy/otg-wakelock.c
@@ -0,0 +1,173 @@
+/*
+ * otg-wakelock.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/wakelock.h>
+#include <linux/spinlock.h>
+#include <linux/usb/otg.h>
+
+#define TEMPORARY_HOLD_TIME	2000
+
+static bool enabled = true;
+static struct usb_phy *otgwl_xceiv;
+static struct notifier_block otgwl_nb;
+
+/*
+ * otgwl_spinlock is held while the VBUS lock is grabbed or dropped and the
+ * held field is updated to match.
+ */
+
+static DEFINE_SPINLOCK(otgwl_spinlock);
+
+/*
+ * Only one lock, but since these 3 fields are associated with each other...
+ */
+
+struct otgwl_lock {
+	char name[40];
+	struct wake_lock wakelock;
+	bool held;
+};
+
+/*
+ * VBUS present lock.  Also used as a timed lock on charger
+ * connect/disconnect and USB host disconnect, to allow the system
+ * to react to the change in power.
+ */
+
+static struct otgwl_lock vbus_lock;
+
+static void otgwl_hold(struct otgwl_lock *lock)
+{
+	if (!lock->held) {
+		wake_lock(&lock->wakelock);
+		lock->held = true;
+	}
+}
+
+static void otgwl_temporary_hold(struct otgwl_lock *lock)
+{
+	wake_lock_timeout(&lock->wakelock,
+			  msecs_to_jiffies(TEMPORARY_HOLD_TIME));
+	lock->held = false;
+}
+
+static void otgwl_drop(struct otgwl_lock *lock)
+{
+	if (lock->held) {
+		wake_unlock(&lock->wakelock);
+		lock->held = false;
+	}
+}
+
+static void otgwl_handle_event(unsigned long event)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&otgwl_spinlock, irqflags);
+
+	if (!enabled) {
+		otgwl_drop(&vbus_lock);
+		spin_unlock_irqrestore(&otgwl_spinlock, irqflags);
+		return;
+	}
+
+	switch (event) {
+	case USB_EVENT_VBUS:
+	case USB_EVENT_ENUMERATED:
+		otgwl_hold(&vbus_lock);
+		break;
+
+	case USB_EVENT_NONE:
+	case USB_EVENT_ID:
+	case USB_EVENT_CHARGER:
+		otgwl_temporary_hold(&vbus_lock);
+		break;
+
+	default:
+		break;
+	}
+
+	spin_unlock_irqrestore(&otgwl_spinlock, irqflags);
+}
+
+static int otgwl_otg_notifications(struct notifier_block *nb,
+				   unsigned long event, void *unused)
+{
+	otgwl_handle_event(event);
+	return NOTIFY_OK;
+}
+
+static int set_enabled(const char *val, const struct kernel_param *kp)
+{
+	int rv = param_set_bool(val, kp);
+
+	if (rv)
+		return rv;
+
+	if (otgwl_xceiv)
+		otgwl_handle_event(otgwl_xceiv->last_event);
+
+	return 0;
+}
+
+static struct kernel_param_ops enabled_param_ops = {
+	.set = set_enabled,
+	.get = param_get_bool,
+};
+
+module_param_cb(enabled, &enabled_param_ops, &enabled, 0644);
+MODULE_PARM_DESC(enabled, "enable wakelock when VBUS present");
+
+static int __init otg_wakelock_init(void)
+{
+	int ret;
+	struct usb_phy *phy;
+
+	phy = usb_get_phy(USB_PHY_TYPE_USB2);
+
+	if (IS_ERR(phy)) {
+		pr_err("%s: No USB transceiver found\n", __func__);
+		return PTR_ERR(phy);
+	}
+	otgwl_xceiv = phy;
+
+	snprintf(vbus_lock.name, sizeof(vbus_lock.name), "vbus-%s",
+		 dev_name(otgwl_xceiv->dev));
+	wake_lock_init(&vbus_lock.wakelock, WAKE_LOCK_SUSPEND,
+		       vbus_lock.name);
+
+	otgwl_nb.notifier_call = otgwl_otg_notifications;
+	ret = usb_register_notifier(otgwl_xceiv, &otgwl_nb);
+
+	if (ret) {
+		pr_err("%s: usb_register_notifier on transceiver %s"
+		       " failed\n", __func__,
+		       dev_name(otgwl_xceiv->dev));
+		otgwl_xceiv = NULL;
+		wake_lock_destroy(&vbus_lock.wakelock);
+		return ret;
+	}
+
+	otgwl_handle_event(otgwl_xceiv->last_event);
+	return ret;
+}
+
+late_initcall(otg_wakelock_init);
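
otg-wakelock is purely a consumer of the usb_phy event notifier; the phy driver is the producer, recording last_event and firing the chain so that set_enabled() can replay the latest state. A hedged sketch of the producer side, assuming a 3.10-era struct usb_phy with an atomic notifier head:

	#include <linux/usb/phy.h>

	/* Hypothetical: 'phy' is an assumed, already-registered usb_phy. */
	static void demo_report_vbus(struct usb_phy *phy, bool present)
	{
		unsigned long event = present ? USB_EVENT_VBUS : USB_EVENT_NONE;

		phy->last_event = event;
		atomic_notifier_call_chain(&phy->notifier, event, NULL);
	}
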
diff --git a/drivers/usb/phy/penwell_otg.c b/drivers/usb/phy/penwell_otg.c
new file mode 100644
index 0000000..14b7d42
--- /dev/null
+++ b/drivers/usb/phy/penwell_otg.c
@@ -0,0 +1,5780 @@
+/*
+ * Intel Penwell USB OTG transceiver driver
+ * Copyright (C) 2009 - 2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+/* This driver helps to switch Penwell OTG controller function between host
+ * and peripheral. It works with EHCI driver and Penwell client controller
+ * driver together.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/moduleparam.h>
+#include <linux/gpio.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/otg.h>
+#include <linux/notifier.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/wakelock.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel-mid.h>
+#include "../core/usb.h"
+#include <linux/intel_mid_pm.h>
+
+#include <linux/usb/penwell_otg.h>
+
+#define	DRIVER_DESC		"Intel Penwell USB OTG transceiver driver"
+#define	DRIVER_VERSION		"0.8"
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Henry Yuan <hang.yuan@intel.com>, Hao Wu <hao.wu@intel.com>");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+
+static const char driver_name[] = "penwell_otg";
+
+static void penwell_otg_remove(struct pci_dev *pdev);
+
+static int penwell_otg_set_host(struct usb_otg *otg, struct usb_bus *host);
+static int penwell_otg_set_peripheral(struct usb_otg *otg,
+				      struct usb_gadget *gadget);
+static int penwell_otg_start_srp(struct usb_otg *otg);
+static void penwell_otg_mon_bus(void);
+
+static int penwell_otg_msic_write(u16 addr, u8 data);
+
+static void penwell_otg_phy_low_power(int on);
+static int penwell_otg_ulpi_read(struct intel_mid_otg_xceiv *iotg,
+				u8 reg, u8 *val);
+static int penwell_otg_ulpi_write(struct intel_mid_otg_xceiv *iotg,
+				u8 reg, u8 val);
+static void penwell_spi_reset_phy(void);
+static int penwell_otg_charger_hwdet(bool enable);
+static void update_hsm(void);
+static void set_client_mode(void);
+
+#ifdef CONFIG_DEBUG_FS
+unsigned int *pm_sss0_base;
+
+int check_pm_otg(void)
+{
+	/* check whether bit 12 and 13 are 0 */
+	/* printk(">>>>leon, pm_sss0_base:0x%x\n", *(pm_sss0_base)); */
+	if (pm_sss0_base)
+		return (*pm_sss0_base) & 0x3000;
+	else
+		return 0;
+}
+#ifdef readl
+#undef readl
+#endif
+#ifdef writel
+#undef writel
+#endif
+#define readl(addr) ({ if (check_pm_otg()) { \
+	panic("usb otg, read reg:%p, pm_sss0_base:0x%x",  \
+	addr, *(pm_sss0_base)); }; __le32_to_cpu(__raw_readl(addr)); })
+#define writel(b, addr) ({ if (check_pm_otg()) { \
+	panic("usb otg, write reg:%p, pm_sss0_base:0x%x",  \
+	addr, *(pm_sss0_base)); }; __raw_writel(__cpu_to_le32(b), addr); })
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+#include <linux/suspend.h>
+DECLARE_WAIT_QUEUE_HEAD(stop_host_wait);
+atomic_t pnw_sys_suspended;
+
+static int pnw_sleep_pm_callback(struct notifier_block *nfb,
+			unsigned long action, void *ignored)
+{
+	switch (action) {
+	case PM_SUSPEND_PREPARE:
+		atomic_set(&pnw_sys_suspended, 1);
+		return NOTIFY_OK;
+	case PM_POST_SUSPEND:
+		atomic_set(&pnw_sys_suspended, 0);
+		return NOTIFY_OK;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block pnw_sleep_pm_notifier = {
+	.notifier_call = pnw_sleep_pm_callback,
+	.priority = 0
+};
+
+/* the root hub will call this callback when device added/removed */
+static int otg_notify(struct notifier_block *nb, unsigned long action,
+		struct usb_device *udev)
+{
+	struct usb_phy			*otg;
+	struct intel_mid_otg_xceiv	*iotg;
+
+	/* Skip bus add/remove notifications: accessing udev->parent could
+	 * panic if a bus is registered and unregistered quickly (setup
+	 * failure), and we do not care about bus events anyway.
+	 */
+	if (action == USB_BUS_ADD || action == USB_BUS_REMOVE)
+		return NOTIFY_DONE;
+
+	/* Ignore root hub add/remove event */
+	if (!udev->parent) {
+		pr_debug("%s Ignore root hub otg_notify\n", __func__);
+		return NOTIFY_DONE;
+	}
+
+	/* Ignore USB devices on external hub */
+	if (udev->parent && udev->parent->parent) {
+		pr_debug("%s Ignore USB devices on external hub\n", __func__);
+		return NOTIFY_DONE;
+	}
+
+	otg = usb_get_phy(USB_PHY_TYPE_USB2);
+	if (otg == NULL) {
+		pr_err("%s: failed to get otg transceiver\n", __func__);
+		return NOTIFY_BAD;
+	}
+	iotg = otg_to_mid_xceiv(otg);
+
+	switch (action) {
+	case USB_DEVICE_ADD:
+		pr_debug("Notify OTG HNP add device\n");
+		atomic_notifier_call_chain(&iotg->iotg_notifier,
+					MID_OTG_NOTIFY_CONNECT, iotg);
+		break;
+	case USB_DEVICE_REMOVE:
+		pr_debug("Notify OTG HNP delete device\n");
+		atomic_notifier_call_chain(&iotg->iotg_notifier,
+					MID_OTG_NOTIFY_DISCONN, iotg);
+		break;
+	case USB_OTG_TESTDEV:
+		pr_debug("Notify OTG test device\n");
+		atomic_notifier_call_chain(&iotg->iotg_notifier,
+					MID_OTG_NOTIFY_TEST, iotg);
+		break;
+	case USB_OTG_TESTDEV_VBUSOFF:
+		pr_debug("Notify OTG test device, Vbusoff mode\n");
+		atomic_notifier_call_chain(&iotg->iotg_notifier,
+					MID_OTG_NOTIFY_TEST_VBUS_OFF, iotg);
+		break;
+	default:
+		usb_put_phy(otg);
+		return NOTIFY_DONE;
+	}
+	usb_put_phy(otg);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block otg_nb = {
+	.notifier_call = otg_notify,
+};
+
+#define PNW_PM_RESUME_WAIT(a) do { \
+		while (atomic_read(&pnw_sys_suspended)) { \
+			wait_event_timeout(a, false, HZ/100); \
+		} \
+	} while (0)
+#else
+
+#define PNW_PM_RESUME_WAIT(a)
+
+#endif
+
+#define PNW_STOP_HOST(pnw) do { \
+	if ((pnw)->iotg.stop_host) { \
+		PNW_PM_RESUME_WAIT(stop_host_wait); \
+		(pnw)->iotg.stop_host(&(pnw)->iotg); \
+	} \
+	} while (0)
+
+inline int is_clovertrail(struct pci_dev *pdev)
+{
+	return (pdev->vendor == 0x8086 && pdev->device == 0xE006);
+}
+EXPORT_SYMBOL_GPL(is_clovertrail);
+
+static const char *state_string(enum usb_otg_state state)
+{
+	switch (state) {
+	case OTG_STATE_A_IDLE:
+		return "a_idle";
+	case OTG_STATE_A_WAIT_VRISE:
+		return "a_wait_vrise";
+	case OTG_STATE_A_WAIT_BCON:
+		return "a_wait_bcon";
+	case OTG_STATE_A_HOST:
+		return "a_host";
+	case OTG_STATE_A_SUSPEND:
+		return "a_suspend";
+	case OTG_STATE_A_PERIPHERAL:
+		return "a_peripheral";
+	case OTG_STATE_A_WAIT_VFALL:
+		return "a_wait_vfall";
+	case OTG_STATE_A_VBUS_ERR:
+		return "a_vbus_err";
+	case OTG_STATE_B_IDLE:
+		return "b_idle";
+	case OTG_STATE_B_PERIPHERAL:
+		return "b_peripheral";
+	case OTG_STATE_B_WAIT_ACON:
+		return "b_wait_acon";
+	case OTG_STATE_B_HOST:
+		return "b_host";
+	default:
+		return "UNDEFINED";
+	}
+}
+
+static const char *charger_string(enum usb_charger_type charger)
+{
+	switch (charger) {
+	case CHRG_SDP:
+		return "Standard Downstream Port";
+	case CHRG_SDP_INVAL:
+		return "Invalid Standard Downstream Port";
+	case CHRG_CDP:
+		return "Charging Downstream Port";
+	case CHRG_DCP:
+		return "Dedicated Charging Port";
+	case CHRG_ACA:
+		return "Accessory Charger Adaptor";
+	case CHRG_SE1:
+		return "SE1 Charger";
+	case CHRG_UNKNOWN:
+		return "Unknown";
+	default:
+		return "Undefined";
+	}
+}
+
+static const char *psc_string(enum power_supply_charger_cable_type charger)
+{
+	switch (charger) {
+	case POWER_SUPPLY_CHARGER_TYPE_USB_SDP:
+		return "Standard Downstream Port";
+	case POWER_SUPPLY_CHARGER_TYPE_USB_CDP:
+		return "Charging Downstream Port";
+	case POWER_SUPPLY_CHARGER_TYPE_USB_DCP:
+		return "Dedicated Charging Port";
+	case POWER_SUPPLY_CHARGER_TYPE_USB_ACA:
+		return "Accessory Charger Adaptor";
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK:
+		return "Accessory Charger Adaptor Dock";
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_A:
+		return "Accessory Charger Adaptor Type A";
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_B:
+		return "Accessory Charger Adaptor Type B";
+	case POWER_SUPPLY_CHARGER_TYPE_ACA_C:
+		return "Accessory Charger Adaptor Type C";
+	case POWER_SUPPLY_CHARGER_TYPE_SE1:
+		return "SE1 Charger";
+	case POWER_SUPPLY_CHARGER_TYPE_NONE:
+		return "Unknown";
+	default:
+		return "Undefined";
+	}
+}
+
+static struct penwell_otg *the_transceiver;
+
+void penwell_update_transceiver(void)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	unsigned long	flags;
+
+	if (!pnw->qwork) {
+		dev_warn(pnw->dev, "no workqueue for state machine\n");
+		return;
+	}
+
+	spin_lock_irqsave(&pnw->lock, flags);
+	if (!pnw->queue_stop) {
+		queue_work(pnw->qwork, &pnw->work);
+		dev_dbg(pnw->dev, "transceiver is updated\n");
+	}
+	spin_unlock_irqrestore(&pnw->lock, flags);
+}
+
+static int penwell_otg_set_host(struct usb_otg *otg, struct usb_bus *host)
+{
+	otg->host = host;
+
+	return 0;
+}
+
+static int penwell_otg_set_peripheral(struct usb_otg *otg,
+				      struct usb_gadget *gadget)
+{
+	otg->gadget = gadget;
+
+	return 0;
+}
+
+static void penwell_otg_set_charger(enum usb_charger_type charger)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+
+	dev_dbg(pnw->dev, "%s ---> %s\n", __func__,
+			charger_string(charger));
+
+	switch (charger) {
+	case CHRG_SDP:
+	case CHRG_DCP:
+	case CHRG_CDP:
+	case CHRG_ACA:
+	case CHRG_SDP_INVAL:
+	case CHRG_SE1:
+	case CHRG_UNKNOWN:
+		pnw->charging_cap.chrg_type = charger;
+		break;
+	default:
+		dev_warn(pnw->dev, "undefined charger type\n");
+		break;
+	}
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+}
+
+static void _penwell_otg_update_chrg_cap(enum usb_charger_type charger,
+				unsigned ma)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	int			flag = 0;
+	int			event, retval;
+
+	dev_dbg(pnw->dev, "%s = %s, %d --->\n", __func__,
+			charger_string(charger), ma);
+
+	/* Check charger type information */
+	if (pnw->charging_cap.chrg_type != charger) {
+		if (pnw->charging_cap.chrg_type == CHRG_UNKNOWN ||
+			charger == CHRG_UNKNOWN) {
+			penwell_otg_set_charger(charger);
+		} else if (pnw->charging_cap.chrg_type == CHRG_SDP &&
+			charger == CHRG_SDP_INVAL) {
+			penwell_otg_set_charger(charger);
+		} else
+			return;
+	} else {
+		/* Do nothing if no update for current */
+		if (pnw->charging_cap.ma == ma)
+			return;
+	}
+
+	/* set current */
+	switch (pnw->charging_cap.chrg_type) {
+	case CHRG_SDP:
+		if (pnw->charging_cap.ma == CHRG_CURR_DISCONN
+				&& (ma == CHRG_CURR_SDP_LOW
+				|| ma == CHRG_CURR_SDP_HIGH)) {
+			/* SDP event: charger connect */
+			event = USBCHRG_EVENT_CONNECT;
+			flag = 1;
+		} else if (pnw->charging_cap.ma == CHRG_CURR_SDP_LOW
+				&& ma == CHRG_CURR_SDP_HIGH) {
+			/* SDP event: configuration update */
+			event = USBCHRG_EVENT_UPDATE;
+			flag = 1;
+		} else if (pnw->charging_cap.ma == CHRG_CURR_SDP_HIGH
+				&& ma == CHRG_CURR_SDP_LOW) {
+			/* SDP event: configuration update */
+			event = USBCHRG_EVENT_UPDATE;
+			flag = 1;
+		} else if (pnw->charging_cap.ma == CHRG_CURR_SDP_SUSP
+				&& (ma == CHRG_CURR_SDP_LOW
+				|| ma == CHRG_CURR_SDP_HIGH)) {
+			/* SDP event: resume from suspend state */
+			event = USBCHRG_EVENT_RESUME;
+			flag = 1;
+		} else if ((pnw->charging_cap.ma == CHRG_CURR_SDP_LOW
+			|| pnw->charging_cap.ma == CHRG_CURR_SDP_HIGH)
+				&& ma == CHRG_CURR_SDP_SUSP) {
+			/* SDP event: enter suspend state */
+			event = USBCHRG_EVENT_SUSPEND;
+			flag = 1;
+		} else if (ma == 0) {
+			event = USBCHRG_EVENT_DISCONN;
+			flag = 1;
+		} else
+			dev_dbg(pnw->dev, "SDP: no need to update EM\n");
+		break;
+	case CHRG_DCP:
+		if (ma == CHRG_CURR_DCP) {
+			/* DCP event: charger connect */
+			event = USBCHRG_EVENT_CONNECT;
+			flag = 1;
+		} else
+			dev_dbg(pnw->dev, "DCP: no need to update EM\n");
+		break;
+	case CHRG_SE1:
+		if (ma == CHRG_CURR_SE1) {
+			/* SE1 event: charger connect */
+			event = USBCHRG_EVENT_CONNECT;
+			flag = 1;
+		} else
+			dev_dbg(pnw->dev, "SE1: no need to update EM\n");
+		break;
+	case CHRG_CDP:
+		if (pnw->charging_cap.ma == CHRG_CURR_DISCONN
+				&& ma == CHRG_CURR_CDP) {
+			/* CDP event: charger connect */
+			event = USBCHRG_EVENT_CONNECT;
+			flag = 1;
+		} else
+			dev_dbg(pnw->dev, "CDP: no need to update EM\n");
+		break;
+	case CHRG_UNKNOWN:
+		if (ma == CHRG_CURR_DISCONN) {
+			/* event: chargers disconnect */
+			event = USBCHRG_EVENT_DISCONN;
+			flag = 1;
+		} else
+			dev_dbg(pnw->dev, "UNKNOWN: no need to update EM\n");
+		break;
+	case CHRG_SDP_INVAL:
+		if (ma == CHRG_CURR_SDP_INVAL) {
+			event = USBCHRG_EVENT_UPDATE;
+			flag = 1;
+		} else
+			dev_dbg(pnw->dev, "SDP_INVAL: no need to update EM\n");
+		break;
+	default:
+		break;
+	}
+
+	if (flag) {
+		pnw->charging_cap.ma = ma;
+		pnw->charging_cap.current_event = event;
+
+		/* Notify EM the charging current update */
+		dev_dbg(pnw->dev, "Notify EM charging capability change\n");
+		dev_dbg(pnw->dev, "%s event = %d ma = %d\n",
+			charger_string(pnw->charging_cap.chrg_type), event, ma);
+
+		if (pnw->bc_callback) {
+			retval = pnw->bc_callback(pnw->bc_arg, event,
+					&pnw->charging_cap);
+			if (retval)
+				dev_dbg(pnw->dev,
+					"bc callback return %d\n", retval);
+		}
+	}
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+}
+
+static enum power_supply_charger_cable_type usb_chrg_to_power_supply_chrg(
+				enum usb_charger_type chrg_type)
+{
+	switch (chrg_type) {
+	case CHRG_UNKNOWN: return POWER_SUPPLY_CHARGER_TYPE_NONE;
+	case CHRG_SDP: return POWER_SUPPLY_CHARGER_TYPE_USB_SDP;
+	case CHRG_CDP: return POWER_SUPPLY_CHARGER_TYPE_USB_CDP;
+	case CHRG_SDP_INVAL: return POWER_SUPPLY_CHARGER_TYPE_USB_SDP;
+	case CHRG_DCP: return POWER_SUPPLY_CHARGER_TYPE_USB_DCP;
+	case CHRG_ACA: return POWER_SUPPLY_CHARGER_TYPE_USB_ACA;
+	case CHRG_ACA_DOCK: return POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK;
+	case CHRG_ACA_A: return POWER_SUPPLY_CHARGER_TYPE_ACA_A;
+	case CHRG_ACA_B: return POWER_SUPPLY_CHARGER_TYPE_ACA_B;
+	case CHRG_ACA_C: return POWER_SUPPLY_CHARGER_TYPE_ACA_C;
+	case CHRG_SE1: return POWER_SUPPLY_CHARGER_TYPE_SE1;
+	case CHRG_MHL: return POWER_SUPPLY_CHARGER_TYPE_MHL;
+	default: return POWER_SUPPLY_CHARGER_TYPE_NONE;
+	}
+}
+
+static enum power_supply_charger_event check_psc_event(
+			struct power_supply_cable_props old,
+			struct power_supply_cable_props new)
+{
+	struct penwell_otg *pnw = the_transceiver;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	/* Check charger type information */
+	if (old.chrg_type != new.chrg_type) {
+		if (old.chrg_type == POWER_SUPPLY_CHARGER_TYPE_NONE
+					&& new.ma != 0)
+			return POWER_SUPPLY_CHARGER_EVENT_CONNECT;
+		else if (new.chrg_type == POWER_SUPPLY_CHARGER_TYPE_NONE)
+			return POWER_SUPPLY_CHARGER_EVENT_DISCONNECT;
+		else {
+			dev_dbg(pnw->dev, "not a valid event\n");
+			return -1;
+		}
+	}
+
+	/* Check the charging current limit */
+	if (old.ma == new.ma) {
+		dev_dbg(pnw->dev, "not a valid event\n");
+		return -1;
+	}
+
+	switch (new.chrg_type) {
+	case POWER_SUPPLY_CHARGER_TYPE_USB_SDP:
+		if (old.ma == CHRG_CURR_DISCONN &&
+		   (new.ma == CHRG_CURR_SDP_LOW ||
+		    new.ma == CHRG_CURR_SDP_HIGH)) {
+			/* SDP event: charger connect */
+			return POWER_SUPPLY_CHARGER_EVENT_CONNECT;
+		} else if (old.ma == CHRG_CURR_SDP_LOW &&
+			  new.ma == CHRG_CURR_SDP_HIGH) {
+			/* SDP event: configuration update */
+			return POWER_SUPPLY_CHARGER_EVENT_UPDATE;
+		} else if (old.ma == CHRG_CURR_SDP_HIGH &&
+			  new.ma == CHRG_CURR_SDP_LOW) {
+			/* SDP event: configuration update */
+			return POWER_SUPPLY_CHARGER_EVENT_UPDATE;
+		} else if (old.ma == CHRG_CURR_SDP_SUSP &&
+			  (new.ma == CHRG_CURR_SDP_LOW ||
+			   new.ma == CHRG_CURR_SDP_HIGH)) {
+			/* SDP event: resume from suspend state */
+			return POWER_SUPPLY_CHARGER_EVENT_RESUME;
+		} else if ((old.ma == CHRG_CURR_SDP_LOW ||
+			   old.ma == CHRG_CURR_SDP_HIGH) &&
+			  new.ma == CHRG_CURR_SDP_SUSP) {
+			/* SDP event: enter suspend state */
+			return POWER_SUPPLY_CHARGER_EVENT_SUSPEND;
+		} else
+			dev_dbg(pnw->dev, "SDP: no need to update EM\n");
+		break;
+	case POWER_SUPPLY_CHARGER_TYPE_USB_DCP:
+		if (new.ma == CHRG_CURR_DCP) {
+			/* DCP event: charger connect */
+			return POWER_SUPPLY_CHARGER_EVENT_CONNECT;
+		} else
+			dev_dbg(pnw->dev, "DCP: no need to update EM\n");
+		break;
+	case POWER_SUPPLY_CHARGER_TYPE_USB_CDP:
+		if (pnw->charging_cap.ma == CHRG_CURR_DISCONN &&
+		   new.ma == CHRG_CURR_CDP) {
+			/* CDP event: charger connect */
+			return POWER_SUPPLY_CHARGER_EVENT_CONNECT;
+		} else
+			dev_dbg(pnw->dev, "CDP: no need to update EM\n");
+		break;
+	case POWER_SUPPLY_CHARGER_TYPE_NONE:
+		if (new.ma == CHRG_CURR_DISCONN) {
+			/* event: chargers disconnect */
+			return POWER_SUPPLY_CHARGER_EVENT_DISCONNECT;
+		} else
+			dev_dbg(pnw->dev, "UNKNOWN: no need to update EM\n");
+		break;
+	default:
+		break;
+	}
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+
+	return -1;
+}
+
+static void penwell_otg_update_chrg_cap(enum usb_charger_type charger,
+				unsigned ma)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct pci_dev			*pdev;
+	unsigned long			flags;
+	struct otg_bc_event		*event;
+
+	pdev = to_pci_dev(pnw->dev);
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	if (!is_clovertrail(pdev)) {
+		spin_lock_irqsave(&pnw->charger_lock, flags);
+		_penwell_otg_update_chrg_cap(charger, ma);
+		spin_unlock_irqrestore(&pnw->charger_lock, flags);
+	} else {
+		dev_dbg(pnw->dev, "clv cable_props_update\n");
+
+		event = kzalloc(sizeof(*event), GFP_ATOMIC);
+		if (!event) {
+			dev_err(pnw->dev, "no memory for charging event");
+			return;
+		}
+
+		event->cap.chrg_type = usb_chrg_to_power_supply_chrg(charger);
+		event->cap.ma = ma;
+		INIT_LIST_HEAD(&event->node);
+
+		spin_lock_irqsave(&pnw->charger_lock, flags);
+		list_add_tail(&event->node, &pnw->chrg_evt_queue);
+		spin_unlock_irqrestore(&pnw->charger_lock, flags);
+
+		queue_work(pnw->chrg_qwork, &pnw->psc_notify);
+	}
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+}
+
+static int penwell_otg_set_power(struct usb_phy *otg, unsigned ma)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	unsigned long			flags;
+	struct pci_dev			*pdev;
+	struct otg_bc_event		*event;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	pdev = to_pci_dev(pnw->dev);
+
+	if (!is_clovertrail(pdev)) {
+		spin_lock_irqsave(&pnw->charger_lock, flags);
+
+		if (pnw->charging_cap.chrg_type != CHRG_SDP) {
+			spin_unlock_irqrestore(&pnw->charger_lock, flags);
+			return 0;
+		}
+
+		if (!pnw->otg_pdata->charging_compliance)
+			ma = CHRG_CURR_SDP_HIGH;
+
+		_penwell_otg_update_chrg_cap(CHRG_SDP, ma);
+
+		spin_unlock_irqrestore(&pnw->charger_lock, flags);
+	} else {
+		dev_dbg(pnw->dev, "clv charger_set_power\n");
+
+		if (pnw->psc_cap.chrg_type != POWER_SUPPLY_CHARGER_TYPE_USB_SDP)
+			return 0;
+
+		if (!pnw->otg_pdata->charging_compliance)
+			ma = CHRG_CURR_SDP_HIGH;
+
+		event = kzalloc(sizeof(*event), GFP_ATOMIC);
+		if (!event) {
+			dev_err(pnw->dev, "no memory for charging event");
+			return -ENOMEM;
+		}
+
+		event->cap.chrg_type = POWER_SUPPLY_CHARGER_TYPE_USB_SDP;
+		event->cap.ma = ma;
+		INIT_LIST_HEAD(&event->node);
+
+		spin_lock_irqsave(&pnw->charger_lock, flags);
+		list_add_tail(&event->node, &pnw->chrg_evt_queue);
+		spin_unlock_irqrestore(&pnw->charger_lock, flags);
+
+		queue_work(pnw->chrg_qwork, &pnw->psc_notify);
+	}
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+
+	return 0;
+}
+
+int penwell_otg_get_chrg_status(struct usb_phy *x, void *data)
+{
+	unsigned long flags;
+	struct power_supply_cable_props *cap =
+		(struct power_supply_cable_props *)data;
+	struct penwell_otg *pnw = the_transceiver;
+
+	if (pnw == NULL)
+		return -ENODEV;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	if (data == NULL)
+		return -EINVAL;
+
+	spin_lock_irqsave(&pnw->cap_lock, flags);
+	cap->chrg_evt = pnw->psc_cap.chrg_evt;
+	cap->chrg_type = pnw->psc_cap.chrg_type;
+	cap->ma = pnw->psc_cap.ma;
+	spin_unlock_irqrestore(&pnw->cap_lock, flags);
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+	return 0;
+}
+
+int penwell_otg_query_charging_cap(struct otg_bc_cap *cap)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	unsigned long		flags;
+
+	if (pnw == NULL)
+		return -ENODEV;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	if (cap == NULL)
+		return -EINVAL;
+
+	if (is_clovertrail(to_pci_dev(pnw->dev)))
+		return -ENODEV;
+
+	spin_lock_irqsave(&pnw->charger_lock, flags);
+	cap->chrg_type = pnw->charging_cap.chrg_type;
+	cap->ma = pnw->charging_cap.ma;
+	cap->current_event = pnw->charging_cap.current_event;
+	spin_unlock_irqrestore(&pnw->charger_lock, flags);
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(penwell_otg_query_charging_cap);
+
+/* Register/unregister battery driver callback */
+void *penwell_otg_register_bc_callback(
+	int (*cb)(void *, int, struct otg_bc_cap *), void *arg)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	unsigned long		flags;
+
+	if (pnw == NULL)
+		return pnw;
+
+	if (is_clovertrail(to_pci_dev(pnw->dev)))
+		return NULL;
+
+	spin_lock_irqsave(&pnw->charger_lock, flags);
+
+	if (pnw->bc_callback != NULL)
+		dev_dbg(pnw->dev, "callback has already registered\n");
+
+	pnw->bc_callback = cb;
+	pnw->bc_arg = arg;
+	spin_unlock_irqrestore(&pnw->charger_lock, flags);
+
+	return pnw;
+}
+EXPORT_SYMBOL_GPL(penwell_otg_register_bc_callback);
+
+int penwell_otg_unregister_bc_callback(void *handler)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	unsigned long		flags;
+
+	if (pnw == NULL)
+		return -ENODEV;
+
+	if (pnw != handler)
+		return -EINVAL;
+
+	if (is_clovertrail(to_pci_dev(pnw->dev)))
+		return -ENODEV;
+
+	spin_lock_irqsave(&pnw->charger_lock, flags);
+	pnw->bc_callback = NULL;
+	pnw->bc_arg = NULL;
+	spin_unlock_irqrestore(&pnw->charger_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(penwell_otg_unregister_bc_callback);
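+
+/*
+ * Usage sketch (illustrative only; my_bc_cb and my_arg are hypothetical
+ * names, not part of this driver). A battery/EM driver on a
+ * non-Clovertrail platform could consume capability updates like this:
+ *
+ *	static int my_bc_cb(void *arg, int event, struct otg_bc_cap *cap)
+ *	{
+ *		return 0;	(react to cap->chrg_type/cap->ma here)
+ *	}
+ *
+ *	void *hdl = penwell_otg_register_bc_callback(my_bc_cb, my_arg);
+ *	...
+ *	penwell_otg_unregister_bc_callback(hdl);
+ */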
+
+/* After probe, it should enable the power of USB PHY */
+static void penwell_otg_phy_enable(int on)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	u8			data;
+
+	dev_dbg(pnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
+
+	data = on ? 0x37 : 0x24;
+
+	mutex_lock(&pnw->msic_mutex);
+
+	if (penwell_otg_msic_write(MSIC_VUSB330CNT, data)) {
+		mutex_unlock(&pnw->msic_mutex);
+		dev_err(pnw->dev, "Fail to enable PHY power\n");
+		return;
+	}
+
+	mutex_unlock(&pnw->msic_mutex);
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+}
+
+/* A-device drives vbus, controlled through MSIC register */
+static int penwell_otg_set_vbus(struct usb_otg *otg, bool enabled)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	u8				data;
+	unsigned long			flags;
+	int				retval = 0;
+	struct otg_bc_event		*evt;
+
+	dev_dbg(pnw->dev, "%s ---> %s\n", __func__, enabled ? "on" : "off");
+
+	/*
+	 * For Clovertrail, VBUS is driven by TPS2052 power switch chip.
+	 * But TPS2052 is controlled by ULPI PHY.
+	 */
+	if (is_clovertrail(to_pci_dev(pnw->dev))) {
+		penwell_otg_phy_low_power(0);
+		if (enabled)
+			penwell_otg_ulpi_write(&pnw->iotg,
+				ULPI_OTGCTRLSET, DRVVBUS | DRVVBUS_EXTERNAL);
+		else
+			penwell_otg_ulpi_write(&pnw->iotg,
+				ULPI_OTGCTRLCLR, DRVVBUS | DRVVBUS_EXTERNAL);
+
+		evt = kzalloc(sizeof(*evt), GFP_KERNEL);
+		if (!evt) {
+			dev_err(pnw->dev, "no memory for charging event");
+			return -ENOMEM;
+		}
+
+		evt->cap.chrg_type = POWER_SUPPLY_CHARGER_TYPE_NONE;
+		INIT_LIST_HEAD(&evt->node);
+
+		if ((!enabled) && (pnw->iotg.hsm.id == ID_ACA_A
+				|| pnw->iotg.hsm.id == ID_ACA_B
+				|| pnw->iotg.hsm.id == ID_ACA_C)) {
+			evt->cap.chrg_type = POWER_SUPPLY_CHARGER_TYPE_USB_ACA;
+			evt->cap.ma = CHRG_CURR_ACA;
+			evt->cap.chrg_evt = POWER_SUPPLY_CHARGER_EVENT_CONNECT;
+		} else {
+			dev_info(pnw->dev, "notification: turn %s VBUS\n",
+						enabled ? "ON" : "OFF");
+			atomic_notifier_call_chain(&pnw->iotg.otg.notifier,
+				USB_EVENT_DRIVE_VBUS, &enabled);
+			kfree(evt);
+			goto done;
+		}
+
+		dev_dbg(pnw->dev, "set_vbus ma = %d, event = %d, type = %s\n",
+				evt->cap.ma, evt->cap.chrg_evt,
+				psc_string(evt->cap.chrg_type));
+
+		spin_lock_irqsave(&pnw->charger_lock, flags);
+		list_add_tail(&evt->node, &pnw->chrg_evt_queue);
+		spin_unlock_irqrestore(&pnw->charger_lock, flags);
+
+		queue_work(pnw->chrg_qwork, &pnw->psc_notify);
+
+		goto done;
+	}
+
+	if (pnw->otg_pdata->gpio_vbus) {
+		dev_info(pnw->dev, "Turn %s VBUS using GPIO pin %d\n",
+			enabled ? "on" : "off", pnw->otg_pdata->gpio_vbus);
+		gpio_direction_output(pnw->otg_pdata->gpio_vbus,
+							enabled ? 1 : 0);
+		goto done;
+	}
+
+	data = enabled ? VOTGEN : 0;
+
+	mutex_lock(&pnw->msic_mutex);
+
+	retval = intel_scu_ipc_update_register(MSIC_VOTGCNT, data, VOTGEN);
+
+	if (retval)
+		dev_err(pnw->dev, "Fail to set power on OTG Port\n");
+
+	mutex_unlock(&pnw->msic_mutex);
+
+done:
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+
+	return retval;
+}
+
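+/* Return 1 while a ULPI viewport transaction is still in progress */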
+static int penwell_otg_ulpi_run(void)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	u32			val;
+
+	val = readl(pnw->iotg.base + CI_ULPIVP);
+
+	if (val & ULPI_RUN)
+		return 1;
+
+	dev_dbg(pnw->dev, "%s: ULPI command done\n", __func__);
+	return 0;
+}
+
+/* io_ops to access ulpi registers */
+static int
+penwell_otg_ulpi_read(struct intel_mid_otg_xceiv *iotg, u8 reg, u8 *val)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	u32			val32 = 0;
+	int			count;
+	unsigned long		flags;
+
+	dev_dbg(pnw->dev, "%s - addr 0x%x\n", __func__, reg);
+
+	spin_lock_irqsave(&pnw->lock, flags);
+
+	/* Port = 0 */
+	val32 = ULPI_RUN | reg << 16;
+	writel(val32, pnw->iotg.base + CI_ULPIVP);
+
+	/* Poll at least 2ms for the read operation to complete */
+	count = 400;
+
+	while (count) {
+		val32 = readl(pnw->iotg.base + CI_ULPIVP);
+		if (val32 & ULPI_RUN) {
+			count--;
+			udelay(5);
+		} else {
+			*val = (u8)((val32 & ULPI_DATRD) >> 8);
+			dev_dbg(pnw->dev,
+				"%s - done data 0x%x\n", __func__, *val);
+			spin_unlock_irqrestore(&pnw->lock, flags);
+			return 0;
+		}
+	}
+
+	dev_warn(pnw->dev, "%s - addr 0x%x timeout\n", __func__, reg);
+
+	spin_unlock_irqrestore(&pnw->lock, flags);
+	return -ETIMEDOUT;
+
+}
+
+static int
+penwell_otg_ulpi_write(struct intel_mid_otg_xceiv *iotg, u8 reg, u8 val)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	u32			val32 = 0;
+	int			count;
+	unsigned long		flags;
+
+	dev_dbg(pnw->dev,
+		"%s - addr 0x%x - data 0x%x\n", __func__, reg, val);
+
+	spin_lock_irqsave(&pnw->lock, flags);
+
+	/* Port = 0 */
+	val32 = ULPI_RUN | ULPI_RW | reg << 16 | val;
+	writel(val32, pnw->iotg.base + CI_ULPIVP);
+
+	/* Poll at least 2ms for the write operation to complete */
+	count = 400;
+
+	while (count && penwell_otg_ulpi_run()) {
+		count--;
+		udelay(5);
+	}
+
+	dev_dbg(pnw->dev, "%s - addr 0x%x %s\n", __func__, reg,
+			count ? "complete" : "timeout");
+
+	spin_unlock_irqrestore(&pnw->lock, flags);
+	return count ? 0 : -ETIMEDOUT;
+}
+
+int pnw_otg_ulpi_write(u8 reg, u8 val)
+{
+	return penwell_otg_ulpi_write(&the_transceiver->iotg,
+					reg, val);
+}
+EXPORT_SYMBOL_GPL(pnw_otg_ulpi_write);
+
+static enum msic_vendor penwell_otg_check_msic(void)
+{
+	/* Return MSIC_VD_TI directly */
+	return MSIC_VD_TI;
+}
+
+/* Monitor function to check SRP initial conditions. Polls the current
+ * status for b_ssend_srp and b_se0_srp */
+static void penwell_otg_mon_bus(void)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	int count = 6;
+	int interval = 300; /* ms */
+	u32 val = 0;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+	pnw->iotg.hsm.b_ssend_srp = 0;
+	pnw->iotg.hsm.b_se0_srp = 0;
+
+	while (count) {
+		msleep(interval);
+
+		/* Check VBus status */
+		val = readl(pnw->iotg.base + CI_OTGSC);
+		if (!(val & OTGSC_BSE))
+			return;
+
+		val = readl(pnw->iotg.base + CI_PORTSC1);
+		if (val & PORTSC_LS)
+			return;
+
+		count--;
+	}
+
+	pnw->iotg.hsm.b_ssend_srp = 1;
+	pnw->iotg.hsm.b_se0_srp = 1;
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+}
+
+/* HNP polling function */
+/* The timeout callback function which polls the host request flag for HNP */
+static void penwell_otg_hnp_poll_fn(unsigned long indicator)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+
+	queue_work(pnw->qwork, &pnw->hnp_poll_work);
+}
+
+/* Start HNP polling */
+/* Call this function with iotg->hnp_poll_lock held */
+static int penwell_otg_add_hnp_poll_timer(struct intel_mid_otg_xceiv *iotg,
+					unsigned long delay)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	unsigned long		j = jiffies;
+
+	pnw->hnp_poll_timer.data = 1;
+	pnw->hnp_poll_timer.function = penwell_otg_hnp_poll_fn;
+	pnw->hnp_poll_timer.expires = j + msecs_to_jiffies(delay);
+
+	add_timer(&pnw->hnp_poll_timer);
+
+	return 0;
+}
+
+static int penwell_otg_start_hnp_poll(struct intel_mid_otg_xceiv *iotg)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	unsigned long		flags;
+
+	spin_lock_irqsave(&pnw->iotg.hnp_poll_lock, flags);
+
+	if (pnw->iotg.hsm.hnp_poll_enable) {
+		spin_unlock_irqrestore(&pnw->iotg.hnp_poll_lock, flags);
+		dev_dbg(pnw->dev, "HNP polling is already enabled\n");
+		return 0;
+	}
+
+	/* mark HNP polling enabled and start HNP polling in 50ms */
+	pnw->iotg.hsm.hnp_poll_enable = 1;
+	penwell_otg_add_hnp_poll_timer(&pnw->iotg, 50);
+
+	spin_unlock_irqrestore(&pnw->iotg.hnp_poll_lock, flags);
+
+	return 0;
+}
+
+static int penwell_otg_continue_hnp_poll(struct intel_mid_otg_xceiv *iotg)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	unsigned long		flags;
+
+	spin_lock_irqsave(&pnw->iotg.hnp_poll_lock, flags);
+
+	if (!pnw->iotg.hsm.hnp_poll_enable) {
+		spin_unlock_irqrestore(&pnw->iotg.hnp_poll_lock, flags);
+		dev_dbg(pnw->dev, "HNP polling is disabled, stop polling\n");
+		return 0;
+	}
+
+	penwell_otg_add_hnp_poll_timer(&pnw->iotg, THOS_REQ_POL);
+
+	spin_unlock_irqrestore(&pnw->iotg.hnp_poll_lock, flags);
+
+	return 0;
+}
+
+/* Stop HNP polling */
+static int penwell_otg_stop_hnp_poll(struct intel_mid_otg_xceiv *iotg)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	unsigned long		flags;
+
+	spin_lock_irqsave(&pnw->iotg.hnp_poll_lock, flags);
+
+	if (!pnw->iotg.hsm.hnp_poll_enable) {
+		spin_unlock_irqrestore(&pnw->iotg.hnp_poll_lock, flags);
+		dev_dbg(pnw->dev, "HNP polling is already disabled\n");
+		return 0;
+	}
+
+	pnw->iotg.hsm.hnp_poll_enable = 0;
+	del_timer_sync(&pnw->hnp_poll_timer);
+
+	spin_unlock_irqrestore(&pnw->iotg.hnp_poll_lock, flags);
+
+	return 0;
+}
+
+/* Start SRP function */
+static int penwell_otg_start_srp(struct usb_otg *otg)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	u32			val;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	val = readl(pnw->iotg.base + CI_OTGSC);
+
+	writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HADP,
+				pnw->iotg.base + CI_OTGSC);
+
+	/* Check if the data pulse is finished or not */
+	msleep(8);
+	val = readl(pnw->iotg.base + CI_OTGSC);
+	if (val & (OTGSC_HADP | OTGSC_DP))
+		dev_warn(pnw->dev, "DataLine SRP Error\n");
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+	return 0;
+}
+
+/* start/stop SOF via bus_resume/bus_suspend */
+static void penwell_otg_loc_sof(int on)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	struct usb_hcd		*hcd;
+	int			err;
+
+	dev_dbg(pnw->dev, "%s ---> %s\n", __func__, on ? "resume" : "suspend");
+
+	hcd = bus_to_hcd(pnw->iotg.otg.otg->host);
+	if (on)
+		err = hcd->driver->bus_resume(hcd);
+	else
+		err = hcd->driver->bus_suspend(hcd);
+
+	if (err)
+		dev_warn(pnw->dev, "Fail to resume/suspend USB bus -%d\n", err);
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+}
+
+static void penwell_otg_phy_low_power(int on)
+{
+	struct	penwell_otg	*pnw = the_transceiver;
+	u32			val;
+
+	dev_dbg(pnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
+
+	val = readl(pnw->iotg.base + CI_USBMODE);
+	if (!(val & USBMODE_CM)) {
+		dev_err(pnw->dev,
+			"PHY can't enter low power mode "
+			"when UDC is in idle mode\n");
+		set_client_mode();
+	}
+
+	val = readl(pnw->iotg.base + CI_HOSTPC1);
+	dev_dbg(pnw->dev, "---> Register CI_HOSTPC1 = %x\n", val);
+
+	if (on) {
+		if (val & HOSTPC1_PHCD) {
+			dev_dbg(pnw->dev, "already in Low power mode\n");
+			return;
+		}
+		writel(val | HOSTPC1_PHCD, pnw->iotg.base + CI_HOSTPC1);
+	} else {
+		if (!(val & HOSTPC1_PHCD)) {
+			dev_dbg(pnw->dev, "already in Normal mode\n");
+			return;
+		}
+		writel(val & ~HOSTPC1_PHCD, pnw->iotg.base + CI_HOSTPC1);
+	}
+
+	val = readl(pnw->iotg.base + CI_HOSTPC1);
+
+	dev_dbg(pnw->dev, "<--- Register CI_HOSTPC1 = %x\n", val);
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+}
+
+/*
+ * For Penwell, VBUS330 is the power rail to otg PHY inside MSIC, set it
+ * into low power mode or normal mode according to pm state.
+ * Call this function when spi access to MSIC registers is enabled.
+ *
+ * For Clovertrail, we don't have a controllable power rail to the PHY -
+ * it's always on.
+ */
+static int penwell_otg_vusb330_low_power(int on)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	u8			data;
+	int			retval = 0;
+
+	dev_dbg(pnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
+
+	if (!is_clovertrail(to_pci_dev(pnw->dev))) {
+		if (on)
+			data = 0x5; /* Low power mode */
+		else
+			data = 0x7; /* Normal mode */
+		retval = penwell_otg_msic_write(MSIC_VUSB330CNT, data);
+	}
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+
+	return retval;
+}
+
+/* Enable/Disable OTG interrupt */
+static void penwell_otg_intr(int on)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	u32			val;
+
+	dev_dbg(pnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
+
+	val = readl(pnw->iotg.base + CI_OTGSC);
+	/* mask W/C bits to avoid clearing them when
+	 * val is written back to OTGSC */
+	val &= ~OTGSC_INTSTS_MASK;
+	if (on) {
+		val = val | (OTGSC_INTEN_MASK);
+		writel(val, pnw->iotg.base + CI_OTGSC);
+	} else {
+		val = val & ~(OTGSC_INTEN_MASK);
+		writel(val, pnw->iotg.base + CI_OTGSC);
+	}
+}
+
+/* set HAAR: Hardware Assist Auto-Reset */
+static void penwell_otg_HAAR(int on)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	u32			val;
+
+	dev_dbg(pnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
+
+	val = readl(pnw->iotg.base + CI_OTGSC);
+	if (on)
+		writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HAAR,
+				pnw->iotg.base + CI_OTGSC);
+	else
+		writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HAAR,
+				pnw->iotg.base + CI_OTGSC);
+}
+
+/* set HABA: Hardware Assist B-Disconnect to A-Connect */
+static void penwell_otg_HABA(int on)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	u32	val;
+
+	dev_dbg(pnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
+
+	val = readl(pnw->iotg.base + CI_OTGSC);
+	if (on)
+		writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HABA,
+					pnw->iotg.base + CI_OTGSC);
+	else
+		writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HABA,
+					pnw->iotg.base + CI_OTGSC);
+}
+
+/* read 8bit msic register */
+static int penwell_otg_msic_read(u16 addr, u8 *data)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	int			retval = intel_scu_ipc_ioread8(addr, data);
+	if (retval)
+		dev_warn(pnw->dev, "Failed to read MSIC register %x\n", addr);
+
+	return retval;
+}
+
+/* write 8bit msic register */
+static int penwell_otg_msic_write(u16 addr, u8 data)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	int			retval = 0;
+
+	retval = intel_scu_ipc_iowrite8(addr, data);
+	if (retval)
+		dev_warn(pnw->dev, "Failed to write MSIC register %x\n", addr);
+
+	return retval;
+}
+
+/* USB-related registers in MSIC can be accessed via both SPI and ULPI
+ * addresses. Access the control register to switch between them */
+static void penwell_otg_msic_spi_access(bool enabled)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	u8			data;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	/* Set ULPI ACCESS MODE */
+	data = enabled ? SPIMODE : 0;
+
+	penwell_otg_msic_write(MSIC_ULPIACCESSMODE, data);
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+}
+
+/* USB Battery Charger detection related functions */
+/* Data contact detection is the first step for charger detection */
+static int penwell_otg_data_contact_detect(void)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	u8			data;
+	int			count = 50;
+	int			retval = 0;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	/* Enable SPI access */
+	penwell_otg_msic_spi_access(true);
+
+	/* Set POWER_CTRL_CLR */
+	retval = penwell_otg_msic_write(MSIC_PWRCTRLCLR, DPVSRCEN);
+	if (retval)
+		return retval;
+
+	/* Set FUNC_CTRL_SET */
+	retval = penwell_otg_msic_write(MSIC_FUNCTRLSET, OPMODE0);
+	if (retval)
+		return retval;
+
+	/* Set FUNC_CTRL_CLR */
+	retval = penwell_otg_msic_write(MSIC_FUNCTRLCLR, OPMODE1);
+	if (retval)
+		return retval;
+
+	/* Set OTG_CTRL_CLR */
+	retval = penwell_otg_msic_write(MSIC_OTGCTRLCLR,
+					DMPULLDOWN | DPPULLDOWN);
+	if (retval)
+		return retval;
+
+	/* Set POWER_CTRL_CLR */
+	retval = penwell_otg_msic_write(MSIC_PWRCTRLCLR, SWCNTRL);
+	if (retval)
+		return retval;
+
+	retval = penwell_otg_msic_write(MSIC_VS3SET, DATACONEN | SWUSBDET);
+	if (retval)
+		return retval;
+
+	dev_dbg(pnw->dev, "Start Polling for Data contact detection!\n");
+
+	while (count) {
+		retval = intel_scu_ipc_ioread8(MSIC_USB_MISC, &data);
+		if (retval) {
+			dev_warn(pnw->dev, "Failed to read MSIC register\n");
+			return retval;
+		}
+
+		if (data & MISC_CHGDSERXDPINV) {
+			dev_dbg(pnw->dev, "Data contact detected!\n");
+			return 0;
+		}
+		count--;
+		/* Interval is 10 - 11ms */
+		usleep_range(10000, 11000);
+	}
+
+	dev_dbg(pnw->dev, "Data contact Timeout\n");
+
+	retval = penwell_otg_msic_write(MSIC_VS3CLR, DATACONEN | SWUSBDET);
+	if (retval)
+		return retval;
+
+	udelay(100);
+
+	retval = penwell_otg_msic_write(MSIC_VS3SET, SWUSBDET);
+	if (retval)
+		return retval;
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+	return 0;
+}
+
+static int penwell_otg_charger_detect(void)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	msleep(125);
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+
+	return 0;
+}
+
+static int penwell_otg_charger_type_detect(void)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	enum usb_charger_type		charger;
+	u8				data;
+	int				retval;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	retval = penwell_otg_msic_write(MSIC_VS3CLR, DATACONEN);
+	if (retval)
+		return retval;
+
+	retval = penwell_otg_msic_write(MSIC_PWRCTRLSET, DPWKPUEN | SWCNTRL);
+	if (retval)
+		return retval;
+
+	retval = penwell_otg_msic_write(MSIC_PWRCTRLCLR, DPVSRCEN);
+	if (retval)
+		return retval;
+
+	retval = penwell_otg_msic_write(MSIC_OTGCTRLCLR,
+					DMPULLDOWN | DPPULLDOWN);
+	if (retval)
+		return retval;
+
+	msleep(55);
+
+	retval = penwell_otg_msic_write(MSIC_PWRCTRLCLR,
+					SWCNTRL | DPWKPUEN | HWDET);
+	if (retval)
+		return retval;
+
+	usleep_range(1000, 1200);
+
+	/* Enable ULPI mode */
+	penwell_otg_msic_spi_access(false);
+
+	retval = intel_scu_ipc_ioread8(MSIC_SPWRSRINT1, &data);
+	if (retval) {
+		dev_warn(pnw->dev, "Failed to read MSIC register\n");
+		return retval;
+	}
+
+	switch (data & MSIC_SPWRSRINT1_MASK) {
+	case SPWRSRINT1_SDP:
+		charger = CHRG_SDP;
+		break;
+	case SPWRSRINT1_DCP:
+		charger = CHRG_DCP;
+		break;
+	case SPWRSRINT1_CDP:
+		charger = CHRG_CDP;
+		break;
+	default:
+		charger = CHRG_UNKNOWN;
+		break;
+	}
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+
+	return charger;
+}
+
+/* manual charger detection by ULPI access */
+static int penwell_otg_manual_chrg_det(void)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg;
+	int				retval;
+	u8				data, data1, data2;
+	unsigned long			timeout, interval;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	iotg = &pnw->iotg;
+
+	dev_info(pnw->dev, "USB charger detection start...\n");
+
+	/* WA for invalid (SE1) charger: add DCD & SE1 detection over SPI
+	 * instead of the ULPI interface to avoid any ULPI read/write
+	 * failures with an SE1 charger */
+	penwell_otg_msic_spi_access(true);
+
+	retval = penwell_otg_msic_write(MSIC_FUNCTRLCLR,
+					OPMODE1 | TERMSELECT | XCVRSELECT1);
+	if (retval) {
+		penwell_otg_msic_spi_access(false);
+		return retval;
+	}
+
+	retval = penwell_otg_msic_write(MSIC_FUNCTRLSET, OPMODE0 | XCVRSELECT0);
+	if (retval) {
+		penwell_otg_msic_spi_access(false);
+		return retval;
+	}
+
+	retval = penwell_otg_msic_write(MSIC_PWRCTRLSET, SWCNTRL);
+	if (retval) {
+		penwell_otg_msic_spi_access(false);
+		return retval;
+	}
+
+	retval = penwell_otg_msic_write(MSIC_VS3SET, CHGD_IDP_SRC);
+	if (retval) {
+		penwell_otg_msic_spi_access(false);
+		return retval;
+	}
+
+	dev_info(pnw->dev, "charger detection DCD start...\n");
+
+	/* Check DCD result, use same polling parameter */
+	timeout = jiffies + msecs_to_jiffies(DATACON_TIMEOUT);
+	interval = DATACON_INTERVAL * 1000; /* us */
+
+	dev_info(pnw->dev, "DCD started!\n");
+
+	/* Delay TIDP_SRC_ON + TCHGD_SERX_DEB */
+	usleep_range(66500, 67000);
+
+	while (!time_after(jiffies, timeout)) {
+		retval = penwell_otg_msic_read(MSIC_VS4, &data);
+		if (retval) {
+			penwell_otg_msic_spi_access(false);
+			return retval;
+		}
+		if (!(data & CHRG_SERX_DP)) {
+			dev_info(pnw->dev, "Data contact detected!\n");
+			break;
+		}
+
+		/* Polling interval */
+		usleep_range(interval, interval + 2000);
+	}
+
+	retval = penwell_otg_msic_write(MSIC_VS3CLR, CHGD_IDP_SRC);
+	if (retval) {
+		penwell_otg_msic_spi_access(false);
+		return retval;
+	}
+
+	dev_info(pnw->dev, "DCD complete\n");
+
+	/* Check for SE1, Linestate = '11' */
+	retval = penwell_otg_msic_read(MSIC_DEBUG, &data);
+	if (retval) {
+		penwell_otg_msic_spi_access(false);
+		return retval;
+	}
+	dev_info(pnw->dev, "MSIC.DEBUG.LINESTATE.D[1:0] = 0x%02X\n", data);
+
+	data &= LINESTATE_MSK;
+	if (data == LINESTATE_SE1) {
+		dev_info(pnw->dev, "SE1 Detected\n");
+		penwell_otg_msic_spi_access(false);
+		return CHRG_SE1;
+	}
+
+	penwell_otg_msic_spi_access(false);
+
+	dev_info(pnw->dev, "Primary Detection start...\n");
+
+	/* Primary Detection config */
+	/* ulpi_write(0x0b, 0x06) */
+	retval = penwell_otg_ulpi_write(iotg, ULPI_OTGCTRLSET,
+				DMPULLDOWN | DPPULLDOWN);
+	if (retval)
+		return retval;
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLSET, DPVSRCEN);
+	if (retval)
+		return retval;
+
+	msleep(125);
+
+	/* Check result SDP vs CDP/DCP */
+	retval = penwell_otg_ulpi_read(iotg, ULPI_PWRCTRL, &data1);
+	if (retval)
+		return retval;
+
+	data1 = data1 & VDATDET;
+
+	retval = penwell_otg_ulpi_read(iotg, ULPI_VS4, &data2);
+	if (retval)
+		return retval;
+
+	data2 = data2 & CHRG_SERX_DM;
+
+	if (!data1 || data2) {
+		retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLCLR,
+						DPVSRCEN);
+		if (retval)
+			return retval;
+
+		dev_info(pnw->dev, "USB Charger Detection done\n");
+		return CHRG_SDP;
+	}
+
+	/* start detection on CDP vs DCP */
+	retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLCLR, DPVSRCEN);
+	if (retval)
+		return retval;
+
+	/* sleep 1ms between Primary and Secondary detection */
+	usleep_range(1000, 1200);
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_VS1CLR, DATAPOLARITY);
+	if (retval)
+		return retval;
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLSET, DPVSRCEN);
+	if (retval)
+		return retval;
+
+	msleep(85);
+
+	/* read result on CDP vs DCP */
+	retval = penwell_otg_ulpi_read(iotg, ULPI_PWRCTRL, &data);
+	if (retval)
+		return retval;
+
+	data = data & VDATDET;
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLCLR, DPVSRCEN);
+	if (retval)
+		return retval;
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_VS1SET, DATAPOLARITY);
+	if (retval)
+		return retval;
+
+	dev_info(pnw->dev, "USB Charger Detection done\n");
+
+	if (data) {
+		retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLSET,
+							DPVSRCEN);
+		if (retval)
+			return retval;
+
+		return CHRG_DCP;
+	}
+
+	return CHRG_CDP;
+}
+
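+/*
+ * Clovertrail DCD (data contact detect): put the PHY into non-driving
+ * mode, enable the IDP source and poll VS4 until D+ reads low or the
+ * timeout expires.
+ */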
+static int penwell_otg_charger_det_dcd_clt(void)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+	int				retval;
+	unsigned long			timeout, interval;
+	u8				data;
+
+	/* Change OPMODE to '01' Non-driving */
+	retval = penwell_otg_ulpi_write(iotg, ULPI_FUNCTRLSET,
+						OPMODE0 | XCVRSELECT0);
+	if (retval)
+		return retval;
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_FUNCTRLCLR,
+					OPMODE1 | XCVRSELECT1 | TERMSELECT);
+	if (retval)
+		return retval;
+
+	/* Enable SW control */
+	retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLSET, SWCNTRL);
+	if (retval)
+		return retval;
+
+	/* Clear HWDETECT result for safety */
+	penwell_otg_charger_hwdet(false);
+
+	/* Enable IDPSRC */
+	retval = penwell_otg_ulpi_write(iotg, ULPI_VS3SET, CHGD_IDP_SRC);
+	if (retval)
+		return retval;
+
+	/* Check DCD result, use same polling parameter */
+	timeout = jiffies + msecs_to_jiffies(DATACON_TIMEOUT);
+	interval = DATACON_INTERVAL * 1000; /* us */
+
+	dev_info(pnw->dev, "DCD started\n");
+
+	/* Delay TIDP_SRC_ON + TCHGD_SERX_DEB = 347.8us + 66.1ms max */
+	usleep_range(66500, 67000);
+
+	while (!time_after(jiffies, timeout)) {
+		retval = penwell_otg_ulpi_read(iotg, ULPI_VS4, &data);
+		if (retval) {
+			dev_warn(pnw->dev, "Failed to read ULPI register\n");
+			return retval;
+		}
+
+		dev_dbg(pnw->dev, "VS4 = 0x%02x.. DP = bit1\n", data);
+
+		if (!(data & CHRG_SERX_DP)) {
+			dev_info(pnw->dev, "Data contact detected!\n");
+			break;
+		}
+
+		/* Polling interval */
+		usleep_range(interval, interval + 2000);
+	}
+
+	/* ulpi_write(0x87, 0x40) */
+	retval = penwell_otg_ulpi_write(iotg, ULPI_VS3CLR, CHGD_IDP_SRC);
+	if (retval)
+		return retval;
+
+	dev_info(pnw->dev, "DCD complete\n");
+
+	return 0;
+}
+
+static int penwell_otg_charger_det_aca_clt(void)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+	int				retval;
+	u8				data;
+	u8				usb_vs2_sts = 0;
+	u8				usb_vs2_latch = 0;
+	u8				usb_vdat_det = 0;
+	u8				usb_vdm = 0;
+
+	retval = penwell_otg_ulpi_read(iotg, ULPI_VS2LATCH, &usb_vs2_latch);
+	dev_dbg(pnw->dev, "%s: usb_vs2_latch = 0x%x\n",
+			__func__, usb_vs2_latch);
+	if (retval) {
+		dev_warn(pnw->dev, "ULPI read failed, exit\n");
+		return retval;
+	}
+
+	retval = penwell_otg_ulpi_read(iotg, ULPI_VS2STS, &usb_vs2_sts);
+	dev_dbg(pnw->dev, "%s: usb_vs2_sts = 0x%x\n",
+			__func__, usb_vs2_sts);
+	if (retval) {
+		dev_warn(pnw->dev, "ULPI read failed, exit\n");
+		return retval;
+	}
+
+	if (usb_vs2_latch & IDRARBRC_MSK) {
+		switch (IDRARBRC_STS(usb_vs2_sts)) {
+		case IDRARBRC_A:
+			iotg->hsm.id = ID_ACA_A;
+			break;
+		case IDRARBRC_B:
+			iotg->hsm.id = ID_ACA_B;
+			dev_info(pnw->dev, "ACA-B detected\n");
+			break;
+		case IDRARBRC_C:
+			iotg->hsm.id = ID_ACA_C;
+			dev_info(pnw->dev, "ACA-C detected\n");
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (iotg->hsm.id == ID_ACA_A) {
+		retval = penwell_otg_ulpi_write(iotg,
+					ULPI_PWRCTRLSET, DPVSRCEN);
+		if (retval)
+			return retval;
+
+		msleep(70);
+
+		retval = penwell_otg_ulpi_read(iotg, ULPI_PWRCTRL, &data);
+		usb_vdat_det = data & VDATDET;
+		if (retval) {
+			dev_warn(pnw->dev, "ULPI read failed, exit\n");
+			return retval;
+		}
+
+		retval = penwell_otg_ulpi_read(iotg, ULPI_VS4, &data);
+		usb_vdm = data & CHRG_SERX_DM;
+		if (retval) {
+			dev_warn(pnw->dev, "ULPI read failed, exit\n");
+			return retval;
+		}
+
+		retval = penwell_otg_ulpi_write(iotg,
+					 ULPI_PWRCTRLCLR, DPVSRCEN);
+		if (retval)
+			return retval;
+
+		if (usb_vdat_det && !usb_vdm)
+			dev_info(pnw->dev, "ACA-Dock detected\n");
+		else if (!usb_vdat_det && usb_vdm)
+			dev_info(pnw->dev, "ACA-A detected\n");
+	}
+
+	if (iotg->hsm.id == ID_ACA_A || iotg->hsm.id == ID_ACA_B
+			|| iotg->hsm.id == ID_ACA_C) {
+		return CHRG_ACA;
+	}
+
+	return 0;
+}
+
+static int penwell_otg_charger_det_se1_clt(void)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+	int				retval;
+	u8				data;
+
+	retval = penwell_otg_ulpi_read(iotg, ULPI_DEBUG, &data);
+	if (retval) {
+		dev_warn(pnw->dev, "ULPI read failed, exit\n");
+		return -EBUSY;
+	}
+
+	if ((data & CHRG_SERX_DP) && (data & CHRG_SERX_DM))
+		return CHRG_SE1;
+
+	return 0;
+}
+
+static int penwell_otg_charger_det_pri_clt(void)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+	int				retval;
+	u8				vdat_det, serx_dm;
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLSET, DPVSRCEN);
+	if (retval)
+		return retval;
+
+	msleep(110);
+
+	retval = penwell_otg_ulpi_read(iotg, ULPI_PWRCTRL, &vdat_det);
+	if (retval) {
+		dev_warn(pnw->dev, "ULPI read failed, exit\n");
+		return -EBUSY;
+	}
+
+	retval = penwell_otg_ulpi_read(iotg, ULPI_VS4, &serx_dm);
+	if (retval) {
+		dev_warn(pnw->dev, "ULPI read failed, exit\n");
+		return -EBUSY;
+	}
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLCLR, DPVSRCEN);
+	if (retval)
+		return retval;
+
+	vdat_det &= VDATDET;
+	serx_dm &= CHRG_SERX_DM;
+
+	if ((!vdat_det) || serx_dm)
+		return CHRG_SDP;
+
+	return 0;
+}
+
+static int penwell_otg_charger_det_sec_clt(void)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+	int				retval;
+	u8				vdat_det;
+
+	usleep_range(1000, 1500);
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_VS1CLR, DATAPOLARITY);
+	if (retval)
+		return retval;
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLSET, DPVSRCEN);
+	if (retval)
+		return retval;
+
+	msleep(80);
+
+	retval = penwell_otg_ulpi_read(iotg, ULPI_PWRCTRL, &vdat_det);
+	if (retval) {
+		dev_warn(pnw->dev, "ULPI read failed, exit\n");
+		return retval;
+	}
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLCLR, DPVSRCEN);
+	if (retval)
+		return retval;
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_VS1SET, DATAPOLARITY);
+	if (retval)
+		return retval;
+
+	vdat_det &= VDATDET;
+
+	if (vdat_det)
+		return CHRG_DCP;
+
+	return CHRG_CDP;
+}
+
+static int penwell_otg_charger_det_clean_clt(void)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+	int				retval;
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLSET,
+					DPVSRCEN | SWCNTRL);
+	if (retval)
+		return retval;
+
+	return 0;
+}
+
+/* As we use SW mode for charger detection, we need to notify the HW of
+ * the result SW got: charging port or not */
+static int penwell_otg_charger_hwdet(bool enable)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+	int				retval;
+
+	/* This is for CLV only */
+	if (!is_clovertrail(to_pci_dev(pnw->dev)))
+		return 0;
+
+	if (enable) {
+		retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLSET, HWDET);
+		if (retval)
+			return retval;
+		dev_dbg(pnw->dev, "set HWDETECT\n");
+	} else {
+		retval = penwell_otg_ulpi_write(iotg, ULPI_PWRCTRLCLR, HWDET);
+		if (retval)
+			return retval;
+		dev_dbg(pnw->dev, "clear HWDETECT\n");
+	}
+
+	return 0;
+}
+
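+/*
+ * Clovertrail charger detection sequence: DCD first, then ACA, SE1,
+ * primary detection (SDP vs CDP/DCP) and finally secondary detection
+ * (CDP vs DCP), returning the first positive result.
+ */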
+static int penwell_otg_charger_det_clt(void)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	int				retval;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	/* DCD */
+	retval = penwell_otg_charger_det_dcd_clt();
+	if (retval) {
+		dev_warn(pnw->dev, "DCD failed, exit\n");
+		return retval;
+	}
+
+	/* ACA Detection */
+	retval = penwell_otg_charger_det_aca_clt();
+	if (retval < 0) {
+		dev_warn(pnw->dev, "ACA Det failed, exit\n");
+		return retval;
+	} else if (retval == CHRG_ACA) {
+		dev_info(pnw->dev, "ACA detected\n");
+		penwell_otg_charger_hwdet(true);
+		return CHRG_ACA;
+	}
+
+	/* SE1 Detection */
+	retval = penwell_otg_charger_det_se1_clt();
+	if (retval < 0) {
+		dev_warn(pnw->dev, "SE1 Det failed, exit\n");
+		return retval;
+	} else if (retval == CHRG_SE1) {
+		dev_info(pnw->dev, "SE1 detected\n");
+		penwell_otg_charger_hwdet(true);
+		return CHRG_SE1;
+	}
+
+	/* Pri Det */
+	retval = penwell_otg_charger_det_pri_clt();
+	if (retval < 0) {
+		dev_warn(pnw->dev, "Pri Det failed, exit\n");
+		return retval;
+	} else if (retval == CHRG_SDP) {
+		dev_info(pnw->dev, "SDP detected\n");
+		return CHRG_SDP;
+	}
+
+	/* Sec Det */
+	retval = penwell_otg_charger_det_sec_clt();
+	if (retval < 0) {
+		dev_warn(pnw->dev, "Sec Det failed, exit\n");
+		return retval;
+	} else if (retval == CHRG_CDP) {
+		dev_info(pnw->dev, "CDP detected\n");
+		penwell_otg_charger_hwdet(true);
+		return CHRG_CDP;
+	} else if (retval == CHRG_DCP) {
+		dev_info(pnw->dev, "DCP detected\n");
+		penwell_otg_charger_det_clean_clt();
+		penwell_otg_charger_hwdet(true);
+		return CHRG_DCP;
+	}
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+
+	return 0;
+}
+
+void penwell_otg_phy_vbus_wakeup(bool on)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv  *iotg = &pnw->iotg;
+	u8			flag = 0;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	penwell_otg_msic_spi_access(true);
+
+	flag = VBUSVLD | SESSVLD | SESSEND;
+
+	if (is_clovertrail(to_pci_dev(pnw->dev))) {
+		if (on) {
+			penwell_otg_ulpi_write(iotg, ULPI_USBINTEN_RISINGSET,
+						flag);
+			penwell_otg_ulpi_write(iotg, ULPI_USBINTEN_FALLINGSET,
+						flag);
+		} else {
+			penwell_otg_ulpi_write(iotg, ULPI_USBINTEN_RISINGCLR,
+						flag);
+			penwell_otg_ulpi_write(iotg, ULPI_USBINTEN_FALLINGCLR,
+						flag);
+		}
+	} else {
+		if (on) {
+			penwell_otg_msic_write(MSIC_USBINTEN_RISESET, flag);
+			penwell_otg_msic_write(MSIC_USBINTEN_FALLSET, flag);
+		} else {
+			penwell_otg_msic_write(MSIC_USBINTEN_RISECLR, flag);
+			penwell_otg_msic_write(MSIC_USBINTEN_FALLCLR, flag);
+		}
+	}
+
+	penwell_otg_msic_spi_access(false);
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+}
+
+void penwell_otg_phy_intr(bool on)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	u8			flag = 0;
+	int			retval;
+	u8			data;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	penwell_otg_msic_spi_access(true);
+
+	flag = VBUSVLD | IDGND;
+
+	if (on) {
+		dev_info(pnw->dev, "enable VBUSVLD & IDGND\n");
+		penwell_otg_msic_write(MSIC_USBINTEN_RISESET, flag);
+		penwell_otg_msic_write(MSIC_USBINTEN_FALLSET, flag);
+	} else {
+		dev_info(pnw->dev, "disable VBUSVLD & IDGND\n");
+		penwell_otg_msic_write(MSIC_USBINTEN_RISECLR, flag);
+		penwell_otg_msic_write(MSIC_USBINTEN_FALLCLR, flag);
+	}
+
+	retval = intel_scu_ipc_ioread8(MSIC_USBINTEN_RISE, &data);
+	if (retval)
+		dev_warn(pnw->dev, "Failed to read MSIC register\n");
+	else
+		dev_info(pnw->dev, "MSIC_USBINTEN_RISE = 0x%x", data);
+
+	retval = intel_scu_ipc_ioread8(MSIC_USBINTEN_FALL, &data);
+	if (retval)
+		dev_warn(pnw->dev, "Failed to read MSIC register\n");
+	else
+		dev_info(pnw->dev, "MSIC_USBINTEN_FALL = 0x%x", data);
+
+	penwell_otg_msic_spi_access(false);
+}
+
+void penwell_otg_phy_power(int on)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	if (is_clovertrail(to_pci_dev(pnw->dev))) {
+		dev_dbg(pnw->dev, "turn %s USB PHY by gpio_cs(%d)\n",
+					on ? "on" : "off",
+					pnw->otg_pdata->gpio_cs);
+		gpio_direction_output(pnw->otg_pdata->gpio_cs, on);
+	}
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+}
+
+void penwell_otg_phy_reset(void)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	if (is_clovertrail(to_pci_dev(pnw->dev))) {
+		gpio_direction_output(pnw->otg_pdata->gpio_reset, 0);
+		usleep_range(200, 500);
+		gpio_set_value(pnw->otg_pdata->gpio_reset, 1);
+	}
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+}
+
+#if 0
+/* knext_wa: usb_notify_warning should be implemented again instead of in hub.c */
+static void penwell_otg_notify_warning(int warning_code)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+
+	dev_dbg(pnw->dev, "%s ---> %d\n", __func__, warning_code);
+
+	if (pnw && pnw->iotg.otg.otg->host && pnw->iotg.otg.otg->host->root_hub)
+		usb_notify_warning(pnw->iotg.otg.otg->host->root_hub,
+					warning_code);
+	else
+		dev_dbg(pnw->dev, "no valid device for notification\n");
+
+	dev_dbg(pnw->dev, "%s <--- %d\n", __func__, warning_code);
+}
+#else
+#define penwell_otg_notify_warning(x)
+#endif
+
+void penwell_otg_nsf_msg(unsigned long indicator)
+{
+	switch (indicator) {
+	case 2:
+	case 4:
+	case 6:
+	case 7:
+		dev_warn(the_transceiver->dev,
+			"NSF-%lu - device not responding\n", indicator);
+		break;
+	case 3:
+		dev_warn(the_transceiver->dev,
+			"NSF-%lu - device not supported\n", indicator);
+		break;
+	default:
+		dev_warn(the_transceiver->dev,
+			"Do not have this kind of NSF\n");
+		break;
+	}
+}
+
+/* The timeout callback function to set time out bit */
+static void penwell_otg_timer_fn(unsigned long indicator)
+{
+	struct penwell_otg *pnw = the_transceiver;
+
+	*(int *)indicator = 1;
+
+	dev_dbg(pnw->dev, "kernel timer - timeout\n");
+
+	penwell_update_transceiver();
+}
+
+/* kernel timer used for OTG timing */
+static void penwell_otg_add_timer(enum penwell_otg_timer_type timers)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+	unsigned long			j = jiffies;
+	unsigned long			data, time;
+
+	if (timer_pending(&pnw->hsm_timer))
+		return;
+
+	switch (timers) {
+	case TA_WAIT_VRISE_TMR:
+		iotg->hsm.a_wait_vrise_tmout = 0;
+		data = (unsigned long)&iotg->hsm.a_wait_vrise_tmout;
+		/* Charger HW limitation workaround for CLV */
+		time = is_clovertrail(to_pci_dev(pnw->dev)) ?
+						400 : TA_WAIT_VRISE;
+		dev_dbg(pnw->dev,
+			"Add timer TA_WAIT_VRISE = %lu\n", time);
+		break;
+	case TA_WAIT_BCON_TMR:
+		iotg->hsm.a_wait_bcon_tmout = 0;
+		data = (unsigned long)&iotg->hsm.a_wait_bcon_tmout;
+		time = TA_WAIT_BCON;
+		dev_dbg(pnw->dev,
+			"Add timer TA_WAIT_BCON = %d\n", TA_WAIT_BCON);
+		break;
+	case TA_AIDL_BDIS_TMR:
+		iotg->hsm.a_aidl_bdis_tmout = 0;
+		data = (unsigned long)&iotg->hsm.a_aidl_bdis_tmout;
+		time = TA_AIDL_BDIS;
+		dev_dbg(pnw->dev,
+			"Add timer TA_AIDL_BDIS = %d\n", TA_AIDL_BDIS);
+		break;
+	case TA_BIDL_ADIS_TMR:
+		iotg->hsm.a_bidl_adis_tmout = 0;
+		iotg->hsm.a_bidl_adis_tmr = 1;
+		data = (unsigned long)&iotg->hsm.a_bidl_adis_tmout;
+		time = TA_BIDL_ADIS;
+		dev_dbg(pnw->dev,
+			"Add timer TA_BIDL_ADIS = %d\n", TA_BIDL_ADIS);
+		break;
+	case TA_WAIT_VFALL_TMR:
+		iotg->hsm.a_wait_vfall_tmout = 0;
+		data = (unsigned long)&iotg->hsm.a_wait_vfall_tmout;
+		time = TA_WAIT_VFALL;
+		dev_dbg(pnw->dev,
+			"Add timer TA_WAIT_VFALL = %d\n", TA_WAIT_VFALL);
+		break;
+	case TB_ASE0_BRST_TMR:
+		iotg->hsm.b_ase0_brst_tmout = 0;
+		data = (unsigned long)&iotg->hsm.b_ase0_brst_tmout;
+		time = TB_ASE0_BRST;
+		dev_dbg(pnw->dev,
+			"Add timer TB_ASE0_BRST = %d\n", TB_ASE0_BRST);
+		break;
+	case TB_SRP_FAIL_TMR:
+		iotg->hsm.b_srp_fail_tmout = 0;
+		iotg->hsm.b_srp_fail_tmr = 1;
+		data = (unsigned long)&iotg->hsm.b_srp_fail_tmout;
+		time = TB_SRP_FAIL;
+		dev_dbg(pnw->dev,
+			"Add timer TB_SRP_FAIL = %d\n", TB_SRP_FAIL);
+		break;
+	/* support OTG test mode */
+	case TTST_MAINT_TMR:
+		iotg->hsm.tst_maint_tmout = 0;
+		data = (unsigned long)&iotg->hsm.tst_maint_tmout;
+		time = TTST_MAINT;
+		dev_dbg(pnw->dev,
+			"Add timer TTST_MAINT = %d\n", TTST_MAINT);
+		break;
+	case TTST_NOADP_TMR:
+		iotg->hsm.tst_noadp_tmout = 0;
+		data = (unsigned long)&iotg->hsm.tst_noadp_tmout;
+		time = TTST_NOADP;
+		dev_dbg(pnw->dev,
+			"Add timer TTST_NOADP = %d\n", TTST_NOADP);
+		break;
+	default:
+		dev_dbg(pnw->dev,
+			"unkown timer, can not enable such timer\n");
+		return;
+	}
+
+	pnw->hsm_timer.data = data;
+	pnw->hsm_timer.function = penwell_otg_timer_fn;
+	pnw->hsm_timer.expires = j + time * HZ / 1000; /* milliseconds */
+
+	add_timer(&pnw->hsm_timer);
+}
+
+static inline void penwell_otg_del_timer(enum penwell_otg_timer_type timers)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+
+	switch (timers) {
+	case TA_BIDL_ADIS_TMR:
+		iotg->hsm.a_bidl_adis_tmr = 0;
+		break;
+	case TB_SRP_FAIL_TMR:
+		iotg->hsm.b_srp_fail_tmr = 0;
+		break;
+	case TA_WAIT_BCON_TMR:
+		iotg->hsm.a_wait_bcon_tmout = 0;
+		break;
+	case TTST_MAINT_TMR:
+		iotg->hsm.tst_maint_tmout = 0;
+		break;
+	case TTST_NOADP_TMR:
+		iotg->hsm.tst_noadp_tmout = 0;
+		break;
+	default:
+		break;
+	}
+
+	dev_dbg(pnw->dev, "state machine timer deleted\n");
+	del_timer_sync(&pnw->hsm_timer);
+}
+
+static void reset_otg(void)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	u32			val;
+	int			delay_time = 1000;
+
+	dev_dbg(pnw->dev, "reseting OTG controller ...\n");
+	val = readl(pnw->iotg.base + CI_USBCMD);
+	writel(val | USBCMD_RST, pnw->iotg.base + CI_USBCMD);
+	do {
+		udelay(100);
+		if (!delay_time--)
+			dev_dbg(pnw->dev, "reset timeout\n");
+		val = readl(pnw->iotg.base + CI_USBCMD);
+		val &= USBCMD_RST;
+	} while (val != 0);
+	dev_dbg(pnw->dev, "reset done.\n");
+}
+
+static void pnw_phy_ctrl_rst(void)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct pci_dev	*pdev;
+
+	pdev = to_pci_dev(pnw->dev);
+
+	/* Mask the ID interrupt before reset and delay before unmasking
+	 * it, to avoid wrongly detecting ID_A as a side effect of
+	 * resetting the PHY.
+	 */
+	penwell_otg_intr(0);
+	synchronize_irq(pdev->irq);
+
+	penwell_otg_phy_reset();
+
+	reset_otg();
+
+	msleep(50);
+	penwell_otg_intr(1);
+
+	/* after reset, need to sync to OTGSC status bits to hsm */
+	update_hsm();
+	penwell_update_transceiver();
+}
+
+static void set_host_mode(void)
+{
+	u32	val;
+
+	reset_otg();
+	val = readl(the_transceiver->iotg.base + CI_USBMODE);
+	val = (val & (~USBMODE_CM)) | USBMODE_HOST;
+	writel(val, the_transceiver->iotg.base + CI_USBMODE);
+}
+
+static void set_client_mode(void)
+{
+	u32	val;
+
+	reset_otg();
+	val = readl(the_transceiver->iotg.base + CI_USBMODE);
+	val = (val & (~USBMODE_CM)) | USBMODE_DEVICE;
+	writel(val, the_transceiver->iotg.base + CI_USBMODE);
+}
+
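+/* Initialize the OTG state machine inputs from the post-reset OTGSC value */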
+static void init_hsm(void)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+	u32				val32;
+
+	/* read OTGSC after reset */
+	val32 = readl(iotg->base + CI_OTGSC);
+	dev_dbg(pnw->dev,
+		"%s: OTGSC init value = 0x%x\n", __func__, val32);
+
+	/* set init state */
+	if (val32 & OTGSC_ID) {
+		iotg->hsm.id = ID_B;
+		iotg->otg.otg->default_a = 0;
+		set_client_mode();
+		iotg->otg.state = OTG_STATE_B_IDLE;
+	} else {
+		iotg->hsm.id = ID_A;
+		iotg->otg.otg->default_a = 1;
+		set_host_mode();
+		iotg->otg.state = OTG_STATE_A_IDLE;
+	}
+
+	/* set session indicator */
+	if (val32 & OTGSC_BSE)
+		iotg->hsm.b_sess_end = 1;
+	if (val32 & OTGSC_BSV)
+		iotg->hsm.b_sess_vld = 1;
+	if (val32 & OTGSC_ASV)
+		iotg->hsm.a_sess_vld = 1;
+	if (val32 & OTGSC_AVV)
+		iotg->hsm.a_vbus_vld = 1;
+
+	/* by default, request the bus as A device */
+	iotg->hsm.a_bus_req = 1;
+	iotg->hsm.a_bus_drop = 0;
+	/* init hsm means power_up case */
+	iotg->hsm.power_up = 0;
+	/* by default, don't request the bus as B device */
+	iotg->hsm.b_bus_req = 0;
+	/* no system error */
+	iotg->hsm.a_clr_err = 0;
+
+	if (iotg->otg.state == OTG_STATE_A_IDLE) {
+		wake_lock(&pnw->wake_lock);
+		pm_runtime_get(pnw->dev);
+	}
+}
+
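+/* Re-sync the state machine inputs from the current OTGSC status bits */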
+static void update_hsm(void)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+	u32				val32;
+
+	/* read OTGSC */
+	val32 = readl(iotg->base + CI_OTGSC);
+	dev_dbg(pnw->dev,
+		"%s OTGSC current value = 0x%x\n", __func__, val32);
+
+	iotg->hsm.id = !!(val32 & OTGSC_ID) ? ID_B : ID_A;
+	iotg->hsm.b_sess_end = !!(val32 & OTGSC_BSE);
+	iotg->hsm.b_sess_vld = !!(val32 & OTGSC_BSV);
+	iotg->hsm.a_sess_vld = !!(val32 & OTGSC_ASV);
+	iotg->hsm.a_vbus_vld = !!(val32 & OTGSC_AVV);
+}
+
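+/* Tune HS driver impedance/current via ULPI VS1 for a cleaner eye diagram */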
+static void penwell_otg_eye_diagram_optimize(void)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+	int				retval;
+	u8				value = 0;
+
+	/* Check the platform type as a different value will be used */
+	if (is_clovertrail(to_pci_dev(pnw->dev))) {
+		/* Set 0x7f for better quality in eye diagram.
+		 * It means ZHSDRV = 0b11 and IHSTX = 0b1111 */
+		value = 0x7f;
+	} else {
+		/* Set 0x77 for better quality in eye diagram.
+		 * It means ZHSDRV = 0b11 and IHSTX = 0b0111 */
+		value = 0x77;
+	}
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_VS1SET, value);
+	if (retval)
+		dev_warn(pnw->dev,
+			"eye diagram optimize failed with ulpi failure\n");
+}
+
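+/*
+ * Dummy IRQ handler used in device mode: a pending status bit here means
+ * a B-disconnect happened, so clear hsm.b_conn, kick the state machine
+ * and ack the handled status bits.
+ */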
+static irqreturn_t otg_dummy_irq(int irq, void *_dev)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	void __iomem		*reg_base = _dev;
+	u32			val;
+	u32			int_mask = 0;
+
+	val = readl(reg_base + CI_USBMODE);
+	if ((val & USBMODE_CM) != USBMODE_DEVICE)
+		return IRQ_NONE;
+
+	val = readl(reg_base + CI_USBSTS);
+	int_mask = val & INTR_DUMMY_MASK;
+
+	if (int_mask == 0)
+		return IRQ_NONE;
+
+	/* Clear hsm.b_conn here since the host driver can't detect it;
+	 * otg_dummy_irq being called means a B-disconnect happened.
+	 */
+	if (pnw->iotg.hsm.b_conn) {
+		pnw->iotg.hsm.b_conn = 0;
+		penwell_update_transceiver();
+	}
+
+	/* Clear interrupts */
+	writel(int_mask, reg_base + CI_USBSTS);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t otg_irq_handle(struct penwell_otg *pnw,
+					struct intel_mid_otg_xceiv *iotg)
+{
+	int				id = 0;
+	int				flag = 0;
+	u32				int_sts, int_en, int_mask = 0;
+	u8				usb_vs2_latch = 0;
+	u8				usb_vs2_sts = 0;
+	struct iotg_ulpi_access_ops	*ops;
+
+	/* Check VBUS/SRP interrupt */
+	int_sts = readl(pnw->iotg.base + CI_OTGSC);
+	int_en = (int_sts & OTGSC_INTEN_MASK) >> 8;
+	int_mask = int_sts & int_en;
+
+	if (int_mask == 0)
+		return IRQ_NONE;
+
+	ops = &iotg->ulpi_ops;
+	ops->read(iotg, ULPI_VS2LATCH, &usb_vs2_latch);
+	dev_dbg(pnw->dev, "usb_vs2_latch = 0x%x\n", usb_vs2_latch);
+	if (usb_vs2_latch & IDRARBRC_MSK) {
+		ops->read(iotg, ULPI_VS2STS, &usb_vs2_sts);
+		dev_dbg(pnw->dev, "%s: usb_vs2_sts = 0x%x\n",
+				__func__, usb_vs2_sts);
+
+		switch (IDRARBRC_STS(usb_vs2_sts)) {
+		case IDRARBRC_A:
+			id = ID_ACA_A;
+			dev_dbg(pnw->dev, "ACA-A interrupt detected\n");
+			break;
+		case IDRARBRC_B:
+			id = ID_ACA_B;
+			dev_dbg(pnw->dev, "ACA-B interrupt detected\n");
+			break;
+		case IDRARBRC_C:
+			id = ID_ACA_C;
+			dev_dbg(pnw->dev, "ACA-C interrupt detected\n");
+			break;
+		default:
+			break;
+		}
+
+		if (id) {
+			iotg->hsm.id = id;
+			flag = 1;
+			dev_dbg(pnw->dev, "%s: id change int = %d\n",
+					__func__, iotg->hsm.id);
+		} else {
+			iotg->hsm.id = (int_sts & OTGSC_ID) ?
+				ID_B : ID_A;
+			flag = 1;
+			dev_dbg(pnw->dev, "%s: id change int = %d\n",
+					__func__, iotg->hsm.id);
+		}
+	}
+
+	if (int_mask) {
+		dev_dbg(pnw->dev,
+			"OTGSC = 0x%x, mask =0x%x\n", int_sts, int_mask);
+
+		if (int_mask & OTGSC_IDIS) {
+			if (!id)
+				iotg->hsm.id = (int_sts & OTGSC_ID) ?
+					ID_B : ID_A;
+			flag = 1;
+			dev_dbg(pnw->dev, "%s: id change int = %d\n",
+						__func__, iotg->hsm.id);
+
+			/* Update a_vbus_valid once ID changed */
+			iotg->hsm.a_vbus_vld = (int_sts & OTGSC_AVV) ? 1 : 0;
+		}
+		if (int_mask & OTGSC_DPIS) {
+			iotg->hsm.a_srp_det = (int_sts & OTGSC_DPS) ? 1 : 0;
+			flag = 1;
+			dev_dbg(pnw->dev, "%s: data pulse int = %d\n",
+						__func__, iotg->hsm.a_srp_det);
+		}
+		if (int_mask & OTGSC_BSEIS) {
+			iotg->hsm.b_sess_end = (int_sts & OTGSC_BSE) ? 1 : 0;
+			flag = 1;
+			dev_dbg(pnw->dev, "%s: b sess end int = %d\n",
+						__func__, iotg->hsm.b_sess_end);
+		}
+		if (int_mask & OTGSC_BSVIS) {
+			iotg->hsm.b_sess_vld = (int_sts & OTGSC_BSV) ? 1 : 0;
+			flag = 1;
+			dev_dbg(pnw->dev, "%s: b sess valid int = %d\n",
+						__func__, iotg->hsm.b_sess_vld);
+		}
+		if (int_mask & OTGSC_ASVIS) {
+			iotg->hsm.a_sess_vld = (int_sts & OTGSC_ASV) ? 1 : 0;
+			flag = 1;
+			dev_dbg(pnw->dev, "%s: a sess valid int = %d\n",
+						__func__, iotg->hsm.a_sess_vld);
+		}
+		if (int_mask & OTGSC_AVVIS) {
+			iotg->hsm.a_vbus_vld = (int_sts & OTGSC_AVV) ? 1 : 0;
+			flag = 1;
+			dev_dbg(pnw->dev, "%s: a vbus valid int = %d\n",
+						__func__, iotg->hsm.a_vbus_vld);
+		}
+
+		writel((int_sts & ~OTGSC_INTSTS_MASK) | int_mask,
+				pnw->iotg.base + CI_OTGSC);
+	}
+
+	if (flag)
+		penwell_update_transceiver();
+
+	return IRQ_HANDLED;
+}
+
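+/*
+ * Top-level OTG interrupt handler: if the device is runtime-suspended,
+ * trigger a runtime resume and claim the IRQ; otherwise handle it
+ * directly in otg_irq_handle().
+ */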
+static irqreturn_t otg_irq(int irq, void *_dev)
+{
+	struct penwell_otg		*pnw = _dev;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+	unsigned long			flags;
+
+#ifdef CONFIG_PM_RUNTIME
+	if (pnw->rt_resuming)
+		return IRQ_HANDLED;
+
+	/* If it's not active, resume the device first before accessing regs */
+	if (pnw->rt_quiesce) {
+		spin_lock_irqsave(&pnw->lock, flags);
+		if (pnw->rt_quiesce) {
+			dev_dbg(pnw->dev, "Wake up? Interrupt detected in suspended\n");
+			pnw->rt_resuming = 1;
+			pm_runtime_get(pnw->dev);
+		}
+		spin_unlock_irqrestore(&pnw->lock, flags);
+
+		return IRQ_HANDLED;
+	}
+#endif /* CONFIG_PM_RUNTIME */
+
+	return otg_irq_handle(pnw, iotg);
+}
+
+static void penwell_otg_start_ulpi_poll(void)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	int				retval = 0;
+
+	retval = penwell_otg_ulpi_write(&pnw->iotg, 0x16, 0x5a);
+	if (retval)
+		dev_err(pnw->dev, "ulpi write in init failed\n");
+
+	schedule_delayed_work(&pnw->ulpi_poll_work, HZ);
+}
+
+static void penwell_otg_continue_ulpi_poll(void)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+
+	schedule_delayed_work(&pnw->ulpi_poll_work, HZ);
+}
+
+static void penwell_otg_stop_ulpi_poll(void)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+
+	/* we have to use this version because this function can
+	 * be called from an IRQ handler */
+	__cancel_delayed_work(&pnw->ulpi_poll_work);
+}
+
+
+static int penwell_otg_iotg_notify(struct notifier_block *nb,
+				unsigned long action, void *data)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = data;
+	int				flag = 0;
+	struct pci_dev			*pdev;
+
+	if (iotg == NULL)
+		return NOTIFY_BAD;
+
+	if (pnw == NULL)
+		return NOTIFY_BAD;
+
+	pdev = to_pci_dev(pnw->dev);
+
+	switch (action) {
+	case MID_OTG_NOTIFY_CONNECT:
+		dev_dbg(pnw->dev, "PNW OTG Notify Connect Event\n");
+		if (iotg->otg.otg->default_a == 1)
+			iotg->hsm.b_conn = 1;
+		else
+			iotg->hsm.a_conn = 1;
+		flag = 1;
+		break;
+	case MID_OTG_NOTIFY_DISCONN:
+		dev_dbg(pnw->dev, "PNW OTG Notify Disconnect Event\n");
+		if (iotg->otg.otg->default_a == 1)
+			iotg->hsm.b_conn = 0;
+		else
+			iotg->hsm.a_conn = 0;
+		flag = 1;
+		break;
+	case MID_OTG_NOTIFY_HSUSPEND:
+		dev_dbg(pnw->dev, "PNW OTG Notify Host Bus suspend Event\n");
+		break;
+	case MID_OTG_NOTIFY_HRESUME:
+		dev_dbg(pnw->dev, "PNW OTG Notify Host Bus resume Event\n");
+		if (iotg->otg.otg->default_a == 1 && iotg->hsm.a_bus_req == 0) {
+			iotg->hsm.a_bus_req = 1;
+			flag = 1;
+		}
+		break;
+	case MID_OTG_NOTIFY_CSUSPEND:
+		dev_dbg(pnw->dev, "PNW OTG Notify Client Bus suspend Event\n");
+		if (iotg->otg.otg->default_a == 1) {
+			iotg->hsm.b_bus_suspend = 1;
+			flag = 1;
+		} else {
+			penwell_otg_stop_ulpi_poll();
+			if (iotg->hsm.a_bus_suspend == 0) {
+				iotg->hsm.a_bus_suspend = 1;
+				flag = 1;
+			} else
+				flag = 0;
+		}
+		break;
+	case MID_OTG_NOTIFY_CRESUME:
+		dev_dbg(pnw->dev, "PNW OTG Notify Client Bus resume Event\n");
+		if (iotg->otg.otg->default_a == 1) {
+			/* in A_PERIPHERAL state */
+			iotg->hsm.b_bus_suspend = 0;
+			flag = 1;
+		} else {
+			/* in B_PERIPHERAL state */
+			if (!is_clovertrail(pdev))
+				penwell_otg_start_ulpi_poll();
+			iotg->hsm.a_bus_suspend = 0;
+			flag = 0;
+		}
+		break;
+	case MID_OTG_NOTIFY_CRESET:
+		dev_dbg(pnw->dev, "PNW OTG Notify Client Bus reset Event\n");
+		penwell_otg_set_power(&pnw->iotg.otg, CHRG_CURR_SDP_SUSP);
+		flag = 0;
+		break;
+	case MID_OTG_NOTIFY_HOSTADD:
+		dev_dbg(pnw->dev, "PNW OTG Nofity Host Driver Add\n");
+		flag = 1;
+		break;
+	case MID_OTG_NOTIFY_HOSTREMOVE:
+		dev_dbg(pnw->dev, "PNW OTG Nofity Host Driver remove\n");
+		flag = 1;
+		break;
+	case MID_OTG_NOTIFY_CLIENTADD:
+		dev_dbg(pnw->dev, "PNW OTG Nofity Client Driver Add\n");
+		flag = 1;
+		break;
+	case MID_OTG_NOTIFY_CLIENTREMOVE:
+		dev_dbg(pnw->dev, "PNW OTG Nofity Client Driver remove\n");
+		flag = 1;
+		break;
+	/* Test mode support */
+	case MID_OTG_NOTIFY_TEST_MODE_START:
+		dev_dbg(pnw->dev, "PNW OTG Notfiy Client Testmode Start\n");
+		iotg->hsm.in_test_mode = 1;
+		flag = 0;
+		break;
+	case MID_OTG_NOTIFY_TEST_MODE_STOP:
+		dev_dbg(pnw->dev, "PNW OTG Notfiy Client Testmode Stop\n");
+		iotg->hsm.in_test_mode = 0;
+		flag = 0;
+		break;
+	case MID_OTG_NOTIFY_TEST_SRP_REQD:
+		dev_dbg(pnw->dev, "PNW OTG Notfiy Client SRP REQD\n");
+		iotg->hsm.otg_srp_reqd = 1;
+		flag = 1;
+		break;
+	case MID_OTG_NOTIFY_TEST:
+		dev_dbg(pnw->dev, "PNW OTG Notfiy Test device detected\n");
+		iotg->hsm.test_device = 1;
+		flag = 0;
+		break;
+	case MID_OTG_NOTIFY_TEST_VBUS_OFF:
+		dev_dbg(pnw->dev, "PNW OTG Notfiy Test device Vbus off mode\n");
+		iotg->hsm.test_device = 1;
+		iotg->hsm.otg_vbus_off = 1;
+		flag = 1;
+		break;
+	default:
+		dev_dbg(pnw->dev, "PNW OTG Nofity unknown notify message\n");
+		return NOTIFY_DONE;
+	}
+
+	if (flag)
+		penwell_update_transceiver();
+
+	return NOTIFY_OK;
+}
+
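+/*
+ * HNP poll worker: read the host request flag from the attached device
+ * (GET_STATUS with wIndex 0xF000) and drop the local bus request to
+ * start HNP when the peer requests the host role.
+ */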
+static void penwell_otg_hnp_poll_work(struct work_struct *work)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+	struct usb_device		*udev;
+	int				err = 0;
+	u8				data;
+
+	if (iotg->otg.otg->host && iotg->otg.otg->host->root_hub) {
+		udev = usb_hub_find_child(iotg->otg.otg->host->root_hub, 1);
+	} else {
+		dev_warn(pnw->dev, "no host or root_hub registered\n");
+		return;
+	}
+
+	if (iotg->otg.state != OTG_STATE_A_HOST
+		&& iotg->otg.state != OTG_STATE_B_HOST)
+		return;
+
+	if (!udev) {
+		dev_warn(pnw->dev,
+			"no usb dev connected, stop HNP polling\n");
+		return;
+	}
+
+	/* Skip HS Electrical Test Device */
+	if (le16_to_cpu(udev->descriptor.idVendor) == 0x1A0A &&
+		le16_to_cpu(udev->descriptor.idProduct) > 0x0100 &&
+		le16_to_cpu(udev->descriptor.idProduct) < 0x0109) {
+		return;
+	}
+
+	/* get host request flag from connected USB device */
+	err = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+		USB_REQ_GET_STATUS, USB_DIR_IN, 0, 0xF000, &data, 1, 5000);
+
+	if (err < 0) {
+		dev_warn(pnw->dev,
+			"ERR in HNP polling = %d, stop HNP polling\n", err);
+		return;
+	}
+
+	if (data & HOST_REQUEST_FLAG) {
+		/* start HNP sequence to switch role */
+		dev_dbg(pnw->dev, "host_request_flag = 1\n");
+
+		if (iotg->hsm.id == ID_B) {
+			dev_dbg(pnw->dev,
+				"Device B host - start HNP - b_bus_req = 0\n");
+			iotg->hsm.b_bus_req = 0;
+		} else if (iotg->hsm.id == ID_A) {
+			dev_dbg(pnw->dev,
+				"Device A host - start HNP - a_bus_req = 0\n");
+			iotg->hsm.a_bus_req = 0;
+		}
+		penwell_update_transceiver();
+	} else {
+		dev_dbg(pnw->dev, "host_request_flag = 0\n");
+		penwell_otg_continue_hnp_poll(&pnw->iotg);
+	}
+}
+
+static int penwell_otg_ulpi_check(void)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+	u8				data;
+	int				retval;
+
+	retval = penwell_otg_ulpi_read(iotg, 0x16, &data);
+	if (retval) {
+		dev_err(pnw->dev,
+			"%s: ULPI hang detected, reset PHY & ctrl to recover\n",
+			__func__);
+		pnw_phy_ctrl_rst();
+		return retval;
+	}
+	return 0;
+}
+
+static void penwell_otg_ulpi_check_work(struct work_struct *work)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+	int				status;
+
+	status = pm_runtime_get_sync(pnw->dev);
+	if (status < 0) {
+		dev_err(pnw->dev, "%s: pm_runtime_get_sync FAILED err = %d\n",
+				__func__, status);
+		pm_runtime_put_sync(pnw->dev);
+		return;
+	}
+
+	if (iotg->otg.state == OTG_STATE_B_IDLE) {
+		/* Before charger detection or charger detection done */
+		dev_dbg(pnw->dev, "ulpi_check health\n");
+		penwell_otg_ulpi_check();
+	} else if (iotg->otg.state == OTG_STATE_B_PERIPHERAL) {
+		/* After charger detection, SDP/CDP is detected */
+		dev_dbg(pnw->dev, "ulpi_check health\n");
+		status = penwell_otg_ulpi_check();
+		if (status) {
+			/* After phy rst then restart peripheral stack */
+			if (iotg->stop_peripheral)
+				iotg->stop_peripheral(iotg);
+			else
+				dev_dbg(pnw->dev,
+					"client driver not support\n");
+
+			if (iotg->start_peripheral)
+				iotg->start_peripheral(iotg);
+			else
+				dev_dbg(pnw->dev,
+					"client driver not support\n");
+		}
+	}
+
+	pm_runtime_put(pnw->dev);
+}
+
+static void penwell_otg_uevent_work(struct work_struct *work)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	char *uevent_envp[2] = { "USB_INTR=BOGUS", NULL };
+
+	dev_info(pnw->dev, "%s: send uevent USB_INTR=BOGUS\n", __func__);
+	kobject_uevent_env(&pnw->dev->kobj, KOBJ_CHANGE, uevent_envp);
+}
+
+static void penwell_otg_ulpi_poll_work(struct work_struct *work)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+	u8				data;
+
+	if (iotg->otg.state != OTG_STATE_B_PERIPHERAL)
+		return;
+
+	if (iotg->hsm.in_test_mode)
+		return;
+
+	if (penwell_otg_ulpi_read(iotg, 0x16, &data)) {
+		dev_err(pnw->dev, "ulpi read time out by polling\n");
+		iotg->hsm.ulpi_error = 1;
+		iotg->hsm.ulpi_polling = 0;
+		penwell_update_transceiver();
+	} else if (data != 0x5A) {
+		dev_err(pnw->dev, "ulpi read value incorrect by polling\n");
+		iotg->hsm.ulpi_error = 1;
+		iotg->hsm.ulpi_polling = 0;
+		penwell_update_transceiver();
+	} else {
+		dev_dbg(pnw->dev, "ulpi fine by polling\n");
+		iotg->hsm.ulpi_error = 0;
+		iotg->hsm.ulpi_polling = 1;
+		penwell_otg_continue_ulpi_poll();
+	}
+}
+
+static void penwell_otg_psc_notify_work(struct work_struct *work)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+	struct power_supply_cable_props	psc_cap;
+	enum power_supply_charger_event chrg_event;
+	unsigned long			flags;
+	struct otg_bc_event		*event, *temp;
+
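+	/*
+	 * Drain the charger event queue. charger_lock is dropped while each
+	 * event is processed, so producers can keep queuing events and the
+	 * notifier chain is never called under the queue lock.
+	 */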
+	spin_lock_irqsave(&pnw->charger_lock, flags);
+	list_for_each_entry_safe(event, temp, &pnw->chrg_evt_queue, node) {
+		list_del(&event->node);
+		spin_unlock_irqrestore(&pnw->charger_lock, flags);
+
+		spin_lock_irqsave(&pnw->cap_lock, flags);
+		chrg_event = check_psc_event(pnw->psc_cap, event->cap);
+		if (chrg_event == -1)
+			dev_dbg(pnw->dev, "no need to notify\n");
+		else if (chrg_event == POWER_SUPPLY_CHARGER_EVENT_DISCONNECT) {
+			/* In the Disconnect case, the EM driver needs the same
+			 * chrg type as the Connect event; construct one here */
+			psc_cap = event->cap;
+			psc_cap.chrg_evt = chrg_event;
+			psc_cap.chrg_type = pnw->psc_cap.chrg_type;
+			pnw->psc_cap = event->cap;
+			pnw->psc_cap.chrg_evt = chrg_event;
+		} else {
+			pnw->psc_cap = event->cap;
+			pnw->psc_cap.chrg_evt = chrg_event;
+			psc_cap = pnw->psc_cap;
+		}
+		spin_unlock_irqrestore(&pnw->cap_lock, flags);
+
+		if (chrg_event != -1) {
+			dev_dbg(pnw->dev, "ma = %d, evt = %d, type = %s\n",
+				psc_cap.ma, psc_cap.chrg_evt,
+				psc_string(psc_cap.chrg_type));
+
+			atomic_notifier_call_chain(&iotg->otg.notifier,
+					USB_EVENT_CHARGER, &psc_cap);
+		}
+
+		kfree(event);
+		spin_lock_irqsave(&pnw->charger_lock, flags);
+	}
+	spin_unlock_irqrestore(&pnw->charger_lock, flags);
+}
+
+static void penwell_otg_sdp_check_work(struct work_struct *work)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct otg_bc_cap		cap;
+
+	/* Need to handle MFLD/CLV differently per different interface */
+	if (!is_clovertrail(to_pci_dev(pnw->dev))) {
+		if (penwell_otg_query_charging_cap(&cap)) {
+			dev_warn(pnw->dev, "SDP checking failed\n");
+			return;
+		}
+
+		/* If the current charging cap is still 100ma SDP,
+		 * assume this is an invalid charger and do 500ma
+		 * charging */
+		if (cap.ma != 100 || cap.chrg_type != CHRG_SDP)
+			return;
+	} else
+		return;
+
+	dev_info(pnw->dev, "Notify invalid SDP at %dma\n", CHRG_CURR_SDP_INVAL);
+	penwell_otg_update_chrg_cap(CHRG_SDP_INVAL, CHRG_CURR_SDP_INVAL);
+}
+
+static void penwell_otg_work(struct work_struct *work)
+{
+	struct penwell_otg			*pnw = container_of(work,
+						struct penwell_otg, work);
+	struct intel_mid_otg_xceiv		*iotg = &pnw->iotg;
+	struct otg_hsm				*hsm = &iotg->hsm;
+	enum usb_charger_type			charger_type;
+	enum power_supply_charger_cable_type	type;
+	int					retval;
+	struct pci_dev				*pdev;
+	unsigned long				flags;
+
+	dev_dbg(pnw->dev,
+		"old state = %s\n", state_string(iotg->otg.state));
+
+	pm_runtime_get_sync(pnw->dev);
+
+	pdev = to_pci_dev(pnw->dev);
+
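+	/*
+	 * OTG state machine: each case below handles one state and reacts to
+	 * the HSM input flags (id, b_sess_vld, a_bus_req, timeouts, ...) set
+	 * by the interrupt handler and timers, performing the required
+	 * role/VBUS changes before moving to the next state.
+	 */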
+	switch (iotg->otg.state) {
+	case OTG_STATE_UNDEFINED:
+	case OTG_STATE_B_IDLE:
+		if (hsm->id == ID_A || hsm->id == ID_ACA_A) {
+			/* Move to A_IDLE state, ID changes */
+
+			/* Delete current timer */
+			penwell_otg_del_timer(TB_SRP_FAIL_TMR);
+
+			iotg->otg.otg->default_a = 1;
+			hsm->a_srp_det = 0;
+			set_host_mode();
+			penwell_otg_phy_low_power(0);
+
+			/* Always set a_bus_req to 1, in case no ADP */
+			hsm->a_bus_req = 1;
+
+			/* Prevent device enter D0i1 or S3*/
+			wake_lock(&pnw->wake_lock);
+			pm_runtime_get(pnw->dev);
+
+			iotg->otg.state = OTG_STATE_A_IDLE;
+			penwell_update_transceiver();
+		} else if (hsm->b_adp_sense_tmout) {
+			hsm->b_adp_sense_tmout = 0;
+		} else if (hsm->b_srp_fail_tmout) {
+			hsm->b_srp_fail_tmr = 0;
+			hsm->b_srp_fail_tmout = 0;
+			hsm->b_bus_req = 0;
+			penwell_otg_nsf_msg(6);
+
+			penwell_update_transceiver();
+		} else if (hsm->b_sess_vld) {
+			/* Check if DCP is detected */
+			spin_lock_irqsave(&pnw->charger_lock, flags);
+			charger_type = pnw->charging_cap.chrg_type;
+			type = pnw->psc_cap.chrg_type;
+			if (charger_type == CHRG_DCP ||
+				type == POWER_SUPPLY_CHARGER_TYPE_USB_DCP) {
+				spin_unlock_irqrestore(&pnw->charger_lock,
+						flags);
+				break;
+			}
+			spin_unlock_irqrestore(&pnw->charger_lock, flags);
+
+			penwell_otg_phy_low_power(0);
+
+			/* Clear power_up */
+			hsm->power_up = 0;
+
+			/* Move to B_PERIPHERAL state, Session Valid */
+
+			/* Delete current timer */
+			penwell_otg_del_timer(TB_SRP_FAIL_TMR);
+
+			hsm->b_sess_end = 0;
+			hsm->a_bus_suspend = 0;
+
+			/* Start USB Battery charger detection flow */
+
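+			/* The non-Clovertrail path below follows the usual
+			 * battery-charging detection sequence: data contact
+			 * detect, then charger detect and charger type detect
+			 * to classify the port; the result (SDP/CDP/DCP/SE1/
+			 * ACA) is handled case by case further down. */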
+			/* We need a new charger detection flow for Clovertrail.
+			 * But for now (power-on), we just skip it.
+			 * Later on we'll figure it out.
+			 */
+			if (!is_clovertrail(pdev)) {
+				mutex_lock(&pnw->msic_mutex);
+				if (pdev->revision >= 0x8) {
+					retval = penwell_otg_manual_chrg_det();
+					if (retval < 0) {
+						/* if failed, reset controller
+						 * and try charger detection
+						 * flow again */
+						dev_warn(pnw->dev,
+								"detection failed, retry\n");
+						set_client_mode();
+						msleep(100);
+						penwell_otg_phy_low_power(0);
+						penwell_spi_reset_phy();
+						retval =
+						penwell_otg_manual_chrg_det();
+					}
+				} else {
+					/* Enable data contact detection */
+					penwell_otg_data_contact_detect();
+					/* Enable charger detection */
+					penwell_otg_charger_detect();
+					retval =
+					penwell_otg_charger_type_detect();
+				}
+				mutex_unlock(&pnw->msic_mutex);
+				if (retval < 0) {
+					dev_warn(pnw->dev, "Charger detect failure\n");
+					break;
+				} else {
+					charger_type = retval;
+				}
+			} else {
+				/* Clovertrail charger detection flow */
+				retval = penwell_otg_charger_det_clt();
+				if (retval < 0) {
+					dev_warn(pnw->dev, "detect failed\n");
+					/* Reset PHY and redo the detection */
+					pnw_phy_ctrl_rst();
+					/* Restart charger detection */
+					retval = penwell_otg_charger_det_clt();
+					if (retval)
+						dev_warn(pnw->dev,
+							"detect fail again\n");
+					break;
+				} else
+					charger_type = retval;
+			}
+
+			/* This is a workaround for the self-powered hub case:
+			 * the vbus valid event comes several ms before the ID
+			 * change */
+			if (hsm->id == ID_A) {
+				dev_warn(pnw->dev, "ID changed\n");
+				break;
+			}
+
+			if (charger_type == CHRG_SE1) {
+				dev_info(pnw->dev, "SE1 detected\n");
+
+				/* SE1: set charger type, current, notify EM */
+				penwell_otg_update_chrg_cap(CHRG_SE1,
+							CHRG_CURR_SE1);
+				dev_info(pnw->dev,
+					"reset PHY via SPI if SE1 detected\n");
+
+				if (!is_clovertrail(pdev)) {
+					/* Reset PHY for MFLD only */
+					penwell_otg_msic_spi_access(true);
+					penwell_otg_msic_write(MSIC_FUNCTRLSET,
+							PHYRESET);
+					penwell_otg_msic_spi_access(false);
+				}
+				break;
+			} else if (charger_type == CHRG_DCP) {
+				dev_info(pnw->dev, "DCP detected\n");
+
+				/* DCP: set charger type, current, notify EM */
+				penwell_otg_update_chrg_cap(CHRG_DCP,
+							CHRG_CURR_DCP);
+				set_client_mode();
+				break;
+
+			} else if (charger_type == CHRG_ACA) {
+				dev_info(pnw->dev, "ACA detected\n");
+				if (hsm->id == ID_ACA_A) {
+					/* Move to A_IDLE state, ID changes */
+					penwell_otg_update_chrg_cap(CHRG_ACA,
+								CHRG_CURR_ACA);
+
+					/* Delete current timer */
+					penwell_otg_del_timer(TB_SRP_FAIL_TMR);
+
+					iotg->otg.otg->default_a = 1;
+					hsm->a_srp_det = 0;
+					set_host_mode();
+					penwell_otg_phy_low_power(0);
+
+					/* Always set a_bus_req to 1,
+					 * in case no ADP */
+					hsm->a_bus_req = 1;
+
+					/* Prevent device enter D0i1 or S3*/
+					wake_lock(&pnw->wake_lock);
+					pm_runtime_get(pnw->dev);
+
+					iotg->otg.state = OTG_STATE_A_IDLE;
+					penwell_update_transceiver();
+					break;
+				} else if (hsm->id == ID_ACA_B) {
+					penwell_otg_update_chrg_cap(CHRG_ACA,
+								CHRG_CURR_ACA);
+					break;
+				} else if (hsm->id == ID_ACA_C) {
+					penwell_otg_update_chrg_cap(CHRG_ACA,
+								CHRG_CURR_ACA);
+					/* Clear HNP polling flag */
+					if (iotg->otg.otg->gadget)
+						iotg->otg.otg->gadget->
+							host_request_flag = 0;
+
+					penwell_otg_phy_low_power(0);
+					set_client_mode();
+
+					if (iotg->start_peripheral) {
+						iotg->start_peripheral(iotg);
+					} else {
+						dev_dbg(pnw->dev,
+							"client driver not support\n");
+						break;
+					}
+				}
+			} else if (charger_type == CHRG_CDP) {
+				dev_info(pnw->dev, "CDP detected\n");
+
+				/* MFLD WA: MSIC issue needs PHY intr disabled */
+				if (!is_clovertrail(pdev)) {
+					dev_dbg(pnw->dev,
+						"MFLD WA: disable PHY int\n");
+					penwell_otg_phy_intr(0);
+				}
+
+				/* CDP: set charger type, current, notify EM */
+				penwell_otg_update_chrg_cap(CHRG_CDP,
+							CHRG_CURR_CDP);
+
+				/* Clear HNP polling flag */
+				if (iotg->otg.otg->gadget)
+					iotg->otg.otg->gadget->
+							host_request_flag = 0;
+
+				if (iotg->start_peripheral) {
+					iotg->start_peripheral(iotg);
+				} else {
+					dev_dbg(pnw->dev,
+						"client driver not support\n");
+					break;
+				}
+			} else if (charger_type == CHRG_SDP) {
+				dev_info(pnw->dev, "SDP detected\n");
+
+				/* MFLD WA: MSIC issue needs PHY intr disabled */
+				if (!is_clovertrail(pdev)) {
+					dev_dbg(pnw->dev,
+						"MFLD WA: disable PHY int\n");
+					penwell_otg_phy_intr(0);
+				}
+
+				/* SDP: set charger type and 100ma by default */
+				penwell_otg_update_chrg_cap(CHRG_SDP, 100);
+
+				/* Clear HNP polling flag */
+				if (iotg->otg.otg->gadget)
+					iotg->otg.otg->gadget->
+							host_request_flag = 0;
+
+				penwell_otg_phy_low_power(0);
+				set_client_mode();
+
+				if (iotg->start_peripheral) {
+					iotg->start_peripheral(iotg);
+				} else {
+					dev_dbg(pnw->dev,
+						"client driver not support\n");
+					break;
+				}
+
+				/* Schedule the SDP checking after TIMEOUT */
+				queue_delayed_work(pnw->qwork,
+						&pnw->sdp_check_work,
+						INVALID_SDP_TIMEOUT);
+			} else if (charger_type == CHRG_UNKNOWN) {
+				dev_info(pnw->dev, "Unknown Charger Found\n");
+
+				/* Unknown: set charger type */
+				penwell_otg_update_chrg_cap(CHRG_UNKNOWN, 0);
+			}
+
+			penwell_otg_eye_diagram_optimize();
+
+			/* MFLD WA for PHY issue */
+			iotg->hsm.in_test_mode = 0;
+			iotg->hsm.ulpi_error = 0;
+
+			if (!is_clovertrail(pdev))
+				penwell_otg_start_ulpi_poll();
+
+			iotg->otg.state = OTG_STATE_B_PERIPHERAL;
+
+		} else if ((hsm->b_bus_req || hsm->power_up || hsm->adp_change
+				|| hsm->otg_srp_reqd) && !hsm->b_srp_fail_tmr) {
+
+			penwell_otg_mon_bus();
+
+			if (hsm->b_ssend_srp && hsm->b_se0_srp) {
+
+				hsm->power_up = 0;
+				hsm->adp_change = 0;
+
+				/* clear the PHCD before start srp */
+				penwell_otg_phy_low_power(0);
+
+				/* Start SRP */
+				if (pnw->iotg.otg.otg->start_srp)
+					pnw->iotg.otg.otg->start_srp(
+							pnw->iotg.otg.otg);
+				penwell_otg_add_timer(TB_SRP_FAIL_TMR);
+
+			} else {
+				hsm->b_bus_req = 0;
+				dev_info(pnw->dev,
+					"BUS is active, try SRP later\n");
+			}
+
+			/* clear after SRP attempt */
+			if (hsm->otg_srp_reqd) {
+				dev_dbg(pnw->dev, "Test mode: SRP done\n");
+				hsm->otg_srp_reqd = 0;
+			}
+		} else if (!hsm->b_sess_vld && hsm->id == ID_B) {
+			spin_lock_irqsave(&pnw->charger_lock, flags);
+			charger_type = pnw->charging_cap.chrg_type;
+			type = pnw->psc_cap.chrg_type;
+			spin_unlock_irqrestore(&pnw->charger_lock, flags);
+
+			if (charger_type == CHRG_DCP) {
+				/* Notify EM charger remove event */
+				penwell_otg_update_chrg_cap(CHRG_UNKNOWN,
+						CHRG_CURR_DISCONN);
+
+				retval = penwell_otg_ulpi_write(iotg,
+						ULPI_PWRCTRLCLR, DPVSRCEN);
+				if (retval)
+					dev_warn(pnw->dev, "ulpi failed\n");
+				penwell_otg_charger_hwdet(false);
+			} else if (charger_type == CHRG_SE1) {
+				/* Notify EM charger remove event */
+				penwell_otg_update_chrg_cap(CHRG_UNKNOWN,
+						CHRG_CURR_DISCONN);
+
+				/* WA: on SE1 detach, reset PHY over SPI */
+				dev_info(pnw->dev,
+					"reset PHY over SPI if SE1 detached\n");
+				penwell_otg_msic_spi_access(true);
+				penwell_otg_msic_write(MSIC_FUNCTRLSET,
+							PHYRESET);
+				penwell_otg_msic_spi_access(false);
+			} else if (type == POWER_SUPPLY_CHARGER_TYPE_USB_ACA) {
+				/* Notify EM charger remove event */
+				penwell_otg_update_chrg_cap(CHRG_UNKNOWN,
+						CHRG_CURR_DISCONN);
+				penwell_otg_charger_hwdet(false);
+			} else if (type == POWER_SUPPLY_CHARGER_TYPE_USB_DCP) {
+				/* Notify EM charger remove event */
+				penwell_otg_update_chrg_cap(CHRG_UNKNOWN,
+						CHRG_CURR_DISCONN);
+
+				retval = penwell_otg_ulpi_write(iotg,
+						ULPI_PWRCTRLCLR, DPVSRCEN);
+				if (retval)
+					dev_warn(pnw->dev, "ulpi failed\n");
+				penwell_otg_charger_hwdet(false);
+			} else if (type == POWER_SUPPLY_CHARGER_TYPE_SE1) {
+				/* Notify EM charger remove event */
+				penwell_otg_update_chrg_cap(CHRG_UNKNOWN,
+						CHRG_CURR_DISCONN);
+			}
+		}
+		break;
+
+	case OTG_STATE_B_PERIPHERAL:
+		/* FIXME: Check if an ID_ACA_A event can happen in this state */
+		if (hsm->id == ID_A) {
+			iotg->otg.otg->default_a = 1;
+			hsm->a_srp_det = 0;
+
+			cancel_delayed_work_sync(&pnw->ulpi_poll_work);
+			cancel_delayed_work_sync(&pnw->sdp_check_work);
+			penwell_otg_charger_hwdet(false);
+
+			if (iotg->stop_peripheral)
+				iotg->stop_peripheral(iotg);
+			else
+				dev_dbg(pnw->dev,
+					"client driver has been removed.\n");
+
+			set_host_mode();
+
+			/* Always set a_bus_req to 1, in case no ADP */
+			hsm->a_bus_req = 1;
+
+			/* Notify EM charger remove event */
+			penwell_otg_update_chrg_cap(CHRG_UNKNOWN,
+						CHRG_CURR_DISCONN);
+
+			/* Prevent device enter D0i1 or S3*/
+			wake_lock(&pnw->wake_lock);
+			pm_runtime_get(pnw->dev);
+
+			iotg->otg.state = OTG_STATE_A_IDLE;
+			penwell_update_transceiver();
+		} else if (hsm->ulpi_error && !hsm->in_test_mode) {
+			/* WA: try to recover once detected PHY issue */
+			hsm->ulpi_error = 0;
+
+			cancel_delayed_work_sync(&pnw->ulpi_poll_work);
+			cancel_delayed_work_sync(&pnw->sdp_check_work);
+
+			if (iotg->stop_peripheral)
+				iotg->stop_peripheral(iotg);
+
+			msleep(2000);
+
+			if (iotg->start_peripheral)
+				iotg->start_peripheral(iotg);
+
+			if (!is_clovertrail(pdev))
+				penwell_otg_start_ulpi_poll();
+
+		} else if (!hsm->b_sess_vld || hsm->id == ID_ACA_B) {
+			/* Move to B_IDLE state, VBUS off/ACA */
+
+			cancel_delayed_work(&pnw->ulpi_poll_work);
+			cancel_delayed_work_sync(&pnw->sdp_check_work);
+
+			hsm->b_bus_req = 0;
+
+			if (is_clovertrail(pdev)) {
+				queue_delayed_work(pnw->qwork,
+					&pnw->ulpi_check_work, HZ);
+			}
+
+			if (iotg->stop_peripheral)
+				iotg->stop_peripheral(iotg);
+			else
+				dev_dbg(pnw->dev,
+					"client driver has been removed.\n");
+
+			/* MFLD WA: re-enable PHY intr for unplug events */
+			if (!is_clovertrail(pdev)) {
+				dev_dbg(pnw->dev, "MFLD WA: enable PHY int\n");
+				penwell_otg_phy_intr(1);
+			}
+
+			if (hsm->id == ID_ACA_B)
+				penwell_otg_update_chrg_cap(CHRG_ACA,
+							CHRG_CURR_ACA);
+			else if (hsm->id == ID_B) {
+				/* Notify EM charger remove event */
+				penwell_otg_update_chrg_cap(CHRG_UNKNOWN,
+							CHRG_CURR_DISCONN);
+				penwell_otg_charger_hwdet(false);
+			}
+
+			iotg->otg.state = OTG_STATE_B_IDLE;
+		} else if (hsm->b_bus_req && hsm->a_bus_suspend
+				&& iotg->otg.otg->gadget
+				&& iotg->otg.otg->gadget->b_hnp_enable) {
+
+			penwell_otg_phy_low_power(0);
+			msleep(10);
+
+			if (iotg->stop_peripheral)
+				iotg->stop_peripheral(iotg);
+			else
+				dev_dbg(pnw->dev,
+					"client driver has been removed.\n");
+
+			penwell_otg_phy_low_power(0);
+
+			hsm->a_conn = 0;
+			hsm->a_bus_resume = 0;
+
+			if (iotg->start_host) {
+				iotg->start_host(iotg);
+				hsm->test_device = 0;
+
+				/* FIXME: can we allow D3 and D0i3
+				 * in B_WAIT_ACON?
+				 * For now just disallow it
+				 */
+				/* disallow D3 or D0i3 */
+				pm_runtime_get(pnw->dev);
+				wake_lock(&pnw->wake_lock);
+				iotg->otg.state = OTG_STATE_B_WAIT_ACON;
+				penwell_otg_add_timer(TB_ASE0_BRST_TMR);
+			} else
+				dev_dbg(pnw->dev, "host driver not loaded.\n");
+
+		} else if (hsm->id == ID_ACA_C) {
+			cancel_delayed_work_sync(&pnw->sdp_check_work);
+
+			/* Make sure current limit updated */
+			penwell_otg_update_chrg_cap(CHRG_ACA, CHRG_CURR_ACA);
+		} else if (hsm->id == ID_B) {
+			spin_lock_irqsave(&pnw->charger_lock, flags);
+			type = pnw->psc_cap.chrg_type;
+			spin_unlock_irqrestore(&pnw->charger_lock, flags);
+
+			if (type == POWER_SUPPLY_CHARGER_TYPE_USB_ACA) {
+				/* Notify EM charger ACA removal event */
+				penwell_otg_update_chrg_cap(CHRG_UNKNOWN,
+						CHRG_CURR_DISCONN);
+				penwell_otg_charger_hwdet(false);
+				/* Set current when switch from ACA to SDP */
+				if (!hsm->a_bus_suspend && iotg->otg.set_power)
+					iotg->otg.set_power(&iotg->otg, 500);
+			}
+		}
+		break;
+
+	case OTG_STATE_B_WAIT_ACON:
+		if (hsm->id == ID_A) {
+			/* Move to A_IDLE state, ID changes */
+
+			/* Delete current timer */
+			penwell_otg_del_timer(TB_ASE0_BRST_TMR);
+
+			iotg->otg.otg->default_a = 1;
+			hsm->a_srp_det = 0;
+
+			penwell_otg_HAAR(0);
+
+			PNW_STOP_HOST(pnw);
+
+			set_host_mode();
+
+			/* Always set a_bus_req to 1, in case no ADP */
+			iotg->hsm.a_bus_req = 1;
+
+			iotg->otg.state = OTG_STATE_A_IDLE;
+			penwell_update_transceiver();
+		} else if (!hsm->b_sess_vld || hsm->id == ID_ACA_B) {
+			/* Move to B_IDLE state, VBUS off/ACA */
+
+			if (hsm->id == ID_ACA_B)
+				penwell_otg_update_chrg_cap(CHRG_ACA,
+							CHRG_CURR_ACA);
+			else if (hsm->id == ID_B) {
+				/* Notify EM charger remove event */
+				penwell_otg_update_chrg_cap(CHRG_UNKNOWN,
+							CHRG_CURR_DISCONN);
+			}
+
+			/* Delete current timer */
+			penwell_otg_del_timer(TB_ASE0_BRST_TMR);
+
+			hsm->b_hnp_enable = 0;
+			hsm->b_bus_req = 0;
+			penwell_otg_HAAR(0);
+
+			PNW_STOP_HOST(pnw);
+
+			set_client_mode();
+
+			/* allow D3 and D0i3 */
+			pm_runtime_put(pnw->dev);
+			wake_unlock(&pnw->wake_lock);
+			iotg->otg.state = OTG_STATE_B_IDLE;
+		} else if (hsm->a_conn) {
+			/* Move to B_HOST state, A connected */
+
+			/* Delete current timer */
+			penwell_otg_del_timer(TB_ASE0_BRST_TMR);
+
+			penwell_otg_HAAR(0);
+
+			iotg->otg.state = OTG_STATE_B_HOST;
+			penwell_update_transceiver();
+		} else if (hsm->a_bus_resume || hsm->b_ase0_brst_tmout) {
+			/* Move to B_HOST state, A connected */
+
+			/* Delete current timer */
+			penwell_otg_del_timer(TB_ASE0_BRST_TMR);
+
+			penwell_otg_HAAR(0);
+			penwell_otg_nsf_msg(7);
+
+			PNW_STOP_HOST(pnw);
+
+			hsm->a_bus_suspend = 0;
+			hsm->b_bus_req = 0;
+
+			if (iotg->start_peripheral)
+				iotg->start_peripheral(iotg);
+			else
+				dev_dbg(pnw->dev, "client driver not loaded\n");
+
+			/* allow D3 and D0i3 in A_WAIT_BCON */
+			pm_runtime_put(pnw->dev);
+			wake_unlock(&pnw->wake_lock);
+			iotg->otg.state = OTG_STATE_B_PERIPHERAL;
+		} else if (hsm->id == ID_ACA_C) {
+			/* Make sure current limit updated */
+			penwell_otg_update_chrg_cap(CHRG_ACA, CHRG_CURR_ACA);
+		} else if (hsm->id == ID_B) {
+#if 0
+			/* only set 2ma due to client function stopped */
+			if (iotg->otg.set_power)
+				iotg->otg.set_power(&iotg->otg, 2);
+#endif
+		}
+		break;
+
+	case OTG_STATE_B_HOST:
+		if (hsm->id == ID_A) {
+			iotg->otg.otg->default_a = 1;
+			hsm->a_srp_det = 0;
+
+			/* Stop HNP polling */
+			if (iotg->stop_hnp_poll)
+				iotg->stop_hnp_poll(iotg);
+
+			PNW_STOP_HOST(pnw);
+
+			set_host_mode();
+
+			/* Always set a_bus_req to 1, in case no ADP */
+			hsm->a_bus_req = 1;
+
+			iotg->otg.state = OTG_STATE_A_IDLE;
+			penwell_update_transceiver();
+		} else if (!hsm->b_sess_vld || hsm->id == ID_ACA_B) {
+			/* Move to B_IDLE state, VBUS off/ACA */
+
+			if (hsm->id == ID_ACA_B)
+				penwell_otg_update_chrg_cap(CHRG_ACA,
+							CHRG_CURR_ACA);
+			else if (hsm->id == ID_B) {
+				/* Notify EM charger remove event */
+				penwell_otg_update_chrg_cap(CHRG_UNKNOWN,
+							CHRG_CURR_DISCONN);
+			}
+
+			hsm->b_hnp_enable = 0;
+			hsm->b_bus_req = 0;
+
+			/* Stop HNP polling */
+			if (iotg->stop_hnp_poll)
+				iotg->stop_hnp_poll(iotg);
+
+			PNW_STOP_HOST(pnw);
+
+			set_client_mode();
+
+			/* allow D3 and D0i3 in A_WAIT_BCON */
+			pm_runtime_put(pnw->dev);
+			wake_unlock(&pnw->wake_lock);
+			iotg->otg.state = OTG_STATE_B_IDLE;
+		} else if (!hsm->b_bus_req || !hsm->a_conn
+					|| hsm->test_device) {
+			hsm->b_bus_req = 0;
+
+			/* Stop HNP polling */
+			if (iotg->stop_hnp_poll)
+				iotg->stop_hnp_poll(iotg);
+
+			PNW_STOP_HOST(pnw);
+			hsm->a_bus_suspend = 0;
+
+			/* Clear HNP polling flag */
+			if (iotg->otg.otg->gadget)
+				iotg->otg.otg->gadget->host_request_flag = 0;
+
+			if (iotg->start_peripheral)
+				iotg->start_peripheral(iotg);
+			else
+				dev_dbg(pnw->dev,
+						"client driver not loaded.\n");
+
+			/* allow D3 and D0i3 in A_WAIT_BCON */
+			pm_runtime_put(pnw->dev);
+			wake_unlock(&pnw->wake_lock);
+			iotg->otg.state = OTG_STATE_B_PERIPHERAL;
+		} else if (hsm->id == ID_ACA_C) {
+			/* Make sure current limit updated */
+			penwell_otg_update_chrg_cap(CHRG_ACA, CHRG_CURR_ACA);
+		}
+		break;
+
+	case OTG_STATE_A_IDLE:
+		if (hsm->id == ID_B || hsm->id == ID_ACA_B) {
+			pnw->iotg.otg.otg->default_a = 0;
+			hsm->b_bus_req = 0;
+
+			if (hsm->id == ID_ACA_B)
+				penwell_otg_update_chrg_cap(CHRG_ACA,
+							CHRG_CURR_ACA);
+
+			set_client_mode();
+
+			iotg->otg.state = OTG_STATE_B_IDLE;
+			penwell_update_transceiver();
+
+			/* Decrement the device usage counter */
+			pm_runtime_put(pnw->dev);
+			wake_unlock(&pnw->wake_lock);
+		} else if (hsm->id == ID_ACA_A) {
+
+			penwell_otg_update_chrg_cap(CHRG_ACA, CHRG_CURR_ACA);
+
+			if (hsm->power_up)
+				hsm->power_up = 0;
+
+			if (hsm->adp_change)
+				hsm->adp_change = 0;
+
+			if (hsm->a_srp_det)
+				hsm->a_srp_det = 0;
+
+			hsm->b_conn = 0;
+			hsm->hnp_poll_enable = 0;
+
+			if (iotg->start_host)
+				iotg->start_host(iotg);
+			else {
+				dev_dbg(pnw->dev, "host driver not loaded.\n");
+				break;
+			}
+
+			/* allow D3 and D0i3 in A_WAIT_BCON */
+			pm_runtime_put(pnw->dev);
+			wake_unlock(&pnw->wake_lock);
+
+			iotg->otg.state = OTG_STATE_A_WAIT_BCON;
+
+		} else if (!hsm->a_bus_drop && (hsm->power_up || hsm->a_bus_req
+				|| hsm->a_srp_det || hsm->adp_change)) {
+			/* power_up / adp_change / a_srp_det should be
+			 * cleared as soon as they are handled. */
+			if (hsm->power_up)
+				hsm->power_up = 0;
+
+			if (hsm->adp_change)
+				hsm->adp_change = 0;
+
+			if (hsm->a_srp_det) {
+				hsm->a_srp_det = 0;
+				/* wait SRP done, then enable VBUS */
+				usleep_range(10000, 11000);
+			}
+
+			otg_set_vbus(iotg->otg.otg, true);
+
+			penwell_otg_add_timer(TA_WAIT_VRISE_TMR);
+
+			iotg->otg.state = OTG_STATE_A_WAIT_VRISE;
+
+			penwell_update_transceiver();
+		} else if (hsm->b_sess_end || hsm->a_sess_vld ||
+				hsm->a_srp_det || !hsm->b_sess_vld) {
+			hsm->a_srp_det = 0;
+			dev_dbg(pnw->dev, "reconfig...\n");
+		}
+		break;
+
+	case OTG_STATE_A_WAIT_VRISE:
+		if (hsm->a_bus_drop ||
+				hsm->id == ID_B || hsm->id == ID_ACA_B) {
+			/* Move to A_WAIT_VFALL, over current/user request */
+
+			/* Delete current timer */
+			penwell_otg_del_timer(TA_WAIT_VRISE_TMR);
+
+			/* Turn off VBUS */
+			otg_set_vbus(iotg->otg.otg, false);
+
+			penwell_otg_add_timer(TA_WAIT_VFALL_TMR);
+			iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
+		} else if (hsm->a_vbus_vld || hsm->a_wait_vrise_tmout
+						|| hsm->id == ID_ACA_A) {
+			/* Move to A_WAIT_BCON state, a vbus vld */
+			/* Delete current timer and clear flags */
+			penwell_otg_del_timer(TA_WAIT_VRISE_TMR);
+
+			if (!hsm->a_vbus_vld) {
+				dev_warn(pnw->dev, "vbus can't rise to vbus vld, overcurrent!\n");
+				/* Turn off VBUS */
+				otg_set_vbus(iotg->otg.otg, false);
+
+				penwell_otg_add_timer(TA_WAIT_VFALL_TMR);
+				iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
+				break;
+			}
+
+			if (hsm->id == ID_ACA_A) {
+				otg_set_vbus(iotg->otg.otg, false);
+
+				penwell_otg_update_chrg_cap(CHRG_ACA,
+							CHRG_CURR_ACA);
+			}
+
+			hsm->a_bus_req = 1;
+			hsm->b_conn = 0;
+			hsm->hnp_poll_enable = 0;
+
+			penwell_otg_eye_diagram_optimize();
+
+			if (iotg->start_host) {
+				dev_dbg(pnw->dev, "host_ops registered!\n");
+				iotg->start_host(iotg);
+			} else {
+				dev_dbg(pnw->dev, "host driver not loaded.\n");
+				break;
+			}
+
+			penwell_otg_add_timer(TA_WAIT_BCON_TMR);
+
+			/* allow D3 and D0i3 in A_WAIT_BCON */
+			pm_runtime_put(pnw->dev);
+			wake_unlock(&pnw->wake_lock);
+			/* at least give the USB host some time to enumerate
+			 * devices before trying to suspend the system */
+			wake_lock_timeout(&pnw->wake_lock, 5 * HZ);
+
+			iotg->otg.state = OTG_STATE_A_WAIT_BCON;
+		}
+		break;
+	case OTG_STATE_A_WAIT_BCON:
+		if (hsm->id == ID_B || hsm->id == ID_ACA_B || hsm->a_bus_drop ||
+			hsm->a_wait_bcon_tmout) {
+			/* Move to A_WAIT_VFALL state, user request */
+
+			/* Delete current timer and clear flags for B-Device */
+			penwell_otg_del_timer(TA_WAIT_BCON_TMR);
+
+			hsm->b_bus_req = 0;
+
+			PNW_STOP_HOST(pnw);
+
+			/* Turn off VBUS */
+			otg_set_vbus(iotg->otg.otg, false);
+
+			penwell_otg_add_timer(TA_WAIT_VFALL_TMR);
+
+			/* disallow D3 or D0i3 */
+			pm_runtime_get(pnw->dev);
+			wake_lock(&pnw->wake_lock);
+			iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
+		} else if (!hsm->a_vbus_vld) {
+			/* Move to A_VBUS_ERR state, over-current detected */
+
+			/* CTP SW workaround: add 300ms debounce on VBUS drop */
+			if (is_clovertrail(pdev)) {
+				msleep(300);
+				if (hsm->a_vbus_vld)
+					break;
+			}
+
+			/* Notify user space for vbus invalid event */
+			penwell_otg_notify_warning(USB_WARNING_VBUS_INVALID);
+
+			/* Delete current timer and disable host function */
+			penwell_otg_del_timer(TA_WAIT_BCON_TMR);
+
+			PNW_STOP_HOST(pnw);
+
+			/* Turn off VBUS and enter PHY low power mode */
+			otg_set_vbus(iotg->otg.otg, false);
+
+			/* disallow D3 or D0i3 */
+			pm_runtime_get(pnw->dev);
+			wake_lock(&pnw->wake_lock);
+			iotg->otg.state = OTG_STATE_A_VBUS_ERR;
+		} else if (hsm->b_conn) {
+			/* Move to A_HOST state, device connected */
+
+			/* Delete current timer and disable host function */
+			penwell_otg_del_timer(TA_WAIT_BCON_TMR);
+
+			/* Start HNP polling */
+			if (iotg->start_hnp_poll)
+				iotg->start_hnp_poll(iotg);
+
+			if (!hsm->a_bus_req)
+				hsm->a_bus_req = 1;
+
+			if (hsm->test_device)
+				penwell_otg_add_timer(TTST_MAINT_TMR);
+
+			iotg->otg.state = OTG_STATE_A_HOST;
+		} else if (hsm->id == ID_ACA_A) {
+			penwell_otg_update_chrg_cap(CHRG_ACA, CHRG_CURR_ACA);
+
+			/* Turn off VBUS */
+			otg_set_vbus(iotg->otg.otg, false);
+		}
+		break;
+
+	case OTG_STATE_A_HOST:
+		if (hsm->id == ID_B || hsm->id == ID_ACA_B || hsm->a_bus_drop) {
+			/* Move to A_WAIT_VFALL state, timeout/user request */
+
+			/* Delete current timer and clear flags */
+			if (hsm->test_device) {
+				hsm->test_device = 0;
+				penwell_otg_del_timer(TTST_MAINT_TMR);
+			}
+
+			if (hsm->id == ID_ACA_B)
+				penwell_otg_update_chrg_cap(CHRG_ACA,
+							CHRG_CURR_ACA);
+
+			/* Stop HNP polling */
+			if (iotg->stop_hnp_poll)
+				iotg->stop_hnp_poll(iotg);
+
+			penwell_otg_phy_low_power(0);
+
+			PNW_STOP_HOST(pnw);
+
+			/* Turn off VBUS */
+			otg_set_vbus(iotg->otg.otg, false);
+
+			penwell_otg_add_timer(TA_WAIT_VFALL_TMR);
+
+			/* disallow D3 or D0i3 */
+			pm_runtime_get(pnw->dev);
+			wake_lock(&pnw->wake_lock);
+			iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
+		} else if (hsm->test_device && hsm->tst_maint_tmout) {
+
+			hsm->test_device = 0;
+
+			/* Stop HNP polling */
+			if (iotg->stop_hnp_poll)
+				iotg->stop_hnp_poll(iotg);
+
+			penwell_otg_phy_low_power(0);
+
+			PNW_STOP_HOST(pnw);
+
+			/* Turn off VBUS */
+			otg_set_vbus(iotg->otg.otg, false);
+
+			/* Clear states and wait for SRP */
+			hsm->a_srp_det = 0;
+			hsm->a_bus_req = 0;
+
+			/* disallow D3 or D0i3 */
+			pm_runtime_get(pnw->dev);
+			wake_lock(&pnw->wake_lock);
+			iotg->otg.state = OTG_STATE_A_IDLE;
+		} else if (!hsm->a_vbus_vld) {
+			/* Move to A_VBUS_ERR state */
+
+			/* CTP SW workaround: add 300ms debounce on VBUS drop */
+			if (is_clovertrail(pdev)) {
+				msleep(300);
+				if (hsm->a_vbus_vld)
+					break;
+			}
+
+			/* Notify user space for vbus invalid event */
+			penwell_otg_notify_warning(USB_WARNING_VBUS_INVALID);
+
+			/* Delete current timer and clear flags */
+			if (hsm->test_device) {
+				hsm->test_device = 0;
+				penwell_otg_del_timer(TTST_MAINT_TMR);
+			}
+
+			/* Stop HNP polling */
+			if (iotg->stop_hnp_poll)
+				iotg->stop_hnp_poll(iotg);
+
+			PNW_STOP_HOST(pnw);
+
+			/* Turn off VBUS */
+			otg_set_vbus(iotg->otg.otg, false);
+
+			/* disallow D3 or D0i3 */
+			pm_runtime_get(pnw->dev);
+			wake_lock(&pnw->wake_lock);
+			iotg->otg.state = OTG_STATE_A_VBUS_ERR;
+		} else if (!hsm->a_bus_req &&
+			   iotg->otg.otg->host->b_hnp_enable) {
+			/* Move to A_SUSPEND state */
+
+			/* Stop HNP polling */
+			if (iotg->stop_hnp_poll)
+				iotg->stop_hnp_poll(iotg);
+
+			/* Start TA_AIDL_BDIS per OTG spec 7.1.5: how long the
+			 * A-device stays suspended waiting for the B-device
+			 * to disconnect before it may end the session */
+			penwell_otg_add_timer(TA_AIDL_BDIS_TMR);
+
+			/* Set HABA to enable hardware assistance to
+			 * signal A-connect after receiving B-disconnect.
+			 * Hardware will then set client mode and
+			 * enable URE, SLE and PCE after the assistance.
+			 * otg_dummy_irq is used to clear these ints
+			 * when the client driver is not resumed.
+			 */
+			if (request_irq(pdev->irq, otg_dummy_irq,
+					IRQF_SHARED, driver_name,
+					iotg->base) != 0) {
+					dev_dbg(pnw->dev,
+					"request interrupt %d failed\n",
+					pdev->irq);
+			}
+			penwell_otg_HABA(1);
+
+			penwell_otg_loc_sof(0);
+			penwell_otg_phy_low_power(0);
+
+			/* disallow D3 or D0i3 */
+			pm_runtime_get(pnw->dev);
+			wake_lock(&pnw->wake_lock);
+			iotg->otg.state = OTG_STATE_A_SUSPEND;
+		} else if (!hsm->b_conn && hsm->test_device
+						&& hsm->otg_vbus_off) {
+			/* If it is a test device with the otg_vbus_off bit
+			 * set, turn off VBUS on the disconnect event and stay
+			 * for TTST_NOADP without ADP */
+
+			penwell_otg_del_timer(TTST_MAINT_TMR);
+
+			/* Stop HNP polling */
+			if (iotg->stop_hnp_poll)
+				iotg->stop_hnp_poll(iotg);
+
+			penwell_otg_phy_low_power(0);
+
+			PNW_STOP_HOST(pnw);
+
+			/* Turn off VBUS */
+			otg_set_vbus(iotg->otg.otg, false);
+
+			penwell_otg_add_timer(TTST_NOADP_TMR);
+
+			/* disallow D3 or D0i3 */
+			pm_runtime_get(pnw->dev);
+			wake_lock(&pnw->wake_lock);
+			iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
+
+		} else if (!hsm->b_conn) {
+
+			/* Delete current timer and clear flags */
+			if (hsm->test_device) {
+				hsm->test_device = 0;
+				penwell_otg_del_timer(TTST_MAINT_TMR);
+			}
+
+			/* Stop HNP polling */
+			if (iotg->stop_hnp_poll)
+				iotg->stop_hnp_poll(iotg);
+
+			/* add kernel timer */
+			iotg->otg.state = OTG_STATE_A_WAIT_BCON;
+		} else if (hsm->id == ID_ACA_A) {
+			penwell_otg_update_chrg_cap(CHRG_ACA, CHRG_CURR_ACA);
+
+			/* Turn off VBUS */
+			otg_set_vbus(iotg->otg.otg, false);
+		} else if (hsm->id == ID_A) {
+			/* Turn on VBUS */
+			otg_set_vbus(iotg->otg.otg, true);
+		}
+		break;
+
+	case OTG_STATE_A_SUSPEND:
+		if (hsm->id == ID_B || hsm->id == ID_ACA_B ||
+				hsm->a_bus_drop || hsm->a_aidl_bdis_tmout) {
+			/* Move to A_WAIT_VFALL state, timeout/user request */
+			penwell_otg_HABA(0);
+			free_irq(pdev->irq, iotg->base);
+
+			/* Delete current timer and clear HW assist */
+			if (hsm->a_aidl_bdis_tmout)
+				hsm->a_aidl_bdis_tmout = 0;
+			penwell_otg_del_timer(TA_AIDL_BDIS_TMR);
+
+			if (hsm->id == ID_ACA_B)
+				penwell_otg_update_chrg_cap(CHRG_ACA,
+							CHRG_CURR_ACA);
+
+			/* Stop HNP polling */
+			if (iotg->stop_hnp_poll)
+				iotg->stop_hnp_poll(iotg);
+
+			PNW_STOP_HOST(pnw);
+
+			/* Turn off VBUS */
+			otg_set_vbus(iotg->otg.otg, false);
+
+			penwell_otg_add_timer(TA_WAIT_VFALL_TMR);
+			iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
+		} else if (!hsm->a_vbus_vld) {
+			/* Move to A_VBUS_ERR state, Over-current */
+			penwell_otg_HABA(0);
+			free_irq(pdev->irq, iotg->base);
+
+			/* Delete current timer and clear flags */
+			penwell_otg_del_timer(TA_AIDL_BDIS_TMR);
+
+			PNW_STOP_HOST(pnw);
+
+			/* Turn off VBUS */
+			otg_set_vbus(iotg->otg.otg, false);
+			iotg->otg.state = OTG_STATE_A_VBUS_ERR;
+		} else if (!hsm->b_conn &&
+			   !pnw->iotg.otg.otg->host->b_hnp_enable) {
+			/* Move to A_WAIT_BCON */
+
+			/* delete current timer */
+			penwell_otg_del_timer(TA_AIDL_BDIS_TMR);
+
+			/* add kernel timer */
+			penwell_otg_add_timer(TA_WAIT_BCON_TMR);
+
+			/* allow D3 and D0i3 in A_WAIT_BCON */
+			pm_runtime_put(pnw->dev);
+			wake_unlock(&pnw->wake_lock);
+			iotg->otg.state = OTG_STATE_A_WAIT_BCON;
+		} else if (!hsm->b_conn &&
+			   pnw->iotg.otg.otg->host->b_hnp_enable) {
+			/* Move to A_PERIPHERAL state, HNP */
+			penwell_otg_HABA(0);
+			free_irq(pdev->irq, iotg->base);
+
+			/* Delete current timer and clear flags */
+			penwell_otg_del_timer(TA_AIDL_BDIS_TMR);
+			penwell_otg_phy_low_power(0);
+
+			PNW_STOP_HOST(pnw);
+
+			penwell_otg_phy_low_power(0);
+			hsm->b_bus_suspend = 0;
+
+			/* Clear HNP polling flag */
+			if (iotg->otg.otg->gadget)
+				iotg->otg.otg->gadget->host_request_flag = 0;
+
+			penwell_otg_phy_low_power(0);
+
+			if (iotg->start_peripheral)
+				iotg->start_peripheral(iotg);
+			else
+				dev_dbg(pnw->dev,
+						"client driver not loaded.\n");
+
+			penwell_otg_add_timer(TA_BIDL_ADIS_TMR);
+			iotg->otg.state = OTG_STATE_A_PERIPHERAL;
+		} else if (hsm->a_bus_req) {
+			/* Move to A_HOST state, user request */
+			penwell_otg_HABA(0);
+			free_irq(pdev->irq, iotg->base);
+
+			/* Delete current timer and clear flags */
+			penwell_otg_del_timer(TA_AIDL_BDIS_TMR);
+
+			penwell_otg_loc_sof(1);
+
+			/* Start HNP polling */
+			if (iotg->start_hnp_poll)
+				iotg->start_hnp_poll(iotg);
+
+			/* allow D3 and D0i3 in A_HOST */
+			pm_runtime_put(pnw->dev);
+			wake_unlock(&pnw->wake_lock);
+			iotg->otg.state = OTG_STATE_A_HOST;
+		} else if (hsm->id == ID_ACA_A) {
+			penwell_otg_update_chrg_cap(CHRG_ACA, CHRG_CURR_ACA);
+
+			/* Turn off VBUS */
+			otg_set_vbus(iotg->otg.otg, false);
+		}
+		break;
+	case OTG_STATE_A_PERIPHERAL:
+		if (hsm->id == ID_B || hsm->a_bus_drop) {
+			/* Move to A_WAIT_VFALL state */
+
+			/* Delete current timer and clear flags */
+			penwell_otg_del_timer(TA_BIDL_ADIS_TMR);
+
+			if (iotg->stop_peripheral)
+				iotg->stop_peripheral(iotg);
+			else
+				dev_dbg(pnw->dev,
+					"client driver has been removed.\n");
+
+			/* Turn off VBUS */
+			otg_set_vbus(iotg->otg.otg, false);
+			set_host_mode();
+
+			penwell_otg_add_timer(TA_WAIT_VFALL_TMR);
+			iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
+		} else if (!hsm->a_vbus_vld) {
+			/* Move to A_VBUS_ERR state, over-current detected */
+
+			/* Delete current timer and disable client function */
+			penwell_otg_del_timer(TA_BIDL_ADIS_TMR);
+
+			if (iotg->stop_peripheral)
+				iotg->stop_peripheral(iotg);
+			else
+				dev_dbg(pnw->dev,
+					"client driver has been removed.\n");
+
+			/* Turn off the VBUS and enter PHY low power mode */
+			otg_set_vbus(iotg->otg.otg, false);
+
+			iotg->otg.state = OTG_STATE_A_VBUS_ERR;
+		} else if (hsm->a_bidl_adis_tmout) {
+			/* Move to A_WAIT_BCON state */
+			hsm->a_bidl_adis_tmr = 0;
+
+			msleep(10);
+			penwell_otg_phy_low_power(0);
+
+			/* Disable client function and switch to host mode */
+			if (iotg->stop_peripheral)
+				iotg->stop_peripheral(iotg);
+			else
+				dev_dbg(pnw->dev,
+					"client driver has been removed.\n");
+
+			hsm->hnp_poll_enable = 0;
+			hsm->b_conn = 0;
+
+			penwell_otg_phy_low_power(0);
+
+			if (iotg->start_host)
+				iotg->start_host(iotg);
+			else
+				dev_dbg(pnw->dev,
+						"host driver not loaded.\n");
+
+			penwell_otg_add_timer(TA_WAIT_BCON_TMR);
+
+			/* allow D3 and D0i3 in A_WAIT_BCON */
+			pm_runtime_put(pnw->dev);
+			wake_unlock(&pnw->wake_lock);
+			iotg->otg.state = OTG_STATE_A_WAIT_BCON;
+		} else if (hsm->id == ID_A && hsm->b_bus_suspend) {
+			if (!timer_pending(&pnw->hsm_timer))
+				penwell_otg_add_timer(TA_BIDL_ADIS_TMR);
+		} else if (hsm->id == ID_A && !hsm->b_bus_suspend) {
+			penwell_otg_del_timer(TA_BIDL_ADIS_TMR);
+		} else if (hsm->id == ID_ACA_A) {
+			penwell_otg_update_chrg_cap(CHRG_ACA, CHRG_CURR_ACA);
+
+			/* Turn off VBUS */
+			otg_set_vbus(iotg->otg.otg, false);
+		}
+		break;
+	case OTG_STATE_A_VBUS_ERR:
+		if (hsm->id == ID_B || hsm->id == ID_ACA_B ||
+			hsm->id == ID_ACA_A || hsm->a_bus_drop ||
+						hsm->a_clr_err) {
+			if (hsm->a_clr_err)
+				hsm->a_clr_err = 0;
+
+			penwell_otg_add_timer(TA_WAIT_VFALL_TMR);
+			iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
+		}
+		break;
+	case OTG_STATE_A_WAIT_VFALL:
+		if (hsm->a_wait_vfall_tmout) {
+			hsm->a_srp_det = 0;
+			hsm->a_wait_vfall_tmout = 0;
+
+			/* Move to A_IDLE state, vbus falls */
+			/* Always set a_bus_req to 1, in case no ADP */
+			hsm->a_bus_req = 1;
+
+			iotg->otg.state = OTG_STATE_A_IDLE;
+			penwell_update_transceiver();
+		} else if (hsm->test_device && hsm->otg_vbus_off
+					&& hsm->tst_noadp_tmout) {
+			/* After noadp timeout, switch back to normal mode */
+			hsm->test_device = 0;
+			hsm->otg_vbus_off = 0;
+			hsm->tst_noadp_tmout = 0;
+
+			hsm->a_bus_req = 1;
+
+			iotg->otg.state = OTG_STATE_A_IDLE;
+			penwell_update_transceiver();
+		}
+		break;
+	default:
+		break;
+	}
+
+	pm_runtime_put_sync(pnw->dev);
+
+	dev_dbg(pnw->dev,
+			"new state = %s\n", state_string(iotg->otg.state));
+}
+
+static ssize_t
+show_registers(struct device *_dev, struct device_attribute *attr, char *buf)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	char			*next;
+	unsigned		size;
+	unsigned		t;
+
+	next = buf;
+	size = PAGE_SIZE;
+
+	pm_runtime_get_sync(pnw->dev);
+
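+	/*
+	 * Dump the controller registers at fixed offsets from the
+	 * memory-mapped base, as labeled in the format string below
+	 * (USBCMD at 0x30 through USBMODE at 0xf8 on this ChipIdea-style
+	 * controller).
+	 */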
+	t = scnprintf(next, size,
+		"\n"
+		"USBCMD = 0x%08x\n"
+		"USBSTS = 0x%08x\n"
+		"USBINTR = 0x%08x\n"
+		"ASYNCLISTADDR = 0x%08x\n"
+		"PORTSC1 = 0x%08x\n"
+		"HOSTPC1 = 0x%08x\n"
+		"OTGSC = 0x%08x\n"
+		"USBMODE = 0x%08x\n",
+		readl(pnw->iotg.base + 0x30),
+		readl(pnw->iotg.base + 0x34),
+		readl(pnw->iotg.base + 0x38),
+		readl(pnw->iotg.base + 0x48),
+		readl(pnw->iotg.base + 0x74),
+		readl(pnw->iotg.base + 0xb4),
+		readl(pnw->iotg.base + 0xf4),
+		readl(pnw->iotg.base + 0xf8)
+		);
+
+	pm_runtime_put_sync(pnw->dev);
+
+	size -= t;
+	next += t;
+
+	return PAGE_SIZE - size;
+}
+static DEVICE_ATTR(registers, S_IRUGO, show_registers, NULL);
+
+static ssize_t
+show_hsm(struct device *_dev, struct device_attribute *attr, char *buf)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+	char				*next;
+	unsigned			size, t;
+
+	next = buf;
+	size = PAGE_SIZE;
+
+	if (iotg->otg.otg->host)
+		iotg->hsm.a_set_b_hnp_en = iotg->otg.otg->host->b_hnp_enable;
+
+	if (iotg->otg.otg->gadget)
+		iotg->hsm.b_hnp_enable = iotg->otg.otg->gadget->b_hnp_enable;
+
+	t = scnprintf(next, size,
+		"\n"
+		"current state = %s\n"
+		"a_bus_resume = \t%d\n"
+		"a_bus_suspend = \t%d\n"
+		"a_conn = \t%d\n"
+		"a_sess_vld = \t%d\n"
+		"a_srp_det = \t%d\n"
+		"a_vbus_vld = \t%d\n"
+		"b_bus_suspend = \t%d\n"
+		"b_conn = \t%d\n"
+		"b_se0_srp = \t%d\n"
+		"b_ssend_srp = \t%d\n"
+		"b_sess_end = \t%d\n"
+		"b_sess_vld = \t%d\n"
+		"id = \t%d\n"
+		"power_up = \t%d\n"
+		"adp_change = \t%d\n"
+		"test_device = \t%d\n"
+		"a_set_b_hnp_en = \t%d\n"
+		"b_srp_done = \t%d\n"
+		"b_hnp_enable = \t%d\n"
+		"hnp_poll_enable = \t%d\n"
+		"a_wait_vrise_tmout = \t%d\n"
+		"a_wait_bcon_tmout = \t%d\n"
+		"a_aidl_bdis_tmout = \t%d\n"
+		"a_bidl_adis_tmout = \t%d\n"
+		"a_bidl_adis_tmr = \t%d\n"
+		"a_wait_vfall_tmout = \t%d\n"
+		"b_ase0_brst_tmout = \t%d\n"
+		"b_srp_fail_tmout = \t%d\n"
+		"b_srp_fail_tmr = \t%d\n"
+		"b_adp_sense_tmout = \t%d\n"
+		"tst_maint_tmout = \t%d\n"
+		"tst_noadp_tmout = \t%d\n"
+		"a_bus_drop = \t%d\n"
+		"a_bus_req = \t%d\n"
+		"a_clr_err = \t%d\n"
+		"b_bus_req = \t%d\n"
+		"ulpi_error = \t%d\n"
+		"ulpi_polling = \t%d\n",
+		state_string(iotg->otg.state),
+		iotg->hsm.a_bus_resume,
+		iotg->hsm.a_bus_suspend,
+		iotg->hsm.a_conn,
+		iotg->hsm.a_sess_vld,
+		iotg->hsm.a_srp_det,
+		iotg->hsm.a_vbus_vld,
+		iotg->hsm.b_bus_suspend,
+		iotg->hsm.b_conn,
+		iotg->hsm.b_se0_srp,
+		iotg->hsm.b_ssend_srp,
+		iotg->hsm.b_sess_end,
+		iotg->hsm.b_sess_vld,
+		iotg->hsm.id,
+		iotg->hsm.power_up,
+		iotg->hsm.adp_change,
+		iotg->hsm.test_device,
+		iotg->hsm.a_set_b_hnp_en,
+		iotg->hsm.b_srp_done,
+		iotg->hsm.b_hnp_enable,
+		iotg->hsm.hnp_poll_enable,
+		iotg->hsm.a_wait_vrise_tmout,
+		iotg->hsm.a_wait_bcon_tmout,
+		iotg->hsm.a_aidl_bdis_tmout,
+		iotg->hsm.a_bidl_adis_tmout,
+		iotg->hsm.a_bidl_adis_tmr,
+		iotg->hsm.a_wait_vfall_tmout,
+		iotg->hsm.b_ase0_brst_tmout,
+		iotg->hsm.b_srp_fail_tmout,
+		iotg->hsm.b_srp_fail_tmr,
+		iotg->hsm.b_adp_sense_tmout,
+		iotg->hsm.tst_maint_tmout,
+		iotg->hsm.tst_noadp_tmout,
+		iotg->hsm.a_bus_drop,
+		iotg->hsm.a_bus_req,
+		iotg->hsm.a_clr_err,
+		iotg->hsm.b_bus_req,
+		iotg->hsm.ulpi_error,
+		iotg->hsm.ulpi_polling
+		);
+	size -= t;
+	next += t;
+
+	return PAGE_SIZE - size;
+}
+static DEVICE_ATTR(hsm, S_IRUGO, show_hsm, NULL);
+
+static ssize_t
+show_chargers(struct device *_dev, struct device_attribute *attr, char *buf)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	char				*next;
+	unsigned			size, t;
+	enum usb_charger_type		type;
+	unsigned int			ma;
+	unsigned long			flags;
+	struct pci_dev			*pdev;
+	struct power_supply_cable_props	psc_cap;
+
+	pdev = to_pci_dev(pnw->dev);
+
+	next = buf;
+	size = PAGE_SIZE;
+
+	if (!is_clovertrail(pdev)) {
+		spin_lock_irqsave(&pnw->charger_lock, flags);
+		type = pnw->charging_cap.chrg_type;
+		ma = pnw->charging_cap.ma;
+		spin_unlock_irqrestore(&pnw->charger_lock, flags);
+
+		t = scnprintf(next, size,
+			"USB Battery Charging Capability\n"
+			"\tUSB Charger Type:  %s\n"
+			"\tMax Charging Current:  %u\n",
+			charger_string(type),
+			ma
+		);
+	} else {
+		spin_lock_irqsave(&pnw->charger_lock, flags);
+		psc_cap = pnw->psc_cap;
+		spin_unlock_irqrestore(&pnw->charger_lock, flags);
+
+		t = scnprintf(next, size,
+			"USB Battery Charging Capability(CLV)\n"
+			"\tUSB Charger Type:  %s\n"
+			"\tMax Charging Current:  %u\n",
+			psc_string(psc_cap.chrg_type),
+			psc_cap.ma
+		);
+
+	}
+	size -= t;
+	next += t;
+
+	return PAGE_SIZE - size;
+}
+static DEVICE_ATTR(chargers, S_IRUGO, show_chargers, NULL);
+
+static ssize_t
+get_a_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	char			*next;
+	unsigned		size, t;
+
+	next = buf;
+	size = PAGE_SIZE;
+
+	t = scnprintf(next, size, "%d", pnw->iotg.hsm.a_bus_req);
+	size -= t;
+	next += t;
+
+	return PAGE_SIZE - size;
+}
+
+static ssize_t
+set_a_bus_req(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+
+	if (!iotg->otg.otg->default_a)
+		return -1;
+	if (count > 2)
+		return -1;
+
+	if (buf[0] == '0') {
+		iotg->hsm.a_bus_req = 0;
+		dev_dbg(pnw->dev, "a_bus_req = 0\n");
+	} else if (buf[0] == '1') {
+		/* If a_bus_drop is TRUE, a_bus_req can't be set */
+		if (iotg->hsm.a_bus_drop)
+			return -1;
+		iotg->hsm.a_bus_req = 1;
+		dev_dbg(pnw->dev, "a_bus_req = 1\n");
+		if (iotg->otg.state == OTG_STATE_A_PERIPHERAL) {
+			dev_warn(pnw->dev, "Role switch will be "
+				"performed soon, if connected OTG device "
+				"supports role switch request.\n");
+			dev_warn(pnw->dev, "It may cause data"
+				"corruption during data transfer\n");
+		}
+	}
+
+	penwell_update_transceiver();
+
+	return count;
+}
+static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUSR | S_IWGRP,
+				   get_a_bus_req, set_a_bus_req);
+
+static ssize_t
+get_a_bus_drop(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	char			*next;
+	unsigned		size;
+	unsigned		t;
+
+	next = buf;
+	size = PAGE_SIZE;
+
+	t = scnprintf(next, size, "%d", pnw->iotg.hsm.a_bus_drop);
+	size -= t;
+	next += t;
+
+	return PAGE_SIZE - size;
+}
+
+static ssize_t
+set_a_bus_drop(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+
+	if (!iotg->otg.otg->default_a)
+		return -1;
+	if (count > 2)
+		return -1;
+
+	if (buf[0] == '0') {
+		iotg->hsm.a_bus_drop = 0;
+		dev_dbg(pnw->dev, "a_bus_drop = 0\n");
+	} else if (buf[0] == '1') {
+		iotg->hsm.a_bus_drop = 1;
+		iotg->hsm.a_bus_req = 0;
+		dev_dbg(pnw->dev, "a_bus_drop = 1, so a_bus_req = 0\n");
+	}
+
+	penwell_update_transceiver();
+
+	return count;
+}
+static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUSR | S_IWGRP,
+	get_a_bus_drop, set_a_bus_drop);
+
+static ssize_t
+get_b_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	char			*next;
+	unsigned		size;
+	unsigned		t;
+
+	next = buf;
+	size = PAGE_SIZE;
+
+	t = scnprintf(next, size, "%d", pnw->iotg.hsm.b_bus_req);
+	size -= t;
+	next += t;
+
+	return PAGE_SIZE - size;
+}
+
+static ssize_t
+set_b_bus_req(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+
+	if (iotg->otg.otg->default_a)
+		return -1;
+
+	if (count > 2)
+		return -1;
+
+	if (buf[0] == '0') {
+		iotg->hsm.b_bus_req = 0;
+		dev_dbg(pnw->dev, "b_bus_req = 0\n");
+
+		if (iotg->otg.otg->gadget)
+			iotg->otg.otg->gadget->host_request_flag = 0;
+	} else if (buf[0] == '1') {
+		iotg->hsm.b_bus_req = 1;
+		dev_dbg(pnw->dev, "b_bus_req = 1\n");
+
+		if (iotg->otg.state == OTG_STATE_B_PERIPHERAL) {
+			if (iotg->otg.otg->gadget)
+				iotg->otg.otg->gadget->host_request_flag = 1;
+
+			dev_warn(pnw->dev, "Role switch will be "
+				"performed soon, if connected OTG device "
+				"supports role switch request.\n");
+			dev_warn(pnw->dev, "It may cause data "
+				"corruption during data transfer\n");
+		}
+	}
+
+	penwell_update_transceiver();
+
+	return count;
+}
+static DEVICE_ATTR(b_bus_req, S_IRUGO | S_IWUSR | S_IWGRP,
+				   get_b_bus_req, set_b_bus_req);
+
+static ssize_t
+set_a_clr_err(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+
+	if (!iotg->otg.otg->default_a)
+		return -1;
+	if (iotg->otg.state != OTG_STATE_A_VBUS_ERR)
+		return -1;
+	if (count > 2)
+		return -1;
+
+	if (buf[0] == '1') {
+		iotg->hsm.a_clr_err = 1;
+		dev_dbg(pnw->dev, "a_clr_err = 1\n");
+	}
+
+	penwell_update_transceiver();
+
+	return count;
+}
+static DEVICE_ATTR(a_clr_err, S_IRUGO | S_IWUSR | S_IWGRP, NULL, set_a_clr_err);
+
+static ssize_t
+set_ulpi_err(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+
+	dev_dbg(pnw->dev, "trigger ulpi error manually\n");
+
+	iotg->hsm.ulpi_error = 1;
+
+	penwell_update_transceiver();
+
+	return count;
+}
+static DEVICE_ATTR(ulpi_err, S_IRUGO | S_IWUSR | S_IWGRP, NULL, set_ulpi_err);
+
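+/*
+ * The "inputs" sysfs group exposes the OTG state machine input flags
+ * (a_bus_req, a_bus_drop, b_bus_req, a_clr_err, ulpi_err) so user space
+ * can request a role switch, drop VBUS or clear an error state; each
+ * store kicks the state machine via penwell_update_transceiver().
+ */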
+static struct attribute *inputs_attrs[] = {
+	&dev_attr_a_bus_req.attr,
+	&dev_attr_a_bus_drop.attr,
+	&dev_attr_b_bus_req.attr,
+	&dev_attr_a_clr_err.attr,
+	&dev_attr_ulpi_err.attr,
+	NULL,
+};
+
+static struct attribute_group debug_dev_attr_group = {
+	.name = "inputs",
+	.attrs = inputs_attrs,
+};
+
+static int penwell_otg_aca_enable(void)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+	int				retval = 0;
+	struct pci_dev			*pdev;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	pdev = to_pci_dev(pnw->dev);
+
+	if (!is_clovertrail(pdev)) {
+		penwell_otg_msic_spi_access(true);
+
+		retval = intel_scu_ipc_update_register(SPI_TI_VS4,
+				TI_ACA_DET_EN, TI_ACA_DET_EN);
+		if (retval)
+			goto done;
+
+		retval = intel_scu_ipc_update_register(SPI_TI_VS5,
+				TI_ID_FLOAT_EN | TI_ID_RES_EN,
+				TI_ID_FLOAT_EN | TI_ID_RES_EN);
+		if (retval)
+			goto done;
+	} else {
+		retval = penwell_otg_ulpi_write(iotg, ULPI_VS4SET,
+				ACADET);
+		if (retval)
+			goto done;
+
+		retval = penwell_otg_ulpi_write(iotg, ULPI_VS5SET,
+				IDFLOAT_EN | IDRES_EN);
+	}
+done:
+	if (!is_clovertrail(pdev))
+		penwell_otg_msic_spi_access(false);
+
+	if (retval)
+		dev_warn(pnw->dev, "Failed to enable ACA device detection\n");
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+
+	return retval;
+}
+
+static int penwell_otg_aca_disable(void)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+	int				retval = 0;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_VS5CLR,
+			IDFLOAT_EN | IDRES_EN);
+	if (retval)
+		goto done;
+
+	retval = penwell_otg_ulpi_write(iotg, ULPI_VS4CLR,
+			ACADET);
+
+done:
+	if (retval)
+		dev_warn(pnw->dev, "Failed to disable ACA device detection\n");
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+
+	return retval;
+}
+
+static void penwell_spi_reset_phy(void)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+
+	dev_dbg(pnw->dev, "Reset Phy over SPI\n");
+	penwell_otg_msic_spi_access(true);
+	penwell_otg_msic_write(MSIC_FUNCTRLSET, PHYRESET);
+	penwell_otg_msic_spi_access(false);
+	dev_dbg(pnw->dev, "Reset Phy over SPI Done\n");
+}
+
+static int penwell_otg_probe(struct pci_dev *pdev,
+		const struct pci_device_id *id)
+{
+	unsigned long		resource, len;
+	void __iomem		*base = NULL;
+	int			retval;
+	u32			val32;
+	struct penwell_otg	*pnw;
+	char			qname[] = "penwell_otg_queue";
+	char			chrg_qname[] = "penwell_otg_chrg_queue";
+
+	retval = 0;
+
+	dev_info(&pdev->dev, "Intel OTG2.0 controller is detected.\n");
+	dev_info(&pdev->dev, "Driver version: " DRIVER_VERSION "\n");
+
+	if (pci_enable_device(pdev) < 0) {
+		retval = -ENODEV;
+		goto done;
+	}
+
+	pnw = kzalloc(sizeof(*pnw), GFP_KERNEL);
+	if (pnw == NULL) {
+		retval = -ENOMEM;
+		goto done;
+	}
+	the_transceiver = pnw;
+
+	/* control register: BAR 0 */
+	resource = pci_resource_start(pdev, 0);
+	len = pci_resource_len(pdev, 0);
+	if (!request_mem_region(resource, len, driver_name)) {
+		retval = -EBUSY;
+		goto err;
+	}
+	pnw->region = 1;
+
+	base = ioremap_nocache(resource, len);
+	if (base == NULL) {
+		retval = -EFAULT;
+		goto err;
+	}
+	pnw->iotg.base = base;
+
+	if (!request_mem_region(USBCFG_ADDR, USBCFG_LEN, driver_name)) {
+		retval = -EBUSY;
+		goto err;
+	}
+	pnw->cfg_region = 1;
+
+	if (!pdev->irq) {
+		dev_dbg(&pdev->dev, "No IRQ.\n");
+		retval = -ENODEV;
+		goto err;
+	}
+
+	pnw->qwork = create_singlethread_workqueue(qname);
+	if (!pnw->qwork) {
+		dev_dbg(&pdev->dev, "cannot create workqueue %s\n", qname);
+		retval = -ENOMEM;
+		goto err;
+	}
+
+	pnw->chrg_qwork = create_singlethread_workqueue(chrg_qname);
+	if (!pnw->chrg_qwork) {
+		dev_dbg(&pdev->dev, "cannot create workqueue %s\n", chrg_qname);
+		retval = -ENOMEM;
+		goto err;
+	}
+
+	INIT_LIST_HEAD(&pnw->chrg_evt_queue);
+	INIT_WORK(&pnw->work, penwell_otg_work);
+	INIT_WORK(&pnw->psc_notify, penwell_otg_psc_notify_work);
+	INIT_WORK(&pnw->hnp_poll_work, penwell_otg_hnp_poll_work);
+	INIT_WORK(&pnw->uevent_work, penwell_otg_uevent_work);
+	INIT_DELAYED_WORK(&pnw->ulpi_poll_work, penwell_otg_ulpi_poll_work);
+	INIT_DELAYED_WORK(&pnw->ulpi_check_work, penwell_otg_ulpi_check_work);
+	INIT_DELAYED_WORK(&pnw->sdp_check_work, penwell_otg_sdp_check_work);
+
+	/* OTG common part */
+	pnw->dev = &pdev->dev;
+	pnw->iotg.otg.dev = &pdev->dev;
+	pnw->iotg.otg.label = driver_name;
+	pnw->iotg.otg.otg = kzalloc(sizeof(struct usb_otg), GFP_KERNEL);
+	if (!pnw->iotg.otg.otg) {
+		retval = -ENOMEM;
+		goto err;
+	}
+	pnw->iotg.otg.otg->set_host = penwell_otg_set_host;
+	pnw->iotg.otg.otg->set_peripheral = penwell_otg_set_peripheral;
+	pnw->iotg.otg.set_power = penwell_otg_set_power;
+	pnw->iotg.otg.otg->set_vbus =  penwell_otg_set_vbus;
+	pnw->iotg.otg.otg->start_srp = penwell_otg_start_srp;
+	pnw->iotg.otg.get_chrg_status = penwell_otg_get_chrg_status;
+	pnw->iotg.set_adp_probe = NULL;
+	pnw->iotg.set_adp_sense = NULL;
+	pnw->iotg.start_hnp_poll = NULL;
+	pnw->iotg.stop_hnp_poll = NULL;
+	pnw->iotg.otg.state = OTG_STATE_UNDEFINED;
+	pnw->rt_resuming = 0;
+	pnw->rt_quiesce = 0;
+	pnw->queue_stop = 0;
+	pnw->phy_power_state = 1;
+	if (usb_add_phy(&pnw->iotg.otg, USB_PHY_TYPE_USB2)) {
+		dev_err(pnw->dev, "can't set transceiver\n");
+		retval = -EBUSY;
+		goto err;
+	}
+
+	pnw->iotg.ulpi_ops.read = penwell_otg_ulpi_read;
+	pnw->iotg.ulpi_ops.write = penwell_otg_ulpi_write;
+
+	spin_lock_init(&pnw->iotg.hnp_poll_lock);
+	spin_lock_init(&pnw->lock);
+
+	wake_lock_init(&pnw->wake_lock, WAKE_LOCK_SUSPEND, "pnw_wake_lock");
+
+	init_timer(&pnw->hsm_timer);
+	init_timer(&pnw->bus_mon_timer);
+	init_timer(&pnw->hnp_poll_timer);
+	init_completion(&pnw->adp.adp_comp);
+
+	/* Battery Charging part */
+	spin_lock_init(&pnw->charger_lock);
+	spin_lock_init(&pnw->cap_lock);
+	pnw->charging_cap.ma = CHRG_CURR_DISCONN;
+	pnw->charging_cap.chrg_type = CHRG_UNKNOWN;
+	pnw->charging_cap.current_event = USBCHRG_EVENT_DISCONN;
+	pnw->psc_cap.ma = CHRG_CURR_DISCONN;
+	pnw->psc_cap.chrg_type = POWER_SUPPLY_CHARGER_TYPE_NONE;
+	pnw->psc_cap.chrg_evt = POWER_SUPPLY_CHARGER_EVENT_DISCONNECT;
+
+	ATOMIC_INIT_NOTIFIER_HEAD(&pnw->iotg.iotg_notifier);
+	/* For generic otg notifications */
+	ATOMIC_INIT_NOTIFIER_HEAD(&pnw->iotg.otg.notifier);
+
+	pnw->iotg_notifier.notifier_call = penwell_otg_iotg_notify;
+	if (intel_mid_otg_register_notifier(&pnw->iotg, &pnw->iotg_notifier)) {
+		dev_dbg(pnw->dev, "Failed to register notifier\n");
+		retval = -EBUSY;
+		goto err;
+	}
+	if (register_pm_notifier(&pnw_sleep_pm_notifier)) {
+		dev_dbg(pnw->dev, "Fail to register PM notifier\n");
+		retval = -EBUSY;
+		goto err;
+	}
+
+	/* listen usb core events */
+	usb_register_notify(&otg_nb);
+
+	pnw->otg_pdata = pdev->dev.platform_data;
+	if (pnw->otg_pdata == NULL) {
+		dev_err(pnw->dev, "Failed to get OTG platform data.\n");
+		retval = -ENODEV;
+		goto err;
+	}
+
+	if (pnw->otg_pdata->hnp_poll_support) {
+		pnw->iotg.start_hnp_poll = penwell_otg_start_hnp_poll;
+		pnw->iotg.stop_hnp_poll = penwell_otg_stop_hnp_poll;
+	}
+
+	/* FIXME: Read the charging compliance bit from SCU MIP.
+	 * This snippet needs to be cleaned up after the EM interface is ready
+	 */
+	if (is_clovertrail(pdev)) {
+		u8	smip_data = 0;
+		if (!intel_scu_ipc_read_mip(&smip_data, 1, 0x2e7, 1)) {
+			pnw->otg_pdata->charging_compliance =
+							!(smip_data & 0x40);
+			dev_info(pnw->dev, "charging_compliance = %d\n",
+					pnw->otg_pdata->charging_compliance);
+		} else
+			dev_err(pnw->dev, "scu mip read error\n");
+	}
+
+	if (!is_clovertrail(pdev)) {
+		if (pnw->otg_pdata->gpio_vbus) {
+			retval = gpio_request(pnw->otg_pdata->gpio_vbus,
+					"usb_otg_phy_reset");
+			if (retval < 0) {
+				dev_err(pnw->dev, "request gpio(%d) failed\n",
+						pnw->otg_pdata->gpio_vbus);
+				retval = -ENODEV;
+				goto err;
+			}
+		}
+	}
+
+	if (is_clovertrail(pdev)) {
+		/* Set up gpio for Clovertrail */
+		retval = gpio_request(pnw->otg_pdata->gpio_reset,
+					"usb_otg_phy_reset");
+		if (retval < 0) {
+			dev_err(pnw->dev, "request phy reset gpio(%d) failed\n",
+						pnw->otg_pdata->gpio_reset);
+			retval = -ENODEV;
+			goto err;
+		}
+		retval = gpio_request(pnw->otg_pdata->gpio_cs,
+					"usb_otg_phy_cs");
+		if (retval < 0) {
+			dev_err(pnw->dev, "request phy cs gpio(%d) failed\n",
+						pnw->otg_pdata->gpio_cs);
+			gpio_free(pnw->otg_pdata->gpio_reset);
+			retval = -ENODEV;
+			goto err;
+		}
+	}
+
+	penwell_otg_phy_power(1);
+	penwell_otg_phy_reset();
+
+	mutex_init(&pnw->msic_mutex);
+	pnw->msic = penwell_otg_check_msic();
+
+	penwell_otg_phy_low_power(0);
+
+	if (!is_clovertrail(pdev)) {
+		/* Workaround for ULPI lockup issue: turn the PHY off for 4ms */
+		penwell_otg_phy_enable(0);
+		usleep_range(4000, 4500);
+		penwell_otg_phy_enable(1);
+		/* reset phy */
+		dev_dbg(pnw->dev, "Reset Phy over SPI\n");
+		penwell_otg_msic_spi_access(true);
+		penwell_otg_msic_write(MSIC_FUNCTRLSET, PHYRESET);
+		penwell_otg_msic_spi_access(false);
+		dev_dbg(pnw->dev, "Reset Phy over SPI Done\n");
+	}
+
+	/* Enable ID pullup immediately after re-enabling the PHY */
+	val32 = readl(pnw->iotg.base + CI_OTGSC);
+	writel(val32 | OTGSC_IDPU, pnw->iotg.base + CI_OTGSC);
+
+	/* Wait for the correct value to be synced */
+	set_host_mode();
+	usleep_range(2000, 3000);
+	penwell_otg_phy_low_power(1);
+	msleep(100);
+
+	/* enable ACA device detection for CTP */
+	if (is_clovertrail(pdev))
+		penwell_otg_aca_enable();
+
+	reset_otg();
+	init_hsm();
+
+	/* we need to set active early or the first irqs will be ignored */
+	pm_runtime_set_active(&pdev->dev);
+
+	if (request_irq(pdev->irq, otg_irq, IRQF_SHARED,
+				driver_name, pnw) != 0) {
+		dev_dbg(pnw->dev,
+			"request interrupt %d failed\n", pdev->irq);
+		retval = -EBUSY;
+		goto err;
+	}
+
+	/* enable OTGSC int */
+	val32 = OTGSC_DPIE | OTGSC_BSEIE | OTGSC_BSVIE |
+		OTGSC_ASVIE | OTGSC_AVVIE | OTGSC_IDIE | OTGSC_IDPU;
+	writel(val32, pnw->iotg.base + CI_OTGSC);
+
+	retval = device_create_file(&pdev->dev, &dev_attr_registers);
+	if (retval < 0) {
+		dev_dbg(pnw->dev,
+			"Can't register sysfs attribute: %d\n", retval);
+		goto err;
+	}
+
+	retval = device_create_file(&pdev->dev, &dev_attr_hsm);
+	if (retval < 0) {
+		dev_dbg(pnw->dev,
+			"Can't hsm sysfs attribute: %d\n", retval);
+		goto err;
+	}
+
+	retval = device_create_file(&pdev->dev, &dev_attr_chargers);
+	if (retval < 0) {
+		dev_dbg(pnw->dev,
+			"Can't chargers sysfs attribute: %d\n", retval);
+		goto err;
+	}
+
+	retval = sysfs_create_group(&pdev->dev.kobj, &debug_dev_attr_group);
+	if (retval < 0) {
+		dev_dbg(pnw->dev,
+			"Can't register sysfs attr group: %d\n", retval);
+		goto err;
+	}
+
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
+
+	penwell_update_transceiver();
+
+	return 0;
+
+err:
+	if (the_transceiver)
+		penwell_otg_remove(pdev);
+done:
+	return retval;
+}
+
+static void penwell_otg_remove(struct pci_dev *pdev)
+{
+	struct penwell_otg *pnw = the_transceiver;
+	struct otg_bc_event *evt, *tmp;
+
+	/* ACA device detection disable */
+	penwell_otg_aca_disable();
+
+	pm_runtime_get_noresume(&pdev->dev);
+	pm_runtime_forbid(&pdev->dev);
+
+	if (pnw->qwork) {
+		flush_workqueue(pnw->qwork);
+		destroy_workqueue(pnw->qwork);
+	}
+
+	if (pnw->chrg_qwork) {
+		flush_workqueue(pnw->chrg_qwork);
+		destroy_workqueue(pnw->chrg_qwork);
+		list_for_each_entry_safe(evt, tmp, &pnw->chrg_evt_queue, node) {
+			list_del(&evt->node);
+			kfree(evt);
+		}
+	}
+
+	/* disable OTGSC interrupt as OTGSC doesn't change in reset */
+	writel(0, pnw->iotg.base + CI_OTGSC);
+
+	wake_lock_destroy(&pnw->wake_lock);
+
+	if (pdev->irq)
+		free_irq(pdev->irq, pnw);
+	if (pnw->cfg_region)
+		release_mem_region(USBCFG_ADDR, USBCFG_LEN);
+	if (pnw->iotg.base)
+		iounmap(pnw->iotg.base);
+	kfree(pnw->iotg.otg.otg);
+	if (pnw->region)
+		release_mem_region(pci_resource_start(pdev, 0),
+				pci_resource_len(pdev, 0));
+
+	usb_remove_phy(&pnw->iotg.otg);
+	pci_disable_device(pdev);
+	sysfs_remove_group(&pdev->dev.kobj, &debug_dev_attr_group);
+	device_remove_file(&pdev->dev, &dev_attr_chargers);
+	device_remove_file(&pdev->dev, &dev_attr_hsm);
+	device_remove_file(&pdev->dev, &dev_attr_registers);
+	usb_unregister_notify(&otg_nb);
+	kfree(pnw);
+	pnw = NULL;
+}
+
+void penwell_otg_shutdown(struct pci_dev *pdev)
+{
+	struct penwell_otg *pnw = the_transceiver;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	if (!is_clovertrail(pdev)) {
+		/* Disable MSIC Interrupt Notifications */
+		penwell_otg_msic_spi_access(true);
+
+		penwell_otg_msic_write(MSIC_INT_EN_RISE_CLR, 0x1F);
+		penwell_otg_msic_write(MSIC_INT_EN_FALL_CLR, 0x1F);
+
+		penwell_otg_msic_spi_access(false);
+	}
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+}
+
+
+static int penwell_otg_suspend_noirq(struct device *dev)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+	int				ret = 0;
+	unsigned long	flags;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	switch (iotg->otg.state) {
+	case OTG_STATE_A_VBUS_ERR:
+		set_host_mode();
+		iotg->otg.state = OTG_STATE_A_IDLE;
+		break;
+	case OTG_STATE_A_WAIT_VFALL:
+		penwell_otg_del_timer(TA_WAIT_VFALL_TMR);
+		iotg->otg.state = OTG_STATE_A_IDLE;
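+		/* fall through */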
+	case OTG_STATE_A_IDLE:
+	case OTG_STATE_B_IDLE:
+		break;
+	case OTG_STATE_A_WAIT_VRISE:
+		penwell_otg_del_timer(TA_WAIT_VRISE_TMR);
+		iotg->hsm.a_srp_det = 0;
+
+		/* Turn off VBus */
+		otg_set_vbus(iotg->otg.otg, false);
+		iotg->otg.state = OTG_STATE_A_IDLE;
+		break;
+	case OTG_STATE_A_WAIT_BCON:
+	case OTG_STATE_A_HOST:
+		if (pnw->iotg.suspend_noirq_host)
+			ret = pnw->iotg.suspend_noirq_host(&pnw->iotg);
+		goto done;
+	case OTG_STATE_A_SUSPEND:
+		penwell_otg_del_timer(TA_AIDL_BDIS_TMR);
+		penwell_otg_HABA(0);
+		PNW_STOP_HOST(pnw);
+		iotg->hsm.a_srp_det = 0;
+
+		penwell_otg_phy_vbus_wakeup(false);
+
+		/* Turn off VBus */
+		otg_set_vbus(iotg->otg.otg, false);
+		iotg->otg.state = OTG_STATE_A_IDLE;
+		break;
+	case OTG_STATE_A_PERIPHERAL:
+		penwell_otg_del_timer(TA_BIDL_ADIS_TMR);
+
+		if (pnw->iotg.stop_peripheral)
+			pnw->iotg.stop_peripheral(&pnw->iotg);
+		else
+			dev_dbg(pnw->dev, "client driver has been stopped.\n");
+
+		/* Turn off VBus */
+		otg_set_vbus(iotg->otg.otg, false);
+		iotg->hsm.a_srp_det = 0;
+		iotg->otg.state = OTG_STATE_A_IDLE;
+		break;
+	case OTG_STATE_B_HOST:
+		/* Stop HNP polling */
+		if (iotg->stop_hnp_poll)
+			iotg->stop_hnp_poll(iotg);
+
+		PNW_STOP_HOST(pnw);
+		iotg->hsm.b_bus_req = 0;
+		iotg->otg.state = OTG_STATE_B_IDLE;
+		break;
+	case OTG_STATE_B_PERIPHERAL:
+		dev_dbg(pnw->dev, "don't suspend, client still alive\n");
+		ret = -EBUSY;
+		break;
+	case OTG_STATE_B_WAIT_ACON:
+		penwell_otg_del_timer(TB_ASE0_BRST_TMR);
+
+		penwell_otg_HAAR(0);
+
+		PNW_STOP_HOST(pnw);
+		iotg->hsm.b_bus_req = 0;
+		iotg->otg.state = OTG_STATE_B_IDLE;
+		break;
+	default:
+		dev_dbg(pnw->dev, "error state before suspend\n");
+		break;
+	}
+
+
+	if (ret) {
+		spin_lock_irqsave(&pnw->lock, flags);
+		pnw->queue_stop = 0;
+		spin_unlock_irqrestore(&pnw->lock, flags);
+
+		penwell_update_transceiver();
+	} else {
+		penwell_otg_phy_low_power(1);
+		penwell_otg_vusb330_low_power(1);
+#ifdef CONFIG_USB_PENWELL_OTG_PHY_OFF
+		if (iotg->otg.state == OTG_STATE_B_IDLE) {
+			penwell_otg_phy_power(0);
+			pnw->phy_power_state = 0;
+		}
+#endif
+	}
+
+done:
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+	return ret;
+}
+
+static int penwell_otg_suspend(struct device *dev)
+{
+	struct penwell_otg		*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+	int				ret = 0;
+	unsigned long	flags;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	if (iotg->otg.state == OTG_STATE_B_PERIPHERAL) {
+		dev_dbg(pnw->dev, "still alive, don't suspend\n");
+		ret = -EBUSY;
+		goto done;
+	}
+
+	/* quiesce any work scheduled */
+	spin_lock_irqsave(&pnw->lock, flags);
+	pnw->queue_stop = 1;
+	spin_unlock_irqrestore(&pnw->lock, flags);
+	flush_workqueue(pnw->qwork);
+	if (delayed_work_pending(&pnw->ulpi_check_work)) {
+		spin_lock_irqsave(&pnw->lock, flags);
+		pnw->queue_stop = 0;
+		spin_unlock_irqrestore(&pnw->lock, flags);
+		ret = -EBUSY;
+		goto done;
+	} else
+		flush_delayed_work_sync(&pnw->ulpi_check_work);
+
+	switch (iotg->otg.state) {
+	case OTG_STATE_A_WAIT_BCON:
+		penwell_otg_del_timer(TA_WAIT_BCON_TMR);
+		iotg->hsm.a_srp_det = 0;
+		if (iotg->suspend_host)
+			ret = iotg->suspend_host(iotg);
+		break;
+	case OTG_STATE_A_HOST:
+		if (iotg->stop_hnp_poll)
+			iotg->stop_hnp_poll(iotg);
+		if (iotg->suspend_host)
+			ret = iotg->suspend_host(iotg);
+		break;
+	default:
+		break;
+	}
+
+done:
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+	return ret;
+}
+
+static void penwell_otg_dump_bogus_wake(void)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	int addr = 0x2C8, retval;
+	u8 val;
+
+	/* Enable SPI access */
+	penwell_otg_msic_spi_access(true);
+
+	retval = penwell_otg_msic_read(addr, &val);
+	if (retval) {
+		dev_err(pnw->dev, "msic read failed\n");
+		goto out;
+	}
+	dev_info(pnw->dev, "0x%03x: 0x%02x", addr, val);
+
+	for (addr = 0x340; addr <= 0x348; addr++) {
+		retval = penwell_otg_msic_read(addr, &val);
+		if (retval) {
+			dev_err(pnw->dev, "msic read failed\n");
+			goto out;
+		}
+		dev_info(pnw->dev, "0x%03x: 0x%02x", addr, val);
+	}
+
+	for (addr = 0x394; addr <= 0x3BF; addr++) {
+		retval = penwell_otg_msic_read(addr, &val);
+		if (retval) {
+			dev_err(pnw->dev, "msic read failed\n");
+			goto out;
+		}
+		dev_info(pnw->dev, "0x%03x: 0x%02x", addr, val);
+	}
+
+	addr = 0x192;
+	retval = penwell_otg_msic_read(addr, &val);
+	if (retval) {
+		dev_err(pnw->dev, "msic read failed\n");
+		goto out;
+	}
+
+	dev_info(pnw->dev, "0x%03x: 0x%02x", addr, val);
+out:
+	penwell_otg_msic_spi_access(false);
+}
+
+static int penwell_otg_resume_noirq(struct device *dev)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+	struct pci_dev			*pdev;
+	int			ret = 0;
+	u32			val;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+
+	pdev = to_pci_dev(pnw->dev);
+
+	/* If USB PHY is in OFF state, power on it and do basic init work */
+	if (!pnw->phy_power_state) {
+		penwell_otg_phy_power(1);
+		/* Change phy_power_state to 1 again */
+		pnw->phy_power_state = 1;
+		penwell_otg_phy_reset();
+
+		/* Reset controller and clear PHY low power mode setting */
+		reset_otg();
+		penwell_otg_phy_low_power(0);
+
+		/* Wait ID value to be synced */
+		msleep(60);
+	}
+
+	/* Add a delay in case the controller is back in D0; the controller
+	 * needs time to sync/latch the OTGSC register value. */
+	usleep_range(2000, 2500);
+
+	if (mid_pmu_is_wake_source(PMU_OTG_WAKE_SOURCE)) {
+		/* dump OTGSC register for wakeup event */
+		val = readl(pnw->iotg.base + CI_OTGSC);
+		dev_info(pnw->dev, "%s: CI_OTGSC=0x%x\n", __func__, val);
+		if (val & OTGSC_IDIS)
+			dev_info(pnw->dev, "%s: id change\n", __func__);
+		if (val & OTGSC_DPIS)
+			dev_info(pnw->dev, "%s: data pulse\n", __func__);
+		if (val & OTGSC_BSEIS)
+			dev_info(pnw->dev, "%s: b sess end\n", __func__);
+		if (val & OTGSC_BSVIS)
+			dev_info(pnw->dev, "%s: b sess valid\n", __func__);
+		if (val & OTGSC_ASVIS)
+			dev_info(pnw->dev, "%s: a sess valid\n", __func__);
+		if (val & OTGSC_AVVIS)
+			dev_info(pnw->dev, "%s: a vbus valid\n", __func__);
+
+		if (!(val & OTGSC_INTSTS_MASK)) {
+			static bool uevent_reported;
+			dev_info(pnw->dev,
+				 "%s: waking up from USB source, but not a OTG wakeup event\n",
+				 __func__);
+			if (!uevent_reported) {
+				if (!is_clovertrail(pdev))
+					penwell_otg_dump_bogus_wake();
+				queue_work(pnw->qwork, &pnw->uevent_work);
+				uevent_reported = true;
+			}
+		}
+	}
+
+	if (iotg->otg.state != OTG_STATE_A_WAIT_BCON &&
+		iotg->otg.state != OTG_STATE_A_HOST) {
+		penwell_otg_vusb330_low_power(0);
+		penwell_otg_phy_low_power(0);
+	}
+
+	/* On D3->D0 the controller is reset, so restore the work mode and
+	 * PHY state that the reset cleared. */
+
+	switch (pnw->iotg.otg.state) {
+	case OTG_STATE_B_IDLE:
+		break;
+	case OTG_STATE_A_WAIT_BCON:
+	case OTG_STATE_A_HOST:
+		if (iotg->resume_noirq_host)
+			ret = iotg->resume_noirq_host(iotg);
+		break;
+	default:
+		break;
+	}
+
+
+	/* The OTGSC interrupt was left enabled; disable it here to prevent
+	 * interrupts from firing before penwell_otg_resume(), which will
+	 * re-enable it.
+	 */
+	penwell_otg_intr(0);
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+	return ret;
+}
+
+static int penwell_otg_resume(struct device *dev)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	struct intel_mid_otg_xceiv	*iotg = &pnw->iotg;
+	int			ret = 0;
+	unsigned long	flags;
+
+	dev_dbg(pnw->dev, "%s --->\n", __func__);
+	switch (iotg->otg.state) {
+	case OTG_STATE_A_WAIT_BCON:
+		if (iotg->resume_host)
+			ret = iotg->resume_host(iotg);
+		penwell_otg_add_timer(TA_WAIT_BCON_TMR);
+		break;
+	case OTG_STATE_A_HOST:
+		if (iotg->resume_host)
+			ret = iotg->resume_host(iotg);
+
+		/* FIXME: Ideally HNP polling should be restarted here,
+		 * but it is skipped because it blocks the resume.
+		 */
+		break;
+	default:
+		break;
+	}
+
+	if (ret)
+		return ret;
+
+	/* allow queuing work from the notifier */
+	spin_lock_irqsave(&pnw->lock, flags);
+	pnw->queue_stop = 0;
+	spin_unlock_irqrestore(&pnw->lock, flags);
+
+	penwell_otg_intr(1);
+
+	/* If a plug-in or plug-out event happens during D3, the interrupt
+	 * is missed, so check OTGSC here for any ID change and update the
+	 * HSM accordingly.
+	 */
+	update_hsm();
+	penwell_update_transceiver();
+
+	dev_dbg(pnw->dev, "%s <---\n", __func__);
+	return ret;
+}
+
+
+#ifdef CONFIG_PM_RUNTIME
+/* Runtime PM */
+static int penwell_otg_runtime_suspend(struct device *dev)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	struct pci_dev		*pdev = to_pci_dev(dev);
+	int			ret = 0;
+	u32			val;
+	unsigned long		flags;
+
+	dev_dbg(dev, "%s --->\n", __func__);
+
+	pnw->rt_quiesce = 1;
+
+	/* Flush any pending OTG irq on the local or any other CPU.
+	 *
+	 * Host-mode and device-mode irqs should each be synchronized in
+	 * their own runtime_suspend handler. Host mode does so; device
+	 * mode does not matter here, as its runtime PM is disabled.
+	 *
+	 * Since the device's runtime_status is already RPM_SUSPENDING,
+	 * any new irq handling after the flush will be rejected (the otg
+	 * irq handler only continues if runtime_status is RPM_ACTIVE).
+	 * It is therefore safe to put the PHY into low-power mode and
+	 * gate the fabric later in pci_set_power_state().
+	 */
+	synchronize_irq(pdev->irq);
+
+	switch (pnw->iotg.otg.state) {
+	case OTG_STATE_A_IDLE:
+		break;
+	case OTG_STATE_B_IDLE:
+		val = readl(pnw->iotg.base + CI_USBMODE);
+		if (!(val & USBMODE_CM)) {
+			/* Controller needs to reset & set mode */
+			dev_dbg(dev, "reset to client mode\n");
+			set_client_mode();
+		}
+		break;
+	case OTG_STATE_A_WAIT_BCON:
+	case OTG_STATE_A_HOST:
+	case OTG_STATE_A_SUSPEND:
+		if (pnw->iotg.runtime_suspend_host)
+			ret = pnw->iotg.runtime_suspend_host(&pnw->iotg);
+		break;
+	case OTG_STATE_A_PERIPHERAL:
+	case OTG_STATE_B_PERIPHERAL:
+		if (pnw->iotg.runtime_suspend_peripheral)
+			ret = pnw->iotg.runtime_suspend_peripheral(&pnw->iotg);
+		break;
+	default:
+		break;
+	}
+
+	if (ret) {
+		spin_lock_irqsave(&pnw->lock, flags);
+		pnw->rt_quiesce = 0;
+		if (pnw->rt_resuming) {
+			pnw->rt_resuming = 0;
+			pm_runtime_put(pnw->dev);
+		}
+		spin_unlock_irqrestore(&pnw->lock, flags);
+		goto DONE;
+	}
+
+	penwell_otg_phy_low_power(1);
+
+	msleep(2);
+
+	penwell_otg_vusb330_low_power(1);
+
+DONE:
+	dev_dbg(dev, "%s <---: ret = %d\n", __func__, ret);
+	return ret;
+}
+
+static int penwell_otg_runtime_resume(struct device *dev)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+	int			ret = 0;
+	u32			val;
+	unsigned long		flags;
+
+	dev_dbg(dev, "%s --->\n", __func__);
+
+	penwell_otg_phy_low_power(0);
+	penwell_otg_vusb330_low_power(0);
+	/* wait for the hardware to stabilize */
+	usleep_range(2000, 4000);
+
+	switch (pnw->iotg.otg.state) {
+	case OTG_STATE_A_IDLE:
+		break;
+	case OTG_STATE_B_IDLE:
+		val = readl(pnw->iotg.base + CI_USBMODE);
+		if (!(val & USBMODE_CM)) {
+			/* Controller needs to reset & set mode */
+			dev_dbg(dev, "reset to client mode\n");
+			set_client_mode();
+		}
+		break;
+	case OTG_STATE_A_WAIT_BCON:
+	case OTG_STATE_A_HOST:
+	case OTG_STATE_A_SUSPEND:
+		if (pnw->iotg.runtime_resume_host)
+			ret = pnw->iotg.runtime_resume_host(&pnw->iotg);
+		break;
+	case OTG_STATE_A_PERIPHERAL:
+	case OTG_STATE_B_PERIPHERAL:
+		if (pnw->iotg.runtime_resume_peripheral)
+			ret = pnw->iotg.runtime_resume_peripheral(&pnw->iotg);
+		break;
+	default:
+		break;
+	}
+
+	spin_lock_irqsave(&pnw->lock, flags);
+	pnw->rt_quiesce = 0;
+	if (pnw->rt_resuming) {
+		pnw->rt_resuming = 0;
+		pm_runtime_put(pnw->dev);
+	}
+	spin_unlock_irqrestore(&pnw->lock, flags);
+
+	dev_dbg(dev, "%s <---\n", __func__);
+
+	return ret;
+}
+
+static int penwell_otg_runtime_idle(struct device *dev)
+{
+	struct penwell_otg	*pnw = the_transceiver;
+
+	dev_dbg(dev, "%s --->\n", __func__);
+
+	switch (pnw->iotg.otg.state) {
+	case OTG_STATE_A_WAIT_VRISE:
+	case OTG_STATE_A_WAIT_VFALL:
+	case OTG_STATE_A_VBUS_ERR:
+	case OTG_STATE_B_WAIT_ACON:
+	case OTG_STATE_B_HOST:
+		dev_dbg(dev, "Keep in active\n");
+		dev_dbg(dev, "%s <---\n", __func__);
+		return -EBUSY;
+	case OTG_STATE_A_WAIT_BCON:
+	case OTG_STATE_A_HOST:
+		/* Schedule runtime_suspend without delay */
+		pm_schedule_suspend(dev, 0);
+		dev_dbg(dev, "%s <---\n", __func__);
+		return -EBUSY;
+	default:
+		break;
+	}
+
+	/* some delay for stability */
+	pm_schedule_suspend(dev, 500);
+
+	dev_dbg(dev, "%s <---\n", __func__);
+
+	return -EBUSY;
+}
+
+#else
+
+#define penwell_otg_runtime_suspend NULL
+#define penwell_otg_runtime_resume NULL
+#define penwell_otg_runtime_idle NULL
+
+#endif
+
+/*----------------------------------------------------------*/
+
+DEFINE_PCI_DEVICE_TABLE(pci_ids) = {{
+	.class =        ((PCI_CLASS_SERIAL_USB << 8) | 0x20),
+	.class_mask =   ~0,
+	.vendor =	0x8086,
+	.device =	0x0829,
+	.subvendor =	PCI_ANY_ID,
+	.subdevice =	PCI_ANY_ID,
+	},
+	{ /* Cloverview */
+		.class =        ((PCI_CLASS_SERIAL_USB << 8) | 0x20),
+		.class_mask =   ~0,
+		.vendor =	0x8086,
+		.device =	0xE006,
+		.subvendor =	PCI_ANY_ID,
+		.subdevice =	PCI_ANY_ID,
+	},
+	{ /* end: all zeroes */ }
+};
+
+static const struct dev_pm_ops penwell_otg_pm_ops = {
+	.runtime_suspend = penwell_otg_runtime_suspend,
+	.runtime_resume = penwell_otg_runtime_resume,
+	.runtime_idle = penwell_otg_runtime_idle,
+	.suspend = penwell_otg_suspend,
+	.suspend_noirq = penwell_otg_suspend_noirq,
+	.resume = penwell_otg_resume,
+	.resume_noirq = penwell_otg_resume_noirq,
+};
+
+static struct pci_driver otg_pci_driver = {
+	.name =		(char *) driver_name,
+	.id_table =	pci_ids,
+
+	.probe =	penwell_otg_probe,
+	.remove =	penwell_otg_remove,
+	.shutdown =	penwell_otg_shutdown,
+	.driver = {
+		.pm =	&penwell_otg_pm_ops
+	},
+};
+
+static int __init penwell_otg_init(void)
+{
+#ifdef	CONFIG_DEBUG_FS
+	pm_sss0_base = ioremap_nocache(0xFF11D030, 0x100);
+#endif
+	return pci_register_driver(&otg_pci_driver);
+}
+module_init(penwell_otg_init);
+
+static void __exit penwell_otg_cleanup(void)
+{
+#ifdef	CONFIG_DEBUG_FS
+	iounmap(pm_sss0_base);
+#endif
+	pci_unregister_driver(&otg_pci_driver);
+}
+module_exit(penwell_otg_cleanup);
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 2e937bd..0669dac 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2475,6 +2475,7 @@
 source "drivers/video/exynos/Kconfig"
 source "drivers/video/mmp/Kconfig"
 source "drivers/video/backlight/Kconfig"
+source "drivers/video/adf/Kconfig"
 
 if VT
 	source "drivers/video/console/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index e8bae8d..2babdef 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -12,6 +12,7 @@
                                      modedb.o fbcvt.o
 fb-objs                           := $(fb-y)
 
+obj-$(CONFIG_ADF)		  += adf/
 obj-$(CONFIG_VT)		  += console/
 obj-$(CONFIG_LOGO)		  += logo/
 obj-y				  += backlight/
diff --git a/drivers/video/adf/Kconfig b/drivers/video/adf/Kconfig
new file mode 100644
index 0000000..33858b7
--- /dev/null
+++ b/drivers/video/adf/Kconfig
@@ -0,0 +1,14 @@
+menuconfig ADF
+	depends on SYNC
+	depends on DMA_SHARED_BUFFER
+	tristate "Atomic Display Framework"
+
+menuconfig ADF_FBDEV
+	depends on ADF
+	depends on FB
+	tristate "Helper for implementing the fbdev API in ADF drivers"
+
+menuconfig ADF_MEMBLOCK
+	depends on ADF
+	depends on HAVE_MEMBLOCK
+	tristate "Helper for using memblocks as buffers in ADF drivers"
diff --git a/drivers/video/adf/Makefile b/drivers/video/adf/Makefile
new file mode 100644
index 0000000..78d0915
--- /dev/null
+++ b/drivers/video/adf/Makefile
@@ -0,0 +1,15 @@
+ccflags-y := -Idrivers/staging/android
+
+CFLAGS_adf.o := -I$(src)
+
+obj-$(CONFIG_ADF) += adf.o \
+	adf_client.o \
+	adf_fops.o \
+	adf_format.o \
+	adf_sysfs.o
+
+obj-$(CONFIG_COMPAT) += adf_fops32.o
+
+obj-$(CONFIG_ADF_FBDEV) += adf_fbdev.o
+
+obj-$(CONFIG_ADF_MEMBLOCK) += adf_memblock.o
diff --git a/drivers/video/adf/adf.c b/drivers/video/adf/adf.c
new file mode 100644
index 0000000..42c30c0
--- /dev/null
+++ b/drivers/video/adf/adf.c
@@ -0,0 +1,1188 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ * adf_modeinfo_{set_name,set_vrefresh} modified from
+ * drivers/gpu/drm/drm_modes.c
+ * adf_format_validate_yuv modified from framebuffer_check in
+ * drivers/gpu/drm/drm_crtc.c
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <video/adf_format.h>
+
+#include "sw_sync.h"
+#include "sync.h"
+
+#include "adf.h"
+#include "adf_fops.h"
+#include "adf_sysfs.h"
+
+#define CREATE_TRACE_POINTS
+#include "adf_trace.h"
+
+#define ADF_SHORT_FENCE_TIMEOUT (1 * MSEC_PER_SEC)
+#define ADF_LONG_FENCE_TIMEOUT (10 * MSEC_PER_SEC)
+
+static DEFINE_IDR(adf_devices);
+
+static void adf_fence_wait(struct adf_device *dev, struct sync_fence *fence)
+{
+	/* sync_fence_wait() dumps debug information on timeout.  Experience
+	   has shown that if the pipeline gets stuck, a short timeout followed
+	   by a longer one provides useful information for debugging. */
+	int err = sync_fence_wait(fence, ADF_SHORT_FENCE_TIMEOUT);
+	if (err >= 0)
+		return;
+
+	if (err == -ETIME)
+		err = sync_fence_wait(fence, ADF_LONG_FENCE_TIMEOUT);
+
+	if (err < 0)
+		dev_warn(&dev->base.dev, "error waiting on fence: %d\n", err);
+}
+
+void adf_buffer_cleanup(struct adf_buffer *buf)
+{
+	size_t i;
+	for (i = 0; i < ARRAY_SIZE(buf->dma_bufs); i++)
+		if (buf->dma_bufs[i])
+			dma_buf_put(buf->dma_bufs[i]);
+
+	if (buf->acquire_fence)
+		sync_fence_put(buf->acquire_fence);
+}
+
+void adf_buffer_mapping_cleanup(struct adf_buffer_mapping *mapping,
+		struct adf_buffer *buf)
+{
+	/* calling adf_buffer_mapping_cleanup() is safe even if mapping is
+	   uninitialized or partially-initialized, as long as it was
+	   zeroed on allocation */
+	size_t i;
+	for (i = 0; i < ARRAY_SIZE(mapping->sg_tables); i++) {
+		if (mapping->sg_tables[i])
+			dma_buf_unmap_attachment(mapping->attachments[i],
+					mapping->sg_tables[i], DMA_TO_DEVICE);
+		if (mapping->attachments[i])
+			dma_buf_detach(buf->dma_bufs[i],
+					mapping->attachments[i]);
+	}
+}
+
+void adf_post_cleanup(struct adf_device *dev, struct adf_pending_post *post)
+{
+	size_t i;
+
+	if (post->state)
+		dev->ops->state_free(dev, post->state);
+
+	for (i = 0; i < post->config.n_bufs; i++) {
+		adf_buffer_mapping_cleanup(&post->config.mappings[i],
+				&post->config.bufs[i]);
+		adf_buffer_cleanup(&post->config.bufs[i]);
+	}
+
+	kfree(post->config.custom_data);
+	kfree(post->config.mappings);
+	kfree(post->config.bufs);
+	kfree(post);
+}
+
+static void adf_sw_advance_timeline(struct adf_device *dev)
+{
+#ifdef CONFIG_SW_SYNC
+	sw_sync_timeline_inc(dev->timeline, 1);
+#else
+	BUG();
+#endif
+}
+
+static void adf_post_work_func(struct kthread_work *work)
+{
+	struct adf_device *dev =
+			container_of(work, struct adf_device, post_work);
+	struct adf_pending_post *post, *next;
+	struct list_head saved_list;
+
+	mutex_lock(&dev->post_lock);
+	memcpy(&saved_list, &dev->post_list, sizeof(saved_list));
+	list_replace_init(&dev->post_list, &saved_list);
+	mutex_unlock(&dev->post_lock);
+
+	list_for_each_entry_safe(post, next, &saved_list, head) {
+		int i;
+
+		for (i = 0; i < post->config.n_bufs; i++) {
+			struct sync_fence *fence =
+					post->config.bufs[i].acquire_fence;
+			if (fence)
+				adf_fence_wait(dev, fence);
+		}
+
+		dev->ops->post(dev, &post->config, post->state);
+
+		if (dev->ops->advance_timeline)
+			dev->ops->advance_timeline(dev, &post->config,
+					post->state);
+		else
+			adf_sw_advance_timeline(dev);
+
+		list_del(&post->head);
+		if (dev->onscreen)
+			adf_post_cleanup(dev, dev->onscreen);
+		dev->onscreen = post;
+	}
+}
+
+void adf_attachment_free(struct adf_attachment_list *attachment)
+{
+	list_del(&attachment->head);
+	kfree(attachment);
+}
+
+struct adf_event_refcount *adf_obj_find_event_refcount(struct adf_obj *obj,
+		enum adf_event_type type)
+{
+	struct rb_root *root = &obj->event_refcount;
+	struct rb_node **new = &(root->rb_node);
+	struct rb_node *parent = NULL;
+	struct adf_event_refcount *refcount;
+
+	while (*new) {
+		refcount = container_of(*new, struct adf_event_refcount, node);
+		parent = *new;
+
+		if (refcount->type > type)
+			new = &(*new)->rb_left;
+		else if (refcount->type < type)
+			new = &(*new)->rb_right;
+		else
+			return refcount;
+	}
+
+	refcount = kzalloc(sizeof(*refcount), GFP_KERNEL);
+	if (!refcount)
+		return NULL;
+	refcount->type = type;
+
+	rb_link_node(&refcount->node, parent, new);
+	rb_insert_color(&refcount->node, root);
+	return refcount;
+}
+
+/**
+ * adf_event_get - increase the refcount for an event
+ *
+ * @obj: the object that produces the event
+ * @type: the event type
+ *
+ * ADF will call the object's set_event() op if needed.  ops are allowed
+ * to sleep, so adf_event_get() must NOT be called from an atomic context.
+ *
+ * Returns 0 if successful, or -%EINVAL if the object does not support the
+ * requested event type.
+ */
+int adf_event_get(struct adf_obj *obj, enum adf_event_type type)
+{
+	struct adf_event_refcount *refcount;
+	int old_refcount;
+	int ret;
+
+	ret = adf_obj_check_supports_event(obj, type);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&obj->event_lock);
+
+	refcount = adf_obj_find_event_refcount(obj, type);
+	if (!refcount) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	old_refcount = refcount->refcount++;
+
+	if (old_refcount == 0) {
+		obj->ops->set_event(obj, type, true);
+		trace_adf_event_enable(obj, type);
+	}
+
+done:
+	mutex_unlock(&obj->event_lock);
+	return ret;
+}
+EXPORT_SYMBOL(adf_event_get);
+
+/**
+ * adf_event_put - decrease the refcount for an event
+ *
+ * @obj: the object that produces the event
+ * @type: the event type
+ *
+ * ADF will call the object's set_event() op if needed.  ops are allowed
+ * to sleep, so adf_event_put() must NOT be called from an atomic context.
+ *
+ * Returns 0 if successful, -%EINVAL if the object does not support the
+ * requested event type, or -%EALREADY if the refcount is already 0.
+ */
+int adf_event_put(struct adf_obj *obj, enum adf_event_type type)
+{
+	struct adf_event_refcount *refcount;
+	int old_refcount;
+	int ret;
+
+	ret = adf_obj_check_supports_event(obj, type);
+	if (ret < 0)
+		return ret;
+
+
+	mutex_lock(&obj->event_lock);
+
+	refcount = adf_obj_find_event_refcount(obj, type);
+	if (!refcount) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	old_refcount = refcount->refcount--;
+
+	if (WARN_ON(old_refcount == 0)) {
+		refcount->refcount++;
+		ret = -EALREADY;
+	} else if (old_refcount == 1) {
+		obj->ops->set_event(obj, type, false);
+		trace_adf_event_disable(obj, type);
+	}
+
+done:
+	mutex_unlock(&obj->event_lock);
+	return ret;
+}
+EXPORT_SYMBOL(adf_event_put);
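+
+/*
+ * Usage sketch (hypothetical caller; "intf" stands for an initialized
+ * struct adf_interface): take an event reference from process context
+ * and drop it when done:
+ *
+ *	int err = adf_event_get(&intf->base, ADF_EVENT_HOTPLUG);
+ *	if (err < 0)
+ *		return err;
+ *	...
+ *	adf_event_put(&intf->base, ADF_EVENT_HOTPLUG);
+ */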
+
+/**
+ * adf_vsync_wait - wait for a vsync event on a display interface
+ *
+ * @intf: the display interface
+ * @timeout: timeout in milliseconds (0 = wait indefinitely)
+ *
+ * adf_vsync_wait() may sleep, so it must NOT be called from an atomic context.
+ *
+ * This function returns -%ERESTARTSYS if it is interrupted by a signal.
+ * If @timeout == 0 then this function returns 0 on vsync. If @timeout > 0 then
+ * this function returns the number of remaining jiffies or -%ETIMEDOUT on
+ * timeout.
+ */
+int adf_vsync_wait(struct adf_interface *intf, long timeout)
+{
+	ktime_t timestamp;
+	int ret;
+	unsigned long flags;
+
+	read_lock_irqsave(&intf->vsync_lock, flags);
+	timestamp = intf->vsync_timestamp;
+	read_unlock_irqrestore(&intf->vsync_lock, flags);
+
+	adf_vsync_get(intf);
+	if (timeout) {
+		ret = wait_event_interruptible_timeout(intf->vsync_wait,
+				!ktime_equal(timestamp,
+						intf->vsync_timestamp),
+				msecs_to_jiffies(timeout));
+		if (ret == 0 && ktime_equal(timestamp, intf->vsync_timestamp))
+			ret = -ETIMEDOUT;
+	} else {
+		ret = wait_event_interruptible(intf->vsync_wait,
+				!ktime_equal(timestamp,
+						intf->vsync_timestamp));
+	}
+	adf_vsync_put(intf);
+
+	return ret;
+}
+EXPORT_SYMBOL(adf_vsync_wait);
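+
+/*
+ * Usage sketch (hypothetical caller; "intf" is an initialized
+ * interface): wait up to two 60Hz frame periods for the next vsync:
+ *
+ *	int ret = adf_vsync_wait(intf, 34);
+ *	if (ret == -ETIMEDOUT)
+ *		dev_warn(dev, "no vsync within 34ms\n");
+ */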
+
+static void adf_event_queue(struct adf_obj *obj, struct adf_event *event)
+{
+	struct adf_file *file;
+	unsigned long flags;
+
+	trace_adf_event(obj, event->type);
+
+	spin_lock_irqsave(&obj->file_lock, flags);
+
+	list_for_each_entry(file, &obj->file_list, head)
+		if (test_bit(event->type, file->event_subscriptions))
+			adf_file_queue_event(file, event);
+
+	spin_unlock_irqrestore(&obj->file_lock, flags);
+}
+
+/**
+ * adf_event_notify - notify userspace of a driver-private event
+ *
+ * @obj: the ADF object that produced the event
+ * @event: the event
+ *
+ * adf_event_notify() may be called safely from an atomic context.  It will
+ * copy @event if needed, so @event may point to a variable on the stack.
+ *
+ * Drivers must NOT call adf_event_notify() for vsync and hotplug events.
+ * ADF provides adf_vsync_notify() and
+ * adf_hotplug_notify_{connected,disconnected}() for these events.
+ */
+int adf_event_notify(struct adf_obj *obj, struct adf_event *event)
+{
+	if (WARN_ON(event->type == ADF_EVENT_VSYNC ||
+			event->type == ADF_EVENT_HOTPLUG))
+		return -EINVAL;
+
+	adf_event_queue(obj, event);
+	return 0;
+}
+EXPORT_SYMBOL(adf_event_notify);
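+
+/*
+ * Usage sketch (hypothetical driver-private event): custom event types
+ * start at ADF_EVENT_DEVICE_CUSTOM, and @event is copied so it may live
+ * on the stack:
+ *
+ *	struct adf_event event = {
+ *		.type = ADF_EVENT_DEVICE_CUSTOM,
+ *		.length = sizeof(event),
+ *	};
+ *
+ *	adf_event_notify(&example_dev.base, &event);
+ */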
+
+/**
+ * adf_vsync_notify - notify ADF of a display interface's vsync event
+ *
+ * @intf: the display interface
+ * @timestamp: the time the vsync occurred
+ *
+ * adf_vsync_notify() may be called safely from an atomic context.
+ */
+void adf_vsync_notify(struct adf_interface *intf, ktime_t timestamp)
+{
+	unsigned long flags;
+	struct adf_vsync_event event;
+
+	write_lock_irqsave(&intf->vsync_lock, flags);
+	intf->vsync_timestamp = timestamp;
+	write_unlock_irqrestore(&intf->vsync_lock, flags);
+
+	wake_up_interruptible_all(&intf->vsync_wait);
+
+	event.base.type = ADF_EVENT_VSYNC;
+	event.base.length = sizeof(event);
+	event.timestamp = ktime_to_ns(timestamp);
+	adf_event_queue(&intf->base, &event.base);
+}
+EXPORT_SYMBOL(adf_vsync_notify);
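+
+/*
+ * Usage sketch (hypothetical display driver): a vsync interrupt handler
+ * timestamps the event and forwards it to ADF; this is safe from atomic
+ * context:
+ *
+ *	static irqreturn_t example_vsync_irq(int irq, void *data)
+ *	{
+ *		struct adf_interface *intf = data;
+ *
+ *		adf_vsync_notify(intf, ktime_get());
+ *		return IRQ_HANDLED;
+ *	}
+ */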
+
+void adf_hotplug_notify(struct adf_interface *intf, bool connected,
+		struct drm_mode_modeinfo *modelist, size_t n_modes)
+{
+	unsigned long flags;
+	struct adf_hotplug_event event;
+	struct drm_mode_modeinfo *old_modelist;
+
+	write_lock_irqsave(&intf->hotplug_modelist_lock, flags);
+	old_modelist = intf->modelist;
+	intf->hotplug_detect = connected;
+	intf->modelist = modelist;
+	intf->n_modes = n_modes;
+	write_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);
+
+	kfree(old_modelist);
+
+	event.base.length = sizeof(event);
+	event.base.type = ADF_EVENT_HOTPLUG;
+	event.connected = connected;
+	adf_event_queue(&intf->base, &event.base);
+}
+
+/**
+ * adf_hotplug_notify_connected - notify ADF of a display interface being
+ * connected to a display
+ *
+ * @intf: the display interface
+ * @modelist: hardware modes supported by display
+ * @n_modes: length of modelist
+ *
+ * @modelist is copied as needed, so it may point to a variable on the stack.
+ *
+ * adf_hotplug_notify_connected() may NOT be called safely from an atomic
+ * context.
+ *
+ * Returns 0 on success or error code (<0) on error.
+ */
+int adf_hotplug_notify_connected(struct adf_interface *intf,
+		struct drm_mode_modeinfo *modelist, size_t n_modes)
+{
+	struct drm_mode_modeinfo *modelist_copy;
+
+	if (n_modes > ADF_MAX_MODES)
+		return -ENOMEM;
+
+	modelist_copy = kzalloc(sizeof(modelist_copy[0]) * n_modes,
+			GFP_KERNEL);
+	if (!modelist_copy)
+		return -ENOMEM;
+	memcpy(modelist_copy, modelist, sizeof(modelist_copy[0]) * n_modes);
+
+	adf_hotplug_notify(intf, true, modelist_copy, n_modes);
+	return 0;
+}
+EXPORT_SYMBOL(adf_hotplug_notify_connected);
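+
+/*
+ * Usage sketch (hypothetical hotplug handler; "intf" is an initialized
+ * interface): report a newly attached display with a single mode; the
+ * modelist is copied, so it may live on the stack:
+ *
+ *	struct drm_mode_modeinfo mode = { .hdisplay = 1280, .vdisplay = 720 };
+ *	int err;
+ *
+ *	adf_modeinfo_set_name(&mode);
+ *	err = adf_hotplug_notify_connected(intf, &mode, 1);
+ */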
+
+/**
+ * adf_hotplug_notify_disconnected - notify ADF of a display interface being
+ * disconnected from a display
+ *
+ * @intf: the display interface
+ *
+ * adf_hotplug_notify_disconnected() may be called safely from an atomic
+ * context.
+ */
+void adf_hotplug_notify_disconnected(struct adf_interface *intf)
+{
+	adf_hotplug_notify(intf, false, NULL, 0);
+}
+EXPORT_SYMBOL(adf_hotplug_notify_disconnected);
+
+static int adf_obj_init(struct adf_obj *obj, enum adf_obj_type type,
+		struct idr *idr, struct adf_device *parent,
+		const struct adf_obj_ops *ops, const char *fmt, va_list args)
+{
+	int ret;
+
+	if (ops && ops->supports_event && !ops->set_event) {
+		pr_err("%s: %s implements supports_event but not set_event\n",
+				__func__, adf_obj_type_str(type));
+		return -EINVAL;
+	}
+
+	ret = idr_alloc(idr, obj, 0, 0, GFP_KERNEL);
+	if (ret < 0) {
+		pr_err("%s: allocating object id failed: %d\n", __func__, ret);
+		return ret;
+	}
+	obj->id = ret;
+
+	vscnprintf(obj->name, sizeof(obj->name), fmt, args);
+
+	obj->type = type;
+	obj->ops = ops;
+	obj->parent = parent;
+	mutex_init(&obj->event_lock);
+	obj->event_refcount = RB_ROOT;
+	spin_lock_init(&obj->file_lock);
+	INIT_LIST_HEAD(&obj->file_list);
+	return 0;
+}
+
+static void adf_obj_destroy(struct adf_obj *obj, struct idr *idr)
+{
+	struct rb_node *node = rb_first(&obj->event_refcount);
+
+	while (node) {
+		struct adf_event_refcount *refcount =
+				container_of(node, struct adf_event_refcount,
+						node);
+		rb_erase(&refcount->node, &obj->event_refcount);
+		kfree(refcount);
+		node = rb_first(&obj->event_refcount);
+	}
+
+	mutex_destroy(&obj->event_lock);
+	idr_remove(idr, obj->id);
+}
+
+/**
+ * adf_device_init - initialize ADF-internal data for a display device
+ * and create sysfs entries
+ *
+ * @dev: the display device
+ * @parent: the device's parent device
+ * @ops: the device's associated ops
+ * @fmt: formatting string for the display device's name
+ *
+ * @fmt specifies the device's sysfs filename and the name returned to
+ * userspace through the %ADF_GET_DEVICE_DATA ioctl.
+ *
+ * Returns 0 on success or error code (<0) on failure.
+ */
+int adf_device_init(struct adf_device *dev, struct device *parent,
+		const struct adf_device_ops *ops, const char *fmt, ...)
+{
+	int ret;
+	va_list args;
+
+	if (!ops->validate || !ops->post) {
+		pr_err("%s: device must implement validate and post\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (!ops->complete_fence && !ops->advance_timeline) {
+		if (!IS_ENABLED(CONFIG_SW_SYNC)) {
+			pr_err("%s: device requires sw_sync but it is not enabled in the kernel\n",
+					__func__);
+			return -EINVAL;
+		}
+	} else if (!(ops->complete_fence && ops->advance_timeline)) {
+		pr_err("%s: device must implement both complete_fence and advance_timeline, or implement neither\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	memset(dev, 0, sizeof(*dev));
+
+	va_start(args, fmt);
+	ret = adf_obj_init(&dev->base, ADF_OBJ_DEVICE, &adf_devices, dev,
+			&ops->base, fmt, args);
+	va_end(args);
+	if (ret < 0)
+		return ret;
+
+	dev->dev = parent;
+	dev->ops = ops;
+	idr_init(&dev->overlay_engines);
+	idr_init(&dev->interfaces);
+	mutex_init(&dev->client_lock);
+	INIT_LIST_HEAD(&dev->post_list);
+	mutex_init(&dev->post_lock);
+	init_kthread_worker(&dev->post_worker);
+	INIT_LIST_HEAD(&dev->attached);
+	INIT_LIST_HEAD(&dev->attach_allowed);
+
+	dev->post_thread = kthread_run(kthread_worker_fn,
+			&dev->post_worker, dev->base.name);
+	if (IS_ERR(dev->post_thread)) {
+		ret = PTR_ERR(dev->post_thread);
+		dev->post_thread = NULL;
+
+		pr_err("%s: failed to run config posting thread: %d\n",
+				__func__, ret);
+		goto err;
+	}
+	init_kthread_work(&dev->post_work, adf_post_work_func);
+
+	ret = adf_device_sysfs_init(dev);
+	if (ret < 0)
+		goto err;
+
+	return 0;
+
+err:
+	adf_device_destroy(dev);
+	return ret;
+}
+EXPORT_SYMBOL(adf_device_init);
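+
+/*
+ * Usage sketch (hypothetical driver; the "example_*"/"ex" names are
+ * invented): validate and post are the only mandatory device ops.
+ * Without complete_fence/advance_timeline the device relies on the
+ * sw_sync fallback, so CONFIG_SW_SYNC must be enabled:
+ *
+ *	static const struct adf_device_ops example_dev_ops = {
+ *		.validate = example_validate,
+ *		.post = example_post,
+ *	};
+ *
+ *	err = adf_device_init(&ex->dev, &pdev->dev, &example_dev_ops,
+ *			"example.%d", 0);
+ */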
+
+/**
+ * adf_device_destroy - clean up ADF-internal data for a display device
+ *
+ * @dev: the display device
+ */
+void adf_device_destroy(struct adf_device *dev)
+{
+	struct adf_attachment_list *entry, *next;
+
+	idr_destroy(&dev->interfaces);
+	idr_destroy(&dev->overlay_engines);
+
+	if (dev->post_thread) {
+		flush_kthread_worker(&dev->post_worker);
+		kthread_stop(dev->post_thread);
+	}
+
+	if (dev->onscreen)
+		adf_post_cleanup(dev, dev->onscreen);
+	adf_device_sysfs_destroy(dev);
+	list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
+		adf_attachment_free(entry);
+	}
+	list_for_each_entry_safe(entry, next, &dev->attached, head) {
+		adf_attachment_free(entry);
+	}
+	mutex_destroy(&dev->post_lock);
+	mutex_destroy(&dev->client_lock);
+
+	if (dev->timeline)
+		sync_timeline_destroy(&dev->timeline->obj);
+
+	adf_obj_destroy(&dev->base, &adf_devices);
+}
+EXPORT_SYMBOL(adf_device_destroy);
+
+/**
+ * adf_interface_init - initialize ADF-internal data for a display interface
+ * and create sysfs entries
+ *
+ * @intf: the display interface
+ * @dev: the interface's "parent" display device
+ * @type: interface type (see enum @adf_interface_type)
+ * @idx: which interface of type @type;
+ *	e.g. interface DSI.1 -> @type=%ADF_INTF_TYPE_DSI, @idx=1
+ * @flags: informational flags (bitmask of %ADF_INTF_FLAG_* values)
+ * @ops: the interface's associated ops
+ * @fmt: formatting string for the display interface's name
+ *
+ * @dev must have previously been initialized with adf_device_init().
+ *
+ * @fmt affects the name returned to userspace through the
+ * %ADF_GET_INTERFACE_DATA ioctl.  It does not affect the sysfs filename,
+ * which is derived from @dev's name.
+ *
+ * Returns 0 on success or error code (<0) on failure.
+ */
+int adf_interface_init(struct adf_interface *intf, struct adf_device *dev,
+		enum adf_interface_type type, u32 idx, u32 flags,
+		const struct adf_interface_ops *ops, const char *fmt, ...)
+{
+	int ret;
+	va_list args;
+	const u32 allowed_flags = ADF_INTF_FLAG_PRIMARY |
+			ADF_INTF_FLAG_EXTERNAL;
+
+	if (dev->n_interfaces == ADF_MAX_INTERFACES) {
+		pr_err("%s: parent device %s has too many interfaces\n",
+				__func__, dev->base.name);
+		return -ENOMEM;
+	}
+
+	if (type >= ADF_INTF_MEMORY && type <= ADF_INTF_TYPE_DEVICE_CUSTOM) {
+		pr_err("%s: invalid interface type %u\n", __func__, type);
+		return -EINVAL;
+	}
+
+	if (flags & ~allowed_flags) {
+		pr_err("%s: invalid interface flags 0x%X\n", __func__,
+				flags & ~allowed_flags);
+		return -EINVAL;
+	}
+
+	memset(intf, 0, sizeof(*intf));
+
+	va_start(args, fmt);
+	ret = adf_obj_init(&intf->base, ADF_OBJ_INTERFACE, &dev->interfaces,
+			dev, ops ? &ops->base : NULL, fmt, args);
+	va_end(args);
+	if (ret < 0)
+		return ret;
+
+	intf->type = type;
+	intf->idx = idx;
+	intf->flags = flags;
+	intf->ops = ops;
+	intf->dpms_state = DRM_MODE_DPMS_OFF;
+	init_waitqueue_head(&intf->vsync_wait);
+	rwlock_init(&intf->vsync_lock);
+	rwlock_init(&intf->hotplug_modelist_lock);
+
+	ret = adf_interface_sysfs_init(intf);
+	if (ret < 0)
+		goto err;
+	dev->n_interfaces++;
+
+	return 0;
+
+err:
+	adf_obj_destroy(&intf->base, &dev->interfaces);
+	return ret;
+}
+EXPORT_SYMBOL(adf_interface_init);
+
+/**
+ * adf_interface_destroy - clean up ADF-internal data for a display interface
+ *
+ * @intf: the display interface
+ */
+void adf_interface_destroy(struct adf_interface *intf)
+{
+	struct adf_device *dev = adf_interface_parent(intf);
+	struct adf_attachment_list *entry, *next;
+
+	mutex_lock(&dev->client_lock);
+	list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
+		if (entry->attachment.interface == intf) {
+			adf_attachment_free(entry);
+			dev->n_attach_allowed--;
+		}
+	}
+	list_for_each_entry_safe(entry, next, &dev->attached, head) {
+		if (entry->attachment.interface == intf) {
+			adf_device_detach_op(dev,
+					entry->attachment.overlay_engine, intf);
+			adf_attachment_free(entry);
+			dev->n_attached--;
+		}
+	}
+	kfree(intf->modelist);
+	adf_interface_sysfs_destroy(intf);
+	adf_obj_destroy(&intf->base, &dev->interfaces);
+	dev->n_interfaces--;
+	mutex_unlock(&dev->client_lock);
+}
+EXPORT_SYMBOL(adf_interface_destroy);
+
+static bool adf_overlay_engine_has_custom_formats(
+		const struct adf_overlay_engine_ops *ops)
+{
+	size_t i;
+	for (i = 0; i < ops->n_supported_formats; i++)
+		if (!adf_format_is_standard(ops->supported_formats[i]))
+			return true;
+	return false;
+}
+
+/**
+ * adf_overlay_engine_init - initialize ADF-internal data for an
+ * overlay engine and create sysfs entries
+ *
+ * @eng: the overlay engine
+ * @dev: the overlay engine's "parent" display device
+ * @ops: the overlay engine's associated ops
+ * @fmt: formatting string for the overlay engine's name
+ *
+ * @dev must have previously been initialized with adf_device_init().
+ *
+ * @fmt affects the name returned to userspace through the
+ * %ADF_GET_OVERLAY_ENGINE_DATA ioctl.  It does not affect the sysfs filename,
+ * which is derived from @dev's name.
+ *
+ * Returns 0 on success or error code (<0) on failure.
+ */
+int adf_overlay_engine_init(struct adf_overlay_engine *eng,
+		struct adf_device *dev,
+		const struct adf_overlay_engine_ops *ops, const char *fmt, ...)
+{
+	int ret;
+	va_list args;
+
+	if (!ops->supported_formats) {
+		pr_err("%s: overlay engine must support at least one format\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (ops->n_supported_formats > ADF_MAX_SUPPORTED_FORMATS) {
+		pr_err("%s: overlay engine supports too many formats\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (adf_overlay_engine_has_custom_formats(ops) &&
+			!dev->ops->validate_custom_format) {
+		pr_err("%s: overlay engine has custom formats but parent device %s does not implement validate_custom_format\n",
+				__func__, dev->base.name);
+		return -EINVAL;
+	}
+
+	memset(eng, 0, sizeof(*eng));
+
+	va_start(args, fmt);
+	ret = adf_obj_init(&eng->base, ADF_OBJ_OVERLAY_ENGINE,
+			&dev->overlay_engines, dev, &ops->base, fmt, args);
+	va_end(args);
+	if (ret < 0)
+		return ret;
+
+	eng->ops = ops;
+
+	ret = adf_overlay_engine_sysfs_init(eng);
+	if (ret < 0)
+		goto err;
+
+	return 0;
+
+err:
+	adf_obj_destroy(&eng->base, &dev->overlay_engines);
+	return ret;
+}
+EXPORT_SYMBOL(adf_overlay_engine_init);
+
+/**
+ * adf_overlay_engine_destroy - clean up ADF-internal data for an overlay engine
+ *
+ * @eng: the overlay engine
+ */
+void adf_overlay_engine_destroy(struct adf_overlay_engine *eng)
+{
+	struct adf_device *dev = adf_overlay_engine_parent(eng);
+	struct adf_attachment_list *entry, *next;
+
+	mutex_lock(&dev->client_lock);
+	list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
+		if (entry->attachment.overlay_engine == eng) {
+			adf_attachment_free(entry);
+			dev->n_attach_allowed--;
+		}
+	}
+	list_for_each_entry_safe(entry, next, &dev->attached, head) {
+		if (entry->attachment.overlay_engine == eng) {
+			adf_device_detach_op(dev, eng,
+					entry->attachment.interface);
+			adf_attachment_free(entry);
+			dev->n_attached--;
+		}
+	}
+	adf_overlay_engine_sysfs_destroy(eng);
+	adf_obj_destroy(&eng->base, &dev->overlay_engines);
+	mutex_unlock(&dev->client_lock);
+}
+EXPORT_SYMBOL(adf_overlay_engine_destroy);
+
+struct adf_attachment_list *adf_attachment_find(struct list_head *list,
+		struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+	struct adf_attachment_list *entry;
+	list_for_each_entry(entry, list, head) {
+		if (entry->attachment.interface == intf &&
+				entry->attachment.overlay_engine == eng)
+			return entry;
+	}
+	return NULL;
+}
+
+int adf_attachment_validate(struct adf_device *dev,
+		struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+	struct adf_device *intf_dev = adf_interface_parent(intf);
+	struct adf_device *eng_dev = adf_overlay_engine_parent(eng);
+
+	if (intf_dev != dev) {
+		dev_err(&dev->base.dev, "can't attach interface %s belonging to device %s\n",
+				intf->base.name, intf_dev->base.name);
+		return -EINVAL;
+	}
+
+	if (eng_dev != dev) {
+		dev_err(&dev->base.dev, "can't attach overlay engine %s belonging to device %s\n",
+				eng->base.name, eng_dev->base.name);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * adf_attachment_allow - add a new entry to the list of allowed
+ * attachments
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ *
+ * adf_attachment_allow() indicates that the underlying display hardware allows
+ * @intf to scan out @eng's output.  It is intended to be called at
+ * driver initialization for each supported overlay engine + interface pair.
+ *
+ * Returns 0 on success, -%EALREADY if the entry already exists, or -errno on
+ * any other failure.
+ */
+int adf_attachment_allow(struct adf_device *dev,
+		struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+	int ret;
+	struct adf_attachment_list *entry = NULL;
+
+	ret = adf_attachment_validate(dev, eng, intf);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&dev->client_lock);
+
+	if (dev->n_attach_allowed == ADF_MAX_ATTACHMENTS) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	if (adf_attachment_find(&dev->attach_allowed, eng, intf)) {
+		ret = -EALREADY;
+		goto done;
+	}
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	entry->attachment.interface = intf;
+	entry->attachment.overlay_engine = eng;
+	list_add_tail(&entry->head, &dev->attach_allowed);
+	dev->n_attach_allowed++;
+
+done:
+	mutex_unlock(&dev->client_lock);
+	if (ret < 0)
+		kfree(entry);
+
+	return ret;
+}
+EXPORT_SYMBOL(adf_attachment_allow);
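+
+/*
+ * Usage sketch (hypothetical driver; "example_*"/"ex" names invented):
+ * after adf_device_init(), register the hardware topology and then the
+ * pairings the hardware can actually scan out:
+ *
+ *	err = adf_interface_init(&ex->intf, &ex->dev, ADF_INTF_DSI, 0,
+ *			ADF_INTF_FLAG_PRIMARY, &example_intf_ops, "dsi.%u", 0);
+ *	err = adf_overlay_engine_init(&ex->eng, &ex->dev, &example_eng_ops,
+ *			"primary-plane");
+ *	err = adf_attachment_allow(&ex->dev, &ex->eng, &ex->intf);
+ */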
+
+/**
+ * adf_obj_type_str - string representation of an adf_obj_type
+ *
+ * @type: the object type
+ */
+const char *adf_obj_type_str(enum adf_obj_type type)
+{
+	switch (type) {
+	case ADF_OBJ_OVERLAY_ENGINE:
+		return "overlay engine";
+
+	case ADF_OBJ_INTERFACE:
+		return "interface";
+
+	case ADF_OBJ_DEVICE:
+		return "device";
+
+	default:
+		return "unknown";
+	}
+}
+EXPORT_SYMBOL(adf_obj_type_str);
+
+/**
+ * adf_interface_type_str - string representation of an adf_interface's type
+ *
+ * @intf: the interface
+ */
+const char *adf_interface_type_str(struct adf_interface *intf)
+{
+	switch (intf->type) {
+	case ADF_INTF_DSI:
+		return "DSI";
+
+	case ADF_INTF_eDP:
+		return "eDP";
+
+	case ADF_INTF_DPI:
+		return "DPI";
+
+	case ADF_INTF_VGA:
+		return "VGA";
+
+	case ADF_INTF_DVI:
+		return "DVI";
+
+	case ADF_INTF_HDMI:
+		return "HDMI";
+
+	case ADF_INTF_MEMORY:
+		return "memory";
+
+	default:
+		if (intf->type >= ADF_INTF_TYPE_DEVICE_CUSTOM) {
+			if (intf->ops && intf->ops->type_str)
+				return intf->ops->type_str(intf);
+			return "custom";
+		}
+		return "unknown";
+	}
+}
+EXPORT_SYMBOL(adf_interface_type_str);
+
+/**
+ * adf_event_type_str - string representation of an adf_event_type
+ *
+ * @obj: ADF object that produced the event
+ * @type: event type
+ */
+const char *adf_event_type_str(struct adf_obj *obj, enum adf_event_type type)
+{
+	switch (type) {
+	case ADF_EVENT_VSYNC:
+		return "vsync";
+
+	case ADF_EVENT_HOTPLUG:
+		return "hotplug";
+
+	default:
+		if (type >= ADF_EVENT_DEVICE_CUSTOM) {
+			if (obj->ops && obj->ops->event_type_str)
+				return obj->ops->event_type_str(obj, type);
+			return "custom";
+		}
+		return "unknown";
+	}
+}
+EXPORT_SYMBOL(adf_event_type_str);
+
+/**
+ * adf_format_str - string representation of an ADF/DRM fourcc format
+ *
+ * @format: format fourcc
+ * @buf: target buffer for the format's string representation
+ */
+void adf_format_str(u32 format, char buf[ADF_FORMAT_STR_SIZE])
+{
+	buf[0] = format & 0xFF;
+	buf[1] = (format >> 8) & 0xFF;
+	buf[2] = (format >> 16) & 0xFF;
+	buf[3] = (format >> 24) & 0xFF;
+	buf[4] = '\0';
+}
+EXPORT_SYMBOL(adf_format_str);
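+
+/*
+ * Example: the fourcc is unpacked byte by byte, so DRM_FORMAT_NV12
+ * ('N','V','1','2') becomes the string "NV12":
+ *
+ *	char buf[ADF_FORMAT_STR_SIZE];
+ *
+ *	adf_format_str(DRM_FORMAT_NV12, buf);
+ */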
+
+/**
+ * adf_format_validate_yuv - validate the number and size of planes in buffers
+ * with a custom YUV format.
+ *
+ * @dev: ADF device performing the validation
+ * @buf: buffer to validate
+ * @num_planes: expected number of planes
+ * @hsub: expected horizontal chroma subsampling factor, in pixels
+ * @vsub: expected vertical chroma subsampling factor, in pixels
+ * @cpp: expected bytes per pixel for each plane (length @num_planes)
+ *
+ * adf_format_validate_yuv() is intended to be called as a helper from @dev's
+ * validate_custom_format() op.
+ *
+ * Returns 0 if @buf has the expected number of planes and each plane
+ * has sufficient size, or -EINVAL otherwise.
+ */
+int adf_format_validate_yuv(struct adf_device *dev, struct adf_buffer *buf,
+		u8 num_planes, u8 hsub, u8 vsub, u8 cpp[])
+{
+	u8 i;
+
+	if (num_planes != buf->n_planes) {
+		char format_str[ADF_FORMAT_STR_SIZE];
+		adf_format_str(buf->format, format_str);
+		dev_err(&dev->base.dev, "%u planes expected for format %s but %u planes provided\n",
+				num_planes, format_str, buf->n_planes);
+		return -EINVAL;
+	}
+
+	if (buf->w == 0 || buf->w % hsub) {
+		dev_err(&dev->base.dev, "bad buffer width %u\n", buf->w);
+		return -EINVAL;
+	}
+
+	if (buf->h == 0 || buf->h % vsub) {
+		dev_err(&dev->base.dev, "bad buffer height %u\n", buf->h);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_planes; i++) {
+		u32 width = buf->w / (i != 0 ? hsub : 1);
+		u32 height = buf->h / (i != 0 ? vsub : 1);
+		u8 cpp = adf_format_plane_cpp(buf->format, i);
+		u32 last_line_size;
+
+		if (buf->pitch[i] < (u64) width * cpp) {
+			dev_err(&dev->base.dev, "plane %u pitch is shorter than buffer width (pitch = %u, width = %u, bpp = %u)\n",
+					i, buf->pitch[i], width, cpp * 8);
+			return -EINVAL;
+		}
+
+		switch (dev->ops->quirks.buffer_padding) {
+		case ADF_BUFFER_PADDED_TO_PITCH:
+			last_line_size = buf->pitch[i];
+			break;
+
+		case ADF_BUFFER_UNPADDED:
+			last_line_size = width * cpp;
+			break;
+
+		default:
+			BUG();
+		}
+
+		if ((u64) (height - 1) * buf->pitch[i] + last_line_size +
+				buf->offset[i] > buf->dma_bufs[i]->size) {
+			dev_err(&dev->base.dev, "plane %u buffer too small (height = %u, pitch = %u, offset = %u, size = %zu)\n",
+					i, height, buf->pitch[i],
+					buf->offset[i], buf->dma_bufs[i]->size);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(adf_format_validate_yuv);
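+
+/*
+ * Usage sketch (hypothetical validate_custom_format() op; its signature
+ * is assumed here): an NV12-like layout has a full-resolution 1-byte Y
+ * plane plus a 2x2-subsampled interleaved CbCr plane at 2 bytes per
+ * sample:
+ *
+ *	static int example_validate_custom_format(struct adf_device *dev,
+ *			struct adf_buffer *buf)
+ *	{
+ *		u8 cpp[] = { 1, 2 };
+ *
+ *		return adf_format_validate_yuv(dev, buf, 2, 2, 2, cpp);
+ *	}
+ */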
+
+/**
+ * adf_modeinfo_set_name - sets the name of a mode from its display resolution
+ *
+ * @mode: mode
+ *
+ * adf_modeinfo_set_name() fills in @mode->name in the format
+ * "[hdisplay]x[vdisplay](i)".  It is intended to help drivers create
+ * ADF/DRM-style modelists from other mode formats.
+ */
+void adf_modeinfo_set_name(struct drm_mode_modeinfo *mode)
+{
+	bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+
+	snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
+		 mode->hdisplay, mode->vdisplay,
+		 interlaced ? "i" : "");
+}
+EXPORT_SYMBOL(adf_modeinfo_set_name);
+
+/**
+ * adf_modeinfo_set_vrefresh - sets the vrefresh of a mode from its other
+ * timing data
+ *
+ * @mode: mode
+ *
+ * adf_modeinfo_set_vrefresh() calculates @mode->vrefresh from
+ * @mode->{h,v}display and @mode->flags.  It is intended to help drivers
+ * create ADF/DRM-style modelists from other mode formats.
+ */
+void adf_modeinfo_set_vrefresh(struct drm_mode_modeinfo *mode)
+{
+	int refresh = 0;
+	unsigned int calc_val;
+
+	if (mode->vrefresh > 0)
+		return;
+
+	if (mode->htotal <= 0 || mode->vtotal <= 0)
+		return;
+
+	/* work out the vrefresh; the intermediate value is scaled x1000 */
+	calc_val = (mode->clock * 1000);
+	calc_val /= mode->htotal;
+	refresh = (calc_val + mode->vtotal / 2) / mode->vtotal;
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		refresh *= 2;
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		refresh /= 2;
+	if (mode->vscan > 1)
+		refresh /= mode->vscan;
+
+	mode->vrefresh = refresh;
+}
+EXPORT_SYMBOL(adf_modeinfo_set_vrefresh);
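+
+/*
+ * Example: for a standard 1080p timing these helpers recover the name
+ * and refresh rate: 148500 * 1000 / 2200 = 67500, and
+ * (67500 + 1125/2) / 1125 = 60:
+ *
+ *	struct drm_mode_modeinfo mode = {
+ *		.clock = 148500,
+ *		.hdisplay = 1920, .htotal = 2200,
+ *		.vdisplay = 1080, .vtotal = 1125,
+ *	};
+ *
+ *	adf_modeinfo_set_vrefresh(&mode);	(mode.vrefresh == 60)
+ *	adf_modeinfo_set_name(&mode);		(mode.name == "1920x1080")
+ */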
+
+static int __init adf_init(void)
+{
+	int err;
+
+	err = adf_sysfs_init();
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+static void __exit adf_exit(void)
+{
+	adf_sysfs_destroy();
+}
+
+module_init(adf_init);
+module_exit(adf_exit);
diff --git a/drivers/video/adf/adf.h b/drivers/video/adf/adf.h
new file mode 100644
index 0000000..3bcf1fa
--- /dev/null
+++ b/drivers/video/adf/adf.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VIDEO_ADF_ADF_H
+#define __VIDEO_ADF_ADF_H
+
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <video/adf.h>
+#include "sync.h"
+
+struct adf_event_refcount {
+	struct rb_node node;
+	enum adf_event_type type;
+	int refcount;
+};
+
+void adf_buffer_cleanup(struct adf_buffer *buf);
+void adf_buffer_mapping_cleanup(struct adf_buffer_mapping *mapping,
+		struct adf_buffer *buf);
+void adf_post_cleanup(struct adf_device *dev, struct adf_pending_post *post);
+
+struct adf_attachment_list *adf_attachment_find(struct list_head *list,
+		struct adf_overlay_engine *eng, struct adf_interface *intf);
+int adf_attachment_validate(struct adf_device *dev,
+		struct adf_overlay_engine *eng, struct adf_interface *intf);
+void adf_attachment_free(struct adf_attachment_list *attachment);
+
+struct adf_event_refcount *adf_obj_find_event_refcount(struct adf_obj *obj,
+		enum adf_event_type type);
+
+static inline int adf_obj_check_supports_event(struct adf_obj *obj,
+		enum adf_event_type type)
+{
+	if (!obj->ops || !obj->ops->supports_event)
+		return -EOPNOTSUPP;
+	if (!obj->ops->supports_event(obj, type))
+		return -EINVAL;
+	return 0;
+}
+
+static inline int adf_device_attach_op(struct adf_device *dev,
+		struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+	if (!dev->ops->attach)
+		return 0;
+
+	return dev->ops->attach(dev, eng, intf);
+}
+
+static inline int adf_device_detach_op(struct adf_device *dev,
+		struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+	if (!dev->ops->detach)
+		return 0;
+
+	return dev->ops->detach(dev, eng, intf);
+}
+
+#endif /* __VIDEO_ADF_ADF_H */
diff --git a/drivers/video/adf/adf_client.c b/drivers/video/adf/adf_client.c
new file mode 100644
index 0000000..8061d8e
--- /dev/null
+++ b/drivers/video/adf/adf_client.c
@@ -0,0 +1,811 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include "sw_sync.h"
+
+#include <video/adf.h>
+#include <video/adf_client.h>
+#include <video/adf_format.h>
+
+#include "adf.h"
+
+static inline bool vsync_active(u8 state)
+{
+	return state == DRM_MODE_DPMS_ON || state == DRM_MODE_DPMS_STANDBY;
+}
+
+/**
+ * adf_interface_blank - set interface's DPMS state
+ *
+ * @intf: the interface
+ * @state: one of %DRM_MODE_DPMS_*
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_interface_blank(struct adf_interface *intf, u8 state)
+{
+	struct adf_device *dev = adf_interface_parent(intf);
+	u8 prev_state;
+	bool disable_vsync;
+	bool enable_vsync;
+	int ret = 0;
+	struct adf_event_refcount *vsync_refcount;
+
+	if (!intf->ops || !intf->ops->blank)
+		return -EOPNOTSUPP;
+
+	if (state > DRM_MODE_DPMS_OFF)
+		return -EINVAL;
+
+	mutex_lock(&dev->client_lock);
+	if (state != DRM_MODE_DPMS_ON)
+		flush_kthread_worker(&dev->post_worker);
+	mutex_lock(&intf->base.event_lock);
+
+	vsync_refcount = adf_obj_find_event_refcount(&intf->base,
+			ADF_EVENT_VSYNC);
+	if (!vsync_refcount) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	prev_state = intf->dpms_state;
+	if (prev_state == state) {
+		ret = -EBUSY;
+		goto done;
+	}
+
+	disable_vsync = vsync_active(prev_state) &&
+			!vsync_active(state) &&
+			vsync_refcount->refcount;
+	enable_vsync = !vsync_active(prev_state) &&
+			vsync_active(state) &&
+			vsync_refcount->refcount;
+
+	if (disable_vsync)
+		intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
+				false);
+
+	ret = intf->ops->blank(intf, state);
+	if (ret < 0) {
+		if (disable_vsync)
+			intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
+					true);
+		goto done;
+	}
+
+	if (enable_vsync)
+		intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
+				true);
+
+	intf->dpms_state = state;
+done:
+	mutex_unlock(&intf->base.event_lock);
+	mutex_unlock(&dev->client_lock);
+	return ret;
+}
+EXPORT_SYMBOL(adf_interface_blank);
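+
+/*
+ * Usage sketch (process context; "intf" is an initialized interface):
+ *
+ *	err = adf_interface_blank(intf, DRM_MODE_DPMS_OFF);
+ *	...
+ *	err = adf_interface_blank(intf, DRM_MODE_DPMS_ON);
+ *
+ * -EBUSY indicates the interface is already in the requested state.
+ */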
+
+/**
+ * adf_interface_dpms_state - get interface's current DPMS state
+ *
+ * @intf: the interface
+ *
+ * Returns one of %DRM_MODE_DPMS_*.
+ */
+u8 adf_interface_dpms_state(struct adf_interface *intf)
+{
+	struct adf_device *dev = adf_interface_parent(intf);
+	u8 dpms_state;
+
+	mutex_lock(&dev->client_lock);
+	dpms_state = intf->dpms_state;
+	mutex_unlock(&dev->client_lock);
+
+	return dpms_state;
+}
+EXPORT_SYMBOL(adf_interface_dpms_state);
+
+/**
+ * adf_interface_current_mode - get interface's current display mode
+ *
+ * @intf: the interface
+ * @mode: returns the current mode
+ */
+void adf_interface_current_mode(struct adf_interface *intf,
+		struct drm_mode_modeinfo *mode)
+{
+	struct adf_device *dev = adf_interface_parent(intf);
+
+	mutex_lock(&dev->client_lock);
+	memcpy(mode, &intf->current_mode, sizeof(*mode));
+	mutex_unlock(&dev->client_lock);
+}
+EXPORT_SYMBOL(adf_interface_current_mode);
+
+/**
+ * adf_interface_modelist - get interface's modelist
+ *
+ * @intf: the interface
+ * @modelist: storage for the modelist (optional)
+ * @n_modes: length of @modelist
+ *
+ * If @modelist is not NULL, adf_interface_modelist() will copy up to @n_modes
+ * modelist entries into @modelist.
+ *
+ * Returns the length of the modelist.
+ */
+size_t adf_interface_modelist(struct adf_interface *intf,
+		struct drm_mode_modeinfo *modelist, size_t n_modes)
+{
+	unsigned long flags;
+	size_t retval;
+
+	read_lock_irqsave(&intf->hotplug_modelist_lock, flags);
+	if (modelist)
+		memcpy(modelist, intf->modelist, sizeof(modelist[0]) *
+				min(n_modes, intf->n_modes));
+	retval = intf->n_modes;
+	read_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);
+
+	return retval;
+}
+EXPORT_SYMBOL(adf_interface_modelist);
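+
+/*
+ * Example (illustrative sketch): the usual two-call pattern for reading the
+ * modelist; "intf" is a hypothetical interface pointer:
+ *
+ *	size_t n = adf_interface_modelist(intf, NULL, 0);
+ *	struct drm_mode_modeinfo *modes = kzalloc(sizeof(modes[0]) * n,
+ *			GFP_KERNEL);
+ *	if (modes)
+ *		n = adf_interface_modelist(intf, modes, n);
+ */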
+
+/**
+ * adf_interface_set_mode - set interface's display mode
+ *
+ * @intf: the interface
+ * @mode: the new mode
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_interface_set_mode(struct adf_interface *intf,
+		struct drm_mode_modeinfo *mode)
+{
+	struct adf_device *dev = adf_interface_parent(intf);
+	int ret = 0;
+
+	if (!intf->ops || !intf->ops->modeset)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&dev->client_lock);
+	flush_kthread_worker(&dev->post_worker);
+
+	ret = intf->ops->modeset(intf, mode);
+	if (ret < 0)
+		goto done;
+
+	memcpy(&intf->current_mode, mode, sizeof(*mode));
+done:
+	mutex_unlock(&dev->client_lock);
+	return ret;
+}
+EXPORT_SYMBOL(adf_interface_set_mode);
+
+/**
+ * adf_interface_get_screen_size - get size of screen connected to interface
+ *
+ * @intf: the interface
+ * @width_mm: returns the screen width in mm
+ * @height_mm: returns the screen height in mm
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_interface_get_screen_size(struct adf_interface *intf, u16 *width_mm,
+		u16 *height_mm)
+{
+	struct adf_device *dev = adf_interface_parent(intf);
+	int ret;
+
+	if (!intf->ops || !intf->ops->screen_size)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&dev->client_lock);
+	ret = intf->ops->screen_size(intf, width_mm, height_mm);
+	mutex_unlock(&dev->client_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(adf_interface_get_screen_size);
+
+/**
+ * adf_overlay_engine_supports_format - returns whether a format is in an
+ * overlay engine's supported list
+ *
+ * @eng: the overlay engine
+ * @format: format fourcc
+ */
+bool adf_overlay_engine_supports_format(struct adf_overlay_engine *eng,
+		u32 format)
+{
+	size_t i;
+	for (i = 0; i < eng->ops->n_supported_formats; i++)
+		if (format == eng->ops->supported_formats[i])
+			return true;
+
+	return false;
+}
+EXPORT_SYMBOL(adf_overlay_engine_supports_format);
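+
+/*
+ * Example (illustrative sketch): a client checking a fourcc against a
+ * hypothetical overlay engine "eng" before building an adf_buffer for it:
+ *
+ *	if (!adf_overlay_engine_supports_format(eng, DRM_FORMAT_RGB565))
+ *		return -EINVAL;
+ */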
+
+static int adf_buffer_validate(struct adf_buffer *buf)
+{
+	struct adf_overlay_engine *eng = buf->overlay_engine;
+	struct device *dev = &eng->base.dev;
+	struct adf_device *parent = adf_overlay_engine_parent(eng);
+	u8 hsub, vsub, num_planes, cpp[ADF_MAX_PLANES], i;
+
+	if (!adf_overlay_engine_supports_format(eng, buf->format)) {
+		char format_str[ADF_FORMAT_STR_SIZE];
+		adf_format_str(buf->format, format_str);
+		dev_err(dev, "unsupported format %s\n", format_str);
+		return -EINVAL;
+	}
+
+	if (!adf_format_is_standard(buf->format))
+		return parent->ops->validate_custom_format(parent, buf);
+
+	hsub = adf_format_horz_chroma_subsampling(buf->format);
+	vsub = adf_format_vert_chroma_subsampling(buf->format);
+	num_planes = adf_format_num_planes(buf->format);
+	for (i = 0; i < num_planes; i++)
+		cpp[i] = adf_format_plane_cpp(buf->format, i);
+
+	return adf_format_validate_yuv(parent, buf, num_planes, hsub, vsub,
+			cpp);
+}
+
+static int adf_buffer_map(struct adf_device *dev, struct adf_buffer *buf,
+		struct adf_buffer_mapping *mapping)
+{
+	int ret = 0;
+	size_t i;
+
+	for (i = 0; i < buf->n_planes; i++) {
+		struct dma_buf_attachment *attachment;
+		struct sg_table *sg_table;
+
+		attachment = dma_buf_attach(buf->dma_bufs[i], dev->dev);
+		if (IS_ERR(attachment)) {
+			ret = PTR_ERR(attachment);
+			dev_err(&dev->base.dev, "attaching plane %zu failed: %d\n",
+					i, ret);
+			goto done;
+		}
+		mapping->attachments[i] = attachment;
+
+		sg_table = dma_buf_map_attachment(attachment, DMA_TO_DEVICE);
+		if (IS_ERR(sg_table)) {
+			ret = PTR_ERR(sg_table);
+			dev_err(&dev->base.dev, "mapping plane %zu failed: %d",
+					i, ret);
+			goto done;
+		} else if (!sg_table) {
+			ret = -ENOMEM;
+			dev_err(&dev->base.dev, "mapping plane %zu failed\n",
+					i);
+			goto done;
+		}
+		mapping->sg_tables[i] = sg_table;
+	}
+
+done:
+	if (ret < 0)
+		adf_buffer_mapping_cleanup(mapping, buf);
+
+	return ret;
+}
+
+static struct sync_fence *adf_sw_complete_fence(struct adf_device *dev)
+{
+	struct sync_pt *pt;
+	struct sync_fence *complete_fence;
+
+	if (!dev->timeline) {
+		dev->timeline = sw_sync_timeline_create(dev->base.name);
+		if (!dev->timeline)
+			return ERR_PTR(-ENOMEM);
+		dev->timeline_max = 1;
+	}
+
+	dev->timeline_max++;
+	pt = sw_sync_pt_create(dev->timeline, dev->timeline_max);
+	if (!pt)
+		goto err_pt_create;
+	complete_fence = sync_fence_create(dev->base.name, pt);
+	if (!complete_fence)
+		goto err_fence_create;
+
+	return complete_fence;
+
+err_fence_create:
+	sync_pt_free(pt);
+err_pt_create:
+	dev->timeline_max--;
+	return ERR_PTR(-ENOMEM);
+}
+
+/**
+ * adf_device_post - flip to a new set of buffers
+ *
+ * @dev: device targeted by the flip
+ * @intfs: interfaces targeted by the flip
+ * @n_intfs: number of targeted interfaces
+ * @bufs: description of buffers displayed
+ * @n_bufs: number of buffers displayed
+ * @custom_data: driver-private data
+ * @custom_data_size: size of driver-private data
+ *
+ * adf_device_post() will copy @intfs, @bufs, and @custom_data, so they may
+ * point to variables on the stack.  adf_device_post() also takes its own
+ * reference on each of the dma-bufs in @bufs.  The adf_device_post_nocopy()
+ * variant transfers ownership of these resources to ADF instead.
+ *
+ * On success, returns a sync fence which signals when the buffers are removed
+ * from the screen.  On failure, returns ERR_PTR(-errno).
+ */
+struct sync_fence *adf_device_post(struct adf_device *dev,
+		struct adf_interface **intfs, size_t n_intfs,
+		struct adf_buffer *bufs, size_t n_bufs, void *custom_data,
+		size_t custom_data_size)
+{
+	struct adf_interface **intfs_copy = NULL;
+	struct adf_buffer *bufs_copy = NULL;
+	void *custom_data_copy = NULL;
+	struct sync_fence *ret;
+	size_t i;
+
+	intfs_copy = kzalloc(sizeof(intfs_copy[0]) * n_intfs, GFP_KERNEL);
+	if (!intfs_copy)
+		return ERR_PTR(-ENOMEM);
+
+	bufs_copy = kzalloc(sizeof(bufs_copy[0]) * n_bufs, GFP_KERNEL);
+	if (!bufs_copy) {
+		ret = ERR_PTR(-ENOMEM);
+		goto err_alloc;
+	}
+
+	custom_data_copy = kzalloc(custom_data_size, GFP_KERNEL);
+	if (!custom_data_copy) {
+		ret = ERR_PTR(-ENOMEM);
+		goto err_alloc;
+	}
+
+	for (i = 0; i < n_bufs; i++) {
+		size_t j;
+		for (j = 0; j < bufs[i].n_planes; j++)
+			get_dma_buf(bufs[i].dma_bufs[j]);
+	}
+
+	memcpy(intfs_copy, intfs, sizeof(intfs_copy[0]) * n_intfs);
+	memcpy(bufs_copy, bufs, sizeof(bufs_copy[0]) * n_bufs);
+	memcpy(custom_data_copy, custom_data, custom_data_size);
+
+	ret = adf_device_post_nocopy(dev, intfs_copy, n_intfs, bufs_copy,
+			n_bufs, custom_data_copy, custom_data_size);
+	if (IS_ERR(ret))
+		goto err_post;
+
+	return ret;
+
+err_post:
+	for (i = 0; i < n_bufs; i++) {
+		size_t j;
+		for (j = 0; j < bufs[i].n_planes; j++)
+			dma_buf_put(bufs[i].dma_bufs[j]);
+	}
+err_alloc:
+	kfree(custom_data_copy);
+	kfree(bufs_copy);
+	kfree(intfs_copy);
+	return ret;
+}
+EXPORT_SYMBOL(adf_device_post);
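+
+/*
+ * Example (illustrative sketch, with hypothetical dev/intf/buf variables):
+ * posting one buffer to one interface with no driver-private data, then
+ * dropping the returned fence once the caller is done with it:
+ *
+ *	struct adf_interface *intfs[] = { intf };
+ *	struct sync_fence *fence = adf_device_post(dev, intfs, 1, &buf, 1,
+ *			NULL, 0);
+ *	if (!IS_ERR(fence))
+ *		sync_fence_put(fence);
+ */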
+
+/**
+ * adf_device_post_nocopy - flip to a new set of buffers
+ *
+ * adf_device_post_nocopy() has the same behavior as adf_device_post(),
+ * except ADF does not copy @intfs, @bufs, or @custom_data, and it does
+ * not take an extra reference on the dma-bufs in @bufs.
+ *
+ * @intfs, @bufs, and @custom_data must point to buffers allocated by
+ * kmalloc().  On success, ADF takes ownership of these buffers and the dma-bufs
+ * in @bufs, and will kfree()/dma_buf_put() them when they are no longer needed.
+ * On failure, adf_device_post_nocopy() does NOT take ownership of these
+ * buffers or the dma-bufs, and the caller must clean them up.
+ *
+ * adf_device_post_nocopy() is mainly intended for implementing ADF's ioctls.
+ * Clients may find the nocopy variant useful in limited cases, but most should
+ * call adf_device_post() instead.
+ */
+struct sync_fence *adf_device_post_nocopy(struct adf_device *dev,
+		struct adf_interface **intfs, size_t n_intfs,
+		struct adf_buffer *bufs, size_t n_bufs,
+		void *custom_data, size_t custom_data_size)
+{
+	struct adf_pending_post *cfg;
+	struct adf_buffer_mapping *mappings;
+	struct sync_fence *ret;
+	size_t i;
+	int err;
+
+	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+	if (!cfg)
+		return ERR_PTR(-ENOMEM);
+
+	mappings = kzalloc(sizeof(mappings[0]) * n_bufs, GFP_KERNEL);
+	if (!mappings) {
+		ret = ERR_PTR(-ENOMEM);
+		goto err_alloc;
+	}
+
+	mutex_lock(&dev->client_lock);
+
+	for (i = 0; i < n_bufs; i++) {
+		err = adf_buffer_validate(&bufs[i]);
+		if (err < 0) {
+			ret = ERR_PTR(err);
+			goto err_buf;
+		}
+
+		err = adf_buffer_map(dev, &bufs[i], &mappings[i]);
+		if (err < 0) {
+			ret = ERR_PTR(err);
+			goto err_buf;
+		}
+	}
+
+	INIT_LIST_HEAD(&cfg->head);
+	cfg->config.n_bufs = n_bufs;
+	cfg->config.bufs = bufs;
+	cfg->config.mappings = mappings;
+	cfg->config.custom_data = custom_data;
+	cfg->config.custom_data_size = custom_data_size;
+
+	err = dev->ops->validate(dev, &cfg->config, &cfg->state);
+	if (err < 0) {
+		ret = ERR_PTR(err);
+		goto err_buf;
+	}
+
+	mutex_lock(&dev->post_lock);
+
+	if (dev->ops->complete_fence)
+		ret = dev->ops->complete_fence(dev, &cfg->config,
+				cfg->state);
+	else
+		ret = adf_sw_complete_fence(dev);
+
+	if (IS_ERR(ret))
+		goto err_fence;
+
+	list_add_tail(&cfg->head, &dev->post_list);
+	queue_kthread_work(&dev->post_worker, &dev->post_work);
+	mutex_unlock(&dev->post_lock);
+	mutex_unlock(&dev->client_lock);
+	kfree(intfs);
+	return ret;
+
+err_fence:
+	mutex_unlock(&dev->post_lock);
+
+err_buf:
+	for (i = 0; i < n_bufs; i++)
+		adf_buffer_mapping_cleanup(&mappings[i], &bufs[i]);
+
+	mutex_unlock(&dev->client_lock);
+	kfree(mappings);
+
+err_alloc:
+	kfree(cfg);
+	return ret;
+}
+EXPORT_SYMBOL(adf_device_post_nocopy);
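+
+/*
+ * Example (illustrative sketch): since adf_device_post_nocopy() only takes
+ * ownership on success, a caller must release everything itself on failure,
+ * as adf_device_post_config() in adf_fops.c does:
+ *
+ *	fence = adf_device_post_nocopy(dev, intfs, n_intfs, bufs, n_bufs,
+ *			custom_data, custom_data_size);
+ *	if (IS_ERR(fence)) {
+ *		for (i = 0; i < n_bufs; i++)
+ *			adf_buffer_cleanup(&bufs[i]);
+ *		kfree(custom_data);
+ *		kfree(bufs);
+ *		kfree(intfs);
+ *	}
+ */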
+
+static void adf_attachment_list_to_array(struct adf_device *dev,
+		struct list_head *src, struct adf_attachment *dst, size_t size)
+{
+	struct adf_attachment_list *entry;
+	size_t i = 0;
+
+	if (!dst)
+		return;
+
+	list_for_each_entry(entry, src, head) {
+		if (i == size)
+			return;
+		dst[i] = entry->attachment;
+		i++;
+	}
+}
+
+/**
+ * adf_device_attachments - get device's list of active attachments
+ *
+ * @dev: the device
+ * @attachments: storage for the attachment list (optional)
+ * @n_attachments: length of @attachments
+ *
+ * If @attachments is not NULL, adf_device_attachments() will copy up to
+ * @n_attachments entries into @attachments.
+ *
+ * Returns the length of the active attachment list.
+ */
+size_t adf_device_attachments(struct adf_device *dev,
+		struct adf_attachment *attachments, size_t n_attachments)
+{
+	size_t retval;
+
+	mutex_lock(&dev->client_lock);
+	adf_attachment_list_to_array(dev, &dev->attached, attachments,
+			n_attachments);
+	retval = dev->n_attached;
+	mutex_unlock(&dev->client_lock);
+
+	return retval;
+}
+EXPORT_SYMBOL(adf_device_attachments);
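+
+/*
+ * Example (illustrative sketch): sizing the attachment list with the same
+ * two-call pattern as adf_interface_modelist():
+ *
+ *	size_t n = adf_device_attachments(dev, NULL, 0);
+ *	struct adf_attachment *list = kzalloc(sizeof(list[0]) * n,
+ *			GFP_KERNEL);
+ *	if (list)
+ *		n = adf_device_attachments(dev, list, n);
+ */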
+
+/**
+ * adf_device_attachments_allowed - get device's list of allowed attachments
+ *
+ * @dev: the device
+ * @attachments: storage for the attachment list (optional)
+ * @n_attachments: length of @attachments
+ *
+ * If @attachments is not NULL, adf_device_attachments_allowed() will copy up to
+ * @n_attachments entries into @attachments.
+ *
+ * Returns the length of the allowed attachment list.
+ */
+size_t adf_device_attachments_allowed(struct adf_device *dev,
+		struct adf_attachment *attachments, size_t n_attachments)
+{
+	size_t retval;
+
+	mutex_lock(&dev->client_lock);
+	adf_attachment_list_to_array(dev, &dev->attach_allowed, attachments,
+			n_attachments);
+	retval = dev->n_attach_allowed;
+	mutex_unlock(&dev->client_lock);
+
+	return retval;
+}
+EXPORT_SYMBOL(adf_device_attachments_allowed);
+
+/**
+ * adf_device_attached - return whether an overlay engine and interface are
+ * attached
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ */
+bool adf_device_attached(struct adf_device *dev, struct adf_overlay_engine *eng,
+		struct adf_interface *intf)
+{
+	struct adf_attachment_list *attachment;
+
+	mutex_lock(&dev->client_lock);
+	attachment = adf_attachment_find(&dev->attached, eng, intf);
+	mutex_unlock(&dev->client_lock);
+
+	return attachment != NULL;
+}
+EXPORT_SYMBOL(adf_device_attached);
+
+/**
+ * adf_device_attach_allowed - return whether the ADF device supports attaching
+ * an overlay engine and interface
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ */
+bool adf_device_attach_allowed(struct adf_device *dev,
+		struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+	struct adf_attachment_list *attachment;
+
+	mutex_lock(&dev->client_lock);
+	attachment = adf_attachment_find(&dev->attach_allowed, eng, intf);
+	mutex_unlock(&dev->client_lock);
+
+	return attachment != NULL;
+}
+EXPORT_SYMBOL(adf_device_attach_allowed);
+
+/**
+ * adf_device_attach - attach an overlay engine to an interface
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ *
+ * Returns 0 on success, -%EINVAL if attaching @intf and @eng is not allowed,
+ * -%EALREADY if @intf and @eng are already attached, or -errno on any other
+ * failure.
+ */
+int adf_device_attach(struct adf_device *dev, struct adf_overlay_engine *eng,
+		struct adf_interface *intf)
+{
+	int ret;
+	struct adf_attachment_list *attachment = NULL;
+
+	ret = adf_attachment_validate(dev, eng, intf);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&dev->client_lock);
+
+	if (dev->n_attached == ADF_MAX_ATTACHMENTS) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	if (!adf_attachment_find(&dev->attach_allowed, eng, intf)) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (adf_attachment_find(&dev->attached, eng, intf)) {
+		ret = -EALREADY;
+		goto done;
+	}
+
+	ret = adf_device_attach_op(dev, eng, intf);
+	if (ret < 0)
+		goto done;
+
+	attachment = kzalloc(sizeof(*attachment), GFP_KERNEL);
+	if (!attachment) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	attachment->attachment.interface = intf;
+	attachment->attachment.overlay_engine = eng;
+	list_add_tail(&attachment->head, &dev->attached);
+	dev->n_attached++;
+
+done:
+	mutex_unlock(&dev->client_lock);
+	if (ret < 0)
+		kfree(attachment);
+
+	return ret;
+}
+EXPORT_SYMBOL(adf_device_attach);
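+
+/*
+ * Example (illustrative sketch): attaching at client-init time and treating
+ * an existing attachment as success, the way adf_fbdev_open() in
+ * adf_fbdev.c does:
+ *
+ *	ret = adf_device_attach(dev, eng, intf);
+ *	if (ret < 0 && ret != -EALREADY)
+ *		return ret;
+ */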
+
+/**
+ * adf_device_detach - detach an overlay engine from an interface
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ *
+ * Returns 0 on success, -%EINVAL if @intf and @eng are not attached,
+ * or -errno on any other failure.
+ */
+int adf_device_detach(struct adf_device *dev, struct adf_overlay_engine *eng,
+		struct adf_interface *intf)
+{
+	int ret;
+	struct adf_attachment_list *attachment;
+
+	ret = adf_attachment_validate(dev, eng, intf);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&dev->client_lock);
+
+	attachment = adf_attachment_find(&dev->attached, eng, intf);
+	if (!attachment) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = adf_device_detach_op(dev, eng, intf);
+	if (ret < 0)
+		goto done;
+
+	adf_attachment_free(attachment);
+	dev->n_attached--;
+done:
+	mutex_unlock(&dev->client_lock);
+	return ret;
+}
+EXPORT_SYMBOL(adf_device_detach);
+
+/**
+ * adf_interface_simple_buffer_alloc - allocate a simple buffer
+ *
+ * @intf: target interface
+ * @w: width in pixels
+ * @h: height in pixels
+ * @format: format fourcc
+ * @dma_buf: returns the allocated buffer
+ * @offset: returns the byte offset of the allocated buffer's first pixel
+ * @pitch: returns the allocated buffer's pitch
+ *
+ * See &struct adf_simple_buffer_alloc for a description of simple buffers and
+ * their limitations.
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_interface_simple_buffer_alloc(struct adf_interface *intf, u16 w, u16 h,
+		u32 format, struct dma_buf **dma_buf, u32 *offset, u32 *pitch)
+{
+	if (!intf->ops || !intf->ops->alloc_simple_buffer)
+		return -EOPNOTSUPP;
+
+	if (!adf_format_is_rgb(format))
+		return -EINVAL;
+
+	return intf->ops->alloc_simple_buffer(intf, w, h, format, dma_buf,
+			offset, pitch);
+}
+EXPORT_SYMBOL(adf_interface_simple_buffer_alloc);
+
+/**
+ * adf_interface_simple_post - flip to a single buffer
+ *
+ * @intf: interface targeted by the flip
+ * @buf: buffer to display
+ *
+ * adf_interface_simple_post() can be used generically for simple display
+ * configurations, since the client does not need to provide any driver-private
+ * configuration data.
+ *
+ * adf_interface_simple_post() has the same copying semantics as
+ * adf_device_post().
+ *
+ * On success, returns a sync fence which signals when the buffer is removed
+ * from the screen.  On failure, returns ERR_PTR(-errno).
+ */
+struct sync_fence *adf_interface_simple_post(struct adf_interface *intf,
+		struct adf_buffer *buf)
+{
+	size_t custom_data_size = 0;
+	void *custom_data = NULL;
+	struct sync_fence *ret;
+
+	if (intf->ops && intf->ops->describe_simple_post) {
+		int err;
+
+		custom_data = kzalloc(ADF_MAX_CUSTOM_DATA_SIZE, GFP_KERNEL);
+		if (!custom_data) {
+			ret = ERR_PTR(-ENOMEM);
+			goto done;
+		}
+
+		err = intf->ops->describe_simple_post(intf, buf, custom_data,
+				&custom_data_size);
+		if (err < 0) {
+			ret = ERR_PTR(err);
+			goto done;
+		}
+	}
+
+	ret = adf_device_post(adf_interface_parent(intf), &intf, 1, buf, 1,
+			custom_data, custom_data_size);
+done:
+	kfree(custom_data);
+	return ret;
+}
+EXPORT_SYMBOL(adf_interface_simple_post);
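+
+/*
+ * Example (illustrative sketch, with hypothetical intf/eng/dma_buf/xres/yres
+ * variables): the minimal single-plane RGB flip a simple client needs:
+ *
+ *	struct adf_buffer buf = {
+ *		.overlay_engine = eng,
+ *		.w = xres,
+ *		.h = yres,
+ *		.format = DRM_FORMAT_RGB565,
+ *		.dma_bufs = { dma_buf },
+ *		.pitch = { xres * 2 },
+ *		.n_planes = 1,
+ *	};
+ *	struct sync_fence *fence = adf_interface_simple_post(intf, &buf);
+ *	if (!IS_ERR(fence))
+ *		sync_fence_put(fence);
+ */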
diff --git a/drivers/video/adf/adf_fbdev.c b/drivers/video/adf/adf_fbdev.c
new file mode 100644
index 0000000..a5b53bc
--- /dev/null
+++ b/drivers/video/adf/adf_fbdev.c
@@ -0,0 +1,665 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/vmalloc.h>
+
+#include <video/adf.h>
+#include <video/adf_client.h>
+#include <video/adf_fbdev.h>
+#include <video/adf_format.h>
+
+#include "adf.h"
+
+struct adf_fbdev_format {
+	u32 fourcc;
+	u32 bpp;
+	u32 r_length;
+	u32 g_length;
+	u32 b_length;
+	u32 a_length;
+	u32 r_offset;
+	u32 g_offset;
+	u32 b_offset;
+	u32 a_offset;
+};
+
+static const struct adf_fbdev_format format_table[] = {
+	{DRM_FORMAT_RGB332, 8, 3, 3, 2, 0, 5, 2, 0, 0},
+	{DRM_FORMAT_BGR233, 8, 3, 3, 2, 0, 0, 3, 5, 0},
+
+	{DRM_FORMAT_XRGB4444, 16, 4, 4, 4, 0, 8, 4, 0, 0},
+	{DRM_FORMAT_XBGR4444, 16, 4, 4, 4, 0, 0, 4, 8, 0},
+	{DRM_FORMAT_RGBX4444, 16, 4, 4, 4, 0, 12, 8, 4, 0},
+	{DRM_FORMAT_BGRX4444, 16, 4, 4, 4, 0, 0, 4, 8, 0},
+
+	{DRM_FORMAT_ARGB4444, 16, 4, 4, 4, 4, 8, 4, 0, 12},
+	{DRM_FORMAT_ABGR4444, 16, 4, 4, 4, 4, 0, 4, 8, 12},
+	{DRM_FORMAT_RGBA4444, 16, 4, 4, 4, 4, 12, 8, 4, 0},
+	{DRM_FORMAT_BGRA4444, 16, 4, 4, 4, 4, 0, 4, 8, 0},
+
+	{DRM_FORMAT_XRGB1555, 16, 5, 5, 5, 0, 10, 5, 0, 0},
+	{DRM_FORMAT_XBGR1555, 16, 5, 5, 5, 0, 0, 5, 10, 0},
+	{DRM_FORMAT_RGBX5551, 16, 5, 5, 5, 0, 11, 6, 1, 0},
+	{DRM_FORMAT_BGRX5551, 16, 5, 5, 5, 0, 1, 6, 11, 0},
+
+	{DRM_FORMAT_ARGB1555, 16, 5, 5, 5, 1, 10, 5, 0, 15},
+	{DRM_FORMAT_ABGR1555, 16, 5, 5, 5, 1, 0, 5, 10, 15},
+	{DRM_FORMAT_RGBA5551, 16, 5, 5, 5, 1, 11, 6, 1, 0},
+	{DRM_FORMAT_BGRA5551, 16, 5, 5, 5, 1, 1, 6, 11, 0},
+
+	{DRM_FORMAT_RGB565, 16, 5, 6, 5, 0, 11, 5, 0, 0},
+	{DRM_FORMAT_BGR565, 16, 5, 6, 5, 0, 0, 5, 11, 0},
+
+	{DRM_FORMAT_RGB888, 24, 8, 8, 8, 0, 16, 8, 0, 0},
+	{DRM_FORMAT_BGR888, 24, 8, 8, 8, 0, 0, 8, 16, 0},
+
+	{DRM_FORMAT_XRGB8888, 32, 8, 8, 8, 0, 16, 8, 0, 0},
+	{DRM_FORMAT_XBGR8888, 32, 8, 8, 8, 0, 0, 8, 16, 0},
+	{DRM_FORMAT_RGBX8888, 32, 8, 8, 8, 0, 24, 16, 8, 0},
+	{DRM_FORMAT_BGRX8888, 32, 8, 8, 8, 0, 8, 16, 24, 0},
+
+	{DRM_FORMAT_ARGB8888, 32, 8, 8, 8, 8, 16, 8, 0, 24},
+	{DRM_FORMAT_ABGR8888, 32, 8, 8, 8, 8, 0, 8, 16, 24},
+	{DRM_FORMAT_RGBA8888, 32, 8, 8, 8, 8, 24, 16, 8, 0},
+	{DRM_FORMAT_BGRA8888, 32, 8, 8, 8, 8, 8, 16, 24, 0},
+
+	{DRM_FORMAT_XRGB2101010, 32, 10, 10, 10, 0, 20, 10, 0, 0},
+	{DRM_FORMAT_XBGR2101010, 32, 10, 10, 10, 0, 0, 10, 20, 0},
+	{DRM_FORMAT_RGBX1010102, 32, 10, 10, 10, 0, 22, 12, 2, 0},
+	{DRM_FORMAT_BGRX1010102, 32, 10, 10, 10, 0, 2, 12, 22, 0},
+
+	{DRM_FORMAT_ARGB2101010, 32, 10, 10, 10, 2, 20, 10, 0, 30},
+	{DRM_FORMAT_ABGR2101010, 32, 10, 10, 10, 2, 0, 10, 20, 30},
+	{DRM_FORMAT_RGBA1010102, 32, 10, 10, 10, 2, 22, 12, 2, 0},
+	{DRM_FORMAT_BGRA1010102, 32, 10, 10, 10, 2, 2, 12, 22, 0},
+};
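+
+/*
+ * Reading the table (illustrative): the DRM_FORMAT_RGB565 row describes a
+ * 16-bpp format with 5/6/5 bits of red/green/blue at bit offsets 11/5/0 and
+ * no alpha, i.e. the fb_var_screeninfo bitfields
+ *
+ *	var->red   = (struct fb_bitfield){ .offset = 11, .length = 5 };
+ *	var->green = (struct fb_bitfield){ .offset = 5,  .length = 6 };
+ *	var->blue  = (struct fb_bitfield){ .offset = 0,  .length = 5 };
+ */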
+
+static u32 drm_fourcc_from_fb_var(struct fb_var_screeninfo *var)
+{
+	size_t i;
+	for (i = 0; i < ARRAY_SIZE(format_table); i++) {
+		const struct adf_fbdev_format *f = &format_table[i];
+		if (var->red.length == f->r_length &&
+			var->red.offset == f->r_offset &&
+			var->green.length == f->g_length &&
+			var->green.offset == f->g_offset &&
+			var->blue.length == f->b_length &&
+			var->blue.offset == f->b_offset &&
+			var->transp.length == f->a_length &&
+			(var->transp.length == 0 ||
+					var->transp.offset == f->a_offset))
+			return f->fourcc;
+	}
+
+	return 0;
+}
+
+static const struct adf_fbdev_format *fbdev_format_info(u32 format)
+{
+	size_t i;
+	for (i = 0; i < ARRAY_SIZE(format_table); i++) {
+		const struct adf_fbdev_format *f = &format_table[i];
+		if (f->fourcc == format)
+			return f;
+	}
+
+	BUG();
+}
+
+void adf_modeinfo_to_fb_videomode(const struct drm_mode_modeinfo *mode,
+		struct fb_videomode *vmode)
+{
+	memset(vmode, 0, sizeof(*vmode));
+
+	vmode->refresh = mode->vrefresh;
+
+	vmode->xres = mode->hdisplay;
+	vmode->yres = mode->vdisplay;
+
+	vmode->pixclock = mode->clock ? KHZ2PICOS(mode->clock) : 0;
+	vmode->left_margin = mode->htotal - mode->hsync_end;
+	vmode->right_margin = mode->hsync_start - mode->hdisplay;
+	vmode->upper_margin = mode->vtotal - mode->vsync_end;
+	vmode->lower_margin = mode->vsync_start - mode->vdisplay;
+	vmode->hsync_len = mode->hsync_end - mode->hsync_start;
+	vmode->vsync_len = mode->vsync_end - mode->vsync_start;
+
+	vmode->sync = 0;
+	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+		vmode->sync |= FB_SYNC_HOR_HIGH_ACT;
+	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+		vmode->sync |= FB_SYNC_VERT_HIGH_ACT;
+	if (mode->flags & DRM_MODE_FLAG_PCSYNC)
+		vmode->sync |= FB_SYNC_COMP_HIGH_ACT;
+	if (mode->flags & DRM_MODE_FLAG_BCAST)
+		vmode->sync |= FB_SYNC_BROADCAST;
+
+	vmode->vmode = 0;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		vmode->vmode |= FB_VMODE_INTERLACED;
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		vmode->vmode |= FB_VMODE_DOUBLE;
+}
+EXPORT_SYMBOL(adf_modeinfo_to_fb_videomode);
+
+void adf_modeinfo_from_fb_videomode(const struct fb_videomode *vmode,
+		struct drm_mode_modeinfo *mode)
+{
+	memset(mode, 0, sizeof(*mode));
+
+	mode->hdisplay = vmode->xres;
+	mode->hsync_start = mode->hdisplay + vmode->right_margin;
+	mode->hsync_end = mode->hsync_start + vmode->hsync_len;
+	mode->htotal = mode->hsync_end + vmode->left_margin;
+
+	mode->vdisplay = vmode->yres;
+	mode->vsync_start = mode->vdisplay + vmode->lower_margin;
+	mode->vsync_end = mode->vsync_start + vmode->vsync_len;
+	mode->vtotal = mode->vsync_end + vmode->upper_margin;
+
+	mode->clock = vmode->pixclock ? PICOS2KHZ(vmode->pixclock) : 0;
+
+	mode->flags = 0;
+	if (vmode->sync & FB_SYNC_HOR_HIGH_ACT)
+		mode->flags |= DRM_MODE_FLAG_PHSYNC;
+	if (vmode->sync & FB_SYNC_VERT_HIGH_ACT)
+		mode->flags |= DRM_MODE_FLAG_PVSYNC;
+	if (vmode->sync & FB_SYNC_COMP_HIGH_ACT)
+		mode->flags |= DRM_MODE_FLAG_PCSYNC;
+	if (vmode->sync & FB_SYNC_BROADCAST)
+		mode->flags |= DRM_MODE_FLAG_BCAST;
+	if (vmode->vmode & FB_VMODE_INTERLACED)
+		mode->flags |= DRM_MODE_FLAG_INTERLACE;
+	if (vmode->vmode & FB_VMODE_DOUBLE)
+		mode->flags |= DRM_MODE_FLAG_DBLSCAN;
+
+	if (vmode->refresh)
+		mode->vrefresh = vmode->refresh;
+	else
+		adf_modeinfo_set_vrefresh(mode);
+
+	if (vmode->name)
+		strlcpy(mode->name, vmode->name, sizeof(mode->name));
+	else
+		adf_modeinfo_set_name(mode);
+}
+EXPORT_SYMBOL(adf_modeinfo_from_fb_videomode);
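+
+/*
+ * Example (illustrative sketch): converting an fbdev-provided mode (here an
+ * entry of the core fbdev vesa_modes[] table, chosen arbitrarily) to an ADF
+ * mode and back; the two helpers are inverses for the fields both APIs
+ * carry:
+ *
+ *	struct drm_mode_modeinfo mode;
+ *	struct fb_videomode vmode;
+ *
+ *	adf_modeinfo_from_fb_videomode(&vesa_modes[3], &mode);
+ *	adf_modeinfo_to_fb_videomode(&mode, &vmode);
+ */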
+
+static int adf_fbdev_post(struct adf_fbdev *fbdev)
+{
+	struct adf_buffer buf;
+	struct sync_fence *complete_fence;
+	int ret = 0;
+
+	memset(&buf, 0, sizeof(buf));
+	buf.overlay_engine = fbdev->eng;
+	buf.w = fbdev->info->var.xres;
+	buf.h = fbdev->info->var.yres;
+	buf.format = fbdev->format;
+	buf.dma_bufs[0] = fbdev->dma_buf;
+	buf.offset[0] = fbdev->offset +
+			fbdev->info->var.yoffset * fbdev->pitch +
+			fbdev->info->var.xoffset *
+			(fbdev->info->var.bits_per_pixel / 8);
+	buf.pitch[0] = fbdev->pitch;
+	buf.n_planes = 1;
+
+	complete_fence = adf_interface_simple_post(fbdev->intf, &buf);
+	if (IS_ERR(complete_fence)) {
+		ret = PTR_ERR(complete_fence);
+		goto done;
+	}
+
+	sync_fence_put(complete_fence);
+done:
+	return ret;
+}
+
+static const u16 vga_palette[][3] = {
+	{0x0000, 0x0000, 0x0000},
+	{0x0000, 0x0000, 0xAAAA},
+	{0x0000, 0xAAAA, 0x0000},
+	{0x0000, 0xAAAA, 0xAAAA},
+	{0xAAAA, 0x0000, 0x0000},
+	{0xAAAA, 0x0000, 0xAAAA},
+	{0xAAAA, 0x5555, 0x0000},
+	{0xAAAA, 0xAAAA, 0xAAAA},
+	{0x5555, 0x5555, 0x5555},
+	{0x5555, 0x5555, 0xFFFF},
+	{0x5555, 0xFFFF, 0x5555},
+	{0x5555, 0xFFFF, 0xFFFF},
+	{0xFFFF, 0x5555, 0x5555},
+	{0xFFFF, 0x5555, 0xFFFF},
+	{0xFFFF, 0xFFFF, 0x5555},
+	{0xFFFF, 0xFFFF, 0xFFFF},
+};
+
+static int adf_fb_alloc(struct adf_fbdev *fbdev)
+{
+	int ret;
+
+	ret = adf_interface_simple_buffer_alloc(fbdev->intf,
+			fbdev->default_xres_virtual,
+			fbdev->default_yres_virtual,
+			fbdev->default_format,
+			&fbdev->dma_buf, &fbdev->offset, &fbdev->pitch);
+	if (ret < 0) {
+		dev_err(fbdev->info->dev, "allocating fb failed: %d\n", ret);
+		return ret;
+	}
+
+	fbdev->vaddr = dma_buf_vmap(fbdev->dma_buf);
+	if (!fbdev->vaddr) {
+		ret = -ENOMEM;
+		dev_err(fbdev->info->dev, "vmapping fb failed\n");
+		goto err_vmap;
+	}
+	fbdev->info->fix.line_length = fbdev->pitch;
+	fbdev->info->var.xres_virtual = fbdev->default_xres_virtual;
+	fbdev->info->var.yres_virtual = fbdev->default_yres_virtual;
+	fbdev->info->fix.smem_len = fbdev->dma_buf->size;
+	fbdev->info->screen_base = fbdev->vaddr;
+
+	return 0;
+
+err_vmap:
+	dma_buf_put(fbdev->dma_buf);
+	return ret;
+}
+
+static void adf_fb_destroy(struct adf_fbdev *fbdev)
+{
+	dma_buf_vunmap(fbdev->dma_buf, fbdev->vaddr);
+	dma_buf_put(fbdev->dma_buf);
+}
+
+static void adf_fbdev_set_format(struct adf_fbdev *fbdev, u32 format)
+{
+	size_t i;
+	const struct adf_fbdev_format *info = fbdev_format_info(format);
+	for (i = 0; i < ARRAY_SIZE(vga_palette); i++) {
+		u16 r = vga_palette[i][0];
+		u16 g = vga_palette[i][1];
+		u16 b = vga_palette[i][2];
+
+		r >>= (16 - info->r_length);
+		g >>= (16 - info->g_length);
+		b >>= (16 - info->b_length);
+
+		fbdev->pseudo_palette[i] =
+			(r << info->r_offset) |
+			(g << info->g_offset) |
+			(b << info->b_offset);
+
+		if (info->a_length) {
+			u16 a = BIT(info->a_length) - 1;
+			fbdev->pseudo_palette[i] |= (a << info->a_offset);
+		}
+	}
+
+	fbdev->info->var.bits_per_pixel = adf_format_bpp(format);
+	fbdev->info->var.red.length = info->r_length;
+	fbdev->info->var.red.offset = info->r_offset;
+	fbdev->info->var.green.length = info->g_length;
+	fbdev->info->var.green.offset = info->g_offset;
+	fbdev->info->var.blue.length = info->b_length;
+	fbdev->info->var.blue.offset = info->b_offset;
+	fbdev->info->var.transp.length = info->a_length;
+	fbdev->info->var.transp.offset = info->a_offset;
+	fbdev->format = format;
+}
+
+static void adf_fbdev_fill_modelist(struct adf_fbdev *fbdev)
+{
+	struct drm_mode_modeinfo *modelist;
+	struct fb_videomode fbmode;
+	size_t n_modes, i;
+	int ret = 0;
+
+	n_modes = adf_interface_modelist(fbdev->intf, NULL, 0);
+	modelist = kzalloc(sizeof(modelist[0]) * n_modes, GFP_KERNEL);
+	if (!modelist) {
+		dev_warn(fbdev->info->dev, "allocating new modelist failed; keeping old modelist\n");
+		return;
+	}
+	adf_interface_modelist(fbdev->intf, modelist, n_modes);
+
+	fb_destroy_modelist(&fbdev->info->modelist);
+
+	for (i = 0; i < n_modes; i++) {
+		adf_modeinfo_to_fb_videomode(&modelist[i], &fbmode);
+		ret = fb_add_videomode(&fbmode, &fbdev->info->modelist);
+		if (ret < 0)
+			dev_warn(fbdev->info->dev, "adding mode %s to modelist failed: %d\n",
+					modelist[i].name, ret);
+	}
+
+	kfree(modelist);
+}
+
+/**
+ * adf_fbdev_open - default implementation of fbdev open op
+ */
+int adf_fbdev_open(struct fb_info *info, int user)
+{
+	struct adf_fbdev *fbdev = info->par;
+	int ret;
+
+	mutex_lock(&fbdev->refcount_lock);
+
+	if (unlikely(fbdev->refcount == UINT_MAX)) {
+		ret = -EMFILE;
+		goto done;
+	}
+
+	if (!fbdev->refcount) {
+		struct drm_mode_modeinfo mode;
+		struct fb_videomode fbmode;
+		struct adf_device *dev = adf_interface_parent(fbdev->intf);
+
+		ret = adf_device_attach(dev, fbdev->eng, fbdev->intf);
+		if (ret < 0 && ret != -EALREADY)
+			goto done;
+
+		ret = adf_fb_alloc(fbdev);
+		if (ret < 0)
+			goto done;
+
+		adf_interface_current_mode(fbdev->intf, &mode);
+		adf_modeinfo_to_fb_videomode(&mode, &fbmode);
+		fb_videomode_to_var(&fbdev->info->var, &fbmode);
+
+		adf_fbdev_set_format(fbdev, fbdev->default_format);
+		adf_fbdev_fill_modelist(fbdev);
+	}
+
+	ret = adf_fbdev_post(fbdev);
+	if (ret < 0) {
+		if (!fbdev->refcount)
+			adf_fb_destroy(fbdev);
+		goto done;
+	}
+
+	fbdev->refcount++;
+done:
+	mutex_unlock(&fbdev->refcount_lock);
+	return ret;
+}
+EXPORT_SYMBOL(adf_fbdev_open);
+
+/**
+ * adf_fbdev_release - default implementation of fbdev release op
+ */
+int adf_fbdev_release(struct fb_info *info, int user)
+{
+	struct adf_fbdev *fbdev = info->par;
+	mutex_lock(&fbdev->refcount_lock);
+	BUG_ON(!fbdev->refcount);
+	fbdev->refcount--;
+	if (!fbdev->refcount)
+		adf_fb_destroy(fbdev);
+	mutex_unlock(&fbdev->refcount_lock);
+	return 0;
+}
+EXPORT_SYMBOL(adf_fbdev_release);
+
+/**
+ * adf_fbdev_check_var - default implementation of fbdev check_var op
+ */
+int adf_fbdev_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+	struct adf_fbdev *fbdev = info->par;
+	bool valid_format = true;
+	u32 format = drm_fourcc_from_fb_var(var);
+	u32 pitch = var->xres_virtual * var->bits_per_pixel / 8;
+
+	if (!format) {
+		dev_dbg(info->dev, "%s: unrecognized format\n", __func__);
+		valid_format = false;
+	}
+
+	if (valid_format && var->grayscale) {
+		dev_dbg(info->dev, "%s: grayscale modes not supported\n",
+				__func__);
+		valid_format = false;
+	}
+
+	if (valid_format && var->nonstd) {
+		dev_dbg(info->dev, "%s: nonstandard formats not supported\n",
+				__func__);
+		valid_format = false;
+	}
+
+	if (valid_format && !adf_overlay_engine_supports_format(fbdev->eng,
+			format)) {
+		char format_str[ADF_FORMAT_STR_SIZE];
+		adf_format_str(format, format_str);
+		dev_dbg(info->dev, "%s: format %s not supported by overlay engine %s\n",
+				__func__, format_str, fbdev->eng->base.name);
+		valid_format = false;
+	}
+
+	if (valid_format && pitch > fbdev->pitch) {
+		dev_dbg(info->dev, "%s: fb pitch too small for var (pitch = %u, xres_virtual = %u, bits_per_pixel = %u)\n",
+				__func__, fbdev->pitch, var->xres_virtual,
+				var->bits_per_pixel);
+		valid_format = false;
+	}
+
+	if (valid_format && var->yres_virtual > fbdev->default_yres_virtual) {
+		dev_dbg(info->dev, "%s: fb height too small for var (h = %u, yres_virtual = %u)\n",
+				__func__, fbdev->default_yres_virtual,
+				var->yres_virtual);
+		valid_format = false;
+	}
+
+	if (valid_format) {
+		var->activate = info->var.activate;
+		var->height = info->var.height;
+		var->width = info->var.width;
+		var->accel_flags = info->var.accel_flags;
+		var->rotate = info->var.rotate;
+		var->colorspace = info->var.colorspace;
+		/* userspace can't change these */
+	} else {
+		/*
+		 * if any part of the format is invalid then fixing it up is
+		 * impractical, so save just the modesetting bits and
+		 * overwrite everything else
+		 */
+		struct fb_videomode mode;
+		fb_var_to_videomode(&mode, var);
+		memcpy(var, &info->var, sizeof(*var));
+		fb_videomode_to_var(var, &mode);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(adf_fbdev_check_var);
+
+/**
+ * adf_fbdev_set_par - default implementation of fbdev set_par op
+ */
+int adf_fbdev_set_par(struct fb_info *info)
+{
+	struct adf_fbdev *fbdev = info->par;
+	struct adf_interface *intf = fbdev->intf;
+	struct fb_videomode vmode;
+	struct drm_mode_modeinfo mode;
+	int ret;
+	u32 format = drm_fourcc_from_fb_var(&info->var);
+
+	fb_var_to_videomode(&vmode, &info->var);
+	adf_modeinfo_from_fb_videomode(&vmode, &mode);
+	ret = adf_interface_set_mode(intf, &mode);
+	if (ret < 0)
+		return ret;
+
+	ret = adf_fbdev_post(fbdev);
+	if (ret < 0)
+		return ret;
+
+	if (format != fbdev->format)
+		adf_fbdev_set_format(fbdev, format);
+
+	return 0;
+}
+EXPORT_SYMBOL(adf_fbdev_set_par);
+
+/**
+ * adf_fbdev_blank - default implementation of fbdev blank op
+ */
+int adf_fbdev_blank(int blank, struct fb_info *info)
+{
+	struct adf_fbdev *fbdev = info->par;
+	struct adf_interface *intf = fbdev->intf;
+	u8 dpms_state;
+
+	switch (blank) {
+	case FB_BLANK_UNBLANK:
+		dpms_state = DRM_MODE_DPMS_ON;
+		break;
+	case FB_BLANK_NORMAL:
+		dpms_state = DRM_MODE_DPMS_STANDBY;
+		break;
+	case FB_BLANK_VSYNC_SUSPEND:
+		dpms_state = DRM_MODE_DPMS_SUSPEND;
+		break;
+	case FB_BLANK_HSYNC_SUSPEND:
+		dpms_state = DRM_MODE_DPMS_STANDBY;
+		break;
+	case FB_BLANK_POWERDOWN:
+		dpms_state = DRM_MODE_DPMS_OFF;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return adf_interface_blank(intf, dpms_state);
+}
+EXPORT_SYMBOL(adf_fbdev_blank);
+
+/**
+ * adf_fbdev_pan_display - default implementation of fbdev pan_display op
+ */
+int adf_fbdev_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+	struct adf_fbdev *fbdev = info->par;
+	return adf_fbdev_post(fbdev);
+}
+EXPORT_SYMBOL(adf_fbdev_pan_display);
+
+/**
+ * adf_fbdev_mmap - default implementation of fbdev mmap op
+ */
+int adf_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+	struct adf_fbdev *fbdev = info->par;
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	return dma_buf_mmap(fbdev->dma_buf, vma, 0);
+}
+EXPORT_SYMBOL(adf_fbdev_mmap);
+
+/**
+ * adf_fbdev_init - initialize helper to wrap ADF device in fbdev API
+ *
+ * @fbdev: the fbdev helper
+ * @interface: the ADF interface that will display the framebuffer
+ * @eng: the ADF overlay engine that will scan out the framebuffer
+ * @xres_virtual: the virtual width of the framebuffer
+ * @yres_virtual: the virtual height of the framebuffer
+ * @format: the format of the framebuffer
+ * @fbops: the device's fbdev ops
+ * @fmt: formatting for the framebuffer identification string
+ * @...: variable arguments
+ *
+ * @format must be a standard, non-indexed RGB format, i.e.,
+ * adf_format_is_rgb(@format) && @format != %DRM_FORMAT_C8.
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_fbdev_init(struct adf_fbdev *fbdev, struct adf_interface *interface,
+		struct adf_overlay_engine *eng,
+		u16 xres_virtual, u16 yres_virtual, u32 format,
+		struct fb_ops *fbops, const char *fmt, ...)
+{
+	struct adf_device *parent = adf_interface_parent(interface);
+	struct device *dev = &parent->base.dev;
+	u16 width_mm, height_mm;
+	va_list args;
+	int ret;
+
+	if (!adf_format_is_rgb(format) ||
+			format == DRM_FORMAT_C8) {
+		dev_err(dev, "fbdev helper does not support format %u\n",
+				format);
+		return -EINVAL;
+	}
+
+	memset(fbdev, 0, sizeof(*fbdev));
+	fbdev->intf = interface;
+	fbdev->eng = eng;
+	fbdev->info = framebuffer_alloc(0, dev);
+	if (!fbdev->info) {
+		dev_err(dev, "allocating framebuffer device failed\n");
+		return -ENOMEM;
+	}
+	mutex_init(&fbdev->refcount_lock);
+	fbdev->default_xres_virtual = xres_virtual;
+	fbdev->default_yres_virtual = yres_virtual;
+	fbdev->default_format = format;
+
+	fbdev->info->flags = FBINFO_FLAG_DEFAULT;
+	ret = adf_interface_get_screen_size(interface, &width_mm, &height_mm);
+	if (ret < 0) {
+		width_mm = 0;
+		height_mm = 0;
+	}
+	fbdev->info->var.width = width_mm;
+	fbdev->info->var.height = height_mm;
+	fbdev->info->var.activate = FB_ACTIVATE_VBL;
+	va_start(args, fmt);
+	vsnprintf(fbdev->info->fix.id, sizeof(fbdev->info->fix.id), fmt, args);
+	va_end(args);
+	fbdev->info->fix.type = FB_TYPE_PACKED_PIXELS;
+	fbdev->info->fix.visual = FB_VISUAL_TRUECOLOR;
+	fbdev->info->fix.xpanstep = 1;
+	fbdev->info->fix.ypanstep = 1;
+	INIT_LIST_HEAD(&fbdev->info->modelist);
+	fbdev->info->fbops = fbops;
+	fbdev->info->pseudo_palette = fbdev->pseudo_palette;
+	fbdev->info->par = fbdev;
+
+	ret = register_framebuffer(fbdev->info);
+	if (ret < 0) {
+		dev_err(dev, "registering framebuffer failed: %d\n", ret);
+		framebuffer_release(fbdev->info);
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(adf_fbdev_init);
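+
+/*
+ * Example (illustrative sketch, with hypothetical names): a driver wiring
+ * the default helpers into its fb_ops before calling adf_fbdev_init():
+ *
+ *	static struct fb_ops my_fb_ops = {
+ *		.owner = THIS_MODULE,
+ *		.fb_open = adf_fbdev_open,
+ *		.fb_release = adf_fbdev_release,
+ *		.fb_check_var = adf_fbdev_check_var,
+ *		.fb_set_par = adf_fbdev_set_par,
+ *		.fb_blank = adf_fbdev_blank,
+ *		.fb_pan_display = adf_fbdev_pan_display,
+ *		.fb_mmap = adf_fbdev_mmap,
+ *	};
+ *
+ *	ret = adf_fbdev_init(&my_fbdev, intf, eng, 1280, 720,
+ *			DRM_FORMAT_RGBX8888, &my_fb_ops, "my-fb");
+ */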
+
+/**
+ * adf_fbdev_destroy - destroy helper to wrap ADF device in fbdev API
+ *
+ * @fbdev: the fbdev helper
+ */
+void adf_fbdev_destroy(struct adf_fbdev *fbdev)
+{
+	unregister_framebuffer(fbdev->info);
+	BUG_ON(fbdev->refcount);
+	mutex_destroy(&fbdev->refcount_lock);
+	framebuffer_release(fbdev->info);
+}
+EXPORT_SYMBOL(adf_fbdev_destroy);
diff --git a/drivers/video/adf/adf_fops.c b/drivers/video/adf/adf_fops.c
new file mode 100644
index 0000000..7fbf33e
--- /dev/null
+++ b/drivers/video/adf/adf_fops.c
@@ -0,0 +1,957 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/circ_buf.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <video/adf_client.h>
+#include <video/adf_format.h>
+
+#include "sw_sync.h"
+#include "sync.h"
+
+#include "adf.h"
+#include "adf_fops.h"
+#include "adf_sysfs.h"
+
+#ifdef CONFIG_COMPAT
+#include "adf_fops32.h"
+#endif
+
+static int adf_obj_set_event(struct adf_obj *obj, struct adf_file *file,
+		struct adf_set_event __user *arg)
+{
+	struct adf_set_event data;
+	bool enabled;
+	unsigned long flags;
+	int err;
+
+	if (copy_from_user(&data, arg, sizeof(data)))
+		return -EFAULT;
+
+	err = adf_obj_check_supports_event(obj, data.type);
+	if (err < 0)
+		return err;
+
+	spin_lock_irqsave(&obj->file_lock, flags);
+	if (data.enabled)
+		enabled = test_and_set_bit(data.type,
+				file->event_subscriptions);
+	else
+		enabled = test_and_clear_bit(data.type,
+				file->event_subscriptions);
+	spin_unlock_irqrestore(&obj->file_lock, flags);
+
+	if (data.enabled == enabled)
+		return -EALREADY;
+
+	if (data.enabled)
+		adf_event_get(obj, data.type);
+	else
+		adf_event_put(obj, data.type);
+
+	return 0;
+}
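+
+/*
+ * Example (illustrative sketch): the userspace side of this ioctl, assuming
+ * the ADF uapi header is included and "fd" is an open ADF object node:
+ *
+ *	struct adf_set_event event = {
+ *		.type = ADF_EVENT_VSYNC,
+ *		.enabled = true,
+ *	};
+ *	if (ioctl(fd, ADF_SET_EVENT, &event) < 0 && errno != EALREADY)
+ *		perror("ADF_SET_EVENT");
+ */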
+
+static int adf_obj_copy_custom_data_to_user(struct adf_obj *obj,
+		void __user *dst, size_t *dst_size)
+{
+	void *custom_data;
+	size_t custom_data_size;
+	int ret;
+
+	if (!obj->ops || !obj->ops->custom_data) {
+		dev_dbg(&obj->dev, "%s: no custom_data op\n", __func__);
+		return 0;
+	}
+
+	custom_data = kzalloc(ADF_MAX_CUSTOM_DATA_SIZE, GFP_KERNEL);
+	if (!custom_data)
+		return -ENOMEM;
+
+	ret = obj->ops->custom_data(obj, custom_data, &custom_data_size);
+	if (ret < 0)
+		goto done;
+
+	if (copy_to_user(dst, custom_data, min(*dst_size, custom_data_size))) {
+		ret = -EFAULT;
+		goto done;
+	}
+	*dst_size = custom_data_size;
+
+done:
+	kfree(custom_data);
+	return ret;
+}
+
+static int adf_eng_get_data(struct adf_overlay_engine *eng,
+		struct adf_overlay_engine_data __user *arg)
+{
+	struct adf_device *dev = adf_overlay_engine_parent(eng);
+	struct adf_overlay_engine_data data;
+	size_t n_supported_formats;
+	u32 *supported_formats = NULL;
+	int ret = 0;
+
+	if (copy_from_user(&data, arg, sizeof(data)))
+		return -EFAULT;
+
+	strlcpy(data.name, eng->base.name, sizeof(data.name));
+
+	if (data.n_supported_formats > ADF_MAX_SUPPORTED_FORMATS)
+		return -EINVAL;
+
+	n_supported_formats = data.n_supported_formats;
+	data.n_supported_formats = eng->ops->n_supported_formats;
+
+	if (n_supported_formats) {
+		supported_formats = kzalloc(n_supported_formats *
+				sizeof(supported_formats[0]), GFP_KERNEL);
+		if (!supported_formats)
+			return -ENOMEM;
+	}
+
+	memcpy(supported_formats, eng->ops->supported_formats,
+			sizeof(u32) * min(n_supported_formats,
+					eng->ops->n_supported_formats));
+
+	mutex_lock(&dev->client_lock);
+	ret = adf_obj_copy_custom_data_to_user(&eng->base, arg->custom_data,
+			&data.custom_data_size);
+	mutex_unlock(&dev->client_lock);
+
+	if (ret < 0)
+		goto done;
+
+	if (copy_to_user(arg, &data, sizeof(data))) {
+		ret = -EFAULT;
+		goto done;
+	}
+
+	if (supported_formats && copy_to_user(arg->supported_formats,
+			supported_formats,
+			n_supported_formats * sizeof(supported_formats[0])))
+		ret = -EFAULT;
+
+done:
+	kfree(supported_formats);
+	return ret;
+}
+
+static int adf_buffer_import(struct adf_device *dev,
+		struct adf_buffer_config __user *cfg, struct adf_buffer *buf)
+{
+	struct adf_buffer_config user_buf;
+	size_t i;
+	int ret = 0;
+
+	if (copy_from_user(&user_buf, cfg, sizeof(user_buf)))
+		return -EFAULT;
+
+	memset(buf, 0, sizeof(*buf));
+
+	if (user_buf.n_planes > ADF_MAX_PLANES) {
+		dev_err(&dev->base.dev, "invalid plane count %u\n",
+				user_buf.n_planes);
+		return -EINVAL;
+	}
+
+	buf->overlay_engine = idr_find(&dev->overlay_engines,
+			user_buf.overlay_engine);
+	if (!buf->overlay_engine) {
+		dev_err(&dev->base.dev, "invalid overlay engine id %u\n",
+				user_buf.overlay_engine);
+		return -ENOENT;
+	}
+
+	buf->w = user_buf.w;
+	buf->h = user_buf.h;
+	buf->format = user_buf.format;
+	for (i = 0; i < user_buf.n_planes; i++) {
+		buf->dma_bufs[i] = dma_buf_get(user_buf.fd[i]);
+		if (IS_ERR(buf->dma_bufs[i])) {
+			ret = PTR_ERR(buf->dma_bufs[i]);
+			dev_err(&dev->base.dev, "importing dma_buf fd %d failed: %d\n",
+					user_buf.fd[i], ret);
+			buf->dma_bufs[i] = NULL;
+			goto done;
+		}
+		buf->offset[i] = user_buf.offset[i];
+		buf->pitch[i] = user_buf.pitch[i];
+	}
+	buf->n_planes = user_buf.n_planes;
+
+	if (user_buf.acquire_fence >= 0) {
+		buf->acquire_fence = sync_fence_fdget(user_buf.acquire_fence);
+		if (!buf->acquire_fence) {
+			dev_err(&dev->base.dev, "getting fence fd %d failed\n",
+					user_buf.acquire_fence);
+			ret = -EINVAL;
+			goto done;
+		}
+	}
+
+done:
+	if (ret < 0)
+		adf_buffer_cleanup(buf);
+	return ret;
+}
+
+static int adf_device_post_config(struct adf_device *dev,
+		struct adf_post_config __user *arg)
+{
+	struct sync_fence *complete_fence;
+	int complete_fence_fd;
+	struct adf_buffer *bufs = NULL;
+	struct adf_interface **intfs = NULL;
+	size_t n_intfs, n_bufs, i;
+	void *custom_data = NULL;
+	size_t custom_data_size;
+	int ret = 0;
+
+	complete_fence_fd = get_unused_fd();
+	if (complete_fence_fd < 0)
+		return complete_fence_fd;
+
+	if (get_user(n_intfs, &arg->n_interfaces)) {
+		ret = -EFAULT;
+		goto err_get_user;
+	}
+
+	if (n_intfs > ADF_MAX_INTERFACES) {
+		ret = -EINVAL;
+		goto err_get_user;
+	}
+
+	if (get_user(n_bufs, &arg->n_bufs)) {
+		ret = -EFAULT;
+		goto err_get_user;
+	}
+
+	if (n_bufs > ADF_MAX_BUFFERS) {
+		ret = -EINVAL;
+		goto err_get_user;
+	}
+
+	if (get_user(custom_data_size, &arg->custom_data_size)) {
+		ret = -EFAULT;
+		goto err_get_user;
+	}
+
+	if (custom_data_size > ADF_MAX_CUSTOM_DATA_SIZE) {
+		ret = -EINVAL;
+		goto err_get_user;
+	}
+
+	if (n_intfs) {
+		intfs = kmalloc(sizeof(intfs[0]) * n_intfs, GFP_KERNEL);
+		if (!intfs) {
+			ret = -ENOMEM;
+			goto err_get_user;
+		}
+	}
+
+	for (i = 0; i < n_intfs; i++) {
+		u32 intf_id;
+		if (get_user(intf_id, &arg->interfaces[i])) {
+			ret = -EFAULT;
+			goto err_get_user;
+		}
+
+		intfs[i] = idr_find(&dev->interfaces, intf_id);
+		if (!intfs[i]) {
+			ret = -EINVAL;
+			goto err_get_user;
+		}
+	}
+
+	if (n_bufs) {
+		bufs = kzalloc(sizeof(bufs[0]) * n_bufs, GFP_KERNEL);
+		if (!bufs) {
+			ret = -ENOMEM;
+			goto err_get_user;
+		}
+	}
+
+	for (i = 0; i < n_bufs; i++) {
+		ret = adf_buffer_import(dev, &arg->bufs[i], &bufs[i]);
+		if (ret < 0) {
+			memset(&bufs[i], 0, sizeof(bufs[i]));
+			goto err_import;
+		}
+	}
+
+	if (custom_data_size) {
+		custom_data = kzalloc(custom_data_size, GFP_KERNEL);
+		if (!custom_data) {
+			ret = -ENOMEM;
+			goto err_import;
+		}
+
+		if (copy_from_user(custom_data, arg->custom_data,
+				custom_data_size)) {
+			ret = -EFAULT;
+			goto err_import;
+		}
+	}
+
+	if (put_user(complete_fence_fd, &arg->complete_fence)) {
+		ret = -EFAULT;
+		goto err_import;
+	}
+
+	complete_fence = adf_device_post_nocopy(dev, intfs, n_intfs, bufs,
+			n_bufs, custom_data, custom_data_size);
+	if (IS_ERR(complete_fence)) {
+		ret = PTR_ERR(complete_fence);
+		goto err_import;
+	}
+
+	sync_fence_install(complete_fence, complete_fence_fd);
+	return 0;
+
+err_import:
+	for (i = 0; i < n_bufs; i++)
+		adf_buffer_cleanup(&bufs[i]);
+
+err_get_user:
+	kfree(custom_data);
+	kfree(bufs);
+	kfree(intfs);
+	put_unused_fd(complete_fence_fd);
+	return ret;
+}
+
+static int adf_intf_simple_post_config(struct adf_interface *intf,
+		struct adf_simple_post_config __user *arg)
+{
+	struct adf_device *dev = intf->base.parent;
+	struct sync_fence *complete_fence;
+	int complete_fence_fd;
+	struct adf_buffer buf;
+	int ret = 0;
+
+	complete_fence_fd = get_unused_fd();
+	if (complete_fence_fd < 0)
+		return complete_fence_fd;
+
+	ret = adf_buffer_import(dev, &arg->buf, &buf);
+	if (ret < 0)
+		goto err_import;
+
+	if (put_user(complete_fence_fd, &arg->complete_fence)) {
+		ret = -EFAULT;
+		goto err_put_user;
+	}
+
+	complete_fence = adf_interface_simple_post(intf, &buf);
+	if (IS_ERR(complete_fence)) {
+		ret = PTR_ERR(complete_fence);
+		goto err_put_user;
+	}
+
+	sync_fence_install(complete_fence, complete_fence_fd);
+	return 0;
+
+err_put_user:
+	adf_buffer_cleanup(&buf);
+err_import:
+	put_unused_fd(complete_fence_fd);
+	return ret;
+}
+
+static int adf_intf_simple_buffer_alloc(struct adf_interface *intf,
+		struct adf_simple_buffer_alloc __user *arg)
+{
+	struct adf_simple_buffer_alloc data;
+	struct dma_buf *dma_buf;
+	int ret = 0;
+
+	if (copy_from_user(&data, arg, sizeof(data)))
+		return -EFAULT;
+
+	data.fd = get_unused_fd_flags(O_CLOEXEC);
+	if (data.fd < 0)
+		return data.fd;
+
+	ret = adf_interface_simple_buffer_alloc(intf, data.w, data.h,
+			data.format, &dma_buf, &data.offset, &data.pitch);
+	if (ret < 0)
+		goto err_alloc;
+
+	if (copy_to_user(arg, &data, sizeof(*arg))) {
+		ret = -EFAULT;
+		goto err_copy;
+	}
+
+	fd_install(data.fd, dma_buf->file);
+	return 0;
+
+err_copy:
+	dma_buf_put(dma_buf);
+
+err_alloc:
+	put_unused_fd(data.fd);
+	return ret;
+}
+
+static int adf_copy_attachment_list_to_user(
+		struct adf_attachment_config __user *to, size_t n_to,
+		struct adf_attachment *from, size_t n_from)
+{
+	struct adf_attachment_config *temp;
+	size_t n = min(n_to, n_from);
+	size_t i;
+	int ret = 0;
+
+	if (!n)
+		return 0;
+
+	temp = kzalloc(n * sizeof(temp[0]), GFP_KERNEL);
+	if (!temp)
+		return -ENOMEM;
+
+	for (i = 0; i < n; i++) {
+		temp[i].interface = from[i].interface->base.id;
+		temp[i].overlay_engine = from[i].overlay_engine->base.id;
+	}
+
+	if (copy_to_user(to, temp, n * sizeof(to[0]))) {
+		ret = -EFAULT;
+		goto done;
+	}
+
+done:
+	kfree(temp);
+	return ret;
+}
+
+static int adf_device_get_data(struct adf_device *dev,
+		struct adf_device_data __user *arg)
+{
+	struct adf_device_data data;
+	size_t n_attach;
+	struct adf_attachment *attach = NULL;
+	size_t n_allowed_attach;
+	struct adf_attachment *allowed_attach = NULL;
+	int ret = 0;
+
+	if (copy_from_user(&data, arg, sizeof(data)))
+		return -EFAULT;
+
+	if (data.n_attachments > ADF_MAX_ATTACHMENTS ||
+			data.n_allowed_attachments > ADF_MAX_ATTACHMENTS)
+		return -EINVAL;
+
+	strlcpy(data.name, dev->base.name, sizeof(data.name));
+
+	if (data.n_attachments) {
+		attach = kzalloc(data.n_attachments * sizeof(attach[0]),
+				GFP_KERNEL);
+		if (!attach)
+			return -ENOMEM;
+	}
+	n_attach = adf_device_attachments(dev, attach, data.n_attachments);
+
+	if (data.n_allowed_attachments) {
+		allowed_attach = kzalloc(data.n_allowed_attachments *
+				sizeof(allowed_attach[0]), GFP_KERNEL);
+		if (!allowed_attach) {
+			ret = -ENOMEM;
+			goto done;
+		}
+	}
+	n_allowed_attach = adf_device_attachments_allowed(dev, allowed_attach,
+			data.n_allowed_attachments);
+
+	mutex_lock(&dev->client_lock);
+	ret = adf_obj_copy_custom_data_to_user(&dev->base, arg->custom_data,
+			&data.custom_data_size);
+	mutex_unlock(&dev->client_lock);
+
+	if (ret < 0)
+		goto done;
+
+	ret = adf_copy_attachment_list_to_user(arg->attachments,
+			data.n_attachments, attach, n_attach);
+	if (ret < 0)
+		goto done;
+
+	ret = adf_copy_attachment_list_to_user(arg->allowed_attachments,
+			data.n_allowed_attachments, allowed_attach,
+			n_allowed_attach);
+	if (ret < 0)
+		goto done;
+
+	data.n_attachments = n_attach;
+	data.n_allowed_attachments = n_allowed_attach;
+
+	if (copy_to_user(arg, &data, sizeof(data)))
+		ret = -EFAULT;
+
+done:
+	kfree(allowed_attach);
+	kfree(attach);
+	return ret;
+}
+
+static int adf_device_handle_attachment(struct adf_device *dev,
+		struct adf_attachment_config __user *arg, bool attach)
+{
+	struct adf_attachment_config data;
+	struct adf_overlay_engine *eng;
+	struct adf_interface *intf;
+
+	if (copy_from_user(&data, arg, sizeof(data)))
+		return -EFAULT;
+
+	eng = idr_find(&dev->overlay_engines, data.overlay_engine);
+	if (!eng) {
+		dev_err(&dev->base.dev, "invalid overlay engine id %u\n",
+				data.overlay_engine);
+		return -EINVAL;
+	}
+
+	intf = idr_find(&dev->interfaces, data.interface);
+	if (!intf) {
+		dev_err(&dev->base.dev, "invalid interface id %u\n",
+				data.interface);
+		return -EINVAL;
+	}
+
+	if (attach)
+		return adf_device_attach(dev, eng, intf);
+	else
+		return adf_device_detach(dev, eng, intf);
+}
+
+static int adf_intf_set_mode(struct adf_interface *intf,
+		struct drm_mode_modeinfo __user *arg)
+{
+	struct drm_mode_modeinfo mode;
+
+	if (copy_from_user(&mode, arg, sizeof(mode)))
+		return -EFAULT;
+
+	return adf_interface_set_mode(intf, &mode);
+}
+
+static int adf_intf_get_data(struct adf_interface *intf,
+		struct adf_interface_data __user *arg)
+{
+	struct adf_device *dev = adf_interface_parent(intf);
+	struct adf_interface_data data;
+	struct drm_mode_modeinfo *modelist;
+	size_t modelist_size;
+	int err;
+	int ret = 0;
+	unsigned long flags;
+
+	if (copy_from_user(&data, arg, sizeof(data)))
+		return -EFAULT;
+
+	strlcpy(data.name, intf->base.name, sizeof(data.name));
+
+	data.type = intf->type;
+	data.id = intf->idx;
+	data.flags = intf->flags;
+
+	err = adf_interface_get_screen_size(intf, &data.width_mm,
+			&data.height_mm);
+	if (err < 0) {
+		data.width_mm = 0;
+		data.height_mm = 0;
+	}
+
+	modelist = kmalloc(sizeof(modelist[0]) * ADF_MAX_MODES, GFP_KERNEL);
+	if (!modelist)
+		return -ENOMEM;
+
+	mutex_lock(&dev->client_lock);
+	read_lock_irqsave(&intf->hotplug_modelist_lock, flags);
+	data.hotplug_detect = intf->hotplug_detect;
+	modelist_size = min(data.n_available_modes, intf->n_modes) *
+			sizeof(intf->modelist[0]);
+	memcpy(modelist, intf->modelist, modelist_size);
+	data.n_available_modes = intf->n_modes;
+	read_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);
+
+	if (copy_to_user(arg->available_modes, modelist, modelist_size)) {
+		ret = -EFAULT;
+		goto done;
+	}
+
+	data.dpms_state = intf->dpms_state;
+	memcpy(&data.current_mode, &intf->current_mode,
+			sizeof(intf->current_mode));
+
+	ret = adf_obj_copy_custom_data_to_user(&intf->base, arg->custom_data,
+			&data.custom_data_size);
+done:
+	mutex_unlock(&dev->client_lock);
+	kfree(modelist);
+
+	if (ret < 0)
+		return ret;
+
+	if (copy_to_user(arg, &data, sizeof(data)))
+		ret = -EFAULT;
+
+	return ret;
+}
+
+static inline long adf_obj_custom_ioctl(struct adf_obj *obj, unsigned int cmd,
+		unsigned long arg)
+{
+	if (obj->ops && obj->ops->ioctl)
+		return obj->ops->ioctl(obj, cmd, arg);
+	return -ENOTTY;
+}
+
+static long adf_overlay_engine_ioctl(struct adf_overlay_engine *eng,
+		struct adf_file *file, unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case ADF_SET_EVENT:
+		return adf_obj_set_event(&eng->base, file,
+				(struct adf_set_event __user *)arg);
+
+	case ADF_GET_OVERLAY_ENGINE_DATA:
+		return adf_eng_get_data(eng,
+			(struct adf_overlay_engine_data __user *)arg);
+
+	case ADF_BLANK:
+	case ADF_POST_CONFIG:
+	case ADF_SET_MODE:
+	case ADF_GET_DEVICE_DATA:
+	case ADF_GET_INTERFACE_DATA:
+	case ADF_SIMPLE_POST_CONFIG:
+	case ADF_SIMPLE_BUFFER_ALLOC:
+	case ADF_ATTACH:
+	case ADF_DETACH:
+		return -EINVAL;
+
+	default:
+		return adf_obj_custom_ioctl(&eng->base, cmd, arg);
+	}
+}
+
+static long adf_interface_ioctl(struct adf_interface *intf,
+		struct adf_file *file, unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case ADF_SET_EVENT:
+		return adf_obj_set_event(&intf->base, file,
+				(struct adf_set_event __user *)arg);
+
+	case ADF_BLANK:
+		return adf_interface_blank(intf, arg);
+
+	case ADF_SET_MODE:
+		return adf_intf_set_mode(intf,
+				(struct drm_mode_modeinfo __user *)arg);
+
+	case ADF_GET_INTERFACE_DATA:
+		return adf_intf_get_data(intf,
+				(struct adf_interface_data __user *)arg);
+
+	case ADF_SIMPLE_POST_CONFIG:
+		return adf_intf_simple_post_config(intf,
+				(struct adf_simple_post_config __user *)arg);
+
+	case ADF_SIMPLE_BUFFER_ALLOC:
+		return adf_intf_simple_buffer_alloc(intf,
+				(struct adf_simple_buffer_alloc __user *)arg);
+
+	case ADF_POST_CONFIG:
+	case ADF_GET_DEVICE_DATA:
+	case ADF_GET_OVERLAY_ENGINE_DATA:
+	case ADF_ATTACH:
+	case ADF_DETACH:
+		return -EINVAL;
+
+	default:
+		return adf_obj_custom_ioctl(&intf->base, cmd, arg);
+	}
+}
+
+static long adf_device_ioctl(struct adf_device *dev, struct adf_file *file,
+		unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case ADF_SET_EVENT:
+		return adf_obj_set_event(&dev->base, file,
+				(struct adf_set_event __user *)arg);
+
+	case ADF_POST_CONFIG:
+		return adf_device_post_config(dev,
+				(struct adf_post_config __user *)arg);
+
+	case ADF_GET_DEVICE_DATA:
+		return adf_device_get_data(dev,
+				(struct adf_device_data __user *)arg);
+
+	case ADF_ATTACH:
+		return adf_device_handle_attachment(dev,
+				(struct adf_attachment_config __user *)arg,
+				true);
+
+	case ADF_DETACH:
+		return adf_device_handle_attachment(dev,
+				(struct adf_attachment_config __user *)arg,
+				false);
+
+	case ADF_BLANK:
+	case ADF_SET_MODE:
+	case ADF_GET_INTERFACE_DATA:
+	case ADF_GET_OVERLAY_ENGINE_DATA:
+	case ADF_SIMPLE_POST_CONFIG:
+	case ADF_SIMPLE_BUFFER_ALLOC:
+		return -EINVAL;
+
+	default:
+		return adf_obj_custom_ioctl(&dev->base, cmd, arg);
+	}
+}
+
+static int adf_file_open(struct inode *inode, struct file *file)
+{
+	struct adf_obj *obj;
+	struct adf_file *fpriv = NULL;
+	unsigned long flags;
+	int ret = 0;
+
+	obj = adf_obj_sysfs_find(iminor(inode));
+	if (!obj)
+		return -ENODEV;
+
+	dev_dbg(&obj->dev, "opening %s\n", dev_name(&obj->dev));
+
+	if (!try_module_get(obj->parent->ops->owner)) {
+		dev_err(&obj->dev, "getting owner module failed\n");
+		return -ENODEV;
+	}
+
+	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+	if (!fpriv) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	INIT_LIST_HEAD(&fpriv->head);
+	fpriv->obj = obj;
+	init_waitqueue_head(&fpriv->event_wait);
+
+	file->private_data = fpriv;
+
+	if (obj->ops && obj->ops->open) {
+		ret = obj->ops->open(obj, inode, file);
+		if (ret < 0)
+			goto done;
+	}
+
+	spin_lock_irqsave(&obj->file_lock, flags);
+	list_add_tail(&fpriv->head, &obj->file_list);
+	spin_unlock_irqrestore(&obj->file_lock, flags);
+
+done:
+	if (ret < 0) {
+		kfree(fpriv);
+		module_put(obj->parent->ops->owner);
+	}
+	return ret;
+}
+
+static int adf_file_release(struct inode *inode, struct file *file)
+{
+	struct adf_file *fpriv = file->private_data;
+	struct adf_obj *obj = fpriv->obj;
+	enum adf_event_type event_type;
+	unsigned long flags;
+
+	if (obj->ops && obj->ops->release)
+		obj->ops->release(obj, inode, file);
+
+	spin_lock_irqsave(&obj->file_lock, flags);
+	list_del(&fpriv->head);
+	spin_unlock_irqrestore(&obj->file_lock, flags);
+
+	for_each_set_bit(event_type, fpriv->event_subscriptions,
+			ADF_EVENT_TYPE_MAX) {
+		adf_event_put(obj, event_type);
+	}
+
+	kfree(fpriv);
+	module_put(obj->parent->ops->owner);
+
+	dev_dbg(&obj->dev, "released %s\n", dev_name(&obj->dev));
+	return 0;
+}
+
+long adf_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct adf_file *fpriv = file->private_data;
+	struct adf_obj *obj = fpriv->obj;
+	long ret = -EINVAL;
+
+	dev_dbg(&obj->dev, "%s ioctl %u\n", dev_name(&obj->dev), _IOC_NR(cmd));
+
+	switch (obj->type) {
+	case ADF_OBJ_OVERLAY_ENGINE:
+		ret = adf_overlay_engine_ioctl(adf_obj_to_overlay_engine(obj),
+				fpriv, cmd, arg);
+		break;
+
+	case ADF_OBJ_INTERFACE:
+		ret = adf_interface_ioctl(adf_obj_to_interface(obj), fpriv, cmd,
+				arg);
+		break;
+
+	case ADF_OBJ_DEVICE:
+		ret = adf_device_ioctl(adf_obj_to_device(obj), fpriv, cmd, arg);
+		break;
+	}
+
+	return ret;
+}
+
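+/*
+ * Pending events live in a per-file circular buffer indexed with the
+ * CIRC_* helpers; event_buf's size is a power of two, so head/tail wrap
+ * with a simple "& (size - 1)".
+ */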
+static inline bool adf_file_event_available(struct adf_file *fpriv)
+{
+	int head = fpriv->event_head;
+	int tail = fpriv->event_tail;
+	return CIRC_CNT(head, tail, sizeof(fpriv->event_buf)) != 0;
+}
+
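+/*
+ * Append an event to the file's circular buffer. If the buffer is too
+ * full to hold the whole event, the event is dropped with only a debug
+ * message: delivery to readers is best-effort.
+ */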
+void adf_file_queue_event(struct adf_file *fpriv, struct adf_event *event)
+{
+	int head = fpriv->event_head;
+	int tail = fpriv->event_tail;
+	size_t space = CIRC_SPACE(head, tail, sizeof(fpriv->event_buf));
+	size_t space_to_end =
+			CIRC_SPACE_TO_END(head, tail, sizeof(fpriv->event_buf));
+
+	if (space < event->length) {
+		dev_dbg(&fpriv->obj->dev,
+				"insufficient buffer space for event %u\n",
+				event->type);
+		return;
+	}
+
+	if (space_to_end >= event->length) {
+		memcpy(fpriv->event_buf + head, event, event->length);
+	} else {
+		memcpy(fpriv->event_buf + head, event, space_to_end);
+		memcpy(fpriv->event_buf, (u8 *)event + space_to_end,
+				event->length - space_to_end);
+	}
+
+	smp_wmb();
+	fpriv->event_head = (fpriv->event_head + event->length) &
+			(sizeof(fpriv->event_buf) - 1);
+	wake_up_interruptible_all(&fpriv->event_wait);
+}
+
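+/*
+ * Drain buffered events to userspace. Events are staged through a
+ * kmalloc'd bounce buffer so that copy_to_user(), which may fault and
+ * sleep, runs outside the irq-safe file_lock.
+ */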
+static ssize_t adf_file_copy_to_user(struct adf_file *fpriv,
+		char __user *buffer, size_t buffer_size)
+{
+	int head, tail;
+	u8 *event_buf;
+	size_t cnt, cnt_to_end, copy_size = 0;
+	ssize_t ret = 0;
+	unsigned long flags;
+
+	event_buf = kmalloc(min(buffer_size, sizeof(fpriv->event_buf)),
+			GFP_KERNEL);
+	if (!event_buf)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&fpriv->obj->file_lock, flags);
+
+	if (!adf_file_event_available(fpriv))
+		goto out;
+
+	head = fpriv->event_head;
+	tail = fpriv->event_tail;
+
+	cnt = CIRC_CNT(head, tail, sizeof(fpriv->event_buf));
+	cnt_to_end = CIRC_CNT_TO_END(head, tail, sizeof(fpriv->event_buf));
+	copy_size = min(buffer_size, cnt);
+
+	if (cnt_to_end >= copy_size) {
+		memcpy(event_buf, fpriv->event_buf + tail, copy_size);
+	} else {
+		memcpy(event_buf, fpriv->event_buf + tail, cnt_to_end);
+		memcpy(event_buf + cnt_to_end, fpriv->event_buf,
+				copy_size - cnt_to_end);
+	}
+
+	fpriv->event_tail = (fpriv->event_tail + copy_size) &
+			(sizeof(fpriv->event_buf) - 1);
+
+out:
+	spin_unlock_irqrestore(&fpriv->obj->file_lock, flags);
+	if (copy_size) {
+		if (copy_to_user(buffer, event_buf, copy_size))
+			ret = -EFAULT;
+		else
+			ret = copy_size;
+	}
+	kfree(event_buf);
+	return ret;
+}
+
+ssize_t adf_file_read(struct file *filp, char __user *buffer,
+		 size_t count, loff_t *offset)
+{
+	struct adf_file *fpriv = filp->private_data;
+	int err;
+
+	err = wait_event_interruptible(fpriv->event_wait,
+			adf_file_event_available(fpriv));
+	if (err < 0)
+		return err;
+
+	return adf_file_copy_to_user(fpriv, buffer, count);
+}
+
+unsigned int adf_file_poll(struct file *filp, struct poll_table_struct *wait)
+{
+	struct adf_file *fpriv = filp->private_data;
+	unsigned int mask = 0;
+
+	poll_wait(filp, &fpriv->event_wait, wait);
+
+	if (adf_file_event_available(fpriv))
+		mask |= POLLIN | POLLRDNORM;
+
+	return mask;
+}
+
+const struct file_operations adf_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = adf_file_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = adf_file_compat_ioctl,
+#endif
+	.open = adf_file_open,
+	.release = adf_file_release,
+	.llseek = default_llseek,
+	.read = adf_file_read,
+	.poll = adf_file_poll,
+};
diff --git a/drivers/video/adf/adf_fops.h b/drivers/video/adf/adf_fops.h
new file mode 100644
index 0000000..90a3a74
--- /dev/null
+++ b/drivers/video/adf/adf_fops.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VIDEO_ADF_ADF_FOPS_H
+#define __VIDEO_ADF_ADF_FOPS_H
+
+#include <linux/bitmap.h>
+#include <linux/fs.h>
+
+extern const struct file_operations adf_fops;
+
+struct adf_file {
+	struct list_head head;
+	struct adf_obj *obj;
+
+	DECLARE_BITMAP(event_subscriptions, ADF_EVENT_TYPE_MAX);
+	u8 event_buf[4096];
+	int event_head;
+	int event_tail;
+	wait_queue_head_t event_wait;
+};
+
+void adf_file_queue_event(struct adf_file *file, struct adf_event *event);
+long adf_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+
+#endif /* __VIDEO_ADF_ADF_FOPS_H */
diff --git a/drivers/video/adf/adf_fops32.c b/drivers/video/adf/adf_fops32.c
new file mode 100644
index 0000000..d299a81
--- /dev/null
+++ b/drivers/video/adf/adf_fops32.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/uaccess.h>
+#include <video/adf.h>
+
+#include "adf_fops.h"
+#include "adf_fops32.h"
+
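+/*
+ * Each compat handler follows the same pattern: copy the 32-bit struct
+ * in, build the native struct on the compat user stack (converting
+ * compat_uptr_t members with compat_ptr()), call the native ioctl, and
+ * copy any outputs back into the 32-bit struct.
+ */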
+long adf_compat_post_config(struct file *file,
+		struct adf_post_config32 __user *arg)
+{
+	struct adf_post_config32 cfg32;
+	struct adf_post_config __user *cfg;
+	int ret;
+
+	if (copy_from_user(&cfg32, arg, sizeof(cfg32)))
+		return -EFAULT;
+
+	cfg = compat_alloc_user_space(sizeof(*cfg));
+	if (!access_ok(VERIFY_WRITE, cfg, sizeof(*cfg)))
+		return -EFAULT;
+
+	if (put_user(cfg32.n_interfaces, &cfg->n_interfaces) ||
+			put_user(compat_ptr(cfg32.interfaces),
+					&cfg->interfaces) ||
+			put_user(cfg32.n_bufs, &cfg->n_bufs) ||
+			put_user(compat_ptr(cfg32.bufs), &cfg->bufs) ||
+			put_user(cfg32.custom_data_size,
+					&cfg->custom_data_size) ||
+			put_user(compat_ptr(cfg32.custom_data),
+					&cfg->custom_data))
+		return -EFAULT;
+
+	ret = adf_file_ioctl(file, ADF_POST_CONFIG, (unsigned long)cfg);
+	if (ret < 0)
+		return ret;
+
+	if (copy_in_user(&arg->complete_fence, &cfg->complete_fence,
+			sizeof(cfg->complete_fence)))
+		return -EFAULT;
+
+	return 0;
+}
+
+long adf_compat_get_device_data(struct file *file,
+		struct adf_device_data32 __user *arg)
+{
+	struct adf_device_data32 data32;
+	struct adf_device_data __user *data;
+	int ret;
+
+	if (copy_from_user(&data32, arg, sizeof(data32)))
+		return -EFAULT;
+
+	data = compat_alloc_user_space(sizeof(*data));
+	if (!access_ok(VERIFY_WRITE, data, sizeof(*data)))
+		return -EFAULT;
+
+	if (put_user(data32.n_attachments, &data->n_attachments) ||
+			put_user(compat_ptr(data32.attachments),
+					&data->attachments) ||
+			put_user(data32.n_allowed_attachments,
+					&data->n_allowed_attachments) ||
+			put_user(compat_ptr(data32.allowed_attachments),
+					&data->allowed_attachments) ||
+			put_user(data32.custom_data_size,
+					&data->custom_data_size) ||
+			put_user(compat_ptr(data32.custom_data),
+					&data->custom_data))
+		return -EFAULT;
+
+	ret = adf_file_ioctl(file, ADF_GET_DEVICE_DATA, (unsigned long)data);
+	if (ret < 0)
+		return ret;
+
+	if (copy_in_user(arg->name, data->name, sizeof(arg->name)) ||
+			copy_in_user(&arg->n_attachments, &data->n_attachments,
+					sizeof(arg->n_attachments)) ||
+			copy_in_user(&arg->n_allowed_attachments,
+					&data->n_allowed_attachments,
+					sizeof(arg->n_allowed_attachments)) ||
+			copy_in_user(&arg->custom_data_size,
+					&data->custom_data_size,
+					sizeof(arg->custom_data_size)))
+		return -EFAULT;
+
+	return 0;
+}
+
+long adf_compat_get_interface_data(struct file *file,
+		struct adf_interface_data32 __user *arg)
+{
+	struct adf_interface_data32 data32;
+	struct adf_interface_data __user *data;
+	int ret;
+
+	if (copy_from_user(&data32, arg, sizeof(data32)))
+		return -EFAULT;
+
+	data = compat_alloc_user_space(sizeof(*data));
+	if (!access_ok(VERIFY_WRITE, data, sizeof(*data)))
+		return -EFAULT;
+
+	if (put_user(data32.n_available_modes, &data->n_available_modes) ||
+			put_user(compat_ptr(data32.available_modes),
+					&data->available_modes) ||
+			put_user(data32.custom_data_size,
+					&data->custom_data_size) ||
+			put_user(compat_ptr(data32.custom_data),
+					&data->custom_data))
+		return -EFAULT;
+
+	ret = adf_file_ioctl(file, ADF_GET_INTERFACE_DATA, (unsigned long)data);
+	if (ret < 0)
+		return ret;
+
+	if (copy_in_user(arg->name, data->name, sizeof(arg->name)) ||
+			copy_in_user(&arg->type, &data->type,
+					sizeof(arg->type)) ||
+			copy_in_user(&arg->id, &data->id, sizeof(arg->id)) ||
+			copy_in_user(&arg->flags, &data->flags,
+					sizeof(arg->flags)) ||
+			copy_in_user(&arg->dpms_state, &data->dpms_state,
+					sizeof(arg->dpms_state)) ||
+			copy_in_user(&arg->hotplug_detect,
+					&data->hotplug_detect,
+					sizeof(arg->hotplug_detect)) ||
+			copy_in_user(&arg->width_mm, &data->width_mm,
+					sizeof(arg->width_mm)) ||
+			copy_in_user(&arg->height_mm, &data->height_mm,
+					sizeof(arg->height_mm)) ||
+			copy_in_user(&arg->current_mode, &data->current_mode,
+					sizeof(arg->current_mode)) ||
+			copy_in_user(&arg->n_available_modes,
+					&data->n_available_modes,
+					sizeof(arg->n_available_modes)) ||
+			copy_in_user(&arg->custom_data_size,
+					&data->custom_data_size,
+					sizeof(arg->custom_data_size)))
+		return -EFAULT;
+
+	return 0;
+}
+
+long adf_compat_get_overlay_engine_data(struct file *file,
+		struct adf_overlay_engine_data32 __user *arg)
+{
+	struct adf_overlay_engine_data32 data32;
+	struct adf_overlay_engine_data __user *data;
+	int ret;
+
+	if (copy_from_user(&data32, arg, sizeof(data32)))
+		return -EFAULT;
+
+	data = compat_alloc_user_space(sizeof(*data));
+	if (!access_ok(VERIFY_WRITE, data, sizeof(*data)))
+		return -EFAULT;
+
+	if (put_user(data32.n_supported_formats, &data->n_supported_formats) ||
+			put_user(compat_ptr(data32.supported_formats),
+					&data->supported_formats) ||
+			put_user(data32.custom_data_size,
+					&data->custom_data_size) ||
+			put_user(compat_ptr(data32.custom_data),
+					&data->custom_data))
+		return -EFAULT;
+
+	ret = adf_file_ioctl(file, ADF_GET_OVERLAY_ENGINE_DATA,
+			(unsigned long)data);
+	if (ret < 0)
+		return ret;
+
+	if (copy_in_user(arg->name, data->name, sizeof(arg->name)) ||
+			copy_in_user(&arg->n_supported_formats,
+					&data->n_supported_formats,
+					sizeof(arg->n_supported_formats)) ||
+			copy_in_user(&arg->custom_data_size,
+					&data->custom_data_size,
+					sizeof(arg->custom_data_size)))
+		return -EFAULT;
+
+	return 0;
+}
+
+long adf_file_compat_ioctl(struct file *file, unsigned int cmd,
+		unsigned long arg)
+{
+	switch (cmd) {
+	case ADF_POST_CONFIG32:
+		return adf_compat_post_config(file, compat_ptr(arg));
+
+	case ADF_GET_DEVICE_DATA32:
+		return adf_compat_get_device_data(file, compat_ptr(arg));
+
+	case ADF_GET_INTERFACE_DATA32:
+		return adf_compat_get_interface_data(file, compat_ptr(arg));
+
+	case ADF_GET_OVERLAY_ENGINE_DATA32:
+		return adf_compat_get_overlay_engine_data(file,
+				compat_ptr(arg));
+
+	default:
+		return adf_file_ioctl(file, cmd, arg);
+	}
+}
diff --git a/drivers/video/adf/adf_fops32.h b/drivers/video/adf/adf_fops32.h
new file mode 100644
index 0000000..64034ce
--- /dev/null
+++ b/drivers/video/adf/adf_fops32.h
@@ -0,0 +1,78 @@
+#ifndef __VIDEO_ADF_ADF_FOPS32_H
+#define __VIDEO_ADF_ADF_FOPS32_H
+
+#include <linux/compat.h>
+#include <linux/ioctl.h>
+
+#include <video/adf.h>
+
+#define ADF_POST_CONFIG32 \
+		_IOW(ADF_IOCTL_TYPE, 2, struct adf_post_config32)
+#define ADF_GET_DEVICE_DATA32 \
+		_IOR(ADF_IOCTL_TYPE, 4, struct adf_device_data32)
+#define ADF_GET_INTERFACE_DATA32 \
+		_IOR(ADF_IOCTL_TYPE, 5, struct adf_interface_data32)
+#define ADF_GET_OVERLAY_ENGINE_DATA32 \
+		_IOR(ADF_IOCTL_TYPE, 6, struct adf_overlay_engine_data32)
+
+struct adf_post_config32 {
+	compat_size_t n_interfaces;
+	compat_uptr_t interfaces;
+
+	compat_size_t n_bufs;
+	compat_uptr_t bufs;
+
+	compat_size_t custom_data_size;
+	compat_uptr_t custom_data;
+
+	__s32 complete_fence;
+};
+
+struct adf_device_data32 {
+	char name[ADF_NAME_LEN];
+
+	compat_size_t n_attachments;
+	compat_uptr_t attachments;
+
+	compat_size_t n_allowed_attachments;
+	compat_uptr_t allowed_attachments;
+
+	compat_size_t custom_data_size;
+	compat_uptr_t custom_data;
+};
+
+struct adf_interface_data32 {
+	char name[ADF_NAME_LEN];
+
+	__u8 type;
+	__u32 id;
+	/* e.g. type=ADF_INTF_TYPE_DSI, id=1 => DSI.1 */
+	__u32 flags;
+
+	__u8 dpms_state;
+	__u8 hotplug_detect;
+	__u16 width_mm;
+	__u16 height_mm;
+
+	struct drm_mode_modeinfo current_mode;
+	compat_size_t n_available_modes;
+	compat_uptr_t available_modes;
+
+	compat_size_t custom_data_size;
+	compat_uptr_t custom_data;
+};
+
+struct adf_overlay_engine_data32 {
+	char name[ADF_NAME_LEN];
+
+	compat_size_t n_supported_formats;
+	compat_uptr_t supported_formats;
+
+	compat_size_t custom_data_size;
+	compat_uptr_t custom_data;
+};
+
+long adf_file_compat_ioctl(struct file *file, unsigned int cmd,
+		unsigned long arg);
+
+#endif /* __VIDEO_ADF_ADF_FOPS32_H */
diff --git a/drivers/video/adf/adf_format.c b/drivers/video/adf/adf_format.c
new file mode 100644
index 0000000..e3f22c7
--- /dev/null
+++ b/drivers/video/adf/adf_format.c
@@ -0,0 +1,280 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ * modified from drivers/gpu/drm/drm_crtc.c
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <drm/drm_fourcc.h>
+#include <video/adf_format.h>
+
+bool adf_format_is_standard(u32 format)
+{
+	switch (format) {
+	case DRM_FORMAT_C8:
+	case DRM_FORMAT_RGB332:
+	case DRM_FORMAT_BGR233:
+	case DRM_FORMAT_XRGB4444:
+	case DRM_FORMAT_XBGR4444:
+	case DRM_FORMAT_RGBX4444:
+	case DRM_FORMAT_BGRX4444:
+	case DRM_FORMAT_ARGB4444:
+	case DRM_FORMAT_ABGR4444:
+	case DRM_FORMAT_RGBA4444:
+	case DRM_FORMAT_BGRA4444:
+	case DRM_FORMAT_XRGB1555:
+	case DRM_FORMAT_XBGR1555:
+	case DRM_FORMAT_RGBX5551:
+	case DRM_FORMAT_BGRX5551:
+	case DRM_FORMAT_ARGB1555:
+	case DRM_FORMAT_ABGR1555:
+	case DRM_FORMAT_RGBA5551:
+	case DRM_FORMAT_BGRA5551:
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_BGR565:
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_BGR888:
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_RGBX8888:
+	case DRM_FORMAT_BGRX8888:
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_ABGR8888:
+	case DRM_FORMAT_RGBA8888:
+	case DRM_FORMAT_BGRA8888:
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_RGBX1010102:
+	case DRM_FORMAT_BGRX1010102:
+	case DRM_FORMAT_ARGB2101010:
+	case DRM_FORMAT_ABGR2101010:
+	case DRM_FORMAT_RGBA1010102:
+	case DRM_FORMAT_BGRA1010102:
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+	case DRM_FORMAT_AYUV:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+	case DRM_FORMAT_YUV411:
+	case DRM_FORMAT_YVU411:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YVU422:
+	case DRM_FORMAT_YUV444:
+	case DRM_FORMAT_YVU444:
+		return true;
+	default:
+		return false;
+	}
+}
+EXPORT_SYMBOL(adf_format_is_standard);
+
+bool adf_format_is_rgb(u32 format)
+{
+	switch (format) {
+	case DRM_FORMAT_C8:
+	case DRM_FORMAT_RGB332:
+	case DRM_FORMAT_BGR233:
+	case DRM_FORMAT_XRGB1555:
+	case DRM_FORMAT_XBGR1555:
+	case DRM_FORMAT_RGBX5551:
+	case DRM_FORMAT_BGRX5551:
+	case DRM_FORMAT_ARGB1555:
+	case DRM_FORMAT_ABGR1555:
+	case DRM_FORMAT_RGBA5551:
+	case DRM_FORMAT_BGRA5551:
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_BGR565:
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_BGR888:
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_RGBX8888:
+	case DRM_FORMAT_BGRX8888:
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_RGBX1010102:
+	case DRM_FORMAT_BGRX1010102:
+	case DRM_FORMAT_ARGB2101010:
+	case DRM_FORMAT_ABGR2101010:
+	case DRM_FORMAT_RGBA1010102:
+	case DRM_FORMAT_BGRA1010102:
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_ABGR8888:
+	case DRM_FORMAT_RGBA8888:
+	case DRM_FORMAT_BGRA8888:
+		return true;
+
+	default:
+		return false;
+	}
+}
+EXPORT_SYMBOL(adf_format_is_rgb);
+
+u8 adf_format_num_planes(u32 format)
+{
+	switch (format) {
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+	case DRM_FORMAT_YUV411:
+	case DRM_FORMAT_YVU411:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YVU422:
+	case DRM_FORMAT_YUV444:
+	case DRM_FORMAT_YVU444:
+		return 3;
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+		return 2;
+	default:
+		return 1;
+	}
+}
+EXPORT_SYMBOL(adf_format_num_planes);
+
+u8 adf_format_bpp(u32 format)
+{
+	switch (format) {
+	case DRM_FORMAT_C8:
+	case DRM_FORMAT_RGB332:
+	case DRM_FORMAT_BGR233:
+		return 8;
+
+	case DRM_FORMAT_XRGB1555:
+	case DRM_FORMAT_XBGR1555:
+	case DRM_FORMAT_RGBX5551:
+	case DRM_FORMAT_BGRX5551:
+	case DRM_FORMAT_ARGB1555:
+	case DRM_FORMAT_ABGR1555:
+	case DRM_FORMAT_RGBA5551:
+	case DRM_FORMAT_BGRA5551:
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_BGR565:
+		return 16;
+
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_BGR888:
+		return 24;
+
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_RGBX8888:
+	case DRM_FORMAT_BGRX8888:
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_RGBX1010102:
+	case DRM_FORMAT_BGRX1010102:
+	case DRM_FORMAT_ARGB2101010:
+	case DRM_FORMAT_ABGR2101010:
+	case DRM_FORMAT_RGBA1010102:
+	case DRM_FORMAT_BGRA1010102:
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_ABGR8888:
+	case DRM_FORMAT_RGBA8888:
+	case DRM_FORMAT_BGRA8888:
+		return 32;
+
+	default:
+		pr_debug("%s: unsupported pixel format %u\n", __func__, format);
+		return 0;
+	}
+}
+EXPORT_SYMBOL(adf_format_bpp);
+
+u8 adf_format_plane_cpp(u32 format, int plane)
+{
+	if (plane >= adf_format_num_planes(format))
+		return 0;
+
+	switch (format) {
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+		return 2;
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+		return plane ? 2 : 1;
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+	case DRM_FORMAT_YUV411:
+	case DRM_FORMAT_YVU411:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YVU422:
+	case DRM_FORMAT_YUV444:
+	case DRM_FORMAT_YVU444:
+		return 1;
+	default:
+		return adf_format_bpp(format) / 8;
+	}
+}
+EXPORT_SYMBOL(adf_format_plane_cpp);
+
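+/*
+ * Horizontal downsampling factor of the chroma planes relative to luma:
+ * e.g. 2 for the 4:2:2/4:2:0 and NV12-style formats, 4 for 4:1:1/4:1:0.
+ */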
+u8 adf_format_horz_chroma_subsampling(u32 format)
+{
+	switch (format) {
+	case DRM_FORMAT_YUV411:
+	case DRM_FORMAT_YVU411:
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+		return 4;
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YVU422:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+		return 2;
+	default:
+		return 1;
+	}
+}
+EXPORT_SYMBOL(adf_format_horz_chroma_subsampling);
+
+u8 adf_format_vert_chroma_subsampling(u32 format)
+{
+	switch (format) {
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+		return 4;
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+		return 2;
+	default:
+		return 1;
+	}
+}
+EXPORT_SYMBOL(adf_format_vert_chroma_subsampling);
diff --git a/drivers/video/adf/adf_memblock.c b/drivers/video/adf/adf_memblock.c
new file mode 100644
index 0000000..e73a7d5
--- /dev/null
+++ b/drivers/video/adf/adf_memblock.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+
+struct adf_memblock_pdata {
+	phys_addr_t base;
+};
+
+static struct sg_table *adf_memblock_map(struct dma_buf_attachment *attach,
+		enum dma_data_direction direction)
+{
+	struct adf_memblock_pdata *pdata = attach->dmabuf->priv;
+	unsigned long pfn = PFN_DOWN(pdata->base);
+	struct page *page = pfn_to_page(pfn);
+	struct sg_table *table;
+	int nents, ret;
+
+	table = kzalloc(sizeof(*table), GFP_KERNEL);
+	if (!table)
+		return ERR_PTR(-ENOMEM);
+
+	ret = sg_alloc_table(table, 1, GFP_KERNEL);
+	if (ret < 0)
+		goto err_alloc;
+
+	sg_set_page(table->sgl, page, attach->dmabuf->size, 0);
+
+	nents = dma_map_sg(attach->dev, table->sgl, 1, direction);
+	if (!nents) {
+		ret = -EINVAL;
+		goto err_map;
+	}
+
+	return table;
+
+err_map:
+	sg_free_table(table);
+err_alloc:
+	kfree(table);
+	return ERR_PTR(ret);
+}
+
+static void adf_memblock_unmap(struct dma_buf_attachment *attach,
+		struct sg_table *table, enum dma_data_direction direction)
+{
+	dma_unmap_sg(attach->dev, table->sgl, 1, direction);
+	sg_free_table(table);
+}
+
+static void __init_memblock adf_memblock_release(struct dma_buf *buf)
+{
+	struct adf_memblock_pdata *pdata = buf->priv;
+	int err = memblock_free(pdata->base, buf->size);
+
+	if (err < 0)
+		pr_warn("%s: freeing memblock failed: %d\n", __func__, err);
+	kfree(pdata);
+}
+
+static void *adf_memblock_do_kmap(struct dma_buf *buf, unsigned long pgoffset,
+		bool atomic)
+{
+	struct adf_memblock_pdata *pdata = buf->priv;
+	unsigned long pfn = PFN_DOWN(pdata->base) + pgoffset;
+	struct page *page = pfn_to_page(pfn);
+
+	if (atomic)
+		return kmap_atomic(page);
+	else
+		return kmap(page);
+}
+
+static void *adf_memblock_kmap_atomic(struct dma_buf *buf,
+		unsigned long pgoffset)
+{
+	return adf_memblock_do_kmap(buf, pgoffset, true);
+}
+
+static void adf_memblock_kunmap_atomic(struct dma_buf *buf,
+		unsigned long pgoffset, void *vaddr)
+{
+	kunmap_atomic(vaddr);
+}
+
+static void *adf_memblock_kmap(struct dma_buf *buf, unsigned long pgoffset)
+{
+	return adf_memblock_do_kmap(buf, pgoffset, false);
+}
+
+static void adf_memblock_kunmap(struct dma_buf *buf, unsigned long pgoffset,
+		void *vaddr)
+{
+	struct adf_memblock_pdata *pdata = buf->priv;
+	struct page *page = pfn_to_page(PFN_DOWN(pdata->base) + pgoffset);
+
+	/* kunmap() takes the page, not the kernel virtual address */
+	kunmap(page);
+}
+
+static int adf_memblock_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
+{
+	struct adf_memblock_pdata *pdata = buf->priv;
+
+	return remap_pfn_range(vma, vma->vm_start, PFN_DOWN(pdata->base),
+			vma->vm_end - vma->vm_start, vma->vm_page_prot);
+}
+
+static struct dma_buf_ops adf_memblock_ops = {
+	.map_dma_buf = adf_memblock_map,
+	.unmap_dma_buf = adf_memblock_unmap,
+	.release = adf_memblock_release,
+	.kmap_atomic = adf_memblock_kmap_atomic,
+	.kunmap_atomic = adf_memblock_kunmap_atomic,
+	.kmap = adf_memblock_kmap,
+	.kunmap = adf_memblock_kunmap,
+	.mmap = adf_memblock_mmap,
+};
+
+/**
+ * adf_memblock_export - export a memblock reserved area as a dma-buf
+ *
+ * @base: base physical address
+ * @size: memblock size
+ * @flags: mode flags for the dma-buf's file
+ *
+ * @base and @size must be page-aligned.
+ *
+ * Returns a dma-buf on success or ERR_PTR(-errno) on failure.
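+ *
+ * A minimal sketch of intended use, assuming @base/@size describe a
+ * region reserved earlier via memblock_reserve():
+ *
+ *	buf = adf_memblock_export(base, size, O_CLOEXEC);
+ *	if (IS_ERR(buf))
+ *		return PTR_ERR(buf);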
+ */
+struct dma_buf *adf_memblock_export(phys_addr_t base, size_t size, int flags)
+{
+	struct adf_memblock_pdata *pdata;
+	struct dma_buf *buf;
+
+	if (PAGE_ALIGN(base) != base || PAGE_ALIGN(size) != size)
+		return ERR_PTR(-EINVAL);
+
+	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+	pdata->base = base;
+	buf = dma_buf_export(pdata, &adf_memblock_ops, size, flags);
+	if (IS_ERR(buf))
+		kfree(pdata);
+
+	return buf;
+}
+EXPORT_SYMBOL(adf_memblock_export);
diff --git a/drivers/video/adf/adf_sysfs.c b/drivers/video/adf/adf_sysfs.c
new file mode 100644
index 0000000..8c659c7
--- /dev/null
+++ b/drivers/video/adf/adf_sysfs.c
@@ -0,0 +1,296 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <video/adf_client.h>
+
+#include "adf.h"
+#include "adf_fops.h"
+#include "adf_sysfs.h"
+
+static struct class *adf_class;
+static int adf_major;
+static DEFINE_IDR(adf_minors);
+
+#define dev_to_adf_interface(p) \
+	adf_obj_to_interface(container_of(p, struct adf_obj, dev))
+
+static ssize_t dpms_state_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct adf_interface *intf = dev_to_adf_interface(dev);
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			adf_interface_dpms_state(intf));
+}
+
+static ssize_t dpms_state_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct adf_interface *intf = dev_to_adf_interface(dev);
+	u8 dpms_state;
+	int err;
+
+	err = kstrtou8(buf, 0, &dpms_state);
+	if (err < 0)
+		return err;
+
+	err = adf_interface_blank(intf, dpms_state);
+	if (err < 0)
+		return err;
+
+	return count;
+}
+
+static ssize_t current_mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct adf_interface *intf = dev_to_adf_interface(dev);
+	struct drm_mode_modeinfo mode;
+
+	adf_interface_current_mode(intf, &mode);
+
+	if (mode.name[0]) {
+		return scnprintf(buf, PAGE_SIZE, "%s\n", mode.name);
+	} else {
+		bool interlaced = !!(mode.flags & DRM_MODE_FLAG_INTERLACE);
+		return scnprintf(buf, PAGE_SIZE, "%ux%u%s\n", mode.hdisplay,
+				mode.vdisplay, interlaced ? "i" : "");
+	}
+}
+
+static ssize_t type_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct adf_interface *intf = dev_to_adf_interface(dev);
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+			adf_interface_type_str(intf));
+}
+
+static ssize_t vsync_timestamp_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct adf_interface *intf = dev_to_adf_interface(dev);
+	ktime_t timestamp;
+	unsigned long flags;
+
+	read_lock_irqsave(&intf->vsync_lock, flags);
+	memcpy(&timestamp, &intf->vsync_timestamp, sizeof(timestamp));
+	read_unlock_irqrestore(&intf->vsync_lock, flags);
+
+	return scnprintf(buf, PAGE_SIZE, "%lld\n", ktime_to_ns(timestamp));
+}
+
+static ssize_t hotplug_detect_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct adf_interface *intf = dev_to_adf_interface(dev);
+	return scnprintf(buf, PAGE_SIZE, "%u\n", intf->hotplug_detect);
+}
+
+static struct device_attribute adf_interface_attrs[] = {
+	__ATTR(dpms_state, S_IRUGO|S_IWUSR, dpms_state_show, dpms_state_store),
+	__ATTR_RO(current_mode),
+	__ATTR_RO(hotplug_detect),
+	__ATTR_RO(type),
+	__ATTR_RO(vsync_timestamp),
+};
+
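+/*
+ * Register an ADF object with the device model: allocate a minor number
+ * in the adf_minors idr (so adf_obj_sysfs_find() can map a minor back to
+ * its object at open() time) and register the embedded struct device.
+ */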
+int adf_obj_sysfs_init(struct adf_obj *obj, struct device *parent)
+{
+	int ret = idr_alloc(&adf_minors, obj, 0, 0, GFP_KERNEL);
+	if (ret < 0) {
+		pr_err("%s: allocating adf minor failed: %d\n", __func__,
+				ret);
+		return ret;
+	}
+
+	obj->minor = ret;
+	obj->dev.parent = parent;
+	obj->dev.class = adf_class;
+	obj->dev.devt = MKDEV(adf_major, obj->minor);
+
+	ret = device_register(&obj->dev);
+	if (ret < 0) {
+		pr_err("%s: registering adf object failed: %d\n", __func__,
+				ret);
+		goto err_device_register;
+	}
+
+	return 0;
+
+err_device_register:
+	idr_remove(&adf_minors, obj->minor);
+	return ret;
+}
+
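+/*
+ * The devnode callbacks pick the /dev names: adf%d for devices, and
+ * adf-interface%d.%d / adf-overlay-engine%d.%d (parent id, object id)
+ * for their children.
+ */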
+static char *adf_device_devnode(struct device *dev, umode_t *mode,
+		kuid_t *uid, kgid_t *gid)
+{
+	struct adf_obj *obj = container_of(dev, struct adf_obj, dev);
+	return kasprintf(GFP_KERNEL, "adf%d", obj->id);
+}
+
+static char *adf_interface_devnode(struct device *dev, umode_t *mode,
+		kuid_t *uid, kgid_t *gid)
+{
+	struct adf_obj *obj = container_of(dev, struct adf_obj, dev);
+	struct adf_interface *intf = adf_obj_to_interface(obj);
+	struct adf_device *parent = adf_interface_parent(intf);
+	return kasprintf(GFP_KERNEL, "adf-interface%d.%d",
+			parent->base.id, intf->base.id);
+}
+
+static char *adf_overlay_engine_devnode(struct device *dev, umode_t *mode,
+		kuid_t *uid, kgid_t *gid)
+{
+	struct adf_obj *obj = container_of(dev, struct adf_obj, dev);
+	struct adf_overlay_engine *eng = adf_obj_to_overlay_engine(obj);
+	struct adf_device *parent = adf_overlay_engine_parent(eng);
+	return kasprintf(GFP_KERNEL, "adf-overlay-engine%d.%d",
+			parent->base.id, eng->base.id);
+}
+
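+/*
+ * ADF objects are embedded in driver-owned structures and torn down by
+ * the ADF core, so the device core's release callback has nothing to
+ * free here.
+ */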
+static void adf_noop_release(struct device *dev)
+{
+}
+
+static struct device_type adf_device_type = {
+	.name = "adf_device",
+	.devnode = adf_device_devnode,
+	.release = adf_noop_release,
+};
+
+static struct device_type adf_interface_type = {
+	.name = "adf_interface",
+	.devnode = adf_interface_devnode,
+	.release = adf_noop_release,
+};
+
+static struct device_type adf_overlay_engine_type = {
+	.name = "adf_overlay_engine",
+	.devnode = adf_overlay_engine_devnode,
+	.release = adf_noop_release,
+};
+
+int adf_device_sysfs_init(struct adf_device *dev)
+{
+	dev->base.dev.type = &adf_device_type;
+	dev_set_name(&dev->base.dev, "%s", dev->base.name);
+	return adf_obj_sysfs_init(&dev->base, dev->dev);
+}
+
+int adf_interface_sysfs_init(struct adf_interface *intf)
+{
+	struct adf_device *parent = adf_interface_parent(intf);
+	size_t i, j;
+	int ret;
+
+	intf->base.dev.type = &adf_interface_type;
+	dev_set_name(&intf->base.dev, "%s-interface%d", parent->base.name,
+			intf->base.id);
+
+	ret = adf_obj_sysfs_init(&intf->base, &parent->base.dev);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0; i < ARRAY_SIZE(adf_interface_attrs); i++) {
+		ret = device_create_file(&intf->base.dev,
+				&adf_interface_attrs[i]);
+		if (ret < 0) {
+			dev_err(&intf->base.dev, "creating sysfs attribute %s failed: %d\n",
+					adf_interface_attrs[i].attr.name, ret);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	for (j = 0; j < i; j++)
+		device_remove_file(&intf->base.dev, &adf_interface_attrs[j]);
+	return ret;
+}
+
+int adf_overlay_engine_sysfs_init(struct adf_overlay_engine *eng)
+{
+	struct adf_device *parent = adf_overlay_engine_parent(eng);
+
+	eng->base.dev.type = &adf_overlay_engine_type;
+	dev_set_name(&eng->base.dev, "%s-overlay-engine%d", parent->base.name,
+			eng->base.id);
+
+	return adf_obj_sysfs_init(&eng->base, &parent->base.dev);
+}
+
+struct adf_obj *adf_obj_sysfs_find(int minor)
+{
+	return idr_find(&adf_minors, minor);
+}
+
+void adf_obj_sysfs_destroy(struct adf_obj *obj)
+{
+	idr_remove(&adf_minors, obj->minor);
+	device_unregister(&obj->dev);
+}
+
+void adf_device_sysfs_destroy(struct adf_device *dev)
+{
+	adf_obj_sysfs_destroy(&dev->base);
+}
+
+void adf_interface_sysfs_destroy(struct adf_interface *intf)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(adf_interface_attrs); i++)
+		device_remove_file(&intf->base.dev, &adf_interface_attrs[i]);
+	adf_obj_sysfs_destroy(&intf->base);
+}
+
+void adf_overlay_engine_sysfs_destroy(struct adf_overlay_engine *eng)
+{
+	adf_obj_sysfs_destroy(&eng->base);
+}
+
+int adf_sysfs_init(void)
+{
+	struct class *class;
+	int ret;
+
+	class = class_create(THIS_MODULE, "adf");
+	if (IS_ERR(class)) {
+		ret = PTR_ERR(class);
+		pr_err("%s: creating class failed: %d\n", __func__, ret);
+		return ret;
+	}
+
+	ret = register_chrdev(0, "adf", &adf_fops);
+	if (ret < 0) {
+		pr_err("%s: registering device failed: %d\n", __func__, ret);
+		goto err_chrdev;
+	}
+
+	adf_class = class;
+	adf_major = ret;
+	return 0;
+
+err_chrdev:
+	class_destroy(adf_class);
+	return ret;
+}
+
+void adf_sysfs_destroy(void)
+{
+	idr_destroy(&adf_minors);
+	class_destroy(adf_class);
+}
diff --git a/drivers/video/adf/adf_sysfs.h b/drivers/video/adf/adf_sysfs.h
new file mode 100644
index 0000000..0613ac3
--- /dev/null
+++ b/drivers/video/adf/adf_sysfs.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VIDEO_ADF_ADF_SYSFS_H
+#define __VIDEO_ADF_ADF_SYSFS_H
+
+struct adf_device;
+struct adf_interface;
+struct adf_overlay_engine;
+
+int adf_device_sysfs_init(struct adf_device *dev);
+void adf_device_sysfs_destroy(struct adf_device *dev);
+int adf_interface_sysfs_init(struct adf_interface *intf);
+void adf_interface_sysfs_destroy(struct adf_interface *intf);
+int adf_overlay_engine_sysfs_init(struct adf_overlay_engine *eng);
+void adf_overlay_engine_sysfs_destroy(struct adf_overlay_engine *eng);
+struct adf_obj *adf_obj_sysfs_find(int minor);
+
+int adf_sysfs_init(void);
+void adf_sysfs_destroy(void);
+
+#endif /* __VIDEO_ADF_ADF_SYSFS_H */
diff --git a/drivers/video/adf/adf_trace.h b/drivers/video/adf/adf_trace.h
new file mode 100644
index 0000000..3cb2a84
--- /dev/null
+++ b/drivers/video/adf/adf_trace.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM adf
+
+#if !defined(__VIDEO_ADF_ADF_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __VIDEO_ADF_ADF_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <video/adf.h>
+
+TRACE_EVENT(adf_event,
+	TP_PROTO(struct adf_obj *obj, enum adf_event_type type),
+	TP_ARGS(obj, type),
+
+	TP_STRUCT__entry(
+		__string(name, obj->name)
+		__field(enum adf_event_type, type)
+		__array(char, type_str, 32)
+	),
+	TP_fast_assign(
+		__assign_str(name, obj->name);
+		__entry->type = type;
+		strlcpy(__entry->type_str, adf_event_type_str(obj, type),
+				sizeof(__entry->type_str));
+	),
+	TP_printk("obj=%s type=%u (%s)",
+			__get_str(name),
+			__entry->type,
+			__entry->type_str)
+);
+
+TRACE_EVENT(adf_event_enable,
+	TP_PROTO(struct adf_obj *obj, enum adf_event_type type),
+	TP_ARGS(obj, type),
+
+	TP_STRUCT__entry(
+		__string(name, obj->name)
+		__field(enum adf_event_type, type)
+		__array(char, type_str, 32)
+	),
+	TP_fast_assign(
+		__assign_str(name, obj->name);
+		__entry->type = type;
+		strlcpy(__entry->type_str, adf_event_type_str(obj, type),
+				sizeof(__entry->type_str));
+	),
+	TP_printk("obj=%s type=%u (%s)",
+			__get_str(name),
+			__entry->type,
+			__entry->type_str)
+);
+
+TRACE_EVENT(adf_event_disable,
+	TP_PROTO(struct adf_obj *obj, enum adf_event_type type),
+	TP_ARGS(obj, type),
+
+	TP_STRUCT__entry(
+		__string(name, obj->name)
+		__field(enum adf_event_type, type)
+		__array(char, type_str, 32)
+	),
+	TP_fast_assign(
+		__assign_str(name, obj->name);
+		__entry->type = type;
+		strlcpy(__entry->type_str, adf_event_type_str(obj, type),
+				sizeof(__entry->type_str));
+	),
+	TP_printk("obj=%s type=%u (%s)",
+			__get_str(name),
+			__entry->type,
+			__entry->type_str)
+);
+
+#endif /* __VIDEO_ADF_ADF_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE adf_trace
+#include <trace/define_trace.h>
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index e033491..ab29939 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -18,6 +18,8 @@
 #include <linux/slab.h>
 #include <linux/i2c.h>
 #include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/platform_data/ds2482.h>
 #include <asm/delay.h>
 
 #include "../w1.h"
@@ -84,7 +86,8 @@
 static int ds2482_probe(struct i2c_client *client,
 			const struct i2c_device_id *id);
 static int ds2482_remove(struct i2c_client *client);
-
+static int ds2482_suspend(struct device *dev);
+static int ds2482_resume(struct device *dev);
 
 /**
  * Driver data (common to all clients)
@@ -94,10 +97,16 @@
 	{ }
 };
 
+static const struct dev_pm_ops ds2482_pm_ops = {
+	.suspend = ds2482_suspend,
+	.resume = ds2482_resume,
+};
+
 static struct i2c_driver ds2482_driver = {
 	.driver = {
 		.owner	= THIS_MODULE,
 		.name	= "ds2482",
+		.pm = &ds2482_pm_ops,
 	},
 	.probe		= ds2482_probe,
 	.remove		= ds2482_remove,
@@ -119,6 +128,7 @@
 struct ds2482_data {
 	struct i2c_client	*client;
 	struct mutex		access_lock;
+	int			slpz_gpio;
 
 	/* 1-wire interface(s) */
 	int			w1_count;	/* 1 or 8 */
@@ -444,11 +454,31 @@
 	return retval;
 }
 
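+/*
+ * The optional SLPZ GPIO from platform data drives the chip's active-low
+ * sleep pin: pull it low on suspend to put the bridge to sleep, and high
+ * on resume to wake it.
+ */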
+static int ds2482_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct ds2482_data *data = i2c_get_clientdata(client);
+
+	if (data->slpz_gpio >= 0)
+		gpio_set_value(data->slpz_gpio, 0);
+	return 0;
+}
+
+static int ds2482_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct ds2482_data *data = i2c_get_clientdata(client);
+
+	if (data->slpz_gpio >= 0)
+		gpio_set_value(data->slpz_gpio, 1);
+	return 0;
+}
 
 static int ds2482_probe(struct i2c_client *client,
 			const struct i2c_device_id *id)
 {
 	struct ds2482_data *data;
+	struct ds2482_platform_data *pdata;
 	int err = -ENODEV;
 	int temp1;
 	int idx;
@@ -515,6 +545,16 @@
 		}
 	}
 
+	pdata = client->dev.platform_data;
+	data->slpz_gpio = pdata ? pdata->slpz_gpio : -1;
+
+	if (data->slpz_gpio >= 0) {
+		err = gpio_request_one(data->slpz_gpio, GPIOF_OUT_INIT_HIGH,
+				       "ds2482.slpz");
+		if (err < 0)
+			goto exit_w1_remove;
+	}
+
 	return 0;
 
 exit_w1_remove:
@@ -539,6 +579,11 @@
 			w1_remove_master_device(&data->w1_ch[idx].w1_bm);
 	}
 
+	if (data->slpz_gpio >= 0) {
+		gpio_set_value(data->slpz_gpio, 0);
+		gpio_free(data->slpz_gpio);
+	}
+
 	/* Free the memory */
 	kfree(data);
 	return 0;
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index e89fc31..7aea0b9 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -615,6 +615,40 @@
 
 	  To compile this driver as a module, choose M here.
 
+config INTEL_SCU_WATCHDOG_EVO
+	bool "Intel SCU Watchdog Evolution for Mobile Platforms"
+	depends on X86_INTEL_MID
+	---help---
+	  Driver for the watchdog timer evolution built into the Intel
+	  SCU on Intel Mobile Platforms.
+
+	  This driver supports the watchdog evolution implementation in the
+	  SCU, available for the Merrifield generation.
+
+	  Note that this is a boolean option, so the driver can only be
+	  built into the kernel, not as a module.
+
+config DISABLE_SCU_WATCHDOG
+	bool "De-activate Intel SCU Watchdog by cmdline for Mobile Platforms"
+	depends on INTEL_SCU_WATCHDOG || INTEL_SCU_WATCHDOG_EVO
+	---help---
+	  De-activate the watchdog via the kernel command line on Intel
+	  Mobile Platforms. This allows breakpoints to be used without the
+	  watchdog resetting the system.
+
+	  For debugging purposes only.
+
+config INTEL_MID_WATCHDOG
+	tristate "Intel MID Watchdog Timer"
+	depends on X86_INTEL_MID
+	select WATCHDOG_CORE
+	---help---
+	  Watchdog timer driver built into the Intel SCU for Intel MID
+	  platforms.
+
+	  This driver currently supports only the watchdog evolution
+	  implementation in the SCU, available for the Merrifield
+	  generation.
+
+	  To compile this driver as a module, choose M here.
+
 config ITCO_WDT
 	tristate "Intel TCO Timer/Watchdog"
 	depends on (X86 || IA64) && PCI
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index a300b94..e5b56bc 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -110,6 +110,8 @@
 obj-$(CONFIG_MACHZ_WDT) += machzwd.o
 obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o
 obj-$(CONFIG_INTEL_SCU_WATCHDOG) += intel_scu_watchdog.o
+obj-$(CONFIG_INTEL_SCU_WATCHDOG_EVO) += intel_scu_watchdog_evo.o
+obj-$(CONFIG_INTEL_MID_WATCHDOG) += intel-mid_wdt.o
 
 # M32R Architecture
 
diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c
new file mode 100644
index 0000000..9dffc53
--- /dev/null
+++ b/drivers/watchdog/intel-mid_wdt.c
@@ -0,0 +1,177 @@
+/*
+ *      intel-mid_wdt: generic Intel MID SCU watchdog driver
+ *
+ *      Platforms supported so far:
+ *      - Merrifield only
+ *
+ *      Copyright (C) 2014 Intel Corporation. All rights reserved.
+ *      Contact: David Cohen <david.a.cohen@linux.intel.com>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of version 2 of the GNU General
+ *      Public License as published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/nmi.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+#include <linux/platform_data/intel-mid_wdt.h>
+
+#include <asm/intel_scu_ipc.h>
+#include <asm/intel-mid.h>
+
+#define IPC_WATCHDOG 0xf8
+
+#define MID_WDT_PRETIMEOUT		15
+#define MID_WDT_TIMEOUT_MIN		(1 + MID_WDT_PRETIMEOUT)
+#define MID_WDT_TIMEOUT_MAX		170
+/*
+ * Change the default watchdog timeout from 90 to 80 to work around an
+ * issue where the SCU setting is not applied properly and a value of
+ * ~70 is used instead. Needs further debugging of the SCU driver code
+ * to trace whether the problem is in the driver or in SCU firmware;
+ * until then, adjust the default here as a workaround.
+ *
+ *	#define MID_WDT_DEFAULT_TIMEOUT		90
+ */
+#define MID_WDT_DEFAULT_TIMEOUT		80
+
+/* SCU watchdog messages */
+enum {
+	SCU_WATCHDOG_START = 0,
+	SCU_WATCHDOG_STOP,
+	SCU_WATCHDOG_KEEPALIVE,
+};
+
+static inline int wdt_command(int sub, u8 *in, int inlen)
+{
+	return intel_scu_ipc_command(IPC_WATCHDOG, (u32)sub, in, (u32)inlen,
+			NULL, 0);
+}
+
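+/*
+ * (Re)arm the SCU watchdog. The SCU raises the pretimeout interrupt once
+ * the user-visible timeout expires (see mid_wdt_irq) and hard-resets the
+ * platform MID_WDT_PRETIMEOUT seconds later, hence the offset below.
+ */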
+static int wdt_start(struct watchdog_device *wd)
+{
+	int ret;
+	int timeout = wd->timeout + MID_WDT_PRETIMEOUT;
+
+	struct ipc_wd_start {
+		u32 pretimeout;
+		u32 timeout;
+	} ipc_wd_start = { timeout - MID_WDT_PRETIMEOUT, timeout };
+
+	ret = wdt_command(SCU_WATCHDOG_START, (u8 *)&ipc_wd_start,
+			sizeof(ipc_wd_start));
+	if (ret) {
+		struct device *dev = watchdog_get_drvdata(wd);
+		dev_crit(dev, "error starting watchdog: %d\n", ret);
+	}
+
+	return ret;
+}
+
+static int wdt_ping(struct watchdog_device *wd)
+{
+	int ret;
+
+	ret = wdt_command(SCU_WATCHDOG_KEEPALIVE, NULL, 0);
+	if (ret) {
+		struct device *dev = watchdog_get_drvdata(wd);
+		dev_crit(dev, "error executing keepalive: %d\n", ret);
+	}
+
+	return ret;
+}
+
+static int wdt_stop(struct watchdog_device *wd)
+{
+	int ret;
+
+	ret = wdt_command(SCU_WATCHDOG_STOP, NULL, 0);
+	if (ret) {
+		struct device *dev = watchdog_get_drvdata(wd);
+		dev_crit(dev, "error stopping watchdog: %d\n", ret);
+	}
+
+	return ret;
+}
+
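+/*
+ * The pretimeout interrupt fires when the watchdog was not kicked in
+ * time. Panic so a backtrace/crash dump can be captured before the SCU
+ * hard-resets the platform.
+ */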
+static irqreturn_t mid_wdt_irq(int irq, void *dev_id)
+{
+	panic("Kernel Watchdog");
+
+	/* This code should not be reached */
+	return IRQ_HANDLED;
+}
+
+static const struct watchdog_info mid_wdt_info = {
+	.identity = "Intel MID SCU watchdog",
+	.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
+};
+
+static const struct watchdog_ops mid_wdt_ops = {
+	.owner = THIS_MODULE,
+	.start = wdt_start,
+	.stop = wdt_stop,
+	.ping = wdt_ping,
+};
+
+static int mid_wdt_probe(struct platform_device *pdev)
+{
+	struct watchdog_device *wdt_dev;
+	struct intel_mid_wdt_pdata *pdata = pdev->dev.platform_data;
+	int ret;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "missing platform data\n");
+		return -EINVAL;
+	}
+
+	if (pdata->probe) {
+		ret = pdata->probe(pdev);
+		if (ret)
+			return ret;
+	}
+
+	wdt_dev = devm_kzalloc(&pdev->dev, sizeof(*wdt_dev), GFP_KERNEL);
+	if (!wdt_dev)
+		return -ENOMEM;
+
+	wdt_dev->info = &mid_wdt_info;
+	wdt_dev->ops = &mid_wdt_ops;
+	wdt_dev->min_timeout = MID_WDT_TIMEOUT_MIN;
+	wdt_dev->max_timeout = MID_WDT_TIMEOUT_MAX;
+	wdt_dev->timeout = MID_WDT_DEFAULT_TIMEOUT;
+
+	watchdog_set_drvdata(wdt_dev, &pdev->dev);
+	platform_set_drvdata(pdev, wdt_dev);
+	ret = watchdog_register_device(wdt_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "error registering watchdog device\n");
+		return ret;
+	}
+
+	dev_info(&pdev->dev, "Intel MID watchdog device probed\n");
+
+	return 0;
+}
+
+static int mid_wdt_remove(struct platform_device *pdev)
+{
+	struct watchdog_device *wd = platform_get_drvdata(pdev);
+	watchdog_unregister_device(wd);
+	return 0;
+}
+
+static struct platform_driver mid_wdt_driver = {
+	.probe		= mid_wdt_probe,
+	.remove		= mid_wdt_remove,
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= "intel_mid_wdt",
+	},
+};
+
+module_platform_driver(mid_wdt_driver);
+
+MODULE_AUTHOR("David Cohen <david.a.cohen@linux.intel.com>");
+MODULE_DESCRIPTION("Watchdog Driver for Intel MID platform");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/intel_scu_watchdog.c b/drivers/watchdog/intel_scu_watchdog.c
deleted file mode 100644
index 9dda2d0..0000000
--- a/drivers/watchdog/intel_scu_watchdog.c
+++ /dev/null
@@ -1,568 +0,0 @@
-/*
- *      Intel_SCU 0.2:  An Intel SCU IOH Based Watchdog Device
- *			for Intel part #(s):
- *				- AF82MP20 PCH
- *
- *      Copyright (C) 2009-2010 Intel Corporation. All rights reserved.
- *
- *      This program is free software; you can redistribute it and/or
- *      modify it under the terms of version 2 of the GNU General
- *      Public License as published by the Free Software Foundation.
- *
- *      This program is distributed in the hope that it will be
- *      useful, but WITHOUT ANY WARRANTY; without even the implied
- *      warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- *      PURPOSE.  See the GNU General Public License for more details.
- *      You should have received a copy of the GNU General Public
- *      License along with this program; if not, write to the Free
- *      Software Foundation, Inc., 59 Temple Place - Suite 330,
- *      Boston, MA  02111-1307, USA.
- *      The full GNU General Public License is included in this
- *      distribution in the file called COPYING.
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/compiler.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/moduleparam.h>
-#include <linux/types.h>
-#include <linux/miscdevice.h>
-#include <linux/watchdog.h>
-#include <linux/fs.h>
-#include <linux/notifier.h>
-#include <linux/reboot.h>
-#include <linux/init.h>
-#include <linux/jiffies.h>
-#include <linux/uaccess.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/sfi.h>
-#include <asm/irq.h>
-#include <linux/atomic.h>
-#include <asm/intel_scu_ipc.h>
-#include <asm/apb_timer.h>
-#include <asm/mrst.h>
-
-#include "intel_scu_watchdog.h"
-
-/* Bounds number of times we will retry loading time count */
-/* This retry is a work around for a silicon bug.	   */
-#define MAX_RETRY 16
-
-#define IPC_SET_WATCHDOG_TIMER	0xF8
-
-static int timer_margin = DEFAULT_SOFT_TO_HARD_MARGIN;
-module_param(timer_margin, int, 0);
-MODULE_PARM_DESC(timer_margin,
-		"Watchdog timer margin"
-		"Time between interrupt and resetting the system"
-		"The range is from 1 to 160"
-		"This is the time for all keep alives to arrive");
-
-static int timer_set = DEFAULT_TIME;
-module_param(timer_set, int, 0);
-MODULE_PARM_DESC(timer_set,
-		"Default Watchdog timer setting"
-		"Complete cycle time"
-		"The range is from 1 to 170"
-		"This is the time for all keep alives to arrive");
-
-/* After watchdog device is closed, check force_boot. If:
- * force_boot == 0, then force boot on next watchdog interrupt after close,
- * force_boot == 1, then force boot immediately when device is closed.
- */
-static int force_boot;
-module_param(force_boot, int, 0);
-MODULE_PARM_DESC(force_boot,
-		"A value of 1 means that the driver will reboot"
-		"the system immediately if the /dev/watchdog device is closed"
-		"A value of 0 means that when /dev/watchdog device is closed"
-		"the watchdog timer will be refreshed for one more interval"
-		"of length: timer_set. At the end of this interval, the"
-		"watchdog timer will reset the system."
-		);
-
-/* there is only one device in the system now; this can be made into
- * an array in the future if we have more than one device */
-
-static struct intel_scu_watchdog_dev watchdog_device;
-
-/* Forces restart, if force_reboot is set */
-static void watchdog_fire(void)
-{
-	if (force_boot) {
-		pr_crit("Initiating system reboot\n");
-		emergency_restart();
-		pr_crit("Reboot didn't ?????\n");
-	}
-
-	else {
-		pr_crit("Immediate Reboot Disabled\n");
-		pr_crit("System will reset when watchdog timer times out!\n");
-	}
-}
-
-static int check_timer_margin(int new_margin)
-{
-	if ((new_margin < MIN_TIME_CYCLE) ||
-	    (new_margin > MAX_TIME - timer_set)) {
-		pr_debug("value of new_margin %d is out of the range %d to %d\n",
-			 new_margin, MIN_TIME_CYCLE, MAX_TIME - timer_set);
-		return -EINVAL;
-	}
-	return 0;
-}
-
-/*
- * IPC operations
- */
-static int watchdog_set_ipc(int soft_threshold, int threshold)
-{
-	u32	*ipc_wbuf;
-	u8	 cbuf[16] = { '\0' };
-	int	 ipc_ret = 0;
-
-	ipc_wbuf = (u32 *)&cbuf;
-	ipc_wbuf[0] = soft_threshold;
-	ipc_wbuf[1] = threshold;
-
-	ipc_ret = intel_scu_ipc_command(
-			IPC_SET_WATCHDOG_TIMER,
-			0,
-			ipc_wbuf,
-			2,
-			NULL,
-			0);
-
-	if (ipc_ret != 0)
-		pr_err("Error setting SCU watchdog timer: %x\n", ipc_ret);
-
-	return ipc_ret;
-};
-
-/*
- *      Intel_SCU operations
- */
-
-/* timer interrupt handler */
-static irqreturn_t watchdog_timer_interrupt(int irq, void *dev_id)
-{
-	int int_status;
-	int_status = ioread32(watchdog_device.timer_interrupt_status_addr);
-
-	pr_debug("irq, int_status: %x\n", int_status);
-
-	if (int_status != 0)
-		return IRQ_NONE;
-
-	/* has the timer been started? If not, then this is spurious */
-	if (watchdog_device.timer_started == 0) {
-		pr_debug("spurious interrupt received\n");
-		return IRQ_HANDLED;
-	}
-
-	/* temporarily disable the timer */
-	iowrite32(0x00000002, watchdog_device.timer_control_addr);
-
-	/* set the timer to the threshold */
-	iowrite32(watchdog_device.threshold,
-		  watchdog_device.timer_load_count_addr);
-
-	/* allow the timer to run */
-	iowrite32(0x00000003, watchdog_device.timer_control_addr);
-
-	return IRQ_HANDLED;
-}
-
-static int intel_scu_keepalive(void)
-{
-
-	/* read eoi register - clears interrupt */
-	ioread32(watchdog_device.timer_clear_interrupt_addr);
-
-	/* temporarily disable the timer */
-	iowrite32(0x00000002, watchdog_device.timer_control_addr);
-
-	/* set the timer to the soft_threshold */
-	iowrite32(watchdog_device.soft_threshold,
-		  watchdog_device.timer_load_count_addr);
-
-	/* allow the timer to run */
-	iowrite32(0x00000003, watchdog_device.timer_control_addr);
-
-	return 0;
-}
-
-static int intel_scu_stop(void)
-{
-	iowrite32(0, watchdog_device.timer_control_addr);
-	return 0;
-}
-
-static int intel_scu_set_heartbeat(u32 t)
-{
-	int			 ipc_ret;
-	int			 retry_count;
-	u32			 soft_value;
-	u32			 hw_pre_value;
-	u32			 hw_value;
-
-	watchdog_device.timer_set = t;
-	watchdog_device.threshold =
-		timer_margin * watchdog_device.timer_tbl_ptr->freq_hz;
-	watchdog_device.soft_threshold =
-		(watchdog_device.timer_set - timer_margin)
-		* watchdog_device.timer_tbl_ptr->freq_hz;
-
-	pr_debug("set_heartbeat: timer freq is %d\n",
-		 watchdog_device.timer_tbl_ptr->freq_hz);
-	pr_debug("set_heartbeat: timer_set is %x (hex)\n",
-		 watchdog_device.timer_set);
-	pr_debug("set_hearbeat: timer_margin is %x (hex)\n", timer_margin);
-	pr_debug("set_heartbeat: threshold is %x (hex)\n",
-		 watchdog_device.threshold);
-	pr_debug("set_heartbeat: soft_threshold is %x (hex)\n",
-		 watchdog_device.soft_threshold);
-
-	/* Adjust thresholds by FREQ_ADJUSTMENT factor, to make the */
-	/* watchdog timing come out right. */
-	watchdog_device.threshold =
-		watchdog_device.threshold / FREQ_ADJUSTMENT;
-	watchdog_device.soft_threshold =
-		watchdog_device.soft_threshold / FREQ_ADJUSTMENT;
-
-	/* temporarily disable the timer */
-	iowrite32(0x00000002, watchdog_device.timer_control_addr);
-
-	/* send the threshold and soft_threshold via IPC to the processor */
-	ipc_ret = watchdog_set_ipc(watchdog_device.soft_threshold,
-				   watchdog_device.threshold);
-
-	if (ipc_ret != 0) {
-		/* Make sure the watchdog timer is stopped */
-		intel_scu_stop();
-		return ipc_ret;
-	}
-
-	/* Soft Threshold set loop. Early versions of silicon did */
-	/* not always set this count correctly.  This loop checks */
-	/* the value and retries if it was not set correctly.     */
-
-	retry_count = 0;
-	soft_value = watchdog_device.soft_threshold & 0xFFFF0000;
-	do {
-
-		/* Make sure timer is stopped */
-		intel_scu_stop();
-
-		if (MAX_RETRY < retry_count++) {
-			/* Unable to set timer value */
-			pr_err("Unable to set timer\n");
-			return -ENODEV;
-		}
-
-		/* set the timer to the soft threshold */
-		iowrite32(watchdog_device.soft_threshold,
-			watchdog_device.timer_load_count_addr);
-
-		/* read count value before starting timer */
-		hw_pre_value = ioread32(watchdog_device.timer_load_count_addr);
-		hw_pre_value = hw_pre_value & 0xFFFF0000;
-
-		/* Start the timer */
-		iowrite32(0x00000003, watchdog_device.timer_control_addr);
-
-		/* read the value the time loaded into its count reg */
-		hw_value = ioread32(watchdog_device.timer_load_count_addr);
-		hw_value = hw_value & 0xFFFF0000;
-
-
-	} while (soft_value != hw_value);
-
-	watchdog_device.timer_started = 1;
-
-	return 0;
-}
-
-/*
- * /dev/watchdog handling
- */
-
-static int intel_scu_open(struct inode *inode, struct file *file)
-{
-
-	/* Set flag to indicate that watchdog device is open */
-	if (test_and_set_bit(0, &watchdog_device.driver_open))
-		return -EBUSY;
-
-	/* Check for reopen of driver. Reopens are not allowed */
-	if (watchdog_device.driver_closed)
-		return -EPERM;
-
-	return nonseekable_open(inode, file);
-}
-
-static int intel_scu_release(struct inode *inode, struct file *file)
-{
-	/*
-	 * This watchdog should not be closed, after the timer
-	 * is started with the WDIPC_SETTIMEOUT ioctl
-	 * If force_boot is set watchdog_fire() will cause an
-	 * immediate reset. If force_boot is not set, the watchdog
-	 * timer is refreshed for one more interval. At the end
-	 * of that interval, the watchdog timer will reset the system.
-	 */
-
-	if (!test_and_clear_bit(0, &watchdog_device.driver_open)) {
-		pr_debug("intel_scu_release, without open\n");
-		return -ENOTTY;
-	}
-
-	if (!watchdog_device.timer_started) {
-		/* Just close, since timer has not been started */
-		pr_debug("closed, without starting timer\n");
-		return 0;
-	}
-
-	pr_crit("Unexpected close of /dev/watchdog!\n");
-
-	/* Since the timer was started, prevent future reopens */
-	watchdog_device.driver_closed = 1;
-
-	/* Refresh the timer for one more interval */
-	intel_scu_keepalive();
-
-	/* Reboot system (if force_boot is set) */
-	watchdog_fire();
-
-	/* We should only reach this point if force_boot is not set */
-	return 0;
-}
-
-static ssize_t intel_scu_write(struct file *file,
-			      char const *data,
-			      size_t len,
-			      loff_t *ppos)
-{
-
-	if (watchdog_device.timer_started)
-		/* Watchdog already started, keep it alive */
-		intel_scu_keepalive();
-	else
-		/* Start watchdog with timer value set by init */
-		intel_scu_set_heartbeat(watchdog_device.timer_set);
-
-	return len;
-}
-
-static long intel_scu_ioctl(struct file *file,
-			   unsigned int cmd,
-			   unsigned long arg)
-{
-	void __user *argp = (void __user *)arg;
-	u32 __user *p = argp;
-	u32 new_margin;
-
-
-	static const struct watchdog_info ident = {
-		.options =          WDIOF_SETTIMEOUT
-				    | WDIOF_KEEPALIVEPING,
-		.firmware_version = 0,  /* @todo Get from SCU via
-						 ipc_get_scu_fw_version()? */
-		.identity =         "Intel_SCU IOH Watchdog"  /* len < 32 */
-	};
-
-	switch (cmd) {
-	case WDIOC_GETSUPPORT:
-		return copy_to_user(argp,
-				    &ident,
-				    sizeof(ident)) ? -EFAULT : 0;
-	case WDIOC_GETSTATUS:
-	case WDIOC_GETBOOTSTATUS:
-		return put_user(0, p);
-	case WDIOC_KEEPALIVE:
-		intel_scu_keepalive();
-
-		return 0;
-	case WDIOC_SETTIMEOUT:
-		if (get_user(new_margin, p))
-			return -EFAULT;
-
-		if (check_timer_margin(new_margin))
-			return -EINVAL;
-
-		if (intel_scu_set_heartbeat(new_margin))
-			return -EINVAL;
-		return 0;
-	case WDIOC_GETTIMEOUT:
-		return put_user(watchdog_device.soft_threshold, p);
-
-	default:
-		return -ENOTTY;
-	}
-}
-
-/*
- *      Notifier for system down
- */
-static int intel_scu_notify_sys(struct notifier_block *this,
-			       unsigned long code,
-			       void *another_unused)
-{
-	if (code == SYS_DOWN || code == SYS_HALT)
-		/* Turn off the watchdog timer. */
-		intel_scu_stop();
-	return NOTIFY_DONE;
-}
-
-/*
- *      Kernel Interfaces
- */
-static const struct file_operations intel_scu_fops = {
-	.owner          = THIS_MODULE,
-	.llseek         = no_llseek,
-	.write          = intel_scu_write,
-	.unlocked_ioctl = intel_scu_ioctl,
-	.open           = intel_scu_open,
-	.release        = intel_scu_release,
-};
-
-static int __init intel_scu_watchdog_init(void)
-{
-	int ret;
-	u32 __iomem *tmp_addr;
-
-	/*
-	 * We don't really need to check this as the SFI timer get will fail
-	 * but if we do so we can exit with a clearer reason and no noise.
-	 *
-	 * If it isn't an intel MID device then it doesn't have this watchdog
-	 */
-	if (!mrst_identify_cpu())
-		return -ENODEV;
-
-	/* Check boot parameters to verify that their initial values */
-	/* are in range. */
-	/* Check value of timer_set boot parameter */
-	if ((timer_set < MIN_TIME_CYCLE) ||
-	    (timer_set > MAX_TIME - MIN_TIME_CYCLE)) {
-		pr_err("value of timer_set %x (hex) is out of range from %x to %x (hex)\n",
-		       timer_set, MIN_TIME_CYCLE, MAX_TIME - MIN_TIME_CYCLE);
-		return -EINVAL;
-	}
-
-	/* Check value of timer_margin boot parameter */
-	if (check_timer_margin(timer_margin))
-		return -EINVAL;
-
-	watchdog_device.timer_tbl_ptr = sfi_get_mtmr(sfi_mtimer_num-1);
-
-	if (watchdog_device.timer_tbl_ptr == NULL) {
-		pr_debug("timer is not available\n");
-		return -ENODEV;
-	}
-	/* make sure the timer exists */
-	if (watchdog_device.timer_tbl_ptr->phys_addr == 0) {
-		pr_debug("timer %d does not have valid physical memory\n",
-			 sfi_mtimer_num);
-		return -ENODEV;
-	}
-
-	if (watchdog_device.timer_tbl_ptr->irq == 0) {
-		pr_debug("timer %d invalid irq\n", sfi_mtimer_num);
-		return -ENODEV;
-	}
-
-	tmp_addr = ioremap_nocache(watchdog_device.timer_tbl_ptr->phys_addr,
-			20);
-
-	if (tmp_addr == NULL) {
-		pr_debug("timer unable to ioremap\n");
-		return -ENOMEM;
-	}
-
-	watchdog_device.timer_load_count_addr = tmp_addr++;
-	watchdog_device.timer_current_value_addr = tmp_addr++;
-	watchdog_device.timer_control_addr = tmp_addr++;
-	watchdog_device.timer_clear_interrupt_addr = tmp_addr++;
-	watchdog_device.timer_interrupt_status_addr = tmp_addr++;
-
-	/* Set the default time values in device structure */
-
-	watchdog_device.timer_set = timer_set;
-	watchdog_device.threshold =
-		timer_margin * watchdog_device.timer_tbl_ptr->freq_hz;
-	watchdog_device.soft_threshold =
-		(watchdog_device.timer_set - timer_margin)
-		* watchdog_device.timer_tbl_ptr->freq_hz;
-
-
-	watchdog_device.intel_scu_notifier.notifier_call =
-		intel_scu_notify_sys;
-
-	ret = register_reboot_notifier(&watchdog_device.intel_scu_notifier);
-	if (ret) {
-		pr_err("cannot register notifier %d)\n", ret);
-		goto register_reboot_error;
-	}
-
-	watchdog_device.miscdev.minor = WATCHDOG_MINOR;
-	watchdog_device.miscdev.name = "watchdog";
-	watchdog_device.miscdev.fops = &intel_scu_fops;
-
-	ret = misc_register(&watchdog_device.miscdev);
-	if (ret) {
-		pr_err("cannot register miscdev %d err =%d\n",
-		       WATCHDOG_MINOR, ret);
-		goto misc_register_error;
-	}
-
-	ret = request_irq((unsigned int)watchdog_device.timer_tbl_ptr->irq,
-		watchdog_timer_interrupt,
-		IRQF_SHARED, "watchdog",
-		&watchdog_device.timer_load_count_addr);
-	if (ret) {
-		pr_err("error requesting irq %d\n", ret);
-		goto request_irq_error;
-	}
-	/* Make sure timer is disabled before returning */
-	intel_scu_stop();
-	return 0;
-
-/* error cleanup */
-
-request_irq_error:
-	misc_deregister(&watchdog_device.miscdev);
-misc_register_error:
-	unregister_reboot_notifier(&watchdog_device.intel_scu_notifier);
-register_reboot_error:
-	intel_scu_stop();
-	iounmap(watchdog_device.timer_load_count_addr);
-	return ret;
-}
-
-static void __exit intel_scu_watchdog_exit(void)
-{
-
-	misc_deregister(&watchdog_device.miscdev);
-	unregister_reboot_notifier(&watchdog_device.intel_scu_notifier);
-	/* disable the timer */
-	iowrite32(0x00000002, watchdog_device.timer_control_addr);
-	iounmap(watchdog_device.timer_load_count_addr);
-}
-
-late_initcall(intel_scu_watchdog_init);
-module_exit(intel_scu_watchdog_exit);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel SCU Watchdog Device Driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-MODULE_VERSION(WDT_VER);
diff --git a/drivers/watchdog/intel_scu_watchdog_evo.c b/drivers/watchdog/intel_scu_watchdog_evo.c
new file mode 100644
index 0000000..1e04d73
--- /dev/null
+++ b/drivers/watchdog/intel_scu_watchdog_evo.c
@@ -0,0 +1,1433 @@
+/*
+ *      Intel_SCU 0.3:  An Intel SCU IOH Based Watchdog Device
+ *			for Intel part #(s):
+ *				- AF82MP20 PCH
+ *
+ *      Copyright (C) 2009-2013 Intel Corporation. All rights reserved.
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of version 2 of the GNU General
+ *      Public License as published by the Free Software Foundation.
+ *
+ *      This program is distributed in the hope that it will be
+ *      useful, but WITHOUT ANY WARRANTY; without even the implied
+ *      warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ *      PURPOSE.  See the GNU General Public License for more details.
+ *      You should have received a copy of the GNU General Public
+ *      License along with this program; if not, write to the Free
+ *      Software Foundation, Inc., 59 Temple Place - Suite 330,
+ *      Boston, MA  02111-1307, USA.
+ *      The full GNU General Public License is included in this
+ *      distribution in the file called COPYING.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+/* See Documentation/watchdog/intel-scu-watchdog.txt */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/reboot.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/rpmsg.h>
+#include <linux/nmi.h>
+#include <asm/intel_scu_ipcutil.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel-mid.h>
+
+#include "intel_scu_watchdog_evo.h"
+
+/* Adjustment flags */
+#define CONFIG_INTEL_SCU_SOFT_LOCKUP
+#define CONFIG_DEBUG_WATCHDOG
+
+/* Defines */
+#define STRING_RESET_TYPE_MAX_LEN   11
+#define STRING_COLD_OFF             "COLD_OFF"
+#define STRING_COLD_RESET           "COLD_RESET"
+#define STRING_COLD_BOOT            "COLD_BOOT"
+
+#define EXT_TIMER0_MSI 12
+
+#define IPC_WATCHDOG 0xF8
+
+enum {
+	SCU_WATCHDOG_START = 0,
+	SCU_WATCHDOG_STOP,
+	SCU_WATCHDOG_KEEPALIVE,
+	SCU_WATCHDOG_SET_ACTION_ON_TIMEOUT
+};
+
+enum {
+	SCU_COLD_OFF_ON_TIMEOUT = 0,
+	SCU_COLD_RESET_ON_TIMEOUT,
+	SCU_COLD_BOOT_ON_TIMEOUT,
+	SCU_DO_NOTHING_ON_TIMEOUT
+};
+
+#ifdef CONFIG_DEBUG_FS
+#define SECURITY_WATCHDOG_ADDR  0xFF222230
+#define STRING_NONE "NONE"
+#endif
+
+/* Statics */
+static int reset_type_to_string(int reset_type, char *string);
+static int string_to_reset_type(const char *string, int *reset_type);
+static struct intel_scu_watchdog_dev watchdog_device;
+static unsigned char osnib_reset = OSNIB_WRITE_VALUE;
+
+/* Module params */
+static bool kicking_active = true;
+#ifdef CONFIG_DEBUG_WATCHDOG
+module_param(kicking_active, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(kicking_active,
+		"Deactivating the kicking will result in a cold reset "
+		"after a while");
+#endif
+
+static bool disable_kernel_watchdog = false;
+#ifdef CONFIG_DISABLE_SCU_WATCHDOG
+/*
+ * Please note that we are using a config CONFIG_DISABLE_SCU_WATCHDOG
+ * because this boot parameter should only be settable in a development
+ * build.
+ */
+module_param(disable_kernel_watchdog, bool, S_IRUGO);
+MODULE_PARM_DESC(disable_kernel_watchdog,
+		"Disable kernel watchdog. "
+		"Set to 0, watchdog started at boot "
+		"and left running; set to 1, watchdog "
+		"is not started until the user space "
+		"watchdog daemon is started; also, if the "
+		"timer is started by the iafw firmware, it "
+		"will be disabled upon initialization of this "
+		"driver if disable_kernel_watchdog is set");
+#endif
+
+static int pre_timeout = DEFAULT_PRETIMEOUT;
+
+static int timeout = DEFAULT_TIMEOUT;
+module_param(timeout, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(timeout,
+		"Default watchdog timer setting: "
+		"the complete cycle time. "
+		"The range is from 35 to 170. "
+		"This is the time for all keep-alives to arrive");
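+
+/*
+ * Usage sketch (assuming the module name follows the file name,
+ * intel_scu_watchdog_evo): booting with
+ * intel_scu_watchdog_evo.timeout=120 selects a 120 s cycle, from
+ * which init below derives pre_timeout = 120 - 15 = 105.
+ */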
+
+static bool reset_on_release = true;
+
+#ifdef CONFIG_INTEL_SCU_SOFT_LOCKUP
+/*
+ * heartbeats: per-cpu kstat.system times at the last sample.
+ * beattime: jiffies at the sample time of heartbeats.
+ * SOFT_LOCK_TIME: timeout in seconds after the warning interrupt.
+ * dump_softlock_debug: called SOFT_LOCK_TIME seconds after the scu
+ *	warning interrupt to log data to the log buffer and the
+ *	emmc-panic code. SOFT_LOCK_TIME needs to be less than the SCU
+ *	warn-to-reset time, which is currently 15 sec.
+ *
+ * The soft lock works by taking a snapshot of kstat_cpu(i).cpustat.system at
+ * the time of the warning interrupt for each cpu.  Then, at SOFT_LOCK_TIME,
+ * the amount of time spent in system is computed; if it is within 10 ms of
+ * the total SOFT_LOCK_TIME on any cpu, the stack is dumped on that cpu and
+ * panic is called.
+ */
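+
+/*
+ * Illustrative sketch (not part of this file): the warning-interrupt
+ * path is expected to seed the snapshot that dump_softlock_debug()
+ * compares against, along these lines:
+ *
+ *	int cpu;
+ *
+ *	beattime = jiffies;
+ *	for_each_possible_cpu(cpu)
+ *		heartbeats[cpu] = kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+ *	mod_timer(&softlock_timer, jiffies + SOFT_LOCK_TIME * HZ);
+ */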
+static u64 heartbeats[NR_CPUS];
+static u64 beattime;
+#define SOFT_LOCK_TIME 10
+static void dump_softlock_debug(unsigned long data);
+DEFINE_TIMER(softlock_timer, dump_softlock_debug, 0, 0);
+
+static struct rpmsg_instance *watchdog_instance;
+
+/* Time is about to run out and the SCU will reset soon.  Quickly
+ * dump debug data to the log buffer and emmc by calling panic before
+ * the lights go out.
+ */
+static void smp_dumpstack(void *info)
+{
+	dump_stack();
+}
+
+static void dump_softlock_debug(unsigned long data)
+{
+	int i, reboot;
+	u64 system[NR_CPUS], num_jifs;
+
+	memset(system, 0, NR_CPUS*sizeof(u64));
+
+	num_jifs = jiffies - beattime;
+	for_each_possible_cpu(i) {
+		system[i] = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM] -
+				heartbeats[i];
+	}
+
+	reboot = 0;
+
+	for_each_possible_cpu(i) {
+		if ((num_jifs - cputime_to_jiffies(system[i])) <
+						msecs_to_jiffies(10)) {
+			WARN(1, "cpu %d wedged\n", i);
+			smp_call_function_single(i, smp_dumpstack, NULL, 1);
+			reboot = 1;
+		}
+	}
+
+	if (reboot) {
+		panic_timeout = 10;
+		trigger_all_cpu_backtrace();
+		panic("Soft lock on CPUs\n");
+	}
+}
+#endif /* CONFIG_INTEL_SCU_SOFT_LOCKUP */
+
+/* Check current timeouts */
+/* Timeout bounds come from the MODULE_PARM_DESC description */
+static int check_timeouts(int pre_timeout_time, int timeout_time)
+{
+	if (pre_timeout_time >= timeout_time)
+		return -EINVAL;
+	if (pre_timeout_time > 155 || pre_timeout_time < 1)
+		return -EINVAL;
+	if (timeout_time > 170 || timeout_time < 35)
+		return -EINVAL;
+
+	return 0;
+}
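+
+/*
+ * Worked example: with the defaults (pre_timeout = 75, timeout = 90),
+ * check_timeouts(75, 90) returns 0. With the arguments reversed,
+ * check_timeouts(90, 75) fails the ordering test and returns -EINVAL,
+ * as does any cycle time outside the 35..170 s range.
+ */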
+
+/* Set the different timeouts needed by the SCU FW and start the
+ * kernel watchdog */
+static int watchdog_set_timeouts_and_start(int pretimeout,
+					   int timeout)
+{
+	int ret, error = 0;
+	struct ipc_wd_start {
+		u32 pretimeout;
+		u32 timeout;
+	} ipc_wd_start = { pretimeout, timeout };
+
+	ret = rpmsg_send_command(watchdog_instance, IPC_WATCHDOG,
+				 SCU_WATCHDOG_START, (u8 *)&ipc_wd_start,
+				 NULL, sizeof(ipc_wd_start), 0);
+	if (ret) {
+		pr_crit("Error configuring and starting watchdog: %d\n",
+			ret);
+		error = -EIO;
+	}
+
+	return error;
+}
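+
+/*
+ * Worked example: watchdog_set_timeouts_and_start(75, 90) sends the
+ * 8-byte payload { .pretimeout = 75, .timeout = 90 } under command
+ * IPC_WATCHDOG (0xF8), sub-command SCU_WATCHDOG_START; the SCU FW is
+ * then presumably expected to warn at 75 s and reset at 90 s unless
+ * the timer is kicked first.
+ */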
+
+/* Provisioning function for future enhancement: allows fine tuning of
+ * the timing according to the watchdog action settings */
+static int watchdog_set_appropriate_timeouts(void)
+{
+	pr_debug("Setting shutdown timeouts\n");
+	return watchdog_set_timeouts_and_start(pre_timeout, timeout);
+}
+
+/* Keep alive */
+static int watchdog_keepalive(void)
+{
+	int ret, error = 0;
+
+	pr_debug("%s\n", __func__);
+
+	if (unlikely(!kicking_active)) {
+		/* Close our eyes */
+		pr_err("Transparent kicking\n");
+		return 0;
+	}
+
+	/* Really kick it */
+	ret = rpmsg_send_simple_command(watchdog_instance, IPC_WATCHDOG,
+					SCU_WATCHDOG_KEEPALIVE);
+	if (ret) {
+		pr_crit("Error executing keepalive: %x\n", ret);
+		error = -EIO;
+	}
+
+	return error;
+}
+
+/* stops the timer */
+static int watchdog_stop(void)
+{
+	int ret = 0;
+	int error = 0;
+
+	pr_crit("%s\n", __func__);
+
+	ret = rpmsg_send_simple_command(watchdog_instance, IPC_WATCHDOG,
+					SCU_WATCHDOG_STOP);
+	if (ret) {
+		pr_crit("Error stopping watchdog: %x\n", ret);
+		error = -EIO;
+	}
+
+	watchdog_device.started = false;
+
+	return error;
+}
+
+/* warning interrupt handler */
+static irqreturn_t watchdog_warning_interrupt(int irq, void *dev_id)
+{
+	if (unlikely(!kicking_active))
+		pr_warn("[SHTDWN] %s, WATCHDOG TIMEOUT for test!\n", __func__);
+
+	else
+		pr_warn("[SHTDWN] %s, WATCHDOG TIMEOUT!\n", __func__);
+
+	/* Let's reset the platform after dumping some data */
+	trigger_all_cpu_backtrace();
+	panic("Kernel Watchdog");
+
+	/* This code should not be reached */
+	return IRQ_HANDLED;
+}
+
+/* Programs and starts the timer */
+static int watchdog_config_and_start(u32 newtimeout, u32 newpretimeout)
+{
+	int ret;
+
+	timeout = newtimeout;
+	pre_timeout = newpretimeout;
+
+	pr_debug("timeout=%ds, pre_timeout=%ds\n", timeout, pre_timeout);
+
+	/* Configure the watchdog */
+	ret = watchdog_set_timeouts_and_start(pre_timeout, timeout);
+	if (ret) {
+		pr_err("%s: Cannot configure the watchdog\n", __func__);
+
+		/* Make sure the watchdog timer is stopped */
+		watchdog_stop();
+		return ret;
+	}
+
+	watchdog_device.started = true;
+
+	return 0;
+}
+
+/* Open */
+static int intel_scu_open(struct inode *inode, struct file *file)
+{
+	/* Set flag to indicate that watchdog device is open */
+	if (test_and_set_bit(0, &watchdog_device.driver_open)) {
+		pr_err("watchdog device is busy\n");
+		return -EBUSY;
+	}
+
+	/* Check for reopen of driver. Reopens are not allowed */
+	if (watchdog_device.driver_closed) {
+		pr_err("watchdog device has been closed\n");
+		return -EPERM;
+	}
+
+	return nonseekable_open(inode, file);
+}
+
+/* Release */
+static int intel_scu_release(struct inode *inode, struct file *file)
+{
+	/*
+	 * This watchdog should not be closed after the timer
+	 * is started with the WDIOC_SETTIMEOUT ioctl.
+	 * If reset_on_release is set, this will cause an
+	 * immediate reset. If reset_on_release is not set, the watchdog
+	 * timer is refreshed for one more interval. At the end
+	 * of that interval, the watchdog timer will reset the system.
+	 */
+
+	if (!test_bit(0, &watchdog_device.driver_open)) {
+		pr_err("intel_scu_release, without open\n");
+		return -ENOTTY;
+	}
+
+	if (!watchdog_device.started) {
+		/* Just close, since timer has not been started */
+		pr_err("Closed, without starting timer\n");
+		return 0;
+	}
+
+	pr_crit("Unexpected close of /dev/watchdog!\n");
+
+	/* Since the timer was started, prevent future reopens */
+	watchdog_device.driver_closed = 1;
+
+	/* Refresh the timer for one more interval */
+	watchdog_keepalive();
+
+	/* Reboot system if requested */
+	if (reset_on_release) {
+		pr_crit("Initiating system reboot.\n");
+		emergency_restart();
+	}
+
+	pr_crit("Immediate Reboot Disabled\n");
+	pr_crit("System will reset when the watchdog timer expires!\n");
+
+	return 0;
+}
+
+/* Write */
+static ssize_t intel_scu_write(struct file *file, char const *data, size_t len,
+			      loff_t *ppos)
+{
+	pr_debug("watchdog %s\n", __func__);
+
+	if (watchdog_device.shutdown_flag == true)
+		/* do nothing if we are shutting down */
+		return len;
+
+	if (watchdog_device.started) {
+		/* Watchdog already started, keep it alive */
+		watchdog_keepalive();
+	}
+
+	return len;
+}
+
+/* ioctl */
+static long intel_scu_ioctl(struct file *file, unsigned int cmd,
+			    unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	u32 __user *p = argp;
+	u32 val;
+	u32 new_pre_timeout;
+	int options;
+
+	static const struct watchdog_info ident = {
+		.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+		/* @todo Get from SCU via ipc_get_scu_fw_version()? */
+		.firmware_version = 0,
+		/* len < 32 */
+		.identity = "Intel_SCU IOH Watchdog"
+	};
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		return copy_to_user(argp, &ident,
+				    sizeof(ident)) ? -EFAULT : 0;
+	case WDIOC_GETSTATUS:
+	case WDIOC_GETBOOTSTATUS:
+		return put_user(0, p);
+	case WDIOC_KEEPALIVE:
+		pr_debug("%s: KeepAlive ioctl\n", __func__);
+		if (!watchdog_device.started)
+			return -EINVAL;
+
+		watchdog_keepalive();
+		return 0;
+	case WDIOC_SETTIMEOUT:
+		pr_debug("%s: SetTimeout ioctl\n", __func__);
+
+		if (watchdog_device.started)
+			return -EBUSY;
+
+		if (get_user(val, p))
+			return -EFAULT;
+		new_pre_timeout = val - 15;
+		if (check_timeouts(new_pre_timeout, val)) {
+			pr_warn("%s: Invalid timeout thresholds (timeout: %d, pretimeout: %d)\n",
+				__func__, val, new_pre_timeout);
+			return -EINVAL;
+		}
+
+		pre_timeout = new_pre_timeout;
+		timeout = val;
+		return 0;
+	case WDIOC_GETTIMEOUT:
+		return put_user(timeout, p);
+	case WDIOC_SETOPTIONS:
+		if (get_user(options, p))
+			return -EFAULT;
+
+		if (options & WDIOS_DISABLECARD) {
+			pr_debug("%s: Stopping the watchdog\n", __func__);
+			watchdog_stop();
+			return 0;
+		}
+
+		if (options & WDIOS_ENABLECARD) {
+			pr_debug("%s: Starting the watchdog\n", __func__);
+
+			if (watchdog_device.started)
+				return -EBUSY;
+
+			if (check_timeouts(pre_timeout, timeout)) {
+				pr_warn("%s: Invalid thresholds\n",
+					__func__);
+				return -EINVAL;
+			}
+			if (watchdog_config_and_start(timeout, pre_timeout))
+				return -EINVAL;
+			return 0;
+		}
+		return 0;
+	default:
+		return -ENOTTY;
+	}
+}
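+
+/*
+ * Illustrative user-space sequence against this ioctl interface (a
+ * sketch, not shipped code; error handling omitted):
+ *
+ *	int fd = open("/dev/watchdog", O_WRONLY);
+ *	int t = 90;
+ *	int opt = WDIOS_ENABLECARD;
+ *
+ *	ioctl(fd, WDIOC_SETTIMEOUT, &t);	(timeout 90, pre_timeout 75)
+ *	ioctl(fd, WDIOC_SETOPTIONS, &opt);	(program and start the timer)
+ *	for (;;) {
+ *		ioctl(fd, WDIOC_KEEPALIVE);	(kick within pre_timeout secs)
+ *		sleep(30);
+ *	}
+ */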
+
+static int watchdog_set_reset_type(int reset_type)
+{
+	int ret;
+	struct ipc_wd_on_timeout {
+		u32 reset_type;
+	} ipc_wd_on_timeout = { reset_type };
+
+	ret = rpmsg_send_command(watchdog_instance, IPC_WATCHDOG,
+				 SCU_WATCHDOG_SET_ACTION_ON_TIMEOUT,
+				 (u8 *)&ipc_wd_on_timeout, NULL,
+				 sizeof(ipc_wd_on_timeout), 0);
+	if (ret) {
+		pr_crit("Error setting watchdog action: %d\n", ret);
+		return -EIO;
+	}
+
+	watchdog_device.normal_wd_action = reset_type;
+
+	return 0;
+}
+
+/* Reboot notifier */
+static int reboot_notifier(struct notifier_block *this,
+			   unsigned long code,
+			   void *another_unused)
+{
+	int ret = 0;
+
+	if (code == SYS_RESTART || code == SYS_HALT || code == SYS_POWER_OFF) {
+		pr_warn("Reboot notifier\n");
+
+		if (watchdog_set_appropriate_timeouts())
+			pr_crit("reboot notifier can't set time\n");
+
+		switch (code) {
+		case SYS_RESTART:
+			ret = watchdog_set_reset_type(
+				watchdog_device.reboot_wd_action);
+			break;
+
+		case SYS_HALT:
+		case SYS_POWER_OFF:
+			ret = watchdog_set_reset_type(
+				watchdog_device.shutdown_wd_action);
+			break;
+		}
+		if (ret)
+			pr_err("%s: could not set reset type\n", __func__);
+
+#ifdef CONFIG_DEBUG_FS
+		/* debugfs entry to generate a BUG during
+		 * any shutdown/reboot call */
+		if (watchdog_device.panic_reboot_notifier)
+			BUG();
+#endif
+		/* Don't do instant reset on close */
+		reset_on_release = false;
+
+		/* Kick once again */
+		if (disable_kernel_watchdog == false) {
+			ret = watchdog_keepalive();
+			if (ret)
+				pr_warn("%s: no keep alive\n", __func__);
+
+			/* Don't allow any more keep-alives */
+			watchdog_device.shutdown_flag = true;
+		}
+	}
+	return NOTIFY_DONE;
+}
+
+#ifdef CONFIG_DEBUG_FS
+/* This code triggers a Security Watchdog */
+int open_security(struct inode *i, struct file *f)
+{
+	int ret = 0;
+	void __iomem *ptr;
+	u32 value;
+
+	ptr = ioremap_nocache(SECURITY_WATCHDOG_ADDR, sizeof(u32));
+
+	if (!ptr) {
+		pr_err("cannot open secwd's debugfile\n");
+		ret = -ENODEV;
+		goto error;
+	}
+	value = readl(ptr); /* trigger */
+
+	pr_err("%s: This code should never be reached but it got %x\n",
+		__func__, (unsigned int)value);
+
+	iounmap(ptr);
+
+error:
+	return ret;
+}
+
+static const struct file_operations security_watchdog_fops = {
+	.open = open_security,
+};
+
+static ssize_t kwd_trigger_write(struct file *file, const char __user *buff,
+				 size_t count, loff_t *ppos)
+{
+	pr_debug("kwd_trigger_write\n");
+	panic("Kernel watchdog triggered\n");
+	return 0;
+}
+
+static const struct file_operations kwd_trigger_fops = {
+	.open		= nonseekable_open,
+	.write		= kwd_trigger_write,
+	.llseek		= no_llseek,
+};
+
+static int kwd_reset_type_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static ssize_t kwd_reset_type_read(struct file *file, char __user *buff,
+				   size_t count, loff_t *ppos)
+{
+	ssize_t len;
+	int ret;
+	char str[STRING_RESET_TYPE_MAX_LEN + 1];
+
+	pr_debug("reading reset_type of %x\n",
+		 watchdog_device.normal_wd_action);
+
+	if (*ppos > 0)
+		return 0;
+
+	ret = reset_type_to_string(watchdog_device.normal_wd_action, str);
+	if (ret)
+		return -EINVAL;
+
+	for (len = 0; len < (STRING_RESET_TYPE_MAX_LEN - 1)
+		     && str[len] != '\0'; len++)
+		;
+	str[len++] = '\n';
+	if (copy_to_user(buff, str, len))
+		return -EFAULT;
+
+	*ppos += len;
+	return len;
+}
+
+static ssize_t kwd_reset_type_write(struct file *file, const char __user *buff,
+				    size_t count, loff_t *ppos)
+{
+	char str[STRING_RESET_TYPE_MAX_LEN];
+	unsigned long res;
+	int ret, reset_type;
+
+	if (count > STRING_RESET_TYPE_MAX_LEN) {
+		pr_err("Invalid size: count=%zu\n", count);
+		return -EINVAL;
+	}
+
+	memset(str, 0x00, STRING_RESET_TYPE_MAX_LEN);
+
+	res = copy_from_user((void *)str,
+		(void __user *)buff,
+		(unsigned long)min((unsigned long)(count-1),
+		(unsigned long)(STRING_RESET_TYPE_MAX_LEN-1)));
+
+	if (res) {
+		pr_err("%s: copy from user failed\n", __func__);
+		return -EFAULT;
+	}
+
+	pr_debug("writing reset_type of %s\n", str);
+
+	ret = string_to_reset_type(str, &reset_type);
+	if (ret) {
+		pr_err("Invalid value\n");
+		return -EINVAL;
+	}
+
+	ret = watchdog_set_reset_type(reset_type);
+	if (ret) {
+		pr_err("%s: could not set reset type\n", __func__);
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static const struct file_operations kwd_reset_type_fops = {
+	.open		= nonseekable_open,
+	.release	= kwd_reset_type_release,
+	.read		= kwd_reset_type_read,
+	.write		= kwd_reset_type_write,
+	.llseek		= no_llseek,
+};
+
+static ssize_t kwd_panic_reboot_read(struct file *file, char __user *buff,
+		size_t count, loff_t *ppos)
+{
+#define RET_SIZE 3	/* prints only 2 chars: '0' or '1', plus '\n' */
+	char str[RET_SIZE];
+
+	int res;
+
+	if (*ppos > 0)
+		return 0;
+
+	strcpy(str, watchdog_device.panic_reboot_notifier ? "1\n" : "0\n");
+
+	res = copy_to_user(buff, str, RET_SIZE);
+	if (res) {
+		pr_err("%s: copy to user failed\n", __func__);
+		return -EINVAL;
+	}
+
+	*ppos += RET_SIZE-1;
+	return RET_SIZE-1;
+}
+
+
+static ssize_t kwd_panic_reboot_write(struct file *file,
+		const char __user *buff, size_t count, loff_t *ppos)
+{
+	/* whatever is written, simply set flag to TRUE */
+	watchdog_device.panic_reboot_notifier = true;
+
+	return count;
+}
+
+
+static const struct file_operations kwd_panic_reboot_fops = {
+	.open		= nonseekable_open,
+	.read		= kwd_panic_reboot_read,
+	.write		= kwd_panic_reboot_write,
+	.llseek		= no_llseek,
+};
+
+static int remove_debugfs_entries(void)
+{
+	struct intel_scu_watchdog_dev *dev = &watchdog_device;
+
+	/* /sys/kernel/debug/watchdog */
+	debugfs_remove_recursive(dev->dfs_wd);
+
+	return 0;
+}
+
+static int create_debugfs_entries(void)
+{
+	struct intel_scu_watchdog_dev *dev = &watchdog_device;
+
+	/* /sys/kernel/debug/watchdog */
+	dev->dfs_wd = debugfs_create_dir("watchdog", NULL);
+	if (!dev->dfs_wd) {
+		pr_err("%s: Error, cannot create main dir\n", __func__);
+		goto error;
+	}
+
+	/* /sys/kernel/debug/watchdog/security_watchdog */
+	dev->dfs_secwd = debugfs_create_dir("security_watchdog", dev->dfs_wd);
+	if (!dev->dfs_secwd) {
+		pr_err("%s: Error, cannot create sec dir\n", __func__);
+		goto error;
+	}
+
+	/* /sys/kernel/debug/watchdog/security_watchdog/trigger */
+	dev->dfs_secwd_trigger = debugfs_create_file("trigger",
+				    S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP,
+				    dev->dfs_secwd, NULL,
+				    &security_watchdog_fops);
+
+	if (!dev->dfs_secwd_trigger) {
+		pr_err("%s: Error, cannot create sec file\n", __func__);
+		goto error;
+	}
+
+	/* /sys/kernel/debug/watchdog/kernel_watchdog */
+	dev->dfs_kwd = debugfs_create_dir("kernel_watchdog", dev->dfs_wd);
+	if (!dev->dfs_kwd) {
+		pr_err("%s: Error, cannot create kwd dir\n", __func__);
+		goto error;
+	}
+
+	/* /sys/kernel/debug/watchdog/kernel_watchdog/trigger */
+	dev->dfs_kwd_trigger = debugfs_create_file("trigger",
+				    S_IFREG | S_IWUSR | S_IWGRP,
+				    dev->dfs_kwd, NULL,
+				    &kwd_trigger_fops);
+
+	if (!dev->dfs_kwd_trigger) {
+		pr_err("%s: Error, cannot create kwd trigger file\n",
+			__func__);
+		goto error;
+	}
+
+	/* /sys/kernel/debug/watchdog/kernel_watchdog/reset_type */
+	dev->dfs_kwd_reset_type = debugfs_create_file("reset_type",
+				    S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP,
+				    dev->dfs_kwd, NULL,
+				    &kwd_reset_type_fops);
+
+	if (!dev->dfs_kwd_reset_type) {
+		pr_err("%s: Error, cannot create kwd reset_type file\n",
+			__func__);
+		goto error;
+	}
+
+	/* /sys/kernel/debug/watchdog/kernel_watchdog/panic_reboot_notifier */
+	dev->dfs_kwd_panic_reboot = debugfs_create_file("panic_reboot_notifier",
+					S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP,
+					dev->dfs_kwd, NULL,
+					&kwd_panic_reboot_fops);
+
+	if (!dev->dfs_kwd_panic_reboot) {
+		pr_err("%s: Error, cannot create kwd panic_reboot_notifier file\n",
+			__func__);
+		goto error;
+	}
+
+
+	return 0;
+error:
+	remove_debugfs_entries();
+	return 1;
+}
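+
+/*
+ * Usage sketch for the entries above (assuming debugfs is mounted at
+ * /sys/kernel/debug):
+ *
+ *	echo COLD_BOOT > /sys/kernel/debug/watchdog/kernel_watchdog/reset_type
+ *	cat /sys/kernel/debug/watchdog/kernel_watchdog/reset_type
+ *	echo 1 > /sys/kernel/debug/watchdog/kernel_watchdog/panic_reboot_notifier
+ */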
+#endif  /* CONFIG_DEBUG_FS */
+
+/* Kernel Interfaces */
+static const struct file_operations intel_scu_fops = {
+	.owner          = THIS_MODULE,
+	.llseek         = no_llseek,
+	.write          = intel_scu_write,
+	.unlocked_ioctl = intel_scu_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= intel_scu_ioctl,
+#endif
+	.open           = intel_scu_open,
+	.release        = intel_scu_release,
+};
+
+/* sysfs entry to disable watchdog */
+#ifdef CONFIG_DISABLE_SCU_WATCHDOG
+static ssize_t disable_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t size)
+{
+	int ret;
+
+	if (!strtobool(buf, &disable_kernel_watchdog)) {
+		if (disable_kernel_watchdog) {
+			ret = watchdog_stop();
+			if (ret)
+				pr_err("cannot disable the timer\n");
+		} else {
+			ret = watchdog_config_and_start(timeout, pre_timeout);
+			if (ret)
+				return -EINVAL;
+		}
+	} else {
+		pr_err("got invalid value\n");
+		return -EINVAL;
+	}
+
+	return size;
+}
+
+static ssize_t disable_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	pr_debug("%s\n", __func__);
+	if (disable_kernel_watchdog)
+		return sprintf(buf, "1\n");
+
+	return sprintf(buf, "0\n");
+}
+
+static DEVICE_ATTR(disable, S_IWUSR | S_IRUGO,
+	disable_show, disable_store);
+
+#endif
+
+#define OSNIB_WDOG_COUNTER_MASK 0xF0
+#define OSNIB_WDOG_COUNTER_SHIFT 4
+#define WDOG_COUNTER_MAX_VALUE   3
+static ssize_t counter_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t size)
+{
+	int ret;
+
+	pr_debug("%s\n", __func__);
+
+	ret = sscanf(buf, "%hhu", &osnib_reset);
+	if (ret != 1) {
+		pr_err("cannot get counter value\n");
+		return -EINVAL;
+	}
+	if (osnib_reset > WDOG_COUNTER_MAX_VALUE)
+		osnib_reset = WDOG_COUNTER_MAX_VALUE;
+	osnib_reset = ((osnib_reset << OSNIB_WDOG_COUNTER_SHIFT) &
+				OSNIB_WDOG_COUNTER_MASK);
+	ret = intel_scu_ipc_write_osnib_wd(&osnib_reset);
+
+	if (ret != 0) {
+		pr_err("cannot write OSNIB\n");
+		return -EINVAL;
+	}
+
+	return size;
+}
+
+
+static ssize_t counter_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	unsigned char osnib_read = (unsigned char)0;
+	int ret;
+	pr_debug("%s\n", __func__);
+
+	ret = intel_scu_ipc_read_osnib_wd(&osnib_read);
+
+	if (ret != 0)
+		return -EIO;
+
+	return sprintf(buf, "%d\n", (int)((osnib_read & OSNIB_WDOG_COUNTER_MASK)
+						>> OSNIB_WDOG_COUNTER_SHIFT));
+}
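+
+/*
+ * Worked example of the encoding above: writing "2" to the counter
+ * attribute stores (2 << OSNIB_WDOG_COUNTER_SHIFT) = 0x20, masked with
+ * OSNIB_WDOG_COUNTER_MASK, in the OSNIB watchdog byte; counter_show()
+ * reverses the shift and prints "2". Values above
+ * WDOG_COUNTER_MAX_VALUE (3) are clamped to 3.
+ */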
+
+static int reset_type_to_string(int reset_type, char *string)
+{
+	switch (reset_type) {
+	case SCU_COLD_BOOT_ON_TIMEOUT:
+		strcpy(string, STRING_COLD_BOOT);
+		break;
+	case SCU_COLD_RESET_ON_TIMEOUT:
+		strcpy(string, STRING_COLD_RESET);
+		break;
+	case SCU_COLD_OFF_ON_TIMEOUT:
+		strcpy(string, STRING_COLD_OFF);
+		break;
+#ifdef CONFIG_DEBUG_FS
+	case SCU_DO_NOTHING_ON_TIMEOUT:
+		/* The IPC command DONOTHING is provided
+		 * for debug purposes only.
+		 */
+		strcpy(string, STRING_NONE);
+		break;
+#endif
+	default:
+		return 1;
+	}
+
+	return 0;
+}
+
+static int string_to_reset_type(const char *string, int *reset_type)
+{
+	if (!reset_type || !string)
+		return 1;
+
+	if (strncmp(string, STRING_COLD_RESET,
+			sizeof(STRING_COLD_RESET) - 1) == 0) {
+		*reset_type = SCU_COLD_RESET_ON_TIMEOUT;
+		return 0;
+	}
+	if (strncmp(string, STRING_COLD_BOOT,
+			sizeof(STRING_COLD_BOOT) - 1) == 0) {
+		*reset_type = SCU_COLD_BOOT_ON_TIMEOUT;
+		return 0;
+	}
+	if (strncmp(string, STRING_COLD_OFF,
+			sizeof(STRING_COLD_OFF) - 1) == 0) {
+		*reset_type = SCU_COLD_OFF_ON_TIMEOUT;
+		return 0;
+	}
+#ifdef CONFIG_DEBUG_FS
+	if (strncmp(string, STRING_NONE,
+			sizeof(STRING_NONE) - 1) == 0) {
+		*reset_type = SCU_DO_NOTHING_ON_TIMEOUT;
+		return 0;
+	}
+#endif
+	/* We should not be here, this is an error case */
+	pr_debug("Invalid reset type value\n");
+	return 1;
+}
+
+static ssize_t reboot_ongoing_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t size)
+{
+	int ret;
+
+	pr_debug("%s\n", __func__);
+	/* reprogram timeouts. if error : continue */
+	ret = watchdog_set_appropriate_timeouts();
+	if (ret)
+		pr_err("%s: could not set timeouts\n", __func__);
+
+	/* restore reset type */
+	ret = watchdog_set_reset_type(watchdog_device.reboot_wd_action);
+	if (ret) {
+		pr_err("%s: could not set reset type\n", __func__);
+		return -EINVAL;
+	}
+
+	return size;
+}
+
+static ssize_t shutdown_ongoing_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t size)
+{
+	int ret;
+
+	pr_debug("%s\n", __func__);
+	/* reprogram timeouts. if error : continue */
+	ret = watchdog_set_appropriate_timeouts();
+	if (ret)
+		pr_err("%s: could not set timeouts\n", __func__);
+
+	/* restore reset type */
+	ret = watchdog_set_reset_type(watchdog_device.shutdown_wd_action);
+	if (ret) {
+		pr_err("%s: could not set reset type\n", __func__);
+		return -EINVAL;
+	}
+
+	return size;
+}
+
+static ssize_t normal_config_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	if (reset_type_to_string(watchdog_device.normal_wd_action, buf) != 0)
+		return -EINVAL;
+	strcat(buf, "\n");
+	return strlen(buf);
+}
+
+static ssize_t normal_config_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t size)
+{
+	if (string_to_reset_type(buf, &watchdog_device.normal_wd_action) != 0)
+		return -EINVAL;
+	if (watchdog_set_reset_type(watchdog_device.normal_wd_action) != 0)
+		return -EINVAL;
+
+	return size;
+}
+
+static ssize_t reboot_config_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	if (reset_type_to_string(watchdog_device.reboot_wd_action, buf) != 0)
+		return -EINVAL;
+	strcat(buf, "\n");
+	return strlen(buf);
+}
+
+static ssize_t reboot_config_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t size)
+{
+	if (string_to_reset_type(buf, &watchdog_device.reboot_wd_action) != 0)
+		return -EINVAL;
+
+	return size;
+}
+
+static ssize_t shutdown_config_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	if (reset_type_to_string(watchdog_device.shutdown_wd_action, buf) != 0)
+		return -EINVAL;
+	strcat(buf, "\n");
+	return strlen(buf);
+}
+
+static ssize_t shutdown_config_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t size)
+{
+	if (string_to_reset_type(buf, &watchdog_device.shutdown_wd_action) != 0)
+		return -EINVAL;
+
+	return size;
+}
+
+/* Watchdog behavior depending on system phase */
+static DEVICE_ATTR(normal_config, S_IWUSR | S_IRUGO,
+	normal_config_show, normal_config_store);
+static DEVICE_ATTR(reboot_config, S_IWUSR | S_IRUGO,
+	reboot_config_show, reboot_config_store);
+static DEVICE_ATTR(shutdown_config, S_IWUSR | S_IRUGO,
+	shutdown_config_show, shutdown_config_store);
+static DEVICE_ATTR(reboot_ongoing, S_IWUSR,
+	NULL, reboot_ongoing_store);
+static DEVICE_ATTR(shutdown_ongoing, S_IWUSR,
+	NULL, shutdown_ongoing_store);
+
+/* Reset counter watchdog entry */
+static DEVICE_ATTR(counter, S_IWUSR | S_IRUGO,
+	counter_show, counter_store);
+
+
+int create_watchdog_sysfs_files(void)
+{
+	int ret;
+
+#ifdef CONFIG_DISABLE_SCU_WATCHDOG
+	ret = device_create_file(watchdog_device.miscdev.this_device,
+		&dev_attr_disable);
+	if (ret) {
+		pr_warn("can't register dev file for disable\n");
+		return ret;
+	}
+#endif
+
+	ret = device_create_file(watchdog_device.miscdev.this_device,
+		&dev_attr_normal_config);
+	if (ret) {
+		pr_warn("can't register dev file for normal_config\n");
+		return ret;
+	}
+
+	ret = device_create_file(watchdog_device.miscdev.this_device,
+		&dev_attr_reboot_config);
+	if (ret) {
+		pr_warn("can't register dev file for reboot_config\n");
+		return ret;
+	}
+
+	ret = device_create_file(watchdog_device.miscdev.this_device,
+		&dev_attr_shutdown_config);
+	if (ret) {
+		pr_warn("can't register dev file for shutdown_config\n");
+		return ret;
+	}
+
+	ret = device_create_file(watchdog_device.miscdev.this_device,
+		&dev_attr_counter);
+	if (ret) {
+		pr_warn("can't register dev file for counter\n");
+		return ret;
+	}
+
+	ret = device_create_file(watchdog_device.miscdev.this_device,
+		&dev_attr_reboot_ongoing);
+	if (ret) {
+		pr_warn("can't register dev file for reboot_ongoing\n");
+		return ret;
+	}
+
+	ret = device_create_file(watchdog_device.miscdev.this_device,
+		&dev_attr_shutdown_ongoing);
+	if (ret) {
+		pr_warn("can't register dev file for shutdown_ongoing\n");
+		return ret;
+	}
+	return 0;
+}
+
+int remove_watchdog_sysfs_files(void)
+{
+#ifdef CONFIG_DISABLE_SCU_WATCHDOG
+	device_remove_file(watchdog_device.miscdev.this_device,
+		&dev_attr_disable);
+#endif
+	device_remove_file(watchdog_device.miscdev.this_device,
+		&dev_attr_normal_config);
+
+	device_remove_file(watchdog_device.miscdev.this_device,
+		&dev_attr_reboot_config);
+
+	device_remove_file(watchdog_device.miscdev.this_device,
+		&dev_attr_shutdown_config);
+
+	device_remove_file(watchdog_device.miscdev.this_device,
+		&dev_attr_counter);
+
+	device_remove_file(watchdog_device.miscdev.this_device,
+		&dev_attr_reboot_ongoing);
+
+	device_remove_file(watchdog_device.miscdev.this_device,
+		&dev_attr_shutdown_ongoing);
+	return 0;
+}
+
+static int handle_mrfl_dev_ioapic(int irq)
+{
+	int ret = 0;
+	int ioapic;
+	struct io_apic_irq_attr irq_attr;
+
+	ioapic = mp_find_ioapic(irq);
+	if (ioapic >= 0) {
+		irq_attr.ioapic = ioapic;
+		irq_attr.ioapic_pin = irq;
+		irq_attr.trigger = 1;
+		irq_attr.polarity = 0; /* Active high */
+		io_apic_set_pci_routing(NULL, irq, &irq_attr);
+	} else {
+		pr_warn("cannot find interrupt %d in ioapic\n", irq);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/* Init code */
+static int intel_scu_watchdog_init(void)
+{
+	int ret = 0;
+
+	watchdog_device.normal_wd_action   = SCU_COLD_RESET_ON_TIMEOUT;
+	watchdog_device.reboot_wd_action   = SCU_COLD_RESET_ON_TIMEOUT;
+	watchdog_device.shutdown_wd_action = SCU_COLD_OFF_ON_TIMEOUT;
+
+#ifdef CONFIG_DEBUG_FS
+	watchdog_device.panic_reboot_notifier = false;
+#endif /* CONFIG_DEBUG_FS */
+
+	/* Initially, we are not in shutdown mode */
+	watchdog_device.shutdown_flag = false;
+
+	/* Since timeout can be set as a module param, reset pre_timeout */
+	pre_timeout = timeout - 15;
+
+	/* Check timeouts boot parameter */
+	if (check_timeouts(pre_timeout, timeout)) {
+		pr_err("%s: Invalid timeouts\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Reboot notifier */
+	watchdog_device.reboot_notifier.notifier_call = reboot_notifier;
+	watchdog_device.reboot_notifier.priority = 1;
+	ret = register_reboot_notifier(&watchdog_device.reboot_notifier);
+	if (ret) {
+		pr_crit("cannot register reboot notifier %d\n", ret);
+		goto error_stop_timer;
+	}
+
+	/* Do not publish the watchdog device when disabled (TO BE REMOVED) */
+	if (!disable_kernel_watchdog) {
+		watchdog_device.miscdev.minor = WATCHDOG_MINOR;
+		watchdog_device.miscdev.name = "watchdog";
+		watchdog_device.miscdev.fops = &intel_scu_fops;
+
+		ret = misc_register(&watchdog_device.miscdev);
+		if (ret) {
+			pr_crit("Cannot register miscdev %d, err = %d\n",
+				WATCHDOG_MINOR, ret);
+			goto error_reboot_notifier;
+		}
+	}
+
+	/* MSI #12 handler to dump registers */
+	handle_mrfl_dev_ioapic(EXT_TIMER0_MSI);
+	ret = request_irq((unsigned int)EXT_TIMER0_MSI,
+		watchdog_warning_interrupt,
+		IRQF_SHARED|IRQF_NO_SUSPEND, "watchdog",
+		&watchdog_device);
+	if (ret) {
+		pr_err("error requesting warning irq %d\n",
+		       EXT_TIMER0_MSI);
+		pr_err("error value returned is %d\n", ret);
+		goto error_misc_register;
+	}
+
+#ifdef CONFIG_INTEL_SCU_SOFT_LOCKUP
+	init_timer(&softlock_timer);
+#endif
+
+	if (disable_kernel_watchdog) {
+		pr_err("%s: Disable kernel watchdog\n", __func__);
+
+		/* Make sure timer is stopped */
+		ret = watchdog_stop();
+		if (ret != 0)
+			pr_debug("can't disable timer\n");
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	ret = create_debugfs_entries();
+	if (ret) {
+		pr_err("%s: Error creating debugfs entries\n", __func__);
+		goto error_debugfs_entry;
+	}
+#endif
+
+	watchdog_device.started = false;
+
+	ret = create_watchdog_sysfs_files();
+	if (ret) {
+		pr_err("%s: Error creating sysfs entries\n", __func__);
+		goto error_sysfs_entry;
+	}
+
+	return ret;
+
+error_sysfs_entry:
+	/* Nothing special to do */
+#ifdef CONFIG_DEBUG_FS
+error_debugfs_entry:
+	/* Remove entries done by create function */
+#endif
+
+error_misc_register:
+	misc_deregister(&watchdog_device.miscdev);
+
+error_reboot_notifier:
+	unregister_reboot_notifier(&watchdog_device.reboot_notifier);
+
+error_stop_timer:
+	watchdog_stop();
+
+	return ret;
+}
+
+static void intel_scu_watchdog_exit(void)
+{
+	int ret = 0;
+
+	remove_watchdog_sysfs_files();
+#ifdef CONFIG_DEBUG_FS
+	remove_debugfs_entries();
+#endif
+
+#ifdef CONFIG_INTEL_SCU_SOFT_LOCKUP
+	del_timer_sync(&softlock_timer);
+#endif
+
+	ret = watchdog_stop();
+	if (ret != 0)
+		pr_err("can't disable timer\n");
+
+	misc_deregister(&watchdog_device.miscdev);
+	unregister_reboot_notifier(&watchdog_device.reboot_notifier);
+}
+
+static int watchdog_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed watchdog rpmsg device\n");
+
+	/* Allocate rpmsg instance for watchdog */
+	ret = alloc_rpmsg_instance(rpdev, &watchdog_instance);
+	if (!watchdog_instance) {
+		dev_err(&rpdev->dev, "kzalloc watchdog instance failed\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+	/* Initialize rpmsg instance */
+	init_rpmsg_instance(watchdog_instance);
+	/* Init scu watchdog */
+	ret = intel_scu_watchdog_init();
+
+	if (ret)
+		free_rpmsg_instance(rpdev, &watchdog_instance);
+out:
+	return ret;
+}
+
+static void watchdog_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	intel_scu_watchdog_exit();
+	free_rpmsg_instance(rpdev, &watchdog_instance);
+	dev_info(&rpdev->dev, "Removed watchdog rpmsg device\n");
+}
+
+static void watchdog_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+					int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+		       data, len, true);
+}
+
+static struct rpmsg_device_id watchdog_rpmsg_id_table[] = {
+	{ .name	= "rpmsg_watchdog" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, watchdog_rpmsg_id_table);
+
+static int watchdog_rpmsg_suspend(struct device *dev)
+{
+	int ret;
+
+	pr_debug("%s\n", __func__);
+	ret = watchdog_stop();
+	if (ret)
+		pr_err("cannot stop the watchdog\n");
+	return 0;
+}
+
+static int watchdog_rpmsg_resume(struct device *dev)
+{
+	int ret;
+
+	pr_debug("%s\n", __func__);
+	ret = watchdog_config_and_start(timeout, pre_timeout);
+	if (ret)
+		pr_err("cannot start the watchdog\n");
+	return 0;
+}
+
+static const struct dev_pm_ops wd_pm_ops = {
+	.suspend	= watchdog_rpmsg_suspend,
+	.resume		= watchdog_rpmsg_resume,
+};
+
+static struct rpmsg_driver watchdog_rpmsg = {
+	.id_table	= watchdog_rpmsg_id_table,
+	.probe		= watchdog_rpmsg_probe,
+	.callback	= watchdog_rpmsg_cb,
+	.remove		= watchdog_rpmsg_remove,
+	.drv		= {
+			.name = KBUILD_MODNAME,
+			.owner = THIS_MODULE,
+			.pm	= &wd_pm_ops,
+		},
+};
+
+static int __init watchdog_rpmsg_init(void)
+{
+	if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER) {
+		pr_err("%s: watchdog driver: bad platform\n", __func__);
+		return -ENODEV;
+	}
+
+	return register_rpmsg_driver(&watchdog_rpmsg);
+}
+
+#ifdef MODULE
+module_init(watchdog_rpmsg_init);
+#else
+rootfs_initcall(watchdog_rpmsg_init);
+#endif
+
+static void __exit watchdog_rpmsg_exit(void)
+{
+	unregister_rpmsg_driver(&watchdog_rpmsg);
+}
+module_exit(watchdog_rpmsg_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_AUTHOR("mark.a.allyn@intel.com");
+MODULE_AUTHOR("yannx.puech@intel.com");
+MODULE_DESCRIPTION("Intel SCU Watchdog Device Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_VERSION(WDT_VER);
diff --git a/drivers/watchdog/intel_scu_watchdog_evo.h b/drivers/watchdog/intel_scu_watchdog_evo.h
new file mode 100644
index 0000000..2176974
--- /dev/null
+++ b/drivers/watchdog/intel_scu_watchdog_evo.h
@@ -0,0 +1,68 @@
+/*
+ *      Intel_SCU 0.3:  An Intel SCU IOH Based Watchdog Device
+ *			for Intel part #(s):
+ *				- AF82MP20 PCH
+ *
+ *      Copyright (C) 2009-2013 Intel Corporation. All rights reserved.
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of version 2 of the GNU General
+ *      Public License as published by the Free Software Foundation.
+ *
+ *      This program is distributed in the hope that it will be
+ *      useful, but WITHOUT ANY WARRANTY; without even the implied
+ *      warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ *      PURPOSE.  See the GNU General Public License for more details.
+ *      You should have received a copy of the GNU General Public
+ *      License along with this program; if not, write to the Free
+ *      Software Foundation, Inc., 59 Temple Place - Suite 330,
+ *      Boston, MA  02111-1307, USA.
+ *      The full GNU General Public License is included in this
+ *      distribution in the file called COPYING.
+ *
+ */
+
+#ifndef __INTEL_SCU_WATCHDOG_H
+#define __INTEL_SCU_WATCHDOG_H
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#endif
+
+#define PFX "intel_scu_watchdog: "
+#define WDT_VER "0.3"
+
+#define DEFAULT_PRETIMEOUT 75
+#define DEFAULT_TIMEOUT 90
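+
+/* Note that DEFAULT_PRETIMEOUT equals DEFAULT_TIMEOUT - 15, matching
+ * the pre_timeout = timeout - 15 derivation done in the driver init. */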
+
+/* Value 0 to reset the reset counter */
+#define OSNIB_WRITE_VALUE 0
+
+struct intel_scu_watchdog_dev {
+	ulong driver_open;
+	ulong driver_closed;
+	bool started;
+	struct notifier_block reboot_notifier;
+	struct miscdevice miscdev;
+	bool shutdown_flag;
+	int reset_type;
+	int normal_wd_action;
+	int reboot_wd_action;
+	int shutdown_wd_action;
+#ifdef CONFIG_DEBUG_FS
+	bool panic_reboot_notifier;
+	struct dentry *dfs_wd;
+	struct dentry *dfs_secwd;
+	struct dentry *dfs_secwd_trigger;
+	struct dentry *dfs_kwd;
+	struct dentry *dfs_kwd_trigger;
+	struct dentry *dfs_kwd_reset_type;
+	struct dentry *dfs_kwd_panic_reboot;
+#endif /* CONFIG_DEBUG_FS */
+};
+
+#endif /* __INTEL_SCU_WATCHDOG_H */
diff --git a/firmware/Makefile b/firmware/Makefile
index cbb09ce..6e1dca7 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -135,6 +135,7 @@
 fw-shipped-$(CONFIG_USB_VICAM) += vicam/firmware.fw
 fw-shipped-$(CONFIG_VIDEO_CPIA2) += cpia2/stv0672_vp4.bin
 fw-shipped-$(CONFIG_YAM) += yam/1200.bin yam/9600.bin
+fw-shipped-$(CONFIG_INTEL_MID_REMOTEPROC) += intel_mid/intel_mid_remoteproc.fw
 
 fw-shipped-all := $(fw-shipped-y) $(fw-shipped-m) $(fw-shipped-)
 
diff --git a/firmware/intel_mid/intel_mid_remoteproc.fw.ihex b/firmware/intel_mid/intel_mid_remoteproc.fw.ihex
new file mode 100644
index 0000000..998c4f2
--- /dev/null
+++ b/firmware/intel_mid/intel_mid_remoteproc.fw.ihex
@@ -0,0 +1,280 @@
+:100000007F454C4601010100000000000000000097
+:100010000200030001000000240000003400000082
+:10002000F010000000000000340020000100280053
+:1000300003000200040000000000000000000000B7
+:1000400000000000000000000000000007000000A9
+:10005000040000000000000000000000000000009C
+:100060000000000000000000000000000000000090
+:100070000000000000000000000000000000000080
+:100080000000000000000000000000000000000070
+:100090000000000000000000000000000000000060
+:1000A0000000000000000000000000000000000050
+:1000B0000000000000000000000000000000000040
+:1000C0000000000000000000000000000000000030
+:1000D0000000000000000000000000000000000020
+:1000E0000000000000000000000000000000000010
+:1000F0000000000000000000000000000000000000
+:1001000000000000000000000000000000000000EF
+:1001100000000000000000000000000000000000DF
+:1001200000000000000000000000000000000000CF
+:1001300000000000000000000000000000000000BF
+:1001400000000000000000000000000000000000AF
+:10015000000000000000000000000000000000009F
+:10016000000000000000000000000000000000008F
+:10017000000000000000000000000000000000007F
+:10018000000000000000000000000000000000006F
+:10019000000000000000000000000000000000005F
+:1001A000000000000000000000000000000000004F
+:1001B000000000000000000000000000000000003F
+:1001C000000000000000000000000000000000002F
+:1001D000000000000000000000000000000000001F
+:1001E000000000000000000000000000000000000F
+:1001F00000000000000000000000000000000000FF
+:1002000000000000000000000000000000000000EE
+:1002100000000000000000000000000000000000DE
+:1002200000000000000000000000000000000000CE
+:1002300000000000000000000000000000000000BE
+:1002400000000000000000000000000000000000AE
+:10025000000000000000000000000000000000009E
+:10026000000000000000000000000000000000008E
+:10027000000000000000000000000000000000007E
+:10028000000000000000000000000000000000006E
+:10029000000000000000000000000000000000005E
+:1002A000000000000000000000000000000000004E
+:1002B000000000000000000000000000000000003E
+:1002C000000000000000000000000000000000002E
+:1002D000000000000000000000000000000000001E
+:1002E000000000000000000000000000000000000E
+:1002F00000000000000000000000000000000000FE
+:1003000000000000000000000000000000000000ED
+:1003100000000000000000000000000000000000DD
+:1003200000000000000000000000000000000000CD
+:1003300000000000000000000000000000000000BD
+:1003400000000000000000000000000000000000AD
+:10035000000000000000000000000000000000009D
+:10036000000000000000000000000000000000008D
+:10037000000000000000000000000000000000007D
+:10038000000000000000000000000000000000006D
+:10039000000000000000000000000000000000005D
+:1003A000000000000000000000000000000000004D
+:1003B000000000000000000000000000000000003D
+:1003C000000000000000000000000000000000002D
+:1003D000000000000000000000000000000000001D
+:1003E000000000000000000000000000000000000D
+:1003F00000000000000000000000000000000000FD
+:1004000000000000000000000000000000000000EC
+:1004100000000000000000000000000000000000DC
+:1004200000000000000000000000000000000000CC
+:1004300000000000000000000000000000000000BC
+:1004400000000000000000000000000000000000AC
+:10045000000000000000000000000000000000009C
+:10046000000000000000000000000000000000008C
+:10047000000000000000000000000000000000007C
+:10048000000000000000000000000000000000006C
+:10049000000000000000000000000000000000005C
+:1004A000000000000000000000000000000000004C
+:1004B000000000000000000000000000000000003C
+:1004C000000000000000000000000000000000002C
+:1004D000000000000000000000000000000000001C
+:1004E000000000000000000000000000000000000C
+:1004F00000000000000000000000000000000000FC
+:1005000000000000000000000000000000000000EB
+:1005100000000000000000000000000000000000DB
+:1005200000000000000000000000000000000000CB
+:1005300000000000000000000000000000000000BB
+:1005400000000000000000000000000000000000AB
+:10055000000000000000000000000000000000009B
+:10056000000000000000000000000000000000008B
+:10057000000000000000000000000000000000007B
+:10058000000000000000000000000000000000006B
+:10059000000000000000000000000000000000005B
+:1005A000000000000000000000000000000000004B
+:1005B000000000000000000000000000000000003B
+:1005C000000000000000000000000000000000002B
+:1005D000000000000000000000000000000000001B
+:1005E000000000000000000000000000000000000B
+:1005F00000000000000000000000000000000000FB
+:1006000000000000000000000000000000000000EA
+:1006100000000000000000000000000000000000DA
+:1006200000000000000000000000000000000000CA
+:1006300000000000000000000000000000000000BA
+:1006400000000000000000000000000000000000AA
+:10065000000000000000000000000000000000009A
+:10066000000000000000000000000000000000008A
+:10067000000000000000000000000000000000007A
+:10068000000000000000000000000000000000006A
+:10069000000000000000000000000000000000005A
+:1006A000000000000000000000000000000000004A
+:1006B000000000000000000000000000000000003A
+:1006C000000000000000000000000000000000002A
+:1006D000000000000000000000000000000000001A
+:1006E000000000000000000000000000000000000A
+:1006F00000000000000000000000000000000000FA
+:1007000000000000000000000000000000000000E9
+:1007100000000000000000000000000000000000D9
+:1007200000000000000000000000000000000000C9
+:1007300000000000000000000000000000000000B9
+:1007400000000000000000000000000000000000A9
+:100750000000000000000000000000000000000099
+:100760000000000000000000000000000000000089
+:100770000000000000000000000000000000000079
+:100780000000000000000000000000000000000069
+:100790000000000000000000000000000000000059
+:1007A0000000000000000000000000000000000049
+:1007B0000000000000000000000000000000000039
+:1007C0000000000000000000000000000000000029
+:1007D0000000000000000000000000000000000019
+:1007E0000000000000000000000000000000000009
+:1007F00000000000000000000000000000000000F9
+:1008000000000000000000000000000000000000E8
+:1008100000000000000000000000000000000000D8
+:1008200000000000000000000000000000000000C8
+:1008300000000000000000000000000000000000B8
+:1008400000000000000000000000000000000000A8
+:100850000000000000000000000000000000000098
+:100860000000000000000000000000000000000088
+:100870000000000000000000000000000000000078
+:100880000000000000000000000000000000000068
+:100890000000000000000000000000000000000058
+:1008A0000000000000000000000000000000000048
+:1008B0000000000000000000000000000000000038
+:1008C0000000000000000000000000000000000028
+:1008D0000000000000000000000000000000000018
+:1008E0000000000000000000000000000000000008
+:1008F00000000000000000000000000000000000F8
+:1009000000000000000000000000000000000000E7
+:1009100000000000000000000000000000000000D7
+:1009200000000000000000000000000000000000C7
+:1009300000000000000000000000000000000000B7
+:1009400000000000000000000000000000000000A7
+:100950000000000000000000000000000000000097
+:100960000000000000000000000000000000000087
+:100970000000000000000000000000000000000077
+:100980000000000000000000000000000000000067
+:100990000000000000000000000000000000000057
+:1009A0000000000000000000000000000000000047
+:1009B0000000000000000000000000000000000037
+:1009C0000000000000000000000000000000000027
+:1009D0000000000000000000000000000000000017
+:1009E0000000000000000000000000000000000007
+:1009F00000000000000000000000000000000000F7
+:100A000000000000000000000000000000000000E6
+:100A100000000000000000000000000000000000D6
+:100A200000000000000000000000000000000000C6
+:100A300000000000000000000000000000000000B6
+:100A400000000000000000000000000000000000A6
+:100A50000000000000000000000000000000000096
+:100A60000000000000000000000000000000000086
+:100A70000000000000000000000000000000000076
+:100A80000000000000000000000000000000000066
+:100A90000000000000000000000000000000000056
+:100AA0000000000000000000000000000000000046
+:100AB0000000000000000000000000000000000036
+:100AC0000000000000000000000000000000000026
+:100AD0000000000000000000000000000000000016
+:100AE0000000000000000000000000000000000006
+:100AF00000000000000000000000000000000000F6
+:100B000000000000000000000000000000000000E5
+:100B100000000000000000000000000000000000D5
+:100B200000000000000000000000000000000000C5
+:100B300000000000000000000000000000000000B5
+:100B400000000000000000000000000000000000A5
+:100B50000000000000000000000000000000000095
+:100B60000000000000000000000000000000000085
+:100B70000000000000000000000000000000000075
+:100B80000000000000000000000000000000000065
+:100B90000000000000000000000000000000000055
+:100BA0000000000000000000000000000000000045
+:100BB0000000000000000000000000000000000035
+:100BC0000000000000000000000000000000000025
+:100BD0000000000000000000000000000000000015
+:100BE0000000000000000000000000000000000005
+:100BF00000000000000000000000000000000000F5
+:100C000000000000000000000000000000000000E4
+:100C100000000000000000000000000000000000D4
+:100C200000000000000000000000000000000000C4
+:100C300000000000000000000000000000000000B4
+:100C400000000000000000000000000000000000A4
+:100C50000000000000000000000000000000000094
+:100C60000000000000000000000000000000000084
+:100C70000000000000000000000000000000000074
+:100C80000000000000000000000000000000000064
+:100C90000000000000000000000000000000000054
+:100CA0000000000000000000000000000000000044
+:100CB0000000000000000000000000000000000034
+:100CC0000000000000000000000000000000000024
+:100CD0000000000000000000000000000000000014
+:100CE0000000000000000000000000000000000004
+:100CF00000000000000000000000000000000000F4
+:100D000000000000000000000000000000000000E3
+:100D100000000000000000000000000000000000D3
+:100D200000000000000000000000000000000000C3
+:100D300000000000000000000000000000000000B3
+:100D400000000000000000000000000000000000A3
+:100D50000000000000000000000000000000000093
+:100D60000000000000000000000000000000000083
+:100D70000000000000000000000000000000000073
+:100D80000000000000000000000000000000000063
+:100D90000000000000000000000000000000000053
+:100DA0000000000000000000000000000000000043
+:100DB0000000000000000000000000000000000033
+:100DC0000000000000000000000000000000000023
+:100DD0000000000000000000000000000000000013
+:100DE0000000000000000000000000000000000003
+:100DF00000000000000000000000000000000000F3
+:100E000000000000000000000000000000000000E2
+:100E100000000000000000000000000000000000D2
+:100E200000000000000000000000000000000000C2
+:100E300000000000000000000000000000000000B2
+:100E400000000000000000000000000000000000A2
+:100E50000000000000000000000000000000000092
+:100E60000000000000000000000000000000000082
+:100E70000000000000000000000000000000000072
+:100E80000000000000000000000000000000000062
+:100E90000000000000000000000000000000000052
+:100EA0000000000000000000000000000000000042
+:100EB0000000000000000000000000000000000032
+:100EC0000000000000000000000000000000000022
+:100ED0000000000000000000000000000000000012
+:100EE0000000000000000000000000000000000002
+:100EF00000000000000000000000000000000000F2
+:100F000000000000000000000000000000000000E1
+:100F100000000000000000000000000000000000D1
+:100F200000000000000000000000000000000000C1
+:100F300000000000000000000000000000000000B1
+:100F400000000000000000000000000000000000A1
+:100F50000000000000000000000000000000000091
+:100F60000000000000000000000000000000000081
+:100F70000000000000000000000000000000000071
+:100F80000000000000000000000000000000000061
+:100F90000000000000000000000000000000000051
+:100FA0000000000000000000000000000000000041
+:100FB0000000000000000000000000000000000031
+:100FC0000000000000000000000000000000000021
+:100FD0000000000000000000000000000000000011
+:100FE0000000000000000000000000000000000001
+:100FF00000000000000000000000000000000000F1
+:1010000000000000000000000000000000000000E0
+:1010100000000000000000000000000000000000D0
+:1010200000000000000000000000000000000000C0
+:1010300000000000000000000000000000000000B0
+:10104000010000000200000000000000000000009D
+:10105000180000005C000000030000000700000012
+:10106000010000000100000001000000000000007D
+:10107000000200000000000001000000000100006C
+:10108000000000000000000000000000010000005F
+:10109000000100000000000000000000000000004F
+:1010A000000000000000000000020000000000003E
+:1010B0000000000000000000000000000000000030
+:1010C0000000000000000000000000000000000020
+:1010D00000000000002E7368737472746162002E49
+:1010E0007265736F757263655F7461626C65000031
+:1010F00000000000000000000000000000000000F0
+:1011000000000000000000000000000000000000DF
+:1011100000000000000000000B00000007000000BD
+:101120000300000040000000401000009400000098
+:10113000000000000000000020000000000000008F
+:10114000010000000300000000000000000000009B
+:10115000D41000001B000000000000000000000090
+:08116000010000000000000086
+:00000001FF
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 055562c..9ff073f 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -148,13 +148,14 @@
  * @offset: offset in the page
  */
 
-static void v9fs_invalidate_page(struct page *page, unsigned long offset)
+static void v9fs_invalidate_page(struct page *page, unsigned int offset,
+				 unsigned int length)
 {
 	/*
 	 * If called with zero offset, we should release
 	 * the private state associated with the page
 	 */
-	if (offset == 0)
+	if (offset == 0 && length == PAGE_CACHE_SIZE)
 		v9fs_fscache_invalidate_page(page);
 }
 
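
The hunk above follows the VFS change of the ->invalidatepage address-space operation from a single offset to an (offset, length) pair, so callers can invalidate just a sub-range of a page. A minimal sketch of the resulting convention, mirroring the 9p hunk; myfs_invalidate_page and myfs_release_private are hypothetical names:

	static void myfs_invalidate_page(struct page *page, unsigned int offset,
					 unsigned int length)
	{
		/* full-page invalidation: drop the per-page private state */
		if (offset == 0 && length == PAGE_CACHE_SIZE)
			myfs_release_private(page);	/* hypothetical helper */
		/* a partial range [offset, offset + length) keeps the state */
	}
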
diff --git a/fs/Kconfig b/fs/Kconfig
index c229f82..056db61 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -193,6 +193,7 @@
 source "fs/befs/Kconfig"
 source "fs/bfs/Kconfig"
 source "fs/efs/Kconfig"
+source "fs/yaffs2/Kconfig"
 source "fs/jffs2/Kconfig"
 # UBIFS File system configuration
 source "fs/ubifs/Kconfig"
@@ -212,6 +213,7 @@
 source "fs/exofs/Kconfig"
 source "fs/f2fs/Kconfig"
 source "fs/efivarfs/Kconfig"
+source "fs/aufs/Kconfig"
 
 endif # MISC_FILESYSTEMS
 
diff --git a/fs/Makefile b/fs/Makefile
index 4fe6df3..04e97df 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -126,3 +126,5 @@
 obj-$(CONFIG_CEPH_FS)		+= ceph/
 obj-$(CONFIG_PSTORE)		+= pstore/
 obj-$(CONFIG_EFIVAR_FS)		+= efivarfs/
+obj-$(CONFIG_AUFS_FS)           += aufs/
+obj-$(CONFIG_YAFFS_FS)		+= yaffs2/
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 0ff4bae..479ef5a 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -213,6 +213,7 @@
 
 static int adfs_remount(struct super_block *sb, int *flags, char *data)
 {
+	sync_filesystem(sb);
 	*flags |= MS_NODIRATIME;
 	return parse_options(sb, data);
 }
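
This and the later remount hunks apply one pattern: call sync_filesystem(sb) before the new options take effect, so dirty data is written back under the old mount options. A generic sketch, with parse_options standing in for each filesystem's own option parser:

	static int myfs_remount(struct super_block *sb, int *flags, char *data)
	{
		sync_filesystem(sb);	/* flush under the old options first */
		*flags |= MS_NODIRATIME;
		return parse_options(sb, data);	/* fs-specific, as in the hunk */
	}
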
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 45161a8..01c6e07 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -549,6 +549,7 @@
 
 	pr_debug("AFFS: remount(flags=0x%x,opts=\"%s\")\n",*flags,data);
 
+	sync_filesystem(sb);
 	*flags |= MS_NODIRATIME;
 
 	memcpy(volume, sbi->s_volume, 32);
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 8f6e923..66d50fe 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -19,7 +19,8 @@
 #include "internal.h"
 
 static int afs_readpage(struct file *file, struct page *page);
-static void afs_invalidatepage(struct page *page, unsigned long offset);
+static void afs_invalidatepage(struct page *page, unsigned int offset,
+			       unsigned int length);
 static int afs_releasepage(struct page *page, gfp_t gfp_flags);
 static int afs_launder_page(struct page *page);
 
@@ -310,16 +311,17 @@
  * - release a page and clean up its private data if offset is 0 (indicating
  *   the entire page)
  */
-static void afs_invalidatepage(struct page *page, unsigned long offset)
+static void afs_invalidatepage(struct page *page, unsigned int offset,
+			       unsigned int length)
 {
 	struct afs_writeback *wb = (struct afs_writeback *) page_private(page);
 
-	_enter("{%lu},%lu", page->index, offset);
+	_enter("{%lu},%u,%u", page->index, offset, length);
 
 	BUG_ON(!PageLocked(page));
 
 	/* we clean up only if the entire page is being invalidated */
-	if (offset == 0) {
+	if (offset == 0 && length == PAGE_CACHE_SIZE) {
 #ifdef CONFIG_AFS_FSCACHE
 		if (PageFsCache(page)) {
 			struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
diff --git a/fs/aufs/Kconfig b/fs/aufs/Kconfig
new file mode 100644
index 0000000..c648d40
--- /dev/null
+++ b/fs/aufs/Kconfig
@@ -0,0 +1,202 @@
+config AUFS_FS
+	tristate "Aufs (Advanced multi layered unification filesystem) support"
+	help
+	Aufs is a stackable unification filesystem such as Unionfs,
+	which unifies several directories and provides a merged single
+	directory.
+	In the early days, aufs was an entirely re-designed and
+	re-implemented version of the Unionfs 1.x series. By introducing
+	many original ideas, approaches and improvements, it has become
+	totally different from Unionfs while keeping the basic features.
+
+if AUFS_FS
+choice
+	prompt "Maximum number of branches"
+	default AUFS_BRANCH_MAX_127
+	help
+	Specifies the maximum number of branches (or member directories)
+	in a single aufs. A larger value consumes more system
+	resources and has a minor impact on performance.
+config AUFS_BRANCH_MAX_127
+	bool "127"
+	help
+	Specifies the maximum number of branches (or member directories)
+	in a single aufs. A larger value consumes more system
+	resources and has a minor impact on performance.
+config AUFS_BRANCH_MAX_511
+	bool "511"
+	help
+	Specifies the maximum number of branches (or member directories)
+	in a single aufs. A larger value consumes more system
+	resources and has a minor impact on performance.
+config AUFS_BRANCH_MAX_1023
+	bool "1023"
+	help
+	Specifies the maximum number of branches (or member directories)
+	in a single aufs. A larger value consumes more system
+	resources and has a minor impact on performance.
+config AUFS_BRANCH_MAX_32767
+	bool "32767"
+	help
+	Specifies the maximum number of branches (or member directories)
+	in a single aufs. A larger value consumes more system
+	resources and has a minor impact on performance.
+endchoice
+
+config AUFS_SBILIST
+	bool
+	depends on AUFS_MAGIC_SYSRQ || PROC_FS
+	default y
+	help
+	Automatic configuration for internal use.
+	When aufs supports Magic SysRq or /proc, it is enabled automatically.
+
+config AUFS_HNOTIFY
+	bool "Detect direct branch access (bypassing aufs)"
+	help
+	If you want to modify files on branches directly, e.g. bypassing
+	aufs, and want aufs to fully detect those changes, then enable
+	this option and use the 'udba=notify' mount option.
+	Currently there is only one available configuration, "fsnotify".
+	It will have a negative impact on performance.
+	See details in aufs.5.
+
+choice
+	prompt "method" if AUFS_HNOTIFY
+	default AUFS_HFSNOTIFY
+config AUFS_HFSNOTIFY
+	bool "fsnotify"
+	select FSNOTIFY
+endchoice
+
+config AUFS_EXPORT
+	bool "NFS-exportable aufs"
+	depends on EXPORTFS
+	help
+	If you want to export your mounted aufs via NFS, then enable this
+	option. There are several requirements for this configuration.
+	See details in aufs.5.
+
+config AUFS_INO_T_64
+	bool
+	depends on AUFS_EXPORT
+	depends on 64BIT && !(ALPHA || S390)
+	default y
+	help
+	Automatic configuration for internal use.
+	/* typedef unsigned long/int __kernel_ino_t */
+	/* alpha and s390x are int */
+
+config AUFS_RDU
+	bool "Readdir in userspace"
+	help
+	Aufs has two methods to provide a merged view for a directory:
+	by a user-space library and by kernel-space natively. The latter
+	is always enabled but sometimes large and slow.
+	If you enable this option, install the library from the
+	aufs2-util package, and set some environment variables for your
+	readdir(3); then the work will be handled in user-space, which
+	generally shows better performance.
+	See details in aufs.5.
+
+config AUFS_PROC_MAP
+	bool "support for /proc/maps and lsof(1)"
+	depends on PROC_FS
+	help
+	When you issue mmap(2) in aufs, it is actually a direct mmap(2)
+	call to the file on the branch fs since the file in aufs is
+	purely virtual. And the file path printed in /proc/maps (and
+	others) will be the path on the branch fs. In most cases, it
+	does no harm. But some utilities like lsof(1) may get confused
+	since the utility or user may expect the file path in aufs to be
+	printed.
+	To address this issue, aufs provides a patch which introduces a
+	new member called vm_prfile into struct vm_area_struct. The patch
+	is meaningless without enabling this configuration since nobody
+	sets the new vm_prfile member.
+	If you don't apply the patch, then enabling this configuration
+	will cause a compile error.
+	This approach is fragile since, if someone else makes changes
+	around vm_file, vm_prfile may not work anymore. As a workaround
+	for such a case, aufs provides this configuration. If you disable
+	it, then lsof(1) may produce an incorrect result, but the problem
+	will be gone even if the aufs patch is applied (I hope).
+
+config AUFS_SP_IATTR
+	bool "Respect the attributes (mtime/ctime mainly) of special files"
+	help
+	When you write something to a special file, some attributes of it
+	(mtime/ctime mainly) may be updated. Generally such updates are
+	less important (actually some device drivers and NFS ignore
+	them). But some applications (such as test programs) require
+	such updates. If you need these updates, then enable this
+	configuration, which introduces some overhead.
+	Currently this configuration handles FIFO only.
+
+config AUFS_SHWH
+	bool "Show whiteouts"
+	help
+	If you want to make the whiteouts in aufs visible, then enable
+	this option and specify the 'shwh' mount option. Although it may
+	sound philosophical, technically it simply shows the name of a
+	whiteout while keeping its behaviour.
+
+config AUFS_BR_RAMFS
+	bool "Ramfs (initramfs/rootfs) as an aufs branch"
+	help
+	If you want to use ramfs as an aufs branch fs, then enable this
+	option. Generally tmpfs is recommended.
+	Aufs prohibits ramfs from being a branch fs by default, because
+	initramfs generally becomes unusable after switch_root or
+	similar. If you set initramfs as an aufs branch and boot your
+	system by switch_root, you will easily run into a problem since
+	the files in initramfs may be inaccessible.
+	Unless you are going to use ramfs as an aufs branch fs without
+	switch_root or similar, leave it N.
+
+config AUFS_BR_FUSE
+	bool "Fuse fs as an aufs branch"
+	depends on FUSE_FS
+	select AUFS_POLL
+	help
+	If you want to use a fuse-based userspace filesystem as an aufs
+	branch fs, then enable this option.
+	It adds the internal poll(2) operation, which is
+	implemented by fuse only (currently).
+
+config AUFS_POLL
+	bool
+	help
+	Automatic configuration for internal use.
+
+config AUFS_BR_HFSPLUS
+	bool "Hfsplus as an aufs branch"
+	depends on HFSPLUS_FS
+	default y
+	help
+	If you want to use hfsplus fs as an aufs branch fs, then enable
+	this option. This option introduces a small overhead when
+	copying up a file on hfsplus.
+
+config AUFS_BDEV_LOOP
+	bool
+	depends on BLK_DEV_LOOP
+	default y
+	help
+	Automatic configuration for internal use.
+	Convert =[ym] into =y.
+
+config AUFS_DEBUG
+	bool "Debug aufs"
+	help
+	Enable this to compile aufs internal debug code.
+	It will have a negative impact on performance.
+
+config AUFS_MAGIC_SYSRQ
+	bool
+	depends on AUFS_DEBUG && MAGIC_SYSRQ
+	default y
+	help
+	Automatic configuration for internal use.
+	When aufs supports Magic SysRq, it is enabled automatically.
+endif
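
The branch-maximum choice above presumably maps to a compile-time constant plus an index type just wide enough to hold it; the actual definition lives in include/linux/aufs_type.h, which this section does not show. A sketch under that assumption:

	#ifdef CONFIG_AUFS_BRANCH_MAX_127
	typedef int8_t aufs_bindex_t;		/* fits -1..127 */
	#define AUFS_BRANCH_MAX 127
	#else
	typedef int16_t aufs_bindex_t;
	# ifdef CONFIG_AUFS_BRANCH_MAX_511
	#  define AUFS_BRANCH_MAX 511
	# elif defined(CONFIG_AUFS_BRANCH_MAX_1023)
	#  define AUFS_BRANCH_MAX 1023
	# elif defined(CONFIG_AUFS_BRANCH_MAX_32767)
	#  define AUFS_BRANCH_MAX 32767
	# endif
	#endif

This would be why the help text warns about resource consumption: a wider index type enlarges every per-branch array in the superblock, dentry and inode info structures.
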
diff --git a/fs/aufs/Makefile b/fs/aufs/Makefile
new file mode 100644
index 0000000..500d6a6
--- /dev/null
+++ b/fs/aufs/Makefile
@@ -0,0 +1,42 @@
+
+include ${src}/magic.mk
+ifeq (${CONFIG_AUFS_FS},m)
+include ${src}/conf.mk
+endif
+-include ${src}/priv_def.mk
+
+# cf. include/linux/kernel.h
+# enable pr_debug
+ccflags-y += -DDEBUG
+# sparse requires the full pathname
+ifdef M
+ccflags-y += -include ${M}/../../include/linux/aufs_type.h
+else
+ccflags-y += -include ${srctree}/include/linux/aufs_type.h
+endif
+
+obj-$(CONFIG_AUFS_FS) += aufs.o
+aufs-y := module.o sbinfo.o super.o branch.o xino.o sysaufs.o opts.o \
+	wkq.o vfsub.o dcsub.o \
+	cpup.o whout.o wbr_policy.o \
+	dinfo.o dentry.o \
+	dynop.o \
+	finfo.o file.o f_op.o \
+	dir.o vdir.o \
+	iinfo.o inode.o i_op.o i_op_add.o i_op_del.o i_op_ren.o \
+	mvdown.o ioctl.o
+
+# all are boolean
+aufs-$(CONFIG_PROC_FS) += procfs.o plink.o
+aufs-$(CONFIG_SYSFS) += sysfs.o
+aufs-$(CONFIG_DEBUG_FS) += dbgaufs.o
+aufs-$(CONFIG_AUFS_BDEV_LOOP) += loop.o
+aufs-$(CONFIG_AUFS_HNOTIFY) += hnotify.o
+aufs-$(CONFIG_AUFS_HFSNOTIFY) += hfsnotify.o
+aufs-$(CONFIG_AUFS_EXPORT) += export.o
+aufs-$(CONFIG_AUFS_POLL) += poll.o
+aufs-$(CONFIG_AUFS_RDU) += rdu.o
+aufs-$(CONFIG_AUFS_SP_IATTR) += f_op_sp.o
+aufs-$(CONFIG_AUFS_BR_HFSPLUS) += hfsplus.o
+aufs-$(CONFIG_AUFS_DEBUG) += debug.o
+aufs-$(CONFIG_AUFS_MAGIC_SYSRQ) += sysrq.o
diff --git a/fs/aufs/aufs.h b/fs/aufs/aufs.h
new file mode 100644
index 0000000..8adb4f2
--- /dev/null
+++ b/fs/aufs/aufs.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * all header files
+ */
+
+#ifndef __AUFS_H__
+#define __AUFS_H__
+
+#ifdef __KERNEL__
+
+#define AuStub(type, name, body, ...) \
+	static inline type name(__VA_ARGS__) { body; }
+
+#define AuStubVoid(name, ...) \
+	AuStub(void, name, , __VA_ARGS__)
+#define AuStubInt0(name, ...) \
+	AuStub(int, name, return 0, __VA_ARGS__)
+
+#include "debug.h"
+
+#include "branch.h"
+#include "cpup.h"
+#include "dcsub.h"
+#include "dbgaufs.h"
+#include "dentry.h"
+#include "dir.h"
+#include "dynop.h"
+#include "file.h"
+#include "fstype.h"
+#include "inode.h"
+#include "loop.h"
+#include "module.h"
+#include "opts.h"
+#include "rwsem.h"
+#include "spl.h"
+#include "super.h"
+#include "sysaufs.h"
+#include "vfsub.h"
+#include "whout.h"
+#include "wkq.h"
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_H__ */
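
The AuStub macros above generate one-line inline stubs, used to compile out optional features without an #ifdef at every call site. With hypothetical names, AuStubVoid(au_example_fin, struct super_block *sb) and AuStubInt0(au_example_init, struct super_block *sb) expand to:

	static inline void au_example_fin(struct super_block *sb) { ; }
	static inline int au_example_init(struct super_block *sb) { return 0; }
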
diff --git a/fs/aufs/branch.c b/fs/aufs/branch.c
new file mode 100644
index 0000000..5819809
--- /dev/null
+++ b/fs/aufs/branch.c
@@ -0,0 +1,1213 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * branch management
+ */
+
+#include <linux/compat.h>
+#include <linux/statfs.h>
+#include "aufs.h"
+
+/*
+ * free a single branch
+ */
+
+/* prohibit rmdir to the root of the branch */
+/* todo: another new flag? */
+static void au_br_dflags_force(struct au_branch *br)
+{
+	struct dentry *h_dentry;
+
+	h_dentry = au_br_dentry(br);
+	spin_lock(&h_dentry->d_lock);
+	br->br_dflags = h_dentry->d_flags & DCACHE_MOUNTED;
+	h_dentry->d_flags |= DCACHE_MOUNTED;
+	spin_unlock(&h_dentry->d_lock);
+}
+
+/* restore its d_flags */
+static void au_br_dflags_restore(struct au_branch *br)
+{
+	struct dentry *h_dentry;
+
+	if (br->br_dflags)
+		return;
+
+	h_dentry = au_br_dentry(br);
+	spin_lock(&h_dentry->d_lock);
+	h_dentry->d_flags &= ~DCACHE_MOUNTED;
+	spin_unlock(&h_dentry->d_lock);
+}
+
+static void au_br_do_free(struct au_branch *br)
+{
+	int i;
+	struct au_wbr *wbr;
+	struct au_dykey **key;
+
+	au_hnotify_fin_br(br);
+
+	if (br->br_xino.xi_file)
+		fput(br->br_xino.xi_file);
+	mutex_destroy(&br->br_xino.xi_nondir_mtx);
+
+	AuDebugOn(atomic_read(&br->br_count));
+
+	wbr = br->br_wbr;
+	if (wbr) {
+		for (i = 0; i < AuBrWh_Last; i++)
+			dput(wbr->wbr_wh[i]);
+		AuDebugOn(atomic_read(&wbr->wbr_wh_running));
+		AuRwDestroy(&wbr->wbr_wh_rwsem);
+	}
+
+	key = br->br_dykey;
+	for (i = 0; i < AuBrDynOp; i++, key++)
+		if (*key)
+			au_dy_put(*key);
+		else
+			break;
+
+	au_br_dflags_restore(br);
+
+	/* recursive lock, s_umount of branch's */
+	lockdep_off();
+	path_put(&br->br_path);
+	lockdep_on();
+	kfree(wbr);
+	kfree(br);
+}
+
+/*
+ * frees all branches
+ */
+void au_br_free(struct au_sbinfo *sbinfo)
+{
+	aufs_bindex_t bmax;
+	struct au_branch **br;
+
+	AuRwMustWriteLock(&sbinfo->si_rwsem);
+
+	bmax = sbinfo->si_bend + 1;
+	br = sbinfo->si_branch;
+	while (bmax--)
+		au_br_do_free(*br++);
+}
+
+/*
+ * find the index of a branch which is specified by @br_id.
+ */
+int au_br_index(struct super_block *sb, aufs_bindex_t br_id)
+{
+	aufs_bindex_t bindex, bend;
+
+	bend = au_sbend(sb);
+	for (bindex = 0; bindex <= bend; bindex++)
+		if (au_sbr_id(sb, bindex) == br_id)
+			return bindex;
+	return -1;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * add a branch
+ */
+
+static int test_overlap(struct super_block *sb, struct dentry *h_adding,
+			struct dentry *h_root)
+{
+	if (unlikely(h_adding == h_root
+		     || au_test_loopback_overlap(sb, h_adding)))
+		return 1;
+	if (h_adding->d_sb != h_root->d_sb)
+		return 0;
+	return au_test_subdir(h_adding, h_root)
+		|| au_test_subdir(h_root, h_adding);
+}
+
+/*
+ * returns a newly allocated branch. @new_nbranch is a number of branches
+ * after adding a branch.
+ */
+static struct au_branch *au_br_alloc(struct super_block *sb, int new_nbranch,
+				     int perm)
+{
+	struct au_branch *add_branch;
+	struct dentry *root;
+	int err;
+
+	err = -ENOMEM;
+	root = sb->s_root;
+	add_branch = kmalloc(sizeof(*add_branch), GFP_NOFS);
+	if (unlikely(!add_branch))
+		goto out;
+
+	err = au_hnotify_init_br(add_branch, perm);
+	if (unlikely(err))
+		goto out_br;
+
+	add_branch->br_wbr = NULL;
+	if (au_br_writable(perm)) {
+		/* may be freed separately at changing the branch permission */
+		add_branch->br_wbr = kmalloc(sizeof(*add_branch->br_wbr),
+					     GFP_NOFS);
+		if (unlikely(!add_branch->br_wbr))
+			goto out_hnotify;
+	}
+
+	err = au_sbr_realloc(au_sbi(sb), new_nbranch);
+	if (!err)
+		err = au_di_realloc(au_di(root), new_nbranch);
+	if (!err)
+		err = au_ii_realloc(au_ii(root->d_inode), new_nbranch);
+	if (!err)
+		return add_branch; /* success */
+
+	kfree(add_branch->br_wbr);
+
+out_hnotify:
+	au_hnotify_fin_br(add_branch);
+out_br:
+	kfree(add_branch);
+out:
+	return ERR_PTR(err);
+}
+
+/*
+ * test if the branch permission is legal or not.
+ */
+static int test_br(struct inode *inode, int brperm, char *path)
+{
+	int err;
+
+	err = (au_br_writable(brperm) && IS_RDONLY(inode));
+	if (!err)
+		goto out;
+
+	err = -EINVAL;
+	pr_err("write permission for readonly mount or inode, %s\n", path);
+
+out:
+	return err;
+}
+
+/*
+ * returns:
+ * 0: success, the caller will add it
+ * plus: success, it is already unified, the caller should ignore it
+ * minus: error
+ */
+static int test_add(struct super_block *sb, struct au_opt_add *add, int remount)
+{
+	int err;
+	aufs_bindex_t bend, bindex;
+	struct dentry *root;
+	struct inode *inode, *h_inode;
+
+	root = sb->s_root;
+	bend = au_sbend(sb);
+	if (unlikely(bend >= 0
+		     && au_find_dbindex(root, add->path.dentry) >= 0)) {
+		err = 1;
+		if (!remount) {
+			err = -EINVAL;
+			pr_err("%s duplicated\n", add->pathname);
+		}
+		goto out;
+	}
+
+	err = -ENOSPC; /* -E2BIG; */
+	if (unlikely(AUFS_BRANCH_MAX <= add->bindex
+		     || AUFS_BRANCH_MAX - 1 <= bend)) {
+		pr_err("number of branches exceeded %s\n", add->pathname);
+		goto out;
+	}
+
+	err = -EDOM;
+	if (unlikely(add->bindex < 0 || bend + 1 < add->bindex)) {
+		pr_err("bad index %d\n", add->bindex);
+		goto out;
+	}
+
+	inode = add->path.dentry->d_inode;
+	err = -ENOENT;
+	if (unlikely(!inode->i_nlink)) {
+		pr_err("no existence %s\n", add->pathname);
+		goto out;
+	}
+
+	err = -EINVAL;
+	if (unlikely(inode->i_sb == sb)) {
+		pr_err("%s must be outside\n", add->pathname);
+		goto out;
+	}
+
+	if (unlikely(au_test_fs_unsuppoted(inode->i_sb))) {
+		pr_err("unsupported filesystem, %s (%s)\n",
+		       add->pathname, au_sbtype(inode->i_sb));
+		goto out;
+	}
+
+	err = test_br(add->path.dentry->d_inode, add->perm, add->pathname);
+	if (unlikely(err))
+		goto out;
+
+	if (bend < 0)
+		return 0; /* success */
+
+	err = -EINVAL;
+	for (bindex = 0; bindex <= bend; bindex++)
+		if (unlikely(test_overlap(sb, add->path.dentry,
+					  au_h_dptr(root, bindex)))) {
+			pr_err("%s is overlapped\n", add->pathname);
+			goto out;
+		}
+
+	err = 0;
+	if (au_opt_test(au_mntflags(sb), WARN_PERM)) {
+		h_inode = au_h_dptr(root, 0)->d_inode;
+		if ((h_inode->i_mode & S_IALLUGO) != (inode->i_mode & S_IALLUGO)
+		    || !uid_eq(h_inode->i_uid, inode->i_uid)
+		    || !gid_eq(h_inode->i_gid, inode->i_gid))
+			pr_warn("uid/gid/perm %s %u/%u/0%o, %u/%u/0%o\n",
+				add->pathname,
+				i_uid_read(inode), i_gid_read(inode),
+				(inode->i_mode & S_IALLUGO),
+				i_uid_read(h_inode), i_gid_read(h_inode),
+				(h_inode->i_mode & S_IALLUGO));
+	}
+
+out:
+	return err;
+}
+
+/*
+ * initialize or clean the whiteouts for an adding branch
+ */
+static int au_br_init_wh(struct super_block *sb, struct au_branch *br,
+			 int new_perm)
+{
+	int err, old_perm;
+	aufs_bindex_t bindex;
+	struct mutex *h_mtx;
+	struct au_wbr *wbr;
+	struct au_hinode *hdir;
+
+	err = vfsub_mnt_want_write(au_br_mnt(br));
+	if (unlikely(err))
+		goto out;
+
+	wbr = br->br_wbr;
+	old_perm = br->br_perm;
+	br->br_perm = new_perm;
+	hdir = NULL;
+	h_mtx = NULL;
+	bindex = au_br_index(sb, br->br_id);
+	if (0 <= bindex) {
+		hdir = au_hi(sb->s_root->d_inode, bindex);
+		au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT);
+	} else {
+		h_mtx = &au_br_dentry(br)->d_inode->i_mutex;
+		mutex_lock_nested(h_mtx, AuLsc_I_PARENT);
+	}
+	if (!wbr)
+		err = au_wh_init(br, sb);
+	else {
+		wbr_wh_write_lock(wbr);
+		err = au_wh_init(br, sb);
+		wbr_wh_write_unlock(wbr);
+	}
+	if (hdir)
+		au_hn_imtx_unlock(hdir);
+	else
+		mutex_unlock(h_mtx);
+	vfsub_mnt_drop_write(au_br_mnt(br));
+	br->br_perm = old_perm;
+
+	if (!err && wbr && !au_br_writable(new_perm)) {
+		kfree(wbr);
+		br->br_wbr = NULL;
+	}
+
+out:
+	return err;
+}
+
+static int au_wbr_init(struct au_branch *br, struct super_block *sb,
+		       int perm)
+{
+	int err;
+	struct kstatfs kst;
+	struct au_wbr *wbr;
+
+	wbr = br->br_wbr;
+	au_rw_init(&wbr->wbr_wh_rwsem);
+	memset(wbr->wbr_wh, 0, sizeof(wbr->wbr_wh));
+	atomic_set(&wbr->wbr_wh_running, 0);
+	wbr->wbr_bytes = 0;
+
+	/*
+	 * a limit for rmdir/rename a dir
+	 * cf. AUFS_MAX_NAMELEN in include/linux/aufs_type.h
+	 */
+	err = vfs_statfs(&br->br_path, &kst);
+	if (unlikely(err))
+		goto out;
+	err = -EINVAL;
+	if (kst.f_namelen >= NAME_MAX)
+		err = au_br_init_wh(sb, br, perm);
+	else
+		pr_err("%.*s(%s), unsupported namelen %ld\n",
+		       AuDLNPair(au_br_dentry(br)),
+		       au_sbtype(au_br_dentry(br)->d_sb), kst.f_namelen);
+
+out:
+	return err;
+}
+
+/* initialize a new branch */
+static int au_br_init(struct au_branch *br, struct super_block *sb,
+		      struct au_opt_add *add)
+{
+	int err;
+
+	err = 0;
+	memset(&br->br_xino, 0, sizeof(br->br_xino));
+	mutex_init(&br->br_xino.xi_nondir_mtx);
+	br->br_perm = add->perm;
+	BUILD_BUG_ON(sizeof(br->br_dflags)
+		     != sizeof(br->br_path.dentry->d_flags));
+	br->br_dflags = DCACHE_MOUNTED;
+	br->br_path = add->path; /* set first, path_get() later */
+	spin_lock_init(&br->br_dykey_lock);
+	memset(br->br_dykey, 0, sizeof(br->br_dykey));
+	atomic_set(&br->br_count, 0);
+	br->br_xino_upper = AUFS_XINO_TRUNC_INIT;
+	atomic_set(&br->br_xino_running, 0);
+	br->br_id = au_new_br_id(sb);
+	AuDebugOn(br->br_id < 0);
+
+	if (au_br_writable(add->perm)) {
+		err = au_wbr_init(br, sb, add->perm);
+		if (unlikely(err))
+			goto out_err;
+	}
+
+	if (au_opt_test(au_mntflags(sb), XINO)) {
+		err = au_xino_br(sb, br, add->path.dentry->d_inode->i_ino,
+				 au_sbr(sb, 0)->br_xino.xi_file, /*do_test*/1);
+		if (unlikely(err)) {
+			AuDebugOn(br->br_xino.xi_file);
+			goto out_err;
+		}
+	}
+
+	sysaufs_br_init(br);
+	path_get(&br->br_path);
+	goto out; /* success */
+
+out_err:
+	memset(&br->br_path, 0, sizeof(br->br_path));
+out:
+	return err;
+}
+
+static void au_br_do_add_brp(struct au_sbinfo *sbinfo, aufs_bindex_t bindex,
+			     struct au_branch *br, aufs_bindex_t bend,
+			     aufs_bindex_t amount)
+{
+	struct au_branch **brp;
+
+	AuRwMustWriteLock(&sbinfo->si_rwsem);
+
+	brp = sbinfo->si_branch + bindex;
+	memmove(brp + 1, brp, sizeof(*brp) * amount);
+	*brp = br;
+	sbinfo->si_bend++;
+	if (unlikely(bend < 0))
+		sbinfo->si_bend = 0;
+}
+
+static void au_br_do_add_hdp(struct au_dinfo *dinfo, aufs_bindex_t bindex,
+			     aufs_bindex_t bend, aufs_bindex_t amount)
+{
+	struct au_hdentry *hdp;
+
+	AuRwMustWriteLock(&dinfo->di_rwsem);
+
+	hdp = dinfo->di_hdentry + bindex;
+	memmove(hdp + 1, hdp, sizeof(*hdp) * amount);
+	au_h_dentry_init(hdp);
+	dinfo->di_bend++;
+	if (unlikely(bend < 0))
+		dinfo->di_bstart = 0;
+}
+
+static void au_br_do_add_hip(struct au_iinfo *iinfo, aufs_bindex_t bindex,
+			     aufs_bindex_t bend, aufs_bindex_t amount)
+{
+	struct au_hinode *hip;
+
+	AuRwMustWriteLock(&iinfo->ii_rwsem);
+
+	hip = iinfo->ii_hinode + bindex;
+	memmove(hip + 1, hip, sizeof(*hip) * amount);
+	hip->hi_inode = NULL;
+	au_hn_init(hip);
+	iinfo->ii_bend++;
+	if (unlikely(bend < 0))
+		iinfo->ii_bstart = 0;
+}
+
+static void au_br_do_add(struct super_block *sb, struct au_branch *br,
+			 aufs_bindex_t bindex)
+{
+	struct dentry *root, *h_dentry;
+	struct inode *root_inode;
+	aufs_bindex_t bend, amount;
+
+	au_br_dflags_force(br);
+
+	root = sb->s_root;
+	root_inode = root->d_inode;
+	bend = au_sbend(sb);
+	amount = bend + 1 - bindex;
+	h_dentry = au_br_dentry(br);
+	au_sbilist_lock();
+	au_br_do_add_brp(au_sbi(sb), bindex, br, bend, amount);
+	au_br_do_add_hdp(au_di(root), bindex, bend, amount);
+	au_br_do_add_hip(au_ii(root_inode), bindex, bend, amount);
+	au_set_h_dptr(root, bindex, dget(h_dentry));
+	au_set_h_iptr(root_inode, bindex, au_igrab(h_dentry->d_inode),
+		      /*flags*/0);
+	au_sbilist_unlock();
+}
+
+int au_br_add(struct super_block *sb, struct au_opt_add *add, int remount)
+{
+	int err;
+	aufs_bindex_t bend, add_bindex;
+	struct dentry *root, *h_dentry;
+	struct inode *root_inode;
+	struct au_branch *add_branch;
+
+	root = sb->s_root;
+	root_inode = root->d_inode;
+	IMustLock(root_inode);
+	err = test_add(sb, add, remount);
+	if (unlikely(err < 0))
+		goto out;
+	if (err) {
+		err = 0;
+		goto out; /* success */
+	}
+
+	bend = au_sbend(sb);
+	add_branch = au_br_alloc(sb, bend + 2, add->perm);
+	err = PTR_ERR(add_branch);
+	if (IS_ERR(add_branch))
+		goto out;
+
+	err = au_br_init(add_branch, sb, add);
+	if (unlikely(err)) {
+		au_br_do_free(add_branch);
+		goto out;
+	}
+
+	add_bindex = add->bindex;
+	if (!remount)
+		au_br_do_add(sb, add_branch, add_bindex);
+	else {
+		sysaufs_brs_del(sb, add_bindex);
+		au_br_do_add(sb, add_branch, add_bindex);
+		sysaufs_brs_add(sb, add_bindex);
+	}
+
+	h_dentry = add->path.dentry;
+	if (!add_bindex) {
+		au_cpup_attr_all(root_inode, /*force*/1);
+		sb->s_maxbytes = h_dentry->d_sb->s_maxbytes;
+	} else
+		au_add_nlink(root_inode, h_dentry->d_inode);
+
+	/*
+	 * this test/set prevents aufs from handling unnecessary notify events
+	 * of xino files, in case of re-adding a writable branch which was
+	 * once detached from aufs.
+	 */
+	if (au_xino_brid(sb) < 0
+	    && au_br_writable(add_branch->br_perm)
+	    && !au_test_fs_bad_xino(h_dentry->d_sb)
+	    && add_branch->br_xino.xi_file
+	    && add_branch->br_xino.xi_file->f_dentry->d_parent == h_dentry)
+		au_xino_brid_set(sb, add_branch->br_id);
+
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * delete a branch
+ */
+
+/* to show the line number, do not make it an inline function */
+#define AuVerbose(do_info, fmt, ...) do { \
+	if (do_info) \
+		pr_info(fmt, ##__VA_ARGS__); \
+} while (0)
+
+static int au_test_ibusy(struct inode *inode, aufs_bindex_t bstart,
+			 aufs_bindex_t bend)
+{
+	return (inode && !S_ISDIR(inode->i_mode)) || bstart == bend;
+}
+
+static int au_test_dbusy(struct dentry *dentry, aufs_bindex_t bstart,
+			 aufs_bindex_t bend)
+{
+	return au_test_ibusy(dentry->d_inode, bstart, bend);
+}
+
+/*
+ * test if the branch is deletable or not.
+ */
+static int test_dentry_busy(struct dentry *root, aufs_bindex_t bindex,
+			    unsigned int sigen, const unsigned int verbose)
+{
+	int err, i, j, ndentry;
+	aufs_bindex_t bstart, bend;
+	struct au_dcsub_pages dpages;
+	struct au_dpage *dpage;
+	struct dentry *d;
+
+	err = au_dpages_init(&dpages, GFP_NOFS);
+	if (unlikely(err))
+		goto out;
+	err = au_dcsub_pages(&dpages, root, NULL, NULL);
+	if (unlikely(err))
+		goto out_dpages;
+
+	for (i = 0; !err && i < dpages.ndpage; i++) {
+		dpage = dpages.dpages + i;
+		ndentry = dpage->ndentry;
+		for (j = 0; !err && j < ndentry; j++) {
+			d = dpage->dentries[j];
+			AuDebugOn(!d->d_count);
+			if (!au_digen_test(d, sigen)) {
+				di_read_lock_child(d, AuLock_IR);
+				if (unlikely(au_dbrange_test(d))) {
+					di_read_unlock(d, AuLock_IR);
+					continue;
+				}
+			} else {
+				di_write_lock_child(d);
+				if (unlikely(au_dbrange_test(d))) {
+					di_write_unlock(d);
+					continue;
+				}
+				err = au_reval_dpath(d, sigen);
+				if (!err)
+					di_downgrade_lock(d, AuLock_IR);
+				else {
+					di_write_unlock(d);
+					break;
+				}
+			}
+
+			/* AuDbgDentry(d); */
+			bstart = au_dbstart(d);
+			bend = au_dbend(d);
+			if (bstart <= bindex
+			    && bindex <= bend
+			    && au_h_dptr(d, bindex)
+			    && au_test_dbusy(d, bstart, bend)) {
+				err = -EBUSY;
+				AuVerbose(verbose, "busy %.*s\n", AuDLNPair(d));
+				AuDbgDentry(d);
+			}
+			di_read_unlock(d, AuLock_IR);
+		}
+	}
+
+out_dpages:
+	au_dpages_free(&dpages);
+out:
+	return err;
+}
+
+static int test_inode_busy(struct super_block *sb, aufs_bindex_t bindex,
+			   unsigned int sigen, const unsigned int verbose)
+{
+	int err;
+	unsigned long long max, ull;
+	struct inode *i, **array;
+	aufs_bindex_t bstart, bend;
+
+	array = au_iarray_alloc(sb, &max);
+	err = PTR_ERR(array);
+	if (IS_ERR(array))
+		goto out;
+
+	err = 0;
+	AuDbg("b%d\n", bindex);
+	for (ull = 0; !err && ull < max; ull++) {
+		i = array[ull];
+		if (i->i_ino == AUFS_ROOT_INO)
+			continue;
+
+		/* AuDbgInode(i); */
+		if (au_iigen(i, NULL) == sigen)
+			ii_read_lock_child(i);
+		else {
+			ii_write_lock_child(i);
+			err = au_refresh_hinode_self(i);
+			au_iigen_dec(i);
+			if (!err)
+				ii_downgrade_lock(i);
+			else {
+				ii_write_unlock(i);
+				break;
+			}
+		}
+
+		bstart = au_ibstart(i);
+		bend = au_ibend(i);
+		if (bstart <= bindex
+		    && bindex <= bend
+		    && au_h_iptr(i, bindex)
+		    && au_test_ibusy(i, bstart, bend)) {
+			err = -EBUSY;
+			AuVerbose(verbose, "busy i%lu\n", i->i_ino);
+			AuDbgInode(i);
+		}
+		ii_read_unlock(i);
+	}
+	au_iarray_free(array, max);
+
+out:
+	return err;
+}
+
+static int test_children_busy(struct dentry *root, aufs_bindex_t bindex,
+			      const unsigned int verbose)
+{
+	int err;
+	unsigned int sigen;
+
+	sigen = au_sigen(root->d_sb);
+	DiMustNoWaiters(root);
+	IiMustNoWaiters(root->d_inode);
+	di_write_unlock(root);
+	err = test_dentry_busy(root, bindex, sigen, verbose);
+	if (!err)
+		err = test_inode_busy(root->d_sb, bindex, sigen, verbose);
+	di_write_lock_child(root); /* aufs_write_lock() calls ..._child() */
+
+	return err;
+}
+
+static void au_br_do_del_brp(struct au_sbinfo *sbinfo,
+			     const aufs_bindex_t bindex,
+			     const aufs_bindex_t bend)
+{
+	struct au_branch **brp, **p;
+
+	AuRwMustWriteLock(&sbinfo->si_rwsem);
+
+	brp = sbinfo->si_branch + bindex;
+	if (bindex < bend)
+		memmove(brp, brp + 1, sizeof(*brp) * (bend - bindex));
+	sbinfo->si_branch[0 + bend] = NULL;
+	sbinfo->si_bend--;
+
+	p = krealloc(sbinfo->si_branch, sizeof(*p) * bend, AuGFP_SBILIST);
+	if (p)
+		sbinfo->si_branch = p;
+	/* harmless error */
+}
+
+static void au_br_do_del_hdp(struct au_dinfo *dinfo, const aufs_bindex_t bindex,
+			     const aufs_bindex_t bend)
+{
+	struct au_hdentry *hdp, *p;
+
+	AuRwMustWriteLock(&dinfo->di_rwsem);
+
+	hdp = dinfo->di_hdentry;
+	if (bindex < bend)
+		memmove(hdp + bindex, hdp + bindex + 1,
+			sizeof(*hdp) * (bend - bindex));
+	hdp[0 + bend].hd_dentry = NULL;
+	dinfo->di_bend--;
+
+	p = krealloc(hdp, sizeof(*p) * bend, AuGFP_SBILIST);
+	if (p)
+		dinfo->di_hdentry = p;
+	/* harmless error */
+}
+
+static void au_br_do_del_hip(struct au_iinfo *iinfo, const aufs_bindex_t bindex,
+			     const aufs_bindex_t bend)
+{
+	struct au_hinode *hip, *p;
+
+	AuRwMustWriteLock(&iinfo->ii_rwsem);
+
+	hip = iinfo->ii_hinode + bindex;
+	if (bindex < bend)
+		memmove(hip, hip + 1, sizeof(*hip) * (bend - bindex));
+	iinfo->ii_hinode[0 + bend].hi_inode = NULL;
+	au_hn_init(iinfo->ii_hinode + bend);
+	iinfo->ii_bend--;
+
+	p = krealloc(iinfo->ii_hinode, sizeof(*p) * bend, AuGFP_SBILIST);
+	if (p)
+		iinfo->ii_hinode = p;
+	/* harmless error */
+}
+
+static void au_br_do_del(struct super_block *sb, aufs_bindex_t bindex,
+			 struct au_branch *br)
+{
+	aufs_bindex_t bend;
+	struct au_sbinfo *sbinfo;
+	struct dentry *root, *h_root;
+	struct inode *inode, *h_inode;
+	struct au_hinode *hinode;
+
+	SiMustWriteLock(sb);
+
+	root = sb->s_root;
+	inode = root->d_inode;
+	sbinfo = au_sbi(sb);
+	bend = sbinfo->si_bend;
+
+	h_root = au_h_dptr(root, bindex);
+	hinode = au_hi(inode, bindex);
+	h_inode = au_igrab(hinode->hi_inode);
+	au_hiput(hinode);
+
+	au_sbilist_lock();
+	au_br_do_del_brp(sbinfo, bindex, bend);
+	au_br_do_del_hdp(au_di(root), bindex, bend);
+	au_br_do_del_hip(au_ii(inode), bindex, bend);
+	au_sbilist_unlock();
+
+	dput(h_root);
+	iput(h_inode);
+	au_br_do_free(br);
+}
+
+int au_br_del(struct super_block *sb, struct au_opt_del *del, int remount)
+{
+	int err, rerr, i;
+	unsigned int mnt_flags;
+	aufs_bindex_t bindex, bend, br_id;
+	unsigned char do_wh, verbose;
+	struct au_branch *br;
+	struct au_wbr *wbr;
+
+	err = 0;
+	bindex = au_find_dbindex(sb->s_root, del->h_path.dentry);
+	if (bindex < 0) {
+		if (remount)
+			goto out; /* success */
+		err = -ENOENT;
+		pr_err("%s no such branch\n", del->pathname);
+		goto out;
+	}
+	AuDbg("bindex b%d\n", bindex);
+
+	err = -EBUSY;
+	mnt_flags = au_mntflags(sb);
+	verbose = !!au_opt_test(mnt_flags, VERBOSE);
+	bend = au_sbend(sb);
+	if (unlikely(!bend)) {
+		AuVerbose(verbose, "no more branches left\n");
+		goto out;
+	}
+	br = au_sbr(sb, bindex);
+	AuDebugOn(!path_equal(&br->br_path, &del->h_path));
+	i = atomic_read(&br->br_count);
+	if (unlikely(i)) {
+		AuVerbose(verbose, "%d file(s) opened\n", i);
+		goto out;
+	}
+
+	wbr = br->br_wbr;
+	do_wh = wbr && (wbr->wbr_whbase || wbr->wbr_plink || wbr->wbr_orph);
+	if (do_wh) {
+		/* instead of WbrWhMustWriteLock(wbr) */
+		SiMustWriteLock(sb);
+		for (i = 0; i < AuBrWh_Last; i++) {
+			dput(wbr->wbr_wh[i]);
+			wbr->wbr_wh[i] = NULL;
+		}
+	}
+
+	err = test_children_busy(sb->s_root, bindex, verbose);
+	if (unlikely(err)) {
+		if (do_wh)
+			goto out_wh;
+		goto out;
+	}
+
+	err = 0;
+	br_id = br->br_id;
+	if (!remount)
+		au_br_do_del(sb, bindex, br);
+	else {
+		sysaufs_brs_del(sb, bindex);
+		au_br_do_del(sb, bindex, br);
+		sysaufs_brs_add(sb, bindex);
+	}
+
+	if (!bindex) {
+		au_cpup_attr_all(sb->s_root->d_inode, /*force*/1);
+		sb->s_maxbytes = au_sbr_sb(sb, 0)->s_maxbytes;
+	} else
+		au_sub_nlink(sb->s_root->d_inode, del->h_path.dentry->d_inode);
+	if (au_opt_test(mnt_flags, PLINK))
+		au_plink_half_refresh(sb, br_id);
+
+	if (au_xino_brid(sb) == br_id)
+		au_xino_brid_set(sb, -1);
+	goto out; /* success */
+
+out_wh:
+	/* revert */
+	rerr = au_br_init_wh(sb, br, br->br_perm);
+	if (rerr)
+		pr_warn("failed re-creating base whiteout, %s. (%d)\n",
+			del->pathname, rerr);
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_ibusy(struct super_block *sb, struct aufs_ibusy __user *arg)
+{
+	int err;
+	aufs_bindex_t bstart, bend;
+	struct aufs_ibusy ibusy;
+	struct inode *inode, *h_inode;
+
+	err = -EPERM;
+	if (unlikely(!capable(CAP_SYS_ADMIN)))
+		goto out;
+
+	err = copy_from_user(&ibusy, arg, sizeof(ibusy));
+	if (!err)
+		err = !access_ok(VERIFY_WRITE, &arg->h_ino, sizeof(arg->h_ino));
+	if (unlikely(err)) {
+		err = -EFAULT;
+		AuTraceErr(err);
+		goto out;
+	}
+
+	err = -EINVAL;
+	si_read_lock(sb, AuLock_FLUSH);
+	if (unlikely(ibusy.bindex < 0 || ibusy.bindex > au_sbend(sb)))
+		goto out_unlock;
+
+	err = 0;
+	ibusy.h_ino = 0; /* invalid */
+	inode = ilookup(sb, ibusy.ino);
+	if (!inode
+	    || inode->i_ino == AUFS_ROOT_INO
+	    || is_bad_inode(inode))
+		goto out_unlock;
+
+	ii_read_lock_child(inode);
+	bstart = au_ibstart(inode);
+	bend = au_ibend(inode);
+	if (bstart <= ibusy.bindex && ibusy.bindex <= bend) {
+		h_inode = au_h_iptr(inode, ibusy.bindex);
+		if (h_inode && au_test_ibusy(inode, bstart, bend))
+			ibusy.h_ino = h_inode->i_ino;
+	}
+	ii_read_unlock(inode);
+	iput(inode);
+
+out_unlock:
+	si_read_unlock(sb);
+	if (!err) {
+		err = __put_user(ibusy.h_ino, &arg->h_ino);
+		if (unlikely(err)) {
+			err = -EFAULT;
+			AuTraceErr(err);
+		}
+	}
+out:
+	return err;
+}
+
+long au_ibusy_ioctl(struct file *file, unsigned long arg)
+{
+	return au_ibusy(file->f_dentry->d_sb, (void __user *)arg);
+}
+
+#ifdef CONFIG_COMPAT
+long au_ibusy_compat_ioctl(struct file *file, unsigned long arg)
+{
+	return au_ibusy(file->f_dentry->d_sb, compat_ptr(arg));
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * change a branch permission
+ */
+
+static void au_warn_ima(void)
+{
+#ifdef CONFIG_IMA
+	/* since it doesn't support mark_files_ro() */
+	AuWarn1("RW -> RO makes IMA to produce wrong message\n");
+#endif
+}
+
+static int do_need_sigen_inc(int a, int b)
+{
+	return au_br_whable(a) && !au_br_whable(b);
+}
+
+static int need_sigen_inc(int old, int new)
+{
+	return do_need_sigen_inc(old, new)
+		|| do_need_sigen_inc(new, old);
+}
+
+static unsigned long long au_farray_cb(void *a,
+				       unsigned long long max __maybe_unused,
+				       void *arg)
+{
+	unsigned long long n;
+	struct file **p, *f;
+	struct super_block *sb = arg;
+
+	n = 0;
+	p = a;
+	lg_global_lock(&files_lglock);
+	do_file_list_for_each_entry(sb, f) {
+		if (au_fi(f)
+		    && file_count(f)
+		    && !special_file(file_inode(f)->i_mode)) {
+			get_file(f);
+			*p++ = f;
+			n++;
+			AuDebugOn(n > max);
+		}
+	} while_file_list_for_each_entry;
+	lg_global_unlock(&files_lglock);
+
+	return n;
+}
+
+static struct file **au_farray_alloc(struct super_block *sb,
+				     unsigned long long *max)
+{
+	*max = atomic_long_read(&au_sbi(sb)->si_nfiles);
+	return au_array_alloc(max, au_farray_cb, sb);
+}
+
+static void au_farray_free(struct file **a, unsigned long long max)
+{
+	unsigned long long ull;
+
+	for (ull = 0; ull < max; ull++)
+		if (a[ull])
+			fput(a[ull]);
+	au_array_free(a);
+}
+
+static int au_br_mod_files_ro(struct super_block *sb, aufs_bindex_t bindex)
+{
+	int err, do_warn;
+	unsigned int mnt_flags;
+	unsigned long long ull, max;
+	aufs_bindex_t br_id;
+	unsigned char verbose;
+	struct file *file, *hf, **array;
+	struct inode *inode;
+	struct au_hfile *hfile;
+
+	mnt_flags = au_mntflags(sb);
+	verbose = !!au_opt_test(mnt_flags, VERBOSE);
+
+	array = au_farray_alloc(sb, &max);
+	err = PTR_ERR(array);
+	if (IS_ERR(array))
+		goto out;
+
+	do_warn = 0;
+	br_id = au_sbr_id(sb, bindex);
+	for (ull = 0; ull < max; ull++) {
+		file = array[ull];
+
+		/* AuDbg("%.*s\n", AuDLNPair(file->f_dentry)); */
+		fi_read_lock(file);
+		if (unlikely(au_test_mmapped(file))) {
+			err = -EBUSY;
+			AuVerbose(verbose, "mmapped %.*s\n",
+				  AuDLNPair(file->f_dentry));
+			AuDbgFile(file);
+			FiMustNoWaiters(file);
+			fi_read_unlock(file);
+			goto out_array;
+		}
+
+		inode = file_inode(file);
+		hfile = &au_fi(file)->fi_htop;
+		hf = hfile->hf_file;
+		if (!S_ISREG(inode->i_mode)
+		    || !(file->f_mode & FMODE_WRITE)
+		    || hfile->hf_br->br_id != br_id
+		    || !(hf->f_mode & FMODE_WRITE))
+			array[ull] = NULL;
+		else {
+			do_warn = 1;
+			get_file(file);
+		}
+
+		FiMustNoWaiters(file);
+		fi_read_unlock(file);
+		fput(file);
+	}
+
+	err = 0;
+	if (do_warn)
+		au_warn_ima();
+
+	for (ull = 0; ull < max; ull++) {
+		file = array[ull];
+		if (!file)
+			continue;
+
+		/* todo: already flushed? */
+		/* cf. fs/super.c:mark_files_ro() */
+		/* fi_read_lock(file); */
+		hfile = &au_fi(file)->fi_htop;
+		hf = hfile->hf_file;
+		/* fi_read_unlock(file); */
+		spin_lock(&hf->f_lock);
+		hf->f_mode &= ~FMODE_WRITE;
+		spin_unlock(&hf->f_lock);
+		if (!file_check_writeable(hf)) {
+			__mnt_drop_write(hf->f_path.mnt);
+			file_release_write(hf);
+		}
+	}
+
+out_array:
+	au_farray_free(array, max);
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+int au_br_mod(struct super_block *sb, struct au_opt_mod *mod, int remount,
+	      int *do_refresh)
+{
+	int err, rerr;
+	aufs_bindex_t bindex;
+	struct dentry *root;
+	struct au_branch *br;
+
+	root = sb->s_root;
+	bindex = au_find_dbindex(root, mod->h_root);
+	if (bindex < 0) {
+		if (remount)
+			return 0; /* success */
+		err = -ENOENT;
+		pr_err("%s no such branch\n", mod->path);
+		goto out;
+	}
+	AuDbg("bindex b%d\n", bindex);
+
+	err = test_br(mod->h_root->d_inode, mod->perm, mod->path);
+	if (unlikely(err))
+		goto out;
+
+	br = au_sbr(sb, bindex);
+	AuDebugOn(mod->h_root != au_br_dentry(br));
+	if (br->br_perm == mod->perm)
+		return 0; /* success */
+
+	if (au_br_writable(br->br_perm)) {
+		/* remove whiteout base */
+		err = au_br_init_wh(sb, br, mod->perm);
+		if (unlikely(err))
+			goto out;
+
+		if (!au_br_writable(mod->perm)) {
+			/* rw --> ro, file might be mmapped */
+			DiMustNoWaiters(root);
+			IiMustNoWaiters(root->d_inode);
+			di_write_unlock(root);
+			err = au_br_mod_files_ro(sb, bindex);
+			/* aufs_write_lock() calls ..._child() */
+			di_write_lock_child(root);
+
+			if (unlikely(err)) {
+				rerr = -ENOMEM;
+				br->br_wbr = kmalloc(sizeof(*br->br_wbr),
+						     GFP_NOFS);
+				if (br->br_wbr)
+					rerr = au_wbr_init(br, sb, br->br_perm);
+				if (unlikely(rerr)) {
+					AuIOErr("nested error %d (%d)\n",
+						rerr, err);
+					br->br_perm = mod->perm;
+				}
+			}
+		}
+	} else if (au_br_writable(mod->perm)) {
+		/* ro --> rw */
+		err = -ENOMEM;
+		br->br_wbr = kmalloc(sizeof(*br->br_wbr), GFP_NOFS);
+		if (br->br_wbr) {
+			err = au_wbr_init(br, sb, mod->perm);
+			if (unlikely(err)) {
+				kfree(br->br_wbr);
+				br->br_wbr = NULL;
+			}
+		}
+	}
+
+	if (!err) {
+		if ((br->br_perm & AuBrAttr_UNPIN)
+		    && !(mod->perm & AuBrAttr_UNPIN))
+			au_br_dflags_force(br);
+		else if (!(br->br_perm & AuBrAttr_UNPIN)
+			 && (mod->perm & AuBrAttr_UNPIN))
+			au_br_dflags_restore(br);
+		*do_refresh |= need_sigen_inc(br->br_perm, mod->perm);
+		br->br_perm = mod->perm;
+	}
+
+out:
+	AuTraceErr(err);
+	return err;
+}
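
Branch addition and deletion keep three parallel arrays in step (sbinfo->si_branch, the root dentry's hdentry array and the root inode's hinode array), all through the memmove idiom of au_br_do_add_brp() and au_br_do_del_brp() above. A self-contained sketch of that idiom, detached from the aufs types:

	#include <string.h>

	/* insert item at bindex, shifting "amount" tail entries up by one */
	static void slot_insert(void **array, int bindex, int amount, void *item)
	{
		void **p = array + bindex;

		memmove(p + 1, p, sizeof(*p) * amount);
		*p = item;
	}

	/* delete the entry at bindex; bend is the last valid index */
	static void slot_delete(void **array, int bindex, int bend)
	{
		void **p = array + bindex;

		if (bindex < bend)
			memmove(p, p + 1, sizeof(*p) * (bend - bindex));
		array[bend] = NULL;	/* the tail slot is now unused */
	}

As in au_br_do_del_brp(), a shrinking krealloc() may follow the delete; its failure is harmless because the old, larger allocation stays valid.
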
diff --git a/fs/aufs/branch.h b/fs/aufs/branch.h
new file mode 100644
index 0000000..0eab01c
--- /dev/null
+++ b/fs/aufs/branch.h
@@ -0,0 +1,255 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * branch filesystems and xino for them
+ */
+
+#ifndef __AUFS_BRANCH_H__
+#define __AUFS_BRANCH_H__
+
+#ifdef __KERNEL__
+
+#include <linux/mount.h>
+#include "dynop.h"
+#include "rwsem.h"
+#include "super.h"
+
+/* ---------------------------------------------------------------------- */
+
+/* a xino file */
+struct au_xino_file {
+	struct file		*xi_file;
+	struct mutex		xi_nondir_mtx;
+
+	/* todo: make xino files an array to support huge inode numbers */
+
+#ifdef CONFIG_DEBUG_FS
+	struct dentry		 *xi_dbgaufs;
+#endif
+};
+
+/* members for writable branch only */
+enum {AuBrWh_BASE, AuBrWh_PLINK, AuBrWh_ORPH, AuBrWh_Last};
+struct au_wbr {
+	struct au_rwsem		wbr_wh_rwsem;
+	struct dentry		*wbr_wh[AuBrWh_Last];
+	atomic_t		wbr_wh_running;
+#define wbr_whbase		wbr_wh[AuBrWh_BASE]	/* whiteout base */
+#define wbr_plink		wbr_wh[AuBrWh_PLINK]	/* pseudo-link dir */
+#define wbr_orph		wbr_wh[AuBrWh_ORPH]	/* dir for orphans */
+
+	/* mfs mode */
+	unsigned long long	wbr_bytes;
+};
+
+/* ext2 has 3 types of operations at least, ext3 has 4 */
+#define AuBrDynOp (AuDyLast * 4)
+
+#ifdef CONFIG_AUFS_HFSNOTIFY
+/* support for asynchronous destruction */
+struct au_br_hfsnotify {
+	struct fsnotify_group	*hfsn_group;
+};
+#endif
+
+/* protected by superblock rwsem */
+struct au_branch {
+	struct au_xino_file	br_xino;
+
+	aufs_bindex_t		br_id;
+
+	int			br_perm;
+	unsigned int		br_dflags;
+	struct path		br_path;
+	spinlock_t		br_dykey_lock;
+	struct au_dykey		*br_dykey[AuBrDynOp];
+	atomic_t		br_count;
+
+	struct au_wbr		*br_wbr;
+
+	/* xino truncation */
+	blkcnt_t		br_xino_upper;	/* watermark in blocks */
+	atomic_t		br_xino_running;
+
+#ifdef CONFIG_AUFS_HFSNOTIFY
+	struct au_br_hfsnotify	*br_hfsn;
+#endif
+
+#ifdef CONFIG_SYSFS
+	/* an entry under sysfs per mount-point */
+	char			br_name[8];
+	struct attribute	br_attr;
+#endif
+};
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct vfsmount *au_br_mnt(struct au_branch *br)
+{
+	return br->br_path.mnt;
+}
+
+static inline struct dentry *au_br_dentry(struct au_branch *br)
+{
+	return br->br_path.dentry;
+}
+
+static inline struct super_block *au_br_sb(struct au_branch *br)
+{
+	return au_br_mnt(br)->mnt_sb;
+}
+
+/* branch permissions and attributes */
+#define AuBrPerm_RW		1		/* writable, hardlinkable wh */
+#define AuBrPerm_RO		(1 << 1)	/* readonly */
+#define AuBrPerm_RR		(1 << 2)	/* natively readonly */
+#define AuBrPerm_Mask		(AuBrPerm_RW | AuBrPerm_RO | AuBrPerm_RR)
+
+#define AuBrRAttr_WH		(1 << 3)	/* whiteout-able */
+
+#define AuBrWAttr_NoLinkWH	(1 << 4)	/* un-hardlinkable whiteouts */
+
+#define AuBrAttr_UNPIN		(1 << 5)	/* rename-able top dir of
+						   branch */
+
+static inline int au_br_writable(int brperm)
+{
+	return brperm & AuBrPerm_RW;
+}
+
+static inline int au_br_whable(int brperm)
+{
+	return brperm & (AuBrPerm_RW | AuBrRAttr_WH);
+}
+
+static inline int au_br_wh_linkable(int brperm)
+{
+	return !(brperm & AuBrWAttr_NoLinkWH);
+}
+
+static inline int au_br_rdonly(struct au_branch *br)
+{
+	return ((au_br_sb(br)->s_flags & MS_RDONLY)
+		|| !au_br_writable(br->br_perm))
+		? -EROFS : 0;
+}
+
+static inline int au_br_hnotifyable(int brperm __maybe_unused)
+{
+#ifdef CONFIG_AUFS_HNOTIFY
+	return !(brperm & AuBrPerm_RR);
+#else
+	return 0;
+#endif
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* branch.c */
+struct au_sbinfo;
+void au_br_free(struct au_sbinfo *sinfo);
+int au_br_index(struct super_block *sb, aufs_bindex_t br_id);
+struct au_opt_add;
+int au_br_add(struct super_block *sb, struct au_opt_add *add, int remount);
+struct au_opt_del;
+int au_br_del(struct super_block *sb, struct au_opt_del *del, int remount);
+long au_ibusy_ioctl(struct file *file, unsigned long arg);
+#ifdef CONFIG_COMPAT
+long au_ibusy_compat_ioctl(struct file *file, unsigned long arg);
+#endif
+struct au_opt_mod;
+int au_br_mod(struct super_block *sb, struct au_opt_mod *mod, int remount,
+	      int *do_refresh);
+
+/* xino.c */
+static const loff_t au_loff_max = LLONG_MAX;
+
+int au_xib_trunc(struct super_block *sb);
+ssize_t xino_fread(au_readf_t func, struct file *file, void *buf, size_t size,
+		   loff_t *pos);
+ssize_t xino_fwrite(au_writef_t func, struct file *file, void *buf, size_t size,
+		    loff_t *pos);
+struct file *au_xino_create2(struct file *base_file, struct file *copy_src);
+struct file *au_xino_create(struct super_block *sb, char *fname, int silent);
+ino_t au_xino_new_ino(struct super_block *sb);
+void au_xino_delete_inode(struct inode *inode, const int unlinked);
+int au_xino_write(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+		  ino_t ino);
+int au_xino_read(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+		 ino_t *ino);
+int au_xino_br(struct super_block *sb, struct au_branch *br, ino_t hino,
+	       struct file *base_file, int do_test);
+int au_xino_trunc(struct super_block *sb, aufs_bindex_t bindex);
+
+struct au_opt_xino;
+int au_xino_set(struct super_block *sb, struct au_opt_xino *xino, int remount);
+void au_xino_clr(struct super_block *sb);
+struct file *au_xino_def(struct super_block *sb);
+int au_xino_path(struct seq_file *seq, struct file *file);
+
+/* ---------------------------------------------------------------------- */
+
+/* Superblock to branch */
+static inline
+aufs_bindex_t au_sbr_id(struct super_block *sb, aufs_bindex_t bindex)
+{
+	return au_sbr(sb, bindex)->br_id;
+}
+
+static inline
+struct vfsmount *au_sbr_mnt(struct super_block *sb, aufs_bindex_t bindex)
+{
+	return au_br_mnt(au_sbr(sb, bindex));
+}
+
+static inline
+struct super_block *au_sbr_sb(struct super_block *sb, aufs_bindex_t bindex)
+{
+	return au_br_sb(au_sbr(sb, bindex));
+}
+
+static inline void au_sbr_put(struct super_block *sb, aufs_bindex_t bindex)
+{
+	atomic_dec(&au_sbr(sb, bindex)->br_count);
+}
+
+static inline int au_sbr_perm(struct super_block *sb, aufs_bindex_t bindex)
+{
+	return au_sbr(sb, bindex)->br_perm;
+}
+
+static inline int au_sbr_whable(struct super_block *sb, aufs_bindex_t bindex)
+{
+	return au_br_whable(au_sbr_perm(sb, bindex));
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * wbr_wh_read_lock, wbr_wh_write_lock
+ * wbr_wh_read_unlock, wbr_wh_write_unlock, wbr_wh_downgrade_lock
+ */
+AuSimpleRwsemFuncs(wbr_wh, struct au_wbr *wbr, &wbr->wbr_wh_rwsem);
+
+#define WbrWhMustNoWaiters(wbr)	AuRwMustNoWaiters(&wbr->wbr_wh_rwsem)
+#define WbrWhMustAnyLock(wbr)	AuRwMustAnyLock(&wbr->wbr_wh_rwsem)
+#define WbrWhMustWriteLock(wbr)	AuRwMustWriteLock(&wbr->wbr_wh_rwsem)
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_BRANCH_H__ */
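
The permission word defined above is a bitmask: one base mode (RW/RO/RR) optionally combined with attribute bits. A small sketch of composing and testing it with the inline helpers from this header:

	static int example_perm_checks(void)
	{
		/* writable branch whose top directory may be renamed */
		int perm = AuBrPerm_RW | AuBrAttr_UNPIN;

		return au_br_writable(perm)		/* true: RW bit set */
			&& au_br_whable(perm)		/* true: RW implies whiteout-able */
			&& au_br_wh_linkable(perm);	/* true: NoLinkWH not set */
	}
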
diff --git a/fs/aufs/conf.mk b/fs/aufs/conf.mk
new file mode 100644
index 0000000..6c5108d
--- /dev/null
+++ b/fs/aufs/conf.mk
@@ -0,0 +1,38 @@
+
+AuConfStr = CONFIG_AUFS_FS=${CONFIG_AUFS_FS}
+
+define AuConf
+ifdef ${1}
+AuConfStr += ${1}=${${1}}
+endif
+endef
+
+AuConfAll = BRANCH_MAX_127 BRANCH_MAX_511 BRANCH_MAX_1023 BRANCH_MAX_32767 \
+	SBILIST \
+	HNOTIFY HFSNOTIFY \
+	EXPORT INO_T_64 \
+	RDU \
+	PROC_MAP \
+	SP_IATTR \
+	SHWH \
+	BR_RAMFS \
+	BR_FUSE POLL \
+	BR_HFSPLUS \
+	BDEV_LOOP \
+	DEBUG MAGIC_SYSRQ
+$(foreach i, ${AuConfAll}, \
+	$(eval $(call AuConf,CONFIG_AUFS_${i})))
+
+AuConfName = ${obj}/conf.str
+${AuConfName}.tmp: FORCE
+	@echo ${AuConfStr} | tr ' ' '\n' | sed -e 's/^/"/' -e 's/$$/\\n"/' > $@
+${AuConfName}: ${AuConfName}.tmp
+	@diff -q $< $@ > /dev/null 2>&1 || { \
+	echo '  GEN    ' $@; \
+	cp -p $< $@; \
+	}
+FORCE:
+clean-files += ${AuConfName} ${AuConfName}.tmp
+${obj}/sysfs.o: ${AuConfName}
+
+-include ${srctree}/${src}/conf_priv.mk
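
conf.mk above turns the enabled CONFIG_AUFS_* options into conf.str, one quoted "NAME=value\n" fragment per line, and makes sysfs.o depend on it. Assuming the fragments are pulled in as adjacent C string literals (the usual trick; the consuming code is not shown in this section), the reader would look roughly like:

	/* adjacent string literals from conf.str concatenate into one string */
	static const char au_config_str[] =
	#include "conf.str"
	;
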
diff --git a/fs/aufs/cpup.c b/fs/aufs/cpup.c
new file mode 100644
index 0000000..a9edf03
--- /dev/null
+++ b/fs/aufs/cpup.c
@@ -0,0 +1,1255 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * copy-up functions, see wbr_policy.c for copy-down
+ */
+
+#include <linux/fs_stack.h>
+#include <linux/mm.h>
+#include "aufs.h"
+
+void au_cpup_attr_flags(struct inode *dst, unsigned int iflags)
+{
+	const unsigned int mask = S_DEAD | S_SWAPFILE | S_PRIVATE
+		| S_NOATIME | S_NOCMTIME | S_AUTOMOUNT;
+
+	BUILD_BUG_ON(sizeof(iflags) != sizeof(dst->i_flags));
+
+	dst->i_flags |= iflags & ~mask;
+	if (au_test_fs_notime(dst->i_sb))
+		dst->i_flags |= S_NOATIME | S_NOCMTIME;
+}
+
+void au_cpup_attr_timesizes(struct inode *inode)
+{
+	struct inode *h_inode;
+
+	h_inode = au_h_iptr(inode, au_ibstart(inode));
+	fsstack_copy_attr_times(inode, h_inode);
+	fsstack_copy_inode_size(inode, h_inode);
+}
+
+void au_cpup_attr_nlink(struct inode *inode, int force)
+{
+	struct inode *h_inode;
+	struct super_block *sb;
+	aufs_bindex_t bindex, bend;
+
+	sb = inode->i_sb;
+	bindex = au_ibstart(inode);
+	h_inode = au_h_iptr(inode, bindex);
+	if (!force
+	    && !S_ISDIR(h_inode->i_mode)
+	    && au_opt_test(au_mntflags(sb), PLINK)
+	    && au_plink_test(inode))
+		return;
+
+	/*
+	 * 0 can happen in revalidating.
+	 * h_inode->i_mutex is not held, but it is harmless since once i_nlink
+	 * reaches 0, it will never become positive.
+	 */
+	set_nlink(inode, h_inode->i_nlink);
+
+	/*
+	 * fewer nlink makes find(1) noisy, but larger nlink doesn't.
+	 * it may include the whplink directory.
+	 */
+	if (S_ISDIR(h_inode->i_mode)) {
+		bend = au_ibend(inode);
+		for (bindex++; bindex <= bend; bindex++) {
+			h_inode = au_h_iptr(inode, bindex);
+			if (h_inode)
+				au_add_nlink(inode, h_inode);
+		}
+	}
+}
+
+void au_cpup_attr_changeable(struct inode *inode)
+{
+	struct inode *h_inode;
+
+	h_inode = au_h_iptr(inode, au_ibstart(inode));
+	inode->i_mode = h_inode->i_mode;
+	inode->i_uid = h_inode->i_uid;
+	inode->i_gid = h_inode->i_gid;
+	au_cpup_attr_timesizes(inode);
+	au_cpup_attr_flags(inode, h_inode->i_flags);
+}
+
+void au_cpup_igen(struct inode *inode, struct inode *h_inode)
+{
+	struct au_iinfo *iinfo = au_ii(inode);
+
+	IiMustWriteLock(inode);
+
+	iinfo->ii_higen = h_inode->i_generation;
+	iinfo->ii_hsb1 = h_inode->i_sb;
+}
+
+void au_cpup_attr_all(struct inode *inode, int force)
+{
+	struct inode *h_inode;
+
+	h_inode = au_h_iptr(inode, au_ibstart(inode));
+	au_cpup_attr_changeable(inode);
+	if (inode->i_nlink > 0)
+		au_cpup_attr_nlink(inode, force);
+	inode->i_rdev = h_inode->i_rdev;
+	inode->i_blkbits = h_inode->i_blkbits;
+	au_cpup_igen(inode, h_inode);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* Note: dt_dentry and dt_h_dentry are not dget/dput-ed */
+
+/* keep the timestamps of the parent dir when cpup */
+void au_dtime_store(struct au_dtime *dt, struct dentry *dentry,
+		    struct path *h_path)
+{
+	struct inode *h_inode;
+
+	dt->dt_dentry = dentry;
+	dt->dt_h_path = *h_path;
+	h_inode = h_path->dentry->d_inode;
+	dt->dt_atime = h_inode->i_atime;
+	dt->dt_mtime = h_inode->i_mtime;
+	/* smp_mb(); */
+}
+
+void au_dtime_revert(struct au_dtime *dt)
+{
+	struct iattr attr;
+	int err;
+
+	attr.ia_atime = dt->dt_atime;
+	attr.ia_mtime = dt->dt_mtime;
+	attr.ia_valid = ATTR_FORCE | ATTR_MTIME | ATTR_MTIME_SET
+		| ATTR_ATIME | ATTR_ATIME_SET;
+
+	err = vfsub_notify_change(&dt->dt_h_path, &attr);
+	if (unlikely(err))
+		pr_warn("restoring timestamps failed(%d). ignored\n", err);
+}
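+
+/*
+ * the typical pattern (a sketch; cf. cpup_entry() below):
+ *	au_dtime_store(&dt, parent, &h_path);
+ *	... create or remove entries under the parent dir ...
+ *	au_dtime_revert(&dt);
+ */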
+
+/* ---------------------------------------------------------------------- */
+
+/* internal use only */
+struct au_cpup_reg_attr {
+	int		valid;
+	struct kstat	st;
+	unsigned int	iflags; /* inode->i_flags */
+};
+
+static noinline_for_stack
+int cpup_iattr(struct dentry *dst, aufs_bindex_t bindex, struct dentry *h_src,
+	       struct au_cpup_reg_attr *h_src_attr)
+{
+	int err, sbits;
+	struct iattr ia;
+	struct path h_path;
+	struct inode *h_isrc, *h_idst;
+	struct kstat *h_st;
+
+	h_path.dentry = au_h_dptr(dst, bindex);
+	h_idst = h_path.dentry->d_inode;
+	h_path.mnt = au_sbr_mnt(dst->d_sb, bindex);
+	h_isrc = h_src->d_inode;
+	ia.ia_valid = ATTR_FORCE | ATTR_UID | ATTR_GID
+		| ATTR_ATIME | ATTR_MTIME
+		| ATTR_ATIME_SET | ATTR_MTIME_SET;
+	if (h_src_attr && h_src_attr->valid) {
+		h_st = &h_src_attr->st;
+		ia.ia_uid = h_st->uid;
+		ia.ia_gid = h_st->gid;
+		ia.ia_atime = h_st->atime;
+		ia.ia_mtime = h_st->mtime;
+		if (h_idst->i_mode != h_st->mode
+		    && !S_ISLNK(h_idst->i_mode)) {
+			ia.ia_valid |= ATTR_MODE;
+			ia.ia_mode = h_st->mode;
+		}
+		sbits = !!(h_st->mode & (S_ISUID | S_ISGID));
+		au_cpup_attr_flags(h_idst, h_src_attr->iflags);
+	} else {
+		ia.ia_uid = h_isrc->i_uid;
+		ia.ia_gid = h_isrc->i_gid;
+		ia.ia_atime = h_isrc->i_atime;
+		ia.ia_mtime = h_isrc->i_mtime;
+		if (h_idst->i_mode != h_isrc->i_mode
+		    && !S_ISLNK(h_idst->i_mode)) {
+			ia.ia_valid |= ATTR_MODE;
+			ia.ia_mode = h_isrc->i_mode;
+		}
+		sbits = !!(h_isrc->i_mode & (S_ISUID | S_ISGID));
+		au_cpup_attr_flags(h_idst, h_isrc->i_flags);
+	}
+	err = vfsub_notify_change(&h_path, &ia);
+
+	/* is this nfs only? */
+	if (!err && sbits && au_test_nfs(h_path.dentry->d_sb)) {
+		ia.ia_valid = ATTR_FORCE | ATTR_MODE;
+		ia.ia_mode = h_isrc->i_mode;
+		err = vfsub_notify_change(&h_path, &ia);
+	}
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
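+/*
+ * copy @len bytes from @src to @dst in @blksize chunks, preserving holes:
+ * a fully zeroed block is not written out but skipped over with
+ * llseek(SEEK_CUR), and a trailing hole is finished off by truncating via
+ * notify_change(ATTR_SIZE)
+ */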
+static int au_do_copy_file(struct file *dst, struct file *src, loff_t len,
+			   char *buf, unsigned long blksize)
+{
+	int err;
+	size_t sz, rbytes, wbytes;
+	unsigned char all_zero;
+	char *p, *zp;
+	struct mutex *h_mtx;
+	/* reduce stack usage */
+	struct iattr *ia;
+
+	zp = page_address(ZERO_PAGE(0));
+	if (unlikely(!zp))
+		return -ENOMEM; /* possible? */
+
+	err = 0;
+	all_zero = 0;
+	while (len) {
+		AuDbg("len %lld\n", len);
+		sz = blksize;
+		if (len < blksize)
+			sz = len;
+
+		rbytes = 0;
+		/* todo: signal_pending? */
+		while (!rbytes || err == -EAGAIN || err == -EINTR) {
+			rbytes = vfsub_read_k(src, buf, sz, &src->f_pos);
+			err = rbytes;
+		}
+		if (unlikely(err < 0))
+			break;
+
+		all_zero = 0;
+		if (len >= rbytes && rbytes == blksize)
+			all_zero = !memcmp(buf, zp, rbytes);
+		if (!all_zero) {
+			wbytes = rbytes;
+			p = buf;
+			while (wbytes) {
+				size_t b;
+
+				b = vfsub_write_k(dst, p, wbytes, &dst->f_pos);
+				err = b;
+				/* todo: signal_pending? */
+				if (unlikely(err == -EAGAIN || err == -EINTR))
+					continue;
+				if (unlikely(err < 0))
+					break;
+				wbytes -= b;
+				p += b;
+			}
+		} else {
+			loff_t res;
+
+			AuLabel(hole);
+			res = vfsub_llseek(dst, rbytes, SEEK_CUR);
+			err = res;
+			if (unlikely(res < 0))
+				break;
+		}
+		len -= rbytes;
+		err = 0;
+	}
+
+	/* the last block may be a hole */
+	if (!err && all_zero) {
+		AuLabel(last hole);
+
+		err = 1;
+		if (au_test_nfs(dst->f_dentry->d_sb)) {
+			/* nfs requires this step to make last hole */
+			/* is this only nfs? */
+			do {
+				/* todo: signal_pending? */
+				err = vfsub_write_k(dst, "\0", 1, &dst->f_pos);
+			} while (err == -EAGAIN || err == -EINTR);
+			if (err == 1)
+				dst->f_pos--;
+		}
+
+		if (err == 1) {
+			ia = (void *)buf;
+			ia->ia_size = dst->f_pos;
+			ia->ia_valid = ATTR_SIZE | ATTR_FILE;
+			ia->ia_file = dst;
+			h_mtx = &file_inode(dst)->i_mutex;
+			mutex_lock_nested(h_mtx, AuLsc_I_CHILD2);
+			err = vfsub_notify_change(&dst->f_path, ia);
+			mutex_unlock(h_mtx);
+		}
+	}
+
+	return err;
+}
+
+int au_copy_file(struct file *dst, struct file *src, loff_t len)
+{
+	int err;
+	unsigned long blksize;
+	unsigned char do_kfree;
+	char *buf;
+
+	err = -ENOMEM;
+	blksize = dst->f_dentry->d_sb->s_blocksize;
+	if (!blksize || PAGE_SIZE < blksize)
+		blksize = PAGE_SIZE;
+	AuDbg("blksize %lu\n", blksize);
+	do_kfree = (blksize != PAGE_SIZE && blksize >= sizeof(struct iattr));
+	if (do_kfree)
+		buf = kmalloc(blksize, GFP_NOFS);
+	else
+		buf = (void *)__get_free_page(GFP_NOFS);
+	if (unlikely(!buf))
+		goto out;
+
+	if (len > (1 << 22))
+		AuDbg("copying a large file %lld\n", (long long)len);
+
+	src->f_pos = 0;
+	dst->f_pos = 0;
+	err = au_do_copy_file(dst, src, len, buf, blksize);
+	if (do_kfree)
+		kfree(buf);
+	else
+		free_page((unsigned long)buf);
+
+out:
+	return err;
+}
+
+/*
+ * to support a sparse file which is opened with O_APPEND,
+ * we need to close the file.
+ */
+static int au_cp_regular(struct au_cp_generic *cpg)
+{
+	int err, i;
+	enum { SRC, DST };
+	struct {
+		aufs_bindex_t bindex;
+		unsigned int flags;
+		struct dentry *dentry;
+		struct file *file;
+		void *label, *label_file;
+	} *f, file[] = {
+		{
+			.bindex = cpg->bsrc,
+			.flags = O_RDONLY | O_NOATIME | O_LARGEFILE,
+			.file = NULL,
+			.label = &&out,
+			.label_file = &&out_src
+		},
+		{
+			.bindex = cpg->bdst,
+			.flags = O_WRONLY | O_NOATIME | O_LARGEFILE,
+			.file = NULL,
+			.label = &&out_src,
+			.label_file = &&out_dst
+		}
+	};
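+	/* the labels above are gcc's "labels as values" (&&label), giving
+	 * each open stage its own unwind target in the loop below */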
+	struct super_block *sb;
+
+	/* bsrc branch can be ro/rw. */
+	sb = cpg->dentry->d_sb;
+	f = file;
+	for (i = 0; i < 2; i++, f++) {
+		f->dentry = au_h_dptr(cpg->dentry, f->bindex);
+		f->file = au_h_open(cpg->dentry, f->bindex, f->flags,
+				    /*file*/NULL);
+		err = PTR_ERR(f->file);
+		if (IS_ERR(f->file))
+			goto *f->label;
+		err = -EINVAL;
+		if (unlikely(!f->file->f_op))
+			goto *f->label_file;
+	}
+
+	/* try to prevent updates while we copy up */
+	IMustLock(file[SRC].dentry->d_inode);
+	err = au_copy_file(file[DST].file, file[SRC].file, cpg->len);
+
+out_dst:
+	fput(file[DST].file);
+	au_sbr_put(sb, file[DST].bindex);
+out_src:
+	fput(file[SRC].file);
+	au_sbr_put(sb, file[SRC].bindex);
+out:
+	return err;
+}
+
+static int au_do_cpup_regular(struct au_cp_generic *cpg,
+			      struct au_cpup_reg_attr *h_src_attr)
+{
+	int err, rerr;
+	loff_t l;
+	struct path h_path;
+	struct inode *h_src_inode;
+
+	err = 0;
+	h_src_inode = au_h_iptr(cpg->dentry->d_inode, cpg->bsrc);
+	l = i_size_read(h_src_inode);
+	if (cpg->len == -1 || l < cpg->len)
+		cpg->len = l;
+	if (cpg->len) {
+		/* try to prevent updates while we are referencing */
+		mutex_lock_nested(&h_src_inode->i_mutex, AuLsc_I_CHILD);
+		au_pin_hdir_unlock(cpg->pin);
+
+		h_path.dentry = au_h_dptr(cpg->dentry, cpg->bsrc);
+		h_path.mnt = au_sbr_mnt(cpg->dentry->d_sb, cpg->bsrc);
+		h_src_attr->iflags = h_src_inode->i_flags;
+		err = vfs_getattr(&h_path, &h_src_attr->st);
+		if (unlikely(err)) {
+			mutex_unlock(&h_src_inode->i_mutex);
+			goto out;
+		}
+		h_src_attr->valid = 1;
+		err = au_cp_regular(cpg);
+		mutex_unlock(&h_src_inode->i_mutex);
+		rerr = au_pin_hdir_relock(cpg->pin);
+		if (!err && rerr)
+			err = rerr;
+	}
+
+out:
+	return err;
+}
+
+static int au_do_cpup_symlink(struct path *h_path, struct dentry *h_src,
+			      struct inode *h_dir)
+{
+	int err, symlen;
+	mm_segment_t old_fs;
+	union {
+		char *k;
+		char __user *u;
+	} sym;
+
+	err = -ENOSYS;
+	if (unlikely(!h_src->d_inode->i_op->readlink))
+		goto out;
+
+	err = -ENOMEM;
+	sym.k = (void *)__get_free_page(GFP_NOFS);
+	if (unlikely(!sym.k))
+		goto out;
+
+	/* taking mmap_sem is unnecessary since a symlink is not mmap-able */
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	symlen = h_src->d_inode->i_op->readlink(h_src, sym.u, PATH_MAX);
+	err = symlen;
+	set_fs(old_fs);
+
+	if (symlen > 0) {
+		sym.k[symlen] = 0;
+		err = vfsub_symlink(h_dir, h_path, sym.k);
+	}
+	free_page((unsigned long)sym.k);
+
+out:
+	return err;
+}
+
+static noinline_for_stack
+int cpup_entry(struct au_cp_generic *cpg, struct dentry *dst_parent,
+	       struct au_cpup_reg_attr *h_src_attr)
+{
+	int err;
+	umode_t mode;
+	unsigned int mnt_flags;
+	unsigned char isdir;
+	const unsigned char do_dt = !!au_ftest_cpup(cpg->flags, DTIME);
+	struct au_dtime dt;
+	struct path h_path;
+	struct dentry *h_src, *h_dst, *h_parent;
+	struct inode *h_inode, *h_dir;
+	struct super_block *sb;
+
+	/* bsrc branch can be ro/rw. */
+	h_src = au_h_dptr(cpg->dentry, cpg->bsrc);
+	h_inode = h_src->d_inode;
+	AuDebugOn(h_inode != au_h_iptr(cpg->dentry->d_inode, cpg->bsrc));
+
+	/* try to prevent this entry from being referenced while we create it */
+	h_dst = au_h_dptr(cpg->dentry, cpg->bdst);
+	if (au_ftest_cpup(cpg->flags, RENAME))
+		AuDebugOn(strncmp(h_dst->d_name.name, AUFS_WH_PFX,
+				  AUFS_WH_PFX_LEN));
+	h_parent = h_dst->d_parent; /* dir inode is locked */
+	h_dir = h_parent->d_inode;
+	IMustLock(h_dir);
+	AuDebugOn(h_parent != h_dst->d_parent);
+
+	sb = cpg->dentry->d_sb;
+	h_path.mnt = au_sbr_mnt(sb, cpg->bdst);
+	if (do_dt) {
+		h_path.dentry = h_parent;
+		au_dtime_store(&dt, dst_parent, &h_path);
+	}
+	h_path.dentry = h_dst;
+
+	isdir = 0;
+	mode = h_inode->i_mode;
+	switch (mode & S_IFMT) {
+	case S_IFREG:
+		err = vfsub_create(h_dir, &h_path, mode | S_IWUSR,
+				   /*want_excl*/true);
+		if (!err)
+			err = au_do_cpup_regular(cpg, h_src_attr);
+		break;
+	case S_IFDIR:
+		isdir = 1;
+		err = vfsub_mkdir(h_dir, &h_path, mode);
+		if (!err) {
+			/*
+			 * strange behaviour from the user's view,
+			 * particularly in the setattr case
+			 */
+			if (au_ibstart(dst_parent->d_inode) == cpg->bdst)
+				au_cpup_attr_nlink(dst_parent->d_inode,
+						   /*force*/1);
+			au_cpup_attr_nlink(cpg->dentry->d_inode, /*force*/1);
+		}
+		break;
+	case S_IFLNK:
+		err = au_do_cpup_symlink(&h_path, h_src, h_dir);
+		break;
+	case S_IFCHR:
+	case S_IFBLK:
+		AuDebugOn(!capable(CAP_MKNOD));
+		/*FALLTHROUGH*/
+	case S_IFIFO:
+	case S_IFSOCK:
+		err = vfsub_mknod(h_dir, &h_path, mode, h_inode->i_rdev);
+		break;
+	default:
+		AuIOErr("Unknown inode type 0%o\n", mode);
+		err = -EIO;
+	}
+
+	mnt_flags = au_mntflags(sb);
+	if (!au_opt_test(mnt_flags, UDBA_NONE)
+	    && !isdir
+	    && au_opt_test(mnt_flags, XINO)
+	    && h_inode->i_nlink == 1
+	    /* todo: unnecessary? */
+	    /* && cpg->dentry->d_inode->i_nlink == 1 */
+	    && cpg->bdst < cpg->bsrc
+	    && !au_ftest_cpup(cpg->flags, KEEPLINO))
+		au_xino_write(sb, cpg->bsrc, h_inode->i_ino, /*ino*/0);
+		/* ignore this error */
+
+	if (do_dt)
+		au_dtime_revert(&dt);
+	return err;
+}
+
+static int au_do_ren_after_cpup(struct dentry *dentry, aufs_bindex_t bdst,
+				struct path *h_path)
+{
+	int err;
+	struct dentry *h_dentry, *h_parent;
+	struct inode *h_dir;
+
+	h_dentry = dget(au_h_dptr(dentry, bdst));
+	au_set_h_dptr(dentry, bdst, NULL);
+	err = au_lkup_neg(dentry, bdst, /*wh*/0);
+	if (unlikely(err)) {
+		au_set_h_dptr(dentry, bdst, h_dentry);
+		goto out;
+	}
+
+	h_path->dentry = dget(au_h_dptr(dentry, bdst));
+	au_set_h_dptr(dentry, bdst, h_dentry);
+	h_parent = h_dentry->d_parent; /* dir inode is locked */
+	h_dir = h_parent->d_inode;
+	IMustLock(h_dir);
+	AuDbg("%.*s %.*s\n", AuDLNPair(h_dentry), AuDLNPair(h_path->dentry));
+	err = vfsub_rename(h_dir, h_dentry, h_dir, h_path);
+	dput(h_path->dentry);
+
+out:
+	return err;
+}
+
+/*
+ * copyup the @dentry from @bsrc to @bdst.
+ * the caller must set both of the lower dentries.
+ * @len is for truncating; when it is -1, copy up the entire file.
+ * in link/rename cases, @dst_parent may be different from the real one.
+ * cpg->bsrc can be larger than cpg->bdst.
+ */
+static int au_cpup_single(struct au_cp_generic *cpg, struct dentry *dst_parent)
+{
+	int err, rerr;
+	aufs_bindex_t old_ibstart;
+	unsigned char isdir, plink;
+	struct dentry *h_src, *h_dst, *h_parent;
+	struct inode *dst_inode, *h_dir, *inode;
+	struct super_block *sb;
+	struct au_branch *br;
+	/* to reduce stack size */
+	struct {
+		struct au_dtime dt;
+		struct path h_path;
+		struct au_cpup_reg_attr h_src_attr;
+	} *a;
+
+	err = -ENOMEM;
+	a = kmalloc(sizeof(*a), GFP_NOFS);
+	if (unlikely(!a))
+		goto out;
+	a->h_src_attr.valid = 0;
+
+	sb = cpg->dentry->d_sb;
+	br = au_sbr(sb, cpg->bdst);
+	a->h_path.mnt = au_br_mnt(br);
+	h_dst = au_h_dptr(cpg->dentry, cpg->bdst);
+	h_parent = h_dst->d_parent; /* dir inode is locked */
+	h_dir = h_parent->d_inode;
+	IMustLock(h_dir);
+
+	h_src = au_h_dptr(cpg->dentry, cpg->bsrc);
+	inode = cpg->dentry->d_inode;
+
+	if (!dst_parent)
+		dst_parent = dget_parent(cpg->dentry);
+	else
+		dget(dst_parent);
+
+	plink = !!au_opt_test(au_mntflags(sb), PLINK);
+	dst_inode = au_h_iptr(inode, cpg->bdst);
+	if (dst_inode) {
+		if (unlikely(!plink)) {
+			err = -EIO;
+			AuIOErr("hi%lu(i%lu) exists on b%d "
+				"but plink is disabled\n",
+				dst_inode->i_ino, inode->i_ino, cpg->bdst);
+			goto out_parent;
+		}
+
+		if (dst_inode->i_nlink) {
+			const int do_dt = au_ftest_cpup(cpg->flags, DTIME);
+
+			h_src = au_plink_lkup(inode, cpg->bdst);
+			err = PTR_ERR(h_src);
+			if (IS_ERR(h_src))
+				goto out_parent;
+			if (unlikely(!h_src->d_inode)) {
+				err = -EIO;
+				AuIOErr("i%lu exists on a upper branch "
+					"but not pseudo-linked\n",
+					inode->i_ino);
+				dput(h_src);
+				goto out_parent;
+			}
+
+			if (do_dt) {
+				a->h_path.dentry = h_parent;
+				au_dtime_store(&a->dt, dst_parent, &a->h_path);
+			}
+
+			a->h_path.dentry = h_dst;
+			err = vfsub_link(h_src, h_dir, &a->h_path);
+			if (!err && au_ftest_cpup(cpg->flags, RENAME))
+				err = au_do_ren_after_cpup
+					(cpg->dentry, cpg->bdst, &a->h_path);
+			if (do_dt)
+				au_dtime_revert(&a->dt);
+			dput(h_src);
+			goto out_parent;
+		} else
+			/* todo: cpup_wh_file? */
+			/* udba work */
+			au_update_ibrange(inode, /*do_put_zero*/1);
+	}
+
+	isdir = S_ISDIR(inode->i_mode);
+	old_ibstart = au_ibstart(inode);
+	err = cpup_entry(cpg, dst_parent, &a->h_src_attr);
+	if (unlikely(err))
+		goto out_rev;
+	dst_inode = h_dst->d_inode;
+	mutex_lock_nested(&dst_inode->i_mutex, AuLsc_I_CHILD2);
+	/* todo: necessary? */
+	/* au_pin_hdir_unlock(cpg->pin); */
+
+	err = cpup_iattr(cpg->dentry, cpg->bdst, h_src, &a->h_src_attr);
+	if (unlikely(err)) {
+		/* todo: necessary? */
+		/* au_pin_hdir_relock(cpg->pin); */ /* ignore an error */
+		mutex_unlock(&dst_inode->i_mutex);
+		goto out_rev;
+	}
+
+	if (cpg->bdst < old_ibstart) {
+		if (S_ISREG(inode->i_mode)) {
+			err = au_dy_iaop(inode, cpg->bdst, dst_inode);
+			if (unlikely(err)) {
+				/* ignore an error */
+				/* au_pin_hdir_relock(cpg->pin); */
+				mutex_unlock(&dst_inode->i_mutex);
+				goto out_rev;
+			}
+		}
+		au_set_ibstart(inode, cpg->bdst);
+	} else
+		au_set_ibend(inode, cpg->bdst);
+	au_set_h_iptr(inode, cpg->bdst, au_igrab(dst_inode),
+		      au_hi_flags(inode, isdir));
+
+	/* todo: necessary? */
+	/* err = au_pin_hdir_relock(cpg->pin); */
+	mutex_unlock(&dst_inode->i_mutex);
+	if (unlikely(err))
+		goto out_rev;
+
+	if (!isdir
+	    && h_src->d_inode->i_nlink > 1
+	    && plink)
+		au_plink_append(inode, cpg->bdst, h_dst);
+
+	if (au_ftest_cpup(cpg->flags, RENAME)) {
+		a->h_path.dentry = h_dst;
+		err = au_do_ren_after_cpup(cpg->dentry, cpg->bdst, &a->h_path);
+	}
+	if (!err)
+		goto out_parent; /* success */
+
+	/* revert */
+out_rev:
+	a->h_path.dentry = h_parent;
+	au_dtime_store(&a->dt, dst_parent, &a->h_path);
+	a->h_path.dentry = h_dst;
+	rerr = 0;
+	if (h_dst->d_inode) {
+		if (!isdir)
+			rerr = vfsub_unlink(h_dir, &a->h_path, /*force*/0);
+		else
+			rerr = vfsub_rmdir(h_dir, &a->h_path);
+	}
+	au_dtime_revert(&a->dt);
+	if (rerr) {
+		AuIOErr("failed removing broken entry(%d, %d)\n", err, rerr);
+		err = -EIO;
+	}
+out_parent:
+	dput(dst_parent);
+	kfree(a);
+out:
+	return err;
+}
+
+#if 0 /* unused */
+struct au_cpup_single_args {
+	int *errp;
+	struct au_cp_generic *cpg;
+	struct dentry *dst_parent;
+};
+
+static void au_call_cpup_single(void *args)
+{
+	struct au_cpup_single_args *a = args;
+
+	au_pin_hdir_acquire_nest(a->cpg->pin);
+	*a->errp = au_cpup_single(a->cpg, a->dst_parent);
+	au_pin_hdir_release(a->cpg->pin);
+}
+#endif
+
+/*
+ * prevent SIGXFSZ in copy-up.
+ * testing CAP_MKNOD is for generic fs,
+ * but CAP_FSETID is for xfs only, currently.
+ */
+static int au_cpup_sio_test(struct au_pin *pin, umode_t mode)
+{
+	int do_sio;
+	struct super_block *sb;
+	struct inode *h_dir;
+
+	do_sio = 0;
+	sb = au_pinned_parent(pin)->d_sb;
+	if (!au_wkq_test()
+	    && (!au_sbi(sb)->si_plink_maint_pid
+		|| au_plink_maint(sb, AuLock_NOPLM))) {
+		switch (mode & S_IFMT) {
+		case S_IFREG:
+			/* no condition about RLIMIT_FSIZE and the file size */
+			do_sio = 1;
+			break;
+		case S_IFCHR:
+		case S_IFBLK:
+			do_sio = !capable(CAP_MKNOD);
+			break;
+		}
+		if (!do_sio)
+			do_sio = ((mode & (S_ISUID | S_ISGID))
+				  && !capable(CAP_FSETID));
+		/* this workaround may be removed in the future */
+		if (!do_sio) {
+			h_dir = au_pinned_h_dir(pin);
+			do_sio = h_dir->i_mode & S_ISVTX;
+		}
+	}
+
+	return do_sio;
+}
+
+#if 0 /* unused */
+int au_sio_cpup_single(struct au_cp_generic *cpg, struct dentry *dst_parent)
+{
+	int err, wkq_err;
+	struct dentry *h_dentry;
+
+	h_dentry = au_h_dptr(cpg->dentry, cpg->bsrc);
+	if (!au_cpup_sio_test(cpg->pin, h_dentry->d_inode->i_mode))
+		err = au_cpup_single(cpg, dst_parent);
+	else {
+		struct au_cpup_single_args args = {
+			.errp		= &err,
+			.cpg		= cpg,
+			.dst_parent	= dst_parent
+		};
+		wkq_err = au_wkq_wait(au_call_cpup_single, &args);
+		if (unlikely(wkq_err))
+			err = wkq_err;
+	}
+
+	return err;
+}
+#endif
+
+/*
+ * copyup the @dentry from the first active lower branch to @bdst,
+ * using au_cpup_single().
+ */
+static int au_cpup_simple(struct au_cp_generic *cpg)
+{
+	int err;
+	unsigned int flags_orig;
+	struct dentry *dentry;
+
+	AuDebugOn(cpg->bsrc < 0);
+
+	dentry = cpg->dentry;
+	DiMustWriteLock(dentry);
+
+	err = au_lkup_neg(dentry, cpg->bdst, /*wh*/1);
+	if (!err) {
+		flags_orig = cpg->flags;
+		au_fset_cpup(cpg->flags, RENAME);
+		err = au_cpup_single(cpg, NULL);
+		cpg->flags = flags_orig;
+		if (!err)
+			return 0; /* success */
+
+		/* revert */
+		au_set_h_dptr(dentry, cpg->bdst, NULL);
+		au_set_dbstart(dentry, cpg->bsrc);
+	}
+
+	return err;
+}
+
+struct au_cpup_simple_args {
+	int *errp;
+	struct au_cp_generic *cpg;
+};
+
+static void au_call_cpup_simple(void *args)
+{
+	struct au_cpup_simple_args *a = args;
+
+	au_pin_hdir_acquire_nest(a->cpg->pin);
+	*a->errp = au_cpup_simple(a->cpg);
+	au_pin_hdir_release(a->cpg->pin);
+}
+
+static int au_do_sio_cpup_simple(struct au_cp_generic *cpg)
+{
+	int err, wkq_err;
+	struct dentry *dentry, *parent;
+	struct file *h_file;
+	struct inode *h_dir;
+
+	dentry = cpg->dentry;
+	h_file = NULL;
+	if (au_ftest_cpup(cpg->flags, HOPEN)) {
+		AuDebugOn(cpg->bsrc < 0);
+		h_file = au_h_open_pre(dentry, cpg->bsrc);
+		err = PTR_ERR(h_file);
+		if (IS_ERR(h_file))
+			goto out;
+	}
+
+	parent = dget_parent(dentry);
+	h_dir = au_h_iptr(parent->d_inode, cpg->bdst);
+	if (!au_test_h_perm_sio(h_dir, MAY_EXEC | MAY_WRITE)
+	    && !au_cpup_sio_test(cpg->pin, dentry->d_inode->i_mode))
+		err = au_cpup_simple(cpg);
+	else {
+		struct au_cpup_simple_args args = {
+			.errp		= &err,
+			.cpg		= cpg
+		};
+		wkq_err = au_wkq_wait(au_call_cpup_simple, &args);
+		if (unlikely(wkq_err))
+			err = wkq_err;
+	}
+
+	dput(parent);
+	if (h_file)
+		au_h_open_post(dentry, cpg->bsrc, h_file);
+
+out:
+	return err;
+}
+
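+/*
+ * when cpg->bsrc is unset (-1), pick the first positive lower dentry below
+ * cpg->bdst as the copy-up source
+ */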
+int au_sio_cpup_simple(struct au_cp_generic *cpg)
+{
+	aufs_bindex_t bsrc, bend;
+	struct dentry *dentry, *h_dentry;
+
+	if (cpg->bsrc < 0) {
+		dentry = cpg->dentry;
+		bend = au_dbend(dentry);
+		for (bsrc = cpg->bdst + 1; bsrc <= bend; bsrc++) {
+			h_dentry = au_h_dptr(dentry, bsrc);
+			if (h_dentry) {
+				AuDebugOn(!h_dentry->d_inode);
+				break;
+			}
+		}
+		AuDebugOn(bsrc > bend);
+		cpg->bsrc = bsrc;
+	}
+	AuDebugOn(cpg->bsrc <= cpg->bdst);
+	return au_do_sio_cpup_simple(cpg);
+}
+
+int au_sio_cpdown_simple(struct au_cp_generic *cpg)
+{
+	AuDebugOn(cpg->bdst <= cpg->bsrc);
+	return au_do_sio_cpup_simple(cpg);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * copyup the deleted file for writing.
+ */
+static int au_do_cpup_wh(struct au_cp_generic *cpg, struct dentry *wh_dentry,
+			 struct file *file)
+{
+	int err;
+	unsigned int flags_orig;
+	aufs_bindex_t bsrc_orig;
+	struct dentry *h_d_dst, *h_d_start;
+	struct au_dinfo *dinfo;
+	struct au_hdentry *hdp;
+
+	dinfo = au_di(cpg->dentry);
+	AuRwMustWriteLock(&dinfo->di_rwsem);
+
+	bsrc_orig = cpg->bsrc;
+	cpg->bsrc = dinfo->di_bstart;
+	hdp = dinfo->di_hdentry;
+	h_d_dst = hdp[0 + cpg->bdst].hd_dentry;
+	dinfo->di_bstart = cpg->bdst;
+	hdp[0 + cpg->bdst].hd_dentry = wh_dentry;
+	h_d_start = NULL;
+	if (file) {
+		h_d_start = hdp[0 + cpg->bsrc].hd_dentry;
+		hdp[0 + cpg->bsrc].hd_dentry = au_hf_top(file)->f_dentry;
+	}
+	flags_orig = cpg->flags;
+	cpg->flags = !AuCpup_DTIME;
+	err = au_cpup_single(cpg, /*h_parent*/NULL);
+	cpg->flags = flags_orig;
+	if (file) {
+		if (!err)
+			err = au_reopen_nondir(file);
+		hdp[0 + cpg->bsrc].hd_dentry = h_d_start;
+	}
+	hdp[0 + cpg->bdst].hd_dentry = h_d_dst;
+	dinfo->di_bstart = cpg->bsrc;
+	cpg->bsrc = bsrc_orig;
+
+	return err;
+}
+
+static int au_cpup_wh(struct au_cp_generic *cpg, struct file *file)
+{
+	int err;
+	aufs_bindex_t bdst;
+	struct au_dtime dt;
+	struct dentry *dentry, *parent, *h_parent, *wh_dentry;
+	struct au_branch *br;
+	struct path h_path;
+
+	dentry = cpg->dentry;
+	bdst = cpg->bdst;
+	br = au_sbr(dentry->d_sb, bdst);
+	parent = dget_parent(dentry);
+	h_parent = au_h_dptr(parent, bdst);
+	wh_dentry = au_whtmp_lkup(h_parent, br, &dentry->d_name);
+	err = PTR_ERR(wh_dentry);
+	if (IS_ERR(wh_dentry))
+		goto out;
+
+	h_path.dentry = h_parent;
+	h_path.mnt = au_br_mnt(br);
+	au_dtime_store(&dt, parent, &h_path);
+	err = au_do_cpup_wh(cpg, wh_dentry, file);
+	if (unlikely(err))
+		goto out_wh;
+
+	dget(wh_dentry);
+	h_path.dentry = wh_dentry;
+	if (!S_ISDIR(wh_dentry->d_inode->i_mode))
+		err = vfsub_unlink(h_parent->d_inode, &h_path, /*force*/0);
+	else
+		err = vfsub_rmdir(h_parent->d_inode, &h_path);
+	if (unlikely(err)) {
+		AuIOErr("failed remove copied-up tmp file %.*s(%d)\n",
+			AuDLNPair(wh_dentry), err);
+		err = -EIO;
+	}
+	au_dtime_revert(&dt);
+	au_set_hi_wh(dentry->d_inode, bdst, wh_dentry);
+
+out_wh:
+	dput(wh_dentry);
+out:
+	dput(parent);
+	return err;
+}
+
+struct au_cpup_wh_args {
+	int *errp;
+	struct au_cp_generic *cpg;
+	struct file *file;
+};
+
+static void au_call_cpup_wh(void *args)
+{
+	struct au_cpup_wh_args *a = args;
+
+	au_pin_hdir_acquire_nest(a->cpg->pin);
+	*a->errp = au_cpup_wh(a->cpg, a->file);
+	au_pin_hdir_release(a->cpg->pin);
+}
+
+int au_sio_cpup_wh(struct au_cp_generic *cpg, struct file *file)
+{
+	int err, wkq_err;
+	aufs_bindex_t bdst;
+	struct dentry *dentry, *parent, *h_orph, *h_parent, *h_dentry;
+	struct inode *dir, *h_dir, *h_tmpdir;
+	struct au_wbr *wbr;
+	struct au_pin wh_pin, *pin_orig;
+
+	dentry = cpg->dentry;
+	bdst = cpg->bdst;
+	parent = dget_parent(dentry);
+	dir = parent->d_inode;
+	h_orph = NULL;
+	h_parent = NULL;
+	h_dir = au_igrab(au_h_iptr(dir, bdst));
+	h_tmpdir = h_dir;
+	pin_orig = NULL;
+	if (!h_dir->i_nlink) {
+		wbr = au_sbr(dentry->d_sb, bdst)->br_wbr;
+		h_orph = wbr->wbr_orph;
+
+		h_parent = dget(au_h_dptr(parent, bdst));
+		au_set_h_dptr(parent, bdst, dget(h_orph));
+		h_tmpdir = h_orph->d_inode;
+		au_set_h_iptr(dir, bdst, au_igrab(h_tmpdir), /*flags*/0);
+
+		if (file)
+			h_dentry = au_hf_top(file)->f_dentry;
+		else
+			h_dentry = au_h_dptr(dentry, au_dbstart(dentry));
+		mutex_lock_nested(&h_tmpdir->i_mutex, AuLsc_I_PARENT3);
+		/* todo: au_h_open_pre()? */
+
+		pin_orig = cpg->pin;
+		au_pin_init(&wh_pin, dentry, bdst, AuLsc_DI_PARENT,
+			    AuLsc_I_PARENT3, cpg->pin->udba, AuPin_DI_LOCKED);
+		cpg->pin = &wh_pin;
+	}
+
+	if (!au_test_h_perm_sio(h_tmpdir, MAY_EXEC | MAY_WRITE)
+	    && !au_cpup_sio_test(cpg->pin, dentry->d_inode->i_mode))
+		err = au_cpup_wh(cpg, file);
+	else {
+		struct au_cpup_wh_args args = {
+			.errp	= &err,
+			.cpg	= cpg,
+			.file	= file
+		};
+		wkq_err = au_wkq_wait(au_call_cpup_wh, &args);
+		if (unlikely(wkq_err))
+			err = wkq_err;
+	}
+
+	if (h_orph) {
+		mutex_unlock(&h_tmpdir->i_mutex);
+		/* todo: au_h_open_post()? */
+		au_set_h_iptr(dir, bdst, au_igrab(h_dir), /*flags*/0);
+		au_set_h_dptr(parent, bdst, h_parent);
+		AuDebugOn(!pin_orig);
+		cpg->pin = pin_orig;
+	}
+	iput(h_dir);
+	dput(parent);
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * generic routine for both copy-up and copy-down.
+ */
+/* cf. revalidate function in file.c */
+int au_cp_dirs(struct dentry *dentry, aufs_bindex_t bdst,
+	       int (*cp)(struct dentry *dentry, aufs_bindex_t bdst,
+			 struct au_pin *pin,
+			 struct dentry *h_parent, void *arg),
+	       void *arg)
+{
+	int err;
+	struct au_pin pin;
+	struct dentry *d, *parent, *h_parent, *real_parent;
+
+	err = 0;
+	parent = dget_parent(dentry);
+	if (IS_ROOT(parent))
+		goto out;
+
+	au_pin_init(&pin, dentry, bdst, AuLsc_DI_PARENT2, AuLsc_I_PARENT2,
+		    au_opt_udba(dentry->d_sb), AuPin_MNT_WRITE);
+
+	/* do not use au_dpage */
+	real_parent = parent;
+	while (1) {
+		dput(parent);
+		parent = dget_parent(dentry);
+		h_parent = au_h_dptr(parent, bdst);
+		if (h_parent)
+			goto out; /* success */
+
+		/* find the topmost dir which needs to be copied up */
+		do {
+			d = parent;
+			dput(parent);
+			parent = dget_parent(d);
+			di_read_lock_parent3(parent, !AuLock_IR);
+			h_parent = au_h_dptr(parent, bdst);
+			di_read_unlock(parent, !AuLock_IR);
+		} while (!h_parent);
+
+		if (d != real_parent)
+			di_write_lock_child3(d);
+
+		/* somebody else might have created it while we were sleeping */
+		if (!au_h_dptr(d, bdst) || !au_h_dptr(d, bdst)->d_inode) {
+			if (au_h_dptr(d, bdst))
+				au_update_dbstart(d);
+
+			au_pin_set_dentry(&pin, d);
+			err = au_do_pin(&pin);
+			if (!err) {
+				err = cp(d, bdst, &pin, h_parent, arg);
+				au_unpin(&pin);
+			}
+		}
+
+		if (d != real_parent)
+			di_write_unlock(d);
+		if (unlikely(err))
+			break;
+	}
+
+out:
+	dput(parent);
+	return err;
+}
+
+static int au_cpup_dir(struct dentry *dentry, aufs_bindex_t bdst,
+		       struct au_pin *pin,
+		       struct dentry *h_parent __maybe_unused ,
+		       void *arg __maybe_unused)
+{
+	struct au_cp_generic cpg = {
+		.dentry	= dentry,
+		.bdst	= bdst,
+		.bsrc	= -1,
+		.len	= 0,
+		.pin	= pin,
+		.flags	= AuCpup_DTIME
+	};
+	return au_sio_cpup_simple(&cpg);
+}
+
+int au_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst)
+{
+	return au_cp_dirs(dentry, bdst, au_cpup_dir, NULL);
+}
+
+int au_test_and_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst)
+{
+	int err;
+	struct dentry *parent;
+	struct inode *dir;
+
+	parent = dget_parent(dentry);
+	dir = parent->d_inode;
+	err = 0;
+	if (au_h_iptr(dir, bdst))
+		goto out;
+
+	di_read_unlock(parent, AuLock_IR);
+	di_write_lock_parent(parent);
+	/* someone else might have changed our inode while we were sleeping */
+	if (!au_h_iptr(dir, bdst))
+		err = au_cpup_dirs(dentry, bdst);
+	di_downgrade_lock(parent, AuLock_IR);
+
+out:
+	dput(parent);
+	return err;
+}
diff --git a/fs/aufs/cpup.h b/fs/aufs/cpup.h
new file mode 100644
index 0000000..5500bf6
--- /dev/null
+++ b/fs/aufs/cpup.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * copy-up/down functions
+ */
+
+#ifndef __AUFS_CPUP_H__
+#define __AUFS_CPUP_H__
+
+#ifdef __KERNEL__
+
+#include <linux/path.h>
+
+struct inode;
+struct file;
+struct au_pin;
+
+void au_cpup_attr_flags(struct inode *dst, unsigned int iflags);
+void au_cpup_attr_timesizes(struct inode *inode);
+void au_cpup_attr_nlink(struct inode *inode, int force);
+void au_cpup_attr_changeable(struct inode *inode);
+void au_cpup_igen(struct inode *inode, struct inode *h_inode);
+void au_cpup_attr_all(struct inode *inode, int force);
+
+/* ---------------------------------------------------------------------- */
+
+struct au_cp_generic {
+	struct dentry	*dentry;
+	aufs_bindex_t	bdst, bsrc;
+	loff_t		len;
+	struct au_pin	*pin;
+	unsigned int	flags;
+};
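+
+/*
+ * a typical initialization (cf. au_cpup_dir() in cpup.c):
+ *	struct au_cp_generic cpg = {
+ *		.dentry	= dentry,
+ *		.bdst	= bdst,
+ *		.bsrc	= -1,
+ *		.len	= 0,
+ *		.pin	= pin,
+ *		.flags	= AuCpup_DTIME
+ *	};
+ */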
+
+/* cpup flags */
+#define AuCpup_DTIME	1		/* do dtime_store/revert */
+#define AuCpup_KEEPLINO	(1 << 1)	/* do not clear the lower xino,
+					   for link(2) */
+#define AuCpup_RENAME	(1 << 2)	/* rename after cpup */
+#define AuCpup_HOPEN	(1 << 3)	/* call h_open_pre/post() in cpup */
+
+#define au_ftest_cpup(flags, name)	((flags) & AuCpup_##name)
+#define au_fset_cpup(flags, name) \
+	do { (flags) |= AuCpup_##name; } while (0)
+#define au_fclr_cpup(flags, name) \
+	do { (flags) &= ~AuCpup_##name; } while (0)
+
+int au_copy_file(struct file *dst, struct file *src, loff_t len);
+int au_sio_cpup_simple(struct au_cp_generic *cpg);
+int au_sio_cpdown_simple(struct au_cp_generic *cpg);
+int au_sio_cpup_wh(struct au_cp_generic *cpg, struct file *file);
+
+int au_cp_dirs(struct dentry *dentry, aufs_bindex_t bdst,
+	       int (*cp)(struct dentry *dentry, aufs_bindex_t bdst,
+			 struct au_pin *pin,
+			 struct dentry *h_parent, void *arg),
+	       void *arg);
+int au_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst);
+int au_test_and_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst);
+
+/* ---------------------------------------------------------------------- */
+
+/* keep timestamps when copyup */
+struct au_dtime {
+	struct dentry *dt_dentry;
+	struct path dt_h_path;
+	struct timespec dt_atime, dt_mtime;
+};
+void au_dtime_store(struct au_dtime *dt, struct dentry *dentry,
+		    struct path *h_path);
+void au_dtime_revert(struct au_dtime *dt);
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_CPUP_H__ */
diff --git a/fs/aufs/dbgaufs.c b/fs/aufs/dbgaufs.c
new file mode 100644
index 0000000..c479eec
--- /dev/null
+++ b/fs/aufs/dbgaufs.c
@@ -0,0 +1,433 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * debugfs interface
+ */
+
+#include <linux/debugfs.h>
+#include "aufs.h"
+
+#ifndef CONFIG_SYSFS
+#error DEBUG_FS depends upon SYSFS
+#endif
+
+static struct dentry *dbgaufs;
+static const mode_t dbgaufs_mode = S_IRUSR | S_IRGRP | S_IROTH;
+
+/* 20 is the maximum number of decimal digits in a 64-bit unsigned integer */
+struct dbgaufs_arg {
+	int n;
+	char a[20 * 4];
+};
+
+/*
+ * common function for all XINO files
+ */
+static int dbgaufs_xi_release(struct inode *inode __maybe_unused,
+			      struct file *file)
+{
+	kfree(file->private_data);
+	return 0;
+}
+
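+/*
+ * reads as "<blocks>x<blksize> <size>", prefixed with the reference count
+ * of the XINO file when @do_fcnt is set (cf. the snprintf formats below)
+ */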
+static int dbgaufs_xi_open(struct file *xf, struct file *file, int do_fcnt)
+{
+	int err;
+	struct kstat st;
+	struct dbgaufs_arg *p;
+
+	err = -ENOMEM;
+	p = kmalloc(sizeof(*p), GFP_NOFS);
+	if (unlikely(!p))
+		goto out;
+
+	err = 0;
+	p->n = 0;
+	file->private_data = p;
+	if (!xf)
+		goto out;
+
+	err = vfs_getattr(&xf->f_path, &st);
+	if (!err) {
+		if (do_fcnt)
+			p->n = snprintf
+				(p->a, sizeof(p->a), "%ld, %llux%lu %lld\n",
+				 (long)file_count(xf), st.blocks, st.blksize,
+				 (long long)st.size);
+		else
+			p->n = snprintf(p->a, sizeof(p->a), "%llux%lu %lld\n",
+					st.blocks, st.blksize,
+					(long long)st.size);
+		AuDebugOn(p->n >= sizeof(p->a));
+	} else {
+		p->n = snprintf(p->a, sizeof(p->a), "err %d\n", err);
+		err = 0;
+	}
+
+out:
+	return err;
+}
+
+static ssize_t dbgaufs_xi_read(struct file *file, char __user *buf,
+			       size_t count, loff_t *ppos)
+{
+	struct dbgaufs_arg *p;
+
+	p = file->private_data;
+	return simple_read_from_buffer(buf, count, ppos, p->a, p->n);
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct dbgaufs_plink_arg {
+	int n;
+	char a[];
+};
+
+static int dbgaufs_plink_release(struct inode *inode __maybe_unused,
+				 struct file *file)
+{
+	free_page((unsigned long)file->private_data);
+	return 0;
+}
+
+static int dbgaufs_plink_open(struct inode *inode, struct file *file)
+{
+	int err, i, limit;
+	unsigned long n, sum;
+	struct dbgaufs_plink_arg *p;
+	struct au_sbinfo *sbinfo;
+	struct super_block *sb;
+	struct au_sphlhead *sphl;
+
+	err = -ENOMEM;
+	p = (void *)get_zeroed_page(GFP_NOFS);
+	if (unlikely(!p))
+		goto out;
+
+	err = -EFBIG;
+	sbinfo = inode->i_private;
+	sb = sbinfo->si_sb;
+	si_noflush_read_lock(sb);
+	if (au_opt_test(au_mntflags(sb), PLINK)) {
+		limit = PAGE_SIZE - sizeof(p->n);
+
+		/* the number of buckets */
+		n = snprintf(p->a + p->n, limit, "%d\n", AuPlink_NHASH);
+		p->n += n;
+		limit -= n;
+
+		sum = 0;
+		for (i = 0, sphl = sbinfo->si_plink;
+		     i < AuPlink_NHASH;
+		     i++, sphl++) {
+			n = au_sphl_count(sphl);
+			sum += n;
+
+			n = snprintf(p->a + p->n, limit, "%lu ", n);
+			p->n += n;
+			limit -= n;
+			if (unlikely(limit <= 0))
+				goto out_free;
+		}
+		p->a[p->n - 1] = '\n';
+
+		/* the sum of plinks */
+		n = snprintf(p->a + p->n, limit, "%lu\n", sum);
+		p->n += n;
+		limit -= n;
+		if (unlikely(limit <= 0))
+			goto out_free;
+	} else {
+#define str "1\n0\n0\n"
+		p->n = sizeof(str) - 1;
+		strcpy(p->a, str);
+#undef str
+	}
+	si_read_unlock(sb);
+
+	err = 0;
+	file->private_data = p;
+	goto out; /* success */
+
+out_free:
+	free_page((unsigned long)p);
+out:
+	return err;
+}
+
+static ssize_t dbgaufs_plink_read(struct file *file, char __user *buf,
+				  size_t count, loff_t *ppos)
+{
+	struct dbgaufs_plink_arg *p;
+
+	p = file->private_data;
+	return simple_read_from_buffer(buf, count, ppos, p->a, p->n);
+}
+
+static const struct file_operations dbgaufs_plink_fop = {
+	.owner		= THIS_MODULE,
+	.open		= dbgaufs_plink_open,
+	.release	= dbgaufs_plink_release,
+	.read		= dbgaufs_plink_read
+};
+
+/* ---------------------------------------------------------------------- */
+
+static int dbgaufs_xib_open(struct inode *inode, struct file *file)
+{
+	int err;
+	struct au_sbinfo *sbinfo;
+	struct super_block *sb;
+
+	sbinfo = inode->i_private;
+	sb = sbinfo->si_sb;
+	si_noflush_read_lock(sb);
+	err = dbgaufs_xi_open(sbinfo->si_xib, file, /*do_fcnt*/0);
+	si_read_unlock(sb);
+	return err;
+}
+
+static const struct file_operations dbgaufs_xib_fop = {
+	.owner		= THIS_MODULE,
+	.open		= dbgaufs_xib_open,
+	.release	= dbgaufs_xi_release,
+	.read		= dbgaufs_xi_read
+};
+
+/* ---------------------------------------------------------------------- */
+
+#define DbgaufsXi_PREFIX "xi"
+
+static int dbgaufs_xino_open(struct inode *inode, struct file *file)
+{
+	int err;
+	long l;
+	struct au_sbinfo *sbinfo;
+	struct super_block *sb;
+	struct file *xf;
+	struct qstr *name;
+
+	err = -ENOENT;
+	xf = NULL;
+	name = &file->f_dentry->d_name;
+	if (unlikely(name->len < sizeof(DbgaufsXi_PREFIX)
+		     || memcmp(name->name, DbgaufsXi_PREFIX,
+			       sizeof(DbgaufsXi_PREFIX) - 1)))
+		goto out;
+	err = kstrtol(name->name + sizeof(DbgaufsXi_PREFIX) - 1, 10, &l);
+	if (unlikely(err))
+		goto out;
+
+	sbinfo = inode->i_private;
+	sb = sbinfo->si_sb;
+	si_noflush_read_lock(sb);
+	if (l <= au_sbend(sb)) {
+		xf = au_sbr(sb, (aufs_bindex_t)l)->br_xino.xi_file;
+		err = dbgaufs_xi_open(xf, file, /*do_fcnt*/1);
+	} else
+		err = -ENOENT;
+	si_read_unlock(sb);
+
+out:
+	return err;
+}
+
+static const struct file_operations dbgaufs_xino_fop = {
+	.owner		= THIS_MODULE,
+	.open		= dbgaufs_xino_open,
+	.release	= dbgaufs_xi_release,
+	.read		= dbgaufs_xi_read
+};
+
+void dbgaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex)
+{
+	aufs_bindex_t bend;
+	struct au_branch *br;
+	struct au_xino_file *xi;
+
+	if (!au_sbi(sb)->si_dbgaufs)
+		return;
+
+	bend = au_sbend(sb);
+	for (; bindex <= bend; bindex++) {
+		br = au_sbr(sb, bindex);
+		xi = &br->br_xino;
+		debugfs_remove(xi->xi_dbgaufs);
+		xi->xi_dbgaufs = NULL;
+	}
+}
+
+void dbgaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex)
+{
+	struct au_sbinfo *sbinfo;
+	struct dentry *parent;
+	struct au_branch *br;
+	struct au_xino_file *xi;
+	aufs_bindex_t bend;
+	char name[sizeof(DbgaufsXi_PREFIX) + 5]; /* "xi" + bindex + NUL */
+
+	sbinfo = au_sbi(sb);
+	parent = sbinfo->si_dbgaufs;
+	if (!parent)
+		return;
+
+	bend = au_sbend(sb);
+	for (; bindex <= bend; bindex++) {
+		snprintf(name, sizeof(name), DbgaufsXi_PREFIX "%d", bindex);
+		br = au_sbr(sb, bindex);
+		xi = &br->br_xino;
+		AuDebugOn(xi->xi_dbgaufs);
+		xi->xi_dbgaufs = debugfs_create_file(name, dbgaufs_mode, parent,
+						     sbinfo, &dbgaufs_xino_fop);
+		/* ignore an error */
+		if (unlikely(!xi->xi_dbgaufs))
+			AuWarn1("failed %s under debugfs\n", name);
+	}
+}
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_EXPORT
+static int dbgaufs_xigen_open(struct inode *inode, struct file *file)
+{
+	int err;
+	struct au_sbinfo *sbinfo;
+	struct super_block *sb;
+
+	sbinfo = inode->i_private;
+	sb = sbinfo->si_sb;
+	si_noflush_read_lock(sb);
+	err = dbgaufs_xi_open(sbinfo->si_xigen, file, /*do_fcnt*/0);
+	si_read_unlock(sb);
+	return err;
+}
+
+static const struct file_operations dbgaufs_xigen_fop = {
+	.owner		= THIS_MODULE,
+	.open		= dbgaufs_xigen_open,
+	.release	= dbgaufs_xi_release,
+	.read		= dbgaufs_xi_read
+};
+
+static int dbgaufs_xigen_init(struct au_sbinfo *sbinfo)
+{
+	int err;
+
+	/*
+	 * This function is a dynamic '__init' function actually,
+	 * so the tiny check for si_rwsem is unnecessary.
+	 */
+	/* AuRwMustWriteLock(&sbinfo->si_rwsem); */
+
+	err = -EIO;
+	sbinfo->si_dbgaufs_xigen = debugfs_create_file
+		("xigen", dbgaufs_mode, sbinfo->si_dbgaufs, sbinfo,
+		 &dbgaufs_xigen_fop);
+	if (sbinfo->si_dbgaufs_xigen)
+		err = 0;
+
+	return err;
+}
+#else
+static int dbgaufs_xigen_init(struct au_sbinfo *sbinfo)
+{
+	return 0;
+}
+#endif /* CONFIG_AUFS_EXPORT */
+
+/* ---------------------------------------------------------------------- */
+
+void dbgaufs_si_fin(struct au_sbinfo *sbinfo)
+{
+	/*
+	 * This function is a dynamic '__init' function actually,
+	 * so the tiny check for si_rwsem is unnecessary.
+	 */
+	/* AuRwMustWriteLock(&sbinfo->si_rwsem); */
+
+	debugfs_remove_recursive(sbinfo->si_dbgaufs);
+	sbinfo->si_dbgaufs = NULL;
+	kobject_put(&sbinfo->si_kobj);
+}
+
+int dbgaufs_si_init(struct au_sbinfo *sbinfo)
+{
+	int err;
+	char name[SysaufsSiNameLen];
+
+	/*
+	 * This function is a dynamic '__init' function actually,
+	 * so the tiny check for si_rwsem is unnecessary.
+	 */
+	/* AuRwMustWriteLock(&sbinfo->si_rwsem); */
+
+	err = -ENOENT;
+	if (!dbgaufs) {
+		AuErr1("/debug/aufs is uninitialized\n");
+		goto out;
+	}
+
+	err = -EIO;
+	sysaufs_name(sbinfo, name);
+	sbinfo->si_dbgaufs = debugfs_create_dir(name, dbgaufs);
+	if (unlikely(!sbinfo->si_dbgaufs))
+		goto out;
+	kobject_get(&sbinfo->si_kobj);
+
+	sbinfo->si_dbgaufs_xib = debugfs_create_file
+		("xib", dbgaufs_mode, sbinfo->si_dbgaufs, sbinfo,
+		 &dbgaufs_xib_fop);
+	if (unlikely(!sbinfo->si_dbgaufs_xib))
+		goto out_dir;
+
+	sbinfo->si_dbgaufs_plink = debugfs_create_file
+		("plink", dbgaufs_mode, sbinfo->si_dbgaufs, sbinfo,
+		 &dbgaufs_plink_fop);
+	if (unlikely(!sbinfo->si_dbgaufs_plink))
+		goto out_dir;
+
+	err = dbgaufs_xigen_init(sbinfo);
+	if (!err)
+		goto out; /* success */
+
+out_dir:
+	dbgaufs_si_fin(sbinfo);
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+void dbgaufs_fin(void)
+{
+	debugfs_remove(dbgaufs);
+}
+
+int __init dbgaufs_init(void)
+{
+	int err;
+
+	err = -EIO;
+	dbgaufs = debugfs_create_dir(AUFS_NAME, NULL);
+	if (dbgaufs)
+		err = 0;
+	return err;
+}
diff --git a/fs/aufs/dbgaufs.h b/fs/aufs/dbgaufs.h
new file mode 100644
index 0000000..f418c92d
--- /dev/null
+++ b/fs/aufs/dbgaufs.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * debugfs interface
+ */
+
+#ifndef __DBGAUFS_H__
+#define __DBGAUFS_H__
+
+#ifdef __KERNEL__
+
+struct super_block;
+struct au_sbinfo;
+
+#ifdef CONFIG_DEBUG_FS
+/* dbgaufs.c */
+void dbgaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex);
+void dbgaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex);
+void dbgaufs_si_fin(struct au_sbinfo *sbinfo);
+int dbgaufs_si_init(struct au_sbinfo *sbinfo);
+void dbgaufs_fin(void);
+int __init dbgaufs_init(void);
+#else
+AuStubVoid(dbgaufs_brs_del, struct super_block *sb, aufs_bindex_t bindex)
+AuStubVoid(dbgaufs_brs_add, struct super_block *sb, aufs_bindex_t bindex)
+AuStubVoid(dbgaufs_si_fin, struct au_sbinfo *sbinfo)
+AuStubInt0(dbgaufs_si_init, struct au_sbinfo *sbinfo)
+AuStubVoid(dbgaufs_fin, void)
+AuStubInt0(__init dbgaufs_init, void)
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* __KERNEL__ */
+#endif /* __DBGAUFS_H__ */
diff --git a/fs/aufs/dcsub.c b/fs/aufs/dcsub.c
new file mode 100644
index 0000000..5b3d904
--- /dev/null
+++ b/fs/aufs/dcsub.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * sub-routines for dentry cache
+ */
+
+#include "aufs.h"
+
+static void au_dpage_free(struct au_dpage *dpage)
+{
+	int i;
+	struct dentry **p;
+
+	p = dpage->dentries;
+	for (i = 0; i < dpage->ndentry; i++)
+		dput(*p++);
+	free_page((unsigned long)dpage->dentries);
+}
+
+int au_dpages_init(struct au_dcsub_pages *dpages, gfp_t gfp)
+{
+	int err;
+	void *p;
+
+	err = -ENOMEM;
+	dpages->dpages = kmalloc(sizeof(*dpages->dpages), gfp);
+	if (unlikely(!dpages->dpages))
+		goto out;
+
+	p = (void *)__get_free_page(gfp);
+	if (unlikely(!p))
+		goto out_dpages;
+
+	dpages->dpages[0].ndentry = 0;
+	dpages->dpages[0].dentries = p;
+	dpages->ndpage = 1;
+	return 0; /* success */
+
+out_dpages:
+	kfree(dpages->dpages);
+out:
+	return err;
+}
+
+void au_dpages_free(struct au_dcsub_pages *dpages)
+{
+	int i;
+	struct au_dpage *p;
+
+	p = dpages->dpages;
+	for (i = 0; i < dpages->ndpage; i++)
+		au_dpage_free(p++);
+	kfree(dpages->dpages);
+}
+
+static int au_dpages_append(struct au_dcsub_pages *dpages,
+			    struct dentry *dentry, gfp_t gfp)
+{
+	int err, sz;
+	struct au_dpage *dpage;
+	void *p;
+
+	dpage = dpages->dpages + dpages->ndpage - 1;
+	sz = PAGE_SIZE / sizeof(dentry); /* dentry pointers per page */
+	if (unlikely(dpage->ndentry >= sz)) {
+		AuLabel(new dpage);
+		err = -ENOMEM;
+		sz = dpages->ndpage * sizeof(*dpages->dpages);
+		p = au_kzrealloc(dpages->dpages, sz,
+				 sz + sizeof(*dpages->dpages), gfp);
+		if (unlikely(!p))
+			goto out;
+
+		dpages->dpages = p;
+		dpage = dpages->dpages + dpages->ndpage;
+		p = (void *)__get_free_page(gfp);
+		if (unlikely(!p))
+			goto out;
+
+		dpage->ndentry = 0;
+		dpage->dentries = p;
+		dpages->ndpage++;
+	}
+
+	AuDebugOn(!dentry->d_count);
+	dpage->dentries[dpage->ndentry++] = dget_dlock(dentry);
+	return 0; /* success */
+
+out:
+	return err;
+}
+
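+/*
+ * walk the dcache subtree under @root (the same repeat/resume pattern as
+ * the subtree walks in fs/dcache.c) and collect every busy aufs dentry
+ * matching @test into @dpages
+ */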
+int au_dcsub_pages(struct au_dcsub_pages *dpages, struct dentry *root,
+		   au_dpages_test test, void *arg)
+{
+	int err;
+	struct dentry *this_parent;
+	struct list_head *next;
+	struct super_block *sb = root->d_sb;
+
+	err = 0;
+	write_seqlock(&rename_lock);
+	this_parent = root;
+	spin_lock(&this_parent->d_lock);
+repeat:
+	next = this_parent->d_subdirs.next;
+resume:
+	if (this_parent->d_sb == sb
+	    && !IS_ROOT(this_parent)
+	    && au_di(this_parent)
+	    && this_parent->d_count
+	    && (!test || test(this_parent, arg))) {
+		err = au_dpages_append(dpages, this_parent, GFP_ATOMIC);
+		if (unlikely(err))
+			goto out;
+	}
+
+	while (next != &this_parent->d_subdirs) {
+		struct list_head *tmp = next;
+		struct dentry *dentry = list_entry(tmp, struct dentry,
+						   d_u.d_child);
+
+		next = tmp->next;
+		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+		if (dentry->d_count) {
+			if (!list_empty(&dentry->d_subdirs)) {
+				spin_unlock(&this_parent->d_lock);
+				spin_release(&dentry->d_lock.dep_map, 1,
+					     _RET_IP_);
+				this_parent = dentry;
+				spin_acquire(&this_parent->d_lock.dep_map, 0, 1,
+					     _RET_IP_);
+				goto repeat;
+			}
+			if (dentry->d_sb == sb
+			    && au_di(dentry)
+			    && (!test || test(dentry, arg)))
+				err = au_dpages_append(dpages, dentry,
+						       GFP_ATOMIC);
+		}
+		spin_unlock(&dentry->d_lock);
+		if (unlikely(err))
+			goto out;
+	}
+
+	if (this_parent != root) {
+		struct dentry *tmp;
+		struct dentry *child;
+
+		tmp = this_parent->d_parent;
+		rcu_read_lock();
+		spin_unlock(&this_parent->d_lock);
+		child = this_parent;
+		this_parent = tmp;
+		spin_lock(&this_parent->d_lock);
+		rcu_read_unlock();
+		next = child->d_u.d_child.next;
+		goto resume;
+	}
+
+out:
+	spin_unlock(&this_parent->d_lock);
+	write_sequnlock(&rename_lock);
+	return err;
+}
+
+int au_dcsub_pages_rev(struct au_dcsub_pages *dpages, struct dentry *dentry,
+		       int do_include, au_dpages_test test, void *arg)
+{
+	int err;
+
+	err = 0;
+	write_seqlock(&rename_lock);
+	spin_lock(&dentry->d_lock);
+	if (do_include
+	    && dentry->d_count
+	    && (!test || test(dentry, arg)))
+		err = au_dpages_append(dpages, dentry, GFP_ATOMIC);
+	spin_unlock(&dentry->d_lock);
+	if (unlikely(err))
+		goto out;
+
+	/*
+	 * vfsmount_lock is unnecessary since this is a traversal within a
+	 * single mount
+	 */
+	while (!IS_ROOT(dentry)) {
+		dentry = dentry->d_parent; /* rename_lock is locked */
+		spin_lock(&dentry->d_lock);
+		if (dentry->d_count
+		    && (!test || test(dentry, arg)))
+			err = au_dpages_append(dpages, dentry, GFP_ATOMIC);
+		spin_unlock(&dentry->d_lock);
+		if (unlikely(err))
+			break;
+	}
+
+out:
+	write_sequnlock(&rename_lock);
+	return err;
+}
+
+static inline int au_dcsub_dpages_aufs(struct dentry *dentry, void *arg)
+{
+	return au_di(dentry) && dentry->d_sb == arg;
+}
+
+int au_dcsub_pages_rev_aufs(struct au_dcsub_pages *dpages,
+			    struct dentry *dentry, int do_include)
+{
+	return au_dcsub_pages_rev(dpages, dentry, do_include,
+				  au_dcsub_dpages_aufs, dentry->d_sb);
+}
+
+int au_test_subdir(struct dentry *d1, struct dentry *d2)
+{
+	struct path path[2] = {
+		{
+			.dentry = d1
+		},
+		{
+			.dentry = d2
+		}
+	};
+
+	return path_is_under(path + 0, path + 1);
+}
diff --git a/fs/aufs/dcsub.h b/fs/aufs/dcsub.h
new file mode 100644
index 0000000..53dcbd7
--- /dev/null
+++ b/fs/aufs/dcsub.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * sub-routines for dentry cache
+ */
+
+#ifndef __AUFS_DCSUB_H__
+#define __AUFS_DCSUB_H__
+
+#ifdef __KERNEL__
+
+#include <linux/dcache.h>
+#include <linux/fs.h>
+
+struct dentry;
+
+struct au_dpage {
+	int ndentry;
+	struct dentry **dentries;
+};
+
+struct au_dcsub_pages {
+	int ndpage;
+	struct au_dpage *dpages;
+};
+
+/* ---------------------------------------------------------------------- */
+
+/* dcsub.c */
+int au_dpages_init(struct au_dcsub_pages *dpages, gfp_t gfp);
+void au_dpages_free(struct au_dcsub_pages *dpages);
+typedef int (*au_dpages_test)(struct dentry *dentry, void *arg);
+int au_dcsub_pages(struct au_dcsub_pages *dpages, struct dentry *root,
+		   au_dpages_test test, void *arg);
+int au_dcsub_pages_rev(struct au_dcsub_pages *dpages, struct dentry *dentry,
+		       int do_include, au_dpages_test test, void *arg);
+int au_dcsub_pages_rev_aufs(struct au_dcsub_pages *dpages,
+			    struct dentry *dentry, int do_include);
+int au_test_subdir(struct dentry *d1, struct dentry *d2);
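+
+/*
+ * a minimal usage sketch of the dpages API above:
+ *	struct au_dcsub_pages dpages;
+ *	err = au_dpages_init(&dpages, GFP_NOFS);
+ *	if (!err) {
+ *		err = au_dcsub_pages(&dpages, sb->s_root, NULL, NULL);
+ *		... iterate dpages.dpages[i].dentries[j] ...
+ *		au_dpages_free(&dpages);
+ *	}
+ */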
+
+/* ---------------------------------------------------------------------- */
+
+static inline int au_d_hashed_positive(struct dentry *d)
+{
+	int err;
+	struct inode *inode = d->d_inode;
+	err = 0;
+	if (unlikely(d_unhashed(d) || !inode || !inode->i_nlink))
+		err = -ENOENT;
+	return err;
+}
+
+static inline int au_d_alive(struct dentry *d)
+{
+	int err;
+	struct inode *inode;
+	err = 0;
+	if (!IS_ROOT(d))
+		err = au_d_hashed_positive(d);
+	else {
+		inode = d->d_inode;
+		if (unlikely(d_unlinked(d) || !inode || !inode->i_nlink))
+			err = -ENOENT;
+	}
+	return err;
+}
+
+static inline int au_alive_dir(struct dentry *d)
+{
+	int err;
+	err = au_d_alive(d);
+	if (unlikely(err || IS_DEADDIR(d->d_inode)))
+		err = -ENOENT;
+	return err;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DCSUB_H__ */
diff --git a/fs/aufs/debug.c b/fs/aufs/debug.c
new file mode 100644
index 0000000..3defa97
--- /dev/null
+++ b/fs/aufs/debug.c
@@ -0,0 +1,491 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * debug print functions
+ */
+
+#include <linux/vt_kern.h>
+#include "aufs.h"
+
+int aufs_debug;
+MODULE_PARM_DESC(debug, "debug print");
+module_param_named(debug, aufs_debug, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+char *au_plevel = KERN_DEBUG;
+#define dpri(fmt, ...) do {					\
+	if ((au_plevel						\
+	     && strcmp(au_plevel, KERN_DEBUG))			\
+	    || au_debug_test())					\
+		printk("%s" fmt, au_plevel, ##__VA_ARGS__);	\
+} while (0)
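+
+/* dpri() prints either when au_plevel is set to a level other than
+ * KERN_DEBUG, or when debugging is enabled via the 'debug' module
+ * parameter above */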
+
+/* ---------------------------------------------------------------------- */
+
+void au_dpri_whlist(struct au_nhash *whlist)
+{
+	unsigned long ul, n;
+	struct hlist_head *head;
+	struct au_vdir_wh *pos;
+
+	n = whlist->nh_num;
+	head = whlist->nh_head;
+	for (ul = 0; ul < n; ul++) {
+		hlist_for_each_entry(pos, head, wh_hash)
+			dpri("b%d, %.*s, %d\n",
+			     pos->wh_bindex,
+			     pos->wh_str.len, pos->wh_str.name,
+			     pos->wh_str.len);
+		head++;
+	}
+}
+
+void au_dpri_vdir(struct au_vdir *vdir)
+{
+	unsigned long ul;
+	union au_vdir_deblk_p p;
+	unsigned char *o;
+
+	if (!vdir || IS_ERR(vdir)) {
+		dpri("err %ld\n", PTR_ERR(vdir));
+		return;
+	}
+
+	dpri("deblk %u, nblk %lu, deblk %p, last{%lu, %p}, ver %lu\n",
+	     vdir->vd_deblk_sz, vdir->vd_nblk, vdir->vd_deblk,
+	     vdir->vd_last.ul, vdir->vd_last.p.deblk, vdir->vd_version);
+	for (ul = 0; ul < vdir->vd_nblk; ul++) {
+		p.deblk = vdir->vd_deblk[ul];
+		o = p.deblk;
+		dpri("[%lu]: %p\n", ul, o);
+	}
+}
+
+static int do_pri_inode(aufs_bindex_t bindex, struct inode *inode, int hn,
+			struct dentry *wh)
+{
+	char *n = NULL;
+	int l = 0;
+
+	if (!inode || IS_ERR(inode)) {
+		dpri("i%d: err %ld\n", bindex, PTR_ERR(inode));
+		return -1;
+	}
+
+	/* the type of i_blocks depends upon CONFIG_LBDAF */
+	BUILD_BUG_ON(sizeof(inode->i_blocks) != sizeof(unsigned long)
+		     && sizeof(inode->i_blocks) != sizeof(u64));
+	if (wh) {
+		n = (void *)wh->d_name.name;
+		l = wh->d_name.len;
+	}
+
+	dpri("i%d: %p, i%lu, %s, cnt %d, nl %u, 0%o, sz %llu, blk %llu,"
+	     " hn %d, ct %lld, np %lu, st 0x%lx, f 0x%x, v %llu, g %x%s%.*s\n",
+	     bindex, inode,
+	     inode->i_ino, inode->i_sb ? au_sbtype(inode->i_sb) : "??",
+	     atomic_read(&inode->i_count), inode->i_nlink, inode->i_mode,
+	     i_size_read(inode), (unsigned long long)inode->i_blocks,
+	     hn, (long long)timespec_to_ns(&inode->i_ctime) & 0x0ffff,
+	     inode->i_mapping ? inode->i_mapping->nrpages : 0,
+	     inode->i_state, inode->i_flags, inode->i_version,
+	     inode->i_generation,
+	     l ? ", wh " : "", l, n);
+	return 0;
+}
+
+void au_dpri_inode(struct inode *inode)
+{
+	struct au_iinfo *iinfo;
+	aufs_bindex_t bindex;
+	int err, hn;
+
+	err = do_pri_inode(-1, inode, -1, NULL);
+	if (err || !au_test_aufs(inode->i_sb))
+		return;
+
+	iinfo = au_ii(inode);
+	if (!iinfo)
+		return;
+	dpri("i-1: bstart %d, bend %d, gen %d\n",
+	     iinfo->ii_bstart, iinfo->ii_bend, au_iigen(inode, NULL));
+	if (iinfo->ii_bstart < 0)
+		return;
+	hn = 0;
+	for (bindex = iinfo->ii_bstart; bindex <= iinfo->ii_bend; bindex++) {
+		hn = !!au_hn(iinfo->ii_hinode + bindex);
+		do_pri_inode(bindex, iinfo->ii_hinode[0 + bindex].hi_inode, hn,
+			     iinfo->ii_hinode[0 + bindex].hi_whdentry);
+	}
+}
+
+void au_dpri_dalias(struct inode *inode)
+{
+	struct dentry *d;
+
+	spin_lock(&inode->i_lock);
+	hlist_for_each_entry(d, &inode->i_dentry, d_alias)
+		au_dpri_dentry(d);
+	spin_unlock(&inode->i_lock);
+}
+
+static int do_pri_dentry(aufs_bindex_t bindex, struct dentry *dentry)
+{
+	struct dentry *wh = NULL;
+	int hn;
+
+	if (!dentry || IS_ERR(dentry)) {
+		dpri("d%d: err %ld\n", bindex, PTR_ERR(dentry));
+		return -1;
+	}
+	/* do not call dget_parent() here */
+	/* note: access d_xxx without d_lock */
+	dpri("d%d: %.*s?/%.*s, %s, cnt %d, flags 0x%x\n",
+	     bindex,
+	     AuDLNPair(dentry->d_parent), AuDLNPair(dentry),
+	     dentry->d_sb ? au_sbtype(dentry->d_sb) : "??",
+	     dentry->d_count, dentry->d_flags);
+	hn = -1;
+	if (bindex >= 0 && dentry->d_inode && au_test_aufs(dentry->d_sb)) {
+		struct au_iinfo *iinfo = au_ii(dentry->d_inode);
+		if (iinfo) {
+			hn = !!au_hn(iinfo->ii_hinode + bindex);
+			wh = iinfo->ii_hinode[0 + bindex].hi_whdentry;
+		}
+	}
+	do_pri_inode(bindex, dentry->d_inode, hn, wh);
+	return 0;
+}
+
+void au_dpri_dentry(struct dentry *dentry)
+{
+	struct au_dinfo *dinfo;
+	aufs_bindex_t bindex;
+	int err;
+	struct au_hdentry *hdp;
+
+	err = do_pri_dentry(-1, dentry);
+	if (err || !au_test_aufs(dentry->d_sb))
+		return;
+
+	dinfo = au_di(dentry);
+	if (!dinfo)
+		return;
+	dpri("d-1: bstart %d, bend %d, bwh %d, bdiropq %d, gen %d\n",
+	     dinfo->di_bstart, dinfo->di_bend,
+	     dinfo->di_bwh, dinfo->di_bdiropq, au_digen(dentry));
+	if (dinfo->di_bstart < 0)
+		return;
+	hdp = dinfo->di_hdentry;
+	for (bindex = dinfo->di_bstart; bindex <= dinfo->di_bend; bindex++)
+		do_pri_dentry(bindex, hdp[0 + bindex].hd_dentry);
+}
+
+static int do_pri_file(aufs_bindex_t bindex, struct file *file)
+{
+	char a[32];
+
+	if (!file || IS_ERR(file)) {
+		dpri("f%d: err %ld\n", bindex, PTR_ERR(file));
+		return -1;
+	}
+	a[0] = 0;
+	if (bindex < 0
+	    && file->f_dentry
+	    && au_test_aufs(file->f_dentry->d_sb)
+	    && au_fi(file))
+		snprintf(a, sizeof(a), ", gen %d, mmapped %d",
+			 au_figen(file), atomic_read(&au_fi(file)->fi_mmapped));
+	dpri("f%d: mode 0x%x, flags 0%o, cnt %ld, v %llu, pos %llu%s\n",
+	     bindex, file->f_mode, file->f_flags, (long)file_count(file),
+	     file->f_version, file->f_pos, a);
+	if (file->f_dentry)
+		do_pri_dentry(bindex, file->f_dentry);
+	return 0;
+}
+
+void au_dpri_file(struct file *file)
+{
+	struct au_finfo *finfo;
+	struct au_fidir *fidir;
+	struct au_hfile *hfile;
+	aufs_bindex_t bindex;
+	int err;
+
+	err = do_pri_file(-1, file);
+	if (err || !file->f_dentry || !au_test_aufs(file->f_dentry->d_sb))
+		return;
+
+	finfo = au_fi(file);
+	if (!finfo)
+		return;
+	if (finfo->fi_btop < 0)
+		return;
+	fidir = finfo->fi_hdir;
+	if (!fidir)
+		do_pri_file(finfo->fi_btop, finfo->fi_htop.hf_file);
+	else
+		for (bindex = finfo->fi_btop;
+		     bindex >= 0 && bindex <= fidir->fd_bbot;
+		     bindex++) {
+			hfile = fidir->fd_hfile + bindex;
+			do_pri_file(bindex, hfile ? hfile->hf_file : NULL);
+		}
+}
+
+static int do_pri_br(aufs_bindex_t bindex, struct au_branch *br)
+{
+	struct vfsmount *mnt;
+	struct super_block *sb;
+
+	if (!br || IS_ERR(br))
+		goto out;
+	mnt = au_br_mnt(br);
+	if (!mnt || IS_ERR(mnt))
+		goto out;
+	sb = mnt->mnt_sb;
+	if (!sb || IS_ERR(sb))
+		goto out;
+
+	dpri("s%d: {perm 0x%x, id %d, cnt %d, wbr %p}, "
+	     "%s, dev 0x%02x%02x, flags 0x%lx, cnt %d, active %d, "
+	     "xino %d\n",
+	     bindex, br->br_perm, br->br_id, atomic_read(&br->br_count),
+	     br->br_wbr, au_sbtype(sb), MAJOR(sb->s_dev), MINOR(sb->s_dev),
+	     sb->s_flags, sb->s_count,
+	     atomic_read(&sb->s_active), !!br->br_xino.xi_file);
+	return 0;
+
+out:
+	dpri("s%d: err %ld\n", bindex, PTR_ERR(br));
+	return -1;
+}
+
+void au_dpri_sb(struct super_block *sb)
+{
+	struct au_sbinfo *sbinfo;
+	aufs_bindex_t bindex;
+	int err;
+	/* to reduce stack size */
+	struct {
+		struct vfsmount mnt;
+		struct au_branch fake;
+	} *a;
+
+	/* this function can be called from magic sysrq */
+	a = kzalloc(sizeof(*a), GFP_ATOMIC);
+	if (unlikely(!a)) {
+		dpri("no memory\n");
+		return;
+	}
+
+	a->mnt.mnt_sb = sb;
+	a->fake.br_perm = 0;
+	a->fake.br_path.mnt = &a->mnt;
+	a->fake.br_xino.xi_file = NULL;
+	atomic_set(&a->fake.br_count, 0);
+	smp_mb(); /* atomic_set */
+	err = do_pri_br(-1, &a->fake);
+	kfree(a);
+	dpri("dev 0x%x\n", sb->s_dev);
+	if (err || !au_test_aufs(sb))
+		return;
+
+	sbinfo = au_sbi(sb);
+	if (!sbinfo)
+		return;
+	dpri("nw %d, gen %u, kobj %d\n",
+	     atomic_read(&sbinfo->si_nowait.nw_len), sbinfo->si_generation,
+	     atomic_read(&sbinfo->si_kobj.kref.refcount));
+	for (bindex = 0; bindex <= sbinfo->si_bend; bindex++)
+		do_pri_br(bindex, sbinfo->si_branch[0 + bindex]);
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_dbg_sleep_jiffy(int jiffy)
+{
+	while (jiffy)
+		jiffy = schedule_timeout_uninterruptible(jiffy);
+}
+
+void au_dbg_iattr(struct iattr *ia)
+{
+#define AuBit(name)					\
+	do {						\
+		if (ia->ia_valid & ATTR_ ## name)	\
+			dpri(#name "\n");		\
+	} while (0)
+	AuBit(MODE);
+	AuBit(UID);
+	AuBit(GID);
+	AuBit(SIZE);
+	AuBit(ATIME);
+	AuBit(MTIME);
+	AuBit(CTIME);
+	AuBit(ATIME_SET);
+	AuBit(MTIME_SET);
+	AuBit(FORCE);
+	AuBit(ATTR_FLAG);
+	AuBit(KILL_SUID);
+	AuBit(KILL_SGID);
+	AuBit(FILE);
+	AuBit(KILL_PRIV);
+	AuBit(OPEN);
+	AuBit(TIMES_SET);
+#undef	AuBit
+	dpri("ia_file %p\n", ia->ia_file);
+}
+
+/* ---------------------------------------------------------------------- */
+
+void __au_dbg_verify_dinode(struct dentry *dentry, const char *func, int line)
+{
+	struct inode *h_inode, *inode = dentry->d_inode;
+	struct dentry *h_dentry;
+	aufs_bindex_t bindex, bend, bi;
+
+	if (!inode /* || au_di(dentry)->di_lsc == AuLsc_DI_TMP */)
+		return;
+
+	bend = au_dbend(dentry);
+	bi = au_ibend(inode);
+	if (bi < bend)
+		bend = bi;
+	bindex = au_dbstart(dentry);
+	bi = au_ibstart(inode);
+	if (bi > bindex)
+		bindex = bi;
+
+	for (; bindex <= bend; bindex++) {
+		h_dentry = au_h_dptr(dentry, bindex);
+		if (!h_dentry)
+			continue;
+		h_inode = au_h_iptr(inode, bindex);
+		if (unlikely(h_inode != h_dentry->d_inode)) {
+			int old = au_debug_test();
+			if (!old)
+				au_debug(1);
+			AuDbg("b%d, %s:%d\n", bindex, func, line);
+			AuDbgDentry(dentry);
+			AuDbgInode(inode);
+			if (!old)
+				au_debug(0);
+			BUG();
+		}
+	}
+}
+
+void au_dbg_verify_dir_parent(struct dentry *dentry, unsigned int sigen)
+{
+	struct dentry *parent;
+
+	parent = dget_parent(dentry);
+	AuDebugOn(!S_ISDIR(dentry->d_inode->i_mode));
+	AuDebugOn(IS_ROOT(dentry));
+	AuDebugOn(au_digen_test(parent, sigen));
+	dput(parent);
+}
+
+void au_dbg_verify_nondir_parent(struct dentry *dentry, unsigned int sigen)
+{
+	struct dentry *parent;
+	struct inode *inode;
+
+	parent = dget_parent(dentry);
+	inode = dentry->d_inode;
+	AuDebugOn(inode && S_ISDIR(dentry->d_inode->i_mode));
+	AuDebugOn(au_digen_test(parent, sigen));
+	dput(parent);
+}
+
+void au_dbg_verify_gen(struct dentry *parent, unsigned int sigen)
+{
+	int err, i, j;
+	struct au_dcsub_pages dpages;
+	struct au_dpage *dpage;
+	struct dentry **dentries;
+
+	err = au_dpages_init(&dpages, GFP_NOFS);
+	AuDebugOn(err);
+	err = au_dcsub_pages_rev_aufs(&dpages, parent, /*do_include*/1);
+	AuDebugOn(err);
+	for (i = dpages.ndpage - 1; !err && i >= 0; i--) {
+		dpage = dpages.dpages + i;
+		dentries = dpage->dentries;
+		for (j = dpage->ndentry - 1; !err && j >= 0; j--)
+			AuDebugOn(au_digen_test(dentries[j], sigen));
+	}
+	au_dpages_free(&dpages);
+}
+
+void au_dbg_verify_kthread(void)
+{
+	if (au_wkq_test()) {
+		au_dbg_blocked();
+		/*
+		 * It may be recursive, but udba=notify between two aufs mounts,
+		 * where a single ro branch is shared, is not a problem.
+		 */
+		/* WARN_ON(1); */
+	}
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_debug_sbinfo_init(struct au_sbinfo *sbinfo __maybe_unused)
+{
+#ifdef AuForceNoPlink
+	au_opt_clr(sbinfo->si_mntflags, PLINK);
+#endif
+#ifdef AuForceNoXino
+	au_opt_clr(sbinfo->si_mntflags, XINO);
+#endif
+#ifdef AuForceNoRefrof
+	au_opt_clr(sbinfo->si_mntflags, REFROF);
+#endif
+#ifdef AuForceHnotify
+	au_opt_set_udba(sbinfo->si_mntflags, UDBA_HNOTIFY);
+#endif
+#ifdef AuForceRd0
+	sbinfo->si_rdblk = 0;
+	sbinfo->si_rdhash = 0;
+#endif
+}
+
+int __init au_debug_init(void)
+{
+	aufs_bindex_t bindex;
+	struct au_vdir_destr destr;
+
+	bindex = -1;
+	AuDebugOn(bindex >= 0);
+
+	destr.len = -1;
+	AuDebugOn(destr.len < NAME_MAX);
+
+#ifdef CONFIG_4KSTACKS
+	pr_warn("CONFIG_4KSTACKS is defined.\n");
+#endif
+
+#ifdef AuForceNoBrs
+	sysaufs_brs = 0;
+#endif
+
+	return 0;
+}
diff --git a/fs/aufs/debug.h b/fs/aufs/debug.h
new file mode 100644
index 0000000..28ff8a7
--- /dev/null
+++ b/fs/aufs/debug.h
@@ -0,0 +1,242 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * debug print functions
+ */
+
+#ifndef __AUFS_DEBUG_H__
+#define __AUFS_DEBUG_H__
+
+#ifdef __KERNEL__
+
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/sysrq.h>
+
+#ifdef CONFIG_AUFS_DEBUG
+#define AuDebugOn(a)		BUG_ON(a)
+
+/* module parameter */
+extern int aufs_debug;
+static inline void au_debug(int n)
+{
+	aufs_debug = n;
+	smp_mb();
+}
+
+static inline int au_debug_test(void)
+{
+	return aufs_debug;
+}
+#else
+#define AuDebugOn(a)		do {} while (0)
+AuStubVoid(au_debug, int n)
+AuStubInt0(au_debug_test, void)
+#endif /* CONFIG_AUFS_DEBUG */
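+
+/*
+ * Toggle pattern (a sketch; it mirrors __au_dbg_verify_dinode() in debug.c):
+ * temporarily force debug output around a dump, then restore the old state.
+ *	int old = au_debug_test();
+ *	if (!old)
+ *		au_debug(1);
+ *	AuDbgDentry(dentry);
+ *	if (!old)
+ *		au_debug(0);
+ */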
+
+/* ---------------------------------------------------------------------- */
+
+/* debug print */
+
+#define AuDbg(fmt, ...) do { \
+	if (au_debug_test()) \
+		pr_debug("DEBUG: " fmt, ##__VA_ARGS__); \
+} while (0)
+#define AuLabel(l)		AuDbg(#l "\n")
+#define AuIOErr(fmt, ...)	pr_err("I/O Error, " fmt, ##__VA_ARGS__)
+#define AuWarn1(fmt, ...) do { \
+	static unsigned char _c; \
+	if (!_c++) \
+		pr_warn(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define AuErr1(fmt, ...) do { \
+	static unsigned char _c; \
+	if (!_c++) \
+		pr_err(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define AuIOErr1(fmt, ...) do { \
+	static unsigned char _c; \
+	if (!_c++) \
+		AuIOErr(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define AuUnsupportMsg	"This operation is not supported." \
+			" Please report this application to aufs-users ML."
+#define AuUnsupport(fmt, ...) do { \
+	pr_err(AuUnsupportMsg "\n" fmt, ##__VA_ARGS__); \
+	dump_stack(); \
+} while (0)
+
+#define AuTraceErr(e) do { \
+	if (unlikely((e) < 0)) \
+		AuDbg("err %d\n", (int)(e)); \
+} while (0)
+
+#define AuTraceErrPtr(p) do { \
+	if (IS_ERR(p)) \
+		AuDbg("err %ld\n", PTR_ERR(p)); \
+} while (0)
+
+/* dirty macros for debug print, use with "%.*s" and caution */
+#define AuLNPair(qstr)		(qstr)->len, (qstr)->name
+#define AuDLNPair(d)		AuLNPair(&(d)->d_name)
+
+/* ---------------------------------------------------------------------- */
+
+struct au_sbinfo;
+struct au_finfo;
+struct dentry;
+#ifdef CONFIG_AUFS_DEBUG
+extern char *au_plevel;
+struct au_nhash;
+void au_dpri_whlist(struct au_nhash *whlist);
+struct au_vdir;
+void au_dpri_vdir(struct au_vdir *vdir);
+struct inode;
+void au_dpri_inode(struct inode *inode);
+void au_dpri_dalias(struct inode *inode);
+void au_dpri_dentry(struct dentry *dentry);
+struct file;
+void au_dpri_file(struct file *filp);
+struct super_block;
+void au_dpri_sb(struct super_block *sb);
+
+void au_dbg_sleep_jiffy(int jiffy);
+struct iattr;
+void au_dbg_iattr(struct iattr *ia);
+
+#define au_dbg_verify_dinode(d) __au_dbg_verify_dinode(d, __func__, __LINE__)
+void __au_dbg_verify_dinode(struct dentry *dentry, const char *func, int line);
+void au_dbg_verify_dir_parent(struct dentry *dentry, unsigned int sigen);
+void au_dbg_verify_nondir_parent(struct dentry *dentry, unsigned int sigen);
+void au_dbg_verify_gen(struct dentry *parent, unsigned int sigen);
+void au_dbg_verify_kthread(void);
+
+int __init au_debug_init(void);
+void au_debug_sbinfo_init(struct au_sbinfo *sbinfo);
+#define AuDbgWhlist(w) do { \
+	AuDbg(#w "\n"); \
+	au_dpri_whlist(w); \
+} while (0)
+
+#define AuDbgVdir(v) do { \
+	AuDbg(#v "\n"); \
+	au_dpri_vdir(v); \
+} while (0)
+
+#define AuDbgInode(i) do { \
+	AuDbg(#i "\n"); \
+	au_dpri_inode(i); \
+} while (0)
+
+#define AuDbgDAlias(i) do { \
+	AuDbg(#i "\n"); \
+	au_dpri_dalias(i); \
+} while (0)
+
+#define AuDbgDentry(d) do { \
+	AuDbg(#d "\n"); \
+	au_dpri_dentry(d); \
+} while (0)
+
+#define AuDbgFile(f) do { \
+	AuDbg(#f "\n"); \
+	au_dpri_file(f); \
+} while (0)
+
+#define AuDbgSb(sb) do { \
+	AuDbg(#sb "\n"); \
+	au_dpri_sb(sb); \
+} while (0)
+
+#define AuDbgSleep(sec) do { \
+	AuDbg("sleep %d sec\n", sec); \
+	ssleep(sec); \
+} while (0)
+
+#define AuDbgSleepJiffy(jiffy) do { \
+	AuDbg("sleep %d jiffies\n", jiffy); \
+	au_dbg_sleep_jiffy(jiffy); \
+} while (0)
+
+#define AuDbgIAttr(ia) do { \
+	AuDbg("ia_valid 0x%x\n", (ia)->ia_valid); \
+	au_dbg_iattr(ia); \
+} while (0)
+
+#define AuDbgSym(addr) do {				\
+	char sym[KSYM_SYMBOL_LEN];			\
+	sprint_symbol(sym, (unsigned long)addr);	\
+	AuDbg("%s\n", sym);				\
+} while (0)
+
+#define AuInfoSym(addr) do {				\
+	char sym[KSYM_SYMBOL_LEN];			\
+	sprint_symbol(sym, (unsigned long)addr);	\
+	AuInfo("%s\n", sym);				\
+} while (0)
+#else
+AuStubVoid(au_dbg_verify_dinode, struct dentry *dentry)
+AuStubVoid(au_dbg_verify_dir_parent, struct dentry *dentry, unsigned int sigen)
+AuStubVoid(au_dbg_verify_nondir_parent, struct dentry *dentry,
+	   unsigned int sigen)
+AuStubVoid(au_dbg_verify_gen, struct dentry *parent, unsigned int sigen)
+AuStubVoid(au_dbg_verify_kthread, void)
+AuStubInt0(__init au_debug_init, void)
+AuStubVoid(au_debug_sbinfo_init, struct au_sbinfo *sbinfo)
+
+#define AuDbgWhlist(w)		do {} while (0)
+#define AuDbgVdir(v)		do {} while (0)
+#define AuDbgInode(i)		do {} while (0)
+#define AuDbgDAlias(i)		do {} while (0)
+#define AuDbgDentry(d)		do {} while (0)
+#define AuDbgFile(f)		do {} while (0)
+#define AuDbgSb(sb)		do {} while (0)
+#define AuDbgSleep(sec)		do {} while (0)
+#define AuDbgSleepJiffy(jiffy)	do {} while (0)
+#define AuDbgIAttr(ia)		do {} while (0)
+#define AuDbgSym(addr)		do {} while (0)
+#define AuInfoSym(addr)		do {} while (0)
+#endif /* CONFIG_AUFS_DEBUG */
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_MAGIC_SYSRQ
+int __init au_sysrq_init(void);
+void au_sysrq_fin(void);
+
+#ifdef CONFIG_HW_CONSOLE
+#define au_dbg_blocked() do { \
+	WARN_ON(1); \
+	handle_sysrq('w'); \
+} while (0)
+#else
+AuStubVoid(au_dbg_blocked, void)
+#endif
+
+#else
+AuStubInt0(__init au_sysrq_init, void)
+AuStubVoid(au_sysrq_fin, void)
+AuStubVoid(au_dbg_blocked, void)
+#endif /* CONFIG_AUFS_MAGIC_SYSRQ */
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DEBUG_H__ */
diff --git a/fs/aufs/dentry.c b/fs/aufs/dentry.c
new file mode 100644
index 0000000..6c0238e
--- /dev/null
+++ b/fs/aufs/dentry.c
@@ -0,0 +1,1065 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * lookup and dentry operations
+ */
+
+#include <linux/namei.h>
+#include "aufs.h"
+
+#define AuLkup_ALLOW_NEG	1
+#define au_ftest_lkup(flags, name)	((flags) & AuLkup_##name)
+#define au_fset_lkup(flags, name) \
+	do { (flags) |= AuLkup_##name; } while (0)
+#define au_fclr_lkup(flags, name) \
+	do { (flags) &= ~AuLkup_##name; } while (0)
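+
+/*
+ * How the flag helpers above expand (illustration only):
+ *	unsigned int flags = 0;
+ *	au_fset_lkup(flags, ALLOW_NEG);	--> flags |= AuLkup_ALLOW_NEG;
+ *	au_ftest_lkup(flags, ALLOW_NEG)	--> (flags & AuLkup_ALLOW_NEG)
+ *	au_fclr_lkup(flags, ALLOW_NEG);	--> flags &= ~AuLkup_ALLOW_NEG;
+ */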
+
+struct au_do_lookup_args {
+	unsigned int		flags;
+	mode_t			type;
+};
+
+/*
+ * returns a positive/negative dentry, NULL or an error.
+ * NULL means whiteout-ed or not found.
+ */
+static struct dentry*
+au_do_lookup(struct dentry *h_parent, struct dentry *dentry,
+	     aufs_bindex_t bindex, struct qstr *wh_name,
+	     struct au_do_lookup_args *args)
+{
+	struct dentry *h_dentry;
+	struct inode *h_inode, *inode;
+	struct au_branch *br;
+	int wh_found, opq;
+	unsigned char wh_able;
+	const unsigned char allow_neg = !!au_ftest_lkup(args->flags, ALLOW_NEG);
+
+	wh_found = 0;
+	br = au_sbr(dentry->d_sb, bindex);
+	wh_able = !!au_br_whable(br->br_perm);
+	if (wh_able)
+		wh_found = au_wh_test(h_parent, wh_name, br, /*try_sio*/0);
+	h_dentry = ERR_PTR(wh_found);
+	if (!wh_found)
+		goto real_lookup;
+	if (unlikely(wh_found < 0))
+		goto out;
+
+	/* We found a whiteout */
+	/* au_set_dbend(dentry, bindex); */
+	au_set_dbwh(dentry, bindex);
+	if (!allow_neg)
+		return NULL; /* success */
+
+real_lookup:
+	h_dentry = vfsub_lkup_one(&dentry->d_name, h_parent);
+	if (IS_ERR(h_dentry))
+		goto out;
+
+	h_inode = h_dentry->d_inode;
+	if (!h_inode) {
+		if (!allow_neg)
+			goto out_neg;
+	} else if (wh_found
+		   || (args->type && args->type != (h_inode->i_mode & S_IFMT)))
+		goto out_neg;
+
+	if (au_dbend(dentry) <= bindex)
+		au_set_dbend(dentry, bindex);
+	if (au_dbstart(dentry) < 0 || bindex < au_dbstart(dentry))
+		au_set_dbstart(dentry, bindex);
+	au_set_h_dptr(dentry, bindex, h_dentry);
+
+	inode = dentry->d_inode;
+	if (!h_inode || !S_ISDIR(h_inode->i_mode) || !wh_able
+	    || (inode && !S_ISDIR(inode->i_mode)))
+		goto out; /* success */
+
+	mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD);
+	opq = au_diropq_test(h_dentry, br);
+	mutex_unlock(&h_inode->i_mutex);
+	if (opq > 0)
+		au_set_dbdiropq(dentry, bindex);
+	else if (unlikely(opq < 0)) {
+		au_set_h_dptr(dentry, bindex, NULL);
+		h_dentry = ERR_PTR(opq);
+	}
+	goto out;
+
+out_neg:
+	dput(h_dentry);
+	h_dentry = NULL;
+out:
+	return h_dentry;
+}
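+
+/*
+ * Caller-side sketch of the three outcomes documented above (hypothetical,
+ * simplified from the loop in au_lkup_dentry() below):
+ *	h_dentry = au_do_lookup(h_parent, dentry, bindex, &whname, &args);
+ *	if (IS_ERR(h_dentry))
+ *		return PTR_ERR(h_dentry);	<-- hard error
+ *	if (!h_dentry)
+ *		continue;	<-- whiteout-ed or not found on this branch
+ *	(otherwise a positive/negative lower dentry was set at bindex)
+ */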
+
+static int au_test_shwh(struct super_block *sb, const struct qstr *name)
+{
+	if (unlikely(!au_opt_test(au_mntflags(sb), SHWH)
+		     && !strncmp(name->name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)))
+		return -EPERM;
+	return 0;
+}
+
+/*
+ * returns the number of lower positive dentries,
+ * otherwise an error.
+ * can be called at unlinking, in which case @type is zero.
+ */
+int au_lkup_dentry(struct dentry *dentry, aufs_bindex_t bstart, mode_t type)
+{
+	int npositive, err;
+	aufs_bindex_t bindex, btail, bdiropq;
+	unsigned char isdir;
+	struct qstr whname;
+	struct au_do_lookup_args args = {
+		.flags		= 0,
+		.type		= type
+	};
+	const struct qstr *name = &dentry->d_name;
+	struct dentry *parent;
+	struct inode *inode;
+
+	err = au_test_shwh(dentry->d_sb, name);
+	if (unlikely(err))
+		goto out;
+
+	err = au_wh_name_alloc(&whname, name);
+	if (unlikely(err))
+		goto out;
+
+	inode = dentry->d_inode;
+	isdir = !!(inode && S_ISDIR(inode->i_mode));
+	if (!type)
+		au_fset_lkup(args.flags, ALLOW_NEG);
+
+	npositive = 0;
+	parent = dget_parent(dentry);
+	btail = au_dbtaildir(parent);
+	for (bindex = bstart; bindex <= btail; bindex++) {
+		struct dentry *h_parent, *h_dentry;
+		struct inode *h_inode, *h_dir;
+
+		h_dentry = au_h_dptr(dentry, bindex);
+		if (h_dentry) {
+			if (h_dentry->d_inode)
+				npositive++;
+			if (type != S_IFDIR)
+				break;
+			continue;
+		}
+		h_parent = au_h_dptr(parent, bindex);
+		if (!h_parent)
+			continue;
+		h_dir = h_parent->d_inode;
+		if (!h_dir || !S_ISDIR(h_dir->i_mode))
+			continue;
+
+		mutex_lock_nested(&h_dir->i_mutex, AuLsc_I_PARENT);
+		h_dentry = au_do_lookup(h_parent, dentry, bindex, &whname,
+					&args);
+		mutex_unlock(&h_dir->i_mutex);
+		err = PTR_ERR(h_dentry);
+		if (IS_ERR(h_dentry))
+			goto out_parent;
+		au_fclr_lkup(args.flags, ALLOW_NEG);
+
+		if (au_dbwh(dentry) >= 0)
+			break;
+		if (!h_dentry)
+			continue;
+		h_inode = h_dentry->d_inode;
+		if (!h_inode)
+			continue;
+		npositive++;
+		if (!args.type)
+			args.type = h_inode->i_mode & S_IFMT;
+		if (args.type != S_IFDIR)
+			break;
+		else if (isdir) {
+			/* the type of lower may be different */
+			bdiropq = au_dbdiropq(dentry);
+			if (bdiropq >= 0 && bdiropq <= bindex)
+				break;
+		}
+	}
+
+	if (npositive) {
+		AuLabel(positive);
+		au_update_dbstart(dentry);
+	}
+	err = npositive;
+	if (unlikely(!au_opt_test(au_mntflags(dentry->d_sb), UDBA_NONE)
+		     && au_dbstart(dentry) < 0)) {
+		err = -EIO;
+		AuIOErr("both of real entry and whiteout found, %.*s, err %d\n",
+			AuDLNPair(dentry), err);
+	}
+
+out_parent:
+	dput(parent);
+	kfree(whname.name);
+out:
+	return err;
+}
+
+struct dentry *au_sio_lkup_one(struct qstr *name, struct dentry *parent,
+			       struct au_branch *br)
+{
+	struct dentry *dentry;
+	int wkq_err;
+
+	if (!au_test_h_perm_sio(parent->d_inode, MAY_EXEC))
+		dentry = vfsub_lkup_one(name, parent);
+	else {
+		struct vfsub_lkup_one_args args = {
+			.errp	= &dentry,
+			.name	= name,
+			.parent	= parent
+		};
+
+		wkq_err = au_wkq_wait(vfsub_call_lkup_one, &args);
+		if (unlikely(wkq_err))
+			dentry = ERR_PTR(wkq_err);
+	}
+
+	return dentry;
+}
+
+/*
+ * lookup @dentry on @bindex which should be negative.
+ */
+int au_lkup_neg(struct dentry *dentry, aufs_bindex_t bindex, int wh)
+{
+	int err;
+	struct dentry *parent, *h_parent, *h_dentry;
+	struct au_branch *br;
+
+	parent = dget_parent(dentry);
+	h_parent = au_h_dptr(parent, bindex);
+	br = au_sbr(dentry->d_sb, bindex);
+	if (wh)
+		h_dentry = au_whtmp_lkup(h_parent, br, &dentry->d_name);
+	else
+		h_dentry = au_sio_lkup_one(&dentry->d_name, h_parent, br);
+	err = PTR_ERR(h_dentry);
+	if (IS_ERR(h_dentry))
+		goto out;
+	if (unlikely(h_dentry->d_inode)) {
+		err = -EIO;
+		AuIOErr("%.*s should be negative on b%d.\n",
+			AuDLNPair(h_dentry), bindex);
+		dput(h_dentry);
+		goto out;
+	}
+
+	err = 0;
+	if (bindex < au_dbstart(dentry))
+		au_set_dbstart(dentry, bindex);
+	if (au_dbend(dentry) < bindex)
+		au_set_dbend(dentry, bindex);
+	au_set_h_dptr(dentry, bindex, h_dentry);
+
+out:
+	dput(parent);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* subset of struct inode */
+struct au_iattr {
+	unsigned long		i_ino;
+	/* unsigned int		i_nlink; */
+	kuid_t			i_uid;
+	kgid_t			i_gid;
+	u64			i_version;
+/*
+	loff_t			i_size;
+	blkcnt_t		i_blocks;
+*/
+	umode_t			i_mode;
+};
+
+static void au_iattr_save(struct au_iattr *ia, struct inode *h_inode)
+{
+	ia->i_ino = h_inode->i_ino;
+	/* ia->i_nlink = h_inode->i_nlink; */
+	ia->i_uid = h_inode->i_uid;
+	ia->i_gid = h_inode->i_gid;
+	ia->i_version = h_inode->i_version;
+/*
+	ia->i_size = h_inode->i_size;
+	ia->i_blocks = h_inode->i_blocks;
+*/
+	ia->i_mode = (h_inode->i_mode & S_IFMT);
+}
+
+static int au_iattr_test(struct au_iattr *ia, struct inode *h_inode)
+{
+	return ia->i_ino != h_inode->i_ino
+		/* || ia->i_nlink != h_inode->i_nlink */
+		|| !uid_eq(ia->i_uid, h_inode->i_uid)
+		|| !gid_eq(ia->i_gid, h_inode->i_gid)
+		|| ia->i_version != h_inode->i_version
+/*
+		|| ia->i_size != h_inode->i_size
+		|| ia->i_blocks != h_inode->i_blocks
+*/
+		|| ia->i_mode != (h_inode->i_mode & S_IFMT);
+}
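+
+/*
+ * How the snapshot/compare pair above is used (a sketch; see
+ * au_h_verify_dentry() below): detect a lower inode modified behind
+ * aufs' back.
+ *	struct au_iattr ia;
+ *	au_iattr_save(&ia, h_inode);
+ *	(... the lower fs may be modified directly here ...)
+ *	if (au_iattr_test(&ia, h_inode))
+ *		err = au_busy_or_stale();	<-- attributes changed
+ */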
+
+static int au_h_verify_dentry(struct dentry *h_dentry, struct dentry *h_parent,
+			      struct au_branch *br)
+{
+	int err;
+	struct au_iattr ia;
+	struct inode *h_inode;
+	struct dentry *h_d;
+	struct super_block *h_sb;
+
+	err = 0;
+	memset(&ia, -1, sizeof(ia));
+	h_sb = h_dentry->d_sb;
+	h_inode = h_dentry->d_inode;
+	if (h_inode)
+		au_iattr_save(&ia, h_inode);
+	else if (au_test_nfs(h_sb) || au_test_fuse(h_sb))
+		/* nfs d_revalidate may return 0 for a negative dentry */
+		/* fuse d_revalidate always returns 0 for a negative dentry */
+		goto out;
+
+	/* main purpose is namei.c:cached_lookup() and d_revalidate */
+	h_d = vfsub_lkup_one(&h_dentry->d_name, h_parent);
+	err = PTR_ERR(h_d);
+	if (IS_ERR(h_d))
+		goto out;
+
+	err = 0;
+	if (unlikely(h_d != h_dentry
+		     || h_d->d_inode != h_inode
+		     || (h_inode && au_iattr_test(&ia, h_inode))))
+		err = au_busy_or_stale();
+	dput(h_d);
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+int au_h_verify(struct dentry *h_dentry, unsigned int udba, struct inode *h_dir,
+		struct dentry *h_parent, struct au_branch *br)
+{
+	int err;
+
+	err = 0;
+	if (udba == AuOpt_UDBA_REVAL
+	    && !au_test_fs_remote(h_dentry->d_sb)) {
+		IMustLock(h_dir);
+		err = (h_dentry->d_parent->d_inode != h_dir);
+	} else if (udba != AuOpt_UDBA_NONE)
+		err = au_h_verify_dentry(h_dentry, h_parent, br);
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_do_refresh_hdentry(struct dentry *dentry, struct dentry *parent)
+{
+	int err;
+	aufs_bindex_t new_bindex, bindex, bend, bwh, bdiropq;
+	struct au_hdentry tmp, *p, *q;
+	struct au_dinfo *dinfo;
+	struct super_block *sb;
+
+	DiMustWriteLock(dentry);
+
+	sb = dentry->d_sb;
+	dinfo = au_di(dentry);
+	bend = dinfo->di_bend;
+	bwh = dinfo->di_bwh;
+	bdiropq = dinfo->di_bdiropq;
+	p = dinfo->di_hdentry + dinfo->di_bstart;
+	for (bindex = dinfo->di_bstart; bindex <= bend; bindex++, p++) {
+		if (!p->hd_dentry)
+			continue;
+
+		new_bindex = au_br_index(sb, p->hd_id);
+		if (new_bindex == bindex)
+			continue;
+
+		if (dinfo->di_bwh == bindex)
+			bwh = new_bindex;
+		if (dinfo->di_bdiropq == bindex)
+			bdiropq = new_bindex;
+		if (new_bindex < 0) {
+			au_hdput(p);
+			p->hd_dentry = NULL;
+			continue;
+		}
+
+		/* swap two lower dentries, and loop again */
+		q = dinfo->di_hdentry + new_bindex;
+		tmp = *q;
+		*q = *p;
+		*p = tmp;
+		if (tmp.hd_dentry) {
+			bindex--;
+			p--;
+		}
+	}
+
+	dinfo->di_bwh = -1;
+	if (bwh >= 0 && bwh <= au_sbend(sb) && au_sbr_whable(sb, bwh))
+		dinfo->di_bwh = bwh;
+
+	dinfo->di_bdiropq = -1;
+	if (bdiropq >= 0
+	    && bdiropq <= au_sbend(sb)
+	    && au_sbr_whable(sb, bdiropq))
+		dinfo->di_bdiropq = bdiropq;
+
+	err = -EIO;
+	dinfo->di_bstart = -1;
+	dinfo->di_bend = -1;
+	bend = au_dbend(parent);
+	p = dinfo->di_hdentry;
+	for (bindex = 0; bindex <= bend; bindex++, p++)
+		if (p->hd_dentry) {
+			dinfo->di_bstart = bindex;
+			break;
+		}
+
+	if (dinfo->di_bstart >= 0) {
+		p = dinfo->di_hdentry + bend;
+		for (bindex = bend; bindex >= 0; bindex--, p--)
+			if (p->hd_dentry) {
+				dinfo->di_bend = bindex;
+				err = 0;
+				break;
+			}
+	}
+
+	return err;
+}
+
+static void au_do_hide(struct dentry *dentry)
+{
+	struct inode *inode;
+
+	inode = dentry->d_inode;
+	if (inode) {
+		if (!S_ISDIR(inode->i_mode)) {
+			if (inode->i_nlink && !d_unhashed(dentry))
+				drop_nlink(inode);
+		} else {
+			clear_nlink(inode);
+			/* stop next lookup */
+			inode->i_flags |= S_DEAD;
+		}
+		smp_mb(); /* necessary? */
+	}
+	d_drop(dentry);
+}
+
+static int au_hide_children(struct dentry *parent)
+{
+	int err, i, j, ndentry;
+	struct au_dcsub_pages dpages;
+	struct au_dpage *dpage;
+	struct dentry *dentry;
+
+	err = au_dpages_init(&dpages, GFP_NOFS);
+	if (unlikely(err))
+		goto out;
+	err = au_dcsub_pages(&dpages, parent, NULL, NULL);
+	if (unlikely(err))
+		goto out_dpages;
+
+	/* in reverse order */
+	for (i = dpages.ndpage - 1; i >= 0; i--) {
+		dpage = dpages.dpages + i;
+		ndentry = dpage->ndentry;
+		for (j = ndentry - 1; j >= 0; j--) {
+			dentry = dpage->dentries[j];
+			if (dentry != parent)
+				au_do_hide(dentry);
+		}
+	}
+
+out_dpages:
+	au_dpages_free(&dpages);
+out:
+	return err;
+}
+
+static void au_hide(struct dentry *dentry)
+{
+	int err;
+	struct inode *inode;
+
+	AuDbgDentry(dentry);
+	inode = dentry->d_inode;
+	if (inode && S_ISDIR(inode->i_mode)) {
+		/* shrink_dcache_parent(dentry); */
+		err = au_hide_children(dentry);
+		if (unlikely(err))
+			AuIOErr("%.*s, failed hiding children, ignored %d\n",
+				AuDLNPair(dentry), err);
+	}
+	au_do_hide(dentry);
+}
+
+/*
+ * When a dirty branch is added, a cached dentry may be affected in various ways.
+ *
+ * a dirty branch is added
+ * - on the top of layers
+ * - in the middle of layers
+ * - to the bottom of layers
+ *
+ * on the added branch there exists
+ * - a whiteout
+ * - a diropq
+ * - a same named entry
+ *   + exist
+ *     * negative --> positive
+ *     * positive --> positive
+ *	 - type is unchanged
+ *	 - type is changed
+ *   + doesn't exist
+ *     * negative --> negative
+ *     * positive --> negative (rejected by au_br_del() for non-dir case)
+ * - none
+ */
+static int au_refresh_by_dinfo(struct dentry *dentry, struct au_dinfo *dinfo,
+			       struct au_dinfo *tmp)
+{
+	int err;
+	aufs_bindex_t bindex, bend;
+	struct {
+		struct dentry *dentry;
+		struct inode *inode;
+		mode_t mode;
+	} orig_h, tmp_h;
+	struct au_hdentry *hd;
+	struct inode *inode, *h_inode;
+	struct dentry *h_dentry;
+
+	err = 0;
+	AuDebugOn(dinfo->di_bstart < 0);
+	orig_h.dentry = dinfo->di_hdentry[dinfo->di_bstart].hd_dentry;
+	orig_h.inode = orig_h.dentry->d_inode;
+	orig_h.mode = 0;
+	if (orig_h.inode)
+		orig_h.mode = orig_h.inode->i_mode & S_IFMT;
+	memset(&tmp_h, 0, sizeof(tmp_h));
+	if (tmp->di_bstart >= 0) {
+		tmp_h.dentry = tmp->di_hdentry[tmp->di_bstart].hd_dentry;
+		tmp_h.inode = tmp_h.dentry->d_inode;
+		if (tmp_h.inode)
+			tmp_h.mode = tmp_h.inode->i_mode & S_IFMT;
+	}
+
+	inode = dentry->d_inode;
+	if (!orig_h.inode) {
+		AuDbg("nagative originally\n");
+		if (inode) {
+			au_hide(dentry);
+			goto out;
+		}
+		AuDebugOn(inode);
+		AuDebugOn(dinfo->di_bstart != dinfo->di_bend);
+		AuDebugOn(dinfo->di_bdiropq != -1);
+
+		if (!tmp_h.inode) {
+			AuDbg("negative --> negative\n");
+			/* should have only one negative lower */
+			if (tmp->di_bstart >= 0
+			    && tmp->di_bstart < dinfo->di_bstart) {
+				AuDebugOn(tmp->di_bstart != tmp->di_bend);
+				AuDebugOn(dinfo->di_bstart != dinfo->di_bend);
+				au_set_h_dptr(dentry, dinfo->di_bstart, NULL);
+				au_di_cp(dinfo, tmp);
+				hd = tmp->di_hdentry + tmp->di_bstart;
+				au_set_h_dptr(dentry, tmp->di_bstart,
+					      dget(hd->hd_dentry));
+			}
+			au_dbg_verify_dinode(dentry);
+		} else {
+			AuDbg("negative --> positive\n");
+			/*
+			 * similar to the behaviour of creating while
+			 * bypassing aufs.
+			 * unhash it in order to force an error in the
+			 * succeeding create operation.
+			 * we should not set S_DEAD here.
+			 */
+			d_drop(dentry);
+			/* au_di_swap(tmp, dinfo); */
+			au_dbg_verify_dinode(dentry);
+		}
+	} else {
+		AuDbg("positive originally\n");
+		/* inode may be NULL */
+		AuDebugOn(inode && (inode->i_mode & S_IFMT) != orig_h.mode);
+		if (!tmp_h.inode) {
+			AuDbg("positive --> negative\n");
+			/* or bypassing aufs */
+			au_hide(dentry);
+			if (tmp->di_bwh >= 0 && tmp->di_bwh <= dinfo->di_bstart)
+				dinfo->di_bwh = tmp->di_bwh;
+			if (inode)
+				err = au_refresh_hinode_self(inode);
+			au_dbg_verify_dinode(dentry);
+		} else if (orig_h.mode == tmp_h.mode) {
+			AuDbg("positive --> positive, same type\n");
+			if (!S_ISDIR(orig_h.mode)
+			    && dinfo->di_bstart > tmp->di_bstart) {
+				/*
+				 * similar to the behaviour of removing and
+				 * creating.
+				 */
+				au_hide(dentry);
+				if (inode)
+					err = au_refresh_hinode_self(inode);
+				au_dbg_verify_dinode(dentry);
+			} else {
+				/* fill empty slots */
+				if (dinfo->di_bstart > tmp->di_bstart)
+					dinfo->di_bstart = tmp->di_bstart;
+				if (dinfo->di_bend < tmp->di_bend)
+					dinfo->di_bend = tmp->di_bend;
+				dinfo->di_bwh = tmp->di_bwh;
+				dinfo->di_bdiropq = tmp->di_bdiropq;
+				hd = tmp->di_hdentry;
+				bend = dinfo->di_bend;
+				for (bindex = tmp->di_bstart; bindex <= bend;
+				     bindex++) {
+					if (au_h_dptr(dentry, bindex))
+						continue;
+					h_dentry = hd[bindex].hd_dentry;
+					if (!h_dentry)
+						continue;
+					h_inode = h_dentry->d_inode;
+					AuDebugOn(!h_inode);
+					AuDebugOn(orig_h.mode
+						  != (h_inode->i_mode
+						      & S_IFMT));
+					au_set_h_dptr(dentry, bindex,
+						      dget(h_dentry));
+				}
+				err = au_refresh_hinode(inode, dentry);
+				au_dbg_verify_dinode(dentry);
+			}
+		} else {
+			AuDbg("positive --> positive, different type\n");
+			/* similar to the behaviour of removing and creating */
+			au_hide(dentry);
+			if (inode)
+				err = au_refresh_hinode_self(inode);
+			au_dbg_verify_dinode(dentry);
+		}
+	}
+
+out:
+	return err;
+}
+
+int au_refresh_dentry(struct dentry *dentry, struct dentry *parent)
+{
+	int err, ebrange;
+	unsigned int sigen;
+	struct au_dinfo *dinfo, *tmp;
+	struct super_block *sb;
+	struct inode *inode;
+
+	DiMustWriteLock(dentry);
+	AuDebugOn(IS_ROOT(dentry));
+	AuDebugOn(!parent->d_inode);
+
+	sb = dentry->d_sb;
+	inode = dentry->d_inode;
+	sigen = au_sigen(sb);
+	err = au_digen_test(parent, sigen);
+	if (unlikely(err))
+		goto out;
+
+	dinfo = au_di(dentry);
+	err = au_di_realloc(dinfo, au_sbend(sb) + 1);
+	if (unlikely(err))
+		goto out;
+	ebrange = au_dbrange_test(dentry);
+	if (!ebrange)
+		ebrange = au_do_refresh_hdentry(dentry, parent);
+
+	if (d_unhashed(dentry) || ebrange) {
+		AuDebugOn(au_dbstart(dentry) < 0 && au_dbend(dentry) >= 0);
+		if (inode)
+			err = au_refresh_hinode_self(inode);
+		au_dbg_verify_dinode(dentry);
+		if (!err)
+			goto out_dgen; /* success */
+		goto out;
+	}
+
+	/* temporary dinfo */
+	AuDbgDentry(dentry);
+	err = -ENOMEM;
+	tmp = au_di_alloc(sb, AuLsc_DI_TMP);
+	if (unlikely(!tmp))
+		goto out;
+	au_di_swap(tmp, dinfo);
+	/* returns the number of positive dentries */
+	/*
+	 * if the current working dir is removed, it returns an error,
+	 * but the dentry is still legal.
+	 */
+	err = au_lkup_dentry(dentry, /*bstart*/0, /*type*/0);
+	AuDbgDentry(dentry);
+	au_di_swap(tmp, dinfo);
+	if (err == -ENOENT)
+		err = 0;
+	if (err >= 0) {
+		/* compare/refresh by dinfo */
+		AuDbgDentry(dentry);
+		err = au_refresh_by_dinfo(dentry, dinfo, tmp);
+		au_dbg_verify_dinode(dentry);
+		AuTraceErr(err);
+	}
+	au_rw_write_unlock(&tmp->di_rwsem);
+	au_di_free(tmp);
+	if (unlikely(err))
+		goto out;
+
+out_dgen:
+	au_update_digen(dentry);
+out:
+	if (unlikely(err && !(dentry->d_flags & DCACHE_NFSFS_RENAMED))) {
+		AuIOErr("failed refreshing %.*s, %d\n",
+			AuDLNPair(dentry), err);
+		AuDbgDentry(dentry);
+	}
+	AuTraceErr(err);
+	return err;
+}
+
+static int au_do_h_d_reval(struct dentry *h_dentry, unsigned int flags,
+			   struct dentry *dentry, aufs_bindex_t bindex)
+{
+	int err, valid;
+
+	err = 0;
+	if (!(h_dentry->d_flags & DCACHE_OP_REVALIDATE))
+		goto out;
+
+	AuDbg("b%d\n", bindex);
+	/*
+	 * we gave up supporting LOOKUP_CREATE/OPEN for the lower fs,
+	 * due to whiteouts and branch permissions.
+	 */
+	flags &= ~(/*LOOKUP_PARENT |*/ LOOKUP_OPEN | LOOKUP_CREATE
+		   | LOOKUP_FOLLOW | LOOKUP_EXCL);
+	/* it may return tri-state */
+	valid = h_dentry->d_op->d_revalidate(h_dentry, flags);
+
+	if (unlikely(valid < 0))
+		err = valid;
+	else if (!valid)
+		err = -EINVAL;
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/* todo: remove this */
+static int h_d_revalidate(struct dentry *dentry, struct inode *inode,
+			  unsigned int flags, int do_udba)
+{
+	int err;
+	umode_t mode, h_mode;
+	aufs_bindex_t bindex, btail, bstart, ibs, ibe;
+	unsigned char plus, unhashed, is_root, h_plus;
+	struct inode *h_inode, *h_cached_inode;
+	struct dentry *h_dentry;
+	struct qstr *name, *h_name;
+
+	err = 0;
+	plus = 0;
+	mode = 0;
+	ibs = -1;
+	ibe = -1;
+	unhashed = !!d_unhashed(dentry);
+	is_root = !!IS_ROOT(dentry);
+	name = &dentry->d_name;
+
+	/*
+	 * Theoretically, the REVAL test should be unnecessary in the case
+	 * of {FS,I}NOTIFY.
+	 * But {fs,i}notify doesn't fire some necessary events,
+	 *	IN_ATTRIB for atime/nlink/pageio
+	 *	IN_DELETE for an NFS dentry
+	 * so let's do the REVAL test too.
+	 */
+	if (do_udba && inode) {
+		mode = (inode->i_mode & S_IFMT);
+		plus = (inode->i_nlink > 0);
+		ibs = au_ibstart(inode);
+		ibe = au_ibend(inode);
+	}
+
+	bstart = au_dbstart(dentry);
+	btail = bstart;
+	if (inode && S_ISDIR(inode->i_mode))
+		btail = au_dbtaildir(dentry);
+	for (bindex = bstart; bindex <= btail; bindex++) {
+		h_dentry = au_h_dptr(dentry, bindex);
+		if (!h_dentry)
+			continue;
+
+		AuDbg("b%d, %.*s\n", bindex, AuDLNPair(h_dentry));
+		spin_lock(&h_dentry->d_lock);
+		h_name = &h_dentry->d_name;
+		if (unlikely(do_udba
+			     && !is_root
+			     && (unhashed != !!d_unhashed(h_dentry)
+				 || name->len != h_name->len
+				 || memcmp(name->name, h_name->name, name->len))
+			    )) {
+			AuDbg("unhash 0x%x 0x%x, %.*s %.*s\n",
+				  unhashed, d_unhashed(h_dentry),
+				  AuDLNPair(dentry), AuDLNPair(h_dentry));
+			spin_unlock(&h_dentry->d_lock);
+			goto err;
+		}
+		spin_unlock(&h_dentry->d_lock);
+
+		err = au_do_h_d_reval(h_dentry, flags, dentry, bindex);
+		if (unlikely(err))
+			/* do not goto err, to keep the errno */
+			break;
+
+		/* todo: plink too? */
+		if (!do_udba)
+			continue;
+
+		/* UDBA tests */
+		h_inode = h_dentry->d_inode;
+		if (unlikely(!!inode != !!h_inode))
+			goto err;
+
+		h_plus = plus;
+		h_mode = mode;
+		h_cached_inode = h_inode;
+		if (h_inode) {
+			h_mode = (h_inode->i_mode & S_IFMT);
+			h_plus = (h_inode->i_nlink > 0);
+		}
+		if (inode && ibs <= bindex && bindex <= ibe)
+			h_cached_inode = au_h_iptr(inode, bindex);
+
+		if (unlikely(plus != h_plus
+			     || mode != h_mode
+			     || h_cached_inode != h_inode))
+			goto err;
+		continue;
+
+	err:
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
+/* todo: consolidate with do_refresh() and au_reval_for_attr() */
+static int simple_reval_dpath(struct dentry *dentry, unsigned int sigen)
+{
+	int err;
+	struct dentry *parent;
+
+	if (!au_digen_test(dentry, sigen))
+		return 0;
+
+	parent = dget_parent(dentry);
+	di_read_lock_parent(parent, AuLock_IR);
+	AuDebugOn(au_digen_test(parent, sigen));
+	au_dbg_verify_gen(parent, sigen);
+	err = au_refresh_dentry(dentry, parent);
+	di_read_unlock(parent, AuLock_IR);
+	dput(parent);
+	AuTraceErr(err);
+	return err;
+}
+
+int au_reval_dpath(struct dentry *dentry, unsigned int sigen)
+{
+	int err;
+	struct dentry *d, *parent;
+	struct inode *inode;
+
+	if (!au_ftest_si(au_sbi(dentry->d_sb), FAILED_REFRESH_DIR))
+		return simple_reval_dpath(dentry, sigen);
+
+	/* slow loop, keep it simple and stupid */
+	/* cf: au_cpup_dirs() */
+	err = 0;
+	parent = NULL;
+	while (au_digen_test(dentry, sigen)) {
+		d = dentry;
+		while (1) {
+			dput(parent);
+			parent = dget_parent(d);
+			if (!au_digen_test(parent, sigen))
+				break;
+			d = parent;
+		}
+
+		inode = d->d_inode;
+		if (d != dentry)
+			di_write_lock_child2(d);
+
+		/* someone might update our dentry while we were sleeping */
+		if (au_digen_test(d, sigen)) {
+			/*
+			 * todo: consolidate with simple_reval_dpath(),
+			 * do_refresh() and au_reval_for_attr().
+			 */
+			di_read_lock_parent(parent, AuLock_IR);
+			err = au_refresh_dentry(d, parent);
+			di_read_unlock(parent, AuLock_IR);
+		}
+
+		if (d != dentry)
+			di_write_unlock(d);
+		dput(parent);
+		if (unlikely(err))
+			break;
+	}
+
+	return err;
+}
+
+/*
+ * returns 1 if valid, otherwise 0.
+ */
+static int aufs_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+	int valid, err;
+	unsigned int sigen;
+	unsigned char do_udba;
+	struct super_block *sb;
+	struct inode *inode;
+
+	/* todo: support rcu-walk? */
+	if (flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	valid = 0;
+	if (unlikely(!au_di(dentry)))
+		goto out;
+
+	inode = dentry->d_inode;
+	if (inode && is_bad_inode(inode))
+		goto out;
+
+	valid = 1;
+	sb = dentry->d_sb;
+	/*
+	 * todo: very ugly
+	 * i_mutex of parent dir may be held,
+	 * but we should not return 'invalid' merely because it is busy.
+	 */
+	err = aufs_read_lock(dentry, AuLock_FLUSH | AuLock_DW | AuLock_NOPLM);
+	if (unlikely(err)) {
+		valid = err;
+		AuTraceErr(err);
+		goto out;
+	}
+	if (unlikely(au_dbrange_test(dentry))) {
+		err = -EINVAL;
+		AuTraceErr(err);
+		goto out_dgrade;
+	}
+
+	sigen = au_sigen(sb);
+	if (au_digen_test(dentry, sigen)) {
+		AuDebugOn(IS_ROOT(dentry));
+		err = au_reval_dpath(dentry, sigen);
+		if (unlikely(err)) {
+			AuTraceErr(err);
+			goto out_dgrade;
+		}
+	}
+	di_downgrade_lock(dentry, AuLock_IR);
+
+	err = -EINVAL;
+	if (inode && (IS_DEADDIR(inode) || !inode->i_nlink))
+		goto out_inval;
+
+	do_udba = !au_opt_test(au_mntflags(sb), UDBA_NONE);
+	if (do_udba && inode) {
+		aufs_bindex_t bstart = au_ibstart(inode);
+		struct inode *h_inode;
+
+		if (bstart >= 0) {
+			h_inode = au_h_iptr(inode, bstart);
+			if (h_inode && au_test_higen(inode, h_inode))
+				goto out_inval;
+		}
+	}
+
+	err = h_d_revalidate(dentry, inode, flags, do_udba);
+	if (unlikely(!err && do_udba && au_dbstart(dentry) < 0)) {
+		err = -EIO;
+		AuDbg("both of real entry and whiteout found, %.*s, err %d\n",
+		      AuDLNPair(dentry), err);
+	}
+	goto out_inval;
+
+out_dgrade:
+	di_downgrade_lock(dentry, AuLock_IR);
+out_inval:
+	aufs_read_unlock(dentry, AuLock_IR);
+	AuTraceErr(err);
+	valid = !err;
+out:
+	if (!valid) {
+		AuDbg("%.*s invalid, %d\n", AuDLNPair(dentry), valid);
+		d_drop(dentry);
+	}
+	return valid;
+}
+
+static void aufs_d_release(struct dentry *dentry)
+{
+	if (au_di(dentry)) {
+		au_di_fin(dentry);
+		au_hn_di_reinit(dentry);
+	}
+}
+
+const struct dentry_operations aufs_dop = {
+	.d_revalidate		= aufs_d_revalidate,
+	.d_weak_revalidate	= aufs_d_revalidate,
+	.d_release		= aufs_d_release
+};
diff --git a/fs/aufs/dentry.h b/fs/aufs/dentry.h
new file mode 100644
index 0000000..175a871
--- /dev/null
+++ b/fs/aufs/dentry.h
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * lookup and dentry operations
+ */
+
+#ifndef __AUFS_DENTRY_H__
+#define __AUFS_DENTRY_H__
+
+#ifdef __KERNEL__
+
+#include <linux/dcache.h>
+#include "rwsem.h"
+
+struct au_hdentry {
+	struct dentry		*hd_dentry;
+	aufs_bindex_t		hd_id;
+};
+
+struct au_dinfo {
+	atomic_t		di_generation;
+
+	struct au_rwsem		di_rwsem;
+	aufs_bindex_t		di_bstart, di_bend, di_bwh, di_bdiropq;
+	struct au_hdentry	*di_hdentry;
+} ____cacheline_aligned_in_smp;
+
+/* ---------------------------------------------------------------------- */
+
+/* dentry.c */
+extern const struct dentry_operations aufs_dop;
+struct au_branch;
+struct dentry *au_sio_lkup_one(struct qstr *name, struct dentry *parent,
+			       struct au_branch *br);
+int au_h_verify(struct dentry *h_dentry, unsigned int udba, struct inode *h_dir,
+		struct dentry *h_parent, struct au_branch *br);
+
+int au_lkup_dentry(struct dentry *dentry, aufs_bindex_t bstart, mode_t type);
+int au_lkup_neg(struct dentry *dentry, aufs_bindex_t bindex, int wh);
+int au_refresh_dentry(struct dentry *dentry, struct dentry *parent);
+int au_reval_dpath(struct dentry *dentry, unsigned int sigen);
+
+/* dinfo.c */
+void au_di_init_once(void *_di);
+struct au_dinfo *au_di_alloc(struct super_block *sb, unsigned int lsc);
+void au_di_free(struct au_dinfo *dinfo);
+void au_di_swap(struct au_dinfo *a, struct au_dinfo *b);
+void au_di_cp(struct au_dinfo *dst, struct au_dinfo *src);
+int au_di_init(struct dentry *dentry);
+void au_di_fin(struct dentry *dentry);
+int au_di_realloc(struct au_dinfo *dinfo, int nbr);
+
+void di_read_lock(struct dentry *d, int flags, unsigned int lsc);
+void di_read_unlock(struct dentry *d, int flags);
+void di_downgrade_lock(struct dentry *d, int flags);
+void di_write_lock(struct dentry *d, unsigned int lsc);
+void di_write_unlock(struct dentry *d);
+void di_write_lock2_child(struct dentry *d1, struct dentry *d2, int isdir);
+void di_write_lock2_parent(struct dentry *d1, struct dentry *d2, int isdir);
+void di_write_unlock2(struct dentry *d1, struct dentry *d2);
+
+struct dentry *au_h_dptr(struct dentry *dentry, aufs_bindex_t bindex);
+struct dentry *au_h_d_alias(struct dentry *dentry, aufs_bindex_t bindex);
+aufs_bindex_t au_dbtail(struct dentry *dentry);
+aufs_bindex_t au_dbtaildir(struct dentry *dentry);
+
+void au_set_h_dptr(struct dentry *dentry, aufs_bindex_t bindex,
+		   struct dentry *h_dentry);
+int au_digen_test(struct dentry *dentry, unsigned int sigen);
+int au_dbrange_test(struct dentry *dentry);
+void au_update_digen(struct dentry *dentry);
+void au_update_dbrange(struct dentry *dentry, int do_put_zero);
+void au_update_dbstart(struct dentry *dentry);
+void au_update_dbend(struct dentry *dentry);
+int au_find_dbindex(struct dentry *dentry, struct dentry *h_dentry);
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct au_dinfo *au_di(struct dentry *dentry)
+{
+	return dentry->d_fsdata;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* lock subclass for dinfo */
+enum {
+	AuLsc_DI_CHILD,		/* child first */
+	AuLsc_DI_CHILD2,	/* rename(2), link(2), and cpup at hnotify */
+	AuLsc_DI_CHILD3,	/* copyup dirs */
+	AuLsc_DI_PARENT,
+	AuLsc_DI_PARENT2,
+	AuLsc_DI_PARENT3,
+	AuLsc_DI_TMP		/* temp for replacing dinfo */
+};
+
+/*
+ * di_read_lock_child, di_write_lock_child,
+ * di_read_lock_child2, di_write_lock_child2,
+ * di_read_lock_child3, di_write_lock_child3,
+ * di_read_lock_parent, di_write_lock_parent,
+ * di_read_lock_parent2, di_write_lock_parent2,
+ * di_read_lock_parent3, di_write_lock_parent3,
+ */
+#define AuReadLockFunc(name, lsc) \
+static inline void di_read_lock_##name(struct dentry *d, int flags) \
+{ di_read_lock(d, flags, AuLsc_DI_##lsc); }
+
+#define AuWriteLockFunc(name, lsc) \
+static inline void di_write_lock_##name(struct dentry *d) \
+{ di_write_lock(d, AuLsc_DI_##lsc); }
+
+#define AuRWLockFuncs(name, lsc) \
+	AuReadLockFunc(name, lsc) \
+	AuWriteLockFunc(name, lsc)
+
+AuRWLockFuncs(child, CHILD);
+AuRWLockFuncs(child2, CHILD2);
+AuRWLockFuncs(child3, CHILD3);
+AuRWLockFuncs(parent, PARENT);
+AuRWLockFuncs(parent2, PARENT2);
+AuRWLockFuncs(parent3, PARENT3);
+
+#undef AuReadLockFunc
+#undef AuWriteLockFunc
+#undef AuRWLockFuncs
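+
+/*
+ * For illustration, AuRWLockFuncs(child, CHILD) above generated
+ *	static inline void di_read_lock_child(struct dentry *d, int flags)
+ *	{ di_read_lock(d, flags, AuLsc_DI_CHILD); }
+ *	static inline void di_write_lock_child(struct dentry *d)
+ *	{ di_write_lock(d, AuLsc_DI_CHILD); }
+ */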
+
+#define DiMustNoWaiters(d)	AuRwMustNoWaiters(&au_di(d)->di_rwsem)
+#define DiMustAnyLock(d)	AuRwMustAnyLock(&au_di(d)->di_rwsem)
+#define DiMustWriteLock(d)	AuRwMustWriteLock(&au_di(d)->di_rwsem)
+
+/* ---------------------------------------------------------------------- */
+
+/* todo: memory barrier? */
+static inline unsigned int au_digen(struct dentry *d)
+{
+	return atomic_read(&au_di(d)->di_generation);
+}
+
+static inline void au_h_dentry_init(struct au_hdentry *hdentry)
+{
+	hdentry->hd_dentry = NULL;
+}
+
+static inline void au_hdput(struct au_hdentry *hd)
+{
+	if (hd)
+		dput(hd->hd_dentry);
+}
+
+static inline aufs_bindex_t au_dbstart(struct dentry *dentry)
+{
+	DiMustAnyLock(dentry);
+	return au_di(dentry)->di_bstart;
+}
+
+static inline aufs_bindex_t au_dbend(struct dentry *dentry)
+{
+	DiMustAnyLock(dentry);
+	return au_di(dentry)->di_bend;
+}
+
+static inline aufs_bindex_t au_dbwh(struct dentry *dentry)
+{
+	DiMustAnyLock(dentry);
+	return au_di(dentry)->di_bwh;
+}
+
+static inline aufs_bindex_t au_dbdiropq(struct dentry *dentry)
+{
+	DiMustAnyLock(dentry);
+	return au_di(dentry)->di_bdiropq;
+}
+
+/* todo: hard/soft set? */
+static inline void au_set_dbstart(struct dentry *dentry, aufs_bindex_t bindex)
+{
+	DiMustWriteLock(dentry);
+	au_di(dentry)->di_bstart = bindex;
+}
+
+static inline void au_set_dbend(struct dentry *dentry, aufs_bindex_t bindex)
+{
+	DiMustWriteLock(dentry);
+	au_di(dentry)->di_bend = bindex;
+}
+
+static inline void au_set_dbwh(struct dentry *dentry, aufs_bindex_t bindex)
+{
+	DiMustWriteLock(dentry);
+	/* dbwh can be outside of bstart - bend range */
+	au_di(dentry)->di_bwh = bindex;
+}
+
+static inline void au_set_dbdiropq(struct dentry *dentry, aufs_bindex_t bindex)
+{
+	DiMustWriteLock(dentry);
+	au_di(dentry)->di_bdiropq = bindex;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_HNOTIFY
+static inline void au_digen_dec(struct dentry *d)
+{
+	atomic_dec(&au_di(d)->di_generation);
+}
+
+static inline void au_hn_di_reinit(struct dentry *dentry)
+{
+	dentry->d_fsdata = NULL;
+}
+#else
+AuStubVoid(au_hn_di_reinit, struct dentry *dentry __maybe_unused)
+#endif /* CONFIG_AUFS_HNOTIFY */
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DENTRY_H__ */
diff --git a/fs/aufs/dinfo.c b/fs/aufs/dinfo.c
new file mode 100644
index 0000000..2a92487
--- /dev/null
+++ b/fs/aufs/dinfo.c
@@ -0,0 +1,543 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * dentry private data
+ */
+
+#include "aufs.h"
+
+void au_di_init_once(void *_dinfo)
+{
+	struct au_dinfo *dinfo = _dinfo;
+	static struct lock_class_key aufs_di;
+
+	au_rw_init(&dinfo->di_rwsem);
+	au_rw_class(&dinfo->di_rwsem, &aufs_di);
+}
+
+struct au_dinfo *au_di_alloc(struct super_block *sb, unsigned int lsc)
+{
+	struct au_dinfo *dinfo;
+	int nbr, i;
+
+	dinfo = au_cache_alloc_dinfo();
+	if (unlikely(!dinfo))
+		goto out;
+
+	nbr = au_sbend(sb) + 1;
+	if (nbr <= 0)
+		nbr = 1;
+	dinfo->di_hdentry = kcalloc(nbr, sizeof(*dinfo->di_hdentry), GFP_NOFS);
+	if (dinfo->di_hdentry) {
+		au_rw_write_lock_nested(&dinfo->di_rwsem, lsc);
+		dinfo->di_bstart = -1;
+		dinfo->di_bend = -1;
+		dinfo->di_bwh = -1;
+		dinfo->di_bdiropq = -1;
+		for (i = 0; i < nbr; i++)
+			dinfo->di_hdentry[i].hd_id = -1;
+		goto out;
+	}
+
+	au_cache_free_dinfo(dinfo);
+	dinfo = NULL;
+
+out:
+	return dinfo;
+}
+
+void au_di_free(struct au_dinfo *dinfo)
+{
+	struct au_hdentry *p;
+	aufs_bindex_t bend, bindex;
+
+	/* dentry may not be revalidated */
+	bindex = dinfo->di_bstart;
+	if (bindex >= 0) {
+		bend = dinfo->di_bend;
+		p = dinfo->di_hdentry + bindex;
+		while (bindex++ <= bend)
+			au_hdput(p++);
+	}
+	kfree(dinfo->di_hdentry);
+	au_cache_free_dinfo(dinfo);
+}
+
+void au_di_swap(struct au_dinfo *a, struct au_dinfo *b)
+{
+	struct au_hdentry *p;
+	aufs_bindex_t bi;
+
+	AuRwMustWriteLock(&a->di_rwsem);
+	AuRwMustWriteLock(&b->di_rwsem);
+
+#define DiSwap(v, name)				\
+	do {					\
+		v = a->di_##name;		\
+		a->di_##name = b->di_##name;	\
+		b->di_##name = v;		\
+	} while (0)
+
+	DiSwap(p, hdentry);
+	DiSwap(bi, bstart);
+	DiSwap(bi, bend);
+	DiSwap(bi, bwh);
+	DiSwap(bi, bdiropq);
+	/* smp_mb(); */
+
+#undef DiSwap
+}
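+
+/*
+ * For illustration, DiSwap(bi, bstart) above expands to a plain three-way
+ * swap through the matching temporary:
+ *	bi = a->di_bstart;
+ *	a->di_bstart = b->di_bstart;
+ *	b->di_bstart = bi;
+ */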
+
+void au_di_cp(struct au_dinfo *dst, struct au_dinfo *src)
+{
+	AuRwMustWriteLock(&dst->di_rwsem);
+	AuRwMustWriteLock(&src->di_rwsem);
+
+	dst->di_bstart = src->di_bstart;
+	dst->di_bend = src->di_bend;
+	dst->di_bwh = src->di_bwh;
+	dst->di_bdiropq = src->di_bdiropq;
+	/* smp_mb(); */
+}
+
+int au_di_init(struct dentry *dentry)
+{
+	int err;
+	struct super_block *sb;
+	struct au_dinfo *dinfo;
+
+	err = 0;
+	sb = dentry->d_sb;
+	dinfo = au_di_alloc(sb, AuLsc_DI_CHILD);
+	if (dinfo) {
+		atomic_set(&dinfo->di_generation, au_sigen(sb));
+		/* smp_mb(); */ /* atomic_set */
+		dentry->d_fsdata = dinfo;
+	} else
+		err = -ENOMEM;
+
+	return err;
+}
+
+void au_di_fin(struct dentry *dentry)
+{
+	struct au_dinfo *dinfo;
+
+	dinfo = au_di(dentry);
+	AuRwDestroy(&dinfo->di_rwsem);
+	au_di_free(dinfo);
+}
+
+int au_di_realloc(struct au_dinfo *dinfo, int nbr)
+{
+	int err, sz;
+	struct au_hdentry *hdp;
+
+	AuRwMustWriteLock(&dinfo->di_rwsem);
+
+	err = -ENOMEM;
+	sz = sizeof(*hdp) * (dinfo->di_bend + 1);
+	if (!sz)
+		sz = sizeof(*hdp);
+	hdp = au_kzrealloc(dinfo->di_hdentry, sz, sizeof(*hdp) * nbr, GFP_NOFS);
+	if (hdp) {
+		dinfo->di_hdentry = hdp;
+		err = 0;
+	}
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void do_ii_write_lock(struct inode *inode, unsigned int lsc)
+{
+	switch (lsc) {
+	case AuLsc_DI_CHILD:
+		ii_write_lock_child(inode);
+		break;
+	case AuLsc_DI_CHILD2:
+		ii_write_lock_child2(inode);
+		break;
+	case AuLsc_DI_CHILD3:
+		ii_write_lock_child3(inode);
+		break;
+	case AuLsc_DI_PARENT:
+		ii_write_lock_parent(inode);
+		break;
+	case AuLsc_DI_PARENT2:
+		ii_write_lock_parent2(inode);
+		break;
+	case AuLsc_DI_PARENT3:
+		ii_write_lock_parent3(inode);
+		break;
+	default:
+		BUG();
+	}
+}
+
+static void do_ii_read_lock(struct inode *inode, unsigned int lsc)
+{
+	switch (lsc) {
+	case AuLsc_DI_CHILD:
+		ii_read_lock_child(inode);
+		break;
+	case AuLsc_DI_CHILD2:
+		ii_read_lock_child2(inode);
+		break;
+	case AuLsc_DI_CHILD3:
+		ii_read_lock_child3(inode);
+		break;
+	case AuLsc_DI_PARENT:
+		ii_read_lock_parent(inode);
+		break;
+	case AuLsc_DI_PARENT2:
+		ii_read_lock_parent2(inode);
+		break;
+	case AuLsc_DI_PARENT3:
+		ii_read_lock_parent3(inode);
+		break;
+	default:
+		BUG();
+	}
+}
+
+void di_read_lock(struct dentry *d, int flags, unsigned int lsc)
+{
+	au_rw_read_lock_nested(&au_di(d)->di_rwsem, lsc);
+	if (d->d_inode) {
+		if (au_ftest_lock(flags, IW))
+			do_ii_write_lock(d->d_inode, lsc);
+		else if (au_ftest_lock(flags, IR))
+			do_ii_read_lock(d->d_inode, lsc);
+	}
+}
+
+void di_read_unlock(struct dentry *d, int flags)
+{
+	if (d->d_inode) {
+		if (au_ftest_lock(flags, IW)) {
+			au_dbg_verify_dinode(d);
+			ii_write_unlock(d->d_inode);
+		} else if (au_ftest_lock(flags, IR)) {
+			au_dbg_verify_dinode(d);
+			ii_read_unlock(d->d_inode);
+		}
+	}
+	au_rw_read_unlock(&au_di(d)->di_rwsem);
+}
+
+void di_downgrade_lock(struct dentry *d, int flags)
+{
+	if (d->d_inode && au_ftest_lock(flags, IR))
+		ii_downgrade_lock(d->d_inode);
+	au_rw_dgrade_lock(&au_di(d)->di_rwsem);
+}
+
+void di_write_lock(struct dentry *d, unsigned int lsc)
+{
+	au_rw_write_lock_nested(&au_di(d)->di_rwsem, lsc);
+	if (d->d_inode)
+		do_ii_write_lock(d->d_inode, lsc);
+}
+
+void di_write_unlock(struct dentry *d)
+{
+	au_dbg_verify_dinode(d);
+	if (d->d_inode)
+		ii_write_unlock(d->d_inode);
+	au_rw_write_unlock(&au_di(d)->di_rwsem);
+}
+
+void di_write_lock2_child(struct dentry *d1, struct dentry *d2, int isdir)
+{
+	AuDebugOn(d1 == d2
+		  || d1->d_inode == d2->d_inode
+		  || d1->d_sb != d2->d_sb);
+
+	if (isdir && au_test_subdir(d1, d2)) {
+		di_write_lock_child(d1);
+		di_write_lock_child2(d2);
+	} else {
+		/* there should be no races */
+		di_write_lock_child(d2);
+		di_write_lock_child2(d1);
+	}
+}
+
+void di_write_lock2_parent(struct dentry *d1, struct dentry *d2, int isdir)
+{
+	AuDebugOn(d1 == d2
+		  || d1->d_inode == d2->d_inode
+		  || d1->d_sb != d2->d_sb);
+
+	if (isdir && au_test_subdir(d1, d2)) {
+		di_write_lock_parent(d1);
+		di_write_lock_parent2(d2);
+	} else {
+		/* there should be no races */
+		di_write_lock_parent(d2);
+		di_write_lock_parent2(d1);
+	}
+}
+
+void di_write_unlock2(struct dentry *d1, struct dentry *d2)
+{
+	di_write_unlock(d1);
+	if (d1->d_inode == d2->d_inode)
+		au_rw_write_unlock(&au_di(d2)->di_rwsem);
+	else
+		di_write_unlock(d2);
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct dentry *au_h_dptr(struct dentry *dentry, aufs_bindex_t bindex)
+{
+	struct dentry *d;
+
+	DiMustAnyLock(dentry);
+
+	if (au_dbstart(dentry) < 0 || bindex < au_dbstart(dentry))
+		return NULL;
+	AuDebugOn(bindex < 0);
+	d = au_di(dentry)->di_hdentry[0 + bindex].hd_dentry;
+	AuDebugOn(d && d->d_count <= 0);
+	return d;
+}
+
+/*
+ * extended version of au_h_dptr().
+ * returns a hashed, positive h_dentry on bindex, NULL, or an error.
+ */
+struct dentry *au_h_d_alias(struct dentry *dentry, aufs_bindex_t bindex)
+{
+	struct dentry *h_dentry;
+	struct inode *inode, *h_inode;
+
+	inode = dentry->d_inode;
+	AuDebugOn(!inode);
+
+	h_dentry = NULL;
+	if (au_dbstart(dentry) <= bindex
+	    && bindex <= au_dbend(dentry))
+		h_dentry = au_h_dptr(dentry, bindex);
+	if (h_dentry && !au_d_hashed_positive(h_dentry)) {
+		dget(h_dentry);
+		goto out; /* success */
+	}
+
+	AuDebugOn(bindex < au_ibstart(inode));
+	AuDebugOn(au_ibend(inode) < bindex);
+	h_inode = au_h_iptr(inode, bindex);
+	h_dentry = d_find_alias(h_inode);
+	if (h_dentry) {
+		if (!IS_ERR(h_dentry)) {
+			if (!au_d_hashed_positive(h_dentry))
+				goto out; /* success */
+			dput(h_dentry);
+		} else
+			goto out;
+	}
+
+	if (au_opt_test(au_mntflags(dentry->d_sb), PLINK)) {
+		h_dentry = au_plink_lkup(inode, bindex);
+		AuDebugOn(!h_dentry);
+		if (!IS_ERR(h_dentry)) {
+			if (!au_d_hashed_positive(h_dentry))
+				goto out; /* success */
+			dput(h_dentry);
+			h_dentry = NULL;
+		}
+	}
+
+out:
+	AuDbgDentry(h_dentry);
+	return h_dentry;
+}
+
+aufs_bindex_t au_dbtail(struct dentry *dentry)
+{
+	aufs_bindex_t bend, bwh;
+
+	bend = au_dbend(dentry);
+	if (0 <= bend) {
+		bwh = au_dbwh(dentry);
+		if (!bwh)
+			return bwh;
+		if (0 < bwh && bwh < bend)
+			return bwh - 1;
+	}
+	return bend;
+}
+
+aufs_bindex_t au_dbtaildir(struct dentry *dentry)
+{
+	aufs_bindex_t bend, bopq;
+
+	bend = au_dbtail(dentry);
+	if (0 <= bend) {
+		bopq = au_dbdiropq(dentry);
+		if (0 <= bopq && bopq < bend)
+			bend = bopq;
+	}
+	return bend;
+}
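+
+/*
+ * A worked example: with dbend == 2 and a whiteout on branch 1
+ * (dbwh == 1), au_dbtail() returns 0 since the entries hidden by the
+ * whiteout must not be visited; similarly a directory-opaque mark
+ * (au_dbdiropq()) caps the result of au_dbtaildir().
+ */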
+
+/* ---------------------------------------------------------------------- */
+
+void au_set_h_dptr(struct dentry *dentry, aufs_bindex_t bindex,
+		   struct dentry *h_dentry)
+{
+	struct au_hdentry *hd = au_di(dentry)->di_hdentry + bindex;
+	struct au_branch *br;
+
+	DiMustWriteLock(dentry);
+
+	au_hdput(hd);
+	hd->hd_dentry = h_dentry;
+	if (h_dentry) {
+		br = au_sbr(dentry->d_sb, bindex);
+		hd->hd_id = br->br_id;
+	}
+}
+
+int au_dbrange_test(struct dentry *dentry)
+{
+	int err;
+	aufs_bindex_t bstart, bend;
+
+	err = 0;
+	bstart = au_dbstart(dentry);
+	bend = au_dbend(dentry);
+	if (bstart >= 0)
+		AuDebugOn(bend < 0 && bstart > bend);
+	else {
+		err = -EIO;
+		AuDebugOn(bend >= 0);
+	}
+
+	return err;
+}
+
+int au_digen_test(struct dentry *dentry, unsigned int sigen)
+{
+	int err;
+
+	err = 0;
+	if (unlikely(au_digen(dentry) != sigen
+		     || au_iigen_test(dentry->d_inode, sigen)))
+		err = -EIO;
+
+	return err;
+}
+
+void au_update_digen(struct dentry *dentry)
+{
+	atomic_set(&au_di(dentry)->di_generation, au_sigen(dentry->d_sb));
+	/* smp_mb(); */ /* atomic_set */
+}
+
+void au_update_dbrange(struct dentry *dentry, int do_put_zero)
+{
+	struct au_dinfo *dinfo;
+	struct dentry *h_d;
+	struct au_hdentry *hdp;
+
+	DiMustWriteLock(dentry);
+
+	dinfo = au_di(dentry);
+	if (!dinfo || dinfo->di_bstart < 0)
+		return;
+
+	hdp = dinfo->di_hdentry;
+	if (do_put_zero) {
+		aufs_bindex_t bindex, bend;
+
+		bend = dinfo->di_bend;
+		for (bindex = dinfo->di_bstart; bindex <= bend; bindex++) {
+			h_d = hdp[0 + bindex].hd_dentry;
+			if (h_d && !h_d->d_inode)
+				au_set_h_dptr(dentry, bindex, NULL);
+		}
+	}
+
+	dinfo->di_bstart = -1;
+	while (++dinfo->di_bstart <= dinfo->di_bend)
+		if (hdp[0 + dinfo->di_bstart].hd_dentry)
+			break;
+	if (dinfo->di_bstart > dinfo->di_bend) {
+		dinfo->di_bstart = -1;
+		dinfo->di_bend = -1;
+		return;
+	}
+
+	dinfo->di_bend++;
+	while (0 <= --dinfo->di_bend)
+		if (hdp[0 + dinfo->di_bend].hd_dentry)
+			break;
+	AuDebugOn(dinfo->di_bstart > dinfo->di_bend || dinfo->di_bend < 0);
+}
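+
+/*
+ * The two loops above re-derive the valid [di_bstart, di_bend] window:
+ * the first scans upwards for the first branch which still holds a
+ * lower dentry, the second scans downwards for the last one; when none
+ * remains, both are reset to -1.
+ */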
+
+void au_update_dbstart(struct dentry *dentry)
+{
+	aufs_bindex_t bindex, bend;
+	struct dentry *h_dentry;
+
+	bend = au_dbend(dentry);
+	for (bindex = au_dbstart(dentry); bindex <= bend; bindex++) {
+		h_dentry = au_h_dptr(dentry, bindex);
+		if (!h_dentry)
+			continue;
+		if (h_dentry->d_inode) {
+			au_set_dbstart(dentry, bindex);
+			return;
+		}
+		au_set_h_dptr(dentry, bindex, NULL);
+	}
+}
+
+void au_update_dbend(struct dentry *dentry)
+{
+	aufs_bindex_t bindex, bstart;
+	struct dentry *h_dentry;
+
+	bstart = au_dbstart(dentry);
+	for (bindex = au_dbend(dentry); bindex >= bstart; bindex--) {
+		h_dentry = au_h_dptr(dentry, bindex);
+		if (!h_dentry)
+			continue;
+		if (h_dentry->d_inode) {
+			au_set_dbend(dentry, bindex);
+			return;
+		}
+		au_set_h_dptr(dentry, bindex, NULL);
+	}
+}
+
+int au_find_dbindex(struct dentry *dentry, struct dentry *h_dentry)
+{
+	aufs_bindex_t bindex, bend;
+
+	bend = au_dbend(dentry);
+	for (bindex = au_dbstart(dentry); bindex <= bend; bindex++)
+		if (au_h_dptr(dentry, bindex) == h_dentry)
+			return bindex;
+	return -1;
+}
diff --git a/fs/aufs/dir.c b/fs/aufs/dir.c
new file mode 100644
index 0000000..568ea0a
--- /dev/null
+++ b/fs/aufs/dir.c
@@ -0,0 +1,632 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * directory operations
+ */
+
+#include <linux/fs_stack.h>
+#include "aufs.h"
+
+void au_add_nlink(struct inode *dir, struct inode *h_dir)
+{
+	unsigned int nlink;
+
+	AuDebugOn(!S_ISDIR(dir->i_mode) || !S_ISDIR(h_dir->i_mode));
+
+	nlink = dir->i_nlink;
+	nlink += h_dir->i_nlink - 2;
+	if (h_dir->i_nlink < 2)
+		nlink += 2;
+	smp_mb();
+	/* 0 can happen during revalidation */
+	set_nlink(dir, nlink);
+}
+
+void au_sub_nlink(struct inode *dir, struct inode *h_dir)
+{
+	unsigned int nlink;
+
+	AuDebugOn(!S_ISDIR(dir->i_mode) || !S_ISDIR(h_dir->i_mode));
+
+	nlink = dir->i_nlink;
+	nlink -= h_dir->i_nlink - 2;
+	if (h_dir->i_nlink < 2)
+		nlink -= 2;
+	smp_mb();
+	/* nlink == 0 means the branch-fs is broken */
+	set_nlink(dir, nlink);
+}
+
+loff_t au_dir_size(struct file *file, struct dentry *dentry)
+{
+	loff_t sz;
+	aufs_bindex_t bindex, bend;
+	struct file *h_file;
+	struct dentry *h_dentry;
+
+	sz = 0;
+	if (file) {
+		AuDebugOn(!file_inode(file));
+		AuDebugOn(!S_ISDIR(file_inode(file)->i_mode));
+
+		bend = au_fbend_dir(file);
+		for (bindex = au_fbstart(file);
+		     bindex <= bend && sz < KMALLOC_MAX_SIZE;
+		     bindex++) {
+			h_file = au_hf_dir(file, bindex);
+			if (h_file && file_inode(h_file))
+				sz += vfsub_f_size_read(h_file);
+		}
+	} else {
+		AuDebugOn(!dentry);
+		AuDebugOn(!dentry->d_inode);
+		AuDebugOn(!S_ISDIR(dentry->d_inode->i_mode));
+
+		bend = au_dbtaildir(dentry);
+		for (bindex = au_dbstart(dentry);
+		     bindex <= bend && sz < KMALLOC_MAX_SIZE;
+		     bindex++) {
+			h_dentry = au_h_dptr(dentry, bindex);
+			if (h_dentry && h_dentry->d_inode)
+				sz += i_size_read(h_dentry->d_inode);
+		}
+	}
+	if (sz < KMALLOC_MAX_SIZE)
+		sz = roundup_pow_of_two(sz);
+	if (sz > KMALLOC_MAX_SIZE)
+		sz = KMALLOC_MAX_SIZE;
+	else if (sz < NAME_MAX) {
+		BUILD_BUG_ON(AUFS_RDBLK_DEF < NAME_MAX);
+		sz = AUFS_RDBLK_DEF;
+	}
+	return sz;
+}
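+
+/*
+ * A sizing example: lower directory sizes summing to 3000 bytes are
+ * rounded up to the next power of two, 4096; a sum below NAME_MAX
+ * falls back to AUFS_RDBLK_DEF, and everything is capped at
+ * KMALLOC_MAX_SIZE (the result presumably sizes a kmalloc-ed buffer).
+ */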
+
+/* ---------------------------------------------------------------------- */
+
+static int reopen_dir(struct file *file)
+{
+	int err;
+	unsigned int flags;
+	aufs_bindex_t bindex, btail, bstart;
+	struct dentry *dentry, *h_dentry;
+	struct file *h_file;
+
+	/* open all lower dirs */
+	dentry = file->f_dentry;
+	bstart = au_dbstart(dentry);
+	for (bindex = au_fbstart(file); bindex < bstart; bindex++)
+		au_set_h_fptr(file, bindex, NULL);
+	au_set_fbstart(file, bstart);
+
+	btail = au_dbtaildir(dentry);
+	for (bindex = au_fbend_dir(file); btail < bindex; bindex--)
+		au_set_h_fptr(file, bindex, NULL);
+	au_set_fbend_dir(file, btail);
+
+	flags = vfsub_file_flags(file);
+	for (bindex = bstart; bindex <= btail; bindex++) {
+		h_dentry = au_h_dptr(dentry, bindex);
+		if (!h_dentry)
+			continue;
+		h_file = au_hf_dir(file, bindex);
+		if (h_file)
+			continue;
+
+		h_file = au_h_open(dentry, bindex, flags, file);
+		err = PTR_ERR(h_file);
+		if (IS_ERR(h_file))
+			goto out; /* close all? */
+		au_set_h_fptr(file, bindex, h_file);
+	}
+	au_update_figen(file);
+	/* todo: necessary? */
+	/* file->f_ra = h_file->f_ra; */
+	err = 0;
+
+out:
+	return err;
+}
+
+static int do_open_dir(struct file *file, int flags)
+{
+	int err;
+	aufs_bindex_t bindex, btail;
+	struct dentry *dentry, *h_dentry;
+	struct file *h_file;
+
+	FiMustWriteLock(file);
+
+	dentry = file->f_dentry;
+	err = au_alive_dir(dentry);
+	if (unlikely(err))
+		goto out;
+
+	file->f_version = dentry->d_inode->i_version;
+	bindex = au_dbstart(dentry);
+	au_set_fbstart(file, bindex);
+	btail = au_dbtaildir(dentry);
+	au_set_fbend_dir(file, btail);
+	for (; !err && bindex <= btail; bindex++) {
+		h_dentry = au_h_dptr(dentry, bindex);
+		if (!h_dentry)
+			continue;
+
+		h_file = au_h_open(dentry, bindex, flags, file);
+		if (IS_ERR(h_file)) {
+			err = PTR_ERR(h_file);
+			break;
+		}
+		au_set_h_fptr(file, bindex, h_file);
+	}
+	au_update_figen(file);
+	/* todo: necessary? */
+	/* file->f_ra = h_file->f_ra; */
+	if (!err)
+		return 0; /* success */
+
+	/* close all */
+	for (bindex = au_fbstart(file); bindex <= btail; bindex++)
+		au_set_h_fptr(file, bindex, NULL);
+	au_set_fbstart(file, -1);
+	au_set_fbend_dir(file, -1);
+
+out:
+	return err;
+}
+
+static int aufs_open_dir(struct inode *inode __maybe_unused,
+			 struct file *file)
+{
+	int err;
+	struct super_block *sb;
+	struct au_fidir *fidir;
+
+	err = -ENOMEM;
+	sb = file->f_dentry->d_sb;
+	si_read_lock(sb, AuLock_FLUSH);
+	fidir = au_fidir_alloc(sb);
+	if (fidir) {
+		err = au_do_open(file, do_open_dir, fidir);
+		if (unlikely(err))
+			kfree(fidir);
+	}
+	si_read_unlock(sb);
+	return err;
+}
+
+static int aufs_release_dir(struct inode *inode __maybe_unused,
+			    struct file *file)
+{
+	struct au_vdir *vdir_cache;
+	struct au_finfo *finfo;
+	struct au_fidir *fidir;
+	aufs_bindex_t bindex, bend;
+
+	finfo = au_fi(file);
+	fidir = finfo->fi_hdir;
+	if (fidir) {
+		vdir_cache = fidir->fd_vdir_cache; /* lock-free */
+		if (vdir_cache)
+			au_vdir_free(vdir_cache);
+
+		bindex = finfo->fi_btop;
+		if (bindex >= 0) {
+			/*
+			 * call fput() instead of filp_close(), since
+			 * there is no dnotify or lock on the lower file.
+			 */
+			bend = fidir->fd_bbot;
+			for (; bindex <= bend; bindex++)
+				au_set_h_fptr(file, bindex, NULL);
+		}
+		kfree(fidir);
+		finfo->fi_hdir = NULL;
+	}
+	au_finfo_fin(file);
+	return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_do_flush_dir(struct file *file, fl_owner_t id)
+{
+	int err;
+	aufs_bindex_t bindex, bend;
+	struct file *h_file;
+
+	err = 0;
+	bend = au_fbend_dir(file);
+	for (bindex = au_fbstart(file); !err && bindex <= bend; bindex++) {
+		h_file = au_hf_dir(file, bindex);
+		if (h_file)
+			err = vfsub_flush(h_file, id);
+	}
+	return err;
+}
+
+static int aufs_flush_dir(struct file *file, fl_owner_t id)
+{
+	return au_do_flush(file, id, au_do_flush_dir);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_do_fsync_dir_no_file(struct dentry *dentry, int datasync)
+{
+	int err;
+	aufs_bindex_t bend, bindex;
+	struct inode *inode;
+	struct super_block *sb;
+
+	err = 0;
+	sb = dentry->d_sb;
+	inode = dentry->d_inode;
+	IMustLock(inode);
+	bend = au_dbend(dentry);
+	for (bindex = au_dbstart(dentry); !err && bindex <= bend; bindex++) {
+		struct path h_path;
+
+		if (au_test_ro(sb, bindex, inode))
+			continue;
+		h_path.dentry = au_h_dptr(dentry, bindex);
+		if (!h_path.dentry)
+			continue;
+
+		h_path.mnt = au_sbr_mnt(sb, bindex);
+		err = vfsub_fsync(NULL, &h_path, datasync);
+	}
+
+	return err;
+}
+
+static int au_do_fsync_dir(struct file *file, int datasync)
+{
+	int err;
+	aufs_bindex_t bend, bindex;
+	struct file *h_file;
+	struct super_block *sb;
+	struct inode *inode;
+
+	err = au_reval_and_lock_fdi(file, reopen_dir, /*wlock*/1);
+	if (unlikely(err))
+		goto out;
+
+	sb = file->f_dentry->d_sb;
+	inode = file_inode(file);
+	bend = au_fbend_dir(file);
+	for (bindex = au_fbstart(file); !err && bindex <= bend; bindex++) {
+		h_file = au_hf_dir(file, bindex);
+		if (!h_file || au_test_ro(sb, bindex, inode))
+			continue;
+
+		err = vfsub_fsync(h_file, &h_file->f_path, datasync);
+	}
+
+out:
+	return err;
+}
+
+/*
+ * @file may be NULL (though in practice ->fsync is called with an open file)
+ */
+static int aufs_fsync_dir(struct file *file, loff_t start, loff_t end,
+			  int datasync)
+{
+	int err;
+	struct dentry *dentry;
+	struct super_block *sb;
+	struct mutex *mtx;
+
+	err = 0;
+	dentry = file->f_dentry;
+	mtx = &dentry->d_inode->i_mutex;
+	mutex_lock(mtx);
+	sb = dentry->d_sb;
+	si_noflush_read_lock(sb);
+	if (file)
+		err = au_do_fsync_dir(file, datasync);
+	else {
+		di_write_lock_child(dentry);
+		err = au_do_fsync_dir_no_file(dentry, datasync);
+	}
+	au_cpup_attr_timesizes(dentry->d_inode);
+	di_write_unlock(dentry);
+	if (file)
+		fi_write_unlock(file);
+
+	si_read_unlock(sb);
+	mutex_unlock(mtx);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int aufs_readdir(struct file *file, void *dirent, filldir_t filldir)
+{
+	int err;
+	struct dentry *dentry;
+	struct inode *inode, *h_inode;
+	struct super_block *sb;
+
+	dentry = file->f_dentry;
+	inode = dentry->d_inode;
+	IMustLock(inode);
+
+	sb = dentry->d_sb;
+	si_read_lock(sb, AuLock_FLUSH);
+	err = au_reval_and_lock_fdi(file, reopen_dir, /*wlock*/1);
+	if (unlikely(err))
+		goto out;
+	err = au_alive_dir(dentry);
+	if (!err)
+		err = au_vdir_init(file);
+	di_downgrade_lock(dentry, AuLock_IR);
+	if (unlikely(err))
+		goto out_unlock;
+
+	h_inode = au_h_iptr(inode, au_ibstart(inode));
+	if (!au_test_nfsd()) {
+		err = au_vdir_fill_de(file, dirent, filldir);
+		fsstack_copy_attr_atime(inode, h_inode);
+	} else {
+		/*
+		 * nfsd filldir may call lookup_one_len(), vfs_getattr(),
+		 * encode_fh() and others.
+		 */
+		atomic_inc(&h_inode->i_count);
+		di_read_unlock(dentry, AuLock_IR);
+		si_read_unlock(sb);
+		err = au_vdir_fill_de(file, dirent, filldir);
+		fsstack_copy_attr_atime(inode, h_inode);
+		fi_write_unlock(file);
+		iput(h_inode);
+
+		AuTraceErr(err);
+		return err;
+	}
+
+out_unlock:
+	di_read_unlock(dentry, AuLock_IR);
+	fi_write_unlock(file);
+out:
+	si_read_unlock(sb);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define AuTestEmpty_WHONLY	1
+#define AuTestEmpty_CALLED	(1 << 1)
+#define AuTestEmpty_SHWH	(1 << 2)
+#define au_ftest_testempty(flags, name)	((flags) & AuTestEmpty_##name)
+#define au_fset_testempty(flags, name) \
+	do { (flags) |= AuTestEmpty_##name; } while (0)
+#define au_fclr_testempty(flags, name) \
+	do { (flags) &= ~AuTestEmpty_##name; } while (0)
+
+#ifndef CONFIG_AUFS_SHWH
+#undef AuTestEmpty_SHWH
+#define AuTestEmpty_SHWH	0
+#endif
+
+struct test_empty_arg {
+	struct au_nhash *whlist;
+	unsigned int flags;
+	int err;
+	aufs_bindex_t bindex;
+};
+
+static int test_empty_cb(void *__arg, const char *__name, int namelen,
+			 loff_t offset __maybe_unused, u64 ino,
+			 unsigned int d_type)
+{
+	struct test_empty_arg *arg = __arg;
+	char *name = (void *)__name;
+
+	arg->err = 0;
+	au_fset_testempty(arg->flags, CALLED);
+	/* smp_mb(); */
+	if (name[0] == '.'
+	    && (namelen == 1 || (name[1] == '.' && namelen == 2)))
+		goto out; /* success */
+
+	if (namelen <= AUFS_WH_PFX_LEN
+	    || memcmp(name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) {
+		if (au_ftest_testempty(arg->flags, WHONLY)
+		    && !au_nhash_test_known_wh(arg->whlist, name, namelen))
+			arg->err = -ENOTEMPTY;
+		goto out;
+	}
+
+	name += AUFS_WH_PFX_LEN;
+	namelen -= AUFS_WH_PFX_LEN;
+	if (!au_nhash_test_known_wh(arg->whlist, name, namelen))
+		arg->err = au_nhash_append_wh
+			(arg->whlist, name, namelen, ino, d_type, arg->bindex,
+			 au_ftest_testempty(arg->flags, SHWH));
+
+out:
+	/* smp_mb(); */
+	AuTraceErr(arg->err);
+	return arg->err;
+}
+
+static int do_test_empty(struct dentry *dentry, struct test_empty_arg *arg)
+{
+	int err;
+	struct file *h_file;
+
+	h_file = au_h_open(dentry, arg->bindex,
+			   O_RDONLY | O_NONBLOCK | O_DIRECTORY | O_LARGEFILE,
+			   /*file*/NULL);
+	err = PTR_ERR(h_file);
+	if (IS_ERR(h_file))
+		goto out;
+
+	err = 0;
+	if (!au_opt_test(au_mntflags(dentry->d_sb), UDBA_NONE)
+	    && !file_inode(h_file)->i_nlink)
+		goto out_put;
+
+	do {
+		arg->err = 0;
+		au_fclr_testempty(arg->flags, CALLED);
+		/* smp_mb(); */
+		err = vfsub_readdir(h_file, test_empty_cb, arg);
+		if (err >= 0)
+			err = arg->err;
+	} while (!err && au_ftest_testempty(arg->flags, CALLED));
+
+out_put:
+	fput(h_file);
+	au_sbr_put(dentry->d_sb, arg->bindex);
+out:
+	return err;
+}
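+
+/*
+ * Note on the loop above: vfsub_readdir() may return before the whole
+ * directory has been consumed, so it is called repeatedly until one
+ * full pass triggers no callback at all (the CALLED flag stays clear).
+ */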
+
+struct do_test_empty_args {
+	int *errp;
+	struct dentry *dentry;
+	struct test_empty_arg *arg;
+};
+
+static void call_do_test_empty(void *args)
+{
+	struct do_test_empty_args *a = args;
+	*a->errp = do_test_empty(a->dentry, a->arg);
+}
+
+static int sio_test_empty(struct dentry *dentry, struct test_empty_arg *arg)
+{
+	int err, wkq_err;
+	struct dentry *h_dentry;
+	struct inode *h_inode;
+
+	h_dentry = au_h_dptr(dentry, arg->bindex);
+	h_inode = h_dentry->d_inode;
+	/* todo: i_mode changes anytime? */
+	mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD);
+	err = au_test_h_perm_sio(h_inode, MAY_EXEC | MAY_READ);
+	mutex_unlock(&h_inode->i_mutex);
+	if (!err)
+		err = do_test_empty(dentry, arg);
+	else {
+		struct do_test_empty_args args = {
+			.errp	= &err,
+			.dentry	= dentry,
+			.arg	= arg
+		};
+		unsigned int flags = arg->flags;
+
+		wkq_err = au_wkq_wait(call_do_test_empty, &args);
+		if (unlikely(wkq_err))
+			err = wkq_err;
+		arg->flags = flags;
+	}
+
+	return err;
+}
+
+int au_test_empty_lower(struct dentry *dentry)
+{
+	int err;
+	unsigned int rdhash;
+	aufs_bindex_t bindex, bstart, btail;
+	struct au_nhash whlist;
+	struct test_empty_arg arg;
+
+	SiMustAnyLock(dentry->d_sb);
+
+	rdhash = au_sbi(dentry->d_sb)->si_rdhash;
+	if (!rdhash)
+		rdhash = au_rdhash_est(au_dir_size(/*file*/NULL, dentry));
+	err = au_nhash_alloc(&whlist, rdhash, GFP_NOFS);
+	if (unlikely(err))
+		goto out;
+
+	arg.flags = 0;
+	arg.whlist = &whlist;
+	bstart = au_dbstart(dentry);
+	if (au_opt_test(au_mntflags(dentry->d_sb), SHWH))
+		au_fset_testempty(arg.flags, SHWH);
+	arg.bindex = bstart;
+	err = do_test_empty(dentry, &arg);
+	if (unlikely(err))
+		goto out_whlist;
+
+	au_fset_testempty(arg.flags, WHONLY);
+	btail = au_dbtaildir(dentry);
+	for (bindex = bstart + 1; !err && bindex <= btail; bindex++) {
+		struct dentry *h_dentry;
+
+		h_dentry = au_h_dptr(dentry, bindex);
+		if (h_dentry && h_dentry->d_inode) {
+			arg.bindex = bindex;
+			err = do_test_empty(dentry, &arg);
+		}
+	}
+
+out_whlist:
+	au_nhash_wh_free(&whlist);
+out:
+	return err;
+}
+
+int au_test_empty(struct dentry *dentry, struct au_nhash *whlist)
+{
+	int err;
+	struct test_empty_arg arg;
+	aufs_bindex_t bindex, btail;
+
+	err = 0;
+	arg.whlist = whlist;
+	arg.flags = AuTestEmpty_WHONLY;
+	if (au_opt_test(au_mntflags(dentry->d_sb), SHWH))
+		au_fset_testempty(arg.flags, SHWH);
+	btail = au_dbtaildir(dentry);
+	for (bindex = au_dbstart(dentry); !err && bindex <= btail; bindex++) {
+		struct dentry *h_dentry;
+
+		h_dentry = au_h_dptr(dentry, bindex);
+		if (h_dentry && h_dentry->d_inode) {
+			arg.bindex = bindex;
+			err = sio_test_empty(dentry, &arg);
+		}
+	}
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+const struct file_operations aufs_dir_fop = {
+	.owner		= THIS_MODULE,
+	.llseek		= default_llseek,
+	.read		= generic_read_dir,
+	.readdir	= aufs_readdir,
+	.unlocked_ioctl	= aufs_ioctl_dir,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= aufs_compat_ioctl_dir,
+#endif
+	.open		= aufs_open_dir,
+	.release	= aufs_release_dir,
+	.flush		= aufs_flush_dir,
+	.fsync		= aufs_fsync_dir
+};
diff --git a/fs/aufs/dir.h b/fs/aufs/dir.h
new file mode 100644
index 0000000..fb237ba
--- /dev/null
+++ b/fs/aufs/dir.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * directory operations
+ */
+
+#ifndef __AUFS_DIR_H__
+#define __AUFS_DIR_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+
+/* ---------------------------------------------------------------------- */
+
+/* need to be faster and smaller */
+
+struct au_nhash {
+	unsigned int		nh_num;
+	struct hlist_head	*nh_head;
+};
+
+struct au_vdir_destr {
+	unsigned char	len;
+	unsigned char	name[0];
+} __packed;
+
+struct au_vdir_dehstr {
+	struct hlist_node	hash;
+	struct au_vdir_destr	*str;
+} ____cacheline_aligned_in_smp;
+
+struct au_vdir_de {
+	ino_t			de_ino;
+	unsigned char		de_type;
+	/* caution: packed */
+	struct au_vdir_destr	de_str;
+} __packed;
+
+struct au_vdir_wh {
+	struct hlist_node	wh_hash;
+#ifdef CONFIG_AUFS_SHWH
+	ino_t			wh_ino;
+	aufs_bindex_t		wh_bindex;
+	unsigned char		wh_type;
+#else
+	aufs_bindex_t		wh_bindex;
+#endif
+	/* caution: packed */
+	struct au_vdir_destr	wh_str;
+} __packed;
+
+union au_vdir_deblk_p {
+	unsigned char		*deblk;
+	struct au_vdir_de	*de;
+};
+
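+/*
+ * The virtual directory: the merged contents are kept as an array of
+ * byte blocks (vd_deblk, vd_nblk of them, each vd_deblk_sz bytes);
+ * vd_last caches the position reached by the most recent read, while
+ * vd_version and vd_jiffy track when the cached contents must be
+ * rebuilt.
+ */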
+struct au_vdir {
+	unsigned char	**vd_deblk;
+	unsigned long	vd_nblk;
+	struct {
+		unsigned long		ul;
+		union au_vdir_deblk_p	p;
+	} vd_last;
+
+	unsigned long	vd_version;
+	unsigned int	vd_deblk_sz;
+	unsigned long	vd_jiffy;
+} ____cacheline_aligned_in_smp;
+
+/* ---------------------------------------------------------------------- */
+
+/* dir.c */
+extern const struct file_operations aufs_dir_fop;
+void au_add_nlink(struct inode *dir, struct inode *h_dir);
+void au_sub_nlink(struct inode *dir, struct inode *h_dir);
+loff_t au_dir_size(struct file *file, struct dentry *dentry);
+int au_test_empty_lower(struct dentry *dentry);
+int au_test_empty(struct dentry *dentry, struct au_nhash *whlist);
+
+/* vdir.c */
+unsigned int au_rdhash_est(loff_t sz);
+int au_nhash_alloc(struct au_nhash *nhash, unsigned int num_hash, gfp_t gfp);
+void au_nhash_wh_free(struct au_nhash *whlist);
+int au_nhash_test_longer_wh(struct au_nhash *whlist, aufs_bindex_t btgt,
+			    int limit);
+int au_nhash_test_known_wh(struct au_nhash *whlist, char *name, int nlen);
+int au_nhash_append_wh(struct au_nhash *whlist, char *name, int nlen, ino_t ino,
+		       unsigned int d_type, aufs_bindex_t bindex,
+		       unsigned char shwh);
+void au_vdir_free(struct au_vdir *vdir);
+int au_vdir_init(struct file *file);
+int au_vdir_fill_de(struct file *file, void *dirent, filldir_t filldir);
+
+/* ioctl.c */
+long aufs_ioctl_dir(struct file *file, unsigned int cmd, unsigned long arg);
+
+#ifdef CONFIG_AUFS_RDU
+/* rdu.c */
+long au_rdu_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+#ifdef CONFIG_COMPAT
+long au_rdu_compat_ioctl(struct file *file, unsigned int cmd,
+			 unsigned long arg);
+#endif
+#else
+static inline long au_rdu_ioctl(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	return -EINVAL;
+}
+#ifdef CONFIG_COMPAT
+static inline long au_rdu_compat_ioctl(struct file *file, unsigned int cmd,
+				       unsigned long arg)
+{
+	return -EINVAL;
+}
+#endif
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DIR_H__ */
diff --git a/fs/aufs/dynop.c b/fs/aufs/dynop.c
new file mode 100644
index 0000000..be35399
--- /dev/null
+++ b/fs/aufs/dynop.c
@@ -0,0 +1,379 @@
+/*
+ * Copyright (C) 2010-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * dynamically customizable operations for regular files
+ */
+
+#include "aufs.h"
+
+#define DyPrSym(key)	AuDbgSym(key->dk_op.dy_hop)
+
+/*
+ * How large will these lists be?
+ * Usually just a few elements, 20-30 at most for each, I guess.
+ */
+static struct au_splhead dynop[AuDyLast];
+
+static struct au_dykey *dy_gfind_get(struct au_splhead *spl, const void *h_op)
+{
+	struct au_dykey *key, *tmp;
+	struct list_head *head;
+
+	key = NULL;
+	head = &spl->head;
+	rcu_read_lock();
+	list_for_each_entry_rcu(tmp, head, dk_list)
+		if (tmp->dk_op.dy_hop == h_op) {
+			key = tmp;
+			kref_get(&key->dk_kref);
+			break;
+		}
+	rcu_read_unlock();
+
+	return key;
+}
+
+static struct au_dykey *dy_bradd(struct au_branch *br, struct au_dykey *key)
+{
+	struct au_dykey **k, *found;
+	const void *h_op = key->dk_op.dy_hop;
+	int i;
+
+	found = NULL;
+	k = br->br_dykey;
+	for (i = 0; i < AuBrDynOp; i++)
+		if (k[i]) {
+			if (k[i]->dk_op.dy_hop == h_op) {
+				found = k[i];
+				break;
+			}
+		} else
+			break;
+	if (!found) {
+		spin_lock(&br->br_dykey_lock);
+		for (; i < AuBrDynOp; i++)
+			if (k[i]) {
+				if (k[i]->dk_op.dy_hop == h_op) {
+					found = k[i];
+					break;
+				}
+			} else {
+				k[i] = key;
+				break;
+			}
+		spin_unlock(&br->br_dykey_lock);
+		BUG_ON(i == AuBrDynOp); /* expand the array */
+	}
+
+	return found;
+}
+
+/* kref_get() if @key is already added */
+static struct au_dykey *dy_gadd(struct au_splhead *spl, struct au_dykey *key)
+{
+	struct au_dykey *tmp, *found;
+	struct list_head *head;
+	const void *h_op = key->dk_op.dy_hop;
+
+	found = NULL;
+	head = &spl->head;
+	spin_lock(&spl->spin);
+	list_for_each_entry(tmp, head, dk_list)
+		if (tmp->dk_op.dy_hop == h_op) {
+			kref_get(&tmp->dk_kref);
+			found = tmp;
+			break;
+		}
+	if (!found)
+		list_add_rcu(&key->dk_list, head);
+	spin_unlock(&spl->spin);
+
+	if (!found)
+		DyPrSym(key);
+	return found;
+}
+
+static void dy_free_rcu(struct rcu_head *rcu)
+{
+	struct au_dykey *key;
+
+	key = container_of(rcu, struct au_dykey, dk_rcu);
+	DyPrSym(key);
+	kfree(key);
+}
+
+static void dy_free(struct kref *kref)
+{
+	struct au_dykey *key;
+	struct au_splhead *spl;
+
+	key = container_of(kref, struct au_dykey, dk_kref);
+	spl = dynop + key->dk_op.dy_type;
+	au_spl_del_rcu(&key->dk_list, spl);
+	call_rcu(&key->dk_rcu, dy_free_rcu);
+}
+
+void au_dy_put(struct au_dykey *key)
+{
+	kref_put(&key->dk_kref, dy_free);
+}
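+
+/*
+ * Lifetime sketch: keys sit on a global spinlock-protected list and
+ * are looked up lock-free under rcu_read_lock() (dy_gfind_get()); the
+ * final kref_put() unlinks the key via au_spl_del_rcu() and frees it
+ * from an RCU callback, so concurrent lock-free readers never touch
+ * freed memory.
+ */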
+
+/* ---------------------------------------------------------------------- */
+
+#define DyDbgSize(cnt, op)	AuDebugOn(cnt != sizeof(op)/sizeof(void *))
+
+#ifdef CONFIG_AUFS_DEBUG
+#define DyDbgDeclare(cnt)	unsigned int cnt = 0
+#define DyDbgInc(cnt)		do { cnt++; } while (0)
+#else
+#define DyDbgDeclare(cnt)	do {} while (0)
+#define DyDbgInc(cnt)		do {} while (0)
+#endif
+
+#define DySet(func, dst, src, h_op, h_sb) do {				\
+	DyDbgInc(cnt);							\
+	if (h_op->func) {						\
+		if (src.func)						\
+			dst.func = src.func;				\
+		else							\
+			AuDbg("%s %s\n", au_sbtype(h_sb), #func);	\
+	}								\
+} while (0)
+
+#define DySetForce(func, dst, src) do {		\
+	AuDebugOn(!src.func);			\
+	DyDbgInc(cnt);				\
+	dst.func = src.func;			\
+} while (0)
+
+#define DySetAop(func) \
+	DySet(func, dyaop->da_op, aufs_aop, h_aop, h_sb)
+#define DySetAopForce(func) \
+	DySetForce(func, dyaop->da_op, aufs_aop)
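+
+/*
+ * In short: DySet() installs the aufs version of a method only when
+ * the lower fs implements that method (and logs the case where aufs
+ * itself has none), while DySetForce() installs it unconditionally for
+ * the methods aufs always needs, such as ->readpage. The cnt
+ * bookkeeping lets DyDbgSize() verify that every member of the
+ * operation table was considered.
+ */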
+
+static void dy_aop(struct au_dykey *key, const void *h_op,
+		   struct super_block *h_sb __maybe_unused)
+{
+	struct au_dyaop *dyaop = (void *)key;
+	const struct address_space_operations *h_aop = h_op;
+	DyDbgDeclare(cnt);
+
+	AuDbg("%s\n", au_sbtype(h_sb));
+
+	DySetAop(writepage);
+	DySetAopForce(readpage);	/* force */
+	DySetAop(writepages);
+	DySetAop(set_page_dirty);
+	DySetAop(readpages);
+	DySetAop(write_begin);
+	DySetAop(write_end);
+	DySetAop(bmap);
+	DySetAop(invalidatepage);
+	DySetAop(releasepage);
+	DySetAop(freepage);
+	/* these two will be changed according to an aufs mount option */
+	DySetAop(direct_IO);
+	DySetAop(get_xip_mem);
+	DySetAop(migratepage);
+	DySetAop(launder_page);
+	DySetAop(is_partially_uptodate);
+	DySetAop(error_remove_page);
+	DySetAop(swap_activate);
+	DySetAop(swap_deactivate);
+
+	DyDbgSize(cnt, *h_aop);
+	dyaop->da_get_xip_mem = h_aop->get_xip_mem;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void dy_bug(struct kref *kref)
+{
+	BUG();
+}
+
+static struct au_dykey *dy_get(struct au_dynop *op, struct au_branch *br)
+{
+	struct au_dykey *key, *old;
+	struct au_splhead *spl;
+	struct op {
+		unsigned int sz;
+		void (*set)(struct au_dykey *key, const void *h_op,
+			    struct super_block *h_sb __maybe_unused);
+	};
+	static const struct op a[] = {
+		[AuDy_AOP] = {
+			.sz	= sizeof(struct au_dyaop),
+			.set	= dy_aop
+		}
+	};
+	const struct op *p;
+
+	spl = dynop + op->dy_type;
+	key = dy_gfind_get(spl, op->dy_hop);
+	if (key)
+		goto out_add; /* success */
+
+	p = a + op->dy_type;
+	key = kzalloc(p->sz, GFP_NOFS);
+	if (unlikely(!key)) {
+		key = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	key->dk_op.dy_hop = op->dy_hop;
+	kref_init(&key->dk_kref);
+	p->set(key, op->dy_hop, au_br_sb(br));
+	old = dy_gadd(spl, key);
+	if (old) {
+		kfree(key);
+		key = old;
+	}
+
+out_add:
+	old = dy_bradd(br, key);
+	if (old)
+		/* its ref-count should never be zero here */
+		kref_put(&key->dk_kref, dy_bug);
+out:
+	return key;
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Aufs prohibits O_DIRECT by default even if the branch supports it.
+ * This behaviour is necessary to return an error from open(O_DIRECT)
+ * instead of from the subsequent I/O. The dio mount option enables
+ * O_DIRECT and makes open(O_DIRECT) always succeed, but the subsequent
+ * I/O may then return an error. See the aufs manual for details.
+ *
+ * To keep this behaviour, aufs also has to set ->get_xip_mem to NULL,
+ * which may affect the performance of fadvise() and madvise().
+ */
+static void dy_adx(struct au_dyaop *dyaop, int do_dx)
+{
+	if (!do_dx) {
+		dyaop->da_op.direct_IO = NULL;
+		dyaop->da_op.get_xip_mem = NULL;
+	} else {
+		dyaop->da_op.direct_IO = aufs_aop.direct_IO;
+		dyaop->da_op.get_xip_mem = aufs_aop.get_xip_mem;
+		if (!dyaop->da_get_xip_mem)
+			dyaop->da_op.get_xip_mem = NULL;
+	}
+}
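+
+/*
+ * E.g. a mount with the dio option reaches here with do_dx set,
+ * re-enabling ->direct_IO (and ->get_xip_mem when the lower fs
+ * provides one); without dio both stay NULL, so open(O_DIRECT) fails
+ * early as described above.
+ */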
+
+static struct au_dyaop *dy_aget(struct au_branch *br,
+				const struct address_space_operations *h_aop,
+				int do_dx)
+{
+	struct au_dyaop *dyaop;
+	struct au_dynop op;
+
+	op.dy_type = AuDy_AOP;
+	op.dy_haop = h_aop;
+	dyaop = (void *)dy_get(&op, br);
+	if (IS_ERR(dyaop))
+		goto out;
+	dy_adx(dyaop, do_dx);
+
+out:
+	return dyaop;
+}
+
+int au_dy_iaop(struct inode *inode, aufs_bindex_t bindex,
+		struct inode *h_inode)
+{
+	int err, do_dx;
+	struct super_block *sb;
+	struct au_branch *br;
+	struct au_dyaop *dyaop;
+
+	AuDebugOn(!S_ISREG(h_inode->i_mode));
+	IiMustWriteLock(inode);
+
+	sb = inode->i_sb;
+	br = au_sbr(sb, bindex);
+	do_dx = !!au_opt_test(au_mntflags(sb), DIO);
+	dyaop = dy_aget(br, h_inode->i_mapping->a_ops, do_dx);
+	err = PTR_ERR(dyaop);
+	if (IS_ERR(dyaop))
+		/* unnecessary to call dy_fput() */
+		goto out;
+
+	err = 0;
+	inode->i_mapping->a_ops = &dyaop->da_op;
+
+out:
+	return err;
+}
+
+/*
+ * Is it safe to replace a_ops while the inode/file is in operation?
+ * Yes, I hope so.
+ */
+int au_dy_irefresh(struct inode *inode)
+{
+	int err;
+	aufs_bindex_t bstart;
+	struct inode *h_inode;
+
+	err = 0;
+	if (S_ISREG(inode->i_mode)) {
+		bstart = au_ibstart(inode);
+		h_inode = au_h_iptr(inode, bstart);
+		err = au_dy_iaop(inode, bstart, h_inode);
+	}
+	return err;
+}
+
+void au_dy_arefresh(int do_dx)
+{
+	struct au_splhead *spl;
+	struct list_head *head;
+	struct au_dykey *key;
+
+	spl = dynop + AuDy_AOP;
+	head = &spl->head;
+	spin_lock(&spl->spin);
+	list_for_each_entry(key, head, dk_list)
+		dy_adx((void *)key, do_dx);
+	spin_unlock(&spl->spin);
+}
+
+/* ---------------------------------------------------------------------- */
+
+void __init au_dy_init(void)
+{
+	int i;
+
+	/* make sure that a 'struct au_dykey *' can stand for any key type */
+	BUILD_BUG_ON(offsetof(struct au_dyaop, da_key));
+
+	for (i = 0; i < AuDyLast; i++)
+		au_spl_init(dynop + i);
+}
+
+void au_dy_fin(void)
+{
+	int i;
+
+	for (i = 0; i < AuDyLast; i++)
+		WARN_ON(!list_empty(&dynop[i].head));
+}
diff --git a/fs/aufs/dynop.h b/fs/aufs/dynop.h
new file mode 100644
index 0000000..3f2c9aa
--- /dev/null
+++ b/fs/aufs/dynop.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2010-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * dynamically customizable operations (for regular files only)
+ */
+
+#ifndef __AUFS_DYNOP_H__
+#define __AUFS_DYNOP_H__
+
+#ifdef __KERNEL__
+
+#include "inode.h"
+
+enum {AuDy_AOP, AuDyLast};
+
+struct au_dynop {
+	int						dy_type;
+	union {
+		const void				*dy_hop;
+		const struct address_space_operations	*dy_haop;
+	};
+};
+
+struct au_dykey {
+	union {
+		struct list_head	dk_list;
+		struct rcu_head		dk_rcu;
+	};
+	struct au_dynop		dk_op;
+
+	/*
+	 * while this key is in the branch-local array, its kref is held;
+	 * when the branch is removed, the kref is put.
+	 */
+	struct kref		dk_kref;
+};
+
+/* not a union, since the sizes of these operation sets differ widely */
+struct au_dyaop {
+	struct au_dykey			da_key;
+	struct address_space_operations	da_op; /* not const */
+	int (*da_get_xip_mem)(struct address_space *, pgoff_t, int,
+			      void **, unsigned long *);
+};
+
+/* ---------------------------------------------------------------------- */
+
+/* dynop.c */
+struct au_branch;
+void au_dy_put(struct au_dykey *key);
+int au_dy_iaop(struct inode *inode, aufs_bindex_t bindex,
+		struct inode *h_inode);
+int au_dy_irefresh(struct inode *inode);
+void au_dy_arefresh(int do_dio);
+
+void __init au_dy_init(void);
+void au_dy_fin(void);
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DYNOP_H__ */
diff --git a/fs/aufs/export.c b/fs/aufs/export.c
new file mode 100644
index 0000000..876decbd9
--- /dev/null
+++ b/fs/aufs/export.c
@@ -0,0 +1,826 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * export via nfs
+ */
+
+#include <linux/exportfs.h>
+#include <linux/fs_struct.h>
+#include <linux/namei.h>
+#include <linux/nsproxy.h>
+#include <linux/random.h>
+#include <linux/writeback.h>
+#include "../fs/mount.h"
+#include "aufs.h"
+
+union conv {
+#ifdef CONFIG_AUFS_INO_T_64
+	__u32 a[2];
+#else
+	__u32 a[1];
+#endif
+	ino_t ino;
+};
+
+static ino_t decode_ino(__u32 *a)
+{
+	union conv u;
+
+	BUILD_BUG_ON(sizeof(u.ino) != sizeof(u.a));
+	u.a[0] = a[0];
+#ifdef CONFIG_AUFS_INO_T_64
+	u.a[1] = a[1];
+#endif
+	return u.ino;
+}
+
+static void encode_ino(__u32 *a, ino_t ino)
+{
+	union conv u;
+
+	u.ino = ino;
+	a[0] = u.a[0];
+#ifdef CONFIG_AUFS_INO_T_64
+	a[1] = u.a[1];
+#endif
+}
+
+/* NFS file handle */
+enum {
+	Fh_br_id,
+	Fh_sigen,
+#ifdef CONFIG_AUFS_INO_T_64
+	/* support 64bit inode number */
+	Fh_ino1,
+	Fh_ino2,
+	Fh_dir_ino1,
+	Fh_dir_ino2,
+#else
+	Fh_ino1,
+	Fh_dir_ino1,
+#endif
+	Fh_igen,
+	Fh_h_type,
+	Fh_tail,
+
+	Fh_ino = Fh_ino1,
+	Fh_dir_ino = Fh_dir_ino1
+};
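+
+/*
+ * The resulting layout (with CONFIG_AUFS_INO_T_64):
+ *
+ *	fh[0] br_id, fh[1] sigen, fh[2-3] ino, fh[4-5] dir_ino,
+ *	fh[6] igen, fh[7] the lower fs handle type, and from fh[8]
+ *	onwards the handle encoded by the lower fs itself.
+ */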
+
+static int au_test_anon(struct dentry *dentry)
+{
+	/* note: read d_flags without d_lock */
+	return !!(dentry->d_flags & DCACHE_DISCONNECTED);
+}
+
+int au_test_nfsd(void)
+{
+	int ret;
+	struct task_struct *tsk = current;
+	char comm[sizeof(tsk->comm)];
+
+	ret = 0;
+	if (tsk->flags & PF_KTHREAD) {
+		get_task_comm(comm, tsk);
+		ret = !strcmp(comm, "nfsd");
+	}
+
+	return ret;
+}
+
+/* ---------------------------------------------------------------------- */
+/* inode generation external table */
+
+void au_xigen_inc(struct inode *inode)
+{
+	loff_t pos;
+	ssize_t sz;
+	__u32 igen;
+	struct super_block *sb;
+	struct au_sbinfo *sbinfo;
+
+	sb = inode->i_sb;
+	AuDebugOn(!au_opt_test(au_mntflags(sb), XINO));
+
+	sbinfo = au_sbi(sb);
+	pos = inode->i_ino;
+	pos *= sizeof(igen);
+	igen = inode->i_generation + 1;
+	sz = xino_fwrite(sbinfo->si_xwrite, sbinfo->si_xigen, &igen,
+			 sizeof(igen), &pos);
+	if (sz == sizeof(igen))
+		return; /* success */
+
+	if (unlikely(sz >= 0))
+		AuIOErr("xigen error (%zd)\n", sz);
+}
+
+int au_xigen_new(struct inode *inode)
+{
+	int err;
+	loff_t pos;
+	ssize_t sz;
+	struct super_block *sb;
+	struct au_sbinfo *sbinfo;
+	struct file *file;
+
+	err = 0;
+	/* todo: dirty, at mount time */
+	if (inode->i_ino == AUFS_ROOT_INO)
+		goto out;
+	sb = inode->i_sb;
+	SiMustAnyLock(sb);
+	if (unlikely(!au_opt_test(au_mntflags(sb), XINO)))
+		goto out;
+
+	err = -EFBIG;
+	pos = inode->i_ino;
+	if (unlikely(au_loff_max / sizeof(inode->i_generation) - 1 < pos)) {
+		AuIOErr1("too large i%lld\n", pos);
+		goto out;
+	}
+	pos *= sizeof(inode->i_generation);
+
+	err = 0;
+	sbinfo = au_sbi(sb);
+	file = sbinfo->si_xigen;
+	BUG_ON(!file);
+
+	if (vfsub_f_size_read(file)
+	    < pos + sizeof(inode->i_generation)) {
+		inode->i_generation = atomic_inc_return(&sbinfo->si_xigen_next);
+		sz = xino_fwrite(sbinfo->si_xwrite, file, &inode->i_generation,
+				 sizeof(inode->i_generation), &pos);
+	} else
+		sz = xino_fread(sbinfo->si_xread, file, &inode->i_generation,
+				sizeof(inode->i_generation), &pos);
+	if (sz == sizeof(inode->i_generation))
+		goto out; /* success */
+
+	err = sz;
+	if (unlikely(sz >= 0)) {
+		err = -EIO;
+		AuIOErr("xigen error (%zd)\n", sz);
+	}
+
+out:
+	return err;
+}
+
+int au_xigen_set(struct super_block *sb, struct file *base)
+{
+	int err;
+	struct au_sbinfo *sbinfo;
+	struct file *file;
+
+	SiMustWriteLock(sb);
+
+	sbinfo = au_sbi(sb);
+	file = au_xino_create2(base, sbinfo->si_xigen);
+	err = PTR_ERR(file);
+	if (IS_ERR(file))
+		goto out;
+	err = 0;
+	if (sbinfo->si_xigen)
+		fput(sbinfo->si_xigen);
+	sbinfo->si_xigen = file;
+
+out:
+	return err;
+}
+
+void au_xigen_clr(struct super_block *sb)
+{
+	struct au_sbinfo *sbinfo;
+
+	SiMustWriteLock(sb);
+
+	sbinfo = au_sbi(sb);
+	if (sbinfo->si_xigen) {
+		fput(sbinfo->si_xigen);
+		sbinfo->si_xigen = NULL;
+	}
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct dentry *decode_by_ino(struct super_block *sb, ino_t ino,
+				    ino_t dir_ino)
+{
+	struct dentry *dentry, *d;
+	struct inode *inode;
+	unsigned int sigen;
+
+	dentry = NULL;
+	inode = ilookup(sb, ino);
+	if (!inode)
+		goto out;
+
+	dentry = ERR_PTR(-ESTALE);
+	sigen = au_sigen(sb);
+	if (unlikely(is_bad_inode(inode)
+		     || IS_DEADDIR(inode)
+		     || sigen != au_iigen(inode, NULL)))
+		goto out_iput;
+
+	dentry = NULL;
+	if (!dir_ino || S_ISDIR(inode->i_mode))
+		dentry = d_find_alias(inode);
+	else {
+		spin_lock(&inode->i_lock);
+		hlist_for_each_entry(d, &inode->i_dentry, d_alias) {
+			spin_lock(&d->d_lock);
+			if (!au_test_anon(d)
+			    && d->d_parent->d_inode->i_ino == dir_ino) {
+				dentry = dget_dlock(d);
+				spin_unlock(&d->d_lock);
+				break;
+			}
+			spin_unlock(&d->d_lock);
+		}
+		spin_unlock(&inode->i_lock);
+	}
+	if (unlikely(dentry && au_digen_test(dentry, sigen))) {
+		/* need to refresh */
+		dput(dentry);
+		dentry = NULL;
+	}
+
+out_iput:
+	iput(inode);
+out:
+	AuTraceErrPtr(dentry);
+	return dentry;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* todo: dirty? */
+/* if exportfs_decode_fh() were passed a vfsmount*, we could be happy */
+
+struct au_compare_mnt_args {
+	/* input */
+	struct super_block *sb;
+
+	/* output */
+	struct vfsmount *mnt;
+};
+
+static int au_compare_mnt(struct vfsmount *mnt, void *arg)
+{
+	struct au_compare_mnt_args *a = arg;
+
+	if (mnt->mnt_sb != a->sb)
+		return 0;
+	a->mnt = mntget(mnt);
+	return 1;
+}
+
+static struct vfsmount *au_mnt_get(struct super_block *sb)
+{
+	int err;
+	struct path root;
+	struct au_compare_mnt_args args = {
+		.sb = sb
+	};
+
+	get_fs_root(current->fs, &root);
+	br_read_lock(&vfsmount_lock);
+	err = iterate_mounts(au_compare_mnt, &args, root.mnt);
+	br_read_unlock(&vfsmount_lock);
+	path_put(&root);
+	AuDebugOn(!err);
+	AuDebugOn(!args.mnt);
+	return args.mnt;
+}
+
+struct au_nfsd_si_lock {
+	unsigned int sigen;
+	aufs_bindex_t bindex, br_id;
+	unsigned char force_lock;
+};
+
+static int si_nfsd_read_lock(struct super_block *sb,
+			     struct au_nfsd_si_lock *nsi_lock)
+{
+	int err;
+	aufs_bindex_t bindex;
+
+	si_read_lock(sb, AuLock_FLUSH);
+
+	/* the branch id may have wrapped around */
+	err = 0;
+	bindex = au_br_index(sb, nsi_lock->br_id);
+	if (bindex >= 0 && nsi_lock->sigen + AUFS_BRANCH_MAX > au_sigen(sb))
+		goto out; /* success */
+
+	err = -ESTALE;
+	bindex = -1;
+	if (!nsi_lock->force_lock)
+		si_read_unlock(sb);
+
+out:
+	nsi_lock->bindex = bindex;
+	return err;
+}
+
+struct find_name_by_ino {
+	int called, found;
+	ino_t ino;
+	char *name;
+	int namelen;
+};
+
+static int
+find_name_by_ino(void *arg, const char *name, int namelen, loff_t offset,
+		 u64 ino, unsigned int d_type)
+{
+	struct find_name_by_ino *a = arg;
+
+	a->called++;
+	if (a->ino != ino)
+		return 0;
+
+	memcpy(a->name, name, namelen);
+	a->namelen = namelen;
+	a->found = 1;
+	return 1;
+}
+
+static struct dentry *au_lkup_by_ino(struct path *path, ino_t ino,
+				     struct au_nfsd_si_lock *nsi_lock)
+{
+	struct dentry *dentry, *parent;
+	struct file *file;
+	struct inode *dir;
+	struct find_name_by_ino arg;
+	int err;
+
+	parent = path->dentry;
+	if (nsi_lock)
+		si_read_unlock(parent->d_sb);
+	file = vfsub_dentry_open(path, au_dir_roflags);
+	dentry = (void *)file;
+	if (IS_ERR(file))
+		goto out;
+
+	dentry = ERR_PTR(-ENOMEM);
+	arg.name = (void *)__get_free_page(GFP_NOFS);
+	if (unlikely(!arg.name))
+		goto out_file;
+	arg.ino = ino;
+	arg.found = 0;
+	do {
+		arg.called = 0;
+		/* smp_mb(); */
+		err = vfsub_readdir(file, find_name_by_ino, &arg);
+	} while (!err && !arg.found && arg.called);
+	dentry = ERR_PTR(err);
+	if (unlikely(err))
+		goto out_name;
+	/* instead of ENOENT */
+	dentry = ERR_PTR(-ESTALE);
+	if (!arg.found)
+		goto out_name;
+
+	/* do not call vfsub_lkup_one() */
+	dir = parent->d_inode;
+	mutex_lock(&dir->i_mutex);
+	dentry = vfsub_lookup_one_len(arg.name, parent, arg.namelen);
+	mutex_unlock(&dir->i_mutex);
+	AuTraceErrPtr(dentry);
+	if (IS_ERR(dentry))
+		goto out_name;
+	AuDebugOn(au_test_anon(dentry));
+	if (unlikely(!dentry->d_inode)) {
+		dput(dentry);
+		dentry = ERR_PTR(-ENOENT);
+	}
+
+out_name:
+	free_page((unsigned long)arg.name);
+out_file:
+	fput(file);
+out:
+	if (unlikely(nsi_lock
+		     && si_nfsd_read_lock(parent->d_sb, nsi_lock) < 0))
+		if (!IS_ERR(dentry)) {
+			dput(dentry);
+			dentry = ERR_PTR(-ESTALE);
+		}
+	AuTraceErrPtr(dentry);
+	return dentry;
+}
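+
+/*
+ * Strategy of au_lkup_by_ino(): open the lower parent directory, scan
+ * it with vfsub_readdir() until find_name_by_ino() matches the target
+ * inode number, then look the recovered name up under the parent's
+ * i_mutex to obtain a properly hashed dentry.
+ */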
+
+static struct dentry *decode_by_dir_ino(struct super_block *sb, ino_t ino,
+					ino_t dir_ino,
+					struct au_nfsd_si_lock *nsi_lock)
+{
+	struct dentry *dentry;
+	struct path path;
+
+	if (dir_ino != AUFS_ROOT_INO) {
+		path.dentry = decode_by_ino(sb, dir_ino, 0);
+		dentry = path.dentry;
+		if (!path.dentry || IS_ERR(path.dentry))
+			goto out;
+		AuDebugOn(au_test_anon(path.dentry));
+	} else
+		path.dentry = dget(sb->s_root);
+
+	path.mnt = au_mnt_get(sb);
+	dentry = au_lkup_by_ino(&path, ino, nsi_lock);
+	path_put(&path);
+
+out:
+	AuTraceErrPtr(dentry);
+	return dentry;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int h_acceptable(void *expv, struct dentry *dentry)
+{
+	return 1;
+}
+
+static char *au_build_path(struct dentry *h_parent, struct path *h_rootpath,
+			   char *buf, int len, struct super_block *sb)
+{
+	char *p;
+	int n;
+	struct path path;
+
+	p = d_path(h_rootpath, buf, len);
+	if (IS_ERR(p))
+		goto out;
+	n = strlen(p);
+
+	path.mnt = h_rootpath->mnt;
+	path.dentry = h_parent;
+	p = d_path(&path, buf, len);
+	if (IS_ERR(p))
+		goto out;
+	if (n != 1)
+		p += n;
+
+	path.mnt = au_mnt_get(sb);
+	path.dentry = sb->s_root;
+	p = d_path(&path, buf, len - strlen(p));
+	mntput(path.mnt);
+	if (IS_ERR(p))
+		goto out;
+	if (n != 1)
+		p[strlen(p)] = '/';
+
+out:
+	AuTraceErrPtr(p);
+	return p;
+}
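+
+/*
+ * What au_build_path() does, roughly: the first two d_path() calls
+ * yield h_parent's lower path with the branch-root prefix stripped
+ * (the "p += n"); the third d_path() renders the aufs root path
+ * immediately in front of that suffix in the same buffer, and the
+ * final p[strlen(p)] = '/' re-joins the two pieces.
+ */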
+
+static
+struct dentry *decode_by_path(struct super_block *sb, ino_t ino, __u32 *fh,
+			      int fh_len, struct au_nfsd_si_lock *nsi_lock)
+{
+	struct dentry *dentry, *h_parent, *root;
+	struct super_block *h_sb;
+	char *pathname, *p;
+	struct vfsmount *h_mnt;
+	struct au_branch *br;
+	int err;
+	struct path path;
+
+	br = au_sbr(sb, nsi_lock->bindex);
+	h_mnt = au_br_mnt(br);
+	h_sb = h_mnt->mnt_sb;
+	/* todo: call lower fh_to_dentry()? fh_to_parent()? */
+	h_parent = exportfs_decode_fh(h_mnt, (void *)(fh + Fh_tail),
+				      fh_len - Fh_tail, fh[Fh_h_type],
+				      h_acceptable, /*context*/NULL);
+	dentry = h_parent;
+	if (unlikely(!h_parent || IS_ERR(h_parent))) {
+		AuWarn1("%s decode_fh failed, %ld\n",
+			au_sbtype(h_sb), PTR_ERR(h_parent));
+		goto out;
+	}
+	dentry = NULL;
+	if (unlikely(au_test_anon(h_parent))) {
+		AuWarn1("%s decode_fh returned a disconnected dentry\n",
+			au_sbtype(h_sb));
+		goto out_h_parent;
+	}
+
+	dentry = ERR_PTR(-ENOMEM);
+	pathname = (void *)__get_free_page(GFP_NOFS);
+	if (unlikely(!pathname))
+		goto out_h_parent;
+
+	root = sb->s_root;
+	path.mnt = h_mnt;
+	di_read_lock_parent(root, !AuLock_IR);
+	path.dentry = au_h_dptr(root, nsi_lock->bindex);
+	di_read_unlock(root, !AuLock_IR);
+	p = au_build_path(h_parent, &path, pathname, PAGE_SIZE, sb);
+	dentry = (void *)p;
+	if (IS_ERR(p))
+		goto out_pathname;
+
+	si_read_unlock(sb);
+	err = vfsub_kern_path(p, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
+	dentry = ERR_PTR(err);
+	if (unlikely(err))
+		goto out_relock;
+
+	dentry = ERR_PTR(-ENOENT);
+	AuDebugOn(au_test_anon(path.dentry));
+	if (unlikely(!path.dentry->d_inode))
+		goto out_path;
+
+	if (ino != path.dentry->d_inode->i_ino)
+		dentry = au_lkup_by_ino(&path, ino, /*nsi_lock*/NULL);
+	else
+		dentry = dget(path.dentry);
+
+out_path:
+	path_put(&path);
+out_relock:
+	if (unlikely(si_nfsd_read_lock(sb, nsi_lock) < 0))
+		if (!IS_ERR(dentry)) {
+			dput(dentry);
+			dentry = ERR_PTR(-ESTALE);
+		}
+out_pathname:
+	free_page((unsigned long)pathname);
+out_h_parent:
+	dput(h_parent);
+out:
+	AuTraceErrPtr(dentry);
+	return dentry;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct dentry *
+aufs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len,
+		  int fh_type)
+{
+	struct dentry *dentry;
+	__u32 *fh = fid->raw;
+	struct au_branch *br;
+	ino_t ino, dir_ino;
+	struct au_nfsd_si_lock nsi_lock = {
+		.force_lock	= 0
+	};
+
+	dentry = ERR_PTR(-ESTALE);
+	/* it should never happen, but the file handle is unreliable */
+	if (unlikely(fh_len < Fh_tail))
+		goto out;
+	nsi_lock.sigen = fh[Fh_sigen];
+	nsi_lock.br_id = fh[Fh_br_id];
+
+	/* the branch id may have wrapped around */
+	br = NULL;
+	if (unlikely(si_nfsd_read_lock(sb, &nsi_lock)))
+		goto out;
+	nsi_lock.force_lock = 1;
+
+	/* is this inode still cached? */
+	ino = decode_ino(fh + Fh_ino);
+	/* it should never happen */
+	if (unlikely(ino == AUFS_ROOT_INO))
+		goto out;
+
+	dir_ino = decode_ino(fh + Fh_dir_ino);
+	dentry = decode_by_ino(sb, ino, dir_ino);
+	if (IS_ERR(dentry))
+		goto out_unlock;
+	if (dentry)
+		goto accept;
+
+	/* is the parent dir cached? */
+	br = au_sbr(sb, nsi_lock.bindex);
+	atomic_inc(&br->br_count);
+	dentry = decode_by_dir_ino(sb, ino, dir_ino, &nsi_lock);
+	if (IS_ERR(dentry))
+		goto out_unlock;
+	if (dentry)
+		goto accept;
+
+	/* lookup path */
+	dentry = decode_by_path(sb, ino, fh, fh_len, &nsi_lock);
+	if (IS_ERR(dentry))
+		goto out_unlock;
+	if (unlikely(!dentry))
+		/* todo?: make it ESTALE */
+		goto out_unlock;
+
+accept:
+	if (!au_digen_test(dentry, au_sigen(sb))
+	    && dentry->d_inode->i_generation == fh[Fh_igen])
+		goto out_unlock; /* success */
+
+	dput(dentry);
+	dentry = ERR_PTR(-ESTALE);
+out_unlock:
+	if (br)
+		atomic_dec(&br->br_count);
+	si_read_unlock(sb);
+out:
+	AuTraceErrPtr(dentry);
+	return dentry;
+}
+
+#if 0 /* reserved for future use */
+/* support subtreecheck option */
+static struct dentry *aufs_fh_to_parent(struct super_block *sb, struct fid *fid,
+					int fh_len, int fh_type)
+{
+	struct dentry *parent;
+	__u32 *fh = fid->raw;
+	ino_t dir_ino;
+
+	dir_ino = decode_ino(fh + Fh_dir_ino);
+	parent = decode_by_ino(sb, dir_ino, 0);
+	if (IS_ERR(parent))
+		goto out;
+	if (!parent)
+		parent = decode_by_path(sb, au_br_index(sb, fh[Fh_br_id]),
+					dir_ino, fh, fh_len);
+
+out:
+	AuTraceErrPtr(parent);
+	return parent;
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+static int aufs_encode_fh(struct inode *inode, __u32 *fh, int *max_len,
+			  struct inode *dir)
+{
+	int err;
+	aufs_bindex_t bindex;
+	struct super_block *sb, *h_sb;
+	struct dentry *dentry, *parent, *h_parent;
+	struct inode *h_dir;
+	struct au_branch *br;
+
+	err = -ENOSPC;
+	if (unlikely(*max_len <= Fh_tail)) {
+		AuWarn1("NFSv2 client (max_len %d)?\n", *max_len);
+		goto out;
+	}
+
+	err = FILEID_ROOT;
+	if (inode->i_ino == AUFS_ROOT_INO) {
+		AuDebugOn(inode->i_ino != AUFS_ROOT_INO);
+		goto out;
+	}
+
+	h_parent = NULL;
+	sb = inode->i_sb;
+	err = si_read_lock(sb, AuLock_FLUSH);
+	if (unlikely(err))
+		goto out;
+
+#ifdef CONFIG_AUFS_DEBUG
+	if (unlikely(!au_opt_test(au_mntflags(sb), XINO)))
+		AuWarn1("NFS-exporting requires xino\n");
+#endif
+	err = -EIO;
+	parent = NULL;
+	ii_read_lock_child(inode);
+	bindex = au_ibstart(inode);
+	if (!dir) {
+		dentry = d_find_alias(inode);
+		if (unlikely(!dentry))
+			goto out_unlock;
+		AuDebugOn(au_test_anon(dentry));
+		parent = dget_parent(dentry);
+		dput(dentry);
+		if (unlikely(!parent))
+			goto out_unlock;
+		dir = parent->d_inode;
+	}
+
+	ii_read_lock_parent(dir);
+	h_dir = au_h_iptr(dir, bindex);
+	ii_read_unlock(dir);
+	if (unlikely(!h_dir))
+		goto out_parent;
+	h_parent = d_find_alias(h_dir);
+	if (unlikely(!h_parent))
+		goto out_hparent;
+
+	err = -EPERM;
+	br = au_sbr(sb, bindex);
+	h_sb = au_br_sb(br);
+	if (unlikely(!h_sb->s_export_op)) {
+		AuErr1("%s branch is not exportable\n", au_sbtype(h_sb));
+		goto out_hparent;
+	}
+
+	fh[Fh_br_id] = br->br_id;
+	fh[Fh_sigen] = au_sigen(sb);
+	encode_ino(fh + Fh_ino, inode->i_ino);
+	encode_ino(fh + Fh_dir_ino, dir->i_ino);
+	fh[Fh_igen] = inode->i_generation;
+
+	*max_len -= Fh_tail;
+	fh[Fh_h_type] = exportfs_encode_fh(h_parent, (void *)(fh + Fh_tail),
+					   max_len,
+					   /*connectable or subtreecheck*/0);
+	err = fh[Fh_h_type];
+	*max_len += Fh_tail;
+	/* todo: macros? */
+	if (err != FILEID_INVALID)
+		err = 99;
+	else
+		AuWarn1("%s encode_fh failed\n", au_sbtype(h_sb));
+
+out_hparent:
+	dput(h_parent);
+out_parent:
+	dput(parent);
+out_unlock:
+	ii_read_unlock(inode);
+	si_read_unlock(sb);
+out:
+	if (unlikely(err < 0))
+		err = FILEID_INVALID;
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int aufs_commit_metadata(struct inode *inode)
+{
+	int err;
+	aufs_bindex_t bindex;
+	struct super_block *sb;
+	struct inode *h_inode;
+	int (*f)(struct inode *inode);
+
+	sb = inode->i_sb;
+	si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+	ii_write_lock_child(inode);
+	bindex = au_ibstart(inode);
+	AuDebugOn(bindex < 0);
+	h_inode = au_h_iptr(inode, bindex);
+
+	f = h_inode->i_sb->s_export_op->commit_metadata;
+	if (f)
+		err = f(h_inode);
+	else {
+		struct writeback_control wbc = {
+			.sync_mode	= WB_SYNC_ALL,
+			.nr_to_write	= 0 /* metadata only */
+		};
+
+		err = sync_inode(h_inode, &wbc);
+	}
+
+	au_cpup_attr_timesizes(inode);
+	ii_write_unlock(inode);
+	si_read_unlock(sb);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct export_operations aufs_export_op = {
+	.fh_to_dentry		= aufs_fh_to_dentry,
+	/* .fh_to_parent	= aufs_fh_to_parent, */
+	.encode_fh		= aufs_encode_fh,
+	.commit_metadata	= aufs_commit_metadata
+};
+
+void au_export_init(struct super_block *sb)
+{
+	struct au_sbinfo *sbinfo;
+	__u32 u;
+
+	sb->s_export_op = &aufs_export_op;
+	sbinfo = au_sbi(sb);
+	sbinfo->si_xigen = NULL;
+	get_random_bytes(&u, sizeof(u));
+	BUILD_BUG_ON(sizeof(u) != sizeof(int));
+	atomic_set(&sbinfo->si_xigen_next, u);
+}
diff --git a/fs/aufs/f_op.c b/fs/aufs/f_op.c
new file mode 100644
index 0000000..400a0cd
--- /dev/null
+++ b/fs/aufs/f_op.c
@@ -0,0 +1,721 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * file and vm operations
+ */
+
+#include <linux/aio.h>
+#include <linux/fs_stack.h>
+#include <linux/mman.h>
+#include <linux/security.h>
+#include "aufs.h"
+
+int au_do_open_nondir(struct file *file, int flags)
+{
+	int err;
+	aufs_bindex_t bindex;
+	struct file *h_file;
+	struct dentry *dentry;
+	struct au_finfo *finfo;
+
+	FiMustWriteLock(file);
+
+	dentry = file->f_dentry;
+	err = au_d_alive(dentry);
+	if (unlikely(err))
+		goto out;
+
+	finfo = au_fi(file);
+	memset(&finfo->fi_htop, 0, sizeof(finfo->fi_htop));
+	atomic_set(&finfo->fi_mmapped, 0);
+	bindex = au_dbstart(dentry);
+	h_file = au_h_open(dentry, bindex, flags, file);
+	if (IS_ERR(h_file))
+		err = PTR_ERR(h_file);
+	else {
+		au_set_fbstart(file, bindex);
+		au_set_h_fptr(file, bindex, h_file);
+		au_update_figen(file);
+		/* todo: necessary? */
+		/* file->f_ra = h_file->f_ra; */
+	}
+
+out:
+	return err;
+}
+
+static int aufs_open_nondir(struct inode *inode __maybe_unused,
+			    struct file *file)
+{
+	int err;
+	struct super_block *sb;
+
+	AuDbg("%.*s, f_flags 0x%x, f_mode 0x%x\n",
+	      AuDLNPair(file->f_dentry), vfsub_file_flags(file),
+	      file->f_mode);
+
+	sb = file->f_dentry->d_sb;
+	si_read_lock(sb, AuLock_FLUSH);
+	err = au_do_open(file, au_do_open_nondir, /*fidir*/NULL);
+	si_read_unlock(sb);
+	return err;
+}
+
+int aufs_release_nondir(struct inode *inode __maybe_unused, struct file *file)
+{
+	struct au_finfo *finfo;
+	aufs_bindex_t bindex;
+
+	finfo = au_fi(file);
+	bindex = finfo->fi_btop;
+	if (bindex >= 0)
+		au_set_h_fptr(file, bindex, NULL);
+
+	au_finfo_fin(file);
+	return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_do_flush_nondir(struct file *file, fl_owner_t id)
+{
+	int err;
+	struct file *h_file;
+
+	err = 0;
+	h_file = au_hf_top(file);
+	if (h_file)
+		err = vfsub_flush(h_file, id);
+	return err;
+}
+
+static int aufs_flush_nondir(struct file *file, fl_owner_t id)
+{
+	return au_do_flush(file, id, au_do_flush_nondir);
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * read and write functions acquire [fdi]i_rwsem once, but release them before
+ * taking mmap_sem, in order to avoid a race condition with mmap(2).
+ * Releasing these aufs rwsems should be safe; since si_rwsem is still held, no
+ * branch management and no harmful copy-up can happen. Actually a copy-up may
+ * happen in the read functions after [fdi]i_rwsem are released, but it should
+ * be harmless.
+ */
+
+static ssize_t aufs_read(struct file *file, char __user *buf, size_t count,
+			 loff_t *ppos)
+{
+	ssize_t err;
+	struct dentry *dentry;
+	struct file *h_file;
+	struct super_block *sb;
+
+	dentry = file->f_dentry;
+	sb = dentry->d_sb;
+	si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+	err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/0);
+	if (unlikely(err))
+		goto out;
+
+	h_file = au_hf_top(file);
+	get_file(h_file);
+	di_read_unlock(dentry, AuLock_IR);
+	fi_read_unlock(file);
+
+	/* the file data may be obsoleted by a concurrent copy-up; no problem */
+	err = vfsub_read_u(h_file, buf, count, ppos);
+	/* todo: necessary? */
+	/* file->f_ra = h_file->f_ra; */
+	/* update without lock; I don't think it is a problem */
+	fsstack_copy_attr_atime(dentry->d_inode, file_inode(h_file));
+	fput(h_file);
+
+out:
+	si_read_unlock(sb);
+	return err;
+}
+
+/*
+ * todo: very ugly
+ * it safely locks both i_mutex and si_rwsem for read.
+ * if the plink maintenance mode continues forever (that is the real problem),
+ * this may loop forever.
+ */
+static void au_mtx_and_read_lock(struct inode *inode)
+{
+	int err;
+	struct super_block *sb = inode->i_sb;
+
+	while (1) {
+		mutex_lock(&inode->i_mutex);
+		err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+		if (!err)
+			break;
+		mutex_unlock(&inode->i_mutex);
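+		/* wait for the plink maintenance to finish, and retry */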
+		si_read_lock(sb, AuLock_NOPLMW);
+		si_read_unlock(sb);
+	}
+}
+
+static ssize_t aufs_write(struct file *file, const char __user *ubuf,
+			  size_t count, loff_t *ppos)
+{
+	ssize_t err;
+	struct au_pin pin;
+	struct dentry *dentry;
+	struct super_block *sb;
+	struct inode *inode;
+	struct file *h_file;
+	char __user *buf = (char __user *)ubuf;
+
+	dentry = file->f_dentry;
+	sb = dentry->d_sb;
+	inode = dentry->d_inode;
+	au_mtx_and_read_lock(inode);
+
+	err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1);
+	if (unlikely(err))
+		goto out;
+
+	err = au_ready_to_write(file, -1, &pin);
+	di_downgrade_lock(dentry, AuLock_IR);
+	if (unlikely(err)) {
+		di_read_unlock(dentry, AuLock_IR);
+		fi_write_unlock(file);
+		goto out;
+	}
+
+	h_file = au_hf_top(file);
+	get_file(h_file);
+	au_unpin(&pin);
+	di_read_unlock(dentry, AuLock_IR);
+	fi_write_unlock(file);
+
+	err = vfsub_write_u(h_file, buf, count, ppos);
+	ii_write_lock_child(inode);
+	au_cpup_attr_timesizes(inode);
+	inode->i_mode = file_inode(h_file)->i_mode;
+	ii_write_unlock(inode);
+	fput(h_file);
+
+out:
+	si_read_unlock(sb);
+	mutex_unlock(&inode->i_mutex);
+	return err;
+}
+
+static ssize_t au_do_aio(struct file *h_file, int rw, struct kiocb *kio,
+			 const struct iovec *iov, unsigned long nv, loff_t pos)
+{
+	ssize_t err;
+	struct file *file;
+	ssize_t (*func)(struct kiocb *, const struct iovec *, unsigned long,
+			loff_t);
+
+	err = security_file_permission(h_file, rw);
+	if (unlikely(err))
+		goto out;
+
+	err = -ENOSYS;
+	func = NULL;
+	if (rw == MAY_READ)
+		func = h_file->f_op->aio_read;
+	else if (rw == MAY_WRITE)
+		func = h_file->f_op->aio_write;
+	if (func) {
+		file = kio->ki_filp;
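+		/* temporarily redirect the kiocb to the lower file */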
+		kio->ki_filp = h_file;
+		lockdep_off();
+		err = func(kio, iov, nv, pos);
+		lockdep_on();
+		kio->ki_filp = file;
+	} else
+		/* currently there is no such fs */
+		WARN_ON_ONCE(1);
+
+out:
+	return err;
+}
+
+static ssize_t aufs_aio_read(struct kiocb *kio, const struct iovec *iov,
+			     unsigned long nv, loff_t pos)
+{
+	ssize_t err;
+	struct file *file, *h_file;
+	struct dentry *dentry;
+	struct super_block *sb;
+
+	file = kio->ki_filp;
+	dentry = file->f_dentry;
+	sb = dentry->d_sb;
+	si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+	err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/0);
+	if (unlikely(err))
+		goto out;
+
+	h_file = au_hf_top(file);
+	get_file(h_file);
+	di_read_unlock(dentry, AuLock_IR);
+	fi_read_unlock(file);
+
+	err = au_do_aio(h_file, MAY_READ, kio, iov, nv, pos);
+	/* todo: necessary? */
+	/* file->f_ra = h_file->f_ra; */
+	/* update without lock; I don't think it is a problem */
+	fsstack_copy_attr_atime(dentry->d_inode, file_inode(h_file));
+	fput(h_file);
+
+out:
+	si_read_unlock(sb);
+	return err;
+}
+
+static ssize_t aufs_aio_write(struct kiocb *kio, const struct iovec *iov,
+			      unsigned long nv, loff_t pos)
+{
+	ssize_t err;
+	struct au_pin pin;
+	struct dentry *dentry;
+	struct inode *inode;
+	struct file *file, *h_file;
+	struct super_block *sb;
+
+	file = kio->ki_filp;
+	dentry = file->f_dentry;
+	sb = dentry->d_sb;
+	inode = dentry->d_inode;
+	au_mtx_and_read_lock(inode);
+
+	err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1);
+	if (unlikely(err))
+		goto out;
+
+	err = au_ready_to_write(file, -1, &pin);
+	di_downgrade_lock(dentry, AuLock_IR);
+	if (unlikely(err)) {
+		di_read_unlock(dentry, AuLock_IR);
+		fi_write_unlock(file);
+		goto out;
+	}
+
+	h_file = au_hf_top(file);
+	get_file(h_file);
+	au_unpin(&pin);
+	di_read_unlock(dentry, AuLock_IR);
+	fi_write_unlock(file);
+
+	err = au_do_aio(h_file, MAY_WRITE, kio, iov, nv, pos);
+	ii_write_lock_child(inode);
+	au_cpup_attr_timesizes(inode);
+	inode->i_mode = file_inode(h_file)->i_mode;
+	ii_write_unlock(inode);
+	fput(h_file);
+
+out:
+	si_read_unlock(sb);
+	mutex_unlock(&inode->i_mutex);
+	return err;
+}
+
+static ssize_t aufs_splice_read(struct file *file, loff_t *ppos,
+				struct pipe_inode_info *pipe, size_t len,
+				unsigned int flags)
+{
+	ssize_t err;
+	struct file *h_file;
+	struct dentry *dentry;
+	struct super_block *sb;
+
+	dentry = file->f_dentry;
+	sb = dentry->d_sb;
+	si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+	err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/0);
+	if (unlikely(err))
+		goto out;
+
+	err = -EINVAL;
+	h_file = au_hf_top(file);
+	get_file(h_file);
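+	/* when called from the loop driver, share the lower file's page cache */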
+	if (au_test_loopback_kthread()) {
+		au_warn_loopback(h_file->f_dentry->d_sb);
+		if (file->f_mapping != h_file->f_mapping) {
+			file->f_mapping = h_file->f_mapping;
+			smp_mb(); /* unnecessary? */
+		}
+	}
+	di_read_unlock(dentry, AuLock_IR);
+	fi_read_unlock(file);
+
+	err = vfsub_splice_to(h_file, ppos, pipe, len, flags);
+	/* todo: necessary? */
+	/* file->f_ra = h_file->f_ra; */
+	/* update without lock; I don't think it is a problem */
+	fsstack_copy_attr_atime(dentry->d_inode, file_inode(h_file));
+	fput(h_file);
+
+out:
+	si_read_unlock(sb);
+	return err;
+}
+
+static ssize_t
+aufs_splice_write(struct pipe_inode_info *pipe, struct file *file, loff_t *ppos,
+		  size_t len, unsigned int flags)
+{
+	ssize_t err;
+	struct au_pin pin;
+	struct dentry *dentry;
+	struct inode *inode;
+	struct file *h_file;
+	struct super_block *sb;
+
+	dentry = file->f_dentry;
+	sb = dentry->d_sb;
+	inode = dentry->d_inode;
+	au_mtx_and_read_lock(inode);
+
+	err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1);
+	if (unlikely(err))
+		goto out;
+
+	err = au_ready_to_write(file, -1, &pin);
+	di_downgrade_lock(dentry, AuLock_IR);
+	if (unlikely(err)) {
+		di_read_unlock(dentry, AuLock_IR);
+		fi_write_unlock(file);
+		goto out;
+	}
+
+	h_file = au_hf_top(file);
+	get_file(h_file);
+	au_unpin(&pin);
+	di_read_unlock(dentry, AuLock_IR);
+	fi_write_unlock(file);
+
+	err = vfsub_splice_from(pipe, h_file, ppos, len, flags);
+	ii_write_lock_child(inode);
+	au_cpup_attr_timesizes(inode);
+	inode->i_mode = file_inode(h_file)->i_mode;
+	ii_write_unlock(inode);
+	fput(h_file);
+
+out:
+	si_read_unlock(sb);
+	mutex_unlock(&inode->i_mutex);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * The locking order around current->mmap_sem.
+ * - in most and regular cases
+ *   file I/O syscall -- aufs_read() or something
+ *	-- si_rwsem for read -- mmap_sem
+ *	(Note that [fdi]i_rwsem are released before mmap_sem).
+ * - in mmap case
+ *   mmap(2) -- mmap_sem -- aufs_mmap() -- si_rwsem for read -- [fdi]i_rwsem
+ * This AB-BA order is definitely bad, but is not a problem since "si_rwsem for
+ * read" allows multiple processes to acquire it and [fdi]i_rwsem are not held
+ * in file I/O. Aufs needs to stop lockdep in aufs_mmap() though.
+ * It means that when aufs acquires si_rwsem for write, the process should never
+ * acquire mmap_sem.
+ *
+ * Actually aufs_readdir() holds [fdi]i_rwsem before mmap_sem, but this is not
+ * a problem either, since no directory can be mmap-ed. The same scenario
+ * applies to aufs_readlink() too.
+ */
+
+/* cf. linux/include/linux/mman.h: calc_vm_prot_bits() */
+#define AuConv_VM_PROT(f, b)	_calc_vm_trans(f, VM_##b, PROT_##b)
+
+static unsigned long au_arch_prot_conv(unsigned long flags)
+{
+	/* currently ppc64 only */
+#ifdef CONFIG_PPC64
+	/* cf. linux/arch/powerpc/include/asm/mman.h */
+	AuDebugOn(arch_calc_vm_prot_bits(-1) != VM_SAO);
+	return AuConv_VM_PROT(flags, SAO);
+#else
+	AuDebugOn(arch_calc_vm_prot_bits(-1));
+	return 0;
+#endif
+}
+
+static unsigned long au_prot_conv(unsigned long flags)
+{
+	return AuConv_VM_PROT(flags, READ)
+		| AuConv_VM_PROT(flags, WRITE)
+		| AuConv_VM_PROT(flags, EXEC)
+		| au_arch_prot_conv(flags);
+}
+
+/* cf. linux/include/linux/mman.h: calc_vm_flag_bits() */
+#define AuConv_VM_MAP(f, b)	_calc_vm_trans(f, VM_##b, MAP_##b)
+
+static unsigned long au_flag_conv(unsigned long flags)
+{
+	return AuConv_VM_MAP(flags, GROWSDOWN)
+		| AuConv_VM_MAP(flags, DENYWRITE)
+		| AuConv_VM_MAP(flags, LOCKED);
+}
+
+static int aufs_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	int err;
+	aufs_bindex_t bstart;
+	const unsigned char wlock
+		= (file->f_mode & FMODE_WRITE) && (vma->vm_flags & VM_SHARED);
+	struct dentry *dentry;
+	struct super_block *sb;
+	struct file *h_file;
+	struct au_branch *br;
+	struct au_pin pin;
+
+	AuDbgVmRegion(file, vma);
+
+	dentry = file->f_dentry;
+	sb = dentry->d_sb;
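+	/* see the AB-BA ordering note above; lockdep cannot follow it */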
+	lockdep_off();
+	si_read_lock(sb, AuLock_NOPLMW);
+	err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1);
+	if (unlikely(err))
+		goto out;
+
+	if (wlock) {
+		err = au_ready_to_write(file, -1, &pin);
+		di_write_unlock(dentry);
+		if (unlikely(err)) {
+			fi_write_unlock(file);
+			goto out;
+		}
+		au_unpin(&pin);
+	} else
+		di_write_unlock(dentry);
+
+	bstart = au_fbstart(file);
+	br = au_sbr(sb, bstart);
+	h_file = au_hf_top(file);
+	get_file(h_file);
+	au_set_mmapped(file);
+	fi_write_unlock(file);
+	lockdep_on();
+
+	au_vm_file_reset(vma, h_file);
+	err = security_mmap_file(h_file, au_prot_conv(vma->vm_flags),
+				 au_flag_conv(vma->vm_flags));
+	if (!err)
+		err = h_file->f_op->mmap(h_file, vma);
+	if (unlikely(err))
+		goto out_reset;
+
+	au_vm_prfile_set(vma, file);
+	/* update without lock; I don't think it is a problem */
+	fsstack_copy_attr_atime(file_inode(file), file_inode(h_file));
+	goto out_fput; /* success */
+
+out_reset:
+	au_unset_mmapped(file);
+	au_vm_file_reset(vma, file);
+out_fput:
+	fput(h_file);
+	lockdep_off();
+out:
+	si_read_unlock(sb);
+	lockdep_on();
+	AuTraceErr(err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int aufs_fsync_nondir(struct file *file, loff_t start, loff_t end,
+			     int datasync)
+{
+	int err;
+	struct au_pin pin;
+	struct dentry *dentry;
+	struct inode *inode;
+	struct file *h_file;
+	struct super_block *sb;
+
+	dentry = file->f_dentry;
+	inode = dentry->d_inode;
+	sb = dentry->d_sb;
+	mutex_lock(&inode->i_mutex);
+	err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+	if (unlikely(err))
+		goto out;
+
+	err = 0; /* -EBADF; */ /* posix? */
+	if (unlikely(!(file->f_mode & FMODE_WRITE)))
+		goto out_si;
+	err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1);
+	if (unlikely(err))
+		goto out_si;
+
+	err = au_ready_to_write(file, -1, &pin);
+	di_downgrade_lock(dentry, AuLock_IR);
+	if (unlikely(err))
+		goto out_unlock;
+	au_unpin(&pin);
+
+	h_file = au_hf_top(file);
+	err = vfsub_fsync(h_file, &h_file->f_path, datasync);
+	au_cpup_attr_timesizes(inode);
+
+out_unlock:
+	di_read_unlock(dentry, AuLock_IR);
+	fi_write_unlock(file);
+out_si:
+	si_read_unlock(sb);
+out:
+	mutex_unlock(&inode->i_mutex);
+	return err;
+}
+
+/* no one supports this operation, currently */
+#if 0
+static int aufs_aio_fsync_nondir(struct kiocb *kio, int datasync)
+{
+	int err;
+	struct au_pin pin;
+	struct dentry *dentry;
+	struct inode *inode;
+	struct file *file, *h_file;
+
+	file = kio->ki_filp;
+	dentry = file->f_dentry;
+	inode = dentry->d_inode;
+	au_mtx_and_read_lock(inode);
+
+	err = 0; /* -EBADF; */ /* posix? */
+	if (unlikely(!(file->f_mode & FMODE_WRITE)))
+		goto out;
+	err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1);
+	if (unlikely(err))
+		goto out;
+
+	err = au_ready_to_write(file, -1, &pin);
+	di_downgrade_lock(dentry, AuLock_IR);
+	if (unlikely(err))
+		goto out_unlock;
+	au_unpin(&pin);
+
+	err = -ENOSYS;
+	h_file = au_hf_top(file);
+	if (h_file->f_op && h_file->f_op->aio_fsync) {
+		struct mutex *h_mtx;
+
+		h_mtx = &file_inode(h_file)->i_mutex;
+		if (!is_sync_kiocb(kio)) {
+			get_file(h_file);
+			fput(file);
+		}
+		kio->ki_filp = h_file;
+		err = h_file->f_op->aio_fsync(kio, datasync);
+		mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
+		if (!err)
+			vfsub_update_h_iattr(&h_file->f_path, /*did*/NULL);
+		/*ignore*/
+		au_cpup_attr_timesizes(inode);
+		mutex_unlock(h_mtx);
+	}
+
+out_unlock:
+	di_read_unlock(dentry, AuLock_IR);
+	fi_write_unlock(file);
+out:
+	si_read_unlock(inode->i_sb);
+	mutex_unlock(&inode->i_mutex);
+	return err;
+}
+#endif
+
+static int aufs_fasync(int fd, struct file *file, int flag)
+{
+	int err;
+	struct file *h_file;
+	struct dentry *dentry;
+	struct super_block *sb;
+
+	dentry = file->f_dentry;
+	sb = dentry->d_sb;
+	si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+	err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/0);
+	if (unlikely(err))
+		goto out;
+
+	h_file = au_hf_top(file);
+	if (h_file->f_op && h_file->f_op->fasync)
+		err = h_file->f_op->fasync(fd, h_file, flag);
+
+	di_read_unlock(dentry, AuLock_IR);
+	fi_read_unlock(file);
+
+out:
+	si_read_unlock(sb);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* no one supports this operation, currently */
+#if 0
+static ssize_t aufs_sendpage(struct file *file, struct page *page, int offset,
+			     size_t len, loff_t *pos , int more)
+{
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+const struct file_operations aufs_file_fop = {
+	.owner		= THIS_MODULE,
+
+	.llseek		= default_llseek,
+
+	.read		= aufs_read,
+	.write		= aufs_write,
+	.aio_read	= aufs_aio_read,
+	.aio_write	= aufs_aio_write,
+#ifdef CONFIG_AUFS_POLL
+	.poll		= aufs_poll,
+#endif
+	.unlocked_ioctl	= aufs_ioctl_nondir,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= aufs_compat_ioctl_nondir,
+#endif
+	.mmap		= aufs_mmap,
+	.open		= aufs_open_nondir,
+	.flush		= aufs_flush_nondir,
+	.release	= aufs_release_nondir,
+	.fsync		= aufs_fsync_nondir,
+	/* .aio_fsync	= aufs_aio_fsync_nondir, */
+	.fasync		= aufs_fasync,
+	/* .sendpage	= aufs_sendpage, */
+	.splice_write	= aufs_splice_write,
+	.splice_read	= aufs_splice_read,
+#if 0
+	.aio_splice_write = aufs_aio_splice_write,
+	.aio_splice_read  = aufs_aio_splice_read
+#endif
+};
diff --git a/fs/aufs/f_op_sp.c b/fs/aufs/f_op_sp.c
new file mode 100644
index 0000000..1753ea1
--- /dev/null
+++ b/fs/aufs/f_op_sp.c
@@ -0,0 +1,383 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * file operations for special files.
+ * while they exist virtually in aufs,
+ * their file I/O is handled outside of aufs.
+ */
+
+#include <linux/aio.h>
+#include "aufs.h"
+
+/*
+ * I don't think the size of this list will grow much,
+ * so here is a very simple list implementation to find the finfo matching a
+ * given file.
+ */
+static struct au_sphlhead au_finfo_sp = {
+	.spin	= __SPIN_LOCK_INITIALIZER(au_finfo_sp.spin),
+	.head	= HLIST_HEAD_INIT
+};
+
+struct au_finfo_sp {
+	struct hlist_node	hlist;
+	struct file		*file;
+	struct au_finfo		*finfo;
+};
+
+struct au_finfo *au_fi_sp(struct file *file)
+{
+	struct au_finfo *finfo;
+	struct au_finfo_sp *sp;
+
+	finfo = NULL;
+	spin_lock(&au_finfo_sp.spin);
+	hlist_for_each_entry(sp, &au_finfo_sp.head, hlist) {
+		if (sp->file != file)
+			continue;
+		finfo = sp->finfo;
+		break;
+	}
+	spin_unlock(&au_finfo_sp.spin);
+
+	return finfo;
+}
+
+static int au_fi_sp_add(struct file *file)
+{
+	int err;
+	struct au_finfo_sp *sp;
+
+	err = -ENOMEM;
+	sp = kmalloc(sizeof(*sp), GFP_NOFS);
+	if (sp) {
+		err = 0;
+		sp->file = file;
+		sp->finfo = file->private_data;
+		spin_lock(&au_finfo_sp.spin);
+		hlist_add_head(&sp->hlist, &au_finfo_sp.head);
+		spin_unlock(&au_finfo_sp.spin);
+	}
+	return err;
+}
+
+static void au_fi_sp_del(struct file *file)
+{
+	struct au_finfo_sp *sp, *do_free;
+
+	do_free = NULL;
+	spin_lock(&au_finfo_sp.spin);
+	hlist_for_each_entry(sp, &au_finfo_sp.head, hlist) {
+		if (sp->file != file)
+			continue;
+		hlist_del(&sp->hlist);
+		do_free = sp;
+		break;
+	}
+	spin_unlock(&au_finfo_sp.spin);
+	kfree(do_free);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static ssize_t aufs_aio_read_sp(struct kiocb *kio, const struct iovec *iov,
+				unsigned long nv, loff_t pos)
+{
+	ssize_t err;
+	aufs_bindex_t bstart;
+	unsigned char wbr;
+	struct file *file, *h_file;
+	struct super_block *sb;
+
+	file = kio->ki_filp;
+	sb = file->f_dentry->d_sb;
+	si_read_lock(sb, AuLock_FLUSH);
+	fi_read_lock(file);
+	bstart = au_fbstart(file);
+	h_file = au_hf_top(file);
+	fi_read_unlock(file);
+	wbr = !!au_br_writable(au_sbr(sb, bstart)->br_perm);
+	si_read_unlock(sb);
+
+	/* do not change the file in kio */
+	AuDebugOn(!h_file->f_op || !h_file->f_op->aio_read);
+	err = h_file->f_op->aio_read(kio, iov, nv, pos);
+	if (err > 0 && wbr)
+		file_accessed(h_file);
+
+	return err;
+}
+
+static ssize_t aufs_aio_write_sp(struct kiocb *kio, const struct iovec *iov,
+				 unsigned long nv, loff_t pos)
+{
+	ssize_t err;
+	aufs_bindex_t bstart;
+	unsigned char wbr;
+	struct super_block *sb;
+	struct file *file, *h_file;
+
+	file = kio->ki_filp;
+	sb = file->f_dentry->d_sb;
+	si_read_lock(sb, AuLock_FLUSH);
+	fi_read_lock(file);
+	bstart = au_fbstart(file);
+	h_file = au_hf_top(file);
+	fi_read_unlock(file);
+	wbr = !!au_br_writable(au_sbr(sb, bstart)->br_perm);
+	si_read_unlock(sb);
+
+	/* do not change the file in kio */
+	AuDebugOn(!h_file->f_op || !h_file->f_op->aio_write);
+	err = h_file->f_op->aio_write(kio, iov, nv, pos);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int aufs_release_sp(struct inode *inode, struct file *file)
+{
+	int err;
+	struct file *h_file;
+
+	fi_read_lock(file);
+	h_file = au_hf_top(file);
+	fi_read_unlock(file);
+	/* close this fifo in aufs */
+	err = h_file->f_op->release(inode, file); /* ignore */
+	aufs_release_nondir(inode, file); /* ignore */
+	au_fi_sp_del(file);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* currently, only FIFO is supported */
+enum {
+	AuSp_FIFO, AuSp_FIFO_R, AuSp_FIFO_W, AuSp_FIFO_RW,
+	/* AuSp_SOCK, AuSp_CHR, AuSp_BLK, */
+	AuSp_Last
+};
+static int aufs_open_sp(struct inode *inode, struct file *file);
+static struct au_sp_fop {
+	int			done;
+	struct file_operations	fop;	/* not 'const' */
+	spinlock_t		spin;
+} au_sp_fop[AuSp_Last] = {
+	[AuSp_FIFO] = {
+		.fop	= {
+			.owner	= THIS_MODULE,
+			.open	= aufs_open_sp
+		}
+	}
+};
+
+static void au_init_fop_sp(struct file *file)
+{
+	struct au_sp_fop *p;
+	int i;
+	struct file *h_file;
+
+	p = au_sp_fop;
+	if (unlikely(!p->done)) {
+		/* initialize first time only */
+		static DEFINE_SPINLOCK(spin);
+
+		spin_lock(&spin);
+		if (!p->done) {
+			BUILD_BUG_ON(sizeof(au_sp_fop)/sizeof(*au_sp_fop)
+				     != AuSp_Last);
+			for (i = 0; i < AuSp_Last; i++)
+				spin_lock_init(&p[i].spin);
+			p->done = 1;
+		}
+		spin_unlock(&spin);
+	}
+
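+	/* pick the fop slot matching the access mode of this FIFO */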
+	switch (file->f_mode & (FMODE_READ | FMODE_WRITE)) {
+	case FMODE_READ:
+		i = AuSp_FIFO_R;
+		break;
+	case FMODE_WRITE:
+		i = AuSp_FIFO_W;
+		break;
+	case FMODE_READ | FMODE_WRITE:
+		i = AuSp_FIFO_RW;
+		break;
+	default:
+		BUG();
+	}
+
+	p += i;
+	if (unlikely(!p->done)) {
+		/* initialize first time only */
+		h_file = au_hf_top(file);
+		spin_lock(&p->spin);
+		if (!p->done) {
+			p->fop = *h_file->f_op;
+			p->fop.owner = THIS_MODULE;
+			if (p->fop.aio_read)
+				p->fop.aio_read = aufs_aio_read_sp;
+			if (p->fop.aio_write)
+				p->fop.aio_write = aufs_aio_write_sp;
+			p->fop.release = aufs_release_sp;
+			p->done = 1;
+		}
+		spin_unlock(&p->spin);
+	}
+	file->f_op = &p->fop;
+}
+
+static int au_cpup_sp(struct dentry *dentry)
+{
+	int err;
+	struct au_pin pin;
+	struct au_wr_dir_args wr_dir_args = {
+		.force_btgt	= -1,
+		.flags		= 0
+	};
+	struct au_cp_generic cpg = {
+		.dentry	= dentry,
+		.bdst	= -1,
+		.bsrc	= -1,
+		.len	= -1,
+		.pin	= &pin,
+		.flags	= AuCpup_DTIME
+	};
+
+	AuDbg("%.*s\n", AuDLNPair(dentry));
+
+	di_read_unlock(dentry, AuLock_IR);
+	di_write_lock_child(dentry);
+	err = au_wr_dir(dentry, /*src_dentry*/NULL, &wr_dir_args);
+	if (unlikely(err < 0))
+		goto out;
+	cpg.bdst = err;
+	err = 0;
+	if (cpg.bdst == au_dbstart(dentry))
+		goto out; /* success */
+
+	err = au_pin(&pin, dentry, cpg.bdst, au_opt_udba(dentry->d_sb),
+		     AuPin_MNT_WRITE);
+	if (!err) {
+		err = au_sio_cpup_simple(&cpg);
+		au_unpin(&pin);
+	}
+
+out:
+	di_downgrade_lock(dentry, AuLock_IR);
+	return err;
+}
+
+static int au_do_open_sp(struct file *file, int flags)
+{
+	int err;
+	struct dentry *dentry;
+	struct super_block *sb;
+	struct file *h_file;
+	struct inode *h_inode;
+
+	err = au_fi_sp_add(file);
+	if (unlikely(err))
+		goto out;
+
+	dentry = file->f_dentry;
+	AuDbg("%.*s\n", AuDLNPair(dentry));
+
+	/*
+	 * try copying-up.
+	 * operating on the ro branch is not an error.
+	 */
+	au_cpup_sp(dentry); /* ignore */
+
+	/* prepare h_file */
+	err = au_do_open_nondir(file, vfsub_file_flags(file));
+	if (unlikely(err))
+		goto out_del;
+
+	sb = dentry->d_sb;
+	h_file = au_hf_top(file);
+	h_inode = file_inode(h_file);
+	di_read_unlock(dentry, AuLock_IR);
+	fi_write_unlock(file);
+	si_read_unlock(sb);
+	/* open this fifo in aufs */
+	err = h_inode->i_fop->open(file_inode(file), file);
+	si_noflush_read_lock(sb);
+	fi_write_lock(file);
+	di_read_lock_child(dentry, AuLock_IR);
+	if (!err) {
+		au_init_fop_sp(file);
+		goto out; /* success */
+	}
+
+out_del:
+	au_fi_sp_del(file);
+out:
+	return err;
+}
+
+static int aufs_open_sp(struct inode *inode, struct file *file)
+{
+	int err;
+	struct super_block *sb;
+
+	sb = file->f_dentry->d_sb;
+	si_read_lock(sb, AuLock_FLUSH);
+	err = au_do_open(file, au_do_open_sp, /*fidir*/NULL);
+	si_read_unlock(sb);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_init_special_fop(struct inode *inode, umode_t mode, dev_t rdev)
+{
+	init_special_inode(inode, mode, rdev);
+
+	switch (mode & S_IFMT) {
+	case S_IFIFO:
+		inode->i_fop = &au_sp_fop[AuSp_FIFO].fop;
+		/*FALLTHROUGH*/
+	case S_IFCHR:
+	case S_IFBLK:
+	case S_IFSOCK:
+		break;
+	default:
+		AuDebugOn(1);
+	}
+}
+
+int au_special_file(umode_t mode)
+{
+	int ret;
+
+	ret = 0;
+	switch (mode & S_IFMT) {
+	case S_IFIFO:
+#if 0
+	case S_IFCHR:
+	case S_IFBLK:
+	case S_IFSOCK:
+#endif
+		ret = 1;
+	}
+
+	return ret;
+}
diff --git a/fs/aufs/file.c b/fs/aufs/file.c
new file mode 100644
index 0000000..32b735d
--- /dev/null
+++ b/fs/aufs/file.c
@@ -0,0 +1,708 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * handling file/dir, and address_space operations
+ */
+
+#ifdef CONFIG_AUFS_DEBUG
+#include <linux/migrate.h>
+#endif
+#include <linux/pagemap.h>
+#include "aufs.h"
+
+/* drop flags for writing */
+unsigned int au_file_roflags(unsigned int flags)
+{
+	flags &= ~(O_WRONLY | O_RDWR | O_APPEND | O_CREAT | O_TRUNC);
+	flags |= O_RDONLY | O_NOATIME;
+	return flags;
+}
+
+/* common functions to regular file and dir */
+struct file *au_h_open(struct dentry *dentry, aufs_bindex_t bindex, int flags,
+		       struct file *file)
+{
+	struct file *h_file;
+	struct dentry *h_dentry;
+	struct inode *h_inode;
+	struct super_block *sb;
+	struct au_branch *br;
+	struct path h_path;
+	int err, exec_flag;
+
+	/* a race condition can happen between open and unlink/rmdir */
+	h_file = ERR_PTR(-ENOENT);
+	h_dentry = au_h_dptr(dentry, bindex);
+	if (au_test_nfsd() && !h_dentry)
+		goto out;
+	h_inode = h_dentry->d_inode;
+	if (au_test_nfsd() && !h_inode)
+		goto out;
+	spin_lock(&h_dentry->d_lock);
+	err = (!d_unhashed(dentry) && d_unlinked(h_dentry))
+		|| !h_inode
+		/* || !dentry->d_inode->i_nlink */
+		;
+	spin_unlock(&h_dentry->d_lock);
+	if (unlikely(err))
+		goto out;
+
+	sb = dentry->d_sb;
+	br = au_sbr(sb, bindex);
+	h_file = ERR_PTR(-EACCES);
+	exec_flag = flags & __FMODE_EXEC;
+	if (exec_flag && (au_br_mnt(br)->mnt_flags & MNT_NOEXEC))
+		goto out;
+
+	/* drop flags for writing */
+	if (au_test_ro(sb, bindex, dentry->d_inode))
+		flags = au_file_roflags(flags);
+	flags &= ~O_CREAT;
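+	/* keep the branch busy while the lower file is open */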
+	atomic_inc(&br->br_count);
+	h_path.dentry = h_dentry;
+	h_path.mnt = au_br_mnt(br);
+	if (!au_special_file(h_inode->i_mode))
+		h_file = vfsub_dentry_open(&h_path, flags);
+	else {
+		/* this block depends upon the configuration */
+		di_read_unlock(dentry, AuLock_IR);
+		fi_write_unlock(file);
+		si_read_unlock(sb);
+		h_file = vfsub_dentry_open(&h_path, flags);
+		si_noflush_read_lock(sb);
+		fi_write_lock(file);
+		di_read_lock_child(dentry, AuLock_IR);
+	}
+	if (IS_ERR(h_file))
+		goto out_br;
+
+	if (exec_flag) {
+		err = deny_write_access(h_file);
+		if (unlikely(err)) {
+			fput(h_file);
+			h_file = ERR_PTR(err);
+			goto out_br;
+		}
+	}
+	fsnotify_open(h_file);
+	goto out; /* success */
+
+out_br:
+	atomic_dec(&br->br_count);
+out:
+	return h_file;
+}
+
+int au_do_open(struct file *file, int (*open)(struct file *file, int flags),
+	       struct au_fidir *fidir)
+{
+	int err;
+	struct dentry *dentry;
+
+	err = au_finfo_init(file, fidir);
+	if (unlikely(err))
+		goto out;
+
+	dentry = file->f_dentry;
+	di_read_lock_child(dentry, AuLock_IR);
+	err = open(file, vfsub_file_flags(file));
+	di_read_unlock(dentry, AuLock_IR);
+
+	fi_write_unlock(file);
+	if (unlikely(err)) {
+		au_fi(file)->fi_hdir = NULL;
+		au_finfo_fin(file);
+	}
+
+out:
+	return err;
+}
+
+int au_reopen_nondir(struct file *file)
+{
+	int err;
+	aufs_bindex_t bstart;
+	struct dentry *dentry;
+	struct file *h_file, *h_file_tmp;
+
+	dentry = file->f_dentry;
+	AuDebugOn(au_special_file(dentry->d_inode->i_mode));
+	bstart = au_dbstart(dentry);
+	h_file_tmp = NULL;
+	if (au_fbstart(file) == bstart) {
+		h_file = au_hf_top(file);
+		if (file->f_mode == h_file->f_mode)
+			return 0; /* success */
+		h_file_tmp = h_file;
+		get_file(h_file_tmp);
+		au_set_h_fptr(file, bstart, NULL);
+	}
+	AuDebugOn(au_fi(file)->fi_hdir);
+	/*
+	 * it can happen:
+	 * the file exists on both rw and ro branches
+	 * open --> dbstart and fbstart are both 0
+	 * prepend a branch as rw, the old "rw" becomes ro
+	 * remove rw/file
+	 * delete the top branch, "rw" becomes rw again
+	 *	--> dbstart is 1, fbstart is still 0
+	 * write --> fbstart is 0 but dbstart is 1
+	 */
+	/* AuDebugOn(au_fbstart(file) < bstart); */
+
+	h_file = au_h_open(dentry, bstart, vfsub_file_flags(file) & ~O_TRUNC,
+			   file);
+	err = PTR_ERR(h_file);
+	if (IS_ERR(h_file)) {
+		if (h_file_tmp) {
+			atomic_inc(&au_sbr(dentry->d_sb, bstart)->br_count);
+			au_set_h_fptr(file, bstart, h_file_tmp);
+			h_file_tmp = NULL;
+		}
+		goto out; /* todo: close all? */
+	}
+
+	err = 0;
+	au_set_fbstart(file, bstart);
+	au_set_h_fptr(file, bstart, h_file);
+	au_update_figen(file);
+	/* todo: necessary? */
+	/* file->f_ra = h_file->f_ra; */
+
+out:
+	if (h_file_tmp)
+		fput(h_file_tmp);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_reopen_wh(struct file *file, aufs_bindex_t btgt,
+			struct dentry *hi_wh)
+{
+	int err;
+	aufs_bindex_t bstart;
+	struct au_dinfo *dinfo;
+	struct dentry *h_dentry;
+	struct au_hdentry *hdp;
+
+	dinfo = au_di(file->f_dentry);
+	AuRwMustWriteLock(&dinfo->di_rwsem);
+
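+	/*
+	 * temporarily install hi_wh as the top lower dentry so that
+	 * au_reopen_nondir() opens it, and then restore the original
+	 */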
+	bstart = dinfo->di_bstart;
+	dinfo->di_bstart = btgt;
+	hdp = dinfo->di_hdentry;
+	h_dentry = hdp[0 + btgt].hd_dentry;
+	hdp[0 + btgt].hd_dentry = hi_wh;
+	err = au_reopen_nondir(file);
+	hdp[0 + btgt].hd_dentry = h_dentry;
+	dinfo->di_bstart = bstart;
+
+	return err;
+}
+
+static int au_ready_to_write_wh(struct file *file, loff_t len,
+				aufs_bindex_t bcpup, struct au_pin *pin)
+{
+	int err;
+	struct inode *inode, *h_inode;
+	struct dentry *h_dentry, *hi_wh;
+	struct au_cp_generic cpg = {
+		.dentry	= file->f_dentry,
+		.bdst	= bcpup,
+		.bsrc	= -1,
+		.len	= len,
+		.pin	= pin
+	};
+
+	au_update_dbstart(cpg.dentry);
+	inode = cpg.dentry->d_inode;
+	h_inode = NULL;
+	if (au_dbstart(cpg.dentry) <= bcpup
+	    && au_dbend(cpg.dentry) >= bcpup) {
+		h_dentry = au_h_dptr(cpg.dentry, bcpup);
+		if (h_dentry)
+			h_inode = h_dentry->d_inode;
+	}
+	hi_wh = au_hi_wh(inode, bcpup);
+	if (!hi_wh && !h_inode)
+		err = au_sio_cpup_wh(&cpg, file);
+	else
+		/* already copied-up after unlink */
+		err = au_reopen_wh(file, bcpup, hi_wh);
+
+	if (!err
+	    && inode->i_nlink > 1
+	    && au_opt_test(au_mntflags(cpg.dentry->d_sb), PLINK))
+		au_plink_append(inode, bcpup, au_h_dptr(cpg.dentry, bcpup));
+
+	return err;
+}
+
+/*
+ * prepare the @file for writing.
+ */
+int au_ready_to_write(struct file *file, loff_t len, struct au_pin *pin)
+{
+	int err;
+	aufs_bindex_t dbstart;
+	struct dentry *parent, *h_dentry;
+	struct inode *inode;
+	struct super_block *sb;
+	struct file *h_file;
+	struct au_cp_generic cpg = {
+		.dentry	= file->f_dentry,
+		.bdst	= -1,
+		.bsrc	= -1,
+		.len	= len,
+		.pin	= pin,
+		.flags	= AuCpup_DTIME
+	};
+
+	sb = cpg.dentry->d_sb;
+	inode = cpg.dentry->d_inode;
+	AuDebugOn(au_special_file(inode->i_mode));
+	cpg.bsrc = au_fbstart(file);
+	err = au_test_ro(sb, cpg.bsrc, inode);
+	if (!err && (au_hf_top(file)->f_mode & FMODE_WRITE)) {
+		err = au_pin(pin, cpg.dentry, cpg.bsrc, AuOpt_UDBA_NONE,
+			     /*flags*/0);
+		goto out;
+	}
+
+	/* need to cpup or reopen */
+	parent = dget_parent(cpg.dentry);
+	di_write_lock_parent(parent);
+	err = AuWbrCopyup(au_sbi(sb), cpg.dentry);
+	cpg.bdst = err;
+	if (unlikely(err < 0))
+		goto out_dgrade;
+	err = 0;
+
+	if (!d_unhashed(cpg.dentry) && !au_h_dptr(parent, cpg.bdst)) {
+		err = au_cpup_dirs(cpg.dentry, cpg.bdst);
+		if (unlikely(err))
+			goto out_dgrade;
+	}
+
+	err = au_pin(pin, cpg.dentry, cpg.bdst, AuOpt_UDBA_NONE,
+		     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+	if (unlikely(err))
+		goto out_dgrade;
+
+	h_dentry = au_hf_top(file)->f_dentry;
+	dbstart = au_dbstart(cpg.dentry);
+	if (dbstart <= cpg.bdst) {
+		h_dentry = au_h_dptr(cpg.dentry, cpg.bdst);
+		AuDebugOn(!h_dentry);
+		cpg.bsrc = cpg.bdst;
+	}
+
+	if (dbstart <= cpg.bdst		/* just reopen */
+	    || !d_unhashed(cpg.dentry)	/* copyup and reopen */
+		) {
+		h_file = au_h_open_pre(cpg.dentry, cpg.bsrc);
+		if (IS_ERR(h_file))
+			err = PTR_ERR(h_file);
+		else {
+			di_downgrade_lock(parent, AuLock_IR);
+			if (dbstart > cpg.bdst)
+				err = au_sio_cpup_simple(&cpg);
+			if (!err)
+				err = au_reopen_nondir(file);
+			au_h_open_post(cpg.dentry, cpg.bsrc, h_file);
+		}
+	} else {			/* copyup as wh and reopen */
+		/*
+		 * since writable hfsplus branch is not supported,
+		 * h_open_pre/post() are unnecessary.
+		 */
+		err = au_ready_to_write_wh(file, len, cpg.bdst, pin);
+		di_downgrade_lock(parent, AuLock_IR);
+	}
+
+	if (!err) {
+		au_pin_set_parent_lflag(pin, /*lflag*/0);
+		goto out_dput; /* success */
+	}
+	au_unpin(pin);
+	goto out_unlock;
+
+out_dgrade:
+	di_downgrade_lock(parent, AuLock_IR);
+out_unlock:
+	di_read_unlock(parent, AuLock_IR);
+out_dput:
+	dput(parent);
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_do_flush(struct file *file, fl_owner_t id,
+		int (*flush)(struct file *file, fl_owner_t id))
+{
+	int err;
+	struct super_block *sb;
+	struct inode *inode;
+
+	inode = file_inode(file);
+	sb = inode->i_sb;
+	si_noflush_read_lock(sb);
+	fi_read_lock(file);
+	ii_read_lock_child(inode);
+
+	err = flush(file, id);
+	au_cpup_attr_timesizes(inode);
+
+	ii_read_unlock(inode);
+	fi_read_unlock(file);
+	si_read_unlock(sb);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_file_refresh_by_inode(struct file *file, int *need_reopen)
+{
+	int err;
+	struct au_pin pin;
+	struct au_finfo *finfo;
+	struct dentry *parent, *hi_wh;
+	struct inode *inode;
+	struct super_block *sb;
+	struct au_cp_generic cpg = {
+		.dentry	= file->f_dentry,
+		.bdst	= -1,
+		.bsrc	= -1,
+		.len	= -1,
+		.pin	= &pin,
+		.flags	= AuCpup_DTIME
+	};
+
+	FiMustWriteLock(file);
+
+	err = 0;
+	finfo = au_fi(file);
+	sb = cpg.dentry->d_sb;
+	inode = cpg.dentry->d_inode;
+	cpg.bdst = au_ibstart(inode);
+	if (cpg.bdst == finfo->fi_btop || IS_ROOT(cpg.dentry))
+		goto out;
+
+	parent = dget_parent(cpg.dentry);
+	if (au_test_ro(sb, cpg.bdst, inode)) {
+		di_read_lock_parent(parent, !AuLock_IR);
+		err = AuWbrCopyup(au_sbi(sb), cpg.dentry);
+		cpg.bdst = err;
+		di_read_unlock(parent, !AuLock_IR);
+		if (unlikely(err < 0))
+			goto out_parent;
+		err = 0;
+	}
+
+	di_read_lock_parent(parent, AuLock_IR);
+	hi_wh = au_hi_wh(inode, cpg.bdst);
+	if (!S_ISDIR(inode->i_mode)
+	    && au_opt_test(au_mntflags(sb), PLINK)
+	    && au_plink_test(inode)
+	    && !d_unhashed(cpg.dentry)
+	    && cpg.bdst < au_dbstart(cpg.dentry)) {
+		err = au_test_and_cpup_dirs(cpg.dentry, cpg.bdst);
+		if (unlikely(err))
+			goto out_unlock;
+
+		/* always superio. */
+		err = au_pin(&pin, cpg.dentry, cpg.bdst, AuOpt_UDBA_NONE,
+			     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+		if (!err) {
+			err = au_sio_cpup_simple(&cpg);
+			au_unpin(&pin);
+		}
+	} else if (hi_wh) {
+		/* already copied-up after unlink */
+		err = au_reopen_wh(file, cpg.bdst, hi_wh);
+		*need_reopen = 0;
+	}
+
+out_unlock:
+	di_read_unlock(parent, AuLock_IR);
+out_parent:
+	dput(parent);
+out:
+	return err;
+}
+
+static void au_do_refresh_dir(struct file *file)
+{
+	aufs_bindex_t bindex, bend, new_bindex, brid;
+	struct au_hfile *p, tmp, *q;
+	struct au_finfo *finfo;
+	struct super_block *sb;
+	struct au_fidir *fidir;
+
+	FiMustWriteLock(file);
+
+	sb = file->f_dentry->d_sb;
+	finfo = au_fi(file);
+	fidir = finfo->fi_hdir;
+	AuDebugOn(!fidir);
+	p = fidir->fd_hfile + finfo->fi_btop;
+	brid = p->hf_br->br_id;
+	bend = fidir->fd_bbot;
+	for (bindex = finfo->fi_btop; bindex <= bend; bindex++, p++) {
+		if (!p->hf_file)
+			continue;
+
+		new_bindex = au_br_index(sb, p->hf_br->br_id);
+		if (new_bindex == bindex)
+			continue;
+		if (new_bindex < 0) {
+			au_set_h_fptr(file, bindex, NULL);
+			continue;
+		}
+
+		/* swap the two lower file entries, and loop again */
+		q = fidir->fd_hfile + new_bindex;
+		tmp = *q;
+		*q = *p;
+		*p = tmp;
+		if (tmp.hf_file) {
+			bindex--;
+			p--;
+		}
+	}
+
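+	/* re-calculate fi_btop, the topmost valid lower file */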
+	p = fidir->fd_hfile;
+	if (!au_test_mmapped(file) && !d_unlinked(file->f_dentry)) {
+		bend = au_sbend(sb);
+		for (finfo->fi_btop = 0; finfo->fi_btop <= bend;
+		     finfo->fi_btop++, p++)
+			if (p->hf_file) {
+				if (file_inode(p->hf_file))
+					break;
+				else
+					au_hfput(p, file);
+			}
+	} else {
+		bend = au_br_index(sb, brid);
+		for (finfo->fi_btop = 0; finfo->fi_btop < bend;
+		     finfo->fi_btop++, p++)
+			if (p->hf_file)
+				au_hfput(p, file);
+		bend = au_sbend(sb);
+	}
+
+	p = fidir->fd_hfile + bend;
+	for (fidir->fd_bbot = bend; fidir->fd_bbot >= finfo->fi_btop;
+	     fidir->fd_bbot--, p--)
+		if (p->hf_file) {
+			if (file_inode(p->hf_file))
+				break;
+			else
+				au_hfput(p, file);
+		}
+	AuDebugOn(fidir->fd_bbot < finfo->fi_btop);
+}
+
+/*
+ * after branch manipulation, refresh the file.
+ */
+static int refresh_file(struct file *file, int (*reopen)(struct file *file))
+{
+	int err, need_reopen;
+	aufs_bindex_t bend, bindex;
+	struct dentry *dentry;
+	struct au_finfo *finfo;
+	struct au_hfile *hfile;
+
+	dentry = file->f_dentry;
+	finfo = au_fi(file);
+	if (!finfo->fi_hdir) {
+		hfile = &finfo->fi_htop;
+		AuDebugOn(!hfile->hf_file);
+		bindex = au_br_index(dentry->d_sb, hfile->hf_br->br_id);
+		AuDebugOn(bindex < 0);
+		if (bindex != finfo->fi_btop)
+			au_set_fbstart(file, bindex);
+	} else {
+		err = au_fidir_realloc(finfo, au_sbend(dentry->d_sb) + 1);
+		if (unlikely(err))
+			goto out;
+		au_do_refresh_dir(file);
+	}
+
+	err = 0;
+	need_reopen = 1;
+	if (!au_test_mmapped(file))
+		err = au_file_refresh_by_inode(file, &need_reopen);
+	if (!err && need_reopen && !d_unlinked(dentry))
+		err = reopen(file);
+	if (!err) {
+		au_update_figen(file);
+		goto out; /* success */
+	}
+
+	/* error, close all lower files */
+	if (finfo->fi_hdir) {
+		bend = au_fbend_dir(file);
+		for (bindex = au_fbstart(file); bindex <= bend; bindex++)
+			au_set_h_fptr(file, bindex, NULL);
+	}
+
+out:
+	return err;
+}
+
+/* common function to regular file and dir */
+int au_reval_and_lock_fdi(struct file *file, int (*reopen)(struct file *file),
+			  int wlock)
+{
+	int err;
+	unsigned int sigen, figen;
+	aufs_bindex_t bstart;
+	unsigned char pseudo_link;
+	struct dentry *dentry;
+	struct inode *inode;
+
+	err = 0;
+	dentry = file->f_dentry;
+	inode = dentry->d_inode;
+	AuDebugOn(au_special_file(inode->i_mode));
+	sigen = au_sigen(dentry->d_sb);
+	fi_write_lock(file);
+	figen = au_figen(file);
+	di_write_lock_child(dentry);
+	bstart = au_dbstart(dentry);
+	pseudo_link = (bstart != au_ibstart(inode));
+	if (sigen == figen && !pseudo_link && au_fbstart(file) == bstart) {
+		if (!wlock) {
+			di_downgrade_lock(dentry, AuLock_IR);
+			fi_downgrade_lock(file);
+		}
+		goto out; /* success */
+	}
+
+	AuDbg("sigen %d, figen %d\n", sigen, figen);
+	if (au_digen_test(dentry, sigen)) {
+		err = au_reval_dpath(dentry, sigen);
+		AuDebugOn(!err && au_digen_test(dentry, sigen));
+	}
+
+	if (!err)
+		err = refresh_file(file, reopen);
+	if (!err) {
+		if (!wlock) {
+			di_downgrade_lock(dentry, AuLock_IR);
+			fi_downgrade_lock(file);
+		}
+	} else {
+		di_write_unlock(dentry);
+		fi_write_unlock(file);
+	}
+
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* cf. aufs_nopage() */
+/* for madvise(2) */
+static int aufs_readpage(struct file *file __maybe_unused, struct page *page)
+{
+	unlock_page(page);
+	return 0;
+}
+
+/* it will never be called, but necessary to support O_DIRECT */
+static ssize_t aufs_direct_IO(int rw, struct kiocb *iocb,
+			      const struct iovec *iov, loff_t offset,
+			      unsigned long nr_segs)
+{ BUG(); return 0; }
+
+/*
+ * it will never be called, but madvise and fadvise behave differently
+ * when get_xip_mem is defined
+ */
+static int aufs_get_xip_mem(struct address_space *mapping, pgoff_t pgoff,
+			    int create, void **kmem, unsigned long *pfn)
+{ BUG(); return 0; }
+
+/* they will never be called. */
+#ifdef CONFIG_AUFS_DEBUG
+static int aufs_write_begin(struct file *file, struct address_space *mapping,
+			    loff_t pos, unsigned len, unsigned flags,
+			    struct page **pagep, void **fsdata)
+{ AuUnsupport(); return 0; }
+static int aufs_write_end(struct file *file, struct address_space *mapping,
+			  loff_t pos, unsigned len, unsigned copied,
+			  struct page *page, void *fsdata)
+{ AuUnsupport(); return 0; }
+static int aufs_writepage(struct page *page, struct writeback_control *wbc)
+{ AuUnsupport(); return 0; }
+
+static int aufs_set_page_dirty(struct page *page)
+{ AuUnsupport(); return 0; }
+static void aufs_invalidatepage(struct page *page, unsigned long offset)
+{ AuUnsupport(); }
+static int aufs_releasepage(struct page *page, gfp_t gfp)
+{ AuUnsupport(); return 0; }
+static int aufs_migratepage(struct address_space *mapping, struct page *newpage,
+			    struct page *page, enum migrate_mode mode)
+{ AuUnsupport(); return 0; }
+static int aufs_launder_page(struct page *page)
+{ AuUnsupport(); return 0; }
+static int aufs_is_partially_uptodate(struct page *page,
+				      read_descriptor_t *desc,
+				      unsigned long from)
+{ AuUnsupport(); return 0; }
+static int aufs_error_remove_page(struct address_space *mapping,
+				  struct page *page)
+{ AuUnsupport(); return 0; }
+static int aufs_swap_activate(struct swap_info_struct *sis, struct file *file,
+			      sector_t *span)
+{ AuUnsupport(); return 0; }
+static void aufs_swap_deactivate(struct file *file)
+{ AuUnsupport(); }
+#endif /* CONFIG_AUFS_DEBUG */
+
+const struct address_space_operations aufs_aop = {
+	.readpage		= aufs_readpage,
+	.direct_IO		= aufs_direct_IO,
+	.get_xip_mem		= aufs_get_xip_mem,
+#ifdef CONFIG_AUFS_DEBUG
+	.writepage		= aufs_writepage,
+	/* no writepages, because of writepage */
+	.set_page_dirty		= aufs_set_page_dirty,
+	/* no readpages, because of readpage */
+	.write_begin		= aufs_write_begin,
+	.write_end		= aufs_write_end,
+	/* no bmap, no block device */
+	.invalidatepage		= aufs_invalidatepage,
+	.releasepage		= aufs_releasepage,
+	.migratepage		= aufs_migratepage,
+	.launder_page		= aufs_launder_page,
+	.is_partially_uptodate	= aufs_is_partially_uptodate,
+	.error_remove_page	= aufs_error_remove_page,
+	.swap_activate		= aufs_swap_activate,
+	.swap_deactivate	= aufs_swap_deactivate
+#endif /* CONFIG_AUFS_DEBUG */
+};
diff --git a/fs/aufs/file.h b/fs/aufs/file.h
new file mode 100644
index 0000000..28ff3f3
--- /dev/null
+++ b/fs/aufs/file.h
@@ -0,0 +1,310 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * file operations
+ */
+
+#ifndef __AUFS_FILE_H__
+#define __AUFS_FILE_H__
+
+#ifdef __KERNEL__
+
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include "rwsem.h"
+
+struct au_branch;
+struct au_hfile {
+	struct file		*hf_file;
+	struct au_branch	*hf_br;
+};
+
+struct au_vdir;
+struct au_fidir {
+	aufs_bindex_t		fd_bbot;
+	aufs_bindex_t		fd_nent;
+	struct au_vdir		*fd_vdir_cache;
+	struct au_hfile		fd_hfile[];
+};
+
+static inline int au_fidir_sz(int nent)
+{
+	AuDebugOn(nent < 0);
+	return sizeof(struct au_fidir) + sizeof(struct au_hfile) * nent;
+}
+
+struct au_finfo {
+	atomic_t		fi_generation;
+
+	struct au_rwsem		fi_rwsem;
+	aufs_bindex_t		fi_btop;
+
+	/* do not union them */
+	struct {				/* for non-dir */
+		struct au_hfile			fi_htop;
+		atomic_t			fi_mmapped;
+	};
+	struct au_fidir		*fi_hdir;	/* for dir only */
+} ____cacheline_aligned_in_smp;
+
+/* ---------------------------------------------------------------------- */
+
+/* file.c */
+extern const struct address_space_operations aufs_aop;
+unsigned int au_file_roflags(unsigned int flags);
+struct file *au_h_open(struct dentry *dentry, aufs_bindex_t bindex, int flags,
+		       struct file *file);
+int au_do_open(struct file *file, int (*open)(struct file *file, int flags),
+	       struct au_fidir *fidir);
+int au_reopen_nondir(struct file *file);
+struct au_pin;
+int au_ready_to_write(struct file *file, loff_t len, struct au_pin *pin);
+int au_reval_and_lock_fdi(struct file *file, int (*reopen)(struct file *file),
+			  int wlock);
+int au_do_flush(struct file *file, fl_owner_t id,
+		int (*flush)(struct file *file, fl_owner_t id));
+
+/* poll.c */
+#ifdef CONFIG_AUFS_POLL
+unsigned int aufs_poll(struct file *file, poll_table *wait);
+#endif
+
+#ifdef CONFIG_AUFS_BR_HFSPLUS
+/* hfsplus.c */
+struct file *au_h_open_pre(struct dentry *dentry, aufs_bindex_t bindex);
+void au_h_open_post(struct dentry *dentry, aufs_bindex_t bindex,
+		    struct file *h_file);
+#else
+static inline
+struct file *au_h_open_pre(struct dentry *dentry, aufs_bindex_t bindex)
+{
+	return NULL;
+}
+
+AuStubVoid(au_h_open_post, struct dentry *dentry, aufs_bindex_t bindex,
+	   struct file *h_file);
+#endif
+
+/* f_op.c */
+extern const struct file_operations aufs_file_fop;
+int au_do_open_nondir(struct file *file, int flags);
+int aufs_release_nondir(struct inode *inode __maybe_unused, struct file *file);
+
+#ifdef CONFIG_AUFS_SP_IATTR
+/* f_op_sp.c */
+struct au_finfo *au_fi_sp(struct file *file);
+int au_special_file(umode_t mode);
+void au_init_special_fop(struct inode *inode, umode_t mode, dev_t rdev);
+#else
+static inline struct au_finfo *au_fi_sp(struct file *file)
+{
+	return NULL;
+}
+AuStubInt0(au_special_file, umode_t mode)
+static inline void au_init_special_fop(struct inode *inode, umode_t mode,
+				       dev_t rdev)
+{
+	init_special_inode(inode, mode, rdev);
+}
+#endif
+
+/* finfo.c */
+void au_hfput(struct au_hfile *hf, struct file *file);
+void au_set_h_fptr(struct file *file, aufs_bindex_t bindex,
+		   struct file *h_file);
+
+void au_update_figen(struct file *file);
+struct au_fidir *au_fidir_alloc(struct super_block *sb);
+int au_fidir_realloc(struct au_finfo *finfo, int nbr);
+
+void au_fi_init_once(void *_fi);
+void au_finfo_fin(struct file *file);
+int au_finfo_init(struct file *file, struct au_fidir *fidir);
+
+/* ioctl.c */
+long aufs_ioctl_nondir(struct file *file, unsigned int cmd, unsigned long arg);
+#ifdef CONFIG_COMPAT
+long aufs_compat_ioctl_dir(struct file *file, unsigned int cmd,
+			   unsigned long arg);
+long aufs_compat_ioctl_nondir(struct file *file, unsigned int cmd,
+			      unsigned long arg);
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct au_finfo *au_fi(struct file *file)
+{
+	struct au_finfo *finfo;
+
+	finfo = au_fi_sp(file);
+	if (!finfo)
+		finfo = file->private_data;
+	return finfo;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * fi_read_lock, fi_write_lock,
+ * fi_read_unlock, fi_write_unlock, fi_downgrade_lock
+ */
+AuSimpleRwsemFuncs(fi, struct file *f, &au_fi(f)->fi_rwsem);
+
+#define FiMustNoWaiters(f)	AuRwMustNoWaiters(&au_fi(f)->fi_rwsem)
+#define FiMustAnyLock(f)	AuRwMustAnyLock(&au_fi(f)->fi_rwsem)
+#define FiMustWriteLock(f)	AuRwMustWriteLock(&au_fi(f)->fi_rwsem)
+
+/* ---------------------------------------------------------------------- */
+
+/* todo: hard/soft set? */
+static inline aufs_bindex_t au_fbstart(struct file *file)
+{
+	FiMustAnyLock(file);
+	return au_fi(file)->fi_btop;
+}
+
+static inline aufs_bindex_t au_fbend_dir(struct file *file)
+{
+	FiMustAnyLock(file);
+	AuDebugOn(!au_fi(file)->fi_hdir);
+	return au_fi(file)->fi_hdir->fd_bbot;
+}
+
+static inline struct au_vdir *au_fvdir_cache(struct file *file)
+{
+	FiMustAnyLock(file);
+	AuDebugOn(!au_fi(file)->fi_hdir);
+	return au_fi(file)->fi_hdir->fd_vdir_cache;
+}
+
+static inline void au_set_fbstart(struct file *file, aufs_bindex_t bindex)
+{
+	FiMustWriteLock(file);
+	au_fi(file)->fi_btop = bindex;
+}
+
+static inline void au_set_fbend_dir(struct file *file, aufs_bindex_t bindex)
+{
+	FiMustWriteLock(file);
+	AuDebugOn(!au_fi(file)->fi_hdir);
+	au_fi(file)->fi_hdir->fd_bbot = bindex;
+}
+
+static inline void au_set_fvdir_cache(struct file *file,
+				      struct au_vdir *vdir_cache)
+{
+	FiMustWriteLock(file);
+	AuDebugOn(!au_fi(file)->fi_hdir);
+	au_fi(file)->fi_hdir->fd_vdir_cache = vdir_cache;
+}
+
+static inline struct file *au_hf_top(struct file *file)
+{
+	FiMustAnyLock(file);
+	AuDebugOn(au_fi(file)->fi_hdir);
+	return au_fi(file)->fi_htop.hf_file;
+}
+
+static inline struct file *au_hf_dir(struct file *file, aufs_bindex_t bindex)
+{
+	FiMustAnyLock(file);
+	AuDebugOn(!au_fi(file)->fi_hdir);
+	return au_fi(file)->fi_hdir->fd_hfile[0 + bindex].hf_file;
+}
+
+/* todo: memory barrier? */
+static inline unsigned int au_figen(struct file *f)
+{
+	return atomic_read(&au_fi(f)->fi_generation);
+}
+
+static inline void au_set_mmapped(struct file *f)
+{
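+	/* count mappings; on a wrap-around to zero, keep the count non-zero */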
+	if (atomic_inc_return(&au_fi(f)->fi_mmapped))
+		return;
+	pr_warn("fi_mmapped wrapped around\n");
+	while (!atomic_inc_return(&au_fi(f)->fi_mmapped))
+		;
+}
+
+static inline void au_unset_mmapped(struct file *f)
+{
+	atomic_dec(&au_fi(f)->fi_mmapped);
+}
+
+static inline int au_test_mmapped(struct file *f)
+{
+	return atomic_read(&au_fi(f)->fi_mmapped);
+}
+
+/* customize vma->vm_file */
+
+static inline void au_do_vm_file_reset(struct vm_area_struct *vma,
+				       struct file *file)
+{
+	struct file *f;
+
+	f = vma->vm_file;
+	get_file(file);
+	vma->vm_file = file;
+	fput(f);
+}
+
+#ifdef CONFIG_MMU
+#define AuDbgVmRegion(file, vma) do {} while (0)
+
+static inline void au_vm_file_reset(struct vm_area_struct *vma,
+				    struct file *file)
+{
+	au_do_vm_file_reset(vma, file);
+}
+#else
+#define AuDbgVmRegion(file, vma) \
+	AuDebugOn((vma)->vm_region && (vma)->vm_region->vm_file != (file))
+
+static inline void au_vm_file_reset(struct vm_area_struct *vma,
+				    struct file *file)
+{
+	struct file *f;
+
+	au_do_vm_file_reset(vma, file);
+	f = vma->vm_region->vm_file;
+	get_file(file);
+	vma->vm_region->vm_file = file;
+	fput(f);
+}
+#endif /* CONFIG_MMU */
+
+/* handle vma->vm_prfile */
+static inline void au_vm_prfile_set(struct vm_area_struct *vma,
+				    struct file *file)
+{
+#ifdef CONFIG_AUFS_PROC_MAP
+	get_file(file);
+	vma->vm_prfile = file;
+#ifndef CONFIG_MMU
+	get_file(file);
+	vma->vm_region->vm_prfile = file;
+#endif
+#endif
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_FILE_H__ */
diff --git a/fs/aufs/finfo.c b/fs/aufs/finfo.c
new file mode 100644
index 0000000..2111355
--- /dev/null
+++ b/fs/aufs/finfo.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * file private data
+ */
+
+#include "aufs.h"
+
+void au_hfput(struct au_hfile *hf, struct file *file)
+{
+	/* todo: direct access f_flags */
+	if (vfsub_file_flags(file) & __FMODE_EXEC)
+		allow_write_access(hf->hf_file);
+	fput(hf->hf_file);
+	hf->hf_file = NULL;
+	atomic_dec(&hf->hf_br->br_count);
+	hf->hf_br = NULL;
+}
+
+void au_set_h_fptr(struct file *file, aufs_bindex_t bindex, struct file *val)
+{
+	struct au_finfo *finfo = au_fi(file);
+	struct au_hfile *hf;
+	struct au_fidir *fidir;
+
+	fidir = finfo->fi_hdir;
+	if (!fidir) {
+		AuDebugOn(finfo->fi_btop != bindex);
+		hf = &finfo->fi_htop;
+	} else
+		hf = fidir->fd_hfile + bindex;
+
+	if (hf && hf->hf_file)
+		au_hfput(hf, file);
+	if (val) {
+		FiMustWriteLock(file);
+		hf->hf_file = val;
+		hf->hf_br = au_sbr(file->f_dentry->d_sb, bindex);
+	}
+}
+
+void au_update_figen(struct file *file)
+{
+	atomic_set(&au_fi(file)->fi_generation, au_digen(file->f_dentry));
+	/* smp_mb(); */ /* atomic_set */
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_fidir *au_fidir_alloc(struct super_block *sb)
+{
+	struct au_fidir *fidir;
+	int nbr;
+
+	nbr = au_sbend(sb) + 1;
+	if (nbr < 2)
+		nbr = 2; /* initially allocate for 2 branches */
+	fidir = kzalloc(au_fidir_sz(nbr), GFP_NOFS);
+	if (fidir) {
+		fidir->fd_bbot = -1;
+		fidir->fd_nent = nbr;
+		fidir->fd_vdir_cache = NULL;
+	}
+
+	return fidir;
+}
+
+int au_fidir_realloc(struct au_finfo *finfo, int nbr)
+{
+	int err;
+	struct au_fidir *fidir, *p;
+
+	AuRwMustWriteLock(&finfo->fi_rwsem);
+	fidir = finfo->fi_hdir;
+	AuDebugOn(!fidir);
+
+	err = -ENOMEM;
+	p = au_kzrealloc(fidir, au_fidir_sz(fidir->fd_nent), au_fidir_sz(nbr),
+			 GFP_NOFS);
+	if (p) {
+		p->fd_nent = nbr;
+		finfo->fi_hdir = p;
+		err = 0;
+	}
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_finfo_fin(struct file *file)
+{
+	struct au_finfo *finfo;
+
+	au_nfiles_dec(file->f_dentry->d_sb);
+
+	finfo = au_fi(file);
+	AuDebugOn(finfo->fi_hdir);
+	AuRwDestroy(&finfo->fi_rwsem);
+	au_cache_free_finfo(finfo);
+}
+
+void au_fi_init_once(void *_finfo)
+{
+	struct au_finfo *finfo = _finfo;
+	static struct lock_class_key aufs_fi;
+
+	au_rw_init(&finfo->fi_rwsem);
+	au_rw_class(&finfo->fi_rwsem, &aufs_fi);
+}
+
+int au_finfo_init(struct file *file, struct au_fidir *fidir)
+{
+	int err;
+	struct au_finfo *finfo;
+	struct dentry *dentry;
+
+	err = -ENOMEM;
+	dentry = file->f_dentry;
+	finfo = au_cache_alloc_finfo();
+	if (unlikely(!finfo))
+		goto out;
+
+	err = 0;
+	au_nfiles_inc(dentry->d_sb);
+	/* verbose coding for lock class name */
+	if (!fidir)
+		au_rw_class(&finfo->fi_rwsem, au_lc_key + AuLcNonDir_FIINFO);
+	else
+		au_rw_class(&finfo->fi_rwsem, au_lc_key + AuLcDir_FIINFO);
+	au_rw_write_lock(&finfo->fi_rwsem);
+	finfo->fi_btop = -1;
+	finfo->fi_hdir = fidir;
+	atomic_set(&finfo->fi_generation, au_digen(dentry));
+	/* smp_mb(); */ /* atomic_set */
+
+	file->private_data = finfo;
+
+out:
+	return err;
+}
diff --git a/fs/aufs/fstype.h b/fs/aufs/fstype.h
new file mode 100644
index 0000000..4d9ab6e
--- /dev/null
+++ b/fs/aufs/fstype.h
@@ -0,0 +1,470 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * judging filesystem type
+ */
+
+#ifndef __AUFS_FSTYPE_H__
+#define __AUFS_FSTYPE_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+#include <linux/magic.h>
+#include <linux/romfs_fs.h>
+
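+/*
+ * Each au_test_<fstype>() helper below compares sb->s_magic (or the fs type
+ * name) against the filesystem's well-known magic.  When the filesystem is
+ * not configured, the helper collapses to a constant 0, letting the compiler
+ * drop the dead branches in the aggregate tests at the end of this header.
+ */
+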
+static inline int au_test_aufs(struct super_block *sb)
+{
+	return sb->s_magic == AUFS_SUPER_MAGIC;
+}
+
+static inline const char *au_sbtype(struct super_block *sb)
+{
+	return sb->s_type->name;
+}
+
+static inline int au_test_iso9660(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_ISO9660_FS) || defined(CONFIG_ISO9660_FS_MODULE)
+	return sb->s_magic == ISOFS_SUPER_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_romfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_ROMFS_FS) || defined(CONFIG_ROMFS_FS_MODULE)
+	return sb->s_magic == ROMFS_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_cramfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_CRAMFS) || defined(CONFIG_CRAMFS_MODULE)
+	return sb->s_magic == CRAMFS_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_nfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_NFS_FS) || defined(CONFIG_NFS_FS_MODULE)
+	return sb->s_magic == NFS_SUPER_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_fuse(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_FUSE_FS) || defined(CONFIG_FUSE_FS_MODULE)
+	return sb->s_magic == FUSE_SUPER_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_xfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_XFS_FS) || defined(CONFIG_XFS_FS_MODULE)
+	return sb->s_magic == XFS_SB_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_tmpfs(struct super_block *sb __maybe_unused)
+{
+#ifdef CONFIG_TMPFS
+	return sb->s_magic == TMPFS_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_ecryptfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_ECRYPT_FS) || defined(CONFIG_ECRYPT_FS_MODULE)
+	return !strcmp(au_sbtype(sb), "ecryptfs");
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_ocfs2(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_OCFS2_FS) || defined(CONFIG_OCFS2_FS_MODULE)
+	return sb->s_magic == OCFS2_SUPER_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_ocfs2_dlmfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_OCFS2_FS_O2CB) || defined(CONFIG_OCFS2_FS_O2CB_MODULE)
+	return sb->s_magic == DLMFS_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_coda(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_CODA_FS) || defined(CONFIG_CODA_FS_MODULE)
+	return sb->s_magic == CODA_SUPER_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_v9fs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_9P_FS) || defined(CONFIG_9P_FS_MODULE)
+	return sb->s_magic == V9FS_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_ext4(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_EXT4_FS) || defined(CONFIG_EXT4_FS_MODULE)
+	return sb->s_magic == EXT4_SUPER_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_sysv(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_SYSV_FS) || defined(CONFIG_SYSV_FS_MODULE)
+	return !strcmp(au_sbtype(sb), "sysv");
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_ramfs(struct super_block *sb)
+{
+	return sb->s_magic == RAMFS_MAGIC;
+}
+
+static inline int au_test_ubifs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_UBIFS_FS) || defined(CONFIG_UBIFS_FS_MODULE)
+	return sb->s_magic == UBIFS_SUPER_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_procfs(struct super_block *sb __maybe_unused)
+{
+#ifdef CONFIG_PROC_FS
+	return sb->s_magic == PROC_SUPER_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_sysfs(struct super_block *sb __maybe_unused)
+{
+#ifdef CONFIG_SYSFS
+	return sb->s_magic == SYSFS_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_configfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_CONFIGFS_FS) || defined(CONFIG_CONFIGFS_FS_MODULE)
+	return sb->s_magic == CONFIGFS_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_minix(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_MINIX_FS) || defined(CONFIG_MINIX_FS_MODULE)
+	return sb->s_magic == MINIX3_SUPER_MAGIC
+		|| sb->s_magic == MINIX2_SUPER_MAGIC
+		|| sb->s_magic == MINIX2_SUPER_MAGIC2
+		|| sb->s_magic == MINIX_SUPER_MAGIC
+		|| sb->s_magic == MINIX_SUPER_MAGIC2;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_cifs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_CIFS_FS) || defined(CONFIG_CIFS_FS_MODULE)
+	return sb->s_magic == CIFS_MAGIC_NUMBER;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_fat(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_FAT_FS) || defined(CONFIG_FAT_FS_MODULE)
+	return sb->s_magic == MSDOS_SUPER_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_msdos(struct super_block *sb)
+{
+	return au_test_fat(sb);
+}
+
+static inline int au_test_vfat(struct super_block *sb)
+{
+	return au_test_fat(sb);
+}
+
+static inline int au_test_securityfs(struct super_block *sb __maybe_unused)
+{
+#ifdef CONFIG_SECURITYFS
+	return sb->s_magic == SECURITYFS_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_squashfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_SQUASHFS) || defined(CONFIG_SQUASHFS_MODULE)
+	return sb->s_magic == SQUASHFS_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_btrfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
+	return sb->s_magic == BTRFS_SUPER_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_xenfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_XENFS) || defined(CONFIG_XENFS_MODULE)
+	return sb->s_magic == XENFS_SUPER_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_debugfs(struct super_block *sb __maybe_unused)
+{
+#ifdef CONFIG_DEBUG_FS
+	return sb->s_magic == DEBUGFS_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_nilfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_NILFS) || defined(CONFIG_NILFS_MODULE)
+	return sb->s_magic == NILFS_SUPER_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+static inline int au_test_hfsplus(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_HFSPLUS_FS) || defined(CONFIG_HFSPLUS_FS_MODULE)
+	return sb->s_magic == HFSPLUS_SUPER_MAGIC;
+#else
+	return 0;
+#endif
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * these filesystems cannot be used as an aufs branch.
+ */
+static inline int au_test_fs_unsuppoted(struct super_block *sb)
+{
+	return
+#ifndef CONFIG_AUFS_BR_RAMFS
+		au_test_ramfs(sb) ||
+#endif
+		au_test_procfs(sb)
+		|| au_test_sysfs(sb)
+		|| au_test_configfs(sb)
+		|| au_test_debugfs(sb)
+		|| au_test_securityfs(sb)
+		|| au_test_xenfs(sb)
+		|| au_test_ecryptfs(sb)
+		/* || !strcmp(au_sbtype(sb), "unionfs") */
+		|| au_test_aufs(sb); /* will be supported in the next version */
+}
+
+static inline int au_test_fs_remote(struct super_block *sb)
+{
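+	/* not tmpfs/ramfs and no backing device required => treat as remote */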
+	return !au_test_tmpfs(sb)
+#ifdef CONFIG_AUFS_BR_RAMFS
+		&& !au_test_ramfs(sb)
+#endif
+		&& !(sb->s_type->fs_flags & FS_REQUIRES_DEV);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * Note: the functions below were written after reading ->getattr() in every
+ * filesystem under linux/fs, which means the survey has to be redone on
+ * every kernel update...
+ */
+
+/*
+ * Some filesystems require ->getattr() to refresh the inode attributes
+ * before they are referenced.
+ * In most cases we can rely on the cached inode attributes even on NFS (or
+ * any remote fs) and leave the work to d_revalidate().
+ */
+static inline int au_test_fs_refresh_iattr(struct super_block *sb)
+{
+	return au_test_nfs(sb)
+		|| au_test_fuse(sb)
+		/* || au_test_ocfs2(sb) */	/* untested */
+		/* || au_test_btrfs(sb) */	/* untested */
+		/* || au_test_coda(sb) */	/* untested */
+		/* || au_test_v9fs(sb) */	/* untested */
+		;
+}
+
+/*
+ * filesystems which don't maintain i_size or i_blocks.
+ */
+static inline int au_test_fs_bad_iattr_size(struct super_block *sb)
+{
+	return au_test_xfs(sb)
+		|| au_test_btrfs(sb)
+		|| au_test_ubifs(sb)
+		|| au_test_hfsplus(sb)	/* maintained, but incorrect */
+		/* || au_test_ext4(sb) */	/* untested */
+		/* || au_test_ocfs2(sb) */	/* untested */
+		/* || au_test_ocfs2_dlmfs(sb) */ /* untested */
+		/* || au_test_sysv(sb) */	/* untested */
+		/* || au_test_minix(sb) */	/* untested */
+		;
+}
+
+/*
+ * filesystems which don't store the correct value in some of their inode
+ * attributes.
+ */
+static inline int au_test_fs_bad_iattr(struct super_block *sb)
+{
+	return au_test_fs_bad_iattr_size(sb)
+		/* || au_test_cifs(sb) */	/* untested */
+		|| au_test_fat(sb)
+		|| au_test_msdos(sb)
+		|| au_test_vfat(sb);
+}
+
+/* they don't check i_nlink in link(2) */
+static inline int au_test_fs_no_limit_nlink(struct super_block *sb)
+{
+	return au_test_tmpfs(sb)
+#ifdef CONFIG_AUFS_BR_RAMFS
+		|| au_test_ramfs(sb)
+#endif
+		|| au_test_ubifs(sb)
+		|| au_test_hfsplus(sb);
+}
+
+/*
+ * filesystems which set S_NOATIME and S_NOCMTIME.
+ */
+static inline int au_test_fs_notime(struct super_block *sb)
+{
+	return au_test_nfs(sb)
+		|| au_test_fuse(sb)
+		|| au_test_ubifs(sb)
+		/* || au_test_cifs(sb) */	/* untested */
+		;
+}
+
+/*
+ * filesystems which require replacing i_mapping.
+ */
+static inline int au_test_fs_bad_mapping(struct super_block *sb)
+{
+	return au_test_fuse(sb)
+		|| au_test_ubifs(sb);
+}
+
+/* temporary support for i#1 in cramfs */
+static inline int au_test_fs_unique_ino(struct inode *inode)
+{
+	if (au_test_cramfs(inode->i_sb))
+		return inode->i_ino != 1;
+	return 1;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * the filesystem where the xino files are placed must support I/O after
+ * unlink and maintain i_size and i_blocks.
+ */
+static inline int au_test_fs_bad_xino(struct super_block *sb)
+{
+	return au_test_fs_remote(sb)
+		|| au_test_fs_bad_iattr_size(sb)
+		/* don't want unnecessary work for xino */
+		|| au_test_aufs(sb)
+		|| au_test_ecryptfs(sb)
+		|| au_test_nilfs(sb);
+}
+
+static inline int au_test_fs_trunc_xino(struct super_block *sb)
+{
+	return au_test_tmpfs(sb)
+		|| au_test_ramfs(sb);
+}
+
+/*
+ * test if the @sb is real-readonly.
+ */
+static inline int au_test_fs_rr(struct super_block *sb)
+{
+	return au_test_squashfs(sb)
+		|| au_test_iso9660(sb)
+		|| au_test_cramfs(sb)
+		|| au_test_romfs(sb);
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_FSTYPE_H__ */
diff --git a/fs/aufs/hfsnotify.c b/fs/aufs/hfsnotify.c
new file mode 100644
index 0000000..22b34cd0e
--- /dev/null
+++ b/fs/aufs/hfsnotify.c
@@ -0,0 +1,296 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * fsnotify for the lower directories
+ */
+
+#include "aufs.h"
+
+/* FS_IN_IGNORED is unnecessary */
+static const __u32 AuHfsnMask = (FS_MOVED_TO | FS_MOVED_FROM | FS_DELETE
+				 | FS_CREATE | FS_EVENT_ON_CHILD);
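+/* i.e. watch namespace changes (create/delete/move) in the lower dir,
+ * including events reported on its children */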
+static DECLARE_WAIT_QUEUE_HEAD(au_hfsn_wq);
+static __cacheline_aligned_in_smp atomic64_t au_hfsn_ifree = ATOMIC64_INIT(0);
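+
+/*
+ * au_hfsn_ifree counts marks whose release is still pending: au_hfsn_free()
+ * increments it and au_hfsn_free_mark() decrements it, so au_hfsn_fin() can
+ * wait on au_hfsn_wq until every mark has really been freed.
+ */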
+
+static void au_hfsn_free_mark(struct fsnotify_mark *mark)
+{
+	struct au_hnotify *hn = container_of(mark, struct au_hnotify,
+					     hn_mark);
+	AuDbg("here\n");
+	au_cache_free_hnotify(hn);
+	smp_mb__before_atomic_dec();
+	if (atomic64_dec_and_test(&au_hfsn_ifree))
+		wake_up(&au_hfsn_wq);
+}
+
+static int au_hfsn_alloc(struct au_hinode *hinode)
+{
+	int err;
+	struct au_hnotify *hn;
+	struct super_block *sb;
+	struct au_branch *br;
+	struct fsnotify_mark *mark;
+	aufs_bindex_t bindex;
+
+	hn = hinode->hi_notify;
+	sb = hn->hn_aufs_inode->i_sb;
+	bindex = au_br_index(sb, hinode->hi_id);
+	br = au_sbr(sb, bindex);
+	AuDebugOn(!br->br_hfsn);
+
+	mark = &hn->hn_mark;
+	fsnotify_init_mark(mark, au_hfsn_free_mark);
+	mark->mask = AuHfsnMask;
+	/*
+	 * on a rename or rmdir detected by udba, aufs assigns a new inode to
+	 * the known h_inode, so specify 1 to allow duplicate marks.
+	 */
+	err = fsnotify_add_mark(mark, br->br_hfsn->hfsn_group, hinode->hi_inode,
+				 /*mnt*/NULL, /*allow_dups*/1);
+	/* even if err */
+	fsnotify_put_mark(mark);
+
+	return err;
+}
+
+static int au_hfsn_free(struct au_hinode *hinode, struct au_hnotify *hn)
+{
+	struct fsnotify_mark *mark;
+	unsigned long long ull;
+	struct fsnotify_group *group;
+
+	ull = atomic64_inc_return(&au_hfsn_ifree);
+	BUG_ON(!ull);
+
+	mark = &hn->hn_mark;
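+	/* grab a group reference under mark->lock so the group cannot go
+	 * away while fsnotify_destroy_mark() runs */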
+	spin_lock(&mark->lock);
+	group = mark->group;
+	fsnotify_get_group(group);
+	spin_unlock(&mark->lock);
+	fsnotify_destroy_mark(mark, group);
+	fsnotify_put_group(group);
+
+	/* hn is freed by au_hfsn_free_mark(); tell the caller not to free it */
+	return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void au_hfsn_ctl(struct au_hinode *hinode, int do_set)
+{
+	struct fsnotify_mark *mark;
+
+	mark = &hinode->hi_notify->hn_mark;
+	spin_lock(&mark->lock);
+	if (do_set) {
+		AuDebugOn(mark->mask & AuHfsnMask);
+		mark->mask |= AuHfsnMask;
+	} else {
+		AuDebugOn(!(mark->mask & AuHfsnMask));
+		mark->mask &= ~AuHfsnMask;
+	}
+	spin_unlock(&mark->lock);
+	/* fsnotify_recalc_inode_mask(hinode->hi_inode); */
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* #define AuDbgHnotify */
+#ifdef AuDbgHnotify
+static char *au_hfsn_name(u32 mask)
+{
+#ifdef CONFIG_AUFS_DEBUG
+#define test_ret(flag)				\
+	do {					\
+		if (mask & flag)		\
+			return #flag;		\
+	} while (0)
+	test_ret(FS_ACCESS);
+	test_ret(FS_MODIFY);
+	test_ret(FS_ATTRIB);
+	test_ret(FS_CLOSE_WRITE);
+	test_ret(FS_CLOSE_NOWRITE);
+	test_ret(FS_OPEN);
+	test_ret(FS_MOVED_FROM);
+	test_ret(FS_MOVED_TO);
+	test_ret(FS_CREATE);
+	test_ret(FS_DELETE);
+	test_ret(FS_DELETE_SELF);
+	test_ret(FS_MOVE_SELF);
+	test_ret(FS_UNMOUNT);
+	test_ret(FS_Q_OVERFLOW);
+	test_ret(FS_IN_IGNORED);
+	test_ret(FS_IN_ISDIR);
+	test_ret(FS_IN_ONESHOT);
+	test_ret(FS_EVENT_ON_CHILD);
+	return "";
+#undef test_ret
+#else
+	return "??";
+#endif
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+static void au_hfsn_free_group(struct fsnotify_group *group)
+{
+	struct au_br_hfsnotify *hfsn = group->private;
+
+	AuDbg("here\n");
+	kfree(hfsn);
+}
+
+static int au_hfsn_handle_event(struct fsnotify_group *group,
+				struct fsnotify_mark *inode_mark,
+				struct fsnotify_mark *vfsmount_mark,
+				struct fsnotify_event *event)
+{
+	int err;
+	struct au_hnotify *hnotify;
+	struct inode *h_dir, *h_inode;
+	__u32 mask;
+	struct qstr h_child_qstr = QSTR_INIT(event->file_name, event->name_len);
+
+	AuDebugOn(event->data_type != FSNOTIFY_EVENT_INODE);
+
+	err = 0;
+	/* if FS_UNMOUNT happens, there must be another bug */
+	mask = event->mask;
+	AuDebugOn(mask & FS_UNMOUNT);
+	if (mask & (FS_IN_IGNORED | FS_UNMOUNT))
+		goto out;
+
+	h_dir = event->to_tell;
+	h_inode = event->inode;
+#ifdef AuDbgHnotify
+	au_debug(1);
+	if (1 || h_child_qstr.len != sizeof(AUFS_XINO_FNAME) - 1
+	    || strncmp(h_child_qstr.name, AUFS_XINO_FNAME, h_child_qstr.len)) {
+		AuDbg("i%lu, mask 0x%x %s, hcname %.*s, hi%lu\n",
+		      h_dir->i_ino, mask, au_hfsn_name(mask),
+		      AuLNPair(&h_child_qstr), h_inode ? h_inode->i_ino : 0);
+		/* WARN_ON(1); */
+	}
+	au_debug(0);
+#endif
+
+	AuDebugOn(!inode_mark);
+	hnotify = container_of(inode_mark, struct au_hnotify, hn_mark);
+	err = au_hnotify(h_dir, hnotify, mask, &h_child_qstr, h_inode);
+
+out:
+	return err;
+}
+
+/* isn't it a waste to ask every registered 'group'? */
+/* copied from linux/fs/notify/inotify/inotify_fsnotify.c */
+/* it should be exported to modules */
+static bool au_hfsn_should_send_event(struct fsnotify_group *group,
+				      struct inode *h_inode,
+				      struct fsnotify_mark *inode_mark,
+				      struct fsnotify_mark *vfsmount_mark,
+				      __u32 mask, void *data, int data_type)
+{
+	mask = (mask & ~FS_EVENT_ON_CHILD);
+	return inode_mark->mask & mask;
+}
+
+static struct fsnotify_ops au_hfsn_ops = {
+	.should_send_event	= au_hfsn_should_send_event,
+	.handle_event		= au_hfsn_handle_event,
+	.free_group_priv	= au_hfsn_free_group
+};
+
+/* ---------------------------------------------------------------------- */
+
+static void au_hfsn_fin_br(struct au_branch *br)
+{
+	struct au_br_hfsnotify *hfsn;
+
+	hfsn = br->br_hfsn;
+	if (hfsn)
+		fsnotify_put_group(hfsn->hfsn_group);
+}
+
+static int au_hfsn_init_br(struct au_branch *br, int perm)
+{
+	int err;
+	struct fsnotify_group *group;
+	struct au_br_hfsnotify *hfsn;
+
+	err = 0;
+	br->br_hfsn = NULL;
+	if (!au_br_hnotifyable(perm))
+		goto out;
+
+	err = -ENOMEM;
+	hfsn = kmalloc(sizeof(*hfsn), GFP_NOFS);
+	if (unlikely(!hfsn))
+		goto out;
+
+	err = 0;
+	group = fsnotify_alloc_group(&au_hfsn_ops);
+	if (IS_ERR(group)) {
+		err = PTR_ERR(group);
+		pr_err("fsnotify_alloc_group() failed, %d\n", err);
+		goto out_hfsn;
+	}
+
+	group->private = hfsn;
+	hfsn->hfsn_group = group;
+	br->br_hfsn = hfsn;
+	goto out; /* success */
+
+out_hfsn:
+	kfree(hfsn);
+out:
+	return err;
+}
+
+static int au_hfsn_reset_br(unsigned int udba, struct au_branch *br, int perm)
+{
+	int err;
+
+	err = 0;
+	if (!br->br_hfsn)
+		err = au_hfsn_init_br(br, perm);
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void au_hfsn_fin(void)
+{
+	AuDbg("au_hfsn_ifree %lld\n", (long long)atomic64_read(&au_hfsn_ifree));
+	wait_event(au_hfsn_wq, !atomic64_read(&au_hfsn_ifree));
+}
+
+const struct au_hnotify_op au_hnotify_op = {
+	.ctl		= au_hfsn_ctl,
+	.alloc		= au_hfsn_alloc,
+	.free		= au_hfsn_free,
+
+	.fin		= au_hfsn_fin,
+
+	.reset_br	= au_hfsn_reset_br,
+	.fin_br		= au_hfsn_fin_br,
+	.init_br	= au_hfsn_init_br
+};
diff --git a/fs/aufs/hfsplus.c b/fs/aufs/hfsplus.c
new file mode 100644
index 0000000..c77a2a3
--- /dev/null
+++ b/fs/aufs/hfsplus.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2010-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * special support for filesystems which acquire an inode mutex on the final
+ * close of a file, e.g. hfsplus.
+ *
+ * This trick is very simple and stupid: just open the file before the really
+ * necessary open, to tell hfsplus that this is not the final close.
+ * The caller should call au_h_open_pre() after acquiring the inode mutex,
+ * and au_h_open_post() after releasing it.
+ */
+
+#include "aufs.h"
+
+struct file *au_h_open_pre(struct dentry *dentry, aufs_bindex_t bindex)
+{
+	struct file *h_file;
+	struct dentry *h_dentry;
+
+	h_dentry = au_h_dptr(dentry, bindex);
+	AuDebugOn(!h_dentry);
+	AuDebugOn(!h_dentry->d_inode);
+
+	h_file = NULL;
+	if (au_test_hfsplus(h_dentry->d_sb)
+	    && S_ISREG(h_dentry->d_inode->i_mode))
+		h_file = au_h_open(dentry, bindex,
+				   O_RDONLY | O_NOATIME | O_LARGEFILE,
+				   /*file*/NULL);
+	return h_file;
+}
+
+void au_h_open_post(struct dentry *dentry, aufs_bindex_t bindex,
+		    struct file *h_file)
+{
+	if (h_file) {
+		fput(h_file);
+		au_sbr_put(dentry->d_sb, bindex);
+	}
+}
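+
+/*
+ * A sketch of the intended calling sequence (hypothetical call site; the
+ * real callers live in the other aufs sources):
+ *
+ *	mutex_lock(&h_inode->i_mutex);
+ *	h_file = au_h_open_pre(dentry, bindex);
+ *	... the operation which would otherwise trigger the final close ...
+ *	mutex_unlock(&h_inode->i_mutex);
+ *	au_h_open_post(dentry, bindex, h_file);
+ */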
diff --git a/fs/aufs/hnotify.c b/fs/aufs/hnotify.c
new file mode 100644
index 0000000..b7f7a95
--- /dev/null
+++ b/fs/aufs/hnotify.c
@@ -0,0 +1,712 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * abstraction to notify the direct changes on lower directories
+ */
+
+#include "aufs.h"
+
+int au_hn_alloc(struct au_hinode *hinode, struct inode *inode)
+{
+	int err;
+	struct au_hnotify *hn;
+
+	err = -ENOMEM;
+	hn = au_cache_alloc_hnotify();
+	if (hn) {
+		hn->hn_aufs_inode = inode;
+		hinode->hi_notify = hn;
+		err = au_hnotify_op.alloc(hinode);
+		AuTraceErr(err);
+		if (unlikely(err)) {
+			hinode->hi_notify = NULL;
+			au_cache_free_hnotify(hn);
+			/*
+			 * The upper dir was removed by udba, but a dir with
+			 * the same name is left. In this case, aufs assigns a
+			 * new inode number and sets the monitor again.
+			 * For the lower dir, the old monitor is still left.
+			 */
+			if (err == -EEXIST)
+				err = 0;
+		}
+	}
+
+	AuTraceErr(err);
+	return err;
+}
+
+void au_hn_free(struct au_hinode *hinode)
+{
+	struct au_hnotify *hn;
+
+	hn = hinode->hi_notify;
+	if (hn) {
+		hinode->hi_notify = NULL;
+		if (au_hnotify_op.free(hinode, hn))
+			au_cache_free_hnotify(hn);
+	}
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_hn_ctl(struct au_hinode *hinode, int do_set)
+{
+	if (hinode->hi_notify)
+		au_hnotify_op.ctl(hinode, do_set);
+}
+
+void au_hn_reset(struct inode *inode, unsigned int flags)
+{
+	aufs_bindex_t bindex, bend;
+	struct inode *hi;
+	struct dentry *iwhdentry;
+
+	bend = au_ibend(inode);
+	for (bindex = au_ibstart(inode); bindex <= bend; bindex++) {
+		hi = au_h_iptr(inode, bindex);
+		if (!hi)
+			continue;
+
+		/* mutex_lock_nested(&hi->i_mutex, AuLsc_I_CHILD); */
+		iwhdentry = au_hi_wh(inode, bindex);
+		if (iwhdentry)
+			dget(iwhdentry);
+		au_igrab(hi);
+		au_set_h_iptr(inode, bindex, NULL, 0);
+		au_set_h_iptr(inode, bindex, au_igrab(hi),
+			      flags & ~AuHi_XINO);
+		iput(hi);
+		dput(iwhdentry);
+		/* mutex_unlock(&hi->i_mutex); */
+	}
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int hn_xino(struct inode *inode, struct inode *h_inode)
+{
+	int err;
+	aufs_bindex_t bindex, bend, bfound, bstart;
+	struct inode *h_i;
+
+	err = 0;
+	if (unlikely(inode->i_ino == AUFS_ROOT_INO)) {
+		pr_warn("branch root dir was changed\n");
+		goto out;
+	}
+
+	bfound = -1;
+	bend = au_ibend(inode);
+	bstart = au_ibstart(inode);
+#if 0 /* reserved for future use */
+	if (bindex == bend) {
+		/* keep this ino in rename case */
+		goto out;
+	}
+#endif
+	for (bindex = bstart; bindex <= bend; bindex++)
+		if (au_h_iptr(inode, bindex) == h_inode) {
+			bfound = bindex;
+			break;
+		}
+	if (bfound < 0)
+		goto out;
+
+	for (bindex = bstart; bindex <= bend; bindex++) {
+		h_i = au_h_iptr(inode, bindex);
+		if (!h_i)
+			continue;
+
+		err = au_xino_write(inode->i_sb, bindex, h_i->i_ino, /*ino*/0);
+		/* ignore this error */
+		/* bad action? */
+	}
+
+	/* the children's inode numbers will be broken */
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+static int hn_gen_tree(struct dentry *dentry)
+{
+	int err, i, j, ndentry;
+	struct au_dcsub_pages dpages;
+	struct au_dpage *dpage;
+	struct dentry **dentries;
+
+	err = au_dpages_init(&dpages, GFP_NOFS);
+	if (unlikely(err))
+		goto out;
+	err = au_dcsub_pages(&dpages, dentry, NULL, NULL);
+	if (unlikely(err))
+		goto out_dpages;
+
+	for (i = 0; i < dpages.ndpage; i++) {
+		dpage = dpages.dpages + i;
+		dentries = dpage->dentries;
+		ndentry = dpage->ndentry;
+		for (j = 0; j < ndentry; j++) {
+			struct dentry *d;
+
+			d = dentries[j];
+			if (IS_ROOT(d))
+				continue;
+
+			au_digen_dec(d);
+			if (d->d_inode)
+				/* todo: reset children xino?
+				   cached children only? */
+				au_iigen_dec(d->d_inode);
+		}
+	}
+
+out_dpages:
+	au_dpages_free(&dpages);
+
+#if 0
+	/* discard children */
+	dentry_unhash(dentry);
+	dput(dentry);
+#endif
+out:
+	return err;
+}
+
+/*
+ * return 0 if processed.
+ */
+static int hn_gen_by_inode(char *name, unsigned int nlen, struct inode *inode,
+			   const unsigned int isdir)
+{
+	int err;
+	struct dentry *d;
+	struct qstr *dname;
+
+	err = 1;
+	if (unlikely(inode->i_ino == AUFS_ROOT_INO)) {
+		pr_warn("branch root dir was changed\n");
+		err = 0;
+		goto out;
+	}
+
+	if (!isdir) {
+		AuDebugOn(!name);
+		au_iigen_dec(inode);
+		spin_lock(&inode->i_lock);
+		hlist_for_each_entry(d, &inode->i_dentry, d_alias) {
+			spin_lock(&d->d_lock);
+			dname = &d->d_name;
+			if (dname->len != nlen
+			    || memcmp(dname->name, name, nlen)) {
+				spin_unlock(&d->d_lock);
+				continue;
+			}
+			err = 0;
+			au_digen_dec(d);
+			spin_unlock(&d->d_lock);
+			break;
+		}
+		spin_unlock(&inode->i_lock);
+	} else {
+		au_fset_si(au_sbi(inode->i_sb), FAILED_REFRESH_DIR);
+		d = d_find_alias(inode);
+		if (!d) {
+			au_iigen_dec(inode);
+			goto out;
+		}
+
+		spin_lock(&d->d_lock);
+		dname = &d->d_name;
+		if (dname->len == nlen && !memcmp(dname->name, name, nlen)) {
+			spin_unlock(&d->d_lock);
+			err = hn_gen_tree(d);
+			spin_lock(&d->d_lock);
+		}
+		spin_unlock(&d->d_lock);
+		dput(d);
+	}
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+static int hn_gen_by_name(struct dentry *dentry, const unsigned int isdir)
+{
+	int err;
+	struct inode *inode;
+
+	inode = dentry->d_inode;
+	if (IS_ROOT(dentry)
+	    /* || (inode && inode->i_ino == AUFS_ROOT_INO) */
+		) {
+		pr_warn("branch root dir was changed\n");
+		return 0;
+	}
+
+	err = 0;
+	if (!isdir) {
+		au_digen_dec(dentry);
+		if (inode)
+			au_iigen_dec(inode);
+	} else {
+		au_fset_si(au_sbi(dentry->d_sb), FAILED_REFRESH_DIR);
+		if (inode)
+			err = hn_gen_tree(dentry);
+	}
+
+	AuTraceErr(err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* hnotify job flags */
+#define AuHnJob_XINO0		1
+#define AuHnJob_GEN		(1 << 1)
+#define AuHnJob_DIRENT		(1 << 2)
+#define AuHnJob_ISDIR		(1 << 3)
+#define AuHnJob_TRYXINO0	(1 << 4)
+#define AuHnJob_MNTPNT		(1 << 5)
+#define au_ftest_hnjob(flags, name)	((flags) & AuHnJob_##name)
+#define au_fset_hnjob(flags, name) \
+	do { (flags) |= AuHnJob_##name; } while (0)
+#define au_fclr_hnjob(flags, name) \
+	do { (flags) &= ~AuHnJob_##name; } while (0)
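+
+/*
+ * e.g. au_fset_hnjob(flags, GEN) sets the AuHnJob_GEN bit and
+ * au_ftest_hnjob(flags, GEN) tests it; hn_job() dispatches on each bit.
+ */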
+
+enum {
+	AuHn_CHILD,
+	AuHn_PARENT,
+	AuHnLast
+};
+
+struct au_hnotify_args {
+	struct inode *h_dir, *dir, *h_child_inode;
+	u32 mask;
+	unsigned int flags[AuHnLast];
+	unsigned int h_child_nlen;
+	char h_child_name[];
+};
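+/*
+ * h_child_name[] is a flexible array member; au_hnotify() allocates
+ * sizeof(*args) + h_child_nlen + 1 and copies the name in place.
+ */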
+
+struct hn_job_args {
+	unsigned int flags;
+	struct inode *inode, *h_inode, *dir, *h_dir;
+	struct dentry *dentry;
+	char *h_name;
+	int h_nlen;
+};
+
+static int hn_job(struct hn_job_args *a)
+{
+	const unsigned int isdir = au_ftest_hnjob(a->flags, ISDIR);
+
+	/* reset xino */
+	if (au_ftest_hnjob(a->flags, XINO0) && a->inode)
+		hn_xino(a->inode, a->h_inode); /* ignore this error */
+
+	if (au_ftest_hnjob(a->flags, TRYXINO0)
+	    && a->inode
+	    && a->h_inode) {
+		mutex_lock_nested(&a->h_inode->i_mutex, AuLsc_I_CHILD);
+		if (!a->h_inode->i_nlink)
+			hn_xino(a->inode, a->h_inode); /* ignore this error */
+		mutex_unlock(&a->h_inode->i_mutex);
+	}
+
+	/* make the generation obsolete */
+	if (au_ftest_hnjob(a->flags, GEN)) {
+		int err = -1;
+		if (a->inode)
+			err = hn_gen_by_inode(a->h_name, a->h_nlen, a->inode,
+					      isdir);
+		if (err && a->dentry)
+			hn_gen_by_name(a->dentry, isdir);
+		/* ignore this error */
+	}
+
+	/* make dir entries obsolete */
+	if (au_ftest_hnjob(a->flags, DIRENT) && a->inode) {
+		struct au_vdir *vdir;
+
+		vdir = au_ivdir(a->inode);
+		if (vdir)
+			vdir->vd_jiffy = 0;
+		/* IMustLock(a->inode); */
+		/* a->inode->i_version++; */
+	}
+
+	/* can do nothing but warn */
+	if (au_ftest_hnjob(a->flags, MNTPNT)
+	    && a->dentry
+	    && d_mountpoint(a->dentry))
+		pr_warn("mount-point %.*s is removed or renamed\n",
+			AuDLNPair(a->dentry));
+
+	return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct dentry *lookup_wlock_by_name(char *name, unsigned int nlen,
+					   struct inode *dir)
+{
+	struct dentry *dentry, *d, *parent;
+	struct qstr *dname;
+
+	parent = d_find_alias(dir);
+	if (!parent)
+		return NULL;
+
+	dentry = NULL;
+	spin_lock(&parent->d_lock);
+	list_for_each_entry(d, &parent->d_subdirs, d_u.d_child) {
+		/* AuDbg("%.*s\n", AuDLNPair(d)); */
+		spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
+		dname = &d->d_name;
+		if (dname->len != nlen || memcmp(dname->name, name, nlen))
+			goto cont_unlock;
+		if (au_di(d))
+			au_digen_dec(d);
+		else
+			goto cont_unlock;
+		if (d->d_count) {
+			dentry = dget_dlock(d);
+			spin_unlock(&d->d_lock);
+			break;
+		}
+
+	cont_unlock:
+		spin_unlock(&d->d_lock);
+	}
+	spin_unlock(&parent->d_lock);
+	dput(parent);
+
+	if (dentry)
+		di_write_lock_child(dentry);
+
+	return dentry;
+}
+
+static struct inode *lookup_wlock_by_ino(struct super_block *sb,
+					 aufs_bindex_t bindex, ino_t h_ino)
+{
+	struct inode *inode;
+	ino_t ino;
+	int err;
+
+	inode = NULL;
+	err = au_xino_read(sb, bindex, h_ino, &ino);
+	if (!err && ino)
+		inode = ilookup(sb, ino);
+	if (!inode)
+		goto out;
+
+	if (unlikely(inode->i_ino == AUFS_ROOT_INO)) {
+		pr_warn("wrong root branch\n");
+		iput(inode);
+		inode = NULL;
+		goto out;
+	}
+
+	ii_write_lock_child(inode);
+
+out:
+	return inode;
+}
+
+static void au_hn_bh(void *_args)
+{
+	struct au_hnotify_args *a = _args;
+	struct super_block *sb;
+	aufs_bindex_t bindex, bend, bfound;
+	unsigned char xino, try_iput;
+	int err;
+	struct inode *inode;
+	ino_t h_ino;
+	struct hn_job_args args;
+	struct dentry *dentry;
+	struct au_sbinfo *sbinfo;
+
+	AuDebugOn(!_args);
+	AuDebugOn(!a->h_dir);
+	AuDebugOn(!a->dir);
+	AuDebugOn(!a->mask);
+	AuDbg("mask 0x%x, i%lu, hi%lu, hci%lu\n",
+	      a->mask, a->dir->i_ino, a->h_dir->i_ino,
+	      a->h_child_inode ? a->h_child_inode->i_ino : 0);
+
+	inode = NULL;
+	dentry = NULL;
+	/*
+	 * do not lock a->dir->i_mutex here
+	 * because d_revalidate() may cause a deadlock.
+	 */
+	sb = a->dir->i_sb;
+	AuDebugOn(!sb);
+	sbinfo = au_sbi(sb);
+	AuDebugOn(!sbinfo);
+	si_write_lock(sb, AuLock_NOPLMW);
+
+	ii_read_lock_parent(a->dir);
+	bfound = -1;
+	bend = au_ibend(a->dir);
+	for (bindex = au_ibstart(a->dir); bindex <= bend; bindex++)
+		if (au_h_iptr(a->dir, bindex) == a->h_dir) {
+			bfound = bindex;
+			break;
+		}
+	ii_read_unlock(a->dir);
+	if (unlikely(bfound < 0))
+		goto out;
+
+	xino = !!au_opt_test(au_mntflags(sb), XINO);
+	h_ino = 0;
+	if (a->h_child_inode)
+		h_ino = a->h_child_inode->i_ino;
+
+	if (a->h_child_nlen
+	    && (au_ftest_hnjob(a->flags[AuHn_CHILD], GEN)
+		|| au_ftest_hnjob(a->flags[AuHn_CHILD], MNTPNT)))
+		dentry = lookup_wlock_by_name(a->h_child_name, a->h_child_nlen,
+					      a->dir);
+	try_iput = 0;
+	if (dentry)
+		inode = dentry->d_inode;
+	if (xino && !inode && h_ino
+	    && (au_ftest_hnjob(a->flags[AuHn_CHILD], XINO0)
+		|| au_ftest_hnjob(a->flags[AuHn_CHILD], TRYXINO0)
+		|| au_ftest_hnjob(a->flags[AuHn_CHILD], GEN))) {
+		inode = lookup_wlock_by_ino(sb, bfound, h_ino);
+		try_iput = 1;
+	}
+
+	args.flags = a->flags[AuHn_CHILD];
+	args.dentry = dentry;
+	args.inode = inode;
+	args.h_inode = a->h_child_inode;
+	args.dir = a->dir;
+	args.h_dir = a->h_dir;
+	args.h_name = a->h_child_name;
+	args.h_nlen = a->h_child_nlen;
+	err = hn_job(&args);
+	if (dentry) {
+		if (au_di(dentry))
+			di_write_unlock(dentry);
+		dput(dentry);
+	}
+	if (inode && try_iput) {
+		ii_write_unlock(inode);
+		iput(inode);
+	}
+
+	ii_write_lock_parent(a->dir);
+	args.flags = a->flags[AuHn_PARENT];
+	args.dentry = NULL;
+	args.inode = a->dir;
+	args.h_inode = a->h_dir;
+	args.dir = NULL;
+	args.h_dir = NULL;
+	args.h_name = NULL;
+	args.h_nlen = 0;
+	err = hn_job(&args);
+	ii_write_unlock(a->dir);
+
+out:
+	iput(a->h_child_inode);
+	iput(a->h_dir);
+	iput(a->dir);
+	si_write_unlock(sb);
+	au_nwt_done(&sbinfo->si_nowait);
+	kfree(a);
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_hnotify(struct inode *h_dir, struct au_hnotify *hnotify, u32 mask,
+	       struct qstr *h_child_qstr, struct inode *h_child_inode)
+{
+	int err, len;
+	unsigned int flags[AuHnLast], f;
+	unsigned char isdir, isroot, wh;
+	struct inode *dir;
+	struct au_hnotify_args *args;
+	char *p, *h_child_name;
+
+	err = 0;
+	AuDebugOn(!hnotify || !hnotify->hn_aufs_inode);
+	dir = igrab(hnotify->hn_aufs_inode);
+	if (!dir)
+		goto out;
+
+	isroot = (dir->i_ino == AUFS_ROOT_INO);
+	wh = 0;
+	h_child_name = (void *)h_child_qstr->name;
+	len = h_child_qstr->len;
+	if (h_child_name) {
+		if (len > AUFS_WH_PFX_LEN
+		    && !memcmp(h_child_name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) {
+			h_child_name += AUFS_WH_PFX_LEN;
+			len -= AUFS_WH_PFX_LEN;
+			wh = 1;
+		}
+	}
+
+	isdir = 0;
+	if (h_child_inode)
+		isdir = !!S_ISDIR(h_child_inode->i_mode);
+	flags[AuHn_PARENT] = AuHnJob_ISDIR;
+	flags[AuHn_CHILD] = 0;
+	if (isdir)
+		flags[AuHn_CHILD] = AuHnJob_ISDIR;
+	au_fset_hnjob(flags[AuHn_PARENT], DIRENT);
+	au_fset_hnjob(flags[AuHn_CHILD], GEN);
+	switch (mask & FS_EVENTS_POSS_ON_CHILD) {
+	case FS_MOVED_FROM:
+	case FS_MOVED_TO:
+		au_fset_hnjob(flags[AuHn_CHILD], XINO0);
+		au_fset_hnjob(flags[AuHn_CHILD], MNTPNT);
+		/*FALLTHROUGH*/
+	case FS_CREATE:
+		AuDebugOn(!h_child_name || !h_child_inode);
+		break;
+
+	case FS_DELETE:
+		/*
+		 * aufs will never be able to get this child inode.
+		 * revalidation should be done in d_revalidate()
+		 * by checking i_nlink, i_generation or d_unhashed().
+		 */
+		AuDebugOn(!h_child_name);
+		au_fset_hnjob(flags[AuHn_CHILD], TRYXINO0);
+		au_fset_hnjob(flags[AuHn_CHILD], MNTPNT);
+		break;
+
+	default:
+		AuDebugOn(1);
+	}
+
+	if (wh)
+		h_child_inode = NULL;
+
+	err = -ENOMEM;
+	/* iput() and kfree() happen in au_hn_bh(), or just below on error */
+	args = kmalloc(sizeof(*args) + len + 1, GFP_NOFS);
+	if (unlikely(!args)) {
+		AuErr1("no memory\n");
+		iput(dir);
+		goto out;
+	}
+	args->flags[AuHn_PARENT] = flags[AuHn_PARENT];
+	args->flags[AuHn_CHILD] = flags[AuHn_CHILD];
+	args->mask = mask;
+	args->dir = dir;
+	args->h_dir = igrab(h_dir);
+	if (h_child_inode)
+		h_child_inode = igrab(h_child_inode); /* can be NULL */
+	args->h_child_inode = h_child_inode;
+	args->h_child_nlen = len;
+	if (len) {
+		p = (void *)args;
+		p += sizeof(*args);
+		memcpy(p, h_child_name, len);
+		p[len] = 0;
+	}
+
+	f = 0;
+	if (!dir->i_nlink)
+		f = AuWkq_NEST;
+	err = au_wkq_nowait(au_hn_bh, args, dir->i_sb, f);
+	if (unlikely(err)) {
+		pr_err("wkq %d\n", err);
+		iput(args->h_child_inode);
+		iput(args->h_dir);
+		iput(args->dir);
+		kfree(args);
+	}
+
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_hnotify_reset_br(unsigned int udba, struct au_branch *br, int perm)
+{
+	int err;
+
+	AuDebugOn(!(udba & AuOptMask_UDBA));
+
+	err = 0;
+	if (au_hnotify_op.reset_br)
+		err = au_hnotify_op.reset_br(udba, br, perm);
+
+	return err;
+}
+
+int au_hnotify_init_br(struct au_branch *br, int perm)
+{
+	int err;
+
+	err = 0;
+	if (au_hnotify_op.init_br)
+		err = au_hnotify_op.init_br(br, perm);
+
+	return err;
+}
+
+void au_hnotify_fin_br(struct au_branch *br)
+{
+	if (au_hnotify_op.fin_br)
+		au_hnotify_op.fin_br(br);
+}
+
+static void au_hn_destroy_cache(void)
+{
+	kmem_cache_destroy(au_cachep[AuCache_HNOTIFY]);
+	au_cachep[AuCache_HNOTIFY] = NULL;
+}
+
+int __init au_hnotify_init(void)
+{
+	int err;
+
+	err = -ENOMEM;
+	au_cachep[AuCache_HNOTIFY] = AuCache(au_hnotify);
+	if (au_cachep[AuCache_HNOTIFY]) {
+		err = 0;
+		if (au_hnotify_op.init)
+			err = au_hnotify_op.init();
+		if (unlikely(err))
+			au_hn_destroy_cache();
+	}
+	AuTraceErr(err);
+	return err;
+}
+
+void au_hnotify_fin(void)
+{
+	if (au_hnotify_op.fin)
+		au_hnotify_op.fin();
+	/* cf. au_cache_fin() */
+	if (au_cachep[AuCache_HNOTIFY])
+		au_hn_destroy_cache();
+}
diff --git a/fs/aufs/i_op.c b/fs/aufs/i_op.c
new file mode 100644
index 0000000..d82899a
--- /dev/null
+++ b/fs/aufs/i_op.c
@@ -0,0 +1,1115 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * inode operations (except add/del/rename)
+ */
+
+#include <linux/device_cgroup.h>
+#include <linux/fs_stack.h>
+#include <linux/mm.h>
+#include <linux/namei.h>
+#include <linux/security.h>
+#include "aufs.h"
+
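+/*
+ * mirror the VFS permission sequence for a lower inode: aufs' own
+ * immutable/noexec checks first, then the lower ->permission() (or
+ * generic_permission()), and finally the devcgroup and security hooks.
+ */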
+static int h_permission(struct inode *h_inode, int mask,
+			struct vfsmount *h_mnt, int brperm)
+{
+	int err;
+	const unsigned char write_mask = !!(mask & (MAY_WRITE | MAY_APPEND));
+
+	err = -EACCES;
+	if ((write_mask && IS_IMMUTABLE(h_inode))
+	    || ((mask & MAY_EXEC)
+		&& S_ISREG(h_inode->i_mode)
+		&& ((h_mnt->mnt_flags & MNT_NOEXEC)
+		    || !(h_inode->i_mode & S_IXUGO))))
+		goto out;
+
+	/*
+	 * - skip the lower fs test in the case of write to ro branch.
+	 * - nfs dir permission write check is optimized, but a policy for
+	 *   link/rename requires a real check.
+	 */
+	if ((write_mask && !au_br_writable(brperm))
+	    || (au_test_nfs(h_inode->i_sb) && S_ISDIR(h_inode->i_mode)
+		&& write_mask && !(mask & MAY_READ))
+	    || !h_inode->i_op->permission) {
+		/* AuLabel(generic_permission); */
+		err = generic_permission(h_inode, mask);
+	} else {
+		/* AuLabel(h_inode->permission); */
+		err = h_inode->i_op->permission(h_inode, mask);
+		AuTraceErr(err);
+	}
+
+	if (!err)
+		err = devcgroup_inode_permission(h_inode, mask);
+	if (!err)
+		err = security_inode_permission(h_inode, mask);
+
+#if 0
+	if (!err) {
+		/* todo: do we need to call ima_path_check()? */
+		struct path h_path = {
+			.dentry	=
+			.mnt	= h_mnt
+		};
+		err = ima_path_check(&h_path,
+				     mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
+				     IMA_COUNT_LEAVE);
+	}
+#endif
+
+out:
+	return err;
+}
+
+static int aufs_permission(struct inode *inode, int mask)
+{
+	int err;
+	aufs_bindex_t bindex, bend;
+	const unsigned char isdir = !!S_ISDIR(inode->i_mode),
+		write_mask = !!(mask & (MAY_WRITE | MAY_APPEND));
+	struct inode *h_inode;
+	struct super_block *sb;
+	struct au_branch *br;
+
+	/* todo: support rcu-walk? */
+	if (mask & MAY_NOT_BLOCK)
+		return -ECHILD;
+
+	sb = inode->i_sb;
+	si_read_lock(sb, AuLock_FLUSH);
+	ii_read_lock_child(inode);
+#if 0
+	err = au_iigen_test(inode, au_sigen(sb));
+	if (unlikely(err))
+		goto out;
+#endif
+
+	if (!isdir || write_mask) {
+		err = au_busy_or_stale();
+		h_inode = au_h_iptr(inode, au_ibstart(inode));
+		if (unlikely(!h_inode
+			     || (h_inode->i_mode & S_IFMT)
+			     != (inode->i_mode & S_IFMT)))
+			goto out;
+
+		err = 0;
+		bindex = au_ibstart(inode);
+		br = au_sbr(sb, bindex);
+		err = h_permission(h_inode, mask, au_br_mnt(br), br->br_perm);
+		if (write_mask
+		    && !err
+		    && !special_file(h_inode->i_mode)) {
+			/* test whether the upper writable branch exists */
+			err = -EROFS;
+			for (; bindex >= 0; bindex--)
+				if (!au_br_rdonly(au_sbr(sb, bindex))) {
+					err = 0;
+					break;
+				}
+		}
+		goto out;
+	}
+
+	/* non-write to dir */
+	err = 0;
+	bend = au_ibend(inode);
+	for (bindex = au_ibstart(inode); !err && bindex <= bend; bindex++) {
+		h_inode = au_h_iptr(inode, bindex);
+		if (h_inode) {
+			err = au_busy_or_stale();
+			if (unlikely(!S_ISDIR(h_inode->i_mode)))
+				break;
+
+			br = au_sbr(sb, bindex);
+			err = h_permission(h_inode, mask, au_br_mnt(br),
+					   br->br_perm);
+		}
+	}
+
+out:
+	ii_read_unlock(inode);
+	si_read_unlock(sb);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct dentry *aufs_lookup(struct inode *dir, struct dentry *dentry,
+				  unsigned int flags)
+{
+	struct dentry *ret, *parent;
+	struct inode *inode;
+	struct super_block *sb;
+	int err, npositive;
+
+	IMustLock(dir);
+
+	/* todo: support rcu-walk? */
+	ret = ERR_PTR(-ECHILD);
+	if (flags & LOOKUP_RCU)
+		goto out;
+
+	ret = ERR_PTR(-ENAMETOOLONG);
+	if (unlikely(dentry->d_name.len > AUFS_MAX_NAMELEN))
+		goto out;
+
+	sb = dir->i_sb;
+	err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+	ret = ERR_PTR(err);
+	if (unlikely(err))
+		goto out;
+
+	err = au_di_init(dentry);
+	ret = ERR_PTR(err);
+	if (unlikely(err))
+		goto out_si;
+
+	inode = NULL;
+	npositive = 0; /* suppress a warning */
+	parent = dentry->d_parent; /* dir inode is locked */
+	di_read_lock_parent(parent, AuLock_IR);
+	err = au_alive_dir(parent);
+	if (!err)
+		err = au_digen_test(parent, au_sigen(sb));
+	if (!err) {
+		npositive = au_lkup_dentry(dentry, au_dbstart(parent),
+					   /*type*/0);
+		err = npositive;
+	}
+	di_read_unlock(parent, AuLock_IR);
+	ret = ERR_PTR(err);
+	if (unlikely(err < 0))
+		goto out_unlock;
+
+	if (npositive) {
+		inode = au_new_inode(dentry, /*must_new*/0);
+		ret = (void *)inode;
+	}
+	if (IS_ERR(inode)) {
+		inode = NULL;
+		goto out_unlock;
+	}
+
+	ret = d_splice_alias(inode, dentry);
+#if 0
+	if (unlikely(d_need_lookup(dentry))) {
+		spin_lock(&dentry->d_lock);
+		dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
+		spin_unlock(&dentry->d_lock);
+	} else
+#endif
+	if (unlikely(IS_ERR(ret) && inode)) {
+		ii_write_unlock(inode);
+		iput(inode);
+		inode = NULL;
+	}
+
+out_unlock:
+	di_write_unlock(dentry);
+	if (inode) {
+		/* verbose coding for lock class name */
+		if (unlikely(S_ISLNK(inode->i_mode)))
+			au_rw_class(&au_di(dentry)->di_rwsem,
+				    au_lc_key + AuLcSymlink_DIINFO);
+		else if (unlikely(S_ISDIR(inode->i_mode)))
+			au_rw_class(&au_di(dentry)->di_rwsem,
+				    au_lc_key + AuLcDir_DIINFO);
+		else /* likely */
+			au_rw_class(&au_di(dentry)->di_rwsem,
+				    au_lc_key + AuLcNonDir_DIINFO);
+	}
+out_si:
+	si_read_unlock(sb);
+out:
+	return ret;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_wr_dir_cpup(struct dentry *dentry, struct dentry *parent,
+			  const unsigned char add_entry, aufs_bindex_t bcpup,
+			  aufs_bindex_t bstart)
+{
+	int err;
+	struct dentry *h_parent;
+	struct inode *h_dir;
+
+	if (add_entry)
+		IMustLock(parent->d_inode);
+	else
+		di_write_lock_parent(parent);
+
+	err = 0;
+	if (!au_h_dptr(parent, bcpup)) {
+		if (bstart > bcpup)
+			err = au_cpup_dirs(dentry, bcpup);
+		else if (bstart < bcpup)
+			err = au_cpdown_dirs(dentry, bcpup);
+		else
+			BUG();
+	}
+	if (!err && add_entry) {
+		h_parent = au_h_dptr(parent, bcpup);
+		h_dir = h_parent->d_inode;
+		mutex_lock_nested(&h_dir->i_mutex, AuLsc_I_PARENT);
+		err = au_lkup_neg(dentry, bcpup,
+				  au_ftest_wrdir(add_entry, TMP_WHENTRY));
+		/* todo: no unlock here */
+		mutex_unlock(&h_dir->i_mutex);
+
+		AuDbg("bcpup %d\n", bcpup);
+		if (!err) {
+			if (!dentry->d_inode)
+				au_set_h_dptr(dentry, bstart, NULL);
+			au_update_dbrange(dentry, /*do_put_zero*/0);
+		}
+	}
+
+	if (!add_entry)
+		di_write_unlock(parent);
+	if (!err)
+		err = bcpup; /* success */
+
+	AuTraceErr(err);
+	return err;
+}
+
+/*
+ * decide the branch and the parent dir where we will create a new entry.
+ * returns new bindex or an error.
+ * copyup the parent dir if needed.
+ */
+int au_wr_dir(struct dentry *dentry, struct dentry *src_dentry,
+	      struct au_wr_dir_args *args)
+{
+	int err;
+	aufs_bindex_t bcpup, bstart, src_bstart;
+	const unsigned char add_entry
+		= au_ftest_wrdir(args->flags, ADD_ENTRY)
+		| au_ftest_wrdir(args->flags, TMP_WHENTRY);
+	struct super_block *sb;
+	struct dentry *parent;
+	struct au_sbinfo *sbinfo;
+
+	sb = dentry->d_sb;
+	sbinfo = au_sbi(sb);
+	parent = dget_parent(dentry);
+	bstart = au_dbstart(dentry);
+	bcpup = bstart;
+	if (args->force_btgt < 0) {
+		if (src_dentry) {
+			src_bstart = au_dbstart(src_dentry);
+			if (src_bstart < bstart)
+				bcpup = src_bstart;
+		} else if (add_entry) {
+			err = AuWbrCreate(sbinfo, dentry,
+					  au_ftest_wrdir(args->flags, ISDIR));
+			bcpup = err;
+		}
+
+		if (bcpup < 0 || au_test_ro(sb, bcpup, dentry->d_inode)) {
+			if (add_entry)
+				err = AuWbrCopyup(sbinfo, dentry);
+			else {
+				if (!IS_ROOT(dentry)) {
+					di_read_lock_parent(parent, !AuLock_IR);
+					err = AuWbrCopyup(sbinfo, dentry);
+					di_read_unlock(parent, !AuLock_IR);
+				} else
+					err = AuWbrCopyup(sbinfo, dentry);
+			}
+			bcpup = err;
+			if (unlikely(err < 0))
+				goto out;
+		}
+	} else {
+		bcpup = args->force_btgt;
+		AuDebugOn(au_test_ro(sb, bcpup, dentry->d_inode));
+	}
+
+	AuDbg("bstart %d, bcpup %d\n", bstart, bcpup);
+	err = bcpup;
+	if (bcpup == bstart)
+		goto out; /* success */
+
+	/* copyup the new parent into the branch we process */
+	err = au_wr_dir_cpup(dentry, parent, add_entry, bcpup, bstart);
+	if (err >= 0) {
+		if (!dentry->d_inode) {
+			au_set_h_dptr(dentry, bstart, NULL);
+			au_set_dbstart(dentry, bcpup);
+			au_set_dbend(dentry, bcpup);
+		}
+		AuDebugOn(add_entry && !au_h_dptr(dentry, bcpup));
+	}
+
+out:
+	dput(parent);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_pin_hdir_unlock(struct au_pin *p)
+{
+	if (p->hdir)
+		au_hn_imtx_unlock(p->hdir);
+}
+
+static int au_pin_hdir_lock(struct au_pin *p)
+{
+	int err;
+
+	err = 0;
+	if (!p->hdir)
+		goto out;
+
+	/* even if an error happens later, keep this lock */
+	au_hn_imtx_lock_nested(p->hdir, p->lsc_hi);
+
+	err = -EBUSY;
+	if (unlikely(p->hdir->hi_inode != p->h_parent->d_inode))
+		goto out;
+
+	err = 0;
+	if (p->h_dentry)
+		err = au_h_verify(p->h_dentry, p->udba, p->hdir->hi_inode,
+				  p->h_parent, p->br);
+
+out:
+	return err;
+}
+
+int au_pin_hdir_relock(struct au_pin *p)
+{
+	int err, i;
+	struct inode *h_i;
+	struct dentry *h_d[] = {
+		p->h_dentry,
+		p->h_parent
+	};
+
+	err = au_pin_hdir_lock(p);
+	if (unlikely(err))
+		goto out;
+
+	for (i = 0; !err && i < ARRAY_SIZE(h_d); i++) {
+		if (!h_d[i])
+			continue;
+		h_i = h_d[i]->d_inode;
+		if (h_i)
+			err = !h_i->i_nlink;
+	}
+
+out:
+	return err;
+}
+
+void au_pin_hdir_set_owner(struct au_pin *p, struct task_struct *task)
+{
+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
+	p->hdir->hi_inode->i_mutex.owner = task;
+#endif
+}
+
+void au_pin_hdir_acquire_nest(struct au_pin *p)
+{
+	if (p->hdir) {
+		mutex_acquire_nest(&p->hdir->hi_inode->i_mutex.dep_map,
+				   p->lsc_hi, 0, NULL, _RET_IP_);
+		au_pin_hdir_set_owner(p, current);
+	}
+}
+
+void au_pin_hdir_release(struct au_pin *p)
+{
+	if (p->hdir) {
+		au_pin_hdir_set_owner(p, p->task);
+		mutex_release(&p->hdir->hi_inode->i_mutex.dep_map, 1, _RET_IP_);
+	}
+}
+
+struct dentry *au_pinned_h_parent(struct au_pin *pin)
+{
+	if (pin && pin->parent)
+		return au_h_dptr(pin->parent, pin->bindex);
+	return NULL;
+}
+
+void au_unpin(struct au_pin *p)
+{
+	if (p->hdir)
+		au_pin_hdir_unlock(p);
+	if (p->h_mnt && au_ftest_pin(p->flags, MNT_WRITE))
+		vfsub_mnt_drop_write(p->h_mnt);
+	if (!p->hdir)
+		return;
+
+	if (!au_ftest_pin(p->flags, DI_LOCKED))
+		di_read_unlock(p->parent, AuLock_IR);
+	iput(p->hdir->hi_inode);
+	dput(p->parent);
+	p->parent = NULL;
+	p->hdir = NULL;
+	p->h_mnt = NULL;
+	/* do not clear p->task */
+}
+
+int au_do_pin(struct au_pin *p)
+{
+	int err;
+	struct super_block *sb;
+	struct inode *h_dir;
+
+	err = 0;
+	sb = p->dentry->d_sb;
+	p->br = au_sbr(sb, p->bindex);
+	if (IS_ROOT(p->dentry)) {
+		if (au_ftest_pin(p->flags, MNT_WRITE)) {
+			p->h_mnt = au_br_mnt(p->br);
+			err = vfsub_mnt_want_write(p->h_mnt);
+			if (unlikely(err)) {
+				au_fclr_pin(p->flags, MNT_WRITE);
+				goto out_err;
+			}
+		}
+		goto out;
+	}
+
+	p->h_dentry = NULL;
+	if (p->bindex <= au_dbend(p->dentry))
+		p->h_dentry = au_h_dptr(p->dentry, p->bindex);
+
+	p->parent = dget_parent(p->dentry);
+	if (!au_ftest_pin(p->flags, DI_LOCKED))
+		di_read_lock(p->parent, AuLock_IR, p->lsc_di);
+
+	h_dir = NULL;
+	p->h_parent = au_h_dptr(p->parent, p->bindex);
+	p->hdir = au_hi(p->parent->d_inode, p->bindex);
+	if (p->hdir)
+		h_dir = p->hdir->hi_inode;
+
+	/*
+	 * in the udba case, or when DI_LOCKED is not set, p->parent may have
+	 * changed and h_parent can be NULL.
+	 */
+	if (unlikely(!p->hdir || !h_dir || !p->h_parent)) {
+		err = -EBUSY;
+		if (!au_ftest_pin(p->flags, DI_LOCKED))
+			di_read_unlock(p->parent, AuLock_IR);
+		dput(p->parent);
+		p->parent = NULL;
+		goto out_err;
+	}
+
+	if (au_ftest_pin(p->flags, MNT_WRITE)) {
+		p->h_mnt = au_br_mnt(p->br);
+		err = vfsub_mnt_want_write(p->h_mnt);
+		if (unlikely(err)) {
+			au_fclr_pin(p->flags, MNT_WRITE);
+			if (!au_ftest_pin(p->flags, DI_LOCKED))
+				di_read_unlock(p->parent, AuLock_IR);
+			dput(p->parent);
+			p->parent = NULL;
+			goto out_err;
+		}
+	}
+
+	au_igrab(h_dir);
+	err = au_pin_hdir_lock(p);
+	if (!err)
+		goto out; /* success */
+
+out_err:
+	pr_err("err %d\n", err);
+	err = au_busy_or_stale();
+out:
+	return err;
+}
+
+void au_pin_init(struct au_pin *p, struct dentry *dentry,
+		 aufs_bindex_t bindex, int lsc_di, int lsc_hi,
+		 unsigned int udba, unsigned char flags)
+{
+	p->dentry = dentry;
+	p->udba = udba;
+	p->lsc_di = lsc_di;
+	p->lsc_hi = lsc_hi;
+	p->flags = flags;
+	p->bindex = bindex;
+
+	p->parent = NULL;
+	p->hdir = NULL;
+	p->h_mnt = NULL;
+
+	p->h_dentry = NULL;
+	p->h_parent = NULL;
+	p->br = NULL;
+	p->task = current;
+}
+
+int au_pin(struct au_pin *pin, struct dentry *dentry, aufs_bindex_t bindex,
+	   unsigned int udba, unsigned char flags)
+{
+	au_pin_init(pin, dentry, bindex, AuLsc_DI_PARENT, AuLsc_I_PARENT2,
+		    udba, flags);
+	return au_do_pin(pin);
+}
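+
+/*
+ * A typical pattern (sketch only; variable names are illustrative, the real
+ * call sites live elsewhere in aufs): pin the parent dir on the target
+ * branch, operate on the lower fs, then release with au_unpin().
+ *
+ *	err = au_pin(&pin, dentry, bcpup, udba, AuPin_MNT_WRITE);
+ *	if (!err) {
+ *		... modify the lower filesystem ...
+ *		au_unpin(&pin);
+ *	}
+ */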
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * ->setattr() and ->getattr() are called in various cases.
+ * chmod, stat: dentry is revalidated.
+ * fchmod, fstat: file and dentry are not revalidated, additionally they may be
+ *		  unhashed.
+ * for ->setattr(), ia->ia_file is passed from ftruncate only.
+ */
+/* todo: consolidate with do_refresh() and simple_reval_dpath() */
+static int au_reval_for_attr(struct dentry *dentry, unsigned int sigen)
+{
+	int err;
+	struct inode *inode;
+	struct dentry *parent;
+
+	err = 0;
+	inode = dentry->d_inode;
+	if (au_digen_test(dentry, sigen)) {
+		parent = dget_parent(dentry);
+		di_read_lock_parent(parent, AuLock_IR);
+		err = au_refresh_dentry(dentry, parent);
+		di_read_unlock(parent, AuLock_IR);
+		dput(parent);
+	}
+
+	AuTraceErr(err);
+	return err;
+}
+
+#define AuIcpup_DID_CPUP	1
+#define au_ftest_icpup(flags, name)	((flags) & AuIcpup_##name)
+#define au_fset_icpup(flags, name) \
+	do { (flags) |= AuIcpup_##name; } while (0)
+#define au_fclr_icpup(flags, name) \
+	do { (flags) &= ~AuIcpup_##name; } while (0)
+
+struct au_icpup_args {
+	unsigned char flags;
+	unsigned char pin_flags;
+	aufs_bindex_t btgt;
+	unsigned int udba;
+	struct au_pin pin;
+	struct path h_path;
+	struct inode *h_inode;
+};
+
+static int au_pin_and_icpup(struct dentry *dentry, struct iattr *ia,
+			    struct au_icpup_args *a)
+{
+	int err;
+	loff_t sz;
+	aufs_bindex_t bstart, ibstart;
+	struct dentry *hi_wh, *parent;
+	struct inode *inode;
+	struct au_wr_dir_args wr_dir_args = {
+		.force_btgt	= -1,
+		.flags		= 0
+	};
+
+	bstart = au_dbstart(dentry);
+	inode = dentry->d_inode;
+	if (S_ISDIR(inode->i_mode))
+		au_fset_wrdir(wr_dir_args.flags, ISDIR);
+	/* plink or hi_wh() case */
+	ibstart = au_ibstart(inode);
+	if (bstart != ibstart && !au_test_ro(inode->i_sb, ibstart, inode))
+		wr_dir_args.force_btgt = ibstart;
+	err = au_wr_dir(dentry, /*src_dentry*/NULL, &wr_dir_args);
+	if (unlikely(err < 0))
+		goto out;
+	a->btgt = err;
+	if (err != bstart)
+		au_fset_icpup(a->flags, DID_CPUP);
+
+	err = 0;
+	a->pin_flags = AuPin_MNT_WRITE;
+	parent = NULL;
+	if (!IS_ROOT(dentry)) {
+		au_fset_pin(a->pin_flags, DI_LOCKED);
+		parent = dget_parent(dentry);
+		di_write_lock_parent(parent);
+	}
+
+	err = au_pin(&a->pin, dentry, a->btgt, a->udba, a->pin_flags);
+	if (unlikely(err))
+		goto out_parent;
+
+	a->h_path.dentry = au_h_dptr(dentry, bstart);
+	a->h_inode = a->h_path.dentry->d_inode;
+	mutex_lock_nested(&a->h_inode->i_mutex, AuLsc_I_CHILD);
+	sz = -1;
+	if ((ia->ia_valid & ATTR_SIZE) && ia->ia_size < i_size_read(a->h_inode))
+		sz = ia->ia_size;
+	mutex_unlock(&a->h_inode->i_mutex);
+
+	hi_wh = NULL;
+	if (au_ftest_icpup(a->flags, DID_CPUP) && d_unlinked(dentry)) {
+		hi_wh = au_hi_wh(inode, a->btgt);
+		if (!hi_wh) {
+			struct au_cp_generic cpg = {
+				.dentry	= dentry,
+				.bdst	= a->btgt,
+				.bsrc	= -1,
+				.len	= sz,
+				.pin	= &a->pin
+			};
+			err = au_sio_cpup_wh(&cpg, /*file*/NULL);
+			if (unlikely(err))
+				goto out_unlock;
+			hi_wh = au_hi_wh(inode, a->btgt);
+			/* todo: revalidate hi_wh? */
+		}
+	}
+
+	if (parent) {
+		au_pin_set_parent_lflag(&a->pin, /*lflag*/0);
+		di_downgrade_lock(parent, AuLock_IR);
+		dput(parent);
+		parent = NULL;
+	}
+	if (!au_ftest_icpup(a->flags, DID_CPUP))
+		goto out; /* success */
+
+	if (!d_unhashed(dentry)) {
+		struct au_cp_generic cpg = {
+			.dentry	= dentry,
+			.bdst	= a->btgt,
+			.bsrc	= bstart,
+			.len	= sz,
+			.pin	= &a->pin,
+			.flags	= AuCpup_DTIME | AuCpup_HOPEN
+		};
+		err = au_sio_cpup_simple(&cpg);
+		if (!err)
+			a->h_path.dentry = au_h_dptr(dentry, a->btgt);
+	} else if (!hi_wh)
+		a->h_path.dentry = au_h_dptr(dentry, a->btgt);
+	else
+		a->h_path.dentry = hi_wh; /* do not dget here */
+
+out_unlock:
+	a->h_inode = a->h_path.dentry->d_inode;
+	if (!err)
+		goto out; /* success */
+	au_unpin(&a->pin);
+out_parent:
+	if (parent) {
+		di_write_unlock(parent);
+		dput(parent);
+	}
+out:
+	if (!err)
+		mutex_lock_nested(&a->h_inode->i_mutex, AuLsc_I_CHILD);
+	return err;
+}
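+/*
+ * note: on success au_pin_and_icpup() returns with a->h_inode->i_mutex held
+ * (AuLsc_I_CHILD); aufs_setattr() below releases it at the out_unlock label.
+ */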
+
+static int aufs_setattr(struct dentry *dentry, struct iattr *ia)
+{
+	int err;
+	struct inode *inode;
+	struct super_block *sb;
+	struct file *file;
+	struct au_icpup_args *a;
+
+	inode = dentry->d_inode;
+	IMustLock(inode);
+
+	err = -ENOMEM;
+	a = kzalloc(sizeof(*a), GFP_NOFS);
+	if (unlikely(!a))
+		goto out;
+
+	if (ia->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
+		ia->ia_valid &= ~ATTR_MODE;
+
+	file = NULL;
+	sb = dentry->d_sb;
+	err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+	if (unlikely(err))
+		goto out_kfree;
+
+	if (ia->ia_valid & ATTR_FILE) {
+		/* currently ftruncate(2) only */
+		AuDebugOn(!S_ISREG(inode->i_mode));
+		file = ia->ia_file;
+		err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1);
+		if (unlikely(err))
+			goto out_si;
+		ia->ia_file = au_hf_top(file);
+		a->udba = AuOpt_UDBA_NONE;
+	} else {
+		/* fchmod() doesn't pass ia_file */
+		a->udba = au_opt_udba(sb);
+		di_write_lock_child(dentry);
+		/* no d_unlinked(), to set UDBA_NONE for root */
+		if (d_unhashed(dentry))
+			a->udba = AuOpt_UDBA_NONE;
+		if (a->udba != AuOpt_UDBA_NONE) {
+			AuDebugOn(IS_ROOT(dentry));
+			err = au_reval_for_attr(dentry, au_sigen(sb));
+			if (unlikely(err))
+				goto out_dentry;
+		}
+	}
+
+	err = au_pin_and_icpup(dentry, ia, a);
+	if (unlikely(err < 0))
+		goto out_dentry;
+	if (au_ftest_icpup(a->flags, DID_CPUP)) {
+		ia->ia_file = NULL;
+		ia->ia_valid &= ~ATTR_FILE;
+	}
+
+	a->h_path.mnt = au_sbr_mnt(sb, a->btgt);
+	if ((ia->ia_valid & (ATTR_MODE | ATTR_CTIME))
+	    == (ATTR_MODE | ATTR_CTIME)) {
+		err = security_path_chmod(&a->h_path, ia->ia_mode);
+		if (unlikely(err))
+			goto out_unlock;
+	} else if ((ia->ia_valid & (ATTR_UID | ATTR_GID))
+		   && (ia->ia_valid & ATTR_CTIME)) {
+		err = security_path_chown(&a->h_path, ia->ia_uid, ia->ia_gid);
+		if (unlikely(err))
+			goto out_unlock;
+	}
+
+	if (ia->ia_valid & ATTR_SIZE) {
+		struct file *f;
+
+		if (ia->ia_size < i_size_read(inode))
+			/* unmap only */
+			truncate_setsize(inode, ia->ia_size);
+
+		f = NULL;
+		if (ia->ia_valid & ATTR_FILE)
+			f = ia->ia_file;
+		mutex_unlock(&a->h_inode->i_mutex);
+		err = vfsub_trunc(&a->h_path, ia->ia_size, ia->ia_valid, f);
+		mutex_lock_nested(&a->h_inode->i_mutex, AuLsc_I_CHILD);
+	} else
+		err = vfsub_notify_change(&a->h_path, ia);
+	if (!err)
+		au_cpup_attr_changeable(inode);
+
+out_unlock:
+	mutex_unlock(&a->h_inode->i_mutex);
+	au_unpin(&a->pin);
+	if (unlikely(err))
+		au_update_dbstart(dentry);
+out_dentry:
+	di_write_unlock(dentry);
+	if (file) {
+		fi_write_unlock(file);
+		ia->ia_file = file;
+		ia->ia_valid |= ATTR_FILE;
+	}
+out_si:
+	si_read_unlock(sb);
+out_kfree:
+	kfree(a);
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+static void au_refresh_iattr(struct inode *inode, struct kstat *st,
+			     unsigned int nlink)
+{
+	unsigned int n;
+
+	inode->i_mode = st->mode;
+	/* don't i_[ug]id_write() here */
+	inode->i_uid = st->uid;
+	inode->i_gid = st->gid;
+	inode->i_atime = st->atime;
+	inode->i_mtime = st->mtime;
+	inode->i_ctime = st->ctime;
+
+	au_cpup_attr_nlink(inode, /*force*/0);
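+	/* for dirs, apply the delta of the lower nlink to the aufs inode */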
+	if (S_ISDIR(inode->i_mode)) {
+		n = inode->i_nlink;
+		n -= nlink;
+		n += st->nlink;
+		smp_mb();
+		/* 0 can happen */
+		set_nlink(inode, n);
+	}
+
+	spin_lock(&inode->i_lock);
+	inode->i_blocks = st->blocks;
+	i_size_write(inode, st->size);
+	spin_unlock(&inode->i_lock);
+}
+
+static int aufs_getattr(struct vfsmount *mnt __maybe_unused,
+			struct dentry *dentry, struct kstat *st)
+{
+	int err;
+	unsigned int mnt_flags;
+	aufs_bindex_t bindex;
+	unsigned char udba_none, positive;
+	struct super_block *sb, *h_sb;
+	struct inode *inode;
+	struct path h_path;
+
+	sb = dentry->d_sb;
+	inode = dentry->d_inode;
+	err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+	if (unlikely(err))
+		goto out;
+	mnt_flags = au_mntflags(sb);
+	udba_none = !!au_opt_test(mnt_flags, UDBA_NONE);
+
+	/* support fstat(2) */
+	if (!d_unlinked(dentry) && !udba_none) {
+		unsigned int sigen = au_sigen(sb);
+		err = au_digen_test(dentry, sigen);
+		if (!err) {
+			di_read_lock_child(dentry, AuLock_IR);
+			err = au_dbrange_test(dentry);
+			if (unlikely(err))
+				goto out_unlock;
+		} else {
+			AuDebugOn(IS_ROOT(dentry));
+			di_write_lock_child(dentry);
+			err = au_dbrange_test(dentry);
+			if (!err)
+				err = au_reval_for_attr(dentry, sigen);
+			di_downgrade_lock(dentry, AuLock_IR);
+			if (unlikely(err))
+				goto out_unlock;
+		}
+	} else
+		di_read_lock_child(dentry, AuLock_IR);
+
+	bindex = au_ibstart(inode);
+	h_path.mnt = au_sbr_mnt(sb, bindex);
+	h_sb = h_path.mnt->mnt_sb;
+	if (!au_test_fs_bad_iattr(h_sb) && udba_none)
+		goto out_fill; /* success */
+
+	h_path.dentry = NULL;
+	if (au_dbstart(dentry) == bindex)
+		h_path.dentry = dget(au_h_dptr(dentry, bindex));
+	else if (au_opt_test(mnt_flags, PLINK) && au_plink_test(inode)) {
+		h_path.dentry = au_plink_lkup(inode, bindex);
+		if (IS_ERR(h_path.dentry))
+			goto out_fill; /* pretending success */
+	}
+	/* illegally overlapped or something */
+	if (unlikely(!h_path.dentry))
+		goto out_fill; /* pretending success */
+
+	positive = !!h_path.dentry->d_inode;
+	if (positive)
+		err = vfs_getattr(&h_path, st);
+	if (!err) {
+		if (positive)
+			au_refresh_iattr(inode, st,
+					 h_path.dentry->d_inode->i_nlink);
+		/* drop the ref only after i_nlink has been read */
+		dput(h_path.dentry);
+		goto out_fill; /* success */
+	}
+	dput(h_path.dentry);
+	AuTraceErr(err);
+	goto out_unlock;
+
+out_fill:
+	generic_fillattr(inode, st);
+out_unlock:
+	di_read_unlock(dentry, AuLock_IR);
+	si_read_unlock(sb);
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int h_readlink(struct dentry *dentry, int bindex, char __user *buf,
+		      int bufsiz)
+{
+	int err;
+	struct super_block *sb;
+	struct dentry *h_dentry;
+
+	err = -EINVAL;
+	h_dentry = au_h_dptr(dentry, bindex);
+	if (unlikely(!h_dentry->d_inode->i_op->readlink))
+		goto out;
+
+	err = security_inode_readlink(h_dentry);
+	if (unlikely(err))
+		goto out;
+
+	sb = dentry->d_sb;
+	if (!au_test_ro(sb, bindex, dentry->d_inode)) {
+		vfsub_touch_atime(au_sbr_mnt(sb, bindex), h_dentry);
+		fsstack_copy_attr_atime(dentry->d_inode, h_dentry->d_inode);
+	}
+	err = h_dentry->d_inode->i_op->readlink(h_dentry, buf, bufsiz);
+
+out:
+	return err;
+}
+
+static int aufs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
+{
+	int err;
+
+	err = aufs_read_lock(dentry, AuLock_IR | AuLock_GEN);
+	if (unlikely(err))
+		goto out;
+	err = au_d_hashed_positive(dentry);
+	if (!err)
+		err = h_readlink(dentry, au_dbstart(dentry), buf, bufsiz);
+	aufs_read_unlock(dentry, AuLock_IR);
+
+out:
+	return err;
+}
+
+static void *aufs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	int err;
+	mm_segment_t old_fs;
+	union {
+		char *k;
+		char __user *u;
+	} buf;
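+	/* ->readlink() takes a __user pointer; via the union and
+	 * set_fs(KERNEL_DS) below, a kernel page can be passed to it */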
+
+	err = -ENOMEM;
+	buf.k = (void *)__get_free_page(GFP_NOFS);
+	if (unlikely(!buf.k))
+		goto out;
+
+	err = aufs_read_lock(dentry, AuLock_IR | AuLock_GEN);
+	if (unlikely(err))
+		goto out_name;
+
+	err = au_d_hashed_positive(dentry);
+	if (!err) {
+		old_fs = get_fs();
+		set_fs(KERNEL_DS);
+		err = h_readlink(dentry, au_dbstart(dentry), buf.u, PATH_MAX);
+		set_fs(old_fs);
+	}
+	aufs_read_unlock(dentry, AuLock_IR);
+
+	if (err >= 0) {
+		buf.k[err] = 0;
+		/* will be freed by put_link */
+		nd_set_link(nd, buf.k);
+		return NULL; /* success */
+	}
+
+out_name:
+	free_page((unsigned long)buf.k);
+out:
+	AuTraceErr(err);
+	return ERR_PTR(err);
+}
+
+static void aufs_put_link(struct dentry *dentry __maybe_unused,
+			  struct nameidata *nd, void *cookie __maybe_unused)
+{
+	char *p;
+
+	p = nd_get_link(nd);
+	if (!IS_ERR_OR_NULL(p))
+		free_page((unsigned long)p);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int aufs_update_time(struct inode *inode, struct timespec *ts, int flags)
+{
+	int err;
+	struct super_block *sb;
+	struct inode *h_inode;
+
+	sb = inode->i_sb;
+	/* mmap_sem might be acquired already, cf. aufs_mmap() */
+	lockdep_off();
+	si_read_lock(sb, AuLock_FLUSH);
+	ii_write_lock_child(inode);
+	lockdep_on();
+	h_inode = au_h_iptr(inode, au_ibstart(inode));
+	err = vfsub_update_time(h_inode, ts, flags);
+	lockdep_off();
+	ii_write_unlock(inode);
+	si_read_unlock(sb);
+	lockdep_on();
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct inode_operations aufs_symlink_iop = {
+	.permission	= aufs_permission,
+	.setattr	= aufs_setattr,
+	.getattr	= aufs_getattr,
+
+	.readlink	= aufs_readlink,
+	.follow_link	= aufs_follow_link,
+	.put_link	= aufs_put_link,
+
+	/* .update_time	= aufs_update_time */
+};
+
+struct inode_operations aufs_dir_iop = {
+	.create		= aufs_create,
+	.lookup		= aufs_lookup,
+	.link		= aufs_link,
+	.unlink		= aufs_unlink,
+	.symlink	= aufs_symlink,
+	.mkdir		= aufs_mkdir,
+	.rmdir		= aufs_rmdir,
+	.mknod		= aufs_mknod,
+	.rename		= aufs_rename,
+
+	.permission	= aufs_permission,
+	.setattr	= aufs_setattr,
+	.getattr	= aufs_getattr,
+
+	.update_time	= aufs_update_time
+	/* no support for atomic_open() */
+};
+
+struct inode_operations aufs_iop = {
+	.permission	= aufs_permission,
+	.setattr	= aufs_setattr,
+	.getattr	= aufs_getattr,
+
+	.update_time	= aufs_update_time
+};
diff --git a/fs/aufs/i_op_add.c b/fs/aufs/i_op_add.c
new file mode 100644
index 0000000..d9482a0
--- /dev/null
+++ b/fs/aufs/i_op_add.c
@@ -0,0 +1,739 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * inode operations (add entry)
+ */
+
+#include "aufs.h"
+
+/*
+ * final procedure of adding a new entry, except link(2).
+ * remove whiteout, instantiate, copyup the parent dir's times and size
+ * and update version.
+ * if it fails, re-create the removed whiteout.
+ */
+static int epilog(struct inode *dir, aufs_bindex_t bindex,
+		  struct dentry *wh_dentry, struct dentry *dentry)
+{
+	int err, rerr;
+	aufs_bindex_t bwh;
+	struct path h_path;
+	struct inode *inode, *h_dir;
+	struct dentry *wh;
+
+	bwh = -1;
+	if (wh_dentry) {
+		h_dir = wh_dentry->d_parent->d_inode; /* dir inode is locked */
+		IMustLock(h_dir);
+		AuDebugOn(au_h_iptr(dir, bindex) != h_dir);
+		bwh = au_dbwh(dentry);
+		h_path.dentry = wh_dentry;
+		h_path.mnt = au_sbr_mnt(dir->i_sb, bindex);
+		err = au_wh_unlink_dentry(au_h_iptr(dir, bindex), &h_path,
+					  dentry);
+		if (unlikely(err))
+			goto out;
+	}
+
+	inode = au_new_inode(dentry, /*must_new*/1);
+	if (!IS_ERR(inode)) {
+		d_instantiate(dentry, inode);
+		dir = dentry->d_parent->d_inode; /* dir inode is locked */
+		IMustLock(dir);
+		if (au_ibstart(dir) == au_dbstart(dentry))
+			au_cpup_attr_timesizes(dir);
+		dir->i_version++;
+		return 0; /* success */
+	}
+
+	err = PTR_ERR(inode);
+	if (!wh_dentry)
+		goto out;
+
+	/* revert */
+	/* dir inode is locked */
+	wh = au_wh_create(dentry, bwh, wh_dentry->d_parent);
+	rerr = PTR_ERR(wh);
+	if (IS_ERR(wh)) {
+		AuIOErr("%.*s reverting whiteout failed(%d, %d)\n",
+			AuDLNPair(dentry), err, rerr);
+		err = -EIO;
+	} else
+		dput(wh);
+
+out:
+	return err;
+}
+
+static int au_d_may_add(struct dentry *dentry)
+{
+	int err;
+
+	err = 0;
+	if (unlikely(d_unhashed(dentry)))
+		err = -ENOENT;
+	if (unlikely(dentry->d_inode))
+		err = -EEXIST;
+	return err;
+}
+
+/*
+ * simple tests for the adding inode operations.
+ * following the checks in vfs, plus the parent-child relationship.
+ */
+int au_may_add(struct dentry *dentry, aufs_bindex_t bindex,
+	       struct dentry *h_parent, int isdir)
+{
+	int err;
+	umode_t h_mode;
+	struct dentry *h_dentry;
+	struct inode *h_inode;
+
+	err = -ENAMETOOLONG;
+	if (unlikely(dentry->d_name.len > AUFS_MAX_NAMELEN))
+		goto out;
+
+	h_dentry = au_h_dptr(dentry, bindex);
+	h_inode = h_dentry->d_inode;
+	if (!dentry->d_inode) {
+		err = -EEXIST;
+		if (unlikely(h_inode))
+			goto out;
+	} else {
+		/* rename(2) case */
+		err = -EIO;
+		if (unlikely(!h_inode || !h_inode->i_nlink))
+			goto out;
+
+		h_mode = h_inode->i_mode;
+		if (!isdir) {
+			err = -EISDIR;
+			if (unlikely(S_ISDIR(h_mode)))
+				goto out;
+		} else if (unlikely(!S_ISDIR(h_mode))) {
+			err = -ENOTDIR;
+			goto out;
+		}
+	}
+
+	err = 0;
+	/* expected parent dir is locked */
+	if (unlikely(h_parent != h_dentry->d_parent))
+		err = -EIO;
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/*
+ * initial procedure of adding a new entry.
+ * prepare the writable branch and the parent dir, lock it,
+ * and look up the whiteout for the new entry.
+ */
+static struct dentry*
+lock_hdir_lkup_wh(struct dentry *dentry, struct au_dtime *dt,
+		  struct dentry *src_dentry, struct au_pin *pin,
+		  struct au_wr_dir_args *wr_dir_args)
+{
+	struct dentry *wh_dentry, *h_parent;
+	struct super_block *sb;
+	struct au_branch *br;
+	int err;
+	unsigned int udba;
+	aufs_bindex_t bcpup;
+
+	AuDbg("%.*s\n", AuDLNPair(dentry));
+
+	err = au_wr_dir(dentry, src_dentry, wr_dir_args);
+	bcpup = err;
+	wh_dentry = ERR_PTR(err);
+	if (unlikely(err < 0))
+		goto out;
+
+	sb = dentry->d_sb;
+	udba = au_opt_udba(sb);
+	err = au_pin(pin, dentry, bcpup, udba,
+		     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+	wh_dentry = ERR_PTR(err);
+	if (unlikely(err))
+		goto out;
+
+	h_parent = au_pinned_h_parent(pin);
+	if (udba != AuOpt_UDBA_NONE
+	    && au_dbstart(dentry) == bcpup)
+		err = au_may_add(dentry, bcpup, h_parent,
+				 au_ftest_wrdir(wr_dir_args->flags, ISDIR));
+	else if (unlikely(dentry->d_name.len > AUFS_MAX_NAMELEN))
+		err = -ENAMETOOLONG;
+	wh_dentry = ERR_PTR(err);
+	if (unlikely(err))
+		goto out_unpin;
+
+	br = au_sbr(sb, bcpup);
+	if (dt) {
+		struct path tmp = {
+			.dentry	= h_parent,
+			.mnt	= au_br_mnt(br)
+		};
+		au_dtime_store(dt, au_pinned_parent(pin), &tmp);
+	}
+
+	wh_dentry = NULL;
+	if (bcpup != au_dbwh(dentry))
+		goto out; /* success */
+
+	wh_dentry = au_wh_lkup(h_parent, &dentry->d_name, br);
+
+out_unpin:
+	if (IS_ERR(wh_dentry))
+		au_unpin(pin);
+out:
+	return wh_dentry;
+}
+
+/* ---------------------------------------------------------------------- */
+
+enum { Mknod, Symlink, Creat };
+struct simple_arg {
+	int type;
+	union {
+		struct {
+			umode_t mode;
+			bool want_excl;
+		} c;
+		struct {
+			const char *symname;
+		} s;
+		struct {
+			umode_t mode;
+			dev_t dev;
+		} m;
+	} u;
+};
+
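+/* a single routine serving create(2), symlink(2) and mknod(2) */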
+static int add_simple(struct inode *dir, struct dentry *dentry,
+		      struct simple_arg *arg)
+{
+	int err;
+	aufs_bindex_t bstart;
+	unsigned char created;
+	struct dentry *wh_dentry, *parent;
+	struct inode *h_dir;
+	/* to reduce stack size */
+	struct {
+		struct au_dtime dt;
+		struct au_pin pin;
+		struct path h_path;
+		struct au_wr_dir_args wr_dir_args;
+	} *a;
+
+	AuDbg("%.*s\n", AuDLNPair(dentry));
+	IMustLock(dir);
+
+	err = -ENOMEM;
+	a = kmalloc(sizeof(*a), GFP_NOFS);
+	if (unlikely(!a))
+		goto out;
+	a->wr_dir_args.force_btgt = -1;
+	a->wr_dir_args.flags = AuWrDir_ADD_ENTRY;
+
+	parent = dentry->d_parent; /* dir inode is locked */
+	err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN);
+	if (unlikely(err))
+		goto out_free;
+	err = au_d_may_add(dentry);
+	if (unlikely(err))
+		goto out_unlock;
+	di_write_lock_parent(parent);
+	wh_dentry = lock_hdir_lkup_wh(dentry, &a->dt, /*src_dentry*/NULL,
+				      &a->pin, &a->wr_dir_args);
+	err = PTR_ERR(wh_dentry);
+	if (IS_ERR(wh_dentry))
+		goto out_parent;
+
+	bstart = au_dbstart(dentry);
+	a->h_path.dentry = au_h_dptr(dentry, bstart);
+	a->h_path.mnt = au_sbr_mnt(dentry->d_sb, bstart);
+	h_dir = au_pinned_h_dir(&a->pin);
+	switch (arg->type) {
+	case Creat:
+		err = vfsub_create(h_dir, &a->h_path, arg->u.c.mode,
+				   arg->u.c.want_excl);
+		break;
+	case Symlink:
+		err = vfsub_symlink(h_dir, &a->h_path, arg->u.s.symname);
+		break;
+	case Mknod:
+		err = vfsub_mknod(h_dir, &a->h_path, arg->u.m.mode,
+				  arg->u.m.dev);
+		break;
+	default:
+		BUG();
+	}
+	created = !err;
+	if (!err)
+		err = epilog(dir, bstart, wh_dentry, dentry);
+
+	/* revert */
+	if (unlikely(created && err && a->h_path.dentry->d_inode)) {
+		int rerr;
+		rerr = vfsub_unlink(h_dir, &a->h_path, /*force*/0);
+		if (rerr) {
+			AuIOErr("%.*s revert failure(%d, %d)\n",
+				AuDLNPair(dentry), err, rerr);
+			err = -EIO;
+		}
+		au_dtime_revert(&a->dt);
+	}
+
+	au_unpin(&a->pin);
+	dput(wh_dentry);
+
+out_parent:
+	di_write_unlock(parent);
+out_unlock:
+	if (unlikely(err)) {
+		au_update_dbstart(dentry);
+		d_drop(dentry);
+	}
+	aufs_read_unlock(dentry, AuLock_DW);
+out_free:
+	kfree(a);
+out:
+	return err;
+}
+
+int aufs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
+	       dev_t dev)
+{
+	struct simple_arg arg = {
+		.type = Mknod,
+		.u.m = {
+			.mode	= mode,
+			.dev	= dev
+		}
+	};
+	return add_simple(dir, dentry, &arg);
+}
+
+int aufs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
+{
+	struct simple_arg arg = {
+		.type = Symlink,
+		.u.s.symname = symname
+	};
+	return add_simple(dir, dentry, &arg);
+}
+
+int aufs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+		bool want_excl)
+{
+	struct simple_arg arg = {
+		.type = Creat,
+		.u.c = {
+			.mode		= mode,
+			.want_excl	= want_excl
+		}
+	};
+	return add_simple(dir, dentry, &arg);
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_link_args {
+	aufs_bindex_t bdst, bsrc;
+	struct au_pin pin;
+	struct path h_path;
+	struct dentry *src_parent, *parent;
+};
+
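+/*
+ * copy up src_dentry (and its parent dirs when necessary) to the branch
+ * @a->bdst before making the hardlink there.
+ */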
+static int au_cpup_before_link(struct dentry *src_dentry,
+			       struct au_link_args *a)
+{
+	int err;
+	struct dentry *h_src_dentry;
+	struct au_cp_generic cpg = {
+		.dentry	= src_dentry,
+		.bdst	= a->bdst,
+		.bsrc	= a->bsrc,
+		.len	= -1,
+		.pin	= &a->pin,
+		.flags	= AuCpup_DTIME | AuCpup_HOPEN /* | AuCpup_KEEPLINO */
+	};
+
+	di_read_lock_parent(a->src_parent, AuLock_IR);
+	err = au_test_and_cpup_dirs(src_dentry, a->bdst);
+	if (unlikely(err))
+		goto out;
+
+	h_src_dentry = au_h_dptr(src_dentry, a->bsrc);
+	err = au_pin(&a->pin, src_dentry, a->bdst,
+		     au_opt_udba(src_dentry->d_sb),
+		     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+	if (unlikely(err))
+		goto out;
+
+	err = au_sio_cpup_simple(&cpg);
+	au_unpin(&a->pin);
+
+out:
+	di_read_unlock(a->src_parent, AuLock_IR);
+	return err;
+}
+
+static int au_cpup_or_link(struct dentry *src_dentry, struct dentry *dentry,
+			   struct au_link_args *a)
+{
+	int err;
+	unsigned char plink;
+	aufs_bindex_t bend;
+	struct dentry *h_src_dentry;
+	struct inode *h_inode, *inode;
+	struct super_block *sb;
+	struct file *h_file;
+
+	plink = 0;
+	h_inode = NULL;
+	sb = src_dentry->d_sb;
+	inode = src_dentry->d_inode;
+	if (au_ibstart(inode) <= a->bdst)
+		h_inode = au_h_iptr(inode, a->bdst);
+	if (!h_inode || !h_inode->i_nlink) {
+		/* copy up src_dentry, using dentry's name */
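+		/* temporarily graft src's lower dentry and inode onto @dentry */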
+		bend = au_dbend(dentry);
+		if (bend < a->bsrc)
+			au_set_dbend(dentry, a->bsrc);
+		au_set_h_dptr(dentry, a->bsrc,
+			      dget(au_h_dptr(src_dentry, a->bsrc)));
+		dget(a->h_path.dentry);
+		au_set_h_dptr(dentry, a->bdst, NULL);
+		dentry->d_inode = src_dentry->d_inode; /* tmp */
+		h_file = au_h_open_pre(dentry, a->bsrc);
+		if (IS_ERR(h_file))
+			err = PTR_ERR(h_file);
+		else {
+			struct au_cp_generic cpg = {
+				.dentry	= dentry,
+				.bdst	= a->bdst,
+				.bsrc	= -1,
+				.len	= -1,
+				.pin	= &a->pin,
+				.flags	= AuCpup_KEEPLINO
+			};
+			err = au_sio_cpup_simple(&cpg);
+			au_h_open_post(dentry, a->bsrc, h_file);
+			if (!err) {
+				dput(a->h_path.dentry);
+				a->h_path.dentry = au_h_dptr(dentry, a->bdst);
+			} else
+				au_set_h_dptr(dentry, a->bdst,
+					      a->h_path.dentry);
+		}
+		dentry->d_inode = NULL; /* restore */
+		au_set_h_dptr(dentry, a->bsrc, NULL);
+		au_set_dbend(dentry, bend);
+	} else {
+		/* the inode of src_dentry already exists on the a->bdst branch */
+		h_src_dentry = d_find_alias(h_inode);
+		if (!h_src_dentry && au_plink_test(inode)) {
+			plink = 1;
+			h_src_dentry = au_plink_lkup(inode, a->bdst);
+			err = PTR_ERR(h_src_dentry);
+			if (IS_ERR(h_src_dentry))
+				goto out;
+
+			if (unlikely(!h_src_dentry->d_inode)) {
+				dput(h_src_dentry);
+				h_src_dentry = NULL;
+			}
+
+		}
+		if (h_src_dentry) {
+			err = vfsub_link(h_src_dentry, au_pinned_h_dir(&a->pin),
+					 &a->h_path);
+			dput(h_src_dentry);
+		} else {
+			AuIOErr("no dentry found for hi%lu on b%d\n",
+				h_inode->i_ino, a->bdst);
+			err = -EIO;
+		}
+	}
+
+	if (!err && !plink)
+		au_plink_append(inode, a->bdst, a->h_path.dentry);
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+int aufs_link(struct dentry *src_dentry, struct inode *dir,
+	      struct dentry *dentry)
+{
+	int err, rerr;
+	struct au_dtime dt;
+	struct au_link_args *a;
+	struct dentry *wh_dentry, *h_src_dentry;
+	struct inode *inode;
+	struct super_block *sb;
+	struct au_wr_dir_args wr_dir_args = {
+		/* .force_btgt	= -1, */
+		.flags		= AuWrDir_ADD_ENTRY
+	};
+
+	IMustLock(dir);
+	inode = src_dentry->d_inode;
+	IMustLock(inode);
+
+	err = -ENOMEM;
+	a = kzalloc(sizeof(*a), GFP_NOFS);
+	if (unlikely(!a))
+		goto out;
+
+	a->parent = dentry->d_parent; /* dir inode is locked */
+	err = aufs_read_and_write_lock2(dentry, src_dentry,
+					AuLock_NOPLM | AuLock_GEN);
+	if (unlikely(err))
+		goto out_kfree;
+	err = au_d_hashed_positive(src_dentry);
+	if (unlikely(err))
+		goto out_unlock;
+	err = au_d_may_add(dentry);
+	if (unlikely(err))
+		goto out_unlock;
+
+	a->src_parent = dget_parent(src_dentry);
+	wr_dir_args.force_btgt = au_ibstart(inode);
+
+	di_write_lock_parent(a->parent);
+	wr_dir_args.force_btgt = au_wbr(dentry, wr_dir_args.force_btgt);
+	wh_dentry = lock_hdir_lkup_wh(dentry, &dt, src_dentry, &a->pin,
+				      &wr_dir_args);
+	err = PTR_ERR(wh_dentry);
+	if (IS_ERR(wh_dentry))
+		goto out_parent;
+
+	err = 0;
+	sb = dentry->d_sb;
+	a->bdst = au_dbstart(dentry);
+	a->h_path.dentry = au_h_dptr(dentry, a->bdst);
+	a->h_path.mnt = au_sbr_mnt(sb, a->bdst);
+	a->bsrc = au_ibstart(inode);
+	h_src_dentry = au_h_d_alias(src_dentry, a->bsrc);
+	if (!h_src_dentry) {
+		a->bsrc = au_dbstart(src_dentry);
+		h_src_dentry = au_h_d_alias(src_dentry, a->bsrc);
+		AuDebugOn(!h_src_dentry);
+	} else if (IS_ERR(h_src_dentry))
+		goto out_parent;
+
+	if (au_opt_test(au_mntflags(sb), PLINK)) {
+		if (a->bdst < a->bsrc
+		    /* && h_src_dentry->d_sb != a->h_path.dentry->d_sb */)
+			err = au_cpup_or_link(src_dentry, dentry, a);
+		else
+			err = vfsub_link(h_src_dentry, au_pinned_h_dir(&a->pin),
+					 &a->h_path);
+		dput(h_src_dentry);
+	} else {
+		/*
+		 * copyup src_dentry to the branch we process,
+		 * and then link(2) to it.
+		 */
+		dput(h_src_dentry);
+		if (a->bdst < a->bsrc
+		    /* && h_src_dentry->d_sb != a->h_path.dentry->d_sb */) {
+			au_unpin(&a->pin);
+			di_write_unlock(a->parent);
+			err = au_cpup_before_link(src_dentry, a);
+			di_write_lock_parent(a->parent);
+			if (!err)
+				err = au_pin(&a->pin, dentry, a->bdst,
+					     au_opt_udba(sb),
+					     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+			if (unlikely(err))
+				goto out_wh;
+		}
+		if (!err) {
+			h_src_dentry = au_h_dptr(src_dentry, a->bdst);
+			err = -ENOENT;
+			if (h_src_dentry && h_src_dentry->d_inode)
+				err = vfsub_link(h_src_dentry,
+						 au_pinned_h_dir(&a->pin),
+						 &a->h_path);
+		}
+	}
+	if (unlikely(err))
+		goto out_unpin;
+
+	if (wh_dentry) {
+		a->h_path.dentry = wh_dentry;
+		err = au_wh_unlink_dentry(au_pinned_h_dir(&a->pin), &a->h_path,
+					  dentry);
+		if (unlikely(err))
+			goto out_revert;
+	}
+
+	dir->i_version++;
+	if (au_ibstart(dir) == au_dbstart(dentry))
+		au_cpup_attr_timesizes(dir);
+	inc_nlink(inode);
+	inode->i_ctime = dir->i_ctime;
+	d_instantiate(dentry, au_igrab(inode));
+	if (d_unhashed(a->h_path.dentry))
+		/* some filesystems call d_drop() */
+		d_drop(dentry);
+	goto out_unpin; /* success */
+
+out_revert:
+	rerr = vfsub_unlink(au_pinned_h_dir(&a->pin), &a->h_path, /*force*/0);
+	if (unlikely(rerr)) {
+		AuIOErr("%.*s reverting failed(%d, %d)\n",
+			AuDLNPair(dentry), err, rerr);
+		err = -EIO;
+	}
+	au_dtime_revert(&dt);
+out_unpin:
+	au_unpin(&a->pin);
+out_wh:
+	dput(wh_dentry);
+out_parent:
+	di_write_unlock(a->parent);
+	dput(a->src_parent);
+out_unlock:
+	if (unlikely(err)) {
+		au_update_dbstart(dentry);
+		d_drop(dentry);
+	}
+	aufs_read_and_write_unlock2(dentry, src_dentry);
+out_kfree:
+	kfree(a);
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+int aufs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+	int err, rerr;
+	aufs_bindex_t bindex;
+	unsigned char diropq;
+	struct path h_path;
+	struct dentry *wh_dentry, *parent, *opq_dentry;
+	struct mutex *h_mtx;
+	struct super_block *sb;
+	struct {
+		struct au_pin pin;
+		struct au_dtime dt;
+	} *a; /* reduce the stack usage */
+	struct au_wr_dir_args wr_dir_args = {
+		.force_btgt	= -1,
+		.flags		= AuWrDir_ADD_ENTRY | AuWrDir_ISDIR
+	};
+
+	IMustLock(dir);
+
+	err = -ENOMEM;
+	a = kmalloc(sizeof(*a), GFP_NOFS);
+	if (unlikely(!a))
+		goto out;
+
+	err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN);
+	if (unlikely(err))
+		goto out_free;
+	err = au_d_may_add(dentry);
+	if (unlikely(err))
+		goto out_unlock;
+
+	parent = dentry->d_parent; /* dir inode is locked */
+	di_write_lock_parent(parent);
+	wh_dentry = lock_hdir_lkup_wh(dentry, &a->dt, /*src_dentry*/NULL,
+				      &a->pin, &wr_dir_args);
+	err = PTR_ERR(wh_dentry);
+	if (IS_ERR(wh_dentry))
+		goto out_parent;
+
+	sb = dentry->d_sb;
+	bindex = au_dbstart(dentry);
+	h_path.dentry = au_h_dptr(dentry, bindex);
+	h_path.mnt = au_sbr_mnt(sb, bindex);
+	err = vfsub_mkdir(au_pinned_h_dir(&a->pin), &h_path, mode);
+	if (unlikely(err))
+		goto out_unpin;
+
+	/* make the dir opaque */
+	diropq = 0;
+	h_mtx = &h_path.dentry->d_inode->i_mutex;
+	if (wh_dentry
+	    || au_opt_test(au_mntflags(sb), ALWAYS_DIROPQ)) {
+		mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
+		opq_dentry = au_diropq_create(dentry, bindex);
+		mutex_unlock(h_mtx);
+		err = PTR_ERR(opq_dentry);
+		if (IS_ERR(opq_dentry))
+			goto out_dir;
+		dput(opq_dentry);
+		diropq = 1;
+	}
+
+	err = epilog(dir, bindex, wh_dentry, dentry);
+	if (!err) {
+		inc_nlink(dir);
+		goto out_unpin; /* success */
+	}
+
+	/* revert */
+	if (diropq) {
+		AuLabel(revert opq);
+		mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
+		rerr = au_diropq_remove(dentry, bindex);
+		mutex_unlock(h_mtx);
+		if (rerr) {
+			AuIOErr("%.*s reverting diropq failed(%d, %d)\n",
+				AuDLNPair(dentry), err, rerr);
+			err = -EIO;
+		}
+	}
+
+out_dir:
+	AuLabel(revert dir);
+	rerr = vfsub_rmdir(au_pinned_h_dir(&a->pin), &h_path);
+	if (rerr) {
+		AuIOErr("%.*s reverting dir failed(%d, %d)\n",
+			AuDLNPair(dentry), err, rerr);
+		err = -EIO;
+	}
+	au_dtime_revert(&a->dt);
+out_unpin:
+	au_unpin(&a->pin);
+	dput(wh_dentry);
+out_parent:
+	di_write_unlock(parent);
+out_unlock:
+	if (unlikely(err)) {
+		au_update_dbstart(dentry);
+		d_drop(dentry);
+	}
+	aufs_read_unlock(dentry, AuLock_DW);
+out_free:
+	kfree(a);
+out:
+	return err;
+}
diff --git a/fs/aufs/i_op_del.c b/fs/aufs/i_op_del.c
new file mode 100644
index 0000000..be50603
--- /dev/null
+++ b/fs/aufs/i_op_del.c
@@ -0,0 +1,502 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * inode operations (del entry)
+ */
+
+#include "aufs.h"
+
+/*
+ * decide whether a new whiteout for @dentry is necessary.
+ * when it is, prepare the parent dir on the upper branch whose index is
+ * @bcpup for the creation. the actual creation of the whiteout is left to
+ * the caller.
+ * return value:
+ * 0: wh is unnecessary
+ * plus: wh is necessary
+ * minus: error
+ */
+int au_wr_dir_need_wh(struct dentry *dentry, int isdir, aufs_bindex_t *bcpup)
+{
+	int need_wh, err;
+	aufs_bindex_t bstart;
+	struct super_block *sb;
+
+	sb = dentry->d_sb;
+	bstart = au_dbstart(dentry);
+	if (*bcpup < 0) {
+		*bcpup = bstart;
+		if (au_test_ro(sb, bstart, dentry->d_inode)) {
+			err = AuWbrCopyup(au_sbi(sb), dentry);
+			*bcpup = err;
+			if (unlikely(err < 0))
+				goto out;
+		}
+	} else
+		AuDebugOn(bstart < *bcpup
+			  || au_test_ro(sb, *bcpup, dentry->d_inode));
+	AuDbg("bcpup %d, bstart %d\n", *bcpup, bstart);
+
+	if (*bcpup != bstart) {
+		err = au_cpup_dirs(dentry, *bcpup);
+		if (unlikely(err))
+			goto out;
+		need_wh = 1;
+	} else {
+		struct au_dinfo *dinfo, *tmp;
+
+		need_wh = -ENOMEM;
+		dinfo = au_di(dentry);
+		tmp = au_di_alloc(sb, AuLsc_DI_TMP);
+		if (tmp) {
+			au_di_cp(tmp, dinfo);
+			au_di_swap(tmp, dinfo);
+			/* returns the number of positive dentries */
+			need_wh = au_lkup_dentry(dentry, bstart + 1, /*type*/0);
+			au_di_swap(tmp, dinfo);
+			au_rw_write_unlock(&tmp->di_rwsem);
+			au_di_free(tmp);
+		}
+	}
+	AuDbg("need_wh %d\n", need_wh);
+	err = need_wh;
+
+out:
+	return err;
+}
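+
+/*
+ * a rough caller sketch (cf. lock_hdir_create_wh() below):
+ *	bcpup = -1;
+ *	need_wh = au_wr_dir_need_wh(dentry, isdir, &bcpup);
+ *	if (need_wh < 0)
+ *		an error;
+ *	else if (need_wh)
+ *		create a whiteout on the branch bcpup;
+ */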
+
+/*
+ * simple tests for the del-entry operations.
+ * following the checks in vfs, plus the parent-child relationship.
+ */
+int au_may_del(struct dentry *dentry, aufs_bindex_t bindex,
+	       struct dentry *h_parent, int isdir)
+{
+	int err;
+	umode_t h_mode;
+	struct dentry *h_dentry, *h_latest;
+	struct inode *h_inode;
+
+	h_dentry = au_h_dptr(dentry, bindex);
+	h_inode = h_dentry->d_inode;
+	if (dentry->d_inode) {
+		err = -ENOENT;
+		if (unlikely(!h_inode || !h_inode->i_nlink))
+			goto out;
+
+		h_mode = h_inode->i_mode;
+		if (!isdir) {
+			err = -EISDIR;
+			if (unlikely(S_ISDIR(h_mode)))
+				goto out;
+		} else if (unlikely(!S_ISDIR(h_mode))) {
+			err = -ENOTDIR;
+			goto out;
+		}
+	} else {
+		/* rename(2) case */
+		err = -EIO;
+		if (unlikely(h_inode))
+			goto out;
+	}
+
+	err = -ENOENT;
+	/* expected parent dir is locked */
+	if (unlikely(h_parent != h_dentry->d_parent))
+		goto out;
+	err = 0;
+
+	/*
+	 * removing a dir may break the consistency on some filesystems.
+	 * try a heavier test.
+	 */
+	err = -EACCES;
+	if (unlikely(au_test_h_perm(h_parent->d_inode, MAY_EXEC | MAY_WRITE)))
+		goto out;
+
+	h_latest = au_sio_lkup_one(&dentry->d_name, h_parent,
+				   au_sbr(dentry->d_sb, bindex));
+	err = -EIO;
+	if (IS_ERR(h_latest))
+		goto out;
+	if (h_latest == h_dentry)
+		err = 0;
+	dput(h_latest);
+
+out:
+	return err;
+}
+
+/*
+ * decide the branch where we operate for @dentry. the branch index will be
+ * set in @rbcpup. after deciding it, 'pin' it and store the timestamps of
+ * the parent dir for reverting.
+ * when a new whiteout is necessary, create it.
+ */
+static struct dentry*
+lock_hdir_create_wh(struct dentry *dentry, int isdir, aufs_bindex_t *rbcpup,
+		    struct au_dtime *dt, struct au_pin *pin)
+{
+	struct dentry *wh_dentry;
+	struct super_block *sb;
+	struct path h_path;
+	int err, need_wh;
+	unsigned int udba;
+	aufs_bindex_t bcpup;
+
+	need_wh = au_wr_dir_need_wh(dentry, isdir, rbcpup);
+	wh_dentry = ERR_PTR(need_wh);
+	if (unlikely(need_wh < 0))
+		goto out;
+
+	sb = dentry->d_sb;
+	udba = au_opt_udba(sb);
+	bcpup = *rbcpup;
+	err = au_pin(pin, dentry, bcpup, udba,
+		     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+	wh_dentry = ERR_PTR(err);
+	if (unlikely(err))
+		goto out;
+
+	h_path.dentry = au_pinned_h_parent(pin);
+	if (udba != AuOpt_UDBA_NONE
+	    && au_dbstart(dentry) == bcpup) {
+		err = au_may_del(dentry, bcpup, h_path.dentry, isdir);
+		wh_dentry = ERR_PTR(err);
+		if (unlikely(err))
+			goto out_unpin;
+	}
+
+	h_path.mnt = au_sbr_mnt(sb, bcpup);
+	au_dtime_store(dt, au_pinned_parent(pin), &h_path);
+	wh_dentry = NULL;
+	if (!need_wh)
+		goto out; /* success, no need to create whiteout */
+
+	wh_dentry = au_wh_create(dentry, bcpup, h_path.dentry);
+	if (IS_ERR(wh_dentry))
+		goto out_unpin;
+
+	/* returns with the parent locked and wh_dentry dget-ed */
+	goto out; /* success */
+
+out_unpin:
+	au_unpin(pin);
+out:
+	return wh_dentry;
+}
+
+/*
+ * when removing a dir, rename it to a unique temporary whiteout-ed name first
+ * in order to be revertible and save time for removing many child whiteouts
+ * under the dir.
+ * returns 1 when there are too many child whiteouts and the caller should
+ * remove them asynchronously. returns 0 when the number of children is small
+ * enough to remove now, or when the branch fs is a remote fs.
+ * otherwise returns an error.
+ */
+static int renwh_and_rmdir(struct dentry *dentry, aufs_bindex_t bindex,
+			   struct au_nhash *whlist, struct inode *dir)
+{
+	int rmdir_later, err, dirwh;
+	struct dentry *h_dentry;
+	struct super_block *sb;
+
+	sb = dentry->d_sb;
+	SiMustAnyLock(sb);
+	h_dentry = au_h_dptr(dentry, bindex);
+	err = au_whtmp_ren(h_dentry, au_sbr(sb, bindex));
+	if (unlikely(err))
+		goto out;
+
+	/* stop monitoring */
+	au_hn_free(au_hi(dentry->d_inode, bindex));
+
+	if (!au_test_fs_remote(h_dentry->d_sb)) {
+		dirwh = au_sbi(sb)->si_dirwh;
+		rmdir_later = (dirwh <= 1);
+		if (!rmdir_later)
+			rmdir_later = au_nhash_test_longer_wh(whlist, bindex,
+							      dirwh);
+		if (rmdir_later)
+			return rmdir_later;
+	}
+
+	err = au_whtmp_rmdir(dir, bindex, h_dentry, whlist);
+	if (unlikely(err)) {
+		AuIOErr("rmdir %.*s, b%d failed, %d. ignored\n",
+			AuDLNPair(h_dentry), bindex, err);
+		err = 0;
+	}
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/*
+ * final procedure for deleting an entry.
+ * maintain dentry and iattr.
+ */
+static void epilog(struct inode *dir, struct dentry *dentry,
+		   aufs_bindex_t bindex)
+{
+	struct inode *inode;
+
+	inode = dentry->d_inode;
+	d_drop(dentry);
+	inode->i_ctime = dir->i_ctime;
+
+	if (au_ibstart(dir) == bindex)
+		au_cpup_attr_timesizes(dir);
+	dir->i_version++;
+}
+
+/*
+ * when an error happens, remove the created whiteout and revert everything.
+ */
+static int do_revert(int err, struct inode *dir, aufs_bindex_t bindex,
+		     aufs_bindex_t bwh, struct dentry *wh_dentry,
+		     struct dentry *dentry, struct au_dtime *dt)
+{
+	int rerr;
+	struct path h_path = {
+		.dentry	= wh_dentry,
+		.mnt	= au_sbr_mnt(dir->i_sb, bindex)
+	};
+
+	rerr = au_wh_unlink_dentry(au_h_iptr(dir, bindex), &h_path, dentry);
+	if (!rerr) {
+		au_set_dbwh(dentry, bwh);
+		au_dtime_revert(dt);
+		return 0;
+	}
+
+	AuIOErr("%.*s reverting whiteout failed(%d, %d)\n",
+		AuDLNPair(dentry), err, rerr);
+	return -EIO;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int aufs_unlink(struct inode *dir, struct dentry *dentry)
+{
+	int err;
+	aufs_bindex_t bwh, bindex, bstart;
+	struct inode *inode, *h_dir;
+	struct dentry *parent, *wh_dentry;
+	/* to reduce stack size */
+	struct {
+		struct au_dtime dt;
+		struct au_pin pin;
+		struct path h_path;
+	} *a;
+
+	IMustLock(dir);
+
+	err = -ENOMEM;
+	a = kmalloc(sizeof(*a), GFP_NOFS);
+	if (unlikely(!a))
+		goto out;
+
+	err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN);
+	if (unlikely(err))
+		goto out_free;
+	err = au_d_hashed_positive(dentry);
+	if (unlikely(err))
+		goto out_unlock;
+	inode = dentry->d_inode;
+	IMustLock(inode);
+	err = -EISDIR;
+	if (unlikely(S_ISDIR(inode->i_mode)))
+		goto out_unlock; /* possible? */
+
+	bstart = au_dbstart(dentry);
+	bwh = au_dbwh(dentry);
+	bindex = -1;
+	parent = dentry->d_parent; /* dir inode is locked */
+	di_write_lock_parent(parent);
+	wh_dentry = lock_hdir_create_wh(dentry, /*isdir*/0, &bindex, &a->dt,
+					&a->pin);
+	err = PTR_ERR(wh_dentry);
+	if (IS_ERR(wh_dentry))
+		goto out_parent;
+
+	a->h_path.mnt = au_sbr_mnt(dentry->d_sb, bstart);
+	a->h_path.dentry = au_h_dptr(dentry, bstart);
+	dget(a->h_path.dentry);
+	if (bindex == bstart) {
+		h_dir = au_pinned_h_dir(&a->pin);
+		err = vfsub_unlink(h_dir, &a->h_path, /*force*/0);
+	} else {
+		/* dir inode is locked */
+		h_dir = wh_dentry->d_parent->d_inode;
+		IMustLock(h_dir);
+		err = 0;
+	}
+
+	if (!err) {
+		vfsub_drop_nlink(inode);
+		epilog(dir, dentry, bindex);
+
+		/* update target timestamps */
+		if (bindex == bstart) {
+			vfsub_update_h_iattr(&a->h_path, /*did*/NULL);
+			/*ignore*/
+			inode->i_ctime = a->h_path.dentry->d_inode->i_ctime;
+		} else
+			/* todo: this timestamp may be reverted later */
+			inode->i_ctime = h_dir->i_ctime;
+		goto out_unpin; /* success */
+	}
+
+	/* revert */
+	if (wh_dentry) {
+		int rerr;
+
+		rerr = do_revert(err, dir, bindex, bwh, wh_dentry, dentry,
+				 &a->dt);
+		if (rerr)
+			err = rerr;
+	}
+
+out_unpin:
+	au_unpin(&a->pin);
+	dput(wh_dentry);
+	dput(a->h_path.dentry);
+out_parent:
+	di_write_unlock(parent);
+out_unlock:
+	aufs_read_unlock(dentry, AuLock_DW);
+out_free:
+	kfree(a);
+out:
+	return err;
+}
+
+int aufs_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	int err, rmdir_later;
+	aufs_bindex_t bwh, bindex, bstart;
+	struct inode *inode;
+	struct dentry *parent, *wh_dentry, *h_dentry;
+	struct au_whtmp_rmdir *args;
+	/* to reduce stack size */
+	struct {
+		struct au_dtime dt;
+		struct au_pin pin;
+	} *a;
+
+	IMustLock(dir);
+
+	err = -ENOMEM;
+	a = kmalloc(sizeof(*a), GFP_NOFS);
+	if (unlikely(!a))
+		goto out;
+
+	err = aufs_read_lock(dentry, AuLock_DW | AuLock_FLUSH | AuLock_GEN);
+	if (unlikely(err))
+		goto out_free;
+	err = au_alive_dir(dentry);
+	if (unlikely(err))
+		goto out_unlock;
+	inode = dentry->d_inode;
+	IMustLock(inode);
+	err = -ENOTDIR;
+	if (unlikely(!S_ISDIR(inode->i_mode)))
+		goto out_unlock; /* possible? */
+
+	err = -ENOMEM;
+	args = au_whtmp_rmdir_alloc(dir->i_sb, GFP_NOFS);
+	if (unlikely(!args))
+		goto out_unlock;
+
+	parent = dentry->d_parent; /* dir inode is locked */
+	di_write_lock_parent(parent);
+	err = au_test_empty(dentry, &args->whlist);
+	if (unlikely(err))
+		goto out_parent;
+
+	bstart = au_dbstart(dentry);
+	bwh = au_dbwh(dentry);
+	bindex = -1;
+	wh_dentry = lock_hdir_create_wh(dentry, /*isdir*/1, &bindex, &a->dt,
+					&a->pin);
+	err = PTR_ERR(wh_dentry);
+	if (IS_ERR(wh_dentry))
+		goto out_parent;
+
+	h_dentry = au_h_dptr(dentry, bstart);
+	dget(h_dentry);
+	rmdir_later = 0;
+	if (bindex == bstart) {
+		err = renwh_and_rmdir(dentry, bstart, &args->whlist, dir);
+		if (err > 0) {
+			rmdir_later = err;
+			err = 0;
+		}
+	} else {
+		/* stop monitoring */
+		au_hn_free(au_hi(inode, bstart));
+
+		/* dir inode is locked */
+		IMustLock(wh_dentry->d_parent->d_inode);
+		err = 0;
+	}
+
+	if (!err) {
+		vfsub_dead_dir(inode);
+		au_set_dbdiropq(dentry, -1);
+		epilog(dir, dentry, bindex);
+
+		if (rmdir_later) {
+			au_whtmp_kick_rmdir(dir, bstart, h_dentry, args);
+			args = NULL;
+		}
+
+		goto out_unpin; /* success */
+	}
+
+	/* revert */
+	AuLabel(revert);
+	if (wh_dentry) {
+		int rerr;
+
+		rerr = do_revert(err, dir, bindex, bwh, wh_dentry, dentry,
+				 &a->dt);
+		if (rerr)
+			err = rerr;
+	}
+
+out_unpin:
+	au_unpin(&a->pin);
+	dput(wh_dentry);
+	dput(h_dentry);
+out_parent:
+	di_write_unlock(parent);
+	if (args)
+		au_whtmp_rmdir_free(args);
+out_unlock:
+	aufs_read_unlock(dentry, AuLock_DW);
+out_free:
+	kfree(a);
+out:
+	AuTraceErr(err);
+	return err;
+}
diff --git a/fs/aufs/i_op_ren.c b/fs/aufs/i_op_ren.c
new file mode 100644
index 0000000..5d8ea76
--- /dev/null
+++ b/fs/aufs/i_op_ren.c
@@ -0,0 +1,1009 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * inode operations (rename entry)
+ * todo: this is a crazy monster
+ */
+
+#include "aufs.h"
+
+enum { AuSRC, AuDST, AuSrcDst };
+enum { AuPARENT, AuCHILD, AuParentChild };
+
+#define AuRen_ISDIR	1
+#define AuRen_ISSAMEDIR	(1 << 1)
+#define AuRen_WHSRC	(1 << 2)
+#define AuRen_WHDST	(1 << 3)
+#define AuRen_MNT_WRITE	(1 << 4)
+#define AuRen_DT_DSTDIR	(1 << 5)
+#define AuRen_DIROPQ	(1 << 6)
+#define AuRen_CPUP	(1 << 7)
+#define au_ftest_ren(flags, name)	((flags) & AuRen_##name)
+#define au_fset_ren(flags, name) \
+	do { (flags) |= AuRen_##name; } while (0)
+#define au_fclr_ren(flags, name) \
+	do { (flags) &= ~AuRen_##name; } while (0)
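+
+/*
+ * a rough usage sketch of the helpers above; the flags are a plain bitmask:
+ *	au_fset_ren(a->flags, ISDIR);	expands to a->flags |= AuRen_ISDIR
+ *	au_ftest_ren(a->flags, ISDIR);	expands to a->flags & AuRen_ISDIR
+ *	au_fclr_ren(a->flags, ISDIR);	expands to a->flags &= ~AuRen_ISDIR
+ */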
+
+struct au_ren_args {
+	struct {
+		struct dentry *dentry, *h_dentry, *parent, *h_parent,
+			*wh_dentry;
+		struct inode *dir, *inode;
+		struct au_hinode *hdir;
+		struct au_dtime dt[AuParentChild];
+		aufs_bindex_t bstart;
+	} sd[AuSrcDst];
+
+#define src_dentry	sd[AuSRC].dentry
+#define src_dir		sd[AuSRC].dir
+#define src_inode	sd[AuSRC].inode
+#define src_h_dentry	sd[AuSRC].h_dentry
+#define src_parent	sd[AuSRC].parent
+#define src_h_parent	sd[AuSRC].h_parent
+#define src_wh_dentry	sd[AuSRC].wh_dentry
+#define src_hdir	sd[AuSRC].hdir
+#define src_h_dir	sd[AuSRC].hdir->hi_inode
+#define src_dt		sd[AuSRC].dt
+#define src_bstart	sd[AuSRC].bstart
+
+#define dst_dentry	sd[AuDST].dentry
+#define dst_dir		sd[AuDST].dir
+#define dst_inode	sd[AuDST].inode
+#define dst_h_dentry	sd[AuDST].h_dentry
+#define dst_parent	sd[AuDST].parent
+#define dst_h_parent	sd[AuDST].h_parent
+#define dst_wh_dentry	sd[AuDST].wh_dentry
+#define dst_hdir	sd[AuDST].hdir
+#define dst_h_dir	sd[AuDST].hdir->hi_inode
+#define dst_dt		sd[AuDST].dt
+#define dst_bstart	sd[AuDST].bstart
+
+	struct dentry *h_trap;
+	struct au_branch *br;
+	struct au_hinode *src_hinode;
+	struct path h_path;
+	struct au_nhash whlist;
+	aufs_bindex_t btgt, src_bwh, src_bdiropq;
+
+	unsigned int flags;
+
+	struct au_whtmp_rmdir *thargs;
+	struct dentry *h_dst;
+};
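+/*
+ * the #define's above are simple aliases,
+ * e.g. a->src_dentry expands to a->sd[AuSRC].dentry.
+ */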
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * functions for reverting.
+ * when an error happens in a single rename syscall, we should revert
+ * everything as if nothing had happened.
+ * we don't need to revert the copy-up/down of the parent dir since it is
+ * harmless.
+ */
+
+#define RevertFailure(fmt, ...) do { \
+	AuIOErr("revert failure: " fmt " (%d, %d)\n", \
+		##__VA_ARGS__, err, rerr); \
+	err = -EIO; \
+} while (0)
+
+static void au_ren_rev_diropq(int err, struct au_ren_args *a)
+{
+	int rerr;
+
+	au_hn_imtx_lock_nested(a->src_hinode, AuLsc_I_CHILD);
+	rerr = au_diropq_remove(a->src_dentry, a->btgt);
+	au_hn_imtx_unlock(a->src_hinode);
+	au_set_dbdiropq(a->src_dentry, a->src_bdiropq);
+	if (rerr)
+		RevertFailure("remove diropq %.*s", AuDLNPair(a->src_dentry));
+}
+
+static void au_ren_rev_rename(int err, struct au_ren_args *a)
+{
+	int rerr;
+
+	a->h_path.dentry = vfsub_lkup_one(&a->src_dentry->d_name,
+					  a->src_h_parent);
+	rerr = PTR_ERR(a->h_path.dentry);
+	if (IS_ERR(a->h_path.dentry)) {
+		RevertFailure("lkup one %.*s", AuDLNPair(a->src_dentry));
+		return;
+	}
+
+	rerr = vfsub_rename(a->dst_h_dir,
+			    au_h_dptr(a->src_dentry, a->btgt),
+			    a->src_h_dir, &a->h_path);
+	d_drop(a->h_path.dentry);
+	dput(a->h_path.dentry);
+	/* au_set_h_dptr(a->src_dentry, a->btgt, NULL); */
+	if (rerr)
+		RevertFailure("rename %.*s", AuDLNPair(a->src_dentry));
+}
+
+static void au_ren_rev_cpup(int err, struct au_ren_args *a)
+{
+	int rerr;
+
+	a->h_path.dentry = a->dst_h_dentry;
+	rerr = vfsub_unlink(a->dst_h_dir, &a->h_path, /*force*/0);
+	au_set_h_dptr(a->src_dentry, a->btgt, NULL);
+	au_set_dbstart(a->src_dentry, a->src_bstart);
+	if (rerr)
+		RevertFailure("unlink %.*s", AuDLNPair(a->dst_h_dentry));
+}
+
+static void au_ren_rev_whtmp(int err, struct au_ren_args *a)
+{
+	int rerr;
+
+	a->h_path.dentry = vfsub_lkup_one(&a->dst_dentry->d_name,
+					  a->dst_h_parent);
+	rerr = PTR_ERR(a->h_path.dentry);
+	if (IS_ERR(a->h_path.dentry)) {
+		RevertFailure("lkup one %.*s", AuDLNPair(a->dst_dentry));
+		return;
+	}
+	if (a->h_path.dentry->d_inode) {
+		d_drop(a->h_path.dentry);
+		dput(a->h_path.dentry);
+		return;
+	}
+
+	rerr = vfsub_rename(a->dst_h_dir, a->h_dst, a->dst_h_dir, &a->h_path);
+	d_drop(a->h_path.dentry);
+	dput(a->h_path.dentry);
+	if (!rerr)
+		au_set_h_dptr(a->dst_dentry, a->btgt, dget(a->h_dst));
+	else
+		RevertFailure("rename %.*s", AuDLNPair(a->h_dst));
+}
+
+static void au_ren_rev_whsrc(int err, struct au_ren_args *a)
+{
+	int rerr;
+
+	a->h_path.dentry = a->src_wh_dentry;
+	rerr = au_wh_unlink_dentry(a->src_h_dir, &a->h_path, a->src_dentry);
+	au_set_dbwh(a->src_dentry, a->src_bwh);
+	if (rerr)
+		RevertFailure("unlink %.*s", AuDLNPair(a->src_wh_dentry));
+}
+#undef RevertFailure
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * when we have to copyup the renaming entry, do it with the rename-target name
+ * in order to minimize the cost (the later actual rename is unnecessary).
+ * otherwise rename it on the target branch.
+ */
+static int au_ren_or_cpup(struct au_ren_args *a)
+{
+	int err;
+	struct dentry *d;
+
+	d = a->src_dentry;
+	if (au_dbstart(d) == a->btgt) {
+		a->h_path.dentry = a->dst_h_dentry;
+		if (au_ftest_ren(a->flags, DIROPQ)
+		    && au_dbdiropq(d) == a->btgt)
+			au_fclr_ren(a->flags, DIROPQ);
+		AuDebugOn(au_dbstart(d) != a->btgt);
+		err = vfsub_rename(a->src_h_dir, au_h_dptr(d, a->btgt),
+				   a->dst_h_dir, &a->h_path);
+	} else
+		BUG();
+
+	if (!err && a->h_dst)
+		/* it will be set to dinfo later */
+		dget(a->h_dst);
+
+	return err;
+}
+
+/* cf. aufs_rmdir() */
+static int au_ren_del_whtmp(struct au_ren_args *a)
+{
+	int err;
+	struct inode *dir;
+
+	dir = a->dst_dir;
+	SiMustAnyLock(dir->i_sb);
+	if (!au_nhash_test_longer_wh(&a->whlist, a->btgt,
+				     au_sbi(dir->i_sb)->si_dirwh)
+	    || au_test_fs_remote(a->h_dst->d_sb)) {
+		err = au_whtmp_rmdir(dir, a->btgt, a->h_dst, &a->whlist);
+		if (unlikely(err))
+			pr_warn("failed removing whtmp dir %.*s (%d), "
+				"ignored.\n", AuDLNPair(a->h_dst), err);
+	} else {
+		au_nhash_wh_free(&a->thargs->whlist);
+		a->thargs->whlist = a->whlist;
+		a->whlist.nh_num = 0;
+		au_whtmp_kick_rmdir(dir, a->btgt, a->h_dst, a->thargs);
+		dput(a->h_dst);
+		a->thargs = NULL;
+	}
+
+	return 0;
+}
+
+/* make the dir 'opaque' */
+static int au_ren_diropq(struct au_ren_args *a)
+{
+	int err;
+	struct dentry *diropq;
+
+	err = 0;
+	a->src_bdiropq = au_dbdiropq(a->src_dentry);
+	a->src_hinode = au_hi(a->src_inode, a->btgt);
+	au_hn_imtx_lock_nested(a->src_hinode, AuLsc_I_CHILD);
+	diropq = au_diropq_create(a->src_dentry, a->btgt);
+	au_hn_imtx_unlock(a->src_hinode);
+	if (IS_ERR(diropq))
+		err = PTR_ERR(diropq);
+	dput(diropq);
+
+	return err;
+}
+
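+/*
+ * the core of rename(2): create/lookup the whiteouts, rename the lower entry
+ * (or copy it up), make the dir opaque when needed, and revert every step if
+ * one of them fails.
+ */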
+static int do_rename(struct au_ren_args *a)
+{
+	int err;
+	struct dentry *d, *h_d;
+
+	/* prepare workqueue args for asynchronous rmdir */
+	h_d = a->dst_h_dentry;
+	if (au_ftest_ren(a->flags, ISDIR) && h_d->d_inode) {
+		err = -ENOMEM;
+		a->thargs = au_whtmp_rmdir_alloc(a->src_dentry->d_sb, GFP_NOFS);
+		if (unlikely(!a->thargs))
+			goto out;
+		a->h_dst = dget(h_d);
+	}
+
+	/* create whiteout for src_dentry */
+	if (au_ftest_ren(a->flags, WHSRC)) {
+		a->src_bwh = au_dbwh(a->src_dentry);
+		AuDebugOn(a->src_bwh >= 0);
+		a->src_wh_dentry
+			= au_wh_create(a->src_dentry, a->btgt, a->src_h_parent);
+		err = PTR_ERR(a->src_wh_dentry);
+		if (IS_ERR(a->src_wh_dentry))
+			goto out_thargs;
+	}
+
+	/* lookup whiteout for dentry */
+	if (au_ftest_ren(a->flags, WHDST)) {
+		h_d = au_wh_lkup(a->dst_h_parent, &a->dst_dentry->d_name,
+				 a->br);
+		err = PTR_ERR(h_d);
+		if (IS_ERR(h_d))
+			goto out_whsrc;
+		if (!h_d->d_inode)
+			dput(h_d);
+		else
+			a->dst_wh_dentry = h_d;
+	}
+
+	/* rename dentry to tmpwh */
+	if (a->thargs) {
+		err = au_whtmp_ren(a->dst_h_dentry, a->br);
+		if (unlikely(err))
+			goto out_whdst;
+
+		d = a->dst_dentry;
+		au_set_h_dptr(d, a->btgt, NULL);
+		err = au_lkup_neg(d, a->btgt, /*wh*/0);
+		if (unlikely(err))
+			goto out_whtmp;
+		a->dst_h_dentry = au_h_dptr(d, a->btgt);
+	}
+
+	BUG_ON(a->dst_h_dentry->d_inode && a->src_bstart != a->btgt);
+
+	/* rename by vfs_rename or cpup */
+	d = a->dst_dentry;
+	if (au_ftest_ren(a->flags, ISDIR)
+	    && (a->dst_wh_dentry
+		|| au_dbdiropq(d) == a->btgt
+		/* hide the lower to keep xino */
+		|| a->btgt < au_dbend(d)
+		|| au_opt_test(au_mntflags(d->d_sb), ALWAYS_DIROPQ)))
+		au_fset_ren(a->flags, DIROPQ);
+	err = au_ren_or_cpup(a);
+	if (unlikely(err))
+		/* leave the copied-up one */
+		goto out_whtmp;
+
+	/* make dir opaque */
+	if (au_ftest_ren(a->flags, DIROPQ)) {
+		err = au_ren_diropq(a);
+		if (unlikely(err))
+			goto out_rename;
+	}
+
+	/* update target timestamps */
+	AuDebugOn(au_dbstart(a->src_dentry) != a->btgt);
+	a->h_path.dentry = au_h_dptr(a->src_dentry, a->btgt);
+	vfsub_update_h_iattr(&a->h_path, /*did*/NULL); /*ignore*/
+	a->src_inode->i_ctime = a->h_path.dentry->d_inode->i_ctime;
+
+	/* remove whiteout for dentry */
+	if (a->dst_wh_dentry) {
+		a->h_path.dentry = a->dst_wh_dentry;
+		err = au_wh_unlink_dentry(a->dst_h_dir, &a->h_path,
+					  a->dst_dentry);
+		if (unlikely(err))
+			goto out_diropq;
+	}
+
+	/* remove whtmp */
+	if (a->thargs)
+		au_ren_del_whtmp(a); /* ignore this error */
+
+	err = 0;
+	goto out_success;
+
+out_diropq:
+	if (au_ftest_ren(a->flags, DIROPQ))
+		au_ren_rev_diropq(err, a);
+out_rename:
+	if (!au_ftest_ren(a->flags, CPUP))
+		au_ren_rev_rename(err, a);
+	else
+		au_ren_rev_cpup(err, a);
+	dput(a->h_dst);
+out_whtmp:
+	if (a->thargs)
+		au_ren_rev_whtmp(err, a);
+out_whdst:
+	dput(a->dst_wh_dentry);
+	a->dst_wh_dentry = NULL;
+out_whsrc:
+	if (a->src_wh_dentry)
+		au_ren_rev_whsrc(err, a);
+out_success:
+	dput(a->src_wh_dentry);
+	dput(a->dst_wh_dentry);
+out_thargs:
+	if (a->thargs) {
+		dput(a->h_dst);
+		au_whtmp_rmdir_free(a->thargs);
+		a->thargs = NULL;
+	}
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * test whether the dir @dentry can be the rename destination.
+ * success means it is a logically empty dir.
+ */
+static int may_rename_dstdir(struct dentry *dentry, struct au_nhash *whlist)
+{
+	return au_test_empty(dentry, whlist);
+}
+
+/*
+ * test whether the dir @dentry can be the rename source.
+ * if it can, return 0.
+ * success means,
+ * - it is a logically empty dir.
+ * - or, it exists on the writable branch and has no children, including
+ *       whiteouts, on the lower branch.
+ */
+static int may_rename_srcdir(struct dentry *dentry, aufs_bindex_t btgt)
+{
+	int err;
+	unsigned int rdhash;
+	aufs_bindex_t bstart;
+
+	bstart = au_dbstart(dentry);
+	if (bstart != btgt) {
+		struct au_nhash whlist;
+
+		SiMustAnyLock(dentry->d_sb);
+		rdhash = au_sbi(dentry->d_sb)->si_rdhash;
+		if (!rdhash)
+			rdhash = au_rdhash_est(au_dir_size(/*file*/NULL,
+							   dentry));
+		err = au_nhash_alloc(&whlist, rdhash, GFP_NOFS);
+		if (unlikely(err))
+			goto out;
+		err = au_test_empty(dentry, &whlist);
+		au_nhash_wh_free(&whlist);
+		goto out;
+	}
+
+	if (bstart == au_dbtaildir(dentry))
+		return 0; /* success */
+
+	err = au_test_empty_lower(dentry);
+
+out:
+	if (err == -ENOTEMPTY) {
+		AuWarn1("renaming dir who has child(ren) on multiple branches,"
+			" is not supported\n");
+		err = -EXDEV;
+	}
+	return err;
+}
+
+/* side effect: sets whlist and h_dentry */
+static int au_ren_may_dir(struct au_ren_args *a)
+{
+	int err;
+	unsigned int rdhash;
+	struct dentry *d;
+
+	d = a->dst_dentry;
+	SiMustAnyLock(d->d_sb);
+
+	err = 0;
+	if (au_ftest_ren(a->flags, ISDIR) && a->dst_inode) {
+		rdhash = au_sbi(d->d_sb)->si_rdhash;
+		if (!rdhash)
+			rdhash = au_rdhash_est(au_dir_size(/*file*/NULL, d));
+		err = au_nhash_alloc(&a->whlist, rdhash, GFP_NOFS);
+		if (unlikely(err))
+			goto out;
+
+		au_set_dbstart(d, a->dst_bstart);
+		err = may_rename_dstdir(d, &a->whlist);
+		au_set_dbstart(d, a->btgt);
+	}
+	a->dst_h_dentry = au_h_dptr(d, au_dbstart(d));
+	if (unlikely(err))
+		goto out;
+
+	d = a->src_dentry;
+	a->src_h_dentry = au_h_dptr(d, au_dbstart(d));
+	if (au_ftest_ren(a->flags, ISDIR)) {
+		err = may_rename_srcdir(d, a->btgt);
+		if (unlikely(err)) {
+			au_nhash_wh_free(&a->whlist);
+			a->whlist.nh_num = 0;
+		}
+	}
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * simple tests for rename.
+ * following the checks in vfs, plus the parent-child relationship.
+ */
+static int au_may_ren(struct au_ren_args *a)
+{
+	int err, isdir;
+	struct inode *h_inode;
+
+	if (a->src_bstart == a->btgt) {
+		err = au_may_del(a->src_dentry, a->btgt, a->src_h_parent,
+				 au_ftest_ren(a->flags, ISDIR));
+		if (unlikely(err))
+			goto out;
+		err = -EINVAL;
+		if (unlikely(a->src_h_dentry == a->h_trap))
+			goto out;
+	}
+
+	err = 0;
+	if (a->dst_bstart != a->btgt)
+		goto out;
+
+	err = -ENOTEMPTY;
+	if (unlikely(a->dst_h_dentry == a->h_trap))
+		goto out;
+
+	err = -EIO;
+	h_inode = a->dst_h_dentry->d_inode;
+	isdir = !!au_ftest_ren(a->flags, ISDIR);
+	if (!a->dst_dentry->d_inode) {
+		if (unlikely(h_inode))
+			goto out;
+		err = au_may_add(a->dst_dentry, a->btgt, a->dst_h_parent,
+				 isdir);
+	} else {
+		if (unlikely(!h_inode || !h_inode->i_nlink))
+			goto out;
+		err = au_may_del(a->dst_dentry, a->btgt, a->dst_h_parent,
+				 isdir);
+		if (unlikely(err))
+			goto out;
+	}
+
+out:
+	if (unlikely(err == -ENOENT || err == -EEXIST))
+		err = -EIO;
+	AuTraceErr(err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * locking order
+ * (VFS)
+ * - src_dir and dir by lock_rename()
+ * - inode if it exists
+ * (aufs)
+ * - lock all
+ *   + src_dentry and dentry by aufs_read_and_write_lock2() which calls,
+ *     + si_read_lock
+ *     + di_write_lock2_child()
+ *       + di_write_lock_child()
+ *	   + ii_write_lock_child()
+ *       + di_write_lock_child2()
+ *	   + ii_write_lock_child2()
+ *     + src_parent and parent
+ *       + di_write_lock_parent()
+ *	   + ii_write_lock_parent()
+ *       + di_write_lock_parent2()
+ *	   + ii_write_lock_parent2()
+ *   + lower src_dir and dir by vfsub_lock_rename()
+ *   + verify every relationship between child and parent. if any of
+ *     them fails, unlock all and return -EBUSY.
+ */
+static void au_ren_unlock(struct au_ren_args *a)
+{
+	vfsub_unlock_rename(a->src_h_parent, a->src_hdir,
+			    a->dst_h_parent, a->dst_hdir);
+	if (au_ftest_ren(a->flags, MNT_WRITE))
+		vfsub_mnt_drop_write(au_br_mnt(a->br));
+}
+
+static int au_ren_lock(struct au_ren_args *a)
+{
+	int err;
+	unsigned int udba;
+
+	err = 0;
+	a->src_h_parent = au_h_dptr(a->src_parent, a->btgt);
+	a->src_hdir = au_hi(a->src_dir, a->btgt);
+	a->dst_h_parent = au_h_dptr(a->dst_parent, a->btgt);
+	a->dst_hdir = au_hi(a->dst_dir, a->btgt);
+
+	err = vfsub_mnt_want_write(au_br_mnt(a->br));
+	if (unlikely(err))
+		goto out;
+	au_fset_ren(a->flags, MNT_WRITE);
+	a->h_trap = vfsub_lock_rename(a->src_h_parent, a->src_hdir,
+				      a->dst_h_parent, a->dst_hdir);
+	udba = au_opt_udba(a->src_dentry->d_sb);
+	if (unlikely(a->src_hdir->hi_inode != a->src_h_parent->d_inode
+		     || a->dst_hdir->hi_inode != a->dst_h_parent->d_inode))
+		err = au_busy_or_stale();
+	if (!err && au_dbstart(a->src_dentry) == a->btgt)
+		err = au_h_verify(a->src_h_dentry, udba,
+				  a->src_h_parent->d_inode, a->src_h_parent,
+				  a->br);
+	if (!err && au_dbstart(a->dst_dentry) == a->btgt)
+		err = au_h_verify(a->dst_h_dentry, udba,
+				  a->dst_h_parent->d_inode, a->dst_h_parent,
+				  a->br);
+	if (!err)
+		goto out; /* success */
+
+	err = au_busy_or_stale();
+	au_ren_unlock(a);
+
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void au_ren_refresh_dir(struct au_ren_args *a)
+{
+	struct inode *dir;
+
+	dir = a->dst_dir;
+	dir->i_version++;
+	if (au_ftest_ren(a->flags, ISDIR)) {
+		/* is this updating defined in POSIX? */
+		au_cpup_attr_timesizes(a->src_inode);
+		au_cpup_attr_nlink(dir, /*force*/1);
+	}
+
+	if (au_ibstart(dir) == a->btgt)
+		au_cpup_attr_timesizes(dir);
+
+	if (au_ftest_ren(a->flags, ISSAMEDIR))
+		return;
+
+	dir = a->src_dir;
+	dir->i_version++;
+	if (au_ftest_ren(a->flags, ISDIR))
+		au_cpup_attr_nlink(dir, /*force*/1);
+	if (au_ibstart(dir) == a->btgt)
+		au_cpup_attr_timesizes(dir);
+}
+
+static void au_ren_refresh(struct au_ren_args *a)
+{
+	aufs_bindex_t bend, bindex;
+	struct dentry *d, *h_d;
+	struct inode *i, *h_i;
+	struct super_block *sb;
+
+	d = a->dst_dentry;
+	d_drop(d);
+	if (a->h_dst)
+		/* already dget-ed by au_ren_or_cpup() */
+		au_set_h_dptr(d, a->btgt, a->h_dst);
+
+	i = a->dst_inode;
+	if (i) {
+		if (!au_ftest_ren(a->flags, ISDIR))
+			vfsub_drop_nlink(i);
+		else {
+			vfsub_dead_dir(i);
+			au_cpup_attr_timesizes(i);
+		}
+		au_update_dbrange(d, /*do_put_zero*/1);
+	} else {
+		bend = a->btgt;
+		for (bindex = au_dbstart(d); bindex < bend; bindex++)
+			au_set_h_dptr(d, bindex, NULL);
+		bend = au_dbend(d);
+		for (bindex = a->btgt + 1; bindex <= bend; bindex++)
+			au_set_h_dptr(d, bindex, NULL);
+		au_update_dbrange(d, /*do_put_zero*/0);
+	}
+
+	d = a->src_dentry;
+	au_set_dbwh(d, -1);
+	bend = au_dbend(d);
+	for (bindex = a->btgt + 1; bindex <= bend; bindex++) {
+		h_d = au_h_dptr(d, bindex);
+		if (h_d)
+			au_set_h_dptr(d, bindex, NULL);
+	}
+	au_set_dbend(d, a->btgt);
+
+	sb = d->d_sb;
+	i = a->src_inode;
+	if (au_opt_test(au_mntflags(sb), PLINK) && au_plink_test(i))
+		return; /* success */
+
+	bend = au_ibend(i);
+	for (bindex = a->btgt + 1; bindex <= bend; bindex++) {
+		h_i = au_h_iptr(i, bindex);
+		if (h_i) {
+			au_xino_write(sb, bindex, h_i->i_ino, /*ino*/0);
+			/* ignore this error */
+			au_set_h_iptr(i, bindex, NULL, 0);
+		}
+	}
+	au_set_ibend(i, a->btgt);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* mainly for link(2) and rename(2) */
+int au_wbr(struct dentry *dentry, aufs_bindex_t btgt)
+{
+	aufs_bindex_t bdiropq, bwh;
+	struct dentry *parent;
+	struct au_branch *br;
+
+	parent = dentry->d_parent;
+	IMustLock(parent->d_inode); /* dir is locked */
+
+	bdiropq = au_dbdiropq(parent);
+	bwh = au_dbwh(dentry);
+	br = au_sbr(dentry->d_sb, btgt);
+	if (au_br_rdonly(br)
+	    || (0 <= bdiropq && bdiropq < btgt)
+	    || (0 <= bwh && bwh < btgt))
+		btgt = -1;
+
+	AuDbg("btgt %d\n", btgt);
+	return btgt;
+}
+
+/* sets src_bstart, dst_bstart and btgt */
+static int au_ren_wbr(struct au_ren_args *a)
+{
+	int err;
+	struct au_wr_dir_args wr_dir_args = {
+		/* .force_btgt	= -1, */
+		.flags		= AuWrDir_ADD_ENTRY
+	};
+
+	a->src_bstart = au_dbstart(a->src_dentry);
+	a->dst_bstart = au_dbstart(a->dst_dentry);
+	if (au_ftest_ren(a->flags, ISDIR))
+		au_fset_wrdir(wr_dir_args.flags, ISDIR);
+	wr_dir_args.force_btgt = a->src_bstart;
+	if (a->dst_inode && a->dst_bstart < a->src_bstart)
+		wr_dir_args.force_btgt = a->dst_bstart;
+	wr_dir_args.force_btgt = au_wbr(a->dst_dentry, wr_dir_args.force_btgt);
+	err = au_wr_dir(a->dst_dentry, a->src_dentry, &wr_dir_args);
+	a->btgt = err;
+
+	return err;
+}
+
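+/*
+ * save the timestamps of the parent dirs (and of the children when renaming
+ * a dir), so that au_ren_rev_dt() can revert them if the rename fails.
+ */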
+static void au_ren_dt(struct au_ren_args *a)
+{
+	a->h_path.dentry = a->src_h_parent;
+	au_dtime_store(a->src_dt + AuPARENT, a->src_parent, &a->h_path);
+	if (!au_ftest_ren(a->flags, ISSAMEDIR)) {
+		a->h_path.dentry = a->dst_h_parent;
+		au_dtime_store(a->dst_dt + AuPARENT, a->dst_parent, &a->h_path);
+	}
+
+	au_fclr_ren(a->flags, DT_DSTDIR);
+	if (!au_ftest_ren(a->flags, ISDIR))
+		return;
+
+	a->h_path.dentry = a->src_h_dentry;
+	au_dtime_store(a->src_dt + AuCHILD, a->src_dentry, &a->h_path);
+	if (a->dst_h_dentry->d_inode) {
+		au_fset_ren(a->flags, DT_DSTDIR);
+		a->h_path.dentry = a->dst_h_dentry;
+		au_dtime_store(a->dst_dt + AuCHILD, a->dst_dentry, &a->h_path);
+	}
+}
+
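+/* revert the timestamps saved by au_ren_dt() after a failed rename */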
+static void au_ren_rev_dt(int err, struct au_ren_args *a)
+{
+	struct dentry *h_d;
+	struct mutex *h_mtx;
+
+	au_dtime_revert(a->src_dt + AuPARENT);
+	if (!au_ftest_ren(a->flags, ISSAMEDIR))
+		au_dtime_revert(a->dst_dt + AuPARENT);
+
+	if (au_ftest_ren(a->flags, ISDIR) && err != -EIO) {
+		h_d = a->src_dt[AuCHILD].dt_h_path.dentry;
+		h_mtx = &h_d->d_inode->i_mutex;
+		mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
+		au_dtime_revert(a->src_dt + AuCHILD);
+		mutex_unlock(h_mtx);
+
+		if (au_ftest_ren(a->flags, DT_DSTDIR)) {
+			h_d = a->dst_dt[AuCHILD].dt_h_path.dentry;
+			h_mtx = &h_d->d_inode->i_mutex;
+			mutex_lock_nested(h_mtx, AuLsc_I_CHILD);
+			au_dtime_revert(a->dst_dt + AuCHILD);
+			mutex_unlock(h_mtx);
+		}
+	}
+}
+
+/* ---------------------------------------------------------------------- */
+
+int aufs_rename(struct inode *_src_dir, struct dentry *_src_dentry,
+		struct inode *_dst_dir, struct dentry *_dst_dentry)
+{
+	int err, flags;
+	/* reduce stack space */
+	struct au_ren_args *a;
+
+	AuDbg("%.*s, %.*s\n", AuDLNPair(_src_dentry), AuDLNPair(_dst_dentry));
+	IMustLock(_src_dir);
+	IMustLock(_dst_dir);
+
+	err = -ENOMEM;
+	BUILD_BUG_ON(sizeof(*a) > PAGE_SIZE);
+	a = kzalloc(sizeof(*a), GFP_NOFS);
+	if (unlikely(!a))
+		goto out;
+
+	a->src_dir = _src_dir;
+	a->src_dentry = _src_dentry;
+	a->src_inode = a->src_dentry->d_inode;
+	a->src_parent = a->src_dentry->d_parent; /* dir inode is locked */
+	a->dst_dir = _dst_dir;
+	a->dst_dentry = _dst_dentry;
+	a->dst_inode = a->dst_dentry->d_inode;
+	a->dst_parent = a->dst_dentry->d_parent; /* dir inode is locked */
+	if (a->dst_inode) {
+		IMustLock(a->dst_inode);
+		au_igrab(a->dst_inode);
+	}
+
+	err = -ENOTDIR;
+	flags = AuLock_FLUSH | AuLock_NOPLM | AuLock_GEN;
+	if (S_ISDIR(a->src_inode->i_mode)) {
+		au_fset_ren(a->flags, ISDIR);
+		if (unlikely(a->dst_inode && !S_ISDIR(a->dst_inode->i_mode)))
+			goto out_free;
+		err = aufs_read_and_write_lock2(a->dst_dentry, a->src_dentry,
+						AuLock_DIR | flags);
+	} else
+		err = aufs_read_and_write_lock2(a->dst_dentry, a->src_dentry,
+						flags);
+	if (unlikely(err))
+		goto out_free;
+
+	err = au_d_hashed_positive(a->src_dentry);
+	if (unlikely(err))
+		goto out_unlock;
+	err = -ENOENT;
+	if (a->dst_inode) {
+		/*
+		 * If it is a dir, the VFS unhashes dst_dentry before calling
+		 * this function, so we cannot rely upon d_unhashed().
+		 */
+		if (unlikely(!a->dst_inode->i_nlink))
+			goto out_unlock;
+		if (!S_ISDIR(a->dst_inode->i_mode)) {
+			err = au_d_hashed_positive(a->dst_dentry);
+			if (unlikely(err))
+				goto out_unlock;
+		} else if (unlikely(IS_DEADDIR(a->dst_inode)))
+			goto out_unlock;
+	} else if (unlikely(d_unhashed(a->dst_dentry)))
+		goto out_unlock;
+
+	/*
+	 * is it possible?
+	 * yes, it happened (in linux-3.3-rcN), but I don't know why.
+	 * there may be a problem somewhere else.
+	 */
+	err = -EINVAL;
+	if (unlikely(a->dst_parent->d_inode == a->src_dentry->d_inode))
+		goto out_unlock;
+
+	au_fset_ren(a->flags, ISSAMEDIR); /* temporary */
+	di_write_lock_parent(a->dst_parent);
+
+	/* which branch we process */
+	err = au_ren_wbr(a);
+	if (unlikely(err < 0))
+		goto out_parent;
+	a->br = au_sbr(a->dst_dentry->d_sb, a->btgt);
+	a->h_path.mnt = au_br_mnt(a->br);
+
+	/* are they allowed to be renamed? */
+	err = au_ren_may_dir(a);
+	if (unlikely(err))
+		goto out_children;
+
+	/* prepare the writable parent dir on the same branch */
+	if (a->dst_bstart == a->btgt) {
+		au_fset_ren(a->flags, WHDST);
+	} else {
+		err = au_cpup_dirs(a->dst_dentry, a->btgt);
+		if (unlikely(err))
+			goto out_children;
+	}
+
+	if (a->src_dir != a->dst_dir) {
+		/*
+		 * this temporary unlock is safe,
+		 * because the i_mutex of both dirs is held.
+		 */
+		di_write_unlock(a->dst_parent);
+		di_write_lock_parent(a->src_parent);
+		err = au_wr_dir_need_wh(a->src_dentry,
+					au_ftest_ren(a->flags, ISDIR),
+					&a->btgt);
+		di_write_unlock(a->src_parent);
+		di_write_lock2_parent(a->src_parent, a->dst_parent, /*isdir*/1);
+		au_fclr_ren(a->flags, ISSAMEDIR);
+	} else
+		err = au_wr_dir_need_wh(a->src_dentry,
+					au_ftest_ren(a->flags, ISDIR),
+					&a->btgt);
+	if (unlikely(err < 0))
+		goto out_children;
+	if (err)
+		au_fset_ren(a->flags, WHSRC);
+
+	/* cpup src */
+	if (a->src_bstart != a->btgt) {
+		struct au_pin pin;
+
+		err = au_pin(&pin, a->src_dentry, a->btgt,
+			     au_opt_udba(a->src_dentry->d_sb),
+			     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+		if (!err) {
+			struct au_cp_generic cpg = {
+				.dentry	= a->src_dentry,
+				.bdst	= a->btgt,
+				.bsrc	= a->src_bstart,
+				.len	= -1,
+				.pin	= &pin,
+				.flags	= AuCpup_DTIME | AuCpup_HOPEN
+			};
+			AuDebugOn(au_dbstart(a->src_dentry) != a->src_bstart);
+			err = au_sio_cpup_simple(&cpg);
+			au_unpin(&pin);
+		}
+		if (unlikely(err))
+			goto out_children;
+		a->src_bstart = a->btgt;
+		a->src_h_dentry = au_h_dptr(a->src_dentry, a->btgt);
+		au_fset_ren(a->flags, WHSRC);
+	}
+
+	/* lock them all */
+	err = au_ren_lock(a);
+	if (unlikely(err))
+		/* leave the copied-up one */
+		goto out_children;
+
+	if (!au_opt_test(au_mntflags(a->dst_dir->i_sb), UDBA_NONE))
+		err = au_may_ren(a);
+	else if (unlikely(a->dst_dentry->d_name.len > AUFS_MAX_NAMELEN))
+		err = -ENAMETOOLONG;
+	if (unlikely(err))
+		goto out_hdir;
+
+	/* store timestamps to be revertible */
+	au_ren_dt(a);
+
+	/* here we go */
+	err = do_rename(a);
+	if (unlikely(err))
+		goto out_dt;
+
+	/* update dir attributes */
+	au_ren_refresh_dir(a);
+
+	/* dput/iput all lower dentries */
+	au_ren_refresh(a);
+
+	goto out_hdir; /* success */
+
+out_dt:
+	au_ren_rev_dt(err, a);
+out_hdir:
+	au_ren_unlock(a);
+out_children:
+	au_nhash_wh_free(&a->whlist);
+	if (err && a->dst_inode && a->dst_bstart != a->btgt) {
+		AuDbg("bstart %d, btgt %d\n", a->dst_bstart, a->btgt);
+		au_set_h_dptr(a->dst_dentry, a->btgt, NULL);
+		au_set_dbstart(a->dst_dentry, a->dst_bstart);
+	}
+out_parent:
+	if (!err)
+		d_move(a->src_dentry, a->dst_dentry);
+	else {
+		au_update_dbstart(a->dst_dentry);
+		if (!a->dst_inode)
+			d_drop(a->dst_dentry);
+	}
+	if (au_ftest_ren(a->flags, ISSAMEDIR))
+		di_write_unlock(a->dst_parent);
+	else
+		di_write_unlock2(a->src_parent, a->dst_parent);
+out_unlock:
+	aufs_read_and_write_unlock2(a->dst_dentry, a->src_dentry);
+out_free:
+	iput(a->dst_inode);
+	if (a->thargs)
+		au_whtmp_rmdir_free(a->thargs);
+	kfree(a);
+out:
+	AuTraceErr(err);
+	return err;
+}
diff --git a/fs/aufs/iinfo.c b/fs/aufs/iinfo.c
new file mode 100644
index 0000000..b82ebbf
--- /dev/null
+++ b/fs/aufs/iinfo.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * inode private data
+ */
+
+#include "aufs.h"
+
+struct inode *au_h_iptr(struct inode *inode, aufs_bindex_t bindex)
+{
+	struct inode *h_inode;
+
+	IiMustAnyLock(inode);
+
+	h_inode = au_ii(inode)->ii_hinode[0 + bindex].hi_inode;
+	AuDebugOn(h_inode && atomic_read(&h_inode->i_count) <= 0);
+	return h_inode;
+}
+
+/* todo: hard/soft set? */
+void au_hiput(struct au_hinode *hinode)
+{
+	au_hn_free(hinode);
+	dput(hinode->hi_whdentry);
+	iput(hinode->hi_inode);
+}
+
+unsigned int au_hi_flags(struct inode *inode, int isdir)
+{
+	unsigned int flags;
+	const unsigned int mnt_flags = au_mntflags(inode->i_sb);
+
+	flags = 0;
+	if (au_opt_test(mnt_flags, XINO))
+		au_fset_hi(flags, XINO);
+	if (isdir && au_opt_test(mnt_flags, UDBA_HNOTIFY))
+		au_fset_hi(flags, HNOTIFY);
+	return flags;
+}
+
+void au_set_h_iptr(struct inode *inode, aufs_bindex_t bindex,
+		   struct inode *h_inode, unsigned int flags)
+{
+	struct au_hinode *hinode;
+	struct inode *hi;
+	struct au_iinfo *iinfo = au_ii(inode);
+
+	IiMustWriteLock(inode);
+
+	hinode = iinfo->ii_hinode + bindex;
+	hi = hinode->hi_inode;
+	AuDebugOn(h_inode && atomic_read(&h_inode->i_count) <= 0);
+
+	if (hi)
+		au_hiput(hinode);
+	hinode->hi_inode = h_inode;
+	if (h_inode) {
+		int err;
+		struct super_block *sb = inode->i_sb;
+		struct au_branch *br;
+
+		AuDebugOn(inode->i_mode
+			  && (h_inode->i_mode & S_IFMT)
+			  != (inode->i_mode & S_IFMT));
+		if (bindex == iinfo->ii_bstart)
+			au_cpup_igen(inode, h_inode);
+		br = au_sbr(sb, bindex);
+		hinode->hi_id = br->br_id;
+		if (au_ftest_hi(flags, XINO)) {
+			err = au_xino_write(sb, bindex, h_inode->i_ino,
+					    inode->i_ino);
+			if (unlikely(err))
+				AuIOErr1("failed au_xino_write() %d\n", err);
+		}
+
+		if (au_ftest_hi(flags, HNOTIFY)
+		    && au_br_hnotifyable(br->br_perm)) {
+			err = au_hn_alloc(hinode, inode);
+			if (unlikely(err))
+				AuIOErr1("au_hn_alloc() %d\n", err);
+		}
+	}
+}
+
+void au_set_hi_wh(struct inode *inode, aufs_bindex_t bindex,
+		  struct dentry *h_wh)
+{
+	struct au_hinode *hinode;
+
+	IiMustWriteLock(inode);
+
+	hinode = au_ii(inode)->ii_hinode + bindex;
+	AuDebugOn(hinode->hi_whdentry);
+	hinode->hi_whdentry = h_wh;
+}
+
+void au_update_iigen(struct inode *inode, int half)
+{
+	struct au_iinfo *iinfo;
+	struct au_iigen *iigen;
+	unsigned int sigen;
+
+	sigen = au_sigen(inode->i_sb);
+	iinfo = au_ii(inode);
+	iigen = &iinfo->ii_generation;
+	spin_lock(&iinfo->ii_genspin);
+	iigen->ig_generation = sigen;
+	if (half)
+		au_ig_fset(iigen->ig_flags, HALF_REFRESHED);
+	else
+		au_ig_fclr(iigen->ig_flags, HALF_REFRESHED);
+	spin_unlock(&iinfo->ii_genspin);
+}
+
+/* it may be called at remount time, too */
+void au_update_ibrange(struct inode *inode, int do_put_zero)
+{
+	struct au_iinfo *iinfo;
+	aufs_bindex_t bindex, bend;
+
+	iinfo = au_ii(inode);
+	if (!iinfo)
+		return;
+
+	IiMustWriteLock(inode);
+
+	if (do_put_zero && iinfo->ii_bstart >= 0) {
+		for (bindex = iinfo->ii_bstart; bindex <= iinfo->ii_bend;
+		     bindex++) {
+			struct inode *h_i;
+
+			h_i = iinfo->ii_hinode[0 + bindex].hi_inode;
+			if (h_i && !h_i->i_nlink)
+				au_set_h_iptr(inode, bindex, NULL, 0);
+		}
+	}
+
+	iinfo->ii_bstart = -1;
+	iinfo->ii_bend = -1;
+	bend = au_sbend(inode->i_sb);
+	for (bindex = 0; bindex <= bend; bindex++)
+		if (iinfo->ii_hinode[0 + bindex].hi_inode) {
+			iinfo->ii_bstart = bindex;
+			break;
+		}
+	if (iinfo->ii_bstart >= 0)
+		for (bindex = bend; bindex >= iinfo->ii_bstart; bindex--)
+			if (iinfo->ii_hinode[0 + bindex].hi_inode) {
+				iinfo->ii_bend = bindex;
+				break;
+			}
+	AuDebugOn(iinfo->ii_bstart > iinfo->ii_bend);
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_icntnr_init_once(void *_c)
+{
+	struct au_icntnr *c = _c;
+	struct au_iinfo *iinfo = &c->iinfo;
+	static struct lock_class_key aufs_ii;
+
+	spin_lock_init(&iinfo->ii_genspin);
+	au_rw_init(&iinfo->ii_rwsem);
+	au_rw_class(&iinfo->ii_rwsem, &aufs_ii);
+	inode_init_once(&c->vfs_inode);
+}
+
+int au_iinfo_init(struct inode *inode)
+{
+	struct au_iinfo *iinfo;
+	struct super_block *sb;
+	int nbr, i;
+
+	sb = inode->i_sb;
+	iinfo = &(container_of(inode, struct au_icntnr, vfs_inode)->iinfo);
+	nbr = au_sbend(sb) + 1;
+	if (unlikely(nbr <= 0))
+		nbr = 1;
+	iinfo->ii_hinode = kcalloc(nbr, sizeof(*iinfo->ii_hinode), GFP_NOFS);
+	if (iinfo->ii_hinode) {
+		au_ninodes_inc(sb);
+		for (i = 0; i < nbr; i++)
+			iinfo->ii_hinode[i].hi_id = -1;
+
+		iinfo->ii_generation.ig_generation = au_sigen(sb);
+		iinfo->ii_bstart = -1;
+		iinfo->ii_bend = -1;
+		iinfo->ii_vdir = NULL;
+		return 0;
+	}
+	return -ENOMEM;
+}
+
+int au_ii_realloc(struct au_iinfo *iinfo, int nbr)
+{
+	int err, sz;
+	struct au_hinode *hip;
+
+	AuRwMustWriteLock(&iinfo->ii_rwsem);
+
+	err = -ENOMEM;
+	sz = sizeof(*hip) * (iinfo->ii_bend + 1);
+	if (!sz)
+		sz = sizeof(*hip);
+	hip = au_kzrealloc(iinfo->ii_hinode, sz, sizeof(*hip) * nbr, GFP_NOFS);
+	if (hip) {
+		iinfo->ii_hinode = hip;
+		err = 0;
+	}
+
+	return err;
+}
+
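+/* the counterpart of au_iinfo_init(), releasing everything it allocated */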
+void au_iinfo_fin(struct inode *inode)
+{
+	struct au_iinfo *iinfo;
+	struct au_hinode *hi;
+	struct super_block *sb;
+	aufs_bindex_t bindex, bend;
+	const unsigned char unlinked = !inode->i_nlink;
+
+	iinfo = au_ii(inode);
+	/* bad_inode case */
+	if (!iinfo)
+		return;
+
+	sb = inode->i_sb;
+	au_ninodes_dec(sb);
+	if (si_pid_test(sb))
+		au_xino_delete_inode(inode, unlinked);
+	else {
+		/*
+		 * it is safe to hide the dependency between sbinfo and
+		 * sb->s_umount.
+		 */
+		lockdep_off();
+		si_noflush_read_lock(sb);
+		au_xino_delete_inode(inode, unlinked);
+		si_read_unlock(sb);
+		lockdep_on();
+	}
+
+	if (iinfo->ii_vdir)
+		au_vdir_free(iinfo->ii_vdir);
+
+	bindex = iinfo->ii_bstart;
+	if (bindex >= 0) {
+		hi = iinfo->ii_hinode + bindex;
+		bend = iinfo->ii_bend;
+		while (bindex++ <= bend) {
+			if (hi->hi_inode)
+				au_hiput(hi);
+			hi++;
+		}
+	}
+	kfree(iinfo->ii_hinode);
+	iinfo->ii_hinode = NULL;
+	AuRwDestroy(&iinfo->ii_rwsem);
+}
diff --git a/fs/aufs/inode.c b/fs/aufs/inode.c
new file mode 100644
index 0000000..9c86d98
--- /dev/null
+++ b/fs/aufs/inode.c
@@ -0,0 +1,492 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * inode functions
+ */
+
+#include "aufs.h"
+
+struct inode *au_igrab(struct inode *inode)
+{
+	if (inode) {
+		AuDebugOn(!atomic_read(&inode->i_count));
+		ihold(inode);
+	}
+	return inode;
+}
+
+static void au_refresh_hinode_attr(struct inode *inode, int do_version)
+{
+	au_cpup_attr_all(inode, /*force*/0);
+	au_update_iigen(inode, /*half*/1);
+	if (do_version)
+		inode->i_version++;
+}
+
+static int au_ii_refresh(struct inode *inode, int *update)
+{
+	int err, e;
+	umode_t type;
+	aufs_bindex_t bindex, new_bindex;
+	struct super_block *sb;
+	struct au_iinfo *iinfo;
+	struct au_hinode *p, *q, tmp;
+
+	IiMustWriteLock(inode);
+
+	*update = 0;
+	sb = inode->i_sb;
+	type = inode->i_mode & S_IFMT;
+	iinfo = au_ii(inode);
+	err = au_ii_realloc(iinfo, au_sbend(sb) + 1);
+	if (unlikely(err))
+		goto out;
+
+	AuDebugOn(iinfo->ii_bstart < 0);
+	p = iinfo->ii_hinode + iinfo->ii_bstart;
+	for (bindex = iinfo->ii_bstart; bindex <= iinfo->ii_bend;
+	     bindex++, p++) {
+		if (!p->hi_inode)
+			continue;
+
+		AuDebugOn(type != (p->hi_inode->i_mode & S_IFMT));
+		new_bindex = au_br_index(sb, p->hi_id);
+		if (new_bindex == bindex)
+			continue;
+
+		if (new_bindex < 0) {
+			*update = 1;
+			au_hiput(p);
+			p->hi_inode = NULL;
+			continue;
+		}
+
+		if (new_bindex < iinfo->ii_bstart)
+			iinfo->ii_bstart = new_bindex;
+		if (iinfo->ii_bend < new_bindex)
+			iinfo->ii_bend = new_bindex;
+		/* swap the two lower inodes, and loop again */
+		q = iinfo->ii_hinode + new_bindex;
+		tmp = *q;
+		*q = *p;
+		*p = tmp;
+		if (tmp.hi_inode) {
+			bindex--;
+			p--;
+		}
+	}
+	au_update_ibrange(inode, /*do_put_zero*/0);
+	e = au_dy_irefresh(inode);
+	if (unlikely(e && !err))
+		err = e;
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+int au_refresh_hinode_self(struct inode *inode)
+{
+	int err, update;
+
+	err = au_ii_refresh(inode, &update);
+	if (!err)
+		au_refresh_hinode_attr(inode, update && S_ISDIR(inode->i_mode));
+
+	AuTraceErr(err);
+	return err;
+}
+
+int au_refresh_hinode(struct inode *inode, struct dentry *dentry)
+{
+	int err, e, update;
+	unsigned int flags;
+	umode_t mode;
+	aufs_bindex_t bindex, bend;
+	unsigned char isdir;
+	struct au_hinode *p;
+	struct au_iinfo *iinfo;
+
+	err = au_ii_refresh(inode, &update);
+	if (unlikely(err))
+		goto out;
+
+	update = 0;
+	iinfo = au_ii(inode);
+	p = iinfo->ii_hinode + iinfo->ii_bstart;
+	mode = (inode->i_mode & S_IFMT);
+	isdir = S_ISDIR(mode);
+	flags = au_hi_flags(inode, isdir);
+	bend = au_dbend(dentry);
+	for (bindex = au_dbstart(dentry); bindex <= bend; bindex++) {
+		struct inode *h_i;
+		struct dentry *h_d;
+
+		h_d = au_h_dptr(dentry, bindex);
+		if (!h_d || !h_d->d_inode)
+			continue;
+
+		AuDebugOn(mode != (h_d->d_inode->i_mode & S_IFMT));
+		if (iinfo->ii_bstart <= bindex && bindex <= iinfo->ii_bend) {
+			h_i = au_h_iptr(inode, bindex);
+			if (h_i) {
+				if (h_i == h_d->d_inode)
+					continue;
+				err = -EIO;
+				break;
+			}
+		}
+		if (bindex < iinfo->ii_bstart)
+			iinfo->ii_bstart = bindex;
+		if (iinfo->ii_bend < bindex)
+			iinfo->ii_bend = bindex;
+		au_set_h_iptr(inode, bindex, au_igrab(h_d->d_inode), flags);
+		update = 1;
+	}
+	au_update_ibrange(inode, /*do_put_zero*/0);
+	e = au_dy_irefresh(inode);
+	if (unlikely(e && !err))
+		err = e;
+	if (!err)
+		au_refresh_hinode_attr(inode, update && isdir);
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
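+/*
+ * set up the aufs inode, its i_op/i_fop and the lower inode array, according
+ * to the file type of the lower inode on the top branch
+ */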
+static int set_inode(struct inode *inode, struct dentry *dentry)
+{
+	int err;
+	unsigned int flags;
+	umode_t mode;
+	aufs_bindex_t bindex, bstart, btail;
+	unsigned char isdir;
+	struct dentry *h_dentry;
+	struct inode *h_inode;
+	struct au_iinfo *iinfo;
+
+	IiMustWriteLock(inode);
+
+	err = 0;
+	isdir = 0;
+	bstart = au_dbstart(dentry);
+	h_inode = au_h_dptr(dentry, bstart)->d_inode;
+	mode = h_inode->i_mode;
+	switch (mode & S_IFMT) {
+	case S_IFREG:
+		btail = au_dbtail(dentry);
+		inode->i_op = &aufs_iop;
+		inode->i_fop = &aufs_file_fop;
+		err = au_dy_iaop(inode, bstart, h_inode);
+		if (unlikely(err))
+			goto out;
+		break;
+	case S_IFDIR:
+		isdir = 1;
+		btail = au_dbtaildir(dentry);
+		inode->i_op = &aufs_dir_iop;
+		inode->i_fop = &aufs_dir_fop;
+		break;
+	case S_IFLNK:
+		btail = au_dbtail(dentry);
+		inode->i_op = &aufs_symlink_iop;
+		break;
+	case S_IFBLK:
+	case S_IFCHR:
+	case S_IFIFO:
+	case S_IFSOCK:
+		btail = au_dbtail(dentry);
+		inode->i_op = &aufs_iop;
+		au_init_special_fop(inode, mode, h_inode->i_rdev);
+		break;
+	default:
+		AuIOErr("Unknown file type 0%o\n", mode);
+		err = -EIO;
+		goto out;
+	}
+
+	/* do not set hnotify for whiteouted dirs (SHWH mode) */
+	flags = au_hi_flags(inode, isdir);
+	if (au_opt_test(au_mntflags(dentry->d_sb), SHWH)
+	    && au_ftest_hi(flags, HNOTIFY)
+	    && dentry->d_name.len > AUFS_WH_PFX_LEN
+	    && !memcmp(dentry->d_name.name, AUFS_WH_PFX, AUFS_WH_PFX_LEN))
+		au_fclr_hi(flags, HNOTIFY);
+	iinfo = au_ii(inode);
+	iinfo->ii_bstart = bstart;
+	iinfo->ii_bend = btail;
+	for (bindex = bstart; bindex <= btail; bindex++) {
+		h_dentry = au_h_dptr(dentry, bindex);
+		if (h_dentry)
+			au_set_h_iptr(inode, bindex,
+				      au_igrab(h_dentry->d_inode), flags);
+	}
+	au_cpup_attr_all(inode, /*force*/1);
+
+out:
+	return err;
+}
+
+/*
+ * successful returns with iinfo write_locked
+ * minus: errno
+ * zero: success, matched
+ * plus: no error, but unmatched
+ */
+static int reval_inode(struct inode *inode, struct dentry *dentry)
+{
+	int err;
+	unsigned int gen;
+	struct au_iigen iigen;
+	aufs_bindex_t bindex, bend;
+	struct inode *h_inode, *h_dinode;
+
+	/*
+	 * if aufs acquired any iinfo lock before this function, it must be
+	 * only one: the parent dir's.
+	 * this can happen with UDBA and an obsoleted inode number.
+	 */
+	err = -EIO;
+	if (unlikely(inode->i_ino == parent_ino(dentry)))
+		goto out;
+
+	err = 1;
+	ii_write_lock_new_child(inode);
+	h_dinode = au_h_dptr(dentry, au_dbstart(dentry))->d_inode;
+	bend = au_ibend(inode);
+	for (bindex = au_ibstart(inode); bindex <= bend; bindex++) {
+		h_inode = au_h_iptr(inode, bindex);
+		if (!h_inode || h_inode != h_dinode)
+			continue;
+
+		err = 0;
+		gen = au_iigen(inode, &iigen);
+		if (gen == au_digen(dentry)
+		    && !au_ig_ftest(iigen.ig_flags, HALF_REFRESHED))
+			break;
+
+		/* fully refresh inode using dentry */
+		err = au_refresh_hinode(inode, dentry);
+		if (!err)
+			au_update_iigen(inode, /*half*/0);
+		break;
+	}
+
+	if (unlikely(err))
+		ii_write_unlock(inode);
+out:
+	return err;
+}
+
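+/*
+ * look up the aufs inode number assigned to @h_ino on the branch, or assign
+ * a new one and record it in the XINO table
+ */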
+int au_ino(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+	   unsigned int d_type, ino_t *ino)
+{
+	int err;
+	struct mutex *mtx;
+
+	/* prevent a race condition on a hardlinked inode number */
+	mtx = NULL;
+	if (d_type != DT_DIR) {
+		mtx = &au_sbr(sb, bindex)->br_xino.xi_nondir_mtx;
+		mutex_lock(mtx);
+	}
+	err = au_xino_read(sb, bindex, h_ino, ino);
+	if (unlikely(err))
+		goto out;
+
+	if (!*ino) {
+		err = -EIO;
+		*ino = au_xino_new_ino(sb);
+		if (unlikely(!*ino))
+			goto out;
+		err = au_xino_write(sb, bindex, h_ino, *ino);
+		if (unlikely(err))
+			goto out;
+	}
+
+out:
+	if (mtx)
+		mutex_unlock(mtx);
+	return err;
+}
+
+/* successful returns with iinfo write_locked */
+/* todo: return with unlocked? */
+struct inode *au_new_inode(struct dentry *dentry, int must_new)
+{
+	struct inode *inode, *h_inode;
+	struct dentry *h_dentry;
+	struct super_block *sb;
+	struct mutex *mtx;
+	ino_t h_ino, ino;
+	int err;
+	aufs_bindex_t bstart;
+
+	sb = dentry->d_sb;
+	bstart = au_dbstart(dentry);
+	h_dentry = au_h_dptr(dentry, bstart);
+	h_inode = h_dentry->d_inode;
+	h_ino = h_inode->i_ino;
+
+	/*
+	 * prevent races between hardlinks under different
+	 * parents.
+	 */
+	mtx = NULL;
+	if (!S_ISDIR(h_inode->i_mode))
+		mtx = &au_sbr(sb, bstart)->br_xino.xi_nondir_mtx;
+
+new_ino:
+	if (mtx)
+		mutex_lock(mtx);
+	err = au_xino_read(sb, bstart, h_ino, &ino);
+	inode = ERR_PTR(err);
+	if (unlikely(err))
+		goto out;
+
+	if (!ino) {
+		ino = au_xino_new_ino(sb);
+		if (unlikely(!ino)) {
+			inode = ERR_PTR(-EIO);
+			goto out;
+		}
+	}
+
+	AuDbg("i%lu\n", (unsigned long)ino);
+	inode = au_iget_locked(sb, ino);
+	err = PTR_ERR(inode);
+	if (IS_ERR(inode))
+		goto out;
+
+	AuDbg("%lx, new %d\n", inode->i_state, !!(inode->i_state & I_NEW));
+	if (inode->i_state & I_NEW) {
+		/* verbose coding for lock class name */
+		if (unlikely(S_ISLNK(h_inode->i_mode)))
+			au_rw_class(&au_ii(inode)->ii_rwsem,
+				    au_lc_key + AuLcSymlink_IIINFO);
+		else if (unlikely(S_ISDIR(h_inode->i_mode)))
+			au_rw_class(&au_ii(inode)->ii_rwsem,
+				    au_lc_key + AuLcDir_IIINFO);
+		else /* likely */
+			au_rw_class(&au_ii(inode)->ii_rwsem,
+				    au_lc_key + AuLcNonDir_IIINFO);
+
+		ii_write_lock_new_child(inode);
+		err = set_inode(inode, dentry);
+		if (!err) {
+			unlock_new_inode(inode);
+			goto out; /* success */
+		}
+
+		/*
+		 * iget_failed() calls iput(), but we need to call
+		 * ii_write_unlock() after iget_failed(), so this is a dirty
+		 * hack for i_count.
+		 */
+		atomic_inc(&inode->i_count);
+		iget_failed(inode);
+		ii_write_unlock(inode);
+		au_xino_write(sb, bstart, h_ino, /*ino*/0);
+		/* ignore this error */
+		goto out_iput;
+	} else if (!must_new && !IS_DEADDIR(inode) && inode->i_nlink) {
+		/*
+		 * horrible race condition between lookup, readdir and copyup
+		 * (or something).
+		 */
+		if (mtx)
+			mutex_unlock(mtx);
+		err = reval_inode(inode, dentry);
+		if (unlikely(err < 0)) {
+			mtx = NULL;
+			goto out_iput;
+		}
+
+		if (!err) {
+			mtx = NULL;
+			goto out; /* success */
+		} else if (mtx)
+			mutex_lock(mtx);
+	}
+
+	if (unlikely(au_test_fs_unique_ino(h_dentry->d_inode)))
+		AuWarn1("Warning: Un-notified UDBA or repeatedly renamed dir,"
+			" b%d, %s, %.*s, hi%lu, i%lu.\n",
+			bstart, au_sbtype(h_dentry->d_sb), AuDLNPair(dentry),
+			(unsigned long)h_ino, (unsigned long)ino);
+	ino = 0;
+	err = au_xino_write(sb, bstart, h_ino, /*ino*/0);
+	if (!err) {
+		iput(inode);
+		if (mtx)
+			mutex_unlock(mtx);
+		goto new_ino;
+	}
+
+out_iput:
+	iput(inode);
+	inode = ERR_PTR(err);
+out:
+	if (mtx)
+		mutex_unlock(mtx);
+	return inode;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_test_ro(struct super_block *sb, aufs_bindex_t bindex,
+	       struct inode *inode)
+{
+	int err;
+
+	err = au_br_rdonly(au_sbr(sb, bindex));
+
+	/* a pseudo-link after a flush may happen out of bounds */
+	if (!err
+	    && inode
+	    && au_ibstart(inode) <= bindex
+	    && bindex <= au_ibend(inode)) {
+		/*
+		 * permission check is unnecessary since vfsub routine
+		 * will be called later
+		 */
+		struct inode *hi = au_h_iptr(inode, bindex);
+		if (hi)
+			err = IS_IMMUTABLE(hi) ? -EROFS : 0;
+	}
+
+	return err;
+}
+
+int au_test_h_perm(struct inode *h_inode, int mask)
+{
+	if (uid_eq(current_fsuid(), GLOBAL_ROOT_UID))
+		return 0;
+	return inode_permission(h_inode, mask);
+}
+
+int au_test_h_perm_sio(struct inode *h_inode, int mask)
+{
+	if (au_test_nfs(h_inode->i_sb)
+	    && (mask & MAY_WRITE)
+	    && S_ISDIR(h_inode->i_mode))
+		mask |= MAY_READ; /* force permission check */
+	return au_test_h_perm(h_inode, mask);
+}
diff --git a/fs/aufs/inode.h b/fs/aufs/inode.h
new file mode 100644
index 0000000..2a87c76
--- /dev/null
+++ b/fs/aufs/inode.h
@@ -0,0 +1,600 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * inode operations
+ */
+
+#ifndef __AUFS_INODE_H__
+#define __AUFS_INODE_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fsnotify.h>
+#include "rwsem.h"
+
+struct vfsmount;
+
+struct au_hnotify {
+#ifdef CONFIG_AUFS_HNOTIFY
+#ifdef CONFIG_AUFS_HFSNOTIFY
+	/* never use fsnotify_add_vfsmount_mark() */
+	struct fsnotify_mark		hn_mark;
+#endif
+	struct inode			*hn_aufs_inode;	/* no get/put */
+#endif
+} ____cacheline_aligned_in_smp;
+
+struct au_hinode {
+	struct inode		*hi_inode;
+	aufs_bindex_t		hi_id;
+#ifdef CONFIG_AUFS_HNOTIFY
+	struct au_hnotify	*hi_notify;
+#endif
+
+	/* reference to the copied-up whiteout with get/put */
+	struct dentry		*hi_whdentry;
+};
+
+/* ig_flags */
+#define AuIG_HALF_REFRESHED		1
+#define au_ig_ftest(flags, name)	((flags) & AuIG_##name)
+#define au_ig_fset(flags, name) \
+	do { (flags) |= AuIG_##name; } while (0)
+#define au_ig_fclr(flags, name) \
+	do { (flags) &= ~AuIG_##name; } while (0)
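+/* e.g. au_ig_fset(flags, HALF_REFRESHED) sets AuIG_HALF_REFRESHED in flags */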
+
+struct au_iigen {
+	__u32		ig_generation, ig_flags;
+};
+
+struct au_vdir;
+struct au_iinfo {
+	spinlock_t		ii_genspin;
+	struct au_iigen		ii_generation;
+	struct super_block	*ii_hsb1;	/* no get/put */
+
+	struct au_rwsem		ii_rwsem;
+	aufs_bindex_t		ii_bstart, ii_bend;
+	__u32			ii_higen;
+	struct au_hinode	*ii_hinode;
+	struct au_vdir		*ii_vdir;
+};
+
+struct au_icntnr {
+	struct au_iinfo iinfo;
+	struct inode vfs_inode;
+} ____cacheline_aligned_in_smp;
+
+/* au_pin flags */
+#define AuPin_DI_LOCKED		1
+#define AuPin_MNT_WRITE		(1 << 1)
+#define au_ftest_pin(flags, name)	((flags) & AuPin_##name)
+#define au_fset_pin(flags, name) \
+	do { (flags) |= AuPin_##name; } while (0)
+#define au_fclr_pin(flags, name) \
+	do { (flags) &= ~AuPin_##name; } while (0)
+
+struct au_pin {
+	/* input */
+	struct dentry *dentry;
+	unsigned int udba;
+	unsigned char lsc_di, lsc_hi, flags;
+	aufs_bindex_t bindex;
+
+	/* output */
+	struct dentry *parent;
+	struct au_hinode *hdir;
+	struct vfsmount *h_mnt;
+
+	/* temporary unlock/relock for copyup */
+	struct dentry *h_dentry, *h_parent;
+	struct au_branch *br;
+	struct task_struct *task;
+};
+
+void au_pin_hdir_unlock(struct au_pin *p);
+int au_pin_hdir_relock(struct au_pin *p);
+void au_pin_hdir_set_owner(struct au_pin *p, struct task_struct *task);
+void au_pin_hdir_acquire_nest(struct au_pin *p);
+void au_pin_hdir_release(struct au_pin *p);
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct au_iinfo *au_ii(struct inode *inode)
+{
+	struct au_iinfo *iinfo;
+
+	iinfo = &(container_of(inode, struct au_icntnr, vfs_inode)->iinfo);
+	if (iinfo->ii_hinode)
+		return iinfo;
+	return NULL; /* debugging bad_inode case */
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* inode.c */
+struct inode *au_igrab(struct inode *inode);
+int au_refresh_hinode_self(struct inode *inode);
+int au_refresh_hinode(struct inode *inode, struct dentry *dentry);
+int au_ino(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+	   unsigned int d_type, ino_t *ino);
+struct inode *au_new_inode(struct dentry *dentry, int must_new);
+int au_test_ro(struct super_block *sb, aufs_bindex_t bindex,
+	       struct inode *inode);
+int au_test_h_perm(struct inode *h_inode, int mask);
+int au_test_h_perm_sio(struct inode *h_inode, int mask);
+
+static inline int au_wh_ino(struct super_block *sb, aufs_bindex_t bindex,
+			    ino_t h_ino, unsigned int d_type, ino_t *ino)
+{
+#ifdef CONFIG_AUFS_SHWH
+	return au_ino(sb, bindex, h_ino, d_type, ino);
+#else
+	return 0;
+#endif
+}
+
+/* i_op.c */
+extern struct inode_operations aufs_iop, aufs_symlink_iop, aufs_dir_iop;
+
+/* au_wr_dir flags */
+#define AuWrDir_ADD_ENTRY	1
+#define AuWrDir_TMP_WHENTRY	(1 << 1)
+#define AuWrDir_ISDIR		(1 << 2)
+#define au_ftest_wrdir(flags, name)	((flags) & AuWrDir_##name)
+#define au_fset_wrdir(flags, name) \
+	do { (flags) |= AuWrDir_##name; } while (0)
+#define au_fclr_wrdir(flags, name) \
+	do { (flags) &= ~AuWrDir_##name; } while (0)
+
+struct au_wr_dir_args {
+	aufs_bindex_t force_btgt;
+	unsigned char flags;
+};
+int au_wr_dir(struct dentry *dentry, struct dentry *src_dentry,
+	      struct au_wr_dir_args *args);
+
+struct dentry *au_pinned_h_parent(struct au_pin *pin);
+void au_pin_init(struct au_pin *pin, struct dentry *dentry,
+		 aufs_bindex_t bindex, int lsc_di, int lsc_hi,
+		 unsigned int udba, unsigned char flags);
+int au_pin(struct au_pin *pin, struct dentry *dentry, aufs_bindex_t bindex,
+	   unsigned int udba, unsigned char flags) __must_check;
+int au_do_pin(struct au_pin *pin) __must_check;
+void au_unpin(struct au_pin *pin);
+
+/* i_op_add.c */
+int au_may_add(struct dentry *dentry, aufs_bindex_t bindex,
+	       struct dentry *h_parent, int isdir);
+int aufs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
+	       dev_t dev);
+int aufs_symlink(struct inode *dir, struct dentry *dentry, const char *symname);
+int aufs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+		bool want_excl);
+int aufs_link(struct dentry *src_dentry, struct inode *dir,
+	      struct dentry *dentry);
+int aufs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
+
+/* i_op_del.c */
+int au_wr_dir_need_wh(struct dentry *dentry, int isdir, aufs_bindex_t *bcpup);
+int au_may_del(struct dentry *dentry, aufs_bindex_t bindex,
+	       struct dentry *h_parent, int isdir);
+int aufs_unlink(struct inode *dir, struct dentry *dentry);
+int aufs_rmdir(struct inode *dir, struct dentry *dentry);
+
+/* i_op_ren.c */
+int au_wbr(struct dentry *dentry, aufs_bindex_t btgt);
+int aufs_rename(struct inode *src_dir, struct dentry *src_dentry,
+		struct inode *dir, struct dentry *dentry);
+
+/* iinfo.c */
+struct inode *au_h_iptr(struct inode *inode, aufs_bindex_t bindex);
+void au_hiput(struct au_hinode *hinode);
+void au_set_hi_wh(struct inode *inode, aufs_bindex_t bindex,
+		  struct dentry *h_wh);
+unsigned int au_hi_flags(struct inode *inode, int isdir);
+
+/* hinode flags */
+#define AuHi_XINO	1
+#define AuHi_HNOTIFY	(1 << 1)
+#define au_ftest_hi(flags, name)	((flags) & AuHi_##name)
+#define au_fset_hi(flags, name) \
+	do { (flags) |= AuHi_##name; } while (0)
+#define au_fclr_hi(flags, name) \
+	do { (flags) &= ~AuHi_##name; } while (0)
+
+#ifndef CONFIG_AUFS_HNOTIFY
+#undef AuHi_HNOTIFY
+#define AuHi_HNOTIFY	0
+#endif
+
+void au_set_h_iptr(struct inode *inode, aufs_bindex_t bindex,
+		   struct inode *h_inode, unsigned int flags);
+
+void au_update_iigen(struct inode *inode, int half);
+void au_update_ibrange(struct inode *inode, int do_put_zero);
+
+void au_icntnr_init_once(void *_c);
+int au_iinfo_init(struct inode *inode);
+void au_iinfo_fin(struct inode *inode);
+int au_ii_realloc(struct au_iinfo *iinfo, int nbr);
+
+#ifdef CONFIG_PROC_FS
+/* plink.c */
+int au_plink_maint(struct super_block *sb, int flags);
+void au_plink_maint_leave(struct au_sbinfo *sbinfo);
+int au_plink_maint_enter(struct super_block *sb);
+#ifdef CONFIG_AUFS_DEBUG
+void au_plink_list(struct super_block *sb);
+#else
+AuStubVoid(au_plink_list, struct super_block *sb)
+#endif
+int au_plink_test(struct inode *inode);
+struct dentry *au_plink_lkup(struct inode *inode, aufs_bindex_t bindex);
+void au_plink_append(struct inode *inode, aufs_bindex_t bindex,
+		     struct dentry *h_dentry);
+void au_plink_put(struct super_block *sb, int verbose);
+void au_plink_clean(struct super_block *sb, int verbose);
+void au_plink_half_refresh(struct super_block *sb, aufs_bindex_t br_id);
+#else
+AuStubInt0(au_plink_maint, struct super_block *sb, int flags);
+AuStubVoid(au_plink_maint_leave, struct au_sbinfo *sbinfo);
+AuStubInt0(au_plink_maint_enter, struct super_block *sb);
+AuStubVoid(au_plink_list, struct super_block *sb);
+AuStubInt0(au_plink_test, struct inode *inode);
+AuStub(struct dentry *, au_plink_lkup, return NULL,
+       struct inode *inode, aufs_bindex_t bindex);
+AuStubVoid(au_plink_append, struct inode *inode, aufs_bindex_t bindex,
+	   struct dentry *h_dentry);
+AuStubVoid(au_plink_put, struct super_block *sb, int verbose);
+AuStubVoid(au_plink_clean, struct super_block *sb, int verbose);
+AuStubVoid(au_plink_half_refresh, struct super_block *sb, aufs_bindex_t br_id);
+#endif /* CONFIG_PROC_FS */
+
+/* ---------------------------------------------------------------------- */
+
+/* lock subclass for iinfo */
+enum {
+	AuLsc_II_CHILD,		/* child first */
+	AuLsc_II_CHILD2,	/* rename(2), link(2), and cpup at hnotify */
+	AuLsc_II_CHILD3,	/* copyup dirs */
+	AuLsc_II_PARENT,	/* see AuLsc_I_PARENT in vfsub.h */
+	AuLsc_II_PARENT2,
+	AuLsc_II_PARENT3,	/* copyup dirs */
+	AuLsc_II_NEW_CHILD
+};
+
+/*
+ * ii_read_lock_child, ii_write_lock_child,
+ * ii_read_lock_child2, ii_write_lock_child2,
+ * ii_read_lock_child3, ii_write_lock_child3,
+ * ii_read_lock_parent, ii_write_lock_parent,
+ * ii_read_lock_parent2, ii_write_lock_parent2,
+ * ii_read_lock_parent3, ii_write_lock_parent3,
+ * ii_read_lock_new_child, ii_write_lock_new_child,
+ */
+#define AuReadLockFunc(name, lsc) \
+static inline void ii_read_lock_##name(struct inode *i) \
+{ \
+	au_rw_read_lock_nested(&au_ii(i)->ii_rwsem, AuLsc_II_##lsc); \
+}
+
+#define AuWriteLockFunc(name, lsc) \
+static inline void ii_write_lock_##name(struct inode *i) \
+{ \
+	au_rw_write_lock_nested(&au_ii(i)->ii_rwsem, AuLsc_II_##lsc); \
+}
+
+#define AuRWLockFuncs(name, lsc) \
+	AuReadLockFunc(name, lsc) \
+	AuWriteLockFunc(name, lsc)
+
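+/*
+ * e.g. AuRWLockFuncs(child, CHILD) generates both ii_read_lock_child() and
+ * ii_write_lock_child(), using the AuLsc_II_CHILD lock subclass
+ */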
+AuRWLockFuncs(child, CHILD);
+AuRWLockFuncs(child2, CHILD2);
+AuRWLockFuncs(child3, CHILD3);
+AuRWLockFuncs(parent, PARENT);
+AuRWLockFuncs(parent2, PARENT2);
+AuRWLockFuncs(parent3, PARENT3);
+AuRWLockFuncs(new_child, NEW_CHILD);
+
+#undef AuReadLockFunc
+#undef AuWriteLockFunc
+#undef AuRWLockFuncs
+
+/*
+ * ii_read_unlock, ii_write_unlock, ii_downgrade_lock
+ */
+AuSimpleUnlockRwsemFuncs(ii, struct inode *i, &au_ii(i)->ii_rwsem);
+
+#define IiMustNoWaiters(i)	AuRwMustNoWaiters(&au_ii(i)->ii_rwsem)
+#define IiMustAnyLock(i)	AuRwMustAnyLock(&au_ii(i)->ii_rwsem)
+#define IiMustWriteLock(i)	AuRwMustWriteLock(&au_ii(i)->ii_rwsem)
+
+/* ---------------------------------------------------------------------- */
+
+static inline void au_icntnr_init(struct au_icntnr *c)
+{
+#ifdef CONFIG_AUFS_DEBUG
+	c->vfs_inode.i_mode = 0;
+#endif
+}
+
+static inline unsigned int au_iigen(struct inode *inode, struct au_iigen *iigen)
+{
+	unsigned int gen;
+	struct au_iinfo *iinfo;
+
+	iinfo = au_ii(inode);
+	spin_lock(&iinfo->ii_genspin);
+	if (iigen)
+		*iigen = iinfo->ii_generation;
+	gen = iinfo->ii_generation.ig_generation;
+	spin_unlock(&iinfo->ii_genspin);
+
+	return gen;
+}
+
+/* tiny test for inode number */
+/* tmpfs generation is too rough */
+static inline int au_test_higen(struct inode *inode, struct inode *h_inode)
+{
+	struct au_iinfo *iinfo;
+
+	iinfo = au_ii(inode);
+	AuRwMustAnyLock(&iinfo->ii_rwsem);
+	return !(iinfo->ii_hsb1 == h_inode->i_sb
+		 && iinfo->ii_higen == h_inode->i_generation);
+}
+
+static inline void au_iigen_dec(struct inode *inode)
+{
+	struct au_iinfo *iinfo;
+
+	iinfo = au_ii(inode);
+	spin_lock(&iinfo->ii_genspin);
+	iinfo->ii_generation.ig_generation--;
+	spin_unlock(&iinfo->ii_genspin);
+}
+
+static inline int au_iigen_test(struct inode *inode, unsigned int sigen)
+{
+	int err;
+
+	err = 0;
+	if (unlikely(inode && au_iigen(inode, NULL) != sigen))
+		err = -EIO;
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static inline aufs_bindex_t au_ii_br_id(struct inode *inode,
+					aufs_bindex_t bindex)
+{
+	IiMustAnyLock(inode);
+	return au_ii(inode)->ii_hinode[0 + bindex].hi_id;
+}
+
+static inline aufs_bindex_t au_ibstart(struct inode *inode)
+{
+	IiMustAnyLock(inode);
+	return au_ii(inode)->ii_bstart;
+}
+
+static inline aufs_bindex_t au_ibend(struct inode *inode)
+{
+	IiMustAnyLock(inode);
+	return au_ii(inode)->ii_bend;
+}
+
+static inline struct au_vdir *au_ivdir(struct inode *inode)
+{
+	IiMustAnyLock(inode);
+	return au_ii(inode)->ii_vdir;
+}
+
+static inline struct dentry *au_hi_wh(struct inode *inode, aufs_bindex_t bindex)
+{
+	IiMustAnyLock(inode);
+	return au_ii(inode)->ii_hinode[0 + bindex].hi_whdentry;
+}
+
+static inline void au_set_ibstart(struct inode *inode, aufs_bindex_t bindex)
+{
+	IiMustWriteLock(inode);
+	au_ii(inode)->ii_bstart = bindex;
+}
+
+static inline void au_set_ibend(struct inode *inode, aufs_bindex_t bindex)
+{
+	IiMustWriteLock(inode);
+	au_ii(inode)->ii_bend = bindex;
+}
+
+static inline void au_set_ivdir(struct inode *inode, struct au_vdir *vdir)
+{
+	IiMustWriteLock(inode);
+	au_ii(inode)->ii_vdir = vdir;
+}
+
+static inline struct au_hinode *au_hi(struct inode *inode, aufs_bindex_t bindex)
+{
+	IiMustAnyLock(inode);
+	return au_ii(inode)->ii_hinode + bindex;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct dentry *au_pinned_parent(struct au_pin *pin)
+{
+	if (pin)
+		return pin->parent;
+	return NULL;
+}
+
+static inline struct inode *au_pinned_h_dir(struct au_pin *pin)
+{
+	if (pin && pin->hdir)
+		return pin->hdir->hi_inode;
+	return NULL;
+}
+
+static inline struct au_hinode *au_pinned_hdir(struct au_pin *pin)
+{
+	if (pin)
+		return pin->hdir;
+	return NULL;
+}
+
+static inline void au_pin_set_dentry(struct au_pin *pin, struct dentry *dentry)
+{
+	if (pin)
+		pin->dentry = dentry;
+}
+
+static inline void au_pin_set_parent_lflag(struct au_pin *pin,
+					   unsigned char lflag)
+{
+	if (pin) {
+		if (lflag)
+			au_fset_pin(pin->flags, DI_LOCKED);
+		else
+			au_fclr_pin(pin->flags, DI_LOCKED);
+	}
+}
+
+static inline void au_pin_set_parent(struct au_pin *pin, struct dentry *parent)
+{
+	if (pin) {
+		dput(pin->parent);
+		pin->parent = dget(parent);
+	}
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_branch;
+#ifdef CONFIG_AUFS_HNOTIFY
+struct au_hnotify_op {
+	void (*ctl)(struct au_hinode *hinode, int do_set);
+	int (*alloc)(struct au_hinode *hinode);
+
+	/*
+	 * if it returns true, the caller should free hinode->hi_notify,
+	 * otherwise ->free() frees it.
+	 */
+	int (*free)(struct au_hinode *hinode,
+		    struct au_hnotify *hn) __must_check;
+
+	void (*fin)(void);
+	int (*init)(void);
+
+	int (*reset_br)(unsigned int udba, struct au_branch *br, int perm);
+	void (*fin_br)(struct au_branch *br);
+	int (*init_br)(struct au_branch *br, int perm);
+};
+
+/* hnotify.c */
+int au_hn_alloc(struct au_hinode *hinode, struct inode *inode);
+void au_hn_free(struct au_hinode *hinode);
+void au_hn_ctl(struct au_hinode *hinode, int do_set);
+void au_hn_reset(struct inode *inode, unsigned int flags);
+int au_hnotify(struct inode *h_dir, struct au_hnotify *hnotify, u32 mask,
+	       struct qstr *h_child_qstr, struct inode *h_child_inode);
+int au_hnotify_reset_br(unsigned int udba, struct au_branch *br, int perm);
+int au_hnotify_init_br(struct au_branch *br, int perm);
+void au_hnotify_fin_br(struct au_branch *br);
+int __init au_hnotify_init(void);
+void au_hnotify_fin(void);
+
+/* hfsnotify.c */
+extern const struct au_hnotify_op au_hnotify_op;
+
+static inline
+void au_hn_init(struct au_hinode *hinode)
+{
+	hinode->hi_notify = NULL;
+}
+
+static inline struct au_hnotify *au_hn(struct au_hinode *hinode)
+{
+	return hinode->hi_notify;
+}
+
+#else
+static inline
+int au_hn_alloc(struct au_hinode *hinode __maybe_unused,
+		struct inode *inode __maybe_unused)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline struct au_hnotify *au_hn(struct au_hinode *hinode)
+{
+	return NULL;
+}
+
+AuStubVoid(au_hn_free, struct au_hinode *hinode __maybe_unused)
+AuStubVoid(au_hn_ctl, struct au_hinode *hinode __maybe_unused,
+	   int do_set __maybe_unused)
+AuStubVoid(au_hn_reset, struct inode *inode __maybe_unused,
+	   unsigned int flags __maybe_unused)
+AuStubInt0(au_hnotify_reset_br, unsigned int udba __maybe_unused,
+	   struct au_branch *br __maybe_unused,
+	   int perm __maybe_unused)
+AuStubInt0(au_hnotify_init_br, struct au_branch *br __maybe_unused,
+	   int perm __maybe_unused)
+AuStubVoid(au_hnotify_fin_br, struct au_branch *br __maybe_unused)
+AuStubInt0(__init au_hnotify_init, void)
+AuStubVoid(au_hnotify_fin, void)
+AuStubVoid(au_hn_init, struct au_hinode *hinode __maybe_unused)
+#endif /* CONFIG_AUFS_HNOTIFY */
+
+static inline void au_hn_suspend(struct au_hinode *hdir)
+{
+	au_hn_ctl(hdir, /*do_set*/0);
+}
+
+static inline void au_hn_resume(struct au_hinode *hdir)
+{
+	au_hn_ctl(hdir, /*do_set*/1);
+}
+
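+/* take the lower dir's i_mutex and suspend the hnotify events for it */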
+static inline void au_hn_imtx_lock(struct au_hinode *hdir)
+{
+	mutex_lock(&hdir->hi_inode->i_mutex);
+	au_hn_suspend(hdir);
+}
+
+static inline void au_hn_imtx_lock_nested(struct au_hinode *hdir,
+					  unsigned int sc __maybe_unused)
+{
+	mutex_lock_nested(&hdir->hi_inode->i_mutex, sc);
+	au_hn_suspend(hdir);
+}
+
+static inline void au_hn_imtx_unlock(struct au_hinode *hdir)
+{
+	au_hn_resume(hdir);
+	mutex_unlock(&hdir->hi_inode->i_mutex);
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_INODE_H__ */
diff --git a/fs/aufs/ioctl.c b/fs/aufs/ioctl.c
new file mode 100644
index 0000000..628d627
--- /dev/null
+++ b/fs/aufs/ioctl.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * ioctl
+ * plink-management and readdir in userspace.
+ * assist the pathconf(3) wrapper library.
+ * move-down
+ */
+
+#include <linux/compat.h>
+#include <linux/file.h>
+#include "aufs.h"
+
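+/*
+ * open the root dir of a writable branch and return a new file descriptor
+ * for it (the body of the AUFS_CTL_WBR_FD ioctl)
+ */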
+static int au_wbr_fd(struct path *path, struct aufs_wbr_fd __user *arg)
+{
+	int err, fd;
+	aufs_bindex_t wbi, bindex, bend;
+	struct file *h_file;
+	struct super_block *sb;
+	struct dentry *root;
+	struct au_branch *br;
+	struct aufs_wbr_fd wbrfd = {
+		.oflags	= au_dir_roflags,
+		.brid	= -1
+	};
+	const int valid = O_RDONLY | O_NONBLOCK | O_LARGEFILE | O_DIRECTORY
+		| O_NOATIME | O_CLOEXEC;
+
+	AuDebugOn(wbrfd.oflags & ~valid);
+
+	if (arg) {
+		err = copy_from_user(&wbrfd, arg, sizeof(wbrfd));
+		if (unlikely(err)) {
+			err = -EFAULT;
+			goto out;
+		}
+
+		err = -EINVAL;
+		AuDbg("wbrfd{0%o, %d}\n", wbrfd.oflags, wbrfd.brid);
+		wbrfd.oflags |= au_dir_roflags;
+		AuDbg("0%o\n", wbrfd.oflags);
+		if (unlikely(wbrfd.oflags & ~valid))
+			goto out;
+	}
+
+	fd = get_unused_fd();
+	err = fd;
+	if (unlikely(fd < 0))
+		goto out;
+
+	h_file = ERR_PTR(-EINVAL);
+	wbi = 0;
+	br = NULL;
+	sb = path->dentry->d_sb;
+	root = sb->s_root;
+	aufs_read_lock(root, AuLock_IR);
+	bend = au_sbend(sb);
+	if (wbrfd.brid >= 0) {
+		wbi = au_br_index(sb, wbrfd.brid);
+		if (unlikely(wbi < 0 || wbi > bend))
+			goto out_unlock;
+	}
+
+	h_file = ERR_PTR(-ENOENT);
+	br = au_sbr(sb, wbi);
+	if (!au_br_writable(br->br_perm)) {
+		if (arg)
+			goto out_unlock;
+
+		bindex = wbi + 1;
+		wbi = -1;
+		for (; bindex <= bend; bindex++) {
+			br = au_sbr(sb, bindex);
+			if (au_br_writable(br->br_perm)) {
+				wbi = bindex;
+				br = au_sbr(sb, wbi);
+				break;
+			}
+		}
+	}
+	AuDbg("wbi %d\n", wbi);
+	if (wbi >= 0)
+		h_file = au_h_open(root, wbi, wbrfd.oflags, NULL);
+
+out_unlock:
+	aufs_read_unlock(root, AuLock_IR);
+	err = PTR_ERR(h_file);
+	if (IS_ERR(h_file))
+		goto out_fd;
+
+	atomic_dec(&br->br_count); /* cf. au_h_open() */
+	fd_install(fd, h_file);
+	err = fd;
+	goto out; /* success */
+
+out_fd:
+	put_unused_fd(fd);
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+long aufs_ioctl_dir(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	long err;
+
+	switch (cmd) {
+	case AUFS_CTL_RDU:
+	case AUFS_CTL_RDU_INO:
+		err = au_rdu_ioctl(file, cmd, arg);
+		break;
+
+	case AUFS_CTL_WBR_FD:
+		err = au_wbr_fd(&file->f_path, (void __user *)arg);
+		break;
+
+	case AUFS_CTL_IBUSY:
+		err = au_ibusy_ioctl(file, arg);
+		break;
+
+	default:
+		/* do not pass it to the lower fs */
+		AuDbg("0x%x\n", cmd);
+		err = -ENOTTY;
+	}
+
+	AuTraceErr(err);
+	return err;
+}
+
+long aufs_ioctl_nondir(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	long err;
+
+	switch (cmd) {
+	case AUFS_CTL_MVDOWN:
+		WARN_ONCE(1, "move-down is still testing...\n");
+		err = au_mvdown(file->f_dentry, (void __user *)arg);
+		break;
+
+	case AUFS_CTL_WBR_FD:
+		err = au_wbr_fd(&file->f_path, (void __user *)arg);
+		break;
+
+	default:
+		/* do not pass it to the lower fs */
+		AuDbg("0x%x\n", cmd);
+		err = -ENOTTY;
+	}
+
+	AuTraceErr(err);
+	return err;
+}
+
+#ifdef CONFIG_COMPAT
+long aufs_compat_ioctl_dir(struct file *file, unsigned int cmd,
+			   unsigned long arg)
+{
+	long err;
+
+	switch (cmd) {
+	case AUFS_CTL_RDU:
+	case AUFS_CTL_RDU_INO:
+		err = au_rdu_compat_ioctl(file, cmd, arg);
+		break;
+
+	case AUFS_CTL_IBUSY:
+		err = au_ibusy_compat_ioctl(file, arg);
+		break;
+
+	default:
+		err = aufs_ioctl_dir(file, cmd, arg);
+	}
+
+	AuTraceErr(err);
+	return err;
+}
+
+long aufs_compat_ioctl_nondir(struct file *file, unsigned int cmd,
+			      unsigned long arg)
+{
+	return aufs_ioctl_nondir(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
diff --git a/fs/aufs/loop.c b/fs/aufs/loop.c
new file mode 100644
index 0000000..ccae19c
--- /dev/null
+++ b/fs/aufs/loop.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * support for loopback block device as a branch
+ */
+
+#include <linux/loop.h>
+#include "aufs.h"
+
+/*
+ * test if two lower dentries have overlapping branches.
+ */
+int au_test_loopback_overlap(struct super_block *sb, struct dentry *h_adding)
+{
+	struct super_block *h_sb;
+	struct loop_device *l;
+
+	h_sb = h_adding->d_sb;
+	if (MAJOR(h_sb->s_dev) != LOOP_MAJOR)
+		return 0;
+
+	l = h_sb->s_bdev->bd_disk->private_data;
+	h_adding = l->lo_backing_file->f_dentry;
+	/*
+	 * h_adding can be on local NFS,
+	 * in which case aufs cannot detect the loop.
+	 */
+	if (unlikely(h_adding->d_sb == sb))
+		return 1;
+	return !!au_test_subdir(h_adding, sb->s_root);
+}
+
+/* true if a kernel thread named 'loop[0-9].*' accesses a file */
+int au_test_loopback_kthread(void)
+{
+	int ret;
+	struct task_struct *tsk = current;
+	char c, comm[sizeof(tsk->comm)];
+
+	ret = 0;
+	if (tsk->flags & PF_KTHREAD) {
+		get_task_comm(comm, tsk);
+		c = comm[4];
+		ret = ('0' <= c && c <= '9'
+		       && !strncmp(comm, "loop", 4));
+	}
+
+	return ret;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define au_warn_loopback_step	16
+static int au_warn_loopback_nelem = au_warn_loopback_step;
+static unsigned long *au_warn_loopback_array;
+
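+/*
+ * warn only once per lower fs type; the s_magic values already warned about
+ * are remembered in au_warn_loopback_array
+ */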
+void au_warn_loopback(struct super_block *h_sb)
+{
+	int i, new_nelem;
+	unsigned long *a, magic;
+	static DEFINE_SPINLOCK(spin);
+
+	magic = h_sb->s_magic;
+	spin_lock(&spin);
+	a = au_warn_loopback_array;
+	for (i = 0; i < au_warn_loopback_nelem && *a; i++)
+		if (a[i] == magic) {
+			spin_unlock(&spin);
+			return;
+		}
+
+	/* h_sb is new to us, print it */
+	if (i < au_warn_loopback_nelem) {
+		a[i] = magic;
+		goto pr;
+	}
+
+	/* expand the array */
+	new_nelem = au_warn_loopback_nelem + au_warn_loopback_step;
+	a = au_kzrealloc(au_warn_loopback_array,
+			 au_warn_loopback_nelem * sizeof(unsigned long),
+			 new_nelem * sizeof(unsigned long), GFP_ATOMIC);
+	if (a) {
+		au_warn_loopback_nelem = new_nelem;
+		au_warn_loopback_array = a;
+		a[i] = magic;
+		goto pr;
+	}
+
+	spin_unlock(&spin);
+	AuWarn1("realloc failed, ignored\n");
+	return;
+
+pr:
+	spin_unlock(&spin);
+	pr_warn("you may want to try another patch for loopback file "
+		"on %s(0x%lx) branch\n", au_sbtype(h_sb), magic);
+}
+
+int au_loopback_init(void)
+{
+	int err;
+	struct super_block *sb __maybe_unused;
+
+	AuDebugOn(sizeof(sb->s_magic) != sizeof(unsigned long));
+
+	err = 0;
+	au_warn_loopback_array = kcalloc(au_warn_loopback_step,
+					 sizeof(unsigned long), GFP_NOFS);
+	if (unlikely(!au_warn_loopback_array))
+		err = -ENOMEM;
+
+	return err;
+}
+
+void au_loopback_fin(void)
+{
+	kfree(au_warn_loopback_array);
+}
diff --git a/fs/aufs/loop.h b/fs/aufs/loop.h
new file mode 100644
index 0000000..88d019c
--- /dev/null
+++ b/fs/aufs/loop.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * support for loopback mount as a branch
+ */
+
+#ifndef __AUFS_LOOP_H__
+#define __AUFS_LOOP_H__
+
+#ifdef __KERNEL__
+
+struct dentry;
+struct super_block;
+
+#ifdef CONFIG_AUFS_BDEV_LOOP
+/* loop.c */
+int au_test_loopback_overlap(struct super_block *sb, struct dentry *h_adding);
+int au_test_loopback_kthread(void);
+void au_warn_loopback(struct super_block *h_sb);
+
+int au_loopback_init(void);
+void au_loopback_fin(void);
+#else
+AuStubInt0(au_test_loopback_overlap, struct super_block *sb,
+	   struct dentry *h_adding)
+AuStubInt0(au_test_loopback_kthread, void)
+AuStubVoid(au_warn_loopback, struct super_block *h_sb)
+
+AuStubInt0(au_loopback_init, void)
+AuStubVoid(au_loopback_fin, void)
+#endif /* CONFIG_AUFS_BDEV_LOOP */
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_LOOP_H__ */
diff --git a/fs/aufs/magic.mk b/fs/aufs/magic.mk
new file mode 100644
index 0000000..3e6387b
--- /dev/null
+++ b/fs/aufs/magic.mk
@@ -0,0 +1,54 @@
+
+# defined in ${srctree}/fs/fuse/inode.c
+# tristate
+ifdef CONFIG_FUSE_FS
+ccflags-y += -DFUSE_SUPER_MAGIC=0x65735546
+endif
+
+# defined in ${srctree}/fs/ocfs2/ocfs2_fs.h
+# tristate
+ifdef CONFIG_OCFS2_FS
+ccflags-y += -DOCFS2_SUPER_MAGIC=0x7461636f
+endif
+
+# defined in ${srctree}/fs/ocfs2/dlm/userdlm.h
+# tristate
+ifdef CONFIG_OCFS2_FS_O2CB
+ccflags-y += -DDLMFS_MAGIC=0x76a9f425
+endif
+
+# defined in ${srctree}/fs/cifs/cifsfs.c
+# tristate
+ifdef CONFIG_CIFS_FS
+ccflags-y += -DCIFS_MAGIC_NUMBER=0xFF534D42
+endif
+
+# defined in ${srctree}/fs/xfs/xfs_sb.h
+# tristate
+ifdef CONFIG_XFS_FS
+ccflags-y += -DXFS_SB_MAGIC=0x58465342
+endif
+
+# defined in ${srctree}/fs/configfs/mount.c
+# tristate
+ifdef CONFIG_CONFIGFS_FS
+ccflags-y += -DCONFIGFS_MAGIC=0x62656570
+endif
+
+# defined in ${srctree}/fs/9p/v9fs.h
+# tristate
+ifdef CONFIG_9P_FS
+ccflags-y += -DV9FS_MAGIC=0x01021997
+endif
+
+# defined in ${srctree}/fs/ubifs/ubifs.h
+# tristate
+ifdef CONFIG_UBIFS_FS
+ccflags-y += -DUBIFS_SUPER_MAGIC=0x24051905
+endif
+
+# defined in ${srctree}/fs/hfsplus/hfsplus_raw.h
+# tristate
+ifdef CONFIG_HFSPLUS_FS
+ccflags-y += -DHFSPLUS_SUPER_MAGIC=0x482b
+endif
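+
+# the filesystems above are all "tristate" (they may be built as modules)
+# and keep their magic numbers in filesystem-private headers, so aufs
+# duplicates the constants here to recognize such branch filesystems at
+# build time.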
diff --git a/fs/aufs/module.c b/fs/aufs/module.c
new file mode 100644
index 0000000..3930f56
--- /dev/null
+++ b/fs/aufs/module.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * module global variables and operations
+ */
+
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include "aufs.h"
+
+void *au_kzrealloc(void *p, unsigned int nused, unsigned int new_sz, gfp_t gfp)
+{
+	if (new_sz <= nused)
+		return p;
+
+	p = krealloc(p, new_sz, gfp);
+	if (p)
+		memset(p + nused, 0, new_sz - nused);
+	return p;
+}
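+
+/*
+ * a minimal usage sketch (hypothetical caller, not part of aufs):
+ *
+ *	unsigned long *p, *q;
+ *
+ *	p = kcalloc(4, sizeof(*p), GFP_NOFS);
+ *	...
+ *	q = au_kzrealloc(p, 4 * sizeof(*p), 8 * sizeof(*p), GFP_NOFS);
+ *	if (q)
+ *		p = q;
+ *
+ * the four new elements come back zero-filled. keeping the old pointer in
+ * a separate variable matters since, on failure, the original allocation
+ * is left untouched (au_warn_loopback() relies on exactly this).
+ */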
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * aufs caches
+ */
+struct kmem_cache *au_cachep[AuCache_Last];
+static int __init au_cache_init(void)
+{
+	au_cachep[AuCache_DINFO] = AuCacheCtor(au_dinfo, au_di_init_once);
+	if (au_cachep[AuCache_DINFO])
+		/* SLAB_DESTROY_BY_RCU */
+		au_cachep[AuCache_ICNTNR] = AuCacheCtor(au_icntnr,
+							au_icntnr_init_once);
+	if (au_cachep[AuCache_ICNTNR])
+		au_cachep[AuCache_FINFO] = AuCacheCtor(au_finfo,
+						       au_fi_init_once);
+	if (au_cachep[AuCache_FINFO])
+		au_cachep[AuCache_VDIR] = AuCache(au_vdir);
+	if (au_cachep[AuCache_VDIR])
+		au_cachep[AuCache_DEHSTR] = AuCache(au_vdir_dehstr);
+	if (au_cachep[AuCache_DEHSTR])
+		return 0;
+
+	return -ENOMEM;
+}
+
+static void au_cache_fin(void)
+{
+	int i;
+
+	/*
+	 * Make sure all delayed rcu free inodes are flushed before we
+	 * destroy cache.
+	 */
+	rcu_barrier();
+
+	/* excluding AuCache_HNOTIFY */
+	BUILD_BUG_ON(AuCache_HNOTIFY + 1 != AuCache_Last);
+	for (i = 0; i < AuCache_HNOTIFY; i++)
+		if (au_cachep[i]) {
+			kmem_cache_destroy(au_cachep[i]);
+			au_cachep[i] = NULL;
+		}
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_dir_roflags;
+
+#ifdef CONFIG_AUFS_SBILIST
+/*
+ * iterate_supers_type() doesn't protect us from
+ * remounting (branch management)
+ */
+struct au_splhead au_sbilist;
+#endif
+
+struct lock_class_key au_lc_key[AuLcKey_Last];
+
+/*
+ * functions for module interface.
+ */
+MODULE_LICENSE("GPL");
+/* MODULE_LICENSE("GPL v2"); */
+MODULE_AUTHOR("Junjiro R. Okajima <aufs-users@lists.sourceforge.net>");
+MODULE_DESCRIPTION(AUFS_NAME
+	" -- Advanced multi layered unification filesystem");
+MODULE_VERSION(AUFS_VERSION);
+MODULE_ALIAS_FS(AUFS_NAME);
+
+/* this module parameter has no meaning when SYSFS is disabled */
+int sysaufs_brs = 1;
+MODULE_PARM_DESC(brs, "use <sysfs>/fs/aufs/si_*/brN");
+module_param_named(brs, sysaufs_brs, int, S_IRUGO);
+
+/* ---------------------------------------------------------------------- */
+
+static char au_esc_chars[0x20 + 3]; /* 0x01-0x20, backslash, del, and NULL */
+
+int au_seq_path(struct seq_file *seq, struct path *path)
+{
+	return seq_path(seq, path, au_esc_chars);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int __init aufs_init(void)
+{
+	int err, i;
+	char *p;
+
+	p = au_esc_chars;
+	for (i = 1; i <= ' '; i++)
+		*p++ = i;
+	*p++ = '\\';
+	*p++ = '\x7f';
+	*p = 0;
+
+	au_dir_roflags = au_file_roflags(O_DIRECTORY | O_LARGEFILE);
+
+	au_sbilist_init();
+	sysaufs_brs_init();
+	au_debug_init();
+	au_dy_init();
+	err = sysaufs_init();
+	if (unlikely(err))
+		goto out;
+	err = au_procfs_init();
+	if (unlikely(err))
+		goto out_sysaufs;
+	err = au_wkq_init();
+	if (unlikely(err))
+		goto out_procfs;
+	err = au_loopback_init();
+	if (unlikely(err))
+		goto out_wkq;
+	err = au_hnotify_init();
+	if (unlikely(err))
+		goto out_loopback;
+	err = au_sysrq_init();
+	if (unlikely(err))
+		goto out_hin;
+	err = au_cache_init();
+	if (unlikely(err))
+		goto out_sysrq;
+	err = register_filesystem(&aufs_fs_type);
+	if (unlikely(err))
+		goto out_cache;
+	/* since we define pr_fmt, call printk directly */
+	printk(KERN_INFO AUFS_NAME " " AUFS_VERSION "\n");
+	goto out; /* success */
+
+out_cache:
+	au_cache_fin();
+out_sysrq:
+	au_sysrq_fin();
+out_hin:
+	au_hnotify_fin();
+out_loopback:
+	au_loopback_fin();
+out_wkq:
+	au_wkq_fin();
+out_procfs:
+	au_procfs_fin();
+out_sysaufs:
+	sysaufs_fin();
+	au_dy_fin();
+out:
+	return err;
+}
+
+static void __exit aufs_exit(void)
+{
+	unregister_filesystem(&aufs_fs_type);
+	au_cache_fin();
+	au_sysrq_fin();
+	au_hnotify_fin();
+	au_loopback_fin();
+	au_wkq_fin();
+	au_procfs_fin();
+	sysaufs_fin();
+	au_dy_fin();
+}
+
+module_init(aufs_init);
+module_exit(aufs_exit);
diff --git a/fs/aufs/module.h b/fs/aufs/module.h
new file mode 100644
index 0000000..52bf472
--- /dev/null
+++ b/fs/aufs/module.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * module initialization and module-global variables
+ */
+
+#ifndef __AUFS_MODULE_H__
+#define __AUFS_MODULE_H__
+
+#ifdef __KERNEL__
+
+#include <linux/slab.h>
+
+struct path;
+struct seq_file;
+
+/* module parameters */
+extern int sysaufs_brs;
+
+/* ---------------------------------------------------------------------- */
+
+extern int au_dir_roflags;
+
+enum {
+	AuLcNonDir_FIINFO,
+	AuLcNonDir_DIINFO,
+	AuLcNonDir_IIINFO,
+
+	AuLcDir_FIINFO,
+	AuLcDir_DIINFO,
+	AuLcDir_IIINFO,
+
+	AuLcSymlink_DIINFO,
+	AuLcSymlink_IIINFO,
+
+	AuLcKey_Last
+};
+extern struct lock_class_key au_lc_key[AuLcKey_Last];
+
+void *au_kzrealloc(void *p, unsigned int nused, unsigned int new_sz, gfp_t gfp);
+int au_seq_path(struct seq_file *seq, struct path *path);
+
+#ifdef CONFIG_PROC_FS
+/* procfs.c */
+int __init au_procfs_init(void);
+void au_procfs_fin(void);
+#else
+AuStubInt0(au_procfs_init, void);
+AuStubVoid(au_procfs_fin, void);
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+/* kmem cache */
+enum {
+	AuCache_DINFO,
+	AuCache_ICNTNR,
+	AuCache_FINFO,
+	AuCache_VDIR,
+	AuCache_DEHSTR,
+	AuCache_HNOTIFY, /* must be last */
+	AuCache_Last
+};
+
+#define AuCacheFlags		(SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD)
+#define AuCache(type)		KMEM_CACHE(type, AuCacheFlags)
+#define AuCacheCtor(type, ctor)	\
+	kmem_cache_create(#type, sizeof(struct type), \
+			  __alignof__(struct type), AuCacheFlags, ctor)
+
+extern struct kmem_cache *au_cachep[];
+
+#define AuCacheFuncs(name, index) \
+static inline struct au_##name *au_cache_alloc_##name(void) \
+{ return kmem_cache_alloc(au_cachep[AuCache_##index], GFP_NOFS); } \
+static inline void au_cache_free_##name(struct au_##name *p) \
+{ kmem_cache_free(au_cachep[AuCache_##index], p); }
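+
+/*
+ * for example, AuCacheFuncs(dinfo, DINFO) below expands to
+ *	static inline struct au_dinfo *au_cache_alloc_dinfo(void);
+ *	static inline void au_cache_free_dinfo(struct au_dinfo *p);
+ * both backed by au_cachep[AuCache_DINFO] with GFP_NOFS allocations.
+ */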
+
+AuCacheFuncs(dinfo, DINFO);
+AuCacheFuncs(icntnr, ICNTNR);
+AuCacheFuncs(finfo, FINFO);
+AuCacheFuncs(vdir, VDIR);
+AuCacheFuncs(vdir_dehstr, DEHSTR);
+#ifdef CONFIG_AUFS_HNOTIFY
+AuCacheFuncs(hnotify, HNOTIFY);
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_MODULE_H__ */
diff --git a/fs/aufs/mvdown.c b/fs/aufs/mvdown.c
new file mode 100644
index 0000000..9e35baa
--- /dev/null
+++ b/fs/aufs/mvdown.c
@@ -0,0 +1,533 @@
+/*
+ * Copyright (C) 2011-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#include "aufs.h"
+
+enum {
+	AUFS_MVDOWN_SRC,
+	AUFS_MVDOWN_DST,
+	AUFS_MVDOWN_NARRAY
+};
+
+struct au_mvd_args {
+	struct {
+		aufs_bindex_t bindex;
+		struct super_block *h_sb;
+		struct dentry *h_parent;
+		struct au_hinode *hdir;
+		struct inode *h_dir;
+	} info[AUFS_MVDOWN_NARRAY];
+
+	struct aufs_mvdown mvdown;
+	struct dentry *dentry, *parent;
+	struct inode *inode, *dir;
+	struct super_block *sb;
+	aufs_bindex_t bopq, bwh, bfound;
+	unsigned char rename_lock;
+	struct au_pin pin;
+};
+
+#define mvd_bsrc		info[AUFS_MVDOWN_SRC].bindex
+#define mvd_h_src_sb		info[AUFS_MVDOWN_SRC].h_sb
+#define mvd_h_src_parent	info[AUFS_MVDOWN_SRC].h_parent
+#define mvd_hdir_src		info[AUFS_MVDOWN_SRC].hdir
+#define mvd_h_src_dir		info[AUFS_MVDOWN_SRC].h_dir
+
+#define mvd_bdst		info[AUFS_MVDOWN_DST].bindex
+#define mvd_h_dst_sb		info[AUFS_MVDOWN_DST].h_sb
+#define mvd_h_dst_parent	info[AUFS_MVDOWN_DST].h_parent
+#define mvd_hdir_dst		info[AUFS_MVDOWN_DST].hdir
+#define mvd_h_dst_dir		info[AUFS_MVDOWN_DST].h_dir
+
+#define AU_MVD_PR(flag, ...) do {			\
+		if (flag)				\
+			pr_err(__VA_ARGS__);		\
+	} while (0)
+
+/* make the parent dir on bdst */
+static int au_do_mkdir(const unsigned char verbose, struct au_mvd_args *a)
+{
+	int err;
+
+	err = 0;
+	a->mvd_hdir_src = au_hi(a->dir, a->mvd_bsrc);
+	a->mvd_hdir_dst = au_hi(a->dir, a->mvd_bdst);
+	a->mvd_h_src_parent = au_h_dptr(a->parent, a->mvd_bsrc);
+	a->mvd_h_dst_parent = NULL;
+	if (au_dbend(a->parent) >= a->mvd_bdst)
+		a->mvd_h_dst_parent = au_h_dptr(a->parent, a->mvd_bdst);
+	if (!a->mvd_h_dst_parent) {
+		err = au_cpdown_dirs(a->dentry, a->mvd_bdst);
+		if (unlikely(err)) {
+			AU_MVD_PR(verbose, "cpdown_dirs failed\n");
+			goto out;
+		}
+		a->mvd_h_dst_parent = au_h_dptr(a->parent, a->mvd_bdst);
+	}
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/* lock them all */
+static int au_do_lock(const unsigned char verbose, struct au_mvd_args *a)
+{
+	int err;
+	struct dentry *h_trap;
+
+	a->mvd_h_src_sb = au_sbr_sb(a->sb, a->mvd_bsrc);
+	a->mvd_h_dst_sb = au_sbr_sb(a->sb, a->mvd_bdst);
+	if (a->mvd_h_src_sb != a->mvd_h_dst_sb) {
+		a->rename_lock = 0;
+		err = au_pin(&a->pin, a->dentry, a->mvd_bdst, au_opt_udba(a->sb),
+			     AuPin_MNT_WRITE | AuPin_DI_LOCKED);
+		if (!err) {
+			a->mvd_h_src_dir = a->mvd_h_src_parent->d_inode;
+			mutex_lock_nested(&a->mvd_h_src_dir->i_mutex,
+					  AuLsc_I_PARENT3);
+		} else
+			AU_MVD_PR(verbose, "pin failed\n");
+		goto out;
+	}
+
+	err = 0;
+	a->rename_lock = 1;
+	h_trap = vfsub_lock_rename(a->mvd_h_src_parent, a->mvd_hdir_src,
+				   a->mvd_h_dst_parent, a->mvd_hdir_dst);
+	if (h_trap) {
+		err = (h_trap != a->mvd_h_src_parent);
+		if (err)
+			err = (h_trap != a->mvd_h_dst_parent);
+	}
+	BUG_ON(err); /* it should never happen */
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+static void au_do_unlock(const unsigned char verbose, struct au_mvd_args *a)
+{
+	if (!a->rename_lock) {
+		mutex_unlock(&a->mvd_h_src_dir->i_mutex);
+		au_unpin(&a->pin);
+	} else
+		vfsub_unlock_rename(a->mvd_h_src_parent, a->mvd_hdir_src,
+				    a->mvd_h_dst_parent, a->mvd_hdir_dst);
+}
+
+/* copy-down the file */
+static int au_do_cpdown(const unsigned char verbose, struct au_mvd_args *a)
+{
+	int err;
+	struct au_cp_generic cpg = {
+		.dentry	= a->dentry,
+		.bdst	= a->mvd_bdst,
+		.bsrc	= a->mvd_bsrc,
+		.len	= -1,
+		.pin	= &a->pin,
+		.flags	= AuCpup_DTIME | AuCpup_HOPEN
+	};
+
+	AuDbg("b%d, b%d\n", cpg.bsrc, cpg.bdst);
+	err = au_sio_cpdown_simple(&cpg);
+	if (unlikely(err))
+		AU_MVD_PR(verbose, "cpdown failed\n");
+
+	AuTraceErr(err);
+	return err;
+}
+
+/*
+ * unlink the whiteout on bdst if it exists; it may have been created by
+ * UDBA while we were sleeping
+ */
+static int au_do_unlink_wh(const unsigned char verbose, struct au_mvd_args *a)
+{
+	int err;
+	struct path h_path;
+	struct au_branch *br;
+
+	br = au_sbr(a->sb, a->mvd_bdst);
+	h_path.dentry = au_wh_lkup(a->mvd_h_dst_parent, &a->dentry->d_name, br);
+	err = PTR_ERR(h_path.dentry);
+	if (IS_ERR(h_path.dentry)) {
+		AU_MVD_PR(verbose, "wh_lkup failed\n");
+		goto out;
+	}
+
+	err = 0;
+	if (h_path.dentry->d_inode) {
+		h_path.mnt = au_br_mnt(br);
+		err = vfsub_unlink(a->mvd_h_dst_parent->d_inode, &h_path,
+				   /*force*/0);
+		if (unlikely(err))
+			AU_MVD_PR(verbose, "wh_unlink failed\n");
+	}
+	dput(h_path.dentry);
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/*
+ * unlink the topmost h_dentry
+ * Note: the target file MAY be modified by UDBA between this mutex_unlock() and
+ *	mutex_lock() in vfs_unlink(). In that case, such changes may be lost.
+ */
+static int au_do_unlink(const unsigned char verbose, struct au_mvd_args *a)
+{
+	int err;
+	struct path h_path;
+
+	h_path.mnt = au_sbr_mnt(a->sb, a->mvd_bsrc);
+	h_path.dentry = au_h_dptr(a->dentry, a->mvd_bsrc);
+	err = vfsub_unlink(a->mvd_h_src_dir, &h_path, /*force*/0);
+	if (unlikely(err))
+		AU_MVD_PR(verbose, "unlink failed\n");
+
+	AuTraceErr(err);
+	return err;
+}
+
+/*
+ * copy-down the file and unlink the bsrc file.
+ * - unlink the whiteout on bdst if it exists
+ * - copy-down the file (with whtmp name and rename)
+ * - unlink the bsrc file
+ */
+static int au_do_mvdown(const unsigned char verbose, struct au_mvd_args *a)
+{
+	int err;
+
+	err = au_do_mkdir(verbose, a);
+	if (!err)
+		err = au_do_lock(verbose, a);
+	if (unlikely(err))
+		goto out;
+
+	/*
+	 * do not revert the activities we made on bdst since they should be
+	 * harmless in aufs.
+	 */
+
+	err = au_do_cpdown(verbose, a);
+	if (!err)
+		err = au_do_unlink_wh(verbose, a);
+	if (!err)
+		err = au_do_unlink(verbose, a);
+	if (unlikely(err))
+		goto out_unlock;
+
+	/* maintain internal array */
+	au_set_h_dptr(a->dentry, a->mvd_bsrc, NULL);
+	au_set_dbstart(a->dentry, a->mvd_bdst);
+	if (au_dbend(a->dentry) < a->mvd_bdst)
+		au_set_dbend(a->dentry, a->mvd_bdst);
+	au_set_h_iptr(a->inode, a->mvd_bsrc, NULL, /*flags*/0);
+	au_set_ibstart(a->inode, a->mvd_bdst);
+	if (au_ibend(a->inode) < a->mvd_bdst)
+		au_set_ibend(a->inode, a->mvd_bdst);
+
+out_unlock:
+	au_do_unlock(verbose, a);
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int find_lower_writable(struct super_block *sb, aufs_bindex_t bindex)
+{
+	aufs_bindex_t bend;
+	struct au_branch *br;
+
+	bend = au_sbend(sb);
+	for (bindex++; bindex <= bend; bindex++) {
+		br = au_sbr(sb, bindex);
+		if (!au_br_rdonly(br))
+			return bindex;
+	}
+
+	return -1;
+}
+
+/* make sure the file is idle */
+static int au_mvd_args_busy(const unsigned char verbose, struct au_mvd_args *a)
+{
+	int err, plinked;
+	struct inode *h_src_inode;
+
+	err = 0;
+	h_src_inode = au_h_iptr(a->inode, a->mvd_bsrc);
+	plinked = !!au_opt_test(au_mntflags(a->sb), PLINK);
+	if (au_dbstart(a->dentry) == a->mvd_bsrc
+	    && a->dentry->d_count == 1
+	    && atomic_read(&a->inode->i_count) == 1
+	    /* && h_src_inode->i_nlink == 1 */
+	    && (!plinked || !au_plink_test(a->inode))
+	    && a->inode->i_nlink == 1)
+		goto out;
+
+	err = -EBUSY;
+	AU_MVD_PR(verbose,
+		  "b%d, d{b%d, c%u?}, i{c%d?, l%u}, hi{l%u}, p{%d, %d}\n",
+		  a->mvd_bsrc, au_dbstart(a->dentry), a->dentry->d_count,
+		  atomic_read(&a->inode->i_count), a->inode->i_nlink,
+		  h_src_inode->i_nlink,
+		  plinked, plinked ? au_plink_test(a->inode) : 0);
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/* make sure the parent dir is fine */
+static int au_mvd_args_parent(const unsigned char verbose,
+			      struct au_mvd_args *a)
+{
+	int err;
+	aufs_bindex_t bindex;
+
+	err = 0;
+	if (unlikely(au_alive_dir(a->parent))) {
+		err = -ENOENT;
+		AU_MVD_PR(verbose, "parent dir is dead\n");
+		goto out;
+	}
+
+	a->bopq = au_dbdiropq(a->parent);
+	bindex = au_wbr_nonopq(a->dentry, a->mvd_bdst);
+	AuDbg("b%d\n", bindex);
+	if (unlikely((bindex >= 0 && bindex < a->mvd_bdst)
+		     || (a->bopq != -1 && a->bopq < a->mvd_bdst))) {
+		err = -EINVAL;
+		AU_MVD_PR(verbose, "parent dir is opaque b%d, b%d\n",
+			  a->bopq, a->mvd_bdst);
+	}
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+static int au_mvd_args_intermediate(const unsigned char verbose,
+				    struct au_mvd_args *a)
+{
+	int err;
+	struct au_dinfo *dinfo, *tmp;
+
+	/* lookup the next lower positive entry */
+	err = -ENOMEM;
+	tmp = au_di_alloc(a->sb, AuLsc_DI_TMP);
+	if (unlikely(!tmp))
+		goto out;
+
+	a->bfound = -1;
+	a->bwh = -1;
+	dinfo = au_di(a->dentry);
+	au_di_cp(tmp, dinfo);
+	au_di_swap(tmp, dinfo);
+
+	/* returns the number of positive dentries */
+	err = au_lkup_dentry(a->dentry, a->mvd_bsrc + 1, /*type*/0);
+	if (!err)
+		a->bwh = au_dbwh(a->dentry);
+	else if (err > 0)
+		a->bfound = au_dbstart(a->dentry);
+
+	au_di_swap(tmp, dinfo);
+	au_rw_write_unlock(&tmp->di_rwsem);
+	au_di_free(tmp);
+	if (unlikely(err < 0))
+		AU_MVD_PR(verbose, "failed look-up lower\n");
+
+	/*
+	 * here, we have these cases.
+	 * bfound == -1
+	 *	no positive dentry under bsrc. there are more sub-cases.
+	 *	bwh < 0
+	 *		there is no whiteout; we can safely move down.
+	 *	bwh <= bsrc
+	 *		impossible.
+	 *	bsrc < bwh && bwh < bdst
+	 *		there is a whiteout on an RO branch. cannot proceed.
+	 *	bwh == bdst
+	 *		there is a whiteout on the RW target branch. it should
+	 *		be removed.
+	 *	bdst < bwh
+	 *		there is a whiteout on some unrelated branch.
+	 * -1 < bfound && bfound <= bsrc
+	 *	impossible.
+	 * bfound < bdst
+	 *	found, but it is on an RO branch between bsrc and bdst. cannot
+	 *	proceed.
+	 * bfound == bdst
+	 *	found; replace it if AUFS_MVDOWN_FORCE is set, otherwise return
+	 *	an error.
+	 * bdst < bfound
+	 *	found; after we create the file on bdst, it will be hidden.
+	 */
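+	/*
+	 * a concrete instance of the table above: with bsrc == 0 and
+	 * bdst == 2, a whiteout found at bwh == 1 (bsrc < bwh && bwh < bdst)
+	 * sits on a read-only branch and would mask the file once it is
+	 * moved down to bdst, so the operation is refused with -EINVAL below.
+	 */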
+
+	AuDebugOn(a->bfound == -1
+		  && a->bwh != -1
+		  && a->bwh <= a->mvd_bsrc);
+	AuDebugOn(-1 < a->bfound
+		  && a->bfound <= a->mvd_bsrc);
+
+	err = -EINVAL;
+	if (a->bfound == -1
+	    && a->mvd_bsrc < a->bwh
+	    && a->bwh != -1
+	    && a->bwh < a->mvd_bdst) {
+		AU_MVD_PR(verbose, "bsrc %d, bdst %d, bfound %d, bwh %d\n",
+			  a->mvd_bsrc, a->mvd_bdst, a->bfound, a->bwh);
+		goto out;
+	} else if (a->bfound != -1 && a->bfound < a->mvd_bdst) {
+		AU_MVD_PR(verbose, "bdst %d, bfound %d\n",
+			  a->mvd_bdst, a->bfound);
+		goto out;
+	}
+
+	err = 0; /* success */
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+static int au_mvd_args_exist(const unsigned char verbose, struct au_mvd_args *a)
+{
+	int err;
+
+	err = (a->bfound != a->mvd_bdst) ? 0 : -EEXIST;
+	AuTraceErr(err);
+	return err;
+}
+
+static int au_mvd_args(const unsigned char verbose, struct au_mvd_args *a)
+{
+	int err;
+	struct au_branch *br;
+
+	err = -EISDIR;
+	if (unlikely(S_ISDIR(a->inode->i_mode)))
+		goto out;
+
+	err = -EINVAL;
+	a->mvd_bsrc = au_ibstart(a->inode);
+	if (unlikely(a->mvd_bsrc == au_sbend(a->sb))) {
+		AU_MVD_PR(verbose, "on the bottom\n");
+		goto out;
+	}
+	br = au_sbr(a->sb, a->mvd_bsrc);
+	err = au_br_rdonly(br);
+	if (unlikely(err))
+		goto out;
+
+	err = -EINVAL;
+	a->mvd_bdst = find_lower_writable(a->sb, a->mvd_bsrc);
+	if (unlikely(a->mvd_bdst < 0)) {
+		AU_MVD_PR(verbose, "no writable lower branch\n");
+		goto out;
+	}
+
+	err = au_mvd_args_busy(verbose, a);
+	if (!err)
+		err = au_mvd_args_parent(verbose, a);
+	if (!err)
+		err = au_mvd_args_intermediate(verbose, a);
+	if (!err)
+		err = au_mvd_args_exist(verbose, a);
+	if (!err)
+		AuDbg("b%d, b%d\n", a->mvd_bsrc, a->mvd_bdst);
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
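+/*
+ * entry point of the move-down operation; the caller passes a
+ * struct aufs_mvdown from userspace (presumably through aufs' ioctl
+ * interface). only its flags field is consulted in this function.
+ */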
+int au_mvdown(struct dentry *dentry, struct aufs_mvdown __user *uarg)
+{
+	int err;
+	unsigned char verbose;
+	struct au_mvd_args args = {
+		.dentry = dentry,
+		.inode	= dentry->d_inode,
+		.sb	= dentry->d_sb
+	};
+
+	err = -EPERM;
+	if (unlikely(!capable(CAP_SYS_ADMIN)))
+		goto out;
+
+	err = copy_from_user(&args.mvdown, uarg, sizeof(args.mvdown));
+	if (unlikely(err)) {
+		err = -EFAULT;
+		goto out;
+	}
+	AuDbg("flags 0x%x\n", args.mvdown.flags);
+
+	err = -EBUSY;
+	verbose = !!(args.mvdown.flags & AUFS_MVDOWN_VERBOSE);
+	args.parent = dget_parent(dentry);
+	args.dir = args.parent->d_inode;
+	mutex_lock_nested(&args.dir->i_mutex, I_MUTEX_PARENT);
+	dput(args.parent);
+	if (unlikely(args.parent != dentry->d_parent)) {
+		AU_MVD_PR(verbose, "parent dir is moved\n");
+		goto out_dir;
+	}
+
+	mutex_lock_nested(&args.inode->i_mutex, I_MUTEX_CHILD);
+	err = aufs_read_lock(dentry, AuLock_DW | AuLock_FLUSH);
+	if (unlikely(err))
+		goto out_inode;
+
+	di_write_lock_parent(args.parent);
+	err = au_mvd_args(verbose, &args);
+	if (unlikely(err))
+		goto out_parent;
+
+	AuDbgDentry(dentry);
+	AuDbgInode(args.inode);
+	err = au_do_mvdown(verbose, &args);
+	if (unlikely(err))
+		goto out_parent;
+	AuDbgDentry(dentry);
+	AuDbgInode(args.inode);
+
+	au_cpup_attr_timesizes(args.dir);
+	au_cpup_attr_timesizes(args.inode);
+	au_cpup_igen(args.inode, au_h_iptr(args.inode, args.mvd_bdst));
+	/* au_digen_dec(dentry); */
+
+out_parent:
+	di_write_unlock(args.parent);
+	aufs_read_unlock(dentry, AuLock_DW);
+out_inode:
+	mutex_unlock(&args.inode->i_mutex);
+out_dir:
+	mutex_unlock(&args.dir->i_mutex);
+out:
+	AuTraceErr(err);
+	return err;
+}
diff --git a/fs/aufs/opts.c b/fs/aufs/opts.c
new file mode 100644
index 0000000..7cac1ed
--- /dev/null
+++ b/fs/aufs/opts.c
@@ -0,0 +1,1697 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * mount options/flags
+ */
+
+#include <linux/namei.h>
+#include <linux/types.h> /* a distribution requires */
+#include <linux/parser.h>
+#include "aufs.h"
+
+/* ---------------------------------------------------------------------- */
+
+enum {
+	Opt_br,
+	Opt_add, Opt_del, Opt_mod, Opt_reorder, Opt_append, Opt_prepend,
+	Opt_idel, Opt_imod, Opt_ireorder,
+	Opt_dirwh, Opt_rdcache, Opt_rdblk, Opt_rdhash, Opt_rendir,
+	Opt_rdblk_def, Opt_rdhash_def,
+	Opt_xino, Opt_zxino, Opt_noxino,
+	Opt_trunc_xino, Opt_trunc_xino_v, Opt_notrunc_xino,
+	Opt_trunc_xino_path, Opt_itrunc_xino,
+	Opt_trunc_xib, Opt_notrunc_xib,
+	Opt_shwh, Opt_noshwh,
+	Opt_plink, Opt_noplink, Opt_list_plink,
+	Opt_udba,
+	Opt_dio, Opt_nodio,
+	/* Opt_lock, Opt_unlock, */
+	Opt_cmd, Opt_cmd_args,
+	Opt_diropq_a, Opt_diropq_w,
+	Opt_warn_perm, Opt_nowarn_perm,
+	Opt_wbr_copyup, Opt_wbr_create,
+	Opt_refrof, Opt_norefrof,
+	Opt_verbose, Opt_noverbose,
+	Opt_sum, Opt_nosum, Opt_wsum,
+	Opt_tail, Opt_ignore, Opt_ignore_silent, Opt_err
+};
+
+static match_table_t options = {
+	{Opt_br, "br=%s"},
+	{Opt_br, "br:%s"},
+
+	{Opt_add, "add=%d:%s"},
+	{Opt_add, "add:%d:%s"},
+	{Opt_add, "ins=%d:%s"},
+	{Opt_add, "ins:%d:%s"},
+	{Opt_append, "append=%s"},
+	{Opt_append, "append:%s"},
+	{Opt_prepend, "prepend=%s"},
+	{Opt_prepend, "prepend:%s"},
+
+	{Opt_del, "del=%s"},
+	{Opt_del, "del:%s"},
+	/* {Opt_idel, "idel:%d"}, */
+	{Opt_mod, "mod=%s"},
+	{Opt_mod, "mod:%s"},
+	/* {Opt_imod, "imod:%d:%s"}, */
+
+	{Opt_dirwh, "dirwh=%d"},
+
+	{Opt_xino, "xino=%s"},
+	{Opt_noxino, "noxino"},
+	{Opt_trunc_xino, "trunc_xino"},
+	{Opt_trunc_xino_v, "trunc_xino_v=%d:%d"},
+	{Opt_notrunc_xino, "notrunc_xino"},
+	{Opt_trunc_xino_path, "trunc_xino=%s"},
+	{Opt_itrunc_xino, "itrunc_xino=%d"},
+	/* {Opt_zxino, "zxino=%s"}, */
+	{Opt_trunc_xib, "trunc_xib"},
+	{Opt_notrunc_xib, "notrunc_xib"},
+
+#ifdef CONFIG_PROC_FS
+	{Opt_plink, "plink"},
+#else
+	{Opt_ignore_silent, "plink"},
+#endif
+
+	{Opt_noplink, "noplink"},
+
+#ifdef CONFIG_AUFS_DEBUG
+	{Opt_list_plink, "list_plink"},
+#endif
+
+	{Opt_udba, "udba=%s"},
+
+	{Opt_dio, "dio"},
+	{Opt_nodio, "nodio"},
+
+	{Opt_diropq_a, "diropq=always"},
+	{Opt_diropq_a, "diropq=a"},
+	{Opt_diropq_w, "diropq=whiteouted"},
+	{Opt_diropq_w, "diropq=w"},
+
+	{Opt_warn_perm, "warn_perm"},
+	{Opt_nowarn_perm, "nowarn_perm"},
+
+	/* keep them temporary */
+	{Opt_ignore_silent, "coo=%s"},
+	{Opt_ignore_silent, "nodlgt"},
+	{Opt_ignore_silent, "nodirperm1"},
+	{Opt_ignore_silent, "clean_plink"},
+
+#ifdef CONFIG_AUFS_SHWH
+	{Opt_shwh, "shwh"},
+#endif
+	{Opt_noshwh, "noshwh"},
+
+	{Opt_rendir, "rendir=%d"},
+
+	{Opt_refrof, "refrof"},
+	{Opt_norefrof, "norefrof"},
+
+	{Opt_verbose, "verbose"},
+	{Opt_verbose, "v"},
+	{Opt_noverbose, "noverbose"},
+	{Opt_noverbose, "quiet"},
+	{Opt_noverbose, "q"},
+	{Opt_noverbose, "silent"},
+
+	{Opt_sum, "sum"},
+	{Opt_nosum, "nosum"},
+	{Opt_wsum, "wsum"},
+
+	{Opt_rdcache, "rdcache=%d"},
+	{Opt_rdblk, "rdblk=%d"},
+	{Opt_rdblk_def, "rdblk=def"},
+	{Opt_rdhash, "rdhash=%d"},
+	{Opt_rdhash_def, "rdhash=def"},
+
+	{Opt_wbr_create, "create=%s"},
+	{Opt_wbr_create, "create_policy=%s"},
+	{Opt_wbr_copyup, "cpup=%s"},
+	{Opt_wbr_copyup, "copyup=%s"},
+	{Opt_wbr_copyup, "copyup_policy=%s"},
+
+	/* internal use for the scripts */
+	{Opt_ignore_silent, "si=%s"},
+
+	{Opt_br, "dirs=%s"},
+	{Opt_ignore, "debug=%d"},
+	{Opt_ignore, "delete=whiteout"},
+	{Opt_ignore, "delete=all"},
+	{Opt_ignore, "imap=%s"},
+
+	/* temporary workaround, due to old mount(8)? */
+	{Opt_ignore_silent, "relatime"},
+
+	{Opt_err, NULL}
+};
+
+/* ---------------------------------------------------------------------- */
+
+static const char *au_parser_pattern(int val, struct match_token *token)
+{
+	while (token->pattern) {
+		if (token->token == val)
+			return token->pattern;
+		token++;
+	}
+	BUG();
+	return "??";
+}
+
+/* ---------------------------------------------------------------------- */
+
+static match_table_t brperm = {
+	{AuBrPerm_RO, AUFS_BRPERM_RO},
+	{AuBrPerm_RR, AUFS_BRPERM_RR},
+	{AuBrPerm_RW, AUFS_BRPERM_RW},
+	{0, NULL}
+};
+
+static match_table_t brattr = {
+	{AuBrAttr_UNPIN, AUFS_BRATTR_UNPIN},
+	{AuBrRAttr_WH, AUFS_BRRATTR_WH},
+	{AuBrWAttr_NoLinkWH, AUFS_BRWATTR_NLWH},
+	{0, NULL}
+};
+
+#define AuBrStr_LONGEST	AUFS_BRPERM_RW \
+	"+" AUFS_BRATTR_UNPIN \
+	"+" AUFS_BRWATTR_NLWH
+
+static int br_attr_val(char *str, match_table_t table, substring_t args[])
+{
+	int attr, v;
+	char *p;
+
+	attr = 0;
+	do {
+		p = strchr(str, '+');
+		if (p)
+			*p = 0;
+		v = match_token(str, table, args);
+		if (v)
+			attr |= v;
+		else {
+			if (p)
+				*p = '+';
+			pr_warn("ignored branch attribute %s\n", str);
+			break;
+		}
+		if (p)
+			str = p + 1;
+	} while (p);
+
+	return attr;
+}
+
+static int noinline_for_stack br_perm_val(char *perm)
+{
+	int val;
+	char *p, *q;
+	substring_t args[MAX_OPT_ARGS];
+
+	p = strchr(perm, '+');
+	if (p)
+		*p = 0;
+	val = match_token(perm, brperm, args);
+	if (!val) {
+		if (p)
+			*p = '+';
+		pr_warn("ignored branch permission %s\n", perm);
+		val = AuBrPerm_RO;
+		goto out;
+	}
+	if (!p)
+		goto out;
+
+	p++;
+	while (1) {
+		q = strchr(p, '+');
+		if (q)
+			*q = 0;
+		val |= br_attr_val(p, brattr, args);
+		if (q) {
+			*q = '+';
+			p = q + 1;
+		} else
+			break;
+	}
+	switch (val & AuBrPerm_Mask) {
+	case AuBrPerm_RO:
+	case AuBrPerm_RR:
+		if (unlikely(val & AuBrWAttr_NoLinkWH)) {
+			pr_warn("ignored branch attribute %s\n",
+				AUFS_BRWATTR_NLWH);
+			val &= ~AuBrWAttr_NoLinkWH;
+		}
+		break;
+	case AuBrPerm_RW:
+		if (unlikely(val & AuBrRAttr_WH)) {
+			pr_warn("ignored branch attribute %s\n",
+				AUFS_BRRATTR_WH);
+			val &= ~AuBrRAttr_WH;
+		}
+		break;
+	}
+
+out:
+	return val;
+}
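+
+/*
+ * parsing examples, assuming the usual aufs_type.h string constants
+ * ("rw", "ro", "unpin", "nolwh"): "rw+unpin" yields
+ * AuBrPerm_RW | AuBrAttr_UNPIN, while "ro+nolwh" warns and drops the
+ * writable-branch-only attribute, yielding plain AuBrPerm_RO (see the
+ * switch above).
+ */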
+
+/* Caller should free the return value */
+char *au_optstr_br_perm(int brperm)
+{
+	char *p, a[sizeof(AuBrStr_LONGEST)];
+	int sz;
+
+#define SetPerm(str) do {			\
+		sz = sizeof(str);		\
+		memcpy(a, str, sz);		\
+		p = a + sz - 1;			\
+	} while (0)
+
+#define AppendAttr(flag, str) do {			\
+		if (brperm & flag) {		\
+			sz = sizeof(str);	\
+			*p++ = '+';		\
+			memcpy(p, str, sz);	\
+			p += sz - 1;		\
+		}				\
+	} while (0)
+
+	switch (brperm & AuBrPerm_Mask) {
+	case AuBrPerm_RO:
+		SetPerm(AUFS_BRPERM_RO);
+		break;
+	case AuBrPerm_RR:
+		SetPerm(AUFS_BRPERM_RR);
+		break;
+	case AuBrPerm_RW:
+		SetPerm(AUFS_BRPERM_RW);
+		break;
+	default:
+		AuDebugOn(1);
+	}
+
+	AppendAttr(AuBrAttr_UNPIN, AUFS_BRATTR_UNPIN);
+	AppendAttr(AuBrRAttr_WH, AUFS_BRRATTR_WH);
+	AppendAttr(AuBrWAttr_NoLinkWH, AUFS_BRWATTR_NLWH);
+
+	AuDebugOn(strlen(a) >= sizeof(a));
+	return kstrdup(a, GFP_NOFS);
+#undef SetPerm
+#undef AppendAttr
+}
+
+/* ---------------------------------------------------------------------- */
+
+static match_table_t udbalevel = {
+	{AuOpt_UDBA_REVAL, "reval"},
+	{AuOpt_UDBA_NONE, "none"},
+#ifdef CONFIG_AUFS_HNOTIFY
+	{AuOpt_UDBA_HNOTIFY, "notify"}, /* abstraction */
+#ifdef CONFIG_AUFS_HFSNOTIFY
+	{AuOpt_UDBA_HNOTIFY, "fsnotify"},
+#endif
+#endif
+	{-1, NULL}
+};
+
+static int noinline_for_stack udba_val(char *str)
+{
+	substring_t args[MAX_OPT_ARGS];
+
+	return match_token(str, udbalevel, args);
+}
+
+const char *au_optstr_udba(int udba)
+{
+	return au_parser_pattern(udba, (void *)udbalevel);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static match_table_t au_wbr_create_policy = {
+	{AuWbrCreate_TDP, "tdp"},
+	{AuWbrCreate_TDP, "top-down-parent"},
+	{AuWbrCreate_RR, "rr"},
+	{AuWbrCreate_RR, "round-robin"},
+	{AuWbrCreate_MFS, "mfs"},
+	{AuWbrCreate_MFS, "most-free-space"},
+	{AuWbrCreate_MFSV, "mfs:%d"},
+	{AuWbrCreate_MFSV, "most-free-space:%d"},
+
+	{AuWbrCreate_MFSRR, "mfsrr:%d"},
+	{AuWbrCreate_MFSRRV, "mfsrr:%d:%d"},
+	{AuWbrCreate_PMFS, "pmfs"},
+	{AuWbrCreate_PMFSV, "pmfs:%d"},
+
+	{-1, NULL}
+};
+
+/*
+ * cf. linux/lib/parser.c and cmdline.c
+ * we gave up calling memparse() since it uses simple_strtoull() instead of
+ * kstrto...().
+ */
+static int noinline_for_stack
+au_match_ull(substring_t *s, unsigned long long *result)
+{
+	int err;
+	unsigned int len;
+	char a[32];
+
+	err = -ERANGE;
+	len = s->to - s->from;
+	if (len + 1 <= sizeof(a)) {
+		memcpy(a, s->from, len);
+		a[len] = '\0';
+		err = kstrtoull(a, 0, result);
+	}
+	return err;
+}
+
+static int au_wbr_mfs_wmark(substring_t *arg, char *str,
+			    struct au_opt_wbr_create *create)
+{
+	int err;
+	unsigned long long ull;
+
+	err = 0;
+	if (!au_match_ull(arg, &ull))
+		create->mfsrr_watermark = ull;
+	else {
+		pr_err("bad integer in %s\n", str);
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
+static int au_wbr_mfs_sec(substring_t *arg, char *str,
+			  struct au_opt_wbr_create *create)
+{
+	int n, err;
+
+	err = 0;
+	if (!match_int(arg, &n) && 0 <= n && n <= AUFS_MFS_MAX_SEC)
+		create->mfs_second = n;
+	else {
+		pr_err("bad integer in %s\n", str);
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
+static int noinline_for_stack
+au_wbr_create_val(char *str, struct au_opt_wbr_create *create)
+{
+	int err, e;
+	substring_t args[MAX_OPT_ARGS];
+
+	err = match_token(str, au_wbr_create_policy, args);
+	create->wbr_create = err;
+	switch (err) {
+	case AuWbrCreate_MFSRRV:
+		e = au_wbr_mfs_wmark(&args[0], str, create);
+		if (!e)
+			e = au_wbr_mfs_sec(&args[1], str, create);
+		if (unlikely(e))
+			err = e;
+		break;
+	case AuWbrCreate_MFSRR:
+		e = au_wbr_mfs_wmark(&args[0], str, create);
+		if (unlikely(e)) {
+			err = e;
+			break;
+		}
+		/*FALLTHROUGH*/
+	case AuWbrCreate_MFS:
+	case AuWbrCreate_PMFS:
+		create->mfs_second = AUFS_MFS_DEF_SEC;
+		break;
+	case AuWbrCreate_MFSV:
+	case AuWbrCreate_PMFSV:
+		e = au_wbr_mfs_sec(&args[0], str, create);
+		if (unlikely(e))
+			err = e;
+		break;
+	}
+
+	return err;
+}
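+
+/*
+ * examples of accepted policies: "create=mfs" (default expiry of
+ * AUFS_MFS_DEF_SEC), "create=mfs:30" (30 sec expiry), and
+ * "create=mfsrr:10000000:30" (a 10000000-byte watermark plus a 30 sec
+ * expiry). the watermark must be a plain integer since memparse()
+ * suffixes are unsupported here (see au_match_ull() above).
+ */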
+
+const char *au_optstr_wbr_create(int wbr_create)
+{
+	return au_parser_pattern(wbr_create, (void *)au_wbr_create_policy);
+}
+
+static match_table_t au_wbr_copyup_policy = {
+	{AuWbrCopyup_TDP, "tdp"},
+	{AuWbrCopyup_TDP, "top-down-parent"},
+	{AuWbrCopyup_BUP, "bup"},
+	{AuWbrCopyup_BUP, "bottom-up-parent"},
+	{AuWbrCopyup_BU, "bu"},
+	{AuWbrCopyup_BU, "bottom-up"},
+	{-1, NULL}
+};
+
+static int noinline_for_stack au_wbr_copyup_val(char *str)
+{
+	substring_t args[MAX_OPT_ARGS];
+
+	return match_token(str, au_wbr_copyup_policy, args);
+}
+
+const char *au_optstr_wbr_copyup(int wbr_copyup)
+{
+	return au_parser_pattern(wbr_copyup, (void *)au_wbr_copyup_policy);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static const int lkup_dirflags = LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
+
+static void dump_opts(struct au_opts *opts)
+{
+#ifdef CONFIG_AUFS_DEBUG
+	/* reduce stack space */
+	union {
+		struct au_opt_add *add;
+		struct au_opt_del *del;
+		struct au_opt_mod *mod;
+		struct au_opt_xino *xino;
+		struct au_opt_xino_itrunc *xino_itrunc;
+		struct au_opt_wbr_create *create;
+	} u;
+	struct au_opt *opt;
+
+	opt = opts->opt;
+	while (opt->type != Opt_tail) {
+		switch (opt->type) {
+		case Opt_add:
+			u.add = &opt->add;
+			AuDbg("add {b%d, %s, 0x%x, %p}\n",
+				  u.add->bindex, u.add->pathname, u.add->perm,
+				  u.add->path.dentry);
+			break;
+		case Opt_del:
+		case Opt_idel:
+			u.del = &opt->del;
+			AuDbg("del {%s, %p}\n",
+			      u.del->pathname, u.del->h_path.dentry);
+			break;
+		case Opt_mod:
+		case Opt_imod:
+			u.mod = &opt->mod;
+			AuDbg("mod {%s, 0x%x, %p}\n",
+				  u.mod->path, u.mod->perm, u.mod->h_root);
+			break;
+		case Opt_append:
+			u.add = &opt->add;
+			AuDbg("append {b%d, %s, 0x%x, %p}\n",
+				  u.add->bindex, u.add->pathname, u.add->perm,
+				  u.add->path.dentry);
+			break;
+		case Opt_prepend:
+			u.add = &opt->add;
+			AuDbg("prepend {b%d, %s, 0x%x, %p}\n",
+				  u.add->bindex, u.add->pathname, u.add->perm,
+				  u.add->path.dentry);
+			break;
+		case Opt_dirwh:
+			AuDbg("dirwh %d\n", opt->dirwh);
+			break;
+		case Opt_rdcache:
+			AuDbg("rdcache %d\n", opt->rdcache);
+			break;
+		case Opt_rdblk:
+			AuDbg("rdblk %u\n", opt->rdblk);
+			break;
+		case Opt_rdblk_def:
+			AuDbg("rdblk_def\n");
+			break;
+		case Opt_rdhash:
+			AuDbg("rdhash %u\n", opt->rdhash);
+			break;
+		case Opt_rdhash_def:
+			AuDbg("rdhash_def\n");
+			break;
+		case Opt_xino:
+			u.xino = &opt->xino;
+			AuDbg("xino {%s %.*s}\n",
+				  u.xino->path,
+				  AuDLNPair(u.xino->file->f_dentry));
+			break;
+		case Opt_trunc_xino:
+			AuLabel(trunc_xino);
+			break;
+		case Opt_notrunc_xino:
+			AuLabel(notrunc_xino);
+			break;
+		case Opt_trunc_xino_path:
+		case Opt_itrunc_xino:
+			u.xino_itrunc = &opt->xino_itrunc;
+			AuDbg("trunc_xino %d\n", u.xino_itrunc->bindex);
+			break;
+
+		case Opt_noxino:
+			AuLabel(noxino);
+			break;
+		case Opt_trunc_xib:
+			AuLabel(trunc_xib);
+			break;
+		case Opt_notrunc_xib:
+			AuLabel(notrunc_xib);
+			break;
+		case Opt_shwh:
+			AuLabel(shwh);
+			break;
+		case Opt_noshwh:
+			AuLabel(noshwh);
+			break;
+		case Opt_plink:
+			AuLabel(plink);
+			break;
+		case Opt_noplink:
+			AuLabel(noplink);
+			break;
+		case Opt_list_plink:
+			AuLabel(list_plink);
+			break;
+		case Opt_udba:
+			AuDbg("udba %d, %s\n",
+				  opt->udba, au_optstr_udba(opt->udba));
+			break;
+		case Opt_dio:
+			AuLabel(dio);
+			break;
+		case Opt_nodio:
+			AuLabel(nodio);
+			break;
+		case Opt_diropq_a:
+			AuLabel(diropq_a);
+			break;
+		case Opt_diropq_w:
+			AuLabel(diropq_w);
+			break;
+		case Opt_warn_perm:
+			AuLabel(warn_perm);
+			break;
+		case Opt_nowarn_perm:
+			AuLabel(nowarn_perm);
+			break;
+		case Opt_refrof:
+			AuLabel(refrof);
+			break;
+		case Opt_norefrof:
+			AuLabel(norefrof);
+			break;
+		case Opt_verbose:
+			AuLabel(verbose);
+			break;
+		case Opt_noverbose:
+			AuLabel(noverbose);
+			break;
+		case Opt_sum:
+			AuLabel(sum);
+			break;
+		case Opt_nosum:
+			AuLabel(nosum);
+			break;
+		case Opt_wsum:
+			AuLabel(wsum);
+			break;
+		case Opt_wbr_create:
+			u.create = &opt->wbr_create;
+			AuDbg("create %d, %s\n", u.create->wbr_create,
+				  au_optstr_wbr_create(u.create->wbr_create));
+			switch (u.create->wbr_create) {
+			case AuWbrCreate_MFSV:
+			case AuWbrCreate_PMFSV:
+				AuDbg("%d sec\n", u.create->mfs_second);
+				break;
+			case AuWbrCreate_MFSRR:
+				AuDbg("%llu watermark\n",
+					  u.create->mfsrr_watermark);
+				break;
+			case AuWbrCreate_MFSRRV:
+				AuDbg("%llu watermark, %d sec\n",
+					  u.create->mfsrr_watermark,
+					  u.create->mfs_second);
+				break;
+			}
+			break;
+		case Opt_wbr_copyup:
+			AuDbg("copyup %d, %s\n", opt->wbr_copyup,
+				  au_optstr_wbr_copyup(opt->wbr_copyup));
+			break;
+		default:
+			BUG();
+		}
+		opt++;
+	}
+#endif
+}
+
+void au_opts_free(struct au_opts *opts)
+{
+	struct au_opt *opt;
+
+	opt = opts->opt;
+	while (opt->type != Opt_tail) {
+		switch (opt->type) {
+		case Opt_add:
+		case Opt_append:
+		case Opt_prepend:
+			path_put(&opt->add.path);
+			break;
+		case Opt_del:
+		case Opt_idel:
+			path_put(&opt->del.h_path);
+			break;
+		case Opt_mod:
+		case Opt_imod:
+			dput(opt->mod.h_root);
+			break;
+		case Opt_xino:
+			fput(opt->xino.file);
+			break;
+		}
+		opt++;
+	}
+}
+
+static int opt_add(struct au_opt *opt, char *opt_str, unsigned long sb_flags,
+		   aufs_bindex_t bindex)
+{
+	int err;
+	struct au_opt_add *add = &opt->add;
+	char *p;
+
+	add->bindex = bindex;
+	add->perm = AuBrPerm_RO;
+	add->pathname = opt_str;
+	p = strchr(opt_str, '=');
+	if (p) {
+		*p++ = 0;
+		if (*p)
+			add->perm = br_perm_val(p);
+	}
+
+	err = vfsub_kern_path(add->pathname, lkup_dirflags, &add->path);
+	if (!err) {
+		if (!p) {
+			add->perm = AuBrPerm_RO;
+			if (au_test_fs_rr(add->path.dentry->d_sb))
+				add->perm = AuBrPerm_RR;
+			else if (!bindex && !(sb_flags & MS_RDONLY))
+				add->perm = AuBrPerm_RW;
+		}
+		opt->type = Opt_add;
+		goto out;
+	}
+	pr_err("lookup failed %s (%d)\n", add->pathname, err);
+	err = -EINVAL;
+
+out:
+	return err;
+}
+
+static int au_opts_parse_del(struct au_opt_del *del, substring_t args[])
+{
+	int err;
+
+	del->pathname = args[0].from;
+	AuDbg("del path %s\n", del->pathname);
+
+	err = vfsub_kern_path(del->pathname, lkup_dirflags, &del->h_path);
+	if (unlikely(err))
+		pr_err("lookup failed %s (%d)\n", del->pathname, err);
+
+	return err;
+}
+
+#if 0 /* reserved for future use */
+static int au_opts_parse_idel(struct super_block *sb, aufs_bindex_t bindex,
+			      struct au_opt_del *del, substring_t args[])
+{
+	int err;
+	struct dentry *root;
+
+	err = -EINVAL;
+	root = sb->s_root;
+	aufs_read_lock(root, AuLock_FLUSH);
+	if (bindex < 0 || au_sbend(sb) < bindex) {
+		pr_err("out of bounds, %d\n", bindex);
+		goto out;
+	}
+
+	err = 0;
+	del->h_path.dentry = dget(au_h_dptr(root, bindex));
+	del->h_path.mnt = mntget(au_sbr_mnt(sb, bindex));
+
+out:
+	aufs_read_unlock(root, !AuLock_IR);
+	return err;
+}
+#endif
+
+static int noinline_for_stack
+au_opts_parse_mod(struct au_opt_mod *mod, substring_t args[])
+{
+	int err;
+	struct path path;
+	char *p;
+
+	err = -EINVAL;
+	mod->path = args[0].from;
+	p = strchr(mod->path, '=');
+	if (unlikely(!p)) {
+		pr_err("no permssion %s\n", args[0].from);
+		goto out;
+	}
+
+	*p++ = 0;
+	err = vfsub_kern_path(mod->path, lkup_dirflags, &path);
+	if (unlikely(err)) {
+		pr_err("lookup failed %s (%d)\n", mod->path, err);
+		goto out;
+	}
+
+	mod->perm = br_perm_val(p);
+	AuDbg("mod path %s, perm 0x%x, %s\n", mod->path, mod->perm, p);
+	mod->h_root = dget(path.dentry);
+	path_put(&path);
+
+out:
+	return err;
+}
+
+#if 0 /* reserved for future use */
+static int au_opts_parse_imod(struct super_block *sb, aufs_bindex_t bindex,
+			      struct au_opt_mod *mod, substring_t args[])
+{
+	int err;
+	struct dentry *root;
+
+	err = -EINVAL;
+	root = sb->s_root;
+	aufs_read_lock(root, AuLock_FLUSH);
+	if (bindex < 0 || au_sbend(sb) < bindex) {
+		pr_err("out of bounds, %d\n", bindex);
+		goto out;
+	}
+
+	err = 0;
+	mod->perm = br_perm_val(args[1].from);
+	AuDbg("mod path %s, perm 0x%x, %s\n",
+	      mod->path, mod->perm, args[1].from);
+	mod->h_root = dget(au_h_dptr(root, bindex));
+
+out:
+	aufs_read_unlock(root, !AuLock_IR);
+	return err;
+}
+#endif
+
+static int au_opts_parse_xino(struct super_block *sb, struct au_opt_xino *xino,
+			      substring_t args[])
+{
+	int err;
+	struct file *file;
+
+	file = au_xino_create(sb, args[0].from, /*silent*/0);
+	err = PTR_ERR(file);
+	if (IS_ERR(file))
+		goto out;
+
+	err = -EINVAL;
+	if (unlikely(file->f_dentry->d_sb == sb)) {
+		fput(file);
+		pr_err("%s must be outside\n", args[0].from);
+		goto out;
+	}
+
+	err = 0;
+	xino->file = file;
+	xino->path = args[0].from;
+
+out:
+	return err;
+}
+
+static int noinline_for_stack
+au_opts_parse_xino_itrunc_path(struct super_block *sb,
+			       struct au_opt_xino_itrunc *xino_itrunc,
+			       substring_t args[])
+{
+	int err;
+	aufs_bindex_t bend, bindex;
+	struct path path;
+	struct dentry *root;
+
+	err = vfsub_kern_path(args[0].from, lkup_dirflags, &path);
+	if (unlikely(err)) {
+		pr_err("lookup failed %s (%d)\n", args[0].from, err);
+		goto out;
+	}
+
+	xino_itrunc->bindex = -1;
+	root = sb->s_root;
+	aufs_read_lock(root, AuLock_FLUSH);
+	bend = au_sbend(sb);
+	for (bindex = 0; bindex <= bend; bindex++) {
+		if (au_h_dptr(root, bindex) == path.dentry) {
+			xino_itrunc->bindex = bindex;
+			break;
+		}
+	}
+	aufs_read_unlock(root, !AuLock_IR);
+	path_put(&path);
+
+	if (unlikely(xino_itrunc->bindex < 0)) {
+		pr_err("no such branch %s\n", args[0].from);
+		err = -EINVAL;
+	}
+
+out:
+	return err;
+}
+
+/* called without aufs lock */
+int au_opts_parse(struct super_block *sb, char *str, struct au_opts *opts)
+{
+	int err, n, token;
+	aufs_bindex_t bindex;
+	unsigned char skipped;
+	struct dentry *root;
+	struct au_opt *opt, *opt_tail;
+	char *opt_str;
+	/* reduce the stack space */
+	union {
+		struct au_opt_xino_itrunc *xino_itrunc;
+		struct au_opt_wbr_create *create;
+	} u;
+	struct {
+		substring_t args[MAX_OPT_ARGS];
+	} *a;
+
+	err = -ENOMEM;
+	a = kmalloc(sizeof(*a), GFP_NOFS);
+	if (unlikely(!a))
+		goto out;
+
+	root = sb->s_root;
+	err = 0;
+	bindex = 0;
+	opt = opts->opt;
+	opt_tail = opt + opts->max_opt - 1;
+	opt->type = Opt_tail;
+	while (!err && (opt_str = strsep(&str, ",")) && *opt_str) {
+		err = -EINVAL;
+		skipped = 0;
+		token = match_token(opt_str, options, a->args);
+		switch (token) {
+		case Opt_br:
+			err = 0;
+			while (!err && (opt_str = strsep(&a->args[0].from, ":"))
+			       && *opt_str) {
+				err = opt_add(opt, opt_str, opts->sb_flags,
+					      bindex++);
+				if (unlikely(!err && ++opt > opt_tail)) {
+					err = -E2BIG;
+					break;
+				}
+				opt->type = Opt_tail;
+				skipped = 1;
+			}
+			break;
+		case Opt_add:
+			if (unlikely(match_int(&a->args[0], &n))) {
+				pr_err("bad integer in %s\n", opt_str);
+				break;
+			}
+			bindex = n;
+			err = opt_add(opt, a->args[1].from, opts->sb_flags,
+				      bindex);
+			if (!err)
+				opt->type = token;
+			break;
+		case Opt_append:
+			err = opt_add(opt, a->args[0].from, opts->sb_flags,
+				      /*dummy bindex*/1);
+			if (!err)
+				opt->type = token;
+			break;
+		case Opt_prepend:
+			err = opt_add(opt, a->args[0].from, opts->sb_flags,
+				      /*bindex*/0);
+			if (!err)
+				opt->type = token;
+			break;
+		case Opt_del:
+			err = au_opts_parse_del(&opt->del, a->args);
+			if (!err)
+				opt->type = token;
+			break;
+#if 0 /* reserved for future use */
+		case Opt_idel:
+			del->pathname = "(indexed)";
+			if (unlikely(match_int(&args[0], &n))) {
+				pr_err("bad integer in %s\n", opt_str);
+				break;
+			}
+			err = au_opts_parse_idel(sb, n, &opt->del, a->args);
+			if (!err)
+				opt->type = token;
+			break;
+#endif
+		case Opt_mod:
+			err = au_opts_parse_mod(&opt->mod, a->args);
+			if (!err)
+				opt->type = token;
+			break;
+#ifdef IMOD /* reserved for future use */
+		case Opt_imod:
+			u.mod->path = "(indexed)";
+			if (unlikely(match_int(&a->args[0], &n))) {
+				pr_err("bad integer in %s\n", opt_str);
+				break;
+			}
+			err = au_opts_parse_imod(sb, n, &opt->mod, a->args);
+			if (!err)
+				opt->type = token;
+			break;
+#endif
+		case Opt_xino:
+			err = au_opts_parse_xino(sb, &opt->xino, a->args);
+			if (!err)
+				opt->type = token;
+			break;
+
+		case Opt_trunc_xino_path:
+			err = au_opts_parse_xino_itrunc_path
+				(sb, &opt->xino_itrunc, a->args);
+			if (!err)
+				opt->type = token;
+			break;
+
+		case Opt_itrunc_xino:
+			u.xino_itrunc = &opt->xino_itrunc;
+			if (unlikely(match_int(&a->args[0], &n))) {
+				pr_err("bad integer in %s\n", opt_str);
+				break;
+			}
+			u.xino_itrunc->bindex = n;
+			aufs_read_lock(root, AuLock_FLUSH);
+			if (n < 0 || au_sbend(sb) < n) {
+				pr_err("out of bounds, %d\n", n);
+				aufs_read_unlock(root, !AuLock_IR);
+				break;
+			}
+			aufs_read_unlock(root, !AuLock_IR);
+			err = 0;
+			opt->type = token;
+			break;
+
+		case Opt_dirwh:
+			if (unlikely(match_int(&a->args[0], &opt->dirwh)))
+				break;
+			err = 0;
+			opt->type = token;
+			break;
+
+		case Opt_rdcache:
+			if (unlikely(match_int(&a->args[0], &n))) {
+				pr_err("bad integer in %s\n", opt_str);
+				break;
+			}
+			if (unlikely(n > AUFS_RDCACHE_MAX)) {
+				pr_err("rdcache must be smaller than %d\n",
+				       AUFS_RDCACHE_MAX);
+				break;
+			}
+			opt->rdcache = n;
+			err = 0;
+			opt->type = token;
+			break;
+		case Opt_rdblk:
+			if (unlikely(match_int(&a->args[0], &n)
+				     || n < 0
+				     || n > KMALLOC_MAX_SIZE)) {
+				pr_err("bad integer in %s\n", opt_str);
+				break;
+			}
+			if (unlikely(n && n < NAME_MAX)) {
+				pr_err("rdblk must be larger than %d\n",
+				       NAME_MAX);
+				break;
+			}
+			opt->rdblk = n;
+			err = 0;
+			opt->type = token;
+			break;
+		case Opt_rdhash:
+			if (unlikely(match_int(&a->args[0], &n)
+				     || n < 0
+				     || n * sizeof(struct hlist_head)
+				     > KMALLOC_MAX_SIZE)) {
+				pr_err("bad integer in %s\n", opt_str);
+				break;
+			}
+			opt->rdhash = n;
+			err = 0;
+			opt->type = token;
+			break;
+
+		case Opt_trunc_xino:
+		case Opt_notrunc_xino:
+		case Opt_noxino:
+		case Opt_trunc_xib:
+		case Opt_notrunc_xib:
+		case Opt_shwh:
+		case Opt_noshwh:
+		case Opt_plink:
+		case Opt_noplink:
+		case Opt_list_plink:
+		case Opt_dio:
+		case Opt_nodio:
+		case Opt_diropq_a:
+		case Opt_diropq_w:
+		case Opt_warn_perm:
+		case Opt_nowarn_perm:
+		case Opt_refrof:
+		case Opt_norefrof:
+		case Opt_verbose:
+		case Opt_noverbose:
+		case Opt_sum:
+		case Opt_nosum:
+		case Opt_wsum:
+		case Opt_rdblk_def:
+		case Opt_rdhash_def:
+			err = 0;
+			opt->type = token;
+			break;
+
+		case Opt_udba:
+			opt->udba = udba_val(a->args[0].from);
+			if (opt->udba >= 0) {
+				err = 0;
+				opt->type = token;
+			} else
+				pr_err("wrong value, %s\n", opt_str);
+			break;
+
+		case Opt_wbr_create:
+			u.create = &opt->wbr_create;
+			u.create->wbr_create
+				= au_wbr_create_val(a->args[0].from, u.create);
+			if (u.create->wbr_create >= 0) {
+				err = 0;
+				opt->type = token;
+			} else
+				pr_err("wrong value, %s\n", opt_str);
+			break;
+		case Opt_wbr_copyup:
+			opt->wbr_copyup = au_wbr_copyup_val(a->args[0].from);
+			if (opt->wbr_copyup >= 0) {
+				err = 0;
+				opt->type = token;
+			} else
+				pr_err("wrong value, %s\n", opt_str);
+			break;
+
+		case Opt_ignore:
+			pr_warn("ignored %s\n", opt_str);
+			/*FALLTHROUGH*/
+		case Opt_ignore_silent:
+			skipped = 1;
+			err = 0;
+			break;
+		case Opt_err:
+			pr_err("unknown option %s\n", opt_str);
+			break;
+		}
+
+		if (!err && !skipped) {
+			if (unlikely(++opt > opt_tail)) {
+				err = -E2BIG;
+				opt--;
+				opt->type = Opt_tail;
+				break;
+			}
+			opt->type = Opt_tail;
+		}
+	}
+
+	kfree(a);
+	dump_opts(opts);
+	if (unlikely(err))
+		au_opts_free(opts);
+
+out:
+	return err;
+}
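+
+/*
+ * for reference, a typical option string handed to this parser looks like
+ *	br=/rw=rw:/ro=ro,udba=reval,create=mfs
+ * which adds a writable branch and a read-only branch, then sets the udba
+ * and create policies in a single pass.
+ */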
+
+static int au_opt_wbr_create(struct super_block *sb,
+			     struct au_opt_wbr_create *create)
+{
+	int err;
+	struct au_sbinfo *sbinfo;
+
+	SiMustWriteLock(sb);
+
+	err = 1; /* handled */
+	sbinfo = au_sbi(sb);
+	if (sbinfo->si_wbr_create_ops->fin) {
+		err = sbinfo->si_wbr_create_ops->fin(sb);
+		if (!err)
+			err = 1;
+	}
+
+	sbinfo->si_wbr_create = create->wbr_create;
+	sbinfo->si_wbr_create_ops = au_wbr_create_ops + create->wbr_create;
+	switch (create->wbr_create) {
+	case AuWbrCreate_MFSRRV:
+	case AuWbrCreate_MFSRR:
+		sbinfo->si_wbr_mfs.mfsrr_watermark = create->mfsrr_watermark;
+		/*FALLTHROUGH*/
+	case AuWbrCreate_MFS:
+	case AuWbrCreate_MFSV:
+	case AuWbrCreate_PMFS:
+	case AuWbrCreate_PMFSV:
+		sbinfo->si_wbr_mfs.mfs_expire
+			= msecs_to_jiffies(create->mfs_second * MSEC_PER_SEC);
+		break;
+	}
+
+	if (sbinfo->si_wbr_create_ops->init)
+		sbinfo->si_wbr_create_ops->init(sb); /* ignore */
+
+	return err;
+}
+
+/*
+ * returns tri-state.
+ * plus: processed without an error
+ * zero: unprocessed
+ * minus: error
+ */
+static int au_opt_simple(struct super_block *sb, struct au_opt *opt,
+			 struct au_opts *opts)
+{
+	int err;
+	struct au_sbinfo *sbinfo;
+
+	SiMustWriteLock(sb);
+
+	err = 1; /* handled */
+	sbinfo = au_sbi(sb);
+	switch (opt->type) {
+	case Opt_udba:
+		sbinfo->si_mntflags &= ~AuOptMask_UDBA;
+		sbinfo->si_mntflags |= opt->udba;
+		opts->given_udba |= opt->udba;
+		break;
+
+	case Opt_plink:
+		au_opt_set(sbinfo->si_mntflags, PLINK);
+		break;
+	case Opt_noplink:
+		if (au_opt_test(sbinfo->si_mntflags, PLINK))
+			au_plink_put(sb, /*verbose*/1);
+		au_opt_clr(sbinfo->si_mntflags, PLINK);
+		break;
+	case Opt_list_plink:
+		if (au_opt_test(sbinfo->si_mntflags, PLINK))
+			au_plink_list(sb);
+		break;
+
+	case Opt_dio:
+		au_opt_set(sbinfo->si_mntflags, DIO);
+		au_fset_opts(opts->flags, REFRESH_DYAOP);
+		break;
+	case Opt_nodio:
+		au_opt_clr(sbinfo->si_mntflags, DIO);
+		au_fset_opts(opts->flags, REFRESH_DYAOP);
+		break;
+
+	case Opt_diropq_a:
+		au_opt_set(sbinfo->si_mntflags, ALWAYS_DIROPQ);
+		break;
+	case Opt_diropq_w:
+		au_opt_clr(sbinfo->si_mntflags, ALWAYS_DIROPQ);
+		break;
+
+	case Opt_warn_perm:
+		au_opt_set(sbinfo->si_mntflags, WARN_PERM);
+		break;
+	case Opt_nowarn_perm:
+		au_opt_clr(sbinfo->si_mntflags, WARN_PERM);
+		break;
+
+	case Opt_refrof:
+		au_opt_set(sbinfo->si_mntflags, REFROF);
+		break;
+	case Opt_norefrof:
+		au_opt_clr(sbinfo->si_mntflags, REFROF);
+		break;
+
+	case Opt_verbose:
+		au_opt_set(sbinfo->si_mntflags, VERBOSE);
+		break;
+	case Opt_noverbose:
+		au_opt_clr(sbinfo->si_mntflags, VERBOSE);
+		break;
+
+	case Opt_sum:
+		au_opt_set(sbinfo->si_mntflags, SUM);
+		break;
+	case Opt_wsum:
+		au_opt_clr(sbinfo->si_mntflags, SUM);
+		au_opt_set(sbinfo->si_mntflags, SUM_W);
+		break;
+	case Opt_nosum:
+		au_opt_clr(sbinfo->si_mntflags, SUM);
+		au_opt_clr(sbinfo->si_mntflags, SUM_W);
+		break;
+
+	case Opt_wbr_create:
+		err = au_opt_wbr_create(sb, &opt->wbr_create);
+		break;
+	case Opt_wbr_copyup:
+		sbinfo->si_wbr_copyup = opt->wbr_copyup;
+		sbinfo->si_wbr_copyup_ops = au_wbr_copyup_ops + opt->wbr_copyup;
+		break;
+
+	case Opt_dirwh:
+		sbinfo->si_dirwh = opt->dirwh;
+		break;
+
+	case Opt_rdcache:
+		sbinfo->si_rdcache
+			= msecs_to_jiffies(opt->rdcache * MSEC_PER_SEC);
+		break;
+	case Opt_rdblk:
+		sbinfo->si_rdblk = opt->rdblk;
+		break;
+	case Opt_rdblk_def:
+		sbinfo->si_rdblk = AUFS_RDBLK_DEF;
+		break;
+	case Opt_rdhash:
+		sbinfo->si_rdhash = opt->rdhash;
+		break;
+	case Opt_rdhash_def:
+		sbinfo->si_rdhash = AUFS_RDHASH_DEF;
+		break;
+
+	case Opt_shwh:
+		au_opt_set(sbinfo->si_mntflags, SHWH);
+		break;
+	case Opt_noshwh:
+		au_opt_clr(sbinfo->si_mntflags, SHWH);
+		break;
+
+	case Opt_trunc_xino:
+		au_opt_set(sbinfo->si_mntflags, TRUNC_XINO);
+		break;
+	case Opt_notrunc_xino:
+		au_opt_clr(sbinfo->si_mntflags, TRUNC_XINO);
+		break;
+
+	case Opt_trunc_xino_path:
+	case Opt_itrunc_xino:
+		err = au_xino_trunc(sb, opt->xino_itrunc.bindex);
+		if (!err)
+			err = 1;
+		break;
+
+	case Opt_trunc_xib:
+		au_fset_opts(opts->flags, TRUNC_XIB);
+		break;
+	case Opt_notrunc_xib:
+		au_fclr_opts(opts->flags, TRUNC_XIB);
+		break;
+
+	default:
+		err = 0;
+		break;
+	}
+
+	return err;
+}
+
+/*
+ * returns tri-state.
+ * plus: processed without an error
+ * zero: unprocessed
+ * minus: error
+ */
+static int au_opt_br(struct super_block *sb, struct au_opt *opt,
+		     struct au_opts *opts)
+{
+	int err, do_refresh;
+
+	err = 0;
+	switch (opt->type) {
+	case Opt_append:
+		opt->add.bindex = au_sbend(sb) + 1;
+		if (opt->add.bindex < 0)
+			opt->add.bindex = 0;
+		goto add;
+	case Opt_prepend:
+		opt->add.bindex = 0;
+	add:
+	case Opt_add:
+		err = au_br_add(sb, &opt->add,
+				au_ftest_opts(opts->flags, REMOUNT));
+		if (!err) {
+			err = 1;
+			au_fset_opts(opts->flags, REFRESH);
+		}
+		break;
+
+	case Opt_del:
+	case Opt_idel:
+		err = au_br_del(sb, &opt->del,
+				au_ftest_opts(opts->flags, REMOUNT));
+		if (!err) {
+			err = 1;
+			au_fset_opts(opts->flags, TRUNC_XIB);
+			au_fset_opts(opts->flags, REFRESH);
+		}
+		break;
+
+	case Opt_mod:
+	case Opt_imod:
+		err = au_br_mod(sb, &opt->mod,
+				au_ftest_opts(opts->flags, REMOUNT),
+				&do_refresh);
+		if (!err) {
+			err = 1;
+			if (do_refresh)
+				au_fset_opts(opts->flags, REFRESH);
+		}
+		break;
+	}
+
+	return err;
+}
+
+static int au_opt_xino(struct super_block *sb, struct au_opt *opt,
+		       struct au_opt_xino **opt_xino,
+		       struct au_opts *opts)
+{
+	int err;
+	aufs_bindex_t bend, bindex;
+	struct dentry *root, *parent, *h_root;
+
+	err = 0;
+	switch (opt->type) {
+	case Opt_xino:
+		err = au_xino_set(sb, &opt->xino,
+				  !!au_ftest_opts(opts->flags, REMOUNT));
+		if (unlikely(err))
+			break;
+
+		*opt_xino = &opt->xino;
+		au_xino_brid_set(sb, -1);
+
+		/* safe d_parent access */
+		parent = opt->xino.file->f_dentry->d_parent;
+		root = sb->s_root;
+		bend = au_sbend(sb);
+		for (bindex = 0; bindex <= bend; bindex++) {
+			h_root = au_h_dptr(root, bindex);
+			if (h_root == parent) {
+				au_xino_brid_set(sb, au_sbr_id(sb, bindex));
+				break;
+			}
+		}
+		break;
+
+	case Opt_noxino:
+		au_xino_clr(sb);
+		au_xino_brid_set(sb, -1);
+		*opt_xino = (void *)-1;
+		break;
+	}
+
+	return err;
+}
+
+int au_opts_verify(struct super_block *sb, unsigned long sb_flags,
+		   unsigned int pending)
+{
+	int err;
+	aufs_bindex_t bindex, bend;
+	unsigned char do_plink, skip, do_free;
+	struct au_branch *br;
+	struct au_wbr *wbr;
+	struct dentry *root;
+	struct inode *dir, *h_dir;
+	struct au_sbinfo *sbinfo;
+	struct au_hinode *hdir;
+
+	SiMustAnyLock(sb);
+
+	sbinfo = au_sbi(sb);
+	AuDebugOn(!(sbinfo->si_mntflags & AuOptMask_UDBA));
+
+	if (!(sb_flags & MS_RDONLY)) {
+		if (unlikely(!au_br_writable(au_sbr_perm(sb, 0))))
+			pr_warn("first branch should be rw\n");
+		if (unlikely(au_opt_test(sbinfo->si_mntflags, SHWH)))
+			pr_warn("shwh should be used with ro\n");
+	}
+
+	if (au_opt_test((sbinfo->si_mntflags | pending), UDBA_HNOTIFY)
+	    && !au_opt_test(sbinfo->si_mntflags, XINO))
+		pr_warn("udba=*notify requires xino\n");
+
+	err = 0;
+	root = sb->s_root;
+	dir = root->d_inode;
+	do_plink = !!au_opt_test(sbinfo->si_mntflags, PLINK);
+	bend = au_sbend(sb);
+	for (bindex = 0; !err && bindex <= bend; bindex++) {
+		skip = 0;
+		h_dir = au_h_iptr(dir, bindex);
+		br = au_sbr(sb, bindex);
+		do_free = 0;
+
+		wbr = br->br_wbr;
+		if (wbr)
+			wbr_wh_read_lock(wbr);
+
+		if (!au_br_writable(br->br_perm)) {
+			do_free = !!wbr;
+			skip = (!wbr
+				|| (!wbr->wbr_whbase
+				    && !wbr->wbr_plink
+				    && !wbr->wbr_orph));
+		} else if (!au_br_wh_linkable(br->br_perm)) {
+			/* skip = (!br->br_whbase && !br->br_orph); */
+			skip = (!wbr || !wbr->wbr_whbase);
+			if (skip && wbr) {
+				if (do_plink)
+					skip = !!wbr->wbr_plink;
+				else
+					skip = !wbr->wbr_plink;
+			}
+		} else {
+		/* skip = (br->br_whbase && br->br_orph); */
+			skip = (wbr && wbr->wbr_whbase);
+			if (skip) {
+				if (do_plink)
+					skip = !!wbr->wbr_plink;
+				else
+					skip = !wbr->wbr_plink;
+			}
+		}
+		if (wbr)
+			wbr_wh_read_unlock(wbr);
+
+		if (skip)
+			continue;
+
+		hdir = au_hi(dir, bindex);
+		au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT);
+		if (wbr)
+			wbr_wh_write_lock(wbr);
+		err = au_wh_init(br, sb);
+		if (wbr)
+			wbr_wh_write_unlock(wbr);
+		au_hn_imtx_unlock(hdir);
+
+		if (!err && do_free) {
+			kfree(wbr);
+			br->br_wbr = NULL;
+		}
+	}
+
+	return err;
+}
+
+int au_opts_mount(struct super_block *sb, struct au_opts *opts)
+{
+	int err;
+	unsigned int tmp;
+	aufs_bindex_t bindex, bend;
+	struct au_opt *opt;
+	struct au_opt_xino *opt_xino, xino;
+	struct au_sbinfo *sbinfo;
+	struct au_branch *br;
+
+	SiMustWriteLock(sb);
+
+	err = 0;
+	opt_xino = NULL;
+	opt = opts->opt;
+	while (err >= 0 && opt->type != Opt_tail)
+		err = au_opt_simple(sb, opt++, opts);
+	if (err > 0)
+		err = 0;
+	else if (unlikely(err < 0))
+		goto out;
+
+	/* disable xino and udba temporarily */
+	sbinfo = au_sbi(sb);
+	tmp = sbinfo->si_mntflags;
+	au_opt_clr(sbinfo->si_mntflags, XINO);
+	au_opt_set_udba(sbinfo->si_mntflags, UDBA_REVAL);
+
+	opt = opts->opt;
+	while (err >= 0 && opt->type != Opt_tail)
+		err = au_opt_br(sb, opt++, opts);
+	if (err > 0)
+		err = 0;
+	else if (unlikely(err < 0))
+		goto out;
+
+	bend = au_sbend(sb);
+	if (unlikely(bend < 0)) {
+		err = -EINVAL;
+		pr_err("no branches\n");
+		goto out;
+	}
+
+	if (au_opt_test(tmp, XINO))
+		au_opt_set(sbinfo->si_mntflags, XINO);
+	opt = opts->opt;
+	while (!err && opt->type != Opt_tail)
+		err = au_opt_xino(sb, opt++, &opt_xino, opts);
+	if (unlikely(err))
+		goto out;
+
+	err = au_opts_verify(sb, sb->s_flags, tmp);
+	if (unlikely(err))
+		goto out;
+
+	/* restore xino */
+	if (au_opt_test(tmp, XINO) && !opt_xino) {
+		xino.file = au_xino_def(sb);
+		err = PTR_ERR(xino.file);
+		if (IS_ERR(xino.file))
+			goto out;
+
+		err = au_xino_set(sb, &xino, /*remount*/0);
+		fput(xino.file);
+		if (unlikely(err))
+			goto out;
+	}
+
+	/* restore udba */
+	tmp &= AuOptMask_UDBA;
+	sbinfo->si_mntflags &= ~AuOptMask_UDBA;
+	sbinfo->si_mntflags |= tmp;
+	bend = au_sbend(sb);
+	for (bindex = 0; bindex <= bend; bindex++) {
+		br = au_sbr(sb, bindex);
+		err = au_hnotify_reset_br(tmp, br, br->br_perm);
+		if (unlikely(err))
+			AuIOErr("hnotify failed on br %d, %d, ignored\n",
+				bindex, err);
+		/* go on even if err */
+	}
+	if (au_opt_test(tmp, UDBA_HNOTIFY)) {
+		struct inode *dir = sb->s_root->d_inode;
+		au_hn_reset(dir, au_hi_flags(dir, /*isdir*/1) & ~AuHi_XINO);
+	}
+
+out:
+	return err;
+}
+
+int au_opts_remount(struct super_block *sb, struct au_opts *opts)
+{
+	int err, rerr;
+	struct inode *dir;
+	struct au_opt_xino *opt_xino;
+	struct au_opt *opt;
+	struct au_sbinfo *sbinfo;
+
+	SiMustWriteLock(sb);
+
+	dir = sb->s_root->d_inode;
+	sbinfo = au_sbi(sb);
+	err = 0;
+	opt_xino = NULL;
+	opt = opts->opt;
+	while (err >= 0 && opt->type != Opt_tail) {
+		err = au_opt_simple(sb, opt, opts);
+		if (!err)
+			err = au_opt_br(sb, opt, opts);
+		if (!err)
+			err = au_opt_xino(sb, opt, &opt_xino, opts);
+		opt++;
+	}
+	if (err > 0)
+		err = 0;
+	AuTraceErr(err);
+	/* go on even if err */
+
+	rerr = au_opts_verify(sb, opts->sb_flags, /*pending*/0);
+	if (unlikely(rerr && !err))
+		err = rerr;
+
+	if (au_ftest_opts(opts->flags, TRUNC_XIB)) {
+		rerr = au_xib_trunc(sb);
+		if (unlikely(rerr && !err))
+			err = rerr;
+	}
+
+	/* will be handled by the caller */
+	if (!au_ftest_opts(opts->flags, REFRESH)
+	    && (opts->given_udba || au_opt_test(sbinfo->si_mntflags, XINO)))
+		au_fset_opts(opts->flags, REFRESH);
+
+	AuDbg("status 0x%x\n", opts->flags);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+unsigned int au_opt_udba(struct super_block *sb)
+{
+	return au_mntflags(sb) & AuOptMask_UDBA;
+}
diff --git a/fs/aufs/opts.h b/fs/aufs/opts.h
new file mode 100644
index 0000000..e2f1803
--- /dev/null
+++ b/fs/aufs/opts.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * mount options/flags
+ */
+
+#ifndef __AUFS_OPTS_H__
+#define __AUFS_OPTS_H__
+
+#ifdef __KERNEL__
+
+#include <linux/path.h>
+
+struct file;
+struct super_block;
+
+/* ---------------------------------------------------------------------- */
+
+/* mount flags */
+#define AuOpt_XINO		1		/* external inode number bitmap
+						   and translation table */
+#define AuOpt_TRUNC_XINO	(1 << 1)	/* truncate xino files */
+#define AuOpt_UDBA_NONE		(1 << 2)	/* users' direct branch access */
+#define AuOpt_UDBA_REVAL	(1 << 3)
+#define AuOpt_UDBA_HNOTIFY	(1 << 4)
+#define AuOpt_SHWH		(1 << 5)	/* show whiteout */
+#define AuOpt_PLINK		(1 << 6)	/* pseudo-link */
+#define AuOpt_DIRPERM1		(1 << 7)	/* unimplemented */
+#define AuOpt_REFROF		(1 << 8)	/* unimplemented */
+#define AuOpt_ALWAYS_DIROPQ	(1 << 9)	/* policy to creating diropq */
+#define AuOpt_SUM		(1 << 10)	/* summation for statfs(2) */
+#define AuOpt_SUM_W		(1 << 11)	/* unimplemented */
+#define AuOpt_WARN_PERM		(1 << 12)	/* warn when add-branch */
+#define AuOpt_VERBOSE		(1 << 13)	/* busy inode when del-branch */
+#define AuOpt_DIO		(1 << 14)	/* direct io */
+
+#ifndef CONFIG_AUFS_HNOTIFY
+#undef AuOpt_UDBA_HNOTIFY
+#define AuOpt_UDBA_HNOTIFY	0
+#endif
+#ifndef CONFIG_AUFS_SHWH
+#undef AuOpt_SHWH
+#define AuOpt_SHWH		0
+#endif
+
+#define AuOpt_Def	(AuOpt_XINO \
+			 | AuOpt_UDBA_REVAL \
+			 | AuOpt_PLINK \
+			 /* | AuOpt_DIRPERM1 */ \
+			 | AuOpt_WARN_PERM)
+#define AuOptMask_UDBA	(AuOpt_UDBA_NONE \
+			 | AuOpt_UDBA_REVAL \
+			 | AuOpt_UDBA_HNOTIFY)
+
+#define au_opt_test(flags, name)	(flags & AuOpt_##name)
+#define au_opt_set(flags, name) do { \
+	BUILD_BUG_ON(AuOpt_##name & AuOptMask_UDBA); \
+	((flags) |= AuOpt_##name); \
+} while (0)
+#define au_opt_set_udba(flags, name) do { \
+	(flags) &= ~AuOptMask_UDBA; \
+	((flags) |= AuOpt_##name); \
+} while (0)
+#define au_opt_clr(flags, name) do { \
+	((flags) &= ~AuOpt_##name); \
+} while (0)
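+
+/*
+ * Illustration only: the token-pasting idiom above, reduced to a
+ * self-contained userspace demo (all names hypothetical):
+ *
+ *	#include <assert.h>
+ *	#define MYOPT_XINO		1
+ *	#define MYOPT_PLINK		(1 << 1)
+ *	#define my_test(flags, name)	((flags) & MYOPT_##name)
+ *	#define my_set(flags, name)	((flags) |= MYOPT_##name)
+ *	#define my_clr(flags, name)	((flags) &= ~MYOPT_##name)
+ *
+ *	int main(void)
+ *	{
+ *		unsigned int flags = 0;
+ *
+ *		my_set(flags, PLINK);
+ *		assert(my_test(flags, PLINK) && !my_test(flags, XINO));
+ *		my_clr(flags, PLINK);
+ *		assert(!my_test(flags, PLINK));
+ *		return 0;
+ *	}
+ */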
+
+static inline unsigned int au_opts_plink(unsigned int mntflags)
+{
+#ifdef CONFIG_PROC_FS
+	return mntflags;
+#else
+	return mntflags & ~AuOpt_PLINK;
+#endif
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* policies to select one among multiple writable branches */
+enum {
+	AuWbrCreate_TDP,	/* top down parent */
+	AuWbrCreate_RR,		/* round robin */
+	AuWbrCreate_MFS,	/* most free space */
+	AuWbrCreate_MFSV,	/* mfs with seconds */
+	AuWbrCreate_MFSRR,	/* mfs then rr */
+	AuWbrCreate_MFSRRV,	/* mfs then rr with seconds */
+	AuWbrCreate_PMFS,	/* parent and mfs */
+	AuWbrCreate_PMFSV,	/* parent and mfs with seconds */
+
+	AuWbrCreate_Def = AuWbrCreate_TDP
+};
+
+enum {
+	AuWbrCopyup_TDP,	/* top down parent */
+	AuWbrCopyup_BUP,	/* bottom up parent */
+	AuWbrCopyup_BU,		/* bottom up */
+
+	AuWbrCopyup_Def = AuWbrCopyup_TDP
+};
+
+/* ---------------------------------------------------------------------- */
+
+struct au_opt_add {
+	aufs_bindex_t	bindex;
+	char		*pathname;
+	int		perm;
+	struct path	path;
+};
+
+struct au_opt_del {
+	char		*pathname;
+	struct path	h_path;
+};
+
+struct au_opt_mod {
+	char		*path;
+	int		perm;
+	struct dentry	*h_root;
+};
+
+struct au_opt_xino {
+	char		*path;
+	struct file	*file;
+};
+
+struct au_opt_xino_itrunc {
+	aufs_bindex_t	bindex;
+};
+
+struct au_opt_wbr_create {
+	int			wbr_create;
+	int			mfs_second;
+	unsigned long long	mfsrr_watermark;
+};
+
+struct au_opt {
+	int type;
+	union {
+		struct au_opt_xino	xino;
+		struct au_opt_xino_itrunc xino_itrunc;
+		struct au_opt_add	add;
+		struct au_opt_del	del;
+		struct au_opt_mod	mod;
+		int			dirwh;
+		int			rdcache;
+		unsigned int		rdblk;
+		unsigned int		rdhash;
+		int			udba;
+		struct au_opt_wbr_create wbr_create;
+		int			wbr_copyup;
+	};
+};
+
+/* opts flags */
+#define AuOpts_REMOUNT		1
+#define AuOpts_REFRESH		(1 << 1)
+#define AuOpts_TRUNC_XIB	(1 << 2)
+#define AuOpts_REFRESH_DYAOP	(1 << 3)
+#define au_ftest_opts(flags, name)	((flags) & AuOpts_##name)
+#define au_fset_opts(flags, name) \
+	do { (flags) |= AuOpts_##name; } while (0)
+#define au_fclr_opts(flags, name) \
+	do { (flags) &= ~AuOpts_##name; } while (0)
+
+struct au_opts {
+	struct au_opt	*opt;
+	int		max_opt;
+
+	unsigned int	given_udba;
+	unsigned int	flags;
+	unsigned long	sb_flags;
+};
+
+/* ---------------------------------------------------------------------- */
+
+char *au_optstr_br_perm(int brperm);
+const char *au_optstr_udba(int udba);
+const char *au_optstr_wbr_copyup(int wbr_copyup);
+const char *au_optstr_wbr_create(int wbr_create);
+
+void au_opts_free(struct au_opts *opts);
+int au_opts_parse(struct super_block *sb, char *str, struct au_opts *opts);
+int au_opts_verify(struct super_block *sb, unsigned long sb_flags,
+		   unsigned int pending);
+int au_opts_mount(struct super_block *sb, struct au_opts *opts);
+int au_opts_remount(struct super_block *sb, struct au_opts *opts);
+
+unsigned int au_opt_udba(struct super_block *sb);
+
+/* ---------------------------------------------------------------------- */
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_OPTS_H__ */
diff --git a/fs/aufs/plink.c b/fs/aufs/plink.c
new file mode 100644
index 0000000..654438e
--- /dev/null
+++ b/fs/aufs/plink.c
@@ -0,0 +1,520 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * pseudo-link
+ */
+
+#include "aufs.h"
+
+/*
+ * the pseudo-link maintenance mode.
+ * while a user process is maintaining the pseudo-links,
+ * prohibit adding a new plink and manipulating the branches.
+ *
+ * Flags
+ * NOPLM:
+ *	For entry functions which will handle plink while i_mutex is already
+ *	held in VFS.
+ *	They cannot wait and should return an error at once.
+ *	Callers have to check the error.
+ * NOPLMW:
+ *	For entry functions which will handle plink, but i_mutex is not held
+ *	in VFS.
+ *	They can wait for the plink maintenance mode to finish.
+ *
+ * They behave like F_SETLK and F_SETLKW.
+ * If the caller never handles plink, then both flags are unnecessary.
+ */
+
+int au_plink_maint(struct super_block *sb, int flags)
+{
+	int err;
+	pid_t pid, ppid;
+	struct au_sbinfo *sbi;
+
+	SiMustAnyLock(sb);
+
+	err = 0;
+	if (!au_opt_test(au_mntflags(sb), PLINK))
+		goto out;
+
+	sbi = au_sbi(sb);
+	pid = sbi->si_plink_maint_pid;
+	if (!pid || pid == current->pid)
+		goto out;
+
+	/* todo: it highly depends upon /sbin/mount.aufs */
+	rcu_read_lock();
+	ppid = task_pid_vnr(rcu_dereference(current->real_parent));
+	rcu_read_unlock();
+	if (pid == ppid)
+		goto out;
+
+	if (au_ftest_lock(flags, NOPLMW)) {
+		/* if there is no i_mutex lock in VFS, we don't need to wait */
+		/* AuDebugOn(!lockdep_depth(current)); */
+		while (sbi->si_plink_maint_pid) {
+			si_read_unlock(sb);
+			/* gave up wake_up_bit() */
+			wait_event(sbi->si_plink_wq, !sbi->si_plink_maint_pid);
+
+			if (au_ftest_lock(flags, FLUSH))
+				au_nwt_flush(&sbi->si_nowait);
+			si_noflush_read_lock(sb);
+		}
+	} else if (au_ftest_lock(flags, NOPLM)) {
+		AuDbg("ppid %d, pid %d\n", ppid, pid);
+		err = -EAGAIN;
+	}
+
+out:
+	return err;
+}
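+
+/*
+ * Illustration only: how a caller picks one of the two flags. An entry
+ * point which already holds i_mutex uses NOPLM and must handle -EAGAIN,
+ * while one which may sleep uses NOPLMW and simply waits:
+ *
+ *	err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+ *	if (unlikely(err))
+ *		return err;	// -EAGAIN during the maintenance mode
+ *
+ *	si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);	// may block
+ */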
+
+void au_plink_maint_leave(struct au_sbinfo *sbinfo)
+{
+	spin_lock(&sbinfo->si_plink_maint_lock);
+	sbinfo->si_plink_maint_pid = 0;
+	spin_unlock(&sbinfo->si_plink_maint_lock);
+	wake_up_all(&sbinfo->si_plink_wq);
+}
+
+int au_plink_maint_enter(struct super_block *sb)
+{
+	int err;
+	struct au_sbinfo *sbinfo;
+
+	err = 0;
+	sbinfo = au_sbi(sb);
+	/* make sure i am the only one in this fs */
+	si_write_lock(sb, AuLock_FLUSH);
+	if (au_opt_test(au_mntflags(sb), PLINK)) {
+		spin_lock(&sbinfo->si_plink_maint_lock);
+		if (!sbinfo->si_plink_maint_pid)
+			sbinfo->si_plink_maint_pid = current->pid;
+		else
+			err = -EBUSY;
+		spin_unlock(&sbinfo->si_plink_maint_lock);
+	}
+	si_write_unlock(sb);
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_DEBUG
+void au_plink_list(struct super_block *sb)
+{
+	int i;
+	struct au_sbinfo *sbinfo;
+	struct hlist_head *plink_hlist;
+	struct pseudo_link *plink;
+
+	SiMustAnyLock(sb);
+
+	sbinfo = au_sbi(sb);
+	AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
+	AuDebugOn(au_plink_maint(sb, AuLock_NOPLM));
+
+	for (i = 0; i < AuPlink_NHASH; i++) {
+		plink_hlist = &sbinfo->si_plink[i].head;
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(plink, plink_hlist, hlist)
+			AuDbg("%lu\n", plink->inode->i_ino);
+		rcu_read_unlock();
+	}
+}
+#endif
+
+/* is the inode pseudo-linked? */
+int au_plink_test(struct inode *inode)
+{
+	int found, i;
+	struct au_sbinfo *sbinfo;
+	struct hlist_head *plink_hlist;
+	struct pseudo_link *plink;
+
+	sbinfo = au_sbi(inode->i_sb);
+	AuRwMustAnyLock(&sbinfo->si_rwsem);
+	AuDebugOn(!au_opt_test(au_mntflags(inode->i_sb), PLINK));
+	AuDebugOn(au_plink_maint(inode->i_sb, AuLock_NOPLM));
+
+	found = 0;
+	i = au_plink_hash(inode->i_ino);
+	plink_hlist = &sbinfo->si_plink[i].head;
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(plink, plink_hlist, hlist)
+		if (plink->inode == inode) {
+			found = 1;
+			break;
+		}
+	rcu_read_unlock();
+	return found;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * generate a name for plink.
+ * the file will be stored under AUFS_WH_PLINKDIR.
+ */
+/* 20 is the max number of decimal digits of an unsigned 64bit value */
+#define PLINK_NAME_LEN	((20 + 1) * 2)
+
+static int plink_name(char *name, int len, struct inode *inode,
+		      aufs_bindex_t bindex)
+{
+	int rlen;
+	struct inode *h_inode;
+
+	h_inode = au_h_iptr(inode, bindex);
+	rlen = snprintf(name, len, "%lu.%lu", inode->i_ino, h_inode->i_ino);
+	return rlen;
+}
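+
+/*
+ * Illustration only: for an aufs inode with i_ino 123 whose branch
+ * inode has i_ino 456, the snprintf() above produces "123.456";
+ * PLINK_NAME_LEN leaves room for two 20-digit 64bit values plus the
+ * '.' separator and the terminating NUL.
+ */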
+
+struct au_do_plink_lkup_args {
+	struct dentry **errp;
+	struct qstr *tgtname;
+	struct dentry *h_parent;
+	struct au_branch *br;
+};
+
+static struct dentry *au_do_plink_lkup(struct qstr *tgtname,
+				       struct dentry *h_parent,
+				       struct au_branch *br)
+{
+	struct dentry *h_dentry;
+	struct mutex *h_mtx;
+
+	h_mtx = &h_parent->d_inode->i_mutex;
+	mutex_lock_nested(h_mtx, AuLsc_I_CHILD2);
+	h_dentry = vfsub_lkup_one(tgtname, h_parent);
+	mutex_unlock(h_mtx);
+	return h_dentry;
+}
+
+static void au_call_do_plink_lkup(void *args)
+{
+	struct au_do_plink_lkup_args *a = args;
+	*a->errp = au_do_plink_lkup(a->tgtname, a->h_parent, a->br);
+}
+
+/* lookup the plink-ed @inode under the branch at @bindex */
+struct dentry *au_plink_lkup(struct inode *inode, aufs_bindex_t bindex)
+{
+	struct dentry *h_dentry, *h_parent;
+	struct au_branch *br;
+	struct inode *h_dir;
+	int wkq_err;
+	char a[PLINK_NAME_LEN];
+	struct qstr tgtname = QSTR_INIT(a, 0);
+
+	AuDebugOn(au_plink_maint(inode->i_sb, AuLock_NOPLM));
+
+	br = au_sbr(inode->i_sb, bindex);
+	h_parent = br->br_wbr->wbr_plink;
+	h_dir = h_parent->d_inode;
+	tgtname.len = plink_name(a, sizeof(a), inode, bindex);
+
+	if (!uid_eq(current_fsuid(), GLOBAL_ROOT_UID)) {
+		struct au_do_plink_lkup_args args = {
+			.errp		= &h_dentry,
+			.tgtname	= &tgtname,
+			.h_parent	= h_parent,
+			.br		= br
+		};
+
+		wkq_err = au_wkq_wait(au_call_do_plink_lkup, &args);
+		if (unlikely(wkq_err))
+			h_dentry = ERR_PTR(wkq_err);
+	} else
+		h_dentry = au_do_plink_lkup(&tgtname, h_parent, br);
+
+	return h_dentry;
+}
+
+/* create a pseudo-link */
+static int do_whplink(struct qstr *tgt, struct dentry *h_parent,
+		      struct dentry *h_dentry, struct au_branch *br)
+{
+	int err;
+	struct path h_path = {
+		.mnt = au_br_mnt(br)
+	};
+	struct inode *h_dir;
+
+	h_dir = h_parent->d_inode;
+	mutex_lock_nested(&h_dir->i_mutex, AuLsc_I_CHILD2);
+again:
+	h_path.dentry = vfsub_lkup_one(tgt, h_parent);
+	err = PTR_ERR(h_path.dentry);
+	if (IS_ERR(h_path.dentry))
+		goto out;
+
+	err = 0;
+	/* wh.plink dir is not monitored */
+	/* todo: is it really safe? */
+	if (h_path.dentry->d_inode
+	    && h_path.dentry->d_inode != h_dentry->d_inode) {
+		err = vfsub_unlink(h_dir, &h_path, /*force*/0);
+		dput(h_path.dentry);
+		h_path.dentry = NULL;
+		if (!err)
+			goto again;
+	}
+	if (!err && !h_path.dentry->d_inode)
+		err = vfsub_link(h_dentry, h_dir, &h_path);
+	dput(h_path.dentry);
+
+out:
+	mutex_unlock(&h_dir->i_mutex);
+	return err;
+}
+
+struct do_whplink_args {
+	int *errp;
+	struct qstr *tgt;
+	struct dentry *h_parent;
+	struct dentry *h_dentry;
+	struct au_branch *br;
+};
+
+static void call_do_whplink(void *args)
+{
+	struct do_whplink_args *a = args;
+	*a->errp = do_whplink(a->tgt, a->h_parent, a->h_dentry, a->br);
+}
+
+static int whplink(struct dentry *h_dentry, struct inode *inode,
+		   aufs_bindex_t bindex, struct au_branch *br)
+{
+	int err, wkq_err;
+	struct au_wbr *wbr;
+	struct dentry *h_parent;
+	struct inode *h_dir;
+	char a[PLINK_NAME_LEN];
+	struct qstr tgtname = QSTR_INIT(a, 0);
+
+	wbr = au_sbr(inode->i_sb, bindex)->br_wbr;
+	h_parent = wbr->wbr_plink;
+	h_dir = h_parent->d_inode;
+	tgtname.len = plink_name(a, sizeof(a), inode, bindex);
+
+	/* always superio. */
+	if (!uid_eq(current_fsuid(), GLOBAL_ROOT_UID)) {
+		struct do_whplink_args args = {
+			.errp		= &err,
+			.tgt		= &tgtname,
+			.h_parent	= h_parent,
+			.h_dentry	= h_dentry,
+			.br		= br
+		};
+		wkq_err = au_wkq_wait(call_do_whplink, &args);
+		if (unlikely(wkq_err))
+			err = wkq_err;
+	} else
+		err = do_whplink(&tgtname, h_parent, h_dentry, br);
+
+	return err;
+}
+
+/* free a single plink */
+static void do_put_plink(struct pseudo_link *plink, int do_del)
+{
+	if (do_del)
+		hlist_del(&plink->hlist);
+	iput(plink->inode);
+	kfree(plink);
+}
+
+static void do_put_plink_rcu(struct rcu_head *rcu)
+{
+	struct pseudo_link *plink;
+
+	plink = container_of(rcu, struct pseudo_link, rcu);
+	iput(plink->inode);
+	kfree(plink);
+}
+
+/*
+ * create a new pseudo-link for @h_dentry on @bindex.
+ * the linked inode is held in aufs @inode.
+ */
+void au_plink_append(struct inode *inode, aufs_bindex_t bindex,
+		     struct dentry *h_dentry)
+{
+	struct super_block *sb;
+	struct au_sbinfo *sbinfo;
+	struct hlist_head *plink_hlist;
+	struct pseudo_link *plink, *tmp;
+	struct au_sphlhead *sphl;
+	int found, err, cnt, i;
+
+	sb = inode->i_sb;
+	sbinfo = au_sbi(sb);
+	AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
+	AuDebugOn(au_plink_maint(sb, AuLock_NOPLM));
+
+	found = au_plink_test(inode);
+	if (found)
+		return;
+
+	i = au_plink_hash(inode->i_ino);
+	sphl = sbinfo->si_plink + i;
+	plink_hlist = &sphl->head;
+	tmp = kmalloc(sizeof(*plink), GFP_NOFS);
+	if (tmp) {
+		tmp->inode = au_igrab(inode);
+	} else {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	spin_lock(&sphl->spin);
+	hlist_for_each_entry(plink, plink_hlist, hlist) {
+		if (plink->inode == inode) {
+			found = 1;
+			break;
+		}
+	}
+	if (!found)
+		hlist_add_head_rcu(&tmp->hlist, plink_hlist);
+	spin_unlock(&sphl->spin);
+	if (!found) {
+		cnt = au_sphl_count(sphl);
+#define msg "unexpectedly unblanced or too many pseudo-links"
+		if (cnt > AUFS_PLINK_WARN)
+			AuWarn1(msg ", %d\n", cnt);
+#undef msg
+		err = whplink(h_dentry, inode, bindex, au_sbr(sb, bindex));
+	} else {
+		do_put_plink(tmp, 0);
+		return;
+	}
+
+out:
+	if (unlikely(err)) {
+		pr_warn("err %d, damaged pseudo link.\n", err);
+		if (tmp) {
+			au_sphl_del_rcu(&tmp->hlist, sphl);
+			call_rcu(&tmp->rcu, do_put_plink_rcu);
+		}
+	}
+}
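+
+/*
+ * Illustration only: au_plink_append() above follows the common
+ * "optimistic lookup, allocate, lock, re-check, insert" pattern, which
+ * keeps the sleeping allocation out of the spinlock. As a generic
+ * sketch (all names hypothetical):
+ *
+ *	if (lookup(head, key))		// lockless fast path
+ *		return;
+ *	new = alloc();			// may sleep, no lock held
+ *	spin_lock(&lock);
+ *	found = lookup(head, key);	// re-check under the lock
+ *	if (!found)
+ *		insert(head, new);
+ *	spin_unlock(&lock);
+ *	if (found)
+ *		free(new);		// lost the race
+ */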
+
+/* free all plinks */
+void au_plink_put(struct super_block *sb, int verbose)
+{
+	int i, warned;
+	struct au_sbinfo *sbinfo;
+	struct hlist_head *plink_hlist;
+	struct hlist_node *tmp;
+	struct pseudo_link *plink;
+
+	SiMustWriteLock(sb);
+
+	sbinfo = au_sbi(sb);
+	AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
+	AuDebugOn(au_plink_maint(sb, AuLock_NOPLM));
+
+	/* no spin_lock since sbinfo is write-locked */
+	warned = 0;
+	for (i = 0; i < AuPlink_NHASH; i++) {
+		plink_hlist = &sbinfo->si_plink[i].head;
+		if (!warned && verbose && !hlist_empty(plink_hlist)) {
+			pr_warn("pseudo-link is not flushed");
+			warned = 1;
+		}
+		hlist_for_each_entry_safe(plink, tmp, plink_hlist, hlist)
+			do_put_plink(plink, 0);
+		INIT_HLIST_HEAD(plink_hlist);
+	}
+}
+
+void au_plink_clean(struct super_block *sb, int verbose)
+{
+	struct dentry *root;
+
+	root = sb->s_root;
+	aufs_write_lock(root);
+	if (au_opt_test(au_mntflags(sb), PLINK))
+		au_plink_put(sb, verbose);
+	aufs_write_unlock(root);
+}
+
+static int au_plink_do_half_refresh(struct inode *inode, aufs_bindex_t br_id)
+{
+	int do_put;
+	aufs_bindex_t bstart, bend, bindex;
+
+	do_put = 0;
+	bstart = au_ibstart(inode);
+	bend = au_ibend(inode);
+	if (bstart >= 0) {
+		for (bindex = bstart; bindex <= bend; bindex++) {
+			if (!au_h_iptr(inode, bindex)
+			    || au_ii_br_id(inode, bindex) != br_id)
+				continue;
+			au_set_h_iptr(inode, bindex, NULL, 0);
+			do_put = 1;
+			break;
+		}
+		if (do_put)
+			for (bindex = bstart; bindex <= bend; bindex++)
+				if (au_h_iptr(inode, bindex)) {
+					do_put = 0;
+					break;
+				}
+	} else
+		do_put = 1;
+
+	return do_put;
+}
+
+/* free the plinks on a branch specified by @br_id */
+void au_plink_half_refresh(struct super_block *sb, aufs_bindex_t br_id)
+{
+	struct au_sbinfo *sbinfo;
+	struct hlist_head *plink_hlist;
+	struct hlist_node *tmp;
+	struct pseudo_link *plink;
+	struct inode *inode;
+	int i, do_put;
+
+	SiMustWriteLock(sb);
+
+	sbinfo = au_sbi(sb);
+	AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
+	AuDebugOn(au_plink_maint(sb, AuLock_NOPLM));
+
+	/* no spin_lock since sbinfo is write-locked */
+	for (i = 0; i < AuPlink_NHASH; i++) {
+		plink_hlist = &sbinfo->si_plink[i].head;
+		hlist_for_each_entry_safe(plink, tmp, plink_hlist, hlist) {
+			inode = au_igrab(plink->inode);
+			ii_write_lock_child(inode);
+			do_put = au_plink_do_half_refresh(inode, br_id);
+			if (do_put)
+				do_put_plink(plink, 1);
+			ii_write_unlock(inode);
+			iput(inode);
+		}
+	}
+}
diff --git a/fs/aufs/poll.c b/fs/aufs/poll.c
new file mode 100644
index 0000000..aa5e2ae
--- /dev/null
+++ b/fs/aufs/poll.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * poll operation
+ * Currently, only one filesystem implements the ->poll operation.
+ */
+
+#include "aufs.h"
+
+unsigned int aufs_poll(struct file *file, poll_table *wait)
+{
+	unsigned int mask;
+	int err;
+	struct file *h_file;
+	struct dentry *dentry;
+	struct super_block *sb;
+
+	/* We should pretend an error happened. */
+	mask = POLLERR /* | POLLIN | POLLOUT */;
+	dentry = file->f_dentry;
+	sb = dentry->d_sb;
+	si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+	err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/0);
+	if (unlikely(err))
+		goto out;
+
+	/* it is not an error if h_file has no operation */
+	mask = DEFAULT_POLLMASK;
+	h_file = au_hf_top(file);
+	if (h_file->f_op && h_file->f_op->poll)
+		mask = h_file->f_op->poll(h_file, wait);
+
+	di_read_unlock(dentry, AuLock_IR);
+	fi_read_unlock(file);
+
+out:
+	si_read_unlock(sb);
+	AuTraceErr((int)mask);
+	return mask;
+}
diff --git a/fs/aufs/procfs.c b/fs/aufs/procfs.c
new file mode 100644
index 0000000..7201cdf
--- /dev/null
+++ b/fs/aufs/procfs.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2010-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * procfs interfaces
+ */
+
+#include <linux/proc_fs.h>
+#include "aufs.h"
+
+static int au_procfs_plm_release(struct inode *inode, struct file *file)
+{
+	struct au_sbinfo *sbinfo;
+
+	sbinfo = file->private_data;
+	if (sbinfo) {
+		au_plink_maint_leave(sbinfo);
+		kobject_put(&sbinfo->si_kobj);
+	}
+
+	return 0;
+}
+
+static void au_procfs_plm_write_clean(struct file *file)
+{
+	struct au_sbinfo *sbinfo;
+
+	sbinfo = file->private_data;
+	if (sbinfo)
+		au_plink_clean(sbinfo->si_sb, /*verbose*/0);
+}
+
+static int au_procfs_plm_write_si(struct file *file, unsigned long id)
+{
+	int err;
+	struct super_block *sb;
+	struct au_sbinfo *sbinfo;
+
+	err = -EBUSY;
+	if (unlikely(file->private_data))
+		goto out;
+
+	sb = NULL;
+	/* don't use au_sbilist_lock() here */
+	spin_lock(&au_sbilist.spin);
+	list_for_each_entry(sbinfo, &au_sbilist.head, si_list)
+		if (id == sysaufs_si_id(sbinfo)) {
+			kobject_get(&sbinfo->si_kobj);
+			sb = sbinfo->si_sb;
+			break;
+		}
+	spin_unlock(&au_sbilist.spin);
+
+	err = -EINVAL;
+	if (unlikely(!sb))
+		goto out;
+
+	err = au_plink_maint_enter(sb);
+	if (!err)
+		/* keep kobject_get() */
+		file->private_data = sbinfo;
+	else
+		kobject_put(&sbinfo->si_kobj);
+out:
+	return err;
+}
+
+/*
+ * Accept a valid "si=xxxx" only.
+ * Once it is accepted successfully, accept "clean" too.
+ */
+static ssize_t au_procfs_plm_write(struct file *file, const char __user *ubuf,
+				   size_t count, loff_t *ppos)
+{
+	ssize_t err;
+	unsigned long id;
+	/* "si=" + hex digits of an unsigned long + last newline + NUL */
+	char buf[3 + sizeof(unsigned long) * 2 + 1 + 1];
+
+	err = -EACCES;
+	if (unlikely(!capable(CAP_SYS_ADMIN)))
+		goto out;
+
+	err = -EINVAL;
+	if (unlikely(count >= sizeof(buf)))
+		goto out;
+
+	err = copy_from_user(buf, ubuf, count);
+	if (unlikely(err)) {
+		err = -EFAULT;
+		goto out;
+	}
+	buf[count] = 0;
+
+	err = -EINVAL;
+	if (!strcmp("clean", buf)) {
+		au_procfs_plm_write_clean(file);
+		goto out_success;
+	} else if (unlikely(strncmp("si=", buf, 3)))
+		goto out;
+
+	err = kstrtoul(buf + 3, 16, &id);
+	if (unlikely(err))
+		goto out;
+
+	err = au_procfs_plm_write_si(file, id);
+	if (unlikely(err))
+		goto out;
+
+out_success:
+	err = count; /* success */
+out:
+	return err;
+}
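+
+/*
+ * Illustration only (hypothetical userspace sketch): a maintenance tool
+ * such as /sbin/mount.aufs enters the mode by writing "si=<hex id>" and
+ * leaves it by closing the file, via au_procfs_plm_release() above;
+ * "clean" is accepted once the mode was entered:
+ *
+ *	fd = open("/proc/" AUFS_PLINK_MAINT_DIR "/"
+ *		  AUFS_PLINK_MAINT_NAME, O_WRONLY);
+ *	dprintf(fd, "si=%lx", id);	// enter the maintenance mode
+ *	...				// maintain the pseudo-links
+ *	close(fd);			// leave the mode
+ */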
+
+static const struct file_operations au_procfs_plm_fop = {
+	.write		= au_procfs_plm_write,
+	.release	= au_procfs_plm_release,
+	.owner		= THIS_MODULE
+};
+
+/* ---------------------------------------------------------------------- */
+
+static struct proc_dir_entry *au_procfs_dir;
+
+void au_procfs_fin(void)
+{
+	remove_proc_entry(AUFS_PLINK_MAINT_NAME, au_procfs_dir);
+	remove_proc_entry(AUFS_PLINK_MAINT_DIR, NULL);
+}
+
+int __init au_procfs_init(void)
+{
+	int err;
+	struct proc_dir_entry *entry;
+
+	err = -ENOMEM;
+	au_procfs_dir = proc_mkdir(AUFS_PLINK_MAINT_DIR, NULL);
+	if (unlikely(!au_procfs_dir))
+		goto out;
+
+	entry = proc_create(AUFS_PLINK_MAINT_NAME, S_IFREG | S_IWUSR,
+			    au_procfs_dir, &au_procfs_plm_fop);
+	if (unlikely(!entry))
+		goto out_dir;
+
+	err = 0;
+	goto out; /* success */
+
+out_dir:
+	remove_proc_entry(AUFS_PLINK_MAINT_DIR, NULL);
+out:
+	return err;
+}
diff --git a/fs/aufs/rdu.c b/fs/aufs/rdu.c
new file mode 100644
index 0000000..0dce11e
--- /dev/null
+++ b/fs/aufs/rdu.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * readdir in userspace.
+ */
+
+#include <linux/compat.h>
+#include <linux/fs_stack.h>
+#include <linux/security.h>
+#include "aufs.h"
+
+/* bits for struct aufs_rdu.flags */
+#define	AuRdu_CALLED	1
+#define	AuRdu_CONT	(1 << 1)
+#define	AuRdu_FULL	(1 << 2)
+#define au_ftest_rdu(flags, name)	((flags) & AuRdu_##name)
+#define au_fset_rdu(flags, name) \
+	do { (flags) |= AuRdu_##name; } while (0)
+#define au_fclr_rdu(flags, name) \
+	do { (flags) &= ~AuRdu_##name; } while (0)
+
+struct au_rdu_arg {
+	struct aufs_rdu			*rdu;
+	union au_rdu_ent_ul		ent;
+	unsigned long			end;
+
+	struct super_block		*sb;
+	int				err;
+};
+
+static int au_rdu_fill(void *__arg, const char *name, int nlen,
+		       loff_t offset, u64 h_ino, unsigned int d_type)
+{
+	int err, len;
+	struct au_rdu_arg *arg = __arg;
+	struct aufs_rdu *rdu = arg->rdu;
+	struct au_rdu_ent ent;
+
+	err = 0;
+	arg->err = 0;
+	au_fset_rdu(rdu->cookie.flags, CALLED);
+	len = au_rdu_len(nlen);
+	if (arg->ent.ul + len  < arg->end) {
+		ent.ino = h_ino;
+		ent.bindex = rdu->cookie.bindex;
+		ent.type = d_type;
+		ent.nlen = nlen;
+		if (unlikely(nlen > AUFS_MAX_NAMELEN))
+			ent.type = DT_UNKNOWN;
+
+		/* unnecessary to support mmap_sem since this is a dir */
+		err = -EFAULT;
+		if (copy_to_user(arg->ent.e, &ent, sizeof(ent)))
+			goto out;
+		if (copy_to_user(arg->ent.e->name, name, nlen))
+			goto out;
+		/* the terminating NULL */
+		if (__put_user(0, arg->ent.e->name + nlen))
+			goto out;
+		err = 0;
+		/* AuDbg("%p, %.*s\n", arg->ent.p, nlen, name); */
+		arg->ent.ul += len;
+		rdu->rent++;
+	} else {
+		err = -EFAULT;
+		au_fset_rdu(rdu->cookie.flags, FULL);
+		rdu->full = 1;
+		rdu->tail = arg->ent;
+	}
+
+out:
+	/* AuTraceErr(err); */
+	return err;
+}
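+
+/*
+ * Illustration only: the user buffer filled above becomes a packed
+ * sequence of variable-length records, each an au_rdu_ent header
+ * immediately followed by the NUL-terminated name and padded by
+ * au_rdu_len(). A hypothetical userspace walker:
+ *
+ *	union au_rdu_ent_ul u = rdu.ent;
+ *	unsigned long long i;
+ *
+ *	for (i = 0; i < rdu.rent; i++) {
+ *		handle_entry(u.e->ino, u.e->nlen, u.e->name);
+ *		u.ul += au_rdu_len(u.e->nlen);
+ *	}
+ */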
+
+static int au_rdu_do(struct file *h_file, struct au_rdu_arg *arg)
+{
+	int err;
+	loff_t offset;
+	struct au_rdu_cookie *cookie = &arg->rdu->cookie;
+
+	/* we don't have to care about (FMODE_32BITHASH | FMODE_64BITHASH) for ext4 */
+	offset = vfsub_llseek(h_file, cookie->h_pos, SEEK_SET);
+	err = offset;
+	if (unlikely(offset != cookie->h_pos))
+		goto out;
+
+	err = 0;
+	do {
+		arg->err = 0;
+		au_fclr_rdu(cookie->flags, CALLED);
+		/* smp_mb(); */
+		err = vfsub_readdir(h_file, au_rdu_fill, arg);
+		if (err >= 0)
+			err = arg->err;
+	} while (!err
+		 && au_ftest_rdu(cookie->flags, CALLED)
+		 && !au_ftest_rdu(cookie->flags, FULL));
+	cookie->h_pos = h_file->f_pos;
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+static int au_rdu(struct file *file, struct aufs_rdu *rdu)
+{
+	int err;
+	aufs_bindex_t bend;
+	struct au_rdu_arg arg;
+	struct dentry *dentry;
+	struct inode *inode;
+	struct file *h_file;
+	struct au_rdu_cookie *cookie = &rdu->cookie;
+
+	err = !access_ok(VERIFY_WRITE, rdu->ent.e, rdu->sz);
+	if (unlikely(err)) {
+		err = -EFAULT;
+		AuTraceErr(err);
+		goto out;
+	}
+	rdu->rent = 0;
+	rdu->tail = rdu->ent;
+	rdu->full = 0;
+	arg.rdu = rdu;
+	arg.ent = rdu->ent;
+	arg.end = arg.ent.ul;
+	arg.end += rdu->sz;
+
+	err = -ENOTDIR;
+	if (unlikely(!file->f_op || !file->f_op->readdir))
+		goto out;
+
+	err = security_file_permission(file, MAY_READ);
+	AuTraceErr(err);
+	if (unlikely(err))
+		goto out;
+
+	dentry = file->f_dentry;
+	inode = dentry->d_inode;
+#if 1
+	mutex_lock(&inode->i_mutex);
+#else
+	err = mutex_lock_killable(&inode->i_mutex);
+	AuTraceErr(err);
+	if (unlikely(err))
+		goto out;
+#endif
+
+	arg.sb = inode->i_sb;
+	err = si_read_lock(arg.sb, AuLock_FLUSH | AuLock_NOPLM);
+	if (unlikely(err))
+		goto out_mtx;
+	err = au_alive_dir(dentry);
+	if (unlikely(err))
+		goto out_si;
+	/* todo: reval? */
+	fi_read_lock(file);
+
+	err = -EAGAIN;
+	if (unlikely(au_ftest_rdu(cookie->flags, CONT)
+		     && cookie->generation != au_figen(file)))
+		goto out_unlock;
+
+	err = 0;
+	if (!rdu->blk) {
+		rdu->blk = au_sbi(arg.sb)->si_rdblk;
+		if (!rdu->blk)
+			rdu->blk = au_dir_size(file, /*dentry*/NULL);
+	}
+	bend = au_fbstart(file);
+	if (cookie->bindex < bend)
+		cookie->bindex = bend;
+	bend = au_fbend_dir(file);
+	/* AuDbg("b%d, b%d\n", cookie->bindex, bend); */
+	for (; !err && cookie->bindex <= bend;
+	     cookie->bindex++, cookie->h_pos = 0) {
+		h_file = au_hf_dir(file, cookie->bindex);
+		if (!h_file)
+			continue;
+
+		au_fclr_rdu(cookie->flags, FULL);
+		err = au_rdu_do(h_file, &arg);
+		AuTraceErr(err);
+		if (unlikely(au_ftest_rdu(cookie->flags, FULL) || err))
+			break;
+	}
+	AuDbg("rent %llu\n", rdu->rent);
+
+	if (!err && !au_ftest_rdu(cookie->flags, CONT)) {
+		rdu->shwh = !!au_opt_test(au_sbi(arg.sb)->si_mntflags, SHWH);
+		au_fset_rdu(cookie->flags, CONT);
+		cookie->generation = au_figen(file);
+	}
+
+	ii_read_lock_child(inode);
+	fsstack_copy_attr_atime(inode, au_h_iptr(inode, au_ibstart(inode)));
+	ii_read_unlock(inode);
+
+out_unlock:
+	fi_read_unlock(file);
+out_si:
+	si_read_unlock(arg.sb);
+out_mtx:
+	mutex_unlock(&inode->i_mutex);
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+static int au_rdu_ino(struct file *file, struct aufs_rdu *rdu)
+{
+	int err;
+	ino_t ino;
+	unsigned long long nent;
+	union au_rdu_ent_ul *u;
+	struct au_rdu_ent ent;
+	struct super_block *sb;
+
+	err = 0;
+	nent = rdu->nent;
+	u = &rdu->ent;
+	sb = file->f_dentry->d_sb;
+	si_read_lock(sb, AuLock_FLUSH);
+	while (nent-- > 0) {
+		/* unnecessary to support mmap_sem since this is a dir */
+		err = copy_from_user(&ent, u->e, sizeof(ent));
+		if (!err)
+			err = !access_ok(VERIFY_WRITE, &u->e->ino, sizeof(ino));
+		if (unlikely(err)) {
+			err = -EFAULT;
+			AuTraceErr(err);
+			break;
+		}
+
+		/* AuDbg("b%d, i%llu\n", ent.bindex, ent.ino); */
+		if (!ent.wh)
+			err = au_ino(sb, ent.bindex, ent.ino, ent.type, &ino);
+		else
+			err = au_wh_ino(sb, ent.bindex, ent.ino, ent.type,
+					&ino);
+		if (unlikely(err)) {
+			AuTraceErr(err);
+			break;
+		}
+
+		err = __put_user(ino, &u->e->ino);
+		if (unlikely(err)) {
+			err = -EFAULT;
+			AuTraceErr(err);
+			break;
+		}
+		u->ul += au_rdu_len(ent.nlen);
+	}
+	si_read_unlock(sb);
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_rdu_verify(struct aufs_rdu *rdu)
+{
+	AuDbg("rdu{%llu, %p, %u | %u | %llu, %u, %u | "
+	      "%llu, b%d, 0x%x, g%u}\n",
+	      rdu->sz, rdu->ent.e, rdu->verify[AufsCtlRduV_SZ],
+	      rdu->blk,
+	      rdu->rent, rdu->shwh, rdu->full,
+	      rdu->cookie.h_pos, rdu->cookie.bindex, rdu->cookie.flags,
+	      rdu->cookie.generation);
+
+	if (rdu->verify[AufsCtlRduV_SZ] == sizeof(*rdu))
+		return 0;
+
+	AuDbg("%u:%u\n",
+	      rdu->verify[AufsCtlRduV_SZ], (unsigned int)sizeof(*rdu));
+	return -EINVAL;
+}
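+
+/*
+ * Illustration only: userspace stamps its own structure size into the
+ * verify[] slot before issuing the ioctl, so that a layout mismatch
+ * between library and module is caught early (hypothetical sketch):
+ *
+ *	struct aufs_rdu rdu = { .sz = sizeof(buf) };
+ *
+ *	rdu.ent.e = (struct au_rdu_ent *)buf;
+ *	rdu.verify[AufsCtlRduV_SZ] = sizeof(rdu);
+ *	err = ioctl(dirfd, AUFS_CTL_RDU, &rdu);
+ */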
+
+long au_rdu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	long err, e;
+	struct aufs_rdu rdu;
+	void __user *p = (void __user *)arg;
+
+	err = copy_from_user(&rdu, p, sizeof(rdu));
+	if (unlikely(err)) {
+		err = -EFAULT;
+		AuTraceErr(err);
+		goto out;
+	}
+	err = au_rdu_verify(&rdu);
+	if (unlikely(err))
+		goto out;
+
+	switch (cmd) {
+	case AUFS_CTL_RDU:
+		err = au_rdu(file, &rdu);
+		if (unlikely(err))
+			break;
+
+		e = copy_to_user(p, &rdu, sizeof(rdu));
+		if (unlikely(e)) {
+			err = -EFAULT;
+			AuTraceErr(err);
+		}
+		break;
+	case AUFS_CTL_RDU_INO:
+		err = au_rdu_ino(file, &rdu);
+		break;
+
+	default:
+		/* err = -ENOTTY; */
+		err = -EINVAL;
+	}
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+#ifdef CONFIG_COMPAT
+long au_rdu_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	long err, e;
+	struct aufs_rdu rdu;
+	void __user *p = compat_ptr(arg);
+
+	/* todo: get_user()? */
+	err = copy_from_user(&rdu, p, sizeof(rdu));
+	if (unlikely(err)) {
+		err = -EFAULT;
+		AuTraceErr(err);
+		goto out;
+	}
+	rdu.ent.e = compat_ptr(rdu.ent.ul);
+	err = au_rdu_verify(&rdu);
+	if (unlikely(err))
+		goto out;
+
+	switch (cmd) {
+	case AUFS_CTL_RDU:
+		err = au_rdu(file, &rdu);
+		if (unlikely(err))
+			break;
+
+		rdu.ent.ul = ptr_to_compat(rdu.ent.e);
+		rdu.tail.ul = ptr_to_compat(rdu.tail.e);
+		e = copy_to_user(p, &rdu, sizeof(rdu));
+		if (unlikely(e)) {
+			err = -EFAULT;
+			AuTraceErr(err);
+		}
+		break;
+	case AUFS_CTL_RDU_INO:
+		err = au_rdu_ino(file, &rdu);
+		break;
+
+	default:
+		/* err = -ENOTTY; */
+		err = -EINVAL;
+	}
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+#endif
diff --git a/fs/aufs/rwsem.h b/fs/aufs/rwsem.h
new file mode 100644
index 0000000..a1eb04b
--- /dev/null
+++ b/fs/aufs/rwsem.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * simple read-write semaphore wrappers
+ */
+
+#ifndef __AUFS_RWSEM_H__
+#define __AUFS_RWSEM_H__
+
+#ifdef __KERNEL__
+
+#include "debug.h"
+
+struct au_rwsem {
+	struct rw_semaphore	rwsem;
+#ifdef CONFIG_AUFS_DEBUG
+	/* just for debugging, not an almighty counter */
+	atomic_t		rcnt, wcnt;
+#endif
+};
+
+#ifdef CONFIG_AUFS_DEBUG
+#define AuDbgCntInit(rw) do { \
+	atomic_set(&(rw)->rcnt, 0); \
+	atomic_set(&(rw)->wcnt, 0); \
+	smp_mb(); /* atomic set */ \
+} while (0)
+
+#define AuDbgRcntInc(rw)	atomic_inc(&(rw)->rcnt)
+#define AuDbgRcntDec(rw)	WARN_ON(atomic_dec_return(&(rw)->rcnt) < 0)
+#define AuDbgWcntInc(rw)	atomic_inc(&(rw)->wcnt)
+#define AuDbgWcntDec(rw)	WARN_ON(atomic_dec_return(&(rw)->wcnt) < 0)
+#else
+#define AuDbgCntInit(rw)	do {} while (0)
+#define AuDbgRcntInc(rw)	do {} while (0)
+#define AuDbgRcntDec(rw)	do {} while (0)
+#define AuDbgWcntInc(rw)	do {} while (0)
+#define AuDbgWcntDec(rw)	do {} while (0)
+#endif /* CONFIG_AUFS_DEBUG */
+
+/* to debug more easily, do not make them inline functions */
+#define AuRwMustNoWaiters(rw)	AuDebugOn(!list_empty(&(rw)->rwsem.wait_list))
+/* rwsem_is_locked() is unusable */
+#define AuRwMustReadLock(rw)	AuDebugOn(atomic_read(&(rw)->rcnt) <= 0)
+#define AuRwMustWriteLock(rw)	AuDebugOn(atomic_read(&(rw)->wcnt) <= 0)
+#define AuRwMustAnyLock(rw)	AuDebugOn(atomic_read(&(rw)->rcnt) <= 0 \
+					&& atomic_read(&(rw)->wcnt) <= 0)
+#define AuRwDestroy(rw)		AuDebugOn(atomic_read(&(rw)->rcnt) \
+					|| atomic_read(&(rw)->wcnt))
+
+#define au_rw_class(rw, key)	lockdep_set_class(&(rw)->rwsem, key)
+
+static inline void au_rw_init(struct au_rwsem *rw)
+{
+	AuDbgCntInit(rw);
+	init_rwsem(&rw->rwsem);
+}
+
+static inline void au_rw_init_wlock(struct au_rwsem *rw)
+{
+	au_rw_init(rw);
+	down_write(&rw->rwsem);
+	AuDbgWcntInc(rw);
+}
+
+static inline void au_rw_init_wlock_nested(struct au_rwsem *rw,
+					   unsigned int lsc)
+{
+	au_rw_init(rw);
+	down_write_nested(&rw->rwsem, lsc);
+	AuDbgWcntInc(rw);
+}
+
+static inline void au_rw_read_lock(struct au_rwsem *rw)
+{
+	down_read(&rw->rwsem);
+	AuDbgRcntInc(rw);
+}
+
+static inline void au_rw_read_lock_nested(struct au_rwsem *rw, unsigned int lsc)
+{
+	down_read_nested(&rw->rwsem, lsc);
+	AuDbgRcntInc(rw);
+}
+
+static inline void au_rw_read_unlock(struct au_rwsem *rw)
+{
+	AuRwMustReadLock(rw);
+	AuDbgRcntDec(rw);
+	up_read(&rw->rwsem);
+}
+
+static inline void au_rw_dgrade_lock(struct au_rwsem *rw)
+{
+	AuRwMustWriteLock(rw);
+	AuDbgRcntInc(rw);
+	AuDbgWcntDec(rw);
+	downgrade_write(&rw->rwsem);
+}
+
+static inline void au_rw_write_lock(struct au_rwsem *rw)
+{
+	down_write(&rw->rwsem);
+	AuDbgWcntInc(rw);
+}
+
+static inline void au_rw_write_lock_nested(struct au_rwsem *rw,
+					   unsigned int lsc)
+{
+	down_write_nested(&rw->rwsem, lsc);
+	AuDbgWcntInc(rw);
+}
+
+static inline void au_rw_write_unlock(struct au_rwsem *rw)
+{
+	AuRwMustWriteLock(rw);
+	AuDbgWcntDec(rw);
+	up_write(&rw->rwsem);
+}
+
+/* why is the _nested version not defined? */
+static inline int au_rw_read_trylock(struct au_rwsem *rw)
+{
+	int ret = down_read_trylock(&rw->rwsem);
+	if (ret)
+		AuDbgRcntInc(rw);
+	return ret;
+}
+
+static inline int au_rw_write_trylock(struct au_rwsem *rw)
+{
+	int ret = down_write_trylock(&rw->rwsem);
+	if (ret)
+		AuDbgWcntInc(rw);
+	return ret;
+}
+
+#undef AuDbgCntInit
+#undef AuDbgRcntInc
+#undef AuDbgRcntDec
+#undef AuDbgWcntInc
+#undef AuDbgWcntDec
+
+#define AuSimpleLockRwsemFuncs(prefix, param, rwsem) \
+static inline void prefix##_read_lock(param) \
+{ au_rw_read_lock(rwsem); } \
+static inline void prefix##_write_lock(param) \
+{ au_rw_write_lock(rwsem); } \
+static inline int prefix##_read_trylock(param) \
+{ return au_rw_read_trylock(rwsem); } \
+static inline int prefix##_write_trylock(param) \
+{ return au_rw_write_trylock(rwsem); }
+/* why is the _nested version not defined? */
+/* static inline void prefix##_read_trylock_nested(param, lsc)
+{ au_rw_read_trylock_nested(rwsem, lsc); }
+static inline void prefix##_write_trylock_nested(param, lsc)
+{ au_rw_write_trylock_nested(rwsem, lsc); } */
+
+#define AuSimpleUnlockRwsemFuncs(prefix, param, rwsem) \
+static inline void prefix##_read_unlock(param) \
+{ au_rw_read_unlock(rwsem); } \
+static inline void prefix##_write_unlock(param) \
+{ au_rw_write_unlock(rwsem); } \
+static inline void prefix##_downgrade_lock(param) \
+{ au_rw_dgrade_lock(rwsem); }
+
+#define AuSimpleRwsemFuncs(prefix, param, rwsem) \
+	AuSimpleLockRwsemFuncs(prefix, param, rwsem) \
+	AuSimpleUnlockRwsemFuncs(prefix, param, rwsem)
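+
+/*
+ * Illustration only: a hypothetical instantiation
+ *
+ *	AuSimpleRwsemFuncs(foo, struct foo *f, &f->rw);
+ *
+ * generates foo_read_lock(f), foo_write_lock(f), foo_read_trylock(f),
+ * foo_write_trylock(f), foo_read_unlock(f), foo_write_unlock(f) and
+ * foo_downgrade_lock(f), each forwarding to the au_rw_* helpers above.
+ */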
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_RWSEM_H__ */
diff --git a/fs/aufs/sbinfo.c b/fs/aufs/sbinfo.c
new file mode 100644
index 0000000..e07fc30
--- /dev/null
+++ b/fs/aufs/sbinfo.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * superblock private data
+ */
+
+#include "aufs.h"
+
+/*
+ * they are necessary regardless of whether sysfs is disabled.
+ */
+void au_si_free(struct kobject *kobj)
+{
+	int i;
+	struct au_sbinfo *sbinfo;
+	char *locked __maybe_unused; /* debug only */
+
+	sbinfo = container_of(kobj, struct au_sbinfo, si_kobj);
+	for (i = 0; i < AuPlink_NHASH; i++)
+		AuDebugOn(!hlist_empty(&sbinfo->si_plink[i].head));
+	AuDebugOn(atomic_read(&sbinfo->si_nowait.nw_len));
+
+	au_rw_write_lock(&sbinfo->si_rwsem);
+	au_br_free(sbinfo);
+	au_rw_write_unlock(&sbinfo->si_rwsem);
+
+	AuDebugOn(radix_tree_gang_lookup
+		  (&sbinfo->au_si_pid.tree, (void **)&locked,
+		   /*first_index*/PID_MAX_DEFAULT - 1,
+		   /*max_items*/sizeof(locked)/sizeof(*locked)));
+
+	kfree(sbinfo->si_branch);
+	kfree(sbinfo->au_si_pid.bitmap);
+	mutex_destroy(&sbinfo->si_xib_mtx);
+	AuRwDestroy(&sbinfo->si_rwsem);
+
+	kfree(sbinfo);
+}
+
+int au_si_alloc(struct super_block *sb)
+{
+	int err, i;
+	struct au_sbinfo *sbinfo;
+	static struct lock_class_key aufs_si;
+
+	err = -ENOMEM;
+	sbinfo = kzalloc(sizeof(*sbinfo), GFP_NOFS);
+	if (unlikely(!sbinfo))
+		goto out;
+
+	BUILD_BUG_ON(sizeof(unsigned long) !=
+		     sizeof(*sbinfo->au_si_pid.bitmap));
+	sbinfo->au_si_pid.bitmap = kcalloc(BITS_TO_LONGS(PID_MAX_DEFAULT),
+					sizeof(*sbinfo->au_si_pid.bitmap),
+					GFP_NOFS);
+	if (unlikely(!sbinfo->au_si_pid.bitmap))
+		goto out_sbinfo;
+
+	/* will be reallocated separately */
+	sbinfo->si_branch = kzalloc(sizeof(*sbinfo->si_branch), GFP_NOFS);
+	if (unlikely(!sbinfo->si_branch))
+		goto out_pidmap;
+
+	err = sysaufs_si_init(sbinfo);
+	if (unlikely(err))
+		goto out_br;
+
+	au_nwt_init(&sbinfo->si_nowait);
+	au_rw_init_wlock(&sbinfo->si_rwsem);
+	au_rw_class(&sbinfo->si_rwsem, &aufs_si);
+	spin_lock_init(&sbinfo->au_si_pid.tree_lock);
+	INIT_RADIX_TREE(&sbinfo->au_si_pid.tree, GFP_ATOMIC | __GFP_NOFAIL);
+
+	atomic_long_set(&sbinfo->si_ninodes, 0);
+	atomic_long_set(&sbinfo->si_nfiles, 0);
+
+	sbinfo->si_bend = -1;
+
+	sbinfo->si_wbr_copyup = AuWbrCopyup_Def;
+	sbinfo->si_wbr_create = AuWbrCreate_Def;
+	sbinfo->si_wbr_copyup_ops = au_wbr_copyup_ops + sbinfo->si_wbr_copyup;
+	sbinfo->si_wbr_create_ops = au_wbr_create_ops + sbinfo->si_wbr_create;
+
+	sbinfo->si_mntflags = au_opts_plink(AuOpt_Def);
+
+	mutex_init(&sbinfo->si_xib_mtx);
+	sbinfo->si_xino_brid = -1;
+	/* leave si_xib_last_pindex and si_xib_next_bit */
+
+	sbinfo->si_rdcache = msecs_to_jiffies(AUFS_RDCACHE_DEF * MSEC_PER_SEC);
+	sbinfo->si_rdblk = AUFS_RDBLK_DEF;
+	sbinfo->si_rdhash = AUFS_RDHASH_DEF;
+	sbinfo->si_dirwh = AUFS_DIRWH_DEF;
+
+	for (i = 0; i < AuPlink_NHASH; i++)
+		au_sphl_init(sbinfo->si_plink + i);
+	init_waitqueue_head(&sbinfo->si_plink_wq);
+	spin_lock_init(&sbinfo->si_plink_maint_lock);
+
+	/* leave other members for sysaufs and si_mnt. */
+	sbinfo->si_sb = sb;
+	sb->s_fs_info = sbinfo;
+	si_pid_set(sb);
+	au_debug_sbinfo_init(sbinfo);
+	return 0; /* success */
+
+out_br:
+	kfree(sbinfo->si_branch);
+out_pidmap:
+	kfree(sbinfo->au_si_pid.bitmap);
+out_sbinfo:
+	kfree(sbinfo);
+out:
+	return err;
+}
+
+int au_sbr_realloc(struct au_sbinfo *sbinfo, int nbr)
+{
+	int err, sz;
+	struct au_branch **brp;
+
+	AuRwMustWriteLock(&sbinfo->si_rwsem);
+
+	err = -ENOMEM;
+	sz = sizeof(*brp) * (sbinfo->si_bend + 1);
+	if (unlikely(!sz))
+		sz = sizeof(*brp);
+	brp = au_kzrealloc(sbinfo->si_branch, sz, sizeof(*brp) * nbr, GFP_NOFS);
+	if (brp) {
+		sbinfo->si_branch = brp;
+		err = 0;
+	}
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+unsigned int au_sigen_inc(struct super_block *sb)
+{
+	unsigned int gen;
+
+	SiMustWriteLock(sb);
+
+	gen = ++au_sbi(sb)->si_generation;
+	au_update_digen(sb->s_root);
+	au_update_iigen(sb->s_root->d_inode, /*half*/0);
+	sb->s_root->d_inode->i_version++;
+	return gen;
+}
+
+aufs_bindex_t au_new_br_id(struct super_block *sb)
+{
+	aufs_bindex_t br_id;
+	int i;
+	struct au_sbinfo *sbinfo;
+
+	SiMustWriteLock(sb);
+
+	sbinfo = au_sbi(sb);
+	for (i = 0; i <= AUFS_BRANCH_MAX; i++) {
+		br_id = ++sbinfo->si_last_br_id;
+		AuDebugOn(br_id < 0);
+		if (br_id && au_br_index(sb, br_id) < 0)
+			return br_id;
+	}
+
+	return -1;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* it is ok that new 'nwt' tasks are appended while we are sleeping */
+int si_read_lock(struct super_block *sb, int flags)
+{
+	int err;
+
+	err = 0;
+	if (au_ftest_lock(flags, FLUSH))
+		au_nwt_flush(&au_sbi(sb)->si_nowait);
+
+	si_noflush_read_lock(sb);
+	err = au_plink_maint(sb, flags);
+	if (unlikely(err))
+		si_read_unlock(sb);
+
+	return err;
+}
+
+int si_write_lock(struct super_block *sb, int flags)
+{
+	int err;
+
+	if (au_ftest_lock(flags, FLUSH))
+		au_nwt_flush(&au_sbi(sb)->si_nowait);
+
+	si_noflush_write_lock(sb);
+	err = au_plink_maint(sb, flags);
+	if (unlikely(err))
+		si_write_unlock(sb);
+
+	return err;
+}
+
+/* dentry and super_block lock. call at entry point */
+int aufs_read_lock(struct dentry *dentry, int flags)
+{
+	int err;
+	struct super_block *sb;
+
+	sb = dentry->d_sb;
+	err = si_read_lock(sb, flags);
+	if (unlikely(err))
+		goto out;
+
+	if (au_ftest_lock(flags, DW))
+		di_write_lock_child(dentry);
+	else
+		di_read_lock_child(dentry, flags);
+
+	if (au_ftest_lock(flags, GEN)) {
+		err = au_digen_test(dentry, au_sigen(sb));
+		AuDebugOn(!err && au_dbrange_test(dentry));
+		if (unlikely(err))
+			aufs_read_unlock(dentry, flags);
+	}
+
+out:
+	return err;
+}
+
+void aufs_read_unlock(struct dentry *dentry, int flags)
+{
+	if (au_ftest_lock(flags, DW))
+		di_write_unlock(dentry);
+	else
+		di_read_unlock(dentry, flags);
+	si_read_unlock(dentry->d_sb);
+}
+
+void aufs_write_lock(struct dentry *dentry)
+{
+	si_write_lock(dentry->d_sb, AuLock_FLUSH | AuLock_NOPLMW);
+	di_write_lock_child(dentry);
+}
+
+void aufs_write_unlock(struct dentry *dentry)
+{
+	di_write_unlock(dentry);
+	si_write_unlock(dentry->d_sb);
+}
+
+int aufs_read_and_write_lock2(struct dentry *d1, struct dentry *d2, int flags)
+{
+	int err;
+	unsigned int sigen;
+	struct super_block *sb;
+
+	sb = d1->d_sb;
+	err = si_read_lock(sb, flags);
+	if (unlikely(err))
+		goto out;
+
+	di_write_lock2_child(d1, d2, au_ftest_lock(flags, DIR));
+
+	if (au_ftest_lock(flags, GEN)) {
+		sigen = au_sigen(sb);
+		err = au_digen_test(d1, sigen);
+		AuDebugOn(!err && au_dbrange_test(d1));
+		if (!err) {
+			err = au_digen_test(d2, sigen);
+			AuDebugOn(!err && au_dbrange_test(d2));
+		}
+		if (unlikely(err))
+			aufs_read_and_write_unlock2(d1, d2);
+	}
+
+out:
+	return err;
+}
+
+void aufs_read_and_write_unlock2(struct dentry *d1, struct dentry *d2)
+{
+	di_write_unlock2(d1, d2);
+	si_read_unlock(d1->d_sb);
+}
+
+/* ---------------------------------------------------------------------- */
+
+int si_pid_test_slow(struct super_block *sb)
+{
+	void *p;
+
+	rcu_read_lock();
+	p = radix_tree_lookup(&au_sbi(sb)->au_si_pid.tree, current->pid);
+	rcu_read_unlock();
+
+	return (long)!!p;
+}
+
+void si_pid_set_slow(struct super_block *sb)
+{
+	int err;
+	struct au_sbinfo *sbinfo;
+
+	AuDebugOn(si_pid_test_slow(sb));
+
+	sbinfo = au_sbi(sb);
+	err = radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
+	AuDebugOn(err);
+	spin_lock(&sbinfo->au_si_pid.tree_lock);
+	err = radix_tree_insert(&sbinfo->au_si_pid.tree, current->pid,
+				/*any valid ptr*/sb);
+	spin_unlock(&sbinfo->au_si_pid.tree_lock);
+	AuDebugOn(err);
+	radix_tree_preload_end();
+}
+
+void si_pid_clr_slow(struct super_block *sb)
+{
+	void *p;
+	struct au_sbinfo *sbinfo;
+
+	AuDebugOn(!si_pid_test_slow(sb));
+
+	sbinfo = au_sbi(sb);
+	spin_lock(&sbinfo->au_si_pid.tree_lock);
+	p = radix_tree_delete(&sbinfo->au_si_pid.tree, current->pid);
+	spin_unlock(&sbinfo->au_si_pid.tree_lock);
+}
diff --git a/fs/aufs/spl.h b/fs/aufs/spl.h
new file mode 100644
index 0000000..2d53e87
--- /dev/null
+++ b/fs/aufs/spl.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * simple list protected by a spinlock
+ */
+
+#ifndef __AUFS_SPL_H__
+#define __AUFS_SPL_H__
+
+#ifdef __KERNEL__
+
+struct au_splhead {
+	spinlock_t		spin;
+	struct list_head	head;
+};
+
+static inline void au_spl_init(struct au_splhead *spl)
+{
+	spin_lock_init(&spl->spin);
+	INIT_LIST_HEAD(&spl->head);
+}
+
+static inline void au_spl_add(struct list_head *list, struct au_splhead *spl)
+{
+	spin_lock(&spl->spin);
+	list_add(list, &spl->head);
+	spin_unlock(&spl->spin);
+}
+
+static inline void au_spl_del(struct list_head *list, struct au_splhead *spl)
+{
+	spin_lock(&spl->spin);
+	list_del(list);
+	spin_unlock(&spl->spin);
+}
+
+static inline void au_spl_del_rcu(struct list_head *list,
+				  struct au_splhead *spl)
+{
+	spin_lock(&spl->spin);
+	list_del_rcu(list);
+	spin_unlock(&spl->spin);
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_sphlhead {
+	spinlock_t		spin;
+	struct hlist_head	head;
+};
+
+static inline void au_sphl_init(struct au_sphlhead *sphl)
+{
+	spin_lock_init(&sphl->spin);
+	INIT_HLIST_HEAD(&sphl->head);
+}
+
+static inline void au_sphl_add(struct hlist_node *hlist,
+			       struct au_sphlhead *sphl)
+{
+	spin_lock(&sphl->spin);
+	hlist_add_head(hlist, &sphl->head);
+	spin_unlock(&sphl->spin);
+}
+
+static inline void au_sphl_del(struct hlist_node *hlist,
+			       struct au_sphlhead *sphl)
+{
+	spin_lock(&sphl->spin);
+	hlist_del(hlist);
+	spin_unlock(&sphl->spin);
+}
+
+static inline void au_sphl_del_rcu(struct hlist_node *hlist,
+				   struct au_sphlhead *sphl)
+{
+	spin_lock(&sphl->spin);
+	hlist_del_rcu(hlist);
+	spin_unlock(&sphl->spin);
+}
+
+static inline unsigned long au_sphl_count(struct au_sphlhead *sphl)
+{
+	unsigned long cnt;
+	struct hlist_node *pos;
+
+	cnt = 0;
+	spin_lock(&sphl->spin);
+	hlist_for_each(pos, &sphl->head)
+		cnt++;
+	spin_unlock(&sphl->spin);
+	return cnt;
+}
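+
+/*
+ * Illustration only: a minimal usage sketch of the hashed variant
+ * (obj and its hlist member are hypothetical):
+ *
+ *	struct au_sphlhead sphl;
+ *
+ *	au_sphl_init(&sphl);
+ *	au_sphl_add(&obj->hlist, &sphl);	// locked insertion
+ *	n = au_sphl_count(&sphl);		// O(n) walk under the lock
+ *	au_sphl_del(&obj->hlist, &sphl);	// locked removal
+ */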
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_SPL_H__ */
diff --git a/fs/aufs/super.c b/fs/aufs/super.c
new file mode 100644
index 0000000..3955f86
--- /dev/null
+++ b/fs/aufs/super.c
@@ -0,0 +1,992 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * mount and super_block operations
+ */
+
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/statfs.h>
+#include <linux/vmalloc.h>
+#include <linux/writeback.h>
+#include "aufs.h"
+
+/*
+ * super_operations
+ */
+static struct inode *aufs_alloc_inode(struct super_block *sb __maybe_unused)
+{
+	struct au_icntnr *c;
+
+	c = au_cache_alloc_icntnr();
+	if (c) {
+		au_icntnr_init(c);
+		c->vfs_inode.i_version = 1; /* sigen(sb); */
+		c->iinfo.ii_hinode = NULL;
+		return &c->vfs_inode;
+	}
+	return NULL;
+}
+
+static void aufs_destroy_inode_cb(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+
+	INIT_HLIST_HEAD(&inode->i_dentry);
+	au_cache_free_icntnr(container_of(inode, struct au_icntnr, vfs_inode));
+}
+
+static void aufs_destroy_inode(struct inode *inode)
+{
+	au_iinfo_fin(inode);
+	call_rcu(&inode->i_rcu, aufs_destroy_inode_cb);
+}
+
+struct inode *au_iget_locked(struct super_block *sb, ino_t ino)
+{
+	struct inode *inode;
+	int err;
+
+	inode = iget_locked(sb, ino);
+	if (unlikely(!inode)) {
+		inode = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+	if (!(inode->i_state & I_NEW))
+		goto out;
+
+	err = au_xigen_new(inode);
+	if (!err)
+		err = au_iinfo_init(inode);
+	if (!err)
+		inode->i_version++;
+	else {
+		iget_failed(inode);
+		inode = ERR_PTR(err);
+	}
+
+out:
+	/* never return NULL */
+	AuDebugOn(!inode);
+	AuTraceErrPtr(inode);
+	return inode;
+}
+
+/* lock free root dinfo */
+static int au_show_brs(struct seq_file *seq, struct super_block *sb)
+{
+	int err;
+	aufs_bindex_t bindex, bend;
+	struct path path;
+	struct au_hdentry *hdp;
+	struct au_branch *br;
+	char *perm;
+
+	err = 0;
+	bend = au_sbend(sb);
+	hdp = au_di(sb->s_root)->di_hdentry;
+	for (bindex = 0; !err && bindex <= bend; bindex++) {
+		br = au_sbr(sb, bindex);
+		path.mnt = au_br_mnt(br);
+		path.dentry = hdp[bindex].hd_dentry;
+		err = au_seq_path(seq, &path);
+		if (err > 0) {
+			perm = au_optstr_br_perm(br->br_perm);
+			if (perm) {
+				err = seq_printf(seq, "=%s", perm);
+				kfree(perm);
+				if (err == -1)
+					err = -E2BIG;
+			} else
+				err = -ENOMEM;
+		}
+		if (!err && bindex != bend)
+			err = seq_putc(seq, ':');
+	}
+
+	return err;
+}
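+
+/*
+ * Illustrative output for two branches (paths are hypothetical):
+ * "/writable=rw:/readonly=ro"
+ */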
+
+static void au_show_wbr_create(struct seq_file *m, int v,
+			       struct au_sbinfo *sbinfo)
+{
+	const char *pat;
+
+	AuRwMustAnyLock(&sbinfo->si_rwsem);
+
+	seq_puts(m, ",create=");
+	pat = au_optstr_wbr_create(v);
+	switch (v) {
+	case AuWbrCreate_TDP:
+	case AuWbrCreate_RR:
+	case AuWbrCreate_MFS:
+	case AuWbrCreate_PMFS:
+		seq_puts(m, pat);
+		break;
+	case AuWbrCreate_MFSV:
+		seq_printf(m, /*pat*/"mfs:%lu",
+			   jiffies_to_msecs(sbinfo->si_wbr_mfs.mfs_expire)
+			   / MSEC_PER_SEC);
+		break;
+	case AuWbrCreate_PMFSV:
+		seq_printf(m, /*pat*/"pmfs:%lu",
+			   jiffies_to_msecs(sbinfo->si_wbr_mfs.mfs_expire)
+			   / MSEC_PER_SEC);
+		break;
+	case AuWbrCreate_MFSRR:
+		seq_printf(m, /*pat*/"mfsrr:%llu",
+			   sbinfo->si_wbr_mfs.mfsrr_watermark);
+		break;
+	case AuWbrCreate_MFSRRV:
+		seq_printf(m, /*pat*/"mfsrr:%llu:%lu",
+			   sbinfo->si_wbr_mfs.mfsrr_watermark,
+			   jiffies_to_msecs(sbinfo->si_wbr_mfs.mfs_expire)
+			   / MSEC_PER_SEC);
+		break;
+	}
+}
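+
+/*
+ * e.g. (illustrative) AuWbrCreate_MFSV with a 30 second expiry is rendered
+ * as ",create=mfs:30".
+ */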
+
+static int au_show_xino(struct seq_file *seq, struct super_block *sb)
+{
+#ifdef CONFIG_SYSFS
+	return 0;
+#else
+	int err;
+	const int len = sizeof(AUFS_XINO_FNAME) - 1;
+	aufs_bindex_t bindex, brid;
+	struct qstr *name;
+	struct file *f;
+	struct dentry *d, *h_root;
+	struct au_hdentry *hdp;
+
+	SiMustAnyLock(sb);
+
+	err = 0;
+	f = au_sbi(sb)->si_xib;
+	if (!f)
+		goto out;
+
+	/*
+	 * don't print the xino path when it is the default one on the
+	 * first writable branch
+	 */
+	h_root = NULL;
+	brid = au_xino_brid(sb);
+	if (brid >= 0) {
+		bindex = au_br_index(sb, brid);
+		hdp = au_di(sb->s_root)->di_hdentry;
+		h_root = hdp[0 + bindex].hd_dentry;
+	}
+	d = f->f_dentry;
+	name = &d->d_name;
+	/* safe ->d_parent because the file is unlinked */
+	if (d->d_parent == h_root
+	    && name->len == len
+	    && !memcmp(name->name, AUFS_XINO_FNAME, len))
+		goto out;
+
+	seq_puts(seq, ",xino=");
+	err = au_xino_path(seq, f);
+
+out:
+	return err;
+#endif
+}
+
+/* seq_file will call this function again if the string is too long */
+static int aufs_show_options(struct seq_file *m, struct dentry *dentry)
+{
+	int err;
+	unsigned int mnt_flags, v;
+	struct super_block *sb;
+	struct au_sbinfo *sbinfo;
+
+#define AuBool(name, str) do { \
+	v = au_opt_test(mnt_flags, name); \
+	if (v != au_opt_test(AuOpt_Def, name)) \
+		seq_printf(m, ",%s" #str, v ? "" : "no"); \
+} while (0)
+
+#define AuStr(name, str) do { \
+	v = mnt_flags & AuOptMask_##name; \
+	if (v != (AuOpt_Def & AuOptMask_##name)) \
+		seq_printf(m, "," #str "=%s", au_optstr_##str(v)); \
+} while (0)
+
+#define AuUInt(name, str, val) do { \
+	if (val != AUFS_##name##_DEF) \
+		seq_printf(m, "," #str "=%u", val); \
+} while (0)
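+
+	/*
+	 * e.g. (illustrative) AuBool(PLINK, plink) compares the current
+	 * PLINK bit with its compile-time default and prints ",plink" or
+	 * ",noplink" only when they differ.
+	 */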
+
+	/* lock free root dinfo */
+	sb = dentry->d_sb;
+	si_noflush_read_lock(sb);
+	sbinfo = au_sbi(sb);
+	seq_printf(m, ",si=%lx", sysaufs_si_id(sbinfo));
+
+	mnt_flags = au_mntflags(sb);
+	if (au_opt_test(mnt_flags, XINO)) {
+		err = au_show_xino(m, sb);
+		if (unlikely(err))
+			goto out;
+	} else
+		seq_puts(m, ",noxino");
+
+	AuBool(TRUNC_XINO, trunc_xino);
+	AuStr(UDBA, udba);
+	AuBool(SHWH, shwh);
+	AuBool(PLINK, plink);
+	AuBool(DIO, dio);
+	/* AuBool(DIRPERM1, dirperm1); */
+	/* AuBool(REFROF, refrof); */
+
+	v = sbinfo->si_wbr_create;
+	if (v != AuWbrCreate_Def)
+		au_show_wbr_create(m, v, sbinfo);
+
+	v = sbinfo->si_wbr_copyup;
+	if (v != AuWbrCopyup_Def)
+		seq_printf(m, ",cpup=%s", au_optstr_wbr_copyup(v));
+
+	v = au_opt_test(mnt_flags, ALWAYS_DIROPQ);
+	if (v != au_opt_test(AuOpt_Def, ALWAYS_DIROPQ))
+		seq_printf(m, ",diropq=%c", v ? 'a' : 'w');
+
+	AuUInt(DIRWH, dirwh, sbinfo->si_dirwh);
+
+	v = jiffies_to_msecs(sbinfo->si_rdcache) / MSEC_PER_SEC;
+	AuUInt(RDCACHE, rdcache, v);
+
+	AuUInt(RDBLK, rdblk, sbinfo->si_rdblk);
+	AuUInt(RDHASH, rdhash, sbinfo->si_rdhash);
+
+	AuBool(SUM, sum);
+	/* AuBool(SUM_W, wsum); */
+	AuBool(WARN_PERM, warn_perm);
+	AuBool(VERBOSE, verbose);
+
+out:
+	/* be sure to print "br:" last */
+	if (!sysaufs_brs) {
+		seq_puts(m, ",br:");
+		au_show_brs(m, sb);
+	}
+	si_read_unlock(sb);
+	return 0;
+
+#undef AuBool
+#undef AuStr
+#undef AuUInt
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* sum mode which returns the summation for statfs(2) */
+
+static u64 au_add_till_max(u64 a, u64 b)
+{
+	u64 old;
+
+	old = a;
+	a += b;
+	if (old <= a)
+		return a;
+	return ULLONG_MAX;
+}
+
+static u64 au_mul_till_max(u64 a, long mul)
+{
+	u64 old;
+
+	old = a;
+	a *= mul;
+	if (old <= a)
+		return a;
+	return ULLONG_MAX;
+}
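+
+/*
+ * Both helpers saturate to ULLONG_MAX instead of wrapping. e.g.
+ * (illustrative) au_add_till_max(ULLONG_MAX - 1, 2) wraps to 0, the
+ * 'old <= a' test fails and ULLONG_MAX is returned. For multiplication the
+ * same test is only a best-effort check, not an exact overflow detector.
+ */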
+
+static int au_statfs_sum(struct super_block *sb, struct kstatfs *buf)
+{
+	int err;
+	long bsize, factor;
+	u64 blocks, bfree, bavail, files, ffree;
+	aufs_bindex_t bend, bindex, i;
+	unsigned char shared;
+	struct path h_path;
+	struct super_block *h_sb;
+
+	err = 0;
+	bsize = LONG_MAX;
+	files = 0;
+	ffree = 0;
+	blocks = 0;
+	bfree = 0;
+	bavail = 0;
+	bend = au_sbend(sb);
+	for (bindex = 0; bindex <= bend; bindex++) {
+		h_path.mnt = au_sbr_mnt(sb, bindex);
+		h_sb = h_path.mnt->mnt_sb;
+		shared = 0;
+		for (i = 0; !shared && i < bindex; i++)
+			shared = (au_sbr_sb(sb, i) == h_sb);
+		if (shared)
+			continue;
+
+		/* sb->s_root for NFS is unreliable */
+		h_path.dentry = h_path.mnt->mnt_root;
+		err = vfs_statfs(&h_path, buf);
+		if (unlikely(err))
+			goto out;
+
+		if (bsize > buf->f_bsize) {
+			/*
+			 * we are about to reduce bsize, so the accumulated
+			 * blocks etc. must be scaled up to the new unit
+			 */
+			factor = (bsize / buf->f_bsize);
+			blocks = au_mul_till_max(blocks, factor);
+			bfree = au_mul_till_max(bfree, factor);
+			bavail = au_mul_till_max(bavail, factor);
+			bsize = buf->f_bsize;
+		}
+
+		factor = (buf->f_bsize / bsize);
+		blocks = au_add_till_max(blocks,
+				au_mul_till_max(buf->f_blocks, factor));
+		bfree = au_add_till_max(bfree,
+				au_mul_till_max(buf->f_bfree, factor));
+		bavail = au_add_till_max(bavail,
+				au_mul_till_max(buf->f_bavail, factor));
+		files = au_add_till_max(files, buf->f_files);
+		ffree = au_add_till_max(ffree, buf->f_ffree);
+	}
+
+	buf->f_bsize = bsize;
+	buf->f_blocks = blocks;
+	buf->f_bfree = bfree;
+	buf->f_bavail = bavail;
+	buf->f_files = files;
+	buf->f_ffree = ffree;
+	buf->f_frsize = 0;
+
+out:
+	return err;
+}
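+
+/*
+ * Worked example (illustrative): with branch 0 at f_bsize == 4096 and
+ * branch 1 at f_bsize == 1024, bsize drops to 1024 at branch 1 and the
+ * counters accumulated so far are multiplied by factor 4, so all branches
+ * are summed in 1024-byte units.
+ */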
+
+static int aufs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+	int err;
+	struct path h_path;
+	struct super_block *sb;
+
+	/* lock free root dinfo */
+	sb = dentry->d_sb;
+	si_noflush_read_lock(sb);
+	if (!au_opt_test(au_mntflags(sb), SUM)) {
+		/* sb->s_root for NFS is unreliable */
+		h_path.mnt = au_sbr_mnt(sb, 0);
+		h_path.dentry = h_path.mnt->mnt_root;
+		err = vfs_statfs(&h_path, buf);
+	} else
+		err = au_statfs_sum(sb, buf);
+	si_read_unlock(sb);
+
+	if (!err) {
+		buf->f_type = AUFS_SUPER_MAGIC;
+		buf->f_namelen = AUFS_MAX_NAMELEN;
+		memset(&buf->f_fsid, 0, sizeof(buf->f_fsid));
+	}
+	/* buf->f_bsize = buf->f_blocks = buf->f_bfree = buf->f_bavail = -1; */
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int aufs_sync_fs(struct super_block *sb, int wait)
+{
+	int err, e;
+	aufs_bindex_t bend, bindex;
+	struct au_branch *br;
+	struct super_block *h_sb;
+
+	err = 0;
+	si_noflush_read_lock(sb);
+	bend = au_sbend(sb);
+	for (bindex = 0; bindex <= bend; bindex++) {
+		br = au_sbr(sb, bindex);
+		if (!au_br_writable(br->br_perm))
+			continue;
+
+		h_sb = au_sbr_sb(sb, bindex);
+		if (h_sb->s_op->sync_fs) {
+			e = h_sb->s_op->sync_fs(h_sb, wait);
+			if (unlikely(e && !err))
+				err = e;
+			/* go on even if an error happens */
+		}
+	}
+	si_read_unlock(sb);
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* final actions when unmounting a file system */
+static void aufs_put_super(struct super_block *sb)
+{
+	struct au_sbinfo *sbinfo;
+
+	sbinfo = au_sbi(sb);
+	if (!sbinfo)
+		return;
+
+	dbgaufs_si_fin(sbinfo);
+	kobject_put(&sbinfo->si_kobj);
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_array_free(void *array)
+{
+	if (array) {
+		if (!is_vmalloc_addr(array))
+			kfree(array);
+		else
+			vfree(array);
+	}
+}
+
+void *au_array_alloc(unsigned long long *hint, au_arraycb_t cb, void *arg)
+{
+	void *array;
+	unsigned long long n;
+
+	array = NULL;
+	n = 0;
+	if (!*hint)
+		goto out;
+
+	if (*hint > ULLONG_MAX / sizeof(array)) {
+		array = ERR_PTR(-EMFILE);
+		pr_err("hint %llu\n", *hint);
+		goto out;
+	}
+
+	array = kmalloc(sizeof(array) * *hint, GFP_NOFS);
+	if (unlikely(!array))
+		array = vmalloc(sizeof(array) * *hint);
+	if (unlikely(!array)) {
+		array = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	n = cb(array, *hint, arg);
+	AuDebugOn(n > *hint);
+
+out:
+	*hint = n;
+	return array;
+}
+
+static unsigned long long au_iarray_cb(void *a,
+				       unsigned long long max __maybe_unused,
+				       void *arg)
+{
+	unsigned long long n;
+	struct inode **p, *inode;
+	struct list_head *head;
+
+	n = 0;
+	p = a;
+	head = arg;
+	spin_lock(&inode_sb_list_lock);
+	list_for_each_entry(inode, head, i_sb_list) {
+		if (!is_bad_inode(inode)
+		    && au_ii(inode)->ii_bstart >= 0) {
+			spin_lock(&inode->i_lock);
+			if (atomic_read(&inode->i_count)) {
+				au_igrab(inode);
+				*p++ = inode;
+				n++;
+				AuDebugOn(n > max);
+			}
+			spin_unlock(&inode->i_lock);
+		}
+	}
+	spin_unlock(&inode_sb_list_lock);
+
+	return n;
+}
+
+struct inode **au_iarray_alloc(struct super_block *sb, unsigned long long *max)
+{
+	*max = atomic_long_read(&au_sbi(sb)->si_ninodes);
+	return au_array_alloc(max, au_iarray_cb, &sb->s_inodes);
+}
+
+void au_iarray_free(struct inode **a, unsigned long long max)
+{
+	unsigned long long ull;
+
+	for (ull = 0; ull < max; ull++)
+		iput(a[ull]);
+	au_array_free(a);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * refresh dentry and inode at remount time.
+ */
+/* todo: consolidate with simple_reval_dpath() and au_reval_for_attr() */
+static int au_do_refresh(struct dentry *dentry, unsigned int dir_flags,
+		      struct dentry *parent)
+{
+	int err;
+
+	di_write_lock_child(dentry);
+	di_read_lock_parent(parent, AuLock_IR);
+	err = au_refresh_dentry(dentry, parent);
+	if (!err && dir_flags)
+		au_hn_reset(dentry->d_inode, dir_flags);
+	di_read_unlock(parent, AuLock_IR);
+	di_write_unlock(dentry);
+
+	return err;
+}
+
+static int au_do_refresh_d(struct dentry *dentry, unsigned int sigen,
+			   struct au_sbinfo *sbinfo,
+			   const unsigned int dir_flags)
+{
+	int err;
+	struct dentry *parent;
+	struct inode *inode;
+
+	err = 0;
+	parent = dget_parent(dentry);
+	if (!au_digen_test(parent, sigen) && au_digen_test(dentry, sigen)) {
+		inode = dentry->d_inode;
+		if (inode) {
+			if (!S_ISDIR(inode->i_mode))
+				err = au_do_refresh(dentry, /*dir_flags*/0,
+						 parent);
+			else {
+				err = au_do_refresh(dentry, dir_flags, parent);
+				if (unlikely(err))
+					au_fset_si(sbinfo, FAILED_REFRESH_DIR);
+			}
+		} else
+			err = au_do_refresh(dentry, /*dir_flags*/0, parent);
+		AuDbgDentry(dentry);
+	}
+	dput(parent);
+
+	AuTraceErr(err);
+	return err;
+}
+
+static int au_refresh_d(struct super_block *sb)
+{
+	int err, i, j, ndentry, e;
+	unsigned int sigen;
+	struct au_dcsub_pages dpages;
+	struct au_dpage *dpage;
+	struct dentry **dentries, *d;
+	struct au_sbinfo *sbinfo;
+	struct dentry *root = sb->s_root;
+	const unsigned int dir_flags = au_hi_flags(root->d_inode, /*isdir*/1);
+
+	err = au_dpages_init(&dpages, GFP_NOFS);
+	if (unlikely(err))
+		goto out;
+	err = au_dcsub_pages(&dpages, root, NULL, NULL);
+	if (unlikely(err))
+		goto out_dpages;
+
+	sigen = au_sigen(sb);
+	sbinfo = au_sbi(sb);
+	for (i = 0; i < dpages.ndpage; i++) {
+		dpage = dpages.dpages + i;
+		dentries = dpage->dentries;
+		ndentry = dpage->ndentry;
+		for (j = 0; j < ndentry; j++) {
+			d = dentries[j];
+			e = au_do_refresh_d(d, sigen, sbinfo, dir_flags);
+			if (unlikely(e && !err))
+				err = e;
+			/* go on even if an error occurred */
+		}
+	}
+
+out_dpages:
+	au_dpages_free(&dpages);
+out:
+	return err;
+}
+
+static int au_refresh_i(struct super_block *sb)
+{
+	int err, e;
+	unsigned int sigen;
+	unsigned long long max, ull;
+	struct inode *inode, **array;
+
+	array = au_iarray_alloc(sb, &max);
+	err = PTR_ERR(array);
+	if (IS_ERR(array))
+		goto out;
+
+	err = 0;
+	sigen = au_sigen(sb);
+	for (ull = 0; ull < max; ull++) {
+		inode = array[ull];
+		if (au_iigen(inode, NULL) != sigen) {
+			ii_write_lock_child(inode);
+			e = au_refresh_hinode_self(inode);
+			ii_write_unlock(inode);
+			if (unlikely(e)) {
+				pr_err("error %d, i%lu\n", e, inode->i_ino);
+				if (!err)
+					err = e;
+				/* go on even if an error occurred */
+			}
+		}
+	}
+
+	au_iarray_free(array, max);
+
+out:
+	return err;
+}
+
+static void au_remount_refresh(struct super_block *sb)
+{
+	int err, e;
+	unsigned int udba;
+	aufs_bindex_t bindex, bend;
+	struct dentry *root;
+	struct inode *inode;
+	struct au_branch *br;
+
+	au_sigen_inc(sb);
+	au_fclr_si(au_sbi(sb), FAILED_REFRESH_DIR);
+
+	root = sb->s_root;
+	DiMustNoWaiters(root);
+	inode = root->d_inode;
+	IiMustNoWaiters(inode);
+
+	udba = au_opt_udba(sb);
+	bend = au_sbend(sb);
+	for (bindex = 0; bindex <= bend; bindex++) {
+		br = au_sbr(sb, bindex);
+		err = au_hnotify_reset_br(udba, br, br->br_perm);
+		if (unlikely(err))
+			AuIOErr("hnotify failed on br %d, %d, ignored\n",
+				bindex, err);
+		/* go on even if an error occurred */
+	}
+	au_hn_reset(inode, au_hi_flags(inode, /*isdir*/1));
+
+	di_write_unlock(root);
+	err = au_refresh_d(sb);
+	e = au_refresh_i(sb);
+	if (unlikely(e && !err))
+		err = e;
+	/* aufs_write_lock() calls ..._child() */
+	di_write_lock_child(root);
+
+	au_cpup_attr_all(inode, /*force*/1);
+
+	if (unlikely(err))
+		AuIOErr("refresh failed, ignored, %d\n", err);
+}
+
+/* prevent mount(8) from over-interpreting errno and printing strange messages */
+static int cvt_err(int err)
+{
+	AuTraceErr(err);
+
+	switch (err) {
+	case -ENOENT:
+	case -ENOTDIR:
+	case -EEXIST:
+	case -EIO:
+		err = -EINVAL;
+	}
+	return err;
+}
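+
+/*
+ * e.g. (illustrative) an internal -ENOENT would otherwise tempt mount(8)
+ * into reporting that the mount point does not exist, so such codes are
+ * folded into the generic -EINVAL.
+ */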
+
+static int aufs_remount_fs(struct super_block *sb, int *flags, char *data)
+{
+	int err, do_dx;
+	unsigned int mntflags;
+	struct au_opts opts;
+	struct dentry *root;
+	struct inode *inode;
+	struct au_sbinfo *sbinfo;
+
+	err = 0;
+	root = sb->s_root;
+	if (!data || !*data) {
+		err = si_write_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+		if (!err) {
+			di_write_lock_child(root);
+			err = au_opts_verify(sb, *flags, /*pending*/0);
+			aufs_write_unlock(root);
+		}
+		goto out;
+	}
+
+	err = -ENOMEM;
+	memset(&opts, 0, sizeof(opts));
+	opts.opt = (void *)__get_free_page(GFP_NOFS);
+	if (unlikely(!opts.opt))
+		goto out;
+	opts.max_opt = PAGE_SIZE / sizeof(*opts.opt);
+	opts.flags = AuOpts_REMOUNT;
+	opts.sb_flags = *flags;
+
+	/* parse it before aufs lock */
+	err = au_opts_parse(sb, data, &opts);
+	if (unlikely(err))
+		goto out_opts;
+
+	sbinfo = au_sbi(sb);
+	inode = root->d_inode;
+	mutex_lock(&inode->i_mutex);
+	err = si_write_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+	if (unlikely(err))
+		goto out_mtx;
+	di_write_lock_child(root);
+
+	/* au_opts_remount() may return an error */
+	err = au_opts_remount(sb, &opts);
+	au_opts_free(&opts);
+
+	if (au_ftest_opts(opts.flags, REFRESH))
+		au_remount_refresh(sb);
+
+	if (au_ftest_opts(opts.flags, REFRESH_DYAOP)) {
+		mntflags = au_mntflags(sb);
+		do_dx = !!au_opt_test(mntflags, DIO);
+		au_dy_arefresh(do_dx);
+	}
+
+	aufs_write_unlock(root);
+
+out_mtx:
+	mutex_unlock(&inode->i_mutex);
+out_opts:
+	free_page((unsigned long)opts.opt);
+out:
+	err = cvt_err(err);
+	AuTraceErr(err);
+	return err;
+}
+
+static const struct super_operations aufs_sop = {
+	.alloc_inode	= aufs_alloc_inode,
+	.destroy_inode	= aufs_destroy_inode,
+	/* always deleting, no clearing */
+	.drop_inode	= generic_delete_inode,
+	.show_options	= aufs_show_options,
+	.statfs		= aufs_statfs,
+	.put_super	= aufs_put_super,
+	.sync_fs	= aufs_sync_fs,
+	.remount_fs	= aufs_remount_fs
+};
+
+/* ---------------------------------------------------------------------- */
+
+static int alloc_root(struct super_block *sb)
+{
+	int err;
+	struct inode *inode;
+	struct dentry *root;
+
+	err = -ENOMEM;
+	inode = au_iget_locked(sb, AUFS_ROOT_INO);
+	err = PTR_ERR(inode);
+	if (IS_ERR(inode))
+		goto out;
+
+	inode->i_op = &aufs_dir_iop;
+	inode->i_fop = &aufs_dir_fop;
+	inode->i_mode = S_IFDIR;
+	set_nlink(inode, 2);
+	unlock_new_inode(inode);
+
+	root = d_make_root(inode);
+	if (unlikely(!root)) {
+		/* d_make_root() returns NULL on failure and releases inode */
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = au_di_init(root);
+	if (!err) {
+		sb->s_root = root;
+		return 0; /* success */
+	}
+	dput(root);
+
+out:
+	return err;
+}
+
+static int aufs_fill_super(struct super_block *sb, void *raw_data,
+			   int silent __maybe_unused)
+{
+	int err;
+	struct au_opts opts;
+	struct dentry *root;
+	struct inode *inode;
+	char *arg = raw_data;
+
+	if (unlikely(!arg || !*arg)) {
+		err = -EINVAL;
+		pr_err("no arg\n");
+		goto out;
+	}
+
+	err = -ENOMEM;
+	memset(&opts, 0, sizeof(opts));
+	opts.opt = (void *)__get_free_page(GFP_NOFS);
+	if (unlikely(!opts.opt))
+		goto out;
+	opts.max_opt = PAGE_SIZE / sizeof(*opts.opt);
+	opts.sb_flags = sb->s_flags;
+
+	err = au_si_alloc(sb);
+	if (unlikely(err))
+		goto out_opts;
+
+	/* all timestamps always follow the ones on the branch */
+	sb->s_flags |= MS_NOATIME | MS_NODIRATIME;
+	sb->s_op = &aufs_sop;
+	sb->s_d_op = &aufs_dop;
+	sb->s_magic = AUFS_SUPER_MAGIC;
+	sb->s_maxbytes = 0;
+	au_export_init(sb);
+
+	err = alloc_root(sb);
+	if (unlikely(err)) {
+		si_write_unlock(sb);
+		goto out_info;
+	}
+	root = sb->s_root;
+	inode = root->d_inode;
+
+	/*
+	 * actually we could parse the options here regardless of the aufs
+	 * lock. but at remount time, parsing must be done before taking the
+	 * aufs lock, so we follow the same rule.
+	 */
+	ii_write_lock_parent(inode);
+	aufs_write_unlock(root);
+	err = au_opts_parse(sb, arg, &opts);
+	if (unlikely(err))
+		goto out_root;
+
+	/* lock vfs_inode first, then aufs. */
+	mutex_lock(&inode->i_mutex);
+	aufs_write_lock(root);
+	err = au_opts_mount(sb, &opts);
+	au_opts_free(&opts);
+	aufs_write_unlock(root);
+	mutex_unlock(&inode->i_mutex);
+	if (!err)
+		goto out_opts; /* success */
+
+out_root:
+	dput(root);
+	sb->s_root = NULL;
+out_info:
+	dbgaufs_si_fin(au_sbi(sb));
+	kobject_put(&au_sbi(sb)->si_kobj);
+	sb->s_fs_info = NULL;
+out_opts:
+	free_page((unsigned long)opts.opt);
+out:
+	AuTraceErr(err);
+	err = cvt_err(err);
+	AuTraceErr(err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct dentry *aufs_mount(struct file_system_type *fs_type, int flags,
+				 const char *dev_name __maybe_unused,
+				 void *raw_data)
+{
+	struct dentry *root;
+	struct super_block *sb;
+
+	/* all timestamps always follow the ones on the branch */
+	/* mnt->mnt_flags |= MNT_NOATIME | MNT_NODIRATIME; */
+	root = mount_nodev(fs_type, flags, raw_data, aufs_fill_super);
+	if (IS_ERR(root))
+		goto out;
+
+	sb = root->d_sb;
+	si_write_lock(sb, !AuLock_FLUSH);
+	sysaufs_brs_add(sb, 0);
+	si_write_unlock(sb);
+	au_sbilist_add(sb);
+
+out:
+	return root;
+}
+
+static void aufs_kill_sb(struct super_block *sb)
+{
+	struct au_sbinfo *sbinfo;
+
+	sbinfo = au_sbi(sb);
+	if (sbinfo) {
+		au_sbilist_del(sb);
+		aufs_write_lock(sb->s_root);
+		if (sbinfo->si_wbr_create_ops->fin)
+			sbinfo->si_wbr_create_ops->fin(sb);
+		if (au_opt_test(sbinfo->si_mntflags, UDBA_HNOTIFY)) {
+			au_opt_set_udba(sbinfo->si_mntflags, UDBA_NONE);
+			au_remount_refresh(sb);
+		}
+		if (au_opt_test(sbinfo->si_mntflags, PLINK))
+			au_plink_put(sb, /*verbose*/1);
+		au_xino_clr(sb);
+		sbinfo->si_sb = NULL;
+		aufs_write_unlock(sb->s_root);
+		au_nwt_flush(&sbinfo->si_nowait);
+	}
+	generic_shutdown_super(sb);
+}
+
+struct file_system_type aufs_fs_type = {
+	.name		= AUFS_FSTYPE,
+	/* a race between rename and others */
+	.fs_flags	= FS_RENAME_DOES_D_MOVE,
+	.mount		= aufs_mount,
+	.kill_sb	= aufs_kill_sb,
+	/* no need to __module_get() and module_put(). */
+	.owner		= THIS_MODULE,
+};
diff --git a/fs/aufs/super.h b/fs/aufs/super.h
new file mode 100644
index 0000000..5d9cfb3e
--- /dev/null
+++ b/fs/aufs/super.h
@@ -0,0 +1,559 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * super_block operations
+ */
+
+#ifndef __AUFS_SUPER_H__
+#define __AUFS_SUPER_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+#include "rwsem.h"
+#include "spl.h"
+#include "wkq.h"
+
+typedef ssize_t (*au_readf_t)(struct file *, char __user *, size_t, loff_t *);
+typedef ssize_t (*au_writef_t)(struct file *, const char __user *, size_t,
+			       loff_t *);
+
+/* policies to select one among multiple writable branches */
+struct au_wbr_copyup_operations {
+	int (*copyup)(struct dentry *dentry);
+};
+
+struct au_wbr_create_operations {
+	int (*create)(struct dentry *dentry, int isdir);
+	int (*init)(struct super_block *sb);
+	int (*fin)(struct super_block *sb);
+};
+
+struct au_wbr_mfs {
+	struct mutex	mfs_lock; /* protect this structure */
+	unsigned long	mfs_jiffy;
+	unsigned long	mfs_expire;
+	aufs_bindex_t	mfs_bindex;
+
+	unsigned long long	mfsrr_bytes;
+	unsigned long long	mfsrr_watermark;
+};
+
+struct pseudo_link {
+	union {
+		struct hlist_node hlist;
+		struct rcu_head rcu;
+	};
+	struct inode *inode;
+};
+
+#define AuPlink_NHASH 100
+static inline int au_plink_hash(ino_t ino)
+{
+	return ino % AuPlink_NHASH;
+}
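+
+/* e.g. (illustrative) inode number 1234 falls into bucket 1234 % 100 == 34 */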
+
+struct au_branch;
+struct au_sbinfo {
+	/* nowait tasks in the system-wide workqueue */
+	struct au_nowait_tasks	si_nowait;
+
+	/*
+	 * tried sb->s_umount, but failed due to the dependency on i_mutex.
+	 * a dedicated rwsem for au_sbinfo is necessary.
+	 */
+	struct au_rwsem		si_rwsem;
+
+	/* prevent recursive locking in deleting inode */
+	struct {
+		unsigned long		*bitmap;
+		spinlock_t		tree_lock;
+		struct radix_tree_root	tree;
+	} au_si_pid;
+
+	/*
+	 * dirty approach to protect sb->s_inodes and ->s_files from remount.
+	 */
+	atomic_long_t		si_ninodes, si_nfiles;
+
+	/* branch management */
+	unsigned int		si_generation;
+
+	/* status bits; see the AuSi_* flags below */
+	unsigned char		au_si_status;
+
+	aufs_bindex_t		si_bend;
+
+	/* dirty trick to keep br_id positive */
+	unsigned int		si_last_br_id :
+				sizeof(aufs_bindex_t) * BITS_PER_BYTE - 1;
+	struct au_branch	**si_branch;
+
+	/* policy to select a writable branch */
+	unsigned char		si_wbr_copyup;
+	unsigned char		si_wbr_create;
+	struct au_wbr_copyup_operations *si_wbr_copyup_ops;
+	struct au_wbr_create_operations *si_wbr_create_ops;
+
+	/* round robin */
+	atomic_t		si_wbr_rr_next;
+
+	/* most free space */
+	struct au_wbr_mfs	si_wbr_mfs;
+
+	/* mount flags */
+	/* include/asm-ia64/siginfo.h defines a macro named si_flags */
+	unsigned int		si_mntflags;
+
+	/* external inode number (bitmap and translation table) */
+	au_readf_t		si_xread;
+	au_writef_t		si_xwrite;
+	struct file		*si_xib;
+	struct mutex		si_xib_mtx; /* protect xib members */
+	unsigned long		*si_xib_buf;
+	unsigned long		si_xib_last_pindex;
+	int			si_xib_next_bit;
+	aufs_bindex_t		si_xino_brid;
+	/* reserved for future use */
+	/* unsigned long long	si_xib_limit; */	/* Max xib file size */
+
+#ifdef CONFIG_AUFS_EXPORT
+	/* i_generation */
+	struct file		*si_xigen;
+	atomic_t		si_xigen_next;
+#endif
+
+	/* vdir parameters */
+	unsigned long		si_rdcache;	/* max cache time in jiffies */
+	unsigned int		si_rdblk;	/* deblk size */
+	unsigned int		si_rdhash;	/* hash size */
+
+	/*
+	 * If the number of whiteouts is larger than si_dirwh, leave all of
+	 * them after au_whtmp_ren to reduce the cost of rmdir(2).
+	 * a future fsck.aufs or a kernel thread will remove them later.
+	 * Otherwise, remove all whiteouts and the dir in rmdir(2).
+	 */
+	unsigned int		si_dirwh;
+
+	/*
+	 * rename(2) a directory with all children.
+	 */
+	/* reserved for future use */
+	/* int			si_rendir; */
+
+	/* pseudo_link list */
+	struct au_sphlhead	si_plink[AuPlink_NHASH];
+	wait_queue_head_t	si_plink_wq;
+	spinlock_t		si_plink_maint_lock;
+	pid_t			si_plink_maint_pid;
+
+	/*
+	 * sysfs and lifetime management.
+	 * this is not a small structure, and it may waste memory when sysfs
+	 * is disabled, particularly when many aufs mounts exist.
+	 * but configurations with sysfs enabled are the majority.
+	 */
+	struct kobject		si_kobj;
+#ifdef CONFIG_DEBUG_FS
+	struct dentry		 *si_dbgaufs;
+	struct dentry		 *si_dbgaufs_plink;
+	struct dentry		 *si_dbgaufs_xib;
+#ifdef CONFIG_AUFS_EXPORT
+	struct dentry		 *si_dbgaufs_xigen;
+#endif
+#endif
+
+#ifdef CONFIG_AUFS_SBILIST
+	struct list_head	si_list;
+#endif
+
+	/* dirty, necessary for unmounting, sysfs and sysrq */
+	struct super_block	*si_sb;
+};
+
+/* sbinfo status flags */
+/*
+ * set true when refresh_dirs() failed at remount time.
+ * then try refreshing dirs at access time again.
+ * if it is false, refreshing dirs at access time is unnecessary.
+ */
+#define AuSi_FAILED_REFRESH_DIR	1
+static inline unsigned char au_do_ftest_si(struct au_sbinfo *sbi,
+					   unsigned int flag)
+{
+	AuRwMustAnyLock(&sbi->si_rwsem);
+	return sbi->au_si_status & flag;
+}
+#define au_ftest_si(sbinfo, name)	au_do_ftest_si(sbinfo, AuSi_##name)
+#define au_fset_si(sbinfo, name) do { \
+	AuRwMustWriteLock(&(sbinfo)->si_rwsem); \
+	(sbinfo)->au_si_status |= AuSi_##name; \
+} while (0)
+#define au_fclr_si(sbinfo, name) do { \
+	AuRwMustWriteLock(&(sbinfo)->si_rwsem); \
+	(sbinfo)->au_si_status &= ~AuSi_##name; \
+} while (0)
+
+/* ---------------------------------------------------------------------- */
+
+/* policy to select one among writable branches */
+#define AuWbrCopyup(sbinfo, ...) \
+	((sbinfo)->si_wbr_copyup_ops->copyup(__VA_ARGS__))
+#define AuWbrCreate(sbinfo, ...) \
+	((sbinfo)->si_wbr_create_ops->create(__VA_ARGS__))
+
+/* flags for si_read_lock()/aufs_read_lock()/di_read_lock() */
+#define AuLock_DW		1		/* write-lock dentry */
+#define AuLock_IR		(1 << 1)	/* read-lock inode */
+#define AuLock_IW		(1 << 2)	/* write-lock inode */
+#define AuLock_FLUSH		(1 << 3)	/* wait for 'nowait' tasks */
+#define AuLock_DIR		(1 << 4)	/* target is a dir */
+#define AuLock_NOPLM		(1 << 5)	/* return err in plm mode */
+#define AuLock_NOPLMW		(1 << 6)	/* wait for plm mode ends */
+#define AuLock_GEN		(1 << 7)	/* test digen/iigen */
+#define au_ftest_lock(flags, name)	((flags) & AuLock_##name)
+#define au_fset_lock(flags, name) \
+	do { (flags) |= AuLock_##name; } while (0)
+#define au_fclr_lock(flags, name) \
+	do { (flags) &= ~AuLock_##name; } while (0)
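+
+/*
+ * Illustrative combination (not exhaustive):
+ * aufs_read_lock(dentry, AuLock_IR | AuLock_GEN) would read-lock the dentry,
+ * read-lock its inode as well, and test the generation numbers.
+ */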
+
+/* ---------------------------------------------------------------------- */
+
+/* super.c */
+extern struct file_system_type aufs_fs_type;
+struct inode *au_iget_locked(struct super_block *sb, ino_t ino);
+typedef unsigned long long (*au_arraycb_t)(void *array, unsigned long long max,
+					   void *arg);
+void au_array_free(void *array);
+void *au_array_alloc(unsigned long long *hint, au_arraycb_t cb, void *arg);
+struct inode **au_iarray_alloc(struct super_block *sb, unsigned long long *max);
+void au_iarray_free(struct inode **a, unsigned long long max);
+
+/* sbinfo.c */
+void au_si_free(struct kobject *kobj);
+int au_si_alloc(struct super_block *sb);
+int au_sbr_realloc(struct au_sbinfo *sbinfo, int nbr);
+
+unsigned int au_sigen_inc(struct super_block *sb);
+aufs_bindex_t au_new_br_id(struct super_block *sb);
+
+int si_read_lock(struct super_block *sb, int flags);
+int si_write_lock(struct super_block *sb, int flags);
+int aufs_read_lock(struct dentry *dentry, int flags);
+void aufs_read_unlock(struct dentry *dentry, int flags);
+void aufs_write_lock(struct dentry *dentry);
+void aufs_write_unlock(struct dentry *dentry);
+int aufs_read_and_write_lock2(struct dentry *d1, struct dentry *d2, int flags);
+void aufs_read_and_write_unlock2(struct dentry *d1, struct dentry *d2);
+
+int si_pid_test_slow(struct super_block *sb);
+void si_pid_set_slow(struct super_block *sb);
+void si_pid_clr_slow(struct super_block *sb);
+
+/* wbr_policy.c */
+extern struct au_wbr_copyup_operations au_wbr_copyup_ops[];
+extern struct au_wbr_create_operations au_wbr_create_ops[];
+int au_cpdown_dirs(struct dentry *dentry, aufs_bindex_t bdst);
+int au_wbr_nonopq(struct dentry *dentry, aufs_bindex_t bindex);
+
+/* mvdown.c */
+int au_mvdown(struct dentry *dentry, struct aufs_mvdown __user *arg);
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct au_sbinfo *au_sbi(struct super_block *sb)
+{
+	return sb->s_fs_info;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_EXPORT
+int au_test_nfsd(void);
+void au_export_init(struct super_block *sb);
+void au_xigen_inc(struct inode *inode);
+int au_xigen_new(struct inode *inode);
+int au_xigen_set(struct super_block *sb, struct file *base);
+void au_xigen_clr(struct super_block *sb);
+
+static inline int au_busy_or_stale(void)
+{
+	if (!au_test_nfsd())
+		return -EBUSY;
+	return -ESTALE;
+}
+#else
+AuStubInt0(au_test_nfsd, void)
+AuStubVoid(au_export_init, struct super_block *sb)
+AuStubVoid(au_xigen_inc, struct inode *inode)
+AuStubInt0(au_xigen_new, struct inode *inode)
+AuStubInt0(au_xigen_set, struct super_block *sb, struct file *base)
+AuStubVoid(au_xigen_clr, struct super_block *sb)
+static inline int au_busy_or_stale(void)
+{
+	return -EBUSY;
+}
+#endif /* CONFIG_AUFS_EXPORT */
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_SBILIST
+/* module.c */
+extern struct au_splhead au_sbilist;
+
+static inline void au_sbilist_init(void)
+{
+	au_spl_init(&au_sbilist);
+}
+
+static inline void au_sbilist_add(struct super_block *sb)
+{
+	au_spl_add(&au_sbi(sb)->si_list, &au_sbilist);
+}
+
+static inline void au_sbilist_del(struct super_block *sb)
+{
+	au_spl_del(&au_sbi(sb)->si_list, &au_sbilist);
+}
+
+#ifdef CONFIG_AUFS_MAGIC_SYSRQ
+static inline void au_sbilist_lock(void)
+{
+	spin_lock(&au_sbilist.spin);
+}
+
+static inline void au_sbilist_unlock(void)
+{
+	spin_unlock(&au_sbilist.spin);
+}
+#define AuGFP_SBILIST	GFP_ATOMIC
+#else
+AuStubVoid(au_sbilist_lock, void)
+AuStubVoid(au_sbilist_unlock, void)
+#define AuGFP_SBILIST	GFP_NOFS
+#endif /* CONFIG_AUFS_MAGIC_SYSRQ */
+#else
+AuStubVoid(au_sbilist_init, void)
+AuStubVoid(au_sbilist_add, struct super_block*)
+AuStubVoid(au_sbilist_del, struct super_block*)
+AuStubVoid(au_sbilist_lock, void)
+AuStubVoid(au_sbilist_unlock, void)
+#define AuGFP_SBILIST	GFP_NOFS
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+static inline void dbgaufs_si_null(struct au_sbinfo *sbinfo)
+{
+	/*
+	 * This function is effectively a dynamic '__init' function,
+	 * so the tiny check for si_rwsem is unnecessary.
+	 */
+	/* AuRwMustWriteLock(&sbinfo->si_rwsem); */
+#ifdef CONFIG_DEBUG_FS
+	sbinfo->si_dbgaufs = NULL;
+	sbinfo->si_dbgaufs_plink = NULL;
+	sbinfo->si_dbgaufs_xib = NULL;
+#ifdef CONFIG_AUFS_EXPORT
+	sbinfo->si_dbgaufs_xigen = NULL;
+#endif
+#endif
+}
+
+/* ---------------------------------------------------------------------- */
+
+static inline pid_t si_pid_bit(void)
+{
+	/* the origin of pid is 1, but the bitmap's is 0 */
+	return current->pid - 1;
+}
+
+static inline int si_pid_test(struct super_block *sb)
+{
+	pid_t bit = si_pid_bit();
+	if (bit < PID_MAX_DEFAULT)
+		return test_bit(bit, au_sbi(sb)->au_si_pid.bitmap);
+	else
+		return si_pid_test_slow(sb);
+}
+
+static inline void si_pid_set(struct super_block *sb)
+{
+	pid_t bit = si_pid_bit();
+	if (bit < PID_MAX_DEFAULT) {
+		AuDebugOn(test_bit(bit, au_sbi(sb)->au_si_pid.bitmap));
+		set_bit(bit, au_sbi(sb)->au_si_pid.bitmap);
+		/* smp_mb(); */
+	} else
+		si_pid_set_slow(sb);
+}
+
+static inline void si_pid_clr(struct super_block *sb)
+{
+	pid_t bit = si_pid_bit();
+	if (bit < PID_MAX_DEFAULT) {
+		AuDebugOn(!test_bit(bit, au_sbi(sb)->au_si_pid.bitmap));
+		clear_bit(bit, au_sbi(sb)->au_si_pid.bitmap);
+		/* smp_mb(); */
+	} else
+		si_pid_clr_slow(sb);
+}
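+
+/*
+ * Illustrative flow: a task with pid 100 maps to bit 99. si_pid_set()
+ * records that the current task holds the si lock, so that re-entrant
+ * paths (e.g. inode deletion) can use si_pid_test() to detect the lock is
+ * already held and avoid recursive locking.
+ */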
+
+/* ---------------------------------------------------------------------- */
+
+/* lock superblock. mainly for entry point functions */
+/*
+ * __si_read_lock, __si_write_lock,
+ * __si_read_unlock, __si_write_unlock, __si_downgrade_lock
+ */
+AuSimpleRwsemFuncs(__si, struct super_block *sb, &au_sbi(sb)->si_rwsem);
+
+#define SiMustNoWaiters(sb)	AuRwMustNoWaiters(&au_sbi(sb)->si_rwsem)
+#define SiMustAnyLock(sb)	AuRwMustAnyLock(&au_sbi(sb)->si_rwsem)
+#define SiMustWriteLock(sb)	AuRwMustWriteLock(&au_sbi(sb)->si_rwsem)
+
+static inline void si_noflush_read_lock(struct super_block *sb)
+{
+	__si_read_lock(sb);
+	si_pid_set(sb);
+}
+
+static inline int si_noflush_read_trylock(struct super_block *sb)
+{
+	int locked = __si_read_trylock(sb);
+	if (locked)
+		si_pid_set(sb);
+	return locked;
+}
+
+static inline void si_noflush_write_lock(struct super_block *sb)
+{
+	__si_write_lock(sb);
+	si_pid_set(sb);
+}
+
+static inline int si_noflush_write_trylock(struct super_block *sb)
+{
+	int locked = __si_write_trylock(sb);
+	if (locked)
+		si_pid_set(sb);
+	return locked;
+}
+
+#if 0 /* unused */
+static inline int si_read_trylock(struct super_block *sb, int flags)
+{
+	if (au_ftest_lock(flags, FLUSH))
+		au_nwt_flush(&au_sbi(sb)->si_nowait);
+	return si_noflush_read_trylock(sb);
+}
+#endif
+
+static inline void si_read_unlock(struct super_block *sb)
+{
+	si_pid_clr(sb);
+	__si_read_unlock(sb);
+}
+
+#if 0 /* unused */
+static inline int si_write_trylock(struct super_block *sb, int flags)
+{
+	if (au_ftest_lock(flags, FLUSH))
+		au_nwt_flush(&au_sbi(sb)->si_nowait);
+	return si_noflush_write_trylock(sb);
+}
+#endif
+
+static inline void si_write_unlock(struct super_block *sb)
+{
+	si_pid_clr(sb);
+	__si_write_unlock(sb);
+}
+
+#if 0 /* unused */
+static inline void si_downgrade_lock(struct super_block *sb)
+{
+	__si_downgrade_lock(sb);
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+static inline aufs_bindex_t au_sbend(struct super_block *sb)
+{
+	SiMustAnyLock(sb);
+	return au_sbi(sb)->si_bend;
+}
+
+static inline unsigned int au_mntflags(struct super_block *sb)
+{
+	SiMustAnyLock(sb);
+	return au_sbi(sb)->si_mntflags;
+}
+
+static inline unsigned int au_sigen(struct super_block *sb)
+{
+	SiMustAnyLock(sb);
+	return au_sbi(sb)->si_generation;
+}
+
+static inline void au_ninodes_inc(struct super_block *sb)
+{
+	atomic_long_inc(&au_sbi(sb)->si_ninodes);
+}
+
+static inline void au_ninodes_dec(struct super_block *sb)
+{
+	AuDebugOn(!atomic_long_read(&au_sbi(sb)->si_ninodes));
+	atomic_long_dec(&au_sbi(sb)->si_ninodes);
+}
+
+static inline void au_nfiles_inc(struct super_block *sb)
+{
+	atomic_long_inc(&au_sbi(sb)->si_nfiles);
+}
+
+static inline void au_nfiles_dec(struct super_block *sb)
+{
+	AuDebugOn(!atomic_long_read(&au_sbi(sb)->si_nfiles));
+	atomic_long_dec(&au_sbi(sb)->si_nfiles);
+}
+
+static inline struct au_branch *au_sbr(struct super_block *sb,
+				       aufs_bindex_t bindex)
+{
+	SiMustAnyLock(sb);
+	return au_sbi(sb)->si_branch[0 + bindex];
+}
+
+static inline void au_xino_brid_set(struct super_block *sb, aufs_bindex_t brid)
+{
+	SiMustWriteLock(sb);
+	au_sbi(sb)->si_xino_brid = brid;
+}
+
+static inline aufs_bindex_t au_xino_brid(struct super_block *sb)
+{
+	SiMustAnyLock(sb);
+	return au_sbi(sb)->si_xino_brid;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_SUPER_H__ */
diff --git a/fs/aufs/sysaufs.c b/fs/aufs/sysaufs.c
new file mode 100644
index 0000000..f68e844
--- /dev/null
+++ b/fs/aufs/sysaufs.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * sysfs interface and lifetime management
+ * they are necessary even when sysfs is disabled.
+ */
+
+#include <linux/random.h>
+#include "aufs.h"
+
+unsigned long sysaufs_si_mask;
+struct kset *sysaufs_kset;
+
+#define AuSiAttr(_name) { \
+	.attr   = { .name = __stringify(_name), .mode = 0444 },	\
+	.show   = sysaufs_si_##_name,				\
+}
+
+static struct sysaufs_si_attr sysaufs_si_attr_xi_path = AuSiAttr(xi_path);
+struct attribute *sysaufs_si_attrs[] = {
+	&sysaufs_si_attr_xi_path.attr,
+	NULL,
+};
+
+static const struct sysfs_ops au_sbi_ops = {
+	.show   = sysaufs_si_show
+};
+
+static struct kobj_type au_sbi_ktype = {
+	.release	= au_si_free,
+	.sysfs_ops	= &au_sbi_ops,
+	.default_attrs	= sysaufs_si_attrs
+};
+
+/* ---------------------------------------------------------------------- */
+
+int sysaufs_si_init(struct au_sbinfo *sbinfo)
+{
+	int err;
+
+	sbinfo->si_kobj.kset = sysaufs_kset;
+	/* cf. sysaufs_name() */
+	err = kobject_init_and_add
+		(&sbinfo->si_kobj, &au_sbi_ktype, /*&sysaufs_kset->kobj*/NULL,
+		 SysaufsSiNamePrefix "%lx", sysaufs_si_id(sbinfo));
+
+	dbgaufs_si_null(sbinfo);
+	if (!err) {
+		err = dbgaufs_si_init(sbinfo);
+		if (unlikely(err))
+			kobject_put(&sbinfo->si_kobj);
+	}
+	return err;
+}
+
+void sysaufs_fin(void)
+{
+	dbgaufs_fin();
+	sysfs_remove_group(&sysaufs_kset->kobj, sysaufs_attr_group);
+	kset_unregister(sysaufs_kset);
+}
+
+int __init sysaufs_init(void)
+{
+	int err;
+
+	do {
+		get_random_bytes(&sysaufs_si_mask, sizeof(sysaufs_si_mask));
+	} while (!sysaufs_si_mask);
+
+	err = -EINVAL;
+	sysaufs_kset = kset_create_and_add(AUFS_NAME, NULL, fs_kobj);
+	if (unlikely(!sysaufs_kset))
+		goto out;
+	err = PTR_ERR(sysaufs_kset);
+	if (IS_ERR(sysaufs_kset))
+		goto out;
+	err = sysfs_create_group(&sysaufs_kset->kobj, sysaufs_attr_group);
+	if (unlikely(err)) {
+		kset_unregister(sysaufs_kset);
+		goto out;
+	}
+
+	err = dbgaufs_init();
+	if (unlikely(err))
+		sysaufs_fin();
+out:
+	return err;
+}
diff --git a/fs/aufs/sysaufs.h b/fs/aufs/sysaufs.h
new file mode 100644
index 0000000..2fc17d9
--- /dev/null
+++ b/fs/aufs/sysaufs.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * sysfs interface and mount lifetime management
+ */
+
+#ifndef __SYSAUFS_H__
+#define __SYSAUFS_H__
+
+#ifdef __KERNEL__
+
+#include <linux/sysfs.h>
+#include "module.h"
+
+struct super_block;
+struct au_sbinfo;
+
+struct sysaufs_si_attr {
+	struct attribute attr;
+	int (*show)(struct seq_file *seq, struct super_block *sb);
+};
+
+/* ---------------------------------------------------------------------- */
+
+/* sysaufs.c */
+extern unsigned long sysaufs_si_mask;
+extern struct kset *sysaufs_kset;
+extern struct attribute *sysaufs_si_attrs[];
+int sysaufs_si_init(struct au_sbinfo *sbinfo);
+int __init sysaufs_init(void);
+void sysaufs_fin(void);
+
+/* ---------------------------------------------------------------------- */
+
+/* some people don't like exposing a raw kernel pointer */
+static inline unsigned long sysaufs_si_id(struct au_sbinfo *sbinfo)
+{
+	return sysaufs_si_mask ^ (unsigned long)sbinfo;
+}
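+
+/*
+ * e.g. (illustrative values) with sysaufs_si_mask == 0x5a5a and sbinfo at
+ * 0xffff880012345678, the exported id is their XOR; the random mask keeps
+ * the raw kernel pointer from being disclosed.
+ */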
+
+#define SysaufsSiNamePrefix	"si_"
+#define SysaufsSiNameLen	(sizeof(SysaufsSiNamePrefix) + 16)
+static inline void sysaufs_name(struct au_sbinfo *sbinfo, char *name)
+{
+	snprintf(name, SysaufsSiNameLen, SysaufsSiNamePrefix "%lx",
+		 sysaufs_si_id(sbinfo));
+}
+
+struct au_branch;
+#ifdef CONFIG_SYSFS
+/* sysfs.c */
+extern struct attribute_group *sysaufs_attr_group;
+
+int sysaufs_si_xi_path(struct seq_file *seq, struct super_block *sb);
+ssize_t sysaufs_si_show(struct kobject *kobj, struct attribute *attr,
+			 char *buf);
+
+void sysaufs_br_init(struct au_branch *br);
+void sysaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex);
+void sysaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex);
+
+#define sysaufs_brs_init()	do {} while (0)
+
+#else
+#define sysaufs_attr_group	NULL
+
+AuStubInt0(sysaufs_si_xi_path, struct seq_file *seq, struct super_block *sb)
+
+static inline
+ssize_t sysaufs_si_show(struct kobject *kobj, struct attribute *attr,
+			 char *buf)
+{
+	return 0;
+}
+
+AuStubVoid(sysaufs_br_init, struct au_branch *br)
+AuStubVoid(sysaufs_brs_add, struct super_block *sb, aufs_bindex_t bindex)
+AuStubVoid(sysaufs_brs_del, struct super_block *sb, aufs_bindex_t bindex)
+
+static inline void sysaufs_brs_init(void)
+{
+	sysaufs_brs = 0;
+}
+
+#endif /* CONFIG_SYSFS */
+
+#endif /* __KERNEL__ */
+#endif /* __SYSAUFS_H__ */
diff --git a/fs/aufs/sysfs.c b/fs/aufs/sysfs.c
new file mode 100644
index 0000000..5282bd3
--- /dev/null
+++ b/fs/aufs/sysfs.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * sysfs interface
+ */
+
+#include <linux/seq_file.h>
+#include "aufs.h"
+
+#ifdef CONFIG_AUFS_FS_MODULE
+/* this entry violates the "one line per file" policy of sysfs */
+static ssize_t config_show(struct kobject *kobj, struct kobj_attribute *attr,
+			   char *buf)
+{
+	ssize_t err;
+	static char *conf =
+/* this file is generated at compile time */
+#include "conf.str"
+		;
+
+	/* use "%s" so any '%' in the generated string is not interpreted */
+	err = snprintf(buf, PAGE_SIZE, "%s", conf);
+	if (unlikely(err >= PAGE_SIZE))
+		err = -EFBIG;
+	return err;
+}
+
+static struct kobj_attribute au_config_attr = __ATTR_RO(config);
+#endif
+
+static struct attribute *au_attr[] = {
+#ifdef CONFIG_AUFS_FS_MODULE
+	&au_config_attr.attr,
+#endif
+	NULL,	/* need to NULL terminate the list of attributes */
+};
+
+static struct attribute_group sysaufs_attr_group_body = {
+	.attrs = au_attr
+};
+
+struct attribute_group *sysaufs_attr_group = &sysaufs_attr_group_body;
+
+/* ---------------------------------------------------------------------- */
+
+int sysaufs_si_xi_path(struct seq_file *seq, struct super_block *sb)
+{
+	int err;
+
+	SiMustAnyLock(sb);
+
+	err = 0;
+	if (au_opt_test(au_mntflags(sb), XINO)) {
+		err = au_xino_path(seq, au_sbi(sb)->si_xib);
+		seq_putc(seq, '\n');
+	}
+	return err;
+}
+
+/*
+ * the lifetime of a branch is independent of its entry under sysfs.
+ * sysfs manages the lifetime of the entry and never calls ->show() after it
+ * is unlinked.
+ */
+static int sysaufs_si_br(struct seq_file *seq, struct super_block *sb,
+			 aufs_bindex_t bindex)
+{
+	int err;
+	struct path path;
+	struct dentry *root;
+	struct au_branch *br;
+	char *perm;
+
+	AuDbg("b%d\n", bindex);
+
+	err = 0;
+	root = sb->s_root;
+	di_read_lock_parent(root, !AuLock_IR);
+	br = au_sbr(sb, bindex);
+	path.mnt = au_br_mnt(br);
+	path.dentry = au_h_dptr(root, bindex);
+	au_seq_path(seq, &path);
+	di_read_unlock(root, !AuLock_IR);
+	perm = au_optstr_br_perm(br->br_perm);
+	if (perm) {
+		err = seq_printf(seq, "=%s\n", perm);
+		kfree(perm);
+		if (err == -1)
+			err = -E2BIG;
+	} else
+		err = -ENOMEM;
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct seq_file *au_seq(char *p, ssize_t len)
+{
+	struct seq_file *seq;
+
+	seq = kzalloc(sizeof(*seq), GFP_NOFS);
+	if (seq) {
+		/* mutex_init(&seq.lock); */
+		seq->buf = p;
+		seq->size = len;
+		return seq; /* success */
+	}
+
+	seq = ERR_PTR(-ENOMEM);
+	return seq;
+}
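+
+/*
+ * Note: the returned seq_file is a minimal stand-in; only ->buf and ->size
+ * are set, so seq_printf() and friends write directly into the page buffer
+ * 'p' supplied by sysfs.
+ */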
+
+#define SysaufsBr_PREFIX "br"
+
+/* todo: file size may exceed PAGE_SIZE */
+ssize_t sysaufs_si_show(struct kobject *kobj, struct attribute *attr,
+			char *buf)
+{
+	ssize_t err;
+	long l;
+	aufs_bindex_t bend;
+	struct au_sbinfo *sbinfo;
+	struct super_block *sb;
+	struct seq_file *seq;
+	char *name;
+	struct attribute **cattr;
+
+	sbinfo = container_of(kobj, struct au_sbinfo, si_kobj);
+	sb = sbinfo->si_sb;
+
+	/*
+	 * prevent a race condition between sysfs and aufs.
+	 * for instance, sysfs_file_read() calls sysfs_get_active_two(), which
+	 * prohibits maintaining the sysfs entries.
+	 * here we would acquire the read lock after sysfs_get_active_two();
+	 * on the other hand, the remount process may maintain the sysfs/aufs
+	 * entries after acquiring the write lock.
+	 * that combination can deadlock, so we simply give up and return
+	 * -EBUSY instead of blocking on the read.
+	 */
+	err = -EBUSY;
+	if (unlikely(!si_noflush_read_trylock(sb)))
+		goto out;
+
+	seq = au_seq(buf, PAGE_SIZE);
+	err = PTR_ERR(seq);
+	if (IS_ERR(seq))
+		goto out_unlock;
+
+	name = (void *)attr->name;
+	cattr = sysaufs_si_attrs;
+	while (*cattr) {
+		if (!strcmp(name, (*cattr)->name)) {
+			err = container_of(*cattr, struct sysaufs_si_attr, attr)
+				->show(seq, sb);
+			goto out_seq;
+		}
+		cattr++;
+	}
+
+	bend = au_sbend(sb);
+	if (!strncmp(name, SysaufsBr_PREFIX, sizeof(SysaufsBr_PREFIX) - 1)) {
+		name += sizeof(SysaufsBr_PREFIX) - 1;
+		err = kstrtol(name, 10, &l);
+		if (!err) {
+			if (l <= bend)
+				err = sysaufs_si_br(seq, sb, (aufs_bindex_t)l);
+			else
+				err = -ENOENT;
+		}
+		goto out_seq;
+	}
+	BUG();
+
+out_seq:
+	if (!err) {
+		err = seq->count;
+		/* sysfs limit */
+		if (unlikely(err == PAGE_SIZE))
+			err = -EFBIG;
+	}
+	kfree(seq);
+out_unlock:
+	si_read_unlock(sb);
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+void sysaufs_br_init(struct au_branch *br)
+{
+	struct attribute *attr = &br->br_attr;
+
+	sysfs_attr_init(attr);
+	attr->name = br->br_name;
+	attr->mode = S_IRUGO;
+}
+
+void sysaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex)
+{
+	struct au_branch *br;
+	struct kobject *kobj;
+	aufs_bindex_t bend;
+
+	dbgaufs_brs_del(sb, bindex);
+
+	if (!sysaufs_brs)
+		return;
+
+	kobj = &au_sbi(sb)->si_kobj;
+	bend = au_sbend(sb);
+	for (; bindex <= bend; bindex++) {
+		br = au_sbr(sb, bindex);
+		sysfs_remove_file(kobj, &br->br_attr);
+	}
+}
+
+void sysaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex)
+{
+	int err;
+	aufs_bindex_t bend;
+	struct kobject *kobj;
+	struct au_branch *br;
+
+	dbgaufs_brs_add(sb, bindex);
+
+	if (!sysaufs_brs)
+		return;
+
+	kobj = &au_sbi(sb)->si_kobj;
+	bend = au_sbend(sb);
+	for (; bindex <= bend; bindex++) {
+		br = au_sbr(sb, bindex);
+		snprintf(br->br_name, sizeof(br->br_name), SysaufsBr_PREFIX
+			 "%d", bindex);
+		err = sysfs_create_file(kobj, &br->br_attr);
+		if (unlikely(err))
+			pr_warn("failed %s under sysfs(%d)\n",
+				br->br_name, err);
+	}
+}
diff --git a/fs/aufs/sysrq.c b/fs/aufs/sysrq.c
new file mode 100644
index 0000000..4d770ad
--- /dev/null
+++ b/fs/aufs/sysrq.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * magic sysrq handler
+ */
+
+/* #include <linux/sysrq.h> */
+#include <linux/writeback.h>
+#include "aufs.h"
+
+/* ---------------------------------------------------------------------- */
+
+static void sysrq_sb(struct super_block *sb)
+{
+	char *plevel;
+	struct au_sbinfo *sbinfo;
+	struct file *file;
+
+	plevel = au_plevel;
+	au_plevel = KERN_WARNING;
+
+	/* since we define pr_fmt, call printk directly */
+#define pr(str) printk(KERN_WARNING AUFS_NAME ": " str)
+
+	sbinfo = au_sbi(sb);
+	printk(KERN_WARNING "si=%lx\n", sysaufs_si_id(sbinfo));
+	pr("superblock\n");
+	au_dpri_sb(sb);
+
+#if 0
+	pr("root dentry\n");
+	au_dpri_dentry(sb->s_root);
+	pr("root inode\n");
+	au_dpri_inode(sb->s_root->d_inode);
+#endif
+
+#if 0
+	do {
+		int err, i, j, ndentry;
+		struct au_dcsub_pages dpages;
+		struct au_dpage *dpage;
+
+		err = au_dpages_init(&dpages, GFP_ATOMIC);
+		if (unlikely(err))
+			break;
+		err = au_dcsub_pages(&dpages, sb->s_root, NULL, NULL);
+		if (!err)
+			for (i = 0; i < dpages.ndpage; i++) {
+				dpage = dpages.dpages + i;
+				ndentry = dpage->ndentry;
+				for (j = 0; j < ndentry; j++)
+					au_dpri_dentry(dpage->dentries[j]);
+			}
+		au_dpages_free(&dpages);
+	} while (0);
+#endif
+
+#if 1
+	{
+		struct inode *i;
+		pr("isolated inode\n");
+		spin_lock(&inode_sb_list_lock);
+		list_for_each_entry(i, &sb->s_inodes, i_sb_list) {
+			spin_lock(&i->i_lock);
+			if (1 || hlist_empty(&i->i_dentry))
+				au_dpri_inode(i);
+			spin_unlock(&i->i_lock);
+		}
+		spin_unlock(&inode_sb_list_lock);
+	}
+#endif
+	pr("files\n");
+	lg_global_lock(&files_lglock);
+	do_file_list_for_each_entry(sb, file) {
+		umode_t mode;
+		mode = file_inode(file)->i_mode;
+		if (!special_file(mode) || au_special_file(mode))
+			au_dpri_file(file);
+	} while_file_list_for_each_entry;
+	lg_global_unlock(&files_lglock);
+	pr("done\n");
+
+#undef pr
+	au_plevel = plevel;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* module parameter */
+static char *aufs_sysrq_key = "a";
+module_param_named(sysrq, aufs_sysrq_key, charp, S_IRUGO);
+MODULE_PARM_DESC(sysrq, "MagicSysRq key for " AUFS_NAME);
+
+static void au_sysrq(int key __maybe_unused)
+{
+	struct au_sbinfo *sbinfo;
+
+	lockdep_off();
+	au_sbilist_lock();
+	list_for_each_entry(sbinfo, &au_sbilist.head, si_list)
+		sysrq_sb(sbinfo->si_sb);
+	au_sbilist_unlock();
+	lockdep_on();
+}
+
+static struct sysrq_key_op au_sysrq_op = {
+	.handler	= au_sysrq,
+	.help_msg	= "Aufs",
+	.action_msg	= "Aufs",
+	.enable_mask	= SYSRQ_ENABLE_DUMP
+};
+
+/* ---------------------------------------------------------------------- */
+
+int __init au_sysrq_init(void)
+{
+	int err;
+	char key;
+
+	err = -1;
+	key = *aufs_sysrq_key;
+	if ('a' <= key && key <= 'z')
+		err = register_sysrq_key(key, &au_sysrq_op);
+	if (unlikely(err))
+		pr_err("err %d, sysrq=%c\n", err, key);
+	return err;
+}
+
+void au_sysrq_fin(void)
+{
+	int err;
+	err = unregister_sysrq_key(*aufs_sysrq_key, &au_sysrq_op);
+	if (unlikely(err))
+		pr_err("err %d (ignored)\n", err);
+}
diff --git a/fs/aufs/vdir.c b/fs/aufs/vdir.c
new file mode 100644
index 0000000..ccd6e24
--- /dev/null
+++ b/fs/aufs/vdir.c
@@ -0,0 +1,878 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * virtual or vertical directory
+ */
+
+#include "aufs.h"
+
+static unsigned int calc_size(int nlen)
+{
+	return ALIGN(sizeof(struct au_vdir_de) + nlen, sizeof(ino_t));
+}
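+
+/*
+ * e.g. (illustrative, assuming an 8-byte ino_t) calc_size(5) rounds
+ * sizeof(struct au_vdir_de) + 5 up to the next multiple of 8, keeping every
+ * entry within a deblk ino_t-aligned.
+ */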
+
+static int set_deblk_end(union au_vdir_deblk_p *p,
+			 union au_vdir_deblk_p *deblk_end)
+{
+	if (calc_size(0) <= deblk_end->deblk - p->deblk) {
+		p->de->de_str.len = 0;
+		/* smp_mb(); */
+		return 0;
+	}
+	return -1; /* error */
+}
+
+/* returns true or false */
+static int is_deblk_end(union au_vdir_deblk_p *p,
+			union au_vdir_deblk_p *deblk_end)
+{
+	if (calc_size(0) <= deblk_end->deblk - p->deblk)
+		return !p->de->de_str.len;
+	return 1;
+}
+
+static unsigned char *last_deblk(struct au_vdir *vdir)
+{
+	return vdir->vd_deblk[vdir->vd_nblk - 1];
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* estimate the appropriate size for the name hash table */
+unsigned int au_rdhash_est(loff_t sz)
+{
+	unsigned int n;
+
+	n = UINT_MAX;
+	sz >>= 10;
+	if (sz < n)
+		n = sz;
+	if (sz < AUFS_RDHASH_DEF)
+		n = AUFS_RDHASH_DEF;
+	/* pr_info("n %u\n", n); */
+	return n;
+}
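+
+/*
+ * e.g. (illustrative) an 8MB directory yields sz >> 10 == 8192 buckets,
+ * while small directories fall back to at least AUFS_RDHASH_DEF.
+ */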
+
+/*
+ * the allocated memory has to be freed by
+ * au_nhash_wh_free() or au_nhash_de_free().
+ */
+int au_nhash_alloc(struct au_nhash *nhash, unsigned int num_hash, gfp_t gfp)
+{
+	struct hlist_head *head;
+	unsigned int u;
+
+	head = kmalloc(sizeof(*nhash->nh_head) * num_hash, gfp);
+	if (head) {
+		nhash->nh_num = num_hash;
+		nhash->nh_head = head;
+		for (u = 0; u < num_hash; u++)
+			INIT_HLIST_HEAD(head++);
+		return 0; /* success */
+	}
+
+	return -ENOMEM;
+}
+
+static void nhash_count(struct hlist_head *head)
+{
+#if 0
+	unsigned long n;
+	struct hlist_node *pos;
+
+	n = 0;
+	hlist_for_each(pos, head)
+		n++;
+	pr_info("%lu\n", n);
+#endif
+}
+
+static void au_nhash_wh_do_free(struct hlist_head *head)
+{
+	struct au_vdir_wh *pos;
+	struct hlist_node *node;
+
+	hlist_for_each_entry_safe(pos, node, head, wh_hash)
+		kfree(pos);
+}
+
+static void au_nhash_de_do_free(struct hlist_head *head)
+{
+	struct au_vdir_dehstr *pos;
+	struct hlist_node *node;
+
+	hlist_for_each_entry_safe(pos, node, head, hash)
+		au_cache_free_vdir_dehstr(pos);
+}
+
+static void au_nhash_do_free(struct au_nhash *nhash,
+			     void (*free)(struct hlist_head *head))
+{
+	unsigned int n;
+	struct hlist_head *head;
+
+	n = nhash->nh_num;
+	if (!n)
+		return;
+
+	head = nhash->nh_head;
+	while (n-- > 0) {
+		nhash_count(head);
+		free(head++);
+	}
+	kfree(nhash->nh_head);
+}
+
+void au_nhash_wh_free(struct au_nhash *whlist)
+{
+	au_nhash_do_free(whlist, au_nhash_wh_do_free);
+}
+
+static void au_nhash_de_free(struct au_nhash *delist)
+{
+	au_nhash_do_free(delist, au_nhash_de_do_free);
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_nhash_test_longer_wh(struct au_nhash *whlist, aufs_bindex_t btgt,
+			    int limit)
+{
+	int num;
+	unsigned int u, n;
+	struct hlist_head *head;
+	struct au_vdir_wh *pos;
+
+	num = 0;
+	n = whlist->nh_num;
+	head = whlist->nh_head;
+	for (u = 0; u < n; u++, head++)
+		hlist_for_each_entry(pos, head, wh_hash)
+			if (pos->wh_bindex == btgt && ++num > limit)
+				return 1;
+	return 0;
+}
+
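+/*
+ * a simple additive hash over the name bytes; collisions are handled
+ * by the per-bucket hlist, so hash quality affects only lookup speed,
+ * never correctness.
+ */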
+static struct hlist_head *au_name_hash(struct au_nhash *nhash,
+				       unsigned char *name,
+				       unsigned int len)
+{
+	unsigned int v;
+	/* const unsigned int magic_bit = 12; */
+
+	AuDebugOn(!nhash->nh_num || !nhash->nh_head);
+
+	v = 0;
+	while (len--)
+		v += *name++;
+	/* v = hash_long(v, magic_bit); */
+	v %= nhash->nh_num;
+	return nhash->nh_head + v;
+}
+
+static int au_nhash_test_name(struct au_vdir_destr *str, const char *name,
+			      int nlen)
+{
+	return str->len == nlen && !memcmp(str->name, name, nlen);
+}
+
+/* returns found or not */
+int au_nhash_test_known_wh(struct au_nhash *whlist, char *name, int nlen)
+{
+	struct hlist_head *head;
+	struct au_vdir_wh *pos;
+	struct au_vdir_destr *str;
+
+	head = au_name_hash(whlist, name, nlen);
+	hlist_for_each_entry(pos, head, wh_hash) {
+		str = &pos->wh_str;
+		AuDbg("%.*s\n", str->len, str->name);
+		if (au_nhash_test_name(str, name, nlen))
+			return 1;
+	}
+	return 0;
+}
+
+/* returns found (true) or not */
+static int test_known(struct au_nhash *delist, char *name, int nlen)
+{
+	struct hlist_head *head;
+	struct au_vdir_dehstr *pos;
+	struct au_vdir_destr *str;
+
+	head = au_name_hash(delist, name, nlen);
+	hlist_for_each_entry(pos, head, hash) {
+		str = pos->str;
+		AuDbg("%.*s\n", str->len, str->name);
+		if (au_nhash_test_name(str, name, nlen))
+			return 1;
+	}
+	return 0;
+}
+
+static void au_shwh_init_wh(struct au_vdir_wh *wh, ino_t ino,
+			    unsigned char d_type)
+{
+#ifdef CONFIG_AUFS_SHWH
+	wh->wh_ino = ino;
+	wh->wh_type = d_type;
+#endif
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_nhash_append_wh(struct au_nhash *whlist, char *name, int nlen, ino_t ino,
+		       unsigned int d_type, aufs_bindex_t bindex,
+		       unsigned char shwh)
+{
+	int err;
+	struct au_vdir_destr *str;
+	struct au_vdir_wh *wh;
+
+	AuDbg("%.*s\n", nlen, name);
+	AuDebugOn(!whlist->nh_num || !whlist->nh_head);
+
+	err = -ENOMEM;
+	wh = kmalloc(sizeof(*wh) + nlen, GFP_NOFS);
+	if (unlikely(!wh))
+		goto out;
+
+	err = 0;
+	wh->wh_bindex = bindex;
+	if (shwh)
+		au_shwh_init_wh(wh, ino, d_type);
+	str = &wh->wh_str;
+	str->len = nlen;
+	memcpy(str->name, name, nlen);
+	hlist_add_head(&wh->wh_hash, au_name_hash(whlist, name, nlen));
+	/* smp_mb(); */
+
+out:
+	return err;
+}
+
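+/* grow the vdir by one deblk and make it the current (last) block */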
+static int append_deblk(struct au_vdir *vdir)
+{
+	int err;
+	unsigned long ul;
+	const unsigned int deblk_sz = vdir->vd_deblk_sz;
+	union au_vdir_deblk_p p, deblk_end;
+	unsigned char **o;
+
+	err = -ENOMEM;
+	o = krealloc(vdir->vd_deblk, sizeof(*o) * (vdir->vd_nblk + 1),
+		     GFP_NOFS);
+	if (unlikely(!o))
+		goto out;
+
+	vdir->vd_deblk = o;
+	p.deblk = kmalloc(deblk_sz, GFP_NOFS);
+	if (p.deblk) {
+		ul = vdir->vd_nblk++;
+		vdir->vd_deblk[ul] = p.deblk;
+		vdir->vd_last.ul = ul;
+		vdir->vd_last.p.deblk = p.deblk;
+		deblk_end.deblk = p.deblk + deblk_sz;
+		err = set_deblk_end(&p, &deblk_end);
+	}
+
+out:
+	return err;
+}
+
+static int append_de(struct au_vdir *vdir, char *name, int nlen, ino_t ino,
+		     unsigned int d_type, struct au_nhash *delist)
+{
+	int err;
+	unsigned int sz;
+	const unsigned int deblk_sz = vdir->vd_deblk_sz;
+	union au_vdir_deblk_p p, *room, deblk_end;
+	struct au_vdir_dehstr *dehstr;
+
+	p.deblk = last_deblk(vdir);
+	deblk_end.deblk = p.deblk + deblk_sz;
+	room = &vdir->vd_last.p;
+	AuDebugOn(room->deblk < p.deblk || deblk_end.deblk <= room->deblk
+		  || !is_deblk_end(room, &deblk_end));
+
+	sz = calc_size(nlen);
+	if (unlikely(sz > deblk_end.deblk - room->deblk)) {
+		err = append_deblk(vdir);
+		if (unlikely(err))
+			goto out;
+
+		p.deblk = last_deblk(vdir);
+		deblk_end.deblk = p.deblk + deblk_sz;
+		/* smp_mb(); */
+		AuDebugOn(room->deblk != p.deblk);
+	}
+
+	err = -ENOMEM;
+	dehstr = au_cache_alloc_vdir_dehstr();
+	if (unlikely(!dehstr))
+		goto out;
+
+	dehstr->str = &room->de->de_str;
+	hlist_add_head(&dehstr->hash, au_name_hash(delist, name, nlen));
+	room->de->de_ino = ino;
+	room->de->de_type = d_type;
+	room->de->de_str.len = nlen;
+	memcpy(room->de->de_str.name, name, nlen);
+
+	err = 0;
+	room->deblk += sz;
+	if (unlikely(set_deblk_end(room, &deblk_end)))
+		err = append_deblk(vdir);
+	/* smp_mb(); */
+
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_vdir_free(struct au_vdir *vdir)
+{
+	unsigned char **deblk;
+
+	deblk = vdir->vd_deblk;
+	while (vdir->vd_nblk--)
+		kfree(*deblk++);
+	kfree(vdir->vd_deblk);
+	au_cache_free_vdir(vdir);
+}
+
+static struct au_vdir *alloc_vdir(struct file *file)
+{
+	struct au_vdir *vdir;
+	struct super_block *sb;
+	int err;
+
+	sb = file->f_dentry->d_sb;
+	SiMustAnyLock(sb);
+
+	err = -ENOMEM;
+	vdir = au_cache_alloc_vdir();
+	if (unlikely(!vdir))
+		goto out;
+
+	vdir->vd_deblk = kzalloc(sizeof(*vdir->vd_deblk), GFP_NOFS);
+	if (unlikely(!vdir->vd_deblk))
+		goto out_free;
+
+	vdir->vd_deblk_sz = au_sbi(sb)->si_rdblk;
+	if (!vdir->vd_deblk_sz) {
+		/* estimate the appropriate size for deblk */
+		vdir->vd_deblk_sz = au_dir_size(file, /*dentry*/NULL);
+		/* pr_info("vd_deblk_sz %u\n", vdir->vd_deblk_sz); */
+	}
+	vdir->vd_nblk = 0;
+	vdir->vd_version = 0;
+	vdir->vd_jiffy = 0;
+	err = append_deblk(vdir);
+	if (!err)
+		return vdir; /* success */
+
+	kfree(vdir->vd_deblk);
+
+out_free:
+	au_cache_free_vdir(vdir);
+out:
+	vdir = ERR_PTR(err);
+	return vdir;
+}
+
+static int reinit_vdir(struct au_vdir *vdir)
+{
+	int err;
+	union au_vdir_deblk_p p, deblk_end;
+
+	while (vdir->vd_nblk > 1) {
+		kfree(vdir->vd_deblk[vdir->vd_nblk - 1]);
+		/* vdir->vd_deblk[vdir->vd_nblk - 1] = NULL; */
+		vdir->vd_nblk--;
+	}
+	p.deblk = vdir->vd_deblk[0];
+	deblk_end.deblk = p.deblk + vdir->vd_deblk_sz;
+	err = set_deblk_end(&p, &deblk_end);
+	/* keep vd_deblk_sz */
+	vdir->vd_last.ul = 0;
+	vdir->vd_last.p.deblk = vdir->vd_deblk[0];
+	vdir->vd_version = 0;
+	vdir->vd_jiffy = 0;
+	/* smp_mb(); */
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define AuFillVdir_CALLED	1
+#define AuFillVdir_WHABLE	(1 << 1)
+#define AuFillVdir_SHWH		(1 << 2)
+#define au_ftest_fillvdir(flags, name)	((flags) & AuFillVdir_##name)
+#define au_fset_fillvdir(flags, name) \
+	do { (flags) |= AuFillVdir_##name; } while (0)
+#define au_fclr_fillvdir(flags, name) \
+	do { (flags) &= ~AuFillVdir_##name; } while (0)
+
+#ifndef CONFIG_AUFS_SHWH
+#undef AuFillVdir_SHWH
+#define AuFillVdir_SHWH		0
+#endif
+
+struct fillvdir_arg {
+	struct file		*file;
+	struct au_vdir		*vdir;
+	struct au_nhash		delist;
+	struct au_nhash		whlist;
+	aufs_bindex_t		bindex;
+	unsigned int		flags;
+	int			err;
+};
+
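+/*
+ * the ->readdir() callback.  plain names are appended to the vdir and
+ * remembered in delist; names carrying the AUFS_WH_PFX prefix are
+ * whiteouts and are remembered in whlist instead (and re-exposed later
+ * by au_handle_shwh() in shwh mode).
+ */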
+static int fillvdir(void *__arg, const char *__name, int nlen,
+		    loff_t offset __maybe_unused, u64 h_ino,
+		    unsigned int d_type)
+{
+	struct fillvdir_arg *arg = __arg;
+	char *name = (void *)__name;
+	struct super_block *sb;
+	ino_t ino;
+	const unsigned char shwh = !!au_ftest_fillvdir(arg->flags, SHWH);
+
+	arg->err = 0;
+	sb = arg->file->f_dentry->d_sb;
+	au_fset_fillvdir(arg->flags, CALLED);
+	/* smp_mb(); */
+	if (nlen <= AUFS_WH_PFX_LEN
+	    || memcmp(name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) {
+		if (test_known(&arg->delist, name, nlen)
+		    || au_nhash_test_known_wh(&arg->whlist, name, nlen))
+			goto out; /* already exists or whiteouted */
+
+		sb = arg->file->f_dentry->d_sb;
+		arg->err = au_ino(sb, arg->bindex, h_ino, d_type, &ino);
+		if (!arg->err) {
+			if (unlikely(nlen > AUFS_MAX_NAMELEN))
+				d_type = DT_UNKNOWN;
+			arg->err = append_de(arg->vdir, name, nlen, ino,
+					     d_type, &arg->delist);
+		}
+	} else if (au_ftest_fillvdir(arg->flags, WHABLE)) {
+		name += AUFS_WH_PFX_LEN;
+		nlen -= AUFS_WH_PFX_LEN;
+		if (au_nhash_test_known_wh(&arg->whlist, name, nlen))
+			goto out; /* already whiteouted */
+
+		if (shwh)
+			arg->err = au_wh_ino(sb, arg->bindex, h_ino, d_type,
+					     &ino);
+		if (!arg->err) {
+			if (unlikely(nlen > AUFS_MAX_NAMELEN))
+				d_type = DT_UNKNOWN;
+			arg->err = au_nhash_append_wh
+				(&arg->whlist, name, nlen, ino, d_type,
+				 arg->bindex, shwh);
+		}
+	}
+
+out:
+	if (!arg->err)
+		arg->vdir->vd_jiffy = jiffies;
+	/* smp_mb(); */
+	AuTraceErr(arg->err);
+	return arg->err;
+}
+
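+/*
+ * shwh mode: convert the collected whiteouts back into visible
+ * ".wh." entries by rebuilding their prefixed names in a scratch page
+ * and appending them to the vdir.
+ */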
+static int au_handle_shwh(struct super_block *sb, struct au_vdir *vdir,
+			  struct au_nhash *whlist, struct au_nhash *delist)
+{
+#ifdef CONFIG_AUFS_SHWH
+	int err;
+	unsigned int nh, u;
+	struct hlist_head *head;
+	struct au_vdir_wh *pos;
+	struct hlist_node *n;
+	char *p, *o;
+	struct au_vdir_destr *destr;
+
+	AuDebugOn(!au_opt_test(au_mntflags(sb), SHWH));
+
+	err = -ENOMEM;
+	o = p = (void *)__get_free_page(GFP_NOFS);
+	if (unlikely(!p))
+		goto out;
+
+	err = 0;
+	nh = whlist->nh_num;
+	memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN);
+	p += AUFS_WH_PFX_LEN;
+	for (u = 0; u < nh; u++) {
+		head = whlist->nh_head + u;
+		hlist_for_each_entry_safe(pos, n, head, wh_hash) {
+			destr = &pos->wh_str;
+			memcpy(p, destr->name, destr->len);
+			err = append_de(vdir, o, destr->len + AUFS_WH_PFX_LEN,
+					pos->wh_ino, pos->wh_type, delist);
+			if (unlikely(err))
+				break;
+		}
+	}
+
+	free_page((unsigned long)o);
+
+out:
+	AuTraceErr(err);
+	return err;
+#else
+	return 0;
+#endif
+}
+
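+/*
+ * walk every lower directory from the top branch downwards, feeding
+ * each entry through fillvdir().  vfsub_readdir() is retried until the
+ * callback stops being invoked, since a single call may not drain the
+ * directory.
+ */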
+static int au_do_read_vdir(struct fillvdir_arg *arg)
+{
+	int err;
+	unsigned int rdhash;
+	loff_t offset;
+	aufs_bindex_t bend, bindex, bstart;
+	unsigned char shwh;
+	struct file *hf, *file;
+	struct super_block *sb;
+
+	file = arg->file;
+	sb = file->f_dentry->d_sb;
+	SiMustAnyLock(sb);
+
+	rdhash = au_sbi(sb)->si_rdhash;
+	if (!rdhash)
+		rdhash = au_rdhash_est(au_dir_size(file, /*dentry*/NULL));
+	err = au_nhash_alloc(&arg->delist, rdhash, GFP_NOFS);
+	if (unlikely(err))
+		goto out;
+	err = au_nhash_alloc(&arg->whlist, rdhash, GFP_NOFS);
+	if (unlikely(err))
+		goto out_delist;
+
+	err = 0;
+	arg->flags = 0;
+	shwh = 0;
+	if (au_opt_test(au_mntflags(sb), SHWH)) {
+		shwh = 1;
+		au_fset_fillvdir(arg->flags, SHWH);
+	}
+	bstart = au_fbstart(file);
+	bend = au_fbend_dir(file);
+	for (bindex = bstart; !err && bindex <= bend; bindex++) {
+		hf = au_hf_dir(file, bindex);
+		if (!hf)
+			continue;
+
+		offset = vfsub_llseek(hf, 0, SEEK_SET);
+		err = offset;
+		if (unlikely(offset))
+			break;
+
+		arg->bindex = bindex;
+		au_fclr_fillvdir(arg->flags, WHABLE);
+		if (shwh
+		    || (bindex != bend
+			&& au_br_whable(au_sbr_perm(sb, bindex))))
+			au_fset_fillvdir(arg->flags, WHABLE);
+		do {
+			arg->err = 0;
+			au_fclr_fillvdir(arg->flags, CALLED);
+			/* smp_mb(); */
+			err = vfsub_readdir(hf, fillvdir, arg);
+			if (err >= 0)
+				err = arg->err;
+		} while (!err && au_ftest_fillvdir(arg->flags, CALLED));
+	}
+
+	if (!err && shwh)
+		err = au_handle_shwh(sb, arg->vdir, &arg->whlist, &arg->delist);
+
+	au_nhash_wh_free(&arg->whlist);
+
+out_delist:
+	au_nhash_de_free(&arg->delist);
+out:
+	return err;
+}
+
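+/*
+ * the vdir is cached on the aufs inode and rebuilt only when i_version
+ * changes or the rdcache timeout (si_rdcache) expires.
+ */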
+static int read_vdir(struct file *file, int may_read)
+{
+	int err;
+	unsigned long expire;
+	unsigned char do_read;
+	struct fillvdir_arg arg;
+	struct inode *inode;
+	struct au_vdir *vdir, *allocated;
+
+	err = 0;
+	inode = file_inode(file);
+	IMustLock(inode);
+	SiMustAnyLock(inode->i_sb);
+
+	allocated = NULL;
+	do_read = 0;
+	expire = au_sbi(inode->i_sb)->si_rdcache;
+	vdir = au_ivdir(inode);
+	if (!vdir) {
+		do_read = 1;
+		vdir = alloc_vdir(file);
+		err = PTR_ERR(vdir);
+		if (IS_ERR(vdir))
+			goto out;
+		err = 0;
+		allocated = vdir;
+	} else if (may_read
+		   && (inode->i_version != vdir->vd_version
+		       || time_after(jiffies, vdir->vd_jiffy + expire))) {
+		do_read = 1;
+		err = reinit_vdir(vdir);
+		if (unlikely(err))
+			goto out;
+	}
+
+	if (!do_read)
+		return 0; /* success */
+
+	arg.file = file;
+	arg.vdir = vdir;
+	err = au_do_read_vdir(&arg);
+	if (!err) {
+		/* file->f_pos = 0; */
+		vdir->vd_version = inode->i_version;
+		vdir->vd_last.ul = 0;
+		vdir->vd_last.p.deblk = vdir->vd_deblk[0];
+		if (allocated)
+			au_set_ivdir(inode, allocated);
+	} else if (allocated)
+		au_vdir_free(allocated);
+
+out:
+	return err;
+}
+
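+/*
+ * duplicate the per-inode vdir into the per-file cache, reusing and
+ * growing tgt's deblk array as necessary.
+ */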
+static int copy_vdir(struct au_vdir *tgt, struct au_vdir *src)
+{
+	int err, rerr;
+	unsigned long ul, n;
+	const unsigned int deblk_sz = src->vd_deblk_sz;
+
+	AuDebugOn(tgt->vd_nblk != 1);
+
+	err = -ENOMEM;
+	if (tgt->vd_nblk < src->vd_nblk) {
+		unsigned char **p;
+
+		p = krealloc(tgt->vd_deblk, sizeof(*p) * src->vd_nblk,
+			     GFP_NOFS);
+		if (unlikely(!p))
+			goto out;
+		tgt->vd_deblk = p;
+	}
+
+	if (tgt->vd_deblk_sz != deblk_sz) {
+		unsigned char *p;
+
+		tgt->vd_deblk_sz = deblk_sz;
+		p = krealloc(tgt->vd_deblk[0], deblk_sz, GFP_NOFS);
+		if (unlikely(!p))
+			goto out;
+		tgt->vd_deblk[0] = p;
+	}
+	memcpy(tgt->vd_deblk[0], src->vd_deblk[0], deblk_sz);
+	tgt->vd_version = src->vd_version;
+	tgt->vd_jiffy = src->vd_jiffy;
+
+	n = src->vd_nblk;
+	for (ul = 1; ul < n; ul++) {
+		tgt->vd_deblk[ul] = kmemdup(src->vd_deblk[ul], deblk_sz,
+					    GFP_NOFS);
+		if (unlikely(!tgt->vd_deblk[ul]))
+			goto out;
+		tgt->vd_nblk++;
+	}
+	tgt->vd_nblk = n;
+	tgt->vd_last.ul = src->vd_last.ul;
+	tgt->vd_last.p.deblk = tgt->vd_deblk[tgt->vd_last.ul];
+	tgt->vd_last.p.deblk += src->vd_last.p.deblk
+		- src->vd_deblk[src->vd_last.ul];
+	/* smp_mb(); */
+	return 0; /* success */
+
+out:
+	rerr = reinit_vdir(tgt);
+	BUG_ON(rerr);
+	return err;
+}
+
+int au_vdir_init(struct file *file)
+{
+	int err;
+	struct inode *inode;
+	struct au_vdir *vdir_cache, *allocated;
+
+	err = read_vdir(file, !file->f_pos);
+	if (unlikely(err))
+		goto out;
+
+	allocated = NULL;
+	vdir_cache = au_fvdir_cache(file);
+	if (!vdir_cache) {
+		vdir_cache = alloc_vdir(file);
+		err = PTR_ERR(vdir_cache);
+		if (IS_ERR(vdir_cache))
+			goto out;
+		allocated = vdir_cache;
+	} else if (!file->f_pos && vdir_cache->vd_version != file->f_version) {
+		err = reinit_vdir(vdir_cache);
+		if (unlikely(err))
+			goto out;
+	} else
+		return 0; /* success */
+
+	inode = file_inode(file);
+	err = copy_vdir(vdir_cache, au_ivdir(inode));
+	if (!err) {
+		file->f_version = inode->i_version;
+		if (allocated)
+			au_set_fvdir_cache(file, allocated);
+	} else if (allocated)
+		au_vdir_free(allocated);
+
+out:
+	return err;
+}
+
+static loff_t calc_offset(struct au_vdir *vdir)
+{
+	loff_t offset;
+	union au_vdir_deblk_p p;
+
+	p.deblk = vdir->vd_deblk[vdir->vd_last.ul];
+	offset = vdir->vd_last.p.deblk - p.deblk;
+	offset += vdir->vd_deblk_sz * vdir->vd_last.ul;
+	return offset;
+}
+
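+/*
+ * position the cursor (vd_last) at file->f_pos: the block index is
+ * f_pos / deblk_sz, then the entries inside that block are walked
+ * until the byte offset matches.
+ */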
+/* returns true or false */
+static int seek_vdir(struct file *file)
+{
+	int valid;
+	unsigned int deblk_sz;
+	unsigned long ul, n;
+	loff_t offset;
+	union au_vdir_deblk_p p, deblk_end;
+	struct au_vdir *vdir_cache;
+
+	valid = 1;
+	vdir_cache = au_fvdir_cache(file);
+	offset = calc_offset(vdir_cache);
+	AuDbg("offset %lld\n", offset);
+	if (file->f_pos == offset)
+		goto out;
+
+	vdir_cache->vd_last.ul = 0;
+	vdir_cache->vd_last.p.deblk = vdir_cache->vd_deblk[0];
+	if (!file->f_pos)
+		goto out;
+
+	valid = 0;
+	deblk_sz = vdir_cache->vd_deblk_sz;
+	ul = div64_u64(file->f_pos, deblk_sz);
+	AuDbg("ul %lu\n", ul);
+	if (ul >= vdir_cache->vd_nblk)
+		goto out;
+
+	n = vdir_cache->vd_nblk;
+	for (; ul < n; ul++) {
+		p.deblk = vdir_cache->vd_deblk[ul];
+		deblk_end.deblk = p.deblk + deblk_sz;
+		offset = ul;
+		offset *= deblk_sz;
+		while (!is_deblk_end(&p, &deblk_end) && offset < file->f_pos) {
+			unsigned int l;
+
+			l = calc_size(p.de->de_str.len);
+			offset += l;
+			p.deblk += l;
+		}
+		if (!is_deblk_end(&p, &deblk_end)) {
+			valid = 1;
+			vdir_cache->vd_last.ul = ul;
+			vdir_cache->vd_last.p = p;
+			break;
+		}
+	}
+
+out:
+	/* smp_mb(); */
+	AuTraceErr(!valid);
+	return valid;
+}
+
+int au_vdir_fill_de(struct file *file, void *dirent, filldir_t filldir)
+{
+	int err;
+	unsigned int l, deblk_sz;
+	union au_vdir_deblk_p deblk_end;
+	struct au_vdir *vdir_cache;
+	struct au_vdir_de *de;
+
+	vdir_cache = au_fvdir_cache(file);
+	if (!seek_vdir(file))
+		return 0;
+
+	deblk_sz = vdir_cache->vd_deblk_sz;
+	while (1) {
+		deblk_end.deblk = vdir_cache->vd_deblk[vdir_cache->vd_last.ul];
+		deblk_end.deblk += deblk_sz;
+		while (!is_deblk_end(&vdir_cache->vd_last.p, &deblk_end)) {
+			de = vdir_cache->vd_last.p.de;
+			AuDbg("%.*s, off%lld, i%lu, dt%d\n",
+			      de->de_str.len, de->de_str.name, file->f_pos,
+			      (unsigned long)de->de_ino, de->de_type);
+			err = filldir(dirent, de->de_str.name, de->de_str.len,
+				      file->f_pos, de->de_ino, de->de_type);
+			if (unlikely(err)) {
+				AuTraceErr(err);
+				/* todo: ignore the error caused by udba? */
+				/* return err; */
+				return 0;
+			}
+
+			l = calc_size(de->de_str.len);
+			vdir_cache->vd_last.p.deblk += l;
+			file->f_pos += l;
+		}
+		if (vdir_cache->vd_last.ul < vdir_cache->vd_nblk - 1) {
+			vdir_cache->vd_last.ul++;
+			vdir_cache->vd_last.p.deblk
+				= vdir_cache->vd_deblk[vdir_cache->vd_last.ul];
+			file->f_pos = deblk_sz * vdir_cache->vd_last.ul;
+			continue;
+		}
+		break;
+	}
+
+	/* smp_mb(); */
+	return 0;
+}
diff --git a/fs/aufs/vfsub.c b/fs/aufs/vfsub.c
new file mode 100644
index 0000000..917d374
--- /dev/null
+++ b/fs/aufs/vfsub.c
@@ -0,0 +1,769 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * sub-routines for VFS
+ */
+
+#include <linux/ima.h>
+#include <linux/namei.h>
+#include <linux/security.h>
+#include <linux/splice.h>
+#include "aufs.h"
+
+int vfsub_update_h_iattr(struct path *h_path, int *did)
+{
+	int err;
+	struct kstat st;
+	struct super_block *h_sb;
+
+	/* for remote fs, leave work for its getattr or d_revalidate */
+	/* for bad i_attr fs, handle them in aufs_getattr() */
+	/* still some fs may acquire i_mutex. we need to skip them */
+	err = 0;
+	if (!did)
+		did = &err;
+	h_sb = h_path->dentry->d_sb;
+	*did = (!au_test_fs_remote(h_sb) && au_test_fs_refresh_iattr(h_sb));
+	if (*did)
+		err = vfs_getattr(h_path, &st);
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct file *vfsub_dentry_open(struct path *path, int flags)
+{
+	struct file *file;
+
+	file = dentry_open(path, flags /* | __FMODE_NONOTIFY */,
+			   current_cred());
+	if (!IS_ERR_OR_NULL(file)
+	    && (file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
+		i_readcount_inc(path->dentry->d_inode);
+
+	return file;
+}
+
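+/*
+ * many wrappers in this file bracket the VFS call with
+ * lockdep_off()/lockdep_on(): aufs re-enters the VFS for the branch fs
+ * while already holding its own locks, which would otherwise produce
+ * false-positive lockdep reports.
+ */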
+struct file *vfsub_filp_open(const char *path, int oflags, int mode)
+{
+	struct file *file;
+
+	lockdep_off();
+	file = filp_open(path,
+			 oflags /* | __FMODE_NONOTIFY */,
+			 mode);
+	lockdep_on();
+	if (IS_ERR(file))
+		goto out;
+	vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
+
+out:
+	return file;
+}
+
+int vfsub_kern_path(const char *name, unsigned int flags, struct path *path)
+{
+	int err;
+
+	err = kern_path(name, flags, path);
+	if (!err && path->dentry->d_inode)
+		vfsub_update_h_iattr(path, /*did*/NULL); /*ignore*/
+	return err;
+}
+
+struct dentry *vfsub_lookup_one_len(const char *name, struct dentry *parent,
+				    int len)
+{
+	struct path path = {
+		.mnt = NULL
+	};
+
+	/* VFS checks it too, but by WARN_ON_ONCE() */
+	IMustLock(parent->d_inode);
+
+	path.dentry = lookup_one_len(name, parent, len);
+	if (IS_ERR(path.dentry))
+		goto out;
+	if (path.dentry->d_inode)
+		vfsub_update_h_iattr(&path, /*did*/NULL); /*ignore*/
+
+out:
+	AuTraceErrPtr(path.dentry);
+	return path.dentry;
+}
+
+void vfsub_call_lkup_one(void *args)
+{
+	struct vfsub_lkup_one_args *a = args;
+	*a->errp = vfsub_lkup_one(a->name, a->parent);
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct dentry *vfsub_lock_rename(struct dentry *d1, struct au_hinode *hdir1,
+				 struct dentry *d2, struct au_hinode *hdir2)
+{
+	struct dentry *d;
+
+	lockdep_off();
+	d = lock_rename(d1, d2);
+	lockdep_on();
+	au_hn_suspend(hdir1);
+	if (hdir1 != hdir2)
+		au_hn_suspend(hdir2);
+
+	return d;
+}
+
+void vfsub_unlock_rename(struct dentry *d1, struct au_hinode *hdir1,
+			 struct dentry *d2, struct au_hinode *hdir2)
+{
+	au_hn_resume(hdir1);
+	if (hdir1 != hdir2)
+		au_hn_resume(hdir2);
+	lockdep_off();
+	unlock_rename(d1, d2);
+	lockdep_on();
+}
+
+/* ---------------------------------------------------------------------- */
+
+int vfsub_create(struct inode *dir, struct path *path, int mode, bool want_excl)
+{
+	int err;
+	struct dentry *d;
+
+	IMustLock(dir);
+
+	d = path->dentry;
+	path->dentry = d->d_parent;
+	err = security_path_mknod(path, d, mode, 0);
+	path->dentry = d;
+	if (unlikely(err))
+		goto out;
+
+	err = vfs_create(dir, path->dentry, mode, want_excl);
+	if (!err) {
+		struct path tmp = *path;
+		int did;
+
+		vfsub_update_h_iattr(&tmp, &did);
+		if (did) {
+			tmp.dentry = path->dentry->d_parent;
+			vfsub_update_h_iattr(&tmp, /*did*/NULL);
+		}
+		/*ignore*/
+	}
+
+out:
+	return err;
+}
+
+int vfsub_symlink(struct inode *dir, struct path *path, const char *symname)
+{
+	int err;
+	struct dentry *d;
+
+	IMustLock(dir);
+
+	d = path->dentry;
+	path->dentry = d->d_parent;
+	err = security_path_symlink(path, d, symname);
+	path->dentry = d;
+	if (unlikely(err))
+		goto out;
+
+	err = vfs_symlink(dir, path->dentry, symname);
+	if (!err) {
+		struct path tmp = *path;
+		int did;
+
+		vfsub_update_h_iattr(&tmp, &did);
+		if (did) {
+			tmp.dentry = path->dentry->d_parent;
+			vfsub_update_h_iattr(&tmp, /*did*/NULL);
+		}
+		/*ignore*/
+	}
+
+out:
+	return err;
+}
+
+int vfsub_mknod(struct inode *dir, struct path *path, int mode, dev_t dev)
+{
+	int err;
+	struct dentry *d;
+
+	IMustLock(dir);
+
+	d = path->dentry;
+	path->dentry = d->d_parent;
+	err = security_path_mknod(path, d, mode, new_encode_dev(dev));
+	path->dentry = d;
+	if (unlikely(err))
+		goto out;
+
+	err = vfs_mknod(dir, path->dentry, mode, dev);
+	if (!err) {
+		struct path tmp = *path;
+		int did;
+
+		vfsub_update_h_iattr(&tmp, &did);
+		if (did) {
+			tmp.dentry = path->dentry->d_parent;
+			vfsub_update_h_iattr(&tmp, /*did*/NULL);
+		}
+		/*ignore*/
+	}
+
+out:
+	return err;
+}
+
+static int au_test_nlink(struct inode *inode)
+{
+	const unsigned int link_max = UINT_MAX >> 1; /* rough margin */
+
+	if (!au_test_fs_no_limit_nlink(inode->i_sb)
+	    || inode->i_nlink < link_max)
+		return 0;
+	return -EMLINK;
+}
+
+int vfsub_link(struct dentry *src_dentry, struct inode *dir, struct path *path)
+{
+	int err;
+	struct dentry *d;
+
+	IMustLock(dir);
+
+	err = au_test_nlink(src_dentry->d_inode);
+	if (unlikely(err))
+		return err;
+
+	/* we don't call may_linkat() */
+	d = path->dentry;
+	path->dentry = d->d_parent;
+	err = security_path_link(src_dentry, path, d);
+	path->dentry = d;
+	if (unlikely(err))
+		goto out;
+
+	lockdep_off();
+	err = vfs_link(src_dentry, dir, path->dentry);
+	lockdep_on();
+	if (!err) {
+		struct path tmp = *path;
+		int did;
+
+		/* fuse may use a different in-memory inode for the same inumber */
+		vfsub_update_h_iattr(&tmp, &did);
+		if (did) {
+			tmp.dentry = path->dentry->d_parent;
+			vfsub_update_h_iattr(&tmp, /*did*/NULL);
+			tmp.dentry = src_dentry;
+			vfsub_update_h_iattr(&tmp, /*did*/NULL);
+		}
+		/*ignore*/
+	}
+
+out:
+	return err;
+}
+
+int vfsub_rename(struct inode *src_dir, struct dentry *src_dentry,
+		 struct inode *dir, struct path *path)
+{
+	int err;
+	struct path tmp = {
+		.mnt	= path->mnt
+	};
+	struct dentry *d;
+
+	IMustLock(dir);
+	IMustLock(src_dir);
+
+	d = path->dentry;
+	path->dentry = d->d_parent;
+	tmp.dentry = src_dentry->d_parent;
+	err = security_path_rename(&tmp, src_dentry, path, d);
+	path->dentry = d;
+	if (unlikely(err))
+		goto out;
+
+	lockdep_off();
+	err = vfs_rename(src_dir, src_dentry, dir, path->dentry);
+	lockdep_on();
+	if (!err) {
+		int did;
+
+		tmp.dentry = d->d_parent;
+		vfsub_update_h_iattr(&tmp, &did);
+		if (did) {
+			tmp.dentry = src_dentry;
+			vfsub_update_h_iattr(&tmp, /*did*/NULL);
+			tmp.dentry = src_dentry->d_parent;
+			vfsub_update_h_iattr(&tmp, /*did*/NULL);
+		}
+		/*ignore*/
+	}
+
+out:
+	return err;
+}
+
+int vfsub_mkdir(struct inode *dir, struct path *path, int mode)
+{
+	int err;
+	struct dentry *d;
+
+	IMustLock(dir);
+
+	d = path->dentry;
+	path->dentry = d->d_parent;
+	err = security_path_mkdir(path, d, mode);
+	path->dentry = d;
+	if (unlikely(err))
+		goto out;
+
+	err = vfs_mkdir(dir, path->dentry, mode);
+	if (!err) {
+		struct path tmp = *path;
+		int did;
+
+		vfsub_update_h_iattr(&tmp, &did);
+		if (did) {
+			tmp.dentry = path->dentry->d_parent;
+			vfsub_update_h_iattr(&tmp, /*did*/NULL);
+		}
+		/*ignore*/
+	}
+
+out:
+	return err;
+}
+
+int vfsub_rmdir(struct inode *dir, struct path *path)
+{
+	int err;
+	struct dentry *d;
+
+	IMustLock(dir);
+
+	d = path->dentry;
+	path->dentry = d->d_parent;
+	err = security_path_rmdir(path, d);
+	path->dentry = d;
+	if (unlikely(err))
+		goto out;
+
+	lockdep_off();
+	err = vfs_rmdir(dir, path->dentry);
+	lockdep_on();
+	if (!err) {
+		struct path tmp = {
+			.dentry	= path->dentry->d_parent,
+			.mnt	= path->mnt
+		};
+
+		vfsub_update_h_iattr(&tmp, /*did*/NULL); /*ignore*/
+	}
+
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* todo: support mmap_sem? */
+ssize_t vfsub_read_u(struct file *file, char __user *ubuf, size_t count,
+		     loff_t *ppos)
+{
+	ssize_t err;
+
+	lockdep_off();
+	err = vfs_read(file, ubuf, count, ppos);
+	lockdep_on();
+	if (err >= 0)
+		vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
+	return err;
+}
+
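+/*
+ * read into a kernel buffer by temporarily widening the usercopy
+ * address limit with set_fs(KERNEL_DS); see the todo below about
+ * switching to kernel_read().
+ */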
+/* todo: kernel_read()? */
+ssize_t vfsub_read_k(struct file *file, void *kbuf, size_t count,
+		     loff_t *ppos)
+{
+	ssize_t err;
+	mm_segment_t oldfs;
+	union {
+		void *k;
+		char __user *u;
+	} buf;
+
+	buf.k = kbuf;
+	oldfs = get_fs();
+	set_fs(KERNEL_DS);
+	err = vfsub_read_u(file, buf.u, count, ppos);
+	set_fs(oldfs);
+	return err;
+}
+
+ssize_t vfsub_write_u(struct file *file, const char __user *ubuf, size_t count,
+		      loff_t *ppos)
+{
+	ssize_t err;
+
+	lockdep_off();
+	err = vfs_write(file, ubuf, count, ppos);
+	lockdep_on();
+	if (err >= 0)
+		vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
+	return err;
+}
+
+ssize_t vfsub_write_k(struct file *file, void *kbuf, size_t count, loff_t *ppos)
+{
+	ssize_t err;
+	mm_segment_t oldfs;
+	union {
+		void *k;
+		const char __user *u;
+	} buf;
+
+	buf.k = kbuf;
+	oldfs = get_fs();
+	set_fs(KERNEL_DS);
+	err = vfsub_write_u(file, buf.u, count, ppos);
+	set_fs(oldfs);
+	return err;
+}
+
+int vfsub_flush(struct file *file, fl_owner_t id)
+{
+	int err;
+
+	err = 0;
+	if (file->f_op && file->f_op->flush) {
+		if (!au_test_nfs(file->f_dentry->d_sb))
+			err = file->f_op->flush(file, id);
+		else {
+			lockdep_off();
+			err = file->f_op->flush(file, id);
+			lockdep_on();
+		}
+		if (!err)
+			vfsub_update_h_iattr(&file->f_path, /*did*/NULL);
+		/*ignore*/
+	}
+	return err;
+}
+
+int vfsub_readdir(struct file *file, filldir_t filldir, void *arg)
+{
+	int err;
+
+	lockdep_off();
+	err = vfs_readdir(file, filldir, arg);
+	lockdep_on();
+	if (err >= 0)
+		vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
+	return err;
+}
+
+long vfsub_splice_to(struct file *in, loff_t *ppos,
+		     struct pipe_inode_info *pipe, size_t len,
+		     unsigned int flags)
+{
+	long err;
+
+	lockdep_off();
+	err = do_splice_to(in, ppos, pipe, len, flags);
+	lockdep_on();
+	file_accessed(in);
+	if (err >= 0)
+		vfsub_update_h_iattr(&in->f_path, /*did*/NULL); /*ignore*/
+	return err;
+}
+
+long vfsub_splice_from(struct pipe_inode_info *pipe, struct file *out,
+		       loff_t *ppos, size_t len, unsigned int flags)
+{
+	long err;
+
+	lockdep_off();
+	err = do_splice_from(pipe, out, ppos, len, flags);
+	lockdep_on();
+	if (err >= 0)
+		vfsub_update_h_iattr(&out->f_path, /*did*/NULL); /*ignore*/
+	return err;
+}
+
+int vfsub_fsync(struct file *file, struct path *path, int datasync)
+{
+	int err;
+
+	/* file can be NULL */
+	lockdep_off();
+	err = vfs_fsync(file, datasync);
+	lockdep_on();
+	if (!err) {
+		if (!path) {
+			AuDebugOn(!file);
+			path = &file->f_path;
+		}
+		vfsub_update_h_iattr(path, /*did*/NULL); /*ignore*/
+	}
+	return err;
+}
+
+/* cf. open.c:do_sys_truncate() and do_sys_ftruncate() */
+int vfsub_trunc(struct path *h_path, loff_t length, unsigned int attr,
+		struct file *h_file)
+{
+	int err;
+	struct inode *h_inode;
+	struct super_block *h_sb;
+
+	if (!h_file) {
+		err = vfsub_truncate(h_path, length);
+		goto out;
+	}
+
+	h_inode = h_path->dentry->d_inode;
+	h_sb = h_inode->i_sb;
+	lockdep_off();
+	sb_start_write(h_sb);
+	lockdep_on();
+	err = locks_verify_truncate(h_inode, h_file, length);
+	if (!err)
+		err = security_path_truncate(h_path);
+	if (!err) {
+		lockdep_off();
+		err = do_truncate(h_path->dentry, length, attr, h_file);
+		lockdep_on();
+	}
+	lockdep_off();
+	sb_end_write(h_sb);
+	lockdep_on();
+
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_vfsub_mkdir_args {
+	int *errp;
+	struct inode *dir;
+	struct path *path;
+	int mode;
+};
+
+static void au_call_vfsub_mkdir(void *args)
+{
+	struct au_vfsub_mkdir_args *a = args;
+	*a->errp = vfsub_mkdir(a->dir, a->path, a->mode);
+}
+
+int vfsub_sio_mkdir(struct inode *dir, struct path *path, int mode)
+{
+	int err, do_sio, wkq_err;
+
+	do_sio = au_test_h_perm_sio(dir, MAY_EXEC | MAY_WRITE);
+	if (!do_sio)
+		err = vfsub_mkdir(dir, path, mode);
+	else {
+		struct au_vfsub_mkdir_args args = {
+			.errp	= &err,
+			.dir	= dir,
+			.path	= path,
+			.mode	= mode
+		};
+		wkq_err = au_wkq_wait(au_call_vfsub_mkdir, &args);
+		if (unlikely(wkq_err))
+			err = wkq_err;
+	}
+
+	return err;
+}
+
+struct au_vfsub_rmdir_args {
+	int *errp;
+	struct inode *dir;
+	struct path *path;
+};
+
+static void au_call_vfsub_rmdir(void *args)
+{
+	struct au_vfsub_rmdir_args *a = args;
+	*a->errp = vfsub_rmdir(a->dir, a->path);
+}
+
+int vfsub_sio_rmdir(struct inode *dir, struct path *path)
+{
+	int err, do_sio, wkq_err;
+
+	do_sio = au_test_h_perm_sio(dir, MAY_EXEC | MAY_WRITE);
+	if (!do_sio)
+		err = vfsub_rmdir(dir, path);
+	else {
+		struct au_vfsub_rmdir_args args = {
+			.errp	= &err,
+			.dir	= dir,
+			.path	= path
+		};
+		wkq_err = au_wkq_wait(au_call_vfsub_rmdir, &args);
+		if (unlikely(wkq_err))
+			err = wkq_err;
+	}
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct notify_change_args {
+	int *errp;
+	struct path *path;
+	struct iattr *ia;
+};
+
+static void call_notify_change(void *args)
+{
+	struct notify_change_args *a = args;
+	struct inode *h_inode;
+
+	h_inode = a->path->dentry->d_inode;
+	IMustLock(h_inode);
+
+	*a->errp = -EPERM;
+	if (!IS_IMMUTABLE(h_inode) && !IS_APPEND(h_inode)) {
+		*a->errp = notify_change(a->path->dentry, a->ia);
+		if (!*a->errp)
+			vfsub_update_h_iattr(a->path, /*did*/NULL); /*ignore*/
+	}
+	AuTraceErr(*a->errp);
+}
+
+int vfsub_notify_change(struct path *path, struct iattr *ia)
+{
+	int err;
+	struct notify_change_args args = {
+		.errp	= &err,
+		.path	= path,
+		.ia	= ia
+	};
+
+	call_notify_change(&args);
+
+	return err;
+}
+
+int vfsub_sio_notify_change(struct path *path, struct iattr *ia)
+{
+	int err, wkq_err;
+	struct notify_change_args args = {
+		.errp	= &err,
+		.path	= path,
+		.ia	= ia
+	};
+
+	wkq_err = au_wkq_wait(call_notify_change, &args);
+	if (unlikely(wkq_err))
+		err = wkq_err;
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct unlink_args {
+	int *errp;
+	struct inode *dir;
+	struct path *path;
+};
+
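+/*
+ * unlink the lower dentry.  extra dentry/inode references keep the
+ * inode alive across the unlink, except on NFS with d_count == 1 where
+ * an extra dget() would trigger silly-rename.
+ */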
+static void call_unlink(void *args)
+{
+	struct unlink_args *a = args;
+	struct dentry *d = a->path->dentry;
+	struct inode *h_inode;
+	const int stop_sillyrename = (au_test_nfs(d->d_sb)
+				      && d->d_count == 1);
+
+	IMustLock(a->dir);
+
+	a->path->dentry = d->d_parent;
+	*a->errp = security_path_unlink(a->path, d);
+	a->path->dentry = d;
+	if (unlikely(*a->errp))
+		return;
+
+	if (!stop_sillyrename)
+		dget(d);
+	h_inode = d->d_inode;
+	if (h_inode)
+		ihold(h_inode);
+
+	lockdep_off();
+	*a->errp = vfs_unlink(a->dir, d);
+	lockdep_on();
+	if (!*a->errp) {
+		struct path tmp = {
+			.dentry = d->d_parent,
+			.mnt	= a->path->mnt
+		};
+		vfsub_update_h_iattr(&tmp, /*did*/NULL); /*ignore*/
+	}
+
+	if (!stop_sillyrename)
+		dput(d);
+	if (h_inode)
+		iput(h_inode);
+
+	AuTraceErr(*a->errp);
+}
+
+/*
+ * @dir: must be locked.
+ * @path: the target to unlink.
+ * @force: when set, the unlink runs via au_wkq_wait().
+ */
+int vfsub_unlink(struct inode *dir, struct path *path, int force)
+{
+	int err;
+	struct unlink_args args = {
+		.errp	= &err,
+		.dir	= dir,
+		.path	= path
+	};
+
+	if (!force)
+		call_unlink(&args);
+	else {
+		int wkq_err;
+
+		wkq_err = au_wkq_wait(call_unlink, &args);
+		if (unlikely(wkq_err))
+			err = wkq_err;
+	}
+
+	return err;
+}
diff --git a/fs/aufs/vfsub.h b/fs/aufs/vfsub.h
new file mode 100644
index 0000000..5a50a62
--- /dev/null
+++ b/fs/aufs/vfsub.h
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * sub-routines for VFS
+ */
+
+#ifndef __AUFS_VFSUB_H__
+#define __AUFS_VFSUB_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+#include <linux/lglock.h>
+#include <linux/mount.h>
+#include "debug.h"
+
+/* copied from linux/fs/internal.h */
+/* todo: BAD approach!! */
+extern struct lglock vfsmount_lock;
+extern void __mnt_drop_write(struct vfsmount *);
+extern spinlock_t inode_sb_list_lock;
+
+/* copied from linux/fs/file_table.c */
+extern struct lglock files_lglock;
+#ifdef CONFIG_SMP
+/*
+ * These macros iterate all files on all CPUs for a given superblock.
+ * files_lglock must be held globally.
+ */
+#define do_file_list_for_each_entry(__sb, __file)		\
+{								\
+	int i;							\
+	for_each_possible_cpu(i) {				\
+		struct list_head *list;				\
+		list = per_cpu_ptr((__sb)->s_files, i);		\
+		list_for_each_entry((__file), list, f_u.fu_list)
+
+#define while_file_list_for_each_entry				\
+	}							\
+}
+
+#else
+
+#define do_file_list_for_each_entry(__sb, __file)		\
+{								\
+	struct list_head *list;					\
+	list = &(__sb)->s_files;				\
+	list_for_each_entry((__file), list, f_u.fu_list)
+
+#define while_file_list_for_each_entry				\
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+/* lock subclass for lower inode */
+/* default MAX_LOCKDEP_SUBCLASSES(8) is not enough */
+/* reduce? gave up. */
+enum {
+	AuLsc_I_Begin = I_MUTEX_QUOTA, /* 4 */
+	AuLsc_I_PARENT,		/* lower inode, parent first */
+	AuLsc_I_PARENT2,	/* copyup dirs */
+	AuLsc_I_PARENT3,	/* copyup wh */
+	AuLsc_I_CHILD,
+	AuLsc_I_CHILD2,
+	AuLsc_I_End
+};
+
+/* to debug easier, do not make them inlined functions */
+#define MtxMustLock(mtx)	AuDebugOn(!mutex_is_locked(mtx))
+#define IMustLock(i)		MtxMustLock(&(i)->i_mutex)
+
+/* ---------------------------------------------------------------------- */
+
+static inline void vfsub_drop_nlink(struct inode *inode)
+{
+	AuDebugOn(!inode->i_nlink);
+	drop_nlink(inode);
+}
+
+static inline void vfsub_dead_dir(struct inode *inode)
+{
+	AuDebugOn(!S_ISDIR(inode->i_mode));
+	inode->i_flags |= S_DEAD;
+	clear_nlink(inode);
+}
+
+/* ---------------------------------------------------------------------- */
+
+int vfsub_update_h_iattr(struct path *h_path, int *did);
+struct file *vfsub_dentry_open(struct path *path, int flags);
+struct file *vfsub_filp_open(const char *path, int oflags, int mode);
+int vfsub_kern_path(const char *name, unsigned int flags, struct path *path);
+
+struct dentry *vfsub_lookup_one_len(const char *name, struct dentry *parent,
+				    int len);
+
+struct vfsub_lkup_one_args {
+	struct dentry **errp;
+	struct qstr *name;
+	struct dentry *parent;
+};
+
+static inline struct dentry *vfsub_lkup_one(struct qstr *name,
+					    struct dentry *parent)
+{
+	return vfsub_lookup_one_len(name->name, parent, name->len);
+}
+
+void vfsub_call_lkup_one(void *args);
+
+/* ---------------------------------------------------------------------- */
+
+static inline int vfsub_mnt_want_write(struct vfsmount *mnt)
+{
+	int err;
+	lockdep_off();
+	err = mnt_want_write(mnt);
+	lockdep_on();
+	return err;
+}
+
+static inline void vfsub_mnt_drop_write(struct vfsmount *mnt)
+{
+	lockdep_off();
+	mnt_drop_write(mnt);
+	lockdep_on();
+}
+
+static inline void vfsub_mnt_drop_write_file(struct file *file)
+{
+	lockdep_off();
+	mnt_drop_write_file(file);
+	lockdep_on();
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_hinode;
+struct dentry *vfsub_lock_rename(struct dentry *d1, struct au_hinode *hdir1,
+				 struct dentry *d2, struct au_hinode *hdir2);
+void vfsub_unlock_rename(struct dentry *d1, struct au_hinode *hdir1,
+			 struct dentry *d2, struct au_hinode *hdir2);
+
+int vfsub_create(struct inode *dir, struct path *path, int mode,
+		 bool want_excl);
+int vfsub_symlink(struct inode *dir, struct path *path,
+		  const char *symname);
+int vfsub_mknod(struct inode *dir, struct path *path, int mode, dev_t dev);
+int vfsub_link(struct dentry *src_dentry, struct inode *dir,
+	       struct path *path);
+int vfsub_rename(struct inode *src_hdir, struct dentry *src_dentry,
+		 struct inode *hdir, struct path *path);
+int vfsub_mkdir(struct inode *dir, struct path *path, int mode);
+int vfsub_rmdir(struct inode *dir, struct path *path);
+
+/* ---------------------------------------------------------------------- */
+
+ssize_t vfsub_read_u(struct file *file, char __user *ubuf, size_t count,
+		     loff_t *ppos);
+ssize_t vfsub_read_k(struct file *file, void *kbuf, size_t count,
+			loff_t *ppos);
+ssize_t vfsub_write_u(struct file *file, const char __user *ubuf, size_t count,
+		      loff_t *ppos);
+ssize_t vfsub_write_k(struct file *file, void *kbuf, size_t count,
+		      loff_t *ppos);
+int vfsub_flush(struct file *file, fl_owner_t id);
+int vfsub_readdir(struct file *file, filldir_t filldir, void *arg);
+
+static inline loff_t vfsub_f_size_read(struct file *file)
+{
+	return i_size_read(file_inode(file));
+}
+
+static inline unsigned int vfsub_file_flags(struct file *file)
+{
+	unsigned int flags;
+
+	spin_lock(&file->f_lock);
+	flags = file->f_flags;
+	spin_unlock(&file->f_lock);
+
+	return flags;
+}
+
+static inline void vfsub_file_accessed(struct file *h_file)
+{
+	file_accessed(h_file);
+	vfsub_update_h_iattr(&h_file->f_path, /*did*/NULL); /*ignore*/
+}
+
+static inline void vfsub_touch_atime(struct vfsmount *h_mnt,
+				     struct dentry *h_dentry)
+{
+	struct path h_path = {
+		.dentry	= h_dentry,
+		.mnt	= h_mnt
+	};
+	touch_atime(&h_path);
+	vfsub_update_h_iattr(&h_path, /*did*/NULL); /*ignore*/
+}
+
+static inline int vfsub_update_time(struct inode *h_inode, struct timespec *ts,
+				    int flags)
+{
+	return update_time(h_inode, ts, flags);
+	/* no vfsub_update_h_iattr() since we don't have struct path */
+}
+
+long vfsub_splice_to(struct file *in, loff_t *ppos,
+		     struct pipe_inode_info *pipe, size_t len,
+		     unsigned int flags);
+long vfsub_splice_from(struct pipe_inode_info *pipe, struct file *out,
+		       loff_t *ppos, size_t len, unsigned int flags);
+
+static inline long vfsub_truncate(struct path *path, loff_t length)
+{
+	long err;
+	lockdep_off();
+	err = vfs_truncate(path, length);
+	lockdep_on();
+	return err;
+}
+
+int vfsub_trunc(struct path *h_path, loff_t length, unsigned int attr,
+		struct file *h_file);
+int vfsub_fsync(struct file *file, struct path *path, int datasync);
+
+/* ---------------------------------------------------------------------- */
+
+static inline loff_t vfsub_llseek(struct file *file, loff_t offset, int origin)
+{
+	loff_t err;
+
+	lockdep_off();
+	err = vfs_llseek(file, offset, origin);
+	lockdep_on();
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * dirty workaround for the strict (bitwise) type of fmode_t: sparse
+ * forbids mixing it with plain integers, so launder the bits through a
+ * union.
+ */
+union vfsub_fmu {
+	fmode_t fm;
+	unsigned int ui;
+};
+
+static inline unsigned int vfsub_fmode_to_uint(fmode_t fm)
+{
+	union vfsub_fmu u = {
+		.fm = fm
+	};
+
+	BUILD_BUG_ON(sizeof(u.fm) != sizeof(u.ui));
+
+	return u.ui;
+}
+
+static inline fmode_t vfsub_uint_to_fmode(unsigned int ui)
+{
+	union vfsub_fmu u = {
+		.ui = ui
+	};
+
+	return u.fm;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int vfsub_sio_mkdir(struct inode *dir, struct path *path, int mode);
+int vfsub_sio_rmdir(struct inode *dir, struct path *path);
+int vfsub_sio_notify_change(struct path *path, struct iattr *ia);
+int vfsub_notify_change(struct path *path, struct iattr *ia);
+int vfsub_unlink(struct inode *dir, struct path *path, int force);
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_VFSUB_H__ */
diff --git a/fs/aufs/wbr_policy.c b/fs/aufs/wbr_policy.c
new file mode 100644
index 0000000..ef01a2c
--- /dev/null
+++ b/fs/aufs/wbr_policy.c
@@ -0,0 +1,693 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * policies for selecting one among multiple writable branches
+ */
+
+#include <linux/statfs.h>
+#include "aufs.h"
+
+/* subset of cpup_attr() */
+static noinline_for_stack
+int au_cpdown_attr(struct path *h_path, struct dentry *h_src)
+{
+	int err, sbits;
+	struct iattr ia;
+	struct inode *h_isrc;
+
+	h_isrc = h_src->d_inode;
+	ia.ia_valid = ATTR_FORCE | ATTR_MODE | ATTR_UID | ATTR_GID;
+	ia.ia_mode = h_isrc->i_mode;
+	ia.ia_uid = h_isrc->i_uid;
+	ia.ia_gid = h_isrc->i_gid;
+	sbits = !!(ia.ia_mode & (S_ISUID | S_ISGID));
+	au_cpup_attr_flags(h_path->dentry->d_inode, h_isrc->i_flags);
+	err = vfsub_sio_notify_change(h_path, &ia);
+
+	/* is this nfs only? */
+	if (!err && sbits && au_test_nfs(h_path->dentry->d_sb)) {
+		ia.ia_valid = ATTR_FORCE | ATTR_MODE;
+		ia.ia_mode = h_isrc->i_mode;
+		err = vfsub_sio_notify_change(h_path, &ia);
+	}
+
+	return err;
+}
+
+#define AuCpdown_PARENT_OPQ	1
+#define AuCpdown_WHED		(1 << 1)
+#define AuCpdown_MADE_DIR	(1 << 2)
+#define AuCpdown_DIROPQ		(1 << 3)
+#define au_ftest_cpdown(flags, name)	((flags) & AuCpdown_##name)
+#define au_fset_cpdown(flags, name) \
+	do { (flags) |= AuCpdown_##name; } while (0)
+#define au_fclr_cpdown(flags, name) \
+	do { (flags) &= ~AuCpdown_##name; } while (0)
+
+static int au_cpdown_dir_opq(struct dentry *dentry, aufs_bindex_t bdst,
+			     unsigned int *flags)
+{
+	int err;
+	struct dentry *opq_dentry;
+
+	opq_dentry = au_diropq_create(dentry, bdst);
+	err = PTR_ERR(opq_dentry);
+	if (IS_ERR(opq_dentry))
+		goto out;
+	dput(opq_dentry);
+	au_fset_cpdown(*flags, DIROPQ);
+
+out:
+	return err;
+}
+
+static int au_cpdown_dir_wh(struct dentry *dentry, struct dentry *h_parent,
+			    struct inode *dir, aufs_bindex_t bdst)
+{
+	int err;
+	struct path h_path;
+	struct au_branch *br;
+
+	br = au_sbr(dentry->d_sb, bdst);
+	h_path.dentry = au_wh_lkup(h_parent, &dentry->d_name, br);
+	err = PTR_ERR(h_path.dentry);
+	if (IS_ERR(h_path.dentry))
+		goto out;
+
+	err = 0;
+	if (h_path.dentry->d_inode) {
+		h_path.mnt = au_br_mnt(br);
+		err = au_wh_unlink_dentry(au_h_iptr(dir, bdst), &h_path,
+					  dentry);
+	}
+	dput(h_path.dentry);
+
+out:
+	return err;
+}
+
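+/*
+ * copy one ancestor directory down to the branch bdst, reproducing its
+ * mode/owner, diropq and whiteout state; on failure each completed
+ * step is reverted.
+ */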
+static int au_cpdown_dir(struct dentry *dentry, aufs_bindex_t bdst,
+			 struct au_pin *pin,
+			 struct dentry *h_parent, void *arg)
+{
+	int err, rerr;
+	aufs_bindex_t bopq, bstart;
+	struct path h_path;
+	struct dentry *parent;
+	struct inode *h_dir, *h_inode, *inode, *dir;
+	unsigned int *flags = arg;
+
+	bstart = au_dbstart(dentry);
+	/* dentry is di-locked */
+	parent = dget_parent(dentry);
+	dir = parent->d_inode;
+	h_dir = h_parent->d_inode;
+	AuDebugOn(h_dir != au_h_iptr(dir, bdst));
+	IMustLock(h_dir);
+
+	err = au_lkup_neg(dentry, bdst, /*wh*/0);
+	if (unlikely(err < 0))
+		goto out;
+	h_path.dentry = au_h_dptr(dentry, bdst);
+	h_path.mnt = au_sbr_mnt(dentry->d_sb, bdst);
+	err = vfsub_sio_mkdir(au_h_iptr(dir, bdst), &h_path,
+			      S_IRWXU | S_IRUGO | S_IXUGO);
+	if (unlikely(err))
+		goto out_put;
+	au_fset_cpdown(*flags, MADE_DIR);
+
+	bopq = au_dbdiropq(dentry);
+	au_fclr_cpdown(*flags, WHED);
+	au_fclr_cpdown(*flags, DIROPQ);
+	if (au_dbwh(dentry) == bdst)
+		au_fset_cpdown(*flags, WHED);
+	if (!au_ftest_cpdown(*flags, PARENT_OPQ) && bopq <= bdst)
+		au_fset_cpdown(*flags, PARENT_OPQ);
+	h_inode = h_path.dentry->d_inode;
+	mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD);
+	if (au_ftest_cpdown(*flags, WHED)) {
+		err = au_cpdown_dir_opq(dentry, bdst, flags);
+		if (unlikely(err)) {
+			mutex_unlock(&h_inode->i_mutex);
+			goto out_dir;
+		}
+	}
+
+	err = au_cpdown_attr(&h_path, au_h_dptr(dentry, bstart));
+	mutex_unlock(&h_inode->i_mutex);
+	if (unlikely(err))
+		goto out_opq;
+
+	if (au_ftest_cpdown(*flags, WHED)) {
+		err = au_cpdown_dir_wh(dentry, h_parent, dir, bdst);
+		if (unlikely(err))
+			goto out_opq;
+	}
+
+	inode = dentry->d_inode;
+	if (au_ibend(inode) < bdst)
+		au_set_ibend(inode, bdst);
+	au_set_h_iptr(inode, bdst, au_igrab(h_inode),
+		      au_hi_flags(inode, /*isdir*/1));
+	goto out; /* success */
+
+	/* revert */
+out_opq:
+	if (au_ftest_cpdown(*flags, DIROPQ)) {
+		mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD);
+		rerr = au_diropq_remove(dentry, bdst);
+		mutex_unlock(&h_inode->i_mutex);
+		if (unlikely(rerr)) {
+			AuIOErr("failed removing diropq for %.*s b%d (%d)\n",
+				AuDLNPair(dentry), bdst, rerr);
+			err = -EIO;
+			goto out;
+		}
+	}
+out_dir:
+	if (au_ftest_cpdown(*flags, MADE_DIR)) {
+		rerr = vfsub_sio_rmdir(au_h_iptr(dir, bdst), &h_path);
+		if (unlikely(rerr)) {
+			AuIOErr("failed removing %.*s b%d (%d)\n",
+				AuDLNPair(dentry), bdst, rerr);
+			err = -EIO;
+		}
+	}
+out_put:
+	au_set_h_dptr(dentry, bdst, NULL);
+	if (au_dbend(dentry) == bdst)
+		au_update_dbend(dentry);
+out:
+	dput(parent);
+	return err;
+}
+
+int au_cpdown_dirs(struct dentry *dentry, aufs_bindex_t bdst)
+{
+	int err;
+	unsigned int flags;
+
+	flags = 0;
+	err = au_cp_dirs(dentry, bdst, au_cpdown_dir, &flags);
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* policies for create */
+
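+/*
+ * cap the branch chosen by a create policy at the topmost ancestor
+ * diropq: creating the entry below an opaque directory would leave it
+ * invisible.
+ */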
+int au_wbr_nonopq(struct dentry *dentry, aufs_bindex_t bindex)
+{
+	int err, i, j, ndentry;
+	aufs_bindex_t bopq;
+	struct au_dcsub_pages dpages;
+	struct au_dpage *dpage;
+	struct dentry **dentries, *parent, *d;
+
+	err = au_dpages_init(&dpages, GFP_NOFS);
+	if (unlikely(err))
+		goto out;
+	parent = dget_parent(dentry);
+	err = au_dcsub_pages_rev_aufs(&dpages, parent, /*do_include*/0);
+	if (unlikely(err))
+		goto out_free;
+
+	err = bindex;
+	for (i = 0; i < dpages.ndpage; i++) {
+		dpage = dpages.dpages + i;
+		dentries = dpage->dentries;
+		ndentry = dpage->ndentry;
+		for (j = 0; j < ndentry; j++) {
+			d = dentries[j];
+			di_read_lock_parent2(d, !AuLock_IR);
+			bopq = au_dbdiropq(d);
+			di_read_unlock(d, !AuLock_IR);
+			if (bopq >= 0 && bopq < err)
+				err = bopq;
+		}
+	}
+
+out_free:
+	dput(parent);
+	au_dpages_free(&dpages);
+out:
+	return err;
+}
+
+static int au_wbr_bu(struct super_block *sb, aufs_bindex_t bindex)
+{
+	for (; bindex >= 0; bindex--)
+		if (!au_br_rdonly(au_sbr(sb, bindex)))
+			return bindex;
+	return -EROFS;
+}
+
+/* top down parent */
+static int au_wbr_create_tdp(struct dentry *dentry, int isdir __maybe_unused)
+{
+	int err;
+	aufs_bindex_t bstart, bindex;
+	struct super_block *sb;
+	struct dentry *parent, *h_parent;
+
+	sb = dentry->d_sb;
+	bstart = au_dbstart(dentry);
+	err = bstart;
+	if (!au_br_rdonly(au_sbr(sb, bstart)))
+		goto out;
+
+	err = -EROFS;
+	parent = dget_parent(dentry);
+	for (bindex = au_dbstart(parent); bindex < bstart; bindex++) {
+		h_parent = au_h_dptr(parent, bindex);
+		if (!h_parent || !h_parent->d_inode)
+			continue;
+
+		if (!au_br_rdonly(au_sbr(sb, bindex))) {
+			err = bindex;
+			break;
+		}
+	}
+	dput(parent);
+
+	/* bottom up here */
+	if (unlikely(err < 0)) {
+		err = au_wbr_bu(sb, bstart - 1);
+		if (err >= 0)
+			err = au_wbr_nonopq(dentry, err);
+	}
+
+out:
+	AuDbg("b%d\n", err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * an exception applied before the policies other than tdp: if the
+ * dentry already has a whiteout, or its parent an opaque mark, prefer
+ * that branch so the new entry stays consistent with them.
+ */
+static int au_wbr_create_exp(struct dentry *dentry)
+{
+	int err;
+	aufs_bindex_t bwh, bdiropq;
+	struct dentry *parent;
+
+	err = -1;
+	bwh = au_dbwh(dentry);
+	parent = dget_parent(dentry);
+	bdiropq = au_dbdiropq(parent);
+	if (bwh >= 0) {
+		if (bdiropq >= 0)
+			err = min(bdiropq, bwh);
+		else
+			err = bwh;
+		AuDbg("%d\n", err);
+	} else if (bdiropq >= 0) {
+		err = bdiropq;
+		AuDbg("%d\n", err);
+	}
+	dput(parent);
+
+	if (err >= 0)
+		err = au_wbr_nonopq(dentry, err);
+
+	if (err >= 0 && au_br_rdonly(au_sbr(dentry->d_sb, err)))
+		err = -1;
+
+	AuDbg("%d\n", err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* round robin */
+static int au_wbr_create_init_rr(struct super_block *sb)
+{
+	int err;
+
+	err = au_wbr_bu(sb, au_sbend(sb));
+	atomic_set(&au_sbi(sb)->si_wbr_rr_next, -err); /* less important */
+	/* smp_mb(); */
+
+	AuDbg("b%d\n", err);
+	return err;
+}
+
+static int au_wbr_create_rr(struct dentry *dentry, int isdir)
+{
+	int err, nbr;
+	unsigned int u;
+	aufs_bindex_t bindex, bend;
+	struct super_block *sb;
+	atomic_t *next;
+
+	err = au_wbr_create_exp(dentry);
+	if (err >= 0)
+		goto out;
+
+	sb = dentry->d_sb;
+	next = &au_sbi(sb)->si_wbr_rr_next;
+	bend = au_sbend(sb);
+	nbr = bend + 1;
+	for (bindex = 0; bindex <= bend; bindex++) {
+		if (!isdir) {
+			err = atomic_dec_return(next) + 1;
+			/* modulo for 0 is meaningless */
+			if (unlikely(!err))
+				err = atomic_dec_return(next) + 1;
+		} else
+			err = atomic_read(next);
+		AuDbg("%d\n", err);
+		u = err;
+		err = u % nbr;
+		AuDbg("%d\n", err);
+		if (!au_br_rdonly(au_sbr(sb, err)))
+			break;
+		err = -EROFS;
+	}
+
+	if (err >= 0)
+		err = au_wbr_nonopq(dentry, err);
+
+out:
+	AuDbg("%d\n", err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* most free space */
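+/*
+ * poll every writable branch with statfs and remember the one with the
+ * most available bytes; the result is cached in si_wbr_mfs and only
+ * refreshed after mfs_expire jiffies or when the cached branch becomes
+ * read-only (see au_wbr_create_mfs()).
+ */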
+static void au_mfs(struct dentry *dentry)
+{
+	struct super_block *sb;
+	struct au_branch *br;
+	struct au_wbr_mfs *mfs;
+	aufs_bindex_t bindex, bend;
+	int err;
+	unsigned long long b, bavail;
+	struct path h_path;
+	/* reduce the stack usage */
+	struct kstatfs *st;
+
+	st = kmalloc(sizeof(*st), GFP_NOFS);
+	if (unlikely(!st)) {
+		AuWarn1("failed updating mfs(%d), ignored\n", -ENOMEM);
+		return;
+	}
+
+	bavail = 0;
+	sb = dentry->d_sb;
+	mfs = &au_sbi(sb)->si_wbr_mfs;
+	MtxMustLock(&mfs->mfs_lock);
+	mfs->mfs_bindex = -EROFS;
+	mfs->mfsrr_bytes = 0;
+	bend = au_sbend(sb);
+	for (bindex = 0; bindex <= bend; bindex++) {
+		br = au_sbr(sb, bindex);
+		if (au_br_rdonly(br))
+			continue;
+
+		/* sb->s_root for NFS is unreliable */
+		h_path.mnt = au_br_mnt(br);
+		h_path.dentry = h_path.mnt->mnt_root;
+		err = vfs_statfs(&h_path, st);
+		if (unlikely(err)) {
+			AuWarn1("failed statfs, b%d, %d\n", bindex, err);
+			continue;
+		}
+
+		/* when the available sizes are equal, select the lower one */
+		BUILD_BUG_ON(sizeof(b) < sizeof(st->f_bavail)
+			     || sizeof(b) < sizeof(st->f_bsize));
+		b = st->f_bavail * st->f_bsize;
+		br->br_wbr->wbr_bytes = b;
+		if (b >= bavail) {
+			bavail = b;
+			mfs->mfs_bindex = bindex;
+			mfs->mfs_jiffy = jiffies;
+		}
+	}
+
+	mfs->mfsrr_bytes = bavail;
+	AuDbg("b%d\n", mfs->mfs_bindex);
+	kfree(st);
+}
+
+static int au_wbr_create_mfs(struct dentry *dentry, int isdir __maybe_unused)
+{
+	int err;
+	struct super_block *sb;
+	struct au_wbr_mfs *mfs;
+
+	err = au_wbr_create_exp(dentry);
+	if (err >= 0)
+		goto out;
+
+	sb = dentry->d_sb;
+	mfs = &au_sbi(sb)->si_wbr_mfs;
+	mutex_lock(&mfs->mfs_lock);
+	if (time_after(jiffies, mfs->mfs_jiffy + mfs->mfs_expire)
+	    || mfs->mfs_bindex < 0
+	    || au_br_rdonly(au_sbr(sb, mfs->mfs_bindex)))
+		au_mfs(dentry);
+	mutex_unlock(&mfs->mfs_lock);
+	err = mfs->mfs_bindex;
+
+	if (err >= 0)
+		err = au_wbr_nonopq(dentry, err);
+
+out:
+	AuDbg("b%d\n", err);
+	return err;
+}
+
+static int au_wbr_create_init_mfs(struct super_block *sb)
+{
+	struct au_wbr_mfs *mfs;
+
+	mfs = &au_sbi(sb)->si_wbr_mfs;
+	mutex_init(&mfs->mfs_lock);
+	mfs->mfs_jiffy = 0;
+	mfs->mfs_bindex = -EROFS;
+
+	return 0;
+}
+
+static int au_wbr_create_fin_mfs(struct super_block *sb __maybe_unused)
+{
+	mutex_destroy(&au_sbi(sb)->si_wbr_mfs.mfs_lock);
+	return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* most free space and then round robin */
+static int au_wbr_create_mfsrr(struct dentry *dentry, int isdir)
+{
+	int err;
+	struct au_wbr_mfs *mfs;
+
+	err = au_wbr_create_mfs(dentry, isdir);
+	if (err >= 0) {
+		mfs = &au_sbi(dentry->d_sb)->si_wbr_mfs;
+		mutex_lock(&mfs->mfs_lock);
+		if (mfs->mfsrr_bytes < mfs->mfsrr_watermark)
+			err = au_wbr_create_rr(dentry, isdir);
+		mutex_unlock(&mfs->mfs_lock);
+	}
+
+	AuDbg("b%d\n", err);
+	return err;
+}
+
+static int au_wbr_create_init_mfsrr(struct super_block *sb)
+{
+	int err;
+
+	au_wbr_create_init_mfs(sb); /* ignore */
+	err = au_wbr_create_init_rr(sb);
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* top down parent and most free space */
+static int au_wbr_create_pmfs(struct dentry *dentry, int isdir)
+{
+	int err, e2;
+	unsigned long long b;
+	aufs_bindex_t bindex, bstart, bend;
+	struct super_block *sb;
+	struct dentry *parent, *h_parent;
+	struct au_branch *br;
+
+	err = au_wbr_create_tdp(dentry, isdir);
+	if (unlikely(err < 0))
+		goto out;
+	parent = dget_parent(dentry);
+	bstart = au_dbstart(parent);
+	bend = au_dbtaildir(parent);
+	if (bstart == bend)
+		goto out_parent; /* success */
+
+	e2 = au_wbr_create_mfs(dentry, isdir);
+	if (e2 < 0)
+		goto out_parent; /* success */
+
+	/* when the available size is equal, select the upper one */
+	sb = dentry->d_sb;
+	br = au_sbr(sb, err);
+	b = br->br_wbr->wbr_bytes;
+	AuDbg("b%d, %llu\n", err, b);
+
+	for (bindex = bstart; bindex <= bend; bindex++) {
+		h_parent = au_h_dptr(parent, bindex);
+		if (!h_parent || !h_parent->d_inode)
+			continue;
+
+		br = au_sbr(sb, bindex);
+		if (!au_br_rdonly(br) && br->br_wbr->wbr_bytes > b) {
+			b = br->br_wbr->wbr_bytes;
+			err = bindex;
+			AuDbg("b%d, %llu\n", err, b);
+		}
+	}
+
+	if (err >= 0)
+		err = au_wbr_nonopq(dentry, err);
+
+out_parent:
+	dput(parent);
+out:
+	AuDbg("b%d\n", err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* policies for copyup */
+
+/* top down parent */
+static int au_wbr_copyup_tdp(struct dentry *dentry)
+{
+	return au_wbr_create_tdp(dentry, /*isdir, anything is ok*/0);
+}
+
+/* bottom up parent */
+static int au_wbr_copyup_bup(struct dentry *dentry)
+{
+	int err;
+	aufs_bindex_t bindex, bstart;
+	struct dentry *parent, *h_parent;
+	struct super_block *sb;
+
+	err = -EROFS;
+	sb = dentry->d_sb;
+	parent = dget_parent(dentry);
+	bstart = au_dbstart(parent);
+	for (bindex = au_dbstart(dentry); bindex >= bstart; bindex--) {
+		h_parent = au_h_dptr(parent, bindex);
+		if (!h_parent || !h_parent->d_inode)
+			continue;
+
+		if (!au_br_rdonly(au_sbr(sb, bindex))) {
+			err = bindex;
+			break;
+		}
+	}
+	dput(parent);
+
+	/* bottom up here */
+	if (unlikely(err < 0))
+		err = au_wbr_bu(sb, bstart - 1);
+
+	AuDbg("b%d\n", err);
+	return err;
+}
+
+/* bottom up */
+static int au_wbr_copyup_bu(struct dentry *dentry)
+{
+	int err;
+	aufs_bindex_t bstart;
+
+	bstart = au_dbstart(dentry);
+	err = au_wbr_bu(dentry->d_sb, bstart);
+	AuDbg("b%d\n", err);
+	if (err > bstart)
+		err = au_wbr_nonopq(dentry, err);
+
+	AuDbg("b%d\n", err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_wbr_copyup_operations au_wbr_copyup_ops[] = {
+	[AuWbrCopyup_TDP] = {
+		.copyup	= au_wbr_copyup_tdp
+	},
+	[AuWbrCopyup_BUP] = {
+		.copyup	= au_wbr_copyup_bup
+	},
+	[AuWbrCopyup_BU] = {
+		.copyup	= au_wbr_copyup_bu
+	}
+};
+
+struct au_wbr_create_operations au_wbr_create_ops[] = {
+	[AuWbrCreate_TDP] = {
+		.create	= au_wbr_create_tdp
+	},
+	[AuWbrCreate_RR] = {
+		.create	= au_wbr_create_rr,
+		.init	= au_wbr_create_init_rr
+	},
+	[AuWbrCreate_MFS] = {
+		.create	= au_wbr_create_mfs,
+		.init	= au_wbr_create_init_mfs,
+		.fin	= au_wbr_create_fin_mfs
+	},
+	[AuWbrCreate_MFSV] = {
+		.create	= au_wbr_create_mfs,
+		.init	= au_wbr_create_init_mfs,
+		.fin	= au_wbr_create_fin_mfs
+	},
+	[AuWbrCreate_MFSRR] = {
+		.create	= au_wbr_create_mfsrr,
+		.init	= au_wbr_create_init_mfsrr,
+		.fin	= au_wbr_create_fin_mfs
+	},
+	[AuWbrCreate_MFSRRV] = {
+		.create	= au_wbr_create_mfsrr,
+		.init	= au_wbr_create_init_mfsrr,
+		.fin	= au_wbr_create_fin_mfs
+	},
+	[AuWbrCreate_PMFS] = {
+		.create	= au_wbr_create_pmfs,
+		.init	= au_wbr_create_init_mfs,
+		.fin	= au_wbr_create_fin_mfs
+	},
+	[AuWbrCreate_PMFSV] = {
+		.create	= au_wbr_create_pmfs,
+		.init	= au_wbr_create_init_mfs,
+		.fin	= au_wbr_create_fin_mfs
+	}
+};
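+
+/*
+ * dispatch sketch: at mount time one entry of each table above is
+ * selected according to the mount options, and the callers invoke it
+ * indirectly.  a minimal sketch, assuming sbinfo fields named like the
+ * rest of aufs (illustration only, not the callers' exact code):
+ *
+ *	bindex = au_sbi(sb)->si_wbr_create_ops->create(dentry, isdir);
+ *	(bindex >= 0: the branch index to create on,
+ *	 bindex < 0: a negative errno such as -EROFS)
+ */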
diff --git a/fs/aufs/whout.c b/fs/aufs/whout.c
new file mode 100644
index 0000000..65711da
--- /dev/null
+++ b/fs/aufs/whout.c
@@ -0,0 +1,1022 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * whiteout for logical deletion and opaque directory
+ */
+
+#include "aufs.h"
+
+#define WH_MASK			S_IRUGO
+
+/*
+ * If a directory contains this file, then it is opaque.  The name starts
+ * with the .wh. prefix so that it is filtered out by lookup.
+ */
+static struct qstr diropq_name = QSTR_INIT(AUFS_WH_DIROPQ,
+					   sizeof(AUFS_WH_DIROPQ) - 1);
+
+/*
+ * generate the whiteout name, which is NOT NUL-terminated.
+ * @wh: whiteout qstr to be filled
+ * @name: original d_name (its name and len are used)
+ * returns zero on success, otherwise an error.
+ * on success, wh->name should be freed by kfree().
+ */
+int au_wh_name_alloc(struct qstr *wh, const struct qstr *name)
+{
+	char *p;
+
+	if (unlikely(name->len > PATH_MAX - AUFS_WH_PFX_LEN))
+		return -ENAMETOOLONG;
+
+	wh->len = name->len + AUFS_WH_PFX_LEN;
+	p = kmalloc(wh->len, GFP_NOFS);
+	wh->name = p;
+	if (p) {
+		memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN);
+		memcpy(p + AUFS_WH_PFX_LEN, name->name, name->len);
+		/* smp_mb(); */
+		return 0;
+	}
+	return -ENOMEM;
+}
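+
+/*
+ * buffer layout sketch (assuming AUFS_WH_PFX is ".wh." and
+ * AUFS_WH_PFX_LEN is 4): for @name "foo" with len 3,
+ *
+ *	wh->name = ".wh.foo", wh->len = 7
+ *
+ * with no trailing NUL, hence the qstr-based lookups below.
+ */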
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * test whether @wh_name exists under @h_parent.
+ * @try_sio specifies whether super-io is necessary.
+ */
+int au_wh_test(struct dentry *h_parent, struct qstr *wh_name,
+	       struct au_branch *br, int try_sio)
+{
+	int err;
+	struct dentry *wh_dentry;
+
+	if (!try_sio)
+		wh_dentry = vfsub_lkup_one(wh_name, h_parent);
+	else
+		wh_dentry = au_sio_lkup_one(wh_name, h_parent, br);
+	err = PTR_ERR(wh_dentry);
+	if (IS_ERR(wh_dentry))
+		goto out;
+
+	err = 0;
+	if (!wh_dentry->d_inode)
+		goto out_wh; /* success */
+
+	err = 1;
+	if (S_ISREG(wh_dentry->d_inode->i_mode))
+		goto out_wh; /* success */
+
+	err = -EIO;
+	AuIOErr("%.*s Invalid whiteout entry type 0%o.\n",
+		AuDLNPair(wh_dentry), wh_dentry->d_inode->i_mode);
+
+out_wh:
+	dput(wh_dentry);
+out:
+	return err;
+}
+
+/*
+ * test whether @h_dentry is marked opaque or not.
+ */
+int au_diropq_test(struct dentry *h_dentry, struct au_branch *br)
+{
+	int err;
+	struct inode *h_dir;
+
+	h_dir = h_dentry->d_inode;
+	err = au_wh_test(h_dentry, &diropq_name, br,
+			 au_test_h_perm_sio(h_dir, MAY_EXEC));
+	return err;
+}
+
+/*
+ * returns a negative dentry whose name is unique and temporary.
+ */
+struct dentry *au_whtmp_lkup(struct dentry *h_parent, struct au_branch *br,
+			     struct qstr *prefix)
+{
+	struct dentry *dentry;
+	int i;
+	char defname[NAME_MAX - AUFS_MAX_NAMELEN + DNAME_INLINE_LEN + 1],
+		*name, *p;
+	/* strict atomic_t is unnecessary here */
+	static unsigned short cnt;
+	struct qstr qs;
+
+	BUILD_BUG_ON(sizeof(cnt) * 2 > AUFS_WH_TMP_LEN);
+
+	name = defname;
+	qs.len = sizeof(defname) - DNAME_INLINE_LEN + prefix->len - 1;
+	if (unlikely(prefix->len > DNAME_INLINE_LEN)) {
+		dentry = ERR_PTR(-ENAMETOOLONG);
+		if (unlikely(qs.len > NAME_MAX))
+			goto out;
+		dentry = ERR_PTR(-ENOMEM);
+		name = kmalloc(qs.len + 1, GFP_NOFS);
+		if (unlikely(!name))
+			goto out;
+	}
+
+	/* doubly whiteout-ed */
+	memcpy(name, AUFS_WH_PFX AUFS_WH_PFX, AUFS_WH_PFX_LEN * 2);
+	p = name + AUFS_WH_PFX_LEN * 2;
+	memcpy(p, prefix->name, prefix->len);
+	p += prefix->len;
+	*p++ = '.';
+	AuDebugOn(name + qs.len + 1 - p <= AUFS_WH_TMP_LEN);
+
+	qs.name = name;
+	for (i = 0; i < 3; i++) {
+		sprintf(p, "%.*x", AUFS_WH_TMP_LEN, cnt++);
+		dentry = au_sio_lkup_one(&qs, h_parent, br);
+		if (IS_ERR(dentry) || !dentry->d_inode)
+			goto out_name;
+		dput(dentry);
+	}
+	/* pr_warn("could not get random name\n"); */
+	dentry = ERR_PTR(-EEXIST);
+	AuDbg("%.*s\n", AuLNPair(&qs));
+	BUG();
+
+out_name:
+	if (name != defname)
+		kfree(name);
+out:
+	AuTraceErrPtr(dentry);
+	return dentry;
+}
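+
+/*
+ * generated-name sketch (assuming AUFS_WH_PFX is ".wh."): for @prefix
+ * "foo" the candidates look like
+ *
+ *	.wh..wh.foo.<hex>
+ *
+ * where <hex> is AUFS_WH_TMP_LEN hex digits of the shared counter.
+ * the doubled prefix keeps the temporary name itself whiteout-ed, so
+ * it stays hidden from aufs lookup.
+ */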
+
+/*
+ * rename the @h_dentry on @br to the whiteouted temporary name.
+ */
+int au_whtmp_ren(struct dentry *h_dentry, struct au_branch *br)
+{
+	int err;
+	struct path h_path = {
+		.mnt = au_br_mnt(br)
+	};
+	struct inode *h_dir;
+	struct dentry *h_parent;
+
+	h_parent = h_dentry->d_parent; /* dir inode is locked */
+	h_dir = h_parent->d_inode;
+	IMustLock(h_dir);
+
+	h_path.dentry = au_whtmp_lkup(h_parent, br, &h_dentry->d_name);
+	err = PTR_ERR(h_path.dentry);
+	if (IS_ERR(h_path.dentry))
+		goto out;
+
+	/* under the same dir, no need to lock_rename() */
+	err = vfsub_rename(h_dir, h_dentry, h_dir, &h_path);
+	AuTraceErr(err);
+	dput(h_path.dentry);
+
+out:
+	AuTraceErr(err);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * functions for removing a whiteout
+ */
+
+static int do_unlink_wh(struct inode *h_dir, struct path *h_path)
+{
+	int force;
+
+	/*
+	 * forces superio when the dir has a sticky bit.
+	 * this may be a violation of unix fs semantics.
+	 */
+	force = (h_dir->i_mode & S_ISVTX)
+		&& !uid_eq(current_fsuid(), h_path->dentry->d_inode->i_uid);
+	return vfsub_unlink(h_dir, h_path, force);
+}
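+
+/*
+ * example of the sticky-bit rule above: in a 1777 directory the VFS
+ * rejects unlinking another user's file.  whiteouts created via
+ * super-io may be owned by root, so a plain user's unlink of "its"
+ * whiteout would fail the sticky check; forcing super-io again lets
+ * aufs remove it.
+ */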
+
+int au_wh_unlink_dentry(struct inode *h_dir, struct path *h_path,
+			struct dentry *dentry)
+{
+	int err;
+
+	err = do_unlink_wh(h_dir, h_path);
+	if (!err && dentry)
+		au_set_dbwh(dentry, -1);
+
+	return err;
+}
+
+static int unlink_wh_name(struct dentry *h_parent, struct qstr *wh,
+			  struct au_branch *br)
+{
+	int err;
+	struct path h_path = {
+		.mnt = au_br_mnt(br)
+	};
+
+	err = 0;
+	h_path.dentry = vfsub_lkup_one(wh, h_parent);
+	if (IS_ERR(h_path.dentry))
+		err = PTR_ERR(h_path.dentry);
+	else {
+		if (h_path.dentry->d_inode
+		    && S_ISREG(h_path.dentry->d_inode->i_mode))
+			err = do_unlink_wh(h_parent->d_inode, &h_path);
+		dput(h_path.dentry);
+	}
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * initialize/clean whiteout for a branch
+ */
+
+static void au_wh_clean(struct inode *h_dir, struct path *whpath,
+			const int isdir)
+{
+	int err;
+
+	if (!whpath->dentry->d_inode)
+		return;
+
+	if (isdir)
+		err = vfsub_rmdir(h_dir, whpath);
+	else
+		err = vfsub_unlink(h_dir, whpath, /*force*/0);
+	if (unlikely(err))
+		pr_warn("failed removing %.*s (%d), ignored.\n",
+			AuDLNPair(whpath->dentry), err);
+}
+
+static int test_linkable(struct dentry *h_root)
+{
+	struct inode *h_dir = h_root->d_inode;
+
+	if (h_dir->i_op->link)
+		return 0;
+
+	pr_err("%.*s (%s) doesn't support link(2), use noplink and rw+nolwh\n",
+	       AuDLNPair(h_root), au_sbtype(h_root->d_sb));
+	return -ENOSYS;
+}
+
+/* todo: should this mkdir be done in /sbin/mount.aufs helper? */
+static int au_whdir(struct inode *h_dir, struct path *path)
+{
+	int err;
+
+	err = -EEXIST;
+	if (!path->dentry->d_inode) {
+		int mode = S_IRWXU;
+
+		if (au_test_nfs(path->dentry->d_sb))
+			mode |= S_IXUGO;
+		err = vfsub_mkdir(h_dir, path, mode);
+	} else if (S_ISDIR(path->dentry->d_inode->i_mode))
+		err = 0;
+	else
+		pr_err("unknown %.*s exists\n", AuDLNPair(path->dentry));
+
+	return err;
+}
+
+struct au_wh_base {
+	const struct qstr *name;
+	struct dentry *dentry;
+};
+
+static void au_wh_init_ro(struct inode *h_dir, struct au_wh_base base[],
+			  struct path *h_path)
+{
+	h_path->dentry = base[AuBrWh_BASE].dentry;
+	au_wh_clean(h_dir, h_path, /*isdir*/0);
+	h_path->dentry = base[AuBrWh_PLINK].dentry;
+	au_wh_clean(h_dir, h_path, /*isdir*/1);
+	h_path->dentry = base[AuBrWh_ORPH].dentry;
+	au_wh_clean(h_dir, h_path, /*isdir*/1);
+}
+
+/*
+ * returns tri-state:
+ * minus: error, the caller should print the message
+ * zero: success
+ * plus: error, the caller should NOT print the message
+ */
+static int au_wh_init_rw_nolink(struct dentry *h_root, struct au_wbr *wbr,
+				int do_plink, struct au_wh_base base[],
+				struct path *h_path)
+{
+	int err;
+	struct inode *h_dir;
+
+	h_dir = h_root->d_inode;
+	h_path->dentry = base[AuBrWh_BASE].dentry;
+	au_wh_clean(h_dir, h_path, /*isdir*/0);
+	h_path->dentry = base[AuBrWh_PLINK].dentry;
+	if (do_plink) {
+		err = test_linkable(h_root);
+		if (unlikely(err)) {
+			err = 1;
+			goto out;
+		}
+
+		err = au_whdir(h_dir, h_path);
+		if (unlikely(err))
+			goto out;
+		wbr->wbr_plink = dget(base[AuBrWh_PLINK].dentry);
+	} else
+		au_wh_clean(h_dir, h_path, /*isdir*/1);
+	h_path->dentry = base[AuBrWh_ORPH].dentry;
+	err = au_whdir(h_dir, h_path);
+	if (unlikely(err))
+		goto out;
+	wbr->wbr_orph = dget(base[AuBrWh_ORPH].dentry);
+
+out:
+	return err;
+}
+
+/*
+ * for the moment, aufs supports branch filesystems which do not support
+ * link(2). in a test on FAT, which does not fully support i_op->setattr()
+ * either, copyup failed. eventually such a filesystem should not be used
+ * as a writable branch.
+ *
+ * returns tri-state, see above.
+ */
+static int au_wh_init_rw(struct dentry *h_root, struct au_wbr *wbr,
+			 int do_plink, struct au_wh_base base[],
+			 struct path *h_path)
+{
+	int err;
+	struct inode *h_dir;
+
+	WbrWhMustWriteLock(wbr);
+
+	err = test_linkable(h_root);
+	if (unlikely(err)) {
+		err = 1;
+		goto out;
+	}
+
+	/*
+	 * todo: should this create be done in /sbin/mount.aufs helper?
+	 */
+	err = -EEXIST;
+	h_dir = h_root->d_inode;
+	if (!base[AuBrWh_BASE].dentry->d_inode) {
+		h_path->dentry = base[AuBrWh_BASE].dentry;
+		err = vfsub_create(h_dir, h_path, WH_MASK, /*want_excl*/true);
+	} else if (S_ISREG(base[AuBrWh_BASE].dentry->d_inode->i_mode))
+		err = 0;
+	else
+		pr_err("unknown %.*s/%.*s exists\n",
+		       AuDLNPair(h_root), AuDLNPair(base[AuBrWh_BASE].dentry));
+	if (unlikely(err))
+		goto out;
+
+	h_path->dentry = base[AuBrWh_PLINK].dentry;
+	if (do_plink) {
+		err = au_whdir(h_dir, h_path);
+		if (unlikely(err))
+			goto out;
+		wbr->wbr_plink = dget(base[AuBrWh_PLINK].dentry);
+	} else
+		au_wh_clean(h_dir, h_path, /*isdir*/1);
+	wbr->wbr_whbase = dget(base[AuBrWh_BASE].dentry);
+
+	h_path->dentry = base[AuBrWh_ORPH].dentry;
+	err = au_whdir(h_dir, h_path);
+	if (unlikely(err))
+		goto out;
+	wbr->wbr_orph = dget(base[AuBrWh_ORPH].dentry);
+
+out:
+	return err;
+}
+
+/*
+ * initialize the whiteout base file/dir for @br.
+ */
+int au_wh_init(struct au_branch *br, struct super_block *sb)
+{
+	int err, i;
+	const unsigned char do_plink
+		= !!au_opt_test(au_mntflags(sb), PLINK);
+	struct inode *h_dir;
+	struct path path = br->br_path;
+	struct dentry *h_root = path.dentry;
+	struct au_wbr *wbr = br->br_wbr;
+	static const struct qstr base_name[] = {
+		[AuBrWh_BASE] = QSTR_INIT(AUFS_BASE_NAME,
+					  sizeof(AUFS_BASE_NAME) - 1),
+		[AuBrWh_PLINK] = QSTR_INIT(AUFS_PLINKDIR_NAME,
+					   sizeof(AUFS_PLINKDIR_NAME) - 1),
+		[AuBrWh_ORPH] = QSTR_INIT(AUFS_ORPHDIR_NAME,
+					  sizeof(AUFS_ORPHDIR_NAME) - 1)
+	};
+	struct au_wh_base base[] = {
+		[AuBrWh_BASE] = {
+			.name	= base_name + AuBrWh_BASE,
+			.dentry	= NULL
+		},
+		[AuBrWh_PLINK] = {
+			.name	= base_name + AuBrWh_PLINK,
+			.dentry	= NULL
+		},
+		[AuBrWh_ORPH] = {
+			.name	= base_name + AuBrWh_ORPH,
+			.dentry	= NULL
+		}
+	};
+
+	if (wbr)
+		WbrWhMustWriteLock(wbr);
+
+	for (i = 0; i < AuBrWh_Last; i++) {
+		/* doubly whiteouted */
+		struct dentry *d;
+
+		d = au_wh_lkup(h_root, (void *)base[i].name, br);
+		err = PTR_ERR(d);
+		if (IS_ERR(d))
+			goto out;
+
+		base[i].dentry = d;
+		AuDebugOn(wbr
+			  && wbr->wbr_wh[i]
+			  && wbr->wbr_wh[i] != base[i].dentry);
+	}
+
+	if (wbr)
+		for (i = 0; i < AuBrWh_Last; i++) {
+			dput(wbr->wbr_wh[i]);
+			wbr->wbr_wh[i] = NULL;
+		}
+
+	err = 0;
+	if (!au_br_writable(br->br_perm)) {
+		h_dir = h_root->d_inode;
+		au_wh_init_ro(h_dir, base, &path);
+	} else if (!au_br_wh_linkable(br->br_perm)) {
+		err = au_wh_init_rw_nolink(h_root, wbr, do_plink, base, &path);
+		if (err > 0)
+			goto out;
+		else if (err)
+			goto out_err;
+	} else {
+		err = au_wh_init_rw(h_root, wbr, do_plink, base, &path);
+		if (err > 0)
+			goto out;
+		else if (err)
+			goto out_err;
+	}
+	goto out; /* success */
+
+out_err:
+	pr_err("an error(%d) on the writable branch %.*s(%s)\n",
+	       err, AuDLNPair(h_root), au_sbtype(h_root->d_sb));
+out:
+	for (i = 0; i < AuBrWh_Last; i++)
+		dput(base[i].dentry);
+	return err;
+}
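+
+/*
+ * resulting layout sketch for a writable branch root after au_wh_init()
+ * (the literal names come from AUFS_BASE_NAME, AUFS_PLINKDIR_NAME and
+ * AUFS_ORPHDIR_NAME; the spellings below are illustration only):
+ *
+ *	<branch>/.wh..wh.aufs	whiteout base file (hard-link source)
+ *	<branch>/.wh..wh.plnk/	pseudo-link dir, when plink is enabled
+ *	<branch>/.wh..wh.orph/	dir for whiteout-ed but still open dirs
+ */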
+
+/* ---------------------------------------------------------------------- */
+/*
+ * usually all whiteouts are hard-linked to one base file.
+ * when its link count reaches the ceiling, we create a new whiteout base
+ * asynchronously.
+ */
+
+struct reinit_br_wh {
+	struct super_block *sb;
+	struct au_branch *br;
+};
+
+static void reinit_br_wh(void *arg)
+{
+	int err;
+	aufs_bindex_t bindex;
+	struct path h_path;
+	struct reinit_br_wh *a = arg;
+	struct au_wbr *wbr;
+	struct inode *dir;
+	struct dentry *h_root;
+	struct au_hinode *hdir;
+
+	err = 0;
+	wbr = a->br->br_wbr;
+	/* big aufs lock */
+	si_noflush_write_lock(a->sb);
+	if (!au_br_writable(a->br->br_perm))
+		goto out;
+	bindex = au_br_index(a->sb, a->br->br_id);
+	if (unlikely(bindex < 0))
+		goto out;
+
+	di_read_lock_parent(a->sb->s_root, AuLock_IR);
+	dir = a->sb->s_root->d_inode;
+	hdir = au_hi(dir, bindex);
+	h_root = au_h_dptr(a->sb->s_root, bindex);
+	AuDebugOn(h_root != au_br_dentry(a->br));
+
+	au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT);
+	wbr_wh_write_lock(wbr);
+	err = au_h_verify(wbr->wbr_whbase, au_opt_udba(a->sb), hdir->hi_inode,
+			  h_root, a->br);
+	if (!err) {
+		h_path.dentry = wbr->wbr_whbase;
+		h_path.mnt = au_br_mnt(a->br);
+		err = vfsub_unlink(hdir->hi_inode, &h_path, /*force*/0);
+	} else {
+		pr_warn("%.*s is moved, ignored\n",
+			AuDLNPair(wbr->wbr_whbase));
+		err = 0;
+	}
+	dput(wbr->wbr_whbase);
+	wbr->wbr_whbase = NULL;
+	if (!err)
+		err = au_wh_init(a->br, a->sb);
+	wbr_wh_write_unlock(wbr);
+	au_hn_imtx_unlock(hdir);
+	di_read_unlock(a->sb->s_root, AuLock_IR);
+
+out:
+	if (wbr)
+		atomic_dec(&wbr->wbr_wh_running);
+	atomic_dec(&a->br->br_count);
+	si_write_unlock(a->sb);
+	au_nwt_done(&au_sbi(a->sb)->si_nowait);
+	kfree(arg);
+	if (unlikely(err))
+		AuIOErr("err %d\n", err);
+}
+
+static void kick_reinit_br_wh(struct super_block *sb, struct au_branch *br)
+{
+	int do_dec, wkq_err;
+	struct reinit_br_wh *arg;
+
+	do_dec = 1;
+	if (atomic_inc_return(&br->br_wbr->wbr_wh_running) != 1)
+		goto out;
+
+	/* ignore ENOMEM */
+	arg = kmalloc(sizeof(*arg), GFP_NOFS);
+	if (arg) {
+		/*
+		 * dec(wh_running), kfree(arg) and dec(br_count)
+		 * in reinit function
+		 */
+		arg->sb = sb;
+		arg->br = br;
+		atomic_inc(&br->br_count);
+		wkq_err = au_wkq_nowait(reinit_br_wh, arg, sb, /*flags*/0);
+		if (unlikely(wkq_err)) {
+			atomic_dec(&br->br_wbr->wbr_wh_running);
+			atomic_dec(&br->br_count);
+			kfree(arg);
+		}
+		do_dec = 0;
+	}
+
+out:
+	if (do_dec)
+		atomic_dec(&br->br_wbr->wbr_wh_running);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * create the whiteout @wh.
+ */
+static int link_or_create_wh(struct super_block *sb, aufs_bindex_t bindex,
+			     struct dentry *wh)
+{
+	int err;
+	struct path h_path = {
+		.dentry = wh
+	};
+	struct au_branch *br;
+	struct au_wbr *wbr;
+	struct dentry *h_parent;
+	struct inode *h_dir;
+
+	h_parent = wh->d_parent; /* dir inode is locked */
+	h_dir = h_parent->d_inode;
+	IMustLock(h_dir);
+
+	br = au_sbr(sb, bindex);
+	h_path.mnt = au_br_mnt(br);
+	wbr = br->br_wbr;
+	wbr_wh_read_lock(wbr);
+	if (wbr->wbr_whbase) {
+		err = vfsub_link(wbr->wbr_whbase, h_dir, &h_path);
+		if (!err || err != -EMLINK)
+			goto out;
+
+		/* link count full. re-initialize br_whbase. */
+		kick_reinit_br_wh(sb, br);
+	}
+
+	/* return this error in this context */
+	err = vfsub_create(h_dir, &h_path, WH_MASK, /*want_excl*/true);
+
+out:
+	wbr_wh_read_unlock(wbr);
+	return err;
+}
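+
+/*
+ * control-flow sketch of the fallback above:
+ *
+ *	vfsub_link(whbase, ...) == 0		done, a cheap hard link
+ *	vfsub_link(...) == -EMLINK		kick_reinit_br_wh() and fall
+ *						through to vfsub_create()
+ *	vfsub_link(...) == any other error	returned as-is
+ */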
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * create or remove the diropq.
+ */
+static struct dentry *do_diropq(struct dentry *dentry, aufs_bindex_t bindex,
+				unsigned int flags)
+{
+	struct dentry *opq_dentry, *h_dentry;
+	struct super_block *sb;
+	struct au_branch *br;
+	int err;
+
+	sb = dentry->d_sb;
+	br = au_sbr(sb, bindex);
+	h_dentry = au_h_dptr(dentry, bindex);
+	opq_dentry = vfsub_lkup_one(&diropq_name, h_dentry);
+	if (IS_ERR(opq_dentry))
+		goto out;
+
+	if (au_ftest_diropq(flags, CREATE)) {
+		err = link_or_create_wh(sb, bindex, opq_dentry);
+		if (!err) {
+			au_set_dbdiropq(dentry, bindex);
+			goto out; /* success */
+		}
+	} else {
+		struct path tmp = {
+			.dentry = opq_dentry,
+			.mnt	= au_br_mnt(br)
+		};
+		err = do_unlink_wh(au_h_iptr(dentry->d_inode, bindex), &tmp);
+		if (!err)
+			au_set_dbdiropq(dentry, -1);
+	}
+	dput(opq_dentry);
+	opq_dentry = ERR_PTR(err);
+
+out:
+	return opq_dentry;
+}
+
+struct do_diropq_args {
+	struct dentry **errp;
+	struct dentry *dentry;
+	aufs_bindex_t bindex;
+	unsigned int flags;
+};
+
+static void call_do_diropq(void *args)
+{
+	struct do_diropq_args *a = args;
+	*a->errp = do_diropq(a->dentry, a->bindex, a->flags);
+}
+
+struct dentry *au_diropq_sio(struct dentry *dentry, aufs_bindex_t bindex,
+			     unsigned int flags)
+{
+	struct dentry *diropq, *h_dentry;
+
+	h_dentry = au_h_dptr(dentry, bindex);
+	if (!au_test_h_perm_sio(h_dentry->d_inode, MAY_EXEC | MAY_WRITE))
+		diropq = do_diropq(dentry, bindex, flags);
+	else {
+		int wkq_err;
+		struct do_diropq_args args = {
+			.errp		= &diropq,
+			.dentry		= dentry,
+			.bindex		= bindex,
+			.flags		= flags
+		};
+
+		wkq_err = au_wkq_wait(call_do_diropq, &args);
+		if (unlikely(wkq_err))
+			diropq = ERR_PTR(wkq_err);
+	}
+
+	return diropq;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * lookup whiteout dentry.
+ * @h_parent: lower parent dentry which must exist and be locked
+ * @base_name: name of dentry which will be whiteouted
+ * returns dentry for whiteout.
+ */
+struct dentry *au_wh_lkup(struct dentry *h_parent, struct qstr *base_name,
+			  struct au_branch *br)
+{
+	int err;
+	struct qstr wh_name;
+	struct dentry *wh_dentry;
+
+	err = au_wh_name_alloc(&wh_name, base_name);
+	wh_dentry = ERR_PTR(err);
+	if (!err) {
+		wh_dentry = vfsub_lkup_one(&wh_name, h_parent);
+		kfree(wh_name.name);
+	}
+	return wh_dentry;
+}
+
+/*
+ * link/create a whiteout for @dentry on @bindex.
+ */
+struct dentry *au_wh_create(struct dentry *dentry, aufs_bindex_t bindex,
+			    struct dentry *h_parent)
+{
+	struct dentry *wh_dentry;
+	struct super_block *sb;
+	int err;
+
+	sb = dentry->d_sb;
+	wh_dentry = au_wh_lkup(h_parent, &dentry->d_name, au_sbr(sb, bindex));
+	if (!IS_ERR(wh_dentry) && !wh_dentry->d_inode) {
+		err = link_or_create_wh(sb, bindex, wh_dentry);
+		if (!err)
+			au_set_dbwh(dentry, bindex);
+		else {
+			dput(wh_dentry);
+			wh_dentry = ERR_PTR(err);
+		}
+	}
+
+	return wh_dentry;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* Delete all whiteouts in this directory on branch bindex. */
+static int del_wh_children(struct dentry *h_dentry, struct au_nhash *whlist,
+			   aufs_bindex_t bindex, struct au_branch *br)
+{
+	int err;
+	unsigned long ul, n;
+	struct qstr wh_name;
+	char *p;
+	struct hlist_head *head;
+	struct au_vdir_wh *pos;
+	struct au_vdir_destr *str;
+
+	err = -ENOMEM;
+	p = (void *)__get_free_page(GFP_NOFS);
+	wh_name.name = p;
+	if (unlikely(!wh_name.name))
+		goto out;
+
+	err = 0;
+	memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN);
+	p += AUFS_WH_PFX_LEN;
+	n = whlist->nh_num;
+	head = whlist->nh_head;
+	for (ul = 0; !err && ul < n; ul++, head++) {
+		hlist_for_each_entry(pos, head, wh_hash) {
+			if (pos->wh_bindex != bindex)
+				continue;
+
+			str = &pos->wh_str;
+			if (str->len + AUFS_WH_PFX_LEN <= PATH_MAX) {
+				memcpy(p, str->name, str->len);
+				wh_name.len = AUFS_WH_PFX_LEN + str->len;
+				err = unlink_wh_name(h_dentry, &wh_name, br);
+				if (!err)
+					continue;
+				break;
+			}
+			AuIOErr("whiteout name too long %.*s\n",
+				str->len, str->name);
+			err = -EIO;
+			break;
+		}
+	}
+	free_page((unsigned long)wh_name.name);
+
+out:
+	return err;
+}
+
+struct del_wh_children_args {
+	int *errp;
+	struct dentry *h_dentry;
+	struct au_nhash *whlist;
+	aufs_bindex_t bindex;
+	struct au_branch *br;
+};
+
+static void call_del_wh_children(void *args)
+{
+	struct del_wh_children_args *a = args;
+	*a->errp = del_wh_children(a->h_dentry, a->whlist, a->bindex, a->br);
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_whtmp_rmdir *au_whtmp_rmdir_alloc(struct super_block *sb, gfp_t gfp)
+{
+	struct au_whtmp_rmdir *whtmp;
+	int err;
+	unsigned int rdhash;
+
+	SiMustAnyLock(sb);
+
+	whtmp = kmalloc(sizeof(*whtmp), gfp);
+	if (unlikely(!whtmp)) {
+		whtmp = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	whtmp->dir = NULL;
+	whtmp->br = NULL;
+	whtmp->wh_dentry = NULL;
+	/* no estimation for dir size */
+	rdhash = au_sbi(sb)->si_rdhash;
+	if (!rdhash)
+		rdhash = AUFS_RDHASH_DEF;
+	err = au_nhash_alloc(&whtmp->whlist, rdhash, gfp);
+	if (unlikely(err)) {
+		kfree(whtmp);
+		whtmp = ERR_PTR(err);
+	}
+
+out:
+	return whtmp;
+}
+
+void au_whtmp_rmdir_free(struct au_whtmp_rmdir *whtmp)
+{
+	if (whtmp->br)
+		atomic_dec(&whtmp->br->br_count);
+	dput(whtmp->wh_dentry);
+	iput(whtmp->dir);
+	au_nhash_wh_free(&whtmp->whlist);
+	kfree(whtmp);
+}
+
+/*
+ * rmdir the whiteout-ed, temporarily named dir @wh_dentry.
+ * @whlist: whiteouted children.
+ */
+int au_whtmp_rmdir(struct inode *dir, aufs_bindex_t bindex,
+		   struct dentry *wh_dentry, struct au_nhash *whlist)
+{
+	int err;
+	struct path h_tmp;
+	struct inode *wh_inode, *h_dir;
+	struct au_branch *br;
+
+	h_dir = wh_dentry->d_parent->d_inode; /* dir inode is locked */
+	IMustLock(h_dir);
+
+	br = au_sbr(dir->i_sb, bindex);
+	wh_inode = wh_dentry->d_inode;
+	mutex_lock_nested(&wh_inode->i_mutex, AuLsc_I_CHILD);
+
+	/*
+	 * someone else might change some whiteouts while we were sleeping.
+	 * it means this whlist may have an obsolete entry.
+	 */
+	if (!au_test_h_perm_sio(wh_inode, MAY_EXEC | MAY_WRITE))
+		err = del_wh_children(wh_dentry, whlist, bindex, br);
+	else {
+		int wkq_err;
+		struct del_wh_children_args args = {
+			.errp		= &err,
+			.h_dentry	= wh_dentry,
+			.whlist		= whlist,
+			.bindex		= bindex,
+			.br		= br
+		};
+
+		wkq_err = au_wkq_wait(call_del_wh_children, &args);
+		if (unlikely(wkq_err))
+			err = wkq_err;
+	}
+	mutex_unlock(&wh_inode->i_mutex);
+
+	if (!err) {
+		h_tmp.dentry = wh_dentry;
+		h_tmp.mnt = au_br_mnt(br);
+		err = vfsub_rmdir(h_dir, &h_tmp);
+	}
+
+	if (!err) {
+		if (au_ibstart(dir) == bindex) {
+			/* todo: dir->i_mutex is necessary */
+			au_cpup_attr_timesizes(dir);
+			vfsub_drop_nlink(dir);
+		}
+		return 0; /* success */
+	}
+
+	pr_warn("failed removing %.*s(%d), ignored\n",
+		AuDLNPair(wh_dentry), err);
+	return err;
+}
+
+static void call_rmdir_whtmp(void *args)
+{
+	int err;
+	aufs_bindex_t bindex;
+	struct au_whtmp_rmdir *a = args;
+	struct super_block *sb;
+	struct dentry *h_parent;
+	struct inode *h_dir;
+	struct au_hinode *hdir;
+
+	/* rmdir by nfsd may cause deadlock with this i_mutex */
+	/* mutex_lock(&a->dir->i_mutex); */
+	err = -EROFS;
+	sb = a->dir->i_sb;
+	si_read_lock(sb, !AuLock_FLUSH);
+	if (!au_br_writable(a->br->br_perm))
+		goto out;
+	bindex = au_br_index(sb, a->br->br_id);
+	if (unlikely(bindex < 0))
+		goto out;
+
+	err = -EIO;
+	ii_write_lock_parent(a->dir);
+	h_parent = dget_parent(a->wh_dentry);
+	h_dir = h_parent->d_inode;
+	hdir = au_hi(a->dir, bindex);
+	err = vfsub_mnt_want_write(au_br_mnt(a->br));
+	if (unlikely(err))
+		goto out_mnt;
+	au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT);
+	err = au_h_verify(a->wh_dentry, au_opt_udba(sb), h_dir, h_parent,
+			  a->br);
+	if (!err)
+		err = au_whtmp_rmdir(a->dir, bindex, a->wh_dentry, &a->whlist);
+	au_hn_imtx_unlock(hdir);
+	vfsub_mnt_drop_write(au_br_mnt(a->br));
+
+out_mnt:
+	dput(h_parent);
+	ii_write_unlock(a->dir);
+out:
+	/* mutex_unlock(&a->dir->i_mutex); */
+	au_whtmp_rmdir_free(a);
+	si_read_unlock(sb);
+	au_nwt_done(&au_sbi(sb)->si_nowait);
+	if (unlikely(err))
+		AuIOErr("err %d\n", err);
+}
+
+void au_whtmp_kick_rmdir(struct inode *dir, aufs_bindex_t bindex,
+			 struct dentry *wh_dentry, struct au_whtmp_rmdir *args)
+{
+	int wkq_err;
+	struct super_block *sb;
+
+	IMustLock(dir);
+
+	/* all post-processing is done in call_rmdir_whtmp(). */
+	sb = dir->i_sb;
+	args->dir = au_igrab(dir);
+	args->br = au_sbr(sb, bindex);
+	atomic_inc(&args->br->br_count);
+	args->wh_dentry = dget(wh_dentry);
+	wkq_err = au_wkq_nowait(call_rmdir_whtmp, args, sb, /*flags*/0);
+	if (unlikely(wkq_err)) {
+		pr_warn("rmdir error %.*s (%d), ignored\n",
+			AuDLNPair(wh_dentry), wkq_err);
+		au_whtmp_rmdir_free(args);
+	}
+}
diff --git a/fs/aufs/whout.h b/fs/aufs/whout.h
new file mode 100644
index 0000000..8508560
--- /dev/null
+++ b/fs/aufs/whout.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * whiteout for logical deletion and opaque directory
+ */
+
+#ifndef __AUFS_WHOUT_H__
+#define __AUFS_WHOUT_H__
+
+#ifdef __KERNEL__
+
+#include "dir.h"
+
+/* whout.c */
+int au_wh_name_alloc(struct qstr *wh, const struct qstr *name);
+struct au_branch;
+int au_wh_test(struct dentry *h_parent, struct qstr *wh_name,
+	       struct au_branch *br, int try_sio);
+int au_diropq_test(struct dentry *h_dentry, struct au_branch *br);
+struct dentry *au_whtmp_lkup(struct dentry *h_parent, struct au_branch *br,
+			     struct qstr *prefix);
+int au_whtmp_ren(struct dentry *h_dentry, struct au_branch *br);
+int au_wh_unlink_dentry(struct inode *h_dir, struct path *h_path,
+			struct dentry *dentry);
+int au_wh_init(struct au_branch *br, struct super_block *sb);
+
+/* diropq flags */
+#define AuDiropq_CREATE	1
+#define au_ftest_diropq(flags, name)	((flags) & AuDiropq_##name)
+#define au_fset_diropq(flags, name) \
+	do { (flags) |= AuDiropq_##name; } while (0)
+#define au_fclr_diropq(flags, name) \
+	do { (flags) &= ~AuDiropq_##name; } while (0)
+
+struct dentry *au_diropq_sio(struct dentry *dentry, aufs_bindex_t bindex,
+			     unsigned int flags);
+struct dentry *au_wh_lkup(struct dentry *h_parent, struct qstr *base_name,
+			  struct au_branch *br);
+struct dentry *au_wh_create(struct dentry *dentry, aufs_bindex_t bindex,
+			    struct dentry *h_parent);
+
+/* real rmdir for the whiteout-ed dir */
+struct au_whtmp_rmdir {
+	struct inode *dir;
+	struct au_branch *br;
+	struct dentry *wh_dentry;
+	struct au_nhash whlist;
+};
+
+struct au_whtmp_rmdir *au_whtmp_rmdir_alloc(struct super_block *sb, gfp_t gfp);
+void au_whtmp_rmdir_free(struct au_whtmp_rmdir *whtmp);
+int au_whtmp_rmdir(struct inode *dir, aufs_bindex_t bindex,
+		   struct dentry *wh_dentry, struct au_nhash *whlist);
+void au_whtmp_kick_rmdir(struct inode *dir, aufs_bindex_t bindex,
+			 struct dentry *wh_dentry, struct au_whtmp_rmdir *args);
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct dentry *au_diropq_create(struct dentry *dentry,
+					      aufs_bindex_t bindex)
+{
+	return au_diropq_sio(dentry, bindex, AuDiropq_CREATE);
+}
+
+static inline int au_diropq_remove(struct dentry *dentry, aufs_bindex_t bindex)
+{
+	return PTR_ERR(au_diropq_sio(dentry, bindex, !AuDiropq_CREATE));
+}
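+
+/*
+ * usage sketch (a hypothetical caller; locking and error paths elided):
+ *
+ *	struct dentry *opq = au_diropq_create(dentry, bindex);
+ *	if (IS_ERR(opq))
+ *		return PTR_ERR(opq);
+ *	dput(opq);
+ *	...
+ *	err = au_diropq_remove(dentry, bindex);
+ */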
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_WHOUT_H__ */
diff --git a/fs/aufs/wkq.c b/fs/aufs/wkq.c
new file mode 100644
index 0000000..c632bb3
--- /dev/null
+++ b/fs/aufs/wkq.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * workqueue for asynchronous/super-io operations
+ * todo: try new credential scheme
+ */
+
+#include <linux/module.h>
+#include "aufs.h"
+
+/* internal workqueue named AUFS_WKQ_NAME */
+
+static struct workqueue_struct *au_wkq;
+
+struct au_wkinfo {
+	struct work_struct wk;
+	struct kobject *kobj;
+
+	unsigned int flags; /* see wkq.h */
+
+	au_wkq_func_t func;
+	void *args;
+
+	struct completion *comp;
+};
+
+/* ---------------------------------------------------------------------- */
+
+static void wkq_func(struct work_struct *wk)
+{
+	struct au_wkinfo *wkinfo = container_of(wk, struct au_wkinfo, wk);
+
+	AuDebugOn(!uid_eq(current_fsuid(), GLOBAL_ROOT_UID));
+	AuDebugOn(rlimit(RLIMIT_FSIZE) != RLIM_INFINITY);
+
+	wkinfo->func(wkinfo->args);
+	if (au_ftest_wkq(wkinfo->flags, WAIT))
+		complete(wkinfo->comp);
+	else {
+		kobject_put(wkinfo->kobj);
+		module_put(THIS_MODULE); /* todo: ?? */
+		kfree(wkinfo);
+	}
+}
+
+/*
+ * Since struct completion is large, try allocating it dynamically.
+ */
+#if 1 /* defined(CONFIG_4KSTACKS) || defined(AuTest4KSTACKS) */
+#define AuWkqCompDeclare(name)	struct completion *comp = NULL
+
+static int au_wkq_comp_alloc(struct au_wkinfo *wkinfo, struct completion **comp)
+{
+	*comp = kmalloc(sizeof(**comp), GFP_NOFS);
+	if (*comp) {
+		init_completion(*comp);
+		wkinfo->comp = *comp;
+		return 0;
+	}
+	return -ENOMEM;
+}
+
+static void au_wkq_comp_free(struct completion *comp)
+{
+	kfree(comp);
+}
+
+#else
+
+/* no braces */
+#define AuWkqCompDeclare(name) \
+	DECLARE_COMPLETION_ONSTACK(_ ## name); \
+	struct completion *comp = &_ ## name
+
+static int au_wkq_comp_alloc(struct au_wkinfo *wkinfo, struct completion **comp)
+{
+	wkinfo->comp = *comp;
+	return 0;
+}
+
+static void au_wkq_comp_free(struct completion *comp __maybe_unused)
+{
+	/* empty */
+}
+#endif /* 4KSTACKS */
+
+static void au_wkq_run(struct au_wkinfo *wkinfo)
+{
+	if (au_ftest_wkq(wkinfo->flags, NEST)) {
+		if (au_wkq_test()) {
+			AuWarn1("wkq from wkq, due to a dead dir by UDBA?\n");
+			AuDebugOn(au_ftest_wkq(wkinfo->flags, WAIT));
+		}
+	} else
+		au_dbg_verify_kthread();
+
+	if (au_ftest_wkq(wkinfo->flags, WAIT)) {
+		INIT_WORK_ONSTACK(&wkinfo->wk, wkq_func);
+		queue_work(au_wkq, &wkinfo->wk);
+	} else {
+		INIT_WORK(&wkinfo->wk, wkq_func);
+		schedule_work(&wkinfo->wk);
+	}
+}
+
+/*
+ * Be careful. It is easy to cause a deadlock:
+ * processA: lock, wkq and wait
+ * processB: wkq and wait, lock in wkq
+ * --> deadlock
+ */
+int au_wkq_do_wait(unsigned int flags, au_wkq_func_t func, void *args)
+{
+	int err;
+	AuWkqCompDeclare(comp);
+	struct au_wkinfo wkinfo = {
+		.flags	= flags,
+		.func	= func,
+		.args	= args
+	};
+
+	err = au_wkq_comp_alloc(&wkinfo, &comp);
+	if (!err) {
+		au_wkq_run(&wkinfo);
+		/* no timeout, no interrupt */
+		wait_for_completion(wkinfo.comp);
+		au_wkq_comp_free(comp);
+		destroy_work_on_stack(&wkinfo.wk);
+	}
+
+	return err;
+}
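+
+/*
+ * typical synchronous usage follows the call_*() trampoline pattern
+ * used throughout aufs (e.g. call_do_diropq() in whout.c); the names
+ * below are placeholders:
+ *
+ *	struct my_args args = { .errp = &err, ... };
+ *	wkq_err = au_wkq_wait(call_my_func, &args);
+ *	if (unlikely(wkq_err))
+ *		err = wkq_err;
+ */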
+
+/*
+ * Note: dget/dput() for aufs dentries inside @func is not supported. It
+ * would be a problem during a concurrent unmount.
+ */
+int au_wkq_nowait(au_wkq_func_t func, void *args, struct super_block *sb,
+		  unsigned int flags)
+{
+	int err;
+	struct au_wkinfo *wkinfo;
+
+	atomic_inc(&au_sbi(sb)->si_nowait.nw_len);
+
+	/*
+	 * wkq_func() must free this wkinfo.
+	 * it highly depends upon the implementation of workqueue.
+	 */
+	err = 0;
+	wkinfo = kmalloc(sizeof(*wkinfo), GFP_NOFS);
+	if (wkinfo) {
+		wkinfo->kobj = &au_sbi(sb)->si_kobj;
+		wkinfo->flags = flags & ~AuWkq_WAIT;
+		wkinfo->func = func;
+		wkinfo->args = args;
+		wkinfo->comp = NULL;
+		kobject_get(wkinfo->kobj);
+		__module_get(THIS_MODULE); /* todo: ?? */
+
+		au_wkq_run(wkinfo);
+	} else {
+		err = -ENOMEM;
+		au_nwt_done(&au_sbi(sb)->si_nowait);
+	}
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_nwt_init(struct au_nowait_tasks *nwt)
+{
+	atomic_set(&nwt->nw_len, 0);
+	/* smp_mb(); */ /* atomic_set */
+	init_waitqueue_head(&nwt->nw_wq);
+}
+
+void au_wkq_fin(void)
+{
+	destroy_workqueue(au_wkq);
+}
+
+int __init au_wkq_init(void)
+{
+	int err;
+
+	err = 0;
+	au_wkq = alloc_workqueue(AUFS_WKQ_NAME, 0, WQ_DFL_ACTIVE);
+	if (IS_ERR(au_wkq))
+		err = PTR_ERR(au_wkq);
+	else if (!au_wkq)
+		err = -ENOMEM;
+
+	return err;
+}
diff --git a/fs/aufs/wkq.h b/fs/aufs/wkq.h
new file mode 100644
index 0000000..c316b7f
--- /dev/null
+++ b/fs/aufs/wkq.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * workqueue for asynchronous/super-io operations
+ * todo: try new credentials management scheme
+ */
+
+#ifndef __AUFS_WKQ_H__
+#define __AUFS_WKQ_H__
+
+#ifdef __KERNEL__
+
+struct super_block;
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * in the next operation, wait for the 'nowait' tasks in the system-wide
+ * workqueue
+ */
+struct au_nowait_tasks {
+	atomic_t		nw_len;
+	wait_queue_head_t	nw_wq;
+};
+
+/* ---------------------------------------------------------------------- */
+
+typedef void (*au_wkq_func_t)(void *args);
+
+/* wkq flags */
+#define AuWkq_WAIT	1
+#define AuWkq_NEST	(1 << 1)
+#define au_ftest_wkq(flags, name)	((flags) & AuWkq_##name)
+#define au_fset_wkq(flags, name) \
+	do { (flags) |= AuWkq_##name; } while (0)
+#define au_fclr_wkq(flags, name) \
+	do { (flags) &= ~AuWkq_##name; } while (0)
+
+#ifndef CONFIG_AUFS_HNOTIFY
+#undef AuWkq_NEST
+#define AuWkq_NEST	0
+#endif
+
+/* wkq.c */
+int au_wkq_do_wait(unsigned int flags, au_wkq_func_t func, void *args);
+int au_wkq_nowait(au_wkq_func_t func, void *args, struct super_block *sb,
+		  unsigned int flags);
+void au_nwt_init(struct au_nowait_tasks *nwt);
+int __init au_wkq_init(void);
+void au_wkq_fin(void);
+
+/* ---------------------------------------------------------------------- */
+
+static inline int au_wkq_test(void)
+{
+	return current->flags & PF_WQ_WORKER;
+}
+
+static inline int au_wkq_wait(au_wkq_func_t func, void *args)
+{
+	return au_wkq_do_wait(AuWkq_WAIT, func, args);
+}
+
+static inline void au_nwt_done(struct au_nowait_tasks *nwt)
+{
+	if (atomic_dec_and_test(&nwt->nw_len))
+		wake_up_all(&nwt->nw_wq);
+}
+
+static inline int au_nwt_flush(struct au_nowait_tasks *nwt)
+{
+	wait_event(nwt->nw_wq, !atomic_read(&nwt->nw_len));
+	return 0;
+}
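+
+/*
+ * lifecycle sketch of the nowait counter: au_wkq_nowait() in wkq.c
+ * increments nw_len when enqueueing, the queued function finishes with
+ * au_nwt_done(), and unmount-like paths drain it:
+ *
+ *	au_wkq_nowait(func, args, sb, 0);	nw_len++
+ *	... func runs, calls au_nwt_done() ...	nw_len--
+ *	au_nwt_flush(&au_sbi(sb)->si_nowait);	sleeps until nw_len == 0
+ */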
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_WKQ_H__ */
diff --git a/fs/aufs/xino.c b/fs/aufs/xino.c
new file mode 100644
index 0000000..88b1c94
--- /dev/null
+++ b/fs/aufs/xino.c
@@ -0,0 +1,1264 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * external inode number translation table and bitmap
+ */
+
+#include <linux/seq_file.h>
+#include "aufs.h"
+
+/* todo: unnecessary to support mmap_sem since kernel-space? */
+ssize_t xino_fread(au_readf_t func, struct file *file, void *kbuf, size_t size,
+		   loff_t *pos)
+{
+	ssize_t err;
+	mm_segment_t oldfs;
+	union {
+		void *k;
+		char __user *u;
+	} buf;
+
+	buf.k = kbuf;
+	oldfs = get_fs();
+	set_fs(KERNEL_DS);
+	do {
+		/* todo: signal_pending? */
+		err = func(file, buf.u, size, pos);
+	} while (err == -EAGAIN || err == -EINTR);
+	set_fs(oldfs);
+
+#if 0 /* reserved for future use */
+	if (err > 0)
+		fsnotify_access(file->f_dentry);
+#endif
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static ssize_t do_xino_fwrite(au_writef_t func, struct file *file, void *kbuf,
+			      size_t size, loff_t *pos)
+{
+	ssize_t err;
+	mm_segment_t oldfs;
+	union {
+		void *k;
+		const char __user *u;
+	} buf;
+
+	buf.k = kbuf;
+	oldfs = get_fs();
+	set_fs(KERNEL_DS);
+	do {
+		/* todo: signal_pending? */
+		err = func(file, buf.u, size, pos);
+	} while (err == -EAGAIN || err == -EINTR);
+	set_fs(oldfs);
+
+#if 0 /* reserved for future use */
+	if (err > 0)
+		fsnotify_modify(file->f_dentry);
+#endif
+
+	return err;
+}
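+
+/*
+ * both helpers above use the classic set_fs(KERNEL_DS) idiom so that a
+ * kernel buffer can be handed to ->read()/->write(), which expect a
+ * __user pointer; the union exists only to silence the address-space
+ * checks of sparse.
+ */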
+
+struct do_xino_fwrite_args {
+	ssize_t *errp;
+	au_writef_t func;
+	struct file *file;
+	void *buf;
+	size_t size;
+	loff_t *pos;
+};
+
+static void call_do_xino_fwrite(void *args)
+{
+	struct do_xino_fwrite_args *a = args;
+	*a->errp = do_xino_fwrite(a->func, a->file, a->buf, a->size, a->pos);
+}
+
+ssize_t xino_fwrite(au_writef_t func, struct file *file, void *buf, size_t size,
+		    loff_t *pos)
+{
+	ssize_t err;
+
+	/* todo: signal block and no wkq? */
+	if (rlimit(RLIMIT_FSIZE) == RLIM_INFINITY) {
+		lockdep_off();
+		err = do_xino_fwrite(func, file, buf, size, pos);
+		lockdep_on();
+	} else {
+		/*
+		 * this breaks RLIMIT_FSIZE and the normal user's limit;
+		 * users should take care of quota and a real 'filesystem
+		 * full' by themselves.
+		 */
+		int wkq_err;
+		struct do_xino_fwrite_args args = {
+			.errp	= &err,
+			.func	= func,
+			.file	= file,
+			.buf	= buf,
+			.size	= size,
+			.pos	= pos
+		};
+
+		wkq_err = au_wkq_wait(call_do_xino_fwrite, &args);
+		if (unlikely(wkq_err))
+			err = wkq_err;
+	}
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * create a new xinofile at the same place/path as @base_file.
+ */
+struct file *au_xino_create2(struct file *base_file, struct file *copy_src)
+{
+	struct file *file;
+	struct dentry *base, *parent;
+	struct inode *dir;
+	struct qstr *name;
+	struct path path;
+	int err;
+
+	base = base_file->f_dentry;
+	parent = base->d_parent; /* dir inode is locked */
+	dir = parent->d_inode;
+	IMustLock(dir);
+
+	file = ERR_PTR(-EINVAL);
+	name = &base->d_name;
+	path.dentry = vfsub_lookup_one_len(name->name, parent, name->len);
+	if (IS_ERR(path.dentry)) {
+		file = (void *)path.dentry;
+		pr_err("%.*s lookup err %ld\n",
+		       AuLNPair(name), PTR_ERR(path.dentry));
+		goto out;
+	}
+
+	/* no need to mnt_want_write() since we call dentry_open() later */
+	err = vfs_create(dir, path.dentry, S_IRUGO | S_IWUGO, NULL);
+	if (unlikely(err)) {
+		file = ERR_PTR(err);
+		pr_err("%.*s create err %d\n", AuLNPair(name), err);
+		goto out_dput;
+	}
+
+	path.mnt = base_file->f_path.mnt;
+	file = vfsub_dentry_open(&path,
+				 O_RDWR | O_CREAT | O_EXCL | O_LARGEFILE
+				 /* | __FMODE_NONOTIFY */);
+	if (IS_ERR(file)) {
+		pr_err("%.*s open err %ld\n", AuLNPair(name), PTR_ERR(file));
+		goto out_dput;
+	}
+
+	err = vfsub_unlink(dir, &file->f_path, /*force*/0);
+	if (unlikely(err)) {
+		pr_err("%.*s unlink err %d\n", AuLNPair(name), err);
+		goto out_fput;
+	}
+
+	if (copy_src) {
+		/* no one can touch copy_src xino */
+		err = au_copy_file(file, copy_src, vfsub_f_size_read(copy_src));
+		if (unlikely(err)) {
+			pr_err("%.*s copy err %d\n", AuLNPair(name), err);
+			goto out_fput;
+		}
+	}
+	goto out_dput; /* success */
+
+out_fput:
+	fput(file);
+	file = ERR_PTR(err);
+out_dput:
+	dput(path.dentry);
+out:
+	return file;
+}
+
+struct au_xino_lock_dir {
+	struct au_hinode *hdir;
+	struct dentry *parent;
+	struct mutex *mtx;
+};
+
+static void au_xino_lock_dir(struct super_block *sb, struct file *xino,
+			     struct au_xino_lock_dir *ldir)
+{
+	aufs_bindex_t brid, bindex;
+
+	ldir->hdir = NULL;
+	bindex = -1;
+	brid = au_xino_brid(sb);
+	if (brid >= 0)
+		bindex = au_br_index(sb, brid);
+	if (bindex >= 0) {
+		ldir->hdir = au_hi(sb->s_root->d_inode, bindex);
+		au_hn_imtx_lock_nested(ldir->hdir, AuLsc_I_PARENT);
+	} else {
+		ldir->parent = dget_parent(xino->f_dentry);
+		ldir->mtx = &ldir->parent->d_inode->i_mutex;
+		mutex_lock_nested(ldir->mtx, AuLsc_I_PARENT);
+	}
+}
+
+static void au_xino_unlock_dir(struct au_xino_lock_dir *ldir)
+{
+	if (ldir->hdir)
+		au_hn_imtx_unlock(ldir->hdir);
+	else {
+		mutex_unlock(ldir->mtx);
+		dput(ldir->parent);
+	}
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* truncate xino files asynchronously */
+
+int au_xino_trunc(struct super_block *sb, aufs_bindex_t bindex)
+{
+	int err;
+	aufs_bindex_t bi, bend;
+	struct au_branch *br;
+	struct file *new_xino, *file;
+	struct super_block *h_sb;
+	struct au_xino_lock_dir ldir;
+
+	err = -EINVAL;
+	bend = au_sbend(sb);
+	if (unlikely(bindex < 0 || bend < bindex))
+		goto out;
+	br = au_sbr(sb, bindex);
+	file = br->br_xino.xi_file;
+	if (!file)
+		goto out;
+
+	au_xino_lock_dir(sb, file, &ldir);
+	/* mnt_want_write() is unnecessary here */
+	new_xino = au_xino_create2(file, file);
+	au_xino_unlock_dir(&ldir);
+	err = PTR_ERR(new_xino);
+	if (IS_ERR(new_xino))
+		goto out;
+	err = 0;
+	fput(file);
+	br->br_xino.xi_file = new_xino;
+
+	h_sb = au_br_sb(br);
+	for (bi = 0; bi <= bend; bi++) {
+		if (unlikely(bi == bindex))
+			continue;
+		br = au_sbr(sb, bi);
+		if (au_br_sb(br) != h_sb)
+			continue;
+
+		fput(br->br_xino.xi_file);
+		br->br_xino.xi_file = new_xino;
+		get_file(new_xino);
+	}
+
+out:
+	return err;
+}
+
+struct xino_do_trunc_args {
+	struct super_block *sb;
+	struct au_branch *br;
+};
+
+static void xino_do_trunc(void *_args)
+{
+	struct xino_do_trunc_args *args = _args;
+	struct super_block *sb;
+	struct au_branch *br;
+	struct inode *dir;
+	int err;
+	aufs_bindex_t bindex;
+
+	err = 0;
+	sb = args->sb;
+	dir = sb->s_root->d_inode;
+	br = args->br;
+
+	si_noflush_write_lock(sb);
+	ii_read_lock_parent(dir);
+	bindex = au_br_index(sb, br->br_id);
+	err = au_xino_trunc(sb, bindex);
+	if (!err
+	    && file_inode(br->br_xino.xi_file)->i_blocks
+	    >= br->br_xino_upper)
+		br->br_xino_upper += AUFS_XINO_TRUNC_STEP;
+
+	ii_read_unlock(dir);
+	if (unlikely(err))
+		pr_warn("err b%d, upper %llu, (%d)\n",
+			bindex, (unsigned long long)br->br_xino_upper, err);
+	atomic_dec(&br->br_xino_running);
+	atomic_dec(&br->br_count);
+	si_write_unlock(sb);
+	au_nwt_done(&au_sbi(sb)->si_nowait);
+	kfree(args);
+}
+
+static void xino_try_trunc(struct super_block *sb, struct au_branch *br)
+{
+	struct xino_do_trunc_args *args;
+	int wkq_err;
+
+	if (file_inode(br->br_xino.xi_file)->i_blocks
+	    < br->br_xino_upper)
+		return;
+
+	if (atomic_inc_return(&br->br_xino_running) > 1)
+		goto out;
+
+	/* the lock and kfree() will be done in xino_do_trunc() */
+	args = kmalloc(sizeof(*args), GFP_NOFS);
+	if (unlikely(!args)) {
+		AuErr1("no memory\n");
+		goto out_args;
+	}
+
+	atomic_inc(&br->br_count);
+	args->sb = sb;
+	args->br = br;
+	wkq_err = au_wkq_nowait(xino_do_trunc, args, sb, /*flags*/0);
+	if (!wkq_err)
+		return; /* success */
+
+	pr_err("wkq %d\n", wkq_err);
+	atomic_dec(&br->br_count);
+
+out_args:
+	kfree(args);
+out:
+	atomic_dec(&br->br_xino_running);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_xino_do_write(au_writef_t write, struct file *file,
+			    ino_t h_ino, ino_t ino)
+{
+	loff_t pos;
+	ssize_t sz;
+
+	pos = h_ino;
+	if (unlikely(au_loff_max / sizeof(ino) - 1 < pos)) {
+		AuIOErr1("too large hi%lu\n", (unsigned long)h_ino);
+		return -EFBIG;
+	}
+	pos *= sizeof(ino);
+	sz = xino_fwrite(write, file, &ino, sizeof(ino), &pos);
+	if (sz == sizeof(ino))
+		return 0; /* success */
+
+	AuIOErr("write failed (%zd)\n", sz);
+	return -EIO;
+}
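+
+/*
+ * file-layout sketch: a xino file is a flat array indexed by the host
+ * inode number,
+ *
+ *	pos = h_ino * sizeof(ino_t)
+ *
+ * e.g. with an 8-byte ino_t, h_ino 42 is stored at offset 336, and a
+ * stored value of zero means "no aufs inode assigned".  unwritten
+ * ranges remain holes, so the file is usually sparse.
+ */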
+
+/*
+ * write @ino to the xinofile for the specified branch{@sb, @bindex}
+ * at the position of @h_ino.
+ * even if @ino is zero, it is written to the xinofile and means no entry.
+ * if the size of the xino file on a specific filesystem exceeds the watermark,
+ * try truncating it.
+ */
+int au_xino_write(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+		  ino_t ino)
+{
+	int err;
+	unsigned int mnt_flags;
+	struct au_branch *br;
+
+	BUILD_BUG_ON(sizeof(long long) != sizeof(au_loff_max)
+		     || ((loff_t)-1) > 0);
+	SiMustAnyLock(sb);
+
+	mnt_flags = au_mntflags(sb);
+	if (!au_opt_test(mnt_flags, XINO))
+		return 0;
+
+	br = au_sbr(sb, bindex);
+	err = au_xino_do_write(au_sbi(sb)->si_xwrite, br->br_xino.xi_file,
+			       h_ino, ino);
+	if (!err) {
+		if (au_opt_test(mnt_flags, TRUNC_XINO)
+		    && au_test_fs_trunc_xino(au_br_sb(br)))
+			xino_try_trunc(sb, br);
+		return 0; /* success */
+	}
+
+	AuIOErr("write failed (%d)\n", err);
+	return -EIO;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* aufs inode number bitmap */
+
+static const int page_bits = (int)PAGE_SIZE * BITS_PER_BYTE;
+
+static ino_t xib_calc_ino(unsigned long pindex, int bit)
+{
+	ino_t ino;
+
+	AuDebugOn(bit < 0 || page_bits <= bit);
+	ino = AUFS_FIRST_INO + pindex * page_bits + bit;
+	return ino;
+}
+
+static void xib_calc_bit(ino_t ino, unsigned long *pindex, int *bit)
+{
+	AuDebugOn(ino < AUFS_FIRST_INO);
+	ino -= AUFS_FIRST_INO;
+	*pindex = ino / page_bits;
+	*bit = ino % page_bits;
+}
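+
+/*
+ * worked example for the two helpers above, assuming 4KiB pages so
+ * that page_bits == 32768: ino == AUFS_FIRST_INO + 32770 maps to
+ * pindex 1 and bit 2, and xib_calc_ino(1, 2) maps it back.
+ */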
+
+static int xib_pindex(struct super_block *sb, unsigned long pindex)
+{
+	int err;
+	loff_t pos;
+	ssize_t sz;
+	struct au_sbinfo *sbinfo;
+	struct file *xib;
+	unsigned long *p;
+
+	sbinfo = au_sbi(sb);
+	MtxMustLock(&sbinfo->si_xib_mtx);
+	AuDebugOn(pindex > ULONG_MAX / PAGE_SIZE
+		  || !au_opt_test(sbinfo->si_mntflags, XINO));
+
+	if (pindex == sbinfo->si_xib_last_pindex)
+		return 0;
+
+	xib = sbinfo->si_xib;
+	p = sbinfo->si_xib_buf;
+	pos = sbinfo->si_xib_last_pindex;
+	pos *= PAGE_SIZE;
+	sz = xino_fwrite(sbinfo->si_xwrite, xib, p, PAGE_SIZE, &pos);
+	if (unlikely(sz != PAGE_SIZE))
+		goto out;
+
+	pos = pindex;
+	pos *= PAGE_SIZE;
+	if (vfsub_f_size_read(xib) >= pos + PAGE_SIZE)
+		sz = xino_fread(sbinfo->si_xread, xib, p, PAGE_SIZE, &pos);
+	else {
+		memset(p, 0, PAGE_SIZE);
+		sz = xino_fwrite(sbinfo->si_xwrite, xib, p, PAGE_SIZE, &pos);
+	}
+	if (sz == PAGE_SIZE) {
+		sbinfo->si_xib_last_pindex = pindex;
+		return 0; /* success */
+	}
+
+out:
+	AuIOErr1("write failed (%zd)\n", sz);
+	err = sz;
+	if (sz >= 0)
+		err = -EIO;
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void au_xib_clear_bit(struct inode *inode)
+{
+	int err, bit;
+	unsigned long pindex;
+	struct super_block *sb;
+	struct au_sbinfo *sbinfo;
+
+	AuDebugOn(inode->i_nlink);
+
+	sb = inode->i_sb;
+	xib_calc_bit(inode->i_ino, &pindex, &bit);
+	AuDebugOn(page_bits <= bit);
+	sbinfo = au_sbi(sb);
+	mutex_lock(&sbinfo->si_xib_mtx);
+	err = xib_pindex(sb, pindex);
+	if (!err) {
+		clear_bit(bit, sbinfo->si_xib_buf);
+		sbinfo->si_xib_next_bit = bit;
+	}
+	mutex_unlock(&sbinfo->si_xib_mtx);
+}
+
+/* for s_op->delete_inode() */
+void au_xino_delete_inode(struct inode *inode, const int unlinked)
+{
+	int err;
+	unsigned int mnt_flags;
+	aufs_bindex_t bindex, bend, bi;
+	unsigned char try_trunc;
+	struct au_iinfo *iinfo;
+	struct super_block *sb;
+	struct au_hinode *hi;
+	struct inode *h_inode;
+	struct au_branch *br;
+	au_writef_t xwrite;
+
+	sb = inode->i_sb;
+	mnt_flags = au_mntflags(sb);
+	if (!au_opt_test(mnt_flags, XINO)
+	    || inode->i_ino == AUFS_ROOT_INO)
+		return;
+
+	if (unlinked) {
+		au_xigen_inc(inode);
+		au_xib_clear_bit(inode);
+	}
+
+	iinfo = au_ii(inode);
+	if (!iinfo)
+		return;
+
+	bindex = iinfo->ii_bstart;
+	if (bindex < 0)
+		return;
+
+	xwrite = au_sbi(sb)->si_xwrite;
+	try_trunc = !!au_opt_test(mnt_flags, TRUNC_XINO);
+	hi = iinfo->ii_hinode + bindex;
+	bend = iinfo->ii_bend;
+	for (; bindex <= bend; bindex++, hi++) {
+		h_inode = hi->hi_inode;
+		if (!h_inode
+		    || (!unlinked && h_inode->i_nlink))
+			continue;
+
+		/* inode may not be revalidated */
+		bi = au_br_index(sb, hi->hi_id);
+		if (bi < 0)
+			continue;
+
+		br = au_sbr(sb, bi);
+		err = au_xino_do_write(xwrite, br->br_xino.xi_file,
+				       h_inode->i_ino, /*ino*/0);
+		if (!err && try_trunc
+		    && au_test_fs_trunc_xino(au_br_sb(br)))
+			xino_try_trunc(sb, br);
+	}
+}
+
+/* get an unused inode number from bitmap */
+ino_t au_xino_new_ino(struct super_block *sb)
+{
+	ino_t ino;
+	unsigned long *p, pindex, ul, pend;
+	struct au_sbinfo *sbinfo;
+	struct file *file;
+	int free_bit, err;
+
+	if (!au_opt_test(au_mntflags(sb), XINO))
+		return iunique(sb, AUFS_FIRST_INO);
+
+	sbinfo = au_sbi(sb);
+	mutex_lock(&sbinfo->si_xib_mtx);
+	p = sbinfo->si_xib_buf;
+	free_bit = sbinfo->si_xib_next_bit;
+	if (free_bit < page_bits && !test_bit(free_bit, p))
+		goto out; /* success */
+	free_bit = find_first_zero_bit(p, page_bits);
+	if (free_bit < page_bits)
+		goto out; /* success */
+
+	pindex = sbinfo->si_xib_last_pindex;
+	for (ul = pindex - 1; ul < ULONG_MAX; ul--) {
+		err = xib_pindex(sb, ul);
+		if (unlikely(err))
+			goto out_err;
+		free_bit = find_first_zero_bit(p, page_bits);
+		if (free_bit < page_bits)
+			goto out; /* success */
+	}
+
+	file = sbinfo->si_xib;
+	pend = vfsub_f_size_read(file) / PAGE_SIZE;
+	for (ul = pindex + 1; ul <= pend; ul++) {
+		err = xib_pindex(sb, ul);
+		if (unlikely(err))
+			goto out_err;
+		free_bit = find_first_zero_bit(p, page_bits);
+		if (free_bit < page_bits)
+			goto out; /* success */
+	}
+	BUG();
+
+out:
+	set_bit(free_bit, p);
+	sbinfo->si_xib_next_bit = free_bit + 1;
+	pindex = sbinfo->si_xib_last_pindex;
+	mutex_unlock(&sbinfo->si_xib_mtx);
+	ino = xib_calc_ino(pindex, free_bit);
+	AuDbg("i%lu\n", (unsigned long)ino);
+	return ino;
+out_err:
+	mutex_unlock(&sbinfo->si_xib_mtx);
+	AuDbg("i0\n");
+	return 0;
+}
+
+/*
+ * read @ino from the xinofile for the specified branch{@sb, @bindex}
+ * at the position of @h_ino.
+ * if no entry exists for @h_ino, *@ino is set to zero.
+ */
+int au_xino_read(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+		 ino_t *ino)
+{
+	int err;
+	ssize_t sz;
+	loff_t pos;
+	struct file *file;
+	struct au_sbinfo *sbinfo;
+
+	*ino = 0;
+	if (!au_opt_test(au_mntflags(sb), XINO))
+		return 0; /* no xino */
+
+	err = 0;
+	sbinfo = au_sbi(sb);
+	pos = h_ino;
+	if (unlikely(au_loff_max / sizeof(*ino) - 1 < pos)) {
+		AuIOErr1("too large hi%lu\n", (unsigned long)h_ino);
+		return -EFBIG;
+	}
+	pos *= sizeof(*ino);
+
+	file = au_sbr(sb, bindex)->br_xino.xi_file;
+	if (vfsub_f_size_read(file) < pos + sizeof(*ino))
+		return 0; /* no ino */
+
+	sz = xino_fread(sbinfo->si_xread, file, ino, sizeof(*ino), &pos);
+	if (sz == sizeof(*ino))
+		return 0; /* success */
+
+	err = sz;
+	if (unlikely(sz >= 0)) {
+		err = -EIO;
+		AuIOErr("xino read error (%zd)\n", sz);
+	}
+
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* create and set a new xino file */
+
+struct file *au_xino_create(struct super_block *sb, char *fname, int silent)
+{
+	struct file *file;
+	struct dentry *h_parent, *d;
+	struct inode *h_dir;
+	int err;
+
+	/*
+	 * at mount time, when the xino file is at the default path,
+	 * hnotify is disabled so we have no notify events to ignore.
+	 * when a user specifies the xino path, we cannot get au_hdir to be
+	 * ignored.
+	 */
+	file = vfsub_filp_open(fname, O_RDWR | O_CREAT | O_EXCL | O_LARGEFILE
+			       /* | __FMODE_NONOTIFY */,
+			       S_IRUGO | S_IWUGO);
+	if (IS_ERR(file)) {
+		if (!silent)
+			pr_err("open %s(%ld)\n", fname, PTR_ERR(file));
+		return file;
+	}
+
+	/* keep file count */
+	h_parent = dget_parent(file->f_dentry);
+	h_dir = h_parent->d_inode;
+	mutex_lock_nested(&h_dir->i_mutex, AuLsc_I_PARENT);
+	/* mnt_want_write() is unnecessary here */
+	err = vfsub_unlink(h_dir, &file->f_path, /*force*/0);
+	mutex_unlock(&h_dir->i_mutex);
+	dput(h_parent);
+	if (unlikely(err)) {
+		if (!silent)
+			pr_err("unlink %s(%d)\n", fname, err);
+		goto out;
+	}
+
+	err = -EINVAL;
+	d = file->f_dentry;
+	if (unlikely(sb == d->d_sb)) {
+		if (!silent)
+			pr_err("%s must be outside\n", fname);
+		goto out;
+	}
+	if (unlikely(au_test_fs_bad_xino(d->d_sb))) {
+		if (!silent)
+			pr_err("xino doesn't support %s(%s)\n",
+			       fname, au_sbtype(d->d_sb));
+		goto out;
+	}
+	return file; /* success */
+
+out:
+	fput(file);
+	file = ERR_PTR(err);
+	return file;
+}
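
au_xino_create() relies on the classic open-then-unlink idiom: after
vfsub_unlink() the file has no directory entry left, so it survives only as
long as a reference is held and its blocks vanish on the final fput(). A
minimal usage sketch; the path is purely illustrative:

	struct file *xf;

	xf = au_xino_create(sb, "/writable-branch/.xino", /*silent*/0);
	if (!IS_ERR(xf)) {
		/* read/write through xino_fread()/xino_fwrite() as needed */
		fput(xf);	/* last reference: the anonymous file disappears */
	}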
+
+/*
+ * find another branch which is on the same filesystem as the specified
+ * branch {@btgt}. search until @bend.
+ */
+static int is_sb_shared(struct super_block *sb, aufs_bindex_t btgt,
+			aufs_bindex_t bend)
+{
+	aufs_bindex_t bindex;
+	struct super_block *tgt_sb = au_sbr_sb(sb, btgt);
+
+	for (bindex = 0; bindex < btgt; bindex++)
+		if (unlikely(tgt_sb == au_sbr_sb(sb, bindex)))
+			return bindex;
+	for (bindex++; bindex <= bend; bindex++)
+		if (unlikely(tgt_sb == au_sbr_sb(sb, bindex)))
+			return bindex;
+	return -1;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * initialize the xinofile for the specified branch @br
+ * at the place/path indicated by @base_file.
+ * if @do_test is true, test whether another branch already resides
+ * on the same filesystem.
+ */
+int au_xino_br(struct super_block *sb, struct au_branch *br, ino_t h_ino,
+	       struct file *base_file, int do_test)
+{
+	int err;
+	ino_t ino;
+	aufs_bindex_t bend, bindex;
+	struct au_branch *shared_br, *b;
+	struct file *file;
+	struct super_block *tgt_sb;
+
+	shared_br = NULL;
+	bend = au_sbend(sb);
+	if (do_test) {
+		tgt_sb = au_br_sb(br);
+		for (bindex = 0; bindex <= bend; bindex++) {
+			b = au_sbr(sb, bindex);
+			if (tgt_sb == au_br_sb(b)) {
+				shared_br = b;
+				break;
+			}
+		}
+	}
+
+	if (!shared_br || !shared_br->br_xino.xi_file) {
+		struct au_xino_lock_dir ldir;
+
+		au_xino_lock_dir(sb, base_file, &ldir);
+		/* mnt_want_write() is unnecessary here */
+		file = au_xino_create2(base_file, NULL);
+		au_xino_unlock_dir(&ldir);
+		err = PTR_ERR(file);
+		if (IS_ERR(file))
+			goto out;
+		br->br_xino.xi_file = file;
+	} else {
+		br->br_xino.xi_file = shared_br->br_xino.xi_file;
+		get_file(br->br_xino.xi_file);
+	}
+
+	ino = AUFS_ROOT_INO;
+	err = au_xino_do_write(au_sbi(sb)->si_xwrite, br->br_xino.xi_file,
+			       h_ino, ino);
+	if (unlikely(err)) {
+		fput(br->br_xino.xi_file);
+		br->br_xino.xi_file = NULL;
+	}
+
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* truncate a xino bitmap file */
+
+/* todo: slow */
+static int do_xib_restore(struct super_block *sb, struct file *file, void *page)
+{
+	int err, bit;
+	ssize_t sz;
+	unsigned long pindex;
+	loff_t pos, pend;
+	struct au_sbinfo *sbinfo;
+	au_readf_t func;
+	ino_t *ino;
+	unsigned long *p;
+
+	err = 0;
+	sbinfo = au_sbi(sb);
+	MtxMustLock(&sbinfo->si_xib_mtx);
+	p = sbinfo->si_xib_buf;
+	func = sbinfo->si_xread;
+	pend = vfsub_f_size_read(file);
+	pos = 0;
+	while (pos < pend) {
+		sz = xino_fread(func, file, page, PAGE_SIZE, &pos);
+		err = sz;
+		if (unlikely(sz <= 0))
+			goto out;
+
+		err = 0;
+		for (ino = page; sz > 0; ino++, sz -= sizeof(ino)) {
+			if (unlikely(*ino < AUFS_FIRST_INO))
+				continue;
+
+			xib_calc_bit(*ino, &pindex, &bit);
+			AuDebugOn(page_bits <= bit);
+			err = xib_pindex(sb, pindex);
+			if (!err)
+				set_bit(bit, p);
+			else
+				goto out;
+		}
+	}
+
+out:
+	return err;
+}
+
+static int xib_restore(struct super_block *sb)
+{
+	int err;
+	aufs_bindex_t bindex, bend;
+	void *page;
+
+	err = -ENOMEM;
+	page = (void *)__get_free_page(GFP_NOFS);
+	if (unlikely(!page))
+		goto out;
+
+	err = 0;
+	bend = au_sbend(sb);
+	for (bindex = 0; !err && bindex <= bend; bindex++)
+		if (!bindex || is_sb_shared(sb, bindex, bindex - 1) < 0)
+			err = do_xib_restore
+				(sb, au_sbr(sb, bindex)->br_xino.xi_file, page);
+		else
+			AuDbg("b%d\n", bindex);
+	free_page((unsigned long)page);
+
+out:
+	return err;
+}
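
A worked example of what do_xib_restore() reconstructs, assuming PAGE_SIZE is
4096 so one bitmap page holds page_bits == 32768 bits (the exact value of
AUFS_FIRST_INO is left symbolic):

	/*
	 * suppose a xino slot read back from a branch contains ino 40000:
	 *	xib_calc_bit(40000, &pindex, &bit)
	 * yields pindex = (40000 - AUFS_FIRST_INO) / 32768 and bit = the
	 * remainder; xib_pindex() then pages the right bitmap block into
	 * si_xib_buf and set_bit(bit, p) re-marks that number as in use.
	 */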
+
+int au_xib_trunc(struct super_block *sb)
+{
+	int err;
+	ssize_t sz;
+	loff_t pos;
+	struct au_xino_lock_dir ldir;
+	struct au_sbinfo *sbinfo;
+	unsigned long *p;
+	struct file *file;
+
+	SiMustWriteLock(sb);
+
+	err = 0;
+	sbinfo = au_sbi(sb);
+	if (!au_opt_test(sbinfo->si_mntflags, XINO))
+		goto out;
+
+	file = sbinfo->si_xib;
+	if (vfsub_f_size_read(file) <= PAGE_SIZE)
+		goto out;
+
+	au_xino_lock_dir(sb, file, &ldir);
+	/* mnt_want_write() is unnecessary here */
+	file = au_xino_create2(sbinfo->si_xib, NULL);
+	au_xino_unlock_dir(&ldir);
+	err = PTR_ERR(file);
+	if (IS_ERR(file))
+		goto out;
+	fput(sbinfo->si_xib);
+	sbinfo->si_xib = file;
+
+	p = sbinfo->si_xib_buf;
+	memset(p, 0, PAGE_SIZE);
+	pos = 0;
+	sz = xino_fwrite(sbinfo->si_xwrite, sbinfo->si_xib, p, PAGE_SIZE, &pos);
+	if (unlikely(sz != PAGE_SIZE)) {
+		err = sz;
+		AuIOErr("err %d\n", err);
+		if (sz >= 0)
+			err = -EIO;
+		goto out;
+	}
+
+	mutex_lock(&sbinfo->si_xib_mtx);
+	/* mnt_want_write() is unnecessary here */
+	err = xib_restore(sb);
+	mutex_unlock(&sbinfo->si_xib_mtx);
+
+out:
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * xino mount option handlers
+ */
+static au_readf_t find_readf(struct file *h_file)
+{
+	const struct file_operations *fop = h_file->f_op;
+
+	if (fop) {
+		if (fop->read)
+			return fop->read;
+		if (fop->aio_read)
+			return do_sync_read;
+	}
+	return ERR_PTR(-ENOSYS);
+}
+
+static au_writef_t find_writef(struct file *h_file)
+{
+	const struct file_operations *fop = h_file->f_op;
+
+	if (fop) {
+		if (fop->write)
+			return fop->write;
+		if (fop->aio_write)
+			return do_sync_write;
+	}
+	return ERR_PTR(-ENOSYS);
+}
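
These helpers pick a synchronous entry point from the pre-iter
file_operations layout: a plain ->read/->write when present, otherwise the
do_sync_read()/do_sync_write() wrappers around ->aio_read/->aio_write. A
usage sketch; simplified, since the real xino_fread()/xino_fwrite() callers
also switch the address limit with set_fs() so a kernel buffer can be passed:

	char buf[64];			/* illustrative destination */
	au_readf_t readf = find_readf(h_file);
	loff_t pos = 0;
	ssize_t sz;

	if (!IS_ERR(readf))
		sz = readf(h_file, (char __user *)buf, sizeof(buf), &pos);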
+
+/* xino bitmap */
+static void xino_clear_xib(struct super_block *sb)
+{
+	struct au_sbinfo *sbinfo;
+
+	SiMustWriteLock(sb);
+
+	sbinfo = au_sbi(sb);
+	sbinfo->si_xread = NULL;
+	sbinfo->si_xwrite = NULL;
+	if (sbinfo->si_xib)
+		fput(sbinfo->si_xib);
+	sbinfo->si_xib = NULL;
+	free_page((unsigned long)sbinfo->si_xib_buf);
+	sbinfo->si_xib_buf = NULL;
+}
+
+static int au_xino_set_xib(struct super_block *sb, struct file *base)
+{
+	int err;
+	loff_t pos;
+	struct au_sbinfo *sbinfo;
+	struct file *file;
+
+	SiMustWriteLock(sb);
+
+	sbinfo = au_sbi(sb);
+	file = au_xino_create2(base, sbinfo->si_xib);
+	err = PTR_ERR(file);
+	if (IS_ERR(file))
+		goto out;
+	if (sbinfo->si_xib)
+		fput(sbinfo->si_xib);
+	sbinfo->si_xib = file;
+	sbinfo->si_xread = find_readf(file);
+	sbinfo->si_xwrite = find_writef(file);
+
+	err = -ENOMEM;
+	if (!sbinfo->si_xib_buf)
+		sbinfo->si_xib_buf = (void *)get_zeroed_page(GFP_NOFS);
+	if (unlikely(!sbinfo->si_xib_buf))
+		goto out_unset;
+
+	sbinfo->si_xib_last_pindex = 0;
+	sbinfo->si_xib_next_bit = 0;
+	if (vfsub_f_size_read(file) < PAGE_SIZE) {
+		pos = 0;
+		err = xino_fwrite(sbinfo->si_xwrite, file, sbinfo->si_xib_buf,
+				  PAGE_SIZE, &pos);
+		if (unlikely(err != PAGE_SIZE))
+			goto out_free;
+	}
+	err = 0;
+	goto out; /* success */
+
+out_free:
+	free_page((unsigned long)sbinfo->si_xib_buf);
+	sbinfo->si_xib_buf = NULL;
+	if (err >= 0)
+		err = -EIO;
+out_unset:
+	fput(sbinfo->si_xib);
+	sbinfo->si_xib = NULL;
+	sbinfo->si_xread = NULL;
+	sbinfo->si_xwrite = NULL;
+out:
+	return err;
+}
+
+/* xino for each branch */
+static void xino_clear_br(struct super_block *sb)
+{
+	aufs_bindex_t bindex, bend;
+	struct au_branch *br;
+
+	bend = au_sbend(sb);
+	for (bindex = 0; bindex <= bend; bindex++) {
+		br = au_sbr(sb, bindex);
+		if (!br || !br->br_xino.xi_file)
+			continue;
+
+		fput(br->br_xino.xi_file);
+		br->br_xino.xi_file = NULL;
+	}
+}
+
+static int au_xino_set_br(struct super_block *sb, struct file *base)
+{
+	int err;
+	ino_t ino;
+	aufs_bindex_t bindex, bend, bshared;
+	struct {
+		struct file *old, *new;
+	} *fpair, *p;
+	struct au_branch *br;
+	struct inode *inode;
+	au_writef_t writef;
+
+	SiMustWriteLock(sb);
+
+	err = -ENOMEM;
+	bend = au_sbend(sb);
+	fpair = kcalloc(bend + 1, sizeof(*fpair), GFP_NOFS);
+	if (unlikely(!fpair))
+		goto out;
+
+	inode = sb->s_root->d_inode;
+	ino = AUFS_ROOT_INO;
+	writef = au_sbi(sb)->si_xwrite;
+	for (bindex = 0, p = fpair; bindex <= bend; bindex++, p++) {
+		br = au_sbr(sb, bindex);
+		bshared = is_sb_shared(sb, bindex, bindex - 1);
+		if (bshared >= 0) {
+			/* shared xino */
+			*p = fpair[bshared];
+			get_file(p->new);
+		}
+
+		if (!p->new) {
+			/* new xino */
+			p->old = br->br_xino.xi_file;
+			p->new = au_xino_create2(base, br->br_xino.xi_file);
+			err = PTR_ERR(p->new);
+			if (IS_ERR(p->new)) {
+				p->new = NULL;
+				goto out_pair;
+			}
+		}
+
+		err = au_xino_do_write(writef, p->new,
+				       au_h_iptr(inode, bindex)->i_ino, ino);
+		if (unlikely(err))
+			goto out_pair;
+	}
+
+	for (bindex = 0, p = fpair; bindex <= bend; bindex++, p++) {
+		br = au_sbr(sb, bindex);
+		if (br->br_xino.xi_file)
+			fput(br->br_xino.xi_file);
+		get_file(p->new);
+		br->br_xino.xi_file = p->new;
+	}
+
+out_pair:
+	for (bindex = 0, p = fpair; bindex <= bend; bindex++, p++)
+		if (p->new)
+			fput(p->new);
+		else
+			break;
+	kfree(fpair);
+out:
+	return err;
+}
+
+void au_xino_clr(struct super_block *sb)
+{
+	struct au_sbinfo *sbinfo;
+
+	au_xigen_clr(sb);
+	xino_clear_xib(sb);
+	xino_clear_br(sb);
+	sbinfo = au_sbi(sb);
+	/* lvalue, do not call au_mntflags() */
+	au_opt_clr(sbinfo->si_mntflags, XINO);
+}
+
+int au_xino_set(struct super_block *sb, struct au_opt_xino *xino, int remount)
+{
+	int err, skip;
+	struct dentry *parent, *cur_parent;
+	struct qstr *dname, *cur_name;
+	struct file *cur_xino;
+	struct inode *dir;
+	struct au_sbinfo *sbinfo;
+
+	SiMustWriteLock(sb);
+
+	err = 0;
+	sbinfo = au_sbi(sb);
+	parent = dget_parent(xino->file->f_dentry);
+	if (remount) {
+		skip = 0;
+		dname = &xino->file->f_dentry->d_name;
+		cur_xino = sbinfo->si_xib;
+		if (cur_xino) {
+			cur_parent = dget_parent(cur_xino->f_dentry);
+			cur_name = &cur_xino->f_dentry->d_name;
+			skip = (cur_parent == parent
+				&& dname->len == cur_name->len
+				&& !memcmp(dname->name, cur_name->name,
+					   dname->len));
+			dput(cur_parent);
+		}
+		if (skip)
+			goto out;
+	}
+
+	au_opt_set(sbinfo->si_mntflags, XINO);
+	dir = parent->d_inode;
+	mutex_lock_nested(&dir->i_mutex, AuLsc_I_PARENT);
+	/* mnt_want_write() is unnecessary here */
+	err = au_xino_set_xib(sb, xino->file);
+	if (!err)
+		err = au_xigen_set(sb, xino->file);
+	if (!err)
+		err = au_xino_set_br(sb, xino->file);
+	mutex_unlock(&dir->i_mutex);
+	if (!err)
+		goto out; /* success */
+
+	/* reset all */
+	AuIOErr("failed creating xino(%d).\n", err);
+
+out:
+	dput(parent);
+	return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * create a xinofile at the default place/path.
+ */
+struct file *au_xino_def(struct super_block *sb)
+{
+	struct file *file;
+	char *page, *p;
+	struct au_branch *br;
+	struct super_block *h_sb;
+	struct path path;
+	aufs_bindex_t bend, bindex, bwr;
+
+	br = NULL;
+	bend = au_sbend(sb);
+	bwr = -1;
+	for (bindex = 0; bindex <= bend; bindex++) {
+		br = au_sbr(sb, bindex);
+		if (au_br_writable(br->br_perm)
+		    && !au_test_fs_bad_xino(au_br_sb(br))) {
+			bwr = bindex;
+			break;
+		}
+	}
+
+	if (bwr >= 0) {
+		file = ERR_PTR(-ENOMEM);
+		page = (void *)__get_free_page(GFP_NOFS);
+		if (unlikely(!page))
+			goto out;
+		path.mnt = au_br_mnt(br);
+		path.dentry = au_h_dptr(sb->s_root, bwr);
+		p = d_path(&path, page, PATH_MAX - sizeof(AUFS_XINO_FNAME));
+		file = (void *)p;
+		if (!IS_ERR(p)) {
+			strcat(p, "/" AUFS_XINO_FNAME);
+			AuDbg("%s\n", p);
+			file = au_xino_create(sb, p, /*silent*/0);
+			if (!IS_ERR(file))
+				au_xino_brid_set(sb, br->br_id);
+		}
+		free_page((unsigned long)page);
+	} else {
+		file = au_xino_create(sb, AUFS_XINO_DEFPATH, /*silent*/0);
+		if (IS_ERR(file))
+			goto out;
+		h_sb = file->f_dentry->d_sb;
+		if (unlikely(au_test_fs_bad_xino(h_sb))) {
+			pr_err("xino doesn't support %s(%s)\n",
+			       AUFS_XINO_DEFPATH, au_sbtype(h_sb));
+			fput(file);
+			file = ERR_PTR(-EINVAL);
+		}
+		if (!IS_ERR(file))
+			au_xino_brid_set(sb, -1);
+	}
+
+out:
+	return file;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_xino_path(struct seq_file *seq, struct file *file)
+{
+	int err;
+
+	err = au_seq_path(seq, &file->f_path);
+	if (unlikely(err < 0))
+		goto out;
+
+	err = 0;
+#define Deleted "\\040(deleted)"
+	seq->count -= sizeof(Deleted) - 1;
+	AuDebugOn(memcmp(seq->buf + seq->count, Deleted,
+			 sizeof(Deleted) - 1));
+#undef Deleted
+
+out:
+	return err;
+}
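
The seq->count adjustment above trims the marker that the VFS appends to the
path of an unlinked file; recall that the xino file is deliberately unlinked
at creation. Illustratively:

	/*
	 * raw output of au_seq_path() for the unlinked xino file:
	 *	/writable-branch/.xino\040(deleted)
	 * after subtracting sizeof(Deleted) - 1 from seq->count:
	 *	/writable-branch/.xino
	 * the space appears as the octal escape \040 because seq output
	 * mangles whitespace in mount-style listings.
	 */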
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index f95dddc..7192a7e 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -908,6 +908,7 @@
 static int
 befs_remount(struct super_block *sb, int *flags, char *data)
 {
+	sync_filesystem(sb);
 	if (!(*flags & MS_RDONLY))
 		return -EINVAL;
 	return 0;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 9612a01..e1b2084 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1013,7 +1013,8 @@
 	return try_release_extent_buffer(page);
 }
 
-static void btree_invalidatepage(struct page *page, unsigned long offset)
+static void btree_invalidatepage(struct page *page, unsigned int offset,
+				 unsigned int length)
 {
 	struct extent_io_tree *tree;
 	tree = &BTRFS_I(page->mapping->host)->io_tree;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index be7e31a..70c2369 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2960,7 +2960,7 @@
 	pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
 	if (page->index > end_index ||
 	   (page->index == end_index && !pg_offset)) {
-		page->mapping->a_ops->invalidatepage(page, 0);
+		page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
 		unlock_page(page);
 		return 0;
 	}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index b5d13c4..5907f66 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7543,7 +7543,8 @@
 	return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
 }
 
-static void btrfs_invalidatepage(struct page *page, unsigned long offset)
+static void btrfs_invalidatepage(struct page *page, unsigned int offset,
+				 unsigned int length)
 {
 	struct inode *inode = page->mapping->host;
 	struct extent_io_tree *tree;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 68891ff..feecdfb 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1248,6 +1248,7 @@
 	unsigned int old_metadata_ratio = fs_info->metadata_ratio;
 	int ret;
 
+	sync_filesystem(sb);
 	btrfs_remount_prepare(fs_info);
 
 	ret = btrfs_parse_options(root, data);
diff --git a/fs/buffer.c b/fs/buffer.c
index 83fedaa..8aa01a9 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1458,7 +1458,8 @@
  * block_invalidatepage - invalidate part or all of a buffer-backed page
  *
  * @page: the page which is affected
- * @offset: the index of the truncation point
+ * @offset: start of the range to invalidate
+ * @length: length of the range to invalidate
  *
  * block_invalidatepage() is called when all or part of the page has become
  * invalidated by a truncate operation.
@@ -1469,15 +1470,22 @@
  * point.  Because the caller is about to free (and possibly reuse) those
  * blocks on-disk.
  */
-void block_invalidatepage(struct page *page, unsigned long offset)
+void block_invalidatepage(struct page *page, unsigned int offset,
+			  unsigned int length)
 {
 	struct buffer_head *head, *bh, *next;
 	unsigned int curr_off = 0;
+	unsigned int stop = length + offset;
 
 	BUG_ON(!PageLocked(page));
 	if (!page_has_buffers(page))
 		goto out;
 
+	/*
+	 * Check for overflow
+	 */
+	BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
+
 	head = page_buffers(page);
 	bh = head;
 	do {
@@ -1485,6 +1493,12 @@
 		next = bh->b_this_page;
 
 		/*
+		 * Are we still fully in range ?
+		 */
+		if (next_off > stop)
+			goto out;
+
+		/*
 		 * is this block fully invalidated?
 		 */
 		if (offset <= curr_off)
@@ -1505,6 +1519,7 @@
 }
 EXPORT_SYMBOL(block_invalidatepage);
 
+
 /*
  * We attach and possibly dirty the buffers atomically wrt
  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
@@ -2400,6 +2415,8 @@
 	 * fault so this update may be superfluous but who really cares...
 	 */
 	file_update_time(vma->vm_file);
+	if (vma->vm_prfile)
+		file_update_time(vma->vm_prfile);
 
 	ret = __block_page_mkwrite(vma, vmf, get_block);
 	sb_end_pagefault(sb);
@@ -2853,7 +2870,7 @@
 		 * they may have been added in ext3_writepage().  Make them
 		 * freeable here, so the page does not leak.
 		 */
-		do_invalidatepage(page, 0);
+		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
 		unlock_page(page);
 		return 0; /* don't care */
 	}
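
The hunks above are part of a tree-wide change of ->invalidatepage() from
(page, offset) to (page, offset, length), letting callers invalidate a
sub-page byte range. A conversion sketch for a hypothetical filesystem;
foofs and foofs_forget_page() are made up for illustration:

	static void foofs_invalidatepage(struct page *page, unsigned int offset,
					 unsigned int length)
	{
		/* full-page invalidation is now offset == 0 && length == page size */
		if (offset == 0 && length == PAGE_CACHE_SIZE)
			foofs_forget_page(page);	/* hypothetical helper */

		block_invalidatepage(page, offset, length);
	}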
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 5da06f0..1070554 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -143,7 +143,8 @@
  * dirty page counters appropriately.  Only called if there is private
  * data on the page.
  */
-static void ceph_invalidatepage(struct page *page, unsigned long offset)
+static void ceph_invalidatepage(struct page *page, unsigned int offset,
+				unsigned int length)
 {
 	struct inode *inode;
 	struct ceph_inode_info *ci;
@@ -168,7 +169,7 @@
 
 	ci = ceph_inode(inode);
 	if (offset == 0) {
-		dout("%p invalidatepage %p idx %lu full dirty page %lu\n",
+		dout("%p invalidatepage %p idx %lu full dirty page %u\n",
 		     inode, page, page->index, offset);
 		ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
 		ceph_put_snap_context(snapc);
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 3752b9f..0d7daf7 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -521,6 +521,7 @@
 
 static int cifs_remount(struct super_block *sb, int *flags, char *data)
 {
+	sync_filesystem(sb);
 	*flags |= MS_NODIRATIME;
 	return 0;
 }
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index f4a8577..4e7cc89 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3577,11 +3577,12 @@
 	return cifs_fscache_release_page(page, gfp);
 }
 
-static void cifs_invalidate_page(struct page *page, unsigned long offset)
+static void cifs_invalidate_page(struct page *page, unsigned int offset,
+				 unsigned int length)
 {
 	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
 
-	if (offset == 0)
+	if (offset == 0 && length == PAGE_CACHE_SIZE)
 		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
 }
 
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index bfbf470..b70aa7c 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -447,7 +447,7 @@
 {
 	int error;
 
-	error = wait_event_freezekillable(server->response_q,
+	error = wait_event_freezekillable_unsafe(server->response_q,
 				    midQ->mid_state != MID_REQUEST_SUBMITTED);
 	if (error < 0)
 		return -ERESTARTSYS;
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index b7d3a05..fc66861 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -391,8 +391,7 @@
 	if (!host_file->f_op)
 		return -ENOTDIR;
 
-	if (host_file->f_op->readdir)
-	{
+	if (host_file->f_op->readdir) {
 		/* potemkin case: we were handed a directory inode.
 		 * We can't use vfs_readdir because we have to keep the file
 		 * position in sync between the coda_file and the host_file.
@@ -410,8 +409,20 @@
 
 		coda_file->f_pos = host_file->f_pos;
 		mutex_unlock(&host_inode->i_mutex);
-	}
-	else /* Venus: we must read Venus dirents from a file */
+	} else if (host_file->f_op->iterate) {
+		struct inode *host_inode = file_inode(host_file);
+		struct dir_context *ctx = buf;
+
+		mutex_lock(&host_inode->i_mutex);
+		ret = -ENOENT;
+		if (!IS_DEADDIR(host_inode)) {
+			ret = host_file->f_op->iterate(host_file, ctx);
+			file_accessed(host_file);
+		}
+		mutex_unlock(&host_inode->i_mutex);
+
+		coda_file->f_pos = ctx->pos;
+	} else /* Venus: we must read Venus dirents from a file */
 		ret = coda_venus_readdir(coda_file, buf, filldir);
 
 	return ret;
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index 4dcc0d8..0aa4c4d 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -96,6 +96,7 @@
 
 static int coda_remount(struct super_block *sb, int *flags, char *data)
 {
+	sync_filesystem(sb);
 	*flags |= MS_NOATIME;
 	return 0;
 }
diff --git a/fs/compat.c b/fs/compat.c
index fc3b55d..6af20de 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -832,6 +832,7 @@
 };
 
 struct compat_readdir_callback {
+	struct dir_context ctx;
 	struct compat_old_linux_dirent __user *dirent;
 	int result;
 };
@@ -873,15 +874,15 @@
 {
 	int error;
 	struct fd f = fdget(fd);
-	struct compat_readdir_callback buf;
+	struct compat_readdir_callback buf = {
+		.ctx.actor = compat_fillonedir,
+		.dirent = dirent
+	};
 
 	if (!f.file)
 		return -EBADF;
 
-	buf.result = 0;
-	buf.dirent = dirent;
-
-	error = vfs_readdir(f.file, compat_fillonedir, &buf);
+	error = iterate_dir(f.file, &buf.ctx);
 	if (buf.result)
 		error = buf.result;
 
@@ -897,6 +898,7 @@
 };
 
 struct compat_getdents_callback {
+	struct dir_context ctx;
 	struct compat_linux_dirent __user *current_dir;
 	struct compat_linux_dirent __user *previous;
 	int count;
@@ -951,7 +953,11 @@
 {
 	struct fd f;
 	struct compat_linux_dirent __user * lastdirent;
-	struct compat_getdents_callback buf;
+	struct compat_getdents_callback buf = {
+		.ctx.actor = compat_filldir,
+		.current_dir = dirent,
+		.count = count
+	};
 	int error;
 
 	if (!access_ok(VERIFY_WRITE, dirent, count))
@@ -961,17 +967,12 @@
 	if (!f.file)
 		return -EBADF;
 
-	buf.current_dir = dirent;
-	buf.previous = NULL;
-	buf.count = count;
-	buf.error = 0;
-
-	error = vfs_readdir(f.file, compat_filldir, &buf);
+	error = iterate_dir(f.file, &buf.ctx);
 	if (error >= 0)
 		error = buf.error;
 	lastdirent = buf.previous;
 	if (lastdirent) {
-		if (put_user(f.file->f_pos, &lastdirent->d_off))
+		if (put_user(buf.ctx.pos, &lastdirent->d_off))
 			error = -EFAULT;
 		else
 			error = count - buf.count;
@@ -983,6 +984,7 @@
 #ifndef __ARCH_OMIT_COMPAT_SYS_GETDENTS64
 
 struct compat_getdents_callback64 {
+	struct dir_context ctx;
 	struct linux_dirent64 __user *current_dir;
 	struct linux_dirent64 __user *previous;
 	int count;
@@ -1036,7 +1038,11 @@
 {
 	struct fd f;
 	struct linux_dirent64 __user * lastdirent;
-	struct compat_getdents_callback64 buf;
+	struct compat_getdents_callback64 buf = {
+		.ctx.actor = compat_filldir64,
+		.current_dir = dirent,
+		.count = count
+	};
 	int error;
 
 	if (!access_ok(VERIFY_WRITE, dirent, count))
@@ -1046,17 +1052,12 @@
 	if (!f.file)
 		return -EBADF;
 
-	buf.current_dir = dirent;
-	buf.previous = NULL;
-	buf.count = count;
-	buf.error = 0;
-
-	error = vfs_readdir(f.file, compat_filldir64, &buf);
+	error = iterate_dir(f.file, &buf.ctx);
 	if (error >= 0)
 		error = buf.error;
 	lastdirent = buf.previous;
 	if (lastdirent) {
-		typeof(lastdirent->d_off) d_off = f.file->f_pos;
+		typeof(lastdirent->d_off) d_off = buf.ctx.pos;
 		if (__put_user_unaligned(d_off, &lastdirent->d_off))
 			error = -EFAULT;
 		else
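
All of these getdents conversions follow the same pattern: the per-call
cookie struct embeds a struct dir_context as its first member, only &buf.ctx
is handed to iterate_dir(), and the resume offset is read back from ctx.pos
instead of file->f_pos. In general form; my_callback and my_filldir are
hypothetical names:

	struct my_callback {
		struct dir_context ctx;	/* kept first: transitional actors
					 * still receive it as an opaque buf */
		int count;
	};

	struct my_callback buf = {
		.ctx.actor = my_filldir,
		.count = 0
	};
	int error = iterate_dir(file, &buf.ctx);
	/* buf.ctx.pos now holds the directory offset to resume from */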
diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
index a81147e..4d24d17 100644
--- a/fs/compat_binfmt_elf.c
+++ b/fs/compat_binfmt_elf.c
@@ -88,6 +88,11 @@
 #define	ELF_HWCAP		COMPAT_ELF_HWCAP
 #endif
 
+#ifdef	COMPAT_ELF_HWCAP2
+#undef	ELF_HWCAP2
+#define	ELF_HWCAP2		COMPAT_ELF_HWCAP2
+#endif
+
 #ifdef	COMPAT_ARCH_DLINFO
 #undef	ARCH_DLINFO
 #define	ARCH_DLINFO		COMPAT_ARCH_DLINFO
diff --git a/fs/coredump.c b/fs/coredump.c
index a94f94d..afbd037 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -187,6 +187,11 @@
 				err = cn_printf(cn, "%d",
 					      task_tgid_vnr(current));
 				break;
+			/* global pid */
+			case 'P':
+				err = cn_printf(cn, "%d",
+					      task_tgid_nr(current));
+				break;
 			/* uid */
 			case 'u':
 				err = cn_printf(cn, "%d", cred->uid);
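
The new %P specifier complements %p: as the code shows, %p expands via
task_tgid_vnr() to the tgid seen inside the crashing task's pid namespace,
while %P expands via task_tgid_nr() to the global tgid, which is what a
host-side crash collector needs. Usage sketch, from userspace:

	/* e.g. (illustrative): name cores by executable and global tgid
	 *	echo 'core.%e.%P' > /proc/sys/kernel/core_pattern
	 */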
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 35b1c7b..c014858 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -227,6 +227,7 @@
 
 static int cramfs_remount(struct super_block *sb, int *flags, char *data)
 {
+	sync_filesystem(sb);
 	*flags |= MS_RDONLY;
 	return 0;
 }
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 26d7fff..84378f0 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -218,6 +218,7 @@
 	int err;
 	struct debugfs_fs_info *fsi = sb->s_fs_info;
 
+	sync_filesystem(sb);
 	err = debugfs_parse_options(data, &fsi->mount_opts);
 	if (err)
 		goto fail;
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 61af24e..91ea0e9 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -313,6 +313,7 @@
 	struct pts_fs_info *fsi = DEVPTS_SB(sb);
 	struct pts_mount_opts *opts = &fsi->mount_opts;
 
+	sync_filesystem(sb);
 	err = parse_mount_options(data, PARSE_REMOUNT, opts);
 
 	/*
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 7ab90f5..8b31b9f 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -127,6 +127,7 @@
 	spinlock_t bio_lock;		/* protects BIO fields below */
 	int page_errors;		/* errno from get_user_pages() */
 	int is_async;			/* is IO async ? */
+	bool defer_completion;		/* defer AIO completion to workqueue? */
 	int io_error;			/* IO error in completion path */
 	unsigned long refcount;		/* direct_io_worker() and bios */
 	struct bio *bio_list;		/* singly linked via bi_private */
@@ -141,7 +142,10 @@
 	 * allocation time.  Don't add new fields after pages[] unless you
 	 * wish that they not be zeroed.
 	 */
-	struct page *pages[DIO_PAGES];	/* page buffer */
+	union {
+		struct page *pages[DIO_PAGES];	/* page buffer */
+		struct work_struct complete_work;/* deferred AIO completion */
+	};
 } ____cacheline_aligned_in_smp;
 
 static struct kmem_cache *dio_cache __read_mostly;
@@ -221,16 +225,16 @@
  * dio_complete() - called when all DIO BIO I/O has been completed
  * @offset: the byte offset in the file of the completed operation
  *
- * This releases locks as dictated by the locking type, lets interested parties
- * know that a DIO operation has completed, and calculates the resulting return
- * code for the operation.
+ * This drops i_dio_count, lets interested parties know that a DIO operation
+ * has completed, and calculates the resulting return code for the operation.
  *
  * It lets the filesystem know if it registered an interest earlier via
  * get_block.  Pass the private field of the map buffer_head so that
  * filesystems can use it to hold additional state between get_block calls and
  * dio_complete.
  */
-static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, bool is_async)
+static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret,
+		bool is_async)
 {
 	ssize_t transferred = 0;
 
@@ -258,19 +262,26 @@
 	if (ret == 0)
 		ret = transferred;
 
-	if (dio->end_io && dio->result) {
-		dio->end_io(dio->iocb, offset, transferred,
-			    dio->private, ret, is_async);
-	} else {
-		inode_dio_done(dio->inode);
-		if (is_async)
-			aio_complete(dio->iocb, ret, 0);
-	}
+	if (dio->end_io && dio->result)
+		dio->end_io(dio->iocb, offset, transferred, dio->private);
 
+	inode_dio_done(dio->inode);
+	if (is_async)
+		aio_complete(dio->iocb, ret, 0);
+
+	kmem_cache_free(dio_cache, dio);
 	return ret;
 }
 
+static void dio_aio_complete_work(struct work_struct *work)
+{
+	struct dio *dio = container_of(work, struct dio, complete_work);
+
+	dio_complete(dio, dio->iocb->ki_pos, 0, true);
+}
+
 static int dio_bio_complete(struct dio *dio, struct bio *bio);
+
 /*
  * Asynchronous IO callback. 
  */
@@ -290,8 +301,13 @@
 	spin_unlock_irqrestore(&dio->bio_lock, flags);
 
 	if (remaining == 0) {
-		dio_complete(dio, dio->iocb->ki_pos, 0, true);
-		kmem_cache_free(dio_cache, dio);
+		if (dio->result && dio->defer_completion) {
+			INIT_WORK(&dio->complete_work, dio_aio_complete_work);
+			queue_work(dio->inode->i_sb->s_dio_done_wq,
+				   &dio->complete_work);
+		} else {
+			dio_complete(dio, dio->iocb->ki_pos, 0, true);
+		}
 	}
 }
 
@@ -511,6 +527,41 @@
 }
 
 /*
+ * Create workqueue for deferred direct IO completions. We allocate the
+ * workqueue when it's first needed. This avoids creating the workqueue for
+ * filesystems that don't need it and also allows us to create the workqueue
+ * late enough so that we can include s_id in the name of the workqueue.
+ */
+static int sb_init_dio_done_wq(struct super_block *sb)
+{
+	struct workqueue_struct *wq = alloc_workqueue("dio/%s",
+						      WQ_MEM_RECLAIM, 0,
+						      sb->s_id);
+	if (!wq)
+		return -ENOMEM;
+	/*
+	 * This has to be atomic as more DIOs can race to create the workqueue
+	 */
+	cmpxchg(&sb->s_dio_done_wq, NULL, wq);
+	/* Someone created workqueue before us? Free ours... */
+	if (wq != sb->s_dio_done_wq)
+		destroy_workqueue(wq);
+	return 0;
+}
+
+static int dio_set_defer_completion(struct dio *dio)
+{
+	struct super_block *sb = dio->inode->i_sb;
+
+	if (dio->defer_completion)
+		return 0;
+	dio->defer_completion = true;
+	if (!sb->s_dio_done_wq)
+		return sb_init_dio_done_wq(sb);
+	return 0;
+}
+
+/*
  * Call into the fs to map some more disk blocks.  We record the current number
  * of available blocks at sdio->blocks_available.  These are in units of the
  * fs blocksize, (1 << inode->i_blkbits).
@@ -581,6 +632,9 @@
 
 		/* Store for completion */
 		dio->private = map_bh->b_private;
+
+		if (ret == 0 && buffer_defer_completion(map_bh))
+			ret = dio_set_defer_completion(dio);
 	}
 	return ret;
 }
@@ -1269,7 +1323,6 @@
 
 	if (drop_refcount(dio) == 0) {
 		retval = dio_complete(dio, offset, retval, false);
-		kmem_cache_free(dio_cache, dio);
 	} else
 		BUG_ON(retval != -EIOCBQUEUED);
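
sb_init_dio_done_wq() above uses a lock-free publish-once idiom: every racer
builds its own workqueue, a single cmpxchg() installs one winner, and losers
destroy their private copy. The same idiom in isolation; a sketch that tests
the cmpxchg() return value instead of re-reading the field as the patch does,
which is equivalent here:

	static int sketch_publish_wq(struct super_block *sb)
	{
		struct workqueue_struct *wq =
			alloc_workqueue("dio/%s", WQ_MEM_RECLAIM, 0, sb->s_id);

		if (!wq)
			return -ENOMEM;
		if (cmpxchg(&sb->s_dio_done_wq, NULL, wq) != NULL)
			destroy_workqueue(wq);	/* lost the race; winner's stays */
		return 0;
	}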
 
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 1da2446..3cf8b3a 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1023,8 +1023,10 @@
 		       "to the inode key sigs; rc = [%d]\n", rc);
 		goto out;
 	}
+	/* TODO: Investigate side effect of truncating name if too long */
 	cipher_name_len =
-		strlen(mount_crypt_stat->global_default_cipher_name);
+		min(strlen(mount_crypt_stat->global_default_cipher_name),
+		    sizeof(crypt_stat->cipher)-1);
 	memcpy(crypt_stat->cipher,
 	       mount_crypt_stat->global_default_cipher_name,
 	       cipher_name_len);
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index d4644cc..125457c 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -68,6 +68,7 @@
 }
 
 struct ecryptfs_getdents_callback {
+	struct dir_context ctx;
 	void *dirent;
 	struct dentry *dentry;
 	filldir_t filldir;
@@ -115,18 +116,19 @@
 	int rc;
 	struct file *lower_file;
 	struct inode *inode;
-	struct ecryptfs_getdents_callback buf;
+	struct ecryptfs_getdents_callback buf = {
+		.dirent = dirent,
+		.dentry = file->f_path.dentry,
+		.filldir = filldir,
+		.filldir_called = 0,
+		.entries_written = 0,
+		.ctx.actor = ecryptfs_filldir
+	};
 
 	lower_file = ecryptfs_file_to_lower(file);
 	lower_file->f_pos = file->f_pos;
 	inode = file_inode(file);
-	memset(&buf, 0, sizeof(buf));
-	buf.dirent = dirent;
-	buf.dentry = file->f_path.dentry;
-	buf.filldir = filldir;
-	buf.filldir_called = 0;
-	buf.entries_written = 0;
-	rc = vfs_readdir(lower_file, ecryptfs_filldir, (void *)&buf);
+	rc = iterate_dir(lower_file, &buf.ctx);
 	file->f_pos = lower_file->f_pos;
 	if (rc < 0)
 		goto out;
diff --git a/fs/efs/super.c b/fs/efs/super.c
index c6f57a7..4709692 100644
--- a/fs/efs/super.c
+++ b/fs/efs/super.c
@@ -113,6 +113,7 @@
 
 static int efs_remount(struct super_block *sb, int *flags, char *data)
 {
+	sync_filesystem(sb);
 	*flags |= MS_RDONLY;
 	return 0;
 }
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index deecc72..0cff443 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -34,6 +34,7 @@
 #include <linux/mutex.h>
 #include <linux/anon_inodes.h>
 #include <linux/device.h>
+#include <linux/freezer.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/mman.h>
@@ -1602,7 +1603,8 @@
 			}
 
 			spin_unlock_irqrestore(&ep->lock, flags);
-			if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
+			if (!freezable_schedule_hrtimeout_range(to, slack,
+								HRTIMER_MODE_ABS))
 				timed_out = 1;
 
 			spin_lock_irqsave(&ep->lock, flags);
diff --git a/fs/exec.c b/fs/exec.c
index acbd7ac..d6de15b 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1220,7 +1220,7 @@
 /*
  * determine how safe it is to execute the proposed program
  * - the caller must hold ->cred_guard_mutex to protect against
- *   PTRACE_ATTACH
+ *   PTRACE_ATTACH or seccomp thread-sync
  */
 static int check_unsafe_exec(struct linux_binprm *bprm)
 {
@@ -1239,7 +1239,7 @@
 	 * This isn't strictly necessary, but it makes it harder for LSMs to
 	 * mess up.
 	 */
-	if (current->no_new_privs)
+	if (task_no_new_privs(current))
 		bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;
 
 	n_fs = 1;
@@ -1265,53 +1265,6 @@
 	return res;
 }
 
-static void bprm_fill_uid(struct linux_binprm *bprm)
-{
-	struct inode *inode;
-	unsigned int mode;
-	kuid_t uid;
-	kgid_t gid;
-
-	/* clear any previous set[ug]id data from a previous binary */
-	bprm->cred->euid = current_euid();
-	bprm->cred->egid = current_egid();
-
-	if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
-		return;
-
-	if (current->no_new_privs)
-		return;
-
-	inode = file_inode(bprm->file);
-	mode = ACCESS_ONCE(inode->i_mode);
-	if (!(mode & (S_ISUID|S_ISGID)))
-		return;
-
-	/* Be careful if suid/sgid is set */
-	mutex_lock(&inode->i_mutex);
-
-	/* reload atomically mode/uid/gid now that lock held */
-	mode = inode->i_mode;
-	uid = inode->i_uid;
-	gid = inode->i_gid;
-	mutex_unlock(&inode->i_mutex);
-
-	/* We ignore suid/sgid if there are no mappings for them in the ns */
-	if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
-		 !kgid_has_mapping(bprm->cred->user_ns, gid))
-		return;
-
-	if (mode & S_ISUID) {
-		bprm->per_clear |= PER_CLEAR_ON_SETID;
-		bprm->cred->euid = uid;
-	}
-
-	if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
-		bprm->per_clear |= PER_CLEAR_ON_SETID;
-		bprm->cred->egid = gid;
-	}
-}
-
 /* 
  * Fill the binprm structure from the inode. 
  * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
@@ -1320,12 +1273,39 @@
  */
 int prepare_binprm(struct linux_binprm *bprm)
 {
+	umode_t mode;
+	struct inode * inode = file_inode(bprm->file);
 	int retval;
 
+	mode = inode->i_mode;
 	if (bprm->file->f_op == NULL)
 		return -EACCES;
 
-	bprm_fill_uid(bprm);
+	/* clear any previous set[ug]id data from a previous binary */
+	bprm->cred->euid = current_euid();
+	bprm->cred->egid = current_egid();
+
+	if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
+		!task_no_new_privs(current) &&
+		kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) &&
+		kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) {
+		/* Set-uid? */
+		if (mode & S_ISUID) {
+			bprm->per_clear |= PER_CLEAR_ON_SETID;
+			bprm->cred->euid = inode->i_uid;
+		}
+
+		/* Set-gid? */
+		/*
+		 * If setgid is set but no group execute bit, then this
+		 * is a candidate for mandatory locking, not a setgid
+		 * executable.
+		 */
+		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
+			bprm->per_clear |= PER_CLEAR_ON_SETID;
+			bprm->cred->egid = inode->i_gid;
+		}
+	}
 
 	/* fill in binprm security blob */
 	retval = security_bprm_set_creds(bprm);
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index d1f80ab..2ec8eb1 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -953,9 +953,11 @@
 	return 0;
 }
 
-static void exofs_invalidatepage(struct page *page, unsigned long offset)
+static void exofs_invalidatepage(struct page *page, unsigned int offset,
+				 unsigned int length)
 {
-	EXOFS_DBGMSG("page 0x%lx offset 0x%lx\n", page->index, offset);
+	EXOFS_DBGMSG("page 0x%lx offset 0x%x length 0x%x\n",
+		     page->index, offset, length);
 	WARN_ON(1);
 }
 
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index b4eec4c..dc4784b 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -212,6 +212,7 @@
 }
 
 struct getdents_callback {
+	struct dir_context ctx;
 	char *name;		/* name that was found. It already points to a
 				   buffer NAME_MAX+1 is size */
 	unsigned long ino;	/* the inum we are looking for */
@@ -254,7 +255,11 @@
 	struct inode *dir = path->dentry->d_inode;
 	int error;
 	struct file *file;
-	struct getdents_callback buffer;
+	struct getdents_callback buffer = {
+		.ctx.actor = filldir_one,
+		.name = name,
+		.ino = child->d_inode->i_ino
+	};
 
 	error = -ENOTDIR;
 	if (!dir || !S_ISDIR(dir->i_mode))
@@ -271,17 +276,14 @@
 		goto out;
 
 	error = -EINVAL;
-	if (!file->f_op->readdir)
+	if (!file->f_op->readdir && !file->f_op->iterate)
 		goto out_close;
 
-	buffer.name = name;
-	buffer.ino = child->d_inode->i_ino;
-	buffer.found = 0;
 	buffer.sequence = 0;
 	while (1) {
 		int old_seq = buffer.sequence;
 
-		error = vfs_readdir(file, filldir_one, &buffer);
+		error = iterate_dir(file, &buffer.ctx);
 		if (buffer.found) {
 			error = 0;
 			break;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 20d6697..d260115 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1254,6 +1254,7 @@
 	unsigned long old_sb_flags;
 	int err;
 
+	sync_filesystem(sb);
 	spin_lock(&sbi->s_lock);
 
 	/* Store the old options */
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 23c7128..b6e5934 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1825,7 +1825,8 @@
 	return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
 }
 
-static void ext3_invalidatepage(struct page *page, unsigned long offset)
+static void ext3_invalidatepage(struct page *page, unsigned int offset,
+				unsigned int length)
 {
 	journal_t *journal = EXT3_JOURNAL(page->mapping->host);
 
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 882d4bd..b1e30be 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -2588,6 +2588,8 @@
 	int i;
 #endif
 
+	sync_filesystem(sb);
+
 	/* Store the original options */
 	old_sb_flags = sb->s_flags;
 	old_opts.s_mount_opt = sbi->s_mount_opt;
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index efea5d5..af1a90a 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -64,6 +64,24 @@
 	  If you are not using a security module that requires using
 	  extended attributes for file security labels, say N.
 
+config EXT4_FS_ENCRYPTION
+	bool "Ext4 Encryption"
+	depends on EXT4_FS
+	select CRYPTO_AES
+	select CRYPTO_CBC
+	select CRYPTO_ECB
+	select CRYPTO_XTS
+	select CRYPTO_CTS
+	select CRYPTO_CTR
+	select CRYPTO_SHA256
+	select KEYS
+	select ENCRYPTED_KEYS
+	help
+	  Enable encryption of ext4 files and directories.  This
+	  feature is similar to ecryptfs, but it is more memory
+	  efficient since it avoids caching the encrypted and
+	  decrypted pages in the page cache.
+
 config EXT4_DEBUG
 	bool "EXT4 debugging support"
 	depends on EXT4_FS
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index 0310fec..75285ea 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -8,7 +8,9 @@
 		ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
 		ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
 		mmp.o indirect.o extents_status.o xattr.o xattr_user.o \
-		xattr_trusted.o inline.o
+		xattr_trusted.o inline.o readpage.o
 
 ext4-$(CONFIG_EXT4_FS_POSIX_ACL)	+= acl.o
 ext4-$(CONFIG_EXT4_FS_SECURITY)		+= xattr_security.o
+ext4-$(CONFIG_EXT4_FS_ENCRYPTION)	+= crypto_policy.o crypto.o \
+		crypto_key.o crypto_fname.o
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index 39a54a0..d40c8db 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -152,13 +152,6 @@
 	struct posix_acl *acl;
 	int retval;
 
-	if (!test_opt(inode->i_sb, POSIX_ACL))
-		return NULL;
-
-	acl = get_cached_acl(inode, type);
-	if (acl != ACL_NOT_CACHED)
-		return acl;
-
 	switch (type) {
 	case ACL_TYPE_ACCESS:
 		name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
@@ -196,7 +189,7 @@
  * inode->i_mutex: down unless called from ext4_new_inode
  */
 static int
-ext4_set_acl(handle_t *handle, struct inode *inode, int type,
+__ext4_set_acl(handle_t *handle, struct inode *inode, int type,
 	     struct posix_acl *acl)
 {
 	int name_index;
@@ -204,9 +197,6 @@
 	size_t size = 0;
 	int error;
 
-	if (S_ISLNK(inode->i_mode))
-		return -EOPNOTSUPP;
-
 	switch (type) {
 	case ACL_TYPE_ACCESS:
 		name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
@@ -248,6 +238,25 @@
 	return error;
 }
 
+int
+ext4_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+{
+	handle_t *handle;
+	int error, retries = 0;
+
+retry:
+	handle = ext4_journal_start(inode, EXT4_HT_XATTR,
+				    ext4_jbd2_credits_xattr(inode));
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	error = __ext4_set_acl(handle, inode, type, acl);
+	ext4_journal_stop(handle);
+	if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+		goto retry;
+	return error;
+}
+
 /*
  * Initialize the ACLs of a new inode. Called from ext4_new_inode.
  *
@@ -257,199 +266,23 @@
 int
 ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
 {
-	struct posix_acl *acl = NULL;
-	int error = 0;
-
-	if (!S_ISLNK(inode->i_mode)) {
-		if (test_opt(dir->i_sb, POSIX_ACL)) {
-			acl = ext4_get_acl(dir, ACL_TYPE_DEFAULT);
-			if (IS_ERR(acl))
-				return PTR_ERR(acl);
-		}
-		if (!acl)
-			inode->i_mode &= ~current_umask();
-	}
-	if (test_opt(inode->i_sb, POSIX_ACL) && acl) {
-		if (S_ISDIR(inode->i_mode)) {
-			error = ext4_set_acl(handle, inode,
-					     ACL_TYPE_DEFAULT, acl);
-			if (error)
-				goto cleanup;
-		}
-		error = posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
-		if (error < 0)
-			return error;
-
-		if (error > 0) {
-			/* This is an extended ACL */
-			error = ext4_set_acl(handle, inode, ACL_TYPE_ACCESS, acl);
-		}
-	}
-cleanup:
-	posix_acl_release(acl);
-	return error;
-}
-
-/*
- * Does chmod for an inode that may have an Access Control List. The
- * inode->i_mode field must be updated to the desired value by the caller
- * before calling this function.
- * Returns 0 on success, or a negative error number.
- *
- * We change the ACL rather than storing some ACL entries in the file
- * mode permission bits (which would be more efficient), because that
- * would break once additional permissions (like  ACL_APPEND, ACL_DELETE
- * for directories) are added. There are no more bits available in the
- * file mode.
- *
- * inode->i_mutex: down
- */
-int
-ext4_acl_chmod(struct inode *inode)
-{
-	struct posix_acl *acl;
-	handle_t *handle;
-	int retries = 0;
+	struct posix_acl *default_acl, *acl;
 	int error;
 
-
-	if (S_ISLNK(inode->i_mode))
-		return -EOPNOTSUPP;
-	if (!test_opt(inode->i_sb, POSIX_ACL))
-		return 0;
-	acl = ext4_get_acl(inode, ACL_TYPE_ACCESS);
-	if (IS_ERR(acl) || !acl)
-		return PTR_ERR(acl);
-	error = posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
+	error = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
 	if (error)
 		return error;
-retry:
-	handle = ext4_journal_start(inode, EXT4_HT_XATTR,
-				    ext4_jbd2_credits_xattr(inode));
-	if (IS_ERR(handle)) {
-		error = PTR_ERR(handle);
-		ext4_std_error(inode->i_sb, error);
-		goto out;
+
+	if (default_acl) {
+		error = __ext4_set_acl(handle, inode, ACL_TYPE_DEFAULT,
+				       default_acl);
+		posix_acl_release(default_acl);
 	}
-	error = ext4_set_acl(handle, inode, ACL_TYPE_ACCESS, acl);
-	ext4_journal_stop(handle);
-	if (error == -ENOSPC &&
-	    ext4_should_retry_alloc(inode->i_sb, &retries))
-		goto retry;
-out:
-	posix_acl_release(acl);
-	return error;
-}
-
-/*
- * Extended attribute handlers
- */
-static size_t
-ext4_xattr_list_acl_access(struct dentry *dentry, char *list, size_t list_len,
-			   const char *name, size_t name_len, int type)
-{
-	const size_t size = sizeof(POSIX_ACL_XATTR_ACCESS);
-
-	if (!test_opt(dentry->d_sb, POSIX_ACL))
-		return 0;
-	if (list && size <= list_len)
-		memcpy(list, POSIX_ACL_XATTR_ACCESS, size);
-	return size;
-}
-
-static size_t
-ext4_xattr_list_acl_default(struct dentry *dentry, char *list, size_t list_len,
-			    const char *name, size_t name_len, int type)
-{
-	const size_t size = sizeof(POSIX_ACL_XATTR_DEFAULT);
-
-	if (!test_opt(dentry->d_sb, POSIX_ACL))
-		return 0;
-	if (list && size <= list_len)
-		memcpy(list, POSIX_ACL_XATTR_DEFAULT, size);
-	return size;
-}
-
-static int
-ext4_xattr_get_acl(struct dentry *dentry, const char *name, void *buffer,
-		   size_t size, int type)
-{
-	struct posix_acl *acl;
-	int error;
-
-	if (strcmp(name, "") != 0)
-		return -EINVAL;
-	if (!test_opt(dentry->d_sb, POSIX_ACL))
-		return -EOPNOTSUPP;
-
-	acl = ext4_get_acl(dentry->d_inode, type);
-	if (IS_ERR(acl))
-		return PTR_ERR(acl);
-	if (acl == NULL)
-		return -ENODATA;
-	error = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
-	posix_acl_release(acl);
-
-	return error;
-}
-
-static int
-ext4_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
-		   size_t size, int flags, int type)
-{
-	struct inode *inode = dentry->d_inode;
-	handle_t *handle;
-	struct posix_acl *acl;
-	int error, retries = 0;
-
-	if (strcmp(name, "") != 0)
-		return -EINVAL;
-	if (!test_opt(inode->i_sb, POSIX_ACL))
-		return -EOPNOTSUPP;
-	if (!inode_owner_or_capable(inode))
-		return -EPERM;
-
-	if (value) {
-		acl = posix_acl_from_xattr(&init_user_ns, value, size);
-		if (IS_ERR(acl))
-			return PTR_ERR(acl);
-		else if (acl) {
-			error = posix_acl_valid(acl);
-			if (error)
-				goto release_and_out;
-		}
-	} else
-		acl = NULL;
-
-retry:
-	handle = ext4_journal_start(inode, EXT4_HT_XATTR,
-				    ext4_jbd2_credits_xattr(inode));
-	if (IS_ERR(handle)) {
-		error = PTR_ERR(handle);
-		goto release_and_out;
+	if (acl) {
+		if (!error)
+			error = __ext4_set_acl(handle, inode, ACL_TYPE_ACCESS,
+					       acl);
+		posix_acl_release(acl);
 	}
-	error = ext4_set_acl(handle, inode, type, acl);
-	ext4_journal_stop(handle);
-	if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
-		goto retry;
-
-release_and_out:
-	posix_acl_release(acl);
 	return error;
 }
-
-const struct xattr_handler ext4_xattr_acl_access_handler = {
-	.prefix	= POSIX_ACL_XATTR_ACCESS,
-	.flags	= ACL_TYPE_ACCESS,
-	.list	= ext4_xattr_list_acl_access,
-	.get	= ext4_xattr_get_acl,
-	.set	= ext4_xattr_set_acl,
-};
-
-const struct xattr_handler ext4_xattr_acl_default_handler = {
-	.prefix	= POSIX_ACL_XATTR_DEFAULT,
-	.flags	= ACL_TYPE_DEFAULT,
-	.list	= ext4_xattr_list_acl_default,
-	.get	= ext4_xattr_get_acl,
-	.set	= ext4_xattr_set_acl,
-};
diff --git a/fs/ext4/acl.h b/fs/ext4/acl.h
index 18cb39e..da2c795 100644
--- a/fs/ext4/acl.h
+++ b/fs/ext4/acl.h
@@ -55,18 +55,13 @@
 
 /* acl.c */
 struct posix_acl *ext4_get_acl(struct inode *inode, int type);
-extern int ext4_acl_chmod(struct inode *);
+int ext4_set_acl(struct inode *inode, struct posix_acl *acl, int type);
 extern int ext4_init_acl(handle_t *, struct inode *, struct inode *);
 
 #else  /* CONFIG_EXT4_FS_POSIX_ACL */
 #include <linux/sched.h>
 #define ext4_get_acl NULL
-
-static inline int
-ext4_acl_chmod(struct inode *inode)
-{
-	return 0;
-}
+#define ext4_set_acl NULL
 
 static inline int
 ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 3742e4c..83a6f49 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -83,9 +83,9 @@
 /* Return the number of clusters used for file system metadata; this
  * represents the overhead needed by the file system.
  */
-unsigned ext4_num_overhead_clusters(struct super_block *sb,
-				    ext4_group_t block_group,
-				    struct ext4_group_desc *gdp)
+static unsigned ext4_num_overhead_clusters(struct super_block *sb,
+					   ext4_group_t block_group,
+					   struct ext4_group_desc *gdp)
 {
 	unsigned num_clusters;
 	int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c;
@@ -176,27 +176,35 @@
 }
 
 /* Initializes an uninitialized block bitmap */
-void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
-			    ext4_group_t block_group,
-			    struct ext4_group_desc *gdp)
+static int ext4_init_block_bitmap(struct super_block *sb,
+				   struct buffer_head *bh,
+				   ext4_group_t block_group,
+				   struct ext4_group_desc *gdp)
 {
 	unsigned int bit, bit_max;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	ext4_fsblk_t start, tmp;
 	int flex_bg = 0;
+	struct ext4_group_info *grp;
 
 	J_ASSERT_BH(bh, buffer_locked(bh));
 
 	/* If checksum is bad mark all blocks used to prevent allocation
 	 * essentially implementing a per-group read-only flag. */
 	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
-		ext4_error(sb, "Checksum bad for group %u", block_group);
-		ext4_free_group_clusters_set(sb, gdp, 0);
-		ext4_free_inodes_set(sb, gdp, 0);
-		ext4_itable_unused_set(sb, gdp, 0);
-		memset(bh->b_data, 0xff, sb->s_blocksize);
-		ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
-		return;
+		grp = ext4_get_group_info(sb, block_group);
+		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+			percpu_counter_sub(&sbi->s_freeclusters_counter,
+					   grp->bb_free);
+		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
+		if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
+			int count;
+			count = ext4_free_inodes_count(sb, gdp);
+			percpu_counter_sub(&sbi->s_freeinodes_counter,
+					   count);
+		}
+		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
+		return -EIO;
 	}
 	memset(bh->b_data, 0, sb->s_blocksize);
 
@@ -234,6 +242,7 @@
 			     sb->s_blocksize * 8, bh->b_data);
 	ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
 	ext4_group_desc_csum_set(sb, block_group, gdp);
+	return 0;
 }
 
 /* Return the number of free blocks in a block group.  It is used when
@@ -305,9 +314,10 @@
  */
 static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
 					    struct ext4_group_desc *desc,
-					    unsigned int block_group,
+					    ext4_group_t block_group,
 					    struct buffer_head *bh)
 {
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	ext4_grpblk_t offset;
 	ext4_grpblk_t next_zero_bit;
 	ext4_fsblk_t blk;
@@ -327,14 +337,14 @@
 	/* check whether block bitmap block number is set */
 	blk = ext4_block_bitmap(sb, desc);
 	offset = blk - group_first_block;
-	if (!ext4_test_bit(offset, bh->b_data))
+	if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
 		/* bad block bitmap */
 		return blk;
 
 	/* check whether the inode bitmap block number is set */
 	blk = ext4_inode_bitmap(sb, desc);
 	offset = blk - group_first_block;
-	if (!ext4_test_bit(offset, bh->b_data))
+	if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
 		/* bad block bitmap */
 		return blk;
 
@@ -342,20 +352,23 @@
 	blk = ext4_inode_table(sb, desc);
 	offset = blk - group_first_block;
 	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
-				offset + EXT4_SB(sb)->s_itb_per_group,
-				offset);
-	if (next_zero_bit < offset + EXT4_SB(sb)->s_itb_per_group)
+			EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group),
+			EXT4_B2C(sbi, offset));
+	if (next_zero_bit <
+	    EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group))
 		/* bad bitmap for inode tables */
 		return blk;
 	return 0;
 }
 
-void ext4_validate_block_bitmap(struct super_block *sb,
-			       struct ext4_group_desc *desc,
-			       unsigned int block_group,
-			       struct buffer_head *bh)
+static void ext4_validate_block_bitmap(struct super_block *sb,
+				       struct ext4_group_desc *desc,
+				       ext4_group_t block_group,
+				       struct buffer_head *bh)
 {
 	ext4_fsblk_t	blk;
+	struct ext4_group_info *grp = ext4_get_group_info(sb, block_group);
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
 
 	if (buffer_verified(bh))
 		return;
@@ -366,12 +379,20 @@
 		ext4_unlock_group(sb, block_group);
 		ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
 			   block_group, blk);
+		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+			percpu_counter_sub(&sbi->s_freeclusters_counter,
+					   grp->bb_free);
+		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
 		return;
 	}
 	if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
 			desc, bh))) {
 		ext4_unlock_group(sb, block_group);
 		ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
+		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+			percpu_counter_sub(&sbi->s_freeclusters_counter,
+					   grp->bb_free);
+		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
 		return;
 	}
 	set_buffer_verified(bh);
@@ -417,11 +438,15 @@
 	}
 	ext4_lock_group(sb, block_group);
 	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
-		ext4_init_block_bitmap(sb, bh, block_group, desc);
+		int err;
+
+		err = ext4_init_block_bitmap(sb, bh, block_group, desc);
 		set_bitmap_uptodate(bh);
 		set_buffer_uptodate(bh);
 		ext4_unlock_group(sb, block_group);
 		unlock_buffer(bh);
+		if (err)
+			ext4_error(sb, "Checksum bad for grp %u", block_group);
 		return bh;
 	}
 	ext4_unlock_group(sb, block_group);
@@ -445,7 +470,10 @@
 	return bh;
 verify:
 	ext4_validate_block_bitmap(sb, desc, block_group, bh);
-	return bh;
+	if (buffer_verified(bh))
+		return bh;
+	put_bh(bh);
+	return NULL;
 }
 
 /* Returns 0 on success, 1 on error */
@@ -469,7 +497,8 @@
 	clear_buffer_new(bh);
 	/* Panic or remount fs read-only if block bitmap is invalid */
 	ext4_validate_block_bitmap(sb, desc, block_group, bh);
-	return 0;
+	/* ...but check for error just in case errors=continue. */
+	return !buffer_verified(bh);
 }
 
 struct buffer_head *
@@ -611,10 +640,8 @@
 	 * Account for the allocated meta blocks.  We will never
 	 * fail EDQUOT for metdata, but we do account for it.
 	 */
-	if (!(*errp) &&
-	    ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
+	if (!(*errp) && (flags & EXT4_MB_DELALLOC_RESERVED)) {
 		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
-		EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
 		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 		dquot_alloc_block_nofail(inode,
 				EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
@@ -634,6 +661,7 @@
 	struct ext4_group_desc *gdp;
 	ext4_group_t i;
 	ext4_group_t ngroups = ext4_get_groups_count(sb);
+	struct ext4_group_info *grp;
 #ifdef EXT4FS_DEBUG
 	struct ext4_super_block *es;
 	ext4_fsblk_t bitmap_count;
@@ -649,14 +677,18 @@
 		gdp = ext4_get_group_desc(sb, i, NULL);
 		if (!gdp)
 			continue;
-		desc_count += ext4_free_group_clusters(sb, gdp);
+		grp = NULL;
+		if (EXT4_SB(sb)->s_group_info)
+			grp = ext4_get_group_info(sb, i);
+		if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+			desc_count += ext4_free_group_clusters(sb, gdp);
 		brelse(bitmap_bh);
 		bitmap_bh = ext4_read_block_bitmap(sb, i);
 		if (bitmap_bh == NULL)
 			continue;
 
 		x = ext4_count_free(bitmap_bh->b_data,
-				    EXT4_BLOCKS_PER_GROUP(sb) / 8);
+				    EXT4_CLUSTERS_PER_GROUP(sb) / 8);
 		printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
 			i, ext4_free_group_clusters(sb, gdp), x);
 		bitmap_count += x;
@@ -673,7 +705,11 @@
 		gdp = ext4_get_group_desc(sb, i, NULL);
 		if (!gdp)
 			continue;
-		desc_count += ext4_free_group_clusters(sb, gdp);
+		grp = NULL;
+		if (EXT4_SB(sb)->s_group_info)
+			grp = ext4_get_group_info(sb, i);
+		if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+			desc_count += ext4_free_group_clusters(sb, gdp);
 	}
 
 	return desc_count;
@@ -682,21 +718,15 @@
 
 static inline int test_root(ext4_group_t a, int b)
 {
-	int num = b;
-
-	while (a > num)
-		num *= b;
-	return num == a;
-}
-
-static int ext4_group_sparse(ext4_group_t group)
-{
-	if (group <= 1)
-		return 1;
-	if (!(group & 1))
-		return 0;
-	return (test_root(group, 7) || test_root(group, 5) ||
-		test_root(group, 3));
+	while (1) {
+		if (a < b)
+			return 0;
+		if (a == b)
+			return 1;
+		if ((a % b) != 0)
+			return 0;
+		a = a / b;
+	}
 }
 
 /**
@@ -709,11 +739,26 @@
  */
 int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
 {
-	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
-				EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
-			!ext4_group_sparse(group))
+	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+
+	if (group == 0)
+		return 1;
+	if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_SPARSE_SUPER2)) {
+		if (group == le32_to_cpu(es->s_backup_bgs[0]) ||
+		    group == le32_to_cpu(es->s_backup_bgs[1]))
+			return 1;
 		return 0;
-	return 1;
+	}
+	if ((group <= 1) || !EXT4_HAS_RO_COMPAT_FEATURE(sb,
+					EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER))
+		return 1;
+	if (!(group & 1))
+		return 0;
+	if (test_root(group, 3) || (test_root(group, 5)) ||
+	    test_root(group, 7))
+		return 1;
+
+	return 0;
 }
 
 static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
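
For reference, the sparse_super placement decided by the rewritten
test_root()/ext4_bg_has_super(): backup superblocks live in groups 0 and 1
and in every group whose number is a power of 3, 5 or 7, i.e.

	/*
	 *	0, 1, 3, 5, 7, 9, 25, 27, 49, 81, 125, 243, ...
	 * test_root(a, b) returns 1 exactly when a == b^k for some k >= 1,
	 * dividing out one factor of b per iteration; e.g. test_root(81, 3)
	 * walks 81 -> 27 -> 9 -> 3 and succeeds once a == b.
	 */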
diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
index 3285aa5..b610779 100644
--- a/fs/ext4/bitmap.c
+++ b/fs/ext4/bitmap.c
@@ -24,8 +24,7 @@
 	__u32 provided, calculated;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 
-	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
-					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	if (!ext4_has_metadata_csum(sb))
 		return 1;
 
 	provided = le16_to_cpu(gdp->bg_inode_bitmap_csum_lo);
@@ -46,8 +45,7 @@
 	__u32 csum;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 
-	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
-					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	if (!ext4_has_metadata_csum(sb))
 		return;
 
 	csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
@@ -65,8 +63,7 @@
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	int sz = EXT4_CLUSTERS_PER_GROUP(sb) / 8;
 
-	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
-					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	if (!ext4_has_metadata_csum(sb))
 		return 1;
 
 	provided = le16_to_cpu(gdp->bg_block_bitmap_csum_lo);
@@ -91,8 +88,7 @@
 	__u32 csum;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 
-	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
-			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	if (!ext4_has_metadata_csum(sb))
 		return;
 
 	csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index 3f11656..41eb9dc 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -180,37 +180,12 @@
 /* Called when the filesystem is unmounted */
 void ext4_release_system_zone(struct super_block *sb)
 {
-	struct rb_node	*n = EXT4_SB(sb)->system_blks.rb_node;
-	struct rb_node	*parent;
-	struct ext4_system_zone	*entry;
+	struct ext4_system_zone	*entry, *n;
 
-	while (n) {
-		/* Do the node's children first */
-		if (n->rb_left) {
-			n = n->rb_left;
-			continue;
-		}
-		if (n->rb_right) {
-			n = n->rb_right;
-			continue;
-		}
-		/*
-		 * The node has no children; free it, and then zero
-		 * out parent's link to it.  Finally go to the
-		 * beginning of the loop and try to free the parent
-		 * node.
-		 */
-		parent = rb_parent(n);
-		entry = rb_entry(n, struct ext4_system_zone, node);
+	rbtree_postorder_for_each_entry_safe(entry, n,
+			&EXT4_SB(sb)->system_blks, node)
 		kmem_cache_free(ext4_system_zone_cachep, entry);
-		if (!parent)
-			EXT4_SB(sb)->system_blks = RB_ROOT;
-		else if (parent->rb_left == n)
-			parent->rb_left = NULL;
-		else if (parent->rb_right == n)
-			parent->rb_right = NULL;
-		n = parent;
-	}
+
 	EXT4_SB(sb)->system_blks = RB_ROOT;
 }
 
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
new file mode 100644
index 0000000..e086ac4
--- /dev/null
+++ b/fs/ext4/crypto.c
@@ -0,0 +1,475 @@
+/*
+ * linux/fs/ext4/crypto.c
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * This contains encryption functions for ext4
+ *
+ * Written by Michael Halcrow, 2014.
+ *
+ * Filename encryption additions
+ *	Uday Savagaonkar, 2014
+ * Encryption policy handling additions
+ *	Ildar Muslukhov, 2014
+ *
+ * This has not yet undergone a rigorous security audit.
+ *
+ * The usage of AES-XTS should conform to recommendations in NIST
+ * Special Publication 800-38E and IEEE P1619/D16.
+ */
+
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <keys/user-type.h>
+#include <keys/encrypted-type.h>
+#include <linux/crypto.h>
+#include <linux/ecryptfs.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/key.h>
+#include <linux/list.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <linux/spinlock_types.h>
+
+#include "ext4_extents.h"
+#include "xattr.h"
+
+/* Encryption added and removed here! (L: */
+
+static unsigned int num_prealloc_crypto_pages = 32;
+static unsigned int num_prealloc_crypto_ctxs = 128;
+
+module_param(num_prealloc_crypto_pages, uint, 0444);
+MODULE_PARM_DESC(num_prealloc_crypto_pages,
+		 "Number of crypto pages to preallocate");
+module_param(num_prealloc_crypto_ctxs, uint, 0444);
+MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
+		 "Number of crypto contexts to preallocate");
+
+static mempool_t *ext4_bounce_page_pool;
+
+static LIST_HEAD(ext4_free_crypto_ctxs);
+static DEFINE_SPINLOCK(ext4_crypto_ctx_lock);
+
+static struct kmem_cache *ext4_crypto_ctx_cachep;
+struct kmem_cache *ext4_crypt_info_cachep;
+
+/**
+ * ext4_release_crypto_ctx() - Releases an encryption context
+ * @ctx: The encryption context to release.
+ *
+ * If the encryption context was allocated from the pre-allocated pool, returns
+ * it to that pool. Else, frees it.
+ *
+ * If there's a bounce page in the context, this frees that.
+ */
+void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
+{
+	unsigned long flags;
+
+	if (ctx->flags & EXT4_WRITE_PATH_FL && ctx->w.bounce_page)
+		mempool_free(ctx->w.bounce_page, ext4_bounce_page_pool);
+	ctx->w.bounce_page = NULL;
+	ctx->w.control_page = NULL;
+	if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
+		kmem_cache_free(ext4_crypto_ctx_cachep, ctx);
+	} else {
+		spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
+		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
+		spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
+	}
+}
+
+/**
+ * ext4_get_crypto_ctx() - Gets an encryption context
+ * @inode:       The inode for which we are doing the crypto
+ *
+ * Allocates and initializes an encryption context.
+ *
+ * Return: An allocated and initialized encryption context on success;
+ * an ERR_PTR() value on failure (this function never returns NULL).
+ */
+struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
+{
+	struct ext4_crypto_ctx *ctx = NULL;
+	int res = 0;
+	unsigned long flags;
+	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
+
+	if (ci == NULL)
+		return ERR_PTR(-ENOKEY);
+
+	/*
+	 * We first try getting the ctx from a free list because in
+	 * the common case the ctx will have an allocated and
+	 * initialized crypto tfm, so it's probably a worthwhile
+	 * optimization. For the bounce page, we first try getting it
+	 * from the kernel allocator because that's just about as fast
+	 * as getting it from a list and because a cache of free pages
+	 * should generally be a "last resort" option for a filesystem
+	 * to be able to do its job.
+	 */
+	spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
+	ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs,
+				       struct ext4_crypto_ctx, free_list);
+	if (ctx)
+		list_del(&ctx->free_list);
+	spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
+	if (!ctx) {
+		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
+		if (!ctx) {
+			res = -ENOMEM;
+			goto out;
+		}
+		ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
+	} else {
+		ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
+	}
+	ctx->flags &= ~EXT4_WRITE_PATH_FL;
+
+out:
+	if (res) {
+		if (!IS_ERR_OR_NULL(ctx))
+			ext4_release_crypto_ctx(ctx);
+		ctx = ERR_PTR(res);
+	}
+	return ctx;
+}
+
+struct workqueue_struct *ext4_read_workqueue;
+static DEFINE_MUTEX(crypto_init);
+
+/**
+ * ext4_exit_crypto() - Shutdown the ext4 encryption system
+ */
+void ext4_exit_crypto(void)
+{
+	struct ext4_crypto_ctx *pos, *n;
+
+	list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list)
+		kmem_cache_free(ext4_crypto_ctx_cachep, pos);
+	INIT_LIST_HEAD(&ext4_free_crypto_ctxs);
+	if (ext4_bounce_page_pool)
+		mempool_destroy(ext4_bounce_page_pool);
+	ext4_bounce_page_pool = NULL;
+	if (ext4_read_workqueue)
+		destroy_workqueue(ext4_read_workqueue);
+	ext4_read_workqueue = NULL;
+	if (ext4_crypto_ctx_cachep)
+		kmem_cache_destroy(ext4_crypto_ctx_cachep);
+	ext4_crypto_ctx_cachep = NULL;
+	if (ext4_crypt_info_cachep)
+		kmem_cache_destroy(ext4_crypt_info_cachep);
+	ext4_crypt_info_cachep = NULL;
+}
+
+/**
+ * ext4_init_crypto() - Set up for ext4 encryption.
+ *
+ * We only call this when we start accessing encrypted files, since it
+ * results in memory getting allocated that wouldn't otherwise be used.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int ext4_init_crypto(void)
+{
+	int i, res = -ENOMEM;
+
+	mutex_lock(&crypto_init);
+	if (ext4_read_workqueue)
+		goto already_initialized;
+	ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0);
+	if (!ext4_read_workqueue)
+		goto fail;
+
+	ext4_crypto_ctx_cachep = KMEM_CACHE(ext4_crypto_ctx,
+					    SLAB_RECLAIM_ACCOUNT);
+	if (!ext4_crypto_ctx_cachep)
+		goto fail;
+
+	ext4_crypt_info_cachep = KMEM_CACHE(ext4_crypt_info,
+					    SLAB_RECLAIM_ACCOUNT);
+	if (!ext4_crypt_info_cachep)
+		goto fail;
+
+	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
+		struct ext4_crypto_ctx *ctx;
+
+		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
+		if (!ctx) {
+			res = -ENOMEM;
+			goto fail;
+		}
+		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
+	}
+
+	ext4_bounce_page_pool =
+		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
+	if (!ext4_bounce_page_pool) {
+		res = -ENOMEM;
+		goto fail;
+	}
+already_initialized:
+	mutex_unlock(&crypto_init);
+	return 0;
+fail:
+	ext4_exit_crypto();
+	mutex_unlock(&crypto_init);
+	return res;
+}
+
+void ext4_restore_control_page(struct page *data_page)
+{
+	struct ext4_crypto_ctx *ctx =
+		(struct ext4_crypto_ctx *)page_private(data_page);
+
+	set_page_private(data_page, (unsigned long)NULL);
+	ClearPagePrivate(data_page);
+	unlock_page(data_page);
+	ext4_release_crypto_ctx(ctx);
+}
+
+/**
+ * ext4_crypt_complete() - The completion callback for page encryption
+ * @req: The asynchronous encryption request context
+ * @res: The result of the encryption operation
+ */
+static void ext4_crypt_complete(struct crypto_async_request *req, int res)
+{
+	struct ext4_completion_result *ecr = req->data;
+
+	if (res == -EINPROGRESS)
+		return;
+	ecr->res = res;
+	complete(&ecr->completion);
+}
+
+typedef enum {
+	EXT4_DECRYPT = 0,
+	EXT4_ENCRYPT,
+} ext4_direction_t;
+
+static int ext4_page_crypto(struct ext4_crypto_ctx *ctx,
+			    struct inode *inode,
+			    ext4_direction_t rw,
+			    pgoff_t index,
+			    struct page *src_page,
+			    struct page *dest_page)
+
+{
+	u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
+	struct ablkcipher_request *req = NULL;
+	DECLARE_EXT4_COMPLETION_RESULT(ecr);
+	struct scatterlist dst, src;
+	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
+	struct crypto_ablkcipher *tfm = ci->ci_ctfm;
+	int res = 0;
+
+	req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+	if (!req) {
+		printk_ratelimited(KERN_ERR
+				   "%s: crypto_request_alloc() failed\n",
+				   __func__);
+		return -ENOMEM;
+	}
+	ablkcipher_request_set_callback(
+		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+		ext4_crypt_complete, &ecr);
+
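+	/* Use the zero-padded page index as the XTS tweak for this page. */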
+	BUILD_BUG_ON(EXT4_XTS_TWEAK_SIZE < sizeof(index));
+	memcpy(xts_tweak, &index, sizeof(index));
+	memset(&xts_tweak[sizeof(index)], 0,
+	       EXT4_XTS_TWEAK_SIZE - sizeof(index));
+
+	sg_init_table(&dst, 1);
+	sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
+	sg_init_table(&src, 1);
+	sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
+	ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
+				     xts_tweak);
+	if (rw == EXT4_DECRYPT)
+		res = crypto_ablkcipher_decrypt(req);
+	else
+		res = crypto_ablkcipher_encrypt(req);
+	if (res == -EINPROGRESS || res == -EBUSY) {
+		BUG_ON(req->base.data != &ecr);
+		wait_for_completion(&ecr.completion);
+		res = ecr.res;
+	}
+	ablkcipher_request_free(req);
+	if (res) {
+		printk_ratelimited(
+			KERN_ERR
+			"%s: crypto_ablkcipher_encrypt() returned %d\n",
+			__func__, res);
+		return res;
+	}
+	return 0;
+}
+
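+/*
+ * Take a bounce page from the mempool for the write path; it is
+ * returned to the pool by ext4_release_crypto_ctx().
+ */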
+static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
+{
+	ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, GFP_NOWAIT);
+	if (ctx->w.bounce_page == NULL)
+		return ERR_PTR(-ENOMEM);
+	ctx->flags |= EXT4_WRITE_PATH_FL;
+	return ctx->w.bounce_page;
+}
+
+/**
+ * ext4_encrypt() - Encrypts a page
+ * @inode:          The inode for which the encryption should take place
+ * @plaintext_page: The page to encrypt. Must be locked.
+ *
+ * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
+ * encryption context.
+ *
+ * Called on the page write path.  The caller must call
+ * ext4_restore_control_page() on the returned ciphertext page to
+ * release the bounce buffer and the encryption context.
+ *
+ * Return: An allocated page with the encrypted content on success;
+ * an ERR_PTR() value on failure.
+ */
+struct page *ext4_encrypt(struct inode *inode,
+			  struct page *plaintext_page)
+{
+	struct ext4_crypto_ctx *ctx;
+	struct page *ciphertext_page = NULL;
+	int err;
+
+	BUG_ON(!PageLocked(plaintext_page));
+
+	ctx = ext4_get_crypto_ctx(inode);
+	if (IS_ERR(ctx))
+		return (struct page *) ctx;
+
+	/* The encryption operation will require a bounce page. */
+	ciphertext_page = alloc_bounce_page(ctx);
+	if (IS_ERR(ciphertext_page))
+		goto errout;
+	ctx->w.control_page = plaintext_page;
+	err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index,
+			       plaintext_page, ciphertext_page);
+	if (err) {
+		ciphertext_page = ERR_PTR(err);
+	errout:
+		ext4_release_crypto_ctx(ctx);
+		return ciphertext_page;
+	}
+	SetPagePrivate(ciphertext_page);
+	set_page_private(ciphertext_page, (unsigned long)ctx);
+	lock_page(ciphertext_page);
+	return ciphertext_page;
+}
+
+/**
+ * ext4_decrypt() - Decrypts a page in-place
+ * @ctx:  The encryption context.
+ * @page: The page to decrypt. Must be locked.
+ *
+ * Decrypts page in-place using the ctx encryption context.
+ *
+ * Called from the read completion callback.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int ext4_decrypt(struct ext4_crypto_ctx *ctx, struct page *page)
+{
+	BUG_ON(!PageLocked(page));
+
+	return ext4_page_crypto(ctx, page->mapping->host,
+				EXT4_DECRYPT, page->index, page, page);
+}
+
+/*
+ * Convenience function which takes care of allocating and
+ * deallocating the encryption context
+ */
+int ext4_decrypt_one(struct inode *inode, struct page *page)
+{
+	int ret;
+
+	struct ext4_crypto_ctx *ctx = ext4_get_crypto_ctx(inode);
+
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+	ret = ext4_decrypt(ctx, page);
+	ext4_release_crypto_ctx(ctx);
+	return ret;
+}
+
+int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
+{
+	struct ext4_crypto_ctx	*ctx;
+	struct page		*ciphertext_page = NULL;
+	struct bio		*bio;
+	ext4_lblk_t		lblk = le32_to_cpu(ex->ee_block);
+	ext4_fsblk_t		pblk = ext4_ext_pblock(ex);
+	unsigned int		len = ext4_ext_get_actual_len(ex);
+	int			err = 0;
+
+	BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);
+
+	ctx = ext4_get_crypto_ctx(inode);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	ciphertext_page = alloc_bounce_page(ctx);
+	if (IS_ERR(ciphertext_page)) {
+		err = PTR_ERR(ciphertext_page);
+		goto errout;
+	}
+
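+	/* Write one encrypted zero page per block, synchronously. */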
+	while (len--) {
+		err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
+				       ZERO_PAGE(0), ciphertext_page);
+		if (err)
+			goto errout;
+
+		bio = bio_alloc(GFP_KERNEL, 1);
+		if (!bio) {
+			err = -ENOMEM;
+			goto errout;
+		}
+		bio->bi_bdev = inode->i_sb->s_bdev;
+		/* bi_sector is in 512-byte sectors, pblk in fs blocks */
+		bio->bi_sector = pblk << (inode->i_sb->s_blocksize_bits - 9);
+		err = bio_add_page(bio, ciphertext_page,
+				   inode->i_sb->s_blocksize, 0);
+		if (err != inode->i_sb->s_blocksize) {
+			/* bio_add_page() returns the number of bytes added */
+			bio_put(bio);
+			err = -EIO;
+			goto errout;
+		}
+		err = submit_bio_wait(WRITE, bio);
+		bio_put(bio);
+		if (err)
+			goto errout;
+		lblk++;
+		pblk++;
+	}
+	err = 0;
+errout:
+	ext4_release_crypto_ctx(ctx);
+	return err;
+}
+
+bool ext4_valid_contents_enc_mode(uint32_t mode)
+{
+	return (mode == EXT4_ENCRYPTION_MODE_AES_256_XTS);
+}
+
+/**
+ * ext4_validate_encryption_key_size() - Validate the encryption key size
+ * @mode: The key mode.
+ * @size: The key size to validate.
+ *
+ * Return: The validated key size for @mode. Zero if invalid.
+ */
+uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
+{
+	if (size == ext4_encryption_key_size(mode))
+		return size;
+	return 0;
+}
diff --git a/fs/ext4/crypto_fname.c b/fs/ext4/crypto_fname.c
new file mode 100644
index 0000000..7dc4eb5
--- /dev/null
+++ b/fs/ext4/crypto_fname.c
@@ -0,0 +1,469 @@
+/*
+ * linux/fs/ext4/crypto_fname.c
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * This contains functions for filename crypto management in ext4
+ *
+ * Written by Uday Savagaonkar, 2014.
+ *
+ * This has not yet undergone a rigorous security audit.
+ *
+ */
+
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <keys/encrypted-type.h>
+#include <keys/user-type.h>
+#include <linux/crypto.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/key.h>
+#include <linux/list.h>
+#include <linux/mempool.h>
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <linux/spinlock_types.h>
+
+#include "ext4.h"
+#include "ext4_crypto.h"
+#include "xattr.h"
+
+/**
+ * ext4_dir_crypt_complete() - Completion callback for filename crypto requests
+ */
+static void ext4_dir_crypt_complete(struct crypto_async_request *req, int res)
+{
+	struct ext4_completion_result *ecr = req->data;
+
+	if (res == -EINPROGRESS)
+		return;
+	ecr->res = res;
+	complete(&ecr->completion);
+}
+
+bool ext4_valid_filenames_enc_mode(uint32_t mode)
+{
+	return (mode == EXT4_ENCRYPTION_MODE_AES_256_CTS);
+}
+
+static unsigned max_name_len(struct inode *inode)
+{
+	return S_ISLNK(inode->i_mode) ? inode->i_sb->s_blocksize :
+		EXT4_NAME_LEN;
+}
+
+/**
+ * ext4_fname_encrypt() -
+ *
+ * This function encrypts the input filename, and returns the length of the
+ * ciphertext. Errors are returned as negative numbers.  We trust the caller
+ * to allocate sufficient memory for the oname string.
+ */
+static int ext4_fname_encrypt(struct inode *inode,
+			      const struct qstr *iname,
+			      struct ext4_str *oname)
+{
+	u32 ciphertext_len;
+	struct ablkcipher_request *req = NULL;
+	DECLARE_EXT4_COMPLETION_RESULT(ecr);
+	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
+	struct crypto_ablkcipher *tfm = ci->ci_ctfm;
+	int res = 0;
+	char iv[EXT4_CRYPTO_BLOCK_SIZE];
+	struct scatterlist src_sg, dst_sg;
+	int padding = 4 << (ci->ci_flags & EXT4_POLICY_FLAGS_PAD_MASK);
+	char *workbuf, buf[32], *alloc_buf = NULL;
+	unsigned lim = max_name_len(inode);
+
+	if (iname->len <= 0 || iname->len > lim)
+		return -EIO;
+
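+	/* Round the ciphertext up to the policy-selected padding multiple. */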
+	ciphertext_len = (iname->len < EXT4_CRYPTO_BLOCK_SIZE) ?
+		EXT4_CRYPTO_BLOCK_SIZE : iname->len;
+	ciphertext_len = ext4_fname_crypto_round_up(ciphertext_len, padding);
+	ciphertext_len = (ciphertext_len > lim)
+			? lim : ciphertext_len;
+
+	if (ciphertext_len <= sizeof(buf)) {
+		workbuf = buf;
+	} else {
+		alloc_buf = kmalloc(ciphertext_len, GFP_NOFS);
+		if (!alloc_buf)
+			return -ENOMEM;
+		workbuf = alloc_buf;
+	}
+
+	/* Allocate request */
+	req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+	if (!req) {
+		printk_ratelimited(
+		    KERN_ERR "%s: crypto_request_alloc() failed\n", __func__);
+		kfree(alloc_buf);
+		return -ENOMEM;
+	}
+	ablkcipher_request_set_callback(req,
+		CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+		ext4_dir_crypt_complete, &ecr);
+
+	/* Copy the input */
+	memcpy(workbuf, iname->name, iname->len);
+	if (iname->len < ciphertext_len)
+		memset(workbuf + iname->len, 0, ciphertext_len - iname->len);
+
+	/* Initialize IV */
+	memset(iv, 0, EXT4_CRYPTO_BLOCK_SIZE);
+
+	/* Create encryption request */
+	sg_init_one(&src_sg, workbuf, ciphertext_len);
+	sg_init_one(&dst_sg, oname->name, ciphertext_len);
+	ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, ciphertext_len, iv);
+	res = crypto_ablkcipher_encrypt(req);
+	if (res == -EINPROGRESS || res == -EBUSY) {
+		BUG_ON(req->base.data != &ecr);
+		wait_for_completion(&ecr.completion);
+		res = ecr.res;
+	}
+	kfree(alloc_buf);
+	ablkcipher_request_free(req);
+	if (res < 0) {
+		printk_ratelimited(
+		    KERN_ERR "%s: Error (error code %d)\n", __func__, res);
+	}
+	oname->len = ciphertext_len;
+	return res;
+}
+
+/*
+ * ext4_fname_decrypt()
+ *	This function decrypts the input filename, and returns
+ *	the length of the plaintext.
+ *	Errors are returned as negative numbers.
+ *	We trust the caller to allocate sufficient memory for the oname string.
+ */
+static int ext4_fname_decrypt(struct inode *inode,
+			      const struct ext4_str *iname,
+			      struct ext4_str *oname)
+{
+	struct ablkcipher_request *req = NULL;
+	DECLARE_EXT4_COMPLETION_RESULT(ecr);
+	struct scatterlist src_sg, dst_sg;
+	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
+	struct crypto_ablkcipher *tfm = ci->ci_ctfm;
+	int res = 0;
+	char iv[EXT4_CRYPTO_BLOCK_SIZE];
+	unsigned lim = max_name_len(inode);
+
+	if (iname->len <= 0 || iname->len > lim)
+		return -EIO;
+
+	/* Allocate request */
+	req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+	if (!req) {
+		printk_ratelimited(
+		    KERN_ERR "%s: crypto_request_alloc() failed\n",  __func__);
+		return -ENOMEM;
+	}
+	ablkcipher_request_set_callback(req,
+		CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+		ext4_dir_crypt_complete, &ecr);
+
+	/* Initialize IV */
+	memset(iv, 0, EXT4_CRYPTO_BLOCK_SIZE);
+
+	/* Create encryption request */
+	sg_init_one(&src_sg, iname->name, iname->len);
+	sg_init_one(&dst_sg, oname->name, oname->len);
+	ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
+	res = crypto_ablkcipher_decrypt(req);
+	if (res == -EINPROGRESS || res == -EBUSY) {
+		BUG_ON(req->base.data != &ecr);
+		wait_for_completion(&ecr.completion);
+		res = ecr.res;
+	}
+	ablkcipher_request_free(req);
+	if (res < 0) {
+		printk_ratelimited(
+		    KERN_ERR "%s: Error in ext4_fname_encrypt (error code %d)\n",
+		    __func__, res);
+		return res;
+	}
+
+	oname->len = strnlen(oname->name, iname->len);
+	return oname->len;
+}
+
+static const char *lookup_table =
+	"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";
+
+/**
+ * digest_encode() -
+ *
+ * Encodes the input digest using characters from the set [A-Za-z0-9+,]
+ * (see lookup_table above).
+ * The encoded string is roughly 4/3 times the size of the input string.
+ */
+static int digest_encode(const char *src, int len, char *dst)
+{
+	int i = 0, bits = 0, ac = 0;
+	char *cp = dst;
+
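+	/* Gather 8 bits per input byte, emit one table char per 6 bits. */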
+	while (i < len) {
+		ac += (((unsigned char) src[i]) << bits);
+		bits += 8;
+		do {
+			*cp++ = lookup_table[ac & 0x3f];
+			ac >>= 6;
+			bits -= 6;
+		} while (bits >= 6);
+		i++;
+	}
+	if (bits)
+		*cp++ = lookup_table[ac & 0x3f];
+	return cp - dst;
+}
+
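+/*
+ * Inverse of digest_encode(); returns the decoded length, or a
+ * negative value if the input is not a valid encoding.
+ */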
+static int digest_decode(const char *src, int len, char *dst)
+{
+	int i = 0, bits = 0, ac = 0;
+	const char *p;
+	char *cp = dst;
+
+	while (i < len) {
+		p = strchr(lookup_table, src[i]);
+		if (p == NULL || src[i] == 0)
+			return -2;
+		ac += (p - lookup_table) << bits;
+		bits += 6;
+		if (bits >= 8) {
+			*cp++ = ac & 0xff;
+			ac >>= 8;
+			bits -= 8;
+		}
+		i++;
+	}
+	if (ac)
+		return -1;
+	return cp - dst;
+}
+
+/**
+ * ext4_fname_crypto_round_up() -
+ *
+ * Return: The next multiple of block size
+ */
+u32 ext4_fname_crypto_round_up(u32 size, u32 blksize)
+{
+	return ((size + blksize - 1) / blksize) * blksize;
+}
+
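+/* Ciphertext size for an ilen-byte name, after policy padding round-up. */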
+unsigned ext4_fname_encrypted_size(struct inode *inode, u32 ilen)
+{
+	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
+	int padding = 32;
+
+	if (ci)
+		padding = 4 << (ci->ci_flags & EXT4_POLICY_FLAGS_PAD_MASK);
+	if (ilen < EXT4_CRYPTO_BLOCK_SIZE)
+		ilen = EXT4_CRYPTO_BLOCK_SIZE;
+	return ext4_fname_crypto_round_up(ilen, padding);
+}
+
+/*
+ * ext4_fname_crypto_alloc_buffer() -
+ *
+ * Allocates an output buffer that is sufficient for the crypto operation
+ * specified by the context and the direction.
+ */
+int ext4_fname_crypto_alloc_buffer(struct inode *inode,
+				   u32 ilen, struct ext4_str *crypto_str)
+{
+	unsigned int olen = ext4_fname_encrypted_size(inode, ilen);
+
+	crypto_str->len = olen;
+	if (olen < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2)
+		olen = EXT4_FNAME_CRYPTO_DIGEST_SIZE*2;
+	/* Allocated buffer can hold one more character to null-terminate the
+	 * string */
+	crypto_str->name = kmalloc(olen+1, GFP_NOFS);
+	if (!(crypto_str->name))
+		return -ENOMEM;
+	return 0;
+}
+
+/**
+ * ext4_fname_crypto_free_buffer() -
+ *
+ * Frees the buffer allocated for crypto operation.
+ */
+void ext4_fname_crypto_free_buffer(struct ext4_str *crypto_str)
+{
+	if (!crypto_str)
+		return;
+	kfree(crypto_str->name);
+	crypto_str->name = NULL;
+}
+
+/**
+ * ext4_fname_disk_to_usr() - converts a filename from disk space to user space
+ */
+int _ext4_fname_disk_to_usr(struct inode *inode,
+			    struct dx_hash_info *hinfo,
+			    const struct ext4_str *iname,
+			    struct ext4_str *oname)
+{
+	char buf[24];
+	int ret;
+
+	if (iname->len < 3) {
+		/* Check for . and .. */
+		if (iname->name[0] == '.' && iname->name[iname->len-1] == '.') {
+			oname->name[0] = '.';
+			oname->name[iname->len-1] = '.';
+			oname->len = iname->len;
+			return oname->len;
+		}
+	}
+	if (EXT4_I(inode)->i_crypt_info)
+		return ext4_fname_decrypt(inode, iname, oname);
+
+	if (iname->len <= EXT4_FNAME_CRYPTO_DIGEST_SIZE) {
+		ret = digest_encode(iname->name, iname->len, oname->name);
+		oname->len = ret;
+		return ret;
+	}
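+	/* No key and the name is long: encode hash info plus ciphertext tail. */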
+	if (hinfo) {
+		memcpy(buf, &hinfo->hash, 4);
+		memcpy(buf+4, &hinfo->minor_hash, 4);
+	} else
+		memset(buf, 0, 8);
+	memcpy(buf + 8, iname->name + iname->len - 16, 16);
+	oname->name[0] = '_';
+	ret = digest_encode(buf, 24, oname->name+1);
+	oname->len = ret + 1;
+	return ret + 1;
+}
+
+int ext4_fname_disk_to_usr(struct inode *inode,
+			   struct dx_hash_info *hinfo,
+			   const struct ext4_dir_entry_2 *de,
+			   struct ext4_str *oname)
+{
+	struct ext4_str iname = {.name = (unsigned char *) de->name,
+				 .len = de->name_len };
+
+	return _ext4_fname_disk_to_usr(inode, hinfo, &iname, oname);
+}
+
+
+/**
+ * ext4_fname_usr_to_disk() - converts a filename from user space to disk space
+ */
+int ext4_fname_usr_to_disk(struct inode *inode,
+			   const struct qstr *iname,
+			   struct ext4_str *oname)
+{
+	int res;
+	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
+
+	if (iname->len < 3) {
+		/* Check for . and .. */
+		if (iname->name[0] == '.' &&
+				iname->name[iname->len-1] == '.') {
+			oname->name[0] = '.';
+			oname->name[iname->len-1] = '.';
+			oname->len = iname->len;
+			return oname->len;
+		}
+	}
+	if (ci) {
+		res = ext4_fname_encrypt(inode, iname, oname);
+		return res;
+	}
+	/* Without a proper key, a user is not allowed to modify the filenames
+	 * in a directory. Consequently, a user space name cannot be mapped to
+	 * a disk-space name */
+	return -EACCES;
+}
+
+int ext4_fname_setup_filename(struct inode *dir, const struct qstr *iname,
+			      int lookup, struct ext4_filename *fname)
+{
+	struct ext4_crypt_info *ci;
+	int ret = 0, bigname = 0;
+
+	memset(fname, 0, sizeof(struct ext4_filename));
+	fname->usr_fname = iname;
+
+	if (!ext4_encrypted_inode(dir) ||
+	    ((iname->name[0] == '.') &&
+	     ((iname->len == 1) ||
+	      ((iname->name[1] == '.') && (iname->len == 2))))) {
+		fname->disk_name.name = (unsigned char *) iname->name;
+		fname->disk_name.len = iname->len;
+		return 0;
+	}
+	ret = ext4_get_encryption_info(dir);
+	if (ret)
+		return ret;
+	ci = EXT4_I(dir)->i_crypt_info;
+	if (ci) {
+		ret = ext4_fname_crypto_alloc_buffer(dir, iname->len,
+						     &fname->crypto_buf);
+		if (ret < 0)
+			return ret;
+		ret = ext4_fname_encrypt(dir, iname, &fname->crypto_buf);
+		if (ret < 0)
+			goto errout;
+		fname->disk_name.name = fname->crypto_buf.name;
+		fname->disk_name.len = fname->crypto_buf.len;
+		return 0;
+	}
+	if (!lookup)
+		return -EACCES;
+
+	/* We don't have the key and we are doing a lookup; decode the
+	 * user-supplied name
+	 */
+	if (iname->name[0] == '_')
+		bigname = 1;
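+	/*
+	 * A '_' prefix marks a 24-byte digest (33 chars encoded); other
+	 * names of up to 43 chars decode into the 32-byte buffer below.
+	 */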
+	if ((bigname && (iname->len != 33)) ||
+	    (!bigname && (iname->len > 43)))
+		return -ENOENT;
+
+	fname->crypto_buf.name = kmalloc(32, GFP_KERNEL);
+	if (fname->crypto_buf.name == NULL)
+		return -ENOMEM;
+	ret = digest_decode(iname->name + bigname, iname->len - bigname,
+			    fname->crypto_buf.name);
+	if (ret < 0) {
+		ret = -ENOENT;
+		goto errout;
+	}
+	fname->crypto_buf.len = ret;
+	if (bigname) {
+		memcpy(&fname->hinfo.hash, fname->crypto_buf.name, 4);
+		memcpy(&fname->hinfo.minor_hash, fname->crypto_buf.name + 4, 4);
+	} else {
+		fname->disk_name.name = fname->crypto_buf.name;
+		fname->disk_name.len = fname->crypto_buf.len;
+	}
+	return 0;
+errout:
+	kfree(fname->crypto_buf.name);
+	fname->crypto_buf.name = NULL;
+	return ret;
+}
+
+void ext4_fname_free_filename(struct ext4_filename *fname)
+{
+	kfree(fname->crypto_buf.name);
+	fname->crypto_buf.name = NULL;
+	fname->usr_fname = NULL;
+	fname->disk_name.name = NULL;
+}
diff --git a/fs/ext4/crypto_key.c b/fs/ext4/crypto_key.c
new file mode 100644
index 0000000..2cc4c80
--- /dev/null
+++ b/fs/ext4/crypto_key.c
@@ -0,0 +1,260 @@
+/*
+ * linux/fs/ext4/crypto_key.c
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * This contains encryption key functions for ext4
+ *
+ * Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015.
+ */
+
+#include <keys/encrypted-type.h>
+#include <keys/user-type.h>
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <uapi/linux/keyctl.h>
+
+#include "ext4.h"
+#include "xattr.h"
+
+static void derive_crypt_complete(struct crypto_async_request *req, int rc)
+{
+	struct ext4_completion_result *ecr = req->data;
+
+	if (rc == -EINPROGRESS)
+		return;
+
+	ecr->res = rc;
+	complete(&ecr->completion);
+}
+
+/**
+ * ext4_derive_key_aes() - Derive a key using AES-128-ECB
+ * @deriving_key: Encryption key used for derivation.
+ * @source_key:   Source key to which to apply derivation.
+ * @derived_key:  Derived key.
+ *
+ * Return: Zero on success; non-zero otherwise.
+ */
+static int ext4_derive_key_aes(char deriving_key[EXT4_AES_128_ECB_KEY_SIZE],
+			       char source_key[EXT4_AES_256_XTS_KEY_SIZE],
+			       char derived_key[EXT4_AES_256_XTS_KEY_SIZE])
+{
+	int res = 0;
+	struct ablkcipher_request *req = NULL;
+	DECLARE_EXT4_COMPLETION_RESULT(ecr);
+	struct scatterlist src_sg, dst_sg;
+	struct crypto_ablkcipher *tfm = crypto_alloc_ablkcipher("ecb(aes)", 0,
+								0);
+
+	if (IS_ERR(tfm)) {
+		res = PTR_ERR(tfm);
+		tfm = NULL;
+		goto out;
+	}
+	crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+	req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+	if (!req) {
+		res = -ENOMEM;
+		goto out;
+	}
+	ablkcipher_request_set_callback(req,
+			CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+			derive_crypt_complete, &ecr);
+	res = crypto_ablkcipher_setkey(tfm, deriving_key,
+				       EXT4_AES_128_ECB_KEY_SIZE);
+	if (res < 0)
+		goto out;
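+	/* One ECB pass: the source key encrypted under deriving_key is the
+	 * derived key. */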
+	sg_init_one(&src_sg, source_key, EXT4_AES_256_XTS_KEY_SIZE);
+	sg_init_one(&dst_sg, derived_key, EXT4_AES_256_XTS_KEY_SIZE);
+	ablkcipher_request_set_crypt(req, &src_sg, &dst_sg,
+				     EXT4_AES_256_XTS_KEY_SIZE, NULL);
+	res = crypto_ablkcipher_encrypt(req);
+	if (res == -EINPROGRESS || res == -EBUSY) {
+		BUG_ON(req->base.data != &ecr);
+		wait_for_completion(&ecr.completion);
+		res = ecr.res;
+	}
+
+out:
+	if (req)
+		ablkcipher_request_free(req);
+	if (tfm)
+		crypto_free_ablkcipher(tfm);
+	return res;
+}
+
+void ext4_free_crypt_info(struct ext4_crypt_info *ci)
+{
+	if (!ci)
+		return;
+
+	if (ci->ci_keyring_key)
+		key_put(ci->ci_keyring_key);
+	crypto_free_ablkcipher(ci->ci_ctfm);
+	kmem_cache_free(ext4_crypt_info_cachep, ci);
+}
+
+void ext4_free_encryption_info(struct inode *inode,
+			       struct ext4_crypt_info *ci)
+{
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	struct ext4_crypt_info *prev;
+
+	if (ci == NULL)
+		ci = ACCESS_ONCE(ei->i_crypt_info);
+	if (ci == NULL)
+		return;
+	prev = cmpxchg(&ei->i_crypt_info, ci, NULL);
+	if (prev != ci)
+		return;
+
+	ext4_free_crypt_info(ci);
+}
+
+int _ext4_get_encryption_info(struct inode *inode)
+{
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	struct ext4_crypt_info *crypt_info;
+	char full_key_descriptor[EXT4_KEY_DESC_PREFIX_SIZE +
+				 (EXT4_KEY_DESCRIPTOR_SIZE * 2) + 1];
+	struct key *keyring_key = NULL;
+	struct ext4_encryption_key *master_key;
+	struct ext4_encryption_context ctx;
+	struct user_key_payload *ukp;
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	struct crypto_ablkcipher *ctfm;
+	const char *cipher_str;
+	char raw_key[EXT4_MAX_KEY_SIZE];
+	char mode;
+	int res;
+
+	if (!ext4_read_workqueue) {
+		res = ext4_init_crypto();
+		if (res)
+			return res;
+	}
+
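+	/* i_crypt_info is published with cmpxchg(); re-read it after any race. */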
+retry:
+	crypt_info = ACCESS_ONCE(ei->i_crypt_info);
+	if (crypt_info) {
+		if (!crypt_info->ci_keyring_key ||
+		    key_validate(crypt_info->ci_keyring_key) == 0)
+			return 0;
+		ext4_free_encryption_info(inode, crypt_info);
+		goto retry;
+	}
+
+	res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
+				 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
+				 &ctx, sizeof(ctx));
+	if (res < 0) {
+		if (!DUMMY_ENCRYPTION_ENABLED(sbi))
+			return res;
+		ctx.contents_encryption_mode = EXT4_ENCRYPTION_MODE_AES_256_XTS;
+		ctx.filenames_encryption_mode =
+			EXT4_ENCRYPTION_MODE_AES_256_CTS;
+		ctx.flags = 0;
+	} else if (res != sizeof(ctx))
+		return -EINVAL;
+	res = 0;
+
+	crypt_info = kmem_cache_alloc(ext4_crypt_info_cachep, GFP_KERNEL);
+	if (!crypt_info)
+		return -ENOMEM;
+
+	crypt_info->ci_flags = ctx.flags;
+	crypt_info->ci_data_mode = ctx.contents_encryption_mode;
+	crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
+	crypt_info->ci_ctfm = NULL;
+	crypt_info->ci_keyring_key = NULL;
+	memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
+	       sizeof(crypt_info->ci_master_key));
+	if (S_ISREG(inode->i_mode))
+		mode = crypt_info->ci_data_mode;
+	else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
+		mode = crypt_info->ci_filename_mode;
+	else
+		BUG();
+	switch (mode) {
+	case EXT4_ENCRYPTION_MODE_AES_256_XTS:
+		cipher_str = "xts(aes)";
+		break;
+	case EXT4_ENCRYPTION_MODE_AES_256_CTS:
+		cipher_str = "cts(cbc(aes))";
+		break;
+	default:
+		printk_once(KERN_WARNING
+			    "ext4: unsupported key mode %d (ino %u)\n",
+			    mode, (unsigned) inode->i_ino);
+		res = -ENOKEY;
+		goto out;
+	}
+	if (DUMMY_ENCRYPTION_ENABLED(sbi)) {
+		memset(raw_key, 0x42, EXT4_AES_256_XTS_KEY_SIZE);
+		goto got_key;
+	}
+	memcpy(full_key_descriptor, EXT4_KEY_DESC_PREFIX,
+	       EXT4_KEY_DESC_PREFIX_SIZE);
+	sprintf(full_key_descriptor + EXT4_KEY_DESC_PREFIX_SIZE,
+		"%*phN", EXT4_KEY_DESCRIPTOR_SIZE,
+		ctx.master_key_descriptor);
+	full_key_descriptor[EXT4_KEY_DESC_PREFIX_SIZE +
+			    (2 * EXT4_KEY_DESCRIPTOR_SIZE)] = '\0';
+	keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL);
+	if (IS_ERR(keyring_key)) {
+		res = PTR_ERR(keyring_key);
+		keyring_key = NULL;
+		goto out;
+	}
+	crypt_info->ci_keyring_key = keyring_key;
+	BUG_ON(keyring_key->type != &key_type_logon);
+	ukp = ((struct user_key_payload *)keyring_key->payload.data);
+	if (ukp->datalen != sizeof(struct ext4_encryption_key)) {
+		res = -EINVAL;
+		goto out;
+	}
+	master_key = (struct ext4_encryption_key *)ukp->data;
+	BUILD_BUG_ON(EXT4_AES_128_ECB_KEY_SIZE !=
+		     EXT4_KEY_DERIVATION_NONCE_SIZE);
+	BUG_ON(master_key->size != EXT4_AES_256_XTS_KEY_SIZE);
+	res = ext4_derive_key_aes(ctx.nonce, master_key->raw,
+				  raw_key);
+got_key:
+	ctfm = crypto_alloc_ablkcipher(cipher_str, 0, 0);
+	if (!ctfm || IS_ERR(ctfm)) {
+		res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
+		printk(KERN_DEBUG
+		       "%s: error %d (inode %u) allocating crypto tfm\n",
+		       __func__, res, (unsigned) inode->i_ino);
+		goto out;
+	}
+	crypt_info->ci_ctfm = ctfm;
+	crypto_ablkcipher_clear_flags(ctfm, ~0);
+	crypto_tfm_set_flags(crypto_ablkcipher_tfm(ctfm),
+			     CRYPTO_TFM_REQ_WEAK_KEY);
+	res = crypto_ablkcipher_setkey(ctfm, raw_key,
+				       ext4_encryption_key_size(mode));
+	if (res)
+		goto out;
+	memset(raw_key, 0, sizeof(raw_key));
+	if (cmpxchg(&ei->i_crypt_info, NULL, crypt_info) != NULL) {
+		ext4_free_crypt_info(crypt_info);
+		goto retry;
+	}
+	return 0;
+
+out:
+	if (res == -ENOKEY)
+		res = 0;
+	ext4_free_crypt_info(crypt_info);
+	memset(raw_key, 0, sizeof(raw_key));
+	return res;
+}
+
+int ext4_has_encryption_key(struct inode *inode)
+{
+	struct ext4_inode_info *ei = EXT4_I(inode);
+
+	return (ei->i_crypt_info != NULL);
+}
diff --git a/fs/ext4/crypto_policy.c b/fs/ext4/crypto_policy.c
new file mode 100644
index 0000000..02c4e5d
--- /dev/null
+++ b/fs/ext4/crypto_policy.c
@@ -0,0 +1,215 @@
+/*
+ * linux/fs/ext4/crypto_policy.c
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * This contains encryption policy functions for ext4
+ *
+ * Written by Michael Halcrow, 2015.
+ */
+
+#include <linux/random.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "ext4.h"
+#include "xattr.h"
+
+static int ext4_inode_has_encryption_context(struct inode *inode)
+{
+	int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
+				 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, NULL, 0);
+	return (res > 0);
+}
+
+/*
+ * check whether the policy is consistent with the encryption context
+ * for the inode
+ */
+static int ext4_is_encryption_context_consistent_with_policy(
+	struct inode *inode, const struct ext4_encryption_policy *policy)
+{
+	struct ext4_encryption_context ctx;
+	int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
+				 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
+				 sizeof(ctx));
+	if (res != sizeof(ctx))
+		return 0;
+	return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor,
+			EXT4_KEY_DESCRIPTOR_SIZE) == 0 &&
+		(ctx.flags ==
+		 policy->flags) &&
+		(ctx.contents_encryption_mode ==
+		 policy->contents_encryption_mode) &&
+		(ctx.filenames_encryption_mode ==
+		 policy->filenames_encryption_mode));
+}
+
+static int ext4_create_encryption_context_from_policy(
+	struct inode *inode, const struct ext4_encryption_policy *policy)
+{
+	struct ext4_encryption_context ctx;
+	int res = 0;
+
+	res = ext4_convert_inline_data(inode);
+	if (res)
+		return res;
+
+	ctx.format = EXT4_ENCRYPTION_CONTEXT_FORMAT_V1;
+	memcpy(ctx.master_key_descriptor, policy->master_key_descriptor,
+	       EXT4_KEY_DESCRIPTOR_SIZE);
+	if (!ext4_valid_contents_enc_mode(policy->contents_encryption_mode)) {
+		printk(KERN_WARNING
+		       "%s: Invalid contents encryption mode %d\n", __func__,
+			policy->contents_encryption_mode);
+		return -EINVAL;
+	}
+	if (!ext4_valid_filenames_enc_mode(policy->filenames_encryption_mode)) {
+		printk(KERN_WARNING
+		       "%s: Invalid filenames encryption mode %d\n", __func__,
+			policy->filenames_encryption_mode);
+		return -EINVAL;
+	}
+	if (policy->flags & ~EXT4_POLICY_FLAGS_VALID)
+		return -EINVAL;
+	ctx.contents_encryption_mode = policy->contents_encryption_mode;
+	ctx.filenames_encryption_mode = policy->filenames_encryption_mode;
+	ctx.flags = policy->flags;
+	BUILD_BUG_ON(sizeof(ctx.nonce) != EXT4_KEY_DERIVATION_NONCE_SIZE);
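+	/* A fresh random nonce makes each inode's derived key unique. */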
+	get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE);
+
+	res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION,
+			     EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
+			     sizeof(ctx), 0);
+	if (!res)
+		ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
+	return res;
+}
+
+int ext4_process_policy(const struct ext4_encryption_policy *policy,
+			struct inode *inode)
+{
+	if (policy->version != 0)
+		return -EINVAL;
+
+	if (!ext4_inode_has_encryption_context(inode)) {
+		if (!S_ISDIR(inode->i_mode))
+			return -EINVAL;
+		if (!ext4_empty_dir(inode))
+			return -ENOTEMPTY;
+		return ext4_create_encryption_context_from_policy(inode,
+								  policy);
+	}
+
+	if (ext4_is_encryption_context_consistent_with_policy(inode, policy))
+		return 0;
+
+	printk(KERN_WARNING "%s: Policy inconsistent with encryption context\n",
+	       __func__);
+	return -EINVAL;
+}
+
+int ext4_get_policy(struct inode *inode, struct ext4_encryption_policy *policy)
+{
+	struct ext4_encryption_context ctx;
+
+	int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
+				 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
+				 &ctx, sizeof(ctx));
+	if (res != sizeof(ctx))
+		return -ENOENT;
+	if (ctx.format != EXT4_ENCRYPTION_CONTEXT_FORMAT_V1)
+		return -EINVAL;
+	policy->version = 0;
+	policy->contents_encryption_mode = ctx.contents_encryption_mode;
+	policy->filenames_encryption_mode = ctx.filenames_encryption_mode;
+	policy->flags = ctx.flags;
+	memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor,
+	       EXT4_KEY_DESCRIPTOR_SIZE);
+	return 0;
+}
+
+int ext4_is_child_context_consistent_with_parent(struct inode *parent,
+						 struct inode *child)
+{
+	struct ext4_crypt_info *parent_ci, *child_ci;
+	int res;
+
+	if ((parent == NULL) || (child == NULL)) {
+		pr_err("parent %p child %p\n", parent, child);
+		BUG_ON(1);
+	}
+	/* no restrictions if the parent directory is not encrypted */
+	if (!ext4_encrypted_inode(parent))
+		return 1;
+	/* if the child directory is not encrypted, this is always a problem */
+	if (!ext4_encrypted_inode(child))
+		return 0;
+	res = ext4_get_encryption_info(parent);
+	if (res)
+		return 0;
+	res = ext4_get_encryption_info(child);
+	if (res)
+		return 0;
+	parent_ci = EXT4_I(parent)->i_crypt_info;
+	child_ci = EXT4_I(child)->i_crypt_info;
+	if (!parent_ci && !child_ci)
+		return 1;
+	if (!parent_ci || !child_ci)
+		return 0;
+
+	return (memcmp(parent_ci->ci_master_key,
+		       child_ci->ci_master_key,
+		       EXT4_KEY_DESCRIPTOR_SIZE) == 0 &&
+		(parent_ci->ci_data_mode == child_ci->ci_data_mode) &&
+		(parent_ci->ci_filename_mode == child_ci->ci_filename_mode) &&
+		(parent_ci->ci_flags == child_ci->ci_flags));
+}
+
+/**
+ * ext4_inherit_context() - Sets a child context from its parent
+ * @parent: Parent inode from which the context is inherited.
+ * @child:  Child inode that inherits the context from @parent.
+ *
+ * Return: Zero on success, non-zero otherwise
+ */
+int ext4_inherit_context(struct inode *parent, struct inode *child)
+{
+	struct ext4_encryption_context ctx;
+	struct ext4_crypt_info *ci;
+	int res;
+
+	res = ext4_get_encryption_info(parent);
+	if (res < 0)
+		return res;
+	ci = EXT4_I(parent)->i_crypt_info;
+	if (ci == NULL)
+		return -ENOKEY;
+
+	ctx.format = EXT4_ENCRYPTION_CONTEXT_FORMAT_V1;
+	if (DUMMY_ENCRYPTION_ENABLED(EXT4_SB(parent->i_sb))) {
+		ctx.contents_encryption_mode = EXT4_ENCRYPTION_MODE_AES_256_XTS;
+		ctx.filenames_encryption_mode =
+			EXT4_ENCRYPTION_MODE_AES_256_CTS;
+		ctx.flags = 0;
+		memset(ctx.master_key_descriptor, 0x42,
+		       EXT4_KEY_DESCRIPTOR_SIZE);
+		res = 0;
+	} else {
+		ctx.contents_encryption_mode = ci->ci_data_mode;
+		ctx.filenames_encryption_mode = ci->ci_filename_mode;
+		ctx.flags = ci->ci_flags;
+		memcpy(ctx.master_key_descriptor, ci->ci_master_key,
+		       EXT4_KEY_DESCRIPTOR_SIZE);
+	}
+	get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE);
+	res = ext4_xattr_set(child, EXT4_XATTR_INDEX_ENCRYPTION,
+			     EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
+			     sizeof(ctx), 0);
+	if (!res) {
+		ext4_set_inode_flag(child, EXT4_INODE_ENCRYPT);
+		ext4_clear_inode_state(child, EXT4_STATE_MAY_INLINE_DATA);
+		res = ext4_get_encryption_info(child);
+	}
+	return res;
+}
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index f8d56e4..2d3e900 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -29,12 +29,11 @@
 #include "ext4.h"
 #include "xattr.h"
 
-static int ext4_dx_readdir(struct file *filp,
-			   void *dirent, filldir_t filldir);
+static int ext4_dx_readdir(struct file *, struct dir_context *);
 
 /**
  * Check if the given dir-inode refers to an htree-indexed directory
- * (or a directory which chould potentially get coverted to use htree
+ * (or a directory which could potentially get converted to use htree
  * indexing).
  *
  * Return 1 if it is a dx dir, 0 if not
@@ -103,79 +102,80 @@
 	return 1;
 }
 
-static int ext4_readdir(struct file *filp,
-			 void *dirent, filldir_t filldir)
+static int ext4_readdir(struct file *file, struct dir_context *ctx)
 {
-	int error = 0;
 	unsigned int offset;
-	int i, stored;
+	int i;
 	struct ext4_dir_entry_2 *de;
 	int err;
-	struct inode *inode = file_inode(filp);
+	struct inode *inode = file_inode(file);
 	struct super_block *sb = inode->i_sb;
-	int ret = 0;
+	struct buffer_head *bh = NULL;
 	int dir_has_error = 0;
+	struct ext4_str fname_crypto_str = {.name = NULL, .len = 0};
 
 	if (is_dx_dir(inode)) {
-		err = ext4_dx_readdir(filp, dirent, filldir);
+		err = ext4_dx_readdir(file, ctx);
 		if (err != ERR_BAD_DX_DIR) {
-			ret = err;
-			goto out;
+			return err;
 		}
 		/*
 		 * We don't set the inode dirty flag since it's not
 		 * critical that it get flushed back to the disk.
 		 */
-		ext4_clear_inode_flag(file_inode(filp),
+		ext4_clear_inode_flag(file_inode(file),
 				      EXT4_INODE_INDEX);
 	}
 
 	if (ext4_has_inline_data(inode)) {
 		int has_inline_data = 1;
-		ret = ext4_read_inline_dir(filp, dirent, filldir,
+		err = ext4_read_inline_dir(file, ctx,
 					   &has_inline_data);
 		if (has_inline_data)
-			return ret;
+			return err;
 	}
 
-	stored = 0;
-	offset = filp->f_pos & (sb->s_blocksize - 1);
+	if (ext4_encrypted_inode(inode)) {
+		err = ext4_fname_crypto_alloc_buffer(inode, EXT4_NAME_LEN,
+						     &fname_crypto_str);
+		if (err < 0)
+			return err;
+	}
 
-	while (!error && !stored && filp->f_pos < inode->i_size) {
+	offset = ctx->pos & (sb->s_blocksize - 1);
+
+	while (ctx->pos < inode->i_size) {
 		struct ext4_map_blocks map;
-		struct buffer_head *bh = NULL;
 
-		map.m_lblk = filp->f_pos >> EXT4_BLOCK_SIZE_BITS(sb);
+		map.m_lblk = ctx->pos >> EXT4_BLOCK_SIZE_BITS(sb);
 		map.m_len = 1;
 		err = ext4_map_blocks(NULL, inode, &map, 0);
 		if (err > 0) {
 			pgoff_t index = map.m_pblk >>
 					(PAGE_CACHE_SHIFT - inode->i_blkbits);
-			if (!ra_has_index(&filp->f_ra, index))
+			if (!ra_has_index(&file->f_ra, index))
 				page_cache_sync_readahead(
 					sb->s_bdev->bd_inode->i_mapping,
-					&filp->f_ra, filp,
+					&file->f_ra, file,
 					index, 1);
-			filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
-			bh = ext4_bread(NULL, inode, map.m_lblk, 0, &err);
+			file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
+			bh = ext4_bread(NULL, inode, map.m_lblk, 0);
+			if (IS_ERR(bh))
+				return PTR_ERR(bh);
 		}
 
-		/*
-		 * We ignore I/O errors on directories so users have a chance
-		 * of recovering data when there's a bad sector
-		 */
 		if (!bh) {
 			if (!dir_has_error) {
-				EXT4_ERROR_FILE(filp, 0,
+				EXT4_ERROR_FILE(file, 0,
 						"directory contains a "
 						"hole at offset %llu",
-					   (unsigned long long) filp->f_pos);
+					   (unsigned long long) ctx->pos);
 				dir_has_error = 1;
 			}
 			/* corrupt size?  Maybe no more blocks to read */
-			if (filp->f_pos > inode->i_blocks << 9)
+			if (ctx->pos > inode->i_blocks << 9)
 				break;
-			filp->f_pos += sb->s_blocksize - offset;
+			ctx->pos += sb->s_blocksize - offset;
 			continue;
 		}
 
@@ -183,21 +183,21 @@
 		if (!buffer_verified(bh) &&
 		    !ext4_dirent_csum_verify(inode,
 				(struct ext4_dir_entry *)bh->b_data)) {
-			EXT4_ERROR_FILE(filp, 0, "directory fails checksum "
+			EXT4_ERROR_FILE(file, 0, "directory fails checksum "
 					"at offset %llu",
-					(unsigned long long)filp->f_pos);
-			filp->f_pos += sb->s_blocksize - offset;
+					(unsigned long long)ctx->pos);
+			ctx->pos += sb->s_blocksize - offset;
 			brelse(bh);
+			bh = NULL;
 			continue;
 		}
 		set_buffer_verified(bh);
 
-revalidate:
 		/* If the dir block has changed since the last call to
 		 * readdir(2), then we might be pointing to an invalid
 		 * dirent right now.  Scan from the start of the block
 		 * to make sure. */
-		if (filp->f_version != inode->i_version) {
+		if (file->f_version != inode->i_version) {
 			for (i = 0; i < sb->s_blocksize && i < offset; ) {
 				de = (struct ext4_dir_entry_2 *)
 					(bh->b_data + i);
@@ -214,57 +214,66 @@
 							    sb->s_blocksize);
 			}
 			offset = i;
-			filp->f_pos = (filp->f_pos & ~(sb->s_blocksize - 1))
+			ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1))
 				| offset;
-			filp->f_version = inode->i_version;
+			file->f_version = inode->i_version;
 		}
 
-		while (!error && filp->f_pos < inode->i_size
+		while (ctx->pos < inode->i_size
 		       && offset < sb->s_blocksize) {
 			de = (struct ext4_dir_entry_2 *) (bh->b_data + offset);
-			if (ext4_check_dir_entry(inode, filp, de, bh,
+			if (ext4_check_dir_entry(inode, file, de, bh,
 						 bh->b_data, bh->b_size,
 						 offset)) {
 				/*
-				 * On error, skip the f_pos to the next block
+				 * On error, skip to the next block
 				 */
-				filp->f_pos = (filp->f_pos |
+				ctx->pos = (ctx->pos |
 						(sb->s_blocksize - 1)) + 1;
-				brelse(bh);
-				ret = stored;
-				goto out;
+				break;
 			}
 			offset += ext4_rec_len_from_disk(de->rec_len,
 					sb->s_blocksize);
 			if (le32_to_cpu(de->inode)) {
-				/* We might block in the next section
-				 * if the data destination is
-				 * currently swapped out.  So, use a
-				 * version stamp to detect whether or
-				 * not the directory has been modified
-				 * during the copy operation.
-				 */
-				u64 version = filp->f_version;
+				if (!ext4_encrypted_inode(inode)) {
+					if (!dir_emit(ctx, de->name,
+					    de->name_len,
+					    le32_to_cpu(de->inode),
+					    get_dtype(sb, de->file_type)))
+						goto done;
+				} else {
+					int save_len = fname_crypto_str.len;
 
-				error = filldir(dirent, de->name,
-						de->name_len,
-						filp->f_pos,
-						le32_to_cpu(de->inode),
-						get_dtype(sb, de->file_type));
-				if (error)
-					break;
-				if (version != filp->f_version)
-					goto revalidate;
-				stored++;
+					/* Directory is encrypted */
+					err = ext4_fname_disk_to_usr(inode,
+						NULL, de, &fname_crypto_str);
+					fname_crypto_str.len = save_len;
+					if (err < 0)
+						goto errout;
+					if (!dir_emit(ctx,
+					    fname_crypto_str.name, err,
+					    le32_to_cpu(de->inode),
+					    get_dtype(sb, de->file_type)))
+						goto done;
+				}
 			}
-			filp->f_pos += ext4_rec_len_from_disk(de->rec_len,
+			ctx->pos += ext4_rec_len_from_disk(de->rec_len,
 						sb->s_blocksize);
 		}
-		offset = 0;
+		if ((ctx->pos < inode->i_size) && !dir_relax(inode))
+			goto done;
 		brelse(bh);
+		bh = NULL;
+		offset = 0;
 	}
-out:
-	return ret;
+done:
+	err = 0;
+errout:
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+	ext4_fname_crypto_free_buffer(&fname_crypto_str);
+#endif
+	brelse(bh);
+	return err;
 }
 
 static inline int is_32bit_api(void)
@@ -370,41 +379,16 @@
  */
 static void free_rb_tree_fname(struct rb_root *root)
 {
-	struct rb_node	*n = root->rb_node;
-	struct rb_node	*parent;
-	struct fname	*fname;
+	struct fname *fname, *next;
 
-	while (n) {
-		/* Do the node's children first */
-		if (n->rb_left) {
-			n = n->rb_left;
-			continue;
-		}
-		if (n->rb_right) {
-			n = n->rb_right;
-			continue;
-		}
-		/*
-		 * The node has no children; free it, and then zero
-		 * out parent's link to it.  Finally go to the
-		 * beginning of the loop and try to free the parent
-		 * node.
-		 */
-		parent = rb_parent(n);
-		fname = rb_entry(n, struct fname, rb_hash);
+	rbtree_postorder_for_each_entry_safe(fname, next, root, rb_hash)
 		while (fname) {
 			struct fname *old = fname;
 			fname = fname->next;
 			kfree(old);
 		}
-		if (!parent)
-			*root = RB_ROOT;
-		else if (parent->rb_left == n)
-			parent->rb_left = NULL;
-		else if (parent->rb_right == n)
-			parent->rb_right = NULL;
-		n = parent;
-	}
+
+	*root = RB_ROOT;
 }
 
 
@@ -429,10 +413,15 @@
 
 /*
  * Given a directory entry, enter it into the fname rb tree.
+ *
+ * When filename encryption is enabled, the dirent will hold the
+ * encrypted filename, while the htree will hold the decrypted filename.
+ * The decrypted filename is passed in via the ent_name parameter.
  */
 int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
 			     __u32 minor_hash,
-			     struct ext4_dir_entry_2 *dirent)
+			    struct ext4_dir_entry_2 *dirent,
+			    struct ext4_str *ent_name)
 {
 	struct rb_node **p, *parent = NULL;
 	struct fname *fname, *new_fn;
@@ -443,17 +432,17 @@
 	p = &info->root.rb_node;
 
 	/* Create and allocate the fname structure */
-	len = sizeof(struct fname) + dirent->name_len + 1;
+	len = sizeof(struct fname) + ent_name->len + 1;
 	new_fn = kzalloc(len, GFP_KERNEL);
 	if (!new_fn)
 		return -ENOMEM;
 	new_fn->hash = hash;
 	new_fn->minor_hash = minor_hash;
 	new_fn->inode = le32_to_cpu(dirent->inode);
-	new_fn->name_len = dirent->name_len;
+	new_fn->name_len = ent_name->len;
 	new_fn->file_type = dirent->file_type;
-	memcpy(new_fn->name, dirent->name, dirent->name_len);
-	new_fn->name[dirent->name_len] = 0;
+	memcpy(new_fn->name, ent_name->name, ent_name->len);
+	new_fn->name[ent_name->len] = 0;
 
 	while (*p) {
 		parent = *p;
@@ -492,16 +481,12 @@
 * for all entries on the fname linked list.  (Normally there is only
 * one entry on the linked list, unless there are 62-bit hash collisions.)
  */
-static int call_filldir(struct file *filp, void *dirent,
-			filldir_t filldir, struct fname *fname)
+static int call_filldir(struct file *file, struct dir_context *ctx,
+			struct fname *fname)
 {
-	struct dir_private_info *info = filp->private_data;
-	loff_t	curr_pos;
-	struct inode *inode = file_inode(filp);
-	struct super_block *sb;
-	int error;
-
-	sb = inode->i_sb;
+	struct dir_private_info *info = file->private_data;
+	struct inode *inode = file_inode(file);
+	struct super_block *sb = inode->i_sb;
 
 	if (!fname) {
 		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: comm %s: "
@@ -509,47 +494,44 @@
 			 inode->i_ino, current->comm);
 		return 0;
 	}
-	curr_pos = hash2pos(filp, fname->hash, fname->minor_hash);
+	ctx->pos = hash2pos(file, fname->hash, fname->minor_hash);
 	while (fname) {
-		error = filldir(dirent, fname->name,
-				fname->name_len, curr_pos,
+		if (!dir_emit(ctx, fname->name,
+				fname->name_len,
 				fname->inode,
-				get_dtype(sb, fname->file_type));
-		if (error) {
-			filp->f_pos = curr_pos;
+				get_dtype(sb, fname->file_type))) {
 			info->extra_fname = fname;
-			return error;
+			return 1;
 		}
 		fname = fname->next;
 	}
 	return 0;
 }
 
-static int ext4_dx_readdir(struct file *filp,
-			 void *dirent, filldir_t filldir)
+static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
 {
-	struct dir_private_info *info = filp->private_data;
-	struct inode *inode = file_inode(filp);
+	struct dir_private_info *info = file->private_data;
+	struct inode *inode = file_inode(file);
 	struct fname *fname;
 	int	ret;
 
 	if (!info) {
-		info = ext4_htree_create_dir_info(filp, filp->f_pos);
+		info = ext4_htree_create_dir_info(file, ctx->pos);
 		if (!info)
 			return -ENOMEM;
-		filp->private_data = info;
+		file->private_data = info;
 	}
 
-	if (filp->f_pos == ext4_get_htree_eof(filp))
+	if (ctx->pos == ext4_get_htree_eof(file))
 		return 0;	/* EOF */
 
 	/* Someone has messed with f_pos; reset the world */
-	if (info->last_pos != filp->f_pos) {
+	if (info->last_pos != ctx->pos) {
 		free_rb_tree_fname(&info->root);
 		info->curr_node = NULL;
 		info->extra_fname = NULL;
-		info->curr_hash = pos2maj_hash(filp, filp->f_pos);
-		info->curr_minor_hash = pos2min_hash(filp, filp->f_pos);
+		info->curr_hash = pos2maj_hash(file, ctx->pos);
+		info->curr_minor_hash = pos2min_hash(file, ctx->pos);
 	}
 
 	/*
@@ -557,7 +539,7 @@
 	 * chain, return them first.
 	 */
 	if (info->extra_fname) {
-		if (call_filldir(filp, dirent, filldir, info->extra_fname))
+		if (call_filldir(file, ctx, info->extra_fname))
 			goto finished;
 		info->extra_fname = NULL;
 		goto next_node;
@@ -571,17 +553,17 @@
 		 * cached entries.
 		 */
 		if ((!info->curr_node) ||
-		    (filp->f_version != inode->i_version)) {
+		    (file->f_version != inode->i_version)) {
 			info->curr_node = NULL;
 			free_rb_tree_fname(&info->root);
-			filp->f_version = inode->i_version;
-			ret = ext4_htree_fill_tree(filp, info->curr_hash,
+			file->f_version = inode->i_version;
+			ret = ext4_htree_fill_tree(file, info->curr_hash,
 						   info->curr_minor_hash,
 						   &info->next_hash);
 			if (ret < 0)
 				return ret;
 			if (ret == 0) {
-				filp->f_pos = ext4_get_htree_eof(filp);
+				ctx->pos = ext4_get_htree_eof(file);
 				break;
 			}
 			info->curr_node = rb_first(&info->root);
@@ -590,7 +572,7 @@
 		fname = rb_entry(info->curr_node, struct fname, rb_hash);
 		info->curr_hash = fname->hash;
 		info->curr_minor_hash = fname->minor_hash;
-		if (call_filldir(filp, dirent, filldir, fname))
+		if (call_filldir(file, ctx, fname))
 			break;
 	next_node:
 		info->curr_node = rb_next(info->curr_node);
@@ -601,7 +583,7 @@
 			info->curr_minor_hash = fname->minor_hash;
 		} else {
 			if (info->next_hash == ~0) {
-				filp->f_pos = ext4_get_htree_eof(filp);
+				ctx->pos = ext4_get_htree_eof(file);
 				break;
 			}
 			info->curr_hash = info->next_hash;
@@ -609,7 +591,14 @@
 		}
 	}
 finished:
-	info->last_pos = filp->f_pos;
+	info->last_pos = ctx->pos;
+	return 0;
+}
+
+static int ext4_dir_open(struct inode *inode, struct file *filp)
+{
+	if (ext4_encrypted_inode(inode))
+		return ext4_get_encryption_info(inode) ? -EACCES : 0;
 	return 0;
 }
 
@@ -621,14 +610,40 @@
 	return 0;
 }
 
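+/*
+ * Sanity-check every directory entry in the buffer.  Returns 0 if all
+ * entries are well-formed, -EIO otherwise.
+ */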
+int ext4_check_all_de(struct inode *dir, struct buffer_head *bh, void *buf,
+		      int buf_size)
+{
+	struct ext4_dir_entry_2 *de;
+	int nlen, rlen;
+	unsigned int offset = 0;
+	char *top;
+
+	de = (struct ext4_dir_entry_2 *)buf;
+	top = buf + buf_size;
+	while ((char *) de < top) {
+		if (ext4_check_dir_entry(dir, NULL, de, bh,
+					 buf, buf_size, offset))
+			return -EIO;
+		nlen = EXT4_DIR_REC_LEN(de->name_len);
+		rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
+		de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
+		offset += rlen;
+	}
+	if ((char *) de > top)
+		return -EIO;
+
+	return 0;
+}
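
ext4_check_all_de() above validates a directory block by hopping from entry to entry via each record's rec_len and requiring the walk to land exactly on the end of the buffer. A hedged userspace analogue of that walk, using a synthetic fixed-width record rather than the real on-disk ext4_dir_entry_2 encoding:

/* rec_len walk, simplified: each record stores the distance to the
 * next one, and a valid block's walk ends exactly on `top`. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_de {
	uint16_t rec_len;	/* distance to the next entry */
	uint8_t  name_len;
	char     name[13];
};

static int check_all_de(const char *buf, int buf_size)
{
	const char *p = buf, *top = buf + buf_size;

	while (p < top) {
		const struct demo_de *de = (const struct demo_de *)p;

		if (de->rec_len < sizeof(*de) || p + de->rec_len > top)
			return -1;	/* corrupt: would run past top */
		p += de->rec_len;
	}
	return p == top ? 0 : -1;	/* must end exactly at top */
}

int main(void)
{
	char block[2 * sizeof(struct demo_de)] = { 0 };
	struct demo_de a = { sizeof(a), 3, "foo" };
	struct demo_de b = { sizeof(b), 3, "bar" };

	memcpy(block, &a, sizeof(a));
	memcpy(block + sizeof(a), &b, sizeof(b));
	printf("valid block: %d\n", check_all_de(block, sizeof(block)));

	((struct demo_de *)block)->rec_len = 7;	/* too short: reject */
	printf("corrupt block: %d\n", check_all_de(block, sizeof(block)));
	return 0;
}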
+
 const struct file_operations ext4_dir_operations = {
 	.llseek		= ext4_dir_llseek,
 	.read		= generic_read_dir,
-	.readdir	= ext4_readdir,
+	.iterate	= ext4_readdir,
 	.unlocked_ioctl = ext4_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl	= ext4_compat_ioctl,
 #endif
 	.fsync		= ext4_sync_file,
+	.open		= ext4_dir_open,
 	.release	= ext4_release_dir,
 };
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index f9c938e..2166e54 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -30,7 +30,9 @@
 #include <linux/wait.h>
 #include <linux/blockgroup_lock.h>
 #include <linux/percpu_counter.h>
+#include <linux/ratelimit.h>
 #include <crypto/hash.h>
+#include <linux/falloc.h>
 #ifdef __KERNEL__
 #include <linux/compat.h>
 #endif
@@ -157,7 +159,6 @@
 #define EXT4_MAP_MAPPED		(1 << BH_Mapped)
 #define EXT4_MAP_UNWRITTEN	(1 << BH_Unwritten)
 #define EXT4_MAP_BOUNDARY	(1 << BH_Boundary)
-#define EXT4_MAP_UNINIT		(1 << BH_Uninit)
 /* Sometimes (in the bigalloc case, from ext4_da_get_block_prep) the caller of
  * ext4_map_blocks wants to know whether or not the underlying cluster has
  * already been accounted for. EXT4_MAP_FROM_CLUSTER conveys to the caller that
@@ -168,7 +169,7 @@
 #define EXT4_MAP_FROM_CLUSTER	(1 << BH_AllocFromCluster)
 #define EXT4_MAP_FLAGS		(EXT4_MAP_NEW | EXT4_MAP_MAPPED |\
 				 EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY |\
-				 EXT4_MAP_UNINIT | EXT4_MAP_FROM_CLUSTER)
+				 EXT4_MAP_FROM_CLUSTER)
 
 struct ext4_map_blocks {
 	ext4_fsblk_t m_pblk;
@@ -178,38 +179,25 @@
 };
 
 /*
- * For delayed allocation tracking
- */
-struct mpage_da_data {
-	struct inode *inode;
-	sector_t b_blocknr;		/* start block number of extent */
-	size_t b_size;			/* size of extent */
-	unsigned long b_state;		/* state of the extent */
-	unsigned long first_page, next_page;	/* extent of pages */
-	struct writeback_control *wbc;
-	int io_done;
-	int pages_written;
-	int retval;
-};
-
-/*
  * Flags for ext4_io_end->flags
  */
 #define	EXT4_IO_END_UNWRITTEN	0x0001
-#define EXT4_IO_END_ERROR	0x0002
-#define EXT4_IO_END_DIRECT	0x0004
 
 /*
- * For converting uninitialized extents on a work queue.
+ * For converting unwritten extents on a work queue. 'handle' is used for
+ * buffered writeback.
  */
 typedef struct ext4_io_end {
 	struct list_head	list;		/* per-file finished IO list */
+	handle_t		*handle;	/* handle reserved for extent
+						 * conversion */
 	struct inode		*inode;		/* file being written to */
+	struct bio		*bio;		/* Linked list of completed
+						 * bios covering the extent */
 	unsigned int		flag;		/* unwritten or not */
 	loff_t			offset;		/* offset in the file */
 	ssize_t			size;		/* size of the extent */
-	struct kiocb		*iocb;		/* iocb struct for AIO */
-	int			result;		/* error value for AIO */
+	atomic_t		count;		/* reference counter */
 } ext4_io_end_t;
 
 struct ext4_io_submit {
@@ -387,7 +375,8 @@
 #define EXT4_DIRTY_FL			0x00000100
 #define EXT4_COMPRBLK_FL		0x00000200 /* One or more compressed clusters */
 #define EXT4_NOCOMPR_FL			0x00000400 /* Don't compress */
-#define EXT4_ECOMPR_FL			0x00000800 /* Compression error */
+	/* nb: was previously EXT2_ECOMPR_FL */
+#define EXT4_ENCRYPT_FL			0x00000800 /* encrypted file */
 /* End compression flags --- maybe not all used */
 #define EXT4_INDEX_FL			0x00001000 /* hash-indexed directory */
 #define EXT4_IMAGIC_FL			0x00002000 /* AFS directory */
@@ -444,7 +433,7 @@
 	EXT4_INODE_DIRTY	= 8,
 	EXT4_INODE_COMPRBLK	= 9,	/* One or more compressed clusters */
 	EXT4_INODE_NOCOMPR	= 10,	/* Don't compress */
-	EXT4_INODE_ECOMPR	= 11,	/* Compression error */
+	EXT4_INODE_ENCRYPT	= 11,	/* Encrypted file */
 /* End compression flags --- maybe not all used */
 	EXT4_INODE_INDEX	= 12,	/* hash-indexed directory */
 	EXT4_INODE_IMAGIC	= 13,	/* AFS directory */
@@ -489,7 +478,7 @@
 	CHECK_FLAG_VALUE(DIRTY);
 	CHECK_FLAG_VALUE(COMPRBLK);
 	CHECK_FLAG_VALUE(NOCOMPR);
-	CHECK_FLAG_VALUE(ECOMPR);
+	CHECK_FLAG_VALUE(ENCRYPT);
 	CHECK_FLAG_VALUE(INDEX);
 	CHECK_FLAG_VALUE(IMAGIC);
 	CHECK_FLAG_VALUE(JOURNAL_DATA);
@@ -550,26 +539,26 @@
 /*
  * Flags used by ext4_map_blocks()
  */
-	/* Allocate any needed blocks and/or convert an unitialized
+	/* Allocate any needed blocks and/or convert an unwritten
 	   extent to be an initialized extent */
 #define EXT4_GET_BLOCKS_CREATE			0x0001
-	/* Request the creation of an unitialized extent */
-#define EXT4_GET_BLOCKS_UNINIT_EXT		0x0002
-#define EXT4_GET_BLOCKS_CREATE_UNINIT_EXT	(EXT4_GET_BLOCKS_UNINIT_EXT|\
+	/* Request the creation of an unwritten extent */
+#define EXT4_GET_BLOCKS_UNWRIT_EXT		0x0002
+#define EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT	(EXT4_GET_BLOCKS_UNWRIT_EXT|\
 						 EXT4_GET_BLOCKS_CREATE)
 	/* Caller is from the delayed allocation writeout path
 	 * finally doing the actual allocation of delayed blocks */
 #define EXT4_GET_BLOCKS_DELALLOC_RESERVE	0x0004
 	/* caller is from the direct IO path; requests creation of an
-	unitialized extents if not allocated, split the uninitialized
+	unwritten extent if not allocated, and splits the unwritten
 	extent if blocks have been preallocated already */
 #define EXT4_GET_BLOCKS_PRE_IO			0x0008
 #define EXT4_GET_BLOCKS_CONVERT			0x0010
 #define EXT4_GET_BLOCKS_IO_CREATE_EXT		(EXT4_GET_BLOCKS_PRE_IO|\
-					 EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
+					 EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT)
 	/* Convert extent to initialized after IO complete */
 #define EXT4_GET_BLOCKS_IO_CONVERT_EXT		(EXT4_GET_BLOCKS_CONVERT|\
-					 EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
+					 EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT)
 	/* Eventual metadata allocation (due to growing extent tree)
 	 * should not fail, so try to use reserved blocks for that.*/
 #define EXT4_GET_BLOCKS_METADATA_NOFAIL		0x0020
@@ -581,6 +570,20 @@
 #define EXT4_GET_BLOCKS_NO_LOCK			0x0100
 	/* Do not put hole in extent cache */
 #define EXT4_GET_BLOCKS_NO_PUT_HOLE		0x0200
+	/* Convert written extents to unwritten */
+#define EXT4_GET_BLOCKS_CONVERT_UNWRITTEN	0x0400
+
+/*
+ * The bit position of these flags must not overlap with any of the
+ * EXT4_GET_BLOCKS_*.  They are used by ext4_find_extent(),
+ * read_extent_tree_block(), ext4_split_extent_at(),
+ * ext4_ext_insert_extent(), and ext4_ext_create_new_leaf().
+ * EXT4_EX_NOCACHE is used to indicate that we shouldn't be
+ * caching the extents when reading from the extent tree while a
+ * truncate or punch hole operation is in progress.
+ */
+#define EXT4_EX_NOCACHE				0x40000000
+#define EXT4_EX_FORCE_CACHE			0x20000000
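
The comment above states an invariant: the EXT4_EX_* bits must not collide with any EXT4_GET_BLOCKS_* bit, because both travel in the same flags word. Invariants like this can be pinned down at compile time; a small standalone sketch (the combined GET_BLOCKS mask below is assembled by hand and is an assumption of the demo):

/* Compile-time check of the "must not overlap" invariant; values are
 * copied from the definitions above. */
#include <assert.h>

#define DEMO_GET_BLOCKS_MASK	0x07ff	/* 0x0001..0x0400 ORed together */
#define DEMO_EX_NOCACHE		0x40000000
#define DEMO_EX_FORCE_CACHE	0x20000000

static_assert((DEMO_EX_NOCACHE & DEMO_GET_BLOCKS_MASK) == 0,
	      "EXT4_EX_NOCACHE collides with a GET_BLOCKS flag");
static_assert((DEMO_EX_FORCE_CACHE & DEMO_GET_BLOCKS_MASK) == 0,
	      "EXT4_EX_FORCE_CACHE collides with a GET_BLOCKS flag");
static_assert((DEMO_EX_NOCACHE & DEMO_EX_FORCE_CACHE) == 0,
	      "the two EX flags must be distinct bits");

int main(void) { return 0; }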
 
 /*
  * Flags used by ext4_free_blocks
@@ -593,10 +596,14 @@
 #define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER	0x0020
 #define EXT4_FREE_BLOCKS_RESERVE		0x0040
 
-/*
- * Flags used by ext4_discard_partial_page_buffers
- */
-#define EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED	0x0001
+/* Encryption algorithms */
+#define EXT4_ENCRYPTION_MODE_INVALID		0
+#define EXT4_ENCRYPTION_MODE_AES_256_XTS	1
+#define EXT4_ENCRYPTION_MODE_AES_256_GCM	2
+#define EXT4_ENCRYPTION_MODE_AES_256_CBC	3
+#define EXT4_ENCRYPTION_MODE_AES_256_CTS	4
+
+#include "ext4_crypto.h"
 
 /*
  * ioctl commands
@@ -618,6 +625,10 @@
 #define EXT4_IOC_MOVE_EXT		_IOWR('f', 15, struct move_extent)
 #define EXT4_IOC_RESIZE_FS		_IOW('f', 16, __u64)
 #define EXT4_IOC_SWAP_BOOT		_IO('f', 17)
+#define EXT4_IOC_PRECACHE_EXTENTS	_IO('f', 18)
+#define EXT4_IOC_SET_ENCRYPTION_POLICY	_IOR('f', 19, struct ext4_encryption_policy)
+#define EXT4_IOC_GET_ENCRYPTION_PWSALT	_IOW('f', 20, __u8[16])
+#define EXT4_IOC_GET_ENCRYPTION_POLICY	_IOW('f', 21, struct ext4_encryption_policy)
 
 #if defined(__KERNEL__) && defined(CONFIG_COMPAT)
 /*
@@ -938,6 +949,8 @@
 	struct inode vfs_inode;
 	struct jbd2_inode *jinode;
 
+	spinlock_t i_raw_lock;	/* protects updates to the raw inode */
+
 	/*
 	 * File creation time. Its function is same as that of
 	 * struct timespec i_{a,c,m}time in the generic inode.
@@ -952,7 +965,9 @@
 	struct ext4_es_tree i_es_tree;
 	rwlock_t i_es_lock;
 	struct list_head i_es_lru;
+	unsigned int i_es_all_nr;	/* protected by i_es_lock */
 	unsigned int i_es_lru_nr;	/* protected by i_es_lock */
+	unsigned long i_touch_when;	/* jiffies of last accessing */
 
 	/* ialloc */
 	ext4_group_t	i_last_alloc_group;
@@ -977,12 +992,20 @@
 	qsize_t i_reserved_quota;
 #endif
 
-	/* completed IOs that might need unwritten extents handling */
-	struct list_head i_completed_io_list;
+	/* Lock protecting lists below */
 	spinlock_t i_completed_io_lock;
+	/*
+	 * Completed IOs that need unwritten extents handling and have
+	 * transaction reserved
+	 */
+	struct list_head i_rsv_conversion_list;
+	/*
+	 * Completed IOs that need unwritten extents handling and don't have
+	 * transaction reserved
+	 */
 	atomic_t i_ioend_count;	/* Number of outstanding io_end structs */
 	atomic_t i_unwritten; /* Nr. of inflight conversions pending */
-	struct work_struct i_unwritten_work;	/* deferred extent conversion */
+	struct work_struct i_rsv_conversion_work;
 
 	spinlock_t i_block_reservation_lock;
 
@@ -995,6 +1018,11 @@
 
 	/* Precomputed uuid+inum+igen checksum for seeding inode checksums */
 	__u32 i_csum_seed;
+
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+	/* Encryption params */
+	struct ext4_crypt_info *i_crypt_info;
+#endif
 };
 
 /*
@@ -1054,6 +1082,8 @@
 #define EXT4_MOUNT2_STD_GROUP_SIZE	0x00000002 /* We have standard group
 						      size of blocksize * 8
 						      blocks */
+#define EXT4_MOUNT2_HURD_COMPAT		0x00000004 /* Support HURD-castrated
+						      file systems */
 
 #define clear_opt(sb, opt)		EXT4_SB(sb)->s_mount_opt &= \
 						~EXT4_MOUNT_##opt
@@ -1185,7 +1215,8 @@
 	__le32  s_raid_stripe_width;    /* blocks on all data disks (N*stride)*/
 	__u8	s_log_groups_per_flex;  /* FLEX_BG group size */
 	__u8	s_checksum_type;	/* metadata checksum algorithm used */
-	__le16  s_reserved_pad;
+	__u8	s_encryption_level;	/* versioning level for encryption */
+	__u8	s_reserved_pad;		/* Padding to next 32bits */
 	__le64	s_kbytes_written;	/* nr of lifetime kilobytes written */
 	__le32	s_snapshot_inum;	/* Inode number of active snapshot */
 	__le32	s_snapshot_id;		/* sequential ID of active snapshot */
@@ -1210,7 +1241,11 @@
 	__le32	s_usr_quota_inum;	/* inode for tracking user quota */
 	__le32	s_grp_quota_inum;	/* inode for tracking group quota */
 	__le32	s_overhead_clusters;	/* overhead blocks/clusters in fs */
-	__le32	s_reserved[108];	/* Padding to the end of the block */
+	__le32	s_backup_bgs[2];	/* groups with sparse_super2 SBs */
+	__u8	s_encrypt_algos[4];	/* Encryption algorithms in use  */
+	__u8	s_encrypt_pw_salt[16];	/* Salt used for string2key algorithm */
+	__le32	s_lpf_ino;		/* Location of the lost+found inode */
+	__le32	s_reserved[100];	/* Padding to the end of the block */
 	__le32	s_checksum;		/* crc32c(superblock) */
 };
 
@@ -1221,8 +1256,19 @@
 /*
  * run-time mount flags
  */
-#define EXT4_MF_MNTDIR_SAMPLED	0x0001
-#define EXT4_MF_FS_ABORTED	0x0002	/* Fatal error detected */
+#define EXT4_MF_MNTDIR_SAMPLED		0x0001
+#define EXT4_MF_FS_ABORTED		0x0002	/* Fatal error detected */
+#define EXT4_MF_TEST_DUMMY_ENCRYPTION	0x0004
+
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#define DUMMY_ENCRYPTION_ENABLED(sbi) (unlikely((sbi)->s_mount_flags & \
+						EXT4_MF_TEST_DUMMY_ENCRYPTION))
+#else
+#define DUMMY_ENCRYPTION_ENABLED(sbi) (0)
+#endif
+
+/* Number of quota types we support */
+#define EXT4_MAXQUOTAS 2
 
 /*
  * fourth extended-fs super-block data in memory
@@ -1287,7 +1333,7 @@
 	u32 s_min_batch_time;
 	struct block_device *journal_bdev;
 #ifdef CONFIG_QUOTA
-	char *s_qf_names[MAXQUOTAS];		/* Names of quota files with journalled quota */
+	char *s_qf_names[EXT4_MAXQUOTAS];	/* Names of quota files with journalled quota */
 	int s_jquota_fmt;			/* Format of quota to use */
 #endif
 	unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */
@@ -1319,7 +1365,6 @@
 	unsigned int s_mb_stats;
 	unsigned int s_mb_order2_reqs;
 	unsigned int s_mb_group_prealloc;
-	unsigned int s_max_writeback_mb_bump;
 	unsigned int s_max_dir_size_kb;
 	/* where last allocation was done - for stream allocation */
 	unsigned long s_mb_last_group;
@@ -1355,8 +1400,8 @@
 	struct flex_groups *s_flex_groups;
 	ext4_group_t s_flex_groups_allocated;
 
-	/* workqueue for dio unwritten */
-	struct workqueue_struct *dio_unwritten_wq;
+	/* workqueue for reserved extent conversions (buffered io) */
+	struct workqueue_struct *rsv_conversion_wq;
 
 	/* timer for periodic error stats printing */
 	struct timer_list s_err_report;
@@ -1381,8 +1426,14 @@
 	/* Reclaim extents from extent status tree */
 	struct shrinker s_es_shrinker;
 	struct list_head s_es_lru;
-	struct percpu_counter s_extent_cache_cnt;
+	struct ext4_es_stats s_es_stats;
+	struct mb_cache *s_mb_cache;
 	spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp;
+
+	/* Ratelimit ext4 messages. */
+	struct ratelimit_state s_err_ratelimit_state;
+	struct ratelimit_state s_warning_ratelimit_state;
+	struct ratelimit_state s_msg_ratelimit_state;
 };
 
 static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -1443,11 +1494,11 @@
 	EXT4_STATE_EXT_MIGRATE,		/* Inode is migrating */
 	EXT4_STATE_DIO_UNWRITTEN,	/* need convert on dio done*/
 	EXT4_STATE_NEWENTRY,		/* File just added to dir */
-	EXT4_STATE_DELALLOC_RESERVED,	/* blks already reserved for delalloc */
 	EXT4_STATE_DIOREAD_LOCK,	/* Disable support for dio read
 					   nolocking */
 	EXT4_STATE_MAY_INLINE_DATA,	/* may have in-inode data */
 	EXT4_STATE_ORDERED_MODE,	/* data=ordered mode */
+	EXT4_STATE_EXT_PRECACHED,	/* extents have been precached */
 };
 
 #define EXT4_INODE_BIT_FNS(name, field, offset)				\
@@ -1464,7 +1515,18 @@
 	clear_bit(bit + (offset), &EXT4_I(inode)->i_##field);		\
 }
 
+/* Add these declarations here only so that these functions can be
+ * found by name.  Otherwise, they are very hard to locate. */
+static inline int ext4_test_inode_flag(struct inode *inode, int bit);
+static inline void ext4_set_inode_flag(struct inode *inode, int bit);
+static inline void ext4_clear_inode_flag(struct inode *inode, int bit);
 EXT4_INODE_BIT_FNS(flag, flags, 0)
+
+/* Add these declarations here only so that these functions can be
+ * found by name.  Otherwise, they are very hard to locate. */
+static inline int ext4_test_inode_state(struct inode *inode, int bit);
+static inline void ext4_set_inode_state(struct inode *inode, int bit);
+static inline void ext4_clear_inode_state(struct inode *inode, int bit);
 #if (BITS_PER_LONG < 64)
 EXT4_INODE_BIT_FNS(state, state_flags, 0)
 
@@ -1487,6 +1549,18 @@
 #define EXT4_SB(sb)	(sb)
 #endif
 
+/*
+ * Returns true if the inode is encrypted
+ */
+static inline int ext4_encrypted_inode(struct inode *inode)
+{
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+	return ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT);
+#else
+	return 0;
+#endif
+}
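
Since EXT4_ENCRYPT_FL (0x00000800) lives in the same inode-flags word that the generic FS_IOC_GETFLAGS ioctl exposes, the encryption bit can in principle be observed from userspace without any ext4-specific interface. A hedged sketch (whether a given kernel surfaces the bit this way is not guaranteed by this patch):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

#define DEMO_ENCRYPT_FL	0x00000800	/* EXT4_ENCRYPT_FL above */

int main(int argc, char **argv)
{
	long flags = 0;
	int fd = open(argc > 1 ? argv[1] : ".", O_RDONLY);

	if (fd < 0 || ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
		perror("demo");
		return 1;
	}
	printf("%s\n", (flags & DEMO_ENCRYPT_FL) ? "encrypted" : "plain");
	return 0;
}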
+
 #define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime
 
 /*
@@ -1538,6 +1612,7 @@
 #define EXT4_FEATURE_COMPAT_EXT_ATTR		0x0008
 #define EXT4_FEATURE_COMPAT_RESIZE_INODE	0x0010
 #define EXT4_FEATURE_COMPAT_DIR_INDEX		0x0020
+#define EXT4_FEATURE_COMPAT_SPARSE_SUPER2	0x0200
 
 #define EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER	0x0001
 #define EXT4_FEATURE_RO_COMPAT_LARGE_FILE	0x0002
@@ -1570,6 +1645,7 @@
 #define EXT4_FEATURE_INCOMPAT_BG_USE_META_CSUM	0x2000 /* use crc32c for bg */
 #define EXT4_FEATURE_INCOMPAT_LARGEDIR		0x4000 /* >2GB or 3-lvl htree */
 #define EXT4_FEATURE_INCOMPAT_INLINE_DATA	0x8000 /* data in inode */
+#define EXT4_FEATURE_INCOMPAT_ENCRYPT		0x10000
 
 #define EXT2_FEATURE_COMPAT_SUPP	EXT4_FEATURE_COMPAT_EXT_ATTR
 #define EXT2_FEATURE_INCOMPAT_SUPP	(EXT4_FEATURE_INCOMPAT_FILETYPE| \
@@ -1593,8 +1669,9 @@
 					 EXT4_FEATURE_INCOMPAT_EXTENTS| \
 					 EXT4_FEATURE_INCOMPAT_64BIT| \
 					 EXT4_FEATURE_INCOMPAT_FLEX_BG| \
-					 EXT4_FEATURE_INCOMPAT_MMP |	\
-					 EXT4_FEATURE_INCOMPAT_INLINE_DATA)
+					 EXT4_FEATURE_INCOMPAT_MMP | \
+					 EXT4_FEATURE_INCOMPAT_INLINE_DATA | \
+					 EXT4_FEATURE_INCOMPAT_ENCRYPT)
 #define EXT4_FEATURE_RO_COMPAT_SUPP	(EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
 					 EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
 					 EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
@@ -1814,6 +1891,17 @@
  */
 #define HASH_NB_ALWAYS		1
 
+struct ext4_filename {
+	const struct qstr *usr_fname;
+	struct ext4_str disk_name;
+	struct dx_hash_info hinfo;
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+	struct ext4_str crypto_buf;
+#endif
+};
+
+#define fname_name(p) ((p)->disk_name.name)
+#define fname_len(p)  ((p)->disk_name.len)
 
 /*
  * Describe an inode's exact location on disk and in memory
@@ -1856,7 +1944,7 @@
 /*
  * Special error return code only used by dx_probe() and its callers.
  */
-#define ERR_BAD_DX_DIR	-75000
+#define ERR_BAD_DX_DIR	(-(MAX_ERRNO - 1))
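
The old -75000 value was a problem for code that funnels ERR_BAD_DX_DIR through the kernel's ERR_PTR()/IS_ERR() machinery, which only recognizes the top MAX_ERRNO (4095) values of the address space as encoded errors; -(MAX_ERRNO - 1) stays inside that window. A userspace re-implementation of the idiom makes the difference visible:

/* Why -(MAX_ERRNO - 1) and not -75000: IS_ERR() only treats the top
 * MAX_ERRNO pointer values as encoded error codes. */
#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr) { return IS_ERR_VALUE((unsigned long)ptr); }

int main(void)
{
	void *bad  = ERR_PTR(-75000);		/* the old value */
	void *good = ERR_PTR(-(MAX_ERRNO - 1));	/* ERR_BAD_DX_DIR now */

	printf("-75000:         IS_ERR=%d\n", IS_ERR(bad));	/* 0: lost */
	printf("-(MAX_ERRNO-1): IS_ERR=%d code=%ld\n",
	       IS_ERR(good), PTR_ERR(good));			/* 1, -4094 */
	return 0;
}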
 
 /*
  * Timeout and state flag for lazy initialization inode thread.
@@ -1986,10 +2074,6 @@
 extern ext4_group_t ext4_get_group_number(struct super_block *sb,
 					  ext4_fsblk_t block);
 
-extern void ext4_validate_block_bitmap(struct super_block *sb,
-				       struct ext4_group_desc *desc,
-				       unsigned int block_group,
-				       struct buffer_head *bh);
 extern unsigned int ext4_block_group(struct super_block *sb,
 			ext4_fsblk_t blocknr);
 extern ext4_grpblk_t ext4_block_group_offset(struct super_block *sb,
@@ -2018,18 +2102,135 @@
 				  struct buffer_head *bh);
 extern struct buffer_head *ext4_read_block_bitmap(struct super_block *sb,
 						  ext4_group_t block_group);
-extern void ext4_init_block_bitmap(struct super_block *sb,
-				   struct buffer_head *bh,
-				   ext4_group_t group,
-				   struct ext4_group_desc *desc);
 extern unsigned ext4_free_clusters_after_init(struct super_block *sb,
 					      ext4_group_t block_group,
 					      struct ext4_group_desc *gdp);
-extern unsigned ext4_num_overhead_clusters(struct super_block *sb,
-					   ext4_group_t block_group,
-					   struct ext4_group_desc *gdp);
 ext4_fsblk_t ext4_inode_to_goal_block(struct inode *);
 
+/* crypto_policy.c */
+int ext4_is_child_context_consistent_with_parent(struct inode *parent,
+						 struct inode *child);
+int ext4_inherit_context(struct inode *parent, struct inode *child);
+void ext4_to_hex(char *dst, char *src, size_t src_size);
+int ext4_process_policy(const struct ext4_encryption_policy *policy,
+			struct inode *inode);
+int ext4_get_policy(struct inode *inode,
+		    struct ext4_encryption_policy *policy);
+
+/* crypto.c */
+extern struct kmem_cache *ext4_crypt_info_cachep;
+bool ext4_valid_contents_enc_mode(uint32_t mode);
+uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size);
+extern struct workqueue_struct *ext4_read_workqueue;
+struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode);
+void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx);
+void ext4_restore_control_page(struct page *data_page);
+struct page *ext4_encrypt(struct inode *inode,
+			  struct page *plaintext_page);
+int ext4_decrypt(struct ext4_crypto_ctx *ctx, struct page *page);
+int ext4_decrypt_one(struct inode *inode, struct page *page);
+int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex);
+
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+int ext4_init_crypto(void);
+void ext4_exit_crypto(void);
+static inline int ext4_sb_has_crypto(struct super_block *sb)
+{
+	return EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_ENCRYPT);
+}
+#else
+static inline int ext4_init_crypto(void) { return 0; }
+static inline void ext4_exit_crypto(void) { }
+static inline int ext4_sb_has_crypto(struct super_block *sb)
+{
+	return 0;
+}
+#endif
+
+/* crypto_fname.c */
+bool ext4_valid_filenames_enc_mode(uint32_t mode);
+u32 ext4_fname_crypto_round_up(u32 size, u32 blksize);
+unsigned ext4_fname_encrypted_size(struct inode *inode, u32 ilen);
+int ext4_fname_crypto_alloc_buffer(struct inode *inode,
+				   u32 ilen, struct ext4_str *crypto_str);
+int _ext4_fname_disk_to_usr(struct inode *inode,
+			    struct dx_hash_info *hinfo,
+			    const struct ext4_str *iname,
+			    struct ext4_str *oname);
+int ext4_fname_disk_to_usr(struct inode *inode,
+			   struct dx_hash_info *hinfo,
+			   const struct ext4_dir_entry_2 *de,
+			   struct ext4_str *oname);
+int ext4_fname_usr_to_disk(struct inode *inode,
+			   const struct qstr *iname,
+			   struct ext4_str *oname);
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+void ext4_fname_crypto_free_buffer(struct ext4_str *crypto_str);
+int ext4_fname_setup_filename(struct inode *dir, const struct qstr *iname,
+			      int lookup, struct ext4_filename *fname);
+void ext4_fname_free_filename(struct ext4_filename *fname);
+#else
+static inline
+int ext4_setup_fname_crypto(struct inode *inode)
+{
+	return 0;
+}
+static inline void ext4_fname_crypto_free_buffer(struct ext4_str *p) { }
+static inline int ext4_fname_setup_filename(struct inode *dir,
+				     const struct qstr *iname,
+				     int lookup, struct ext4_filename *fname)
+{
+	fname->usr_fname = iname;
+	fname->disk_name.name = (unsigned char *) iname->name;
+	fname->disk_name.len = iname->len;
+	return 0;
+}
+static inline void ext4_fname_free_filename(struct ext4_filename *fname) { }
+#endif
+
+
+/* crypto_key.c */
+void ext4_free_crypt_info(struct ext4_crypt_info *ci);
+void ext4_free_encryption_info(struct inode *inode, struct ext4_crypt_info *ci);
+int _ext4_get_encryption_info(struct inode *inode);
+
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+int ext4_has_encryption_key(struct inode *inode);
+
+static inline int ext4_get_encryption_info(struct inode *inode)
+{
+	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
+
+	if (!ci ||
+	    (ci->ci_keyring_key &&
+	     (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+					   (1 << KEY_FLAG_REVOKED) |
+					   (1 << KEY_FLAG_DEAD)))))
+		return _ext4_get_encryption_info(inode);
+	return 0;
+}
+
+static inline struct ext4_crypt_info *ext4_encryption_info(struct inode *inode)
+{
+	return EXT4_I(inode)->i_crypt_info;
+}
+
+#else
+static inline int ext4_has_encryption_key(struct inode *inode)
+{
+	return 0;
+}
+static inline int ext4_get_encryption_info(struct inode *inode)
+{
+	return 0;
+}
+static inline struct ext4_crypt_info *ext4_encryption_info(struct inode *inode)
+{
+	return NULL;
+}
+#endif
+
+
 /* dir.c */
 extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
 				  struct file *,
@@ -2040,18 +2241,20 @@
 	unlikely(__ext4_check_dir_entry(__func__, __LINE__, (dir), (filp), \
 					(de), (bh), (buf), (size), (offset)))
 extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
-				    __u32 minor_hash,
-				    struct ext4_dir_entry_2 *dirent);
+				__u32 minor_hash,
+				struct ext4_dir_entry_2 *dirent,
+				struct ext4_str *ent_name);
 extern void ext4_htree_free_dir_info(struct dir_private_info *p);
 extern int ext4_find_dest_de(struct inode *dir, struct inode *inode,
 			     struct buffer_head *bh,
 			     void *buf, int buf_size,
-			     const char *name, int namelen,
+			     struct ext4_filename *fname,
 			     struct ext4_dir_entry_2 **dest_de);
-void ext4_insert_dentry(struct inode *inode,
-			struct ext4_dir_entry_2 *de,
-			int buf_size,
-			const char *name, int namelen);
+int ext4_insert_dentry(struct inode *dir,
+		       struct inode *inode,
+		       struct ext4_dir_entry_2 *de,
+		       int buf_size,
+		       struct ext4_filename *fname);
 static inline void ext4_update_dx_flag(struct inode *inode)
 {
 	if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
@@ -2070,10 +2273,11 @@
 
 	return ext4_filetype_table[filetype];
 }
+extern int ext4_check_all_de(struct inode *dir, struct buffer_head *bh,
+			     void *buf, int buf_size);
 
 /* fsync.c */
 extern int ext4_sync_file(struct file *, loff_t, loff_t, int);
-extern int ext4_flush_unwritten_io(struct inode *);
 
 /* hash.c */
 extern int ext4fs_dirhash(const char *name, int len, struct
@@ -2124,13 +2328,13 @@
 		ext4_group_t i, struct ext4_group_desc *desc);
 extern int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
 				ext4_fsblk_t block, unsigned long count);
-extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
+extern int ext4_trim_fs(struct super_block *, struct fstrim_range *,
+				unsigned long blkdev_flags);
 
 /* inode.c */
-struct buffer_head *ext4_getblk(handle_t *, struct inode *,
-						ext4_lblk_t, int, int *);
-struct buffer_head *ext4_bread(handle_t *, struct inode *,
-						ext4_lblk_t, int, int *);
+int ext4_inode_is_fast_symlink(struct inode *inode);
+struct buffer_head *ext4_getblk(handle_t *, struct inode *, ext4_lblk_t, int);
+struct buffer_head *ext4_bread(handle_t *, struct inode *, ext4_lblk_t, int);
 int ext4_get_block_write(struct inode *inode, sector_t iblock,
 			 struct buffer_head *bh_result, int create);
 int ext4_get_block(struct inode *inode, sector_t iblock,
@@ -2161,9 +2365,10 @@
 extern void ext4_dirty_inode(struct inode *, int);
 extern int ext4_change_inode_journal_flag(struct inode *, int);
 extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *);
+extern int ext4_inode_attach_jinode(struct inode *inode);
 extern int ext4_can_truncate(struct inode *inode);
 extern void ext4_truncate(struct inode *);
-extern int ext4_punch_hole(struct file *file, loff_t offset, loff_t length);
+extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
 extern int ext4_truncate_restart_trans(handle_t *, struct inode *, int nblocks);
 extern void ext4_set_inode_flags(struct inode *);
 extern void ext4_get_inode_flags(struct ext4_inode_info *);
@@ -2171,9 +2376,8 @@
 extern void ext4_set_aops(struct inode *inode);
 extern int ext4_writepage_trans_blocks(struct inode *);
 extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
-extern int ext4_discard_partial_page_buffers(handle_t *handle,
-		struct address_space *mapping, loff_t from,
-		loff_t length, int flags);
+extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
+			     loff_t lstart, loff_t lend);
 extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 extern qsize_t *ext4_get_reserved_space(struct inode *inode);
 extern void ext4_da_update_reserve_space(struct inode *inode,
@@ -2183,13 +2387,13 @@
 extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
 				struct ext4_map_blocks *map, int flags);
 extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
-				const struct iovec *iov, loff_t offset,
-				unsigned long nr_segs);
+				  const struct iovec *iov, loff_t offset,
+				  unsigned long nr_segs);
 extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock);
-extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk);
+extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks);
 extern void ext4_ind_truncate(handle_t *, struct inode *inode);
-extern int ext4_free_hole_blocks(handle_t *handle, struct inode *inode,
-				 ext4_lblk_t first, ext4_lblk_t stop);
+extern int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
+				 ext4_lblk_t start, ext4_lblk_t end);
 
 /* ioctl.c */
 extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
@@ -2206,13 +2410,14 @@
 extern int ext4_orphan_del(handle_t *, struct inode *);
 extern int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
 				__u32 start_minor_hash, __u32 *next_hash);
-extern int search_dir(struct buffer_head *bh,
-		      char *search_buf,
-		      int buf_size,
-		      struct inode *dir,
-		      const struct qstr *d_name,
-		      unsigned int offset,
-		      struct ext4_dir_entry_2 **res_dir);
+extern int ext4_search_dir(struct buffer_head *bh,
+			   char *search_buf,
+			   int buf_size,
+			   struct inode *dir,
+			   struct ext4_filename *fname,
+			   const struct qstr *d_name,
+			   unsigned int offset,
+			   struct ext4_dir_entry_2 **res_dir);
 extern int ext4_generic_delete_entry(handle_t *handle,
 				     struct inode *dir,
 				     struct ext4_dir_entry_2 *de_del,
@@ -2220,6 +2425,7 @@
 				     void *entry_buf,
 				     int buf_size,
 				     int csum_size);
+extern int ext4_empty_dir(struct inode *inode);
 
 /* resize.c */
 extern int ext4_group_add(struct super_block *sb,
@@ -2231,8 +2437,6 @@
 
 /* super.c */
 extern int ext4_calculate_overhead(struct super_block *sb);
-extern int ext4_superblock_csum_verify(struct super_block *sb,
-				       struct ext4_super_block *es);
 extern void ext4_superblock_csum_set(struct super_block *sb);
 extern void *ext4_kvmalloc(size_t size, gfp_t flags);
 extern void *ext4_kvzalloc(size_t size, gfp_t flags);
@@ -2241,42 +2445,96 @@
 				    ext4_group_t ngroup);
 extern const char *ext4_decode_error(struct super_block *sb, int errno,
 				     char nbuf[16]);
+
 extern __printf(4, 5)
 void __ext4_error(struct super_block *, const char *, unsigned int,
 		  const char *, ...);
-#define ext4_error(sb, message...)	__ext4_error(sb, __func__,	\
-						     __LINE__, ## message)
 extern __printf(5, 6)
-void ext4_error_inode(struct inode *, const char *, unsigned int, ext4_fsblk_t,
+void __ext4_error_inode(struct inode *, const char *, unsigned int, ext4_fsblk_t,
 		      const char *, ...);
 extern __printf(5, 6)
-void ext4_error_file(struct file *, const char *, unsigned int, ext4_fsblk_t,
+void __ext4_error_file(struct file *, const char *, unsigned int, ext4_fsblk_t,
 		     const char *, ...);
 extern void __ext4_std_error(struct super_block *, const char *,
 			     unsigned int, int);
 extern __printf(4, 5)
 void __ext4_abort(struct super_block *, const char *, unsigned int,
 		  const char *, ...);
-#define ext4_abort(sb, message...)	__ext4_abort(sb, __func__, \
-						       __LINE__, ## message)
 extern __printf(4, 5)
 void __ext4_warning(struct super_block *, const char *, unsigned int,
 		    const char *, ...);
-#define ext4_warning(sb, message...)	__ext4_warning(sb, __func__, \
-						       __LINE__, ## message)
 extern __printf(3, 4)
-void ext4_msg(struct super_block *, const char *, const char *, ...);
+void __ext4_msg(struct super_block *, const char *, const char *, ...);
 extern void __dump_mmp_msg(struct super_block *, struct mmp_struct *mmp,
 			   const char *, unsigned int, const char *);
-#define dump_mmp_msg(sb, mmp, msg)	__dump_mmp_msg(sb, mmp, __func__, \
-						       __LINE__, msg)
 extern __printf(7, 8)
 void __ext4_grp_locked_error(const char *, unsigned int,
 			     struct super_block *, ext4_group_t,
 			     unsigned long, ext4_fsblk_t,
 			     const char *, ...);
-#define ext4_grp_locked_error(sb, grp, message...) \
-	__ext4_grp_locked_error(__func__, __LINE__, (sb), (grp), ## message)
+
+#ifdef CONFIG_PRINTK
+
+#define ext4_error_inode(inode, func, line, block, fmt, ...)		\
+	__ext4_error_inode(inode, func, line, block, fmt, ##__VA_ARGS__)
+#define ext4_error_file(file, func, line, block, fmt, ...)		\
+	__ext4_error_file(file, func, line, block, fmt, ##__VA_ARGS__)
+#define ext4_error(sb, fmt, ...)					\
+	__ext4_error(sb, __func__, __LINE__, fmt, ##__VA_ARGS__)
+#define ext4_abort(sb, fmt, ...)					\
+	__ext4_abort(sb, __func__, __LINE__, fmt, ##__VA_ARGS__)
+#define ext4_warning(sb, fmt, ...)					\
+	__ext4_warning(sb, __func__, __LINE__, fmt, ##__VA_ARGS__)
+#define ext4_msg(sb, level, fmt, ...)				\
+	__ext4_msg(sb, level, fmt, ##__VA_ARGS__)
+#define dump_mmp_msg(sb, mmp, msg)					\
+	__dump_mmp_msg(sb, mmp, __func__, __LINE__, msg)
+#define ext4_grp_locked_error(sb, grp, ino, block, fmt, ...)		\
+	__ext4_grp_locked_error(__func__, __LINE__, sb, grp, ino, block, \
+				fmt, ##__VA_ARGS__)
+
+#else
+
+#define ext4_error_inode(inode, func, line, block, fmt, ...)		\
+do {									\
+	no_printk(fmt, ##__VA_ARGS__);					\
+	__ext4_error_inode(inode, "", 0, block, " ");			\
+} while (0)
+#define ext4_error_file(file, func, line, block, fmt, ...)		\
+do {									\
+	no_printk(fmt, ##__VA_ARGS__);					\
+	__ext4_error_file(file, "", 0, block, " ");			\
+} while (0)
+#define ext4_error(sb, fmt, ...)					\
+do {									\
+	no_printk(fmt, ##__VA_ARGS__);					\
+	__ext4_error(sb, "", 0, " ");					\
+} while (0)
+#define ext4_abort(sb, fmt, ...)					\
+do {									\
+	no_printk(fmt, ##__VA_ARGS__);					\
+	__ext4_abort(sb, "", 0, " ");					\
+} while (0)
+#define ext4_warning(sb, fmt, ...)					\
+do {									\
+	no_printk(fmt, ##__VA_ARGS__);					\
+	__ext4_warning(sb, "", 0, " ");					\
+} while (0)
+#define ext4_msg(sb, level, fmt, ...)					\
+do {									\
+	no_printk(fmt, ##__VA_ARGS__);					\
+	__ext4_msg(sb, "", " ");					\
+} while (0)
+#define dump_mmp_msg(sb, mmp, msg)					\
+	__dump_mmp_msg(sb, mmp, "", 0, "")
+#define ext4_grp_locked_error(sb, grp, ino, block, fmt, ...)		\
+do {									\
+	no_printk(fmt, ##__VA_ARGS__);					\
+	__ext4_grp_locked_error("", 0, sb, grp, ino, block, " ");	\
+} while (0)
+
+#endif
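
The !CONFIG_PRINTK branch above routes every format string through no_printk() so the compiler still type-checks the arguments even though nothing is ever emitted. The same trick works in plain C; a standalone sketch (printf stands in for printk here):

/* `if (0)` is compiled out, but fmt/args are still type-checked. */
#include <stdio.h>

#define demo_no_printk(fmt, ...)				\
do {								\
	if (0)							\
		printf(fmt, ##__VA_ARGS__);			\
} while (0)

int main(void)
{
	demo_no_printk("block %llu\n", 42ULL);	/* compiles to nothing */
	/* demo_no_printk("block %llu\n", "oops");  <- compile-time warning */
	return 0;
}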
+
 extern void ext4_update_dynamic_rev(struct super_block *sb);
 extern int ext4_update_compat_feature(handle_t *handle, struct super_block *sb,
 					__u32 compat);
@@ -2327,6 +2585,14 @@
 	       (EXT4_SB(sb)->s_chksum_driver != NULL);
 }
 
+static inline int ext4_has_metadata_csum(struct super_block *sb)
+{
+	WARN_ON_ONCE(EXT4_HAS_RO_COMPAT_FEATURE(sb,
+			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
+		     !EXT4_SB(sb)->s_chksum_driver);
+
+	return (EXT4_SB(sb)->s_chksum_driver != NULL);
+}
+
 static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es)
 {
 	return ((ext4_fsblk_t)le32_to_cpu(es->s_blocks_count_hi) << 32) |
@@ -2387,6 +2653,7 @@
 {
 	 struct ext4_group_info ***grp_info;
 	 long indexv, indexh;
+	 BUG_ON(group >= EXT4_SB(sb)->s_groups_count);
 	 grp_info = EXT4_SB(sb)->s_group_info;
 	 indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
 	 indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
@@ -2433,16 +2700,31 @@
 #define EXT4_FREECLUSTERS_WATERMARK 0
 #endif
 
+/* Update i_disksize. Requires i_mutex to avoid races with truncate */
 static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
 {
-	/*
-	 * XXX: replace with spinlock if seen contended -bzzz
-	 */
+	WARN_ON_ONCE(S_ISREG(inode->i_mode) &&
+		     !mutex_is_locked(&inode->i_mutex));
 	down_write(&EXT4_I(inode)->i_data_sem);
 	if (newsize > EXT4_I(inode)->i_disksize)
 		EXT4_I(inode)->i_disksize = newsize;
 	up_write(&EXT4_I(inode)->i_data_sem);
-	return ;
+}
+
+/* Update i_size, i_disksize. Requires i_mutex to avoid races with truncate */
+static inline int ext4_update_inode_size(struct inode *inode, loff_t newsize)
+{
+	int changed = 0;
+
+	if (newsize > inode->i_size) {
+		i_size_write(inode, newsize);
+		changed = 1;
+	}
+	if (newsize > EXT4_I(inode)->i_disksize) {
+		ext4_update_i_disksize(inode, newsize);
+		changed |= 2;
+	}
+	return changed;
 }
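
ext4_update_inode_size() reports what it changed as a small bitmask, bit 0 for the in-core i_size and bit 1 for the on-disk i_disksize, so callers can tell whether the inode actually needs to be marked dirty. A userspace model of just the return convention (types simplified; the real helper takes i_data_sem and expects i_mutex to be held):

#include <stdio.h>

struct demo_inode { long long i_size, i_disksize; };

static int update_inode_size(struct demo_inode *inode, long long newsize)
{
	int changed = 0;

	if (newsize > inode->i_size) {
		inode->i_size = newsize;	/* like i_size_write() */
		changed = 1;
	}
	if (newsize > inode->i_disksize) {
		inode->i_disksize = newsize;
		changed |= 2;
	}
	return changed;
}

int main(void)
{
	struct demo_inode ino = { .i_size = 100, .i_disksize = 50 };

	printf("%d\n", update_inode_size(&ino, 80));	/* 2: disksize only */
	printf("%d\n", update_inode_size(&ino, 200));	/* 3: both grew */
	printf("%d\n", update_inode_size(&ino, 10));	/* 0: nothing */
	return 0;
}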
 
 struct ext4_group_info {
@@ -2465,9 +2747,15 @@
 
 #define EXT4_GROUP_INFO_NEED_INIT_BIT		0
 #define EXT4_GROUP_INFO_WAS_TRIMMED_BIT		1
+#define EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT	2
+#define EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT	3
 
 #define EXT4_MB_GRP_NEED_INIT(grp)	\
 	(test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
+#define EXT4_MB_GRP_BBITMAP_CORRUPT(grp)	\
+	(test_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &((grp)->bb_state)))
+#define EXT4_MB_GRP_IBITMAP_CORRUPT(grp)	\
+	(test_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &((grp)->bb_state)))
 
 #define EXT4_MB_GRP_WAS_TRIMMED(grp)	\
 	(test_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state)))
@@ -2544,19 +2832,10 @@
 extern const struct inode_operations ext4_file_inode_operations;
 extern const struct file_operations ext4_file_operations;
 extern loff_t ext4_llseek(struct file *file, loff_t offset, int origin);
-extern void ext4_unwritten_wait(struct inode *inode);
 
 /* inline.c */
-extern int ext4_has_inline_data(struct inode *inode);
-extern int ext4_get_inline_size(struct inode *inode);
 extern int ext4_get_max_inline_size(struct inode *inode);
 extern int ext4_find_inline_data_nolock(struct inode *inode);
-extern void ext4_write_inline_data(struct inode *inode,
-				   struct ext4_iloc *iloc,
-				   void *buffer, loff_t pos,
-				   unsigned int len);
-extern int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
-				    unsigned int len);
 extern int ext4_init_inline_data(handle_t *handle, struct inode *inode,
 				 unsigned int len);
 extern int ext4_destroy_inline_data(handle_t *handle, struct inode *inode);
@@ -2584,13 +2863,15 @@
 extern int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
 					 unsigned len, unsigned copied,
 					 struct page *page);
-extern int ext4_try_add_inline_entry(handle_t *handle, struct dentry *dentry,
+extern int ext4_try_add_inline_entry(handle_t *handle,
+				     struct ext4_filename *fname,
+				     struct dentry *dentry,
 				     struct inode *inode);
 extern int ext4_try_create_inline_dir(handle_t *handle,
 				      struct inode *parent,
 				      struct inode *inode);
 extern int ext4_read_inline_dir(struct file *filp,
-				void *dirent, filldir_t filldir,
+				struct dir_context *ctx,
 				int *has_inline_data);
 extern int htree_inlinedir_to_tree(struct file *dir_file,
 				   struct inode *dir, ext4_lblk_t block,
@@ -2598,6 +2879,7 @@
 				   __u32 start_hash, __u32 start_minor_hash,
 				   int *has_inline_data);
 extern struct buffer_head *ext4_find_inline_entry(struct inode *dir,
+					struct ext4_filename *fname,
 					const struct qstr *d_name,
 					struct ext4_dir_entry_2 **res_dir,
 					int *has_inline_data);
@@ -2620,6 +2902,12 @@
 
 extern int ext4_convert_inline_data(struct inode *inode);
 
+static inline int ext4_has_inline_data(struct inode *inode)
+{
+	return ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA) &&
+	       EXT4_I(inode)->i_inline_off;
+}
+
 /* namei.c */
 extern const struct inode_operations ext4_dir_inode_operations;
 extern const struct inode_operations ext4_special_inode_operations;
@@ -2651,6 +2939,10 @@
 		de->file_type = ext4_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
 }
 
+/* readpages.c */
+extern int ext4_mpage_readpages(struct address_space *mapping,
+				struct list_head *pages, struct page *page,
+				unsigned nr_pages);
 
 /* symlink.c */
 extern const struct inode_operations ext4_symlink_inode_operations;
@@ -2671,10 +2963,15 @@
 struct ext4_ext_path;
 struct ext4_extent;
 
+/*
+ * Maximum number of logical blocks in a file; ext4_extent's ee_block is
+ * __le32.
+ */
+#define EXT_MAX_BLOCKS	0xffffffff
+
 extern int ext4_ext_tree_init(handle_t *handle, struct inode *);
 extern int ext4_ext_writepage_trans_blocks(struct inode *, int);
-extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks,
-				       int chunk);
+extern int ext4_ext_index_trans_blocks(struct inode *inode, int extents);
 extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 			       struct ext4_map_blocks *map, int flags);
 extern void ext4_ext_truncate(handle_t *, struct inode *);
@@ -2684,8 +2981,8 @@
 extern void ext4_ext_release(struct super_block *);
 extern long ext4_fallocate(struct file *file, int mode, loff_t offset,
 			  loff_t len);
-extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
-			  ssize_t len);
+extern int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
+					  loff_t offset, ssize_t len);
 extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
 			   struct ext4_map_blocks *map, int flags);
 extern int ext4_ext_calc_metadata_amount(struct inode *inode,
@@ -2698,65 +2995,66 @@
 				      struct ext4_extent *ex1,
 				      struct ext4_extent *ex2);
 extern int ext4_ext_insert_extent(handle_t *, struct inode *,
-				  struct ext4_ext_path *,
+				  struct ext4_ext_path **,
 				  struct ext4_extent *, int);
-extern struct ext4_ext_path *ext4_ext_find_extent(struct inode *, ext4_lblk_t,
-						  struct ext4_ext_path *);
+extern struct ext4_ext_path *ext4_find_extent(struct inode *, ext4_lblk_t,
+					      struct ext4_ext_path **,
+					      int flags);
 extern void ext4_ext_drop_refs(struct ext4_ext_path *);
 extern int ext4_ext_check_inode(struct inode *inode);
 extern int ext4_find_delalloc_range(struct inode *inode,
 				    ext4_lblk_t lblk_start,
 				    ext4_lblk_t lblk_end);
 extern int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk);
+extern ext4_lblk_t ext4_ext_next_allocated_block(struct ext4_ext_path *path);
 extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 			__u64 start, __u64 len);
-
+extern int ext4_ext_precache(struct inode *inode);
+extern int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len);
+extern int ext4_swap_extents(handle_t *handle, struct inode *inode1,
+			     struct inode *inode2, ext4_lblk_t lblk1,
+			     ext4_lblk_t lblk2, ext4_lblk_t count,
+			     int mark_unwritten, int *err);
 
 /* move_extent.c */
 extern void ext4_double_down_write_data_sem(struct inode *first,
 					    struct inode *second);
 extern void ext4_double_up_write_data_sem(struct inode *orig_inode,
 					  struct inode *donor_inode);
-void ext4_inode_double_lock(struct inode *inode1, struct inode *inode2);
-void ext4_inode_double_unlock(struct inode *inode1, struct inode *inode2);
 extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
 			     __u64 start_orig, __u64 start_donor,
 			     __u64 len, __u64 *moved_len);
 
 /* page-io.c */
 extern int __init ext4_init_pageio(void);
-extern void ext4_add_complete_io(ext4_io_end_t *io_end);
 extern void ext4_exit_pageio(void);
-extern void ext4_ioend_shutdown(struct inode *);
-extern void ext4_free_io_end(ext4_io_end_t *io);
 extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags);
-extern void ext4_end_io_work(struct work_struct *work);
+extern ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end);
+extern int ext4_put_io_end(ext4_io_end_t *io_end);
+extern void ext4_put_io_end_defer(ext4_io_end_t *io_end);
+extern void ext4_io_submit_init(struct ext4_io_submit *io,
+				struct writeback_control *wbc);
+extern void ext4_end_io_rsv_work(struct work_struct *work);
 extern void ext4_io_submit(struct ext4_io_submit *io);
 extern int ext4_bio_write_page(struct ext4_io_submit *io,
 			       struct page *page,
 			       int len,
-			       struct writeback_control *wbc);
+			       struct writeback_control *wbc,
+			       bool keep_towrite);
 
 /* mmp.c */
 extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t);
-extern void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp);
-extern int ext4_mmp_csum_verify(struct super_block *sb,
-				struct mmp_struct *mmp);
 
-/* BH_Uninit flag: blocks are allocated but uninitialized on disk */
+/*
+ * Note that these flags will never ever appear in a buffer_head's state flag.
+ * See EXT4_MAP_... to see where this is used.
+ */
 enum ext4_state_bits {
-	BH_Uninit	/* blocks are allocated but uninitialized on disk */
-	  = BH_JBDPrivateStart,
-	BH_AllocFromCluster,	/* allocated blocks were part of already
-				 * allocated cluster. Note that this flag will
-				 * never, ever appear in a buffer_head's state
-				 * flag. See EXT4_MAP_FROM_CLUSTER to see where
-				 * this is used. */
+	BH_AllocFromCluster	/* allocated blocks were part of already
+				 * allocated cluster. */
+	= BH_JBDPrivateStart
 };
 
-BUFFER_FNS(Uninit, uninit)
-TAS_BUFFER_FNS(Uninit, uninit)
-
 /*
  * Add new method to test whether block and inode bitmaps are properly
  * initialized. With uninit_bg reading the block from disk is not enough
diff --git a/fs/ext4/ext4_crypto.h b/fs/ext4/ext4_crypto.h
new file mode 100644
index 0000000..ac7d4e8
--- /dev/null
+++ b/fs/ext4/ext4_crypto.h
@@ -0,0 +1,159 @@
+/*
+ * linux/fs/ext4/ext4_crypto.h
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * This contains encryption header content for ext4
+ *
+ * Written by Michael Halcrow, 2015.
+ */
+
+#ifndef _EXT4_CRYPTO_H
+#define _EXT4_CRYPTO_H
+
+#include <linux/fs.h>
+
+#define EXT4_KEY_DESCRIPTOR_SIZE 8
+
+/* Policy provided via an ioctl on the topmost directory */
+struct ext4_encryption_policy {
+	char version;
+	char contents_encryption_mode;
+	char filenames_encryption_mode;
+	char flags;
+	char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE];
+} __attribute__((__packed__));
+
+#define EXT4_ENCRYPTION_CONTEXT_FORMAT_V1 1
+#define EXT4_KEY_DERIVATION_NONCE_SIZE 16
+
+#define EXT4_POLICY_FLAGS_PAD_4		0x00
+#define EXT4_POLICY_FLAGS_PAD_8		0x01
+#define EXT4_POLICY_FLAGS_PAD_16	0x02
+#define EXT4_POLICY_FLAGS_PAD_32	0x03
+#define EXT4_POLICY_FLAGS_PAD_MASK	0x03
+#define EXT4_POLICY_FLAGS_VALID		0x03
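
Putting the struct and the flag values together, this is roughly how userspace would install a policy on an empty directory with the EXT4_IOC_SET_ENCRYPTION_POLICY number defined earlier. A hedged sketch: the mode numbers and pad flag are taken from the definitions in this patch, while the version byte of 0 and the 8-byte descriptor contents are assumptions of the demo (the descriptor must match a key already loaded into the keyring; see the add_key sketch further down).

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

struct ext4_encryption_policy {
	char version;
	char contents_encryption_mode;
	char filenames_encryption_mode;
	char flags;
	char master_key_descriptor[8];
} __attribute__((__packed__));

#define EXT4_IOC_SET_ENCRYPTION_POLICY \
	_IOR('f', 19, struct ext4_encryption_policy)

int main(int argc, char **argv)
{
	struct ext4_encryption_policy policy = {
		.version = 0,
		.contents_encryption_mode = 1,	/* AES_256_XTS */
		.filenames_encryption_mode = 4,	/* AES_256_CTS */
		.flags = 0x02,			/* PAD_16 */
	};
	int fd;

	memcpy(policy.master_key_descriptor, "demokey!", 8);
	fd = open(argc > 1 ? argv[1] : ".", O_RDONLY);
	if (fd < 0 || ioctl(fd, EXT4_IOC_SET_ENCRYPTION_POLICY, &policy) < 0) {
		perror("set encryption policy");
		return 1;
	}
	return 0;
}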
+
+/**
+ * Encryption context for inode
+ *
+ * Protector format:
+ *  1 byte: Protector format (1 = this version)
+ *  1 byte: File contents encryption mode
+ *  1 byte: File names encryption mode
+ *  1 byte: Reserved
+ *  8 bytes: Master Key descriptor
+ *  16 bytes: Encryption Key derivation nonce
+ */
+struct ext4_encryption_context {
+	char format;
+	char contents_encryption_mode;
+	char filenames_encryption_mode;
+	char flags;
+	char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE];
+	char nonce[EXT4_KEY_DERIVATION_NONCE_SIZE];
+} __attribute__((__packed__));
+
+/* Encryption parameters */
+#define EXT4_XTS_TWEAK_SIZE 16
+#define EXT4_AES_128_ECB_KEY_SIZE 16
+#define EXT4_AES_256_GCM_KEY_SIZE 32
+#define EXT4_AES_256_CBC_KEY_SIZE 32
+#define EXT4_AES_256_CTS_KEY_SIZE 32
+#define EXT4_AES_256_XTS_KEY_SIZE 64
+#define EXT4_MAX_KEY_SIZE 64
+
+#define EXT4_KEY_DESC_PREFIX "ext4:"
+#define EXT4_KEY_DESC_PREFIX_SIZE 5
+
+/* This is passed in from userspace into the kernel keyring */
+struct ext4_encryption_key {
+	__u32 mode;
+	char raw[EXT4_MAX_KEY_SIZE];
+	__u32 size;
+} __attribute__((__packed__));
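
The comment says this struct is handed to the kernel via the keyring; in practice that means add_key(2) with a description built from EXT4_KEY_DESC_PREFIX plus the hex of the policy's master key descriptor. A hedged sketch using libkeyutils (link with -lkeyutils); the "logon" key type and the zeroed key material are assumptions of this demo:

#include <keyutils.h>
#include <linux/types.h>
#include <stdio.h>
#include <string.h>

struct ext4_encryption_key {
	__u32 mode;
	char raw[64];		/* EXT4_MAX_KEY_SIZE */
	__u32 size;
} __attribute__((__packed__));

int main(void)
{
	struct ext4_encryption_key key;
	key_serial_t serial;

	memset(&key, 0, sizeof(key));
	key.mode = 1;		/* AES_256_XTS */
	key.size = 64;		/* EXT4_AES_256_XTS_KEY_SIZE */
	/* key.raw would carry the real key material */

	/* "64656d6f6b657921" is the hex of the 8-byte descriptor "demokey!" */
	serial = add_key("logon", "ext4:64656d6f6b657921",
			 &key, sizeof(key), KEY_SPEC_SESSION_KEYRING);
	if (serial < 0) {
		perror("add_key");
		return 1;
	}
	printf("key serial %d\n", serial);
	return 0;
}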
+
+struct ext4_crypt_info {
+	char		ci_data_mode;
+	char		ci_filename_mode;
+	char		ci_flags;
+	struct crypto_ablkcipher *ci_ctfm;
+	struct key	*ci_keyring_key;
+	char		ci_master_key[EXT4_KEY_DESCRIPTOR_SIZE];
+};
+
+#define EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL             0x00000001
+#define EXT4_WRITE_PATH_FL			      0x00000002
+
+struct ext4_crypto_ctx {
+	union {
+		struct {
+			struct page *bounce_page;       /* Ciphertext page */
+			struct page *control_page;      /* Original page  */
+		} w;
+		struct {
+			struct bio *bio;
+			struct work_struct work;
+		} r;
+		struct list_head free_list;     /* Free list */
+	};
+	char flags;                      /* Flags */
+	char mode;                       /* Encryption mode for tfm */
+};
+
+struct ext4_completion_result {
+	struct completion completion;
+	int res;
+};
+
+#define DECLARE_EXT4_COMPLETION_RESULT(ecr) \
+	struct ext4_completion_result ecr = { \
+		COMPLETION_INITIALIZER((ecr).completion), 0 }
+
+static inline int ext4_encryption_key_size(int mode)
+{
+	switch (mode) {
+	case EXT4_ENCRYPTION_MODE_AES_256_XTS:
+		return EXT4_AES_256_XTS_KEY_SIZE;
+	case EXT4_ENCRYPTION_MODE_AES_256_GCM:
+		return EXT4_AES_256_GCM_KEY_SIZE;
+	case EXT4_ENCRYPTION_MODE_AES_256_CBC:
+		return EXT4_AES_256_CBC_KEY_SIZE;
+	case EXT4_ENCRYPTION_MODE_AES_256_CTS:
+		return EXT4_AES_256_CTS_KEY_SIZE;
+	default:
+		BUG();
+	}
+	return 0;
+}
+
+#define EXT4_FNAME_NUM_SCATTER_ENTRIES	4
+#define EXT4_CRYPTO_BLOCK_SIZE		16
+#define EXT4_FNAME_CRYPTO_DIGEST_SIZE	32
+
+struct ext4_str {
+	unsigned char *name;
+	u32 len;
+};
+
+/**
+ * For encrypted symlinks, the ciphertext length is stored at the beginning
+ * of the string in little-endian format.
+ */
+struct ext4_encrypted_symlink_data {
+	__le16 len;
+	char encrypted_path[1];
+} __attribute__((__packed__));
+
+/**
+ * This function is used to calculate the disk space required to
+ * store a filename of length l in encrypted symlink format.
+ */
+static inline u32 encrypted_symlink_data_len(u32 l)
+{
+	if (l < EXT4_CRYPTO_BLOCK_SIZE)
+		l = EXT4_CRYPTO_BLOCK_SIZE;
+	return (l + sizeof(struct ext4_encrypted_symlink_data) - 1);
+}
+
+#endif	/* _EXT4_CRYPTO_H */
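
encrypted_symlink_data_len() pads short names up to one EXT4_CRYPTO_BLOCK_SIZE (16-byte) block, then adds the 2-byte little-endian length header: sizeof of the packed struct is 3, and the one-byte encrypted_path[] tail is subtracted back out. A quick standalone check of the arithmetic:

#include <stdio.h>
#include <stdint.h>

static uint32_t encrypted_symlink_data_len(uint32_t l)
{
	if (l < 16)		/* EXT4_CRYPTO_BLOCK_SIZE */
		l = 16;
	return l + 3 - 1;	/* sizeof(packed struct) - 1 == 2 */
}

int main(void)
{
	printf("%u\n", encrypted_symlink_data_len(5));	/* 18 */
	printf("%u\n", encrypted_symlink_data_len(16));	/* 18 */
	printf("%u\n", encrypted_symlink_data_len(40));	/* 42 */
	return 0;
}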
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 51bc821..3c93815 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -123,6 +123,7 @@
 struct ext4_ext_path {
 	ext4_fsblk_t			p_block;
 	__u16				p_depth;
+	__u16				p_maxdepth;
 	struct ext4_extent		*p_ext;
 	struct ext4_extent_idx		*p_idx;
 	struct ext4_extent_header	*p_hdr;
@@ -134,30 +135,24 @@
  */
 
 /*
- * Maximum number of logical blocks in a file; ext4_extent's ee_block is
- * __le32.
- */
-#define EXT_MAX_BLOCKS	0xffffffff
-
-/*
  * EXT_INIT_MAX_LEN is the maximum number of blocks we can have in an
  * initialized extent. This is 2^15 and not (2^16 - 1), since we use the
  * MSB of ee_len field in the extent datastructure to signify if this
- * particular extent is an initialized extent or an uninitialized (i.e.
+ * particular extent is an initialized extent or an unwritten (i.e.
  * preallocated).
- * EXT_UNINIT_MAX_LEN is the maximum number of blocks we can have in an
- * uninitialized extent.
+ * EXT_UNWRITTEN_MAX_LEN is the maximum number of blocks we can have in an
+ * unwritten extent.
  * If ee_len is <= 0x8000, it is an initialized extent. Otherwise, it is an
- * uninitialized one. In other words, if MSB of ee_len is set, it is an
- * uninitialized extent with only one special scenario when ee_len = 0x8000.
- * In this case we can not have an uninitialized extent of zero length and
+ * unwritten one. In other words, if MSB of ee_len is set, it is an
+ * unwritten extent with only one special scenario when ee_len = 0x8000.
+ * In this case we can not have an unwritten extent of zero length and
  * thus we treat it as a special case of an initialized extent with 0x8000 length.
  * This way we get better extent-to-group alignment for initialized extents.
  * Hence, the maximum number of blocks we can have in an *initialized*
- * extent is 2^15 (32768) and in an *uninitialized* extent is 2^15-1 (32767).
+ * extent is 2^15 (32768) and in an *unwritten* extent is 2^15-1 (32767).
  */
 #define EXT_INIT_MAX_LEN	(1UL << 15)
-#define EXT_UNINIT_MAX_LEN	(EXT_INIT_MAX_LEN - 1)
+#define EXT_UNWRITTEN_MAX_LEN	(EXT_INIT_MAX_LEN - 1)
 
 
 #define EXT_FIRST_EXTENT(__hdr__) \
@@ -193,14 +188,14 @@
 	return le16_to_cpu(ext_inode_hdr(inode)->eh_depth);
 }
 
-static inline void ext4_ext_mark_uninitialized(struct ext4_extent *ext)
+static inline void ext4_ext_mark_unwritten(struct ext4_extent *ext)
 {
-	/* We can not have an uninitialized extent of zero length! */
+	/* We can not have an unwritten extent of zero length! */
 	BUG_ON((le16_to_cpu(ext->ee_len) & ~EXT_INIT_MAX_LEN) == 0);
 	ext->ee_len |= cpu_to_le16(EXT_INIT_MAX_LEN);
 }
 
-static inline int ext4_ext_is_uninitialized(struct ext4_extent *ext)
+static inline int ext4_ext_is_unwritten(struct ext4_extent *ext)
 {
 	/* Extent with ee_len of 0x8000 is treated as an initialized extent */
 	return (le16_to_cpu(ext->ee_len) > EXT_INIT_MAX_LEN);
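
The uninitialized-to-unwritten rename above changes no on-disk encoding: the MSB of the 16-bit ee_len still marks an unwritten extent, and the single value 0x8000 is still reserved for a full 32768-block initialized extent. A host-endian model of the encoding (the real field is __le16):

#include <stdio.h>
#include <stdint.h>

#define EXT_INIT_MAX_LEN (1U << 15)

static void mark_unwritten(uint16_t *ee_len)
{
	*ee_len |= EXT_INIT_MAX_LEN;
}

static int is_unwritten(uint16_t ee_len)
{
	/* 0x8000 itself is treated as initialized, hence '>' not '>=' */
	return ee_len > EXT_INIT_MAX_LEN;
}

static uint16_t actual_len(uint16_t ee_len)
{
	return is_unwritten(ee_len) ? ee_len - EXT_INIT_MAX_LEN : ee_len;
}

int main(void)
{
	uint16_t len = 100;

	mark_unwritten(&len);
	printf("unwritten=%d len=%u\n",
	       is_unwritten(len), (unsigned)actual_len(len));	/* 1, 100 */
	printf("0x8000 unwritten? %d (32768-block initialized extent)\n",
	       is_unwritten(0x8000));				/* 0 */
	return 0;
}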
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 1be3996..458e78e 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -38,31 +38,43 @@
 /*
  * Wrappers for jbd2_journal_start/end.
  */
-handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
-				  int type, int nblocks)
+static int ext4_journal_check_start(struct super_block *sb)
 {
 	journal_t *journal;
 
 	might_sleep();
-
-	trace_ext4_journal_start(sb, nblocks, _RET_IP_);
 	if (sb->s_flags & MS_RDONLY)
-		return ERR_PTR(-EROFS);
-
+		return -EROFS;
 	WARN_ON(sb->s_writers.frozen == SB_FREEZE_COMPLETE);
 	journal = EXT4_SB(sb)->s_journal;
-	if (!journal)
-		return ext4_get_nojournal();
 	/*
 	 * Special case here: if the journal has aborted behind our
 	 * backs (eg. EIO in the commit thread), then we still need to
 	 * take the FS itself readonly cleanly.
 	 */
-	if (is_journal_aborted(journal)) {
+	if (journal && is_journal_aborted(journal)) {
 		ext4_abort(sb, "Detected aborted journal");
-		return ERR_PTR(-EROFS);
+		return -EROFS;
 	}
-	return jbd2__journal_start(journal, nblocks, GFP_NOFS, type, line);
+	return 0;
+}
+
+handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
+				  int type, int blocks, int rsv_blocks)
+{
+	journal_t *journal;
+	int err;
+
+	trace_ext4_journal_start(sb, blocks, rsv_blocks, _RET_IP_);
+	err = ext4_journal_check_start(sb);
+	if (err < 0)
+		return ERR_PTR(err);
+
+	journal = EXT4_SB(sb)->s_journal;
+	if (!journal)
+		return ext4_get_nojournal();
+	return jbd2__journal_start(journal, blocks, rsv_blocks, GFP_NOFS,
+				   type, line);
 }
 
 int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
@@ -86,9 +98,34 @@
 	return err;
 }
 
-void ext4_journal_abort_handle(const char *caller, unsigned int line,
-			       const char *err_fn, struct buffer_head *bh,
-			       handle_t *handle, int err)
+handle_t *__ext4_journal_start_reserved(handle_t *handle, unsigned int line,
+					int type)
+{
+	struct super_block *sb;
+	int err;
+
+	if (!ext4_handle_valid(handle))
+		return ext4_get_nojournal();
+
+	sb = handle->h_journal->j_private;
+	trace_ext4_journal_start_reserved(sb, handle->h_buffer_credits,
+					  _RET_IP_);
+	err = ext4_journal_check_start(sb);
+	if (err < 0) {
+		jbd2_journal_free_reserved(handle);
+		return ERR_PTR(err);
+	}
+
+	err = jbd2_journal_start_reserved(handle, type, line);
+	if (err < 0)
+		return ERR_PTR(err);
+	return handle;
+}
+
+static void ext4_journal_abort_handle(const char *caller, unsigned int line,
+				      const char *err_fn,
+				      struct buffer_head *bh,
+				      handle_t *handle, int err)
 {
 	char nbuf[16];
 	const char *errstr = ext4_decode_error(NULL, err, nbuf);
@@ -219,10 +256,21 @@
 	set_buffer_prio(bh);
 	if (ext4_handle_valid(handle)) {
 		err = jbd2_journal_dirty_metadata(handle, bh);
-		/* Errors can only happen if there is a bug */
-		if (WARN_ON_ONCE(err)) {
+		/* Errors can only happen due to aborted journal or a nasty bug */
+		if (!is_handle_aborted(handle) && WARN_ON_ONCE(err)) {
 			ext4_journal_abort_handle(where, line, __func__, bh,
 						  handle, err);
+			if (inode == NULL) {
+				pr_err("EXT4: jbd2_journal_dirty_metadata "
+				       "failed: handle type %u started at "
+				       "line %u, credits %u/%u, errcode %d",
+				       handle->h_type,
+				       handle->h_line_no,
+				       handle->h_requested_credits,
+				       handle->h_buffer_credits, err);
+				return err;
+			}
+
 			ext4_error_inode(inode, where, line,
 					 bh->b_blocknr,
 					 "journal_dirty_metadata failed: "
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index c8c6885..9c5b49f 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -102,9 +102,9 @@
 #define EXT4_QUOTA_INIT_BLOCKS(sb) 0
 #define EXT4_QUOTA_DEL_BLOCKS(sb) 0
 #endif
-#define EXT4_MAXQUOTAS_TRANS_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_TRANS_BLOCKS(sb))
-#define EXT4_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_INIT_BLOCKS(sb))
-#define EXT4_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_DEL_BLOCKS(sb))
+#define EXT4_MAXQUOTAS_TRANS_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_TRANS_BLOCKS(sb))
+#define EXT4_MAXQUOTAS_INIT_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_INIT_BLOCKS(sb))
+#define EXT4_MAXQUOTAS_DEL_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_DEL_BLOCKS(sb))
 
 static inline int ext4_jbd2_credits_xattr(struct inode *inode)
 {
@@ -134,7 +134,8 @@
 #define EXT4_HT_MIGRATE          8
 #define EXT4_HT_MOVE_EXTENTS     9
 #define EXT4_HT_XATTR           10
-#define EXT4_HT_MAX             11
+#define EXT4_HT_EXT_CONVERT     11
+#define EXT4_HT_MAX             12
 
 /**
  *   struct ext4_journal_cb_entry - Base structure for callback information.
@@ -196,7 +197,7 @@
  * ext4_journal_callback_del: delete a registered callback
  * @handle: active journal transaction handle on which callback was registered
  * @jce: registered journal callback entry to unregister
- * Return true if object was sucessfully removed
+ * Return true if object was successfully removed
  */
 static inline bool ext4_journal_callback_try_del(handle_t *handle,
 					     struct ext4_journal_cb_entry *jce)
@@ -230,10 +231,6 @@
 /*
  * Wrapper functions with which ext4 calls into JBD.
  */
-void ext4_journal_abort_handle(const char *caller, unsigned int line,
-			       const char *err_fn,
-		struct buffer_head *bh, handle_t *handle, int err);
-
 int __ext4_journal_get_write_access(const char *where, unsigned int line,
 				    handle_t *handle, struct buffer_head *bh);
 
@@ -265,7 +262,7 @@
 	__ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb))
 
 handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
-				  int type, int nblocks);
+				  int type, int blocks, int rsv_blocks);
 int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle);
 
 #define EXT4_NOJOURNAL_MAX_REF_COUNT ((unsigned long) 4096)
@@ -300,21 +297,37 @@
 }
 
 #define ext4_journal_start_sb(sb, type, nblocks)			\
-	__ext4_journal_start_sb((sb), __LINE__, (type), (nblocks))
+	__ext4_journal_start_sb((sb), __LINE__, (type), (nblocks), 0)
 
 #define ext4_journal_start(inode, type, nblocks)			\
-	__ext4_journal_start((inode), __LINE__, (type), (nblocks))
+	__ext4_journal_start((inode), __LINE__, (type), (nblocks), 0)
+
+#define ext4_journal_start_with_reserve(inode, type, blocks, rsv_blocks) \
+	__ext4_journal_start((inode), __LINE__, (type), (blocks), (rsv_blocks))
 
 static inline handle_t *__ext4_journal_start(struct inode *inode,
 					     unsigned int line, int type,
-					     int nblocks)
+					     int blocks, int rsv_blocks)
 {
-	return __ext4_journal_start_sb(inode->i_sb, line, type, nblocks);
+	return __ext4_journal_start_sb(inode->i_sb, line, type, blocks,
+				       rsv_blocks);
 }
 
 #define ext4_journal_stop(handle) \
 	__ext4_journal_stop(__func__, __LINE__, (handle))
 
+#define ext4_journal_start_reserved(handle, type) \
+	__ext4_journal_start_reserved((handle), __LINE__, (type))
+
+handle_t *__ext4_journal_start_reserved(handle_t *handle, unsigned int line,
+					int type);
+
+static inline void ext4_journal_free_reserved(handle_t *handle)
+{
+	if (ext4_handle_valid(handle))
+		jbd2_journal_free_reserved(handle);
+}
+
 static inline handle_t *ext4_journal_current_handle(void)
 {
 	return journal_current_handle();
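The rsv_blocks parameter and the start_reserved/free_reserved helpers above form a two-phase credit scheme: extra credits are set aside when the handle is started, and are later either activated or handed back. A standalone model of that flow with the journal reduced to a bare credit counter (real jbd2 reserved handles are separate handle objects, so this captures only the accounting):

#include <stdio.h>

struct journal { int free_credits; };
struct handle  { struct journal *j; int credits; int reserved; };

static int start_with_reserve(struct journal *j, struct handle *h,
			      int blocks, int rsv_blocks)
{
	if (j->free_credits < blocks + rsv_blocks)
		return -1;			/* jbd2 would block or fail here */
	j->free_credits -= blocks + rsv_blocks;
	h->j = j;
	h->credits = blocks;
	h->reserved = rsv_blocks;
	return 0;
}

/* counterpart of ext4_journal_start_reserved(): activate set-aside credits */
static void start_reserved(struct handle *h)
{
	h->credits += h->reserved;
	h->reserved = 0;
}

/* counterpart of ext4_journal_free_reserved(): hand unused credits back */
static void free_reserved(struct handle *h)
{
	h->j->free_credits += h->reserved;
	h->reserved = 0;
}

int main(void)
{
	struct journal j = { .free_credits = 100 };
	struct handle h, h2;

	if (start_with_reserve(&j, &h, 8, 4))
		return 1;
	start_reserved(&h);			/* reservation used */
	printf("usable credits %d, journal free %d\n", h.credits, j.free_credits);

	if (start_with_reserve(&j, &h2, 8, 4) == 0)
		free_reserved(&h2);		/* reservation returned unused */
	printf("after giving back: journal free %d\n", j.free_credits);
	return 0;
}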
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 7eea761..ce57723 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -37,7 +37,6 @@
 #include <linux/quotaops.h>
 #include <linux/string.h>
 #include <linux/slab.h>
-#include <linux/falloc.h>
 #include <asm/uaccess.h>
 #include <linux/fiemap.h>
 #include "ext4_jbd2.h"
@@ -51,8 +50,8 @@
  */
 #define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
 					due to ENOSPC */
-#define EXT4_EXT_MARK_UNINIT1	0x2  /* mark first half uninitialized */
-#define EXT4_EXT_MARK_UNINIT2	0x4  /* mark second half uninitialized */
+#define EXT4_EXT_MARK_UNWRIT1	0x2  /* mark first half unwritten */
+#define EXT4_EXT_MARK_UNWRIT2	0x4  /* mark second half unwritten */
 
 #define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
 #define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */
@@ -74,8 +73,7 @@
 {
 	struct ext4_extent_tail *et;
 
-	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
-		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	if (!ext4_has_metadata_csum(inode->i_sb))
 		return 1;
 
 	et = find_ext4_extent_tail(eh);
@@ -89,8 +87,7 @@
 {
 	struct ext4_extent_tail *et;
 
-	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
-		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	if (!ext4_has_metadata_csum(inode->i_sb))
 		return;
 
 	et = find_ext4_extent_tail(eh);
@@ -99,14 +96,14 @@
 
 static int ext4_split_extent(handle_t *handle,
 				struct inode *inode,
-				struct ext4_ext_path *path,
+				struct ext4_ext_path **ppath,
 				struct ext4_map_blocks *map,
 				int split_flag,
 				int flags);
 
 static int ext4_split_extent_at(handle_t *handle,
 			     struct inode *inode,
-			     struct ext4_ext_path *path,
+			     struct ext4_ext_path **ppath,
 			     ext4_lblk_t split,
 			     int split_flag,
 			     int flags);
@@ -144,6 +141,7 @@
 {
 	if (path->p_bh) {
 		/* path points to block */
+		BUFFER_TRACE(path->p_bh, "get_write_access");
 		return ext4_journal_get_write_access(handle, path->p_bh);
 	}
 	/* path points to leaf/index in inode body */
@@ -161,6 +159,8 @@
 		     struct inode *inode, struct ext4_ext_path *path)
 {
 	int err;
+
+	WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
 	if (path->p_bh) {
 		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
 		/* path points to block */
@@ -289,6 +289,20 @@
 	return size;
 }
 
+static inline int
+ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
+			   struct ext4_ext_path **ppath, ext4_lblk_t lblk,
+			   int nofail)
+{
+	struct ext4_ext_path *path = *ppath;
+	int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
+
+	return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
+			EXT4_EXT_MARK_UNWRIT1 | EXT4_EXT_MARK_UNWRIT2 : 0,
+			EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO |
+			(nofail ? EXT4_GET_BLOCKS_METADATA_NOFAIL : 0));
+}
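A sketch of the flag composition this helper performs: an unwritten source extent keeps both halves unwritten, and the nofail case adds the flag that lets the split dip into reserved metadata blocks (the flag values below are stand-ins for the real ext4 definitions):

#include <stdio.h>

#define MARK_UNWRIT1		0x2
#define MARK_UNWRIT2		0x4
#define EX_NOCACHE		0x1
#define GB_PRE_IO		0x8
#define GB_METADATA_NOFAIL	0x10

/* both halves of an unwritten extent must stay unwritten after the split */
static int split_flags(int unwritten)
{
	return unwritten ? (MARK_UNWRIT1 | MARK_UNWRIT2) : 0;
}

/* nofail callers may consume reserved blocks rather than fail with ENOSPC */
static int gb_flags(int nofail)
{
	return EX_NOCACHE | GB_PRE_IO | (nofail ? GB_METADATA_NOFAIL : 0);
}

int main(void)
{
	printf("unwritten + nofail:   split=%#x gb=%#x\n", split_flags(1), gb_flags(1));
	printf("written, best-effort: split=%#x gb=%#x\n", split_flags(0), gb_flags(0));
	return 0;
}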
+
 /*
  * Calculate the number of metadata blocks needed
  * to allocate @blocks
@@ -428,7 +442,7 @@
 
 static int __ext4_ext_check(const char *function, unsigned int line,
 			    struct inode *inode, struct ext4_extent_header *eh,
-			    int depth)
+			    int depth, ext4_fsblk_t pblk)
 {
 	const char *error_msg;
 	int max = 0;
@@ -472,43 +486,150 @@
 
 corrupted:
 	ext4_error_inode(inode, function, line, 0,
-			"bad header/extent: %s - magic %x, "
-			"entries %u, max %u(%u), depth %u(%u)",
-			error_msg, le16_to_cpu(eh->eh_magic),
-			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
-			max, le16_to_cpu(eh->eh_depth), depth);
-
+			 "pblk %llu bad header/extent: %s - magic %x, "
+			 "entries %u, max %u(%u), depth %u(%u)",
+			 (unsigned long long) pblk, error_msg,
+			 le16_to_cpu(eh->eh_magic),
+			 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
+			 max, le16_to_cpu(eh->eh_depth), depth);
 	return -EIO;
 }
 
-#define ext4_ext_check(inode, eh, depth)	\
-	__ext4_ext_check(__func__, __LINE__, inode, eh, depth)
+#define ext4_ext_check(inode, eh, depth, pblk)			\
+	__ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk))
 
 int ext4_ext_check_inode(struct inode *inode)
 {
-	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
+	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
 }
 
-static int __ext4_ext_check_block(const char *function, unsigned int line,
-				  struct inode *inode,
-				  struct ext4_extent_header *eh,
-				  int depth,
-				  struct buffer_head *bh)
+static struct buffer_head *
+__read_extent_tree_block(const char *function, unsigned int line,
+			 struct inode *inode, ext4_fsblk_t pblk, int depth,
+			 int flags)
 {
-	int ret;
+	struct buffer_head		*bh;
+	int				err;
 
-	if (buffer_verified(bh))
-		return 0;
-	ret = ext4_ext_check(inode, eh, depth);
-	if (ret)
-		return ret;
+	bh = sb_getblk(inode->i_sb, pblk);
+	if (unlikely(!bh))
+		return ERR_PTR(-ENOMEM);
+
+	if (!bh_uptodate_or_lock(bh)) {
+		trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
+		err = bh_submit_read(bh);
+		if (err < 0)
+			goto errout;
+	}
+	if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
+		return bh;
+	err = __ext4_ext_check(function, line, inode,
+			       ext_block_hdr(bh), depth, pblk);
+	if (err)
+		goto errout;
 	set_buffer_verified(bh);
+	/*
+	 * If this is a leaf block, cache all of its entries
+	 */
+	if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
+		struct ext4_extent_header *eh = ext_block_hdr(bh);
+		struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
+		ext4_lblk_t prev = 0;
+		int i;
+
+		for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
+			unsigned int status = EXTENT_STATUS_WRITTEN;
+			ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
+			int len = ext4_ext_get_actual_len(ex);
+
+			if (prev && (prev != lblk))
+				ext4_es_cache_extent(inode, prev,
+						     lblk - prev, ~0,
+						     EXTENT_STATUS_HOLE);
+
+			if (ext4_ext_is_unwritten(ex))
+				status = EXTENT_STATUS_UNWRITTEN;
+			ext4_es_cache_extent(inode, lblk, len,
+					     ext4_ext_pblock(ex), status);
+			prev = lblk + len;
+		}
+	}
+	return bh;
+errout:
+	put_bh(bh);
+	return ERR_PTR(err);
+
+}
+
+#define read_extent_tree_block(inode, pblk, depth, flags)		\
+	__read_extent_tree_block(__func__, __LINE__, (inode), (pblk),   \
+				 (depth), (flags))
+
+/*
+ * This function is called to cache a file's extent information in the
+ * extent status tree
+ */
+int ext4_ext_precache(struct inode *inode)
+{
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	struct ext4_ext_path *path = NULL;
+	struct buffer_head *bh;
+	int i = 0, depth, ret = 0;
+
+	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+		return 0;	/* not an extent-mapped inode */
+
+	down_read(&ei->i_data_sem);
+	depth = ext_depth(inode);
+
+	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
+		       GFP_NOFS);
+	if (path == NULL) {
+		up_read(&ei->i_data_sem);
+		return -ENOMEM;
+	}
+
+	/* Don't cache anything if there are no external extent blocks */
+	if (depth == 0)
+		goto out;
+	path[0].p_hdr = ext_inode_hdr(inode);
+	ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
+	if (ret)
+		goto out;
+	path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
+	while (i >= 0) {
+		/*
+		 * If this is a leaf block or we've reached the end of
+		 * the index block, go up
+		 */
+		if ((i == depth) ||
+		    path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
+			brelse(path[i].p_bh);
+			path[i].p_bh = NULL;
+			i--;
+			continue;
+		}
+		bh = read_extent_tree_block(inode,
+					    ext4_idx_pblock(path[i].p_idx++),
+					    depth - i - 1,
+					    EXT4_EX_FORCE_CACHE);
+		if (IS_ERR(bh)) {
+			ret = PTR_ERR(bh);
+			break;
+		}
+		i++;
+		path[i].p_bh = bh;
+		path[i].p_hdr = ext_block_hdr(bh);
+		path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
+	}
+	ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
+out:
+	up_read(&ei->i_data_sem);
+	ext4_ext_drop_refs(path);
+	kfree(path);
 	return ret;
 }
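The precache loop above is an iterative depth-first walk that keeps one cursor per tree level instead of recursing; a standalone model over a toy index tree (array-of-children nodes standing in for on-disk extent blocks, the per-level cursor standing in for p_idx):

#include <stdio.h>

#define MAX_DEPTH 8

struct node {
	int nr;				/* number of children (0 => nothing below) */
	struct node *child[4];
};

static void walk(struct node *root, int depth)
{
	struct { struct node *n; int idx; } path[MAX_DEPTH];
	int i = 0;

	path[0].n = root;
	path[0].idx = 0;
	while (i >= 0) {
		/* leaf level reached, or index block exhausted: go up */
		if (i == depth || path[i].idx >= path[i].n->nr) {
			i--;
			continue;
		}
		/* descend into the next child, advancing this level's cursor */
		struct node *child = path[i].n->child[path[i].idx++];
		i++;
		path[i].n = child;
		path[i].idx = 0;
		printf("read block at level %d\n", i);
	}
}

int main(void)
{
	struct node l1 = { 0 }, l2 = { 0 }, l3 = { 0 };
	struct node mid = { 2, { &l1, &l2 } };
	struct node root = { 2, { &mid, &l3 } };

	walk(&root, 2);
	return 0;
}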
 
-#define ext4_ext_check_block(inode, eh, depth, bh)	\
-	__ext4_ext_check_block(__func__, __LINE__, inode, eh, depth, bh)
-
 #ifdef EXT_DEBUG
 static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
 {
@@ -522,7 +643,7 @@
 		} else if (path->p_ext) {
 			ext_debug("  %d:[%d]%d:%llu ",
 				  le32_to_cpu(path->p_ext->ee_block),
-				  ext4_ext_is_uninitialized(path->p_ext),
+				  ext4_ext_is_unwritten(path->p_ext),
 				  ext4_ext_get_actual_len(path->p_ext),
 				  ext4_ext_pblock(path->p_ext));
 		} else
@@ -548,7 +669,7 @@
 
 	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
 		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
-			  ext4_ext_is_uninitialized(ex),
+			  ext4_ext_is_unwritten(ex),
 			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
 	}
 	ext_debug("\n");
@@ -579,7 +700,7 @@
 		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
 				le32_to_cpu(ex->ee_block),
 				ext4_ext_pblock(ex),
-				ext4_ext_is_uninitialized(ex),
+				ext4_ext_is_unwritten(ex),
 				ext4_ext_get_actual_len(ex),
 				newblock);
 		ex++;
@@ -594,9 +715,11 @@
 
 void ext4_ext_drop_refs(struct ext4_ext_path *path)
 {
-	int depth = path->p_depth;
-	int i;
+	int depth, i;
 
+	if (!path)
+		return;
+	depth = path->p_depth;
 	for (i = 0; i <= depth; i++, path++)
 		if (path->p_bh) {
 			brelse(path->p_bh);
@@ -704,7 +827,7 @@
 	ext_debug("  -> %d:%llu:[%d]%d ",
 			le32_to_cpu(path->p_ext->ee_block),
 			ext4_ext_pblock(path->p_ext),
-			ext4_ext_is_uninitialized(path->p_ext),
+			ext4_ext_is_unwritten(path->p_ext),
 			ext4_ext_get_actual_len(path->p_ext));
 
 #ifdef CHECK_BINSEARCH
@@ -740,24 +863,32 @@
 }
 
 struct ext4_ext_path *
-ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
-					struct ext4_ext_path *path)
+ext4_find_extent(struct inode *inode, ext4_lblk_t block,
+		 struct ext4_ext_path **orig_path, int flags)
 {
 	struct ext4_extent_header *eh;
 	struct buffer_head *bh;
-	short int depth, i, ppos = 0, alloc = 0;
+	struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
+	short int depth, i, ppos = 0;
 	int ret;
 
 	eh = ext_inode_hdr(inode);
 	depth = ext_depth(inode);
 
-	/* account possible depth increase */
+	if (path) {
+		ext4_ext_drop_refs(path);
+		if (depth > path[0].p_maxdepth) {
+			kfree(path);
+			*orig_path = path = NULL;
+		}
+	}
 	if (!path) {
+		/* account possible depth increase */
 		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
 				GFP_NOFS);
-		if (!path)
+		if (unlikely(!path))
 			return ERR_PTR(-ENOMEM);
-		alloc = 1;
+		path[0].p_maxdepth = depth + 1;
 	}
 	path[0].p_hdr = eh;
 	path[0].p_bh = NULL;
@@ -773,20 +904,13 @@
 		path[ppos].p_depth = i;
 		path[ppos].p_ext = NULL;
 
-		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
-		if (unlikely(!bh)) {
-			ret = -ENOMEM;
+		bh = read_extent_tree_block(inode, path[ppos].p_block, --i,
+					    flags);
+		if (unlikely(IS_ERR(bh))) {
+			ret = PTR_ERR(bh);
 			goto err;
 		}
-		if (!bh_uptodate_or_lock(bh)) {
-			trace_ext4_ext_load_extent(inode, block,
-						path[ppos].p_block);
-			ret = bh_submit_read(bh);
-			if (ret < 0) {
-				put_bh(bh);
-				goto err;
-			}
-		}
+
 		eh = ext_block_hdr(bh);
 		ppos++;
 		if (unlikely(ppos > depth)) {
@@ -798,11 +922,6 @@
 		}
 		path[ppos].p_bh = bh;
 		path[ppos].p_hdr = eh;
-		i--;
-
-		ret = ext4_ext_check_block(inode, eh, i, bh);
-		if (ret < 0)
-			goto err;
 	}
 
 	path[ppos].p_depth = i;
@@ -821,8 +940,9 @@
 
 err:
 	ext4_ext_drop_refs(path);
-	if (alloc)
-		kfree(path);
+	kfree(path);
+	if (orig_path)
+		*orig_path = NULL;
 	return ERR_PTR(ret);
 }
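ext4_find_extent() now manages the caller's path buffer itself: it drops stale references, reallocates when the tree has grown past the recorded capacity (p_maxdepth), and clears the caller's pointer on error so a stale path can't be reused. A userspace model of that grow-or-reuse contract:

#include <stdlib.h>
#include <stdio.h>

struct level { int maxdepth; /* only meaningful in slot 0 */ };

static struct level *find_path(struct level **orig, int depth)
{
	struct level *path = orig ? *orig : NULL;

	/* tree got deeper than the buffer was sized for: throw it away */
	if (path && depth > path[0].maxdepth) {
		free(path);
		path = NULL;
		if (orig)
			*orig = NULL;
	}
	if (!path) {
		path = calloc(depth + 2, sizeof(*path)); /* room for one grow */
		if (!path)
			goto err;
		path[0].maxdepth = depth + 1;
	}
	if (orig)
		*orig = path;
	return path;
err:
	if (orig)
		*orig = NULL;		/* never leave the caller a stale pointer */
	return NULL;
}

int main(void)
{
	struct level *path = NULL;

	path = find_path(&path, 1);	/* allocates, capacity = 2 levels */
	path = find_path(&path, 2);	/* still fits: buffer reused */
	path = find_path(&path, 5);	/* too deep: freed and reallocated */
	printf("capacity now %d levels\n", path ? path[0].maxdepth : -1);
	free(path);
	return 0;
}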
 
@@ -1149,16 +1269,24 @@
  *   just created block
  */
 static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
-				 unsigned int flags,
-				 struct ext4_extent *newext)
+				 unsigned int flags)
 {
 	struct ext4_extent_header *neh;
 	struct buffer_head *bh;
-	ext4_fsblk_t newblock;
+	ext4_fsblk_t newblock, goal = 0;
+	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
 	int err = 0;
 
-	newblock = ext4_ext_new_meta_block(handle, inode, NULL,
-		newext, &err, flags);
+	/* Try to prepend new index to old one */
+	if (ext_depth(inode))
+		goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
+	if (goal > le32_to_cpu(es->s_first_data_block)) {
+		flags |= EXT4_MB_HINT_TRY_GOAL;
+		goal--;
+	} else
+		goal = ext4_inode_to_goal_block(inode);
+	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
+					NULL, &err);
 	if (newblock == 0)
 		return err;
 
@@ -1223,10 +1351,12 @@
  * if no free index is found, then it requests in-depth growing.
  */
 static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
-				    unsigned int flags,
-				    struct ext4_ext_path *path,
+				    unsigned int mb_flags,
+				    unsigned int gb_flags,
+				    struct ext4_ext_path **ppath,
 				    struct ext4_extent *newext)
 {
+	struct ext4_ext_path *path = *ppath;
 	struct ext4_ext_path *curp;
 	int depth, i, err = 0;
 
@@ -1245,28 +1375,26 @@
 	if (EXT_HAS_FREE_INDEX(curp)) {
 		/* if we found index with free entry, then use that
 		 * entry: create all needed subtree and add new leaf */
-		err = ext4_ext_split(handle, inode, flags, path, newext, i);
+		err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
 		if (err)
 			goto out;
 
 		/* refill path */
-		ext4_ext_drop_refs(path);
-		path = ext4_ext_find_extent(inode,
+		path = ext4_find_extent(inode,
 				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
-				    path);
+				    ppath, gb_flags);
 		if (IS_ERR(path))
 			err = PTR_ERR(path);
 	} else {
 		/* tree is full, time to grow in depth */
-		err = ext4_ext_grow_indepth(handle, inode, flags, newext);
+		err = ext4_ext_grow_indepth(handle, inode, mb_flags);
 		if (err)
 			goto out;
 
 		/* refill path */
-		ext4_ext_drop_refs(path);
-		path = ext4_ext_find_extent(inode,
+		path = ext4_find_extent(inode,
 				   (ext4_lblk_t)le32_to_cpu(newext->ee_block),
-				    path);
+				    ppath, gb_flags);
 		if (IS_ERR(path)) {
 			err = PTR_ERR(path);
 			goto out;
@@ -1437,29 +1565,21 @@
 	ix++;
 	block = ext4_idx_pblock(ix);
 	while (++depth < path->p_depth) {
-		bh = sb_bread(inode->i_sb, block);
-		if (bh == NULL)
-			return -EIO;
-		eh = ext_block_hdr(bh);
 		/* subtract from p_depth to get proper eh_depth */
-		if (ext4_ext_check_block(inode, eh,
-					 path->p_depth - depth, bh)) {
-			put_bh(bh);
-			return -EIO;
-		}
+		bh = read_extent_tree_block(inode, block,
+					    path->p_depth - depth, 0);
+		if (IS_ERR(bh))
+			return PTR_ERR(bh);
+		eh = ext_block_hdr(bh);
 		ix = EXT_FIRST_INDEX(eh);
 		block = ext4_idx_pblock(ix);
 		put_bh(bh);
 	}
 
-	bh = sb_bread(inode->i_sb, block);
-	if (bh == NULL)
-		return -EIO;
+	bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0);
+	if (IS_ERR(bh))
+		return PTR_ERR(bh);
 	eh = ext_block_hdr(bh);
-	if (ext4_ext_check_block(inode, eh, path->p_depth - depth, bh)) {
-		put_bh(bh);
-		return -EIO;
-	}
 	ex = EXT_FIRST_EXTENT(eh);
 found_extent:
 	*logical = le32_to_cpu(ex->ee_block);
@@ -1477,7 +1597,7 @@
  * allocated block. Thus, index entries have to be consistent
  * with leaves.
  */
-static ext4_lblk_t
+ext4_lblk_t
 ext4_ext_next_allocated_block(struct ext4_ext_path *path)
 {
 	int depth;
@@ -1603,22 +1723,17 @@
 ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
 				struct ext4_extent *ex2)
 {
-	unsigned short ext1_ee_len, ext2_ee_len, max_len;
+	unsigned short ext1_ee_len, ext2_ee_len;
 
 	/*
 	 * Make sure that both extents are initialized. We don't merge
-	 * uninitialized extents so that we can be sure that end_io code has
+	 * unwritten extents so that we can be sure that end_io code has
 	 * the extent that was written properly split out and conversion to
 	 * initialized is trivial.
 	 */
-	if (ext4_ext_is_uninitialized(ex1) || ext4_ext_is_uninitialized(ex2))
+	if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
 		return 0;
 
-	if (ext4_ext_is_uninitialized(ex1))
-		max_len = EXT_UNINIT_MAX_LEN;
-	else
-		max_len = EXT_INIT_MAX_LEN;
-
 	ext1_ee_len = ext4_ext_get_actual_len(ex1);
 	ext2_ee_len = ext4_ext_get_actual_len(ex2);
 
@@ -1631,7 +1746,12 @@
 	 * as an RO_COMPAT feature, refuse to merge to extents if
 	 * this can result in the top bit of ee_len being set.
 	 */
-	if (ext1_ee_len + ext2_ee_len > max_len)
+	if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
+		return 0;
+	if (ext4_ext_is_unwritten(ex1) &&
+	    (ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN) ||
+	     atomic_read(&EXT4_I(inode)->i_unwritten) ||
+	     (ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)))
 		return 0;
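The asymmetric length caps checked here come straight from the on-disk encoding: ee_len is 16 bits and any value above 2^15 means "unwritten", so an initialized extent can describe 32768 blocks while an unwritten one tops out at 32767. A standalone model mirroring the ext4 helpers:

#include <stdio.h>
#include <stdint.h>

#define EXT_INIT_MAX_LEN	(1U << 15)		/* 32768 */
#define EXT_UNWRITTEN_MAX_LEN	(EXT_INIT_MAX_LEN - 1)	/* 32767 */

static int is_unwritten(uint16_t ee_len)
{
	return ee_len > EXT_INIT_MAX_LEN;
}

static unsigned actual_len(uint16_t ee_len)
{
	return ee_len <= EXT_INIT_MAX_LEN ? ee_len : ee_len - EXT_INIT_MAX_LEN;
}

static uint16_t mark_unwritten(uint16_t len)
{
	return len + EXT_INIT_MAX_LEN;	/* sets the high bit */
}

int main(void)
{
	uint16_t init_max = EXT_INIT_MAX_LEN;	/* 0x8000 counts as initialized */
	uint16_t unwrit   = mark_unwritten(100);

	printf("0x%04x: unwritten=%d len=%u\n", init_max,
	       is_unwritten(init_max), actual_len(init_max));
	printf("0x%04x: unwritten=%d len=%u\n", unwrit,
	       is_unwritten(unwrit), actual_len(unwrit));
	return 0;
}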
 #ifdef AGGRESSIVE_TEST
 	if (ext1_ee_len >= 4)
@@ -1656,8 +1776,7 @@
 {
 	struct ext4_extent_header *eh;
 	unsigned int depth, len;
-	int merge_done = 0;
-	int uninitialized = 0;
+	int merge_done = 0, unwritten;
 
 	depth = ext_depth(inode);
 	BUG_ON(path[depth].p_hdr == NULL);
@@ -1667,12 +1786,11 @@
 		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
 			break;
 		/* merge with next extent! */
-		if (ext4_ext_is_uninitialized(ex))
-			uninitialized = 1;
+		unwritten = ext4_ext_is_unwritten(ex);
 		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
 				+ ext4_ext_get_actual_len(ex + 1));
-		if (uninitialized)
-			ext4_ext_mark_uninitialized(ex);
+		if (unwritten)
+			ext4_ext_mark_unwritten(ex);
 
 		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
 			len = (EXT_LAST_EXTENT(eh) - ex - 1)
@@ -1722,6 +1840,7 @@
 		sizeof(struct ext4_extent_idx);
 	s += sizeof(struct ext4_extent_header);
 
+	path[1].p_maxdepth = path[0].p_maxdepth;
 	memcpy(path[0].p_hdr, path[1].p_hdr, s);
 	path[0].p_depth = 0;
 	path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
@@ -1817,18 +1936,20 @@
  * creating new leaf in the no-space case.
  */
 int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
-				struct ext4_ext_path *path,
-				struct ext4_extent *newext, int flag)
+				struct ext4_ext_path **ppath,
+				struct ext4_extent *newext, int gb_flags)
 {
+	struct ext4_ext_path *path = *ppath;
 	struct ext4_extent_header *eh;
 	struct ext4_extent *ex, *fex;
 	struct ext4_extent *nearex; /* nearest extent */
 	struct ext4_ext_path *npath = NULL;
 	int depth, len, err;
 	ext4_lblk_t next;
-	unsigned uninitialized = 0;
-	int flags = 0;
+	int mb_flags = 0, unwritten;
 
+	if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
+		mb_flags |= EXT4_MB_DELALLOC_RESERVED;
 	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
 		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
 		return -EIO;
@@ -1842,12 +1963,12 @@
 	}
 
 	/* try to insert block into found extent and return */
-	if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)) {
+	if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {
 
 		/*
 		 * Try to see whether we should rather test the extent on
 		 * right from ex, or from the left of ex. This is because
-		 * ext4_ext_find_extent() can return either extent on the
+		 * ext4_find_extent() can return either extent on the
 		 * left, or on the right from the searched position. This
 		 * will make merging more effective.
 		 */
@@ -1867,29 +1988,21 @@
 		if (ext4_can_extents_be_merged(inode, ex, newext)) {
 			ext_debug("append [%d]%d block to %u:[%d]%d"
 				  "(from %llu)\n",
-				  ext4_ext_is_uninitialized(newext),
+				  ext4_ext_is_unwritten(newext),
 				  ext4_ext_get_actual_len(newext),
 				  le32_to_cpu(ex->ee_block),
-				  ext4_ext_is_uninitialized(ex),
+				  ext4_ext_is_unwritten(ex),
 				  ext4_ext_get_actual_len(ex),
 				  ext4_ext_pblock(ex));
 			err = ext4_ext_get_access(handle, inode,
 						  path + depth);
 			if (err)
 				return err;
-
-			/*
-			 * ext4_can_extents_be_merged should have checked
-			 * that either both extents are uninitialized, or
-			 * both aren't. Thus we need to check only one of
-			 * them here.
-			 */
-			if (ext4_ext_is_uninitialized(ex))
-				uninitialized = 1;
+			unwritten = ext4_ext_is_unwritten(ex);
 			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
 					+ ext4_ext_get_actual_len(newext));
-			if (uninitialized)
-				ext4_ext_mark_uninitialized(ex);
+			if (unwritten)
+				ext4_ext_mark_unwritten(ex);
 			eh = path[depth].p_hdr;
 			nearex = ex;
 			goto merge;
@@ -1901,10 +2014,10 @@
 			ext_debug("prepend %u[%d]%d block to %u:[%d]%d"
 				  "(from %llu)\n",
 				  le32_to_cpu(newext->ee_block),
-				  ext4_ext_is_uninitialized(newext),
+				  ext4_ext_is_unwritten(newext),
 				  ext4_ext_get_actual_len(newext),
 				  le32_to_cpu(ex->ee_block),
-				  ext4_ext_is_uninitialized(ex),
+				  ext4_ext_is_unwritten(ex),
 				  ext4_ext_get_actual_len(ex),
 				  ext4_ext_pblock(ex));
 			err = ext4_ext_get_access(handle, inode,
@@ -1912,20 +2025,13 @@
 			if (err)
 				return err;
 
-			/*
-			 * ext4_can_extents_be_merged should have checked
-			 * that either both extents are uninitialized, or
-			 * both aren't. Thus we need to check only one of
-			 * them here.
-			 */
-			if (ext4_ext_is_uninitialized(ex))
-				uninitialized = 1;
+			unwritten = ext4_ext_is_unwritten(ex);
 			ex->ee_block = newext->ee_block;
 			ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
 			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
 					+ ext4_ext_get_actual_len(newext));
-			if (uninitialized)
-				ext4_ext_mark_uninitialized(ex);
+			if (unwritten)
+				ext4_ext_mark_unwritten(ex);
 			eh = path[depth].p_hdr;
 			nearex = ex;
 			goto merge;
@@ -1945,7 +2051,7 @@
 	if (next != EXT_MAX_BLOCKS) {
 		ext_debug("next leaf block - %u\n", next);
 		BUG_ON(npath != NULL);
-		npath = ext4_ext_find_extent(inode, next, NULL);
+		npath = ext4_find_extent(inode, next, NULL, 0);
 		if (IS_ERR(npath))
 			return PTR_ERR(npath);
 		BUG_ON(npath->p_depth != path->p_depth);
@@ -1964,9 +2070,10 @@
 	 * There is no free space in the found leaf.
 	 * We're gonna add a new leaf in the tree.
 	 */
-	if (flag & EXT4_GET_BLOCKS_METADATA_NOFAIL)
-		flags = EXT4_MB_USE_RESERVED;
-	err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
+	if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
+		mb_flags |= EXT4_MB_USE_RESERVED;
+	err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
+				       ppath, newext);
 	if (err)
 		goto cleanup;
 	depth = ext_depth(inode);
@@ -1984,7 +2091,7 @@
 		ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
 				le32_to_cpu(newext->ee_block),
 				ext4_ext_pblock(newext),
-				ext4_ext_is_uninitialized(newext),
+				ext4_ext_is_unwritten(newext),
 				ext4_ext_get_actual_len(newext));
 		nearex = EXT_FIRST_EXTENT(eh);
 	} else {
@@ -1995,7 +2102,7 @@
 					"nearest %p\n",
 					le32_to_cpu(newext->ee_block),
 					ext4_ext_pblock(newext),
-					ext4_ext_is_uninitialized(newext),
+					ext4_ext_is_unwritten(newext),
 					ext4_ext_get_actual_len(newext),
 					nearex);
 			nearex++;
@@ -2006,7 +2113,7 @@
 					"nearest %p\n",
 					le32_to_cpu(newext->ee_block),
 					ext4_ext_pblock(newext),
-					ext4_ext_is_uninitialized(newext),
+					ext4_ext_is_unwritten(newext),
 					ext4_ext_get_actual_len(newext),
 					nearex);
 		}
@@ -2016,7 +2123,7 @@
 					"move %d extents from 0x%p to 0x%p\n",
 					le32_to_cpu(newext->ee_block),
 					ext4_ext_pblock(newext),
-					ext4_ext_is_uninitialized(newext),
+					ext4_ext_is_unwritten(newext),
 					ext4_ext_get_actual_len(newext),
 					len, nearex, nearex + 1);
 			memmove(nearex + 1, nearex,
@@ -2032,7 +2139,7 @@
 
 merge:
 	/* try to merge extents */
-	if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
+	if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
 		ext4_ext_try_to_merge(handle, inode, path, nearex);
 
 
@@ -2044,10 +2151,8 @@
 	err = ext4_ext_dirty(handle, inode, path + path->p_depth);
 
 cleanup:
-	if (npath) {
-		ext4_ext_drop_refs(npath);
-		kfree(npath);
-	}
+	ext4_ext_drop_refs(npath);
+	kfree(npath);
 	return err;
 }
 
@@ -2069,13 +2174,7 @@
 		/* find extent for this block */
 		down_read(&EXT4_I(inode)->i_data_sem);
 
-		if (path && ext_depth(inode) != depth) {
-			/* depth was changed. we have to realloc path */
-			kfree(path);
-			path = NULL;
-		}
-
-		path = ext4_ext_find_extent(inode, block, path);
+		path = ext4_find_extent(inode, block, &path, 0);
 		if (IS_ERR(path)) {
 			up_read(&EXT4_I(inode)->i_data_sem);
 			err = PTR_ERR(path);
@@ -2092,7 +2191,6 @@
 		}
 		ex = path[depth].p_ext;
 		next = ext4_ext_next_allocated_block(path);
-		ext4_ext_drop_refs(path);
 
 		flags = 0;
 		exists = 0;
@@ -2138,7 +2236,7 @@
 			es.es_lblk = le32_to_cpu(ex->ee_block);
 			es.es_len = ext4_ext_get_actual_len(ex);
 			es.es_pblk = ext4_ext_pblock(ex);
-			if (ext4_ext_is_uninitialized(ex))
+			if (ext4_ext_is_unwritten(ex))
 				flags |= FIEMAP_EXTENT_UNWRITTEN;
 		}
 
@@ -2150,7 +2248,8 @@
 		next_del = ext4_find_delayed_extent(inode, &es);
 		if (!exists && next_del) {
 			exists = 1;
-			flags |= FIEMAP_EXTENT_DELALLOC;
+			flags |= (FIEMAP_EXTENT_DELALLOC |
+				  FIEMAP_EXTENT_UNKNOWN);
 		}
 		up_read(&EXT4_I(inode)->i_data_sem);
 
@@ -2201,11 +2300,8 @@
 		block = es.es_lblk + es.es_len;
 	}
 
-	if (path) {
-		ext4_ext_drop_refs(path);
-		kfree(path);
-	}
-
+	ext4_ext_drop_refs(path);
+	kfree(path);
 	return err;
 }
 
@@ -2219,8 +2315,8 @@
 				ext4_lblk_t block)
 {
 	int depth = ext_depth(inode);
-	unsigned long len;
-	ext4_lblk_t lblock;
+	unsigned long len = 0;
+	ext4_lblk_t lblock = 0;
 	struct ext4_extent *ex;
 
 	ex = path[depth].p_ext;
@@ -2257,7 +2353,6 @@
 			ext4_es_insert_extent(inode, lblock, len, ~0,
 					      EXTENT_STATUS_HOLE);
 	} else {
-		lblock = len = 0;
 		BUG();
 	}
 
@@ -2353,17 +2448,15 @@
 }
 
 /*
- * How many index/leaf blocks need to change/allocate to modify nrblocks?
+ * How many index/leaf blocks need to be changed/allocated to add @extents extents?
  *
- * if nrblocks are fit in a single extent (chunk flag is 1), then
- * in the worse case, each tree level index/leaf need to be changed
- * if the tree split due to insert a new extent, then the old tree
- * index/leaf need to be updated too
+ * If we add a single extent, then in the worst case, each tree level
+ * index/leaf needs to be changed if the tree splits.
  *
- * If the nrblocks are discontiguous, they could cause
- * the whole tree split more than once, but this is really rare.
+ * If more extents are inserted, they could cause the whole tree to split
+ * more than once, but this is really rare.
  */
-int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
+int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
 {
 	int index;
 	int depth;
@@ -2374,7 +2467,7 @@
 
 	depth = ext_depth(inode);
 
-	if (chunk)
+	if (extents <= 1)
 		index = depth * 2;
 	else
 		index = depth * 3;
@@ -2382,20 +2475,24 @@
 	return index;
 }
 
+static inline int get_default_free_blocks_flags(struct inode *inode)
+{
+	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
+		return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
+	else if (ext4_should_journal_data(inode))
+		return EXT4_FREE_BLOCKS_FORGET;
+	return 0;
+}
+
 static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
 			      struct ext4_extent *ex,
-			      ext4_fsblk_t *partial_cluster,
+			      long long *partial_cluster,
 			      ext4_lblk_t from, ext4_lblk_t to)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 	unsigned short ee_len =  ext4_ext_get_actual_len(ex);
 	ext4_fsblk_t pblk;
-	int flags = 0;
-
-	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
-		flags |= EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
-	else if (ext4_should_journal_data(inode))
-		flags |= EXT4_FREE_BLOCKS_FORGET;
+	int flags = get_default_free_blocks_flags(inode);
 
 	/*
 	 * For bigalloc file systems, we never free a partial cluster
@@ -2413,7 +2510,8 @@
 	 * partial cluster here.
 	 */
 	pblk = ext4_ext_pblock(ex) + ee_len - 1;
-	if (*partial_cluster && (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
+	if ((*partial_cluster > 0) &&
+	    (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
 		ext4_free_blocks(handle, inode, NULL,
 				 EXT4_C2B(sbi, *partial_cluster),
 				 sbi->s_cluster_ratio, flags);
@@ -2439,41 +2537,46 @@
 	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
 		/* tail removal */
 		ext4_lblk_t num;
+		unsigned int unaligned;
 
 		num = le32_to_cpu(ex->ee_block) + ee_len - from;
 		pblk = ext4_ext_pblock(ex) + ee_len - num;
-		ext_debug("free last %u blocks starting %llu\n", num, pblk);
+		/*
+		 * Usually we want to free the partial cluster at the end of
+		 * the extent, except when the cluster is still used by
+		 * another extent (partial_cluster is negative).
+		 */
+		if (*partial_cluster < 0 &&
+		    -(*partial_cluster) == EXT4_B2C(sbi, pblk + num - 1))
+			flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
+
+		ext_debug("free last %u blocks starting %llu partial %lld\n",
+			  num, pblk, *partial_cluster);
 		ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
 		/*
 		 * If the block range to be freed didn't start at the
 		 * beginning of a cluster, and we removed the entire
-		 * extent, save the partial cluster here, since we
-		 * might need to delete if we determine that the
-		 * truncate operation has removed all of the blocks in
-		 * the cluster.
+		 * extent and the cluster is not used by any other extent,
+		 * save the partial cluster here, since we might need to
+		 * delete if we determine that the truncate operation has
+		 * removed all of the blocks in the cluster.
+		 *
+		 * On the other hand, if we did not manage to free the whole
+		 * extent, we have to mark the cluster as used (store negative
+		 * cluster number in partial_cluster).
 		 */
-		if (EXT4_PBLK_COFF(sbi, pblk) &&
-		    (ee_len == num))
+		unaligned = EXT4_PBLK_COFF(sbi, pblk);
+		if (unaligned && (ee_len == num) &&
+		    (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk))))
 			*partial_cluster = EXT4_B2C(sbi, pblk);
-		else
+		else if (unaligned)
+			*partial_cluster = -((long long)EXT4_B2C(sbi, pblk));
+		else if (*partial_cluster > 0)
 			*partial_cluster = 0;
-	} else if (from == le32_to_cpu(ex->ee_block)
-		   && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
-		/* head removal */
-		ext4_lblk_t num;
-		ext4_fsblk_t start;
-
-		num = to - from;
-		start = ext4_ext_pblock(ex);
-
-		ext_debug("free first %u blocks starting %llu\n", num, start);
-		ext4_free_blocks(handle, inode, NULL, start, num, flags);
-
-	} else {
-		printk(KERN_INFO "strange request: removal(2) "
-				"%u-%u from %u:%u\n",
-				from, to, le32_to_cpu(ex->ee_block), ee_len);
-	}
+	} else
+		ext4_error(sbi->s_sb, "strange request: removal(2) "
+			   "%u-%u from %u:%u\n",
+			   from, to, le32_to_cpu(ex->ee_block), ee_len);
 	return 0;
 }
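The change from an unsigned to a signed partial_cluster above encodes two states for bigalloc: a positive value names a cluster whose tail blocks were freed and which may still need freeing as a whole, while a negative value pins a cluster known to be shared with a surviving extent. A standalone model of the update rule applied on tail removal (cluster mapping reduced to a fixed ratio; "unaligned" means the freed range does not start on a cluster boundary):

#include <stdio.h>

#define CLUSTER_RATIO	8
#define B2C(pblk)	((long long)((pblk) / CLUSTER_RATIO))

static void note_tail_removal(long long *partial, unsigned long long pblk,
			      int whole_extent_gone, int unaligned)
{
	if (unaligned && whole_extent_gone && *partial != -B2C(pblk))
		*partial = B2C(pblk);	/* candidate: may be freed later */
	else if (unaligned)
		*partial = -B2C(pblk);	/* shared: pin it, never free */
	else if (*partial > 0)
		*partial = 0;		/* aligned removal, nothing pending */
}

int main(void)
{
	long long partial = 0;

	note_tail_removal(&partial, 43, 1, 1);	/* whole extent removed */
	printf("after full removal: %lld (cluster %lld may need freeing)\n",
	       partial, partial);
	note_tail_removal(&partial, 43, 0, 1);	/* head of extent survives */
	printf("after partial removal: %lld (cluster %lld still in use)\n",
	       partial, -partial);
	return 0;
}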
 
@@ -2486,12 +2589,16 @@
  * @handle: The journal handle
  * @inode:  The files inode
  * @path:   The path to the leaf
+ * @partial_cluster: The cluster which we'll have to free if all extents
+ *                   have been released from it. It becomes negative if
+ *                   the cluster is still in use.
  * @start:  The first block to remove
  * @end:   The last block to remove
  */
 static int
 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
-		 struct ext4_ext_path *path, ext4_fsblk_t *partial_cluster,
+		 struct ext4_ext_path *path,
+		 long long *partial_cluster,
 		 ext4_lblk_t start, ext4_lblk_t end)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
@@ -2502,8 +2609,9 @@
 	unsigned num;
 	ext4_lblk_t ex_ee_block;
 	unsigned short ex_ee_len;
-	unsigned uninitialized = 0;
+	unsigned unwritten = 0;
 	struct ext4_extent *ex;
+	ext4_fsblk_t pblk;
 
 	/* the header must be checked already in ext4_ext_remove_space() */
 	ext_debug("truncate since %u in leaf to %u\n", start, end);
@@ -2515,7 +2623,9 @@
 		return -EIO;
 	}
 	/* find where to start removing */
-	ex = EXT_LAST_EXTENT(eh);
+	ex = path[depth].p_ext;
+	if (!ex)
+		ex = EXT_LAST_EXTENT(eh);
 
 	ex_ee_block = le32_to_cpu(ex->ee_block);
 	ex_ee_len = ext4_ext_get_actual_len(ex);
@@ -2546,13 +2656,13 @@
 	while (ex >= EXT_FIRST_EXTENT(eh) &&
 			ex_ee_block + ex_ee_len > start) {
 
-		if (ext4_ext_is_uninitialized(ex))
-			uninitialized = 1;
+		if (ext4_ext_is_unwritten(ex))
+			unwritten = 1;
 		else
-			uninitialized = 0;
+			unwritten = 0;
 
 		ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
-			 uninitialized, ex_ee_len);
+			  unwritten, ex_ee_len);
 		path[depth].p_ext = ex;
 
 		a = ex_ee_block > start ? ex_ee_block : start;
@@ -2563,6 +2673,16 @@
 
 		/* If this extent is beyond the end of the hole, skip it */
 		if (end < ex_ee_block) {
+			/*
+			 * We're going to skip this extent and move to another,
+			 * so if this extent is not cluster aligned we have
+			 * to mark the current cluster as used to avoid
+			 * accidentally freeing it later on
+			 */
+			pblk = ext4_ext_pblock(ex);
+			if (EXT4_PBLK_COFF(sbi, pblk))
+				*partial_cluster =
+					-((long long)EXT4_B2C(sbi, pblk));
 			ex--;
 			ex_ee_block = le32_to_cpu(ex->ee_block);
 			ex_ee_len = ext4_ext_get_actual_len(ex);
@@ -2614,11 +2734,11 @@
 
 		ex->ee_len = cpu_to_le16(num);
 		/*
-		 * Do not mark uninitialized if all the blocks in the
+		 * Do not mark unwritten if all the blocks in the
 		 * extent have been removed.
 		 */
-		if (uninitialized && num)
-			ext4_ext_mark_uninitialized(ex);
+		if (unwritten && num)
+			ext4_ext_mark_unwritten(ex);
 		/*
 		 * If the extent was completely released,
 		 * we need to remove it from the leaf
@@ -2638,7 +2758,7 @@
 					sizeof(struct ext4_extent));
 			}
 			le16_add_cpu(&eh->eh_entries, -1);
-		} else
+		} else if (*partial_cluster > 0)
 			*partial_cluster = 0;
 
 		err = ext4_ext_dirty(handle, inode, path + depth);
@@ -2656,17 +2776,18 @@
 		err = ext4_ext_correct_indexes(handle, inode, path);
 
 	/*
-	 * If there is still a entry in the leaf node, check to see if
-	 * it references the partial cluster.  This is the only place
-	 * where it could; if it doesn't, we can free the cluster.
+	 * If there's a partial cluster and at least one extent remains in
+	 * the leaf, free the partial cluster if it isn't shared with the
+	 * current extent.  If there's a partial cluster and no extents
+	 * remain in the leaf, it can't be freed here.  It can only be
+	 * freed when it's possible to determine if it's not shared with
+	 * any other extent - when the next leaf is processed or when space
+	 * removal is complete.
 	 */
-	if (*partial_cluster && ex >= EXT_FIRST_EXTENT(eh) &&
+	if (*partial_cluster > 0 && eh->eh_entries &&
 	    (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
 	     *partial_cluster)) {
-		int flags = EXT4_FREE_BLOCKS_FORGET;
-
-		if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
-			flags |= EXT4_FREE_BLOCKS_METADATA;
+		int flags = get_default_free_blocks_flags(inode);
 
 		ext4_free_blocks(handle, inode, NULL,
 				 EXT4_C2B(sbi, *partial_cluster),
@@ -2710,7 +2831,7 @@
 	struct super_block *sb = inode->i_sb;
 	int depth = ext_depth(inode);
 	struct ext4_ext_path *path = NULL;
-	ext4_fsblk_t partial_cluster = 0;
+	long long partial_cluster = 0;
 	handle_t *handle;
 	int i = 0, err = 0;
 
@@ -2722,7 +2843,7 @@
 		return PTR_ERR(handle);
 
 again:
-	trace_ext4_ext_remove_space(inode, start, depth);
+	trace_ext4_ext_remove_space(inode, start, end, depth);
 
 	/*
 	 * Check if we are removing extents inside the extent tree. If that
@@ -2736,7 +2857,7 @@
 		ext4_lblk_t ee_block;
 
 		/* find extent for this block */
-		path = ext4_ext_find_extent(inode, end, NULL);
+		path = ext4_find_extent(inode, end, NULL, EXT4_EX_NOCACHE);
 		if (IS_ERR(path)) {
 			ext4_journal_stop(handle);
 			return PTR_ERR(path);
@@ -2764,23 +2885,14 @@
 		 */
 		if (end >= ee_block &&
 		    end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
-			int split_flag = 0;
-
-			if (ext4_ext_is_uninitialized(ex))
-				split_flag = EXT4_EXT_MARK_UNINIT1 |
-					     EXT4_EXT_MARK_UNINIT2;
-
 			/*
 			 * Split the extent in two so that 'end' is the last
 			 * block in the first new extent. Also we should not
 			 * fail removing space due to ENOSPC so try to use
 			 * reserved block if that happens.
 			 */
-			err = ext4_split_extent_at(handle, inode, path,
-					end + 1, split_flag,
-					EXT4_GET_BLOCKS_PRE_IO |
-					EXT4_GET_BLOCKS_METADATA_NOFAIL);
-
+			err = ext4_force_split_extent_at(handle, inode, &path,
+							 end + 1, 1);
 			if (err < 0)
 				goto out;
 		}
@@ -2802,11 +2914,11 @@
 			ext4_journal_stop(handle);
 			return -ENOMEM;
 		}
-		path[0].p_depth = depth;
+		path[0].p_maxdepth = path[0].p_depth = depth;
 		path[0].p_hdr = ext_inode_hdr(inode);
 		i = 0;
 
-		if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
+		if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
 			err = -EIO;
 			goto out;
 		}
@@ -2853,21 +2965,21 @@
 			ext_debug("move to level %d (block %llu)\n",
 				  i + 1, ext4_idx_pblock(path[i].p_idx));
 			memset(path + i + 1, 0, sizeof(*path));
-			bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
-			if (!bh) {
+			bh = read_extent_tree_block(inode,
+				ext4_idx_pblock(path[i].p_idx), depth - i - 1,
+				EXT4_EX_NOCACHE);
+			if (IS_ERR(bh)) {
 				/* should we reset i_size? */
-				err = -EIO;
+				err = PTR_ERR(bh);
 				break;
 			}
+			/* Yield here to deal with large extent trees.
+			 * Should be a no-op if we did IO above. */
+			cond_resched();
 			if (WARN_ON(i + 1 > depth)) {
 				err = -EIO;
 				break;
 			}
-			if (ext4_ext_check_block(inode, ext_block_hdr(bh),
-							depth - i - 1, bh)) {
-				err = -EIO;
-				break;
-			}
 			path[i + 1].p_bh = bh;
 
 			/* save actual number of indexes since this
@@ -2890,17 +3002,14 @@
 		}
 	}
 
-	trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster,
-			path->p_hdr->eh_entries);
+	trace_ext4_ext_remove_space_done(inode, start, end, depth,
+			partial_cluster, path->p_hdr->eh_entries);
 
 	/* If we still have something in the partial cluster and we have removed
 	 * even the first extent, then we should free the blocks in the partial
 	 * cluster as well. */
-	if (partial_cluster && path->p_hdr->eh_entries == 0) {
-		int flags = EXT4_FREE_BLOCKS_FORGET;
-
-		if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
-			flags |= EXT4_FREE_BLOCKS_METADATA;
+	if (partial_cluster > 0 && path->p_hdr->eh_entries == 0) {
+		int flags = get_default_free_blocks_flags(inode);
 
 		ext4_free_blocks(handle, inode, NULL,
 				 EXT4_C2B(EXT4_SB(sb), partial_cluster),
@@ -2925,10 +3034,9 @@
 out:
 	ext4_ext_drop_refs(path);
 	kfree(path);
-	if (err == -EAGAIN) {
-		path = NULL;
+	path = NULL;
+	if (err == -EAGAIN)
 		goto again;
-	}
 	ext4_journal_stop(handle);
 
 	return err;
@@ -2985,6 +3093,23 @@
 #endif
 }
 
+static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
+{
+	ext4_lblk_t  ee_block;
+	ext4_fsblk_t ee_pblock;
+	unsigned int ee_len;
+
+	ee_block  = le32_to_cpu(ex->ee_block);
+	ee_len    = ext4_ext_get_actual_len(ex);
+	ee_pblock = ext4_ext_pblock(ex);
+
+	if (ee_len == 0)
+		return 0;
+
+	return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
+				     EXTENT_STATUS_WRITTEN);
+}
+
 /* FIXME!! we need to try to merge to left or right after zero-out  */
 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
 {
@@ -2995,6 +3120,9 @@
 	ee_len    = ext4_ext_get_actual_len(ex);
 	ee_pblock = ext4_ext_pblock(ex);
 
+	if (ext4_encrypted_inode(inode))
+		return ext4_encrypted_zeroout(inode, ex);
+
 	ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
 	if (ret > 0)
 		ret = 0;
@@ -3010,7 +3138,7 @@
  * @path: the path to the extent
  * @split: the logical block where the extent is split.
  * @split_flags: indicates if the extent could be zeroout if split fails, and
- *		 the states(init or uninit) of new extents.
+ *		 the states (init or unwritten) of new extents.
  * @flags: flags used to insert new extent to extent tree.
  *
  *
@@ -3025,11 +3153,12 @@
  */
 static int ext4_split_extent_at(handle_t *handle,
 			     struct inode *inode,
-			     struct ext4_ext_path *path,
+			     struct ext4_ext_path **ppath,
 			     ext4_lblk_t split,
 			     int split_flag,
 			     int flags)
 {
+	struct ext4_ext_path *path = *ppath;
 	ext4_fsblk_t newblock;
 	ext4_lblk_t ee_block;
 	struct ext4_extent *ex, newex, orig_ex, zero_ex;
@@ -3052,10 +3181,10 @@
 	newblock = split - ee_block + ext4_ext_pblock(ex);
 
 	BUG_ON(split < ee_block || split >= (ee_block + ee_len));
-	BUG_ON(!ext4_ext_is_uninitialized(ex) &&
+	BUG_ON(!ext4_ext_is_unwritten(ex) &&
 	       split_flag & (EXT4_EXT_MAY_ZEROOUT |
-			     EXT4_EXT_MARK_UNINIT1 |
-			     EXT4_EXT_MARK_UNINIT2));
+			     EXT4_EXT_MARK_UNWRIT1 |
+			     EXT4_EXT_MARK_UNWRIT2));
 
 	err = ext4_ext_get_access(handle, inode, path + depth);
 	if (err)
@@ -3067,8 +3196,8 @@
 		 * then we just change the state of the extent, and splitting
 		 * is not needed.
 		 */
-		if (split_flag & EXT4_EXT_MARK_UNINIT2)
-			ext4_ext_mark_uninitialized(ex);
+		if (split_flag & EXT4_EXT_MARK_UNWRIT2)
+			ext4_ext_mark_unwritten(ex);
 		else
 			ext4_ext_mark_initialized(ex);
 
@@ -3082,8 +3211,8 @@
 	/* case a */
 	memcpy(&orig_ex, ex, sizeof(orig_ex));
 	ex->ee_len = cpu_to_le16(split - ee_block);
-	if (split_flag & EXT4_EXT_MARK_UNINIT1)
-		ext4_ext_mark_uninitialized(ex);
+	if (split_flag & EXT4_EXT_MARK_UNWRIT1)
+		ext4_ext_mark_unwritten(ex);
 
 	/*
 	 * path may lead to new leaf, not to original leaf any more
@@ -3097,10 +3226,10 @@
 	ex2->ee_block = cpu_to_le32(split);
 	ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
 	ext4_ext_store_pblock(ex2, newblock);
-	if (split_flag & EXT4_EXT_MARK_UNINIT2)
-		ext4_ext_mark_uninitialized(ex2);
+	if (split_flag & EXT4_EXT_MARK_UNWRIT2)
+		ext4_ext_mark_unwritten(ex2);
 
-	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
+	err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
 	if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
 		if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
 			if (split_flag & EXT4_EXT_DATA_VALID1) {
@@ -3137,7 +3266,7 @@
 			goto fix_extent_len;
 
 		/* update extent status tree */
-		err = ext4_es_zeroout(inode, &zero_ex);
+		err = ext4_zeroout_es(inode, &zero_ex);
 
 		goto out;
 	} else if (err)
@@ -3149,7 +3278,7 @@
 
 fix_extent_len:
 	ex->ee_len = orig_ex.ee_len;
-	ext4_ext_dirty(handle, inode, path + depth);
+	ext4_ext_dirty(handle, inode, path + path->p_depth);
 	return err;
 }
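The geometry ext4_split_extent_at() computes reduces to simple arithmetic: the first half keeps the original start and shrinks to split - ee_block blocks, and the second half begins at the split point with its physical start advanced by the same distance; when the split falls on the extent's first block only the unwritten state changes. A plain userspace model (no journaling or tree updates):

#include <stdio.h>

struct extent { unsigned lblk; unsigned len; unsigned long long pblk; };

/* returns 1 if a real split happened, 0 if only a state change is needed */
static int split_at(const struct extent *ex, unsigned split,
		    struct extent *a, struct extent *b)
{
	if (split <= ex->lblk || split >= ex->lblk + ex->len)
		return 0;			/* case a: no split required */
	a->lblk = ex->lblk;
	a->len  = split - ex->lblk;		/* first half shrinks */
	a->pblk = ex->pblk;
	b->lblk = split;			/* second half starts at split */
	b->len  = ex->len - a->len;
	b->pblk = ex->pblk + a->len;		/* physical start advances too */
	return 1;
}

int main(void)
{
	struct extent ex = { 100, 50, 9000 }, a, b;

	if (split_at(&ex, 120, &a, &b))
		printf("a: %u+%u@%llu  b: %u+%u@%llu\n",
		       a.lblk, a.len, a.pblk, b.lblk, b.len, b.pblk);
	return 0;
}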
 
@@ -3157,7 +3286,7 @@
 * ext4_split_extent() splits an extent and marks the extent covered
 * by @map as split_flags indicates
  *
- * It may result in splitting the extent into multiple extents (upto three)
+ * It may result in splitting the extent into multiple extents (up to three)
  * There are three possibilities:
  *   a> There is no split required
  *   b> Splits in two extents: Split is happening at either end of the extent
@@ -3166,16 +3295,17 @@
  */
 static int ext4_split_extent(handle_t *handle,
 			      struct inode *inode,
-			      struct ext4_ext_path *path,
+			      struct ext4_ext_path **ppath,
 			      struct ext4_map_blocks *map,
 			      int split_flag,
 			      int flags)
 {
+	struct ext4_ext_path *path = *ppath;
 	ext4_lblk_t ee_block;
 	struct ext4_extent *ex;
 	unsigned int ee_len, depth;
 	int err = 0;
-	int uninitialized;
+	int unwritten;
 	int split_flag1, flags1;
 	int allocated = map->m_len;
 
@@ -3183,17 +3313,17 @@
 	ex = path[depth].p_ext;
 	ee_block = le32_to_cpu(ex->ee_block);
 	ee_len = ext4_ext_get_actual_len(ex);
-	uninitialized = ext4_ext_is_uninitialized(ex);
+	unwritten = ext4_ext_is_unwritten(ex);
 
 	if (map->m_lblk + map->m_len < ee_block + ee_len) {
 		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
 		flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
-		if (uninitialized)
-			split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
-				       EXT4_EXT_MARK_UNINIT2;
+		if (unwritten)
+			split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
+				       EXT4_EXT_MARK_UNWRIT2;
 		if (split_flag & EXT4_EXT_DATA_VALID2)
 			split_flag1 |= EXT4_EXT_DATA_VALID1;
-		err = ext4_split_extent_at(handle, inode, path,
+		err = ext4_split_extent_at(handle, inode, ppath,
 				map->m_lblk + map->m_len, split_flag1, flags1);
 		if (err)
 			goto out;
@@ -3204,23 +3334,27 @@
 	 * Update path is required because previous ext4_split_extent_at() may
 	 * result in split of original leaf or extent zeroout.
 	 */
-	ext4_ext_drop_refs(path);
-	path = ext4_ext_find_extent(inode, map->m_lblk, path);
+	path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
 	if (IS_ERR(path))
 		return PTR_ERR(path);
 	depth = ext_depth(inode);
 	ex = path[depth].p_ext;
-	uninitialized = ext4_ext_is_uninitialized(ex);
+	if (!ex) {
+		EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
+				 (unsigned long) map->m_lblk);
+		return -EIO;
+	}
+	unwritten = ext4_ext_is_unwritten(ex);
 	split_flag1 = 0;
 
 	if (map->m_lblk >= ee_block) {
 		split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
-		if (uninitialized) {
-			split_flag1 |= EXT4_EXT_MARK_UNINIT1;
+		if (unwritten) {
+			split_flag1 |= EXT4_EXT_MARK_UNWRIT1;
 			split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
-						     EXT4_EXT_MARK_UNINIT2);
+						     EXT4_EXT_MARK_UNWRIT2);
 		}
-		err = ext4_split_extent_at(handle, inode, path,
+		err = ext4_split_extent_at(handle, inode, ppath,
 				map->m_lblk, split_flag1, flags);
 		if (err)
 			goto out;
@@ -3233,16 +3367,16 @@
 
 /*
  * This function is called by ext4_ext_map_blocks() if someone tries to write
- * to an uninitialized extent. It may result in splitting the uninitialized
+ * to an unwritten extent. It may result in splitting the unwritten
  * extent into multiple extents (up to three - one initialized and two
- * uninitialized).
+ * unwritten).
  * There are three possibilities:
  *   a> There is no split required: Entire extent should be initialized
  *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Someone is writing in the middle of the extent
  *
  * Pre-conditions:
- *  - The extent pointed to by 'path' is uninitialized.
+ *  - The extent pointed to by 'path' is unwritten.
  *  - The extent pointed to by 'path' contains a superset
  *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
  *
@@ -3254,9 +3388,10 @@
 static int ext4_ext_convert_to_initialized(handle_t *handle,
 					   struct inode *inode,
 					   struct ext4_map_blocks *map,
-					   struct ext4_ext_path *path,
+					   struct ext4_ext_path **ppath,
 					   int flags)
 {
+	struct ext4_ext_path *path = *ppath;
 	struct ext4_sb_info *sbi;
 	struct ext4_extent_header *eh;
 	struct ext4_map_blocks split_map;
@@ -3288,12 +3423,12 @@
 	trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
 
 	/* Pre-conditions */
-	BUG_ON(!ext4_ext_is_uninitialized(ex));
+	BUG_ON(!ext4_ext_is_unwritten(ex));
 	BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
 
 	/*
 	 * Attempt to transfer newly initialized blocks from the currently
-	 * uninitialized extent to its neighbor. This is much cheaper
+	 * unwritten extent to its neighbor. This is much cheaper
 	 * than an insertion followed by a merge as those involve costly
 	 * memmove() calls. Transferring to the left is the common case in
 	 * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE)
@@ -3329,7 +3464,7 @@
 		 * - C4: abut_ex can receive the additional blocks without
 		 *   overflowing the (initialized) length limit.
 		 */
-		if ((!ext4_ext_is_uninitialized(abut_ex)) &&		/*C1*/
+		if ((!ext4_ext_is_unwritten(abut_ex)) &&		/*C1*/
 			((prev_lblk + prev_len) == ee_block) &&		/*C2*/
 			((prev_pblk + prev_len) == ee_pblk) &&		/*C3*/
 			(prev_len < (EXT_INIT_MAX_LEN - map_len))) {	/*C4*/
@@ -3344,7 +3479,7 @@
 			ex->ee_block = cpu_to_le32(ee_block + map_len);
 			ext4_ext_store_pblock(ex, ee_pblk + map_len);
 			ex->ee_len = cpu_to_le16(ee_len - map_len);
-			ext4_ext_mark_uninitialized(ex); /* Restore the flag */
+			ext4_ext_mark_unwritten(ex); /* Restore the flag */
 
 			/* Extend abut_ex by 'map_len' blocks */
 			abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
@@ -3375,7 +3510,7 @@
 		 * - C4: abut_ex can receive the additional blocks without
 		 *   overflowing the (initialized) length limit.
 		 */
-		if ((!ext4_ext_is_uninitialized(abut_ex)) &&		/*C1*/
+		if ((!ext4_ext_is_unwritten(abut_ex)) &&		/*C1*/
 		    ((map->m_lblk + map_len) == next_lblk) &&		/*C2*/
 		    ((ee_pblk + ee_len) == next_pblk) &&		/*C3*/
 		    (next_len < (EXT_INIT_MAX_LEN - map_len))) {	/*C4*/
@@ -3390,7 +3525,7 @@
 			abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
 			ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
 			ex->ee_len = cpu_to_le16(ee_len - map_len);
-			ext4_ext_mark_uninitialized(ex); /* Restore the flag */
+			ext4_ext_mark_unwritten(ex); /* Restore the flag */
 
 			/* Extend abut_ex by 'map_len' blocks */
 			abut_ex->ee_len = cpu_to_le16(next_len + map_len);
@@ -3412,7 +3547,7 @@
 	WARN_ON(map->m_lblk < ee_block);
 	/*
 	 * It is safe to convert extent to initialized via explicit
-	 * zeroout only if extent is fully insde i_size or new_size.
+	 * zeroout only if extent is fully inside i_size or new_size.
 	 */
 	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
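A minimal model of the eligibility test just above: zeroing out is only a safe fallback when the whole extent sits at or below the EOF block, since otherwise blocks past i_size would be exposed as initialized data:

#include <stdio.h>

static int may_zeroout(unsigned ee_block, unsigned ee_len, unsigned eof_block)
{
	/* mirrors: split_flag |= ee_block + ee_len <= eof_block ? MAY_ZEROOUT : 0 */
	return ee_block + ee_len <= eof_block;
}

int main(void)
{
	printf("fully inside EOF: %d\n", may_zeroout(10, 5, 100));	/* 1 */
	printf("straddles EOF:    %d\n", may_zeroout(98, 5, 100));	/* 0 */
	return 0;
}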
 
@@ -3480,55 +3615,57 @@
 		}
 	}
 
-	allocated = ext4_split_extent(handle, inode, path,
-				      &split_map, split_flag, flags);
-	if (allocated < 0)
-		err = allocated;
-
+	err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag,
+				flags);
+	if (err > 0)
+		err = 0;
 out:
 	/* If we have gotten a failure, don't zero out status tree */
 	if (!err)
-		err = ext4_es_zeroout(inode, &zero_ex);
+		err = ext4_zeroout_es(inode, &zero_ex);
 	return err ? err : allocated;
 }
 
 /*
  * This function is called by ext4_ext_map_blocks() from
  * ext4_get_blocks_dio_write() when a DIO write is issued
- * to an uninitialized extent.
+ * to an unwritten extent.
  *
- * Writing to an uninitialized extent may result in splitting the uninitialized
- * extent into multiple initialized/uninitialized extents (up to three)
+ * Writing to an unwritten extent may result in splitting the unwritten
+ * extent into multiple initialized/unwritten extents (up to three)
  * There are three possibilities:
- *   a> There is no split required: Entire extent should be uninitialized
+ *   a> There is no split required: Entire extent should be unwritten
  *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Someone is writing in the middle of the extent
  *
+ * This works the same way in the case of initialized -> unwritten conversion.
+ *
 * One or more index blocks may be needed if the extent tree grows after
- * the uninitialized extent split. To prevent ENOSPC occur at the IO
- * complete, we need to split the uninitialized extent before DIO submit
- * the IO. The uninitialized extent called at this time will be split
- * into three uninitialized extent(at most). After IO complete, the part
+ * the unwritten extent split. To prevent ENOSPC from occurring at IO
+ * completion, we need to split the unwritten extent before the DIO is
+ * submitted. The unwritten extent will be split into at most three
+ * unwritten extents. After the IO completes, the part
 * being filled will be converted to initialized by the end_io callback function
  * via ext4_convert_unwritten_extents().
  *
- * Returns the size of uninitialized extent to be written on success.
+ * Returns the size of unwritten extent to be written on success.
  */
-static int ext4_split_unwritten_extents(handle_t *handle,
+static int ext4_split_convert_extents(handle_t *handle,
 					struct inode *inode,
 					struct ext4_map_blocks *map,
-					struct ext4_ext_path *path,
+					struct ext4_ext_path **ppath,
 					int flags)
 {
+	struct ext4_ext_path *path = *ppath;
 	ext4_lblk_t eof_block;
 	ext4_lblk_t ee_block;
 	struct ext4_extent *ex;
 	unsigned int ee_len;
 	int split_flag = 0, depth;
 
-	ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
-		"block %llu, max_blocks %u\n", inode->i_ino,
-		(unsigned long long)map->m_lblk, map->m_len);
+	ext_debug("%s: inode %lu, logical block %llu, max_blocks %u\n",
+		  __func__, inode->i_ino,
+		  (unsigned long long)map->m_lblk, map->m_len);
 
 	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
 		inode->i_sb->s_blocksize_bits;
@@ -3543,19 +3680,25 @@
 	ee_block = le32_to_cpu(ex->ee_block);
 	ee_len = ext4_ext_get_actual_len(ex);
 
-	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
-	split_flag |= EXT4_EXT_MARK_UNINIT2;
-	if (flags & EXT4_GET_BLOCKS_CONVERT)
-		split_flag |= EXT4_EXT_DATA_VALID2;
+	/* Convert to unwritten */
+	if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
+		split_flag |= EXT4_EXT_DATA_VALID1;
+	/* Convert to initialized */
+	} else if (flags & EXT4_GET_BLOCKS_CONVERT) {
+		split_flag |= ee_block + ee_len <= eof_block ?
+			      EXT4_EXT_MAY_ZEROOUT : 0;
+		split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
+	}
 	flags |= EXT4_GET_BLOCKS_PRE_IO;
-	return ext4_split_extent(handle, inode, path, map, split_flag, flags);
+	return ext4_split_extent(handle, inode, ppath, map, split_flag, flags);
 }
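A sketch of the split_flag selection ext4_split_convert_extents() performs for the two conversion directions (the flag values are stand-ins for the real ext4 constants): converting written to unwritten only asserts that the first half's data is valid, while the unwritten-to-written end_io path marks the second half unwritten, declares its data valid, and may zero out when the extent sits fully inside EOF.

#include <stdio.h>

#define DATA_VALID1	0x1
#define DATA_VALID2	0x2
#define MARK_UNWRIT2	0x4
#define MAY_ZEROOUT	0x8

enum convert { TO_UNWRITTEN, TO_INITIALIZED };

static int pick_split_flag(enum convert dir, int fully_inside_eof)
{
	if (dir == TO_UNWRITTEN)
		return DATA_VALID1;
	/* end_io conversion: second half stays unwritten, data is valid */
	return (fully_inside_eof ? MAY_ZEROOUT : 0)
		| MARK_UNWRIT2 | DATA_VALID2;
}

int main(void)
{
	printf("to unwritten:   %#x\n", pick_split_flag(TO_UNWRITTEN, 1));
	printf("to initialized: %#x\n", pick_split_flag(TO_INITIALIZED, 1));
	return 0;
}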
 
 static int ext4_convert_unwritten_extents_endio(handle_t *handle,
 						struct inode *inode,
 						struct ext4_map_blocks *map,
-						struct ext4_ext_path *path)
+						struct ext4_ext_path **ppath)
 {
+	struct ext4_ext_path *path = *ppath;
 	struct ext4_extent *ex;
 	ext4_lblk_t ee_block;
 	unsigned int ee_len;
@@ -3584,16 +3727,13 @@
 			     inode->i_ino, (unsigned long long)ee_block, ee_len,
 			     (unsigned long long)map->m_lblk, map->m_len);
 #endif
-		err = ext4_split_unwritten_extents(handle, inode, map, path,
-						   EXT4_GET_BLOCKS_CONVERT);
+		err = ext4_split_convert_extents(handle, inode, map, ppath,
+						 EXT4_GET_BLOCKS_CONVERT);
 		if (err < 0)
-			goto out;
-		ext4_ext_drop_refs(path);
-		path = ext4_ext_find_extent(inode, map->m_lblk, path);
-		if (IS_ERR(path)) {
-			err = PTR_ERR(path);
-			goto out;
-		}
+			return err;
+		path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
+		if (IS_ERR(path))
+			return PTR_ERR(path);
 		depth = ext_depth(inode);
 		ex = path[depth].p_ext;
 	}
@@ -3786,34 +3926,109 @@
 }
 
 static int
-ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
+convert_initialized_extent(handle_t *handle, struct inode *inode,
+			   struct ext4_map_blocks *map,
+			   struct ext4_ext_path **ppath, int flags,
+			   unsigned int allocated, ext4_fsblk_t newblock)
+{
+	struct ext4_ext_path *path = *ppath;
+	struct ext4_extent *ex;
+	ext4_lblk_t ee_block;
+	unsigned int ee_len;
+	int depth;
+	int err = 0;
+
+	/*
+	 * Make sure that the extent is no bigger than an unwritten
+	 * extent can describe
+	 */
+	if (map->m_len > EXT_UNWRITTEN_MAX_LEN)
+		map->m_len = EXT_UNWRITTEN_MAX_LEN / 2;
+
+	depth = ext_depth(inode);
+	ex = path[depth].p_ext;
+	ee_block = le32_to_cpu(ex->ee_block);
+	ee_len = ext4_ext_get_actual_len(ex);
+
+	ext_debug("%s: inode %lu, logical"
+		"block %llu, max_blocks %u\n", __func__, inode->i_ino,
+		  (unsigned long long)ee_block, ee_len);
+
+	if (ee_block != map->m_lblk || ee_len > map->m_len) {
+		err = ext4_split_convert_extents(handle, inode, map, ppath,
+				EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
+		if (err < 0)
+			return err;
+		path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
+		if (IS_ERR(path))
+			return PTR_ERR(path);
+		depth = ext_depth(inode);
+		ex = path[depth].p_ext;
+		if (!ex) {
+			EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
+					 (unsigned long) map->m_lblk);
+			return -EIO;
+		}
+	}
+
+	err = ext4_ext_get_access(handle, inode, path + depth);
+	if (err)
+		return err;
+	/* first mark the extent as unwritten */
+	ext4_ext_mark_unwritten(ex);
+
+	/*
+	 * Note: ext4_ext_correct_indexes() isn't needed here because
+	 * the extent borders are not changed.
+	 */
+	ext4_ext_try_to_merge(handle, inode, path, ex);
+
+	/* Mark modified extent as dirty */
+	err = ext4_ext_dirty(handle, inode, path + path->p_depth);
+	if (err)
+		return err;
+	ext4_ext_show_leaf(inode, path);
+
+	ext4_update_inode_fsync_trans(handle, inode, 1);
+	err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len);
+	if (err)
+		return err;
+	map->m_flags |= EXT4_MAP_UNWRITTEN;
+	if (allocated > map->m_len)
+		allocated = map->m_len;
+	map->m_len = allocated;
+	return allocated;
+}
+
+static int
+ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
 			struct ext4_map_blocks *map,
-			struct ext4_ext_path *path, int flags,
+			struct ext4_ext_path **ppath, int flags,
 			unsigned int allocated, ext4_fsblk_t newblock)
 {
+	struct ext4_ext_path *path = *ppath;
 	int ret = 0;
 	int err = 0;
 	ext4_io_end_t *io = ext4_inode_aio(inode);
 
-	ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
+	ext_debug("ext4_ext_handle_unwritten_extents: inode %lu, logical "
 		  "block %llu, max_blocks %u, flags %x, allocated %u\n",
 		  inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
 		  flags, allocated);
 	ext4_ext_show_leaf(inode, path);
 
 	/*
-	 * When writing into uninitialized space, we should not fail to
+	 * When writing into unwritten space, we should not fail to
 	 * allocate metadata blocks for the new extent block if needed.
 	 */
 	flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
 
-	trace_ext4_ext_handle_uninitialized_extents(inode, map, flags,
+	trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
 						    allocated, newblock);
 
 	/* get_block() before submit the IO, split the extent */
-	if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
-		ret = ext4_split_unwritten_extents(handle, inode, map,
-						   path, flags);
+	if (flags & EXT4_GET_BLOCKS_PRE_IO) {
+		ret = ext4_split_convert_extents(handle, inode, map, ppath,
+					 flags | EXT4_GET_BLOCKS_CONVERT);
 		if (ret <= 0)
 			goto out;
 		/*
@@ -3826,14 +4041,12 @@
 		else
 			ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
 		map->m_flags |= EXT4_MAP_UNWRITTEN;
-		if (ext4_should_dioread_nolock(inode))
-			map->m_flags |= EXT4_MAP_UNINIT;
 		goto out;
 	}
 	/* IO end_io complete, convert the filled extent to written */
-	if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
+	if (flags & EXT4_GET_BLOCKS_CONVERT) {
 		ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
-							path);
+							   ppath);
 		if (ret >= 0) {
 			ext4_update_inode_fsync_trans(handle, inode, 1);
 			err = check_eofblocks_fl(handle, inode, map->m_lblk,
@@ -3841,6 +4054,7 @@
 		} else
 			err = ret;
 		map->m_flags |= EXT4_MAP_MAPPED;
+		map->m_pblk = newblock;
 		if (allocated > map->m_len)
 			allocated = map->m_len;
 		map->m_len = allocated;
@@ -3851,7 +4065,7 @@
 	 * repeat fallocate creation request
 	 * we already have an unwritten extent
 	 */
-	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
+	if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
 		map->m_flags |= EXT4_MAP_UNWRITTEN;
 		goto map_out;
 	}
@@ -3870,7 +4084,7 @@
 	}
 
 	/* buffered write, writepage time, convert*/
-	ret = ext4_ext_convert_to_initialized(handle, inode, map, path, flags);
+	ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags);
 	if (ret >= 0)
 		ext4_update_inode_fsync_trans(handle, inode, 1);
 out:
@@ -3927,10 +4141,6 @@
 	map->m_pblk = newblock;
 	map->m_len = allocated;
 out2:
-	if (path) {
-		ext4_ext_drop_refs(path);
-		kfree(path);
-	}
 	return err ? err : allocated;
 }
 
@@ -4075,7 +4285,7 @@
 	trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
 
 	/* find extent for this block */
-	path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
+	path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
 	if (IS_ERR(path)) {
 		err = PTR_ERR(path);
 		path = NULL;
@@ -4087,7 +4297,7 @@
 	/*
 	 * consistent leaf must not be empty;
 	 * this situation is possible, though, _during_ tree modification;
-	 * this is why assert can't be put in ext4_ext_find_extent()
+	 * this is why assert can't be put in ext4_find_extent()
 	 */
 	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
 		EXT4_ERROR_INODE(inode, "bad extent address "
@@ -4104,8 +4314,9 @@
 		ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
 		unsigned short ee_len;
 
 		/*
-		 * Uninitialized extents are treated as holes, except that
+		 * Unwritten extents are treated as holes, except that
 		 * we split out initialized portions during a write.
 		 */
 		ee_len = ext4_ext_get_actual_len(ex);
@@ -4120,17 +4331,27 @@
 			ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
 				  ee_block, ee_len, newblock);
 
-			if (!ext4_ext_is_uninitialized(ex))
+			/*
+			 * If the extent is initialized check whether the
+			 * caller wants to convert it to unwritten.
+			 */
+			if ((!ext4_ext_is_unwritten(ex)) &&
+			    (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
+				allocated = convert_initialized_extent(
+						handle, inode, map, &path,
+						flags, allocated, newblock);
+				goto out2;
+			} else if (!ext4_ext_is_unwritten(ex))
 				goto out;
 
-			ret = ext4_ext_handle_uninitialized_extents(
-				handle, inode, map, path, flags,
+			ret = ext4_ext_handle_unwritten_extents(
+				handle, inode, map, &path, flags,
 				allocated, newblock);
 			if (ret < 0)
 				err = ret;
 			else
 				allocated = ret;
-			goto out3;
+			goto out2;
 		}
 	}
 
@@ -4161,7 +4382,7 @@
 
 	/*
 	 * If we are doing bigalloc, check to see if the extent returned
-	 * by ext4_ext_find_extent() implies a cluster we can use.
+	 * by ext4_find_extent() implies a cluster we can use.
 	 */
 	if (cluster_offset && ex &&
 	    get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
@@ -4195,15 +4416,15 @@
 	/*
 	 * See if request is beyond maximum number of blocks we can have in
 	 * a single extent. For an initialized extent this limit is
-	 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
-	 * EXT_UNINIT_MAX_LEN.
+	 * EXT_INIT_MAX_LEN and for an unwritten extent this limit is
+	 * EXT_UNWRITTEN_MAX_LEN.
 	 */
 	if (map->m_len > EXT_INIT_MAX_LEN &&
-	    !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
+	    !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
 		map->m_len = EXT_INIT_MAX_LEN;
-	else if (map->m_len > EXT_UNINIT_MAX_LEN &&
-		 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
-		map->m_len = EXT_UNINIT_MAX_LEN;
+	else if (map->m_len > EXT_UNWRITTEN_MAX_LEN &&
+		 (flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
+		map->m_len = EXT_UNWRITTEN_MAX_LEN;
 
 	/* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
 	newex.ee_len = cpu_to_le16(map->m_len);
@@ -4236,6 +4457,8 @@
 		ar.flags = 0;
 	if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
 		ar.flags |= EXT4_MB_HINT_NOPREALLOC;
+	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
+		ar.flags |= EXT4_MB_DELALLOC_RESERVED;
 	newblock = ext4_mb_new_blocks(handle, &ar, &err);
 	if (!newblock)
 		goto out2;
@@ -4251,21 +4474,19 @@
 	/* try to insert new extent into found leaf and return */
 	ext4_ext_store_pblock(&newex, newblock + offset);
 	newex.ee_len = cpu_to_le16(ar.len);
-	/* Mark uninitialized */
-	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){
-		ext4_ext_mark_uninitialized(&newex);
+	/* Mark unwritten */
+	if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT){
+		ext4_ext_mark_unwritten(&newex);
 		map->m_flags |= EXT4_MAP_UNWRITTEN;
 		/*
 		 * io_end structure was created for every IO write to an
-		 * uninitialized extent. To avoid unnecessary conversion,
+		 * unwritten extent. To avoid unnecessary conversion,
 		 * here we flag the IO that really needs the conversion.
 		 * For non asycn direct IO case, flag the inode state
 		 * that we need to perform conversion when IO is done.
 		 */
-		if ((flags & EXT4_GET_BLOCKS_PRE_IO))
+		if (flags & EXT4_GET_BLOCKS_PRE_IO)
 			set_unwritten = 1;
-		if (ext4_should_dioread_nolock(inode))
-			map->m_flags |= EXT4_MAP_UNINIT;
 	}
 
 	err = 0;
@@ -4273,7 +4494,7 @@
 		err = check_eofblocks_fl(handle, inode, map->m_lblk,
 					 path, ar.len);
 	if (!err)
-		err = ext4_ext_insert_extent(handle, inode, path,
+		err = ext4_ext_insert_extent(handle, inode, &path,
 					     &newex, flags);
 
 	if (!err && set_unwritten) {
@@ -4291,8 +4512,8 @@
 		/* not a good idea to call discard here directly,
 		 * but otherwise we'd need to call it every free() */
 		ext4_discard_preallocations(inode);
-		ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex),
-				 ext4_ext_get_actual_len(&newex), fb_flags);
+		ext4_free_blocks(handle, inode, NULL, newblock,
+				 EXT4_C2B(sbi, allocated_clusters), fb_flags);
 		goto out2;
 	}
 
@@ -4392,9 +4613,9 @@
 
 	/*
 	 * Cache the extent and update transaction to commit on fdatasync only
-	 * when it is _not_ an uninitialized extent.
+	 * when it is _not_ an unwritten extent.
 	 */
-	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0)
+	if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0)
 		ext4_update_inode_fsync_trans(handle, inode, 1);
 	else
 		ext4_update_inode_fsync_trans(handle, inode, 0);
@@ -4406,14 +4627,12 @@
 	map->m_pblk = newblock;
 	map->m_len = allocated;
 out2:
-	if (path) {
-		ext4_ext_drop_refs(path);
-		kfree(path);
-	}
+	ext4_ext_drop_refs(path);
+	kfree(path);
 
-out3:
-	trace_ext4_ext_map_blocks_exit(inode, map, err ? err : allocated);
-
+	trace_ext4_ext_map_blocks_exit(inode, flags, map,
+				       err ? err : allocated);
+	ext4_es_lru_add(inode);
 	return err ? err : allocated;
 }
 
@@ -4451,34 +4670,240 @@
 	ext4_std_error(inode->i_sb, err);
 }
 
-static void ext4_falloc_update_inode(struct inode *inode,
-				int mode, loff_t new_size, int update_ctime)
+static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
+				  ext4_lblk_t len, loff_t new_size,
+				  int flags, int mode)
 {
-	struct timespec now;
+	struct inode *inode = file_inode(file);
+	handle_t *handle;
+	int ret = 0;
+	int ret2 = 0;
+	int retries = 0;
+	struct ext4_map_blocks map;
+	unsigned int credits;
+	loff_t epos;
 
-	if (update_ctime) {
-		now = current_fs_time(inode->i_sb);
-		if (!timespec_equal(&inode->i_ctime, &now))
-			inode->i_ctime = now;
-	}
+	map.m_lblk = offset;
+	map.m_len = len;
 	/*
-	 * Update only when preallocation was requested beyond
-	 * the file size.
+	 * Don't normalize the request if it can fit in one extent so
+	 * that it doesn't get unnecessarily split into multiple
+	 * extents.
 	 */
-	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
-		if (new_size > i_size_read(inode))
-			i_size_write(inode, new_size);
-		if (new_size > EXT4_I(inode)->i_disksize)
-			ext4_update_i_disksize(inode, new_size);
+	if (len <= EXT_UNWRITTEN_MAX_LEN)
+		flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
+
+	/*
+	 * credits to insert 1 extent into extent tree
+	 */
+	credits = ext4_chunk_trans_blocks(inode, len);
+
+retry:
+	while (ret >= 0 && len) {
+		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
+					    credits);
+		if (IS_ERR(handle)) {
+			ret = PTR_ERR(handle);
+			break;
+		}
+		ret = ext4_map_blocks(handle, inode, &map, flags);
+		if (ret <= 0) {
+			ext4_debug("inode #%lu: block %u: len %u: "
+				   "ext4_ext_map_blocks returned %d",
+				   inode->i_ino, map.m_lblk,
+				   map.m_len, ret);
+			ext4_mark_inode_dirty(handle, inode);
+			ret2 = ext4_journal_stop(handle);
+			break;
+		}
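+		/*
+		 * ext4_map_blocks() may map fewer blocks than requested;
+		 * advance the window past what was mapped and loop until
+		 * the whole range is allocated.
+		 */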
+		map.m_lblk += ret;
+		map.m_len = len = len - ret;
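+		/*
+		 * epos is the file offset just past the blocks mapped so
+		 * far.  When extending the file (new_size != 0), i_size is
+		 * grown incrementally up to new_size; otherwise blocks
+		 * beyond EOF are only flagged via EXT4_INODE_EOFBLOCKS.
+		 */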
+		epos = (loff_t)map.m_lblk << inode->i_blkbits;
+		inode->i_ctime = ext4_current_time(inode);
+		if (new_size) {
+			if (epos > new_size)
+				epos = new_size;
+			if (ext4_update_inode_size(inode, epos) & 0x1)
+				inode->i_mtime = inode->i_ctime;
+		} else {
+			if (epos > inode->i_size)
+				ext4_set_inode_flag(inode,
+						    EXT4_INODE_EOFBLOCKS);
+		}
+		ext4_mark_inode_dirty(handle, inode);
+		ret2 = ext4_journal_stop(handle);
+		if (ret2)
+			break;
+	}
+	if (ret == -ENOSPC &&
+			ext4_should_retry_alloc(inode->i_sb, &retries)) {
+		ret = 0;
+		goto retry;
+	}
+
+	return ret > 0 ? ret2 : ret;
+}
+
+static long ext4_zero_range(struct file *file, loff_t offset,
+			    loff_t len, int mode)
+{
+	struct inode *inode = file_inode(file);
+	handle_t *handle = NULL;
+	unsigned int max_blocks;
+	loff_t new_size = 0;
+	int ret = 0;
+	int flags;
+	int credits;
+	int partial_begin, partial_end;
+	loff_t start, end;
+	ext4_lblk_t lblk;
+	struct address_space *mapping = inode->i_mapping;
+	unsigned int blkbits = inode->i_blkbits;
+
+	trace_ext4_zero_range(inode, offset, len, mode);
+
+	if (!S_ISREG(inode->i_mode))
+		return -EINVAL;
+
+	/* Call ext4_force_commit to flush all data in case of data=journal. */
+	if (ext4_should_journal_data(inode)) {
+		ret = ext4_force_commit(inode->i_sb);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * Write out all dirty pages to avoid race conditions
+	 * Then release them.
+	 */
+	if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+		ret = filemap_write_and_wait_range(mapping, offset,
+						   offset + len - 1);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * Round up offset. This is not fallocate, we need to zero out
+	 * blocks, so convert the interior block-aligned part of the range
+	 * to unwritten and possibly manually zero out the unaligned parts
+	 * of the range.
+	 */
+	start = round_up(offset, 1 << blkbits);
+	end = round_down((offset + len), 1 << blkbits);
+
+	if (start < offset || end > offset + len)
+		return -EINVAL;
+	partial_begin = offset & ((1 << blkbits) - 1);
+	partial_end = (offset + len) & ((1 << blkbits) - 1);
+
+	lblk = start >> blkbits;
+	max_blocks = (end >> blkbits);
+	if (max_blocks < lblk)
+		max_blocks = 0;
+	else
+		max_blocks -= lblk;
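+	/*
+	 * Example with 4096-byte blocks (blkbits == 12): offset == 3000,
+	 * len == 10000 gives start == 4096 and end == 12288, so lblk == 1
+	 * and max_blocks == 2.  Blocks 1-2 are converted to unwritten
+	 * extents below, while the partial ranges [3000, 4096) and
+	 * [12288, 13000) are zeroed out via ext4_zero_partial_blocks().
+	 */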
+
+	flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT |
+		EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
+		EXT4_EX_NOCACHE;
+	if (mode & FALLOC_FL_KEEP_SIZE)
+		flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
+
+	mutex_lock(&inode->i_mutex);
+
+	/*
+	 * Indirect files do not support unwritten extents.
+	 */
+	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
+		ret = -EOPNOTSUPP;
+		goto out_mutex;
+	}
+
+	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+	     offset + len > i_size_read(inode)) {
+		new_size = offset + len;
+		ret = inode_newsize_ok(inode, new_size);
+		if (ret)
+			goto out_mutex;
+		/*
+		 * If we have a partial block after EOF we have to allocate
+		 * the entire block.
+		 */
+		if (partial_end)
+			max_blocks += 1;
+	}
+
+	if (max_blocks > 0) {
+
+		/* Now release the pages and zero block aligned part of pages*/
+		truncate_pagecache_range(inode, start, end - 1);
+		inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+
+		/* Wait for all existing dio workers; newcomers will block on i_mutex */
+		ext4_inode_block_unlocked_dio(inode);
+		inode_dio_wait(inode);
+
+		ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
+					     flags, mode);
+		if (ret)
+			goto out_dio;
+		/*
+		 * Remove entire range from the extent status tree.
+		 *
+		 * ext4_es_remove_extent(inode, lblk, max_blocks) is
+		 * NOT sufficient.  I'm not sure why this is the case,
+		 * but let's be conservative and remove the extent
+		 * status tree for the entire inode.  There should be
+		 * no outstanding delalloc extents thanks to the
+		 * filemap_write_and_wait_range() call above.
+		 */
+		ret = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
+		if (ret)
+			goto out_dio;
+	}
+	if (!partial_begin && !partial_end)
+		goto out_dio;
+
+	/*
+	 * In the worst case we have to write out two non-adjacent unwritten
+	 * blocks and update the inode.
+	 */
+	credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1;
+	if (ext4_should_journal_data(inode))
+		credits += 2;
+	handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
+	if (IS_ERR(handle)) {
+		ret = PTR_ERR(handle);
+		ext4_std_error(inode->i_sb, ret);
+		goto out_dio;
+	}
+
+	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+	if (new_size) {
+		ext4_update_inode_size(inode, new_size);
 	} else {
 		/*
-		 * Mark that we allocate beyond EOF so the subsequent truncate
-		 * can proceed even if the new size is the same as i_size.
-		 */
-		if (new_size > i_size_read(inode))
+		 * Mark that we allocate beyond EOF so the subsequent truncate
+		 * can proceed even if the new size is the same as i_size.
+		 */
+		if ((offset + len) > i_size_read(inode))
 			ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
 	}
+	ext4_mark_inode_dirty(handle, inode);
 
+	/* Zero out partial block at the edges of the range */
+	ret = ext4_zero_partial_blocks(handle, inode, offset, len);
+
+	if (file->f_flags & O_SYNC)
+		ext4_handle_sync(handle);
+
+	ext4_journal_stop(handle);
+out_dio:
+	ext4_inode_resume_unlocked_dio(inode);
+out_mutex:
+	mutex_unlock(&inode->i_mutex);
+	return ret;
 }
 
 /*
@@ -4491,22 +4916,34 @@
 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 {
 	struct inode *inode = file_inode(file);
-	handle_t *handle;
-	loff_t new_size;
+	loff_t new_size = 0;
 	unsigned int max_blocks;
 	int ret = 0;
-	int ret2 = 0;
-	int retries = 0;
 	int flags;
-	struct ext4_map_blocks map;
-	unsigned int credits, blkbits = inode->i_blkbits;
+	ext4_lblk_t lblk;
+	unsigned int blkbits = inode->i_blkbits;
+
+	/*
+	 * Encrypted inodes can't handle collapse range or insert
+	 * range since we would need to re-encrypt blocks with a
+	 * different IV or XTS tweak (which are based on the logical
+	 * block number).
+	 *
+	 * XXX It's not clear why zero range isn't working, but we'll
+	 * leave it disabled for encrypted inodes for now.  This is a
+	 * bug we should fix....
+	 */
+	if (ext4_encrypted_inode(inode) &&
+	    (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE)))
+		return -EOPNOTSUPP;
 
 	/* Return error if mode is not supported */
-	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
+		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))
 		return -EOPNOTSUPP;
 
 	if (mode & FALLOC_FL_PUNCH_HOLE)
-		return ext4_punch_hole(file, offset, len);
+		return ext4_punch_hole(inode, offset, len);
 
 	ret = ext4_convert_inline_data(inode);
 	if (ret)
@@ -4519,83 +4956,48 @@
 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
 		return -EOPNOTSUPP;
 
+	if (mode & FALLOC_FL_COLLAPSE_RANGE)
+		return ext4_collapse_range(inode, offset, len);
+
+	if (mode & FALLOC_FL_ZERO_RANGE)
+		return ext4_zero_range(file, offset, len, mode);
+
 	trace_ext4_fallocate_enter(inode, offset, len, mode);
-	map.m_lblk = offset >> blkbits;
+	lblk = offset >> blkbits;
 	/*
 	 * We can't just convert len to max_blocks because
 	 * If blocksize = 4096 offset = 3072 and len = 2048
 	 */
 	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
-		- map.m_lblk;
-	/*
-	 * credits to insert 1 extent into extent tree
-	 */
-	credits = ext4_chunk_trans_blocks(inode, max_blocks);
-	mutex_lock(&inode->i_mutex);
-	ret = inode_newsize_ok(inode, (len + offset));
-	if (ret) {
-		mutex_unlock(&inode->i_mutex);
-		trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
-		return ret;
-	}
-	flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT;
+		- lblk;
+
+	flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
 	if (mode & FALLOC_FL_KEEP_SIZE)
 		flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
-	/*
-	 * Don't normalize the request if it can fit in one extent so
-	 * that it doesn't get unnecessarily split into multiple
-	 * extents.
-	 */
-	if (len <= EXT_UNINIT_MAX_LEN << blkbits)
-		flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
 
-retry:
-	while (ret >= 0 && ret < max_blocks) {
-		map.m_lblk = map.m_lblk + ret;
-		map.m_len = max_blocks = max_blocks - ret;
-		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
-					    credits);
-		if (IS_ERR(handle)) {
-			ret = PTR_ERR(handle);
-			break;
-		}
-		ret = ext4_map_blocks(handle, inode, &map, flags);
-		if (ret <= 0) {
-#ifdef EXT4FS_DEBUG
-			ext4_warning(inode->i_sb,
-				     "inode #%lu: block %u: len %u: "
-				     "ext4_ext_map_blocks returned %d",
-				     inode->i_ino, map.m_lblk,
-				     map.m_len, ret);
-#endif
-			ext4_mark_inode_dirty(handle, inode);
-			ret2 = ext4_journal_stop(handle);
-			break;
-		}
-		if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
-						blkbits) >> blkbits))
-			new_size = offset + len;
-		else
-			new_size = ((loff_t) map.m_lblk + ret) << blkbits;
+	mutex_lock(&inode->i_mutex);
 
-		ext4_falloc_update_inode(inode, mode, new_size,
-					 (map.m_flags & EXT4_MAP_NEW));
-		ext4_mark_inode_dirty(handle, inode);
-		if ((file->f_flags & O_SYNC) && ret >= max_blocks)
-			ext4_handle_sync(handle);
-		ret2 = ext4_journal_stop(handle);
-		if (ret2)
-			break;
+	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+	     offset + len > i_size_read(inode)) {
+		new_size = offset + len;
+		ret = inode_newsize_ok(inode, new_size);
+		if (ret)
+			goto out;
 	}
-	if (ret == -ENOSPC &&
-			ext4_should_retry_alloc(inode->i_sb, &retries)) {
-		ret = 0;
-		goto retry;
+
+	ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
+				     flags, mode);
+	if (ret)
+		goto out;
+
+	if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
+		ret = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal,
+						EXT4_I(inode)->i_sync_tid);
 	}
+out:
 	mutex_unlock(&inode->i_mutex);
-	trace_ext4_fallocate_exit(inode, offset, max_blocks,
-				ret > 0 ? ret2 : ret);
-	return ret > 0 ? ret2 : ret;
+	trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
+	return ret;
 }
 
 /*
@@ -4608,10 +5010,9 @@
  * function, to convert the fallocated extents after IO is completed.
  * Returns 0 on success.
  */
-int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
-				    ssize_t len)
+int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
+				   loff_t offset, ssize_t len)
 {
-	handle_t *handle;
 	unsigned int max_blocks;
 	int ret = 0;
 	int ret2 = 0;
@@ -4626,16 +5027,32 @@
 	max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
 		      map.m_lblk);
 	/*
-	 * credits to insert 1 extent into extent tree
+	 * This is somewhat ugly but the idea is clear: when a transaction is
+	 * reserved, everything goes into it. Otherwise we'd rather start
+	 * several smaller transactions, converting each extent separately.
 	 */
-	credits = ext4_chunk_trans_blocks(inode, max_blocks);
+	if (handle) {
+		handle = ext4_journal_start_reserved(handle,
+						     EXT4_HT_EXT_CONVERT);
+		if (IS_ERR(handle))
+			return PTR_ERR(handle);
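+		/* credits == 0 makes the loop below reuse this handle */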
+		credits = 0;
+	} else {
+		/*
+		 * credits to insert 1 extent into extent tree
+		 */
+		credits = ext4_chunk_trans_blocks(inode, max_blocks);
+	}
 	while (ret >= 0 && ret < max_blocks) {
 		map.m_lblk += ret;
 		map.m_len = (max_blocks -= ret);
-		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits);
-		if (IS_ERR(handle)) {
-			ret = PTR_ERR(handle);
-			break;
+		if (credits) {
+			handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
+						    credits);
+			if (IS_ERR(handle)) {
+				ret = PTR_ERR(handle);
+				break;
+			}
 		}
 		ret = ext4_map_blocks(handle, inode, &map,
 				      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
@@ -4646,10 +5063,13 @@
 				     inode->i_ino, map.m_lblk,
 				     map.m_len, ret);
 		ext4_mark_inode_dirty(handle, inode);
-		ret2 = ext4_journal_stop(handle);
-		if (ret <= 0 || ret2 )
+		if (credits)
+			ret2 = ext4_journal_stop(handle);
+		if (ret <= 0 || ret2)
 			break;
 	}
+	if (!credits)
+		ret2 = ext4_journal_stop(handle);
 	return ret > 0 ? ret2 : ret;
 }
 
@@ -4752,6 +5172,12 @@
 			return error;
 	}
 
+	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
+		error = ext4_ext_precache(inode);
+		if (error)
+			return error;
+	}
+
 	/* fallback to generic here if not in extents fmt */
 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
 		return generic_block_fiemap(inode, fieinfo, start, len,
@@ -4779,6 +5205,519 @@
 		error = ext4_fill_fiemap_extents(inode, start_blk,
 						 len_blks, fieinfo);
 	}
-
+	ext4_es_lru_add(inode);
 	return error;
 }
+
+/*
+ * ext4_access_path:
+ * Function to access the path buffer for marking it dirty.
+ * It also checks if there are sufficient credits left in the journal handle
+ * to update path.
+ */
+static int
+ext4_access_path(handle_t *handle, struct inode *inode,
+		struct ext4_ext_path *path)
+{
+	int credits, err;
+
+	if (!ext4_handle_valid(handle))
+		return 0;
+
+	/*
+	 * Check if need to extend journal credits
+	 * 3 for leaf, sb, and inode plus 2 (bmap and group
+	 * descriptor) for each block group; assume two block
+	 * groups
+	 */
+	if (handle->h_buffer_credits < 7) {
+		credits = ext4_writepage_trans_blocks(inode);
+		err = ext4_ext_truncate_extend_restart(handle, inode, credits);
+		/* EAGAIN is success */
+		if (err && err != -EAGAIN)
+			return err;
+	}
+
+	err = ext4_ext_get_access(handle, inode, path);
+	return err;
+}
+
+/*
+ * ext4_ext_shift_path_extents:
+ * Shift the extents of a path structure lying between path[depth].p_ext
+ * and EXT_LAST_EXTENT(path[depth].p_hdr) downwards, by subtracting shift
+ * from the starting block of each extent.
+ */
+static int
+ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
+			    struct inode *inode, handle_t *handle,
+			    ext4_lblk_t *start)
+{
+	int depth, err = 0;
+	struct ext4_extent *ex_start, *ex_last;
+	bool update = false;
+
+	depth = path->p_depth;
+
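+	/*
+	 * Walk from the leaf level towards the root: shift all extents
+	 * in the leaf first, then update an index block only when the
+	 * first entry at the level below it has changed.
+	 */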
+	while (depth >= 0) {
+		if (depth == path->p_depth) {
+			ex_start = path[depth].p_ext;
+			if (!ex_start)
+				return -EIO;
+
+			ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);
+			if (!ex_last)
+				return -EIO;
+
+			err = ext4_access_path(handle, inode, path + depth);
+			if (err)
+				goto out;
+
+			if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr))
+				update = 1;
+
+			*start = le32_to_cpu(ex_last->ee_block) +
+				ext4_ext_get_actual_len(ex_last);
+
+			while (ex_start <= ex_last) {
+				le32_add_cpu(&ex_start->ee_block, -shift);
+				/* Try to merge to the left. */
+				if ((ex_start >
+				     EXT_FIRST_EXTENT(path[depth].p_hdr)) &&
+				    ext4_ext_try_to_merge_right(inode,
+							path, ex_start - 1))
+					ex_last--;
+				else
+					ex_start++;
+			}
+			err = ext4_ext_dirty(handle, inode, path + depth);
+			if (err)
+				goto out;
+
+			if (--depth < 0 || !update)
+				break;
+		}
+
+		/* Update index too */
+		err = ext4_access_path(handle, inode, path + depth);
+		if (err)
+			goto out;
+
+		le32_add_cpu(&path[depth].p_idx->ei_block, -shift);
+		err = ext4_ext_dirty(handle, inode, path + depth);
+		if (err)
+			goto out;
+
+		/* we are done if current index is not a starting index */
+		if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr))
+			break;
+
+		depth--;
+	}
+
+out:
+	return err;
+}
+
+/*
+ * ext4_ext_shift_extents:
+ * All extents that lie in the range from start to the last allocated
+ * block of the file are shifted downwards by shift blocks.
+ * Returns 0 on success, an error code otherwise.
+ */
+static int
+ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
+		       ext4_lblk_t start, ext4_lblk_t shift)
+{
+	struct ext4_ext_path *path;
+	int ret = 0, depth;
+	struct ext4_extent *extent;
+	ext4_lblk_t stop_block;
+	ext4_lblk_t ex_start, ex_end;
+
+	/* Let path point to the last extent */
+	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 0);
+	if (IS_ERR(path))
+		return PTR_ERR(path);
+
+	depth = path->p_depth;
+	extent = path[depth].p_ext;
+	if (!extent)
+		goto out;
+
+	stop_block = le32_to_cpu(extent->ee_block) +
+			ext4_ext_get_actual_len(extent);
+
+	/* Nothing to shift if the hole is at the end of the file */
+	if (start >= stop_block)
+		goto out;
+
+	/*
+	 * Don't start shifting extents until we make sure the hole is big
+	 * enough to accommodate the shift.
+	 */
+	path = ext4_find_extent(inode, start - 1, &path, 0);
+	if (IS_ERR(path))
+		return PTR_ERR(path);
+	depth = path->p_depth;
+	extent =  path[depth].p_ext;
+	if (extent) {
+		ex_start = le32_to_cpu(extent->ee_block);
+		ex_end = le32_to_cpu(extent->ee_block) +
+			ext4_ext_get_actual_len(extent);
+	} else {
+		ex_start = 0;
+		ex_end = 0;
+	}
+
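+	/*
+	 * The hole to the left of start must be at least shift blocks
+	 * wide, and the shift may not reach below block 0; otherwise the
+	 * shifted extents would collide with existing ones.
+	 */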
+	if ((start == ex_start && shift > ex_start) ||
+	    (shift > start - ex_end))
+		return -EINVAL;
+
+	/* It's safe to start updating extents */
+	while (start < stop_block) {
+		path = ext4_find_extent(inode, start, &path, 0);
+		if (IS_ERR(path))
+			return PTR_ERR(path);
+		depth = path->p_depth;
+		extent = path[depth].p_ext;
+		if (!extent) {
+			EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
+					 (unsigned long) start);
+			return -EIO;
+		}
+		if (start > le32_to_cpu(extent->ee_block)) {
+			/* Hole, move to the next extent */
+			if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) {
+				path[depth].p_ext++;
+			} else {
+				start = ext4_ext_next_allocated_block(path);
+				continue;
+			}
+		}
+		ret = ext4_ext_shift_path_extents(path, shift, inode,
+				handle, &start);
+		if (ret)
+			break;
+	}
+out:
+	ext4_ext_drop_refs(path);
+	kfree(path);
+	return ret;
+}
+
+/*
+ * ext4_collapse_range:
+ * This implements fallocate's collapse-range functionality for ext4.
+ * Returns 0 on success, non-zero on error.
+ */
+int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
+{
+	struct super_block *sb = inode->i_sb;
+	ext4_lblk_t punch_start, punch_stop;
+	handle_t *handle;
+	unsigned int credits;
+	loff_t new_size, ioffset;
+	loff_t oldsize = i_size_read(inode);
+	int ret;
+
+	/* Collapse range works only on fs block size aligned offsets. */
+	if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) ||
+	    len & (EXT4_CLUSTER_SIZE(sb) - 1))
+		return -EINVAL;
+
+	if (!S_ISREG(inode->i_mode))
+		return -EINVAL;
+
+	trace_ext4_collapse_range(inode, offset, len);
+
+	punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
+	punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);
+
+	/* Call ext4_force_commit to flush all data in case of data=journal. */
+	if (ext4_should_journal_data(inode)) {
+		ret = ext4_force_commit(inode->i_sb);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * Need to round the offset down to a page-size boundary, since the
+	 * page size may be larger than the block size.
+	 */
+	ioffset = round_down(offset, PAGE_SIZE);
+
+	/* Write out all dirty pages */
+	ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
+					   LLONG_MAX);
+	if (ret)
+		return ret;
+
+	/* Take mutex lock */
+	mutex_lock(&inode->i_mutex);
+
+	/*
+	 * The collapsed range must not reach or cross EOF; that would
+	 * effectively be a truncate operation.
+	 */
+	if (offset + len >= oldsize) {
+		ret = -EINVAL;
+		goto out_mutex;
+	}
+
+	/* Currently just for extent based files */
+	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+		ret = -EOPNOTSUPP;
+		goto out_mutex;
+	}
+
+	truncate_pagecache(inode, oldsize, ioffset);
+
+	/* Wait for existing dio to complete */
+	ext4_inode_block_unlocked_dio(inode);
+	inode_dio_wait(inode);
+
+	credits = ext4_writepage_trans_blocks(inode);
+	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
+	if (IS_ERR(handle)) {
+		ret = PTR_ERR(handle);
+		goto out_dio;
+	}
+
+	down_write(&EXT4_I(inode)->i_data_sem);
+	ext4_discard_preallocations(inode);
+
+	ret = ext4_es_remove_extent(inode, punch_start,
+				    EXT_MAX_BLOCKS - punch_start);
+	if (ret) {
+		up_write(&EXT4_I(inode)->i_data_sem);
+		goto out_stop;
+	}
+
+	ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
+	if (ret) {
+		up_write(&EXT4_I(inode)->i_data_sem);
+		goto out_stop;
+	}
+	ext4_discard_preallocations(inode);
+
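+	/* Shift everything beyond the hole down over the collapsed range. */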
+	ret = ext4_ext_shift_extents(inode, handle, punch_stop,
+				     punch_stop - punch_start);
+	if (ret) {
+		up_write(&EXT4_I(inode)->i_data_sem);
+		goto out_stop;
+	}
+
+	new_size = i_size_read(inode) - len;
+	i_size_write(inode, new_size);
+	EXT4_I(inode)->i_disksize = new_size;
+
+	up_write(&EXT4_I(inode)->i_data_sem);
+	if (IS_SYNC(inode))
+		ext4_handle_sync(handle);
+	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+	ext4_mark_inode_dirty(handle, inode);
+
+out_stop:
+	ext4_journal_stop(handle);
+out_dio:
+	ext4_inode_resume_unlocked_dio(inode);
+out_mutex:
+	mutex_unlock(&inode->i_mutex);
+	return ret;
+}
+
+/**
+ * ext4_swap_extents - Swap extents between two inodes
+ *
+ * @inode1:	First inode
+ * @inode2:	Second inode
+ * @lblk1:	Start block for first inode
+ * @lblk2:	Start block for second inode
+ * @count:	Number of blocks to swap
+ * @unwritten:	Mark second inode's extents as unwritten after swap
+ * @erp:	Pointer to save error value
+ *
+ * This helper routine does exactly what its name promises: swap extents.
+ * All other concerns, such as page-cache locking consistency, bh mapping
+ * consistency and copying of the extents' data, must be handled by the
+ * caller.
+ * Locking:
+ * 		i_mutex is held for both inodes
+ * 		i_data_sem is locked for write for both inodes
+ * Assumptions:
+ *		All pages from requested range are locked for both inodes
+ */
+int
+ext4_swap_extents(handle_t *handle, struct inode *inode1,
+		     struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2,
+		  ext4_lblk_t count, int unwritten, int *erp)
+{
+	struct ext4_ext_path *path1 = NULL;
+	struct ext4_ext_path *path2 = NULL;
+	int replaced_count = 0;
+
+	BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem));
+	BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem));
+	BUG_ON(!mutex_is_locked(&inode1->i_mutex));
+	BUG_ON(!mutex_is_locked(&inode2->i_mutex));
+
+	*erp = ext4_es_remove_extent(inode1, lblk1, count);
+	if (unlikely(*erp))
+		return 0;
+	*erp = ext4_es_remove_extent(inode2, lblk2, count);
+	if (unlikely(*erp))
+		return 0;
+
+	while (count) {
+		struct ext4_extent *ex1, *ex2, tmp_ex;
+		ext4_lblk_t e1_blk, e2_blk;
+		int e1_len, e2_len, len;
+		int split = 0;
+
+		path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
+		if (unlikely(IS_ERR(path1))) {
+			*erp = PTR_ERR(path1);
+			path1 = NULL;
+		finish:
+			count = 0;
+			goto repeat;
+		}
+		path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
+		if (unlikely(IS_ERR(path2))) {
+			*erp = PTR_ERR(path2);
+			path2 = NULL;
+			goto finish;
+		}
+		ex1 = path1[path1->p_depth].p_ext;
+		ex2 = path2[path2->p_depth].p_ext;
+		/* Do we have something to swap? */
+		if (unlikely(!ex2 || !ex1))
+			goto finish;
+
+		e1_blk = le32_to_cpu(ex1->ee_block);
+		e2_blk = le32_to_cpu(ex2->ee_block);
+		e1_len = ext4_ext_get_actual_len(ex1);
+		e2_len = ext4_ext_get_actual_len(ex2);
+
+		/* Hole handling */
+		if (!in_range(lblk1, e1_blk, e1_len) ||
+		    !in_range(lblk2, e2_blk, e2_len)) {
+			ext4_lblk_t next1, next2;
+
+			/* if hole after extent, then go to next extent */
+			next1 = ext4_ext_next_allocated_block(path1);
+			next2 = ext4_ext_next_allocated_block(path2);
+			/* If hole before extent, then shift to that extent */
+			if (e1_blk > lblk1)
+				next1 = e1_blk;
+			if (e2_blk > lblk2)
+				next2 = e2_blk;
+			/* Do we have something to swap? */
+			if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
+				goto finish;
+			/* Move to the rightmost boundary */
+			len = next1 - lblk1;
+			if (len < next2 - lblk2)
+				len = next2 - lblk2;
+			if (len > count)
+				len = count;
+			lblk1 += len;
+			lblk2 += len;
+			count -= len;
+			goto repeat;
+		}
+
+		/* Prepare left boundary */
+		if (e1_blk < lblk1) {
+			split = 1;
+			*erp = ext4_force_split_extent_at(handle, inode1,
+						&path1, lblk1, 0);
+			if (unlikely(*erp))
+				goto finish;
+		}
+		if (e2_blk < lblk2) {
+			split = 1;
+			*erp = ext4_force_split_extent_at(handle, inode2,
+						&path2,  lblk2, 0);
+			if (unlikely(*erp))
+				goto finish;
+		}
+		/* ext4_split_extent_at() may result in a leaf extent split,
+		 * so the paths must be revalidated. */
+		if (split)
+			goto repeat;
+
+		/* Prepare right boundary */
+		len = count;
+		if (len > e1_blk + e1_len - lblk1)
+			len = e1_blk + e1_len - lblk1;
+		if (len > e2_blk + e2_len - lblk2)
+			len = e2_blk + e2_len - lblk2;
+
+		if (len != e1_len) {
+			split = 1;
+			*erp = ext4_force_split_extent_at(handle, inode1,
+						&path1, lblk1 + len, 0);
+			if (unlikely(*erp))
+				goto finish;
+		}
+		if (len != e2_len) {
+			split = 1;
+			*erp = ext4_force_split_extent_at(handle, inode2,
+						&path2, lblk2 + len, 0);
+			if (*erp)
+				goto finish;
+		}
+		/* ext4_split_extent_at() may result in a leaf extent split,
+		 * so the paths must be revalidated. */
+		if (split)
+			goto repeat;
+
+		BUG_ON(e2_len != e1_len);
+		*erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
+		if (unlikely(*erp))
+			goto finish;
+		*erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
+		if (unlikely(*erp))
+			goto finish;
+
+		/* Both extents are fully inside boundaries. Swap it now */
+		tmp_ex = *ex1;
+		ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
+		ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
+		ex1->ee_len = cpu_to_le16(e2_len);
+		ex2->ee_len = cpu_to_le16(e1_len);
+		if (unwritten)
+			ext4_ext_mark_unwritten(ex2);
+		if (ext4_ext_is_unwritten(&tmp_ex))
+			ext4_ext_mark_unwritten(ex1);
+
+		ext4_ext_try_to_merge(handle, inode2, path2, ex2);
+		ext4_ext_try_to_merge(handle, inode1, path1, ex1);
+		*erp = ext4_ext_dirty(handle, inode2, path2 +
+				      path2->p_depth);
+		if (unlikely(*erp))
+			goto finish;
+		*erp = ext4_ext_dirty(handle, inode1, path1 +
+				      path1->p_depth);
+		/*
+		 * This looks scary, since the second inode already points to
+		 * the new blocks and was successfully dirtied.  Luckily an
+		 * error here can only be a journal error, in which case the
+		 * full transaction will be aborted anyway.
+		 */
+		if (unlikely(*erp))
+			goto finish;
+		lblk1 += len;
+		lblk2 += len;
+		replaced_count += len;
+		count -= len;
+
+	repeat:
+		ext4_ext_drop_refs(path1);
+		kfree(path1);
+		ext4_ext_drop_refs(path2);
+		kfree(path2);
+		path1 = path2 = NULL;
+	}
+	return replaced_count;
+}
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index 1fefeb7..4603652 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -10,9 +10,11 @@
  * Ext4 extents status tree core functions.
  */
 #include <linux/rbtree.h>
+#include <linux/list_sort.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include "ext4.h"
 #include "extents_status.h"
-#include "ext4_extents.h"
 
 #include <trace/events/ext4.h>
 
@@ -147,6 +149,8 @@
 			      ext4_lblk_t end);
 static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
 				       int nr_to_scan);
+static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
+			    struct ext4_inode_info *locked_ei);
 
 int __init ext4_init_es(void)
 {
@@ -182,7 +186,7 @@
 	while (node) {
 		struct extent_status *es;
 		es = rb_entry(node, struct extent_status, rb_node);
-		printk(KERN_DEBUG " [%u/%u) %llu %llx",
+		printk(KERN_DEBUG " [%u/%u) %llu %x",
 		       es->es_lblk, es->es_len,
 		       ext4_es_pblock(es), ext4_es_status(es));
 		node = rb_next(node);
@@ -260,7 +264,7 @@
 	if (tree->cache_es) {
 		es1 = tree->cache_es;
 		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
-			es_debug("%u cached by [%u/%u) %llu %llx\n",
+			es_debug("%u cached by [%u/%u) %llu %x\n",
 				 lblk, es1->es_lblk, es1->es_len,
 				 ext4_es_pblock(es1), ext4_es_status(es1));
 			goto out;
@@ -291,7 +295,6 @@
 
 	read_unlock(&EXT4_I(inode)->i_es_lock);
 
-	ext4_es_lru_add(inode);
 	trace_ext4_es_find_delayed_extent_range_exit(inode, es);
 }
 
@@ -312,19 +315,27 @@
 	 */
 	if (!ext4_es_is_delayed(es)) {
 		EXT4_I(inode)->i_es_lru_nr++;
-		percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
+		percpu_counter_inc(&EXT4_SB(inode->i_sb)->
+					s_es_stats.es_stats_lru_cnt);
 	}
 
+	EXT4_I(inode)->i_es_all_nr++;
+	percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
+
 	return es;
 }
 
 static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
 {
+	EXT4_I(inode)->i_es_all_nr--;
+	percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
+
 	/* Decrease the lru counter when this es is not delayed */
 	if (!ext4_es_is_delayed(es)) {
 		BUG_ON(EXT4_I(inode)->i_es_lru_nr == 0);
 		EXT4_I(inode)->i_es_lru_nr--;
-		percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
+		percpu_counter_dec(&EXT4_SB(inode->i_sb)->
+					s_es_stats.es_stats_lru_cnt);
 	}
 
 	kmem_cache_free(ext4_es_cachep, es);
@@ -343,8 +354,14 @@
 	if (ext4_es_status(es1) != ext4_es_status(es2))
 		return 0;
 
-	if (((__u64) es1->es_len) + es2->es_len > 0xFFFFFFFFULL)
+	if (((__u64) es1->es_len) + es2->es_len > EXT_MAX_BLOCKS) {
+		pr_warn("ES assertion failed when merging extents. "
+			"The sum of lengths of es1 (%d) and es2 (%d) "
+			"is bigger than allowed file size (%d)\n",
+			es1->es_len, es2->es_len, EXT_MAX_BLOCKS);
+		WARN_ON(1);
 		return 0;
+	}
 
 	if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk)
 		return 0;
@@ -407,6 +424,8 @@
 }
 
 #ifdef ES_AGGRESSIVE_TEST
+#include "ext4_extents.h"	/* Needed when ES_AGGRESSIVE_TEST is defined */
+
 static void ext4_es_insert_extent_ext_check(struct inode *inode,
 					    struct extent_status *es)
 {
@@ -417,7 +436,7 @@
 	unsigned short ee_len;
 	int depth, ee_status, es_status;
 
-	path = ext4_ext_find_extent(inode, es->es_lblk, NULL);
+	path = ext4_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE);
 	if (IS_ERR(path))
 		return;
 
@@ -430,7 +449,7 @@
 		ee_start = ext4_ext_pblock(ex);
 		ee_len = ext4_ext_get_actual_len(ex);
 
-		ee_status = ext4_ext_is_uninitialized(ex) ? 1 : 0;
+		ee_status = ext4_ext_is_unwritten(ex) ? 1 : 0;
 		es_status = ext4_es_is_unwritten(es) ? 1 : 0;
 
 		/*
@@ -439,11 +458,11 @@
 		 */
 		if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) {
 			if (in_range(es->es_lblk, ee_block, ee_len)) {
-				pr_warn("ES insert assertation failed for "
+				pr_warn("ES insert assertion failed for "
 					"inode: %lu we can find an extent "
 					"at block [%d/%d/%llu/%c], but we "
-					"want to add an delayed/hole extent "
-					"[%d/%d/%llu/%llx]\n",
+					"want to add a delayed/hole extent "
+					"[%d/%d/%llu/%x]\n",
 					inode->i_ino, ee_block, ee_len,
 					ee_start, ee_status ? 'u' : 'w',
 					es->es_lblk, es->es_len,
@@ -458,7 +477,7 @@
 		 */
 		if (es->es_lblk < ee_block ||
 		    ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) {
-			pr_warn("ES insert assertation failed for inode: %lu "
+			pr_warn("ES insert assertion failed for inode: %lu "
 				"ex_status [%d/%d/%llu/%c] != "
 				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
 				ee_block, ee_len, ee_start,
@@ -468,7 +487,7 @@
 		}
 
 		if (ee_status ^ es_status) {
-			pr_warn("ES insert assertation failed for inode: %lu "
+			pr_warn("ES insert assertion failed for inode: %lu "
 				"ex_status [%d/%d/%llu/%c] != "
 				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
 				ee_block, ee_len, ee_start,
@@ -481,19 +500,17 @@
 		 * that we don't want to add an written/unwritten extent.
 		 */
 		if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) {
-			pr_warn("ES insert assertation failed for inode: %lu "
+			pr_warn("ES insert assertion failed for inode: %lu "
 				"can't find an extent at block %d but we want "
-				"to add an written/unwritten extent "
-				"[%d/%d/%llu/%llx]\n", inode->i_ino,
+				"to add a written/unwritten extent "
+				"[%d/%d/%llu/%x]\n", inode->i_ino,
 				es->es_lblk, es->es_lblk, es->es_len,
 				ext4_es_pblock(es), ext4_es_status(es));
 		}
 	}
 out:
-	if (path) {
-		ext4_ext_drop_refs(path);
-		kfree(path);
-	}
+	ext4_ext_drop_refs(path);
+	kfree(path);
 }
 
 static void ext4_es_insert_extent_ind_check(struct inode *inode,
@@ -519,21 +536,21 @@
 			 * We want to add a delayed/hole extent but this
 			 * block has been allocated.
 			 */
-			pr_warn("ES insert assertation failed for inode: %lu "
+			pr_warn("ES insert assertion failed for inode: %lu "
 				"We can find blocks but we want to add a "
-				"delayed/hole extent [%d/%d/%llu/%llx]\n",
+				"delayed/hole extent [%d/%d/%llu/%x]\n",
 				inode->i_ino, es->es_lblk, es->es_len,
 				ext4_es_pblock(es), ext4_es_status(es));
 			return;
 		} else if (ext4_es_is_written(es)) {
 			if (retval != es->es_len) {
-				pr_warn("ES insert assertation failed for "
+				pr_warn("ES insert assertion failed for "
 					"inode: %lu retval %d != es_len %d\n",
 					inode->i_ino, retval, es->es_len);
 				return;
 			}
 			if (map.m_pblk != ext4_es_pblock(es)) {
-				pr_warn("ES insert assertation failed for "
+				pr_warn("ES insert assertion failed for "
 					"inode: %lu m_pblk %llu != "
 					"es_pblk %llu\n",
 					inode->i_ino, map.m_pblk,
@@ -549,9 +566,9 @@
 		}
 	} else if (retval == 0) {
 		if (ext4_es_is_written(es)) {
-			pr_warn("ES insert assertation failed for inode: %lu "
+			pr_warn("ES insert assertion failed for inode: %lu "
 				"We can't find the block but we want to add "
-				"an written extent [%d/%d/%llu/%llx]\n",
+				"a written extent [%d/%d/%llu/%x]\n",
 				inode->i_ino, es->es_lblk, es->es_len,
 				ext4_es_pblock(es), ext4_es_status(es));
 			return;
@@ -632,22 +649,20 @@
 }
 
 /*
- * ext4_es_insert_extent() adds a space to a extent status tree.
- *
- * ext4_es_insert_extent is called by ext4_da_write_begin and
- * ext4_es_remove_extent.
+ * ext4_es_insert_extent() adds information to an inode's extent
+ * status tree.
  *
  * Return 0 on success, error code on failure.
  */
 int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
 			  ext4_lblk_t len, ext4_fsblk_t pblk,
-			  unsigned long long status)
+			  unsigned int status)
 {
 	struct extent_status newes;
 	ext4_lblk_t end = lblk + len - 1;
 	int err = 0;
 
-	es_debug("add [%u/%u) %llu %llx to extent status tree of inode %lu\n",
+	es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
 		 lblk, len, pblk, status, inode->i_ino);
 
 	if (!len)
@@ -665,8 +680,7 @@
 
 	newes.es_lblk = lblk;
 	newes.es_len = len;
-	ext4_es_store_pblock(&newes, pblk);
-	ext4_es_store_status(&newes, status);
+	ext4_es_store_pblock_status(&newes, pblk, status);
 	trace_ext4_es_insert_extent(inode, &newes);
 
 	ext4_es_insert_extent_check(inode, &newes);
@@ -675,18 +689,54 @@
 	err = __es_remove_extent(inode, lblk, end);
 	if (err != 0)
 		goto error;
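+	/*
+	 * If the allocation fails, shrink other inodes' extent status
+	 * trees and retry.  A non-delayed entry is purely a cache, so
+	 * failing to insert one is harmless; delayed entries track
+	 * delalloc reservations and must not be dropped, hence the
+	 * error is only cleared for non-delayed extents.
+	 */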
+retry:
 	err = __es_insert_extent(inode, &newes);
+	if (err == -ENOMEM && __ext4_es_shrink(EXT4_SB(inode->i_sb), 1,
+					       EXT4_I(inode)))
+		goto retry;
+	if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
+		err = 0;
 
 error:
 	write_unlock(&EXT4_I(inode)->i_es_lock);
 
-	ext4_es_lru_add(inode);
 	ext4_es_print_tree(inode);
 
 	return err;
 }
 
 /*
+ * ext4_es_cache_extent() inserts information into the extent status
+ * tree if and only if there isn't information about the range in
+ * question already.
+ */
+void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
+			  ext4_lblk_t len, ext4_fsblk_t pblk,
+			  unsigned int status)
+{
+	struct extent_status *es;
+	struct extent_status newes;
+	ext4_lblk_t end = lblk + len - 1;
+
+	newes.es_lblk = lblk;
+	newes.es_len = len;
+	ext4_es_store_pblock_status(&newes, pblk, status);
+	trace_ext4_es_cache_extent(inode, &newes);
+
+	if (!len)
+		return;
+
+	BUG_ON(end < lblk);
+
+	write_lock(&EXT4_I(inode)->i_es_lock);
+
+	es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
+	if (!es || es->es_lblk > end)
+		__es_insert_extent(inode, &newes);
+	write_unlock(&EXT4_I(inode)->i_es_lock);
+}
+
+/*
  * ext4_es_lookup_extent() looks up an extent in extent status tree.
  *
  * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks.
@@ -697,6 +747,7 @@
 			  struct extent_status *es)
 {
 	struct ext4_es_tree *tree;
+	struct ext4_es_stats *stats;
 	struct extent_status *es1 = NULL;
 	struct rb_node *node;
 	int found = 0;
@@ -733,16 +784,19 @@
 	}
 
 out:
+	stats = &EXT4_SB(inode->i_sb)->s_es_stats;
 	if (found) {
 		BUG_ON(!es1);
 		es->es_lblk = es1->es_lblk;
 		es->es_len = es1->es_len;
 		es->es_pblk = es1->es_pblk;
+		stats->es_stats_cache_hits++;
+	} else {
+		stats->es_stats_cache_misses++;
 	}
 
 	read_unlock(&EXT4_I(inode)->i_es_lock);
 
-	ext4_es_lru_add(inode);
 	trace_ext4_es_lookup_extent_exit(inode, es, found);
 	return found;
 }
@@ -756,8 +810,10 @@
 	struct extent_status orig_es;
 	ext4_lblk_t len1, len2;
 	ext4_fsblk_t block;
-	int err = 0;
+	int err;
 
+retry:
+	err = 0;
 	es = __es_tree_search(&tree->root, lblk);
 	if (!es)
 		goto out;
@@ -781,17 +837,21 @@
 
 			newes.es_lblk = end + 1;
 			newes.es_len = len2;
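+			/*
+			 * Poison the physical block with a recognizable
+			 * sentinel; it is only meaningful, and recomputed
+			 * below, for written or unwritten extents.
+			 */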
+			block = 0x7FDEADBEEFULL;
 			if (ext4_es_is_written(&orig_es) ||
-			    ext4_es_is_unwritten(&orig_es)) {
+			    ext4_es_is_unwritten(&orig_es))
 				block = ext4_es_pblock(&orig_es) +
 					orig_es.es_len - len2;
-				ext4_es_store_pblock(&newes, block);
-			}
-			ext4_es_store_status(&newes, ext4_es_status(&orig_es));
+			ext4_es_store_pblock_status(&newes, block,
+						    ext4_es_status(&orig_es));
 			err = __es_insert_extent(inode, &newes);
 			if (err) {
 				es->es_lblk = orig_es.es_lblk;
 				es->es_len = orig_es.es_len;
+				if ((err == -ENOMEM) &&
+				    __ext4_es_shrink(EXT4_SB(inode->i_sb), 1,
+						     EXT4_I(inode)))
+					goto retry;
 				goto out;
 			}
 		} else {
@@ -869,85 +929,312 @@
 	return err;
 }
 
-int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex)
+static int ext4_inode_touch_time_cmp(void *priv, struct list_head *a,
+				     struct list_head *b)
 {
-	ext4_lblk_t  ee_block;
-	ext4_fsblk_t ee_pblock;
-	unsigned int ee_len;
+	struct ext4_inode_info *eia, *eib;
+	eia = list_entry(a, struct ext4_inode_info, i_es_lru);
+	eib = list_entry(b, struct ext4_inode_info, i_es_lru);
 
-	ee_block  = le32_to_cpu(ex->ee_block);
-	ee_len    = ext4_ext_get_actual_len(ex);
-	ee_pblock = ext4_ext_pblock(ex);
-
-	if (ee_len == 0)
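+	/*
+	 * Sort order: non-precached inodes come before precached ones;
+	 * within each class, sort by i_touch_when, least recently touched
+	 * first, so the LRU walk reclaims cold inodes first.
+	 */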
+	if (ext4_test_inode_state(&eia->vfs_inode, EXT4_STATE_EXT_PRECACHED) &&
+	    !ext4_test_inode_state(&eib->vfs_inode, EXT4_STATE_EXT_PRECACHED))
+		return 1;
+	if (!ext4_test_inode_state(&eia->vfs_inode, EXT4_STATE_EXT_PRECACHED) &&
+	    ext4_test_inode_state(&eib->vfs_inode, EXT4_STATE_EXT_PRECACHED))
+		return -1;
+	if (eia->i_touch_when == eib->i_touch_when)
 		return 0;
-
-	return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
-				     EXTENT_STATUS_WRITTEN);
+	if (time_after(eia->i_touch_when, eib->i_touch_when))
+		return 1;
+	else
+		return -1;
 }
 
-static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
+static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
+			    struct ext4_inode_info *locked_ei)
+{
+	struct ext4_inode_info *ei;
+	struct ext4_es_stats *es_stats;
+	struct list_head *cur, *tmp;
+	LIST_HEAD(skipped);
+	ktime_t start_time;
+	u64 scan_time;
+	int nr_shrunk = 0;
+	int retried = 0, skip_precached = 1, nr_skipped = 0;
+
+	es_stats = &sbi->s_es_stats;
+	start_time = ktime_get();
+	spin_lock(&sbi->s_es_lru_lock);
+
+retry:
+	list_for_each_safe(cur, tmp, &sbi->s_es_lru) {
+		int shrunk;
+
+		/*
+		 * If we have already reclaimed all extents from extent
+		 * status tree, just stop the loop immediately.
+		 */
+		if (percpu_counter_read_positive(
+				&es_stats->es_stats_lru_cnt) == 0)
+			break;
+
+		ei = list_entry(cur, struct ext4_inode_info, i_es_lru);
+
+		/*
+		 * Skip the inode that is newer than the last_sorted
+		 * time.  Normally we try hard to avoid shrinking
+		 * precached inodes, but we will as a last resort.
+		 */
+		if ((es_stats->es_stats_last_sorted < ei->i_touch_when) ||
+		    (skip_precached && ext4_test_inode_state(&ei->vfs_inode,
+						EXT4_STATE_EXT_PRECACHED))) {
+			nr_skipped++;
+			list_move_tail(cur, &skipped);
+			continue;
+		}
+
+		if (ei->i_es_lru_nr == 0 || ei == locked_ei ||
+		    !write_trylock(&ei->i_es_lock))
+			continue;
+
+		shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan);
+		if (ei->i_es_lru_nr == 0)
+			list_del_init(&ei->i_es_lru);
+		write_unlock(&ei->i_es_lock);
+
+		nr_shrunk += shrunk;
+		nr_to_scan -= shrunk;
+		if (nr_to_scan == 0)
+			break;
+	}
+
+	/* Move the newer inodes into the tail of the LRU list. */
+	list_splice_tail(&skipped, &sbi->s_es_lru);
+	INIT_LIST_HEAD(&skipped);
+
+	/*
+	 * If we skipped any inodes, and we weren't able to make any
+	 * forward progress, sort the list and try again.
+	 */
+	if ((nr_shrunk == 0) && nr_skipped && !retried) {
+		retried++;
+		list_sort(NULL, &sbi->s_es_lru, ext4_inode_touch_time_cmp);
+		es_stats->es_stats_last_sorted = jiffies;
+		ei = list_first_entry(&sbi->s_es_lru, struct ext4_inode_info,
+				      i_es_lru);
+		/*
+		 * If there are no non-precached inodes left on the
+		 * list, start releasing precached extents.
+		 */
+		if (ext4_test_inode_state(&ei->vfs_inode,
+					  EXT4_STATE_EXT_PRECACHED))
+			skip_precached = 0;
+		goto retry;
+	}
+
+	spin_unlock(&sbi->s_es_lru_lock);
+
+	if (locked_ei && nr_shrunk == 0)
+		nr_shrunk = __es_try_to_reclaim_extents(locked_ei, nr_to_scan);
+
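+	/*
+	 * Fold this run's numbers into exponential moving averages,
+	 * weighting the newest scan_time and nr_shrunk samples at 1/4.
+	 */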
+	scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
+	if (likely(es_stats->es_stats_scan_time))
+		es_stats->es_stats_scan_time = (scan_time +
+				es_stats->es_stats_scan_time*3) / 4;
+	else
+		es_stats->es_stats_scan_time = scan_time;
+	if (scan_time > es_stats->es_stats_max_scan_time)
+		es_stats->es_stats_max_scan_time = scan_time;
+	if (likely(es_stats->es_stats_shrunk))
+		es_stats->es_stats_shrunk = (nr_shrunk +
+				es_stats->es_stats_shrunk*3) / 4;
+	else
+		es_stats->es_stats_shrunk = nr_shrunk;
+
+	trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time, skip_precached,
+			     nr_skipped, retried);
+	return nr_shrunk;
+}
+
+static unsigned long ext4_es_count(struct shrinker *shrink,
+				   struct shrink_control *sc)
+{
+	unsigned long nr;
+	struct ext4_sb_info *sbi;
+
+	sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
+	nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_lru_cnt);
+	trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr);
+	return nr;
+}
+
+static unsigned long ext4_es_scan(struct shrinker *shrink,
+				  struct shrink_control *sc)
 {
 	struct ext4_sb_info *sbi = container_of(shrink,
 					struct ext4_sb_info, s_es_shrinker);
-	struct ext4_inode_info *ei;
-	struct list_head *cur, *tmp, scanned;
 	int nr_to_scan = sc->nr_to_scan;
-	int ret, nr_shrunk = 0;
+	int ret, nr_shrunk;
 
-	ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
-	trace_ext4_es_shrink_enter(sbi->s_sb, nr_to_scan, ret);
+	ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_lru_cnt);
+	trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret);
 
 	if (!nr_to_scan)
 		return ret;
 
-	INIT_LIST_HEAD(&scanned);
+	nr_shrunk = __ext4_es_shrink(sbi, nr_to_scan, NULL);
 
+	trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret);
+	return percpu_counter_read_positive(&sbi->s_es_stats.es_stats_lru_cnt);
+}
+
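+/*
+ * Old-style shrinker callback: when nr_to_scan is zero it only reports
+ * the number of reclaimable objects, otherwise it scans and reclaims
+ * them, so dispatch to the count/scan helpers above.
+ */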
+static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
+{
+	if (sc->nr_to_scan)
+		return ext4_es_scan(shrink, sc);
+	else
+		return ext4_es_count(shrink, sc);
+}
+
+static void *ext4_es_seq_shrinker_info_start(struct seq_file *seq, loff_t *pos)
+{
+	return *pos ? NULL : SEQ_START_TOKEN;
+}
+
+static void *
+ext4_es_seq_shrinker_info_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	return NULL;
+}
+
+static int ext4_es_seq_shrinker_info_show(struct seq_file *seq, void *v)
+{
+	struct ext4_sb_info *sbi = seq->private;
+	struct ext4_es_stats *es_stats = &sbi->s_es_stats;
+	struct ext4_inode_info *ei, *max = NULL;
+	unsigned int inode_cnt = 0;
+
+	if (v != SEQ_START_TOKEN)
+		return 0;
+
+	/* Here we just find the inode that has the max number of objects */
 	spin_lock(&sbi->s_es_lru_lock);
-	list_for_each_safe(cur, tmp, &sbi->s_es_lru) {
-		list_move_tail(cur, &scanned);
-
-		ei = list_entry(cur, struct ext4_inode_info, i_es_lru);
-
-		read_lock(&ei->i_es_lock);
-		if (ei->i_es_lru_nr == 0) {
-			read_unlock(&ei->i_es_lock);
-			continue;
-		}
-		read_unlock(&ei->i_es_lock);
-
-		write_lock(&ei->i_es_lock);
-		ret = __es_try_to_reclaim_extents(ei, nr_to_scan);
-		write_unlock(&ei->i_es_lock);
-
-		nr_shrunk += ret;
-		nr_to_scan -= ret;
-		if (nr_to_scan == 0)
-			break;
+	list_for_each_entry(ei, &sbi->s_es_lru, i_es_lru) {
+		inode_cnt++;
+		if (max && max->i_es_all_nr < ei->i_es_all_nr)
+			max = ei;
+		else if (!max)
+			max = ei;
 	}
-	list_splice_tail(&scanned, &sbi->s_es_lru);
 	spin_unlock(&sbi->s_es_lru_lock);
 
-	ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
-	trace_ext4_es_shrink_exit(sbi->s_sb, nr_shrunk, ret);
+	seq_printf(seq, "stats:\n  %lld objects\n  %lld reclaimable objects\n",
+		   percpu_counter_sum_positive(&es_stats->es_stats_all_cnt),
+		   percpu_counter_sum_positive(&es_stats->es_stats_lru_cnt));
+	seq_printf(seq, "  %lu/%lu cache hits/misses\n",
+		   es_stats->es_stats_cache_hits,
+		   es_stats->es_stats_cache_misses);
+	if (es_stats->es_stats_last_sorted != 0)
+		seq_printf(seq, "  %u ms last sorted interval\n",
+			   jiffies_to_msecs(jiffies -
+					    es_stats->es_stats_last_sorted));
+	if (inode_cnt)
+		seq_printf(seq, "  %d inodes on lru list\n", inode_cnt);
+
+	seq_printf(seq, "average:\n  %llu us scan time\n",
+	    div_u64(es_stats->es_stats_scan_time, 1000));
+	seq_printf(seq, "  %lu shrunk objects\n", es_stats->es_stats_shrunk);
+	if (inode_cnt)
+		seq_printf(seq,
+		    "maximum:\n  %lu inode (%u objects, %u reclaimable)\n"
+		    "  %llu us max scan time\n",
+		    max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_lru_nr,
+		    div_u64(es_stats->es_stats_max_scan_time, 1000));
+
+	return 0;
+}
+
+static void ext4_es_seq_shrinker_info_stop(struct seq_file *seq, void *v)
+{
+}
+
+static const struct seq_operations ext4_es_seq_shrinker_info_ops = {
+	.start = ext4_es_seq_shrinker_info_start,
+	.next  = ext4_es_seq_shrinker_info_next,
+	.stop  = ext4_es_seq_shrinker_info_stop,
+	.show  = ext4_es_seq_shrinker_info_show,
+};
+
+static int
+ext4_es_seq_shrinker_info_open(struct inode *inode, struct file *file)
+{
+	int ret;
+
+	ret = seq_open(file, &ext4_es_seq_shrinker_info_ops);
+	if (!ret) {
+		struct seq_file *m = file->private_data;
+		m->private = PDE_DATA(inode);
+	}
+
 	return ret;
 }
 
-void ext4_es_register_shrinker(struct super_block *sb)
+static int
+ext4_es_seq_shrinker_info_release(struct inode *inode, struct file *file)
 {
-	struct ext4_sb_info *sbi;
+	return seq_release(inode, file);
+}
 
-	sbi = EXT4_SB(sb);
+static const struct file_operations ext4_es_seq_shrinker_info_fops = {
+	.owner		= THIS_MODULE,
+	.open		= ext4_es_seq_shrinker_info_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= ext4_es_seq_shrinker_info_release,
+};
+
+int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
+{
+	int err;
+
 	INIT_LIST_HEAD(&sbi->s_es_lru);
 	spin_lock_init(&sbi->s_es_lru_lock);
+	sbi->s_es_stats.es_stats_last_sorted = 0;
+	sbi->s_es_stats.es_stats_shrunk = 0;
+	sbi->s_es_stats.es_stats_cache_hits = 0;
+	sbi->s_es_stats.es_stats_cache_misses = 0;
+	sbi->s_es_stats.es_stats_scan_time = 0;
+	sbi->s_es_stats.es_stats_max_scan_time = 0;
+	err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0);
+	if (err)
+		return err;
+	err = percpu_counter_init(&sbi->s_es_stats.es_stats_lru_cnt, 0);
+	if (err)
+		goto err1;
+
 	sbi->s_es_shrinker.shrink = ext4_es_shrink;
 	sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
 	register_shrinker(&sbi->s_es_shrinker);
+
+	if (sbi->s_proc)
+		proc_create_data("es_shrinker_info", S_IRUGO, sbi->s_proc,
+				 &ext4_es_seq_shrinker_info_fops, sbi);
+
+	return 0;
+
+err1:
+	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
+	return err;
 }
 
-void ext4_es_unregister_shrinker(struct super_block *sb)
+void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
 {
-	unregister_shrinker(&EXT4_SB(sb)->s_es_shrinker);
+	if (sbi->s_proc)
+		remove_proc_entry("es_shrinker_info", sbi->s_proc);
+	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
+	percpu_counter_destroy(&sbi->s_es_stats.es_stats_lru_cnt);
+	unregister_shrinker(&sbi->s_es_shrinker);
 }
 
 void ext4_es_lru_add(struct inode *inode)
@@ -955,11 +1242,14 @@
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 
+	ei->i_touch_when = jiffies;
+
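+	/*
+	 * Unlocked fast path: if the inode is already on the LRU we are
+	 * done.  The check is repeated under s_es_lru_lock below before
+	 * the inode is actually linked in.
+	 */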
+	if (!list_empty(&ei->i_es_lru))
+		return;
+
 	spin_lock(&sbi->s_es_lru_lock);
 	if (list_empty(&ei->i_es_lru))
 		list_add_tail(&ei->i_es_lru, &sbi->s_es_lru);
-	else
-		list_move_tail(&ei->i_es_lru, &sbi->s_es_lru);
 	spin_unlock(&sbi->s_es_lru_lock);
 }
 
@@ -981,11 +1271,17 @@
 	struct ext4_es_tree *tree = &ei->i_es_tree;
 	struct rb_node *node;
 	struct extent_status *es;
-	int nr_shrunk = 0;
+	unsigned long nr_shrunk = 0;
+	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
+				      DEFAULT_RATELIMIT_BURST);
 
 	if (ei->i_es_lru_nr == 0)
 		return 0;
 
+	if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) &&
+	    __ratelimit(&_rs))
+		ext4_warning(inode->i_sb, "forced shrink of precached extents");
+
 	node = rb_first(&tree->root);
 	while (node != NULL) {
 		es = rb_entry(node, struct extent_status, rb_node);
diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
index f740eb03..efd5f97 100644
--- a/fs/ext4/extents_status.h
+++ b/fs/ext4/extents_status.h
@@ -29,16 +29,27 @@
 /*
  * These flags live in the high bits of extent_status.es_pblk
  */
-#define EXTENT_STATUS_WRITTEN	(1ULL << 63)
-#define EXTENT_STATUS_UNWRITTEN (1ULL << 62)
-#define EXTENT_STATUS_DELAYED	(1ULL << 61)
-#define EXTENT_STATUS_HOLE	(1ULL << 60)
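+/*
+ * EXTENT_STATUS_* are the low-bit flag values passed around by
+ * callers; shifted left by ES_SHIFT they become the ES_* bits that
+ * are actually stored in the top bits of extent_status.es_pblk.
+ */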
+#define ES_SHIFT	60
+
+#define EXTENT_STATUS_WRITTEN	(1 << 3)
+#define EXTENT_STATUS_UNWRITTEN (1 << 2)
+#define EXTENT_STATUS_DELAYED	(1 << 1)
+#define EXTENT_STATUS_HOLE	(1 << 0)
 
 #define EXTENT_STATUS_FLAGS	(EXTENT_STATUS_WRITTEN | \
 				 EXTENT_STATUS_UNWRITTEN | \
 				 EXTENT_STATUS_DELAYED | \
 				 EXTENT_STATUS_HOLE)
 
+#define ES_WRITTEN		(1ULL << 63)
+#define ES_UNWRITTEN		(1ULL << 62)
+#define ES_DELAYED		(1ULL << 61)
+#define ES_HOLE			(1ULL << 60)
+
+#define ES_MASK			(ES_WRITTEN | ES_UNWRITTEN | \
+				 ES_DELAYED | ES_HOLE)
+
+struct ext4_sb_info;
 struct ext4_extent;
 
 struct extent_status {
@@ -53,13 +64,27 @@
 	struct extent_status *cache_es;	/* recently accessed extent */
 };
 
+struct ext4_es_stats {
+	unsigned long es_stats_last_sorted;
+	unsigned long es_stats_shrunk;
+	unsigned long es_stats_cache_hits;
+	unsigned long es_stats_cache_misses;
+	u64 es_stats_scan_time;
+	u64 es_stats_max_scan_time;
+	struct percpu_counter es_stats_all_cnt;
+	struct percpu_counter es_stats_lru_cnt;
+};
+
 extern int __init ext4_init_es(void);
 extern void ext4_exit_es(void);
 extern void ext4_es_init_tree(struct ext4_es_tree *tree);
 
 extern int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
 				 ext4_lblk_t len, ext4_fsblk_t pblk,
-				 unsigned long long status);
+				 unsigned int status);
+extern void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
+				 ext4_lblk_t len, ext4_fsblk_t pblk,
+				 unsigned int status);
 extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
 				 ext4_lblk_t len);
 extern void ext4_es_find_delayed_extent_range(struct inode *inode,
@@ -67,36 +92,35 @@
 					struct extent_status *es);
 extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
 				 struct extent_status *es);
-extern int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex);
 
 static inline int ext4_es_is_written(struct extent_status *es)
 {
-	return (es->es_pblk & EXTENT_STATUS_WRITTEN) != 0;
+	return (es->es_pblk & ES_WRITTEN) != 0;
 }
 
 static inline int ext4_es_is_unwritten(struct extent_status *es)
 {
-	return (es->es_pblk & EXTENT_STATUS_UNWRITTEN) != 0;
+	return (es->es_pblk & ES_UNWRITTEN) != 0;
 }
 
 static inline int ext4_es_is_delayed(struct extent_status *es)
 {
-	return (es->es_pblk & EXTENT_STATUS_DELAYED) != 0;
+	return (es->es_pblk & ES_DELAYED) != 0;
 }
 
 static inline int ext4_es_is_hole(struct extent_status *es)
 {
-	return (es->es_pblk & EXTENT_STATUS_HOLE) != 0;
+	return (es->es_pblk & ES_HOLE) != 0;
 }
 
-static inline ext4_fsblk_t ext4_es_status(struct extent_status *es)
+static inline unsigned int ext4_es_status(struct extent_status *es)
 {
-	return (es->es_pblk & EXTENT_STATUS_FLAGS);
+	return es->es_pblk >> ES_SHIFT;
 }
 
 static inline ext4_fsblk_t ext4_es_pblock(struct extent_status *es)
 {
-	return (es->es_pblk & ~EXTENT_STATUS_FLAGS);
+	return es->es_pblk & ~ES_MASK;
 }
 
 static inline void ext4_es_store_pblock(struct extent_status *es,
@@ -104,23 +128,29 @@
 {
 	ext4_fsblk_t block;
 
-	block = (pb & ~EXTENT_STATUS_FLAGS) |
-		(es->es_pblk & EXTENT_STATUS_FLAGS);
+	block = (pb & ~ES_MASK) | (es->es_pblk & ES_MASK);
 	es->es_pblk = block;
 }
 
 static inline void ext4_es_store_status(struct extent_status *es,
-					unsigned long long status)
+					unsigned int status)
 {
-	ext4_fsblk_t block;
-
-	block = (status & EXTENT_STATUS_FLAGS) |
-		(es->es_pblk & ~EXTENT_STATUS_FLAGS);
-	es->es_pblk = block;
+	es->es_pblk = (((ext4_fsblk_t)
+			(status & EXTENT_STATUS_FLAGS) << ES_SHIFT) |
+		       (es->es_pblk & ~ES_MASK));
 }
 
-extern void ext4_es_register_shrinker(struct super_block *sb);
-extern void ext4_es_unregister_shrinker(struct super_block *sb);
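+/*
+ * Store the physical block and the status bits with one assignment so
+ * es_pblk is never seen with only half of the update applied.
+ */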
+static inline void ext4_es_store_pblock_status(struct extent_status *es,
+					       ext4_fsblk_t pb,
+					       unsigned int status)
+{
+	es->es_pblk = (((ext4_fsblk_t)
+			(status & EXTENT_STATUS_FLAGS) << ES_SHIFT) |
+		       (pb & ~ES_MASK));
+}
+
+extern int ext4_es_register_shrinker(struct ext4_sb_info *sbi);
+extern void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi);
 extern void ext4_es_lru_add(struct inode *inode);
 extern void ext4_es_lru_del(struct inode *inode);
 
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index ec9770f..bae48a0 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -57,13 +57,142 @@
 	return 0;
 }
 
-void ext4_unwritten_wait(struct inode *inode)
+static void ext4_unwritten_wait(struct inode *inode)
 {
 	wait_queue_head_t *wq = ext4_ioend_wq(inode);
 
 	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
 }
 
+#if 0
+/*
+ * This tests whether the IO in question is block-aligned or not.
+ * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
+ * are converted to written only after the IO is complete.  Until they are
+ * mapped, these blocks appear as holes, so dio_zero_block() will assume that
+ * it needs to zero out portions of the start and/or end block.  If 2 AIO
+ * threads are at work on the same unwritten block, they must be synchronized
+ * or one thread will zero the other's data, causing corruption.
+ */
+static int
+ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
+{
+	struct super_block *sb = inode->i_sb;
+	int blockmask = sb->s_blocksize - 1;
+
+	if (pos >= i_size_read(inode))
+		return 0;
+
+	if ((pos | iov_iter_alignment(from)) & blockmask)
+		return 1;
+
+	return 0;
+}
+
+static ssize_t
+ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+	struct file *file = iocb->ki_filp;
+	struct inode *inode = file_inode(iocb->ki_filp);
+	struct mutex *aio_mutex = NULL;
+	struct blk_plug plug;
+	int o_direct = file->f_flags & O_DIRECT;
+	int overwrite = 0;
+	size_t length = iov_iter_count(from);
+	ssize_t ret;
+	loff_t pos = iocb->ki_pos;
+
+	/*
+	 * Unaligned direct AIO must be serialized; see comment above
+	 * In the case of O_APPEND, assume that we must always serialize
+	 */
+	if (o_direct &&
+	    ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
+	    !is_sync_kiocb(iocb) &&
+	    (file->f_flags & O_APPEND ||
+	     ext4_unaligned_aio(inode, from, pos))) {
+		aio_mutex = ext4_aio_mutex(inode);
+		mutex_lock(aio_mutex);
+		ext4_unwritten_wait(inode);
+	}
+
+	mutex_lock(&inode->i_mutex);
+	if (file->f_flags & O_APPEND)
+		iocb->ki_pos = pos = i_size_read(inode);
+
+	/*
+	 * If we have encountered a bitmap-format file, the size limit
+	 * is smaller than s_maxbytes, which is for extent-mapped files.
+	 */
+	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
+		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+
+		if ((pos > sbi->s_bitmap_maxbytes) ||
+		    (pos == sbi->s_bitmap_maxbytes && length > 0)) {
+			mutex_unlock(&inode->i_mutex);
+			ret = -EFBIG;
+			goto errout;
+		}
+
+		if (pos + length > sbi->s_bitmap_maxbytes)
+			iov_iter_truncate(from, sbi->s_bitmap_maxbytes - pos);
+	}
+
+	iocb->private = &overwrite;
+	if (o_direct) {
+		blk_start_plug(&plug);
+
+		/* check whether we do a DIO overwrite or not */
+		if (ext4_should_dioread_nolock(inode) && !aio_mutex &&
+		    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
+			struct ext4_map_blocks map;
+			unsigned int blkbits = inode->i_blkbits;
+			int err, len;
+
+			map.m_lblk = pos >> blkbits;
+			map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
+				- map.m_lblk;
+			len = map.m_len;
+
+			err = ext4_map_blocks(NULL, inode, &map, 0);
+			/*
+			 * 'err==len' means that all of the blocks have
+			 * been preallocated, whether or not they are
+			 * initialized.  To exclude unwritten extents,
+			 * we also need to check m_flags.  There are
+			 * two cases that indicate an initialized
+			 * extent: 1) if we hit the extent cache,
+			 * EXT4_MAP_MAPPED is set in the returned
+			 * flags; 2) if we do a real lookup, no flags
+			 * are returned.  So check both conditions.
+			 */
+			if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
+				overwrite = 1;
+		}
+	}
+
+	ret = __generic_file_write_iter(iocb, from);
+	mutex_unlock(&inode->i_mutex);
+
+	if (ret > 0) {
+		ssize_t err;
+
+		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
+		if (err < 0)
+			ret = err;
+	}
+	if (o_direct)
+		blk_finish_plug(&plug);
+
+errout:
+	if (aio_mutex)
+		mutex_unlock(aio_mutex);
+	return ret;
+}
+#endif
+
 /*
  * This tests whether the IO in question is block-aligned or not.
  * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
@@ -200,16 +329,24 @@
 
 static const struct vm_operations_struct ext4_file_vm_ops = {
 	.fault		= filemap_fault,
+#if 0
+	.map_pages	= filemap_map_pages,
+#endif
 	.page_mkwrite   = ext4_page_mkwrite,
 	.remap_pages	= generic_file_remap_pages,
 };
 
 static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
-	struct address_space *mapping = file->f_mapping;
+	struct inode *inode = file->f_mapping->host;
 
-	if (!mapping->a_ops->readpage)
-		return -ENOEXEC;
+	if (ext4_encrypted_inode(inode)) {
+		int err = ext4_get_encryption_info(inode);
+		if (err)
+			return 0;
+		if (ext4_encryption_info(inode) == NULL)
+			return -ENOKEY;
+	}
 	file_accessed(file);
 	vma->vm_ops = &ext4_file_vm_ops;
 	return 0;
@@ -219,10 +356,10 @@
 {
 	struct super_block *sb = inode->i_sb;
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-	struct ext4_inode_info *ei = EXT4_I(inode);
 	struct vfsmount *mnt = filp->f_path.mnt;
 	struct path path;
 	char buf[64], *cp;
+	int ret;
 
 	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
 		     !(sb->s_flags & MS_RDONLY))) {
@@ -244,6 +381,7 @@
 			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
 			if (IS_ERR(handle))
 				return PTR_ERR(handle);
+			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
 			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
 			if (err) {
 				ext4_journal_stop(handle);
@@ -255,26 +393,21 @@
 			ext4_journal_stop(handle);
 		}
 	}
+	if (ext4_encrypted_inode(inode)) {
+		ret = ext4_get_encryption_info(inode);
+		if (ret)
+			return -EACCES;
+		if (ext4_encryption_info(inode) == NULL)
+			return -ENOKEY;
+	}
 	/*
 	 * Set up the jbd2_inode if we are opening the inode for
 	 * writing and the journal is present
 	 */
-	if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) {
-		struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL);
-
-		spin_lock(&inode->i_lock);
-		if (!ei->jinode) {
-			if (!jinode) {
-				spin_unlock(&inode->i_lock);
-				return -ENOMEM;
-			}
-			ei->jinode = jinode;
-			jbd2_journal_init_jbd_inode(ei->jinode, inode);
-			jinode = NULL;
-		}
-		spin_unlock(&inode->i_lock);
-		if (unlikely(jinode != NULL))
-			jbd2_free_inode(jinode);
+	if (filp->f_mode & FMODE_WRITE) {
+		ret = ext4_inode_attach_jinode(inode);
+		if (ret < 0)
+			return ret;
 	}
 	return dquot_file_open(inode, filp);
 }
@@ -424,6 +557,37 @@
 	return found;
 }
 
+static inline int unsigned_offsets(struct file *file)
+{
+	return file->f_mode & FMODE_UNSIGNED_OFFSET;
+}
+
+/**
+ * vfs_setpos - update the file offset for lseek
+ * @file:	file structure in question
+ * @offset:	file offset to seek to
+ * @maxsize:	maximum file size
+ *
+ * This is a low-level filesystem helper for updating the file offset to
+ * the value specified by @offset if the given offset is valid and it is
+ * not equal to the current file offset.
+ *
+ * Return the specified offset on success and -EINVAL on invalid offset.
+ */
+static loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize)
+{
+	if (offset < 0 && !unsigned_offsets(file))
+		return -EINVAL;
+	if (offset > maxsize)
+		return -EINVAL;
+
+	if (offset != file->f_pos) {
+		file->f_pos = offset;
+		file->f_version = 0;
+	}
+	return offset;
+}
+
 /*
  * ext4_seek_data() retrieves the offset for SEEK_DATA.
  */
@@ -494,17 +658,7 @@
 	if (dataoff > isize)
 		return -ENXIO;
 
-	if (dataoff < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET))
-		return -EINVAL;
-	if (dataoff > maxsize)
-		return -EINVAL;
-
-	if (dataoff != file->f_pos) {
-		file->f_pos = dataoff;
-		file->f_version = 0;
-	}
-
-	return dataoff;
+	return vfs_setpos(file, dataoff, maxsize);
 }
 
 /*
@@ -580,17 +734,7 @@
 	if (holeoff > isize)
 		holeoff = isize;
 
-	if (holeoff < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET))
-		return -EINVAL;
-	if (holeoff > maxsize)
-		return -EINVAL;
-
-	if (holeoff != file->f_pos) {
-		file->f_pos = holeoff;
-		file->f_version = 0;
-	}
-
-	return holeoff;
+	return vfs_setpos(file, holeoff, maxsize);
 }
 
 /*
@@ -650,6 +794,9 @@
 	.listxattr	= ext4_listxattr,
 	.removexattr	= generic_removexattr,
 	.get_acl	= ext4_get_acl,
+#if 0
+	.set_acl	= ext4_set_acl,
+#endif
 	.fiemap		= ext4_fiemap,
 };
 
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index e0ba8a4..a8bc47f 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -73,32 +73,6 @@
 	return ret;
 }
 
-/**
- * __sync_file - generic_file_fsync without the locking and filemap_write
- * @inode:	inode to sync
- * @datasync:	only sync essential metadata if true
- *
- * This is just generic_file_fsync without the locking.  This is needed for
- * nojournal mode to make sure this inodes data/metadata makes it to disk
- * properly.  The i_mutex should be held already.
- */
-static int __sync_inode(struct inode *inode, int datasync)
-{
-	int err;
-	int ret;
-
-	ret = sync_mapping_buffers(inode->i_mapping);
-	if (!(inode->i_state & I_DIRTY))
-		return ret;
-	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
-		return ret;
-
-	err = sync_inode_metadata(inode, 1);
-	if (ret == 0)
-		ret = err;
-	return ret;
-}
-
 /*
  * akpm: A new design for ext4_sync_file().
  *
@@ -116,7 +90,7 @@
 	struct inode *inode = file->f_mapping->host;
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
-	int ret, err;
+	int ret = 0, err;
 	tid_t commit_tid;
 	bool needs_barrier = false;
 
@@ -124,25 +98,24 @@
 
 	trace_ext4_sync_file_enter(file, datasync);
 
-	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
-	if (ret)
-		return ret;
-	mutex_lock(&inode->i_mutex);
-
-	if (inode->i_sb->s_flags & MS_RDONLY)
+	if (inode->i_sb->s_flags & MS_RDONLY) {
+		/* Make sure that we read updated s_mount_flags value */
+		smp_rmb();
+		if (EXT4_SB(inode->i_sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
+			ret = -EROFS;
 		goto out;
-
-	ret = ext4_flush_unwritten_io(inode);
-	if (ret < 0)
-		goto out;
+	}
 
 	if (!journal) {
-		ret = __sync_inode(inode, datasync);
+		ret = generic_file_fsync(file, start, end, datasync);
 		if (!ret && !hlist_empty(&inode->i_dentry))
 			ret = ext4_sync_parent(inode);
 		goto out;
 	}
 
+	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+	if (ret)
+		return ret;
 	/*
 	 * data=writeback,ordered:
 	 *  The caller's filemap_fdatawrite()/wait will sync the data.
@@ -172,8 +145,7 @@
 		if (!ret)
 			ret = err;
 	}
- out:
-	mutex_unlock(&inode->i_mutex);
+out:
 	trace_ext4_sync_file_exit(inode, ret);
 	return ret;
 }
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 00cbc64..8b2980a 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -70,18 +70,26 @@
 				       ext4_group_t block_group,
 				       struct ext4_group_desc *gdp)
 {
+	struct ext4_group_info *grp;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	J_ASSERT_BH(bh, buffer_locked(bh));
 
 	/* If checksum is bad mark all blocks and inodes use to prevent
 	 * allocation, essentially implementing a per-group read-only flag. */
 	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
 		ext4_error(sb, "Checksum bad for group %u", block_group);
-		ext4_free_group_clusters_set(sb, gdp, 0);
-		ext4_free_inodes_set(sb, gdp, 0);
-		ext4_itable_unused_set(sb, gdp, 0);
-		memset(bh->b_data, 0xff, sb->s_blocksize);
-		ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
-					   EXT4_INODES_PER_GROUP(sb) / 8);
+		grp = ext4_get_group_info(sb, block_group);
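+		/*
+		 * Mark both bitmaps corrupt and pull this group's free
+		 * counts out of the global counters so the allocators
+		 * stop handing out blocks and inodes from it.
+		 */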
+		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+			percpu_counter_sub(&sbi->s_freeclusters_counter,
+					   grp->bb_free);
+		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
+		if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
+			int count;
+			count = ext4_free_inodes_count(sb, gdp);
+			percpu_counter_sub(&sbi->s_freeinodes_counter,
+					   count);
+		}
+		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
 		return 0;
 	}
 
@@ -117,6 +125,8 @@
 	struct ext4_group_desc *desc;
 	struct buffer_head *bh = NULL;
 	ext4_fsblk_t bitmap_blk;
+	struct ext4_group_info *grp;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
 
 	desc = ext4_get_group_desc(sb, block_group, NULL);
 	if (!desc)
@@ -185,6 +195,14 @@
 		put_bh(bh);
 		ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
 			   "inode_bitmap = %llu", block_group, bitmap_blk);
+		grp = ext4_get_group_info(sb, block_group);
+		if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
+			int count;
+			count = ext4_free_inodes_count(sb, desc);
+			percpu_counter_sub(&sbi->s_freeinodes_counter,
+					   count);
+		}
+		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
 		return NULL;
 	}
 	ext4_unlock_group(sb, block_group);
@@ -221,6 +239,7 @@
 	struct ext4_super_block *es;
 	struct ext4_sb_info *sbi;
 	int fatal = 0, err, count, cleared;
+	struct ext4_group_info *grp;
 
 	if (!sb) {
 		printk(KERN_ERR "EXT4-fs: %s:%d: inode on "
@@ -266,7 +285,9 @@
 	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
 	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
 	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
-	if (!bitmap_bh)
+	/* Don't bother if the inode bitmap is corrupt. */
+	grp = ext4_get_group_info(sb, block_group);
+	if (unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) || !bitmap_bh)
 		goto error_return;
 
 	BUFFER_TRACE(bitmap_bh, "get_write_access");
@@ -315,8 +336,16 @@
 		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
 		if (!fatal)
 			fatal = err;
-	} else
+	} else {
 		ext4_error(sb, "bit already cleared for inode %lu", ino);
+		if (gdp && !EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
+			int count;
+			count = ext4_free_inodes_count(sb, gdp);
+			percpu_counter_sub(&sbi->s_freeinodes_counter,
+					   count);
+		}
+		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
+	}
 
 error_return:
 	brelse(bitmap_bh);
@@ -426,7 +455,7 @@
 			ext4fs_dirhash(qstr->name, qstr->len, &hinfo);
 			grp = hinfo.hash;
 		} else
-			get_random_bytes(&grp, sizeof(grp));
+			grp = prandom_u32();
 		parent_group = (unsigned)grp % ngroups;
 		for (i = 0; i < ngroups; i++) {
 			g = (parent_group + i) % ngroups;
@@ -625,6 +654,51 @@
 }
 
 /*
+ * In no journal mode, if an inode has recently been deleted, we want
+ * to avoid reusing it until we're reasonably sure the inode table
+ * block has been written back to disk.  (Yes, these values are
+ * somewhat arbitrary...)
+ */
+#define RECENTCY_MIN	5
+#define RECENTCY_DIRTY	30
+
+static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino)
+{
+	struct ext4_group_desc	*gdp;
+	struct ext4_inode	*raw_inode;
+	struct buffer_head	*bh;
+	unsigned long		dtime, now;
+	int	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
+	int	offset, ret = 0, recentcy = RECENTCY_MIN;
+
+	gdp = ext4_get_group_desc(sb, group, NULL);
+	if (unlikely(!gdp))
+		return 0;
+
+	bh = sb_getblk(sb, ext4_inode_table(sb, gdp) +
+		       (ino / inodes_per_block));
+	if (unlikely(!bh) || !buffer_uptodate(bh))
+		/*
+		 * If the block is not in the buffer cache, then it
+		 * must have been written out.
+		 */
+		goto out;
+
+	offset = (ino % inodes_per_block) * EXT4_INODE_SIZE(sb);
+	raw_inode = (struct ext4_inode *) (bh->b_data + offset);
+	dtime = le32_to_cpu(raw_inode->i_dtime);
+	now = get_seconds();
+	if (buffer_dirty(bh))
+		recentcy += RECENTCY_DIRTY;
+
+	if (dtime && (dtime < now) && (now < dtime + recentcy))
+		ret = 1;
+out:
+	brelse(bh);
+	return ret;
+}
+
+/*
  * There are two policies for allocating an inode.  If the new inode is
  * a directory, then a forward search is made for a block group with both
  * free space and a low directory-to-inode ratio; if that fails, then of
@@ -652,11 +726,26 @@
 	struct inode *ret;
 	ext4_group_t i;
 	ext4_group_t flex_group;
+	struct ext4_group_info *grp;
+	int encrypt = 0;
 
 	/* Cannot create files in a deleted directory */
 	if (!dir || !dir->i_nlink)
 		return ERR_PTR(-EPERM);
 
+	if ((ext4_encrypted_inode(dir) ||
+	     DUMMY_ENCRYPTION_ENABLED(EXT4_SB(dir->i_sb))) &&
+	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
+		err = ext4_get_encryption_info(dir);
+		if (err)
+			return ERR_PTR(err);
+		if (ext4_encryption_info(dir) == NULL)
+			return ERR_PTR(-EPERM);
+		if (!handle)
+			nblocks += EXT4_DATA_TRANS_BLOCKS(dir->i_sb);
+		encrypt = 1;
+	}
+
 	sb = dir->i_sb;
 	ngroups = ext4_get_groups_count(sb);
 	trace_ext4_request_inode(dir, mode);
@@ -725,10 +814,22 @@
 			continue;
 		}
 
+		grp = ext4_get_group_info(sb, group);
+		/* Skip groups with already-known suspicious inode tables */
+		if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
+			if (++group == ngroups)
+				group = 0;
+			continue;
+		}
+
 		brelse(inode_bitmap_bh);
 		inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
-		if (!inode_bitmap_bh)
-			goto out;
+		/* Skip groups with suspicious inode tables */
+		if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp) || !inode_bitmap_bh) {
+			if (++group == ngroups)
+				group = 0;
+			continue;
+		}
 
 repeat_in_this_group:
 		ino = ext4_find_next_zero_bit((unsigned long *)
@@ -741,10 +842,16 @@
 				   "inode=%lu", ino + 1);
 			continue;
 		}
+		if ((EXT4_SB(sb)->s_journal == NULL) &&
+		    recently_deleted(sb, group, ino)) {
+			ino++;
+			goto next_inode;
+		}
 		if (!handle) {
 			BUG_ON(nblocks <= 0);
 			handle = __ext4_journal_start_sb(dir->i_sb, line_no,
-							 handle_type, nblocks);
+							 handle_type, nblocks,
+							 0);
 			if (IS_ERR(handle)) {
 				err = PTR_ERR(handle);
 				ext4_std_error(sb, err);
@@ -763,6 +870,7 @@
 		ino++;		/* the inode bitmap is zero-based */
 		if (!ret2)
 			goto got; /* we grabbed the inode! */
+next_inode:
 		if (ino < EXT4_INODES_PER_GROUP(sb))
 			goto repeat_in_this_group;
 next_group:
@@ -921,8 +1029,7 @@
 	spin_unlock(&sbi->s_next_gen_lock);
 
 	/* Precompute checksum seed for inode metadata */
-	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
-			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
+	if (ext4_has_metadata_csum(sb)) {
 		__u32 csum;
 		__le32 inum = cpu_to_le32(inode->i_ino);
 		__le32 gen = cpu_to_le32(inode->i_generation);
@@ -936,11 +1043,9 @@
 	ext4_set_inode_state(inode, EXT4_STATE_NEW);
 
 	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
-
 	ei->i_inline_off = 0;
 	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_INLINE_DATA))
 		ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
-
 	ret = inode;
 	err = dquot_alloc_inode(inode);
 	if (err)
@@ -967,6 +1072,12 @@
 		ei->i_datasync_tid = handle->h_transaction->t_tid;
 	}
 
+	if (encrypt) {
+		err = ext4_inherit_context(dir, inode);
+		if (err)
+			goto fail_free_drop;
+	}
+
 	err = ext4_mark_inode_dirty(handle, inode);
 	if (err) {
 		ext4_std_error(sb, err);
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index b07a221..0fb5967 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -23,7 +23,6 @@
 #include <linux/aio.h>
 #include "ext4_jbd2.h"
 #include "truncate.h"
-#include "ext4_extents.h"	/* Needed for EXT_MAX_BLOCKS */
 
 #include <trace/events/ext4.h>
 
@@ -319,34 +318,24 @@
  *	ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
  *	as described above and return 0.
  */
-static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
-			     ext4_lblk_t iblock, int indirect_blks,
-			     int *blks, ext4_fsblk_t goal,
-			     ext4_lblk_t *offsets, Indirect *branch)
+static int ext4_alloc_branch(handle_t *handle,
+			     struct ext4_allocation_request *ar,
+			     int indirect_blks, ext4_lblk_t *offsets,
+			     Indirect *branch)
 {
-	struct ext4_allocation_request	ar;
 	struct buffer_head *		bh;
 	ext4_fsblk_t			b, new_blocks[4];
 	__le32				*p;
 	int				i, j, err, len = 1;
 
-	/*
-	 * Set up for the direct block allocation
-	 */
-	memset(&ar, 0, sizeof(ar));
-	ar.inode = inode;
-	ar.len = *blks;
-	ar.logical = iblock;
-	if (S_ISREG(inode->i_mode))
-		ar.flags = EXT4_MB_HINT_DATA;
-
 	for (i = 0; i <= indirect_blks; i++) {
 		if (i == indirect_blks) {
-			ar.goal = goal;
-			new_blocks[i] = ext4_mb_new_blocks(handle, &ar, &err);
+			new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
 		} else
-			goal = new_blocks[i] = ext4_new_meta_blocks(handle, inode,
-							goal, 0, NULL, &err);
+			ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle,
+					ar->inode, ar->goal,
+					ar->flags & EXT4_MB_DELALLOC_RESERVED,
+					NULL, &err);
 		if (err) {
 			i--;
 			goto failed;
@@ -355,7 +344,7 @@
 		if (i == 0)
 			continue;
 
-		bh = branch[i].bh = sb_getblk(inode->i_sb, new_blocks[i-1]);
+		bh = branch[i].bh = sb_getblk(ar->inode->i_sb, new_blocks[i-1]);
 		if (unlikely(!bh)) {
 			err = -ENOMEM;
 			goto failed;
@@ -373,7 +362,7 @@
 		b = new_blocks[i];
 
 		if (i == indirect_blks)
-			len = ar.len;
+			len = ar->len;
 		for (j = 0; j < len; j++)
 			*p++ = cpu_to_le32(b++);
 
@@ -382,11 +371,10 @@
 		unlock_buffer(bh);
 
 		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
-		err = ext4_handle_dirty_metadata(handle, inode, bh);
+		err = ext4_handle_dirty_metadata(handle, ar->inode, bh);
 		if (err)
 			goto failed;
 	}
-	*blks = ar.len;
 	return 0;
 failed:
 	for (; i >= 0; i--) {
@@ -397,10 +385,10 @@
 		 * existing before ext4_alloc_branch() was called.
 		 */
 		if (i > 0 && i != indirect_blks && branch[i].bh)
-			ext4_forget(handle, 1, inode, branch[i].bh,
+			ext4_forget(handle, 1, ar->inode, branch[i].bh,
 				    branch[i].bh->b_blocknr);
-		ext4_free_blocks(handle, inode, NULL, new_blocks[i],
-				 (i == indirect_blks) ? ar.len : 1, 0);
+		ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i],
+				 (i == indirect_blks) ? ar->len : 1, 0);
 	}
 	return err;
 }
@@ -420,9 +408,9 @@
  * inode (->i_blocks, etc.). In case of success we end up with the full
  * chain to new block and return 0.
  */
-static int ext4_splice_branch(handle_t *handle, struct inode *inode,
-			      ext4_lblk_t block, Indirect *where, int num,
-			      int blks)
+static int ext4_splice_branch(handle_t *handle,
+			      struct ext4_allocation_request *ar,
+			      Indirect *where, int num)
 {
 	int i;
 	int err = 0;
@@ -447,9 +435,9 @@
 	 * Update the host buffer_head or inode to point to the newly
 	 * allocated direct blocks
 	 */
-	if (num == 0 && blks > 1) {
+	if (num == 0 && ar->len > 1) {
 		current_block = le32_to_cpu(where->key) + 1;
-		for (i = 1; i < blks; i++)
+		for (i = 1; i < ar->len; i++)
 			*(where->p + i) = cpu_to_le32(current_block++);
 	}
 
@@ -466,14 +454,14 @@
 		 */
 		jbd_debug(5, "splicing indirect only\n");
 		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
-		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
+		err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh);
 		if (err)
 			goto err_out;
 	} else {
 		/*
 		 * OK, we spliced it into the inode itself on a direct block.
 		 */
-		ext4_mark_inode_dirty(handle, inode);
+		ext4_mark_inode_dirty(handle, ar->inode);
 		jbd_debug(5, "splicing direct\n");
 	}
 	return err;
@@ -485,11 +473,11 @@
 		 * need to revoke the block, which is why we don't
 		 * need to set EXT4_FREE_BLOCKS_METADATA.
 		 */
-		ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
+		ext4_free_blocks(handle, ar->inode, where[i].bh, 0, 1,
 				 EXT4_FREE_BLOCKS_FORGET);
 	}
-	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key),
-			 blks, 0);
+	ext4_free_blocks(handle, ar->inode, NULL, le32_to_cpu(where[num].key),
+			 ar->len, 0);
 
 	return err;
 }
@@ -526,11 +514,11 @@
 			struct ext4_map_blocks *map,
 			int flags)
 {
+	struct ext4_allocation_request ar;
 	int err = -EIO;
 	ext4_lblk_t offsets[4];
 	Indirect chain[4];
 	Indirect *partial;
-	ext4_fsblk_t goal;
 	int indirect_blks;
 	int blocks_to_boundary = 0;
 	int depth;
@@ -580,7 +568,16 @@
 		return -EUCLEAN;
 	}
 
-	goal = ext4_find_goal(inode, map->m_lblk, partial);
+	/* Set up for the direct block allocation */
+	memset(&ar, 0, sizeof(ar));
+	ar.inode = inode;
+	ar.logical = map->m_lblk;
+	if (S_ISREG(inode->i_mode))
+		ar.flags = EXT4_MB_HINT_DATA;
+	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
+		ar.flags |= EXT4_MB_DELALLOC_RESERVED;
+
+	ar.goal = ext4_find_goal(inode, map->m_lblk, partial);
 
 	/* the number of blocks need to allocate for [d,t]indirect blocks */
 	indirect_blks = (chain + depth) - partial - 1;
@@ -589,13 +586,13 @@
 	 * Next look up the indirect map to count the total number of
 	 * direct blocks to allocate for this branch.
 	 */
-	count = ext4_blks_to_allocate(partial, indirect_blks,
-				      map->m_len, blocks_to_boundary);
+	ar.len = ext4_blks_to_allocate(partial, indirect_blks,
+				       map->m_len, blocks_to_boundary);
+
 	/*
 	 * Block out ext4_truncate while we alter the tree
 	 */
-	err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
-				&count, goal,
+	err = ext4_alloc_branch(handle, &ar, indirect_blks,
 				offsets + (partial - chain), partial);
 
 	/*
@@ -606,14 +603,14 @@
 	 * may need to return -EAGAIN upwards in the worst case.  --sct
 	 */
 	if (!err)
-		err = ext4_splice_branch(handle, inode, map->m_lblk,
-					 partial, indirect_blks, count);
+		err = ext4_splice_branch(handle, &ar, partial, indirect_blks);
 	if (err)
 		goto cleanup;
 
 	map->m_flags |= EXT4_MAP_NEW;
 
 	ext4_update_inode_fsync_trans(handle, inode, 1);
+	count = ar.len;
 got_it:
 	map->m_flags |= EXT4_MAP_MAPPED;
 	map->m_pblk = le32_to_cpu(chain[depth-1].key);
@@ -630,7 +627,7 @@
 		partial--;
 	}
 out:
-	trace_ext4_ind_map_blocks_exit(inode, map, err);
+	trace_ext4_ind_map_blocks_exit(inode, flags, map, err);
 	return err;
 }
 
@@ -681,11 +678,6 @@
 
 retry:
 	if (rw == READ && ext4_should_dioread_nolock(inode)) {
-		if (unlikely(atomic_read(&EXT4_I(inode)->i_unwritten))) {
-			mutex_lock(&inode->i_mutex);
-			ext4_flush_unwritten_io(inode);
-			mutex_unlock(&inode->i_mutex);
-		}
 		/*
 		 * Nolock dioread optimization may be dynamically disabled
 		 * via ext4_inode_block_unlocked_dio(). Check inode's state
@@ -700,7 +692,7 @@
 		}
 		ret = __blockdev_direct_IO(rw, iocb, inode,
 				 inode->i_sb->s_bdev, iov,
 				 offset, nr_segs,
 				 ext4_get_block, NULL, NULL, 0);
 		inode_dio_done(inode);
 	} else {
@@ -710,7 +702,7 @@
 
 		if (unlikely((rw & WRITE) && ret < 0)) {
 			loff_t isize = i_size_read(inode);
-			loff_t end = offset + iov_length(iov, nr_segs);
+			loff_t end = offset + count;
 
 			if (end > isize)
 				ext4_truncate_failed_write(inode);
@@ -785,27 +777,18 @@
 	return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
 }
 
-int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk)
+/*
+ * Calculate number of indirect blocks touched by mapping @nrblocks logically
+ * contiguous blocks
+ */
+int ext4_ind_trans_blocks(struct inode *inode, int nrblocks)
 {
-	int indirects;
-
-	/* if nrblocks are contiguous */
-	if (chunk) {
-		/*
-		 * With N contiguous data blocks, we need at most
-		 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
-		 * 2 dindirect blocks, and 1 tindirect block
-		 */
-		return DIV_ROUND_UP(nrblocks,
-				    EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
-	}
 	/*
-	 * if nrblocks are not contiguous, worse case, each block touch
-	 * a indirect block, and each indirect block touch a double indirect
-	 * block, plus a triple indirect block
+	 * With N contiguous data blocks, we need at most
+	 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
+	 * 2 dindirect blocks, and 1 tindirect block
 	 */
-	indirects = nrblocks * 2 + 1;
-	return indirects;
+	return DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
 }
 
 /*
@@ -946,11 +929,13 @@
 			     __le32 *last)
 {
 	__le32 *p;
-	int	flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
+	int	flags = EXT4_FREE_BLOCKS_VALIDATED;
 	int	err;
 
 	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
-		flags |= EXT4_FREE_BLOCKS_METADATA;
+		flags |= EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_METADATA;
+	else if (ext4_should_journal_data(inode))
+		flags |= EXT4_FREE_BLOCKS_FORGET;
 
 	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
 				   count)) {
@@ -1310,97 +1295,220 @@
 	}
 }
 
-static int free_hole_blocks(handle_t *handle, struct inode *inode,
-			    struct buffer_head *parent_bh, __le32 *i_data,
-			    int level, ext4_lblk_t first,
-			    ext4_lblk_t count, int max)
+/**
+ *	ext4_ind_remove_space - remove space from the range
+ *	@handle: JBD handle for this transaction
+ *	@inode:	inode we are dealing with
+ *	@start:	First block to remove
+ *	@end:	One block after the last block to remove (exclusive)
+ *
+ *	Free the blocks in the defined range (end is exclusive endpoint of
+ *	range). This is used by ext4_punch_hole().
+ */
+int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
+			  ext4_lblk_t start, ext4_lblk_t end)
 {
-	struct buffer_head *bh = NULL;
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	__le32 *i_data = ei->i_data;
 	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
-	int ret = 0;
-	int i, inc;
-	ext4_lblk_t offset;
-	__le32 blk;
+	ext4_lblk_t offsets[4], offsets2[4];
+	Indirect chain[4], chain2[4];
+	Indirect *partial, *partial2;
+	ext4_lblk_t max_block;
+	__le32 nr = 0, nr2 = 0;
+	int n = 0, n2 = 0;
+	unsigned blocksize = inode->i_sb->s_blocksize;
 
-	inc = 1 << ((EXT4_BLOCK_SIZE_BITS(inode->i_sb) - 2) * level);
-	for (i = 0, offset = 0; i < max; i++, i_data++, offset += inc) {
-		if (offset >= count + first)
-			break;
-		if (*i_data == 0 || (offset + inc) <= first)
-			continue;
-		blk = *i_data;
-		if (level > 0) {
-			ext4_lblk_t first2;
-			ext4_lblk_t count2;
+	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
+					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
+	if (end >= max_block)
+		end = max_block;
+	if ((start >= end) || (start > max_block))
+		return 0;
 
-			bh = sb_bread(inode->i_sb, le32_to_cpu(blk));
-			if (!bh) {
-				EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk),
-						       "Read failure");
-				return -EIO;
-			}
-			if (first > offset) {
-				first2 = first - offset;
-				count2 = count;
+	n = ext4_block_to_path(inode, start, offsets, NULL);
+	n2 = ext4_block_to_path(inode, end, offsets2, NULL);
+
+	BUG_ON(n > n2);
+
+	if ((n == 1) && (n == n2)) {
+		/* We're punching only within direct block range */
+		ext4_free_data(handle, inode, NULL, i_data + offsets[0],
+			       i_data + offsets2[0]);
+		return 0;
+	} else if (n2 > n) {
+		/*
+		 * Start and end are at different levels, so we're going
+		 * to free the partial branch at the start and the partial
+		 * branch at the end of the range.  If there are levels in
+		 * between, the do_indirects label will take care of them.
+		 */
+
+		if (n == 1) {
+			/*
+			 * Start is at the direct block level, free
+			 * everything to the end of the level.
+			 */
+			ext4_free_data(handle, inode, NULL, i_data + offsets[0],
+				       i_data + EXT4_NDIR_BLOCKS);
+			goto end_range;
+		}
+
+		partial = ext4_find_shared(inode, n, offsets, chain, &nr);
+		if (nr) {
+			if (partial == chain) {
+				/* Shared branch grows from the inode */
+				ext4_free_branches(handle, inode, NULL,
+					   &nr, &nr+1, (chain+n-1) - partial);
+				*partial->p = 0;
 			} else {
-				first2 = 0;
-				count2 = count - (offset - first);
-			}
-			ret = free_hole_blocks(handle, inode, bh,
-					       (__le32 *)bh->b_data, level - 1,
-					       first2, count2,
-					       inode->i_sb->s_blocksize >> 2);
-			if (ret) {
-				brelse(bh);
-				goto err;
+				/* Shared branch grows from an indirect block */
+				BUFFER_TRACE(partial->bh, "get_write_access");
+				ext4_free_branches(handle, inode, partial->bh,
+					partial->p,
+					partial->p+1, (chain+n-1) - partial);
 			}
 		}
-		if (level == 0 ||
-		    (bh && all_zeroes((__le32 *)bh->b_data,
-				      (__le32 *)bh->b_data + addr_per_block))) {
-			ext4_free_data(handle, inode, parent_bh, &blk, &blk+1);
-			*i_data = 0;
+
+		/*
+		 * Clear the ends of indirect blocks on the shared branch
+		 * at the start of the range
+		 */
+		while (partial > chain) {
+			ext4_free_branches(handle, inode, partial->bh,
+				partial->p + 1,
+				(__le32 *)partial->bh->b_data+addr_per_block,
+				(chain+n-1) - partial);
+			BUFFER_TRACE(partial->bh, "call brelse");
+			brelse(partial->bh);
+			partial--;
 		}
-		brelse(bh);
-		bh = NULL;
-	}
 
-err:
-	return ret;
-}
-
-int ext4_free_hole_blocks(handle_t *handle, struct inode *inode,
-			  ext4_lblk_t first, ext4_lblk_t stop)
-{
-	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
-	int level, ret = 0;
-	int num = EXT4_NDIR_BLOCKS;
-	ext4_lblk_t count, max = EXT4_NDIR_BLOCKS;
-	__le32 *i_data = EXT4_I(inode)->i_data;
-
-	count = stop - first;
-	for (level = 0; level < 4; level++, max *= addr_per_block) {
-		if (first < max) {
-			ret = free_hole_blocks(handle, inode, NULL, i_data,
-					       level, first, count, num);
-			if (ret)
-				goto err;
-			if (count > max - first)
-				count -= max - first;
-			else
-				break;
-			first = 0;
+end_range:
+		partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
+		if (nr2) {
+			if (partial2 == chain2) {
+				/*
+				 * Remember, end is exclusive so here we're at
+				 * the start of the next level we're not going
+				 * to free. Everything was covered by the start
+				 * of the range.
+				 */
+				return 0;
+			} else {
+				/* Shared branch grows from an indirect block */
+				partial2--;
+			}
 		} else {
-			first -= max;
+			/*
+			 * ext4_find_shared returns an Indirect structure
+			 * which points to the last element that should not
+			 * be removed by truncate.  But this is the end of
+			 * the range in punch_hole, so we need to point to
+			 * the next element.
+			 */
+			partial2->p++;
 		}
-		i_data += num;
-		if (level == 0) {
-			num = 1;
-			max = 1;
+
+		/*
+		 * Clear the ends of indirect blocks on the shared branch
+		 * at the end of the range
+		 */
+		while (partial2 > chain2) {
+			ext4_free_branches(handle, inode, partial2->bh,
+					   (__le32 *)partial2->bh->b_data,
+					   partial2->p,
+					   (chain2+n2-1) - partial2);
+			BUFFER_TRACE(partial2->bh, "call brelse");
+			brelse(partial2->bh);
+			partial2--;
+		}
+		goto do_indirects;
+	}
+
+	/* Punch happened within the same level (n == n2) */
+	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
+	partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
+	/*
+	 * ext4_find_shared returns an Indirect structure which points
+	 * to the last element that should not be removed by truncate.
+	 * But this is the end of the range in punch_hole, so we need
+	 * to point to the next element.
+	 */
+	partial2->p++;
+	while ((partial > chain) || (partial2 > chain2)) {
+		/* We're at the same block, so we're almost finished */
+		if ((partial->bh && partial2->bh) &&
+		    (partial->bh->b_blocknr == partial2->bh->b_blocknr)) {
+			if ((partial > chain) && (partial2 > chain2)) {
+				ext4_free_branches(handle, inode, partial->bh,
+						   partial->p + 1,
+						   partial2->p,
+						   (chain+n-1) - partial);
+				BUFFER_TRACE(partial->bh, "call brelse");
+				brelse(partial->bh);
+				BUFFER_TRACE(partial2->bh, "call brelse");
+				brelse(partial2->bh);
+			}
+			return 0;
+		}
+		/*
+		 * Clear the ends of indirect blocks on the shared branch
+		 * at the start of the range
+		 */
+		if (partial > chain) {
+			ext4_free_branches(handle, inode, partial->bh,
+				   partial->p + 1,
+				   (__le32 *)partial->bh->b_data+addr_per_block,
+				   (chain+n-1) - partial);
+			BUFFER_TRACE(partial->bh, "call brelse");
+			brelse(partial->bh);
+			partial--;
+		}
+		/*
+		 * Clear the ends of indirect blocks on the shared branch
+		 * at the end of the range
+		 */
+		if (partial2 > chain2) {
+			ext4_free_branches(handle, inode, partial2->bh,
+					   (__le32 *)partial2->bh->b_data,
+					   partial2->p,
+					   (chain2+n-1) - partial2);
+			BUFFER_TRACE(partial2->bh, "call brelse");
+			brelse(partial2->bh);
+			partial2--;
 		}
 	}
 
-err:
-	return ret;
+do_indirects:
+	/* Kill the remaining (whole) subtrees */
+	switch (offsets[0]) {
+	default:
+		if (++n >= n2)
+			return 0;
+		nr = i_data[EXT4_IND_BLOCK];
+		if (nr) {
+			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
+			i_data[EXT4_IND_BLOCK] = 0;
+		}
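+		/* fall through */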
+	case EXT4_IND_BLOCK:
+		if (++n >= n2)
+			return 0;
+		nr = i_data[EXT4_DIND_BLOCK];
+		if (nr) {
+			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
+			i_data[EXT4_DIND_BLOCK] = 0;
+		}
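+		/* fall through */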
+	case EXT4_DIND_BLOCK:
+		if (++n >= n2)
+			return 0;
+		nr = i_data[EXT4_TIND_BLOCK];
+		if (nr) {
+			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
+			i_data[EXT4_TIND_BLOCK] = 0;
+		}
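+		/* fall through */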
+	case EXT4_TIND_BLOCK:
+		;
+	}
+	return 0;
 }
-
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index e350be6..6990a6f 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -11,18 +11,20 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
+
+#include <linux/fiemap.h>
+
 #include "ext4_jbd2.h"
 #include "ext4.h"
 #include "xattr.h"
 #include "truncate.h"
-#include <linux/fiemap.h>
 
 #define EXT4_XATTR_SYSTEM_DATA	"data"
 #define EXT4_MIN_INLINE_DATA_SIZE	((sizeof(__le32) * EXT4_N_BLOCKS))
 #define EXT4_INLINE_DOTDOT_OFFSET	2
 #define EXT4_INLINE_DOTDOT_SIZE		4
 
-int ext4_get_inline_size(struct inode *inode)
+static int ext4_get_inline_size(struct inode *inode)
 {
 	if (EXT4_I(inode)->i_inline_off)
 		return EXT4_I(inode)->i_inline_size;
@@ -72,7 +74,7 @@
 		entry = (struct ext4_xattr_entry *)
 			((void *)raw_inode + EXT4_I(inode)->i_inline_off);
 
-		free += le32_to_cpu(entry->e_value_size);
+		free += EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size));
 		goto out;
 	}
 
@@ -120,12 +122,6 @@
 	return max_inline_size + EXT4_MIN_INLINE_DATA_SIZE;
 }
 
-int ext4_has_inline_data(struct inode *inode)
-{
-	return ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA) &&
-	       EXT4_I(inode)->i_inline_off;
-}
-
 /*
  * this function does not take xattr_sem, which is OK because it is
  * currently only used in a code path coming from ext4_iget, before
@@ -211,8 +207,8 @@
  * value since it is already handled by ext4_xattr_ibody_inline_set.
  * That saves us one memcpy.
  */
-void ext4_write_inline_data(struct inode *inode, struct ext4_iloc *iloc,
-			    void *buffer, loff_t pos, unsigned int len)
+static void ext4_write_inline_data(struct inode *inode, struct ext4_iloc *iloc,
+				   void *buffer, loff_t pos, unsigned int len)
 {
 	struct ext4_xattr_entry *entry;
 	struct ext4_xattr_ibody_header *header;
@@ -264,6 +260,7 @@
 	if (error)
 		return error;
 
+	BUFFER_TRACE(is.iloc.bh, "get_write_access");
 	error = ext4_journal_get_write_access(handle, is.iloc.bh);
 	if (error)
 		goto out;
@@ -347,6 +344,7 @@
 	if (error == -ENODATA)
 		goto out;
 
+	BUFFER_TRACE(is.iloc.bh, "get_write_access");
 	error = ext4_journal_get_write_access(handle, is.iloc.bh);
 	if (error)
 		goto out;
@@ -373,8 +371,8 @@
 	return error;
 }
 
-int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
-			     unsigned int len)
+static int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
+				    unsigned int len)
 {
 	int ret, size;
 	struct ext4_inode_info *ei = EXT4_I(inode);
@@ -424,6 +422,7 @@
 	if (error)
 		goto out;
 
+	BUFFER_TRACE(is.iloc.bh, "get_write_access");
 	error = ext4_journal_get_write_access(handle, is.iloc.bh);
 	if (error)
 		goto out;
@@ -597,6 +596,7 @@
 	if (ret) {
 		unlock_page(page);
 		page_cache_release(page);
+		page = NULL;
 		ext4_orphan_add(handle, inode);
 		up_write(&EXT4_I(inode)->xattr_sem);
 		sem_held = 0;
@@ -616,7 +616,8 @@
 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
 		goto retry;
 
-	block_commit_write(page, from, to);
+	if (page)
+		block_commit_write(page, from, to);
 out:
 	if (page) {
 		unlock_page(page);
@@ -849,15 +850,16 @@
 	handle_t *handle;
 	struct page *page;
 	struct ext4_iloc iloc;
+	int retries;
 
 	ret = ext4_get_inode_loc(inode, &iloc);
 	if (ret)
 		return ret;
 
+retry_journal:
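+	/*
+	 * Restarted on ENOSPC: the handle is stopped and the whole
+	 * operation retried while ext4_should_retry_alloc() allows it.
+	 */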
 	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
 	if (IS_ERR(handle)) {
 		ret = PTR_ERR(handle);
-		handle = NULL;
 		goto out;
 	}
 
@@ -867,7 +869,7 @@
 	if (inline_size >= pos + len) {
 		ret = ext4_prepare_inline_data(handle, inode, pos + len);
 		if (ret && ret != -ENOSPC)
-			goto out;
+			goto out_journal;
 	}
 
 	if (ret == -ENOSPC) {
@@ -875,6 +877,10 @@
 							    inode,
 							    flags,
 							    fsdata);
+		ext4_journal_stop(handle);
+		if (ret == -ENOSPC &&
+		    ext4_should_retry_alloc(inode->i_sb, &retries))
+			goto retry_journal;
 		goto out;
 	}
 
@@ -887,7 +893,7 @@
 	page = grab_cache_page_write_begin(mapping, 0, flags);
 	if (!page) {
 		ret = -ENOMEM;
-		goto out;
+		goto out_journal;
 	}
 
 	down_read(&EXT4_I(inode)->xattr_sem);
@@ -904,16 +910,15 @@
 
 	up_read(&EXT4_I(inode)->xattr_sem);
 	*pagep = page;
-	handle = NULL;
 	brelse(iloc.bh);
 	return 1;
 out_release_page:
 	up_read(&EXT4_I(inode)->xattr_sem);
 	unlock_page(page);
 	page_cache_release(page);
+out_journal:
+	ext4_journal_stop(handle);
 out:
-	if (handle)
-		ext4_journal_stop(handle);
 	brelse(iloc.bh);
 	return ret;
 }
@@ -986,29 +991,26 @@
  * and -EEXIST if directory entry already exists.
  */
 static int ext4_add_dirent_to_inline(handle_t *handle,
+				     struct ext4_filename *fname,
 				     struct dentry *dentry,
 				     struct inode *inode,
 				     struct ext4_iloc *iloc,
 				     void *inline_start, int inline_size)
 {
 	struct inode	*dir = dentry->d_parent->d_inode;
-	const char	*name = dentry->d_name.name;
-	int		namelen = dentry->d_name.len;
-	unsigned short	reclen;
 	int		err;
 	struct ext4_dir_entry_2 *de;
 
-	reclen = EXT4_DIR_REC_LEN(namelen);
-	err = ext4_find_dest_de(dir, inode, iloc->bh,
-				inline_start, inline_size,
-				name, namelen, &de);
+	err = ext4_find_dest_de(dir, inode, iloc->bh, inline_start,
+				inline_size, fname, &de);
 	if (err)
 		return err;
 
+	BUFFER_TRACE(iloc->bh, "get_write_access");
 	err = ext4_journal_get_write_access(handle, iloc->bh);
 	if (err)
 		return err;
-	ext4_insert_dentry(inode, de, inline_size, name, namelen);
+	ext4_insert_dentry(dir, inode, de, inline_size, fname);
 
 	ext4_show_inline_dir(dir, iloc->bh, inline_start, inline_size);
 
@@ -1126,8 +1128,7 @@
 	memcpy((void *)de, buf + EXT4_INLINE_DOTDOT_SIZE,
 		inline_size - EXT4_INLINE_DOTDOT_SIZE);
 
-	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
-				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	if (ext4_has_metadata_csum(inode->i_sb))
 		csum_size = sizeof(struct ext4_dir_entry_tail);
 
 	inode->i_size = inode->i_sb->s_blocksize;
@@ -1172,6 +1173,18 @@
 	if (error < 0)
 		goto out;
 
+	/*
+	 * Make sure the inline directory entries pass checks before we try to
+	 * convert them, so that we avoid touching stuff that needs fsck.
+	 */
+	if (S_ISDIR(inode->i_mode)) {
+		error = ext4_check_all_de(inode, iloc->bh,
+					buf + EXT4_INLINE_DOTDOT_SIZE,
+					inline_size - EXT4_INLINE_DOTDOT_SIZE);
+		if (error)
+			goto out;
+	}
+
 	error = ext4_destroy_inline_data_nolock(handle, inode);
 	if (error)
 		goto out;
@@ -1228,8 +1241,8 @@
  * If it succeeds, return 0.  If not, the inline dir has been extended and
  * the data copied to the newly created block.
  */
-int ext4_try_add_inline_entry(handle_t *handle, struct dentry *dentry,
-			      struct inode *inode)
+int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname,
+			      struct dentry *dentry, struct inode *inode)
 {
 	int ret, inline_size;
 	void *inline_start;
@@ -1248,7 +1261,7 @@
 						 EXT4_INLINE_DOTDOT_SIZE;
 	inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE;
 
-	ret = ext4_add_dirent_to_inline(handle, dentry, inode, &iloc,
+	ret = ext4_add_dirent_to_inline(handle, fname, dentry, inode, &iloc,
 					inline_start, inline_size);
 	if (ret != -ENOSPC)
 		goto out;
@@ -1269,8 +1282,9 @@
 	if (inline_size) {
 		inline_start = ext4_get_inline_xattr_pos(dir, &iloc);
 
-		ret = ext4_add_dirent_to_inline(handle, dentry, inode, &iloc,
-						inline_start, inline_size);
+		ret = ext4_add_dirent_to_inline(handle, fname, dentry,
+						inode, &iloc, inline_start,
+						inline_size);
 
 		if (ret != -ENOSPC)
 			goto out;
@@ -1310,6 +1324,7 @@
 	struct ext4_iloc iloc;
 	void *dir_buf = NULL;
 	struct ext4_dir_entry_2 fake;
+	struct ext4_str tmp_str;
 
 	ret = ext4_get_inode_loc(inode, &iloc);
 	if (ret)
@@ -1381,8 +1396,10 @@
 			continue;
 		if (de->inode == 0)
 			continue;
-		err = ext4_htree_store_dirent(dir_file,
-				   hinfo->hash, hinfo->minor_hash, de);
+		tmp_str.name = de->name;
+		tmp_str.len = de->name_len;
+		err = ext4_htree_store_dirent(dir_file, hinfo->hash,
+					      hinfo->minor_hash, de, &tmp_str);
 		if (err) {
 			count = err;
 			goto out;
@@ -1404,16 +1421,15 @@
  * offset as if '.' and '..' really take place.
  *
  */
-int ext4_read_inline_dir(struct file *filp,
-			 void *dirent, filldir_t filldir,
+int ext4_read_inline_dir(struct file *file,
+			 struct dir_context *ctx,
 			 int *has_inline_data)
 {
-	int error = 0;
 	unsigned int offset, parent_ino;
-	int i, stored;
+	int i;
 	struct ext4_dir_entry_2 *de;
 	struct super_block *sb;
-	struct inode *inode = file_inode(filp);
+	struct inode *inode = file_inode(file);
 	int ret, inline_size = 0;
 	struct ext4_iloc iloc;
 	void *dir_buf = NULL;
@@ -1443,10 +1459,10 @@
 	if (ret < 0)
 		goto out;
 
+	ret = 0;
 	sb = inode->i_sb;
-	stored = 0;
 	parent_ino = le32_to_cpu(((struct ext4_dir_entry_2 *)dir_buf)->inode);
-	offset = filp->f_pos;
+	offset = ctx->pos;
 
 	/*
 	 * dotdot_offset and dotdot_size is the real offset and
@@ -1460,104 +1476,74 @@
 	extra_offset = dotdot_size - EXT4_INLINE_DOTDOT_SIZE;
 	extra_size = extra_offset + inline_size;
 
-	while (!error && !stored && filp->f_pos < extra_size) {
-revalidate:
-		/*
-		 * If the version has changed since the last call to
-		 * readdir(2), then we might be pointing to an invalid
-		 * dirent right now.  Scan from the start of the inline
-		 * dir to make sure.
-		 */
-		if (filp->f_version != inode->i_version) {
-			for (i = 0; i < extra_size && i < offset;) {
-				/*
-				 * "." is with offset 0 and
-				 * ".." is dotdot_offset.
-				 */
-				if (!i) {
-					i = dotdot_offset;
-					continue;
-				} else if (i == dotdot_offset) {
-					i = dotdot_size;
-					continue;
-				}
-				/* for other entry, the real offset in
-				 * the buf has to be tuned accordingly.
-				 */
-				de = (struct ext4_dir_entry_2 *)
-					(dir_buf + i - extra_offset);
-				/* It's too expensive to do a full
-				 * dirent test each time round this
-				 * loop, but we do have to test at
-				 * least that it is non-zero.  A
-				 * failure will be detected in the
-				 * dirent test below. */
-				if (ext4_rec_len_from_disk(de->rec_len,
-					extra_size) < EXT4_DIR_REC_LEN(1))
-					break;
-				i += ext4_rec_len_from_disk(de->rec_len,
-							    extra_size);
-			}
-			offset = i;
-			filp->f_pos = offset;
-			filp->f_version = inode->i_version;
-		}
-
-		while (!error && filp->f_pos < extra_size) {
-			if (filp->f_pos == 0) {
-				error = filldir(dirent, ".", 1, 0, inode->i_ino,
-						DT_DIR);
-				if (error)
-					break;
-				stored++;
-				filp->f_pos = dotdot_offset;
+	/*
+	 * If the version has changed since the last call to
+	 * readdir(2), then we might be pointing to an invalid
+	 * dirent right now.  Scan from the start of the inline
+	 * dir to make sure.
+	 */
+	if (file->f_version != inode->i_version) {
+		for (i = 0; i < extra_size && i < offset;) {
+			/*
+			 * "." is with offset 0 and
+			 * ".." is dotdot_offset.
+			 */
+			if (!i) {
+				i = dotdot_offset;
+				continue;
+			} else if (i == dotdot_offset) {
+				i = dotdot_size;
 				continue;
 			}
-
-			if (filp->f_pos == dotdot_offset) {
-				error = filldir(dirent, "..", 2,
-						dotdot_offset,
-						parent_ino, DT_DIR);
-				if (error)
-					break;
-				stored++;
-
-				filp->f_pos = dotdot_size;
-				continue;
-			}
-
+			/* for other entry, the real offset in
+			 * the buf has to be tuned accordingly.
+			 */
 			de = (struct ext4_dir_entry_2 *)
-				(dir_buf + filp->f_pos - extra_offset);
-			if (ext4_check_dir_entry(inode, filp, de,
-						 iloc.bh, dir_buf,
-						 extra_size, filp->f_pos)) {
-				ret = stored;
-				goto out;
-			}
-			if (le32_to_cpu(de->inode)) {
-				/* We might block in the next section
-				 * if the data destination is
-				 * currently swapped out.  So, use a
-				 * version stamp to detect whether or
-				 * not the directory has been modified
-				 * during the copy operation.
-				 */
-				u64 version = filp->f_version;
-
-				error = filldir(dirent, de->name,
-						de->name_len,
-						filp->f_pos,
-						le32_to_cpu(de->inode),
-						get_dtype(sb, de->file_type));
-				if (error)
-					break;
-				if (version != filp->f_version)
-					goto revalidate;
-				stored++;
-			}
-			filp->f_pos += ext4_rec_len_from_disk(de->rec_len,
-							      extra_size);
+				(dir_buf + i - extra_offset);
+			/* It's too expensive to do a full
+			 * dirent test each time round this
+			 * loop, but we do have to test at
+			 * least that it is non-zero.  A
+			 * failure will be detected in the
+			 * dirent test below. */
+			if (ext4_rec_len_from_disk(de->rec_len, extra_size)
+				< EXT4_DIR_REC_LEN(1))
+				break;
+			i += ext4_rec_len_from_disk(de->rec_len,
+						    extra_size);
 		}
+		offset = i;
+		ctx->pos = offset;
+		file->f_version = inode->i_version;
+	}
+
+	while (ctx->pos < extra_size) {
+		if (ctx->pos == 0) {
+			if (!dir_emit(ctx, ".", 1, inode->i_ino, DT_DIR))
+				goto out;
+			ctx->pos = dotdot_offset;
+			continue;
+		}
+
+		if (ctx->pos == dotdot_offset) {
+			if (!dir_emit(ctx, "..", 2, parent_ino, DT_DIR))
+				goto out;
+			ctx->pos = dotdot_size;
+			continue;
+		}
+
+		de = (struct ext4_dir_entry_2 *)
+			(dir_buf + ctx->pos - extra_offset);
+		if (ext4_check_dir_entry(inode, file, de, iloc.bh, dir_buf,
+					 extra_size, ctx->pos))
+			goto out;
+		if (le32_to_cpu(de->inode)) {
+			if (!dir_emit(ctx, de->name, de->name_len,
+				      le32_to_cpu(de->inode),
+				      get_dtype(sb, de->file_type)))
+				goto out;
+		}
+		ctx->pos += ext4_rec_len_from_disk(de->rec_len, extra_size);
 	}
 out:
 	kfree(dir_buf);
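The hunk above moves ext4_read_inline_dir() from the filldir callback pair to
the VFS dir_context API. A minimal sketch of the emit pattern the new loop
relies on (the dir_emit()/dir_context interface is as in 3.11+
include/linux/fs.h; example_iterate is a hypothetical caller):

static int example_iterate(struct file *file, struct dir_context *ctx)
{
	/*
	 * dir_emit() returns false once the user buffer is full; the
	 * loop simply stops, and ctx->pos records where the next
	 * readdir(2) call should resume - no filldir error plumbing.
	 */
	if (ctx->pos == 0) {
		if (!dir_emit(ctx, ".", 1, file_inode(file)->i_ino, DT_DIR))
			return 0;
		ctx->pos = 1;
	}
	/* ... emit real entries, advancing ctx->pos after each one ... */
	return 0;
}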
@@ -1619,6 +1605,7 @@
 }
 
 struct buffer_head *ext4_find_inline_entry(struct inode *dir,
+					struct ext4_filename *fname,
 					const struct qstr *d_name,
 					struct ext4_dir_entry_2 **res_dir,
 					int *has_inline_data)
@@ -1640,8 +1627,8 @@
 	inline_start = (void *)ext4_raw_inode(&iloc)->i_block +
 						EXT4_INLINE_DOTDOT_SIZE;
 	inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE;
-	ret = search_dir(iloc.bh, inline_start, inline_size,
-			 dir, d_name, 0, res_dir);
+	ret = ext4_search_dir(iloc.bh, inline_start, inline_size,
+			      dir, fname, d_name, 0, res_dir);
 	if (ret == 1)
 		goto out_find;
 	if (ret < 0)
@@ -1653,8 +1640,8 @@
 	inline_start = ext4_get_inline_xattr_pos(dir, &iloc);
 	inline_size = ext4_get_inline_size(dir) - EXT4_MIN_INLINE_DATA_SIZE;
 
-	ret = search_dir(iloc.bh, inline_start, inline_size,
-			 dir, d_name, 0, res_dir);
+	ret = ext4_search_dir(iloc.bh, inline_start, inline_size,
+			      dir, fname, d_name, 0, res_dir);
 	if (ret == 1)
 		goto out_find;
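Both ext4_search_dir() calls gain a struct ext4_filename argument, which the
encryption series threads through so a lookup can match either the
user-supplied name or the on-disk (possibly encrypted) name. A rough sketch of
the structure's shape (an assumption based on the rest of this series; field
names may differ):

struct ext4_filename {
	const struct qstr *usr_fname;	/* name as supplied by the caller */
	struct ext4_str disk_name;	/* name as it appears on disk */
#ifdef CONFIG_EXT4_FS_ENCRYPTION
	struct dx_hash_info hinfo;	/* hash of the on-disk name */
	struct ext4_str crypto_buf;	/* backing buffer for disk_name */
#endif
};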
 
@@ -1698,6 +1685,7 @@
 				EXT4_MIN_INLINE_DATA_SIZE;
 	}
 
+	BUFFER_TRACE(bh, "get_write_access");
 	err = ext4_journal_get_write_access(handle, bh);
 	if (err)
 		goto out;
@@ -1870,7 +1858,6 @@
 {
 	int error;
 	struct ext4_xattr_entry *entry;
-	struct ext4_xattr_ibody_header *header;
 	struct ext4_inode *raw_inode;
 	struct ext4_iloc iloc;
 
@@ -1879,7 +1866,6 @@
 		return error;
 
 	raw_inode = ext4_raw_inode(&iloc);
-	header = IHDR(inode, raw_inode);
 	entry = (struct ext4_xattr_entry *)((void *)raw_inode +
 					    EXT4_I(inode)->i_inline_off);
 	if (EXT4_XATTR_LEN(entry->e_name_len) +
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 31179ba..152e55b 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -89,8 +89,7 @@
 
 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
 	    cpu_to_le32(EXT4_OS_LINUX) ||
-	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
-		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	    !ext4_has_metadata_csum(inode->i_sb))
 		return 1;
 
 	provided = le16_to_cpu(raw->i_checksum_lo);
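The open-coded RO_COMPAT feature test collapses into an
ext4_has_metadata_csum() helper. A plausible sketch of that helper (an
assumption; its actual definition is added to fs/ext4/ext4.h elsewhere in this
patch):

static inline int ext4_has_metadata_csum(struct super_block *sb)
{
	return EXT4_HAS_RO_COMPAT_FEATURE(sb,
			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM);
}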
@@ -111,8 +110,7 @@
 
 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
 	    cpu_to_le32(EXT4_OS_LINUX) ||
-	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
-		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	    !ext4_has_metadata_csum(inode->i_sb))
 		return;
 
 	csum = ext4_inode_csum(inode, raw, ei);
@@ -139,20 +137,23 @@
 						   new_size);
 }
 
-static void ext4_invalidatepage(struct page *page, unsigned long offset);
+static void ext4_invalidatepage(struct page *page, unsigned int offset,
+				unsigned int length);
 static int __ext4_journalled_writepage(struct page *page, unsigned int len);
 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
-static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
-		struct inode *inode, struct page *page, loff_t from,
-		loff_t length, int flags);
+static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
+				  int pextents);
 
 /*
  * Test whether an inode is a fast symlink.
  */
-static int ext4_inode_is_fast_symlink(struct inode *inode)
+int ext4_inode_is_fast_symlink(struct inode *inode)
 {
-	int ea_blocks = EXT4_I(inode)->i_file_acl ?
-		(inode->i_sb->s_blocksize >> 9) : 0;
+	int ea_blocks = EXT4_I(inode)->i_file_acl ?
+		EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
+
+	if (ext4_has_inline_data(inode))
+		return 0;
 
 	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
 }
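A worked example of the fixed arithmetic, assuming 4K clusters: an inode with
an xattr block has i_file_acl set and charges one cluster to i_blocks, i.e.
4096 >> 9 = 8 sectors, so ea_blocks = 8 and i_blocks - ea_blocks == 0 still
classifies the symlink as fast. Inline-data symlinks are excluded up front
because their target lives in i_block (or xattr space) and is served by the
inline-data paths, not by a fast-symlink read of raw i_block.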
@@ -221,21 +222,21 @@
 			jbd2_complete_transaction(journal, commit_tid);
 			filemap_write_and_wait(&inode->i_data);
 		}
-		truncate_inode_pages(&inode->i_data, 0);
-		ext4_ioend_shutdown(inode);
+		truncate_inode_pages_final(&inode->i_data);
+
+		WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count));
 		goto no_delete;
 	}
 
-	if (!is_bad_inode(inode))
-		dquot_initialize(inode);
+	if (is_bad_inode(inode))
+		goto no_delete;
+	dquot_initialize(inode);
 
 	if (ext4_should_order_data(inode))
 		ext4_begin_ordered_truncate(inode, 0);
-	truncate_inode_pages(&inode->i_data, 0);
-	ext4_ioend_shutdown(inode);
+	truncate_inode_pages_final(&inode->i_data);
 
-	if (is_bad_inode(inode))
-		goto no_delete;
+	WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count));
 
 	/*
 	 * Protect us against freezing - iput() caller didn't have to have any
@@ -327,18 +328,6 @@
 #endif
 
 /*
- * Calculate the number of metadata blocks need to reserve
- * to allocate a block located at @lblock
- */
-static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
-{
-	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
-		return ext4_ext_calc_metadata_amount(inode, lblock);
-
-	return ext4_ind_calc_metadata_amount(inode, lblock);
-}
-
-/*
  * Called with i_data_sem down, which is important since we can call
  * ext4_discard_preallocations() from here.
  */
@@ -359,35 +348,10 @@
 		used = ei->i_reserved_data_blocks;
 	}
 
-	if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
-		ext4_warning(inode->i_sb, "ino %lu, allocated %d "
-			"with only %d reserved metadata blocks "
-			"(releasing %d blocks with reserved %d data blocks)",
-			inode->i_ino, ei->i_allocated_meta_blocks,
-			     ei->i_reserved_meta_blocks, used,
-			     ei->i_reserved_data_blocks);
-		WARN_ON(1);
-		ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
-	}
-
 	/* Update per-inode reservations */
 	ei->i_reserved_data_blocks -= used;
-	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
-	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
-			   used + ei->i_allocated_meta_blocks);
-	ei->i_allocated_meta_blocks = 0;
+	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);
 
-	if (ei->i_reserved_data_blocks == 0) {
-		/*
-		 * We can release all of the reserved metadata blocks
-		 * only when we have written all of the delayed
-		 * allocation blocks.
-		 */
-		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
-				   ei->i_reserved_meta_blocks);
-		ei->i_reserved_meta_blocks = 0;
-		ei->i_da_metadata_calc_len = 0;
-	}
 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 
 	/* Update quota subsystem for data blocks */
@@ -430,66 +394,6 @@
 #define check_block_validity(inode, map)	\
 	__check_block_validity((inode), __func__, __LINE__, (map))
 
-/*
- * Return the number of contiguous dirty pages in a given inode
- * starting at page frame idx.
- */
-static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
-				    unsigned int max_pages)
-{
-	struct address_space *mapping = inode->i_mapping;
-	pgoff_t	index;
-	struct pagevec pvec;
-	pgoff_t num = 0;
-	int i, nr_pages, done = 0;
-
-	if (max_pages == 0)
-		return 0;
-	pagevec_init(&pvec, 0);
-	while (!done) {
-		index = idx;
-		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-					      PAGECACHE_TAG_DIRTY,
-					      (pgoff_t)PAGEVEC_SIZE);
-		if (nr_pages == 0)
-			break;
-		for (i = 0; i < nr_pages; i++) {
-			struct page *page = pvec.pages[i];
-			struct buffer_head *bh, *head;
-
-			lock_page(page);
-			if (unlikely(page->mapping != mapping) ||
-			    !PageDirty(page) ||
-			    PageWriteback(page) ||
-			    page->index != idx) {
-				done = 1;
-				unlock_page(page);
-				break;
-			}
-			if (page_has_buffers(page)) {
-				bh = head = page_buffers(page);
-				do {
-					if (!buffer_delay(bh) &&
-					    !buffer_unwritten(bh))
-						done = 1;
-					bh = bh->b_this_page;
-				} while (!done && (bh != head));
-			}
-			unlock_page(page);
-			if (done)
-				break;
-			idx++;
-			num++;
-			if (num >= max_pages) {
-				done = 1;
-				break;
-			}
-		}
-		pagevec_release(&pvec);
-	}
-	return num;
-}
-
 #ifdef ES_AGGRESSIVE_TEST
 static void ext4_map_blocks_es_recheck(handle_t *handle,
 				       struct inode *inode,
@@ -508,7 +412,7 @@
 	 * could be converted.
 	 */
 	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
-		down_read((&EXT4_I(inode)->i_data_sem));
+		down_read(&EXT4_I(inode)->i_data_sem);
 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
 		retval = ext4_ext_map_blocks(handle, inode, map, flags &
 					     EXT4_GET_BLOCKS_KEEP_SIZE);
@@ -531,7 +435,7 @@
 	if (es_map->m_lblk != map->m_lblk ||
 	    es_map->m_flags != map->m_flags ||
 	    es_map->m_pblk != map->m_pblk) {
-		printk("ES cache assertation failed for inode: %lu "
+		printk("ES cache assertion failed for inode: %lu "
 		       "es_cached ex [%d/%d/%llu/%x] != "
 		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
 		       inode->i_ino, es_map->m_lblk, es_map->m_len,
@@ -554,8 +458,8 @@
  * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping
  * based files
  *
- * On success, it returns the number of blocks being mapped or allocate.
- * if create==0 and the blocks are pre-allocated and uninitialized block,
+ * On success, it returns the number of blocks being mapped or allocated.
+ * If create==0 and the blocks are pre-allocated and unwritten,
  * the result buffer head is unmapped. If the create ==1, it will make sure
  * the buffer head is mapped.
  *
@@ -569,6 +473,7 @@
 {
 	struct extent_status es;
 	int retval;
+	int ret = 0;
 #ifdef ES_AGGRESSIVE_TEST
 	struct ext4_map_blocks orig_map;
 
@@ -580,8 +485,19 @@
 		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
 		  (unsigned long) map->m_lblk);
 
+	/*
+	 * ext4_map_blocks returns an int, and m_len is an unsigned int
+	 */
+	if (unlikely(map->m_len > INT_MAX))
+		map->m_len = INT_MAX;
+
+	/* We can only handle block numbers less than EXT_MAX_BLOCKS */
+	if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
+		return -EIO;
+
 	/* Lookup extent status tree firstly */
 	if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
+		ext4_es_lru_add(inode);
 		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
 			map->m_pblk = ext4_es_pblock(&es) +
 					map->m_lblk - es.es_lblk;
@@ -608,7 +524,7 @@
 	 * file system block.
 	 */
 	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
-		down_read((&EXT4_I(inode)->i_data_sem));
+		down_read(&EXT4_I(inode)->i_data_sem);
 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
 		retval = ext4_ext_map_blocks(handle, inode, map, flags &
 					     EXT4_GET_BLOCKS_KEEP_SIZE);
@@ -617,17 +533,15 @@
 					     EXT4_GET_BLOCKS_KEEP_SIZE);
 	}
 	if (retval > 0) {
-		int ret;
-		unsigned long long status;
+		unsigned int status;
 
-#ifdef ES_AGGRESSIVE_TEST
-		if (retval != map->m_len) {
-			printk("ES len assertation failed for inode: %lu "
-			       "retval %d != map->m_len %d "
-			       "in %s (lookup)\n", inode->i_ino, retval,
-			       map->m_len, __func__);
+		if (unlikely(retval != map->m_len)) {
+			ext4_warning(inode->i_sb,
+				     "ES len assertion failed for inode "
+				     "%lu: retval %d != map->m_len %d",
+				     inode->i_ino, retval, map->m_len);
+			WARN_ON(1);
 		}
-#endif
 
 		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
 				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
@@ -646,7 +560,7 @@
 
 found:
 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
-		int ret = check_block_validity(inode, map);
+		ret = check_block_validity(inode, map);
 		if (ret != 0)
 			return ret;
 	}
@@ -663,7 +577,13 @@
 	 * with buffer head unmapped.
 	 */
 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
-		return retval;
+		/*
+		 * If we need to convert extent to unwritten
+		 * we continue and do the actual work in
+		 * ext4_ext_map_blocks()
+		 */
+		if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
+			return retval;
 
 	/*
 	 * Here we clear m_flags because after allocating an new extent,
@@ -672,22 +592,14 @@
 	map->m_flags &= ~EXT4_MAP_FLAGS;
 
 	/*
-	 * New blocks allocate and/or writing to uninitialized extent
+	 * New blocks allocate and/or writing to unwritten extent
 	 * will possibly result in updating i_data, so we take
-	 * the write lock of i_data_sem, and call get_blocks()
+	 * the write lock of i_data_sem, and call get_block()
 	 * with create == 1 flag.
 	 */
-	down_write((&EXT4_I(inode)->i_data_sem));
+	down_write(&EXT4_I(inode)->i_data_sem);
 
 	/*
-	 * if the caller is from delayed allocation writeout path
-	 * we have already reserved fs blocks for allocation
-	 * let the underlying get_block() function know to
-	 * avoid double accounting
-	 */
-	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
-		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
-	/*
 	 * We need to check for EXT4 here because migrate
 	 * could have changed the inode type in between
 	 */
@@ -715,21 +627,17 @@
 			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
 			ext4_da_update_reserve_space(inode, retval, 1);
 	}
-	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
-		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
 
 	if (retval > 0) {
-		int ret;
-		unsigned long long status;
+		unsigned int status;
 
-#ifdef ES_AGGRESSIVE_TEST
-		if (retval != map->m_len) {
-			printk("ES len assertation failed for inode: %lu "
-			       "retval %d != map->m_len %d "
-			       "in %s (allocation)\n", inode->i_ino, retval,
-			       map->m_len, __func__);
+		if (unlikely(retval != map->m_len)) {
+			ext4_warning(inode->i_sb,
+				     "ES len assertion failed for inode "
+				     "%lu: retval %d != map->m_len %d",
+				     inode->i_ino, retval, map->m_len);
+			WARN_ON(1);
 		}
-#endif
 
 		/*
 		 * If the extent has been zeroed out, we don't need to update
@@ -756,7 +664,7 @@
 has_zeroout:
 	up_write((&EXT4_I(inode)->i_data_sem));
 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
-		int ret = check_block_validity(inode, map);
+		ret = check_block_validity(inode, map);
 		if (ret != 0)
 			return ret;
 	}
@@ -796,8 +704,12 @@
 
 	ret = ext4_map_blocks(handle, inode, &map, flags);
 	if (ret > 0) {
+		ext4_io_end_t *io_end = ext4_inode_aio(inode);
+
 		map_bh(bh, inode->i_sb, map.m_pblk);
 		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
+		if (io_end && io_end->flag & EXT4_IO_END_UNWRITTEN)
+			set_buffer_defer_completion(bh);
 		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
 		ret = 0;
 	}
@@ -817,11 +729,11 @@
  * `handle' can be NULL if create is zero
  */
 struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
-				ext4_lblk_t block, int create, int *errp)
+				ext4_lblk_t block, int create)
 {
 	struct ext4_map_blocks map;
 	struct buffer_head *bh;
-	int fatal = 0, err;
+	int err;
 
 	J_ASSERT(handle != NULL || create == 0);
 
@@ -830,21 +742,14 @@
 	err = ext4_map_blocks(handle, inode, &map,
 			      create ? EXT4_GET_BLOCKS_CREATE : 0);
 
-	/* ensure we send some value back into *errp */
-	*errp = 0;
-
-	if (create && err == 0)
-		err = -ENOSPC;	/* should never happen */
+	if (err == 0)
+		return create ? ERR_PTR(-ENOSPC) : NULL;
 	if (err < 0)
-		*errp = err;
-	if (err <= 0)
-		return NULL;
+		return ERR_PTR(err);
 
 	bh = sb_getblk(inode->i_sb, map.m_pblk);
-	if (unlikely(!bh)) {
-		*errp = -ENOMEM;
-		return NULL;
-	}
+	if (unlikely(!bh))
+		return ERR_PTR(-ENOMEM);
 	if (map.m_flags & EXT4_MAP_NEW) {
 		J_ASSERT(create != 0);
 		J_ASSERT(handle != NULL);
@@ -858,44 +763,44 @@
 		 */
 		lock_buffer(bh);
 		BUFFER_TRACE(bh, "call get_create_access");
-		fatal = ext4_journal_get_create_access(handle, bh);
-		if (!fatal && !buffer_uptodate(bh)) {
+		err = ext4_journal_get_create_access(handle, bh);
+		if (unlikely(err)) {
+			unlock_buffer(bh);
+			goto errout;
+		}
+		if (!buffer_uptodate(bh)) {
 			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
 			set_buffer_uptodate(bh);
 		}
 		unlock_buffer(bh);
 		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
 		err = ext4_handle_dirty_metadata(handle, inode, bh);
-		if (!fatal)
-			fatal = err;
-	} else {
+		if (unlikely(err))
+			goto errout;
+	} else
 		BUFFER_TRACE(bh, "not a new buffer");
-	}
-	if (fatal) {
-		*errp = fatal;
-		brelse(bh);
-		bh = NULL;
-	}
 	return bh;
+errout:
+	brelse(bh);
+	return ERR_PTR(err);
 }
 
 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
-			       ext4_lblk_t block, int create, int *err)
+			       ext4_lblk_t block, int create)
 {
 	struct buffer_head *bh;
 
-	bh = ext4_getblk(handle, inode, block, create, err);
-	if (!bh)
+	bh = ext4_getblk(handle, inode, block, create);
+	if (IS_ERR(bh))
 		return bh;
-	if (buffer_uptodate(bh))
+	if (!bh || buffer_uptodate(bh))
 		return bh;
 	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
 	wait_on_buffer(bh);
 	if (buffer_uptodate(bh))
 		return bh;
 	put_bh(bh);
-	*err = -EIO;
-	return NULL;
+	return ERR_PTR(-EIO);
 }
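With the *errp out-parameter gone, ext4_getblk()/ext4_bread() callers
distinguish all three outcomes through the returned pointer alone. A minimal
caller sketch (example_read_block is hypothetical):

static int example_read_block(handle_t *handle, struct inode *inode,
			      ext4_lblk_t block)
{
	struct buffer_head *bh = ext4_bread(handle, inode, block, 0);

	if (IS_ERR(bh))
		return PTR_ERR(bh);	/* I/O or allocation error */
	if (!bh)
		return 0;		/* hole: no block mapped here */
	/* ... consume bh->b_data ... */
	brelse(bh);
	return 0;
}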
 
 int ext4_walk_page_buffers(handle_t *handle,
@@ -971,6 +876,7 @@
 	 */
 	if (dirty)
 		clear_buffer_dirty(bh);
+	BUFFER_TRACE(bh, "get write access");
 	ret = ext4_journal_get_write_access(handle, bh);
 	if (!ret && dirty)
 		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
@@ -979,6 +885,95 @@
 
 static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
 		   struct buffer_head *bh_result, int create);
+
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
+				  get_block_t *get_block)
+{
+	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned to = from + len;
+	struct inode *inode = page->mapping->host;
+	unsigned block_start, block_end;
+	sector_t block;
+	int err = 0;
+	unsigned blocksize = inode->i_sb->s_blocksize;
+	unsigned bbits;
+	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
+	bool decrypt = false;
+
+	BUG_ON(!PageLocked(page));
+	BUG_ON(from > PAGE_CACHE_SIZE);
+	BUG_ON(to > PAGE_CACHE_SIZE);
+	BUG_ON(from > to);
+
+	if (!page_has_buffers(page))
+		create_empty_buffers(page, blocksize, 0);
+	head = page_buffers(page);
+	bbits = ilog2(blocksize);
+	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+
+	for (bh = head, block_start = 0; bh != head || !block_start;
+	    block++, block_start = block_end, bh = bh->b_this_page) {
+		block_end = block_start + blocksize;
+		if (block_end <= from || block_start >= to) {
+			if (PageUptodate(page)) {
+				if (!buffer_uptodate(bh))
+					set_buffer_uptodate(bh);
+			}
+			continue;
+		}
+		if (buffer_new(bh))
+			clear_buffer_new(bh);
+		if (!buffer_mapped(bh)) {
+			WARN_ON(bh->b_size != blocksize);
+			err = get_block(inode, block, bh, 1);
+			if (err)
+				break;
+			if (buffer_new(bh)) {
+				unmap_underlying_metadata(bh->b_bdev,
+							  bh->b_blocknr);
+				if (PageUptodate(page)) {
+					clear_buffer_new(bh);
+					set_buffer_uptodate(bh);
+					mark_buffer_dirty(bh);
+					continue;
+				}
+				if (block_end > to || block_start < from)
+					zero_user_segments(page, to, block_end,
+							   block_start, from);
+				continue;
+			}
+		}
+		if (PageUptodate(page)) {
+			if (!buffer_uptodate(bh))
+				set_buffer_uptodate(bh);
+			continue;
+		}
+		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
+		    !buffer_unwritten(bh) &&
+		    (block_start < from || block_end > to)) {
+			ll_rw_block(READ, 1, &bh);
+			*wait_bh++ = bh;
+			decrypt = ext4_encrypted_inode(inode) &&
+				S_ISREG(inode->i_mode);
+		}
+	}
+	/*
+	 * If we issued read requests, let them complete.
+	 */
+	while (wait_bh > wait) {
+		wait_on_buffer(*--wait_bh);
+		if (!buffer_uptodate(*wait_bh))
+			err = -EIO;
+	}
+	if (unlikely(err))
+		page_zero_new_buffers(page, from, to);
+	else if (decrypt)
+		err = ext4_decrypt_one(inode, page);
+	return err;
+}
+#endif
+
 static int ext4_write_begin(struct file *file, struct address_space *mapping,
 			    loff_t pos, unsigned len, unsigned flags,
 			    struct page **pagep, void **fsdata)
@@ -1041,11 +1036,19 @@
 	/* In case writeback began while the page was unlocked */
 	wait_for_stable_page(page);
 
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+	if (ext4_should_dioread_nolock(inode))
+		ret = ext4_block_write_begin(page, pos, len,
+					     ext4_get_block_write);
+	else
+		ret = ext4_block_write_begin(page, pos, len,
+					     ext4_get_block);
+#else
 	if (ext4_should_dioread_nolock(inode))
 		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
 	else
 		ret = __block_write_begin(page, pos, len, ext4_get_block);
-
+#endif
 	if (!ret && ext4_should_journal_data(inode)) {
 		ret = ext4_walk_page_buffers(handle, page_buffers(page),
 					     from, to, NULL,
@@ -1137,27 +1140,11 @@
 	} else
 		copied = block_write_end(file, mapping, pos,
 					 len, copied, page, fsdata);
-
 	/*
-	 * No need to use i_size_read() here, the i_size
-	 * cannot change under us because we hole i_mutex.
-	 *
-	 * But it's important to update i_size while still holding page lock:
+	 * it's important to update i_size while still holding page lock:
 	 * page writeout could otherwise come in and zero beyond i_size.
 	 */
-	if (pos + copied > inode->i_size) {
-		i_size_write(inode, pos + copied);
-		i_size_changed = 1;
-	}
-
-	if (pos + copied > EXT4_I(inode)->i_disksize) {
-		/* We need to mark inode dirty even if
-		 * new_i_size is less that inode->i_size
-		 * but greater than i_disksize. (hint delalloc)
-		 */
-		ext4_update_i_disksize(inode, (pos + copied));
-		i_size_changed = 1;
-	}
+	i_size_changed = ext4_update_inode_size(inode, pos + copied);
 	unlock_page(page);
 	page_cache_release(page);
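Both write_end paths now funnel the size update through
ext4_update_inode_size(). A sketch of its assumed semantics (the real
definition lives elsewhere in this series; treat the exact body as an
assumption):

/*
 * Raise i_size and i_disksize to 'newsize' while the page is still
 * locked; return non-zero when the caller must mark the inode dirty.
 */
static inline int ext4_update_inode_size(struct inode *inode, loff_t newsize)
{
	int changed = 0;

	if (newsize > inode->i_size) {
		i_size_write(inode, newsize);
		changed = 1;
	}
	if (newsize > EXT4_I(inode)->i_disksize) {
		ext4_update_i_disksize(inode, newsize);
		changed = 1;
	}
	return changed;
}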
 
@@ -1170,8 +1157,6 @@
 	if (i_size_changed)
 		ext4_mark_inode_dirty(handle, inode);
 
-	if (copied < 0)
-		ret = copied;
 	if (pos + len > inode->i_size && ext4_can_truncate(inode))
 		/* if we have allocated more blocks and copied
 		 * less. We will have blocks allocated outside
@@ -1207,7 +1192,7 @@
 	int ret = 0, ret2;
 	int partial = 0;
 	unsigned from, to;
-	loff_t new_i_size;
+	int size_changed = 0;
 
 	trace_ext4_journalled_write_end(inode, pos, len, copied);
 	from = pos & (PAGE_CACHE_SIZE - 1);
@@ -1230,20 +1215,18 @@
 		if (!partial)
 			SetPageUptodate(page);
 	}
-	new_i_size = pos + copied;
-	if (new_i_size > inode->i_size)
-		i_size_write(inode, pos+copied);
+	size_changed = ext4_update_inode_size(inode, pos + copied);
 	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
 	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
-	if (new_i_size > EXT4_I(inode)->i_disksize) {
-		ext4_update_i_disksize(inode, new_i_size);
+	unlock_page(page);
+	page_cache_release(page);
+
+	if (size_changed) {
 		ret2 = ext4_mark_inode_dirty(handle, inode);
 		if (!ret)
 			ret = ret2;
 	}
 
-	unlock_page(page);
-	page_cache_release(page);
 	if (pos + len > inode->i_size && ext4_can_truncate(inode))
 		/* if we have allocated more blocks and copied
 		 * less. We will have blocks allocated outside
@@ -1269,49 +1252,6 @@
 }
 
 /*
- * Reserve a metadata for a single block located at lblock
- */
-static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
-{
-	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-	struct ext4_inode_info *ei = EXT4_I(inode);
-	unsigned int md_needed;
-	ext4_lblk_t save_last_lblock;
-	int save_len;
-
-	/*
-	 * recalculate the amount of metadata blocks to reserve
-	 * in order to allocate nrblocks
-	 * worse case is one extent per block
-	 */
-	spin_lock(&ei->i_block_reservation_lock);
-	/*
-	 * ext4_calc_metadata_amount() has side effects, which we have
-	 * to be prepared undo if we fail to claim space.
-	 */
-	save_len = ei->i_da_metadata_calc_len;
-	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
-	md_needed = EXT4_NUM_B2C(sbi,
-				 ext4_calc_metadata_amount(inode, lblock));
-	trace_ext4_da_reserve_space(inode, md_needed);
-
-	/*
-	 * We do still charge estimated metadata to the sb though;
-	 * we cannot afford to run out of free blocks.
-	 */
-	if (ext4_claim_free_clusters(sbi, md_needed, 0)) {
-		ei->i_da_metadata_calc_len = save_len;
-		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
-		spin_unlock(&ei->i_block_reservation_lock);
-		return -ENOSPC;
-	}
-	ei->i_reserved_meta_blocks += md_needed;
-	spin_unlock(&ei->i_block_reservation_lock);
-
-	return 0;       /* success */
-}
-
-/*
  * Reserve a single cluster located at lblock
  */
 static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
@@ -1320,8 +1260,6 @@
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	unsigned int md_needed;
 	int ret;
-	ext4_lblk_t save_last_lblock;
-	int save_len;
 
 	/*
 	 * We will charge metadata quota at writeout time; this saves
@@ -1342,25 +1280,15 @@
 	 * ext4_calc_metadata_amount() has side effects, which we have
 	 * to be prepared undo if we fail to claim space.
 	 */
-	save_len = ei->i_da_metadata_calc_len;
-	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
-	md_needed = EXT4_NUM_B2C(sbi,
-				 ext4_calc_metadata_amount(inode, lblock));
-	trace_ext4_da_reserve_space(inode, md_needed);
+	md_needed = 0;
+	trace_ext4_da_reserve_space(inode, 0);
 
-	/*
-	 * We do still charge estimated metadata to the sb though;
-	 * we cannot afford to run out of free blocks.
-	 */
-	if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
-		ei->i_da_metadata_calc_len = save_len;
-		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
+	if (ext4_claim_free_clusters(sbi, 1, 0)) {
 		spin_unlock(&ei->i_block_reservation_lock);
 		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
 		return -ENOSPC;
 	}
 	ei->i_reserved_data_blocks++;
-	ei->i_reserved_meta_blocks += md_needed;
 	spin_unlock(&ei->i_block_reservation_lock);
 
 	return 0;       /* success */
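With the speculative metadata reservation gone, a delayed-allocation write now
reserves exactly one cluster of data; metadata is instead allocated at
writeback time under EXT4_GET_BLOCKS_METADATA_NOFAIL (see
mpage_map_one_extent() below). The net per-cluster accounting, as an informal
summary rather than literal code:

/*
 * reserve (here):     i_reserved_data_blocks++, s_dirtyclusters_counter++,
 *                     plus a dquot reservation for one cluster
 * writeback (later):  blocks allocated; metadata may dip into the
 *                     reserved pool rather than failing
 * release/allocation: i_reserved_data_blocks--, s_dirtyclusters_counter--
 */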
@@ -1393,20 +1321,6 @@
 	}
 	ei->i_reserved_data_blocks -= to_free;
 
-	if (ei->i_reserved_data_blocks == 0) {
-		/*
-		 * We can release all of the reserved metadata blocks
-		 * only when we have written all of the delayed
-		 * allocation blocks.
-		 * Note that in case of bigalloc, i_reserved_meta_blocks,
-		 * i_reserved_data_blocks, etc. refer to number of clusters.
-		 */
-		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
-				   ei->i_reserved_meta_blocks);
-		ei->i_reserved_meta_blocks = 0;
-		ei->i_da_metadata_calc_len = 0;
-	}
-
 	/* update fs dirty data blocks counter */
 	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
 
@@ -1416,21 +1330,28 @@
 }
 
 static void ext4_da_page_release_reservation(struct page *page,
-					     unsigned long offset)
+					     unsigned int offset,
+					     unsigned int length)
 {
 	int to_release = 0, contiguous_blks = 0;
 	struct buffer_head *head, *bh;
 	unsigned int curr_off = 0;
 	struct inode *inode = page->mapping->host;
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	unsigned int stop = offset + length;
 	int num_clusters;
 	ext4_fsblk_t lblk;
 
+	BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
+
 	head = page_buffers(page);
 	bh = head;
 	do {
 		unsigned int next_off = curr_off + bh->b_size;
 
+		if (next_off > stop)
+			break;
+
 		if ((offset <= curr_off) && (buffer_delay(bh))) {
 			to_release++;
 			contiguous_blks++;
@@ -1470,140 +1391,43 @@
  * Delayed allocation stuff
  */
 
-/*
- * mpage_da_submit_io - walks through extent of pages and try to write
- * them with writepage() call back
- *
- * @mpd->inode: inode
- * @mpd->first_page: first page of the extent
- * @mpd->next_page: page after the last page of the extent
- *
- * By the time mpage_da_submit_io() is called we expect all blocks
- * to be allocated. this may be wrong if allocation failed.
- *
- * As pages are already locked by write_cache_pages(), we can't use it
- */
-static int mpage_da_submit_io(struct mpage_da_data *mpd,
-			      struct ext4_map_blocks *map)
-{
-	struct pagevec pvec;
-	unsigned long index, end;
-	int ret = 0, err, nr_pages, i;
-	struct inode *inode = mpd->inode;
-	struct address_space *mapping = inode->i_mapping;
-	loff_t size = i_size_read(inode);
-	unsigned int len, block_start;
-	struct buffer_head *bh, *page_bufs = NULL;
-	sector_t pblock = 0, cur_logical = 0;
-	struct ext4_io_submit io_submit;
+struct mpage_da_data {
+	struct inode *inode;
+	struct writeback_control *wbc;
 
-	BUG_ON(mpd->next_page <= mpd->first_page);
-	memset(&io_submit, 0, sizeof(io_submit));
+	pgoff_t first_page;	/* The first page to write */
+	pgoff_t next_page;	/* Current page to examine */
+	pgoff_t last_page;	/* Last page to examine */
 	/*
-	 * We need to start from the first_page to the next_page - 1
-	 * to make sure we also write the mapped dirty buffer_heads.
-	 * If we look at mpd->b_blocknr we would only be looking
-	 * at the currently mapped buffer_heads.
+	 * Extent to map - this can be after first_page because that can be
+	 * fully mapped. We somewhat abuse m_flags to store whether the extent
+	 * is delalloc or unwritten.
 	 */
-	index = mpd->first_page;
-	end = mpd->next_page - 1;
+	struct ext4_map_blocks map;
+	struct ext4_io_submit io_submit;	/* IO submission data */
+};
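The new mpage_da_data describes a single writepages iteration. A condensed
sketch of how ext4_writepages() (introduced elsewhere in this patch) is
expected to drive it, with journal handling and io_end setup elided (the loop
shape here is an assumption, not the literal upstream code):

static int example_writepages_loop(handle_t *handle, struct inode *inode,
				   struct writeback_control *wbc)
{
	struct mpage_da_data mpd = { .inode = inode, .wbc = wbc };
	bool give_up_on_write = false;
	int ret = 0;

	mpd.first_page = wbc->range_start >> PAGE_CACHE_SHIFT;
	mpd.last_page = wbc->range_end >> PAGE_CACHE_SHIFT;
	while (!ret && mpd.first_page <= mpd.last_page) {
		mpd.map.m_len = 0;	/* no extent accumulated yet */
		ret = mpage_prepare_extent_to_map(&mpd);
		if (ret >= 0 && mpd.map.m_len)
			ret = mpage_map_and_submit_extent(handle, &mpd,
							  &give_up_on_write);
		/* unlock pages that were locked but never submitted */
		mpage_release_unused_pages(&mpd, give_up_on_write);
		if (give_up_on_write)
			break;
		mpd.first_page = mpd.next_page;
	}
	return ret;
}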
 
-	pagevec_init(&pvec, 0);
-	while (index <= end) {
-		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
-		if (nr_pages == 0)
-			break;
-		for (i = 0; i < nr_pages; i++) {
-			int skip_page = 0;
-			struct page *page = pvec.pages[i];
-
-			index = page->index;
-			if (index > end)
-				break;
-
-			if (index == size >> PAGE_CACHE_SHIFT)
-				len = size & ~PAGE_CACHE_MASK;
-			else
-				len = PAGE_CACHE_SIZE;
-			if (map) {
-				cur_logical = index << (PAGE_CACHE_SHIFT -
-							inode->i_blkbits);
-				pblock = map->m_pblk + (cur_logical -
-							map->m_lblk);
-			}
-			index++;
-
-			BUG_ON(!PageLocked(page));
-			BUG_ON(PageWriteback(page));
-
-			bh = page_bufs = page_buffers(page);
-			block_start = 0;
-			do {
-				if (map && (cur_logical >= map->m_lblk) &&
-				    (cur_logical <= (map->m_lblk +
-						     (map->m_len - 1)))) {
-					if (buffer_delay(bh)) {
-						clear_buffer_delay(bh);
-						bh->b_blocknr = pblock;
-					}
-					if (buffer_unwritten(bh) ||
-					    buffer_mapped(bh))
-						BUG_ON(bh->b_blocknr != pblock);
-					if (map->m_flags & EXT4_MAP_UNINIT)
-						set_buffer_uninit(bh);
-					clear_buffer_unwritten(bh);
-				}
-
-				/*
-				 * skip page if block allocation undone and
-				 * block is dirty
-				 */
-				if (ext4_bh_delay_or_unwritten(NULL, bh))
-					skip_page = 1;
-				bh = bh->b_this_page;
-				block_start += bh->b_size;
-				cur_logical++;
-				pblock++;
-			} while (bh != page_bufs);
-
-			if (skip_page) {
-				unlock_page(page);
-				continue;
-			}
-
-			clear_page_dirty_for_io(page);
-			err = ext4_bio_write_page(&io_submit, page, len,
-						  mpd->wbc);
-			if (!err)
-				mpd->pages_written++;
-			/*
-			 * In error case, we have to continue because
-			 * remaining pages are still locked
-			 */
-			if (ret == 0)
-				ret = err;
-		}
-		pagevec_release(&pvec);
-	}
-	ext4_io_submit(&io_submit);
-	return ret;
-}
-
-static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
+static void mpage_release_unused_pages(struct mpage_da_data *mpd,
+				       bool invalidate)
 {
 	int nr_pages, i;
 	pgoff_t index, end;
 	struct pagevec pvec;
 	struct inode *inode = mpd->inode;
 	struct address_space *mapping = inode->i_mapping;
-	ext4_lblk_t start, last;
+
+	/* This is necessary when next_page == 0. */
+	if (mpd->first_page >= mpd->next_page)
+		return;
 
 	index = mpd->first_page;
 	end   = mpd->next_page - 1;
-
-	start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-	last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-	ext4_es_remove_extent(inode, start, last - start + 1);
+	if (invalidate) {
+		ext4_lblk_t start, last;
+		start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+		last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+		ext4_es_remove_extent(inode, start, last - start + 1);
+	}
 
 	pagevec_init(&pvec, 0);
 	while (index <= end) {
@@ -1616,14 +1440,15 @@
 				break;
 			BUG_ON(!PageLocked(page));
 			BUG_ON(PageWriteback(page));
-			block_invalidatepage(page, 0);
-			ClearPageUptodate(page);
+			if (invalidate) {
+				block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+				ClearPageUptodate(page);
+			}
 			unlock_page(page);
 		}
 		index = pvec.pages[nr_pages - 1]->index + 1;
 		pagevec_release(&pvec);
 	}
-	return;
 }
 
 static void ext4_print_free_blocks(struct inode *inode)
@@ -1645,219 +1470,6 @@
 	ext4_msg(sb, KERN_CRIT, "Block reservation details");
 	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
 		 ei->i_reserved_data_blocks);
-	ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u",
-	       ei->i_reserved_meta_blocks);
-	ext4_msg(sb, KERN_CRIT, "i_allocated_meta_blocks=%u",
-	       ei->i_allocated_meta_blocks);
-	return;
-}
-
-/*
- * mpage_da_map_and_submit - go through given space, map them
- *       if necessary, and then submit them for I/O
- *
- * @mpd - bh describing space
- *
- * The function skips space we know is already mapped to disk blocks.
- *
- */
-static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
-{
-	int err, blks, get_blocks_flags;
-	struct ext4_map_blocks map, *mapp = NULL;
-	sector_t next = mpd->b_blocknr;
-	unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
-	loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
-	handle_t *handle = NULL;
-
-	/*
-	 * If the blocks are mapped already, or we couldn't accumulate
-	 * any blocks, then proceed immediately to the submission stage.
-	 */
-	if ((mpd->b_size == 0) ||
-	    ((mpd->b_state  & (1 << BH_Mapped)) &&
-	     !(mpd->b_state & (1 << BH_Delay)) &&
-	     !(mpd->b_state & (1 << BH_Unwritten))))
-		goto submit_io;
-
-	handle = ext4_journal_current_handle();
-	BUG_ON(!handle);
-
-	/*
-	 * Call ext4_map_blocks() to allocate any delayed allocation
-	 * blocks, or to convert an uninitialized extent to be
-	 * initialized (in the case where we have written into
-	 * one or more preallocated blocks).
-	 *
-	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
-	 * indicate that we are on the delayed allocation path.  This
-	 * affects functions in many different parts of the allocation
-	 * call path.  This flag exists primarily because we don't
-	 * want to change *many* call functions, so ext4_map_blocks()
-	 * will set the EXT4_STATE_DELALLOC_RESERVED flag once the
-	 * inode's allocation semaphore is taken.
-	 *
-	 * If the blocks in questions were delalloc blocks, set
-	 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
-	 * variables are updated after the blocks have been allocated.
-	 */
-	map.m_lblk = next;
-	map.m_len = max_blocks;
-	/*
-	 * We're in delalloc path and it is possible that we're going to
-	 * need more metadata blocks than previously reserved. However
-	 * we must not fail because we're in writeback and there is
-	 * nothing we can do about it so it might result in data loss.
-	 * So use reserved blocks to allocate metadata if possible.
-	 */
-	get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
-			   EXT4_GET_BLOCKS_METADATA_NOFAIL;
-	if (ext4_should_dioread_nolock(mpd->inode))
-		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
-	if (mpd->b_state & (1 << BH_Delay))
-		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
-
-
-	blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags);
-	if (blks < 0) {
-		struct super_block *sb = mpd->inode->i_sb;
-
-		err = blks;
-		/*
-		 * If get block returns EAGAIN or ENOSPC and there
-		 * appears to be free blocks we will just let
-		 * mpage_da_submit_io() unlock all of the pages.
-		 */
-		if (err == -EAGAIN)
-			goto submit_io;
-
-		if (err == -ENOSPC && ext4_count_free_clusters(sb)) {
-			mpd->retval = err;
-			goto submit_io;
-		}
-
-		/*
-		 * get block failure will cause us to loop in
-		 * writepages, because a_ops->writepage won't be able
-		 * to make progress. The page will be redirtied by
-		 * writepage and writepages will again try to write
-		 * the same.
-		 */
-		if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) {
-			ext4_msg(sb, KERN_CRIT,
-				 "delayed block allocation failed for inode %lu "
-				 "at logical offset %llu with max blocks %zd "
-				 "with error %d", mpd->inode->i_ino,
-				 (unsigned long long) next,
-				 mpd->b_size >> mpd->inode->i_blkbits, err);
-			ext4_msg(sb, KERN_CRIT,
-				"This should not happen!! Data will be lost");
-			if (err == -ENOSPC)
-				ext4_print_free_blocks(mpd->inode);
-		}
-		/* invalidate all the pages */
-		ext4_da_block_invalidatepages(mpd);
-
-		/* Mark this page range as having been completed */
-		mpd->io_done = 1;
-		return;
-	}
-	BUG_ON(blks == 0);
-
-	mapp = &map;
-	if (map.m_flags & EXT4_MAP_NEW) {
-		struct block_device *bdev = mpd->inode->i_sb->s_bdev;
-		int i;
-
-		for (i = 0; i < map.m_len; i++)
-			unmap_underlying_metadata(bdev, map.m_pblk + i);
-	}
-
-	/*
-	 * Update on-disk size along with block allocation.
-	 */
-	disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
-	if (disksize > i_size_read(mpd->inode))
-		disksize = i_size_read(mpd->inode);
-	if (disksize > EXT4_I(mpd->inode)->i_disksize) {
-		ext4_update_i_disksize(mpd->inode, disksize);
-		err = ext4_mark_inode_dirty(handle, mpd->inode);
-		if (err)
-			ext4_error(mpd->inode->i_sb,
-				   "Failed to mark inode %lu dirty",
-				   mpd->inode->i_ino);
-	}
-
-submit_io:
-	mpage_da_submit_io(mpd, mapp);
-	mpd->io_done = 1;
-}
-
-#define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
-		(1 << BH_Delay) | (1 << BH_Unwritten))
-
-/*
- * mpage_add_bh_to_extent - try to add one more block to extent of blocks
- *
- * @mpd->lbh - extent of blocks
- * @logical - logical number of the block in the file
- * @b_state - b_state of the buffer head added
- *
- * the function is used to collect contig. blocks in same state
- */
-static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, sector_t logical,
-				   unsigned long b_state)
-{
-	sector_t next;
-	int blkbits = mpd->inode->i_blkbits;
-	int nrblocks = mpd->b_size >> blkbits;
-
-	/*
-	 * XXX Don't go larger than mballoc is willing to allocate
-	 * This is a stopgap solution.  We eventually need to fold
-	 * mpage_da_submit_io() into this function and then call
-	 * ext4_map_blocks() multiple times in a loop
-	 */
-	if (nrblocks >= (8*1024*1024 >> blkbits))
-		goto flush_it;
-
-	/* check if the reserved journal credits might overflow */
-	if (!ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS)) {
-		if (nrblocks >= EXT4_MAX_TRANS_DATA) {
-			/*
-			 * With non-extent format we are limited by the journal
-			 * credit available.  Total credit needed to insert
-			 * nrblocks contiguous blocks is dependent on the
-			 * nrblocks.  So limit nrblocks.
-			 */
-			goto flush_it;
-		}
-	}
-	/*
-	 * First block in the extent
-	 */
-	if (mpd->b_size == 0) {
-		mpd->b_blocknr = logical;
-		mpd->b_size = 1 << blkbits;
-		mpd->b_state = b_state & BH_FLAGS;
-		return;
-	}
-
-	next = mpd->b_blocknr + nrblocks;
-	/*
-	 * Can we merge the block to our big extent?
-	 */
-	if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
-		mpd->b_size += 1 << blkbits;
-		return;
-	}
-
-flush_it:
-	/*
-	 * We couldn't merge the block to our extent, so we
-	 * need to flush current  extent and start new one
-	 */
-	mpage_da_map_and_submit(mpd);
 	return;
 }
 
@@ -1895,10 +1507,10 @@
 
 	/* Lookup extent status tree firstly */
 	if (ext4_es_lookup_extent(inode, iblock, &es)) {
-
+		ext4_es_lru_add(inode);
 		if (ext4_es_is_hole(&es)) {
 			retval = 0;
-			down_read((&EXT4_I(inode)->i_data_sem));
+			down_read(&EXT4_I(inode)->i_data_sem);
 			goto add_delayed;
 		}
 
@@ -1935,7 +1547,7 @@
 	 * Try to see if we can get the block without requesting a new
 	 * file system block.
 	 */
-	down_read((&EXT4_I(inode)->i_data_sem));
+	down_read(&EXT4_I(inode)->i_data_sem);
 	if (ext4_has_inline_data(inode)) {
 		/*
 		 * We will soon create blocks for this page, and let
@@ -1974,13 +1586,6 @@
 				retval = ret;
 				goto out_unlock;
 			}
-		} else {
-			ret = ext4_da_reserve_metadata(inode, iblock);
-			if (ret) {
-				/* not enough space to reserve */
-				retval = ret;
-				goto out_unlock;
-			}
 		}
 
 		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
@@ -2000,16 +1605,15 @@
 		set_buffer_delay(bh);
 	} else if (retval > 0) {
 		int ret;
-		unsigned long long status;
+		unsigned int status;
 
-#ifdef ES_AGGRESSIVE_TEST
-		if (retval != map->m_len) {
-			printk("ES len assertation failed for inode: %lu "
-			       "retval %d != map->m_len %d "
-			       "in %s (lookup)\n", inode->i_ino, retval,
-			       map->m_len, __func__);
+		if (unlikely(retval != map->m_len)) {
+			ext4_warning(inode->i_sb,
+				     "ES len assertion failed for inode "
+				     "%lu: retval %d != map->m_len %d",
+				     inode->i_ino, retval, map->m_len);
+			WARN_ON(1);
 		}
-#endif
 
 		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
 				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
@@ -2026,7 +1630,7 @@
 }
 
 /*
- * This is a special get_blocks_t callback which is used by
+ * This is a special get_block_t callback which is used by
  * ext4_da_write_begin().  It will either return mapped block or
  * reserve space for a single block.
  *
@@ -2141,6 +1745,7 @@
 	}
 
 	if (inline_data) {
+		BUFFER_TRACE(inode_bh, "get write access");
 		ret = ext4_journal_get_write_access(handle, inode_bh);
 
 		err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
@@ -2160,7 +1765,7 @@
 		ret = err;
 
 	if (!ext4_has_inline_data(inode))
-		ext4_walk_page_buffers(handle, page_bufs, 0, len,
+		ext4_walk_page_buffers(NULL, page_bufs, 0, len,
 				       NULL, bput_one);
 	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
 out:
@@ -2181,7 +1786,7 @@
  * lock so we have to do some magic.
  *
  * This function can get called via...
- *   - ext4_da_writepages after taking page lock (have journal handle)
+ *   - ext4_writepages after taking page lock (have journal handle)
  *   - journal_submit_inode_data_buffers (no journal handle)
  *   - shrink_page_list via the kswapd/direct reclaim (no journal handle)
  *   - grab_page_cache when doing write_begin (have journal handle)
@@ -2220,6 +1825,7 @@
 	struct buffer_head *page_bufs = NULL;
 	struct inode *inode = page->mapping->host;
 	struct ext4_io_submit io_submit;
+	bool keep_towrite = false;
 
 	trace_ext4_writepage(page);
 	size = i_size_read(inode);
@@ -2250,6 +1856,7 @@
 			unlock_page(page);
 			return 0;
 		}
+		keep_towrite = true;
 	}
 
 	if (PageChecked(page) && ext4_should_journal_data(inode))
@@ -2259,76 +1866,460 @@
 		 */
 		return __ext4_journalled_writepage(page, len);
 
-	memset(&io_submit, 0, sizeof(io_submit));
-	ret = ext4_bio_write_page(&io_submit, page, len, wbc);
+	ext4_io_submit_init(&io_submit, wbc);
+	io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
+	if (!io_submit.io_end) {
+		redirty_page_for_writepage(wbc, page);
+		unlock_page(page);
+		return -ENOMEM;
+	}
+	ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite);
 	ext4_io_submit(&io_submit);
+	/* Drop io_end reference we got from init */
+	ext4_put_io_end_defer(io_submit.io_end);
 	return ret;
 }
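ext4_writepage() now holds its own reference on the io_end across submission.
The lifecycle, annotated (the reference-counting semantics are inferred from
this series, not spelled out in this hunk):

io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);	/* our reference */
ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite);
/* each bio attached to the io_end holds its own reference */
ext4_io_submit(&io_submit);		/* queue the bio(s) */
ext4_put_io_end_defer(io_submit.io_end);
/* drop ours; when the last bio completes and drops the final
 * reference, any unwritten-extent conversion is deferred to a
 * workqueue instead of running in interrupt context */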
 
-/*
- * This is called via ext4_da_writepages() to
- * calculate the total number of credits to reserve to fit
- * a single extent allocation into a single transaction,
- * ext4_da_writpeages() will loop calling this before
- * the block allocation.
- */
-
-static int ext4_da_writepages_trans_blocks(struct inode *inode)
+static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
 {
-	int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
+	int len;
+	loff_t size = i_size_read(mpd->inode);
+	int err;
 
-	/*
-	 * With non-extent format the journal credit needed to
-	 * insert nrblocks contiguous block is dependent on
-	 * number of contiguous block. So we will limit
-	 * number of contiguous block to a sane value
-	 */
-	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
-	    (max_blocks > EXT4_MAX_TRANS_DATA))
-		max_blocks = EXT4_MAX_TRANS_DATA;
+	BUG_ON(page->index != mpd->first_page);
+	if (page->index == size >> PAGE_CACHE_SHIFT)
+		len = size & ~PAGE_CACHE_MASK;
+	else
+		len = PAGE_CACHE_SIZE;
+	clear_page_dirty_for_io(page);
+	err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
+	if (!err)
+		mpd->wbc->nr_to_write--;
+	mpd->first_page++;
 
-	return ext4_chunk_trans_blocks(inode, max_blocks);
+	return err;
+}
+
+#define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay))
+
+/*
+ * mballoc gives us at most this number of blocks...
+ * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
+ * The rest of mballoc seems to handle chunks up to full group size.
+ */
+#define MAX_WRITEPAGES_EXTENT_LEN 2048
+
+/*
+ * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
+ *
+ * @mpd - extent of blocks
+ * @lblk - logical number of the block in the file
+ * @bh - buffer head we want to add to the extent
+ *
+ * The function is used to collect contig. blocks in the same state. If the
+ * buffer doesn't require mapping for writeback and we haven't started the
+ * extent of buffers to map yet, the function returns 'true' immediately - the
+ * caller can write the buffer right away. Otherwise the function returns true
+ * if the block has been added to the extent, false if the block couldn't be
+ * added.
+ */
+static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
+				   struct buffer_head *bh)
+{
+	struct ext4_map_blocks *map = &mpd->map;
+
+	/* Buffer that doesn't need mapping for writeback? */
+	if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
+	    (!buffer_delay(bh) && !buffer_unwritten(bh))) {
+		/* So far no extent to map => we write the buffer right away */
+		if (map->m_len == 0)
+			return true;
+		return false;
+	}
+
+	/* First block in the extent? */
+	if (map->m_len == 0) {
+		map->m_lblk = lblk;
+		map->m_len = 1;
+		map->m_flags = bh->b_state & BH_FLAGS;
+		return true;
+	}
+
+	/* Don't go larger than mballoc is willing to allocate */
+	if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
+		return false;
+
+	/* Can we merge the block to our big extent? */
+	if (lblk == map->m_lblk + map->m_len &&
+	    (bh->b_state & BH_FLAGS) == map->m_flags) {
+		map->m_len++;
+		return true;
+	}
+	return false;
 }
 
 /*
- * write_cache_pages_da - walk the list of dirty pages of the given
- * address space and accumulate pages that need writing, and call
- * mpage_da_map_and_submit to map a single contiguous memory region
- * and then write them.
+ * mpage_process_page_bufs - submit page buffers for IO or add them to extent
+ *
+ * @mpd - extent of blocks for mapping
+ * @head - the first buffer in the page
+ * @bh - buffer we should start processing from
+ * @lblk - logical number of the block in the file corresponding to @bh
+ *
+ * Walk through page buffers from @bh up to @head (exclusive) and either submit
+ * the page for IO if all buffers in this page were mapped and there's no
+ * accumulated extent of buffers to map or add buffers in the page to the
+ * extent of buffers to map. The function returns 1 if the caller can continue
+ * by processing the next page, 0 if it should stop adding buffers to the
+ * extent to map because we cannot extend it anymore. It can also return a
+ * value < 0 in case of an error during IO submission.
  */
-static int write_cache_pages_da(handle_t *handle,
-				struct address_space *mapping,
-				struct writeback_control *wbc,
-				struct mpage_da_data *mpd,
-				pgoff_t *done_index)
+static int mpage_process_page_bufs(struct mpage_da_data *mpd,
+				   struct buffer_head *head,
+				   struct buffer_head *bh,
+				   ext4_lblk_t lblk)
 {
-	struct buffer_head	*bh, *head;
-	struct inode		*inode = mapping->host;
-	struct pagevec		pvec;
-	unsigned int		nr_pages;
-	sector_t		logical;
-	pgoff_t			index, end;
-	long			nr_to_write = wbc->nr_to_write;
-	int			i, tag, ret = 0;
+	struct inode *inode = mpd->inode;
+	int err;
+	ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
+							>> inode->i_blkbits;
 
-	memset(mpd, 0, sizeof(struct mpage_da_data));
-	mpd->wbc = wbc;
-	mpd->inode = inode;
+	do {
+		BUG_ON(buffer_locked(bh));
+
+		if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
+			/* Found extent to map? */
+			if (mpd->map.m_len)
+				return 0;
+			/* Everything mapped so far and we hit EOF */
+			break;
+		}
+	} while (lblk++, (bh = bh->b_this_page) != head);
+	/* So far everything mapped? Submit the page for IO. */
+	if (mpd->map.m_len == 0) {
+		err = mpage_submit_page(mpd, head->b_page);
+		if (err < 0)
+			return err;
+	}
+	return lblk < blocks;
+}
+
+/*
+ * mpage_map_buffers - update buffers corresponding to changed extent and
+ *		       submit fully mapped pages for IO
+ *
+ * @mpd - description of extent to map, on return next extent to map
+ *
+ * Scan buffers corresponding to changed extent (we expect corresponding pages
+ * to be already locked) and update buffer state according to new extent state.
+ * We map delalloc buffers to their physical location, clear unwritten bits,
+ * and mark buffers as uninit when we perform writes to unwritten extents
+ * and do extent conversion after IO is finished. If the last page is not fully
+ * mapped, we update @map to the next extent in the last page that needs
+ * mapping. Otherwise we submit the page for IO.
+ */
+static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
+{
+	struct pagevec pvec;
+	int nr_pages, i;
+	struct inode *inode = mpd->inode;
+	struct buffer_head *head, *bh;
+	int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits;
+	pgoff_t start, end;
+	ext4_lblk_t lblk;
+	sector_t pblock;
+	int err;
+
+	start = mpd->map.m_lblk >> bpp_bits;
+	end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
+	lblk = start << bpp_bits;
+	pblock = mpd->map.m_pblk;
+
 	pagevec_init(&pvec, 0);
-	index = wbc->range_start >> PAGE_CACHE_SHIFT;
-	end = wbc->range_end >> PAGE_CACHE_SHIFT;
+	while (start <= end) {
+		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, start,
+					  PAGEVEC_SIZE);
+		if (nr_pages == 0)
+			break;
+		for (i = 0; i < nr_pages; i++) {
+			struct page *page = pvec.pages[i];
 
-	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
+			if (page->index > end)
+				break;
+			/* Up to 'end' pages must be contiguous */
+			BUG_ON(page->index != start);
+			bh = head = page_buffers(page);
+			do {
+				if (lblk < mpd->map.m_lblk)
+					continue;
+				if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
+					/*
+					 * Buffer after end of mapped extent.
+					 * Find next buffer in the page to map.
+					 */
+					mpd->map.m_len = 0;
+					mpd->map.m_flags = 0;
+					/*
+					 * FIXME: If dioread_nolock supports
+					 * blocksize < pagesize, we need to make
+					 * sure we add size mapped so far to
+					 * io_end->size as the following call
+					 * can submit the page for IO.
+					 */
+					err = mpage_process_page_bufs(mpd, head,
+								      bh, lblk);
+					pagevec_release(&pvec);
+					if (err > 0)
+						err = 0;
+					return err;
+				}
+				if (buffer_delay(bh)) {
+					clear_buffer_delay(bh);
+					bh->b_blocknr = pblock++;
+				}
+				clear_buffer_unwritten(bh);
+			} while (lblk++, (bh = bh->b_this_page) != head);
+
+			/*
+			 * FIXME: This is going to break if dioread_nolock
+			 * supports blocksize < pagesize as we will try to
+			 * convert potentially unmapped parts of inode.
+			 */
+			mpd->io_submit.io_end->size += PAGE_CACHE_SIZE;
+			/* Page fully mapped - let IO run! */
+			err = mpage_submit_page(mpd, page);
+			if (err < 0) {
+				pagevec_release(&pvec);
+				return err;
+			}
+			start++;
+		}
+		pagevec_release(&pvec);
+	}
+	/* Extent fully mapped and matches with page boundary. We are done. */
+	mpd->map.m_len = 0;
+	mpd->map.m_flags = 0;
+	return 0;
+}
+
+static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
+{
+	struct inode *inode = mpd->inode;
+	struct ext4_map_blocks *map = &mpd->map;
+	int get_blocks_flags;
+	int err, dioread_nolock;
+
+	trace_ext4_da_write_pages_extent(inode, map);
+	/*
+	 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
+	 * to convert an unwritten extent to be initialized (in the case
+	 * where we have written into one or more preallocated blocks).  It is
+	 * possible that we're going to need more metadata blocks than
+	 * previously reserved. However we must not fail because we're in
+	 * writeback and there is nothing we can do about it so it might result
+	 * in data loss.  So use reserved blocks to allocate metadata if
+	 * possible.
+	 *
+	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
+	 * the blocks in question are delalloc blocks.  This indicates
+	 * that the blocks and quotas have already been checked when
+	 * the data was copied into the page cache.
+	 */
+	get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
+			   EXT4_GET_BLOCKS_METADATA_NOFAIL;
+	dioread_nolock = ext4_should_dioread_nolock(inode);
+	if (dioread_nolock)
+		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
+	if (map->m_flags & (1 << BH_Delay))
+		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
+
+	err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
+	if (err < 0)
+		return err;
+	if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
+		if (!mpd->io_submit.io_end->handle &&
+		    ext4_handle_valid(handle)) {
+			mpd->io_submit.io_end->handle = handle->h_rsv_handle;
+			handle->h_rsv_handle = NULL;
+		}
+		ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
+	}
+
+	BUG_ON(map->m_len == 0);
+	if (map->m_flags & EXT4_MAP_NEW) {
+		struct block_device *bdev = inode->i_sb->s_bdev;
+		int i;
+
+		for (i = 0; i < map->m_len; i++)
+			unmap_underlying_metadata(bdev, map->m_pblk + i);
+	}
+	return 0;
+}
+
+/*
+ * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
+ *				 mpd->len and submit pages underlying it for IO
+ *
+ * @handle - handle for journal operations
+ * @mpd - extent to map
+ * @give_up_on_write - we set this to true iff there is a fatal error and there
+ *                     is no hope of writing the data. The caller should discard
+ *                     dirty pages to avoid infinite loops.
+ *
+ * The function maps extent starting at mpd->lblk of length mpd->len. If it is
+ * delayed, blocks are allocated, if it is unwritten, we may need to convert
+ * them to initialized or split the described range from larger unwritten
+ * extent. Note that we need not map all the described range since allocation
+ * can return fewer blocks or the range is covered by more unwritten extents. We
+ * cannot map more because we are limited by reserved transaction credits. On
+ * the other hand we always make sure that the last touched page is fully
+ * mapped so that it can be written out (and thus forward progress is
+ * guaranteed). After mapping we submit all mapped pages for IO.
+ */
+static int mpage_map_and_submit_extent(handle_t *handle,
+				       struct mpage_da_data *mpd,
+				       bool *give_up_on_write)
+{
+	struct inode *inode = mpd->inode;
+	struct ext4_map_blocks *map = &mpd->map;
+	int err;
+	loff_t disksize;
+	int progress = 0;
+
+	mpd->io_submit.io_end->offset =
+				((loff_t)map->m_lblk) << inode->i_blkbits;
+	do {
+		err = mpage_map_one_extent(handle, mpd);
+		if (err < 0) {
+			struct super_block *sb = inode->i_sb;
+
+			if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
+				goto invalidate_dirty_pages;
+			/*
+			 * Let the upper layers retry transient errors.
+			 * In the case of ENOSPC, if ext4_count_free_clusters()
+			 * is non-zero, a commit should free up blocks.
+			 */
+			if ((err == -ENOMEM) ||
+			    (err == -ENOSPC && ext4_count_free_clusters(sb))) {
+				if (progress)
+					goto update_disksize;
+				return err;
+			}
+			ext4_msg(sb, KERN_CRIT,
+				 "Delayed block allocation failed for "
+				 "inode %lu at logical offset %llu with"
+				 " max blocks %u with error %d",
+				 inode->i_ino,
+				 (unsigned long long)map->m_lblk,
+				 (unsigned)map->m_len, -err);
+			ext4_msg(sb, KERN_CRIT,
+				 "This should not happen!! Data will "
+				 "be lost\n");
+			if (err == -ENOSPC)
+				ext4_print_free_blocks(inode);
+		invalidate_dirty_pages:
+			*give_up_on_write = true;
+			return err;
+		}
+		progress = 1;
+		/*
+		 * Update buffer state, submit mapped pages, and get us new
+		 * extent to map
+		 */
+		err = mpage_map_and_submit_buffers(mpd);
+		if (err < 0)
+			goto update_disksize;
+	} while (map->m_len);
+
+update_disksize:
+	/*
+	 * Update on-disk size after IO is submitted.  Races with
+	 * truncate are avoided by checking i_size under i_data_sem.
+	 */
+	disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT;
+	if (disksize > EXT4_I(inode)->i_disksize) {
+		int err2;
+		loff_t i_size;
+
+		down_write(&EXT4_I(inode)->i_data_sem);
+		i_size = i_size_read(inode);
+		if (disksize > i_size)
+			disksize = i_size;
+		if (disksize > EXT4_I(inode)->i_disksize)
+			EXT4_I(inode)->i_disksize = disksize;
+		err2 = ext4_mark_inode_dirty(handle, inode);
+		up_write(&EXT4_I(inode)->i_data_sem);
+		if (err2)
+			ext4_error(inode->i_sb,
+				   "Failed to mark inode %lu dirty",
+				   inode->i_ino);
+		if (!err)
+			err = err2;
+	}
+	return err;
+}
+
+/*
+ * Calculate the total number of credits to reserve for one writepages
+ * iteration. This is called from ext4_writepages(). We map an extent of
+ * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
+ * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
+ * bpp - 1 blocks in bpp different extents.
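+ *
+ * For example, with 1k blocks on 4k pages, bpp = 4, so a single iteration
+ * can map up to MAX_WRITEPAGES_EXTENT_LEN + 3 blocks in 4 different extents.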
+ */
+static int ext4_da_writepages_trans_blocks(struct inode *inode)
+{
+	int bpp = ext4_journal_blocks_per_page(inode);
+
+	return ext4_meta_trans_blocks(inode,
+				MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
+}
+
+/*
+ * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
+ * 				 and underlying extent to map
+ *
+ * @mpd - where to look for pages
+ *
+ * Walk dirty pages in the mapping. If they are fully mapped, submit them for
+ * IO immediately. When we find a page which isn't mapped we start accumulating
+ * an extent of buffers underlying these pages that needs mapping (formed by
+ * either delayed or unwritten buffers). We also lock the pages containing
+ * these buffers. The extent found is returned in @mpd structure (starting at
+ * mpd->lblk with length mpd->len blocks).
+ *
+ * Note that this function can attach bios to one io_end structure which are
+ * neither logically nor physically contiguous. Although it may seem like an
+ * unnecessary complication, it is actually inevitable in the blocksize <
+ * pagesize case, as we need to track IO to all buffers underlying a page in
+ * one io_end.
+ */
+static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
+{
+	struct address_space *mapping = mpd->inode->i_mapping;
+	struct pagevec pvec;
+	unsigned int nr_pages;
+	long left = mpd->wbc->nr_to_write;
+	pgoff_t index = mpd->first_page;
+	pgoff_t end = mpd->last_page;
+	int tag;
+	int i, err = 0;
+	int blkbits = mpd->inode->i_blkbits;
+	ext4_lblk_t lblk;
+	struct buffer_head *head;
+
+	if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
 		tag = PAGECACHE_TAG_TOWRITE;
 	else
 		tag = PAGECACHE_TAG_DIRTY;
 
-	*done_index = index;
+	pagevec_init(&pvec, 0);
+	mpd->map.m_len = 0;
+	mpd->next_page = index;
 	while (index <= end) {
 		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
 			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
 		if (nr_pages == 0)
-			return 0;
+			goto out;
 
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
@@ -2343,31 +2334,32 @@
 			if (page->index > end)
 				goto out;
 
-			*done_index = page->index + 1;
-
 			/*
-			 * If we can't merge this page, and we have
-			 * accumulated an contiguous region, write it
+			 * Accumulated enough dirty pages? This doesn't apply
+			 * to WB_SYNC_ALL mode. For integrity sync we have to
+			 * keep going because someone may be concurrently
+			 * dirtying pages, and we might have synced a lot of
+			 * newly appeared dirty pages, but have not synced all
+			 * of the old dirty pages.
 			 */
-			if ((mpd->next_page != page->index) &&
-			    (mpd->next_page != mpd->first_page)) {
-				mpage_da_map_and_submit(mpd);
-				goto ret_extent_tail;
-			}
+			if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0)
+				goto out;
+
+			/* If we can't merge this page, we are done. */
+			if (mpd->map.m_len > 0 && mpd->next_page != page->index)
+				goto out;
 
 			lock_page(page);
-
 			/*
-			 * If the page is no longer dirty, or its
-			 * mapping no longer corresponds to inode we
-			 * are writing (which means it has been
-			 * truncated or invalidated), or the page is
-			 * already under writeback and we are not
-			 * doing a data integrity writeback, skip the page
+			 * If the page is no longer dirty, or its mapping no
+			 * longer corresponds to inode we are writing (which
+			 * means it has been truncated or invalidated), or the
+			 * page is already under writeback and we are not doing
+			 * a data integrity writeback, skip the page
 			 */
 			if (!PageDirty(page) ||
 			    (PageWriteback(page) &&
-			     (wbc->sync_mode == WB_SYNC_NONE)) ||
+			     (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
 			    unlikely(page->mapping != mapping)) {
 				unlock_page(page);
 				continue;
@@ -2376,106 +2368,54 @@
 			wait_on_page_writeback(page);
 			BUG_ON(PageWriteback(page));
 
-			/*
-			 * If we have inline data and arrive here, it means that
-			 * we will soon create the block for the 1st page, so
-			 * we'd better clear the inline data here.
-			 */
-			if (ext4_has_inline_data(inode)) {
-				BUG_ON(ext4_test_inode_state(inode,
-						EXT4_STATE_MAY_INLINE_DATA));
-				ext4_destroy_inline_data(handle, inode);
-			}
-
-			if (mpd->next_page != page->index)
+			if (mpd->map.m_len == 0)
 				mpd->first_page = page->index;
 			mpd->next_page = page->index + 1;
-			logical = (sector_t) page->index <<
-				(PAGE_CACHE_SHIFT - inode->i_blkbits);
-
 			/* Add all dirty buffers to mpd */
+			lblk = ((ext4_lblk_t)page->index) <<
+				(PAGE_CACHE_SHIFT - blkbits);
 			head = page_buffers(page);
-			bh = head;
-			do {
-				BUG_ON(buffer_locked(bh));
-				/*
-				 * We need to try to allocate unmapped blocks
-				 * in the same page.  Otherwise we won't make
-				 * progress with the page in ext4_writepage
-				 */
-				if (ext4_bh_delay_or_unwritten(NULL, bh)) {
-					mpage_add_bh_to_extent(mpd, logical,
-							       bh->b_state);
-					if (mpd->io_done)
-						goto ret_extent_tail;
-				} else if (buffer_dirty(bh) &&
-					   buffer_mapped(bh)) {
-					/*
-					 * mapped dirty buffer. We need to
-					 * update the b_state because we look
-					 * at b_state in mpage_da_map_blocks.
-					 * We don't update b_size because if we
-					 * find an unmapped buffer_head later
-					 * we need to use the b_state flag of
-					 * that buffer_head.
-					 */
-					if (mpd->b_size == 0)
-						mpd->b_state =
-							bh->b_state & BH_FLAGS;
-				}
-				logical++;
-			} while ((bh = bh->b_this_page) != head);
-
-			if (nr_to_write > 0) {
-				nr_to_write--;
-				if (nr_to_write == 0 &&
-				    wbc->sync_mode == WB_SYNC_NONE)
-					/*
-					 * We stop writing back only if we are
-					 * not doing integrity sync. In case of
-					 * integrity sync we have to keep going
-					 * because someone may be concurrently
-					 * dirtying pages, and we might have
-					 * synced a lot of newly appeared dirty
-					 * pages, but have not synced all of the
-					 * old dirty pages.
-					 */
-					goto out;
-			}
+			err = mpage_process_page_bufs(mpd, head, head, lblk);
+			if (err <= 0)
+				goto out;
+			err = 0;
+			left--;
 		}
 		pagevec_release(&pvec);
 		cond_resched();
 	}
 	return 0;
-ret_extent_tail:
-	ret = MPAGE_DA_EXTENT_TAIL;
 out:
 	pagevec_release(&pvec);
-	cond_resched();
+	return err;
+}
+
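+/*
+ * Callback for write_cache_pages() in the data=journal path below: push each
+ * dirty page through ext4_writepage() and record any error on the mapping.
+ */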
+static int __writepage(struct page *page, struct writeback_control *wbc,
+		       void *data)
+{
+	struct address_space *mapping = data;
+	int ret = ext4_writepage(page, wbc);
+	mapping_set_error(mapping, ret);
 	return ret;
 }
 
-
-static int ext4_da_writepages(struct address_space *mapping,
-			      struct writeback_control *wbc)
+static int ext4_writepages(struct address_space *mapping,
+			   struct writeback_control *wbc)
 {
-	pgoff_t	index;
+	pgoff_t	writeback_index = 0;
+	long nr_to_write = wbc->nr_to_write;
 	int range_whole = 0;
+	int cycled = 1;
 	handle_t *handle = NULL;
 	struct mpage_da_data mpd;
 	struct inode *inode = mapping->host;
-	int pages_written = 0;
-	unsigned int max_pages;
-	int range_cyclic, cycled = 1, io_done = 0;
-	int needed_blocks, ret = 0;
-	long desired_nr_to_write, nr_to_writebump = 0;
-	loff_t range_start = wbc->range_start;
+	int needed_blocks, rsv_blocks = 0, ret = 0;
 	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
-	pgoff_t done_index = 0;
-	pgoff_t end;
+	bool done;
 	struct blk_plug plug;
+	bool give_up_on_write = false;
 
-	trace_ext4_da_writepages(inode, wbc);
+	trace_ext4_writepages(inode, wbc);
 
 	/*
 	 * No pages to write? This is mainly a kludge to avoid starting
@@ -2483,7 +2423,16 @@
 	 * because that could violate lock ordering on umount
 	 */
 	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
-		return 0;
+		goto out_writepages;
+
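+	/*
+	 * Data journalling cannot use the delalloc mpage machinery below
+	 * (which BUG_ON()s against it), so write pages out via the generic
+	 * write_cache_pages() loop instead.
+	 */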
+	if (ext4_should_journal_data(inode)) {
+		struct blk_plug plug;
+
+		blk_start_plug(&plug);
+		ret = write_cache_pages(mapping, wbc, __writepage, mapping);
+		blk_finish_plug(&plug);
+		goto out_writepages;
+	}
 
 	/*
 	 * If the filesystem has aborted, it is read-only, so return
@@ -2491,158 +2440,151 @@
 	 * will obscure the real source of the problem.  We test
 	 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
 	 * the latter could be true if the filesystem is mounted
-	 * read-only, and in that case, ext4_da_writepages should
+	 * read-only, and in that case, ext4_writepages should
 	 * *never* be called, so if that ever happens, we would want
 	 * the stack trace.
 	 */
-	if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
-		return -EROFS;
+	if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) {
+		ret = -EROFS;
+		goto out_writepages;
+	}
+
+	if (ext4_should_dioread_nolock(inode)) {
+		/*
+		 * We may need to convert up to one extent per block in
+		 * the page and we may dirty the inode.
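+		 * These reserved credits back the handle that is handed to
+		 * the io_end in mpage_map_one_extent() and later used for
+		 * the unwritten->written conversion at IO completion.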
+		 */
+		rsv_blocks = 1 + (PAGE_CACHE_SIZE >> inode->i_blkbits);
+	}
+
+	/*
+	 * If we have inline data and arrive here, it means that
+	 * we will soon create the block for the 1st page, so
+	 * we'd better clear the inline data here.
+	 */
+	if (ext4_has_inline_data(inode)) {
+		/* Just inode will be modified... */
+		handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
+		if (IS_ERR(handle)) {
+			ret = PTR_ERR(handle);
+			goto out_writepages;
+		}
+		BUG_ON(ext4_test_inode_state(inode,
+				EXT4_STATE_MAY_INLINE_DATA));
+		ext4_destroy_inline_data(handle, inode);
+		ext4_journal_stop(handle);
+	}
 
 	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
 		range_whole = 1;
 
-	range_cyclic = wbc->range_cyclic;
 	if (wbc->range_cyclic) {
-		index = mapping->writeback_index;
-		if (index)
+		writeback_index = mapping->writeback_index;
+		if (writeback_index)
 			cycled = 0;
-		wbc->range_start = index << PAGE_CACHE_SHIFT;
-		wbc->range_end  = LLONG_MAX;
-		wbc->range_cyclic = 0;
-		end = -1;
+		mpd.first_page = writeback_index;
+		mpd.last_page = -1;
 	} else {
-		index = wbc->range_start >> PAGE_CACHE_SHIFT;
-		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		mpd.first_page = wbc->range_start >> PAGE_CACHE_SHIFT;
+		mpd.last_page = wbc->range_end >> PAGE_CACHE_SHIFT;
 	}
 
-	/*
-	 * This works around two forms of stupidity.  The first is in
-	 * the writeback code, which caps the maximum number of pages
-	 * written to be 1024 pages.  This is wrong on multiple
-	 * levels; different architectues have a different page size,
-	 * which changes the maximum amount of data which gets
-	 * written.  Secondly, 4 megabytes is way too small.  XFS
-	 * forces this value to be 16 megabytes by multiplying
-	 * nr_to_write parameter by four, and then relies on its
-	 * allocator to allocate larger extents to make them
-	 * contiguous.  Unfortunately this brings us to the second
-	 * stupidity, which is that ext4's mballoc code only allocates
-	 * at most 2048 blocks.  So we force contiguous writes up to
-	 * the number of dirty blocks in the inode, or
-	 * sbi->max_writeback_mb_bump whichever is smaller.
-	 */
-	max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
-	if (!range_cyclic && range_whole) {
-		if (wbc->nr_to_write == LONG_MAX)
-			desired_nr_to_write = wbc->nr_to_write;
-		else
-			desired_nr_to_write = wbc->nr_to_write * 8;
-	} else
-		desired_nr_to_write = ext4_num_dirty_pages(inode, index,
-							   max_pages);
-	if (desired_nr_to_write > max_pages)
-		desired_nr_to_write = max_pages;
-
-	if (wbc->nr_to_write < desired_nr_to_write) {
-		nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
-		wbc->nr_to_write = desired_nr_to_write;
-	}
-
+	mpd.inode = inode;
+	mpd.wbc = wbc;
+	ext4_io_submit_init(&mpd.io_submit, wbc);
 retry:
 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
-		tag_pages_for_writeback(mapping, index, end);
-
+		tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page);
+	done = false;
 	blk_start_plug(&plug);
-	while (!ret && wbc->nr_to_write > 0) {
+	while (!done && mpd.first_page <= mpd.last_page) {
+		/* For each extent of pages we use new io_end */
+		mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
+		if (!mpd.io_submit.io_end) {
+			ret = -ENOMEM;
+			break;
+		}
 
 		/*
-		 * we  insert one extent at a time. So we need
-		 * credit needed for single extent allocation.
-		 * journalled mode is currently not supported
-		 * by delalloc
+		 * We have two constraints: we find one extent to map and we
+		 * must always write out the whole page (this makes a
+		 * difference when blocksize < pagesize) so that we don't
+		 * block on IO when we try to write out the rest of the page.
+		 * Journalled mode is not supported by delalloc.
 		 */
 		BUG_ON(ext4_should_journal_data(inode));
 		needed_blocks = ext4_da_writepages_trans_blocks(inode);
 
-		/* start a new transaction*/
-		handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
-					    needed_blocks);
+		/* start a new transaction */
+		handle = ext4_journal_start_with_reserve(inode,
+				EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
 		if (IS_ERR(handle)) {
 			ret = PTR_ERR(handle);
 			ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
 			       "%ld pages, ino %lu; err %d", __func__,
 				wbc->nr_to_write, inode->i_ino, ret);
-			blk_finish_plug(&plug);
-			goto out_writepages;
+			/* Release allocated io_end */
+			ext4_put_io_end(mpd.io_submit.io_end);
+			break;
 		}
 
-		/*
-		 * Now call write_cache_pages_da() to find the next
-		 * contiguous region of logical blocks that need
-		 * blocks to be allocated by ext4 and submit them.
-		 */
-		ret = write_cache_pages_da(handle, mapping,
-					   wbc, &mpd, &done_index);
-		/*
-		 * If we have a contiguous extent of pages and we
-		 * haven't done the I/O yet, map the blocks and submit
-		 * them for I/O.
-		 */
-		if (!mpd.io_done && mpd.next_page != mpd.first_page) {
-			mpage_da_map_and_submit(&mpd);
-			ret = MPAGE_DA_EXTENT_TAIL;
+		trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
+		ret = mpage_prepare_extent_to_map(&mpd);
+		if (!ret) {
+			if (mpd.map.m_len)
+				ret = mpage_map_and_submit_extent(handle, &mpd,
+					&give_up_on_write);
+			else {
+				/*
+				 * We scanned the whole range (or exhausted
+				 * nr_to_write), submitted what was mapped and
+				 * didn't find anything needing mapping. We are
+				 * done.
+				 */
+				done = true;
+			}
 		}
-		trace_ext4_da_write_pages(inode, &mpd);
-		wbc->nr_to_write -= mpd.pages_written;
-
 		ext4_journal_stop(handle);
+		/* Submit prepared bio */
+		ext4_io_submit(&mpd.io_submit);
+		/* Unlock pages we didn't use */
+		mpage_release_unused_pages(&mpd, give_up_on_write);
+		/* Drop our io_end reference we got from init */
+		ext4_put_io_end(mpd.io_submit.io_end);
 
-		if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
-			/* commit the transaction which would
+		if (ret == -ENOSPC && sbi->s_journal) {
+			/*
+			 * Commit the transaction which would
 			 * free blocks released in the transaction
 			 * and try again
 			 */
 			jbd2_journal_force_commit_nested(sbi->s_journal);
 			ret = 0;
-		} else if (ret == MPAGE_DA_EXTENT_TAIL) {
-			/*
-			 * Got one extent now try with rest of the pages.
-			 * If mpd.retval is set -EIO, journal is aborted.
-			 * So we don't need to write any more.
-			 */
-			pages_written += mpd.pages_written;
-			ret = mpd.retval;
-			io_done = 1;
-		} else if (wbc->nr_to_write)
-			/*
-			 * There is no more writeout needed
-			 * or we requested for a noblocking writeout
-			 * and we found the device congested
-			 */
+			continue;
+		}
+		/* Fatal error - ENOMEM, EIO... */
+		if (ret)
 			break;
 	}
 	blk_finish_plug(&plug);
-	if (!io_done && !cycled) {
+	if (!ret && !cycled && wbc->nr_to_write > 0) {
 		cycled = 1;
-		index = 0;
-		wbc->range_start = index << PAGE_CACHE_SHIFT;
-		wbc->range_end  = mapping->writeback_index - 1;
+		mpd.last_page = writeback_index - 1;
+		mpd.first_page = 0;
 		goto retry;
 	}
 
 	/* Update index */
-	wbc->range_cyclic = range_cyclic;
 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
 		/*
-		 * set the writeback_index so that range_cyclic
+		 * Set the writeback_index so that range_cyclic
 		 * mode will write it back later
 		 */
-		mapping->writeback_index = done_index;
+		mapping->writeback_index = mpd.first_page;
 
 out_writepages:
-	wbc->nr_to_write -= nr_to_writebump;
-	wbc->range_start = range_start;
-	trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
+	trace_ext4_writepages_result(inode, wbc, ret,
+				     nr_to_write - wbc->nr_to_write);
 	return ret;
 }
 
@@ -2762,7 +2704,12 @@
 	/* In case writeback began while the page was unlocked */
 	wait_for_stable_page(page);
 
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+	ret = ext4_block_write_begin(page, pos, len,
+				     ext4_da_get_block_prep);
+#else
 	ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
+#endif
 	if (ret < 0) {
 		unlock_page(page);
 		ext4_journal_stop(handle);
@@ -2838,10 +2785,7 @@
 	if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
 		if (ext4_has_inline_data(inode) ||
 		    ext4_da_should_update_i_disksize(page, end)) {
-			down_write(&EXT4_I(inode)->i_data_sem);
-			if (new_i_size > EXT4_I(inode)->i_disksize)
-				EXT4_I(inode)->i_disksize = new_i_size;
-			up_write(&EXT4_I(inode)->i_data_sem);
+			ext4_update_i_disksize(inode, new_i_size);
 			/* We need to mark inode dirty even if
 			 * new_i_size is less than inode->i_size
 			 * but greater than i_disksize. (hint: delalloc)
@@ -2869,7 +2813,8 @@
 	return ret ? ret : copied;
 }
 
-static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
+static void ext4_da_invalidatepage(struct page *page, unsigned int offset,
+				   unsigned int length)
 {
 	/*
 	 * Drop reserved blocks
@@ -2878,10 +2823,10 @@
 	if (!page_has_buffers(page))
 		goto out;
 
-	ext4_da_page_release_reservation(page, offset);
+	ext4_da_page_release_reservation(page, offset, length);
 
 out:
-	ext4_invalidatepage(page, offset);
+	ext4_invalidatepage(page, offset, length);
 
 	return;
 }
@@ -2893,8 +2838,7 @@
 {
 	trace_ext4_alloc_da_blocks(inode);
 
-	if (!EXT4_I(inode)->i_reserved_data_blocks &&
-	    !EXT4_I(inode)->i_reserved_meta_blocks)
+	if (!EXT4_I(inode)->i_reserved_data_blocks)
 		return 0;
 
 	/*
@@ -2904,7 +2848,7 @@
 	 * laptop_mode, not even desirable).  However, to do otherwise
 	 * would require replicating code paths in:
 	 *
-	 * ext4_da_writepages() ->
+	 * ext4_writepages() ->
 	 *    write_cache_pages() ---> (via passed in callback function)
 	 *        __mpage_da_writepage() -->
 	 *           mpage_add_bh_to_extent()
@@ -3011,7 +2955,7 @@
 		ret = ext4_readpage_inline(inode, page);
 
 	if (ret == -EAGAIN)
-		return mpage_readpage(page, ext4_get_block);
+		return ext4_mpage_readpages(page->mapping, NULL, page, 1);
 
 	return ret;
 }
@@ -3026,40 +2970,43 @@
 	if (ext4_has_inline_data(inode))
 		return 0;
 
-	return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
+	return ext4_mpage_readpages(mapping, pages, NULL, nr_pages);
 }
 
-static void ext4_invalidatepage(struct page *page, unsigned long offset)
+static void ext4_invalidatepage(struct page *page, unsigned int offset,
+				unsigned int length)
 {
-	trace_ext4_invalidatepage(page, offset);
+	trace_ext4_invalidatepage(page, offset, length);
 
 	/* No journalling happens on data buffers when this function is used */
 	WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
 
-	block_invalidatepage(page, offset);
+	block_invalidatepage(page, offset, length);
 }
 
 static int __ext4_journalled_invalidatepage(struct page *page,
-					    unsigned long offset)
+					    unsigned int offset,
+					    unsigned int length)
 {
 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
 
-	trace_ext4_journalled_invalidatepage(page, offset);
+	trace_ext4_journalled_invalidatepage(page, offset, length);
 
 	/*
 	 * If it's a full truncate we just forget about the pending dirtying
 	 */
-	if (offset == 0)
+	if (offset == 0 && length == PAGE_CACHE_SIZE)
 		ClearPageChecked(page);
 
-	return jbd2_journal_invalidatepage(journal, page, offset);
+	return jbd2_journal_invalidatepage(journal, page, offset, length);
 }
 
 /* Wrapper for aops... */
 static void ext4_journalled_invalidatepage(struct page *page,
-					   unsigned long offset)
+					   unsigned int offset,
+					   unsigned int length)
 {
-	WARN_ON(__ext4_journalled_invalidatepage(page, offset) < 0);
+	WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0);
 }
 
 static int ext4_releasepage(struct page *page, gfp_t wait)
@@ -3101,15 +3048,13 @@
 }
 
 static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
-			    ssize_t size, void *private, int ret,
-			    bool is_async)
+			    ssize_t size, void *private)
 {
-	struct inode *inode = file_inode(iocb->ki_filp);
         ext4_io_end_t *io_end = iocb->private;
 
-	/* if not async direct IO or dio with 0 bytes write, just return */
-	if (!io_end || !size)
-		goto out;
+	/* if not async direct IO just return */
+	if (!io_end)
+		return;
 
 	ext_debug("ext4_end_io_dio(): io_end 0x%p "
 		  "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
@@ -3117,25 +3062,9 @@
 		  size);
 
 	iocb->private = NULL;
-
-	/* if not aio dio with unwritten extents, just free io and return */
-	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
-		ext4_free_io_end(io_end);
-out:
-		inode_dio_done(inode);
-		if (is_async)
-			aio_complete(iocb, ret, 0);
-		return;
-	}
-
 	io_end->offset = offset;
 	io_end->size = size;
-	if (is_async) {
-		io_end->iocb = iocb;
-		io_end->result = ret;
-	}
-
-	ext4_add_complete_io(io_end);
+	ext4_put_io_end(io_end);
 }
 
 /*
@@ -3143,9 +3072,9 @@
  * preallocated extents, and those writes extend the file, there is no
  * need to fall back to buffered IO.
  *
- * For holes, we fallocate those blocks, mark them as uninitialized
+ * For holes, we fallocate those blocks, mark them as unwritten.
  * If those blocks were preallocated, we make sure they are split, but
- * still keep the range to write as uninitialized.
+ * still keep the range to write as unwritten.
  *
  * The unwritten extents will be converted to written when DIO is completed.
  * For async direct IO, since the IO may still be pending when we return, we
@@ -3169,6 +3098,7 @@
 	get_block_t *get_block_func = NULL;
 	int dio_flags = 0;
 	loff_t final_size = offset + count;
+	ext4_io_end_t *io_end = NULL;
 
 	/* Use the old path for reads and writes beyond i_size. */
 	if (rw != WRITE || final_size > inode->i_size)
@@ -3176,11 +3106,18 @@
 
 	BUG_ON(iocb->private == NULL);
 
+	/*
+	 * Make all waiters for direct IO properly wait also for extent
+	 * conversion. This also disallows race between truncate() and
+	 * overwrite DIO as i_dio_count needs to be incremented under i_mutex.
+	 */
+	if (rw == WRITE)
+		atomic_inc(&inode->i_dio_count);
+
 	/* If we do a overwrite dio, i_mutex locking can be released */
 	overwrite = *((int *)iocb->private);
 
 	if (overwrite) {
-		atomic_inc(&inode->i_dio_count);
 		down_read(&EXT4_I(inode)->i_data_sem);
 		mutex_unlock(&inode->i_mutex);
 	}
@@ -3189,12 +3126,12 @@
 	 * We could direct write to holes and fallocate.
 	 *
 	 * Allocated blocks to fill the hole are marked as
-	 * uninitialized to prevent parallel buffered read to expose
+	 * unwritten to prevent a parallel buffered read from exposing
 	 * the stale data before DIO completes the data IO.
 	 *
 	 * As to previously fallocated extents, ext4 get_block will
 	 * just simply mark the buffer mapped but still keep the
-	 * extents uninitialized.
+	 * extents unwritten.
 	 *
 	 * For the non-AIO case, we will convert those unwritten extents
 	 * to written after returning from blockdev_direct_IO.
@@ -3207,13 +3144,15 @@
 	iocb->private = NULL;
 	ext4_inode_aio_set(inode, NULL);
 	if (!is_sync_kiocb(iocb)) {
-		ext4_io_end_t *io_end = ext4_init_io_end(inode, GFP_NOFS);
+		io_end = ext4_init_io_end(inode, GFP_NOFS);
 		if (!io_end) {
 			ret = -ENOMEM;
 			goto retake_lock;
 		}
-		io_end->flag |= EXT4_IO_END_DIRECT;
-		iocb->private = io_end;
+		/*
+		 * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
+		 */
+		iocb->private = ext4_get_io_end(io_end);
 		/*
 		 * we save the io structure for current async direct
 		 * IO, so that later ext4_map_blocks() could flag the
@@ -3229,6 +3168,9 @@
 		get_block_func = ext4_get_block_write;
 		dio_flags = DIO_LOCKING;
 	}
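+	/*
+	 * ext4_direct_IO() bounces encrypted regular files to buffered IO,
+	 * so none should ever get this far.
+	 */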
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+	BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
+#endif
 	ret = __blockdev_direct_IO(rw, iocb, inode,
 				   inode->i_sb->s_bdev, iov,
 				   offset, nr_segs,
@@ -3237,33 +3179,35 @@
 				   NULL,
 				   dio_flags);
 
-	if (iocb->private)
-		ext4_inode_aio_set(inode, NULL);
 	/*
-	 * The io_end structure takes a reference to the inode, that
-	 * structure needs to be destroyed and the reference to the
-	 * inode need to be dropped, when IO is complete, even with 0
-	 * byte write, or failed.
-	 *
-	 * In the successful AIO DIO case, the io_end structure will
-	 * be destroyed and the reference to the inode will be dropped
-	 * after the end_io call back function is called.
-	 *
-	 * In the case there is 0 byte write, or error case, since VFS
-	 * direct IO won't invoke the end_io call back function, we
-	 * need to free the end_io structure here.
+	 * Put our reference to io_end. This can free the io_end structure e.g.
+	 * in sync IO case or in case of error. It can even perform extent
+	 * conversion if all bios we submitted finished before we got here.
+	 * Note that in that case iocb->private can be already set to NULL
+	 * here.
 	 */
-	if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
-		ext4_free_io_end(iocb->private);
-		iocb->private = NULL;
-	} else if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
+	if (io_end) {
+		ext4_inode_aio_set(inode, NULL);
+		ext4_put_io_end(io_end);
+		/*
+		 * When no IO was submitted ext4_end_io_dio() was not
+		 * called so we have to put iocb's reference.
+		 */
+		if (ret <= 0 && ret != -EIOCBQUEUED && iocb->private) {
+			WARN_ON(iocb->private != io_end);
+			WARN_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
+			ext4_put_io_end(io_end);
+			iocb->private = NULL;
+		}
+	}
+	if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
 						EXT4_STATE_DIO_UNWRITTEN)) {
 		int err;
 		/*
 		 * for non AIO case, since the IO is already
 		 * completed, we could do the conversion right here
 		 */
-		err = ext4_convert_unwritten_extents(inode,
+		err = ext4_convert_unwritten_extents(NULL, inode,
 						     offset, ret);
 		if (err < 0)
 			ret = err;
@@ -3271,9 +3215,10 @@
 	}
 
 retake_lock:
+	if (rw == WRITE)
+		inode_dio_done(inode);
 	/* take i_mutex locking again if we do an overwrite dio */
 	if (overwrite) {
-		inode_dio_done(inode);
 		up_read(&EXT4_I(inode)->i_data_sem);
 		mutex_lock(&inode->i_mutex);
 	}
@@ -3287,8 +3232,14 @@
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
+	size_t count = iov_length(iov, nr_segs);
 	ssize_t ret;
 
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
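+	/*
+	 * DIO is not supported on encrypted regular files; returning 0 makes
+	 * the generic code fall back to buffered IO.
+	 */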
+	if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode))
+		return 0;
+#endif
+
 	/*
 	 * If we are doing data journalling we don't support O_DIRECT
 	 */
@@ -3299,13 +3250,12 @@
 	if (ext4_has_inline_data(inode))
 		return 0;
 
-	trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
+	trace_ext4_direct_IO_enter(inode, offset, count, rw);
 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
 		ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
 	else
 		ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
-	trace_ext4_direct_IO_exit(inode, offset,
-				iov_length(iov, nr_segs), rw, ret);
+	trace_ext4_direct_IO_exit(inode, offset, count, rw, ret);
 	return ret;
 }
 
@@ -3332,6 +3282,7 @@
 	.readpage		= ext4_readpage,
 	.readpages		= ext4_readpages,
 	.writepage		= ext4_writepage,
+	.writepages		= ext4_writepages,
 	.write_begin		= ext4_write_begin,
 	.write_end		= ext4_write_end,
 	.bmap			= ext4_bmap,
@@ -3347,6 +3298,7 @@
 	.readpage		= ext4_readpage,
 	.readpages		= ext4_readpages,
 	.writepage		= ext4_writepage,
+	.writepages		= ext4_writepages,
 	.write_begin		= ext4_write_begin,
 	.write_end		= ext4_journalled_write_end,
 	.set_page_dirty		= ext4_journalled_set_page_dirty,
@@ -3362,7 +3314,7 @@
 	.readpage		= ext4_readpage,
 	.readpages		= ext4_readpages,
 	.writepage		= ext4_writepage,
-	.writepages		= ext4_da_writepages,
+	.writepages		= ext4_writepages,
 	.write_begin		= ext4_da_write_begin,
 	.write_end		= ext4_da_write_end,
 	.bmap			= ext4_bmap,
@@ -3395,20 +3347,22 @@
 		inode->i_mapping->a_ops = &ext4_aops;
 }
 
-
 /*
- * ext4_discard_partial_page_buffers()
- * Wrapper function for ext4_discard_partial_page_buffers_no_lock.
- * This function finds and locks the page containing the offset
- * "from" and passes it to ext4_discard_partial_page_buffers_no_lock.
- * Calling functions that already have the page locked should call
- * ext4_discard_partial_page_buffers_no_lock directly.
+ * ext4_block_zero_page_range() zeros out a mapping of length 'length'
+ * starting from file offset 'from'.  The range to be zeroed must
+ * be contained within one block.  If the specified range exceeds
+ * the end of the block it will be shortened to the end of the block
+ * that corresponds to 'from'.
  */
-int ext4_discard_partial_page_buffers(handle_t *handle,
-		struct address_space *mapping, loff_t from,
-		loff_t length, int flags)
+static int ext4_block_zero_page_range(handle_t *handle,
+		struct address_space *mapping, loff_t from, loff_t length)
 {
+	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
+	unsigned offset = from & (PAGE_CACHE_SIZE-1);
+	unsigned blocksize, max, pos;
+	ext4_lblk_t iblock;
 	struct inode *inode = mapping->host;
+	struct buffer_head *bh;
 	struct page *page;
 	int err = 0;
 
@@ -3417,67 +3371,12 @@
 	if (!page)
 		return -ENOMEM;
 
-	err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page,
-		from, length, flags);
-
-	unlock_page(page);
-	page_cache_release(page);
-	return err;
-}
-
-/*
- * ext4_discard_partial_page_buffers_no_lock()
- * Zeros a page range of length 'length' starting from offset 'from'.
- * Buffer heads that correspond to the block aligned regions of the
- * zeroed range will be unmapped.  Unblock aligned regions
- * will have the corresponding buffer head mapped if needed so that
- * that region of the page can be updated with the partial zero out.
- *
- * This function assumes that the page has already been  locked.  The
- * The range to be discarded must be contained with in the given page.
- * If the specified range exceeds the end of the page it will be shortened
- * to the end of the page that corresponds to 'from'.  This function is
- * appropriate for updating a page and it buffer heads to be unmapped and
- * zeroed for blocks that have been either released, or are going to be
- * released.
- *
- * handle: The journal handle
- * inode:  The files inode
- * page:   A locked page that contains the offset "from"
- * from:   The starting byte offset (from the beginning of the file)
- *         to begin discarding
- * len:    The length of bytes to discard
- * flags:  Optional flags that may be used:
- *
- *         EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED
- *         Only zero the regions of the page whose buffer heads
- *         have already been unmapped.  This flag is appropriate
- *         for updating the contents of a page whose blocks may
- *         have already been released, and we only want to zero
- *         out the regions that correspond to those released blocks.
- *
- * Returns zero on success or negative on failure.
- */
-static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
-		struct inode *inode, struct page *page, loff_t from,
-		loff_t length, int flags)
-{
-	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
-	unsigned int offset = from & (PAGE_CACHE_SIZE-1);
-	unsigned int blocksize, max, pos;
-	ext4_lblk_t iblock;
-	struct buffer_head *bh;
-	int err = 0;
-
 	blocksize = inode->i_sb->s_blocksize;
-	max = PAGE_CACHE_SIZE - offset;
-
-	if (index != page->index)
-		return -EINVAL;
+	max = blocksize - (offset & (blocksize - 1));
 
 	/*
 	 * correct length if it does not fall between
-	 * 'from' and the end of the page
+	 * 'from' and the end of the block
 	 */
 	if (length > max || length < 0)
 		length = max;
@@ -3495,106 +3394,118 @@
 		iblock++;
 		pos += blocksize;
 	}
-
-	pos = offset;
-	while (pos < offset + length) {
-		unsigned int end_of_block, range_to_discard;
-
-		err = 0;
-
-		/* The length of space left to zero and unmap */
-		range_to_discard = offset + length - pos;
-
-		/* The length of space until the end of the block */
-		end_of_block = blocksize - (pos & (blocksize-1));
-
-		/*
-		 * Do not unmap or zero past end of block
-		 * for this buffer head
-		 */
-		if (range_to_discard > end_of_block)
-			range_to_discard = end_of_block;
-
-
-		/*
-		 * Skip this buffer head if we are only zeroing unampped
-		 * regions of the page
-		 */
-		if (flags & EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED &&
-			buffer_mapped(bh))
-				goto next;
-
-		/* If the range is block aligned, unmap */
-		if (range_to_discard == blocksize) {
-			clear_buffer_dirty(bh);
-			bh->b_bdev = NULL;
-			clear_buffer_mapped(bh);
-			clear_buffer_req(bh);
-			clear_buffer_new(bh);
-			clear_buffer_delay(bh);
-			clear_buffer_unwritten(bh);
-			clear_buffer_uptodate(bh);
-			zero_user(page, pos, range_to_discard);
-			BUFFER_TRACE(bh, "Buffer discarded");
-			goto next;
-		}
-
-		/*
-		 * If this block is not completely contained in the range
-		 * to be discarded, then it is not going to be released. Because
-		 * we need to keep this block, we need to make sure this part
-		 * of the page is uptodate before we modify it by writeing
-		 * partial zeros on it.
-		 */
+	if (buffer_freed(bh)) {
+		BUFFER_TRACE(bh, "freed: skip");
+		goto unlock;
+	}
+	if (!buffer_mapped(bh)) {
+		BUFFER_TRACE(bh, "unmapped");
+		ext4_get_block(inode, iblock, bh, 0);
+		/* unmapped? It's a hole - nothing to do */
 		if (!buffer_mapped(bh)) {
-			/*
-			 * Buffer head must be mapped before we can read
-			 * from the block
-			 */
-			BUFFER_TRACE(bh, "unmapped");
-			ext4_get_block(inode, iblock, bh, 0);
-			/* unmapped? It's a hole - nothing to do */
-			if (!buffer_mapped(bh)) {
-				BUFFER_TRACE(bh, "still unmapped");
-				goto next;
-			}
+			BUFFER_TRACE(bh, "still unmapped");
+			goto unlock;
 		}
-
-		/* Ok, it's mapped. Make sure it's up-to-date */
-		if (PageUptodate(page))
-			set_buffer_uptodate(bh);
-
-		if (!buffer_uptodate(bh)) {
-			err = -EIO;
-			ll_rw_block(READ, 1, &bh);
-			wait_on_buffer(bh);
-			/* Uhhuh. Read error. Complain and punt.*/
-			if (!buffer_uptodate(bh))
-				goto next;
-		}
-
-		if (ext4_should_journal_data(inode)) {
-			BUFFER_TRACE(bh, "get write access");
-			err = ext4_journal_get_write_access(handle, bh);
-			if (err)
-				goto next;
-		}
-
-		zero_user(page, pos, range_to_discard);
-
-		err = 0;
-		if (ext4_should_journal_data(inode)) {
-			err = ext4_handle_dirty_metadata(handle, inode, bh);
-		} else
-			mark_buffer_dirty(bh);
-
-		BUFFER_TRACE(bh, "Partial buffer zeroed");
-next:
-		bh = bh->b_this_page;
-		iblock++;
-		pos += range_to_discard;
 	}
 
+	/* Ok, it's mapped. Make sure it's up-to-date */
+	if (PageUptodate(page))
+		set_buffer_uptodate(bh);
+
+	if (!buffer_uptodate(bh)) {
+		err = -EIO;
+		ll_rw_block(READ, 1, &bh);
+		wait_on_buffer(bh);
+		/* Uhhuh. Read error. Complain and punt. */
+		if (!buffer_uptodate(bh))
+			goto unlock;
+		if (S_ISREG(inode->i_mode) &&
+		    ext4_encrypted_inode(inode)) {
+			/* We expect the key to be set. */
+			BUG_ON(!ext4_has_encryption_key(inode));
+			BUG_ON(blocksize != PAGE_CACHE_SIZE);
+			WARN_ON_ONCE(ext4_decrypt_one(inode, page));
+		}
+	}
+	if (ext4_should_journal_data(inode)) {
+		BUFFER_TRACE(bh, "get write access");
+		err = ext4_journal_get_write_access(handle, bh);
+		if (err)
+			goto unlock;
+	}
+	zero_user(page, offset, length);
+	BUFFER_TRACE(bh, "zeroed end of block");
+
+	if (ext4_should_journal_data(inode)) {
+		err = ext4_handle_dirty_metadata(handle, inode, bh);
+	} else {
+		err = 0;
+		mark_buffer_dirty(bh);
+		if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE))
+			err = ext4_jbd2_file_inode(handle, inode);
+	}
+
+unlock:
+	unlock_page(page);
+	page_cache_release(page);
+	return err;
+}
+
+/*
+ * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
+ * up to the end of the block which corresponds to `from'.
+ * This is required during truncate. We need to physically zero the tail end
+ * of that block so it doesn't yield old data if the file is later grown.
+ */
+static int ext4_block_truncate_page(handle_t *handle,
+		struct address_space *mapping, loff_t from)
+{
+	unsigned offset = from & (PAGE_CACHE_SIZE-1);
+	unsigned length;
+	unsigned blocksize;
+	struct inode *inode = mapping->host;
+
+	blocksize = inode->i_sb->s_blocksize;
+	length = blocksize - (offset & (blocksize - 1));
+
+	return ext4_block_zero_page_range(handle, mapping, from, length);
+}
+
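+/*
+ * ext4_zero_partial_blocks() zeroes the partial-block head and tail of the
+ * byte range [lstart, lstart + length).  Blocks fully contained in the range
+ * are left for the caller to free; when the range starts and ends within a
+ * single block, one zeroing pass covers both ends.
+ */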
+int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
+			     loff_t lstart, loff_t length)
+{
+	struct super_block *sb = inode->i_sb;
+	struct address_space *mapping = inode->i_mapping;
+	unsigned partial_start, partial_end;
+	ext4_fsblk_t start, end;
+	loff_t byte_end = (lstart + length - 1);
+	int err = 0;
+
+	partial_start = lstart & (sb->s_blocksize - 1);
+	partial_end = byte_end & (sb->s_blocksize - 1);
+
+	start = lstart >> sb->s_blocksize_bits;
+	end = byte_end >> sb->s_blocksize_bits;
+
+	/* Handle partial zero within the single block */
+	if (start == end &&
+	    (partial_start || (partial_end != sb->s_blocksize - 1))) {
+		err = ext4_block_zero_page_range(handle, mapping,
+						 lstart, length);
+		return err;
+	}
+	/* Handle partial zero out on the start of the range */
+	if (partial_start) {
+		err = ext4_block_zero_page_range(handle, mapping,
+						 lstart, sb->s_blocksize);
+		if (err)
+			return err;
+	}
+	/* Handle partial zero out on the end of the range */
+	if (partial_end != sb->s_blocksize - 1)
+		err = ext4_block_zero_page_range(handle, mapping,
+						 byte_end - partial_end,
+						 partial_end + 1);
 	return err;
 }
 
@@ -3620,14 +3531,12 @@
  * Returns: 0 on success or negative on failure
  */
 
-int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 {
-	struct inode *inode = file_inode(file);
 	struct super_block *sb = inode->i_sb;
 	ext4_lblk_t first_block, stop_block;
 	struct address_space *mapping = inode->i_mapping;
-	loff_t first_page, last_page, page_len;
-	loff_t first_page_offset, last_page_offset;
+	loff_t first_block_offset, last_block_offset;
 	handle_t *handle;
 	unsigned int credits;
 	int ret = 0;
@@ -3635,12 +3544,7 @@
 	if (!S_ISREG(inode->i_mode))
 		return -EOPNOTSUPP;
 
-	if (EXT4_SB(sb)->s_cluster_ratio > 1) {
-		/* TODO: Add support for bigalloc file systems */
-		return -EOPNOTSUPP;
-	}
-
-	trace_ext4_punch_hole(inode, offset, length);
+	trace_ext4_punch_hole(inode, offset, length, 0);
 
 	/*
 	 * Write out all dirty pages to avoid race conditions
@@ -3654,15 +3558,6 @@
 	}
 
 	mutex_lock(&inode->i_mutex);
-	/* It's not possible punch hole on append only file */
-	if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
-		ret = -EPERM;
-		goto out_mutex;
-	}
-	if (IS_SWAPFILE(inode)) {
-		ret = -ETXTBSY;
-		goto out_mutex;
-	}
 
 	/* No need to punch hole beyond i_size */
 	if (offset >= inode->i_size)
@@ -3678,23 +3573,28 @@
 		   offset;
 	}
 
-	first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	last_page = (offset + length) >> PAGE_CACHE_SHIFT;
+	if (offset & (sb->s_blocksize - 1) ||
+	    (offset + length) & (sb->s_blocksize - 1)) {
+		/*
+		 * Attach jinode to inode for jbd2 if we do any zeroing of
+		 * partial block
+		 */
+		ret = ext4_inode_attach_jinode(inode);
+		if (ret < 0)
+			goto out_mutex;
 
-	first_page_offset = first_page << PAGE_CACHE_SHIFT;
-	last_page_offset = last_page << PAGE_CACHE_SHIFT;
-
-	/* Now release the pages */
-	if (last_page_offset > first_page_offset) {
-		truncate_pagecache_range(inode, first_page_offset,
-					 last_page_offset - 1);
 	}
 
+	first_block_offset = round_up(offset, sb->s_blocksize);
+	last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
+
+	/* Now release the pages and zero block aligned part of pages*/
+	if (last_block_offset > first_block_offset)
+		truncate_pagecache_range(inode, first_block_offset,
+					 last_block_offset);
+
 	/* Wait all existing dio workers, newcomers will block on i_mutex */
 	ext4_inode_block_unlocked_dio(inode);
-	ret = ext4_flush_unwritten_io(inode);
-	if (ret)
-		goto out_dio;
 	inode_dio_wait(inode);
 
 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
@@ -3708,66 +3608,10 @@
 		goto out_dio;
 	}
 
-	/*
-	 * Now we need to zero out the non-page-aligned data in the
-	 * pages at the start and tail of the hole, and unmap the
-	 * buffer heads for the block aligned regions of the page that
-	 * were completely zeroed.
-	 */
-	if (first_page > last_page) {
-		/*
-		 * If the file space being truncated is contained
-		 * within a page just zero out and unmap the middle of
-		 * that page
-		 */
-		ret = ext4_discard_partial_page_buffers(handle,
-			mapping, offset, length, 0);
-
-		if (ret)
-			goto out_stop;
-	} else {
-		/*
-		 * zero out and unmap the partial page that contains
-		 * the start of the hole
-		 */
-		page_len = first_page_offset - offset;
-		if (page_len > 0) {
-			ret = ext4_discard_partial_page_buffers(handle, mapping,
-						offset, page_len, 0);
-			if (ret)
-				goto out_stop;
-		}
-
-		/*
-		 * zero out and unmap the partial page that contains
-		 * the end of the hole
-		 */
-		page_len = offset + length - last_page_offset;
-		if (page_len > 0) {
-			ret = ext4_discard_partial_page_buffers(handle, mapping,
-					last_page_offset, page_len, 0);
-			if (ret)
-				goto out_stop;
-		}
-	}
-
-	/*
-	 * If i_size is contained in the last page, we need to
-	 * unmap and zero the partial page after i_size
-	 */
-	if (inode->i_size >> PAGE_CACHE_SHIFT == last_page &&
-	   inode->i_size % PAGE_CACHE_SIZE != 0) {
-		page_len = PAGE_CACHE_SIZE -
-			(inode->i_size & (PAGE_CACHE_SIZE - 1));
-
-		if (page_len > 0) {
-			ret = ext4_discard_partial_page_buffers(handle,
-					mapping, inode->i_size, page_len, 0);
-
-			if (ret)
-				goto out_stop;
-		}
-	}
+	ret = ext4_zero_partial_blocks(handle, inode, offset,
+				       length);
+	if (ret)
+		goto out_stop;
 
 	first_block = (offset + sb->s_blocksize - 1) >>
 		EXT4_BLOCK_SIZE_BITS(sb);
@@ -3791,13 +3635,18 @@
 		ret = ext4_ext_remove_space(inode, first_block,
 					    stop_block - 1);
 	else
-		ret = ext4_free_hole_blocks(handle, inode, first_block,
+		ret = ext4_ind_remove_space(handle, inode, first_block,
 					    stop_block);
 
-	ext4_discard_preallocations(inode);
 	up_write(&EXT4_I(inode)->i_data_sem);
 	if (IS_SYNC(inode))
 		ext4_handle_sync(handle);
+
+	/* Now release the pages again to reduce race window */
+	if (last_block_offset > first_block_offset)
+		truncate_pagecache_range(inode, first_block_offset,
+					 last_block_offset);
+
 	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
 	ext4_mark_inode_dirty(handle, inode);
 out_stop:
@@ -3809,6 +3658,31 @@
 	return ret;
 }
 
+int ext4_inode_attach_jinode(struct inode *inode)
+{
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	struct jbd2_inode *jinode;
+
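+	/*
+	 * jbd2 needs a jbd2_inode to track writeback of this inode's data
+	 * pages (e.g. for ordered-mode zeroing of partial blocks).  Allocate
+	 * it outside i_lock and free the spare copy if we raced with someone
+	 * who already attached one.
+	 */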
+	if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
+		return 0;
+
+	jinode = jbd2_alloc_inode(GFP_KERNEL);
+	spin_lock(&inode->i_lock);
+	if (!ei->jinode) {
+		if (!jinode) {
+			spin_unlock(&inode->i_lock);
+			return -ENOMEM;
+		}
+		ei->jinode = jinode;
+		jbd2_journal_init_jbd_inode(ei->jinode, inode);
+		jinode = NULL;
+	}
+	spin_unlock(&inode->i_lock);
+	if (unlikely(jinode != NULL))
+		jbd2_free_inode(jinode);
+	return 0;
+}
+
 /*
  * ext4_truncate()
  *
@@ -3843,11 +3717,10 @@
 	unsigned int credits;
 	handle_t *handle;
 	struct address_space *mapping = inode->i_mapping;
-	loff_t page_len;
 
 	/*
 	 * There is a possibility that we're either freeing the inode
-	 * or it completely new indode. In those cases we might not
+	 * or it's a completely new inode. In those cases we might not
 	 * have i_mutex locked because it's not necessary.
 	 */
 	if (!(inode->i_state & (I_NEW|I_FREEING)))
@@ -3870,11 +3743,11 @@
 			return;
 	}
 
-	/*
-	 * finish any pending end_io work so we won't run the risk of
-	 * converting any truncated blocks to initialized later
-	 */
-	ext4_flush_unwritten_io(inode);
+	/* If we zero-out tail of the page, we have to create jinode for jbd2 */
+	if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
+		if (ext4_inode_attach_jinode(inode) < 0)
+			return;
+	}
 
 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
 		credits = ext4_writepage_trans_blocks(inode);
@@ -3887,14 +3760,8 @@
 		return;
 	}
 
-	if (inode->i_size % PAGE_CACHE_SIZE != 0) {
-		page_len = PAGE_CACHE_SIZE -
-			(inode->i_size & (PAGE_CACHE_SIZE - 1));
-
-		if (ext4_discard_partial_page_buffers(handle,
-				mapping, inode->i_size, page_len, 0))
-			goto out_stop;
-	}
+	if (inode->i_size & (inode->i_sb->s_blocksize - 1))
+		ext4_block_truncate_page(handle, mapping, inode->i_size);
 
 	/*
 	 * We add the inode to the orphan list, so that if this
@@ -4105,8 +3972,8 @@
 		new_fl |= S_NOATIME;
 	if (flags & EXT4_DIRSYNC_FL)
 		new_fl |= S_DIRSYNC;
-	set_mask_bits(&inode->i_flags,
-		      S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC, new_fl);
+	inode_set_flags(inode, new_fl,
+			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
 }
 
 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
@@ -4210,8 +4077,7 @@
 		ei->i_extra_isize = 0;
 
 	/* Precompute checksum seed for inode metadata */
-	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
-			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
+	if (ext4_has_metadata_csum(sb)) {
 		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 		__u32 csum;
 		__le32 inum = cpu_to_le32(inode->i_ino);
@@ -4325,11 +4191,13 @@
 	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
 	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
 
-	inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
-	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
-		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
-			inode->i_version |=
-			(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
+	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
+		inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
+		if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
+			if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
+				inode->i_version |=
+		    (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
+		}
 	}
 
 	ret = 0;
@@ -4364,7 +4232,8 @@
 		inode->i_op = &ext4_dir_inode_operations;
 		inode->i_fop = &ext4_dir_operations;
 	} else if (S_ISLNK(inode->i_mode)) {
-		if (ext4_inode_is_fast_symlink(inode)) {
+		if (ext4_inode_is_fast_symlink(inode) &&
+		    !ext4_encrypted_inode(inode)) {
 			inode->i_op = &ext4_fast_symlink_inode_operations;
 			nd_terminate_link(ei->i_data, inode->i_size,
 				sizeof(ei->i_data) - 1);
@@ -4459,12 +4328,15 @@
 	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	struct buffer_head *bh = iloc->bh;
+	struct super_block *sb = inode->i_sb;
 	int err = 0, rc, block;
-	int need_datasync = 0;
+	int need_datasync = 0, set_large_file = 0;
 	uid_t i_uid;
 	gid_t i_gid;
 
-	/* For fields not not tracking in the in-memory inode,
+	spin_lock(&ei->i_raw_lock);
+
+	/* For fields not tracked in the in-memory inode,
 	 * initialise them to zero for new inodes. */
 	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
 		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
@@ -4502,12 +4374,14 @@
 	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
 	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
 
-	if (ext4_inode_blocks_set(handle, raw_inode, ei))
+	err = ext4_inode_blocks_set(handle, raw_inode, ei);
+	if (err) {
+		spin_unlock(&ei->i_raw_lock);
 		goto out_brelse;
+	}
 	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
 	raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
-	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
-	    cpu_to_le32(EXT4_OS_HURD))
+	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
 		raw_inode->i_file_acl_high =
 			cpu_to_le16(ei->i_file_acl >> 32);
 	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
@@ -4516,24 +4390,11 @@
 		need_datasync = 1;
 	}
 	if (ei->i_disksize > 0x7fffffffULL) {
-		struct super_block *sb = inode->i_sb;
 		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
 				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
 				EXT4_SB(sb)->s_es->s_rev_level ==
-				cpu_to_le32(EXT4_GOOD_OLD_REV)) {
-			/* If this is the first large file
-			 * created, add a flag to the superblock.
-			 */
-			err = ext4_journal_get_write_access(handle,
-					EXT4_SB(sb)->s_sbh);
-			if (err)
-				goto out_brelse;
-			ext4_update_dynamic_rev(sb);
-			EXT4_SET_RO_COMPAT_FEATURE(sb,
-					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
-			ext4_handle_sync(handle);
-			err = ext4_handle_dirty_super(handle, sb);
-		}
+		    cpu_to_le32(EXT4_GOOD_OLD_REV))
+			set_large_file = 1;
 	}
 	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
@@ -4552,22 +4413,37 @@
 			raw_inode->i_block[block] = ei->i_data[block];
 	}
 
-	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
-	if (ei->i_extra_isize) {
-		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
-			raw_inode->i_version_hi =
-			cpu_to_le32(inode->i_version >> 32);
-		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
+	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
+		raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
+		if (ei->i_extra_isize) {
+			if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
+				raw_inode->i_version_hi =
+					cpu_to_le32(inode->i_version >> 32);
+			raw_inode->i_extra_isize =
+				cpu_to_le16(ei->i_extra_isize);
+		}
 	}
 
 	ext4_inode_csum_set(inode, raw_inode, ei);
 
+	spin_unlock(&ei->i_raw_lock);
+
 	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
 	rc = ext4_handle_dirty_metadata(handle, NULL, bh);
 	if (!err)
 		err = rc;
 	ext4_clear_inode_state(inode, EXT4_STATE_NEW);
-
+	if (set_large_file) {
+		BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access");
+		err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
+		if (err)
+			goto out_brelse;
+		ext4_update_dynamic_rev(sb);
+		EXT4_SET_RO_COMPAT_FEATURE(sb,
+					   EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
+		ext4_handle_sync(handle);
+		err = ext4_handle_dirty_super(handle, sb);
+	}
 	ext4_update_inode_fsync_trans(handle, inode, need_datasync);
 out_brelse:
 	brelse(bh);
@@ -4580,21 +4456,20 @@
  *
  * We are called from a few places:
  *
- * - Within generic_file_write() for O_SYNC files.
+ * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files.
  *   Here, there will be no transaction running. We wait for any running
  *   transaction to commit.
  *
- * - Within sys_sync(), kupdate and such.
- *   We wait on commit, if tol to.
+ * - Within flush work (sys_sync(), kupdate and such).
+ *   We wait on commit, if told to.
  *
- * - Within prune_icache() (PF_MEMALLOC == true)
- *   Here we simply return.  We can't afford to block kswapd on the
- *   journal commit.
+ * - Within iput_final() -> write_inode_now()
+ *   We wait on commit, if told to.
  *
  * In all cases it is actually safe for us to return without doing anything,
  * because the inode has been copied into a raw inode buffer in
- * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
- * knfsd.
+ * ext4_mark_inode_dirty().  This is a correctness thing for WB_SYNC_ALL
+ * writeback.
  *
  * Note that we are absolutely dependent upon all inode dirtiers doing the
  * right thing: they *must* call mark_inode_dirty() after dirtying info in
@@ -4606,15 +4481,15 @@
  *	stuff();
  *	inode->i_size = expr;
  *
- * is in error because a kswapd-driven write_inode() could occur while
- * `stuff()' is running, and the new i_size will be lost.  Plus the inode
- * will no longer be on the superblock's dirty inode list.
+ * is in error because write_inode() could occur while `stuff()' is running,
+ * and the new i_size will be lost.  Plus the inode will no longer be on the
+ * superblock's dirty inode list.
  */
 int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
 	int err;
 
-	if (current->flags & PF_MEMALLOC)
+	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
 		return 0;
 
 	if (EXT4_SB(inode->i_sb)->s_journal) {
@@ -4624,7 +4499,12 @@
 			return -EIO;
 		}
 
-		if (wbc->sync_mode != WB_SYNC_ALL)
+		/*
+		 * No need to force transaction in WB_SYNC_NONE mode. Also
+		 * ext4_sync_fs() will force the commit after everything is
+		 * written.
+		 */
+		if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
 			return 0;
 
 		err = ext4_force_commit(inode->i_sb);
@@ -4634,7 +4514,11 @@
 		err = __ext4_get_inode_loc(inode, &iloc, 0);
 		if (err)
 			return err;
-		if (wbc->sync_mode == WB_SYNC_ALL)
+		/*
+		 * sync(2) will flush the whole buffer cache. No need to do
+		 * it here separately for each inode.
+		 */
+		if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
 			sync_dirty_buffer(iloc.bh);
 		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
 			EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
@@ -4672,7 +4556,8 @@
 				      inode->i_size >> PAGE_CACHE_SHIFT);
 		if (!page)
 			return;
-		ret = __ext4_journalled_invalidatepage(page, offset);
+		ret = __ext4_journalled_invalidatepage(page, offset,
+						PAGE_CACHE_SIZE - offset);
 		unlock_page(page);
 		page_cache_release(page);
 		if (ret != -EBUSY)
@@ -4754,7 +4639,7 @@
 
 	if (attr->ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
 		handle_t *handle;
-		loff_t oldsize = inode->i_size;
+		loff_t oldsize = i_size_read(inode);
 
 		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
 			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
@@ -4783,18 +4668,31 @@
 				error = ext4_orphan_add(handle, inode);
 				orphan = 1;
 			}
+			down_write(&EXT4_I(inode)->i_data_sem);
 			EXT4_I(inode)->i_disksize = attr->ia_size;
 			rc = ext4_mark_inode_dirty(handle, inode);
 			if (!error)
 				error = rc;
+			/*
+			 * We have to update i_size under i_data_sem together
+			 * with i_disksize to avoid races with writeback code
+			 * running ext4_wb_update_i_disksize().
+			 */
+			if (!error)
+				i_size_write(inode, attr->ia_size);
+			up_write(&EXT4_I(inode)->i_data_sem);
 			ext4_journal_stop(handle);
 			if (error) {
 				ext4_orphan_del(NULL, inode);
 				goto err_out;
 			}
+		} else {
+			loff_t oldsize = inode->i_size;
+
+			i_size_write(inode, attr->ia_size);
+			pagecache_isize_extended(inode, oldsize, inode->i_size);
 		}
 
-		i_size_write(inode, attr->ia_size);
 		/*
 		 * Blocks are going to be removed from the inode. Wait
 		 * for dio in flight.  Temporarily disable
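The invariant behind the setattr hunk above is that i_disksize and i_size change inside a single i_data_sem critical section, so writeback running ext4_wb_update_i_disksize() never observes one updated without the other. Stripped of the error handling and the dirty marking, the pattern reduces to this sketch:

	down_write(&EXT4_I(inode)->i_data_sem);
	EXT4_I(inode)->i_disksize = attr->ia_size;	/* on-disk size ... */
	i_size_write(inode, attr->ia_size);		/* ... and in-core size together */
	up_write(&EXT4_I(inode)->i_data_sem);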
@@ -4833,8 +4731,8 @@
 	if (orphan && inode->i_nlink)
 		ext4_orphan_del(NULL, inode);
 
 	if (!rc && (ia_valid & ATTR_MODE))
-		rc = ext4_acl_chmod(inode);
+		rc = posix_acl_chmod(inode, inode->i_mode);
 
 err_out:
 	ext4_std_error(inode->i_sb, error);
@@ -4853,6 +4754,15 @@
 	generic_fillattr(inode, stat);
 
 	/*
+	 * If there is inline data in the inode, the inode will normally not
+	 * have data blocks allocated (it may have an external xattr block).
+	 * Report at least one sector for such files, so tools like tar, rsync,
+	 * and others don't incorrectly think the file is completely sparse.
+	 */
+	if (unlikely(ext4_has_inline_data(inode)))
+		stat->blocks += (stat->size + 511) >> 9;
+
+	/*
 	 * We can't update i_blocks if the block allocation is delayed
 	 * otherwise in the case of system crash before the real block
 	 * allocation is done, we will have i_blocks inconsistent with
@@ -4863,17 +4773,17 @@
 	 * blocks for this file.
 	 */
 	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
-				EXT4_I(inode)->i_reserved_data_blocks);
-
-	stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits-9);
+				   EXT4_I(inode)->i_reserved_data_blocks);
+	stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
 	return 0;
 }
 
-static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
+static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
+				   int pextents)
 {
 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
-		return ext4_ind_trans_blocks(inode, nrblocks, chunk);
-	return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
+		return ext4_ind_trans_blocks(inode, lblocks);
+	return ext4_ext_index_trans_blocks(inode, pextents);
 }
 
 /*
@@ -4887,7 +4797,8 @@
  *
  * Also account for superblock, inode, quota and xattr blocks
  */
-static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
+static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
+				  int pextents)
 {
 	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
 	int gdpblocks;
@@ -4895,14 +4806,10 @@
 	int ret = 0;
 
 	/*
-	 * How many index blocks need to touch to modify nrblocks?
-	 * The "Chunk" flag indicating whether the nrblocks is
-	 * physically contiguous on disk
-	 *
-	 * For Direct IO and fallocate, they calls get_block to allocate
-	 * one single extent at a time, so they could set the "Chunk" flag
+	 * How many index blocks do we need to touch to map @lblocks logical
+	 * blocks to @pextents physical extents?
 	 */
-	idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
+	idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
 
 	ret = idxblocks;
 
@@ -4910,12 +4817,7 @@
 	 * Now let's see how many group bitmaps and group descriptors need
 	 * to account
 	 */
-	groups = idxblocks;
-	if (chunk)
-		groups += 1;
-	else
-		groups += nrblocks;
-
+	groups = idxblocks + pextents;
 	gdpblocks = groups;
 	if (groups > ngroups)
 		groups = ngroups;
@@ -4946,7 +4848,7 @@
 	int bpp = ext4_journal_blocks_per_page(inode);
 	int ret;
 
-	ret = ext4_meta_trans_blocks(inode, bpp, 0);
+	ret = ext4_meta_trans_blocks(inode, bpp, bpp);
 
 	/* Account for data blocks for journalled mode */
 	if (ext4_should_journal_data(inode))
@@ -5213,7 +5115,12 @@
 	if (val)
 		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
 	else {
-		jbd2_journal_flush(journal);
+		err = jbd2_journal_flush(journal);
+		if (err < 0) {
+			jbd2_journal_unlock_updates(journal);
+			ext4_inode_resume_unlocked_dio(inode);
+			return err;
+		}
 		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
 	}
 	ext4_set_aops(inode);
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index d4fd81c..21d077c 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -14,10 +14,10 @@
 #include <linux/compat.h>
 #include <linux/mount.h>
 #include <linux/file.h>
+#include <linux/random.h>
 #include <asm/uaccess.h>
 #include "ext4_jbd2.h"
 #include "ext4.h"
-#include "ext4_extents.h"
 
 #define MAX_32_NUM ((((unsigned long long) 1) << 32) - 1)
 
@@ -102,28 +102,18 @@
 	handle_t *handle;
 	int err;
 	struct inode *inode_bl;
-	struct ext4_inode_info *ei;
 	struct ext4_inode_info *ei_bl;
-	struct ext4_sb_info *sbi;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
 
-	if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode)) {
-		err = -EINVAL;
-		goto swap_boot_out;
-	}
+	if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode))
+		return -EINVAL;
 
-	if (!inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN)) {
-		err = -EPERM;
-		goto swap_boot_out;
-	}
-
-	sbi = EXT4_SB(sb);
-	ei = EXT4_I(inode);
+	if (!inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN))
+		return -EPERM;
 
 	inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO);
-	if (IS_ERR(inode_bl)) {
-		err = PTR_ERR(inode_bl);
-		goto swap_boot_out;
-	}
+	if (IS_ERR(inode_bl))
+		return PTR_ERR(inode_bl);
 	ei_bl = EXT4_I(inode_bl);
 
 	filemap_flush(inode->i_mapping);
@@ -131,7 +121,7 @@
 
 	/* Protect orig inodes against a truncate and make sure,
 	 * that only 1 swap_inode_boot_loader is running. */
-	ext4_inode_double_lock(inode, inode_bl);
+	lock_two_nondirectories(inode, inode_bl);
 
 	truncate_inode_pages(&inode->i_data, 0);
 	truncate_inode_pages(&inode_bl->i_data, 0);
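lock_two_nondirectories() is the VFS helper that replaces the open-coded ext4_inode_double_lock() removed from move_extent.c later in this patch. Both rely on the same idiom: impose a global order (here, inode address) so that two tasks locking the same pair of inodes cannot deadlock. A sketch of the idiom, with a hypothetical helper name:

	static void double_lock(struct inode *a, struct inode *b)
	{
		BUG_ON(a == b);
		if (a > b)
			swap(a, b);	/* always take the lower address first */
		mutex_lock_nested(&a->i_mutex, I_MUTEX_PARENT);
		mutex_lock_nested(&b->i_mutex, I_MUTEX_CHILD);
	}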
@@ -198,23 +188,27 @@
 			ext4_mark_inode_dirty(handle, inode);
 		}
 	}
-
 	ext4_journal_stop(handle);
-
 	ext4_double_up_write_data_sem(inode, inode_bl);
 
 journal_err_out:
 	ext4_inode_resume_unlocked_dio(inode);
 	ext4_inode_resume_unlocked_dio(inode_bl);
-
-	ext4_inode_double_unlock(inode, inode_bl);
-
+	unlock_two_nondirectories(inode, inode_bl);
 	iput(inode_bl);
-
-swap_boot_out:
 	return err;
 }
 
+static int uuid_is_zero(__u8 u[16])
+{
+	int	i;
+
+	for (i = 0; i < 16; i++)
+		if (u[i])
+			return 0;
+	return 1;
+}
+
 long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	struct inode *inode = file_inode(filp);
@@ -348,8 +342,7 @@
 		if (!inode_owner_or_capable(inode))
 			return -EPERM;
 
-		if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
-				EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
+		if (ext4_has_metadata_csum(inode->i_sb)) {
 			ext4_warning(sb, "Setting inode version is not "
 				     "supported with metadata_csum enabled.");
 			return -ENOTTY;
@@ -605,11 +598,13 @@
 		return err;
 	}
 
+	case FIDTRIM:
 	case FITRIM:
 	{
 		struct request_queue *q = bdev_get_queue(sb->s_bdev);
 		struct fstrim_range range;
 		int ret = 0;
+		int flags = cmd == FIDTRIM ? BLKDEV_DISCARD_SECURE : 0;
 
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
@@ -617,13 +612,15 @@
 		if (!blk_queue_discard(q))
 			return -EOPNOTSUPP;
 
+		if ((flags & BLKDEV_DISCARD_SECURE) && !blk_queue_secdiscard(q))
+			return -EOPNOTSUPP;
 		if (copy_from_user(&range, (struct fstrim_range __user *)arg,
 		    sizeof(range)))
 			return -EFAULT;
 
 		range.minlen = max((unsigned int)range.minlen,
 				   q->limits.discard_granularity);
-		ret = ext4_trim_fs(sb, &range);
+		ret = ext4_trim_fs(sb, &range, flags);
 		if (ret < 0)
 			return ret;
 
@@ -633,7 +630,80 @@
 
 		return 0;
 	}
+	case EXT4_IOC_PRECACHE_EXTENTS:
+		return ext4_ext_precache(inode);
+	case EXT4_IOC_SET_ENCRYPTION_POLICY: {
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+		struct ext4_encryption_policy policy;
+		int err = 0;
 
+		if (copy_from_user(&policy,
+				   (struct ext4_encryption_policy __user *)arg,
+				   sizeof(policy))) {
+			err = -EFAULT;
+			goto encryption_policy_out;
+		}
+
+		err = ext4_process_policy(&policy, inode);
+encryption_policy_out:
+		return err;
+#else
+		return -EOPNOTSUPP;
+#endif
+	}
+	case EXT4_IOC_GET_ENCRYPTION_PWSALT: {
+		int err, err2;
+		struct ext4_sb_info *sbi = EXT4_SB(sb);
+		handle_t *handle;
+
+		if (!ext4_sb_has_crypto(sb))
+			return -EOPNOTSUPP;
+		if (uuid_is_zero(sbi->s_es->s_encrypt_pw_salt)) {
+			err = mnt_want_write_file(filp);
+			if (err)
+				return err;
+			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
+			if (IS_ERR(handle)) {
+				err = PTR_ERR(handle);
+				goto pwsalt_err_exit;
+			}
+			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
+			if (err)
+				goto pwsalt_err_journal;
+			generate_random_uuid(sbi->s_es->s_encrypt_pw_salt);
+			err = ext4_handle_dirty_metadata(handle, NULL,
+							 sbi->s_sbh);
+		pwsalt_err_journal:
+			err2 = ext4_journal_stop(handle);
+			if (err2 && !err)
+				err = err2;
+		pwsalt_err_exit:
+			mnt_drop_write_file(filp);
+			if (err)
+				return err;
+		}
+		if (copy_to_user((void __user *) arg, sbi->s_es->s_encrypt_pw_salt,
+				 16))
+			return -EFAULT;
+		return 0;
+	}
+	case EXT4_IOC_GET_ENCRYPTION_POLICY: {
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+		struct ext4_encryption_policy policy;
+		int err = 0;
+
+		if (!ext4_encrypted_inode(inode))
+			return -ENOENT;
+		err = ext4_get_policy(inode, &policy);
+		if (err)
+			return err;
+		if (copy_to_user((void __user *)arg, &policy, sizeof(policy)))
+			return -EFAULT;
+		return 0;
+#else
+		return -EOPNOTSUPP;
+#endif
+	}
 	default:
 		return -ENOTTY;
 	}
@@ -697,6 +767,10 @@
 	case EXT4_IOC_MOVE_EXT:
 	case FITRIM:
 	case EXT4_IOC_RESIZE_FS:
+	case EXT4_IOC_PRECACHE_EXTENTS:
+	case EXT4_IOC_SET_ENCRYPTION_POLICY:
+	case EXT4_IOC_GET_ENCRYPTION_PWSALT:
+	case EXT4_IOC_GET_ENCRYPTION_POLICY:
 		break;
 	default:
 		return -ENOIOCTLCMD;
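FIDTRIM drives the same code path as FITRIM but passes BLKDEV_DISCARD_SECURE down to the block layer, so both ioctls take a struct fstrim_range. A userspace sketch of the call (FITRIM comes from <linux/fs.h>; FIDTRIM is assumed to be exported by the uapi headers of kernels carrying this patch):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>		/* FITRIM, struct fstrim_range */

	static int trim_fs(const char *mountpoint, unsigned long req)
	{
		struct fstrim_range range = {
			.start	= 0,
			.len	= (__u64)-1,	/* whole filesystem */
			.minlen	= 0,		/* kernel raises this to the discard granularity */
		};
		int fd = open(mountpoint, O_RDONLY);

		if (fd < 0)
			return -1;
		if (ioctl(fd, req, &range) < 0) {	/* req: FITRIM, or FIDTRIM if defined */
			close(fd);
			return -1;
		}
		close(fd);
		printf("trimmed %llu bytes\n", (unsigned long long)range.len);
		return 0;
	}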
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index cb9eec0..d60a28a 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -722,6 +722,7 @@
 				void *buddy, void *bitmap, ext4_group_t group)
 {
 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
 	ext4_grpblk_t i = 0;
 	ext4_grpblk_t first;
@@ -751,13 +752,18 @@
 
 	if (free != grp->bb_free) {
 		ext4_grp_locked_error(sb, group, 0, 0,
-				      "%u clusters in bitmap, %u in gd",
+				      "block bitmap and bg descriptor "
+				      "inconsistent: %u vs %u free clusters",
 				      free, grp->bb_free);
 		/*
-		 * If we intent to continue, we consider group descritor
+		 * If we intend to continue, we consider group descriptor
 		 * corrupt and update bb_free using bitmap value
 		 */
 		grp->bb_free = free;
+		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+			percpu_counter_sub(&sbi->s_freeclusters_counter,
+					   grp->bb_free);
+		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
 	}
 	mb_set_largest_free_order(sb, grp);
 
@@ -987,7 +993,7 @@
 	poff = block % blocks_per_page;
 	page = find_or_create_page(inode->i_mapping, pnum, gfp);
 	if (!page)
-		return -EIO;
+		return -ENOMEM;
 	BUG_ON(page->mapping != inode->i_mapping);
 	e4b->bd_bitmap_page = page;
 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
@@ -1001,7 +1007,7 @@
 	pnum = block / blocks_per_page;
 	page = find_or_create_page(inode->i_mapping, pnum, gfp);
 	if (!page)
-		return -EIO;
+		return -ENOMEM;
 	BUG_ON(page->mapping != inode->i_mapping);
 	e4b->bd_buddy_page = page;
 	return 0;
@@ -1042,6 +1048,8 @@
 	 * allocating. If we are looking at the buddy cache we would
 	 * have taken a reference using ext4_mb_load_buddy and that
 	 * would have pinned buddy page to page cache.
+	 * The call to ext4_mb_get_buddy_page_lock will mark the
+	 * page accessed.
 	 */
 	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
 	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
@@ -1060,7 +1068,6 @@
 		ret = -EIO;
 		goto err;
 	}
-	mark_page_accessed(page);
 
 	if (e4b.bd_buddy_page == NULL) {
 		/*
@@ -1080,7 +1087,6 @@
 		ret = -EIO;
 		goto err;
 	}
-	mark_page_accessed(page);
 err:
 	ext4_mb_put_buddy_page_lock(&e4b);
 	return ret;
@@ -1139,7 +1145,7 @@
 
 	/* we could use find_or_create_page(), but it locks page
 	 * what we'd like to avoid in fast path ... */
-	page = find_get_page(inode->i_mapping, pnum);
+	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
 	if (page == NULL || !PageUptodate(page)) {
 		if (page)
 			/*
@@ -1166,19 +1172,24 @@
 			unlock_page(page);
 		}
 	}
-	if (page == NULL || !PageUptodate(page)) {
+	if (page == NULL) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	if (!PageUptodate(page)) {
 		ret = -EIO;
 		goto err;
 	}
+
+	/* Pages marked accessed already */
 	e4b->bd_bitmap_page = page;
 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
-	mark_page_accessed(page);
 
 	block++;
 	pnum = block / blocks_per_page;
 	poff = block % blocks_per_page;
 
-	page = find_get_page(inode->i_mapping, pnum);
+	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
 	if (page == NULL || !PageUptodate(page)) {
 		if (page)
 			page_cache_release(page);
@@ -1196,13 +1207,18 @@
 			unlock_page(page);
 		}
 	}
-	if (page == NULL || !PageUptodate(page)) {
+	if (page == NULL) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	if (!PageUptodate(page)) {
 		ret = -EIO;
 		goto err;
 	}
+
+	/* Pages marked accessed already */
 	e4b->bd_buddy_page = page;
 	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
-	mark_page_accessed(page);
 
 	BUG_ON(e4b->bd_bitmap_page == NULL);
 	BUG_ON(e4b->bd_buddy_page == NULL);
@@ -1409,6 +1425,10 @@
 		return;
 	BUG_ON(last >= (sb->s_blocksize << 3));
 	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
+	/* Don't bother if the block group is corrupt. */
+	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
+		return;
+
 	mb_check_buddy(e4b);
 	mb_free_blocks_double(inode, e4b, first, count);
 
@@ -1426,6 +1446,7 @@
 		right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
 
 	if (unlikely(block != -1)) {
+		struct ext4_sb_info *sbi = EXT4_SB(sb);
 		ext4_fsblk_t blocknr;
 
 		blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
@@ -1434,7 +1455,14 @@
 				      inode ? inode->i_ino : 0,
 				      blocknr,
 				      "freeing already freed block "
-				      "(bit %u)", block);
+				      "(bit %u); block bitmap corrupt.",
+				      block);
+		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))
+			percpu_counter_sub(&sbi->s_freeclusters_counter,
+					   e4b->bd_info->bb_free);
+		/* Mark the block group as corrupt. */
+		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
+			&e4b->bd_info->bb_state);
 		mb_regenerate_buddy(e4b);
 		goto done;
 	}
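This discount-once-then-flag sequence now appears in both ext4_mb_generate_buddy() and mb_free_blocks(); the corrupt bit doubles as a latch so s_freeclusters_counter is only reduced on the first detection. A hypothetical consolidation of the repeated pattern:

	/* Hypothetical helper; both hunks above follow this shape. */
	static void mark_group_bbitmap_corrupt(struct super_block *sb,
					       struct ext4_group_info *grp)
	{
		/* Discount the group's free clusters only on the first event. */
		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			percpu_counter_sub(&EXT4_SB(sb)->s_freeclusters_counter,
					   grp->bb_free);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
	}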
@@ -1801,9 +1829,15 @@
 	if (err)
 		return err;
 
+	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
+		ext4_mb_unload_buddy(e4b);
+		return 0;
+	}
+
 	ext4_lock_group(ac->ac_sb, group);
 	max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
 			     ac->ac_g_ex.fe_len, &ex);
+	ex.fe_logical = 0xDEADFA11; /* debug value */
 
 	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
 		ext4_fsblk_t start;
@@ -1932,7 +1966,7 @@
 			 */
 			break;
 		}
-
+		ex.fe_logical = 0xDEADC0DE; /* debug value */
 		ext4_mb_measure_extent(ac, &ex, e4b);
 
 		i += ex.fe_len;
@@ -1973,6 +2007,7 @@
 			max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
 			if (max >= sbi->s_stripe) {
 				ac->ac_found++;
+				ex.fe_logical = 0xDEADF00D; /* debug value */
 				ac->ac_b_ex = ex;
 				ext4_mb_use_best_found(ac, e4b);
 				break;
@@ -1998,6 +2033,9 @@
 	if (cr <= 2 && free < ac->ac_g_ex.fe_len)
 		return 0;
 
+	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
+		return 0;
+
 	/* We only do this if the grp has never been initialized */
 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
 		int ret = ext4_mb_init_group(ac->ac_sb, group, GFP_NOFS);
@@ -2116,6 +2154,7 @@
 		group = ac->ac_g_ex.fe_group;
 
 		for (i = 0; i < ngroups; group++, i++) {
+			cond_resched();
 			/*
 			 * Artificially restricted ngroups for non-extent
 			 * files makes group > ngroups possible on first loop.
@@ -2601,7 +2640,7 @@
 	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
 	if (sbi->s_locality_groups == NULL) {
 		ret = -ENOMEM;
-		goto out_free_groupinfo_slab;
+		goto out;
 	}
 	for_each_possible_cpu(i) {
 		struct ext4_locality_group *lg;
@@ -2626,8 +2665,6 @@
 out_free_locality_groups:
 	free_percpu(sbi->s_locality_groups);
 	sbi->s_locality_groups = NULL;
-out_free_groupinfo_slab:
-	ext4_groupinfo_destroy_slabs();
 out:
 	kfree(sbi->s_mb_offsets);
 	sbi->s_mb_offsets = NULL;
@@ -2718,7 +2755,8 @@
 }
 
 static inline int ext4_issue_discard(struct super_block *sb,
-		ext4_group_t block_group, ext4_grpblk_t cluster, int count)
+		ext4_group_t block_group, ext4_grpblk_t cluster, int count,
+		unsigned long flags)
 {
 	ext4_fsblk_t discard_block;
 
@@ -2727,7 +2765,7 @@
 	count = EXT4_C2B(EXT4_SB(sb), count);
 	trace_ext4_discard_blocks(sb,
 			(unsigned long long) discard_block, count);
-	return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
+	return sb_issue_discard(sb, discard_block, count, GFP_NOFS, flags);
 }
 
 /*
@@ -2749,7 +2787,7 @@
 	if (test_opt(sb, DISCARD)) {
 		err = ext4_issue_discard(sb, entry->efd_group,
 					 entry->efd_start_cluster,
-					 entry->efd_count);
+					 entry->efd_count, 0);
 		if (err && err != -EOPNOTSUPP)
 			ext4_msg(sb, KERN_WARNING, "discard request in"
 				 " group:%d block:%d count:%d failed"
@@ -2860,6 +2898,7 @@
 	if (!bitmap_bh)
 		goto out_err;
 
+	BUFFER_TRACE(bitmap_bh, "getting write access");
 	err = ext4_journal_get_write_access(handle, bitmap_bh);
 	if (err)
 		goto out_err;
@@ -2872,6 +2911,7 @@
 	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
 			ext4_free_group_clusters(sb, gdp));
 
+	BUFFER_TRACE(gdp_bh, "get_write_access");
 	err = ext4_journal_get_write_access(handle, gdp_bh);
 	if (err)
 		goto out_err;
@@ -3049,8 +3089,9 @@
 							(23 - bsbits)) << 23;
 		size = 8 * 1024 * 1024;
 	} else {
-		start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
-		size	  = ac->ac_o_ex.fe_len << bsbits;
+		start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
+		size	  = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
+					      ac->ac_o_ex.fe_len) << bsbits;
 	}
 	size = size >> bsbits;
 	start = start_off >> bsbits;
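The cast in the hunk above matters because fe_len is a 32-bit count of clusters: it has to be scaled to filesystem blocks (EXT4_C2B) and widened to loff_t before the shift, or the arithmetic wraps. An illustration under assumed 4 KiB blocks (bsbits == 12, one block per cluster):

	/* With int len = 1 << 20 (1M clusters requested):
	 *	len << bsbits				1 << 32, wraps in a 32-bit int
	 *	(loff_t) EXT4_C2B(sbi, len) << bsbits	the intended 4 GiB, kept in 64 bits
	 */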
@@ -3126,9 +3167,8 @@
 			 "start %lu, size %lu, fe_logical %lu",
 			 (unsigned long) start, (unsigned long) size,
 			 (unsigned long) ac->ac_o_ex.fe_logical);
+		BUG();
 	}
-	BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
-			start > ac->ac_o_ex.fe_logical);
 	BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
 
 	/* now prepare goal request */
@@ -4022,8 +4062,7 @@
 			(unsigned long)ac->ac_b_ex.fe_len,
 			(unsigned long)ac->ac_b_ex.fe_logical,
 			(int)ac->ac_criteria);
-	ext4_msg(ac->ac_sb, KERN_ERR, "%lu scanned, %d found",
-		 ac->ac_ex_scanned, ac->ac_found);
+	ext4_msg(ac->ac_sb, KERN_ERR, "%d found", ac->ac_found);
 	ext4_msg(ac->ac_sb, KERN_ERR, "groups: ");
 	ngroups = ext4_get_groups_count(sb);
 	for (i = 0; i < ngroups; i++) {
@@ -4106,7 +4145,7 @@
 	 * per cpu locality group is to reduce the contention between block
 	 * request from multiple CPUs.
 	 */
-	ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups);
+	ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
 
 	/* we're going to use group allocation */
 	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
@@ -4382,14 +4421,7 @@
 	if (IS_NOQUOTA(ar->inode))
 		ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
 
-	/*
-	 * For delayed allocation, we could skip the ENOSPC and
-	 * EDQUOT check, as blocks and quotas have been already
-	 * reserved when data being copied into pagecache.
-	 */
-	if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED))
-		ar->flags |= EXT4_MB_DELALLOC_RESERVED;
-	else {
+	if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
 		/* Without delayed allocation we need to verify
 		 * there is enough free blocks to do block allocation
 		 * and verify allocation doesn't exceed the quota limits.
@@ -4445,17 +4477,20 @@
 repeat:
 		/* allocate space in core */
 		*errp = ext4_mb_regular_allocator(ac);
+		if (*errp)
+			goto discard_and_exit;
+
+		/* as we've just preallocated more space than
+		 * user requested originally, we store allocated
+		 * space in a special descriptor */
+		if (ac->ac_status == AC_STATUS_FOUND &&
+		    ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
+			*errp = ext4_mb_new_preallocation(ac);
 		if (*errp) {
+		discard_and_exit:
 			ext4_discard_allocated_blocks(ac);
 			goto errout;
 		}
-
-		/* as we've just preallocated more space than
-		 * user requested orinally, we store allocated
-		 * space in a special descriptor */
-		if (ac->ac_status == AC_STATUS_FOUND &&
-				ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
-			ext4_mb_new_preallocation(ac);
 	}
 	if (likely(ac->ac_status == AC_STATUS_FOUND)) {
 		*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
@@ -4486,8 +4521,7 @@
 	if (inquota && ar->len < inquota)
 		dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
 	if (!ar->len) {
-		if (!ext4_test_inode_state(ar->inode,
-					   EXT4_STATE_DELALLOC_RESERVED))
+		if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
 			/* release all the reserved blocks if non delalloc */
 			percpu_counter_sub(&sbi->s_dirtyclusters_counter,
 						reserv_clstrs);
@@ -4642,10 +4676,11 @@
 		BUG_ON(bh && (count > 1));
 
 		for (i = 0; i < count; i++) {
+			cond_resched();
 			if (!bh)
 				tbh = sb_find_get_block(inode->i_sb,
 							block + i);
-			if (unlikely(!tbh))
+			if (!tbh)
 				continue;
 			ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
 				    inode, tbh, block + i);
@@ -4698,6 +4733,10 @@
 	overflow = 0;
 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
 
+	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(
+			ext4_get_group_info(sb, block_group))))
+		return;
+
 	/*
 	 * Check to see if we are freeing blocks across a group
 	 * boundary.
@@ -4786,7 +4825,8 @@
 		 * them with group lock_held
 		 */
 		if (test_opt(sb, DISCARD)) {
-			err = ext4_issue_discard(sb, block_group, bit, count);
+			err = ext4_issue_discard(sb, block_group, bit, count,
+						 0);
 			if (err && err != -EOPNOTSUPP)
 				ext4_msg(sb, KERN_WARNING, "discard request in"
 					 " group:%d block:%d count:%lu failed"
@@ -4993,13 +5033,17 @@
  * @count:	number of blocks to TRIM
  * @group:	alloc. group we are working with
  * @e4b:	ext4 buddy for the group
+ * @blkdev_flags: flags for the block device
  *
  * Trim "count" blocks starting at "start" in the "group". To assure that no
  * one will allocate those blocks, mark it as used in buddy bitmap. This must
  * be called with under the group lock.
  */
 static int ext4_trim_extent(struct super_block *sb, int start, int count,
-			     ext4_group_t group, struct ext4_buddy *e4b)
+			    ext4_group_t group, struct ext4_buddy *e4b,
+			    unsigned long blkdev_flags)
+__releases(bitlock)
+__acquires(bitlock)
 {
 	struct ext4_free_extent ex;
 	int ret = 0;
@@ -5018,7 +5062,7 @@
 	 */
 	mb_mark_used(e4b, &ex);
 	ext4_unlock_group(sb, group);
-	ret = ext4_issue_discard(sb, group, start, count);
+	ret = ext4_issue_discard(sb, group, start, count, blkdev_flags);
 	ext4_lock_group(sb, group);
 	mb_free_blocks(NULL, e4b, start, ex.fe_len);
 	return ret;
@@ -5031,6 +5075,7 @@
  * @start:		first group block to examine
  * @max:		last group block to examine
  * @minblocks:		minimum extent block count
+ * @blkdev_flags:	flags for the block device
  *
  * ext4_trim_all_free walks through group's buddy bitmap searching for free
  * extents. When the free block is found, ext4_trim_extent is called to TRIM
@@ -5045,7 +5090,7 @@
 static ext4_grpblk_t
 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
 		   ext4_grpblk_t start, ext4_grpblk_t max,
-		   ext4_grpblk_t minblocks)
+		   ext4_grpblk_t minblocks, unsigned long blkdev_flags)
 {
 	void *bitmap;
 	ext4_grpblk_t next, count = 0, free_count = 0;
@@ -5078,7 +5123,8 @@
 
 		if ((next - start) >= minblocks) {
 			ret = ext4_trim_extent(sb, start,
-					       next - start, group, &e4b);
+					       next - start, group, &e4b,
+					       blkdev_flags);
 			if (ret && ret != -EOPNOTSUPP)
 				break;
 			ret = 0;
@@ -5120,6 +5166,7 @@
  * ext4_trim_fs() -- trim ioctl handle function
  * @sb:			superblock for filesystem
  * @range:		fstrim_range structure
+ * @blkdev_flags:	flags for the block device
  *
  * start:	First Byte to trim
  * len:		number of Bytes to trim from start
@@ -5128,7 +5175,8 @@
  * start to start+len. For each such a group ext4_trim_all_free function
  * is invoked to trim all free space.
  */
-int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
+int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range,
+			unsigned long blkdev_flags)
 {
 	struct ext4_group_info *grp;
 	ext4_group_t group, first_group, last_group;
@@ -5184,7 +5232,7 @@
 
 		if (grp->bb_free >= minlen) {
 			cnt = ext4_trim_all_free(sb, group, first_cluster,
-						end, minlen);
+						end, minlen, blkdev_flags);
 			if (cnt < 0) {
 				ret = cnt;
 				break;
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index 08481ee..d634e18 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -48,7 +48,7 @@
 		}							\
 	} while (0)
 #else
-#define mb_debug(n, fmt, a...)
+#define mb_debug(n, fmt, a...)		no_printk(fmt, ## a)
 #endif
 
 #define EXT4_MB_HISTORY_ALLOC		1	/* allocation */
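Defining the disabled mb_debug() as no_printk() instead of an empty macro costs nothing at run time but keeps full printf-style format and argument checking, so debug call sites cannot bit-rot in non-debug builds. The idiom in general form (MYFS_DEBUG and my_debug are illustrative names):

	#ifdef MYFS_DEBUG
	#define my_debug(fmt, a...)	printk(KERN_DEBUG "myfs: " fmt, ## a)
	#else
	#define my_debug(fmt, a...)	no_printk(fmt, ## a)	/* checked, never emitted */
	#endif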
@@ -175,8 +175,6 @@
 	/* copy of the best found extent taken before preallocation efforts */
 	struct ext4_free_extent ac_f_ex;
 
-	/* number of iterations done. we have to track to limit searching */
-	unsigned long ac_ex_scanned;
 	__u16 ac_groups_scanned;
 	__u16 ac_found;
 	__u16 ac_tail;
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index d19efab..876b984 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -39,8 +39,9 @@
 	newext.ee_block = cpu_to_le32(lb->first_block);
 	newext.ee_len   = cpu_to_le16(lb->last_block - lb->first_block + 1);
 	ext4_ext_store_pblock(&newext, lb->first_pblock);
-	path = ext4_ext_find_extent(inode, lb->first_block, NULL);
-
+	/* Locking only for convenience since we are operating on a temp inode */
+	down_write(&EXT4_I(inode)->i_data_sem);
+	path = ext4_find_extent(inode, lb->first_block, NULL, 0);
 	if (IS_ERR(path)) {
 		retval = PTR_ERR(path);
 		path = NULL;
@@ -61,7 +62,9 @@
 	 */
 	if (needed && ext4_handle_has_enough_credits(handle,
 						EXT4_RESERVE_TRANS_BLOCKS)) {
+		up_write((&EXT4_I(inode)->i_data_sem));
 		retval = ext4_journal_restart(handle, needed);
+		down_write((&EXT4_I(inode)->i_data_sem));
 		if (retval)
 			goto err_out;
 	} else if (needed) {
@@ -70,17 +73,18 @@
 			/*
 			 * IF not able to extend the journal restart the journal
 			 */
+			up_write((&EXT4_I(inode)->i_data_sem));
 			retval = ext4_journal_restart(handle, needed);
+			down_write((&EXT4_I(inode)->i_data_sem));
 			if (retval)
 				goto err_out;
 		}
 	}
-	retval = ext4_ext_insert_extent(handle, inode, path, &newext, 0);
+	retval = ext4_ext_insert_extent(handle, inode, &path, &newext, 0);
 err_out:
-	if (path) {
-		ext4_ext_drop_refs(path);
-		kfree(path);
-	}
+	up_write((&EXT4_I(inode)->i_data_sem));
+	ext4_ext_drop_refs(path);
+	kfree(path);
 	lb->first_pblock = 0;
 	return retval;
 }
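Both restart sites above now drop i_data_sem around ext4_journal_restart(), presumably because restarting a handle can wait for the running transaction to commit, and sleeping in that wait while holding the semaphore would stall any task that needs i_data_sem before the commit can make progress. The recurring shape:

	up_write(&EXT4_I(inode)->i_data_sem);	/* never held across a commit wait */
	retval = ext4_journal_restart(handle, needed);
	down_write(&EXT4_I(inode)->i_data_sem);
	if (retval)
		goto err_out;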
@@ -494,7 +498,7 @@
 	 * superblock modification.
 	 *
 	 * For the tmp_inode we already have committed the
-	 * trascation that created the inode. Later as and
+	 * transaction that created the inode. Later as and
 	 * when we add extents we extent the journal
 	 */
 	/*
@@ -505,7 +509,7 @@
 	 * with i_data_sem held to prevent racing with block
 	 * allocation.
 	 */
-	down_read((&EXT4_I(inode)->i_data_sem));
+	down_read(&EXT4_I(inode)->i_data_sem);
 	ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
 	up_read((&EXT4_I(inode)->i_data_sem));
 
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 214461e..8313ca3 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -18,19 +18,17 @@
 	return cpu_to_le32(csum);
 }
 
-int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
+static int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
 {
-	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
-				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	if (!ext4_has_metadata_csum(sb))
 		return 1;
 
 	return mmp->mmp_checksum == ext4_mmp_csum(sb, mmp);
 }
 
-void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
+static void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
 {
-	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
-				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	if (!ext4_has_metadata_csum(sb))
 		return;
 
 	mmp->mmp_checksum = ext4_mmp_csum(sb, mmp);
@@ -259,7 +257,7 @@
 	u32 new_seq;
 
 	do {
-		get_random_bytes(&new_seq, sizeof(u32));
+		new_seq = prandom_u32();
 	} while (new_seq > EXT4_MMP_SEQ_MAX);
 
 	return new_seq;
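prandom_u32() suits this spot: the MMP sequence number only has to look arbitrary, not be cryptographically strong, so there is no reason to consume entropy-pool randomness on every update. The retry loop is plain rejection sampling, which stays uniform over [0, EXT4_MMP_SEQ_MAX] and terminates quickly because that bound covers most of the u32 range. The same idiom for an arbitrary large bound:

	/* Uniform over [0, bound] with no modulo bias; efficient only when
	 * bound is a sizable fraction of U32_MAX. */
	static u32 bounded_prandom(u32 bound)
	{
		u32 v;

		do {
			v = prandom_u32();
		} while (v > bound);

		return v;
	}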
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index ad52ace..023314c 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -27,120 +27,26 @@
  * @lblock:	logical block number to find an extent path
  * @path:	pointer to an extent path pointer (for output)
  *
- * ext4_ext_find_extent wrapper. Return 0 on success, or a negative error value
+ * ext4_find_extent wrapper. Return 0 on success, or a negative error value
  * on failure.
  */
 static inline int
 get_ext_path(struct inode *inode, ext4_lblk_t lblock,
-		struct ext4_ext_path **orig_path)
+		struct ext4_ext_path **ppath)
 {
-	int ret = 0;
 	struct ext4_ext_path *path;
 
-	path = ext4_ext_find_extent(inode, lblock, *orig_path);
+	path = ext4_find_extent(inode, lblock, ppath, EXT4_EX_NOCACHE);
 	if (IS_ERR(path))
-		ret = PTR_ERR(path);
-	else if (path[ext_depth(inode)].p_ext == NULL)
-		ret = -ENODATA;
-	else
-		*orig_path = path;
-
-	return ret;
-}
-
-/**
- * copy_extent_status - Copy the extent's initialization status
- *
- * @src:	an extent for getting initialize status
- * @dest:	an extent to be set the status
- */
-static void
-copy_extent_status(struct ext4_extent *src, struct ext4_extent *dest)
-{
-	if (ext4_ext_is_uninitialized(src))
-		ext4_ext_mark_uninitialized(dest);
-	else
-		dest->ee_len = cpu_to_le16(ext4_ext_get_actual_len(dest));
-}
-
-/**
- * mext_next_extent - Search for the next extent and set it to "extent"
- *
- * @inode:	inode which is searched
- * @path:	this will obtain data for the next extent
- * @extent:	pointer to the next extent we have just gotten
- *
- * Search the next extent in the array of ext4_ext_path structure (@path)
- * and set it to ext4_extent structure (@extent). In addition, the member of
- * @path (->p_ext) also points the next extent. Return 0 on success, 1 if
- * ext4_ext_path structure refers to the last extent, or a negative error
- * value on failure.
- */
-static int
-mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
-		      struct ext4_extent **extent)
-{
-	struct ext4_extent_header *eh;
-	int ppos, leaf_ppos = path->p_depth;
-
-	ppos = leaf_ppos;
-	if (EXT_LAST_EXTENT(path[ppos].p_hdr) > path[ppos].p_ext) {
-		/* leaf block */
-		*extent = ++path[ppos].p_ext;
-		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
-		return 0;
+		return PTR_ERR(path);
+	if (path[ext_depth(inode)].p_ext == NULL) {
+		ext4_ext_drop_refs(path);
+		kfree(path);
+		*ppath = NULL;
+		return -ENODATA;
 	}
-
-	while (--ppos >= 0) {
-		if (EXT_LAST_INDEX(path[ppos].p_hdr) >
-		    path[ppos].p_idx) {
-			int cur_ppos = ppos;
-
-			/* index block */
-			path[ppos].p_idx++;
-			path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
-			if (path[ppos+1].p_bh)
-				brelse(path[ppos+1].p_bh);
-			path[ppos+1].p_bh =
-				sb_bread(inode->i_sb, path[ppos].p_block);
-			if (!path[ppos+1].p_bh)
-				return -EIO;
-			path[ppos+1].p_hdr =
-				ext_block_hdr(path[ppos+1].p_bh);
-
-			/* Halfway index block */
-			while (++cur_ppos < leaf_ppos) {
-				path[cur_ppos].p_idx =
-					EXT_FIRST_INDEX(path[cur_ppos].p_hdr);
-				path[cur_ppos].p_block =
-					ext4_idx_pblock(path[cur_ppos].p_idx);
-				if (path[cur_ppos+1].p_bh)
-					brelse(path[cur_ppos+1].p_bh);
-				path[cur_ppos+1].p_bh = sb_bread(inode->i_sb,
-					path[cur_ppos].p_block);
-				if (!path[cur_ppos+1].p_bh)
-					return -EIO;
-				path[cur_ppos+1].p_hdr =
-					ext_block_hdr(path[cur_ppos+1].p_bh);
-			}
-
-			path[leaf_ppos].p_ext = *extent = NULL;
-
-			eh = path[leaf_ppos].p_hdr;
-			if (le16_to_cpu(eh->eh_entries) == 0)
-				/* empty leaf is found */
-				return -ENODATA;
-
-			/* leaf block */
-			path[leaf_ppos].p_ext = *extent =
-				EXT_FIRST_EXTENT(path[leaf_ppos].p_hdr);
-			path[leaf_ppos].p_block =
-					ext4_ext_pblock(path[leaf_ppos].p_ext);
-			return 0;
-		}
-	}
-	/* We found the last extent */
-	return 1;
+	*ppath = path;
+	return 0;
 }
 
 /**
@@ -178,429 +84,19 @@
 }
 
 /**
- * mext_insert_across_blocks - Insert extents across leaf block
- *
- * @handle:		journal handle
- * @orig_inode:		original inode
- * @o_start:		first original extent to be changed
- * @o_end:		last original extent to be changed
- * @start_ext:		first new extent to be inserted
- * @new_ext:		middle of new extent to be inserted
- * @end_ext:		last new extent to be inserted
- *
- * Allocate a new leaf block and insert extents into it. Return 0 on success,
- * or a negative error value on failure.
- */
-static int
-mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode,
-		struct ext4_extent *o_start, struct ext4_extent *o_end,
-		struct ext4_extent *start_ext, struct ext4_extent *new_ext,
-		struct ext4_extent *end_ext)
-{
-	struct ext4_ext_path *orig_path = NULL;
-	ext4_lblk_t eblock = 0;
-	int new_flag = 0;
-	int end_flag = 0;
-	int err = 0;
-
-	if (start_ext->ee_len && new_ext->ee_len && end_ext->ee_len) {
-		if (o_start == o_end) {
-
-			/*       start_ext   new_ext    end_ext
-			 * donor |---------|-----------|--------|
-			 * orig  |------------------------------|
-			 */
-			end_flag = 1;
-		} else {
-
-			/*       start_ext   new_ext   end_ext
-			 * donor |---------|----------|---------|
-			 * orig  |---------------|--------------|
-			 */
-			o_end->ee_block = end_ext->ee_block;
-			o_end->ee_len = end_ext->ee_len;
-			ext4_ext_store_pblock(o_end, ext4_ext_pblock(end_ext));
-		}
-
-		o_start->ee_len = start_ext->ee_len;
-		eblock = le32_to_cpu(start_ext->ee_block);
-		new_flag = 1;
-
-	} else if (start_ext->ee_len && new_ext->ee_len &&
-		   !end_ext->ee_len && o_start == o_end) {
-
-		/*	 start_ext	new_ext
-		 * donor |--------------|---------------|
-		 * orig  |------------------------------|
-		 */
-		o_start->ee_len = start_ext->ee_len;
-		eblock = le32_to_cpu(start_ext->ee_block);
-		new_flag = 1;
-
-	} else if (!start_ext->ee_len && new_ext->ee_len &&
-		   end_ext->ee_len && o_start == o_end) {
-
-		/*	  new_ext	end_ext
-		 * donor |--------------|---------------|
-		 * orig  |------------------------------|
-		 */
-		o_end->ee_block = end_ext->ee_block;
-		o_end->ee_len = end_ext->ee_len;
-		ext4_ext_store_pblock(o_end, ext4_ext_pblock(end_ext));
-
-		/*
-		 * Set 0 to the extent block if new_ext was
-		 * the first block.
-		 */
-		if (new_ext->ee_block)
-			eblock = le32_to_cpu(new_ext->ee_block);
-
-		new_flag = 1;
-	} else {
-		ext4_debug("ext4 move extent: Unexpected insert case\n");
-		return -EIO;
-	}
-
-	if (new_flag) {
-		err = get_ext_path(orig_inode, eblock, &orig_path);
-		if (err)
-			goto out;
-
-		if (ext4_ext_insert_extent(handle, orig_inode,
-					orig_path, new_ext, 0))
-			goto out;
-	}
-
-	if (end_flag) {
-		err = get_ext_path(orig_inode,
-				le32_to_cpu(end_ext->ee_block) - 1, &orig_path);
-		if (err)
-			goto out;
-
-		if (ext4_ext_insert_extent(handle, orig_inode,
-					   orig_path, end_ext, 0))
-			goto out;
-	}
-out:
-	if (orig_path) {
-		ext4_ext_drop_refs(orig_path);
-		kfree(orig_path);
-	}
-
-	return err;
-
-}
-
-/**
- * mext_insert_inside_block - Insert new extent to the extent block
- *
- * @o_start:		first original extent to be moved
- * @o_end:		last original extent to be moved
- * @start_ext:		first new extent to be inserted
- * @new_ext:		middle of new extent to be inserted
- * @end_ext:		last new extent to be inserted
- * @eh:			extent header of target leaf block
- * @range_to_move:	used to decide how to insert extent
- *
- * Insert extents into the leaf block. The extent (@o_start) is overwritten
- * by inserted extents.
- */
-static void
-mext_insert_inside_block(struct ext4_extent *o_start,
-			      struct ext4_extent *o_end,
-			      struct ext4_extent *start_ext,
-			      struct ext4_extent *new_ext,
-			      struct ext4_extent *end_ext,
-			      struct ext4_extent_header *eh,
-			      int range_to_move)
-{
-	int i = 0;
-	unsigned long len;
-
-	/* Move the existing extents */
-	if (range_to_move && o_end < EXT_LAST_EXTENT(eh)) {
-		len = (unsigned long)(EXT_LAST_EXTENT(eh) + 1) -
-			(unsigned long)(o_end + 1);
-		memmove(o_end + 1 + range_to_move, o_end + 1, len);
-	}
-
-	/* Insert start entry */
-	if (start_ext->ee_len)
-		o_start[i++].ee_len = start_ext->ee_len;
-
-	/* Insert new entry */
-	if (new_ext->ee_len) {
-		o_start[i] = *new_ext;
-		ext4_ext_store_pblock(&o_start[i++], ext4_ext_pblock(new_ext));
-	}
-
-	/* Insert end entry */
-	if (end_ext->ee_len)
-		o_start[i] = *end_ext;
-
-	/* Increment the total entries counter on the extent block */
-	le16_add_cpu(&eh->eh_entries, range_to_move);
-}
-
-/**
- * mext_insert_extents - Insert new extent
- *
- * @handle:	journal handle
- * @orig_inode:	original inode
- * @orig_path:	path indicates first extent to be changed
- * @o_start:	first original extent to be changed
- * @o_end:	last original extent to be changed
- * @start_ext:	first new extent to be inserted
- * @new_ext:	middle of new extent to be inserted
- * @end_ext:	last new extent to be inserted
- *
- * Call the function to insert extents. If we cannot add more extents into
- * the leaf block, we call mext_insert_across_blocks() to create a
- * new leaf block. Otherwise call mext_insert_inside_block(). Return 0
- * on success, or a negative error value on failure.
- */
-static int
-mext_insert_extents(handle_t *handle, struct inode *orig_inode,
-			 struct ext4_ext_path *orig_path,
-			 struct ext4_extent *o_start,
-			 struct ext4_extent *o_end,
-			 struct ext4_extent *start_ext,
-			 struct ext4_extent *new_ext,
-			 struct ext4_extent *end_ext)
-{
-	struct  ext4_extent_header *eh;
-	unsigned long need_slots, slots_range;
-	int	range_to_move, depth, ret;
-
-	/*
-	 * The extents need to be inserted
-	 * start_extent + new_extent + end_extent.
-	 */
-	need_slots = (start_ext->ee_len ? 1 : 0) + (end_ext->ee_len ? 1 : 0) +
-		(new_ext->ee_len ? 1 : 0);
-
-	/* The number of slots between start and end */
-	slots_range = ((unsigned long)(o_end + 1) - (unsigned long)o_start + 1)
-		/ sizeof(struct ext4_extent);
-
-	/* Range to move the end of extent */
-	range_to_move = need_slots - slots_range;
-	depth = orig_path->p_depth;
-	orig_path += depth;
-	eh = orig_path->p_hdr;
-
-	if (depth) {
-		/* Register to journal */
-		ret = ext4_journal_get_write_access(handle, orig_path->p_bh);
-		if (ret)
-			return ret;
-	}
-
-	/* Expansion */
-	if (range_to_move > 0 &&
-		(range_to_move > le16_to_cpu(eh->eh_max)
-			- le16_to_cpu(eh->eh_entries))) {
-
-		ret = mext_insert_across_blocks(handle, orig_inode, o_start,
-					o_end, start_ext, new_ext, end_ext);
-		if (ret < 0)
-			return ret;
-	} else
-		mext_insert_inside_block(o_start, o_end, start_ext, new_ext,
-						end_ext, eh, range_to_move);
-
-	return ext4_ext_dirty(handle, orig_inode, orig_path);
-}
-
-/**
- * mext_leaf_block - Move one leaf extent block into the inode.
- *
- * @handle:		journal handle
- * @orig_inode:		original inode
- * @orig_path:		path indicates first extent to be changed
- * @dext:		donor extent
- * @from:		start offset on the target file
- *
- * In order to insert extents into the leaf block, we must divide the extent
- * in the leaf block into three extents. The one is located to be inserted
- * extents, and the others are located around it.
- *
- * Therefore, this function creates structures to save extents of the leaf
- * block, and inserts extents by calling mext_insert_extents() with
- * created extents. Return 0 on success, or a negative error value on failure.
- */
-static int
-mext_leaf_block(handle_t *handle, struct inode *orig_inode,
-		     struct ext4_ext_path *orig_path, struct ext4_extent *dext,
-		     ext4_lblk_t *from)
-{
-	struct ext4_extent *oext, *o_start, *o_end, *prev_ext;
-	struct ext4_extent new_ext, start_ext, end_ext;
-	ext4_lblk_t new_ext_end;
-	int oext_alen, new_ext_alen, end_ext_alen;
-	int depth = ext_depth(orig_inode);
-	int ret;
-
-	start_ext.ee_block = end_ext.ee_block = 0;
-	o_start = o_end = oext = orig_path[depth].p_ext;
-	oext_alen = ext4_ext_get_actual_len(oext);
-	start_ext.ee_len = end_ext.ee_len = 0;
-
-	new_ext.ee_block = cpu_to_le32(*from);
-	ext4_ext_store_pblock(&new_ext, ext4_ext_pblock(dext));
-	new_ext.ee_len = dext->ee_len;
-	new_ext_alen = ext4_ext_get_actual_len(&new_ext);
-	new_ext_end = le32_to_cpu(new_ext.ee_block) + new_ext_alen - 1;
-
-	/*
-	 * Case: original extent is first
-	 * oext      |--------|
-	 * new_ext      |--|
-	 * start_ext |--|
-	 */
-	if (le32_to_cpu(oext->ee_block) < le32_to_cpu(new_ext.ee_block) &&
-		le32_to_cpu(new_ext.ee_block) <
-		le32_to_cpu(oext->ee_block) + oext_alen) {
-		start_ext.ee_len = cpu_to_le16(le32_to_cpu(new_ext.ee_block) -
-					       le32_to_cpu(oext->ee_block));
-		start_ext.ee_block = oext->ee_block;
-		copy_extent_status(oext, &start_ext);
-	} else if (oext > EXT_FIRST_EXTENT(orig_path[depth].p_hdr)) {
-		prev_ext = oext - 1;
-		/*
-		 * We can merge new_ext into previous extent,
-		 * if these are contiguous and same extent type.
-		 */
-		if (ext4_can_extents_be_merged(orig_inode, prev_ext,
-					       &new_ext)) {
-			o_start = prev_ext;
-			start_ext.ee_len = cpu_to_le16(
-				ext4_ext_get_actual_len(prev_ext) +
-				new_ext_alen);
-			start_ext.ee_block = oext->ee_block;
-			copy_extent_status(prev_ext, &start_ext);
-			new_ext.ee_len = 0;
-		}
-	}
-
-	/*
-	 * Case: new_ext_end must be less than oext
-	 * oext      |-----------|
-	 * new_ext       |-------|
-	 */
-	if (le32_to_cpu(oext->ee_block) + oext_alen - 1 < new_ext_end) {
-		EXT4_ERROR_INODE(orig_inode,
-			"new_ext_end(%u) should be less than or equal to "
-			"oext->ee_block(%u) + oext_alen(%d) - 1",
-			new_ext_end, le32_to_cpu(oext->ee_block),
-			oext_alen);
-		ret = -EIO;
-		goto out;
-	}
-
-	/*
-	 * Case: new_ext is smaller than original extent
-	 * oext    |---------------|
-	 * new_ext |-----------|
-	 * end_ext             |---|
-	 */
-	if (le32_to_cpu(oext->ee_block) <= new_ext_end &&
-		new_ext_end < le32_to_cpu(oext->ee_block) + oext_alen - 1) {
-		end_ext.ee_len =
-			cpu_to_le16(le32_to_cpu(oext->ee_block) +
-			oext_alen - 1 - new_ext_end);
-		copy_extent_status(oext, &end_ext);
-		end_ext_alen = ext4_ext_get_actual_len(&end_ext);
-		ext4_ext_store_pblock(&end_ext,
-			(ext4_ext_pblock(o_end) + oext_alen - end_ext_alen));
-		end_ext.ee_block =
-			cpu_to_le32(le32_to_cpu(o_end->ee_block) +
-			oext_alen - end_ext_alen);
-	}
-
-	ret = mext_insert_extents(handle, orig_inode, orig_path, o_start,
-				o_end, &start_ext, &new_ext, &end_ext);
-out:
-	return ret;
-}
-
-/**
- * mext_calc_swap_extents - Calculate extents for extent swapping.
- *
- * @tmp_dext:		the extent that will belong to the original inode
- * @tmp_oext:		the extent that will belong to the donor inode
- * @orig_off:		block offset of original inode
- * @donor_off:		block offset of donor inode
- * @max_count:		the maximum length of extents
- *
- * Return 0 on success, or a negative error value on failure.
- */
-static int
-mext_calc_swap_extents(struct ext4_extent *tmp_dext,
-			      struct ext4_extent *tmp_oext,
-			      ext4_lblk_t orig_off, ext4_lblk_t donor_off,
-			      ext4_lblk_t max_count)
-{
-	ext4_lblk_t diff, orig_diff;
-	struct ext4_extent dext_old, oext_old;
-
-	BUG_ON(orig_off != donor_off);
-
-	/* original and donor extents have to cover the same block offset */
-	if (orig_off < le32_to_cpu(tmp_oext->ee_block) ||
-	    le32_to_cpu(tmp_oext->ee_block) +
-			ext4_ext_get_actual_len(tmp_oext) - 1 < orig_off)
-		return -ENODATA;
-
-	if (orig_off < le32_to_cpu(tmp_dext->ee_block) ||
-	    le32_to_cpu(tmp_dext->ee_block) +
-			ext4_ext_get_actual_len(tmp_dext) - 1 < orig_off)
-		return -ENODATA;
-
-	dext_old = *tmp_dext;
-	oext_old = *tmp_oext;
-
-	/* When tmp_dext is too large, pick up the target range. */
-	diff = donor_off - le32_to_cpu(tmp_dext->ee_block);
-
-	ext4_ext_store_pblock(tmp_dext, ext4_ext_pblock(tmp_dext) + diff);
-	le32_add_cpu(&tmp_dext->ee_block, diff);
-	le16_add_cpu(&tmp_dext->ee_len, -diff);
-
-	if (max_count < ext4_ext_get_actual_len(tmp_dext))
-		tmp_dext->ee_len = cpu_to_le16(max_count);
-
-	orig_diff = orig_off - le32_to_cpu(tmp_oext->ee_block);
-	ext4_ext_store_pblock(tmp_oext, ext4_ext_pblock(tmp_oext) + orig_diff);
-
-	/* Adjust extent length if donor extent is larger than orig */
-	if (ext4_ext_get_actual_len(tmp_dext) >
-	    ext4_ext_get_actual_len(tmp_oext) - orig_diff)
-		tmp_dext->ee_len = cpu_to_le16(le16_to_cpu(tmp_oext->ee_len) -
-						orig_diff);
-
-	tmp_oext->ee_len = cpu_to_le16(ext4_ext_get_actual_len(tmp_dext));
-
-	copy_extent_status(&oext_old, tmp_dext);
-	copy_extent_status(&dext_old, tmp_oext);
-
-	return 0;
-}
-
-/**
  * mext_check_coverage - Check that all extents in range has the same type
  *
  * @inode:		inode in question
  * @from:		block offset of inode
  * @count:		block count to be checked
- * @uninit:		extents expected to be uninitialized
+ * @unwritten:		extents expected to be unwritten
  * @err:		pointer to save error value
  *
  * Return 1 if all extents in range has expected type, and zero otherwise.
  */
 static int
 mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count,
-			  int uninit, int *err)
+		    int unwritten, int *err)
 {
 	struct ext4_ext_path *path = NULL;
 	struct ext4_extent *ext;
@@ -611,178 +107,32 @@
 		if (*err)
 			goto out;
 		ext = path[ext_depth(inode)].p_ext;
-		if (uninit != ext4_ext_is_uninitialized(ext))
+		if (unwritten != ext4_ext_is_unwritten(ext))
 			goto out;
 		from += ext4_ext_get_actual_len(ext);
 		ext4_ext_drop_refs(path);
 	}
 	ret = 1;
 out:
-	if (path) {
-		ext4_ext_drop_refs(path);
-		kfree(path);
-	}
+	ext4_ext_drop_refs(path);
+	kfree(path);
 	return ret;
 }
 
 /**
- * mext_replace_branches - Replace original extents with new extents
- *
- * @handle:		journal handle
- * @orig_inode:		original inode
- * @donor_inode:	donor inode
- * @from:		block offset of orig_inode
- * @count:		block count to be replaced
- * @err:		pointer to save return value
- *
- * Replace original inode extents and donor inode extents page by page.
- * We implement this replacement in the following three steps:
- * 1. Save the block information of original and donor inodes into
- *    dummy extents.
- * 2. Change the block information of original inode to point at the
- *    donor inode blocks.
- * 3. Change the block information of donor inode to point at the saved
- *    original inode blocks in the dummy extents.
- *
- * Return replaced block count.
- */
-static int
-mext_replace_branches(handle_t *handle, struct inode *orig_inode,
-			   struct inode *donor_inode, ext4_lblk_t from,
-			   ext4_lblk_t count, int *err)
-{
-	struct ext4_ext_path *orig_path = NULL;
-	struct ext4_ext_path *donor_path = NULL;
-	struct ext4_extent *oext, *dext;
-	struct ext4_extent tmp_dext, tmp_oext;
-	ext4_lblk_t orig_off = from, donor_off = from;
-	int depth;
-	int replaced_count = 0;
-	int dext_alen;
-
-	*err = ext4_es_remove_extent(orig_inode, from, count);
-	if (*err)
-		goto out;
-
-	*err = ext4_es_remove_extent(donor_inode, from, count);
-	if (*err)
-		goto out;
-
-	/* Get the original extent for the block "orig_off" */
-	*err = get_ext_path(orig_inode, orig_off, &orig_path);
-	if (*err)
-		goto out;
-
-	/* Get the donor extent for the head */
-	*err = get_ext_path(donor_inode, donor_off, &donor_path);
-	if (*err)
-		goto out;
-	depth = ext_depth(orig_inode);
-	oext = orig_path[depth].p_ext;
-	tmp_oext = *oext;
-
-	depth = ext_depth(donor_inode);
-	dext = donor_path[depth].p_ext;
-	if (unlikely(!dext))
-		goto missing_donor_extent;
-	tmp_dext = *dext;
-
-	*err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off,
-				      donor_off, count);
-	if (*err)
-		goto out;
-
-	/* Loop for the donor extents */
-	while (1) {
-		/* The extent for donor must be found. */
-		if (unlikely(!dext)) {
-		missing_donor_extent:
-			EXT4_ERROR_INODE(donor_inode,
-				   "The extent for donor must be found");
-			*err = -EIO;
-			goto out;
-		} else if (donor_off != le32_to_cpu(tmp_dext.ee_block)) {
-			EXT4_ERROR_INODE(donor_inode,
-				"Donor offset(%u) and the first block of donor "
-				"extent(%u) should be equal",
-				donor_off,
-				le32_to_cpu(tmp_dext.ee_block));
-			*err = -EIO;
-			goto out;
-		}
-
-		/* Set donor extent to orig extent */
-		*err = mext_leaf_block(handle, orig_inode,
-					   orig_path, &tmp_dext, &orig_off);
-		if (*err)
-			goto out;
-
-		/* Set orig extent to donor extent */
-		*err = mext_leaf_block(handle, donor_inode,
-					   donor_path, &tmp_oext, &donor_off);
-		if (*err)
-			goto out;
-
-		dext_alen = ext4_ext_get_actual_len(&tmp_dext);
-		replaced_count += dext_alen;
-		donor_off += dext_alen;
-		orig_off += dext_alen;
-
-		BUG_ON(replaced_count > count);
-		/* Already moved the expected blocks */
-		if (replaced_count >= count)
-			break;
-
-		if (orig_path)
-			ext4_ext_drop_refs(orig_path);
-		*err = get_ext_path(orig_inode, orig_off, &orig_path);
-		if (*err)
-			goto out;
-		depth = ext_depth(orig_inode);
-		oext = orig_path[depth].p_ext;
-		tmp_oext = *oext;
-
-		if (donor_path)
-			ext4_ext_drop_refs(donor_path);
-		*err = get_ext_path(donor_inode, donor_off, &donor_path);
-		if (*err)
-			goto out;
-		depth = ext_depth(donor_inode);
-		dext = donor_path[depth].p_ext;
-		tmp_dext = *dext;
-
-		*err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off,
-					   donor_off, count - replaced_count);
-		if (*err)
-			goto out;
-	}
-
-out:
-	if (orig_path) {
-		ext4_ext_drop_refs(orig_path);
-		kfree(orig_path);
-	}
-	if (donor_path) {
-		ext4_ext_drop_refs(donor_path);
-		kfree(donor_path);
-	}
-
-	return replaced_count;
-}
-
-/**
  * mext_page_double_lock - Grab and lock pages on both @inode1 and @inode2
  *
  * @inode1:	the inode structure
  * @inode2:	the inode structure
- * @index:	page index
+ * @index1:	page index on @inode1
+ * @index2:	page index on @inode2
  * @page:	result page vector
  *
  * Grab two locked pages for inode's by inode order
  */
 static int
 mext_page_double_lock(struct inode *inode1, struct inode *inode2,
-		      pgoff_t index, struct page *page[2])
+		      pgoff_t index1, pgoff_t index2, struct page *page[2])
 {
 	struct address_space *mapping[2];
 	unsigned fl = AOP_FLAG_NOFS;
@@ -792,15 +142,18 @@
 		mapping[0] = inode1->i_mapping;
 		mapping[1] = inode2->i_mapping;
 	} else {
+		pgoff_t tmp = index1;
+		index1 = index2;
+		index2 = tmp;
 		mapping[0] = inode2->i_mapping;
 		mapping[1] = inode1->i_mapping;
 	}
 
-	page[0] = grab_cache_page_write_begin(mapping[0], index, fl);
+	page[0] = grab_cache_page_write_begin(mapping[0], index1, fl);
 	if (!page[0])
 		return -ENOMEM;
 
-	page[1] = grab_cache_page_write_begin(mapping[1], index, fl);
+	page[1] = grab_cache_page_write_begin(mapping[1], index2, fl);
 	if (!page[1]) {
 		unlock_page(page[0]);
 		page_cache_release(page[0]);
@@ -861,8 +214,7 @@
 			}
 			if (!buffer_mapped(bh)) {
 				zero_user(page, block_start, blocksize);
-				if (!err)
-					set_buffer_uptodate(bh);
+				set_buffer_uptodate(bh);
 				continue;
 			}
 		}
@@ -893,26 +245,27 @@
  * @o_filp:			file structure of original file
  * @donor_inode:		donor inode
  * @orig_page_offset:		page index on original file
+ * @donor_page_offset:		page index on donor file
  * @data_offset_in_page:	block index where data swapping starts
  * @block_len_in_page:		the number of blocks to be swapped
- * @uninit:			orig extent is uninitialized or not
+ * @unwritten:			orig extent is unwritten or not
  * @err:			pointer to save return value
  *
  * Save the data in original inode blocks and replace original inode extents
- * with donor inode extents by calling mext_replace_branches().
+ * with donor inode extents by calling ext4_swap_extents().
  * Finally, write out the saved data in new original inode blocks. Return
  * replaced block count.
  */
 static int
 move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
-		  pgoff_t orig_page_offset, int data_offset_in_page,
-		  int block_len_in_page, int uninit, int *err)
+		     pgoff_t orig_page_offset, pgoff_t donor_page_offset,
+		     int data_offset_in_page,
+		     int block_len_in_page, int unwritten, int *err)
 {
 	struct inode *orig_inode = file_inode(o_filp);
 	struct page *pagep[2] = {NULL, NULL};
 	handle_t *handle;
-	ext4_lblk_t orig_blk_offset;
-	long long offs = orig_page_offset << PAGE_CACHE_SHIFT;
+	ext4_lblk_t orig_blk_offset, donor_blk_offset;
 	unsigned long blocksize = orig_inode->i_sb->s_blocksize;
 	unsigned int w_flags = 0;
 	unsigned int tmp_data_size, data_size, replaced_size;
@@ -940,7 +293,8 @@
 	orig_blk_offset = orig_page_offset * blocks_per_page +
 		data_offset_in_page;
 
-	offs = (long long)orig_blk_offset << orig_inode->i_blkbits;
+	donor_blk_offset = donor_page_offset * blocks_per_page +
+		data_offset_in_page;
 
 	/* Calculate data_size */
 	if ((orig_blk_offset + block_len_in_page - 1) ==
@@ -962,31 +316,31 @@
 	replaced_size = data_size;
 
 	*err = mext_page_double_lock(orig_inode, donor_inode, orig_page_offset,
-				     pagep);
+				     donor_page_offset, pagep);
 	if (unlikely(*err < 0))
 		goto stop_journal;
 	/*
-	 * If orig extent was uninitialized it can become initialized
+	 * If orig extent was unwritten it can become initialized
 	 * at any time after i_data_sem was dropped, in order to
 	 * serialize with delalloc we have recheck extent while we
 	 * hold page's lock, if it is still the case data copy is not
 	 * necessary, just swap data blocks between orig and donor.
 	 */
-	if (uninit) {
+	if (unwritten) {
 		ext4_double_down_write_data_sem(orig_inode, donor_inode);
 		/* If any of extents in range became initialized we have to
 		 * fallback to data copying */
-		uninit = mext_check_coverage(orig_inode, orig_blk_offset,
-					     block_len_in_page, 1, err);
+		unwritten = mext_check_coverage(orig_inode, orig_blk_offset,
+						block_len_in_page, 1, err);
 		if (*err)
 			goto drop_data_sem;
 
-		uninit &= mext_check_coverage(donor_inode, orig_blk_offset,
-					      block_len_in_page, 1, err);
+		unwritten &= mext_check_coverage(donor_inode, donor_blk_offset,
+						 block_len_in_page, 1, err);
 		if (*err)
 			goto drop_data_sem;
 
-		if (!uninit) {
+		if (!unwritten) {
 			ext4_double_up_write_data_sem(orig_inode, donor_inode);
 			goto data_copy;
 		}
@@ -997,9 +351,10 @@
 			*err = -EBUSY;
 			goto drop_data_sem;
 		}
-		replaced_count = mext_replace_branches(handle, orig_inode,
-						donor_inode, orig_blk_offset,
-						block_len_in_page, err);
+		replaced_count = ext4_swap_extents(handle, orig_inode,
+						   donor_inode, orig_blk_offset,
+						   donor_blk_offset,
+						   block_len_in_page, 1, err);
 	drop_data_sem:
 		ext4_double_up_write_data_sem(orig_inode, donor_inode);
 		goto unlock_pages;
@@ -1016,10 +371,11 @@
 		*err = -EBUSY;
 		goto unlock_pages;
 	}
-
-	replaced_count = mext_replace_branches(handle, orig_inode, donor_inode,
-					       orig_blk_offset,
-					       block_len_in_page, err);
+	ext4_double_down_write_data_sem(orig_inode, donor_inode);
+	replaced_count = ext4_swap_extents(handle, orig_inode, donor_inode,
+					   orig_blk_offset, donor_blk_offset,
+					   block_len_in_page, 1, err);
+	ext4_double_up_write_data_sem(orig_inode, donor_inode);
 	if (*err) {
 		if (replaced_count) {
 			block_len_in_page = replaced_count;
@@ -1063,9 +419,9 @@
 	 * Try to swap extents to it's original places
 	 */
 	ext4_double_down_write_data_sem(orig_inode, donor_inode);
-	replaced_count = mext_replace_branches(handle, donor_inode, orig_inode,
-					       orig_blk_offset,
-					       block_len_in_page, &err2);
+	replaced_count = ext4_swap_extents(handle, donor_inode, orig_inode,
+					   orig_blk_offset, donor_blk_offset,
+					   block_len_in_page, 0, &err2);
 	ext4_double_up_write_data_sem(orig_inode, donor_inode);
 	if (replaced_count != block_len_in_page) {
 		EXT4_ERROR_INODE_BLOCK(orig_inode, (sector_t)(orig_blk_offset),
@@ -1095,10 +451,13 @@
 		     struct inode *donor_inode, __u64 orig_start,
 		     __u64 donor_start, __u64 *len)
 {
-	ext4_lblk_t orig_blocks, donor_blocks;
+	__u64 orig_eof, donor_eof;
 	unsigned int blkbits = orig_inode->i_blkbits;
 	unsigned int blocksize = 1 << blkbits;
 
+	orig_eof = (i_size_read(orig_inode) + blocksize - 1) >> blkbits;
+	donor_eof = (i_size_read(donor_inode) + blocksize - 1) >> blkbits;
+
 	if (donor_inode->i_mode & (S_ISUID|S_ISGID)) {
 		ext4_debug("ext4 move extent: suid or sgid is set"
 			   " to donor file [ino:orig %lu, donor %lu]\n",
@@ -1114,7 +474,7 @@
 		ext4_debug("ext4 move extent: The argument files should "
 			"not be swapfile [ino:orig %lu, donor %lu]\n",
 			orig_inode->i_ino, donor_inode->i_ino);
-		return -EINVAL;
+		return -EBUSY;
 	}
 
 	if (IS_NOQUOTA(orig_inode) || IS_NOQUOTA(donor_inode)) {
@@ -1141,67 +501,28 @@
 	}
 
 	/* Start offset should be same */
-	if (orig_start != donor_start) {
+	if ((orig_start & ~(PAGE_MASK >> orig_inode->i_blkbits)) !=
+	    (donor_start & ~(PAGE_MASK >> orig_inode->i_blkbits))) {
 		ext4_debug("ext4 move extent: orig and donor's start "
-			"offset are not same [ino:orig %lu, donor %lu]\n",
+			"offset are not alligned [ino:orig %lu, donor %lu]\n",
 			orig_inode->i_ino, donor_inode->i_ino);
 		return -EINVAL;
 	}
 
 	if ((orig_start >= EXT_MAX_BLOCKS) ||
+	    (donor_start >= EXT_MAX_BLOCKS) ||
 	    (*len > EXT_MAX_BLOCKS) ||
+	    (donor_start + *len >= EXT_MAX_BLOCKS) ||
 	    (orig_start + *len >= EXT_MAX_BLOCKS))  {
 		ext4_debug("ext4 move extent: Can't handle over [%u] blocks "
 			"[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCKS,
 			orig_inode->i_ino, donor_inode->i_ino);
 		return -EINVAL;
 	}
-
-	if (orig_inode->i_size > donor_inode->i_size) {
-		donor_blocks = (donor_inode->i_size + blocksize - 1) >> blkbits;
-		/* TODO: eliminate this artificial restriction */
-		if (orig_start >= donor_blocks) {
-			ext4_debug("ext4 move extent: orig start offset "
-			"[%llu] should be less than donor file blocks "
-			"[%u] [ino:orig %lu, donor %lu]\n",
-			orig_start, donor_blocks,
-			orig_inode->i_ino, donor_inode->i_ino);
-			return -EINVAL;
-		}
-
-		/* TODO: eliminate this artificial restriction */
-		if (orig_start + *len > donor_blocks) {
-			ext4_debug("ext4 move extent: End offset [%llu] should "
-				"be less than donor file blocks [%u]."
-				"So adjust length from %llu to %llu "
-				"[ino:orig %lu, donor %lu]\n",
-				orig_start + *len, donor_blocks,
-				*len, donor_blocks - orig_start,
-				orig_inode->i_ino, donor_inode->i_ino);
-			*len = donor_blocks - orig_start;
-		}
-	} else {
-		orig_blocks = (orig_inode->i_size + blocksize - 1) >> blkbits;
-		if (orig_start >= orig_blocks) {
-			ext4_debug("ext4 move extent: start offset [%llu] "
-				"should be less than original file blocks "
-				"[%u] [ino:orig %lu, donor %lu]\n",
-				 orig_start, orig_blocks,
-				orig_inode->i_ino, donor_inode->i_ino);
-			return -EINVAL;
-		}
-
-		if (orig_start + *len > orig_blocks) {
-			ext4_debug("ext4 move extent: Adjust length "
-				"from %llu to %llu. Because it should be "
-				"less than original file blocks "
-				"[ino:orig %lu, donor %lu]\n",
-				*len, orig_blocks - orig_start,
-				orig_inode->i_ino, donor_inode->i_ino);
-			*len = orig_blocks - orig_start;
-		}
-	}
-
+	if (orig_eof < orig_start + *len - 1)
+		*len = orig_eof - orig_start;
+	if (donor_eof < donor_start + *len - 1)
+		*len = donor_eof - donor_start;
 	if (!*len) {
 		ext4_debug("ext4 move extent: len should not be 0 "
 			"[ino:orig %lu, donor %lu]\n", orig_inode->i_ino,
@@ -1213,100 +534,30 @@
 }
 
 /**
- * ext4_inode_double_lock - Lock i_mutex on both @inode1 and @inode2
- *
- * @inode1:	the inode structure
- * @inode2:	the inode structure
- *
- * Lock two inodes' i_mutex
- */
-void
-ext4_inode_double_lock(struct inode *inode1, struct inode *inode2)
-{
-	BUG_ON(inode1 == inode2);
-	if (inode1 < inode2) {
-		mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
-		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
-	} else {
-		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_PARENT);
-		mutex_lock_nested(&inode1->i_mutex, I_MUTEX_CHILD);
-	}
-}
-
-/**
- * ext4_inode_double_unlock - Release i_mutex on both @inode1 and @inode2
- *
- * @inode1:     the inode that is released first
- * @inode2:     the inode that is released second
- *
- */
-
-void
-ext4_inode_double_unlock(struct inode *inode1, struct inode *inode2)
-{
-	mutex_unlock(&inode1->i_mutex);
-	mutex_unlock(&inode2->i_mutex);
-}
-
-/**
  * ext4_move_extents - Exchange the specified range of a file
  *
  * @o_filp:		file structure of the original file
  * @d_filp:		file structure of the donor file
- * @orig_start:		start offset in block for orig
- * @donor_start:	start offset in block for donor
+ * @orig_blk:		start offset in block for orig
+ * @donor_blk:		start offset in block for donor
  * @len:		the number of blocks to be moved
  * @moved_len:		moved block length
  *
  * This function returns 0 and moved block length is set in moved_len
  * if succeed, otherwise returns error value.
  *
- * Note: ext4_move_extents() proceeds the following order.
- * 1:ext4_move_extents() calculates the last block number of moving extent
- *   function by the start block number (orig_start) and the number of blocks
- *   to be moved (len) specified as arguments.
- *   If the {orig, donor}_start points a hole, the extent's start offset
- *   pointed by ext_cur (current extent), holecheck_path, orig_path are set
- *   after hole behind.
- * 2:Continue step 3 to step 5, until the holecheck_path points to last_extent
- *   or the ext_cur exceeds the block_end which is last logical block number.
- * 3:To get the length of continues area, call mext_next_extent()
- *   specified with the ext_cur (initial value is holecheck_path) re-cursive,
- *   until find un-continuous extent, the start logical block number exceeds
- *   the block_end or the extent points to the last extent.
- * 4:Exchange the original inode data with donor inode data
- *   from orig_page_offset to seq_end_page.
- *   The start indexes of data are specified as arguments.
- *   That of the original inode is orig_page_offset,
- *   and the donor inode is also orig_page_offset
- *   (To easily handle blocksize != pagesize case, the offset for the
- *   donor inode is block unit).
- * 5:Update holecheck_path and orig_path to points a next proceeding extent,
- *   then returns to step 2.
- * 6:Release holecheck_path, orig_path and set the len to moved_len
- *   which shows the number of moved blocks.
- *   The moved_len is useful for the command to calculate the file offset
- *   for starting next move extent ioctl.
- * 7:Return 0 on success, or a negative error value on failure.
  */
 int
-ext4_move_extents(struct file *o_filp, struct file *d_filp,
-		 __u64 orig_start, __u64 donor_start, __u64 len,
-		 __u64 *moved_len)
+ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
+		  __u64 donor_blk, __u64 len, __u64 *moved_len)
 {
 	struct inode *orig_inode = file_inode(o_filp);
 	struct inode *donor_inode = file_inode(d_filp);
-	struct ext4_ext_path *orig_path = NULL, *holecheck_path = NULL;
-	struct ext4_extent *ext_prev, *ext_cur, *ext_dummy;
-	ext4_lblk_t block_start = orig_start;
-	ext4_lblk_t block_end, seq_start, add_blocks, file_end, seq_blocks = 0;
-	ext4_lblk_t rest_blocks;
-	pgoff_t orig_page_offset = 0, seq_end_page;
-	int ret, depth, last_extent = 0;
+	struct ext4_ext_path *path = NULL;
 	int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
-	int data_offset_in_page;
-	int block_len_in_page;
-	int uninit;
+	ext4_lblk_t o_end, o_start = orig_blk;
+	ext4_lblk_t d_start = donor_blk;
+	int ret;
 
 	if (orig_inode->i_sb != donor_inode->i_sb) {
 		ext4_debug("ext4 move extent: The argument files "
@@ -1337,7 +588,7 @@
 		return -EINVAL;
 	}
 	/* Protect orig and donor inodes against a truncate */
-	ext4_inode_double_lock(orig_inode, donor_inode);
+	lock_two_nondirectories(orig_inode, donor_inode);
 
 	/* Wait for all existing dio workers */
 	ext4_inode_block_unlocked_dio(orig_inode);
@@ -1348,121 +599,58 @@
 	/* Protect extent tree against block allocations via delalloc */
 	ext4_double_down_write_data_sem(orig_inode, donor_inode);
 	/* Check the filesystem environment whether move_extent can be done */
-	ret = mext_check_arguments(orig_inode, donor_inode, orig_start,
-				    donor_start, &len);
+	ret = mext_check_arguments(orig_inode, donor_inode, orig_blk,
+				    donor_blk, &len);
 	if (ret)
 		goto out;
+	o_end = o_start + len;
 
-	file_end = (i_size_read(orig_inode) - 1) >> orig_inode->i_blkbits;
-	block_end = block_start + len - 1;
-	if (file_end < block_end)
-		len -= block_end - file_end;
+	while (o_start < o_end) {
+		struct ext4_extent *ex;
+		ext4_lblk_t cur_blk, next_blk;
+		pgoff_t orig_page_index, donor_page_index;
+		int offset_in_page;
+		int unwritten, cur_len;
 
-	ret = get_ext_path(orig_inode, block_start, &orig_path);
-	if (ret)
-		goto out;
-
-	/* Get path structure to check the hole */
-	ret = get_ext_path(orig_inode, block_start, &holecheck_path);
-	if (ret)
-		goto out;
-
-	depth = ext_depth(orig_inode);
-	ext_cur = holecheck_path[depth].p_ext;
-
-	/*
-	 * Get proper starting location of block replacement if block_start was
-	 * within the hole.
-	 */
-	if (le32_to_cpu(ext_cur->ee_block) +
-		ext4_ext_get_actual_len(ext_cur) - 1 < block_start) {
-		/*
-		 * The hole exists between extents or the tail of
-		 * original file.
-		 */
-		last_extent = mext_next_extent(orig_inode,
-					holecheck_path, &ext_cur);
-		if (last_extent < 0) {
-			ret = last_extent;
+		ret = get_ext_path(orig_inode, o_start, &path);
+		if (ret)
 			goto out;
-		}
-		last_extent = mext_next_extent(orig_inode, orig_path,
-							&ext_dummy);
-		if (last_extent < 0) {
-			ret = last_extent;
-			goto out;
-		}
-		seq_start = le32_to_cpu(ext_cur->ee_block);
-	} else if (le32_to_cpu(ext_cur->ee_block) > block_start)
-		/* The hole exists at the beginning of original file. */
-		seq_start = le32_to_cpu(ext_cur->ee_block);
-	else
-		seq_start = block_start;
-
-	/* No blocks within the specified range. */
-	if (le32_to_cpu(ext_cur->ee_block) > block_end) {
-		ext4_debug("ext4 move extent: The specified range of file "
-							"may be the hole\n");
-		ret = -EINVAL;
-		goto out;
-	}
-
-	/* Adjust start blocks */
-	add_blocks = min(le32_to_cpu(ext_cur->ee_block) +
-			 ext4_ext_get_actual_len(ext_cur), block_end + 1) -
-		     max(le32_to_cpu(ext_cur->ee_block), block_start);
-
-	while (!last_extent && le32_to_cpu(ext_cur->ee_block) <= block_end) {
-		seq_blocks += add_blocks;
-
-		/* Adjust tail blocks */
-		if (seq_start + seq_blocks - 1 > block_end)
-			seq_blocks = block_end - seq_start + 1;
-
-		ext_prev = ext_cur;
-		last_extent = mext_next_extent(orig_inode, holecheck_path,
-						&ext_cur);
-		if (last_extent < 0) {
-			ret = last_extent;
-			break;
-		}
-		add_blocks = ext4_ext_get_actual_len(ext_cur);
-
-		/*
-		 * Extend the length of contiguous block (seq_blocks)
-		 * if extents are contiguous.
-		 */
-		if (ext4_can_extents_be_merged(orig_inode,
-					       ext_prev, ext_cur) &&
-		    block_end >= le32_to_cpu(ext_cur->ee_block) &&
-		    !last_extent)
+		ex = path[path->p_depth].p_ext;
+		next_blk = ext4_ext_next_allocated_block(path);
+		cur_blk = le32_to_cpu(ex->ee_block);
+		cur_len = ext4_ext_get_actual_len(ex);
+		/* Check hole before the start pos */
+		if (cur_blk + cur_len - 1 < o_start) {
+			if (next_blk == EXT_MAX_BLOCKS) {
+				o_start = o_end;
+				ret = -ENODATA;
+				goto out;
+			}
+			d_start += next_blk - o_start;
+			o_start = next_blk;
 			continue;
-
-		/* Is original extent is uninitialized */
-		uninit = ext4_ext_is_uninitialized(ext_prev);
-
-		data_offset_in_page = seq_start % blocks_per_page;
-
-		/*
-		 * Calculate data blocks count that should be swapped
-		 * at the first page.
-		 */
-		if (data_offset_in_page + seq_blocks > blocks_per_page) {
-			/* Swapped blocks are across pages */
-			block_len_in_page =
-					blocks_per_page - data_offset_in_page;
-		} else {
-			/* Swapped blocks are in a page */
-			block_len_in_page = seq_blocks;
+		/* Check hole after the start pos */
+		} else if (cur_blk > o_start) {
+			/* Skip hole */
+			d_start += cur_blk - o_start;
+			o_start = cur_blk;
+			/* Extent inside requested range? */
+			if (cur_blk >= o_end)
+				goto out;
+		} else { /* in_range(o_start, o_blk, o_len) */
+			cur_len += cur_blk - o_start;
 		}
+		unwritten = ext4_ext_is_unwritten(ex);
+		if (o_end - o_start < cur_len)
+			cur_len = o_end - o_start;
 
-		orig_page_offset = seq_start >>
-				(PAGE_CACHE_SHIFT - orig_inode->i_blkbits);
-		seq_end_page = (seq_start + seq_blocks - 1) >>
-				(PAGE_CACHE_SHIFT - orig_inode->i_blkbits);
-		seq_start = le32_to_cpu(ext_cur->ee_block);
-		rest_blocks = seq_blocks;
-
+		orig_page_index = o_start >> (PAGE_CACHE_SHIFT -
+					       orig_inode->i_blkbits);
+		donor_page_index = d_start >> (PAGE_CACHE_SHIFT -
+					       donor_inode->i_blkbits);
+		offset_in_page = o_start % blocks_per_page;
+		if (cur_len > blocks_per_page - offset_in_page)
+			cur_len = blocks_per_page - offset_in_page;
 		/*
 		 * Up semaphore to avoid following problems:
 		 * a. transaction deadlock among ext4_journal_start,
@@ -1471,81 +659,33 @@
 		 *    in move_extent_per_page
 		 */
 		ext4_double_up_write_data_sem(orig_inode, donor_inode);
-
-		while (orig_page_offset <= seq_end_page) {
-
-			/* Swap original branches with new branches */
-			block_len_in_page = move_extent_per_page(
-						o_filp, donor_inode,
-						orig_page_offset,
-						data_offset_in_page,
-						block_len_in_page, uninit,
-						&ret);
-
-			/* Count how many blocks we have exchanged */
-			*moved_len += block_len_in_page;
-			if (ret < 0)
-				break;
-			if (*moved_len > len) {
-				EXT4_ERROR_INODE(orig_inode,
-					"We replaced blocks too much! "
-					"sum of replaced: %llu requested: %llu",
-					*moved_len, len);
-				ret = -EIO;
-				break;
-			}
-
-			orig_page_offset++;
-			data_offset_in_page = 0;
-			rest_blocks -= block_len_in_page;
-			if (rest_blocks > blocks_per_page)
-				block_len_in_page = blocks_per_page;
-			else
-				block_len_in_page = rest_blocks;
-		}
-
+		/* Swap original branches with new branches */
+		move_extent_per_page(o_filp, donor_inode,
+				     orig_page_index, donor_page_index,
+				     offset_in_page, cur_len,
+				     unwritten, &ret);
 		ext4_double_down_write_data_sem(orig_inode, donor_inode);
 		if (ret < 0)
 			break;
-
-		/* Decrease buffer counter */
-		if (holecheck_path)
-			ext4_ext_drop_refs(holecheck_path);
-		ret = get_ext_path(orig_inode, seq_start, &holecheck_path);
-		if (ret)
-			break;
-		depth = holecheck_path->p_depth;
-
-		/* Decrease buffer counter */
-		if (orig_path)
-			ext4_ext_drop_refs(orig_path);
-		ret = get_ext_path(orig_inode, seq_start, &orig_path);
-		if (ret)
-			break;
-
-		ext_cur = holecheck_path[depth].p_ext;
-		add_blocks = ext4_ext_get_actual_len(ext_cur);
-		seq_blocks = 0;
-
+		o_start += cur_len;
+		d_start += cur_len;
 	}
+	*moved_len = o_start - orig_blk;
+	if (*moved_len > len)
+		*moved_len = len;
+
 out:
 	if (*moved_len) {
 		ext4_discard_preallocations(orig_inode);
 		ext4_discard_preallocations(donor_inode);
 	}
 
-	if (orig_path) {
-		ext4_ext_drop_refs(orig_path);
-		kfree(orig_path);
-	}
-	if (holecheck_path) {
-		ext4_ext_drop_refs(holecheck_path);
-		kfree(holecheck_path);
-	}
+	ext4_ext_drop_refs(path);
+	kfree(path);
 	ext4_double_up_write_data_sem(orig_inode, donor_inode);
 	ext4_inode_resume_unlocked_dio(orig_inode);
 	ext4_inode_resume_unlocked_dio(donor_inode);
-	ext4_inode_double_unlock(orig_inode, donor_inode);
+	unlock_two_nondirectories(orig_inode, donor_inode);
 
 	return ret;
 }
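
Replacing the private ext4_inode_double_lock() with the VFS helper lock_two_nondirectories() keeps the same deadlock-avoidance rule: the two i_mutexes are always taken in one global order. A userspace sketch of that rule, assuming ordering by address is an acceptable total order (the kernel helper additionally carries lockdep nesting annotations):

    #include <pthread.h>

    struct object {
            pthread_mutex_t lock;
    };

    /* Always lock the lower-addressed object first so two tasks locking
     * the same pair can never deadlock against each other. */
    static void lock_pair(struct object *a, struct object *b)
    {
            if (a > b) {
                    struct object *tmp = a;

                    a = b;
                    b = tmp;
            }
            pthread_mutex_lock(&a->lock);
            if (a != b)
                    pthread_mutex_lock(&b->lock);
    }

    static void unlock_pair(struct object *a, struct object *b)
    {
            pthread_mutex_unlock(&a->lock);
            if (a != b)
                    pthread_mutex_unlock(&b->lock);
    }
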
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 407bcf7..ff960e0 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -53,7 +53,7 @@
 					ext4_lblk_t *block)
 {
 	struct buffer_head *bh;
-	int err = 0;
+	int err;
 
 	if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
 		     ((inode->i_size >> 10) >=
@@ -62,11 +62,12 @@
 
 	*block = inode->i_size >> inode->i_sb->s_blocksize_bits;
 
-	bh = ext4_bread(handle, inode, *block, 1, &err);
-	if (!bh)
-		return ERR_PTR(err);
+	bh = ext4_bread(handle, inode, *block, 1);
+	if (IS_ERR(bh))
+		return bh;
 	inode->i_size += inode->i_sb->s_blocksize;
 	EXT4_I(inode)->i_disksize = inode->i_size;
+	BUFFER_TRACE(bh, "get_write_access");
 	err = ext4_journal_get_write_access(handle, bh);
 	if (err) {
 		brelse(bh);
@@ -93,20 +94,20 @@
 {
 	struct buffer_head *bh;
 	struct ext4_dir_entry *dirent;
-	int err = 0, is_dx_block = 0;
+	int is_dx_block = 0;
 
-	bh = ext4_bread(NULL, inode, block, 0, &err);
-	if (!bh) {
-		if (err == 0) {
-			ext4_error_inode(inode, __func__, line, block,
-					       "Directory hole found");
-			return ERR_PTR(-EIO);
-		}
+	bh = ext4_bread(NULL, inode, block, 0);
+	if (IS_ERR(bh)) {
 		__ext4_warning(inode->i_sb, __func__, line,
-			       "error reading directory block "
-			       "(ino %lu, block %lu)", inode->i_ino,
+			       "error %ld reading directory block "
+			       "(ino %lu, block %lu)", PTR_ERR(bh), inode->i_ino,
 			       (unsigned long) block);
-		return ERR_PTR(err);
+
+		return bh;
+	}
+	if (!bh) {
+		ext4_error_inode(inode, __func__, line, block,
+				 "Directory hole found");
+		return ERR_PTR(-EIO);
 	}
 	dirent = (struct ext4_dir_entry *) bh->b_data;
 	/* Determine whether or not we have an index block */
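
The hunk above switches ext4_bread() callers to a three-way return convention: IS_ERR() encodes a real failure in the pointer itself, NULL means the block is simply unmapped (a directory hole), and anything else is a valid buffer. A userspace re-creation of the idiom, with read_block() as a hypothetical stand-in:

    #include <errno.h>
    #include <stdio.h>

    /* Userspace copies of the kernel's pointer-error helpers. */
    #define MAX_ERRNO       4095

    static inline void *ERR_PTR(long error)    { return (void *)error; }
    static inline long PTR_ERR(const void *p)  { return (long)p; }
    static inline int IS_ERR(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    /* Hypothetical reader with ext4_bread()-style semantics. */
    static void *read_block(int want_hole)
    {
            if (want_hole)
                    return NULL;            /* mapped nothing: a hole */
            return ERR_PTR(-EIO);           /* a real I/O failure */
    }

    int main(void)
    {
            void *bh = read_block(0);

            if (IS_ERR(bh))
                    printf("error: %ld\n", PTR_ERR(bh));
            else if (!bh)
                    printf("hole\n");
            return 0;
    }
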
@@ -123,8 +124,7 @@
 		       "directory leaf block found instead of index block");
 		return ERR_PTR(-EIO);
 	}
-	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
-					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) ||
+	if (!ext4_has_metadata_csum(inode->i_sb) ||
 	    buffer_verified(bh))
 		return bh;
 
@@ -249,14 +249,14 @@
 static void dx_set_limit(struct dx_entry *entries, unsigned value);
 static unsigned dx_root_limit(struct inode *dir, unsigned infosize);
 static unsigned dx_node_limit(struct inode *dir);
-static struct dx_frame *dx_probe(const struct qstr *d_name,
+static struct dx_frame *dx_probe(struct ext4_filename *fname,
 				 struct inode *dir,
 				 struct dx_hash_info *hinfo,
-				 struct dx_frame *frame,
-				 int *err);
+				 struct dx_frame *frame);
 static void dx_release(struct dx_frame *frames);
-static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
-		       struct dx_hash_info *hinfo, struct dx_map_entry map[]);
+static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
+		       unsigned blocksize, struct dx_hash_info *hinfo,
+		       struct dx_map_entry map[]);
 static void dx_sort_map(struct dx_map_entry *map, unsigned count);
 static struct ext4_dir_entry_2 *dx_move_dirents(char *from, char *to,
 		struct dx_map_entry *offsets, int count, unsigned blocksize);
@@ -268,11 +268,10 @@
 				 struct dx_frame *frames,
 				 __u32 *start_hash);
 static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
-		const struct qstr *d_name,
-		struct ext4_dir_entry_2 **res_dir,
-		int *err);
-static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
-			     struct inode *inode);
+		struct ext4_filename *fname,
+		struct ext4_dir_entry_2 **res_dir);
+static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
+			     struct dentry *dentry, struct inode *inode);
 
 /* checksumming functions */
 void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
@@ -339,8 +338,7 @@
 {
 	struct ext4_dir_entry_tail *t;
 
-	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
-					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	if (!ext4_has_metadata_csum(inode->i_sb))
 		return 1;
 
 	t = get_dirent_tail(inode, dirent);
@@ -361,8 +359,7 @@
 {
 	struct ext4_dir_entry_tail *t;
 
-	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
-					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	if (!ext4_has_metadata_csum(inode->i_sb))
 		return;
 
 	t = get_dirent_tail(inode, dirent);
@@ -436,8 +433,7 @@
 	struct dx_tail *t;
 	int count_offset, limit, count;
 
-	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
-					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	if (!ext4_has_metadata_csum(inode->i_sb))
 		return 1;
 
 	c = get_dx_countlimit(inode, dirent, &count_offset);
@@ -466,8 +462,7 @@
 	struct dx_tail *t;
 	int count_offset, limit, count;
 
-	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
-					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	if (!ext4_has_metadata_csum(inode->i_sb))
 		return;
 
 	c = get_dx_countlimit(inode, dirent, &count_offset);
@@ -555,8 +550,7 @@
 	unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
 		EXT4_DIR_REC_LEN(2) - infosize;
 
-	if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
-				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	if (ext4_has_metadata_csum(dir->i_sb))
 		entry_space -= sizeof(struct dx_tail);
 	return entry_space / sizeof(struct dx_entry);
 }
@@ -565,8 +559,7 @@
 {
 	unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);
 
-	if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
-				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	if (ext4_has_metadata_csum(dir->i_sb))
 		entry_space -= sizeof(struct dx_tail);
 	return entry_space / sizeof(struct dx_entry);
 }
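
Each of these hunks collapses the two-line EXT4_HAS_RO_COMPAT_FEATURE(..., METADATA_CSUM) test into a single ext4_has_metadata_csum() call. The shape of such a predicate wrapper, using an illustrative flag value rather than the real ext4 bit definitions:

    #include <stdbool.h>

    #define RO_COMPAT_METADATA_CSUM 0x0400  /* illustrative value only */

    struct superblock {
            unsigned int ro_compat_flags;
    };

    /* One readable call site instead of a wrapped feature-macro test. */
    static inline bool has_metadata_csum(const struct superblock *sb)
    {
            return sb->ro_compat_flags & RO_COMPAT_METADATA_CSUM;
    }
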
@@ -593,8 +586,10 @@
 	unsigned bcount;
 };
 
-static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext4_dir_entry_2 *de,
-				 int size, int show_names)
+static struct stats dx_show_leaf(struct inode *dir,
+				struct dx_hash_info *hinfo,
+				struct ext4_dir_entry_2 *de,
+				int size, int show_names)
 {
 	unsigned names = 0, space = 0;
 	char *base = (char *) de;
@@ -607,12 +602,69 @@
 		{
 			if (show_names)
 			{
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+				int len;
+				char *name;
+				struct ext4_str fname_crypto_str
+					= {.name = NULL, .len = 0};
+				int res = 0;
+
+				name  = de->name;
+				len = de->name_len;
+				if (ext4_encrypted_inode(dir))
+					res = ext4_get_encryption_info(dir);
+				if (res) {
+					printk(KERN_WARNING "Error setting up"
+					       " fname crypto: %d\n", res);
+				}
+				if (!ext4_encrypted_inode(dir)) {
+					/* Directory is not encrypted */
+					ext4fs_dirhash(de->name,
+						de->name_len, &h);
+					printk("%*.s:(U)%x.%u ", len,
+					       name, h.hash,
+					       (unsigned) ((char *) de
+							   - base));
+				} else {
+					/* Directory is encrypted */
+					res = ext4_fname_crypto_alloc_buffer(
+						dir, de->name_len,
+						&fname_crypto_str);
+					if (res < 0)
+						printk(KERN_WARNING "Error "
+							"allocating crypto "
+							"buffer--skipping "
+							"crypto\n");
+					res = ext4_fname_disk_to_usr(dir, NULL, de,
+							&fname_crypto_str);
+					if (res < 0) {
+						printk(KERN_WARNING "Error "
+							"converting filename "
+							"from disk to usr"
+							"\n");
+						name = "??";
+						len = 2;
+					} else {
+						name = fname_crypto_str.name;
+						len = fname_crypto_str.len;
+					}
+					ext4fs_dirhash(de->name, de->name_len,
+						       &h);
+					printk("%*.s:(E)%x.%u ", len, name,
+					       h.hash, (unsigned) ((char *) de
+								   - base));
+					ext4_fname_crypto_free_buffer(
+						&fname_crypto_str);
+				}
+#else
 				int len = de->name_len;
 				char *name = de->name;
-				while (len--) printk("%c", *name++);
 				ext4fs_dirhash(de->name, de->name_len, &h);
-				printk(":%x.%u ", h.hash,
+				printk("%*.s:%x.%u ", len, name, h.hash,
 				       (unsigned) ((char *) de - base));
+#endif
 			}
 			space += EXT4_DIR_REC_LEN(de->name_len);
 			names++;
@@ -630,7 +682,6 @@
 	unsigned count = dx_get_count(entries), names = 0, space = 0, i;
 	unsigned bcount = 0;
 	struct buffer_head *bh;
-	int err;
 	printk("%i indexed blocks...\n", count);
 	for (i = 0; i < count; i++, entries++)
 	{
@@ -639,10 +690,13 @@
 		u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash;
 		struct stats stats;
 		printk("%s%3u:%03u hash %8x/%8x ",levels?"":"   ", i, block, hash, range);
-		if (!(bh = ext4_bread (NULL,dir, block, 0,&err))) continue;
+		bh = ext4_bread(NULL, dir, block, 0);
+		if (!bh || IS_ERR(bh))
+			continue;
 		stats = levels?
 		   dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1):
-		   dx_show_leaf(hinfo, (struct ext4_dir_entry_2 *) bh->b_data, blocksize, 0);
+		   dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *)
+			bh->b_data, blocksize, 0);
 		names += stats.names;
 		space += stats.space;
 		bcount += stats.bcount;
@@ -666,53 +720,47 @@
  * back to userspace.
  */
 static struct dx_frame *
-dx_probe(const struct qstr *d_name, struct inode *dir,
-	 struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
+dx_probe(struct ext4_filename *fname, struct inode *dir,
+	 struct dx_hash_info *hinfo, struct dx_frame *frame_in)
 {
 	unsigned count, indirect;
 	struct dx_entry *at, *entries, *p, *q, *m;
 	struct dx_root *root;
-	struct buffer_head *bh;
 	struct dx_frame *frame = frame_in;
+	struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
 	u32 hash;
 
-	frame->bh = NULL;
-	bh = ext4_read_dirblock(dir, 0, INDEX);
-	if (IS_ERR(bh)) {
-		*err = PTR_ERR(bh);
-		goto fail;
-	}
-	root = (struct dx_root *) bh->b_data;
+	frame->bh = ext4_read_dirblock(dir, 0, INDEX);
+	if (IS_ERR(frame->bh))
+		return (struct dx_frame *) frame->bh;
+
+	root = (struct dx_root *) frame->bh->b_data;
 	if (root->info.hash_version != DX_HASH_TEA &&
 	    root->info.hash_version != DX_HASH_HALF_MD4 &&
 	    root->info.hash_version != DX_HASH_LEGACY) {
 		ext4_warning(dir->i_sb, "Unrecognised inode hash code %d",
 			     root->info.hash_version);
-		brelse(bh);
-		*err = ERR_BAD_DX_DIR;
 		goto fail;
 	}
+	if (fname)
+		hinfo = &fname->hinfo;
 	hinfo->hash_version = root->info.hash_version;
 	if (hinfo->hash_version <= DX_HASH_TEA)
 		hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
 	hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed;
-	if (d_name)
-		ext4fs_dirhash(d_name->name, d_name->len, hinfo);
+	if (fname && fname_name(fname))
+		ext4fs_dirhash(fname_name(fname), fname_len(fname), hinfo);
 	hash = hinfo->hash;
 
 	if (root->info.unused_flags & 1) {
 		ext4_warning(dir->i_sb, "Unimplemented inode hash flags: %#06x",
 			     root->info.unused_flags);
-		brelse(bh);
-		*err = ERR_BAD_DX_DIR;
 		goto fail;
 	}
 
 	if ((indirect = root->info.indirect_levels) > 1) {
 		ext4_warning(dir->i_sb, "Unimplemented inode hash depth: %#06x",
 			     root->info.indirect_levels);
-		brelse(bh);
-		*err = ERR_BAD_DX_DIR;
 		goto fail;
 	}
 
@@ -722,27 +770,21 @@
 	if (dx_get_limit(entries) != dx_root_limit(dir,
 						   root->info.info_length)) {
 		ext4_warning(dir->i_sb, "dx entry: limit != root limit");
-		brelse(bh);
-		*err = ERR_BAD_DX_DIR;
 		goto fail;
 	}
 
 	dxtrace(printk("Look up %x", hash));
-	while (1)
-	{
+	while (1) {
 		count = dx_get_count(entries);
 		if (!count || count > dx_get_limit(entries)) {
 			ext4_warning(dir->i_sb,
 				     "dx entry: no count or count > limit");
-			brelse(bh);
-			*err = ERR_BAD_DX_DIR;
-			goto fail2;
+			goto fail;
 		}
 
 		p = entries + 1;
 		q = entries + count - 1;
-		while (p <= q)
-		{
+		while (p <= q) {
 			m = p + (q - p)/2;
 			dxtrace(printk("."));
 			if (dx_get_hash(m) > hash)
@@ -751,8 +793,7 @@
 				p = m + 1;
 		}
 
-		if (0) // linear search cross check
-		{
+		if (0) { // linear search cross check
 			unsigned n = count - 1;
 			at = entries;
 			while (n--)
@@ -769,38 +810,36 @@
 
 		at = p - 1;
 		dxtrace(printk(" %x->%u\n", at == entries? 0: dx_get_hash(at), dx_get_block(at)));
-		frame->bh = bh;
 		frame->entries = entries;
 		frame->at = at;
-		if (!indirect--) return frame;
-		bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
-		if (IS_ERR(bh)) {
-			*err = PTR_ERR(bh);
-			goto fail2;
+		if (!indirect--)
+			return frame;
+		frame++;
+		frame->bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
+		if (IS_ERR(frame->bh)) {
+			ret_err = (struct dx_frame *) frame->bh;
+			frame->bh = NULL;
+			goto fail;
 		}
-		entries = ((struct dx_node *) bh->b_data)->entries;
+		entries = ((struct dx_node *) frame->bh->b_data)->entries;
 
 		if (dx_get_limit(entries) != dx_node_limit (dir)) {
 			ext4_warning(dir->i_sb,
 				     "dx entry: limit != node limit");
-			brelse(bh);
-			*err = ERR_BAD_DX_DIR;
-			goto fail2;
+			goto fail;
 		}
-		frame++;
-		frame->bh = NULL;
 	}
-fail2:
+fail:
 	while (frame >= frame_in) {
 		brelse(frame->bh);
 		frame--;
 	}
-fail:
-	if (*err == ERR_BAD_DX_DIR)
+
+	if (ret_err == ERR_PTR(ERR_BAD_DX_DIR))
 		ext4_warning(dir->i_sb,
 			     "Corrupt dir inode %lu, running e2fsck is "
 			     "recommended.", dir->i_ino);
-	return NULL;
+	return ret_err;
 }
 
 static void dx_release (struct dx_frame *frames)
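
The p/q/m loop kept in dx_probe() above is a binary search for the last index entry whose hash is less than or equal to the target. The same search reduced to a runnable standalone form, with entries[0] acting as the sentinel slot the way the dx layout reserves it:

    #include <stdio.h>

    static const unsigned *find_le(const unsigned *entries, unsigned count,
                                   unsigned hash)
    {
            const unsigned *p = entries + 1;  /* entries[0] is the sentinel */
            const unsigned *q = entries + count - 1;

            while (p <= q) {
                    const unsigned *m = p + (q - p) / 2;

                    if (*m > hash)
                            q = m - 1;
                    else
                            p = m + 1;
            }
            return p - 1;                     /* "at = p - 1" above */
    }

    int main(void)
    {
            unsigned e[] = { 0, 10, 20, 30 };

            printf("%u\n", *find_le(e, 4, 25));     /* prints 20 */
            return 0;
    }
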
@@ -901,6 +940,7 @@
 	struct buffer_head *bh;
 	struct ext4_dir_entry_2 *de, *top;
 	int err = 0, count = 0;
+	struct ext4_str fname_crypto_str = {.name = NULL, .len = 0}, tmp_str;
 
 	dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n",
 							(unsigned long)block));
@@ -912,6 +952,22 @@
 	top = (struct ext4_dir_entry_2 *) ((char *) de +
 					   dir->i_sb->s_blocksize -
 					   EXT4_DIR_REC_LEN(0));
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+	/* Check if the directory is encrypted */
+	if (ext4_encrypted_inode(dir)) {
+		err = ext4_get_encryption_info(dir);
+		if (err < 0) {
+			brelse(bh);
+			return err;
+		}
+		err = ext4_fname_crypto_alloc_buffer(dir, EXT4_NAME_LEN,
+						     &fname_crypto_str);
+		if (err < 0) {
+			brelse(bh);
+			return err;
+		}
+	}
+#endif
 	for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
 		if (ext4_check_dir_entry(dir, NULL, de, bh,
 				bh->b_data, bh->b_size,
@@ -927,14 +983,38 @@
 			continue;
 		if (de->inode == 0)
 			continue;
-		if ((err = ext4_htree_store_dirent(dir_file,
-				   hinfo->hash, hinfo->minor_hash, de)) != 0) {
-			brelse(bh);
-			return err;
+		if (!ext4_encrypted_inode(dir)) {
+			tmp_str.name = de->name;
+			tmp_str.len = de->name_len;
+			err = ext4_htree_store_dirent(dir_file,
+				   hinfo->hash, hinfo->minor_hash, de,
+				   &tmp_str);
+		} else {
+			int save_len = fname_crypto_str.len;
+
+			/* Directory is encrypted */
+			err = ext4_fname_disk_to_usr(dir, hinfo, de,
+						     &fname_crypto_str);
+			if (err < 0) {
+				count = err;
+				goto errout;
+			}
+			err = ext4_htree_store_dirent(dir_file,
+				   hinfo->hash, hinfo->minor_hash, de,
+					&fname_crypto_str);
+			fname_crypto_str.len = save_len;
+		}
+		if (err != 0) {
+			count = err;
+			goto errout;
 		}
 		count++;
 	}
+errout:
 	brelse(bh);
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+	ext4_fname_crypto_free_buffer(&fname_crypto_str);
+#endif
 	return count;
 }
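
The loop above allocates one crypto buffer up front and reuses it for every entry: ext4_fname_disk_to_usr() shrinks fname_crypto_str.len to the decoded length, so the caller snapshots and restores it (save_len) on each pass instead of reallocating. The reuse pattern in miniature, with decode_into() as a hypothetical stand-in for the conversion:

    #include <stdio.h>

    struct str { char *name; unsigned len; };

    /* Copies src into out and reports the used length in out->len. */
    static void decode_into(struct str *out, const char *src)
    {
            unsigned n = 0;

            while (src[n] && n < out->len) {
                    out->name[n] = src[n];
                    n++;
            }
            out->len = n;
    }

    int main(void)
    {
            char buf[16];
            struct str s = { buf, sizeof(buf) };
            const char *names[] = { "alpha", "bee" };

            for (int i = 0; i < 2; i++) {
                    unsigned save_len = s.len;      /* snapshot capacity */

                    decode_into(&s, names[i]);
                    printf("%.*s\n", (int)s.len, s.name);
                    s.len = save_len;               /* restore for next pass */
            }
            return 0;
    }
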
 
@@ -958,6 +1038,7 @@
 	int count = 0;
 	int ret, err;
 	__u32 hashval;
+	struct ext4_str tmp_str;
 
 	dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n",
 		       start_hash, start_minor_hash));
@@ -986,21 +1067,29 @@
 	}
 	hinfo.hash = start_hash;
 	hinfo.minor_hash = 0;
-	frame = dx_probe(NULL, dir, &hinfo, frames, &err);
-	if (!frame)
-		return err;
+	frame = dx_probe(NULL, dir, &hinfo, frames);
+	if (IS_ERR(frame))
+		return PTR_ERR(frame);
 
 	/* Add '.' and '..' from the htree header */
 	if (!start_hash && !start_minor_hash) {
 		de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
-		if ((err = ext4_htree_store_dirent(dir_file, 0, 0, de)) != 0)
+		tmp_str.name = de->name;
+		tmp_str.len = de->name_len;
+		err = ext4_htree_store_dirent(dir_file, 0, 0,
+					      de, &tmp_str);
+		if (err != 0)
 			goto errout;
 		count++;
 	}
 	if (start_hash < 2 || (start_hash == 2 && start_minor_hash == 0)) {
 		de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
 		de = ext4_next_entry(de, dir->i_sb->s_blocksize);
-		if ((err = ext4_htree_store_dirent(dir_file, 2, 0, de)) != 0)
+		tmp_str.name = de->name;
+		tmp_str.len = de->name_len;
+		err = ext4_htree_store_dirent(dir_file, 2, 0,
+					      de, &tmp_str);
+		if (err != 0)
 			goto errout;
 		count++;
 	}
@@ -1042,12 +1131,13 @@
 
 static inline int search_dirblock(struct buffer_head *bh,
 				  struct inode *dir,
+				  struct ext4_filename *fname,
 				  const struct qstr *d_name,
 				  unsigned int offset,
 				  struct ext4_dir_entry_2 **res_dir)
 {
-	return search_dir(bh, bh->b_data, dir->i_sb->s_blocksize, dir,
-			  d_name, offset, res_dir);
+	return ext4_search_dir(bh, bh->b_data, dir->i_sb->s_blocksize, dir,
+			       fname, d_name, offset, res_dir);
 }
 
 /*
@@ -1058,8 +1148,8 @@
  * Create map of hash values, offsets, and sizes, stored at end of block.
  * Returns number of entries mapped.
  */
-static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
-		       struct dx_hash_info *hinfo,
+static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
+		       unsigned blocksize, struct dx_hash_info *hinfo,
 		       struct dx_map_entry *map_tail)
 {
 	int count = 0;
@@ -1129,57 +1219,87 @@
  * `len <= EXT4_NAME_LEN' is guaranteed by caller.
  * `de != NULL' is guaranteed by caller.
  */
-static inline int ext4_match (int len, const char * const name,
-			      struct ext4_dir_entry_2 * de)
+static inline int ext4_match(struct ext4_filename *fname,
+			     struct ext4_dir_entry_2 *de)
 {
-	if (len != de->name_len)
-		return 0;
+	const void *name = fname_name(fname);
+	u32 len = fname_len(fname);
+
 	if (!de->inode)
 		return 0;
-	return !memcmp(name, de->name, len);
+
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+	if (unlikely(!name)) {
+		if (fname->usr_fname->name[0] == '_') {
+			int ret;
+			if (de->name_len < 16)
+				return 0;
+			ret = memcmp(de->name + de->name_len - 16,
+				     fname->crypto_buf.name + 8, 16);
+			return (ret == 0) ? 1 : 0;
+		}
+		name = fname->crypto_buf.name;
+		len = fname->crypto_buf.len;
+	}
+#endif
+	if (de->name_len != len)
+		return 0;
+	return (memcmp(de->name, name, len) == 0) ? 1 : 0;
 }
 
 /*
  * Returns 0 if not found, -1 on failure, and 1 on success
  */
-int search_dir(struct buffer_head *bh,
-	       char *search_buf,
-	       int buf_size,
-	       struct inode *dir,
-	       const struct qstr *d_name,
-	       unsigned int offset,
-	       struct ext4_dir_entry_2 **res_dir)
+int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
+		    struct inode *dir, struct ext4_filename *fname,
+		    const struct qstr *d_name,
+		    unsigned int offset, struct ext4_dir_entry_2 **res_dir)
 {
 	struct ext4_dir_entry_2 * de;
 	char * dlimit;
 	int de_len;
-	const char *name = d_name->name;
-	int namelen = d_name->len;
+	int res;
 
 	de = (struct ext4_dir_entry_2 *)search_buf;
 	dlimit = search_buf + buf_size;
 	while ((char *) de < dlimit) {
 		/* this code is executed quadratically often */
 		/* do minimal checking `by hand' */
+		if ((char *) de + de->name_len <= dlimit) {
+			res = ext4_match(fname, de);
+			if (res < 0) {
+				res = -1;
+				goto return_result;
+			}
+			if (res > 0) {
+				/* found a match - just to be sure, do
+				 * a full check */
+				if (ext4_check_dir_entry(dir, NULL, de, bh,
+						bh->b_data,
+						 bh->b_size, offset)) {
+					res = -1;
+					goto return_result;
+				}
+				*res_dir = de;
+				res = 1;
+				goto return_result;
+			}
 
-		if ((char *) de + namelen <= dlimit &&
-		    ext4_match (namelen, name, de)) {
-			/* found a match - just to be sure, do a full check */
-			if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data,
-						 bh->b_size, offset))
-				return -1;
-			*res_dir = de;
-			return 1;
 		}
 		/* prevent looping on a bad block */
 		de_len = ext4_rec_len_from_disk(de->rec_len,
 						dir->i_sb->s_blocksize);
-		if (de_len <= 0)
-			return -1;
+		if (de_len <= 0) {
+			res = -1;
+			goto return_result;
+		}
 		offset += de_len;
 		de = (struct ext4_dir_entry_2 *) ((char *) de + de_len);
 	}
-	return 0;
+
+	res = 0;
+return_result:
+	return res;
 }
 
 static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block,
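
The reworked ext4_search_dir() above advances by each record's on-disk length and bails out when that length is not positive, so a single corrupt record cannot pin the scan in place. The guard in isolation, over a hypothetical fixed record layout:

    #include <stdio.h>

    struct rec { int len; };

    static int scan(const char *buf, int buf_size)
    {
            const char *p = buf;
            const char *limit = buf + buf_size;
            int n = 0;

            while (p < limit) {
                    int rec_len = ((const struct rec *)p)->len;

                    if (rec_len <= 0)       /* corrupt: stop, don't loop */
                            return -1;
                    p += rec_len;
                    n++;
            }
            return n;
    }

    int main(void)
    {
            struct rec good[2] = {
                    { sizeof(struct rec) }, { sizeof(struct rec) }
            };

            printf("%d\n", scan((const char *)good, sizeof(good)));  /* 2 */
            return 0;
    }
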
@@ -1225,8 +1345,8 @@
 				   buffer */
 	int num = 0;
 	ext4_lblk_t  nblocks;
-	int i, err;
-	int namelen;
+	int i, namelen, retval;
+	struct ext4_filename fname;
 
 	*res_dir = NULL;
 	sb = dir->i_sb;
@@ -1234,14 +1354,18 @@
 	if (namelen > EXT4_NAME_LEN)
 		return NULL;
 
+	retval = ext4_fname_setup_filename(dir, d_name, 1, &fname);
+	if (retval)
+		return ERR_PTR(retval);
+
 	if (ext4_has_inline_data(dir)) {
 		int has_inline_data = 1;
-		ret = ext4_find_inline_entry(dir, d_name, res_dir,
+		ret = ext4_find_inline_entry(dir, &fname, d_name, res_dir,
 					     &has_inline_data);
 		if (has_inline_data) {
 			if (inlined)
 				*inlined = 1;
-			return ret;
+			goto cleanup_and_exit;
 		}
 	}
 
@@ -1256,14 +1380,14 @@
 		goto restart;
 	}
 	if (is_dx(dir)) {
-		bh = ext4_dx_find_entry(dir, d_name, res_dir, &err);
+		ret = ext4_dx_find_entry(dir, &fname, res_dir);
 		/*
 		 * On success, or if the error was file not found,
 		 * return.  Otherwise, fall back to doing a search the
 		 * old fashioned way.
 		 */
-		if (bh || (err != ERR_BAD_DX_DIR))
-			return bh;
+		if (!IS_ERR(ret) || PTR_ERR(ret) != ERR_BAD_DX_DIR)
+			goto cleanup_and_exit;
 		dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
 			       "falling back\n"));
 	}
@@ -1292,7 +1416,14 @@
 					break;
 				}
 				num++;
-				bh = ext4_getblk(NULL, dir, b++, 0, &err);
+				bh = ext4_getblk(NULL, dir, b++, 0);
+				if (unlikely(IS_ERR(bh))) {
+					if (ra_max == 0) {
+						ret = bh;
+						goto cleanup_and_exit;
+					}
+					break;
+				}
 				bh_use[ra_max] = bh;
 				if (bh)
 					ll_rw_block(READ | REQ_META | REQ_PRIO,
@@ -1320,7 +1451,7 @@
 			goto next;
 		}
 		set_buffer_verified(bh);
-		i = search_dirblock(bh, dir, d_name,
+		i = search_dirblock(bh, dir, &fname, d_name,
 			    block << EXT4_BLOCK_SIZE_BITS(sb), res_dir);
 		if (i == 1) {
 			EXT4_I(dir)->i_dir_start_lookup = block;
@@ -1351,58 +1482,62 @@
 	/* Clean up the read-ahead blocks */
 	for (; ra_ptr < ra_max; ra_ptr++)
 		brelse(bh_use[ra_ptr]);
+	ext4_fname_free_filename(&fname);
 	return ret;
 }
 
-static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
-		       struct ext4_dir_entry_2 **res_dir, int *err)
+static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
+			struct ext4_filename *fname,
+			struct ext4_dir_entry_2 **res_dir)
 {
 	struct super_block * sb = dir->i_sb;
-	struct dx_hash_info	hinfo;
 	struct dx_frame frames[2], *frame;
+	const struct qstr *d_name = fname->usr_fname;
 	struct buffer_head *bh;
 	ext4_lblk_t block;
 	int retval;
 
-	if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err)))
-		return NULL;
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+	*res_dir = NULL;
+#endif
+	frame = dx_probe(fname, dir, NULL, frames);
+	if (IS_ERR(frame))
+		return (struct buffer_head *) frame;
 	do {
 		block = dx_get_block(frame->at);
 		bh = ext4_read_dirblock(dir, block, DIRENT);
-		if (IS_ERR(bh)) {
-			*err = PTR_ERR(bh);
+		if (IS_ERR(bh))
 			goto errout;
-		}
-		retval = search_dirblock(bh, dir, d_name,
+
+		retval = search_dirblock(bh, dir, fname, d_name,
 					 block << EXT4_BLOCK_SIZE_BITS(sb),
 					 res_dir);
-		if (retval == 1) { 	/* Success! */
-			dx_release(frames);
-			return bh;
-		}
+		if (retval == 1)
+			goto success;
 		brelse(bh);
 		if (retval == -1) {
-			*err = ERR_BAD_DX_DIR;
+			bh = ERR_PTR(ERR_BAD_DX_DIR);
 			goto errout;
 		}
 
 		/* Check to see if we should continue to search */
-		retval = ext4_htree_next_block(dir, hinfo.hash, frame,
+		retval = ext4_htree_next_block(dir, fname->hinfo.hash, frame,
 					       frames, NULL);
 		if (retval < 0) {
 			ext4_warning(sb,
-			     "error reading index page in directory #%lu",
-			     dir->i_ino);
-			*err = retval;
+			     "error %d reading index page in directory #%lu",
+			     retval, dir->i_ino);
+			bh = ERR_PTR(retval);
 			goto errout;
 		}
 	} while (retval == 1);
 
-	*err = -ENOENT;
+	bh = NULL;
 errout:
 	dxtrace(printk(KERN_DEBUG "%s not found\n", d_name->name));
-	dx_release (frames);
-	return NULL;
+success:
+	dx_release(frames);
+	return bh;
 }
 
 static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
@@ -1415,6 +1550,8 @@
 		return ERR_PTR(-ENAMETOOLONG);
 
 	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
+	if (IS_ERR(bh))
+		return (struct dentry *) bh;
 	inode = NULL;
 	if (bh) {
 		__u32 ino = le32_to_cpu(de->inode);
@@ -1424,9 +1561,8 @@
 			return ERR_PTR(-EIO);
 		}
 		if (unlikely(ino == dir->i_ino)) {
-			EXT4_ERROR_INODE(dir, "'%.*s' linked to parent dir",
-					 dentry->d_name.len,
-					 dentry->d_name.name);
+			EXT4_ERROR_INODE(dir, "'%pd' linked to parent dir",
+					 dentry);
 			return ERR_PTR(-EIO);
 		}
 		inode = ext4_iget_normal(dir->i_sb, ino);
@@ -1436,6 +1572,18 @@
 					 ino);
 			return ERR_PTR(-EIO);
 		}
+		if (!IS_ERR(inode) && ext4_encrypted_inode(dir) &&
+		    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+		     S_ISLNK(inode->i_mode)) &&
+		    !ext4_is_child_context_consistent_with_parent(dir,
+								  inode)) {
+			iput(inode);
+			ext4_warning(inode->i_sb,
+				     "Inconsistent encryption contexts: %lu/%lu\n",
+				     (unsigned long) dir->i_ino,
+				     (unsigned long) inode->i_ino);
+			return ERR_PTR(-EPERM);
+		}
 	}
 	return d_splice_alias(inode, dentry);
 }
@@ -1449,6 +1597,8 @@
 	struct buffer_head *bh;
 
 	bh = ext4_find_entry(child->d_inode, &dotdot, &de, NULL);
+	if (IS_ERR(bh))
+		return (struct dentry *) bh;
 	if (!bh)
 		return ERR_PTR(-ENOENT);
 	ino = le32_to_cpu(de->inode);
@@ -1519,7 +1669,7 @@
  */
 static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
 			struct buffer_head **bh,struct dx_frame *frame,
-			struct dx_hash_info *hinfo, int *error)
+			struct dx_hash_info *hinfo)
 {
 	unsigned blocksize = dir->i_sb->s_blocksize;
 	unsigned count, continued;
@@ -1534,16 +1684,14 @@
 	int	csum_size = 0;
 	int	err = 0, i;
 
-	if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
-				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	if (ext4_has_metadata_csum(dir->i_sb))
 		csum_size = sizeof(struct ext4_dir_entry_tail);
 
 	bh2 = ext4_append(handle, dir, &newblock);
 	if (IS_ERR(bh2)) {
 		brelse(*bh);
 		*bh = NULL;
-		*error = PTR_ERR(bh2);
-		return NULL;
+		return (struct ext4_dir_entry_2 *) bh2;
 	}
 
 	BUFFER_TRACE(*bh, "get_write_access");
@@ -1560,7 +1708,7 @@
 
 	/* create map in the end of data2 block */
 	map = (struct dx_map_entry *) (data2 + blocksize);
-	count = dx_make_map((struct ext4_dir_entry_2 *) data1,
+	count = dx_make_map(dir, (struct ext4_dir_entry_2 *) data1,
 			     blocksize, hinfo, map);
 	map -= count;
 	dx_sort_map(map, count);
@@ -1583,7 +1731,8 @@
 					hash2, split, count-split));
 
 	/* Fancy dance to stay within two buffers */
-	de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
+	de2 = dx_move_dirents(data1, data2, map + split, count - split,
+			      blocksize);
 	de = dx_pack_dirents(data1, blocksize);
 	de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
 					   (char *) de,
@@ -1599,12 +1748,13 @@
 		initialize_dirent_tail(t, blocksize);
 	}
 
-	dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
-	dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));
+	dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data1,
+			blocksize, 1));
+	dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2,
+			blocksize, 1));
 
 	/* Which block gets the new entry? */
-	if (hinfo->hash >= hash2)
-	{
+	if (hinfo->hash >= hash2) {
 		swap(*bh, bh2);
 		de = de2;
 	}
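
do_split() above sorts the block's entries by hash and divides them near the median so each resulting block holds roughly half. A toy split-point chooser over an already-sorted hash array; keeping runs of equal hashes on one side is this sketch's own simplification, not a copy of the kernel's continuation handling:

    #include <stdio.h>

    static unsigned split_point(const unsigned *sorted_hashes, unsigned count)
    {
            unsigned split = count / 2;

            /* Equal hashes must stay together, so push the split past any
             * run of duplicates straddling the midpoint. */
            while (split < count &&
                   sorted_hashes[split] == sorted_hashes[split - 1])
                    split++;
            return split;
    }

    int main(void)
    {
            unsigned h[] = { 1, 2, 2, 2, 5, 9 };

            printf("%u\n", split_point(h, 6));      /* prints 4 */
            return 0;
    }
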
@@ -1624,30 +1774,38 @@
 	brelse(bh2);
 	*bh = NULL;
 	ext4_std_error(dir->i_sb, err);
-	*error = err;
-	return NULL;
+	return ERR_PTR(err);
 }
 
 int ext4_find_dest_de(struct inode *dir, struct inode *inode,
 		      struct buffer_head *bh,
 		      void *buf, int buf_size,
-		      const char *name, int namelen,
+		      struct ext4_filename *fname,
 		      struct ext4_dir_entry_2 **dest_de)
 {
 	struct ext4_dir_entry_2 *de;
-	unsigned short reclen = EXT4_DIR_REC_LEN(namelen);
+	unsigned short reclen = EXT4_DIR_REC_LEN(fname_len(fname));
 	int nlen, rlen;
 	unsigned int offset = 0;
 	char *top;
+	int res;
 
 	de = (struct ext4_dir_entry_2 *)buf;
 	top = buf + buf_size - reclen;
 	while ((char *) de <= top) {
 		if (ext4_check_dir_entry(dir, NULL, de, bh,
-					 buf, buf_size, offset))
-			return -EIO;
-		if (ext4_match(namelen, name, de))
-			return -EEXIST;
+					 buf, buf_size, offset)) {
+			res = -EIO;
+			goto return_result;
+		}
+		/* Provide crypto context and crypto buffer to ext4 match */
+		res = ext4_match(fname, de);
+		if (res < 0)
+			goto return_result;
+		if (res > 0) {
+			res = -EEXIST;
+			goto return_result;
+		}
 		nlen = EXT4_DIR_REC_LEN(de->name_len);
 		rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
 		if ((de->inode ? rlen - nlen : rlen) >= reclen)
@@ -1655,17 +1813,22 @@
 		de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
 		offset += rlen;
 	}
-	if ((char *) de > top)
-		return -ENOSPC;
 
-	*dest_de = de;
-	return 0;
+	if ((char *) de > top)
+		res = -ENOSPC;
+	else {
+		*dest_de = de;
+		res = 0;
+	}
+return_result:
+	return res;
 }
 
-void ext4_insert_dentry(struct inode *inode,
-			struct ext4_dir_entry_2 *de,
-			int buf_size,
-			const char *name, int namelen)
+int ext4_insert_dentry(struct inode *dir,
+		       struct inode *inode,
+		       struct ext4_dir_entry_2 *de,
+		       int buf_size,
+		       struct ext4_filename *fname)
 {
 
 	int nlen, rlen;
@@ -1674,7 +1837,7 @@
 	rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
 	if (de->inode) {
 		struct ext4_dir_entry_2 *de1 =
-				(struct ext4_dir_entry_2 *)((char *)de + nlen);
+			(struct ext4_dir_entry_2 *)((char *)de + nlen);
 		de1->rec_len = ext4_rec_len_to_disk(rlen - nlen, buf_size);
 		de->rec_len = ext4_rec_len_to_disk(nlen, buf_size);
 		de = de1;
@@ -1682,9 +1845,11 @@
 	de->file_type = EXT4_FT_UNKNOWN;
 	de->inode = cpu_to_le32(inode->i_ino);
 	ext4_set_de_type(inode->i_sb, de, inode->i_mode);
-	de->name_len = namelen;
-	memcpy(de->name, name, namelen);
+	de->name_len = fname_len(fname);
+	memcpy(de->name, fname_name(fname), fname_len(fname));
+	return 0;
 }
+
 /*
  * Add a new entry into a directory (leaf) block.  If de is non-NULL,
  * it points to a directory entry which is guaranteed to be large
@@ -1693,25 +1858,21 @@
 * space.  It will return -ENOSPC if no space is available, -EEXIST
 * if the directory entry already exists, and -EIO on corruption.
  */
-static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
+static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname,
+			     struct inode *dir,
 			     struct inode *inode, struct ext4_dir_entry_2 *de,
 			     struct buffer_head *bh)
 {
-	struct inode	*dir = dentry->d_parent->d_inode;
-	const char	*name = dentry->d_name.name;
-	int		namelen = dentry->d_name.len;
 	unsigned int	blocksize = dir->i_sb->s_blocksize;
 	int		csum_size = 0;
 	int		err;
 
-	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
-				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	if (ext4_has_metadata_csum(inode->i_sb))
 		csum_size = sizeof(struct ext4_dir_entry_tail);
 
 	if (!de) {
-		err = ext4_find_dest_de(dir, inode,
-					bh, bh->b_data, blocksize - csum_size,
-					name, namelen, &de);
+		err = ext4_find_dest_de(dir, inode, bh, bh->b_data,
+					blocksize - csum_size, fname, &de);
 		if (err)
 			return err;
 	}
@@ -1722,8 +1883,11 @@
 		return err;
 	}
 
-	/* By now the buffer is marked for journaling */
-	ext4_insert_dentry(inode, de, blocksize, name, namelen);
+	/*
+	 * By now the buffer is marked for journaling. Due to crypto
+	 * operations, the following function call may fail.
+	 */
+	err = ext4_insert_dentry(dir, inode, de, blocksize, fname);
+	if (err < 0)
+		return err;
 
 	/*
 	 * XXX shouldn't update any times until successful
@@ -1751,12 +1915,11 @@
  * This converts a one block unindexed directory to a 3 block indexed
  * directory, and adds the dentry to the indexed directory.
  */
-static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
+static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
+			    struct dentry *dentry,
 			    struct inode *inode, struct buffer_head *bh)
 {
 	struct inode	*dir = dentry->d_parent->d_inode;
-	const char	*name = dentry->d_name.name;
-	int		namelen = dentry->d_name.len;
 	struct buffer_head *bh2;
 	struct dx_root	*root;
 	struct dx_frame	frames[2], *frame;
@@ -1767,17 +1930,16 @@
 	unsigned	len;
 	int		retval;
 	unsigned	blocksize;
-	struct dx_hash_info hinfo;
 	ext4_lblk_t  block;
 	struct fake_dirent *fde;
-	int		csum_size = 0;
+	int csum_size = 0;
 
-	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
-				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	if (ext4_has_metadata_csum(inode->i_sb))
 		csum_size = sizeof(struct ext4_dir_entry_tail);
 
 	blocksize =  dir->i_sb->s_blocksize;
 	dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
+	BUFFER_TRACE(bh, "get_write_access");
 	retval = ext4_journal_get_write_access(handle, bh);
 	if (retval) {
 		ext4_std_error(dir->i_sb, retval);
@@ -1833,36 +1995,45 @@
 	dx_set_limit(entries, dx_root_limit(dir, sizeof(root->info)));
 
 	/* Initialize as for dx_probe */
-	hinfo.hash_version = root->info.hash_version;
-	if (hinfo.hash_version <= DX_HASH_TEA)
-		hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
-	hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
-	ext4fs_dirhash(name, namelen, &hinfo);
+	fname->hinfo.hash_version = root->info.hash_version;
+	if (fname->hinfo.hash_version <= DX_HASH_TEA)
+		fname->hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
+	fname->hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
+	ext4fs_dirhash(fname_name(fname), fname_len(fname), &fname->hinfo);
+
+	memset(frames, 0, sizeof(frames));
 	frame = frames;
 	frame->entries = entries;
 	frame->at = entries;
 	frame->bh = bh;
 	bh = bh2;
 
-	ext4_handle_dirty_dx_node(handle, dir, frame->bh);
-	ext4_handle_dirty_dirent_node(handle, dir, bh);
+	retval = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
+	if (retval)
+		goto out_frames;
+	retval = ext4_handle_dirty_dirent_node(handle, dir, bh);
+	if (retval)
+		goto out_frames;
 
-	de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
-	if (!de) {
-		/*
-		 * Even if the block split failed, we have to properly write
-		 * out all the changes we did so far. Otherwise we can end up
-		 * with corrupted filesystem.
-		 */
-		ext4_mark_inode_dirty(handle, dir);
-		dx_release(frames);
-		return retval;
+	de = do_split(handle, dir, &bh, frame, &fname->hinfo);
+	if (IS_ERR(de)) {
+		retval = PTR_ERR(de);
+		goto out_frames;
 	}
 	dx_release(frames);
 
-	retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
+	retval = add_dirent_to_buf(handle, fname, dir, inode, de, bh);
 	brelse(bh);
 	return retval;
+out_frames:
+	/*
+	 * Even if the block split failed, we have to properly write
+	 * out all the changes we did so far. Otherwise we can end up
+	 * with corrupted filesystem.
+	 */
+	ext4_mark_inode_dirty(handle, dir);
+	dx_release(frames);
+	return retval;
 }
 
 /*
@@ -1883,14 +2054,14 @@
 	struct ext4_dir_entry_2 *de;
 	struct ext4_dir_entry_tail *t;
 	struct super_block *sb;
+	struct ext4_filename fname;
 	int	retval;
 	int	dx_fallback=0;
 	unsigned blocksize;
 	ext4_lblk_t block, blocks;
 	int	csum_size = 0;
 
-	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
-				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	if (ext4_has_metadata_csum(inode->i_sb))
 		csum_size = sizeof(struct ext4_dir_entry_tail);
 
 	sb = dir->i_sb;
@@ -1898,10 +2069,15 @@
 	if (!dentry->d_name.len)
 		return -EINVAL;
 
+	retval = ext4_fname_setup_filename(dir, &dentry->d_name, 0, &fname);
+	if (retval)
+		return retval;
+
 	if (ext4_has_inline_data(dir)) {
-		retval = ext4_try_add_inline_entry(handle, dentry, inode);
+		retval = ext4_try_add_inline_entry(handle, &fname,
+						   dentry, inode);
 		if (retval < 0)
-			return retval;
+			goto out;
 		if (retval == 1) {
 			retval = 0;
 			goto out;
@@ -1909,7 +2085,7 @@
 	}
 
 	if (is_dx(dir)) {
-		retval = ext4_dx_add_entry(handle, dentry, inode);
+		retval = ext4_dx_add_entry(handle, &fname, dentry, inode);
 		if (!retval || (retval != ERR_BAD_DX_DIR))
 			goto out;
 		ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
@@ -1919,24 +2095,31 @@
 	blocks = dir->i_size >> sb->s_blocksize_bits;
 	for (block = 0; block < blocks; block++) {
 		bh = ext4_read_dirblock(dir, block, DIRENT);
-		if (IS_ERR(bh))
-			return PTR_ERR(bh);
-
-		retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
+		if (IS_ERR(bh)) {
+			retval = PTR_ERR(bh);
+			bh = NULL;
+			goto out;
+		}
+		retval = add_dirent_to_buf(handle, &fname, dir, inode,
+					   NULL, bh);
 		if (retval != -ENOSPC)
 			goto out;
 
 		if (blocks == 1 && !dx_fallback &&
 		    EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
-			retval = make_indexed_dir(handle, dentry, inode, bh);
+			retval = make_indexed_dir(handle, &fname, dentry,
+						  inode, bh);
 			bh = NULL; /* make_indexed_dir releases bh */
 			goto out;
 		}
 		brelse(bh);
 	}
 	bh = ext4_append(handle, dir, &block);
-	if (IS_ERR(bh))
-		return PTR_ERR(bh);
+	if (IS_ERR(bh)) {
+		retval = PTR_ERR(bh);
+		bh = NULL;
+		goto out;
+	}
 	de = (struct ext4_dir_entry_2 *) bh->b_data;
 	de->inode = 0;
 	de->rec_len = ext4_rec_len_to_disk(blocksize - csum_size, blocksize);
@@ -1946,8 +2129,9 @@
 		initialize_dirent_tail(t, blocksize);
 	}
 
-	retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
+	retval = add_dirent_to_buf(handle, &fname, dir, inode, de, bh);
 out:
+	ext4_fname_free_filename(&fname);
 	brelse(bh);
 	if (retval == 0)
 		ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
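
ext4_add_entry() now pairs ext4_fname_setup_filename() on entry with a single ext4_fname_free_filename() at the shared out: label, so every exit path releases the name exactly once. The single-exit idiom in a self-contained form, with strdup()/free() standing in for the setup/teardown pair and use()/commit() as hypothetical middle steps:

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    static int use(const char *s)    { return s[0] ? 0 : -EINVAL; }
    static int commit(const char *s) { (void)s; return 0; }

    static int do_entry(const char *name)
    {
            char *copy = strdup(name);      /* stands in for setup */
            int ret;

            if (!copy)
                    return -ENOMEM;
            ret = use(copy);                /* every failure point... */
            if (ret)
                    goto out;
            ret = commit(copy);             /* ...funnels through out: */
    out:
            free(copy);                     /* stands in for teardown */
            return ret;
    }

    int main(void)
    {
            return do_entry("x") ? 1 : 0;
    }
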
@@ -1957,21 +2141,20 @@
 /*
  * Returns 0 for success, or a negative error value
  */
-static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
-			     struct inode *inode)
+static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
+			     struct dentry *dentry, struct inode *inode)
 {
 	struct dx_frame frames[2], *frame;
 	struct dx_entry *entries, *at;
-	struct dx_hash_info hinfo;
 	struct buffer_head *bh;
 	struct inode *dir = dentry->d_parent->d_inode;
 	struct super_block *sb = dir->i_sb;
 	struct ext4_dir_entry_2 *de;
 	int err;
 
-	frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
-	if (!frame)
-		return err;
+	frame = dx_probe(fname, dir, NULL, frames);
+	if (IS_ERR(frame))
+		return PTR_ERR(frame);
 	entries = frame->entries;
 	at = frame->at;
 	bh = ext4_read_dirblock(dir, dx_get_block(frame->at), DIRENT);
@@ -1986,7 +2169,7 @@
 	if (err)
 		goto journal_error;
 
-	err = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
+	err = add_dirent_to_buf(handle, fname, dir, inode, NULL, bh);
 	if (err != -ENOSPC)
 		goto cleanup;
 
@@ -2082,10 +2265,12 @@
 			goto cleanup;
 		}
 	}
-	de = do_split(handle, dir, &bh, frame, &hinfo, &err);
-	if (!de)
+	de = do_split(handle, dir, &bh, frame, &fname->hinfo);
+	if (IS_ERR(de)) {
+		err = PTR_ERR(de);
 		goto cleanup;
-	err = add_dirent_to_buf(handle, dentry, inode, de, bh);
+	}
+	err = add_dirent_to_buf(handle, fname, dir, inode, de, bh);
 	goto cleanup;
 
 journal_error:
@@ -2154,8 +2339,7 @@
 			return err;
 	}
 
-	if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
-				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	if (ext4_has_metadata_csum(dir->i_sb))
 		csum_size = sizeof(struct ext4_dir_entry_tail);
 
 	BUFFER_TRACE(bh, "get_write_access");
@@ -2297,6 +2481,46 @@
 	return err;
 }
 
+#if 0
+static int ext4_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+	handle_t *handle;
+	struct inode *inode;
+	int err, retries = 0;
+
+	dquot_initialize(dir);
+
+retry:
+	inode = ext4_new_inode_start_handle(dir, mode,
+					    NULL, 0, NULL,
+					    EXT4_HT_DIR,
+			EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) +
+			  4 + EXT4_XATTR_TRANS_BLOCKS);
+	handle = ext4_journal_current_handle();
+	err = PTR_ERR(inode);
+	if (!IS_ERR(inode)) {
+		inode->i_op = &ext4_file_inode_operations;
+		inode->i_fop = &ext4_file_operations;
+		ext4_set_aops(inode);
+		d_tmpfile(dentry, inode);
+		err = ext4_orphan_add(handle, inode);
+		if (err)
+			goto err_unlock_inode;
+		mark_inode_dirty(inode);
+		unlock_new_inode(inode);
+	}
+	if (handle)
+		ext4_journal_stop(handle);
+	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
+		goto retry;
+	return err;
+err_unlock_inode:
+	ext4_journal_stop(handle);
+	unlock_new_inode(inode);
+	return err;
+}
+#endif
+
 struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode,
 			  struct ext4_dir_entry_2 *de,
 			  int blocksize, int csum_size,
@@ -2336,8 +2560,7 @@
 	int csum_size = 0;
 	int err;
 
-	if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
-				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	if (ext4_has_metadata_csum(dir->i_sb))
 		csum_size = sizeof(struct ext4_dir_entry_tail);
 
 	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
@@ -2352,10 +2575,6 @@
 	dir_block = ext4_append(handle, inode, &block);
 	if (IS_ERR(dir_block))
 		return PTR_ERR(dir_block);
-	BUFFER_TRACE(dir_block, "get_write_access");
-	err = ext4_journal_get_write_access(handle, dir_block);
-	if (err)
-		goto out;
 	de = (struct ext4_dir_entry_2 *)dir_block->b_data;
 	ext4_init_dot_dotdot(inode, de, blocksize, csum_size, dir->i_ino, 0);
 	set_nlink(inode, 2);
@@ -2433,7 +2652,7 @@
 /*
  * routine to check that the specified directory is empty (for rmdir)
  */
-static int empty_dir(struct inode *inode)
+int ext4_empty_dir(struct inode *inode)
 {
 	unsigned int offset;
 	struct buffer_head *bh;
@@ -2474,8 +2693,7 @@
 		 ext4_rec_len_from_disk(de1->rec_len, sb->s_blocksize);
 	de = ext4_next_entry(de1, sb->s_blocksize);
 	while (offset < inode->i_size) {
-		if (!bh ||
-		    (void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
+		if ((void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
 			unsigned int lblock;
 			err = 0;
 			brelse(bh);
@@ -2503,26 +2721,37 @@
 	return 1;
 }
 
-/* ext4_orphan_add() links an unlinked or truncated inode into a list of
+/*
+ * ext4_orphan_add() links an unlinked or truncated inode into a list of
  * such inodes, starting at the superblock, in case we crash before the
  * file is closed/deleted, or in case the inode truncate spans multiple
  * transactions and the last transaction is not recovered after a crash.
  *
  * At filesystem recovery time, we walk this list deleting unlinked
  * inodes and truncating linked inodes in ext4_orphan_cleanup().
+ *
+ * Orphan list manipulation functions must be called under i_mutex unless
+ * we are just creating the inode or deleting it.
  */
 int ext4_orphan_add(handle_t *handle, struct inode *inode)
 {
 	struct super_block *sb = inode->i_sb;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_iloc iloc;
 	int err = 0, rc;
+	bool dirty = false;
 
-	if (!EXT4_SB(sb)->s_journal)
+	if (!sbi->s_journal || is_bad_inode(inode))
 		return 0;
 
-	mutex_lock(&EXT4_SB(sb)->s_orphan_lock);
+	WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) &&
+		     !mutex_is_locked(&inode->i_mutex));
+	/*
+	 * Exit early if the inode is already on the orphan list. This is a
+	 * big speedup since we don't have to contend on the global
+	 * s_orphan_lock.
+	 */
 	if (!list_empty(&EXT4_I(inode)->i_orphan))
-		goto out_unlock;
+		return 0;
 
 	/*
 	 * Orphan handling is only valid for files with data blocks
@@ -2533,48 +2762,51 @@
 	J_ASSERT((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
 		  S_ISLNK(inode->i_mode)) || inode->i_nlink == 0);
 
-	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
-	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
+	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
+	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
 	if (err)
-		goto out_unlock;
+		goto out;
 
 	err = ext4_reserve_inode_write(handle, inode, &iloc);
 	if (err)
-		goto out_unlock;
+		goto out;
+
+	mutex_lock(&sbi->s_orphan_lock);
 	/*
 	 * Due to previous errors the inode may already be a part of the
 	 * on-disk orphan list. If so, skip the on-disk list modification.
 	 */
-	if (NEXT_ORPHAN(inode) && NEXT_ORPHAN(inode) <=
-		(le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)))
-			goto mem_insert;
+	if (!NEXT_ORPHAN(inode) || NEXT_ORPHAN(inode) >
+	    (le32_to_cpu(sbi->s_es->s_inodes_count))) {
+		/* Insert this inode at the head of the on-disk orphan list */
+		NEXT_ORPHAN(inode) = le32_to_cpu(sbi->s_es->s_last_orphan);
+		sbi->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
+		dirty = true;
+	}
+	list_add(&EXT4_I(inode)->i_orphan, &sbi->s_orphan);
+	mutex_unlock(&sbi->s_orphan_lock);
 
-	/* Insert this inode at the head of the on-disk orphan list... */
-	NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan);
-	EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
-	err = ext4_handle_dirty_super(handle, sb);
-	rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
-	if (!err)
-		err = rc;
-
-	/* Only add to the head of the in-memory list if all the
-	 * previous operations succeeded.  If the orphan_add is going to
-	 * fail (possibly taking the journal offline), we can't risk
-	 * leaving the inode on the orphan list: stray orphan-list
-	 * entries can cause panics at unmount time.
-	 *
-	 * This is safe: on error we're going to ignore the orphan list
-	 * anyway on the next recovery. */
-mem_insert:
-	if (!err)
-		list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
-
+	if (dirty) {
+		err = ext4_handle_dirty_super(handle, sb);
+		rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
+		if (!err)
+			err = rc;
+		if (err) {
+			/*
+			 * We have to remove the inode from the in-memory list
+			 * if the addition to the on-disk orphan list failed.
+			 * Stray orphan list entries can cause panics at
+			 * unmount time.
+			 */
+			mutex_lock(&sbi->s_orphan_lock);
+			list_del(&EXT4_I(inode)->i_orphan);
+			mutex_unlock(&sbi->s_orphan_lock);
+		}
+	}
 	jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
 	jbd_debug(4, "orphan inode %lu will point to %d\n",
 			inode->i_ino, NEXT_ORPHAN(inode));
-out_unlock:
-	mutex_unlock(&EXT4_SB(sb)->s_orphan_lock);
-	ext4_std_error(inode->i_sb, err);
+out:
+	ext4_std_error(sb, err);
 	return err;
 }
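The reworked ext4_orphan_add() above moves the list_empty() check in front of
the global s_orphan_lock: this inode's i_orphan membership can only change
under i_mutex (asserted by the WARN_ON_ONCE above), so the unlocked test is
safe and spares a contended mutex acquisition. A minimal, runnable userspace
analogue of that shape; obj, on_list and list_lock are illustrative names,
not ext4 symbols:

	#include <pthread.h>
	#include <stdbool.h>

	/* Per-object state only changes while obj->lock is held, so a
	 * caller holding obj->lock may test it without list_lock. */
	struct obj {
		pthread_mutex_t lock;	/* plays the role of i_mutex */
		bool on_list;		/* i_orphan membership */
	};

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

	static void orphan_add(struct obj *o)	/* called with o->lock held */
	{
		if (o->on_list)		/* unlocked fast path */
			return;
		pthread_mutex_lock(&list_lock);
		o->on_list = true;	/* plus journalled disk update in ext4 */
		pthread_mutex_unlock(&list_lock);
	}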
 
@@ -2586,45 +2818,51 @@
 {
 	struct list_head *prev;
 	struct ext4_inode_info *ei = EXT4_I(inode);
-	struct ext4_sb_info *sbi;
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 	__u32 ino_next;
 	struct ext4_iloc iloc;
 	int err = 0;
 
-	if ((!EXT4_SB(inode->i_sb)->s_journal) &&
-	    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS))
+	if (!sbi->s_journal && !(sbi->s_mount_state & EXT4_ORPHAN_FS))
 		return 0;
 
-	mutex_lock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
+	WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) &&
+		     !mutex_is_locked(&inode->i_mutex));
+	/* Do this quick check before taking global s_orphan_lock. */
 	if (list_empty(&ei->i_orphan))
-		goto out;
+		return 0;
 
-	ino_next = NEXT_ORPHAN(inode);
-	prev = ei->i_orphan.prev;
-	sbi = EXT4_SB(inode->i_sb);
+	if (handle) {
+		/* Grab inode buffer early before taking global s_orphan_lock */
+		err = ext4_reserve_inode_write(handle, inode, &iloc);
+	}
 
+	mutex_lock(&sbi->s_orphan_lock);
 	jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino);
 
+	prev = ei->i_orphan.prev;
 	list_del_init(&ei->i_orphan);
 
 	/* If we're on an error path, we may not have a valid
 	 * transaction handle with which to update the orphan list on
 	 * disk, but we still need to remove the inode from the linked
 	 * list in memory. */
-	if (!handle)
-		goto out;
-
-	err = ext4_reserve_inode_write(handle, inode, &iloc);
-	if (err)
+	if (!handle || err) {
+		mutex_unlock(&sbi->s_orphan_lock);
 		goto out_err;
+	}
 
+	ino_next = NEXT_ORPHAN(inode);
 	if (prev == &sbi->s_orphan) {
 		jbd_debug(4, "superblock will point to %u\n", ino_next);
 		BUFFER_TRACE(sbi->s_sbh, "get_write_access");
 		err = ext4_journal_get_write_access(handle, sbi->s_sbh);
-		if (err)
+		if (err) {
+			mutex_unlock(&sbi->s_orphan_lock);
 			goto out_brelse;
+		}
 		sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
+		mutex_unlock(&sbi->s_orphan_lock);
 		err = ext4_handle_dirty_super(handle, inode->i_sb);
 	} else {
 		struct ext4_iloc iloc2;
@@ -2634,20 +2872,20 @@
 		jbd_debug(4, "orphan inode %lu will point to %u\n",
 			  i_prev->i_ino, ino_next);
 		err = ext4_reserve_inode_write(handle, i_prev, &iloc2);
-		if (err)
+		if (err) {
+			mutex_unlock(&sbi->s_orphan_lock);
 			goto out_brelse;
+		}
 		NEXT_ORPHAN(i_prev) = ino_next;
 		err = ext4_mark_iloc_dirty(handle, i_prev, &iloc2);
+		mutex_unlock(&sbi->s_orphan_lock);
 	}
 	if (err)
 		goto out_brelse;
 	NEXT_ORPHAN(inode) = 0;
 	err = ext4_mark_iloc_dirty(handle, inode, &iloc);
-
 out_err:
 	ext4_std_error(inode->i_sb, err);
-out:
-	mutex_unlock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
 	return err;
 
 out_brelse:
@@ -2670,6 +2908,8 @@
 
 	retval = -ENOENT;
 	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
+	if (IS_ERR(bh))
+		return PTR_ERR(bh);
 	if (!bh)
 		goto end_rmdir;
 
@@ -2680,7 +2920,7 @@
 		goto end_rmdir;
 
 	retval = -ENOTEMPTY;
-	if (!empty_dir(inode))
+	if (!ext4_empty_dir(inode))
 		goto end_rmdir;
 
 	handle = ext4_journal_start(dir, EXT4_HT_DIR,
@@ -2737,6 +2977,8 @@
 
 	retval = -ENOENT;
 	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
+	if (IS_ERR(bh))
+		return PTR_ERR(bh);
 	if (!bh)
 		goto end_unlink;
 
@@ -2789,16 +3031,38 @@
 {
 	handle_t *handle;
 	struct inode *inode;
-	int l, err, retries = 0;
+	int err, len = strlen(symname);
 	int credits;
+	bool encryption_required;
+	struct ext4_str disk_link;
+	struct ext4_encrypted_symlink_data *sd = NULL;
 
-	l = strlen(symname)+1;
-	if (l > dir->i_sb->s_blocksize)
-		return -ENAMETOOLONG;
+	disk_link.len = len + 1;
+	disk_link.name = (char *) symname;
+
+	encryption_required = (ext4_encrypted_inode(dir) ||
+			       DUMMY_ENCRYPTION_ENABLED(EXT4_SB(dir->i_sb)));
+	if (encryption_required) {
+		err = ext4_get_encryption_info(dir);
+		if (err)
+			return err;
+		if (ext4_encryption_info(dir) == NULL)
+			return -EPERM;
+		disk_link.len = (ext4_fname_encrypted_size(dir, len) +
+				 sizeof(struct ext4_encrypted_symlink_data));
+		sd = kzalloc(disk_link.len, GFP_KERNEL);
+		if (!sd)
+			return -ENOMEM;
+	}
+
+	if (disk_link.len > dir->i_sb->s_blocksize) {
+		err = -ENAMETOOLONG;
+		goto err_free_sd;
+	}
 
 	dquot_initialize(dir);
 
-	if (l > EXT4_N_BLOCKS * 4) {
+	if (disk_link.len > EXT4_N_BLOCKS * 4) {
 		/*
 		 * For non-fast symlinks, we just allocate inode and put it on
 		 * orphan list in the first transaction => we need bitmap,
@@ -2817,16 +3081,34 @@
 		credits = EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
 			  EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3;
 	}
-retry:
+
 	inode = ext4_new_inode_start_handle(dir, S_IFLNK|S_IRWXUGO,
 					    &dentry->d_name, 0, NULL,
 					    EXT4_HT_DIR, credits);
 	handle = ext4_journal_current_handle();
-	err = PTR_ERR(inode);
-	if (IS_ERR(inode))
-		goto out_stop;
+	if (IS_ERR(inode)) {
+		if (handle)
+			ext4_journal_stop(handle);
+		err = PTR_ERR(inode);
+		goto err_free_sd;
+	}
 
-	if (l > EXT4_N_BLOCKS * 4) {
+	if (encryption_required) {
+		struct qstr istr;
+		struct ext4_str ostr;
+
+		istr.name = (const unsigned char *) symname;
+		istr.len = len;
+		ostr.name = sd->encrypted_path;
+		ostr.len = disk_link.len;
+		err = ext4_fname_usr_to_disk(inode, &istr, &ostr);
+		if (err < 0)
+			goto err_drop_inode;
+		sd->len = cpu_to_le16(ostr.len);
+		disk_link.name = (char *) sd;
+	}
+
+	if (disk_link.len > EXT4_N_BLOCKS * 4) {
 		inode->i_op = &ext4_symlink_inode_operations;
 		ext4_set_aops(inode);
 		/*
@@ -2842,9 +3124,10 @@
 		drop_nlink(inode);
 		err = ext4_orphan_add(handle, inode);
 		ext4_journal_stop(handle);
+		handle = NULL;
 		if (err)
 			goto err_drop_inode;
-		err = __page_symlink(inode, symname, l, 1);
+		err = __page_symlink(inode, disk_link.name, disk_link.len, 1);
 		if (err)
 			goto err_drop_inode;
 		/*
@@ -2856,36 +3139,40 @@
 				EXT4_INDEX_EXTRA_TRANS_BLOCKS + 1);
 		if (IS_ERR(handle)) {
 			err = PTR_ERR(handle);
+			handle = NULL;
 			goto err_drop_inode;
 		}
 		set_nlink(inode, 1);
 		err = ext4_orphan_del(handle, inode);
-		if (err) {
-			ext4_journal_stop(handle);
-			clear_nlink(inode);
+		if (err)
 			goto err_drop_inode;
-		}
 	} else {
 		/* clear the extent format for fast symlink */
 		ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
-		inode->i_op = &ext4_fast_symlink_inode_operations;
-		memcpy((char *)&EXT4_I(inode)->i_data, symname, l);
-		inode->i_size = l-1;
+		inode->i_op = encryption_required ?
+			&ext4_symlink_inode_operations :
+			&ext4_fast_symlink_inode_operations;
+		memcpy((char *)&EXT4_I(inode)->i_data, disk_link.name,
+		       disk_link.len);
+		inode->i_size = disk_link.len - 1;
 	}
 	EXT4_I(inode)->i_disksize = inode->i_size;
 	err = ext4_add_nondir(handle, dentry, inode);
 	if (!err && IS_DIRSYNC(dir))
 		ext4_handle_sync(handle);
 
-out_stop:
 	if (handle)
 		ext4_journal_stop(handle);
-	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
-		goto retry;
+	kfree(sd);
 	return err;
 err_drop_inode:
+	if (handle)
+		ext4_journal_stop(handle);
+	clear_nlink(inode);
 	unlock_new_inode(inode);
 	iput(inode);
+err_free_sd:
+	kfree(sd);
 	return err;
 }
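For reference, the encrypted-symlink payload built above is a length-prefixed
blob: sd->len holds the ciphertext length (little-endian on disk) followed by
the encrypted path bytes, and that whole record is what lands in i_data (fast
symlink) or the symlink page. A sketch of packing such a record; enc_symlink
and pack_symlink() are illustrative stand-ins, only the layout mirrors the
struct ext4_encrypted_symlink_data usage in this hunk:

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	struct enc_symlink {
		uint16_t len;		/* cpu_to_le16() in the kernel */
		char encrypted_path[];	/* ciphertext follows the header */
	};

	static struct enc_symlink *pack_symlink(const char *ct, uint16_t ct_len)
	{
		struct enc_symlink *sd = calloc(1, sizeof(*sd) + ct_len);

		if (!sd)
			return NULL;
		sd->len = ct_len;	/* host-endian here; LE on disk */
		memcpy(sd->encrypted_path, ct, ct_len);
		return sd;
	}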
 
@@ -2898,13 +3185,15 @@
 
 	if (inode->i_nlink >= EXT4_LINK_MAX)
 		return -EMLINK;
-
+	if (ext4_encrypted_inode(dir) &&
+	    !ext4_is_child_context_consistent_with_parent(dir, inode))
+		return -EPERM;
 	dquot_initialize(dir);
 
 retry:
 	handle = ext4_journal_start(dir, EXT4_HT_DIR,
 		(EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
-		 EXT4_INDEX_EXTRA_TRANS_BLOCKS));
+		 EXT4_INDEX_EXTRA_TRANS_BLOCKS) + 1);
 	if (IS_ERR(handle))
 		return PTR_ERR(handle);
 
@@ -2918,6 +3207,11 @@
 	err = ext4_add_entry(handle, dentry, inode);
 	if (!err) {
 		ext4_mark_inode_dirty(handle, inode);
+		/* This can happen only for a tmpfile being
+		 * linked for the first time.
+		 */
+		if (inode->i_nlink == 1)
+			ext4_orphan_del(handle, inode);
 		d_instantiate(dentry, inode);
 	} else {
 		drop_nlink(inode);
@@ -2959,205 +3253,536 @@
 	return ext4_get_first_inline_block(inode, parent_de, retval);
 }
 
+struct ext4_renament {
+	struct inode *dir;
+	struct dentry *dentry;
+	struct inode *inode;
+	bool is_dir;
+	int dir_nlink_delta;
+
+	/* entry for "dentry" */
+	struct buffer_head *bh;
+	struct ext4_dir_entry_2 *de;
+	int inlined;
+
+	/* entry for ".." in inode if it's a directory */
+	struct buffer_head *dir_bh;
+	struct ext4_dir_entry_2 *parent_de;
+	int dir_inlined;
+};
+
+static int ext4_rename_dir_prepare(handle_t *handle, struct ext4_renament *ent)
+{
+	int retval;
+
+	ent->dir_bh = ext4_get_first_dir_block(handle, ent->inode,
+					      &retval, &ent->parent_de,
+					      &ent->dir_inlined);
+	if (!ent->dir_bh)
+		return retval;
+	if (le32_to_cpu(ent->parent_de->inode) != ent->dir->i_ino)
+		return -EIO;
+	BUFFER_TRACE(ent->dir_bh, "get_write_access");
+	return ext4_journal_get_write_access(handle, ent->dir_bh);
+}
+
+static int ext4_rename_dir_finish(handle_t *handle, struct ext4_renament *ent,
+				  unsigned dir_ino)
+{
+	int retval;
+
+	ent->parent_de->inode = cpu_to_le32(dir_ino);
+	BUFFER_TRACE(ent->dir_bh, "call ext4_handle_dirty_metadata");
+	if (!ent->dir_inlined) {
+		if (is_dx(ent->inode)) {
+			retval = ext4_handle_dirty_dx_node(handle,
+							   ent->inode,
+							   ent->dir_bh);
+		} else {
+			retval = ext4_handle_dirty_dirent_node(handle,
+							       ent->inode,
+							       ent->dir_bh);
+		}
+	} else {
+		retval = ext4_mark_inode_dirty(handle, ent->inode);
+	}
+	if (retval) {
+		ext4_std_error(ent->dir->i_sb, retval);
+		return retval;
+	}
+	return 0;
+}
+
+static int ext4_setent(handle_t *handle, struct ext4_renament *ent,
+		       unsigned ino, unsigned file_type)
+{
+	int retval;
+
+	BUFFER_TRACE(ent->bh, "get write access");
+	retval = ext4_journal_get_write_access(handle, ent->bh);
+	if (retval)
+		return retval;
+	ent->de->inode = cpu_to_le32(ino);
+	if (EXT4_HAS_INCOMPAT_FEATURE(ent->dir->i_sb,
+				      EXT4_FEATURE_INCOMPAT_FILETYPE))
+		ent->de->file_type = file_type;
+	ent->dir->i_version++;
+	ent->dir->i_ctime = ent->dir->i_mtime =
+		ext4_current_time(ent->dir);
+	ext4_mark_inode_dirty(handle, ent->dir);
+	BUFFER_TRACE(ent->bh, "call ext4_handle_dirty_metadata");
+	if (!ent->inlined) {
+		retval = ext4_handle_dirty_dirent_node(handle,
+						       ent->dir, ent->bh);
+		if (unlikely(retval)) {
+			ext4_std_error(ent->dir->i_sb, retval);
+			return retval;
+		}
+	}
+	brelse(ent->bh);
+	ent->bh = NULL;
+
+	return 0;
+}
+
+static int ext4_find_delete_entry(handle_t *handle, struct inode *dir,
+				  const struct qstr *d_name)
+{
+	int retval = -ENOENT;
+	struct buffer_head *bh;
+	struct ext4_dir_entry_2 *de;
+
+	bh = ext4_find_entry(dir, d_name, &de, NULL);
+	if (IS_ERR(bh))
+		return PTR_ERR(bh);
+	if (bh) {
+		retval = ext4_delete_entry(handle, dir, de, bh);
+		brelse(bh);
+	}
+	return retval;
+}
+
+static void ext4_rename_delete(handle_t *handle, struct ext4_renament *ent,
+			       int force_reread)
+{
+	int retval;
+	/*
+	 * ent->de could have moved from under us during htree split, so make
+	 * sure that we are deleting the right entry.  We might also be pointing
+	 * to a stale entry in the unused part of ent->bh so just checking inum
+	 * and the name isn't enough.
+	 */
+	if (le32_to_cpu(ent->de->inode) != ent->inode->i_ino ||
+	    ent->de->name_len != ent->dentry->d_name.len ||
+	    strncmp(ent->de->name, ent->dentry->d_name.name,
+		    ent->de->name_len) ||
+	    force_reread) {
+		retval = ext4_find_delete_entry(handle, ent->dir,
+						&ent->dentry->d_name);
+	} else {
+		retval = ext4_delete_entry(handle, ent->dir, ent->de, ent->bh);
+		if (retval == -ENOENT) {
+			retval = ext4_find_delete_entry(handle, ent->dir,
+							&ent->dentry->d_name);
+		}
+	}
+
+	if (retval) {
+		ext4_warning(ent->dir->i_sb,
+				"Deleting old file (%lu), %d, error=%d",
+				ent->dir->i_ino, ent->dir->i_nlink, retval);
+	}
+}
+
+#if 0
+static void ext4_update_dir_count(handle_t *handle, struct ext4_renament *ent)
+{
+	if (ent->dir_nlink_delta) {
+		if (ent->dir_nlink_delta == -1)
+			ext4_dec_count(handle, ent->dir);
+		else
+			ext4_inc_count(handle, ent->dir);
+		ext4_mark_inode_dirty(handle, ent->dir);
+	}
+}
+
+static struct inode *ext4_whiteout_for_rename(struct ext4_renament *ent,
+					      int credits, handle_t **h)
+{
+	struct inode *wh;
+	handle_t *handle;
+	int retries = 0;
+
+	/*
+	 * for inode block, sb block, group summaries,
+	 * and inode bitmap
+	 */
+	credits += (EXT4_MAXQUOTAS_TRANS_BLOCKS(ent->dir->i_sb) +
+		    EXT4_XATTR_TRANS_BLOCKS + 4);
+retry:
+	wh = ext4_new_inode_start_handle(ent->dir, S_IFCHR | WHITEOUT_MODE,
+					 &ent->dentry->d_name, 0, NULL,
+					 EXT4_HT_DIR, credits);
+
+	handle = ext4_journal_current_handle();
+	if (IS_ERR(wh)) {
+		if (handle)
+			ext4_journal_stop(handle);
+		if (PTR_ERR(wh) == -ENOSPC &&
+		    ext4_should_retry_alloc(ent->dir->i_sb, &retries))
+			goto retry;
+	} else {
+		*h = handle;
+		init_special_inode(wh, wh->i_mode, WHITEOUT_DEV);
+		wh->i_op = &ext4_special_inode_operations;
+	}
+	return wh;
+}
+#endif
+
 /*
  * Anybody can rename anything with this: the permission checks are left to the
  * higher-level routines.
+ *
+ * N.B. old_{dentry,inode} refers to the source dentry/inode, while
+ * new_{dentry,inode} refers to the destination dentry/inode. This
+ * naming comes from rename(const char *oldpath, const char *newpath).
  */
 static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
-		       struct inode *new_dir, struct dentry *new_dentry)
+		       struct inode *new_dir, struct dentry *new_dentry,
+		       unsigned int flags)
 {
-	handle_t *handle;
-	struct inode *old_inode, *new_inode;
-	struct buffer_head *old_bh, *new_bh, *dir_bh;
-	struct ext4_dir_entry_2 *old_de, *new_de;
-	int retval, force_da_alloc = 0;
-	int inlined = 0, new_inlined = 0;
-	struct ext4_dir_entry_2 *parent_de;
+	handle_t *handle = NULL;
+	struct ext4_renament old = {
+		.dir = old_dir,
+		.dentry = old_dentry,
+		.inode = old_dentry->d_inode,
+	};
+	struct ext4_renament new = {
+		.dir = new_dir,
+		.dentry = new_dentry,
+		.inode = new_dentry->d_inode,
+	};
+	int force_reread;
+	int retval;
+	struct inode *whiteout = NULL;
+	int credits;
+	u8 old_file_type;
 
-	dquot_initialize(old_dir);
-	dquot_initialize(new_dir);
-
-	old_bh = new_bh = dir_bh = NULL;
+	dquot_initialize(old.dir);
+	dquot_initialize(new.dir);
 
 	/* Initialize quotas before so that eventual writes go
 	 * in separate transaction */
-	if (new_dentry->d_inode)
-		dquot_initialize(new_dentry->d_inode);
-	handle = ext4_journal_start(old_dir, EXT4_HT_DIR,
-		(2 * EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) +
-		 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
+	if (new.inode)
+		dquot_initialize(new.inode);
 
-	if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
-		ext4_handle_sync(handle);
-
-	old_bh = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de, NULL);
+	old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
+	if (IS_ERR(old.bh))
+		return PTR_ERR(old.bh);
 	/*
 	 *  The inode number check is _not_ due to possible I/O errors.
 	 *  We might rmdir the source, keep it as pwd of some process
 	 *  and merrily kill the link to whatever was created under the
 	 *  same name. Goodbye sticky bit ;-<
 	 */
-	old_inode = old_dentry->d_inode;
 	retval = -ENOENT;
-	if (!old_bh || le32_to_cpu(old_de->inode) != old_inode->i_ino)
+	if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino)
 		goto end_rename;
 
-	new_inode = new_dentry->d_inode;
-	new_bh = ext4_find_entry(new_dir, &new_dentry->d_name,
-				 &new_de, &new_inlined);
-	if (new_bh) {
-		if (!new_inode) {
-			brelse(new_bh);
-			new_bh = NULL;
+	if ((old.dir != new.dir) &&
+	    ext4_encrypted_inode(new.dir) &&
+	    !ext4_is_child_context_consistent_with_parent(new.dir,
+							  old.inode)) {
+		retval = -EPERM;
+		goto end_rename;
+	}
+
+	new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
+				 &new.de, &new.inlined);
+	if (IS_ERR(new.bh)) {
+		retval = PTR_ERR(new.bh);
+		new.bh = NULL;
+		goto end_rename;
+	}
+	if (new.bh) {
+		if (!new.inode) {
+			brelse(new.bh);
+			new.bh = NULL;
 		}
 	}
-	if (S_ISDIR(old_inode->i_mode)) {
-		if (new_inode) {
+	if (new.inode && !test_opt(new.dir->i_sb, NO_AUTO_DA_ALLOC))
+		ext4_alloc_da_blocks(old.inode);
+
+	credits = (2 * EXT4_DATA_TRANS_BLOCKS(old.dir->i_sb) +
+		   EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2);
+#if 0
+	if (!(flags & RENAME_WHITEOUT)) {
+#endif
+		handle = ext4_journal_start(old.dir, EXT4_HT_DIR, credits);
+		if (IS_ERR(handle))
+			return PTR_ERR(handle);
+#if 0
+	} else {
+		whiteout = ext4_whiteout_for_rename(&old, credits, &handle);
+		if (IS_ERR(whiteout))
+			return PTR_ERR(whiteout);
+	}
+#endif
+
+	if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir))
+		ext4_handle_sync(handle);
+
+	if (S_ISDIR(old.inode->i_mode)) {
+		if (new.inode) {
 			retval = -ENOTEMPTY;
-			if (!empty_dir(new_inode))
+			if (!ext4_empty_dir(new.inode))
+				goto end_rename;
+		} else {
+			retval = -EMLINK;
+			if (new.dir != old.dir && EXT4_DIR_LINK_MAX(new.dir))
 				goto end_rename;
 		}
-		retval = -EIO;
-		dir_bh = ext4_get_first_dir_block(handle, old_inode,
-						  &retval, &parent_de,
-						  &inlined);
-		if (!dir_bh)
-			goto end_rename;
-		if (le32_to_cpu(parent_de->inode) != old_dir->i_ino)
-			goto end_rename;
-		retval = -EMLINK;
-		if (!new_inode && new_dir != old_dir &&
-		    EXT4_DIR_LINK_MAX(new_dir))
-			goto end_rename;
-		BUFFER_TRACE(dir_bh, "get_write_access");
-		retval = ext4_journal_get_write_access(handle, dir_bh);
+		retval = ext4_rename_dir_prepare(handle, &old);
 		if (retval)
 			goto end_rename;
 	}
-	if (!new_bh) {
-		retval = ext4_add_entry(handle, new_dentry, old_inode);
+	/*
+	 * If we're renaming a file within an inline_data dir and adding or
+	 * setting the new dirent causes a conversion from inline_data to
+	 * extents/blockmap, we need to force the dirent delete code to
+	 * re-read the directory, or else we end up trying to delete a dirent
+	 * from what is now the extent tree root (or a block map).
+	 */
+	force_reread = (new.dir->i_ino == old.dir->i_ino &&
+			ext4_test_inode_flag(new.dir, EXT4_INODE_INLINE_DATA));
+
+	old_file_type = old.de->file_type;
+	if (whiteout) {
+		/*
+		 * Do this before adding the new entry, so old.de is
+		 * guaranteed to still point at the valid old entry.
+		 */
+		retval = ext4_setent(handle, &old, whiteout->i_ino,
+				     EXT4_FT_CHRDEV);
+		if (retval)
+			goto end_rename;
+		ext4_mark_inode_dirty(handle, whiteout);
+	}
+	if (!new.bh) {
+		retval = ext4_add_entry(handle, new.dentry, old.inode);
 		if (retval)
 			goto end_rename;
 	} else {
-		BUFFER_TRACE(new_bh, "get write access");
-		retval = ext4_journal_get_write_access(handle, new_bh);
+		retval = ext4_setent(handle, &new,
+				     old.inode->i_ino, old_file_type);
 		if (retval)
 			goto end_rename;
-		new_de->inode = cpu_to_le32(old_inode->i_ino);
-		if (EXT4_HAS_INCOMPAT_FEATURE(new_dir->i_sb,
-					      EXT4_FEATURE_INCOMPAT_FILETYPE))
-			new_de->file_type = old_de->file_type;
-		new_dir->i_version++;
-		new_dir->i_ctime = new_dir->i_mtime =
-					ext4_current_time(new_dir);
-		ext4_mark_inode_dirty(handle, new_dir);
-		BUFFER_TRACE(new_bh, "call ext4_handle_dirty_metadata");
-		if (!new_inlined) {
-			retval = ext4_handle_dirty_dirent_node(handle,
-							       new_dir, new_bh);
-			if (unlikely(retval)) {
-				ext4_std_error(new_dir->i_sb, retval);
-				goto end_rename;
-			}
-		}
-		brelse(new_bh);
-		new_bh = NULL;
 	}
+	if (force_reread)
+		force_reread = !ext4_test_inode_flag(new.dir,
+						     EXT4_INODE_INLINE_DATA);
 
 	/*
 	 * Like most other Unix systems, set the ctime for inodes on a
 	 * rename.
 	 */
-	old_inode->i_ctime = ext4_current_time(old_inode);
-	ext4_mark_inode_dirty(handle, old_inode);
+	old.inode->i_ctime = ext4_current_time(old.inode);
+	ext4_mark_inode_dirty(handle, old.inode);
 
-	/*
-	 * ok, that's it
-	 */
-	if (le32_to_cpu(old_de->inode) != old_inode->i_ino ||
-	    old_de->name_len != old_dentry->d_name.len ||
-	    strncmp(old_de->name, old_dentry->d_name.name, old_de->name_len) ||
-	    (retval = ext4_delete_entry(handle, old_dir,
-					old_de, old_bh)) == -ENOENT) {
-		/* old_de could have moved from under us during htree split, so
-		 * make sure that we are deleting the right entry.  We might
-		 * also be pointing to a stale entry in the unused part of
-		 * old_bh so just checking inum and the name isn't enough. */
-		struct buffer_head *old_bh2;
-		struct ext4_dir_entry_2 *old_de2;
-
-		old_bh2 = ext4_find_entry(old_dir, &old_dentry->d_name,
-					  &old_de2, NULL);
-		if (old_bh2) {
-			retval = ext4_delete_entry(handle, old_dir,
-						   old_de2, old_bh2);
-			brelse(old_bh2);
-		}
-	}
-	if (retval) {
-		ext4_warning(old_dir->i_sb,
-				"Deleting old file (%lu), %d, error=%d",
-				old_dir->i_ino, old_dir->i_nlink, retval);
+	if (!whiteout) {
+		/*
+		 * ok, that's it
+		 */
+		ext4_rename_delete(handle, &old, force_reread);
 	}
 
-	if (new_inode) {
-		ext4_dec_count(handle, new_inode);
-		new_inode->i_ctime = ext4_current_time(new_inode);
+	if (new.inode) {
+		ext4_dec_count(handle, new.inode);
+		new.inode->i_ctime = ext4_current_time(new.inode);
 	}
-	old_dir->i_ctime = old_dir->i_mtime = ext4_current_time(old_dir);
-	ext4_update_dx_flag(old_dir);
-	if (dir_bh) {
-		parent_de->inode = cpu_to_le32(new_dir->i_ino);
-		BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
-		if (!inlined) {
-			if (is_dx(old_inode)) {
-				retval = ext4_handle_dirty_dx_node(handle,
-								   old_inode,
-								   dir_bh);
-			} else {
-				retval = ext4_handle_dirty_dirent_node(handle,
-							old_inode, dir_bh);
-			}
-		} else {
-			retval = ext4_mark_inode_dirty(handle, old_inode);
-		}
-		if (retval) {
-			ext4_std_error(old_dir->i_sb, retval);
+	old.dir->i_ctime = old.dir->i_mtime = ext4_current_time(old.dir);
+	ext4_update_dx_flag(old.dir);
+	if (old.dir_bh) {
+		retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino);
+		if (retval)
 			goto end_rename;
-		}
-		ext4_dec_count(handle, old_dir);
-		if (new_inode) {
-			/* checked empty_dir above, can't have another parent,
-			 * ext4_dec_count() won't work for many-linked dirs */
-			clear_nlink(new_inode);
+
+		ext4_dec_count(handle, old.dir);
+		if (new.inode) {
+			/* checked ext4_empty_dir above, can't have another
+			 * parent, ext4_dec_count() won't work for many-linked
+			 * dirs */
+			clear_nlink(new.inode);
 		} else {
-			ext4_inc_count(handle, new_dir);
-			ext4_update_dx_flag(new_dir);
-			ext4_mark_inode_dirty(handle, new_dir);
+			ext4_inc_count(handle, new.dir);
+			ext4_update_dx_flag(new.dir);
+			ext4_mark_inode_dirty(handle, new.dir);
 		}
 	}
-	ext4_mark_inode_dirty(handle, old_dir);
-	if (new_inode) {
-		ext4_mark_inode_dirty(handle, new_inode);
-		if (!new_inode->i_nlink)
-			ext4_orphan_add(handle, new_inode);
-		if (!test_opt(new_dir->i_sb, NO_AUTO_DA_ALLOC))
-			force_da_alloc = 1;
+	ext4_mark_inode_dirty(handle, old.dir);
+	if (new.inode) {
+		ext4_mark_inode_dirty(handle, new.inode);
+		if (!new.inode->i_nlink)
+			ext4_orphan_add(handle, new.inode);
 	}
 	retval = 0;
 
 end_rename:
-	brelse(dir_bh);
-	brelse(old_bh);
-	brelse(new_bh);
-	ext4_journal_stop(handle);
-	if (retval == 0 && force_da_alloc)
-		ext4_alloc_da_blocks(old_inode);
+	brelse(old.dir_bh);
+	brelse(old.bh);
+	brelse(new.bh);
+	if (whiteout) {
+		if (retval)
+			drop_nlink(whiteout);
+		unlock_new_inode(whiteout);
+		iput(whiteout);
+	}
+	if (handle)
+		ext4_journal_stop(handle);
 	return retval;
 }
 
+#if 0
+static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
+			     struct inode *new_dir, struct dentry *new_dentry)
+{
+	handle_t *handle = NULL;
+	struct ext4_renament old = {
+		.dir = old_dir,
+		.dentry = old_dentry,
+		.inode = old_dentry->d_inode,
+	};
+	struct ext4_renament new = {
+		.dir = new_dir,
+		.dentry = new_dentry,
+		.inode = new_dentry->d_inode,
+	};
+	u8 new_file_type;
+	int retval;
+
+	if ((ext4_encrypted_inode(old_dir) ||
+	     ext4_encrypted_inode(new_dir)) &&
+	    (old_dir != new_dir) &&
+	    (!ext4_is_child_context_consistent_with_parent(new_dir,
+							   old.inode) ||
+	     !ext4_is_child_context_consistent_with_parent(old_dir,
+							   new.inode)))
+		return -EPERM;
+
+	dquot_initialize(old.dir);
+	dquot_initialize(new.dir);
+
+	old.bh = ext4_find_entry(old.dir, &old.dentry->d_name,
+				 &old.de, &old.inlined);
+	if (IS_ERR(old.bh))
+		return PTR_ERR(old.bh);
+	/*
+	 *  The inode number check is _not_ due to possible I/O errors.
+	 *  We might rmdir the source, keep it as pwd of some process
+	 *  and merrily kill the link to whatever was created under the
+	 *  same name. Goodbye sticky bit ;-<
+	 */
+	retval = -ENOENT;
+	if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino)
+		goto end_rename;
+
+	new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
+				 &new.de, &new.inlined);
+	if (IS_ERR(new.bh)) {
+		retval = PTR_ERR(new.bh);
+		new.bh = NULL;
+		goto end_rename;
+	}
+
+	/* RENAME_EXCHANGE case: old *and* new must both exist */
+	if (!new.bh || le32_to_cpu(new.de->inode) != new.inode->i_ino)
+		goto end_rename;
+
+	handle = ext4_journal_start(old.dir, EXT4_HT_DIR,
+		(2 * EXT4_DATA_TRANS_BLOCKS(old.dir->i_sb) +
+		 2 * EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2));
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir))
+		ext4_handle_sync(handle);
+
+	if (S_ISDIR(old.inode->i_mode)) {
+		old.is_dir = true;
+		retval = ext4_rename_dir_prepare(handle, &old);
+		if (retval)
+			goto end_rename;
+	}
+	if (S_ISDIR(new.inode->i_mode)) {
+		new.is_dir = true;
+		retval = ext4_rename_dir_prepare(handle, &new);
+		if (retval)
+			goto end_rename;
+	}
+
+	/*
+	 * Other than the special case of overwriting a directory, parents'
+	 * nlink only needs to be modified if this is a cross directory rename.
+	 */
+	if (old.dir != new.dir && old.is_dir != new.is_dir) {
+		old.dir_nlink_delta = old.is_dir ? -1 : 1;
+		new.dir_nlink_delta = -old.dir_nlink_delta;
+		retval = -EMLINK;
+		if ((old.dir_nlink_delta > 0 && EXT4_DIR_LINK_MAX(old.dir)) ||
+		    (new.dir_nlink_delta > 0 && EXT4_DIR_LINK_MAX(new.dir)))
+			goto end_rename;
+	}
+
+	new_file_type = new.de->file_type;
+	retval = ext4_setent(handle, &new, old.inode->i_ino, old.de->file_type);
+	if (retval)
+		goto end_rename;
+
+	retval = ext4_setent(handle, &old, new.inode->i_ino, new_file_type);
+	if (retval)
+		goto end_rename;
+
+	/*
+	 * Like most other Unix systems, set the ctime for inodes on a
+	 * rename.
+	 */
+	old.inode->i_ctime = ext4_current_time(old.inode);
+	new.inode->i_ctime = ext4_current_time(new.inode);
+	ext4_mark_inode_dirty(handle, old.inode);
+	ext4_mark_inode_dirty(handle, new.inode);
+
+	if (old.dir_bh) {
+		retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino);
+		if (retval)
+			goto end_rename;
+	}
+	if (new.dir_bh) {
+		retval = ext4_rename_dir_finish(handle, &new, old.dir->i_ino);
+		if (retval)
+			goto end_rename;
+	}
+	ext4_update_dir_count(handle, &old);
+	ext4_update_dir_count(handle, &new);
+	retval = 0;
+
+end_rename:
+	brelse(old.dir_bh);
+	brelse(new.dir_bh);
+	brelse(old.bh);
+	brelse(new.bh);
+	if (handle)
+		ext4_journal_stop(handle);
+	return retval;
+}
+#endif
+
+static int ext4_rename2(struct inode *old_dir, struct dentry *old_dentry,
+			struct inode *new_dir, struct dentry *new_dentry)
+{
+	return ext4_rename(old_dir, old_dentry, new_dir, new_dentry, 0);
+}
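ext4_rename2() always passes flags == 0 here because this backport compiles
out the RENAME_WHITEOUT and RENAME_EXCHANGE paths (the #if 0 blocks above).
For orientation, a hypothetical flags-aware dispatcher modeled on that
disabled code; this is not what the backport builds, and the flag-validation
line is an assumption based on the upstream pattern:

	/* Hypothetical sketch, not compiled by this backport. */
	static int rename2_sketch(struct inode *old_dir, struct dentry *old_d,
				  struct inode *new_dir, struct dentry *new_d,
				  unsigned int flags)
	{
		if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE |
			      RENAME_WHITEOUT))
			return -EINVAL;
		if (flags & RENAME_EXCHANGE)	/* atomic swap of two entries */
			return ext4_cross_rename(old_dir, old_d,
						 new_dir, new_d);
		return ext4_rename(old_dir, old_d, new_dir, new_d, flags);
	}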
+
 /*
  * directories can handle most operations...
  */
@@ -3170,13 +3795,19 @@
 	.mkdir		= ext4_mkdir,
 	.rmdir		= ext4_rmdir,
 	.mknod		= ext4_mknod,
-	.rename		= ext4_rename,
+#if 0
+	.tmpfile	= ext4_tmpfile,
+#endif
+	.rename		= ext4_rename2,
 	.setattr	= ext4_setattr,
 	.setxattr	= generic_setxattr,
 	.getxattr	= generic_getxattr,
 	.listxattr	= ext4_listxattr,
 	.removexattr	= generic_removexattr,
 	.get_acl	= ext4_get_acl,
+#if 0
+	.set_acl	= ext4_set_acl,
+#endif
 	.fiemap         = ext4_fiemap,
 };
 
@@ -3187,4 +3818,7 @@
 	.listxattr	= ext4_listxattr,
 	.removexattr	= generic_removexattr,
 	.get_acl	= ext4_get_acl,
+#if 0
+	.set_acl	= ext4_set_acl,
+#endif
 };
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index b12a442..f55384c 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -25,6 +25,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/ratelimit.h>
 
 #include "ext4_jbd2.h"
 #include "xattr.h"
@@ -46,165 +47,6 @@
 }
 
 /*
- * This function is called by ext4_evict_inode() to make sure there is
- * no more pending I/O completion work left to do.
- */
-void ext4_ioend_shutdown(struct inode *inode)
-{
-	wait_queue_head_t *wq = ext4_ioend_wq(inode);
-
-	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
-	/*
-	 * We need to make sure the work structure is finished being
-	 * used before we let the inode get destroyed.
-	 */
-	if (work_pending(&EXT4_I(inode)->i_unwritten_work))
-		cancel_work_sync(&EXT4_I(inode)->i_unwritten_work);
-}
-
-void ext4_free_io_end(ext4_io_end_t *io)
-{
-	BUG_ON(!io);
-	BUG_ON(!list_empty(&io->list));
-	BUG_ON(io->flag & EXT4_IO_END_UNWRITTEN);
-
-	if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count))
-		wake_up_all(ext4_ioend_wq(io->inode));
-	kmem_cache_free(io_end_cachep, io);
-}
-
-/* check a range of space and convert unwritten extents to written. */
-static int ext4_end_io(ext4_io_end_t *io)
-{
-	struct inode *inode = io->inode;
-	loff_t offset = io->offset;
-	ssize_t size = io->size;
-	int ret = 0;
-
-	ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
-		   "list->prev 0x%p\n",
-		   io, inode->i_ino, io->list.next, io->list.prev);
-
-	ret = ext4_convert_unwritten_extents(inode, offset, size);
-	if (ret < 0) {
-		ext4_msg(inode->i_sb, KERN_EMERG,
-			 "failed to convert unwritten extents to written "
-			 "extents -- potential data loss!  "
-			 "(inode %lu, offset %llu, size %zd, error %d)",
-			 inode->i_ino, offset, size, ret);
-	}
-	/* Wake up anyone waiting on unwritten extent conversion */
-	if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
-		wake_up_all(ext4_ioend_wq(inode));
-	if (io->flag & EXT4_IO_END_DIRECT)
-		inode_dio_done(inode);
-	if (io->iocb)
-		aio_complete(io->iocb, io->result, 0);
-	return ret;
-}
-
-static void dump_completed_IO(struct inode *inode)
-{
-#ifdef	EXT4FS_DEBUG
-	struct list_head *cur, *before, *after;
-	ext4_io_end_t *io, *io0, *io1;
-
-	if (list_empty(&EXT4_I(inode)->i_completed_io_list)) {
-		ext4_debug("inode %lu completed_io list is empty\n",
-			   inode->i_ino);
-		return;
-	}
-
-	ext4_debug("Dump inode %lu completed_io list\n", inode->i_ino);
-	list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list) {
-		cur = &io->list;
-		before = cur->prev;
-		io0 = container_of(before, ext4_io_end_t, list);
-		after = cur->next;
-		io1 = container_of(after, ext4_io_end_t, list);
-
-		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
-			    io, inode->i_ino, io0, io1);
-	}
-#endif
-}
-
-/* Add the io_end to per-inode completed end_io list. */
-void ext4_add_complete_io(ext4_io_end_t *io_end)
-{
-	struct ext4_inode_info *ei = EXT4_I(io_end->inode);
-	struct workqueue_struct *wq;
-	unsigned long flags;
-
-	BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
-	wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
-
-	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
-	if (list_empty(&ei->i_completed_io_list))
-		queue_work(wq, &ei->i_unwritten_work);
-	list_add_tail(&io_end->list, &ei->i_completed_io_list);
-	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
-}
-
-static int ext4_do_flush_completed_IO(struct inode *inode)
-{
-	ext4_io_end_t *io;
-	struct list_head unwritten;
-	unsigned long flags;
-	struct ext4_inode_info *ei = EXT4_I(inode);
-	int err, ret = 0;
-
-	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
-	dump_completed_IO(inode);
-	list_replace_init(&ei->i_completed_io_list, &unwritten);
-	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
-
-	while (!list_empty(&unwritten)) {
-		io = list_entry(unwritten.next, ext4_io_end_t, list);
-		BUG_ON(!(io->flag & EXT4_IO_END_UNWRITTEN));
-		list_del_init(&io->list);
-
-		err = ext4_end_io(io);
-		if (unlikely(!ret && err))
-			ret = err;
-		io->flag &= ~EXT4_IO_END_UNWRITTEN;
-		ext4_free_io_end(io);
-	}
-	return ret;
-}
-
-/*
- * work on completed aio dio IO, to convert unwritten extents to extents
- */
-void ext4_end_io_work(struct work_struct *work)
-{
-	struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
-						  i_unwritten_work);
-	ext4_do_flush_completed_IO(&ei->vfs_inode);
-}
-
-int ext4_flush_unwritten_io(struct inode *inode)
-{
-	int ret;
-	WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex) &&
-		     !(inode->i_state & I_FREEING));
-	ret = ext4_do_flush_completed_IO(inode);
-	ext4_unwritten_wait(inode);
-	return ret;
-}
-
-ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
-{
-	ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
-	if (io) {
-		atomic_inc(&EXT4_I(inode)->i_ioend_count);
-		io->inode = inode;
-		INIT_LIST_HEAD(&io->list);
-	}
-	return io;
-}
-
-/*
 * Print a buffer I/O error compatible with fs/buffer.c.  This
  * provides compatibility with dmesg scrapers that look for a specific
  * buffer I/O error message.  We really need a unified error reporting
@@ -214,29 +56,23 @@
 static void buffer_io_error(struct buffer_head *bh)
 {
 	char b[BDEVNAME_SIZE];
-	printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
+	printk_ratelimited(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
 			bdevname(bh->b_bdev, b),
 			(unsigned long long)bh->b_blocknr);
 }
 
-static void ext4_end_bio(struct bio *bio, int error)
+static void ext4_finish_bio(struct bio *bio)
 {
-	ext4_io_end_t *io_end = bio->bi_private;
-	struct inode *inode;
 	int i;
-	int blocksize;
-	sector_t bi_sector = bio->bi_sector;
+	int error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
+	struct bio_vec *bvec;
 
-	BUG_ON(!io_end);
-	inode = io_end->inode;
-	blocksize = 1 << inode->i_blkbits;
-	bio->bi_private = NULL;
-	bio->bi_end_io = NULL;
-	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
-		error = 0;
-	for (i = 0; i < bio->bi_vcnt; i++) {
-		struct bio_vec *bvec = &bio->bi_io_vec[i];
+	bio_for_each_segment_all(bvec, bio, i) {
 		struct page *page = bvec->bv_page;
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+		struct page *data_page = NULL;
+		struct ext4_crypto_ctx *ctx = NULL;
+#endif
 		struct buffer_head *bh, *head;
 		unsigned bio_start = bvec->bv_offset;
 		unsigned bio_end = bio_start + bvec->bv_len;
@@ -246,6 +82,15 @@
 		if (!page)
 			continue;
 
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+		if (!page->mapping) {
+			/* The bounce data pages are unmapped. */
+			data_page = page;
+			ctx = (struct ext4_crypto_ctx *)page_private(data_page);
+			page = ctx->w.control_page;
+		}
+#endif
+
 		if (error) {
 			SetPageError(page);
 			set_bit(AS_EIO, &page->mapping->flags);
@@ -259,7 +104,7 @@
 		bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
 		do {
 			if (bh_offset(bh) < bio_start ||
-			    bh_offset(bh) + blocksize > bio_end) {
+			    bh_offset(bh) + bh->b_size > bio_end) {
 				if (buffer_async_write(bh))
 					under_io++;
 				continue;
@@ -270,28 +115,244 @@
 		} while ((bh = bh->b_this_page) != head);
 		bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
 		local_irq_restore(flags);
-		if (!under_io)
+		if (!under_io) {
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+			if (ctx)
+				ext4_restore_control_page(data_page);
+#endif
 			end_page_writeback(page);
+		}
 	}
-	bio_put(bio);
+}
+
+static void ext4_release_io_end(ext4_io_end_t *io_end)
+{
+	struct bio *bio, *next_bio;
+
+	BUG_ON(!list_empty(&io_end->list));
+	BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
+	WARN_ON(io_end->handle);
+
+	if (atomic_dec_and_test(&EXT4_I(io_end->inode)->i_ioend_count))
+		wake_up_all(ext4_ioend_wq(io_end->inode));
+
+	for (bio = io_end->bio; bio; bio = next_bio) {
+		next_bio = bio->bi_private;
+		ext4_finish_bio(bio);
+		bio_put(bio);
+	}
+	kmem_cache_free(io_end_cachep, io_end);
+}
+
+static void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
+{
+	struct inode *inode = io_end->inode;
+
+	io_end->flag &= ~EXT4_IO_END_UNWRITTEN;
+	/* Wake up anyone waiting on unwritten extent conversion */
+	if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
+		wake_up_all(ext4_ioend_wq(inode));
+}
+
+/*
+ * Check a range of space and convert unwritten extents to written. Note that
+ * we are protected from truncate touching the same part of the extent tree
+ * by the fact that truncate code waits for all DIO to finish (thus exclusion
+ * from direct IO is achieved) and also waits for PageWriteback bits. Thus we
+ * cannot get to ext4_ext_truncate() before all IOs overlapping that range
+ * are completed (this happens from ext4_release_io_end()).
+ */
+static int ext4_end_io(ext4_io_end_t *io)
+{
+	struct inode *inode = io->inode;
+	loff_t offset = io->offset;
+	ssize_t size = io->size;
+	handle_t *handle = io->handle;
+	int ret = 0;
+
+	ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
+		   "list->prev 0x%p\n",
+		   io, inode->i_ino, io->list.next, io->list.prev);
+
+	io->handle = NULL;	/* Following call will use up the handle */
+	ret = ext4_convert_unwritten_extents(handle, inode, offset, size);
+	if (ret < 0) {
+		ext4_msg(inode->i_sb, KERN_EMERG,
+			 "failed to convert unwritten extents to written "
+			 "extents -- potential data loss!  "
+			 "(inode %lu, offset %llu, size %zd, error %d)",
+			 inode->i_ino, offset, size, ret);
+	}
+	ext4_clear_io_unwritten_flag(io);
+	ext4_release_io_end(io);
+	return ret;
+}
+
+static void dump_completed_IO(struct inode *inode, struct list_head *head)
+{
+#ifdef	EXT4FS_DEBUG
+	struct list_head *cur, *before, *after;
+	ext4_io_end_t *io, *io0, *io1;
+
+	if (list_empty(head))
+		return;
+
+	ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
+	list_for_each_entry(io, head, list) {
+		cur = &io->list;
+		before = cur->prev;
+		io0 = container_of(before, ext4_io_end_t, list);
+		after = cur->next;
+		io1 = container_of(after, ext4_io_end_t, list);
+
+		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
+			    io, inode->i_ino, io0, io1);
+	}
+#endif
+}
+
+/* Add the io_end to per-inode completed end_io list. */
+static void ext4_add_complete_io(ext4_io_end_t *io_end)
+{
+	struct ext4_inode_info *ei = EXT4_I(io_end->inode);
+	struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
+	struct workqueue_struct *wq;
+	unsigned long flags;
+
+	/* Only reserved conversions from writeback should enter here */
+	WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
+	WARN_ON(!io_end->handle && sbi->s_journal);
+	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
+	wq = sbi->rsv_conversion_wq;
+	if (list_empty(&ei->i_rsv_conversion_list))
+		queue_work(wq, &ei->i_rsv_conversion_work);
+	list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
+	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
+}
+
+static int ext4_do_flush_completed_IO(struct inode *inode,
+				      struct list_head *head)
+{
+	ext4_io_end_t *io;
+	struct list_head unwritten;
+	unsigned long flags;
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	int err, ret = 0;
+
+	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
+	dump_completed_IO(inode, head);
+	list_replace_init(head, &unwritten);
+	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
+
+	while (!list_empty(&unwritten)) {
+		io = list_entry(unwritten.next, ext4_io_end_t, list);
+		BUG_ON(!(io->flag & EXT4_IO_END_UNWRITTEN));
+		list_del_init(&io->list);
+
+		err = ext4_end_io(io);
+		if (unlikely(!ret && err))
+			ret = err;
+	}
+	return ret;
+}
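ext4_do_flush_completed_IO() uses the classic splice-under-lock shape:
list_replace_init() detaches the whole completed list while the spinlock is
held, and the (potentially slow) extent conversion then runs with the lock
dropped. A runnable userspace analogue; node, pending and flush() are
illustrative names, not kernel symbols:

	#include <pthread.h>
	#include <stddef.h>

	struct node { struct node *next; };

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static struct node *pending;	/* i_rsv_conversion_list analogue */

	static void flush(void (*process)(struct node *))
	{
		pthread_mutex_lock(&lock);
		struct node *batch = pending;	/* steal the whole list */
		pending = NULL;
		pthread_mutex_unlock(&lock);

		while (batch) {			/* no lock held while working */
			struct node *n = batch;

			batch = n->next;
			process(n);
		}
	}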
+
+/*
+ * work on completed IO, to convert unwritten extents to extents
+ */
+void ext4_end_io_rsv_work(struct work_struct *work)
+{
+	struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
+						  i_rsv_conversion_work);
+	ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
+}
+
+ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
+{
+	ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
+	if (io) {
+		atomic_inc(&EXT4_I(inode)->i_ioend_count);
+		io->inode = inode;
+		INIT_LIST_HEAD(&io->list);
+		atomic_set(&io->count, 1);
+	}
+	return io;
+}
+
+void ext4_put_io_end_defer(ext4_io_end_t *io_end)
+{
+	if (atomic_dec_and_test(&io_end->count)) {
+		if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) {
+			ext4_release_io_end(io_end);
+			return;
+		}
+		ext4_add_complete_io(io_end);
+	}
+}
+
+int ext4_put_io_end(ext4_io_end_t *io_end)
+{
+	int err = 0;
+
+	if (atomic_dec_and_test(&io_end->count)) {
+		if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
+			err = ext4_convert_unwritten_extents(io_end->handle,
+						io_end->inode, io_end->offset,
+						io_end->size);
+			io_end->handle = NULL;
+			ext4_clear_io_unwritten_flag(io_end);
+		}
+		ext4_release_io_end(io_end);
+	}
+	return err;
+}
+
+ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
+{
+	atomic_inc(&io_end->count);
+	return io_end;
+}
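The io_end lifetime above is plain reference counting: ext4_init_io_end()
creates the object with count == 1 (the submitter's reference),
ext4_get_io_end() takes one more per attached bio, and the last
ext4_put_io_end()/_defer() either converts the unwritten range or hands the
io_end to the workqueue. A compilable userspace analogue with C11 atomics;
io_end_sketch and the alloc/get/put names are illustrative:

	#include <stdatomic.h>
	#include <stdlib.h>

	struct io_end_sketch {
		atomic_int count;
		/* offset, size, flags ... */
	};

	static struct io_end_sketch *io_end_alloc(void)
	{
		struct io_end_sketch *io = calloc(1, sizeof(*io));

		if (io)
			atomic_init(&io->count, 1);	/* submitter's ref */
		return io;
	}

	static struct io_end_sketch *io_end_get(struct io_end_sketch *io)
	{
		atomic_fetch_add(&io->count, 1);	/* one per bio */
		return io;
	}

	static void io_end_put(struct io_end_sketch *io)
	{
		if (atomic_fetch_sub(&io->count, 1) == 1)
			free(io);	/* last ref: release or defer in ext4 */
	}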
+
+/* BIO completion function for page writeback */
+static void ext4_end_bio(struct bio *bio, int error)
+{
+	ext4_io_end_t *io_end = bio->bi_private;
+	sector_t bi_sector = bio->bi_sector;
+
+	BUG_ON(!io_end);
+	bio->bi_end_io = NULL;
+	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
+		error = 0;
 
 	if (error) {
-		io_end->flag |= EXT4_IO_END_ERROR;
-		ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
+		struct inode *inode = io_end->inode;
+
+		ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
 			     "(offset %llu size %ld starting block %llu)",
-			     inode->i_ino,
+			     error, inode->i_ino,
 			     (unsigned long long) io_end->offset,
 			     (long) io_end->size,
 			     (unsigned long long)
 			     bi_sector >> (inode->i_blkbits - 9));
+		mapping_set_error(inode->i_mapping, error);
 	}
 
-	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
-		ext4_free_io_end(io_end);
-		return;
+	if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
+		/*
+		 * Link bio into list hanging from io_end. We have to do it
+		 * atomically as bio completions can be racing against each
+		 * other.
+		 */
+		bio->bi_private = xchg(&io_end->bio, bio);
+		ext4_put_io_end_defer(io_end);
+	} else {
+		/*
+		 * Drop io_end reference early. Inode can get freed once
+		 * we finish the bio.
+		 */
+		ext4_put_io_end_defer(io_end);
+		ext4_finish_bio(bio);
+		bio_put(bio);
 	}
-
-	ext4_add_complete_io(io_end);
 }
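The "bio->bi_private = xchg(&io_end->bio, bio)" line above is a lock-free
LIFO push: each completing bio atomically swaps itself into the list head and
chains the previous head behind it. It is safe even though the next pointer
is written after the swap, because the only consumer (ext4_release_io_end())
runs after every reference is dropped, i.e. after all pushers have finished.
A userspace analogue with C11 atomics; bio_node, head, push() and drain() are
illustrative names:

	#include <stdatomic.h>
	#include <stddef.h>

	struct bio_node {
		struct bio_node *next;	/* plays the role of bi_private */
	};

	static _Atomic(struct bio_node *) head;	/* io_end->bio analogue */

	static void push(struct bio_node *n)
	{
		n->next = atomic_exchange(&head, n);
	}

	static void drain(void (*fn)(struct bio_node *))
	{
		struct bio_node *n = atomic_exchange(&head, NULL);

		while (n) {	/* single consumer, all pushers done */
			struct bio_node *next = n->next;

			fn(n);
			n = next;
		}
	}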
 
 void ext4_io_submit(struct ext4_io_submit *io)
@@ -305,43 +366,39 @@
 		bio_put(io->io_bio);
 	}
 	io->io_bio = NULL;
-	io->io_op = 0;
+}
+
+void ext4_io_submit_init(struct ext4_io_submit *io,
+			 struct writeback_control *wbc)
+{
+	io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : WRITE);
+	io->io_bio = NULL;
 	io->io_end = NULL;
 }
 
-static int io_submit_init(struct ext4_io_submit *io,
-			  struct inode *inode,
-			  struct writeback_control *wbc,
-			  struct buffer_head *bh)
+static int io_submit_init_bio(struct ext4_io_submit *io,
+			      struct buffer_head *bh)
 {
-	ext4_io_end_t *io_end;
-	struct page *page = bh->b_page;
 	int nvecs = bio_get_nr_vecs(bh->b_bdev);
 	struct bio *bio;
 
-	io_end = ext4_init_io_end(inode, GFP_NOFS);
-	if (!io_end)
-		return -ENOMEM;
 	bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
+	if (!bio)
+		return -ENOMEM;
 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
 	bio->bi_bdev = bh->b_bdev;
-	bio->bi_private = io->io_end = io_end;
 	bio->bi_end_io = ext4_end_bio;
-
-	io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
-
+	bio->bi_private = ext4_get_io_end(io->io_end);
 	io->io_bio = bio;
-	io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : WRITE);
 	io->io_next_block = bh->b_blocknr;
 	return 0;
 }
 
 static int io_submit_add_bh(struct ext4_io_submit *io,
 			    struct inode *inode,
-			    struct writeback_control *wbc,
+			    struct page *page,
 			    struct buffer_head *bh)
 {
-	ext4_io_end_t *io_end;
 	int ret;
 
 	if (io->io_bio && bh->b_blocknr != io->io_next_block) {
@@ -349,26 +406,24 @@
 		ext4_io_submit(io);
 	}
 	if (io->io_bio == NULL) {
-		ret = io_submit_init(io, inode, wbc, bh);
+		ret = io_submit_init_bio(io, bh);
 		if (ret)
 			return ret;
 	}
-	io_end = io->io_end;
-	if (test_clear_buffer_uninit(bh))
-		ext4_set_io_unwritten_flag(inode, io_end);
-	io->io_end->size += bh->b_size;
-	io->io_next_block++;
-	ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
+	ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
 	if (ret != bh->b_size)
 		goto submit_and_retry;
+	io->io_next_block++;
 	return 0;
 }
 
 int ext4_bio_write_page(struct ext4_io_submit *io,
 			struct page *page,
 			int len,
-			struct writeback_control *wbc)
+			struct writeback_control *wbc,
+			bool keep_towrite)
 {
+	struct page *data_page = NULL;
 	struct inode *inode = page->mapping->host;
 	unsigned block_start, blocksize;
 	struct buffer_head *bh, *head;
@@ -380,11 +435,14 @@
 	BUG_ON(!PageLocked(page));
 	BUG_ON(PageWriteback(page));
 
-	set_page_writeback(page);
+	if (keep_towrite)
+		set_page_writeback_keepwrite(page);
+	else
+		set_page_writeback(page);
 	ClearPageError(page);
 
 	/*
-	 * Comments copied from block_write_full_page_endio:
+	 * Comments copied from block_write_full_page:
 	 *
 	 * The page straddles i_size.  It must be zeroed out on each and every
 	 * writepage invocation because it may be mmapped.  "A file is mapped
@@ -425,19 +483,29 @@
 		set_buffer_async_write(bh);
 	} while ((bh = bh->b_this_page) != head);
 
-	/* Now submit buffers to write */
 	bh = head = page_buffers(page);
+
+	if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
+		data_page = ext4_encrypt(inode, page);
+		if (IS_ERR(data_page)) {
+			ret = PTR_ERR(data_page);
+			data_page = NULL;
+			goto out;
+		}
+	}
+
+	/* Now submit buffers to write */
 	do {
 		if (!buffer_async_write(bh))
 			continue;
-		ret = io_submit_add_bh(io, inode, wbc, bh);
+		ret = io_submit_add_bh(io, inode,
+				       data_page ? data_page : page, bh);
 		if (ret) {
 			/*
 			 * We only get here on ENOMEM.  Not much else
 			 * we can do but mark the page as dirty, and
 			 * better luck next time.
 			 */
-			redirty_page_for_writepage(wbc, page);
 			break;
 		}
 		nr_submitted++;
@@ -446,6 +514,11 @@
 
 	/* Error stopped previous loop? Clean up buffers... */
 	if (ret) {
+	out:
+		if (data_page)
+			ext4_restore_control_page(data_page);
+		printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
+		redirty_page_for_writepage(wbc, page);
 		do {
 			clear_buffer_async_write(bh);
 			bh = bh->b_this_page;
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
new file mode 100644
index 0000000..4cd252b
--- /dev/null
+++ b/fs/ext4/readpage.c
@@ -0,0 +1,328 @@
+/*
+ * linux/fs/ext4/readpage.c
+ *
+ * Copyright (C) 2002, Linus Torvalds.
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * This was originally taken from fs/mpage.c
+ *
+ * The ext4_mpage_readpages() function here is intended to replace
+ * mpage_readpages() in the general case, not just for encrypted
+ * files.  It has some limitations (see below), where it will fall
+ * back to block_read_full_page(), but these limitations should only
+ * be hit when page_size != block_size.
+ *
+ * This will allow us to attach a callback function to support ext4
+ * encryption.
+ *
+ * If anything unusual happens, such as:
+ *
+ * - encountering a page which has buffers
+ * - encountering a page which has a non-hole after a hole
+ * - encountering a page with non-contiguous blocks
+ *
+ * then this code just gives up and calls the buffer_head-based read function.
+ * It does handle a page which has holes at the end - that is a common case:
+ * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/kdev_t.h>
+#include <linux/gfp.h>
+#include <linux/bio.h>
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/blkdev.h>
+#include <linux/highmem.h>
+#include <linux/prefetch.h>
+#include <linux/mpage.h>
+#include <linux/writeback.h>
+#include <linux/backing-dev.h>
+#include <linux/pagevec.h>
+#include <linux/cleancache.h>
+
+#include "ext4.h"
+
+/*
+ * Call ext4_decrypt on every single page, reusing the encryption
+ * context.
+ */
+static void completion_pages(struct work_struct *work)
+{
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+	struct ext4_crypto_ctx *ctx =
+		container_of(work, struct ext4_crypto_ctx, r.work);
+	struct bio	*bio	= ctx->r.bio;
+	struct bio_vec	*bv;
+	int		i;
+
+	bio_for_each_segment_all(bv, bio, i) {
+		struct page *page = bv->bv_page;
+
+		int ret = ext4_decrypt(ctx, page);
+		if (ret) {
+			WARN_ON_ONCE(1);
+			SetPageError(page);
+		} else
+			SetPageUptodate(page);
+		unlock_page(page);
+	}
+	ext4_release_crypto_ctx(ctx);
+	bio_put(bio);
+#else
+	BUG();
+#endif
+}
+
+static inline bool ext4_bio_encrypted(struct bio *bio)
+{
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+	return unlikely(bio->bi_private != NULL);
+#else
+	return false;
+#endif
+}
+
+/*
+ * I/O completion handler for multipage BIOs.
+ *
+ * The mpage code never puts partial pages into a BIO (except for end-of-file).
+ * If a page does not map to a contiguous run of blocks then it simply falls
+ * back to block_read_full_page().
+ *
+ * Why is this?  If a page's completion depends on a number of different BIOs
+ * which can complete in any order (or at the same time) then determining the
+ * status of that page is hard.  See end_buffer_async_read() for the details.
+ * There is no point in duplicating all that complexity.
+ */
+static void mpage_end_io(struct bio *bio, int err)
+{
+	struct bio_vec *bv;
+	int i;
+
+	if (ext4_bio_encrypted(bio)) {
+		struct ext4_crypto_ctx *ctx = bio->bi_private;
+
+		if (err) {
+			ext4_release_crypto_ctx(ctx);
+		} else {
+			INIT_WORK(&ctx->r.work, completion_pages);
+			ctx->r.bio = bio;
+			queue_work(ext4_read_workqueue, &ctx->r.work);
+			return;
+		}
+	}
+	bio_for_each_segment_all(bv, bio, i) {
+		struct page *page = bv->bv_page;
+
+		if (!err) {
+			SetPageUptodate(page);
+		} else {
+			ClearPageUptodate(page);
+			SetPageError(page);
+		}
+		unlock_page(page);
+	}
+
+	bio_put(bio);
+}
+
+int ext4_mpage_readpages(struct address_space *mapping,
+			 struct list_head *pages, struct page *page,
+			 unsigned nr_pages)
+{
+	struct bio *bio = NULL;
+	unsigned page_idx;
+	sector_t last_block_in_bio = 0;
+
+	struct inode *inode = mapping->host;
+	const unsigned blkbits = inode->i_blkbits;
+	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
+	const unsigned blocksize = 1 << blkbits;
+	sector_t block_in_file;
+	sector_t last_block;
+	sector_t last_block_in_file;
+	sector_t blocks[MAX_BUF_PER_PAGE];
+	unsigned page_block;
+	struct block_device *bdev = inode->i_sb->s_bdev;
+	int length;
+	unsigned relative_block = 0;
+	struct ext4_map_blocks map;
+
+	map.m_pblk = 0;
+	map.m_lblk = 0;
+	map.m_len = 0;
+	map.m_flags = 0;
+
+	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
+		int fully_mapped = 1;
+		unsigned first_hole = blocks_per_page;
+
+		prefetchw(&page->flags);
+		if (pages) {
+			page = list_entry(pages->prev, struct page, lru);
+			list_del(&page->lru);
+			if (add_to_page_cache_lru(page, mapping,
+						  page->index, GFP_KERNEL))
+				goto next_page;
+		}
+
+		if (page_has_buffers(page))
+			goto confused;
+
+		block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
+		last_block = block_in_file + nr_pages * blocks_per_page;
+		last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
+		if (last_block > last_block_in_file)
+			last_block = last_block_in_file;
+		page_block = 0;
+
+		/*
+		 * Map blocks using the previous result first.
+		 */
+		if ((map.m_flags & EXT4_MAP_MAPPED) &&
+		    block_in_file > map.m_lblk &&
+		    block_in_file < (map.m_lblk + map.m_len)) {
+			unsigned map_offset = block_in_file - map.m_lblk;
+			unsigned last = map.m_len - map_offset;
+
+			for (relative_block = 0; ; relative_block++) {
+				if (relative_block == last) {
+					/* needed? */
+					map.m_flags &= ~EXT4_MAP_MAPPED;
+					break;
+				}
+				if (page_block == blocks_per_page)
+					break;
+				blocks[page_block] = map.m_pblk + map_offset +
+					relative_block;
+				page_block++;
+				block_in_file++;
+			}
+		}
+
+		/*
+		 * Then do more ext4_map_blocks() calls until we are
+		 * done with this page.
+		 */
+		while (page_block < blocks_per_page) {
+			if (block_in_file < last_block) {
+				map.m_lblk = block_in_file;
+				map.m_len = last_block - block_in_file;
+
+				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
+				set_error_page:
+					SetPageError(page);
+					zero_user_segment(page, 0,
+							  PAGE_CACHE_SIZE);
+					unlock_page(page);
+					goto next_page;
+				}
+			}
+			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
+				fully_mapped = 0;
+				if (first_hole == blocks_per_page)
+					first_hole = page_block;
+				page_block++;
+				block_in_file++;
+				continue;
+			}
+			if (first_hole != blocks_per_page)
+				goto confused;		/* hole -> non-hole */
+
+			/* Contiguous blocks? */
+			if (page_block && blocks[page_block-1] != map.m_pblk-1)
+				goto confused;
+			for (relative_block = 0; ; relative_block++) {
+				if (relative_block == map.m_len) {
+					/* needed? */
+					map.m_flags &= ~EXT4_MAP_MAPPED;
+					break;
+				} else if (page_block == blocks_per_page)
+					break;
+				blocks[page_block] = map.m_pblk+relative_block;
+				page_block++;
+				block_in_file++;
+			}
+		}
+		if (first_hole != blocks_per_page) {
+			zero_user_segment(page, first_hole << blkbits,
+					  PAGE_CACHE_SIZE);
+			if (first_hole == 0) {
+				SetPageUptodate(page);
+				unlock_page(page);
+				goto next_page;
+			}
+		} else if (fully_mapped) {
+			SetPageMappedToDisk(page);
+		}
+		if (fully_mapped && blocks_per_page == 1 &&
+		    !PageUptodate(page) && cleancache_get_page(page) == 0) {
+			SetPageUptodate(page);
+			goto confused;
+		}
+
+		/*
+		 * This page will go to BIO.  Do we need to send this
+		 * BIO off first?
+		 */
+		if (bio && (last_block_in_bio != blocks[0] - 1)) {
+		submit_and_realloc:
+			submit_bio(READ, bio);
+			bio = NULL;
+		}
+		if (bio == NULL) {
+			struct ext4_crypto_ctx *ctx = NULL;
+
+			if (ext4_encrypted_inode(inode) &&
+			    S_ISREG(inode->i_mode)) {
+				ctx = ext4_get_crypto_ctx(inode);
+				if (IS_ERR(ctx))
+					goto set_error_page;
+			}
+			bio = bio_alloc(GFP_KERNEL,
+				min_t(int, nr_pages, bio_get_nr_vecs(bdev)));
+			if (!bio) {
+				if (ctx)
+					ext4_release_crypto_ctx(ctx);
+				goto set_error_page;
+			}
+			bio->bi_bdev = bdev;
+			bio->bi_sector = blocks[0] << (blkbits - 9);
+			bio->bi_end_io = mpage_end_io;
+			bio->bi_private = ctx;
+		}
+
+		length = first_hole << blkbits;
+		if (bio_add_page(bio, page, length, 0) < length)
+			goto submit_and_realloc;
+
+		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
+		     (relative_block == map.m_len)) ||
+		    (first_hole != blocks_per_page)) {
+			submit_bio(READ, bio);
+			bio = NULL;
+		} else
+			last_block_in_bio = blocks[blocks_per_page - 1];
+		goto next_page;
+	confused:
+		if (bio) {
+			submit_bio(READ, bio);
+			bio = NULL;
+		}
+		if (!PageUptodate(page))
+			block_read_full_page(page, ext4_get_block);
+		else
+			unlock_page(page);
+	next_page:
+		if (pages)
+			page_cache_release(page);
+	}
+	BUG_ON(pages && !list_empty(pages));
+	if (bio)
+		submit_bio(READ, bio);
+	return 0;
+}
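+
+/*
+ * Usage sketch (assumption based on the callers, not part of this hunk):
+ * ext4 wires this up through its address_space_operations, roughly
+ *
+ *	static int ext4_readpages(struct file *file,
+ *				  struct address_space *mapping,
+ *				  struct list_head *pages, unsigned nr_pages)
+ *	{
+ *		return ext4_mpage_readpages(mapping, pages, NULL, nr_pages);
+ *	}
+ *
+ * so readahead passes a list of pages with page == NULL, while a
+ * single-page ->readpage call passes page != NULL and nr_pages == 1.
+ */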
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index cf0a704..d534e58 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -42,7 +42,7 @@
 void ext4_resize_end(struct super_block *sb)
 {
 	clear_bit_unlock(EXT4_RESIZING, &EXT4_SB(sb)->s_resize_flags);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 }
 
 static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb,
@@ -79,12 +79,20 @@
 	ext4_fsblk_t end = start + input->blocks_count;
 	ext4_group_t group = input->group;
 	ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
-	unsigned overhead = ext4_group_overhead_blocks(sb, group);
-	ext4_fsblk_t metaend = start + overhead;
+	unsigned overhead;
+	ext4_fsblk_t metaend;
 	struct buffer_head *bh = NULL;
 	ext4_grpblk_t free_blocks_count, offset;
 	int err = -EINVAL;
 
+	if (group != sbi->s_groups_count) {
+		ext4_warning(sb, "Cannot add at group %u (only %u groups)",
+			     input->group, sbi->s_groups_count);
+		return -EINVAL;
+	}
+
+	overhead = ext4_group_overhead_blocks(sb, group);
+	metaend = start + overhead;
 	input->free_blocks_count = free_blocks_count =
 		input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
 
@@ -96,10 +104,7 @@
 		       free_blocks_count, input->reserved_blocks);
 
 	ext4_get_group_no_and_offset(sb, start, NULL, &offset);
-	if (group != sbi->s_groups_count)
-		ext4_warning(sb, "Cannot add at group %u (only %u groups)",
-			     input->group, sbi->s_groups_count);
-	else if (offset != 0)
+	if (offset != 0)
 		ext4_warning(sb, "Last group not full");
 	else if (input->reserved_blocks > input->blocks_count / 5)
 		ext4_warning(sb, "Reserved blocks too high (%u)",
@@ -343,6 +348,7 @@
 	bh = sb_getblk(sb, blk);
 	if (unlikely(!bh))
 		return ERR_PTR(-ENOMEM);
+	BUFFER_TRACE(bh, "get_write_access");
 	if ((err = ext4_journal_get_write_access(handle, bh))) {
 		brelse(bh);
 		bh = ERR_PTR(err);
@@ -421,6 +427,7 @@
 		if (unlikely(!bh))
 			return -ENOMEM;
 
+		BUFFER_TRACE(bh, "get_write_access");
 		err = ext4_journal_get_write_access(handle, bh);
 		if (err)
 			return err;
@@ -513,6 +520,7 @@
 				goto out;
 			}
 
+			BUFFER_TRACE(gdb, "get_write_access");
 			err = ext4_journal_get_write_access(handle, gdb);
 			if (err) {
 				brelse(gdb);
@@ -567,6 +575,7 @@
 		bh = bclean(handle, sb, block);
 		if (IS_ERR(bh)) {
 			err = PTR_ERR(bh);
+			bh = NULL;
 			goto out;
 		}
 		overhead = ext4_group_overhead_blocks(sb, group);
@@ -595,6 +604,7 @@
 		bh = bclean(handle, sb, block);
 		if (IS_ERR(bh)) {
 			err = PTR_ERR(bh);
+			bh = NULL;
 			goto out;
 		}
 
@@ -785,14 +795,17 @@
 		goto exit_dind;
 	}
 
+	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
 	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
 	if (unlikely(err))
 		goto exit_dind;
 
+	BUFFER_TRACE(gdb_bh, "get_write_access");
 	err = ext4_journal_get_write_access(handle, gdb_bh);
 	if (unlikely(err))
 		goto exit_dind;
 
+	BUFFER_TRACE(dind, "get_write_access");
 	err = ext4_journal_get_write_access(handle, dind);
 	if (unlikely(err))
 		ext4_std_error(sb, err);
@@ -897,6 +910,7 @@
 	EXT4_SB(sb)->s_group_desc = n_group_desc;
 	EXT4_SB(sb)->s_gdb_count++;
 	ext4_kvfree(o_group_desc);
+	BUFFER_TRACE(gdb_bh, "get_write_access");
 	err = ext4_journal_get_write_access(handle, gdb_bh);
 	if (unlikely(err))
 		brelse(gdb_bh);
@@ -972,6 +986,7 @@
 	}
 
 	for (i = 0; i < reserved_gdb; i++) {
+		BUFFER_TRACE(primary[i], "get_write_access");
 		if ((err = ext4_journal_get_write_access(handle, primary[i])))
 			goto exit_bh;
 	}
@@ -1079,6 +1094,7 @@
 		ext4_debug("update metadata backup %llu(+%llu)\n",
 			   backup_block, backup_block -
 			   ext4_group_first_block_no(sb, group));
+		BUFFER_TRACE(bh, "get_write_access");
 		if ((err = ext4_journal_get_write_access(handle, bh)))
 			break;
 		lock_buffer(bh);
@@ -1158,6 +1174,7 @@
 		 */
 		if (gdb_off) {
 			gdb_bh = sbi->s_group_desc[gdb_num];
+			BUFFER_TRACE(gdb_bh, "get_write_access");
 			err = ext4_journal_get_write_access(handle, gdb_bh);
 
 			if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group))
@@ -1195,8 +1212,7 @@
 {
 	struct buffer_head *bh;
 
-	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
-					EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	if (!ext4_has_metadata_csum(sb))
 		return 0;
 
 	bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
@@ -1428,6 +1444,7 @@
 		goto exit;
 	}
 
+	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
 	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
 	if (err)
 		goto exit_journal;
@@ -1559,11 +1576,10 @@
 	int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
 		le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
 	struct inode *inode = NULL;
-	int gdb_off, gdb_num;
+	int gdb_off;
 	int err;
 	__u16 bg_flags = 0;
 
-	gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb);
 	gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);
 
 	if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb,
@@ -1641,6 +1657,7 @@
 		return err;
 	}
 
+	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
 	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
 	if (err) {
 		ext4_warning(sb, "error %d on journal write access", err);
@@ -1800,6 +1817,7 @@
 	if (IS_ERR(handle))
 		return PTR_ERR(handle);
 
+	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
 	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
 	if (err)
 		goto errout;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index faa1920..0774d50 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -59,6 +59,7 @@
 static struct ext4_lazy_init *ext4_li_info;
 static struct mutex ext4_li_mtx;
 static struct ext4_features *ext4_feat;
+static int ext4_mballoc_ready;
 
 static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
 			     unsigned long journal_devnum);
@@ -136,11 +137,10 @@
 	return cpu_to_le32(csum);
 }
 
-int ext4_superblock_csum_verify(struct super_block *sb,
-				struct ext4_super_block *es)
+static int ext4_superblock_csum_verify(struct super_block *sb,
+				       struct ext4_super_block *es)
 {
-	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
-				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	if (!ext4_has_metadata_csum(sb))
 		return 1;
 
 	return es->s_checksum == ext4_superblock_csum(sb, es);
@@ -150,8 +150,7 @@
 {
 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
 
-	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
-		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	if (!ext4_has_metadata_csum(sb))
 		return;
 
 	es->s_checksum = ext4_superblock_csum(sb, es);
@@ -161,7 +160,7 @@
 {
 	void *ret;
 
-	ret = kmalloc(size, flags);
+	ret = kmalloc(size, flags | __GFP_NOWARN);
 	if (!ret)
 		ret = __vmalloc(size, flags, PAGE_KERNEL);
 	return ret;
@@ -171,7 +170,7 @@
 {
 	void *ret;
 
-	ret = kzalloc(size, flags);
+	ret = kzalloc(size, flags | __GFP_NOWARN);
 	if (!ret)
 		ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
 	return ret;
@@ -305,6 +304,8 @@
 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
 
 	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
+	if (bdev_read_only(sb->s_bdev))
+		return;
 	es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
 	es->s_last_error_time = cpu_to_le32(get_seconds());
 	strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
@@ -398,6 +399,11 @@
 	}
 	if (test_opt(sb, ERRORS_RO)) {
 		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
+		/*
+		 * Make sure the updated value of ->s_mount_flags is visible
+		 * before the ->s_flags update.
+		 */
+		smp_wmb();
 		sb->s_flags |= MS_RDONLY;
 	}
 	if (test_opt(sb, ERRORS_PANIC)) {
@@ -409,26 +415,32 @@
 	}
 }
 
+#define ext4_error_ratelimit(sb)					\
+		___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),	\
+			     "EXT4-fs error")
+
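+/*
+ * Usage sketch (mirrors the callers below): ___ratelimit() returns
+ * nonzero while the callsite is still within its burst budget, so
+ *
+ *	if (ext4_error_ratelimit(sb))
+ *		printk(KERN_CRIT "EXT4-fs error ...: %pV\n", &vaf);
+ *
+ * prints at most s_err_ratelimit_state.burst messages per interval
+ * (10 per 5 seconds by default, see ratelimit_state_init() in
+ * ext4_fill_super()) and suppresses the rest until the interval expires.
+ */
+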
 void __ext4_error(struct super_block *sb, const char *function,
 		  unsigned int line, const char *fmt, ...)
 {
 	struct va_format vaf;
 	va_list args;
 
-	va_start(args, fmt);
-	vaf.fmt = fmt;
-	vaf.va = &args;
-	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
-	       sb->s_id, function, line, current->comm, &vaf);
-	va_end(args);
+	if (ext4_error_ratelimit(sb)) {
+		va_start(args, fmt);
+		vaf.fmt = fmt;
+		vaf.va = &args;
+		printk(KERN_CRIT
+		       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
+		       sb->s_id, function, line, current->comm, &vaf);
+		va_end(args);
+	}
 	save_error_info(sb, function, line);
-
 	ext4_handle_error(sb);
 }
 
-void ext4_error_inode(struct inode *inode, const char *function,
-		      unsigned int line, ext4_fsblk_t block,
-		      const char *fmt, ...)
+void __ext4_error_inode(struct inode *inode, const char *function,
+			unsigned int line, ext4_fsblk_t block,
+			const char *fmt, ...)
 {
 	va_list args;
 	struct va_format vaf;
@@ -436,28 +448,29 @@
 
 	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
 	es->s_last_error_block = cpu_to_le64(block);
+	if (ext4_error_ratelimit(inode->i_sb)) {
+		va_start(args, fmt);
+		vaf.fmt = fmt;
+		vaf.va = &args;
+		if (block)
+			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
+			       "inode #%lu: block %llu: comm %s: %pV\n",
+			       inode->i_sb->s_id, function, line, inode->i_ino,
+			       block, current->comm, &vaf);
+		else
+			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
+			       "inode #%lu: comm %s: %pV\n",
+			       inode->i_sb->s_id, function, line, inode->i_ino,
+			       current->comm, &vaf);
+		va_end(args);
+	}
 	save_error_info(inode->i_sb, function, line);
-	va_start(args, fmt);
-	vaf.fmt = fmt;
-	vaf.va = &args;
-	if (block)
-		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
-		       "inode #%lu: block %llu: comm %s: %pV\n",
-		       inode->i_sb->s_id, function, line, inode->i_ino,
-		       block, current->comm, &vaf);
-	else
-		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
-		       "inode #%lu: comm %s: %pV\n",
-		       inode->i_sb->s_id, function, line, inode->i_ino,
-		       current->comm, &vaf);
-	va_end(args);
-
 	ext4_handle_error(inode->i_sb);
 }
 
-void ext4_error_file(struct file *file, const char *function,
-		     unsigned int line, ext4_fsblk_t block,
-		     const char *fmt, ...)
+void __ext4_error_file(struct file *file, const char *function,
+		       unsigned int line, ext4_fsblk_t block,
+		       const char *fmt, ...)
 {
 	va_list args;
 	struct va_format vaf;
@@ -467,27 +480,28 @@
 
 	es = EXT4_SB(inode->i_sb)->s_es;
 	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
+	if (ext4_error_ratelimit(inode->i_sb)) {
+		path = d_path(&(file->f_path), pathname, sizeof(pathname));
+		if (IS_ERR(path))
+			path = "(unknown)";
+		va_start(args, fmt);
+		vaf.fmt = fmt;
+		vaf.va = &args;
+		if (block)
+			printk(KERN_CRIT
+			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
+			       "block %llu: comm %s: path %s: %pV\n",
+			       inode->i_sb->s_id, function, line, inode->i_ino,
+			       block, current->comm, path, &vaf);
+		else
+			printk(KERN_CRIT
+			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
+			       "comm %s: path %s: %pV\n",
+			       inode->i_sb->s_id, function, line, inode->i_ino,
+			       current->comm, path, &vaf);
+		va_end(args);
+	}
 	save_error_info(inode->i_sb, function, line);
-	path = d_path(&(file->f_path), pathname, sizeof(pathname));
-	if (IS_ERR(path))
-		path = "(unknown)";
-	va_start(args, fmt);
-	vaf.fmt = fmt;
-	vaf.va = &args;
-	if (block)
-		printk(KERN_CRIT
-		       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
-		       "block %llu: comm %s: path %s: %pV\n",
-		       inode->i_sb->s_id, function, line, inode->i_ino,
-		       block, current->comm, path, &vaf);
-	else
-		printk(KERN_CRIT
-		       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
-		       "comm %s: path %s: %pV\n",
-		       inode->i_sb->s_id, function, line, inode->i_ino,
-		       current->comm, path, &vaf);
-	va_end(args);
-
 	ext4_handle_error(inode->i_sb);
 }
 
@@ -541,11 +555,13 @@
 	    (sb->s_flags & MS_RDONLY))
 		return;
 
-	errstr = ext4_decode_error(sb, errno, nbuf);
-	printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
-	       sb->s_id, function, line, errstr);
-	save_error_info(sb, function, line);
+	if (ext4_error_ratelimit(sb)) {
+		errstr = ext4_decode_error(sb, errno, nbuf);
+		printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
+		       sb->s_id, function, line, errstr);
+	}
 
+	save_error_info(sb, function, line);
 	ext4_handle_error(sb);
 }
 
@@ -574,8 +590,13 @@
 
 	if ((sb->s_flags & MS_RDONLY) == 0) {
 		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
-		sb->s_flags |= MS_RDONLY;
 		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
+		/*
+		 * Make sure the updated value of ->s_mount_flags is visible
+		 * before the ->s_flags update.
+		 */
+		smp_wmb();
+		sb->s_flags |= MS_RDONLY;
 		if (EXT4_SB(sb)->s_journal)
 			jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
 		save_error_info(sb, function, line);
@@ -588,11 +609,15 @@
 	}
 }
 
-void ext4_msg(struct super_block *sb, const char *prefix, const char *fmt, ...)
+void __ext4_msg(struct super_block *sb,
+		const char *prefix, const char *fmt, ...)
 {
 	struct va_format vaf;
 	va_list args;
 
+	if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
+		return;
+
 	va_start(args, fmt);
 	vaf.fmt = fmt;
 	vaf.va = &args;
@@ -606,6 +631,10 @@
 	struct va_format vaf;
 	va_list args;
 
+	if (!___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),
+			  "EXT4-fs warning"))
+		return;
+
 	va_start(args, fmt);
 	vaf.fmt = fmt;
 	vaf.va = &args;
@@ -629,18 +658,20 @@
 	es->s_last_error_block = cpu_to_le64(block);
 	__save_error_info(sb, function, line);
 
-	va_start(args, fmt);
-
-	vaf.fmt = fmt;
-	vaf.va = &args;
-	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
-	       sb->s_id, function, line, grp);
-	if (ino)
-		printk(KERN_CONT "inode %lu: ", ino);
-	if (block)
-		printk(KERN_CONT "block %llu:", (unsigned long long) block);
-	printk(KERN_CONT "%pV\n", &vaf);
-	va_end(args);
+	if (ext4_error_ratelimit(sb)) {
+		va_start(args, fmt);
+		vaf.fmt = fmt;
+		vaf.va = &args;
+		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
+		       sb->s_id, function, line, grp);
+		if (ino)
+			printk(KERN_CONT "inode %lu: ", ino);
+		if (block)
+			printk(KERN_CONT "block %llu:",
+			       (unsigned long long) block);
+		printk(KERN_CONT "%pV\n", &vaf);
+		va_end(args);
+	}
 
 	if (test_opt(sb, ERRORS_CONT)) {
 		ext4_commit_super(sb, 0);
@@ -758,8 +789,8 @@
 	ext4_unregister_li_request(sb);
 	dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
 
-	flush_workqueue(sbi->dio_unwritten_wq);
-	destroy_workqueue(sbi->dio_unwritten_wq);
+	flush_workqueue(sbi->rsv_conversion_wq);
+	destroy_workqueue(sbi->rsv_conversion_wq);
 
 	if (sbi->s_journal) {
 		err = jbd2_journal_destroy(sbi->s_journal);
@@ -768,8 +799,8 @@
 			ext4_abort(sb, "Couldn't clean up the journal");
 	}
 
-	ext4_es_unregister_shrinker(sb);
-	del_timer(&sbi->s_err_report);
+	ext4_es_unregister_shrinker(sbi);
+	del_timer_sync(&sbi->s_err_report);
 	ext4_release_system_zone(sb);
 	ext4_mb_release(sb);
 	ext4_ext_release(sb);
@@ -796,10 +827,9 @@
 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
 	percpu_counter_destroy(&sbi->s_dirs_counter);
 	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
-	percpu_counter_destroy(&sbi->s_extent_cache_cnt);
 	brelse(sbi->s_sbh);
 #ifdef CONFIG_QUOTA
-	for (i = 0; i < MAXQUOTAS; i++)
+	for (i = 0; i < EXT4_MAXQUOTAS; i++)
 		kfree(sbi->s_qf_names[i]);
 #endif
 
@@ -823,6 +853,10 @@
 		invalidate_bdev(sbi->journal_bdev);
 		ext4_blkdev_remove(sbi);
 	}
+	if (sbi->s_mb_cache) {
+		ext4_xattr_destroy_cache(sbi->s_mb_cache);
+		sbi->s_mb_cache = NULL;
+	}
 	if (sbi->s_mmp_tsk)
 		kthread_stop(sbi->s_mmp_tsk);
 	sb->s_fs_info = NULL;
@@ -852,12 +886,15 @@
 		return NULL;
 
 	ei->vfs_inode.i_version = 1;
+	spin_lock_init(&ei->i_raw_lock);
 	INIT_LIST_HEAD(&ei->i_prealloc_list);
 	spin_lock_init(&ei->i_prealloc_lock);
 	ext4_es_init_tree(&ei->i_es_tree);
 	rwlock_init(&ei->i_es_lock);
 	INIT_LIST_HEAD(&ei->i_es_lru);
+	ei->i_es_all_nr = 0;
 	ei->i_es_lru_nr = 0;
+	ei->i_touch_when = 0;
 	ei->i_reserved_data_blocks = 0;
 	ei->i_reserved_meta_blocks = 0;
 	ei->i_allocated_meta_blocks = 0;
@@ -868,14 +905,16 @@
 	ei->i_reserved_quota = 0;
 #endif
 	ei->jinode = NULL;
-	INIT_LIST_HEAD(&ei->i_completed_io_list);
+	INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
 	spin_lock_init(&ei->i_completed_io_lock);
 	ei->i_sync_tid = 0;
 	ei->i_datasync_tid = 0;
 	atomic_set(&ei->i_ioend_count, 0);
 	atomic_set(&ei->i_unwritten, 0);
-	INIT_WORK(&ei->i_unwritten_work, ext4_end_io_work);
-
+	INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+	ei->i_crypt_info = NULL;
+#endif
 	return &ei->vfs_inode;
 }
 
@@ -917,7 +956,7 @@
 	inode_init_once(&ei->vfs_inode);
 }
 
-static int init_inodecache(void)
+static int __init init_inodecache(void)
 {
 	ext4_inode_cachep = kmem_cache_create("ext4_inode_cache",
 					     sizeof(struct ext4_inode_info),
@@ -953,6 +992,10 @@
 		jbd2_free_inode(EXT4_I(inode)->jinode);
 		EXT4_I(inode)->jinode = NULL;
 	}
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+	if (EXT4_I(inode)->i_crypt_info)
+		ext4_free_encryption_info(inode, EXT4_I(inode)->i_crypt_info);
+#endif
 }
 
 static struct inode *ext4_nfs_get_inode(struct super_block *sb,
@@ -1095,24 +1138,6 @@
 	.bdev_try_to_free_page = bdev_try_to_free_page,
 };
 
-static const struct super_operations ext4_nojournal_sops = {
-	.alloc_inode	= ext4_alloc_inode,
-	.destroy_inode	= ext4_destroy_inode,
-	.write_inode	= ext4_write_inode,
-	.dirty_inode	= ext4_dirty_inode,
-	.drop_inode	= ext4_drop_inode,
-	.evict_inode	= ext4_evict_inode,
-	.put_super	= ext4_put_super,
-	.statfs		= ext4_statfs,
-	.remount_fs	= ext4_remount,
-	.show_options	= ext4_show_options,
-#ifdef CONFIG_QUOTA
-	.quota_read	= ext4_quota_read,
-	.quota_write	= ext4_quota_write,
-#endif
-	.bdev_try_to_free_page = bdev_try_to_free_page,
-};
-
 static const struct export_operations ext4_export_ops = {
 	.fh_to_dentry = ext4_fh_to_dentry,
 	.fh_to_parent = ext4_fh_to_parent,
@@ -1125,10 +1150,10 @@
 	Opt_nouid32, Opt_debug, Opt_removed,
 	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
 	Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
-	Opt_commit, Opt_min_batch_time, Opt_max_batch_time,
-	Opt_journal_dev, Opt_journal_checksum, Opt_journal_async_commit,
+	Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
+	Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
 	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
-	Opt_data_err_abort, Opt_data_err_ignore,
+	Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
 	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
 	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
 	Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
@@ -1170,6 +1195,7 @@
 	{Opt_min_batch_time, "min_batch_time=%u"},
 	{Opt_max_batch_time, "max_batch_time=%u"},
 	{Opt_journal_dev, "journal_dev=%u"},
+	{Opt_journal_path, "journal_path=%s"},
 	{Opt_journal_checksum, "journal_checksum"},
 	{Opt_journal_async_commit, "journal_async_commit"},
 	{Opt_abort, "abort"},
@@ -1213,6 +1239,7 @@
 	{Opt_init_itable, "init_itable"},
 	{Opt_noinit_itable, "noinit_itable"},
 	{Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
+	{Opt_test_dummy_encryption, "test_dummy_encryption"},
 	{Opt_removed, "check=none"},	/* mount option from ext2/3 */
 	{Opt_removed, "nocheck"},	/* mount option from ext2/3 */
 	{Opt_removed, "reservation"},	/* mount option from ext2/3 */
@@ -1329,6 +1356,7 @@
 #define MOPT_NO_EXT2	0x0100
 #define MOPT_NO_EXT3	0x0200
 #define MOPT_EXT4_ONLY	(MOPT_NO_EXT2 | MOPT_NO_EXT3)
+#define MOPT_STRING	0x0400
 
 static const struct mount_opts {
 	int	token;
@@ -1378,6 +1406,7 @@
 	{Opt_resuid, 0, MOPT_GTE0},
 	{Opt_resgid, 0, MOPT_GTE0},
 	{Opt_journal_dev, 0, MOPT_GTE0},
+	{Opt_journal_path, 0, MOPT_STRING},
 	{Opt_journal_ioprio, 0, MOPT_GTE0},
 	{Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
 	{Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
@@ -1409,6 +1438,7 @@
 	{Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
 	{Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
 	{Opt_max_dir_size_kb, 0, MOPT_GTE0},
+	{Opt_test_dummy_encryption, 0, MOPT_GTE0},
 	{Opt_err, 0, 0}
 };
 
@@ -1471,7 +1501,7 @@
 		return -1;
 	}
 
-	if (args->from && match_int(args, &arg))
+	if (args->from && !(m->flags & MOPT_STRING) && match_int(args, &arg))
 		return -1;
 	if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
 		return -1;
@@ -1533,6 +1563,44 @@
 			return -1;
 		}
 		*journal_devnum = arg;
+	} else if (token == Opt_journal_path) {
+		char *journal_path;
+		struct inode *journal_inode;
+		struct path path;
+		int error;
+
+		if (is_remount) {
+			ext4_msg(sb, KERN_ERR,
+				 "Cannot specify journal on remount");
+			return -1;
+		}
+		journal_path = match_strdup(&args[0]);
+		if (!journal_path) {
+			ext4_msg(sb, KERN_ERR, "error: could not dup "
+				"journal device string");
+			return -1;
+		}
+
+		error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
+		if (error) {
+			ext4_msg(sb, KERN_ERR, "error: could not find "
+				"journal device path: error %d", error);
+			kfree(journal_path);
+			return -1;
+		}
+
+		journal_inode = path.dentry->d_inode;
+		if (!S_ISBLK(journal_inode->i_mode)) {
+			ext4_msg(sb, KERN_ERR, "error: journal path %s "
+				"is not a block device", journal_path);
+			path_put(&path);
+			kfree(journal_path);
+			return -1;
+		}
+
+		*journal_devnum = new_encode_dev(journal_inode->i_rdev);
+		path_put(&path);
+		kfree(journal_path);
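+
+		/*
+		 * Example (editor's illustration): mounting with
+		 * "-o journal_path=/dev/sdb1" resolves the path via
+		 * kern_path(), checks that it names a block device, and
+		 * stores its dev_t -- so the option is an alternative
+		 * spelling of journal_dev=<devnum>.
+		 */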
 	} else if (token == Opt_journal_ioprio) {
 		if (arg > 7) {
 			ext4_msg(sb, KERN_ERR, "Invalid journal IO priority"
@@ -1541,6 +1609,15 @@
 		}
 		*journal_ioprio =
 			IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
+	} else if (token == Opt_test_dummy_encryption) {
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+		sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION;
+		ext4_msg(sb, KERN_WARNING,
+			 "Test dummy encryption mode enabled");
+#else
+		ext4_msg(sb, KERN_WARNING,
+			 "Test dummy encryption mount option ignored");
+#endif
 	} else if (m->flags & MOPT_DATAJ) {
 		if (is_remount) {
 			if (!sbi->s_journal)
@@ -1824,7 +1901,7 @@
 	if (!(sbi->s_mount_state & EXT4_VALID_FS))
 		ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
 			 "running e2fsck is recommended");
-	else if ((sbi->s_mount_state & EXT4_ERROR_FS))
+	else if (sbi->s_mount_state & EXT4_ERROR_FS)
 		ext4_msg(sb, KERN_WARNING,
 			 "warning: mounting fs with errors, "
 			 "running e2fsck is recommended");
@@ -1902,7 +1979,6 @@
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_group_desc *gdp = NULL;
 	ext4_group_t flex_group;
-	unsigned int groups_per_flex = 0;
 	int i, err;
 
 	sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
@@ -1910,7 +1986,6 @@
 		sbi->s_log_groups_per_flex = 0;
 		return 1;
 	}
-	groups_per_flex = 1U << sbi->s_log_groups_per_flex;
 
 	err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
 	if (err)
@@ -1940,8 +2015,7 @@
 	__u16 crc = 0;
 	__le32 le_group = cpu_to_le32(block_group);
 
-	if ((sbi->s_es->s_feature_ro_compat &
-	     cpu_to_le32(EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))) {
+	if (ext4_has_metadata_csum(sbi->s_sb)) {
 		/* Use new metadata_csum algorithm */
 		__u32 csum32;
 		__u16 dummy_csum = 0;
@@ -2086,10 +2160,6 @@
 	}
 	if (NULL != first_not_zeroed)
 		*first_not_zeroed = grp;
-
-	ext4_free_blocks_count_set(sbi->s_es,
-				   EXT4_C2B(sbi, ext4_count_free_clusters(sb)));
-	sbi->s_es->s_free_inodes_count =cpu_to_le32(ext4_count_free_inodes(sb));
 	return 1;
 }
 
@@ -2139,7 +2209,7 @@
 	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
 		/* don't clear list on RO mount w/ errors */
 		if (es->s_last_orphan && !(s_flags & MS_RDONLY)) {
-			jbd_debug(1, "Errors on filesystem, "
-				  "clearing orphan list.\n");
+			ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
+				  "clearing orphan list.");
 			es->s_last_orphan = 0;
 		}
@@ -2155,7 +2225,7 @@
 	/* Needed for iput() to work correctly and not trash data */
 	sb->s_flags |= MS_ACTIVE;
 	/* Turn on quotas so that they are updated correctly */
-	for (i = 0; i < MAXQUOTAS; i++) {
+	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
 		if (EXT4_SB(sb)->s_qf_names[i]) {
 			int ret = ext4_quota_on_mount(sb, i);
 			if (ret < 0)
@@ -2188,19 +2258,22 @@
 		list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
 		dquot_initialize(inode);
 		if (inode->i_nlink) {
-			ext4_msg(sb, KERN_DEBUG,
-				"%s: truncating inode %lu to %lld bytes",
-				__func__, inode->i_ino, inode->i_size);
+			if (test_opt(sb, DEBUG))
+				ext4_msg(sb, KERN_DEBUG,
+					"%s: truncating inode %lu to %lld bytes",
+					__func__, inode->i_ino, inode->i_size);
 			jbd_debug(2, "truncating inode %lu to %lld bytes\n",
 				  inode->i_ino, inode->i_size);
 			mutex_lock(&inode->i_mutex);
+			truncate_inode_pages(inode->i_mapping, inode->i_size);
 			ext4_truncate(inode);
 			mutex_unlock(&inode->i_mutex);
 			nr_truncates++;
 		} else {
-			ext4_msg(sb, KERN_DEBUG,
-				"%s: deleting unreferenced inode %lu",
-				__func__, inode->i_ino);
+			if (test_opt(sb, DEBUG))
+				ext4_msg(sb, KERN_DEBUG,
+					"%s: deleting unreferenced inode %lu",
+					__func__, inode->i_ino);
 			jbd_debug(2, "deleting unreferenced inode %lu\n",
 				  inode->i_ino);
 			nr_orphans++;
@@ -2218,7 +2291,7 @@
 		       PLURAL(nr_truncates));
 #ifdef CONFIG_QUOTA
 	/* Turn quotas off */
-	for (i = 0; i < MAXQUOTAS; i++) {
+	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
 		if (sb_dqopt(sb)->files[i])
 			dquot_quota_off(sb, i);
 	}
@@ -2354,6 +2427,16 @@
 	if (ext4_bg_has_super(sb, bg))
 		has_super = 1;
 
+	/*
+	 * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
+	 * block 2, not 1.  If s_first_data_block == 0 (bigalloc is enabled
+	 * on modern mke2fs or blksize > 1k on older mke2fs) then we must
+	 * compensate.
+	 */
+	if (sb->s_blocksize == 1024 && nr == 0 &&
+	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) == 0)
+		has_super++;
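+	/*
+	 * Worked example (editor's illustration): with 1k blocks and
+	 * s_first_data_block == 0, group 0 starts at block 0 but the
+	 * superblock still lives at byte offset 1024, i.e. block 1, so
+	 * the GDT begins at block 2 and has_super must be 2, not 1.
+	 */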
+
 	return (has_super + ext4_group_first_block_no(sb, bg));
 }
 
@@ -2401,7 +2484,10 @@
 	ssize_t (*show)(struct ext4_attr *, struct ext4_sb_info *, char *);
 	ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
 			 const char *, size_t);
-	int offset;
+	union {
+		int offset;
+		int deprecated_val;
+	} u;
 };
 
 static int parse_strtoull(const char *buf,
@@ -2470,7 +2556,7 @@
 static ssize_t sbi_ui_show(struct ext4_attr *a,
 			   struct ext4_sb_info *sbi, char *buf)
 {
-	unsigned int *ui = (unsigned int *) (((char *) sbi) + a->offset);
+	unsigned int *ui = (unsigned int *) (((char *) sbi) + a->u.offset);
 
 	return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
 }
@@ -2479,7 +2565,7 @@
 			    struct ext4_sb_info *sbi,
 			    const char *buf, size_t count)
 {
-	unsigned int *ui = (unsigned int *) (((char *) sbi) + a->offset);
+	unsigned int *ui = (unsigned int *) (((char *) sbi) + a->u.offset);
 	unsigned long t;
 	int ret;
 
@@ -2490,6 +2576,16 @@
 	return count;
 }
 
+static ssize_t es_ui_show(struct ext4_attr *a,
+			   struct ext4_sb_info *sbi, char *buf)
+{
+	unsigned int *ui = (unsigned int *) (((char *) sbi->s_es) +
+			   a->u.offset);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
+}
+
 static ssize_t reserved_clusters_show(struct ext4_attr *a,
 				  struct ext4_sb_info *sbi, char *buf)
 {
@@ -2528,22 +2624,53 @@
 	return count;
 }
 
+static ssize_t sbi_deprecated_show(struct ext4_attr *a,
+				   struct ext4_sb_info *sbi, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", a->u.deprecated_val);
+}
+
 #define EXT4_ATTR_OFFSET(_name,_mode,_show,_store,_elname) \
 static struct ext4_attr ext4_attr_##_name = {			\
 	.attr = {.name = __stringify(_name), .mode = _mode },	\
 	.show	= _show,					\
 	.store	= _store,					\
-	.offset = offsetof(struct ext4_sb_info, _elname),	\
+	.u = {							\
+		.offset = offsetof(struct ext4_sb_info, _elname),\
+	},							\
 }
+
+#define EXT4_ATTR_OFFSET_ES(_name,_mode,_show,_store,_elname)		\
+static struct ext4_attr ext4_attr_##_name = {				\
+	.attr = {.name = __stringify(_name), .mode = _mode },		\
+	.show	= _show,						\
+	.store	= _store,						\
+	.u = {								\
+		.offset = offsetof(struct ext4_super_block, _elname),	\
+	},								\
+}
+
 #define EXT4_ATTR(name, mode, show, store) \
 static struct ext4_attr ext4_attr_##name = __ATTR(name, mode, show, store)
 
 #define EXT4_INFO_ATTR(name) EXT4_ATTR(name, 0444, NULL, NULL)
 #define EXT4_RO_ATTR(name) EXT4_ATTR(name, 0444, name##_show, NULL)
 #define EXT4_RW_ATTR(name) EXT4_ATTR(name, 0644, name##_show, name##_store)
+
+#define EXT4_RO_ATTR_ES_UI(name, elname)	\
+	EXT4_ATTR_OFFSET_ES(name, 0444, es_ui_show, NULL, elname)
 #define EXT4_RW_ATTR_SBI_UI(name, elname)	\
 	EXT4_ATTR_OFFSET(name, 0644, sbi_ui_show, sbi_ui_store, elname)
+
 #define ATTR_LIST(name) &ext4_attr_##name.attr
+#define EXT4_DEPRECATED_ATTR(_name, _val)	\
+static struct ext4_attr ext4_attr_##_name = {			\
+	.attr = {.name = __stringify(_name), .mode = 0444 },	\
+	.show	= sbi_deprecated_show,				\
+	.u = {							\
+		.deprecated_val = _val,				\
+	},							\
+}
 
 EXT4_RO_ATTR(delayed_allocation_blocks);
 EXT4_RO_ATTR(session_write_kbytes);
@@ -2558,9 +2685,18 @@
 EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
 EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
 EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
-EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump);
+EXT4_DEPRECATED_ATTR(max_writeback_mb_bump, 128);
 EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
 EXT4_ATTR(trigger_fs_error, 0200, NULL, trigger_test_error);
+EXT4_RW_ATTR_SBI_UI(err_ratelimit_interval_ms, s_err_ratelimit_state.interval);
+EXT4_RW_ATTR_SBI_UI(err_ratelimit_burst, s_err_ratelimit_state.burst);
+EXT4_RW_ATTR_SBI_UI(warning_ratelimit_interval_ms, s_warning_ratelimit_state.interval);
+EXT4_RW_ATTR_SBI_UI(warning_ratelimit_burst, s_warning_ratelimit_state.burst);
+EXT4_RW_ATTR_SBI_UI(msg_ratelimit_interval_ms, s_msg_ratelimit_state.interval);
+EXT4_RW_ATTR_SBI_UI(msg_ratelimit_burst, s_msg_ratelimit_state.burst);
+EXT4_RO_ATTR_ES_UI(errors_count, s_error_count);
+EXT4_RO_ATTR_ES_UI(first_error_time, s_first_error_time);
+EXT4_RO_ATTR_ES_UI(last_error_time, s_last_error_time);
 
 static struct attribute *ext4_attrs[] = {
 	ATTR_LIST(delayed_allocation_blocks),
@@ -2578,6 +2714,15 @@
 	ATTR_LIST(max_writeback_mb_bump),
 	ATTR_LIST(extent_max_zeroout_kb),
 	ATTR_LIST(trigger_fs_error),
+	ATTR_LIST(err_ratelimit_interval_ms),
+	ATTR_LIST(err_ratelimit_burst),
+	ATTR_LIST(warning_ratelimit_interval_ms),
+	ATTR_LIST(warning_ratelimit_burst),
+	ATTR_LIST(msg_ratelimit_interval_ms),
+	ATTR_LIST(msg_ratelimit_burst),
+	ATTR_LIST(errors_count),
+	ATTR_LIST(first_error_time),
+	ATTR_LIST(last_error_time),
 	NULL,
 };
 
@@ -2585,11 +2730,13 @@
 EXT4_INFO_ATTR(lazy_itable_init);
 EXT4_INFO_ATTR(batched_discard);
 EXT4_INFO_ATTR(meta_bg_resize);
+EXT4_INFO_ATTR(encryption);
 
 static struct attribute *ext4_feat_attrs[] = {
 	ATTR_LIST(lazy_itable_init),
 	ATTR_LIST(batched_discard),
 	ATTR_LIST(meta_bg_resize),
+	ATTR_LIST(encryption),
 	NULL,
 };
 
@@ -2637,9 +2784,25 @@
 	complete(&ext4_feat->f_kobj_unregister);
 }
 
+static ssize_t ext4_feat_show(struct kobject *kobj,
+			      struct attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "supported\n");
+}
+
+/*
+ * We cannot use ext4_attr_show/store because they rely on the kobject
+ * being embedded in the ext4_sb_info structure, which is definitely not
+ * true in this case.
+ */
+static const struct sysfs_ops ext4_feat_ops = {
+	.show	= ext4_feat_show,
+	.store	= NULL,
+};
+
 static struct kobj_type ext4_feat_ktype = {
 	.default_attrs	= ext4_feat_attrs,
-	.sysfs_ops	= &ext4_attr_ops,
+	.sysfs_ops	= &ext4_feat_ops,
 	.release	= ext4_feat_release,
 };
 
@@ -2993,7 +3156,6 @@
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_li_request *elr;
-	unsigned long rnd;
 
 	elr = kzalloc(sizeof(*elr), GFP_KERNEL);
 	if (!elr)
@@ -3008,10 +3170,8 @@
 	 * spread the inode table initialization requests
 	 * better.
 	 */
-	get_random_bytes(&rnd, sizeof(rnd));
-	elr->lr_next_sched = jiffies + (unsigned long)rnd %
-			     (EXT4_DEF_LI_MAX_START_DELAY * HZ);
-
+	elr->lr_next_sched = jiffies + (prandom_u32() %
+				(EXT4_DEF_LI_MAX_START_DELAY * HZ));
 	return elr;
 }
 
@@ -3096,17 +3256,20 @@
 	int compat, incompat;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 
-	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
-				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
-		/* journal checksum v2 */
+	if (ext4_has_metadata_csum(sb)) {
+		/* journal checksum v3 */
 		compat = 0;
-		incompat = JBD2_FEATURE_INCOMPAT_CSUM_V2;
+		incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
 	} else {
 		/* journal checksum v1 */
 		compat = JBD2_FEATURE_COMPAT_CHECKSUM;
 		incompat = 0;
 	}
 
+	jbd2_journal_clear_features(sbi->s_journal,
+			JBD2_FEATURE_COMPAT_CHECKSUM, 0,
+			JBD2_FEATURE_INCOMPAT_CSUM_V3 |
+			JBD2_FEATURE_INCOMPAT_CSUM_V2);
 	if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
 		ret = jbd2_journal_set_features(sbi->s_journal,
 				compat, 0,
@@ -3119,10 +3282,8 @@
 		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
 				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
 	} else {
-		jbd2_journal_clear_features(sbi->s_journal,
-				JBD2_FEATURE_COMPAT_CHECKSUM, 0,
-				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
-				JBD2_FEATURE_INCOMPAT_CSUM_V2);
+		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
+				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
 	}
 
 	return ret;
@@ -3260,7 +3421,7 @@
 	 * By default we reserve 2% or 4096 clusters, whichever is smaller.
 	 * This should cover the situations where we cannot afford to run
 	 * out of space, for example when punching a hole or converting
-	 * uninitialized extents in delalloc path. In most cases such
+	 * unwritten extents in the delalloc path. In most cases such
 	 * allocation would require 1 or 2 blocks; higher numbers are
 	 * very rare.
 	 */
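 	/*
 	 * Worked example (editor's illustration): with 4 KiB clusters,
 	 * 2% reaches the 4096-cluster cap at 204800 clusters (~800 MiB),
 	 * so any filesystem larger than that reserves a flat 4096
 	 * clusters (16 MiB).
 	 */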
@@ -3352,7 +3513,7 @@
 		logical_sb_block = sb_block;
 	}
 
-	if (!(bh = sb_bread(sb, logical_sb_block))) {
+	if (!(bh = sb_bread_unmovable(sb, logical_sb_block))) {
 		ext4_msg(sb, KERN_ERR, "unable to read superblock");
 		goto out_fail;
 	}
@@ -3403,8 +3564,7 @@
 	}
 
 	/* Precompute checksum seed for all metadata */
-	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
-			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	if (ext4_has_metadata_csum(sb))
 		sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
 					       sizeof(es->s_uuid));
 
@@ -3422,6 +3582,10 @@
 #ifdef CONFIG_EXT4_FS_POSIX_ACL
 	set_opt(sb, POSIX_ACL);
 #endif
+	/* don't forget to enable journal_csum when metadata_csum is enabled. */
+	if (ext4_has_metadata_csum(sb))
+		set_opt(sb, JOURNAL_CHECKSUM);
+
 	if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
 		set_opt(sb, JOURNAL_DATA);
 	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
@@ -3435,8 +3599,8 @@
 		set_opt(sb, ERRORS_CONT);
 	else
 		set_opt(sb, ERRORS_RO);
-	if (def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY)
-		set_opt(sb, BLOCK_VALIDITY);
+	/* block_validity enabled by default; disable with noblock_validity */
+	set_opt(sb, BLOCK_VALIDITY);
 	if (def_mount_opts & EXT4_DEFM_DISCARD)
 		set_opt(sb, DISCARD);
 
@@ -3503,6 +3667,16 @@
 		       "feature flags set on rev 0 fs, "
 		       "running e2fsck is recommended");
 
+	if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
+		set_opt2(sb, HURD_COMPAT);
+		if (EXT4_HAS_INCOMPAT_FEATURE(sb,
+					      EXT4_FEATURE_INCOMPAT_64BIT)) {
+			ext4_msg(sb, KERN_ERR,
+				 "The Hurd can't support 64-bit file systems");
+			goto failed_mount;
+		}
+	}
+
 	if (IS_EXT2_SB(sb)) {
 		if (ext2_feature_set_ok(sb))
 			ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
@@ -3549,6 +3723,13 @@
 		goto failed_mount;
 	}
 
+	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_ENCRYPT) &&
+	    es->s_encryption_level) {
+		ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
+			 es->s_encryption_level);
+		goto failed_mount;
+	}
+
 	if (sb->s_blocksize != blocksize) {
 		/* Validate the filesystem blocksize */
 		if (!sb_set_blocksize(sb, blocksize)) {
@@ -3560,7 +3741,7 @@
 		brelse(bh);
 		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
 		offset = do_div(logical_sb_block, blocksize);
-		bh = sb_bread(sb, logical_sb_block);
+		bh = sb_bread_unmovable(sb, logical_sb_block);
 		if (!bh) {
 			ext4_msg(sb, KERN_ERR,
 			       "Can't read superblock on 2nd try");
@@ -3789,7 +3970,7 @@
 
 	for (i = 0; i < db_count; i++) {
 		block = descriptor_loc(sb, logical_sb_block, i);
-		sbi->s_group_desc[i] = sb_bread(sb, block);
+		sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
 		if (!sbi->s_group_desc[i]) {
 			ext4_msg(sb, KERN_ERR,
 			       "can't read group descriptor %d", i);
@@ -3801,13 +3982,6 @@
 		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
 		goto failed_mount2;
 	}
-	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
-		if (!ext4_fill_flex_info(sb)) {
-			ext4_msg(sb, KERN_ERR,
-			       "unable to initialize "
-			       "flex_bg meta info!");
-			goto failed_mount2;
-		}
 
 	sbi->s_gdb_count = db_count;
 	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
@@ -3818,41 +3992,16 @@
 	sbi->s_err_report.data = (unsigned long) sb;
 
 	/* Register extent status tree shrinker */
-	ext4_es_register_shrinker(sb);
-
-	err = percpu_counter_init(&sbi->s_freeclusters_counter,
-			ext4_count_free_clusters(sb));
-	if (!err) {
-		err = percpu_counter_init(&sbi->s_freeinodes_counter,
-				ext4_count_free_inodes(sb));
-	}
-	if (!err) {
-		err = percpu_counter_init(&sbi->s_dirs_counter,
-				ext4_count_dirs(sb));
-	}
-	if (!err) {
-		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0);
-	}
-	if (!err) {
-		err = percpu_counter_init(&sbi->s_extent_cache_cnt, 0);
-	}
-	if (err) {
-		ext4_msg(sb, KERN_ERR, "insufficient memory");
+	if (ext4_es_register_shrinker(sbi))
 		goto failed_mount3;
-	}
 
 	sbi->s_stripe = ext4_get_stripe_size(sbi);
-	sbi->s_max_writeback_mb_bump = 128;
 	sbi->s_extent_max_zeroout_kb = 32;
 
 	/*
 	 * set up enough so that it can read an inode
 	 */
-	if (!test_opt(sb, NOLOAD) &&
-	    EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL))
-		sb->s_op = &ext4_sops;
-	else
-		sb->s_op = &ext4_nojournal_sops;
+	sb->s_op = &ext4_sops;
 	sb->s_export_op = &ext4_export_ops;
 	sb->s_xattr = ext4_xattr_handlers;
 #ifdef CONFIG_QUOTA
@@ -3876,7 +4025,7 @@
 	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_MMP) &&
 	    !(sb->s_flags & MS_RDONLY))
 		if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
-			goto failed_mount3;
+			goto failed_mount3a;
 
 	/*
 	 * The first inode we look at is the journal inode.  Don't try
@@ -3885,7 +4034,7 @@
 	if (!test_opt(sb, NOLOAD) &&
 	    EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) {
 		if (ext4_load_journal(sb, es, journal_devnum))
-			goto failed_mount3;
+			goto failed_mount3a;
 	} else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) &&
 	      EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) {
 		ext4_msg(sb, KERN_ERR, "required journal recovery "
@@ -3941,19 +4090,22 @@
 
 	sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
 
-	/*
-	 * The journal may have updated the bg summary counts, so we
-	 * need to update the global counters.
-	 */
-	percpu_counter_set(&sbi->s_freeclusters_counter,
-			   ext4_count_free_clusters(sb));
-	percpu_counter_set(&sbi->s_freeinodes_counter,
-			   ext4_count_free_inodes(sb));
-	percpu_counter_set(&sbi->s_dirs_counter,
-			   ext4_count_dirs(sb));
-	percpu_counter_set(&sbi->s_dirtyclusters_counter, 0);
-
 no_journal:
+	if (ext4_mballoc_ready) {
+		sbi->s_mb_cache = ext4_xattr_create_cache(sb->s_id);
+		if (!sbi->s_mb_cache) {
+			ext4_msg(sb, KERN_ERR, "Failed to create an mb_cache");
+			goto failed_mount_wq;
+		}
+	}
+
+	if (unlikely(sbi->s_mount_flags & EXT4_MF_TEST_DUMMY_ENCRYPTION) &&
+	    !(sb->s_flags & MS_RDONLY) &&
+	    !EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_ENCRYPT)) {
+		EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_ENCRYPT);
+		ext4_commit_super(sb, 1);
+	}
+
 	/*
 	 * Get the # of file system overhead blocks from the
 	 * superblock if present.
@@ -3970,12 +4122,12 @@
 	 * The maximum number of concurrent works can be high and
 	 * concurrency isn't really necessary.  Limit it to 1.
 	 */
-	EXT4_SB(sb)->dio_unwritten_wq =
-		alloc_workqueue("ext4-dio-unwritten", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
-	if (!EXT4_SB(sb)->dio_unwritten_wq) {
-		printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n");
+	EXT4_SB(sb)->rsv_conversion_wq =
+		alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+	if (!EXT4_SB(sb)->rsv_conversion_wq) {
+		printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
 		ret = -ENOMEM;
-		goto failed_mount_wq;
+		goto failed_mount4;
 	}
 
 	/*
@@ -4052,6 +4204,33 @@
 		goto failed_mount5;
 	}
 
+	block = ext4_count_free_clusters(sb);
+	ext4_free_blocks_count_set(sbi->s_es,
+				   EXT4_C2B(sbi, block));
+	err = percpu_counter_init(&sbi->s_freeclusters_counter, block);
+	if (!err) {
+		unsigned long freei = ext4_count_free_inodes(sb);
+		sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
+		err = percpu_counter_init(&sbi->s_freeinodes_counter, freei);
+	}
+	if (!err)
+		err = percpu_counter_init(&sbi->s_dirs_counter,
+					  ext4_count_dirs(sb));
+	if (!err)
+		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0);
+	if (err) {
+		ext4_msg(sb, KERN_ERR, "insufficient memory");
+		goto failed_mount6;
+	}
+
+	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
+		if (!ext4_fill_flex_info(sb)) {
+			ext4_msg(sb, KERN_ERR,
+			       "unable to initialize "
+			       "flex_bg meta info!");
+			goto failed_mount6;
+		}
+
 	err = ext4_register_li_request(sb, first_not_zeroed);
 	if (err)
 		goto failed_mount6;
@@ -4105,6 +4284,11 @@
 	if (es->s_error_count)
 		mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
 
+	/* Enable message ratelimiting. Default is 10 messages per 5 secs. */
+	ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
+	ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
+	ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);
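+	/*
+	 * These defaults are tunable at runtime through the sysfs
+	 * attributes added above (editor's illustration, <disk> being
+	 * the mounted device's name):
+	 *
+	 *	echo 50 > /sys/fs/ext4/<disk>/msg_ratelimit_burst
+	 */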
+
 	kfree(orig_data);
 	return 0;
 
@@ -4121,6 +4305,12 @@
 	ext4_unregister_li_request(sb);
 failed_mount6:
 	ext4_mb_release(sb);
+	if (sbi->s_flex_groups)
+		ext4_kvfree(sbi->s_flex_groups);
+	percpu_counter_destroy(&sbi->s_freeclusters_counter);
+	percpu_counter_destroy(&sbi->s_freeinodes_counter);
+	percpu_counter_destroy(&sbi->s_dirs_counter);
+	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
 failed_mount5:
 	ext4_ext_release(sb);
 	ext4_release_system_zone(sb);
@@ -4129,22 +4319,17 @@
 	sb->s_root = NULL;
 failed_mount4:
 	ext4_msg(sb, KERN_ERR, "mount failed");
-	destroy_workqueue(EXT4_SB(sb)->dio_unwritten_wq);
+	if (EXT4_SB(sb)->rsv_conversion_wq)
+		destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
 failed_mount_wq:
 	if (sbi->s_journal) {
 		jbd2_journal_destroy(sbi->s_journal);
 		sbi->s_journal = NULL;
 	}
+failed_mount3a:
+	ext4_es_unregister_shrinker(sbi);
 failed_mount3:
-	ext4_es_unregister_shrinker(sb);
-	del_timer(&sbi->s_err_report);
-	if (sbi->s_flex_groups)
-		ext4_kvfree(sbi->s_flex_groups);
-	percpu_counter_destroy(&sbi->s_freeclusters_counter);
-	percpu_counter_destroy(&sbi->s_freeinodes_counter);
-	percpu_counter_destroy(&sbi->s_dirs_counter);
-	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
-	percpu_counter_destroy(&sbi->s_extent_cache_cnt);
+	del_timer_sync(&sbi->s_err_report);
 	if (sbi->s_mmp_tsk)
 		kthread_stop(sbi->s_mmp_tsk);
 failed_mount2:
@@ -4159,7 +4344,7 @@
 		remove_proc_entry(sb->s_id, ext4_proc_root);
 	}
 #ifdef CONFIG_QUOTA
-	for (i = 0; i < MAXQUOTAS; i++)
+	for (i = 0; i < EXT4_MAXQUOTAS; i++)
 		kfree(sbi->s_qf_names[i]);
 #endif
 	ext4_blkdev_remove(sbi);
@@ -4287,6 +4472,15 @@
 		goto out_bdev;
 	}
 
+	if ((le32_to_cpu(es->s_feature_ro_compat) &
+	     EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
+	    es->s_checksum != ext4_superblock_csum(sb, es)) {
+		ext4_msg(sb, KERN_ERR, "external journal has "
+				       "corrupt superblock");
+		brelse(bh);
+		goto out_bdev;
+	}
+
 	if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
 		ext4_msg(sb, KERN_ERR, "journal UUID does not match");
 		brelse(bh);
@@ -4461,11 +4655,13 @@
 	else
 		es->s_kbytes_written =
 			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
-	ext4_free_blocks_count_set(es,
+	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
+		ext4_free_blocks_count_set(es,
 			EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
 				&EXT4_SB(sb)->s_freeclusters_counter)));
-	es->s_free_inodes_count =
-		cpu_to_le32(percpu_counter_sum_positive(
+	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
+		es->s_free_inodes_count =
+			cpu_to_le32(percpu_counter_sum_positive(
 				&EXT4_SB(sb)->s_freeinodes_counter));
 	BUFFER_TRACE(sbh, "marking dirty");
 	ext4_superblock_csum_set(sb);
@@ -4572,19 +4768,41 @@
 {
 	int ret = 0;
 	tid_t target;
+	bool needs_barrier = false;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 
 	trace_ext4_sync_fs(sb, wait);
-	flush_workqueue(sbi->dio_unwritten_wq);
+	flush_workqueue(sbi->rsv_conversion_wq);
 	/*
 	 * Writeback quota in non-journalled quota case - journalled quota has
 	 * no dirty dquots
 	 */
 	dquot_writeback_dquots(sb, -1);
-	if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
-		if (wait)
-			jbd2_log_wait_commit(sbi->s_journal, target);
+	/*
+	 * Data writeback is possible w/o a journal transaction, so a barrier
+	 * must be sent at the end of the function. But we can skip it if
+	 * transaction_commit will do it for us.
+	 */
+	if (sbi->s_journal) {
+		target = jbd2_get_latest_transaction(sbi->s_journal);
+		if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
+		    !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
+			needs_barrier = true;
+
+		if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
+			if (wait)
+				ret = jbd2_log_wait_commit(sbi->s_journal,
+							   target);
+		}
+	} else if (wait && test_opt(sb, BARRIER))
+		needs_barrier = true;
+	if (needs_barrier) {
+		int err;
+		err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
+		if (!ret)
+			ret = err;
 	}
+
 	return ret;
 }
 
@@ -4606,23 +4824,26 @@
 
 	journal = EXT4_SB(sb)->s_journal;
 
-	/* Now we set up the journal barrier. */
-	jbd2_journal_lock_updates(journal);
+	if (journal) {
+		/* Now we set up the journal barrier. */
+		jbd2_journal_lock_updates(journal);
 
-	/*
-	 * Don't clear the needs_recovery flag if we failed to flush
-	 * the journal.
-	 */
-	error = jbd2_journal_flush(journal);
-	if (error < 0)
-		goto out;
+		/*
+		 * Don't clear the needs_recovery flag if we failed to
+		 * flush the journal.
+		 */
+		error = jbd2_journal_flush(journal);
+		if (error < 0)
+			goto out;
+	}
 
 	/* Journal blocked and flushed, clear needs_recovery flag. */
 	EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
 	error = ext4_commit_super(sb, 1);
 out:
-	/* we rely on upper layer to stop further updates */
-	jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
+	if (journal)
+		/* we rely on upper layer to stop further updates */
+		jbd2_journal_unlock_updates(journal);
 	return error;
 }
 
@@ -4653,7 +4874,7 @@
 	u32 s_min_batch_time, s_max_batch_time;
 #ifdef CONFIG_QUOTA
 	int s_jquota_fmt;
-	char *s_qf_names[MAXQUOTAS];
+	char *s_qf_names[EXT4_MAXQUOTAS];
 #endif
 };
 
@@ -4683,7 +4904,7 @@
 	old_opts.s_max_batch_time = sbi->s_max_batch_time;
 #ifdef CONFIG_QUOTA
 	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
-	for (i = 0; i < MAXQUOTAS; i++)
+	for (i = 0; i < EXT4_MAXQUOTAS; i++)
 		if (sbi->s_qf_names[i]) {
 			old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
 							 GFP_KERNEL);
@@ -4707,6 +4928,14 @@
 		goto restore_opts;
 	}
 
+	if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
+	    test_opt(sb, JOURNAL_CHECKSUM)) {
+		ext4_msg(sb, KERN_ERR, "changing journal_checksum "
+			 "during remount not supported");
+		err = -EINVAL;
+		goto restore_opts;
+	}
+
 	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
 		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
 			ext4_msg(sb, KERN_ERR, "can't mount with "
@@ -4742,6 +4971,9 @@
 		}
 
 		if (*flags & MS_RDONLY) {
+			err = sync_filesystem(sb);
+			if (err < 0)
+				goto restore_opts;
 			err = dquot_suspend(sb, -1);
 			if (err < 0)
 				goto restore_opts;
@@ -4841,7 +5073,7 @@
 
 #ifdef CONFIG_QUOTA
 	/* Release old quota file names */
-	for (i = 0; i < MAXQUOTAS; i++)
+	for (i = 0; i < EXT4_MAXQUOTAS; i++)
 		kfree(old_opts.s_qf_names[i]);
 	if (enable_quota) {
 		if (sb_any_quota_suspended(sb))
@@ -4870,7 +5102,7 @@
 	sbi->s_max_batch_time = old_opts.s_max_batch_time;
 #ifdef CONFIG_QUOTA
 	sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
-	for (i = 0; i < MAXQUOTAS; i++) {
+	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
 		kfree(sbi->s_qf_names[i]);
 		sbi->s_qf_names[i] = old_opts.s_qf_names[i];
 	}
@@ -5091,7 +5323,7 @@
 {
 	int err;
 	struct inode *qf_inode;
-	unsigned long qf_inums[MAXQUOTAS] = {
+	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
 		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
 		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum)
 	};
@@ -5122,13 +5354,13 @@
 static int ext4_enable_quotas(struct super_block *sb)
 {
 	int type, err = 0;
-	unsigned long qf_inums[MAXQUOTAS] = {
+	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
 		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
 		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum)
 	};
 
 	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
-	for (type = 0; type < MAXQUOTAS; type++) {
+	for (type = 0; type < EXT4_MAXQUOTAS; type++) {
 		if (qf_inums[type]) {
 			err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
 						DQUOT_USAGE_ENABLED);
@@ -5206,7 +5438,6 @@
 {
 	struct inode *inode = sb_dqopt(sb)->files[type];
 	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
-	int err = 0;
 	int offset = off & (sb->s_blocksize - 1);
 	int tocopy;
 	size_t toread;
@@ -5221,9 +5452,9 @@
 	while (toread > 0) {
 		tocopy = sb->s_blocksize - offset < toread ?
 				sb->s_blocksize - offset : toread;
-		bh = ext4_bread(NULL, inode, blk, 0, &err);
-		if (err)
-			return err;
+		bh = ext4_bread(NULL, inode, blk, 0);
+		if (IS_ERR(bh))
+			return PTR_ERR(bh);
 		if (!bh)	/* A hole? */
 			memset(data, 0, tocopy);
 		else
@@ -5244,8 +5475,7 @@
 {
 	struct inode *inode = sb_dqopt(sb)->files[type];
 	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
-	int err = 0;
-	int offset = off & (sb->s_blocksize - 1);
+	int err, offset = off & (sb->s_blocksize - 1);
 	struct buffer_head *bh;
 	handle_t *handle = journal_current_handle();
 
@@ -5266,13 +5496,16 @@
 		return -EIO;
 	}
 
-	bh = ext4_bread(handle, inode, blk, 1, &err);
+	bh = ext4_bread(handle, inode, blk, 1);
+	if (IS_ERR(bh))
+		return PTR_ERR(bh);
 	if (!bh)
 		goto out;
+	BUFFER_TRACE(bh, "get write access");
 	err = ext4_journal_get_write_access(handle, bh);
 	if (err) {
 		brelse(bh);
-		goto out;
+		return err;
 	}
 	lock_buffer(bh);
 	memcpy(bh->b_data+offset, data, len);
@@ -5281,8 +5514,6 @@
 	err = ext4_handle_dirty_metadata(handle, NULL, bh);
 	brelse(bh);
 out:
-	if (err)
-		return err;
 	if (inode->i_size < off + len) {
 		i_size_write(inode, off + len);
 		EXT4_I(inode)->i_disksize = inode->i_size;
@@ -5444,11 +5675,9 @@
 
 	err = ext4_init_mballoc();
 	if (err)
-		goto out3;
-
-	err = ext4_init_xattr();
-	if (err)
 		goto out2;
+	else
+		ext4_mballoc_ready = 1;
 	err = init_inodecache();
 	if (err)
 		goto out1;
@@ -5464,10 +5693,9 @@
 	unregister_as_ext3();
 	destroy_inodecache();
 out1:
-	ext4_exit_xattr();
-out2:
+	ext4_mballoc_ready = 0;
 	ext4_exit_mballoc();
-out3:
+out2:
 	ext4_exit_feat_adverts();
 out4:
 	if (ext4_proc_root)
@@ -5485,12 +5713,12 @@
 
 static void __exit ext4_exit_fs(void)
 {
+	ext4_exit_crypto();
 	ext4_destroy_lazyinit_thread();
 	unregister_as_ext2();
 	unregister_as_ext3();
 	unregister_filesystem(&ext4_fs_type);
 	destroy_inodecache();
-	ext4_exit_xattr();
 	ext4_exit_mballoc();
 	ext4_exit_feat_adverts();
 	remove_proc_entry("fs/ext4", NULL);
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index ff37119..4f68e83 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -23,8 +23,93 @@
 #include "ext4.h"
 #include "xattr.h"
 
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
 static void *ext4_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
+	struct page *cpage = NULL;
+	char *caddr, *paddr = NULL;
+	struct ext4_str cstr, pstr;
+	struct inode *inode = dentry->d_inode;
+	struct ext4_encrypted_symlink_data *sd;
+	loff_t size = min_t(loff_t, i_size_read(inode), PAGE_SIZE - 1);
+	int res;
+	u32 plen, max_size = inode->i_sb->s_blocksize;
+
+	if (!ext4_encrypted_inode(inode))
+		return page_follow_link_light(dentry, nd);
+
+	res = ext4_get_encryption_info(inode);
+	if (res)
+		return ERR_PTR(res);
+
+	if (ext4_inode_is_fast_symlink(inode)) {
+		caddr = (char *) EXT4_I(dentry->d_inode)->i_data;
+		max_size = sizeof(EXT4_I(dentry->d_inode)->i_data);
+	} else {
+		cpage = read_mapping_page(inode->i_mapping, 0, NULL);
+		if (IS_ERR(cpage))
+			return cpage;
+		caddr = kmap(cpage);
+		caddr[size] = 0;
+	}
+
+	/* Symlink is encrypted */
+	sd = (struct ext4_encrypted_symlink_data *)caddr;
+	cstr.name = sd->encrypted_path;
+	cstr.len  = le32_to_cpu(sd->len);
+	if ((cstr.len +
+	     sizeof(struct ext4_encrypted_symlink_data) - 1) >
+	    max_size) {
+		/* Symlink data on the disk is corrupted */
+		res = -EIO;
+		goto errout;
+	}
+	plen = (cstr.len < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2) ?
+		EXT4_FNAME_CRYPTO_DIGEST_SIZE*2 : cstr.len;
+	paddr = kmalloc(plen + 1, GFP_NOFS);
+	if (!paddr) {
+		res = -ENOMEM;
+		goto errout;
+	}
+	pstr.name = paddr;
+	pstr.len = plen;
+	res = _ext4_fname_disk_to_usr(inode, NULL, &cstr, &pstr);
+	if (res < 0)
+		goto errout;
+	/* Null-terminate the name */
+	if (res <= plen)
+		paddr[res] = '\0';
+	nd_set_link(nd, paddr);
+	if (cpage) {
+		kunmap(cpage);
+		page_cache_release(cpage);
+	}
+	return NULL;
+errout:
+	if (cpage) {
+		kunmap(cpage);
+		page_cache_release(cpage);
+	}
+	kfree(paddr);
+	return ERR_PTR(res);
+}
+
+static void ext4_put_link(struct dentry *dentry, struct nameidata *nd,
+			  void *cookie)
+{
+	struct page *page = cookie;
+
+	if (!page) {
+		kfree(nd_get_link(nd));
+	} else {
+		kunmap(page);
+		page_cache_release(page);
+	}
+}
+#endif
+
+static void *ext4_follow_fast_link(struct dentry *dentry, struct nameidata *nd)
+{
 	struct ext4_inode_info *ei = EXT4_I(dentry->d_inode);
 	nd_set_link(nd, (char *) ei->i_data);
 	return NULL;
@@ -32,8 +117,13 @@
 
 const struct inode_operations ext4_symlink_inode_operations = {
 	.readlink	= generic_readlink,
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+	.follow_link    = ext4_follow_link,
+	.put_link       = ext4_put_link,
+#else
 	.follow_link	= page_follow_link_light,
 	.put_link	= page_put_link,
+#endif
 	.setattr	= ext4_setattr,
 	.setxattr	= generic_setxattr,
 	.getxattr	= generic_getxattr,
@@ -43,7 +133,7 @@
 
 const struct inode_operations ext4_fast_symlink_inode_operations = {
 	.readlink	= generic_readlink,
-	.follow_link	= ext4_follow_link,
+	.follow_link    = ext4_follow_fast_link,
 	.setattr	= ext4_setattr,
 	.setxattr	= generic_setxattr,
 	.getxattr	= generic_getxattr,
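
The encrypted ext4_follow_link() above always returns NULL as its cookie and
parks either a page-backed or a kmalloc()'ed string behind nd_set_link();
ext4_put_link() then tells the two cases apart. A hedged sketch of the
3.10-era ->follow_link()/->put_link() contract this relies on (demo_* names
are illustrative, not ext4 code): the pointer follow_link() returns comes
back verbatim as the cookie argument of put_link().

	static void *demo_follow_link(struct dentry *dentry, struct nameidata *nd)
	{
		char *target = kstrdup("some/target", GFP_KERNEL);

		if (!target)
			return ERR_PTR(-ENOMEM);
		nd_set_link(nd, target);
		return NULL;	/* NULL cookie: put_link() frees via nd_get_link() */
	}

	static void demo_put_link(struct dentry *dentry, struct nameidata *nd,
				  void *cookie)
	{
		if (!cookie)
			kfree(nd_get_link(nd));
	}
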
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 92850ba..70c4953 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -81,7 +81,7 @@
 # define ea_bdebug(bh, fmt, ...)	no_printk(fmt, ##__VA_ARGS__)
 #endif
 
-static void ext4_xattr_cache_insert(struct buffer_head *);
+static void ext4_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
 static struct buffer_head *ext4_xattr_cache_find(struct inode *,
 						 struct ext4_xattr_header *,
 						 struct mb_cache_entry **);
@@ -90,13 +90,11 @@
 static int ext4_xattr_list(struct dentry *dentry, char *buffer,
 			   size_t buffer_size);
 
-static struct mb_cache *ext4_xattr_cache;
-
 static const struct xattr_handler *ext4_xattr_handler_map[] = {
 	[EXT4_XATTR_INDEX_USER]		     = &ext4_xattr_user_handler,
 #ifdef CONFIG_EXT4_FS_POSIX_ACL
-	[EXT4_XATTR_INDEX_POSIX_ACL_ACCESS]  = &ext4_xattr_acl_access_handler,
-	[EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT] = &ext4_xattr_acl_default_handler,
+	[EXT4_XATTR_INDEX_POSIX_ACL_ACCESS]  = &posix_acl_access_xattr_handler,
+	[EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
 #endif
 	[EXT4_XATTR_INDEX_TRUSTED]	     = &ext4_xattr_trusted_handler,
 #ifdef CONFIG_EXT4_FS_SECURITY
@@ -108,8 +106,8 @@
 	&ext4_xattr_user_handler,
 	&ext4_xattr_trusted_handler,
 #ifdef CONFIG_EXT4_FS_POSIX_ACL
-	&ext4_xattr_acl_access_handler,
-	&ext4_xattr_acl_default_handler,
+	&posix_acl_access_xattr_handler,
+	&posix_acl_default_xattr_handler,
 #endif
 #ifdef CONFIG_EXT4_FS_SECURITY
 	&ext4_xattr_security_handler,
@@ -117,6 +115,9 @@
 	NULL
 };
 
+#define EXT4_GET_MB_CACHE(inode)	(((struct ext4_sb_info *) \
+				inode->i_sb->s_fs_info)->s_mb_cache)
+
 static __le32 ext4_xattr_block_csum(struct inode *inode,
 				    sector_t block_nr,
 				    struct ext4_xattr_header *hdr)
@@ -142,8 +143,7 @@
 					sector_t block_nr,
 					struct ext4_xattr_header *hdr)
 {
-	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
-		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
+	if (ext4_has_metadata_csum(inode->i_sb) &&
 	    (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
 		return 0;
 	return 1;
@@ -153,8 +153,7 @@
 				      sector_t block_nr,
 				      struct ext4_xattr_header *hdr)
 {
-	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
-		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+	if (!ext4_has_metadata_csum(inode->i_sb))
 		return;
 
 	hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
@@ -281,6 +280,7 @@
 	struct ext4_xattr_entry *entry;
 	size_t size;
 	int error;
+	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
 
 	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
 		  name_index, name, buffer, (long)buffer_size);
@@ -302,7 +302,7 @@
 		error = -EIO;
 		goto cleanup;
 	}
-	ext4_xattr_cache_insert(bh);
+	ext4_xattr_cache_insert(ext4_mb_cache, bh);
 	entry = BFIRST(bh);
 	error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
 	if (error == -EIO)
@@ -383,6 +383,9 @@
 {
 	int error;
 
+	if (strlen(name) > 255)
+		return -ERANGE;
+
 	down_read(&EXT4_I(inode)->xattr_sem);
 	error = ext4_xattr_ibody_get(inode, name_index, name, buffer,
 				     buffer_size);
@@ -425,6 +428,7 @@
 	struct inode *inode = dentry->d_inode;
 	struct buffer_head *bh = NULL;
 	int error;
+	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
 
 	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
 		  buffer, (long)buffer_size);
@@ -446,7 +450,7 @@
 		error = -EIO;
 		goto cleanup;
 	}
-	ext4_xattr_cache_insert(bh);
+	ext4_xattr_cache_insert(ext4_mb_cache, bh);
 	error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
 
 cleanup:
@@ -526,6 +530,7 @@
 	if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR))
 		return;
 
+	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
 	if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
 		EXT4_SET_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR);
 		ext4_handle_dirty_super(handle, sb);
@@ -542,8 +547,10 @@
 {
 	struct mb_cache_entry *ce = NULL;
 	int error = 0;
+	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
 
-	ce = mb_cache_entry_get(ext4_xattr_cache, bh->b_bdev, bh->b_blocknr);
+	ce = mb_cache_entry_get(ext4_mb_cache, bh->b_bdev, bh->b_blocknr);
+	BUFFER_TRACE(bh, "get_write_access");
 	error = ext4_journal_get_write_access(handle, bh);
 	if (error)
 		goto out;
@@ -598,12 +605,13 @@
 				    size_t *min_offs, void *base, int *total)
 {
 	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
-		*total += EXT4_XATTR_LEN(last->e_name_len);
 		if (!last->e_value_block && last->e_value_size) {
 			size_t offs = le16_to_cpu(last->e_value_offs);
 			if (offs < *min_offs)
 				*min_offs = offs;
 		}
+		if (total)
+			*total += EXT4_XATTR_LEN(last->e_name_len);
 	}
 	return (*min_offs - ((void *)last - base) - sizeof(__u32));
 }
@@ -776,14 +784,16 @@
 	struct ext4_xattr_search *s = &bs->s;
 	struct mb_cache_entry *ce = NULL;
 	int error = 0;
+	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
 
 #define header(x) ((struct ext4_xattr_header *)(x))
 
 	if (i->value && i->value_len > sb->s_blocksize)
 		return -ENOSPC;
 	if (s->base) {
-		ce = mb_cache_entry_get(ext4_xattr_cache, bs->bh->b_bdev,
+		ce = mb_cache_entry_get(ext4_mb_cache, bs->bh->b_bdev,
 					bs->bh->b_blocknr);
+		BUFFER_TRACE(bs->bh, "get_write_access");
 		error = ext4_journal_get_write_access(handle, bs->bh);
 		if (error)
 			goto cleanup;
@@ -800,7 +810,8 @@
 				if (!IS_LAST_ENTRY(s->first))
 					ext4_xattr_rehash(header(s->base),
 							  s->here);
-				ext4_xattr_cache_insert(bs->bh);
+				ext4_xattr_cache_insert(ext4_mb_cache,
+					bs->bh);
 			}
 			unlock_buffer(bs->bh);
 			if (error == -EIO)
@@ -868,6 +879,7 @@
 						EXT4_C2B(EXT4_SB(sb), 1));
 				if (error)
 					goto cleanup;
+				BUFFER_TRACE(new_bh, "get_write_access");
 				error = ext4_journal_get_write_access(handle,
 								      new_bh);
 				if (error)
@@ -901,14 +913,8 @@
 			if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
 				goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
 
-			/*
-			 * take i_data_sem because we will test
-			 * i_delalloc_reserved_flag in ext4_mb_new_blocks
-			 */
-			down_read((&EXT4_I(inode)->i_data_sem));
 			block = ext4_new_meta_blocks(handle, inode, goal, 0,
 						     NULL, &error);
-			up_read((&EXT4_I(inode)->i_data_sem));
 			if (error)
 				goto cleanup;
 
@@ -936,7 +942,7 @@
 			memcpy(new_bh->b_data, s->base, new_bh->b_size);
 			set_buffer_uptodate(new_bh);
 			unlock_buffer(new_bh);
-			ext4_xattr_cache_insert(new_bh);
+			ext4_xattr_cache_insert(ext4_mb_cache, new_bh);
 			error = ext4_handle_dirty_xattr_block(handle,
 							      inode, new_bh);
 			if (error)
@@ -1260,7 +1266,7 @@
 	struct ext4_xattr_block_find *bs = NULL;
 	char *buffer = NULL, *b_entry_name = NULL;
 	size_t min_offs, free;
-	int total_ino, total_blk;
+	int total_ino;
 	void *base, *start, *end;
 	int extra_isize = 0, error = 0, tried_min_extra_isize = 0;
 	int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);
@@ -1318,8 +1324,7 @@
 		first = BFIRST(bh);
 		end = bh->b_data + bh->b_size;
 		min_offs = end - base;
-		free = ext4_xattr_free_space(first, &min_offs, base,
-					     &total_blk);
+		free = ext4_xattr_free_space(first, &min_offs, base, NULL);
 		if (free < new_extra_isize) {
 			if (!tried_min_extra_isize && s_min_extra_isize) {
 				tried_min_extra_isize++;
@@ -1527,13 +1532,13 @@
 * Returns nothing; cache insertion is best-effort and errors are ignored.
  */
 static void
-ext4_xattr_cache_insert(struct buffer_head *bh)
+ext4_xattr_cache_insert(struct mb_cache *ext4_mb_cache, struct buffer_head *bh)
 {
 	__u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
 	struct mb_cache_entry *ce;
 	int error;
 
-	ce = mb_cache_entry_alloc(ext4_xattr_cache, GFP_NOFS);
+	ce = mb_cache_entry_alloc(ext4_mb_cache, GFP_NOFS);
 	if (!ce) {
 		ea_bdebug(bh, "out of memory");
 		return;
@@ -1605,12 +1610,13 @@
 {
 	__u32 hash = le32_to_cpu(header->h_hash);
 	struct mb_cache_entry *ce;
+	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
 
 	if (!header->h_hash)
 		return NULL;  /* never share */
 	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
 again:
-	ce = mb_cache_entry_find_first(ext4_xattr_cache, inode->i_sb->s_bdev,
+	ce = mb_cache_entry_find_first(ext4_mb_cache, inode->i_sb->s_bdev,
 				       hash);
 	while (ce) {
 		struct buffer_head *bh;
@@ -1708,19 +1714,17 @@
 
 #undef BLOCK_HASH_SHIFT
 
-int __init
-ext4_init_xattr(void)
+#define	HASH_BUCKET_BITS	10
+
+struct mb_cache *
+ext4_xattr_create_cache(char *name)
 {
-	ext4_xattr_cache = mb_cache_create("ext4_xattr", 6);
-	if (!ext4_xattr_cache)
-		return -ENOMEM;
-	return 0;
+	return mb_cache_create(name, HASH_BUCKET_BITS);
 }
 
-void
-ext4_exit_xattr(void)
+void ext4_xattr_destroy_cache(struct mb_cache *cache)
 {
-	if (ext4_xattr_cache)
-		mb_cache_destroy(ext4_xattr_cache);
-	ext4_xattr_cache = NULL;
+	if (cache)
+		mb_cache_destroy(cache);
 }
+
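
With the global ext4_xattr_cache removed, every mounted ext4 filesystem is
expected to own one mb_cache, reachable through EXT4_GET_MB_CACHE() via
s_fs_info. A hedged sketch of the mount/unmount pairing this implies (the
actual fill_super/put_super wiring is outside this hunk; demo_* names and
the use of sb->s_id are illustrative):

	/* Sketch: per-superblock xattr cache lifetime. */
	static int demo_attach_xattr_cache(struct super_block *sb)
	{
		struct ext4_sb_info *sbi = EXT4_SB(sb);

		sbi->s_mb_cache = ext4_xattr_create_cache(sb->s_id);
		if (!sbi->s_mb_cache)
			return -ENOMEM;
		return 0;
	}

	static void demo_detach_xattr_cache(struct super_block *sb)
	{
		struct ext4_sb_info *sbi = EXT4_SB(sb);

		ext4_xattr_destroy_cache(sbi->s_mb_cache);	/* NULL-safe */
		sbi->s_mb_cache = NULL;
	}
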
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index c767dbd..ddc0957 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -23,6 +23,7 @@
 #define EXT4_XATTR_INDEX_SECURITY	        6
 #define EXT4_XATTR_INDEX_SYSTEM			7
 #define EXT4_XATTR_INDEX_RICHACL		8
+#define EXT4_XATTR_INDEX_ENCRYPTION		9
 
 struct ext4_xattr_header {
 	__le32	h_magic;	/* magic number for identification */
@@ -96,10 +97,10 @@
 
 extern const struct xattr_handler ext4_xattr_user_handler;
 extern const struct xattr_handler ext4_xattr_trusted_handler;
-extern const struct xattr_handler ext4_xattr_acl_access_handler;
-extern const struct xattr_handler ext4_xattr_acl_default_handler;
 extern const struct xattr_handler ext4_xattr_security_handler;
 
+#define EXT4_XATTR_NAME_ENCRYPTION_CONTEXT "c"
+
 extern ssize_t ext4_listxattr(struct dentry *, char *, size_t);
 
 extern int ext4_xattr_get(struct inode *, int, const char *, void *, size_t);
@@ -112,9 +113,6 @@
 extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
 			    struct ext4_inode *raw_inode, handle_t *handle);
 
-extern int __init ext4_init_xattr(void);
-extern void ext4_exit_xattr(void);
-
 extern const struct xattr_handler *ext4_xattr_handlers[];
 
 extern int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
@@ -126,6 +124,9 @@
 				       struct ext4_xattr_info *i,
 				       struct ext4_xattr_ibody_find *is);
 
+extern struct mb_cache *ext4_xattr_create_cache(char *name);
+extern void ext4_xattr_destroy_cache(struct mb_cache *);
+
 #ifdef CONFIG_EXT4_FS_SECURITY
 extern int ext4_init_security(handle_t *handle, struct inode *inode,
 			      struct inode *dir, const struct qstr *qstr);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 91ff93b..ce11d9a 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -698,7 +698,8 @@
 						  get_data_block_ro);
 }
 
-static void f2fs_invalidate_data_page(struct page *page, unsigned long offset)
+static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
+				      unsigned int length)
 {
 	struct inode *inode = page->mapping->host;
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 3df43b4..74f3c7b 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1205,7 +1205,8 @@
 	return 0;
 }
 
-static void f2fs_invalidate_node_page(struct page *page, unsigned long offset)
+static void f2fs_invalidate_node_page(struct page *page, unsigned int offset,
+				      unsigned int length)
 {
 	struct inode *inode = page->mapping->host;
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 8555f7d..03ab8b8 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -239,7 +239,6 @@
 	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
 		seq_puts(seq, ",disable_ext_identify");
 
-	seq_printf(seq, ",active_logs=%u", sbi->active_logs);
 
 	return 0;
 }
diff --git a/fs/fat/Kconfig b/fs/fat/Kconfig
index 182f9ff..6bd12c0 100644
--- a/fs/fat/Kconfig
+++ b/fs/fat/Kconfig
@@ -74,6 +74,27 @@
 	  To compile this as a module, choose M here: the module will be called
 	  vfat.
 
+config VFAT_FS_NO_DUALNAMES
+	bool "disable VFAT dual names support (patent workaround)"
+	depends on VFAT_FS
+	help
+	  This option disables support for dual filenames on VFAT filesystems.
+	  If this option is enabled then file creation will either put
+	  a short (8.3) name or a long name on the file, but never both.
+	  The field where a shortname would normally go is filled with
+	  invalid characters such that it cannot be considered a valid
+	  short filename.
+
+	  That means that long filenames created with this option
+	  enabled will not be accessible at all to operating systems
+	  that do not understand the FAT long filename extensions.
+
+	  Users considering disabling this option should consider the
+	  implications of any patents that may exist on dual filenames
+	  in VFAT.
+
+	  If unsure, say N.
+
 config FAT_DEFAULT_CODEPAGE
 	int "Default codepage for FAT"
 	depends on MSDOS_FS || VFAT_FS
@@ -98,3 +119,14 @@
 
 	  Enable any character sets you need in File Systems/Native Language
 	  Support.
+
+config VFAT_NO_CREATE_WITH_LONGNAMES
+	bool "Disable creating files with long names"
+	depends on VFAT_FS
+	default n
+	help
+	  Set this to disable support for creating files or directories with
+	  names longer than 8.3 (the original DOS maximum file name length),
+	  e.g. naming a file FILE1234.TXT would be allowed but creating or
+	  renaming a file to FILE12345.TXT or FILE1234.TEXT would not
+	  be permitted.  Reading files with long file names is still permitted.
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 7a6f02c..4b807f6 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -372,7 +372,8 @@
 		dotoffset = 1;
 	}
 
-	memcpy(work, de->name, sizeof(work));
+	memcpy(work, de->name, sizeof(de->name));
+
 	/* see namei.c, msdos_format_name */
 	if (work[0] == 0x05)
 		work[0] = 0xE5;
@@ -502,6 +503,13 @@
 				goto end_of_dir;
 		}
 
+		/*
+		 * The FAT_NO_83NAME flag is used to mark files
+		 * created with no 8.3 short name
+		 */
+		if (de->lcase & FAT_NO_83NAME)
+			goto compare_longname;
+
 		/* Never prepend '.' to hidden files here.
 		 * That is done only for msdos mounts (and only when
 		 * 'dotsOK=yes'); if we are executing here, it is in the
@@ -515,6 +523,7 @@
 		if (fat_name_match(sbi, name, name_len, bufname, len))
 			goto found;
 
+compare_longname:
 		if (nr_slots) {
 			void *longname = unicode + FAT_MAX_UNI_CHARS;
 			int size = PATH_MAX - FAT_MAX_UNI_SIZE;
@@ -611,6 +620,8 @@
 		if (de->attr != ATTR_EXT && IS_FREE(de->name))
 			goto record_end;
 	} else {
+		if (de->lcase & FAT_NO_83NAME)
+			goto record_end;
 		if ((de->attr & ATTR_VOLUME) || IS_FREE(de->name))
 			goto record_end;
 	}
@@ -776,6 +787,13 @@
 	return ret;
 }
 
+static int fat_ioctl_volume_id(struct inode *dir)
+{
+	struct super_block *sb = dir->i_sb;
+	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+	return sbi->vol_id;
+}
+
 static long fat_dir_ioctl(struct file *filp, unsigned int cmd,
 			  unsigned long arg)
 {
@@ -792,6 +810,8 @@
 		short_only = 0;
 		both = 1;
 		break;
+	case VFAT_IOCTL_GET_VOLUME_ID:
+		return fat_ioctl_volume_id(inode);
 	default:
 		return fat_generic_ioctl(filp, cmd, arg);
 	}
@@ -832,6 +852,8 @@
 		short_only = 0;
 		both = 1;
 		break;
+	case VFAT_IOCTL_GET_VOLUME_ID:
+		return fat_ioctl_volume_id(inode);
 	default:
 		return fat_generic_ioctl(filp, cmd, (unsigned long)arg);
 	}
@@ -953,6 +975,10 @@
 	sinfo->bh = NULL;
 	while (fat_get_short_entry(dir, &sinfo->slot_off, &sinfo->bh,
 				   &sinfo->de) >= 0) {
+		/* skip files marked as having no 8.3 short name  */
+		if (sinfo->de->lcase & FAT_NO_83NAME)
+			continue;
+
 		if (!strncmp(sinfo->de->name, name, MSDOS_NAME)) {
 			sinfo->slot_off -= sizeof(*sinfo->de);
 			sinfo->nr_slots = 1;
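
Because fat_ioctl_volume_id() hands the ID back as the ioctl's return value
rather than copying it through the argument pointer, userspace reads it
directly; note that a volume ID with the top bit set is then hard to tell
apart from an error return. A hedged userspace sketch, assuming this series
also exports VFAT_IOCTL_GET_VOLUME_ID through linux/msdos_fs.h as Android
trees do:

	/* Userspace sketch: print a FAT volume's serial number. */
	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/msdos_fs.h>

	int main(int argc, char **argv)
	{
		int fd, id;

		if (argc != 2)
			return 1;
		fd = open(argv[1], O_RDONLY | O_DIRECTORY);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		id = ioctl(fd, VFAT_IOCTL_GET_VOLUME_ID, NULL);
		if (id == -1)
			perror("ioctl");
		else
			printf("%04X-%04X\n", ((unsigned)id >> 16) & 0xffff,
			       (unsigned)id & 0xffff);
		close(fd);
		return 0;
	}
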
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 21664fc..8180ece 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -86,6 +86,7 @@
 	const void *dir_ops;	      /* Opaque; default directory operations */
 	int dir_per_block;	      /* dir entries per block */
 	int dir_per_block_bits;	      /* log2(dir_per_block) */
+	unsigned long vol_id;         /* volume ID */
 
 	int fatent_shift;
 	struct fatent_operations *fatent_ops;
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 5d4513c..ebdc665 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -632,6 +632,8 @@
 	struct msdos_sb_info *sbi = MSDOS_SB(sb);
 	*flags |= MS_NODIRATIME | (sbi->options.isvfat ? 0 : MS_NOATIME);
 
+	sync_filesystem(sb);
+
 	/* make sure we update state on remount. */
 	new_rdonly = *flags & MS_RDONLY;
 	if (new_rdonly != (sb->s_flags & MS_RDONLY)) {
@@ -1252,6 +1254,7 @@
 	struct inode *fsinfo_inode = NULL;
 	struct buffer_head *bh;
 	struct fat_boot_sector *b;
+	struct fat_boot_bsx *bsx;
 	struct msdos_sb_info *sbi;
 	u16 logical_sector_size;
 	u32 total_sectors, total_clusters, fat_clusters, rootdir_sectors;
@@ -1398,6 +1401,8 @@
 			goto out_fail;
 		}
 
+		bsx = (struct fat_boot_bsx *)(bh->b_data + FAT32_BSX_OFFSET);
+
 		fsinfo = (struct fat_boot_fsinfo *)fsinfo_bh->b_data;
 		if (!IS_FSINFO(fsinfo)) {
 			fat_msg(sb, KERN_WARNING, "Invalid FSINFO signature: "
@@ -1413,8 +1418,14 @@
 		}
 
 		brelse(fsinfo_bh);
+	} else {
+		bsx = (struct fat_boot_bsx *)(bh->b_data + FAT16_BSX_OFFSET);
 	}
 
+	/* interpret volume ID as a little endian 32 bit integer */
+	sbi->vol_id = (((u32)bsx->vol_id[0]) | ((u32)bsx->vol_id[1] << 8) |
+		((u32)bsx->vol_id[2] << 16) | ((u32)bsx->vol_id[3] << 24));
+
 	sbi->dir_per_block = sb->s_blocksize / sizeof(struct msdos_dir_entry);
 	sbi->dir_per_block_bits = ffs(sbi->dir_per_block) - 1;
 
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 2da9520..0c26e0a 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <linux/buffer_head.h>
 #include <linux/namei.h>
+#include <linux/random.h>
 #include "fat.h"
 
 /*
@@ -328,6 +329,17 @@
 	int sz = 0, extlen, baselen, i, numtail_baselen, numtail2_baselen;
 	int is_shortname;
 	struct shortname_info base_info, ext_info;
+	unsigned opts_shortname = opts->shortname;
+
+#ifdef CONFIG_VFAT_FS_NO_DUALNAMES
+	/*
+	 * When dual names are disabled, we want to maximise the
+	 * chance that a file can be represented with just an 8.3
+	 * entry. We can do that by using the WINNT case
+	 * handling extensions to FAT.
+	 */
+	opts_shortname = VFAT_SFN_CREATE_WINNT;
+#endif
 
 	is_shortname = 1;
 	INIT_SHORTNAME_INFO(&base_info);
@@ -436,13 +448,22 @@
 	memcpy(name_res, base, baselen);
 	memcpy(name_res + 8, ext, extlen);
 	*lcase = 0;
+
+#ifdef CONFIG_VFAT_NO_CREATE_WITH_LONGNAMES
+	if (is_shortname == 0)
+		return -ENAMETOOLONG;
+	if (!base_info.valid || !ext_info.valid)
+		return -EINVAL;
+	opts_shortname = VFAT_SFN_CREATE_WINNT;
+#endif
+
 	if (is_shortname && base_info.valid && ext_info.valid) {
 		if (vfat_find_form(dir, name_res) == 0)
 			return -EEXIST;
 
-		if (opts->shortname & VFAT_SFN_CREATE_WIN95) {
+		if (opts_shortname & VFAT_SFN_CREATE_WIN95) {
 			return (base_info.upper && ext_info.upper);
-		} else if (opts->shortname & VFAT_SFN_CREATE_WINNT) {
+		} else if (opts_shortname & VFAT_SFN_CREATE_WINNT) {
 			if ((base_info.upper || base_info.lower) &&
 			    (ext_info.upper || ext_info.lower)) {
 				if (!base_info.upper && base_info.lower)
@@ -581,6 +602,66 @@
 	return 0;
 }
 
+#ifdef CONFIG_VFAT_FS_NO_DUALNAMES
+/*
+ * This function creates a dummy 8.3 entry which is as compatible as
+ * possible with existing FAT devices, while not being a valid
+ * filename under Windows or Linux.
+ */
+static void vfat_build_dummy_83_buffer(struct inode *dir, char *msdos_name,
+				       int is_dir)
+{
+	/*
+	 * These characters are all invalid in 8.3 names, and have
+	 * been shown to be harmless on all tested devices.
+	 */
+	const char invalidchar[] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x0B,
+				     0x0C, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13,
+				     0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A,
+				     0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x22, 0x2a,
+				     0x3a, 0x3c, 0x3e, 0x3f, 0x5b, 0x5d, 0x7c };
+	int i, tilde_pos, slash_pos;
+	u32 rand_num = random32();
+
+	/* We need a '~' in the prefix to make Win98 happy. */
+	tilde_pos = rand_num % 8;
+	rand_num >>= 3;
+
+	/*
+	 * The '/' makes sure that even unpatched Linux systems can't
+	 * get at files via the 8.3 entry. Don't put a '/' in directory
+	 * names, as it can cause problems with some photo frames.
+	 */
+	if (is_dir)
+		slash_pos = -1;
+	else {
+		slash_pos = (tilde_pos + 1 + rand_num % 7) % 8;
+		rand_num >>= 3;
+	}
+
+	/*
+	 * Fill in the first 8 bytes with invalid characters. Note
+	 * that we need to be careful not to run out of randomness. We
+	 * leave the 3-byte extension in place as some cheap MP3
+	 * players need it.
+	 */
+	for (i = 0; i < 8; i++) {
+		if (i == tilde_pos)
+			msdos_name[i] = '~';
+		else if (i == slash_pos)
+			msdos_name[i] = '/';
+		else {
+			msdos_name[i] =
+				invalidchar[rand_num % sizeof(invalidchar)];
+			rand_num /= sizeof(invalidchar);
+			if (rand_num < sizeof(invalidchar))
+				rand_num = random32();
+		}
+	}
+}
+#endif
+
 static int vfat_build_slots(struct inode *dir, const unsigned char *name,
 			    int len, int is_dir, int cluster,
 			    struct timespec *ts,
@@ -588,15 +669,19 @@
 {
 	struct msdos_sb_info *sbi = MSDOS_SB(dir->i_sb);
 	struct fat_mount_options *opts = &sbi->options;
-	struct msdos_dir_slot *ps;
 	struct msdos_dir_entry *de;
-	unsigned char cksum, lcase;
+	unsigned char lcase;
 	unsigned char msdos_name[MSDOS_NAME];
 	wchar_t *uname;
 	__le16 time, date;
 	u8 time_cs;
-	int err, ulen, usize, i;
+	int err, ulen, usize;
+#ifndef CONFIG_VFAT_NO_CREATE_WITH_LONGNAMES
+	int i;
+	struct msdos_dir_slot *ps;
+	unsigned char cksum;
 	loff_t offset;
+#endif
 
 	*nr_slots = 0;
 
@@ -623,6 +708,16 @@
 		goto shortname;
 	}
 
+#ifdef CONFIG_VFAT_FS_NO_DUALNAMES
+	printk_once(KERN_INFO
+		    "VFAT: not creating 8.3 short filenames for long names\n");
+	vfat_build_dummy_83_buffer(dir, msdos_name, is_dir);
+	lcase = FAT_NO_83NAME;
+#endif
+
+#ifdef CONFIG_VFAT_NO_CREATE_WITH_LONGNAMES
+	de = (struct msdos_dir_entry *)slots;
+#else
 	/* build the entry of long file name */
 	cksum = fat_checksum(msdos_name);
 
@@ -640,6 +735,7 @@
 	}
 	slots[0].id |= 0x40;
 	de = (struct msdos_dir_entry *)ps;
+#endif
 
 shortname:
 	/* build the entry of 8.3 alias name */
@@ -1068,7 +1164,11 @@
 
 static struct file_system_type vfat_fs_type = {
 	.owner		= THIS_MODULE,
+#ifdef CONFIG_VFAT_NO_CREATE_WITH_LONGNAMES
+	.name		= "lfat",
+#else
 	.name		= "vfat",
+#endif
 	.mount		= vfat_mount,
 	.kill_sb	= kill_block_super,
 	.fs_flags	= FS_REQUIRES_DEV,
@@ -1088,6 +1188,9 @@
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("VFAT filesystem support");
 MODULE_AUTHOR("Gordon Chaffee");
+#ifdef CONFIG_VFAT_NO_CREATE_WITH_LONGNAMES
+MODULE_ALIAS("lfat");
+#endif
 
 module_init(init_vfat_fs)
 module_exit(exit_vfat_fs)
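
To make the generated layout concrete: vfat_build_dummy_83_buffer() places a
'~' at a random position in the 8-byte base name, adds a '/' at a second
random position for regular files, fills the rest from invalidchar[], and
leaves the 3-byte extension alone. A hedged userspace mock of one arbitrary
outcome (the kernel draws its bytes from random32(); these values are
illustrative only):

	#include <stdio.h>

	int main(void)
	{
		/* tilde_pos == 2, slash_pos == 5; other bytes from invalidchar[] */
		const unsigned char name[8] = { 0x01, 0x1C, '~', 0x12, 0x05,
						'/', 0x1B, 0x16 };
		const char ext[3] = { 'T', 'X', 'T' };	/* extension is kept */
		int i;

		for (i = 0; i < 8; i++)
			printf("%02X ", (unsigned)name[i]);
		printf("| %.3s  (and de->lcase gets FAT_NO_83NAME)\n", ext);
		return 0;
	}
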
diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c
index e37eb27..7ca8c75 100644
--- a/fs/freevxfs/vxfs_super.c
+++ b/fs/freevxfs/vxfs_super.c
@@ -124,6 +124,7 @@
 
 static int vxfs_remount(struct super_block *sb, int *flags, char *data)
 {
+	sync_filesystem(sb);
 	*flags |= MS_RDONLY;
 	return 0;
 }
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index b443063..8d6005c 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -45,6 +45,7 @@
 	unsigned int for_kupdate:1;
 	unsigned int range_cyclic:1;
 	unsigned int for_background:1;
+	unsigned int for_sync:1;	/* sync(2) WB_SYNC_ALL writeback */
 	enum wb_reason reason;		/* why was writeback initiated? */
 
 	struct list_head list;		/* pending work list */
@@ -456,9 +457,11 @@
 	/*
 	 * Make sure to wait on the data before writing out the metadata.
 	 * This is important for filesystems that modify metadata on data
-	 * I/O completion.
+	 * I/O completion. We don't do it for sync(2) writeback because it has a
+	 * separate, external IO completion path and ->sync_fs for guaranteeing
+	 * inode metadata is written back correctly.
 	 */
-	if (wbc->sync_mode == WB_SYNC_ALL) {
+	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
 		int err = filemap_fdatawait(mapping);
 		if (ret == 0)
 			ret = err;
@@ -610,6 +613,7 @@
 		.tagged_writepages	= work->tagged_writepages,
 		.for_kupdate		= work->for_kupdate,
 		.for_background		= work->for_background,
+		.for_sync		= work->for_sync,
 		.range_cyclic		= work->range_cyclic,
 		.range_start		= 0,
 		.range_end		= LLONG_MAX,
@@ -1170,7 +1174,7 @@
 	if ((inode->i_state & flags) == flags)
 		return;
 
-	if (unlikely(block_dump))
+	if (unlikely(block_dump > 1))
 		block_dump___mark_inode_dirty(inode);
 
 	spin_lock(&inode->i_lock);
@@ -1393,6 +1397,7 @@
 		.range_cyclic	= 0,
 		.done		= &done,
 		.reason		= WB_REASON_SYNC,
+		.for_sync	= 1,
 	};
 
 	/* Nothing to do? */
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index b535008..f4e875a 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -20,6 +20,7 @@
 #include <linux/swap.h>
 #include <linux/splice.h>
 #include <linux/aio.h>
+#include <linux/freezer.h>
 
 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
 MODULE_ALIAS("devname:fuse");
@@ -464,7 +465,10 @@
 	 * Wait it out.
 	 */
 	spin_unlock(&fc->lock);
-	wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
+
+	while (req->state != FUSE_REQ_FINISHED)
+		wait_event_freezable(req->waitq,
+				     req->state == FUSE_REQ_FINISHED);
 	spin_lock(&fc->lock);
 
 	if (!req->aborted)
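
wait_event_freezable() sleeps interruptibly and calls try_to_freeze() on
wake-up, returning -ERESTARTSYS when a signal (including the freezer's fake
signal) ends the sleep early; the loop above therefore re-checks the
condition and sleeps again until the request really reaches
FUSE_REQ_FINISHED, which lets the task be frozen for suspend even while a
FUSE server is unresponsive. A hedged sketch of the pattern in isolation
(demo_* names are illustrative):

	#include <linux/wait.h>
	#include <linux/freezer.h>
	#include <linux/atomic.h>

	/* Sketch: a freezable wait that only completes once 'done' is set. */
	static void demo_wait_until_done(wait_queue_head_t *wq, atomic_t *done)
	{
		while (!atomic_read(done)) {
			/* Returns -ERESTARTSYS on signal/freezer wake-up;
			 * we simply re-check the condition and sleep again. */
			wait_event_freezable(*wq, atomic_read(done));
		}
	}
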
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index efe802e..0526f12 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -135,6 +135,7 @@
 
 static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
 {
+	sync_filesystem(sb);
 	if (*flags & MS_MANDLOCK)
 		return -EINVAL;
 
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 7625160..e8de950 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -110,7 +110,7 @@
 	/* Is the page fully outside i_size? (truncate in progress) */
 	offset = i_size & (PAGE_CACHE_SIZE-1);
 	if (page->index > end_index || (page->index == end_index && !offset)) {
-		page->mapping->a_ops->invalidatepage(page, 0);
+		page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
 		goto out;
 	}
 	return 1;
@@ -299,7 +299,8 @@
 
 		/* Is the page fully outside i_size? (truncate in progress) */
 		if (page->index > end_index || (page->index == end_index && !offset)) {
-			page->mapping->a_ops->invalidatepage(page, 0);
+			page->mapping->a_ops->invalidatepage(page, 0,
+							     PAGE_CACHE_SIZE);
 			unlock_page(page);
 			continue;
 		}
@@ -943,7 +944,8 @@
 	unlock_buffer(bh);
 }
 
-static void gfs2_invalidatepage(struct page *page, unsigned long offset)
+static void gfs2_invalidatepage(struct page *page, unsigned int offset,
+				unsigned int length)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
 	struct buffer_head *bh, *head;
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
index 9973df4..7102c7a 100644
--- a/fs/gfs2/export.c
+++ b/fs/gfs2/export.c
@@ -88,7 +88,10 @@
 	struct inode *dir = parent->d_inode;
 	struct inode *inode = child->d_inode;
 	struct gfs2_inode *dip, *ip;
-	struct get_name_filldir gnfd;
+	struct get_name_filldir gnfd = {
+		.ctx.actor = get_name_filldir,
+		.name = name
+	};
 	struct gfs2_holder gh;
 	u64 offset = 0;
 	int error;
@@ -106,7 +109,6 @@
 	*name = 0;
 	gnfd.inum.no_addr = ip->i_no_addr;
 	gnfd.inum.no_formal_ino = ip->i_no_formal_ino;
-	gnfd.name = name;
 
 	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &gh);
 	if (error)
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index e5639de..db7fff5 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1142,6 +1142,8 @@
 	struct gfs2_tune *gt = &sdp->sd_tune;
 	int error;
 
+	sync_filesystem(sb);
+
 	spin_lock(&gt->gt_spin);
 	args.ar_commit = gt->gt_logd_secs;
 	args.ar_quota_quantum = gt->gt_quota_quantum;
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 2d2039e..eee7206 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -112,6 +112,7 @@
 
 static int hfs_remount(struct super_block *sb, int *flags, char *data)
 {
+	sync_filesystem(sb);
 	*flags |= MS_NODIRATIME;
 	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
 		return 0;
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 4c4d142..1b9414f 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -323,6 +323,7 @@
 
 static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
 {
+	sync_filesystem(sb);
 	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
 		return 0;
 	if (!(*flags & MS_RDONLY)) {
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 2c1ce19..0d4b1f6 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -398,6 +398,8 @@
 	struct hpfs_sb_info *sbi = hpfs_sb(s);
 	char *new_opts = kstrdup(data, GFP_KERNEL);
 	
+	sync_filesystem(s);
+
 	*flags |= MS_NOATIME;
 	
 	hpfs_lock(s);
diff --git a/fs/inode.c b/fs/inode.c
index 17f95b4..e099403 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -56,6 +56,7 @@
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
 
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
+EXPORT_SYMBOL(inode_sb_list_lock);
 
 /*
  * Empty aops. Can be used for the cases where the user does not
@@ -980,6 +981,41 @@
 EXPORT_SYMBOL(unlock_new_inode);
 
 /**
+ * lock_two_nondirectories - take two i_mutexes on non-directory objects
+ *
+ * Lock any non-NULL argument that is not a directory.
+ * Zero, one or two objects may be locked by this function.
+ *
+ * @inode1: first inode to lock
+ * @inode2: second inode to lock
+ */
+void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
+{
+	if (inode1 > inode2)
+		swap(inode1, inode2);
+
+	if (inode1 && !S_ISDIR(inode1->i_mode))
+		mutex_lock(&inode1->i_mutex);
+	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
+		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_NONDIR2);
+}
+EXPORT_SYMBOL(lock_two_nondirectories);
+
+/**
+ * unlock_two_nondirectories - release locks from lock_two_nondirectories()
+ * @inode1: first inode to unlock
+ * @inode2: second inode to unlock
+ */
+void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
+{
+	if (inode1 && !S_ISDIR(inode1->i_mode))
+		mutex_unlock(&inode1->i_mutex);
+	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
+		mutex_unlock(&inode2->i_mutex);
+}
+EXPORT_SYMBOL(unlock_two_nondirectories);
+
+/**
  * iget5_locked - obtain an inode from a mounted file system
  * @sb:		super block of file system
  * @hashval:	hash value (usually inode number) to get
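
lock_two_nondirectories() sorts the two inodes by pointer so every caller
acquires the i_mutexes in the same global order (no ABBA deadlock), skips
directories, and tolerates NULL or identical arguments. A hedged usage
sketch (the demo function is illustrative; the intended consumers are
operations such as rename that touch two non-directory inodes at once):

	/* Sketch: operate on two regular files' inodes with both locks held. */
	static void demo_touch_pair(struct inode *a, struct inode *b)
	{
		lock_two_nondirectories(a, b);
		/* ... both i_mutexes held here, in a stable order ... */
		unlock_two_nondirectories(a, b);
	}
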
@@ -1498,7 +1534,7 @@
  * This does the actual work of updating an inodes time or version.  Must have
  * had called mnt_want_write() before calling this.
  */
-static int update_time(struct inode *inode, struct timespec *time, int flags)
+int update_time(struct inode *inode, struct timespec *time, int flags)
 {
 	if (inode->i_op->update_time)
 		return inode->i_op->update_time(inode, time, flags);
@@ -1514,6 +1550,7 @@
 	mark_inode_dirty_sync(inode);
 	return 0;
 }
+EXPORT_SYMBOL(update_time);
 
 /**
  *	touch_atime	-	update the access time
@@ -1900,3 +1937,34 @@
 		wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
 }
 EXPORT_SYMBOL(inode_dio_done);
+
+/*
+ * inode_set_flags - atomically set some inode flags
+ *
+ * Note: the caller should be holding i_mutex, or else be sure that
+ * they have exclusive access to the inode structure (i.e., while the
+ * inode is being instantiated).  The cmpxchg() loop would not be
+ * necessary if all code paths that modify i_flags actually followed
+ * this rule, but at least one code path does not today: for example,
+ * __generic_file_aio_write() calls file_remove_suid() without holding
+ * i_mutex, so we use cmpxchg() out of an abundance of caution.
+ *
+ * In the long run, i_mutex is overkill, and we should probably look
+ * at using the i_lock spinlock to protect i_flags, and then make sure
+ * it is so documented in include/linux/fs.h and that all code follows
+ * the locking convention!!
+ */
+void inode_set_flags(struct inode *inode, unsigned int flags,
+		     unsigned int mask)
+{
+	unsigned int old_flags, new_flags;
+
+	WARN_ON_ONCE(flags & ~mask);
+	do {
+		old_flags = ACCESS_ONCE(inode->i_flags);
+		new_flags = (old_flags & ~mask) | flags;
+	} while (unlikely(cmpxchg(&inode->i_flags, old_flags,
+				  new_flags) != old_flags));
+}
+EXPORT_SYMBOL(inode_set_flags);
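
A hedged usage sketch for inode_set_flags(): replace only the bits named in
the mask, leaving the rest of i_flags untouched even against a racing
updater (the flag choice below is illustrative, in the style of
ext4_set_inode_flags()):

	/* Sketch: update just the S_SYNC/S_NOATIME bits of i_flags. */
	static void demo_apply_flags(struct inode *inode, bool sync, bool noatime)
	{
		unsigned int new_fl = 0;

		if (sync)
			new_fl |= S_SYNC;
		if (noatime)
			new_fl |= S_NOATIME;
		inode_set_flags(inode, new_fl, S_SYNC | S_NOATIME);
	}
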
diff --git a/fs/jbd2/Kconfig b/fs/jbd2/Kconfig
index 69a48c2..5a9f553 100644
--- a/fs/jbd2/Kconfig
+++ b/fs/jbd2/Kconfig
@@ -20,7 +20,7 @@
 
 config JBD2_DEBUG
 	bool "JBD2 (ext4) debugging support"
-	depends on JBD2 && DEBUG_FS
+	depends on JBD2
 	help
 	  If you are using the ext4 journaled file system (or
 	  potentially any other filesystem/device using JBD2), this option
@@ -29,7 +29,7 @@
 	  By default, the debugging output will be turned off.
 
 	  If you select Y here, then you will be able to turn on debugging
-	  with "echo N > /sys/kernel/debug/jbd2/jbd2-debug", where N is a
+	  with "echo N > /sys/module/jbd2/parameters/jbd2_debug", where N is a
 	  number between 1 and 5. The higher the number, the more debugging
 	  output is generated.  To turn debugging off again, do
-	  "echo 0 > /sys/kernel/debug/jbd2/jbd2-debug".
+	  "echo 0 > /sys/module/jbd2/parameters/jbd2_debug".
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 6bb5285..4227dc4 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -96,15 +96,8 @@
 
 	if (jh->b_transaction == NULL && !buffer_locked(bh) &&
 	    !buffer_dirty(bh) && !buffer_write_io_error(bh)) {
-		/*
-		 * Get our reference so that bh cannot be freed before
-		 * we unlock it
-		 */
-		get_bh(bh);
 		JBUFFER_TRACE(jh, "remove from checkpoint list");
 		ret = __jbd2_journal_remove_checkpoint(jh) + 1;
-		BUFFER_TRACE(bh, "release");
-		__brelse(bh);
 	}
 	return ret;
 }
@@ -120,10 +113,8 @@
 	int nblocks, space_left;
 	/* assert_spin_locked(&journal->j_state_lock); */
 
-	nblocks = jbd_space_needed(journal);
-	while (__jbd2_log_space_left(journal) < nblocks) {
-		if (journal->j_flags & JBD2_ABORT)
-			return;
+	nblocks = jbd2_space_needed(journal);
+	while (jbd2_log_space_left(journal) < nblocks) {
 		write_unlock(&journal->j_state_lock);
 		mutex_lock(&journal->j_checkpoint_mutex);
 
@@ -139,9 +130,13 @@
 		 * trace for forensic evidence.
 		 */
 		write_lock(&journal->j_state_lock);
+		if (journal->j_flags & JBD2_ABORT) {
+			mutex_unlock(&journal->j_checkpoint_mutex);
+			return;
+		}
 		spin_lock(&journal->j_list_lock);
-		nblocks = jbd_space_needed(journal);
-		space_left = __jbd2_log_space_left(journal);
+		nblocks = jbd2_space_needed(journal);
+		space_left = jbd2_log_space_left(journal);
 		if (space_left < nblocks) {
 			int chkpt = journal->j_checkpoint_transactions != NULL;
 			tid_t tid = 0;
@@ -156,7 +151,15 @@
 				/* We were able to recover space; yay! */
 				;
 			} else if (tid) {
+				/*
+				 * jbd2_journal_commit_transaction() may want
+				 * to take the checkpoint_mutex if JBD2_FLUSHED
+				 * is set.  So we need to temporarily drop it.
+				 */
+				mutex_unlock(&journal->j_checkpoint_mutex);
 				jbd2_log_wait_commit(journal, tid);
+				write_lock(&journal->j_state_lock);
+				continue;
 			} else {
 				printk(KERN_ERR "%s: needed %d blocks and "
 				       "only had %d space available\n",
@@ -175,58 +178,6 @@
 	}
 }
 
-/*
- * Clean up transaction's list of buffers submitted for io.
- * We wait for any pending IO to complete and remove any clean
- * buffers. Note that we take the buffers in the opposite ordering
- * from the one in which they were submitted for IO.
- *
- * Return 0 on success, and return <0 if some buffers have failed
- * to be written out.
- *
- * Called with j_list_lock held.
- */
-static int __wait_cp_io(journal_t *journal, transaction_t *transaction)
-{
-	struct journal_head *jh;
-	struct buffer_head *bh;
-	tid_t this_tid;
-	int released = 0;
-	int ret = 0;
-
-	this_tid = transaction->t_tid;
-restart:
-	/* Did somebody clean up the transaction in the meanwhile? */
-	if (journal->j_checkpoint_transactions != transaction ||
-			transaction->t_tid != this_tid)
-		return ret;
-	while (!released && transaction->t_checkpoint_io_list) {
-		jh = transaction->t_checkpoint_io_list;
-		bh = jh2bh(jh);
-		get_bh(bh);
-		if (buffer_locked(bh)) {
-			spin_unlock(&journal->j_list_lock);
-			wait_on_buffer(bh);
-			/* the journal_head may have gone by now */
-			BUFFER_TRACE(bh, "brelse");
-			__brelse(bh);
-			spin_lock(&journal->j_list_lock);
-			goto restart;
-		}
-		if (unlikely(buffer_write_io_error(bh)))
-			ret = -EIO;
-
-		/*
-		 * Now in whatever state the buffer currently is, we know that
-		 * it has been written out and so we can drop it from the list
-		 */
-		released = __jbd2_journal_remove_checkpoint(jh);
-		__brelse(bh);
-	}
-
-	return ret;
-}
-
 static void
 __flush_batch(journal_t *journal, int *batch_count)
 {
@@ -247,81 +198,6 @@
 }
 
 /*
- * Try to flush one buffer from the checkpoint list to disk.
- *
- * Return 1 if something happened which requires us to abort the current
- * scan of the checkpoint list.  Return <0 if the buffer has failed to
- * be written out.
- *
- * Called with j_list_lock held and drops it if 1 is returned
- */
-static int __process_buffer(journal_t *journal, struct journal_head *jh,
-			    int *batch_count, transaction_t *transaction)
-{
-	struct buffer_head *bh = jh2bh(jh);
-	int ret = 0;
-
-	if (buffer_locked(bh)) {
-		get_bh(bh);
-		spin_unlock(&journal->j_list_lock);
-		wait_on_buffer(bh);
-		/* the journal_head may have gone by now */
-		BUFFER_TRACE(bh, "brelse");
-		__brelse(bh);
-		ret = 1;
-	} else if (jh->b_transaction != NULL) {
-		transaction_t *t = jh->b_transaction;
-		tid_t tid = t->t_tid;
-
-		transaction->t_chp_stats.cs_forced_to_close++;
-		spin_unlock(&journal->j_list_lock);
-		if (unlikely(journal->j_flags & JBD2_UNMOUNT))
-			/*
-			 * The journal thread is dead; so starting and
-			 * waiting for a commit to finish will cause
-			 * us to wait for a _very_ long time.
-			 */
-			printk(KERN_ERR "JBD2: %s: "
-			       "Waiting for Godot: block %llu\n",
-			       journal->j_devname,
-			       (unsigned long long) bh->b_blocknr);
-		jbd2_log_start_commit(journal, tid);
-		jbd2_log_wait_commit(journal, tid);
-		ret = 1;
-	} else if (!buffer_dirty(bh)) {
-		ret = 1;
-		if (unlikely(buffer_write_io_error(bh)))
-			ret = -EIO;
-		get_bh(bh);
-		BUFFER_TRACE(bh, "remove from checkpoint");
-		__jbd2_journal_remove_checkpoint(jh);
-		spin_unlock(&journal->j_list_lock);
-		__brelse(bh);
-	} else {
-		/*
-		 * Important: we are about to write the buffer, and
-		 * possibly block, while still holding the journal lock.
-		 * We cannot afford to let the transaction logic start
-		 * messing around with this buffer before we write it to
-		 * disk, as that would break recoverability.
-		 */
-		BUFFER_TRACE(bh, "queue");
-		get_bh(bh);
-		J_ASSERT_BH(bh, !buffer_jwrite(bh));
-		journal->j_chkpt_bhs[*batch_count] = bh;
-		__buffer_relink_io(jh);
-		transaction->t_chp_stats.cs_written++;
-		(*batch_count)++;
-		if (*batch_count == JBD2_NR_BATCH) {
-			spin_unlock(&journal->j_list_lock);
-			__flush_batch(journal, batch_count);
-			ret = 1;
-		}
-	}
-	return ret;
-}
-
-/*
  * Perform an actual checkpoint. We take the first transaction on the
  * list of transactions to be checkpointed and send all its buffers
  * to disk. We submit larger chunks of data at once.
@@ -331,9 +207,11 @@
  */
 int jbd2_log_do_checkpoint(journal_t *journal)
 {
-	transaction_t *transaction;
-	tid_t this_tid;
-	int result;
+	struct journal_head	*jh;
+	struct buffer_head	*bh;
+	transaction_t		*transaction;
+	tid_t			this_tid;
+	int			result, batch_count = 0;
 
 	jbd_debug(1, "Start checkpoint\n");
 
@@ -366,45 +244,117 @@
 	 * done (maybe it's a new transaction, but it fell at the same
 	 * address).
 	 */
-	if (journal->j_checkpoint_transactions == transaction &&
-			transaction->t_tid == this_tid) {
-		int batch_count = 0;
-		struct journal_head *jh;
-		int retry = 0, err;
+	if (journal->j_checkpoint_transactions != transaction ||
+	    transaction->t_tid != this_tid)
+		goto out;
 
-		while (!retry && transaction->t_checkpoint_list) {
-			jh = transaction->t_checkpoint_list;
-			retry = __process_buffer(journal, jh, &batch_count,
-						 transaction);
-			if (retry < 0 && !result)
-				result = retry;
-			if (!retry && (need_resched() ||
-				spin_needbreak(&journal->j_list_lock))) {
-				spin_unlock(&journal->j_list_lock);
-				retry = 1;
-				break;
-			}
+	/* checkpoint all of the transaction's buffers */
+	while (transaction->t_checkpoint_list) {
+		jh = transaction->t_checkpoint_list;
+		bh = jh2bh(jh);
+
+		if (buffer_locked(bh)) {
+			spin_unlock(&journal->j_list_lock);
+			get_bh(bh);
+			wait_on_buffer(bh);
+			/* the journal_head may have gone by now */
+			BUFFER_TRACE(bh, "brelse");
+			__brelse(bh);
+			goto retry;
 		}
+		if (jh->b_transaction != NULL) {
+			transaction_t *t = jh->b_transaction;
+			tid_t tid = t->t_tid;
 
-		if (batch_count) {
-			if (!retry) {
-				spin_unlock(&journal->j_list_lock);
-				retry = 1;
-			}
-			__flush_batch(journal, &batch_count);
+			transaction->t_chp_stats.cs_forced_to_close++;
+			spin_unlock(&journal->j_list_lock);
+			if (unlikely(journal->j_flags & JBD2_UNMOUNT))
+				/*
+				 * The journal thread is dead; so
+				 * starting and waiting for a commit
+				 * to finish will cause us to wait for
+				 * a _very_ long time.
+				 */
+				printk(KERN_ERR
+		"JBD2: %s: Waiting for Godot: block %llu\n",
+		journal->j_devname, (unsigned long long) bh->b_blocknr);
+
+			jbd2_log_start_commit(journal, tid);
+			jbd2_log_wait_commit(journal, tid);
+			goto retry;
 		}
-
-		if (retry) {
-			spin_lock(&journal->j_list_lock);
-			goto restart;
+		if (!buffer_dirty(bh)) {
+			if (unlikely(buffer_write_io_error(bh)) && !result)
+				result = -EIO;
+			BUFFER_TRACE(bh, "remove from checkpoint");
+			if (__jbd2_journal_remove_checkpoint(jh))
+				/* The transaction was released; we're done */
+				goto out;
+			continue;
 		}
 		/*
-		 * Now we have cleaned up the first transaction's checkpoint
-		 * list. Let's clean up the second one
+		 * Important: we are about to write the buffer, and
+		 * possibly block, while still holding the journal
+		 * lock.  We cannot afford to let the transaction
+		 * logic start messing around with this buffer before
+		 * we write it to disk, as that would break
+		 * recoverability.
 		 */
-		err = __wait_cp_io(journal, transaction);
-		if (!result)
-			result = err;
+		BUFFER_TRACE(bh, "queue");
+		get_bh(bh);
+		J_ASSERT_BH(bh, !buffer_jwrite(bh));
+		journal->j_chkpt_bhs[batch_count++] = bh;
+		__buffer_relink_io(jh);
+		transaction->t_chp_stats.cs_written++;
+		if ((batch_count == JBD2_NR_BATCH) ||
+		    need_resched() ||
+		    spin_needbreak(&journal->j_list_lock))
+			goto unlock_and_flush;
+	}
+
+	if (batch_count) {
+		unlock_and_flush:
+			spin_unlock(&journal->j_list_lock);
+		retry:
+			if (batch_count)
+				__flush_batch(journal, &batch_count);
+			spin_lock(&journal->j_list_lock);
+			goto restart;
+	}
+
+	/*
+	 * Now we issued all of the transaction's buffers, let's deal
+	 * with the buffers that are out for I/O.
+	 */
+restart2:
+	/* Did somebody clean up the transaction in the meanwhile? */
+	if (journal->j_checkpoint_transactions != transaction ||
+	    transaction->t_tid != this_tid)
+		goto out;
+
+	while (transaction->t_checkpoint_io_list) {
+		jh = transaction->t_checkpoint_io_list;
+		bh = jh2bh(jh);
+		if (buffer_locked(bh)) {
+			spin_unlock(&journal->j_list_lock);
+			get_bh(bh);
+			wait_on_buffer(bh);
+			/* the journal_head may have gone by now */
+			BUFFER_TRACE(bh, "brelse");
+			__brelse(bh);
+			spin_lock(&journal->j_list_lock);
+			goto restart2;
+		}
+		if (unlikely(buffer_write_io_error(bh)) && !result)
+			result = -EIO;
+
+		/*
+		 * Now in whatever state the buffer currently is, we
+		 * know that it has been written out and so we can
+		 * drop it from the list
+		 */
+		if (__jbd2_journal_remove_checkpoint(jh))
+			break;
 	}
 out:
 	spin_unlock(&journal->j_list_lock);
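
For context on the batching above: __flush_batch() (unchanged by this patch,
and not shown in this hunk) submits the buffers collected in j_chkpt_bhs[]
and drops the references taken before queueing. A hedged sketch of its
behaviour, reconstructed from the surrounding code rather than quoted:

	/* Sketch: submit up to JBD2_NR_BATCH queued checkpoint buffers. */
	static void demo_flush_batch(journal_t *journal, int *batch_count)
	{
		struct blk_plug plug;
		int i;

		blk_start_plug(&plug);
		for (i = 0; i < *batch_count; i++)
			write_dirty_buffer(journal->j_chkpt_bhs[i], WRITE_SYNC);
		blk_finish_plug(&plug);

		for (i = 0; i < *batch_count; i++) {
			struct buffer_head *bh = journal->j_chkpt_bhs[i];

			clear_buffer_jwrite(bh);	/* undo the queueing mark */
			__brelse(bh);			/* drop the get_bh() ref */
		}
		*batch_count = 0;
	}
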
@@ -469,18 +419,16 @@
  * Find all the written-back checkpoint buffers in the given list and
  * release them.
  *
- * Called with the journal locked.
  * Called with j_list_lock held.
- * Returns number of buffers reaped (for debug)
+ * Returns 1 if we freed the transaction, 0 otherwise.
  */
-
-static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
+static int journal_clean_one_cp_list(struct journal_head *jh)
 {
 	struct journal_head *last_jh;
 	struct journal_head *next_jh = jh;
-	int ret, freed = 0;
+	int ret;
+	int freed = 0;
 
-	*released = 0;
 	if (!jh)
 		return 0;
 
@@ -489,13 +437,11 @@
 		jh = next_jh;
 		next_jh = jh->b_cpnext;
 		ret = __try_to_free_cp_buf(jh);
-		if (ret) {
-			freed++;
-			if (ret == 2) {
-				*released = 1;
-				return freed;
-			}
-		}
+		if (!ret)
+			return freed;
+		if (ret == 2)
+			return 1;
+		freed = 1;
 		/*
 		 * This function only frees up some memory
 		 * if possible so we dont have an obligation
@@ -514,49 +460,49 @@
  *
  * Find all the written-back checkpoint buffers in the journal and release them.
  *
- * Called with the journal locked.
  * Called with j_list_lock held.
- * Returns number of buffers reaped (for debug)
  */
-
-int __jbd2_journal_clean_checkpoint_list(journal_t *journal)
+void __jbd2_journal_clean_checkpoint_list(journal_t *journal)
 {
 	transaction_t *transaction, *last_transaction, *next_transaction;
-	int ret = 0;
-	int released;
+	int ret;
 
 	transaction = journal->j_checkpoint_transactions;
 	if (!transaction)
-		goto out;
+		return;
 
 	last_transaction = transaction->t_cpprev;
 	next_transaction = transaction;
 	do {
 		transaction = next_transaction;
 		next_transaction = transaction->t_cpnext;
-		ret += journal_clean_one_cp_list(transaction->
-				t_checkpoint_list, &released);
+		ret = journal_clean_one_cp_list(transaction->t_checkpoint_list);
 		/*
 		 * This function only frees up some memory if possible so we
 		 * dont have an obligation to finish processing. Bail out if
 		 * preemption requested:
 		 */
 		if (need_resched())
-			goto out;
-		if (released)
+			return;
+		if (ret)
 			continue;
 		/*
 		 * It is essential that we are as careful as in the case of
 		 * t_checkpoint_list with removing the buffer from the list as
 		 * we can possibly see not yet submitted buffers on io_list
 		 */
-		ret += journal_clean_one_cp_list(transaction->
-				t_checkpoint_io_list, &released);
+		ret = journal_clean_one_cp_list(transaction->
+				t_checkpoint_io_list);
 		if (need_resched())
-			goto out;
+			return;
+		/*
+		 * Stop scanning if we couldn't free the transaction. This
+		 * avoids pointless scanning of transactions that have
+		 * not been checkpointed yet.
+		 */
+		if (!ret)
+			return;
 	} while (transaction != last_transaction);
-out:
-	return ret;
 }
 
 /*
@@ -624,10 +570,6 @@
 
 	__jbd2_journal_drop_transaction(journal, transaction);
 	jbd2_journal_free_transaction(transaction);
-
-	/* Just in case anybody was waiting for more transactions to be
-           checkpointed... */
-	wake_up(&journal->j_wait_logspace);
 	ret = 1;
 out:
 	return ret;
@@ -689,9 +631,7 @@
 	J_ASSERT(transaction->t_state == T_FINISHED);
 	J_ASSERT(transaction->t_buffers == NULL);
 	J_ASSERT(transaction->t_forget == NULL);
-	J_ASSERT(transaction->t_iobuf_list == NULL);
 	J_ASSERT(transaction->t_shadow_list == NULL);
-	J_ASSERT(transaction->t_log_list == NULL);
 	J_ASSERT(transaction->t_checkpoint_list == NULL);
 	J_ASSERT(transaction->t_checkpoint_io_list == NULL);
 	J_ASSERT(atomic_read(&transaction->t_updates) == 0);
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 0f53946..b73e021 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -30,15 +30,22 @@
 #include <trace/events/jbd2.h>
 
 /*
- * Default IO end handler for temporary BJ_IO buffer_heads.
+ * IO end handler for temporary buffer_heads handling writes to the journal.
  */
 static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
 {
+	struct buffer_head *orig_bh = bh->b_private;
+
 	BUFFER_TRACE(bh, "");
 	if (uptodate)
 		set_buffer_uptodate(bh);
 	else
 		clear_buffer_uptodate(bh);
+	if (orig_bh) {
+		clear_bit_unlock(BH_Shadow, &orig_bh->b_state);
+		smp_mb__after_atomic();
+		wake_up_bit(&orig_bh->b_state, BH_Shadow);
+	}
 	unlock_buffer(bh);
 }
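
The wake-up added above pairs with a waiter on the metadata buffer: anyone
who wants write access to a buffer whose shadow copy is still being written
to the log sleeps on BH_Shadow until this completion handler clears the bit.
A hedged sketch of the waiting side (the in-tree version is a static helper
used by do_get_write_access(); demo_* names are illustrative):

	#include <linux/jbd2.h>
	#include <linux/sched.h>

	static int demo_sleep_on_shadow_bh(void *word)
	{
		io_schedule();
		return 0;
	}

	/* Sketch: block until the buffer's shadow write to the journal ends;
	 * woken by the clear_bit_unlock()/wake_up_bit() pair above. */
	static void demo_wait_shadow(struct buffer_head *bh)
	{
		wait_on_bit(&bh->b_state, BH_Shadow,
			    demo_sleep_on_shadow_bh, TASK_UNINTERRUPTIBLE);
	}
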
 
@@ -85,21 +92,19 @@
 	__brelse(bh);
 }
 
-static void jbd2_commit_block_csum_set(journal_t *j,
-				       struct journal_head *descriptor)
+static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
 {
 	struct commit_header *h;
 	__u32 csum;
 
-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+	if (!jbd2_journal_has_csum_v2or3(j))
 		return;
 
-	h = (struct commit_header *)(jh2bh(descriptor)->b_data);
+	h = (struct commit_header *)(bh->b_data);
 	h->h_chksum_type = 0;
 	h->h_chksum_size = 0;
 	h->h_chksum[0] = 0;
-	csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data,
-			   j->j_blocksize);
+	csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
 	h->h_chksum[0] = cpu_to_be32(csum);
 }
 
@@ -116,7 +121,6 @@
 					struct buffer_head **cbh,
 					__u32 crc32_sum)
 {
-	struct journal_head *descriptor;
 	struct commit_header *tmp;
 	struct buffer_head *bh;
 	int ret;
@@ -127,12 +131,10 @@
 	if (is_journal_aborted(journal))
 		return 0;
 
-	descriptor = jbd2_journal_get_descriptor_buffer(journal);
-	if (!descriptor)
+	bh = jbd2_journal_get_descriptor_buffer(journal);
+	if (!bh)
 		return 1;
 
-	bh = jh2bh(descriptor);
-
 	tmp = (struct commit_header *)bh->b_data;
 	tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
 	tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
@@ -146,9 +148,9 @@
 		tmp->h_chksum_size 	= JBD2_CRC32_CHKSUM_SIZE;
 		tmp->h_chksum[0] 	= cpu_to_be32(crc32_sum);
 	}
-	jbd2_commit_block_csum_set(journal, descriptor);
+	jbd2_commit_block_csum_set(journal, bh);
 
-	JBUFFER_TRACE(descriptor, "submit commit block");
+	BUFFER_TRACE(bh, "submit commit block");
 	lock_buffer(bh);
 	clear_buffer_dirty(bh);
 	set_buffer_uptodate(bh);
@@ -180,7 +182,6 @@
 	if (unlikely(!buffer_uptodate(bh)))
 		ret = -EIO;
 	put_bh(bh);            /* One for getblk() */
-	jbd2_journal_put_journal_head(bh2jh(bh));
 
 	return ret;
 }
@@ -238,7 +239,7 @@
 		spin_lock(&journal->j_list_lock);
 		J_ASSERT(jinode->i_transaction == commit_transaction);
 		clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
-		smp_mb__after_clear_bit();
+		smp_mb__after_atomic();
 		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
 	}
 	spin_unlock(&journal->j_list_lock);
@@ -276,7 +277,7 @@
 		}
 		spin_lock(&journal->j_list_lock);
 		clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
-		smp_mb__after_clear_bit();
+		smp_mb__after_atomic();
 		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
 	}
 
@@ -312,51 +313,53 @@
 	return checksum;
 }
 
-static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
+static void write_tag_block(journal_t *j, journal_block_tag_t *tag,
 				   unsigned long long block)
 {
 	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
-	if (tag_bytes > JBD2_TAG_SIZE32)
+	if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_64BIT))
 		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
 }
 
 static void jbd2_descr_block_csum_set(journal_t *j,
-				      struct journal_head *descriptor)
+				      struct buffer_head *bh)
 {
 	struct jbd2_journal_block_tail *tail;
 	__u32 csum;
 
-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+	if (!jbd2_journal_has_csum_v2or3(j))
 		return;
 
-	tail = (struct jbd2_journal_block_tail *)
-			(jh2bh(descriptor)->b_data + j->j_blocksize -
+	tail = (struct jbd2_journal_block_tail *)(bh->b_data + j->j_blocksize -
 			sizeof(struct jbd2_journal_block_tail));
 	tail->t_checksum = 0;
-	csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data,
-			   j->j_blocksize);
+	csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
 	tail->t_checksum = cpu_to_be32(csum);
 }
 
 static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
 				    struct buffer_head *bh, __u32 sequence)
 {
+	journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
 	struct page *page = bh->b_page;
 	__u8 *addr;
-	__u32 csum;
+	__u32 csum32;
+	__be32 seq;
 
-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+	if (!jbd2_journal_has_csum_v2or3(j))
 		return;
 
-	sequence = cpu_to_be32(sequence);
+	seq = cpu_to_be32(sequence);
 	addr = kmap_atomic(page);
-	csum = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence,
-			  sizeof(sequence));
-	csum = jbd2_chksum(j, csum, addr + offset_in_page(bh->b_data),
-			  bh->b_size);
+	csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
+	csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data),
+			     bh->b_size);
 	kunmap_atomic(addr);
 
-	tag->t_checksum = cpu_to_be32(csum);
+	if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V3))
+		tag3->t_checksum = cpu_to_be32(csum32);
+	else
+		tag->t_checksum = cpu_to_be16(csum32);
 }
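
jbd2_block_tag_csum_set() now stores the tag checksum at a width selected by
the incompat feature: CSUM_V3 tags carry the full 32 bits, while CSUM_V2
truncates to the low 16. A hedged sketch of the matching reader-side
comparison (the real verifier lives in fs/jbd2/recovery.c; this is
illustrative):

	/* Sketch: check a tag checksum at the width the journal uses. */
	static int demo_tag_csum_ok(journal_t *j, journal_block_tag_t *tag,
				    __u32 csum32)
	{
		journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;

		if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V3))
			return be32_to_cpu(tag3->t_checksum) == csum32;
		return be16_to_cpu(tag->t_checksum) == (csum32 & 0xFFFF);
	}
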
 /*
  * jbd2_journal_commit_transaction
@@ -368,7 +371,8 @@
 {
 	struct transaction_stats_s stats;
 	transaction_t *commit_transaction;
-	struct journal_head *jh, *new_jh, *descriptor;
+	struct journal_head *jh;
+	struct buffer_head *descriptor;
 	struct buffer_head **wbuf = journal->j_wbuf;
 	int bufs;
 	int flags;
@@ -392,8 +396,10 @@
 	tid_t first_tid;
 	int update_tail;
 	int csum_size = 0;
+	LIST_HEAD(io_bufs);
+	LIST_HEAD(log_bufs);
 
-	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+	if (jbd2_journal_has_csum_v2or3(journal))
 		csum_size = sizeof(struct jbd2_journal_block_tail);
 
 	/*
@@ -424,13 +430,13 @@
 	J_ASSERT(journal->j_committing_transaction == NULL);
 
 	commit_transaction = journal->j_running_transaction;
-	J_ASSERT(commit_transaction->t_state == T_RUNNING);
 
 	trace_jbd2_start_commit(journal, commit_transaction);
 	jbd_debug(1, "JBD2: starting commit of transaction %d\n",
 			commit_transaction->t_tid);
 
 	write_lock(&journal->j_state_lock);
+	J_ASSERT(commit_transaction->t_state == T_RUNNING);
 	commit_transaction->t_state = T_LOCKED;
 
 	trace_jbd2_commit_locking(journal, commit_transaction);
@@ -520,6 +526,12 @@
 	 */
 	jbd2_journal_switch_revoke_table(journal);
 
+	/*
+	 * Reserved credits cannot be claimed anymore, free them
+	 */
+	atomic_sub(atomic_read(&journal->j_reserved_credits),
+		   &commit_transaction->t_outstanding_credits);
+
 	trace_jbd2_commit_flushing(journal, commit_transaction);
 	stats.run.rs_flushing = jiffies;
 	stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
@@ -533,7 +545,7 @@
 	wake_up(&journal->j_wait_transaction_locked);
 	write_unlock(&journal->j_state_lock);
 
-	jbd_debug(3, "JBD2: commit phase 2\n");
+	jbd_debug(3, "JBD2: commit phase 2a\n");
 
 	/*
 	 * Now start flushing things to disk, in the order they appear
@@ -545,10 +557,9 @@
 
 	blk_start_plug(&plug);
 	jbd2_journal_write_revoke_records(journal, commit_transaction,
-					  WRITE_SYNC);
-	blk_finish_plug(&plug);
+					  &log_bufs, WRITE_SYNC);
 
-	jbd_debug(3, "JBD2: commit phase 2\n");
+	jbd_debug(3, "JBD2: commit phase 2b\n");
 
 	/*
 	 * Way to go: we have now written out all of the data for a
@@ -571,9 +582,8 @@
 		 atomic_read(&commit_transaction->t_outstanding_credits));
 
 	err = 0;
-	descriptor = NULL;
 	bufs = 0;
-	blk_start_plug(&plug);
+	descriptor = NULL;
 	while (commit_transaction->t_buffers) {
 
 		/* Find the next buffer to be journaled... */
@@ -604,8 +614,6 @@
 		   record the metadata buffer. */
 
 		if (!descriptor) {
-			struct buffer_head *bh;
-
 			J_ASSERT (bufs == 0);
 
 			jbd_debug(4, "JBD2: get descriptor\n");
@@ -616,26 +624,26 @@
 				continue;
 			}
 
-			bh = jh2bh(descriptor);
 			jbd_debug(4, "JBD2: got buffer %llu (%p)\n",
-				(unsigned long long)bh->b_blocknr, bh->b_data);
-			header = (journal_header_t *)&bh->b_data[0];
+				(unsigned long long)descriptor->b_blocknr,
+				descriptor->b_data);
+			header = (journal_header_t *)descriptor->b_data;
 			header->h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
 			header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
 			header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);
 
-			tagp = &bh->b_data[sizeof(journal_header_t)];
-			space_left = bh->b_size - sizeof(journal_header_t);
+			tagp = &descriptor->b_data[sizeof(journal_header_t)];
+			space_left = descriptor->b_size -
+						sizeof(journal_header_t);
 			first_tag = 1;
-			set_buffer_jwrite(bh);
-			set_buffer_dirty(bh);
-			wbuf[bufs++] = bh;
+			set_buffer_jwrite(descriptor);
+			set_buffer_dirty(descriptor);
+			wbuf[bufs++] = descriptor;
 
 			/* Record it so that we can wait for IO
                            completion later */
-			BUFFER_TRACE(bh, "ph3: file as descriptor");
-			jbd2_journal_file_buffer(descriptor, commit_transaction,
-					BJ_LogCtl);
+			BUFFER_TRACE(descriptor, "ph3: file as descriptor");
+			jbd2_file_log_bh(&log_bufs, descriptor);
 		}
 
 		/* Where is the buffer to be written? */
@@ -658,29 +666,22 @@
 
 		/* Bump b_count to prevent truncate from stumbling over
                    the shadowed buffer!  @@@ This can go if we ever get
-                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
+                   rid of the shadow pairing of buffers. */
 		atomic_inc(&jh2bh(jh)->b_count);
 
-		/* Make a temporary IO buffer with which to write it out
-                   (this will requeue both the metadata buffer and the
-                   temporary IO buffer). new_bh goes on BJ_IO*/
-
-		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
 		/*
-		 * akpm: jbd2_journal_write_metadata_buffer() sets
-		 * new_bh->b_transaction to commit_transaction.
-		 * We need to clean this up before we release new_bh
-		 * (which is of type BJ_IO)
+		 * Make a temporary IO buffer with which to write it out
+		 * (this will requeue the metadata buffer to BJ_Shadow).
 		 */
+		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
 		JBUFFER_TRACE(jh, "ph3: write metadata");
 		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
-						      jh, &new_jh, blocknr);
+						jh, &wbuf[bufs], blocknr);
 		if (flags < 0) {
 			jbd2_journal_abort(journal, flags);
 			continue;
 		}
-		set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
-		wbuf[bufs++] = jh2bh(new_jh);
+		jbd2_file_log_bh(&io_bufs, wbuf[bufs]);
 
 		/* Record the new block's tag in the current descriptor
                    buffer */
@@ -692,12 +693,13 @@
 			tag_flag |= JBD2_FLAG_SAME_UUID;
 
 		tag = (journal_block_tag_t *) tagp;
-		write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
+		write_tag_block(journal, tag, jh2bh(jh)->b_blocknr);
 		tag->t_flags = cpu_to_be16(tag_flag);
-		jbd2_block_tag_csum_set(journal, tag, jh2bh(new_jh),
+		jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
 					commit_transaction->t_tid);
 		tagp += tag_bytes;
 		space_left -= tag_bytes;
+		bufs++;
 
 		if (first_tag) {
 			memcpy (tagp, journal->j_uuid, 16);
@@ -809,7 +811,7 @@
            the log.  Before we can commit it, wait for the IO so far to
            complete.  Control buffers being written are on the log_bufs
            list, and metadata buffers are on
-           the t_iobuf_list queue.
+           the io_bufs list.
 
 	   Wait for the buffers in reverse order.  That way we are
 	   less likely to be woken up until all IOs have completed, and
@@ -818,47 +820,33 @@
 
 	jbd_debug(3, "JBD2: commit phase 3\n");
 
-	/*
-	 * akpm: these are BJ_IO, and j_list_lock is not needed.
-	 * See __journal_try_to_free_buffer.
-	 */
-wait_for_iobuf:
-	while (commit_transaction->t_iobuf_list != NULL) {
-		struct buffer_head *bh;
+	while (!list_empty(&io_bufs)) {
+		struct buffer_head *bh = list_entry(io_bufs.prev,
+						    struct buffer_head,
+						    b_assoc_buffers);
 
-		jh = commit_transaction->t_iobuf_list->b_tprev;
-		bh = jh2bh(jh);
-		if (buffer_locked(bh)) {
-			wait_on_buffer(bh);
-			goto wait_for_iobuf;
-		}
-		if (cond_resched())
-			goto wait_for_iobuf;
+		wait_on_buffer(bh);
+		cond_resched();
 
 		if (unlikely(!buffer_uptodate(bh)))
 			err = -EIO;
-
-		clear_buffer_jwrite(bh);
-
-		JBUFFER_TRACE(jh, "ph4: unfile after journal write");
-		jbd2_journal_unfile_buffer(journal, jh);
+		jbd2_unfile_log_bh(bh);
 
 		/*
-		 * ->t_iobuf_list should contain only dummy buffer_heads
-		 * which were created by jbd2_journal_write_metadata_buffer().
+		 * The list contains temporary buffer heads created by
+		 * jbd2_journal_write_metadata_buffer().
 		 */
 		BUFFER_TRACE(bh, "dumping temporary bh");
-		jbd2_journal_put_journal_head(jh);
 		__brelse(bh);
 		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
 		free_buffer_head(bh);
 
-		/* We also have to unlock and free the corresponding
-                   shadowed buffer */
+		/* We also have to refile the corresponding shadowed buffer */
 		jh = commit_transaction->t_shadow_list->b_tprev;
 		bh = jh2bh(jh);
-		clear_bit(BH_JWrite, &bh->b_state);
+		clear_buffer_jwrite(bh);
 		J_ASSERT_BH(bh, buffer_jbddirty(bh));
+		J_ASSERT_BH(bh, !buffer_shadow(bh));
 
 		/* The metadata is now released for reuse, but we need
                    to remember it against this transaction so that when
@@ -866,14 +854,6 @@
                    required. */
 		JBUFFER_TRACE(jh, "file as BJ_Forget");
 		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
-		/*
-		 * Wake up any transactions which were waiting for this IO to
-		 * complete. The barrier must be here so that changes by
-		 * jbd2_journal_file_buffer() take effect before wake_up_bit()
-		 * does the waitqueue check.
-		 */
-		smp_mb();
-		wake_up_bit(&bh->b_state, BH_Unshadow);
 		JBUFFER_TRACE(jh, "brelse shadowed buffer");
 		__brelse(bh);
 	}
@@ -883,26 +863,19 @@
 	jbd_debug(3, "JBD2: commit phase 4\n");
 
 	/* Here we wait for the revoke record and descriptor record buffers */
- wait_for_ctlbuf:
-	while (commit_transaction->t_log_list != NULL) {
+	while (!list_empty(&log_bufs)) {
 		struct buffer_head *bh;
 
-		jh = commit_transaction->t_log_list->b_tprev;
-		bh = jh2bh(jh);
-		if (buffer_locked(bh)) {
-			wait_on_buffer(bh);
-			goto wait_for_ctlbuf;
-		}
-		if (cond_resched())
-			goto wait_for_ctlbuf;
+		bh = list_entry(log_bufs.prev, struct buffer_head,
+				b_assoc_buffers);
+		wait_on_buffer(bh);
+		cond_resched();
 
 		if (unlikely(!buffer_uptodate(bh)))
 			err = -EIO;
 
 		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
 		clear_buffer_jwrite(bh);
-		jbd2_journal_unfile_buffer(journal, jh);
-		jbd2_journal_put_journal_head(jh);
+		jbd2_unfile_log_bh(bh);
 		__brelse(bh);		/* One for getblk */
 		/* AKPM: bforget here */
 	}
@@ -952,9 +925,7 @@
 	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
 	J_ASSERT(commit_transaction->t_buffers == NULL);
 	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
-	J_ASSERT(commit_transaction->t_iobuf_list == NULL);
 	J_ASSERT(commit_transaction->t_shadow_list == NULL);
-	J_ASSERT(commit_transaction->t_log_list == NULL);
 
 restart_loop:
 	/*
@@ -1097,6 +1068,25 @@
 		goto restart_loop;
 	}
 
+	/* Add the transaction to the checkpoint list
+	 * __journal_remove_checkpoint() cannot destroy the transaction
+	 * under us because it is not marked as T_FINISHED yet */
+	if (journal->j_checkpoint_transactions == NULL) {
+		journal->j_checkpoint_transactions = commit_transaction;
+		commit_transaction->t_cpnext = commit_transaction;
+		commit_transaction->t_cpprev = commit_transaction;
+	} else {
+		commit_transaction->t_cpnext =
+			journal->j_checkpoint_transactions;
+		commit_transaction->t_cpprev =
+			commit_transaction->t_cpnext->t_cpprev;
+		commit_transaction->t_cpnext->t_cpprev =
+			commit_transaction;
+		commit_transaction->t_cpprev->t_cpnext =
+				commit_transaction;
+	}
+	spin_unlock(&journal->j_list_lock);
+
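+	/*
+	 * Editorial sketch, not part of the original patch: the splice above
+	 * is the standard tail-insert on a circular doubly-linked list,
+	 * written generically (names hypothetical):
+	 *
+	 *	new->next = head;
+	 *	new->prev = head->prev;
+	 *	head->prev->next = new;
+	 *	head->prev = new;
+	 *
+	 * with the empty-list case handled by making the new node point at
+	 * itself in both directions.
+	 */
+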
 	/* Done with this transaction! */
 
 	jbd_debug(3, "JBD2: commit phase 7\n");
@@ -1115,24 +1105,7 @@
 		atomic_read(&commit_transaction->t_handle_count);
 	trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
 			     commit_transaction->t_tid, &stats.run);
-
-	/*
-	 * Calculate overall stats
-	 */
-	spin_lock(&journal->j_history_lock);
-	journal->j_stats.ts_tid++;
-	if (commit_transaction->t_requested)
-		journal->j_stats.ts_requested++;
-	journal->j_stats.run.rs_wait += stats.run.rs_wait;
-	journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay;
-	journal->j_stats.run.rs_running += stats.run.rs_running;
-	journal->j_stats.run.rs_locked += stats.run.rs_locked;
-	journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
-	journal->j_stats.run.rs_logging += stats.run.rs_logging;
-	journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
-	journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
-	journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
-	spin_unlock(&journal->j_history_lock);
+	stats.ts_requested = (commit_transaction->t_requested) ? 1 : 0;
 
 	commit_transaction->t_state = T_COMMIT_CALLBACK;
 	J_ASSERT(commit_transaction == journal->j_committing_transaction);
@@ -1152,24 +1125,6 @@
 
 	write_unlock(&journal->j_state_lock);
 
-	if (journal->j_checkpoint_transactions == NULL) {
-		journal->j_checkpoint_transactions = commit_transaction;
-		commit_transaction->t_cpnext = commit_transaction;
-		commit_transaction->t_cpprev = commit_transaction;
-	} else {
-		commit_transaction->t_cpnext =
-			journal->j_checkpoint_transactions;
-		commit_transaction->t_cpprev =
-			commit_transaction->t_cpnext->t_cpprev;
-		commit_transaction->t_cpnext->t_cpprev =
-			commit_transaction;
-		commit_transaction->t_cpprev->t_cpnext =
-				commit_transaction;
-	}
-	spin_unlock(&journal->j_list_lock);
-	/* Drop all spin_locks because commit_callback may be block.
-	 * __journal_remove_checkpoint() can not destroy transaction
-	 * under us because it is not marked as T_FINISHED yet */
 	if (journal->j_commit_callback)
 		journal->j_commit_callback(journal, commit_transaction);
 
@@ -1180,7 +1135,7 @@
 	write_lock(&journal->j_state_lock);
 	spin_lock(&journal->j_list_lock);
 	commit_transaction->t_state = T_FINISHED;
-	/* Recheck checkpoint lists after j_list_lock was dropped */
+	/* Check if the transaction can be dropped now that we are finished */
 	if (commit_transaction->t_checkpoint_list == NULL &&
 	    commit_transaction->t_checkpoint_io_list == NULL) {
 		__jbd2_journal_drop_transaction(journal, commit_transaction);
@@ -1189,4 +1144,21 @@
 	spin_unlock(&journal->j_list_lock);
 	write_unlock(&journal->j_state_lock);
 	wake_up(&journal->j_wait_done_commit);
+
+	/*
+	 * Calculate overall stats
+	 */
+	spin_lock(&journal->j_history_lock);
+	journal->j_stats.ts_tid++;
+	journal->j_stats.ts_requested += stats.ts_requested;
+	journal->j_stats.run.rs_wait += stats.run.rs_wait;
+	journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay;
+	journal->j_stats.run.rs_running += stats.run.rs_running;
+	journal->j_stats.run.rs_locked += stats.run.rs_locked;
+	journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
+	journal->j_stats.run.rs_logging += stats.run.rs_logging;
+	journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
+	journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
+	journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
+	spin_unlock(&journal->j_history_lock);
 }
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 85733dd..0524ae6 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -103,18 +103,37 @@
 static void __journal_abort_soft (journal_t *journal, int errno);
 static int jbd2_journal_create_slab(size_t slab_size);
 
-/* Checksumming functions */
-int jbd2_verify_csum_type(journal_t *j, journal_superblock_t *sb)
+#ifdef CONFIG_JBD2_DEBUG
+void __jbd2_debug(int level, const char *file, const char *func,
+		  unsigned int line, const char *fmt, ...)
 {
-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+	struct va_format vaf;
+	va_list args;
+
+	if (level > jbd2_journal_enable_debug)
+		return;
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	printk(KERN_DEBUG "%s: (%s, %u): %pV\n", file, func, line, &vaf);
+	va_end(args);
+}
+EXPORT_SYMBOL(__jbd2_debug);
+#endif
+
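+/*
+ * Editorial note with a hedged usage sketch: %pV consumes a struct
+ * va_format, which lets __jbd2_debug() forward its own varargs to printk()
+ * in a single call. Callers are expected to go through the jbd_debug()
+ * macro, e.g. (assuming the macro supplies __FILE__, __func__ and
+ * __LINE__):
+ *
+ *	jbd_debug(1, "JBD2: starting commit of transaction %d\n", tid);
+ */
+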
+/* Checksumming functions */
+static int jbd2_verify_csum_type(journal_t *j, journal_superblock_t *sb)
+{
+	if (!jbd2_journal_has_csum_v2or3(j))
 		return 1;
 
 	return sb->s_checksum_type == JBD2_CRC32C_CHKSUM;
 }
 
-static __u32 jbd2_superblock_csum(journal_t *j, journal_superblock_t *sb)
+static __be32 jbd2_superblock_csum(journal_t *j, journal_superblock_t *sb)
 {
-	__u32 csum, old_csum;
+	__u32 csum;
+	__be32 old_csum;
 
 	old_csum = sb->s_checksum;
 	sb->s_checksum = 0;
@@ -124,17 +143,17 @@
 	return cpu_to_be32(csum);
 }
 
-int jbd2_superblock_csum_verify(journal_t *j, journal_superblock_t *sb)
+static int jbd2_superblock_csum_verify(journal_t *j, journal_superblock_t *sb)
 {
-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+	if (!jbd2_journal_has_csum_v2or3(j))
 		return 1;
 
 	return sb->s_checksum == jbd2_superblock_csum(j, sb);
 }
 
-void jbd2_superblock_csum_set(journal_t *j, journal_superblock_t *sb)
+static void jbd2_superblock_csum_set(journal_t *j, journal_superblock_t *sb)
 {
-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+	if (!jbd2_journal_has_csum_v2or3(j))
 		return;
 
 	sb->s_checksum = jbd2_superblock_csum(j, sb);
@@ -283,8 +302,8 @@
 	journal->j_flags |= JBD2_UNMOUNT;
 
 	while (journal->j_task) {
-		wake_up(&journal->j_wait_commit);
 		write_unlock(&journal->j_state_lock);
+		wake_up(&journal->j_wait_commit);
 		wait_event(journal->j_wait_done_commit, journal->j_task == NULL);
 		write_lock(&journal->j_state_lock);
 	}
@@ -310,14 +329,12 @@
  *
  * If the source buffer has already been modified by a new transaction
  * since we took the last commit snapshot, we use the frozen copy of
- * that data for IO.  If we end up using the existing buffer_head's data
- * for the write, then we *have* to lock the buffer to prevent anyone
- * else from using and possibly modifying it while the IO is in
- * progress.
+ * that data for IO. If we end up using the existing buffer_head's data
+ * for the write, then we have to make sure nobody modifies it while the
+ * IO is in progress. do_get_write_access() handles this.
  *
- * The function returns a pointer to the buffer_heads to be used for IO.
- *
- * We assume that the journal has already been locked in this function.
+ * The function returns a pointer to the buffer_head to be used for IO.
  *
  * Return value:
  *  <0: Error
@@ -330,15 +347,14 @@
 
 int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
 				  struct journal_head  *jh_in,
-				  struct journal_head **jh_out,
-				  unsigned long long blocknr)
+				  struct buffer_head **bh_out,
+				  sector_t blocknr)
 {
 	int need_copy_out = 0;
 	int done_copy_out = 0;
 	int do_escape = 0;
 	char *mapped_data;
 	struct buffer_head *new_bh;
-	struct journal_head *new_jh;
 	struct page *new_page;
 	unsigned int new_offset;
 	struct buffer_head *bh_in = jh2bh(jh_in);
@@ -368,14 +384,13 @@
 
 	/* keep subsequent assertions sane */
 	atomic_set(&new_bh->b_count, 1);
-	new_jh = jbd2_journal_add_journal_head(new_bh);	/* This sleeps */
 
+	jbd_lock_bh_state(bh_in);
+repeat:
 	/*
 	 * If a new transaction has already done a buffer copy-out, then
 	 * we use that version of the data for the commit.
 	 */
-	jbd_lock_bh_state(bh_in);
-repeat:
 	if (jh_in->b_frozen_data) {
 		done_copy_out = 1;
 		new_page = virt_to_page(jh_in->b_frozen_data);
@@ -415,7 +430,7 @@
 		jbd_unlock_bh_state(bh_in);
 		tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS);
 		if (!tmp) {
-			jbd2_journal_put_journal_head(new_jh);
+			brelse(new_bh);
 			return -ENOMEM;
 		}
 		jbd_lock_bh_state(bh_in);
@@ -426,7 +441,7 @@
 
 		jh_in->b_frozen_data = tmp;
 		mapped_data = kmap_atomic(new_page);
-		memcpy(tmp, mapped_data + new_offset, jh2bh(jh_in)->b_size);
+		memcpy(tmp, mapped_data + new_offset, bh_in->b_size);
 		kunmap_atomic(mapped_data);
 
 		new_page = virt_to_page(tmp);
@@ -452,14 +467,14 @@
 	}
 
 	set_bh_page(new_bh, new_page, new_offset);
-	new_jh->b_transaction = NULL;
-	new_bh->b_size = jh2bh(jh_in)->b_size;
-	new_bh->b_bdev = transaction->t_journal->j_dev;
+	new_bh->b_size = bh_in->b_size;
+	new_bh->b_bdev = journal->j_dev;
 	new_bh->b_blocknr = blocknr;
+	new_bh->b_private = bh_in;
 	set_buffer_mapped(new_bh);
 	set_buffer_dirty(new_bh);
 
-	*jh_out = new_jh;
+	*bh_out = new_bh;
 
 	/*
 	 * The to-be-written buffer needs to get moved to the io queue,
@@ -470,11 +485,9 @@
 	spin_lock(&journal->j_list_lock);
 	__jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow);
 	spin_unlock(&journal->j_list_lock);
+	set_buffer_shadow(bh_in);
 	jbd_unlock_bh_state(bh_in);
 
-	JBUFFER_TRACE(new_jh, "file as BJ_IO");
-	jbd2_journal_file_buffer(new_jh, transaction, BJ_IO);
-
 	return do_escape | (done_copy_out << 1);
 }
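+
+/*
+ * Editorial note, not part of the original patch: the return value above
+ * packs two flags for the commit code:
+ *
+ *	bit 0 (do_escape)	the block's first four bytes matched
+ *				JBD2_MAGIC_NUMBER and were zeroed in the
+ *				log copy (escaped)
+ *	bit 1 (done_copy_out)	the log copy was taken from frozen data
+ *				rather than from the live buffer
+ */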
 
@@ -484,35 +497,6 @@
  */
 
 /*
- * __jbd2_log_space_left: Return the number of free blocks left in the journal.
- *
- * Called with the journal already locked.
- *
- * Called under j_state_lock
- */
-
-int __jbd2_log_space_left(journal_t *journal)
-{
-	int left = journal->j_free;
-
-	/* assert_spin_locked(&journal->j_state_lock); */
-
-	/*
-	 * Be pessimistic here about the number of those free blocks which
-	 * might be required for log descriptor control blocks.
-	 */
-
-#define MIN_LOG_RESERVED_BLOCKS 32 /* Allow for rounding errors */
-
-	left -= MIN_LOG_RESERVED_BLOCKS;
-
-	if (left <= 0)
-		return 0;
-	left -= (left >> 3);
-	return left;
-}
-
-/*
  * Called with j_state_lock locked for writing.
  * Returns true if a transaction commit was started.
  */
@@ -564,20 +548,17 @@
 }
 
 /*
- * Force and wait upon a commit if the calling process is not within
- * transaction.  This is used for forcing out undo-protected data which contains
- * bitmaps, when the fs is running out of space.
- *
- * We can only force the running transaction if we don't have an active handle;
- * otherwise, we will deadlock.
- *
- * Returns true if a transaction was started.
+ * Force and wait any uncommitted transactions.  We can only force the running
+ * transaction if we don't have an active handle, otherwise, we will deadlock.
+ * Returns: <0 in case of error,
+ *           0 if nothing to commit,
+ *           1 if transaction was successfully committed.
  */
-int jbd2_journal_force_commit_nested(journal_t *journal)
+static int __jbd2_journal_force_commit(journal_t *journal)
 {
 	transaction_t *transaction = NULL;
 	tid_t tid;
-	int need_to_start = 0;
+	int need_to_start = 0, ret = 0;
 
 	read_lock(&journal->j_state_lock);
 	if (journal->j_running_transaction && !current->journal_info) {
@@ -588,16 +569,53 @@
 		transaction = journal->j_committing_transaction;
 
 	if (!transaction) {
+		/* Nothing to commit */
 		read_unlock(&journal->j_state_lock);
-		return 0;	/* Nothing to retry */
+		return 0;
 	}
-
 	tid = transaction->t_tid;
 	read_unlock(&journal->j_state_lock);
 	if (need_to_start)
 		jbd2_log_start_commit(journal, tid);
-	jbd2_log_wait_commit(journal, tid);
-	return 1;
+	ret = jbd2_log_wait_commit(journal, tid);
+	if (!ret)
+		ret = 1;
+
+	return ret;
+}
+
+/**
+ * Force and wait upon a commit if the calling process is not within a
+ * transaction.  This is used for forcing out undo-protected data which
+ * contains bitmaps, when the fs is running out of space.
+ *
+ * @journal: journal to force
+ * Returns true if progress was made.
+ */
+int jbd2_journal_force_commit_nested(journal_t *journal)
+{
+	int ret;
+
+	ret = __jbd2_journal_force_commit(journal);
+	return ret > 0;
+}
+
+/**
+ * int jbd2_journal_force_commit() - force any uncommitted transactions
+ * @journal: journal to force
+ *
+ * Caller wants an unconditional commit. We can only force the running
+ * transaction if we don't have an active handle; otherwise, we will deadlock.
+ */
+int jbd2_journal_force_commit(journal_t *journal)
+{
+	int ret;
+
+	J_ASSERT(!current->journal_info);
+	ret = __jbd2_journal_force_commit(journal);
+	if (ret > 0)
+		ret = 0;
+	return ret;
 }
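+
+/*
+ * Editorial sketch, not part of the original patch: how the two wrappers
+ * above remap the shared helper's return convention:
+ *
+ *	__jbd2_journal_force_commit():       <0 error, 0 nothing to do,
+ *	                                     1 committed
+ *	jbd2_journal_force_commit_nested():  boolean, true iff progress
+ *	jbd2_journal_force_commit():         0 on success or nothing to do,
+ *	                                     <0 on error
+ */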
 
 /*
@@ -684,7 +702,7 @@
 	read_lock(&journal->j_state_lock);
 #ifdef CONFIG_JBD2_DEBUG
 	if (!tid_geq(journal->j_commit_request, tid)) {
-		printk(KERN_EMERG
+		printk(KERN_ERR
 		       "%s: error: j_commit_request=%d, tid=%d\n",
 		       __func__, journal->j_commit_request, tid);
 	}
@@ -692,18 +710,16 @@
 	while (tid_gt(tid, journal->j_commit_sequence)) {
 		jbd_debug(1, "JBD2: want %d, j_commit_sequence=%d\n",
 				  tid, journal->j_commit_sequence);
-		wake_up(&journal->j_wait_commit);
 		read_unlock(&journal->j_state_lock);
+		wake_up(&journal->j_wait_commit);
 		wait_event(journal->j_wait_done_commit,
 				!tid_gt(tid, journal->j_commit_sequence));
 		read_lock(&journal->j_state_lock);
 	}
 	read_unlock(&journal->j_state_lock);
 
-	if (unlikely(is_journal_aborted(journal))) {
-		printk(KERN_EMERG "journal commit I/O error\n");
+	if (unlikely(is_journal_aborted(journal)))
 		err = -EIO;
-	}
 	return err;
 }
 
@@ -798,7 +814,7 @@
  * But we don't bother doing that, so there will be coherency problems with
  * mmaps of blockdevs which hold live JBD-controlled filesystems.
  */
-struct journal_head *jbd2_journal_get_descriptor_buffer(journal_t *journal)
+struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal)
 {
 	struct buffer_head *bh;
 	unsigned long long blocknr;
@@ -817,7 +833,7 @@
 	set_buffer_uptodate(bh);
 	unlock_buffer(bh);
 	BUFFER_TRACE(bh, "return this buffer");
-	return jbd2_journal_add_journal_head(bh);
+	return bh;
 }
 
 /*
@@ -1069,11 +1085,10 @@
 		return NULL;
 
 	init_waitqueue_head(&journal->j_wait_transaction_locked);
-	init_waitqueue_head(&journal->j_wait_logspace);
 	init_waitqueue_head(&journal->j_wait_done_commit);
-	init_waitqueue_head(&journal->j_wait_checkpoint);
 	init_waitqueue_head(&journal->j_wait_commit);
 	init_waitqueue_head(&journal->j_wait_updates);
+	init_waitqueue_head(&journal->j_wait_reserved);
 	mutex_init(&journal->j_barrier);
 	mutex_init(&journal->j_checkpoint_mutex);
 	spin_lock_init(&journal->j_revoke_lock);
@@ -1083,6 +1098,7 @@
 	journal->j_commit_interval = (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE);
 	journal->j_min_batch_time = 0;
 	journal->j_max_batch_time = 15000; /* 15ms */
+	atomic_set(&journal->j_reserved_credits, 0);
 
 	/* The journal is marked for error until we succeed with recovery! */
 	journal->j_flags = JBD2_ABORT;
@@ -1228,7 +1244,7 @@
 		goto out_err;
 	}
 
-	bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
+	bh = getblk_unmovable(journal->j_dev, blocknr, journal->j_blocksize);
 	if (!bh) {
 		printk(KERN_ERR
 		       "%s: Cannot get buffer for journal superblock\n",
@@ -1523,24 +1539,32 @@
 		goto out;
 	}
 
-	if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) &&
-	    JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
+	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) &&
+	    JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
+		/* Can't have checksum v2 and v3 at the same time! */
+		printk(KERN_ERR "JBD2: Can't enable checksumming v2 and v3 "
+		       "at the same time!\n");
+		goto out;
+	}
+
+	if (jbd2_journal_has_csum_v2or3(journal) &&
+	    JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM)) {
 		/* Can't have checksum v1 and v2 on at the same time! */
-		printk(KERN_ERR "JBD: Can't enable checksumming v1 and v2 "
+		printk(KERN_ERR "JBD2: Can't enable checksumming v1 and v2/3 "
 		       "at the same time!\n");
 		goto out;
 	}
 
 	if (!jbd2_verify_csum_type(journal, sb)) {
-		printk(KERN_ERR "JBD: Unknown checksum type\n");
+		printk(KERN_ERR "JBD2: Unknown checksum type\n");
 		goto out;
 	}
 
 	/* Load the checksum driver */
-	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
+	if (jbd2_journal_has_csum_v2or3(journal)) {
 		journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
 		if (IS_ERR(journal->j_chksum_driver)) {
-			printk(KERN_ERR "JBD: Cannot load crc32c driver.\n");
+			printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n");
 			err = PTR_ERR(journal->j_chksum_driver);
 			journal->j_chksum_driver = NULL;
 			goto out;
@@ -1549,12 +1573,12 @@
 
 	/* Check superblock checksum */
 	if (!jbd2_superblock_csum_verify(journal, sb)) {
-		printk(KERN_ERR "JBD: journal checksum error\n");
+		printk(KERN_ERR "JBD2: journal checksum error\n");
 		goto out;
 	}
 
 	/* Precompute checksum seed for all metadata */
-	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+	if (jbd2_journal_has_csum_v2or3(journal))
 		journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid,
 						   sizeof(sb->s_uuid));
 
@@ -1820,8 +1844,14 @@
 	if (!jbd2_journal_check_available_features(journal, compat, ro, incompat))
 		return 0;
 
-	/* Asking for checksumming v2 and v1?  Only give them v2. */
-	if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2 &&
+	/* If enabling v2 checksums, turn on v3 instead */
+	if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2) {
+		incompat &= ~JBD2_FEATURE_INCOMPAT_CSUM_V2;
+		incompat |= JBD2_FEATURE_INCOMPAT_CSUM_V3;
+	}
+
+	/* Asking for checksumming v3 and v1?  Only give them v3. */
+	if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V3 &&
 	    compat & JBD2_FEATURE_COMPAT_CHECKSUM)
 		compat &= ~JBD2_FEATURE_COMPAT_CHECKSUM;
 
@@ -1830,8 +1860,8 @@
 
 	sb = journal->j_superblock;
 
-	/* If enabling v2 checksums, update superblock */
-	if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
+	/* If enabling v3 checksums, update superblock */
+	if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
 		sb->s_checksum_type = JBD2_CRC32C_CHKSUM;
 		sb->s_feature_compat &=
 			~cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM);
@@ -1841,25 +1871,24 @@
 			journal->j_chksum_driver = crypto_alloc_shash("crc32c",
 								      0, 0);
 			if (IS_ERR(journal->j_chksum_driver)) {
-				printk(KERN_ERR "JBD: Cannot load crc32c "
+				printk(KERN_ERR "JBD2: Cannot load crc32c "
 				       "driver.\n");
 				journal->j_chksum_driver = NULL;
 				return 0;
 			}
-		}
 
-		/* Precompute checksum seed for all metadata */
-		if (JBD2_HAS_INCOMPAT_FEATURE(journal,
-					      JBD2_FEATURE_INCOMPAT_CSUM_V2))
+			/* Precompute checksum seed for all metadata */
 			journal->j_csum_seed = jbd2_chksum(journal, ~0,
 							   sb->s_uuid,
 							   sizeof(sb->s_uuid));
+		}
 	}
 
 	/* If enabling v1 checksums, downgrade superblock */
 	if (COMPAT_FEATURE_ON(JBD2_FEATURE_COMPAT_CHECKSUM))
 		sb->s_feature_incompat &=
-			~cpu_to_be32(JBD2_FEATURE_INCOMPAT_CSUM_V2);
+			~cpu_to_be32(JBD2_FEATURE_INCOMPAT_CSUM_V2 |
+				     JBD2_FEATURE_INCOMPAT_CSUM_V3);
 
 	sb->s_feature_compat    |= cpu_to_be32(compat);
 	sb->s_feature_ro_compat |= cpu_to_be32(ro);
@@ -2184,16 +2213,20 @@
  */
 size_t journal_tag_bytes(journal_t *journal)
 {
-	journal_block_tag_t tag;
-	size_t x = 0;
+	size_t sz;
+
+	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3))
+		return sizeof(journal_block_tag3_t);
+
+	sz = sizeof(journal_block_tag_t);
 
 	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
-		x += sizeof(tag.t_checksum);
+		sz += sizeof(__u16);
 
 	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
-		return x + JBD2_TAG_SIZE64;
+		return sz;
 	else
-		return x + JBD2_TAG_SIZE32;
+		return sz - sizeof(__u32);
 }
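+
+/*
+ * Editorial sketch, not part of the original patch: the tag sizes the
+ * function above works out, assuming the jbd2.h layouts of this series
+ * (sizeof(journal_block_tag3_t) == 16, sizeof(journal_block_tag_t) == 12):
+ *
+ *	no checksum, 32-bit blocks:	 8 bytes
+ *	no checksum, 64-bit blocks:	12 bytes
+ *	CSUM_V2,     32-bit blocks:	10 bytes
+ *	CSUM_V2,     64-bit blocks:	14 bytes
+ *	CSUM_V3,     always:		16 bytes
+ */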
 
 /*
@@ -2361,13 +2394,13 @@
 #ifdef CONFIG_JBD2_DEBUG
 	atomic_inc(&nr_journal_heads);
 #endif
-	ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
+	ret = kmem_cache_zalloc(jbd2_journal_head_cache, GFP_NOFS);
 	if (!ret) {
 		jbd_debug(1, "out of memory for journal_head\n");
 		pr_notice_ratelimited("ENOMEM in %s, retrying.\n", __func__);
 		while (!ret) {
 			yield();
-			ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
+			ret = kmem_cache_zalloc(jbd2_journal_head_cache, GFP_NOFS);
 		}
 	}
 	return ret;
@@ -2429,10 +2462,8 @@
 	struct journal_head *new_jh = NULL;
 
 repeat:
-	if (!buffer_jbd(bh)) {
+	if (!buffer_jbd(bh))
 		new_jh = journal_alloc_journal_head();
-		memset(new_jh, 0, sizeof(*new_jh));
-	}
 
 	jbd_lock_bh_journal_head(bh);
 	if (buffer_jbd(bh)) {
@@ -2664,7 +2695,7 @@
 #ifdef CONFIG_JBD2_DEBUG
 	int n = atomic_read(&nr_journal_heads);
 	if (n)
-		printk(KERN_EMERG "JBD2: leaked %d journal_heads!\n", n);
+		printk(KERN_ERR "JBD2: leaked %d journal_heads!\n", n);
 #endif
 	jbd2_remove_jbd_stats_proc_entry();
 	jbd2_journal_destroy_caches();
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 6e2fb5c..bcbef08 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -178,9 +178,10 @@
 					void *buf)
 {
 	struct jbd2_journal_block_tail *tail;
-	__u32 provided, calculated;
+	__be32 provided;
+	__u32 calculated;
 
-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+	if (!jbd2_journal_has_csum_v2or3(j))
 		return 1;
 
 	tail = (struct jbd2_journal_block_tail *)(buf + j->j_blocksize -
@@ -190,8 +191,7 @@
 	calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize);
 	tail->t_checksum = provided;
 
-	provided = be32_to_cpu(provided);
-	return provided == calculated;
+	return provided == cpu_to_be32(calculated);
 }
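+
+/*
+ * Editorial note, not part of the original patch: the verify helpers here
+ * zero the on-disk checksum field, recompute crc32c over the whole block,
+ * restore the saved value, and compare entirely in big-endian form:
+ *
+ *	provided == cpu_to_be32(calculated)	// __be32 vs __be32
+ *
+ * which is what the earlier "__u32 provided" variant got wrong from a
+ * sparse endianness-annotation point of view.
+ */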
 
 /*
@@ -205,7 +205,7 @@
 	int			nr = 0, size = journal->j_blocksize;
 	int			tag_bytes = journal_tag_bytes(journal);
 
-	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+	if (jbd2_journal_has_csum_v2or3(journal))
 		size -= sizeof(struct jbd2_journal_block_tail);
 
 	tagp = &bh->b_data[sizeof(journal_header_t)];
@@ -338,10 +338,11 @@
 	return err;
 }
 
-static inline unsigned long long read_tag_block(int tag_bytes, journal_block_tag_t *tag)
+static inline unsigned long long read_tag_block(journal_t *journal,
+						journal_block_tag_t *tag)
 {
 	unsigned long long block = be32_to_cpu(tag->t_blocknr);
-	if (tag_bytes > JBD2_TAG_SIZE32)
+	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
 		block |= (u64)be32_to_cpu(tag->t_blocknr_high) << 32;
 	return block;
 }
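+
+/*
+ * Editorial example, not part of the original patch: with INCOMPAT_64BIT
+ * the two on-disk halves recombine as low | (high << 32). For block
+ * 0x100000002ULL the descriptor tag would hold:
+ *
+ *	tag->t_blocknr      == cpu_to_be32(0x00000002);	// low 32 bits
+ *	tag->t_blocknr_high == cpu_to_be32(0x00000001);	// high 32 bits
+ */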
@@ -381,9 +382,10 @@
 static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
 {
 	struct commit_header *h;
-	__u32 provided, calculated;
+	__be32 provided;
+	__u32 calculated;
 
-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+	if (!jbd2_journal_has_csum_v2or3(j))
 		return 1;
 
 	h = buf;
@@ -392,25 +394,27 @@
 	calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize);
 	h->h_chksum[0] = provided;
 
-	provided = be32_to_cpu(provided);
-	return provided == calculated;
+	return provided == cpu_to_be32(calculated);
 }
 
 static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag,
 				      void *buf, __u32 sequence)
 {
-	__u32 provided, calculated;
+	journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
+	__u32 csum32;
+	__be32 seq;
 
-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+	if (!jbd2_journal_has_csum_v2or3(j))
 		return 1;
 
-	sequence = cpu_to_be32(sequence);
-	calculated = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence,
-				 sizeof(sequence));
-	calculated = jbd2_chksum(j, calculated, buf, j->j_blocksize);
-	provided = be32_to_cpu(tag->t_checksum);
+	seq = cpu_to_be32(sequence);
+	csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
+	csum32 = jbd2_chksum(j, csum32, buf, j->j_blocksize);
 
-	return provided == cpu_to_be32(calculated);
+	if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V3))
+		return tag3->t_checksum == cpu_to_be32(csum32);
+	else
+		return tag->t_checksum == cpu_to_be16(csum32);
 }
 
 static int do_one_pass(journal_t *journal,
@@ -514,8 +518,7 @@
 		switch(blocktype) {
 		case JBD2_DESCRIPTOR_BLOCK:
 			/* Verify checksum first */
-			if (JBD2_HAS_INCOMPAT_FEATURE(journal,
-					JBD2_FEATURE_INCOMPAT_CSUM_V2))
+			if (jbd2_journal_has_csum_v2or3(journal))
 				descr_csum_size =
 					sizeof(struct jbd2_journal_block_tail);
 			if (descr_csum_size > 0 &&
@@ -577,7 +580,7 @@
 					unsigned long long blocknr;
 
 					J_ASSERT(obh != NULL);
-					blocknr = read_tag_block(tag_bytes,
+					blocknr = read_tag_block(journal,
 								 tag);
 
 					/* If the block has been
@@ -597,7 +600,7 @@
 						be32_to_cpu(tmp->h_sequence))) {
 						brelse(obh);
 						success = -EIO;
-						printk(KERN_ERR "JBD: Invalid "
+						printk(KERN_ERR "JBD2: Invalid "
 						       "checksum recovering "
 						       "block %llu in log\n",
 						       blocknr);
@@ -813,9 +816,10 @@
 					 void *buf)
 {
 	struct jbd2_journal_revoke_tail *tail;
-	__u32 provided, calculated;
+	__be32 provided;
+	__u32 calculated;
 
-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+	if (!jbd2_journal_has_csum_v2or3(j))
 		return 1;
 
 	tail = (struct jbd2_journal_revoke_tail *)(buf + j->j_blocksize -
@@ -825,8 +829,7 @@
 	calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize);
 	tail->r_checksum = provided;
 
-	provided = be32_to_cpu(provided);
-	return provided == calculated;
+	return provided == cpu_to_be32(calculated);
 }
 
 /* Scan a revoke record, marking all blocks mentioned as revoked. */
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index f30b80b..c6cbaef 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -91,8 +91,9 @@
 #include <linux/list.h>
 #include <linux/init.h>
 #include <linux/bio.h>
-#endif
 #include <linux/log2.h>
+#include <linux/hash.h>
+#endif
 
 static struct kmem_cache *jbd2_revoke_record_cache;
 static struct kmem_cache *jbd2_revoke_table_cache;
@@ -122,23 +123,17 @@
 
 #ifdef __KERNEL__
 static void write_one_revoke_record(journal_t *, transaction_t *,
-				    struct journal_head **, int *,
+				    struct list_head *,
+				    struct buffer_head **, int *,
 				    struct jbd2_revoke_record_s *, int);
-static void flush_descriptor(journal_t *, struct journal_head *, int, int);
+static void flush_descriptor(journal_t *, struct buffer_head *, int, int);
 #endif
 
 /* Utility functions to maintain the revoke table */
 
-/* Borrowed from buffer.c: this is a tried and tested block hash function */
 static inline int hash(journal_t *journal, unsigned long long block)
 {
-	struct jbd2_revoke_table_s *table = journal->j_revoke;
-	int hash_shift = table->hash_shift;
-	int hash = (int)block ^ (int)((block >> 31) >> 1);
-
-	return ((hash << (hash_shift - 6)) ^
-		(hash >> 13) ^
-		(hash << (hash_shift - 12))) & (table->hash_size - 1);
+	return hash_64(block, journal->j_revoke->hash_shift);
 }
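+
+/*
+ * Editorial note, not part of the original patch: hash_64() from
+ * <linux/hash.h> is the kernel's multiplicative hash, roughly
+ *
+ *	(block * GOLDEN_RATIO_PRIME_64) >> (64 - hash_shift)
+ *
+ * so it yields a hash_shift-bit table index directly, replacing the
+ * hand-rolled shift/xor mix removed above.
+ */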
 
 static int insert_revoke_hash(journal_t *journal, unsigned long long blocknr,
@@ -531,9 +526,10 @@
  */
 void jbd2_journal_write_revoke_records(journal_t *journal,
 				       transaction_t *transaction,
+				       struct list_head *log_bufs,
 				       int write_op)
 {
-	struct journal_head *descriptor;
+	struct buffer_head *descriptor;
 	struct jbd2_revoke_record_s *record;
 	struct jbd2_revoke_table_s *revoke;
 	struct list_head *hash_list;
@@ -553,7 +549,7 @@
 		while (!list_empty(hash_list)) {
 			record = (struct jbd2_revoke_record_s *)
 				hash_list->next;
-			write_one_revoke_record(journal, transaction,
+			write_one_revoke_record(journal, transaction, log_bufs,
 						&descriptor, &offset,
 						record, write_op);
 			count++;
@@ -573,13 +569,14 @@
 
 static void write_one_revoke_record(journal_t *journal,
 				    transaction_t *transaction,
-				    struct journal_head **descriptorp,
+				    struct list_head *log_bufs,
+				    struct buffer_head **descriptorp,
 				    int *offsetp,
 				    struct jbd2_revoke_record_s *record,
 				    int write_op)
 {
 	int csum_size = 0;
-	struct journal_head *descriptor;
+	struct buffer_head *descriptor;
 	int offset;
 	journal_header_t *header;
 
@@ -594,7 +591,7 @@
 	offset = *offsetp;
 
 	/* Do we need to leave space at the end for a checksum? */
-	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+	if (jbd2_journal_has_csum_v2or3(journal))
 		csum_size = sizeof(struct jbd2_journal_revoke_tail);
 
 	/* Make sure we have a descriptor with space left for the record */
@@ -609,26 +606,26 @@
 		descriptor = jbd2_journal_get_descriptor_buffer(journal);
 		if (!descriptor)
 			return;
-		header = (journal_header_t *) &jh2bh(descriptor)->b_data[0];
+		header = (journal_header_t *)descriptor->b_data;
 		header->h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
 		header->h_blocktype = cpu_to_be32(JBD2_REVOKE_BLOCK);
 		header->h_sequence  = cpu_to_be32(transaction->t_tid);
 
 		/* Record it so that we can wait for IO completion later */
-		JBUFFER_TRACE(descriptor, "file as BJ_LogCtl");
-		jbd2_journal_file_buffer(descriptor, transaction, BJ_LogCtl);
+		BUFFER_TRACE(descriptor, "file in log_bufs");
+		jbd2_file_log_bh(log_bufs, descriptor);
 
 		offset = sizeof(jbd2_journal_revoke_header_t);
 		*descriptorp = descriptor;
 	}
 
 	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) {
-		* ((__be64 *)(&jh2bh(descriptor)->b_data[offset])) =
+		* ((__be64 *)(&descriptor->b_data[offset])) =
 			cpu_to_be64(record->blocknr);
 		offset += 8;
 
 	} else {
-		* ((__be32 *)(&jh2bh(descriptor)->b_data[offset])) =
+		* ((__be32 *)(&descriptor->b_data[offset])) =
 			cpu_to_be32(record->blocknr);
 		offset += 4;
 	}
@@ -636,21 +633,18 @@
 	*offsetp = offset;
 }
 
-static void jbd2_revoke_csum_set(journal_t *j,
-				 struct journal_head *descriptor)
+static void jbd2_revoke_csum_set(journal_t *j, struct buffer_head *bh)
 {
 	struct jbd2_journal_revoke_tail *tail;
 	__u32 csum;
 
-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+	if (!jbd2_journal_has_csum_v2or3(j))
 		return;
 
-	tail = (struct jbd2_journal_revoke_tail *)
-			(jh2bh(descriptor)->b_data + j->j_blocksize -
+	tail = (struct jbd2_journal_revoke_tail *)(bh->b_data + j->j_blocksize -
 			sizeof(struct jbd2_journal_revoke_tail));
 	tail->r_checksum = 0;
-	csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data,
-			   j->j_blocksize);
+	csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
 	tail->r_checksum = cpu_to_be32(csum);
 }
 
@@ -662,25 +656,24 @@
  */
 
 static void flush_descriptor(journal_t *journal,
-			     struct journal_head *descriptor,
+			     struct buffer_head *descriptor,
 			     int offset, int write_op)
 {
 	jbd2_journal_revoke_header_t *header;
-	struct buffer_head *bh = jh2bh(descriptor);
 
 	if (is_journal_aborted(journal)) {
-		put_bh(bh);
+		put_bh(descriptor);
 		return;
 	}
 
-	header = (jbd2_journal_revoke_header_t *) jh2bh(descriptor)->b_data;
+	header = (jbd2_journal_revoke_header_t *)descriptor->b_data;
 	header->r_count = cpu_to_be32(offset);
 	jbd2_revoke_csum_set(journal, descriptor);
 
-	set_buffer_jwrite(bh);
-	BUFFER_TRACE(bh, "write");
-	set_buffer_dirty(bh);
-	write_dirty_buffer(bh, write_op);
+	set_buffer_jwrite(descriptor);
+	BUFFER_TRACE(descriptor, "write");
+	set_buffer_dirty(descriptor);
+	write_dirty_buffer(descriptor, write_op);
 }
 #endif
 
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 21b828c..c6aa583 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -89,7 +89,8 @@
 	transaction->t_expires = jiffies + journal->j_commit_interval;
 	spin_lock_init(&transaction->t_handle_lock);
 	atomic_set(&transaction->t_updates, 0);
-	atomic_set(&transaction->t_outstanding_credits, 0);
+	atomic_set(&transaction->t_outstanding_credits,
+		   atomic_read(&journal->j_reserved_credits));
 	atomic_set(&transaction->t_handle_count, 0);
 	INIT_LIST_HEAD(&transaction->t_inode_list);
 	INIT_LIST_HEAD(&transaction->t_private_list);
@@ -141,6 +142,112 @@
 }
 
 /*
+ * Wait until the running transaction passes the T_LOCKED state. Also starts
+ * the commit if needed. The function expects the running transaction to exist
+ * and releases j_state_lock.
+ */
+static void wait_transaction_locked(journal_t *journal)
+	__releases(journal->j_state_lock)
+{
+	DEFINE_WAIT(wait);
+	int need_to_start;
+	tid_t tid = journal->j_running_transaction->t_tid;
+
+	prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
+			TASK_UNINTERRUPTIBLE);
+	need_to_start = !tid_geq(journal->j_commit_request, tid);
+	read_unlock(&journal->j_state_lock);
+	if (need_to_start)
+		jbd2_log_start_commit(journal, tid);
+	schedule();
+	finish_wait(&journal->j_wait_transaction_locked, &wait);
+}
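+
+/*
+ * Editorial note, not part of the original patch: the function above is the
+ * standard lost-wakeup-safe wait sequence; the task is queued on the
+ * waitqueue before j_state_lock is dropped, so a wake_up() in between simply
+ * makes the later schedule() return immediately:
+ *
+ *	prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
+ *	... drop locks, kick the commit ...
+ *	schedule();
+ *	finish_wait(wq, &wait);
+ */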
+
+static void sub_reserved_credits(journal_t *journal, int blocks)
+{
+	atomic_sub(blocks, &journal->j_reserved_credits);
+	wake_up(&journal->j_wait_reserved);
+}
+
+/*
+ * Wait until we can add credits for a handle to the running transaction.
+ * Called with j_state_lock held for reading. Returns 0 if the handle joined
+ * the running transaction. Returns 1 if we had to wait; in that case
+ * j_state_lock has been dropped and the caller must retry.
+ */
+static int add_transaction_credits(journal_t *journal, int blocks,
+				   int rsv_blocks)
+{
+	transaction_t *t = journal->j_running_transaction;
+	int needed;
+	int total = blocks + rsv_blocks;
+
+	/*
+	 * If the current transaction is locked down for commit, wait
+	 * for the lock to be released.
+	 */
+	if (t->t_state == T_LOCKED) {
+		wait_transaction_locked(journal);
+		return 1;
+	}
+
+	/*
+	 * If there is not enough space left in the log to write all
+	 * potential buffers requested by this operation, we need to
+	 * stall pending a log checkpoint to free some more log space.
+	 */
+	needed = atomic_add_return(total, &t->t_outstanding_credits);
+	if (needed > journal->j_max_transaction_buffers) {
+		/*
+		 * If the current transaction is already too large,
+		 * then start to commit it: we can then go back and
+		 * attach this handle to a new transaction.
+		 */
+		atomic_sub(total, &t->t_outstanding_credits);
+		wait_transaction_locked(journal);
+		return 1;
+	}
+
+	/*
+	 * The commit code assumes that it can get enough log space
+	 * without forcing a checkpoint.  This is *critical* for
+	 * correctness: a checkpoint of a buffer which is also
+	 * associated with a committing transaction creates a deadlock,
+	 * so commit simply cannot force through checkpoints.
+	 *
+	 * We must therefore ensure the necessary space in the journal
+	 * *before* starting to dirty potentially checkpointed buffers
+	 * in the new transaction.
+	 */
+	if (jbd2_log_space_left(journal) < jbd2_space_needed(journal)) {
+		atomic_sub(total, &t->t_outstanding_credits);
+		read_unlock(&journal->j_state_lock);
+		write_lock(&journal->j_state_lock);
+		if (jbd2_log_space_left(journal) < jbd2_space_needed(journal))
+			__jbd2_log_wait_for_space(journal);
+		write_unlock(&journal->j_state_lock);
+		return 1;
+	}
+
+	/* No reservation? We are done... */
+	if (!rsv_blocks)
+		return 0;
+
+	needed = atomic_add_return(rsv_blocks, &journal->j_reserved_credits);
+	/* We allow at most half of a transaction to be reserved */
+	if (needed > journal->j_max_transaction_buffers / 2) {
+		sub_reserved_credits(journal, rsv_blocks);
+		atomic_sub(total, &t->t_outstanding_credits);
+		read_unlock(&journal->j_state_lock);
+		wait_event(journal->j_wait_reserved,
+			 atomic_read(&journal->j_reserved_credits) + rsv_blocks
+			 <= journal->j_max_transaction_buffers / 2);
+		return 1;
+	}
+	return 0;
+}
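+
+/*
+ * Editorial sketch, not part of the original patch: the caller-side
+ * contract of the helper above, as start_this_handle() below uses it
+ * (simplified):
+ *
+ *	read_lock(&journal->j_state_lock);
+ *	...
+ *	if (add_transaction_credits(journal, blocks, rsv_blocks))
+ *		goto repeat;	// j_state_lock was dropped; re-evaluate
+ */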
+
+/*
  * start_this_handle: Given a handle, deal with any locking or stalling
  * needed to make sure that there is enough journal space for the handle
  * to begin.  Attach the handle to a transaction and set up the
@@ -151,18 +258,24 @@
 			     gfp_t gfp_mask)
 {
 	transaction_t	*transaction, *new_transaction = NULL;
-	tid_t		tid;
-	int		needed, need_to_start;
-	int		nblocks = handle->h_buffer_credits;
+	int		blocks = handle->h_buffer_credits;
+	int		rsv_blocks = 0;
 	unsigned long ts = jiffies;
 
-	if (nblocks > journal->j_max_transaction_buffers) {
+	/*
+	 * Up to 1/2 of a transaction can be reserved, so in practice we can
+	 * handle only 1/2 of the maximum transaction size per operation.
+	 */
+	if (WARN_ON(blocks > journal->j_max_transaction_buffers / 2)) {
 		printk(KERN_ERR "JBD2: %s wants too many credits (%d > %d)\n",
-		       current->comm, nblocks,
-		       journal->j_max_transaction_buffers);
+		       current->comm, blocks,
+		       journal->j_max_transaction_buffers / 2);
 		return -ENOSPC;
 	}
 
+	if (handle->h_rsv_handle)
+		rsv_blocks = handle->h_rsv_handle->h_buffer_credits;
+
 alloc_transaction:
 	if (!journal->j_running_transaction) {
 		new_transaction = kmem_cache_zalloc(transaction_cache,
@@ -199,8 +312,12 @@
 		return -EROFS;
 	}
 
-	/* Wait on the journal's transaction barrier if necessary */
-	if (journal->j_barrier_count) {
+	/*
+	 * Wait on the journal's transaction barrier if necessary. Specifically
+	 * we allow reserved handles to proceed because otherwise commit could
+	 * deadlock on page writeback not being able to complete.
+	 */
+	if (!handle->h_reserved && journal->j_barrier_count) {
 		read_unlock(&journal->j_state_lock);
 		wait_event(journal->j_wait_transaction_locked,
 				journal->j_barrier_count == 0);
@@ -213,7 +330,7 @@
 			goto alloc_transaction;
 		write_lock(&journal->j_state_lock);
 		if (!journal->j_running_transaction &&
-		    !journal->j_barrier_count) {
+		    (handle->h_reserved || !journal->j_barrier_count)) {
 			jbd2_get_transaction(journal, new_transaction);
 			new_transaction = NULL;
 		}
@@ -223,85 +340,18 @@
 
 	transaction = journal->j_running_transaction;
 
-	/*
-	 * If the current transaction is locked down for commit, wait for the
-	 * lock to be released.
-	 */
-	if (transaction->t_state == T_LOCKED) {
-		DEFINE_WAIT(wait);
-
-		prepare_to_wait(&journal->j_wait_transaction_locked,
-					&wait, TASK_UNINTERRUPTIBLE);
-		read_unlock(&journal->j_state_lock);
-		schedule();
-		finish_wait(&journal->j_wait_transaction_locked, &wait);
-		goto repeat;
-	}
-
-	/*
-	 * If there is not enough space left in the log to write all potential
-	 * buffers requested by this operation, we need to stall pending a log
-	 * checkpoint to free some more log space.
-	 */
-	needed = atomic_add_return(nblocks,
-				   &transaction->t_outstanding_credits);
-
-	if (needed > journal->j_max_transaction_buffers) {
+	if (!handle->h_reserved) {
+		/* We may have dropped j_state_lock - restart in that case */
+		if (add_transaction_credits(journal, blocks, rsv_blocks))
+			goto repeat;
+	} else {
 		/*
-		 * If the current transaction is already too large, then start
-		 * to commit it: we can then go back and attach this handle to
-		 * a new transaction.
+		 * We have handle reserved so we are allowed to join T_LOCKED
+		 * transaction and we don't have to check for transaction size
+		 * and journal space.
 		 */
-		DEFINE_WAIT(wait);
-
-		jbd_debug(2, "Handle %p starting new commit...\n", handle);
-		atomic_sub(nblocks, &transaction->t_outstanding_credits);
-		prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
-				TASK_UNINTERRUPTIBLE);
-		tid = transaction->t_tid;
-		need_to_start = !tid_geq(journal->j_commit_request, tid);
-		read_unlock(&journal->j_state_lock);
-		if (need_to_start)
-			jbd2_log_start_commit(journal, tid);
-		schedule();
-		finish_wait(&journal->j_wait_transaction_locked, &wait);
-		goto repeat;
-	}
-
-	/*
-	 * The commit code assumes that it can get enough log space
-	 * without forcing a checkpoint.  This is *critical* for
-	 * correctness: a checkpoint of a buffer which is also
-	 * associated with a committing transaction creates a deadlock,
-	 * so commit simply cannot force through checkpoints.
-	 *
-	 * We must therefore ensure the necessary space in the journal
-	 * *before* starting to dirty potentially checkpointed buffers
-	 * in the new transaction.
-	 *
-	 * The worst part is, any transaction currently committing can
-	 * reduce the free space arbitrarily.  Be careful to account for
-	 * those buffers when checkpointing.
-	 */
-
-	/*
-	 * @@@ AKPM: This seems rather over-defensive.  We're giving commit
-	 * a _lot_ of headroom: 1/4 of the journal plus the size of
-	 * the committing transaction.  Really, we only need to give it
-	 * committing_transaction->t_outstanding_credits plus "enough" for
-	 * the log control blocks.
-	 * Also, this test is inconsistent with the matching one in
-	 * jbd2_journal_extend().
-	 */
-	if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
-		jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
-		atomic_sub(nblocks, &transaction->t_outstanding_credits);
-		read_unlock(&journal->j_state_lock);
-		write_lock(&journal->j_state_lock);
-		if (__jbd2_log_space_left(journal) < jbd_space_needed(journal))
-			__jbd2_log_wait_for_space(journal);
-		write_unlock(&journal->j_state_lock);
-		goto repeat;
+		sub_reserved_credits(journal, blocks);
+		handle->h_reserved = 0;
 	}
 
 	/* OK, account for the buffers that this operation expects to
@@ -309,15 +359,16 @@
 	 */
 	update_t_max_wait(transaction, ts);
 	handle->h_transaction = transaction;
-	handle->h_requested_credits = nblocks;
+	handle->h_requested_credits = blocks;
 	handle->h_start_jiffies = jiffies;
 	atomic_inc(&transaction->t_updates);
 	atomic_inc(&transaction->t_handle_count);
-	jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
-		  handle, nblocks,
+	jbd_debug(4, "Handle %p given %d credits (total %d, free %lu)\n",
+		  handle, blocks,
 		  atomic_read(&transaction->t_outstanding_credits),
-		  __jbd2_log_space_left(journal));
+		  jbd2_log_space_left(journal));
 	read_unlock(&journal->j_state_lock);
+	current->journal_info = handle;
 
 	lock_map_acquire(&handle->h_lockdep_map);
 	jbd2_journal_free_transaction(new_transaction);
@@ -348,16 +399,21 @@
  *
  * We make sure that the transaction can guarantee at least nblocks of
  * modified buffers in the log.  We block until the log can guarantee
- * that much space.
- *
- * This function is visible to journal users (like ext3fs), so is not
- * called with the journal already locked.
+ * that much space. Additionally, if rsv_blocks > 0, we also create another
+ * handle with rsv_blocks reserved blocks in the journal. This handle is
+ * stored in h_rsv_handle. It is not attached to any particular transaction
+ * and thus doesn't block transaction commit. If the caller uses this reserved
+ * handle, it has to set h_rsv_handle to NULL as otherwise jbd2_journal_stop()
+ * on the parent handle will dispose of the reserved one. A reserved handle
+ * has to be converted to a normal handle using jbd2_journal_start_reserved()
+ * before it can be used.
  *
  * Return a pointer to a newly allocated handle, or an ERR_PTR() value
  * on failure.
  */
-handle_t *jbd2__journal_start(journal_t *journal, int nblocks, gfp_t gfp_mask,
-			      unsigned int type, unsigned int line_no)
+handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks,
+			      gfp_t gfp_mask, unsigned int type,
+			      unsigned int line_no)
 {
 	handle_t *handle = journal_current_handle();
 	int err;
@@ -374,13 +430,24 @@
 	handle = new_handle(nblocks);
 	if (!handle)
 		return ERR_PTR(-ENOMEM);
+	if (rsv_blocks) {
+		handle_t *rsv_handle;
 
-	current->journal_info = handle;
+		rsv_handle = new_handle(rsv_blocks);
+		if (!rsv_handle) {
+			jbd2_free_handle(handle);
+			return ERR_PTR(-ENOMEM);
+		}
+		rsv_handle->h_reserved = 1;
+		rsv_handle->h_journal = journal;
+		handle->h_rsv_handle = rsv_handle;
+	}
 
 	err = start_this_handle(journal, handle, gfp_mask);
 	if (err < 0) {
+		if (handle->h_rsv_handle)
+			jbd2_free_handle(handle->h_rsv_handle);
 		jbd2_free_handle(handle);
-		current->journal_info = NULL;
 		return ERR_PTR(err);
 	}
 	handle->h_type = type;
@@ -395,10 +462,67 @@
 
 handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
 {
-	return jbd2__journal_start(journal, nblocks, GFP_NOFS, 0, 0);
+	return jbd2__journal_start(journal, nblocks, 0, GFP_NOFS, 0, 0);
 }
 EXPORT_SYMBOL(jbd2_journal_start);
 
+void jbd2_journal_free_reserved(handle_t *handle)
+{
+	journal_t *journal = handle->h_journal;
+
+	WARN_ON(!handle->h_reserved);
+	sub_reserved_credits(journal, handle->h_buffer_credits);
+	jbd2_free_handle(handle);
+}
+EXPORT_SYMBOL(jbd2_journal_free_reserved);
+
+/**
+ * int jbd2_journal_start_reserved(handle_t *handle) - start reserved handle
+ * @handle: handle to start
+ *
+ * Start handle that has been previously reserved with jbd2_journal_reserve().
+ * This attaches @handle to the running transaction (or creates one if there's
+ * no transaction running). Unlike jbd2_journal_start() this function cannot
+ * block on journal commit, checkpointing, or similar stuff. It can block on
+ * memory allocation or a frozen journal, though.
+ *
+ * Return 0 on success, non-zero on error - handle is freed in that case.
+ */
+int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
+				unsigned int line_no)
+{
+	journal_t *journal = handle->h_journal;
+	int ret = -EIO;
+
+	if (WARN_ON(!handle->h_reserved)) {
+		/* Someone passed in normal handle? Just stop it. */
+		jbd2_journal_stop(handle);
+		return ret;
+	}
+	/*
+	 * The usefulness of mixing reserved and unreserved handles is
+	 * questionable. So far nobody seems to need it, so just error out.
+	 */
+	if (WARN_ON(current->journal_info)) {
+		jbd2_journal_free_reserved(handle);
+		return ret;
+	}
+
+	handle->h_journal = NULL;
+	/*
+	 * GFP_NOFS is here because callers are likely from writeback or
+	 * similarly constrained call sites
+	 */
+	ret = start_this_handle(journal, handle, GFP_NOFS);
+	if (ret < 0) {
+		jbd2_journal_free_reserved(handle);
+		return ret;
+	}
+	handle->h_type = type;
+	handle->h_line_no = line_no;
+	return 0;
+}
+EXPORT_SYMBOL(jbd2_journal_start_reserved);
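+
+/*
+ * Editorial usage sketch (hypothetical caller, not from this patch),
+ * following the rules documented above:
+ *
+ *	handle = jbd2__journal_start(journal, blocks, rsv_blocks,
+ *				     GFP_NOFS, 0, 0);
+ *	rsv = handle->h_rsv_handle;
+ *	handle->h_rsv_handle = NULL;	// take ownership before stop
+ *	jbd2_journal_stop(handle);
+ *	...
+ *	err = jbd2_journal_start_reserved(rsv, 0, 0);	// now a normal handle
+ */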
 
 /**
  * int jbd2_journal_extend() - extend buffer credits.
@@ -423,49 +547,53 @@
 int jbd2_journal_extend(handle_t *handle, int nblocks)
 {
 	transaction_t *transaction = handle->h_transaction;
-	journal_t *journal = transaction->t_journal;
+	journal_t *journal;
 	int result;
 	int wanted;
 
-	result = -EIO;
+	WARN_ON(!transaction);
 	if (is_handle_aborted(handle))
-		goto out;
+		return -EROFS;
+	journal = transaction->t_journal;
 
 	result = 1;
 
 	read_lock(&journal->j_state_lock);
 
 	/* Don't extend a locked-down transaction! */
-	if (handle->h_transaction->t_state != T_RUNNING) {
+	if (transaction->t_state != T_RUNNING) {
 		jbd_debug(3, "denied handle %p %d blocks: "
 			  "transaction not running\n", handle, nblocks);
 		goto error_out;
 	}
 
 	spin_lock(&transaction->t_handle_lock);
-	wanted = atomic_read(&transaction->t_outstanding_credits) + nblocks;
+	wanted = atomic_add_return(nblocks,
+				   &transaction->t_outstanding_credits);
 
 	if (wanted > journal->j_max_transaction_buffers) {
 		jbd_debug(3, "denied handle %p %d blocks: "
 			  "transaction too large\n", handle, nblocks);
+		atomic_sub(nblocks, &transaction->t_outstanding_credits);
 		goto unlock;
 	}
 
-	if (wanted > __jbd2_log_space_left(journal)) {
+	if (wanted + (wanted >> JBD2_CONTROL_BLOCKS_SHIFT) >
+	    jbd2_log_space_left(journal)) {
 		jbd_debug(3, "denied handle %p %d blocks: "
 			  "insufficient log space\n", handle, nblocks);
+		atomic_sub(nblocks, &transaction->t_outstanding_credits);
 		goto unlock;
 	}
 
 	trace_jbd2_handle_extend(journal->j_fs_dev->bd_dev,
-				 handle->h_transaction->t_tid,
+				 transaction->t_tid,
 				 handle->h_type, handle->h_line_no,
 				 handle->h_buffer_credits,
 				 nblocks);
 
 	handle->h_buffer_credits += nblocks;
 	handle->h_requested_credits += nblocks;
-	atomic_add(nblocks, &transaction->t_outstanding_credits);
 	result = 0;
 
 	jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
@@ -473,7 +601,6 @@
 	spin_unlock(&transaction->t_handle_lock);
 error_out:
 	read_unlock(&journal->j_state_lock);
-out:
 	return result;
 }
 
@@ -490,19 +617,22 @@
  * to a running handle, a call to jbd2_journal_restart will commit the
  * handle's transaction so far and reattach the handle to a new
  * transaction capable of guaranteeing the requested number of
- * credits.
+ * credits. We preserve the reserved handle if one is attached to the
+ * passed-in handle.
  */
 int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
 {
 	transaction_t *transaction = handle->h_transaction;
-	journal_t *journal = transaction->t_journal;
+	journal_t *journal;
 	tid_t		tid;
 	int		need_to_start, ret;
 
+	WARN_ON(!transaction);
 	/* If we've had an abort of any type, don't even think about
 	 * actually doing the restart! */
 	if (is_handle_aborted(handle))
 		return 0;
+	journal = transaction->t_journal;
 
 	/*
 	 * First unlink the handle from its current transaction, and start the
@@ -515,10 +645,16 @@
 	spin_lock(&transaction->t_handle_lock);
 	atomic_sub(handle->h_buffer_credits,
 		   &transaction->t_outstanding_credits);
+	if (handle->h_rsv_handle) {
+		sub_reserved_credits(journal,
+				     handle->h_rsv_handle->h_buffer_credits);
+	}
 	if (atomic_dec_and_test(&transaction->t_updates))
 		wake_up(&journal->j_wait_updates);
 	tid = transaction->t_tid;
 	spin_unlock(&transaction->t_handle_lock);
+	handle->h_transaction = NULL;
+	current->journal_info = NULL;
 
 	jbd_debug(2, "restarting handle %p\n", handle);
 	need_to_start = !tid_geq(journal->j_commit_request, tid);
@@ -557,6 +693,14 @@
 	write_lock(&journal->j_state_lock);
 	++journal->j_barrier_count;
 
+	/* Wait until there are no reserved handles */
+	if (atomic_read(&journal->j_reserved_credits)) {
+		write_unlock(&journal->j_state_lock);
+		wait_event(journal->j_wait_reserved,
+			   atomic_read(&journal->j_reserved_credits) == 0);
+		write_lock(&journal->j_state_lock);
+	}
+
 	/* Wait until there are no running updates */
 	while (1) {
 		transaction_t *transaction = journal->j_running_transaction;
@@ -634,17 +778,16 @@
 			int force_copy)
 {
 	struct buffer_head *bh;
-	transaction_t *transaction;
+	transaction_t *transaction = handle->h_transaction;
 	journal_t *journal;
 	int error;
 	char *frozen_buffer = NULL;
 	int need_copy = 0;
 	unsigned long start_lock, time_lock;
 
+	WARN_ON(!transaction);
 	if (is_handle_aborted(handle))
 		return -EROFS;
-
-	transaction = handle->h_transaction;
 	journal = transaction->t_journal;
 
 	jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
@@ -754,41 +897,29 @@
 		 * journaled.  If the primary copy is already going to
 		 * disk then we cannot do copy-out here. */
 
-		if (jh->b_jlist == BJ_Shadow) {
-			DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Unshadow);
-			wait_queue_head_t *wqh;
-
-			wqh = bit_waitqueue(&bh->b_state, BH_Unshadow);
-
+		if (buffer_shadow(bh)) {
 			JBUFFER_TRACE(jh, "on shadow: sleep");
 			jbd_unlock_bh_state(bh);
-			/* commit wakes up all shadow buffers after IO */
-			for ( ; ; ) {
-				prepare_to_wait(wqh, &wait.wait,
-						TASK_UNINTERRUPTIBLE);
-				if (jh->b_jlist != BJ_Shadow)
-					break;
-				schedule();
-			}
-			finish_wait(wqh, &wait.wait);
+			wait_on_bit_io(&bh->b_state, BH_Shadow,
+				       TASK_UNINTERRUPTIBLE);
 			goto repeat;
 		}
 
-		/* Only do the copy if the currently-owning transaction
-		 * still needs it.  If it is on the Forget list, the
-		 * committing transaction is past that stage.  The
-		 * buffer had better remain locked during the kmalloc,
-		 * but that should be true --- we hold the journal lock
-		 * still and the buffer is already on the BUF_JOURNAL
-		 * list so won't be flushed.
+		/*
+		 * Only do the copy if the currently-owning transaction still
+		 * needs it. If buffer isn't on BJ_Metadata list, the
+		 * committing transaction is past that stage (here we use the
+		 * fact that BH_Shadow is set under bh_state lock together with
+		 * refiling to BJ_Shadow list and at this point we know the
+		 * buffer doesn't have BH_Shadow set).
 		 *
 		 * Subtle point, though: if this is a get_undo_access,
 		 * then we will be relying on the frozen_data to contain
 		 * the new value of the committed_data record after the
 		 * transaction, so we HAVE to force the frozen_data copy
-		 * in that case. */
-
-		if (jh->b_jlist != BJ_Forget || force_copy) {
+		 * in that case.
+		 */
+		if (jh->b_jlist == BJ_Metadata || force_copy) {
 			JBUFFER_TRACE(jh, "generate frozen data");
 			if (!frozen_buffer) {
 				JBUFFER_TRACE(jh, "allocate memory for buffer");
@@ -797,7 +928,7 @@
 					jbd2_alloc(jh2bh(jh)->b_size,
 							 GFP_NOFS);
 				if (!frozen_buffer) {
-					printk(KERN_EMERG
+					printk(KERN_ERR
 					       "%s: OOM for frozen_buffer\n",
 					       __func__);
 					JBUFFER_TRACE(jh, "oom!");
@@ -915,14 +1046,16 @@
 int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
 {
 	transaction_t *transaction = handle->h_transaction;
-	journal_t *journal = transaction->t_journal;
+	journal_t *journal;
 	struct journal_head *jh = jbd2_journal_add_journal_head(bh);
 	int err;
 
 	jbd_debug(5, "journal_head %p\n", jh);
+	WARN_ON(!transaction);
 	err = -EROFS;
 	if (is_handle_aborted(handle))
 		goto out;
+	journal = transaction->t_journal;
 	err = 0;
 
 	JBUFFER_TRACE(jh, "entry");
@@ -934,7 +1067,6 @@
 	 * reused here.
 	 */
 	jbd_lock_bh_state(bh);
-	spin_lock(&journal->j_list_lock);
 	J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
 		jh->b_transaction == NULL ||
 		(jh->b_transaction == journal->j_committing_transaction &&
@@ -957,12 +1089,14 @@
 		jh->b_modified = 0;
 
 		JBUFFER_TRACE(jh, "file as BJ_Reserved");
+		spin_lock(&journal->j_list_lock);
 		__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
 	} else if (jh->b_transaction == journal->j_committing_transaction) {
 		/* first access by this transaction */
 		jh->b_modified = 0;
 
 		JBUFFER_TRACE(jh, "set next transaction");
+		spin_lock(&journal->j_list_lock);
 		jh->b_next_transaction = transaction;
 	}
 	spin_unlock(&journal->j_list_lock);
@@ -1029,7 +1163,7 @@
 	if (!jh->b_committed_data) {
 		committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
 		if (!committed_data) {
-			printk(KERN_EMERG "%s: No memory for committed data\n",
+			printk(KERN_ERR "%s: No memory for committed data\n",
 				__func__);
 			err = -ENOMEM;
 			goto out;
@@ -1128,12 +1262,14 @@
 int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
 {
 	transaction_t *transaction = handle->h_transaction;
-	journal_t *journal = transaction->t_journal;
+	journal_t *journal;
 	struct journal_head *jh;
 	int ret = 0;
 
+	WARN_ON(!transaction);
 	if (is_handle_aborted(handle))
-		goto out;
+		return -EROFS;
+	journal = transaction->t_journal;
 	jh = jbd2_journal_grab_journal_head(bh);
 	if (!jh) {
 		ret = -EUCLEAN;
@@ -1169,9 +1305,9 @@
 		JBUFFER_TRACE(jh, "fastpath");
 		if (unlikely(jh->b_transaction !=
 			     journal->j_running_transaction)) {
-			printk(KERN_EMERG "JBD: %s: "
+			printk(KERN_ERR "JBD2: %s: "
 			       "jh->b_transaction (%llu, %p, %u) != "
-			       "journal->j_running_transaction (%p, %u)",
+			       "journal->j_running_transaction (%p, %u)\n",
 			       journal->j_devname,
 			       (unsigned long long) bh->b_blocknr,
 			       jh->b_transaction,
@@ -1194,30 +1330,25 @@
 	 */
 	if (jh->b_transaction != transaction) {
 		JBUFFER_TRACE(jh, "already on other transaction");
-		if (unlikely(jh->b_transaction !=
-			     journal->j_committing_transaction)) {
-			printk(KERN_EMERG "JBD: %s: "
-			       "jh->b_transaction (%llu, %p, %u) != "
-			       "journal->j_committing_transaction (%p, %u)",
+		if (unlikely(((jh->b_transaction !=
+			       journal->j_committing_transaction)) ||
+			     (jh->b_next_transaction != transaction))) {
+			printk(KERN_ERR "jbd2_journal_dirty_metadata: %s: "
+			       "bad jh for block %llu: "
+			       "transaction (%p, %u), "
+			       "jh->b_transaction (%p, %u), "
+			       "jh->b_next_transaction (%p, %u), jlist %u\n",
 			       journal->j_devname,
 			       (unsigned long long) bh->b_blocknr,
+			       transaction, transaction->t_tid,
 			       jh->b_transaction,
-			       jh->b_transaction ? jh->b_transaction->t_tid : 0,
-			       journal->j_committing_transaction,
-			       journal->j_committing_transaction ?
-			       journal->j_committing_transaction->t_tid : 0);
-			ret = -EINVAL;
-		}
-		if (unlikely(jh->b_next_transaction != transaction)) {
-			printk(KERN_EMERG "JBD: %s: "
-			       "jh->b_next_transaction (%llu, %p, %u) != "
-			       "transaction (%p, %u)",
-			       journal->j_devname,
-			       (unsigned long long) bh->b_blocknr,
+			       jh->b_transaction ?
+			       jh->b_transaction->t_tid : 0,
 			       jh->b_next_transaction,
 			       jh->b_next_transaction ?
 			       jh->b_next_transaction->t_tid : 0,
-			       transaction, transaction->t_tid);
+			       jh->b_jlist);
+			WARN_ON(1);
 			ret = -EINVAL;
 		}
 		/* And this case is illegal: we can't reuse another
@@ -1230,7 +1361,7 @@
 
 	JBUFFER_TRACE(jh, "file as BJ_Metadata");
 	spin_lock(&journal->j_list_lock);
-	__jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
+	__jbd2_journal_file_buffer(jh, transaction, BJ_Metadata);
 	spin_unlock(&journal->j_list_lock);
 out_unlock_bh:
 	jbd_unlock_bh_state(bh);
@@ -1260,16 +1391,20 @@
 int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
 {
 	transaction_t *transaction = handle->h_transaction;
-	journal_t *journal = transaction->t_journal;
+	journal_t *journal;
 	struct journal_head *jh;
 	int drop_reserve = 0;
 	int err = 0;
 	int was_modified = 0;
 
+	WARN_ON(!transaction);
+	if (is_handle_aborted(handle))
+		return -EROFS;
+	journal = transaction->t_journal;
+
 	BUFFER_TRACE(bh, "entry");
 
 	jbd_lock_bh_state(bh);
-	spin_lock(&journal->j_list_lock);
 
 	if (!buffer_jbd(bh))
 		goto not_jbd;
@@ -1292,7 +1427,7 @@
 	 */
 	jh->b_modified = 0;
 
-	if (jh->b_transaction == handle->h_transaction) {
+	if (jh->b_transaction == transaction) {
 		J_ASSERT_JH(jh, !jh->b_frozen_data);
 
 		/* If we are forgetting a buffer which is already part
@@ -1322,6 +1457,7 @@
 		 * we know to remove the checkpoint after we commit.
 		 */
 
+		spin_lock(&journal->j_list_lock);
 		if (jh->b_cp_transaction) {
 			__jbd2_journal_temp_unlink_buffer(jh);
 			__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
@@ -1334,6 +1470,7 @@
 				goto drop;
 			}
 		}
+		spin_unlock(&journal->j_list_lock);
 	} else if (jh->b_transaction) {
 		J_ASSERT_JH(jh, (jh->b_transaction ==
 				 journal->j_committing_transaction));
@@ -1345,7 +1482,9 @@
 
 		if (jh->b_next_transaction) {
 			J_ASSERT(jh->b_next_transaction == transaction);
+			spin_lock(&journal->j_list_lock);
 			jh->b_next_transaction = NULL;
+			spin_unlock(&journal->j_list_lock);
 
 			/*
 			 * only drop a reference if this transaction modified
@@ -1357,7 +1496,6 @@
 	}
 
 not_jbd:
-	spin_unlock(&journal->j_list_lock);
 	jbd_unlock_bh_state(bh);
 	__brelse(bh);
 drop:
@@ -1387,19 +1525,21 @@
 int jbd2_journal_stop(handle_t *handle)
 {
 	transaction_t *transaction = handle->h_transaction;
-	journal_t *journal = transaction->t_journal;
-	int err, wait_for_commit = 0;
+	journal_t *journal;
+	int err = 0, wait_for_commit = 0;
 	tid_t tid;
 	pid_t pid;
 
+	if (!transaction)
+		goto free_and_exit;
+	journal = transaction->t_journal;
+
 	J_ASSERT(journal_current_handle() == handle);
 
 	if (is_handle_aborted(handle))
 		err = -EIO;
-	else {
+	else
 		J_ASSERT(atomic_read(&transaction->t_updates) > 0);
-		err = 0;
-	}
 
 	if (--handle->h_ref > 0) {
 		jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
@@ -1409,7 +1549,7 @@
 
 	jbd_debug(4, "Handle %p going down\n", handle);
 	trace_jbd2_handle_stats(journal->j_fs_dev->bd_dev,
-				handle->h_transaction->t_tid,
+				transaction->t_tid,
 				handle->h_type, handle->h_line_no,
 				jiffies - handle->h_start_jiffies,
 				handle->h_sync, handle->h_requested_credits,
@@ -1523,33 +1663,13 @@
 
 	lock_map_release(&handle->h_lockdep_map);
 
+	if (handle->h_rsv_handle)
+		jbd2_journal_free_reserved(handle->h_rsv_handle);
+free_and_exit:
 	jbd2_free_handle(handle);
 	return err;
 }
 
-/**
- * int jbd2_journal_force_commit() - force any uncommitted transactions
- * @journal: journal to force
- *
- * For synchronous operations: force any uncommitted transactions
- * to disk.  May seem kludgy, but it reuses all the handle batching
- * code in a very simple manner.
- */
-int jbd2_journal_force_commit(journal_t *journal)
-{
-	handle_t *handle;
-	int ret;
-
-	handle = jbd2_journal_start(journal, 1);
-	if (IS_ERR(handle)) {
-		ret = PTR_ERR(handle);
-	} else {
-		handle->h_sync = 1;
-		ret = jbd2_journal_stop(handle);
-	}
-	return ret;
-}
-
 /*
  *
  * List management code snippets: various functions for manipulating the
@@ -1606,10 +1726,10 @@
  * Remove a buffer from the appropriate transaction list.
  *
  * Note that this function can *change* the value of
- * bh->b_transaction->t_buffers, t_forget, t_iobuf_list, t_shadow_list,
- * t_log_list or t_reserved_list.  If the caller is holding onto a copy of one
- * of these pointers, it could go bad.  Generally the caller needs to re-read
- * the pointer from the transaction_t.
+ * bh->b_transaction->t_buffers, t_forget, t_shadow_list, t_log_list or
+ * t_reserved_list.  If the caller is holding onto a copy of one of these
+ * pointers, it could go bad.  Generally the caller needs to re-read the
+ * pointer from the transaction_t.
  *
  * Called under j_list_lock.
  */
@@ -1639,15 +1759,9 @@
 	case BJ_Forget:
 		list = &transaction->t_forget;
 		break;
-	case BJ_IO:
-		list = &transaction->t_iobuf_list;
-		break;
 	case BJ_Shadow:
 		list = &transaction->t_shadow_list;
 		break;
-	case BJ_LogCtl:
-		list = &transaction->t_log_list;
-		break;
 	case BJ_Reserved:
 		list = &transaction->t_reserved_list;
 		break;
@@ -1702,11 +1816,11 @@
 	if (buffer_locked(bh) || buffer_dirty(bh))
 		goto out;
 
-	if (jh->b_next_transaction != NULL)
+	if (jh->b_next_transaction != NULL || jh->b_transaction != NULL)
 		goto out;
 
 	spin_lock(&journal->j_list_lock);
-	if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) {
+	if (jh->b_cp_transaction != NULL) {
 		/* written-back checkpointed metadata buffer */
 		JBUFFER_TRACE(jh, "remove from checkpoint list");
 		__jbd2_journal_remove_checkpoint(jh);
@@ -2041,18 +2155,23 @@
  * void jbd2_journal_invalidatepage()
  * @journal: journal to use for flush...
  * @page:    page to flush
- * @offset:  length of page to invalidate.
+ * @offset:  start of the range to invalidate
+ * @length:  length of the range to invalidate
  *
- * Reap page buffers containing data after offset in page. Can return -EBUSY
- * if buffers are part of the committing transaction and the page is straddling
- * i_size. Caller then has to wait for current commit and try again.
+ * Reap page buffers containing data in the specified range of the page.
+ * Can return -EBUSY if buffers are part of the committing transaction and
+ * the page is straddling i_size. Caller then has to wait for current commit
+ * and try again.
  */
 int jbd2_journal_invalidatepage(journal_t *journal,
 				struct page *page,
-				unsigned long offset)
+				unsigned int offset,
+				unsigned int length)
 {
 	struct buffer_head *head, *bh, *next;
+	unsigned int stop = offset + length;
 	unsigned int curr_off = 0;
+	int partial_page = (offset || length < PAGE_CACHE_SIZE);
 	int may_free = 1;
 	int ret = 0;
 
@@ -2061,6 +2180,8 @@
 	if (!page_has_buffers(page))
 		return 0;
 
+	BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
+
 	/* We will potentially be playing with lists other than just the
 	 * data lists (especially for journaled data mode), so be
 	 * cautious in our locking. */
@@ -2070,10 +2191,13 @@
 		unsigned int next_off = curr_off + bh->b_size;
 		next = bh->b_this_page;
 
+		if (next_off > stop)
+			return 0;
+
 		if (offset <= curr_off) {
 			/* This block is wholly outside the truncation point */
 			lock_buffer(bh);
-			ret = journal_unmap_buffer(journal, bh, offset > 0);
+			ret = journal_unmap_buffer(journal, bh, partial_page);
 			unlock_buffer(bh);
 			if (ret < 0)
 				return ret;
@@ -2084,7 +2208,7 @@
 
 	} while (bh != head);
 
-	if (!offset) {
+	if (!partial_page) {
 		if (may_free && try_to_free_buffers(page))
 			J_ASSERT(!page_has_buffers(page));
 	}
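/*
 * [Editor's note] Many hunks in this merge convert ->invalidatepage from
 * (page, offset) to (page, offset, length) so callers can invalidate a
 * sub-range. A hedged minimal implementation in the new style, mirroring
 * the "whole page only" check several filesystems below use
 * (example_* is illustrative):
 */
#include <linux/pagemap.h>

static void example_invalidatepage(struct page *page, unsigned int offset,
				   unsigned int length)
{
	/* A partial-page invalidation leaves private state in place. */
	if (offset != 0 || length < PAGE_CACHE_SIZE)
		return;

	/* ... tear down per-page private data here ... */
}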
@@ -2145,15 +2269,9 @@
 	case BJ_Forget:
 		list = &transaction->t_forget;
 		break;
-	case BJ_IO:
-		list = &transaction->t_iobuf_list;
-		break;
 	case BJ_Shadow:
 		list = &transaction->t_shadow_list;
 		break;
-	case BJ_LogCtl:
-		list = &transaction->t_log_list;
-		break;
 	case BJ_Reserved:
 		list = &transaction->t_reserved_list;
 		break;
@@ -2255,10 +2373,12 @@
 int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode)
 {
 	transaction_t *transaction = handle->h_transaction;
-	journal_t *journal = transaction->t_journal;
+	journal_t *journal;
 
+	WARN_ON(!transaction);
 	if (is_handle_aborted(handle))
-		return -EIO;
+		return -EROFS;
+	journal = transaction->t_journal;
 
 	jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
 			transaction->t_tid);
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 0defb1c..0918f0e 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -243,6 +243,7 @@
 	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
 	int err;
 
+	sync_filesystem(sb);
 	err = jffs2_parse_options(c, data);
 	if (err)
 		return -EINVAL;
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 6740d34..9e3aaff 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -571,9 +571,10 @@
 	return ret;
 }
 
-static void metapage_invalidatepage(struct page *page, unsigned long offset)
+static void metapage_invalidatepage(struct page *page, unsigned int offset,
+				    unsigned int length)
 {
-	BUG_ON(offset);
+	BUG_ON(offset || length < PAGE_CACHE_SIZE);
 
 	BUG_ON(PageWriteback(page));
 
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 788e0a9..b7486da 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -413,6 +413,7 @@
 	int flag = JFS_SBI(sb)->flag;
 	int ret;
 
+	sync_filesystem(sb);
 	if (!parse_options(data, sb, &newLVSize, &flag)) {
 		return -EINVAL;
 	}
diff --git a/fs/logfs/file.c b/fs/logfs/file.c
index c2219a6..57914fc 100644
--- a/fs/logfs/file.c
+++ b/fs/logfs/file.c
@@ -159,7 +159,8 @@
 	return __logfs_writepage(page);
 }
 
-static void logfs_invalidatepage(struct page *page, unsigned long offset)
+static void logfs_invalidatepage(struct page *page, unsigned int offset,
+				 unsigned int length)
 {
 	struct logfs_block *block = logfs_block(page);
 
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
index 038da09..d448a77 100644
--- a/fs/logfs/segment.c
+++ b/fs/logfs/segment.c
@@ -884,7 +884,8 @@
 	return area;
 }
 
-static void map_invalidatepage(struct page *page, unsigned long l)
+static void map_invalidatepage(struct page *page, unsigned int o,
+			       unsigned int l)
 {
 	return;
 }
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index df12249..a54d088 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -123,6 +123,7 @@
 	struct minix_sb_info * sbi = minix_sb(sb);
 	struct minix_super_block * ms;
 
+	sync_filesystem(sb);
 	ms = sbi->s_ms;
 	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
 		return 0;
diff --git a/fs/namespace.c b/fs/namespace.c
index d0244c8..8ecc31c 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -54,6 +54,7 @@
  * tree or hash is modified or when a vfsmount structure is modified.
  */
 DEFINE_BRLOCK(vfsmount_lock);
+EXPORT_SYMBOL(vfsmount_lock);
 
 static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
 {
@@ -427,6 +428,7 @@
 	mnt_dec_writers(real_mount(mnt));
 	preempt_enable();
 }
+EXPORT_SYMBOL_GPL(__mnt_drop_write);
 
 /**
  * mnt_drop_write - give up write access to a mount
@@ -1474,6 +1476,7 @@
 	}
 	return 0;
 }
+EXPORT_SYMBOL(iterate_mounts);
 
 static void cleanup_group_ids(struct mount *mnt, struct mount *end)
 {
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 26910c8..3f54348 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -99,6 +99,7 @@
 
 static int ncp_remount(struct super_block *sb, int *flags, char* data)
 {
+	sync_filesystem(sb);
 	*flags |= MS_NODIRATIME;
 	return 0;
 }
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 13ca196..169e5ab 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -39,6 +39,19 @@
 
 	  If unsure, say Y.
 
+config NFS_DEF_FILE_IO_SIZE
+	int "Default size for NFS I/O read and write at runtime"
+	depends on NFS_FS
+	default "4096"
+	help
+	  To change the default rsize and wsize supported by the NFS client,
+	  adjust NFS_DEF_FILE_IO_SIZE.  64KB is a typical maximum, but some
+	  servers can support a megabyte or more.  The default is left at 4096
+	  bytes, which is reasonable for NFS over UDP; however, on some
+	  systems, setting a smaller value such as 1024 can work around
+	  limitations in the driver or hardware and result in overall
+	  improved performance.
+
 config NFS_V3
 	tristate "NFS client support for NFS version 3"
 	depends on NFS_FS
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index c513b0c..818697c 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -9,7 +9,6 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/sched.h>
@@ -70,7 +69,7 @@
 	[4] = NULL,
 };
 
-const struct rpc_program nfs_program = {
+struct rpc_program nfs_program = {
 	.name			= "nfs",
 	.number			= NFS_PROGRAM,
 	.nrvers			= ARRAY_SIZE(nfs_version),
@@ -161,6 +160,7 @@
 	try_module_get(clp->cl_nfs_mod->owner);
 
 	clp->rpc_ops = clp->cl_nfs_mod->rpc_ops;
+	clp->nfs_prog = cl_init->nfs_prog;
 
 	atomic_set(&clp->cl_count, 1);
 	clp->cl_cons_state = NFS_CS_INITING;
@@ -418,6 +418,9 @@
 		/* Match nfsv4 minorversion */
 		if (clp->cl_minorversion != data->minorversion)
 			continue;
+		if (clp->nfs_prog != data->nfs_prog)
+			continue;
+
 		/* Match the full socket address */
 		if (!nfs_sockaddr_cmp(sap, clap))
 			continue;
@@ -599,6 +602,10 @@
 	if (!IS_ERR(clp->cl_rpcclient))
 		return 0;
 
+	if (clp->nfs_prog)
+		nfs_program.number = clp->nfs_prog;
+	else
+		nfs_program.number = NFS_PROGRAM;
 	clnt = rpc_create(&args);
 	if (IS_ERR(clnt)) {
 		dprintk("%s: cannot create RPC client. Error = %ld\n",
@@ -742,6 +749,7 @@
 		.nfs_mod = nfs_mod,
 		.proto = data->nfs_server.protocol,
 		.net = data->net,
+		.nfs_prog = data->nfs_prog,
 	};
 	struct rpc_timeout timeparms;
 	struct nfs_client *clp;
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index a87a44f..6b4a79f 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -451,11 +451,13 @@
  * - Called if either PG_private or PG_fscache is set on the page
  * - Caller holds page lock
  */
-static void nfs_invalidate_page(struct page *page, unsigned long offset)
+static void nfs_invalidate_page(struct page *page, unsigned int offset,
+				unsigned int length)
 {
-	dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %lu)\n", page, offset);
+	dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n",
+		 page, offset, length);
 
-	if (offset != 0)
+	if (offset != 0 || length < PAGE_CACHE_SIZE)
 		return;
 	/* Cancel any unstarted writes on this page */
 	nfs_wb_page_cancel(page_file_mapping(page)->host, page);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 02c6ead..9de3976 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -79,7 +79,7 @@
 {
 	if (fatal_signal_pending(current))
 		return -ERESTARTSYS;
-	freezable_schedule();
+	freezable_schedule_unsafe();
 	return 0;
 }
 EXPORT_SYMBOL_GPL(nfs_wait_bit_killable);
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 91e59a3..13a36e2 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -73,6 +73,7 @@
 	int proto;
 	u32 minorversion;
 	struct net *net;
+	int nfs_prog;
 };
 
 /*
@@ -84,6 +85,8 @@
 	unsigned int		timeo, retrans;
 	unsigned int		acregmin, acregmax,
 				acdirmin, acdirmax;
+	int			nfs_prog;
+	int			mount_prog;
 	unsigned int		namlen;
 	unsigned int		options;
 	unsigned int		bsize;
@@ -140,11 +143,11 @@
 	struct nfs_fh *mntfh;
 };
 
-extern int nfs_mount(struct nfs_mount_request *info);
+extern int nfs_mount(struct nfs_mount_request *info, int prog);
 extern void nfs_umount(const struct nfs_mount_request *info);
 
 /* client.c */
-extern const struct rpc_program nfs_program;
+extern struct rpc_program nfs_program;
 extern void nfs_clients_init(struct net *net);
 extern struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *);
 int nfs_create_rpc_client(struct nfs_client *, const struct rpc_timeout *, rpc_authflavor_t);
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index 91a6faf..09eeebc 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -67,7 +67,7 @@
 	MOUNTPROC3_EXPORT	= 5,
 };
 
-static const struct rpc_program mnt_program;
+static struct rpc_program mnt_program;
 
 /*
  * Defined by OpenGroup XNFS Version 3W, chapter 8
@@ -141,7 +141,7 @@
  *
  * Uses default timeout parameters specified by underlying transport.
  */
-int nfs_mount(struct nfs_mount_request *info)
+int nfs_mount(struct nfs_mount_request *info, int m_prog)
 {
 	struct mountres	result = {
 		.fh		= info->fh,
@@ -175,6 +175,7 @@
 	if (info->noresvport)
 		args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
 
+	mnt_program.number = m_prog;
 	mnt_clnt = rpc_create(&args);
 	if (IS_ERR(mnt_clnt))
 		goto out_clnt_err;
@@ -514,7 +515,7 @@
 
 static struct rpc_stat mnt_stats;
 
-static const struct rpc_program mnt_program = {
+static struct rpc_program mnt_program = {
 	.name		= "mount",
 	.number		= NFS_MNT_PROGRAM,
 	.nrvers		= ARRAY_SIZE(mnt_version),
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 43ea96c..ce90eb4 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -33,7 +33,7 @@
 		res = rpc_call_sync(clnt, msg, flags);
 		if (res != -EJUKEBOX)
 			break;
-		freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
+		freezable_schedule_timeout_killable_unsafe(NFS_JUKEBOX_RETRY_TIME);
 		res = -ERESTARTSYS;
 	} while (!fatal_signal_pending(current));
 	return res;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index c2b89a1..6b4689b 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -268,7 +268,7 @@
 		*timeout = NFS4_POLL_RETRY_MIN;
 	if (*timeout > NFS4_POLL_RETRY_MAX)
 		*timeout = NFS4_POLL_RETRY_MAX;
-	freezable_schedule_timeout_killable(*timeout);
+	freezable_schedule_timeout_killable_unsafe(*timeout);
 	if (fatal_signal_pending(current))
 		res = -ERESTARTSYS;
 	*timeout <<= 1;
@@ -4557,7 +4557,7 @@
 static unsigned long
 nfs4_set_lock_task_retry(unsigned long timeout)
 {
-	freezable_schedule_timeout_killable(timeout);
+	freezable_schedule_timeout_killable_unsafe(timeout);
 	timeout <<= 1;
 	if (timeout > NFS4_LOCK_MAXTIMEOUT)
 		return NFS4_LOCK_MAXTIMEOUT;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 2d7525f..38fb4a9 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -103,6 +103,8 @@
 	Opt_mountport,
 	Opt_mountvers,
 	Opt_minorversion,
+	Opt_mountprog,
+	Opt_nfsprog,
 
 	/* Mount options that take string arguments */
 	Opt_nfsvers,
@@ -168,6 +170,8 @@
 	{ Opt_mountport, "mountport=%s" },
 	{ Opt_mountvers, "mountvers=%s" },
 	{ Opt_minorversion, "minorversion=%s" },
+	{ Opt_mountprog, "mountprog=%s" },
+	{ Opt_nfsprog, "nfsprog=%s" },
 
 	{ Opt_nfsvers, "nfsvers=%s" },
 	{ Opt_nfsvers, "vers=%s" },
@@ -917,6 +921,8 @@
 		data->acregmax		= NFS_DEF_ACREGMAX;
 		data->acdirmin		= NFS_DEF_ACDIRMIN;
 		data->acdirmax		= NFS_DEF_ACDIRMAX;
+		data->nfs_prog		= NFS_PROGRAM;
+		data->mount_prog	= NFS_MNT_PROGRAM;
 		data->mount_server.port	= NFS_UNSPEC_PORT;
 		data->nfs_server.port	= NFS_UNSPEC_PORT;
 		data->nfs_server.protocol = XPRT_TRANSPORT_TCP;
@@ -1316,6 +1322,26 @@
 				goto out_invalid_value;
 			mnt->acdirmax = option;
 			break;
+		case Opt_mountprog:
+			string = match_strdup(args);
+			if (string == NULL)
+				goto out_nomem;
+			rc = strict_strtoul(string, 10, &option);
+			kfree(string);
+			if (rc != 0)
+				goto out_invalid_value;
+			mnt->mount_prog = option;
+			break;
+		case Opt_nfsprog:
+			string = match_strdup(args);
+			if (string == NULL)
+				goto out_nomem;
+			rc = strict_strtoul(string, 10, &option);
+			kfree(string);
+			if (rc != 0)
+				goto out_invalid_value;
+			mnt->nfs_prog = option;
+			break;
 		case Opt_actimeo:
 			if (nfs_get_option_ul(args, &option))
 				goto out_invalid_value;
@@ -1749,7 +1775,7 @@
 	 * Now ask the mount server to map our export path
 	 * to a file handle.
 	 */
-	status = nfs_mount(&request);
+	status = nfs_mount(&request, args->mount_prog);
 	if (status != 0) {
 		dfprintk(MOUNT, "NFS: unable to mount server %s, error %d\n",
 				request.hostname, status);
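/*
 * [Editor's note] With the two options above, non-default RPC program
 * numbers can be supplied at mount time, e.g. (illustrative; the numbers
 * shown are just the standard defaults, NFS_PROGRAM=100003 and
 * NFS_MNT_PROGRAM=100005):
 *
 *     mount -o nfsprog=100003,mountprog=100005 server:/export /mnt
 */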
@@ -1876,6 +1902,7 @@
 {
 	struct nfs_mount_data *data = (struct nfs_mount_data *)options;
 	struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
+	args->nfs_prog		= NFS_PROGRAM;
 
 	if (data == NULL)
 		goto out_no_data;
@@ -1896,6 +1923,8 @@
 			goto out_no_sec;
 	case 5:
 		memset(data->context, 0, sizeof(data->context));
+	case 7:
+		args->nfs_prog = (data->version >= 7) ? data->nfs_prog : NFS_PROGRAM;
 	case 6:
 		if (data->flags & NFS_MOUNT_VER3) {
 			if (data->root.size > NFS3_FHSIZE || data->root.size == 0)
@@ -2133,6 +2162,8 @@
 	struct nfs4_mount_data *options4 = (struct nfs4_mount_data *)raw_data;
 	u32 nfsvers = nfss->nfs_client->rpc_ops->version;
 
+	sync_filesystem(sb);
+
 	/*
 	 * Userspace mount programs that send binary options generally send
 	 * them populated with default values. We have no way to know which
@@ -2625,6 +2656,8 @@
 
 	args->version = 4;
 
+	args->nfs_prog = NFS_PROGRAM;
+
 	switch (data->version) {
 	case 1:
 		if (data->host_addrlen > sizeof(args->nfs_server.address))
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 4e9a21d..105a3b0 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -240,11 +240,16 @@
 	struct list_head list;
 };
 
+struct nfs4_dir_ctx {
+	struct dir_context ctx;
+	struct list_head names;
+};
+
 static int
 nfsd4_build_namelist(void *arg, const char *name, int namlen,
 		loff_t offset, u64 ino, unsigned int d_type)
 {
-	struct list_head *names = arg;
+	struct nfs4_dir_ctx *ctx = arg;
 	struct name_list *entry;
 
 	if (namlen != HEXDIR_LEN - 1)
@@ -254,7 +259,7 @@
 		return -ENOMEM;
 	memcpy(entry->name, name, HEXDIR_LEN - 1);
 	entry->name[HEXDIR_LEN - 1] = '\0';
-	list_add(&entry->list, names);
+	list_add(&entry->list, &ctx->names);
 	return 0;
 }
 
@@ -263,7 +268,10 @@
 {
 	const struct cred *original_cred;
 	struct dentry *dir = nn->rec_file->f_path.dentry;
-	LIST_HEAD(names);
+	struct nfs4_dir_ctx ctx = {
+		.ctx.actor = nfsd4_build_namelist,
+		.names = LIST_HEAD_INIT(ctx.names)
+	};
 	int status;
 
 	status = nfs4_save_creds(&original_cred);
@@ -276,11 +284,11 @@
 		return status;
 	}
 
-	status = vfs_readdir(nn->rec_file, nfsd4_build_namelist, &names);
+	status = iterate_dir(nn->rec_file, &ctx.ctx);
 	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
-	while (!list_empty(&names)) {
+	while (!list_empty(&ctx.names)) {
 		struct name_list *entry;
-		entry = list_entry(names.next, struct name_list, list);
+		entry = list_entry(ctx.names.next, struct name_list, list);
 		if (!status) {
 			struct dentry *dentry;
 			dentry = lookup_one_len(entry->name, dir, HEXDIR_LEN-1);
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 81325ba..11224fa 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1950,6 +1950,7 @@
 };
 
 struct readdir_data {
+	struct dir_context ctx;
 	char		*dirent;
 	size_t		used;
 	int		full;
@@ -1981,13 +1982,15 @@
 static __be32 nfsd_buffered_readdir(struct file *file, filldir_t func,
 				    struct readdir_cd *cdp, loff_t *offsetp)
 {
-	struct readdir_data buf;
 	struct buffered_dirent *de;
 	int host_err;
 	int size;
 	loff_t offset;
+	struct readdir_data buf = {
+		.ctx.actor = nfsd_buffered_filldir,
+		.dirent = (void *)__get_free_page(GFP_KERNEL)
+	};
 
-	buf.dirent = (void *)__get_free_page(GFP_KERNEL);
 	if (!buf.dirent)
 		return nfserrno(-ENOMEM);
 
@@ -2001,7 +2004,7 @@
 		buf.used = 0;
 		buf.full = 0;
 
-		host_err = vfs_readdir(file, nfsd_buffered_filldir, &buf);
+		host_err = iterate_dir(file, &buf.ctx);
 		if (buf.full)
 			host_err = 0;
 
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index c7d1f9f..4b0a8d4 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -1114,6 +1114,7 @@
 	unsigned long old_mount_opt;
 	int err;
 
+	sync_filesystem(sb);
 	old_sb_flags = sb->s_flags;
 	old_mount_opt = nilfs->ns_mount_opt;
 
diff --git a/fs/notify/group.c b/fs/notify/group.c
index bd2625b..2ff2a0f 100644
--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -22,6 +22,7 @@
 #include <linux/srcu.h>
 #include <linux/rculist.h>
 #include <linux/wait.h>
+#include <linux/module.h>
 
 #include <linux/fsnotify_backend.h>
 #include "fsnotify.h"
@@ -65,6 +66,7 @@
 {
 	atomic_inc(&group->refcnt);
 }
+EXPORT_SYMBOL(fsnotify_get_group);
 
 /*
  * Drop a reference to a group.  Free it if it's through.
@@ -74,6 +76,7 @@
 	if (atomic_dec_and_test(&group->refcnt))
 		fsnotify_final_destroy_group(group);
 }
+EXPORT_SYMBOL(fsnotify_put_group);
 
 /*
  * Create a new fsnotify_group and hold a reference for the group returned.
@@ -102,6 +105,7 @@
 
 	return group;
 }
+EXPORT_SYMBOL(fsnotify_alloc_group);
 
 int fsnotify_fasync(int fd, struct file *file, int on)
 {
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index f08b3b7..fb55964 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -115,6 +115,7 @@
 		mark->free_mark(mark);
 	}
 }
+EXPORT_SYMBOL(fsnotify_put_mark);
 
 /*
  * Any time a mark is getting freed we end up here.
@@ -197,6 +198,7 @@
 	fsnotify_destroy_mark_locked(mark, group);
 	mutex_unlock(&group->mark_mutex);
 }
+EXPORT_SYMBOL(fsnotify_destroy_mark);
 
 void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask)
 {
@@ -281,6 +283,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL(fsnotify_add_mark);
 
 int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group,
 		      struct inode *inode, struct vfsmount *mnt, int allow_dups)
@@ -362,6 +365,7 @@
 	atomic_set(&mark->refcnt, 1);
 	mark->free_mark = free_mark;
 }
+EXPORT_SYMBOL(fsnotify_init_mark);
 
 static int fsnotify_mark_destroy(void *ignored)
 {
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index fa9c05f..d267ea6 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -1372,7 +1372,7 @@
 		 * The page may have dirty, unmapped buffers.  Make them
 		 * freeable here, so the page does not leak.
 		 */
-		block_invalidatepage(page, 0);
+		block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
 		unlock_page(page);
 		ntfs_debug("Write outside i_size - truncated?");
 		return 0;
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 82650d5..bd5610d 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -468,6 +468,8 @@
 
 	ntfs_debug("Entering with remount options string: %s", opt);
 
+	sync_filesystem(sb);
+
 #ifndef NTFS_RW
 	/* For read-only compiled driver, enforce read-only flag. */
 	*flags |= MS_RDONLY;
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index f998c60..b259907 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -565,9 +565,7 @@
 static void ocfs2_dio_end_io(struct kiocb *iocb,
 			     loff_t offset,
 			     ssize_t bytes,
-			     void *private,
-			     int ret,
-			     bool is_async)
+			     void *private)
 {
 	struct inode *inode = file_inode(iocb->ki_filp);
 	int level;
@@ -592,10 +590,6 @@
 
 	level = ocfs2_iocb_rw_locked_level(iocb);
 	ocfs2_rw_unlock(inode, level);
-
-	inode_dio_done(inode);
-	if (is_async)
-		aio_complete(iocb, ret, 0);
 }
 
 /*
@@ -603,7 +597,8 @@
  * from ext3.  PageChecked() bits have been removed as OCFS2 does not
  * do journalled data.
  */
-static void ocfs2_invalidatepage(struct page *page, unsigned long offset)
+static void ocfs2_invalidatepage(struct page *page, unsigned int offset,
+				 unsigned int length)
 {
 	journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;
 
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 01b8516..616bde6 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -632,6 +632,8 @@
 	struct ocfs2_super *osb = OCFS2_SB(sb);
 	u32 tmp;
 
+	sync_filesystem(sb);
+
 	if (!ocfs2_parse_options(sb, data, &parsed_options, 1) ||
 	    !ocfs2_check_set_options(sb, &parsed_options)) {
 		ret = -EINVAL;
diff --git a/fs/open.c b/fs/open.c
index 5f12968..aad65f5 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -61,6 +61,7 @@
 	mutex_unlock(&dentry->d_inode->i_mutex);
 	return ret;
 }
+EXPORT_SYMBOL(do_truncate);
 
 long vfs_truncate(struct path *path, loff_t length)
 {
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index 75885ff..f4026ab 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -375,6 +375,7 @@
 
 static int openprom_remount(struct super_block *sb, int *flags, char *data)
 {
+	sync_filesystem(sb);
 	*flags |= MS_NOATIME;
 	return 0;
 }
diff --git a/fs/proc/base.c b/fs/proc/base.c
index e5160b7..62f3d1f 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2750,8 +2750,8 @@
 	REG("cgroup",  S_IRUGO, proc_cgroup_operations),
 #endif
 	INF("oom_score",  S_IRUGO, proc_oom_score),
-	REG("oom_adj",    S_IRUGO|S_IWUSR, proc_oom_adj_operations),
-	REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
+	REG("oom_adj",    S_IRUSR, proc_oom_adj_operations),
+	REG("oom_score_adj", S_IRUSR, proc_oom_score_adj_operations),
 #ifdef CONFIG_AUDITSYSCALL
 	REG("loginuid",   S_IWUSR|S_IRUGO, proc_loginuid_operations),
 	REG("sessionid",  S_IRUGO, proc_sessionid_operations),
@@ -3107,8 +3107,8 @@
 	REG("cgroup",  S_IRUGO, proc_cgroup_operations),
 #endif
 	INF("oom_score", S_IRUGO, proc_oom_score),
-	REG("oom_adj",   S_IRUGO|S_IWUSR, proc_oom_adj_operations),
-	REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
+	REG("oom_adj",   S_IRUSR, proc_oom_adj_operations),
+	REG("oom_score_adj", S_IRUSR, proc_oom_score_adj_operations),
 #ifdef CONFIG_AUDITSYSCALL
 	REG("loginuid",  S_IWUSR|S_IRUGO, proc_loginuid_operations),
 	REG("sessionid",  S_IRUGO, proc_sessionid_operations),
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
index ccfd99b..c09e2cf 100644
--- a/fs/proc/nommu.c
+++ b/fs/proc/nommu.c
@@ -46,6 +46,10 @@
 
 	if (file) {
 		struct inode *inode = file_inode(region->vm_file);
+		if (region->vm_prfile) {
+			file = region->vm_prfile;
+			inode = file_inode(file);
+		}
 		dev = inode->i_sb->s_dev;
 		ino = inode->i_ino;
 	}
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 04ec276..3df19e6 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -92,6 +92,8 @@
 int proc_remount(struct super_block *sb, int *flags, char *data)
 {
 	struct pid_namespace *pid = sb->s_fs_info;
+
+	sync_filesystem(sb);
 	return !proc_parse_options(data, pid);
 }
 
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index b86db12..66aafd3 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -134,6 +134,56 @@
 }
 #endif
 
+static void seq_print_vma_name(struct seq_file *m, struct vm_area_struct *vma)
+{
+	const char __user *name = vma_get_anon_name(vma);
+	struct mm_struct *mm = vma->vm_mm;
+
+	unsigned long page_start_vaddr;
+	unsigned long page_offset;
+	unsigned long num_pages;
+	unsigned long max_len = NAME_MAX;
+	int i;
+
+	page_start_vaddr = (unsigned long)name & PAGE_MASK;
+	page_offset = (unsigned long)name - page_start_vaddr;
+	num_pages = DIV_ROUND_UP(page_offset + max_len, PAGE_SIZE);
+
+	seq_puts(m, "[anon:");
+
+	for (i = 0; i < num_pages; i++) {
+		int len;
+		int write_len;
+		const char *kaddr;
+		long pages_pinned;
+		struct page *page;
+
+		pages_pinned = get_user_pages(current, mm, page_start_vaddr,
+				1, 0, 0, &page, NULL);
+		if (pages_pinned < 1) {
+			seq_puts(m, "<fault>]");
+			return;
+		}
+
+		kaddr = (const char *)kmap(page);
+		len = min(max_len, PAGE_SIZE - page_offset);
+		write_len = strnlen(kaddr + page_offset, len);
+		seq_write(m, kaddr + page_offset, write_len);
+		kunmap(page);
+		put_page(page);
+
+		/* if strnlen hit a null terminator then we're done */
+		if (write_len != len)
+			break;
+
+		max_len -= len;
+		page_offset = 0;
+		page_start_vaddr += PAGE_SIZE;
+	}
+
+	seq_putc(m, ']');
+}
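/*
 * [Editor's note] vma_get_anon_name() returns a __user pointer: the name
 * string lives in the owning process's address space, not in the kernel.
 * That is why the loop above must pin each backing page with
 * get_user_pages() and map it with kmap() instead of dereferencing the
 * pointer directly, and why a fault is reported as "<fault>]".
 */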
+
 static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
 {
 	if (vma && vma != priv->tail_vma) {
@@ -272,6 +322,10 @@
 
 	if (file) {
 		struct inode *inode = file_inode(vma->vm_file);
+		if (vma->vm_prfile) {
+			file = vma->vm_prfile;
+			inode = file_inode(file);
+		}
 		dev = inode->i_sb->s_dev;
 		ino = inode->i_ino;
 		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
@@ -335,6 +389,12 @@
 				pad_len_spaces(m, len);
 				seq_printf(m, "[stack:%d]", tid);
 			}
+			goto done;
+		}
+
+		if (vma_get_anon_name(vma)) {
+			pad_len_spaces(m, len);
+			seq_print_vma_name(m, vma);
 		}
 	}
 
@@ -634,6 +694,12 @@
 
 	show_smap_vma_flags(m, vma);
 
+	if (vma_get_anon_name(vma)) {
+		seq_puts(m, "Name:           ");
+		seq_print_vma_name(m, vma);
+		seq_putc(m, '\n');
+	}
+
 	if (m->count < m->size)  /* vma is copied successfully */
 		m->version = (vma != get_gate_vma(task->mm))
 			? vma->vm_start : 0;
@@ -722,6 +788,7 @@
 #define CLEAR_REFS_ALL 1
 #define CLEAR_REFS_ANON 2
 #define CLEAR_REFS_MAPPED 3
+#define CLEAR_REFS_MM_HIWATER_RSS 5
 
 static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 				size_t count, loff_t *ppos)
@@ -741,7 +808,8 @@
 	rv = kstrtoint(strstrip(buffer), 10, &type);
 	if (rv < 0)
 		return rv;
-	if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
+	if ((type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED) &&
+	    type != CLEAR_REFS_MM_HIWATER_RSS)
 		return -EINVAL;
 	task = get_proc_task(file_inode(file));
 	if (!task)
@@ -752,6 +820,18 @@
 			.pmd_entry = clear_refs_pte_range,
 			.mm = mm,
 		};
+
+		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
+			/*
+			 * Writing 5 to /proc/pid/clear_refs resets the peak
+			 * resident set size to this mm's current rss value.
+			 */
+			down_write(&mm->mmap_sem);
+			reset_mm_hiwater_rss(mm);
+			up_write(&mm->mmap_sem);
+			goto out_mm;
+		}
+
 		down_read(&mm->mmap_sem);
 		for (vma = mm->mmap; vma; vma = vma->vm_next) {
 			clear_refs_walk.private = vma;
@@ -775,6 +855,7 @@
 		}
 		flush_tlb_mm(mm);
 		up_read(&mm->mmap_sem);
+out_mm:
 		mmput(mm);
 	}
 	put_task_struct(task);
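/*
 * [Editor's note] A hedged user-space sketch of the interface added
 * above: writing "5" (CLEAR_REFS_MM_HIWATER_RSS) to /proc/<pid>/clear_refs
 * resets the peak RSS (VmHWM) to the current RSS. Path and value come
 * straight from the hunk; error handling is minimal.
 */
#include <fcntl.h>
#include <unistd.h>

static int reset_peak_rss(void)
{
	int fd = open("/proc/self/clear_refs", O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, "5", 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}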
@@ -1295,6 +1376,8 @@
 
 	if (file) {
 		seq_printf(m, " file=");
+		if (vma->vm_prfile)
+			file = vma->vm_prfile;
 		seq_path(m, &file->f_path, "\n\t= ");
 	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
 		seq_printf(m, " heap");
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 123c198..ac19a00 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -150,6 +150,10 @@
 
 	if (file) {
 		struct inode *inode = file_inode(vma->vm_file);
+		if (vma->vm_prfile) {
+			file = vma->vm_prfile;
+			inode = file_inode(file);
+		}
 		dev = inode->i_sb->s_dev;
 		ino = inode->i_ino;
 		pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
index ca71db6..da12fd4 100644
--- a/fs/pstore/Kconfig
+++ b/fs/pstore/Kconfig
@@ -19,6 +19,16 @@
 	  When the option is enabled, pstore will log all kernel
 	  messages, even if no oops or panic happened.
 
+config PSTORE_PMSG
+	bool "Log user space messages"
+	depends on PSTORE
+	help
+	  When the option is enabled, pstore will export a character
+	  interface /dev/pmsg0 to log user space messages. On reboot,
+	  the data can be retrieved from /sys/fs/pstore/pmsg-ramoops-[ID].
+
+	  If unsure, say N.
+
 config PSTORE_FTRACE
 	bool "Persistent function tracer"
 	depends on PSTORE
diff --git a/fs/pstore/Makefile b/fs/pstore/Makefile
index 4c9095c..e647d8e 100644
--- a/fs/pstore/Makefile
+++ b/fs/pstore/Makefile
@@ -7,5 +7,7 @@
 pstore-objs += inode.o platform.o
 obj-$(CONFIG_PSTORE_FTRACE)	+= ftrace.o
 
+obj-$(CONFIG_PSTORE_PMSG)	+= pmsg.o
+
 ramoops-objs += ram.o ram_core.o
 obj-$(CONFIG_PSTORE_RAM)	+= ramoops.o
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index 66c8c2f..a12e082 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -249,6 +249,7 @@
 
 static int pstore_remount(struct super_block *sb, int *flags, char *data)
 {
+	sync_filesystem(sb);
 	parse_options(data);
 
 	return 0;
@@ -315,22 +316,27 @@
 
 	switch (type) {
 	case PSTORE_TYPE_DMESG:
-		sprintf(name, "dmesg-%s-%lld", psname, id);
+		scnprintf(name, sizeof(name), "dmesg-%s-%lld",
+			  psname, id);
 		break;
 	case PSTORE_TYPE_CONSOLE:
-		sprintf(name, "console-%s-%lld", psname, id);
+		scnprintf(name, sizeof(name), "console-%s", psname);
 		break;
 	case PSTORE_TYPE_FTRACE:
-		sprintf(name, "ftrace-%s-%lld", psname, id);
+		scnprintf(name, sizeof(name), "ftrace-%s", psname);
 		break;
 	case PSTORE_TYPE_MCE:
-		sprintf(name, "mce-%s-%lld", psname, id);
+		scnprintf(name, sizeof(name), "mce-%s-%lld", psname, id);
+		break;
+	case PSTORE_TYPE_PMSG:
+		scnprintf(name, sizeof(name), "pmsg-%s-%lld", psname, id);
 		break;
 	case PSTORE_TYPE_UNKNOWN:
-		sprintf(name, "unknown-%s-%lld", psname, id);
+		scnprintf(name, sizeof(name), "unknown-%s-%lld", psname, id);
 		break;
 	default:
-		sprintf(name, "type%d-%s-%lld", type, psname, id);
+		scnprintf(name, sizeof(name), "type%d-%s-%lld",
+			  type, psname, id);
 		break;
 	}
 
diff --git a/fs/pstore/internal.h b/fs/pstore/internal.h
index 937d820..fd8d248 100644
--- a/fs/pstore/internal.h
+++ b/fs/pstore/internal.h
@@ -45,6 +45,12 @@
 static inline void pstore_register_ftrace(void) {}
 #endif
 
+#ifdef CONFIG_PSTORE_PMSG
+extern void pstore_register_pmsg(void);
+#else
+static inline void pstore_register_pmsg(void) {}
+#endif
+
 extern struct pstore_info *psinfo;
 
 extern void	pstore_set_kmsg_bytes(int);
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 86d1038..4f11f23 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -267,6 +267,7 @@
 	kmsg_dump_register(&pstore_dumper);
 	pstore_register_console();
 	pstore_register_ftrace();
+	pstore_register_pmsg();
 
 	if (pstore_update_ms >= 0) {
 		pstore_timer.expires = jiffies +
diff --git a/fs/pstore/pmsg.c b/fs/pstore/pmsg.c
new file mode 100644
index 0000000..82a86aa
--- /dev/null
+++ b/fs/pstore/pmsg.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2014  Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include "internal.h"
+
+static DEFINE_MUTEX(pmsg_lock);
+#define PMSG_MAX_BOUNCE_BUFFER_SIZE (2*PAGE_SIZE)
+
+static ssize_t write_pmsg(struct file *file, const char __user *buf,
+			  size_t count, loff_t *ppos)
+{
+	size_t i, buffer_size;
+	char *buffer;
+
+	if (!count)
+		return 0;
+
+	if (!access_ok(VERIFY_READ, buf, count))
+		return -EFAULT;
+
+	buffer_size = count;
+	if (buffer_size > PMSG_MAX_BOUNCE_BUFFER_SIZE)
+		buffer_size = PMSG_MAX_BOUNCE_BUFFER_SIZE;
+	buffer = vmalloc(buffer_size);
+	if (!buffer)
+		return -ENOMEM;
+
+	mutex_lock(&pmsg_lock);
+	for (i = 0; i < count; ) {
+		size_t c = min(count - i, buffer_size);
+		u64 id;
+		long ret;
+
+		ret = __copy_from_user(buffer, buf + i, c);
+		if (unlikely(ret != 0)) {
+			mutex_unlock(&pmsg_lock);
+			vfree(buffer);
+			return -EFAULT;
+		}
+		psinfo->write_buf(PSTORE_TYPE_PMSG, 0, &id, 0, buffer, c,
+				  psinfo);
+
+		i += c;
+	}
+
+	mutex_unlock(&pmsg_lock);
+	vfree(buffer);
+	return count;
+}
+
+static const struct file_operations pmsg_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= noop_llseek,
+	.write		= write_pmsg,
+};
+
+static struct class *pmsg_class;
+static int pmsg_major;
+#define PMSG_NAME "pmsg"
+#undef pr_fmt
+#define pr_fmt(fmt) PMSG_NAME ": " fmt
+
+static char *pmsg_devnode(struct device *dev, umode_t *mode)
+{
+	if (mode)
+		*mode = 0220;
+	return NULL;
+}
+
+void pstore_register_pmsg(void)
+{
+	struct device *pmsg_device;
+
+	pmsg_major = register_chrdev(0, PMSG_NAME, &pmsg_fops);
+	if (pmsg_major < 0) {
+		pr_err("register_chrdev failed\n");
+		goto err;
+	}
+
+	pmsg_class = class_create(THIS_MODULE, PMSG_NAME);
+	if (IS_ERR(pmsg_class)) {
+		pr_err("device class file already in use\n");
+		goto err_class;
+	}
+	pmsg_class->devnode = pmsg_devnode;
+
+	pmsg_device = device_create(pmsg_class, NULL, MKDEV(pmsg_major, 0),
+					NULL, "%s%d", PMSG_NAME, 0);
+	if (IS_ERR(pmsg_device)) {
+		pr_err("failed to create device\n");
+		goto err_device;
+	}
+	return;
+
+err_device:
+	class_destroy(pmsg_class);
+err_class:
+	unregister_chrdev(pmsg_major, PMSG_NAME);
+err:
+	return;
+}
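/*
 * [Editor's note] A hedged user-space sketch for the device created
 * above. pmsg.c registers a write-only (0220) char device named "pmsg0";
 * per the Kconfig help earlier in this merge, records reappear after a
 * reboot as /sys/fs/pstore/pmsg-ramoops-[ID]. Error handling is minimal.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static void log_to_pmsg(const char *msg)
{
	int fd = open("/dev/pmsg0", O_WRONLY);

	if (fd < 0)
		return;
	write(fd, msg, strlen(msg));	/* persisted into the pmsg RAM zone */
	close(fd);
}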
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index d3d3714..f721e47 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -51,6 +51,10 @@
 module_param_named(ftrace_size, ramoops_ftrace_size, ulong, 0400);
 MODULE_PARM_DESC(ftrace_size, "size of ftrace log");
 
+static ulong ramoops_pmsg_size = MIN_MEM_SIZE;
+module_param_named(pmsg_size, ramoops_pmsg_size, ulong, 0400);
+MODULE_PARM_DESC(pmsg_size, "size of user space message log");
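/*
 * [Editor's note] Like the existing record/console/ftrace sizes, the new
 * region is sized via a module parameter, e.g. "ramoops.pmsg_size=0x10000"
 * on the kernel command line (the value shown is illustrative; the probe
 * code below rounds a non-power-of-two size down to a power of two).
 */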
+
 static ulong mem_address;
 module_param(mem_address, ulong, 0400);
 MODULE_PARM_DESC(mem_address,
@@ -82,12 +86,14 @@
 	struct persistent_ram_zone **przs;
 	struct persistent_ram_zone *cprz;
 	struct persistent_ram_zone *fprz;
+	struct persistent_ram_zone *mprz;
 	phys_addr_t phys_addr;
 	unsigned long size;
 	unsigned int memtype;
 	size_t record_size;
 	size_t console_size;
 	size_t ftrace_size;
+	size_t pmsg_size;
 	int dump_oops;
 	struct persistent_ram_ecc_info ecc_info;
 	unsigned int max_dump_cnt;
@@ -96,6 +102,7 @@
 	unsigned int dump_read_cnt;
 	unsigned int console_read_cnt;
 	unsigned int ftrace_read_cnt;
+	unsigned int pmsg_read_cnt;
 	struct pstore_info pstore;
 };
 
@@ -109,6 +116,7 @@
 	cxt->dump_read_cnt = 0;
 	cxt->console_read_cnt = 0;
 	cxt->ftrace_read_cnt = 0;
+	cxt->pmsg_read_cnt = 0;
 	return 0;
 }
 
@@ -141,6 +149,12 @@
 	return prz;
 }
 
+static bool prz_ok(struct persistent_ram_zone *prz)
+{
+	return !!prz && !!(persistent_ram_old_size(prz) +
+			   persistent_ram_ecc_string(prz, NULL, 0));
+}
+
 static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
 				   int *count, struct timespec *time,
 				   char **buf, struct pstore_info *psi)
@@ -153,13 +167,16 @@
 	prz = ramoops_get_next_prz(cxt->przs, &cxt->dump_read_cnt,
 				   cxt->max_dump_cnt, id, type,
 				   PSTORE_TYPE_DMESG, 1);
-	if (!prz)
+	if (!prz_ok(prz))
 		prz = ramoops_get_next_prz(&cxt->cprz, &cxt->console_read_cnt,
 					   1, id, type, PSTORE_TYPE_CONSOLE, 0);
-	if (!prz)
+	if (!prz_ok(prz))
 		prz = ramoops_get_next_prz(&cxt->fprz, &cxt->ftrace_read_cnt,
 					   1, id, type, PSTORE_TYPE_FTRACE, 0);
-	if (!prz)
+	if (!prz_ok(prz))
+		prz = ramoops_get_next_prz(&cxt->mprz, &cxt->pmsg_read_cnt,
+					   1, id, type, PSTORE_TYPE_PMSG, 0);
+	if (!prz_ok(prz))
 		return 0;
 
 	/* TODO(kees): Bogus time for the moment. */
@@ -222,6 +239,11 @@
 			return -ENOMEM;
 		persistent_ram_write(cxt->fprz, buf, size);
 		return 0;
+	} else if (type == PSTORE_TYPE_PMSG) {
+		if (!cxt->mprz)
+			return -ENOMEM;
+		persistent_ram_write(cxt->mprz, buf, size);
+		return 0;
 	}
 
 	if (type != PSTORE_TYPE_DMESG)
@@ -279,6 +301,9 @@
 	case PSTORE_TYPE_FTRACE:
 		prz = cxt->fprz;
 		break;
+	case PSTORE_TYPE_PMSG:
+		prz = cxt->mprz;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -388,6 +413,12 @@
 	return 0;
 }
 
+void notrace ramoops_console_write_buf(const char *buf, size_t size)
+{
+	struct ramoops_context *cxt = &oops_cxt;
+	persistent_ram_write(cxt->cprz, buf, size);
+}
+
 static int ramoops_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -404,7 +435,7 @@
 		goto fail_out;
 
 	if (!pdata->mem_size || (!pdata->record_size && !pdata->console_size &&
-			!pdata->ftrace_size)) {
+			!pdata->ftrace_size && !pdata->pmsg_size)) {
 		pr_err("The memory size and the record/console size must be "
 			"non-zero\n");
 		goto fail_out;
@@ -418,6 +449,8 @@
 		pdata->console_size = rounddown_pow_of_two(pdata->console_size);
 	if (!is_power_of_2(pdata->ftrace_size))
 		pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size);
+	if (pdata->pmsg_size && !is_power_of_2(pdata->pmsg_size))
+		pdata->pmsg_size = rounddown_pow_of_two(pdata->pmsg_size);
 
 	cxt->size = pdata->mem_size;
 	cxt->phys_addr = pdata->mem_address;
@@ -425,12 +458,14 @@
 	cxt->record_size = pdata->record_size;
 	cxt->console_size = pdata->console_size;
 	cxt->ftrace_size = pdata->ftrace_size;
+	cxt->pmsg_size = pdata->pmsg_size;
 	cxt->dump_oops = pdata->dump_oops;
 	cxt->ecc_info = pdata->ecc_info;
 
 	paddr = cxt->phys_addr;
 
-	dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size;
+	dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size
+			- cxt->pmsg_size;
 	err = ramoops_init_przs(dev, cxt, &paddr, dump_mem_sz);
 	if (err)
 		goto fail_out;
@@ -445,13 +480,9 @@
 	if (err)
 		goto fail_init_fprz;
 
-	if (!cxt->przs && !cxt->cprz && !cxt->fprz) {
-		pr_err("memory size too small, minimum is %zu\n",
-			cxt->console_size + cxt->record_size +
-			cxt->ftrace_size);
-		err = -EINVAL;
-		goto fail_cnt;
-	}
+	err = ramoops_init_prz(dev, cxt, &cxt->mprz, &paddr, cxt->pmsg_size, 0);
+	if (err)
+		goto fail_init_mprz;
 
 	cxt->pstore.data = cxt;
 	/*
@@ -497,7 +528,8 @@
 fail_clear:
 	cxt->pstore.bufsize = 0;
 	cxt->max_dump_cnt = 0;
-fail_cnt:
+	kfree(cxt->mprz);
+fail_init_mprz:
 	kfree(cxt->fprz);
 fail_init_fprz:
 	kfree(cxt->cprz);
@@ -556,6 +588,7 @@
 	dummy_data->record_size = record_size;
 	dummy_data->console_size = ramoops_console_size;
 	dummy_data->ftrace_size = ramoops_ftrace_size;
+	dummy_data->pmsg_size = ramoops_pmsg_size;
 	dummy_data->dump_oops = dump_oops;
 	/*
 	 * For backwards compatibility ramoops.ecc=1 means 16 bytes ECC
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 2e8caa6..3410e9f 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -46,6 +46,7 @@
 {
 	struct qnx4_sb_info *qs;
 
+	sync_filesystem(sb);
 	qs = qnx4_sb(sb);
 	qs->Version = QNX4_VERSION;
 	*flags |= MS_RDONLY;
diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c
index 8d941ed..65cdaab 100644
--- a/fs/qnx6/inode.c
+++ b/fs/qnx6/inode.c
@@ -55,6 +55,7 @@
 
 static int qnx6_remount(struct super_block *sb, int *flags, char *data)
 {
+	sync_filesystem(sb);
 	*flags |= MS_RDONLY;
 	return 0;
 }
diff --git a/fs/readdir.c b/fs/readdir.c
index fee38e0..d46eca8 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -20,11 +20,11 @@
 
 #include <asm/uaccess.h>
 
-int vfs_readdir(struct file *file, filldir_t filler, void *buf)
+int iterate_dir(struct file *file, struct dir_context *ctx)
 {
 	struct inode *inode = file_inode(file);
 	int res = -ENOTDIR;
-	if (!file->f_op || !file->f_op->readdir)
+	if (!file->f_op || (!file->f_op->readdir && !file->f_op->iterate))
 		goto out;
 
 	res = security_file_permission(file, MAY_READ);
@@ -37,15 +37,21 @@
 
 	res = -ENOENT;
 	if (!IS_DEADDIR(inode)) {
-		res = file->f_op->readdir(file, buf, filler);
+		if (file->f_op->iterate) {
+			ctx->pos = file->f_pos;
+			res = file->f_op->iterate(file, ctx);
+			file->f_pos = ctx->pos;
+		} else {
+			res = file->f_op->readdir(file, ctx, ctx->actor);
+			ctx->pos = file->f_pos;
+		}
 		file_accessed(file);
 	}
 	mutex_unlock(&inode->i_mutex);
 out:
 	return res;
 }
-
-EXPORT_SYMBOL(vfs_readdir);
+EXPORT_SYMBOL(iterate_dir);
 
 /*
  * Traditional linux readdir() handling..
@@ -66,6 +72,7 @@
 };
 
 struct readdir_callback {
+	struct dir_context ctx;
 	struct old_linux_dirent __user * dirent;
 	int result;
 };
@@ -73,7 +80,7 @@
 static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset,
 		      u64 ino, unsigned int d_type)
 {
-	struct readdir_callback * buf = (struct readdir_callback *) __buf;
+	struct readdir_callback *buf = (struct readdir_callback *) __buf;
 	struct old_linux_dirent __user * dirent;
 	unsigned long d_ino;
 
@@ -107,15 +114,15 @@
 {
 	int error;
 	struct fd f = fdget(fd);
-	struct readdir_callback buf;
+	struct readdir_callback buf = {
+		.ctx.actor = fillonedir,
+		.dirent = dirent
+	};
 
 	if (!f.file)
 		return -EBADF;
 
-	buf.result = 0;
-	buf.dirent = dirent;
-
-	error = vfs_readdir(f.file, fillonedir, &buf);
+	error = iterate_dir(f.file, &buf.ctx);
 	if (buf.result)
 		error = buf.result;
 
@@ -137,6 +144,7 @@
 };
 
 struct getdents_callback {
+	struct dir_context ctx;
 	struct linux_dirent __user * current_dir;
 	struct linux_dirent __user * previous;
 	int count;
@@ -191,7 +199,11 @@
 {
 	struct fd f;
 	struct linux_dirent __user * lastdirent;
-	struct getdents_callback buf;
+	struct getdents_callback buf = {
+		.ctx.actor = filldir,
+		.count = count,
+		.current_dir = dirent
+	};
 	int error;
 
 	if (!access_ok(VERIFY_WRITE, dirent, count))
@@ -201,17 +213,12 @@
 	if (!f.file)
 		return -EBADF;
 
-	buf.current_dir = dirent;
-	buf.previous = NULL;
-	buf.count = count;
-	buf.error = 0;
-
-	error = vfs_readdir(f.file, filldir, &buf);
+	error = iterate_dir(f.file, &buf.ctx);
 	if (error >= 0)
 		error = buf.error;
 	lastdirent = buf.previous;
 	if (lastdirent) {
-		if (put_user(f.file->f_pos, &lastdirent->d_off))
+		if (put_user(buf.ctx.pos, &lastdirent->d_off))
 			error = -EFAULT;
 		else
 			error = count - buf.count;
@@ -221,6 +228,7 @@
 }
 
 struct getdents_callback64 {
+	struct dir_context ctx;
 	struct linux_dirent64 __user * current_dir;
 	struct linux_dirent64 __user * previous;
 	int count;
@@ -271,7 +279,11 @@
 {
 	struct fd f;
 	struct linux_dirent64 __user * lastdirent;
-	struct getdents_callback64 buf;
+	struct getdents_callback64 buf = {
+		.ctx.actor = filldir64,
+		.count = count,
+		.current_dir = dirent
+	};
 	int error;
 
 	if (!access_ok(VERIFY_WRITE, dirent, count))
@@ -281,17 +293,12 @@
 	if (!f.file)
 		return -EBADF;
 
-	buf.current_dir = dirent;
-	buf.previous = NULL;
-	buf.count = count;
-	buf.error = 0;
-
-	error = vfs_readdir(f.file, filldir64, &buf);
+	error = iterate_dir(f.file, &buf.ctx);
 	if (error >= 0)
 		error = buf.error;
 	lastdirent = buf.previous;
 	if (lastdirent) {
-		typeof(lastdirent->d_off) d_off = f.file->f_pos;
+		typeof(lastdirent->d_off) d_off = buf.ctx.pos;
 		if (__put_user(d_off, &lastdirent->d_off))
 			error = -EFAULT;
 		else
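
Note: the vfs_readdir() -> iterate_dir() conversion above hinges on each
callback struct embedding its struct dir_context as the first member: the
VFS core only ever sees &buf.ctx, and the actor casts that pointer back to
the containing struct. A minimal userspace sketch of the embedding pattern
(all names here are illustrative, not kernel API):

#include <stdio.h>

/* Userspace sketch of the dir_context embedding used above. */
typedef int (*actor_t)(void *ctx, const char *name);

struct dir_context_sketch {
	actor_t actor;
	long long pos;
};

struct getdents_sketch {
	struct dir_context_sketch ctx;	/* must be the first member */
	int count;			/* caller-private state */
};

static int fill_sketch(void *__ctx, const char *name)
{
	/* Safe cast: ctx is the first member of the container. */
	struct getdents_sketch *buf = (struct getdents_sketch *)__ctx;

	printf("entry %d: %s\n", buf->count++, name);
	return 0;
}

int main(void)
{
	struct getdents_sketch buf = { .ctx.actor = fill_sketch };

	/* The core, like iterate_dir(), only ever receives &buf.ctx. */
	buf.ctx.actor(&buf.ctx, ".");
	buf.ctx.actor(&buf.ctx, "..");
	return 0;
}
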
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 3616644..dd76473 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -2975,7 +2975,8 @@
 }
 
 /* clm -- taken from fs/buffer.c:block_invalidate_page */
-static void reiserfs_invalidatepage(struct page *page, unsigned long offset)
+static void reiserfs_invalidatepage(struct page *page, unsigned int offset,
+				    unsigned int length)
 {
 	struct buffer_head *head, *bh, *next;
 	struct inode *inode = page->mapping->host;
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 7ff27fa..7c8fa87 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -1327,6 +1327,7 @@
 	int i;
 #endif
 
+	sync_filesystem(s);
 	reiserfs_write_lock(s);
 
 #ifdef CONFIG_QUOTA
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index 15cbc41..ae83948 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -435,6 +435,7 @@
  */
 static int romfs_remount(struct super_block *sb, int *flags, char *data)
 {
+	sync_filesystem(sb);
 	*flags |= MS_RDONLY;
 	return 0;
 }
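
Note: the qnx4, qnx6, reiserfs and romfs hunks above are instances of one
change: each ->remount_fs implementation now begins by flushing dirty state
with sync_filesystem(sb) before the new mount options take effect. A sketch
of the shape these handlers converge on (myfs_remount is a hypothetical
read-only filesystem's handler; kernel context assumed):

static int myfs_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);	/* write back dirty data and metadata first */
	*flags |= MS_RDONLY;	/* this example filesystem is read-only */
	return 0;
}
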
diff --git a/fs/select.c b/fs/select.c
index 8c1c96c..6b14dc7 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -27,6 +27,7 @@
 #include <linux/rcupdate.h>
 #include <linux/hrtimer.h>
 #include <linux/sched/rt.h>
+#include <linux/freezer.h>
 
 #include <asm/uaccess.h>
 
@@ -236,7 +237,8 @@
 
 	set_current_state(state);
 	if (!pwq->triggered)
-		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
+		rc = freezable_schedule_hrtimeout_range(expires, slack,
+							HRTIMER_MODE_ABS);
 	__set_current_state(TASK_RUNNING);
 
 	/*
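
Note: the select.c hunk swaps schedule_hrtimeout_range() for its freezable
variant so a task blocked in poll/select no longer holds up system suspend.
The wrapper is roughly the following (sketched from the <linux/freezer.h>
of this era; treat it as an approximation, the real macro may differ in
detail):

#define freezable_schedule_hrtimeout_range(expires, delta, mode)	\
({									\
	long __ret;							\
	freezer_do_not_count();	/* freezer won't wait for this task */	\
	__ret = schedule_hrtimeout_range(expires, delta, mode);		\
	freezer_count();	/* freeze now if a freeze is pending */	\
	__ret;								\
})
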
diff --git a/fs/seq_file.c b/fs/seq_file.c
index c009e60..2cee82b 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -8,8 +8,10 @@
 #include <linux/fs.h>
 #include <linux/export.h>
 #include <linux/seq_file.h>
+#include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/cred.h>
+#include <linux/mm.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -30,6 +32,16 @@
 	m->count = m->size;
 }
 
+static void *seq_buf_alloc(unsigned long size)
+{
+	void *buf;
+
+	buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
+	if (!buf && size > PAGE_SIZE)
+		buf = vmalloc(size);
+	return buf;
+}
+
 /**
  *	seq_open -	initialize sequential file
  *	@file: file we initialize
@@ -96,7 +108,7 @@
 		return 0;
 	}
 	if (!m->buf) {
-		m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
+		m->buf = seq_buf_alloc(m->size = PAGE_SIZE);
 		if (!m->buf)
 			return -ENOMEM;
 	}
@@ -135,8 +147,8 @@
 
 Eoverflow:
 	m->op->stop(m, p);
-	kfree(m->buf);
-	m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
+	kvfree(m->buf);
+	m->buf = seq_buf_alloc(m->size <<= 1);
 	return !m->buf ? -ENOMEM : -EAGAIN;
 }
 
@@ -191,7 +203,7 @@
 
 	/* grab buffer if we didn't have one */
 	if (!m->buf) {
-		m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
+		m->buf = seq_buf_alloc(m->size = PAGE_SIZE);
 		if (!m->buf)
 			goto Enomem;
 	}
@@ -233,8 +245,8 @@
 		if (m->count < m->size)
 			goto Fill;
 		m->op->stop(m, p);
-		kfree(m->buf);
-		m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
+		kvfree(m->buf);
+		m->buf = seq_buf_alloc(m->size <<= 1);
 		if (!m->buf)
 			goto Enomem;
 		m->count = 0;
@@ -351,7 +363,7 @@
 int seq_release(struct inode *inode, struct file *file)
 {
 	struct seq_file *m = file->private_data;
-	kfree(m->buf);
+	kvfree(m->buf);
 	kfree(m);
 	return 0;
 }
@@ -606,13 +618,13 @@
 int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
 		void *data, size_t size)
 {
-	char *buf = kmalloc(size, GFP_KERNEL);
+	char *buf = seq_buf_alloc(size);
 	int ret;
 	if (!buf)
 		return -ENOMEM;
 	ret = single_open(file, show, data);
 	if (ret) {
-		kfree(buf);
+		kvfree(buf);
 		return ret;
 	}
 	((struct seq_file *)file->private_data)->buf = buf;
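
Note: seq_buf_alloc() above is the try-kmalloc-then-vmalloc fallback that
later kernels expose as kvmalloc(); kvfree() pairs with it because it can
free either kind of allocation. A sketch of the overflow step performed in
traverse() and seq_read() (grow_buf is a hypothetical helper; it assumes
seq_buf_alloc() is in scope, as it is inside fs/seq_file.c):

static void *grow_buf(void *buf, size_t *size)
{
	kvfree(buf);			/* frees kmalloc or vmalloc memory */
	*size <<= 1;			/* double, like m->size <<= 1 above */
	return seq_buf_alloc(*size);	/* may quietly fall back to vmalloc */
}
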
diff --git a/fs/splice.c b/fs/splice.c
index 2ffa7b0..5f0a6df 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1119,8 +1119,8 @@
 /*
  * Attempt to initiate a splice from pipe to file.
  */
-static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
-			   loff_t *ppos, size_t len, unsigned int flags)
+long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
+		    loff_t *ppos, size_t len, unsigned int flags)
 {
 	ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
 				loff_t *, size_t, unsigned int);
@@ -1146,13 +1146,14 @@
 	file_end_write(out);
 	return ret;
 }
+EXPORT_SYMBOL(do_splice_from);
 
 /*
  * Attempt to initiate a splice from a file to a pipe.
  */
-static long do_splice_to(struct file *in, loff_t *ppos,
-			 struct pipe_inode_info *pipe, size_t len,
-			 unsigned int flags)
+long do_splice_to(struct file *in, loff_t *ppos,
+		  struct pipe_inode_info *pipe, size_t len,
+		  unsigned int flags)
 {
 	ssize_t (*splice_read)(struct file *, loff_t *,
 			       struct pipe_inode_info *, size_t, unsigned int);
@@ -1172,6 +1173,7 @@
 
 	return splice_read(in, ppos, pipe, len, flags);
 }
+EXPORT_SYMBOL(do_splice_to);
 
 /**
  * splice_direct_to_actor - splices data directly between two non-pipes
diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig
index c70111e..ffb093e 100644
--- a/fs/squashfs/Kconfig
+++ b/fs/squashfs/Kconfig
@@ -25,6 +25,78 @@
 
 	  If unsure, say N.
 
+choice
+	prompt "File decompression options"
+	depends on SQUASHFS
+	help
+	  Squashfs now supports two options for decompressing file
+	  data.  Traditionally Squashfs decompressed into an
+	  intermediate buffer and then memcopied the result into the
+	  page cache.  It can now also decompress directly into the
+	  page cache.
+
+	  If unsure, select "Decompress file data into an intermediate buffer"
+
+config SQUASHFS_FILE_CACHE
+	bool "Decompress file data into an intermediate buffer"
+	help
+	  Decompress file data into an intermediate buffer and then
+	  memcopy it into the page cache.
+
+config SQUASHFS_FILE_DIRECT
+	bool "Decompress files directly into the page cache"
+	help
+	  Directly decompress file data into the page cache.
+	  Doing so can significantly improve performance because
+	  it eliminates a memcpy and it also removes the lock contention
+	  on the single buffer.
+
+endchoice
+
+choice
+	prompt "Decompressor parallelisation options"
+	depends on SQUASHFS
+	help
+	  Squashfs now supports three parallelisation options for
+	  decompression.  Each one exhibits various trade-offs between
+	  decompression performance and CPU and memory usage.
+
+	  If in doubt, select "Single threaded decompression"
+
+config SQUASHFS_DECOMP_SINGLE
+	bool "Single threaded compression"
+	help
+	  Traditionally Squashfs has used single-threaded decompression.
+	  Only one block (data or metadata) can be decompressed at any
+	  one time.  This limits CPU and memory usage to a minimum.
+
+config SQUASHFS_DECOMP_MULTI
+	bool "Use multiple decompressors for parallel I/O"
+	help
+	  By default Squashfs uses a single decompressor, but this gives
+	  poor performance on parallel I/O workloads on multi-CPU
+	  machines, due to waiting on decompressor availability.
+
+	  If you have a parallel I/O workload and your system has enough memory,
+	  using this option may improve overall I/O performance.
+
+	  This decompressor implementation uses up to two parallel
+	  decompressors per core.  It allocates decompressors dynamically,
+	  on demand.
+
+config SQUASHFS_DECOMP_MULTI_PERCPU
+	bool "Use percpu multiple decompressors for parallel I/O"
+	help
+	  By default Squashfs uses a single decompressor, but this gives
+	  poor performance on parallel I/O workloads on multi-CPU
+	  machines, due to waiting on decompressor availability.
+
+	  This decompressor implementation uses a maximum of one
+	  decompressor per core.  It uses percpu variables to ensure
+	  decompression is load-balanced across the cores.
+
+endchoice
+
 config SQUASHFS_XATTR
 	bool "Squashfs XATTR support"
 	depends on SQUASHFS
@@ -48,6 +120,21 @@
 
 	  If unsure, say Y.
 
+config SQUASHFS_LZ4
+	bool "Include support for LZ4 compressed file systems"
+	depends on SQUASHFS
+	select LZ4_DECOMPRESS
+	help
+	  Saying Y here includes support for reading Squashfs file systems
+	  compressed with LZ4 compression.  LZ4 compression is mainly
+	  aimed at embedded systems with slower CPUs where the overheads
+	  of zlib are too high.
+
+	  LZ4 is not the standard compression used in Squashfs and so most
+	  file systems will be readable without selecting this option.
+
+	  If unsure, say N.
+
 config SQUASHFS_LZO
 	bool "Include support for LZO compressed file systems"
 	depends on SQUASHFS
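
Note: the two choice blocks and the new LZ4 symbol map one-to-one onto the
Makefile rules in the next hunk. An illustrative configuration picking one
option from each choice (a sketch, not a recommendation) could be:

CONFIG_SQUASHFS=y
CONFIG_SQUASHFS_FILE_DIRECT=y
CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
CONFIG_SQUASHFS_LZ4=y
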
diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile
index 110b047..246a6f3 100644
--- a/fs/squashfs/Makefile
+++ b/fs/squashfs/Makefile
@@ -5,7 +5,13 @@
 obj-$(CONFIG_SQUASHFS) += squashfs.o
 squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o
 squashfs-y += namei.o super.o symlink.o decompressor.o
+squashfs-$(CONFIG_SQUASHFS_FILE_CACHE) += file_cache.o
+squashfs-$(CONFIG_SQUASHFS_FILE_DIRECT) += file_direct.o page_actor.o
+squashfs-$(CONFIG_SQUASHFS_DECOMP_SINGLE) += decompressor_single.o
+squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI) += decompressor_multi.o
+squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU) += decompressor_multi_percpu.o
 squashfs-$(CONFIG_SQUASHFS_XATTR) += xattr.o xattr_id.o
+squashfs-$(CONFIG_SQUASHFS_LZ4) += lz4_wrapper.o
 squashfs-$(CONFIG_SQUASHFS_LZO) += lzo_wrapper.o
 squashfs-$(CONFIG_SQUASHFS_XZ) += xz_wrapper.o
 squashfs-$(CONFIG_SQUASHFS_ZLIB) += zlib_wrapper.o
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index fb50652..0cea9b9 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -36,6 +36,7 @@
 #include "squashfs_fs_sb.h"
 #include "squashfs.h"
 #include "decompressor.h"
+#include "page_actor.h"
 
 /*
  * Read the metadata block length, this is stored in the first two
@@ -86,16 +87,16 @@
  * generated a larger block - this does occasionally happen with compression
  * algorithms).
  */
-int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
-			int length, u64 *next_index, int srclength, int pages)
+int squashfs_read_data(struct super_block *sb, u64 index, int length,
+		u64 *next_index, struct squashfs_page_actor *output)
 {
 	struct squashfs_sb_info *msblk = sb->s_fs_info;
 	struct buffer_head **bh;
 	int offset = index & ((1 << msblk->devblksize_log2) - 1);
 	u64 cur_index = index >> msblk->devblksize_log2;
-	int bytes, compressed, b = 0, k = 0, page = 0, avail;
+	int bytes, compressed, b = 0, k = 0, avail, i;
 
-	bh = kcalloc(((srclength + msblk->devblksize - 1)
+	bh = kcalloc(((output->length + msblk->devblksize - 1)
 		>> msblk->devblksize_log2) + 1, sizeof(*bh), GFP_KERNEL);
 	if (bh == NULL)
 		return -ENOMEM;
@@ -111,9 +112,9 @@
 			*next_index = index + length;
 
 		TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n",
-			index, compressed ? "" : "un", length, srclength);
+			index, compressed ? "" : "un", length, output->length);
 
-		if (length < 0 || length > srclength ||
+		if (length < 0 || length > output->length ||
 				(index + length) > msblk->bytes_used)
 			goto read_failure;
 
@@ -145,7 +146,7 @@
 		TRACE("Block @ 0x%llx, %scompressed size %d\n", index,
 				compressed ? "" : "un", length);
 
-		if (length < 0 || length > srclength ||
+		if (length < 0 || length > output->length ||
 					(index + length) > msblk->bytes_used)
 			goto block_release;
 
@@ -158,35 +159,36 @@
 		ll_rw_block(READ, b - 1, bh + 1);
 	}
 
+	for (i = 0; i < b; i++) {
+		wait_on_buffer(bh[i]);
+		if (!buffer_uptodate(bh[i]))
+			goto block_release;
+	}
+
 	if (compressed) {
-		length = squashfs_decompress(msblk, buffer, bh, b, offset,
-			 length, srclength, pages);
+		length = squashfs_decompress(msblk, bh, b, offset, length,
+			output);
 		if (length < 0)
 			goto read_failure;
 	} else {
 		/*
 		 * Block is uncompressed.
 		 */
-		int i, in, pg_offset = 0;
-
-		for (i = 0; i < b; i++) {
-			wait_on_buffer(bh[i]);
-			if (!buffer_uptodate(bh[i]))
-				goto block_release;
-		}
+		int in, pg_offset = 0;
+		void *data = squashfs_first_page(output);
 
 		for (bytes = length; k < b; k++) {
 			in = min(bytes, msblk->devblksize - offset);
 			bytes -= in;
 			while (in) {
 				if (pg_offset == PAGE_CACHE_SIZE) {
-					page++;
+					data = squashfs_next_page(output);
 					pg_offset = 0;
 				}
 				avail = min_t(int, in, PAGE_CACHE_SIZE -
 						pg_offset);
-				memcpy(buffer[page] + pg_offset,
-						bh[k]->b_data + offset, avail);
+				memcpy(data + pg_offset, bh[k]->b_data + offset,
+						avail);
 				in -= avail;
 				pg_offset += avail;
 				offset += avail;
@@ -194,6 +196,7 @@
 			offset = 0;
 			put_bh(bh[k]);
 		}
+		squashfs_finish_page(output);
 	}
 
 	kfree(bh);
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index af0b738..1cb70a0 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -56,6 +56,7 @@
 #include "squashfs_fs.h"
 #include "squashfs_fs_sb.h"
 #include "squashfs.h"
+#include "page_actor.h"
 
 /*
  * Look-up block in cache, and increment usage count.  If not in cache, read
@@ -119,9 +120,8 @@
 			entry->error = 0;
 			spin_unlock(&cache->lock);
 
-			entry->length = squashfs_read_data(sb, entry->data,
-				block, length, &entry->next_index,
-				cache->block_size, cache->pages);
+			entry->length = squashfs_read_data(sb, block, length,
+				&entry->next_index, entry->actor);
 
 			spin_lock(&cache->lock);
 
@@ -220,6 +220,7 @@
 				kfree(cache->entry[i].data[j]);
 			kfree(cache->entry[i].data);
 		}
+		kfree(cache->entry[i].actor);
 	}
 
 	kfree(cache->entry);
@@ -280,6 +281,13 @@
 				goto cleanup;
 			}
 		}
+
+		entry->actor = squashfs_page_actor_init(entry->data,
+						cache->pages, 0);
+		if (entry->actor == NULL) {
+			ERROR("Failed to allocate %s cache entry\n", name);
+			goto cleanup;
+		}
 	}
 
 	return cache;
@@ -410,6 +418,7 @@
 	int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	int i, res;
 	void *table, *buffer, **data;
+	struct squashfs_page_actor *actor;
 
 	table = buffer = kmalloc(length, GFP_KERNEL);
 	if (table == NULL)
@@ -421,19 +430,28 @@
 		goto failed;
 	}
 
+	actor = squashfs_page_actor_init(data, pages, length);
+	if (actor == NULL) {
+		res = -ENOMEM;
+		goto failed2;
+	}
+
 	for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE)
 		data[i] = buffer;
 
-	res = squashfs_read_data(sb, data, block, length |
-		SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length, pages);
+	res = squashfs_read_data(sb, block, length |
+		SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, actor);
 
 	kfree(data);
+	kfree(actor);
 
 	if (res < 0)
 		goto failed;
 
 	return table;
 
+failed2:
+	kfree(data);
 failed:
 	kfree(table);
 	return ERR_PTR(res);
diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c
index 3f6271d..e9034bf 100644
--- a/fs/squashfs/decompressor.c
+++ b/fs/squashfs/decompressor.c
@@ -30,6 +30,7 @@
 #include "squashfs_fs_sb.h"
 #include "decompressor.h"
 #include "squashfs.h"
+#include "page_actor.h"
 
 /*
  * This file (and decompressor.h) implements a decompressor framework for
@@ -37,33 +38,40 @@
  */
 
 static const struct squashfs_decompressor squashfs_lzma_unsupported_comp_ops = {
-	NULL, NULL, NULL, LZMA_COMPRESSION, "lzma", 0
+	NULL, NULL, NULL, NULL, LZMA_COMPRESSION, "lzma", 0
 };
 
+#ifndef CONFIG_SQUASHFS_LZ4
+static const struct squashfs_decompressor squashfs_lz4_comp_ops = {
+	NULL, NULL, NULL, NULL, LZ4_COMPRESSION, "lz4", 0
+};
+#endif
+
 #ifndef CONFIG_SQUASHFS_LZO
 static const struct squashfs_decompressor squashfs_lzo_comp_ops = {
-	NULL, NULL, NULL, LZO_COMPRESSION, "lzo", 0
+	NULL, NULL, NULL, NULL, LZO_COMPRESSION, "lzo", 0
 };
 #endif
 
 #ifndef CONFIG_SQUASHFS_XZ
 static const struct squashfs_decompressor squashfs_xz_comp_ops = {
-	NULL, NULL, NULL, XZ_COMPRESSION, "xz", 0
+	NULL, NULL, NULL, NULL, XZ_COMPRESSION, "xz", 0
 };
 #endif
 
 #ifndef CONFIG_SQUASHFS_ZLIB
 static const struct squashfs_decompressor squashfs_zlib_comp_ops = {
-	NULL, NULL, NULL, ZLIB_COMPRESSION, "zlib", 0
+	NULL, NULL, NULL, NULL, ZLIB_COMPRESSION, "zlib", 0
 };
 #endif
 
 static const struct squashfs_decompressor squashfs_unknown_comp_ops = {
-	NULL, NULL, NULL, 0, "unknown", 0
+	NULL, NULL, NULL, NULL, 0, "unknown", 0
 };
 
 static const struct squashfs_decompressor *decompressor[] = {
 	&squashfs_zlib_comp_ops,
+	&squashfs_lz4_comp_ops,
 	&squashfs_lzo_comp_ops,
 	&squashfs_xz_comp_ops,
 	&squashfs_lzma_unsupported_comp_ops,
@@ -83,10 +91,11 @@
 }
 
 
-void *squashfs_decompressor_init(struct super_block *sb, unsigned short flags)
+static void *get_comp_opts(struct super_block *sb, unsigned short flags)
 {
 	struct squashfs_sb_info *msblk = sb->s_fs_info;
-	void *strm, *buffer = NULL;
+	void *buffer = NULL, *comp_opts;
+	struct squashfs_page_actor *actor = NULL;
 	int length = 0;
 
 	/*
@@ -94,23 +103,46 @@
 	 */
 	if (SQUASHFS_COMP_OPTS(flags)) {
 		buffer = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
-		if (buffer == NULL)
-			return ERR_PTR(-ENOMEM);
+		if (buffer == NULL) {
+			comp_opts = ERR_PTR(-ENOMEM);
+			goto out;
+		}
 
-		length = squashfs_read_data(sb, &buffer,
-			sizeof(struct squashfs_super_block), 0, NULL,
-			PAGE_CACHE_SIZE, 1);
+		actor = squashfs_page_actor_init(&buffer, 1, 0);
+		if (actor == NULL) {
+			comp_opts = ERR_PTR(-ENOMEM);
+			goto out;
+		}
+
+		length = squashfs_read_data(sb,
+			sizeof(struct squashfs_super_block), 0, NULL, actor);
 
 		if (length < 0) {
-			strm = ERR_PTR(length);
-			goto finished;
+			comp_opts = ERR_PTR(length);
+			goto out;
 		}
 	}
 
-	strm = msblk->decompressor->init(msblk, buffer, length);
+	comp_opts = squashfs_comp_opts(msblk, buffer, length);
 
-finished:
+out:
+	kfree(actor);
 	kfree(buffer);
+	return comp_opts;
+}
 
-	return strm;
+
+void *squashfs_decompressor_setup(struct super_block *sb, unsigned short flags)
+{
+	struct squashfs_sb_info *msblk = sb->s_fs_info;
+	void *stream, *comp_opts = get_comp_opts(sb, flags);
+
+	if (IS_ERR(comp_opts))
+		return comp_opts;
+
+	stream = squashfs_decompressor_create(msblk, comp_opts);
+	if (IS_ERR(stream))
+		kfree(comp_opts);
+
+	return stream;
 }
diff --git a/fs/squashfs/decompressor.h b/fs/squashfs/decompressor.h
index 330073e..a25713c 100644
--- a/fs/squashfs/decompressor.h
+++ b/fs/squashfs/decompressor.h
@@ -24,34 +24,32 @@
  */
 
 struct squashfs_decompressor {
-	void	*(*init)(struct squashfs_sb_info *, void *, int);
+	void	*(*init)(struct squashfs_sb_info *, void *);
+	void	*(*comp_opts)(struct squashfs_sb_info *, void *, int);
 	void	(*free)(void *);
-	int	(*decompress)(struct squashfs_sb_info *, void **,
-		struct buffer_head **, int, int, int, int, int);
+	int	(*decompress)(struct squashfs_sb_info *, void *,
+		struct buffer_head **, int, int, int,
+		struct squashfs_page_actor *);
 	int	id;
 	char	*name;
 	int	supported;
 };
 
-static inline void squashfs_decompressor_free(struct squashfs_sb_info *msblk,
-	void *s)
+static inline void *squashfs_comp_opts(struct squashfs_sb_info *msblk,
+							void *buff, int length)
 {
-	if (msblk->decompressor)
-		msblk->decompressor->free(s);
-}
-
-static inline int squashfs_decompress(struct squashfs_sb_info *msblk,
-	void **buffer, struct buffer_head **bh, int b, int offset, int length,
-	int srclength, int pages)
-{
-	return msblk->decompressor->decompress(msblk, buffer, bh, b, offset,
-		length, srclength, pages);
+	return msblk->decompressor->comp_opts ?
+		msblk->decompressor->comp_opts(msblk, buff, length) : NULL;
 }
 
 #ifdef CONFIG_SQUASHFS_XZ
 extern const struct squashfs_decompressor squashfs_xz_comp_ops;
 #endif
 
+#ifdef CONFIG_SQUASHFS_LZ4
+extern const struct squashfs_decompressor squashfs_lz4_comp_ops;
+#endif
+
 #ifdef CONFIG_SQUASHFS_LZO
 extern const struct squashfs_decompressor squashfs_lzo_comp_ops;
 #endif
diff --git a/fs/squashfs/decompressor_multi.c b/fs/squashfs/decompressor_multi.c
new file mode 100644
index 0000000..d6008a6
--- /dev/null
+++ b/fs/squashfs/decompressor_multi.c
@@ -0,0 +1,198 @@
+/*
+ *  Copyright (c) 2013
+ *  Minchan Kim <minchan@kernel.org>
+ *
+ *  This work is licensed under the terms of the GNU GPL, version 2. See
+ *  the COPYING file in the top-level directory.
+ */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/cpumask.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "decompressor.h"
+#include "squashfs.h"
+
+/*
+ * This file implements multi-threaded decompression in the
+ * decompressor framework
+ */
+
+
+/*
+ * The reason for multiplying by two is that a CPU can request new I/O
+ * while it is waiting for a previous request to complete.
+ */
+#define MAX_DECOMPRESSOR	(num_online_cpus() * 2)
+
+
+int squashfs_max_decompressors(void)
+{
+	return MAX_DECOMPRESSOR;
+}
+
+
+struct squashfs_stream {
+	void			*comp_opts;
+	struct list_head	strm_list;
+	struct mutex		mutex;
+	int			avail_decomp;
+	wait_queue_head_t	wait;
+};
+
+
+struct decomp_stream {
+	void *stream;
+	struct list_head list;
+};
+
+
+static void put_decomp_stream(struct decomp_stream *decomp_strm,
+				struct squashfs_stream *stream)
+{
+	mutex_lock(&stream->mutex);
+	list_add(&decomp_strm->list, &stream->strm_list);
+	mutex_unlock(&stream->mutex);
+	wake_up(&stream->wait);
+}
+
+void *squashfs_decompressor_create(struct squashfs_sb_info *msblk,
+				void *comp_opts)
+{
+	struct squashfs_stream *stream;
+	struct decomp_stream *decomp_strm = NULL;
+	int err = -ENOMEM;
+
+	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+	if (!stream)
+		goto out;
+
+	stream->comp_opts = comp_opts;
+	mutex_init(&stream->mutex);
+	INIT_LIST_HEAD(&stream->strm_list);
+	init_waitqueue_head(&stream->wait);
+
+	/*
+	 * We should always have at least one default decompressor, so
+	 * that if we fail to allocate a new decompressor dynamically,
+	 * we can always fall back to the default one and the file
+	 * system keeps working.
+	 */
+	decomp_strm = kmalloc(sizeof(*decomp_strm), GFP_KERNEL);
+	if (!decomp_strm)
+		goto out;
+
+	decomp_strm->stream = msblk->decompressor->init(msblk,
+						stream->comp_opts);
+	if (IS_ERR(decomp_strm->stream)) {
+		err = PTR_ERR(decomp_strm->stream);
+		goto out;
+	}
+
+	list_add(&decomp_strm->list, &stream->strm_list);
+	stream->avail_decomp = 1;
+	return stream;
+
+out:
+	kfree(decomp_strm);
+	kfree(stream);
+	return ERR_PTR(err);
+}
+
+
+void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk)
+{
+	struct squashfs_stream *stream = msblk->stream;
+	if (stream) {
+		struct decomp_stream *decomp_strm;
+
+		while (!list_empty(&stream->strm_list)) {
+			decomp_strm = list_entry(stream->strm_list.prev,
+						struct decomp_stream, list);
+			list_del(&decomp_strm->list);
+			msblk->decompressor->free(decomp_strm->stream);
+			kfree(decomp_strm);
+			stream->avail_decomp--;
+		}
+		WARN_ON(stream->avail_decomp);
+		kfree(stream->comp_opts);
+		kfree(stream);
+	}
+}
+
+
+static struct decomp_stream *get_decomp_stream(struct squashfs_sb_info *msblk,
+					struct squashfs_stream *stream)
+{
+	struct decomp_stream *decomp_strm;
+
+	while (1) {
+		mutex_lock(&stream->mutex);
+
+		/* There is an available decomp_stream */
+		if (!list_empty(&stream->strm_list)) {
+			decomp_strm = list_entry(stream->strm_list.prev,
+				struct decomp_stream, list);
+			list_del(&decomp_strm->list);
+			mutex_unlock(&stream->mutex);
+			break;
+		}
+
+		/*
+		 * If no decompressor is available and the pool is already
+		 * full, wait for another user to release one.
+		 */
+		if (stream->avail_decomp >= MAX_DECOMPRESSOR)
+			goto wait;
+
+		/* Otherwise, allocate a new decompressor */
+		decomp_strm = kmalloc(sizeof(*decomp_strm), GFP_KERNEL);
+		if (!decomp_strm)
+			goto wait;
+
+		decomp_strm->stream = msblk->decompressor->init(msblk,
+						stream->comp_opts);
+		if (IS_ERR(decomp_strm->stream)) {
+			kfree(decomp_strm);
+			goto wait;
+		}
+
+		stream->avail_decomp++;
+		WARN_ON(stream->avail_decomp > MAX_DECOMPRESSOR);
+
+		mutex_unlock(&stream->mutex);
+		break;
+wait:
+		/*
+		 * If system memory is tight, wait for another user to
+		 * release a decompressor instead of pressuring the VM,
+		 * which could cause page cache thrashing.
+		 */
+		mutex_unlock(&stream->mutex);
+		wait_event(stream->wait,
+			!list_empty(&stream->strm_list));
+	}
+
+	return decomp_strm;
+}
+
+
+int squashfs_decompress(struct squashfs_sb_info *msblk, struct buffer_head **bh,
+	int b, int offset, int length, struct squashfs_page_actor *output)
+{
+	int res;
+	struct squashfs_stream *stream = msblk->stream;
+	struct decomp_stream *decomp_stream = get_decomp_stream(msblk, stream);
+	res = msblk->decompressor->decompress(msblk, decomp_stream->stream,
+		bh, b, offset, length, output);
+	put_decomp_stream(decomp_stream, stream);
+	if (res < 0)
+		ERROR("%s decompression failed, data probably corrupt\n",
+			msblk->decompressor->name);
+	return res;
+}
diff --git a/fs/squashfs/decompressor_multi_percpu.c b/fs/squashfs/decompressor_multi_percpu.c
new file mode 100644
index 0000000..23a9c28
--- /dev/null
+++ b/fs/squashfs/decompressor_multi_percpu.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2013
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/percpu.h>
+#include <linux/buffer_head.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "decompressor.h"
+#include "squashfs.h"
+
+/*
+ * This file implements multi-threaded decompression using percpu
+ * variables, one thread per cpu core.
+ */
+
+struct squashfs_stream {
+	void		*stream;
+};
+
+void *squashfs_decompressor_create(struct squashfs_sb_info *msblk,
+						void *comp_opts)
+{
+	struct squashfs_stream *stream;
+	struct squashfs_stream __percpu *percpu;
+	int err, cpu;
+
+	percpu = alloc_percpu(struct squashfs_stream);
+	if (percpu == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	for_each_possible_cpu(cpu) {
+		stream = per_cpu_ptr(percpu, cpu);
+		stream->stream = msblk->decompressor->init(msblk, comp_opts);
+		if (IS_ERR(stream->stream)) {
+			err = PTR_ERR(stream->stream);
+			goto out;
+		}
+	}
+
+	kfree(comp_opts);
+	return (__force void *) percpu;
+
+out:
+	for_each_possible_cpu(cpu) {
+		stream = per_cpu_ptr(percpu, cpu);
+		if (!IS_ERR_OR_NULL(stream->stream))
+			msblk->decompressor->free(stream->stream);
+	}
+	free_percpu(percpu);
+	return ERR_PTR(err);
+}
+
+void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk)
+{
+	struct squashfs_stream __percpu *percpu =
+			(struct squashfs_stream __percpu *) msblk->stream;
+	struct squashfs_stream *stream;
+	int cpu;
+
+	if (msblk->stream) {
+		for_each_possible_cpu(cpu) {
+			stream = per_cpu_ptr(percpu, cpu);
+			msblk->decompressor->free(stream->stream);
+		}
+		free_percpu(percpu);
+	}
+}
+
+int squashfs_decompress(struct squashfs_sb_info *msblk, struct buffer_head **bh,
+	int b, int offset, int length, struct squashfs_page_actor *output)
+{
+	struct squashfs_stream __percpu *percpu =
+			(struct squashfs_stream __percpu *) msblk->stream;
+	struct squashfs_stream *stream = get_cpu_ptr(percpu);
+	int res = msblk->decompressor->decompress(msblk, stream->stream, bh, b,
+		offset, length, output);
+	put_cpu_ptr(stream);
+
+	if (res < 0)
+		ERROR("%s decompression failed, data probably corrupt\n",
+			msblk->decompressor->name);
+
+	return res;
+}
+
+int squashfs_max_decompressors(void)
+{
+	return num_possible_cpus();
+}
diff --git a/fs/squashfs/decompressor_single.c b/fs/squashfs/decompressor_single.c
new file mode 100644
index 0000000..a6c7592
--- /dev/null
+++ b/fs/squashfs/decompressor_single.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2013
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "decompressor.h"
+#include "squashfs.h"
+
+/*
+ * This file implements single-threaded decompression in the
+ * decompressor framework
+ */
+
+struct squashfs_stream {
+	void		*stream;
+	struct mutex	mutex;
+};
+
+void *squashfs_decompressor_create(struct squashfs_sb_info *msblk,
+						void *comp_opts)
+{
+	struct squashfs_stream *stream;
+	int err = -ENOMEM;
+
+	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
+	if (stream == NULL)
+		goto out;
+
+	stream->stream = msblk->decompressor->init(msblk, comp_opts);
+	if (IS_ERR(stream->stream)) {
+		err = PTR_ERR(stream->stream);
+		goto out;
+	}
+
+	kfree(comp_opts);
+	mutex_init(&stream->mutex);
+	return stream;
+
+out:
+	kfree(stream);
+	return ERR_PTR(err);
+}
+
+void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk)
+{
+	struct squashfs_stream *stream = msblk->stream;
+
+	if (stream) {
+		msblk->decompressor->free(stream->stream);
+		kfree(stream);
+	}
+}
+
+int squashfs_decompress(struct squashfs_sb_info *msblk, struct buffer_head **bh,
+	int b, int offset, int length, struct squashfs_page_actor *output)
+{
+	int res;
+	struct squashfs_stream *stream = msblk->stream;
+
+	mutex_lock(&stream->mutex);
+	res = msblk->decompressor->decompress(msblk, stream->stream, bh, b,
+		offset, length, output);
+	mutex_unlock(&stream->mutex);
+
+	if (res < 0)
+		ERROR("%s decompression failed, data probably corrupt\n",
+			msblk->decompressor->name);
+
+	return res;
+}
+
+int squashfs_max_decompressors(void)
+{
+	return 1;
+}
diff --git a/fs/squashfs/dir.c b/fs/squashfs/dir.c
index 57dc70e..d8c2d74 100644
--- a/fs/squashfs/dir.c
+++ b/fs/squashfs/dir.c
@@ -54,6 +54,7 @@
 {
 	struct squashfs_sb_info *msblk = sb->s_fs_info;
 	int err, i, index, length = 0;
+	unsigned int size;
 	struct squashfs_dir_index dir_index;
 
 	TRACE("Entered get_dir_index_using_offset, i_count %d, f_pos %lld\n",
@@ -81,8 +82,14 @@
 			 */
 			break;
 
+		size = le32_to_cpu(dir_index.size) + 1;
+
+		/* size should never be larger than SQUASHFS_NAME_LEN */
+		if (size > SQUASHFS_NAME_LEN)
+			break;
+
 		err = squashfs_read_metadata(sb, NULL, &index_start,
-				&index_offset, le32_to_cpu(dir_index.size) + 1);
+				&index_offset, size);
 		if (err < 0)
 			break;
 
@@ -100,14 +107,13 @@
 }
 
 
-static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
+static int squashfs_readdir(struct file *file, struct dir_context *ctx)
 {
 	struct inode *inode = file_inode(file);
 	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
 	u64 block = squashfs_i(inode)->start + msblk->directory_table;
-	int offset = squashfs_i(inode)->offset, length, dir_count, size,
-				type, err;
-	unsigned int inode_number;
+	int offset = squashfs_i(inode)->offset, length, err;
+	unsigned int inode_number, dir_count, size, type;
 	struct squashfs_dir_header dirh;
 	struct squashfs_dir_entry *dire;
 
@@ -127,11 +133,11 @@
 	 * It also means that the external f_pos is offset by 3 from the
 	 * on-disk directory f_pos.
 	 */
-	while (file->f_pos < 3) {
+	while (ctx->pos < 3) {
 		char *name;
 		int i_ino;
 
-		if (file->f_pos == 0) {
+		if (ctx->pos == 0) {
 			name = ".";
 			size = 1;
 			i_ino = inode->i_ino;
@@ -141,24 +147,18 @@
 			i_ino = squashfs_i(inode)->parent;
 		}
 
-		TRACE("Calling filldir(%p, %s, %d, %lld, %d, %d)\n",
-				dirent, name, size, file->f_pos, i_ino,
-				squashfs_filetype_table[1]);
-
-		if (filldir(dirent, name, size, file->f_pos, i_ino,
-				squashfs_filetype_table[1]) < 0) {
-				TRACE("Filldir returned less than 0\n");
+		if (!dir_emit(ctx, name, size, i_ino,
+				squashfs_filetype_table[1]))
 			goto finish;
-		}
 
-		file->f_pos += size;
+		ctx->pos += size;
 	}
 
 	length = get_dir_index_using_offset(inode->i_sb, &block, &offset,
 				squashfs_i(inode)->dir_idx_start,
 				squashfs_i(inode)->dir_idx_offset,
 				squashfs_i(inode)->dir_idx_cnt,
-				file->f_pos);
+				ctx->pos);
 
 	while (length < i_size_read(inode)) {
 		/*
@@ -198,7 +198,7 @@
 
 			length += sizeof(*dire) + size;
 
-			if (file->f_pos >= length)
+			if (ctx->pos >= length)
 				continue;
 
 			dire->name[size] = '\0';
@@ -206,22 +206,15 @@
 				((short) le16_to_cpu(dire->inode_number));
 			type = le16_to_cpu(dire->type);
 
-			TRACE("Calling filldir(%p, %s, %d, %lld, %x:%x, %d, %d)"
-					"\n", dirent, dire->name, size,
-					file->f_pos,
-					le32_to_cpu(dirh.start_block),
-					le16_to_cpu(dire->offset),
-					inode_number,
-					squashfs_filetype_table[type]);
+			if (type > SQUASHFS_MAX_DIR_TYPE)
+				goto failed_read;
 
-			if (filldir(dirent, dire->name, size, file->f_pos,
+			if (!dir_emit(ctx, dire->name, size,
 					inode_number,
-					squashfs_filetype_table[type]) < 0) {
-				TRACE("Filldir returned less than 0\n");
+					squashfs_filetype_table[type]))
 				goto finish;
-			}
 
-			file->f_pos = length;
+			ctx->pos = length;
 		}
 	}
 
@@ -238,6 +231,6 @@
 
 const struct file_operations squashfs_dir_ops = {
 	.read = generic_read_dir,
-	.readdir = squashfs_readdir,
+	.iterate = squashfs_readdir,
 	.llseek = default_llseek,
 };
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index 8ca62c2..e5c9689 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -370,77 +370,15 @@
 	return le32_to_cpu(size);
 }
 
-
-static int squashfs_readpage(struct file *file, struct page *page)
+/* Copy data into page cache */
+void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
+	int bytes, int offset)
 {
 	struct inode *inode = page->mapping->host;
 	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-	int bytes, i, offset = 0, sparse = 0;
-	struct squashfs_cache_entry *buffer = NULL;
 	void *pageaddr;
-
-	int mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1;
-	int index = page->index >> (msblk->block_log - PAGE_CACHE_SHIFT);
-	int start_index = page->index & ~mask;
-	int end_index = start_index | mask;
-	int file_end = i_size_read(inode) >> msblk->block_log;
-
-	TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
-				page->index, squashfs_i(inode)->start);
-
-	if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
-					PAGE_CACHE_SHIFT))
-		goto out;
-
-	if (index < file_end || squashfs_i(inode)->fragment_block ==
-					SQUASHFS_INVALID_BLK) {
-		/*
-		 * Reading a datablock from disk.  Need to read block list
-		 * to get location and block size.
-		 */
-		u64 block = 0;
-		int bsize = read_blocklist(inode, index, &block);
-		if (bsize < 0)
-			goto error_out;
-
-		if (bsize == 0) { /* hole */
-			bytes = index == file_end ?
-				(i_size_read(inode) & (msblk->block_size - 1)) :
-				 msblk->block_size;
-			sparse = 1;
-		} else {
-			/*
-			 * Read and decompress datablock.
-			 */
-			buffer = squashfs_get_datablock(inode->i_sb,
-								block, bsize);
-			if (buffer->error) {
-				ERROR("Unable to read page, block %llx, size %x"
-					"\n", block, bsize);
-				squashfs_cache_put(buffer);
-				goto error_out;
-			}
-			bytes = buffer->length;
-		}
-	} else {
-		/*
-		 * Datablock is stored inside a fragment (tail-end packed
-		 * block).
-		 */
-		buffer = squashfs_get_fragment(inode->i_sb,
-				squashfs_i(inode)->fragment_block,
-				squashfs_i(inode)->fragment_size);
-
-		if (buffer->error) {
-			ERROR("Unable to read page, block %llx, size %x\n",
-				squashfs_i(inode)->fragment_block,
-				squashfs_i(inode)->fragment_size);
-			squashfs_cache_put(buffer);
-			goto error_out;
-		}
-		bytes = i_size_read(inode) & (msblk->block_size - 1);
-		offset = squashfs_i(inode)->fragment_offset;
-	}
+	int i, mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1;
+	int start_index = page->index & ~mask, end_index = start_index | mask;
 
 	/*
 	 * Loop copying datablock into pages.  As the datablock likely covers
@@ -451,7 +389,7 @@
 	for (i = start_index; i <= end_index && bytes > 0; i++,
 			bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) {
 		struct page *push_page;
-		int avail = sparse ? 0 : min_t(int, bytes, PAGE_CACHE_SIZE);
+		int avail = buffer ? min_t(int, bytes, PAGE_CACHE_SIZE) : 0;
 
 		TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail);
 
@@ -475,11 +413,75 @@
 		if (i != page->index)
 			page_cache_release(push_page);
 	}
+}
 
-	if (!sparse)
-		squashfs_cache_put(buffer);
+/* Read datablock stored packed inside a fragment (tail-end packed block) */
+static int squashfs_readpage_fragment(struct page *page)
+{
+	struct inode *inode = page->mapping->host;
+	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+	struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
+		squashfs_i(inode)->fragment_block,
+		squashfs_i(inode)->fragment_size);
+	int res = buffer->error;
 
+	if (res)
+		ERROR("Unable to read page, block %llx, size %x\n",
+			squashfs_i(inode)->fragment_block,
+			squashfs_i(inode)->fragment_size);
+	else
+		squashfs_copy_cache(page, buffer, i_size_read(inode) &
+			(msblk->block_size - 1),
+			squashfs_i(inode)->fragment_offset);
+
+	squashfs_cache_put(buffer);
+	return res;
+}
+
+static int squashfs_readpage_sparse(struct page *page, int index, int file_end)
+{
+	struct inode *inode = page->mapping->host;
+	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+	int bytes = index == file_end ?
+			(i_size_read(inode) & (msblk->block_size - 1)) :
+			 msblk->block_size;
+
+	squashfs_copy_cache(page, NULL, bytes, 0);
 	return 0;
+}
+
+static int squashfs_readpage(struct file *file, struct page *page)
+{
+	struct inode *inode = page->mapping->host;
+	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+	int index = page->index >> (msblk->block_log - PAGE_CACHE_SHIFT);
+	int file_end = i_size_read(inode) >> msblk->block_log;
+	int res;
+	void *pageaddr;
+
+	TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
+				page->index, squashfs_i(inode)->start);
+
+	if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
+					PAGE_CACHE_SHIFT))
+		goto out;
+
+	if (index < file_end || squashfs_i(inode)->fragment_block ==
+					SQUASHFS_INVALID_BLK) {
+		u64 block = 0;
+		int bsize = read_blocklist(inode, index, &block);
+		if (bsize < 0)
+			goto error_out;
+
+		if (bsize == 0)
+			res = squashfs_readpage_sparse(page, index, file_end);
+		else
+			res = squashfs_readpage_block(page, block, bsize);
+	} else
+		res = squashfs_readpage_fragment(page);
+
+	if (!res)
+		return 0;
 
 error_out:
 	SetPageError(page);
diff --git a/fs/squashfs/file_cache.c b/fs/squashfs/file_cache.c
new file mode 100644
index 0000000..f2310d2
--- /dev/null
+++ b/fs/squashfs/file_cache.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2013
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/mutex.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+/* Read separately compressed datablock and memcopy into page cache */
+int squashfs_readpage_block(struct page *page, u64 block, int bsize)
+{
+	struct inode *i = page->mapping->host;
+	struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
+		block, bsize);
+	int res = buffer->error;
+
+	if (res)
+		ERROR("Unable to read page, block %llx, size %x\n", block,
+			bsize);
+	else
+		squashfs_copy_cache(page, buffer, buffer->length, 0);
+
+	squashfs_cache_put(buffer);
+	return res;
+}
diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
new file mode 100644
index 0000000..43e7a7e
--- /dev/null
+++ b/fs/squashfs/file_direct.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2013
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/mutex.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+#include "page_actor.h"
+
+static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
+	int pages, struct page **page);
+
+/* Read separately compressed datablock directly into page cache */
+int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
+
+{
+	struct inode *inode = target_page->mapping->host;
+	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+
+	int file_end = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
+	int mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1;
+	int start_index = target_page->index & ~mask;
+	int end_index = start_index | mask;
+	int i, n, pages, missing_pages, bytes, res = -ENOMEM;
+	struct page **page;
+	struct squashfs_page_actor *actor;
+	void *pageaddr;
+
+	if (end_index > file_end)
+		end_index = file_end;
+
+	pages = end_index - start_index + 1;
+
+	page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL);
+	if (page == NULL)
+		return res;
+
+	/*
+	 * Create a "page actor" which will kmap and kunmap the
+	 * page cache pages appropriately within the decompressor
+	 */
+	actor = squashfs_page_actor_init_special(page, pages, 0);
+	if (actor == NULL)
+		goto out;
+
+	/* Try to grab all the pages covered by the Squashfs block */
+	for (missing_pages = 0, i = 0, n = start_index; i < pages; i++, n++) {
+		page[i] = (n == target_page->index) ? target_page :
+			grab_cache_page_nowait(target_page->mapping, n);
+
+		if (page[i] == NULL) {
+			missing_pages++;
+			continue;
+		}
+
+		if (PageUptodate(page[i])) {
+			unlock_page(page[i]);
+			page_cache_release(page[i]);
+			page[i] = NULL;
+			missing_pages++;
+		}
+	}
+
+	if (missing_pages) {
+		/*
+		 * Couldn't get one or more pages.  Either this page has
+		 * been VM reclaimed while others are still in the page
+		 * cache and uptodate, or we're racing with another thread
+		 * in squashfs_readpage also trying to grab them.  Fall
+		 * back to using an intermediate buffer.
+		 */
+		res = squashfs_read_cache(target_page, block, bsize, pages,
+								page);
+		if (res < 0)
+			goto mark_errored;
+
+		goto out;
+	}
+
+	/* Decompress directly into the page cache buffers */
+	res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
+	if (res < 0)
+		goto mark_errored;
+
+	/* Last page may have trailing bytes not filled */
+	bytes = res % PAGE_CACHE_SIZE;
+	if (bytes) {
+		pageaddr = kmap_atomic(page[pages - 1]);
+		memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
+		kunmap_atomic(pageaddr);
+	}
+
+	/* Mark pages as uptodate, unlock and release */
+	for (i = 0; i < pages; i++) {
+		flush_dcache_page(page[i]);
+		SetPageUptodate(page[i]);
+		unlock_page(page[i]);
+		if (page[i] != target_page)
+			page_cache_release(page[i]);
+	}
+
+	kfree(actor);
+	kfree(page);
+
+	return 0;
+
+mark_errored:
+	/* Decompression failed; mark pages as errored.  target_page is
+	 * dealt with by the caller.
+	 */
+	for (i = 0; i < pages; i++) {
+		if (page[i] == NULL || page[i] == target_page)
+			continue;
+		flush_dcache_page(page[i]);
+		SetPageError(page[i]);
+		unlock_page(page[i]);
+		page_cache_release(page[i]);
+	}
+
+out:
+	kfree(actor);
+	kfree(page);
+	return res;
+}
+
+
+static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
+	int pages, struct page **page)
+{
+	struct inode *i = target_page->mapping->host;
+	struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
+						 block, bsize);
+	int bytes = buffer->length, res = buffer->error, n, offset = 0;
+	void *pageaddr;
+
+	if (res) {
+		ERROR("Unable to read page, block %llx, size %x\n", block,
+			bsize);
+		goto out;
+	}
+
+	for (n = 0; n < pages && bytes > 0; n++,
+			bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) {
+		int avail = min_t(int, bytes, PAGE_CACHE_SIZE);
+
+		if (page[n] == NULL)
+			continue;
+
+		pageaddr = kmap_atomic(page[n]);
+		squashfs_copy_data(pageaddr, buffer, offset, avail);
+		memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail);
+		kunmap_atomic(pageaddr);
+		flush_dcache_page(page[n]);
+		SetPageUptodate(page[n]);
+		unlock_page(page[n]);
+		if (page[n] != target_page)
+			page_cache_release(page[n]);
+	}
+
+out:
+	squashfs_cache_put(buffer);
+	return res;
+}
diff --git a/fs/squashfs/lz4_wrapper.c b/fs/squashfs/lz4_wrapper.c
new file mode 100644
index 0000000..c31e2bc
--- /dev/null
+++ b/fs/squashfs/lz4_wrapper.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2013, 2014
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/lz4.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs.h"
+#include "decompressor.h"
+#include "page_actor.h"
+
+#define LZ4_LEGACY	1
+
+struct lz4_comp_opts {
+	__le32 version;
+	__le32 flags;
+};
+
+struct squashfs_lz4 {
+	void *input;
+	void *output;
+};
+
+
+static void *lz4_comp_opts(struct squashfs_sb_info *msblk,
+	void *buff, int len)
+{
+	struct lz4_comp_opts *comp_opts = buff;
+
+	/* LZ4 compressed filesystems always have compression options */
+	if (comp_opts == NULL || len < sizeof(*comp_opts))
+		return ERR_PTR(-EIO);
+
+	if (le32_to_cpu(comp_opts->version) != LZ4_LEGACY) {
+		/* The LZ4 format currently used by the kernel is the
+		 * 'legacy' format */
+		ERROR("Unknown LZ4 version\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	return NULL;
+}
+
+
+static void *lz4_init(struct squashfs_sb_info *msblk, void *buff)
+{
+	int block_size = max_t(int, msblk->block_size, SQUASHFS_METADATA_SIZE);
+	struct squashfs_lz4 *stream;
+
+	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+	if (stream == NULL)
+		goto failed;
+	stream->input = vmalloc(block_size);
+	if (stream->input == NULL)
+		goto failed2;
+	stream->output = vmalloc(block_size);
+	if (stream->output == NULL)
+		goto failed3;
+
+	return stream;
+
+failed3:
+	vfree(stream->input);
+failed2:
+	kfree(stream);
+failed:
+	ERROR("Failed to initialise LZ4 decompressor\n");
+	return ERR_PTR(-ENOMEM);
+}
+
+
+static void lz4_free(void *strm)
+{
+	struct squashfs_lz4 *stream = strm;
+
+	if (stream) {
+		vfree(stream->input);
+		vfree(stream->output);
+	}
+	kfree(stream);
+}
+
+
+static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm,
+	struct buffer_head **bh, int b, int offset, int length,
+	struct squashfs_page_actor *output)
+{
+	struct squashfs_lz4 *stream = strm;
+	void *buff = stream->input, *data;
+	int avail, i, bytes = length, res;
+	size_t dest_len = output->length;
+
+	for (i = 0; i < b; i++) {
+		avail = min(bytes, msblk->devblksize - offset);
+		memcpy(buff, bh[i]->b_data + offset, avail);
+		buff += avail;
+		bytes -= avail;
+		offset = 0;
+		put_bh(bh[i]);
+	}
+
+	res = lz4_decompress_unknownoutputsize(stream->input, length,
+					stream->output, &dest_len);
+	if (res)
+		return -EIO;
+
+	bytes = dest_len;
+	data = squashfs_first_page(output);
+	buff = stream->output;
+	while (data) {
+		if (bytes <= PAGE_CACHE_SIZE) {
+			memcpy(data, buff, bytes);
+			break;
+		}
+		memcpy(data, buff, PAGE_CACHE_SIZE);
+		buff += PAGE_CACHE_SIZE;
+		bytes -= PAGE_CACHE_SIZE;
+		data = squashfs_next_page(output);
+	}
+	squashfs_finish_page(output);
+
+	return dest_len;
+}
+
+const struct squashfs_decompressor squashfs_lz4_comp_ops = {
+	.init = lz4_init,
+	.comp_opts = lz4_comp_opts,
+	.free = lz4_free,
+	.decompress = lz4_uncompress,
+	.id = LZ4_COMPRESSION,
+	.name = "lz4",
+	.supported = 1
+};
diff --git a/fs/squashfs/lzo_wrapper.c b/fs/squashfs/lzo_wrapper.c
index 00f4dfc..244b9fb 100644
--- a/fs/squashfs/lzo_wrapper.c
+++ b/fs/squashfs/lzo_wrapper.c
@@ -31,13 +31,14 @@
 #include "squashfs_fs_sb.h"
 #include "squashfs.h"
 #include "decompressor.h"
+#include "page_actor.h"
 
 struct squashfs_lzo {
 	void	*input;
 	void	*output;
 };
 
-static void *lzo_init(struct squashfs_sb_info *msblk, void *buff, int len)
+static void *lzo_init(struct squashfs_sb_info *msblk, void *buff)
 {
 	int block_size = max_t(int, msblk->block_size, SQUASHFS_METADATA_SIZE);
 
@@ -74,22 +75,16 @@
 }
 
 
-static int lzo_uncompress(struct squashfs_sb_info *msblk, void **buffer,
-	struct buffer_head **bh, int b, int offset, int length, int srclength,
-	int pages)
+static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm,
+	struct buffer_head **bh, int b, int offset, int length,
+	struct squashfs_page_actor *output)
 {
-	struct squashfs_lzo *stream = msblk->stream;
-	void *buff = stream->input;
+	struct squashfs_lzo *stream = strm;
+	void *buff = stream->input, *data;
 	int avail, i, bytes = length, res;
-	size_t out_len = srclength;
-
-	mutex_lock(&msblk->read_data_mutex);
+	size_t out_len = output->length;
 
 	for (i = 0; i < b; i++) {
-		wait_on_buffer(bh[i]);
-		if (!buffer_uptodate(bh[i]))
-			goto block_release;
-
 		avail = min(bytes, msblk->devblksize - offset);
 		memcpy(buff, bh[i]->b_data + offset, avail);
 		buff += avail;
@@ -104,24 +99,24 @@
 		goto failed;
 
 	res = bytes = (int)out_len;
-	for (i = 0, buff = stream->output; bytes && i < pages; i++) {
-		avail = min_t(int, bytes, PAGE_CACHE_SIZE);
-		memcpy(buffer[i], buff, avail);
-		buff += avail;
-		bytes -= avail;
+	data = squashfs_first_page(output);
+	buff = stream->output;
+	while (data) {
+		if (bytes <= PAGE_CACHE_SIZE) {
+			memcpy(data, buff, bytes);
+			break;
+		} else {
+			memcpy(data, buff, PAGE_CACHE_SIZE);
+			buff += PAGE_CACHE_SIZE;
+			bytes -= PAGE_CACHE_SIZE;
+			data = squashfs_next_page(output);
+		}
 	}
+	squashfs_finish_page(output);
 
-	mutex_unlock(&msblk->read_data_mutex);
 	return res;
 
-block_release:
-	for (; i < b; i++)
-		put_bh(bh[i]);
-
 failed:
-	mutex_unlock(&msblk->read_data_mutex);
-
-	ERROR("lzo decompression failed, data probably corrupt\n");
 	return -EIO;
 }
 
diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c
index 7834a51..67cad77 100644
--- a/fs/squashfs/namei.c
+++ b/fs/squashfs/namei.c
@@ -79,7 +79,8 @@
 			int len)
 {
 	struct squashfs_sb_info *msblk = sb->s_fs_info;
-	int i, size, length = 0, err;
+	int i, length = 0, err;
+	unsigned int size;
 	struct squashfs_dir_index *index;
 	char *str;
 
@@ -103,6 +104,8 @@
 
 
 		size = le32_to_cpu(index->size) + 1;
+		if (size > SQUASHFS_NAME_LEN)
+			break;
 
 		err = squashfs_read_metadata(sb, index->name, &index_start,
 					&index_offset, size);
@@ -144,7 +147,8 @@
 	struct squashfs_dir_entry *dire;
 	u64 block = squashfs_i(dir)->start + msblk->directory_table;
 	int offset = squashfs_i(dir)->offset;
-	int err, length, dir_count, size;
+	int err, length;
+	unsigned int dir_count, size;
 
 	TRACE("Entered squashfs_lookup [%llx:%x]\n", block, offset);
 
diff --git a/fs/squashfs/page_actor.c b/fs/squashfs/page_actor.c
new file mode 100644
index 0000000..5a1c11f
--- /dev/null
+++ b/fs/squashfs/page_actor.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2013
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include "page_actor.h"
+
+/*
+ * This file contains implementations of page_actor for decompressing into
+ * an intermediate buffer, and for decompressing directly into the
+ * page cache.
+ *
+ * Calling code should avoid sleeping between calls to squashfs_first_page()
+ * and squashfs_finish_page().
+ */
+
+/* Implementation of page_actor for decompressing into intermediate buffer */
+static void *cache_first_page(struct squashfs_page_actor *actor)
+{
+	actor->next_page = 1;
+	return actor->buffer[0];
+}
+
+static void *cache_next_page(struct squashfs_page_actor *actor)
+{
+	if (actor->next_page == actor->pages)
+		return NULL;
+
+	return actor->buffer[actor->next_page++];
+}
+
+static void cache_finish_page(struct squashfs_page_actor *actor)
+{
+	/* empty */
+}
+
+struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
+	int pages, int length)
+{
+	struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
+
+	if (actor == NULL)
+		return NULL;
+
+	actor->length = length ? : pages * PAGE_CACHE_SIZE;
+	actor->buffer = buffer;
+	actor->pages = pages;
+	actor->next_page = 0;
+	actor->squashfs_first_page = cache_first_page;
+	actor->squashfs_next_page = cache_next_page;
+	actor->squashfs_finish_page = cache_finish_page;
+	return actor;
+}
+
+/* Implementation of page_actor for decompressing directly into page cache. */
+static void *direct_first_page(struct squashfs_page_actor *actor)
+{
+	actor->next_page = 1;
+	return actor->pageaddr = kmap_atomic(actor->page[0]);
+}
+
+static void *direct_next_page(struct squashfs_page_actor *actor)
+{
+	if (actor->pageaddr)
+		kunmap_atomic(actor->pageaddr);
+
+	return actor->pageaddr = actor->next_page == actor->pages ? NULL :
+		kmap_atomic(actor->page[actor->next_page++]);
+}
+
+static void direct_finish_page(struct squashfs_page_actor *actor)
+{
+	if (actor->pageaddr)
+		kunmap_atomic(actor->pageaddr);
+}
+
+struct squashfs_page_actor *squashfs_page_actor_init_special(struct page **page,
+	int pages, int length)
+{
+	struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
+
+	if (actor == NULL)
+		return NULL;
+
+	actor->length = length ? : pages * PAGE_CACHE_SIZE;
+	actor->page = page;
+	actor->pages = pages;
+	actor->next_page = 0;
+	actor->pageaddr = NULL;
+	actor->squashfs_first_page = direct_first_page;
+	actor->squashfs_next_page = direct_next_page;
+	actor->squashfs_finish_page = direct_finish_page;
+	return actor;
+}
diff --git a/fs/squashfs/page_actor.h b/fs/squashfs/page_actor.h
new file mode 100644
index 0000000..26dd820
--- /dev/null
+++ b/fs/squashfs/page_actor.h
@@ -0,0 +1,81 @@
+#ifndef PAGE_ACTOR_H
+#define PAGE_ACTOR_H
+/*
+ * Copyright (c) 2013
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef CONFIG_SQUASHFS_FILE_DIRECT
+struct squashfs_page_actor {
+	void	**page;
+	int	pages;
+	int	length;
+	int	next_page;
+};
+
+static inline struct squashfs_page_actor *squashfs_page_actor_init(void **page,
+	int pages, int length)
+{
+	struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
+
+	if (actor == NULL)
+		return NULL;
+
+	actor->length = length ? : pages * PAGE_CACHE_SIZE;
+	actor->page = page;
+	actor->pages = pages;
+	actor->next_page = 0;
+	return actor;
+}
+
+static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
+{
+	actor->next_page = 1;
+	return actor->page[0];
+}
+
+static inline void *squashfs_next_page(struct squashfs_page_actor *actor)
+{
+	return actor->next_page == actor->pages ? NULL :
+		actor->page[actor->next_page++];
+}
+
+static inline void squashfs_finish_page(struct squashfs_page_actor *actor)
+{
+	/* empty */
+}
+#else
+struct squashfs_page_actor {
+	union {
+		void		**buffer;
+		struct page	**page;
+	};
+	void	*pageaddr;
+	void    *(*squashfs_first_page)(struct squashfs_page_actor *);
+	void    *(*squashfs_next_page)(struct squashfs_page_actor *);
+	void    (*squashfs_finish_page)(struct squashfs_page_actor *);
+	int	pages;
+	int	length;
+	int	next_page;
+};
+
+extern struct squashfs_page_actor *squashfs_page_actor_init(void **, int, int);
+extern struct squashfs_page_actor *squashfs_page_actor_init_special(struct page
+							 **, int, int);
+static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
+{
+	return actor->squashfs_first_page(actor);
+}
+static inline void *squashfs_next_page(struct squashfs_page_actor *actor)
+{
+	return actor->squashfs_next_page(actor);
+}
+static inline void squashfs_finish_page(struct squashfs_page_actor *actor)
+{
+	actor->squashfs_finish_page(actor);
+}
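+
+/*
+ * Typical calling pattern (illustrative sketch):
+ *
+ *	void *addr = squashfs_first_page(actor);
+ *
+ *	while (addr != NULL) {
+ *		... produce up to PAGE_CACHE_SIZE bytes at addr ...
+ *		addr = squashfs_next_page(actor);
+ *	}
+ *	squashfs_finish_page(actor);
+ */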
+#endif
+#endif
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
index d126651..887d6d2 100644
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -25,11 +25,11 @@
 
 #define ERROR(s, args...)	pr_err("SQUASHFS error: "s, ## args)
 
-#define WARNING(s, args...)	pr_warning("SQUASHFS: "s, ## args)
+#define WARNING(s, args...)	pr_warn("SQUASHFS: "s, ## args)
 
 /* block.c */
-extern int squashfs_read_data(struct super_block *, void **, u64, int, u64 *,
-				int, int);
+extern int squashfs_read_data(struct super_block *, u64, int, u64 *,
+				struct squashfs_page_actor *);
 
 /* cache.c */
 extern struct squashfs_cache *squashfs_cache_init(char *, int, int);
@@ -48,7 +48,14 @@
 
 /* decompressor.c */
 extern const struct squashfs_decompressor *squashfs_lookup_decompressor(int);
-extern void *squashfs_decompressor_init(struct super_block *, unsigned short);
+extern void *squashfs_decompressor_setup(struct super_block *, unsigned short);
+
+/* decompressor_xxx.c */
+extern void *squashfs_decompressor_create(struct squashfs_sb_info *, void *);
+extern void squashfs_decompressor_destroy(struct squashfs_sb_info *);
+extern int squashfs_decompress(struct squashfs_sb_info *, struct buffer_head **,
+	int, int, int, struct squashfs_page_actor *);
+extern int squashfs_max_decompressors(void);
 
 /* export.c */
 extern __le64 *squashfs_read_inode_lookup_table(struct super_block *, u64, u64,
@@ -59,6 +66,13 @@
 extern __le64 *squashfs_read_fragment_index_table(struct super_block *,
 				u64, u64, unsigned int);
 
+/* file.c */
+void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int,
+				int);
+
+/* file_xxx.c */
+extern int squashfs_readpage_block(struct page *, u64, int);
+
 /* id.c */
 extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *);
 extern __le64 *squashfs_read_id_index_table(struct super_block *, u64, u64,
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index 9e2349d..506f4ba 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -87,7 +87,7 @@
 #define SQUASHFS_COMP_OPTS(flags)		SQUASHFS_BIT(flags, \
 						SQUASHFS_COMP_OPT)
 
-/* Max number of types and file types */
+/* Inode types including extended types */
 #define SQUASHFS_DIR_TYPE		1
 #define SQUASHFS_REG_TYPE		2
 #define SQUASHFS_SYMLINK_TYPE		3
@@ -103,6 +103,9 @@
 #define SQUASHFS_LFIFO_TYPE		13
 #define SQUASHFS_LSOCKET_TYPE		14
 
+/* Max type value stored in directory entry */
+#define SQUASHFS_MAX_DIR_TYPE		7
+
 /* Xattr types */
 #define SQUASHFS_XATTR_USER             0
 #define SQUASHFS_XATTR_TRUSTED          1
@@ -237,6 +240,7 @@
 #define LZMA_COMPRESSION	2
 #define LZO_COMPRESSION		3
 #define XZ_COMPRESSION		4
+#define LZ4_COMPRESSION		5
 
 struct squashfs_super_block {
 	__le32			s_magic;
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
index 52934a2..1da565c 100644
--- a/fs/squashfs/squashfs_fs_sb.h
+++ b/fs/squashfs/squashfs_fs_sb.h
@@ -50,6 +50,7 @@
 	wait_queue_head_t	wait_queue;
 	struct squashfs_cache	*cache;
 	void			**data;
+	struct squashfs_page_actor	*actor;
 };
 
 struct squashfs_sb_info {
@@ -63,10 +64,9 @@
 	__le64					*id_table;
 	__le64					*fragment_index;
 	__le64					*xattr_id_table;
-	struct mutex				read_data_mutex;
 	struct mutex				meta_index_mutex;
 	struct meta_index			*meta_index;
-	void					*stream;
+	struct squashfs_stream			*stream;
 	__le64					*inode_lookup_table;
 	u64					inode_table;
 	u64					directory_table;
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 60553a9..5056bab 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -27,6 +27,8 @@
  * the filesystem.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/fs.h>
 #include <linux/vfs.h>
 #include <linux/slab.h>
@@ -98,7 +100,6 @@
 	msblk->devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
 	msblk->devblksize_log2 = ffz(~msblk->devblksize);
 
-	mutex_init(&msblk->read_data_mutex);
 	mutex_init(&msblk->meta_index_mutex);
 
 	/*
@@ -206,13 +207,14 @@
 		goto failed_mount;
 
 	/* Allocate read_page block */
-	msblk->read_page = squashfs_cache_init("data", 1, msblk->block_size);
+	msblk->read_page = squashfs_cache_init("data",
+		squashfs_max_decompressors(), msblk->block_size);
 	if (msblk->read_page == NULL) {
 		ERROR("Failed to allocate read_page block\n");
 		goto failed_mount;
 	}
 
-	msblk->stream = squashfs_decompressor_init(sb, flags);
+	msblk->stream = squashfs_decompressor_setup(sb, flags);
 	if (IS_ERR(msblk->stream)) {
 		err = PTR_ERR(msblk->stream);
 		msblk->stream = NULL;
@@ -336,7 +338,7 @@
 	squashfs_cache_delete(msblk->block_cache);
 	squashfs_cache_delete(msblk->fragment_cache);
 	squashfs_cache_delete(msblk->read_page);
-	squashfs_decompressor_free(msblk, msblk->stream);
+	squashfs_decompressor_destroy(msblk);
 	kfree(msblk->inode_lookup_table);
 	kfree(msblk->fragment_index);
 	kfree(msblk->id_table);
@@ -371,6 +373,7 @@
 
 static int squashfs_remount(struct super_block *sb, int *flags, char *data)
 {
+	sync_filesystem(sb);
 	*flags |= MS_RDONLY;
 	return 0;
 }
@@ -383,7 +386,7 @@
 		squashfs_cache_delete(sbi->block_cache);
 		squashfs_cache_delete(sbi->fragment_cache);
 		squashfs_cache_delete(sbi->read_page);
-		squashfs_decompressor_free(sbi, sbi->stream);
+		squashfs_decompressor_destroy(sbi);
 		kfree(sbi->id_table);
 		kfree(sbi->fragment_index);
 		kfree(sbi->meta_index);
@@ -447,8 +450,7 @@
 		return err;
 	}
 
-	printk(KERN_INFO "squashfs: version 4.0 (2009/01/31) "
-		"Phillip Lougher\n");
+	pr_info("version 4.0 (2009/01/31) Phillip Lougher\n");
 
 	return 0;
 }
diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c
index 1760b7d1..c609624 100644
--- a/fs/squashfs/xz_wrapper.c
+++ b/fs/squashfs/xz_wrapper.c
@@ -32,44 +32,70 @@
 #include "squashfs_fs_sb.h"
 #include "squashfs.h"
 #include "decompressor.h"
+#include "page_actor.h"
 
 struct squashfs_xz {
 	struct xz_dec *state;
 	struct xz_buf buf;
 };
 
-struct comp_opts {
+struct disk_comp_opts {
 	__le32 dictionary_size;
 	__le32 flags;
 };
 
-static void *squashfs_xz_init(struct squashfs_sb_info *msblk, void *buff,
-	int len)
+struct comp_opts {
+	int dict_size;
+};
+
+static void *squashfs_xz_comp_opts(struct squashfs_sb_info *msblk,
+	void *buff, int len)
 {
-	struct comp_opts *comp_opts = buff;
-	struct squashfs_xz *stream;
-	int dict_size = msblk->block_size;
-	int err, n;
+	struct disk_comp_opts *comp_opts = buff;
+	struct comp_opts *opts;
+	int err = 0, n;
+
+	opts = kmalloc(sizeof(*opts), GFP_KERNEL);
+	if (opts == NULL) {
+		err = -ENOMEM;
+		goto out2;
+	}
 
 	if (comp_opts) {
 		/* check compressor options are the expected length */
 		if (len < sizeof(*comp_opts)) {
 			err = -EIO;
-			goto failed;
+			goto out;
 		}
 
-		dict_size = le32_to_cpu(comp_opts->dictionary_size);
+		opts->dict_size = le32_to_cpu(comp_opts->dictionary_size);
 
 		/* the dictionary size should be 2^n or 2^n+2^(n+1) */
-		n = ffs(dict_size) - 1;
-		if (dict_size != (1 << n) && dict_size != (1 << n) +
+		n = ffs(opts->dict_size) - 1;
+		if (opts->dict_size != (1 << n) && opts->dict_size != (1 << n) +
 						(1 << (n + 1))) {
 			err = -EIO;
-			goto failed;
+			goto out;
 		}
-	}
+	} else
+		/* use defaults */
+		opts->dict_size = max_t(int, msblk->block_size,
+							SQUASHFS_METADATA_SIZE);
 
-	dict_size = max_t(int, dict_size, SQUASHFS_METADATA_SIZE);
+	return opts;
+
+out:
+	kfree(opts);
+out2:
+	return ERR_PTR(err);
+}
+
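+/*
+ * Example: a dictionary_size of 12288 passes the check above, since
+ * 12288 = 2^12 + 2^13 (n = 12); a value such as 12000 is rejected.
+ */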
+
+static void *squashfs_xz_init(struct squashfs_sb_info *msblk, void *buff)
+{
+	struct comp_opts *comp_opts = buff;
+	struct squashfs_xz *stream;
+	int err;
 
 	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
 	if (stream == NULL) {
@@ -77,7 +103,7 @@
 		goto failed;
 	}
 
-	stream->state = xz_dec_init(XZ_PREALLOC, dict_size);
+	stream->state = xz_dec_init(XZ_PREALLOC, comp_opts->dict_size);
 	if (stream->state == NULL) {
 		kfree(stream);
 		err = -ENOMEM;
@@ -103,42 +129,37 @@
 }
 
 
-static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void **buffer,
-	struct buffer_head **bh, int b, int offset, int length, int srclength,
-	int pages)
+static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
+	struct buffer_head **bh, int b, int offset, int length,
+	struct squashfs_page_actor *output)
 {
 	enum xz_ret xz_err;
-	int avail, total = 0, k = 0, page = 0;
-	struct squashfs_xz *stream = msblk->stream;
-
-	mutex_lock(&msblk->read_data_mutex);
+	int avail, total = 0, k = 0;
+	struct squashfs_xz *stream = strm;
 
 	xz_dec_reset(stream->state);
 	stream->buf.in_pos = 0;
 	stream->buf.in_size = 0;
 	stream->buf.out_pos = 0;
 	stream->buf.out_size = PAGE_CACHE_SIZE;
-	stream->buf.out = buffer[page++];
+	stream->buf.out = squashfs_first_page(output);
 
 	do {
 		if (stream->buf.in_pos == stream->buf.in_size && k < b) {
 			avail = min(length, msblk->devblksize - offset);
 			length -= avail;
-			wait_on_buffer(bh[k]);
-			if (!buffer_uptodate(bh[k]))
-				goto release_mutex;
-
 			stream->buf.in = bh[k]->b_data + offset;
 			stream->buf.in_size = avail;
 			stream->buf.in_pos = 0;
 			offset = 0;
 		}
 
-		if (stream->buf.out_pos == stream->buf.out_size
-							&& page < pages) {
-			stream->buf.out = buffer[page++];
-			stream->buf.out_pos = 0;
-			total += PAGE_CACHE_SIZE;
+		if (stream->buf.out_pos == stream->buf.out_size) {
+			stream->buf.out = squashfs_next_page(output);
+			if (stream->buf.out != NULL) {
+				stream->buf.out_pos = 0;
+				total += PAGE_CACHE_SIZE;
+			}
 		}
 
 		xz_err = xz_dec_run(stream->state, &stream->buf);
@@ -147,23 +168,14 @@
 			put_bh(bh[k++]);
 	} while (xz_err == XZ_OK);
 
-	if (xz_err != XZ_STREAM_END) {
-		ERROR("xz_dec_run error, data probably corrupt\n");
-		goto release_mutex;
-	}
+	squashfs_finish_page(output);
 
-	if (k < b) {
-		ERROR("xz_uncompress error, input remaining\n");
-		goto release_mutex;
-	}
+	if (xz_err != XZ_STREAM_END || k < b)
+		goto out;
 
-	total += stream->buf.out_pos;
-	mutex_unlock(&msblk->read_data_mutex);
-	return total;
+	return total + stream->buf.out_pos;
 
-release_mutex:
-	mutex_unlock(&msblk->read_data_mutex);
-
+out:
 	for (; k < b; k++)
 		put_bh(bh[k]);
 
@@ -172,6 +184,7 @@
 
 const struct squashfs_decompressor squashfs_xz_comp_ops = {
 	.init = squashfs_xz_init,
+	.comp_opts = squashfs_xz_comp_opts,
 	.free = squashfs_xz_free,
 	.decompress = squashfs_xz_uncompress,
 	.id = XZ_COMPRESSION,
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c
index 55d918f..8727cab 100644
--- a/fs/squashfs/zlib_wrapper.c
+++ b/fs/squashfs/zlib_wrapper.c
@@ -32,8 +32,9 @@
 #include "squashfs_fs_sb.h"
 #include "squashfs.h"
 #include "decompressor.h"
+#include "page_actor.h"
 
-static void *zlib_init(struct squashfs_sb_info *dummy, void *buff, int len)
+static void *zlib_init(struct squashfs_sb_info *dummy, void *buff)
 {
 	z_stream *stream = kmalloc(sizeof(z_stream), GFP_KERNEL);
 	if (stream == NULL)
@@ -61,44 +62,37 @@
 }
 
 
-static int zlib_uncompress(struct squashfs_sb_info *msblk, void **buffer,
-	struct buffer_head **bh, int b, int offset, int length, int srclength,
-	int pages)
+static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
+	struct buffer_head **bh, int b, int offset, int length,
+	struct squashfs_page_actor *output)
 {
-	int zlib_err, zlib_init = 0;
-	int k = 0, page = 0;
-	z_stream *stream = msblk->stream;
+	int zlib_err, zlib_init = 0, k = 0;
+	z_stream *stream = strm;
 
-	mutex_lock(&msblk->read_data_mutex);
-
-	stream->avail_out = 0;
+	stream->avail_out = PAGE_CACHE_SIZE;
+	stream->next_out = squashfs_first_page(output);
 	stream->avail_in = 0;
 
 	do {
 		if (stream->avail_in == 0 && k < b) {
 			int avail = min(length, msblk->devblksize - offset);
 			length -= avail;
-			wait_on_buffer(bh[k]);
-			if (!buffer_uptodate(bh[k]))
-				goto release_mutex;
-
 			stream->next_in = bh[k]->b_data + offset;
 			stream->avail_in = avail;
 			offset = 0;
 		}
 
-		if (stream->avail_out == 0 && page < pages) {
-			stream->next_out = buffer[page++];
-			stream->avail_out = PAGE_CACHE_SIZE;
+		if (stream->avail_out == 0) {
+			stream->next_out = squashfs_next_page(output);
+			if (stream->next_out != NULL)
+				stream->avail_out = PAGE_CACHE_SIZE;
 		}
 
 		if (!zlib_init) {
 			zlib_err = zlib_inflateInit(stream);
 			if (zlib_err != Z_OK) {
-				ERROR("zlib_inflateInit returned unexpected "
-					"result 0x%x, srclength %d\n",
-					zlib_err, srclength);
-				goto release_mutex;
+				squashfs_finish_page(output);
+				goto out;
 			}
 			zlib_init = 1;
 		}
@@ -109,29 +103,21 @@
 			put_bh(bh[k++]);
 	} while (zlib_err == Z_OK);
 
-	if (zlib_err != Z_STREAM_END) {
-		ERROR("zlib_inflate error, data probably corrupt\n");
-		goto release_mutex;
-	}
+	squashfs_finish_page(output);
+
+	if (zlib_err != Z_STREAM_END)
+		goto out;
 
 	zlib_err = zlib_inflateEnd(stream);
-	if (zlib_err != Z_OK) {
-		ERROR("zlib_inflate error, data probably corrupt\n");
-		goto release_mutex;
-	}
+	if (zlib_err != Z_OK)
+		goto out;
 
-	if (k < b) {
-		ERROR("zlib_uncompress error, data remaining\n");
-		goto release_mutex;
-	}
+	if (k < b)
+		goto out;
 
-	length = stream->total_out;
-	mutex_unlock(&msblk->read_data_mutex);
-	return length;
+	return stream->total_out;
 
-release_mutex:
-	mutex_unlock(&msblk->read_data_mutex);
-
+out:
 	for (; k < b; k++)
 		put_bh(bh[k]);
 
diff --git a/fs/super.c b/fs/super.c
index fd3281d..4b56bac 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -213,6 +213,7 @@
 err_out:
 	security_sb_free(s);
 	destroy_sb_writers(s);
+out_free_sb:
 	kfree(s);
 	s = NULL;
 	goto out;
@@ -396,6 +397,11 @@
 
 		evict_inodes(sb);
 
+		/*
+		 * Tear down the deferred direct-I/O completion workqueue,
+		 * if one was ever created, before the fs-specific put_super.
+		 */
+		if (sb->s_dio_done_wq) {
+			destroy_workqueue(sb->s_dio_done_wq);
+			sb->s_dio_done_wq = NULL;
+		}
+
 		if (sop->put_super)
 			sop->put_super(sb);
 
@@ -753,7 +759,7 @@
 	struct super_block *sb, *p = NULL;
 
 	spin_lock(&sb_lock);
-	list_for_each_entry(sb, &super_blocks, s_list) {
+	list_for_each_entry_reverse(sb, &super_blocks, s_list) {
 		if (hlist_unhashed(&sb->s_instances))
 			continue;
 		sb->s_count++;
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 7b3792e..2829b66 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -60,6 +60,7 @@
 {
 	struct sysv_sb_info *sbi = SYSV_SB(sb);
 
+	sync_filesystem(sb);
 	if (sbi->s_forced_ro)
 		*flags |= MS_RDONLY;
 	return 0;
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 32b644f..0013142 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -8,6 +8,7 @@
  *
  */
 
+#include <linux/alarmtimer.h>
 #include <linux/file.h>
 #include <linux/poll.h>
 #include <linux/init.h>
@@ -26,7 +27,10 @@
 #include <linux/rcupdate.h>
 
 struct timerfd_ctx {
-	struct hrtimer tmr;
+	union {
+		struct hrtimer tmr;
+		struct alarm alarm;
+	} t;
 	ktime_t tintv;
 	ktime_t moffs;
 	wait_queue_head_t wqh;
@@ -41,14 +45,19 @@
 static LIST_HEAD(cancel_list);
 static DEFINE_SPINLOCK(cancel_lock);
 
+static inline bool isalarm(struct timerfd_ctx *ctx)
+{
+	return ctx->clockid == CLOCK_REALTIME_ALARM ||
+		ctx->clockid == CLOCK_BOOTTIME_ALARM;
+}
+
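+/*
+ * Exactly one member of the ctx->t union is in use at any time: the
+ * member is selected by ctx->clockid when the timerfd is created, so
+ * isalarm() gives a stable answer for the lifetime of the context.
+ */
+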
 /*
  * This gets called when the timer event triggers. We set the "expired"
  * flag, but we do not re-arm the timer (in case it's necessary,
  * tintv.tv64 != 0) until the timer is accessed.
  */
-static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr)
+static void timerfd_triggered(struct timerfd_ctx *ctx)
 {
-	struct timerfd_ctx *ctx = container_of(htmr, struct timerfd_ctx, tmr);
 	unsigned long flags;
 
 	spin_lock_irqsave(&ctx->wqh.lock, flags);
@@ -56,10 +65,25 @@
 	ctx->ticks++;
 	wake_up_locked(&ctx->wqh);
 	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
+}
 
+static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr)
+{
+	struct timerfd_ctx *ctx = container_of(htmr, struct timerfd_ctx,
+					       t.tmr);
+	timerfd_triggered(ctx);
 	return HRTIMER_NORESTART;
 }
 
+static enum alarmtimer_restart timerfd_alarmproc(struct alarm *alarm,
+	ktime_t now)
+{
+	struct timerfd_ctx *ctx = container_of(alarm, struct timerfd_ctx,
+					       t.alarm);
+	timerfd_triggered(ctx);
+	return ALARMTIMER_NORESTART;
+}
+
 /*
  * Called when the clock was set to cancel the timers in the cancel
  * list. This will wake up processes waiting on these timers. The
@@ -107,8 +131,9 @@
 
 static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags)
 {
-	if (ctx->clockid == CLOCK_REALTIME && (flags & TFD_TIMER_ABSTIME) &&
-	    (flags & TFD_TIMER_CANCEL_ON_SET)) {
+	if ((ctx->clockid == CLOCK_REALTIME ||
+	     ctx->clockid == CLOCK_REALTIME_ALARM) &&
+	    (flags & TFD_TIMER_ABSTIME) && (flags & TFD_TIMER_CANCEL_ON_SET)) {
 		if (!ctx->might_cancel) {
 			ctx->might_cancel = true;
 			spin_lock(&cancel_lock);
@@ -124,7 +149,11 @@
 {
 	ktime_t remaining;
 
-	remaining = hrtimer_expires_remaining(&ctx->tmr);
+	if (isalarm(ctx))
+		remaining = alarm_expires_remaining(&ctx->t.alarm);
+	else
+		remaining = hrtimer_expires_remaining(&ctx->t.tmr);
+
 	return remaining.tv64 < 0 ? ktime_set(0, 0): remaining;
 }
 
@@ -142,11 +171,28 @@
 	ctx->expired = 0;
 	ctx->ticks = 0;
 	ctx->tintv = timespec_to_ktime(ktmr->it_interval);
-	hrtimer_init(&ctx->tmr, clockid, htmode);
-	hrtimer_set_expires(&ctx->tmr, texp);
-	ctx->tmr.function = timerfd_tmrproc;
+
+	if (isalarm(ctx)) {
+		alarm_init(&ctx->t.alarm,
+			   ctx->clockid == CLOCK_REALTIME_ALARM ?
+			   ALARM_REALTIME : ALARM_BOOTTIME,
+			   timerfd_alarmproc);
+	} else {
+		hrtimer_init(&ctx->t.tmr, clockid, htmode);
+		hrtimer_set_expires(&ctx->t.tmr, texp);
+		ctx->t.tmr.function = timerfd_tmrproc;
+	}
+
 	if (texp.tv64 != 0) {
-		hrtimer_start(&ctx->tmr, texp, htmode);
+		if (isalarm(ctx)) {
+			if (flags & TFD_TIMER_ABSTIME)
+				alarm_start(&ctx->t.alarm, texp);
+			else
+				alarm_start_relative(&ctx->t.alarm, texp);
+		} else {
+			hrtimer_start(&ctx->t.tmr, texp, htmode);
+		}
+
 		if (timerfd_canceled(ctx))
 			return -ECANCELED;
 	}
@@ -158,7 +204,11 @@
 	struct timerfd_ctx *ctx = file->private_data;
 
 	timerfd_remove_cancel(ctx);
-	hrtimer_cancel(&ctx->tmr);
+
+	if (isalarm(ctx))
+		alarm_cancel(&ctx->t.alarm);
+	else
+		hrtimer_cancel(&ctx->t.tmr);
 	kfree_rcu(ctx, rcu);
 	return 0;
 }
@@ -215,9 +265,15 @@
 			 * callback to avoid DoS attacks specifying a very
 			 * short timer period.
 			 */
-			ticks += hrtimer_forward_now(&ctx->tmr,
-						     ctx->tintv) - 1;
-			hrtimer_restart(&ctx->tmr);
+			if (isalarm(ctx)) {
+				ticks += alarm_forward_now(
+					&ctx->t.alarm, ctx->tintv) - 1;
+				alarm_restart(&ctx->t.alarm);
+			} else {
+				ticks += hrtimer_forward_now(&ctx->t.tmr,
+							     ctx->tintv) - 1;
+				hrtimer_restart(&ctx->t.tmr);
+			}
 		}
 		ctx->expired = 0;
 		ctx->ticks = 0;
@@ -259,7 +315,10 @@
 
 	if ((flags & ~TFD_CREATE_FLAGS) ||
 	    (clockid != CLOCK_MONOTONIC &&
-	     clockid != CLOCK_REALTIME))
+	     clockid != CLOCK_REALTIME &&
+	     clockid != CLOCK_REALTIME_ALARM &&
+	     clockid != CLOCK_BOOTTIME &&
+	     clockid != CLOCK_BOOTTIME_ALARM))
 		return -EINVAL;
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -268,7 +327,15 @@
 
 	init_waitqueue_head(&ctx->wqh);
 	ctx->clockid = clockid;
-	hrtimer_init(&ctx->tmr, clockid, HRTIMER_MODE_ABS);
+
+	if (isalarm(ctx))
+		alarm_init(&ctx->t.alarm,
+			   ctx->clockid == CLOCK_REALTIME_ALARM ?
+			   ALARM_REALTIME : ALARM_BOOTTIME,
+			   timerfd_alarmproc);
+	else
+		hrtimer_init(&ctx->t.tmr, clockid, HRTIMER_MODE_ABS);
+
 	ctx->moffs = ktime_get_monotonic_offset();
 
 	ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx,
@@ -305,8 +372,14 @@
 	 */
 	for (;;) {
 		spin_lock_irq(&ctx->wqh.lock);
-		if (hrtimer_try_to_cancel(&ctx->tmr) >= 0)
-			break;
+
+		if (isalarm(ctx)) {
+			if (alarm_try_to_cancel(&ctx->t.alarm) >= 0)
+				break;
+		} else {
+			if (hrtimer_try_to_cancel(&ctx->t.tmr) >= 0)
+				break;
+		}
 		spin_unlock_irq(&ctx->wqh.lock);
 		cpu_relax();
 	}
@@ -317,8 +390,12 @@
 	 * We do not update "ticks" and "expired" since the timer will be
 	 * re-programmed again in the following timerfd_setup() call.
 	 */
-	if (ctx->expired && ctx->tintv.tv64)
-		hrtimer_forward_now(&ctx->tmr, ctx->tintv);
+	if (ctx->expired && ctx->tintv.tv64) {
+		if (isalarm(ctx))
+			alarm_forward_now(&ctx->t.alarm, ctx->tintv);
+		else
+			hrtimer_forward_now(&ctx->t.tmr, ctx->tintv);
+	}
 
 	old->it_value = ktime_to_timespec(timerfd_get_remaining(ctx));
 	old->it_interval = ktime_to_timespec(ctx->tintv);
@@ -345,9 +422,18 @@
 	spin_lock_irq(&ctx->wqh.lock);
 	if (ctx->expired && ctx->tintv.tv64) {
 		ctx->expired = 0;
-		ctx->ticks +=
-			hrtimer_forward_now(&ctx->tmr, ctx->tintv) - 1;
-		hrtimer_restart(&ctx->tmr);
+
+		if (isalarm(ctx)) {
+			ctx->ticks +=
+				alarm_forward_now(
+					&ctx->t.alarm, ctx->tintv) - 1;
+			alarm_restart(&ctx->t.alarm);
+		} else {
+			ctx->ticks +=
+				hrtimer_forward_now(&ctx->t.tmr, ctx->tintv)
+				- 1;
+			hrtimer_restart(&ctx->t.tmr);
+		}
 	}
 	t->it_value = ktime_to_timespec(timerfd_get_remaining(ctx));
 	t->it_interval = ktime_to_timespec(ctx->tintv);
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index a335e4e..2c3aa9d 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1278,13 +1278,14 @@
 	return err;
 }
 
-static void ubifs_invalidatepage(struct page *page, unsigned long offset)
+static void ubifs_invalidatepage(struct page *page, unsigned int offset,
+				 unsigned int length)
 {
 	struct inode *inode = page->mapping->host;
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 
 	ubifs_assert(PagePrivate(page));
-	if (offset)
+	if (offset || length < PAGE_CACHE_SIZE)
 		/* Partial page remains dirty */
 		return;
 
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 05115d7..3bf098a 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1840,6 +1840,7 @@
 	int err;
 	struct ubifs_info *c = sb->s_fs_info;
 
+	sync_filesystem(sb);
 	dbg_gen("old flags %#lx, new flags %#x", sb->s_flags, *flags);
 
 	err = ubifs_parse_options(c, data, 1);
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 839a2ba..32f5297 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -629,6 +629,7 @@
 	struct udf_options uopt;
 	struct udf_sb_info *sbi = UDF_SB(sb);
 	int error = 0;
+	sync_filesystem(sb);
 
 	if (sbi->s_lvid_bh) {
 		int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 329f2f5..b8c6791 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -1280,6 +1280,7 @@
 	unsigned new_mount_opt, ufstype;
 	unsigned flags;
 
+	sync_filesystem(sb);
 	lock_ufs(sb);
 	mutex_lock(&UFS_SB(sb)->s_lock);
 	uspi = UFS_SB(sb)->s_uspi;
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index cfbb4c1..923996c 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -86,14 +86,6 @@
 		bh->b_end_io(bh, !ioend->io_error);
 	}
 
-	if (ioend->io_iocb) {
-		inode_dio_done(ioend->io_inode);
-		if (ioend->io_isasync) {
-			aio_complete(ioend->io_iocb, ioend->io_error ?
-					ioend->io_error : ioend->io_result, 0);
-		}
-	}
-
 	mempool_free(ioend, xfs_ioend_pool);
 }
 
@@ -281,7 +273,6 @@
 	 * all the I/O from calling the completion routine too early.
 	 */
 	atomic_set(&ioend->io_remaining, 1);
-	ioend->io_isasync = 0;
 	ioend->io_isdirect = 0;
 	ioend->io_error = 0;
 	ioend->io_list = NULL;
@@ -291,8 +282,6 @@
 	ioend->io_buffer_tail = NULL;
 	ioend->io_offset = 0;
 	ioend->io_size = 0;
-	ioend->io_iocb = NULL;
-	ioend->io_result = 0;
 	ioend->io_append_trans = NULL;
 
 	INIT_WORK(&ioend->io_work, xfs_end_io);
@@ -843,10 +832,11 @@
 STATIC void
 xfs_vm_invalidatepage(
 	struct page		*page,
-	unsigned long		offset)
+	unsigned int		offset,
+	unsigned int		length)
 {
 	trace_xfs_invalidatepage(page->mapping->host, page, offset);
-	block_invalidatepage(page, offset);
+	block_invalidatepage(page, offset, PAGE_CACHE_SIZE - offset);
 }
 
 /*
@@ -910,7 +900,7 @@
 
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 out_invalidate:
-	xfs_vm_invalidatepage(page, 0);
+	xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE);
 	return;
 }
 
@@ -1290,8 +1280,10 @@
 		if (create || !ISUNWRITTEN(&imap))
 			xfs_map_buffer(inode, bh_result, &imap, offset);
 		if (create && ISUNWRITTEN(&imap)) {
-			if (direct)
+			if (direct) {
 				bh_result->b_private = inode;
+				set_buffer_defer_completion(bh_result);
+			}
 			set_buffer_unwritten(bh_result);
 		}
 	}
@@ -1388,9 +1380,7 @@
 	struct kiocb		*iocb,
 	loff_t			offset,
 	ssize_t			size,
-	void			*private,
-	int			ret,
-	bool			is_async)
+	void			*private)
 {
 	struct xfs_ioend	*ioend = iocb->private;
 
@@ -1412,17 +1402,10 @@
 
 	ioend->io_offset = offset;
 	ioend->io_size = size;
-	ioend->io_iocb = iocb;
-	ioend->io_result = ret;
 	if (private && size > 0)
 		ioend->io_type = XFS_IO_UNWRITTEN;
 
-	if (is_async) {
-		ioend->io_isasync = 1;
-		xfs_finish_ioend(ioend);
-	} else {
-		xfs_finish_ioend_sync(ioend);
-	}
+	xfs_finish_ioend_sync(ioend);
 }
 
 STATIC ssize_t
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index c325abb..f94dd45 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -45,7 +45,6 @@
 	unsigned int		io_type;	/* delalloc / unwritten */
 	int			io_error;	/* I/O error code */
 	atomic_t		io_remaining;	/* hold count */
-	unsigned int		io_isasync : 1;	/* needs aio_complete */
 	unsigned int		io_isdirect : 1;/* direct I/O */
 	struct inode		*io_inode;	/* file being written to */
 	struct buffer_head	*io_buffer_head;/* buffer linked list head */
@@ -54,8 +53,6 @@
 	xfs_off_t		io_offset;	/* offset in the file */
 	struct work_struct	io_work;	/* xfsdatad work queue */
 	struct xfs_trans	*io_append_trans;/* xact. for size update */
-	struct kiocb		*io_iocb;
-	int			io_result;
 } xfs_ioend_t;
 
 extern const struct address_space_operations xfs_address_space_operations;
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 3033ba5..478c0ad 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1218,6 +1218,7 @@
 	char			*p;
 	int			error;
 
+	sync_filesystem(sb);
 	while ((p = strsep(&options, ",")) != NULL) {
 		int token;
 
diff --git a/fs/yaffs2/Kconfig b/fs/yaffs2/Kconfig
new file mode 100644
index 0000000..408570f
--- /dev/null
+++ b/fs/yaffs2/Kconfig
@@ -0,0 +1,171 @@
+#
+# yaffs file system configurations
+#
+
+config YAFFS_FS
+	tristate "yaffs2 file system support"
+	default n
+	depends on MTD_BLOCK
+	select YAFFS_YAFFS1
+	select YAFFS_YAFFS2
+	help
+	  yaffs2, or Yet Another Flash File System, is a file system
+	  optimised for NAND Flash chips.
+
+	  To compile the yaffs2 file system support as a module, choose M
+	  here: the module will be called yaffs2.
+
+	  If unsure, say N.
+
+	  Further information on yaffs2 is available at
+	  <http://www.aleph1.co.uk/yaffs/>.
+
+config YAFFS_YAFFS1
+	bool "512 byte / page devices"
+	depends on YAFFS_FS
+	default y
+	help
+	  Enable yaffs1 support -- yaffs for 512 byte / page devices
+
+	  Not needed for 2K-page devices.
+
+	  If unsure, say Y.
+
+config YAFFS_9BYTE_TAGS
+	bool "Use older-style on-NAND data format with pageStatus byte"
+	depends on YAFFS_YAFFS1
+	default n
+	help
+
+	  Older-style on-NAND data format has a "pageStatus" byte to record
+	  chunk/page state.  This byte is zero when the page is discarded.
+	  Choose this option if you have existing on-NAND data using this
+	  format that you need to continue to support.  New data written
+	  also uses the older-style format.  Note: Use of this option
+	  generally requires that MTD's oob layout be adjusted to use the
+	  older-style format.  See notes on tags formats and MTD versions
+	  in yaffs_mtdif1.c.
+
+	  If unsure, say N.
+
+config YAFFS_DOES_ECC
+	bool "Lets yaffs do its own ECC"
+	depends on YAFFS_FS && YAFFS_YAFFS1 && !YAFFS_9BYTE_TAGS
+	default n
+	help
+	  This enables yaffs to use its own ECC functions instead of using
+	  the ones from the generic MTD-NAND driver.
+
+	  If unsure, say N.
+
+config YAFFS_ECC_WRONG_ORDER
+	bool "Use the same ecc byte order as Steven Hill's nand_ecc.c"
+	depends on YAFFS_FS && YAFFS_DOES_ECC && !YAFFS_9BYTE_TAGS
+	default n
+	help
+	  This makes yaffs_ecc.c use the same ecc byte order as Steven
+	  Hill's nand_ecc.c. If not set, then you get the same ecc byte
+	  order as SmartMedia.
+
+	  If unsure, say N.
+
+config YAFFS_YAFFS2
+	bool "2048 byte (or larger) / page devices"
+	depends on YAFFS_FS
+	default y
+	help
+	  Enable yaffs2 support -- yaffs for >= 2K bytes per page devices
+
+	  If unsure, say Y.
+
+config YAFFS_AUTO_YAFFS2
+	bool "Autoselect yaffs2 format"
+	depends on YAFFS_YAFFS2
+	default y
+	help
+	  Without this, you need to explicitly use yaffs2 as the file
+	  system type. With this, you can say "yaffs" and yaffs or yaffs2
+	  will be used depending on the device page size (yaffs on
+	  512-byte page devices, yaffs2 on 2K page devices).
+
+	  If unsure, say Y.
+
+config YAFFS_DISABLE_TAGS_ECC
+	bool "Disable yaffs from doing ECC on tags by default"
+	depends on YAFFS_FS && YAFFS_YAFFS2
+	default n
+	help
+	  Selecting this stops yaffs from doing its own ECC calculations
+	  on tags by default, leaving tags ECC to the MTD instead.
+	  This behavior can also be overridden with the tags_ecc_on and
+	  tags_ecc_off mount options.
+
+	  If unsure, say N.
+
+config YAFFS_ALWAYS_CHECK_CHUNK_ERASED
+	bool "Force chunk erase check"
+	depends on YAFFS_FS
+	default n
+	help
+	  Normally yaffs only checks chunks before writing until an erased
+	  chunk is found. This helps to detect any partially written
+	  chunks that might have happened due to power loss.
+
+	  Enabling this forces a check that chunks really are erased in
+	  flash before writing to them. This takes more time but is
+	  potentially a bit more secure.
+
+	  Suggest saying Y during development, while ironing out driver
+	  issues etc. Suggest saying N if you want faster writing.
+
+	  If unsure, say Y.
+
+config YAFFS_EMPTY_LOST_AND_FOUND
+	bool "Empty lost and found on boot"
+	depends on YAFFS_FS
+	default n
+	help
+	  If this is enabled, then the contents of lost and found are
+	  automatically dumped at mount.
+
+	  If unsure, say N.
+
+config YAFFS_DISABLE_BLOCK_REFRESHING
+	bool "Disable yaffs2 block refreshing"
+	depends on YAFFS_FS
+	default n
+	help
+	  If this is set, then block refreshing is disabled.
+	  Block refreshing infrequently refreshes the oldest block in
+	  a yaffs2 file system. This mechanism helps refresh flash to
+	  mitigate data loss, and is particularly useful for MLC.
+
+	  If unsure, say N.
+
+config YAFFS_DISABLE_BACKGROUND
+	bool "Disable yaffs2 background processing"
+	depends on YAFFS_FS
+	default n
+	help
+	  If this is set, then background processing is disabled.
+	  Background processing makes many foreground activities faster.
+
+	  If unsure, say N.
+
+config YAFFS_DISABLE_BAD_BLOCK_MARKING
+	bool "Disable yaffs2 bad block marking"
+	depends on YAFFS_FS
+	default n
+	help
+	  Useful during early flash bring-up, to prevent spurious problems
+	  from causing lots of blocks to be marked bad.
+
+	  If unsure, say N.
+
+config YAFFS_XATTR
+	bool "Enable yaffs2 xattr support"
+	depends on YAFFS_FS
+	default y
+	help
+	  If this is set, then yaffs2 will provide xattr support.
+	  If unsure, say Y.
diff --git a/fs/yaffs2/Makefile b/fs/yaffs2/Makefile
new file mode 100644
index 0000000..f9a9fb1
--- /dev/null
+++ b/fs/yaffs2/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for the linux YAFFS filesystem routines.
+#
+
+obj-$(CONFIG_YAFFS_FS) += yaffs.o
+
+yaffs-y := yaffs_ecc.o yaffs_vfs.o yaffs_guts.o yaffs_checkptrw.o
+yaffs-y += yaffs_packedtags1.o yaffs_packedtags2.o yaffs_nand.o
+yaffs-y += yaffs_tagscompat.o yaffs_tagsmarshall.o
+yaffs-y += yaffs_mtdif.o
+yaffs-y += yaffs_nameval.o yaffs_attribs.o
+yaffs-y += yaffs_allocator.o
+yaffs-y += yaffs_yaffs1.o
+yaffs-y += yaffs_yaffs2.o
+yaffs-y += yaffs_bitmap.o
+yaffs-y += yaffs_summary.o
+yaffs-y += yaffs_verify.o
+
diff --git a/fs/yaffs2/yaffs_allocator.c b/fs/yaffs2/yaffs_allocator.c
new file mode 100644
index 0000000..c8f2861
--- /dev/null
+++ b/fs/yaffs2/yaffs_allocator.c
@@ -0,0 +1,357 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_allocator.h"
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+#include "yportenv.h"
+
+/*
+ * Each entry in yaffs_tnode_list and yaffs_obj_list holds a block
+ * of approx 100 objects that are then allocated singly.
+ * This is basically a simplified slab allocator.
+ *
+ * We don't use the Linux slab allocator because slab does not allow
+ * us to dump all the objects in one hit when we do a umount and tear
+ * down all the tnodes and objects. slab requires that we first free
+ * the individual objects.
+ *
+ * Once yaffs has been mainlined I shall try to motivate for a change
+ * to slab to provide the extra features we need here.
+ */
+
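+/*
+ * Rough shape of the resulting structures (illustrative):
+ *
+ *   alloc_tnode_list -> [slab of ~100 tnodes] -> [slab] -> NULL
+ *   free_tnodes -> tnode -> tnode -> ...  (chained via internal[0])
+ */
+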
+struct yaffs_tnode_list {
+	struct yaffs_tnode_list *next;
+	struct yaffs_tnode *tnodes;
+};
+
+struct yaffs_obj_list {
+	struct yaffs_obj_list *next;
+	struct yaffs_obj *objects;
+};
+
+struct yaffs_allocator {
+	int n_tnodes_created;
+	struct yaffs_tnode *free_tnodes;
+	int n_free_tnodes;
+	struct yaffs_tnode_list *alloc_tnode_list;
+
+	int n_obj_created;
+	struct list_head free_objs;
+	int n_free_objects;
+
+	struct yaffs_obj_list *allocated_obj_list;
+};
+
+static void yaffs_deinit_raw_tnodes(struct yaffs_dev *dev)
+{
+	struct yaffs_allocator *allocator =
+	    (struct yaffs_allocator *)dev->allocator;
+	struct yaffs_tnode_list *tmp;
+
+	if (!allocator) {
+		BUG();
+		return;
+	}
+
+	while (allocator->alloc_tnode_list) {
+		tmp = allocator->alloc_tnode_list->next;
+
+		kfree(allocator->alloc_tnode_list->tnodes);
+		kfree(allocator->alloc_tnode_list);
+		allocator->alloc_tnode_list = tmp;
+	}
+
+	allocator->free_tnodes = NULL;
+	allocator->n_free_tnodes = 0;
+	allocator->n_tnodes_created = 0;
+}
+
+static void yaffs_init_raw_tnodes(struct yaffs_dev *dev)
+{
+	struct yaffs_allocator *allocator = dev->allocator;
+
+	if (!allocator) {
+		BUG();
+		return;
+	}
+
+	allocator->alloc_tnode_list = NULL;
+	allocator->free_tnodes = NULL;
+	allocator->n_free_tnodes = 0;
+	allocator->n_tnodes_created = 0;
+}
+
+static int yaffs_create_tnodes(struct yaffs_dev *dev, int n_tnodes)
+{
+	struct yaffs_allocator *allocator =
+	    (struct yaffs_allocator *)dev->allocator;
+	int i;
+	struct yaffs_tnode *new_tnodes;
+	u8 *mem;
+	struct yaffs_tnode *curr;
+	struct yaffs_tnode *next;
+	struct yaffs_tnode_list *tnl;
+
+	if (!allocator) {
+		BUG();
+		return YAFFS_FAIL;
+	}
+
+	if (n_tnodes < 1)
+		return YAFFS_OK;
+
+	/* make these things */
+	new_tnodes = kmalloc(n_tnodes * dev->tnode_size, GFP_NOFS);
+	mem = (u8 *) new_tnodes;
+
+	if (!new_tnodes) {
+		yaffs_trace(YAFFS_TRACE_ERROR,
+			"yaffs: Could not allocate Tnodes");
+		return YAFFS_FAIL;
+	}
+
+	/* New hookup for wide tnodes */
+	for (i = 0; i < n_tnodes - 1; i++) {
+		curr = (struct yaffs_tnode *)&mem[i * dev->tnode_size];
+		next = (struct yaffs_tnode *)&mem[(i + 1) * dev->tnode_size];
+		curr->internal[0] = next;
+	}
+
+	curr = (struct yaffs_tnode *)&mem[(n_tnodes - 1) * dev->tnode_size];
+	curr->internal[0] = allocator->free_tnodes;
+	allocator->free_tnodes = (struct yaffs_tnode *)mem;
+
+	allocator->n_free_tnodes += n_tnodes;
+	allocator->n_tnodes_created += n_tnodes;
+
+	/* Now add this bunch of tnodes to a list for freeing up.
+	 * NB If we can't add this to the management list it isn't fatal
+	 * but it just means we can't free this bunch of tnodes later.
+	 */
+	tnl = kmalloc(sizeof(struct yaffs_tnode_list), GFP_NOFS);
+	if (!tnl) {
+		yaffs_trace(YAFFS_TRACE_ERROR,
+			"Could not add tnodes to management list");
+		return YAFFS_FAIL;
+	} else {
+		tnl->tnodes = new_tnodes;
+		tnl->next = allocator->alloc_tnode_list;
+		allocator->alloc_tnode_list = tnl;
+	}
+
+	yaffs_trace(YAFFS_TRACE_ALLOCATE, "Tnodes added");
+
+	return YAFFS_OK;
+}
+
+struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev)
+{
+	struct yaffs_allocator *allocator =
+	    (struct yaffs_allocator *)dev->allocator;
+	struct yaffs_tnode *tn = NULL;
+
+	if (!allocator) {
+		BUG();
+		return NULL;
+	}
+
+	/* If there are none left make more */
+	if (!allocator->free_tnodes)
+		yaffs_create_tnodes(dev, YAFFS_ALLOCATION_NTNODES);
+
+	if (allocator->free_tnodes) {
+		tn = allocator->free_tnodes;
+		allocator->free_tnodes = allocator->free_tnodes->internal[0];
+		allocator->n_free_tnodes--;
+	}
+
+	return tn;
+}
+
+/* FreeTnode frees up a tnode and puts it back on the free list */
+void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
+{
+	struct yaffs_allocator *allocator = dev->allocator;
+
+	if (!allocator) {
+		BUG();
+		return;
+	}
+
+	if (tn) {
+		tn->internal[0] = allocator->free_tnodes;
+		allocator->free_tnodes = tn;
+		allocator->n_free_tnodes++;
+	}
+	dev->checkpoint_blocks_required = 0;	/* force recalculation */
+}
+
+/*--------------- yaffs_obj allocation ------------------------
+ *
+ * Free yaffs_objs are stored in a list using obj->siblings.
+ * The blocks of allocated objects are stored in a linked list.
+ */
+
+static void yaffs_init_raw_objs(struct yaffs_dev *dev)
+{
+	struct yaffs_allocator *allocator = dev->allocator;
+
+	if (!allocator) {
+		BUG();
+		return;
+	}
+
+	allocator->allocated_obj_list = NULL;
+	INIT_LIST_HEAD(&allocator->free_objs);
+	allocator->n_free_objects = 0;
+}
+
+static void yaffs_deinit_raw_objs(struct yaffs_dev *dev)
+{
+	struct yaffs_allocator *allocator = dev->allocator;
+	struct yaffs_obj_list *tmp;
+
+	if (!allocator) {
+		BUG();
+		return;
+	}
+
+	while (allocator->allocated_obj_list) {
+		tmp = allocator->allocated_obj_list->next;
+		kfree(allocator->allocated_obj_list->objects);
+		kfree(allocator->allocated_obj_list);
+		allocator->allocated_obj_list = tmp;
+	}
+
+	INIT_LIST_HEAD(&allocator->free_objs);
+	allocator->n_free_objects = 0;
+	allocator->n_obj_created = 0;
+}
+
+static int yaffs_create_free_objs(struct yaffs_dev *dev, int n_obj)
+{
+	struct yaffs_allocator *allocator = dev->allocator;
+	int i;
+	struct yaffs_obj *new_objs;
+	struct yaffs_obj_list *list;
+
+	if (!allocator) {
+		BUG();
+		return YAFFS_FAIL;
+	}
+
+	if (n_obj < 1)
+		return YAFFS_OK;
+
+	/* make these things */
+	new_objs = kmalloc(n_obj * sizeof(struct yaffs_obj), GFP_NOFS);
+	list = kmalloc(sizeof(struct yaffs_obj_list), GFP_NOFS);
+
+	if (!new_objs || !list) {
+		kfree(new_objs);
+		new_objs = NULL;
+		kfree(list);
+		list = NULL;
+		yaffs_trace(YAFFS_TRACE_ALLOCATE,
+			"Could not allocate more objects");
+		return YAFFS_FAIL;
+	}
+
+	/* Hook them into the free list */
+	for (i = 0; i < n_obj; i++)
+		list_add(&new_objs[i].siblings, &allocator->free_objs);
+
+	allocator->n_free_objects += n_obj;
+	allocator->n_obj_created += n_obj;
+
+	/* Now add this bunch of Objects to a list for freeing up. */
+
+	list->objects = new_objs;
+	list->next = allocator->allocated_obj_list;
+	allocator->allocated_obj_list = list;
+
+	return YAFFS_OK;
+}
+
+struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev)
+{
+	struct yaffs_obj *obj = NULL;
+	struct list_head *lh;
+	struct yaffs_allocator *allocator = dev->allocator;
+
+	if (!allocator) {
+		BUG();
+		return obj;
+	}
+
+	/* If there are none left make more */
+	if (list_empty(&allocator->free_objs))
+		yaffs_create_free_objs(dev, YAFFS_ALLOCATION_NOBJECTS);
+
+	if (!list_empty(&allocator->free_objs)) {
+		lh = allocator->free_objs.next;
+		obj = list_entry(lh, struct yaffs_obj, siblings);
+		list_del_init(lh);
+		allocator->n_free_objects--;
+	}
+
+	return obj;
+}
+
+void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj)
+{
+
+	struct yaffs_allocator *allocator = dev->allocator;
+
+	if (!allocator) {
+		BUG();
+		return;
+	}
+
+	/* Link into the free list. */
+	list_add(&obj->siblings, &allocator->free_objs);
+	allocator->n_free_objects++;
+}
+
+void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev)
+{
+
+	if (!dev->allocator) {
+		BUG();
+		return;
+	}
+
+	yaffs_deinit_raw_tnodes(dev);
+	yaffs_deinit_raw_objs(dev);
+	kfree(dev->allocator);
+	dev->allocator = NULL;
+}
+
+void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev)
+{
+	struct yaffs_allocator *allocator;
+
+	if (dev->allocator) {
+		BUG();
+		return;
+	}
+
+	allocator = kmalloc(sizeof(struct yaffs_allocator), GFP_NOFS);
+	if (allocator) {
+		dev->allocator = allocator;
+		yaffs_init_raw_tnodes(dev);
+		yaffs_init_raw_objs(dev);
+	}
+}
+
diff --git a/fs/yaffs2/yaffs_allocator.h b/fs/yaffs2/yaffs_allocator.h
new file mode 100644
index 0000000..a8cc322
--- /dev/null
+++ b/fs/yaffs2/yaffs_allocator.h
@@ -0,0 +1,30 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_ALLOCATOR_H__
+#define __YAFFS_ALLOCATOR_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev);
+void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev);
+
+struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev);
+void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn);
+
+struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev);
+void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj);
+
+#endif
diff --git a/fs/yaffs2/yaffs_attribs.c b/fs/yaffs2/yaffs_attribs.c
new file mode 100644
index 0000000..fdc7f21
--- /dev/null
+++ b/fs/yaffs2/yaffs_attribs.c
@@ -0,0 +1,124 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_attribs.h"
+
+void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh)
+{
+	obj->yst_uid = oh->yst_uid;
+	obj->yst_gid = oh->yst_gid;
+	obj->yst_atime = oh->yst_atime;
+	obj->yst_mtime = oh->yst_mtime;
+	obj->yst_ctime = oh->yst_ctime;
+	obj->yst_rdev = oh->yst_rdev;
+}
+
+void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj)
+{
+	oh->yst_uid = obj->yst_uid;
+	oh->yst_gid = obj->yst_gid;
+	oh->yst_atime = obj->yst_atime;
+	oh->yst_mtime = obj->yst_mtime;
+	oh->yst_ctime = obj->yst_ctime;
+	oh->yst_rdev = obj->yst_rdev;
+
+}
+
+void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c)
+{
+	obj->yst_mtime = Y_CURRENT_TIME;
+	if (do_a)
+		obj->yst_atime = obj->yst_mtime;
+	if (do_c)
+		obj->yst_ctime = obj->yst_mtime;
+}
+
+void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev)
+{
+	yaffs_load_current_time(obj, 1, 1);
+	obj->yst_rdev = rdev;
+	obj->yst_uid = uid;
+	obj->yst_gid = gid;
+}
+
+static loff_t yaffs_get_file_size(struct yaffs_obj *obj)
+{
+	YCHAR *alias = NULL;
+	obj = yaffs_get_equivalent_obj(obj);
+
+	switch (obj->variant_type) {
+	case YAFFS_OBJECT_TYPE_FILE:
+		return obj->variant.file_variant.file_size;
+	case YAFFS_OBJECT_TYPE_SYMLINK:
+		alias = obj->variant.symlink_variant.alias;
+		if (!alias)
+			return 0;
+		return strnlen(alias, YAFFS_MAX_ALIAS_LENGTH);
+	default:
+		return 0;
+	}
+}
+
+int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr)
+{
+	unsigned int valid = attr->ia_valid;
+
+	if (valid & ATTR_MODE)
+		obj->yst_mode = attr->ia_mode;
+	if (valid & ATTR_UID)
+		obj->yst_uid = ia_uid_read(attr);
+	if (valid & ATTR_GID)
+		obj->yst_gid = ia_gid_read(attr);
+
+	if (valid & ATTR_ATIME)
+		obj->yst_atime = Y_TIME_CONVERT(attr->ia_atime);
+	if (valid & ATTR_CTIME)
+		obj->yst_ctime = Y_TIME_CONVERT(attr->ia_ctime);
+	if (valid & ATTR_MTIME)
+		obj->yst_mtime = Y_TIME_CONVERT(attr->ia_mtime);
+
+	if (valid & ATTR_SIZE)
+		yaffs_resize_file(obj, attr->ia_size);
+
+	yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
+
+	return YAFFS_OK;
+
+}
+
+int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr)
+{
+	unsigned int valid = 0;
+
+	attr->ia_mode = obj->yst_mode;
+	valid |= ATTR_MODE;
+	ia_uid_write(attr, obj->yst_uid);
+	valid |= ATTR_UID;
+	ia_gid_write(attr, obj->yst_gid);
+	valid |= ATTR_GID;
+
+	Y_TIME_CONVERT(attr->ia_atime) = obj->yst_atime;
+	valid |= ATTR_ATIME;
+	Y_TIME_CONVERT(attr->ia_ctime) = obj->yst_ctime;
+	valid |= ATTR_CTIME;
+	Y_TIME_CONVERT(attr->ia_mtime) = obj->yst_mtime;
+	valid |= ATTR_MTIME;
+
+	attr->ia_size = yaffs_get_file_size(obj);
+	valid |= ATTR_SIZE;
+
+	attr->ia_valid = valid;
+
+	return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_attribs.h b/fs/yaffs2/yaffs_attribs.h
new file mode 100644
index 0000000..121d99d
--- /dev/null
+++ b/fs/yaffs2/yaffs_attribs.h
@@ -0,0 +1,71 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_ATTRIBS_H__
+#define __YAFFS_ATTRIBS_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh);
+void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj);
+void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev);
+void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c);
+int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr);
+int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr);
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+static inline uid_t ia_uid_read(const struct iattr *iattr)
+{
+	return from_kuid(&init_user_ns, iattr->ia_uid);
+}
+
+static inline gid_t ia_gid_read(const struct iattr *iattr)
+{
+	return from_kgid(&init_user_ns, iattr->ia_gid);
+}
+
+static inline void ia_uid_write(struct iattr *iattr, uid_t uid)
+{
+	iattr->ia_uid = make_kuid(&init_user_ns, uid);
+}
+
+static inline void ia_gid_write(struct iattr *iattr, gid_t gid)
+{
+	iattr->ia_gid = make_kgid(&init_user_ns, gid);
+}
+#else
+static inline uid_t ia_uid_read(const struct iattr *iattr)
+{
+	return iattr->ia_uid;
+}
+
+static inline gid_t ia_gid_read(const struct iattr *iattr)
+{
+	return iattr->ia_gid;
+}
+
+static inline void ia_uid_write(struct iattr *iattr, uid_t uid)
+{
+	iattr->ia_uid = uid;
+}
+
+static inline void ia_gid_write(struct iattr *iattr, gid_t gid)
+{
+	iattr->ia_gid = gid;
+}
+#endif
+
+#endif
diff --git a/fs/yaffs2/yaffs_bitmap.c b/fs/yaffs2/yaffs_bitmap.c
new file mode 100644
index 0000000..4440e93
--- /dev/null
+++ b/fs/yaffs2/yaffs_bitmap.c
@@ -0,0 +1,97 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_bitmap.h"
+#include "yaffs_trace.h"
+/*
+ * Chunk bitmap manipulations
+ */
+
+static inline u8 *yaffs_block_bits(struct yaffs_dev *dev, int blk)
+{
+	if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
+		yaffs_trace(YAFFS_TRACE_ERROR,
+			"BlockBits block %d is not valid",
+			blk);
+		BUG();
+	}
+	return dev->chunk_bits +
+	    (dev->chunk_bit_stride * (blk - dev->internal_start_block));
+}
+
+void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk)
+{
+	if (blk < dev->internal_start_block || blk > dev->internal_end_block ||
+	    chunk < 0 || chunk >= dev->param.chunks_per_block) {
+		yaffs_trace(YAFFS_TRACE_ERROR,
+			"Chunk Id (%d:%d) invalid",
+			blk, chunk);
+		BUG();
+	}
+}
+
+void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk)
+{
+	u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+	memset(blk_bits, 0, dev->chunk_bit_stride);
+}
+
+void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+	u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+	yaffs_verify_chunk_bit_id(dev, blk, chunk);
+	blk_bits[chunk / 8] &= ~(1 << (chunk & 7));
+}
+
+void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+	u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+	yaffs_verify_chunk_bit_id(dev, blk, chunk);
+	blk_bits[chunk / 8] |= (1 << (chunk & 7));
+}
+
+int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+	u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+	yaffs_verify_chunk_bit_id(dev, blk, chunk);
+	return (blk_bits[chunk / 8] & (1 << (chunk & 7))) ? 1 : 0;
+}
+
+int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk)
+{
+	u8 *blk_bits = yaffs_block_bits(dev, blk);
+	int i;
+
+	for (i = 0; i < dev->chunk_bit_stride; i++) {
+		if (*blk_bits)
+			return 1;
+		blk_bits++;
+	}
+	return 0;
+}
+
+int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk)
+{
+	u8 *blk_bits = yaffs_block_bits(dev, blk);
+	int i;
+	int n = 0;
+
+	for (i = 0; i < dev->chunk_bit_stride; i++, blk_bits++)
+		n += hweight8(*blk_bits);
+
+	return n;
+}
diff --git a/fs/yaffs2/yaffs_bitmap.h b/fs/yaffs2/yaffs_bitmap.h
new file mode 100644
index 0000000..e26b37d
--- /dev/null
+++ b/fs/yaffs2/yaffs_bitmap.h
@@ -0,0 +1,33 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/*
+ * Chunk bitmap manipulations
+ */
+
+#ifndef __YAFFS_BITMAP_H__
+#define __YAFFS_BITMAP_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk);
+void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk);
+void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk);
+int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk);
+
+#endif
diff --git a/fs/yaffs2/yaffs_checkptrw.c b/fs/yaffs2/yaffs_checkptrw.c
new file mode 100644
index 0000000..e739fb4
--- /dev/null
+++ b/fs/yaffs2/yaffs_checkptrw.c
@@ -0,0 +1,474 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_checkptrw.h"
+#include "yaffs_getblockinfo.h"
+
+struct yaffs_checkpt_chunk_hdr {
+	int version;
+	int seq;
+	u32 sum;
+	u32 xor;
+};
+
+
+static int apply_chunk_offset(struct yaffs_dev *dev, int chunk)
+{
+	return chunk - dev->chunk_offset;
+}
+
+static int apply_block_offset(struct yaffs_dev *dev, int block)
+{
+	return block - dev->block_offset;
+}
+
+static void yaffs2_checkpt_init_chunk_hdr(struct yaffs_dev *dev)
+{
+	struct yaffs_checkpt_chunk_hdr hdr;
+
+	hdr.version = YAFFS_CHECKPOINT_VERSION;
+	hdr.seq = dev->checkpt_page_seq;
+	hdr.sum = dev->checkpt_sum;
+	hdr.xor = dev->checkpt_xor;
+
+	dev->checkpt_byte_offs = sizeof(hdr);
+
+	memcpy(dev->checkpt_buffer, &hdr, sizeof(hdr));
+}
+
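+/*
+ * Validate the header stored at the front of a checkpoint chunk: the
+ * version, page sequence and the running sum/xor must all match what
+ * the reader has accumulated, otherwise the chunk is rejected.
+ */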
+static int yaffs2_checkpt_check_chunk_hdr(struct yaffs_dev *dev)
+{
+	struct yaffs_checkpt_chunk_hdr hdr;
+
+	memcpy(&hdr, dev->checkpt_buffer, sizeof(hdr));
+
+	dev->checkpt_byte_offs = sizeof(hdr);
+
+	return hdr.version == YAFFS_CHECKPOINT_VERSION &&
+		hdr.seq == dev->checkpt_page_seq &&
+		hdr.sum == dev->checkpt_sum &&
+		hdr.xor == dev->checkpt_xor;
+}
+
+static int yaffs2_checkpt_space_ok(struct yaffs_dev *dev)
+{
+	int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks;
+
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+		"checkpt blocks_avail = %d", blocks_avail);
+
+	return (blocks_avail <= 0) ? 0 : 1;
+}
+
+static int yaffs_checkpt_erase(struct yaffs_dev *dev)
+{
+	int i;
+
+	if (!dev->drv.drv_erase_fn)
+		return 0;
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+		"checking blocks %d to %d",
+		dev->internal_start_block, dev->internal_end_block);
+
+	for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+		struct yaffs_block_info *bi = yaffs_get_block_info(dev, i);
+		int offset_i = apply_block_offset(dev, i);
+		int result;
+
+		if (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT) {
+			yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+			"erasing checkpt block %d", i);
+
+			dev->n_erasures++;
+
+			result = dev->drv.drv_erase_fn(dev, offset_i);
+			if(result) {
+				bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
+				dev->n_erased_blocks++;
+				dev->n_free_chunks +=
+				    dev->param.chunks_per_block;
+			} else {
+				dev->drv.drv_mark_bad_fn(dev, offset_i);
+				bi->block_state = YAFFS_BLOCK_STATE_DEAD;
+			}
+		}
+	}
+
+	dev->blocks_in_checkpt = 0;
+
+	return 1;
+}
+
+static void yaffs2_checkpt_find_erased_block(struct yaffs_dev *dev)
+{
+	int i;
+	int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks;
+
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+		"allocating checkpt block: erased %d reserved %d avail %d next %d ",
+		dev->n_erased_blocks, dev->param.n_reserved_blocks,
+		blocks_avail, dev->checkpt_next_block);
+
+	if (dev->checkpt_next_block >= 0 &&
+	    dev->checkpt_next_block <= dev->internal_end_block &&
+	    blocks_avail > 0) {
+
+		for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
+		     i++) {
+			struct yaffs_block_info *bi;
+
+			bi = yaffs_get_block_info(dev, i);
+			if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
+				dev->checkpt_next_block = i + 1;
+				dev->checkpt_cur_block = i;
+				yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+					"allocating checkpt block %d", i);
+				return;
+			}
+		}
+	}
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT, "out of checkpt blocks");
+
+	dev->checkpt_next_block = -1;
+	dev->checkpt_cur_block = -1;
+}
+
+static void yaffs2_checkpt_find_block(struct yaffs_dev *dev)
+{
+	int i;
+	struct yaffs_ext_tags tags;
+
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+		"find next checkpt block: start:  blocks %d next %d",
+		dev->blocks_in_checkpt, dev->checkpt_next_block);
+
+	if (dev->blocks_in_checkpt < dev->checkpt_max_blocks)
+		for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
+		     i++) {
+			int chunk = i * dev->param.chunks_per_block;
+			enum yaffs_block_state state;
+			u32 seq;
+
+			dev->tagger.read_chunk_tags_fn(dev,
+					apply_chunk_offset(dev, chunk),
+					NULL, &tags);
+			dev->tagger.query_block_fn(dev,
+						apply_block_offset(dev, i),
+						&state, &seq);
+			yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+				"find next checkpt block: search: block %d state %d oid %d seq %d eccr %d",
+				i, (int) state,
+				tags.obj_id, tags.seq_number,
+				tags.ecc_result);
+
+			if (tags.seq_number != YAFFS_SEQUENCE_CHECKPOINT_DATA)
+				continue;
+
+			if (state == YAFFS_BLOCK_STATE_DEAD)
+				continue;
+
+			/* Right kind of block */
+			dev->checkpt_next_block = tags.obj_id;
+			dev->checkpt_cur_block = i;
+			dev->checkpt_block_list[dev->blocks_in_checkpt] = i;
+			dev->blocks_in_checkpt++;
+			yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+				"found checkpt block %d", i);
+			return;
+		}
+
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT, "found no more checkpt blocks");
+
+	dev->checkpt_next_block = -1;
+	dev->checkpt_cur_block = -1;
+}
+
+int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing)
+{
+	int i;
+
+	dev->checkpt_open_write = writing;
+
+	/* Got the functions we need? */
+	if (!dev->tagger.write_chunk_tags_fn ||
+	    !dev->tagger.read_chunk_tags_fn ||
+	    !dev->drv.drv_erase_fn ||
+	    !dev->drv.drv_mark_bad_fn)
+		return 0;
+
+	if (writing && !yaffs2_checkpt_space_ok(dev))
+		return 0;
+
+	if (!dev->checkpt_buffer)
+		dev->checkpt_buffer =
+		    kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+	if (!dev->checkpt_buffer)
+		return 0;
+
+	dev->checkpt_page_seq = 0;
+	dev->checkpt_byte_count = 0;
+	dev->checkpt_sum = 0;
+	dev->checkpt_xor = 0;
+	dev->checkpt_cur_block = -1;
+	dev->checkpt_cur_chunk = -1;
+	dev->checkpt_next_block = dev->internal_start_block;
+
+	if (writing) {
+		memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
+		yaffs2_checkpt_init_chunk_hdr(dev);
+		return yaffs_checkpt_erase(dev);
+	}
+
+	/* Opening for a read */
+	/* Set to a value that will kick off a read */
+	dev->checkpt_byte_offs = dev->data_bytes_per_chunk;
+	/* A checkpoint block list of 1 checkpoint block per 16 blocks is
+	 * (hopefully) going to be way more than we need */
+	dev->blocks_in_checkpt = 0;
+	dev->checkpt_max_blocks =
+	    (dev->internal_end_block - dev->internal_start_block) / 16 + 2;
+	dev->checkpt_block_list =
+	    kmalloc(sizeof(int) * dev->checkpt_max_blocks, GFP_NOFS);
+
+	if (!dev->checkpt_block_list)
+		return 0;
+
+	for (i = 0; i < dev->checkpt_max_blocks; i++)
+		dev->checkpt_block_list[i] = -1;
+
+	return 1;
+}
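+
+/* The block-list sizing above is plain integer arithmetic: one slot per
+ * 16 blocks of the partition, plus two for slack.  A worked example with
+ * a hypothetical 1024-block partition (blocks 0..1023):
+ *
+ *	checkpt_max_blocks = (1023 - 0) / 16 + 2 = 65 slots
+ *
+ * which is far more list entries than a checkpoint normally occupies.
+ */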
+
+int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 *sum)
+{
+	u32 composite_sum;
+
+	composite_sum = (dev->checkpt_sum << 8) | (dev->checkpt_xor & 0xff);
+	*sum = composite_sum;
+	return 1;
+}
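+
+/* The composite checksum above packs the running byte sum into the high
+ * bits and the running XOR into the low byte; a restore must match both.
+ * A minimal sketch of the same packing (illustrative only):
+ *
+ *	u32 sum = 0, xor = 0;
+ *	... for each checkpoint byte b: sum += b; xor ^= b; ...
+ *	u32 composite = (sum << 8) | (xor & 0xff);
+ */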
+
+static int yaffs2_checkpt_flush_buffer(struct yaffs_dev *dev)
+{
+	int chunk;
+	int offset_chunk;
+	struct yaffs_ext_tags tags;
+
+	if (dev->checkpt_cur_block < 0) {
+		yaffs2_checkpt_find_erased_block(dev);
+		dev->checkpt_cur_chunk = 0;
+	}
+
+	if (dev->checkpt_cur_block < 0)
+		return 0;
+
+	tags.is_deleted = 0;
+	tags.obj_id = dev->checkpt_next_block;	/* Hint to next place to look */
+	tags.chunk_id = dev->checkpt_page_seq + 1;
+	tags.seq_number = YAFFS_SEQUENCE_CHECKPOINT_DATA;
+	tags.n_bytes = dev->data_bytes_per_chunk;
+	if (dev->checkpt_cur_chunk == 0) {
+		/* First chunk we write for the block? Set block state to
+		   checkpoint */
+		struct yaffs_block_info *bi =
+		    yaffs_get_block_info(dev, dev->checkpt_cur_block);
+		bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
+		dev->blocks_in_checkpt++;
+	}
+
+	chunk =
+	    dev->checkpt_cur_block * dev->param.chunks_per_block +
+	    dev->checkpt_cur_chunk;
+
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+		"checkpoint write buffer nand %d(%d:%d) objid %d chId %d",
+		chunk, dev->checkpt_cur_block, dev->checkpt_cur_chunk,
+		tags.obj_id, tags.chunk_id);
+
+	offset_chunk = apply_chunk_offset(dev, chunk);
+
+	dev->n_page_writes++;
+
+	dev->tagger.write_chunk_tags_fn(dev, offset_chunk,
+				       dev->checkpt_buffer, &tags);
+	dev->checkpt_page_seq++;
+	dev->checkpt_cur_chunk++;
+	if (dev->checkpt_cur_chunk >= dev->param.chunks_per_block) {
+		dev->checkpt_cur_chunk = 0;
+		dev->checkpt_cur_block = -1;
+	}
+	memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
+
+	yaffs2_checkpt_init_chunk_hdr(dev);
+
+
+	return 1;
+}
+
+int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes)
+{
+	int i = 0;
+	int ok = 1;
+	u8 *data_bytes = (u8 *) data;
+
+	if (!dev->checkpt_buffer)
+		return 0;
+
+	if (!dev->checkpt_open_write)
+		return -1;
+
+	while (i < n_bytes && ok) {
+		dev->checkpt_buffer[dev->checkpt_byte_offs] = *data_bytes;
+		dev->checkpt_sum += *data_bytes;
+		dev->checkpt_xor ^= *data_bytes;
+
+		dev->checkpt_byte_offs++;
+		i++;
+		data_bytes++;
+		dev->checkpt_byte_count++;
+
+		if (dev->checkpt_byte_offs < 0 ||
+		    dev->checkpt_byte_offs >= dev->data_bytes_per_chunk)
+			ok = yaffs2_checkpt_flush_buffer(dev);
+	}
+
+	return i;
+}
+
+int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes)
+{
+	int i = 0;
+	int ok = 1;
+	struct yaffs_ext_tags tags;
+	int chunk;
+	int offset_chunk;
+	u8 *data_bytes = (u8 *) data;
+
+	if (!dev->checkpt_buffer)
+		return 0;
+
+	if (dev->checkpt_open_write)
+		return -1;
+
+	while (i < n_bytes && ok) {
+
+		if (dev->checkpt_byte_offs < 0 ||
+		    dev->checkpt_byte_offs >= dev->data_bytes_per_chunk) {
+
+			if (dev->checkpt_cur_block < 0) {
+				yaffs2_checkpt_find_block(dev);
+				dev->checkpt_cur_chunk = 0;
+			}
+
+			if (dev->checkpt_cur_block < 0) {
+				ok = 0;
+				break;
+			}
+
+			chunk = dev->checkpt_cur_block *
+			    dev->param.chunks_per_block +
+			    dev->checkpt_cur_chunk;
+
+			offset_chunk = apply_chunk_offset(dev, chunk);
+			dev->n_page_reads++;
+
+			/* read in the next chunk */
+			dev->tagger.read_chunk_tags_fn(dev,
+						offset_chunk,
+						dev->checkpt_buffer,
+						&tags);
+
+			if (tags.chunk_id != (dev->checkpt_page_seq + 1) ||
+			    tags.ecc_result > YAFFS_ECC_RESULT_FIXED ||
+			    tags.seq_number != YAFFS_SEQUENCE_CHECKPOINT_DATA) {
+				ok = 0;
+				break;
+			}
+			if (!yaffs2_checkpt_check_chunk_hdr(dev)) {
+				ok = 0;
+				break;
+			}
+
+			dev->checkpt_page_seq++;
+			dev->checkpt_cur_chunk++;
+
+			if (dev->checkpt_cur_chunk >=
+					dev->param.chunks_per_block)
+				dev->checkpt_cur_block = -1;
+
+		}
+
+		*data_bytes = dev->checkpt_buffer[dev->checkpt_byte_offs];
+		dev->checkpt_sum += *data_bytes;
+		dev->checkpt_xor ^= *data_bytes;
+		dev->checkpt_byte_offs++;
+		i++;
+		data_bytes++;
+		dev->checkpt_byte_count++;
+	}
+
+	return i;
+}
+
+int yaffs_checkpt_close(struct yaffs_dev *dev)
+{
+	int i;
+
+	if (dev->checkpt_open_write) {
+		if (dev->checkpt_byte_offs !=
+			sizeof(struct yaffs_checkpt_chunk_hdr))
+			yaffs2_checkpt_flush_buffer(dev);
+	} else if (dev->checkpt_block_list) {
+		for (i = 0;
+		     i < dev->blocks_in_checkpt &&
+		     dev->checkpt_block_list[i] >= 0; i++) {
+			int blk = dev->checkpt_block_list[i];
+			struct yaffs_block_info *bi = NULL;
+
+			if (dev->internal_start_block <= blk &&
+			    blk <= dev->internal_end_block)
+				bi = yaffs_get_block_info(dev, blk);
+			if (bi && bi->block_state == YAFFS_BLOCK_STATE_EMPTY)
+				bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
+		}
+		kfree(dev->checkpt_block_list);
+		dev->checkpt_block_list = NULL;
+	}
+
+	dev->n_free_chunks -=
+		dev->blocks_in_checkpt * dev->param.chunks_per_block;
+	dev->n_erased_blocks -= dev->blocks_in_checkpt;
+
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT, "checkpoint byte count %d",
+		dev->checkpt_byte_count);
+
+	if (dev->checkpt_buffer) {
+		/* free the buffer */
+		kfree(dev->checkpt_buffer);
+		dev->checkpt_buffer = NULL;
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev)
+{
+	/* Erase the checkpoint data */
+
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+		"checkpoint invalidate of %d blocks",
+		dev->blocks_in_checkpt);
+
+	return yaffs_checkpt_erase(dev);
+}
diff --git a/fs/yaffs2/yaffs_checkptrw.h b/fs/yaffs2/yaffs_checkptrw.h
new file mode 100644
index 0000000..cdbaba7
--- /dev/null
+++ b/fs/yaffs2/yaffs_checkptrw.h
@@ -0,0 +1,33 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_CHECKPTRW_H__
+#define __YAFFS_CHECKPTRW_H__
+
+#include "yaffs_guts.h"
+
+int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing);
+
+int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes);
+
+int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes);
+
+int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 *sum);
+
+int yaffs_checkpt_close(struct yaffs_dev *dev);
+
+int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev);
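+
+/* Taken together, these calls form a simple byte-stream interface over
+ * NAND.  A hedged usage sketch (error handling trimmed; the function
+ * name and the source of "state" are hypothetical):
+ *
+ *	static int save_state(struct yaffs_dev *dev,
+ *			      const void *state, int n_bytes)
+ *	{
+ *		if (!yaffs2_checkpt_open(dev, 1))	// 1 == writing
+ *			return 0;
+ *		if (yaffs2_checkpt_wr(dev, state, n_bytes) != n_bytes) {
+ *			yaffs_checkpt_close(dev);
+ *			return 0;
+ *		}
+ *		return yaffs_checkpt_close(dev);
+ *	}
+ */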
+
+#endif
diff --git a/fs/yaffs2/yaffs_ecc.c b/fs/yaffs2/yaffs_ecc.c
new file mode 100644
index 0000000..9294107
--- /dev/null
+++ b/fs/yaffs2/yaffs_ecc.c
@@ -0,0 +1,281 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This code implements the ECC algorithm used in SmartMedia.
+ *
+ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
+ * The two unused bits are set to 1.
+ * The ECC can correct single bit errors in a 256-byte page of data. Thus, two
+ * such ECC blocks are used on a 512-byte NAND page.
+ *
+ */
+
+#include "yportenv.h"
+
+#include "yaffs_ecc.h"
+
+/* Table generated by gen-ecc.c
+ * Using a table means we do not have to calculate p1..p4 and p1'..p4'
+ * for each byte of data. These are instead provided in a table in bits 7..2.
+ * Bit 0 of each entry indicates whether the entry has odd or even parity,
+ * and therefore this byte's influence on the line parity.
+ */
+
+static const unsigned char column_parity_table[] = {
+	0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
+	0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
+	0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
+	0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
+	0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
+	0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
+	0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
+	0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
+	0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
+	0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
+	0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
+	0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
+	0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
+	0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
+	0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
+	0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
+	0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
+	0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
+	0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
+	0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
+	0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
+	0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
+	0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
+	0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
+	0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
+	0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
+	0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
+	0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
+	0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
+	0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
+	0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
+	0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
+};
+
+
+/* Calculate the ECC for a 256-byte block of data */
+void yaffs_ecc_calc(const unsigned char *data, unsigned char *ecc)
+{
+	unsigned int i;
+	unsigned char col_parity = 0;
+	unsigned char line_parity = 0;
+	unsigned char line_parity_prime = 0;
+	unsigned char t;
+	unsigned char b;
+
+	for (i = 0; i < 256; i++) {
+		b = column_parity_table[*data++];
+		col_parity ^= b;
+
+		if (b & 0x01) {	/* odd number of bits in the byte */
+			line_parity ^= i;
+			line_parity_prime ^= ~i;
+		}
+	}
+
+	ecc[2] = (~col_parity) | 0x03;
+
+	t = 0;
+	if (line_parity & 0x80)
+		t |= 0x80;
+	if (line_parity_prime & 0x80)
+		t |= 0x40;
+	if (line_parity & 0x40)
+		t |= 0x20;
+	if (line_parity_prime & 0x40)
+		t |= 0x10;
+	if (line_parity & 0x20)
+		t |= 0x08;
+	if (line_parity_prime & 0x20)
+		t |= 0x04;
+	if (line_parity & 0x10)
+		t |= 0x02;
+	if (line_parity_prime & 0x10)
+		t |= 0x01;
+	ecc[1] = ~t;
+
+	t = 0;
+	if (line_parity & 0x08)
+		t |= 0x80;
+	if (line_parity_prime & 0x08)
+		t |= 0x40;
+	if (line_parity & 0x04)
+		t |= 0x20;
+	if (line_parity_prime & 0x04)
+		t |= 0x10;
+	if (line_parity & 0x02)
+		t |= 0x08;
+	if (line_parity_prime & 0x02)
+		t |= 0x04;
+	if (line_parity & 0x01)
+		t |= 0x02;
+	if (line_parity_prime & 0x01)
+		t |= 0x01;
+	ecc[0] = ~t;
+
+}
+
+/* Correct the ECC on a 256 byte block of data */
+
+int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
+		      const unsigned char *test_ecc)
+{
+	unsigned char d0, d1, d2;	/* deltas */
+
+	d0 = read_ecc[0] ^ test_ecc[0];
+	d1 = read_ecc[1] ^ test_ecc[1];
+	d2 = read_ecc[2] ^ test_ecc[2];
+
+	if ((d0 | d1 | d2) == 0)
+		return 0;	/* no error */
+
+	if (((d0 ^ (d0 >> 1)) & 0x55) == 0x55 &&
+	    ((d1 ^ (d1 >> 1)) & 0x55) == 0x55 &&
+	    ((d2 ^ (d2 >> 1)) & 0x54) == 0x54) {
+		/* Single bit (recoverable) error in data */
+
+		unsigned byte;
+		unsigned bit;
+
+		bit = byte = 0;
+
+		if (d1 & 0x80)
+			byte |= 0x80;
+		if (d1 & 0x20)
+			byte |= 0x40;
+		if (d1 & 0x08)
+			byte |= 0x20;
+		if (d1 & 0x02)
+			byte |= 0x10;
+		if (d0 & 0x80)
+			byte |= 0x08;
+		if (d0 & 0x20)
+			byte |= 0x04;
+		if (d0 & 0x08)
+			byte |= 0x02;
+		if (d0 & 0x02)
+			byte |= 0x01;
+
+		if (d2 & 0x80)
+			bit |= 0x04;
+		if (d2 & 0x20)
+			bit |= 0x02;
+		if (d2 & 0x08)
+			bit |= 0x01;
+
+		data[byte] ^= (1 << bit);
+
+		return 1;	/* Corrected the error */
+	}
+
+	if ((hweight8(d0) + hweight8(d1) + hweight8(d2)) == 1) {
+		/* Recoverable error in ecc */
+
+		read_ecc[0] = test_ecc[0];
+		read_ecc[1] = test_ecc[1];
+		read_ecc[2] = test_ecc[2];
+
+		return 1;	/* Corrected the error */
+	}
+
+	/* Unrecoverable error */
+
+	return -1;
+
+}
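+
+/* The two routines above pair up: calculate the ECC when a page is
+ * written, recalculate it when the page is read back, then let
+ * yaffs_ecc_correct() compare and repair.  A minimal self-test sketch
+ * (hypothetical code, not part of the driver):
+ */
+#if 0	/* illustrative sketch only */
+static int yaffs_ecc_self_test(void)
+{
+	unsigned char page[256] = { 0 };
+	unsigned char stored_ecc[3];	/* ECC computed at write time */
+	unsigned char check_ecc[3];	/* ECC recomputed at read time */
+
+	yaffs_ecc_calc(page, stored_ecc);
+	page[10] ^= 0x04;		/* inject: flip bit 2 of byte 10 */
+	yaffs_ecc_calc(page, check_ecc);
+
+	/* Returns 1: corrected, 0: clean, -1: unrecoverable. */
+	return yaffs_ecc_correct(page, stored_ecc, check_ecc) == 1 &&
+	       page[10] == 0;
+}
+#endif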
+
+/*
+ * ECCxxxOther does ECC calcs on arbitrary n bytes of data
+ */
+void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
+			  struct yaffs_ecc_other *ecc_other)
+{
+	unsigned int i;
+	unsigned char col_parity = 0;
+	unsigned line_parity = 0;
+	unsigned line_parity_prime = 0;
+	unsigned char b;
+
+	for (i = 0; i < n_bytes; i++) {
+		b = column_parity_table[*data++];
+		col_parity ^= b;
+
+		if (b & 0x01) {
+			/* odd number of bits in the byte */
+			line_parity ^= i;
+			line_parity_prime ^= ~i;
+		}
+
+	}
+
+	ecc_other->col_parity = (col_parity >> 2) & 0x3f;
+	ecc_other->line_parity = line_parity;
+	ecc_other->line_parity_prime = line_parity_prime;
+}
+
+int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
+			    struct yaffs_ecc_other *read_ecc,
+			    const struct yaffs_ecc_other *test_ecc)
+{
+	unsigned char delta_col;	/* column parity delta */
+	unsigned delta_line;	/* line parity delta */
+	unsigned delta_line_prime;	/* line parity delta */
+	unsigned bit;
+
+	delta_col = read_ecc->col_parity ^ test_ecc->col_parity;
+	delta_line = read_ecc->line_parity ^ test_ecc->line_parity;
+	delta_line_prime =
+	    read_ecc->line_parity_prime ^ test_ecc->line_parity_prime;
+
+	if ((delta_col | delta_line | delta_line_prime) == 0)
+		return 0;	/* no error */
+
+	if (delta_line == ~delta_line_prime &&
+	    (((delta_col ^ (delta_col >> 1)) & 0x15) == 0x15)) {
+		/* Single bit (recoverable) error in data */
+
+		bit = 0;
+
+		if (delta_col & 0x20)
+			bit |= 0x04;
+		if (delta_col & 0x08)
+			bit |= 0x02;
+		if (delta_col & 0x02)
+			bit |= 0x01;
+
+		if (delta_line >= n_bytes)
+			return -1;
+
+		data[delta_line] ^= (1 << bit);
+
+		return 1;	/* corrected */
+	}
+
+	if ((hweight32(delta_line) +
+	     hweight32(delta_line_prime) +
+	     hweight8(delta_col)) == 1) {
+		/* Recoverable error in ecc */
+
+		*read_ecc = *test_ecc;
+		return 1;	/* corrected */
+	}
+
+	/* Unrecoverable error */
+
+	return -1;
+}
diff --git a/fs/yaffs2/yaffs_ecc.h b/fs/yaffs2/yaffs_ecc.h
new file mode 100644
index 0000000..17d47bd
--- /dev/null
+++ b/fs/yaffs2/yaffs_ecc.h
@@ -0,0 +1,44 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/*
+ * This code implements the ECC algorithm used in SmartMedia.
+ *
+ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
+ * The two unused bits are set to 1.
+ * The ECC can correct single bit errors in a 256-byte page of data.
+ * Thus, two such ECC blocks are used on a 512-byte NAND page.
+ *
+ */
+
+#ifndef __YAFFS_ECC_H__
+#define __YAFFS_ECC_H__
+
+struct yaffs_ecc_other {
+	unsigned char col_parity;
+	unsigned line_parity;
+	unsigned line_parity_prime;
+};
+
+void yaffs_ecc_calc(const unsigned char *data, unsigned char *ecc);
+int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
+		      const unsigned char *test_ecc);
+
+void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
+			  struct yaffs_ecc_other *ecc);
+int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
+			    struct yaffs_ecc_other *read_ecc,
+			    const struct yaffs_ecc_other *test_ecc);
+#endif
diff --git a/fs/yaffs2/yaffs_getblockinfo.h b/fs/yaffs2/yaffs_getblockinfo.h
new file mode 100644
index 0000000..8fd0802
--- /dev/null
+++ b/fs/yaffs2/yaffs_getblockinfo.h
@@ -0,0 +1,35 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_GETBLOCKINFO_H__
+#define __YAFFS_GETBLOCKINFO_H__
+
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+
+/* Function to manipulate block info */
+static inline struct yaffs_block_info *yaffs_get_block_info(struct yaffs_dev
+							      *dev, int blk)
+{
+	if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
+		yaffs_trace(YAFFS_TRACE_ERROR,
+			"**>> yaffs: get_block_info block %d is not valid",
+			blk);
+		BUG();
+	}
+	return &dev->block_info[blk - dev->internal_start_block];
+}
+
+#endif
diff --git a/fs/yaffs2/yaffs_guts.c b/fs/yaffs2/yaffs_guts.c
new file mode 100644
index 0000000..1fd464d
--- /dev/null
+++ b/fs/yaffs2/yaffs_guts.c
@@ -0,0 +1,5082 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yportenv.h"
+#include "yaffs_trace.h"
+
+#include "yaffs_guts.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_tagscompat.h"
+#include "yaffs_tagsmarshall.h"
+#include "yaffs_nand.h"
+#include "yaffs_yaffs1.h"
+#include "yaffs_yaffs2.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_verify.h"
+#include "yaffs_nand.h"
+#include "yaffs_packedtags2.h"
+#include "yaffs_nameval.h"
+#include "yaffs_allocator.h"
+#include "yaffs_attribs.h"
+#include "yaffs_summary.h"
+
+/* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */
+#define YAFFS_GC_GOOD_ENOUGH 2
+#define YAFFS_GC_PASSIVE_THRESHOLD 4
+
+#include "yaffs_ecc.h"
+
+/* Forward declarations */
+
+static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
+			     const u8 *buffer, int n_bytes, int use_reserve);
+
+static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name,
+				int buffer_size);
+
+/* Function to calculate chunk and offset */
+
+void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
+				int *chunk_out, u32 *offset_out)
+{
+	int chunk;
+	u32 offset;
+
+	chunk = (u32) (addr >> dev->chunk_shift);
+
+	if (dev->chunk_div == 1) {
+		/* easy power of 2 case */
+		offset = (u32) (addr & dev->chunk_mask);
+	} else {
+		/* Non power-of-2 case */
+
+		loff_t chunk_base;
+
+		chunk /= dev->chunk_div;
+
+		chunk_base = ((loff_t) chunk) * dev->data_bytes_per_chunk;
+		offset = (u32) (addr - chunk_base);
+	}
+
+	*chunk_out = chunk;
+	*offset_out = offset;
+}
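+
+/* For the usual power-of-two geometry this is one shift and one mask.
+ * Worked example with a hypothetical 2 KiB chunk (chunk_shift == 11,
+ * chunk_mask == 0x7ff, chunk_div == 1):
+ *
+ *	addr = 10000:	chunk  = 10000 >> 11   == 4
+ *			offset = 10000 & 0x7ff == 1808
+ *
+ * i.e. byte 1808 of chunk 4.  The chunk_div branch above only runs for
+ * non-power-of-two chunk sizes.
+ */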
+
+/* Function to return the number of shifts for a power of 2 greater than or
+ * equal to the given number
+ * Note we don't try to cater for all possible numbers and this does not have to
+ * be hellishly efficient.
+ */
+
+static inline u32 calc_shifts_ceiling(u32 x)
+{
+	int extra_bits;
+	int shifts;
+
+	shifts = extra_bits = 0;
+
+	while (x > 1) {
+		if (x & 1)
+			extra_bits++;
+		x >>= 1;
+		shifts++;
+	}
+
+	if (extra_bits)
+		shifts++;
+
+	return shifts;
+}
+
+/* Function to return the number of shifts to get a 1 in bit 0
+ */
+
+static inline u32 calc_shifts(u32 x)
+{
+	u32 shifts;
+
+	shifts = 0;
+
+	if (!x)
+		return 0;
+
+	while (!(x & 1)) {
+		x >>= 1;
+		shifts++;
+	}
+
+	return shifts;
+}
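+
+/* The two helpers differ on non-powers of two: calc_shifts_ceiling()
+ * rounds up to the next power of two, while calc_shifts() counts
+ * trailing zero bits.  For example:
+ *
+ *	calc_shifts_ceiling(2048) == 11		calc_shifts(2048) == 11
+ *	calc_shifts_ceiling(2000) == 11		calc_shifts(2000) == 4
+ *
+ * (2000 == 0x7d0: it needs 11 bits but has only 4 trailing zeros.)
+ */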
+
+/*
+ * Temporary buffer manipulations.
+ */
+
+static int yaffs_init_tmp_buffers(struct yaffs_dev *dev)
+{
+	int i;
+	u8 *buf = (u8 *) 1;	/* non-NULL so the allocation loop starts */
+
+	memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer));
+
+	for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
+		dev->temp_buffer[i].in_use = 0;
+		buf = kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+		dev->temp_buffer[i].buffer = buf;
+	}
+
+	return buf ? YAFFS_OK : YAFFS_FAIL;
+}
+
+u8 *yaffs_get_temp_buffer(struct yaffs_dev *dev)
+{
+	int i;
+
+	dev->temp_in_use++;
+	if (dev->temp_in_use > dev->max_temp)
+		dev->max_temp = dev->temp_in_use;
+
+	for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+		if (dev->temp_buffer[i].in_use == 0) {
+			dev->temp_buffer[i].in_use = 1;
+			return dev->temp_buffer[i].buffer;
+		}
+	}
+
+	yaffs_trace(YAFFS_TRACE_BUFFERS, "Out of temp buffers");
+	/*
+	 * If we got here then we have to allocate an unmanaged one
+	 * This is not good.
+	 */
+
+	dev->unmanaged_buffer_allocs++;
+	return kmalloc(dev->data_bytes_per_chunk, GFP_NOFS);
+
+}
+
+void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer)
+{
+	int i;
+
+	dev->temp_in_use--;
+
+	for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+		if (dev->temp_buffer[i].buffer == buffer) {
+			dev->temp_buffer[i].in_use = 0;
+			return;
+		}
+	}
+
+	if (buffer) {
+		/* assume it is an unmanaged one. */
+		yaffs_trace(YAFFS_TRACE_BUFFERS,
+			"Releasing unmanaged temp buffer");
+		kfree(buffer);
+		dev->unmanaged_buffer_deallocs++;
+	}
+
+}
+
+/*
+ * Functions for robustising (TODO)
+ *
+ */
+
+static void yaffs_handle_chunk_wr_ok(struct yaffs_dev *dev, int nand_chunk,
+				     const u8 *data,
+				     const struct yaffs_ext_tags *tags)
+{
+	(void) dev;
+	(void) nand_chunk;
+	(void) data;
+	(void) tags;
+}
+
+static void yaffs_handle_chunk_update(struct yaffs_dev *dev, int nand_chunk,
+				      const struct yaffs_ext_tags *tags)
+{
+	(void) dev;
+	(void) nand_chunk;
+	(void) tags;
+}
+
+void yaffs_handle_chunk_error(struct yaffs_dev *dev,
+			      struct yaffs_block_info *bi)
+{
+	if (!bi->gc_prioritise) {
+		bi->gc_prioritise = 1;
+		dev->has_pending_prioritised_gc = 1;
+		bi->chunk_error_strikes++;
+
+		if (bi->chunk_error_strikes > 3) {
+			bi->needs_retiring = 1;	/* Too many strikes, so retire */
+			yaffs_trace(YAFFS_TRACE_ALWAYS,
+				"yaffs: Block struck out");
+
+		}
+	}
+}
+
+static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk,
+					int erased_ok)
+{
+	int flash_block = nand_chunk / dev->param.chunks_per_block;
+	struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
+
+	yaffs_handle_chunk_error(dev, bi);
+
+	if (erased_ok) {
+		/* Was an actual write failure,
+		 * so mark the block for retirement.*/
+		bi->needs_retiring = 1;
+		yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+		  "**>> Block %d needs retiring", flash_block);
+	}
+
+	/* Delete the chunk */
+	yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+	yaffs_skip_rest_of_block(dev);
+}
+
+/*
+ * Verification code
+ */
+
+/*
+ *  Simple hash function. Needs to have a reasonable spread
+ */
+
+static inline int yaffs_hash_fn(int n)
+{
+	if (n < 0)
+		n = -n;
+	return n % YAFFS_NOBJECT_BUCKETS;
+}
+
+/*
+ * Access functions to useful fake objects.
+ * Note that root might have a presence in NAND if permissions are set.
+ */
+
+struct yaffs_obj *yaffs_root(struct yaffs_dev *dev)
+{
+	return dev->root_dir;
+}
+
+struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev)
+{
+	return dev->lost_n_found;
+}
+
+/*
+ *  Erased NAND checking functions
+ */
+
+int yaffs_check_ff(u8 *buffer, int n_bytes)
+{
+	/* Horrible, slow implementation */
+	while (n_bytes--) {
+		if (*buffer != 0xff)
+			return 0;
+		buffer++;
+	}
+	return 1;
+}
+
+static int yaffs_check_chunk_erased(struct yaffs_dev *dev, int nand_chunk)
+{
+	int retval = YAFFS_OK;
+	u8 *data = yaffs_get_temp_buffer(dev);
+	struct yaffs_ext_tags tags;
+	int result;
+
+	result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags);
+
+	if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR)
+		retval = YAFFS_FAIL;
+
+	if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) ||
+		tags.chunk_used) {
+		yaffs_trace(YAFFS_TRACE_NANDACCESS,
+			"Chunk %d not erased", nand_chunk);
+		retval = YAFFS_FAIL;
+	}
+
+	yaffs_release_temp_buffer(dev, data);
+
+	return retval;
+
+}
+
+static int yaffs_verify_chunk_written(struct yaffs_dev *dev,
+				      int nand_chunk,
+				      const u8 *data,
+				      struct yaffs_ext_tags *tags)
+{
+	int retval = YAFFS_OK;
+	struct yaffs_ext_tags temp_tags;
+	u8 *buffer = yaffs_get_temp_buffer(dev);
+	int result;
+
+	result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags);
+	if (memcmp(buffer, data, dev->data_bytes_per_chunk) ||
+	    temp_tags.obj_id != tags->obj_id ||
+	    temp_tags.chunk_id != tags->chunk_id ||
+	    temp_tags.n_bytes != tags->n_bytes)
+		retval = YAFFS_FAIL;
+
+	yaffs_release_temp_buffer(dev, buffer);
+
+	return retval;
+}
+
+
+int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks)
+{
+	int reserved_chunks;
+	int reserved_blocks = dev->param.n_reserved_blocks;
+	int checkpt_blocks;
+
+	checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev);
+
+	reserved_chunks =
+	    (reserved_blocks + checkpt_blocks) * dev->param.chunks_per_block;
+
+	return (dev->n_free_chunks > (reserved_chunks + n_chunks));
+}
+
+static int yaffs_find_alloc_block(struct yaffs_dev *dev)
+{
+	int i;
+	struct yaffs_block_info *bi;
+
+	if (dev->n_erased_blocks < 1) {
+		/* Hoosterman we've got a problem.
+		 * Can't get space to gc
+		 */
+		yaffs_trace(YAFFS_TRACE_ERROR,
+		  "yaffs tragedy: no more erased blocks");
+
+		return -1;
+	}
+
+	/* Find an empty block. */
+
+	for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+		dev->alloc_block_finder++;
+		if (dev->alloc_block_finder < dev->internal_start_block
+		    || dev->alloc_block_finder > dev->internal_end_block) {
+			dev->alloc_block_finder = dev->internal_start_block;
+		}
+
+		bi = yaffs_get_block_info(dev, dev->alloc_block_finder);
+
+		if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
+			bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING;
+			dev->seq_number++;
+			bi->seq_number = dev->seq_number;
+			dev->n_erased_blocks--;
+			yaffs_trace(YAFFS_TRACE_ALLOCATE,
+			  "Allocated block %d, seq  %d, %d left" ,
+			   dev->alloc_block_finder, dev->seq_number,
+			   dev->n_erased_blocks);
+			return dev->alloc_block_finder;
+		}
+	}
+
+	yaffs_trace(YAFFS_TRACE_ALWAYS,
+		"yaffs tragedy: no more erased blocks, but there should have been %d",
+		dev->n_erased_blocks);
+
+	return -1;
+}
+
+static int yaffs_alloc_chunk(struct yaffs_dev *dev, int use_reserver,
+			     struct yaffs_block_info **block_ptr)
+{
+	int ret_val;
+	struct yaffs_block_info *bi;
+
+	if (dev->alloc_block < 0) {
+		/* Get next block to allocate off */
+		dev->alloc_block = yaffs_find_alloc_block(dev);
+		dev->alloc_page = 0;
+	}
+
+	if (!use_reserver && !yaffs_check_alloc_available(dev, 1)) {
+		/* No space unless we're allowed to use the reserve. */
+		return -1;
+	}
+
+	if (dev->n_erased_blocks < dev->param.n_reserved_blocks
+	    && dev->alloc_page == 0)
+		yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve");
+
+	/* Next page please.... */
+	if (dev->alloc_block >= 0) {
+		bi = yaffs_get_block_info(dev, dev->alloc_block);
+
+		ret_val = (dev->alloc_block * dev->param.chunks_per_block) +
+		    dev->alloc_page;
+		bi->pages_in_use++;
+		yaffs_set_chunk_bit(dev, dev->alloc_block, dev->alloc_page);
+
+		dev->alloc_page++;
+
+		dev->n_free_chunks--;
+
+		/* If the block is full set the state to full */
+		if (dev->alloc_page >= dev->param.chunks_per_block) {
+			bi->block_state = YAFFS_BLOCK_STATE_FULL;
+			dev->alloc_block = -1;
+		}
+
+		if (block_ptr)
+			*block_ptr = bi;
+
+		return ret_val;
+	}
+
+	yaffs_trace(YAFFS_TRACE_ERROR,
+		"!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!");
+
+	return -1;
+}
+
+static int yaffs_get_erased_chunks(struct yaffs_dev *dev)
+{
+	int n;
+
+	n = dev->n_erased_blocks * dev->param.chunks_per_block;
+
+	if (dev->alloc_block > 0)
+		n += (dev->param.chunks_per_block - dev->alloc_page);
+
+	return n;
+
+}
+
+/*
+ * yaffs_skip_rest_of_block() skips over the rest of the allocation block
+ * if we don't want to write to it.
+ */
+void yaffs_skip_rest_of_block(struct yaffs_dev *dev)
+{
+	struct yaffs_block_info *bi;
+
+	if (dev->alloc_block > 0) {
+		bi = yaffs_get_block_info(dev, dev->alloc_block);
+		if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
+			bi->block_state = YAFFS_BLOCK_STATE_FULL;
+			dev->alloc_block = -1;
+		}
+	}
+}
+
+static int yaffs_write_new_chunk(struct yaffs_dev *dev,
+				 const u8 *data,
+				 struct yaffs_ext_tags *tags, int use_reserver)
+{
+	int attempts = 0;
+	int write_ok = 0;
+	int chunk;
+
+	yaffs2_checkpt_invalidate(dev);
+
+	do {
+		struct yaffs_block_info *bi = NULL;
+		int erased_ok = 0;
+
+		chunk = yaffs_alloc_chunk(dev, use_reserver, &bi);
+		if (chunk < 0) {
+			/* no space */
+			break;
+		}
+
+		/* First check this chunk is erased, if it needs
+		 * checking.  The checking policy (unless forced
+		 * always on) is as follows:
+		 *
+		 * Check the first page we try to write in a block.
+		 * If the check passes then we don't need to check any
+		 * more. If the check fails, we check again...
+		 * If the block has been erased, we don't need to check.
+		 *
+		 * However, if the block has been prioritised for gc,
+		 * then we think there might be something odd about
+		 * this block and stop using it.
+		 *
+		 * Rationale: We should only ever see chunks that have
+		 * not been erased if there was a partially written
+		 * chunk due to power loss.  This checking policy should
+		 * catch that case with very few checks and thus save a
+		 * lot of checks that are most likely not needed.
+		 *
+		 * Mods to the above
+		 * If an erase check fails or the write fails we skip the
+		 * rest of the block.
+		 */
+
+		/* let's give it a try */
+		attempts++;
+
+		if (dev->param.always_check_erased)
+			bi->skip_erased_check = 0;
+
+		if (!bi->skip_erased_check) {
+			erased_ok = yaffs_check_chunk_erased(dev, chunk);
+			if (erased_ok != YAFFS_OK) {
+				yaffs_trace(YAFFS_TRACE_ERROR,
+				  "**>> yaffs chunk %d was not erased",
+				  chunk);
+
+				/* If not erased, delete this one,
+				 * skip rest of block and
+				 * try another chunk */
+				yaffs_chunk_del(dev, chunk, 1, __LINE__);
+				yaffs_skip_rest_of_block(dev);
+				continue;
+			}
+		}
+
+		write_ok = yaffs_wr_chunk_tags_nand(dev, chunk, data, tags);
+
+		if (!bi->skip_erased_check)
+			write_ok =
+			    yaffs_verify_chunk_written(dev, chunk, data, tags);
+
+		if (write_ok != YAFFS_OK) {
+			/* Clean up aborted write, skip to next block and
+			 * try another chunk */
+			yaffs_handle_chunk_wr_error(dev, chunk, erased_ok);
+			continue;
+		}
+
+		bi->skip_erased_check = 1;
+
+		/* Copy the data into the robustification buffer */
+		yaffs_handle_chunk_wr_ok(dev, chunk, data, tags);
+
+	} while (write_ok != YAFFS_OK &&
+		 (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));
+
+	if (!write_ok)
+		chunk = -1;
+
+	if (attempts > 1) {
+		yaffs_trace(YAFFS_TRACE_ERROR,
+			"**>> yaffs write required %d attempts",
+			attempts);
+		dev->n_retried_writes += (attempts - 1);
+	}
+
+	return chunk;
+}
+
+/*
+ * Block retiring for handling a broken block.
+ */
+
+static void yaffs_retire_block(struct yaffs_dev *dev, int flash_block)
+{
+	struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
+
+	yaffs2_checkpt_invalidate(dev);
+
+	yaffs2_clear_oldest_dirty_seq(dev, bi);
+
+	if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) {
+		if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) {
+			yaffs_trace(YAFFS_TRACE_ALWAYS,
+				"yaffs: Failed to mark bad and erase block %d",
+				flash_block);
+		} else {
+			struct yaffs_ext_tags tags;
+			int chunk_id =
+			    flash_block * dev->param.chunks_per_block;
+
+			u8 *buffer = yaffs_get_temp_buffer(dev);
+
+			memset(buffer, 0xff, dev->data_bytes_per_chunk);
+			memset(&tags, 0, sizeof(tags));
+			tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK;
+			if (dev->tagger.write_chunk_tags_fn(dev, chunk_id -
+							dev->chunk_offset,
+							buffer,
+							&tags) != YAFFS_OK)
+				yaffs_trace(YAFFS_TRACE_ALWAYS,
+					"yaffs: Failed to write bad block marker to block %d",
+					flash_block);
+
+			yaffs_release_temp_buffer(dev, buffer);
+		}
+	}
+
+	bi->block_state = YAFFS_BLOCK_STATE_DEAD;
+	bi->gc_prioritise = 0;
+	bi->needs_retiring = 0;
+
+	dev->n_retired_blocks++;
+}
+
+/*---------------- Name handling functions ------------*/
+
+static u16 yaffs_calc_name_sum(const YCHAR *name)
+{
+	u16 sum = 0;
+	u16 i = 1;
+
+	if (!name)
+		return 0;
+
+	while ((*name) && i < (YAFFS_MAX_NAME_LENGTH / 2)) {
+
+		/* 0x1f mask is case insensitive */
+		sum += ((*name) & 0x1f) * i;
+		i++;
+		name++;
+	}
+	return sum;
+}
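+
+/* The 0x1f mask keeps only the low five bits of each character, which is
+ * what makes the sum case-insensitive for ASCII letters.  For example,
+ * "Ab" and "aB" produce the same sum:
+ *
+ *	('a' & 0x1f) * 1 + ('b' & 0x1f) * 2  ==  1 + 4  ==  5
+ *	('A' & 0x1f) * 1 + ('B' & 0x1f) * 2  ==  1 + 4  ==  5
+ */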
+
+
+void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR *name)
+{
+	memset(obj->short_name, 0, sizeof(obj->short_name));
+
+	if (name && !name[0]) {
+		yaffs_fix_null_name(obj, obj->short_name,
+				YAFFS_SHORT_NAME_LENGTH);
+		name = obj->short_name;
+	} else if (name &&
+		strnlen(name, YAFFS_SHORT_NAME_LENGTH + 1) <=
+		YAFFS_SHORT_NAME_LENGTH)  {
+		strcpy(obj->short_name, name);
+	}
+
+	obj->sum = yaffs_calc_name_sum(name);
+}
+
+void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
+				const struct yaffs_obj_hdr *oh)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+	YCHAR tmp_name[YAFFS_MAX_NAME_LENGTH + 1];
+	memset(tmp_name, 0, sizeof(tmp_name));
+	yaffs_load_name_from_oh(obj->my_dev, tmp_name, oh->name,
+				YAFFS_MAX_NAME_LENGTH + 1);
+	yaffs_set_obj_name(obj, tmp_name);
+#else
+	yaffs_set_obj_name(obj, oh->name);
+#endif
+}
+
+loff_t yaffs_max_file_size(struct yaffs_dev *dev)
+{
+	if (sizeof(loff_t) < 8)
+		return YAFFS_MAX_FILE_SIZE_32;
+	else
+		return ((loff_t) YAFFS_MAX_CHUNK_ID) * dev->data_bytes_per_chunk;
+}
+
+/*-------------------- TNODES -------------------
+
+ * List of spare tnodes
+ * The list is hooked together using the first pointer
+ * in the tnode.
+ */
+
+struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev)
+{
+	struct yaffs_tnode *tn = yaffs_alloc_raw_tnode(dev);
+
+	if (tn) {
+		memset(tn, 0, dev->tnode_size);
+		dev->n_tnodes++;
+	}
+
+	dev->checkpoint_blocks_required = 0;	/* force recalculation */
+
+	return tn;
+}
+
+/* FreeTnode frees up a tnode and puts it back on the free list */
+static void yaffs_free_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
+{
+	yaffs_free_raw_tnode(dev, tn);
+	dev->n_tnodes--;
+	dev->checkpoint_blocks_required = 0;	/* force recalculation */
+}
+
+static void yaffs_deinit_tnodes_and_objs(struct yaffs_dev *dev)
+{
+	yaffs_deinit_raw_tnodes_and_objs(dev);
+	dev->n_obj = 0;
+	dev->n_tnodes = 0;
+}
+
+static void yaffs_load_tnode_0(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+			unsigned pos, unsigned val)
+{
+	u32 *map = (u32 *) tn;
+	u32 bit_in_map;
+	u32 bit_in_word;
+	u32 word_in_map;
+	u32 mask;
+
+	pos &= YAFFS_TNODES_LEVEL0_MASK;
+	val >>= dev->chunk_grp_bits;
+
+	bit_in_map = pos * dev->tnode_width;
+	word_in_map = bit_in_map / 32;
+	bit_in_word = bit_in_map & (32 - 1);
+
+	mask = dev->tnode_mask << bit_in_word;
+
+	map[word_in_map] &= ~mask;
+	map[word_in_map] |= (mask & (val << bit_in_word));
+
+	if (dev->tnode_width > (32 - bit_in_word)) {
+		bit_in_word = (32 - bit_in_word);
+		word_in_map++;
+		mask =
+		    dev->tnode_mask >> bit_in_word;
+		map[word_in_map] &= ~mask;
+		map[word_in_map] |= (mask & (val >> bit_in_word));
+	}
+}
+
+u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+			 unsigned pos)
+{
+	u32 *map = (u32 *) tn;
+	u32 bit_in_map;
+	u32 bit_in_word;
+	u32 word_in_map;
+	u32 val;
+
+	pos &= YAFFS_TNODES_LEVEL0_MASK;
+
+	bit_in_map = pos * dev->tnode_width;
+	word_in_map = bit_in_map / 32;
+	bit_in_word = bit_in_map & (32 - 1);
+
+	val = map[word_in_map] >> bit_in_word;
+
+	if (dev->tnode_width > (32 - bit_in_word)) {
+		bit_in_word = (32 - bit_in_word);
+		word_in_map++;
+		val |= (map[word_in_map] << bit_in_word);
+	}
+
+	val &= dev->tnode_mask;
+	val <<= dev->chunk_grp_bits;
+
+	return val;
+}
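+
+/* Level-0 entries are packed tnode_width bits at a time into the 32-bit
+ * words of the tnode, so an entry may straddle a word boundary; the
+ * second "if" in each routine above handles that spill.  Worked example
+ * for a hypothetical tnode_width of 18:
+ *
+ *	entry pos == 3 starts at bit 3 * 18 == 54, i.e. word 1, bit 22;
+ *	only 10 bits fit in word 1, so the remaining 8 bits of the value
+ *	sit at the bottom of word 2.
+ */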
+
+/* ------------------- End of individual tnode manipulation -----------------*/
+
+/* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
+ * The look up tree is represented by the top tnode and the number of top_level
+ * in the tree. 0 means only the level 0 tnode is in the tree.
+ */
+
+/* FindLevel0Tnode finds the level 0 tnode, if one exists. */
+struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
+				       struct yaffs_file_var *file_struct,
+				       u32 chunk_id)
+{
+	struct yaffs_tnode *tn = file_struct->top;
+	u32 i;
+	int required_depth;
+	int level = file_struct->top_level;
+
+	(void) dev;
+
+	/* Check sane level and chunk Id */
+	if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
+		return NULL;
+
+	if (chunk_id > YAFFS_MAX_CHUNK_ID)
+		return NULL;
+
+	/* First check we're tall enough (ie enough top_level) */
+
+	i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
+	required_depth = 0;
+	while (i) {
+		i >>= YAFFS_TNODES_INTERNAL_BITS;
+		required_depth++;
+	}
+
+	if (required_depth > file_struct->top_level)
+		return NULL;	/* Not tall enough, so we can't find it */
+
+	/* Traverse down to level 0 */
+	while (level > 0 && tn) {
+		tn = tn->internal[(chunk_id >>
+				   (YAFFS_TNODES_LEVEL0_BITS +
+				    (level - 1) *
+				    YAFFS_TNODES_INTERNAL_BITS)) &
+				  YAFFS_TNODES_INTERNAL_MASK];
+		level--;
+	}
+
+	return tn;
+}
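+
+/* The depth check strips off the level-0 bits and counts the remaining
+ * internal-level groups.  With the usual YAFFS constants
+ * (YAFFS_TNODES_LEVEL0_BITS == 4, YAFFS_TNODES_INTERNAL_BITS == 3):
+ *
+ *	chunk_id 15:  15 >> 4 == 0, so required_depth == 0
+ *		      (a single level-0 tnode suffices);
+ *	chunk_id 100: 100 >> 4 == 6, so required_depth == 1
+ *		      (the tree must have top_level >= 1).
+ */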
+
+/* add_find_tnode_0 finds the level 0 tnode if it exists,
+ * otherwise first expands the tree.
+ * This happens in two steps:
+ *  1. If the tree isn't tall enough, then make it taller.
+ *  2. Scan down the tree towards the level 0 tnode adding tnodes if required.
+ *
+ * Used when modifying the tree.
+ *
+ *  If the tn argument is NULL, then a fresh tnode will be added otherwise the
+ *  specified tn will be plugged into the ttree.
+ */
+
+struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
+					   struct yaffs_file_var *file_struct,
+					   u32 chunk_id,
+					   struct yaffs_tnode *passed_tn)
+{
+	int required_depth;
+	int i;
+	int l;
+	struct yaffs_tnode *tn;
+	u32 x;
+
+	/* Check sane level and page Id */
+	if (file_struct->top_level < 0 ||
+	    file_struct->top_level > YAFFS_TNODES_MAX_LEVEL)
+		return NULL;
+
+	if (chunk_id > YAFFS_MAX_CHUNK_ID)
+		return NULL;
+
+	/* First check we're tall enough (ie enough top_level) */
+
+	x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
+	required_depth = 0;
+	while (x) {
+		x >>= YAFFS_TNODES_INTERNAL_BITS;
+		required_depth++;
+	}
+
+	if (required_depth > file_struct->top_level) {
+		/* Not tall enough, gotta make the tree taller */
+		for (i = file_struct->top_level; i < required_depth; i++) {
+
+			tn = yaffs_get_tnode(dev);
+
+			if (tn) {
+				tn->internal[0] = file_struct->top;
+				file_struct->top = tn;
+				file_struct->top_level++;
+			} else {
+				yaffs_trace(YAFFS_TRACE_ERROR,
+					"yaffs: no more tnodes");
+				return NULL;
+			}
+		}
+	}
+
+	/* Traverse down to level 0, adding anything we need */
+
+	l = file_struct->top_level;
+	tn = file_struct->top;
+
+	if (l > 0) {
+		while (l > 0 && tn) {
+			x = (chunk_id >>
+			     (YAFFS_TNODES_LEVEL0_BITS +
+			      (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
+			    YAFFS_TNODES_INTERNAL_MASK;
+
+			if ((l > 1) && !tn->internal[x]) {
+				/* Add missing non-level-zero tnode */
+				tn->internal[x] = yaffs_get_tnode(dev);
+				if (!tn->internal[x])
+					return NULL;
+			} else if (l == 1) {
+				/* Looking from level 1 at level 0 */
+				if (passed_tn) {
+					/* If we already have one, release it */
+					if (tn->internal[x])
+						yaffs_free_tnode(dev,
+							tn->internal[x]);
+					tn->internal[x] = passed_tn;
+
+				} else if (!tn->internal[x]) {
+					/* Don't have one, none passed in */
+					tn->internal[x] = yaffs_get_tnode(dev);
+					if (!tn->internal[x])
+						return NULL;
+				}
+			}
+
+			tn = tn->internal[x];
+			l--;
+		}
+	} else {
+		/* top is level 0 */
+		if (passed_tn) {
+			memcpy(tn, passed_tn,
+			       (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8);
+			yaffs_free_tnode(dev, passed_tn);
+		}
+	}
+
+	return tn;
+}
+
+static int yaffs_tags_match(const struct yaffs_ext_tags *tags, int obj_id,
+			    int chunk_obj)
+{
+	return (tags->chunk_id == chunk_obj &&
+		tags->obj_id == obj_id &&
+		!tags->is_deleted) ? 1 : 0;
+
+}
+
+static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk,
+					struct yaffs_ext_tags *tags, int obj_id,
+					int inode_chunk)
+{
+	int j;
+
+	for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) {
+		if (yaffs_check_chunk_bit
+		    (dev, the_chunk / dev->param.chunks_per_block,
+		     the_chunk % dev->param.chunks_per_block)) {
+
+			if (dev->chunk_grp_size == 1)
+				return the_chunk;
+			else {
+				yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
+							 tags);
+				if (yaffs_tags_match(tags,
+							obj_id, inode_chunk)) {
+					/* found it; */
+					return the_chunk;
+				}
+			}
+		}
+		the_chunk++;
+	}
+	return -1;
+}
+
+int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+				    struct yaffs_ext_tags *tags)
+{
+	/* Get the tnode, then get the level 0 chunk offset */
+	struct yaffs_tnode *tn;
+	int the_chunk = -1;
+	struct yaffs_ext_tags local_tags;
+	int ret_val = -1;
+	struct yaffs_dev *dev = in->my_dev;
+
+	if (!tags) {
+		/* Passed a NULL, so use our own tags space */
+		tags = &local_tags;
+	}
+
+	tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
+
+	if (!tn)
+		return ret_val;
+
+	the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+	ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
+					      inode_chunk);
+	return ret_val;
+}
+
+static int yaffs_find_del_file_chunk(struct yaffs_obj *in, int inode_chunk,
+				     struct yaffs_ext_tags *tags)
+{
+	/* Get the tnode, then get the level 0 chunk offset */
+	struct yaffs_tnode *tn;
+	int the_chunk = -1;
+	struct yaffs_ext_tags local_tags;
+	struct yaffs_dev *dev = in->my_dev;
+	int ret_val = -1;
+
+	if (!tags) {
+		/* Passed a NULL, so use our own tags space */
+		tags = &local_tags;
+	}
+
+	tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
+
+	if (!tn)
+		return ret_val;
+
+	the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+	ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
+					      inode_chunk);
+
+	/* Delete the entry in the filestructure (if found) */
+	if (ret_val != -1)
+		yaffs_load_tnode_0(dev, tn, inode_chunk, 0);
+
+	return ret_val;
+}
+
+int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+			    int nand_chunk, int in_scan)
+{
+	/* NB in_scan is zero unless scanning.
+	 * For forward scanning, in_scan is > 0;
+	 * for backward scanning in_scan is < 0
+	 *
+	 * nand_chunk = 0 is a dummy insert to make sure the tnodes are there.
+	 */
+
+	struct yaffs_tnode *tn;
+	struct yaffs_dev *dev = in->my_dev;
+	int existing_chunk;
+	struct yaffs_ext_tags existing_tags;
+	struct yaffs_ext_tags new_tags;
+	unsigned existing_serial, new_serial;
+
+	if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) {
+		/* Just ignore an attempt at putting a chunk into a non-file
+		 * during scanning.
+		 * If it is not during Scanning then something went wrong!
+		 */
+		if (!in_scan) {
+			yaffs_trace(YAFFS_TRACE_ERROR,
+				"yaffs tragedy: attempt to put data chunk into a non-file"
+				);
+			BUG();
+		}
+
+		yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+		return YAFFS_OK;
+	}
+
+	tn = yaffs_add_find_tnode_0(dev,
+				    &in->variant.file_variant,
+				    inode_chunk, NULL);
+	if (!tn)
+		return YAFFS_FAIL;
+
+	if (!nand_chunk)
+		/* Dummy insert, bail now */
+		return YAFFS_OK;
+
+	existing_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+	if (in_scan != 0) {
+		/* If we're scanning then we need to test for duplicates
+		 * NB This does not need to be efficient since it should only
+		 * happen when the power fails during a write, then only one
+		 * chunk should ever be affected.
+		 *
+		 * Correction for YAFFS2: This could happen quite a lot and we
+		 * need to think about efficiency! TODO
+		 * Update: For backward scanning we don't need to re-read tags
+		 * so this is quite cheap.
+		 */
+
+		if (existing_chunk > 0) {
+			/* NB Right now existing chunk will not be real
+			 * chunk_id if the chunk group size > 1
+			 * thus we have to do a FindChunkInFile to get the
+			 * real chunk id.
+			 *
+			 * We have a duplicate now we need to decide which
+			 * one to use:
+			 *
+			 * Backwards scanning YAFFS2: The old one is what
+			 * we use, dump the new one.
+			 * YAFFS1: Get both sets of tags and compare serial
+			 * numbers.
+			 */
+
+			if (in_scan > 0) {
+				/* Only do this for forward scanning */
+				yaffs_rd_chunk_tags_nand(dev,
+							 nand_chunk,
+							 NULL, &new_tags);
+
+				/* Do a proper find */
+				existing_chunk =
+				    yaffs_find_chunk_in_file(in, inode_chunk,
+							     &existing_tags);
+			}
+
+			if (existing_chunk <= 0) {
+				/* Hoosterman - how did this happen? */
+
+				yaffs_trace(YAFFS_TRACE_ERROR,
+					"yaffs tragedy: existing chunk < 0 in scan"
+					);
+
+			}
+
+			/* NB The deleted flags should be false, otherwise
+			 * the chunks will not be loaded during a scan
+			 */
+
+			if (in_scan > 0) {
+				new_serial = new_tags.serial_number;
+				existing_serial = existing_tags.serial_number;
+			}
+
+			if ((in_scan > 0) &&
+			    (existing_chunk <= 0 ||
+			     ((existing_serial + 1) & 3) == new_serial)) {
+				/* Forward scanning.
+				 * Use new
+				 * Delete the old one and drop through to
+				 * update the tnode
+				 */
+				yaffs_chunk_del(dev, existing_chunk, 1,
+						__LINE__);
+			} else {
+				/* Backward scanning or we want to use the
+				 * existing one
+				 * Delete the new one and return early so that
+				 * the tnode isn't changed
+				 */
+				yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+				return YAFFS_OK;
+			}
+		}
+
+	}
+
+	if (existing_chunk == 0)
+		in->n_data_chunks++;
+
+	yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk);
+
+	return YAFFS_OK;
+}
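+
+/* The YAFFS1 duplicate test above leans on the 2-bit serial number
+ * wrapping modulo 4: a rewritten chunk carries serial (old + 1) & 3,
+ * so the comparison picks the newer copy even across the wrap.
+ * Example: existing_serial == 3 and new_serial == 0 gives
+ * ((3 + 1) & 3) == 0 == new_serial, so the new chunk wins and the old
+ * one is deleted; any other pairing keeps the existing copy.
+ */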
+
+static void yaffs_soft_del_chunk(struct yaffs_dev *dev, int chunk)
+{
+	struct yaffs_block_info *the_block;
+	unsigned block_no;
+
+	yaffs_trace(YAFFS_TRACE_DELETION, "soft delete chunk %d", chunk);
+
+	block_no = chunk / dev->param.chunks_per_block;
+	the_block = yaffs_get_block_info(dev, block_no);
+	if (the_block) {
+		the_block->soft_del_pages++;
+		dev->n_free_chunks++;
+		yaffs2_update_oldest_dirty_seq(dev, block_no, the_block);
+	}
+}
+
+/* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all
+ * the chunks in the file.
+ * All soft deleting does is increment the block's soft-delete count and pull
+ * the chunk out of the tnode.
+ * Thus, essentially this is the same as DeleteWorker except that the chunks
+ * are soft deleted.
+ */
+
+static int yaffs_soft_del_worker(struct yaffs_obj *in, struct yaffs_tnode *tn,
+				 u32 level, int chunk_offset)
+{
+	int i;
+	int the_chunk;
+	int all_done = 1;
+	struct yaffs_dev *dev = in->my_dev;
+
+	if (!tn)
+		return 1;
+
+	if (level > 0) {
+		for (i = YAFFS_NTNODES_INTERNAL - 1;
+			all_done && i >= 0;
+			i--) {
+			if (tn->internal[i]) {
+				all_done =
+				    yaffs_soft_del_worker(in,
+					tn->internal[i],
+					level - 1,
+					(chunk_offset <<
+					YAFFS_TNODES_INTERNAL_BITS)
+					+ i);
+				if (all_done) {
+					yaffs_free_tnode(dev,
+						tn->internal[i]);
+					tn->internal[i] = NULL;
+				} else {
+					/* Can this happen? */
+				}
+			}
+		}
+		return (all_done) ? 1 : 0;
+	}
+
+	/* level 0 */
+	for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
+		the_chunk = yaffs_get_group_base(dev, tn, i);
+		if (the_chunk) {
+			yaffs_soft_del_chunk(dev, the_chunk);
+			yaffs_load_tnode_0(dev, tn, i, 0);
+		}
+	}
+	return 1;
+}
+
+static void yaffs_remove_obj_from_dir(struct yaffs_obj *obj)
+{
+	struct yaffs_dev *dev = obj->my_dev;
+	struct yaffs_obj *parent;
+
+	yaffs_verify_obj_in_dir(obj);
+	parent = obj->parent;
+
+	yaffs_verify_dir(parent);
+
+	if (dev && dev->param.remove_obj_fn)
+		dev->param.remove_obj_fn(obj);
+
+	list_del_init(&obj->siblings);
+	obj->parent = NULL;
+
+	yaffs_verify_dir(parent);
+}
+
+void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj)
+{
+	if (!directory) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"tragedy: Trying to add an object to a null pointer directory"
+			);
+		BUG();
+		return;
+	}
+	if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"tragedy: Trying to add an object to a non-directory"
+			);
+		BUG();
+	}
+
+	if (obj->siblings.prev == NULL) {
+		/* Not initialised */
+		BUG();
+	}
+
+	yaffs_verify_dir(directory);
+
+	yaffs_remove_obj_from_dir(obj);
+
+	/* Now add it */
+	list_add(&obj->siblings, &directory->variant.dir_variant.children);
+	obj->parent = directory;
+
+	if (directory == obj->my_dev->unlinked_dir
+	    || directory == obj->my_dev->del_dir) {
+		obj->unlinked = 1;
+		obj->my_dev->n_unlinked_files++;
+		obj->rename_allowed = 0;
+	}
+
+	yaffs_verify_dir(directory);
+	yaffs_verify_obj_in_dir(obj);
+}
+
+static int yaffs_change_obj_name(struct yaffs_obj *obj,
+				 struct yaffs_obj *new_dir,
+				 const YCHAR *new_name, int force, int shadows)
+{
+	int unlink_op;
+	int del_op;
+	struct yaffs_obj *existing_target;
+
+	if (new_dir == NULL)
+		new_dir = obj->parent;	/* use the old directory */
+
+	if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"tragedy: yaffs_change_obj_name: new_dir is not a directory"
+			);
+		BUG();
+	}
+
+	unlink_op = (new_dir == obj->my_dev->unlinked_dir);
+	del_op = (new_dir == obj->my_dev->del_dir);
+
+	existing_target = yaffs_find_by_name(new_dir, new_name);
+
+	/* If the object is a file going into the unlinked directory,
+	 *   then it is OK to just stuff it in since duplicate names are OK.
+	 *   else only proceed if the new name does not exist and we're putting
+	 *   it into a directory.
+	 */
+	if (!(unlink_op || del_op || force ||
+	      shadows > 0 || !existing_target) ||
+	      new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
+		return YAFFS_FAIL;
+
+	yaffs_set_obj_name(obj, new_name);
+	obj->dirty = 1;
+	yaffs_add_obj_to_dir(new_dir, obj);
+
+	if (unlink_op)
+		obj->unlinked = 1;
+
+	/* If it is a deletion then we mark it as a shrink for gc  */
+	if (yaffs_update_oh(obj, new_name, 0, del_op, shadows, NULL) >= 0)
+		return YAFFS_OK;
+
+	return YAFFS_FAIL;
+}
+
+/*------------------------ Short Operations Cache ------------------------------
+ *   In many situations where there is no high-level buffering, a lot of
+ *   reads might be short sequential reads, and a lot of writes may be short
+ *   sequential writes, e.g. scanning/writing a JPEG file.
+ *   In these cases, a short read/write cache can provide a huge performance
+ *   benefit with dumb-as-a-rock code.
+ *   In Linux, the page cache provides read buffering and the short op cache
+ *   provides write buffering.
+ *
+ *   There are a small number (~10) of cache chunks per device so that we don't
+ *   need a very intelligent search.
+ */
+
+static int yaffs_obj_cache_dirty(struct yaffs_obj *obj)
+{
+	struct yaffs_dev *dev = obj->my_dev;
+	int i;
+	struct yaffs_cache *cache;
+	int n_caches = obj->my_dev->param.n_caches;
+
+	for (i = 0; i < n_caches; i++) {
+		cache = &dev->cache[i];
+		if (cache->object == obj && cache->dirty)
+			return 1;
+	}
+
+	return 0;
+}
+
+static void yaffs_flush_file_cache(struct yaffs_obj *obj)
+{
+	struct yaffs_dev *dev = obj->my_dev;
+	int lowest = -99;	/* Stop compiler whining. */
+	int i;
+	struct yaffs_cache *cache;
+	int chunk_written = 0;
+	int n_caches = obj->my_dev->param.n_caches;
+
+	if (n_caches < 1)
+		return;
+	do {
+		cache = NULL;
+
+		/* Find the lowest dirty chunk for this object */
+		for (i = 0; i < n_caches; i++) {
+			if (dev->cache[i].object == obj &&
+			    dev->cache[i].dirty) {
+				if (!cache ||
+				    dev->cache[i].chunk_id < lowest) {
+					cache = &dev->cache[i];
+					lowest = cache->chunk_id;
+				}
+			}
+		}
+
+		if (cache && !cache->locked) {
+			/* Write it out and free it up */
+			chunk_written =
+			    yaffs_wr_data_obj(cache->object,
+					      cache->chunk_id,
+					      cache->data,
+					      cache->n_bytes, 1);
+			cache->dirty = 0;
+			cache->object = NULL;
+		}
+	} while (cache && chunk_written > 0);
+
+	if (cache)
+		/* Hoosterman, disk full while writing cache out. */
+		yaffs_trace(YAFFS_TRACE_ERROR,
+			"yaffs tragedy: no space during cache write");
+}
+
+/* yaffs_flush_whole_cache(dev)
+ *
+ * Flush the cache of every object that has dirty cached chunks.
+ */
+
+void yaffs_flush_whole_cache(struct yaffs_dev *dev)
+{
+	struct yaffs_obj *obj;
+	int n_caches = dev->param.n_caches;
+	int i;
+
+	/* Find a dirty object in the cache and flush it...
+	 * until there are no further dirty objects.
+	 */
+	do {
+		obj = NULL;
+		for (i = 0; i < n_caches && !obj; i++) {
+			if (dev->cache[i].object && dev->cache[i].dirty)
+				obj = dev->cache[i].object;
+		}
+		if (obj)
+			yaffs_flush_file_cache(obj);
+	} while (obj);
+
+}
+
+/* Grab us a cache chunk for use.
+ * First look for an empty one.
+ * Then look for the least recently used non-dirty one.
+ * Then look for the least recently used dirty one...., flush and look again.
+ */
+static struct yaffs_cache *yaffs_grab_chunk_worker(struct yaffs_dev *dev)
+{
+	int i;
+
+	if (dev->param.n_caches > 0) {
+		for (i = 0; i < dev->param.n_caches; i++) {
+			if (!dev->cache[i].object)
+				return &dev->cache[i];
+		}
+	}
+	return NULL;
+}
+
+static struct yaffs_cache *yaffs_grab_chunk_cache(struct yaffs_dev *dev)
+{
+	struct yaffs_cache *cache;
+	struct yaffs_obj *the_obj;
+	int usage;
+	int i;
+	int pushout;
+
+	if (dev->param.n_caches < 1)
+		return NULL;
+
+	/* Try to find a non-dirty one... */
+
+	cache = yaffs_grab_chunk_worker(dev);
+
+	if (!cache) {
+		/* They were all dirty, find the LRU object and flush
+		 * its cache, then  find again.
+		 * NB what's here is not very accurate,
+		 * we actually flush the object with the LRU chunk.
+		 */
+
+		/* With locking we can't assume we can use entry zero.
+		 * Set the_obj to a valid pointer for Coverity. */
+		the_obj = dev->cache[0].object;
+		usage = -1;
+		cache = NULL;
+		pushout = -1;
+
+		for (i = 0; i < dev->param.n_caches; i++) {
+			if (dev->cache[i].object &&
+			    !dev->cache[i].locked &&
+			    (dev->cache[i].last_use < usage ||
+			    !cache)) {
+				usage = dev->cache[i].last_use;
+				the_obj = dev->cache[i].object;
+				cache = &dev->cache[i];
+				pushout = i;
+			}
+		}
+
+		if (!cache || cache->dirty) {
+			/* Flush and try again */
+			yaffs_flush_file_cache(the_obj);
+			cache = yaffs_grab_chunk_worker(dev);
+		}
+	}
+	return cache;
+}
+
+/* Find a cached chunk */
+static struct yaffs_cache *yaffs_find_chunk_cache(const struct yaffs_obj *obj,
+						  int chunk_id)
+{
+	struct yaffs_dev *dev = obj->my_dev;
+	int i;
+
+	if (dev->param.n_caches < 1)
+		return NULL;
+
+	for (i = 0; i < dev->param.n_caches; i++) {
+		if (dev->cache[i].object == obj &&
+		    dev->cache[i].chunk_id == chunk_id) {
+			dev->cache_hits++;
+
+			return &dev->cache[i];
+		}
+	}
+	return NULL;
+}
+
+/* Mark the chunk for the least recently used algorithm */
+static void yaffs_use_cache(struct yaffs_dev *dev, struct yaffs_cache *cache,
+			    int is_write)
+{
+	int i;
+
+	if (dev->param.n_caches < 1)
+		return;
+
+	if (dev->cache_last_use < 0 ||
+		dev->cache_last_use > 100000000) {
+		/* Reset the cache usages */
+		for (i = 1; i < dev->param.n_caches; i++)
+			dev->cache[i].last_use = 0;
+
+		dev->cache_last_use = 0;
+	}
+	dev->cache_last_use++;
+	cache->last_use = dev->cache_last_use;
+
+	if (is_write)
+		cache->dirty = 1;
+}
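+
+/*
+ * A worked example of the stamp scheme above: every access copies the
+ * device-wide counter into the entry, so after touching entries A, B
+ * and then A again the stamps might read A = 3, B = 2, making B the
+ * least recently used victim for yaffs_grab_chunk_cache(). Once
+ * cache_last_use passes 100000000 all the stamps are reset so the
+ * comparisons stay meaningful.
+ */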
+
+/* Invalidate a single cache page.
+ * Do this when a whole page gets written,
+ * ie the short cache for this page is no longer valid.
+ */
+static void yaffs_invalidate_chunk_cache(struct yaffs_obj *object, int chunk_id)
+{
+	struct yaffs_cache *cache;
+
+	if (object->my_dev->param.n_caches > 0) {
+		cache = yaffs_find_chunk_cache(object, chunk_id);
+
+		if (cache)
+			cache->object = NULL;
+	}
+}
+
+/* Invalidate all the cache pages associated with this object.
+ * Do this whenever the file is deleted or resized.
+ */
+static void yaffs_invalidate_whole_cache(struct yaffs_obj *in)
+{
+	int i;
+	struct yaffs_dev *dev = in->my_dev;
+
+	if (dev->param.n_caches > 0) {
+		/* Invalidate it. */
+		for (i = 0; i < dev->param.n_caches; i++) {
+			if (dev->cache[i].object == in)
+				dev->cache[i].object = NULL;
+		}
+	}
+}
+
+static void yaffs_unhash_obj(struct yaffs_obj *obj)
+{
+	int bucket;
+	struct yaffs_dev *dev = obj->my_dev;
+
+	/* If it is still linked into the bucket list, free from the list */
+	if (!list_empty(&obj->hash_link)) {
+		list_del_init(&obj->hash_link);
+		bucket = yaffs_hash_fn(obj->obj_id);
+		dev->obj_bucket[bucket].count--;
+	}
+}
+
+/* FreeObject frees up an Object and puts it back on the free list */
+static void yaffs_free_obj(struct yaffs_obj *obj)
+{
+	struct yaffs_dev *dev;
+
+	if (!obj) {
+		BUG();
+		return;
+	}
+	dev = obj->my_dev;
+	yaffs_trace(YAFFS_TRACE_OS, "FreeObject %p inode %p",
+		obj, obj->my_inode);
+	if (obj->parent)
+		BUG();
+	if (!list_empty(&obj->siblings))
+		BUG();
+
+	if (obj->my_inode) {
+		/* We're still hooked up to a cached inode.
+		 * Don't delete now, but mark for later deletion
+		 */
+		obj->defered_free = 1;
+		return;
+	}
+
+	yaffs_unhash_obj(obj);
+
+	yaffs_free_raw_obj(dev, obj);
+	dev->n_obj--;
+	dev->checkpoint_blocks_required = 0;	/* force recalculation */
+}
+
+void yaffs_handle_defered_free(struct yaffs_obj *obj)
+{
+	if (obj->defered_free)
+		yaffs_free_obj(obj);
+}
+
+static int yaffs_generic_obj_del(struct yaffs_obj *in)
+{
+	/* Invalidate the file's data in the cache, without flushing. */
+	yaffs_invalidate_whole_cache(in);
+
+	if (in->my_dev->param.is_yaffs2 && in->parent != in->my_dev->del_dir) {
+		/* Move to unlinked directory so we have a deletion record */
+		yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0,
+				      0);
+	}
+
+	yaffs_remove_obj_from_dir(in);
+	yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__);
+	in->hdr_chunk = 0;
+
+	yaffs_free_obj(in);
+	return YAFFS_OK;
+
+}
+
+static void yaffs_soft_del_file(struct yaffs_obj *obj)
+{
+	if (!obj->deleted ||
+	    obj->variant_type != YAFFS_OBJECT_TYPE_FILE ||
+	    obj->soft_del)
+		return;
+
+	if (obj->n_data_chunks <= 0) {
+		/* Empty file with no duplicate object headers,
+		 * just delete it immediately */
+		yaffs_free_tnode(obj->my_dev, obj->variant.file_variant.top);
+		obj->variant.file_variant.top = NULL;
+		yaffs_trace(YAFFS_TRACE_TRACING,
+			"yaffs: Deleting empty file %d",
+			obj->obj_id);
+		yaffs_generic_obj_del(obj);
+	} else {
+		yaffs_soft_del_worker(obj,
+				      obj->variant.file_variant.top,
+				      obj->variant.
+				      file_variant.top_level, 0);
+		obj->soft_del = 1;
+	}
+}
+
+/* Pruning removes any part of the file structure tree that is beyond the
+ * bounds of the file (ie that does not point to chunks).
+ *
+ * A file should only get pruned when its size is reduced.
+ *
+ * Before pruning, the chunks must be pulled from the tree and the
+ * level 0 tnode entries must be zeroed out.
+ * Could also use this for file deletion, but that's probably better handled
+ * by a special case.
+ *
+ * This function is recursive. For levels > 0 the function is called again on
+ * any sub-tree. For level == 0 we just check if the sub-tree has data.
+ * If there is no data in a subtree then it is pruned.
+ */
+
+static struct yaffs_tnode *yaffs_prune_worker(struct yaffs_dev *dev,
+					      struct yaffs_tnode *tn, u32 level,
+					      int del0)
+{
+	int i;
+	int has_data;
+
+	if (!tn)
+		return tn;
+
+	has_data = 0;
+
+	if (level > 0) {
+		for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
+			if (tn->internal[i]) {
+				tn->internal[i] =
+				    yaffs_prune_worker(dev,
+						tn->internal[i],
+						level - 1,
+						(i == 0) ? del0 : 1);
+			}
+
+			if (tn->internal[i])
+				has_data++;
+		}
+	} else {
+		int tnode_size_u32 = dev->tnode_size / sizeof(u32);
+		u32 *map = (u32 *) tn;
+
+		for (i = 0; !has_data && i < tnode_size_u32; i++) {
+			if (map[i])
+				has_data++;
+		}
+	}
+
+	if (has_data == 0 && del0) {
+		/* Free and return NULL */
+		yaffs_free_tnode(dev, tn);
+		tn = NULL;
+	}
+	return tn;
+}
+
+static int yaffs_prune_tree(struct yaffs_dev *dev,
+			    struct yaffs_file_var *file_struct)
+{
+	int i;
+	int has_data;
+	int done = 0;
+	struct yaffs_tnode *tn;
+
+	if (file_struct->top_level < 1)
+		return YAFFS_OK;
+
+	file_struct->top =
+	   yaffs_prune_worker(dev, file_struct->top, file_struct->top_level, 0);
+
+	/* Now we have a tree with all the empty branches NULLed out, but
+	 * the height is the same as it was.
+	 * Let's see if we can trim internal tnodes to shorten the tree.
+	 * We can do this if only the 0th element in the tnode is in use
+	 * (ie all the non-zeroth entries are NULL).
+	 */
+
+	while (file_struct->top_level && !done) {
+		tn = file_struct->top;
+
+		has_data = 0;
+		for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
+			if (tn->internal[i])
+				has_data++;
+		}
+
+		if (!has_data) {
+			file_struct->top = tn->internal[0];
+			file_struct->top_level--;
+			yaffs_free_tnode(dev, tn);
+		} else {
+			done = 1;
+		}
+	}
+
+	return YAFFS_OK;
+}
+
+/*-------------------- End of File Structure functions.-------------------*/
+
+/* alloc_empty_obj gets us a clean Object.*/
+static struct yaffs_obj *yaffs_alloc_empty_obj(struct yaffs_dev *dev)
+{
+	struct yaffs_obj *obj = yaffs_alloc_raw_obj(dev);
+
+	if (!obj)
+		return obj;
+
+	dev->n_obj++;
+
+	/* Now sweeten it up... */
+
+	memset(obj, 0, sizeof(struct yaffs_obj));
+	obj->being_created = 1;
+
+	obj->my_dev = dev;
+	obj->hdr_chunk = 0;
+	obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN;
+	INIT_LIST_HEAD(&(obj->hard_links));
+	INIT_LIST_HEAD(&(obj->hash_link));
+	INIT_LIST_HEAD(&obj->siblings);
+
+	/* Now make the directory sane */
+	if (dev->root_dir) {
+		obj->parent = dev->root_dir;
+		list_add(&(obj->siblings),
+			 &dev->root_dir->variant.dir_variant.children);
+	}
+
+	/* Add it to the lost and found directory.
+	 * NB Can't put root or lost-n-found in lost-n-found so
+	 * check if lost-n-found exists first
+	 */
+	if (dev->lost_n_found)
+		yaffs_add_obj_to_dir(dev->lost_n_found, obj);
+
+	obj->being_created = 0;
+
+	dev->checkpoint_blocks_required = 0;	/* force recalculation */
+
+	return obj;
+}
+
+static int yaffs_find_nice_bucket(struct yaffs_dev *dev)
+{
+	int i;
+	int l = 999;
+	int lowest = 999999;
+
+	/* Search for the shortest list or one that
+	 * isn't too long.
+	 */
+
+	for (i = 0; i < 10 && lowest > 4; i++) {
+		dev->bucket_finder++;
+		dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS;
+		if (dev->obj_bucket[dev->bucket_finder].count < lowest) {
+			lowest = dev->obj_bucket[dev->bucket_finder].count;
+			l = dev->bucket_finder;
+		}
+	}
+
+	return l;
+}
+
+static int yaffs_new_obj_id(struct yaffs_dev *dev)
+{
+	int bucket = yaffs_find_nice_bucket(dev);
+	int found = 0;
+	struct list_head *i;
+	u32 n = (u32) bucket;
+
+	/* Now find an object value that has not already been taken
+	 * by scanning the list.
+	 */
+
+	while (!found) {
+		found = 1;
+		n += YAFFS_NOBJECT_BUCKETS;
+		/* NB the "1 ||" forces a full scan of the bucket list;
+		 * the count test is kept only for reference. */
+		if (1 || dev->obj_bucket[bucket].count > 0) {
+			list_for_each(i, &dev->obj_bucket[bucket].list) {
+				/* If there is already one in the list */
+				if (i && list_entry(i, struct yaffs_obj,
+						    hash_link)->obj_id == n) {
+					found = 0;
+				}
+			}
+		}
+	}
+	return n;
+}
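+
+/*
+ * A worked example of the id allocation above, assuming yaffs_hash_fn()
+ * is a simple modulo by YAFFS_NOBJECT_BUCKETS (e.g. 256): starting from
+ * bucket 7, the candidate ids are 263, 519, 775, ... Each step adds
+ * YAFFS_NOBJECT_BUCKETS, so every candidate hashes back into bucket 7
+ * and only that one bucket's list needs to be scanned for collisions.
+ */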
+
+static void yaffs_hash_obj(struct yaffs_obj *in)
+{
+	int bucket = yaffs_hash_fn(in->obj_id);
+	struct yaffs_dev *dev = in->my_dev;
+
+	list_add(&in->hash_link, &dev->obj_bucket[bucket].list);
+	dev->obj_bucket[bucket].count++;
+}
+
+struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number)
+{
+	int bucket = yaffs_hash_fn(number);
+	struct list_head *i;
+	struct yaffs_obj *in;
+
+	list_for_each(i, &dev->obj_bucket[bucket].list) {
+		/* Look if it is in the list */
+		in = list_entry(i, struct yaffs_obj, hash_link);
+		if (in->obj_id == number) {
+			/* Don't return it if its free has been deferred */
+			if (in->defered_free)
+				return NULL;
+			return in;
+		}
+	}
+
+	return NULL;
+}
+
+static struct yaffs_obj *yaffs_new_obj(struct yaffs_dev *dev, int number,
+				enum yaffs_obj_type type)
+{
+	struct yaffs_obj *the_obj = NULL;
+	struct yaffs_tnode *tn = NULL;
+
+	if (number < 0)
+		number = yaffs_new_obj_id(dev);
+
+	if (type == YAFFS_OBJECT_TYPE_FILE) {
+		tn = yaffs_get_tnode(dev);
+		if (!tn)
+			return NULL;
+	}
+
+	the_obj = yaffs_alloc_empty_obj(dev);
+	if (!the_obj) {
+		if (tn)
+			yaffs_free_tnode(dev, tn);
+		return NULL;
+	}
+
+	the_obj->fake = 0;
+	the_obj->rename_allowed = 1;
+	the_obj->unlink_allowed = 1;
+	the_obj->obj_id = number;
+	yaffs_hash_obj(the_obj);
+	the_obj->variant_type = type;
+	yaffs_load_current_time(the_obj, 1, 1);
+
+	switch (type) {
+	case YAFFS_OBJECT_TYPE_FILE:
+		the_obj->variant.file_variant.file_size = 0;
+		the_obj->variant.file_variant.scanned_size = 0;
+		the_obj->variant.file_variant.shrink_size =
+						yaffs_max_file_size(dev);
+		the_obj->variant.file_variant.top_level = 0;
+		the_obj->variant.file_variant.top = tn;
+		break;
+	case YAFFS_OBJECT_TYPE_DIRECTORY:
+		INIT_LIST_HEAD(&the_obj->variant.dir_variant.children);
+		INIT_LIST_HEAD(&the_obj->variant.dir_variant.dirty);
+		break;
+	case YAFFS_OBJECT_TYPE_SYMLINK:
+	case YAFFS_OBJECT_TYPE_HARDLINK:
+	case YAFFS_OBJECT_TYPE_SPECIAL:
+		/* No action required */
+		break;
+	case YAFFS_OBJECT_TYPE_UNKNOWN:
+		/* TODO: this should not happen */
+		break;
+	}
+	return the_obj;
+}
+
+static struct yaffs_obj *yaffs_create_fake_dir(struct yaffs_dev *dev,
+					       int number, u32 mode)
+{
+
+	struct yaffs_obj *obj =
+	    yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
+
+	if (!obj)
+		return NULL;
+
+	obj->fake = 1;	/* it is fake so it might not use NAND */
+	obj->rename_allowed = 0;
+	obj->unlink_allowed = 0;
+	obj->deleted = 0;
+	obj->unlinked = 0;
+	obj->yst_mode = mode;
+	obj->my_dev = dev;
+	obj->hdr_chunk = 0;	/* Not a valid chunk. */
+	return obj;
+
+}
+
+
+static void yaffs_init_tnodes_and_objs(struct yaffs_dev *dev)
+{
+	int i;
+
+	dev->n_obj = 0;
+	dev->n_tnodes = 0;
+	yaffs_init_raw_tnodes_and_objs(dev);
+
+	for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+		INIT_LIST_HEAD(&dev->obj_bucket[i].list);
+		dev->obj_bucket[i].count = 0;
+	}
+}
+
+struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
+						 int number,
+						 enum yaffs_obj_type type)
+{
+	struct yaffs_obj *the_obj = NULL;
+
+	if (number > 0)
+		the_obj = yaffs_find_by_number(dev, number);
+
+	if (!the_obj)
+		the_obj = yaffs_new_obj(dev, number, type);
+
+	return the_obj;
+
+}
+
+YCHAR *yaffs_clone_str(const YCHAR *str)
+{
+	YCHAR *new_str = NULL;
+	int len;
+
+	if (!str)
+		str = _Y("");
+
+	len = strnlen(str, YAFFS_MAX_ALIAS_LENGTH);
+	new_str = kmalloc((len + 1) * sizeof(YCHAR), GFP_NOFS);
+	if (new_str) {
+		strncpy(new_str, str, len);
+		new_str[len] = 0;
+	}
+	return new_str;
+
+}
+
+/*
+ * yaffs_update_parent() handles fixing a directory's mtime and ctime when a
+ * new link (ie. name) is created or deleted in the directory.
+ *
+ * ie.
+ *   create dir/a : update dir's mtime/ctime
+ *   rm dir/a:     update dir's mtime/ctime
+ *   modify dir/a: don't update dir's mtime/ctime
+ *
+ * This can be handled immediately or deferred. Deferring helps reduce the
+ * number of updates when many files in a directory are changed within a
+ * brief period.
+ *
+ * If the directory updating is deferred then yaffs_update_dirty_dirs must be
+ * called periodically.
+ */
+
+static void yaffs_update_parent(struct yaffs_obj *obj)
+{
+	struct yaffs_dev *dev;
+
+	if (!obj)
+		return;
+	dev = obj->my_dev;
+	obj->dirty = 1;
+	yaffs_load_current_time(obj, 0, 1);
+	if (dev->param.defered_dir_update) {
+		struct list_head *link = &obj->variant.dir_variant.dirty;
+
+		if (list_empty(link)) {
+			list_add(link, &dev->dirty_dirs);
+			yaffs_trace(YAFFS_TRACE_BACKGROUND,
+			  "Added object %d to dirty directories",
+			   obj->obj_id);
+		}
+
+	} else {
+		yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
+	}
+}
+
+void yaffs_update_dirty_dirs(struct yaffs_dev *dev)
+{
+	struct list_head *link;
+	struct yaffs_obj *obj;
+	struct yaffs_dir_var *d_s;
+	union yaffs_obj_var *o_v;
+
+	yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update dirty directories");
+
+	while (!list_empty(&dev->dirty_dirs)) {
+		link = dev->dirty_dirs.next;
+		list_del_init(link);
+
+		d_s = list_entry(link, struct yaffs_dir_var, dirty);
+		o_v = list_entry(d_s, union yaffs_obj_var, dir_variant);
+		obj = list_entry(o_v, struct yaffs_obj, variant);
+
+		yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update directory %d",
+			obj->obj_id);
+
+		if (obj->dirty)
+			yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
+	}
+}
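+
+/*
+ * The three list_entry() calls above work like nested container_of():
+ * the dirty link is embedded in a struct yaffs_dir_var, which is a
+ * member of the union yaffs_obj_var, which in turn is the variant
+ * member of struct yaffs_obj. Schematically:
+ *
+ *	struct yaffs_obj
+ *	  `- union yaffs_obj_var variant
+ *	       `- struct yaffs_dir_var dir_variant
+ *	            `- struct list_head dirty   <- link lives here
+ *
+ * so walking outward member by member recovers the enclosing object.
+ */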
+
+/*
+ * Mknod (create) a new object.
+ * equiv_obj only has meaning for a hard link;
+ * alias_str only has meaning for a symlink.
+ * rdev only has meaning for devices (a subset of special objects)
+ */
+
+static struct yaffs_obj *yaffs_create_obj(enum yaffs_obj_type type,
+					  struct yaffs_obj *parent,
+					  const YCHAR *name,
+					  u32 mode,
+					  u32 uid,
+					  u32 gid,
+					  struct yaffs_obj *equiv_obj,
+					  const YCHAR *alias_str, u32 rdev)
+{
+	struct yaffs_obj *in;
+	YCHAR *str = NULL;
+	struct yaffs_dev *dev = parent->my_dev;
+
+	/* Check if the entry exists.
+	 * If it does then fail the call since we don't want a dup. */
+	if (yaffs_find_by_name(parent, name))
+		return NULL;
+
+	if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
+		str = yaffs_clone_str(alias_str);
+		if (!str)
+			return NULL;
+	}
+
+	in = yaffs_new_obj(dev, -1, type);
+
+	if (!in) {
+		kfree(str);
+		return NULL;
+	}
+
+	in->hdr_chunk = 0;
+	in->valid = 1;
+	in->variant_type = type;
+
+	in->yst_mode = mode;
+
+	yaffs_attribs_init(in, gid, uid, rdev);
+
+	in->n_data_chunks = 0;
+
+	yaffs_set_obj_name(in, name);
+	in->dirty = 1;
+
+	yaffs_add_obj_to_dir(parent, in);
+
+	in->my_dev = parent->my_dev;
+
+	switch (type) {
+	case YAFFS_OBJECT_TYPE_SYMLINK:
+		in->variant.symlink_variant.alias = str;
+		break;
+	case YAFFS_OBJECT_TYPE_HARDLINK:
+		in->variant.hardlink_variant.equiv_obj = equiv_obj;
+		in->variant.hardlink_variant.equiv_id = equiv_obj->obj_id;
+		list_add(&in->hard_links, &equiv_obj->hard_links);
+		break;
+	case YAFFS_OBJECT_TYPE_FILE:
+	case YAFFS_OBJECT_TYPE_DIRECTORY:
+	case YAFFS_OBJECT_TYPE_SPECIAL:
+	case YAFFS_OBJECT_TYPE_UNKNOWN:
+		/* do nothing */
+		break;
+	}
+
+	if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) {
+		/* Could not create the object header, fail */
+		yaffs_del_obj(in);
+		in = NULL;
+	}
+
+	if (in)
+		yaffs_update_parent(parent);
+
+	return in;
+}
+
+struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
+				    const YCHAR *name, u32 mode, u32 uid,
+				    u32 gid)
+{
+	return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
+				uid, gid, NULL, NULL, 0);
+}
+
+struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
+				   u32 mode, u32 uid, u32 gid)
+{
+	return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
+				mode, uid, gid, NULL, NULL, 0);
+}
+
+struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
+				       const YCHAR *name, u32 mode, u32 uid,
+				       u32 gid, u32 rdev)
+{
+	return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
+				uid, gid, NULL, NULL, rdev);
+}
+
+struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
+				       const YCHAR *name, u32 mode, u32 uid,
+				       u32 gid, const YCHAR *alias)
+{
+	return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
+				uid, gid, NULL, alias, 0);
+}
+
+/* yaffs_link_obj returns the object id of the equivalent object.*/
+struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name,
+				 struct yaffs_obj *equiv_obj)
+{
+	/* Get the real object in case we were fed a hard link obj */
+	equiv_obj = yaffs_get_equivalent_obj(equiv_obj);
+
+	if (yaffs_create_obj(YAFFS_OBJECT_TYPE_HARDLINK,
+			parent, name, 0, 0, 0,
+			equiv_obj, NULL, 0))
+		return equiv_obj;
+
+	return NULL;
+
+}
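+
+/*
+ * Example use of the creation wrappers above. This is a sketch only:
+ * "parent" would typically be dev->root_dir or a directory found with
+ * yaffs_find_by_name(), and the mode/uid/gid values are illustrative.
+ *
+ *	struct yaffs_obj *f;
+ *
+ *	f = yaffs_create_file(parent, _Y("log.txt"), 0600, 0, 0);
+ *	yaffs_create_dir(parent, _Y("tmp"), 0700, 0, 0);
+ *	yaffs_create_symlink(parent, _Y("latest"), 0777, 0, 0,
+ *			     _Y("log.txt"));
+ *	if (f)
+ *		yaffs_link_obj(parent, _Y("log2.txt"), f);
+ */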
+
+
+
+/*---------------------- Block Management and Page Allocation -------------*/
+
+static void yaffs_deinit_blocks(struct yaffs_dev *dev)
+{
+	if (dev->block_info_alt && dev->block_info)
+		vfree(dev->block_info);
+	else
+		kfree(dev->block_info);
+
+	dev->block_info_alt = 0;
+
+	dev->block_info = NULL;
+
+	if (dev->chunk_bits_alt && dev->chunk_bits)
+		vfree(dev->chunk_bits);
+	else
+		kfree(dev->chunk_bits);
+	dev->chunk_bits_alt = 0;
+	dev->chunk_bits = NULL;
+}
+
+static int yaffs_init_blocks(struct yaffs_dev *dev)
+{
+	int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
+
+	dev->block_info = NULL;
+	dev->chunk_bits = NULL;
+	dev->alloc_block = -1;	/* force it to get a new one */
+
+	/* If the first allocation strategy fails, try the alternate one */
+	dev->block_info =
+		kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS);
+	if (!dev->block_info) {
+		dev->block_info =
+		    vmalloc(n_blocks * sizeof(struct yaffs_block_info));
+		dev->block_info_alt = 1;
+	} else {
+		dev->block_info_alt = 0;
+	}
+
+	if (!dev->block_info)
+		goto alloc_error;
+
+	/* Set up dynamic blockinfo stuff. Round up bytes. */
+	dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8;
+	dev->chunk_bits =
+		kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS);
+	if (!dev->chunk_bits) {
+		dev->chunk_bits =
+		    vmalloc(dev->chunk_bit_stride * n_blocks);
+		dev->chunk_bits_alt = 1;
+	} else {
+		dev->chunk_bits_alt = 0;
+	}
+	if (!dev->chunk_bits)
+		goto alloc_error;
+
+
+	memset(dev->block_info, 0, n_blocks * sizeof(struct yaffs_block_info));
+	memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks);
+	return YAFFS_OK;
+
+alloc_error:
+	yaffs_deinit_blocks(dev);
+	return YAFFS_FAIL;
+}
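+
+/*
+ * yaffs_init_blocks() uses the common kmalloc()-with-vmalloc()-fallback
+ * idiom for arrays that may be too large for the slab allocator; the
+ * *_alt flag records which allocator succeeded so yaffs_deinit_blocks()
+ * can pair the free correctly. A minimal sketch of the idiom:
+ *
+ *	ptr = kmalloc(size, GFP_NOFS);
+ *	alt = 0;
+ *	if (!ptr) {
+ *		ptr = vmalloc(size);
+ *		alt = 1;
+ *	}
+ *	...
+ *	if (alt)
+ *		vfree(ptr);
+ *	else
+ *		kfree(ptr);
+ */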
+
+
+void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no)
+{
+	struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no);
+	int erased_ok = 0;
+	int i;
+
+	/* If the block is still healthy erase it and mark as clean.
+	 * If the block has had a data failure, then retire it.
+	 */
+
+	yaffs_trace(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
+		"yaffs_block_became_dirty block %d state %d %s",
+		block_no, bi->block_state,
+		(bi->needs_retiring) ? "needs retiring" : "");
+
+	yaffs2_clear_oldest_dirty_seq(dev, bi);
+
+	bi->block_state = YAFFS_BLOCK_STATE_DIRTY;
+
+	/* If this is the block being garbage collected then stop gc'ing */
+	if (block_no == dev->gc_block)
+		dev->gc_block = 0;
+
+	/* If this block is currently the best candidate for gc
+	 * then drop it as a candidate */
+	if (block_no == dev->gc_dirtiest) {
+		dev->gc_dirtiest = 0;
+		dev->gc_pages_in_use = 0;
+	}
+
+	if (!bi->needs_retiring) {
+		yaffs2_checkpt_invalidate(dev);
+		erased_ok = yaffs_erase_block(dev, block_no);
+		if (!erased_ok) {
+			dev->n_erase_failures++;
+			yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+			  "**>> Erasure failed %d", block_no);
+		}
+	}
+
+	/* Verify erasure if needed */
+	if (erased_ok &&
+	    ((yaffs_trace_mask & YAFFS_TRACE_ERASE) ||
+	     !yaffs_skip_verification(dev))) {
+		for (i = 0; i < dev->param.chunks_per_block; i++) {
+			if (!yaffs_check_chunk_erased(dev,
+				block_no * dev->param.chunks_per_block + i)) {
+				yaffs_trace(YAFFS_TRACE_ERROR,
+					">>Block %d erasure supposedly OK, but chunk %d not erased",
+					block_no, i);
+			}
+		}
+	}
+
+	if (!erased_ok) {
+		/* We lost a block of free space */
+		dev->n_free_chunks -= dev->param.chunks_per_block;
+		yaffs_retire_block(dev, block_no);
+		yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+			"**>> Block %d retired", block_no);
+		return;
+	}
+
+	/* Clean it up... */
+	bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
+	bi->seq_number = 0;
+	dev->n_erased_blocks++;
+	bi->pages_in_use = 0;
+	bi->soft_del_pages = 0;
+	bi->has_shrink_hdr = 0;
+	bi->skip_erased_check = 1;	/* Clean, so no need to check */
+	bi->gc_prioritise = 0;
+	bi->has_summary = 0;
+
+	yaffs_clear_chunk_bits(dev, block_no);
+
+	yaffs_trace(YAFFS_TRACE_ERASE, "Erased block %d", block_no);
+}
+
+static inline int yaffs_gc_process_chunk(struct yaffs_dev *dev,
+					struct yaffs_block_info *bi,
+					int old_chunk, u8 *buffer)
+{
+	int new_chunk;
+	int mark_flash = 1;
+	struct yaffs_ext_tags tags;
+	struct yaffs_obj *object;
+	int matching_chunk;
+	int ret_val = YAFFS_OK;
+
+	memset(&tags, 0, sizeof(tags));
+	yaffs_rd_chunk_tags_nand(dev, old_chunk,
+				 buffer, &tags);
+	object = yaffs_find_by_number(dev, tags.obj_id);
+
+	yaffs_trace(YAFFS_TRACE_GC_DETAIL,
+		"Collecting chunk in block %d, %d %d %d ",
+		dev->gc_chunk, tags.obj_id,
+		tags.chunk_id, tags.n_bytes);
+
+	if (object && !yaffs_skip_verification(dev)) {
+		if (tags.chunk_id == 0)
+			matching_chunk =
+			    object->hdr_chunk;
+		else if (object->soft_del)
+			/* Defeat the test */
+			matching_chunk = old_chunk;
+		else
+			matching_chunk =
+			    yaffs_find_chunk_in_file
+			    (object, tags.chunk_id,
+			     NULL);
+
+		if (old_chunk != matching_chunk)
+			yaffs_trace(YAFFS_TRACE_ERROR,
+				"gc: page in gc mismatch: %d %d %d %d",
+				old_chunk,
+				matching_chunk,
+				tags.obj_id,
+				tags.chunk_id);
+	}
+
+	if (!object) {
+		yaffs_trace(YAFFS_TRACE_ERROR,
+			"page %d in gc has no object: %d %d %d ",
+			old_chunk,
+			tags.obj_id, tags.chunk_id,
+			tags.n_bytes);
+	}
+
+	if (object &&
+	    object->deleted &&
+	    object->soft_del && tags.chunk_id != 0) {
+		/* Data chunk in a soft deleted file:
+		 * throw it away.
+		 * No need to copy this, just forget
+		 * about it and fix up the object.
+		 */
+
+		/* Free chunks already includes
+		 * soft-deleted chunks; however this
+		 * chunk is soon going to be really
+		 * deleted, which will increment free
+		 * chunks. We have to decrement free
+		 * chunks so this works out properly.
+		 */
+		dev->n_free_chunks--;
+		bi->soft_del_pages--;
+
+		object->n_data_chunks--;
+		if (object->n_data_chunks <= 0) {
+			/* remember to clean up obj */
+			dev->gc_cleanup_list[dev->n_clean_ups] = tags.obj_id;
+			dev->n_clean_ups++;
+		}
+		mark_flash = 0;
+	} else if (object) {
+		/* It's either a data chunk in a live
+		 * file or an ObjectHeader, so we're
+		 * interested in it.
+		 * NB Need to keep the ObjectHeaders of
+		 * deleted files until the whole file
+		 * has been deleted off
+		 */
+		tags.serial_number++;
+		dev->n_gc_copies++;
+
+		if (tags.chunk_id == 0) {
+			/* It is an object header.
+			 * We need to nuke the
+			 * shrink-header flags since their
+			 * work is done.
+			 * Also need to clean up
+			 * shadowing.
+			 */
+			struct yaffs_obj_hdr *oh;
+			oh = (struct yaffs_obj_hdr *) buffer;
+
+			oh->is_shrink = 0;
+			tags.extra_is_shrink = 0;
+			oh->shadows_obj = 0;
+			oh->inband_shadowed_obj_id = 0;
+			tags.extra_shadows = 0;
+
+			/* Update file size */
+			if (object->variant_type == YAFFS_OBJECT_TYPE_FILE) {
+				yaffs_oh_size_load(oh,
+				    object->variant.file_variant.file_size);
+				tags.extra_file_size =
+				    object->variant.file_variant.file_size;
+			}
+
+			yaffs_verify_oh(object, oh, &tags, 1);
+			new_chunk =
+			    yaffs_write_new_chunk(dev, (u8 *) oh, &tags, 1);
+		} else {
+			new_chunk =
+			    yaffs_write_new_chunk(dev, buffer, &tags, 1);
+		}
+
+		if (new_chunk < 0) {
+			ret_val = YAFFS_FAIL;
+		} else {
+
+			/* Now fix up the Tnodes etc. */
+
+			if (tags.chunk_id == 0) {
+				/* It's a header */
+				object->hdr_chunk = new_chunk;
+				object->serial = tags.serial_number;
+			} else {
+				/* It's a data chunk */
+				yaffs_put_chunk_in_file(object, tags.chunk_id,
+							new_chunk, 0);
+			}
+		}
+	}
+	if (ret_val == YAFFS_OK)
+		yaffs_chunk_del(dev, old_chunk, mark_flash, __LINE__);
+	return ret_val;
+}
+
+static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block)
+{
+	int old_chunk;
+	int ret_val = YAFFS_OK;
+	int i;
+	int is_checkpt_block;
+	int max_copies;
+	int chunks_before = yaffs_get_erased_chunks(dev);
+	int chunks_after;
+	struct yaffs_block_info *bi = yaffs_get_block_info(dev, block);
+
+	is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT);
+
+	yaffs_trace(YAFFS_TRACE_TRACING,
+		"Collecting block %d, in use %d, shrink %d, whole_block %d",
+		block, bi->pages_in_use, bi->has_shrink_hdr,
+		whole_block);
+
+	/*yaffs_verify_free_chunks(dev); */
+
+	if (bi->block_state == YAFFS_BLOCK_STATE_FULL)
+		bi->block_state = YAFFS_BLOCK_STATE_COLLECTING;
+
+	bi->has_shrink_hdr = 0;	/* clear the flag so that the block can erase */
+
+	dev->gc_disable = 1;
+
+	yaffs_summary_gc(dev, block);
+
+	if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) {
+		yaffs_trace(YAFFS_TRACE_TRACING,
+			"Collecting block %d that has no chunks in use",
+			block);
+		yaffs_block_became_dirty(dev, block);
+	} else {
+
+		u8 *buffer = yaffs_get_temp_buffer(dev);
+
+		yaffs_verify_blk(dev, bi, block);
+
+		max_copies = (whole_block) ? dev->param.chunks_per_block : 5;
+		old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk;
+
+		for (/* init already done */ ;
+		     ret_val == YAFFS_OK &&
+		     dev->gc_chunk < dev->param.chunks_per_block &&
+		     (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) &&
+		     max_copies > 0;
+		     dev->gc_chunk++, old_chunk++) {
+			if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) {
+				/* Page is in use and might need to be copied */
+				max_copies--;
+				ret_val = yaffs_gc_process_chunk(dev, bi,
+							old_chunk, buffer);
+			}
+		}
+		yaffs_release_temp_buffer(dev, buffer);
+	}
+
+	yaffs_verify_collected_blk(dev, bi, block);
+
+	if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
+		/*
+		 * The gc did not complete. Set block state back to FULL
+		 * because checkpointing does not restore gc.
+		 */
+		bi->block_state = YAFFS_BLOCK_STATE_FULL;
+	} else {
+		/* The gc completed. */
+		/* Do any required cleanups */
+		for (i = 0; i < dev->n_clean_ups; i++) {
+			/* Time to delete the file too */
+			struct yaffs_obj *object =
+			    yaffs_find_by_number(dev, dev->gc_cleanup_list[i]);
+			if (object) {
+				yaffs_free_tnode(dev,
+					  object->variant.file_variant.top);
+				object->variant.file_variant.top = NULL;
+				yaffs_trace(YAFFS_TRACE_GC,
+					"yaffs: About to finally delete object %d",
+					object->obj_id);
+				yaffs_generic_obj_del(object);
+				object->my_dev->n_deleted_files--;
+			}
+
+		}
+		chunks_after = yaffs_get_erased_chunks(dev);
+		if (chunks_before >= chunks_after)
+			yaffs_trace(YAFFS_TRACE_GC,
+				"gc did not increase free chunks before %d after %d",
+				chunks_before, chunks_after);
+		dev->gc_block = 0;
+		dev->gc_chunk = 0;
+		dev->n_clean_ups = 0;
+	}
+
+	dev->gc_disable = 0;
+
+	return ret_val;
+}
+
+/*
+ * find_gc_block() selects the dirtiest block (or close enough)
+ * for garbage collection.
+ */
+
+static unsigned yaffs_find_gc_block(struct yaffs_dev *dev,
+				    int aggressive, int background)
+{
+	int i;
+	int iterations;
+	unsigned selected = 0;
+	int prioritised = 0;
+	int prioritised_exist = 0;
+	struct yaffs_block_info *bi;
+	int threshold;
+
+	/* First let's see if we need to grab a prioritised block */
+	if (dev->has_pending_prioritised_gc && !aggressive) {
+		dev->gc_dirtiest = 0;
+		bi = dev->block_info;
+		for (i = dev->internal_start_block;
+		     i <= dev->internal_end_block && !selected; i++) {
+
+			if (bi->gc_prioritise) {
+				prioritised_exist = 1;
+				if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
+				    yaffs_block_ok_for_gc(dev, bi)) {
+					selected = i;
+					prioritised = 1;
+				}
+			}
+			bi++;
+		}
+
+		/*
+		 * If there is a prioritised block and none was selected then
+		 * this happened because there is at least one old dirty block
+		 * gumming up the works. Let's gc the oldest dirty block.
+		 */
+
+		if (prioritised_exist &&
+		    !selected && dev->oldest_dirty_block > 0)
+			selected = dev->oldest_dirty_block;
+
+		if (!prioritised_exist)	/* None found, so we can clear this */
+			dev->has_pending_prioritised_gc = 0;
+	}
+
+	/* If we're doing aggressive GC then we are happy to take a less-dirty
+	 * block, and search harder.
+	 * Otherwise (leisurely gc) we only bother to do this if the
+	 * block has only a few pages in use.
+	 */
+
+	if (!selected) {
+		int pages_used;
+		int n_blocks =
+		    dev->internal_end_block - dev->internal_start_block + 1;
+		if (aggressive) {
+			threshold = dev->param.chunks_per_block;
+			iterations = n_blocks;
+		} else {
+			int max_threshold;
+
+			if (background)
+				max_threshold = dev->param.chunks_per_block / 2;
+			else
+				max_threshold = dev->param.chunks_per_block / 8;
+
+			if (max_threshold < YAFFS_GC_PASSIVE_THRESHOLD)
+				max_threshold = YAFFS_GC_PASSIVE_THRESHOLD;
+
+			threshold = background ? (dev->gc_not_done + 2) * 2 : 0;
+			if (threshold < YAFFS_GC_PASSIVE_THRESHOLD)
+				threshold = YAFFS_GC_PASSIVE_THRESHOLD;
+			if (threshold > max_threshold)
+				threshold = max_threshold;
+
+			iterations = n_blocks / 16 + 1;
+			if (iterations > 100)
+				iterations = 100;
+		}
+
+		for (i = 0;
+		     i < iterations &&
+		     (dev->gc_dirtiest < 1 ||
+		      dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH);
+		     i++) {
+			dev->gc_block_finder++;
+			if (dev->gc_block_finder < dev->internal_start_block ||
+			    dev->gc_block_finder > dev->internal_end_block)
+				dev->gc_block_finder =
+				    dev->internal_start_block;
+
+			bi = yaffs_get_block_info(dev, dev->gc_block_finder);
+
+			pages_used = bi->pages_in_use - bi->soft_del_pages;
+
+			if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
+			    pages_used < dev->param.chunks_per_block &&
+			    (dev->gc_dirtiest < 1 ||
+			     pages_used < dev->gc_pages_in_use) &&
+			    yaffs_block_ok_for_gc(dev, bi)) {
+				dev->gc_dirtiest = dev->gc_block_finder;
+				dev->gc_pages_in_use = pages_used;
+			}
+		}
+
+		if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold)
+			selected = dev->gc_dirtiest;
+	}
+
+	/*
+	 * If nothing has been selected for a while, try the oldest dirty
+	 * because that's gumming up the works.
+	 */
+
+	if (!selected && dev->param.is_yaffs2 &&
+	    dev->gc_not_done >= (background ? 10 : 20)) {
+		yaffs2_find_oldest_dirty_seq(dev);
+		if (dev->oldest_dirty_block > 0) {
+			selected = dev->oldest_dirty_block;
+			dev->gc_dirtiest = selected;
+			dev->oldest_dirty_gc_count++;
+			bi = yaffs_get_block_info(dev, selected);
+			dev->gc_pages_in_use =
+			    bi->pages_in_use - bi->soft_del_pages;
+		} else {
+			dev->gc_not_done = 0;
+		}
+	}
+
+	if (selected) {
+		yaffs_trace(YAFFS_TRACE_GC,
+			"GC Selected block %d with %d free, prioritised:%d",
+			selected,
+			dev->param.chunks_per_block - dev->gc_pages_in_use,
+			prioritised);
+
+		dev->n_gc_blocks++;
+		if (background)
+			dev->bg_gcs++;
+
+		dev->gc_dirtiest = 0;
+		dev->gc_pages_in_use = 0;
+		dev->gc_not_done = 0;
+		if (dev->refresh_skip > 0)
+			dev->refresh_skip--;
+	} else {
+		dev->gc_not_done++;
+		yaffs_trace(YAFFS_TRACE_GC,
+			"GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s",
+			dev->gc_block_finder, dev->gc_not_done, threshold,
+			dev->gc_dirtiest, dev->gc_pages_in_use,
+			dev->oldest_dirty_block, background ? " bg" : "");
+	}
+
+	return selected;
+}
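+
+/*
+ * A worked example of the thresholds above, for chunks_per_block = 64:
+ * aggressive gc sets threshold = 64 and will scan every block, while a
+ * background pass starts from threshold = (gc_not_done + 2) * 2,
+ * clamped between YAFFS_GC_PASSIVE_THRESHOLD and 32 (half a block),
+ * and inspects at most n_blocks / 16 + 1 blocks (capped at 100) per
+ * call. So a background gc that keeps failing gradually becomes more
+ * willing to accept less-dirty blocks.
+ */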
+
+/* New garbage collector
+ * If we're very low on erased blocks then we do aggressive garbage collection
+ * otherwise we do "leisurely" garbage collection.
+ * Aggressive gc looks further (whole array) and will accept less dirty blocks.
+ * Passive gc only inspects smaller areas and only accepts more dirty blocks.
+ *
+ * The idea is to help clear out space in a more spread-out manner.
+ * Dunno if it really does anything useful.
+ */
+static int yaffs_check_gc(struct yaffs_dev *dev, int background)
+{
+	int aggressive = 0;
+	int gc_ok = YAFFS_OK;
+	int max_tries = 0;
+	int min_erased;
+	int erased_chunks;
+	int checkpt_block_adjust;
+
+	if (dev->param.gc_control_fn &&
+		(dev->param.gc_control_fn(dev) & 1) == 0)
+		return YAFFS_OK;
+
+	if (dev->gc_disable)
+		/* Bail out so we don't get recursive gc */
+		return YAFFS_OK;
+
+	/* This loop should pass the first time.
+	 * Only loops here if the collection does not increase space.
+	 */
+
+	do {
+		max_tries++;
+
+		checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev);
+
+		min_erased =
+		    dev->param.n_reserved_blocks + checkpt_block_adjust + 1;
+		erased_chunks =
+		    dev->n_erased_blocks * dev->param.chunks_per_block;
+
+		/* If we need a block soon then do aggressive gc. */
+		if (dev->n_erased_blocks < min_erased)
+			aggressive = 1;
+		else {
+			if (!background
+			    && erased_chunks > (dev->n_free_chunks / 4))
+				break;
+
+			if (dev->gc_skip > 20)
+				dev->gc_skip = 20;
+			if (erased_chunks < dev->n_free_chunks / 2 ||
+			    dev->gc_skip < 1 || background)
+				aggressive = 0;
+			else {
+				dev->gc_skip--;
+				break;
+			}
+		}
+
+		dev->gc_skip = 5;
+
+		/* If we don't already have a block being gc'd then see if we
+		 * should start another */
+
+		if (dev->gc_block < 1 && !aggressive) {
+			dev->gc_block = yaffs2_find_refresh_block(dev);
+			dev->gc_chunk = 0;
+			dev->n_clean_ups = 0;
+		}
+		if (dev->gc_block < 1) {
+			dev->gc_block =
+			    yaffs_find_gc_block(dev, aggressive, background);
+			dev->gc_chunk = 0;
+			dev->n_clean_ups = 0;
+		}
+
+		if (dev->gc_block > 0) {
+			dev->all_gcs++;
+			if (!aggressive)
+				dev->passive_gc_count++;
+
+			yaffs_trace(YAFFS_TRACE_GC,
+				"yaffs: GC n_erased_blocks %d aggressive %d",
+				dev->n_erased_blocks, aggressive);
+
+			gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive);
+		}
+
+		if (dev->n_erased_blocks < (dev->param.n_reserved_blocks) &&
+		    dev->gc_block > 0) {
+			yaffs_trace(YAFFS_TRACE_GC,
+				"yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d",
+				dev->n_erased_blocks, max_tries,
+				dev->gc_block);
+		}
+	} while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) &&
+		 (dev->gc_block > 0) && (max_tries < 2));
+
+	return aggressive ? gc_ok : YAFFS_OK;
+}
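+
+/*
+ * A worked example of the aggression decision above: with
+ * n_reserved_blocks = 5 and a checkpoint needing 2 blocks,
+ * min_erased = 5 + 2 + 1 = 8, so gc turns aggressive as soon as fewer
+ * than 8 erased blocks remain. Above that, a foreground call bails out
+ * while more than a quarter of the free space is already erased, and
+ * the gc_skip counter rations the remaining passive passes.
+ */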
+
+/*
+ * yaffs_bg_gc()
+ * Garbage collects. Intended to be called from a background thread.
+ * Returns non-zero if at least half the free chunks are erased.
+ */
+int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency)
+{
+	int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block;
+
+	yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency);
+
+	yaffs_check_gc(dev, 1);
+	return erased_chunks > dev->n_free_chunks / 2;
+}
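+
+/*
+ * A sketch of how a background thread might drive this. The real loop,
+ * locking and sleep policy live in the OS glue layer, not here;
+ * "urgency", "long_ms" and "short_ms" are illustrative names only:
+ *
+ *	while (!kthread_should_stop()) {
+ *		if (yaffs_bg_gc(dev, urgency))
+ *			msleep(long_ms);	// plenty of erased space
+ *		else
+ *			msleep(short_ms);	// keep collecting
+ *	}
+ */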
+
+/*-------------------- Data file manipulation -----------------*/
+
+static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 * buffer)
+{
+	int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL);
+
+	if (nand_chunk >= 0)
+		return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk,
+						buffer, NULL);
+	else {
+		yaffs_trace(YAFFS_TRACE_NANDACCESS,
+			"Chunk %d not found zero instead",
+			nand_chunk);
+		/* get sane (zero) data if you read a hole */
+		memset(buffer, 0, in->my_dev->data_bytes_per_chunk);
+		return 0;
+	}
+
+}
+
+void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
+		     int lyn)
+{
+	int block;
+	int page;
+	struct yaffs_ext_tags tags;
+	struct yaffs_block_info *bi;
+
+	if (chunk_id <= 0)
+		return;
+
+	dev->n_deletions++;
+	block = chunk_id / dev->param.chunks_per_block;
+	page = chunk_id % dev->param.chunks_per_block;
+
+	if (!yaffs_check_chunk_bit(dev, block, page))
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Deleting invalid chunk %d", chunk_id);
+
+	bi = yaffs_get_block_info(dev, block);
+
+	yaffs2_update_oldest_dirty_seq(dev, block, bi);
+
+	yaffs_trace(YAFFS_TRACE_DELETION,
+		"line %d delete of chunk %d",
+		lyn, chunk_id);
+
+	if (!dev->param.is_yaffs2 && mark_flash &&
+	    bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) {
+
+		memset(&tags, 0, sizeof(tags));
+		tags.is_deleted = 1;
+		yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags);
+		yaffs_handle_chunk_update(dev, chunk_id, &tags);
+	} else {
+		dev->n_unmarked_deletions++;
+	}
+
+	/* Pull out of the management area.
+	 * If the whole block became dirty, this will kick off an erasure.
+	 */
+	if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING ||
+	    bi->block_state == YAFFS_BLOCK_STATE_FULL ||
+	    bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
+	    bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
+		dev->n_free_chunks++;
+		yaffs_clear_chunk_bit(dev, block, page);
+		bi->pages_in_use--;
+
+		if (bi->pages_in_use == 0 &&
+		    !bi->has_shrink_hdr &&
+		    bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING &&
+		    bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCAN) {
+			yaffs_block_became_dirty(dev, block);
+		}
+	}
+}
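+
+/*
+ * A worked example of the address split above: with
+ * chunks_per_block = 64, chunk_id = 200 lands in block 200 / 64 = 3 at
+ * page 200 % 64 = 8. For yaffs1 the chunk is also marked deleted on
+ * flash; yaffs2 relies on sequence numbers instead, so the deletion is
+ * only counted (n_unmarked_deletions).
+ */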
+
+static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
+			     const u8 *buffer, int n_bytes, int use_reserve)
+{
+	/* Find the old chunk; we need to do this to get the serial number.
+	 * Write the new one and patch it into the tree.
+	 * Invalidate the old tags.
+	 */
+
+	int prev_chunk_id;
+	struct yaffs_ext_tags prev_tags;
+	int new_chunk_id;
+	struct yaffs_ext_tags new_tags;
+	struct yaffs_dev *dev = in->my_dev;
+
+	yaffs_check_gc(dev, 0);
+
+	/* Get the previous chunk at this location in the file if it exists.
+	 * If it does not exist then put a zero into the tree. This creates
+	 * the tnode now, rather than later when it is harder to clean up.
+	 */
+	prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags);
+	if (prev_chunk_id < 1 &&
+	    !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0))
+		return 0;
+
+	/* Set up new tags */
+	memset(&new_tags, 0, sizeof(new_tags));
+
+	new_tags.chunk_id = inode_chunk;
+	new_tags.obj_id = in->obj_id;
+	new_tags.serial_number =
+	    (prev_chunk_id > 0) ? prev_tags.serial_number + 1 : 1;
+	new_tags.n_bytes = n_bytes;
+
+	if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) {
+		yaffs_trace(YAFFS_TRACE_ERROR,
+		  "Writing %d bytes to chunk!!!!!!!!!",
+		   n_bytes);
+		BUG();
+	}
+
+	new_chunk_id =
+	    yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve);
+
+	if (new_chunk_id > 0) {
+		yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0);
+
+		if (prev_chunk_id > 0)
+			yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
+
+		yaffs_verify_file_sane(in);
+	}
+	return new_chunk_id;
+
+}
+
+
+
+static int yaffs_do_xattrib_mod(struct yaffs_obj *obj, int set,
+				const YCHAR *name, const void *value, int size,
+				int flags)
+{
+	struct yaffs_xattr_mod xmod;
+	int result;
+
+	xmod.set = set;
+	xmod.name = name;
+	xmod.data = value;
+	xmod.size = size;
+	xmod.flags = flags;
+	xmod.result = -ENOSPC;
+
+	result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod);
+
+	if (result > 0)
+		return xmod.result;
+	else
+		return -ENOSPC;
+}
+
+static int yaffs_apply_xattrib_mod(struct yaffs_obj *obj, char *buffer,
+				   struct yaffs_xattr_mod *xmod)
+{
+	int retval = 0;
+	int x_offs = sizeof(struct yaffs_obj_hdr);
+	struct yaffs_dev *dev = obj->my_dev;
+	int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
+	char *x_buffer = buffer + x_offs;
+
+	if (xmod->set)
+		retval =
+		    nval_set(x_buffer, x_size, xmod->name, xmod->data,
+			     xmod->size, xmod->flags);
+	else
+		retval = nval_del(x_buffer, x_size, xmod->name);
+
+	obj->has_xattr = nval_hasvalues(x_buffer, x_size);
+	obj->xattr_known = 1;
+	xmod->result = retval;
+
+	return retval;
+}
+
+static int yaffs_do_xattrib_fetch(struct yaffs_obj *obj, const YCHAR *name,
+				  void *value, int size)
+{
+	char *buffer = NULL;
+	int result;
+	struct yaffs_ext_tags tags;
+	struct yaffs_dev *dev = obj->my_dev;
+	int x_offs = sizeof(struct yaffs_obj_hdr);
+	int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
+	char *x_buffer;
+	int retval = 0;
+
+	if (obj->hdr_chunk < 1)
+		return -ENODATA;
+
+	/* If we know that the object has no xattribs then don't do all the
+	 * reading and parsing.
+	 */
+	if (obj->xattr_known && !obj->has_xattr) {
+		if (name)
+			return -ENODATA;
+		else
+			return 0;
+	}
+
+	buffer = (char *)yaffs_get_temp_buffer(dev);
+	if (!buffer)
+		return -ENOMEM;
+
+	result =
+	    yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, (u8 *) buffer, &tags);
+
+	if (result != YAFFS_OK)
+		retval = -ENOENT;
+	else {
+		x_buffer = buffer + x_offs;
+
+		if (!obj->xattr_known) {
+			obj->has_xattr = nval_hasvalues(x_buffer, x_size);
+			obj->xattr_known = 1;
+		}
+
+		if (name)
+			retval = nval_get(x_buffer, x_size, name, value, size);
+		else
+			retval = nval_list(x_buffer, x_size, value, size);
+	}
+	yaffs_release_temp_buffer(dev, (u8 *) buffer);
+	return retval;
+}
+
+int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name,
+		      const void *value, int size, int flags)
+{
+	return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags);
+}
+
+int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name)
+{
+	return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0);
+}
+
+int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value,
+		      int size)
+{
+	return yaffs_do_xattrib_fetch(obj, name, value, size);
+}
+
+int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size)
+{
+	return yaffs_do_xattrib_fetch(obj, NULL, buffer, size);
+}
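+
+/*
+ * Example use of the xattrib API above (a sketch; the attribute name
+ * and sizes are illustrative, and error handling is omitted):
+ *
+ *	char val[16];
+ *	int n;
+ *
+ *	yaffs_set_xattrib(obj, _Y("user.colour"), "blue", 4, 0);
+ *	n = yaffs_get_xattrib(obj, _Y("user.colour"), val, sizeof(val));
+ *	// on success n is the stored size (4 here)
+ *	yaffs_remove_xattrib(obj, _Y("user.colour"));
+ *
+ * An object known to have no xattribs short-circuits: a named fetch
+ * returns -ENODATA and a list returns 0 without touching NAND.
+ */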
+
+static void yaffs_check_obj_details_loaded(struct yaffs_obj *in)
+{
+	u8 *buf;
+	struct yaffs_obj_hdr *oh;
+	struct yaffs_dev *dev;
+	struct yaffs_ext_tags tags;
+	int result;
+	int alloc_failed = 0;
+
+	if (!in || !in->lazy_loaded || in->hdr_chunk < 1)
+		return;
+
+	dev = in->my_dev;
+	in->lazy_loaded = 0;
+	buf = yaffs_get_temp_buffer(dev);
+
+	result = yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, buf, &tags);
+	oh = (struct yaffs_obj_hdr *)buf;
+
+	in->yst_mode = oh->yst_mode;
+	yaffs_load_attribs(in, oh);
+	yaffs_set_obj_name_from_oh(in, oh);
+
+	if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
+		in->variant.symlink_variant.alias =
+		    yaffs_clone_str(oh->alias);
+		if (!in->variant.symlink_variant.alias)
+			alloc_failed = 1;	/* Not returned */
+	}
+	yaffs_release_temp_buffer(dev, buf);
+}
+
+static void yaffs_load_name_from_oh(struct yaffs_dev *dev, YCHAR *name,
+				    const YCHAR *oh_name, int buff_size)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+	if (dev->param.auto_unicode) {
+		if (*oh_name) {
+			/* It is an ASCII name, do an ASCII to
+			 * unicode conversion */
+			const char *ascii_oh_name = (const char *)oh_name;
+			int n = buff_size - 1;
+			while (n > 0 && *ascii_oh_name) {
+				*name = *ascii_oh_name;
+				name++;
+				ascii_oh_name++;
+				n--;
+			}
+		} else {
+			strncpy(name, oh_name + 1, buff_size - 1);
+		}
+	} else {
+#else
+	(void) dev;
+	{
+#endif
+		strncpy(name, oh_name, buff_size - 1);
+	}
+}
+
+static void yaffs_load_oh_from_name(struct yaffs_dev *dev, YCHAR *oh_name,
+				    const YCHAR *name)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+
+	int is_ascii;
+	YCHAR *w;
+
+	if (dev->param.auto_unicode) {
+
+		is_ascii = 1;
+		w = name;
+
+		/* Figure out if the name will fit in the ASCII character set */
+		while (is_ascii && *w) {
+			if ((*w) & 0xff00)
+				is_ascii = 0;
+			w++;
+		}
+
+		if (is_ascii) {
+			/* It is an ASCII name, so convert unicode to ascii */
+			char *ascii_oh_name = (char *)oh_name;
+			int n = YAFFS_MAX_NAME_LENGTH - 1;
+			while (n > 0 && *name) {
+				*ascii_oh_name = *name;
+				name++;
+				ascii_oh_name++;
+				n--;
+			}
+		} else {
+			/* Unicode name, so save starting at the second YCHAR */
+			*oh_name = 0;
+			strncpy(oh_name + 1, name, YAFFS_MAX_NAME_LENGTH - 2);
+		}
+	} else {
+#else
+	(void) dev;
+	{
+#endif
+		strncpy(oh_name, name, YAFFS_MAX_NAME_LENGTH - 1);
+	}
+}
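+
+/*
+ * The on-disk convention used by the pair of functions above when
+ * auto_unicode is in effect: a name whose characters all fit in eight
+ * bits is stored byte-packed, while a name containing any wider
+ * character is stored with a leading NUL YCHAR and the wide string
+ * starting at oh_name[1]. yaffs_load_name_from_oh() keys off that
+ * first element to pick the decoding direction:
+ *
+ *	all chars <= 0xff  ->  'a' 'b' 'c' '\0'       (byte packed)
+ *	any char  >  0xff  ->  0, YCHAR string...     (wide, offset 1)
+ */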
+
+/* UpdateObjectHeader updates the header on NAND for an object.
+ * If name is not NULL, then that new name is used.
+ */
+int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name, int force,
+		    int is_shrink, int shadows, struct yaffs_xattr_mod *xmod)
+{
+
+	struct yaffs_block_info *bi;
+	struct yaffs_dev *dev = in->my_dev;
+	int prev_chunk_id;
+	int ret_val = 0;
+	int result = 0;
+	int new_chunk_id;
+	struct yaffs_ext_tags new_tags;
+	struct yaffs_ext_tags old_tags;
+	const YCHAR *alias = NULL;
+	u8 *buffer = NULL;
+	YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1];
+	struct yaffs_obj_hdr *oh = NULL;
+	loff_t file_size = 0;
+
+	strcpy(old_name, _Y("silly old name"));
+
+	if (in->fake && in != dev->root_dir && !force && !xmod)
+		return ret_val;
+
+	yaffs_check_gc(dev, 0);
+	yaffs_check_obj_details_loaded(in);
+
+	buffer = yaffs_get_temp_buffer(in->my_dev);
+	oh = (struct yaffs_obj_hdr *)buffer;
+
+	prev_chunk_id = in->hdr_chunk;
+
+	if (prev_chunk_id > 0) {
+		result = yaffs_rd_chunk_tags_nand(dev, prev_chunk_id,
+						  buffer, &old_tags);
+
+		yaffs_verify_oh(in, oh, &old_tags, 0);
+		memcpy(old_name, oh->name, sizeof(oh->name));
+		memset(buffer, 0xff, sizeof(struct yaffs_obj_hdr));
+	} else {
+		memset(buffer, 0xff, dev->data_bytes_per_chunk);
+	}
+
+	oh->type = in->variant_type;
+	oh->yst_mode = in->yst_mode;
+	oh->shadows_obj = oh->inband_shadowed_obj_id = shadows;
+
+	yaffs_load_attribs_oh(oh, in);
+
+	if (in->parent)
+		oh->parent_obj_id = in->parent->obj_id;
+	else
+		oh->parent_obj_id = 0;
+
+	if (name && *name) {
+		memset(oh->name, 0, sizeof(oh->name));
+		yaffs_load_oh_from_name(dev, oh->name, name);
+	} else if (prev_chunk_id > 0) {
+		memcpy(oh->name, old_name, sizeof(oh->name));
+	} else {
+		memset(oh->name, 0, sizeof(oh->name));
+	}
+
+	oh->is_shrink = is_shrink;
+
+	switch (in->variant_type) {
+	case YAFFS_OBJECT_TYPE_UNKNOWN:
+		/* Should not happen */
+		break;
+	case YAFFS_OBJECT_TYPE_FILE:
+		if (oh->parent_obj_id != YAFFS_OBJECTID_DELETED &&
+		    oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED)
+			file_size = in->variant.file_variant.file_size;
+		yaffs_oh_size_load(oh, file_size);
+		break;
+	case YAFFS_OBJECT_TYPE_HARDLINK:
+		oh->equiv_id = in->variant.hardlink_variant.equiv_id;
+		break;
+	case YAFFS_OBJECT_TYPE_SPECIAL:
+		/* Do nothing */
+		break;
+	case YAFFS_OBJECT_TYPE_DIRECTORY:
+		/* Do nothing */
+		break;
+	case YAFFS_OBJECT_TYPE_SYMLINK:
+		alias = in->variant.symlink_variant.alias;
+		if (!alias)
+			alias = _Y("no alias");
+		strncpy(oh->alias, alias, YAFFS_MAX_ALIAS_LENGTH);
+		oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
+		break;
+	}
+
+	/* process any xattrib modifications */
+	if (xmod)
+		yaffs_apply_xattrib_mod(in, (char *)buffer, xmod);
+
+	/* Tags */
+	memset(&new_tags, 0, sizeof(new_tags));
+	in->serial++;
+	new_tags.chunk_id = 0;
+	new_tags.obj_id = in->obj_id;
+	new_tags.serial_number = in->serial;
+
+	/* Add extra info for file header */
+	new_tags.extra_available = 1;
+	new_tags.extra_parent_id = oh->parent_obj_id;
+	new_tags.extra_file_size = file_size;
+	new_tags.extra_is_shrink = oh->is_shrink;
+	new_tags.extra_equiv_id = oh->equiv_id;
+	new_tags.extra_shadows = (oh->shadows_obj > 0) ? 1 : 0;
+	new_tags.extra_obj_type = in->variant_type;
+	yaffs_verify_oh(in, oh, &new_tags, 1);
+
+	/* Create new chunk in NAND */
+	new_chunk_id =
+	    yaffs_write_new_chunk(dev, buffer, &new_tags,
+				  (prev_chunk_id > 0) ? 1 : 0);
+
+	if (buffer)
+		yaffs_release_temp_buffer(dev, buffer);
+
+	if (new_chunk_id < 0)
+		return new_chunk_id;
+
+	in->hdr_chunk = new_chunk_id;
+
+	if (prev_chunk_id > 0)
+		yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
+
+	if (!yaffs_obj_cache_dirty(in))
+		in->dirty = 0;
+
+	/* If this was a shrink, then mark the block
+	 * that the chunk lives on */
+	if (is_shrink) {
+		bi = yaffs_get_block_info(in->my_dev,
+					  new_chunk_id /
+					  in->my_dev->param.chunks_per_block);
+		bi->has_shrink_hdr = 1;
+	}
+
+
+	return new_chunk_id;
+}
+
+/*--------------------- File read/write ------------------------
+ * Read and write have very similar structures.
+ * In general the read/write has three parts to it:
+ * An incomplete chunk to start with (if the read/write is not chunk-aligned)
+ * Some complete chunks
+ * An incomplete chunk to end off with
+ *
+ * Curve-balls: the first chunk might also be the last chunk.
+ */
+
+int yaffs_file_rd(struct yaffs_obj *in, u8 * buffer, loff_t offset, int n_bytes)
+{
+	int chunk;
+	u32 start;
+	int n_copy;
+	int n = n_bytes;
+	int n_done = 0;
+	struct yaffs_cache *cache;
+	struct yaffs_dev *dev;
+
+	dev = in->my_dev;
+
+	while (n > 0) {
+		yaffs_addr_to_chunk(dev, offset, &chunk, &start);
+		chunk++;
+
+		/* OK now check for the curveball where the start and end are in
+		 * the same chunk.
+		 */
+		if ((start + n) < dev->data_bytes_per_chunk)
+			n_copy = n;
+		else
+			n_copy = dev->data_bytes_per_chunk - start;
+
+		cache = yaffs_find_chunk_cache(in, chunk);
+
+		/* If the chunk is already in the cache or it is less than
+		 * a whole chunk or we're using inband tags then use the cache
+		 * (if there is caching) else bypass the cache.
+		 */
+		if (cache || n_copy != dev->data_bytes_per_chunk ||
+		    dev->param.inband_tags) {
+			if (dev->param.n_caches > 0) {
+
+				/* If we can't find the data in the cache,
+				 * then load it up. */
+
+				if (!cache) {
+					cache =
+					    yaffs_grab_chunk_cache(in->my_dev);
+					cache->object = in;
+					cache->chunk_id = chunk;
+					cache->dirty = 0;
+					cache->locked = 0;
+					yaffs_rd_data_obj(in, chunk,
+							  cache->data);
+					cache->n_bytes = 0;
+				}
+
+				yaffs_use_cache(dev, cache, 0);
+
+				cache->locked = 1;
+
+				memcpy(buffer, &cache->data[start], n_copy);
+
+				cache->locked = 0;
+			} else {
+				/* Read into the local buffer then copy. */
+
+				u8 *local_buffer =
+				    yaffs_get_temp_buffer(dev);
+				yaffs_rd_data_obj(in, chunk, local_buffer);
+
+				memcpy(buffer, &local_buffer[start], n_copy);
+
+				yaffs_release_temp_buffer(dev, local_buffer);
+			}
+		} else {
+			/* A full chunk. Read directly into the buffer. */
+			yaffs_rd_data_obj(in, chunk, buffer);
+		}
+		n -= n_copy;
+		offset += n_copy;
+		buffer += n_copy;
+		n_done += n_copy;
+	}
+	return n_done;
+}
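+
+/*
+ * A worked example of the three-part split in yaffs_file_rd(), with
+ * data_bytes_per_chunk = 2048, offset = 3000 and n_bytes = 5000:
+ *
+ *	pass 1: chunk 2, start = 952, n_copy = 1096  (partial, via cache)
+ *	pass 2: chunk 3, start = 0,   n_copy = 2048  (whole chunk, direct)
+ *	pass 3: chunk 4, start = 0,   n_copy = 1856  (partial, via cache)
+ *
+ * Only the partial chunks go through the chunk cache (or a temp buffer
+ * when caching is off); aligned whole chunks, with inband tags off, are
+ * read straight into the caller's buffer.
+ */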
+
+int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
+		     int n_bytes, int write_through)
+{
+
+	int chunk;
+	u32 start;
+	int n_copy;
+	int n = n_bytes;
+	int n_done = 0;
+	int n_writeback;
+	loff_t start_write = offset;
+	int chunk_written = 0;
+	u32 n_bytes_read;
+	loff_t chunk_start;
+	struct yaffs_dev *dev;
+
+	dev = in->my_dev;
+
+	while (n > 0 && chunk_written >= 0) {
+		yaffs_addr_to_chunk(dev, offset, &chunk, &start);
+
+		if (((loff_t)chunk) *
+		    dev->data_bytes_per_chunk + start != offset ||
+		    start >= dev->data_bytes_per_chunk) {
+			yaffs_trace(YAFFS_TRACE_ERROR,
+				"AddrToChunk of offset %lld gives chunk %d start %d",
+				offset, chunk, start);
+		}
+		chunk++;	/* File pos to chunk in file offset */
+
+		/* OK now check for the curveball where the start and end are in
+		 * the same chunk.
+		 */
+
+		if ((start + n) < dev->data_bytes_per_chunk) {
+			n_copy = n;
+
+			/* Now calculate how many bytes to write back....
+			 * If we're overwriting and not writing to the end of
+			 * the file then we need to write back as much as was
+			 * there before.
+			 */
+
+			chunk_start = (((loff_t)(chunk - 1)) *
+					dev->data_bytes_per_chunk);
+
+			if (chunk_start > in->variant.file_variant.file_size)
+				n_bytes_read = 0;	/* Past end of file */
+			else
+				n_bytes_read =
+				    in->variant.file_variant.file_size -
+				    chunk_start;
+
+			if (n_bytes_read > dev->data_bytes_per_chunk)
+				n_bytes_read = dev->data_bytes_per_chunk;
+
+			n_writeback =
+			    (n_bytes_read >
+			     (start + n)) ? n_bytes_read : (start + n);
+
+			if (n_writeback < 0 ||
+			    n_writeback > dev->data_bytes_per_chunk)
+				BUG();
+
+		} else {
+			n_copy = dev->data_bytes_per_chunk - start;
+			n_writeback = dev->data_bytes_per_chunk;
+		}
+
+		if (n_copy != dev->data_bytes_per_chunk ||
+		    !dev->param.cache_bypass_aligned ||
+		    dev->param.inband_tags) {
+			/* An incomplete start or end chunk (or maybe both
+			 * start and end chunk), or we're using inband tags,
+			 * or we're forcing writes through the cache,
+			 * so we want to use the cache buffers.
+			 */
+			if (dev->param.n_caches > 0) {
+				struct yaffs_cache *cache;
+
+				/* If we can't find the data in the cache, then
+				 * load the cache */
+				cache = yaffs_find_chunk_cache(in, chunk);
+
+				if (!cache &&
+				    yaffs_check_alloc_available(dev, 1)) {
+					cache = yaffs_grab_chunk_cache(dev);
+					cache->object = in;
+					cache->chunk_id = chunk;
+					cache->dirty = 0;
+					cache->locked = 0;
+					yaffs_rd_data_obj(in, chunk,
+							  cache->data);
+				} else if (cache &&
+					   !cache->dirty &&
+					   !yaffs_check_alloc_available(dev,
+									1)) {
+					/* Drop the cache if it was a read cache
+					 * item and no space check has been made
+					 * for it.
+					 */
+					cache = NULL;
+				}
+
+				if (cache) {
+					yaffs_use_cache(dev, cache, 1);
+					cache->locked = 1;
+
+					memcpy(&cache->data[start], buffer,
+					       n_copy);
+
+					cache->locked = 0;
+					cache->n_bytes = n_writeback;
+
+					if (write_through) {
+						chunk_written =
+						    yaffs_wr_data_obj
+						    (cache->object,
+						     cache->chunk_id,
+						     cache->data,
+						     cache->n_bytes, 1);
+						cache->dirty = 0;
+					}
+				} else {
+					chunk_written = -1;	/* fail write */
+				}
+			} else {
+				/* An incomplete start or end chunk (or maybe
+				 * both start and end chunk). Read into the
+				 * local buffer then copy over and write back.
+				 */
+
+				u8 *local_buffer = yaffs_get_temp_buffer(dev);
+
+				yaffs_rd_data_obj(in, chunk, local_buffer);
+				memcpy(&local_buffer[start], buffer, n_copy);
+
+				chunk_written =
+				    yaffs_wr_data_obj(in, chunk,
+						      local_buffer,
+						      n_writeback, 0);
+
+				yaffs_release_temp_buffer(dev, local_buffer);
+			}
+		} else {
+			/* A full chunk. Write directly from the buffer. */
+
+			chunk_written =
+			    yaffs_wr_data_obj(in, chunk, buffer,
+					      dev->data_bytes_per_chunk, 0);
+
+			/* Since we've overwritten the cached data,
+			 * we better invalidate it. */
+			yaffs_invalidate_chunk_cache(in, chunk);
+		}
+
+		if (chunk_written >= 0) {
+			n -= n_copy;
+			offset += n_copy;
+			buffer += n_copy;
+			n_done += n_copy;
+		}
+	}
+
+	/* Update file object */
+
+	if ((start_write + n_done) > in->variant.file_variant.file_size)
+		in->variant.file_variant.file_size = (start_write + n_done);
+
+	in->dirty = 1;
+	return n_done;
+}
+
+int yaffs_wr_file(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
+		  int n_bytes, int write_through)
+{
+	yaffs2_handle_hole(in, offset);
+	return yaffs_do_file_wr(in, buffer, offset, n_bytes, write_through);
+}
+
+/* ---------------------- File resizing stuff ------------------ */
+
+static void yaffs_prune_chunks(struct yaffs_obj *in, loff_t new_size)
+{
+
+	struct yaffs_dev *dev = in->my_dev;
+	loff_t old_size = in->variant.file_variant.file_size;
+	int i;
+	int chunk_id;
+	u32 dummy;
+	int last_del;
+	int start_del;
+
+	if (old_size > 0)
+		yaffs_addr_to_chunk(dev, old_size - 1, &last_del, &dummy);
+	else
+		last_del = 0;
+
+	yaffs_addr_to_chunk(dev, new_size + dev->data_bytes_per_chunk - 1,
+				&start_del, &dummy);
+	last_del++;
+	start_del++;
+
+	/* Delete backwards so that we don't end up with holes if
+	 * power is lost part-way through the operation.
+	 */
+	for (i = last_del; i >= start_del; i--) {
+		/* NB this could be optimised somewhat,
+		 * eg. could retrieve the tags and write them without
+		 * using yaffs_chunk_del
+		 */
+
+		chunk_id = yaffs_find_del_file_chunk(in, i, NULL);
+
+		if (chunk_id < 1)
+			continue;
+
+		if (chunk_id <
+		    (dev->internal_start_block * dev->param.chunks_per_block) ||
+		    chunk_id >=
+		    ((dev->internal_end_block + 1) *
+		      dev->param.chunks_per_block)) {
+			yaffs_trace(YAFFS_TRACE_ALWAYS,
+				"Found daft chunk_id %d for %d",
+				chunk_id, i);
+		} else {
+			in->n_data_chunks--;
+			yaffs_chunk_del(dev, chunk_id, 1, __LINE__);
+		}
+	}
+}
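+
+/*
+ * A worked example of the deletion window above, with
+ * data_bytes_per_chunk = 2048, old_size = 10000 and new_size = 3000:
+ * last_del comes from byte 9999 -> chunk 4, bumped to 5; start_del
+ * comes from byte 3000 + 2047 = 5047 -> chunk 2, bumped to 3. So file
+ * chunks 5, 4, 3 are deleted (backwards), while chunk 2, which still
+ * covers bytes up to offset 4095 including the new EOF, is kept for
+ * yaffs_resize_file_down() to rewrite with zero padding.
+ */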
+
+void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size)
+{
+	int new_full;
+	u32 new_partial;
+	struct yaffs_dev *dev = obj->my_dev;
+
+	yaffs_addr_to_chunk(dev, new_size, &new_full, &new_partial);
+
+	yaffs_prune_chunks(obj, new_size);
+
+	if (new_partial != 0) {
+		int last_chunk = 1 + new_full;
+		u8 *local_buffer = yaffs_get_temp_buffer(dev);
+
+		/* Rewrite the last chunk with its new size and zero pad */
+		yaffs_rd_data_obj(obj, last_chunk, local_buffer);
+		memset(local_buffer + new_partial, 0,
+		       dev->data_bytes_per_chunk - new_partial);
+
+		yaffs_wr_data_obj(obj, last_chunk, local_buffer,
+				  new_partial, 1);
+
+		yaffs_release_temp_buffer(dev, local_buffer);
+	}
+
+	obj->variant.file_variant.file_size = new_size;
+
+	yaffs_prune_tree(dev, &obj->variant.file_variant);
+}
+
+int yaffs_resize_file(struct yaffs_obj *in, loff_t new_size)
+{
+	struct yaffs_dev *dev = in->my_dev;
+	loff_t old_size = in->variant.file_variant.file_size;
+
+	yaffs_flush_file_cache(in);
+	yaffs_invalidate_whole_cache(in);
+
+	yaffs_check_gc(dev, 0);
+
+	if (in->variant_type != YAFFS_OBJECT_TYPE_FILE)
+		return YAFFS_FAIL;
+
+	if (new_size == old_size)
+		return YAFFS_OK;
+
+	if (new_size > old_size) {
+		yaffs2_handle_hole(in, new_size);
+		in->variant.file_variant.file_size = new_size;
+	} else {
+		/* new_size < old_size */
+		yaffs_resize_file_down(in, new_size);
+	}
+
+	/* Write a new object header to reflect the resize and, if need be,
+	 * to show that we've shrunk the file.
+	 * Do this only if the file is not in the deleted directories
+	 * and is not shadowed.
+	 */
+	if (in->parent &&
+	    !in->is_shadowed &&
+	    in->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
+	    in->parent->obj_id != YAFFS_OBJECTID_DELETED)
+		yaffs_update_oh(in, NULL, 0, 0, 0, NULL);
+
+	return YAFFS_OK;
+}
+
+int yaffs_flush_file(struct yaffs_obj *in, int update_time, int data_sync)
+{
+	if (!in->dirty)
+		return YAFFS_OK;
+
+	yaffs_flush_file_cache(in);
+
+	if (data_sync)
+		return YAFFS_OK;
+
+	if (update_time)
+		yaffs_load_current_time(in, 0, 0);
+
+	return (yaffs_update_oh(in, NULL, 0, 0, 0, NULL) >= 0) ?
+				YAFFS_OK : YAFFS_FAIL;
+}
+
+
+/* yaffs_del_file deletes the whole file data
+ * and the inode associated with the file.
+ * It does not delete the links associated with the file.
+ */
+static int yaffs_unlink_file_if_needed(struct yaffs_obj *in)
+{
+	int ret_val;
+	int del_now = 0;
+	struct yaffs_dev *dev = in->my_dev;
+
+	if (!in->my_inode)
+		del_now = 1;
+
+	if (del_now) {
+		ret_val =
+		    yaffs_change_obj_name(in, in->my_dev->del_dir,
+					  _Y("deleted"), 0, 0);
+		yaffs_trace(YAFFS_TRACE_TRACING,
+			"yaffs: immediate deletion of file %d",
+			in->obj_id);
+		in->deleted = 1;
+		in->my_dev->n_deleted_files++;
+		if (dev->param.disable_soft_del || dev->param.is_yaffs2)
+			yaffs_resize_file(in, 0);
+		yaffs_soft_del_file(in);
+	} else {
+		ret_val =
+		    yaffs_change_obj_name(in, in->my_dev->unlinked_dir,
+					  _Y("unlinked"), 0, 0);
+	}
+	return ret_val;
+}
+
+static int yaffs_del_file(struct yaffs_obj *in)
+{
+	int ret_val = YAFFS_OK;
+	int deleted;	/* Need to cache value on stack if in is freed */
+	struct yaffs_dev *dev = in->my_dev;
+
+	if (dev->param.disable_soft_del || dev->param.is_yaffs2)
+		yaffs_resize_file(in, 0);
+
+	if (in->n_data_chunks > 0) {
+		/* Use soft deletion if there is data in the file.
+		 * That won't be the case if it has been resized to zero.
+		 */
+		if (!in->unlinked)
+			ret_val = yaffs_unlink_file_if_needed(in);
+
+		deleted = in->deleted;
+
+		if (ret_val == YAFFS_OK && in->unlinked && !in->deleted) {
+			in->deleted = 1;
+			deleted = 1;
+			in->my_dev->n_deleted_files++;
+			yaffs_soft_del_file(in);
+		}
+		return deleted ? YAFFS_OK : YAFFS_FAIL;
+	} else {
+		/* The file has no data chunks so we toss it immediately */
+		yaffs_free_tnode(in->my_dev, in->variant.file_variant.top);
+		in->variant.file_variant.top = NULL;
+		yaffs_generic_obj_del(in);
+
+		return YAFFS_OK;
+	}
+}
+
+int yaffs_is_non_empty_dir(struct yaffs_obj *obj)
+{
+	return (obj &&
+		obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) &&
+		!(list_empty(&obj->variant.dir_variant.children));
+}
+
+static int yaffs_del_dir(struct yaffs_obj *obj)
+{
+	/* First check that the directory is empty. */
+	if (yaffs_is_non_empty_dir(obj))
+		return YAFFS_FAIL;
+
+	return yaffs_generic_obj_del(obj);
+}
+
+static int yaffs_del_symlink(struct yaffs_obj *in)
+{
+	kfree(in->variant.symlink_variant.alias);
+	in->variant.symlink_variant.alias = NULL;
+
+	return yaffs_generic_obj_del(in);
+}
+
+static int yaffs_del_link(struct yaffs_obj *in)
+{
+	/* remove this hardlink from the list associated with the equivalent
+	 * object
+	 */
+	list_del_init(&in->hard_links);
+	return yaffs_generic_obj_del(in);
+}
+
+int yaffs_del_obj(struct yaffs_obj *obj)
+{
+	int ret_val = -1;
+
+	switch (obj->variant_type) {
+	case YAFFS_OBJECT_TYPE_FILE:
+		ret_val = yaffs_del_file(obj);
+		break;
+	case YAFFS_OBJECT_TYPE_DIRECTORY:
+		if (!list_empty(&obj->variant.dir_variant.dirty)) {
+			yaffs_trace(YAFFS_TRACE_BACKGROUND,
+				"Remove object %d from dirty directories",
+				obj->obj_id);
+			list_del_init(&obj->variant.dir_variant.dirty);
+		}
+		return yaffs_del_dir(obj);
+	case YAFFS_OBJECT_TYPE_SYMLINK:
+		ret_val = yaffs_del_symlink(obj);
+		break;
+	case YAFFS_OBJECT_TYPE_HARDLINK:
+		ret_val = yaffs_del_link(obj);
+		break;
+	case YAFFS_OBJECT_TYPE_SPECIAL:
+		ret_val = yaffs_generic_obj_del(obj);
+		break;
+	case YAFFS_OBJECT_TYPE_UNKNOWN:
+		ret_val = 0;
+		break;		/* should not happen. */
+	}
+	return ret_val;
+}
+
+static int yaffs_unlink_worker(struct yaffs_obj *obj)
+{
+	int del_now = 0;
+
+	if (!obj)
+		return YAFFS_FAIL;
+
+	if (!obj->my_inode)
+		del_now = 1;
+
+	yaffs_update_parent(obj->parent);
+
+	if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
+		return yaffs_del_link(obj);
+	} else if (!list_empty(&obj->hard_links)) {
+		/* Curve ball: We're unlinking an object that has a hardlink.
+		 *
+		 * This problem arises because we are not strictly following
+		 * the Linux link/inode model.
+		 *
+		 * We can't really delete the object.
+		 * Instead, we do the following:
+		 * - Select a hardlink.
+		 * - Unhook it from the hard links
+		 * - Move it from its parent directory so that the rename works.
+		 * - Rename the object to the hardlink's name.
+		 * - Delete the hardlink
+		 */
+
+		struct yaffs_obj *hl;
+		struct yaffs_obj *parent;
+		int ret_val;
+		YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+		hl = list_entry(obj->hard_links.next, struct yaffs_obj,
+				hard_links);
+
+		yaffs_get_obj_name(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
+		parent = hl->parent;
+
+		list_del_init(&hl->hard_links);
+
+		yaffs_add_obj_to_dir(obj->my_dev->unlinked_dir, hl);
+
+		ret_val = yaffs_change_obj_name(obj, parent, name, 0, 0);
+
+		if (ret_val == YAFFS_OK)
+			ret_val = yaffs_generic_obj_del(hl);
+
+		return ret_val;
+
+	} else if (del_now) {
+		switch (obj->variant_type) {
+		case YAFFS_OBJECT_TYPE_FILE:
+			return yaffs_del_file(obj);
+		case YAFFS_OBJECT_TYPE_DIRECTORY:
+			list_del_init(&obj->variant.dir_variant.dirty);
+			return yaffs_del_dir(obj);
+		case YAFFS_OBJECT_TYPE_SYMLINK:
+			return yaffs_del_symlink(obj);
+		case YAFFS_OBJECT_TYPE_SPECIAL:
+			return yaffs_generic_obj_del(obj);
+		case YAFFS_OBJECT_TYPE_HARDLINK:
+		case YAFFS_OBJECT_TYPE_UNKNOWN:
+		default:
+			return YAFFS_FAIL;
+		}
+	} else if (yaffs_is_non_empty_dir(obj)) {
+		return YAFFS_FAIL;
+	} else {
+		return yaffs_change_obj_name(obj, obj->my_dev->unlinked_dir,
+						_Y("unlinked"), 0, 0);
+	}
+}
+
+static int yaffs_unlink_obj(struct yaffs_obj *obj)
+{
+	if (obj && obj->unlink_allowed)
+		return yaffs_unlink_worker(obj);
+
+	return YAFFS_FAIL;
+}
+
+int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR *name)
+{
+	struct yaffs_obj *obj;
+
+	obj = yaffs_find_by_name(dir, name);
+	return yaffs_unlink_obj(obj);
+}
+
+/* Note:
+ * If old_name is NULL then we take old_dir as the object to be renamed.
+ */
+int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR *old_name,
+		     struct yaffs_obj *new_dir, const YCHAR *new_name)
+{
+	struct yaffs_obj *obj = NULL;
+	struct yaffs_obj *existing_target = NULL;
+	int force = 0;
+	int result;
+	struct yaffs_dev *dev;
+
+	if (!old_dir || old_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+		BUG();
+		return YAFFS_FAIL;
+	}
+	if (!new_dir || new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+		BUG();
+		return YAFFS_FAIL;
+	}
+
+	dev = old_dir->my_dev;
+
+#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
+	/* Special case for case-insensitive systems.
+	 * While look-up is case insensitive, the name isn't.
+	 * Therefore we might want to change x.txt to X.txt
+	 */
+	if (old_dir == new_dir &&
+		old_name && new_name &&
+		strcmp(old_name, new_name) == 0)
+		force = 1;
+#endif
+
+	if (strnlen(new_name, YAFFS_MAX_NAME_LENGTH + 1) >
+	    YAFFS_MAX_NAME_LENGTH)
+		/* ENAMETOOLONG */
+		return YAFFS_FAIL;
+
+	if (old_name)
+		obj = yaffs_find_by_name(old_dir, old_name);
+	else {
+		obj = old_dir;
+		old_dir = obj->parent;
+	}
+
+	if (obj && obj->rename_allowed) {
+		/* Now handle an existing target, if there is one */
+		existing_target = yaffs_find_by_name(new_dir, new_name);
+		if (yaffs_is_non_empty_dir(existing_target)) {
+			return YAFFS_FAIL;	/* ENOTEMPTY */
+		} else if (existing_target && existing_target != obj) {
+			/* Nuke the target first, using shadowing,
+			 * but only if it isn't the same object.
+			 *
+			 * Note we must disable gc here otherwise it can mess
+			 * up the shadowing.
+			 *
+			 */
+			dev->gc_disable = 1;
+			yaffs_change_obj_name(obj, new_dir, new_name, force,
+					      existing_target->obj_id);
+			existing_target->is_shadowed = 1;
+			yaffs_unlink_obj(existing_target);
+			dev->gc_disable = 0;
+		}
+
+		result = yaffs_change_obj_name(obj, new_dir, new_name, 1, 0);
+
+		yaffs_update_parent(old_dir);
+		if (new_dir != old_dir)
+			yaffs_update_parent(new_dir);
+
+		return result;
+	}
+	return YAFFS_FAIL;
+}
+
+/*----------------------- Initialisation Scanning ---------------------- */
+
+void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
+			       int backward_scanning)
+{
+	struct yaffs_obj *obj;
+
+	if (backward_scanning) {
+		/* Handle YAFFS2 case (backward scanning)
+		 * If the shadowed object exists then ignore.
+		 */
+		obj = yaffs_find_by_number(dev, obj_id);
+		if (obj)
+			return;
+	}
+
+	/* Let's create it (if it does not exist), assuming it is a file so
+	 * that it can do shrinking etc.
+	 * We put it in the unlinked dir to be cleaned up after scanning.
+	 */
+	obj =
+	    yaffs_find_or_create_by_number(dev, obj_id, YAFFS_OBJECT_TYPE_FILE);
+	if (!obj)
+		return;
+	obj->is_shadowed = 1;
+	yaffs_add_obj_to_dir(dev->unlinked_dir, obj);
+	obj->variant.file_variant.shrink_size = 0;
+	obj->valid = 1;		/* So that we don't read any other info. */
+}
+
+void yaffs_link_fixup(struct yaffs_dev *dev, struct list_head *hard_list)
+{
+	struct list_head *lh;
+	struct list_head *save;
+	struct yaffs_obj *hl;
+	struct yaffs_obj *in;
+
+	list_for_each_safe(lh, save, hard_list) {
+		hl = list_entry(lh, struct yaffs_obj, hard_links);
+		in = yaffs_find_by_number(dev,
+					hl->variant.hardlink_variant.equiv_id);
+
+		if (in) {
+			/* Add the hardlink pointers */
+			hl->variant.hardlink_variant.equiv_obj = in;
+			list_add(&hl->hard_links, &in->hard_links);
+		} else {
+			/* TODO: Need to report/handle this better.
+			 * Got a problem: a hardlink to a non-existent object.
+			 */
+			hl->variant.hardlink_variant.equiv_obj = NULL;
+			INIT_LIST_HEAD(&hl->hard_links);
+		}
+	}
+}
+
+static void yaffs_strip_deleted_objs(struct yaffs_dev *dev)
+{
+	/*
+	 *  Sort out state of unlinked and deleted objects after scanning.
+	 */
+	struct list_head *i;
+	struct list_head *n;
+	struct yaffs_obj *l;
+
+	if (dev->read_only)
+		return;
+
+	/* Soft delete all the unlinked files */
+	list_for_each_safe(i, n,
+			   &dev->unlinked_dir->variant.dir_variant.children) {
+		l = list_entry(i, struct yaffs_obj, siblings);
+		yaffs_del_obj(l);
+	}
+
+	list_for_each_safe(i, n, &dev->del_dir->variant.dir_variant.children) {
+		l = list_entry(i, struct yaffs_obj, siblings);
+		yaffs_del_obj(l);
+	}
+}
+
+/*
+ * This code iterates through all the objects making sure that they are rooted.
+ * Any unrooted objects are re-rooted in lost+found.
+ * An object needs to be in one of:
+ * - Directly under the deleted or unlinked directories.
+ * - Directly or indirectly under root.
+ *
+ * Note:
+ *  This code assumes that we don't ever change the current relationships
+ *  between directories:
+ *   root_dir->parent == unlinked_dir->parent == del_dir->parent == NULL
+ *   lost-n-found->parent == root_dir
+ *
+ * This fixes the problem where directories might have inadvertently been
+ * deleted leaving the object "hanging" without being rooted in the
+ * directory tree.
+ */
+
+static int yaffs_has_null_parent(struct yaffs_dev *dev, struct yaffs_obj *obj)
+{
+	return (obj == dev->del_dir ||
+		obj == dev->unlinked_dir || obj == dev->root_dir);
+}
+
+static void yaffs_fix_hanging_objs(struct yaffs_dev *dev)
+{
+	struct yaffs_obj *obj;
+	struct yaffs_obj *parent;
+	int i;
+	struct list_head *lh;
+	struct list_head *n;
+	int depth_limit;
+	int hanging;
+
+	if (dev->read_only)
+		return;
+
+	/* Iterate through the objects in each hash entry,
+	 * looking at each object.
+	 * Make sure it is rooted.
+	 */
+
+	for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+		list_for_each_safe(lh, n, &dev->obj_bucket[i].list) {
+			obj = list_entry(lh, struct yaffs_obj, hash_link);
+			parent = obj->parent;
+
+			if (yaffs_has_null_parent(dev, obj)) {
+				/* These directories are not hanging */
+				hanging = 0;
+			} else if (!parent ||
+				   parent->variant_type !=
+				   YAFFS_OBJECT_TYPE_DIRECTORY) {
+				hanging = 1;
+			} else if (yaffs_has_null_parent(dev, parent)) {
+				hanging = 0;
+			} else {
+				/*
+				 * Need to follow the parent chain to
+				 * see if it is hanging.
+				 */
+				hanging = 0;
+				depth_limit = 100;
+
+				while (parent != dev->root_dir &&
+				       parent->parent &&
+				       parent->parent->variant_type ==
+				       YAFFS_OBJECT_TYPE_DIRECTORY &&
+				       depth_limit > 0) {
+					parent = parent->parent;
+					depth_limit--;
+				}
+				if (parent != dev->root_dir)
+					hanging = 1;
+			}
+			if (hanging) {
+				yaffs_trace(YAFFS_TRACE_SCAN,
+					"Hanging object %d moved to lost and found",
+					obj->obj_id);
+				yaffs_add_obj_to_dir(dev->lost_n_found, obj);
+			}
+		}
+	}
+}
+
+/*
+ * Delete directory contents for cleaning up lost and found.
+ */
+static void yaffs_del_dir_contents(struct yaffs_obj *dir)
+{
+	struct yaffs_obj *obj;
+	struct list_head *lh;
+	struct list_head *n;
+
+	if (dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
+		BUG();
+
+	list_for_each_safe(lh, n, &dir->variant.dir_variant.children) {
+		obj = list_entry(lh, struct yaffs_obj, siblings);
+		if (obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY)
+			yaffs_del_dir_contents(obj);
+		yaffs_trace(YAFFS_TRACE_SCAN,
+			"Deleting lost_found object %d",
+			obj->obj_id);
+		yaffs_unlink_obj(obj);
+	}
+}
+
+static void yaffs_empty_l_n_f(struct yaffs_dev *dev)
+{
+	yaffs_del_dir_contents(dev->lost_n_found);
+}
+
+
+struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *directory,
+				     const YCHAR *name)
+{
+	int sum;
+	struct list_head *i;
+	YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1];
+	struct yaffs_obj *l;
+
+	if (!name)
+		return NULL;
+
+	if (!directory) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"tragedy: yaffs_find_by_name: null pointer directory"
+			);
+		BUG();
+		return NULL;
+	}
+	if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"tragedy: yaffs_find_by_name: non-directory"
+			);
+		BUG();
+	}
+
+	sum = yaffs_calc_name_sum(name);
+
+	list_for_each(i, &directory->variant.dir_variant.children) {
+		l = list_entry(i, struct yaffs_obj, siblings);
+
+		if (l->parent != directory)
+			BUG();
+
+		yaffs_check_obj_details_loaded(l);
+
+		/* Special case for lost-n-found */
+		if (l->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
+			if (!strcmp(name, YAFFS_LOSTNFOUND_NAME))
+				return l;
+		} else if (l->sum == sum || l->hdr_chunk <= 0) {
+			/* Possible match: the name checksum matches, or the
+			 * object header has not been written yet (eg. a
+			 * lost-n-found chunk called objNNN), so do a real
+			 * name comparison.
+			 */
+			yaffs_get_obj_name(l, buffer,
+				YAFFS_MAX_NAME_LENGTH + 1);
+			if (!strncmp(name, buffer, YAFFS_MAX_NAME_LENGTH))
+				return l;
+		}
+	}
+	return NULL;
+}
+
+/* GetEquivalentObject dereferences any hard links to get to the
+ * actual object.
+ */
+
+struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj)
+{
+	if (obj && obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
+		obj = obj->variant.hardlink_variant.equiv_obj;
+		yaffs_check_obj_details_loaded(obj);
+	}
+	return obj;
+}
+
+/*
+ *  A note or two on object names.
+ *  * If the object name is missing, we then make one up in the form objnnn
+ *
+ *  * ASCII names are stored in the object header's name field from byte zero
+ *  * Unicode names are historically stored starting from byte zero.
+ *
+ * Then there are automatic Unicode names...
+ * The purpose of these is to save names in a way that can be read as
+ * ASCII or Unicode names as appropriate, thus allowing a Unicode and ASCII
+ * system to share files.
+ *
+ * These automatic Unicode names are stored slightly differently:
+ *  - If the name can fit in the ASCII character space then they are saved
+ *    as ASCII names as per above.
+ *  - If the name needs Unicode then the name is saved in Unicode
+ *    starting at oh->name[1].
+ */
+static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name,
+				int buffer_size)
+{
+	/* Create an object name if we could not find one. */
+	if (strnlen(name, YAFFS_MAX_NAME_LENGTH) == 0) {
+		YCHAR local_name[20];
+		YCHAR num_string[20];
+		YCHAR *x = &num_string[19];
+		unsigned v = obj->obj_id;
+		num_string[19] = 0;
+		while (v > 0) {
+			x--;
+			*x = '0' + (v % 10);
+			v /= 10;
+		}
+		/* make up a name */
+		strcpy(local_name, YAFFS_LOSTNFOUND_PREFIX);
+		strcat(local_name, x);
+		strncpy(name, local_name, buffer_size - 1);
+	}
+}
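+
+/* For example (illustrative): an object with obj_id 157 and no stored
+ * name has num_string built backwards as "157", so the made-up name is
+ * YAFFS_LOSTNFOUND_PREFIX with "157" appended, eg. "obj157" if the
+ * prefix happens to be "obj".
+ */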
+
+int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR *name, int buffer_size)
+{
+	memset(name, 0, buffer_size * sizeof(YCHAR));
+	yaffs_check_obj_details_loaded(obj);
+	if (obj->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
+		strncpy(name, YAFFS_LOSTNFOUND_NAME, buffer_size - 1);
+	} else if (obj->short_name[0]) {
+		strcpy(name, obj->short_name);
+	} else if (obj->hdr_chunk > 0) {
+		u8 *buffer = yaffs_get_temp_buffer(obj->my_dev);
+		struct yaffs_obj_hdr *oh = (struct yaffs_obj_hdr *)buffer;
+
+		memset(buffer, 0, obj->my_dev->data_bytes_per_chunk);
+
+		/* Read the header chunk; the name is recovered from the
+		 * buffer even if the read reports an error, matching the
+		 * original behaviour. */
+		yaffs_rd_chunk_tags_nand(obj->my_dev, obj->hdr_chunk,
+					 buffer, NULL);
+		yaffs_load_name_from_oh(obj->my_dev, name, oh->name,
+					buffer_size);
+
+		yaffs_release_temp_buffer(obj->my_dev, buffer);
+	}
+
+	yaffs_fix_null_name(obj, name, buffer_size);
+
+	return strnlen(name, YAFFS_MAX_NAME_LENGTH);
+}
+
+loff_t yaffs_get_obj_length(struct yaffs_obj *obj)
+{
+	/* Dereference any hard linking */
+	obj = yaffs_get_equivalent_obj(obj);
+
+	if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+		return obj->variant.file_variant.file_size;
+	if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
+		if (!obj->variant.symlink_variant.alias)
+			return 0;
+		return strnlen(obj->variant.symlink_variant.alias,
+				     YAFFS_MAX_ALIAS_LENGTH);
+	} else {
+		/* Only a directory should drop through to here */
+		return obj->my_dev->data_bytes_per_chunk;
+	}
+}
+
+int yaffs_get_obj_link_count(struct yaffs_obj *obj)
+{
+	int count = 0;
+	struct list_head *i;
+
+	if (!obj->unlinked)
+		count++;	/* the object itself */
+
+	list_for_each(i, &obj->hard_links)
+	    count++;		/* add the hard links; */
+
+	return count;
+}
+
+int yaffs_get_obj_inode(struct yaffs_obj *obj)
+{
+	obj = yaffs_get_equivalent_obj(obj);
+
+	return obj->obj_id;
+}
+
+unsigned yaffs_get_obj_type(struct yaffs_obj *obj)
+{
+	obj = yaffs_get_equivalent_obj(obj);
+
+	switch (obj->variant_type) {
+	case YAFFS_OBJECT_TYPE_FILE:
+		return DT_REG;
+	case YAFFS_OBJECT_TYPE_DIRECTORY:
+		return DT_DIR;
+	case YAFFS_OBJECT_TYPE_SYMLINK:
+		return DT_LNK;
+	case YAFFS_OBJECT_TYPE_HARDLINK:
+		return DT_REG;
+	case YAFFS_OBJECT_TYPE_SPECIAL:
+		if (S_ISFIFO(obj->yst_mode))
+			return DT_FIFO;
+		if (S_ISCHR(obj->yst_mode))
+			return DT_CHR;
+		if (S_ISBLK(obj->yst_mode))
+			return DT_BLK;
+		if (S_ISSOCK(obj->yst_mode))
+			return DT_SOCK;
+		return DT_REG;
+	default:
+		return DT_REG;
+	}
+}
+
+YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj)
+{
+	obj = yaffs_get_equivalent_obj(obj);
+	if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK)
+		return yaffs_clone_str(obj->variant.symlink_variant.alias);
+	else
+		return yaffs_clone_str(_Y(""));
+}
+
+/*--------------------------- Initialisation code -------------------------- */
+
+static int yaffs_check_dev_fns(struct yaffs_dev *dev)
+{
+	struct yaffs_driver *drv = &dev->drv;
+	struct yaffs_tags_handler *tagger = &dev->tagger;
+
+	/* Common functions, gotta have */
+	if (!drv->drv_read_chunk_fn ||
+	    !drv->drv_write_chunk_fn ||
+	    !drv->drv_erase_fn)
+		return 0;
+
+	if (dev->param.is_yaffs2 &&
+	     (!drv->drv_mark_bad_fn  || !drv->drv_check_bad_fn))
+		return 0;
+
+	/* Install the default tags marshalling functions if needed. */
+	yaffs_tags_compat_install(dev);
+	yaffs_tags_marshall_install(dev);
+
+	/* Check we now have the marshalling functions required. */
+	if (!tagger->write_chunk_tags_fn ||
+	    !tagger->read_chunk_tags_fn ||
+	    !tagger->query_block_fn ||
+	    !tagger->mark_bad_fn)
+		return 0;
+
+	return 1;
+}
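+
+/* A minimal hook-up sketch for the checks above (the my_* stub names are
+ * hypothetical; only the yaffs_driver fields are real):
+ *
+ *	dev->drv.drv_read_chunk_fn  = my_read_chunk;
+ *	dev->drv.drv_write_chunk_fn = my_write_chunk;
+ *	dev->drv.drv_erase_fn       = my_erase_block;
+ *	dev->drv.drv_mark_bad_fn    = my_mark_bad;	(yaffs2 only)
+ *	dev->drv.drv_check_bad_fn   = my_check_bad;	(yaffs2 only)
+ *
+ * With these set, yaffs_check_dev_fns() installs the default tags
+ * marshalling functions and the remaining checks pass.
+ */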
+
+static int yaffs_create_initial_dir(struct yaffs_dev *dev)
+{
+	/* Initialise the unlinked, deleted, root and lost+found directories */
+	dev->lost_n_found = dev->root_dir = NULL;
+	dev->unlinked_dir = dev->del_dir = NULL;
+	dev->unlinked_dir =
+	    yaffs_create_fake_dir(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR);
+	dev->del_dir =
+	    yaffs_create_fake_dir(dev, YAFFS_OBJECTID_DELETED, S_IFDIR);
+	dev->root_dir =
+	    yaffs_create_fake_dir(dev, YAFFS_OBJECTID_ROOT,
+				  YAFFS_ROOT_MODE | S_IFDIR);
+	dev->lost_n_found =
+	    yaffs_create_fake_dir(dev, YAFFS_OBJECTID_LOSTNFOUND,
+				  YAFFS_LOSTNFOUND_MODE | S_IFDIR);
+
+	if (dev->lost_n_found && dev->root_dir && dev->unlinked_dir
+	    && dev->del_dir) {
+		yaffs_add_obj_to_dir(dev->root_dir, dev->lost_n_found);
+		return YAFFS_OK;
+	}
+	return YAFFS_FAIL;
+}
+
+/* Low level init.
+ * Typically only used by yaffs_guts_initialise, but also used by the
+ * low level yaffs driver tests.
+ */
+
+int yaffs_guts_ll_init(struct yaffs_dev *dev)
+{
+	yaffs_trace(YAFFS_TRACE_TRACING, "yaffs: yaffs_ll_init()");
+
+	if (!dev) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"yaffs: Need a device"
+			);
+		return YAFFS_FAIL;
+	}
+
+	if (dev->ll_init)
+		return YAFFS_OK;
+
+	dev->internal_start_block = dev->param.start_block;
+	dev->internal_end_block = dev->param.end_block;
+	dev->block_offset = 0;
+	dev->chunk_offset = 0;
+	dev->n_free_chunks = 0;
+
+	dev->gc_block = 0;
+
+	if (dev->param.start_block == 0) {
+		dev->internal_start_block = dev->param.start_block + 1;
+		dev->internal_end_block = dev->param.end_block + 1;
+		dev->block_offset = 1;
+		dev->chunk_offset = dev->param.chunks_per_block;
+	}
+
+	/* Check geometry parameters. */
+
+	if ((!dev->param.inband_tags && dev->param.is_yaffs2 &&
+		dev->param.total_bytes_per_chunk < 1024) ||
+		(!dev->param.is_yaffs2 &&
+			dev->param.total_bytes_per_chunk < 512) ||
+		(dev->param.inband_tags && !dev->param.is_yaffs2) ||
+		 dev->param.chunks_per_block < 2 ||
+		 dev->param.n_reserved_blocks < 2 ||
+		dev->internal_start_block <= 0 ||
+		dev->internal_end_block <= 0 ||
+		dev->internal_end_block <=
+		(dev->internal_start_block + dev->param.n_reserved_blocks + 2)
+		) {
+		/* otherwise it is too small */
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"NAND geometry problems: chunk size %d, type is yaffs%s, inband_tags %d ",
+			dev->param.total_bytes_per_chunk,
+			dev->param.is_yaffs2 ? "2" : "",
+			dev->param.inband_tags);
+		return YAFFS_FAIL;
+	}
+
+	/* Sort out space for inband tags, if required */
+	if (dev->param.inband_tags)
+		dev->data_bytes_per_chunk =
+		    dev->param.total_bytes_per_chunk -
+		    sizeof(struct yaffs_packed_tags2_tags_only);
+	else
+		dev->data_bytes_per_chunk = dev->param.total_bytes_per_chunk;
+
+	/* Got the right mix of functions? */
+	if (!yaffs_check_dev_fns(dev)) {
+		/* Function missing */
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"device function(s) missing or wrong");
+
+		return YAFFS_FAIL;
+	}
+
+	if (yaffs_init_nand(dev) != YAFFS_OK) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS, "InitialiseNAND failed");
+		return YAFFS_FAIL;
+	}
+
+	return YAFFS_OK;
+}
+
+
+int yaffs_format_dev(struct yaffs_dev *dev)
+{
+	int i;
+	enum yaffs_block_state state;
+	u32 dummy;
+
+	if (yaffs_guts_ll_init(dev) != YAFFS_OK)
+		return YAFFS_FAIL;
+
+	if (dev->is_mounted)
+		return YAFFS_FAIL;
+
+	for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+		yaffs_query_init_block_state(dev, i, &state, &dummy);
+		if (state != YAFFS_BLOCK_STATE_DEAD)
+			yaffs_erase_block(dev, i);
+	}
+
+	return YAFFS_OK;
+}
+
+
+int yaffs_guts_initialise(struct yaffs_dev *dev)
+{
+	int init_failed = 0;
+	unsigned x;
+	int bits;
+
+	if (yaffs_guts_ll_init(dev) != YAFFS_OK)
+		return YAFFS_FAIL;
+
+	if (dev->is_mounted) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS, "device already mounted");
+		return YAFFS_FAIL;
+	}
+
+	dev->is_mounted = 1;
+
+	/* OK now calculate a few things for the device */
+
+	/*
+	 *  Calculate all the chunk size manipulation numbers:
+	 */
+	x = dev->data_bytes_per_chunk;
+	/* We always use dev->chunk_shift and dev->chunk_div */
+	dev->chunk_shift = calc_shifts(x);
+	x >>= dev->chunk_shift;
+	dev->chunk_div = x;
+	/* We only use chunk mask if chunk_div is 1 */
+	dev->chunk_mask = (1 << dev->chunk_shift) - 1;
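+	/* Example (illustrative): for 2048-byte chunks this gives
+	 * chunk_shift = 11, chunk_div = 1 and chunk_mask = 0x7ff, so the
+	 * offset-to-chunk conversion is a pure shift and mask. For a
+	 * non-power-of-2 chunk size such as 1536, chunk_shift = 9 and
+	 * chunk_div = 3, and a real division is needed.
+	 */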
+
+	/*
+	 * Calculate chunk_grp_bits.
+	 * We need to find the next power of 2 greater than the total
+	 * number of chunks (internal_end_block + 1 blocks' worth).
+	 */
+
+	x = dev->param.chunks_per_block * (dev->internal_end_block + 1);
+
+	bits = calc_shifts_ceiling(x);
+
+	/* Set up tnode width if wide tnodes are enabled. */
+	if (!dev->param.wide_tnodes_disabled) {
+		/* bits must be even so that we end up with 32-bit words */
+		if (bits & 1)
+			bits++;
+		if (bits < 16)
+			dev->tnode_width = 16;
+		else
+			dev->tnode_width = bits;
+	} else {
+		dev->tnode_width = 16;
+	}
+
+	dev->tnode_mask = (1 << dev->tnode_width) - 1;
+
+	/* Level0 tnodes are 16 bits or wider (if wide tnodes are enabled),
+	 * so if the bitwidth of the chunk range we're using is greater
+	 * than the tnode width we need to figure out chunk_grp_bits and
+	 * chunk_grp_size.
+	 */
+
+	if (bits <= dev->tnode_width)
+		dev->chunk_grp_bits = 0;
+	else
+		dev->chunk_grp_bits = bits - dev->tnode_width;
+
+	dev->tnode_size = (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8;
+	if (dev->tnode_size < sizeof(struct yaffs_tnode))
+		dev->tnode_size = sizeof(struct yaffs_tnode);
+
+	dev->chunk_grp_size = 1 << dev->chunk_grp_bits;
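+	/* Example (illustrative): 1024 blocks of 64 chunks gives x = 65536
+	 * and bits = 16; with the default 16-bit tnode width that means
+	 * chunk_grp_bits = 0 and chunk_grp_size = 1, ie. no grouping.
+	 */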
+
+	if (dev->param.chunks_per_block < dev->chunk_grp_size) {
+		/* We have a problem because the soft delete won't work if
+		 * the chunk group size > chunks per block.
+		 * This can be remedied by using larger "virtual blocks".
+		 */
+		yaffs_trace(YAFFS_TRACE_ALWAYS, "chunk group too large");
+
+		return YAFFS_FAIL;
+	}
+
+	/* Finished verifying the device, continue with initialisation */
+
+	/* More device initialisation */
+	dev->all_gcs = 0;
+	dev->passive_gc_count = 0;
+	dev->oldest_dirty_gc_count = 0;
+	dev->bg_gcs = 0;
+	dev->gc_block_finder = 0;
+	dev->buffered_block = -1;
+	dev->doing_buffered_block_rewrite = 0;
+	dev->n_deleted_files = 0;
+	dev->n_bg_deletions = 0;
+	dev->n_unlinked_files = 0;
+	dev->n_ecc_fixed = 0;
+	dev->n_ecc_unfixed = 0;
+	dev->n_tags_ecc_fixed = 0;
+	dev->n_tags_ecc_unfixed = 0;
+	dev->n_erase_failures = 0;
+	dev->n_erased_blocks = 0;
+	dev->gc_disable = 0;
+	dev->has_pending_prioritised_gc = 1;
+		/* Assume the worst for now, will get fixed on first GC */
+	INIT_LIST_HEAD(&dev->dirty_dirs);
+	dev->oldest_dirty_seq = 0;
+	dev->oldest_dirty_block = 0;
+
+	/* Initialise temporary buffers and caches. */
+	if (!yaffs_init_tmp_buffers(dev))
+		init_failed = 1;
+
+	dev->cache = NULL;
+	dev->gc_cleanup_list = NULL;
+
+	if (!init_failed && dev->param.n_caches > 0) {
+		int i;
+		void *buf;
+		int cache_bytes =
+		    dev->param.n_caches * sizeof(struct yaffs_cache);
+
+		if (dev->param.n_caches > YAFFS_MAX_SHORT_OP_CACHES)
+			dev->param.n_caches = YAFFS_MAX_SHORT_OP_CACHES;
+
+		dev->cache = kmalloc(cache_bytes, GFP_NOFS);
+
+		buf = (u8 *) dev->cache;
+
+		if (dev->cache)
+			memset(dev->cache, 0, cache_bytes);
+
+		for (i = 0; i < dev->param.n_caches && buf; i++) {
+			dev->cache[i].object = NULL;
+			dev->cache[i].last_use = 0;
+			dev->cache[i].dirty = 0;
+			dev->cache[i].data = buf =
+			    kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+		}
+		if (!buf)
+			init_failed = 1;
+
+		dev->cache_last_use = 0;
+	}
+
+	dev->cache_hits = 0;
+
+	if (!init_failed) {
+		dev->gc_cleanup_list =
+		    kmalloc(dev->param.chunks_per_block * sizeof(u32),
+					GFP_NOFS);
+		if (!dev->gc_cleanup_list)
+			init_failed = 1;
+	}
+
+	if (dev->param.is_yaffs2)
+		dev->param.use_header_file_size = 1;
+
+	if (!init_failed && !yaffs_init_blocks(dev))
+		init_failed = 1;
+
+	yaffs_init_tnodes_and_objs(dev);
+
+	if (!init_failed && !yaffs_create_initial_dir(dev))
+		init_failed = 1;
+
+	if (!init_failed && dev->param.is_yaffs2 &&
+		!dev->param.disable_summary &&
+		!yaffs_summary_init(dev))
+		init_failed = 1;
+
+	if (!init_failed) {
+		/* Now scan the flash. */
+		if (dev->param.is_yaffs2) {
+			if (yaffs2_checkpt_restore(dev)) {
+				yaffs_check_obj_details_loaded(dev->root_dir);
+				yaffs_trace(YAFFS_TRACE_CHECKPOINT |
+					YAFFS_TRACE_MOUNT,
+					"yaffs: restored from checkpoint"
+					);
+			} else {
+
+				/* Clean up the mess caused by an aborted
+				 * checkpoint load then scan backwards.
+				 */
+				yaffs_deinit_blocks(dev);
+
+				yaffs_deinit_tnodes_and_objs(dev);
+
+				dev->n_erased_blocks = 0;
+				dev->n_free_chunks = 0;
+				dev->alloc_block = -1;
+				dev->alloc_page = -1;
+				dev->n_deleted_files = 0;
+				dev->n_unlinked_files = 0;
+				dev->n_bg_deletions = 0;
+
+				if (!init_failed && !yaffs_init_blocks(dev))
+					init_failed = 1;
+
+				yaffs_init_tnodes_and_objs(dev);
+
+				if (!init_failed
+				    && !yaffs_create_initial_dir(dev))
+					init_failed = 1;
+
+				if (!init_failed && !yaffs2_scan_backwards(dev))
+					init_failed = 1;
+			}
+		} else if (!yaffs1_scan(dev)) {
+			init_failed = 1;
+		}
+
+		yaffs_strip_deleted_objs(dev);
+		yaffs_fix_hanging_objs(dev);
+		if (dev->param.empty_lost_n_found)
+			yaffs_empty_l_n_f(dev);
+	}
+
+	if (init_failed) {
+		/* Clean up the mess */
+		yaffs_trace(YAFFS_TRACE_TRACING,
+		  "yaffs: yaffs_guts_initialise() aborted.");
+
+		yaffs_deinitialise(dev);
+		return YAFFS_FAIL;
+	}
+
+	/* Zero out stats */
+	dev->n_page_reads = 0;
+	dev->n_page_writes = 0;
+	dev->n_erasures = 0;
+	dev->n_gc_copies = 0;
+	dev->n_retried_writes = 0;
+
+	dev->n_retired_blocks = 0;
+
+	yaffs_verify_free_chunks(dev);
+	yaffs_verify_blocks(dev);
+
+	/* Clean up any aborted checkpoint data */
+	if (!dev->is_checkpointed && dev->blocks_in_checkpt > 0)
+		yaffs2_checkpt_invalidate(dev);
+
+	yaffs_trace(YAFFS_TRACE_TRACING,
+	  "yaffs: yaffs_guts_initialise() done.");
+	return YAFFS_OK;
+}
+
+void yaffs_deinitialise(struct yaffs_dev *dev)
+{
+	if (dev->is_mounted) {
+		int i;
+
+		yaffs_deinit_blocks(dev);
+		yaffs_deinit_tnodes_and_objs(dev);
+		yaffs_summary_deinit(dev);
+
+		if (dev->param.n_caches > 0 && dev->cache) {
+
+			for (i = 0; i < dev->param.n_caches; i++) {
+				kfree(dev->cache[i].data);
+				dev->cache[i].data = NULL;
+			}
+
+			kfree(dev->cache);
+			dev->cache = NULL;
+		}
+
+		kfree(dev->gc_cleanup_list);
+
+		for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
+			kfree(dev->temp_buffer[i].buffer);
+
+		dev->is_mounted = 0;
+
+		yaffs_deinit_nand(dev);
+	}
+}
+
+int yaffs_count_free_chunks(struct yaffs_dev *dev)
+{
+	int n_free = 0;
+	int b;
+	struct yaffs_block_info *blk;
+
+	blk = dev->block_info;
+	for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
+		switch (blk->block_state) {
+		case YAFFS_BLOCK_STATE_EMPTY:
+		case YAFFS_BLOCK_STATE_ALLOCATING:
+		case YAFFS_BLOCK_STATE_COLLECTING:
+		case YAFFS_BLOCK_STATE_FULL:
+			n_free +=
+			    (dev->param.chunks_per_block - blk->pages_in_use +
+			     blk->soft_del_pages);
+			break;
+		default:
+			break;
+		}
+		blk++;
+	}
+	return n_free;
+}
+
+int yaffs_get_n_free_chunks(struct yaffs_dev *dev)
+{
+	/* This is what we report to the outside world */
+	int n_free;
+	int n_dirty_caches;
+	int blocks_for_checkpt;
+	int i;
+
+	n_free = dev->n_free_chunks;
+	n_free += dev->n_deleted_files;
+
+	/* Now count and subtract the number of dirty chunks in the cache. */
+
+	for (n_dirty_caches = 0, i = 0; i < dev->param.n_caches; i++) {
+		if (dev->cache[i].dirty)
+			n_dirty_caches++;
+	}
+
+	n_free -= n_dirty_caches;
+
+	n_free -=
+	    ((dev->param.n_reserved_blocks + 1) * dev->param.chunks_per_block);
+
+	/* Now figure checkpoint space and report that... */
+	blocks_for_checkpt = yaffs_calc_checkpt_blocks_required(dev);
+
+	n_free -= (blocks_for_checkpt * dev->param.chunks_per_block);
+
+	if (n_free < 0)
+		n_free = 0;
+
+	return n_free;
+}
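+
+/* Example (illustrative): with 64-chunk blocks, 5 reserved blocks and no
+ * checkpoint, a device with 10000 free chunks, 3 files awaiting deletion
+ * and 2 dirty cache entries reports
+ * 10000 + 3 - 2 - (5 + 1) * 64 = 9617 free chunks.
+ */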
+
+
+
+/*
+ * Marshalling functions to get loff_t file sizes into and out of
+ * object headers.
+ */
+void yaffs_oh_size_load(struct yaffs_obj_hdr *oh, loff_t fsize)
+{
+	oh->file_size_low = (fsize & 0xFFFFFFFF);
+	oh->file_size_high = ((fsize >> 32) & 0xFFFFFFFF);
+}
+
+loff_t yaffs_oh_to_size(struct yaffs_obj_hdr *oh)
+{
+	loff_t retval;
+
+	if (sizeof(loff_t) >= 8 && ~(oh->file_size_high))
+		retval = (((loff_t) oh->file_size_high) << 32) |
+			(((loff_t) oh->file_size_low) & 0xFFFFFFFF);
+	else
+		retval = (loff_t) oh->file_size_low;
+
+	return retval;
+}
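+
+/* Example (illustrative): a 5 Gbyte file, fsize = 0x140000000, is stored
+ * as file_size_low = 0x40000000 and file_size_high = 0x1;
+ * yaffs_oh_to_size() reassembles it on 64-bit loff_t builds, while a
+ * file_size_high of 0xffffffff makes it fall back to the low 32 bits.
+ */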
+
+
+void yaffs_count_blocks_by_state(struct yaffs_dev *dev, int bs[10])
+{
+	int i;
+	struct yaffs_block_info *bi;
+	int s;
+
+	for (i = 0; i < 10; i++)
+		bs[i] = 0;
+
+	for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+		bi = yaffs_get_block_info(dev, i);
+		s = bi->block_state;
+		if (s > YAFFS_BLOCK_STATE_DEAD || s < YAFFS_BLOCK_STATE_UNKNOWN)
+			bs[0]++;
+		else
+			bs[s]++;
+	}
+}
diff --git a/fs/yaffs2/yaffs_guts.h b/fs/yaffs2/yaffs_guts.h
new file mode 100644
index 0000000..0578536
--- /dev/null
+++ b/fs/yaffs2/yaffs_guts.h
@@ -0,0 +1,1003 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_GUTS_H__
+#define __YAFFS_GUTS_H__
+
+#include "yportenv.h"
+
+#define YAFFS_OK	1
+#define YAFFS_FAIL  0
+
+/* Give us a  Y=0x59,
+ * Give us an A=0x41,
+ * Give us an FF=0xff
+ * Give us an S=0x53
+ * And what have we got...
+ */
+#define YAFFS_MAGIC			0x5941ff53
+
+/*
+ * Tnodes form a tree with the tnodes in "levels"
+ * Levels greater than 0 hold 8 slots which point to other tnodes.
+ * Those at level 0 hold 16 slots which point to chunks in NAND.
+ *
+ * A maximum level of 8 thus supports files of size up to:
+ *
+ * 2^(3*MAX_LEVEL + 4) chunks
+ *
+ * ie. a max level of 8 supports files with up to 2^28 chunks, which gives
+ * a maximum file size of around 512 Gbytes with 2k chunks.
+ */
+#define YAFFS_NTNODES_LEVEL0		16
+#define YAFFS_TNODES_LEVEL0_BITS	4
+#define YAFFS_TNODES_LEVEL0_MASK	0xf
+
+#define YAFFS_NTNODES_INTERNAL		(YAFFS_NTNODES_LEVEL0 / 2)
+#define YAFFS_TNODES_INTERNAL_BITS	(YAFFS_TNODES_LEVEL0_BITS - 1)
+#define YAFFS_TNODES_INTERNAL_MASK	0x7
+#define YAFFS_TNODES_MAX_LEVEL		8
+#define YAFFS_TNODES_MAX_BITS		(YAFFS_TNODES_LEVEL0_BITS + \
+					YAFFS_TNODES_INTERNAL_BITS * \
+					YAFFS_TNODES_MAX_LEVEL)
+#define YAFFS_MAX_CHUNK_ID		((1 << YAFFS_TNODES_MAX_BITS) - 1)
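+
+/* Worked example: YAFFS_TNODES_MAX_BITS = 4 + 3 * 8 = 28, so
+ * YAFFS_MAX_CHUNK_ID = 2^28 - 1; with 2048-byte chunks that is
+ * 2^28 * 2^11 = 2^39 bytes, ie. the ~512 Gbyte limit noted above.
+ */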
+
+#define YAFFS_MAX_FILE_SIZE_32		0x7fffffff
+
+/* Constants for YAFFS1 mode */
+#define YAFFS_BYTES_PER_SPARE		16
+#define YAFFS_BYTES_PER_CHUNK		512
+#define YAFFS_CHUNK_SIZE_SHIFT		9
+#define YAFFS_CHUNKS_PER_BLOCK		32
+#define YAFFS_BYTES_PER_BLOCK	(YAFFS_CHUNKS_PER_BLOCK*YAFFS_BYTES_PER_CHUNK)
+
+#define YAFFS_MIN_YAFFS2_CHUNK_SIZE	1024
+#define YAFFS_MIN_YAFFS2_SPARE_SIZE	32
+
+
+
+#define YAFFS_ALLOCATION_NOBJECTS	100
+#define YAFFS_ALLOCATION_NTNODES	100
+#define YAFFS_ALLOCATION_NLINKS		100
+
+#define YAFFS_NOBJECT_BUCKETS		256
+
+#define YAFFS_OBJECT_SPACE		0x40000
+#define YAFFS_MAX_OBJECT_ID		(YAFFS_OBJECT_SPACE - 1)
+
+/* Binary data version stamps */
+#define YAFFS_SUMMARY_VERSION		1
+#define YAFFS_CHECKPOINT_VERSION	7
+
+#ifdef CONFIG_YAFFS_UNICODE
+#define YAFFS_MAX_NAME_LENGTH		127
+#define YAFFS_MAX_ALIAS_LENGTH		79
+#else
+#define YAFFS_MAX_NAME_LENGTH		255
+#define YAFFS_MAX_ALIAS_LENGTH		159
+#endif
+
+#define YAFFS_SHORT_NAME_LENGTH		15
+
+/* Some special object ids for pseudo objects */
+#define YAFFS_OBJECTID_ROOT		1
+#define YAFFS_OBJECTID_LOSTNFOUND	2
+#define YAFFS_OBJECTID_UNLINKED		3
+#define YAFFS_OBJECTID_DELETED		4
+
+/* Fake object Id for summary data */
+#define YAFFS_OBJECTID_SUMMARY		0x10
+
+/* Pseudo object ids for checkpointing */
+#define YAFFS_OBJECTID_CHECKPOINT_DATA	0x20
+#define YAFFS_SEQUENCE_CHECKPOINT_DATA	0x21
+
+#define YAFFS_MAX_SHORT_OP_CACHES	20
+
+#define YAFFS_N_TEMP_BUFFERS		6
+
+/* We limit the number of attempts at successfully saving a chunk of data.
+ * Small-page devices have 32 pages per block; large-page devices have 64.
+ * Default to something in the order of 5 to 10 blocks worth of chunks.
+ */
+#define YAFFS_WR_ATTEMPTS		(5*64)
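+
+/* That is 320 chunk-write attempts: 5 blocks' worth on a 64-page
+ * large-page device, or 10 blocks' worth on a 32-page small-page device.
+ */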
+
+/* Sequence numbers are used in YAFFS2 to determine block allocation order.
+ * The range is limited slightly to help distinguish bad numbers from good.
+ * This also allows us to perhaps in the future use special numbers for
+ * special purposes.
+ * 0xEFFFFF00 allows the allocation of 8 blocks/second (~1 Mbyte/s) for
+ * 15 years, and is a larger number than the lifetime of a 2GB device.
+ */
+#define YAFFS_LOWEST_SEQUENCE_NUMBER	0x00001000
+#define YAFFS_HIGHEST_SEQUENCE_NUMBER	0xefffff00
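+
+/* Roughly: the usable range is ~4.0e9 sequence numbers; at 8 block
+ * allocations per second (~2.5e8 per year) that is ~16 years, matching
+ * the 15-year figure above.
+ */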
+
+/* Special sequence number for bad block that failed to be marked bad */
+#define YAFFS_SEQUENCE_BAD_BLOCK	0xffff0000
+
+/* ChunkCache is used for short read/write operations.*/
+struct yaffs_cache {
+	struct yaffs_obj *object;
+	int chunk_id;
+	int last_use;
+	int dirty;
+	int n_bytes;		/* Only valid if the cache is dirty */
+	int locked;		/* Can't push out or flush while locked. */
+	u8 *data;
+};
+
+/* yaffs1 tags structures in RAM
+ * NB This uses bitfields. Bitfields should not straddle a u32 boundary,
+ * otherwise the structure size will get blown out.
+ */
+
+struct yaffs_tags {
+	unsigned chunk_id:20;
+	unsigned serial_number:2;
+	unsigned n_bytes_lsb:10;
+	unsigned obj_id:18;
+	unsigned ecc:12;
+	unsigned n_bytes_msb:2;
+};
+
+union yaffs_tags_union {
+	struct yaffs_tags as_tags;
+	u8 as_bytes[8];
+};
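+
+/* Bit budget check for struct yaffs_tags: 20 + 2 + 10 + 18 + 12 + 2 = 64
+ * bits = 8 bytes, matching as_bytes[8], and no bitfield straddles a u32
+ * boundary (20 + 2 + 10 = 32, 18 + 12 + 2 = 32).
+ */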
+
+
+/* Stuff used for extended tags in YAFFS2 */
+
+enum yaffs_ecc_result {
+	YAFFS_ECC_RESULT_UNKNOWN,
+	YAFFS_ECC_RESULT_NO_ERROR,
+	YAFFS_ECC_RESULT_FIXED,
+	YAFFS_ECC_RESULT_UNFIXED
+};
+
+enum yaffs_obj_type {
+	YAFFS_OBJECT_TYPE_UNKNOWN,
+	YAFFS_OBJECT_TYPE_FILE,
+	YAFFS_OBJECT_TYPE_SYMLINK,
+	YAFFS_OBJECT_TYPE_DIRECTORY,
+	YAFFS_OBJECT_TYPE_HARDLINK,
+	YAFFS_OBJECT_TYPE_SPECIAL
+};
+
+#define YAFFS_OBJECT_TYPE_MAX YAFFS_OBJECT_TYPE_SPECIAL
+
+struct yaffs_ext_tags {
+	unsigned chunk_used;	/*  Status of the chunk: used or unused */
+	unsigned obj_id;	/* If 0 this is not used */
+	unsigned chunk_id;	/* If 0 this is a header, else a data chunk */
+	unsigned n_bytes;	/* Only valid for data chunks */
+
+	/* The following stuff only has meaning when we read */
+	enum yaffs_ecc_result ecc_result;
+	unsigned block_bad;
+
+	/* YAFFS 1 stuff */
+	unsigned is_deleted;	/* The chunk is marked deleted */
+	unsigned serial_number;	/* Yaffs1 2-bit serial number */
+
+	/* YAFFS2 stuff */
+	unsigned seq_number;	/* The sequence number of this block */
+
+	/* Extra info if this is an object header (YAFFS2 only) */
+
+	unsigned extra_available;	/* Extra info available if not zero */
+	unsigned extra_parent_id;	/* The parent object */
+	unsigned extra_is_shrink;	/* Is it a shrink header? */
+	unsigned extra_shadows;	/* Does this shadow another object? */
+
+	enum yaffs_obj_type extra_obj_type;	/* What object type? */
+
+	loff_t extra_file_size;		/* Length if it is a file */
+	unsigned extra_equiv_id;	/* Equivalent object for a hard link */
+};
+
+/* Spare structure for YAFFS1 */
+struct yaffs_spare {
+	u8 tb0;
+	u8 tb1;
+	u8 tb2;
+	u8 tb3;
+	u8 page_status;		/* set to 0 to delete the chunk */
+	u8 block_status;
+	u8 tb4;
+	u8 tb5;
+	u8 ecc1[3];
+	u8 tb6;
+	u8 tb7;
+	u8 ecc2[3];
+};
+
+/*Special structure for passing through to mtd */
+struct yaffs_nand_spare {
+	struct yaffs_spare spare;
+	int eccres1;
+	int eccres2;
+};
+
+/* Block data in RAM */
+
+enum yaffs_block_state {
+	YAFFS_BLOCK_STATE_UNKNOWN = 0,
+
+	YAFFS_BLOCK_STATE_SCANNING,
+	/* Being scanned */
+
+	YAFFS_BLOCK_STATE_NEEDS_SCAN,
+	/* The block might have something on it (ie. it is allocating or full,
+	 * perhaps empty) but it needs to be scanned to determine its true
+	 * state.
+	 * This state is only valid during scanning.
+	 * NB We tolerate empty because the pre-scanner might be incapable
+	 * of deciding.
+	 * However, if this state is returned on a YAFFS2 device,
+	 * then we expect a sequence number.
+	 */
+
+	YAFFS_BLOCK_STATE_EMPTY,
+	/* This block is empty */
+
+	YAFFS_BLOCK_STATE_ALLOCATING,
+	/* This block is partially allocated.
+	 * At least one page holds valid data.
+	 * This is the one currently being used for page
+	 * allocation. Should never be more than one of these.
+	 * If a block is only partially allocated at mount it is treated as
+	 * full.
+	 */
+
+	YAFFS_BLOCK_STATE_FULL,
+	/* All the pages in this block have been allocated.
+	 * If a block was only partially allocated when mounted we treat
+	 * it as fully allocated.
+	 */
+
+	YAFFS_BLOCK_STATE_DIRTY,
+	/* The block was full and now all chunks have been deleted.
+	 * Erase me, reuse me.
+	 */
+
+	YAFFS_BLOCK_STATE_CHECKPOINT,
+	/* This block is assigned to holding checkpoint data. */
+
+	YAFFS_BLOCK_STATE_COLLECTING,
+	/* This block is being garbage collected */
+
+	YAFFS_BLOCK_STATE_DEAD
+	    /* This block has failed and is not in use */
+};
+
+#define	YAFFS_NUMBER_OF_BLOCK_STATES (YAFFS_BLOCK_STATE_DEAD + 1)
+
+struct yaffs_block_info {
+
+	int soft_del_pages:10;	/* number of soft deleted pages */
+	int pages_in_use:10;	/* number of pages in use */
+	unsigned block_state:4;	/* One of the above block states. */
+				/* NB use unsigned because enum is sometimes
+				 * an int */
+	u32 needs_retiring:1;	/* Data has failed on this block, */
+				/*need to get valid data off and retire*/
+	u32 skip_erased_check:1;/* Skip the erased check on this block */
+	u32 gc_prioritise:1;	/* An ECC check or blank check has failed.
+				   Block should be prioritised for GC */
+	u32 chunk_error_strikes:3;	/* How many times we've had ecc etc
+				failures on this block and tried to reuse it */
+	u32 has_summary:1;	/* The block has a summary */
+
+	u32 has_shrink_hdr:1;	/* This block has at least one shrink header */
+	u32 seq_number;		/* block sequence number for yaffs2 */
+
+};
+
+/* -------------------------- Object structure -------------------------------*/
+/* This is the object structure as stored on NAND */
+
+struct yaffs_obj_hdr {
+	enum yaffs_obj_type type;
+
+	/* Apply to everything  */
+	int parent_obj_id;
+	u16 sum_no_longer_used;	/* checksum of name. No longer used */
+	YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+	/* The following apply to all object types except for hard links */
+	u32 yst_mode;		/* protection */
+
+	u32 yst_uid;
+	u32 yst_gid;
+	u32 yst_atime;
+	u32 yst_mtime;
+	u32 yst_ctime;
+
+	/* File size  applies to files only */
+	u32 file_size_low;
+
+	/* Equivalent object id applies to hard links only. */
+	int equiv_id;
+
+	/* Alias is for symlinks only. */
+	YCHAR alias[YAFFS_MAX_ALIAS_LENGTH + 1];
+
+	u32 yst_rdev;	/* stuff for block and char devices (major/min) */
+
+	u32 win_ctime[2];
+	u32 win_atime[2];
+	u32 win_mtime[2];
+
+	u32 inband_shadowed_obj_id;
+	u32 inband_is_shrink;
+
+	u32 file_size_high;
+	u32 reserved[1];
+	int shadows_obj;	/* This object header shadows the
+				specified object if > 0 */
+
+	/* is_shrink applies to object headers written when we make a hole. */
+	u32 is_shrink;
+
+};
+
+/*--------------------------- Tnode -------------------------- */
+
+struct yaffs_tnode {
+	struct yaffs_tnode *internal[YAFFS_NTNODES_INTERNAL];
+};
+
+/*------------------------  Object -----------------------------*/
+/* An object can be one of:
+ * - a directory (no data, has child links)
+ * - a regular file (data.... not prunes :->).
+ * - a symlink [symbolic link] (the alias).
+ * - a hard link
+ */
+
+struct yaffs_file_var {
+	loff_t file_size;
+	loff_t scanned_size;
+	loff_t shrink_size;
+	int top_level;
+	struct yaffs_tnode *top;
+};
+
+struct yaffs_dir_var {
+	struct list_head children;	/* list of child links */
+	struct list_head dirty;	/* Entry for list of dirty directories */
+};
+
+struct yaffs_symlink_var {
+	YCHAR *alias;
+};
+
+struct yaffs_hardlink_var {
+	struct yaffs_obj *equiv_obj;
+	u32 equiv_id;
+};
+
+union yaffs_obj_var {
+	struct yaffs_file_var file_variant;
+	struct yaffs_dir_var dir_variant;
+	struct yaffs_symlink_var symlink_variant;
+	struct yaffs_hardlink_var hardlink_variant;
+};
+
+struct yaffs_obj {
+	u8 deleted:1;		/* This should only apply to unlinked files. */
+	u8 soft_del:1;		/* it has also been soft deleted */
+	u8 unlinked:1;		/* An unlinked file.*/
+	u8 fake:1;		/* A fake object has no presence on NAND. */
+	u8 rename_allowed:1;	/* Some objects cannot be renamed. */
+	u8 unlink_allowed:1;
+	u8 dirty:1;		/* the object needs to be written to flash */
+	u8 valid:1;		/* When the file system is being loaded up, this
+				 * object might be created before the data
+				 * is available,
+				 * ie. file data chunks encountered before
+				 * the header.
+				 */
+	u8 lazy_loaded:1;	/* This object has been lazy loaded and
+				 * is missing some detail */
+
+	u8 defered_free:1;	/* Object is removed from NAND, but is
+				 * still in the inode cache.
+				 * Freeing of the object is deferred
+				 * until the inode is released.
+				 */
+	u8 being_created:1;	/* This object is still being created
+				 * so skip some verification checks. */
+	u8 is_shadowed:1;	/* This object is shadowed on the way
+				 * to being renamed. */
+
+	u8 xattr_known:1;	/* We know whether this object has xattribs
+				 * or not. */
+	u8 has_xattr:1;		/* This object has xattribs.
+				 * Only valid if xattr_known. */
+
+	u8 serial;		/* serial number of chunk in NAND.*/
+	u16 sum;		/* sum of the name to speed searching */
+
+	struct yaffs_dev *my_dev;	/* The device I'm on */
+
+	struct list_head hash_link;	/* list of objects in hash bucket */
+
+	struct list_head hard_links;	/* hard linked object chain*/
+
+	/* directory structure stuff */
+	/* also used for linking up the free list */
+	struct yaffs_obj *parent;
+	struct list_head siblings;
+
+	/* Where's my object header in NAND? */
+	int hdr_chunk;
+
+	int n_data_chunks;	/* Number of data chunks for this file. */
+
+	u32 obj_id;		/* the object id value */
+
+	u32 yst_mode;
+
+	YCHAR short_name[YAFFS_SHORT_NAME_LENGTH + 1];
+
+#ifdef CONFIG_YAFFS_WINCE
+	u32 win_ctime[2];
+	u32 win_mtime[2];
+	u32 win_atime[2];
+#else
+	u32 yst_uid;
+	u32 yst_gid;
+	u32 yst_atime;
+	u32 yst_mtime;
+	u32 yst_ctime;
+#endif
+
+	u32 yst_rdev;
+
+	void *my_inode;
+
+	enum yaffs_obj_type variant_type;
+
+	union yaffs_obj_var variant;
+
+};
+
+struct yaffs_obj_bucket {
+	struct list_head list;
+	int count;
+};
+
+/* yaffs_checkpt_obj holds the definition of an object as dumped
+ * by checkpointing.
+ */
+
+struct yaffs_checkpt_obj {
+	int struct_type;
+	u32 obj_id;
+	u32 parent_id;
+	int hdr_chunk;
+	enum yaffs_obj_type variant_type:3;
+	u8 deleted:1;
+	u8 soft_del:1;
+	u8 unlinked:1;
+	u8 fake:1;
+	u8 rename_allowed:1;
+	u8 unlink_allowed:1;
+	u8 serial;
+	int n_data_chunks;
+	loff_t size_or_equiv_obj;
+};
+
+/*--------------------- Temporary buffers ----------------
+ *
+ * These are chunk-sized working buffers. Each device has a few.
+ */
+
+struct yaffs_buffer {
+	u8 *buffer;
+	int in_use;
+};
+
+/*----------------- Device ---------------------------------*/
+
+struct yaffs_param {
+	const YCHAR *name;
+
+	/*
+	 * Entry parameters set up way early. Yaffs sets up the rest.
+	 * The structure should be zeroed out before use so that unused
+	 * and default values are zero.
+	 */
+
+	int inband_tags;	/* Use inband tags */
+	u32 total_bytes_per_chunk;	/* Should be >= 512, does not need to
+					 be a power of 2 */
+	int chunks_per_block;	/* does not need to be a power of 2 */
+	int spare_bytes_per_chunk;	/* spare area size */
+	int start_block;	/* Start block we're allowed to use */
+	int end_block;		/* End block we're allowed to use */
+	int n_reserved_blocks;	/* Tuneable so that we can reduce
+				 * reserved blocks on NOR and RAM. */
+
+	int n_caches;		/* If <= 0, then short op caching is disabled,
+				 * else the number of short op caches.
+				 */
+	int cache_bypass_aligned; /* If non-zero then bypass the cache for
+				   * aligned writes.
+				   */
+
+	int use_nand_ecc;	/* Flag to decide whether or not to use
+				 * NAND driver ECC on data (yaffs1) */
+	int tags_9bytes;	/* Use 9 byte tags */
+	int no_tags_ecc;	/* Flag to decide whether or not to do ECC
+				 * on packed tags (yaffs2) */
+
+	int is_yaffs2;		/* Use yaffs2 mode on this device */
+
+	int empty_lost_n_found;	/* Auto-empty lost+found directory on mount */
+
+	int refresh_period;	/* How often to check for a block refresh */
+
+	/* Checkpoint control. Can be set before or after initialisation */
+	u8 skip_checkpt_rd;
+	u8 skip_checkpt_wr;
+
+	int enable_xattr;	/* Enable xattribs */
+
+	int max_objects;	/*
+				 * Set to limit the number of objects created.
+				 * 0 = no limit.
+				*/
+
+	/* The remove_obj_fn function must be supplied by OS flavours that
+	 * need it.
+	 * yaffs direct uses it to implement the faster readdir.
+	 * Linux uses it to protect the directory during unlocking.
+	 */
+	void (*remove_obj_fn) (struct yaffs_obj *obj);
+
+	/* Callback to mark the superblock dirty */
+	void (*sb_dirty_fn) (struct yaffs_dev *dev);
+
+	/*  Callback to control garbage collection. */
+	unsigned (*gc_control_fn) (struct yaffs_dev *dev);
+
+	/* Debug control flags. Don't use unless you know what you're doing */
+	int use_header_file_size;	/* Flag to determine if we should use
+					 * file sizes from the header */
+	int disable_lazy_load;	/* Disable lazy loading on this device */
+	int wide_tnodes_disabled;	/* Set to disable wide tnodes */
+	int disable_soft_del;	/* yaffs 1 only: Set to disable the use of
+				 * softdeletion. */
+
+	int defered_dir_update;	/* Set to defer directory updates */
+
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+	int auto_unicode;
+#endif
+	int always_check_erased;	/* Force chunk erased check always on */
+
+	int disable_summary;
+	int disable_bad_block_marking;
+
+};
+
+struct yaffs_driver {
+	int (*drv_write_chunk_fn) (struct yaffs_dev *dev, int nand_chunk,
+				   const u8 *data, int data_len,
+				   const u8 *oob, int oob_len);
+	int (*drv_read_chunk_fn) (struct yaffs_dev *dev, int nand_chunk,
+				   u8 *data, int data_len,
+				   u8 *oob, int oob_len,
+				   enum yaffs_ecc_result *ecc_result);
+	int (*drv_erase_fn) (struct yaffs_dev *dev, int block_no);
+	int (*drv_mark_bad_fn) (struct yaffs_dev *dev, int block_no);
+	int (*drv_check_bad_fn) (struct yaffs_dev *dev, int block_no);
+	int (*drv_initialise_fn) (struct yaffs_dev *dev);
+	int (*drv_deinitialise_fn) (struct yaffs_dev *dev);
+};
+
+struct yaffs_tags_handler {
+	int (*write_chunk_tags_fn) (struct yaffs_dev *dev,
+				    int nand_chunk, const u8 *data,
+				    const struct yaffs_ext_tags *tags);
+	int (*read_chunk_tags_fn) (struct yaffs_dev *dev,
+				   int nand_chunk, u8 *data,
+				   struct yaffs_ext_tags *tags);
+
+	int (*query_block_fn) (struct yaffs_dev *dev, int block_no,
+			       enum yaffs_block_state *state,
+			       u32 *seq_number);
+	int (*mark_bad_fn) (struct yaffs_dev *dev, int block_no);
+};
+
+struct yaffs_dev {
+	struct yaffs_param param;
+	struct yaffs_driver drv;
+	struct yaffs_tags_handler tagger;
+
+	/* Context storage. Holds extra OS specific data for this device */
+
+	void *os_context;
+	void *driver_context;
+
+	struct list_head dev_list;
+
+	int ll_init;
+	/* Runtime parameters. Set up by YAFFS. */
+	int data_bytes_per_chunk;
+
+	/* Non-wide tnode stuff */
+	u16 chunk_grp_bits;	/* Number of bits that need to be resolved if
+				 * the tnodes are not wide enough.
+				 */
+	u16 chunk_grp_size;	/* == 2^chunk_grp_bits */
+
+	/* Stuff to support wide tnodes */
+	u32 tnode_width;
+	u32 tnode_mask;
+	u32 tnode_size;
+
+	/* Stuff for figuring out file offset to chunk conversions */
+	u32 chunk_shift;	/* Shift value */
+	u32 chunk_div;		/* Divisor after shifting: 1 for 2^n sizes */
+	u32 chunk_mask;		/* Mask to use for power-of-2 case */
+
+	int is_mounted;
+	int read_only;
+	int is_checkpointed;
+
+	/* Stuff to support block offsetting to support start block zero */
+	int internal_start_block;
+	int internal_end_block;
+	int block_offset;
+	int chunk_offset;
+
+	/* Runtime checkpointing stuff */
+	int checkpt_page_seq;	/* running sequence number of checkpt pages */
+	int checkpt_byte_count;
+	int checkpt_byte_offs;
+	u8 *checkpt_buffer;
+	int checkpt_open_write;
+	int blocks_in_checkpt;
+	int checkpt_cur_chunk;
+	int checkpt_cur_block;
+	int checkpt_next_block;
+	int *checkpt_block_list;
+	int checkpt_max_blocks;
+	u32 checkpt_sum;
+	u32 checkpt_xor;
+
+	int checkpoint_blocks_required;	/* Number of blocks needed to store
+					 * current checkpoint set */
+
+	/* Block Info */
+	struct yaffs_block_info *block_info;
+	u8 *chunk_bits;		/* bitmap of chunks in use */
+	unsigned block_info_alt:1;	/* allocated using alternative alloc */
+	unsigned chunk_bits_alt:1;	/* allocated using alternative alloc */
+	int chunk_bit_stride;	/* Number of bytes of chunk_bits per block.
+				 * Must be consistent with chunks_per_block.
+				 */
+
+	int n_erased_blocks;
+	int alloc_block;	/* Current block being allocated off */
+	u32 alloc_page;
+	int alloc_block_finder;	/* Used to search for next allocation block */
+
+	/* Object and Tnode memory management */
+	void *allocator;
+	int n_obj;
+	int n_tnodes;
+
+	int n_hardlinks;
+
+	struct yaffs_obj_bucket obj_bucket[YAFFS_NOBJECT_BUCKETS];
+	u32 bucket_finder;
+
+	int n_free_chunks;
+
+	/* Garbage collection control */
+	u32 *gc_cleanup_list;	/* objects to delete at the end of a GC. */
+	u32 n_clean_ups;
+
+	unsigned has_pending_prioritised_gc;	/* We think this device might
+						have pending prioritised gcs */
+	unsigned gc_disable;
+	unsigned gc_block_finder;
+	unsigned gc_dirtiest;
+	unsigned gc_pages_in_use;
+	unsigned gc_not_done;
+	unsigned gc_block;
+	unsigned gc_chunk;
+	unsigned gc_skip;
+	struct yaffs_summary_tags *gc_sum_tags;
+
+	/* Special directories */
+	struct yaffs_obj *root_dir;
+	struct yaffs_obj *lost_n_found;
+
+	int buffered_block;	/* Which block is buffered here? */
+	int doing_buffered_block_rewrite;
+
+	struct yaffs_cache *cache;
+	int cache_last_use;
+
+	/* Stuff for background deletion and unlinked files. */
+	struct yaffs_obj *unlinked_dir;	/* Directory where unlinked and deleted
+					 files live. */
+	struct yaffs_obj *del_dir;	/* Directory where deleted objects are
+					sent to disappear. */
+	struct yaffs_obj *unlinked_deletion;	/* Current file being
+							background deleted. */
+	int n_deleted_files;	/* Count of files awaiting deletion; */
+	int n_unlinked_files;	/* Count of unlinked files. */
+	int n_bg_deletions;	/* Count of background deletions. */
+
+	/* Temporary buffer management */
+	struct yaffs_buffer temp_buffer[YAFFS_N_TEMP_BUFFERS];
+	int max_temp;
+	int temp_in_use;
+	int unmanaged_buffer_allocs;
+	int unmanaged_buffer_deallocs;
+
+	/* yaffs2 runtime stuff */
+	unsigned seq_number;	/* Sequence number of currently
+					allocating block */
+	unsigned oldest_dirty_seq;
+	unsigned oldest_dirty_block;
+
+	/* Block refreshing */
+	int refresh_skip;	/* A skip down counter.
+				 * Refresh happens when this gets to zero. */
+
+	/* Dirty directory handling */
+	struct list_head dirty_dirs;	/* List of dirty directories */
+
+	/* Summary */
+	int chunks_per_summary;
+	struct yaffs_summary_tags *sum_tags;
+
+	/* Statistics */
+	u32 n_page_writes;
+	u32 n_page_reads;
+	u32 n_erasures;
+	u32 n_bad_markings;
+	u32 n_erase_failures;
+	u32 n_gc_copies;
+	u32 all_gcs;
+	u32 passive_gc_count;
+	u32 oldest_dirty_gc_count;
+	u32 n_gc_blocks;
+	u32 bg_gcs;
+	u32 n_retried_writes;
+	u32 n_retired_blocks;
+	u32 n_ecc_fixed;
+	u32 n_ecc_unfixed;
+	u32 n_tags_ecc_fixed;
+	u32 n_tags_ecc_unfixed;
+	u32 n_deletions;
+	u32 n_unmarked_deletions;
+	u32 refresh_count;
+	u32 cache_hits;
+	u32 tags_used;
+	u32 summary_used;
+
+};
+
+/* The CheckpointDevice structure holds the device information that changes
+ * at runtime and must be preserved over unmount/mount cycles.
+ */
+struct yaffs_checkpt_dev {
+	int struct_type;
+	int n_erased_blocks;
+	int alloc_block;	/* Current block being allocated off */
+	u32 alloc_page;
+	int n_free_chunks;
+
+	int n_deleted_files;	/* Count of files awaiting deletion. */
+	int n_unlinked_files;	/* Count of unlinked files. */
+	int n_bg_deletions;	/* Count of background deletions. */
+
+	/* yaffs2 runtime stuff */
+	unsigned seq_number;	/* Sequence number of currently
+				 * allocating block */
+
+};
+
+struct yaffs_checkpt_validity {
+	int struct_type;
+	u32 magic;
+	u32 version;
+	u32 head;
+};
+
+struct yaffs_shadow_fixer {
+	int obj_id;
+	int shadowed_id;
+	struct yaffs_shadow_fixer *next;
+};
+
+/* Structure for doing xattr modifications */
+struct yaffs_xattr_mod {
+	int set;		/* If 0 then this is a deletion */
+	const YCHAR *name;
+	const void *data;
+	int size;
+	int flags;
+	int result;
+};
+
+/*----------------------- YAFFS Functions -----------------------*/
+
+int yaffs_guts_initialise(struct yaffs_dev *dev);
+void yaffs_deinitialise(struct yaffs_dev *dev);
+
+int yaffs_get_n_free_chunks(struct yaffs_dev *dev);
+
+int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR * old_name,
+		     struct yaffs_obj *new_dir, const YCHAR * new_name);
+
+int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR * name);
+int yaffs_del_obj(struct yaffs_obj *obj);
+
+int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR * name, int buffer_size);
+loff_t yaffs_get_obj_length(struct yaffs_obj *obj);
+int yaffs_get_obj_inode(struct yaffs_obj *obj);
+unsigned yaffs_get_obj_type(struct yaffs_obj *obj);
+int yaffs_get_obj_link_count(struct yaffs_obj *obj);
+
+/* File operations */
+int yaffs_file_rd(struct yaffs_obj *obj, u8 * buffer, loff_t offset,
+		  int n_bytes);
+int yaffs_wr_file(struct yaffs_obj *obj, const u8 * buffer, loff_t offset,
+		  int n_bytes, int write_through);
+int yaffs_resize_file(struct yaffs_obj *obj, loff_t new_size);
+
+struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
+				    const YCHAR *name, u32 mode, u32 uid,
+				    u32 gid);
+
+int yaffs_flush_file(struct yaffs_obj *obj, int update_time, int data_sync);
+
+/* Flushing and checkpointing */
+void yaffs_flush_whole_cache(struct yaffs_dev *dev);
+
+int yaffs_checkpoint_save(struct yaffs_dev *dev);
+int yaffs_checkpoint_restore(struct yaffs_dev *dev);
+
+/* Directory operations */
+struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
+				   u32 mode, u32 uid, u32 gid);
+struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *the_dir,
+				     const YCHAR *name);
+struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number);
+
+/* Link operations */
+struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR *name,
+				 struct yaffs_obj *equiv_obj);
+
+struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj);
+
+/* Symlink operations */
+struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
+				       const YCHAR *name, u32 mode, u32 uid,
+				       u32 gid, const YCHAR *alias);
+YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj);
+
+/* Special inodes (fifos, sockets and devices) */
+struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
+				       const YCHAR *name, u32 mode, u32 uid,
+				       u32 gid, u32 rdev);
+
+int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR *name,
+		      const void *value, int size, int flags);
+int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR *name, void *value,
+		      int size);
+int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size);
+int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR *name);
+
+/* Special directories */
+struct yaffs_obj *yaffs_root(struct yaffs_dev *dev);
+struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev);
+
+void yaffs_handle_defered_free(struct yaffs_obj *obj);
+
+void yaffs_update_dirty_dirs(struct yaffs_dev *dev);
+
+int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency);
+
+/* Debug dump  */
+int yaffs_dump_obj(struct yaffs_obj *obj);
+
+void yaffs_guts_test(struct yaffs_dev *dev);
+int yaffs_guts_ll_init(struct yaffs_dev *dev);
+
+
+/* A few useful functions to be used within the core files*/
+void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
+		     int lyn);
+int yaffs_check_ff(u8 *buffer, int n_bytes);
+void yaffs_handle_chunk_error(struct yaffs_dev *dev,
+			      struct yaffs_block_info *bi);
+
+u8 *yaffs_get_temp_buffer(struct yaffs_dev *dev);
+void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer);
+
+struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
+						 int number,
+						 enum yaffs_obj_type type);
+int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+			    int nand_chunk, int in_scan);
+void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR *name);
+void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
+				const struct yaffs_obj_hdr *oh);
+void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj);
+YCHAR *yaffs_clone_str(const YCHAR *str);
+void yaffs_link_fixup(struct yaffs_dev *dev, struct list_head *hard_list);
+void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no);
+int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name,
+		    int force, int is_shrink, int shadows,
+		    struct yaffs_xattr_mod *xop);
+void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
+			       int backward_scanning);
+int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks);
+struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev);
+struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
+					   struct yaffs_file_var *file_struct,
+					   u32 chunk_id,
+					   struct yaffs_tnode *passed_tn);
+
+int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
+		     int n_bytes, int write_through);
+void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size);
+void yaffs_skip_rest_of_block(struct yaffs_dev *dev);
+
+int yaffs_count_free_chunks(struct yaffs_dev *dev);
+
+struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
+				       struct yaffs_file_var *file_struct,
+				       u32 chunk_id);
+
+u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+			 unsigned pos);
+
+int yaffs_is_non_empty_dir(struct yaffs_obj *obj);
+
+int yaffs_format_dev(struct yaffs_dev *dev);
+
+void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
+				int *chunk_out, u32 *offset_out);
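+
+/*
+ * Illustrative sketch (not part of the original source): how the
+ * chunk_shift/chunk_div/chunk_mask fields in struct yaffs_dev are
+ * typically combined by yaffs_addr_to_chunk(). The 2048-byte chunk
+ * size is an assumed example.
+ *
+ *	chunk = addr >> dev->chunk_shift;	 e.g. addr >> 11
+ *	if (dev->chunk_div == 1)
+ *		offset = addr & dev->chunk_mask; e.g. addr & 2047
+ *	else {
+ *		chunk /= dev->chunk_div;	 non-power-of-2 chunks
+ *		offset = addr - chunk * chunk_size;
+ *	}
+ */
+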
+/*
+ * Marshalling functions to get loff_t file sizes into and out of
+ * object headers.
+ */
+void yaffs_oh_size_load(struct yaffs_obj_hdr *oh, loff_t fsize);
+loff_t yaffs_oh_to_size(struct yaffs_obj_hdr *oh);
+loff_t yaffs_max_file_size(struct yaffs_dev *dev);
+
+/*
+ * Debug function to count number of blocks in each state
+ * NB Must be called with an array of 10 ints, one per block state.
+ */
+
+void yaffs_count_blocks_by_state(struct yaffs_dev *dev, int bs[10]);
+
+int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+				    struct yaffs_ext_tags *tags);
+
+#endif
diff --git a/fs/yaffs2/yaffs_linux.h b/fs/yaffs2/yaffs_linux.h
new file mode 100644
index 0000000..c20ab14
--- /dev/null
+++ b/fs/yaffs2/yaffs_linux.h
@@ -0,0 +1,48 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_LINUX_H__
+#define __YAFFS_LINUX_H__
+
+#include "yportenv.h"
+
+struct yaffs_linux_context {
+	struct list_head context_list;	/* List of these we have mounted */
+	struct yaffs_dev *dev;
+	struct super_block *super;
+	struct task_struct *bg_thread;	/* Background thread for this device */
+	int bg_running;
+	struct mutex gross_lock;	/* Gross locking mutex*/
+	u8 *spare_buffer;	/* For mtdif2 use. Don't know the buffer size
+				 * at compile time so we have to allocate it.
+				 */
+	struct list_head search_contexts;
+	struct task_struct *readdir_process;
+	unsigned mount_id;
+	int dirty;
+};
+
+#define yaffs_dev_to_lc(dev) ((struct yaffs_linux_context *)((dev)->os_context))
+#define yaffs_dev_to_mtd(dev) ((struct mtd_info *)((dev)->driver_context))
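+
+/*
+ * Usage sketch (illustrative only): these accessors recover the
+ * per-mount Linux context and the backing MTD from a yaffs_dev, e.g.
+ *
+ *	struct yaffs_linux_context *lc = yaffs_dev_to_lc(dev);
+ *	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ *	mutex_lock(&lc->gross_lock);
+ */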
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+#define WRITE_SIZE_STR "writesize"
+#define WRITE_SIZE(mtd) ((mtd)->writesize)
+#else
+#define WRITE_SIZE_STR "oobblock"
+#define WRITE_SIZE(mtd) ((mtd)->oobblock)
+#endif
+
+#endif
diff --git a/fs/yaffs2/yaffs_mtdif.c b/fs/yaffs2/yaffs_mtdif.c
new file mode 100644
index 0000000..7ae63c5
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif.c
@@ -0,0 +1,308 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yportenv.h"
+
+#include "yaffs_mtdif.h"
+
+#include "linux/mtd/mtd.h"
+#include "linux/types.h"
+#include "linux/time.h"
+#include "linux/mtd/nand.h"
+#include "linux/kernel.h"
+#include "linux/version.h"
+#include "linux/types.h"
+
+#include "yaffs_trace.h"
+#include "yaffs_guts.h"
+#include "yaffs_linux.h"
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
+#define MTD_OPS_AUTO_OOB MTD_OOB_AUTO
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
+#define mtd_erase(m, ei) (m)->erase(m, ei)
+#define mtd_write_oob(m, addr, pops) (m)->write_oob(m, addr, pops)
+#define mtd_read_oob(m, addr, pops) (m)->read_oob(m, addr, pops)
+#define mtd_block_isbad(m, offs) (m)->block_isbad(m, offs)
+#define mtd_block_markbad(m, offs) (m)->block_markbad(m, offs)
+#endif
+
+
+
+int nandmtd_erase_block(struct yaffs_dev *dev, int block_no)
+{
+	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+	loff_t addr =
+	    ((loff_t) block_no) * dev->param.total_bytes_per_chunk *
+	    dev->param.chunks_per_block;
+	struct erase_info ei;
+	int retval = 0;
+
+	ei.mtd = mtd;
+	ei.addr = addr;
+	ei.len = dev->param.total_bytes_per_chunk * dev->param.chunks_per_block;
+	ei.time = 1000;
+	ei.retries = 2;
+	ei.callback = NULL;
+	ei.priv = (u_long) dev;
+
+	retval = mtd_erase(mtd, &ei);
+
+	if (retval == 0)
+		return YAFFS_OK;
+
+	return YAFFS_FAIL;
+}
+
+
+static int yaffs_mtd_write(struct yaffs_dev *dev, int nand_chunk,
+				   const u8 *data, int data_len,
+				   const u8 *oob, int oob_len)
+{
+	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+	loff_t addr;
+	struct mtd_oob_ops ops;
+	int retval;
+
+	yaffs_trace(YAFFS_TRACE_MTD,
+			"yaffs_mtd_write(%p, %d, %p, %d, %p, %d)\n",
+			dev, nand_chunk, data, data_len, oob, oob_len);
+
+	if (!data || !data_len) {
+		data = NULL;
+		data_len = 0;
+	}
+
+	if (!oob || !oob_len) {
+		oob = NULL;
+		oob_len = 0;
+	}
+
+	addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
+	memset(&ops, 0, sizeof(ops));
+	ops.mode = MTD_OPS_AUTO_OOB;
+	ops.len = (data) ? data_len : 0;
+	ops.ooblen = oob_len;
+	ops.datbuf = (u8 *)data;
+	ops.oobbuf = (u8 *)oob;
+
+	retval = mtd_write_oob(mtd, addr, &ops);
+	if (retval) {
+		yaffs_trace(YAFFS_TRACE_MTD,
+			"write_oob failed, chunk %d, mtd error %d",
+			nand_chunk, retval);
+	}
+	return retval ? YAFFS_FAIL : YAFFS_OK;
+}
+
+static int yaffs_mtd_read(struct yaffs_dev *dev, int nand_chunk,
+				   u8 *data, int data_len,
+				   u8 *oob, int oob_len,
+				   enum yaffs_ecc_result *ecc_result)
+{
+	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+	loff_t addr;
+	struct mtd_oob_ops ops;
+	int retval;
+
+	addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
+	memset(&ops, 0, sizeof(ops));
+	ops.mode = MTD_OPS_AUTO_OOB;
+	ops.len = (data) ? data_len : 0;
+	ops.ooblen = oob_len;
+	ops.datbuf = data;
+	ops.oobbuf = oob;
+
+#if (MTD_VERSION_CODE < MTD_VERSION(2, 6, 20))
+	/* In MTD 2.6.18 to 2.6.19 nand_base.c:nand_do_read_oob() has a bug;
+	 * help it out with ops.len = ops.ooblen when ops.datbuf == NULL.
+	 */
+	ops.len = (ops.datbuf) ? ops.len : ops.ooblen;
+#endif
+	/* Read page and oob using MTD.
+	 * Check status and determine ECC result.
+	 */
+	retval = mtd_read_oob(mtd, addr, &ops);
+	if (retval)
+		yaffs_trace(YAFFS_TRACE_MTD,
+			"read_oob failed, chunk %d, mtd error %d",
+			nand_chunk, retval);
+
+	switch (retval) {
+	case 0:
+		/* no error */
+		if(ecc_result)
+			*ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+		break;
+
+	case -EUCLEAN:
+		/* MTD's ECC fixed the data */
+		if(ecc_result)
+			*ecc_result = YAFFS_ECC_RESULT_FIXED;
+		dev->n_ecc_fixed++;
+		break;
+
+	case -EBADMSG:
+	default:
+		/* MTD's ECC could not fix the data */
+		dev->n_ecc_unfixed++;
+		if(ecc_result)
+			*ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+		return YAFFS_FAIL;
+	}
+
+	return YAFFS_OK;
+}
+
+static int yaffs_mtd_erase(struct yaffs_dev *dev, int block_no)
+{
+	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+
+	loff_t addr;
+	struct erase_info ei;
+	int retval = 0;
+	u32 block_size;
+
+	block_size = dev->param.total_bytes_per_chunk *
+		     dev->param.chunks_per_block;
+	addr = ((loff_t) block_no) * block_size;
+
+	ei.mtd = mtd;
+	ei.addr = addr;
+	ei.len = block_size;
+	ei.time = 1000;
+	ei.retries = 2;
+	ei.callback = NULL;
+	ei.priv = (u_long) dev;
+
+	retval = mtd_erase(mtd, &ei);
+
+	if (retval == 0)
+		return YAFFS_OK;
+
+	return YAFFS_FAIL;
+}
+
+static int yaffs_mtd_mark_bad(struct yaffs_dev *dev, int block_no)
+{
+	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+	int blocksize = dev->param.chunks_per_block * dev->param.total_bytes_per_chunk;
+	int retval;
+
+	yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "marking block %d bad", block_no);
+
+	retval = mtd_block_markbad(mtd, (loff_t) blocksize * block_no);
+	return (retval) ? YAFFS_FAIL : YAFFS_OK;
+}
+
+static int yaffs_mtd_check_bad(struct yaffs_dev *dev, int block_no)
+{
+	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+	int blocksize = dev->param.chunks_per_block * dev->param.total_bytes_per_chunk;
+	int retval;
+
+	yaffs_trace(YAFFS_TRACE_MTD, "checking block %d bad", block_no);
+
+	retval = mtd_block_isbad(mtd, (loff_t) blocksize * block_no);
+	return (retval) ? YAFFS_FAIL : YAFFS_OK;
+}
+
+static int yaffs_mtd_initialise(struct yaffs_dev *dev)
+{
+	return YAFFS_OK;
+}
+
+static int yaffs_mtd_deinitialise(struct yaffs_dev *dev)
+{
+	return YAFFS_OK;
+}
+
+
+void yaffs_mtd_drv_install(struct yaffs_dev *dev)
+{
+	struct yaffs_driver *drv = &dev->drv;
+
+	drv->drv_write_chunk_fn = yaffs_mtd_write;
+	drv->drv_read_chunk_fn = yaffs_mtd_read;
+	drv->drv_erase_fn = yaffs_mtd_erase;
+	drv->drv_mark_bad_fn = yaffs_mtd_mark_bad;
+	drv->drv_check_bad_fn = yaffs_mtd_check_bad;
+	drv->drv_initialise_fn = yaffs_mtd_initialise;
+	drv->drv_deinitialise_fn = yaffs_mtd_deinitialise;
+}
+
+
+struct mtd_info *yaffs_get_mtd_device(dev_t sdev)
+{
+	struct mtd_info *mtd;
+
+	/* Check it's an mtd device..... */
+	if (MAJOR(sdev) != MTD_BLOCK_MAJOR)
+		return NULL;	/* This isn't an mtd device */
+
+	/* Fetch the underlying MTD from the MTD core */
+	mtd = get_mtd_device(NULL, MINOR(sdev));
+	if (IS_ERR_OR_NULL(mtd))
+		return NULL;
+
+	/* Check it's NAND */
+	if (mtd->type != MTD_NANDFLASH) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"yaffs: MTD device is not NAND it's type %d",
+			mtd->type);
+		return NULL;
+	}
+
+	yaffs_trace(YAFFS_TRACE_OS, " %s %d", WRITE_SIZE_STR, WRITE_SIZE(mtd));
+	yaffs_trace(YAFFS_TRACE_OS, " oobsize %d", mtd->oobsize);
+	yaffs_trace(YAFFS_TRACE_OS, " erasesize %d", mtd->erasesize);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
+	yaffs_trace(YAFFS_TRACE_OS, " size %u", mtd->size);
+#else
+	yaffs_trace(YAFFS_TRACE_OS, " size %lld", mtd->size);
+#endif
+
+	return mtd;
+}
+
+int yaffs_verify_mtd(struct mtd_info *mtd, int yaffs_version, int inband_tags)
+{
+	if (yaffs_version == 2) {
+		if ((WRITE_SIZE(mtd) < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
+		     mtd->oobsize < YAFFS_MIN_YAFFS2_SPARE_SIZE) &&
+		    !inband_tags) {
+			yaffs_trace(YAFFS_TRACE_ALWAYS,
+				"MTD device does not have the right page sizes"
+			);
+			return -1;
+		}
+	} else {
+		if (WRITE_SIZE(mtd) < YAFFS_BYTES_PER_CHUNK ||
+		    mtd->oobsize != YAFFS_BYTES_PER_SPARE) {
+			yaffs_trace(YAFFS_TRACE_ALWAYS,
+				"MTD device does not support have the right page sizes"
+			);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+
+void yaffs_put_mtd_device(struct mtd_info *mtd)
+{
+	if(mtd)
+		put_mtd_device(mtd);
+}
diff --git a/fs/yaffs2/yaffs_mtdif.h b/fs/yaffs2/yaffs_mtdif.h
new file mode 100644
index 0000000..9cff224
--- /dev/null
+++ b/fs/yaffs2/yaffs_mtdif.h
@@ -0,0 +1,25 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_MTDIF_H__
+#define __YAFFS_MTDIF_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_mtd_drv_install(struct yaffs_dev *dev);
+struct mtd_info *yaffs_get_mtd_device(dev_t sdev);
+void yaffs_put_mtd_device(struct mtd_info *mtd);
+int yaffs_verify_mtd(struct mtd_info *mtd, int yaffs_version, int inband_tags);
+#endif
diff --git a/fs/yaffs2/yaffs_nameval.c b/fs/yaffs2/yaffs_nameval.c
new file mode 100644
index 0000000..4bdf4ed
--- /dev/null
+++ b/fs/yaffs2/yaffs_nameval.c
@@ -0,0 +1,208 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This simple implementation of a name-value store assumes a small number
+ * of values that fit into a small finite buffer.
+ *
+ * Each attribute is stored as a record:
+ *  sizeof(int) bytes   record size.
+ *  strnlen+1 bytes     name, NUL terminated.
+ *  nbytes              value.
+ *  ----------
+ *  total size, stored in the record size field.
+ *
+ * This code has not been tested with unicode yet.
+ */
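+
+/*
+ * Worked example (illustrative, not from the original source): storing
+ * the attribute name "ab" with a 3-byte value on a build where
+ * sizeof(int) == 4 gives a 10-byte record:
+ *
+ *	bytes 0..3	record size (10) as a native int
+ *	bytes 4..6	'a' 'b' '\0'
+ *	bytes 7..9	the value
+ *
+ * Records are simply concatenated; a record size of 0 terminates the
+ * list, which is what nval_find() and nval_used() below rely on.
+ */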
+
+#include "yaffs_nameval.h"
+
+#include "yportenv.h"
+
+static int nval_find(const char *xb, int xb_size, const YCHAR *name,
+		     int *exist_size)
+{
+	int pos = 0;
+	int size;
+
+	memcpy(&size, xb, sizeof(int));
+	while (size > 0 && (size < xb_size) && (pos + size < xb_size)) {
+		if (!strncmp((YCHAR *) (xb + pos + sizeof(int)),
+				name, size)) {
+			if (exist_size)
+				*exist_size = size;
+			return pos;
+		}
+		pos += size;
+		if (pos < xb_size - sizeof(int))
+			memcpy(&size, xb + pos, sizeof(int));
+		else
+			size = 0;
+	}
+	if (exist_size)
+		*exist_size = 0;
+	return -ENODATA;
+}
+
+static int nval_used(const char *xb, int xb_size)
+{
+	int pos = 0;
+	int size;
+
+	memcpy(&size, xb + pos, sizeof(int));
+	while (size > 0 && (size < xb_size) && (pos + size < xb_size)) {
+		pos += size;
+		if (pos < xb_size - sizeof(int))
+			memcpy(&size, xb + pos, sizeof(int));
+		else
+			size = 0;
+	}
+	return pos;
+}
+
+int nval_del(char *xb, int xb_size, const YCHAR *name)
+{
+	int pos = nval_find(xb, xb_size, name, NULL);
+	int size;
+
+	if (pos < 0 || pos >= xb_size)
+		return -ENODATA;
+
+	/* Find size, shift rest over this record,
+	 * then zero out the rest of buffer */
+	memcpy(&size, xb + pos, sizeof(int));
+	memcpy(xb + pos, xb + pos + size, xb_size - (pos + size));
+	memset(xb + (xb_size - size), 0, size);
+	return 0;
+}
+
+int nval_set(char *xb, int xb_size, const YCHAR *name, const char *buf,
+		int bsize, int flags)
+{
+	int pos;
+	int namelen = strnlen(name, xb_size);
+	int reclen;
+	int size_exist = 0;
+	int space;
+	int start;
+
+	pos = nval_find(xb, xb_size, name, &size_exist);
+
+	if (flags & XATTR_CREATE && pos >= 0)
+		return -EEXIST;
+	if (flags & XATTR_REPLACE && pos < 0)
+		return -ENODATA;
+
+	start = nval_used(xb, xb_size);
+	space = xb_size - start + size_exist;
+
+	reclen = (sizeof(int) + namelen + 1 + bsize);
+
+	if (reclen > space)
+		return -ENOSPC;
+
+	if (pos >= 0) {
+		nval_del(xb, xb_size, name);
+		start = nval_used(xb, xb_size);
+	}
+
+	pos = start;
+
+	memcpy(xb + pos, &reclen, sizeof(int));
+	pos += sizeof(int);
+	strncpy((YCHAR *) (xb + pos), name, namelen + 1);
+	pos += (namelen + 1);
+	memcpy(xb + pos, buf, bsize);
+	return 0;
+}
+
+int nval_get(const char *xb, int xb_size, const YCHAR * name, char *buf,
+	     int bsize)
+{
+	int pos = nval_find(xb, xb_size, name, NULL);
+	int size;
+
+	if (pos >= 0 && pos < xb_size) {
+
+		memcpy(&size, xb + pos, sizeof(int));
+		pos += sizeof(int);	/* advance past record length */
+		size -= sizeof(int);
+
+		/* Advance over name string */
+		while (xb[pos] && size > 0 && pos < xb_size) {
+			pos++;
+			size--;
+		}
+		/* Advance over NUL */
+		pos++;
+		size--;
+
+		/* If bsize is zero then this is a size query.
+		 * Return the size, but don't copy.
+		 */
+		if (!bsize)
+			return size;
+
+		if (size <= bsize) {
+			memcpy(buf, xb + pos, size);
+			return size;
+		}
+	}
+	if (pos >= 0)
+		return -ERANGE;
+
+	return -ENODATA;
+}
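+
+/*
+ * Usage sketch (illustrative): because a bsize of 0 makes nval_get()
+ * return the value size without copying, a caller can size its buffer
+ * in two calls; xb/xb_size are assumed to be a valid attribute buffer.
+ *
+ *	int n = nval_get(xb, xb_size, name, NULL, 0);
+ *	if (n >= 0) {
+ *		char *tmp = kmalloc(n, GFP_NOFS);
+ *		if (tmp)
+ *			n = nval_get(xb, xb_size, name, tmp, n);
+ *	}
+ */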
+
+int nval_list(const char *xb, int xb_size, char *buf, int bsize)
+{
+	int pos = 0;
+	int size;
+	int name_len;
+	int ncopied = 0;
+	int filled = 0;
+
+	memcpy(&size, xb + pos, sizeof(int));
+	while (size > sizeof(int) &&
+		size <= xb_size &&
+		(pos + size) < xb_size &&
+		!filled) {
+		pos += sizeof(int);
+		size -= sizeof(int);
+		name_len = strnlen((YCHAR *) (xb + pos), size);
+		if (ncopied + name_len + 1 < bsize) {
+			memcpy(buf, xb + pos, name_len * sizeof(YCHAR));
+			buf += name_len;
+			*buf = '\0';
+			buf++;
+			if (sizeof(YCHAR) > 1) {
+				*buf = '\0';
+				buf++;
+			}
+			ncopied += (name_len + 1);
+		} else {
+			filled = 1;
+		}
+		pos += size;
+		if (pos < xb_size - sizeof(int))
+			memcpy(&size, xb + pos, sizeof(int));
+		else
+			size = 0;
+	}
+	return ncopied;
+}
+
+int nval_hasvalues(const char *xb, int xb_size)
+{
+	return nval_used(xb, xb_size) > 0;
+}
diff --git a/fs/yaffs2/yaffs_nameval.h b/fs/yaffs2/yaffs_nameval.h
new file mode 100644
index 0000000..951e64f
--- /dev/null
+++ b/fs/yaffs2/yaffs_nameval.h
@@ -0,0 +1,28 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __NAMEVAL_H__
+#define __NAMEVAL_H__
+
+#include "yportenv.h"
+
+int nval_del(char *xb, int xb_size, const YCHAR * name);
+int nval_set(char *xb, int xb_size, const YCHAR * name, const char *buf,
+	     int bsize, int flags);
+int nval_get(const char *xb, int xb_size, const YCHAR * name, char *buf,
+	     int bsize);
+int nval_list(const char *xb, int xb_size, char *buf, int bsize);
+int nval_hasvalues(const char *xb, int xb_size);
+#endif
diff --git a/fs/yaffs2/yaffs_nand.c b/fs/yaffs2/yaffs_nand.c
new file mode 100644
index 0000000..0d8499b
--- /dev/null
+++ b/fs/yaffs2/yaffs_nand.c
@@ -0,0 +1,122 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_nand.h"
+#include "yaffs_tagscompat.h"
+
+#include "yaffs_getblockinfo.h"
+#include "yaffs_summary.h"
+
+static int apply_chunk_offset(struct yaffs_dev *dev, int chunk)
+{
+	return chunk - dev->chunk_offset;
+}
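+
+/*
+ * Illustrative note (assumed example): dev->chunk_offset and
+ * dev->block_offset let the core use internal block numbers starting
+ * at 1 even when the flash itself starts at block 0; the helpers in
+ * this file subtract the offset again before an address reaches the
+ * driver.
+ */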
+
+int yaffs_rd_chunk_tags_nand(struct yaffs_dev *dev, int nand_chunk,
+			     u8 *buffer, struct yaffs_ext_tags *tags)
+{
+	int result;
+	struct yaffs_ext_tags local_tags;
+	int flash_chunk = apply_chunk_offset(dev, nand_chunk);
+
+	dev->n_page_reads++;
+
+	/* If there are no tags provided use local tags. */
+	if (!tags)
+		tags = &local_tags;
+
+	result = dev->tagger.read_chunk_tags_fn(dev, flash_chunk, buffer, tags);
+	if (tags && tags->ecc_result > YAFFS_ECC_RESULT_NO_ERROR) {
+
+		struct yaffs_block_info *bi;
+		bi = yaffs_get_block_info(dev,
+					  nand_chunk /
+					  dev->param.chunks_per_block);
+		yaffs_handle_chunk_error(dev, bi);
+	}
+	return result;
+}
+
+int yaffs_wr_chunk_tags_nand(struct yaffs_dev *dev,
+				int nand_chunk,
+				const u8 *buffer, struct yaffs_ext_tags *tags)
+{
+	int result;
+	int flash_chunk = apply_chunk_offset(dev, nand_chunk);
+
+	dev->n_page_writes++;
+
+	if (!tags) {
+		yaffs_trace(YAFFS_TRACE_ERROR, "Writing with no tags");
+		BUG();
+		return YAFFS_FAIL;
+	}
+
+	tags->seq_number = dev->seq_number;
+	tags->chunk_used = 1;
+	yaffs_trace(YAFFS_TRACE_WRITE,
+		"Writing chunk %d tags %d %d",
+		nand_chunk, tags->obj_id, tags->chunk_id);
+
+	result = dev->tagger.write_chunk_tags_fn(dev, flash_chunk,
+							buffer, tags);
+
+	yaffs_summary_add(dev, tags, nand_chunk);
+
+	return result;
+}
+
+int yaffs_mark_bad(struct yaffs_dev *dev, int block_no)
+{
+	block_no -= dev->block_offset;
+	dev->n_bad_markings++;
+
+	if (dev->param.disable_bad_block_marking)
+		return YAFFS_OK;
+
+	return dev->tagger.mark_bad_fn(dev, block_no);
+}
+
+
+int yaffs_query_init_block_state(struct yaffs_dev *dev,
+				 int block_no,
+				 enum yaffs_block_state *state,
+				 u32 *seq_number)
+{
+	block_no -= dev->block_offset;
+	return dev->tagger.query_block_fn(dev, block_no, state, seq_number);
+}
+
+int yaffs_erase_block(struct yaffs_dev *dev, int block_no)
+{
+	int result;
+
+	block_no -= dev->block_offset;
+	dev->n_erasures++;
+	result = dev->drv.drv_erase_fn(dev, block_no);
+	return result;
+}
+
+int yaffs_init_nand(struct yaffs_dev *dev)
+{
+	if (dev->drv.drv_initialise_fn)
+		return dev->drv.drv_initialise_fn(dev);
+	return YAFFS_OK;
+}
+
+int yaffs_deinit_nand(struct yaffs_dev *dev)
+{
+	if (dev->drv.drv_deinitialise_fn)
+		return dev->drv.drv_deinitialise_fn(dev);
+	return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_nand.h b/fs/yaffs2/yaffs_nand.h
new file mode 100644
index 0000000..804e97a
--- /dev/null
+++ b/fs/yaffs2/yaffs_nand.h
@@ -0,0 +1,39 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_NAND_H__
+#define __YAFFS_NAND_H__
+#include "yaffs_guts.h"
+
+int yaffs_rd_chunk_tags_nand(struct yaffs_dev *dev, int nand_chunk,
+			     u8 *buffer, struct yaffs_ext_tags *tags);
+
+int yaffs_wr_chunk_tags_nand(struct yaffs_dev *dev,
+			     int nand_chunk,
+			     const u8 *buffer, struct yaffs_ext_tags *tags);
+
+int yaffs_mark_bad(struct yaffs_dev *dev, int block_no);
+
+int yaffs_query_init_block_state(struct yaffs_dev *dev,
+				 int block_no,
+				 enum yaffs_block_state *state,
+				 u32 *seq_number);
+
+int yaffs_erase_block(struct yaffs_dev *dev, int flash_block);
+
+int yaffs_init_nand(struct yaffs_dev *dev);
+int yaffs_deinit_nand(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_packedtags1.c b/fs/yaffs2/yaffs_packedtags1.c
new file mode 100644
index 0000000..dd9a331
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags1.c
@@ -0,0 +1,56 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_packedtags1.h"
+#include "yportenv.h"
+
+static const u8 all_ff[20] = {
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff
+};
+
+void yaffs_pack_tags1(struct yaffs_packed_tags1 *pt,
+		      const struct yaffs_ext_tags *t)
+{
+	pt->chunk_id = t->chunk_id;
+	pt->serial_number = t->serial_number;
+	pt->n_bytes = t->n_bytes;
+	pt->obj_id = t->obj_id;
+	pt->ecc = 0;
+	pt->deleted = (t->is_deleted) ? 0 : 1;
+	pt->unused_stuff = 0;
+	pt->should_be_ff = 0xffffffff;
+}
+
+void yaffs_unpack_tags1(struct yaffs_ext_tags *t,
+			const struct yaffs_packed_tags1 *pt)
+{
+
+	if (memcmp(all_ff, pt, sizeof(struct yaffs_packed_tags1))) {
+		t->block_bad = 0;
+		if (pt->should_be_ff != 0xffffffff)
+			t->block_bad = 1;
+		t->chunk_used = 1;
+		t->obj_id = pt->obj_id;
+		t->chunk_id = pt->chunk_id;
+		t->n_bytes = pt->n_bytes;
+		t->ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+		t->is_deleted = (pt->deleted) ? 0 : 1;
+		t->serial_number = pt->serial_number;
+	} else {
+		memset(t, 0, sizeof(struct yaffs_ext_tags));
+	}
+}
diff --git a/fs/yaffs2/yaffs_packedtags1.h b/fs/yaffs2/yaffs_packedtags1.h
new file mode 100644
index 0000000..b80f0a5
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags1.h
@@ -0,0 +1,39 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/* This is used to pack YAFFS1 tags, not YAFFS2 tags. */
+
+#ifndef __YAFFS_PACKEDTAGS1_H__
+#define __YAFFS_PACKEDTAGS1_H__
+
+#include "yaffs_guts.h"
+
+struct yaffs_packed_tags1 {
+	unsigned chunk_id:20;
+	unsigned serial_number:2;
+	unsigned n_bytes:10;
+	unsigned obj_id:18;
+	unsigned ecc:12;
+	unsigned deleted:1;
+	unsigned unused_stuff:1;
+	unsigned should_be_ff;
+
+};
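+
+/*
+ * Sanity note (illustrative): the bitfields above total 64 bits
+ * (20+2+10+18+12+1+1), i.e. the 8 tag bytes kept in the spare area,
+ * with should_be_ff as a trailing all-ones marker. On a typical build
+ * this could be asserted at compile time, e.g.
+ *
+ *	BUILD_BUG_ON(sizeof(struct yaffs_packed_tags1) != 12);
+ */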
+
+void yaffs_pack_tags1(struct yaffs_packed_tags1 *pt,
+		      const struct yaffs_ext_tags *t);
+void yaffs_unpack_tags1(struct yaffs_ext_tags *t,
+			const struct yaffs_packed_tags1 *pt);
+#endif
diff --git a/fs/yaffs2/yaffs_packedtags2.c b/fs/yaffs2/yaffs_packedtags2.c
new file mode 100644
index 0000000..e1d18cc
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags2.c
@@ -0,0 +1,197 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_packedtags2.h"
+#include "yportenv.h"
+#include "yaffs_trace.h"
+
+/* This code packs a set of extended tags into a binary structure for
+ * NAND storage
+ */
+
+/* Some of the information is "extra" stuff which can be packed in to
+ * speed scanning.
+ * This is defined by having the EXTRA_HEADER_INFO_FLAG set.
+ */
+
+/* Extra flags applied to chunk_id */
+
+#define EXTRA_HEADER_INFO_FLAG	0x80000000
+#define EXTRA_SHRINK_FLAG	0x40000000
+#define EXTRA_SHADOWS_FLAG	0x20000000
+#define EXTRA_SPARE_FLAGS	0x10000000
+
+#define ALL_EXTRA_FLAGS		0xf0000000
+
+/* Also, the top 4 bits of the object Id are set to the object type. */
+#define EXTRA_OBJECT_TYPE_SHIFT (28)
+#define EXTRA_OBJECT_TYPE_MASK  ((0x0f) << EXTRA_OBJECT_TYPE_SHIFT)
+
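+/*
+ * Worked example (illustrative): an object header chunk for a directory
+ * whose parent has object id 2 is packed below as
+ *
+ *	ptt->chunk_id = EXTRA_HEADER_INFO_FLAG | 2;	i.e. 0x80000002
+ *	ptt->obj_id  |= YAFFS_OBJECT_TYPE_DIRECTORY << EXTRA_OBJECT_TYPE_SHIFT;
+ *
+ * so a scan can recover the parent and object type without reading the
+ * chunk data itself.
+ */
+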
+static void yaffs_dump_packed_tags2_tags_only(
+				const struct yaffs_packed_tags2_tags_only *ptt)
+{
+	yaffs_trace(YAFFS_TRACE_MTD,
+		"packed tags obj %d chunk %d byte %d seq %d",
+		ptt->obj_id, ptt->chunk_id, ptt->n_bytes, ptt->seq_number);
+}
+
+static void yaffs_dump_packed_tags2(const struct yaffs_packed_tags2 *pt)
+{
+	yaffs_dump_packed_tags2_tags_only(&pt->t);
+}
+
+static void yaffs_dump_tags2(const struct yaffs_ext_tags *t)
+{
+	yaffs_trace(YAFFS_TRACE_MTD,
+		"ext.tags eccres %d blkbad %d chused %d obj %d chunk%d byte %d del %d ser %d seq %d",
+		t->ecc_result, t->block_bad, t->chunk_used, t->obj_id,
+		t->chunk_id, t->n_bytes, t->is_deleted, t->serial_number,
+		t->seq_number);
+
+}
+
+static int yaffs_check_tags_extra_packable(const struct yaffs_ext_tags *t)
+{
+	if (t->chunk_id != 0 || !t->extra_available)
+		return 0;
+
+	/* Check if the file size is too long to store */
+	if (t->extra_obj_type == YAFFS_OBJECT_TYPE_FILE &&
+	    (t->extra_file_size >> 31) != 0)
+		return 0;
+	return 1;
+}
+
+void yaffs_pack_tags2_tags_only(struct yaffs_packed_tags2_tags_only *ptt,
+				const struct yaffs_ext_tags *t)
+{
+	ptt->chunk_id = t->chunk_id;
+	ptt->seq_number = t->seq_number;
+	ptt->n_bytes = t->n_bytes;
+	ptt->obj_id = t->obj_id;
+
+	/* Only store extra tags for object headers.
+	 * If it is a file then only store if the file size is short
+	 * enough to fit.
+	 */
+	if (yaffs_check_tags_extra_packable(t)) {
+		/* Store the extra header info instead */
+		/* We save the parent object in the chunk_id */
+		ptt->chunk_id = EXTRA_HEADER_INFO_FLAG | t->extra_parent_id;
+		if (t->extra_is_shrink)
+			ptt->chunk_id |= EXTRA_SHRINK_FLAG;
+		if (t->extra_shadows)
+			ptt->chunk_id |= EXTRA_SHADOWS_FLAG;
+
+		ptt->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
+		ptt->obj_id |= (t->extra_obj_type << EXTRA_OBJECT_TYPE_SHIFT);
+
+		if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
+			ptt->n_bytes = t->extra_equiv_id;
+		else if (t->extra_obj_type == YAFFS_OBJECT_TYPE_FILE)
+			ptt->n_bytes = (unsigned) t->extra_file_size;
+		else
+			ptt->n_bytes = 0;
+	}
+
+	yaffs_dump_packed_tags2_tags_only(ptt);
+	yaffs_dump_tags2(t);
+}
+
+void yaffs_pack_tags2(struct yaffs_packed_tags2 *pt,
+		      const struct yaffs_ext_tags *t, int tags_ecc)
+{
+	yaffs_pack_tags2_tags_only(&pt->t, t);
+
+	if (tags_ecc)
+		yaffs_ecc_calc_other((unsigned char *)&pt->t,
+				    sizeof(struct yaffs_packed_tags2_tags_only),
+				    &pt->ecc);
+}
+
+void yaffs_unpack_tags2_tags_only(struct yaffs_ext_tags *t,
+				  struct yaffs_packed_tags2_tags_only *ptt)
+{
+	memset(t, 0, sizeof(struct yaffs_ext_tags));
+
+	if (ptt->seq_number == 0xffffffff)
+		return;
+
+	t->block_bad = 0;
+	t->chunk_used = 1;
+	t->obj_id = ptt->obj_id;
+	t->chunk_id = ptt->chunk_id;
+	t->n_bytes = ptt->n_bytes;
+	t->is_deleted = 0;
+	t->serial_number = 0;
+	t->seq_number = ptt->seq_number;
+
+	/* Do extra header info stuff */
+	if (ptt->chunk_id & EXTRA_HEADER_INFO_FLAG) {
+		t->chunk_id = 0;
+		t->n_bytes = 0;
+
+		t->extra_available = 1;
+		t->extra_parent_id = ptt->chunk_id & (~(ALL_EXTRA_FLAGS));
+		t->extra_is_shrink = ptt->chunk_id & EXTRA_SHRINK_FLAG ? 1 : 0;
+		t->extra_shadows = ptt->chunk_id & EXTRA_SHADOWS_FLAG ? 1 : 0;
+		t->extra_obj_type = ptt->obj_id >> EXTRA_OBJECT_TYPE_SHIFT;
+		t->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
+
+		if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
+			t->extra_equiv_id = ptt->n_bytes;
+		else
+			t->extra_file_size = ptt->n_bytes;
+	}
+	yaffs_dump_packed_tags2_tags_only(ptt);
+	yaffs_dump_tags2(t);
+}
+
+void yaffs_unpack_tags2(struct yaffs_ext_tags *t, struct yaffs_packed_tags2 *pt,
+			int tags_ecc)
+{
+	enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+
+	if (pt->t.seq_number != 0xffffffff && tags_ecc) {
+		/* Chunk is in use and we need to do ECC */
+
+		struct yaffs_ecc_other ecc;
+		int result;
+		yaffs_ecc_calc_other((unsigned char *)&pt->t,
+				sizeof(struct yaffs_packed_tags2_tags_only),
+				&ecc);
+		result =
+		    yaffs_ecc_correct_other((unsigned char *)&pt->t,
+				sizeof(struct yaffs_packed_tags2_tags_only),
+				&pt->ecc, &ecc);
+		switch (result) {
+		case 0:
+			ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+			break;
+		case 1:
+			ecc_result = YAFFS_ECC_RESULT_FIXED;
+			break;
+		case -1:
+			ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+			break;
+		default:
+			ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
+		}
+	}
+	yaffs_unpack_tags2_tags_only(t, &pt->t);
+
+	t->ecc_result = ecc_result;
+
+	yaffs_dump_packed_tags2(pt);
+	yaffs_dump_tags2(t);
+}
diff --git a/fs/yaffs2/yaffs_packedtags2.h b/fs/yaffs2/yaffs_packedtags2.h
new file mode 100644
index 0000000..675e719
--- /dev/null
+++ b/fs/yaffs2/yaffs_packedtags2.h
@@ -0,0 +1,47 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/* This is used to pack YAFFS2 tags, not YAFFS1 tags. */
+
+#ifndef __YAFFS_PACKEDTAGS2_H__
+#define __YAFFS_PACKEDTAGS2_H__
+
+#include "yaffs_guts.h"
+#include "yaffs_ecc.h"
+
+struct yaffs_packed_tags2_tags_only {
+	unsigned seq_number;
+	unsigned obj_id;
+	unsigned chunk_id;
+	unsigned n_bytes;
+};
+
+struct yaffs_packed_tags2 {
+	struct yaffs_packed_tags2_tags_only t;
+	struct yaffs_ecc_other ecc;
+};
+
+/* Full packed tags with ECC, used for oob tags */
+void yaffs_pack_tags2(struct yaffs_packed_tags2 *pt,
+		      const struct yaffs_ext_tags *t, int tags_ecc);
+void yaffs_unpack_tags2(struct yaffs_ext_tags *t, struct yaffs_packed_tags2 *pt,
+			int tags_ecc);
+
+/* Only the tags part (no ECC) for use with inband tags */
+void yaffs_pack_tags2_tags_only(struct yaffs_packed_tags2_tags_only *pt,
+				const struct yaffs_ext_tags *t);
+void yaffs_unpack_tags2_tags_only(struct yaffs_ext_tags *t,
+				  struct yaffs_packed_tags2_tags_only *pt);
+#endif
diff --git a/fs/yaffs2/yaffs_summary.c b/fs/yaffs2/yaffs_summary.c
new file mode 100644
index 0000000..6f3c783
--- /dev/null
+++ b/fs/yaffs2/yaffs_summary.c
@@ -0,0 +1,313 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Summaries write the useful part of the tags for the chunks in a block into
+ * an array which is written to the last n chunks of the block.
+ * Reading the summaries gives all the tags for the block in one read. Much
+ * faster.
+ *
+ * Chunks holding summaries are marked with tags making it look like
+ * they are part of a fake file.
+ *
+ * The summary could also be used during gc.
+ *
+ */
+
+#include "yaffs_summary.h"
+#include "yaffs_packedtags2.h"
+#include "yaffs_nand.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_bitmap.h"
+
+/*
+ * The summary is built up in an array of summary tags.
+ * This gets written to the last one or two (maybe more) chunks in a block.
+ * A summary header is written as the first part of each chunk of summary data.
+ * The summary header must match or the summary is rejected.
+ */
+
+/* Summary tags don't need the sequence number because that is redundant. */
+struct yaffs_summary_tags {
+	unsigned obj_id;
+	unsigned chunk_id;
+	unsigned n_bytes;
+};
+
+/* Summary header */
+struct yaffs_summary_header {
+	unsigned version;	/* Must match current version */
+	unsigned block;		/* Must be this block */
+	unsigned seq;		/* Must be this sequence number */
+	unsigned sum;		/* Just add up all the bytes in the tags */
+};
+
+
+static void yaffs_summary_clear(struct yaffs_dev *dev)
+{
+	if (!dev->sum_tags)
+		return;
+	memset(dev->sum_tags, 0, dev->chunks_per_summary *
+		sizeof(struct yaffs_summary_tags));
+}
+
+
+void yaffs_summary_deinit(struct yaffs_dev *dev)
+{
+	kfree(dev->sum_tags);
+	dev->sum_tags = NULL;
+	kfree(dev->gc_sum_tags);
+	dev->gc_sum_tags = NULL;
+	dev->chunks_per_summary = 0;
+}
+
+int yaffs_summary_init(struct yaffs_dev *dev)
+{
+	int sum_bytes;
+	int chunks_used; /* Number of chunks used by summary */
+	int sum_tags_bytes;
+
+	sum_bytes = dev->param.chunks_per_block *
+			sizeof(struct yaffs_summary_tags);
+
+	chunks_used = (sum_bytes + dev->data_bytes_per_chunk - 1)/
+			(dev->data_bytes_per_chunk -
+				sizeof(struct yaffs_summary_header));
+
+	dev->chunks_per_summary = dev->param.chunks_per_block - chunks_used;
+	sum_tags_bytes = sizeof(struct yaffs_summary_tags) *
+				dev->chunks_per_summary;
+	dev->sum_tags = kmalloc(sum_tags_bytes, GFP_NOFS);
+	dev->gc_sum_tags = kmalloc(sum_tags_bytes, GFP_NOFS);
+	if (!dev->sum_tags || !dev->gc_sum_tags) {
+		yaffs_summary_deinit(dev);
+		return YAFFS_FAIL;
+	}
+
+	yaffs_summary_clear(dev);
+
+	return YAFFS_OK;
+}
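+
+/*
+ * Worked example (illustrative, assumed geometry): for a block of 64
+ * chunks of 2048 data bytes each, with 12-byte summary tags and a
+ * 16-byte summary header, the code above computes
+ *
+ *	sum_bytes   = 64 * 12                    =  768
+ *	chunks_used = (768 + 2047) / (2048 - 16) =    1
+ *
+ * so chunks_per_summary is 63 and the summary occupies the last chunk
+ * of the block.
+ */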
+
+static unsigned yaffs_summary_sum(struct yaffs_dev *dev)
+{
+	u8 *sum_buffer = (u8 *)dev->sum_tags;
+	int i;
+	unsigned sum = 0;
+
+	i = sizeof(struct yaffs_summary_tags) *
+				dev->chunks_per_summary;
+	while (i > 0) {
+		sum += *sum_buffer;
+		sum_buffer++;
+		i--;
+	}
+
+	return sum;
+}
+
+static int yaffs_summary_write(struct yaffs_dev *dev, int blk)
+{
+	struct yaffs_ext_tags tags;
+	u8 *buffer;
+	u8 *sum_buffer = (u8 *)dev->sum_tags;
+	int n_bytes;
+	int chunk_in_nand;
+	int chunk_in_block;
+	int result;
+	int this_tx;
+	struct yaffs_summary_header hdr;
+	int sum_bytes_per_chunk = dev->data_bytes_per_chunk - sizeof(hdr);
+	struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk);
+
+	buffer = yaffs_get_temp_buffer(dev);
+	n_bytes = sizeof(struct yaffs_summary_tags) *
+				dev->chunks_per_summary;
+	memset(&tags, 0, sizeof(struct yaffs_ext_tags));
+	tags.obj_id = YAFFS_OBJECTID_SUMMARY;
+	tags.chunk_id = 1;
+	chunk_in_block = dev->chunks_per_summary;
+	chunk_in_nand = dev->alloc_block * dev->param.chunks_per_block +
+						dev->chunks_per_summary;
+	hdr.version = YAFFS_SUMMARY_VERSION;
+	hdr.block = blk;
+	hdr.seq = bi->seq_number;
+	hdr.sum = yaffs_summary_sum(dev);
+
+	do {
+		this_tx = n_bytes;
+		if (this_tx > sum_bytes_per_chunk)
+			this_tx = sum_bytes_per_chunk;
+		memcpy(buffer, &hdr, sizeof(hdr));
+		memcpy(buffer + sizeof(hdr), sum_buffer, this_tx);
+		tags.n_bytes = this_tx + sizeof(hdr);
+		result = yaffs_wr_chunk_tags_nand(dev, chunk_in_nand,
+						buffer, &tags);
+
+		if (result != YAFFS_OK)
+			break;
+		yaffs_set_chunk_bit(dev, blk, chunk_in_block);
+		bi->pages_in_use++;
+		dev->n_free_chunks--;
+
+		n_bytes -= this_tx;
+		sum_buffer += this_tx;
+		chunk_in_nand++;
+		chunk_in_block++;
+		tags.chunk_id++;
+	} while (result == YAFFS_OK && n_bytes > 0);
+	yaffs_release_temp_buffer(dev, buffer);
+
+
+	if (result == YAFFS_OK)
+		bi->has_summary = 1;
+
+
+	return result;
+}
+
+int yaffs_summary_read(struct yaffs_dev *dev,
+			struct yaffs_summary_tags *st,
+			int blk)
+{
+	struct yaffs_ext_tags tags;
+	u8 *buffer;
+	u8 *sum_buffer = (u8 *)st;
+	int n_bytes;
+	int chunk_id;
+	int chunk_in_nand;
+	int chunk_in_block;
+	int result;
+	int this_tx;
+	struct yaffs_summary_header hdr;
+	struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk);
+	int sum_bytes_per_chunk = dev->data_bytes_per_chunk - sizeof(hdr);
+
+	buffer = yaffs_get_temp_buffer(dev);
+	n_bytes = sizeof(struct yaffs_summary_tags) * dev->chunks_per_summary;
+	chunk_in_block = dev->chunks_per_summary;
+	chunk_in_nand = blk * dev->param.chunks_per_block +
+							dev->chunks_per_summary;
+	chunk_id = 1;
+	do {
+		this_tx = n_bytes;
+		if (this_tx > sum_bytes_per_chunk)
+			this_tx = sum_bytes_per_chunk;
+		result = yaffs_rd_chunk_tags_nand(dev, chunk_in_nand,
+						buffer, &tags);
+
+		if (tags.chunk_id != chunk_id ||
+			tags.obj_id != YAFFS_OBJECTID_SUMMARY ||
+			tags.chunk_used == 0 ||
+			tags.ecc_result > YAFFS_ECC_RESULT_FIXED ||
+			tags.n_bytes != (this_tx + sizeof(hdr)))
+				result = YAFFS_FAIL;
+		if (result != YAFFS_OK)
+			break;
+
+		if (st == dev->sum_tags) {
+			/* If we're scanning then update the block info */
+			yaffs_set_chunk_bit(dev, blk, chunk_in_block);
+			bi->pages_in_use++;
+		}
+		memcpy(&hdr, buffer, sizeof(hdr));
+		memcpy(sum_buffer, buffer + sizeof(hdr), this_tx);
+		n_bytes -= this_tx;
+		sum_buffer += this_tx;
+		chunk_in_nand++;
+		chunk_in_block++;
+		chunk_id++;
+	} while (result == YAFFS_OK && n_bytes > 0);
+	yaffs_release_temp_buffer(dev, buffer);
+
+	if (result == YAFFS_OK) {
+		/* Verify header */
+		if (hdr.version != YAFFS_SUMMARY_VERSION ||
+		    hdr.block != blk ||
+		    hdr.seq != bi->seq_number ||
+		    hdr.sum != yaffs_summary_sum(dev))
+			result = YAFFS_FAIL;
+	}
+
+	if (st == dev->sum_tags && result == YAFFS_OK)
+		bi->has_summary = 1;
+
+	return result;
+}
+
+int yaffs_summary_add(struct yaffs_dev *dev,
+			struct yaffs_ext_tags *tags,
+			int chunk_in_nand)
+{
+	struct yaffs_packed_tags2_tags_only tags_only;
+	struct yaffs_summary_tags *sum_tags;
+	int block_in_nand = chunk_in_nand / dev->param.chunks_per_block;
+	int chunk_in_block = chunk_in_nand % dev->param.chunks_per_block;
+
+	if (!dev->sum_tags)
+		return YAFFS_OK;
+
+	if (chunk_in_block >= 0 && chunk_in_block < dev->chunks_per_summary) {
+		yaffs_pack_tags2_tags_only(&tags_only, tags);
+		sum_tags = &dev->sum_tags[chunk_in_block];
+		sum_tags->chunk_id = tags_only.chunk_id;
+		sum_tags->n_bytes = tags_only.n_bytes;
+		sum_tags->obj_id = tags_only.obj_id;
+
+		if (chunk_in_block == dev->chunks_per_summary - 1) {
+			/* Time to write out the summary */
+			yaffs_summary_write(dev, block_in_nand);
+			yaffs_summary_clear(dev);
+			yaffs_skip_rest_of_block(dev);
+		}
+	}
+	return YAFFS_OK;
+}
+
+int yaffs_summary_fetch(struct yaffs_dev *dev,
+			struct yaffs_ext_tags *tags,
+			int chunk_in_block)
+{
+	struct yaffs_packed_tags2_tags_only tags_only;
+	struct yaffs_summary_tags *sum_tags;
+	if (chunk_in_block >= 0 && chunk_in_block < dev->chunks_per_summary) {
+		sum_tags = &dev->sum_tags[chunk_in_block];
+		tags_only.chunk_id = sum_tags->chunk_id;
+		tags_only.n_bytes = sum_tags->n_bytes;
+		tags_only.obj_id = sum_tags->obj_id;
+		yaffs_unpack_tags2_tags_only(tags, &tags_only);
+		return YAFFS_OK;
+	}
+	return YAFFS_FAIL;
+}
+
+void yaffs_summary_gc(struct yaffs_dev *dev, int blk)
+{
+	struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk);
+	int i;
+
+	if (!bi->has_summary)
+		return;
+
+	for (i = dev->chunks_per_summary;
+	     i < dev->param.chunks_per_block;
+	     i++) {
+		if (yaffs_check_chunk_bit(dev, blk, i)) {
+			yaffs_clear_chunk_bit(dev, blk, i);
+			bi->pages_in_use--;
+			dev->n_free_chunks++;
+		}
+	}
+}
diff --git a/fs/yaffs2/yaffs_summary.h b/fs/yaffs2/yaffs_summary.h
new file mode 100644
index 0000000..be141d0
--- /dev/null
+++ b/fs/yaffs2/yaffs_summary.h
@@ -0,0 +1,37 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_SUMMARY_H__
+#define __YAFFS_SUMMARY_H__
+
+#include "yaffs_packedtags2.h"
+
+
+int yaffs_summary_init(struct yaffs_dev *dev);
+void yaffs_summary_deinit(struct yaffs_dev *dev);
+
+int yaffs_summary_add(struct yaffs_dev *dev,
+			struct yaffs_ext_tags *tags,
+			int chunk_in_nand);
+int yaffs_summary_fetch(struct yaffs_dev *dev,
+			struct yaffs_ext_tags *tags,
+			int chunk_in_block);
+int yaffs_summary_read(struct yaffs_dev *dev,
+			struct yaffs_summary_tags *st,
+			int blk);
+void yaffs_summary_gc(struct yaffs_dev *dev, int blk);
+
+
+#endif
diff --git a/fs/yaffs2/yaffs_tagscompat.c b/fs/yaffs2/yaffs_tagscompat.c
new file mode 100644
index 0000000..092430b
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagscompat.c
@@ -0,0 +1,381 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_tagscompat.h"
+#include "yaffs_ecc.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_trace.h"
+
+static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk);
+
+
+/********** Tags ECC calculations  *********/
+
+
+void yaffs_calc_tags_ecc(struct yaffs_tags *tags)
+{
+	/* Calculate an ecc */
+	unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes;
+	unsigned i, j;
+	unsigned ecc = 0;
+	unsigned bit = 0;
+
+	tags->ecc = 0;
+
+	for (i = 0; i < 8; i++) {
+		for (j = 1; j & 0xff; j <<= 1) {
+			bit++;
+			if (b[i] & j)
+				ecc ^= bit;
+		}
+	}
+	tags->ecc = ecc;
+}
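+
+/*
+ * Illustrative note: the ecc computed above is the XOR of the (1-based)
+ * positions of all set bits in the 8 tag bytes. Flipping a single bit
+ * XORs its position into the value, so in yaffs_check_tags_ecc() below
+ *
+ *	stored_ecc ^ recomputed_ecc
+ *
+ * yields the position of the flipped bit, which is then repaired.
+ */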
+
+int yaffs_check_tags_ecc(struct yaffs_tags *tags)
+{
+	unsigned ecc = tags->ecc;
+
+	yaffs_calc_tags_ecc(tags);
+
+	ecc ^= tags->ecc;
+
+	if (ecc && ecc <= 64) {
+		/* TODO: Handle the failure better. Retire? */
+		unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes;
+
+		ecc--;
+
+		b[ecc / 8] ^= (1 << (ecc & 7));
+
+		/* Now recalc the ecc */
+		yaffs_calc_tags_ecc(tags);
+
+		return 1;	/* recovered error */
+	} else if (ecc) {
+		/* Weird ecc failure value */
+		/* TODO: Need to do something here */
+		return -1;	/* unrecovered error */
+	}
+	return 0;
+}
+
+/********** Tags **********/
+
+static void yaffs_load_tags_to_spare(struct yaffs_spare *spare_ptr,
+				     struct yaffs_tags *tags_ptr)
+{
+	union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr;
+
+	yaffs_calc_tags_ecc(tags_ptr);
+
+	spare_ptr->tb0 = tu->as_bytes[0];
+	spare_ptr->tb1 = tu->as_bytes[1];
+	spare_ptr->tb2 = tu->as_bytes[2];
+	spare_ptr->tb3 = tu->as_bytes[3];
+	spare_ptr->tb4 = tu->as_bytes[4];
+	spare_ptr->tb5 = tu->as_bytes[5];
+	spare_ptr->tb6 = tu->as_bytes[6];
+	spare_ptr->tb7 = tu->as_bytes[7];
+}
+
+static void yaffs_get_tags_from_spare(struct yaffs_dev *dev,
+				      struct yaffs_spare *spare_ptr,
+				      struct yaffs_tags *tags_ptr)
+{
+	union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr;
+	int result;
+
+	tu->as_bytes[0] = spare_ptr->tb0;
+	tu->as_bytes[1] = spare_ptr->tb1;
+	tu->as_bytes[2] = spare_ptr->tb2;
+	tu->as_bytes[3] = spare_ptr->tb3;
+	tu->as_bytes[4] = spare_ptr->tb4;
+	tu->as_bytes[5] = spare_ptr->tb5;
+	tu->as_bytes[6] = spare_ptr->tb6;
+	tu->as_bytes[7] = spare_ptr->tb7;
+
+	result = yaffs_check_tags_ecc(tags_ptr);
+	if (result > 0)
+		dev->n_tags_ecc_fixed++;
+	else if (result < 0)
+		dev->n_tags_ecc_unfixed++;
+}
+
+static void yaffs_spare_init(struct yaffs_spare *spare)
+{
+	memset(spare, 0xff, sizeof(struct yaffs_spare));
+}
+
+static int yaffs_wr_nand(struct yaffs_dev *dev,
+			 int nand_chunk, const u8 *data,
+			 struct yaffs_spare *spare)
+{
+	int data_size = dev->data_bytes_per_chunk;
+
+	return dev->drv.drv_write_chunk_fn(dev, nand_chunk,
+				data, data_size,
+				(u8 *) spare, sizeof(*spare));
+}
+
+static int yaffs_rd_chunk_nand(struct yaffs_dev *dev,
+			       int nand_chunk,
+			       u8 *data,
+			       struct yaffs_spare *spare,
+			       enum yaffs_ecc_result *ecc_result,
+			       int correct_errors)
+{
+	int ret_val;
+	struct yaffs_spare local_spare;
+	int data_size;
+	int spare_size;
+	int ecc_result1, ecc_result2;
+	u8 calc_ecc[3];
+
+	if (!spare) {
+		/* If we don't have a real spare, then we use a local one. */
+		/* Need this for the calculation of the ecc */
+		spare = &local_spare;
+	}
+	data_size = dev->data_bytes_per_chunk;
+	spare_size = sizeof(struct yaffs_spare);
+
+	if (dev->param.use_nand_ecc)
+		return dev->drv.drv_read_chunk_fn(dev, nand_chunk,
+						data, data_size,
+						(u8 *) spare, spare_size,
+						ecc_result);
+
+
+	/* Handle the ECC at this level. */
+
+	ret_val = dev->drv.drv_read_chunk_fn(dev, nand_chunk,
+						 data, data_size,
+						 (u8 *)spare, spare_size,
+						NULL);
+	if (!data || !correct_errors)
+		return ret_val;
+
+	/* Do ECC correction if needed. */
+	yaffs_ecc_calc(data, calc_ecc);
+	ecc_result1 = yaffs_ecc_correct(data, spare->ecc1, calc_ecc);
+	yaffs_ecc_calc(&data[256], calc_ecc);
+	ecc_result2 = yaffs_ecc_correct(&data[256], spare->ecc2, calc_ecc);
+
+	if (ecc_result1 > 0) {
+		yaffs_trace(YAFFS_TRACE_ERROR,
+			"**>>yaffs ecc error fix performed on chunk %d:0",
+			nand_chunk);
+		dev->n_ecc_fixed++;
+	} else if (ecc_result1 < 0) {
+		yaffs_trace(YAFFS_TRACE_ERROR,
+			"**>>yaffs ecc error unfixed on chunk %d:0",
+			nand_chunk);
+		dev->n_ecc_unfixed++;
+	}
+
+	if (ecc_result2 > 0) {
+		yaffs_trace(YAFFS_TRACE_ERROR,
+			"**>>yaffs ecc error fix performed on chunk %d:1",
+			nand_chunk);
+		dev->n_ecc_fixed++;
+	} else if (ecc_result2 < 0) {
+		yaffs_trace(YAFFS_TRACE_ERROR,
+			"**>>yaffs ecc error unfixed on chunk %d:1",
+			nand_chunk);
+		dev->n_ecc_unfixed++;
+	}
+
+	if (ecc_result1 || ecc_result2) {
+		/* We had a data problem on this page */
+		yaffs_handle_rd_data_error(dev, nand_chunk);
+	}
+
+	if (ecc_result1 < 0 || ecc_result2 < 0)
+		*ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+	else if (ecc_result1 > 0 || ecc_result2 > 0)
+		*ecc_result = YAFFS_ECC_RESULT_FIXED;
+	else
+		*ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+
+	return ret_val;
+}
+
+/*
+ * Functions for robustisizing
+ */
+
+static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk)
+{
+	int flash_block = nand_chunk / dev->param.chunks_per_block;
+
+	/* Mark the block for retirement */
+	yaffs_get_block_info(dev, flash_block + dev->block_offset)->
+		needs_retiring = 1;
+	yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+		"**>>Block %d marked for retirement",
+		flash_block);
+
+	/* TODO:
+	 * Just do a garbage collection on the affected block
+	 * then retire the block
+	 * NB recursion
+	 */
+}
+
+static int yaffs_tags_compat_wr(struct yaffs_dev *dev,
+			 int nand_chunk,
+			 const u8 *data, const struct yaffs_ext_tags *ext_tags)
+{
+	struct yaffs_spare spare;
+	struct yaffs_tags tags;
+
+	yaffs_spare_init(&spare);
+
+	if (ext_tags->is_deleted)
+		spare.page_status = 0;
+	else {
+		tags.obj_id = ext_tags->obj_id;
+		tags.chunk_id = ext_tags->chunk_id;
+
+		tags.n_bytes_lsb = ext_tags->n_bytes & (1024 - 1);
+
+		if (dev->data_bytes_per_chunk >= 1024)
+			tags.n_bytes_msb = (ext_tags->n_bytes >> 10) & 3;
+		else
+			tags.n_bytes_msb = 3;
+
+		tags.serial_number = ext_tags->serial_number;
+
+		if (!dev->param.use_nand_ecc && data) {
+			yaffs_ecc_calc(data, spare.ecc1);
+			yaffs_ecc_calc(&data[256], spare.ecc2);
+		}
+
+		yaffs_load_tags_to_spare(&spare, &tags);
+	}
+	return yaffs_wr_nand(dev, nand_chunk, data, &spare);
+}
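+
+/*
+ * Worked example (informational): the byte count is split into a 10-bit
+ * LSB and a 2-bit MSB above, so for ext_tags->n_bytes == 2048 we get
+ * n_bytes_lsb == 0 and n_bytes_msb == 2.  The read side reassembles the
+ * count as n_bytes_lsb | (n_bytes_msb << 10), giving a 12-bit range.
+ */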
+
+static int yaffs_tags_compat_rd(struct yaffs_dev *dev,
+			 int nand_chunk,
+			 u8 *data, struct yaffs_ext_tags *ext_tags)
+{
+	struct yaffs_spare spare;
+	struct yaffs_tags tags;
+	enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
+	static struct yaffs_spare spare_ff;
+	static int init;
+	int deleted;
+
+	if (!init) {
+		memset(&spare_ff, 0xff, sizeof(spare_ff));
+		init = 1;
+	}
+
+	if (!yaffs_rd_chunk_nand(dev, nand_chunk,
+					data, &spare, &ecc_result, 1))
+		return YAFFS_FAIL;
+
+	/* ext_tags may be NULL */
+	if (!ext_tags)
+		return YAFFS_OK;
+
+	deleted = (hweight8(spare.page_status) < 7) ? 1 : 0;
+
+	ext_tags->is_deleted = deleted;
+	ext_tags->ecc_result = ecc_result;
+	ext_tags->block_bad = 0;	/* We're reading it, so it can't
+					 * be a bad block */
+	ext_tags->chunk_used =
+		memcmp(&spare_ff, &spare, sizeof(spare_ff)) ? 1 : 0;
+
+	if (ext_tags->chunk_used) {
+		yaffs_get_tags_from_spare(dev, &spare, &tags);
+		ext_tags->obj_id = tags.obj_id;
+		ext_tags->chunk_id = tags.chunk_id;
+		ext_tags->n_bytes = tags.n_bytes_lsb;
+
+		if (dev->data_bytes_per_chunk >= 1024)
+			ext_tags->n_bytes |=
+				(((unsigned)tags.n_bytes_msb) << 10);
+
+		ext_tags->serial_number = tags.serial_number;
+	}
+
+	return YAFFS_OK;
+}
+
+static int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int flash_block)
+{
+	struct yaffs_spare spare;
+
+	memset(&spare, 0xff, sizeof(struct yaffs_spare));
+
+	spare.block_status = 'Y';
+
+	yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block, NULL,
+		      &spare);
+	yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block + 1,
+		      NULL, &spare);
+
+	return YAFFS_OK;
+}
+
+static int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
+				  int block_no,
+				  enum yaffs_block_state *state,
+				  u32 *seq_number)
+{
+	struct yaffs_spare spare0, spare1;
+	static struct yaffs_spare spare_ff;
+	static int init;
+	enum yaffs_ecc_result dummy;
+
+	if (!init) {
+		memset(&spare_ff, 0xff, sizeof(spare_ff));
+		init = 1;
+	}
+
+	*seq_number = 0;
+
+	/* Look for bad block markers in the first two chunks */
+	yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block,
+			    NULL, &spare0, &dummy, 0);
+	yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block + 1,
+			    NULL, &spare1, &dummy, 0);
+
+	if (hweight8(spare0.block_status & spare1.block_status) < 7)
+		*state = YAFFS_BLOCK_STATE_DEAD;
+	else if (memcmp(&spare_ff, &spare0, sizeof(spare_ff)) == 0)
+		*state = YAFFS_BLOCK_STATE_EMPTY;
+	else
+		*state = YAFFS_BLOCK_STATE_NEEDS_SCAN;
+
+	return YAFFS_OK;
+}
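+
+/*
+ * Worked example (informational): yaffs_tags_compat_mark_bad() above
+ * writes block_status = 'Y' (0x59, four bits set) into the first two
+ * chunks.  Even if only one of those two writes sticks, the query sees
+ * 0x59 & 0xff with hweight8() == 4 < 7 and reports the block as
+ * YAFFS_BLOCK_STATE_DEAD; a pristine block keeps block_status == 0xff
+ * and an all-0xff spare, and is reported empty.
+ */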
+
+void yaffs_tags_compat_install(struct yaffs_dev *dev)
+{
+	if (dev->param.is_yaffs2)
+		return;
+	if (!dev->tagger.write_chunk_tags_fn)
+		dev->tagger.write_chunk_tags_fn = yaffs_tags_compat_wr;
+	if (!dev->tagger.read_chunk_tags_fn)
+		dev->tagger.read_chunk_tags_fn = yaffs_tags_compat_rd;
+	if (!dev->tagger.query_block_fn)
+		dev->tagger.query_block_fn = yaffs_tags_compat_query_block;
+	if (!dev->tagger.mark_bad_fn)
+		dev->tagger.mark_bad_fn = yaffs_tags_compat_mark_bad;
+}
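+
+/*
+ * Usage sketch (assumed call site, for illustration): during device
+ * initialisation yaffs is expected to run both installers; each bails
+ * out unless the device matches its yaffs version, so the pair is
+ * complementary:
+ *
+ *	yaffs_tags_compat_install(dev);		yaffs1 devices only
+ *	yaffs_tags_marshall_install(dev);	yaffs2 devices only
+ */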
diff --git a/fs/yaffs2/yaffs_tagscompat.h b/fs/yaffs2/yaffs_tagscompat.h
new file mode 100644
index 0000000..92d298a
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagscompat.h
@@ -0,0 +1,44 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_TAGSCOMPAT_H__
+#define __YAFFS_TAGSCOMPAT_H__
+
+
+#include "yaffs_guts.h"
+
+#if 0
+
+
+int yaffs_tags_compat_wr(struct yaffs_dev *dev,
+			 int nand_chunk,
+			 const u8 *data, const struct yaffs_ext_tags *tags);
+int yaffs_tags_compat_rd(struct yaffs_dev *dev,
+			 int nand_chunk,
+			 u8 *data, struct yaffs_ext_tags *tags);
+int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int block_no);
+int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
+				  int block_no,
+				  enum yaffs_block_state *state,
+				  u32 *seq_number);
+
+#endif
+
+
+void yaffs_tags_compat_install(struct yaffs_dev *dev);
+void yaffs_calc_tags_ecc(struct yaffs_tags *tags);
+int yaffs_check_tags_ecc(struct yaffs_tags *tags);
+
+#endif
diff --git a/fs/yaffs2/yaffs_tagsmarshall.c b/fs/yaffs2/yaffs_tagsmarshall.c
new file mode 100644
index 0000000..44a83b1
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagsmarshall.c
@@ -0,0 +1,199 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+#include "yaffs_packedtags2.h"
+
+static int yaffs_tags_marshall_write(struct yaffs_dev *dev,
+				    int nand_chunk, const u8 *data,
+				    const struct yaffs_ext_tags *tags)
+{
+	struct yaffs_packed_tags2 pt;
+	int retval;
+
+	int packed_tags_size =
+	    dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
+	void *packed_tags_ptr =
+	    dev->param.no_tags_ecc ? (void *)&pt.t : (void *)&pt;
+
+	yaffs_trace(YAFFS_TRACE_MTD,
+		"yaffs_tags_marshall_write chunk %d data %p tags %p",
+		nand_chunk, data, tags);
+
+	/* For yaffs2 writing there must be both data and tags.
+	 * If we're using inband tags, then the tags are stuffed into
+	 * the end of the data buffer.
+	 */
+	if (!data || !tags)
+		BUG();
+	else if (dev->param.inband_tags) {
+		struct yaffs_packed_tags2_tags_only *pt2tp;
+		pt2tp = (struct yaffs_packed_tags2_tags_only *)
+				(data + dev->data_bytes_per_chunk);
+		yaffs_pack_tags2_tags_only(pt2tp, tags);
+	} else {
+		yaffs_pack_tags2(&pt, tags, !dev->param.no_tags_ecc);
+	}
+
+	retval = dev->drv.drv_write_chunk_fn(dev, nand_chunk,
+			data, dev->param.total_bytes_per_chunk,
+			(dev->param.inband_tags) ? NULL : packed_tags_ptr,
+			(dev->param.inband_tags) ? 0 : packed_tags_size);
+
+	return retval;
+}
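+
+/*
+ * Layout sketch for the inband-tags case above (informational): the
+ * packed tags live in the tail of the data buffer instead of the
+ * spare/OOB area, so one chunk on flash looks like
+ *
+ *	[0 .. data_bytes_per_chunk - 1]		file data
+ *	[data_bytes_per_chunk .. total - 1]	yaffs_packed_tags2_tags_only
+ *
+ * which is why the write above passes NULL/0 for the spare buffer.
+ */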
+
+static int yaffs_tags_marshall_read(struct yaffs_dev *dev,
+				   int nand_chunk, u8 *data,
+				   struct yaffs_ext_tags *tags)
+{
+	int retval = 0;
+	int local_data = 0;
+	u8 spare_buffer[100];
+	enum yaffs_ecc_result ecc_result;
+
+	struct yaffs_packed_tags2 pt;
+
+	int packed_tags_size =
+	    dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
+	void *packed_tags_ptr =
+	    dev->param.no_tags_ecc ? (void *)&pt.t : (void *)&pt;
+
+	yaffs_trace(YAFFS_TRACE_MTD,
+		"yaffs_tags_marshall_read chunk %d data %p tags %p",
+		nand_chunk, data, tags);
+
+	if (dev->param.inband_tags) {
+		if (!data) {
+			local_data = 1;
+			data = yaffs_get_temp_buffer(dev);
+		}
+	}
+
+	if (dev->param.inband_tags || (data && !tags))
+		retval = dev->drv.drv_read_chunk_fn(dev, nand_chunk,
+					data, dev->param.total_bytes_per_chunk,
+					NULL, 0,
+					&ecc_result);
+	else if (tags)
+		retval = dev->drv.drv_read_chunk_fn(dev, nand_chunk,
+					data, dev->param.total_bytes_per_chunk,
+					spare_buffer, packed_tags_size,
+					&ecc_result);
+	else
+		BUG();
+
+	if (dev->param.inband_tags) {
+		if (tags) {
+			struct yaffs_packed_tags2_tags_only *pt2tp;
+			pt2tp =
+				(struct yaffs_packed_tags2_tags_only *)
+				&data[dev->data_bytes_per_chunk];
+			yaffs_unpack_tags2_tags_only(tags, pt2tp);
+		}
+	} else if (tags) {
+		memcpy(packed_tags_ptr, spare_buffer, packed_tags_size);
+		yaffs_unpack_tags2(tags, &pt, !dev->param.no_tags_ecc);
+	}
+
+	if (local_data)
+		yaffs_release_temp_buffer(dev, data);
+
+	if (tags && ecc_result == YAFFS_ECC_RESULT_UNFIXED) {
+		tags->ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+		dev->n_ecc_unfixed++;
+	}
+
+	if (tags && ecc_result == YAFFS_ECC_RESULT_FIXED) {
+		if (tags->ecc_result <= YAFFS_ECC_RESULT_NO_ERROR)
+			tags->ecc_result = YAFFS_ECC_RESULT_FIXED;
+		dev->n_ecc_fixed++;
+	}
+
+	if (ecc_result < YAFFS_ECC_RESULT_UNFIXED)
+		return YAFFS_OK;
+	else
+		return YAFFS_FAIL;
+}
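+
+/*
+ * Note (informational): the OK/FAIL mapping above relies on the enum
+ * ordering UNKNOWN < NO_ERROR < FIXED < UNFIXED in yaffs_guts.h, so
+ * anything short of an uncorrectable ECC failure returns YAFFS_OK.
+ */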
+
+static int yaffs_tags_marshall_query_block(struct yaffs_dev *dev, int block_no,
+			       enum yaffs_block_state *state,
+			       u32 *seq_number)
+{
+	int retval;
+
+	yaffs_trace(YAFFS_TRACE_MTD, "yaffs_tags_marshall_query_block %d",
+			block_no);
+
+	retval = dev->drv.drv_check_bad_fn(dev, block_no);
+
+	if (retval == YAFFS_FAIL) {
+		yaffs_trace(YAFFS_TRACE_MTD, "block is bad");
+
+		*state = YAFFS_BLOCK_STATE_DEAD;
+		*seq_number = 0;
+	} else {
+		struct yaffs_ext_tags t;
+
+		yaffs_tags_marshall_read(dev,
+				    block_no * dev->param.chunks_per_block,
+				    NULL, &t);
+
+		if (t.chunk_used) {
+			*seq_number = t.seq_number;
+			*state = YAFFS_BLOCK_STATE_NEEDS_SCAN;
+		} else {
+			*seq_number = 0;
+			*state = YAFFS_BLOCK_STATE_EMPTY;
+		}
+	}
+
+	yaffs_trace(YAFFS_TRACE_MTD,
+		"block query returns  seq %d state %d",
+		*seq_number, *state);
+
+	if (retval == 0)
+		return YAFFS_OK;
+	else
+		return YAFFS_FAIL;
+}
+
+static int yaffs_tags_marshall_mark_bad(struct yaffs_dev *dev, int block_no)
+{
+	return dev->drv.drv_mark_bad_fn(dev, block_no);
+}
+
+
+void yaffs_tags_marshall_install(struct yaffs_dev *dev)
+{
+	if (!dev->param.is_yaffs2)
+		return;
+
+	if (!dev->tagger.write_chunk_tags_fn)
+		dev->tagger.write_chunk_tags_fn = yaffs_tags_marshall_write;
+
+	if (!dev->tagger.read_chunk_tags_fn)
+		dev->tagger.read_chunk_tags_fn = yaffs_tags_marshall_read;
+
+	if (!dev->tagger.query_block_fn)
+		dev->tagger.query_block_fn = yaffs_tags_marshall_query_block;
+
+	if (!dev->tagger.mark_bad_fn)
+		dev->tagger.mark_bad_fn = yaffs_tags_marshall_mark_bad;
+}
diff --git a/fs/yaffs2/yaffs_tagsmarshall.h b/fs/yaffs2/yaffs_tagsmarshall.h
new file mode 100644
index 0000000..bf3e68a
--- /dev/null
+++ b/fs/yaffs2/yaffs_tagsmarshall.h
@@ -0,0 +1,22 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_TAGSMARSHALL_H__
+#define __YAFFS_TAGSMARSHALL_H__
+
+#include "yaffs_guts.h"
+void yaffs_tags_marshall_install(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_trace.h b/fs/yaffs2/yaffs_trace.h
new file mode 100644
index 0000000..fd26054
--- /dev/null
+++ b/fs/yaffs2/yaffs_trace.h
@@ -0,0 +1,57 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YTRACE_H__
+#define __YTRACE_H__
+
+extern unsigned int yaffs_trace_mask;
+extern unsigned int yaffs_wr_attempts;
+
+/*
+ * Tracing flags.
+ * The flags masked in YAFFS_TRACE_ALWAYS are always traced.
+ */
+
+#define YAFFS_TRACE_OS			0x00000002
+#define YAFFS_TRACE_ALLOCATE		0x00000004
+#define YAFFS_TRACE_SCAN		0x00000008
+#define YAFFS_TRACE_BAD_BLOCKS		0x00000010
+#define YAFFS_TRACE_ERASE		0x00000020
+#define YAFFS_TRACE_GC			0x00000040
+#define YAFFS_TRACE_WRITE		0x00000080
+#define YAFFS_TRACE_TRACING		0x00000100
+#define YAFFS_TRACE_DELETION		0x00000200
+#define YAFFS_TRACE_BUFFERS		0x00000400
+#define YAFFS_TRACE_NANDACCESS		0x00000800
+#define YAFFS_TRACE_GC_DETAIL		0x00001000
+#define YAFFS_TRACE_SCAN_DEBUG		0x00002000
+#define YAFFS_TRACE_MTD			0x00004000
+#define YAFFS_TRACE_CHECKPOINT		0x00008000
+
+#define YAFFS_TRACE_VERIFY		0x00010000
+#define YAFFS_TRACE_VERIFY_NAND		0x00020000
+#define YAFFS_TRACE_VERIFY_FULL		0x00040000
+#define YAFFS_TRACE_VERIFY_ALL		0x000f0000
+
+#define YAFFS_TRACE_SYNC		0x00100000
+#define YAFFS_TRACE_BACKGROUND		0x00200000
+#define YAFFS_TRACE_LOCK		0x00400000
+#define YAFFS_TRACE_MOUNT		0x00800000
+
+#define YAFFS_TRACE_ERROR		0x40000000
+#define YAFFS_TRACE_BUG			0x80000000
+#define YAFFS_TRACE_ALWAYS		0xf0000000
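+
+/*
+ * Example (illustrative): the mask is a plain OR of the flags above,
+ * e.g. to trace scanning and bad-block handling on top of the
+ * always-on bits:
+ *
+ *	yaffs_trace_mask = YAFFS_TRACE_ALWAYS |
+ *			   YAFFS_TRACE_SCAN |
+ *			   YAFFS_TRACE_BAD_BLOCKS;
+ */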
+
+#endif
diff --git a/fs/yaffs2/yaffs_verify.c b/fs/yaffs2/yaffs_verify.c
new file mode 100644
index 0000000..e8f2f0a
--- /dev/null
+++ b/fs/yaffs2/yaffs_verify.c
@@ -0,0 +1,529 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_verify.h"
+#include "yaffs_trace.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_nand.h"
+
+int yaffs_skip_verification(struct yaffs_dev *dev)
+{
+	(void) dev;
+	return !(yaffs_trace_mask &
+		 (YAFFS_TRACE_VERIFY | YAFFS_TRACE_VERIFY_FULL));
+}
+
+static int yaffs_skip_full_verification(struct yaffs_dev *dev)
+{
+	(void) dev;
+	return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_FULL));
+}
+
+static int yaffs_skip_nand_verification(struct yaffs_dev *dev)
+{
+	(void) dev;
+	return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_NAND));
+}
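+
+/*
+ * Note (informational): verification is gated purely on the trace
+ * mask, so setting one of the YAFFS_TRACE_VERIFY* bits at runtime
+ * enables these checks without a rebuild; with the default mask every
+ * verify function returns early.
+ */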
+
+static const char * const block_state_name[] = {
+	"Unknown",
+	"Needs scan",
+	"Scanning",
+	"Empty",
+	"Allocating",
+	"Full",
+	"Dirty",
+	"Checkpoint",
+	"Collecting",
+	"Dead"
+};
+
+void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi, int n)
+{
+	int actually_used;
+	int in_use;
+
+	if (yaffs_skip_verification(dev))
+		return;
+
+	/* Report illegal runtime states */
+	if (bi->block_state >= YAFFS_NUMBER_OF_BLOCK_STATES)
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Block %d has undefined state %d",
+			n, bi->block_state);
+
+	switch (bi->block_state) {
+	case YAFFS_BLOCK_STATE_UNKNOWN:
+	case YAFFS_BLOCK_STATE_SCANNING:
+	case YAFFS_BLOCK_STATE_NEEDS_SCAN:
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Block %d has bad run-state %s",
+			n, block_state_name[bi->block_state]);
+	}
+
+	/* Check pages in use and soft deletions are legal */
+
+	actually_used = bi->pages_in_use - bi->soft_del_pages;
+
+	if (bi->pages_in_use < 0 ||
+	    bi->pages_in_use > dev->param.chunks_per_block ||
+	    bi->soft_del_pages < 0 ||
+	    bi->soft_del_pages > dev->param.chunks_per_block ||
+	    actually_used < 0 || actually_used > dev->param.chunks_per_block)
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Block %d has illegal values pages_in_used %d soft_del_pages %d",
+			n, bi->pages_in_use, bi->soft_del_pages);
+
+	/* Check chunk bitmap legal */
+	in_use = yaffs_count_chunk_bits(dev, n);
+	if (in_use != bi->pages_in_use)
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Block %d has inconsistent values pages_in_use %d counted chunk bits %d",
+			n, bi->pages_in_use, in_use);
+}
+
+void yaffs_verify_collected_blk(struct yaffs_dev *dev,
+				struct yaffs_block_info *bi, int n)
+{
+	yaffs_verify_blk(dev, bi, n);
+
+	/* After collection the block should be in the erased state */
+
+	if (bi->block_state != YAFFS_BLOCK_STATE_COLLECTING &&
+	    bi->block_state != YAFFS_BLOCK_STATE_EMPTY) {
+		yaffs_trace(YAFFS_TRACE_ERROR,
+			"Block %d is in state %d after gc, should be erased",
+			n, bi->block_state);
+	}
+}
+
+void yaffs_verify_blocks(struct yaffs_dev *dev)
+{
+	int i;
+	int state_count[YAFFS_NUMBER_OF_BLOCK_STATES];
+	int illegal_states = 0;
+
+	if (yaffs_skip_verification(dev))
+		return;
+
+	memset(state_count, 0, sizeof(state_count));
+
+	for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+		struct yaffs_block_info *bi = yaffs_get_block_info(dev, i);
+		yaffs_verify_blk(dev, bi, i);
+
+		if (bi->block_state < YAFFS_NUMBER_OF_BLOCK_STATES)
+			state_count[bi->block_state]++;
+		else
+			illegal_states++;
+	}
+
+	yaffs_trace(YAFFS_TRACE_VERIFY, "Block summary");
+
+	yaffs_trace(YAFFS_TRACE_VERIFY,
+		"%d blocks have illegal states",
+		illegal_states);
+	if (state_count[YAFFS_BLOCK_STATE_ALLOCATING] > 1)
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Too many allocating blocks");
+
+	for (i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++)
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"%s %d blocks",
+			block_state_name[i], state_count[i]);
+
+	if (dev->blocks_in_checkpt != state_count[YAFFS_BLOCK_STATE_CHECKPOINT])
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Checkpoint block count wrong dev %d count %d",
+			dev->blocks_in_checkpt,
+			state_count[YAFFS_BLOCK_STATE_CHECKPOINT]);
+
+	if (dev->n_erased_blocks != state_count[YAFFS_BLOCK_STATE_EMPTY])
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Erased block count wrong dev %d count %d",
+			dev->n_erased_blocks,
+			state_count[YAFFS_BLOCK_STATE_EMPTY]);
+
+	if (state_count[YAFFS_BLOCK_STATE_COLLECTING] > 1)
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Too many collecting blocks %d (max is 1)",
+			state_count[YAFFS_BLOCK_STATE_COLLECTING]);
+}
+
+/*
+ * Verify the object header. oh must be valid, but obj and tags may be NULL in
+ * which case those tests will not be performed.
+ */
+void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh,
+		     struct yaffs_ext_tags *tags, int parent_check)
+{
+	if (obj && yaffs_skip_verification(obj->my_dev))
+		return;
+
+	if (!(tags && obj && oh)) {
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Verifying object header tags %p obj %p oh %p",
+			tags, obj, oh);
+		return;
+	}
+
+	if (oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN ||
+	    oh->type > YAFFS_OBJECT_TYPE_MAX)
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Obj %d header type is illegal value 0x%x",
+			tags->obj_id, oh->type);
+
+	if (tags->obj_id != obj->obj_id)
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Obj %d header mismatch obj_id %d",
+			tags->obj_id, obj->obj_id);
+
+	/*
+	 * Check that the object's parent ids match if parent_check requested.
+	 *
+	 * Tests do not apply to the root object.
+	 */
+
+	if (parent_check && tags->obj_id > 1 && !obj->parent)
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Obj %d header mismatch parent_id %d obj->parent is NULL",
+			tags->obj_id, oh->parent_obj_id);
+
+	if (parent_check && obj->parent &&
+	    oh->parent_obj_id != obj->parent->obj_id &&
+	    (oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED ||
+	     obj->parent->obj_id != YAFFS_OBJECTID_DELETED))
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Obj %d header mismatch parent_id %d parent_obj_id %d",
+			tags->obj_id, oh->parent_obj_id,
+			obj->parent->obj_id);
+
+	if (tags->obj_id > 1 && oh->name[0] == 0)	/* Null name */
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Obj %d header name is NULL",
+			obj->obj_id);
+
+	if (tags->obj_id > 1 && ((u8) (oh->name[0])) == 0xff)	/* Junk name */
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Obj %d header name is 0xff",
+			obj->obj_id);
+}
+
+void yaffs_verify_file(struct yaffs_obj *obj)
+{
+	u32 x;
+	int required_depth;
+	int actual_depth;
+	int last_chunk;
+	u32 offset_in_chunk;
+	u32 the_chunk;
+
+	u32 i;
+	struct yaffs_dev *dev;
+	struct yaffs_ext_tags tags;
+	struct yaffs_tnode *tn;
+	u32 obj_id;
+
+	if (!obj)
+		return;
+
+	if (yaffs_skip_verification(obj->my_dev))
+		return;
+
+	dev = obj->my_dev;
+	obj_id = obj->obj_id;
+
+	/* Check file size is consistent with tnode depth */
+	yaffs_addr_to_chunk(dev, obj->variant.file_variant.file_size,
+				&last_chunk, &offset_in_chunk);
+	last_chunk++;
+	x = last_chunk >> YAFFS_TNODES_LEVEL0_BITS;
+	required_depth = 0;
+	while (x > 0) {
+		x >>= YAFFS_TNODES_INTERNAL_BITS;
+		required_depth++;
+	}
+
+	actual_depth = obj->variant.file_variant.top_level;
+
+	/* Check that the chunks in the tnode tree are all correct.
+	 * We do this by scanning through the tnode tree and
+	 * checking the tags for every chunk match.
+	 */
+
+	if (yaffs_skip_nand_verification(dev))
+		return;
+
+	for (i = 1; i <= last_chunk; i++) {
+		tn = yaffs_find_tnode_0(dev, &obj->variant.file_variant, i);
+
+		if (!tn)
+			continue;
+
+		the_chunk = yaffs_get_group_base(dev, tn, i);
+		if (the_chunk > 0) {
+			yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
+						 &tags);
+			if (tags.obj_id != obj_id || tags.chunk_id != i)
+				yaffs_trace(YAFFS_TRACE_VERIFY,
+					"Object %d chunk_id %d NAND mismatch chunk %d tags (%d:%d)",
+					obj_id, i, the_chunk,
+					tags.obj_id, tags.chunk_id);
+		}
+	}
+}
+
+void yaffs_verify_link(struct yaffs_obj *obj)
+{
+	if (obj && yaffs_skip_verification(obj->my_dev))
+		return;
+
+	/* Verify sane equivalent object */
+}
+
+void yaffs_verify_symlink(struct yaffs_obj *obj)
+{
+	if (obj && yaffs_skip_verification(obj->my_dev))
+		return;
+
+	/* Verify symlink string */
+}
+
+void yaffs_verify_special(struct yaffs_obj *obj)
+{
+	if (obj && yaffs_skip_verification(obj->my_dev))
+		return;
+}
+
+void yaffs_verify_obj(struct yaffs_obj *obj)
+{
+	struct yaffs_dev *dev;
+	u32 chunk_min;
+	u32 chunk_max;
+	u32 chunk_id_ok;
+	u32 chunk_in_range;
+	u32 chunk_wrongly_deleted;
+	u32 chunk_valid;
+
+	if (!obj)
+		return;
+
+	if (obj->being_created)
+		return;
+
+	dev = obj->my_dev;
+
+	if (yaffs_skip_verification(dev))
+		return;
+
+	/* Check sane object header chunk */
+
+	chunk_min = dev->internal_start_block * dev->param.chunks_per_block;
+	chunk_max =
+	    (dev->internal_end_block + 1) * dev->param.chunks_per_block - 1;
+
+	chunk_in_range = (((unsigned)(obj->hdr_chunk)) >= chunk_min &&
+			  ((unsigned)(obj->hdr_chunk)) <= chunk_max);
+	chunk_id_ok = chunk_in_range || (obj->hdr_chunk == 0);
+	chunk_valid = chunk_in_range &&
+	    yaffs_check_chunk_bit(dev,
+				  obj->hdr_chunk / dev->param.chunks_per_block,
+				  obj->hdr_chunk % dev->param.chunks_per_block);
+	chunk_wrongly_deleted = chunk_in_range && !chunk_valid;
+
+	if (!obj->fake && (!chunk_id_ok || chunk_wrongly_deleted))
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Obj %d has chunk_id %d %s %s",
+			obj->obj_id, obj->hdr_chunk,
+			chunk_id_ok ? "" : ",out of range",
+			chunk_wrongly_deleted ? ",marked as deleted" : "");
+
+	if (chunk_valid && !yaffs_skip_nand_verification(dev)) {
+		struct yaffs_ext_tags tags;
+		struct yaffs_obj_hdr *oh;
+		u8 *buffer = yaffs_get_temp_buffer(dev);
+
+		oh = (struct yaffs_obj_hdr *)buffer;
+
+		yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, buffer, &tags);
+
+		yaffs_verify_oh(obj, oh, &tags, 1);
+
+		yaffs_release_temp_buffer(dev, buffer);
+	}
+
+	/* Verify it has a parent */
+	if (obj && !obj->fake && (!obj->parent || obj->parent->my_dev != dev)) {
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Obj %d has parent pointer %p which does not look like an object",
+			obj->obj_id, obj->parent);
+	}
+
+	/* Verify parent is a directory */
+	if (obj->parent &&
+	    obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Obj %d's parent is not a directory (type %d)",
+			obj->obj_id, obj->parent->variant_type);
+	}
+
+	switch (obj->variant_type) {
+	case YAFFS_OBJECT_TYPE_FILE:
+		yaffs_verify_file(obj);
+		break;
+	case YAFFS_OBJECT_TYPE_SYMLINK:
+		yaffs_verify_symlink(obj);
+		break;
+	case YAFFS_OBJECT_TYPE_DIRECTORY:
+		yaffs_verify_dir(obj);
+		break;
+	case YAFFS_OBJECT_TYPE_HARDLINK:
+		yaffs_verify_link(obj);
+		break;
+	case YAFFS_OBJECT_TYPE_SPECIAL:
+		yaffs_verify_special(obj);
+		break;
+	case YAFFS_OBJECT_TYPE_UNKNOWN:
+	default:
+		yaffs_trace(YAFFS_TRACE_VERIFY,
+			"Obj %d has illegal type %d",
+			obj->obj_id, obj->variant_type);
+		break;
+	}
+}
+
+void yaffs_verify_objects(struct yaffs_dev *dev)
+{
+	struct yaffs_obj *obj;
+	int i;
+	struct list_head *lh;
+
+	if (yaffs_skip_verification(dev))
+		return;
+
+	/* Iterate through the objects in each hash entry */
+
+	for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+		list_for_each(lh, &dev->obj_bucket[i].list) {
+			obj = list_entry(lh, struct yaffs_obj, hash_link);
+			yaffs_verify_obj(obj);
+		}
+	}
+}
+
+void yaffs_verify_obj_in_dir(struct yaffs_obj *obj)
+{
+	struct list_head *lh;
+	struct yaffs_obj *list_obj;
+	int count = 0;
+
+	if (!obj) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS, "No object to verify");
+		BUG();
+		return;
+	}
+
+	if (yaffs_skip_verification(obj->my_dev))
+		return;
+
+	if (!obj->parent) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS, "Object does not have parent");
+		BUG();
+		return;
+	}
+
+	if (obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS, "Parent is not directory");
+		BUG();
+	}
+
+	/* Iterate through the objects in each hash entry */
+
+	list_for_each(lh, &obj->parent->variant.dir_variant.children) {
+		list_obj = list_entry(lh, struct yaffs_obj, siblings);
+		yaffs_verify_obj(list_obj);
+		if (obj == list_obj)
+			count++;
+	}
+
+	if (count != 1) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"Object in directory %d times",
+			count);
+		BUG();
+	}
+}
+
+void yaffs_verify_dir(struct yaffs_obj *directory)
+{
+	struct list_head *lh;
+	struct yaffs_obj *list_obj;
+
+	if (!directory) {
+		BUG();
+		return;
+	}
+
+	if (yaffs_skip_full_verification(directory->my_dev))
+		return;
+
+	if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"Directory has wrong type: %d",
+			directory->variant_type);
+		BUG();
+	}
+
+	/* Iterate through the objects in each hash entry */
+
+	list_for_each(lh, &directory->variant.dir_variant.children) {
+		list_obj = list_entry(lh, struct yaffs_obj, siblings);
+		if (list_obj->parent != directory) {
+			yaffs_trace(YAFFS_TRACE_ALWAYS,
+				"Object in directory list has wrong parent %p",
+				list_obj->parent);
+			BUG();
+		}
+		yaffs_verify_obj_in_dir(list_obj);
+	}
+}
+
+static int yaffs_free_verification_failures;
+
+void yaffs_verify_free_chunks(struct yaffs_dev *dev)
+{
+	int counted;
+	int difference;
+
+	if (yaffs_skip_verification(dev))
+		return;
+
+	counted = yaffs_count_free_chunks(dev);
+
+	difference = dev->n_free_chunks - counted;
+
+	if (difference) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"Freechunks verification failure %d %d %d",
+			dev->n_free_chunks, counted, difference);
+		yaffs_free_verification_failures++;
+	}
+}
+
+int yaffs_verify_file_sane(struct yaffs_obj *in)
+{
+	(void) in;
+	return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_verify.h b/fs/yaffs2/yaffs_verify.h
new file mode 100644
index 0000000..4f4af8d
--- /dev/null
+++ b/fs/yaffs2/yaffs_verify.h
@@ -0,0 +1,43 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_VERIFY_H__
+#define __YAFFS_VERIFY_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi,
+		      int n);
+void yaffs_verify_collected_blk(struct yaffs_dev *dev,
+				struct yaffs_block_info *bi, int n);
+void yaffs_verify_blocks(struct yaffs_dev *dev);
+
+void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh,
+		     struct yaffs_ext_tags *tags, int parent_check);
+void yaffs_verify_file(struct yaffs_obj *obj);
+void yaffs_verify_link(struct yaffs_obj *obj);
+void yaffs_verify_symlink(struct yaffs_obj *obj);
+void yaffs_verify_special(struct yaffs_obj *obj);
+void yaffs_verify_obj(struct yaffs_obj *obj);
+void yaffs_verify_objects(struct yaffs_dev *dev);
+void yaffs_verify_obj_in_dir(struct yaffs_obj *obj);
+void yaffs_verify_dir(struct yaffs_obj *directory);
+void yaffs_verify_free_chunks(struct yaffs_dev *dev);
+
+int yaffs_verify_file_sane(struct yaffs_obj *obj);
+
+int yaffs_skip_verification(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_vfs.c b/fs/yaffs2/yaffs_vfs.c
new file mode 100644
index 0000000..568cef4
--- /dev/null
+++ b/fs/yaffs2/yaffs_vfs.c
@@ -0,0 +1,3497 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ * Acknowledgements:
+ * Luc van OostenRyck for numerous patches.
+ * Nick Bane for numerous patches.
+ * Nick Bane for 2.5/2.6 integration.
+ * Andras Toth for mknod rdev issue.
+ * Michael Fischer for finding the problem with inode inconsistency.
+ * Some code bodily lifted from JFFS
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ *
+ * This is the file system front-end to YAFFS that hooks it up to
+ * the VFS.
+ *
+ * Special notes:
+ * >> 2.4: sb->u.generic_sbp points to the struct yaffs_dev associated with
+ *         this superblock
+ * >> 2.6: sb->s_fs_info  points to the struct yaffs_dev associated with this
+ *         superblock
+ * >> inode->u.generic_ip points to the associated struct yaffs_obj.
+ */
+
+/*
+ * There are two variants of the VFS glue code. This variant should compile
+ * for any version of Linux.
+ */
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10))
+#define YAFFS_COMPILE_BACKGROUND
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23))
+#define YAFFS_COMPILE_FREEZER
+#endif
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
+#define YAFFS_COMPILE_EXPORTFS
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35))
+#define YAFFS_USE_SETATTR_COPY
+#define YAFFS_USE_TRUNCATE_SETSIZE
+#endif
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35))
+#define YAFFS_HAS_EVICT_INODE
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
+#define YAFFS_NEW_FOLLOW_LINK 1
+#else
+#define YAFFS_NEW_FOLLOW_LINK 0
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+#define YAFFS_HAS_WRITE_SUPER
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+#include <linux/config.h>
+#endif
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39))
+#include <linux/smp_lock.h>
+#endif
+#include <linux/pagemap.h>
+#include <linux/mtd/mtd.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+
+#if (YAFFS_NEW_FOLLOW_LINK == 1)
+#include <linux/namei.h>
+#endif
+
+#ifdef YAFFS_COMPILE_EXPORTFS
+#include <linux/exportfs.h>
+#endif
+
+#ifdef YAFFS_COMPILE_BACKGROUND
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#endif
+#ifdef YAFFS_COMPILE_FREEZER
+#include <linux/freezer.h>
+#endif
+
+#include <asm/div64.h>
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+
+#include <linux/statfs.h>
+
+#define UnlockPage(p) unlock_page(p)
+#define Page_Uptodate(page)	test_bit(PG_uptodate, &(page)->flags)
+
+/* FIXME: use sb->s_id instead ? */
+#define yaffs_devname(sb, buf)	bdevname(sb->s_bdev, buf)
+
+#else
+
+#include <linux/locks.h>
+#define	BDEVNAME_SIZE		0
+#define	yaffs_devname(sb, buf)	kdevname(sb->s_dev)
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0))
+/* added NCB 26/5/2006 for 2.4.25-vrs2-tcl1 kernel */
+#define __user
+#endif
+
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
+#define YPROC_ROOT  (&proc_root)
+#else
+#define YPROC_ROOT  NULL
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
+#define Y_INIT_TIMER(a)	init_timer(a)
+#else
+#define Y_INIT_TIMER(a)	init_timer_on_stack(a)
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 27))
+#define YAFFS_USE_WRITE_BEGIN_END 1
+#else
+#define YAFFS_USE_WRITE_BEGIN_END 0
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+#define YAFFS_SUPER_HAS_DIRTY
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
+#define set_nlink(inode, count)  do { (inode)->i_nlink = (count); } while (0)
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 28))
+static uint32_t YCALCBLOCKS(uint64_t partition_size, uint32_t block_size)
+{
+	uint64_t result = partition_size;
+	do_div(result, block_size);
+	return (uint32_t) result;
+}
+#else
+#define YCALCBLOCKS(s, b) ((s)/(b))
+#endif
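+
+/*
+ * Worked example (informational): a 512 MiB partition with 128 KiB
+ * erase blocks gives YCALCBLOCKS(512 * 1024 * 1024, 128 * 1024) ==
+ * 4096.  do_div() is used on the 64-bit path because a plain 64-by-32
+ * division is not available on all 32-bit kernels.
+ */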
+
+#include <linux/uaccess.h>
+#include <linux/mtd/mtd.h>
+
+#include "yportenv.h"
+#include "yaffs_trace.h"
+#include "yaffs_guts.h"
+#include "yaffs_attribs.h"
+
+#include "yaffs_linux.h"
+
+#include "yaffs_mtdif.h"
+#include "yaffs_packedtags2.h"
+#include "yaffs_getblockinfo.h"
+
+unsigned int yaffs_trace_mask =
+		YAFFS_TRACE_BAD_BLOCKS |
+		YAFFS_TRACE_ALWAYS |
+		0;
+
+unsigned int yaffs_wr_attempts = YAFFS_WR_ATTEMPTS;
+unsigned int yaffs_auto_checkpoint = 1;
+unsigned int yaffs_gc_control = 1;
+unsigned int yaffs_bg_enable = 1;
+unsigned int yaffs_auto_select = 1;
+/* Module Parameters */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+module_param(yaffs_trace_mask, uint, 0644);
+module_param(yaffs_wr_attempts, uint, 0644);
+module_param(yaffs_auto_checkpoint, uint, 0644);
+module_param(yaffs_gc_control, uint, 0644);
+module_param(yaffs_bg_enable, uint, 0644);
+#else
+MODULE_PARM(yaffs_trace_mask, "i");
+MODULE_PARM(yaffs_wr_attempts, "i");
+MODULE_PARM(yaffs_auto_checkpoint, "i");
+MODULE_PARM(yaffs_gc_control, "i");
+#endif
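+
+/*
+ * Example (assumes the module is built as a module named "yaffs"): the
+ * 0644 parameters above can also be changed at runtime through sysfs,
+ * e.g.
+ *
+ *	echo 0x40000010 > /sys/module/yaffs/parameters/yaffs_trace_mask
+ *
+ * which selects YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS.
+ */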
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25))
+/* use iget and read_inode */
+#define Y_IGET(sb, inum) iget((sb), (inum))
+
+#else
+/* Call local equivalent */
+#define YAFFS_USE_OWN_IGET
+#define Y_IGET(sb, inum) yaffs_iget((sb), (inum))
+
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
+#define yaffs_inode_to_obj_lv(iptr) ((iptr)->i_private)
+#else
+#define yaffs_inode_to_obj_lv(iptr) ((iptr)->u.generic_ip)
+#endif
+
+#define yaffs_inode_to_obj(iptr) \
+	((struct yaffs_obj *)(yaffs_inode_to_obj_lv(iptr)))
+#define yaffs_dentry_to_obj(dptr) yaffs_inode_to_obj((dptr)->d_inode)
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+#define yaffs_super_to_dev(sb)	((struct yaffs_dev *)sb->s_fs_info)
+#else
+#define yaffs_super_to_dev(sb)	((struct yaffs_dev *)sb->u.generic_sbp)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+#define Y_CLEAR_INODE(i) clear_inode(i)
+#else
+#define Y_CLEAR_INODE(i) end_writeback(i)
+#endif
+
+
+#define update_dir_time(dir) do {\
+			(dir)->i_ctime = (dir)->i_mtime = CURRENT_TIME; \
+		} while (0)
+
+static void yaffs_fill_inode_from_obj(struct inode *inode,
+				      struct yaffs_obj *obj);
+
+
+static void yaffs_gross_lock(struct yaffs_dev *dev)
+{
+	yaffs_trace(YAFFS_TRACE_LOCK, "yaffs locking %p", current);
+	mutex_lock(&(yaffs_dev_to_lc(dev)->gross_lock));
+	yaffs_trace(YAFFS_TRACE_LOCK, "yaffs locked %p", current);
+}
+
+static void yaffs_gross_unlock(struct yaffs_dev *dev)
+{
+	yaffs_trace(YAFFS_TRACE_LOCK, "yaffs unlocking %p", current);
+	mutex_unlock(&(yaffs_dev_to_lc(dev)->gross_lock));
+}
+
+
+static int yaffs_readpage_nolock(struct file *f, struct page *pg)
+{
+	/* Lifted from jffs2 */
+
+	struct yaffs_obj *obj;
+	unsigned char *pg_buf;
+	int ret;
+	loff_t pos = ((loff_t) pg->index) << PAGE_CACHE_SHIFT;
+	struct yaffs_dev *dev;
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_readpage_nolock at %lld, size %08x",
+		(long long)pos,
+		(unsigned)PAGE_CACHE_SIZE);
+
+	obj = yaffs_dentry_to_obj(f->f_dentry);
+
+	dev = obj->my_dev;
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+	BUG_ON(!PageLocked(pg));
+#else
+	if (!PageLocked(pg))
+		PAGE_BUG(pg);
+#endif
+
+	pg_buf = kmap(pg);
+	/* FIXME: Can kmap fail? */
+
+	yaffs_gross_lock(dev);
+
+	ret = yaffs_file_rd(obj, pg_buf, pos, PAGE_CACHE_SIZE);
+
+	yaffs_gross_unlock(dev);
+
+	if (ret >= 0)
+		ret = 0;
+
+	if (ret) {
+		ClearPageUptodate(pg);
+		SetPageError(pg);
+	} else {
+		SetPageUptodate(pg);
+		ClearPageError(pg);
+	}
+
+	flush_dcache_page(pg);
+	kunmap(pg);
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage_nolock done");
+	return ret;
+}
+
+static int yaffs_readpage_unlock(struct file *f, struct page *pg)
+{
+	int ret = yaffs_readpage_nolock(f, pg);
+	UnlockPage(pg);
+	return ret;
+}
+
+static int yaffs_readpage(struct file *f, struct page *pg)
+{
+	int ret;
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage");
+	ret = yaffs_readpage_unlock(f, pg);
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage done");
+	return ret;
+}
+
+
+static void yaffs_set_super_dirty_val(struct yaffs_dev *dev, int val)
+{
+	struct yaffs_linux_context *lc = yaffs_dev_to_lc(dev);
+
+	if (lc)
+		lc->dirty = val;
+
+#ifdef YAFFS_SUPER_HAS_DIRTY
+	{
+		struct super_block *sb = lc ? lc->super : NULL;
+
+		if (sb)
+			sb->s_dirt = val;
+	}
+#endif
+}
+
+static void yaffs_set_super_dirty(struct yaffs_dev *dev)
+{
+	yaffs_set_super_dirty_val(dev, 1);
+}
+
+static void yaffs_clear_super_dirty(struct yaffs_dev *dev)
+{
+	yaffs_set_super_dirty_val(dev, 0);
+}
+
+static int yaffs_check_super_dirty(struct yaffs_dev *dev)
+{
+	struct yaffs_linux_context *lc = yaffs_dev_to_lc(dev);
+
+	if (lc && lc->dirty)
+		return 1;
+
+#ifdef YAFFS_SUPER_HAS_DIRTY
+	{
+		struct super_block *sb = lc ? lc->super : NULL;
+
+		if (sb && sb->s_dirt)
+			return 1;
+	}
+#endif
+	return 0;
+}
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static int yaffs_writepage(struct page *page, struct writeback_control *wbc)
+#else
+static int yaffs_writepage(struct page *page)
+#endif
+{
+	struct yaffs_dev *dev;
+	struct address_space *mapping = page->mapping;
+	struct inode *inode;
+	unsigned long end_index;
+	char *buffer;
+	struct yaffs_obj *obj;
+	int n_written = 0;
+	unsigned n_bytes;
+	loff_t i_size;
+
+	if (!mapping)
+		BUG();
+	inode = mapping->host;
+	if (!inode)
+		BUG();
+	i_size = i_size_read(inode);
+
+	end_index = i_size >> PAGE_CACHE_SHIFT;
+
+	if (page->index < end_index)
+		n_bytes = PAGE_CACHE_SIZE;
+	else {
+		n_bytes = i_size & (PAGE_CACHE_SIZE - 1);
+
+		if (page->index > end_index || !n_bytes) {
+			yaffs_trace(YAFFS_TRACE_OS,
+				"yaffs_writepage at %lld, inode size = %lld!!",
+				((loff_t)page->index) << PAGE_CACHE_SHIFT,
+				inode->i_size);
+			yaffs_trace(YAFFS_TRACE_OS,
+				"                -> don't care!!");
+
+			zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+			set_page_writeback(page);
+			unlock_page(page);
+			end_page_writeback(page);
+			return 0;
+		}
+	}
+
+	if (n_bytes != PAGE_CACHE_SIZE)
+		zero_user_segment(page, n_bytes, PAGE_CACHE_SIZE);
+
+	get_page(page);
+
+	buffer = kmap(page);
+
+	obj = yaffs_inode_to_obj(inode);
+	dev = obj->my_dev;
+	yaffs_gross_lock(dev);
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_writepage at %lld, size %08x",
+		((loff_t)page->index) << PAGE_CACHE_SHIFT, n_bytes);
+	yaffs_trace(YAFFS_TRACE_OS,
+		"writepag0: obj = %lld, ino = %lld",
+		obj->variant.file_variant.file_size, inode->i_size);
+
+	n_written = yaffs_wr_file(obj, buffer,
+				  ((loff_t)page->index) << PAGE_CACHE_SHIFT, n_bytes, 0);
+
+	yaffs_set_super_dirty(dev);
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"writepag1: obj = %lld, ino = %lld",
+		obj->variant.file_variant.file_size, inode->i_size);
+
+	yaffs_gross_unlock(dev);
+
+	kunmap(page);
+	set_page_writeback(page);
+	unlock_page(page);
+	end_page_writeback(page);
+	put_page(page);
+
+	return (n_written == n_bytes) ? 0 : -ENOSPC;
+}
+
+/*
+ * Space holding and freeing is done to ensure we have space available
+ * for write_begin/end.
+ * For now we just assume few parallel writes and check against a small
+ * number.
+ * TODO: do this with a counter to handle parallel writes better.
+ */
+
+static ssize_t yaffs_hold_space(struct file *f)
+{
+	struct yaffs_obj *obj;
+	struct yaffs_dev *dev;
+
+	int n_free_chunks;
+
+	obj = yaffs_dentry_to_obj(f->f_dentry);
+
+	dev = obj->my_dev;
+
+	yaffs_gross_lock(dev);
+
+	n_free_chunks = yaffs_get_n_free_chunks(dev);
+
+	yaffs_gross_unlock(dev);
+
+	return (n_free_chunks > 20) ? 1 : 0;
+}
+
+static void yaffs_release_space(struct file *f)
+{
+	struct yaffs_obj *obj;
+	struct yaffs_dev *dev;
+
+	obj = yaffs_dentry_to_obj(f->f_dentry);
+
+	dev = obj->my_dev;
+
+	yaffs_gross_lock(dev);
+
+	yaffs_gross_unlock(dev);
+}
+
+#if (YAFFS_USE_WRITE_BEGIN_END > 0)
+static int yaffs_write_begin(struct file *filp, struct address_space *mapping,
+			     loff_t pos, unsigned len, unsigned flags,
+			     struct page **pagep, void **fsdata)
+{
+	struct page *pg = NULL;
+	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+
+	int ret = 0;
+	int space_held = 0;
+
+	/* Get a page */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
+	pg = grab_cache_page_write_begin(mapping, index, flags);
+#else
+	pg = __grab_cache_page(mapping, index);
+#endif
+
+	*pagep = pg;
+	if (!pg) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	yaffs_trace(YAFFS_TRACE_OS,
+		"start yaffs_write_begin index %d(%x) uptodate %d",
+		(int)index, (int)index, Page_Uptodate(pg) ? 1 : 0);
+
+	/* Get fs space */
+	space_held = yaffs_hold_space(filp);
+
+	if (!space_held) {
+		ret = -ENOSPC;
+		goto out;
+	}
+
+	/* Update page if required */
+
+	if (!Page_Uptodate(pg))
+		ret = yaffs_readpage_nolock(filp, pg);
+
+	if (ret)
+		goto out;
+
+	/* Happy path return */
+	yaffs_trace(YAFFS_TRACE_OS, "end yaffs_write_begin - ok");
+
+	return 0;
+
+out:
+	yaffs_trace(YAFFS_TRACE_OS,
+		"end yaffs_write_begin fail returning %d", ret);
+	if (space_held)
+		yaffs_release_space(filp);
+	if (pg) {
+		unlock_page(pg);
+		page_cache_release(pg);
+	}
+	return ret;
+}
+
+#else
+
+static int yaffs_prepare_write(struct file *f, struct page *pg,
+			       unsigned offset, unsigned to)
+{
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_prepair_write");
+
+	if (!Page_Uptodate(pg))
+		return yaffs_readpage_nolock(f, pg);
+	return 0;
+}
+#endif
+
+
+static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
+				loff_t * pos)
+{
+	struct yaffs_obj *obj;
+	int n_written;
+	loff_t ipos;
+	struct inode *inode;
+	struct yaffs_dev *dev;
+
+	obj = yaffs_dentry_to_obj(f->f_dentry);
+
+	if (!obj) {
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_file_write: hey obj is null!");
+		return -EINVAL;
+	}
+
+	dev = obj->my_dev;
+
+	yaffs_gross_lock(dev);
+
+	inode = f->f_dentry->d_inode;
+
+	if (!S_ISBLK(inode->i_mode) && f->f_flags & O_APPEND)
+		ipos = inode->i_size;
+	else
+		ipos = *pos;
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_file_write about to write writing %u(%x) bytes to object %d at %lld",
+		(unsigned)n, (unsigned)n, obj->obj_id, ipos);
+
+	n_written = yaffs_wr_file(obj, buf, ipos, n, 0);
+
+	yaffs_set_super_dirty(dev);
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_file_write: %d(%x) bytes written",
+		(unsigned)n, (unsigned)n);
+
+	if (n_written > 0) {
+		ipos += n_written;
+		*pos = ipos;
+		if (ipos > inode->i_size) {
+			inode->i_size = ipos;
+			inode->i_blocks = (ipos + 511) >> 9;
+
+			yaffs_trace(YAFFS_TRACE_OS,
+				"yaffs_file_write size updated to %lld bytes, %d blocks",
+				ipos, (int)(inode->i_blocks));
+		}
+
+	}
+	yaffs_gross_unlock(dev);
+	return (n_written == 0) && (n > 0) ? -ENOSPC : n_written;
+}
+
+
+#if (YAFFS_USE_WRITE_BEGIN_END > 0)
+static int yaffs_write_end(struct file *filp, struct address_space *mapping,
+			   loff_t pos, unsigned len, unsigned copied,
+			   struct page *pg, void *fsdadata)
+{
+	int ret = 0;
+	void *addr, *kva;
+	uint32_t offset_into_page = pos & (PAGE_CACHE_SIZE - 1);
+
+	kva = kmap(pg);
+	addr = kva + offset_into_page;
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_write_end addr %p pos %lld n_bytes %d",
+		addr, pos, copied);
+
+	ret = yaffs_file_write(filp, addr, copied, &pos);
+
+	if (ret != copied) {
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_write_end not same size ret %d  copied %d",
+			ret, copied);
+		SetPageError(pg);
+	}
+
+	kunmap(pg);
+
+	yaffs_release_space(filp);
+	unlock_page(pg);
+	page_cache_release(pg);
+	return ret;
+}
+#else
+
+static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset,
+			      unsigned to)
+{
+	void *addr, *kva;
+
+	loff_t pos = (((loff_t) pg->index) << PAGE_CACHE_SHIFT) + offset;
+	int n_bytes = to - offset;
+	int n_written;
+
+	kva = kmap(pg);
+	addr = kva + offset;
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_commit_write addr %p pos %lld n_bytes %d",
+		addr, pos, n_bytes);
+
+	n_written = yaffs_file_write(f, addr, n_bytes, &pos);
+
+	if (n_written != n_bytes) {
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_commit_write not same size n_written %d  n_bytes %d",
+			n_written, n_bytes);
+		SetPageError(pg);
+	}
+	kunmap(pg);
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_commit_write returning %d",
+		n_written == n_bytes ? 0 : n_written);
+
+	return n_written == n_bytes ? 0 : n_written;
+}
+#endif
+
+static struct address_space_operations yaffs_file_address_operations = {
+	.readpage = yaffs_readpage,
+	.writepage = yaffs_writepage,
+#if (YAFFS_USE_WRITE_BEGIN_END > 0)
+	.write_begin = yaffs_write_begin,
+	.write_end = yaffs_write_end,
+#else
+	.prepare_write = yaffs_prepare_write,
+	.commit_write = yaffs_commit_write,
+#endif
+};
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+#define YCRED_FSUID()	from_kuid(&init_user_ns, current_fsuid())
+#define YCRED_FSGID()	from_kgid(&init_user_ns, current_fsgid())
+#else
+#define YCRED_FSUID()	YCRED(current)->fsuid
+#define YCRED_FSGID()	YCRED(current)->fsgid
+
+static inline uid_t i_uid_read(const struct inode *inode)
+{
+	return inode->i_uid;
+}
+
+static inline gid_t i_gid_read(const struct inode *inode)
+{
+	return inode->i_gid;
+}
+
+static inline void i_uid_write(struct inode *inode, uid_t uid)
+{
+	inode->i_uid = uid;
+}
+
+static inline void i_gid_write(struct inode *inode, gid_t gid)
+{
+	inode->i_gid = gid;
+}
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+static int yaffs_file_flush(struct file *file, fl_owner_t id)
+#else
+static int yaffs_file_flush(struct file *file)
+#endif
+{
+	struct yaffs_obj *obj = yaffs_dentry_to_obj(file->f_dentry);
+
+	struct yaffs_dev *dev = obj->my_dev;
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_file_flush object %d (%s)",
+		obj->obj_id,
+		obj->dirty ? "dirty" : "clean");
+
+	yaffs_gross_lock(dev);
+
+	yaffs_flush_file(obj, 1, 0);
+
+	yaffs_gross_unlock(dev);
+
+	return 0;
+}
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+static int yaffs_sync_object(struct file *file, loff_t start, loff_t end, int datasync)
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
+static int yaffs_sync_object(struct file *file, int datasync)
+#else
+static int yaffs_sync_object(struct file *file, struct dentry *dentry,
+			     int datasync)
+#endif
+{
+	struct yaffs_obj *obj;
+	struct yaffs_dev *dev;
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
+	struct dentry *dentry = file->f_path.dentry;
+#endif
+
+	obj = yaffs_dentry_to_obj(dentry);
+
+	dev = obj->my_dev;
+
+	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC,
+		"yaffs_sync_object");
+	yaffs_gross_lock(dev);
+	yaffs_flush_file(obj, 1, datasync);
+	yaffs_gross_unlock(dev);
+	return 0;
+}
+
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22))
+static const struct file_operations yaffs_file_operations = {
+	.read = do_sync_read,
+	.write = do_sync_write,
+	.aio_read = generic_file_aio_read,
+	.aio_write = generic_file_aio_write,
+	.mmap = generic_file_mmap,
+	.flush = yaffs_file_flush,
+	.fsync = yaffs_sync_object,
+	.splice_read = generic_file_splice_read,
+	.splice_write = generic_file_splice_write,
+	.llseek = generic_file_llseek,
+};
+
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
+
+static const struct file_operations yaffs_file_operations = {
+	.read = do_sync_read,
+	.write = do_sync_write,
+	.aio_read = generic_file_aio_read,
+	.aio_write = generic_file_aio_write,
+	.mmap = generic_file_mmap,
+	.flush = yaffs_file_flush,
+	.fsync = yaffs_sync_object,
+	.sendfile = generic_file_sendfile,
+};
+
+#else
+
+static const struct file_operations yaffs_file_operations = {
+	.read = generic_file_read,
+	.write = generic_file_write,
+	.mmap = generic_file_mmap,
+	.flush = yaffs_file_flush,
+	.fsync = yaffs_sync_object,
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+	.sendfile = generic_file_sendfile,
+#endif
+};
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25))
+static void zero_user_segment(struct page *page, unsigned start, unsigned end)
+{
+	void *kaddr = kmap_atomic(page, KM_USER0);
+	memset(kaddr + start, 0, end - start);
+	kunmap_atomic(kaddr, KM_USER0);
+	flush_dcache_page(page);
+}
+#endif
+
+
+static int yaffs_vfs_setsize(struct inode *inode, loff_t newsize)
+{
+#ifdef YAFFS_USE_TRUNCATE_SETSIZE
+	truncate_setsize(inode, newsize);
+	return 0;
+#else
+	truncate_inode_pages(&inode->i_data, newsize);
+	return 0;
+#endif
+
+}
+
+
+static int yaffs_vfs_setattr(struct inode *inode, struct iattr *attr)
+{
+#ifdef YAFFS_USE_SETATTR_COPY
+	setattr_copy(inode, attr);
+	return 0;
+#else
+	return inode_setattr(inode, attr);
+#endif
+
+}
+
+static int yaffs_setattr(struct dentry *dentry, struct iattr *attr)
+{
+	struct inode *inode = dentry->d_inode;
+	int error = 0;
+	struct yaffs_dev *dev;
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_setattr of object %d",
+		yaffs_inode_to_obj(inode)->obj_id);
+#if 0
+	/* Fail if a requested resize >= 2GB */
+	if (attr->ia_valid & ATTR_SIZE && (attr->ia_size >> 31))
+		error = -EINVAL;
+#endif
+
+	if (error == 0)
+		error = inode_change_ok(inode, attr);
+	if (error == 0) {
+		int result;
+
+		error = yaffs_vfs_setattr(inode, attr);
+		yaffs_trace(YAFFS_TRACE_OS, "inode_setattr called");
+		if (attr->ia_valid & ATTR_SIZE) {
+			yaffs_vfs_setsize(inode, attr->ia_size);
+			inode->i_blocks = (inode->i_size + 511) >> 9;
+		}
+		dev = yaffs_inode_to_obj(inode)->my_dev;
+		if (attr->ia_valid & ATTR_SIZE) {
+			yaffs_trace(YAFFS_TRACE_OS,
+				"resize to %d(%x)",
+				(int)(attr->ia_size),
+				(int)(attr->ia_size));
+		}
+		yaffs_gross_lock(dev);
+		result = yaffs_set_attribs(yaffs_inode_to_obj(inode), attr);
+		if (result == YAFFS_OK) {
+			error = 0;
+		} else {
+			error = -EPERM;
+		}
+		yaffs_gross_unlock(dev);
+
+	}
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_setattr done returning %d", error);
+
+	return error;
+}
+
+static int yaffs_setxattr(struct dentry *dentry, const char *name,
+		   const void *value, size_t size, int flags)
+{
+	struct inode *inode = dentry->d_inode;
+	int error = 0;
+	struct yaffs_dev *dev;
+	struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_setxattr of object %d", obj->obj_id);
+
+	if (error == 0) {
+		int result;
+		dev = obj->my_dev;
+		yaffs_gross_lock(dev);
+		result = yaffs_set_xattrib(obj, name, value, size, flags);
+		if (result == YAFFS_OK)
+			error = 0;
+		else if (result < 0)
+			error = result;
+		yaffs_gross_unlock(dev);
+
+	}
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_setxattr done returning %d", error);
+
+	return error;
+}
+
+static ssize_t yaffs_getxattr(struct dentry * dentry, const char *name,
+			void *buff, size_t size)
+{
+	struct inode *inode = dentry->d_inode;
+	int error = 0;
+	struct yaffs_dev *dev;
+	struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_getxattr \"%s\" from object %d",
+		name, obj->obj_id);
+
+	if (error == 0) {
+		dev = obj->my_dev;
+		yaffs_gross_lock(dev);
+		error = yaffs_get_xattrib(obj, name, buff, size);
+		yaffs_gross_unlock(dev);
+
+	}
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_getxattr done returning %d", error);
+
+	return error;
+}
+
+static int yaffs_removexattr(struct dentry *dentry, const char *name)
+{
+	struct inode *inode = dentry->d_inode;
+	int error = 0;
+	struct yaffs_dev *dev;
+	struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_removexattr of object %d", obj->obj_id);
+
+	if (error == 0) {
+		int result;
+		dev = obj->my_dev;
+		yaffs_gross_lock(dev);
+		result = yaffs_remove_xattrib(obj, name);
+		if (result == YAFFS_OK)
+			error = 0;
+		else if (result < 0)
+			error = result;
+		yaffs_gross_unlock(dev);
+
+	}
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_removexattr done returning %d", error);
+
+	return error;
+}
+
+static ssize_t yaffs_listxattr(struct dentry * dentry, char *buff, size_t size)
+{
+	struct inode *inode = dentry->d_inode;
+	int error = 0;
+	struct yaffs_dev *dev;
+	struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_listxattr of object %d", obj->obj_id);
+
+	if (error == 0) {
+		dev = obj->my_dev;
+		yaffs_gross_lock(dev);
+		error = yaffs_list_xattrib(obj, buff, size);
+		yaffs_gross_unlock(dev);
+
+	}
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_listxattr done returning %d", error);
+
+	return error;
+}
+
+
+static const struct inode_operations yaffs_file_inode_operations = {
+	.setattr = yaffs_setattr,
+	.setxattr = yaffs_setxattr,
+	.getxattr = yaffs_getxattr,
+	.listxattr = yaffs_listxattr,
+	.removexattr = yaffs_removexattr,
+};
+
+
+static int yaffs_readlink(struct dentry *dentry, char __user * buffer,
+			  int buflen)
+{
+	unsigned char *alias;
+	int ret;
+
+	struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
+
+	yaffs_gross_lock(dev);
+
+	alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry));
+
+	yaffs_gross_unlock(dev);
+
+	if (!alias)
+		return -ENOMEM;
+
+	ret = vfs_readlink(dentry, buffer, buflen, alias);
+	kfree(alias);
+	return ret;
+}
+
+#if (YAFFS_NEW_FOLLOW_LINK == 1)
+static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	void *ret;
+#else
+static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+	int ret;
+#endif
+	unsigned char *alias;
+	int ret_int = 0;
+	struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
+
+	yaffs_gross_lock(dev);
+
+	alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry));
+	yaffs_gross_unlock(dev);
+
+	if (!alias) {
+		ret_int = -ENOMEM;
+		goto out;
+	}
+#if (YAFFS_NEW_FOLLOW_LINK == 1)
+	nd_set_link(nd, alias);
+	ret = alias;
+out:
+	if (ret_int)
+		ret = ERR_PTR(ret_int);
+	return ret;
+#else
+	ret = vfs_follow_link(nd, alias);
+	kfree(alias);
+out:
+	if (ret_int)
+		ret = ret_int;
+	return ret;
+#endif
+}
+
+
+#ifdef YAFFS_HAS_PUT_INODE
+
+/* For now put inode is just for debugging
+ * Put inode is called when the inode **structure** is put.
+ */
+static void yaffs_put_inode(struct inode *inode)
+{
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_put_inode: ino %d, count %d",
+		(int)inode->i_ino, atomic_read(&inode->i_count));
+}
+#endif
+
+#if (YAFFS_NEW_FOLLOW_LINK == 1)
+void yaffs_put_link(struct dentry *dentry, struct nameidata *nd, void *alias)
+{
+	kfree(alias);
+}
+#endif
+
+static const struct inode_operations yaffs_symlink_inode_operations = {
+	.readlink = yaffs_readlink,
+	.follow_link = yaffs_follow_link,
+#if (YAFFS_NEW_FOLLOW_LINK == 1)
+	.put_link = yaffs_put_link,
+#endif
+	.setattr = yaffs_setattr,
+	.setxattr = yaffs_setxattr,
+	.getxattr = yaffs_getxattr,
+	.listxattr = yaffs_listxattr,
+	.removexattr = yaffs_removexattr,
+};
+
+#ifdef YAFFS_USE_OWN_IGET
+
+static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino)
+{
+	struct inode *inode;
+	struct yaffs_obj *obj;
+	struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_iget for %lu", ino);
+
+	inode = iget_locked(sb, ino);
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+	if (!(inode->i_state & I_NEW))
+		return inode;
+
+	/* NB This is called as a side effect of other functions, but
+	 * we had to release the lock to prevent deadlocks, so
+	 * need to lock again.
+	 */
+
+	yaffs_gross_lock(dev);
+
+	obj = yaffs_find_by_number(dev, inode->i_ino);
+
+	yaffs_fill_inode_from_obj(inode, obj);
+
+	yaffs_gross_unlock(dev);
+
+	unlock_new_inode(inode);
+	return inode;
+}
+
+#else
+
+static void yaffs_read_inode(struct inode *inode)
+{
+	/* NB This is called as a side effect of other functions, but
+	 * we had to release the lock to prevent deadlocks, so
+	 * need to lock again.
+	 */
+
+	struct yaffs_obj *obj;
+	struct yaffs_dev *dev = yaffs_super_to_dev(inode->i_sb);
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_read_inode for %d", (int)inode->i_ino);
+
+	if (current != yaffs_dev_to_lc(dev)->readdir_process)
+		yaffs_gross_lock(dev);
+
+	obj = yaffs_find_by_number(dev, inode->i_ino);
+
+	yaffs_fill_inode_from_obj(inode, obj);
+
+	if (current != yaffs_dev_to_lc(dev)->readdir_process)
+		yaffs_gross_unlock(dev);
+}
+
+#endif
+
+
+
+struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
+			      struct yaffs_obj *obj)
+{
+	struct inode *inode;
+
+	if (!sb) {
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_get_inode for NULL super_block!!");
+		return NULL;
+
+	}
+
+	if (!obj) {
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_get_inode for NULL object!!");
+		return NULL;
+
+	}
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_get_inode for object %d", obj->obj_id);
+
+	inode = Y_IGET(sb, obj->obj_id);
+	if (IS_ERR(inode))
+		return NULL;
+
+	/* NB Side effect: iget calls back to yaffs_read_inode(). */
+	/* iget also increments the inode's i_count */
+	/* NB You can't be holding gross_lock or deadlock will happen! */
+
+	return inode;
+}
+
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
+#define YCRED(x) x
+#else
+#define YCRED(x) (x->cred)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+static int yaffs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
+		       dev_t rdev)
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+		       dev_t rdev)
+#else
+static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+		       int rdev)
+#endif
+{
+	struct inode *inode;
+
+	struct yaffs_obj *obj = NULL;
+	struct yaffs_dev *dev;
+
+	struct yaffs_obj *parent = yaffs_inode_to_obj(dir);
+
+	int error = -ENOSPC;
+
+	uid_t uid = YCRED_FSUID();
+	gid_t gid = (dir->i_mode & S_ISGID) ? i_gid_read(dir) : YCRED_FSGID();
+
+	if ((dir->i_mode & S_ISGID) && S_ISDIR(mode))
+		mode |= S_ISGID;
+
+	if (parent) {
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_mknod: parent object %d type %d",
+			parent->obj_id, parent->variant_type);
+	} else {
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_mknod: could not get parent object");
+		return -EPERM;
+	}
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_mknod: making object for %s, mode %x dev %x",
+		dentry->d_name.name, mode, rdev);
+
+	dev = parent->my_dev;
+
+	yaffs_gross_lock(dev);
+
+	switch (mode & S_IFMT) {
+	default:
+		/* Special (socket, fifo, device...) */
+		yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making special");
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+		obj =
+		    yaffs_create_special(parent, dentry->d_name.name, mode, uid,
+					 gid, old_encode_dev(rdev));
+#else
+		obj =
+		    yaffs_create_special(parent, dentry->d_name.name, mode, uid,
+					 gid, rdev);
+#endif
+		break;
+	case S_IFREG:		/* file          */
+		yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making file");
+		obj = yaffs_create_file(parent, dentry->d_name.name, mode, uid,
+					gid);
+		break;
+	case S_IFDIR:		/* directory */
+		yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making directory");
+		obj = yaffs_create_dir(parent, dentry->d_name.name, mode,
+				       uid, gid);
+		break;
+	case S_IFLNK:		/* symlink */
+		yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making symlink");
+		obj = NULL;	/* Do we ever get here? */
+		break;
+	}
+
+	/* Cannot call yaffs_get_inode() with the gross lock held */
+	yaffs_gross_unlock(dev);
+
+	if (obj) {
+		inode = yaffs_get_inode(dir->i_sb, mode, rdev, obj);
+		d_instantiate(dentry, inode);
+		update_dir_time(dir);
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_mknod created object %d count = %d",
+			obj->obj_id, atomic_read(&inode->i_count));
+		error = 0;
+		yaffs_fill_inode_from_obj(dir, parent);
+	} else {
+		yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod failed making object");
+		error = -ENOMEM;
+	}
+
+	return error;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+#else
+static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+#endif
+{
+	int ret_val;
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_mkdir");
+	ret_val = yaffs_mknod(dir, dentry, mode | S_IFDIR, 0);
+	return ret_val;
+}
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+			bool dummy)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+			struct nameidata *n)
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
+			struct nameidata *n)
+#else
+static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode)
+#endif
+{
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_create");
+	return yaffs_mknod(dir, dentry, mode | S_IFREG, 0);
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
+				   unsigned int dummy)
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
+				   struct nameidata *n)
+#else
+static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry)
+#endif
+{
+	struct yaffs_obj *obj;
+	struct inode *inode = NULL;	/* NCB 2.5/2.6 needs NULL here */
+
+	struct yaffs_dev *dev = yaffs_inode_to_obj(dir)->my_dev;
+
+	if (current != yaffs_dev_to_lc(dev)->readdir_process)
+		yaffs_gross_lock(dev);
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_lookup for %d:%s",
+		yaffs_inode_to_obj(dir)->obj_id, dentry->d_name.name);
+
+	obj = yaffs_find_by_name(yaffs_inode_to_obj(dir), dentry->d_name.name);
+
+	obj = yaffs_get_equivalent_obj(obj);	/* in case it was a hardlink */
+
+	/* Can't hold gross lock when calling yaffs_get_inode() */
+	if (current != yaffs_dev_to_lc(dev)->readdir_process)
+		yaffs_gross_unlock(dev);
+
+	if (obj) {
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_lookup found %d", obj->obj_id);
+
+		inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
+	} else {
+		yaffs_trace(YAFFS_TRACE_OS, "yaffs_lookup not found");
+
+	}
+
+/* Added NCB for 2.5/2.6 compatibility - forces the add even if the inode
+ * is NULL, which creates the dentry hash */
+	d_add(dentry, inode);
+
+	return NULL;
+}
+
+/*
+ * Create a link...
+ */
+static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
+		      struct dentry *dentry)
+{
+	struct inode *inode = old_dentry->d_inode;
+	struct yaffs_obj *obj = NULL;
+	struct yaffs_obj *link = NULL;
+	struct yaffs_dev *dev;
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_link");
+
+	obj = yaffs_inode_to_obj(inode);
+	dev = obj->my_dev;
+
+	yaffs_gross_lock(dev);
+
+	if (!S_ISDIR(inode->i_mode))	/* Don't link directories */
+		link =
+		    yaffs_link_obj(yaffs_inode_to_obj(dir), dentry->d_name.name,
+				   obj);
+
+	if (link) {
+		set_nlink(old_dentry->d_inode, yaffs_get_obj_link_count(obj));
+		d_instantiate(dentry, old_dentry->d_inode);
+		atomic_inc(&old_dentry->d_inode->i_count);
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_link link count %d i_count %d",
+			old_dentry->d_inode->i_nlink,
+			atomic_read(&old_dentry->d_inode->i_count));
+	}
+
+	yaffs_gross_unlock(dev);
+
+	if (link) {
+		update_dir_time(dir);
+		return 0;
+	}
+
+	return -EPERM;
+}
+
+static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
+			 const char *symname)
+{
+	struct yaffs_obj *obj;
+	struct yaffs_dev *dev;
+	uid_t uid = YCRED_FSUID();
+	gid_t gid = (dir->i_mode & S_ISGID) ? i_gid_read(dir) : YCRED_FSGID();
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_symlink");
+
+	if (strnlen(dentry->d_name.name, YAFFS_MAX_NAME_LENGTH + 1) >
+				YAFFS_MAX_NAME_LENGTH)
+		return -ENAMETOOLONG;
+
+	if (strnlen(symname, YAFFS_MAX_ALIAS_LENGTH + 1) >
+				YAFFS_MAX_ALIAS_LENGTH)
+		return -ENAMETOOLONG;
+
+	dev = yaffs_inode_to_obj(dir)->my_dev;
+	yaffs_gross_lock(dev);
+	obj = yaffs_create_symlink(yaffs_inode_to_obj(dir), dentry->d_name.name,
+				   S_IFLNK | S_IRWXUGO, uid, gid, symname);
+	yaffs_gross_unlock(dev);
+
+	if (obj) {
+		struct inode *inode;
+
+		inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
+		d_instantiate(dentry, inode);
+		update_dir_time(dir);
+		yaffs_trace(YAFFS_TRACE_OS, "symlink created OK");
+		return 0;
+	} else {
+		yaffs_trace(YAFFS_TRACE_OS, "symlink not created");
+	}
+
+	return -ENOMEM;
+}
+
+/*
+ * The VFS layer already does all the dentry stuff for rename.
+ *
+ * NB: POSIX says you can rename an object over an old object of the same name
+ */
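+/*
+ * Illustrative consequence of the logic below: renaming onto an existing
+ * file or empty directory succeeds (the old target is unlinked internally
+ * via the shadowing mechanism), while renaming onto a non-empty directory
+ * fails with -ENOTEMPTY, as rename(2) requires.
+ */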
+static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
+			struct inode *new_dir, struct dentry *new_dentry)
+{
+	struct yaffs_dev *dev;
+	int ret_val = YAFFS_FAIL;
+	struct yaffs_obj *target;
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_rename");
+	dev = yaffs_inode_to_obj(old_dir)->my_dev;
+
+	yaffs_gross_lock(dev);
+
+	/* Check if the target is an existing directory that is not empty. */
+	target = yaffs_find_by_name(yaffs_inode_to_obj(new_dir),
+				    new_dentry->d_name.name);
+
+	if (target && target->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY &&
+	    !list_empty(&target->variant.dir_variant.children)) {
+
+		yaffs_trace(YAFFS_TRACE_OS, "target is non-empty dir");
+
+		ret_val = YAFFS_FAIL;
+	} else {
+		/* Unlinking is now done internally, using the shadowing mechanism */
+		yaffs_trace(YAFFS_TRACE_OS, "calling yaffs_rename_obj");
+
+		ret_val = yaffs_rename_obj(yaffs_inode_to_obj(old_dir),
+					   old_dentry->d_name.name,
+					   yaffs_inode_to_obj(new_dir),
+					   new_dentry->d_name.name);
+	}
+	yaffs_gross_unlock(dev);
+
+	if (ret_val == YAFFS_OK) {
+		if (target)
+			inode_dec_link_count(new_dentry->d_inode);
+
+		update_dir_time(old_dir);
+		if (old_dir != new_dir)
+			update_dir_time(new_dir);
+		return 0;
+	} else {
+		return -ENOTEMPTY;
+	}
+}
+
+
+
+
+static int yaffs_unlink(struct inode *dir, struct dentry *dentry)
+{
+	int ret_val;
+
+	struct yaffs_dev *dev;
+	struct yaffs_obj *obj;
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_unlink %d:%s",
+		(int)(dir->i_ino), dentry->d_name.name);
+	obj = yaffs_inode_to_obj(dir);
+	dev = obj->my_dev;
+
+	yaffs_gross_lock(dev);
+
+	ret_val = yaffs_unlinker(obj, dentry->d_name.name);
+
+	if (ret_val == YAFFS_OK) {
+		inode_dec_link_count(dentry->d_inode);
+		dir->i_version++;
+		yaffs_gross_unlock(dev);
+		update_dir_time(dir);
+		return 0;
+	}
+	yaffs_gross_unlock(dev);
+	return -ENOTEMPTY;
+}
+
+
+
+static const struct inode_operations yaffs_dir_inode_operations = {
+	.create = yaffs_create,
+	.lookup = yaffs_lookup,
+	.link = yaffs_link,
+	.unlink = yaffs_unlink,
+	.symlink = yaffs_symlink,
+	.mkdir = yaffs_mkdir,
+	.rmdir = yaffs_unlink,
+	.mknod = yaffs_mknod,
+	.rename = yaffs_rename,
+	.setattr = yaffs_setattr,
+	.setxattr = yaffs_setxattr,
+	.getxattr = yaffs_getxattr,
+	.listxattr = yaffs_listxattr,
+	.removexattr = yaffs_removexattr,
+};
+
+/*-----------------------------------------------------------------*/
+/* A directory search context allows us to unlock access to yaffs during
+ * filldir without being broken by concurrent modification of the directory.
+ * This is similar to the tried and tested mechanism used in yaffs direct.
+ *
+ * A search context iterates along a doubly linked list of siblings in the
+ * directory. If the iterating object is deleted then this would corrupt
+ * the list iteration, likely causing a crash. The search context avoids
+ * this by using the remove_obj_fn to move the search context to the
+ * next object before the object is deleted.
+ *
+ * Many readdirs (and thus search contexts) may be alive simultaneously so
+ * each struct yaffs_dev has a list of these.
+ *
+ * A search context lives for the duration of a readdir.
+ *
+ * All these functions must be called while yaffs is locked.
+ */
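+/*
+ * Illustrative sketch (not driver code) of the lifecycle described above,
+ * where emit_entry() is a hypothetical stand-in for the filldir callout
+ * done by yaffs_readdir():
+ *
+ *	yaffs_gross_lock(dev);
+ *	sc = yaffs_new_search(dir_obj);
+ *	while (sc->next_return) {
+ *		emit_entry(sc->next_return);	// may drop/retake the lock
+ *		yaffs_search_advance(sc);
+ *	}
+ *	yaffs_search_end(sc);
+ *	yaffs_gross_unlock(dev);
+ */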
+
+struct yaffs_search_context {
+	struct yaffs_dev *dev;
+	struct yaffs_obj *dir_obj;
+	struct yaffs_obj *next_return;
+	struct list_head others;
+};
+
+/*
+ * yaffs_new_search() creates a new search context, initialises it and
+ * adds it to the device's search context list.
+ *
+ * Called at start of readdir.
+ */
+static struct yaffs_search_context *yaffs_new_search(struct yaffs_obj *dir)
+{
+	struct yaffs_dev *dev = dir->my_dev;
+	struct yaffs_search_context *sc =
+	    kmalloc(sizeof(struct yaffs_search_context), GFP_NOFS);
+	if (sc) {
+		sc->dir_obj = dir;
+		sc->dev = dev;
+		if (list_empty(&sc->dir_obj->variant.dir_variant.children))
+			sc->next_return = NULL;
+		else
+			sc->next_return =
+			    list_entry(dir->variant.dir_variant.children.next,
+				       struct yaffs_obj, siblings);
+		INIT_LIST_HEAD(&sc->others);
+		list_add(&sc->others, &(yaffs_dev_to_lc(dev)->search_contexts));
+	}
+	return sc;
+}
+
+/*
+ * yaffs_search_end() disposes of a search context and cleans up.
+ */
+static void yaffs_search_end(struct yaffs_search_context *sc)
+{
+	if (sc) {
+		list_del(&sc->others);
+		kfree(sc);
+	}
+}
+
+/*
+ * yaffs_search_advance() moves a search context to the next object.
+ * Called when the search iterates or when an object removal causes
+ * the search context to be moved to the next object.
+ */
+static void yaffs_search_advance(struct yaffs_search_context *sc)
+{
+	if (!sc)
+		return;
+
+	if (sc->next_return == NULL ||
+	    list_empty(&sc->dir_obj->variant.dir_variant.children))
+		sc->next_return = NULL;
+	else {
+		struct list_head *next = sc->next_return->siblings.next;
+
+		if (next == &sc->dir_obj->variant.dir_variant.children)
+			sc->next_return = NULL;	/* end of list */
+		else
+			sc->next_return =
+			    list_entry(next, struct yaffs_obj, siblings);
+	}
+}
+
+/*
+ * yaffs_remove_obj_callback() is called when an object is unlinked.
+ * We check open search contexts and advance any which are currently
+ * on the object being iterated.
+ */
+static void yaffs_remove_obj_callback(struct yaffs_obj *obj)
+{
+
+	struct list_head *i;
+	struct yaffs_search_context *sc;
+	struct list_head *search_contexts =
+	    &(yaffs_dev_to_lc(obj->my_dev)->search_contexts);
+
+	/* Iterate through the directory search contexts.
+	 * If any are currently on the object being removed, then advance
+	 * the search context to the next object to prevent a hanging pointer.
+	 */
+	list_for_each(i, search_contexts) {
+		sc = list_entry(i, struct yaffs_search_context, others);
+		if (sc->next_return == obj)
+			yaffs_search_advance(sc);
+	}
+
+}
+
+
+/*-----------------------------------------------------------------*/
+
+static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir)
+{
+	struct yaffs_obj *obj;
+	struct yaffs_dev *dev;
+	struct yaffs_search_context *sc;
+	struct inode *inode = f->f_dentry->d_inode;
+	unsigned long offset, curoffs;
+	struct yaffs_obj *l;
+	int ret_val = 0;
+
+	char name[YAFFS_MAX_NAME_LENGTH + 1];
+
+	obj = yaffs_dentry_to_obj(f->f_dentry);
+	dev = obj->my_dev;
+
+	yaffs_gross_lock(dev);
+
+	yaffs_dev_to_lc(dev)->readdir_process = current;
+
+	offset = f->f_pos;
+
+	sc = yaffs_new_search(obj);
+	if (!sc) {
+		ret_val = -ENOMEM;
+		goto out;
+	}
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_readdir: starting at %d", (int)offset);
+
+	if (offset == 0) {
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_readdir: entry . ino %d",
+			(int)inode->i_ino);
+		yaffs_gross_unlock(dev);
+		if (filldir(dirent, ".", 1, offset, inode->i_ino, DT_DIR) < 0) {
+			yaffs_gross_lock(dev);
+			goto out;
+		}
+		yaffs_gross_lock(dev);
+		offset++;
+		f->f_pos++;
+	}
+	if (offset == 1) {
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_readdir: entry .. ino %d",
+			(int)f->f_dentry->d_parent->d_inode->i_ino);
+		yaffs_gross_unlock(dev);
+		if (filldir(dirent, "..", 2, offset,
+			    f->f_dentry->d_parent->d_inode->i_ino,
+			    DT_DIR) < 0) {
+			yaffs_gross_lock(dev);
+			goto out;
+		}
+		yaffs_gross_lock(dev);
+		offset++;
+		f->f_pos++;
+	}
+
+	curoffs = 1;
+
+	/* If the directory has changed since the open or last call to
+	   readdir, rewind to after the 2 canned entries. */
+	if (f->f_version != inode->i_version) {
+		offset = 2;
+		f->f_pos = offset;
+		f->f_version = inode->i_version;
+	}
+
+	while (sc->next_return) {
+		curoffs++;
+		l = sc->next_return;
+		if (curoffs >= offset) {
+			int this_inode = yaffs_get_obj_inode(l);
+			int this_type = yaffs_get_obj_type(l);
+
+			yaffs_get_obj_name(l, name, YAFFS_MAX_NAME_LENGTH + 1);
+			yaffs_trace(YAFFS_TRACE_OS,
+				"yaffs_readdir: %s inode %d",
+				name, yaffs_get_obj_inode(l));
+
+			yaffs_gross_unlock(dev);
+
+			if (filldir(dirent,
+				    name,
+				    strlen(name),
+				    offset, this_inode, this_type) < 0) {
+				yaffs_gross_lock(dev);
+				goto out;
+			}
+
+			yaffs_gross_lock(dev);
+
+			offset++;
+			f->f_pos++;
+		}
+		yaffs_search_advance(sc);
+	}
+
+out:
+	yaffs_search_end(sc);
+	yaffs_dev_to_lc(dev)->readdir_process = NULL;
+	yaffs_gross_unlock(dev);
+
+	return ret_val;
+}
+
+static const struct file_operations yaffs_dir_operations = {
+	.read = generic_read_dir,
+	.readdir = yaffs_readdir,
+	.fsync = yaffs_sync_object,
+	.llseek = generic_file_llseek,
+};
+
+static void yaffs_fill_inode_from_obj(struct inode *inode,
+				      struct yaffs_obj *obj)
+{
+	if (inode && obj) {
+
+		/* Check mode against the variant type and attempt to repair if broken. */
+		u32 mode = obj->yst_mode;
+		switch (obj->variant_type) {
+		case YAFFS_OBJECT_TYPE_FILE:
+			if (!S_ISREG(mode)) {
+				obj->yst_mode &= ~S_IFMT;
+				obj->yst_mode |= S_IFREG;
+			}
+
+			break;
+		case YAFFS_OBJECT_TYPE_SYMLINK:
+			if (!S_ISLNK(mode)) {
+				obj->yst_mode &= ~S_IFMT;
+				obj->yst_mode |= S_IFLNK;
+			}
+
+			break;
+		case YAFFS_OBJECT_TYPE_DIRECTORY:
+			if (!S_ISDIR(mode)) {
+				obj->yst_mode &= ~S_IFMT;
+				obj->yst_mode |= S_IFDIR;
+			}
+
+			break;
+		case YAFFS_OBJECT_TYPE_UNKNOWN:
+		case YAFFS_OBJECT_TYPE_HARDLINK:
+		case YAFFS_OBJECT_TYPE_SPECIAL:
+		default:
+			/* TODO? */
+			break;
+		}
+
+		inode->i_flags |= S_NOATIME;
+
+		inode->i_ino = obj->obj_id;
+		inode->i_mode = obj->yst_mode;
+		i_uid_write(inode, obj->yst_uid);
+		i_gid_write(inode, obj->yst_gid);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+		inode->i_blksize = inode->i_sb->s_blocksize;
+#endif
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+
+		inode->i_rdev = old_decode_dev(obj->yst_rdev);
+		inode->i_atime.tv_sec = (time_t) (obj->yst_atime);
+		inode->i_atime.tv_nsec = 0;
+		inode->i_mtime.tv_sec = (time_t) obj->yst_mtime;
+		inode->i_mtime.tv_nsec = 0;
+		inode->i_ctime.tv_sec = (time_t) obj->yst_ctime;
+		inode->i_ctime.tv_nsec = 0;
+#else
+		inode->i_rdev = obj->yst_rdev;
+		inode->i_atime = obj->yst_atime;
+		inode->i_mtime = obj->yst_mtime;
+		inode->i_ctime = obj->yst_ctime;
+#endif
+		inode->i_size = yaffs_get_obj_length(obj);
+		inode->i_blocks = (inode->i_size + 511) >> 9;
+
+		set_nlink(inode, yaffs_get_obj_link_count(obj));
+
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_fill_inode mode %x uid %d gid %d size %lld count %d",
+			inode->i_mode, i_uid_read(inode), i_gid_read(inode),
+			inode->i_size, atomic_read(&inode->i_count));
+
+		switch (obj->yst_mode & S_IFMT) {
+		default:	/* fifo, device or socket */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+			init_special_inode(inode, obj->yst_mode,
+					   old_decode_dev(obj->yst_rdev));
+#else
+			init_special_inode(inode, obj->yst_mode,
+					   (dev_t) (obj->yst_rdev));
+#endif
+			break;
+		case S_IFREG:	/* file */
+			inode->i_op = &yaffs_file_inode_operations;
+			inode->i_fop = &yaffs_file_operations;
+			inode->i_mapping->a_ops =
+			    &yaffs_file_address_operations;
+			break;
+		case S_IFDIR:	/* directory */
+			inode->i_op = &yaffs_dir_inode_operations;
+			inode->i_fop = &yaffs_dir_operations;
+			break;
+		case S_IFLNK:	/* symlink */
+			inode->i_op = &yaffs_symlink_inode_operations;
+			break;
+		}
+
+		yaffs_inode_to_obj_lv(inode) = obj;
+
+		obj->my_inode = inode;
+
+	} else {
+		yaffs_trace(YAFFS_TRACE_OS,
+			"yaffs_fill_inode invalid parameters");
+	}
+
+}
+
+
+
+/*
+ * yaffs background thread functions.
+ * yaffs_bg_thread_fn() is the thread function.
+ * yaffs_bg_start() launches the background thread.
+ * yaffs_bg_stop() cleans up the background thread.
+ *
+ * NB:
+ * The thread should only run after yaffs is initialised.
+ * The thread should be stopped before yaffs is unmounted.
+ * The thread should not do any writing while the fs is read only.
+ */
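+/*
+ * Sketch of the intended call pattern (both call sites appear later in
+ * this file): yaffs_bg_start() is invoked from yaffs_internal_read_super()
+ * once the guts are initialised, and yaffs_bg_stop() from yaffs_put_super()
+ * before the device is torn down.
+ */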
+
+static unsigned yaffs_bg_gc_urgency(struct yaffs_dev *dev)
+{
+	unsigned erased_chunks =
+	    dev->n_erased_blocks * dev->param.chunks_per_block;
+	struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
+	unsigned scattered = 0;	/* Free chunks not in an erased block */
+
+	if (erased_chunks < dev->n_free_chunks)
+		scattered = (dev->n_free_chunks - erased_chunks);
+
+	if (!context->bg_running)
+		return 0;
+	else if (scattered < (dev->param.chunks_per_block * 2))
+		return 0;
+	else if (erased_chunks > dev->n_free_chunks / 2)
+		return 0;
+	else if (erased_chunks > dev->n_free_chunks / 4)
+		return 1;
+	else
+		return 2;
+}
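+/*
+ * Worked example (illustrative numbers): with chunks_per_block = 64,
+ * n_free_chunks = 1000 and n_erased_blocks = 3, erased_chunks is 192 and
+ * scattered is 808; 192 is neither above half (500) nor above a quarter
+ * (250) of the free chunks, so the urgency returned is 2.
+ */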
+
+#ifdef YAFFS_COMPILE_BACKGROUND
+
+void yaffs_background_waker(unsigned long data)
+{
+	wake_up_process((struct task_struct *)data);
+}
+
+static int yaffs_bg_thread_fn(void *data)
+{
+	struct yaffs_dev *dev = (struct yaffs_dev *)data;
+	struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
+	unsigned long now = jiffies;
+	unsigned long next_dir_update = now;
+	unsigned long next_gc = now;
+	unsigned long expires;
+	unsigned int urgency;
+
+	int gc_result;
+	struct timer_list timer;
+
+	yaffs_trace(YAFFS_TRACE_BACKGROUND,
+		"yaffs_background starting for dev %p", (void *)dev);
+
+#ifdef YAFFS_COMPILE_FREEZER
+	set_freezable();
+#endif
+	while (context->bg_running) {
+		yaffs_trace(YAFFS_TRACE_BACKGROUND, "yaffs_background");
+
+		if (kthread_should_stop())
+			break;
+
+#ifdef YAFFS_COMPILE_FREEZER
+		if (try_to_freeze())
+			continue;
+#endif
+		yaffs_gross_lock(dev);
+
+		now = jiffies;
+
+		if (time_after(now, next_dir_update) && yaffs_bg_enable) {
+			yaffs_update_dirty_dirs(dev);
+			next_dir_update = now + HZ;
+		}
+
+		if (time_after(now, next_gc) && yaffs_bg_enable) {
+			if (!dev->is_checkpointed) {
+				urgency = yaffs_bg_gc_urgency(dev);
+				gc_result = yaffs_bg_gc(dev, urgency);
+				if (urgency > 1)
+					next_gc = now + HZ / 20 + 1;
+				else if (urgency > 0)
+					next_gc = now + HZ / 10 + 1;
+				else
+					next_gc = now + HZ * 2;
+			} else {
+				/*
+				 * gc not running so set next_gc to
+				 * next_dir_update to cut down on wake ups
+				 */
+				next_gc = next_dir_update;
+			}
+		}
+		yaffs_gross_unlock(dev);
+#if 1
+		expires = next_dir_update;
+		if (time_before(next_gc, expires))
+			expires = next_gc;
+		if (time_before(expires, now))
+			expires = now + HZ;
+
+		Y_INIT_TIMER(&timer);
+		timer.expires = expires + 1;
+		timer.data = (unsigned long)current;
+		timer.function = yaffs_background_waker;
+
+		set_current_state(TASK_INTERRUPTIBLE);
+		add_timer(&timer);
+		schedule();
+		del_timer_sync(&timer);
+#else
+		msleep(10);
+#endif
+	}
+
+	return 0;
+}
+
+static int yaffs_bg_start(struct yaffs_dev *dev)
+{
+	int retval = 0;
+	struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
+
+	if (dev->read_only)
+		return -1;
+
+	context->bg_running = 1;
+
+	context->bg_thread = kthread_run(yaffs_bg_thread_fn,
+					 (void *)dev, "yaffs-bg-%d",
+					 context->mount_id);
+
+	if (IS_ERR(context->bg_thread)) {
+		retval = PTR_ERR(context->bg_thread);
+		context->bg_thread = NULL;
+		context->bg_running = 0;
+	}
+	return retval;
+}
+
+static void yaffs_bg_stop(struct yaffs_dev *dev)
+{
+	struct yaffs_linux_context *ctxt = yaffs_dev_to_lc(dev);
+
+	ctxt->bg_running = 0;
+
+	if (ctxt->bg_thread) {
+		kthread_stop(ctxt->bg_thread);
+		ctxt->bg_thread = NULL;
+	}
+}
+#else
+static int yaffs_bg_thread_fn(void *data)
+{
+	return 0;
+}
+
+static int yaffs_bg_start(struct yaffs_dev *dev)
+{
+	return 0;
+}
+
+static void yaffs_bg_stop(struct yaffs_dev *dev)
+{
+}
+#endif
+
+
+static void yaffs_flush_inodes(struct super_block *sb)
+{
+	struct inode *iptr;
+	struct yaffs_obj *obj;
+
+	list_for_each_entry(iptr, &sb->s_inodes, i_sb_list) {
+		obj = yaffs_inode_to_obj(iptr);
+		if (obj) {
+			yaffs_trace(YAFFS_TRACE_OS,
+				"flushing obj %d",
+				obj->obj_id);
+			yaffs_flush_file(obj, 1, 0);
+		}
+	}
+}
+
+static void yaffs_flush_super(struct super_block *sb, int do_checkpoint)
+{
+	struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+	if (!dev)
+		return;
+
+	yaffs_flush_inodes(sb);
+	yaffs_update_dirty_dirs(dev);
+	yaffs_flush_whole_cache(dev);
+	if (do_checkpoint)
+		yaffs_checkpoint_save(dev);
+}
+
+static LIST_HEAD(yaffs_context_list);
+struct mutex yaffs_context_lock;
+
+static void yaffs_put_super(struct super_block *sb)
+{
+	struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+
+	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_ALWAYS,
+			"yaffs_put_super");
+
+	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
+		"Shutting down yaffs background thread");
+	yaffs_bg_stop(dev);
+	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
+		"yaffs background thread shut down");
+
+	yaffs_gross_lock(dev);
+
+	yaffs_flush_super(sb, 1);
+
+	yaffs_deinitialise(dev);
+
+	yaffs_gross_unlock(dev);
+
+	mutex_lock(&yaffs_context_lock);
+	list_del_init(&(yaffs_dev_to_lc(dev)->context_list));
+	mutex_unlock(&yaffs_context_lock);
+
+	if (yaffs_dev_to_lc(dev)->spare_buffer) {
+		kfree(yaffs_dev_to_lc(dev)->spare_buffer);
+		yaffs_dev_to_lc(dev)->spare_buffer = NULL;
+	}
+
+	kfree(dev);
+
+	yaffs_put_mtd_device(mtd);
+
+	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_ALWAYS,
+			"yaffs_put_super done");
+}
+
+
+static unsigned yaffs_gc_control_callback(struct yaffs_dev *dev)
+{
+	return yaffs_gc_control;
+}
+
+
+#ifdef YAFFS_COMPILE_EXPORTFS
+
+static struct inode *yaffs2_nfs_get_inode(struct super_block *sb, uint64_t ino,
+					  uint32_t generation)
+{
+	return Y_IGET(sb, ino);
+}
+
+static struct dentry *yaffs2_fh_to_dentry(struct super_block *sb,
+					  struct fid *fid, int fh_len,
+					  int fh_type)
+{
+	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
+				    yaffs2_nfs_get_inode);
+}
+
+static struct dentry *yaffs2_fh_to_parent(struct super_block *sb,
+					  struct fid *fid, int fh_len,
+					  int fh_type)
+{
+	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
+				    yaffs2_nfs_get_inode);
+}
+
+struct dentry *yaffs2_get_parent(struct dentry *dentry)
+{
+
+	struct super_block *sb = dentry->d_inode->i_sb;
+	struct dentry *parent = ERR_PTR(-ENOENT);
+	struct inode *inode;
+	unsigned long parent_ino;
+	struct yaffs_obj *d_obj;
+	struct yaffs_obj *parent_obj;
+
+	d_obj = yaffs_inode_to_obj(dentry->d_inode);
+
+	if (d_obj) {
+		parent_obj = d_obj->parent;
+		if (parent_obj) {
+			parent_ino = yaffs_get_obj_inode(parent_obj);
+			inode = Y_IGET(sb, parent_ino);
+
+			if (IS_ERR(inode)) {
+				parent = ERR_CAST(inode);
+			} else {
+				parent = d_obtain_alias(inode);
+				if (!parent) {
+					parent = ERR_PTR(-ENOMEM);
+					iput(inode);
+				}
+			}
+		}
+	}
+
+	return parent;
+}
+
+/* Fields left NULL (zero) in this structure fall back to the
+ * default exportfs functions.
+ */
+
+static struct export_operations yaffs_export_ops = {
+	.fh_to_dentry = yaffs2_fh_to_dentry,
+	.fh_to_parent = yaffs2_fh_to_parent,
+	.get_parent = yaffs2_get_parent,
+};
+
+#endif
+
+static void yaffs_unstitch_obj(struct inode *inode, struct yaffs_obj *obj)
+{
+	/* Clear the association between the inode and
+	 * the struct yaffs_obj.
+	 */
+	obj->my_inode = NULL;
+	yaffs_inode_to_obj_lv(inode) = NULL;
+
+	/* If the object freeing was deferred, then the real
+	 * free happens now.
+	 * This should fix the inode inconsistency problem.
+	 */
+	yaffs_handle_defered_free(obj);
+}
+
+#ifdef YAFFS_HAS_EVICT_INODE
+/* yaffs_evict_inode combines into one operation what was previously done in
+ * yaffs_clear_inode() and yaffs_delete_inode()
+ *
+ */
+static void yaffs_evict_inode(struct inode *inode)
+{
+	struct yaffs_obj *obj;
+	struct yaffs_dev *dev;
+	int deleteme = 0;
+
+	obj = yaffs_inode_to_obj(inode);
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_evict_inode: ino %d, count %d %s",
+		(int)inode->i_ino, atomic_read(&inode->i_count),
+		obj ? "object exists" : "null object");
+
+	if (!inode->i_nlink && !is_bad_inode(inode))
+		deleteme = 1;
+	truncate_inode_pages(&inode->i_data, 0);
+	Y_CLEAR_INODE(inode);
+
+	if (deleteme && obj) {
+		dev = obj->my_dev;
+		yaffs_gross_lock(dev);
+		yaffs_del_obj(obj);
+		yaffs_gross_unlock(dev);
+	}
+	if (obj) {
+		dev = obj->my_dev;
+		yaffs_gross_lock(dev);
+		yaffs_unstitch_obj(inode, obj);
+		yaffs_gross_unlock(dev);
+	}
+}
+#else
+
+/* yaffs_clear_inode() is called to tell the fs to release any per-inode
+ * data it holds. The object might still exist on disk and is just being
+ * thrown out of the cache, or else the object has actually been deleted
+ * and we're being called via the chain
+ *   yaffs_delete_inode() -> clear_inode() -> yaffs_clear_inode()
+ */
+
+static void yaffs_clear_inode(struct inode *inode)
+{
+	struct yaffs_obj *obj;
+	struct yaffs_dev *dev;
+
+	obj = yaffs_inode_to_obj(inode);
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_clear_inode: ino %d, count %d %s",
+		(int)inode->i_ino, atomic_read(&inode->i_count),
+		obj ? "object exists" : "null object");
+
+	if (obj) {
+		dev = obj->my_dev;
+		yaffs_gross_lock(dev);
+		yaffs_unstitch_obj(inode, obj);
+		yaffs_gross_unlock(dev);
+	}
+
+}
+
+/* delete is called when the link count is zero and the inode
+ * is put (i.e. nobody wants to know about it anymore, time to
+ * delete the file).
+ * NB Must call clear_inode().
+ */
+static void yaffs_delete_inode(struct inode *inode)
+{
+	struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
+	struct yaffs_dev *dev;
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_delete_inode: ino %d, count %d %s",
+		(int)inode->i_ino, atomic_read(&inode->i_count),
+		obj ? "object exists" : "null object");
+
+	if (obj) {
+		dev = obj->my_dev;
+		yaffs_gross_lock(dev);
+		yaffs_del_obj(obj);
+		yaffs_gross_unlock(dev);
+	}
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
+	truncate_inode_pages(&inode->i_data, 0);
+#endif
+	clear_inode(inode);
+}
+#endif
+
+
+
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+	struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
+	struct super_block *sb = dentry->d_sb;
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+	struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+#else
+static int yaffs_statfs(struct super_block *sb, struct statfs *buf)
+{
+	struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+#endif
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_statfs");
+
+	yaffs_gross_lock(dev);
+
+	buf->f_type = YAFFS_MAGIC;
+	buf->f_bsize = sb->s_blocksize;
+	buf->f_namelen = 255;
+
+	if (dev->data_bytes_per_chunk & (dev->data_bytes_per_chunk - 1)) {
+		/* Do this if chunk size is not a power of 2 */
+
+		uint64_t bytes_in_dev;
+		uint64_t bytes_free;
+
+		bytes_in_dev =
+		    ((uint64_t)
+		     ((dev->param.end_block - dev->param.start_block +
+		       1))) * ((uint64_t) (dev->param.chunks_per_block *
+					   dev->data_bytes_per_chunk));
+
+		do_div(bytes_in_dev, sb->s_blocksize);	/* bytes_in_dev becomes the number of blocks */
+		buf->f_blocks = bytes_in_dev;
+
+		bytes_free = ((uint64_t) (yaffs_get_n_free_chunks(dev))) *
+		    ((uint64_t) (dev->data_bytes_per_chunk));
+
+		do_div(bytes_free, sb->s_blocksize);
+
+		buf->f_bfree = bytes_free;
+
+	} else if (sb->s_blocksize > dev->data_bytes_per_chunk) {
+
+		buf->f_blocks =
+		    (dev->param.end_block - dev->param.start_block + 1) *
+		    dev->param.chunks_per_block /
+		    (sb->s_blocksize / dev->data_bytes_per_chunk);
+		buf->f_bfree =
+		    yaffs_get_n_free_chunks(dev) /
+		    (sb->s_blocksize / dev->data_bytes_per_chunk);
+	} else {
+		buf->f_blocks =
+		    (dev->param.end_block - dev->param.start_block + 1) *
+		    dev->param.chunks_per_block *
+		    (dev->data_bytes_per_chunk / sb->s_blocksize);
+
+		buf->f_bfree =
+		    yaffs_get_n_free_chunks(dev) *
+		    (dev->data_bytes_per_chunk / sb->s_blocksize);
+	}
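+	/*
+	 * Worked example (illustrative numbers): with 2048-byte chunks,
+	 * 64 chunks per block, blocks 0..1023 and a 4096-byte sb blocksize,
+	 * the middle branch above gives
+	 * f_blocks = 1024 * 64 / (4096 / 2048) = 32768.
+	 */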
+
+	buf->f_files = 0;
+	buf->f_ffree = 0;
+	buf->f_bavail = buf->f_bfree;
+
+	yaffs_gross_unlock(dev);
+	return 0;
+}
+
+
+
+static int yaffs_do_sync_fs(struct super_block *sb, int request_checkpoint)
+{
+
+	struct yaffs_dev *dev = yaffs_super_to_dev(sb);
+	unsigned int oneshot_checkpoint = (yaffs_auto_checkpoint & 4);
+	unsigned gc_urgent = yaffs_bg_gc_urgency(dev);
+	int do_checkpoint;
+	int dirty = yaffs_check_super_dirty(dev);
+
+	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
+		"yaffs_do_sync_fs: gc-urgency %d %s %s%s",
+		gc_urgent,
+		dirty ? "dirty" : "clean",
+		request_checkpoint ? "checkpoint requested" : "no checkpoint",
+		oneshot_checkpoint ? " one-shot" : "");
+
+	yaffs_gross_lock(dev);
+	do_checkpoint = ((request_checkpoint && !gc_urgent) ||
+			 oneshot_checkpoint) && !dev->is_checkpointed;
+
+	if (dirty || do_checkpoint) {
+		yaffs_flush_super(sb, !dev->is_checkpointed && do_checkpoint);
+		yaffs_clear_super_dirty(dev);
+		if (oneshot_checkpoint)
+			yaffs_auto_checkpoint &= ~4;
+	}
+	yaffs_gross_unlock(dev);
+
+	return 0;
+}
+
+
+#ifdef YAFFS_HAS_WRITE_SUPER
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+static void yaffs_write_super(struct super_block *sb)
+#else
+static int yaffs_write_super(struct super_block *sb)
+#endif
+{
+	unsigned request_checkpoint = (yaffs_auto_checkpoint >= 2);
+
+	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
+		"yaffs_write_super %s",
+		request_checkpoint ? " checkpt" : "");
+
+	yaffs_do_sync_fs(sb, request_checkpoint);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
+	return 0;
+#endif
+}
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+static int yaffs_sync_fs(struct super_block *sb, int wait)
+#else
+static int yaffs_sync_fs(struct super_block *sb)
+#endif
+{
+	unsigned request_checkpoint = (yaffs_auto_checkpoint >= 1);
+
+	yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC,
+		"yaffs_sync_fs%s", request_checkpoint ? " checkpt" : "");
+
+	yaffs_do_sync_fs(sb, request_checkpoint);
+
+	return 0;
+}
+
+
+
+static const struct super_operations yaffs_super_ops = {
+	.statfs = yaffs_statfs,
+
+#ifndef YAFFS_USE_OWN_IGET
+	.read_inode = yaffs_read_inode,
+#endif
+#ifdef YAFFS_HAS_PUT_INODE
+	.put_inode = yaffs_put_inode,
+#endif
+	.put_super = yaffs_put_super,
+#ifdef YAFFS_HAS_EVICT_INODE
+	.evict_inode = yaffs_evict_inode,
+#else
+	.delete_inode = yaffs_delete_inode,
+	.clear_inode = yaffs_clear_inode,
+#endif
+	.sync_fs = yaffs_sync_fs,
+#ifdef YAFFS_HAS_WRITE_SUPER
+	.write_super = yaffs_write_super,
+#endif
+};
+
+struct yaffs_options {
+	int inband_tags;
+	int skip_checkpoint_read;
+	int skip_checkpoint_write;
+	int no_cache;
+	int tags_ecc_on;
+	int tags_ecc_overridden;
+	int lazy_loading_enabled;
+	int lazy_loading_overridden;
+	int empty_lost_and_found;
+	int empty_lost_and_found_overridden;
+	int disable_summary;
+};
+
+#define MAX_OPT_LEN 30
+static int yaffs_parse_options(struct yaffs_options *options,
+			       const char *options_str)
+{
+	char cur_opt[MAX_OPT_LEN + 1];
+	int p;
+	int error = 0;
+
+	/* Parse through the options, which are a comma separated list */
+
+	while (options_str && *options_str && !error) {
+		memset(cur_opt, 0, MAX_OPT_LEN + 1);
+		p = 0;
+
+		while (*options_str == ',')
+			options_str++;
+
+		while (*options_str && *options_str != ',') {
+			if (p < MAX_OPT_LEN) {
+				cur_opt[p] = *options_str;
+				p++;
+			}
+			options_str++;
+		}
+
+		if (!strcmp(cur_opt, "inband-tags")) {
+			options->inband_tags = 1;
+		} else if (!strcmp(cur_opt, "tags-ecc-off")) {
+			options->tags_ecc_on = 0;
+			options->tags_ecc_overridden = 1;
+		} else if (!strcmp(cur_opt, "tags-ecc-on")) {
+			options->tags_ecc_on = 1;
+			options->tags_ecc_overridden = 1;
+		} else if (!strcmp(cur_opt, "lazy-loading-off")) {
+			options->lazy_loading_enabled = 0;
+			options->lazy_loading_overridden = 1;
+		} else if (!strcmp(cur_opt, "lazy-loading-on")) {
+			options->lazy_loading_enabled = 1;
+			options->lazy_loading_overridden = 1;
+		} else if (!strcmp(cur_opt, "disable-summary")) {
+			options->disable_summary = 1;
+		} else if (!strcmp(cur_opt, "empty-lost-and-found-off")) {
+			options->empty_lost_and_found = 0;
+			options->empty_lost_and_found_overridden = 1;
+		} else if (!strcmp(cur_opt, "empty-lost-and-found-on")) {
+			options->empty_lost_and_found = 1;
+			options->empty_lost_and_found_overridden = 1;
+		} else if (!strcmp(cur_opt, "no-cache")) {
+			options->no_cache = 1;
+		} else if (!strcmp(cur_opt, "no-checkpoint-read")) {
+			options->skip_checkpoint_read = 1;
+		} else if (!strcmp(cur_opt, "no-checkpoint-write")) {
+			options->skip_checkpoint_write = 1;
+		} else if (!strcmp(cur_opt, "no-checkpoint")) {
+			options->skip_checkpoint_read = 1;
+			options->skip_checkpoint_write = 1;
+		} else {
+			printk(KERN_INFO "yaffs: Bad mount option \"%s\"\n",
+			       cur_opt);
+			error = 1;
+		}
+	}
+
+	return error;
+}
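+/*
+ * Example (illustrative device and mount point): a mount such as
+ *	mount -t yaffs2 -o inband-tags,no-checkpoint /dev/mtdblock0 /mnt
+ * passes "inband-tags,no-checkpoint" through here, yielding inband_tags = 1,
+ * skip_checkpoint_read = 1 and skip_checkpoint_write = 1.
+ */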
+
+
+static struct dentry *yaffs_make_root(struct inode *inode)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
+	struct dentry *root = d_alloc_root(inode);
+
+	if (!root)
+		iput(inode);
+
+	return root;
+#else
+	return d_make_root(inode);
+#endif
+}
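+/*
+ * Note (assumes standard VFS behaviour): d_make_root() already drops the
+ * inode on allocation failure, so only the pre-3.4 d_alloc_root() path
+ * above needs the explicit iput().
+ */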
+
+
+
+
+static struct super_block *yaffs_internal_read_super(int yaffs_version,
+						     struct super_block *sb,
+						     void *data, int silent)
+{
+	int n_blocks;
+	struct inode *inode = NULL;
+	struct dentry *root;
+	struct yaffs_dev *dev = NULL;
+	char devname_buf[BDEVNAME_SIZE + 1];
+	struct mtd_info *mtd;
+	int err;
+	char *data_str = (char *)data;
+	struct yaffs_linux_context *context = NULL;
+	struct yaffs_param *param;
+
+	int read_only = 0;
+	int inband_tags = 0;
+
+	struct yaffs_options options;
+
+	unsigned mount_id;
+	int found;
+	struct yaffs_linux_context *context_iterator;
+	struct list_head *l;
+
+	if (!sb) {
+		printk(KERN_INFO "yaffs: sb is NULL\n");
+		return NULL;
+	}
+
+	sb->s_magic = YAFFS_MAGIC;
+	sb->s_op = &yaffs_super_ops;
+	sb->s_flags |= MS_NOATIME;
+
+	read_only = ((sb->s_flags & MS_RDONLY) != 0);
+
+#ifdef YAFFS_COMPILE_EXPORTFS
+	sb->s_export_op = &yaffs_export_ops;
+#endif
+
+	if (!sb->s_dev)
+		printk(KERN_INFO "yaffs: sb->s_dev is NULL\n");
+	else if (!yaffs_devname(sb, devname_buf))
+		printk(KERN_INFO "yaffs: devname is NULL\n");
+	else
+		printk(KERN_INFO "yaffs: dev is %d name is \"%s\" %s\n",
+		       sb->s_dev,
+		       yaffs_devname(sb, devname_buf), read_only ? "ro" : "rw");
+
+	if (!data_str)
+		data_str = "";
+
+	printk(KERN_INFO "yaffs: passed flags \"%s\"\n", data_str);
+
+	memset(&options, 0, sizeof(options));
+
+	if (yaffs_parse_options(&options, data_str)) {
+		/* Option parsing failed */
+		return NULL;
+	}
+
+	sb->s_blocksize = PAGE_CACHE_SIZE;
+	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_read_super: Using yaffs%d", yaffs_version);
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_read_super: block size %d", (int)(sb->s_blocksize));
+
+	yaffs_trace(YAFFS_TRACE_ALWAYS,
+		"yaffs: Attempting MTD mount of %u.%u,\"%s\"",
+		MAJOR(sb->s_dev), MINOR(sb->s_dev),
+		yaffs_devname(sb, devname_buf));
+
+	/* Get the device */
+	mtd = get_mtd_device(NULL, MINOR(sb->s_dev));
+	if (!mtd) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"yaffs: MTD device %u either not valid or unavailable",
+			MINOR(sb->s_dev));
+		return NULL;
+	}
+
+	if (yaffs_auto_select && yaffs_version == 1 && WRITE_SIZE(mtd) >= 2048) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs2");
+		yaffs_version = 2;
+	}
+
+	/* Added NCB 26/5/2006 for completeness */
+	if (yaffs_version == 2 && !options.inband_tags
+	    && WRITE_SIZE(mtd) == 512) {
+		yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs1");
+		yaffs_version = 1;
+	}
+
+	if (mtd->oobavail < sizeof(struct yaffs_packed_tags2) ||
+	    options.inband_tags)
+		inband_tags = 1;
+
+	if (yaffs_verify_mtd(mtd, yaffs_version, inband_tags) < 0)
+		return NULL;
+
+	/* OK, so if we got here, we have an MTD that's NAND and looks
+	 * like it has the right capabilities.
+	 * Set the struct yaffs_dev up for mtd.
+	 */
+
+	if (!read_only && !(mtd->flags & MTD_WRITEABLE)) {
+		read_only = 1;
+		printk(KERN_INFO
+		       "yaffs: mtd is read only, setting superblock read only\n"
+		);
+		sb->s_flags |= MS_RDONLY;
+	}
+
+	dev = kmalloc(sizeof(struct yaffs_dev), GFP_KERNEL);
+	context = kmalloc(sizeof(struct yaffs_linux_context), GFP_KERNEL);
+
+	if (!dev || !context) {
+		if (dev)
+			kfree(dev);
+		if (context)
+			kfree(context);
+		dev = NULL;
+		context = NULL;
+	}
+
+	if (!dev) {
+		/* Serious failure: could not allocate the device structure */
+		yaffs_trace(YAFFS_TRACE_ALWAYS,
+			"yaffs_read_super: Failed trying to allocate struct yaffs_dev."
+		);
+		return NULL;
+	}
+	memset(dev, 0, sizeof(struct yaffs_dev));
+	param = &(dev->param);
+
+	memset(context, 0, sizeof(struct yaffs_linux_context));
+	dev->os_context = context;
+	INIT_LIST_HEAD(&(context->context_list));
+	context->dev = dev;
+	context->super = sb;
+
+	dev->read_only = read_only;
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+	sb->s_fs_info = dev;
+#else
+	sb->u.generic_sbp = dev;
+#endif
+
+
+	dev->driver_context = mtd;
+	param->name = mtd->name;
+
+	/* Set up the memory size parameters.... */
+
+
+	param->n_reserved_blocks = 5;
+	param->n_caches = (options.no_cache) ? 0 : 10;
+	param->inband_tags = inband_tags;
+
+	param->enable_xattr = 1;
+	if (options.lazy_loading_overridden)
+		param->disable_lazy_load = !options.lazy_loading_enabled;
+
+	param->defered_dir_update = 1;
+
+	if (options.tags_ecc_overridden)
+		param->no_tags_ecc = !options.tags_ecc_on;
+
+	param->empty_lost_n_found = 1;
+	param->refresh_period = 500;
+	param->disable_summary = options.disable_summary;
+
+
+#ifdef CONFIG_YAFFS_DISABLE_BAD_BLOCK_MARKING
+	param->disable_bad_block_marking  = 1;
+#endif
+	if (options.empty_lost_and_found_overridden)
+		param->empty_lost_n_found = options.empty_lost_and_found;
+
+	/* ... and the functions. */
+	if (yaffs_version == 2) {
+		param->is_yaffs2 = 1;
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+		param->total_bytes_per_chunk = mtd->writesize;
+		param->chunks_per_block = mtd->erasesize / mtd->writesize;
+#else
+		param->total_bytes_per_chunk = mtd->oobblock;
+		param->chunks_per_block = mtd->erasesize / mtd->oobblock;
+#endif
+		n_blocks = YCALCBLOCKS(mtd->size, mtd->erasesize);
+
+		param->start_block = 0;
+		param->end_block = n_blocks - 1;
+	} else {
+		param->is_yaffs2 = 0;
+		n_blocks = YCALCBLOCKS(mtd->size,
+			     YAFFS_CHUNKS_PER_BLOCK * YAFFS_BYTES_PER_CHUNK);
+
+		param->chunks_per_block = YAFFS_CHUNKS_PER_BLOCK;
+		param->total_bytes_per_chunk = YAFFS_BYTES_PER_CHUNK;
+	}
+
+	param->start_block = 0;
+	param->end_block = n_blocks - 1;
+
+	yaffs_mtd_drv_install(dev);
+
+	param->sb_dirty_fn = yaffs_set_super_dirty;
+	param->gc_control_fn = yaffs_gc_control_callback;
+
+	yaffs_dev_to_lc(dev)->super = sb;
+
+	param->use_nand_ecc = 1;
+
+	param->skip_checkpt_rd = options.skip_checkpoint_read;
+	param->skip_checkpt_wr = options.skip_checkpoint_write;
+
+	mutex_lock(&yaffs_context_lock);
+	/* Get a mount id */
+	found = 0;
+	for (mount_id = 0; !found; mount_id++) {
+		found = 1;
+		list_for_each(l, &yaffs_context_list) {
+			context_iterator =
+			    list_entry(l, struct yaffs_linux_context,
+				       context_list);
+			if (context_iterator->mount_id == mount_id)
+				found = 0;
+		}
+	}
+	context->mount_id = mount_id;
+
+	list_add_tail(&(yaffs_dev_to_lc(dev)->context_list),
+		      &yaffs_context_list);
+	mutex_unlock(&yaffs_context_lock);
+
+	/* Directory search handling... */
+	INIT_LIST_HEAD(&(yaffs_dev_to_lc(dev)->search_contexts));
+	param->remove_obj_fn = yaffs_remove_obj_callback;
+
+	mutex_init(&(yaffs_dev_to_lc(dev)->gross_lock));
+
+	yaffs_gross_lock(dev);
+
+	err = yaffs_guts_initialise(dev);
+
+	yaffs_trace(YAFFS_TRACE_OS,
+		"yaffs_read_super: guts initialised %s",
+		(err == YAFFS_OK) ? "OK" : "FAILED");
+
+	if (err == YAFFS_OK)
+		yaffs_bg_start(dev);
+
+	if (!context->bg_thread)
+		param->defered_dir_update = 0;
+
+	sb->s_maxbytes = yaffs_max_file_size(dev);
+
+	/* Release lock before yaffs_get_inode() */
+	yaffs_gross_unlock(dev);
+
+	/* Create root inode */
+	if (err == YAFFS_OK)
+		inode = yaffs_get_inode(sb, S_IFDIR | 0755, 0, yaffs_root(dev));
+
+	if (!inode)
+		return NULL;
+
+	inode->i_op = &yaffs_dir_inode_operations;
+	inode->i_fop = &yaffs_dir_operations;
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: got root inode");
+
+	root = yaffs_make_root(inode);
+
+	if (!root)
+		return NULL;
+
+	sb->s_root = root;
+	if (!dev->is_checkpointed)
+		yaffs_set_super_dirty(dev);
+
+	yaffs_trace(YAFFS_TRACE_ALWAYS,
+		"yaffs_read_super: is_checkpointed %d",
+		dev->is_checkpointed);
+
+	yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: done");
+	return sb;
+}
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static int yaffs_internal_read_super_mtd(struct super_block *sb, void *data,
+					 int silent)
+{
+	return yaffs_internal_read_super(1, sb, data, silent) ? 0 : -EINVAL;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+static struct dentry *yaffs_mount(struct file_system_type *fs_type, int flags,
+				  const char *dev_name, void *data)
+{
+	return mount_bdev(fs_type, flags, dev_name, data,
+			  yaffs_internal_read_super_mtd);
+}
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+static int yaffs_read_super(struct file_system_type *fs,
+			    int flags, const char *dev_name,
+			    void *data, struct vfsmount *mnt)
+{
+
+	return get_sb_bdev(fs, flags, dev_name, data,
+			   yaffs_internal_read_super_mtd, mnt);
+}
+#else
+static struct super_block *yaffs_read_super(struct file_system_type *fs,
+					    int flags, const char *dev_name,
+					    void *data)
+{
+
+	return get_sb_bdev(fs, flags, dev_name, data,
+			   yaffs_internal_read_super_mtd);
+}
+#endif
+
+static struct file_system_type yaffs_fs_type = {
+	.owner = THIS_MODULE,
+	.name = "yaffs",
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+	.mount = yaffs_mount,
+#else
+	.get_sb = yaffs_read_super,
+#endif
+	.kill_sb = kill_block_super,
+	.fs_flags = FS_REQUIRES_DEV,
+};
+#else
+static struct super_block *yaffs_read_super(struct super_block *sb, void *data,
+					    int silent)
+{
+	return yaffs_internal_read_super(1, sb, data, silent);
+}
+
+static DECLARE_FSTYPE(yaffs_fs_type, "yaffs", yaffs_read_super,
+		      FS_REQUIRES_DEV);
+#endif
+
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+static int yaffs2_internal_read_super_mtd(struct super_block *sb, void *data,
+					  int silent)
+{
+	return yaffs_internal_read_super(2, sb, data, silent) ? 0 : -EINVAL;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+static struct dentry *yaffs2_mount(struct file_system_type *fs_type, int flags,
+				   const char *dev_name, void *data)
+{
+	return mount_bdev(fs_type, flags, dev_name, data,
+			  yaffs2_internal_read_super_mtd);
+}
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
+static int yaffs2_read_super(struct file_system_type *fs,
+			     int flags, const char *dev_name, void *data,
+			     struct vfsmount *mnt)
+{
+	return get_sb_bdev(fs, flags, dev_name, data,
+			   yaffs2_internal_read_super_mtd, mnt);
+}
+#else
+static struct super_block *yaffs2_read_super(struct file_system_type *fs,
+					     int flags, const char *dev_name,
+					     void *data)
+{
+
+	return get_sb_bdev(fs, flags, dev_name, data,
+			   yaffs2_internal_read_super_mtd);
+}
+#endif
+
+static struct file_system_type yaffs2_fs_type = {
+	.owner = THIS_MODULE,
+	.name = "yaffs2",
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+	.mount = yaffs2_mount,
+#else
+	.get_sb = yaffs2_read_super,
+#endif
+	.kill_sb = kill_block_super,
+	.fs_flags = FS_REQUIRES_DEV,
+};
+#else
+static struct super_block *yaffs2_read_super(struct super_block *sb,
+					     void *data, int silent)
+{
+	return yaffs_internal_read_super(2, sb, data, silent);
+}
+
+static DECLARE_FSTYPE(yaffs2_fs_type, "yaffs2", yaffs2_read_super,
+		      FS_REQUIRES_DEV);
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
+static struct proc_dir_entry *my_proc_entry;
+#endif
+
+static char *yaffs_dump_dev_part0(char *buf, struct yaffs_dev *dev)
+{
+	struct yaffs_param *param = &dev->param;
+	int bs[10];
+
+	yaffs_count_blocks_by_state(dev, bs);
+
+	buf += sprintf(buf, "start_block.......... %d\n", param->start_block);
+	buf += sprintf(buf, "end_block............ %d\n", param->end_block);
+	buf += sprintf(buf, "total_bytes_per_chunk %d\n",
+				param->total_bytes_per_chunk);
+	buf += sprintf(buf, "use_nand_ecc......... %d\n", param->use_nand_ecc);
+	buf += sprintf(buf, "no_tags_ecc.......... %d\n", param->no_tags_ecc);
+	buf += sprintf(buf, "is_yaffs2............ %d\n", param->is_yaffs2);
+	buf += sprintf(buf, "inband_tags.......... %d\n", param->inband_tags);
+	buf += sprintf(buf, "empty_lost_n_found... %d\n",
+				param->empty_lost_n_found);
+	buf += sprintf(buf, "disable_lazy_load.... %d\n",
+				param->disable_lazy_load);
+	buf += sprintf(buf, "disable_bad_block_mrk %d\n",
+				param->disable_bad_block_marking);
+	buf += sprintf(buf, "refresh_period....... %d\n",
+				param->refresh_period);
+	buf += sprintf(buf, "n_caches............. %d\n", param->n_caches);
+	buf += sprintf(buf, "n_reserved_blocks.... %d\n",
+				param->n_reserved_blocks);
+	buf += sprintf(buf, "always_check_erased.. %d\n",
+				param->always_check_erased);
+	buf += sprintf(buf, "\n");
+	buf += sprintf(buf, "block count by state\n");
+	buf += sprintf(buf, "0:%d 1:%d 2:%d 3:%d 4:%d\n",
+				bs[0], bs[1], bs[2], bs[3], bs[4]);
+	buf += sprintf(buf, "5:%d 6:%d 7:%d 8:%d 9:%d\n",
+				bs[5], bs[6], bs[7], bs[8], bs[9]);
+
+	return buf;
+}
+
+static char *yaffs_dump_dev_part1(char *buf, struct yaffs_dev *dev)
+{
+	buf += sprintf(buf, "max file size....... %lld\n",
+				(long long) yaffs_max_file_size(dev));
+	buf += sprintf(buf, "data_bytes_per_chunk. %d\n",
+				dev->data_bytes_per_chunk);
+	buf += sprintf(buf, "chunk_grp_bits....... %d\n", dev->chunk_grp_bits);
+	buf += sprintf(buf, "chunk_grp_size....... %d\n", dev->chunk_grp_size);
+	buf += sprintf(buf, "n_erased_blocks...... %d\n", dev->n_erased_blocks);
+	buf += sprintf(buf, "blocks_in_checkpt.... %d\n",
+				dev->blocks_in_checkpt);
+	buf += sprintf(buf, "\n");
+	buf += sprintf(buf, "n_tnodes............. %d\n", dev->n_tnodes);
+	buf += sprintf(buf, "n_obj................ %d\n", dev->n_obj);
+	buf += sprintf(buf, "n_free_chunks........ %d\n", dev->n_free_chunks);
+	buf += sprintf(buf, "\n");
+	buf += sprintf(buf, "n_page_writes........ %u\n", dev->n_page_writes);
+	buf += sprintf(buf, "n_page_reads......... %u\n", dev->n_page_reads);
+	buf += sprintf(buf, "n_erasures........... %u\n", dev->n_erasures);
+	buf += sprintf(buf, "n_gc_copies.......... %u\n", dev->n_gc_copies);
+	buf += sprintf(buf, "all_gcs.............. %u\n", dev->all_gcs);
+	buf += sprintf(buf, "passive_gc_count..... %u\n",
+				dev->passive_gc_count);
+	buf += sprintf(buf, "oldest_dirty_gc_count %u\n",
+				dev->oldest_dirty_gc_count);
+	buf += sprintf(buf, "n_gc_blocks.......... %u\n", dev->n_gc_blocks);
+	buf += sprintf(buf, "bg_gcs............... %u\n", dev->bg_gcs);
+	buf += sprintf(buf, "n_retried_writes..... %u\n",
+				dev->n_retried_writes);
+	buf += sprintf(buf, "n_retired_blocks..... %u\n",
+				dev->n_retired_blocks);
+	buf += sprintf(buf, "n_ecc_fixed.......... %u\n", dev->n_ecc_fixed);
+	buf += sprintf(buf, "n_ecc_unfixed........ %u\n", dev->n_ecc_unfixed);
+	buf += sprintf(buf, "n_tags_ecc_fixed..... %u\n",
+				dev->n_tags_ecc_fixed);
+	buf += sprintf(buf, "n_tags_ecc_unfixed... %u\n",
+				dev->n_tags_ecc_unfixed);
+	buf += sprintf(buf, "cache_hits........... %u\n", dev->cache_hits);
+	buf += sprintf(buf, "n_deleted_files...... %u\n", dev->n_deleted_files);
+	buf += sprintf(buf, "n_unlinked_files..... %u\n",
+				dev->n_unlinked_files);
+	buf += sprintf(buf, "refresh_count........ %u\n", dev->refresh_count);
+	buf += sprintf(buf, "n_bg_deletions....... %u\n", dev->n_bg_deletions);
+	buf += sprintf(buf, "tags_used............ %u\n", dev->tags_used);
+	buf += sprintf(buf, "summary_used......... %u\n", dev->summary_used);
+
+	return buf;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
+static int yaffs_proc_read(char *page,
+			   char **start,
+			   off_t offset, int count, int *eof, void *data)
+{
+	struct list_head *item;
+	char *buf = page;
+	int step = offset;
+	int n = 0;
+
+	/* Get proc_file_read() to step 'offset' by one on each successive call.
+	 * We use 'offset' (*ppos) to indicate where we are in dev_list.
+	 * This also assumes the user has posted a read buffer large
+	 * enough to hold the complete output; but that's life in /proc.
+	 */
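+	/*
+	 * Illustrative walk-through: step 0 prints the banner, step 1 a
+	 * blank line, and each later pair of steps prints part0 then part1
+	 * for the next device on yaffs_context_list, so reading the file
+	 * to EOF (e.g. "cat /proc/yaffs") lists every mounted device.
+	 */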
+
+	*(int *)start = 1;
+
+	/* Print header first */
+	if (step == 0)
+		buf +=
+		    sprintf(buf,
+			    "Multi-version YAFFS built:" __DATE__ " " __TIME__
+			    "\n");
+	else if (step == 1)
+		buf += sprintf(buf, "\n");
+	else {
+		step -= 2;
+
+		mutex_lock(&yaffs_context_lock);
+
+		/* Locate and print the Nth entry.  Order N-squared but N is small. */
+		list_for_each(item, &yaffs_context_list) {
+			struct yaffs_linux_context *dc =
+			    list_entry(item, struct yaffs_linux_context,
+				       context_list);
+			struct yaffs_dev *dev = dc->dev;
+
+			if (n < (step & ~1)) {
+				n += 2;
+				continue;
+			}
+			if ((step & 1) == 0) {
+				buf +=
+				    sprintf(buf, "\nDevice %d \"%s\"\n", n,
+					    dev->param.name);
+				buf = yaffs_dump_dev_part0(buf, dev);
+			} else {
+				buf = yaffs_dump_dev_part1(buf, dev);
+			}
+
+			break;
+		}
+		mutex_unlock(&yaffs_context_lock);
+	}
+
+	return buf - page < count ? buf - page : count;
+}
+#endif
+
+/**
+ * Set the verbosity of the warnings and error messages.
+ *
+ * Note that the names can only be a..z or _ with the current code.
+ * A usage example follows the mask table below.
+ */
+
+static struct {
+	char *mask_name;
+	unsigned mask_bitfield;
+} mask_flags[] = {
+	{"allocate", YAFFS_TRACE_ALLOCATE},
+	{"always", YAFFS_TRACE_ALWAYS},
+	{"background", YAFFS_TRACE_BACKGROUND},
+	{"bad_blocks", YAFFS_TRACE_BAD_BLOCKS},
+	{"buffers", YAFFS_TRACE_BUFFERS},
+	{"bug", YAFFS_TRACE_BUG},
+	{"checkpt", YAFFS_TRACE_CHECKPOINT},
+	{"deletion", YAFFS_TRACE_DELETION},
+	{"erase", YAFFS_TRACE_ERASE},
+	{"error", YAFFS_TRACE_ERROR},
+	{"gc_detail", YAFFS_TRACE_GC_DETAIL},
+	{"gc", YAFFS_TRACE_GC},
+	{"lock", YAFFS_TRACE_LOCK},
+	{"mtd", YAFFS_TRACE_MTD},
+	{"nandaccess", YAFFS_TRACE_NANDACCESS},
+	{"os", YAFFS_TRACE_OS},
+	{"scan_debug", YAFFS_TRACE_SCAN_DEBUG},
+	{"scan", YAFFS_TRACE_SCAN},
+	{"mount", YAFFS_TRACE_MOUNT},
+	{"tracing", YAFFS_TRACE_TRACING},
+	{"sync", YAFFS_TRACE_SYNC},
+	{"write", YAFFS_TRACE_WRITE},
+	{"verify", YAFFS_TRACE_VERIFY},
+	{"verify_nand", YAFFS_TRACE_VERIFY_NAND},
+	{"verify_full", YAFFS_TRACE_VERIFY_FULL},
+	{"verify_all", YAFFS_TRACE_VERIFY_ALL},
+	{"all", 0xffffffff},
+	{"none", 0},
+	{NULL, 0},
+};
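+/*
+ * Usage example (illustrative): writing "+os +scan" to /proc/yaffs ORs the
+ * os and scan bits into yaffs_trace_mask; "-all" clears every bit, though
+ * YAFFS_TRACE_ALWAYS is unconditionally re-asserted by the parser below.
+ */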
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
+#define MAX_MASK_NAME_LENGTH 40
+static int yaffs_proc_write_trace_options(struct file *file, const char *buf,
+					  unsigned long count, void *data)
+{
+	unsigned rg = 0, mask_bitfield;
+	char *end;
+	char *mask_name;
+	const char *x;
+	char substring[MAX_MASK_NAME_LENGTH + 1];
+	int i;
+	int done = 0;
+	int add, len = 0;
+	int pos = 0;
+
+	rg = yaffs_trace_mask;
+
+	while (!done && (pos < count)) {
+		done = 1;
+		while ((pos < count) && isspace(buf[pos]))
+			pos++;
+
+		switch (buf[pos]) {
+		case '+':
+		case '-':
+		case '=':
+			add = buf[pos];
+			pos++;
+			break;
+
+		default:
+			add = ' ';
+			break;
+		}
+		mask_name = NULL;
+
+		mask_bitfield = simple_strtoul(buf + pos, &end, 0);
+
+		if (end > buf + pos) {
+			mask_name = "numeral";
+			len = end - (buf + pos);
+			pos += len;
+			done = 0;
+		} else {
+			for (x = buf + pos, i = 0;
+			     (*x == '_' || (*x >= 'a' && *x <= 'z')) &&
+			     i < MAX_MASK_NAME_LENGTH; x++, i++, pos++)
+				substring[i] = *x;
+			substring[i] = '\0';
+
+			for (i = 0; mask_flags[i].mask_name != NULL; i++) {
+				if (strcmp(substring, mask_flags[i].mask_name)
+				    == 0) {
+					mask_name = mask_flags[i].mask_name;
+					mask_bitfield =
+					    mask_flags[i].mask_bitfield;
+					done = 0;
+					break;
+				}
+			}
+		}
+
+		if (mask_name != NULL) {
+			done = 0;
+			switch (add) {
+			case '-':
+				rg &= ~mask_bitfield;
+				break;
+			case '+':
+				rg |= mask_bitfield;
+				break;
+			case '=':
+				rg = mask_bitfield;
+				break;
+			default:
+				rg |= mask_bitfield;
+				break;
+			}
+		}
+	}
+
+	yaffs_trace_mask = rg | YAFFS_TRACE_ALWAYS;
+
+	printk(KERN_DEBUG "new trace = 0x%08X\n", yaffs_trace_mask);
+
+	if (rg & YAFFS_TRACE_ALWAYS) {
+		for (i = 0; mask_flags[i].mask_name != NULL; i++) {
+			char flag;
+			flag = ((rg & mask_flags[i].mask_bitfield) ==
+				mask_flags[i].mask_bitfield) ? '+' : '-';
+			printk(KERN_DEBUG "%c%s\n", flag,
+			       mask_flags[i].mask_name);
+		}
+	}
+
+	return count;
+}
+
+/* Debug strings are of the form:
+ * .bnnn         print info on block n
+ * .cobjn,chunkn print nand chunk id for objn:chunkn
+ */
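+
+/* Illustrative examples (again assuming the entry is at /proc/yaffs):
+ *
+ *   echo ".b100"  > /proc/yaffs    print info on block 100
+ *   echo ".c20,3" > /proc/yaffs    print the nand chunk backing chunk 3
+ *                                  of object 20 (",0" means the object
+ *                                  header chunk)
+ */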
+
+static int yaffs_proc_debug_write(struct file *file, const char *buf,
+					  unsigned long count, void *data)
+{
+
+	char str[100];
+	char *p0;
+	char *p1;
+	long p1_val;
+	long p0_val;
+	char cmd;
+	struct list_head *item;
+
+	memset(str, 0, sizeof(str));
+	memcpy(str, buf, min(count, sizeof(str) - 1));
+
+	cmd = str[1];
+
+	p0 = str + 2;
+
+	p1 = p0;
+
+	while (*p1 && *p1 != ',')
+		p1++;
+	*p1 = '\0';
+	p1++;
+
+	p0_val = simple_strtol(p0, NULL, 0);
+	p1_val = simple_strtol(p1, NULL, 0);
+
+	mutex_lock(&yaffs_context_lock);
+
+	/* Locate and print the Nth entry.  Order N-squared but N is small. */
+	list_for_each(item, &yaffs_context_list) {
+		struct yaffs_linux_context *dc =
+		    list_entry(item, struct yaffs_linux_context,
+			       context_list);
+		struct yaffs_dev *dev = dc->dev;
+
+		if (cmd == 'b') {
+			struct yaffs_block_info *bi;
+
+			bi = yaffs_get_block_info(dev, p0_val);
+
+			if (bi) {
+				printk("Block %d: state %d, retire %d, use %d, seq %d\n",
+					(int)p0_val, bi->block_state,
+					bi->needs_retiring, bi->pages_in_use,
+					bi->seq_number);
+			}
+		} else if (cmd == 'c') {
+			struct yaffs_obj *obj;
+			int nand_chunk;
+
+			obj = yaffs_find_by_number(dev, p0_val);
+			if (!obj)
+				printk("No obj %d\n", (int)p0_val);
+			else {
+				if (p1_val == 0)
+					nand_chunk = obj->hdr_chunk;
+				else
+					nand_chunk =
+						yaffs_find_chunk_in_file(obj,
+							p1_val, NULL);
+				printk("Nand chunk for %d:%d is %d\n",
+					(int)p0_val, (int)p1_val, nand_chunk);
+			}
+		}
+	}
+
+	mutex_unlock(&yaffs_context_lock);
+
+	return count;
+}
+
+static int yaffs_proc_write(struct file *file, const char *buf,
+			    unsigned long count, void *data)
+{
+	if (buf[0] == '.')
+		return yaffs_proc_debug_write(file, buf, count, data);
+	return yaffs_proc_write_trace_options(file, buf, count, data);
+}
+#endif
+
+/* Stuff to handle installation of file systems */
+struct file_system_to_install {
+	struct file_system_type *fst;
+	int installed;
+};
+
+static struct file_system_to_install fs_to_install[] = {
+	{&yaffs_fs_type, 0},
+	{&yaffs2_fs_type, 0},
+	{NULL, 0}
+};
+
+static int __init init_yaffs_fs(void)
+{
+	int error = 0;
+	struct file_system_to_install *fsinst;
+
+	yaffs_trace(YAFFS_TRACE_ALWAYS,
+		"yaffs built " __DATE__ " " __TIME__ " Installing.");
+
+	mutex_init(&yaffs_context_lock);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
+	/* Install the proc_fs entries */
+	my_proc_entry = create_proc_entry("yaffs",
+					  S_IRUGO | S_IFREG, YPROC_ROOT);
+
+	if (my_proc_entry) {
+		my_proc_entry->write_proc = yaffs_proc_write;
+		my_proc_entry->read_proc = yaffs_proc_read;
+		my_proc_entry->data = NULL;
+	} else {
+		return -ENOMEM;
+	}
+#endif
+
+	/* Now add the file system entries */
+
+	fsinst = fs_to_install;
+
+	while (fsinst->fst && !error) {
+		error = register_filesystem(fsinst->fst);
+		if (!error)
+			fsinst->installed = 1;
+		fsinst++;
+	}
+
+	/* Any errors? Uninstall. */
+	if (error) {
+		fsinst = fs_to_install;
+
+		while (fsinst->fst) {
+			if (fsinst->installed) {
+				unregister_filesystem(fsinst->fst);
+				fsinst->installed = 0;
+			}
+			fsinst++;
+		}
+	}
+
+	return error;
+}
+
+static void __exit exit_yaffs_fs(void)
+{
+
+	struct file_system_to_install *fsinst;
+
+	yaffs_trace(YAFFS_TRACE_ALWAYS,
+		"yaffs built " __DATE__ " " __TIME__ " removing.");
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
+	remove_proc_entry("yaffs", YPROC_ROOT);
+#endif
+
+	fsinst = fs_to_install;
+
+	while (fsinst->fst) {
+		if (fsinst->installed) {
+			unregister_filesystem(fsinst->fst);
+			fsinst->installed = 0;
+		}
+		fsinst++;
+	}
+}
+
+module_init(init_yaffs_fs)
+module_exit(exit_yaffs_fs)
+
+MODULE_DESCRIPTION("YAFFS2 - a NAND specific flash file system");
+MODULE_AUTHOR("Charles Manning, Aleph One Ltd., 2002-2011");
+MODULE_LICENSE("GPL");
diff --git a/fs/yaffs2/yaffs_yaffs1.c b/fs/yaffs2/yaffs_yaffs1.c
new file mode 100644
index 0000000..d277e20
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs1.c
@@ -0,0 +1,422 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_yaffs1.h"
+#include "yportenv.h"
+#include "yaffs_trace.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_nand.h"
+#include "yaffs_attribs.h"
+
+int yaffs1_scan(struct yaffs_dev *dev)
+{
+	struct yaffs_ext_tags tags;
+	int blk;
+	int result;
+	int chunk;
+	int c;
+	int deleted;
+	enum yaffs_block_state state;
+	LIST_HEAD(hard_list);
+	struct yaffs_block_info *bi;
+	u32 seq_number;
+	struct yaffs_obj_hdr *oh;
+	struct yaffs_obj *in;
+	struct yaffs_obj *parent;
+	int alloc_failed = 0;
+	struct yaffs_shadow_fixer *shadow_fixers = NULL;
+	u8 *chunk_data;
+
+	yaffs_trace(YAFFS_TRACE_SCAN,
+		"yaffs1_scan starts intstartblk %d intendblk %d...",
+		dev->internal_start_block, dev->internal_end_block);
+
+	chunk_data = yaffs_get_temp_buffer(dev);
+
+	dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
+
+	/* Scan all the blocks to determine their state */
+	bi = dev->block_info;
+	for (blk = dev->internal_start_block; blk <= dev->internal_end_block;
+	     blk++) {
+		yaffs_clear_chunk_bits(dev, blk);
+		bi->pages_in_use = 0;
+		bi->soft_del_pages = 0;
+
+		yaffs_query_init_block_state(dev, blk, &state, &seq_number);
+
+		bi->block_state = state;
+		bi->seq_number = seq_number;
+
+		if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
+			bi->block_state = state = YAFFS_BLOCK_STATE_DEAD;
+
+		yaffs_trace(YAFFS_TRACE_SCAN_DEBUG,
+			"Block scanning block %d state %d seq %d",
+			blk, state, seq_number);
+
+		if (state == YAFFS_BLOCK_STATE_DEAD) {
+			yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+				"block %d is bad", blk);
+		} else if (state == YAFFS_BLOCK_STATE_EMPTY) {
+			yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty ");
+			dev->n_erased_blocks++;
+			dev->n_free_chunks += dev->param.chunks_per_block;
+		}
+		bi++;
+	}
+
+	/* For each block.... */
+	for (blk = dev->internal_start_block;
+	     !alloc_failed && blk <= dev->internal_end_block; blk++) {
+
+		cond_resched();
+
+		bi = yaffs_get_block_info(dev, blk);
+		state = bi->block_state;
+
+		deleted = 0;
+
+		/* For each chunk in each block that needs scanning.... */
+		for (c = 0;
+			!alloc_failed && c < dev->param.chunks_per_block &&
+			state == YAFFS_BLOCK_STATE_NEEDS_SCAN; c++) {
+			/* Read the tags and decide what to do */
+			chunk = blk * dev->param.chunks_per_block + c;
+
+			result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL,
+							  &tags);
+
+			/* Let's have a good look at this chunk... */
+
+			if (tags.ecc_result == YAFFS_ECC_RESULT_UNFIXED ||
+			    tags.is_deleted) {
+				/* YAFFS1 only...
+				 * A deleted chunk
+				 */
+				deleted++;
+				dev->n_free_chunks++;
+			} else if (!tags.chunk_used) {
+				/* An unassigned chunk in the block
+				 * This means that either the block is empty or
+				 * this is the one being allocated from
+				 */
+
+				if (c == 0) {
+					/* We're looking at the first chunk in
+					 * the block so the block is unused */
+					state = YAFFS_BLOCK_STATE_EMPTY;
+					dev->n_erased_blocks++;
+				} else {
+					/* this is the block being allocated */
+					yaffs_trace(YAFFS_TRACE_SCAN,
+						" Allocating from %d %d",
+						blk, c);
+					state = YAFFS_BLOCK_STATE_ALLOCATING;
+					dev->alloc_block = blk;
+					dev->alloc_page = c;
+					dev->alloc_block_finder = blk;
+
+				}
+
+				dev->n_free_chunks +=
+				    (dev->param.chunks_per_block - c);
+			} else if (tags.chunk_id > 0) {
+				/* chunk_id > 0 so it is a data chunk... */
+				unsigned int endpos;
+
+				yaffs_set_chunk_bit(dev, blk, c);
+				bi->pages_in_use++;
+
+				in = yaffs_find_or_create_by_number(dev,
+							tags.obj_id,
+							YAFFS_OBJECT_TYPE_FILE);
+				/* PutChunkIntoFile checks for a clash
+				 * (two data chunks with the same chunk_id).
+				 */
+
+				if (!in)
+					alloc_failed = 1;
+
+				if (in) {
+					if (!yaffs_put_chunk_in_file
+					    (in, tags.chunk_id, chunk, 1))
+						alloc_failed = 1;
+				}
+
+				endpos =
+				    (tags.chunk_id - 1) *
+				    dev->data_bytes_per_chunk +
+				    tags.n_bytes;
+				if (in &&
+				    in->variant_type ==
+				     YAFFS_OBJECT_TYPE_FILE &&
+				    in->variant.file_variant.scanned_size <
+				      endpos) {
+					in->variant.file_variant.scanned_size =
+					    endpos;
+					if (!dev->param.use_header_file_size) {
+						in->variant.
+						    file_variant.file_size =
+						    in->variant.
+						    file_variant.scanned_size;
+					}
+
+				}
+			} else {
+				/* chunk_id == 0, so it is an ObjectHeader.
+				 * Make the object
+				 */
+				yaffs_set_chunk_bit(dev, blk, c);
+				bi->pages_in_use++;
+
+				result = yaffs_rd_chunk_tags_nand(dev, chunk,
+								  chunk_data,
+								  NULL);
+
+				oh = (struct yaffs_obj_hdr *)chunk_data;
+
+				in = yaffs_find_by_number(dev, tags.obj_id);
+				if (in && in->variant_type != oh->type) {
+					/* This should not happen, but somehow
+					 * we've ended up with an obj_id that
+					 * has been reused but not yet deleted,
+					 * and worse still it has changed type.
+					 * Delete the old object.
+					 */
+
+					yaffs_del_obj(in);
+					in = NULL;
+				}
+
+				in = yaffs_find_or_create_by_number(dev,
+								tags.obj_id,
+								oh->type);
+
+				if (!in)
+					alloc_failed = 1;
+
+				if (in && oh->shadows_obj > 0) {
+
+					struct yaffs_shadow_fixer *fixer;
+					fixer =
+						kmalloc(sizeof
+						(struct yaffs_shadow_fixer),
+						GFP_NOFS);
+					if (fixer) {
+						fixer->next = shadow_fixers;
+						shadow_fixers = fixer;
+						fixer->obj_id = tags.obj_id;
+						fixer->shadowed_id =
+						    oh->shadows_obj;
+						yaffs_trace(YAFFS_TRACE_SCAN,
+							" Shadow fixer: %d shadows %d",
+							fixer->obj_id,
+							fixer->shadowed_id);
+
+					}
+
+				}
+
+				if (in && in->valid) {
+					/* We have already filled this one.
+					 * We have a duplicate and need to
+					 * resolve it. */
+
+					unsigned existing_serial = in->serial;
+					unsigned new_serial =
+					    tags.serial_number;
+
+					if (((existing_serial + 1) & 3) ==
+					    new_serial) {
+						/* Use new one - destroy the
+						 * existing one */
+						yaffs_chunk_del(dev,
+								in->hdr_chunk,
+								1, __LINE__);
+						in->valid = 0;
+					} else {
+						/* Use existing - destroy
+						 * this one. */
+						yaffs_chunk_del(dev, chunk, 1,
+								__LINE__);
+					}
+				}
+
+				if (in && !in->valid &&
+				    (tags.obj_id == YAFFS_OBJECTID_ROOT ||
+				     tags.obj_id ==
+				     YAFFS_OBJECTID_LOSTNFOUND)) {
+					/* We only load some info, don't fiddle
+					 * with directory structure */
+					in->valid = 1;
+					in->variant_type = oh->type;
+
+					in->yst_mode = oh->yst_mode;
+					yaffs_load_attribs(in, oh);
+					in->hdr_chunk = chunk;
+					in->serial = tags.serial_number;
+
+				} else if (in && !in->valid) {
+					/* we need to load this info */
+
+					in->valid = 1;
+					in->variant_type = oh->type;
+
+					in->yst_mode = oh->yst_mode;
+					yaffs_load_attribs(in, oh);
+					in->hdr_chunk = chunk;
+					in->serial = tags.serial_number;
+
+					yaffs_set_obj_name_from_oh(in, oh);
+					in->dirty = 0;
+
+					/* directory stuff...
+					 * hook up to parent
+					 */
+
+					parent =
+					    yaffs_find_or_create_by_number
+					    (dev, oh->parent_obj_id,
+					     YAFFS_OBJECT_TYPE_DIRECTORY);
+					if (!parent)
+						alloc_failed = 1;
+					if (parent && parent->variant_type ==
+					    YAFFS_OBJECT_TYPE_UNKNOWN) {
+						/* Set up as a directory */
+						parent->variant_type =
+						    YAFFS_OBJECT_TYPE_DIRECTORY;
+						INIT_LIST_HEAD(&parent->
+							variant.dir_variant.
+							children);
+					} else if (!parent ||
+						parent->variant_type !=
+						YAFFS_OBJECT_TYPE_DIRECTORY) {
+						/* Hoosterman, a problem....
+						 * We're trying to use a
+						 * non-directory as a directory
+						 */
+
+						yaffs_trace(YAFFS_TRACE_ERROR,
+							"yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
+							);
+						parent = dev->lost_n_found;
+					}
+
+					yaffs_add_obj_to_dir(parent, in);
+
+					switch (in->variant_type) {
+					case YAFFS_OBJECT_TYPE_UNKNOWN:
+						/* TODO: got a problem */
+						break;
+					case YAFFS_OBJECT_TYPE_FILE:
+						if (dev->param.
+						    use_header_file_size)
+							in->variant.
+							file_variant.file_size
+							= yaffs_oh_to_size(oh);
+						break;
+					case YAFFS_OBJECT_TYPE_HARDLINK:
+						in->variant.
+						    hardlink_variant.equiv_id =
+						    oh->equiv_id;
+						list_add(&in->hard_links,
+								&hard_list);
+						break;
+					case YAFFS_OBJECT_TYPE_DIRECTORY:
+						/* Do nothing */
+						break;
+					case YAFFS_OBJECT_TYPE_SPECIAL:
+						/* Do nothing */
+						break;
+					case YAFFS_OBJECT_TYPE_SYMLINK:
+						in->variant.symlink_variant.
+						    alias =
+						    yaffs_clone_str(oh->alias);
+						if (!in->variant.
+						    symlink_variant.alias)
+							alloc_failed = 1;
+						break;
+					}
+				}
+			}
+		}
+
+		if (state == YAFFS_BLOCK_STATE_NEEDS_SCAN) {
+			/* If we got this far while scanning,
+			 * then the block is fully allocated. */
+			state = YAFFS_BLOCK_STATE_FULL;
+		}
+
+		if (state == YAFFS_BLOCK_STATE_ALLOCATING) {
+			/* If the block was partially allocated then
+			 * treat it as fully allocated. */
+			state = YAFFS_BLOCK_STATE_FULL;
+			dev->alloc_block = -1;
+		}
+
+		bi->block_state = state;
+
+		/* Now let's see if it was dirty */
+		if (bi->pages_in_use == 0 &&
+		    !bi->has_shrink_hdr &&
+		    bi->block_state == YAFFS_BLOCK_STATE_FULL)
+			yaffs_block_became_dirty(dev, blk);
+	}
+
+	/* OK, we've done all the scanning.
+	 * Fix up the hard link chains: we should now have scanned all the
+	 * objects, so it's time to add these hardlinks.
+	 */
+
+	yaffs_link_fixup(dev, &hard_list);
+
+	/*
+	 * Fix up any shadowed objects.
+	 * There should not be more than one of these.
+	 */
+	{
+		struct yaffs_shadow_fixer *fixer;
+		struct yaffs_obj *obj;
+
+		while (shadow_fixers) {
+			fixer = shadow_fixers;
+			shadow_fixers = fixer->next;
+			/* Complete the rename transaction by deleting the
+			 * shadowed object then setting the object header
+			 * to unshadowed.
+			 */
+			obj = yaffs_find_by_number(dev, fixer->shadowed_id);
+			if (obj)
+				yaffs_del_obj(obj);
+
+			obj = yaffs_find_by_number(dev, fixer->obj_id);
+
+			if (obj)
+				yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
+
+			kfree(fixer);
+		}
+	}
+
+	yaffs_release_temp_buffer(dev, chunk_data);
+
+	if (alloc_failed)
+		return YAFFS_FAIL;
+
+	yaffs_trace(YAFFS_TRACE_SCAN, "yaffs1_scan ends");
+
+	return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_yaffs1.h b/fs/yaffs2/yaffs_yaffs1.h
new file mode 100644
index 0000000..97e2fdd
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs1.h
@@ -0,0 +1,22 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_YAFFS1_H__
+#define __YAFFS_YAFFS1_H__
+
+#include "yaffs_guts.h"
+int yaffs1_scan(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yaffs_yaffs2.c b/fs/yaffs2/yaffs_yaffs2.c
new file mode 100644
index 0000000..f1dc972
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs2.c
@@ -0,0 +1,1532 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+#include "yaffs_yaffs2.h"
+#include "yaffs_checkptrw.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_nand.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_verify.h"
+#include "yaffs_attribs.h"
+#include "yaffs_summary.h"
+
+/*
+ * Checkpoints are really no benefit on very small partitions.
+ *
+ * To save space on small partitions don't bother with checkpoints unless
+ * the partition is at least this big.
+ */
+#define YAFFS_CHECKPOINT_MIN_BLOCKS 60
+#define YAFFS_SMALL_HOLE_THRESHOLD 4
+
+/*
+ * Oldest Dirty Sequence Number handling.
+ */
+
+/* yaffs_calc_oldest_dirty_seq()
+ * yaffs2_find_oldest_dirty_seq()
+ * Calculate the oldest dirty sequence number if we don't know it.
+ */
+void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev)
+{
+	int i;
+	unsigned seq;
+	unsigned block_no = 0;
+	struct yaffs_block_info *b;
+
+	if (!dev->param.is_yaffs2)
+		return;
+
+	/* Find the oldest dirty sequence number. */
+	seq = dev->seq_number + 1;
+	b = dev->block_info;
+	for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+		if (b->block_state == YAFFS_BLOCK_STATE_FULL &&
+		    (b->pages_in_use - b->soft_del_pages) <
+		    dev->param.chunks_per_block &&
+		    b->seq_number < seq) {
+			seq = b->seq_number;
+			block_no = i;
+		}
+		b++;
+	}
+
+	if (block_no) {
+		dev->oldest_dirty_seq = seq;
+		dev->oldest_dirty_block = block_no;
+	}
+}
+
+void yaffs2_find_oldest_dirty_seq(struct yaffs_dev *dev)
+{
+	if (!dev->param.is_yaffs2)
+		return;
+
+	if (!dev->oldest_dirty_seq)
+		yaffs_calc_oldest_dirty_seq(dev);
+}
+
+/*
+ * yaffs_clear_oldest_dirty_seq()
+ * Called when a block is erased or marked bad. (ie. when its seq_number
+ * becomes invalid). If the value matches the oldest then we clear
+ * dev->oldest_dirty_seq to force its recomputation.
+ */
+void yaffs2_clear_oldest_dirty_seq(struct yaffs_dev *dev,
+				   struct yaffs_block_info *bi)
+{
+
+	if (!dev->param.is_yaffs2)
+		return;
+
+	if (!bi || bi->seq_number == dev->oldest_dirty_seq) {
+		dev->oldest_dirty_seq = 0;
+		dev->oldest_dirty_block = 0;
+	}
+}
+
+/*
+ * yaffs2_update_oldest_dirty_seq()
+ * Update the oldest dirty sequence number whenever we dirty a block.
+ * Only do this if the oldest_dirty_seq is actually being tracked.
+ */
+void yaffs2_update_oldest_dirty_seq(struct yaffs_dev *dev, unsigned block_no,
+				    struct yaffs_block_info *bi)
+{
+	if (!dev->param.is_yaffs2)
+		return;
+
+	if (dev->oldest_dirty_seq) {
+		if (dev->oldest_dirty_seq > bi->seq_number) {
+			dev->oldest_dirty_seq = bi->seq_number;
+			dev->oldest_dirty_block = block_no;
+		}
+	}
+}
+
+int yaffs_block_ok_for_gc(struct yaffs_dev *dev, struct yaffs_block_info *bi)
+{
+
+	if (!dev->param.is_yaffs2)
+		return 1;	/* disqualification only applies to yaffs2. */
+
+	if (!bi->has_shrink_hdr)
+		return 1;	/* can gc */
+
+	yaffs2_find_oldest_dirty_seq(dev);
+
+	/* Can't do gc of this block if there are any blocks older than this
+	 * one that have discarded pages.
+	 */
+	return (bi->seq_number <= dev->oldest_dirty_seq);
+}
+
+/*
+ * yaffs2_find_refresh_block()
+ * periodically finds the oldest full block by sequence number for refreshing.
+ * Only for yaffs2.
+ */
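+/*
+ * Illustrative numbers: with refresh_period = 500, roughly one
+ * collection in every 500 (refresh_skip is reset to the period here and
+ * counted down elsewhere) is aimed at the oldest full block, so
+ * long-lived data is periodically rewritten before it decays.
+ */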
+u32 yaffs2_find_refresh_block(struct yaffs_dev *dev)
+{
+	u32 b;
+	u32 oldest = 0;
+	u32 oldest_seq = 0;
+	struct yaffs_block_info *bi;
+
+	if (!dev->param.is_yaffs2)
+		return oldest;
+
+	/*
+	 * If refresh period < 10 then refreshing is disabled.
+	 */
+	if (dev->param.refresh_period < 10)
+		return oldest;
+
+	/*
+	 * Fix broken values.
+	 */
+	if (dev->refresh_skip > dev->param.refresh_period)
+		dev->refresh_skip = dev->param.refresh_period;
+
+	if (dev->refresh_skip > 0)
+		return oldest;
+
+	/*
+	 * Refresh skip is now zero.
+	 * We'll do a refresh this time around....
+	 * Update the refresh skip and find the oldest block.
+	 */
+	dev->refresh_skip = dev->param.refresh_period;
+	dev->refresh_count++;
+	bi = dev->block_info;
+	for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
+
+		if (bi->block_state == YAFFS_BLOCK_STATE_FULL) {
+
+			if (oldest < 1 || bi->seq_number < oldest_seq) {
+				oldest = b;
+				oldest_seq = bi->seq_number;
+			}
+		}
+		bi++;
+	}
+
+	if (oldest > 0) {
+		yaffs_trace(YAFFS_TRACE_GC,
+			"GC refresh count %d selected block %d with seq_number %d",
+			dev->refresh_count, oldest, oldest_seq);
+	}
+
+	return oldest;
+}
+
+int yaffs2_checkpt_required(struct yaffs_dev *dev)
+{
+	int nblocks;
+
+	if (!dev->param.is_yaffs2)
+		return 0;
+
+	nblocks = dev->internal_end_block - dev->internal_start_block + 1;
+
+	return !dev->param.skip_checkpt_wr &&
+	    !dev->read_only && (nblocks >= YAFFS_CHECKPOINT_MIN_BLOCKS);
+}
+
+int yaffs_calc_checkpt_blocks_required(struct yaffs_dev *dev)
+{
+	int retval;
+	int n_bytes = 0;
+	int n_blocks;
+	int dev_blocks;
+
+	if (!dev->param.is_yaffs2)
+		return 0;
+
+	if (!dev->checkpoint_blocks_required && yaffs2_checkpt_required(dev)) {
+		/* Not a valid value so recalculate */
+		dev_blocks = dev->param.end_block - dev->param.start_block + 1;
+		n_bytes += sizeof(struct yaffs_checkpt_validity);
+		n_bytes += sizeof(struct yaffs_checkpt_dev);
+		n_bytes += dev_blocks * sizeof(struct yaffs_block_info);
+		n_bytes += dev_blocks * dev->chunk_bit_stride;
+		n_bytes +=
+		    (sizeof(struct yaffs_checkpt_obj) + sizeof(u32)) *
+		    dev->n_obj;
+		n_bytes += (dev->tnode_size + sizeof(u32)) * dev->n_tnodes;
+		n_bytes += sizeof(struct yaffs_checkpt_validity);
+		n_bytes += sizeof(u32);	/* checksum */
+
+		/* Round up and add 2 blocks to allow for some bad blocks,
+		 * so add 3 */
+
+		n_blocks =
+		    (n_bytes /
+		     (dev->data_bytes_per_chunk *
+		      dev->param.chunks_per_block)) + 3;
+
+		dev->checkpoint_blocks_required = n_blocks;
+	}
+
+	retval = dev->checkpoint_blocks_required - dev->blocks_in_checkpt;
+	if (retval < 0)
+		retval = 0;
+	return retval;
+}
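+
+/*
+ * A rough worked example (illustrative numbers): with 2KiB data chunks
+ * and 64 chunks per block, one block holds 128KiB of checkpoint stream.
+ * A checkpoint needing ~300KiB of state gives 307200 / 131072 = 2 after
+ * the integer division above, and the +3 (one for rounding, two spare
+ * for bad blocks) makes 5 blocks required.
+ */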
+
+/*--------------------- Checkpointing --------------------*/
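+/*
+ * The checkpoint stream written by yaffs2_wr_checkpt_data() below is,
+ * in order:
+ *
+ *   validity marker (head = 1)
+ *   device runtime state (struct yaffs_checkpt_dev)
+ *   block info array, then the chunk bitmaps
+ *   one struct yaffs_checkpt_obj per object, each file followed by its
+ *     level-0 tnodes and a ~0 end marker
+ *   end-of-objects record (filled with 0xff)
+ *   validity marker (head = 0)
+ *   checksum (u32)
+ *
+ * The read side consumes exactly the same sequence; any mismatch
+ * invalidates the checkpoint and forces a full rescan.
+ */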
+
+static int yaffs2_wr_checkpt_validity_marker(struct yaffs_dev *dev, int head)
+{
+	struct yaffs_checkpt_validity cp;
+
+	memset(&cp, 0, sizeof(cp));
+
+	cp.struct_type = sizeof(cp);
+	cp.magic = YAFFS_MAGIC;
+	cp.version = YAFFS_CHECKPOINT_VERSION;
+	cp.head = (head) ? 1 : 0;
+
+	return (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp)) ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_validity_marker(struct yaffs_dev *dev, int head)
+{
+	struct yaffs_checkpt_validity cp;
+	int ok;
+
+	ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+	if (ok)
+		ok = (cp.struct_type == sizeof(cp)) &&
+		    (cp.magic == YAFFS_MAGIC) &&
+		    (cp.version == YAFFS_CHECKPOINT_VERSION) &&
+		    (cp.head == ((head) ? 1 : 0));
+	return ok ? 1 : 0;
+}
+
+static void yaffs2_dev_to_checkpt_dev(struct yaffs_checkpt_dev *cp,
+				      struct yaffs_dev *dev)
+{
+	cp->n_erased_blocks = dev->n_erased_blocks;
+	cp->alloc_block = dev->alloc_block;
+	cp->alloc_page = dev->alloc_page;
+	cp->n_free_chunks = dev->n_free_chunks;
+
+	cp->n_deleted_files = dev->n_deleted_files;
+	cp->n_unlinked_files = dev->n_unlinked_files;
+	cp->n_bg_deletions = dev->n_bg_deletions;
+	cp->seq_number = dev->seq_number;
+
+}
+
+static void yaffs_checkpt_dev_to_dev(struct yaffs_dev *dev,
+				     struct yaffs_checkpt_dev *cp)
+{
+	dev->n_erased_blocks = cp->n_erased_blocks;
+	dev->alloc_block = cp->alloc_block;
+	dev->alloc_page = cp->alloc_page;
+	dev->n_free_chunks = cp->n_free_chunks;
+
+	dev->n_deleted_files = cp->n_deleted_files;
+	dev->n_unlinked_files = cp->n_unlinked_files;
+	dev->n_bg_deletions = cp->n_bg_deletions;
+	dev->seq_number = cp->seq_number;
+}
+
+static int yaffs2_wr_checkpt_dev(struct yaffs_dev *dev)
+{
+	struct yaffs_checkpt_dev cp;
+	u32 n_bytes;
+	u32 n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
+	int ok;
+
+	/* Write device runtime values */
+	yaffs2_dev_to_checkpt_dev(&cp, dev);
+	cp.struct_type = sizeof(cp);
+
+	ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
+	if (!ok)
+		return 0;
+
+	/* Write block info */
+	n_bytes = n_blocks * sizeof(struct yaffs_block_info);
+	ok = (yaffs2_checkpt_wr(dev, dev->block_info, n_bytes) == n_bytes);
+	if (!ok)
+		return 0;
+
+	/* Write chunk bits */
+	n_bytes = n_blocks * dev->chunk_bit_stride;
+	ok = (yaffs2_checkpt_wr(dev, dev->chunk_bits, n_bytes) == n_bytes);
+
+	return ok ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_dev(struct yaffs_dev *dev)
+{
+	struct yaffs_checkpt_dev cp;
+	u32 n_bytes;
+	u32 n_blocks =
+	    (dev->internal_end_block - dev->internal_start_block + 1);
+	int ok;
+
+	ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+	if (!ok)
+		return 0;
+
+	if (cp.struct_type != sizeof(cp))
+		return 0;
+
+	yaffs_checkpt_dev_to_dev(dev, &cp);
+
+	n_bytes = n_blocks * sizeof(struct yaffs_block_info);
+
+	ok = (yaffs2_checkpt_rd(dev, dev->block_info, n_bytes) == n_bytes);
+
+	if (!ok)
+		return 0;
+
+	n_bytes = n_blocks * dev->chunk_bit_stride;
+
+	ok = (yaffs2_checkpt_rd(dev, dev->chunk_bits, n_bytes) == n_bytes);
+
+	return ok ? 1 : 0;
+}
+
+static void yaffs2_obj_checkpt_obj(struct yaffs_checkpt_obj *cp,
+				   struct yaffs_obj *obj)
+{
+	cp->obj_id = obj->obj_id;
+	cp->parent_id = (obj->parent) ? obj->parent->obj_id : 0;
+	cp->hdr_chunk = obj->hdr_chunk;
+	cp->variant_type = obj->variant_type;
+	cp->deleted = obj->deleted;
+	cp->soft_del = obj->soft_del;
+	cp->unlinked = obj->unlinked;
+	cp->fake = obj->fake;
+	cp->rename_allowed = obj->rename_allowed;
+	cp->unlink_allowed = obj->unlink_allowed;
+	cp->serial = obj->serial;
+	cp->n_data_chunks = obj->n_data_chunks;
+
+	if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+		cp->size_or_equiv_obj = obj->variant.file_variant.file_size;
+	else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
+		cp->size_or_equiv_obj = obj->variant.hardlink_variant.equiv_id;
+}
+
+static int yaffs2_checkpt_obj_to_obj(struct yaffs_obj *obj,
+				     struct yaffs_checkpt_obj *cp)
+{
+	struct yaffs_obj *parent;
+
+	if (obj->variant_type != cp->variant_type) {
+		yaffs_trace(YAFFS_TRACE_ERROR,
+			"Checkpoint read object %d type %d chunk %d does not match existing object type %d",
+			cp->obj_id, cp->variant_type, cp->hdr_chunk,
+			obj->variant_type);
+		return 0;
+	}
+
+	obj->obj_id = cp->obj_id;
+
+	if (cp->parent_id)
+		parent = yaffs_find_or_create_by_number(obj->my_dev,
+						cp->parent_id,
+						YAFFS_OBJECT_TYPE_DIRECTORY);
+	else
+		parent = NULL;
+
+	if (parent) {
+		if (parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+			yaffs_trace(YAFFS_TRACE_ALWAYS,
+				"Checkpoint read object %d parent %d type %d chunk %d Parent type, %d, not directory",
+				cp->obj_id, cp->parent_id,
+				cp->variant_type, cp->hdr_chunk,
+				parent->variant_type);
+			return 0;
+		}
+		yaffs_add_obj_to_dir(parent, obj);
+	}
+
+	obj->hdr_chunk = cp->hdr_chunk;
+	obj->variant_type = cp->variant_type;
+	obj->deleted = cp->deleted;
+	obj->soft_del = cp->soft_del;
+	obj->unlinked = cp->unlinked;
+	obj->fake = cp->fake;
+	obj->rename_allowed = cp->rename_allowed;
+	obj->unlink_allowed = cp->unlink_allowed;
+	obj->serial = cp->serial;
+	obj->n_data_chunks = cp->n_data_chunks;
+
+	if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+		obj->variant.file_variant.file_size = cp->size_or_equiv_obj;
+	else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
+		obj->variant.hardlink_variant.equiv_id = cp->size_or_equiv_obj;
+
+	if (obj->hdr_chunk > 0)
+		obj->lazy_loaded = 1;
+	return 1;
+}
+
+static int yaffs2_checkpt_tnode_worker(struct yaffs_obj *in,
+				       struct yaffs_tnode *tn, u32 level,
+				       int chunk_offset)
+{
+	int i;
+	struct yaffs_dev *dev = in->my_dev;
+	int ok = 1;
+	u32 base_offset;
+
+	if (!tn)
+		return 1;
+
+	if (level > 0) {
+		for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) {
+			if (!tn->internal[i])
+				continue;
+			ok = yaffs2_checkpt_tnode_worker(in,
+				 tn->internal[i],
+				 level - 1,
+				 (chunk_offset <<
+				  YAFFS_TNODES_INTERNAL_BITS) + i);
+		}
+		return ok;
+	}
+
+	/* Level 0 tnode */
+	base_offset = chunk_offset << YAFFS_TNODES_LEVEL0_BITS;
+	ok = (yaffs2_checkpt_wr(dev, &base_offset, sizeof(base_offset)) ==
+			sizeof(base_offset));
+	if (ok)
+		ok = (yaffs2_checkpt_wr(dev, tn, dev->tnode_size) ==
+			dev->tnode_size);
+
+	return ok;
+}
+
+static int yaffs2_wr_checkpt_tnodes(struct yaffs_obj *obj)
+{
+	u32 end_marker = ~0;
+	int ok = 1;
+
+	if (obj->variant_type != YAFFS_OBJECT_TYPE_FILE)
+		return ok;
+
+	ok = yaffs2_checkpt_tnode_worker(obj,
+					 obj->variant.file_variant.top,
+					 obj->variant.file_variant.
+					 top_level, 0);
+	if (ok)
+		ok = (yaffs2_checkpt_wr(obj->my_dev, &end_marker,
+				sizeof(end_marker)) == sizeof(end_marker));
+
+	return ok ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_tnodes(struct yaffs_obj *obj)
+{
+	u32 base_chunk;
+	int ok = 1;
+	struct yaffs_dev *dev = obj->my_dev;
+	struct yaffs_file_var *file_struct_ptr = &obj->variant.file_variant;
+	struct yaffs_tnode *tn;
+	int nread = 0;
+
+	ok = (yaffs2_checkpt_rd(dev, &base_chunk, sizeof(base_chunk)) ==
+	      sizeof(base_chunk));
+
+	while (ok && (~base_chunk)) {
+		nread++;
+		/* Read level 0 tnode */
+
+		tn = yaffs_get_tnode(dev);
+		if (tn)
+			ok = (yaffs2_checkpt_rd(dev, tn, dev->tnode_size) ==
+				dev->tnode_size);
+		else
+			ok = 0;
+
+		if (tn && ok)
+			ok = yaffs_add_find_tnode_0(dev,
+						    file_struct_ptr,
+						    base_chunk, tn) ? 1 : 0;
+
+		if (ok)
+			ok = (yaffs2_checkpt_rd
+			      (dev, &base_chunk,
+			       sizeof(base_chunk)) == sizeof(base_chunk));
+	}
+
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+		"Checkpoint read tnodes %d records, last %d. ok %d",
+		nread, base_chunk, ok);
+
+	return ok ? 1 : 0;
+}
+
+static int yaffs2_wr_checkpt_objs(struct yaffs_dev *dev)
+{
+	struct yaffs_obj *obj;
+	struct yaffs_checkpt_obj cp;
+	int i;
+	int ok = 1;
+	struct list_head *lh;
+
+	/* Iterate through the objects in each hash entry,
+	 * dumping them to the checkpointing stream.
+	 */
+
+	for (i = 0; ok && i < YAFFS_NOBJECT_BUCKETS; i++) {
+		list_for_each(lh, &dev->obj_bucket[i].list) {
+			obj = list_entry(lh, struct yaffs_obj, hash_link);
+			if (!obj->defered_free) {
+				yaffs2_obj_checkpt_obj(&cp, obj);
+				cp.struct_type = sizeof(cp);
+
+				yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+					"Checkpoint write object %d parent %d type %d chunk %d obj addr %p",
+					cp.obj_id, cp.parent_id,
+					cp.variant_type, cp.hdr_chunk, obj);
+
+				ok = (yaffs2_checkpt_wr(dev, &cp,
+						sizeof(cp)) == sizeof(cp));
+
+				if (ok &&
+					obj->variant_type ==
+					YAFFS_OBJECT_TYPE_FILE)
+					ok = yaffs2_wr_checkpt_tnodes(obj);
+			}
+		}
+	}
+
+	/* Dump end of list */
+	memset(&cp, 0xff, sizeof(struct yaffs_checkpt_obj));
+	cp.struct_type = sizeof(cp);
+
+	if (ok)
+		ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+	return ok ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_objs(struct yaffs_dev *dev)
+{
+	struct yaffs_obj *obj;
+	struct yaffs_checkpt_obj cp;
+	int ok = 1;
+	int done = 0;
+	LIST_HEAD(hard_list);
+
+
+	while (ok && !done) {
+		ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+		if (cp.struct_type != sizeof(cp)) {
+			yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+				"struct size %d instead of %d ok %d",
+				cp.struct_type, (int)sizeof(cp), ok);
+			ok = 0;
+		}
+
+		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+			"Checkpoint read object %d parent %d type %d chunk %d ",
+			cp.obj_id, cp.parent_id, cp.variant_type,
+			cp.hdr_chunk);
+
+		if (ok && cp.obj_id == ~0) {
+			done = 1;
+		} else if (ok) {
+			obj =
+			    yaffs_find_or_create_by_number(dev, cp.obj_id,
+							   cp.variant_type);
+			if (obj) {
+				ok = yaffs2_checkpt_obj_to_obj(obj, &cp);
+				if (!ok)
+					break;
+				if (obj->variant_type ==
+					YAFFS_OBJECT_TYPE_FILE) {
+					ok = yaffs2_rd_checkpt_tnodes(obj);
+				} else if (obj->variant_type ==
+					YAFFS_OBJECT_TYPE_HARDLINK) {
+					list_add(&obj->hard_links, &hard_list);
+				}
+			} else {
+				ok = 0;
+			}
+		}
+	}
+
+	if (ok)
+		yaffs_link_fixup(dev, &hard_list);
+
+	return ok ? 1 : 0;
+}
+
+static int yaffs2_wr_checkpt_sum(struct yaffs_dev *dev)
+{
+	u32 checkpt_sum;
+	int ok;
+
+	yaffs2_get_checkpt_sum(dev, &checkpt_sum);
+
+	ok = (yaffs2_checkpt_wr(dev, &checkpt_sum, sizeof(checkpt_sum)) ==
+		sizeof(checkpt_sum));
+
+	if (!ok)
+		return 0;
+
+	return 1;
+}
+
+static int yaffs2_rd_checkpt_sum(struct yaffs_dev *dev)
+{
+	u32 checkpt_sum0;
+	u32 checkpt_sum1;
+	int ok;
+
+	yaffs2_get_checkpt_sum(dev, &checkpt_sum0);
+
+	ok = (yaffs2_checkpt_rd(dev, &checkpt_sum1, sizeof(checkpt_sum1)) ==
+		sizeof(checkpt_sum1));
+
+	if (!ok)
+		return 0;
+
+	if (checkpt_sum0 != checkpt_sum1)
+		return 0;
+
+	return 1;
+}
+
+static int yaffs2_wr_checkpt_data(struct yaffs_dev *dev)
+{
+	int ok = 1;
+
+	if (!yaffs2_checkpt_required(dev)) {
+		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+			"skipping checkpoint write");
+		ok = 0;
+	}
+
+	if (ok)
+		ok = yaffs2_checkpt_open(dev, 1);
+
+	if (ok) {
+		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+			"write checkpoint validity");
+		ok = yaffs2_wr_checkpt_validity_marker(dev, 1);
+	}
+	if (ok) {
+		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+			"write checkpoint device");
+		ok = yaffs2_wr_checkpt_dev(dev);
+	}
+	if (ok) {
+		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+			"write checkpoint objects");
+		ok = yaffs2_wr_checkpt_objs(dev);
+	}
+	if (ok) {
+		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+			"write checkpoint validity");
+		ok = yaffs2_wr_checkpt_validity_marker(dev, 0);
+	}
+
+	if (ok)
+		ok = yaffs2_wr_checkpt_sum(dev);
+
+	if (!yaffs_checkpt_close(dev))
+		ok = 0;
+
+	if (ok)
+		dev->is_checkpointed = 1;
+	else
+		dev->is_checkpointed = 0;
+
+	return dev->is_checkpointed;
+}
+
+static int yaffs2_rd_checkpt_data(struct yaffs_dev *dev)
+{
+	int ok = 1;
+
+	if (!dev->param.is_yaffs2)
+		ok = 0;
+
+	if (ok && dev->param.skip_checkpt_rd) {
+		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+			"skipping checkpoint read");
+		ok = 0;
+	}
+
+	if (ok)
+		ok = yaffs2_checkpt_open(dev, 0); /* open for read */
+
+	if (ok) {
+		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+			"read checkpoint validity");
+		ok = yaffs2_rd_checkpt_validity_marker(dev, 1);
+	}
+	if (ok) {
+		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+			"read checkpoint device");
+		ok = yaffs2_rd_checkpt_dev(dev);
+	}
+	if (ok) {
+		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+			"read checkpoint objects");
+		ok = yaffs2_rd_checkpt_objs(dev);
+	}
+	if (ok) {
+		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+			"read checkpoint validity");
+		ok = yaffs2_rd_checkpt_validity_marker(dev, 0);
+	}
+
+	if (ok) {
+		ok = yaffs2_rd_checkpt_sum(dev);
+		yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+			"read checkpoint checksum %d", ok);
+	}
+
+	if (!yaffs_checkpt_close(dev))
+		ok = 0;
+
+	if (ok)
+		dev->is_checkpointed = 1;
+	else
+		dev->is_checkpointed = 0;
+
+	return ok ? 1 : 0;
+}
+
+void yaffs2_checkpt_invalidate(struct yaffs_dev *dev)
+{
+	if (dev->is_checkpointed || dev->blocks_in_checkpt > 0) {
+		dev->is_checkpointed = 0;
+		yaffs2_checkpt_invalidate_stream(dev);
+	}
+	if (dev->param.sb_dirty_fn)
+		dev->param.sb_dirty_fn(dev);
+}
+
+int yaffs_checkpoint_save(struct yaffs_dev *dev)
+{
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+		"save entry: is_checkpointed %d",
+		dev->is_checkpointed);
+
+	yaffs_verify_objects(dev);
+	yaffs_verify_blocks(dev);
+	yaffs_verify_free_chunks(dev);
+
+	if (!dev->is_checkpointed) {
+		yaffs2_checkpt_invalidate(dev);
+		yaffs2_wr_checkpt_data(dev);
+	}
+
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT | YAFFS_TRACE_MOUNT,
+		"save exit: is_checkpointed %d",
+		dev->is_checkpointed);
+
+	return dev->is_checkpointed;
+}
+
+int yaffs2_checkpt_restore(struct yaffs_dev *dev)
+{
+	int retval;
+
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+		"restore entry: is_checkpointed %d",
+		dev->is_checkpointed);
+
+	retval = yaffs2_rd_checkpt_data(dev);
+
+	if (dev->is_checkpointed) {
+		yaffs_verify_objects(dev);
+		yaffs_verify_blocks(dev);
+		yaffs_verify_free_chunks(dev);
+	}
+
+	yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+		"restore exit: is_checkpointed %d",
+		dev->is_checkpointed);
+
+	return retval;
+}
+
+int yaffs2_handle_hole(struct yaffs_obj *obj, loff_t new_size)
+{
+	/* If new_size > old_file_size then we are going to be writing a
+	 * hole. If the hole is small then write zeros, otherwise write a
+	 * start-of-hole marker.
+	 */
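+	/* Illustrative numbers: with 2KiB data chunks the small-hole
+	 * threshold is 4 * 2KiB = 8KiB, so extending a file by 6KiB
+	 * writes three chunks of zeros, while extending it by 1MiB just
+	 * records a hole-start header via yaffs_update_oh().
+	 */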
+	loff_t old_file_size;
+	loff_t increase;
+	int small_hole;
+	int result = YAFFS_OK;
+	struct yaffs_dev *dev = NULL;
+	u8 *local_buffer = NULL;
+	int small_increase_ok = 0;
+
+	if (!obj)
+		return YAFFS_FAIL;
+
+	if (obj->variant_type != YAFFS_OBJECT_TYPE_FILE)
+		return YAFFS_FAIL;
+
+	dev = obj->my_dev;
+
+	/* Bail out if not yaffs2 mode */
+	if (!dev->param.is_yaffs2)
+		return YAFFS_OK;
+
+	old_file_size = obj->variant.file_variant.file_size;
+
+	if (new_size <= old_file_size)
+		return YAFFS_OK;
+
+	increase = new_size - old_file_size;
+
+	if (increase < YAFFS_SMALL_HOLE_THRESHOLD * dev->data_bytes_per_chunk &&
+	    yaffs_check_alloc_available(dev, YAFFS_SMALL_HOLE_THRESHOLD + 1))
+		small_hole = 1;
+	else
+		small_hole = 0;
+
+	if (small_hole)
+		local_buffer = yaffs_get_temp_buffer(dev);
+
+	if (local_buffer) {
+		/* fill hole with zero bytes */
+		loff_t pos = old_file_size;
+		int this_write;
+		int written;
+		memset(local_buffer, 0, dev->data_bytes_per_chunk);
+		small_increase_ok = 1;
+
+		while (increase > 0 && small_increase_ok) {
+			this_write = increase;
+			if (this_write > dev->data_bytes_per_chunk)
+				this_write = dev->data_bytes_per_chunk;
+			written =
+			    yaffs_do_file_wr(obj, local_buffer, pos, this_write,
+					     0);
+			if (written == this_write) {
+				pos += this_write;
+				increase -= this_write;
+			} else {
+				small_increase_ok = 0;
+			}
+		}
+
+		yaffs_release_temp_buffer(dev, local_buffer);
+
+		/* If out of space then reverse any chunks we've added */
+		if (!small_increase_ok)
+			yaffs_resize_file_down(obj, old_file_size);
+	}
+
+	if (!small_increase_ok &&
+	    obj->parent &&
+	    obj->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
+	    obj->parent->obj_id != YAFFS_OBJECTID_DELETED) {
+		/* Write a hole start header with the old file size */
+		yaffs_update_oh(obj, NULL, 0, 1, 0, NULL);
+	}
+
+	return result;
+}
+
+struct yaffs_block_index {
+	int seq;
+	int block;
+};
+
+static int yaffs2_ybicmp(const void *a, const void *b)
+{
+	int aseq = ((struct yaffs_block_index *)a)->seq;
+	int bseq = ((struct yaffs_block_index *)b)->seq;
+	int ablock = ((struct yaffs_block_index *)a)->block;
+	int bblock = ((struct yaffs_block_index *)b)->block;
+
+	if (aseq == bseq)
+		return ablock - bblock;
+
+	return aseq - bseq;
+}
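+
+/*
+ * Example: given index entries {seq 7, blk 12}, {seq 3, blk 40} and
+ * {seq 3, blk 9}, sort() orders them {3,9}, {3,40}, {7,12} - oldest
+ * sequence first, ties broken by block number.
+ */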
+
+static inline int yaffs2_scan_chunk(struct yaffs_dev *dev,
+		struct yaffs_block_info *bi,
+		int blk, int chunk_in_block,
+		int *found_chunks,
+		u8 *chunk_data,
+		struct list_head *hard_list,
+		int summary_available)
+{
+	struct yaffs_obj_hdr *oh;
+	struct yaffs_obj *in;
+	struct yaffs_obj *parent;
+	int equiv_id;
+	loff_t file_size;
+	int is_shrink;
+	int is_unlinked;
+	struct yaffs_ext_tags tags;
+	int result;
+	int alloc_failed = 0;
+	int chunk = blk * dev->param.chunks_per_block + chunk_in_block;
+	struct yaffs_file_var *file_var;
+	struct yaffs_hardlink_var *hl_var;
+	struct yaffs_symlink_var *sl_var;
+
+	if (summary_available) {
+		result = yaffs_summary_fetch(dev, &tags, chunk_in_block);
+		tags.seq_number = bi->seq_number;
+	}
+
+	if (!summary_available || tags.obj_id == 0) {
+		result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL, &tags);
+		dev->tags_used++;
+	} else {
+		dev->summary_used++;
+	}
+
+	/* Let's have a good look at this chunk... */
+
+	if (!tags.chunk_used) {
+		/* An unassigned chunk in the block.
+		 * If there are used chunks after this one, then
+		 * it is a chunk that was skipped due to failing
+		 * the erased check. Just skip it so that it can
+		 * be deleted.
+		 * But, more typically, we get here when this is
+		 * an unallocated chunk and this means that
+		 * either the block is empty or this is the one
+		 * being allocated from.
+		 */
+
+		if (*found_chunks) {
+			/* This is a chunk that was skipped due
+			 * to failing the erased check */
+		} else if (chunk_in_block == 0) {
+			/* We're looking at the first chunk in
+			 * the block so the block is unused */
+			bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
+			dev->n_erased_blocks++;
+		} else {
+			if (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
+			    bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
+				if (dev->seq_number == bi->seq_number) {
+					/* Allocating from this block*/
+					yaffs_trace(YAFFS_TRACE_SCAN,
+					    " Allocating from %d %d",
+					    blk, chunk_in_block);
+
+					bi->block_state =
+						YAFFS_BLOCK_STATE_ALLOCATING;
+					dev->alloc_block = blk;
+					dev->alloc_page = chunk_in_block;
+					dev->alloc_block_finder = blk;
+				} else {
+					/* This is a partially written block
+					 * that is not the current
+					 * allocation block.
+					 */
+					yaffs_trace(YAFFS_TRACE_SCAN,
+						"Partially written block %d detected. gc will fix this.",
+						blk);
+				}
+			}
+		}
+
+		dev->n_free_chunks++;
+
+	} else if (tags.ecc_result ==
+		YAFFS_ECC_RESULT_UNFIXED) {
+		yaffs_trace(YAFFS_TRACE_SCAN,
+			" Unfixed ECC in chunk(%d:%d), chunk ignored",
+			blk, chunk_in_block);
+		dev->n_free_chunks++;
+	} else if (tags.obj_id > YAFFS_MAX_OBJECT_ID ||
+		   tags.chunk_id > YAFFS_MAX_CHUNK_ID ||
+		   tags.obj_id == YAFFS_OBJECTID_SUMMARY ||
+		   (tags.chunk_id > 0 &&
+		     tags.n_bytes > dev->data_bytes_per_chunk) ||
+		   tags.seq_number != bi->seq_number) {
+		yaffs_trace(YAFFS_TRACE_SCAN,
+			"Chunk (%d:%d) with bad tags:obj = %d, chunk_id = %d, n_bytes = %d, ignored",
+			blk, chunk_in_block, tags.obj_id,
+			tags.chunk_id, tags.n_bytes);
+		dev->n_free_chunks++;
+	} else if (tags.chunk_id > 0) {
+		/* chunk_id > 0 so it is a data chunk... */
+		loff_t endpos;
+		loff_t chunk_base = (tags.chunk_id - 1) *
+					dev->data_bytes_per_chunk;
+
+		*found_chunks = 1;
+
+		yaffs_set_chunk_bit(dev, blk, chunk_in_block);
+		bi->pages_in_use++;
+
+		in = yaffs_find_or_create_by_number(dev,
+					tags.obj_id,
+					YAFFS_OBJECT_TYPE_FILE);
+		if (!in)
+			/* Out of memory */
+			alloc_failed = 1;
+
+		if (in &&
+		    in->variant_type == YAFFS_OBJECT_TYPE_FILE &&
+		    chunk_base < in->variant.file_variant.shrink_size) {
+			/* This has not been invalidated by
+			 * a resize */
+			if (!yaffs_put_chunk_in_file(in, tags.chunk_id,
+								chunk, -1))
+				alloc_failed = 1;
+
+			/* File size is calculated by looking at
+			 * the data chunks if we have not
+			 * seen an object header yet.
+			 * Stop this practice once we find an
+			 * object header.
+			 */
+			endpos = chunk_base + tags.n_bytes;
+
+			if (!in->valid &&
+			    in->variant.file_variant.scanned_size < endpos) {
+				in->variant.file_variant.
+				    scanned_size = endpos;
+				in->variant.file_variant.
+				    file_size = endpos;
+			}
+		} else if (in) {
+			/* This chunk has been invalidated by a
+			 * resize, or a past file deletion
+			 * so delete the chunk*/
+			yaffs_chunk_del(dev, chunk, 1, __LINE__);
+		}
+	} else {
+		/* chunk_id == 0, so it is an ObjectHeader.
+		 * Thus, we read in the object header and make
+		 * the object
+		 */
+		*found_chunks = 1;
+
+		yaffs_set_chunk_bit(dev, blk, chunk_in_block);
+		bi->pages_in_use++;
+
+		oh = NULL;
+		in = NULL;
+
+		if (tags.extra_available) {
+			in = yaffs_find_or_create_by_number(dev,
+					tags.obj_id,
+					tags.extra_obj_type);
+			if (!in)
+				alloc_failed = 1;
+		}
+
+		if (!in ||
+		    (!in->valid && dev->param.disable_lazy_load) ||
+		    tags.extra_shadows ||
+		    (!in->valid && (tags.obj_id == YAFFS_OBJECTID_ROOT ||
+				 tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND))) {
+
+			/* If we don't have valid info then we
+			 * need to read the chunk
+			 * TODO In future we can probably defer
+			 * reading the chunk and living with
+			 * invalid data until needed.
+			 */
+
+			result = yaffs_rd_chunk_tags_nand(dev,
+						  chunk,
+						  chunk_data,
+						  NULL);
+
+			oh = (struct yaffs_obj_hdr *)chunk_data;
+
+			if (dev->param.inband_tags) {
+				/* Fix up the header if they got
+				 * corrupted by inband tags */
+				oh->shadows_obj =
+				    oh->inband_shadowed_obj_id;
+				oh->is_shrink =
+				    oh->inband_is_shrink;
+			}
+
+			if (!in) {
+				in = yaffs_find_or_create_by_number(dev,
+							tags.obj_id, oh->type);
+				if (!in)
+					alloc_failed = 1;
+			}
+		}
+
+		if (!in) {
+			/* TODO Hoosterman we have a problem! */
+			yaffs_trace(YAFFS_TRACE_ERROR,
+				"yaffs tragedy: Could not make object for object  %d at chunk %d during scan",
+				tags.obj_id, chunk);
+			return YAFFS_FAIL;
+		}
+
+		if (in->valid) {
+			/* We have already filled this one.
+			 * We have a duplicate that will be
+			 * discarded, but we first have to suck
+			 * out resize info if it is a file.
+			 */
+			if ((in->variant_type == YAFFS_OBJECT_TYPE_FILE) &&
+				((oh && oh->type == YAFFS_OBJECT_TYPE_FILE) ||
+				 (tags.extra_available &&
+				  tags.extra_obj_type == YAFFS_OBJECT_TYPE_FILE)
+				)) {
+				loff_t this_size = (oh) ?
+					yaffs_oh_to_size(oh) :
+					tags.extra_file_size;
+				u32 parent_obj_id = (oh) ?
+					oh->parent_obj_id :
+					tags.extra_parent_id;
+
+				is_shrink = (oh) ?
+					oh->is_shrink :
+					tags.extra_is_shrink;
+
+				/* If it is deleted (unlinked
+				 * at start also means deleted)
+				 * we treat the file size as
+				 * being zeroed at this point.
+				 */
+				if (parent_obj_id == YAFFS_OBJECTID_DELETED ||
+				    parent_obj_id == YAFFS_OBJECTID_UNLINKED) {
+					this_size = 0;
+					is_shrink = 1;
+				}
+
+				if (is_shrink &&
+				    in->variant.file_variant.shrink_size >
+				    this_size)
+					in->variant.file_variant.shrink_size =
+					this_size;
+
+				if (is_shrink)
+					bi->has_shrink_hdr = 1;
+			}
+			/* Use existing - destroy this one. */
+			yaffs_chunk_del(dev, chunk, 1, __LINE__);
+		}
+
+		if (!in->valid && in->variant_type !=
+		    (oh ? oh->type : tags.extra_obj_type))
+			yaffs_trace(YAFFS_TRACE_ERROR,
+				"yaffs tragedy: Bad object type, %d != %d, for object %d at chunk %d during scan",
+				oh ? oh->type : tags.extra_obj_type,
+				in->variant_type, tags.obj_id,
+				chunk);
+
+		if (!in->valid &&
+		    (tags.obj_id == YAFFS_OBJECTID_ROOT ||
+		     tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND)) {
+			/* We only load some info, don't fiddle
+			 * with directory structure */
+			in->valid = 1;
+
+			if (oh) {
+				in->yst_mode = oh->yst_mode;
+				yaffs_load_attribs(in, oh);
+				in->lazy_loaded = 0;
+			} else {
+				in->lazy_loaded = 1;
+			}
+			in->hdr_chunk = chunk;
+
+		} else if (!in->valid) {
+			/* we need to load this info */
+			in->valid = 1;
+			in->hdr_chunk = chunk;
+			if (oh) {
+				in->variant_type = oh->type;
+				in->yst_mode = oh->yst_mode;
+				yaffs_load_attribs(in, oh);
+
+				if (oh->shadows_obj > 0)
+					yaffs_handle_shadowed_obj(dev,
+					     oh->shadows_obj, 1);
+
+				yaffs_set_obj_name_from_oh(in, oh);
+				parent = yaffs_find_or_create_by_number(dev,
+						oh->parent_obj_id,
+						YAFFS_OBJECT_TYPE_DIRECTORY);
+				file_size = yaffs_oh_to_size(oh);
+				is_shrink = oh->is_shrink;
+				equiv_id = oh->equiv_id;
+			} else {
+				in->variant_type = tags.extra_obj_type;
+				parent = yaffs_find_or_create_by_number(dev,
+						tags.extra_parent_id,
+						YAFFS_OBJECT_TYPE_DIRECTORY);
+				file_size = tags.extra_file_size;
+				is_shrink = tags.extra_is_shrink;
+				equiv_id = tags.extra_equiv_id;
+				in->lazy_loaded = 1;
+			}
+			in->dirty = 0;
+
+			if (!parent)
+				alloc_failed = 1;
+
+			/* directory stuff...
+			 * hook up to parent
+			 */
+
+			if (parent &&
+			    parent->variant_type == YAFFS_OBJECT_TYPE_UNKNOWN) {
+				/* Set up as a directory */
+				parent->variant_type =
+					YAFFS_OBJECT_TYPE_DIRECTORY;
+				INIT_LIST_HEAD(&parent->
+						variant.dir_variant.children);
+			} else if (!parent ||
+				   parent->variant_type !=
+					YAFFS_OBJECT_TYPE_DIRECTORY) {
+				/* Hoosterman, another problem....
+				 * Trying to use a non-directory as a directory
+				 */
+
+				yaffs_trace(YAFFS_TRACE_ERROR,
+					"yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
+					);
+				parent = dev->lost_n_found;
+			}
+			yaffs_add_obj_to_dir(parent, in);
+
+			is_unlinked = (parent == dev->del_dir) ||
+					(parent == dev->unlinked_dir);
+
+			if (is_shrink)
+				/* Mark the block */
+				bi->has_shrink_hdr = 1;
+
+			/* Note re hardlinks.
+			 * Since we might scan a hardlink before its equivalent
+			 * object is scanned we put them all in a list.
+			 * After scanning is complete, we should have all the
+			 * objects, so we run through this list and fix up all
+			 * the chains.
+			 */
+
+			switch (in->variant_type) {
+			case YAFFS_OBJECT_TYPE_UNKNOWN:
+				/* TODO: got a problem */
+				break;
+			case YAFFS_OBJECT_TYPE_FILE:
+				file_var = &in->variant.file_variant;
+				if (file_var->scanned_size < file_size) {
+					/* This covers the case where the file
+					 * size is greater than the data held.
+					 * This will happen if the file is
+					 * resized to be larger than its
+					 * current data extents.
+					 */
+					file_var->file_size = file_size;
+					file_var->scanned_size = file_size;
+				}
+
+				if (file_var->shrink_size > file_size)
+					file_var->shrink_size = file_size;
+
+				break;
+			case YAFFS_OBJECT_TYPE_HARDLINK:
+				hl_var = &in->variant.hardlink_variant;
+				if (!is_unlinked) {
+					hl_var->equiv_id = equiv_id;
+					list_add(&in->hard_links, hard_list);
+				}
+				break;
+			case YAFFS_OBJECT_TYPE_DIRECTORY:
+				/* Do nothing */
+				break;
+			case YAFFS_OBJECT_TYPE_SPECIAL:
+				/* Do nothing */
+				break;
+			case YAFFS_OBJECT_TYPE_SYMLINK:
+				sl_var = &in->variant.symlink_variant;
+				if (oh) {
+					sl_var->alias =
+					    yaffs_clone_str(oh->alias);
+					if (!sl_var->alias)
+						alloc_failed = 1;
+				}
+				break;
+			}
+		}
+	}
+	return alloc_failed ? YAFFS_FAIL : YAFFS_OK;
+}
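+
+/*
+ * Why scan backwards? The blocks are sorted by sequence number and
+ * walked newest-first, so the first copy of any chunk encountered is
+ * the live one and older duplicates can simply be deleted; the in_scan
+ * argument of -1 passed to yaffs_put_chunk_in_file() above signals
+ * that a backward scan is in progress.
+ */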
+
+int yaffs2_scan_backwards(struct yaffs_dev *dev)
+{
+	int blk;
+	int block_iter;
+	int start_iter;
+	int end_iter;
+	int n_to_scan = 0;
+	enum yaffs_block_state state;
+	int c;
+	int deleted;
+	LIST_HEAD(hard_list);
+	struct yaffs_block_info *bi;
+	u32 seq_number;
+	int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
+	u8 *chunk_data;
+	int found_chunks;
+	int alloc_failed = 0;
+	struct yaffs_block_index *block_index = NULL;
+	int alt_block_index = 0;
+	int summary_available;
+
+	yaffs_trace(YAFFS_TRACE_SCAN,
+		"yaffs2_scan_backwards starts intstartblk %d intendblk %d...",
+		dev->internal_start_block, dev->internal_end_block);
+
+	dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
+
+	block_index =
+		kmalloc(n_blocks * sizeof(struct yaffs_block_index), GFP_NOFS);
+
+	if (!block_index) {
+		block_index =
+		    vmalloc(n_blocks * sizeof(struct yaffs_block_index));
+		alt_block_index = 1;
+	}
+
+	if (!block_index) {
+		yaffs_trace(YAFFS_TRACE_SCAN,
+			"yaffs2_scan_backwards() could not allocate block index!"
+			);
+		return YAFFS_FAIL;
+	}
+
+	dev->blocks_in_checkpt = 0;
+
+	chunk_data = yaffs_get_temp_buffer(dev);
+
+	/* Scan all the blocks to determine their state */
+	bi = dev->block_info;
+	for (blk = dev->internal_start_block; blk <= dev->internal_end_block;
+	     blk++) {
+		yaffs_clear_chunk_bits(dev, blk);
+		bi->pages_in_use = 0;
+		bi->soft_del_pages = 0;
+
+		yaffs_query_init_block_state(dev, blk, &state, &seq_number);
+
+		bi->block_state = state;
+		bi->seq_number = seq_number;
+
+		if (bi->seq_number == YAFFS_SEQUENCE_CHECKPOINT_DATA)
+			bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
+		if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
+			bi->block_state = YAFFS_BLOCK_STATE_DEAD;
+
+		yaffs_trace(YAFFS_TRACE_SCAN_DEBUG,
+			"Block scanning block %d state %d seq %d",
+			blk, bi->block_state, seq_number);
+
+		if (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT) {
+			dev->blocks_in_checkpt++;
+
+		} else if (bi->block_state == YAFFS_BLOCK_STATE_DEAD) {
+			yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+				"block %d is bad", blk);
+		} else if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
+			yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty ");
+			dev->n_erased_blocks++;
+			dev->n_free_chunks += dev->param.chunks_per_block;
+		} else if (bi->block_state ==
+				YAFFS_BLOCK_STATE_NEEDS_SCAN) {
+			/* Determine the highest sequence number */
+			if (seq_number >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
+			    seq_number < YAFFS_HIGHEST_SEQUENCE_NUMBER) {
+				block_index[n_to_scan].seq = seq_number;
+				block_index[n_to_scan].block = blk;
+				n_to_scan++;
+				if (seq_number >= dev->seq_number)
+					dev->seq_number = seq_number;
+			} else {
+				/* TODO: Nasty sequence number! */
+				yaffs_trace(YAFFS_TRACE_SCAN,
+					"Block scanning block %d has bad sequence number %d",
+					blk, seq_number);
+			}
+		}
+		bi++;
+	}
+
+	yaffs_trace(YAFFS_TRACE_SCAN, "%d blocks to be sorted...", n_to_scan);
+
+	cond_resched();
+
+	/* Sort the blocks by sequence number */
+	sort(block_index, n_to_scan, sizeof(struct yaffs_block_index),
+		   yaffs2_ybicmp, NULL);
+
+	cond_resched();
+
+	yaffs_trace(YAFFS_TRACE_SCAN, "...done");
+
+	/* Now scan the blocks looking at the data. */
+	start_iter = 0;
+	end_iter = n_to_scan - 1;
+	yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "%d blocks to scan", n_to_scan);
+
+	/* For each block.... backwards */
+	for (block_iter = end_iter;
+	     !alloc_failed && block_iter >= start_iter;
+	     block_iter--) {
+		/* Cooperative multitasking! This loop can run for so
+		 * long that watchdog timers expire.
+		 */
+		cond_resched();
+
+		/* get the block to scan in the correct order */
+		blk = block_index[block_iter].block;
+		bi = yaffs_get_block_info(dev, blk);
+		deleted = 0;
+
+		summary_available = yaffs_summary_read(dev, dev->sum_tags, blk);
+
+		/* For each chunk in each block that needs scanning.... */
+		found_chunks = 0;
+		if (summary_available)
+			c = dev->chunks_per_summary - 1;
+		else
+			c = dev->param.chunks_per_block - 1;
+
+		for (/* c is already initialised */;
+		     !alloc_failed && c >= 0 &&
+		     (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
+		      bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING);
+		      c--) {
+			/* Scan backwards...
+			 * Read the tags and decide what to do
+			 */
+			if (yaffs2_scan_chunk(dev, bi, blk, c,
+					&found_chunks, chunk_data,
+					&hard_list, summary_available) ==
+					YAFFS_FAIL)
+				alloc_failed = 1;
+		}
+
+		if (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN) {
+			/* If we got this far while scanning, then the block
+			 * is fully allocated. */
+			bi->block_state = YAFFS_BLOCK_STATE_FULL;
+		}
+
+		/* Now let's see if it was dirty */
+		if (bi->pages_in_use == 0 &&
+		    !bi->has_shrink_hdr &&
+		    bi->block_state == YAFFS_BLOCK_STATE_FULL) {
+			yaffs_block_became_dirty(dev, blk);
+		}
+	}
+
+	yaffs_skip_rest_of_block(dev);
+
+	if (alt_block_index)
+		vfree(block_index);
+	else
+		kfree(block_index);
+
+	/* OK, we've done all the scanning.
+	 * Fix up the hard link chains: we have scanned all the objects,
+	 * so now it's time to add these hardlinks.
+	 */
+	yaffs_link_fixup(dev, &hard_list);
+
+	yaffs_release_temp_buffer(dev, chunk_data);
+
+	if (alloc_failed)
+		return YAFFS_FAIL;
+
+	yaffs_trace(YAFFS_TRACE_SCAN, "yaffs2_scan_backwards ends");
+
+	return YAFFS_OK;
+}
diff --git a/fs/yaffs2/yaffs_yaffs2.h b/fs/yaffs2/yaffs_yaffs2.h
new file mode 100644
index 0000000..2363bfd
--- /dev/null
+++ b/fs/yaffs2/yaffs_yaffs2.h
@@ -0,0 +1,39 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_YAFFS2_H__
+#define __YAFFS_YAFFS2_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev);
+void yaffs2_find_oldest_dirty_seq(struct yaffs_dev *dev);
+void yaffs2_clear_oldest_dirty_seq(struct yaffs_dev *dev,
+				   struct yaffs_block_info *bi);
+void yaffs2_update_oldest_dirty_seq(struct yaffs_dev *dev, unsigned block_no,
+				    struct yaffs_block_info *bi);
+int yaffs_block_ok_for_gc(struct yaffs_dev *dev, struct yaffs_block_info *bi);
+u32 yaffs2_find_refresh_block(struct yaffs_dev *dev);
+int yaffs2_checkpt_required(struct yaffs_dev *dev);
+int yaffs_calc_checkpt_blocks_required(struct yaffs_dev *dev);
+
+void yaffs2_checkpt_invalidate(struct yaffs_dev *dev);
+int yaffs2_checkpt_save(struct yaffs_dev *dev);
+int yaffs2_checkpt_restore(struct yaffs_dev *dev);
+
+int yaffs2_handle_hole(struct yaffs_obj *obj, loff_t new_size);
+int yaffs2_scan_backwards(struct yaffs_dev *dev);
+
+#endif
diff --git a/fs/yaffs2/yportenv.h b/fs/yaffs2/yportenv.h
new file mode 100644
index 0000000..8975af3
--- /dev/null
+++ b/fs/yaffs2/yportenv.h
@@ -0,0 +1,85 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ *   for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YPORTENV_H__
+#define __YPORTENV_H__
+
+/*
+ * Define the MTD version in terms of Linux Kernel versions
+ * This allows yaffs to be used independently of the kernel
+ * as well as with it.
+ */
+
+#define MTD_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+
+#ifdef YAFFS_OUT_OF_TREE
+#include "moduleconfig.h"
+#endif
+
+#include <linux/version.h>
+#define MTD_VERSION_CODE LINUX_VERSION_CODE
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/xattr.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/sort.h>
+#include <linux/bitops.h>
+
+/*  These type wrappings are used to support Unicode names in WinCE. */
+#define YCHAR char
+#define YUCHAR unsigned char
+#define _Y(x)     x
+
+#define YAFFS_LOSTNFOUND_NAME		"lost+found"
+#define YAFFS_LOSTNFOUND_PREFIX		"obj"
+
+
+#define YAFFS_ROOT_MODE			0755
+#define YAFFS_LOSTNFOUND_MODE		0700
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
+#define Y_CURRENT_TIME CURRENT_TIME.tv_sec
+#define Y_TIME_CONVERT(x) (x).tv_sec
+#else
+#define Y_CURRENT_TIME CURRENT_TIME
+#define Y_TIME_CONVERT(x) (x)
+#endif
+
+#define compile_time_assertion(assertion) \
+	({ int x = __builtin_choose_expr(assertion, 0, (void)0); (void) x; })
+
+
+#define yaffs_printf(msk, fmt, ...) \
+	printk(KERN_DEBUG "yaffs: " fmt "\n", ##__VA_ARGS__)
+
+#define yaffs_trace(msk, fmt, ...) do { \
+	if (yaffs_trace_mask & (msk)) \
+		printk(KERN_DEBUG "yaffs: " fmt "\n", ##__VA_ARGS__); \
+} while (0)
+
+
+#endif
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 639d7a4..01613b3 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -46,5 +46,20 @@
 #define read_barrier_depends()		do {} while (0)
 #define smp_read_barrier_depends()	do {} while (0)
 
+#define smp_store_release(p, v)						\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	smp_mb();							\
+	ACCESS_ONCE(*p) = (v);						\
+} while (0)
+
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	smp_mb();							\
+	___p1;								\
+})
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __ASM_GENERIC_BARRIER_H */
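
These backports emulate release/acquire semantics by pairing smp_mb() with a
plain access; compiletime_assert_atomic_type() rejects anything that is not a
native word. A usage sketch (the ready/payload names are illustrative, not
from this patch):

static int payload;
static int ready;

/* Publisher: every store before the release is visible once ready == 1. */
static void publish(void)
{
	payload = 42;
	smp_store_release(&ready, 1);
}

/* Consumer: the acquire load orders the payload read after the flag read. */
static int consume(void)
{
	if (smp_load_acquire(&ready))
		return payload;	/* guaranteed to observe 42 */
	return -1;
}
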
diff --git a/include/asm-generic/dma-coherent.h b/include/asm-generic/dma-coherent.h
index 2be8a2d..effb600 100644
--- a/include/asm-generic/dma-coherent.h
+++ b/include/asm-generic/dma-coherent.h
@@ -1,3 +1,5 @@
+#if !defined(__i386__) && !defined(__amd64__)
+
 #ifndef DMA_COHERENT_H
 #define DMA_COHERENT_H
 
@@ -33,3 +35,46 @@
 #endif
 
 #endif
+
+#else /* CMA is a crutch for brain-damaged hardware that causes untold pain */
+
+#ifndef DMA_COHERENT_H
+#define DMA_COHERENT_H
+
+#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
+/*
+ * These three functions are only for the DMA allocator.
+ * Don't use them in device drivers.
+ */
+int dma_alloc_from_coherent(struct device *dev, ssize_t size,
+				       dma_addr_t *dma_handle, void **ret);
+int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
+
+int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+			    void *cpu_addr, size_t size, int *ret);
+/*
+ * Standard interface
+ */
+#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+extern int
+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+			    dma_addr_t device_addr, size_t size, int flags);
+
+extern void
+dma_release_declared_memory(struct device *dev);
+
+extern void *
+dma_mark_declared_memory_occupied(struct device *dev,
+				  dma_addr_t device_addr, size_t size);
+#else
+#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
+#define dma_release_from_coherent(dev, order, vaddr) (0)
+#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
+#endif
+
+#endif
+
+
+
+
+#endif
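
A hedged sketch of how a driver uses the declared-memory interface at probe
time under the 3.10-era API; the addresses, size, and region are made up, and
DMA_MEMORY_MAP is the flag this generation used to request a kernel mapping:

static int my_probe(struct device *dev)
{
	/*
	 * Hypothetical device-local SRAM visible at the same bus and
	 * device address. Once declared, dma_alloc_coherent() satisfies
	 * this device's allocations from the region. The call returns
	 * nonzero on success in this API generation.
	 */
	if (!dma_declare_coherent_memory(dev, 0x30000000, 0x30000000,
					 SZ_64K, DMA_MEMORY_MAP))
		return -ENOMEM;
	return 0;
}
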
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index bde6469..6f708bd 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -118,6 +118,9 @@
 	int			(*set_debounce)(struct gpio_chip *chip,
 						unsigned offset, unsigned debounce);
 
+	void			(*set_pinmux)(int gpio, int alt);
+	int			(*get_pinmux)(int gpio);
+
 	void			(*set)(struct gpio_chip *chip,
 						unsigned offset, int value);
 
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index d17784e..344c195 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -87,6 +87,9 @@
 
 #endif	/* SMP */
 
+/* 3.18 backport */
+#define raw_cpu_ptr(ptr)	__this_cpu_ptr(ptr)
+
 #ifndef PER_CPU_BASE_SECTION
 #ifdef CONFIG_SMP
 #define PER_CPU_BASE_SECTION ".data..percpu"
diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h
new file mode 100644
index 0000000..663ac3d
--- /dev/null
+++ b/include/asm-generic/seccomp.h
@@ -0,0 +1,29 @@
+/*
+ * include/asm-generic/seccomp.h
+ *
+ * Copyright (C) 2014 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_GENERIC_SECCOMP_H
+#define _ASM_GENERIC_SECCOMP_H
+
+#include <asm-generic/unistd.h>
+
+#if defined(CONFIG_COMPAT) && !defined(__NR_seccomp_read_32)
+#define __NR_seccomp_read_32		__NR_read
+#define __NR_seccomp_write_32		__NR_write
+#define __NR_seccomp_exit_32		__NR_exit
+#define __NR_seccomp_sigreturn_32	__NR_rt_sigreturn
+#endif /* CONFIG_COMPAT && ! already defined */
+
+#define __NR_seccomp_read		__NR_read
+#define __NR_seccomp_write		__NR_write
+#define __NR_seccomp_exit		__NR_exit
+#define __NR_seccomp_sigreturn		__NR_rt_sigreturn
+
+#endif /* _ASM_GENERIC_SECCOMP_H */
+
diff --git a/include/asm-generic/simd.h b/include/asm-generic/simd.h
new file mode 100644
index 0000000..f57eb7b
--- /dev/null
+++ b/include/asm-generic/simd.h
@@ -0,0 +1,14 @@
+
+#include <linux/hardirq.h>
+
+/*
+ * may_use_simd - whether it is allowable at this time to issue SIMD
+ *                instructions or access the SIMD register file
+ *
+ * As architectures typically don't preserve the SIMD register file when
+ * taking an interrupt, !in_interrupt() should be a reasonable default.
+ */
+static __must_check inline bool may_use_simd(void)
+{
+	return !in_interrupt();
+}
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h
index 5b09392..d401e54 100644
--- a/include/asm-generic/syscall.h
+++ b/include/asm-generic/syscall.h
@@ -144,8 +144,6 @@
 
 /**
  * syscall_get_arch - return the AUDIT_ARCH for the current system call
- * @task:	task of interest, must be in system call entry tracing
- * @regs:	task_pt_regs() of @task
  *
  * Returns the AUDIT_ARCH_* based on the system call convention in use.
  *
@@ -155,5 +153,5 @@
  * Architectures which permit CONFIG_HAVE_ARCH_SECCOMP_FILTER must
  * provide an implementation of this.
  */
-int syscall_get_arch(struct task_struct *task, struct pt_regs *regs);
+int syscall_get_arch(void);
 #endif	/* _ASM_SYSCALL_H */
diff --git a/include/crypto/ablk_helper.h b/include/crypto/ablk_helper.h
new file mode 100644
index 0000000..4f93df5
--- /dev/null
+++ b/include/crypto/ablk_helper.h
@@ -0,0 +1,31 @@
+/*
+ * Shared async block cipher helpers
+ */
+
+#ifndef _CRYPTO_ABLK_HELPER_H
+#define _CRYPTO_ABLK_HELPER_H
+
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <crypto/cryptd.h>
+
+struct async_helper_ctx {
+	struct cryptd_ablkcipher *cryptd_tfm;
+};
+
+extern int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
+			unsigned int key_len);
+
+extern int __ablk_encrypt(struct ablkcipher_request *req);
+
+extern int ablk_encrypt(struct ablkcipher_request *req);
+
+extern int ablk_decrypt(struct ablkcipher_request *req);
+
+extern void ablk_exit(struct crypto_tfm *tfm);
+
+extern int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name);
+
+extern int ablk_init(struct crypto_tfm *tfm);
+
+#endif /* _CRYPTO_ABLK_HELPER_H */
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 418d270..063f8ef 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -100,9 +100,12 @@
 	void *page;
 	u8 *buffer;
 	u8 *iv;
+	unsigned int ivsize;
 
 	int flags;
-	unsigned int blocksize;
+	unsigned int walk_blocksize;
+	unsigned int cipher_blocksize;
+	unsigned int alignmask;
 };
 
 struct ablkcipher_walk {
@@ -192,6 +195,10 @@
 int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
 			      struct blkcipher_walk *walk,
 			      unsigned int blocksize);
+int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
+				   struct blkcipher_walk *walk,
+				   struct crypto_aead *tfm,
+				   unsigned int blocksize);
 
 int ablkcipher_walk_done(struct ablkcipher_request *req,
 			 struct ablkcipher_walk *walk, int err);
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
new file mode 100644
index 0000000..a460889
--- /dev/null
+++ b/include/linux/Kbuild
@@ -0,0 +1,2 @@
+header-y += if_pppolac.h
+header-y += if_pppopns.h
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
index 9069694..a899402 100644
--- a/include/linux/alarmtimer.h
+++ b/include/linux/alarmtimer.h
@@ -44,10 +44,14 @@
 void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
 		enum alarmtimer_restart (*function)(struct alarm *, ktime_t));
 int alarm_start(struct alarm *alarm, ktime_t start);
+int alarm_start_relative(struct alarm *alarm, ktime_t start);
+void alarm_restart(struct alarm *alarm);
 int alarm_try_to_cancel(struct alarm *alarm);
 int alarm_cancel(struct alarm *alarm);
 
 u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval);
+u64 alarm_forward_now(struct alarm *alarm, ktime_t interval);
+ktime_t alarm_expires_remaining(const struct alarm *alarm);
 
 /* Provide way to access the rtc device being used by alarmtimers */
 struct rtc_device *alarmtimer_get_rtcdev(void);
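
A sketch of how the new relative helpers simplify arming a periodic alarm
(the callback name and five-second period are illustrative):

static struct alarm my_alarm;

static enum alarmtimer_restart my_alarm_fn(struct alarm *a, ktime_t now)
{
	/* Re-arm one period from now instead of computing absolute times. */
	alarm_forward_now(a, ktime_set(5, 0));
	return ALARMTIMER_RESTART;
}

static void my_setup(void)
{
	alarm_init(&my_alarm, ALARM_BOOTTIME, my_alarm_fn);
	alarm_start_relative(&my_alarm, ktime_set(5, 0));
}
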
diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h
index 32a89cf..9d1d9ca 100644
--- a/include/linux/amba/mmci.h
+++ b/include/linux/amba/mmci.h
@@ -5,6 +5,15 @@
 #define AMBA_MMCI_H
 
 #include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+
+struct embedded_sdio_data {
+	struct sdio_cis cis;
+	struct sdio_cccr cccr;
+	struct sdio_embedded_func *funcs;
+	int num_funcs;
+};
 
 
 /*
@@ -73,6 +82,9 @@
 	bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
 	void *dma_rx_param;
 	void *dma_tx_param;
+	unsigned int status_irq;
+	struct embedded_sdio_data *embedded_sdio;
+	int (*register_status_notify)(void (*callback)(int card_present, void *dev_id), void *dev_id);
 };
 
 #endif
diff --git a/include/linux/android_aid.h b/include/linux/android_aid.h
new file mode 100644
index 0000000..06264b8
--- /dev/null
+++ b/include/linux/android_aid.h
@@ -0,0 +1,28 @@
+/* include/linux/android_aid.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ANDROID_AID_H
+#define _LINUX_ANDROID_AID_H
+
+/* AIDs that the kernel treats differently */
+#define AID_OBSOLETE_000 3001  /* was NET_BT_ADMIN */
+#define AID_OBSOLETE_001 3002  /* was NET_BT */
+#define AID_INET         3003
+#define AID_NET_RAW      3004
+#define AID_NET_ADMIN    3005
+#define AID_NET_BW_STATS 3006  /* read bandwidth statistics */
+#define AID_NET_BW_ACCT  3007  /* change bandwidth statistics accounting */
+
+#endif
diff --git a/include/linux/aufs_type.h b/include/linux/aufs_type.h
new file mode 100644
index 0000000..d5536d0
--- /dev/null
+++ b/include/linux/aufs_type.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2012-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#include <uapi/linux/aufs_type.h>
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index c1dde8e..28db15dd 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -21,6 +21,9 @@
  */
 #include <asm/bitops.h>
 
+/* 3.18 backport */
+#define smp_mb__after_atomic() smp_mb__after_clear_bit()
+
 #define for_each_set_bit(bit, addr, size) \
 	for ((bit) = find_first_bit((addr), (size));		\
 	     (bit) < (size);					\
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 9e52b0626..a3bd299 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -36,6 +36,7 @@
 	BH_Quiet,	/* Buffer Error Printks to be quiet */
 	BH_Meta,	/* Buffer contains metadata */
 	BH_Prio,	/* Buffer should be submitted with REQ_PRIO */
+	BH_Defer_Completion, /* Defer AIO completion to workqueue */
 
 	BH_PrivateStart,/* not a state bit, but the first bit available
 			 * for private allocation by other entities
@@ -128,6 +129,7 @@
 BUFFER_FNS(Unwritten, unwritten)
 BUFFER_FNS(Meta, meta)
 BUFFER_FNS(Prio, prio)
+BUFFER_FNS(Defer_Completion, defer_completion)
 
 #define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)
 
@@ -198,7 +200,8 @@
  * Generic address_space_operations implementations for buffer_head-backed
  * address_spaces.
  */
-void block_invalidatepage(struct page *page, unsigned long offset);
+void block_invalidatepage(struct page *page, unsigned int offset,
+			  unsigned int length);
 int block_write_full_page(struct page *page, get_block_t *get_block,
 				struct writeback_control *wbc);
 int block_write_full_page_endio(struct page *page, get_block_t *get_block,
@@ -294,6 +297,18 @@
 	return __bread(sb->s_bdev, block, sb->s_blocksize);
 }
 
+static inline struct buffer_head *
+sb_bread_unmovable(struct super_block *sb, sector_t block)
+{
+	return sb_bread(sb, block);
+}
+
+static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
+					sector_t block, unsigned size)
+{
+	return __getblk(bdev, block, size);
+}
+
 static inline void
 sb_breadahead(struct super_block *sb, sector_t block)
 {
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 8852d37..a6fc777 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -578,6 +578,7 @@
 	void (*css_offline)(struct cgroup *cgrp);
 	void (*css_free)(struct cgroup *cgrp);
 
+	int (*allow_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
 	int (*can_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
 	void (*cancel_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
 	void (*attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
@@ -868,6 +869,17 @@
 unsigned short css_depth(struct cgroup_subsys_state *css);
 struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id);
 
+/*
+ * Default Android check for whether the current process is allowed to move a
+ * task across cgroups, either because CAP_SYS_NICE is set, because the uid
+ * of the calling process matches that of the moved task, or because we are
+ * running as root.
+ * Returns 0 if this is allowed, or -EACCES otherwise.
+ */
+int subsys_cgroup_allow_attach(struct cgroup *cgrp,
+			       struct cgroup_taskset *tset);
+
+
 #else /* !CONFIG_CGROUPS */
 
 static inline int cgroup_init_early(void) { return 0; }
@@ -891,6 +903,11 @@
 	return 0;
 }
 
+static inline int subsys_cgroup_allow_attach(struct cgroup *cgrp,
+					     struct cgroup_taskset *tset)
+{
+	return 0;
+}
 #endif /* !CONFIG_CGROUPS */
 
 #endif /* _LINUX_CGROUP_H */
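
A sketch of the policy described above, roughly as the common helper in
kernel/cgroup.c is expected to implement it (RCU and locking elided;
cgroup_taskset_for_each() is the 3.10-era iterator):

int subsys_cgroup_allow_attach(struct cgroup *cgrp,
			       struct cgroup_taskset *tset)
{
	const struct cred *cred = current_cred(), *tcred;
	struct task_struct *task;

	if (capable(CAP_SYS_NICE))
		return 0;

	cgroup_taskset_for_each(task, cgrp, tset) {
		tcred = __task_cred(task);
		if (current != task && !uid_eq(cred->euid, tcred->uid) &&
		    !uid_eq(cred->euid, tcred->suid))
			return -EACCES;
	}
	return 0;
}
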
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index a2ce6f8..6977192 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -302,6 +302,11 @@
 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
 #endif
 
+/* Is this type a native word size -- useful for atomic operations */
+#ifndef __native_word
+# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+#endif
+
 /* Compile time object size, -1 for unknown */
 #ifndef __compiletime_object_size
 # define __compiletime_object_size(obj) -1
@@ -341,6 +346,10 @@
 #define compiletime_assert(condition, msg) \
 	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
 
+#define compiletime_assert_atomic_type(t)				\
+	compiletime_assert(__native_word(t),				\
+		"Need native word sized stores/loads for atomicity.")
+
 /*
  * Prevent the compiler from merging or refetching accesses.  The compiler
  * is also forbidden from reordering successive instances of ACCESS_ONCE(),
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 9f3c7e8..39d4f9d 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -232,4 +232,11 @@
 void arch_cpu_idle_exit(void);
 void arch_cpu_idle_dead(void);
 
+#define IDLE_START 1
+#define IDLE_END 2
+
+void idle_notifier_register(struct notifier_block *n);
+void idle_notifier_unregister(struct notifier_block *n);
+void idle_notifier_call_chain(unsigned long val);
+
 #endif /* _LINUX_CPU_H_ */
diff --git a/include/linux/cpufeature.h b/include/linux/cpufeature.h
new file mode 100644
index 0000000..c4d4eb8
--- /dev/null
+++ b/include/linux/cpufeature.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_CPUFEATURE_H
+#define __LINUX_CPUFEATURE_H
+
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+
+#include <linux/mod_devicetable.h>
+#include <asm/cpufeature.h>
+
+/*
+ * Macros imported from <asm/cpufeature.h>:
+ * - cpu_feature(x)		ordinal value of feature called 'x'
+ * - cpu_have_feature(u32 n)	whether feature #n is available
+ * - MAX_CPU_FEATURES		upper bound for feature ordinal values
+ * Optional:
+ * - CPU_FEATURE_TYPEFMT	format string fragment for printing the cpu type
+ * - CPU_FEATURE_TYPEVAL	set of values matching the format string above
+ */
+
+#ifndef CPU_FEATURE_TYPEFMT
+#define CPU_FEATURE_TYPEFMT	"%s"
+#endif
+
+#ifndef CPU_FEATURE_TYPEVAL
+#define CPU_FEATURE_TYPEVAL	ELF_PLATFORM
+#endif
+
+/*
+ * Use module_cpu_feature_match(feature, module_init_function) to
+ * declare that
+ * a) the module shall be probed upon discovery of CPU feature 'feature'
+ *    (typically at boot time using udev)
+ * b) the module must not be loaded if CPU feature 'feature' is not present
+ *    (not even by manual insmod).
+ *
+ * For a list of legal values for 'feature', please consult the file
+ * 'asm/cpufeature.h' of your favorite architecture.
+ */
+#define module_cpu_feature_match(x, __init)			\
+static struct cpu_feature const cpu_feature_match_ ## x[] =	\
+	{ { .feature = cpu_feature(x) }, { } };			\
+MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x);		\
+								\
+static int cpu_feature_match_ ## x ## _init(void)		\
+{								\
+	if (!cpu_have_feature(cpu_feature(x)))			\
+		return -ENODEV;					\
+	return __init();					\
+}								\
+module_init(cpu_feature_match_ ## x ## _init)
+
+#endif
+#endif
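
A usage sketch: a hypothetical module that should load (and auto-load via
udev) only when the CPU advertises an AES feature bit. The feature name and
init function are illustrative:

#include <linux/module.h>
#include <linux/cpufeature.h>

static int __init my_aes_init(void)
{
	/* Normal registration work goes here. */
	return 0;
}

/*
 * Emits a MODULE_DEVICE_TABLE entry for autoloading and wraps the init
 * function so it returns -ENODEV when the feature is absent.
 */
module_cpu_feature_match(AES, my_aes_init);
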
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 1a81b74..d23ed25 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -21,6 +21,7 @@
 #include <linux/workqueue.h>
 #include <linux/cpumask.h>
 #include <asm/div64.h>
+#include <asm/cputime.h>
 
 #define CPUFREQ_NAME_LEN 16
 /* Print length for names. Extra 1 space for accommodating '\n' in prints */
@@ -341,6 +342,7 @@
 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
 int cpufreq_update_policy(unsigned int cpu);
 bool have_governor_per_policy(void);
+struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
 
 #ifdef CONFIG_CPU_FREQ
 /* query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it */
@@ -394,6 +396,9 @@
 #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE)
 extern struct cpufreq_governor cpufreq_gov_conservative;
 #define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_conservative)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE)
+extern struct cpufreq_governor cpufreq_gov_interactive;
+#define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_interactive)
 #endif
 
 
@@ -433,4 +438,11 @@
 void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy);
 
 void cpufreq_frequency_table_put_attr(unsigned int cpu);
+
+/*********************************************************************
+ *                         CPUFREQ STATS                             *
+ *********************************************************************/
+
+void acct_update_power(struct task_struct *p, cputime_t cputime);
+
 #endif /* _LINUX_CPUFREQ_H */
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 21ca773..822c135 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -51,7 +51,7 @@
 extern void debug_show_all_locks(void);
 extern void debug_show_held_locks(struct task_struct *task);
 extern void debug_check_no_locks_freed(const void *from, unsigned long len);
-extern void debug_check_no_locks_held(struct task_struct *task);
+extern void debug_check_no_locks_held(void);
 #else
 static inline void debug_show_all_locks(void)
 {
@@ -67,7 +67,7 @@
 }
 
 static inline void
-debug_check_no_locks_held(struct task_struct *task)
+debug_check_no_locks_held(void)
 {
 }
 #endif
diff --git a/include/linux/err.h b/include/linux/err.h
index f2edce2..9b8c28d 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -60,6 +60,9 @@
 		return 0;
 }
 
+/* for backport */
+#define PTR_ERR_OR_ZERO(p) PTR_RET(p)
+
 #endif
 
 #endif /* _LINUX_ERR_H */
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index e70df40..7fd81b8 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -3,6 +3,7 @@
 #ifndef FREEZER_H_INCLUDED
 #define FREEZER_H_INCLUDED
 
+#include <linux/debug_locks.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/atomic.h>
@@ -46,7 +47,11 @@
 extern void thaw_processes(void);
 extern void thaw_kernel_threads(void);
 
-static inline bool try_to_freeze(void)
+/*
+ * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
+ * If try_to_freeze causes a lockdep warning it means the caller may deadlock
+ */
+static inline bool try_to_freeze_unsafe(void)
 {
 	might_sleep();
 	if (likely(!freezing(current)))
@@ -54,6 +59,13 @@
 	return __refrigerator(false);
 }
 
+static inline bool try_to_freeze(void)
+{
+	if (!(current->flags & PF_NOFREEZE))
+		debug_check_no_locks_held();
+	return try_to_freeze_unsafe();
+}
+
 extern bool freeze_task(struct task_struct *p);
 extern bool set_freezable(void);
 
@@ -115,6 +127,14 @@
 	try_to_freeze();
 }
 
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+static inline void freezer_count_unsafe(void)
+{
+	current->flags &= ~PF_FREEZER_SKIP;
+	smp_mb();
+	try_to_freeze_unsafe();
+}
+
 /**
  * freezer_should_skip - whether to skip a task when determining frozen
  *			 state is reached
@@ -139,28 +159,86 @@
 }
 
 /*
- * These macros are intended to be used whenever you want allow a sleeping
+ * These functions are intended to be used whenever you want to allow a sleeping
  * task to be frozen. Note that none of them return any clear indication of
  * whether a freeze event happened while in this function.
  */
 
 /* Like schedule(), but should not block the freezer. */
-#define freezable_schedule()						\
-({									\
-	freezer_do_not_count();						\
-	schedule();							\
-	freezer_count();						\
-})
+static inline void freezable_schedule(void)
+{
+	freezer_do_not_count();
+	schedule();
+	freezer_count();
+}
+
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+static inline void freezable_schedule_unsafe(void)
+{
+	freezer_do_not_count();
+	schedule();
+	freezer_count_unsafe();
+}
+
+/*
+ * Like schedule_timeout(), but should not block the freezer.  Do not
+ * call this with locks held.
+ */
+static inline long freezable_schedule_timeout(long timeout)
+{
+	long __retval;
+	freezer_do_not_count();
+	__retval = schedule_timeout(timeout);
+	freezer_count();
+	return __retval;
+}
+
+/*
+ * Like schedule_timeout_interruptible(), but should not block the freezer.  Do not
+ * call this with locks held.
+ */
+static inline long freezable_schedule_timeout_interruptible(long timeout)
+{
+	long __retval;
+	freezer_do_not_count();
+	__retval = schedule_timeout_interruptible(timeout);
+	freezer_count();
+	return __retval;
+}
 
 /* Like schedule_timeout_killable(), but should not block the freezer. */
-#define freezable_schedule_timeout_killable(timeout)			\
-({									\
-	long __retval;							\
-	freezer_do_not_count();						\
-	__retval = schedule_timeout_killable(timeout);			\
-	freezer_count();						\
-	__retval;							\
-})
+static inline long freezable_schedule_timeout_killable(long timeout)
+{
+	long __retval;
+	freezer_do_not_count();
+	__retval = schedule_timeout_killable(timeout);
+	freezer_count();
+	return __retval;
+}
+
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
+{
+	long __retval;
+	freezer_do_not_count();
+	__retval = schedule_timeout_killable(timeout);
+	freezer_count_unsafe();
+	return __retval;
+}
+
+/*
+ * Like schedule_hrtimeout_range(), but should not block the freezer.  Do not
+ * call this with locks held.
+ */
+static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
+		unsigned long delta, const enum hrtimer_mode mode)
+{
+	int __retval;
+	freezer_do_not_count();
+	__retval = schedule_hrtimeout_range(expires, delta, mode);
+	freezer_count();
+	return __retval;
+}
 
 /*
  * Freezer-friendly wrappers around wait_event_interruptible(),
@@ -177,33 +255,45 @@
 	__retval;							\
 })
 
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+#define wait_event_freezekillable_unsafe(wq, condition)			\
+({									\
+	int __retval;							\
+	freezer_do_not_count();						\
+	__retval = wait_event_killable(wq, (condition));		\
+	freezer_count_unsafe();						\
+	__retval;							\
+})
+
 #define wait_event_freezable(wq, condition)				\
 ({									\
 	int __retval;							\
-	for (;;) {							\
-		__retval = wait_event_interruptible(wq, 		\
-				(condition) || freezing(current));	\
-		if (__retval || (condition))				\
-			break;						\
-		try_to_freeze();					\
-	}								\
+	freezer_do_not_count();						\
+	__retval = wait_event_interruptible(wq, (condition));		\
+	freezer_count();						\
 	__retval;							\
 })
 
 #define wait_event_freezable_timeout(wq, condition, timeout)		\
 ({									\
 	long __retval = timeout;					\
-	for (;;) {							\
-		__retval = wait_event_interruptible_timeout(wq,		\
-				(condition) || freezing(current),	\
-				__retval); 				\
-		if (__retval <= 0 || (condition))			\
-			break;						\
-		try_to_freeze();					\
-	}								\
+	freezer_do_not_count();						\
+	__retval = wait_event_interruptible_timeout(wq,	(condition),	\
+				__retval);				\
+	freezer_count();						\
 	__retval;							\
 })
 
+#define wait_event_freezable_exclusive(wq, condition)			\
+({									\
+	int __retval;							\
+	freezer_do_not_count();						\
+	__retval = wait_event_interruptible_exclusive(wq, condition);	\
+	freezer_count();						\
+	__retval;							\
+})
+
+
 #else /* !CONFIG_FREEZER */
 static inline bool frozen(struct task_struct *p) { return false; }
 static inline bool freezing(struct task_struct *p) { return false; }
@@ -225,18 +315,37 @@
 
 #define freezable_schedule()  schedule()
 
+#define freezable_schedule_unsafe()  schedule()
+
+#define freezable_schedule_timeout(timeout)  schedule_timeout(timeout)
+
+#define freezable_schedule_timeout_interruptible(timeout)		\
+	schedule_timeout_interruptible(timeout)
+
 #define freezable_schedule_timeout_killable(timeout)			\
 	schedule_timeout_killable(timeout)
 
+#define freezable_schedule_timeout_killable_unsafe(timeout)		\
+	schedule_timeout_killable(timeout)
+
+#define freezable_schedule_hrtimeout_range(expires, delta, mode)	\
+	schedule_hrtimeout_range(expires, delta, mode)
+
 #define wait_event_freezable(wq, condition)				\
 		wait_event_interruptible(wq, condition)
 
 #define wait_event_freezable_timeout(wq, condition, timeout)		\
 		wait_event_interruptible_timeout(wq, condition, timeout)
 
+#define wait_event_freezable_exclusive(wq, condition)			\
+		wait_event_interruptible_exclusive(wq, condition)
+
 #define wait_event_freezekillable(wq, condition)		\
 		wait_event_killable(wq, condition)
 
+#define wait_event_freezekillable_unsafe(wq, condition)			\
+		wait_event_killable(wq, condition)
+
 #endif /* !CONFIG_FREEZER */
 
 #endif	/* FREEZER_H_INCLUDED */
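
The intended pattern for these wrappers is a freezable kernel thread that
sleeps without stalling suspend; a sketch (the work loop is illustrative):

static int my_kthread(void *unused)
{
	set_freezable();	/* opt this thread in to freezing */

	while (!kthread_should_stop()) {
		/*
		 * Sleeps for up to a second; the freezer may freeze and
		 * thaw us inside this call with no indication on return.
		 */
		freezable_schedule_timeout_interruptible(HZ);
		/* ... periodic work ... */
	}
	return 0;
}
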
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 5c9dc84..0431937 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -46,6 +46,7 @@
 struct cred;
 struct swap_info_struct;
 struct seq_file;
+struct workqueue_struct;
 
 extern void __init inode_init(void);
 extern void __init inode_init_early(void);
@@ -63,8 +64,7 @@
 typedef int (get_block_t)(struct inode *inode, sector_t iblock,
 			struct buffer_head *bh_result, int create);
 typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
-			ssize_t bytes, void *private, int ret,
-			bool is_async);
+			ssize_t bytes, void *private);
 
 #define MAY_EXEC		0x00000001
 #define MAY_WRITE		0x00000002
@@ -365,7 +365,7 @@
 
 	/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
 	sector_t (*bmap)(struct address_space *, sector_t);
-	void (*invalidatepage) (struct page *, unsigned long);
+	void (*invalidatepage) (struct page *, unsigned int, unsigned int);
 	int (*releasepage) (struct page *, gfp_t);
 	void (*freepage)(struct page *);
 	ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
@@ -632,9 +632,14 @@
 	I_MUTEX_PARENT,
 	I_MUTEX_CHILD,
 	I_MUTEX_XATTR,
-	I_MUTEX_QUOTA
+	I_MUTEX_QUOTA,
+	I_MUTEX_NONDIR2,
+	I_MUTEX_PARENT2,
 };
 
+void lock_two_nondirectories(struct inode *, struct inode*);
+void unlock_two_nondirectories(struct inode *, struct inode*);
+
 /*
  * NOTE: in a 32bit arch with a preemptable kernel and
  * an UP compile the i_size_read/write must be atomic
@@ -1311,6 +1316,9 @@
 
 	/* Being remounted read-only */
 	int s_readonly_remount;
+
+	/* AIO completions deferred from interrupt context */
+	struct workqueue_struct *s_dio_done_wq;
 };
 
 /* superblock cache pruning functions */
@@ -1495,6 +1503,17 @@
  * to have different dirent layouts depending on the binary type.
  */
 typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned);
+struct dir_context {
+	const filldir_t actor;
+	loff_t pos;
+};
+
+static inline bool dir_emit(struct dir_context *ctx,
+			    const char *name, int namelen,
+			    u64 ino, unsigned type)
+{
+	return ctx->actor(ctx, name, namelen, ctx->pos, ino, type) == 0;
+}
 struct block_device_operations;
 
 /* These macros are for out of kernel modules to test that
@@ -1511,6 +1530,7 @@
 	ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
 	ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
 	int (*readdir) (struct file *, void *, filldir_t);
+	int (*iterate) (struct file *, struct dir_context *);
 	unsigned int (*poll) (struct file *, struct poll_table_struct *);
 	long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
 	long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
@@ -2460,6 +2480,9 @@
 void inode_dio_wait(struct inode *inode);
 void inode_dio_done(struct inode *inode);
 
+extern void inode_set_flags(struct inode *inode, unsigned int flags,
+			    unsigned int mask);
+
 extern const struct file_operations generic_ro_fops;
 
 #define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
@@ -2484,6 +2507,7 @@
 void inode_set_bytes(struct inode *inode, loff_t bytes);
 
 extern int vfs_readdir(struct file *, filldir_t, void *);
+extern int iterate_dir(struct file *, struct dir_context *);
 
 extern int vfs_stat(const char __user *, struct kstat *);
 extern int vfs_lstat(const char __user *, struct kstat *);
@@ -2564,6 +2588,7 @@
 extern int inode_newsize_ok(const struct inode *, loff_t offset);
 extern void setattr_copy(struct inode *inode, const struct iattr *attr);
 
+extern int update_time(struct inode *, struct timespec *, int);
 extern int file_update_time(struct file *file);
 
 extern int generic_show_options(struct seq_file *m, struct dentry *root);
@@ -2678,4 +2703,11 @@
 		inode->i_flags |= S_NOSEC;
 }
 
+static inline bool dir_relax(struct inode *inode)
+{
+	mutex_unlock(&inode->i_mutex);
+	mutex_lock(&inode->i_mutex);
+	return !IS_DEADDIR(inode);
+}
+
 #endif /* _LINUX_FS_H */
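
dir_context and ->iterate() are backported from the 3.11 readdir rework; a
minimal sketch of an iterator for a synthetic directory holding one fixed
entry (the name and inode number are made up):

static int myfs_iterate(struct file *file, struct dir_context *ctx)
{
	/* Positions 0 and 1 are conventionally "." and ".."; skip to 2. */
	if (ctx->pos == 2) {
		if (!dir_emit(ctx, "hello", 5, 1234, DT_REG))
			return 0;	/* caller's buffer full; resume later */
		ctx->pos++;
	}
	return 0;
}

static const struct file_operations myfs_dir_fops = {
	.iterate = myfs_iterate,
};
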
diff --git a/include/linux/gpio_event.h b/include/linux/gpio_event.h
new file mode 100644
index 0000000..2613fc5
--- /dev/null
+++ b/include/linux/gpio_event.h
@@ -0,0 +1,170 @@
+/* include/linux/gpio_event.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_GPIO_EVENT_H
+#define _LINUX_GPIO_EVENT_H
+
+#include <linux/input.h>
+
+struct gpio_event_input_devs {
+	int count;
+	struct input_dev *dev[];
+};
+enum {
+	GPIO_EVENT_FUNC_UNINIT  = 0x0,
+	GPIO_EVENT_FUNC_INIT    = 0x1,
+	GPIO_EVENT_FUNC_SUSPEND = 0x2,
+	GPIO_EVENT_FUNC_RESUME  = 0x3,
+};
+struct gpio_event_info {
+	int (*func)(struct gpio_event_input_devs *input_devs,
+		    struct gpio_event_info *info,
+		    void **data, int func);
+	int (*event)(struct gpio_event_input_devs *input_devs,
+		     struct gpio_event_info *info,
+		     void **data, unsigned int dev, unsigned int type,
+		     unsigned int code, int value); /* out events */
+	bool no_suspend;
+};
+
+struct gpio_event_platform_data {
+	const char *name;
+	struct gpio_event_info **info;
+	size_t info_count;
+	int (*power)(const struct gpio_event_platform_data *pdata, bool on);
+	const char *names[]; /* If name is NULL, names contains a NULL- */
+			     /* terminated list of input devices to create */
+};
+
+#define GPIO_EVENT_DEV_NAME "gpio-event"
+
+/* Key matrix */
+
+enum gpio_event_matrix_flags {
+	/* unset: drive active output low, set: drive active output high */
+	GPIOKPF_ACTIVE_HIGH              = 1U << 0,
+	GPIOKPF_DEBOUNCE                 = 1U << 1,
+	GPIOKPF_REMOVE_SOME_PHANTOM_KEYS = 1U << 2,
+	GPIOKPF_REMOVE_PHANTOM_KEYS      = GPIOKPF_REMOVE_SOME_PHANTOM_KEYS |
+					   GPIOKPF_DEBOUNCE,
+	GPIOKPF_DRIVE_INACTIVE           = 1U << 3,
+	GPIOKPF_LEVEL_TRIGGERED_IRQ      = 1U << 4,
+	GPIOKPF_PRINT_UNMAPPED_KEYS      = 1U << 16,
+	GPIOKPF_PRINT_MAPPED_KEYS        = 1U << 17,
+	GPIOKPF_PRINT_PHANTOM_KEYS       = 1U << 18,
+};
+
+#define MATRIX_CODE_BITS (10)
+#define MATRIX_KEY_MASK ((1U << MATRIX_CODE_BITS) - 1)
+#define MATRIX_KEY(dev, code) \
+	(((dev) << MATRIX_CODE_BITS) | (code & MATRIX_KEY_MASK))
+
+extern int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs,
+			struct gpio_event_info *info, void **data, int func);
+struct gpio_event_matrix_info {
+	/* initialize to gpio_event_matrix_func */
+	struct gpio_event_info info;
+	/* size must be ninputs * noutputs */
+	const unsigned short *keymap;
+	unsigned int *input_gpios;
+	unsigned int *output_gpios;
+	unsigned int ninputs;
+	unsigned int noutputs;
+	/* time to wait before reading inputs after driving each output */
+	ktime_t settle_time;
+	/* time to wait before scanning the keypad a second time */
+	ktime_t debounce_delay;
+	ktime_t poll_time;
+	unsigned flags;
+};
+
+/* Directly connected inputs and outputs */
+
+enum gpio_event_direct_flags {
+	GPIOEDF_ACTIVE_HIGH         = 1U << 0,
+/*	GPIOEDF_USE_DOWN_IRQ        = 1U << 1, */
+/*	GPIOEDF_USE_IRQ             = (1U << 2) | GPIOIDF_USE_DOWN_IRQ, */
+	GPIOEDF_PRINT_KEYS          = 1U << 8,
+	GPIOEDF_PRINT_KEY_DEBOUNCE  = 1U << 9,
+	GPIOEDF_PRINT_KEY_UNSTABLE  = 1U << 10,
+};
+
+struct gpio_event_direct_entry {
+	uint32_t gpio:16;
+	uint32_t code:10;
+	uint32_t dev:6;
+};
+
+/* inputs */
+extern int gpio_event_input_func(struct gpio_event_input_devs *input_devs,
+			struct gpio_event_info *info, void **data, int func);
+struct gpio_event_input_info {
+	/* initialize to gpio_event_input_func */
+	struct gpio_event_info info;
+	ktime_t debounce_time;
+	ktime_t poll_time;
+	uint16_t flags;
+	uint16_t type;
+	const struct gpio_event_direct_entry *keymap;
+	size_t keymap_size;
+};
+
+/* outputs */
+extern int gpio_event_output_func(struct gpio_event_input_devs *input_devs,
+			struct gpio_event_info *info, void **data, int func);
+extern int gpio_event_output_event(struct gpio_event_input_devs *input_devs,
+			struct gpio_event_info *info, void **data,
+			unsigned int dev, unsigned int type,
+			unsigned int code, int value);
+struct gpio_event_output_info {
+	/* initialize to gpio_event_output_func and gpio_event_output_event */
+	struct gpio_event_info info;
+	uint16_t flags;
+	uint16_t type;
+	const struct gpio_event_direct_entry *keymap;
+	size_t keymap_size;
+};
+
+
+/* axes */
+
+enum gpio_event_axis_flags {
+	GPIOEAF_PRINT_UNKNOWN_DIRECTION  = 1U << 16,
+	GPIOEAF_PRINT_RAW                = 1U << 17,
+	GPIOEAF_PRINT_EVENT              = 1U << 18,
+};
+
+extern int gpio_event_axis_func(struct gpio_event_input_devs *input_devs,
+			struct gpio_event_info *info, void **data, int func);
+struct gpio_event_axis_info {
+	/* initialize to gpio_event_axis_func */
+	struct gpio_event_info info;
+	uint8_t  count; /* number of gpios for this axis */
+	uint8_t  dev; /* device index when using multiple input devices */
+	uint8_t  type; /* EV_REL or EV_ABS */
+	uint16_t code;
+	uint16_t decoded_size;
+	uint16_t (*map)(struct gpio_event_axis_info *info, uint16_t in);
+	uint32_t *gpio;
+	uint32_t flags;
+};
+#define gpio_axis_2bit_gray_map gpio_axis_4bit_gray_map
+#define gpio_axis_3bit_gray_map gpio_axis_4bit_gray_map
+uint16_t gpio_axis_4bit_gray_map(
+			struct gpio_event_axis_info *info, uint16_t in);
+uint16_t gpio_axis_5bit_singletrack_map(
+			struct gpio_event_axis_info *info, uint16_t in);
+
+#endif
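
Board code consumes this header by declaring a keymap and platform data; a
hedged sketch of a 2x2 matrix keypad (GPIO numbers, keycodes, and timings
are made up, and the ktime .tv64 initializers assume the 3.10-era ktime_t):

static const unsigned short my_keymap[] = {
	KEY_A, KEY_B,		/* ninputs * noutputs entries */
	KEY_C, KEY_D,
};

static unsigned int my_out_gpios[] = { 10, 11 };	/* driven rows */
static unsigned int my_in_gpios[]  = { 20, 21 };	/* sensed columns */

static struct gpio_event_matrix_info my_matrix_info = {
	.info.func	= gpio_event_matrix_func,
	.keymap		= my_keymap,
	.output_gpios	= my_out_gpios,
	.input_gpios	= my_in_gpios,
	.noutputs	= ARRAY_SIZE(my_out_gpios),
	.ninputs	= ARRAY_SIZE(my_in_gpios),
	.settle_time.tv64 = 40 * NSEC_PER_USEC,
	.debounce_delay.tv64 = 5 * NSEC_PER_MSEC,
	.flags		= GPIOKPF_ACTIVE_HIGH | GPIOKPF_DEBOUNCE,
};

static struct gpio_event_info *my_info[] = { &my_matrix_info.info };

static struct gpio_event_platform_data my_keypad_pdata = {
	.name		= "my-keypad",
	.info		= my_info,
	.info_count	= ARRAY_SIZE(my_info),
};
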
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 4f8aa47..a969efb 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -284,6 +284,8 @@
 #define HID_QUIRK_HIDINPUT_FORCE		0x00000080
 #define HID_QUIRK_NO_EMPTY_INPUT		0x00000100
 #define HID_QUIRK_SKIP_OUTPUT_REPORTS		0x00010000
+#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID		0x00020000
+#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP	0x00040000
 #define HID_QUIRK_FULLSPEED_INTERVAL		0x10000000
 #define HID_QUIRK_NO_INIT_REPORTS		0x20000000
 #define HID_QUIRK_NO_IGNORE			0x40000000
@@ -458,6 +460,7 @@
 	enum hid_type type;						/* device type (mouse, kbd, ...) */
 	unsigned country;						/* HID country */
 	struct hid_report_enum report_enum[HID_REPORT_TYPES];
+	struct work_struct led_work;					/* delayed LED worker */
 
 	struct semaphore driver_lock;					/* protects the current driver, except during input */
 	struct semaphore driver_input_lock;				/* protects the current driver */
@@ -505,12 +508,6 @@
 				  struct hid_usage *, __s32);
 	void (*hiddev_report_event) (struct hid_device *, struct hid_report *);
 
-	/* handler for raw input (Get_Report) data, used by hidraw */
-	int (*hid_get_raw_report) (struct hid_device *, unsigned char, __u8 *, size_t, unsigned char);
-
-	/* handler for raw output data, used by hidraw */
-	int (*hid_output_raw_report) (struct hid_device *, __u8 *, size_t, unsigned char);
-
 	/* debugging support via debugfs */
 	unsigned short debug;
 	struct dentry *debug_dir;
@@ -649,8 +646,8 @@
 	int (*input_mapped)(struct hid_device *hdev,
 			struct hid_input *hidinput, struct hid_field *field,
 			struct hid_usage *usage, unsigned long **bit, int *max);
-	void (*input_configured)(struct hid_device *hdev,
-				 struct hid_input *hidinput);
+	int (*input_configured)(struct hid_device *hdev,
+				struct hid_input *hidinput);
 	void (*feature_mapping)(struct hid_device *hdev,
 			struct hid_field *field,
 			struct hid_usage *usage);
@@ -669,10 +666,11 @@
  * @stop: called on remove
  * @open: called by input layer on open
  * @close: called by input layer on close
- * @hidinput_input_event: event input event (e.g. ff or leds)
  * @parse: this method is called only once to parse the device data,
  *	   shouldn't allocate anything to not leak memory
  * @request: send report request to device (e.g. feature report)
+ * @raw_request: send raw report request to device (e.g. feature report)
+ * @output_report: send output report to device
  * @wait: wait for buffered io to complete (send/recv reports)
  * @idle: send idle request to device
  */
@@ -685,17 +683,20 @@
 
 	int (*power)(struct hid_device *hdev, int level);
 
-	int (*hidinput_input_event) (struct input_dev *idev, unsigned int type,
-			unsigned int code, int value);
-
 	int (*parse)(struct hid_device *hdev);
 
 	void (*request)(struct hid_device *hdev,
 			struct hid_report *report, int reqtype);
 
 	int (*wait)(struct hid_device *hdev);
-	int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype);
 
+	int (*raw_request) (struct hid_device *hdev, unsigned char reportnum,
+			    __u8 *buf, size_t len, unsigned char rtype,
+			    int reqtype);
+
+	int (*output_report) (struct hid_device *hdev, __u8 *buf, size_t len);
+
+	int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype);
 };
 
 #define	PM_HINT_FULLON	1<<5
@@ -746,6 +747,7 @@
 unsigned int hidinput_count_leds(struct hid_device *hid);
 __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code);
 void hid_output_report(struct hid_report *report, __u8 *data);
+void __hid_request(struct hid_device *hid, struct hid_report *rep, int reqtype);
 u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags);
 struct hid_device *hid_allocate_device(void);
 struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
@@ -958,7 +960,55 @@
 				  struct hid_report *report, int reqtype)
 {
 	if (hdev->ll_driver->request)
-		hdev->ll_driver->request(hdev, report, reqtype);
+		return hdev->ll_driver->request(hdev, report, reqtype);
+
+	__hid_request(hdev, report, reqtype);
+}
+
+/**
+ * hid_hw_raw_request - send report request to device
+ *
+ * @hdev: hid device
+ * @reportnum: report ID
+ * @buf: in/out data to transfer
+ * @len: length of buf
+ * @rtype: HID report type
+ * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
+ *
+ * @return: count of data transferred, negative if error
+ *
+ * Same behavior as hid_hw_request, but with raw buffers instead.
+ */
+static inline int hid_hw_raw_request(struct hid_device *hdev,
+				  unsigned char reportnum, __u8 *buf,
+				  size_t len, unsigned char rtype, int reqtype)
+{
+	if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
+		return -EINVAL;
+
+	return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
+						    rtype, reqtype);
+}
+
+/**
+ * hid_hw_output_report - send output report to device
+ *
+ * @hdev: hid device
+ * @buf: raw data to transfer
+ * @len: length of buf
+ *
+ * @return: count of data transferred, negative if error
+ */
+static inline int hid_hw_output_report(struct hid_device *hdev, __u8 *buf,
+					size_t len)
+{
+	if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
+		return -EINVAL;
+
+	if (hdev->ll_driver->output_report)
+		return hdev->ll_driver->output_report(hdev, buf, len);
+
+	return -ENOSYS;
 }
 
 /**
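
A driver-side sketch of the two new transport-neutral helpers (report IDs
and payload bytes are illustrative):

static int my_get_then_set(struct hid_device *hdev)
{
	__u8 buf[8] = { 0x01 };	/* first byte: report ID */
	int ret;

	/* Fetch a feature report through the transport driver. */
	ret = hid_hw_raw_request(hdev, 0x01, buf, sizeof(buf),
				 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
	if (ret < 0)
		return ret;

	/* Send a two-byte raw output report (e.g. LED state). */
	buf[0] = 0x02;
	buf[1] = 0x55;
	return hid_hw_output_report(hdev, buf, 2);
}
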
diff --git a/include/linux/if_pppolac.h b/include/linux/if_pppolac.h
new file mode 100644
index 0000000..e40aa10
--- /dev/null
+++ b/include/linux/if_pppolac.h
@@ -0,0 +1,23 @@
+/* include/linux/if_pppolac.h
+ *
+ * Header for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_IF_PPPOLAC_H
+#define __LINUX_IF_PPPOLAC_H
+
+#include <uapi/linux/if_pppolac.h>
+
+#endif /* __LINUX_IF_PPPOLAC_H */
diff --git a/include/linux/if_pppopns.h b/include/linux/if_pppopns.h
new file mode 100644
index 0000000..4ac621a9
--- /dev/null
+++ b/include/linux/if_pppopns.h
@@ -0,0 +1,23 @@
+/* include/linux/if_pppopns.h
+ *
+ * Header for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_IF_PPPOPNS_H
+#define __LINUX_IF_PPPOPNS_H
+
+#include <uapi/linux/if_pppopns.h>
+
+#endif /* __LINUX_IF_PPPOPNS_H */
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index aff7ad8..dccd621 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -41,6 +41,25 @@
 	u32 seq_sent, seq_recv;
 	int ppp_flags;
 };
+
+struct pppolac_opt {
+	__u32		local;
+	__u32		remote;
+	__u32		recv_sequence;
+	__u32		xmit_sequence;
+	atomic_t	sequencing;
+	int		(*backlog_rcv)(struct sock *sk_udp, struct sk_buff *skb);
+};
+
+struct pppopns_opt {
+	__u16		local;
+	__u16		remote;
+	__u32		recv_sequence;
+	__u32		xmit_sequence;
+	void		(*data_ready)(struct sock *sk_raw, int length);
+	int		(*backlog_rcv)(struct sock *sk_raw, struct sk_buff *skb);
+};
+
 #include <net/sock.h>
 
 struct pppox_sock {
@@ -51,6 +70,8 @@
 	union {
 		struct pppoe_opt pppoe;
 		struct pptp_opt  pptp;
+		struct pppolac_opt lac;
+		struct pppopns_opt pns;
 	} proto;
 	__be16			num;
 };
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 833926c..14f8aac 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -183,4 +183,7 @@
 int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
 	int *processed, unsigned int scale);
 
+int iio_read_channel_all_raw(struct iio_channel *chan, int *val);
+int iio_channel_get_name(const struct iio_channel *chan, char **chan_name);
+int iio_channel_get_num(const struct iio_channel *chan);
 #endif
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 6839111..0ae14f8 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -13,6 +13,7 @@
 #include <linux/device.h>
 #include <linux/cdev.h>
 #include <linux/iio/types.h>
+#include <linux/iio/consumer.h>
 /* IIO TODO LIST */
 /*
  * Provide means of adjusting timer accuracy.
@@ -287,6 +288,9 @@
 			int *val2,
 			long mask);
 
+	int (*read_all_raw)(struct iio_channel *chan,
+			int *val);
+
 	int (*write_raw)(struct iio_dev *indio_dev,
 			 struct iio_chan_spec const *chan,
 			 int val,
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index 88bf0f0..2a50d36 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -29,6 +29,7 @@
 	IIO_ALTVOLTAGE,
 	IIO_CCT,
 	IIO_PRESSURE,
+	IIO_RESISTANCE,
 };
 
 enum iio_modifier {
diff --git a/include/linux/initramfs.h b/include/linux/initramfs.h
new file mode 100644
index 0000000..fc7da63
--- /dev/null
+++ b/include/linux/initramfs.h
@@ -0,0 +1,32 @@
+/*
+ * include/linux/initramfs.h
+ *
+ * Copyright (C) 2015, Google
+ * Rom Lemarchand <romlem@android.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _LINUX_INITRAMFS_H
+#define _LINUX_INITRAMFS_H
+
+#include <linux/kconfig.h>
+
+#if IS_BUILTIN(CONFIG_BLK_DEV_INITRD)
+
+int __init default_rootfs(void);
+
+#endif
+
+#endif /* _LINUX_INITRAMFS_H */
diff --git a/include/linux/intel_mid_dma.h b/include/linux/intel_mid_dma.h
index 10496bd..d7f2997 100644
--- a/include/linux/intel_mid_dma.h
+++ b/include/linux/intel_mid_dma.h
@@ -29,6 +29,10 @@
 
 #define DMA_PREP_CIRCULAR_LIST		(1 << 10)
 
+#define SST_MAX_DMA_LEN		4095
+#define SST_MAX_DMA_LEN_MRFLD	131071 /* 2^17 - 1 */
+
+
 /*DMA mode configurations*/
 enum intel_mid_dma_mode {
 	LNW_DMA_PER_TO_MEM = 0, /*periphral to memory configuration*/
@@ -73,4 +77,7 @@
 	struct dma_slave_config		dma_slave;
 };
 
+struct device *intel_mid_get_acpi_dma(const char *hid);
+dma_addr_t intel_dma_get_src_addr(struct dma_chan *chan);
+dma_addr_t intel_dma_get_dst_addr(struct dma_chan *chan);
 #endif /*__INTEL_MID_DMA_H__*/
diff --git a/include/linux/intel_mid_pm.h b/include/linux/intel_mid_pm.h
new file mode 100644
index 0000000..4dfaa81
--- /dev/null
+++ b/include/linux/intel_mid_pm.h
@@ -0,0 +1,234 @@
+/*
+ * intel_mid_pm.h
+ * Copyright (c) 2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#include <linux/errno.h>
+
+#ifndef INTEL_MID_PM_H
+#define INTEL_MID_PM_H
+
+#include <asm/intel-mid.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+
+
+/* Chip ID of Intel Atom SOC*/
+#define INTEL_ATOM_MRST 0x26
+#define INTEL_ATOM_MFLD 0x27
+#define INTEL_ATOM_CLV 0x35
+#define INTEL_ATOM_MRFLD 0x4a
+#define INTEL_ATOM_BYT 0x37
+
+static inline int platform_is(u8 model)
+{
+	return (boot_cpu_data.x86_model == model);
+}
+
+/* Register Type definitions */
+#define OSPM_REG_TYPE          0x0
+#define APM_REG_TYPE           0x1
+#define OSPM_MAX_POWER_ISLANDS 16
+#define OSPM_ISLAND_UP         0x0
+#define OSPM_ISLAND_DOWN       0x1
+/*Soft reset*/
+#define OSPM_ISLAND_SR         0x2
+
+/* North complex power islands definitions for APM block*/
+#define APM_GRAPHICS_ISLAND    0x1
+#define APM_VIDEO_DEC_ISLAND   0x2
+#define APM_VIDEO_ENC_ISLAND   0x4
+#define APM_GL3_CACHE_ISLAND   0x8
+#define APM_ISP_ISLAND         0x10
+#define APM_IPH_ISLAND         0x20
+
+/* North complex power islands definitions for OSPM block*/
+#define OSPM_DISPLAY_A_ISLAND  0x2
+#define OSPM_DISPLAY_B_ISLAND  0x80
+#define OSPM_DISPLAY_C_ISLAND  0x100
+#define OSPM_MIPI_ISLAND       0x200
+
+/* North Complex power islands definitions for Tangier */
+#define TNG_ISP_ISLAND		0x1
+/* North Complex Register definitions for Tangier */
+#define	ISP_SS_PM0		0x39
+
+#define C4_STATE_IDX	3
+#define C6_STATE_IDX	4
+#define S0I1_STATE_IDX  5
+#define LPMP3_STATE_IDX 6
+#define S0I3_STATE_IDX  7
+
+#define C4_HINT	(0x30)
+#define C6_HINT	(0x52)
+
+#define CSTATE_EXIT_LATENCY_C1	 1
+#define CSTATE_EXIT_LATENCY_C2	 20
+#define CSTATE_EXIT_LATENCY_C4	 100
+#define CSTATE_EXIT_LATENCY_C6	 140
+
+/* Since entry latency is substantial
+ * put exit_latency = entry+exit latency
+ */
+#ifdef CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER
+#define CSTATE_EXIT_LATENCY_S0i1 1200
+#define CSTATE_EXIT_LATENCY_S0i2 2000
+#define CSTATE_EXIT_LATENCY_S0i3 10000
+#else
+#define CSTATE_EXIT_LATENCY_LPMP3 1040
+#define CSTATE_EXIT_LATENCY_S0i1 1040
+#define CSTATE_EXIT_LATENCY_S0i3 2800
+#endif
+#define BYT_S0I1_STATE         0x60
+#define BYT_S0I2_STATE         0x62
+#define BYT_LPMP3_STATE        0x62
+#define BYT_S0I3_STATE         0x64
+
+enum s3_parts {
+	PROC_FRZ,
+	DEV_SUS,
+	NB_CPU_OFF,
+	NB_CPU_ON,
+	DEV_RES,
+	PROC_UNFRZ,
+	MAX_S3_PARTS
+};
+
+#ifdef CONFIG_ATOM_SOC_POWER
+#define LOG_PMU_EVENTS
+
+/* Error codes for pmu */
+#define	PMU_SUCCESS			0
+#define PMU_FAILED			-1
+#define PMU_BUSY_STATUS			0
+#define PMU_MODE_ID			1
+#define	SET_MODE			1
+#define	SET_AOAC_S0i1			2
+#define	SET_AOAC_S0i3			3
+#define	SET_LPAUDIO			4
+#define	SET_AOAC_S0i2			7
+
+#ifdef CONFIG_REMOVEME_INTEL_ATOM_MRFLD_POWER
+#define MID_S0I1_STATE         0x60
+#define MID_S0I2_STATE         0x62
+#define MID_LPMP3_STATE        0x62
+#define MID_S0I3_STATE         0x64
+#else
+#define MID_S0I1_STATE         0x1
+#define MID_LPMP3_STATE        0x3
+#define MID_S0I2_STATE         0x7
+#define MID_S0I3_STATE         0x7
+#endif
+
+#define MID_S0IX_STATE         0xf
+#define MID_S3_STATE           0x1f
+#define MID_FAST_ON_OFF_STATE  0x3f
+
+/* combinations */
+#define MID_LPI1_STATE         0x1f
+#define MID_LPI3_STATE         0x7f
+#define MID_I1I3_STATE         0xff
+
+#define REMOVE_LP_FROM_LPIX    4
+
+/* Power number for MID_POWER */
+#define C0_POWER_USAGE         450
+#define C6_POWER_USAGE         200
+#define LPMP3_POWER_USAGE      130
+#define S0I1_POWER_USAGE       50
+#define S0I3_POWER_USAGE       31
+
+extern unsigned int enable_s3;
+extern unsigned int enable_s0ix;
+
+extern void pmu_s0ix_demotion_stat(int req_state, int grant_state);
+extern unsigned int pmu_get_new_cstate(unsigned int cstate, int *index);
+extern int get_target_platform_state(unsigned long *eax);
+extern int mid_s0ix_enter(int);
+extern int pmu_set_devices_in_d0i0(void);
+extern int pmu_pci_set_power_state(struct pci_dev *pdev, pci_power_t state);
+extern pci_power_t pmu_pci_choose_state(struct pci_dev *pdev);
+
+extern void time_stamp_in_suspend_flow(int mark, bool start);
+extern void time_stamp_for_sleep_state_latency(int sleep_state,
+						bool start, bool entry);
+extern int mid_state_to_sys_state(int mid_state);
+extern void pmu_power_off(void);
+extern void pmu_set_s0ix_complete(void);
+extern bool pmu_is_s0ix_in_progress(void);
+extern int pmu_nc_set_power_state
+	(int islands, int state_type, int reg_type);
+extern int pmu_nc_get_power_state(int island, int reg_type);
+extern int pmu_set_emmc_to_d0i0_atomic(void);
+
+#ifdef LOG_PMU_EVENTS
+extern void pmu_log_ipc(u32 command);
+extern void pmu_log_ipc_irq(void);
+#else
+static inline void pmu_log_ipc(u32 command) { return; };
+static inline void pmu_log_ipc_irq(void) { return; };
+#endif
+extern void dump_nc_power_history(void);
+
+extern bool mid_pmu_is_wake_source(u32 lss_number);
+
+extern void (*nc_report_power_state) (u32, int);
+#else
+
+/*
+ * If CONFIG_ATOM_SOC_POWER is not defined,
+ * fall back to C6
+ */
+
+#define MID_S0I1_STATE         C6_HINT
+#define MID_LPMP3_STATE        C6_HINT
+#define MID_S0I3_STATE         C6_HINT
+#define MID_S3_STATE           C6_HINT
+#define MID_FAST_ON_OFF_STATE  C6_HINT
+
+/* Power usage unknown if MID_POWER not defined */
+#define C0_POWER_USAGE         0
+#define C6_POWER_USAGE         0
+#define LPMP3_POWER_USAGE      0
+#define S0I1_POWER_USAGE       0
+#define S0I3_POWER_USAGE       0
+
+#define TEMP_DTS_ID     43
+
+static inline int pmu_nc_set_power_state
+	(int islands, int state_type, int reg_type) { return 0; }
+static inline int pmu_nc_get_power_state(int island, int reg_type) { return 0; }
+
+static inline void pmu_set_s0ix_complete(void) { return; }
+static inline bool pmu_is_s0ix_in_progress(void) { return false; };
+static inline unsigned int pmu_get_new_cstate
+			(unsigned int cstate, int *index) { return cstate; };
+
+/* Stubs below report "function not implemented" where a result is expected */
+static inline void time_stamp_in_suspend_flow(int mark, bool start) {}
+static inline void time_stamp_for_sleep_state_latency(int sleep_state,
+					bool start, bool entry) {}
+static inline int mid_state_to_sys_state(int mid_state) { return 0; }
+
+static inline int pmu_set_devices_in_d0i0(void) { return 0; }
+static inline void pmu_log_ipc(u32 command) { return; };
+static inline void pmu_log_ipc_irq(void) { return; };
+static inline int pmu_set_emmc_to_d0i0_atomic(void) { return -ENOSYS; }
+static inline void pmu_power_off(void) { return; }
+static inline bool mid_pmu_is_wake_source(u32 lss_number) { return false; }
+#endif /* #ifdef CONFIG_ATOM_SOC_POWER */
+
+#endif /* #ifndef INTEL_MID_PM_H */
diff --git a/include/linux/intel_pidv_acpi.h b/include/linux/intel_pidv_acpi.h
new file mode 100644
index 0000000..77f0178
--- /dev/null
+++ b/include/linux/intel_pidv_acpi.h
@@ -0,0 +1,54 @@
+/*
+ * include/linux/intel_pidv_acpi.h
+ *
+ * Copyright (C) 2013 Intel Corp
+ * Author: Vincent Tinelli (vincent.tinelli@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#ifndef _INTEL_PIDV_ACPI_H
+#define _INTEL_PIDV_ACPI_H
+
+#include <linux/acpi.h>
+#ifdef CONFIG_ACPI
+#define ACPI_SIG_PIDV           "PIDV"
+
+#define pidv_attr(_name) \
+static struct kobj_attribute _name##_attr = {	\
+	.attr   = {				\
+		.name = __stringify(_name),	\
+		.mode = 0440,			\
+	},					\
+	.show   = _name##_show,			\
+}
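The pidv_attr() macro assumes a companion <name>_show() routine with the kobj_attribute show signature. A minimal sketch, assuming a parsed PIDV table pointer (example_pidv is invented for illustration):

static struct platform_id *example_pidv;	/* assumed: set after ACPI parse */

static ssize_t part_number_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	/* dump the 32-byte part number as hex via the kernel's %*ph helper */
	return scnprintf(buf, PAGE_SIZE, "%32ph\n", example_pidv->part_number);
}
pidv_attr(part_number);	/* expands to a read-only (0440) part_number_attr */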
+
+struct platform_id {
+	u8 part_number[32];
+	u8 ext_id_1[32];
+	u8 ext_id_2[32];
+	u8 uuid[16];
+	u8 iafw_major;
+	u8 iafw_minor;
+	u8 secfw_major;
+	u8 secfw_minor;
+};
+
+struct acpi_table_pidv {
+	struct acpi_table_header header;
+	struct platform_id pidv;
+};
+
+#endif
+#endif
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index b8b7dc7..c4a36ab 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -36,10 +36,12 @@
 	__s32		accept_ra_rt_info_max_plen;
 #endif
 #endif
+	__s32		accept_ra_rt_table;
 	__s32		proxy_ndp;
 	__s32		accept_source_route;
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
 	__s32		optimistic_dad;
+	__s32		use_optimistic;
 #endif
 #ifdef CONFIG_IPV6_MROUTE
 	__s32		mc_forwarding;
@@ -48,6 +50,7 @@
 	__s32		accept_dad;
 	__s32		force_tllao;
 	__s32           ndisc_notify;
+	__s32		use_oif_addrs_only;
 	void		*sysctl;
 };
 
diff --git a/include/linux/irq.h b/include/linux/irq.h
index d591bfe..b2b022d 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -94,6 +94,7 @@
 	IRQ_NESTED_THREAD	= (1 << 15),
 	IRQ_NOTHREAD		= (1 << 16),
 	IRQ_PER_CPU_DEVID	= (1 << 17),
+	IRQ_CHAINED		= (1 << 18),
 };
 
 #define IRQF_MODIFY_MASK	\
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 078bc2f..b099e2c 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -127,6 +127,13 @@
 	return desc->action != NULL;
 }
 
+/* Test to see if the IRQ is chained */
+static inline int irq_is_chained(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	return desc->status_use_accessors & IRQ_CHAINED;
+}
+
 /* caller has locked the irq_desc and both params are valid */
 static inline void __irq_set_handler_locked(unsigned int irq,
 					    irq_flow_handler_t handler)
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 7e0b622..27d08fd 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -243,6 +243,31 @@
 #ifdef __KERNEL__
 
 #include <linux/fs.h>
+
+enum jbd_state_bits {
+	BH_JBD			/* Has an attached ext3 journal_head */
+	  = BH_PrivateStart,
+	BH_JWrite,		/* Being written to log (@@@ DEBUGGING) */
+	BH_Freed,		/* Has been freed (truncated) */
+	BH_Revoked,		/* Has been revoked from the log */
+	BH_RevokeValid,		/* Revoked flag is valid */
+	BH_JBDDirty,		/* Is dirty but journaled */
+	BH_State,		/* Pins most journal_head state */
+	BH_JournalHead,		/* Pins bh->b_private and jh->b_bh */
+	BH_Unshadow,		/* Dummy bit, for BJ_Shadow wakeup filtering */
+	BH_JBDPrivateStart,	/* First bit available for private use by FS */
+};
+
+BUFFER_FNS(JBD, jbd)
+BUFFER_FNS(JWrite, jwrite)
+BUFFER_FNS(JBDDirty, jbddirty)
+TAS_BUFFER_FNS(JBDDirty, jbddirty)
+BUFFER_FNS(Revoked, revoked)
+TAS_BUFFER_FNS(Revoked, revoked)
+BUFFER_FNS(RevokeValid, revokevalid)
+TAS_BUFFER_FNS(RevokeValid, revokevalid)
+BUFFER_FNS(Freed, freed)
+
 #include <linux/sched.h>
 #include <linux/jbd_common.h>
 
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 7d4a932..8c8779b 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -26,7 +26,6 @@
 #include <linux/buffer_head.h>
 #include <linux/journal-head.h>
 #include <linux/stddef.h>
-#include <linux/bit_spinlock.h>
 #include <linux/mutex.h>
 #include <linux/timer.h>
 #include <linux/slab.h>
@@ -57,17 +56,13 @@
  */
 #define JBD2_EXPENSIVE_CHECKING
 extern ushort jbd2_journal_enable_debug;
+void __jbd2_debug(int level, const char *file, const char *func,
+		  unsigned int line, const char *fmt, ...);
 
-#define jbd_debug(n, f, a...)						\
-	do {								\
-		if ((n) <= jbd2_journal_enable_debug) {			\
-			printk (KERN_DEBUG "(%s, %d): %s: ",		\
-				__FILE__, __LINE__, __func__);	\
-			printk (f, ## a);				\
-		}							\
-	} while (0)
+#define jbd_debug(n, fmt, a...) \
+	__jbd2_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
 #else
-#define jbd_debug(f, a...)	/**/
+#define jbd_debug(n, fmt, a...)    /**/
 #endif
 
 extern void *jbd2_alloc(size_t size, gfp_t flags);
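A call site is unchanged by this rework and still looks like the hedged example below (the journal and tid variables are assumed to be in scope); only the expansion now funnels through __jbd2_debug():

/* Illustrative only: level-1 message, printed with file/func/line context */
jbd_debug(1, "journal %p: commit of tid %u requested\n", journal, tid);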
@@ -164,7 +159,11 @@
  * journal_block_tag (in the descriptor).  The other h_chksum* fields are
  * not used.
  *
- * Checksum v1 and v2 are mutually exclusive features.
+ * If FEATURE_INCOMPAT_CSUM_V3 is set, the descriptor block uses
+ * journal_block_tag3_t to store a full 32-bit checksum.  Everything else
+ * is the same as v2.
+ *
+ * Checksum v1, v2, and v3 are mutually exclusive features.
  */
 struct commit_header {
 	__be32		h_magic;
@@ -184,6 +183,14 @@
  * raw struct shouldn't be used for pointer math or sizeof() - use
  * journal_tag_bytes(journal) instead to compute this.
  */
+typedef struct journal_block_tag3_s
+{
+	__be32		t_blocknr;	/* The on-disk block number */
+	__be32		t_flags;	/* See below */
+	__be32		t_blocknr_high; /* most-significant high 32bits. */
+	__be32		t_checksum;	/* crc32c(uuid+seq+block) */
+} journal_block_tag3_t;
+
 typedef struct journal_block_tag_s
 {
 	__be32		t_blocknr;	/* The on-disk block number */
@@ -192,9 +199,6 @@
 	__be32		t_blocknr_high; /* most-significant high 32bits. */
 } journal_block_tag_t;
 
-#define JBD2_TAG_SIZE32 (offsetof(journal_block_tag_t, t_blocknr_high))
-#define JBD2_TAG_SIZE64 (sizeof(journal_block_tag_t))
-
 /* Tail of descriptor block, for checksumming */
 struct jbd2_journal_block_tail {
 	__be32		t_checksum;	/* crc32c(uuid+descr_block) */
@@ -289,6 +293,7 @@
 #define JBD2_FEATURE_INCOMPAT_64BIT		0x00000002
 #define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT	0x00000004
 #define JBD2_FEATURE_INCOMPAT_CSUM_V2		0x00000008
+#define JBD2_FEATURE_INCOMPAT_CSUM_V3		0x00000010
 
 /* Features known to this kernel version: */
 #define JBD2_KNOWN_COMPAT_FEATURES	JBD2_FEATURE_COMPAT_CHECKSUM
@@ -296,12 +301,41 @@
 #define JBD2_KNOWN_INCOMPAT_FEATURES	(JBD2_FEATURE_INCOMPAT_REVOKE | \
 					JBD2_FEATURE_INCOMPAT_64BIT | \
 					JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \
-					JBD2_FEATURE_INCOMPAT_CSUM_V2)
+					JBD2_FEATURE_INCOMPAT_CSUM_V2 | \
+					JBD2_FEATURE_INCOMPAT_CSUM_V3)
 
 #ifdef __KERNEL__
 
 #include <linux/fs.h>
 #include <linux/sched.h>
+
+enum jbd_state_bits {
+	BH_JBD			/* Has an attached ext3 journal_head */
+	  = BH_PrivateStart,
+	BH_JWrite,		/* Being written to log (@@@ DEBUGGING) */
+	BH_Freed,		/* Has been freed (truncated) */
+	BH_Revoked,		/* Has been revoked from the log */
+	BH_RevokeValid,		/* Revoked flag is valid */
+	BH_JBDDirty,		/* Is dirty but journaled */
+	BH_State,		/* Pins most journal_head state */
+	BH_JournalHead,		/* Pins bh->b_private and jh->b_bh */
+	BH_Shadow,		/* IO on shadow buffer is running */
+	BH_Verified,		/* Metadata block has been verified ok */
+	BH_JBDPrivateStart,	/* First bit available for private use by FS */
+};
+
+BUFFER_FNS(JBD, jbd)
+BUFFER_FNS(JWrite, jwrite)
+BUFFER_FNS(JBDDirty, jbddirty)
+TAS_BUFFER_FNS(JBDDirty, jbddirty)
+BUFFER_FNS(Revoked, revoked)
+TAS_BUFFER_FNS(Revoked, revoked)
+BUFFER_FNS(RevokeValid, revokevalid)
+TAS_BUFFER_FNS(RevokeValid, revokevalid)
+BUFFER_FNS(Freed, freed)
+BUFFER_FNS(Shadow, shadow)
+BUFFER_FNS(Verified, verified)
+
 #include <linux/jbd_common.h>
 
 #define J_ASSERT(assert)	BUG_ON(!(assert))
@@ -382,8 +416,15 @@
 
 struct jbd2_journal_handle
 {
-	/* Which compound transaction is this update a part of? */
-	transaction_t		*h_transaction;
+	union {
+		/* Which compound transaction is this update a part of? */
+		transaction_t	*h_transaction;
+		/* Which journal this handle belongs to - used iff h_reserved is set */
+		journal_t	*h_journal;
+	};
+
+	/* Handle reserved for finishing the logical operation */
+	handle_t		*h_rsv_handle;
 
 	/* Number of remaining buffers we are allowed to dirty: */
 	int			h_buffer_credits;
@@ -398,6 +439,7 @@
 	/* Flags [no locking] */
 	unsigned int	h_sync:		1;	/* sync-on-close */
 	unsigned int	h_jdata:	1;	/* force data journaling */
+	unsigned int	h_reserved:	1;	/* handle with reserved credits */
 	unsigned int	h_aborted:	1;	/* fatal error on handle */
 	unsigned int	h_type:		8;	/* for handle statistics */
 	unsigned int	h_line_no:	16;	/* for handle statistics */
@@ -524,12 +566,6 @@
 	struct journal_head	*t_checkpoint_io_list;
 
 	/*
-	 * Doubly-linked circular list of temporary buffers currently undergoing
-	 * IO in the log [j_list_lock]
-	 */
-	struct journal_head	*t_iobuf_list;
-
-	/*
 	 * Doubly-linked circular list of metadata buffers being shadowed by log
 	 * IO.  The IO buffers on the iobuf list and the shadow buffers on this
 	 * list match each other one for one at all times. [j_list_lock]
@@ -537,12 +573,6 @@
 	struct journal_head	*t_shadow_list;
 
 	/*
-	 * Doubly-linked circular list of control buffers being written to the
-	 * log. [j_list_lock]
-	 */
-	struct journal_head	*t_log_list;
-
-	/*
 	 * List of inodes whose data we've modified in data=ordered mode.
 	 * [j_list_lock]
 	 */
@@ -671,11 +701,10 @@
  *  waiting for checkpointing
  * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction
  *  to start committing, or for a barrier lock to be released
- * @j_wait_logspace: Wait queue for waiting for checkpointing to complete
  * @j_wait_done_commit: Wait queue for waiting for commit to complete
- * @j_wait_checkpoint:  Wait queue to trigger checkpointing
  * @j_wait_commit: Wait queue to trigger commit
  * @j_wait_updates: Wait queue to wait for updates to complete
+ * @j_wait_reserved: Wait queue to wait for reserved buffer credits to drop
  * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
  * @j_head: Journal head - identifies the first unused block in the journal
  * @j_tail: Journal tail - identifies the oldest still-used block in the
@@ -689,6 +718,7 @@
  *     journal
  * @j_fs_dev: Device which holds the client fs.  For internal journal this will
  *     be equal to j_dev
+ * @j_reserved_credits: Number of buffers reserved from the running transaction
  * @j_maxlen: Total maximum capacity of the journal region on disk.
  * @j_list_lock: Protects the buffer lists and internal buffer state.
  * @j_inode: Optional inode where we store the journal.  If present, all journal
@@ -778,21 +808,18 @@
 	 */
 	wait_queue_head_t	j_wait_transaction_locked;
 
-	/* Wait queue for waiting for checkpointing to complete */
-	wait_queue_head_t	j_wait_logspace;
-
 	/* Wait queue for waiting for commit to complete */
 	wait_queue_head_t	j_wait_done_commit;
 
-	/* Wait queue to trigger checkpointing */
-	wait_queue_head_t	j_wait_checkpoint;
-
 	/* Wait queue to trigger commit */
 	wait_queue_head_t	j_wait_commit;
 
 	/* Wait queue to wait for updates to complete */
 	wait_queue_head_t	j_wait_updates;
 
+	/* Wait queue to wait for reserved buffer credits to drop */
+	wait_queue_head_t	j_wait_reserved;
+
 	/* Semaphore for locking against concurrent checkpoints */
 	struct mutex		j_checkpoint_mutex;
 
@@ -847,6 +874,9 @@
 	/* Total maximum capacity of the journal region on disk. */
 	unsigned int		j_maxlen;
 
+	/* Number of buffers reserved from the running transaction */
+	atomic_t		j_reserved_credits;
+
 	/*
 	 * Protects the buffer lists and internal buffer state.
 	 */
@@ -992,9 +1022,17 @@
 extern void __journal_free_buffer(struct journal_head *bh);
 extern void jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
 extern void __journal_clean_data_list(transaction_t *transaction);
+static inline void jbd2_file_log_bh(struct list_head *head, struct buffer_head *bh)
+{
+	list_add_tail(&bh->b_assoc_buffers, head);
+}
+static inline void jbd2_unfile_log_bh(struct buffer_head *bh)
+{
+	list_del_init(&bh->b_assoc_buffers);
+}
 
 /* Log buffer allocation */
-extern struct journal_head * jbd2_journal_get_descriptor_buffer(journal_t *);
+struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal);
 int jbd2_journal_next_log_block(journal_t *, unsigned long long *);
 int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
 			      unsigned long *block);
@@ -1005,7 +1043,7 @@
 extern void jbd2_journal_commit_transaction(journal_t *);
 
 /* Checkpoint list management */
-int __jbd2_journal_clean_checkpoint_list(journal_t *journal);
+void __jbd2_journal_clean_checkpoint_list(journal_t *journal);
 int __jbd2_journal_remove_checkpoint(struct journal_head *);
 void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
 
@@ -1040,11 +1078,10 @@
 				      struct jbd2_buffer_trigger_type *triggers);
 
 /* Buffer IO */
-extern int
-jbd2_journal_write_metadata_buffer(transaction_t	  *transaction,
-			      struct journal_head  *jh_in,
-			      struct journal_head **jh_out,
-			      unsigned long long   blocknr);
+extern int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
+					      struct journal_head *jh_in,
+					      struct buffer_head **bh_out,
+					      sector_t blocknr);
 
 /* Transaction locking */
 extern void		__wait_on_journal (journal_t *);
@@ -1077,10 +1114,14 @@
  */
 
 extern handle_t *jbd2_journal_start(journal_t *, int nblocks);
-extern handle_t *jbd2__journal_start(journal_t *, int nblocks, gfp_t gfp_mask,
-				     unsigned int type, unsigned int line_no);
+extern handle_t *jbd2__journal_start(journal_t *, int blocks, int rsv_blocks,
+				     gfp_t gfp_mask, unsigned int type,
+				     unsigned int line_no);
 extern int	 jbd2_journal_restart(handle_t *, int nblocks);
 extern int	 jbd2__journal_restart(handle_t *, int nblocks, gfp_t gfp_mask);
+extern int	 jbd2_journal_start_reserved(handle_t *handle,
+				unsigned int type, unsigned int line_no);
+extern void	 jbd2_journal_free_reserved(handle_t *handle);
 extern int	 jbd2_journal_extend (handle_t *, int nblocks);
 extern int	 jbd2_journal_get_write_access(handle_t *, struct buffer_head *);
 extern int	 jbd2_journal_get_create_access (handle_t *, struct buffer_head *);
@@ -1091,7 +1132,7 @@
 extern int	 jbd2_journal_forget (handle_t *, struct buffer_head *);
 extern void	 journal_sync_buffer (struct buffer_head *);
 extern int	 jbd2_journal_invalidatepage(journal_t *,
-				struct page *, unsigned long);
+				struct page *, unsigned int, unsigned int);
 extern int	 jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
 extern int	 jbd2_journal_stop(handle_t *);
 extern int	 jbd2_journal_flush (journal_t *);
@@ -1126,6 +1167,7 @@
 extern int	   jbd2_journal_clear_err  (journal_t *);
 extern int	   jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *);
 extern int	   jbd2_journal_force_commit(journal_t *);
+extern int	   jbd2_journal_force_commit_nested(journal_t *);
 extern int	   jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *inode);
 extern int	   jbd2_journal_begin_ordered_truncate(journal_t *journal,
 				struct jbd2_inode *inode, loff_t new_size);
@@ -1179,8 +1221,10 @@
 extern void	   jbd2_journal_destroy_revoke(journal_t *);
 extern int	   jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *);
 extern int	   jbd2_journal_cancel_revoke(handle_t *, struct journal_head *);
-extern void	   jbd2_journal_write_revoke_records(journal_t *,
-						     transaction_t *, int);
+extern void	   jbd2_journal_write_revoke_records(journal_t *journal,
+						     transaction_t *transaction,
+						     struct list_head *log_bufs,
+						     int write_op);
 
 /* Recovery revoke support */
 extern int	jbd2_journal_set_revoke(journal_t *, unsigned long long, tid_t);
@@ -1196,11 +1240,9 @@
  * transitions on demand.
  */
 
-int __jbd2_log_space_left(journal_t *); /* Called with journal locked */
 int jbd2_log_start_commit(journal_t *journal, tid_t tid);
 int __jbd2_log_start_commit(journal_t *journal, tid_t tid);
 int jbd2_journal_start_commit(journal_t *journal, tid_t *tid);
-int jbd2_journal_force_commit_nested(journal_t *journal);
 int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
 int jbd2_complete_transaction(journal_t *journal, tid_t tid);
 int jbd2_log_do_checkpoint(journal_t *journal);
@@ -1236,7 +1278,7 @@
 
 static inline int is_handle_aborted(handle_t *handle)
 {
-	if (handle->h_aborted)
+	if (handle->h_aborted || !handle->h_transaction)
 		return 1;
 	return is_journal_aborted(handle->h_transaction->t_journal);
 }
@@ -1266,17 +1308,47 @@
 extern int jbd2_journal_blocks_per_page(struct inode *inode);
 extern size_t journal_tag_bytes(journal_t *journal);
 
+static inline int jbd2_journal_has_csum_v2or3(journal_t *journal)
+{
+	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) ||
+	    JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3))
+		return 1;
+
+	return 0;
+}
+
+/*
+ * We reserve t_outstanding_credits >> JBD2_CONTROL_BLOCKS_SHIFT for
+ * transaction control blocks.
+ */
+#define JBD2_CONTROL_BLOCKS_SHIFT 5
+
 /*
  * Return the minimum number of blocks which must be free in the journal
  * before a new transaction may be started.  Must be called under j_state_lock.
  */
-static inline int jbd_space_needed(journal_t *journal)
+static inline int jbd2_space_needed(journal_t *journal)
 {
 	int nblocks = journal->j_max_transaction_buffers;
-	if (journal->j_committing_transaction)
-		nblocks += atomic_read(&journal->j_committing_transaction->
-				       t_outstanding_credits);
-	return nblocks;
+	return nblocks + (nblocks >> JBD2_CONTROL_BLOCKS_SHIFT);
+}
+
+/*
+ * Return number of free blocks in the log. Must be called under j_state_lock.
+ */
+static inline unsigned long jbd2_log_space_left(journal_t *journal)
+{
+	/* Allow for rounding errors */
+	unsigned long free = journal->j_free - 32;
+
+	if (journal->j_committing_transaction) {
+		unsigned long committing = atomic_read(&journal->
+			j_committing_transaction->t_outstanding_credits);
+
+		/* Transaction + control blocks */
+		free -= committing + (committing >> JBD2_CONTROL_BLOCKS_SHIFT);
+	}
+	return free;
 }
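A quick worked example of the new reservation arithmetic: with j_max_transaction_buffers = 1024, jbd2_space_needed() returns 1024 + (1024 >> 5) = 1056 blocks, i.e. a fixed ~3% surcharge for control blocks, and jbd2_log_space_left() debits a committing transaction the same way (committing + committing/32) instead of charging its full outstanding credits as the old jbd_space_needed() did.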
 
 /*
@@ -1287,11 +1359,9 @@
 #define BJ_None		0	/* Not journaled */
 #define BJ_Metadata	1	/* Normal journaled metadata */
 #define BJ_Forget	2	/* Buffer superseded by this transaction */
-#define BJ_IO		3	/* Buffer is for temporary IO use */
-#define BJ_Shadow	4	/* Buffer contents being shadowed to the log */
-#define BJ_LogCtl	5	/* Buffer contains log descriptors */
-#define BJ_Reserved	6	/* Buffer is reserved for access by journal */
-#define BJ_Types	7
+#define BJ_Shadow	3	/* Buffer contents being shadowed to the log */
+#define BJ_Reserved	4	/* Buffer is reserved for access by journal */
+#define BJ_Types	5
 
 extern int jbd_blocks_per_page(struct inode *inode);
 
@@ -1320,6 +1390,19 @@
 	return *(u32 *)desc.ctx;
 }
 
+/* Return most recent uncommitted transaction */
+static inline tid_t jbd2_get_latest_transaction(journal_t *journal)
+{
+	tid_t tid;
+
+	read_lock(&journal->j_state_lock);
+	tid = journal->j_commit_request;
+	if (journal->j_running_transaction)
+		tid = journal->j_running_transaction->t_tid;
+	read_unlock(&journal->j_state_lock);
+	return tid;
+}
+
 #ifdef __KERNEL__
 
 #define buffer_trace_init(bh)	do {} while (0)
diff --git a/include/linux/jbd_common.h b/include/linux/jbd_common.h
index 6133679..3dc5343 100644
--- a/include/linux/jbd_common.h
+++ b/include/linux/jbd_common.h
@@ -1,31 +1,7 @@
 #ifndef _LINUX_JBD_STATE_H
 #define _LINUX_JBD_STATE_H
 
-enum jbd_state_bits {
-	BH_JBD			/* Has an attached ext3 journal_head */
-	  = BH_PrivateStart,
-	BH_JWrite,		/* Being written to log (@@@ DEBUGGING) */
-	BH_Freed,		/* Has been freed (truncated) */
-	BH_Revoked,		/* Has been revoked from the log */
-	BH_RevokeValid,		/* Revoked flag is valid */
-	BH_JBDDirty,		/* Is dirty but journaled */
-	BH_State,		/* Pins most journal_head state */
-	BH_JournalHead,		/* Pins bh->b_private and jh->b_bh */
-	BH_Unshadow,		/* Dummy bit, for BJ_Shadow wakeup filtering */
-	BH_Verified,		/* Metadata block has been verified ok */
-	BH_JBDPrivateStart,	/* First bit available for private use by FS */
-};
-
-BUFFER_FNS(JBD, jbd)
-BUFFER_FNS(JWrite, jwrite)
-BUFFER_FNS(JBDDirty, jbddirty)
-TAS_BUFFER_FNS(JBDDirty, jbddirty)
-BUFFER_FNS(Revoked, revoked)
-TAS_BUFFER_FNS(Revoked, revoked)
-BUFFER_FNS(RevokeValid, revokevalid)
-TAS_BUFFER_FNS(RevokeValid, revokevalid)
-BUFFER_FNS(Freed, freed)
-BUFFER_FNS(Verified, verified)
+#include <linux/bit_spinlock.h>
 
 static inline struct buffer_head *jh2bh(struct journal_head *jh)
 {
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 5f4554b..089774b 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -805,4 +805,7 @@
 # define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
 #endif
 
+/* Set this to identify board information in panic logs */
+extern char *mach_panic_string;
+
 #endif
diff --git a/include/linux/key.h b/include/linux/key.h
index 4dfde11..b2752c3 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -72,6 +72,15 @@
 
 #define KEY_PERM_UNDEF	0xffffffff
 
+/* New permission defines for backport */
+#define	KEY_NEED_VIEW	0x01	/* Require permission to view attributes */
+#define	KEY_NEED_READ	0x02	/* Require permission to read content */
+#define	KEY_NEED_WRITE	0x04	/* Require permission to update / modify */
+#define	KEY_NEED_SEARCH	0x08	/* Require permission to search (keyring) or find (key) */
+#define	KEY_NEED_LINK	0x10	/* Require permission to link */
+#define	KEY_NEED_SETATTR 0x20	/* Require permission to change attributes */
+#define	KEY_NEED_ALL	0x3f	/* All the above permissions */
+
 struct seq_file;
 struct user_struct;
 struct signal_struct;
diff --git a/include/linux/keychord.h b/include/linux/keychord.h
new file mode 100644
index 0000000..08cf540
--- /dev/null
+++ b/include/linux/keychord.h
@@ -0,0 +1,23 @@
+/*
+ *  Key chord input driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef __LINUX_KEYCHORD_H_
+#define __LINUX_KEYCHORD_H_
+
+#include <uapi/linux/keychord.h>
+
+#endif	/* __LINUX_KEYCHORD_H_ */
diff --git a/include/linux/keycombo.h b/include/linux/keycombo.h
new file mode 100644
index 0000000..c6db262
--- /dev/null
+++ b/include/linux/keycombo.h
@@ -0,0 +1,36 @@
+/*
+ * include/linux/keycombo.h - platform data structure for keycombo driver
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_KEYCOMBO_H
+#define _LINUX_KEYCOMBO_H
+
+#define KEYCOMBO_NAME "keycombo"
+
+/*
+ * If key_down_fn and key_up_fn are both present, you are guaranteed that
+ * key_down_fn will return before key_up_fn is called, and that key_up_fn
+ * is called iff key_down_fn is called.
+ */
+struct keycombo_platform_data {
+	void (*key_down_fn)(void *);
+	void (*key_up_fn)(void *);
+	void *priv;
+	int key_down_delay; /* Time in ms */
+	int *keys_up;
+	int keys_down[]; /* 0 terminated */
+};
+
+#endif /* _LINUX_KEYCOMBO_H */
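As a usage sketch, a board file can instantiate the driver roughly as below; the key codes (from <linux/input.h>) and the callback are illustrative, and the zero-length keys_down array is statically initialized via the GCC extension Android board files rely on:

static void example_combo_fn(void *priv)
{
	/* react to the combo, e.g. dump state */
}

static struct keycombo_platform_data example_keycombo_pdata = {
	.key_down_fn	= example_combo_fn,
	.key_down_delay	= 500,	/* ms the keys must be held */
	.keys_down	= { KEY_POWER, KEY_VOLUMEDOWN, 0 },	/* 0 terminated */
};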
diff --git a/include/linux/keyreset.h b/include/linux/keyreset.h
new file mode 100644
index 0000000..2e34afa
--- /dev/null
+++ b/include/linux/keyreset.h
@@ -0,0 +1,29 @@
+/*
+ * include/linux/keyreset.h - platform data structure for resetkeys driver
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_KEYRESET_H
+#define _LINUX_KEYRESET_H
+
+#define KEYRESET_NAME "keyreset"
+
+struct keyreset_platform_data {
+	int (*reset_fn)(void);
+	int key_down_delay;
+	int *keys_up;
+	int keys_down[]; /* 0 terminated */
+};
+
+#endif /* _LINUX_KEYRESET_H */
diff --git a/include/linux/lnw_gpio.h b/include/linux/lnw_gpio.h
new file mode 100644
index 0000000..2e9f6b2
--- /dev/null
+++ b/include/linux/lnw_gpio.h
@@ -0,0 +1,14 @@
+#ifndef _H_LANGWELL_GPIO_H
+#define _H_LANGWELL_GPIO_H
+
+enum {
+	LNW_GPIO = 0,
+	LNW_ALT_1 = 1,
+	LNW_ALT_2 = 2,
+	LNW_ALT_3 = 3,
+};
+
+void lnw_gpio_set_alt(int gpio, int alt);
+int gpio_get_alt(int gpio);
+
+#endif
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index 1cc89e9..ffb9c9d 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -40,6 +40,11 @@
 	} fam;
 };
 
+struct lsm_ioctlop_audit {
+	struct path path;
+	u16 cmd;
+};
+
 /* Auxiliary data to use in generating the audit record. */
 struct common_audit_data {
 	char type;
@@ -53,6 +58,7 @@
 #define LSM_AUDIT_DATA_KMOD	8
 #define LSM_AUDIT_DATA_INODE	9
 #define LSM_AUDIT_DATA_DENTRY	10
+#define LSM_AUDIT_DATA_IOCTL_OP	11
 	union 	{
 		struct path path;
 		struct dentry *dentry;
@@ -68,6 +74,7 @@
 		} key_struct;
 #endif
 		char *kmod_name;
+		struct lsm_ioctlop_audit *op;
 	} u;
 	/* this union contains LSM specific data */
 	union {
diff --git a/include/linux/lz4.h b/include/linux/lz4.h
new file mode 100644
index 0000000..4356686
--- /dev/null
+++ b/include/linux/lz4.h
@@ -0,0 +1,87 @@
+#ifndef __LZ4_H__
+#define __LZ4_H__
+/*
+ * LZ4 Kernel Interface
+ *
+ * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define LZ4_MEM_COMPRESS	(4096 * sizeof(unsigned char *))
+#define LZ4HC_MEM_COMPRESS	(65538 * sizeof(unsigned char *))
+
+/*
+ * lz4_compressbound()
+ * Provides the maximum size that LZ4 may output in a "worst case" scenario
+ * (input data not compressible)
+ */
+static inline size_t lz4_compressbound(size_t isize)
+{
+	return isize + (isize / 255) + 16;
+}
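Worked example: for a 4096-byte input, lz4_compressbound(4096) = 4096 + 4096/255 + 16 = 4096 + 16 + 16 = 4128, so a worst-case output buffer needs roughly 0.4% headroom plus a 16-byte constant.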
+
+/*
+ * lz4_compress()
+ *	src     : source address of the original data
+ *	src_len : size of the original data
+ *	dst	: output buffer address of the compressed data
+ *		This requires 'dst' of size LZ4_COMPRESSBOUND.
+ *	dst_len : is the output size, which is returned after compression is done
+ *	workmem : address of the working memory.
+ *		This requires 'workmem' of size LZ4_MEM_COMPRESS.
+ *	return  : Success if return 0
+ *		  Error if return (< 0)
+ *	note :  Destination buffer and workmem must be already allocated with
+ *		the defined size.
+ */
+int lz4_compress(const unsigned char *src, size_t src_len,
+		unsigned char *dst, size_t *dst_len, void *wrkmem);
+
+ /*
+  * lz4hc_compress()
+  *	 src	 : source address of the original data
+  *	 src_len : size of the original data
+  *	 dst	 : output buffer address of the compressed data
+  *		This requires 'dst' of size LZ4_COMPRESSBOUND.
+  *	 dst_len : is the output size, which is returned after compression is done
+  *	 workmem : address of the working memory.
+  *		This requires 'workmem' of size LZ4HC_MEM_COMPRESS.
+  *	 return  : Success if return 0
+  *		   Error if return (< 0)
+  *	 note :  Destination buffer and workmem must be already allocated with
+  *		 the defined size.
+  */
+int lz4hc_compress(const unsigned char *src, size_t src_len,
+		unsigned char *dst, size_t *dst_len, void *wrkmem);
+
+/*
+ * lz4_decompress()
+ *	src     : source address of the compressed data
+ *	src_len : is the input size, which is returned after decompression is done
+ *	dest	: output buffer address of the decompressed data
+ *	actual_dest_len: is the size of uncompressed data, supposing it's known
+ *	return  : Success if return 0
+ *		  Error if return (< 0)
+ *	note :  Destination buffer must be already allocated.
+ *		slightly faster than lz4_decompress_unknownoutputsize()
+ */
+int lz4_decompress(const unsigned char *src, size_t *src_len,
+		unsigned char *dest, size_t actual_dest_len);
+
+/*
+ * lz4_decompress_unknownoutputsize()
+ *	src     : source address of the compressed data
+ *	src_len : is the input size, therefore the compressed size
+ *	dest	: output buffer address of the decompressed data
+ *	dest_len: is the max size of the destination buffer, which is
+ *			returned with the actual size of decompressed data
+ *			after decompression is done
+ *	return  : Success if return 0
+ *		  Error if return (< 0)
+ *	note :  Destination buffer must be already allocated.
+ */
+int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len,
+		unsigned char *dest, size_t *dest_len);
+#endif
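A minimal usage sketch of the compression entry point, with buffer sizing as the comments above prescribe; the wrapper name is invented and error handling is trimmed:

#include <linux/errno.h>
#include <linux/lz4.h>
#include <linux/vmalloc.h>

/* Sketch: 'out' must be at least lz4_compressbound(in_len) bytes */
static int example_lz4_pack(const unsigned char *in, size_t in_len,
			    unsigned char *out, size_t *out_len)
{
	void *wrkmem = vmalloc(LZ4_MEM_COMPRESS);
	int ret;

	if (!wrkmem)
		return -ENOMEM;
	ret = lz4_compress(in, in_len, out, out_len, wrkmem);
	vfree(wrkmem);
	return ret;	/* 0 on success, negative on error */
}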
diff --git a/include/linux/mfd/intel_msic.h b/include/linux/mfd/intel_msic.h
index 439a7a6..5121763 100644
--- a/include/linux/mfd/intel_msic.h
+++ b/include/linux/mfd/intel_msic.h
@@ -12,6 +12,8 @@
 #ifndef __LINUX_MFD_INTEL_MSIC_H__
 #define __LINUX_MFD_INTEL_MSIC_H__
 
+#include <asm/intel_mid_gpadc.h>
+
 /* ID */
 #define INTEL_MSIC_ID0			0x000	/* RO */
 #define INTEL_MSIC_ID1			0x001	/* RO */
@@ -52,6 +54,15 @@
 #define INTEL_MSIC_PBCONFIG		0x03e
 #define INTEL_MSIC_PBSTATUS		0x03f	/* RO */
 
+/*
+ * MSIC interrupt tree is readable from SRAM at INTEL_MSIC_IRQ_PHYS_BASE.
+ * Since the IRQ block starts at address 0x002 we need to subtract that from
+ * the actual IRQ status register address.
+ */
+#define MSIC_IRQ_STATUS(x)      (INTEL_MSIC_IRQ_PHYS_BASE + ((x) - 2))
+#define MSIC_IRQ_STATUS_ACCDET  MSIC_IRQ_STATUS(INTEL_MSIC_ACCDET)
+#define MSIC_IRQ_STATUS_OCAUDIO MSIC_IRQ_STATUS(INTEL_MSIC_OCAUDIO)
+
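For instance, MSIC_IRQ_STATUS(INTEL_MSIC_ACCDET) expands to INTEL_MSIC_IRQ_PHYS_BASE + (INTEL_MSIC_ACCDET - 2): the SRAM mirror of the ACCDET status register with the IRQ block's 0x002 start address subtracted out.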
 /* GPIO */
 #define INTEL_MSIC_GPIO0LV7CTLO		0x040
 #define INTEL_MSIC_GPIO0LV6CTLO		0x041
@@ -377,9 +388,39 @@
 /**
  * struct intel_msic_gpio_pdata - platform data for the MSIC GPIO driver
  * @gpio_base: base number for the GPIOs
+ * @ngpio_lv: number of low voltage GPIOs
+ * @ngpio_hv: number of high voltage GPIOs
+ * @gpio0_lv_ctlo: low voltage GPIO0 output control register
+ * @gpio0_lv_ctli: low voltage GPIO0 input control register
+ * @gpio0_hv_ctlo: high voltage GPIO0 output control register
+ * @gpio0_hv_ctli: high voltage GPIO0 input control register
+ * @can_sleep: flag for gpio chip
  */
 struct intel_msic_gpio_pdata {
 	unsigned	gpio_base;
+	int		ngpio_lv;
+	int		ngpio_hv;
+	u16		gpio0_lv_ctlo;
+	u16		gpio0_lv_ctli;
+	u16		gpio0_hv_ctlo;
+	u16		gpio0_hv_ctli;
+	int		can_sleep;
+};
+
+#define DISABLE_VCRIT	0x01
+#define DISABLE_VWARNB	0x02
+#define DISABLE_VWARNA	0x04
+/**
+ * struct intel_msic_vdd_pdata - platform data for the MSIC VDD driver
+ * @msi: MSI number used for VDD interrupts
+ * @disable_unused_comparator: mask of comparators to disable
+ *	(DISABLE_VCRIT / DISABLE_VWARNB / DISABLE_VWARNA)
+ *
+ * The MSIC CTP driver converts @msi into an IRQ number and passes it to
+ * the VDD driver as %IORESOURCE_IRQ.
+ */
+struct intel_msic_vdd_pdata {
+	unsigned	msi;
+	/* 1 = VCRIT, 2 = WARNB, 4 = WARNA */
+	u8 disable_unused_comparator;
 };
 
 /**
@@ -429,6 +470,7 @@
 	int				irq[INTEL_MSIC_BLOCK_LAST];
 	struct intel_msic_gpio_pdata	*gpio;
 	struct intel_msic_ocd_pdata	*ocd;
+	struct intel_mid_gpadc_platform_data	*gpadc;
 };
 
 struct intel_msic;
diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h
index 68e7765..9e96ca7 100644
--- a/include/linux/mfd/wm8994/pdata.h
+++ b/include/linux/mfd/wm8994/pdata.h
@@ -130,6 +130,12 @@
 	int rate;
 };
 
+struct wm8958_custom_config {
+	int format;
+	int rate;
+	int channels;
+};
+
 struct wm8994_pdata {
 	int gpio_base;
 
@@ -182,6 +188,16 @@
 	 */
 	int micdet_delay;
 
+	/* Delay between microphone detect completing and reporting on
+	 * insert (specified in ms)
+	 */
+	int mic_id_delay;
+
+	/* Keep MICBIAS2 high for micb_en_delay (ms) during jack insertion
+	 * and removal
+	 */
+	int micb_en_delay;
+
 	/* IRQ for microphone detection if brought out directly as a
 	 * signal.
 	 */
@@ -223,6 +239,9 @@
 	 * lines is mastered.
 	 */
 	int max_channels_clocked[WM8994_NUM_AIF];
+
+	/* custom config for overriding the hw params */
+	struct wm8958_custom_config *custom_cfg;
 };
 
 #endif
diff --git a/include/linux/mfd/wm8994/registers.h b/include/linux/mfd/wm8994/registers.h
index 0535489..db8cef3 100644
--- a/include/linux/mfd/wm8994/registers.h
+++ b/include/linux/mfd/wm8994/registers.h
@@ -2668,6 +2668,10 @@
 /*
  * R772 (0x304) - AIF1ADC LRCLK
  */
+#define WM8958_AIF1_LRCLK_INV                   0x1000  /* AIF1_LRCLK_INV */
+#define WM8958_AIF1_LRCLK_INV_MASK              0x1000  /* AIF1_LRCLK_INV */
+#define WM8958_AIF1_LRCLK_INV_SHIFT                 12  /* AIF1_LRCLK_INV */
+#define WM8958_AIF1_LRCLK_INV_WIDTH                  1  /* AIF1_LRCLK_INV */
 #define WM8994_AIF1ADC_LRCLK_DIR                0x0800  /* AIF1ADC_LRCLK_DIR */
 #define WM8994_AIF1ADC_LRCLK_DIR_MASK           0x0800  /* AIF1ADC_LRCLK_DIR */
 #define WM8994_AIF1ADC_LRCLK_DIR_SHIFT              11  /* AIF1ADC_LRCLK_DIR */
@@ -2679,6 +2683,10 @@
 /*
  * R773 (0x305) - AIF1DAC LRCLK
  */
+#define WM8958_AIF1_LRCLK_INV                   0x1000  /* AIF1_LRCLK_INV */
+#define WM8958_AIF1_LRCLK_INV_MASK              0x1000  /* AIF1_LRCLK_INV */
+#define WM8958_AIF1_LRCLK_INV_SHIFT                 12  /* AIF1_LRCLK_INV */
+#define WM8958_AIF1_LRCLK_INV_WIDTH                  1  /* AIF1_LRCLK_INV */
 #define WM8994_AIF1DAC_LRCLK_DIR                0x0800  /* AIF1DAC_LRCLK_DIR */
 #define WM8994_AIF1DAC_LRCLK_DIR_MASK           0x0800  /* AIF1DAC_LRCLK_DIR */
 #define WM8994_AIF1DAC_LRCLK_DIR_SHIFT              11  /* AIF1DAC_LRCLK_DIR */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 55590f4..da81155 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -40,6 +40,17 @@
 #define sysctl_legacy_va_layout 0
 #endif
 
+#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
+extern const int mmap_rnd_bits_min;
+extern const int mmap_rnd_bits_max;
+extern int mmap_rnd_bits __read_mostly;
+#endif
+#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
+extern const int mmap_rnd_compat_bits_min;
+extern const int mmap_rnd_compat_bits_max;
+extern int mmap_rnd_compat_bits __read_mostly;
+#endif
+
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
@@ -325,6 +336,8 @@
 }
 #endif
 
+extern void kvfree(const void *addr);
+
 static inline void compound_lock(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -923,6 +936,7 @@
 extern void show_free_areas(unsigned int flags);
 extern bool skip_free_areas_node(unsigned int flags, int nid);
 
+void shmem_set_file(struct vm_area_struct *vma, struct file *file);
 int shmem_zero_setup(struct vm_area_struct *);
 
 extern int can_do_mlock(void);
@@ -1057,7 +1071,8 @@
 struct page *get_dump_page(unsigned long addr);
 
 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
-extern void do_invalidatepage(struct page *page, unsigned long offset);
+extern void do_invalidatepage(struct page *page, unsigned int offset,
+			      unsigned int length);
 
 int __set_page_dirty_nobuffers(struct page *page);
 int __set_page_dirty_no_writeback(struct page *page);
@@ -1179,6 +1194,11 @@
 		mm->hiwater_vm = mm->total_vm;
 }
 
+static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
+{
+	mm->hiwater_rss = get_mm_rss(mm);
+}
+
 static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
 					 struct mm_struct *mm)
 {
@@ -1501,7 +1521,7 @@
 extern struct vm_area_struct *vma_merge(struct mm_struct *,
 	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
 	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
-	struct mempolicy *);
+	struct mempolicy *, const char __user *);
 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
 extern int split_vma(struct mm_struct *,
 	struct vm_area_struct *, unsigned long addr, int new_below);
@@ -1839,6 +1859,12 @@
 static inline bool page_is_guard(struct page *page) { return false; }
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
+/* 3.18 backport */
+static inline void truncate_inode_pages_final(struct address_space *mapping)
+{
+	truncate_inode_pages(mapping, 0);
+}
+
 #if MAX_NUMNODES > 1
 void __init setup_nr_node_ids(void);
 #else
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 10a9a17..4c02455 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -213,6 +213,7 @@
 	unsigned long	vm_top;		/* region allocated to here */
 	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding to vm_start */
 	struct file	*vm_file;	/* the backing file or NULL */
+	struct file	*vm_prfile;	/* the virtual backing file or NULL */
 
 	int		vm_usage;	/* region usage count (access under nommu_region_sem) */
 	bool		vm_icache_flushed : 1; /* true if the icache has been flushed for
@@ -255,6 +256,10 @@
 	 * For areas with an address space and backing store,
 	 * linkage into the address_space->i_mmap interval tree, or
 	 * linkage of vma in the address_space->i_mmap_nonlinear list.
+	 *
+	 * For private anonymous mappings, a pointer to a null terminated string
+	 * in the user process containing the name given to the vma, or NULL
+	 * if unnamed.
 	 */
 	union {
 		struct {
@@ -262,6 +267,7 @@
 			unsigned long rb_subtree_last;
 		} linear;
 		struct list_head nonlinear;
+		const char __user *anon_name;
 	} shared;
 
 	/*
@@ -281,6 +287,7 @@
 	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
 					   units, *not* PAGE_CACHE_SIZE */
 	struct file * vm_file;		/* File we map to (can be NULL). */
+	struct file *vm_prfile;		/* shadow of vm_file */
 	void * vm_private_data;		/* was vm_pte (shared mem) */
 
 #ifndef CONFIG_MMU
@@ -465,6 +472,14 @@
 	return mm->cpu_vm_mask_var;
 }
 
+/* Return the name for an anonymous mapping or NULL for a file-backed mapping */
+static inline const char __user *vma_get_anon_name(struct vm_area_struct *vma)
+{
+	if (vma->vm_file)
+		return NULL;
+
+	return vma->shared.anon_name;
+}
 #if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
 /*
  * Memory barriers to keep this state in sync are graciously provided by
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index f31725b..4a733ae 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -67,6 +67,7 @@
 #define MMC_HIGH_52_MAX_DTR	52000000
 #define MMC_HIGH_DDR_MAX_DTR	52000000
 #define MMC_HS200_MAX_DTR	200000000
+#define MMC_HS400_MAX_DTR	200000000
 	unsigned int		sectors;
 	unsigned int		card_type;
 	unsigned int		hc_erase_size;		/* In sectors */
@@ -83,6 +84,7 @@
 	unsigned int		hpi_cmd;		/* cmd used as HPI */
 	bool			bkops;		/* background support bit */
 	bool			bkops_en;	/* background enable bit */
+	unsigned int		rpmb_size;		/* Units: half sector */
 	unsigned int            data_sector_size;       /* 512 bytes or 4KB */
 	unsigned int            data_tag_unit_size;     /* DATA TAG UNIT size */
 	unsigned int		boot_ro_lock;		/* ro lock support */
@@ -248,6 +250,7 @@
 #define MMC_CARD_SDXC		(1<<6)		/* card is SDXC */
 #define MMC_CARD_REMOVED	(1<<7)		/* card has been removed */
 #define MMC_STATE_HIGHSPEED_200	(1<<8)		/* card is in HS200 mode */
+#define MMC_STATE_HIGHSPEED_400	(1<<9)		/* card is in HS400 mode */
 #define MMC_STATE_DOING_BKOPS	(1<<10)		/* card is doing BKOPS */
 	unsigned int		quirks; 	/* card quirks */
 #define MMC_QUIRK_LENIENT_FN0	(1<<0)		/* allow SDIO FN0 writes outside of the VS CCCR range */
@@ -264,6 +267,7 @@
 #define MMC_QUIRK_LONG_READ_TIME (1<<9)		/* Data read time > CSD says */
 #define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10)	/* Skip secure for erase/trim */
 						/* byte mode */
+#define MMC_QUIRK_NON_STD_CIS (1<<11)
 
 	unsigned int		erase_size;	/* erase size in sectors */
  	unsigned int		erase_shift;	/* if erase unit is power 2 */
@@ -294,6 +298,8 @@
 	struct dentry		*debugfs_root;
 	struct mmc_part	part[MMC_NUM_PHY_PARTITION]; /* physical partitions */
 	unsigned int    nr_parts;
+
+	unsigned int		rpmb_max_req;
 };
 
 /*
@@ -409,6 +415,7 @@
 #define mmc_card_readonly(c)	((c)->state & MMC_STATE_READONLY)
 #define mmc_card_highspeed(c)	((c)->state & MMC_STATE_HIGHSPEED)
 #define mmc_card_hs200(c)	((c)->state & MMC_STATE_HIGHSPEED_200)
+#define mmc_card_hs400(c)	((c)->state & MMC_STATE_HIGHSPEED_400)
 #define mmc_card_blockaddr(c)	((c)->state & MMC_STATE_BLOCKADDR)
 #define mmc_card_ddr_mode(c)	((c)->state & MMC_STATE_HIGHSPEED_DDR)
 #define mmc_card_uhs(c)		((c)->state & MMC_STATE_ULTRAHIGHSPEED)
@@ -421,6 +428,7 @@
 #define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
 #define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED)
 #define mmc_card_set_hs200(c)	((c)->state |= MMC_STATE_HIGHSPEED_200)
+#define mmc_card_set_hs400(c)	((c)->state |= MMC_STATE_HIGHSPEED_400)
 #define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
 #define mmc_card_set_ddr_mode(c) ((c)->state |= MMC_STATE_HIGHSPEED_DDR)
 #define mmc_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED)
@@ -512,6 +520,7 @@
 	void (*remove)(struct mmc_card *);
 	int (*suspend)(struct mmc_card *);
 	int (*resume)(struct mmc_card *);
+	void (*shutdown)(struct mmc_card *);
 };
 
 extern int mmc_register_driver(struct mmc_driver *);
@@ -520,4 +529,7 @@
 extern void mmc_fixup_device(struct mmc_card *card,
 			     const struct mmc_fixup *table);
 
+extern int mmc_rpmb_req_handle(struct device *emmc,
+		struct mmc_ioc_rpmb_req *req);
+
 #endif /* LINUX_MMC_CARD_H */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 39613b9..a4ab8e1 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -10,6 +10,8 @@
 
 #include <linux/interrupt.h>
 #include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/mmc/ioctl.h>
 
 struct request;
 struct mmc_data;
@@ -121,6 +123,7 @@
 	unsigned int		sg_len;		/* size of scatter list */
 	struct scatterlist	*sg;		/* I/O scatter list */
 	s32			host_cookie;	/* host private data */
+	dma_addr_t		dmabuf;		/* used in panic mode */
 };
 
 struct mmc_host;
@@ -135,6 +138,34 @@
 	struct mmc_host		*host;
 };
 
+/*
+ * RPMB frame structure for MMC core stack
+ */
+struct mmc_core_rpmb_req {
+	struct mmc_ioc_rpmb_req *req;
+	__u8 *frame;
+	bool ready;
+};
+
+#define RPMB_PROGRAM_KEY       1       /* Program RPMB Authentication Key */
+#define RPMB_GET_WRITE_COUNTER 2       /* Read RPMB write counter */
+#define RPMB_WRITE_DATA		3	/* Write data to RPMB partition */
+#define RPMB_READ_DATA         4       /* Read data from RPMB partition */
+#define RPMB_RESULT_READ       5       /* Read result request */
+#define RPMB_REQ               1       /* RPMB request mark */
+#define RPMB_RESP              (1 << 1) /* RPMB response mark */
+#define RPMB_AVALIABLE_SECTORS 8       /* 4K page size */
+
+#define RPMB_TYPE_BEG          510
+#define RPMB_RES_BEG           508
+#define RPMB_BLKS_BEG          506
+#define RPMB_ADDR_BEG          504
+#define RPMB_WCOUNTER_BEG      500
+
+#define RPMB_NONCE_BEG         484
+#define RPMB_DATA_BEG          228
+#define RPMB_MAC_BEG           196
+
 struct mmc_card;
 struct mmc_async_req;
 
@@ -152,6 +183,10 @@
 extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool);
 extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
 extern int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd);
+extern int mmc_rpmb_partition_ops(struct mmc_core_rpmb_req *,
+		struct mmc_card *);
+extern int mmc_rpmb_pre_frame(struct mmc_core_rpmb_req *, struct mmc_card *);
+extern void mmc_rpmb_post_frame(struct mmc_core_rpmb_req *);
 
 #define MMC_ERASE_ARG		0x00000000
 #define MMC_SECURE_ERASE_ARG	0x80000000
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index e326ae2..308b56b 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -15,6 +15,7 @@
 #include <linux/sched.h>
 #include <linux/device.h>
 #include <linux/fault-inject.h>
+#include <linux/wakelock.h>
 
 #include <linux/mmc/core.h>
 #include <linux/mmc/pm.h>
@@ -59,6 +60,7 @@
 #define MMC_TIMING_UHS_SDR104	6
 #define MMC_TIMING_UHS_DDR50	7
 #define MMC_TIMING_MMC_HS200	8
+#define MMC_TIMING_MMC_HS400	9
 
 #define MMC_SDR_MODE		0
 #define MMC_1_2V_DDR_MODE	1
@@ -80,6 +82,19 @@
 #define MMC_SET_DRIVER_TYPE_D	3
 };
 
+struct mmc_panic_host;
+
+struct mmc_host_panic_ops {
+	void    (*request)(struct mmc_panic_host *, struct mmc_request *);
+	void	(*prepare)(struct mmc_panic_host *);
+	int     (*setup)(struct mmc_panic_host *);
+	void    (*set_ios)(struct mmc_panic_host *);
+	void    (*dumpregs)(struct mmc_panic_host *);
+	int	(*power_on)(struct mmc_panic_host *);
+	int	(*hold_mutex)(struct mmc_panic_host *);
+	void	(*release_mutex)(struct mmc_panic_host *);
+};
+
 struct mmc_host_ops {
 	/*
 	 * 'enable' is called when the host is claimed and 'disable' is called
@@ -139,6 +154,9 @@
 	int	(*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv);
 	void	(*hw_reset)(struct mmc_host *host);
 	void	(*card_event)(struct mmc_host *host);
+	void	(*set_dev_power)(struct mmc_host *, bool);
+	/* Prevent the host controller from auto clock gating by busy-reading */
+	void	(*busy_wait)(struct mmc_host *mmc, u32 delay);
 };
 
 struct mmc_card;
@@ -195,6 +213,27 @@
 	struct regulator *vqmmc;	/* Optional Vccq supply */
 };
 
+struct mmc_panic_host {
+	/*
+	 * DMA buffer for the log
+	 */
+	dma_addr_t      dmabuf;
+	void            *logbuf;
+	const struct mmc_host_panic_ops *panic_ops;
+	unsigned int            panic_ready;
+	unsigned int            totalsecs;
+	unsigned int            max_blk_size;
+	unsigned int            max_blk_count;
+	unsigned int            max_req_size;
+	unsigned int            blkaddr;
+	unsigned int            caps;
+	u32                     ocr;            /* the current OCR setting */
+	struct mmc_ios          ios;            /* current io bus settings */
+	struct mmc_card         *card;
+	struct mmc_host         *mmc;
+	void                    *priv;
+};
+
 struct mmc_host {
 	struct device		*parent;
 	struct device		class_dev;
@@ -264,7 +303,7 @@
 
 #define MMC_CAP2_BOOTPART_NOACC	(1 << 0)	/* Boot partition no access */
 #define MMC_CAP2_CACHE_CTRL	(1 << 1)	/* Allow cache control */
-#define MMC_CAP2_POWEROFF_NOTIFY (1 << 2)	/* Notify poweroff supported */
+#define MMC_CAP2_FULL_PWR_CYCLE	(1 << 2)	/* Can do full power cycle */
 #define MMC_CAP2_NO_MULTI_READ	(1 << 3)	/* Multiblock reads don't work */
 #define MMC_CAP2_NO_SLEEP_CMD	(1 << 4)	/* Don't allow sleep command */
 #define MMC_CAP2_HS200_1_8V_SDR	(1 << 5)        /* can support */
@@ -281,6 +320,17 @@
 #define MMC_CAP2_PACKED_CMD	(MMC_CAP2_PACKED_RD | \
 				 MMC_CAP2_PACKED_WR)
 #define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14)	/* Don't power up before scan */
+#define MMC_CAP2_INIT_CARD_SYNC	(1 << 15)	/* init card in sync mode */
+#define MMC_CAP2_POLL_R1B_BUSY	(1 << 16)	/* host poll R1B busy*/
+#define MMC_CAP2_RPMBPART_NOACC	(1 << 17)	/* RPMB partition no access */
+#define MMC_CAP2_LED_SUPPORT	(1 << 18)	/* led support */
+#define MMC_CAP2_PWCTRL_POWER	(1 << 19)	/* power control card power */
+#define MMC_CAP2_FIXED_NCRC	(1 << 20)	/* fixed NCRC */
+#define MMC_CAP2_HS200_WA	(1 << 21)	/* WA: 100MHz clock in HS200 */
+#define MMC_CAP2_HS400_1_8V_DDR	(1 << 22)	/* support HS400 */
+#define MMC_CAP2_HS400_1_2V_DDR	(1 << 23)	/* support HS400 */
+#define MMC_CAP2_HS400		(MMC_CAP2_HS400_1_8V_DDR | \
+				MMC_CAP2_HS400_1_2V_DDR)
 
 	mmc_pm_flag_t		pm_caps;	/* supported pm features */
 
@@ -329,12 +379,17 @@
 	int			claim_cnt;	/* "claim" nesting count */
 
 	struct delayed_work	detect;
+	struct wake_lock	detect_wake_lock;
 	int			detect_change;	/* card detect flag */
 	struct mmc_slot		slot;
 
 	const struct mmc_bus_ops *bus_ops;	/* current bus driver */
 	unsigned int		bus_refs;	/* reference counter */
 
+	unsigned int		bus_resume_flags;
+#define MMC_BUSRESUME_MANUAL_RESUME	(1 << 0)
+#define MMC_BUSRESUME_NEEDS_RESUME	(1 << 1)
+
 	unsigned int		sdio_irqs;
 	struct task_struct	*sdio_irq_thread;
 	bool			sdio_irq_pending;
@@ -362,15 +417,38 @@
 
 	unsigned int		slotno;	/* used for sdio acpi binding */
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	struct {
+		struct sdio_cis			*cis;
+		struct sdio_cccr		*cccr;
+		struct sdio_embedded_func	*funcs;
+		int				num_funcs;
+	} embedded_sdio_data;
+#endif
+	struct mmc_panic_host *phost;
 	unsigned long		private[0] ____cacheline_aligned;
 };
 
+#define SECTOR_SIZE    512
+int mmc_emergency_init(void);
+int mmc_emergency_write(char *, unsigned int);
+void mmc_alloc_panic_host(struct mmc_host *, const struct mmc_host_panic_ops *);
+void mmc_emergency_setup(struct mmc_host *host);
+
 struct mmc_host *mmc_alloc_host(int extra, struct device *);
 int mmc_add_host(struct mmc_host *);
 void mmc_remove_host(struct mmc_host *);
 void mmc_free_host(struct mmc_host *);
 void mmc_of_parse(struct mmc_host *host);
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+extern void mmc_set_embedded_sdio_data(struct mmc_host *host,
+				       struct sdio_cis *cis,
+				       struct sdio_cccr *cccr,
+				       struct sdio_embedded_func *funcs,
+				       int num_funcs);
+#endif
+
 static inline void *mmc_priv(struct mmc_host *host)
 {
 	return (void *)host->private;
@@ -381,6 +459,18 @@
 #define mmc_dev(x)	((x)->parent)
 #define mmc_classdev(x)	(&(x)->class_dev)
 #define mmc_hostname(x)	(dev_name(&(x)->class_dev))
+#define mmc_bus_needs_resume(host) ((host)->bus_resume_flags & MMC_BUSRESUME_NEEDS_RESUME)
+#define mmc_bus_manual_resume(host) ((host)->bus_resume_flags & MMC_BUSRESUME_MANUAL_RESUME)
+
+static inline void mmc_set_bus_resume_policy(struct mmc_host *host, int manual)
+{
+	if (manual)
+		host->bus_resume_flags |= MMC_BUSRESUME_MANUAL_RESUME;
+	else
+		host->bus_resume_flags &= ~MMC_BUSRESUME_MANUAL_RESUME;
+}
+
+extern int mmc_resume_bus(struct mmc_host *host);
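Taken together, a host that opts into deferred resume (the usual Android SDIO arrangement) would use these hooks roughly as below; both function names are invented for the sketch:

static void example_host_setup(struct mmc_host *host)
{
	mmc_set_bus_resume_policy(host, 1);	/* set MANUAL_RESUME */
}

static void example_before_request(struct mmc_host *host)
{
	/* resume the bus lazily, once the core has flagged NEEDS_RESUME */
	if (mmc_bus_manual_resume(host) && mmc_bus_needs_resume(host))
		mmc_resume_bus(host);
}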
 
 int mmc_suspend_host(struct mmc_host *);
 int mmc_resume_host(struct mmc_host *);
@@ -391,8 +481,6 @@
 void mmc_detect_change(struct mmc_host *, unsigned long delay);
 void mmc_request_done(struct mmc_host *, struct mmc_request *);
 
-int mmc_cache_ctrl(struct mmc_host *, u8);
-
 static inline void mmc_signal_sdio_irq(struct mmc_host *host)
 {
 	host->ops->enable_sdio_irq(host, 0);
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 50bcde3..3633133 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -325,6 +325,7 @@
 #define EXT_CSD_POWER_OFF_LONG_TIME	247	/* RO */
 #define EXT_CSD_GENERIC_CMD6_TIME	248	/* RO */
 #define EXT_CSD_CACHE_SIZE		249	/* RO, 4 bytes */
+#define EXT_CSD_PWR_CL_200_DDR_195	253	/* RO, support HS400 */
 #define EXT_CSD_TAG_UNIT_SIZE		498	/* RO */
 #define EXT_CSD_DATA_TAG_SUPPORT	499	/* RO */
 #define EXT_CSD_MAX_PACKED_WRITES	500	/* RO */
@@ -357,6 +358,7 @@
 #define EXT_CSD_CARD_TYPE_26	(1<<0)	/* Card can run at 26MHz */
 #define EXT_CSD_CARD_TYPE_52	(1<<1)	/* Card can run at 52MHz */
 #define EXT_CSD_CARD_TYPE_MASK	0x3F	/* Mask out reserved bits */
+#define EXT_CSD_CARD_TYPE_MASK_FULL	0xFF	/* Support HS400 */
 #define EXT_CSD_CARD_TYPE_DDR_1_8V  (1<<2)   /* Card can run at 52MHz */
 					     /* DDR mode @1.8V or 3V I/O */
 #define EXT_CSD_CARD_TYPE_DDR_1_2V  (1<<3)   /* Card can run at 52MHz */
@@ -366,6 +368,9 @@
 #define EXT_CSD_CARD_TYPE_SDR_1_8V	(1<<4)	/* Card can run at 200MHz */
 #define EXT_CSD_CARD_TYPE_SDR_1_2V	(1<<5)	/* Card can run at 200MHz */
 						/* SDR mode @1.2V I/O */
+#define EXT_CSD_CARD_TYPE_HS400_1_8V	(1<<6)	/* Card can run at 200MHz */
+#define EXT_CSD_CARD_TYPE_HS400_1_2V	(1<<7)	/* Card can run at 200MHz */
+						/* DDR mode @1.8/1.2v I/O */
 
 #define EXT_CSD_BUS_WIDTH_1	0	/* Card is in 1 bit mode */
 #define EXT_CSD_BUS_WIDTH_4	1	/* Card is in 4 bit mode */
diff --git a/include/linux/mmc/pm.h b/include/linux/mmc/pm.h
index 4a13920..6e2d6a1 100644
--- a/include/linux/mmc/pm.h
+++ b/include/linux/mmc/pm.h
@@ -26,5 +26,6 @@
 
 #define MMC_PM_KEEP_POWER	(1 << 0)	/* preserve card power during suspend */
 #define MMC_PM_WAKE_SDIO_IRQ	(1 << 1)	/* wake up host system on SDIO IRQ assertion */
+#define MMC_PM_IGNORE_PM_NOTIFY	(1 << 2)	/* ignore mmc pm notify */
 
 #endif /* LINUX_MMC_PM_H */
diff --git a/include/linux/mmc/sdhci-pci-data.h b/include/linux/mmc/sdhci-pci-data.h
index 8959604..a4c8606 100644
--- a/include/linux/mmc/sdhci-pci-data.h
+++ b/include/linux/mmc/sdhci-pci-data.h
@@ -8,10 +8,26 @@
 	int		slotno;
 	int		rst_n_gpio; /* Set to -EINVAL if unused */
 	int		cd_gpio;    /* Set to -EINVAL if unused */
+	int		quirks;
+	int		quirks2;
+	int		platform_quirks; /* Platform related quirks */
 	int		(*setup)(struct sdhci_pci_data *data);
 	void		(*cleanup)(struct sdhci_pci_data *data);
+	int		(*power_up)(void *data);
+	void		(*register_embedded_control)(void *dev_id,
+			   void (*virtual_cd)(void *dev_id, int card_present));
+	int		(*flis_check)(void *data, unsigned int clk);
 };
 
+/* Some pre-silicon platforms do not support all SDHCI HCs of the SoC */
+#define PLFM_QUIRK_NO_HOST_CTRL_HW	(1<<0)
+/* Some pre-silicon platforms do not support eMMC boot partition access */
+#define PLFM_QUIRK_NO_EMMC_BOOT_PART	(1<<1)
+/* Some pre-silicon platforms do not support eMMC or SD high speed */
+#define PLFM_QUIRK_NO_HIGH_SPEED	(1<<2)
+/* For platforms which don't have an SD card slot */
+#define PLFM_QUIRK_NO_SDCARD_SLOT	(1<<3)
+
 extern struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev,
 				int slotno);
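A hypothetical board-code sketch of publishing per-slot data through the sdhci_pci_get_data hook; the GPIO number and quirk choice are made up for illustration:

	static struct sdhci_pci_data example_sd_data = {
		.rst_n_gpio      = -EINVAL,			/* unused */
		.cd_gpio         = 69,				/* illustrative */
		.platform_quirks = PLFM_QUIRK_NO_HIGH_SPEED,
	};

	static struct sdhci_pci_data *example_get_data(struct pci_dev *pdev,
						       int slotno)
	{
		return &example_sd_data;
	}

	/* at platform init: sdhci_pci_get_data = example_get_data; */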
 
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index b838ffc..d4c124b 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -96,9 +96,57 @@
 #define SDHCI_QUIRK2_NO_1_8_V				(1<<2)
 #define SDHCI_QUIRK2_PRESET_VALUE_BROKEN		(1<<3)
 
+/* Intel private quirk2 flags start at bit 15 */
+
+/* V2.0 host controller supports DDR50 */
+#define SDHCI_QUIRK2_V2_0_SUPPORT_DDR50			(1<<15)
+/* Controller has bug when enabling Auto CMD23 */
+#define SDHCI_QUIRK2_BROKEN_AUTO_CMD23			(1<<16)
+/* HC Reg High Speed must be set later than HC2 Reg 1.8v Signaling Enable */
+#define SDHCI_QUIRK2_HIGH_SPEED_SET_LATE		(1<<17)
+/* BRCM voltage support: advertise 2.0v support and force using 1.8v instead */
+#define SDHCI_QUIRK2_ADVERTISE_2V0_FORCE_1V8		(1<<18)
+/* to allow mmc_detect to detach the bus */
+#define SDHCI_QUIRK2_DISABLE_MMC_CAP_NONREMOVABLE	(1<<19)
+/* Avoid detect/rescan/poweroff operations on suspend/resume. */
+#define SDHCI_QUIRK2_ENABLE_MMC_PM_IGNORE_PM_NOTIFY	(1<<20)
+/* Disable eMMC/SD card High speed feature. */
+#define SDHCI_QUIRK2_DISABLE_HIGH_SPEED			(1<<21)
+#define SDHCI_QUIRK2_CAN_VDD_300			(1<<22)
+#define SDHCI_QUIRK2_CAN_VDD_330			(1<<23)
+#define SDHCI_QUIRK2_2MS_DELAY				(1<<24)
+#define SDHCI_QUIRK2_WAIT_FOR_IDLE			(1<<25)
+/* Bad SD card detect in the host IC; may hang the system when removing SD */
+#define SDHCI_QUIRK2_BAD_SD_CD				(1<<26)
+#define SDHCI_QUIRK2_POWER_PIN_GPIO_MODE		(1<<27)
+#define SDHCI_QUIRK2_ADVERTISE_3V0_FORCE_1V8		(1<<28)
+#define SDHCI_QUIRK2_NON_STD_CIS			(1<<29)
+#define SDHCI_QUIRK2_TUNING_POLL			(1<<30)
+
 	int irq;		/* Device IRQ */
 	void __iomem *ioaddr;	/* Mapped address */
 
+	/*
+	 * XXX: SCU/X86 mutex variables base address in shared SRAM
+	 * NOTE: Max size of this struct is 16 bytes
+	 * without shared SRAM re-organization.
+	 */
+	void __iomem *sram_addr;        /* Shared SRAM address */
+
+	void __iomem *rte_addr;	/* IOAPIC RTE register address */
+
+#define DEKKER_EMMC_OWNER_OFFSET        0
+#define DEKKER_IA_REQ_OFFSET            0x04
+#define DEKKER_SCU_REQ_OFFSET           0x08
+/* 0xc offset: state of the emmc chip to SCU. */
+#define DEKKER_EMMC_STATE               0x0c
+#define DEKKER_OWNER_IA                 0
+#define DEKKER_OWNER_SCU                1
+#define DEKKER_EMMC_CHIP_ACTIVE         0
+#define DEKKER_EMMC_CHIP_SUSPENDED      1
+
+	unsigned int	usage_cnt;	/* eMMC mutex usage count */
+
 	const struct sdhci_ops *ops;	/* Low level hw interface */
 
 	struct regulator *vmmc;		/* Power regulator (vmmc) */
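The offsets above describe a Dekker-style mutex in shared SRAM, contended between the IA core and the SCU firmware; roughly, the IA-side acquire would look like this sketch (an assumed shape, not the shipped implementation):

	static void ia_dekker_lock(struct sdhci_host *host)
	{
		writel(1, host->sram_addr + DEKKER_IA_REQ_OFFSET);
		while (readl(host->sram_addr + DEKKER_SCU_REQ_OFFSET)) {
			if (readl(host->sram_addr + DEKKER_EMMC_OWNER_OFFSET) !=
			    DEKKER_OWNER_IA) {
				/* SCU's turn: drop our request and spin */
				writel(0, host->sram_addr + DEKKER_IA_REQ_OFFSET);
				while (readl(host->sram_addr +
					     DEKKER_EMMC_OWNER_OFFSET) !=
				       DEKKER_OWNER_IA)
					cpu_relax();
				writel(1, host->sram_addr + DEKKER_IA_REQ_OFFSET);
			}
		}
	}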
@@ -114,6 +162,7 @@
 #endif
 
 	spinlock_t lock;	/* Mutex */
+	spinlock_t dekker_lock; /* eMMC Dekker Mutex lock */
 
 	int flags;		/* Host attributes */
 #define SDHCI_USE_SDMA		(1<<0)	/* Host is SDMA capable */
@@ -128,6 +177,7 @@
 #define SDHCI_SDIO_IRQ_ENABLED	(1<<9)	/* SDIO irq enabled */
 #define SDHCI_HS200_NEEDS_TUNING (1<<10)	/* HS200 needs tuning */
 #define SDHCI_USING_RETUNING_TIMER (1<<11)	/* Host is using a retuning timer for the card */
+#define SDHCI_POWER_CTRL_DEV	(1<<12) /* ctrl dev power */
 
 	unsigned int version;	/* SDHCI spec. version */
 
@@ -139,11 +189,13 @@
 	u8 pwr;			/* Current voltage */
 
 	bool runtime_suspended;	/* Host is runtime suspended */
+	bool suspended;		/* Host is suspended */
 
 	struct mmc_request *mrq;	/* Current request */
 	struct mmc_command *cmd;	/* Current command */
 	struct mmc_data *data;	/* Current data request */
 	unsigned int data_early:1;	/* Data finished before cmd */
+	unsigned int r1b_busy_end:1;	/* R1B busy end */
 
 	struct sg_mapping_iter sg_miter;	/* SG state for PIO */
 	unsigned int blocks;	/* remaining PIO blocks */
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
old mode 100644
new mode 100755
index 50f0bc9..dc680c4
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -23,6 +23,14 @@
 typedef void (sdio_irq_handler_t)(struct sdio_func *);
 
 /*
+ * Structure used to hold embedded SDIO device data from platform layer
+ */
+struct sdio_embedded_func {
+	uint8_t f_class;
+	uint32_t f_maxblksize;
+};
+
+/*
  * SDIO function CIS tuple (unknown to the core)
  */
 struct sdio_func_tuple {
@@ -130,6 +138,8 @@
 extern unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz);
 
 extern u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret);
+extern u8 sdio_readb_ext(struct sdio_func *func, unsigned int addr, int *err_ret,
+	unsigned in);
 extern u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret);
 extern u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret);
 
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 9f03fee..b3933db 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -23,6 +23,8 @@
 /*
  * Vendors and devices.  Sort key: vendor first, device next.
  */
+#define IWL_SDIO_DEVICE_ID_WKP1	0x3160
+#define IWL_SDIO_DEVICE_ID_WKP2	0x7260
 #define SDIO_VENDOR_ID_INTEL			0x0089
 #define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX	0x1402
 #define SDIO_DEVICE_ID_INTEL_IWMC3200WIFI	0x1403
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index d313648..d292528 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -569,6 +569,15 @@
 #define X86_MODEL_ANY  0
 #define X86_FEATURE_ANY 0	/* Same as FPU, you can't test for that */
 
+/*
+ * Generic table type for matching CPU features.
+ * @feature:	the bit number of the feature (0 - 65535)
+ */
+
+struct cpu_feature {
+	__u16	feature;
+};
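A hedged example of the module-autoload table this struct enables; the AES feature bit is illustrative and assumes the matching cpufeature device-table glue is wired up:

	static const struct cpu_feature aes_features[] = {
		{ .feature = X86_FEATURE_AES },
		{ }				/* terminator */
	};
	MODULE_DEVICE_TABLE(cpu, aes_features);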
+
 #define IPACK_ANY_FORMAT 0xff
 #define IPACK_ANY_ID (~0)
 struct ipack_device_id {
diff --git a/include/linux/netfilter/xt_qtaguid.h b/include/linux/netfilter/xt_qtaguid.h
new file mode 100644
index 0000000..ca60fbd
--- /dev/null
+++ b/include/linux/netfilter/xt_qtaguid.h
@@ -0,0 +1,13 @@
+#ifndef _XT_QTAGUID_MATCH_H
+#define _XT_QTAGUID_MATCH_H
+
+/* For now we just replace the xt_owner.
+ * FIXME: make iptables aware of qtaguid. */
+#include <linux/netfilter/xt_owner.h>
+
+#define XT_QTAGUID_UID    XT_OWNER_UID
+#define XT_QTAGUID_GID    XT_OWNER_GID
+#define XT_QTAGUID_SOCKET XT_OWNER_SOCKET
+#define xt_qtaguid_match_info xt_owner_match_info
+
+#endif /* _XT_QTAGUID_MATCH_H */
diff --git a/include/linux/netfilter/xt_quota2.h b/include/linux/netfilter/xt_quota2.h
new file mode 100644
index 0000000..eadc690
--- /dev/null
+++ b/include/linux/netfilter/xt_quota2.h
@@ -0,0 +1,25 @@
+#ifndef _XT_QUOTA_H
+#define _XT_QUOTA_H
+
+enum xt_quota_flags {
+	XT_QUOTA_INVERT    = 1 << 0,
+	XT_QUOTA_GROW      = 1 << 1,
+	XT_QUOTA_PACKET    = 1 << 2,
+	XT_QUOTA_NO_CHANGE = 1 << 3,
+	XT_QUOTA_MASK      = 0x0F,
+};
+
+struct xt_quota_counter;
+
+struct xt_quota_mtinfo2 {
+	char name[15];
+	u_int8_t flags;
+
+	/* Comparison-invariant */
+	aligned_u64 quota;
+
+	/* Used internally by the kernel */
+	struct xt_quota_counter *master __attribute__((aligned(8)));
+};
+
+#endif /* _XT_QUOTA_H */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 3b7fa2a..64b267e 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -54,6 +54,7 @@
 
 	u32			cl_minorversion;/* NFSv4 minorversion */
 	struct rpc_cred		*cl_machine_cred;
+	int			nfs_prog;
 
 #if IS_ENABLED(CONFIG_NFS_V4)
 	u64			cl_clientid;	/* constant */
@@ -75,7 +76,9 @@
 	 * This is used to generate the mv0 callback address.
 	 */
 	char			cl_ipaddr[48];
+
 	u32			cl_cb_ident;	/* v4.0 callback identifier */
+
 	const struct nfs4_minor_version_ops *cl_mvops;
 
 	/* The sequence id to use for the next CREATE_SESSION */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index a9e5134..ed96c55 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -11,7 +11,7 @@
  * reasonable for NFS over UDP.
  */
 #define NFS_MAX_FILE_IO_SIZE	(1048576U)
-#define NFS_DEF_FILE_IO_SIZE	(4096U)
+#define NFS_DEF_FILE_IO_SIZE	CONFIG_NFS_DEF_FILE_IO_SIZE
 #define NFS_MIN_FILE_IO_SIZE	(1024U)
 
 struct nfs4_string {
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index db50840..c8f8aa0 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -14,8 +14,11 @@
  * may be used to reset the timeout - for code which intentionally
  * disables interrupts for a long time. This call is stateless.
  */
-#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR_NMI)
 #include <asm/nmi.h>
+#endif
+
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
 extern void touch_nmi_watchdog(void);
 #else
 static inline void touch_nmi_watchdog(void)
diff --git a/include/linux/of.h b/include/linux/of.h
index 5e9d352..9528241 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -354,6 +354,11 @@
 	return NULL;
 }
 
+static inline struct device_node *of_find_node_by_path(const char *path)
+{
+	return NULL;
+}
+
 static inline struct device_node *of_get_parent(const struct device_node *node)
 {
 	return NULL;
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index ed136ad..7e190a9 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -91,6 +91,27 @@
 extern int of_flat_dt_match(unsigned long node, const char *const *matches);
 extern unsigned long of_get_flat_dt_root(void);
 
+/*
+ * early_init_dt_scan_chosen - scan the device tree for ramdisk and bootargs
+ *
+ * The boot arguments will be placed into the memory pointed to by @data.
+ * That memory should be COMMAND_LINE_SIZE big and initialized to be a valid
+ * (possibly empty) string.  Logic for what will be in @data after this
+ * function finishes:
+ *
+ * - CONFIG_CMDLINE_FORCE=true
+ *     CONFIG_CMDLINE
+ * - CONFIG_CMDLINE_EXTEND=true, @data is non-empty string
+ *     @data + dt bootargs (even if dt bootargs are empty)
+ * - CONFIG_CMDLINE_EXTEND=true, @data is empty string
+ *     CONFIG_CMDLINE + dt bootargs (even if dt bootargs are empty)
+ * - CMDLINE_FROM_BOOTLOADER=true, dt bootargs=non-empty:
+ *     dt bootargs
+ * - CMDLINE_FROM_BOOTLOADER=true, dt bootargs=empty, @data is non-empty string
+ *     @data is left unchanged
+ * - CMDLINE_FROM_BOOTLOADER=true, dt bootargs=empty, @data is empty string
+ *     CONFIG_CMDLINE (or "" if that's not defined)
+ */
 extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
 				     int depth, void *data);
 extern void early_init_dt_check_for_initrd(unsigned long node);
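A worked example of the CONFIG_CMDLINE_EXTEND=y, empty-@data case from the table above, with illustrative strings:

	CONFIG_CMDLINE = "console=ttyS0,115200"
	dt bootargs    = "root=/dev/mmcblk0p9"
	@data (out)    = "console=ttyS0,115200 root=/dev/mmcblk0p9"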
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 6d53675..dd7d45b 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -317,13 +317,23 @@
 extern void cancel_dirty_page(struct page *page, unsigned int account_size);
 
 int test_clear_page_writeback(struct page *page);
-int test_set_page_writeback(struct page *page);
+int __test_set_page_writeback(struct page *page, bool keep_write);
+
+#define test_set_page_writeback(page)			\
+	__test_set_page_writeback(page, false)
+#define test_set_page_writeback_keepwrite(page)	\
+	__test_set_page_writeback(page, true)
 
 static inline void set_page_writeback(struct page *page)
 {
 	test_set_page_writeback(page);
 }
 
+static inline void set_page_writeback_keepwrite(struct page *page)
+{
+	test_set_page_writeback_keepwrite(page);
+}
+
 #ifdef CONFIG_PAGEFLAGS_EXTENDED
 /*
  * System with lots of page flags available. This allows separate
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 9497527..99dc0a9 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -243,8 +243,21 @@
 
 typedef int filler_t(void *, struct page *);
 
-extern struct page * find_get_page(struct address_space *mapping,
-				pgoff_t index);
+pgoff_t page_cache_next_hole(struct address_space *mapping,
+                             pgoff_t index, unsigned long max_scan);
+
+extern struct page * find_get_page_flags(struct address_space *mapping,
+					 pgoff_t index, int fgp_flags);
+
+#define FGP_ACCESSED		0x00000001
+
+static inline struct page* find_get_page(struct address_space *mapping,
+					 pgoff_t index)
+{
+	return find_get_page_flags(mapping, index, 0);
+}
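A small sketch of the flags variant in use: the caller marks the page accessed during lookup instead of issuing a separate mark_page_accessed() call afterwards.

	struct page *page;

	page = find_get_page_flags(mapping, index, FGP_ACCESSED);
	if (page) {
		/* ... read the page ... */
		page_cache_release(page);	/* drop the lookup reference */
	}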
+
+
 extern struct page * find_lock_page(struct address_space *mapping,
 				pgoff_t index);
 extern struct page * find_or_create_page(struct address_space *mapping,
@@ -391,7 +404,7 @@
 	return 0;
 }
 
-/* 
+/*
  * Wait for a page to be unlocked.
  *
  * This must be called with the caller "holding" the page,
@@ -404,7 +417,7 @@
 		wait_on_page_bit(page, PG_locked);
 }
 
-/* 
+/*
  * Wait for a page to complete writeback
  */
 static inline void wait_on_page_writeback(struct page *page)
diff --git a/include/linux/panic_gbuffer.h b/include/linux/panic_gbuffer.h
new file mode 100644
index 0000000..c0580e5
--- /dev/null
+++ b/include/linux/panic_gbuffer.h
@@ -0,0 +1,37 @@
+/*
+ * panic_gbuffer.h
+ *
+ * Copyright (C) 2013 Intel Corp
+ *
+ * Expose a generic buffer header to be passed to the panic handler in
+ * order to dump buffer content in case of kernel panic.
+ *
+ * -----------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#ifndef _LINUX_PANIC_GBUFFER_H
+#define _LINUX_PANIC_GBUFFER_H
+
+struct g_buffer_header {
+	unsigned char *base;
+	size_t size;
+	size_t woff;
+	size_t head;
+};
+
+void panic_set_gbuffer(struct g_buffer_header *gbuffer);
+
+#endif /* _LINUX_PANIC_GBUFFER_H */
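A minimal sketch of a client, assuming a driver-owned log buffer; the names are illustrative:

	static unsigned char trace_buf[4096];

	static struct g_buffer_header trace_gbuf = {
		.base = trace_buf,
		.size = sizeof(trace_buf),
		/* .woff and .head advance as the driver logs */
	};

	static int __init trace_gbuf_init(void)
	{
		panic_set_gbuffer(&trace_gbuf);	/* dumped on kernel panic */
		return 0;
	}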
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 302182a..ad0b960 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2485,6 +2485,9 @@
 
 #define PCI_VENDOR_ID_ASMEDIA		0x1b21
 
+#define PCI_VENDOR_ID_CIRCUITCO		0x1cc8
+#define PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD	0x0001
+
 #define PCI_VENDOR_ID_TEKRAM		0x1de1
 #define PCI_DEVICE_ID_TEKRAM_DC290	0xdc29
 
@@ -2555,6 +2558,19 @@
 #define PCI_DEVICE_ID_INTEL_MFD_EMMC1	0x0824
 #define PCI_DEVICE_ID_INTEL_MRST_SD2	0x084F
 #define PCI_DEVICE_ID_INTEL_I960	0x0960
+#define PCI_DEVICE_ID_INTEL_CLV_SDIO0	0x08F9
+#define PCI_DEVICE_ID_INTEL_CLV_SDIO1	0x08FA
+#define PCI_DEVICE_ID_INTEL_CLV_SDIO2	0x08FB
+#define PCI_DEVICE_ID_INTEL_CLV_EMMC0	0x08E5
+#define PCI_DEVICE_ID_INTEL_CLV_EMMC1	0x08E6
+#define PCI_DEVICE_ID_INTEL_CLV_OTG	0xE006
+#define PCI_DEVICE_ID_INTEL_MRFL_MMC	0x1190
+#define PCI_DEVICE_ID_INTEL_MRFL_DWC3_OTG	0x119E
+#define PCI_DEVICE_ID_INTEL_BYT_EMMC	0x0f14
+#define PCI_DEVICE_ID_INTEL_BYT_SDIO	0x0f15
+#define PCI_DEVICE_ID_INTEL_BYT_SD	0x0f16
+#define PCI_DEVICE_ID_INTEL_BYT_EMMC45	0x0f50
+#define PCI_DEVICE_ID_INTEL_BYT_OTG	0x0f37
 #define PCI_DEVICE_ID_INTEL_I960RM	0x0962
 #define PCI_DEVICE_ID_INTEL_CENTERTON_ILB	0x0c60
 #define PCI_DEVICE_ID_INTEL_8257X_SOL	0x1062
@@ -2866,6 +2882,7 @@
 #define PCI_DEVICE_ID_INTEL_IXP4XX	0x8500
 #define PCI_DEVICE_ID_INTEL_IXP2800	0x9004
 #define PCI_DEVICE_ID_INTEL_S21152BB	0xb152
+#define PCI_DEVICE_ID_INTEL_SST_MRFLD   0x119A
 
 #define PCI_VENDOR_ID_SCALEMP		0x8686
 #define PCI_DEVICE_ID_SCALEMP_VSMP_CTL	0x1010
diff --git a/include/linux/platform_data/ds2482.h b/include/linux/platform_data/ds2482.h
new file mode 100644
index 0000000..5a6879e2a
--- /dev/null
+++ b/include/linux/platform_data/ds2482.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __PLATFORM_DATA_DS2482__
+#define __PLATFORM_DATA_DS2482__
+
+struct ds2482_platform_data {
+	int		slpz_gpio;
+};
+
+#endif /* __PLATFORM_DATA_DS2482__ */
diff --git a/include/linux/platform_data/intel-mid_wdt.h b/include/linux/platform_data/intel-mid_wdt.h
new file mode 100644
index 0000000..b982534
--- /dev/null
+++ b/include/linux/platform_data/intel-mid_wdt.h
@@ -0,0 +1,22 @@
+/*
+ *      intel-mid_wdt: generic Intel MID SCU watchdog driver
+ *
+ *      Copyright (C) 2014 Intel Corporation. All rights reserved.
+ *      Contact: David Cohen <david.a.cohen@linux.intel.com>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of version 2 of the GNU General
+ *      Public License as published by the Free Software Foundation.
+ */
+
+#ifndef __INTEL_MID_WDT_H__
+#define __INTEL_MID_WDT_H__
+
+#include <linux/platform_device.h>
+
+struct intel_mid_wdt_pdata {
+	int irq;
+	int (*probe)(struct platform_device *pdev);
+};
+
+#endif /*__INTEL_MID_WDT_H__*/
diff --git a/include/linux/platform_data/intel_mid_remoteproc.h b/include/linux/platform_data/intel_mid_remoteproc.h
new file mode 100644
index 0000000..a16dffb
--- /dev/null
+++ b/include/linux/platform_data/intel_mid_remoteproc.h
@@ -0,0 +1,119 @@
+/*
+ * Intel MID Remote Processor header file
+ *
+ * Copyright (C) 2012 Intel, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _ASM_INTEL_MID_REMOTEPROC_H
+#define _ASM_INTEL_MID_REMOTEPROC_H
+
+#define RP_IPC_COMMAND		0xA0
+#define RP_IPC_SIMPLE_COMMAND	0xA1
+#define RP_IPC_RAW_COMMAND	0xA2
+
+#define	RP_PMIC_ACCESS		0xFF
+#define	RP_DFU_REQUEST		0xFE
+#define	RP_SET_WATCHDOG		0xF8
+#define	RP_FLIS_ACCESS		0xF5
+#define	RP_GET_FW_REVISION	0xF4
+#define	RP_COLD_BOOT		0xF3
+#define	RP_COLD_RESET		0xF1
+#define	RP_COLD_OFF		0x80
+#define	RP_MIP_ACCESS		0xEC
+#define RP_GET_HOBADDR		0xE5
+#define RP_OSC_CLK_CTRL		0xE6
+#define RP_S0IX_COUNTER		0xE8
+#define RP_WRITE_OSNIB		0xE4
+#define RP_CLEAR_FABERROR	0xE3
+#define RP_FW_UPDATE		0xFE
+#define RP_VRTC			0xFA
+#define RP_PMDB			0xE0
+#define RP_WRITE_OEMNIB		0xDF	/* Command is used to write OEMNIB */
+					/* data. Used with extended OSHOB  */
+					/* OSNIB only.                     */
+/*
+ * Assign some temporary IDs for the following devices.
+ * TODO: change these to meaningful values.
+ */
+#define RP_PMIC_GPIO		0x02
+#define RP_PMIC_AUDIO		0x03
+#define RP_MSIC_GPIO		0x05
+#define RP_MSIC_AUDIO		0x06
+#define RP_MSIC_OCD		0x07
+#define RP_MSIC_BATTERY		0xEF
+#define RP_MSIC_THERMAL		0x09
+#define RP_MSIC_POWER_BTN	0x10
+#define RP_IPC			0x11
+#define RP_IPC_UTIL		0x12
+#define RP_FW_ACCESS		0x13
+#define RP_UMIP_ACCESS		0x14
+#define RP_OSIP_ACCESS		0x15
+#define RP_MSIC_ADC		0x16
+#define RP_BQ24192		0x17
+#define RP_MSIC_CLV_AUDIO	0x18
+#define RP_PMIC_CCSM		0x19
+#define RP_PMIC_I2C		0x20
+#define RP_MSIC_MRFLD_AUDIO	0x21
+#define RP_MSIC_PWM		0x22
+#define RP_MSIC_KPD_LED		0x23
+#define RP_BCOVE_ADC		0x24
+#define RP_BCOVE_THERMAL	0x25
+#define RP_MRFL_OCD		0x26
+#define RP_FW_LOGGING		0x27
+#define RP_PMIC_CHARGER		0x28
+
+enum rproc_type {
+	RPROC_SCU = 0,
+	RPROC_PSH,
+	RPROC_NUM,
+};
+
+struct rproc_ops;
+struct platform_device;
+struct rpmsg_ns_msg;
+
+struct rpmsg_ns_info {
+	enum rproc_type type;
+	char name[RPMSG_NAME_SIZE];
+	u32 addr;
+	u32 flags;
+	struct list_head node;
+};
+
+struct rpmsg_ns_list {
+	struct list_head list;
+	struct mutex lock;
+};
+
+extern struct rpmsg_ns_info *rpmsg_ns_alloc(const char *name,
+						int id, u32 addr);
+extern void rpmsg_ns_add_to_list(struct rpmsg_ns_info *info,
+					struct rpmsg_ns_list *nslist);
+
+/**
+ * struct intel_mid_rproc_pdata - intel mid remoteproc's platform data
+ * @name: the remoteproc's name
+ * @firmware: name of firmware file to load
+ * @ops: start/stop rproc handlers
+ * @device_enable: handler for enabling a device
+ * @device_shutdown: handler for shutting down a device
+ */
+struct intel_mid_rproc_pdata {
+	const char *name;
+	const char *firmware;
+	const struct rproc_ops *ops;
+	int (*device_enable) (struct platform_device *pdev);
+	int (*device_shutdown) (struct platform_device *pdev);
+	struct rpmsg_ns_list *nslist;
+};
+
+#endif /* _ASM_INTEL_MID_REMOTEPROC_H */
diff --git a/include/linux/platform_data/ti-ads7955.h b/include/linux/platform_data/ti-ads7955.h
new file mode 100644
index 0000000..da1451e
--- /dev/null
+++ b/include/linux/platform_data/ti-ads7955.h
@@ -0,0 +1,21 @@
+/*
+ * ADS7955 SPI ADC driver
+ *
+ * (C) Copyright 2014 Intel Corporation
+ * Author: Dave Hunt <dave.hunt@emutex.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_TI_ADS7955_H__
+#define __LINUX_PLATFORM_DATA_TI_ADS7955_H__
+
+/**
+ * struct ads7955_platform_data - Platform data for the ads7955 ADC driver
+ * @ext_ref: Whether to use an external reference voltage.
+ */
+struct ads7955_platform_data {
+	bool ext_ref;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_TI_ADS7955_H__ */
diff --git a/include/linux/power/battery_id.h b/include/linux/power/battery_id.h
new file mode 100644
index 0000000..417160d
--- /dev/null
+++ b/include/linux/power/battery_id.h
@@ -0,0 +1,78 @@
+#ifndef __BATTERY_ID_H__
+#define __BATTERY_ID_H__
+
+enum {
+	POWER_SUPPLY_BATTERY_REMOVED = 0,
+	POWER_SUPPLY_BATTERY_INSERTED,
+};
+
+enum batt_chrg_prof_type {
+	PSE_MOD_CHRG_PROF = 0,
+};
+
+/* charging profile structure definition */
+struct ps_batt_chg_prof {
+	enum batt_chrg_prof_type chrg_prof_type;
+	void *batt_prof;
+};
+
+/* PSE Modified Algo Structure */
+/* Parameters defining the charging range */
+struct ps_temp_chg_table {
+	/* upper temperature limit for each zone */
+	short int temp_up_lim;
+	/* charge current and voltage */
+	short int full_chrg_vol;
+	short int full_chrg_cur;
+	/* maintenance thresholds */
+	/* maintenance lower threshold; once the battery hits full,
+	 * charging will be resumed when battery voltage <= this voltage
+	 */
+	short int maint_chrg_vol_ll;
+	/* Charge current and voltage in maintenance mode */
+	short int maint_chrg_vol_ul;
+	short int maint_chrg_cur;
+} __packed;
+
+
+#define BATTID_STR_LEN		8
+#define BATT_TEMP_NR_RNG	6
+/* Charging Profile */
+struct ps_pse_mod_prof {
+	/* battery id */
+	char batt_id[BATTID_STR_LEN];
+	/* type of battery */
+	u16 battery_type;
+	u16 capacity;
+	u16 voltage_max;
+	/* charge termination current */
+	u16 chrg_term_ma;
+	/* Low battery level voltage */
+	u16 low_batt_mV;
+	/* upper and lower temperature limits on discharging */
+	u8 disch_tmp_ul;
+	u8 disch_tmp_ll;
+	/* number of temperature monitoring ranges */
+	u16 temp_mon_ranges;
+	struct ps_temp_chg_table temp_mon_range[BATT_TEMP_NR_RNG];
+	/* Lowest temperature supported */
+	short int temp_low_lim;
+} __packed;
+
+/* For notification during battery change events */
+extern struct atomic_notifier_head    batt_id_notifier;
+
+extern void battery_prop_changed(int battery_conn_stat,
+				struct ps_batt_chg_prof *batt_prop);
+#ifdef CONFIG_POWER_SUPPLY_BATTID
+extern int get_batt_prop(struct ps_batt_chg_prof *batt_prop);
+#else
+static inline int get_batt_prop(struct ps_batt_chg_prof *batt_prop)
+{
+	return -ENOMEM;
+}
+#endif
+extern int batt_id_reg_notifier(struct notifier_block *nb);
+extern void batt_id_unreg_notifier(struct notifier_block *nb);
+#endif
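A hedged sketch of a charger driver consuming these hooks; the callback name and the assumption that get_batt_prop() returns 0 on success are illustrative:

	static int chgr_batt_event(struct notifier_block *nb,
				   unsigned long event, void *data)
	{
		struct ps_batt_chg_prof prof;

		if (event == POWER_SUPPLY_BATTERY_INSERTED &&
		    !get_batt_prop(&prof))
			/* apply the PSE charging profile in prof.batt_prof */;
		return NOTIFY_OK;
	}

	static struct notifier_block chgr_nb = {
		.notifier_call = chgr_batt_event,
	};

	/* at probe: batt_id_reg_notifier(&chgr_nb); */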
diff --git a/include/linux/power/bq24261_charger.h b/include/linux/power/bq24261_charger.h
new file mode 100644
index 0000000..f185659
--- /dev/null
+++ b/include/linux/power/bq24261_charger.h
@@ -0,0 +1,56 @@
+/*
+ * bq24261_charger.h: platform data structure for bq24261 driver
+ *
+ * (C) Copyright 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#ifndef __BQ24261_CHARGER_H__
+#define __BQ24261_CHARGER_H__
+
+struct bq24261_plat_data {
+	u32 irq_map;
+	u8 irq_mask;
+	char **supplied_to;
+	size_t num_supplicants;
+	struct power_supply_throttle *throttle_states;
+	size_t num_throttle_states;
+	int safety_timer;
+	int boost_mode_ma;
+	bool is_ts_enabled;
+
+	int (*enable_charging) (bool val);
+	int (*enable_charger) (bool val);
+	int (*set_inlmt) (int val);
+	int (*set_cc) (int val);
+	int (*set_cv) (int val);
+	int (*set_iterm) (int val);
+	int (*enable_vbus) (bool val);
+	/* Workaround for ShadyCove VBUS removal detect issue */
+	int (*handle_low_supply) (void);
+	void (*dump_master_regs) (void);
+};
+
+extern void bq24261_cv_to_reg(int, u8*);
+extern void bq24261_cc_to_reg(int, u8*);
+extern void bq24261_inlmt_to_reg(int, u8*);
+
+#ifdef CONFIG_BQ24261_CHARGER
+extern int bq24261_get_bat_health(void);
+extern int bq24261_get_bat_status(void);
+#else
+static inline int bq24261_get_bat_health(void)
+{
+	return 0;
+}
+static inline int bq24261_get_bat_status(void)
+{
+	return 0;
+}
+#endif
+
+#endif
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 3828cef..1c00c6b 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -15,6 +15,7 @@
 
 #include <linux/workqueue.h>
 #include <linux/leds.h>
+#include <linux/types.h>
 
 struct device;
 
@@ -83,6 +84,14 @@
 	POWER_SUPPLY_SCOPE_DEVICE,
 };
 
+enum {
+	POWER_SUPPLY_CHARGE_CURRENT_LIMIT_ZERO = 0,
+	POWER_SUPPLY_CHARGE_CURRENT_LIMIT_LOW,
+	POWER_SUPPLY_CHARGE_CURRENT_LIMIT_MEDIUM,
+	POWER_SUPPLY_CHARGE_CURRENT_LIMIT_HIGH,
+	POWER_SUPPLY_CHARGE_CURRENT_LIMIT_NONE,
+};
+
 enum power_supply_property {
 	/* Properties of type `int' */
 	POWER_SUPPLY_PROP_STATUS = 0,
@@ -116,8 +125,14 @@
 	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
 	POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
 	POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+	POWER_SUPPLY_CHARGE_CURRENT_LIMIT,
 	POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
 	POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
+	POWER_SUPPLY_PROP_CHARGE_CURRENT,
+	POWER_SUPPLY_PROP_MAX_CHARGE_CURRENT,
+	POWER_SUPPLY_PROP_CHARGE_VOLTAGE,
+	POWER_SUPPLY_PROP_MAX_CHARGE_VOLTAGE,
+	POWER_SUPPLY_PROP_INLMT,
 	POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
 	POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN,
 	POWER_SUPPLY_PROP_ENERGY_FULL,
@@ -131,6 +146,8 @@
 	POWER_SUPPLY_PROP_TEMP,
 	POWER_SUPPLY_PROP_TEMP_ALERT_MIN,
 	POWER_SUPPLY_PROP_TEMP_ALERT_MAX,
+	POWER_SUPPLY_PROP_MAX_TEMP,
+	POWER_SUPPLY_PROP_MIN_TEMP,
 	POWER_SUPPLY_PROP_TEMP_AMBIENT,
 	POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN,
 	POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX,
@@ -139,7 +156,18 @@
 	POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
 	POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
 	POWER_SUPPLY_PROP_TYPE, /* use power_supply.type instead */
+	POWER_SUPPLY_PROP_CHARGE_TERM_CUR,
+	POWER_SUPPLY_PROP_ENABLE_CHARGING,
+	POWER_SUPPLY_PROP_ENABLE_CHARGER,
+	POWER_SUPPLY_PROP_CABLE_TYPE,
+	POWER_SUPPLY_PROP_PRIORITY,
 	POWER_SUPPLY_PROP_SCOPE,
+	/* Local extensions */
+	POWER_SUPPLY_PROP_USB_HC,
+	POWER_SUPPLY_PROP_USB_OTG,
+	POWER_SUPPLY_PROP_CHARGE_ENABLED,
+	/* Local extensions of type int64_t */
+	POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
 	/* Properties of type `const char *' */
 	POWER_SUPPLY_PROP_MODEL_NAME,
 	POWER_SUPPLY_PROP_MANUFACTURER,
@@ -152,14 +180,74 @@
 	POWER_SUPPLY_TYPE_UPS,
 	POWER_SUPPLY_TYPE_MAINS,
 	POWER_SUPPLY_TYPE_USB,		/* Standard Downstream Port */
+	POWER_SUPPLY_TYPE_USB_INVAL,	/* Invalid Standard Downstream Port */
 	POWER_SUPPLY_TYPE_USB_DCP,	/* Dedicated Charging Port */
 	POWER_SUPPLY_TYPE_USB_CDP,	/* Charging Downstream Port */
 	POWER_SUPPLY_TYPE_USB_ACA,	/* Accessory Charger Adapters */
+	POWER_SUPPLY_TYPE_USB_HOST,	/* To support OTG devices */
 };
 
+enum power_supply_charger_event {
+	POWER_SUPPLY_CHARGER_EVENT_CONNECT = 0,
+	POWER_SUPPLY_CHARGER_EVENT_UPDATE,
+	POWER_SUPPLY_CHARGER_EVENT_RESUME,
+	POWER_SUPPLY_CHARGER_EVENT_SUSPEND,
+	POWER_SUPPLY_CHARGER_EVENT_DISCONNECT,
+};
+
+struct power_supply_charger_cap {
+	enum power_supply_charger_event chrg_evt;
+	enum power_supply_type chrg_type;
+	unsigned int mA; /* input current limit */
+};
+
+enum power_supply_charger_cable_type {
+	POWER_SUPPLY_CHARGER_TYPE_NONE = 0,
+	POWER_SUPPLY_CHARGER_TYPE_USB_SDP = 1 << 0,
+	POWER_SUPPLY_CHARGER_TYPE_USB_DCP = 1 << 1,
+	POWER_SUPPLY_CHARGER_TYPE_USB_CDP = 1 << 2,
+	POWER_SUPPLY_CHARGER_TYPE_USB_ACA = 1 << 3,
+	POWER_SUPPLY_CHARGER_TYPE_AC = 1 << 4,
+	POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK = 1 << 5,
+	POWER_SUPPLY_CHARGER_TYPE_ACA_A = 1 << 6,
+	POWER_SUPPLY_CHARGER_TYPE_ACA_B = 1 << 7,
+	POWER_SUPPLY_CHARGER_TYPE_ACA_C = 1 << 8,
+	POWER_SUPPLY_CHARGER_TYPE_SE1 = 1 << 9,
+	POWER_SUPPLY_CHARGER_TYPE_MHL = 1 << 10,
+	POWER_SUPPLY_CHARGER_TYPE_B_DEVICE = 1 << 11,
+};
+
+struct power_supply_cable_props {
+	enum power_supply_charger_event	chrg_evt;
+	enum power_supply_charger_cable_type chrg_type;
+	unsigned int ma;   /* input current limit */
+};
+
+#define POWER_SUPPLY_CHARGER_TYPE_USB \
+	(POWER_SUPPLY_CHARGER_TYPE_USB_SDP | \
+	POWER_SUPPLY_CHARGER_TYPE_USB_DCP | \
+	POWER_SUPPLY_CHARGER_TYPE_USB_CDP | \
+	POWER_SUPPLY_CHARGER_TYPE_USB_ACA | \
+	POWER_SUPPLY_CHARGER_TYPE_ACA_DOCK| \
+	POWER_SUPPLY_CHARGER_TYPE_SE1)
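For illustration, a charger-detection path might describe a newly attached standard downstream port like this (500 mA is the usual USB2 SDP number, not something mandated by this header):

	struct power_supply_cable_props cap = {
		.chrg_evt  = POWER_SUPPLY_CHARGER_EVENT_CONNECT,
		.chrg_type = POWER_SUPPLY_CHARGER_TYPE_USB_SDP,
		.ma        = 500,	/* input current limit */
	};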
+
 union power_supply_propval {
 	int intval;
 	const char *strval;
+	int64_t int64val;
+};
+
+enum psy_throttle_action {
+	PSY_THROTTLE_DISABLE_CHARGER = 0,
+	PSY_THROTTLE_DISABLE_CHARGING,
+	PSY_THROTTLE_CC_LIMIT,
+	PSY_THROTTLE_INPUT_LIMIT,
+};
+
+struct power_supply_throttle {
+	enum psy_throttle_action throttle_action;
+	unsigned throttle_val;
 };
 
 struct power_supply {
@@ -169,8 +257,10 @@
 	size_t num_properties;
 
 	char **supplied_to;
+	unsigned long supported_cables;
 	size_t num_supplicants;
-
+	struct power_supply_throttle *throttle_states;
+	size_t num_throttle_states;
 	char **supplied_from;
 	size_t num_supplies;
 #ifdef CONFIG_OF
@@ -187,6 +277,8 @@
 				     enum power_supply_property psp);
 	void (*external_power_changed)(struct power_supply *psy);
 	void (*set_charged)(struct power_supply *psy);
+	void (*charging_port_changed)(struct power_supply *psy,
+				struct power_supply_charger_cap *cap);
 
 	/* For APM emulation, think legacy userspace. */
 	int use_for_apm;
@@ -194,6 +286,8 @@
 	/* private */
 	struct device *dev;
 	struct work_struct changed_work;
+	spinlock_t changed_lock;
+	bool changed;
 #ifdef CONFIG_THERMAL
 	struct thermal_zone_device *tzd;
 	struct thermal_cooling_device *tcd;
@@ -235,9 +329,13 @@
 extern struct power_supply *power_supply_get_by_name(const char *name);
 extern void power_supply_changed(struct power_supply *psy);
 extern int power_supply_am_i_supplied(struct power_supply *psy);
+extern int power_supply_is_battery_connected(void);
 extern int power_supply_set_battery_charged(struct power_supply *psy);
+extern void power_supply_charger_event(struct power_supply_charger_cap cap);
+extern void power_supply_query_charger_caps(struct power_supply_charger_cap
+									*cap);
 
-#ifdef CONFIG_POWER_SUPPLY
+#if defined(CONFIG_POWER_SUPPLY) || defined(CONFIG_POWER_SUPPLY_MODULE)
 extern int power_supply_is_system_supplied(void);
 #else
 static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index 75d0176..2898841 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -35,6 +35,7 @@
 	PSTORE_TYPE_MCE		= 1,
 	PSTORE_TYPE_CONSOLE	= 2,
 	PSTORE_TYPE_FTRACE	= 3,
+	PSTORE_TYPE_PMSG	= 4, /* Backport: 7 in upstream 3.19.0-rc3 */
 	PSTORE_TYPE_UNKNOWN	= 255
 };
 
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 4af3fdc..712757f 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -68,6 +68,8 @@
 ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
 	char *str, size_t len);
 
+void ramoops_console_write_buf(const char *buf, size_t size);
+
 /*
  * Ramoops platform data
  * @mem_size	memory size for ramoops
@@ -81,6 +83,7 @@
 	unsigned long	record_size;
 	unsigned long	console_size;
 	unsigned long	ftrace_size;
+	unsigned long	pmsg_size;
 	int		dump_oops;
 	struct persistent_ram_ecc_info ecc_info;
 };
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index a4df204..69375b7 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -76,6 +76,7 @@
 enum {
 	PWMF_REQUESTED = 1 << 0,
 	PWMF_ENABLED = 1 << 1,
+	PWMF_EXPORTED = 1 << 2,
 };
 
 struct pwm_device {
@@ -86,7 +87,9 @@
 	struct pwm_chip		*chip;
 	void			*chip_data;
 
-	unsigned int		period; /* in nanoseconds */
+	unsigned int		period;     /* in nanoseconds */
+	unsigned int		duty_cycle; /* in nanoseconds */
+	enum pwm_polarity	polarity;
 };
 
 static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period)
@@ -100,6 +103,17 @@
 	return pwm ? pwm->period : 0;
 }
 
+static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty)
+{
+	if (pwm)
+		pwm->duty_cycle = duty;
+}
+
+static inline unsigned int pwm_get_duty_cycle(struct pwm_device *pwm)
+{
+	return pwm ? pwm->duty_cycle : 0;
+}
+
 /*
  * pwm_set_polarity - configure the polarity of a PWM signal
  */
@@ -278,4 +292,18 @@
 }
 #endif
 
+#ifdef CONFIG_PWM_SYSFS
+void pwmchip_sysfs_export(struct pwm_chip *chip);
+void pwmchip_sysfs_unexport(struct pwm_chip *chip);
+#else
+static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
+{
+}
+
+static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip)
+{
+}
+#endif /* CONFIG_PWM_SYSFS */
+
 #endif /* __LINUX_PWM_H */
diff --git a/include/linux/random.h b/include/linux/random.h
index bf9085e..1b91d7d 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -24,6 +24,7 @@
 #endif
 
 unsigned int get_random_int(void);
+unsigned long get_random_long(void);
 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
 
 u32 prandom_u32(void);
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index 0022c1b..aa870a4 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -68,6 +68,10 @@
 extern struct rb_node *rb_first(const struct rb_root *);
 extern struct rb_node *rb_last(const struct rb_root *);
 
+/* Postorder iteration - always visit the parent after its children */
+extern struct rb_node *rb_first_postorder(const struct rb_root *);
+extern struct rb_node *rb_next_postorder(const struct rb_node *);
+
 /* Fast replacement of a single node without remove/rebalance/add/rebalance */
 extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, 
 			    struct rb_root *root);
@@ -81,4 +85,22 @@
 	*rb_link = node;
 }
 
+/**
+ * rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of
+ * given type safe against removal of rb_node entry
+ *
+ * @pos:	the 'type *' to use as a loop cursor.
+ * @n:		another 'type *' to use as temporary storage
+ * @root:	'rb_root *' of the rbtree.
+ * @field:	the name of the rb_node field within 'type'.
+ */
+#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
+	for (pos = rb_entry(rb_first_postorder(root), typeof(*pos), field),\
+		n = rb_entry(rb_next_postorder(&pos->field), \
+			typeof(*pos), field); \
+	     &pos->field; \
+	     pos = n, \
+		n = rb_entry(rb_next_postorder(&pos->field), \
+			typeof(*pos), field))
+
 #endif	/* _LINUX_RBTREE_H */
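The canonical use of the postorder iterator is tearing down a whole tree, since every node is visited after its children; a sketch with a hypothetical entry type:

	struct thing {
		struct rb_node node;
		int key;
	};

	static void destroy_all(struct rb_root *root)
	{
		struct thing *pos, *n;

		rbtree_postorder_for_each_entry_safe(pos, n, root, node)
			kfree(pos);	/* children already freed */
		*root = RB_ROOT;
	}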
diff --git a/include/linux/regulator/intel_basin_cove_pmic.h b/include/linux/regulator/intel_basin_cove_pmic.h
new file mode 100644
index 0000000..8dcdfa7
--- /dev/null
+++ b/include/linux/regulator/intel_basin_cove_pmic.h
@@ -0,0 +1,59 @@
+/*
+ * intel_basin_cove_pmic.h - Support for Basin Cove pmic VR
+ * Copyright (c) 2012, Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+#ifndef __INTEL_BASIN_COVE_PMIC_H_
+#define __INTEL_BASIN_COVE_PMIC_H_
+
+struct regulator_init_data;
+
+enum intel_regulator_id {
+	VPROG1,
+	VPROG2,
+	VPROG3,
+};
+
+/* Voltage tables for Regulators */
+static const u16 VPROG1_VSEL_table[] = {
+	1500, 1800, 2500, 2800,
+};
+
+static const u16 VPROG2_VSEL_table[] = {
+	1500, 1800, 2500, 2850,
+};
+
+static const u16 VPROG3_VSEL_table[] = {
+	1050, 1800, 2500, 2800,
+};
+
+/* Slave Address for all regulators */
+#define VPROG1CNT_ADDR	0x0ac
+#define VPROG2CNT_ADDR	0x0ad
+#define VPROG3CNT_ADDR	0x0ae
+/**
+ * intel_pmic_info - platform data for intel pmic
+ * @pmic_reg: pmic register that is to be used for this VR
+ */
+struct intel_pmic_info {
+	struct regulator_init_data *init_data;
+	struct regulator_dev *intel_pmic_rdev;
+	const u16 *table;
+	u16 pmic_reg;
+	u8 table_len;
+};
+
+#endif /* __INTEL_BASIN_COVE_PMIC_H_ */
diff --git a/include/linux/regulator/intel_pmic.h b/include/linux/regulator/intel_pmic.h
new file mode 100644
index 0000000..f972d60
--- /dev/null
+++ b/include/linux/regulator/intel_pmic.h
@@ -0,0 +1,54 @@
+/*
+ * Support for Intel PMIC
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+struct regulator_init_data;
+
+enum intel_regulator_id {
+	VPROG1,
+	VPROG2,
+	VEMMC1,
+	VEMMC2,
+};
+
+/* Voltage tables for Regulators */
+static const u16 VPROG1_VSEL_table[] = {
+	1200, 1800, 2500, 2800,
+};
+
+static const u16 VPROG2_VSEL_table[] = {
+	1200, 1800, 2500, 2800,
+};
+
+static const u16 VEMMC1_VSEL_table[] = {
+	2850,
+};
+static const u16 VEMMC2_VSEL_table[] = {
+	2850,
+};
+
+static const u16 V180AON_VSEL_table[] = {
+	1800, 1817, 1836, 1854,
+};
+
+/* Slave Address for all regulators */
+#define VPROG1CNT_ADDR	0x0D6
+#define VPROG2CNT_ADDR	0x0D7
+#define VEMMC1CNT_ADDR	0x0D9
+#define VEMMC2CNT_ADDR	0x0DA
+/**
+ * intel_pmic_info - platform data for intel pmic
+ * @pmic_reg: pmic register that is to be used for this VR
+ */
+struct intel_pmic_info {
+	struct regulator_init_data *init_data;
+	struct regulator_dev *intel_pmic_rdev;
+	const u16 *table;
+	u16 pmic_reg;
+	u8 table_len;
+};
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7728941..0c16b9f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1124,13 +1124,12 @@
 				 * execve */
 	unsigned in_iowait:1;
 
-	/* task may not gain privileges */
-	unsigned no_new_privs:1;
-
 	/* Revert to default priority/policy when forking */
 	unsigned sched_reset_on_fork:1;
 	unsigned sched_contributes_to_load:1;
 
+	unsigned long atomic_flags; /* Flags needing atomic access. */
+
 	pid_t pid;
 	pid_t tgid;
 
@@ -1171,6 +1170,7 @@
 
 	cputime_t utime, stime, utimescaled, stimescaled;
 	cputime_t gtime;
+	unsigned long long cpu_power;
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	struct cputime prev_cputime;
 #endif
@@ -1620,6 +1620,9 @@
 extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
 extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
 
+extern int task_free_register(struct notifier_block *n);
+extern int task_free_unregister(struct notifier_block *n);
+
 /*
  * Per process flags
  */
@@ -1700,6 +1703,19 @@
 	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
 }
 
+/* Per-process atomic flags. */
+#define PFA_NO_NEW_PRIVS 0x00000001	/* May not gain new privileges. */
+
+static inline bool task_no_new_privs(struct task_struct *p)
+{
+	return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
+}
+
+static inline void task_set_no_new_privs(struct task_struct *p)
+{
+	set_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
+}
+
 /*
  * task->jobctl flags
  */
diff --git a/include/linux/sdm.h b/include/linux/sdm.h
new file mode 100644
index 0000000..0a75ffb
--- /dev/null
+++ b/include/linux/sdm.h
@@ -0,0 +1,60 @@
+/*
+ *  Copyright (C) Intel 2011
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The SDM (System Debug Monitor) directs trace data routed from
+ * various parts in the system out through the Intel Tangier PTI port and
+ * out of the mobile device for analysis with a debugging tool
+ * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
+ * compact JTAG, standard and USB Debug-Class
+ *
+ * This header file will allow other parts of the OS to use the
+ * interface to write out its contents for debugging a mobile system.
+ */
+
+#ifndef SDM_H_
+#define SDM_H_
+
+#ifdef CONFIG_INTEL_PTI_STM
+/* the following functions are defined in drivers/misc/stm.c */
+int stm_kernel_get_out(void);
+int stm_kernel_set_out(int bus_type);
+int stm_is_enabled(void);
+#else
+static inline int stm_kernel_get_out(void) { return -EOPNOTSUPP; }
+static inline int stm_kernel_set_out(int bus_type) { return -EOPNOTSUPP; }
+static inline int stm_is_enabled(void) { return 0; }
+#endif
+
+/* Temporary: to be replaced later with dynamic configuration */
+#define STM_NB_IN_PINS                  0
+
+/* STM output configurations */
+#define STM_PTI_4BIT_LEGACY                    0
+#define STM_PTI_4BIT_NIDNT                     1
+#define STM_PTI_16BIT                          2
+#define STM_PTI_12BIT                          3
+#define STM_PTI_8BIT                           4
+#define STM_USB                                15
+
+/* Buffer configurations */
+#define DFX_BULK_BUFFER_SIZE		64 /* for Tangier A0 */
+#define DFX_BULK_OUT_BUFFER_ADDR	0xF90B0000
+#define DFX_BULK_IN_BUFFER_ADDR		0xF90B0000
+#define DFX_BULK_IN_BUFFER_ADDR_2	0xF90B0400
+
+#define TRACE_BULK_BUFFER_SIZE		65536 /* revision */
+#define TRACE_BULK_IN_BUFFER_ADDR	0xF90A0000 /* revision */
+
+#endif /*SDM_H_*/
+
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 6f19cfd..9687691 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -3,6 +3,8 @@
 
 #include <uapi/linux/seccomp.h>
 
+#define SECCOMP_FILTER_FLAG_MASK	(SECCOMP_FILTER_FLAG_TSYNC)
+
 #ifdef CONFIG_SECCOMP
 
 #include <linux/thread_info.h>
@@ -14,11 +16,11 @@
  *
  * @mode:  indicates one of the valid values above for controlled
  *         system calls available to a process.
- * @filter: The metadata and ruleset for determining what system calls
- *          are allowed for a task.
+ * @filter: must always point to a valid seccomp-filter or NULL as it is
+ *          accessed without locking during system call entry.
  *
  *          @filter must only be accessed from the context of current as there
- *          is no locking.
+ *          is no read locking.
  */
 struct seccomp {
 	int mode;
diff --git a/include/linux/security.h b/include/linux/security.h
index 4e50307..afb3d29 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1402,6 +1402,11 @@
 struct security_operations {
 	char name[SECURITY_NAME_MAX + 1];
 
+	int (*binder_set_context_mgr) (struct task_struct *mgr);
+	int (*binder_transaction) (struct task_struct *from, struct task_struct *to);
+	int (*binder_transfer_binder) (struct task_struct *from, struct task_struct *to);
+	int (*binder_transfer_file) (struct task_struct *from, struct task_struct *to, struct file *file);
+
 	int (*ptrace_access_check) (struct task_struct *child, unsigned int mode);
 	int (*ptrace_traceme) (struct task_struct *parent);
 	int (*capget) (struct task_struct *target,
@@ -1690,6 +1695,10 @@
 
 
 /* Security operations */
+int security_binder_set_context_mgr(struct task_struct *mgr);
+int security_binder_transaction(struct task_struct *from, struct task_struct *to);
+int security_binder_transfer_binder(struct task_struct *from, struct task_struct *to);
+int security_binder_transfer_file(struct task_struct *from, struct task_struct *to, struct file *file);
 int security_ptrace_access_check(struct task_struct *child, unsigned int mode);
 int security_ptrace_traceme(struct task_struct *parent);
 int security_capget(struct task_struct *target,
@@ -1869,6 +1878,26 @@
 	return 0;
 }
 
+static inline int security_binder_set_context_mgr(struct task_struct *mgr)
+{
+	return 0;
+}
+
+static inline int security_binder_transaction(struct task_struct *from, struct task_struct *to)
+{
+	return 0;
+}
+
+static inline int security_binder_transfer_binder(struct task_struct *from, struct task_struct *to)
+{
+	return 0;
+}
+
+static inline int security_binder_transfer_file(struct task_struct *from, struct task_struct *to, struct file *file)
+{
+	return 0;
+}
+
 static inline int security_ptrace_access_check(struct task_struct *child,
 					     unsigned int mode)
 {
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 87d4bbc..a782f3c 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -60,6 +60,7 @@
 	void		(*pm)(struct uart_port *, unsigned int state,
 			      unsigned int oldstate);
 	int		(*set_wake)(struct uart_port *, unsigned int state);
+	void		(*wake_peer)(struct uart_port *);
 
 	/*
 	 * Return a string describing the type of the port
diff --git a/include/linux/serial_max3110.h b/include/linux/serial_max3110.h
new file mode 100644
index 0000000..5470556
--- /dev/null
+++ b/include/linux/serial_max3110.h
@@ -0,0 +1,16 @@
+#ifndef _LINUX_SERIAL_MAX3110_H
+#define _LINUX_SERIAL_MAX3110_H
+
+/**
+ * struct plat_max3110 - MAX3110 SPI UART platform data
+ * @irq_edge_triggered: true if the IRQ is edge triggered
+ *
+ * You should use this structure in your machine description to specify
+ * how the MAX3110 is connected.
+ *
+ */
+struct plat_max3110 {
+	int irq_edge_triggered;
+};
+
+#endif
diff --git a/include/linux/serial_mfd.h b/include/linux/serial_mfd.h
index 2b071e0..0c0ba99 100644
--- a/include/linux/serial_mfd.h
+++ b/include/linux/serial_mfd.h
@@ -3,6 +3,7 @@
 
 /* HW register offset definition */
 #define UART_FOR	0x08
+#define UART_ABR	0x09
 #define UART_PS		0x0C
 #define UART_MUL	0x0D
 #define UART_DIV	0x0E
@@ -18,8 +19,8 @@
 #define HSU_GBL_INT_BIT_DMA	0x5
 
 #define HSU_GBL_ISR	0x8
-#define HSU_GBL_DMASR	0x400
-#define HSU_GBL_DMAISR	0x404
+#define HSU_GBL_DMASR	0x0
+#define HSU_GBL_DMAISR	0x4
 
 #define HSU_PORT_REG_OFFSET	0x80
 #define HSU_PORT0_REG_OFFSET	0x80
@@ -27,7 +28,7 @@
 #define HSU_PORT2_REG_OFFSET	0x180
 #define HSU_PORT_REG_LENGTH	0x80
 
-#define HSU_DMA_CHANS_REG_OFFSET	0x500
+#define HSU_DMA_CHANS_REG_OFFSET	0x100
 #define HSU_DMA_CHANS_REG_LENGTH	0x40
 
 #define HSU_CH_SR		0x0	/* channel status reg */
diff --git a/include/linux/sfi.h b/include/linux/sfi.h
index fe81791..cd50488 100644
--- a/include/linux/sfi.h
+++ b/include/linux/sfi.h
@@ -72,6 +72,7 @@
 #define SFI_SIG_WAKE		"WAKE"
 #define SFI_SIG_DEVS		"DEVS"
 #define SFI_SIG_GPIO		"GPIO"
+#define SFI_SIG_OEMB		"OEMB"
 
 #define SFI_SIGNATURE_SIZE	4
 #define SFI_OEM_ID_SIZE		6
@@ -85,6 +86,9 @@
 #define SFI_GET_NUM_ENTRIES(ptable, entry_type) \
 	((ptable->header.len - sizeof(struct sfi_table_header)) / \
 	(sizeof(entry_type)))
+
+#define SPID_FRU_SIZE	10
+
 /*
  * Table structures must be byte-packed to match the SFI specification,
  * as they are provided by the BIOS.
@@ -153,6 +157,7 @@
 #define SFI_DEV_TYPE_UART	2
 #define SFI_DEV_TYPE_HSI	3
 #define SFI_DEV_TYPE_IPC	4
+#define SFI_DEV_TYPE_SD		5
 
 	u8	host_num;	/* attached to host 0, 1...*/
 	u16	addr;
diff --git a/include/linux/spi/intel_mid_ssp_spi.h b/include/linux/spi/intel_mid_ssp_spi.h
new file mode 100644
index 0000000..b212eee
--- /dev/null
+++ b/include/linux/spi/intel_mid_ssp_spi.h
@@ -0,0 +1,354 @@
+/*
+ *  Copyright (C) Intel 2009
+ *  Ken Mills <ken.k.mills@intel.com>
+ *  Sylvain Centelles <sylvain.centelles@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+#ifndef INTEL_MID_SSP_SPI_H_
+#define INTEL_MID_SSP_SPI_H_
+
+#include <linux/intel_mid_dma.h>
+#include <linux/pm_qos.h>
+#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+
+#define PCI_MRST_DMAC1_ID	0x0814
+#define PCI_MDFL_DMAC1_ID	0x0827
+#define PCI_BYT_DMAC1_ID	0x0f06
+#define PCI_MRFL_DMAC_ID	0x11A2
+
+#define SSP_NOT_SYNC 0x400000
+#define MAX_SPI_TRANSFER_SIZE 8192
+#define MAX_BITBANGING_LOOP   10000
+#define SPI_FIFO_SIZE 16
+
+/* PM QoS define */
+#define MIN_EXIT_LATENCY 20
+
+/* SSP assignment configuration from PCI config */
+
+#define SSP_CFG_SPI_MODE_ID		1
+/* adid field offset is 6 inside the vendor specific capability */
+#define VNDR_CAPABILITY_ADID_OFFSET	6
+
+/* Driver's quirk flags */
+/* This workaround buffers data in the audio fabric SDRAM from      */
+/* where the DMA transfers will operate. Should be enabled only for */
+/* SPI slave mode.                                                  */
+#define QUIRKS_SRAM_ADDITIONAL_CPY	1
+/* If set the trailing bytes won't be handled by the DMA.           */
+/* Trailing byte feature not fully available.                       */
+#define QUIRKS_DMA_USE_NO_TRAIL		2
+/* If set, the driver will use PM_QOS to reduce the latency         */
+/* introduced by the deeper C-states which may produce over/under   */
+/* run issues. Must be used in slave mode. In master mode, the      */
+/* latency is not critical, but setting this workaround may         */
+/* improve the SPI throughput.                                      */
+#define QUIRKS_USE_PM_QOS		4
+/* This quirk is set on Moorestown                                  */
+#define QUIRKS_PLATFORM_MRST		8
+/* This quirk is set on Medfield                                    */
+#define QUIRKS_PLATFORM_MDFL		16
+/* If set, the driver will apply the bitbanging workaround needed   */
+/* to enable defective Langwell stepping A SSP. The defective SSP   */
+/* can be enabled only once, and should never be disabled.          */
+#define QUIRKS_BIT_BANGING		32
+/* If set, SPI is in slave clock mode                               */
+#define QUIRKS_SPI_SLAVE_CLOCK_MODE	64
+/* Add more platforms here. */
+/* This quirk is set on Baytrail. */
+#define QUIRKS_PLATFORM_BYT		128
+#define QUIRKS_PLATFORM_MRFL		256
+
+/* Uncomment to get RX and TX short dumps after each transfer */
+/* #define DUMP_RX 1 */
+#define MAX_TRAILING_BYTE_RETRY 16
+#define MAX_TRAILING_BYTE_LOOP 100
+#define DELAY_TO_GET_A_WORD 3
+#define DFLT_TIMEOUT_VAL 500
+
+#define DEFINE_SSP_REG(reg, off) \
+static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \
+static inline void write_##reg(u32 v, void *p) { __raw_writel(v, p + (off)); }
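For instance, DEFINE_SSP_REG(SSCR0, 0x00) below expands to a pair of accessors, so register I/O in the driver reads naturally:

	static inline u32 read_SSCR0(void *p) { return __raw_readl(p + 0x00); }
	static inline void write_SSCR0(u32 v, void *p) { __raw_writel(v, p + 0x00); }

	/* e.g. enabling the port: */
	write_SSCR0(read_SSCR0(ioaddr) | SSCR0_SSE, ioaddr);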
+
+#define RX_DIRECTION 0
+#define TX_DIRECTION 1
+
+#define I2C_ACCESS_USDELAY 10
+
+#define DFLT_BITS_PER_WORD 16
+#define MIN_BITS_PER_WORD     4
+#define MAX_BITS_PER_WORD     32
+#define DFLT_FIFO_BURST_SIZE	IMSS_FIFO_BURST_8
+
+#define TRUNCATE(x, a) ((x) & ~((a)-1))
+
+DEFINE_SSP_REG(SSCR0, 0x00)
+DEFINE_SSP_REG(SSCR1, 0x04)
+DEFINE_SSP_REG(SSSR, 0x08)
+DEFINE_SSP_REG(SSITR, 0x0c)
+DEFINE_SSP_REG(SSDR, 0x10)
+DEFINE_SSP_REG(SSTO, 0x28)
+DEFINE_SSP_REG(SSPSP, 0x2c)
+DEFINE_SSP_REG(SSCR2, 0x40)
+DEFINE_SSP_REG(SSFS, 0x44)
+DEFINE_SSP_REG(SFIFOL, 0x68)
+
+DEFINE_SSP_REG(I2CCTRL, 0x00)
+DEFINE_SSP_REG(I2CDATA, 0x04)
+
+DEFINE_SSP_REG(GPLR1, 0x04)
+DEFINE_SSP_REG(GPDR1, 0x0c)
+DEFINE_SSP_REG(GPSR1, 0x14)
+DEFINE_SSP_REG(GPCR1, 0x1C)
+DEFINE_SSP_REG(GAFR1_U, 0x44)
+
+#define SYSCFG  0x20bc0
+
+#define SRAM_BASE_ADDR 0xfffdc000
+#define SRAM_RX_ADDR   SRAM_BASE_ADDR
+#define SRAM_TX_ADDR  (SRAM_BASE_ADDR + MAX_SPI_TRANSFER_SIZE)
+
+#define SSCR0_DSS   (0x0000000f)     /* Data Size Select (mask) */
+#define SSCR0_DataSize(x)  ((x) - 1)    /* Data Size Select [4..16] */
+#define SSCR0_FRF   (0x00000030)     /* FRame Format (mask) */
+#define SSCR0_Motorola        (0x0 << 4)         /* Motorola's SPI mode */
+#define SSCR0_ECS   (1 << 6) /* External clock select */
+#define SSCR0_SSE   (1 << 7) /* Synchronous Serial Port Enable */
+
+#define SSCR0_SCR   (0x000fff00)      /* Serial Clock Rate (mask) */
+#define SSCR0_SerClkDiv(x) (((x) - 1) << 8) /* Divisor [1..4096] */
+#define SSCR0_EDSS  (1 << 20)           /* Extended data size select */
+#define SSCR0_NCS   (1 << 21)           /* Network clock select */
+#define SSCR0_RIM    (1 << 22)           /* Receive FIFO overrun int mask */
+#define SSCR0_TUM   (1 << 23)           /* Transmit FIFO underrun int mask */
+#define SSCR0_FRDC (0x07000000)     /* Frame rate divider control (mask) */
+#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame */
+#define SSCR0_ADC   (1 << 30)           /* Audio clock select */
+#define SSCR0_MOD  (1 << 31)           /* Mode (normal or network) */
+
+#define SSCR1_RIE    (1 << 0) /* Receive FIFO Interrupt Enable */
+#define SSCR1_TIE     (1 << 1) /* Transmit FIFO Interrupt Enable */
+#define SSCR1_LBM   (1 << 2) /* Loop-Back Mode */
+#define SSCR1_SPO   (1 << 3) /* SSPSCLK polarity setting */
+#define SSCR1_SPH   (1 << 4) /* Motorola SPI SSPSCLK phase setting */
+#define SSCR1_MWDS           (1 << 5) /* Microwire Transmit Data Size */
+#define SSCR1_TFT    (0x000003c0)     /* Transmit FIFO Threshold (mask) */
+#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
+#define SSCR1_RFT    (0x00003c00)     /* Receive FIFO Threshold (mask) */
+#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
+
+#define SSSR_TNF		(1 << 2)	/* Tx FIFO Not Full */
+#define SSSR_RNE		(1 << 3)	/* Rx FIFO Not Empty */
+#define SSSR_BSY		(1 << 4)	/* SSP Busy */
+#define SSSR_TFS		(1 << 5)	/* Tx FIFO Service Request */
+#define SSSR_RFS		(1 << 6)	/* Rx FIFO Service Request */
+#define SSSR_ROR		(1 << 7)	/* Rx FIFO Overrun */
+#define SSSR_TFL_MASK           (0x0F << 8)     /* Tx FIFO level field mask */
+#define SSSR_RFL_SHIFT		12		/* Rx FIFO level shift */
+#define SSSR_RFL_MASK		(0x0F << SSSR_RFL_SHIFT) /* Rx FIFO level mask */
+
+#define SSCR0_TIM    (1 << 23)          /* Transmit FIFO Under Run Int Mask */
+#define SSCR0_RIM    (1 << 22)          /* Receive FIFO Over Run int Mask */
+#define SSCR0_NCS    (1 << 21)          /* Network Clock Select */
+#define SSCR0_EDSS   (1 << 20)          /* Extended Data Size Select */
+
+#define SSCR0_TISSP      (1 << 4)  /* TI Sync Serial Protocol */
+#define SSCR0_PSP        (3 << 4)  /* PSP - Programmable Serial Protocol */
+#define SSCR1_TTELP      (1 << 31) /* TXD Tristate Enable Last Phase */
+#define SSCR1_TTE        (1 << 30) /* TXD Tristate Enable */
+#define SSCR1_EBCEI      (1 << 29) /* Enable Bit Count Error interrupt */
+#define SSCR1_SCFR       (1 << 28) /* Slave Clock free Running */
+#define SSCR1_ECRA       (1 << 27) /* Enable Clock Request A */
+#define SSCR1_ECRB       (1 << 26) /* Enable Clock request B */
+#define SSCR1_SCLKDIR    (1 << 25) /* Serial Bit Rate Clock Direction */
+#define SSCR1_SFRMDIR    (1 << 24) /* Frame Direction */
+#define SSCR1_RWOT       (1 << 23) /* Receive Without Transmit */
+#define SSCR1_TRAIL      (1 << 22) /* Trailing Byte */
+#define SSCR1_TSRE       (1 << 21) /* Transmit Service Request Enable */
+#define SSCR1_RSRE       (1 << 20) /* Receive Service Request Enable */
+#define SSCR1_TINTE      (1 << 19) /* Receiver Time-out Interrupt enable */
+#define SSCR1_PINTE      (1 << 18) /* Trailing Byte Interrupt Enable */
+#define SSCR1_STRF       (1 << 15) /* Select FIFO or EFWR */
+#define SSCR1_EFWR       (1 << 14) /* Enable FIFO Write/Read */
+#define SSCR1_IFS        (1 << 16) /* Invert Frame Signal */
+
+#define SSSR_BCE         (1 << 23) /* Bit Count Error */
+#define SSSR_CSS         (1 << 22) /* Clock Synchronisation Status */
+#define SSSR_TUR         (1 << 21) /* Transmit FIFO Under Run */
+#define SSSR_EOC         (1 << 20) /* End Of Chain */
+#define SSSR_TINT        (1 << 19) /* Receiver Time-out Interrupt */
+#define SSSR_PINT        (1 << 18) /* Peripheral Trailing Byte Interrupt */
+
+#define SSPSP_FSRT       (1 << 25)   /* Frame Sync Relative Timing */
+#define SSPSP_DMYSTOP(x) ((x) << 23) /* Dummy Stop */
+#define SSPSP_SFRMWDTH(x) ((x) << 16) /* Serial Frame Width */
+#define SSPSP_SFRMDLY(x) ((x) << 9)  /* Serial Frame Delay */
+#define SSPSP_DMYSTRT(x) ((x) << 7)  /* Dummy Start */
+#define SSPSP_STRTDLY(x) ((x) << 4)  /* Start Delay */
+#define SSPSP_ETDS       (1 << 3)    /* End of Transfer data State */
+#define SSPSP_SFRMP      (1 << 2)    /* Serial Frame Polarity */
+#define SSPSP_SCMODE(x)  ((x) << 0)  /* Serial Bit Rate Clock Mode */
+
+#define SSCR2_CLK_DEL_EN (1 << 3)	/* Delay logic for capturing input data */
+
+/*
+ * For testing SSCR1 changes that require SSP restart, basically
+ * everything except the service and interrupt enables
+ */
+
+#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
+				| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
+				| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
+				| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
+				| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
+				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
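+
+/*
+ * Hedged usage sketch: a driver can compare the cached configuration with
+ * the live register and restart the port only when a restart-requiring bit
+ * differs, e.g.
+ *
+ *   if ((chip->cr1 & SSCR1_CHANGE_MASK) !=
+ *       (read_SSCR1(sspc->ioaddr) & SSCR1_CHANGE_MASK))
+ *           restart_ssp(sspc);   // hypothetical helper
+ */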
+
+struct callback_param {
+	void *drv_context;
+	u32 direction;
+};
+
+struct ssp_drv_context {
+	/* Driver model hookup */
+	struct pci_dev *pdev;
+
+	/* SPI framework hookup */
+	struct spi_master *master;
+
+	/* SSP register addresses */
+	unsigned long paddr;
+	void *ioaddr;
+	int irq;
+
+	/* I2C registers */
+	dma_addr_t I2C_paddr;
+	void *I2C_ioaddr;
+
+	/* SSP masks */
+	u32 cr1_sig;
+	u32 cr1;
+	u32 clear_sr;
+	u32 mask_sr;
+
+	/* PM_QOS request */
+	struct pm_qos_request pm_qos_req;
+
+	struct tasklet_struct poll_transfer;
+
+	spinlock_t lock;
+	struct workqueue_struct *workqueue;
+	struct workqueue_struct *wq_poll_write;
+	struct work_struct pump_messages;
+	struct work_struct poll_write;
+	struct list_head queue;
+	struct completion msg_done;
+
+	int suspended;
+
+	/* Current message transfer state info */
+	struct spi_message *cur_msg;
+	size_t len;
+	size_t len_dma_rx;
+	size_t len_dma_tx;
+	void *tx;
+	void *tx_end;
+	void *rx;
+	void *rx_end;
+	bool dma_initialized;
+	int dma_mapped;
+	dma_addr_t rx_dma;
+	dma_addr_t tx_dma;
+	u8 n_bytes;
+	int (*write)(struct ssp_drv_context *sspc);
+	int (*read)(struct ssp_drv_context *sspc);
+
+	struct intel_mid_dma_slave    dmas_tx;
+	struct intel_mid_dma_slave    dmas_rx;
+	struct dma_chan    *txchan;
+	struct dma_chan    *rxchan;
+	struct workqueue_struct *dma_wq;
+	struct work_struct complete_work;
+
+	u8 __iomem *virt_addr_sram_tx;
+	u8 __iomem *virt_addr_sram_rx;
+
+	int txdma_done;
+	int rxdma_done;
+	struct callback_param tx_param;
+	struct callback_param rx_param;
+	struct pci_dev *dmac1;
+
+	unsigned long quirks;
+	u32 rx_fifo_threshold;
+
+	/* if CS_ACTIVE_HIGH, cs_assert == 1 else cs_assert == 0 */
+	int cs_assert;
+	int cs_change;
+	void (*cs_control)(u32 command);
+};
+
+struct chip_data {
+	u32 cr0;
+	u32 cr1;
+	u32 timeout;
+	u8 chip_select;
+	u8 n_bytes;
+	u8 dma_enabled;
+	u8 bits_per_word;
+	u32 speed_hz;
+	int (*write)(struct ssp_drv_context *sspc);
+	int (*read)(struct ssp_drv_context *sspc);
+	void (*cs_control)(u32 command);
+};
+
+
+enum intel_mid_ssp_spi_fifo_burst {
+	IMSS_FIFO_BURST_1,
+	IMSS_FIFO_BURST_4,
+	IMSS_FIFO_BURST_8
+};
+
+/* spi_board_info.controller_data for SPI slave devices,
+ * copied to spi_device.platform_data ... mostly for dma tuning
+ */
+struct intel_mid_ssp_spi_chip {
+	enum intel_mid_ssp_spi_fifo_burst burst_size;
+	u32 timeout;
+	u8 enable_loopback;
+	u8 dma_enabled;
+	void (*cs_control)(u32 command);
+	void (*platform_pinmux)(void);
+};
+
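+/*
+ * Hedged board-file sketch (all values illustrative):
+ *
+ *   static struct intel_mid_ssp_spi_chip chip_data = {
+ *           .burst_size  = DFLT_FIFO_BURST_SIZE,
+ *           .timeout     = DFLT_TIMEOUT_VAL,
+ *           .dma_enabled = 1,
+ *   };
+ *
+ * referenced from spi_board_info.controller_data as described above.
+ */
+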
+#define SPI_DIB_NAME_LEN  16
+#define SPI_DIB_SPEC_INFO_LEN      10
+
+struct spi_dib_header {
+	u32       signature;
+	u32       length;
+	u8         rev;
+	u8         checksum;
+	u8         dib[0];
+} __packed;
+
+#endif /* INTEL_MID_SSP_SPI_H_ */
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 74575cb..bfc6fb6 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -92,4 +92,10 @@
 extern void spd_release_page(struct splice_pipe_desc *, unsigned int);
 
 extern const struct pipe_buf_operations page_cache_pipe_buf_ops;
+
+extern long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
+			   loff_t *ppos, size_t len, unsigned int flags);
+extern long do_splice_to(struct file *in, loff_t *ppos,
+			 struct pipe_inode_info *pipe, size_t len,
+			 unsigned int flags);
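+
+/*
+ * Hedged usage sketch: with these exported, in-kernel callers can move data
+ * through a pipe in two hops, e.g.
+ *
+ *   ret = do_splice_to(in, &in_pos, pipe, len, 0);
+ *   if (ret > 0)
+ *           ret = do_splice_from(pipe, out, &out_pos, ret, 0);
+ */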
 #endif
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index d4e3f16..a348213 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -363,7 +363,7 @@
 extern bool pm_get_wakeup_count(unsigned int *count, bool block);
 extern bool pm_save_wakeup_count(unsigned int count);
 extern void pm_wakep_autosleep_enabled(bool set);
-
+extern void pm_get_active_wakeup_sources(char *pending_sources, size_t max);
 static inline void lock_system_sleep(void)
 {
 	current->flags |= PF_FREEZER_SKIP;
diff --git a/include/linux/switch.h b/include/linux/switch.h
new file mode 100644
index 0000000..3e4c748
--- /dev/null
+++ b/include/linux/switch.h
@@ -0,0 +1,53 @@
+/*
+ *  Switch class driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef __LINUX_SWITCH_H__
+#define __LINUX_SWITCH_H__
+
+struct switch_dev {
+	const char	*name;
+	struct device	*dev;
+	int		index;
+	int		state;
+
+	ssize_t	(*print_name)(struct switch_dev *sdev, char *buf);
+	ssize_t	(*print_state)(struct switch_dev *sdev, char *buf);
+};
+
+struct gpio_switch_platform_data {
+	const char *name;
+	unsigned 	gpio;
+
+	/* if NULL, switch_dev.name will be printed */
+	const char *name_on;
+	const char *name_off;
+	/* if NULL, "0" or "1" will be printed */
+	const char *state_on;
+	const char *state_off;
+};
+
+extern int switch_dev_register(struct switch_dev *sdev);
+extern void switch_dev_unregister(struct switch_dev *sdev);
+
+static inline int switch_get_state(struct switch_dev *sdev)
+{
+	return sdev->state;
+}
+
+extern void switch_set_state(struct switch_dev *sdev, int state);
+
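+/*
+ * Hedged usage sketch (names illustrative):
+ *
+ *   static struct switch_dev headset_switch = { .name = "h2w" };
+ *
+ *   switch_dev_register(&headset_switch);   // registers with the switch class
+ *   switch_set_state(&headset_switch, 1);   // notify userspace via sysfs
+ */
+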
+#endif /* __LINUX_SWITCH_H__ */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index e1bc430..988ad05 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -183,6 +183,7 @@
 	__SYSCALL_DEFINEx(x, sname, __VA_ARGS__)
 
 #define __PROTECT(...) asmlinkage_protect(__VA_ARGS__)
+#if !defined(__i386) && !defined(__amd64__)
 #define __SYSCALL_DEFINEx(x, name, ...)					\
 	asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));	\
 	static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__));	\
@@ -195,6 +196,21 @@
 	}								\
 	SYSCALL_ALIAS(sys##name, SyS##name);				\
 	static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+#else
+#define __SYSCALL_DEFINEx(x, name, ...)					\
+	asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));	\
+	static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__));	\
+	asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__))	\
+	{								\
+		long ret;						\
+		ret = SYSC##name(__MAP(x,__SC_CAST,__VA_ARGS__));	\
+		__MAP(x,__SC_TEST,__VA_ARGS__);				\
+		__PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__));	\
+		return ret;						\
+	}								\
+	SYSCALL_ALIAS(sys##name, SyS##name);				\
+	static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+#endif
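+
+/*
+ * Illustrative expansion (hedged sketch, not emitted by this header):
+ * SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd) declares
+ * sys_dup2(), emits an asmlinkage SyS_dup2() that takes each argument as
+ * long, casts them back via __SC_CAST before calling the static inline
+ * SYSC_dup2() body, and aliases sys_dup2 to SyS_dup2 with SYSCALL_ALIAS().
+ */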
 
 asmlinkage long sys_time(time_t __user *tloc);
 asmlinkage long sys_stime(time_t __user *tptr);
@@ -846,4 +862,6 @@
 asmlinkage long sys_kcmp(pid_t pid1, pid_t pid2, int type,
 			 unsigned long idx1, unsigned long idx2);
 asmlinkage long sys_finit_module(int fd, const char __user *uargs, int flags);
+asmlinkage long sys_seccomp(unsigned int op, unsigned int flags,
+			    const char __user *uargs);
 #endif
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index a386a1c..04b08a6 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -110,22 +110,22 @@
 		     struct thermal_cooling_device *);
 	int (*unbind) (struct thermal_zone_device *,
 		       struct thermal_cooling_device *);
-	int (*get_temp) (struct thermal_zone_device *, unsigned long *);
+	int (*get_temp) (struct thermal_zone_device *, long *);
 	int (*get_mode) (struct thermal_zone_device *,
 			 enum thermal_device_mode *);
 	int (*set_mode) (struct thermal_zone_device *,
 		enum thermal_device_mode);
 	int (*get_trip_type) (struct thermal_zone_device *, int,
 		enum thermal_trip_type *);
-	int (*get_trip_temp) (struct thermal_zone_device *, int,
-			      unsigned long *);
-	int (*set_trip_temp) (struct thermal_zone_device *, int,
-			      unsigned long);
-	int (*get_trip_hyst) (struct thermal_zone_device *, int,
-			      unsigned long *);
-	int (*set_trip_hyst) (struct thermal_zone_device *, int,
-			      unsigned long);
-	int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *);
+	int (*get_trip_temp) (struct thermal_zone_device *, int, long *);
+	int (*set_trip_temp) (struct thermal_zone_device *, int, long);
+	int (*get_trip_hyst) (struct thermal_zone_device *, int, long *);
+	int (*set_trip_hyst) (struct thermal_zone_device *, int, long);
+	int (*get_slope) (struct thermal_zone_device *, long *);
+	int (*set_slope) (struct thermal_zone_device *, long);
+	int (*get_intercept) (struct thermal_zone_device *, long *);
+	int (*set_intercept) (struct thermal_zone_device *, long);
+	int (*get_crit_temp) (struct thermal_zone_device *, long *);
 	int (*set_emul_temp) (struct thermal_zone_device *, unsigned long);
 	int (*get_trend) (struct thermal_zone_device *, int,
 			  enum thermal_trend *);
@@ -137,6 +137,12 @@
 	int (*get_max_state) (struct thermal_cooling_device *, unsigned long *);
 	int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *);
 	int (*set_cur_state) (struct thermal_cooling_device *, unsigned long);
+	int (*get_force_state_override) (struct thermal_cooling_device *,
+								char *);
+	int (*set_force_state_override) (struct thermal_cooling_device *,
+								char *);
+	int (*get_available_states) (struct thermal_cooling_device *,
+								char *);
 };
 
 struct thermal_cooling_device {
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index 58390c7..ae4a779 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -91,7 +91,10 @@
  *	This function is called by the low-level tty driver to signal
  *	that line discpline should try to send more characters to the
  *	low-level driver for transmission.  If the line discpline does
- *	not have any more data to send, it can just return.
+ *	not have any more data to send, it can just return. If the line
+ *	discipline does have data to send, schedule a tasklet or workqueue
+ *	to do the actual transfer; do not send data directly from this
+ *	hook, as that may lead to a deadlock.
  *
  * int (*hangup)(struct tty_struct *)
  *
diff --git a/include/linux/uid_stat.h b/include/linux/uid_stat.h
new file mode 100644
index 0000000..6bd6c4e
--- /dev/null
+++ b/include/linux/uid_stat.h
@@ -0,0 +1,29 @@
+/* include/linux/uid_stat.h
+ *
+ * Copyright (C) 2008-2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __uid_stat_h
+#define __uid_stat_h
+
+/* Contains definitions for resource tracking per uid. */
+
+#ifdef CONFIG_UID_STAT
+int uid_stat_tcp_snd(uid_t uid, int size);
+int uid_stat_tcp_rcv(uid_t uid, int size);
+#else
+#define uid_stat_tcp_snd(uid, size) do {} while (0)
+#define uid_stat_tcp_rcv(uid, size) do {} while (0)
+#endif
+
+#endif /* __uid_stat_h */
diff --git a/include/linux/usb/class-dual-role.h b/include/linux/usb/class-dual-role.h
new file mode 100644
index 0000000..af42ed3
--- /dev/null
+++ b/include/linux/usb/class-dual-role.h
@@ -0,0 +1,128 @@
+#ifndef __LINUX_CLASS_DUAL_ROLE_H__
+#define __LINUX_CLASS_DUAL_ROLE_H__
+
+#include <linux/workqueue.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct device;
+
+enum dual_role_supported_modes {
+	DUAL_ROLE_SUPPORTED_MODES_DFP_AND_UFP = 0,
+	DUAL_ROLE_SUPPORTED_MODES_DFP,
+	DUAL_ROLE_SUPPORTED_MODES_UFP,
+/*The following should be the last element*/
+	DUAL_ROLE_PROP_SUPPORTED_MODES_TOTAL,
+};
+
+enum {
+	DUAL_ROLE_PROP_MODE_UFP = 0,
+	DUAL_ROLE_PROP_MODE_DFP,
+	DUAL_ROLE_PROP_MODE_NONE,
+/*The following should be the last element*/
+	DUAL_ROLE_PROP_MODE_TOTAL,
+};
+
+enum {
+	DUAL_ROLE_PROP_PR_SRC = 0,
+	DUAL_ROLE_PROP_PR_SNK,
+	DUAL_ROLE_PROP_PR_NONE,
+/*The following should be the last element*/
+	DUAL_ROLE_PROP_PR_TOTAL,
+
+};
+
+enum {
+	DUAL_ROLE_PROP_DR_HOST = 0,
+	DUAL_ROLE_PROP_DR_DEVICE,
+	DUAL_ROLE_PROP_DR_NONE,
+/*The following should be the last element*/
+	DUAL_ROLE_PROP_DR_TOTAL,
+};
+
+enum {
+	DUAL_ROLE_PROP_VCONN_SUPPLY_NO = 0,
+	DUAL_ROLE_PROP_VCONN_SUPPLY_YES,
+/*The following should be the last element*/
+	DUAL_ROLE_PROP_VCONN_SUPPLY_TOTAL,
+};
+
+enum dual_role_property {
+	DUAL_ROLE_PROP_SUPPORTED_MODES = 0,
+	DUAL_ROLE_PROP_MODE,
+	DUAL_ROLE_PROP_PR,
+	DUAL_ROLE_PROP_DR,
+	DUAL_ROLE_PROP_VCONN_SUPPLY,
+};
+
+struct dual_role_phy_instance;
+
+/* Description of typec port */
+struct dual_role_phy_desc {
+	/* /sys/class/dual_role_usb/<name>/ */
+	const char *name;
+	enum dual_role_supported_modes supported_modes;
+	enum dual_role_property *properties;
+	size_t num_properties;
+
+	/* Callback for "cat /sys/class/dual_role_usb/<name>/<property>" */
+	int (*get_property)(struct dual_role_phy_instance *dual_role,
+			     enum dual_role_property prop,
+			     unsigned int *val);
+	/* Callback for "echo <value> >
+	 *                      /sys/class/dual_role_usb/<name>/<property>" */
+	int (*set_property)(struct dual_role_phy_instance *dual_role,
+			     enum dual_role_property prop,
+			     const unsigned int *val);
+	/* Decides whether userspace can change a specific property */
+	int (*property_is_writeable)(struct dual_role_phy_instance *dual_role,
+				      enum dual_role_property prop);
+};
+
+struct dual_role_phy_instance {
+	const struct dual_role_phy_desc *desc;
+
+	/* Driver private data */
+	void *drv_data;
+
+	struct device dev;
+	struct work_struct changed_work;
+};
+
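+/*
+ * Hedged registration sketch (driver side; all names illustrative):
+ *
+ *   static enum dual_role_property props[] = {
+ *           DUAL_ROLE_PROP_MODE, DUAL_ROLE_PROP_PR, DUAL_ROLE_PROP_DR,
+ *   };
+ *   static const struct dual_role_phy_desc desc = {
+ *           .name                  = "otg_default",
+ *           .supported_modes       = DUAL_ROLE_SUPPORTED_MODES_DFP_AND_UFP,
+ *           .properties            = props,
+ *           .num_properties        = ARRAY_SIZE(props),
+ *           .get_property          = my_get_property,
+ *           .set_property          = my_set_property,
+ *           .property_is_writeable = my_prop_is_writeable,
+ *   };
+ *
+ *   instance = devm_dual_role_instance_register(parent, &desc);
+ *   dual_role_instance_changed(instance);   // after any role change
+ */
+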
+#if IS_ENABLED(CONFIG_DUAL_ROLE_USB_INTF)
+extern void dual_role_instance_changed(struct dual_role_phy_instance
+				       *dual_role);
+extern struct dual_role_phy_instance *__must_check
+devm_dual_role_instance_register(struct device *parent,
+				 const struct dual_role_phy_desc *desc);
+extern void devm_dual_role_instance_unregister(struct device *dev,
+					       struct dual_role_phy_instance
+					       *dual_role);
+extern int dual_role_get_property(struct dual_role_phy_instance *dual_role,
+				  enum dual_role_property prop,
+				  unsigned int *val);
+extern int dual_role_set_property(struct dual_role_phy_instance *dual_role,
+				  enum dual_role_property prop,
+				  const unsigned int *val);
+extern int dual_role_property_is_writeable(struct dual_role_phy_instance
+					   *dual_role,
+					   enum dual_role_property prop);
+extern void *dual_role_get_drvdata(struct dual_role_phy_instance *dual_role);
+#else /* CONFIG_DUAL_ROLE_USB_INTF */
+static inline void dual_role_instance_changed(struct dual_role_phy_instance
+					      *dual_role){}
+static inline struct dual_role_phy_instance *__must_check
+devm_dual_role_instance_register(struct device *parent,
+				 const struct dual_role_phy_desc *desc)
+{
+	return ERR_PTR(-ENOSYS);
+}
+static inline void devm_dual_role_instance_unregister(struct device *dev,
+						      struct dual_role_phy_instance
+						      *dual_role){}
+static inline void *dual_role_get_drvdata(struct dual_role_phy_instance
+					   *dual_role)
+{
+	return ERR_PTR(-ENOSYS);
+}
+#endif /* CONFIG_DUAL_ROLE_USB_INTF */
+#endif /* __LINUX_CLASS_DUAL_ROLE_H__ */
diff --git a/include/linux/usb/debug.h b/include/linux/usb/debug.h
new file mode 100644
index 0000000..2a08e36
--- /dev/null
+++ b/include/linux/usb/debug.h
@@ -0,0 +1,253 @@
+/*
+ * <linux/usb/debug.h> -- USB Debug Class definitions.
+ *
+ * Copyright (C) 2008-2010, Intel Corporation.
+ *
+ * This software is distributed under the terms of the GNU General Public
+ * License ("GPL") version 2, as published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __LINUX_USB_DEBUG_H
+#define __LINUX_USB_DEBUG_H
+
+#include <linux/types.h>
+
+/* Debug Interface Subclass Codes */
+#define USB_SUBCLASS_DVC_GP		0x05
+#define USB_SUBCLASS_DVC_DFX		0x06
+#define USB_SUBCLASS_DVC_TRACE		0x07
+#define USB_SUBCLASS_DEBUG_CONTROL	0x08
+
+/* Debug Interface Function Protocol */
+#define DC_PROTOCOL_VENDOR			0x00
+#define DC_PROTOCOL_LAUTERBACH			0x01
+#define DC_PROTOCOL_ITP			0x02
+
+/* Debug Class-Specific Interface Descriptor Subtypes */
+#define DC_UNDEFINED			0x00
+#define DC_INPUT_CONNECTION		0x01
+#define DC_OUTPUT_CONNECTION		0x02
+#define DC_DEBUG_UNIT			0x03
+#define DC_DEBUG_ATTRIBUTES		0x04 /* revision: per SAS */
+
+/* Debug-Class Input/Output Connection Type */
+#define DC_CONNECTION_USB			0x00
+#define DC_CONNECTION_JTAG			0x01
+#define DC_CONNECTION_DEBUG_DATA_CONTROL	0x02
+#define DC_CONNECTION_DEBUG_DATA		0x03
+#define DC_CONNECTION_DEBUG_CONTROL		0x04
+
+/*
+ * Debug-class (rev 0.88r2) section 4.4.3
+ * Attribute Descriptor, bmControl
+ */
+#define DC_CTL_SET_CFG_DATA_SG			(1 << 0)
+#define DC_CTL_SET_CFG_DATA			(1 << 1)
+#define DC_CTL_GET_CFG_DATA			(1 << 2)
+#define DC_CTL_SET_CFG_ADDR			(1 << 3)
+#define DC_CTL_GET_CFG_ADDR			(1 << 4)
+#define DC_CTL_SET_ALT_STACK			(1 << 5)
+#define DC_CTL_GET_ALT_STACK			(1 << 6)
+#define DC_CTL_SET_OP_MODE			(1 << 7)
+#define DC_CTL_GET_OP_MODE			(1 << 8)
+#define DC_CTL_SET_TRACE_CFG			(1 << 9)
+#define DC_CTL_GET_TRACE_CFG			(1 << 10)
+#define DC_CTL_SET_BUFF_INFO			(1 << 11)
+#define DC_CTL_GET_BUFF_INFO			(1 << 12)
+#define DC_CTL_SET_RESET			(1 << 13)
+
+/* Debug-class (rev 0.88r2) section 4.4.6
+ * Unit/Input/Output connection Descriptors,
+ * dTraceFormat
+ */
+#define DC_TRACE_NOT_FORMATED_PASSTHROUGH   0x00000000
+#define DC_TRACE_NOT_FORMATED_HEADER        0x00000001
+#define DC_TRACE_NOT_FORMATED_FOOTER        0x00000002
+#define DC_TRACE_NOT_FORMATED_GUID          0x00000005
+#define DC_TRACE_NOT_FORMATED_UTF8          0x00000006
+#define DC_TRACE_INTEL_FORMATED_VENDOR      0x01000000
+#define DC_TRACE_MIPI_FORMATED_STPV1        0x80000000
+#define DC_TRACE_MIPI_FORMATED_STPV2        0x80000001
+#define DC_TRACE_MIPI_FORMATED_TWP          0x80000100
+#define DC_TRACE_MIPI_FORMATED_OST          0x80001000
+#define DC_TRACE_NEXUS_FORMATED             0x81000000
+
+/* Debug-class (rev 0.88r2) section 4.4.6
+ * Unit connection Descriptors, dDebugUnitType
+ */
+#define DC_UNIT_TYPE_DFX		0x00
+#define DC_UNIT_TYPE_SELECT		0x01
+#define DC_UNIT_TYPE_TRACE_ROUTE	0x02
+#define DC_UNIT_TYPE_TRACE_PROC	0x03
+#define DC_UNIT_TYPE_TRACE_GEN		0x04
+#define DC_UNIT_TYPE_TRACE_SINK	0x05
+#define DC_UNIT_TYPE_CONTROL		0x06
+#define DC_UNIT_TYPE_VENDOR		0x40
+
+/* Debug-class (rev 0.88r2) section 4.4.6
+ * Unit connection Descriptors, dDebugUnitSubType
+ */
+#define DC_UNIT_SUBTYPE_NO              0x00
+#define DC_UNIT_SUBTYPE_CPU             0x01
+#define DC_UNIT_SUBTYPE_GFX             0x02
+#define DC_UNIT_SUBTYPE_VIDEO		0x03
+#define DC_UNIT_SUBTYPE_IMAGING	0x04
+#define DC_UNIT_SUBTYPE_AUDIO		0x05
+#define DC_UNIT_SUBTYPE_MODEM		0x06
+#define DC_UNIT_SUBTYPE_BLUETOOTH	0x07
+#define DC_UNIT_SUBTYPE_PWR_MGT	0x08
+#define DC_UNIT_SUBTYPE_SECURITY	0x09
+#define DC_UNIT_SUBTYPE_SENSOR		0x0A
+#define DC_UNIT_SUBTYPE_BUSWATCH	0x0B
+#define DC_UNIT_SUBTYPE_GPS		0x0C
+#define DC_UNIT_SUBTYPE_TRACEZIP	0x0D
+#define DC_UNIT_SUBTYPE_TAPCTL		0x0E
+#define DC_UNIT_SUBTYPE_MEMACC		0x0F
+#define DC_UNIT_SUBTYPE_SWLOGGER	0x40
+#define DC_UNIT_SUBTYPE_SWROUTER	0x41
+#define DC_UNIT_SUBTYPE_SWDRIVER	0x42
+#define DC_UNIT_SUBTYPE_VENDOR		0x80
+
+/* USB DBG requests values */
+#define DC_REQUEST_SET_CONFIG_DATA		0x01
+#define DC_REQUEST_SET_CONFIG_DATA_SINGLE	0x02
+#define DC_REQUEST_SET_CONFIG_ADDRESS		0x03
+#define DC_REQUEST_SET_ALT_STACK		0x04
+#define DC_REQUEST_SET_OPERATING		0x05
+#define DC_REQUEST_SET_TRACE			0x08
+#define DC_REQUEST_SET_BUFFER_INFO		0x09
+#define DC_REQUEST_SET_RESET			0x0A
+#define DC_REQUEST_GET_CONFIG_DATA		0x81
+#define DC_REQUEST_GET_CONFIG_DATA_SINGLE	0x82
+#define DC_REQUEST_GET_CONFIG_ADDRESS		0x83
+#define DC_REQUEST_GET_ALT_STACK		0x84
+#define DC_REQUEST_GET_OPERATING		0x85
+#define DC_REQUEST_GET_TRACE			0x86
+#define DC_REQUEST_GET_INFO			0x87
+#define DC_REQUEST_GET_ERROR			0x88
+#define DC_REQUEST_GET_BUFFER_INFO		0x89
+
+/* Debug-Class Debug-Attributes Descriptor */
+struct dc_debug_attributes_descriptor {
+	__u8  bLength;
+	__u8  bDescriptorType;
+	__u8  bDescriptorSubtype;
+	__le16 bcdDC;
+	__le16 wTotalLength;
+	__u8  bmSupportedFeatures;
+	__u8  bControlSize;		/* n */
+	__u8  bmControl[0];		/* [n] */
+	__le16 wAuxDataSize;		/* m */
+	__le32 dInputBufferSize;
+	__le32 dOutputBufferSize;
+	__le64 qBaseAddress;
+	__le64 hGlobalID[2];
+	__u8  Supplementary[0];		/* [m-32] */
+} __attribute__((__packed__));
+
+#define DC_DEBUG_ATTR_DESCR(name)		 \
+	dc_debug_attributes_descriptor_##name
+
+#define DECLARE_DC_DEBUG_ATTR_DESCR(name, n, m)	\
+struct DC_DEBUG_ATTR_DESCR(name) {		\
+	__u8  bLength;					\
+	__u8  bDescriptorType;				\
+	__u8  bDescriptorSubtype;			\
+	__le16 bcdDC;					\
+	__le16 wTotalLength;				\
+	__u8  bmSupportedFeatures;			\
+	__u8  bControlSize;				\
+	__u8  bmControl[n];				\
+	__le16 wAuxDataSize;				\
+	__le32 dInputBufferSize;			\
+	__le32 dOutputBufferSize;			\
+	__le64 qBaseAddress;				\
+	__le64 hGlobalID[2];				\
+	__u8  Supplementary[m-32];			\
+} __attribute__((__packed__));
+
+#define DC_DBG_ATTRI_SIZE(n, m)		(9 + (n) + 2 + (m))
+
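+/*
+ * Illustrative only: DECLARE_DC_DEBUG_ATTR_DESCR(acme, 1, 34) ("acme" is a
+ * hypothetical tag) declares struct dc_debug_attributes_descriptor_acme
+ * with bmControl[1] and Supplementary[2] (m - 32), and
+ * DC_DBG_ATTRI_SIZE(1, 34) evaluates to 46 bytes.
+ */
+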
+/* Debug-Class Input Connection Descriptor */
+struct dc_input_connection_descriptor {
+	__u8  bLength;
+	__u8  bDescriptorType;
+	__u8  bDescriptorSubtype;
+	__u8  bConnectionID;
+	__u8  bConnectionType;
+	__u8  bAssocConnection;
+	__u8  iConnection;
+	__le32 dTraceFormat;
+	__le32 dStreamID;
+} __attribute__((__packed__));
+
+#define DC_INPUT_CONNECTION_SIZE	15
+
+/* Debug-Class Output Connection Descriptor */
+struct dc_output_connection_descriptor {
+	__u8  bLength;
+	__u8  bDescriptorType;
+	__u8  bDescriptorSubtype;
+	__u8  bConnectionID;
+	__u8  bConnectionType;
+	__u8  bAssocConnection;
+	__le16 wSourceID;
+	__u8  iConnection;
+} __attribute__((__packed__));
+
+#define DC_OUTPUT_CONNECTION_SIZE	9
+
+/* Debug-Class Debug-Unit Descriptor */
+struct dc_debug_unit_descriptor {
+	__u8  bLength;
+	__u8  bDescriptorType;
+	__u8  bDescriptorSubtype;
+	__u8  bUnitID;
+	__u8  bDebugUnitType;
+	__u8  bDebugSubUnitType;
+	__u8  bAliasUnitID;
+	__u8  bNrInPins;		/* p */
+	__le16 wSourceID[0];		/* [p] */
+	__u8  bNrOutPins;		/* q */
+	__le32 dTraceFormat[0];		/* [q] */
+	__le32 dStreamID;
+	__u8  bControlSize;		/* n */
+	__u8  bmControl[0];		/* [n] */
+	__le16 wAuxDataSize;		/* m */
+	__le64 qBaseAddress;
+	__le64 hIPID[2];
+	__u8  Supplementary[0];		/* [m-24] */
+	__u8  iDebugUnitType;
+} __attribute__((__packed__));
+
+#define DC_DEBUG_UNIT_DESCRIPTOR(p, q, n, m)		\
+	dc_debug_unit_descriptor_##p##_##q##_##n##_##m
+
+#define DECLARE_DC_DEBUG_UNIT_DESCRIPTOR(p, q, n, m)	\
+struct DC_DEBUG_UNIT_DESCRIPTOR(p, q, n, m) {		\
+	__u8  bLength;					\
+	__u8  bDescriptorType;				\
+	__u8  bDescriptorSubtype;			\
+	__u8  bUnitID;					\
+	__u8  bDebugUnitType;				\
+	__u8  bDebugSubUnitType;			\
+	__u8  bAliasUnitID;				\
+	__u8  bNrInPins;				\
+	__le16 wSourceID[p];				\
+	__u8  bNrOutPins;				\
+	__le32 dTraceFormat[q];			\
+	__le32 dStreamID;				\
+	__u8  bControlSize;				\
+	__u8  bmControl[n];				\
+	__le16 wAuxDataSize;				\
+	__le64 qBaseAddress;				\
+	__le64 hIPID[2];				\
+	__u8  Supplementary[m-24];			\
+	__u8  iDebugUnitType;				\
+} __attribute__((__packed__));
+
+#define DC_DBG_UNIT_SIZE(p, q, n, m)	\
+(8 + ((p) * 2) + 1 + ((q) * 4) + 5 + (n) + 2 + (m) + 1)
+
+#endif /* __LINUX_USB_DEBUG_H */
diff --git a/include/linux/usb/dwc3-intel-mid.h b/include/linux/usb/dwc3-intel-mid.h
new file mode 100644
index 0000000..376b01e
--- /dev/null
+++ b/include/linux/usb/dwc3-intel-mid.h
@@ -0,0 +1,190 @@
+/*
+ * Intel Penwell USB OTG transceiver driver
+ * Copyright (C) 2009 - 2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __DWC3_INTEL_H
+#define __DWC3_INTEL_H
+
+#include "otg.h"
+
+enum intel_mid_pmic_type {
+	NO_PMIC,
+	SHADY_COVE,
+	BASIN_COVE
+};
+
+struct intel_dwc_otg_pdata {
+	int is_hvp;
+	enum intel_mid_pmic_type pmic_type;
+	int charger_detect_enable;
+	int gpio_cs;
+	int gpio_reset;
+	int gpio_id;
+	int id;
+	int charging_compliance;
+	struct delayed_work suspend_discon_work;
+	u8 ti_phy_vs1;
+	int sdp_charging;
+};
+
+#define TUSB1211_VENDOR_ID_LO					0x00
+#define TUSB1211_VENDOR_ID_HI					0x01
+#define TUSB1211_PRODUCT_ID_LO					0x02
+#define TUSB1211_PRODUCT_ID_HI					0x03
+#define TUSB1211_FUNC_CTRL						0x04
+#define TUSB1211_FUNC_CTRL_SET					0x05
+#define TUSB1211_FUNC_CTRL_CLR					0x06
+#define TUSB1211_IFC_CTRL						0x07
+#define TUSB1211_IFC_CTRL_SET					0x08
+#define TUSB1211_IFC_CTRL_CLR					0x09
+#define TUSB1211_OTG_CTRL						0x0A
+#define TUSB1211_OTG_CTRL_SET					0x0B
+#define TUSB1211_OTG_CTRL_CLR					0x0C
+#define TUSB1211_USB_INT_EN_RISE				0x0D
+#define TUSB1211_USB_INT_EN_RISE_SET			0x0E
+#define TUSB1211_USB_INT_EN_RISE_CLR			0x0F
+#define TUSB1211_USB_INT_EN_FALL				0x10
+#define TUSB1211_USB_INT_EN_FALL_SET			0x11
+#define TUSB1211_USB_INT_EN_FALL_CLR			0x12
+#define TUSB1211_USB_INT_STS					0x13
+#define TUSB1211_USB_INT_LATCH					0x14
+#define TUSB1211_DEBUG							0x15
+#define TUSB1211_SCRATCH_REG					0x16
+#define TUSB1211_SCRATCH_REG_SET				0x17
+#define TUSB1211_SCRATCH_REG_CLR				0x18
+#define TUSB1211_ACCESS_EXT_REG_SET				0x2F
+
+#define TUSB1211_VENDOR_SPECIFIC1				0x80
+#define TUSB1211_VENDOR_SPECIFIC1_SET			0x81
+#define TUSB1211_VENDOR_SPECIFIC1_CLR			0x82
+#define TUSB1211_POWER_CONTROL					0x3D
+#define TUSB1211_POWER_CONTROL_SET				0x3E
+#define TUSB1211_POWER_CONTROL_CLR				0x3F
+
+#define TUSB1211_VENDOR_SPECIFIC2				0x80
+#define TUSB1211_VENDOR_SPECIFIC2_SET			0x81
+#define TUSB1211_VENDOR_SPECIFIC2_CLR			0x82
+#define TUSB1211_VENDOR_SPECIFIC2_STS			0x83
+#define TUSB1211_VENDOR_SPECIFIC2_LATCH			0x84
+#define TUSB1211_VENDOR_SPECIFIC3				0x85
+#define TUSB1211_VENDOR_SPECIFIC3_SET			0x86
+#define TUSB1211_VENDOR_SPECIFIC3_CLR			0x87
+#define TUSB1211_VENDOR_SPECIFIC4				0x88
+#define TUSB1211_VENDOR_SPECIFIC4_SET			0x89
+#define TUSB1211_VENDOR_SPECIFIC4_CLR			0x8A
+#define TUSB1211_VENDOR_SPECIFIC5				0x8B
+#define TUSB1211_VENDOR_SPECIFIC5_SET			0x8C
+#define TUSB1211_VENDOR_SPECIFIC5_CLR			0x8D
+#define TUSB1211_VENDOR_SPECIFIC6				0x8E
+#define TUSB1211_VENDOR_SPECIFIC6_SET			0x8F
+#define TUSB1211_VENDOR_SPECIFIC6_CLR			0x90
+
+#define VS1_DATAPOLARITY						(1 << 6)
+#define VS1_ZHSDRV(v)					((v & 0x3) << 5)
+#define VS1_IHSTX(v)						 ((v & 0x7))
+
+#define VS2STS_VBUS_MNTR_STS					(1 << 7)
+#define VS2STS_REG3V3IN_MNTR_STS				(1 << 6)
+#define VS2STS_SVLDCONWKB_WDOG_STS				(1 << 5)
+#define VS2STS_ID_FLOAT_STS						(1 << 4)
+#define VS2STS_ID_RARBRC_STS(v)					((v & 0x3) << 2)
+#define VS2STS_BVALID_STS						(1 << 0)
+
+#define VS3_CHGD_IDP_SRC_EN						(1 << 6)
+#define VS3_IDPULLUP_WK_EN						(1 << 5)
+#define VS3_SW_USB_DET							(1 << 4)
+#define VS3_DATA_CONTACT_DET_EN					(1 << 3)
+#define VS3_REG3V3_VSEL(v)					   (v & 0x7)
+
+#define VS4_ACA_DET_EN							(1 << 6)
+#define VS4_RABUSIN_EN							(1 << 5)
+#define VS4_R1KSERIES							(1 << 4)
+#define VS4_PSW_OSOD							(1 << 3)
+#define VS4_PSW_CMOS							(1 << 2)
+#define VS4_CHGD_SERX_DP						(1 << 1)
+#define VS4_CHGD_SERX_DM						(1 << 0)
+
+#define VS5_AUTORESUME_WDOG_EN					(1 << 6)
+#define VS5_ID_FLOAT_EN							(1 << 5)
+#define VS5_ID_RES_EN							(1 << 4)
+#define VS5_SVLDCONWKB_WDOG_EN					(1 << 3)
+#define VS5_VBUS_MNTR_RISE_EN					(1 << 2)
+#define VS5_VBUS_MNTR_FALL_EN					(1 << 1)
+#define VS5_REG3V3IN_MNTR_EN					(1 << 0)
+
+#define DEBUG_LINESTATE                       (0x3 << 0)
+
+#define OTGCTRL_USEEXTVBUS_INDICATOR			(1 << 7)
+#define OTGCTRL_DRVVBUSEXTERNAL					(1 << 6)
+#define OTGCTRL_DRVVBUS							(1 << 5)
+#define OTGCTRL_CHRGVBUS						(1 << 4)
+#define OTGCTRL_DISCHRGVBUS						(1 << 3)
+#define OTGCTRL_DMPULLDOWN						(1 << 2)
+#define OTGCTRL_DPPULLDOWN						(1 << 1)
+#define OTGCTRL_IDPULLUP						(1 << 0)
+
+#define FUNCCTRL_SUSPENDM						(1 << 6)
+#define FUNCCTRL_RESET							(1 << 5)
+#define FUNCCTRL_OPMODE(v)				((v & 0x3) << 3)
+#define FUNCCTRL_TERMSELECT						(1 << 2)
+#define FUNCCTRL_XCVRSELECT(v)					(v & 0x3)
+
+#define PWCTRL_HWDETECT							(1 << 7)
+#define PWCTRL_DP_VSRC_EN						(1 << 6)
+#define PWCTRL_VDAT_DET							(1 << 5)
+#define PWCTRL_DP_WKPU_EN						(1 << 4)
+#define PWCTRL_BVALID_FALL						(1 << 3)
+#define PWCTRL_BVALID_RISE						(1 << 2)
+#define PWCTRL_DET_COMP							(1 << 1)
+#define PWCTRL_SW_CONTROL						(1 << 0)
+
+
+#define PMIC_VLDOCNT                0xAF
+#define PMIC_VLDOCNT_VUSBPHYEN      (1 << 2)
+
+#define PMIC_TLP1ESBS0I1VNNBASE		0X6B
+#define PMIC_I2COVRDADDR			0x59
+#define PMIC_I2COVROFFSET			0x5A
+#define PMIC_USBPHYCTRL				0x30
+#define PMIC_I2COVRWRDATA			0x5B
+#define PMIC_I2COVRCTRL				0x58
+#define PMIC_I2COVRCTL_I2CWR		0x01
+
+#define USBPHYCTRL_D0			(1 << 0)
+#define PMIC_USBIDCTRL				0x19
+#define USBIDCTRL_ACA_DETEN_D1	(1 << 1)
+#define USBIDCTRL_USB_IDEN_D0	(1 << 0)
+#define PMIC_USBIDSTS				0x1A
+#define USBIDSTS_ID_GND			(1 << 0)
+#define USBIDSTS_ID_RARBRC_STS(v)	((v & 0x3)  << 1)
+#define USBIDSTS_ID_FLOAT_STS	(1 << 3)
+#define PMIC_USBPHYCTRL_D0		(1 << 0)
+#define APBFC_EXIOTG3_MISC0_REG			0xF90FF85C
+
+#define DATACON_TIMEOUT		750
+#define DATACON_INTERVAL	10
+#define VBUS_TIMEOUT	300
+#define PCI_DEVICE_ID_DWC 0x119E
+
+#define VENDOR_ID_MASK (0x03 << 6)
+#define BASIN_COVE_PMIC_ID (0x03 << 6)
+
+#define PMIC_MAJOR_REV (0x07 << 3)
+#define PMIC_A0_MAJOR_REV 0x00
+
+#endif /* __DWC3_INTEL_H */
diff --git a/include/linux/usb/dwc3-intel-mrfl.h b/include/linux/usb/dwc3-intel-mrfl.h
new file mode 100644
index 0000000..82f3099
--- /dev/null
+++ b/include/linux/usb/dwc3-intel-mrfl.h
@@ -0,0 +1,168 @@
+/*
+ * Intel Penwell USB OTG transceiver driver
+ * Copyright (C) 2009 - 2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __DWC3_INTEL_H
+#define __DWC3_INTEL_H
+
+#include "otg.h"
+
+struct intel_dwc_otg_pdata {
+	int is_hvp;
+	int charging_compliance;
+};
+
+#define TUSB1211_VENDOR_ID_LO					0x00
+#define TUSB1211_VENDOR_ID_HI					0x01
+#define TUSB1211_PRODUCT_ID_LO					0x02
+#define TUSB1211_PRODUCT_ID_HI					0x03
+#define TUSB1211_FUNC_CTRL						0x04
+#define TUSB1211_FUNC_CTRL_SET					0x05
+#define TUSB1211_FUNC_CTRL_CLR					0x06
+#define TUSB1211_IFC_CTRL						0x07
+#define TUSB1211_IFC_CTRL_SET					0x08
+#define TUSB1211_IFC_CTRL_CLR					0x09
+#define TUSB1211_OTG_CTRL						0x0A
+#define TUSB1211_OTG_CTRL_SET					0x0B
+#define TUSB1211_OTG_CTRL_CLR					0x0C
+#define TUSB1211_USB_INT_EN_RISE				0x0D
+#define TUSB1211_USB_INT_EN_RISE_SET			0x0E
+#define TUSB1211_USB_INT_EN_RISE_CLR			0x0F
+#define TUSB1211_USB_INT_EN_FALL				0x10
+#define TUSB1211_USB_INT_EN_FALL_SET			0x11
+#define TUSB1211_USB_INT_EN_FALL_CLR			0x12
+#define TUSB1211_USB_INT_STS					0x13
+#define TUSB1211_USB_INT_LATCH					0x14
+#define TUSB1211_DEBUG							0x15
+#define TUSB1211_SCRATCH_REG					0x16
+#define TUSB1211_SCRATCH_REG_SET				0x17
+#define TUSB1211_SCRATCH_REG_CLR				0x18
+#define TUSB1211_ACCESS_EXT_REG_SET				0x2F
+
+#define TUSB1211_VENDOR_SPECIFIC1				0x80
+#define TUSB1211_VENDOR_SPECIFIC1_SET			0x81
+#define TUSB1211_VENDOR_SPECIFIC1_CLR			0x82
+#define TUSB1211_POWER_CONTROL					0x3D
+#define TUSB1211_POWER_CONTROL_SET				0x3E
+#define TUSB1211_POWER_CONTROL_CLR				0x3F
+
+#define TUSB1211_VENDOR_SPECIFIC2				0x80
+#define TUSB1211_VENDOR_SPECIFIC2_SET			0x81
+#define TUSB1211_VENDOR_SPECIFIC2_CLR			0x82
+#define TUSB1211_VENDOR_SPECIFIC2_STS			0x83
+#define TUSB1211_VENDOR_SPECIFIC2_LATCH			0x84
+#define TUSB1211_VENDOR_SPECIFIC3				0x85
+#define TUSB1211_VENDOR_SPECIFIC3_SET			0x86
+#define TUSB1211_VENDOR_SPECIFIC3_CLR			0x87
+#define TUSB1211_VENDOR_SPECIFIC4				0x88
+#define TUSB1211_VENDOR_SPECIFIC4_SET			0x89
+#define TUSB1211_VENDOR_SPECIFIC4_CLR			0x8A
+#define TUSB1211_VENDOR_SPECIFIC5				0x8B
+#define TUSB1211_VENDOR_SPECIFIC5_SET			0x8C
+#define TUSB1211_VENDOR_SPECIFIC5_CLR			0x8D
+#define TUSB1211_VENDOR_SPECIFIC6				0x8E
+#define TUSB1211_VENDOR_SPECIFIC6_SET			0x8F
+#define TUSB1211_VENDOR_SPECIFIC6_CLR			0x90
+
+#define VS1_DATAPOLARITY						(1 << 6)
+#define VS1_ZHSDRV(v)					((v & 0x3) << 5)
+#define VS1_IHSTX(v)						 ((v & 0x7))
+
+#define VS2STS_VBUS_MNTR_STS					(1 << 7)
+#define VS2STS_REG3V3IN_MNTR_STS				(1 << 6)
+#define VS2STS_SVLDCONWKB_WDOG_STS				(1 << 5)
+#define VS2STS_ID_FLOAT_STS						(1 << 4)
+#define VS2STS_ID_RARBRC_STS(v)					((v & 0x3) << 2)
+#define VS2STS_BVALID_STS						(1 << 0)
+
+#define VS3_CHGD_IDP_SRC_EN						(1 << 6)
+#define VS3_IDPULLUP_WK_EN						(1 << 5)
+#define VS3_SW_USB_DET							(1 << 4)
+#define VS3_DATA_CONTACT_DET_EN					(1 << 3)
+#define VS3_REG3V3_VSEL(v)					   (v & 0x7)
+
+#define VS4_ACA_DET_EN							(1 << 6)
+#define VS4_RABUSIN_EN							(1 << 5)
+#define VS4_R1KSERIES							(1 << 4)
+#define VS4_PSW_OSOD							(1 << 3)
+#define VS4_PSW_CMOS							(1 << 2)
+#define VS4_CHGD_SERX_DP						(1 << 1)
+#define VS4_CHGD_SERX_DM						(1 << 0)
+
+#define VS5_AUTORESUME_WDOG_EN					(1 << 6)
+#define VS5_ID_FLOAT_EN							(1 << 5)
+#define VS5_ID_RES_EN							(1 << 4)
+#define VS5_SVLDCONWKB_WDOG_EN					(1 << 3)
+#define VS5_VBUS_MNTR_RISE_EN					(1 << 2)
+#define VS5_VBUS_MNTR_FALL_EN					(1 << 1)
+#define VS5_REG3V3IN_MNTR_EN					(1 << 0)
+
+#define DEBUG_LINESTATE                       (0x3 << 0)
+
+#define OTGCTRL_USEEXTVBUS_INDICATOR			(1 << 7)
+#define OTGCTRL_DRVVBUSEXTERNAL					(1 << 6)
+#define OTGCTRL_DRVVBUS							(1 << 5)
+#define OTGCTRL_CHRGVBUS						(1 << 4)
+#define OTGCTRL_DISCHRGVBUS						(1 << 3)
+#define OTGCTRL_DMPULLDOWN						(1 << 2)
+#define OTGCTRL_DPPULLDOWN						(1 << 1)
+#define OTGCTRL_IDPULLUP						(1 << 0)
+
+#define FUNCCTRL_SUSPENDM						(1 << 6)
+#define FUNCCTRL_RESET							(1 << 5)
+#define FUNCCTRL_OPMODE(v)				((v & 0x3) << 3)
+#define FUNCCTRL_TERMSELECT						(1 << 2)
+#define FUNCCTRL_XCVRSELECT(v)					(v & 0x3)
+
+#define PWCTRL_HWDETECT							(1 << 7)
+#define PWCTRL_DP_VSRC_EN						(1 << 6)
+#define PWCTRL_VDAT_DET							(1 << 5)
+#define PWCTRL_DP_WKPU_EN						(1 << 4)
+#define PWCTRL_BVALID_FALL						(1 << 3)
+#define PWCTRL_BVALID_RISE						(1 << 2)
+#define PWCTRL_DET_COMP							(1 << 1)
+#define PWCTRL_SW_CONTROL						(1 << 0)
+
+
+#define PMIC_VLDOCNT                0xAF
+#define PMIC_VLDOCNT_VUSBPHYEN      (1 << 2)
+
+#define PMIC_TLP1ESBS0I1VNNBASE		0X6B
+#define PMIC_I2COVRDADDR			0x59
+#define PMIC_I2COVROFFSET			0x5A
+#define PMIC_USBPHYCTRL				0x30
+#define PMIC_I2COVRWRDATA			0x5B
+#define PMIC_I2COVRCTRL				0x58
+#define PMIC_I2COVRCTL_I2CWR		0x01
+
+#define USBPHYCTRL_D0			(1 << 0)
+#define PMIC_USBIDCTRL				0x19
+#define USBIDCTRL_ACA_DETEN_D1	(1 << 1)
+#define USBIDCTRL_USB_IDEN_D0	(1 << 0)
+#define PMIC_USBIDSTS				0x1A
+#define USBIDSTS_ID_GND			(1 << 0)
+#define USBIDSTS_ID_RARBRC_STS(v)	((v & 0x3)  << 0)
+#define USBIDSTS_ID_FLOAT_STS	(1 << 3)
+#define PMIC_USBPHYCTRL_D0		(1 << 0)
+#define APBFC_EXIOTG3_MISC0_REG			0xF90FF85C
+
+#define DATACON_TIMEOUT		750
+#define DATACON_INTERVAL	10
+#define VBUS_TIMEOUT	300
+#define PCI_DEVICE_ID_DWC 0x119E
+#endif /* __DWC3_INTEL_H */
diff --git a/include/linux/usb/f_accessory.h b/include/linux/usb/f_accessory.h
new file mode 100644
index 0000000..ebe3c4d
--- /dev/null
+++ b/include/linux/usb/f_accessory.h
@@ -0,0 +1,23 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_USB_F_ACCESSORY_H
+#define __LINUX_USB_F_ACCESSORY_H
+
+#include <uapi/linux/usb/f_accessory.h>
+
+#endif /* __LINUX_USB_F_ACCESSORY_H */
diff --git a/include/linux/usb/f_mtp.h b/include/linux/usb/f_mtp.h
new file mode 100644
index 0000000..4e84177
--- /dev/null
+++ b/include/linux/usb/f_mtp.h
@@ -0,0 +1,23 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_USB_F_MTP_H
+#define __LINUX_USB_F_MTP_H
+
+#include <uapi/linux/usb/f_mtp.h>
+
+#endif /* __LINUX_USB_F_MTP_H */
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index f1b0dca..942ef5e 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -22,6 +22,7 @@
 #include <linux/slab.h>
 #include <linux/scatterlist.h>
 #include <linux/types.h>
+#include <linux/workqueue.h>
 #include <linux/usb/ch9.h>
 
 struct usb_ep;
@@ -475,6 +476,7 @@
 
 /**
  * struct usb_gadget - represents a usb slave device
+ * @work: (internal use) Workqueue to be used for sysfs_notify()
  * @ops: Function pointers used to access hardware-specific operations.
  * @ep0: Endpoint zero, used when reading or writing responses to
  *	driver setup() requests
@@ -520,6 +522,7 @@
  * device is acting as a B-Peripheral (so is_a_peripheral is false).
  */
 struct usb_gadget {
+	struct work_struct		work;
 	/* readonly to gadget driver */
 	const struct usb_gadget_ops	*ops;
 	struct usb_ep			*ep0;
@@ -538,6 +541,7 @@
 	unsigned			out_epnum;
 	unsigned			in_epnum;
 };
+#define work_to_gadget(w)	(container_of((w), struct usb_gadget, work))
 
 static inline void set_gadget_data(struct usb_gadget *gadget, void *data)
 	{ dev_set_drvdata(&gadget->dev, data); }
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 287b906..ef35234 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -110,6 +110,7 @@
 #define HCD_FLAG_WAKEUP_PENDING		4	/* root hub is resuming? */
 #define HCD_FLAG_RH_RUNNING		5	/* root hub is running? */
 #define HCD_FLAG_DEAD			6	/* controller has died? */
+#define HCD_FLAG_IRQ_DISABLED	 7	/* Interrupt was disabled */
 
 	/* The flags can be tested using these macros; they are likely to
 	 * be slightly faster than test_bit().
@@ -120,6 +121,7 @@
 #define HCD_WAKEUP_PENDING(hcd)	((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING))
 #define HCD_RH_RUNNING(hcd)	((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING))
 #define HCD_DEAD(hcd)		((hcd)->flags & (1U << HCD_FLAG_DEAD))
+#define HCD_IRQ_DISABLED(hcd)	((hcd)->flags & (1U << HCD_FLAG_IRQ_DISABLED))
 
 	/* Flags that get set only during HCD registration or removal. */
 	unsigned		rh_registered:1;/* is root hub registered? */
@@ -132,6 +134,7 @@
 	unsigned		wireless:1;	/* Wireless USB HCD */
 	unsigned		authorized_default:1;
 	unsigned		has_tt:1;	/* Integrated TT in root hub */
+	unsigned		has_wakeup_irq:1; /* Can IRQ when suspended */
 
 	unsigned int		irq;		/* irq allocated */
 	void __iomem		*regs;		/* device memory/io */
diff --git a/include/linux/usb/penwell_otg.h b/include/linux/usb/penwell_otg.h
new file mode 100644
index 0000000..bf80b75
--- /dev/null
+++ b/include/linux/usb/penwell_otg.h
@@ -0,0 +1,515 @@
+/*
+ * Intel Penwell USB OTG transceiver driver
+ * Copyright (C) 2009 - 2010, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __PENWELL_OTG_H__
+#define __PENWELL_OTG_H__
+
+#include <linux/usb/intel_mid_otg.h>
+#include <linux/power_supply.h>
+#include <linux/wakelock.h>
+
+#define PMU_OTG_WAKE_SOURCE     6
+#define CI_USBCMD		0x30
+#	define USBCMD_RST		BIT(1)
+#	define USBCMD_RS		BIT(0)
+#define CI_USBSTS		0x34
+#	define USBSTS_SLI		BIT(8)
+#	define USBSTS_URI		BIT(6)
+#	define USBSTS_PCI		BIT(2)
+#define CI_USBINTR		0x38
+#	define USBINTR_PCE		BIT(2)
+#define CI_ULPIVP		0x60
+#	define ULPI_WU			BIT(31)
+#	define ULPI_RUN			BIT(30)
+#	define ULPI_RW			BIT(29)
+#	define ULPI_SS			BIT(27)
+#	define ULPI_PORT		(BIT(26) | BIT(25) | BIT(24))
+#	define ULPI_ADDR		(0xff << 16)
+#	define ULPI_DATRD		(0xff << 8)
+#	define ULPI_DATWR		(0xff << 0)
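+/*
+ * Hedged note: a ULPI viewport write is typically composed as
+ *
+ *   ULPI_RUN | ULPI_RW | (reg << 16) | val
+ *
+ * then polled until ULPI_RUN clears; see pnw_otg_ulpi_write() declared at
+ * the end of this header.
+ */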
+#define CI_PORTSC1		0x74
+#	define PORTSC_PP		BIT(12)
+#	define PORTSC_LS		(BIT(11) | BIT(10))
+#	define PORTSC_SUSP		BIT(7)
+#	define PORTSC_CCS		BIT(0)
+#define CI_HOSTPC1		0xb4
+#	define HOSTPC1_PHCD		BIT(22)
+#define CI_OTGSC		0xf4
+#	define OTGSC_DPIE		BIT(30)
+#	define OTGSC_1MSE		BIT(29)
+#	define OTGSC_BSEIE		BIT(28)
+#	define OTGSC_BSVIE		BIT(27)
+#	define OTGSC_ASVIE		BIT(26)
+#	define OTGSC_AVVIE		BIT(25)
+#	define OTGSC_IDIE		BIT(24)
+#	define OTGSC_DPIS		BIT(22)
+#	define OTGSC_1MSS		BIT(21)
+#	define OTGSC_BSEIS		BIT(20)
+#	define OTGSC_BSVIS		BIT(19)
+#	define OTGSC_ASVIS		BIT(18)
+#	define OTGSC_AVVIS		BIT(17)
+#	define OTGSC_IDIS		BIT(16)
+#	define OTGSC_DPS		BIT(14)
+#	define OTGSC_1MST		BIT(13)
+#	define OTGSC_BSE		BIT(12)
+#	define OTGSC_BSV		BIT(11)
+#	define OTGSC_ASV		BIT(10)
+#	define OTGSC_AVV		BIT(9)
+#	define OTGSC_ID			BIT(8)
+#	define OTGSC_HABA		BIT(7)
+#	define OTGSC_HADP		BIT(6)
+#	define OTGSC_IDPU		BIT(5)
+#	define OTGSC_DP			BIT(4)
+#	define OTGSC_OT			BIT(3)
+#	define OTGSC_HAAR		BIT(2)
+#	define OTGSC_VC			BIT(1)
+#	define OTGSC_VD			BIT(0)
+#define CI_USBMODE		0xf8
+#	define USBMODE_CM		(BIT(1) | BIT(0))
+#	define USBMODE_IDLE		0
+#	define USBMODE_DEVICE		0x2
+#	define USBMODE_HOST		0x3
+#define USBCFG_ADDR			0xff10801c
+#define USBCFG_LEN			4
+#	define USBCFG_VBUSVAL		BIT(14)
+#	define USBCFG_AVALID		BIT(13)
+#	define USBCFG_BVALID		BIT(12)
+#	define USBCFG_SESEND		BIT(11)
+
+#define OTGSC_INTEN_MASK \
+	(OTGSC_DPIE | OTGSC_BSEIE | OTGSC_BSVIE \
+	| OTGSC_ASVIE | OTGSC_AVVIE | OTGSC_IDIE)
+
+#define OTGSC_INTSTS_MASK \
+	(OTGSC_DPIS | OTGSC_BSEIS | OTGSC_BSVIS \
+	| OTGSC_ASVIS | OTGSC_AVVIS | OTGSC_IDIS)
+
+#define INTR_DUMMY_MASK (USBSTS_SLI | USBSTS_URI | USBSTS_PCI)
+
+#define HOST_REQUEST_FLAG		BIT(0)
+
+/* MSIC register for vbus power control */
+#define MSIC_ID			0x00
+#	define ID0_VENDID0		(BIT(7) | BIT(6))
+#define MSIC_ID1		0x01
+#	define ID1_VENDID1		(BIT(7) | BIT(6))
+#define MSIC_VUSB330CNT		0xd4
+#define MSIC_VOTGCNT		0xdf
+#	define VOTGEN			BIT(7)
+#	define VOTGRAMP			BIT(4)
+#define MSIC_SPWRSRINT1		0x193
+#	define SUSBCHPDET		BIT(6)
+#	define SUSBDCDET		BIT(2)
+#	define MSIC_SPWRSRINT1_MASK	(BIT(6) | BIT(2))
+#	define SPWRSRINT1_CDP		BIT(6)
+#	define SPWRSRINT1_SDP		0
+#	define SPWRSRINT1_DCP		BIT(2)
+#define MSIC_USB_MISC		0x2c8	/* Intel Specific */
+#	define MISC_CHGDSERXDPINV	BIT(5)
+#define MSIC_OTGCTRL		0x39c
+#define MSIC_OTGCTRLSET		0x340
+#define MSIC_OTGCTRLCLR		0x341
+#define	ULPI_OTGCTRL		0x0a
+#define	ULPI_OTGCTRLSET		0x0b
+#define	ULPI_OTGCTRLCLR		0x0c
+#	define DRVVBUS_EXTERNAL		BIT(6)
+#	define DRVVBUS			BIT(5)
+#	define DMPULLDOWN		BIT(2)
+#	define DPPULLDOWN		BIT(1)
+#define MSIC_USBINTEN_RISE	0x39d
+#define MSIC_USBINTEN_RISESET	0x39e
+#define MSIC_USBINTEN_RISECLR	0x39f
+#define MSIC_USBINTEN_FALL	0x3a0
+#define MSIC_USBINTEN_FALLSET	0x3a1
+#define MSIC_USBINTEN_FALLCLR	0x3a2
+
+/*
+ * For Clovertrail, due to change of USB PHY from MSIC to external standalone
+ * chip, USB Interrupt Enable Rising/Falling registers can be accessed only
+ * from ULPI interface.
+ */
+#define ULPI_USBINTEN_RISING		0xd
+#define ULPI_USBINTEN_RISINGSET		0xe
+#define ULPI_USBINTEN_RISINGCLR		0xf
+#define ULPI_USBINTEN_FALLING		0x10
+#define ULPI_USBINTEN_FALLINGSET	0x11
+#define ULPI_USBINTEN_FALLINGCLR	0x12
+
+#	define IDGND			BIT(4)
+#	define SESSEND			BIT(3)
+#	define SESSVLD			BIT(2)
+#	define VBUSVLD			BIT(1)
+#	define HOSTDISCON		BIT(0)
+#define MSIC_PWRCTRL		0x3b5
+#define MSIC_PWRCTRLSET		0x342
+#define MSIC_PWRCTRLCLR		0x343
+#define ULPI_PWRCTRL		0x3d
+#define ULPI_PWRCTRLSET		0x3e
+#define ULPI_PWRCTRLCLR		0x3f
+#	define HWDET			BIT(7)
+#	define DPVSRCEN			BIT(6)
+#	define VDATDET			BIT(5)
+#	define DPWKPUEN			BIT(4)
+#	define SWCNTRL			BIT(0)
+#define MSIC_FUNCTRL		0x398
+#define MSIC_FUNCTRLSET		0x344
+#define MSIC_FUNCTRLCLR		0x345
+#define ULPI_FUNCTRL		0x04
+#define ULPI_FUNCTRLSET		0x05
+#define ULPI_FUNCTRLCLR		0x06
+#	define PHYRESET			BIT(5)
+#	define OPMODE1			BIT(4)
+#	define OPMODE0			BIT(3)
+#	define TERMSELECT		BIT(2)
+#	define XCVRSELECT1		BIT(1)
+#	define XCVRSELECT0		BIT(0)
+#define MSIC_DEBUG		0x3a5
+#define ULPI_DEBUG		0x15
+#	define LINESTATE_MSK		(BIT(0) | BIT(1))
+#	define LINESTATE_SE1		(BIT(0) | BIT(1))
+#	define LINESTATE_SE0		(0)
+#	define LINESTATE_FSJ		BIT(0)
+#	define LINESTATE_FSK		BIT(1)
+#define MSIC_VS1		0x3b6
+#define MSIC_VS1SET		0x3a9
+#define MSIC_VS1CLR		0x3aa
+#define ULPI_VS1		0x80
+#define ULPI_VS1SET		0x81
+#define ULPI_VS1CLR		0x82
+#	define DATAPOLARITY		BIT(6)
+#define ULPI_VS2STS		0x83
+#define ULPI_VS2LATCH		0x84
+#	define VBUS_MNTR_STS		BIT(7)
+#	define REG3V3_MNTR_STS		BIT(6)
+#	define SVLDCONWKB_WDOG_STS	BIT(5)
+#	define IDFLOAT_STS		BIT(4)
+#	define IDRARBRC_STS(d)		(((d)>>2)&3)
+#	define IDRARBRC_STS1		BIT(3)
+#	define IDRARBRC_STS2		BIT(2)
+#	define IDRARBRC_MSK		(BIT(2) | BIT(3))
+#	define IDRARBRC_A		1
+#	define IDRARBRC_B		2
+#	define IDRARBRC_C		3
+#	define BVALID_STS		BIT(0)
+#define MSIC_VS3		0x3b9
+#define MSIC_VS3SET		0x346	/* Vendor Specific */
+#define MSIC_VS3CLR		0x347
+#	define SWUSBDET			BIT(4)
+#	define DATACONEN		BIT(3)
+#define ULPI_VS3		0x85
+#define ULPI_VS3SET		0x86
+#define ULPI_VS3CLR		0x87
+#	define CHGD_IDP_SRC		BIT(6)
+#	define IDPULLUP_WK		BIT(5)
+#	define SWUSBDET			BIT(4)
+#	define DATACONEN		BIT(3)
+#define MSIC_VS4		0x3ba
+#define MSIC_VS4SET		0x3ab
+#define MSIC_VS4CLR		0x3ac
+#define ULPI_VS4		0x88
+#define ULPI_VS4SET		0x89
+#define ULPI_VS4CLR		0x8a
+#	define ACADET			BIT(6)
+#	define RABUSIN			BIT(5)
+#	define R1KERIES			BIT(4)
+#	define CHRG_SERX_DP		BIT(1)
+#	define CHRG_SERX_DM		BIT(0)
+#define ULPI_VS5		0x8b
+#define ULPI_VS5SET		0x8c
+#define ULPI_VS5CLR		0x8d
+#	define AUTORESUME_WDOG		BIT(6)
+#	define IDFLOAT_EN		BIT(5)
+#	define IDRES_EN			BIT(4)
+#	define SVLDCONWKB_WDOG		BIT(3)
+#	define VBUS_MNTR_RISEEN		BIT(2)
+#	define VBUS_MNTR_FALLEN		BIT(1)
+#	define REG3V3IN_MNTR_EN		BIT(0)
+#define ULPI_VS6		0x8e
+#define ULPI_VS6SET		0x8f
+#define ULPI_VS6CLR		0x90
+#	define ACA_RID_B_CFG		BIT(7)
+#	define ACA_RID_A_CFG		BIT(6)
+#	define SOF_EN			BIT(5)
+#define MSIC_ULPIACCESSMODE	0x348
+#	define SPIMODE			BIT(0)
+#define MSIC_INT_EN_RISE	0x39D
+#define MSIC_INT_EN_RISE_SET	0x39E
+#define MSIC_INT_EN_RISE_CLR	0x39F
+#define MSIC_INT_EN_FALL	0x3A0
+#define MSIC_INT_EN_FALL_SET	0x3A1
+#define MSIC_INT_EN_FALL_CLR	0x3A2
+
+/* MSIC TI implementation for ADP/ACA */
+#define SPI_TI_VS2		0x3B7
+#define SPI_TI_VS2_LATCH	0x3B8
+#define SPI_TI_VS4		0x3BA
+#define SPI_TI_VS5		0x3BB
+#define ULPI_TI_USB_INT_STS	0x13
+#define ULPI_TI_USB_INT_LAT	0x14
+#	define USB_INT_IDGND		BIT(4)
+#	define USB_INT_SESSEND		BIT(3)
+#	define USB_INT_SESSVLD		BIT(2)
+#	define USB_INT_VBUSVLD		BIT(1)
+#define ULPI_TI_VS2		0x83
+#	define TI_ID_FLOAT_STS		BIT(4)
+#	define TI_ID_RARBRC_STS(d)	(((d)>>2)&3)
+#	define TI_ID_RARBRC_STS_MASK	(BIT(3) | BIT(2))
+#	define TI_ID_RARBRC_NONE	0
+#	define TI_ID_RARBRC_A		1
+#	define TI_ID_RARBRC_B		2
+#	define TI_ID_RARBRC_C		3
+#	define TI_ADP_INT_STS		BIT(1)
+#define ULPI_TI_VS4		0x88
+#	define TI_ACA_DET_EN		BIT(6)
+#define ULPI_TI_VS5		0x8b
+#	define TI_ADP_INT_EN		BIT(7)
+#	define TI_ID_FLOAT_EN		BIT(5)
+#	define TI_ID_RES_EN		BIT(4)
+#define ULPI_TI_VS6		0x8e
+#	define TI_HS_TXPREN		BIT(4)
+#	define TI_ADP_MODE(d)		(((d)>>2)&3)
+#	define TI_ADP_MODE_MASK		(BIT(3) | BIT(2))
+#	define TI_ADP_MODE_DISABLE	0
+#	define TI_ADP_MODE_SENSE	1
+#	define TI_ADP_MODE_PRB_A	2
+#	define TI_ADP_MODE_PRB_B	3
+#	define TI_VBUS_IADP_SRC		BIT(1)
+#	define TI_VBUS_IADP_SINK	BIT(0)
+#define ULPI_TI_VS7		0x91
+#	define TI_T_ADP_HIGH		(0xff)
+#define ULPI_TI_VS8		0x94
+#	define TI_T_ADP_LOW		(0xff)
+#define ULPI_TI_VS9		0x97
+#	define TI_T_ADP_RISE		(0xff)
+
+#define TI_PRB_DELTA			0x08
+
+/* MSIC FreeScale Implementation for ADP */
+#define ULPI_FS_ADPCL		0x28
+#	define ADPCL_PRBDSCHG		(BIT(5) | BIT(6))
+#	define ADPCL_PRBDSCHG_4		0
+#	define ADPCL_PRBDSCHG_8		1
+#	define ADPCL_PRBDSCHG_16	2
+#	define ADPCL_PRBDSCHG_32	3
+#	define ADPCL_PRBPRD		(BIT(3) | BIT(4))
+#	define ADPCL_PRBPRD_A_HALF	0
+#	define ADPCL_PRBPRD_B_HALF	1
+#	define ADPCL_PRBPRD_A		2
+#	define ADPCL_PRBPRD_B		3
+#	define ADPCL_SNSEN		BIT(2)
+#	define ADPCL_PRBEN		BIT(1)
+#	define ADPCL_ADPEN		BIT(0)
+#define ULPI_FS_ADPCH		0x29
+#	define ADPCH_PRBDELTA		(0x1f << 0)
+#define ULPI_FS_ADPIE		0x2a
+#	define ADPIE_ADPRAMPIE		BIT(2)
+#	define ADPIE_SNSMISSIE		BIT(1)
+#	define ADPIE_PRBTRGIE		BIT(0)
+#define ULPI_FS_ADPIS		0x2b
+#	define ADPIS_ADPRAMPS		BIT(5)
+#	define ADPIS_SNSMISSS		BIT(4)
+#	define ADPIS_PRBTRGS		BIT(3)
+#	define ADPIS_ADPRAMPI		BIT(2)
+#	define ADPIS_SNSMISSI		BIT(1)
+#	define ADPIS_PRBTRGI		BIT(0)
+#define ULPI_FS_ADPRL		0x2c
+#	define ADPRL_ADPRAMP		(0xff << 0)
+#define ULPI_FS_ADPRH		0x2d
+#	define ADPRH_ADPRAMP		(0x7 << 0)
+
+#define FS_ADPI_MASK	(ADPIS_ADPRAMPI | ADPIS_SNSMISSI | ADPIS_PRBTRGI)
+
+/* define Data connect checking timeout and polling interval */
+#define DATACON_TIMEOUT		750
+#define DATACON_INTERVAL	20
+
+enum penwell_otg_timer_type {
+	TA_WAIT_VRISE_TMR,
+	TA_WAIT_BCON_TMR,
+	TA_AIDL_BDIS_TMR,
+	TA_BIDL_ADIS_TMR,
+	TA_WAIT_VFALL_TMR,
+	TB_ASE0_BRST_TMR,
+	TB_SE0_SRP_TMR,
+	TB_SRP_FAIL_TMR, /* wait for response of SRP */
+	TB_BUS_SUSPEND_TMR,
+	TTST_MAINT_TMR,
+	TTST_NOADP_TMR,
+};
+
+#define TA_WAIT_VRISE		100
+#define TA_WAIT_BCON		50000
+#define TA_AIDL_BDIS		1500
+#define TA_BIDL_ADIS		300
+#define TA_WAIT_VFALL		950
+#define TB_ASE0_BRST		300
+#define TB_SE0_SRP		1200
+#define TB_SSEND_SRP		1800
+#	define SRP_MON_INVAL	300	/* TODO: interval needs more tuning */
+#define TB_SRP_FAIL		5500
+#define TB_BUS_SUSPEND		500
+#define THOS_REQ_POL		1500
+/* Test mode */
+#define	TTST_MAINT		9900
+#define	TTST_NOADP		5000
+
+/* MSIC vendor information */
+enum msic_vendor {
+	MSIC_VD_FS,
+	MSIC_VD_TI,
+	MSIC_VD_UNKNOWN
+};
+
+/* charger defined in BC 1.2 */
+enum usb_charger_type {
+	CHRG_UNKNOWN,
+	CHRG_SDP,	/* Standard Downstream Port */
+	CHRG_CDP,	/* Charging Downstream Port */
+	CHRG_SDP_INVAL,	/* Invalid Standard Downstream Port */
+	CHRG_DCP,	/* Dedicated Charging Port */
+	CHRG_ACA,	/* Accessory Charger Adapter */
+	CHRG_ACA_DOCK,	/* Accessory Charger Adapter - Dock */
+	CHRG_ACA_A,	/* Accessory Charger Adapter - RID_A */
+	CHRG_ACA_B,	/* Accessory Charger Adapter - RID_B */
+	CHRG_ACA_C,	/* Accessory Charger Adapter - RID_C */
+	CHRG_SE1,	/* SE1 (Apple)*/
+	CHRG_MHL	/* Mobile High-Definition Link */
+};
+
+struct adp_status {
+	struct completion	adp_comp;
+	u8			t_adp_rise;
+};
+
+/* Invalid SDP checking timeout */
+#define INVALID_SDP_TIMEOUT	(HZ * 15)
+
+/* OTG Battery Charging capability is used in charger capability detection */
+struct otg_bc_cap {
+	enum usb_charger_type	chrg_type;
+	unsigned int		ma;
+#define CHRG_CURR_UNKNOWN	0
+#define CHRG_CURR_DISCONN	0
+#define CHRG_CURR_SDP_SUSP	2
+#define CHRG_CURR_SDP_UNCONFIG	100
+#define CHRG_CURR_SDP_LOW	100
+#define CHRG_CURR_SDP_HIGH	500
+#define CHRG_CURR_SDP_INVAL	500
+#define CHRG_CURR_CDP		1500
+#define CHRG_CURR_DCP		1500
+#define CHRG_CURR_SE1		1500
+#define CHRG_CURR_ACA		1500
+	unsigned int            current_event;
+};
+
+struct otg_bc_event {
+	struct list_head		node;
+	struct power_supply_cable_props	cap;
+};
+
+/* Bus monitor action for b_ssend_srp/b_se0_srp */
+#define BUS_MON_STOP		0
+#define BUS_MON_START		1
+#define BUS_MON_CONTINUE	2
+
+/* define event ids to notify battery driver */
+#define USBCHRG_EVENT_CONNECT	1
+#define USBCHRG_EVENT_DISCONN	2
+#define USBCHRG_EVENT_SUSPEND	3
+#define USBCHRG_EVENT_RESUME	4
+#define USBCHRG_EVENT_UPDATE	5
+
+struct intel_mid_otg_pdata {
+	int gpio_vbus;
+	int gpio_cs;
+	int gpio_reset;
+	int charging_compliance;
+	int hnp_poll_support;
+	unsigned power_budget;
+};
+
+struct penwell_otg {
+	struct intel_mid_otg_xceiv	iotg;
+	struct device			*dev;
+
+	unsigned			region;
+	unsigned			cfg_region;
+
+	struct work_struct		work;
+	struct work_struct		hnp_poll_work;
+	struct work_struct		psc_notify;
+	struct work_struct		uevent_work;
+	struct delayed_work		ulpi_poll_work;
+	struct delayed_work		ulpi_check_work;
+	struct delayed_work		sdp_check_work;
+	struct workqueue_struct		*qwork;
+	struct workqueue_struct		*chrg_qwork;
+
+	struct timer_list		hsm_timer;
+	struct timer_list		hnp_poll_timer;
+	struct timer_list		bus_mon_timer;
+
+	unsigned long			b_se0_srp_time;
+	unsigned long			b_ssend_srp_time;
+
+	struct mutex			msic_mutex;
+	enum msic_vendor		msic;
+
+	struct notifier_block		iotg_notifier;
+	int				queue_stop;
+
+	struct adp_status		adp;
+
+	spinlock_t			charger_lock;
+	struct list_head		chrg_evt_queue;
+	struct otg_bc_cap		charging_cap;
+	spinlock_t			cap_lock;
+	struct power_supply_cable_props psc_cap;
+	int (*bc_callback)(void *arg, int event, struct otg_bc_cap *cap);
+	void				*bc_arg;
+
+	unsigned			rt_resuming;
+
+	unsigned			rt_quiesce;
+	struct intel_mid_otg_pdata	*otg_pdata;
+
+	struct wake_lock		wake_lock;
+	spinlock_t			lock;
+
+	int				phy_power_state;
+};
+
+static inline
+struct penwell_otg *iotg_to_penwell(struct intel_mid_otg_xceiv *iotg)
+{
+	return container_of(iotg, struct penwell_otg, iotg);
+}
+
+extern int penwell_otg_query_charging_cap(struct otg_bc_cap *cap);
+extern int penwell_otg_query_power_supply_cap(
+			struct power_supply_cable_props *cap);
+extern void *penwell_otg_register_bc_callback(
+	int (*cb)(void *, int, struct otg_bc_cap *), void *arg);
+extern int penwell_otg_unregister_bc_callback(void *handler);
+
+extern int pnw_otg_ulpi_write(u8 reg, u8 val);
+extern int is_clovertrail(struct pci_dev *pdev);
+
+#endif /* __PENWELL_OTG_H__ */
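As a usage sketch for the charging-capability hooks above: a hypothetical battery-driver consumer might register like this (the register/unregister semantics are assumed from the prototypes, not confirmed by this patch; all my_-prefixed names are invented):

/* Hedged sketch: hypothetical consumer of the penwell OTG
 * battery-charging callback. */
static int my_bc_notify(void *arg, int event, struct otg_bc_cap *cap)
{
	switch (event) {
	case USBCHRG_EVENT_CONNECT:
	case USBCHRG_EVENT_UPDATE:
		/* cap->chrg_type and cap->ma describe the charger */
		break;
	case USBCHRG_EVENT_DISCONN:
		break;
	}
	return 0;
}

static void *my_bc_handle;

static int my_battery_probe(void)
{
	my_bc_handle = penwell_otg_register_bc_callback(my_bc_notify, NULL);
	return my_bc_handle ? 0 : -ENODEV;	/* return handle assumed */
}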
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index 6b5978f..fe5cb1e 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -18,6 +18,7 @@
 	USB_EVENT_ID,           /* id was grounded */
 	USB_EVENT_CHARGER,      /* usb dedicated charger */
 	USB_EVENT_ENUMERATED,   /* gadget driver enumerated */
+	USB_EVENT_DRIVE_VBUS,	/* drive vbus request */
 };
 
 /* associate a type with PHY */
@@ -107,6 +108,9 @@
 			enum usb_device_speed speed);
 	int	(*notify_disconnect)(struct usb_phy *x,
 			enum usb_device_speed speed);
+
+	/* check charger status */
+	int	(*get_chrg_status)(struct usb_phy *x, void *data);
 };
 
 /**
@@ -297,4 +301,14 @@
 		return "UNKNOWN PHY TYPE";
 	}
 }
+
+static inline int
+otg_get_chrg_status(struct usb_phy *x, void *data)
+{
+	if (x && x->get_chrg_status)
+		return x->get_chrg_status(x, data);
+
+	return -ENOTSUPP;
+}
+
 #endif /* __LINUX_USB_PHY_H */
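A caller-side sketch for the new otg_get_chrg_status() accessor; the layout of *data is PHY-specific, so the int used here is only an assumption for illustration:

/* Hedged sketch: query charger status through the new hook. */
static int my_query_charger(void)
{
	struct usb_phy *phy = usb_get_phy(USB_PHY_TYPE_USB2);
	int status;	/* *data layout is PHY-specific; int is assumed */
	int ret;

	if (IS_ERR_OR_NULL(phy))
		return -ENODEV;
	ret = otg_get_chrg_status(phy, &status); /* -ENOTSUPP if absent */
	usb_put_phy(phy);
	return ret;
}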
diff --git a/include/linux/wait.h b/include/linux/wait.h
index c8e5760..569d33b6 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -959,5 +959,33 @@
 		return 0;
 	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
 }
-	
+
+/* 3.18 backport */
+extern int bit_wait_io(void *);
+
+/**
+ * wait_on_bit_io - wait for a bit to be cleared
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @mode: the task state to sleep in
+ *
+ * Use the standard hashed waitqueue table to wait for a bit
+ * to be cleared.  This is similar to wait_on_bit(), but calls
+ * io_schedule() instead of schedule() for the actual waiting.
+ *
+ * Returned value will be zero if the bit was cleared, or non-zero
+ * if the process received a signal and the mode permitted wakeup
+ * on that signal.
+ */
+static inline int
+wait_on_bit_io(void *word, int bit, unsigned mode)
+{
+	might_sleep();
+	if (!test_bit(bit, word))
+		return 0;
+	return out_of_line_wait_on_bit(word, bit,
+				       bit_wait_io,
+				       mode);
+}
+
 #endif
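A usage sketch for the backported helper; the flag word and bit are hypothetical, and the waker side must clear the bit and wake the hashed waitqueue:

/* Hedged sketch: sleep uninterruptibly until bit 0 clears. */
static unsigned long my_flags;	/* hypothetical; bit 0 = I/O in flight */

static void my_wait_for_io(void)
{
	wait_on_bit_io(&my_flags, 0, TASK_UNINTERRUPTIBLE);
}

static void my_io_done(void)
{
	clear_bit(0, &my_flags);
	smp_mb__after_clear_bit();	/* 3.10-era barrier name */
	wake_up_bit(&my_flags, 0);
}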
diff --git a/include/linux/wakelock.h b/include/linux/wakelock.h
new file mode 100644
index 0000000..f4a698a
--- /dev/null
+++ b/include/linux/wakelock.h
@@ -0,0 +1,67 @@
+/* include/linux/wakelock.h
+ *
+ * Copyright (C) 2007-2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_WAKELOCK_H
+#define _LINUX_WAKELOCK_H
+
+#include <linux/ktime.h>
+#include <linux/device.h>
+
+/* A wake_lock prevents the system from entering suspend or other low power
+ * states when active. If the type is set to WAKE_LOCK_SUSPEND, the wake_lock
+ * prevents a full system suspend.
+ */
+
+enum {
+	WAKE_LOCK_SUSPEND, /* Prevent suspend */
+	WAKE_LOCK_TYPE_COUNT
+};
+
+struct wake_lock {
+	struct wakeup_source ws;
+};
+
+static inline void wake_lock_init(struct wake_lock *lock, int type,
+				  const char *name)
+{
+	wakeup_source_init(&lock->ws, name);
+}
+
+static inline void wake_lock_destroy(struct wake_lock *lock)
+{
+	wakeup_source_trash(&lock->ws);
+}
+
+static inline void wake_lock(struct wake_lock *lock)
+{
+	__pm_stay_awake(&lock->ws);
+}
+
+static inline void wake_lock_timeout(struct wake_lock *lock, long timeout)
+{
+	__pm_wakeup_event(&lock->ws, jiffies_to_msecs(timeout));
+}
+
+static inline void wake_unlock(struct wake_lock *lock)
+{
+	__pm_relax(&lock->ws);
+}
+
+static inline int wake_lock_active(struct wake_lock *lock)
+{
+	return lock->ws.active;
+}
+
+#endif
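Typical use of this compatibility shim is a sketch along these lines (the lock name is arbitrary):

/* Hedged sketch: block suspend around a critical section. */
static struct wake_lock my_lock;

static int __init my_init(void)
{
	wake_lock_init(&my_lock, WAKE_LOCK_SUSPEND, "my_lock");
	return 0;
}

static void my_do_work(void)
{
	wake_lock(&my_lock);
	/* ... work that must not race with system suspend ... */
	wake_unlock(&my_lock);
}

static void __exit my_exit(void)
{
	wake_lock_destroy(&my_lock);
}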
diff --git a/include/linux/wakeup_reason.h b/include/linux/wakeup_reason.h
new file mode 100644
index 0000000..ad8b769
--- /dev/null
+++ b/include/linux/wakeup_reason.h
@@ -0,0 +1,27 @@
+/*
+ * include/linux/wakeup_reason.h
+ *
+ * Logs the reason which caused the kernel to resume
+ * from the suspend mode.
+ *
+ * Copyright (C) 2014 Google, Inc.
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_WAKEUP_REASON_H
+#define _LINUX_WAKEUP_REASON_H
+
+#define MAX_SUSPEND_ABORT_LEN 256
+
+void log_wakeup_reason(int irq);
+void log_suspend_abort_reason(const char *fmt, ...);
+int check_wakeup_reason(int irq);
+
+#endif /* _LINUX_WAKEUP_REASON_H */
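A caller-side sketch; the suspend handler and pending-event check are hypothetical, and log_suspend_abort_reason() takes printf-style arguments:

/* Hedged sketch: record why this device vetoed suspend. */
static bool my_wake_event_pending(void)	/* hypothetical check */
{
	return false;
}

static int my_suspend(struct device *dev)
{
	if (my_wake_event_pending()) {
		log_suspend_abort_reason("%s: wake event pending",
					 dev_name(dev));
		return -EBUSY;
	}
	return 0;
}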
diff --git a/include/linux/wifi_tiwlan.h b/include/linux/wifi_tiwlan.h
new file mode 100644
index 0000000..f07e067
--- /dev/null
+++ b/include/linux/wifi_tiwlan.h
@@ -0,0 +1,27 @@
+/* include/linux/wifi_tiwlan.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _LINUX_WIFI_TIWLAN_H_
+#define _LINUX_WIFI_TIWLAN_H_
+
+#include <linux/wlan_plat.h>
+
+#define WMPA_NUMBER_OF_SECTIONS	3
+#define WMPA_NUMBER_OF_BUFFERS	160
+#define WMPA_SECTION_HEADER	24
+#define WMPA_SECTION_SIZE_0	(WMPA_NUMBER_OF_BUFFERS * 64)
+#define WMPA_SECTION_SIZE_1	(WMPA_NUMBER_OF_BUFFERS * 256)
+#define WMPA_SECTION_SIZE_2	(WMPA_NUMBER_OF_BUFFERS * 2048)
+
+#endif
diff --git a/include/linux/wl12xx.h b/include/linux/wl12xx.h
index a54fe82..5020662 100644
--- a/include/linux/wl12xx.h
+++ b/include/linux/wl12xx.h
@@ -50,8 +50,13 @@
 
 struct wl12xx_platform_data {
 	void (*set_power)(bool enable);
+	int (*hw_init)(struct wl12xx_platform_data *pdata);
+	void (*hw_deinit)(struct wl12xx_platform_data *pdata);
 	/* SDIO only: IRQ number if WLAN_IRQ line is used, 0 for SDIO IRQs */
 	int irq;
+	/* gpio must be set to -EINVAL by platform code if a GPIO-based
+	 * IRQ is not used */
+	int gpio;
 	bool use_eeprom;
 	int board_ref_clock;
 	int board_tcxo_clock;
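Board code might fill in the new members along these lines (a sketch; the GPIO number and names are hypothetical, and deriving the IRQ in hw_init is an assumed responsibility):

/* Hedged sketch: hypothetical board file wiring the new hooks. */
#include <linux/gpio.h>

static int board_wl12xx_hw_init(struct wl12xx_platform_data *pdata)
{
	int ret = gpio_request(pdata->gpio, "wl12xx irq");

	if (ret)
		return ret;
	pdata->irq = gpio_to_irq(pdata->gpio);
	return 0;
}

static void board_wl12xx_hw_deinit(struct wl12xx_platform_data *pdata)
{
	gpio_free(pdata->gpio);
}

static struct wl12xx_platform_data board_wlan_data = {
	.hw_init	= board_wl12xx_hw_init,
	.hw_deinit	= board_wl12xx_hw_deinit,
	.gpio		= 42,	/* hypothetical; -EINVAL if no GPIO IRQ */
};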
diff --git a/include/linux/wlan_plat.h b/include/linux/wlan_plat.h
new file mode 100644
index 0000000..72e3c83
--- /dev/null
+++ b/include/linux/wlan_plat.h
@@ -0,0 +1,41 @@
+/* include/linux/wlan_plat.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _LINUX_WLAN_PLAT_H_
+#define _LINUX_WLAN_PLAT_H_
+
+#define WLAN_PLAT_NODFS_FLAG	0x01
+#define WLAN_PLAT_AP_FLAG	0x02
+
+struct wifi_platform_data {
+	int (*set_power)(int val);
+	int (*set_reset)(int val);
+	int (*set_carddetect)(int val);
+	void *(*mem_prealloc)(int section, unsigned long size);
+	int (*get_mac_addr)(unsigned char *buf);
+	char *nvram_id;
+	bool use_fast_irq;
+	int (*get_wake_irq)(void);
+	void *(*get_country_code)(char *ccode, u32 flags);
+#ifdef CONFIG_PARTIALRESUME
+#define WIFI_PR_INIT			0
+#define WIFI_PR_NOTIFY_RESUME		1
+#define WIFI_PR_VOTE_FOR_RESUME		2
+#define WIFI_PR_VOTE_FOR_SUSPEND	3
+#define WIFI_PR_WAIT_FOR_READY		4
+	bool (*partial_resume)(int action);
+#endif
+};
+
+#endif
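A board might populate the structure like this sketch (all names hypothetical; hooks left NULL are assumed to be skipped by the consumer):

/* Hedged sketch: hypothetical board-side Wi-Fi control hooks. */
static int board_wifi_power(int on)
{
	/* toggle a regulator or power GPIO here */
	return 0;
}

static int board_wifi_carddetect(int present)
{
	/* force the SDIO host to rescan the slot */
	return 0;
}

static struct wifi_platform_data board_wifi_data = {
	.set_power	= board_wifi_power,
	.set_carddetect	= board_wifi_carddetect,
	.get_mac_addr	= NULL,	/* fall back to the NIC's own MAC */
};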
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 579a500..abfe117 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -78,6 +78,7 @@
 	unsigned tagged_writepages:1;	/* tag-and-write to avoid livelock */
 	unsigned for_reclaim:1;		/* Invoked from the page allocator */
 	unsigned range_cyclic:1;	/* range_start is cyclic */
+	unsigned for_sync:1;		/* sync(2) WB_SYNC_ALL writeback */
 };
 
 /*
diff --git a/include/net/activity_stats.h b/include/net/activity_stats.h
new file mode 100644
index 0000000..10e4c15
--- /dev/null
+++ b/include/net/activity_stats.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ */
+
+#ifndef __activity_stats_h
+#define __activity_stats_h
+
+#ifdef CONFIG_NET_ACTIVITY_STATS
+void activity_stats_update(void);
+#else
+#define activity_stats_update(void) {}
+#endif
+
+#endif /* __activity_stats_h */
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 2510068..2cbf0ba 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -190,6 +190,8 @@
 extern void addrconf_prefix_rcv(struct net_device *dev,
 				u8 *opt, int len, bool sllao);
 
+u32 addrconf_rt_table(const struct net_device *dev, u32 default_table);
+
 /*
  *	anycast prototypes (anycast.c)
  */
@@ -201,6 +203,7 @@
 extern int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr);
 extern bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
 				const struct in6_addr *addr);
+extern void ipv6_ac_destroy_dev(struct inet6_dev *idev);
 
 
 /* Device notifier */
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 0ef0006..db43501 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -199,8 +199,10 @@
 #define ESCO_2EV5	0x0100
 #define ESCO_3EV5	0x0200
 
-#define SCO_ESCO_MASK  (ESCO_HV1 | ESCO_HV2 | ESCO_HV3)
-#define EDR_ESCO_MASK  (ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5)
+#define SCO_ESCO_MASK	(ESCO_HV1 | ESCO_HV2 | ESCO_HV3)
+#define EDR_ESCO_MASK	(ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5)
+#define ALL_ESCO_MASK	(SCO_ESCO_MASK | ESCO_EV3 | ESCO_EV4 | ESCO_EV5 | \
+			EDR_ESCO_MASK)
 
 /* ACL flags */
 #define ACL_START_NO_FLUSH	0x00
@@ -1629,6 +1631,9 @@
 	__u8     out;
 	__u16    state;
 	__u32    link_mode;
+	__u32    mtu;
+	__u32    cnt;
+	__u32    pkts;
 };
 
 struct hci_dev_req {
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 7cb6d36..519f2e2 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -308,6 +308,8 @@
 
 	bdaddr_t	dst;
 	__u8		dst_type;
+	bdaddr_t	src;
+	__u8		src_type;
 	__u16		handle;
 	__u16		state;
 	__u8		mode;
@@ -581,7 +583,8 @@
 void hci_setup_sync(struct hci_conn *conn, __u16 handle);
 void hci_sco_setup(struct hci_conn *conn, __u8 status);
 
-struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
+struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type,
+					__u16 pkt_type, bdaddr_t *dst);
 int hci_conn_del(struct hci_conn *conn);
 void hci_conn_hash_flush(struct hci_dev *hdev);
 void hci_conn_check_pending(struct hci_dev *hdev);
@@ -591,7 +594,8 @@
 void hci_chan_list_flush(struct hci_conn *conn);
 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
 
-struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
+struct hci_conn *hci_connect(struct hci_dev *hdev, int type,
+			     __u16 pkt_type, bdaddr_t *dst,
 			     __u8 dst_type, __u8 sec_level, __u8 auth_type);
 int hci_conn_check_link_mode(struct hci_conn *conn);
 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
@@ -654,7 +658,7 @@
 			if (conn->state == BT_CONNECTED) {
 				timeo = conn->disc_timeout;
 				if (!conn->out)
-					timeo *= 2;
+					timeo *= 20;
 			} else {
 				timeo = msecs_to_jiffies(10);
 			}
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index fb94cf1..344ef07 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -442,7 +442,13 @@
 
 	__u8		state;
 
+	bdaddr_t	dst;
+	__u8		dst_type;
+	bdaddr_t	src;
+	__u8		src_type;
+
 	__le16		psm;
+	__le16		sport;
 	__u16		dcid;
 	__u16		scid;
 
@@ -453,8 +459,6 @@
 	__u8		chan_type;
 	__u8		chan_policy;
 
-	__le16		sport;
-
 	__u8		sec_level;
 
 	__u8		ident;
@@ -821,7 +825,7 @@
 		       u8 status);
 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result);
 
-void l2cap_conn_get(struct l2cap_conn *conn);
+struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn);
 void l2cap_conn_put(struct l2cap_conn *conn);
 
 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user);
diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h
index 1e35c43..6d1857a 100644
--- a/include/net/bluetooth/sco.h
+++ b/include/net/bluetooth/sco.h
@@ -37,6 +37,7 @@
 struct sockaddr_sco {
 	sa_family_t	sco_family;
 	bdaddr_t	sco_bdaddr;
+	__u16		sco_pkt_type;
 };
 
 /* SCO socket options */
@@ -72,7 +73,8 @@
 
 struct sco_pinfo {
 	struct bt_sock	bt;
-	__u32		flags;
+	__u16		pkt_type;
+
 	struct sco_conn	*conn;
 };
 
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 26b5b69..ce7dee0 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1268,10 +1268,12 @@
 /**
  * struct cfg80211_match_set - sets of attributes to match
  *
- * @ssid: SSID to be matched
+ * @ssid: SSID to be matched; may be zero-length for no match (RSSI only)
+ * @rssi_thold: don't report scan results below this threshold (in s32 dBm)
  */
 struct cfg80211_match_set {
 	struct cfg80211_ssid ssid;
+	s32 rssi_thold;
 };
 
 /**
@@ -1293,7 +1295,8 @@
  * @dev: the interface
  * @scan_start: start time of the scheduled scan
  * @channels: channels to scan
- * @rssi_thold: don't report scan results below this threshold (in s32 dBm)
+ * @min_rssi_thold: for drivers only supporting a single threshold, this
+ *	contains the minimum over all matchsets
  */
 struct cfg80211_sched_scan_request {
 	struct cfg80211_ssid *ssids;
@@ -1305,7 +1308,8 @@
 	u32 flags;
 	struct cfg80211_match_set *match_sets;
 	int n_match_sets;
-	s32 rssi_thold;
+	s32 min_rssi_thold;
+	s32 rssi_thold; /* kept only for backward compatibility */
 
 	/* internal */
 	struct wiphy *wiphy;
@@ -2254,22 +2258,28 @@
  * enum wiphy_flags - wiphy capability flags
  *
  * @WIPHY_FLAG_CUSTOM_REGULATORY:  tells us the driver for this device
- * 	has its own custom regulatory domain and cannot identify the
- * 	ISO / IEC 3166 alpha2 it belongs to. When this is enabled
- * 	we will disregard the first regulatory hint (when the
- * 	initiator is %REGDOM_SET_BY_CORE).
- * @WIPHY_FLAG_STRICT_REGULATORY: tells us the driver for this device will
- *	ignore regulatory domain settings until it gets its own regulatory
- *	domain via its regulatory_hint() unless the regulatory hint is
- *	from a country IE. After its gets its own regulatory domain it will
- *	only allow further regulatory domain settings to further enhance
- *	compliance. For example if channel 13 and 14 are disabled by this
- *	regulatory domain no user regulatory domain can enable these channels
- *	at a later time. This can be used for devices which do not have
- *	calibration information guaranteed for frequencies or settings
- *	outside of its regulatory domain. If used in combination with
- *	WIPHY_FLAG_CUSTOM_REGULATORY the inspected country IE power settings
- *	will be followed.
+ *	has its own custom regulatory domain and cannot identify the
+ *	ISO / IEC 3166 alpha2 it belongs to. When this is enabled
+ *	we will disregard the first regulatory hint (when the
+ *	initiator is %REGDOM_SET_BY_CORE). wiphys can set the custom
+ *	regulatory domain using wiphy_apply_custom_regulatory()
+ *	prior to wiphy registration.
+ * @WIPHY_FLAG_STRICT_REGULATORY: tells us that the wiphy for this device
+ *	has regulatory domain that it wishes to be considered as the
+ *	superset for regulatory rules. After this device gets its regulatory
+ *	domain programmed further regulatory hints shall only be considered
+ *	for this device to enhance regulatory compliance, forcing the
+ *	device to only possibly use subsets of the original regulatory
+ *	rules. For example if channel 13 and 14 are disabled by this
+ *	device's regulatory domain no user specified regulatory hint which
+ *	has these channels enabled would enable them for this wiphy,
+ *	the device's original regulatory domain will be trusted as the
+ *	base. You can program the superset of regulatory rules for this
+ *	wiphy with regulatory_hint() for cards programmed with an
+ *	ISO3166-alpha2 country code. wiphys that use regulatory_hint()
+ *	will have their wiphy->regd programmed once the regulatory
+ *	domain is set, and all other regulatory hints will be ignored
+ *	until their own regulatory domain gets programmed.
  * @WIPHY_FLAG_DISABLE_BEACON_HINTS: enable this if your driver needs to ensure
  *	that passive scan flags and beaconing flags may not be lifted by
  *	cfg80211 due to regulatory beacon hints. For more information on beacon
@@ -2467,6 +2477,34 @@
 };
 
 /**
+ * enum wiphy_vendor_command_flags - validation flags for vendor commands
+ * @WIPHY_VENDOR_CMD_NEED_WDEV: vendor command requires wdev
+ * @WIPHY_VENDOR_CMD_NEED_NETDEV: vendor command requires netdev
+ * @WIPHY_VENDOR_CMD_NEED_RUNNING: interface/wdev must be up & running
+ *	(must be combined with %_WDEV or %_NETDEV)
+ */
+enum wiphy_vendor_command_flags {
+	WIPHY_VENDOR_CMD_NEED_WDEV = BIT(0),
+	WIPHY_VENDOR_CMD_NEED_NETDEV = BIT(1),
+	WIPHY_VENDOR_CMD_NEED_RUNNING = BIT(2),
+};
+
+/**
+ * struct wiphy_vendor_command - vendor command definition
+ * @info: vendor command identifying information, as used in nl80211
+ * @flags: flags, see &enum wiphy_vendor_command_flags
+ * @doit: callback for the operation, note that wdev is %NULL if the
+ *	flags didn't ask for a wdev and non-%NULL otherwise; the data
+ *	pointer may be %NULL if userspace provided no data at all
+ */
+struct wiphy_vendor_command {
+	struct nl80211_vendor_cmd_info info;
+	u32 flags;
+	int (*doit)(struct wiphy *wiphy, struct wireless_dev *wdev,
+		    const void *data, int data_len);
+};
+
+/**
  * struct wiphy - wireless hardware description
  * @reg_notifier: the driver's regulatory notification callback,
  *	note that if your driver uses wiphy_apply_custom_regulatory()
@@ -2573,6 +2611,12 @@
  *	802.11-2012 8.4.2.29 for the defined fields.
  * @extended_capabilities_mask: mask of the valid values
  * @extended_capabilities_len: length of the extended capabilities
+ * @country_ie_pref: country IE processing preferences specified
+ *	by enum nl80211_country_ie_pref
+ * @vendor_commands: array of vendor commands supported by the hardware
+ * @n_vendor_commands: number of vendor commands
+ * @vendor_events: array of vendor events supported by the hardware
+ * @n_vendor_events: number of vendor events
  */
 struct wiphy {
 	/* assign these fields before you register the wiphy */
@@ -2642,6 +2686,8 @@
 	const u8 *extended_capabilities, *extended_capabilities_mask;
 	u8 extended_capabilities_len;
 
+	u8 country_ie_pref;
+
 	/* If multiple wiphys are registered and you're handed e.g.
 	 * a regular netdev with assigned ieee80211_ptr, you won't
 	 * know whether it points to a wiphy your driver has registered
@@ -2681,6 +2727,10 @@
 	const struct iw_handler_def *wext;
 #endif
 
+	const struct wiphy_vendor_command *vendor_commands;
+	const struct nl80211_vendor_cmd_info *vendor_events;
+	int n_vendor_commands, n_vendor_events;
+
 	char priv[0] __aligned(NETDEV_ALIGN);
 };
 
@@ -3591,6 +3641,121 @@
  */
 void wiphy_rfkill_stop_polling(struct wiphy *wiphy);
 
+/**
+ * DOC: Vendor commands
+ *
+ * Occasionally, there are special protocol or firmware features that
+ * can't be implemented very openly. For this and similar cases, the
+ * vendor command functionality allows implementing the features with
+ * (typically closed-source) userspace and firmware, using nl80211 as
+ * the configuration mechanism.
+ *
+ * A driver supporting vendor commands must register them as an array
+ * in struct wiphy, with handlers for each one, each command has an
+ * OUI and sub command ID to identify it.
+ *
+ * Note that this feature should not be (ab)used to implement protocol
+ * features that could openly be shared across drivers. In particular,
+ * it must never be required to use vendor commands to implement any
+ * "normal" functionality that higher-level userspace like connection
+ * managers etc. need.
+ */
+
+struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy,
+					   enum nl80211_commands cmd,
+					   enum nl80211_attrs attr,
+					   int approxlen);
+
+struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy,
+					   enum nl80211_commands cmd,
+					   enum nl80211_attrs attr,
+					   int vendor_event_idx,
+					   int approxlen, gfp_t gfp);
+
+void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp);
+
+/**
+ * cfg80211_vendor_cmd_alloc_reply_skb - allocate vendor command reply
+ * @wiphy: the wiphy
+ * @approxlen: an upper bound of the length of the data that will
+ *	be put into the skb
+ *
+ * This function allocates and pre-fills an skb for a reply to
+ * a vendor command. Since it is intended for a reply, calling
+ * it outside of a vendor command's doit() operation is invalid.
+ *
+ * The returned skb is pre-filled with some identifying data in
+ * a way that any data that is put into the skb (with skb_put(),
+ * nla_put() or similar) will end up being within the
+ * %NL80211_ATTR_VENDOR_DATA attribute, so all that needs to be done
+ * with the skb is adding data for the corresponding userspace tool
+ * which can then read that data out of the vendor data attribute. You
+ * must not modify the skb in any other way.
+ *
+ * When done, call cfg80211_vendor_cmd_reply() with the skb and return
+ * its error code as the result of the doit() operation.
+ *
+ * Return: An allocated and pre-filled skb. %NULL if any errors happen.
+ */
+static inline struct sk_buff *
+cfg80211_vendor_cmd_alloc_reply_skb(struct wiphy *wiphy, int approxlen)
+{
+	return __cfg80211_alloc_reply_skb(wiphy, NL80211_CMD_VENDOR,
+					  NL80211_ATTR_VENDOR_DATA, approxlen);
+}
+
+/**
+ * cfg80211_vendor_cmd_reply - send the reply skb
+ * @skb: The skb, must have been allocated with
+ *	cfg80211_vendor_cmd_alloc_reply_skb()
+ *
+ * Since calling this function will usually be the last thing
+ * before returning from the vendor command doit() you should
+ * return the error code.  Note that this function consumes the
+ * skb regardless of the return value.
+ *
+ * Return: An error code or 0 on success.
+ */
+int cfg80211_vendor_cmd_reply(struct sk_buff *skb);
+
+/**
+ * cfg80211_vendor_event_alloc - allocate vendor-specific event skb
+ * @wiphy: the wiphy
+ * @event_idx: index of the vendor event in the wiphy's vendor_events
+ * @approxlen: an upper bound of the length of the data that will
+ *	be put into the skb
+ * @gfp: allocation flags
+ *
+ * This function allocates and pre-fills an skb for an event on the
+ * vendor-specific multicast group.
+ *
+ * When done filling the skb, call cfg80211_vendor_event() with the
+ * skb to send the event.
+ *
+ * Return: An allocated and pre-filled skb. %NULL if any errors happen.
+ */
+static inline struct sk_buff *
+cfg80211_vendor_event_alloc(struct wiphy *wiphy, int approxlen,
+			    int event_idx, gfp_t gfp)
+{
+	return __cfg80211_alloc_event_skb(wiphy, NL80211_CMD_VENDOR,
+					  NL80211_ATTR_VENDOR_DATA,
+					  event_idx, approxlen, gfp);
+}
+
+/**
+ * cfg80211_vendor_event - send the event
+ * @skb: The skb, must have been allocated with cfg80211_vendor_event_alloc()
+ * @gfp: allocation flags
+ *
+ * This function sends the given @skb, which must have been allocated
+ * by cfg80211_vendor_event_alloc(), as an event. It always consumes it.
+ */
+static inline void cfg80211_vendor_event(struct sk_buff *skb, gfp_t gfp)
+{
+	__cfg80211_send_event_skb(skb, gfp);
+}
+
 #ifdef CONFIG_NL80211_TESTMODE
 /**
  * DOC: Test mode
@@ -3626,8 +3791,12 @@
  *
  * Return: An allocated and pre-filled skb. %NULL if any errors happen.
  */
-struct sk_buff *cfg80211_testmode_alloc_reply_skb(struct wiphy *wiphy,
-						  int approxlen);
+static inline struct sk_buff *
+cfg80211_testmode_alloc_reply_skb(struct wiphy *wiphy, int approxlen)
+{
+	return __cfg80211_alloc_reply_skb(wiphy, NL80211_CMD_TESTMODE,
+					  NL80211_ATTR_TESTDATA, approxlen);
+}
 
 /**
  * cfg80211_testmode_reply - send the reply skb
@@ -3641,7 +3810,10 @@
  *
  * Return: An error code or 0 on success.
  */
-int cfg80211_testmode_reply(struct sk_buff *skb);
+static inline int cfg80211_testmode_reply(struct sk_buff *skb)
+{
+	return cfg80211_vendor_cmd_reply(skb);
+}
 
 /**
  * cfg80211_testmode_alloc_event_skb - allocate testmode event
@@ -3664,8 +3836,13 @@
  *
  * Return: An allocated and pre-filled skb. %NULL if any errors happen.
  */
-struct sk_buff *cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy,
-						  int approxlen, gfp_t gfp);
+static inline struct sk_buff *
+cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy, int approxlen, gfp_t gfp)
+{
+	return __cfg80211_alloc_event_skb(wiphy, NL80211_CMD_TESTMODE,
+					  NL80211_ATTR_TESTDATA, -1,
+					  approxlen, gfp);
+}
 
 /**
  * cfg80211_testmode_event - send the event
@@ -3677,7 +3854,10 @@
  * by cfg80211_testmode_alloc_event_skb(), as an event. It always
  * consumes it.
  */
-void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp);
+static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
+{
+	__cfg80211_send_event_skb(skb, gfp);
+}
 
 #define CFG80211_TESTMODE_CMD(cmd)	.testmode_cmd = (cmd),
 #define CFG80211_TESTMODE_DUMP(cmd)	.testmode_dump = (cmd),
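Putting the vendor-command pieces together, a driver-side sketch might look like this; the OUI, sub-command number, attribute type and payload are all hypothetical:

/* Hedged sketch: one hypothetical vendor command. */
#define MY_OUI		0x001122	/* hypothetical vendor OUI */
#define MY_SUBCMD_GET	0x01

static int my_vendor_doit(struct wiphy *wiphy, struct wireless_dev *wdev,
			  const void *data, int data_len)
{
	struct sk_buff *reply;

	reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 4);
	if (!reply)
		return -ENOMEM;
	if (nla_put_u32(reply, 1 /* hypothetical attr */, 0xcafe)) {
		kfree_skb(reply);
		return -ENOBUFS;
	}
	return cfg80211_vendor_cmd_reply(reply);
}

static const struct wiphy_vendor_command my_vendor_cmds[] = {
	{
		.info	= { .vendor_id = MY_OUI, .subcmd = MY_SUBCMD_GET },
		.flags	= WIPHY_VENDOR_CMD_NEED_WDEV,
		.doit	= my_vendor_doit,
	},
};

/* at wiphy setup time:
 *	wiphy->vendor_commands   = my_vendor_cmds;
 *	wiphy->n_vendor_commands = ARRAY_SIZE(my_vendor_cmds);
 */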
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index e361f48..4ac12e1 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -23,6 +23,8 @@
 	struct fib_rule __rcu	*ctarget;
 	char			iifname[IFNAMSIZ];
 	char			oifname[IFNAMSIZ];
+	kuid_t			uid_start;
+	kuid_t			uid_end;
 	struct rcu_head		rcu;
 	struct net *		fr_net;
 };
@@ -80,7 +82,9 @@
 	[FRA_FWMARK]	= { .type = NLA_U32 }, \
 	[FRA_FWMASK]	= { .type = NLA_U32 }, \
 	[FRA_TABLE]     = { .type = NLA_U32 }, \
-	[FRA_GOTO]	= { .type = NLA_U32 }
+	[FRA_GOTO]	= { .type = NLA_U32 }, \
+	[FRA_UID_START]	= { .type = NLA_U32 }, \
+	[FRA_UID_END]	= { .type = NLA_U32 }
 
 static inline void fib_rule_get(struct fib_rule *rule)
 {
diff --git a/include/net/flow.h b/include/net/flow.h
index 628e11b..1426681f 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -10,6 +10,15 @@
 #include <linux/socket.h>
 #include <linux/in6.h>
 #include <linux/atomic.h>
+#include <linux/uidgid.h>
+
+/*
+ * ifindex generation is per-net namespace, and loopback is
+ * always the 1st device in ns (see net_dev_init), thus any
+ * loopback device should get ifindex 1
+ */
+
+#define LOOPBACK_IFINDEX	1
 
 struct flowi_common {
 	int	flowic_oif;
@@ -23,6 +32,7 @@
 #define FLOWI_FLAG_CAN_SLEEP		0x02
 #define FLOWI_FLAG_KNOWN_NH		0x04
 	__u32	flowic_secid;
+	kuid_t	flowic_uid;
 };
 
 union flowi_uli {
@@ -59,6 +69,7 @@
 #define flowi4_proto		__fl_common.flowic_proto
 #define flowi4_flags		__fl_common.flowic_flags
 #define flowi4_secid		__fl_common.flowic_secid
+#define flowi4_uid		__fl_common.flowic_uid
 
 	/* (saddr,daddr) must be grouped, same order as in IP header */
 	__be32			saddr;
@@ -78,16 +89,18 @@
 				      __u32 mark, __u8 tos, __u8 scope,
 				      __u8 proto, __u8 flags,
 				      __be32 daddr, __be32 saddr,
-				      __be16 dport, __be16 sport)
+				      __be16 dport, __be16 sport,
+				      kuid_t uid)
 {
 	fl4->flowi4_oif = oif;
-	fl4->flowi4_iif = 0;
+	fl4->flowi4_iif = LOOPBACK_IFINDEX;
 	fl4->flowi4_mark = mark;
 	fl4->flowi4_tos = tos;
 	fl4->flowi4_scope = scope;
 	fl4->flowi4_proto = proto;
 	fl4->flowi4_flags = flags;
 	fl4->flowi4_secid = 0;
+	fl4->flowi4_uid = uid;
 	fl4->daddr = daddr;
 	fl4->saddr = saddr;
 	fl4->fl4_dport = dport;
@@ -115,6 +128,7 @@
 #define flowi6_proto		__fl_common.flowic_proto
 #define flowi6_flags		__fl_common.flowic_flags
 #define flowi6_secid		__fl_common.flowic_secid
+#define flowi6_uid		__fl_common.flowic_uid
 	struct in6_addr		daddr;
 	struct in6_addr		saddr;
 	__be32			flowlabel;
@@ -158,6 +172,7 @@
 #define flowi_proto	u.__fl_common.flowic_proto
 #define flowi_flags	u.__fl_common.flowic_flags
 #define flowi_secid	u.__fl_common.flowic_secid
+#define flowi_uid	u.__fl_common.flowic_uid
 } __attribute__((__aligned__(BITS_PER_LONG/8)));
 
 static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4)
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 7235ae7..9528e10f 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -88,6 +88,7 @@
 				acked	   : 1,
 				no_srccheck: 1;
 	kmemcheck_bitfield_end(flags);
+	u32                     ir_mark;
 	struct ip_options_rcu	*opt;
 };
 
@@ -96,6 +97,14 @@
 	return (struct inet_request_sock *)sk;
 }
 
+static inline u32 inet_request_mark(struct sock *sk, struct sk_buff *skb)
+{
+	if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)
+		return skb->mark;
+
+	return sk->sk_mark;
+}
+
 struct inet_cork {
 	unsigned int		flags;
 	__be32			addr;
diff --git a/include/net/ip.h b/include/net/ip.h
index ea9be6b..a04070b 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -155,6 +155,7 @@
 				/* -1 if not needed */ 
 	int	    bound_dev_if;
 	u8  	    tos;
+	kuid_t	    uid;
 }; 
 
 #define IP_REPLY_ARG_NOSRCCHECK 1
@@ -227,6 +228,9 @@
 
 extern void ip_static_sysctl_init(void);
 
+#define IP4_REPLY_MARK(net, mark) \
+	((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)
+
 static inline bool ip_is_fragment(const struct iphdr *iph)
 {
 	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 8d977b3..6be6deb 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -136,7 +136,7 @@
 					      const struct in6_addr *gwaddr);
 
 extern void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
-			    int oif, u32 mark);
+			    int oif, u32 mark, kuid_t uid);
 extern void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk,
 			       __be32 mtu);
 extern void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 413e23b..8645f1b 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -111,6 +111,9 @@
 
 #define	IP6_MF	0x0001
 
+#define IP6_REPLY_MARK(net, mark) \
+	((net)->ipv6.sysctl.fwmark_reflect ? (mark) : 0)
+
 #include <net/sock.h>
 
 /* sysctls */
@@ -260,6 +263,12 @@
 
 extern void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);
 
+int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
+			       struct icmp6hdr *thdr, int len);
+
+struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb,
+				      struct sock *sk, struct flowi6 *fl6);
+
 extern int 			ip6_ra_control(struct sock *sk, int sel);
 
 extern int			ipv6_parse_hopopts(struct sk_buff *skb);
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index b176978..b064d6d 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -9,6 +9,7 @@
 #include <linux/list.h>
 #include <linux/sysctl.h>
 
+#include <net/flow.h>
 #include <net/netns/core.h>
 #include <net/netns/mib.h>
 #include <net/netns/unix.h>
@@ -120,14 +121,6 @@
 	atomic_t		rt_genid;
 };
 
-/*
- * ifindex generation is per-net namespace, and loopback is
- * always the 1st device in ns (see net_dev_init), thus any
- * loopback device should get ifindex 1
- */
-
-#define LOOPBACK_IFINDEX	1
-
 #include <linux/seq_file_net.h>
 
 /* Init's network namespace */
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 03e6378..4f6c720 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -65,6 +65,9 @@
 
 	int sysctl_tcp_ecn;
 
+	int sysctl_fwmark_reflect;
+	int sysctl_tcp_fwmark_accept;
+
 	kgid_t sysctl_ping_group_range[2];
 	long sysctl_tcp_mem[3];
 
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 005e2c2..4b9f99e 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -28,6 +28,7 @@
 	int ip6_rt_mtu_expires;
 	int ip6_rt_min_advmss;
 	int icmpv6_time;
+	int fwmark_reflect;
 };
 
 struct netns_ipv6 {
diff --git a/include/net/ping.h b/include/net/ping.h
index 682b5ae..d9ec515 100644
--- a/include/net/ping.h
+++ b/include/net/ping.h
@@ -13,6 +13,7 @@
 #ifndef _PING_H
 #define _PING_H
 
+#include <net/icmp.h>
 #include <net/netns/hash.h>
 
 /* PING_HTABLE_SIZE must be power of 2 */
@@ -28,6 +29,19 @@
  */
 #define GID_T_MAX (((gid_t)~0U) >> 1)
 
+/* Compatibility glue so we can support IPv6 when it's compiled as a module */
+struct pingv6_ops {
+	int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len,
+				int *addr_len);
+	int (*ip6_datagram_recv_ctl)(struct sock *sk, struct msghdr *msg,
+				     struct sk_buff *skb);
+	int (*icmpv6_err_convert)(u8 type, u8 code, int *err);
+	void (*ipv6_icmp_error)(struct sock *sk, struct sk_buff *skb, int err,
+				__be16 port, u32 info, u8 *payload);
+	int (*ipv6_chk_addr)(struct net *net, const struct in6_addr *addr,
+			     const struct net_device *dev, int strict);
+};
+
 struct ping_table {
 	struct hlist_nulls_head	hash[PING_HTABLE_SIZE];
 	rwlock_t		lock;
@@ -39,10 +53,39 @@
 };
 
 extern struct proto ping_prot;
+extern struct ping_table ping_table;
+#if IS_ENABLED(CONFIG_IPV6)
+extern struct pingv6_ops pingv6_ops;
+#endif
 
+struct pingfakehdr {
+	struct icmphdr icmph;
+	struct iovec *iov;
+	sa_family_t family;
+	__wsum wcheck;
+};
 
-extern void ping_rcv(struct sk_buff *);
-extern void ping_err(struct sk_buff *, u32 info);
+int  ping_get_port(struct sock *sk, unsigned short ident);
+void ping_hash(struct sock *sk);
+void ping_unhash(struct sock *sk);
+
+int  ping_init_sock(struct sock *sk);
+void ping_close(struct sock *sk, long timeout);
+int  ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+void ping_err(struct sk_buff *skb, int offset, u32 info);
+int  ping_getfrag(void *from, char *to, int offset, int fraglen, int odd,
+		  struct sk_buff *);
+
+int  ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+		  size_t len, int noblock, int flags, int *addr_len);
+int  ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
+			 void *user_icmph, size_t icmph_len);
+int  ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+		     size_t len);
+int  ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+		     size_t len);
+int  ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+void ping_rcv(struct sk_buff *skb);
 
 #ifdef CONFIG_PROC_FS
 extern int __init ping_proc_init(void);
@@ -50,6 +93,7 @@
 #endif
 
 void __init ping_init(void);
-
+int  __init pingv6_init(void);
+void pingv6_exit(void);
 
 #endif /* _PING_H */
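The glue table is presumably filled in by the IPv6 module at init time, along the lines of this sketch; the hook assignments mirror the declared signatures and the protosw registration is elided:

/* Hedged sketch: IPv6 side populating the compatibility table. */
int __init pingv6_init(void)
{
	pingv6_ops.ipv6_recv_error = ipv6_recv_error;
	pingv6_ops.ip6_datagram_recv_ctl = ip6_datagram_recv_ctl;
	pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
	pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
	pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
	/* protosw registration elided */
	return 0;
}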
diff --git a/include/net/route.h b/include/net/route.h
index 2ea40c1..647bb2a 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -142,7 +142,7 @@
 	flowi4_init_output(fl4, oif, sk ? sk->sk_mark : 0, tos,
 			   RT_SCOPE_UNIVERSE, proto,
 			   sk ? inet_sk_flowi_flags(sk) : 0,
-			   daddr, saddr, dport, sport);
+			   daddr, saddr, dport, sport, sk ? sock_i_uid(sk) : 0);
 	if (sk)
 		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
 	return ip_route_output_flow(net, fl4, sk);
@@ -253,7 +253,8 @@
 		flow_flags |= FLOWI_FLAG_CAN_SLEEP;
 
 	flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
-			   protocol, flow_flags, dst, src, dport, sport);
+			   protocol, flow_flags, dst, src, dport, sport,
+			   sock_i_uid(sk));
 }
 
 static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 79cd118..7de75c3 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -287,6 +287,7 @@
 extern int sysctl_tcp_early_retrans;
 extern int sysctl_tcp_limit_output_bytes;
 extern int sysctl_tcp_challenge_ack_limit;
+extern int sysctl_tcp_default_init_rwnd;
 extern int sysctl_tcp_min_tso_segs;
 
 extern atomic_long_t tcp_memory_allocated;
@@ -1556,6 +1557,8 @@
 extern int tcp_gro_complete(struct sk_buff *skb);
 extern int tcp4_gro_complete(struct sk_buff *skb);
 
+extern int tcp_nuke_addr(struct net *net, struct sockaddr *addr);
+
 #ifdef CONFIG_PROC_FS
 extern int tcp4_proc_init(void);
 extern void tcp4_proc_exit(void);
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
index 938b7fd..eb40e71 100644
--- a/include/net/transp_v6.h
+++ b/include/net/transp_v6.h
@@ -11,6 +11,7 @@
 extern struct proto udpv6_prot;
 extern struct proto udplitev6_prot;
 extern struct proto tcpv6_prot;
+extern struct proto pingv6_prot;
 
 struct flowi6;
 
@@ -21,6 +22,8 @@
 extern void				ipv6_frag_exit(void);
 
 /* transport protocols */
+extern int				pingv6_init(void);
+extern void				pingv6_exit(void);
 extern int				rawv6_init(void);
 extern void				rawv6_exit(void);
 extern int				udpv6_init(void);
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index ae6c3b8..9307359 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -32,6 +32,7 @@
 #include <sound/pcm.h>
 
 struct snd_compr_ops;
+struct snd_pcm_substream;
 
 /**
  * struct snd_compr_runtime: runtime stream description
@@ -48,6 +49,8 @@
  *	the ring buffer
  * @total_bytes_transferred: cumulative bytes transferred by offload DSP
  * @sleep: poll sleep
+ * @wait: drain wait queue
+ * @drain_wake: condition for drain wake
+ * @fe_substream: associated front-end PCM substream
  */
 struct snd_compr_runtime {
 	snd_pcm_state_t state;
@@ -59,6 +62,9 @@
 	u64 total_bytes_available;
 	u64 total_bytes_transferred;
 	wait_queue_head_t sleep;
+	wait_queue_head_t wait;
+	unsigned int drain_wake;
+	struct snd_pcm_substream *fe_substream;
 	void *private_data;
 };
 
@@ -157,6 +163,7 @@
 int snd_compress_deregister(struct snd_compr *device);
 int snd_compress_new(struct snd_card *card, int device,
 			int type, struct snd_compr *compr);
+int snd_compr_stop(struct snd_compr_stream *stream);
 
 /* dsp driver callback apis
  * For playback: driver should call snd_compress_fragment_elapsed() to let the
@@ -173,11 +180,10 @@
 
 static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
 {
-	if (snd_BUG_ON(!stream))
-		return;
+	snd_BUG_ON(!stream);
 
-	stream->runtime->state = SNDRV_PCM_STATE_SETUP;
-	wake_up(&stream->runtime->sleep);
+	stream->runtime->drain_wake = 1;
+	wake_up(&stream->runtime->wait);
 }
 
 #endif
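On the wait side of this change, a drain path can pair with the notify above roughly as follows; the core-side wait shown here is an assumed sketch, not lifted from this patch:

/* Hedged sketch: drain path blocking on the new wait queue. */
static int my_wait_for_drain(struct snd_compr_stream *stream)
{
	struct snd_compr_runtime *runtime = stream->runtime;

	runtime->drain_wake = 0;
	/* ... kick off the drain in the DSP ... */
	wait_event(runtime->wait, runtime->drain_wake);
	return 0;
}

/* the DSP completion handler then calls:
 *	snd_compr_drain_notify(stream);
 */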
diff --git a/include/sound/core.h b/include/sound/core.h
index 97cd9c3..91d79b2 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -148,6 +148,11 @@
 	struct snd_mixer_oss *mixer_oss;
 	int mixer_oss_change_count;
 #endif
+
+#if IS_ENABLED(CONFIG_SND_EFFECTS_OFFLOAD)
+	struct snd_effect_ops *effect_ops;
+	struct mutex effect_lock;	/* effect lock */
+#endif
 };
 
 #ifdef CONFIG_PM
diff --git a/include/sound/effect_driver.h b/include/sound/effect_driver.h
new file mode 100644
index 0000000..410e5f9
--- /dev/null
+++ b/include/sound/effect_driver.h
@@ -0,0 +1,63 @@
+/*
+ *  effect_driver.h - effect offload driver APIs
+ *
+ *  Copyright (C) 2013 Intel Corporation
+ *  Authors:	Vinod Koul <vinod.koul@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+#ifndef __EFFECT_DRIVER_H
+#define __EFFECT_DRIVER_H
+
+#include <sound/effect_offload.h>
+
+struct snd_effect_ops {
+	int (*create)(struct snd_card *card, struct snd_effect *effect);
+	int (*destroy)(struct snd_card *card, struct snd_effect *effect);
+	int (*set_params)(struct snd_card *card,
+				struct snd_effect_params *params);
+	int (*get_params)(struct snd_card *card,
+				struct snd_effect_params *params);
+	int (*query_num_effects)(struct snd_card *card);
+	int (*query_effect_caps)(struct snd_card *card,
+					struct snd_effect_caps *caps);
+};
+
+#if IS_ENABLED(CONFIG_SND_EFFECTS_OFFLOAD)
+int snd_effect_register(struct snd_card *card, struct snd_effect_ops *ops);
+int snd_effect_deregister(struct snd_card *card);
+#else
+static inline int snd_effect_register(struct snd_card *card,
+					struct snd_effect_ops *ops)
+{
+	return -ENODEV;
+}
+static inline int snd_effect_deregister(struct snd_card *card)
+{
+	return -ENODEV;
+}
+#endif
+
+/* IOCTL fns */
+int snd_ctl_effect_create(struct snd_card *card, void *arg);
+int snd_ctl_effect_destroy(struct snd_card *card, void *arg);
+int snd_ctl_effect_set_params(struct snd_card *card, void *arg);
+int snd_ctl_effect_get_params(struct snd_card *card, void *arg);
+int snd_ctl_effect_query_num_effects(struct snd_card *card, void *arg);
+int snd_ctl_effect_query_effect_caps(struct snd_card *card, void *arg);
+#endif
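Registering the effect ops is a single call; a sketch, with the handler bodies hypothetical:

/* Hedged sketch: hypothetical DSP driver wiring up effect ops. */
static int my_effect_create(struct snd_card *card,
			    struct snd_effect *effect)
{
	return 0;	/* program the DSP here */
}

static struct snd_effect_ops my_effect_ops = {
	.create	= my_effect_create,
	/* .destroy, .set_params, ... as supported */
};

static int my_card_late_probe(struct snd_card *card)
{
	/* returns -ENODEV when SND_EFFECTS_OFFLOAD is disabled */
	return snd_effect_register(card, &my_effect_ops);
}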
diff --git a/include/sound/intel_sst_ioctl.h b/include/sound/intel_sst_ioctl.h
new file mode 100644
index 0000000..6025ef9
--- /dev/null
+++ b/include/sound/intel_sst_ioctl.h
@@ -0,0 +1,63 @@
+#ifndef __INTEL_SST_IOCTL_H__
+#define __INTEL_SST_IOCTL_H__
+/*
+ *  intel_sst_ioctl.h - Intel SST Driver for audio engine
+ *
+ *  Copyright (C) 2008-10 Intel Corporation
+ *  Authors:	Vinod Koul <vinod.koul@intel.com>
+ *		Harsha Priya <priya.harsha@intel.com>
+ *		Dharageswari R <dharageswari.r@intel.com>
+ *		KP Jeeja <jeeja.kp@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This file defines all sst ioctls
+ */
+
+/* codec and post/pre processing related info */
+
+#include <linux/types.h>
+
+/* Pre and post processing params structure */
+struct snd_ppp_params {
+	__u8			algo_id; /* Post/Pre processing algorithm ID */
+	__u8			str_id;	/* Only 5 bits used; 0 - 31 are valid */
+	__u8			enable;	/* 0 = disable, 1 = enable */
+	__u8			operation; /* 0 = set_algo, 1 = get_algo */
+	__u32			size;	/* Size of parameters for all blocks */
+	void			*params;
+} __packed;
+
+struct snd_sst_driver_info {
+	__u32 max_streams;
+};
+
+struct snd_sst_tuning_params {
+	__u8 type;
+	__u8 str_id;
+	__u8 size;
+	__u8 rsvd;
+	__u64 addr;
+} __packed;
+
+/*IOCTL defined here */
+/*SST common ioctls */
+#define SNDRV_SST_DRIVER_INFO	_IOR('L', 0x10, struct snd_sst_driver_info)
+#define SNDRV_SST_SET_ALGO	_IOW('L', 0x30,  struct snd_ppp_params)
+#define SNDRV_SST_GET_ALGO	_IOWR('L', 0x31,  struct snd_ppp_params)
+#define SNDRV_SST_TUNING_PARAMS	_IOW('L', 0x32,  struct snd_sst_tuning_params)
+#endif /* __INTEL_SST_IOCTL_H__ */
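From userspace these ioctls would be exercised roughly as follows; the device node path and the userspace include path are assumptions, not taken from this patch:

/* Hedged sketch: hypothetical userspace caller of the SST ioctls. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <sound/intel_sst_ioctl.h>	/* install path assumed */

int main(void)
{
	struct snd_sst_driver_info info;
	int fd = open("/dev/intel_sst_ctrl", O_RDONLY); /* path assumed */

	if (fd < 0)
		return 1;
	if (ioctl(fd, SNDRV_SST_DRIVER_INFO, &info) == 0)
		printf("max_streams: %u\n", info.max_streams);
	close(fd);
	return 0;
}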
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index b48792f..6f01136 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -285,6 +285,7 @@
 	unsigned long hw_ptr_jiffies;	/* Time when hw_ptr is updated */
 	unsigned long hw_ptr_buffer_jiffies; /* buffer time in jiffies */
 	snd_pcm_sframes_t delay;	/* extra delay; typically FIFO size */
+	snd_pcm_sframes_t soc_delay;    /* extra delay; typically delay incurred in the SoC */
 	u64 hw_ptr_wrap;                /* offset for hw_ptr due to boundary wrap-around */
 
 	/* -- HW params -- */
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index ae9a227..41e9df9 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -231,8 +231,8 @@
 	struct snd_soc_dai_driver *driver;
 
 	/* DAI runtime info */
-	unsigned int capture_active:1;		/* stream is in use */
-	unsigned int playback_active:1;		/* stream is in use */
+	unsigned int capture_active;		/* capture streams in use */
+	unsigned int playback_active;		/* playback streams in use */
 	unsigned int symmetric_rates:1;
 	struct snd_pcm_runtime *runtime;
 	unsigned int active;
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 385c632..f48d25c 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -422,6 +422,8 @@
 int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
 	struct snd_soc_dapm_widget_list **list);
 
+struct snd_soc_codec *snd_soc_dapm_kcontrol_codec(struct snd_kcontrol *kcontrol);
+
 /* dapm widget types */
 enum snd_soc_dapm_type {
 	snd_soc_dapm_input = 0,		/* input pin */
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
index 04598f1..a2e15ca 100644
--- a/include/sound/soc-dpcm.h
+++ b/include/sound/soc-dpcm.h
@@ -11,6 +11,7 @@
 #ifndef __LINUX_SND_SOC_DPCM_H
 #define __LINUX_SND_SOC_DPCM_H
 
+#include <linux/slab.h>
 #include <linux/list.h>
 #include <sound/pcm.h>
 
@@ -135,4 +136,25 @@
 int soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd);
 int soc_dpcm_runtime_update(struct snd_soc_dapm_widget *);
 
+int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
+	int stream, struct snd_soc_dapm_widget_list **list_);
+int dpcm_process_paths(struct snd_soc_pcm_runtime *fe,
+	int stream, struct snd_soc_dapm_widget_list **list, int new);
+int dpcm_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream);
+int dpcm_be_dai_shutdown(struct snd_soc_pcm_runtime *fe, int stream);
+void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream);
+void dpcm_clear_pending_state(struct snd_soc_pcm_runtime *fe, int stream);
+int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream);
+int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream);
+int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream, int cmd);
+int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream);
+int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
+	int event);
+
+static inline void dpcm_path_put(struct snd_soc_dapm_widget_list **list)
+{
+	kfree(*list);
+}
+
 #endif
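These exported helpers are presumably called from a front end's connect path; a heavily simplified sketch of the get/process/put flow, under the assumption that the caller owns the widget list for the duration:

/* Hedged sketch: connecting an FE to BEs via the DPCM helpers. */
static int my_fe_connect(struct snd_soc_pcm_runtime *fe, int stream)
{
	struct snd_soc_dapm_widget_list *list;
	int paths;

	paths = dpcm_path_get(fe, stream, &list);
	if (paths < 0)
		return paths;

	dpcm_process_paths(fe, stream, &list, 1); /* 1 = new connections */
	dpcm_path_put(&list);
	return 0;
}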
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 85c1522..9338da8 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -26,6 +26,7 @@
 #include <sound/compress_driver.h>
 #include <sound/control.h>
 #include <sound/ac97_codec.h>
+#include <sound/effect_driver.h>
 
 /*
  * Convenience kcontrol builders
@@ -92,6 +93,12 @@
 		{.reg = xreg, .rreg = xreg, .shift = xshift, \
 		 .rshift = xshift, .min = xmin, .max = xmax, \
 		 .platform_max = xmax, .invert = xinvert} }
+#define SND_SOC_BYTES_EXT(xname, xcount, xhandler_get, xhandler_put) \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+	.info = snd_soc_info_bytes_ext, \
+	.get = xhandler_get, .put = xhandler_put, \
+	.private_value = (unsigned long)&(struct soc_bytes_ext) \
+		{.max = xcount} }
 #define SOC_DOUBLE(xname, reg, shift_left, shift_right, max, invert) \
 {	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
 	.info = snd_soc_info_volsw, .get = snd_soc_get_volsw, \
@@ -420,6 +427,21 @@
 		const char *dai_link, int stream);
 struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
 		const char *dai_link);
+#if IS_ENABLED(CONFIG_SND_EFFECTS_OFFLOAD)
+int snd_soc_register_effect(struct snd_soc_card *card,
+				struct snd_effect_ops *ops);
+int snd_soc_unregister_effect(struct snd_soc_card *card);
+#else
+static inline int snd_soc_register_effect(struct snd_soc_card *card,
+					struct snd_effect_ops *ops)
+{
+	return -ENODEV;
+}
+static inline int snd_soc_unregister_effect(struct snd_soc_card *card)
+{
+	return -ENODEV;
+}
+#endif
 
 /* Utility functions to get clock rates from various things */
 int snd_soc_calc_frame_size(int sample_size, int channels, int tdm_slots);
@@ -538,6 +560,8 @@
 	struct snd_ctl_elem_value *ucontrol);
 int snd_soc_put_strobe(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *ucontrol);
+int snd_soc_info_bytes_ext(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_info *ucontrol);
 
 /**
  * struct snd_soc_reg_access - Describes whether a given register is
@@ -594,6 +618,7 @@
  * @name:         gpio name
  * @report:       value to report when jack detected
  * @invert:       report presence in low state
+ * @irq_flags:	  interrupt flags for the GPIO IRQ line
  * @debouce_time: debouce time in ms
  * @wake:	  enable as wake source
  * @jack_status_check: callback function which overrides the detection
@@ -608,6 +633,7 @@
 	int invert;
 	int debounce_time;
 	bool wake;
+	unsigned long irq_flags;
 
 	struct snd_soc_jack *jack;
 	struct delayed_work work;
@@ -932,6 +958,10 @@
 	/* machine stream operations */
 	const struct snd_soc_ops *ops;
 	const struct snd_soc_compr_ops *compr_ops;
+
+	/* number of substreams */
+	unsigned int playback_count;
+	unsigned int capture_count;
 };
 
 struct snd_soc_codec_conf {
@@ -1063,6 +1093,7 @@
 
 	/* Dynamic PCM BE runtime data */
 	struct snd_soc_dpcm_runtime dpcm[2];
+	int fe_compr;
 
 	long pmdown_time;
 	unsigned char pop_wait:1;
@@ -1100,6 +1131,10 @@
 	unsigned int regbase, regcount, nbits, invert;
 };
 
+struct soc_bytes_ext {
+	int max;
+};
+
 /* enumerated kcontrol */
 struct soc_enum {
 	unsigned short reg;
diff --git a/include/trace/events/cpufreq_interactive.h b/include/trace/events/cpufreq_interactive.h
new file mode 100644
index 0000000..951e6ca
--- /dev/null
+++ b/include/trace/events/cpufreq_interactive.h
@@ -0,0 +1,112 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpufreq_interactive
+
+#if !defined(_TRACE_CPUFREQ_INTERACTIVE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CPUFREQ_INTERACTIVE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(set,
+	TP_PROTO(u32 cpu_id, unsigned long targfreq,
+	         unsigned long actualfreq),
+	TP_ARGS(cpu_id, targfreq, actualfreq),
+
+	TP_STRUCT__entry(
+	    __field(          u32, cpu_id    )
+	    __field(unsigned long, targfreq   )
+	    __field(unsigned long, actualfreq )
+	   ),
+
+	TP_fast_assign(
+	    __entry->cpu_id = (u32) cpu_id;
+	    __entry->targfreq = targfreq;
+	    __entry->actualfreq = actualfreq;
+	),
+
+	TP_printk("cpu=%u targ=%lu actual=%lu",
+	      __entry->cpu_id, __entry->targfreq,
+	      __entry->actualfreq)
+);
+
+DEFINE_EVENT(set, cpufreq_interactive_setspeed,
+	TP_PROTO(u32 cpu_id, unsigned long targfreq,
+	     unsigned long actualfreq),
+	TP_ARGS(cpu_id, targfreq, actualfreq)
+);
+
+DECLARE_EVENT_CLASS(loadeval,
+	    TP_PROTO(unsigned long cpu_id, unsigned long load,
+		     unsigned long curtarg, unsigned long curactual,
+		     unsigned long newtarg),
+		    TP_ARGS(cpu_id, load, curtarg, curactual, newtarg),
+
+	    TP_STRUCT__entry(
+		    __field(unsigned long, cpu_id    )
+		    __field(unsigned long, load      )
+		    __field(unsigned long, curtarg   )
+		    __field(unsigned long, curactual )
+		    __field(unsigned long, newtarg   )
+	    ),
+
+	    TP_fast_assign(
+		    __entry->cpu_id = cpu_id;
+		    __entry->load = load;
+		    __entry->curtarg = curtarg;
+		    __entry->curactual = curactual;
+		    __entry->newtarg = newtarg;
+	    ),
+
+	    TP_printk("cpu=%lu load=%lu cur=%lu actual=%lu targ=%lu",
+		      __entry->cpu_id, __entry->load, __entry->curtarg,
+		      __entry->curactual, __entry->newtarg)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_target,
+	    TP_PROTO(unsigned long cpu_id, unsigned long load,
+		     unsigned long curtarg, unsigned long curactual,
+		     unsigned long newtarg),
+	    TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_already,
+	    TP_PROTO(unsigned long cpu_id, unsigned long load,
+		     unsigned long curtarg, unsigned long curactual,
+		     unsigned long newtarg),
+	    TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_notyet,
+	    TP_PROTO(unsigned long cpu_id, unsigned long load,
+		     unsigned long curtarg, unsigned long curactual,
+		     unsigned long newtarg),
+	    TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+);
+
+TRACE_EVENT(cpufreq_interactive_boost,
+	    TP_PROTO(const char *s),
+	    TP_ARGS(s),
+	    TP_STRUCT__entry(
+		    __string(s, s)
+	    ),
+	    TP_fast_assign(
+		    __assign_str(s, s);
+	    ),
+	    TP_printk("%s", __get_str(s))
+);
+
+TRACE_EVENT(cpufreq_interactive_unboost,
+	    TP_PROTO(const char *s),
+	    TP_ARGS(s),
+	    TP_STRUCT__entry(
+		    __string(s, s)
+	    ),
+	    TP_fast_assign(
+		    __assign_str(s, s);
+	    ),
+	    TP_printk("%s", __get_str(s))
+);
+
+#endif /* _TRACE_CPUFREQ_INTERACTIVE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
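
For illustration, a minimal sketch of how a governor would emit the events
declared above -- assuming the usual tracepoint pattern where exactly one
compilation unit defines CREATE_TRACE_POINTS before including this header;
the hook below is hypothetical, not part of the patch:

    /* e.g. in the governor's .c file */
    #define CREATE_TRACE_POINTS
    #include <trace/events/cpufreq_interactive.h>

    static void example_update_speed(u32 cpu, unsigned long target,
                                     unsigned long actual)
    {
        /* fires the "set" event class through its setspeed instance */
        trace_cpufreq_interactive_setspeed(cpu, target, actual);
    }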
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 8ee15b9..ff4bd1b 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -14,11 +14,67 @@
 struct ext4_inode_info;
 struct mpage_da_data;
 struct ext4_map_blocks;
-struct ext4_extent;
 struct extent_status;
 
 #define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
 
+#define show_mballoc_flags(flags) __print_flags(flags, "|",	\
+	{ EXT4_MB_HINT_MERGE,		"HINT_MERGE" },		\
+	{ EXT4_MB_HINT_RESERVED,	"HINT_RESV" },		\
+	{ EXT4_MB_HINT_METADATA,	"HINT_MDATA" },		\
+	{ EXT4_MB_HINT_FIRST,		"HINT_FIRST" },		\
+	{ EXT4_MB_HINT_BEST,		"HINT_BEST" },		\
+	{ EXT4_MB_HINT_DATA,		"HINT_DATA" },		\
+	{ EXT4_MB_HINT_NOPREALLOC,	"HINT_NOPREALLOC" },	\
+	{ EXT4_MB_HINT_GROUP_ALLOC,	"HINT_GRP_ALLOC" },	\
+	{ EXT4_MB_HINT_GOAL_ONLY,	"HINT_GOAL_ONLY" },	\
+	{ EXT4_MB_HINT_TRY_GOAL,	"HINT_TRY_GOAL" },	\
+	{ EXT4_MB_DELALLOC_RESERVED,	"DELALLOC_RESV" },	\
+	{ EXT4_MB_STREAM_ALLOC,		"STREAM_ALLOC" },	\
+	{ EXT4_MB_USE_ROOT_BLOCKS,	"USE_ROOT_BLKS" },	\
+	{ EXT4_MB_USE_RESERVED,		"USE_RESV" })
+
+#define show_map_flags(flags) __print_flags(flags, "|",			\
+	{ EXT4_GET_BLOCKS_CREATE,		"CREATE" },		\
+	{ EXT4_GET_BLOCKS_UNWRIT_EXT,		"UNWRIT" },		\
+	{ EXT4_GET_BLOCKS_DELALLOC_RESERVE,	"DELALLOC" },		\
+	{ EXT4_GET_BLOCKS_PRE_IO,		"PRE_IO" },		\
+	{ EXT4_GET_BLOCKS_CONVERT,		"CONVERT" },		\
+	{ EXT4_GET_BLOCKS_METADATA_NOFAIL,	"METADATA_NOFAIL" },	\
+	{ EXT4_GET_BLOCKS_NO_NORMALIZE,		"NO_NORMALIZE" },	\
+	{ EXT4_GET_BLOCKS_KEEP_SIZE,		"KEEP_SIZE" },		\
+	{ EXT4_GET_BLOCKS_NO_LOCK,		"NO_LOCK" },		\
+	{ EXT4_GET_BLOCKS_NO_PUT_HOLE,		"NO_PUT_HOLE" })
+
+#define show_mflags(flags) __print_flags(flags, "",	\
+	{ EXT4_MAP_NEW,		"N" },			\
+	{ EXT4_MAP_MAPPED,	"M" },			\
+	{ EXT4_MAP_UNWRITTEN,	"U" },			\
+	{ EXT4_MAP_BOUNDARY,	"B" },			\
+	{ EXT4_MAP_FROM_CLUSTER, "C" })
+
+#define show_free_flags(flags) __print_flags(flags, "|",	\
+	{ EXT4_FREE_BLOCKS_METADATA,		"METADATA" },	\
+	{ EXT4_FREE_BLOCKS_FORGET,		"FORGET" },	\
+	{ EXT4_FREE_BLOCKS_VALIDATED,		"VALIDATED" },	\
+	{ EXT4_FREE_BLOCKS_NO_QUOT_UPDATE,	"NO_QUOTA" },	\
+	{ EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER,"1ST_CLUSTER" },\
+	{ EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER,	"LAST_CLUSTER" })
+
+#define show_extent_status(status) __print_flags(status, "",	\
+	{ EXTENT_STATUS_WRITTEN,	"W" },			\
+	{ EXTENT_STATUS_UNWRITTEN,	"U" },			\
+	{ EXTENT_STATUS_DELAYED,	"D" },			\
+	{ EXTENT_STATUS_HOLE,		"H" })
+
+#define show_falloc_mode(mode) __print_flags(mode, "|",		\
+	{ FALLOC_FL_KEEP_SIZE,		"KEEP_SIZE"},		\
+	{ FALLOC_FL_PUNCH_HOLE,		"PUNCH_HOLE"},		\
+	{ FALLOC_FL_NO_HIDE_STALE,	"NO_HIDE_STALE"},	\
+	{ FALLOC_FL_COLLAPSE_RANGE,	"COLLAPSE_RANGE"},	\
+	{ FALLOC_FL_ZERO_RANGE,		"ZERO_RANGE"})
+
+
 TRACE_EVENT(ext4_free_inode,
 	TP_PROTO(struct inode *inode),
 
@@ -281,7 +337,7 @@
 	TP_ARGS(inode, pos, len, copied)
 );
 
-TRACE_EVENT(ext4_da_writepages,
+TRACE_EVENT(ext4_writepages,
 	TP_PROTO(struct inode *inode, struct writeback_control *wbc),
 
 	TP_ARGS(inode, wbc),
@@ -324,46 +380,62 @@
 );
 
 TRACE_EVENT(ext4_da_write_pages,
-	TP_PROTO(struct inode *inode, struct mpage_da_data *mpd),
+	TP_PROTO(struct inode *inode, pgoff_t first_page,
+		 struct writeback_control *wbc),
 
-	TP_ARGS(inode, mpd),
+	TP_ARGS(inode, first_page, wbc),
 
 	TP_STRUCT__entry(
 		__field(	dev_t,	dev			)
 		__field(	ino_t,	ino			)
-		__field(	__u64,	b_blocknr		)
-		__field(	__u32,	b_size			)
-		__field(	__u32,	b_state			)
-		__field(	unsigned long,	first_page	)
-		__field(	int,	io_done			)
-		__field(	int,	pages_written		)
-		__field(	int,	sync_mode		)
+		__field(      pgoff_t,	first_page		)
+		__field(	 long,	nr_to_write		)
+		__field(	  int,	sync_mode		)
 	),
 
 	TP_fast_assign(
 		__entry->dev		= inode->i_sb->s_dev;
 		__entry->ino		= inode->i_ino;
-		__entry->b_blocknr	= mpd->b_blocknr;
-		__entry->b_size		= mpd->b_size;
-		__entry->b_state	= mpd->b_state;
-		__entry->first_page	= mpd->first_page;
-		__entry->io_done	= mpd->io_done;
-		__entry->pages_written	= mpd->pages_written;
-		__entry->sync_mode	= mpd->wbc->sync_mode;
+		__entry->first_page	= first_page;
+		__entry->nr_to_write	= wbc->nr_to_write;
+		__entry->sync_mode	= wbc->sync_mode;
 	),
 
-	TP_printk("dev %d,%d ino %lu b_blocknr %llu b_size %u b_state 0x%04x "
-		  "first_page %lu io_done %d pages_written %d sync_mode %d",
+	TP_printk("dev %d,%d ino %lu first_page %lu nr_to_write %ld "
+		  "sync_mode %d",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  __entry->b_blocknr, __entry->b_size,
-		  __entry->b_state, __entry->first_page,
-		  __entry->io_done, __entry->pages_written,
-		  __entry->sync_mode
-                  )
+		  (unsigned long) __entry->ino, __entry->first_page,
+		  __entry->nr_to_write, __entry->sync_mode)
 );
 
-TRACE_EVENT(ext4_da_writepages_result,
+TRACE_EVENT(ext4_da_write_pages_extent,
+	TP_PROTO(struct inode *inode, struct ext4_map_blocks *map),
+
+	TP_ARGS(inode, map),
+
+	TP_STRUCT__entry(
+		__field(	dev_t,	dev			)
+		__field(	ino_t,	ino			)
+		__field(	__u64,	lblk			)
+		__field(	__u32,	len			)
+		__field(	__u32,	flags			)
+	),
+
+	TP_fast_assign(
+		__entry->dev		= inode->i_sb->s_dev;
+		__entry->ino		= inode->i_ino;
+		__entry->lblk		= map->m_lblk;
+		__entry->len		= map->m_len;
+		__entry->flags		= map->m_flags;
+	),
+
+	TP_printk("dev %d,%d ino %lu lblk %llu len %u flags %s",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  (unsigned long) __entry->ino, __entry->lblk, __entry->len,
+		  show_mflags(__entry->flags))
+);
+
+TRACE_EVENT(ext4_writepages_result,
 	TP_PROTO(struct inode *inode, struct writeback_control *wbc,
 			int ret, int pages_written),
 
@@ -444,16 +516,16 @@
 );
 
 DECLARE_EVENT_CLASS(ext4_invalidatepage_op,
-	TP_PROTO(struct page *page, unsigned long offset),
+	TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
 
-	TP_ARGS(page, offset),
+	TP_ARGS(page, offset, length),
 
 	TP_STRUCT__entry(
 		__field(	dev_t,	dev			)
 		__field(	ino_t,	ino			)
 		__field(	pgoff_t, index			)
-		__field(	unsigned long, offset		)
-
+		__field(	unsigned int, offset		)
+		__field(	unsigned int, length		)
 	),
 
 	TP_fast_assign(
@@ -461,24 +533,26 @@
 		__entry->ino	= page->mapping->host->i_ino;
 		__entry->index	= page->index;
 		__entry->offset	= offset;
+		__entry->length	= length;
 	),
 
-	TP_printk("dev %d,%d ino %lu page_index %lu offset %lu",
+	TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  (unsigned long) __entry->ino,
-		  (unsigned long) __entry->index, __entry->offset)
+		  (unsigned long) __entry->index,
+		  __entry->offset, __entry->length)
 );
 
 DEFINE_EVENT(ext4_invalidatepage_op, ext4_invalidatepage,
-	TP_PROTO(struct page *page, unsigned long offset),
+	TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
 
-	TP_ARGS(page, offset)
+	TP_ARGS(page, offset, length)
 );
 
 DEFINE_EVENT(ext4_invalidatepage_op, ext4_journalled_invalidatepage,
-	TP_PROTO(struct page *page, unsigned long offset),
+	TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
 
-	TP_ARGS(page, offset)
+	TP_ARGS(page, offset, length)
 );
 
 TRACE_EVENT(ext4_discard_blocks,
@@ -673,10 +747,10 @@
 		__entry->flags	= ar->flags;
 	),
 
-	TP_printk("dev %d,%d ino %lu flags %u len %u lblk %u goal %llu "
+	TP_printk("dev %d,%d ino %lu flags %s len %u lblk %u goal %llu "
 		  "lleft %u lright %u pleft %llu pright %llu ",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino, __entry->flags,
+		  (unsigned long) __entry->ino, show_mballoc_flags(__entry->flags),
 		  __entry->len, __entry->logical, __entry->goal,
 		  __entry->lleft, __entry->lright, __entry->pleft,
 		  __entry->pright)
@@ -715,10 +789,10 @@
 		__entry->flags	= ar->flags;
 	),
 
-	TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %u "
+	TP_printk("dev %d,%d ino %lu flags %s len %u block %llu lblk %u "
 		  "goal %llu lleft %u lright %u pleft %llu pright %llu",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino, __entry->flags,
+		  (unsigned long) __entry->ino, show_mballoc_flags(__entry->flags),
 		  __entry->len, __entry->block, __entry->logical,
 		  __entry->goal,  __entry->lleft, __entry->lright,
 		  __entry->pleft, __entry->pright)
@@ -748,11 +822,11 @@
 		__entry->mode		= inode->i_mode;
 	),
 
-	TP_printk("dev %d,%d ino %lu mode 0%o block %llu count %lu flags %d",
+	TP_printk("dev %d,%d ino %lu mode 0%o block %llu count %lu flags %s",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  (unsigned long) __entry->ino,
 		  __entry->mode, __entry->block, __entry->count,
-		  __entry->flags)
+		  show_free_flags(__entry->flags))
 );
 
 TRACE_EVENT(ext4_sync_file_enter,
@@ -903,7 +977,7 @@
 	),
 
 	TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u "
-		  "result %u/%d/%u@%u blks %u grps %u cr %u flags 0x%04x "
+		  "result %u/%d/%u@%u blks %u grps %u cr %u flags %s "
 		  "tail %u broken %u",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  (unsigned long) __entry->ino,
@@ -914,7 +988,7 @@
 		  __entry->result_group, __entry->result_start,
 		  __entry->result_len, __entry->result_logical,
 		  __entry->found, __entry->groups, __entry->cr,
-		  __entry->flags, __entry->tail,
+		  show_mballoc_flags(__entry->flags), __entry->tail,
 		  __entry->buddy ? 1 << __entry->buddy : 0)
 );
 
@@ -1260,7 +1334,7 @@
 		  __entry->rw, __entry->ret)
 );
 
-TRACE_EVENT(ext4_fallocate_enter,
+DECLARE_EVENT_CLASS(ext4__fallocate_mode,
 	TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode),
 
 	TP_ARGS(inode, offset, len, mode),
@@ -1268,23 +1342,45 @@
 	TP_STRUCT__entry(
 		__field(	dev_t,	dev			)
 		__field(	ino_t,	ino			)
-		__field(	loff_t,	pos			)
-		__field(	loff_t,	len			)
+		__field(	loff_t,	offset			)
+		__field(	loff_t, len			)
 		__field(	int,	mode			)
 	),
 
 	TP_fast_assign(
 		__entry->dev	= inode->i_sb->s_dev;
 		__entry->ino	= inode->i_ino;
-		__entry->pos	= offset;
+		__entry->offset	= offset;
 		__entry->len	= len;
 		__entry->mode	= mode;
 	),
 
-	TP_printk("dev %d,%d ino %lu pos %lld len %lld mode %d",
+	TP_printk("dev %d,%d ino %lu offset %lld len %lld mode %s",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino, __entry->pos,
-		  __entry->len, __entry->mode)
+		  (unsigned long) __entry->ino,
+		  __entry->offset, __entry->len,
+		  show_falloc_mode(__entry->mode))
+);
+
+DEFINE_EVENT(ext4__fallocate_mode, ext4_fallocate_enter,
+
+	TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode),
+
+	TP_ARGS(inode, offset, len, mode)
+);
+
+DEFINE_EVENT(ext4__fallocate_mode, ext4_punch_hole,
+
+	TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode),
+
+	TP_ARGS(inode, offset, len, mode)
+);
+
+DEFINE_EVENT(ext4__fallocate_mode, ext4_zero_range,
+
+	TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode),
+
+	TP_ARGS(inode, offset, len, mode)
 );
 
 TRACE_EVENT(ext4_fallocate_exit,
@@ -1316,31 +1412,6 @@
 		  __entry->ret)
 );
 
-TRACE_EVENT(ext4_punch_hole,
-	TP_PROTO(struct inode *inode, loff_t offset, loff_t len),
-
-	TP_ARGS(inode, offset, len),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	loff_t,	offset			)
-		__field(	loff_t, len			)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->offset	= offset;
-		__entry->len	= len;
-	),
-
-	TP_printk("dev %d,%d ino %lu offset %lld len %lld",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  __entry->offset, __entry->len)
-);
-
 TRACE_EVENT(ext4_unlink_enter,
 	TP_PROTO(struct inode *parent, struct dentry *dentry),
 
@@ -1425,7 +1496,7 @@
 	TP_ARGS(inode)
 );
 
-/* 'ux' is the uninitialized extent. */
+/* 'ux' is the unwritten extent. */
 TRACE_EVENT(ext4_ext_convert_to_initialized_enter,
 	TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
 		 struct ext4_extent *ux),
@@ -1461,7 +1532,7 @@
 );
 
 /*
- * 'ux' is the uninitialized extent.
+ * 'ux' is the unwritten extent.
  * 'ix' is the initialized extent to which blocks are transferred.
  */
 TRACE_EVENT(ext4_ext_convert_to_initialized_fastpath,
@@ -1528,10 +1599,10 @@
 		__entry->flags	= flags;
 	),
 
-	TP_printk("dev %d,%d ino %lu lblk %u len %u flags %u",
+	TP_printk("dev %d,%d ino %lu lblk %u len %u flags %s",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  (unsigned long) __entry->ino,
-		  __entry->lblk, __entry->len, __entry->flags)
+		  __entry->lblk, __entry->len, show_map_flags(__entry->flags))
 );
 
 DEFINE_EVENT(ext4__map_blocks_enter, ext4_ext_map_blocks_enter,
@@ -1549,47 +1620,53 @@
 );
 
 DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
-	TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int ret),
+	TP_PROTO(struct inode *inode, unsigned flags, struct ext4_map_blocks *map,
+		 int ret),
 
-	TP_ARGS(inode, map, ret),
+	TP_ARGS(inode, flags, map, ret),
 
 	TP_STRUCT__entry(
 		__field(	dev_t,		dev		)
 		__field(	ino_t,		ino		)
+		__field(	unsigned int,	flags		)
 		__field(	ext4_fsblk_t,	pblk		)
 		__field(	ext4_lblk_t,	lblk		)
 		__field(	unsigned int,	len		)
-		__field(	unsigned int,	flags		)
+		__field(	unsigned int,	mflags		)
 		__field(	int,		ret		)
 	),
 
 	TP_fast_assign(
 		__entry->dev    = inode->i_sb->s_dev;
 		__entry->ino    = inode->i_ino;
+		__entry->flags	= flags;
 		__entry->pblk	= map->m_pblk;
 		__entry->lblk	= map->m_lblk;
 		__entry->len	= map->m_len;
-		__entry->flags	= map->m_flags;
+		__entry->mflags	= map->m_flags;
 		__entry->ret	= ret;
 	),
 
-	TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u flags %x ret %d",
+	TP_printk("dev %d,%d ino %lu flags %s lblk %u pblk %llu len %u "
+		  "mflags %s ret %d",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  (unsigned long) __entry->ino,
-		  __entry->lblk, __entry->pblk,
-		  __entry->len, __entry->flags, __entry->ret)
+		  show_map_flags(__entry->flags), __entry->lblk, __entry->pblk,
+		  __entry->len, show_mflags(__entry->mflags), __entry->ret)
 );
 
 DEFINE_EVENT(ext4__map_blocks_exit, ext4_ext_map_blocks_exit,
-	TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int ret),
+	TP_PROTO(struct inode *inode, unsigned flags,
+		 struct ext4_map_blocks *map, int ret),
 
-	TP_ARGS(inode, map, ret)
+	TP_ARGS(inode, flags, map, ret)
 );
 
 DEFINE_EVENT(ext4__map_blocks_exit, ext4_ind_map_blocks_exit,
-	TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int ret),
+	TP_PROTO(struct inode *inode, unsigned flags,
+		 struct ext4_map_blocks *map, int ret),
 
-	TP_ARGS(inode, map, ret)
+	TP_ARGS(inode, flags, map, ret)
 );
 
 TRACE_EVENT(ext4_ext_load_extent,
@@ -1638,25 +1715,50 @@
 );
 
 TRACE_EVENT(ext4_journal_start,
-	TP_PROTO(struct super_block *sb, int nblocks, unsigned long IP),
+	TP_PROTO(struct super_block *sb, int blocks, int rsv_blocks,
+		 unsigned long IP),
 
-	TP_ARGS(sb, nblocks, IP),
+	TP_ARGS(sb, blocks, rsv_blocks, IP),
 
 	TP_STRUCT__entry(
 		__field(	dev_t,	dev			)
 		__field(unsigned long,	ip			)
-		__field(	int,	nblocks			)
+		__field(	  int,	blocks			)
+		__field(	  int,	rsv_blocks		)
 	),
 
 	TP_fast_assign(
-		__entry->dev	 = sb->s_dev;
-		__entry->ip	 = IP;
-		__entry->nblocks = nblocks;
+		__entry->dev		 = sb->s_dev;
+		__entry->ip		 = IP;
+		__entry->blocks		 = blocks;
+		__entry->rsv_blocks	 = rsv_blocks;
 	),
 
-	TP_printk("dev %d,%d nblocks %d caller %pF",
+	TP_printk("dev %d,%d blocks, %d rsv_blocks, %d caller %pF",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->nblocks, (void *)__entry->ip)
+		  __entry->blocks, __entry->rsv_blocks, (void *)__entry->ip)
+);
+
+TRACE_EVENT(ext4_journal_start_reserved,
+	TP_PROTO(struct super_block *sb, int blocks, unsigned long IP),
+
+	TP_ARGS(sb, blocks, IP),
+
+	TP_STRUCT__entry(
+		__field(	dev_t,	dev			)
+		__field(unsigned long,	ip			)
+		__field(	  int,	blocks			)
+	),
+
+	TP_fast_assign(
+		__entry->dev		 = sb->s_dev;
+		__entry->ip		 = IP;
+		__entry->blocks		 = blocks;
+	),
+
+	TP_printk("dev %d,%d blocks, %d caller %pF",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->blocks, (void *)__entry->ip)
 );
 
 DECLARE_EVENT_CLASS(ext4__trim,
@@ -1708,7 +1810,7 @@
 	TP_ARGS(sb, group, start, len)
 );
 
-TRACE_EVENT(ext4_ext_handle_uninitialized_extents,
+TRACE_EVENT(ext4_ext_handle_unwritten_extents,
 	TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int flags,
 		 unsigned int allocated, ext4_fsblk_t newblock),
 
@@ -1736,12 +1838,12 @@
 		__entry->newblk		= newblock;
 	),
 
-	TP_printk("dev %d,%d ino %lu m_lblk %u m_pblk %llu m_len %u flags %x "
+	TP_printk("dev %d,%d ino %lu m_lblk %u m_pblk %llu m_len %u flags %s "
 		  "allocated %d newblock %llu",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  (unsigned long) __entry->ino,
 		  (unsigned) __entry->lblk, (unsigned long long) __entry->pblk,
-		  __entry->len, __entry->flags,
+		  __entry->len, show_map_flags(__entry->flags),
 		  (unsigned int) __entry->allocated,
 		  (unsigned long long) __entry->newblk)
 );
@@ -1769,10 +1871,10 @@
 		__entry->ret	= ret;
 	),
 
-	TP_printk("dev %d,%d m_lblk %u m_pblk %llu m_len %u m_flags %u ret %d",
+	TP_printk("dev %d,%d m_lblk %u m_pblk %llu m_len %u m_flags %s ret %d",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  __entry->lblk, (unsigned long long) __entry->pblk,
-		  __entry->len, __entry->flags, __entry->ret)
+		  __entry->len, show_mflags(__entry->flags), __entry->ret)
 );
 
 TRACE_EVENT(ext4_ext_put_in_cache,
@@ -1926,7 +2028,7 @@
 TRACE_EVENT(ext4_remove_blocks,
 	    TP_PROTO(struct inode *inode, struct ext4_extent *ex,
 		ext4_lblk_t from, ext4_fsblk_t to,
-		ext4_fsblk_t partial_cluster),
+		long long partial_cluster),
 
 	TP_ARGS(inode, ex, from, to, partial_cluster),
 
@@ -1935,7 +2037,7 @@
 		__field(	ino_t,		ino	)
 		__field(	ext4_lblk_t,	from	)
 		__field(	ext4_lblk_t,	to	)
-		__field(	ext4_fsblk_t,	partial	)
+		__field(	long long,	partial	)
 		__field(	ext4_fsblk_t,	ee_pblk	)
 		__field(	ext4_lblk_t,	ee_lblk	)
 		__field(	unsigned short,	ee_len	)
@@ -1953,7 +2055,7 @@
 	),
 
 	TP_printk("dev %d,%d ino %lu extent [%u(%llu), %u]"
-		  "from %u to %u partial_cluster %u",
+		  "from %u to %u partial_cluster %lld",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  (unsigned long) __entry->ino,
 		  (unsigned) __entry->ee_lblk,
@@ -1961,19 +2063,20 @@
 		  (unsigned short) __entry->ee_len,
 		  (unsigned) __entry->from,
 		  (unsigned) __entry->to,
-		  (unsigned) __entry->partial)
+		  (long long) __entry->partial)
 );
 
 TRACE_EVENT(ext4_ext_rm_leaf,
 	TP_PROTO(struct inode *inode, ext4_lblk_t start,
-		 struct ext4_extent *ex, ext4_fsblk_t partial_cluster),
+		 struct ext4_extent *ex,
+		 long long partial_cluster),
 
 	TP_ARGS(inode, start, ex, partial_cluster),
 
 	TP_STRUCT__entry(
 		__field(	dev_t,		dev	)
 		__field(	ino_t,		ino	)
-		__field(	ext4_fsblk_t,	partial	)
+		__field(	long long,	partial	)
 		__field(	ext4_lblk_t,	start	)
 		__field(	ext4_lblk_t,	ee_lblk	)
 		__field(	ext4_fsblk_t,	ee_pblk	)
@@ -1991,14 +2094,14 @@
 	),
 
 	TP_printk("dev %d,%d ino %lu start_lblk %u last_extent [%u(%llu), %u]"
-		  "partial_cluster %u",
+		  "partial_cluster %lld",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  (unsigned long) __entry->ino,
 		  (unsigned) __entry->start,
 		  (unsigned) __entry->ee_lblk,
 		  (unsigned long long) __entry->ee_pblk,
 		  (unsigned short) __entry->ee_len,
-		  (unsigned) __entry->partial)
+		  (long long) __entry->partial)
 );
 
 TRACE_EVENT(ext4_ext_rm_idx,
@@ -2025,14 +2128,16 @@
 );
 
 TRACE_EVENT(ext4_ext_remove_space,
-	TP_PROTO(struct inode *inode, ext4_lblk_t start, int depth),
+	TP_PROTO(struct inode *inode, ext4_lblk_t start,
+		 ext4_lblk_t end, int depth),
 
-	TP_ARGS(inode, start, depth),
+	TP_ARGS(inode, start, end, depth),
 
 	TP_STRUCT__entry(
 		__field(	dev_t,		dev	)
 		__field(	ino_t,		ino	)
 		__field(	ext4_lblk_t,	start	)
+		__field(	ext4_lblk_t,	end	)
 		__field(	int,		depth	)
 	),
 
@@ -2040,28 +2145,31 @@
 		__entry->dev	= inode->i_sb->s_dev;
 		__entry->ino	= inode->i_ino;
 		__entry->start	= start;
+		__entry->end	= end;
 		__entry->depth	= depth;
 	),
 
-	TP_printk("dev %d,%d ino %lu since %u depth %d",
+	TP_printk("dev %d,%d ino %lu since %u end %u depth %d",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  (unsigned long) __entry->ino,
 		  (unsigned) __entry->start,
+		  (unsigned) __entry->end,
 		  __entry->depth)
 );
 
 TRACE_EVENT(ext4_ext_remove_space_done,
-	TP_PROTO(struct inode *inode, ext4_lblk_t start, int depth,
-		ext4_lblk_t partial, __le16 eh_entries),
+	TP_PROTO(struct inode *inode, ext4_lblk_t start, ext4_lblk_t end,
+		 int depth, long long partial, __le16 eh_entries),
 
-	TP_ARGS(inode, start, depth, partial, eh_entries),
+	TP_ARGS(inode, start, end, depth, partial, eh_entries),
 
 	TP_STRUCT__entry(
 		__field(	dev_t,		dev		)
 		__field(	ino_t,		ino		)
 		__field(	ext4_lblk_t,	start		)
+		__field(	ext4_lblk_t,	end		)
 		__field(	int,		depth		)
-		__field(	ext4_lblk_t,	partial		)
+		__field(	long long,	partial		)
 		__field(	unsigned short,	eh_entries	)
 	),
 
@@ -2069,22 +2177,24 @@
 		__entry->dev		= inode->i_sb->s_dev;
 		__entry->ino		= inode->i_ino;
 		__entry->start		= start;
+		__entry->end		= end;
 		__entry->depth		= depth;
 		__entry->partial	= partial;
 		__entry->eh_entries	= le16_to_cpu(eh_entries);
 	),
 
-	TP_printk("dev %d,%d ino %lu since %u depth %d partial %u "
+	TP_printk("dev %d,%d ino %lu since %u end %u depth %d partial %lld "
 		  "remaining_entries %u",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  (unsigned long) __entry->ino,
 		  (unsigned) __entry->start,
+		  (unsigned) __entry->end,
 		  __entry->depth,
-		  (unsigned) __entry->partial,
+		  (long long) __entry->partial,
 		  (unsigned short) __entry->eh_entries)
 );
 
-TRACE_EVENT(ext4_es_insert_extent,
+DECLARE_EVENT_CLASS(ext4__es_extent,
 	TP_PROTO(struct inode *inode, struct extent_status *es),
 
 	TP_ARGS(inode, es),
@@ -2095,7 +2205,7 @@
 		__field(	ext4_lblk_t,	lblk		)
 		__field(	ext4_lblk_t,	len		)
 		__field(	ext4_fsblk_t,	pblk		)
-		__field(	unsigned long long, status	)
+		__field(	char, status	)
 	),
 
 	TP_fast_assign(
@@ -2107,11 +2217,23 @@
 		__entry->status	= ext4_es_status(es);
 	),
 
-	TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %llx",
+	TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  (unsigned long) __entry->ino,
 		  __entry->lblk, __entry->len,
-		  __entry->pblk, __entry->status)
+		  __entry->pblk, show_extent_status(__entry->status))
+);
+
+DEFINE_EVENT(ext4__es_extent, ext4_es_insert_extent,
+	TP_PROTO(struct inode *inode, struct extent_status *es),
+
+	TP_ARGS(inode, es)
+);
+
+DEFINE_EVENT(ext4__es_extent, ext4_es_cache_extent,
+	TP_PROTO(struct inode *inode, struct extent_status *es),
+
+	TP_ARGS(inode, es)
 );
 
 TRACE_EVENT(ext4_es_remove_extent,
@@ -2172,7 +2294,7 @@
 		__field(	ext4_lblk_t,	lblk		)
 		__field(	ext4_lblk_t,	len		)
 		__field(	ext4_fsblk_t,	pblk		)
-		__field(	unsigned long long, status	)
+		__field(	char, status	)
 	),
 
 	TP_fast_assign(
@@ -2184,11 +2306,11 @@
 		__entry->status	= ext4_es_status(es);
 	),
 
-	TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %llx",
+	TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  (unsigned long) __entry->ino,
 		  __entry->lblk, __entry->len,
-		  __entry->pblk, __entry->status)
+		  __entry->pblk, show_extent_status(__entry->status))
 );
 
 TRACE_EVENT(ext4_es_lookup_extent_enter,
@@ -2225,7 +2347,7 @@
 		__field(	ext4_lblk_t,	lblk		)
 		__field(	ext4_lblk_t,	len		)
 		__field(	ext4_fsblk_t,	pblk		)
-		__field(	unsigned long long,	status	)
+		__field(	char,		status		)
 		__field(	int,		found		)
 	),
 
@@ -2239,15 +2361,15 @@
 		__entry->found	= found;
 	),
 
-	TP_printk("dev %d,%d ino %lu found %d [%u/%u) %llu %llx",
+	TP_printk("dev %d,%d ino %lu found %d [%u/%u) %llu %s",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  (unsigned long) __entry->ino, __entry->found,
 		  __entry->lblk, __entry->len,
 		  __entry->found ? __entry->pblk : 0,
-		  __entry->found ? __entry->status : 0)
+		  show_extent_status(__entry->found ? __entry->status : 0))
 );
 
-TRACE_EVENT(ext4_es_shrink_enter,
+DECLARE_EVENT_CLASS(ext4__es_shrink_enter,
 	TP_PROTO(struct super_block *sb, int nr_to_scan, int cache_cnt),
 
 	TP_ARGS(sb, nr_to_scan, cache_cnt),
@@ -2269,26 +2391,94 @@
 		  __entry->nr_to_scan, __entry->cache_cnt)
 );
 
-TRACE_EVENT(ext4_es_shrink_exit,
-	TP_PROTO(struct super_block *sb, int shrunk_nr, int cache_cnt),
+DEFINE_EVENT(ext4__es_shrink_enter, ext4_es_shrink_count,
+	TP_PROTO(struct super_block *sb, int nr_to_scan, int cache_cnt),
 
-	TP_ARGS(sb, shrunk_nr, cache_cnt),
+	TP_ARGS(sb, nr_to_scan, cache_cnt)
+);
+
+DEFINE_EVENT(ext4__es_shrink_enter, ext4_es_shrink_scan_enter,
+	TP_PROTO(struct super_block *sb, int nr_to_scan, int cache_cnt),
+
+	TP_ARGS(sb, nr_to_scan, cache_cnt)
+);
+
+TRACE_EVENT(ext4_es_shrink_scan_exit,
+	TP_PROTO(struct super_block *sb, int nr_shrunk, int cache_cnt),
+
+	TP_ARGS(sb, nr_shrunk, cache_cnt),
 
 	TP_STRUCT__entry(
 		__field(	dev_t,	dev			)
-		__field(	int,	shrunk_nr		)
+		__field(	int,	nr_shrunk		)
 		__field(	int,	cache_cnt		)
 	),
 
 	TP_fast_assign(
 		__entry->dev		= sb->s_dev;
-		__entry->shrunk_nr	= shrunk_nr;
+		__entry->nr_shrunk	= nr_shrunk;
 		__entry->cache_cnt	= cache_cnt;
 	),
 
-	TP_printk("dev %d,%d shrunk_nr %d cache_cnt %d",
+	TP_printk("dev %d,%d nr_shrunk %d cache_cnt %d",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->shrunk_nr, __entry->cache_cnt)
+		  __entry->nr_shrunk, __entry->cache_cnt)
+);
+
+TRACE_EVENT(ext4_collapse_range,
+	TP_PROTO(struct inode *inode, loff_t offset, loff_t len),
+
+	TP_ARGS(inode, offset, len),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(ino_t,	ino)
+		__field(loff_t,	offset)
+		__field(loff_t, len)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= inode->i_sb->s_dev;
+		__entry->ino	= inode->i_ino;
+		__entry->offset	= offset;
+		__entry->len	= len;
+	),
+
+	TP_printk("dev %d,%d ino %lu offset %lld len %lld",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  (unsigned long) __entry->ino,
+		  __entry->offset, __entry->len)
+);
+
+TRACE_EVENT(ext4_es_shrink,
+	TP_PROTO(struct super_block *sb, int nr_shrunk, u64 scan_time,
+		 int skip_precached, int nr_skipped, int retried),
+
+	TP_ARGS(sb, nr_shrunk, scan_time, skip_precached, nr_skipped, retried),
+
+	TP_STRUCT__entry(
+		__field(	dev_t,		dev		)
+		__field(	int,		nr_shrunk	)
+		__field(	unsigned long long, scan_time	)
+		__field(	int,		skip_precached	)
+		__field(	int,		nr_skipped	)
+		__field(	int,		retried		)
+	),
+
+	TP_fast_assign(
+		__entry->dev		= sb->s_dev;
+		__entry->nr_shrunk	= nr_shrunk;
+		__entry->scan_time	= div_u64(scan_time, 1000);
+		__entry->skip_precached = skip_precached;
+		__entry->nr_skipped	= nr_skipped;
+		__entry->retried	= retried;
+	),
+
+	TP_printk("dev %d,%d nr_shrunk %d, scan_time %llu skip_precached %d "
+		  "nr_skipped %d retried %d",
+		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->nr_shrunk,
+		  __entry->scan_time, __entry->skip_precached,
+		  __entry->nr_skipped, __entry->retried)
 );
 
 #endif /* _TRACE_EXT4_H */
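
The show_*_flags() helpers added at the top of this header replace raw hex
masks in trace output with symbolic names via __print_flags(), decoded when
the trace buffer is read rather than when the event fires. A rough userspace
model of that decoding (the bit values below are placeholders, not the real
EXT4_MAP_* constants):

    #include <stdio.h>

    struct flag_name { unsigned long mask; const char *name; };

    /* walk the table in order, printing the name of every set bit */
    static void print_flags(unsigned long v, const char *delim,
                            const struct flag_name *t, int n)
    {
        const char *sep = "";
        for (int i = 0; i < n; i++)
            if (v & t[i].mask) {
                printf("%s%s", sep, t[i].name);
                sep = delim;
            }
    }

    int main(void)
    {
        /* stand-in for show_mflags()'s table, which uses delimiter "" */
        const struct flag_name mflags[] = {
            { 1 << 0, "N" }, { 1 << 1, "M" }, { 1 << 2, "U" },
            { 1 << 3, "B" }, { 1 << 4, "C" },
        };
        print_flags((1 << 0) | (1 << 1), "", mflags, 5); /* prints "NM" */
        putchar('\n');
        return 0;
    }

So a field that used to print as a raw mask now reads e.g. "mflags NM", at
no extra cost on the event's fast path.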
diff --git a/include/trace/events/filemap.h b/include/trace/events/filemap.h
index 0421f49..e3175d0 100644
--- a/include/trace/events/filemap.h
+++ b/include/trace/events/filemap.h
@@ -52,6 +52,52 @@
 	TP_ARGS(page)
 	);
 
+DECLARE_EVENT_CLASS(mm_filemap_find_page_cache_miss,
+
+	TP_PROTO(struct file *file, loff_t pos, size_t count, int read),
+
+	TP_ARGS(file, pos, count, read),
+
+	TP_STRUCT__entry(
+		__array(char, path, MAX_FILTER_STR_VAL)
+		__field(char *, path_name)
+		__field(loff_t, pos)
+		__field(size_t, count)
+		__field(int, miss)
+	),
+
+	TP_fast_assign(
+		__entry->path_name = d_path(&file->f_path, __entry->path, MAX_FILTER_STR_VAL);
+		__entry->pos	= pos;
+		__entry->count	= count;
+		__entry->miss = 0;
+		if ((pos & ~PAGE_CACHE_MASK) || (count % PAGE_SIZE) || read) {
+			unsigned long ret;
+			rcu_read_lock();
+			ret = (count ? page_cache_next_hole(file->f_mapping,
+					(pos >> PAGE_CACHE_SHIFT), ((count - 1) >> PAGE_CACHE_SHIFT) + 1) : 0);
+			rcu_read_unlock();
+			__entry->miss = (ret >= (pos >> PAGE_CACHE_SHIFT) &&
+					ret <= ((pos + count - 1) >> PAGE_CACHE_SHIFT));
+		}
+	),
+
+	TP_printk("path_name %s pos %lld count %zu miss %s",
+		  __entry->path_name,
+		  __entry->pos, __entry->count,
+		  (__entry->miss ? "yes" : "no"))
+);
+
+DEFINE_EVENT(mm_filemap_find_page_cache_miss, mm_filemap_do_generic_file_read,
+	TP_PROTO(struct file *file, loff_t pos, size_t count, int read),
+	TP_ARGS(file, pos, count, read)
+	);
+
+DEFINE_EVENT(mm_filemap_find_page_cache_miss, mm_filemap_generic_perform_write,
+	TP_PROTO(struct file *file, loff_t pos, size_t count, int read),
+	TP_ARGS(file, pos, count, read)
+	);
+
 #endif /* _TRACE_FILEMAP_H */
 
 /* This part must be outside protection */
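
The miss predicate in mm_filemap_find_page_cache_miss above rewards a worked
example (illustrative numbers, assuming 4 KiB pages): with pos = 8192 and
count = 4096 the access covers exactly page index 2, so
page_cache_next_hole(mapping, 2, 1) scans one page starting at index 2. If
that page is absent it returns 2, which falls inside
[pos >> PAGE_CACHE_SHIFT, (pos + count - 1) >> PAGE_CACHE_SHIFT] = [2, 2],
so miss becomes 1. If the page is resident, the returned index lands past
the accessed range, the bounds check fails, and miss stays 0.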
diff --git a/include/trace/events/gpu.h b/include/trace/events/gpu.h
new file mode 100644
index 0000000..7e15cdf
--- /dev/null
+++ b/include/trace/events/gpu.h
@@ -0,0 +1,143 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gpu
+
+#if !defined(_TRACE_GPU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_GPU_H
+
+#include <linux/tracepoint.h>
+#include <linux/time.h>
+
+#define show_secs_from_ns(ns) \
+	({ \
+		u64 t = ns + (NSEC_PER_USEC / 2); \
+		do_div(t, NSEC_PER_SEC); \
+		t; \
+	})
+
+#define show_usecs_from_ns(ns) \
+	({ \
+		u64 t = ns + (NSEC_PER_USEC / 2); \
+		u32 rem; \
+		do_div(t, NSEC_PER_USEC); \
+		rem = do_div(t, USEC_PER_SEC); \
+	})
+
+/*
+ * The gpu_sched_switch event indicates that a switch from one GPU context to
+ * another occurred on one of the GPU hardware blocks.
+ *
+ * The gpu_name argument identifies the GPU hardware block.  Each independently
+ * scheduled GPU hardware block should have a different name.  This may be used
+ * in different ways for different GPUs.  For example, if a GPU includes
+ * multiple processing cores it may use names "GPU 0", "GPU 1", etc.  If a GPU
+ * includes a separately scheduled 2D and 3D hardware block, it might use the
+ * names "2D" and "3D".
+ *
+ * The timestamp argument is the timestamp at which the switch occurred on the
+ * GPU. These timestamps are in units of nanoseconds and must use
+ * approximately the same time base as sched_clock, though they need not come from
+ * any CPU clock. The timestamps for a single hardware block must be
+ * monotonically nondecreasing.  This means that if a variable compensation
+ * offset is used to translate from some other clock to the sched_clock, then
+ * care must be taken when increasing that offset, and doing so may result in
+ * multiple events with the same timestamp.
+ *
+ * The next_ctx_id argument identifies the next context that was running on
+ * the GPU hardware block.  A value of 0 indicates that the hardware block
+ * will be idle.
+ *
+ * The next_prio argument indicates the priority of the next context at the
+ * time of the event.  The exact numeric values may mean different things for
+ * different GPUs, but they should follow the rule that lower values indicate a
+ * higher priority.
+ *
+ * The next_job_id argument identifies the batch of work that the GPU will be
+ * working on.  This should correspond to a job_id that was previously traced
+ * as a gpu_job_enqueue event when the batch of work was created.
+ */
+TRACE_EVENT(gpu_sched_switch,
+
+	TP_PROTO(const char *gpu_name, u64 timestamp,
+		u32 next_ctx_id, s32 next_prio, u32 next_job_id),
+
+	TP_ARGS(gpu_name, timestamp, next_ctx_id, next_prio, next_job_id),
+
+	TP_STRUCT__entry(
+		__string(       gpu_name,       gpu_name        )
+		__field(        u64,            timestamp       )
+		__field(        u32,            next_ctx_id     )
+		__field(        s32,            next_prio       )
+		__field(        u32,            next_job_id     )
+	),
+
+	TP_fast_assign(
+		__assign_str(gpu_name, gpu_name);
+		__entry->timestamp = timestamp;
+		__entry->next_ctx_id = next_ctx_id;
+		__entry->next_prio = next_prio;
+		__entry->next_job_id = next_job_id;
+	),
+
+	TP_printk("gpu_name=%s ts=%llu.%06lu next_ctx_id=%lu next_prio=%ld "
+		"next_job_id=%lu",
+		__get_str(gpu_name),
+		(unsigned long long)show_secs_from_ns(__entry->timestamp),
+		(unsigned long)show_usecs_from_ns(__entry->timestamp),
+		(unsigned long)__entry->next_ctx_id,
+		(long)__entry->next_prio,
+		(unsigned long)__entry->next_job_id)
+);
+
+/*
+ * The gpu_job_enqueue event indicates that a batch of work has been queued up
+ * to be processed by the GPU.  This event is not intended to indicate that
+ * the batch of work has been submitted to the GPU hardware, but rather that
+ * it has been submitted to the GPU kernel driver.
+ *
+ * This event should be traced on the thread that initiated the work being
+ * queued.  For example, if a batch of work is submitted to the kernel by a
+ * userland thread, the event should be traced on that thread.
+ *
+ * The ctx_id field identifies the GPU context in which the batch of work
+ * being queued is to be run.
+ *
+ * The job_id field identifies the batch of work being queued within the given
+ * GPU context.  The first batch of work submitted for a given GPU context
+ * should have a job_id of 0, and each subsequent batch of work should
+ * increment the job_id by 1.
+ *
+ * The type field identifies the type of the job being enqueued.  The job
+ * types may be different for different GPU hardware.  For example, a GPU may
+ * differentiate between "2D", "3D", and "compute" jobs.
+ */
+TRACE_EVENT(gpu_job_enqueue,
+
+	TP_PROTO(u32 ctx_id, u32 job_id, const char *type),
+
+	TP_ARGS(ctx_id, job_id, type),
+
+	TP_STRUCT__entry(
+		__field(        u32,            ctx_id          )
+		__field(        u32,            job_id          )
+		__string(       type,           type            )
+	),
+
+	TP_fast_assign(
+		__entry->ctx_id = ctx_id;
+		__entry->job_id = job_id;
+		__assign_str(type, type);
+	),
+
+	TP_printk("ctx_id=%lu job_id=%lu type=%s",
+		(unsigned long)__entry->ctx_id,
+		(unsigned long)__entry->job_id,
+		__get_str(type))
+);
+
+#undef show_secs_from_ns
+#undef show_usecs_from_ns
+
+#endif /* _TRACE_GPU_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
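
Two details above are easiest to see with numbers. The ns-to-"s.us" split:
for timestamp = 1999999499 ns, show_secs_from_ns() computes
(1999999499 + 500) / 1e9 = 1 and show_usecs_from_ns() computes
((1999999499 + 500) / 1e3) % 1e6 = 999999, so the event prints
"ts=1.999999", rounded to the nearest microsecond. And a sketch of how a
driver would emit the switch event (values and function name are
hypothetical; one compilation unit must define CREATE_TRACE_POINTS before
including the header):

    #define CREATE_TRACE_POINTS
    #include <trace/events/gpu.h>

    static void example_gpu_ctx_switch(u64 now_ns)
    {
        /* "3D" names the hardware block; next_ctx_id 0 would mean idle */
        trace_gpu_sched_switch("3D", now_ns,
                               42 /* next_ctx_id */,
                               0  /* next_prio: lower = higher priority */,
                               7  /* next_job_id */);
    }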
diff --git a/include/trace/events/mmc.h b/include/trace/events/mmc.h
new file mode 100644
index 0000000..82b368d
--- /dev/null
+++ b/include/trace/events/mmc.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mmc
+
+#if !defined(_TRACE_MMC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MMC_H
+
+#include <linux/tracepoint.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/core.h>
+
+/*
+ * Unconditional logging of mmc block erase operations,
+ * including cmd, address, size
+ */
+DECLARE_EVENT_CLASS(mmc_blk_erase_class,
+	TP_PROTO(unsigned int cmd, unsigned int addr, unsigned int size),
+	TP_ARGS(cmd, addr, size),
+	TP_STRUCT__entry(
+		__field(unsigned int, cmd)
+		__field(unsigned int, addr)
+		__field(unsigned int, size)
+	),
+	TP_fast_assign(
+		__entry->cmd = cmd;
+		__entry->addr = addr;
+		__entry->size = size;
+	),
+	TP_printk("cmd=%u,addr=0x%08x,size=0x%08x",
+		  __entry->cmd, __entry->addr, __entry->size)
+);
+
+DEFINE_EVENT(mmc_blk_erase_class, mmc_blk_erase_start,
+	TP_PROTO(unsigned int cmd, unsigned int addr, unsigned int size),
+	TP_ARGS(cmd, addr, size));
+
+DEFINE_EVENT(mmc_blk_erase_class, mmc_blk_erase_end,
+	TP_PROTO(unsigned int cmd, unsigned int addr, unsigned int size),
+	TP_ARGS(cmd, addr, size));
+
+/*
+ * Logging of start of read or write mmc block operation,
+ * including cmd, address, size
+ */
+DECLARE_EVENT_CLASS(mmc_blk_rw_class,
+	TP_PROTO(unsigned int cmd, unsigned int addr, struct mmc_data *data),
+	TP_ARGS(cmd, addr, data),
+	TP_STRUCT__entry(
+		__field(unsigned int, cmd)
+		__field(unsigned int, addr)
+		__field(unsigned int, size)
+	),
+	TP_fast_assign(
+		__entry->cmd = cmd;
+		__entry->addr = addr;
+		__entry->size = data->blocks;
+	),
+	TP_printk("cmd=%u,addr=0x%08x,size=0x%08x",
+		  __entry->cmd, __entry->addr, __entry->size)
+);
+
+DEFINE_EVENT_CONDITION(mmc_blk_rw_class, mmc_blk_rw_start,
+	TP_PROTO(unsigned int cmd, unsigned int addr, struct mmc_data *data),
+	TP_ARGS(cmd, addr, data),
+	TP_CONDITION(((cmd == MMC_READ_MULTIPLE_BLOCK) ||
+		      (cmd == MMC_WRITE_MULTIPLE_BLOCK)) &&
+		      data));
+
+DEFINE_EVENT_CONDITION(mmc_blk_rw_class, mmc_blk_rw_end,
+	TP_PROTO(unsigned int cmd, unsigned int addr, struct mmc_data *data),
+	TP_ARGS(cmd, addr, data),
+	TP_CONDITION(((cmd == MMC_READ_MULTIPLE_BLOCK) ||
+		      (cmd == MMC_WRITE_MULTIPLE_BLOCK)) &&
+		      data));
+#endif /* _TRACE_MMC_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
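
Because the rw events use DEFINE_EVENT_CONDITION, the TP_CONDITION above is
checked before any arguments are recorded: the tracepoints are no-ops unless
cmd is a multi-block read or write and data is non-NULL. A sketch of the
intended call sites in the mmc core (the surrounding request handling is
illustrative; field names follow struct mmc_command and struct mmc_request):

    trace_mmc_blk_rw_start(cmd->opcode, cmd->arg, mrq->data);
    /* ... hand the request to the host controller and wait ... */
    trace_mmc_blk_rw_end(cmd->opcode, cmd->arg, mrq->data);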
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 427acab..503fc2b8 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -146,6 +146,25 @@
 	TP_ARGS(name, state, cpu_id)
 );
 
+TRACE_EVENT(clock_set_parent,
+
+	TP_PROTO(const char *name, const char *parent_name),
+
+	TP_ARGS(name, parent_name),
+
+	TP_STRUCT__entry(
+		__string(       name,           name            )
+		__string(       parent_name,    parent_name     )
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__assign_str(parent_name, parent_name);
+	),
+
+	TP_printk("%s parent=%s", __get_str(name), __get_str(parent_name))
+);
+
 /*
  * The power domain events are used for power domains transitions
  */
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 0cc74c4..b422ad5 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -692,9 +692,19 @@
 __SYSCALL(__NR_kcmp, sys_kcmp)
 #define __NR_finit_module 273
 __SYSCALL(__NR_finit_module, sys_finit_module)
+/* Backporting seccomp, skip a few ...
+ * #define __NR_sched_setattr 274
+__SYSCALL(__NR_sched_setattr, sys_sched_setattr)
+ * #define __NR_sched_getattr 275
+__SYSCALL(__NR_sched_getattr, sys_sched_getattr)
+ * #define __NR_renameat2 276
+__SYSCALL(__NR_renameat2, sys_renameat2)
+ */
+#define __NR_seccomp 277
+__SYSCALL(__NR_seccomp, sys_seccomp)
 
 #undef __NR_syscalls
-#define __NR_syscalls 274
+#define __NR_syscalls 278
 
 /*
  * All syscalls below here should go away really,
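
With the numbering above, userspace on this kernel can reach the backported
syscall directly by number. A minimal sketch (SECCOMP_SET_MODE_STRICT is 0
in linux/seccomp.h; error handling elided):

    #define _GNU_SOURCE
    #include <stddef.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __NR_seccomp
    #define __NR_seccomp 277            /* matches the definition above */
    #endif
    #define SECCOMP_SET_MODE_STRICT 0   /* from linux/seccomp.h */

    /* returns 0 on success, -1 with errno set on failure */
    static long enable_strict_seccomp(void)
    {
        return syscall(__NR_seccomp, SECCOMP_SET_MODE_STRICT, 0, NULL);
    }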
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index bdc6e87..fdccb62 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -1,4 +1,5 @@
 # UAPI Header export list
+header-y += android/
 header-y += byteorder/
 header-y += can/
 header-y += caif/
@@ -56,6 +57,7 @@
 header-y += atmsap.h
 header-y += atmsvc.h
 header-y += audit.h
+header-y += aufs_type.h
 header-y += auto_fs.h
 header-y += auto_fs4.h
 header-y += auxvec.h
diff --git a/include/uapi/linux/android/Kbuild b/include/uapi/linux/android/Kbuild
new file mode 100644
index 0000000..ca011ee
--- /dev/null
+++ b/include/uapi/linux/android/Kbuild
@@ -0,0 +1,2 @@
+# UAPI Header export list
+header-y += binder.h
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
new file mode 100644
index 0000000..c653800
--- /dev/null
+++ b/include/uapi/linux/android/binder.h
@@ -0,0 +1,349 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Based on, but no longer compatible with, the original
+ * OpenBinder.org binder driver interface, which is:
+ *
+ * Copyright (c) 2005 Palmsource, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_BINDER_H
+#define _UAPI_LINUX_BINDER_H
+
+#include <linux/ioctl.h>
+
+#define B_PACK_CHARS(c1, c2, c3, c4) \
+	((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
+#define B_TYPE_LARGE 0x85
+
+enum {
+	BINDER_TYPE_BINDER	= B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
+	BINDER_TYPE_WEAK_BINDER	= B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
+	BINDER_TYPE_HANDLE	= B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
+	BINDER_TYPE_WEAK_HANDLE	= B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
+	BINDER_TYPE_FD		= B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
+};
+
+enum {
+	FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+	FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
+};
+
+#ifdef BINDER_IPC_32BIT
+typedef __u32 binder_size_t;
+typedef __u32 binder_uintptr_t;
+#else
+typedef __u64 binder_size_t;
+typedef __u64 binder_uintptr_t;
+#endif
+
+/*
+ * This is the flattened representation of a Binder object for transfer
+ * between processes.  The 'offsets' supplied as part of a binder transaction
+ * contains offsets into the data where these structures occur.  The Binder
+ * driver takes care of re-writing the structure type and data as it moves
+ * between processes.
+ */
+struct flat_binder_object {
+	/* 8 bytes for large_flat_header. */
+	__u32	type;
+	__u32	flags;
+
+	/* 8 bytes of data. */
+	union {
+		binder_uintptr_t	binder;	/* local object */
+		__u32			handle;	/* remote object */
+	};
+
+	/* extra data associated with local object */
+	binder_uintptr_t	cookie;
+};
+
+/*
+ * On 64-bit platforms where user code may run in 32-bit mode, the driver must
+ * translate the buffer (and local binder) addresses appropriately.
+ */
+
+struct binder_write_read {
+	binder_size_t		write_size;	/* bytes to write */
+	binder_size_t		write_consumed;	/* bytes consumed by driver */
+	binder_uintptr_t	write_buffer;
+	binder_size_t		read_size;	/* bytes to read */
+	binder_size_t		read_consumed;	/* bytes consumed by driver */
+	binder_uintptr_t	read_buffer;
+};
+
+/* Use with BINDER_VERSION, driver fills in fields. */
+struct binder_version {
+	/* driver protocol version -- increment with incompatible change */
+	__s32	protocol_version;
+};
+
+/* This is the current protocol version. */
+#ifdef BINDER_IPC_32BIT
+#define BINDER_CURRENT_PROTOCOL_VERSION 7
+#else
+#define BINDER_CURRENT_PROTOCOL_VERSION 8
+#endif
+
+#define BINDER_WRITE_READ		_IOWR('b', 1, struct binder_write_read)
+#define	BINDER_SET_IDLE_TIMEOUT		_IOW('b', 3, __s64)
+#define	BINDER_SET_MAX_THREADS		_IOW('b', 5, __u32)
+#define	BINDER_SET_IDLE_PRIORITY	_IOW('b', 6, __s32)
+#define	BINDER_SET_CONTEXT_MGR		_IOW('b', 7, __s32)
+#define	BINDER_THREAD_EXIT		_IOW('b', 8, __s32)
+#define BINDER_VERSION			_IOWR('b', 9, struct binder_version)
+
+/*
+ * NOTE: Two special error codes you should check for when calling
+ * in to the driver are:
+ *
+ * EINTR -- The operation has been interrupted.  This should be
+ * handled by retrying the ioctl() until a different error code
+ * is returned.
+ *
+ * ECONNREFUSED -- The driver is no longer accepting operations
+ * from your process.  That is, the process is being destroyed.
+ * You should handle this by exiting from your process.  Note
+ * that once this error code is returned, all further calls to
+ * the driver from any thread will return this same code.
+ */
+
+enum transaction_flags {
+	TF_ONE_WAY	= 0x01,	/* this is a one-way call: async, no return */
+	TF_ROOT_OBJECT	= 0x04,	/* contents are the component's root object */
+	TF_STATUS_CODE	= 0x08,	/* contents are a 32-bit status code */
+	TF_ACCEPT_FDS	= 0x10,	/* allow replies with file descriptors */
+};
+
+struct binder_transaction_data {
+	/* The first two are only used for bcTRANSACTION and brTRANSACTION,
+	 * identifying the target and contents of the transaction.
+	 */
+	union {
+		/* target descriptor of command transaction */
+		__u32	handle;
+		/* target descriptor of return transaction */
+		binder_uintptr_t ptr;
+	} target;
+	binder_uintptr_t	cookie;	/* target object cookie */
+	__u32		code;		/* transaction command */
+
+	/* General information about the transaction. */
+	__u32		flags;
+	pid_t		sender_pid;
+	uid_t		sender_euid;
+	binder_size_t	data_size;	/* number of bytes of data */
+	binder_size_t	offsets_size;	/* number of bytes of offsets */
+
+	/* If this transaction is inline, the data immediately
+	 * follows here; otherwise, it ends with a pointer to
+	 * the data buffer.
+	 */
+	union {
+		struct {
+			/* transaction data */
+			binder_uintptr_t	buffer;
+			/* offsets from buffer to flat_binder_object structs */
+			binder_uintptr_t	offsets;
+		} ptr;
+		__u8	buf[8];
+	} data;
+};
+
+struct binder_ptr_cookie {
+	binder_uintptr_t ptr;
+	binder_uintptr_t cookie;
+};
+
+struct binder_handle_cookie {
+	__u32 handle;
+	binder_uintptr_t cookie;
+} __packed;
+
+struct binder_pri_desc {
+	__s32 priority;
+	__u32 desc;
+};
+
+struct binder_pri_ptr_cookie {
+	__s32 priority;
+	binder_uintptr_t ptr;
+	binder_uintptr_t cookie;
+};
+
+enum binder_driver_return_protocol {
+	BR_ERROR = _IOR('r', 0, __s32),
+	/*
+	 * int: error code
+	 */
+
+	BR_OK = _IO('r', 1),
+	/* No parameters! */
+
+	BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
+	BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
+	/*
+	 * binder_transaction_data: the received command.
+	 */
+
+	BR_ACQUIRE_RESULT = _IOR('r', 4, __s32),
+	/*
+	 * not currently supported
+	 * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
+	 * Else the remote object has acquired a primary reference.
+	 */
+
+	BR_DEAD_REPLY = _IO('r', 5),
+	/*
+	 * The target of the last transaction (either a bcTRANSACTION or
+	 * a bcATTEMPT_ACQUIRE) is no longer with us.  No parameters.
+	 */
+
+	BR_TRANSACTION_COMPLETE = _IO('r', 6),
+	/*
+	 * No parameters... always refers to the last transaction requested
+	 * (including replies).  Note that this will be sent even for
+	 * asynchronous transactions.
+	 */
+
+	BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
+	BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
+	BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
+	BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
+	/*
+	 * void *:	ptr to binder
+	 * void *: cookie for binder
+	 */
+
+	BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
+	/*
+	 * not currently supported
+	 * int:	priority
+	 * void *: ptr to binder
+	 * void *: cookie for binder
+	 */
+
+	BR_NOOP = _IO('r', 12),
+	/*
+	 * No parameters.  Do nothing and examine the next command.  It exists
+	 * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
+	 */
+
+	BR_SPAWN_LOOPER = _IO('r', 13),
+	/*
+	 * No parameters.  The driver has determined that a process has no
+	 * threads waiting to service incoming transactions.  When a process
+	 * receives this command, it must spawn a new service thread and
+	 * register it via bcENTER_LOOPER.
+	 */
+
+	BR_FINISHED = _IO('r', 14),
+	/*
+	 * not currently supported
+	 * stop threadpool thread
+	 */
+
+	BR_DEAD_BINDER = _IOR('r', 15, binder_uintptr_t),
+	/*
+	 * void *: cookie
+	 */
+	BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, binder_uintptr_t),
+	/*
+	 * void *: cookie
+	 */
+
+	BR_FAILED_REPLY = _IO('r', 17),
+	/*
+	 * The last transaction (either a bcTRANSACTION or
+	 * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory).  No parameters.
+	 */
+};
+
+enum binder_driver_command_protocol {
+	BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
+	BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
+	/*
+	 * binder_transaction_data: the sent command.
+	 */
+
+	BC_ACQUIRE_RESULT = _IOW('c', 2, __s32),
+	/*
+	 * not currently supported
+	 * int:  0 if the last BR_ATTEMPT_ACQUIRE was not successful.
+	 * Else you have acquired a primary reference on the object.
+	 */
+
+	BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t),
+	/*
+	 * void *: ptr to transaction data received on a read
+	 */
+
+	BC_INCREFS = _IOW('c', 4, __u32),
+	BC_ACQUIRE = _IOW('c', 5, __u32),
+	BC_RELEASE = _IOW('c', 6, __u32),
+	BC_DECREFS = _IOW('c', 7, __u32),
+	/*
+	 * int:	descriptor
+	 */
+
+	BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
+	BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
+	/*
+	 * void *: ptr to binder
+	 * void *: cookie for binder
+	 */
+
+	BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
+	/*
+	 * not currently supported
+	 * int: priority
+	 * int: descriptor
+	 */
+
+	BC_REGISTER_LOOPER = _IO('c', 11),
+	/*
+	 * No parameters.
+	 * Register a spawned looper thread with the device.
+	 */
+
+	BC_ENTER_LOOPER = _IO('c', 12),
+	BC_EXIT_LOOPER = _IO('c', 13),
+	/*
+	 * No parameters.
+	 * These two commands are sent as an application-level thread
+	 * enters and exits the binder loop, respectively.  They are
+	 * used so the binder can have an accurate count of the number
+	 * of looping threads it has available.
+	 */
+
+	BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_handle_cookie),
+	/*
+	 * int: handle
+	 * void *: cookie
+	 */
+
+	BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_handle_cookie),
+	/*
+	 * int: handle
+	 * void *: cookie
+	 */
+
+	BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t),
+	/*
+	 * void *: cookie
+	 */
+};
+
+#endif /* _UAPI_LINUX_BINDER_H */
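
As a worked example of B_PACK_CHARS, BINDER_TYPE_FD packs to
('f' << 24) | ('d' << 16) | ('*' << 8) | 0x85 = 0x66642a85. The NOTE above
about EINTR translates into the following userspace pattern (the wrapper
name is hypothetical):

    #include <errno.h>
    #include <sys/ioctl.h>
    #include <linux/android/binder.h>   /* this header, as installed */

    static int do_binder_write_read(int fd, struct binder_write_read *bwr)
    {
        int ret;

        do {
            ret = ioctl(fd, BINDER_WRITE_READ, bwr);
        } while (ret < 0 && errno == EINTR);
        /* ECONNREFUSED here means the driver has cut this process off */
        return ret;
    }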
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 75cef3f..ce8750f 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -324,6 +324,8 @@
 /* distinguish syscall tables */
 #define __AUDIT_ARCH_64BIT 0x80000000
 #define __AUDIT_ARCH_LE	   0x40000000
+
+#define AUDIT_ARCH_AARCH64	(EM_AARCH64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_ALPHA	(EM_ALPHA|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_ARM		(EM_ARM|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_ARMEB	(EM_ARM)
diff --git a/include/uapi/linux/aufs_type.h b/include/uapi/linux/aufs_type.h
new file mode 100644
index 0000000..ba283d6
--- /dev/null
+++ b/include/uapi/linux/aufs_type.h
@@ -0,0 +1,251 @@
+/*
+ * Copyright (C) 2005-2013 Junjiro R. Okajima
+ *
+ * This program, aufs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#ifndef __AUFS_TYPE_H__
+#define __AUFS_TYPE_H__
+
+#define AUFS_NAME	"aufs"
+
+#ifdef __KERNEL__
+/*
+ * define it before including all other headers.
+ * sched.h may use pr_* macros before defining "current", so define the
+ * no-current version first, and re-define later.
+ */
+#define pr_fmt(fmt)	AUFS_NAME " %s:%d: " fmt, __func__, __LINE__
+#include <linux/sched.h>
+#undef pr_fmt
+#define pr_fmt(fmt) \
+		AUFS_NAME " %s:%d:%.*s[%d]: " fmt, __func__, __LINE__, \
+		(int)sizeof(current->comm), current->comm, current->pid
+#else
+#include <stdint.h>
+#include <sys/types.h>
+#endif /* __KERNEL__ */
+
+#include <linux/limits.h>
+
+#define AUFS_VERSION	"3.10-20130819"
+
+/* todo? move this to linux-2.6.19/include/magic.h */
+#define AUFS_SUPER_MAGIC	('a' << 24 | 'u' << 16 | 'f' << 8 | 's')
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_BRANCH_MAX_127
+typedef int8_t aufs_bindex_t;
+#define AUFS_BRANCH_MAX 127
+#else
+typedef int16_t aufs_bindex_t;
+#ifdef CONFIG_AUFS_BRANCH_MAX_511
+#define AUFS_BRANCH_MAX 511
+#elif defined(CONFIG_AUFS_BRANCH_MAX_1023)
+#define AUFS_BRANCH_MAX 1023
+#elif defined(CONFIG_AUFS_BRANCH_MAX_32767)
+#define AUFS_BRANCH_MAX 32767
+#endif
+#endif
+
+#ifdef __KERNEL__
+#ifndef AUFS_BRANCH_MAX
+#error unknown CONFIG_AUFS_BRANCH_MAX value
+#endif
+#endif /* __KERNEL__ */
+
+/* ---------------------------------------------------------------------- */
+
+#define AUFS_FSTYPE		AUFS_NAME
+
+#define AUFS_ROOT_INO		2
+#define AUFS_FIRST_INO		11
+
+#define AUFS_WH_PFX		".wh."
+#define AUFS_WH_PFX_LEN		((int)sizeof(AUFS_WH_PFX) - 1)
+#define AUFS_WH_TMP_LEN		4
+/* a limit for rmdir/rename a dir and copyup */
+#define AUFS_MAX_NAMELEN	(NAME_MAX \
+				- AUFS_WH_PFX_LEN * 2	/* doubly whiteouted */\
+				- 1			/* dot */\
+				- AUFS_WH_TMP_LEN)	/* hex */
+#define AUFS_XINO_FNAME		"." AUFS_NAME ".xino"
+#define AUFS_XINO_DEFPATH	"/tmp/" AUFS_XINO_FNAME
+#define AUFS_XINO_TRUNC_INIT	64 /* blocks */
+#define AUFS_XINO_TRUNC_STEP	4  /* blocks */
+#define AUFS_DIRWH_DEF		3
+#define AUFS_RDCACHE_DEF	10 /* seconds */
+#define AUFS_RDCACHE_MAX	3600 /* seconds */
+#define AUFS_RDBLK_DEF		512 /* bytes */
+#define AUFS_RDHASH_DEF		32
+#define AUFS_WKQ_NAME		AUFS_NAME "d"
+#define AUFS_MFS_DEF_SEC	30 /* seconds */
+#define AUFS_MFS_MAX_SEC	3600 /* seconds */
+#define AUFS_PLINK_WARN		50 /* number of plinks in a single bucket */
+
+/* pseudo-link maintenance under /proc */
+#define AUFS_PLINK_MAINT_NAME	"plink_maint"
+#define AUFS_PLINK_MAINT_DIR	"fs/" AUFS_NAME
+#define AUFS_PLINK_MAINT_PATH	AUFS_PLINK_MAINT_DIR "/" AUFS_PLINK_MAINT_NAME
+
+#define AUFS_DIROPQ_NAME	AUFS_WH_PFX ".opq" /* whiteouted doubly */
+#define AUFS_WH_DIROPQ		AUFS_WH_PFX AUFS_DIROPQ_NAME
+
+#define AUFS_BASE_NAME		AUFS_WH_PFX AUFS_NAME
+#define AUFS_PLINKDIR_NAME	AUFS_WH_PFX "plnk"
+#define AUFS_ORPHDIR_NAME	AUFS_WH_PFX "orph"
+
+/* doubly whiteouted */
+#define AUFS_WH_BASE		AUFS_WH_PFX AUFS_BASE_NAME
+#define AUFS_WH_PLINKDIR	AUFS_WH_PFX AUFS_PLINKDIR_NAME
+#define AUFS_WH_ORPHDIR		AUFS_WH_PFX AUFS_ORPHDIR_NAME
+
+/* branch permissions and attributes */
+#define AUFS_BRPERM_RW		"rw"
+#define AUFS_BRPERM_RO		"ro"
+#define AUFS_BRPERM_RR		"rr"
+#define AUFS_BRRATTR_WH		"wh"
+#define AUFS_BRWATTR_NLWH	"nolwh"
+#define AUFS_BRATTR_UNPIN	"unpin"
+
+/* ---------------------------------------------------------------------- */
+
+/* ioctl */
+enum {
+	/* readdir in userspace */
+	AuCtl_RDU,
+	AuCtl_RDU_INO,
+
+	/* pathconf wrapper */
+	AuCtl_WBR_FD,
+
+	/* busy inode */
+	AuCtl_IBUSY,
+
+	/* move-down */
+	AuCtl_MVDOWN
+};
+
+/* borrowed from linux/include/linux/kernel.h */
+#ifndef ALIGN
+#define ALIGN(x, a)		__ALIGN_MASK(x, (typeof(x))(a)-1)
+#define __ALIGN_MASK(x, mask)	(((x)+(mask))&~(mask))
+#endif
+
+/* borrowed from linux/include/linux/compiler-gcc3.h */
+#ifndef __aligned
+#define __aligned(x)			__attribute__((aligned(x)))
+#endif
+
+#ifdef __KERNEL__
+#ifndef __packed
+#define __packed			__attribute__((packed))
+#endif
+#endif
+
+struct au_rdu_cookie {
+	uint64_t	h_pos;
+	int16_t		bindex;
+	uint8_t		flags;
+	uint8_t		pad;
+	uint32_t	generation;
+} __aligned(8);
+
+struct au_rdu_ent {
+	uint64_t	ino;
+	int16_t		bindex;
+	uint8_t		type;
+	uint8_t		nlen;
+	uint8_t		wh;
+	char		name[0];
+} __aligned(8);
+
+static inline int au_rdu_len(int nlen)
+{
+	/* include the terminating NULL */
+	return ALIGN(sizeof(struct au_rdu_ent) + nlen + 1,
+		     sizeof(uint64_t));
+}
+
+union au_rdu_ent_ul {
+	struct au_rdu_ent __user	*e;
+	uint64_t			ul;
+};
+
+enum {
+	AufsCtlRduV_SZ,
+	AufsCtlRduV_End
+};
+
+struct aufs_rdu {
+	/* input */
+	union {
+		uint64_t	sz;	/* AuCtl_RDU */
+		uint64_t	nent;	/* AuCtl_RDU_INO */
+	};
+	union au_rdu_ent_ul	ent;
+	uint16_t		verify[AufsCtlRduV_End];
+
+	/* input/output */
+	uint32_t		blk;
+
+	/* output */
+	union au_rdu_ent_ul	tail;
+	/* number of entries which were added in a single call */
+	uint64_t		rent;
+	uint8_t			full;
+	uint8_t			shwh;
+
+	struct au_rdu_cookie	cookie;
+} __aligned(8);
+
+/* ---------------------------------------------------------------------- */
+
+struct aufs_wbr_fd {
+	uint32_t	oflags;
+	int16_t		brid;
+} __aligned(8);
+
+/* ---------------------------------------------------------------------- */
+
+struct aufs_ibusy {
+	uint64_t	ino, h_ino;
+	int16_t		bindex;
+} __aligned(8);
+
+/* ---------------------------------------------------------------------- */
+
+/* flags for move-down */
+#define AUFS_MVDOWN_VERBOSE	1
+/* will be added more */
+
+struct aufs_mvdown {
+	uint8_t		flags;
+	/* will be added more */
+} __aligned(8);
+
+/* ---------------------------------------------------------------------- */
+
+#define AuCtlType		'A'
+#define AUFS_CTL_RDU		_IOWR(AuCtlType, AuCtl_RDU, struct aufs_rdu)
+#define AUFS_CTL_RDU_INO	_IOWR(AuCtlType, AuCtl_RDU_INO, struct aufs_rdu)
+#define AUFS_CTL_WBR_FD		_IOW(AuCtlType, AuCtl_WBR_FD, \
+				     struct aufs_wbr_fd)
+#define AUFS_CTL_IBUSY		_IOWR(AuCtlType, AuCtl_IBUSY, struct aufs_ibusy)
+#define AUFS_CTL_MVDOWN		_IOR(AuCtlType, AuCtl_MVDOWN, \
+				     struct aufs_mvdown)
+
+#endif /* __AUFS_TYPE_H__ */
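
As a hedged illustration of how the ioctls above are meant to be driven from userspace, here is a minimal sketch that asks aufs to move a file down to a lower branch. The header install path, the mount path, and the exact move-down semantics are assumptions; only the AUFS_CTL_MVDOWN plumbing comes from the header itself.

/* Hypothetical userspace sketch, not part of the patch. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/aufs_type.h>	/* assumed install path for this header */

int main(void)
{
	/* AUFS_MVDOWN_VERBOSE comes from the flags defined above */
	struct aufs_mvdown mv = { .flags = AUFS_MVDOWN_VERBOSE };
	int fd = open("/mnt/aufs/some/file", O_RDONLY);	/* assumed path */

	if (fd < 0 || ioctl(fd, AUFS_CTL_MVDOWN, &mv) < 0)
		perror("AUFS_CTL_MVDOWN");
	return 0;
}
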
diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h
index 8e2b7ba..59c17a2 100644
--- a/include/uapi/linux/elf-em.h
+++ b/include/uapi/linux/elf-em.h
@@ -22,6 +22,7 @@
 #define EM_PPC		20	/* PowerPC */
 #define EM_PPC64	21	 /* PowerPC64 */
 #define EM_SPU		23	/* Cell BE SPU */
+#define EM_ARM		40	/* ARM 32 bit */
 #define EM_SH		42	/* SuperH */
 #define EM_SPARCV9	43	/* SPARC v9 64-bit */
 #define EM_IA_64	50	/* HP/Intel IA-64 */
@@ -34,6 +35,7 @@
 #define EM_MN10300	89	/* Panasonic/MEI MN10300, AM33 */
 #define EM_BLACKFIN     106     /* ADI Blackfin Processor */
 #define EM_TI_C6000	140	/* TI C6X DSPs */
+#define EM_AARCH64	183	/* ARM 64 bit */
 #define EM_FRV		0x5441	/* Fujitsu FR-V */
 #define EM_AVR32	0x18ad	/* Atmel AVR32 */
 
diff --git a/include/uapi/linux/falloc.h b/include/uapi/linux/falloc.h
index 990c4cc..bc5e796 100644
--- a/include/uapi/linux/falloc.h
+++ b/include/uapi/linux/falloc.h
@@ -5,5 +5,41 @@
 #define FALLOC_FL_PUNCH_HOLE	0x02 /* de-allocates range */
 #define FALLOC_FL_NO_HIDE_STALE	0x04 /* reserved codepoint */
 
+/*
+ * FALLOC_FL_COLLAPSE_RANGE is used to remove a range of a file
+ * without leaving a hole in the file. The contents of the file beyond
+ * the range being removed are appended to the start offset of the range
+ * being removed (i.e. the hole that was punched is "collapsed"),
+ * resulting in a file layout that looks like the range that was
+ * removed never existed. As such, collapsing a range of a file changes
+ * the size of the file, reducing it by the length of the range
+ * removed by the operation.
+ *
+ * Different filesystems may implement different limitations on the
+ * granularity of the operation. Most will limit operations to
+ * filesystem block size boundaries, but this boundary may be larger or
+ * smaller depending on the filesystem and/or the configuration of the
+ * filesystem or file.
+ *
+ * Attempting to collapse a range that crosses the end of the file is
+ * considered an illegal operation - just use ftruncate(2) if you need
+ * to collapse a range that crosses EOF.
+ */
+#define FALLOC_FL_COLLAPSE_RANGE	0x08
+
+/*
+ * FALLOC_FL_ZERO_RANGE is used to convert a range of a file to zeros,
+ * preferably without issuing data IO. Blocks should be preallocated for the
+ * regions that span holes in the file, and the entire range is preferably
+ * converted to unwritten extents - though the file system may instead choose
+ * to zero out the extents, or anything else that results in reads of the
+ * range returning zeros while the range remains allocated for the file.
+ *
+ * This can also be used to preallocate blocks past EOF in the same way as
+ * with fallocate. The flag FALLOC_FL_KEEP_SIZE should cause the inode
+ * size to remain the same.
+ */
+#define FALLOC_FL_ZERO_RANGE		0x10
+
 
 #endif /* _UAPI_FALLOC_H_ */
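
A small sketch of how the new flag is driven through fallocate(2); the offsets here assume a 4 KiB filesystem block size and a filesystem (e.g. ext4 or XFS) that implements the operation.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <linux/falloc.h>

/* Remove the 4 KiB block at offset 4096 and shift the rest of the file
 * up, shrinking the file by 4096 bytes. */
int collapse_one_block(int fd)
{
	if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 4096, 4096) < 0) {
		perror("FALLOC_FL_COLLAPSE_RANGE");
		return -1;
	}
	return 0;
}
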
diff --git a/include/uapi/linux/fib_rules.h b/include/uapi/linux/fib_rules.h
index 51da65b..9dcdb62 100644
--- a/include/uapi/linux/fib_rules.h
+++ b/include/uapi/linux/fib_rules.h
@@ -49,6 +49,8 @@
 	FRA_TABLE,	/* Extended table id */
 	FRA_FWMASK,	/* mask for netfilter mark */
 	FRA_OIFNAME,
+	FRA_UID_START,	/* UID range */
+	FRA_UID_END,
 	__FRA_MAX
 };
 
diff --git a/include/uapi/linux/fiemap.h b/include/uapi/linux/fiemap.h
index d830747..0c51d61 100644
--- a/include/uapi/linux/fiemap.h
+++ b/include/uapi/linux/fiemap.h
@@ -40,6 +40,7 @@
 
 #define FIEMAP_FLAG_SYNC	0x00000001 /* sync file data before map */
 #define FIEMAP_FLAG_XATTR	0x00000002 /* map extended attribute tree */
+#define FIEMAP_FLAG_CACHE	0x00000004 /* request caching of the extents */
 
 #define FIEMAP_FLAGS_COMPAT	(FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR)
 
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index a4ed56c..5014a5c 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -154,6 +154,8 @@
 #define FITHAW		_IOWR('X', 120, int)	/* Thaw */
 #define FITRIM		_IOWR('X', 121, struct fstrim_range)	/* Trim */
 
+#define FIDTRIM	_IOWR('f', 128, struct fstrim_range)	/* Deep discard trim */
+
 #define	FS_IOC_GETFLAGS			_IOR('f', 1, long)
 #define	FS_IOC_SETFLAGS			_IOW('f', 2, long)
 #define	FS_IOC_GETVERSION		_IOR('v', 1, long)
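
FIDTRIM takes the same struct fstrim_range as FITRIM; a sketch of deep-discarding an entire mounted filesystem, assuming an fd opened on its mount point:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

/* Deep-discard (trim plus erase) every free extent behind `mnt`. */
int deep_trim(const char *mnt)
{
	struct fstrim_range range = { .start = 0, .len = ~0ULL, .minlen = 0 };
	int fd = open(mnt, O_RDONLY);

	if (fd < 0 || ioctl(fd, FIDTRIM, &range) < 0)
		return -1;
	return 0;
}
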
diff --git a/include/uapi/linux/if_pppolac.h b/include/uapi/linux/if_pppolac.h
new file mode 100644
index 0000000..b7eb815
--- /dev/null
+++ b/include/uapi/linux/if_pppolac.h
@@ -0,0 +1,33 @@
+/* include/uapi/linux/if_pppolac.h
+ *
+ * Header for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _UAPI_LINUX_IF_PPPOLAC_H
+#define _UAPI_LINUX_IF_PPPOLAC_H
+
+#include <linux/socket.h>
+#include <linux/types.h>
+
+struct sockaddr_pppolac {
+	sa_family_t	sa_family;	/* AF_PPPOX */
+	unsigned int	sa_protocol;	/* PX_PROTO_OLAC */
+	int		udp_socket;
+	struct __attribute__((packed)) {
+		__u16	tunnel, session;
+	} local, remote;
+} __attribute__((packed));
+
+#endif /* _UAPI_LINUX_IF_PPPOLAC_H */
diff --git a/include/uapi/linux/if_pppopns.h b/include/uapi/linux/if_pppopns.h
new file mode 100644
index 0000000..a392b52
--- /dev/null
+++ b/include/uapi/linux/if_pppopns.h
@@ -0,0 +1,32 @@
+/* include/uapi/linux/if_pppopns.h
+ *
+ * Header for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _UAPI_LINUX_IF_PPPOPNS_H
+#define _UAPI_LINUX_IF_PPPOPNS_H
+
+#include <linux/socket.h>
+#include <linux/types.h>
+
+struct sockaddr_pppopns {
+	sa_family_t	sa_family;	/* AF_PPPOX */
+	unsigned int	sa_protocol;	/* PX_PROTO_OPNS */
+	int		tcp_socket;
+	__u16		local;
+	__u16		remote;
+} __attribute__((packed));
+
+#endif /* _UAPI_LINUX_IF_PPPOPNS_H */
diff --git a/include/uapi/linux/if_pppox.h b/include/uapi/linux/if_pppox.h
index e36a4ae..87f478b 100644
--- a/include/uapi/linux/if_pppox.h
+++ b/include/uapi/linux/if_pppox.h
@@ -23,6 +23,8 @@
 #include <linux/socket.h>
 #include <linux/if_ether.h>
 #include <linux/if_pppol2tp.h>
+#include <linux/if_pppolac.h>
+#include <linux/if_pppopns.h>
 
 /* For user-space programs to pick up these definitions
  * which they wouldn't get otherwise without defining __KERNEL__
@@ -56,7 +58,9 @@
 #define PX_PROTO_OE    0 /* Currently just PPPoE */
 #define PX_PROTO_OL2TP 1 /* Now L2TP also */
 #define PX_PROTO_PPTP  2
-#define PX_MAX_PROTO   3
+#define PX_PROTO_OLAC  3
+#define PX_PROTO_OPNS  4
+#define PX_MAX_PROTO   5
 
 struct sockaddr_pppox {
 	__kernel_sa_family_t sa_family;       /* address family, AF_PPPOX */
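
A sketch of how a VPN daemon might bind an L2TP tunnel/session pair on top of an existing UDP socket via the new PX_PROTO_OLAC; the SOCK_DGRAM socket type and the network byte order of the tunnel/session ids are assumptions, not guaranteed by the header.

#include <string.h>
#include <sys/socket.h>
#include <linux/if_pppox.h>

int pppolac_attach(int udp_fd, __u16 local_tunnel, __u16 local_session,
		   __u16 remote_tunnel, __u16 remote_session)
{
	struct sockaddr_pppolac addr;
	int fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OLAC);

	memset(&addr, 0, sizeof(addr));
	addr.sa_family = AF_PPPOX;
	addr.sa_protocol = PX_PROTO_OLAC;
	addr.udp_socket = udp_fd;		/* carries the L2TP packets */
	addr.local.tunnel = local_tunnel;	/* assumed network byte order */
	addr.local.session = local_session;
	addr.remote.tunnel = remote_tunnel;
	addr.remote.session = remote_session;
	if (fd < 0 || connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return -1;
	return fd;
}
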
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index 4649ee3..a86f530 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -153,6 +153,9 @@
 
 #define EVIOCGRAB		_IOW('E', 0x90, int)			/* Grab/Release device */
 
+#define EVIOCGSUSPENDBLOCK	_IOR('E', 0x91, int)			/* get suspend block enable */
+#define EVIOCSSUSPENDBLOCK	_IOW('E', 0x91, int)			/* set suspend block enable */
+
 #define EVIOCSCLOCKID		_IOW('E', 0xa0, int)			/* Set clockid to be used for timestamps */
 
 /*
@@ -458,10 +461,14 @@
 #define KEY_VIDEO_NEXT		241	/* drive next video source */
 #define KEY_VIDEO_PREV		242	/* drive previous video source */
 #define KEY_BRIGHTNESS_CYCLE	243	/* brightness up, after max is min */
-#define KEY_BRIGHTNESS_ZERO	244	/* brightness off, use ambient */
+#define KEY_BRIGHTNESS_AUTO	244	/* Set Auto Brightness: manual
+					  brightness control is off,
+					  rely on ambient */
+#define KEY_BRIGHTNESS_ZERO	KEY_BRIGHTNESS_AUTO
 #define KEY_DISPLAY_OFF		245	/* display device to off state */
 
-#define KEY_WIMAX		246
+#define KEY_WWAN		246	/* Wireless WAN (LTE, UMTS, GSM, etc.) */
+#define KEY_WIMAX		KEY_WWAN
 #define KEY_RFKILL		247	/* Key that controls all radios */
 
 #define KEY_MICMUTE		248	/* Mute / unmute the microphone */
@@ -506,11 +513,15 @@
 #define BTN_DEAD		0x12f
 
 #define BTN_GAMEPAD		0x130
-#define BTN_A			0x130
-#define BTN_B			0x131
+#define BTN_SOUTH		0x130
+#define BTN_A			BTN_SOUTH
+#define BTN_EAST		0x131
+#define BTN_B			BTN_EAST
 #define BTN_C			0x132
-#define BTN_X			0x133
-#define BTN_Y			0x134
+#define BTN_NORTH		0x133
+#define BTN_X			BTN_NORTH
+#define BTN_WEST		0x134
+#define BTN_Y			BTN_WEST
 #define BTN_Z			0x135
 #define BTN_TL			0x136
 #define BTN_TR			0x137
@@ -623,6 +634,7 @@
 #define KEY_ADDRESSBOOK		0x1ad	/* AL Contacts/Address Book */
 #define KEY_MESSENGER		0x1ae	/* AL Instant Messaging */
 #define KEY_DISPLAYTOGGLE	0x1af	/* Turn display (LCD) on and off */
+#define KEY_BRIGHTNESS_TOGGLE	KEY_DISPLAYTOGGLE
 #define KEY_SPELLCHECK		0x1b0   /* AL Spell Check */
 #define KEY_LOGOFF		0x1b1   /* AL Logoff */
 
@@ -707,6 +719,24 @@
 #define KEY_ATTENDANT_TOGGLE	0x21d	/* Attendant call on or off */
 #define KEY_LIGHTS_TOGGLE	0x21e	/* Reading light on or off */
 
+#define BTN_DPAD_UP		0x220
+#define BTN_DPAD_DOWN		0x221
+#define BTN_DPAD_LEFT		0x222
+#define BTN_DPAD_RIGHT		0x223
+
+#define KEY_ALS_TOGGLE		0x230	/* Ambient light sensor */
+
+#define KEY_BUTTONCONFIG		0x240	/* AL Button Configuration */
+#define KEY_TASKMANAGER		0x241	/* AL Task/Project Manager */
+#define KEY_JOURNAL		0x242	/* AL Log/Journal/Timecard */
+#define KEY_CONTROLPANEL		0x243	/* AL Control Panel */
+#define KEY_APPSELECT		0x244	/* AL Select Task/Application */
+#define KEY_SCREENSAVER		0x245	/* AL Screen Saver */
+#define KEY_VOICECOMMAND		0x246	/* Listening Voice Command */
+
+#define KEY_BRIGHTNESS_MIN		0x250	/* Set Brightness to Minimum */
+#define KEY_BRIGHTNESS_MAX		0x251	/* Set Brightness to Maximum */
+
 #define BTN_TRIGGER_HAPPY		0x2c0
 #define BTN_TRIGGER_HAPPY1		0x2c0
 #define BTN_TRIGGER_HAPPY2		0x2c1
@@ -844,6 +874,7 @@
 #define SW_FRONT_PROXIMITY	0x0b  /* set = front proximity sensor active */
 #define SW_ROTATE_LOCK		0x0c  /* set = rotate locked/disabled */
 #define SW_LINEIN_INSERT	0x0d  /* set = inserted */
+#define SW_MUTE_DEVICE		0x0e  /* set = device disabled */
 #define SW_MAX			0x0f
 #define SW_CNT			(SW_MAX+1)
 
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 4bda4cf..15ac588 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -160,6 +160,9 @@
 	DEVCONF_ACCEPT_DAD,
 	DEVCONF_FORCE_TLLAO,
 	DEVCONF_NDISC_NOTIFY,
+	DEVCONF_ACCEPT_RA_RT_TABLE,
+	DEVCONF_USE_OPTIMISTIC,
+	DEVCONF_USE_OIF_ADDRS_ONLY,
 	DEVCONF_MAX
 };
 
diff --git a/include/uapi/linux/keychord.h b/include/uapi/linux/keychord.h
new file mode 100644
index 0000000..ea7cf4d
--- /dev/null
+++ b/include/uapi/linux/keychord.h
@@ -0,0 +1,52 @@
+/*
+ *  Key chord input driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef _UAPI_LINUX_KEYCHORD_H_
+#define _UAPI_LINUX_KEYCHORD_H_
+
+#include <linux/input.h>
+
+#define KEYCHORD_VERSION		1
+
+/*
+ * One or more input_keychord structs are written to /dev/keychord
+ * at once to specify the list of keychords to monitor.
+ * Reading /dev/keychord returns the id of a keychord when the
+ * keychord combination is pressed.  A keychord is signalled when
+ * all of the keys in the keycode list are in the pressed state.
+ * The order in which the keys are pressed does not matter.
+ * The keychord will not be signalled if keys not in the keycode
+ * list are pressed.
+ * Keychords will not be signalled on key release events.
+ */
+struct input_keychord {
+	/* should be KEYCHORD_VERSION */
+	__u16 version;
+	/*
+	 * client specified ID, returned from read()
+	 * when this keychord is pressed.
+	 */
+	__u16 id;
+
+	/* number of keycodes in this keychord */
+	__u16 count;
+
+	/* variable length array of keycodes */
+	__u16 keycodes[];
+};
+
+#endif	/* _UAPI_LINUX_KEYCHORD_H_ */
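
Following the usage described in the comment above, a sketch that registers a single two-key chord and blocks until it fires; the assumption that read() returns the registered __u16 id follows from that comment rather than from a documented ABI.

#include <fcntl.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>
#include <linux/input.h>
#include <linux/keychord.h>

int wait_for_voldown_power(void)
{
	char buf[sizeof(struct input_keychord) + 2 * sizeof(__u16)];
	struct input_keychord *kc = (struct input_keychord *)buf;
	__u16 id;
	int fd = open("/dev/keychord", O_RDWR);

	if (fd < 0)
		return -1;
	memset(buf, 0, sizeof(buf));
	kc->version = KEYCHORD_VERSION;
	kc->id = 1;
	kc->count = 2;
	kc->keycodes[0] = KEY_VOLUMEDOWN;
	kc->keycodes[1] = KEY_POWER;
	if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
		return -1;
	if (read(fd, &id, sizeof(id)) != (ssize_t)sizeof(id))
		return -1;
	return id;	/* the id registered above, i.e. 1 */
}
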
diff --git a/include/uapi/linux/mmc/ioctl.h b/include/uapi/linux/mmc/ioctl.h
index 1f5e689..98ad956 100644
--- a/include/uapi/linux/mmc/ioctl.h
+++ b/include/uapi/linux/mmc/ioctl.h
@@ -47,6 +47,19 @@
 
 #define MMC_IOC_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_cmd)
 
+struct mmc_ioc_rpmb_req {
+	__u16 type;                     /* RPMB request type */
+	__u16 *result;                  /* response or request result */
+	__u16 blk_cnt;                  /* Number of blocks (half sector, 256B) */
+	__u16 addr;                     /* data address */
+	__u32 *wc;                      /* write counter */
+	__u8 *nonce;                    /* Random number */
+	__u8 *data;                     /* Buffer of the user data */
+	__u8 *mac;                      /* Message Authentication Code */
+};
+
+#define MMC_IOC_RPMB_REQ _IOWR(MMC_BLOCK_MAJOR, 1, struct mmc_ioc_rpmb_req)
+
 /*
  * Since this ioctl is only meant to enhance (and not replace) normal access
  * to the mmc bus device, an upper data transfer limit of MMC_IOC_MAX_BYTES
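
A hedged sketch of driving MMC_IOC_RPMB_REQ from userspace; the request-type value (2 = read write counter, per the eMMC spec ordering) and the RPMB device node name are assumptions, since this header defines only the struct and the ioctl number.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/mmc/ioctl.h>

long rpmb_read_counter(const char *dev)	/* e.g. "/dev/mmcblk0rpmb" */
{
	__u16 result = 0;
	__u32 wc = 0;
	__u8 nonce[16] = { 0 };
	struct mmc_ioc_rpmb_req req = {
		.type = 2,		/* assumed: read write counter */
		.result = &result,
		.wc = &wc,
		.nonce = nonce,
	};
	int fd = open(dev, O_RDWR);

	if (fd < 0 || ioctl(fd, MMC_IOC_RPMB_REQ, &req) < 0)
		return -1;
	return wc;
}
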
diff --git a/include/uapi/linux/msdos_fs.h b/include/uapi/linux/msdos_fs.h
index f055e58..4975d35 100644
--- a/include/uapi/linux/msdos_fs.h
+++ b/include/uapi/linux/msdos_fs.h
@@ -45,6 +45,7 @@
 
 #define CASE_LOWER_BASE	8	/* base is lower case */
 #define CASE_LOWER_EXT	16	/* extension is lower case */
+#define FAT_NO_83NAME	32	/* no 8.3 short filename for this file */
 
 #define DELETED_FLAG	0xe5	/* marks file as deleted when in name[0] */
 #define IS_FREE(n)	(!*(n) || *(n) == DELETED_FLAG)
@@ -104,6 +105,7 @@
 /* <linux/videotext.h> has used 0x72 ('r') in collision, so skip a few */
 #define FAT_IOCTL_GET_ATTRIBUTES	_IOR('r', 0x10, __u32)
 #define FAT_IOCTL_SET_ATTRIBUTES	_IOW('r', 0x11, __u32)
+#define VFAT_IOCTL_GET_VOLUME_ID	_IOR('r', 0x12, __u32)
 
 struct fat_boot_sector {
 	__u8	ignored[3];	/* Boot strap short or near jump */
@@ -161,6 +163,17 @@
 	__le32   reserved2[4];
 };
 
+struct fat_boot_bsx {
+	__u8     drive;		/* drive number */
+	__u8     reserved1;
+	__u8     signature;	/* extended boot signature */
+	__u8     vol_id[4];	/* volume ID */
+	__u8     vol_label[11];	/* volume label */
+	__u8     type[8];	/* file system type */
+};
+#define FAT16_BSX_OFFSET 36 /* offset of fat_boot_bsx in FAT12 and FAT16 */
+#define FAT32_BSX_OFFSET 64 /* offset of fat_boot_bsx in FAT32 */
+
 struct msdos_dir_entry {
 	__u8	name[MSDOS_NAME];/* name and extension */
 	__u8	attr;		/* attribute bits */
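
The new VFAT_IOCTL_GET_VOLUME_ID reads a __u32; a sketch that prints it in the familiar XXXX-XXXX form, assuming the fd refers to the root directory of a mounted VFAT filesystem:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/msdos_fs.h>

int print_volume_id(const char *mountpoint)
{
	__u32 id;
	int fd = open(mountpoint, O_RDONLY);

	if (fd < 0 || ioctl(fd, VFAT_IOCTL_GET_VOLUME_ID, &id) < 0)
		return -1;
	printf("%04X-%04X\n", id >> 16, id & 0xFFFF);
	return 0;
}
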
diff --git a/include/uapi/linux/netfilter/xt_IDLETIMER.h b/include/uapi/linux/netfilter/xt_IDLETIMER.h
index 208ae93..faaa28b 100644
--- a/include/uapi/linux/netfilter/xt_IDLETIMER.h
+++ b/include/uapi/linux/netfilter/xt_IDLETIMER.h
@@ -4,6 +4,7 @@
  * Header file for Xtables timer target module.
  *
  * Copyright (C) 2004, 2010 Nokia Corporation
+ *
  * Written by Timo Teras <ext-timo.teras@nokia.com>
  *
  * Converted to x_tables and forward-ported to 2.6.34
@@ -32,12 +33,19 @@
 #include <linux/types.h>
 
 #define MAX_IDLETIMER_LABEL_SIZE 28
+#define NLMSG_MAX_SIZE 64
+
+#define NL_EVENT_TYPE_INACTIVE 0
+#define NL_EVENT_TYPE_ACTIVE 1
 
 struct idletimer_tg_info {
 	__u32 timeout;
 
 	char label[MAX_IDLETIMER_LABEL_SIZE];
 
+	/* Use netlink messages for notification in addition to sysfs */
+	__u8 send_nl_msg;
+
 	/* for kernel module internal use only */
 	struct idletimer_tg *timer __attribute__((aligned(8)));
 };
diff --git a/include/uapi/linux/netfilter/xt_socket.h b/include/uapi/linux/netfilter/xt_socket.h
index 26d7217..6359456 100644
--- a/include/uapi/linux/netfilter/xt_socket.h
+++ b/include/uapi/linux/netfilter/xt_socket.h
@@ -11,4 +11,10 @@
 	__u8 flags;
 };
 
+void xt_socket_put_sk(struct sock *sk);
+struct sock *xt_socket_get4_sk(const struct sk_buff *skb,
+			       struct xt_action_param *par);
+struct sock *xt_socket_get6_sk(const struct sk_buff *skb,
+			       struct xt_action_param *par);
+
 #endif /* _XT_SOCKET_H */
diff --git a/include/uapi/linux/nfs_mount.h b/include/uapi/linux/nfs_mount.h
index 576bddd..9541d71 100644
--- a/include/uapi/linux/nfs_mount.h
+++ b/include/uapi/linux/nfs_mount.h
@@ -20,7 +20,7 @@
  * mount-to-kernel version compatibility.  Some of these aren't used yet
  * but here they are anyway.
  */
-#define NFS_MOUNT_VERSION	6
+#define NFS_MOUNT_VERSION	7
 #define NFS_MAX_CONTEXT_LEN	256
 
 struct nfs_mount_data {
@@ -43,6 +43,8 @@
 	struct nfs3_fh	root;			/* 4 */
 	int		pseudoflavor;		/* 5 */
 	char		context[NFS_MAX_CONTEXT_LEN + 1];	/* 6 */
+	int		nfs_prog; /* 7 */
+	int		mount_prog; /* 7 */
 };
 
 /* bits in the flags field visible to user space */
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index d1e48b5..11c8c17 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -646,6 +646,28 @@
  * @NL80211_CMD_CRIT_PROTOCOL_STOP: Indicates the connection reliability can
  *	return back to normal.
  *
+ * @NL80211_CMD_GET_COALESCE: Get currently supported coalesce rules.
+ * @NL80211_CMD_SET_COALESCE: Configure coalesce rules or clear existing rules.
+ *
+ * @NL80211_CMD_CHANNEL_SWITCH: Perform a channel switch by announcing
+ *	the new channel information (Channel Switch Announcement - CSA)
+ *	in the beacon for some time (as defined in the
+ *	%NL80211_ATTR_CH_SWITCH_COUNT parameter) and then change to the
+ *	new channel. Userspace provides the new channel information (using
+ *	%NL80211_ATTR_WIPHY_FREQ and the attributes determining channel
+ *	width). %NL80211_ATTR_CH_SWITCH_BLOCK_TX may be supplied to inform
+ *	other stations that transmission must be blocked until the channel
+ *	switch is complete.
+ *
+ * @NL80211_CMD_VENDOR: Vendor-specified command/event. The command is specified
+ *	by the %NL80211_ATTR_VENDOR_ID attribute and a sub-command in
+ *	%NL80211_ATTR_VENDOR_SUBCMD. Parameter(s) can be transported in
+ *	%NL80211_ATTR_VENDOR_DATA.
+ *	For feature advertisement, the %NL80211_ATTR_VENDOR_DATA attribute is
+ *	used in the wiphy data as a nested attribute containing descriptions
+ *	(&struct nl80211_vendor_cmd_info) of the supported vendor commands.
+ *	This may also be sent as an event with the same attributes.
+ *
  * @NL80211_CMD_MAX: highest used command number
  * @__NL80211_CMD_AFTER_LAST: internal use
  */
@@ -808,6 +830,13 @@
 	NL80211_CMD_CRIT_PROTOCOL_START,
 	NL80211_CMD_CRIT_PROTOCOL_STOP,
 
+	NL80211_CMD_GET_COALESCE,
+	NL80211_CMD_SET_COALESCE,
+
+	NL80211_CMD_CHANNEL_SWITCH,
+
+	NL80211_CMD_VENDOR,
+
 	/* add new commands above here */
 
 	/* used to define NL80211_CMD_MAX below */
@@ -1429,6 +1458,48 @@
  * @NL80211_ATTR_MAX_CRIT_PROT_DURATION: duration in milliseconds in which
  *      the connection should have increased reliability (u16).
  *
+ * @NL80211_ATTR_PEER_AID: Association ID for the peer TDLS station (u16).
+ *	This is similar to @NL80211_ATTR_STA_AID but with a difference of being
+ *	allowed to be used with the first @NL80211_CMD_SET_STATION command to
+ *	update a TDLS peer STA entry.
+ *
+ * @NL80211_ATTR_COALESCE_RULE: Coalesce rule information.
+ *
+ * @NL80211_ATTR_CH_SWITCH_COUNT: u32 attribute specifying the number of TBTT's
+ *	until the channel switch event.
+ * @NL80211_ATTR_CH_SWITCH_BLOCK_TX: flag attribute specifying that transmission
+ *	must be blocked on the current channel (before the channel switch
+ *	operation).
+ * @NL80211_ATTR_CSA_IES: Nested set of attributes containing the IE information
+ *	for the time while performing a channel switch.
+ * @NL80211_ATTR_CSA_C_OFF_BEACON: Offset of the channel switch counter
+ *	field in the beacons tail (%NL80211_ATTR_BEACON_TAIL).
+ * @NL80211_ATTR_CSA_C_OFF_PRESP: Offset of the channel switch counter
+ *	field in the probe response (%NL80211_ATTR_PROBE_RESP).
+ *
+ * @NL80211_ATTR_RXMGMT_FLAGS: flags for nl80211_send_mgmt(), u32.
+ *	As specified in the &enum nl80211_rxmgmt_flags.
+ *
+ * @NL80211_ATTR_STA_SUPPORTED_CHANNELS: array of supported channels.
+ *
+ * @NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES: array of supported
+ *      operating classes.
+ *
+ * @NL80211_ATTR_HANDLE_DFS: A flag indicating whether user space
+ *	controls DFS operation in IBSS mode. If the flag is included in
+ *	%NL80211_CMD_JOIN_IBSS request, the driver will allow use of DFS
+ *	channels and reports radar events to userspace. Userspace is required
+ *	to react to radar events, e.g. initiate a channel switch or leave the
+ *	IBSS network.
+ *
+ * @NL80211_ATTR_VENDOR_ID: The vendor ID, either a 24-bit OUI or, if
+ *	%NL80211_VENDOR_ID_IS_LINUX is set, a special Linux ID (not used yet)
+ * @NL80211_ATTR_VENDOR_SUBCMD: vendor sub-command
+ * @NL80211_ATTR_VENDOR_DATA: data for the vendor command, if any; this
+ *	attribute is also used for vendor command feature advertisement
+ * @NL80211_ATTR_VENDOR_EVENTS: used for event list advertising in the wiphy
+ *	info, containing a nested array of possible events
+ *
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
  */
@@ -1727,6 +1798,35 @@
 	NL80211_ATTR_CRIT_PROT_ID,
 	NL80211_ATTR_MAX_CRIT_PROT_DURATION,
 
+	NL80211_ATTR_PEER_AID,
+
+	NL80211_ATTR_COALESCE_RULE,
+
+	NL80211_ATTR_CH_SWITCH_COUNT,
+	NL80211_ATTR_CH_SWITCH_BLOCK_TX,
+	NL80211_ATTR_CSA_IES,
+	NL80211_ATTR_CSA_C_OFF_BEACON,
+	NL80211_ATTR_CSA_C_OFF_PRESP,
+
+	NL80211_ATTR_RXMGMT_FLAGS,
+
+	NL80211_ATTR_STA_SUPPORTED_CHANNELS,
+
+	NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES,
+
+	NL80211_ATTR_HANDLE_DFS,
+
+	NL80211_ATTR_SUPPORT_5_MHZ,
+	NL80211_ATTR_SUPPORT_10_MHZ,
+
+	NL80211_ATTR_OPMODE_NOTIF,
+
+	NL80211_ATTR_VENDOR_ID,
+	NL80211_ATTR_VENDOR_SUBCMD,
+	NL80211_ATTR_VENDOR_DATA,
+
+	NL80211_ATTR_VENDOR_EVENTS,
+
 	/* add attributes here, update the policy in nl80211.c */
 
 	__NL80211_ATTR_AFTER_LAST,
@@ -2278,9 +2378,15 @@
  * enum nl80211_sched_scan_match_attr - scheduled scan match attributes
  * @__NL80211_SCHED_SCAN_MATCH_ATTR_INVALID: attribute number 0 is reserved
  * @NL80211_SCHED_SCAN_MATCH_ATTR_SSID: SSID to be used for matching,
- * only report BSS with matching SSID.
+ *	only report BSS with matching SSID.
  * @NL80211_SCHED_SCAN_MATCH_ATTR_RSSI: RSSI threshold (in dBm) for reporting a
- *	BSS in scan results. Filtering is turned off if not specified.
+ *	BSS in scan results. Filtering is turned off if not specified. Note that
+ *	if this attribute is in a match set of its own, then it is treated as
+ *	the default value for all matchsets with an SSID, rather than being a
+ *	matchset of its own without an RSSI filter. This is due to problems with
+ *	how this API was implemented in the past. Also, due to the same problem,
+ *	the only way to create a matchset with only an RSSI filter (with this
+ *	attribute) is if there's only a single matchset with the RSSI attribute.
  * @NL80211_SCHED_SCAN_MATCH_ATTR_MAX: highest scheduled scan filter
  *	attribute number currently defined
  * @__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST: internal use
@@ -3721,4 +3827,24 @@
 /* maximum duration for critical protocol measures */
 #define NL80211_CRIT_PROTO_MAX_DURATION		5000 /* msec */
 
+/*
+ * If this flag is unset, the lower 24 bits are an OUI; if it is set,
+ * a Linux nl80211 vendor ID is used (no such IDs are allocated
+ * yet, so no such ID is valid so far)
+ */
+#define NL80211_VENDOR_ID_IS_LINUX	0x80000000
+
+/**
+ * struct nl80211_vendor_cmd_info - vendor command data
+ * @vendor_id: If the %NL80211_VENDOR_ID_IS_LINUX flag is clear, then the
+ *	value is a 24-bit OUI; if it is set then a separately allocated ID
+ *	may be used, but no such IDs are allocated yet. New IDs should be
+ *	added to this file when needed.
+ * @subcmd: sub-command ID for the command
+ */
+struct nl80211_vendor_cmd_info {
+	__u32 vendor_id;
+	__u32 subcmd;
+};
+
 #endif /* __LINUX_NL80211_H */
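
A small sketch of interpreting the vendor command descriptors defined above, splitting on the NL80211_VENDOR_ID_IS_LINUX bit:

#include <stdio.h>
#include <linux/nl80211.h>

static void describe_vendor_cmd(const struct nl80211_vendor_cmd_info *info)
{
	if (info->vendor_id & NL80211_VENDOR_ID_IS_LINUX)
		printf("linux-allocated id 0x%x, subcmd %u\n",
		       info->vendor_id & ~NL80211_VENDOR_ID_IS_LINUX,
		       info->subcmd);
	else
		printf("OUI %06x, subcmd %u\n", info->vendor_id, info->subcmd);
}
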
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 289760f..28bb0b3 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -149,4 +149,13 @@
 
 #define PR_GET_TID_ADDRESS	40
 
+/* Set the timer slack for an arbitrary thread.
+ * arg2: slack value, 0 means "use default"
+ * arg3: pid of the thread whose timer slack needs to be set
+ */
+#define PR_SET_TIMERSLACK_PID 41
+
+#define PR_SET_VMA		0x53564d41
+# define PR_SET_VMA_ANON_NAME		0
+
 #endif /* _LINUX_PRCTL_H */
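
A sketch of both new options; the extra PR_SET_VMA arguments (start address, length, pointer to a NUL-terminated name) follow the Android convention and are an assumption here, as the header defines only the option values.

#include <sys/prctl.h>
#include <sys/types.h>

#ifndef PR_SET_TIMERSLACK_PID
#define PR_SET_TIMERSLACK_PID	41
#endif
#ifndef PR_SET_VMA
#define PR_SET_VMA		0x53564d41
#define PR_SET_VMA_ANON_NAME	0
#endif

int tune_thread(pid_t tid, void *start, unsigned long len)
{
	/* 50 us of timer slack for thread `tid` */
	if (prctl(PR_SET_TIMERSLACK_PID, 50000, tid, 0, 0) < 0)
		return -1;
	/* label an anonymous mapping for /proc/<pid>/maps */
	return prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
		     (unsigned long)start, len, (unsigned long)"example-heap");
}
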
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 7a2144e..07c1146 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -297,6 +297,7 @@
 	RTA_TABLE,
 	RTA_MARK,
 	RTA_MFC_STATS,
+	RTA_UID,
 	__RTA_MAX
 };
 
diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h
index ac2dc9f..0f238a4 100644
--- a/include/uapi/linux/seccomp.h
+++ b/include/uapi/linux/seccomp.h
@@ -10,6 +10,13 @@
 #define SECCOMP_MODE_STRICT	1 /* uses hard-coded filter. */
 #define SECCOMP_MODE_FILTER	2 /* uses user-supplied filter. */
 
+/* Valid operations for seccomp syscall. */
+#define SECCOMP_SET_MODE_STRICT	0
+#define SECCOMP_SET_MODE_FILTER	1
+
+/* Valid flags for SECCOMP_SET_MODE_FILTER */
+#define SECCOMP_FILTER_FLAG_TSYNC	1
+
 /*
  * All BPF programs must return a 32-bit value.
  * The bottom 16-bits are for optional return data.
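
A sketch of installing one filter across all threads with the new flag, assuming this kernel also carries the seccomp(2) syscall (the flag is only reachable through it, not through prctl):

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

static int install_filter_all_threads(struct sock_fprog *prog)
{
	/* returns 0, or the TID of a thread that could not be synced */
	return syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
		       SECCOMP_FILTER_FLAG_TSYNC, prog);
}
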
diff --git a/include/uapi/linux/sockios.h b/include/uapi/linux/sockios.h
index 7997a50..f7ffe36 100644
--- a/include/uapi/linux/sockios.h
+++ b/include/uapi/linux/sockios.h
@@ -65,6 +65,7 @@
 #define SIOCDIFADDR	0x8936		/* delete PA address		*/
 #define	SIOCSIFHWBROADCAST	0x8937	/* set hardware broadcast addr	*/
 #define SIOCGIFCOUNT	0x8938		/* get number of devices */
+#define SIOCKILLADDR	0x8939		/* kill sockets with this local addr */
 
 #define SIOCGIFBR	0x8940		/* Bridging support		*/
 #define SIOCSIFBR	0x8941		/* Set bridging options 	*/
diff --git a/include/uapi/linux/uhid.h b/include/uapi/linux/uhid.h
index e9ed951..24ec6e5 100644
--- a/include/uapi/linux/uhid.h
+++ b/include/uapi/linux/uhid.h
@@ -32,8 +32,12 @@
 	UHID_OUTPUT,
 	UHID_OUTPUT_EV,
 	UHID_INPUT,
+	UHID_GET_REPORT,
+	UHID_GET_REPORT_REPLY,
 	UHID_FEATURE,
 	UHID_FEATURE_ANSWER,
+	UHID_SET_REPORT,
+	UHID_SET_REPORT_REPLY,
 };
 
 struct uhid_create_req {
@@ -69,6 +73,32 @@
 	__u8 rtype;
 } __attribute__((__packed__));
 
+struct uhid_get_report_req {
+	__u32 id;
+	__u8 rnum;
+	__u8 rtype;
+} __attribute__((__packed__));
+
+struct uhid_get_report_reply_req {
+	__u32 id;
+	__u16 err;
+	__u16 size;
+	__u8 data[UHID_DATA_MAX];
+} __attribute__((__packed__));
+
+struct uhid_set_report_req {
+	__u32 id;
+	__u8 rnum;
+	__u8 rtype;
+	__u16 size;
+	__u8 data[UHID_DATA_MAX];
+} __attribute__((__packed__));
+
+struct uhid_set_report_reply_req {
+	__u32 id;
+	__u16 err;
+} __attribute__((__packed__));
+
 struct uhid_output_ev_req {
 	__u16 type;
 	__u16 code;
@@ -97,7 +127,11 @@
 		struct uhid_output_req output;
 		struct uhid_output_ev_req output_ev;
 		struct uhid_feature_req feature;
+		struct uhid_get_report_req get_report;
 		struct uhid_feature_answer_req feature_answer;
+		struct uhid_get_report_reply_req get_report_reply;
+		struct uhid_set_report_req set_report;
+		struct uhid_set_report_reply_req set_report_reply;
 	} u;
 } __attribute__((__packed__));
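
A sketch of servicing the new GET_REPORT round trip from a uhid userspace device, using the uhid_event wrapper (type plus the union above) defined earlier in this header; a real device would copy actual report bytes instead of answering with an empty report.

#include <string.h>
#include <unistd.h>
#include <linux/uhid.h>

static int answer_get_report(int fd, const struct uhid_event *req)
{
	struct uhid_event ev;

	memset(&ev, 0, sizeof(ev));
	ev.type = UHID_GET_REPORT_REPLY;
	ev.u.get_report_reply.id = req->u.get_report.id;
	ev.u.get_report_reply.err = 0;	/* success */
	ev.u.get_report_reply.size = 0;	/* empty report for this sketch */
	return write(fd, &ev, sizeof(ev)) < 0 ? -1 : 0;
}
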
 
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index aa33fd1..29860d6 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -294,6 +294,7 @@
 #define USB_CLASS_CSCID			0x0b	/* chip+ smart card */
 #define USB_CLASS_CONTENT_SEC		0x0d	/* content security */
 #define USB_CLASS_VIDEO			0x0e
+#define USB_CLASS_DEBUG			0xdc
 #define USB_CLASS_WIRELESS_CONTROLLER	0xe0
 #define USB_CLASS_MISC			0xef
 #define USB_CLASS_APP_SPEC		0xfe
diff --git a/include/uapi/linux/usb/f_accessory.h b/include/uapi/linux/usb/f_accessory.h
new file mode 100644
index 0000000..0baeb7d
--- /dev/null
+++ b/include/uapi/linux/usb/f_accessory.h
@@ -0,0 +1,146 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_USB_F_ACCESSORY_H
+#define _UAPI_LINUX_USB_F_ACCESSORY_H
+
+/* Use Google Vendor ID when in accessory mode */
+#define USB_ACCESSORY_VENDOR_ID 0x18D1
+
+
+/* Product ID to use when in accessory mode */
+#define USB_ACCESSORY_PRODUCT_ID 0x2D00
+
+/* Product ID to use when in accessory mode and adb is enabled */
+#define USB_ACCESSORY_ADB_PRODUCT_ID 0x2D01
+
+/* Indexes for strings sent by the host via ACCESSORY_SEND_STRING */
+#define ACCESSORY_STRING_MANUFACTURER   0
+#define ACCESSORY_STRING_MODEL          1
+#define ACCESSORY_STRING_DESCRIPTION    2
+#define ACCESSORY_STRING_VERSION        3
+#define ACCESSORY_STRING_URI            4
+#define ACCESSORY_STRING_SERIAL         5
+
+/* Control request for retrieving device's protocol version
+ *
+ *	requestType:    USB_DIR_IN | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_GET_PROTOCOL
+ *	value:          0
+ *	index:          0
+ *	data            version number (16 bits little endian)
+ *                     1 for original accessory support
+ *                     2 adds HID and device-to-host audio support
+ */
+#define ACCESSORY_GET_PROTOCOL  51
+
+/* Control request for host to send a string to the device
+ *
+ *	requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_SEND_STRING
+ *	value:          0
+ *	index:          string ID
+ *	data            zero terminated UTF8 string
+ *
+ *  The device can later retrieve these strings via the
+ *  ACCESSORY_GET_STRING_* ioctls
+ */
+#define ACCESSORY_SEND_STRING   52
+
+/* Control request for starting device in accessory mode.
+ * The host sends this after setting all its strings to the device.
+ *
+ *	requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_START
+ *	value:          0
+ *	index:          0
+ *	data            none
+ */
+#define ACCESSORY_START         53
+
+/* Control request for registering a HID device.
+ * Upon registering, a unique ID is sent by the accessory in the
+ * value parameter. This ID will be used for future commands for
+ * the device
+ *
+ *	requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_REGISTER_HID
+ *	value:          Accessory assigned ID for the HID device
+ *	index:          total length of the HID report descriptor
+ *	data            none
+ */
+#define ACCESSORY_REGISTER_HID         54
+
+/* Control request for unregistering a HID device.
+ *
+ *	requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_UNREGISTER_HID
+ *	value:          Accessory assigned ID for the HID device
+ *	index:          0
+ *	data            none
+ */
+#define ACCESSORY_UNREGISTER_HID         55
+
+/* Control request for sending the HID report descriptor.
+ * If the HID descriptor is longer than the endpoint zero max packet size,
+ * the descriptor will be sent in multiple ACCESSORY_SET_HID_REPORT_DESC
+ * commands. The data for the descriptor must be sent sequentially
+ * if multiple packets are needed.
+ *
+ *	requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_SET_HID_REPORT_DESC
+ *	value:          Accessory assigned ID for the HID device
+ *	index:          offset of data in descriptor
+ *                      (needed when HID descriptor is too big for one packet)
+ *	data            the HID report descriptor
+ */
+#define ACCESSORY_SET_HID_REPORT_DESC         56
+
+/* Control request for sending HID events.
+ *
+ *	requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_SEND_HID_EVENT
+ *	value:          Accessory assigned ID for the HID device
+ *	index:          0
+ *	data            the HID report for the event
+ */
+#define ACCESSORY_SEND_HID_EVENT         57
+
+/* Control request for setting the audio mode.
+ *
+ *	requestType:	USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_SET_AUDIO_MODE
+ *	value:          0 - no audio
+ *                     1 - device to host, 44100 16-bit stereo PCM
+ *	index:          0
+ *	data            none
+ */
+#define ACCESSORY_SET_AUDIO_MODE         58
+
+/* ioctls for retrieving strings set by the host */
+#define ACCESSORY_GET_STRING_MANUFACTURER   _IOW('M', 1, char[256])
+#define ACCESSORY_GET_STRING_MODEL          _IOW('M', 2, char[256])
+#define ACCESSORY_GET_STRING_DESCRIPTION    _IOW('M', 3, char[256])
+#define ACCESSORY_GET_STRING_VERSION        _IOW('M', 4, char[256])
+#define ACCESSORY_GET_STRING_URI            _IOW('M', 5, char[256])
+#define ACCESSORY_GET_STRING_SERIAL         _IOW('M', 6, char[256])
+/* returns 1 if there is a start request pending */
+#define ACCESSORY_IS_START_REQUESTED        _IO('M', 7)
+/* returns audio mode (set via the ACCESSORY_SET_AUDIO_MODE control request) */
+#define ACCESSORY_GET_AUDIO_MODE            _IO('M', 8)
+
+#endif /* _UAPI_LINUX_USB_F_ACCESSORY_H */
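
From the gadget side, the strings sent by the host are retrieved with the ioctls above; the /dev/usb_accessory node name is what this driver conventionally exposes, but it is an assumption here.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/usb/f_accessory.h>

int dump_accessory_strings(void)
{
	char buf[256];
	int fd = open("/dev/usb_accessory", O_RDWR);	/* assumed node */

	if (fd < 0)
		return -1;
	if (ioctl(fd, ACCESSORY_GET_STRING_MANUFACTURER, buf) == 0)
		printf("manufacturer: %s\n", buf);
	if (ioctl(fd, ACCESSORY_GET_STRING_MODEL, buf) == 0)
		printf("model: %s\n", buf);
	return 0;
}
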
diff --git a/include/uapi/linux/usb/f_mtp.h b/include/uapi/linux/usb/f_mtp.h
new file mode 100644
index 0000000..5032918
--- /dev/null
+++ b/include/uapi/linux/usb/f_mtp.h
@@ -0,0 +1,61 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_USB_F_MTP_H
+#define _UAPI_LINUX_USB_F_MTP_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+struct mtp_file_range {
+	/* file descriptor for file to transfer */
+	int			fd;
+	/* offset in file for start of transfer */
+	loff_t		offset;
+	/* number of bytes to transfer */
+	int64_t		length;
+	/* MTP command ID for data header,
+	 * used only for MTP_SEND_FILE_WITH_HEADER
+	 */
+	uint16_t	command;
+	/* MTP transaction ID for data header,
+	 * used only for MTP_SEND_FILE_WITH_HEADER
+	 */
+	uint32_t	transaction_id;
+};
+
+struct mtp_event {
+	/* size of the event */
+	size_t		length;
+	/* event data to send */
+	void		*data;
+};
+
+/* Sends the specified file range to the host */
+#define MTP_SEND_FILE              _IOW('M', 0, struct mtp_file_range)
+/* Receives data from the host and writes it to a file.
+ * The file is created if it does not exist.
+ */
+#define MTP_RECEIVE_FILE           _IOW('M', 1, struct mtp_file_range)
+/* Sends an event to the host via the interrupt endpoint */
+#define MTP_SEND_EVENT             _IOW('M', 3, struct mtp_event)
+/* Sends the specified file range to the host,
+ * with a 12 byte MTP data packet header at the beginning.
+ */
+#define MTP_SEND_FILE_WITH_HEADER  _IOW('M', 4, struct mtp_file_range)
+
+#endif /* _UAPI_LINUX_USB_F_MTP_H */
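
A sketch of streaming a file to the host through the MTP gadget; the /dev/mtp_usb node name is an assumption.

#define _GNU_SOURCE
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <linux/usb/f_mtp.h>

int mtp_send(int mtp_fd, int src_fd, loff_t offset, int64_t length)
{
	struct mtp_file_range mfr = {
		.fd = src_fd,
		.offset = offset,
		.length = length,
	};

	/* blocks until the whole range has been handed to the host */
	return ioctl(mtp_fd, MTP_SEND_FILE, &mfr);
}
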
diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h
index 602dc6c..165e705 100644
--- a/include/uapi/sound/compress_params.h
+++ b/include/uapi/sound/compress_params.h
@@ -57,6 +57,7 @@
 #define MAX_NUM_CODECS 32
 #define MAX_NUM_CODEC_DESCRIPTORS 32
 #define MAX_NUM_BITRATES 32
+#define MAX_NUM_SAMPLE_RATES 32
 
 /* Codecs are listed linearly to allow for extensibility */
 #define SND_AUDIOCODEC_PCM                   ((__u32) 0x00000001)
@@ -324,7 +325,8 @@
 
 /** struct snd_codec_desc - description of codec capabilities
  * @max_ch: Maximum number of audio channels
- * @sample_rates: Sampling rates in Hz, use SNDRV_PCM_RATE_xxx for this
+ * @sample_rates: Sampling rates in Hz, use values like 48000 for this
+ * @num_sample_rates: Number of valid values in sample_rates array
  * @bit_rate: Indexed array containing supported bit rates
  * @num_bitrates: Number of valid values in bit_rate array
  * @rate_control: value is specified by SND_RATECONTROLMODE defines.
@@ -346,7 +348,8 @@
 
 struct snd_codec_desc {
 	__u32 max_ch;
-	__u32 sample_rates;
+	__u32 sample_rates[MAX_NUM_SAMPLE_RATES];
+	__u32 num_sample_rates;
 	__u32 bit_rate[MAX_NUM_BITRATES];
 	__u32 num_bitrates;
 	__u32 rate_control;
@@ -364,7 +367,8 @@
  * @ch_out: Number of output channels. In case of contradiction between
  *		this field and the channelMode field, the channelMode field
  *		overrides.
- * @sample_rate: Audio sample rate of input data
+ * @sample_rate: Audio sample rate of input data in Hz, use values like 48000
+ *		for this.
  * @bit_rate: Bitrate of encoded data. May be ignored by decoders
  * @rate_control: Encoding rate control. See SND_RATECONTROLMODE defines.
  *               Encoders may rely on profiles for quality levels.
diff --git a/include/uapi/sound/effect_offload.h b/include/uapi/sound/effect_offload.h
new file mode 100644
index 0000000..9c30031
--- /dev/null
+++ b/include/uapi/sound/effect_offload.h
@@ -0,0 +1,62 @@
+/*
+ *  effect_offload.h - effect offload header definitions
+ *
+ *  Copyright (C) 2013 Intel Corporation
+ *  Authors:	Vinod Koul <vinod.koul@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+#ifndef __EFFECT_OFFLOAD_H
+#define __EFFECT_OFFLOAD_H
+
+#include <linux/types.h>
+
+#define SNDRV_EFFECT_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 0)
+
+struct snd_effect {
+	char uuid[16];  /* effect UUID */
+	int device;	/* streaming interface for effect insertion */
+	int pos;	/* position of effect to be placed in effect chain */
+	int mode;	/* Backend for Global device (Headset/Speaker) */
+};
+
+struct snd_effect_params {
+	char uuid[16];
+	int device;
+	__u32 size;	/* size of parameter blob */
+	char *buffer;
+};
+
+struct snd_effect_caps {
+	__u32 size;	/* size of buffer to read effect descriptors */
+	char *buffer;
+};
+
+#define SNDRV_CTL_IOCTL_EFFECT_VERSION		_IOR('E', 0x00, int)
+#define SNDRV_CTL_IOCTL_EFFECT_CREATE		_IOW('E', 0x01,\
+						struct snd_effect *)
+#define SNDRV_CTL_IOCTL_EFFECT_DESTROY		_IOW('E', 0x02,\
+						struct snd_effect *)
+#define SNDRV_CTL_IOCTL_EFFECT_SET_PARAMS	_IOW('E', 0x03,\
+						struct snd_effect_params *)
+#define SNDRV_CTL_IOCTL_EFFECT_GET_PARAMS	_IOWR('E', 0x04,\
+						struct snd_effect_params *)
+#define SNDRV_CTL_IOCTL_EFFECT_QUERY_NUM	_IOR('E', 0x05, int)
+#define SNDRV_CTL_IOCTL_EFFECT_QUERY_CAPS	_IOWR('E', 0x06,\
+						struct snd_effect_caps *)
+#endif
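
A sketch of the simplest of these ioctls, the version query; which ALSA control node carries them depends on the card driver, so the path is an assumption.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sound/effect_offload.h>

int effect_offload_version(void)
{
	int ver = 0;
	int fd = open("/dev/snd/controlC0", O_RDWR);	/* assumed node */

	if (fd < 0 || ioctl(fd, SNDRV_CTL_IOCTL_EFFECT_VERSION, &ver) < 0)
		return -1;
	return ver;
}
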
diff --git a/include/uapi/video/adf.h b/include/uapi/video/adf.h
new file mode 100644
index 0000000..c5d2e62
--- /dev/null
+++ b/include/uapi/video/adf.h
@@ -0,0 +1,321 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_VIDEO_ADF_H_
+#define _UAPI_VIDEO_ADF_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_mode.h>
+
+#define ADF_NAME_LEN 32
+#define ADF_MAX_CUSTOM_DATA_SIZE 4096
+
+enum adf_interface_type {
+	ADF_INTF_DSI = 0,
+	ADF_INTF_eDP = 1,
+	ADF_INTF_DPI = 2,
+	ADF_INTF_VGA = 3,
+	ADF_INTF_DVI = 4,
+	ADF_INTF_HDMI = 5,
+	ADF_INTF_MEMORY = 6,
+	ADF_INTF_TYPE_DEVICE_CUSTOM = 128,
+	ADF_INTF_TYPE_MAX = (~(__u32)0),
+};
+
+#define ADF_INTF_FLAG_PRIMARY (1 << 0)
+#define ADF_INTF_FLAG_EXTERNAL (1 << 1)
+
+enum adf_event_type {
+	ADF_EVENT_VSYNC = 0,
+	ADF_EVENT_HOTPLUG = 1,
+	ADF_EVENT_DEVICE_CUSTOM = 128,
+	ADF_EVENT_TYPE_MAX = 255,
+};
+
+/**
+ * struct adf_set_event - start or stop subscribing to ADF events
+ *
+ * @type: the type of event to (un)subscribe
+ * @enabled: subscribe or unsubscribe
+ *
+ * After subscribing to an event, userspace may poll() the ADF object's fd
+ * to wait for events or read() to consume the event's data.
+ *
+ * ADF reserves event types 0 to %ADF_EVENT_DEVICE_CUSTOM-1 for its own events.
+ * Devices may use event types %ADF_EVENT_DEVICE_CUSTOM to %ADF_EVENT_TYPE_MAX-1
+ * for driver-private events.
+ */
+struct adf_set_event {
+	__u8 type;
+	__u8 enabled;
+};
+
+/**
+ * struct adf_event - common header for ADF event data
+ *
+ * @type: event type
+ * @length: total size of event data, header inclusive
+ */
+struct adf_event {
+	__u8 type;
+	__u32 length;
+};
+
+/**
+ * struct adf_vsync_event - ADF vsync event
+ *
+ * @base: event header (see &struct adf_event)
+ * @timestamp: time of vsync event, in nanoseconds
+ */
+struct adf_vsync_event {
+	struct adf_event base;
+	__aligned_u64 timestamp;
+};
+
+/**
+ * struct adf_hotplug_event - ADF display hotplug event
+ *
+ * @base: event header (see &struct adf_event)
+ * @connected: whether a display is now connected to the interface
+ */
+struct adf_hotplug_event {
+	struct adf_event base;
+	__u8 connected;
+};
+
+#define ADF_MAX_PLANES 4
+/**
+ * struct adf_buffer_config - description of buffer displayed by adf_post_config
+ *
+ * @overlay_engine: id of the target overlay engine
+ * @w: width of display region in pixels
+ * @h: height of display region in pixels
+ * @format: DRM-style fourcc, see drm_fourcc.h for standard formats
+ * @fd: dma_buf fd for each plane
+ * @offset: location of first pixel to scan out, in bytes
+ * @pitch: stride (i.e. length of a scanline including padding) in bytes
+ * @n_planes: number of planes in buffer
+ * @acquire_fence: sync_fence fd which will clear when the buffer is
+ *	ready for display, or <0 if the buffer is already ready
+ */
+struct adf_buffer_config {
+	__u32 overlay_engine;
+
+	__u32 w;
+	__u32 h;
+	__u32 format;
+
+	__s32 fd[ADF_MAX_PLANES];
+	__u32 offset[ADF_MAX_PLANES];
+	__u32 pitch[ADF_MAX_PLANES];
+	__u8 n_planes;
+
+	__s32 acquire_fence;
+};
+#define ADF_MAX_BUFFERS (4096 / sizeof(struct adf_buffer_config))
+
+/**
+ * struct adf_post_config - request to flip to a new set of buffers
+ *
+ * @n_interfaces: number of interfaces targeted by the flip (input)
+ * @interfaces: ids of interfaces targeted by the flip (input)
+ * @n_bufs: number of buffers displayed (input)
+ * @bufs: description of buffers displayed (input)
+ * @custom_data_size: size of driver-private data (input)
+ * @custom_data: driver-private data (input)
+ * @complete_fence: sync_fence fd which will clear when this
+ *	configuration has left the screen (output)
+ */
+struct adf_post_config {
+	size_t n_interfaces;
+	__u32 __user *interfaces;
+
+	size_t n_bufs;
+	struct adf_buffer_config __user *bufs;
+
+	size_t custom_data_size;
+	void __user *custom_data;
+
+	__s32 complete_fence;
+};
+#define ADF_MAX_INTERFACES (4096 / sizeof(__u32))
+
+/**
+ * struct adf_simple_buffer_allocate - request to allocate a "simple" buffer
+ *
+ * @w: width of buffer in pixels (input)
+ * @h: height of buffer in pixels (input)
+ * @format: DRM-style fourcc (input)
+ *
+ * @fd: dma_buf fd (output)
+ * @offset: location of first pixel, in bytes (output)
+ * @pitch: length of a scanline including padding, in bytes (output)
+ *
+ * Simple buffers are analogous to DRM's "dumb" buffers.  They have a single
+ * plane of linear RGB data which can be allocated and scanned out without
+ * any driver-private ioctls or data.
+ *
+ * @format must be a standard RGB format defined in drm_fourcc.h.
+ *
+ * ADF clients must NOT assume that an interface can scan out a simple buffer
+ * allocated by a different ADF interface, even if the two interfaces belong to
+ * the same ADF device.
+ */
+struct adf_simple_buffer_alloc {
+	__u16 w;
+	__u16 h;
+	__u32 format;
+
+	__s32 fd;
+	__u32 offset;
+	__u32 pitch;
+};
+
+/**
+ * struct adf_simple_post_config - request to flip to a single buffer without
+ * driver-private data
+ *
+ * @buf: description of buffer displayed (input)
+ * @complete_fence: sync_fence fd which will clear when this buffer has left the
+ * screen (output)
+ */
+struct adf_simple_post_config {
+	struct adf_buffer_config buf;
+	__s32 complete_fence;
+};
+
+/**
+ * struct adf_attachment_config - description of attachment between an overlay
+ * engine and an interface
+ *
+ * @overlay_engine: id of the overlay engine
+ * @interface: id of the interface
+ */
+struct adf_attachment_config {
+	__u32 overlay_engine;
+	__u32 interface;
+};
+
+/**
+ * struct adf_device_data - describes a display device
+ *
+ * @name: display device's name
+ * @n_attachments: the number of current attachments
+ * @attachments: list of current attachments
+ * @n_allowed_attachments: the number of allowed attachments
+ * @allowed_attachments: list of allowed attachments
+ * @custom_data_size: size of driver-private data
+ * @custom_data: driver-private data
+ */
+struct adf_device_data {
+	char name[ADF_NAME_LEN];
+
+	size_t n_attachments;
+	struct adf_attachment_config __user *attachments;
+
+	size_t n_allowed_attachments;
+	struct adf_attachment_config __user *allowed_attachments;
+
+	size_t custom_data_size;
+	void __user *custom_data;
+};
+#define ADF_MAX_ATTACHMENTS (4096 / sizeof(struct adf_attachment_config))
+
+/**
+ * struct adf_interface_data - describes a display interface
+ *
+ * @name: display interface's name
+ * @type: interface type (see enum @adf_interface_type)
+ * @id: which interface of type @type;
+ *	e.g. interface DSI.1 -> @type=@ADF_INTF_DSI, @id=1
+ * @flags: informational flags (bitmask of %ADF_INTF_FLAG_* values)
+ * @dpms_state: DPMS state (one of @DRM_MODE_DPMS_* defined in drm_mode.h)
+ * @hotplug_detect: whether a display is plugged in
+ * @width_mm: screen width in millimeters, or 0 if unknown
+ * @height_mm: screen height in millimeters, or 0 if unknown
+ * @current_mode: current display mode
+ * @n_available_modes: the number of hardware display modes
+ * @available_modes: list of hardware display modes
+ * @custom_data_size: size of driver-private data
+ * @custom_data: driver-private data
+ */
+struct adf_interface_data {
+	char name[ADF_NAME_LEN];
+
+	__u32 type;
+	__u32 id;
+	/* e.g. type=ADF_INTF_DSI, id=1 => DSI.1 */
+	__u32 flags;
+
+	__u8 dpms_state;
+	__u8 hotplug_detect;
+	__u16 width_mm;
+	__u16 height_mm;
+
+	struct drm_mode_modeinfo current_mode;
+	size_t n_available_modes;
+	struct drm_mode_modeinfo __user *available_modes;
+
+	size_t custom_data_size;
+	void __user *custom_data;
+};
+#define ADF_MAX_MODES (4096 / sizeof(struct drm_mode_modeinfo))
+
+/**
+ * struct adf_overlay_engine_data - describes an overlay engine
+ *
+ * @name: overlay engine's name
+ * @n_supported_formats: number of supported formats
+ * @supported_formats: list of supported formats
+ * @custom_data_size: size of driver-private data
+ * @custom_data: driver-private data
+ */
+struct adf_overlay_engine_data {
+	char name[ADF_NAME_LEN];
+
+	size_t n_supported_formats;
+	__u32 __user *supported_formats;
+
+	size_t custom_data_size;
+	void __user *custom_data;
+};
+#define ADF_MAX_SUPPORTED_FORMATS (4096 / sizeof(__u32))
+
+#define ADF_IOCTL_TYPE		'D'
+#define ADF_IOCTL_NR_CUSTOM	128
+
+#define ADF_SET_EVENT		_IOW(ADF_IOCTL_TYPE, 0, struct adf_set_event)
+#define ADF_BLANK		_IOW(ADF_IOCTL_TYPE, 1, __u8)
+#define ADF_POST_CONFIG		_IOW(ADF_IOCTL_TYPE, 2, struct adf_post_config)
+#define ADF_SET_MODE		_IOW(ADF_IOCTL_TYPE, 3, \
+					struct drm_mode_modeinfo)
+#define ADF_GET_DEVICE_DATA	_IOR(ADF_IOCTL_TYPE, 4, struct adf_device_data)
+#define ADF_GET_INTERFACE_DATA	_IOR(ADF_IOCTL_TYPE, 5, \
+					struct adf_interface_data)
+#define ADF_GET_OVERLAY_ENGINE_DATA \
+				_IOR(ADF_IOCTL_TYPE, 6, \
+					struct adf_overlay_engine_data)
+#define ADF_SIMPLE_POST_CONFIG	_IOW(ADF_IOCTL_TYPE, 7, \
+					struct adf_simple_post_config)
+#define ADF_SIMPLE_BUFFER_ALLOC	_IOW(ADF_IOCTL_TYPE, 8, \
+					struct adf_simple_buffer_alloc)
+#define ADF_ATTACH		_IOW(ADF_IOCTL_TYPE, 9, \
+					struct adf_attachment_config)
+#define ADF_DETACH		_IOW(ADF_IOCTL_TYPE, 10, \
+					struct adf_attachment_config)
+
+#endif /* _UAPI_VIDEO_ADF_H_ */
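
Tying the event plumbing together, a sketch that subscribes to vsync on an interface node and reads one event; ADF device node naming is driver-dependent, so the exact path is an assumption.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <video/adf.h>

int wait_one_vsync(void)
{
	struct adf_set_event req = { .type = ADF_EVENT_VSYNC, .enabled = 1 };
	struct adf_vsync_event ev;
	int fd = open("/dev/adf-interface0.0", O_RDWR);	/* assumed node */

	if (fd < 0 || ioctl(fd, ADF_SET_EVENT, &req) < 0)
		return -1;
	if (read(fd, &ev, sizeof(ev)) < (ssize_t)sizeof(ev))
		return -1;
	printf("vsync at %llu ns\n", (unsigned long long)ev.timestamp);
	return 0;
}
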
diff --git a/include/video/adf.h b/include/video/adf.h
new file mode 100644
index 0000000..34f10e5
--- /dev/null
+++ b/include/video/adf.h
@@ -0,0 +1,502 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIDEO_ADF_H
+#define _VIDEO_ADF_H
+
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/idr.h>
+#include <linux/kref.h>
+#include <linux/kthread.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <uapi/video/adf.h>
+#include "sync.h"
+
+struct adf_obj;
+struct adf_obj_ops;
+struct adf_device;
+struct adf_device_ops;
+struct adf_interface;
+struct adf_interface_ops;
+struct adf_overlay_engine;
+struct adf_overlay_engine_ops;
+
+/**
+ * struct adf_buffer - buffer displayed by adf_post
+ *
+ * @overlay_engine: target overlay engine
+ * @w: width of display region in pixels
+ * @h: height of display region in pixels
+ * @format: DRM-style fourcc, see drm_fourcc.h for standard formats
+ * @dma_bufs: dma_buf for each plane
+ * @offset: location of first pixel to scan out, in bytes
+ * @pitch: length of a scanline including padding, in bytes
+ * @n_planes: number of planes in buffer
+ * @acquire_fence: sync_fence which will clear when the buffer is
+ *	ready for display
+ *
+ * &struct adf_buffer is the in-kernel counterpart to the userspace-facing
+ * &struct adf_buffer_config.
+ */
+struct adf_buffer {
+	struct adf_overlay_engine *overlay_engine;
+
+	u32 w;
+	u32 h;
+	u32 format;
+
+	struct dma_buf *dma_bufs[ADF_MAX_PLANES];
+	u32 offset[ADF_MAX_PLANES];
+	u32 pitch[ADF_MAX_PLANES];
+	u8 n_planes;
+
+	struct sync_fence *acquire_fence;
+};
+
+/**
+ * struct adf_buffer_mapping - state for mapping a &struct adf_buffer into the
+ * display device
+ *
+ * @attachments: dma-buf attachment for each plane
+ * @sg_tables: SG tables for each plane
+ */
+struct adf_buffer_mapping {
+	struct dma_buf_attachment *attachments[ADF_MAX_PLANES];
+	struct sg_table *sg_tables[ADF_MAX_PLANES];
+};
+
+/**
+ * struct adf_post - request to flip to a new set of buffers
+ *
+ * @n_bufs: number of buffers displayed
+ * @bufs: buffers displayed
+ * @mappings: in-device mapping state for each buffer
+ * @custom_data_size: size of driver-private data
+ * @custom_data: driver-private data
+ *
+ * &struct adf_post is the in-kernel counterpart to the userspace-facing
+ * &struct adf_post_config.
+ */
+struct adf_post {
+	size_t n_bufs;
+	struct adf_buffer *bufs;
+	struct adf_buffer_mapping *mappings;
+
+	size_t custom_data_size;
+	void *custom_data;
+};
+
+/**
+ * struct adf_attachment - description of attachment between an overlay engine
+ * and an interface
+ *
+ * @overlay_engine: the overlay engine
+ * @interface: the interface
+ *
+ * &struct adf_attachment is the in-kernel counterpart to the userspace-facing
+ * &struct adf_attachment_config.
+ */
+struct adf_attachment {
+	struct adf_overlay_engine *overlay_engine;
+	struct adf_interface *interface;
+};
+
+struct adf_pending_post {
+	struct list_head head;
+	struct adf_post config;
+	void *state;
+};
+
+enum adf_obj_type {
+	ADF_OBJ_OVERLAY_ENGINE = 0,
+	ADF_OBJ_INTERFACE = 1,
+	ADF_OBJ_DEVICE = 2,
+};
+
+/**
+ * struct adf_obj_ops - common ADF object implementation ops
+ *
+ * @open: handle opening the object's device node
+ * @release: handle releasing an open file
+ * @ioctl: handle custom ioctls
+ *
+ * @supports_event: return whether the object supports generating events of type
+ *	@type
+ * @set_event: enable or disable events of type @type
+ * @event_type_str: return a string representation of custom event @type
+ *	(@type >= %ADF_EVENT_DEVICE_CUSTOM).
+ *
+ * @custom_data: copy up to %ADF_MAX_CUSTOM_DATA_SIZE bytes of driver-private
+ *	data into @data (allocated by ADF) and return the number of copied bytes
+ *	in @size.  Return 0 on success or an error code (<0) on failure.
+ */
+struct adf_obj_ops {
+	/* optional */
+	int (*open)(struct adf_obj *obj, struct inode *inode,
+			struct file *file);
+	/* optional */
+	void (*release)(struct adf_obj *obj, struct inode *inode,
+			struct file *file);
+	/* optional */
+	long (*ioctl)(struct adf_obj *obj, unsigned int cmd, unsigned long arg);
+
+	/* optional */
+	bool (*supports_event)(struct adf_obj *obj, enum adf_event_type type);
+	/* required if supports_event is implemented */
+	void (*set_event)(struct adf_obj *obj, enum adf_event_type type,
+			bool enabled);
+	/* optional */
+	const char *(*event_type_str)(struct adf_obj *obj,
+			enum adf_event_type type);
+
+	/* optional */
+	int (*custom_data)(struct adf_obj *obj, void *data, size_t *size);
+};
+
+struct adf_obj {
+	enum adf_obj_type type;
+	char name[ADF_NAME_LEN];
+	struct adf_device *parent;
+
+	const struct adf_obj_ops *ops;
+
+	struct device dev;
+
+	struct spinlock file_lock;
+	struct list_head file_list;
+
+	struct mutex event_lock;
+	struct rb_root event_refcount;
+
+	int id;
+	int minor;
+};
+
+/**
+ * struct adf_device_quirks - common display device quirks
+ *
+ * @buffer_padding: whether the last scanline of a buffer extends to the
+ * 	buffer's pitch (@ADF_BUFFER_PADDED_TO_PITCH) or just to the visible
+ * 	width (@ADF_BUFFER_UNPADDED)
+ */
+struct adf_device_quirks {
+	/* optional, defaults to ADF_BUFFER_PADDED_TO_PITCH */
+	enum {
+		ADF_BUFFER_PADDED_TO_PITCH = 0,
+		ADF_BUFFER_UNPADDED = 1,
+	} buffer_padding;
+};
+
+/**
+ * struct adf_device_ops - display device implementation ops
+ *
+ * @owner: device's module
+ * @base: common operations (see &struct adf_obj_ops)
+ * @quirks: device's quirks (see &struct adf_device_quirks)
+ *
+ * @attach: attach overlay engine @eng to interface @intf.  Return 0 on success
+ *	or error code (<0) on failure.
+ * @detach: detach overlay engine @eng from interface @intf.  Return 0 on
+ *	success or error code (<0) on failure.
+ *
+ * @validate_custom_format: validate the number and size of planes
+ *	in buffers with a custom format (i.e., not one of the %DRM_FORMAT_*
+ *	types defined in drm/drm_fourcc.h).  Return 0 if the buffer is valid or
+ *	an error code (<0) otherwise.
+ *
+ * @validate: validate that the proposed configuration @cfg is legal.  The
+ *	driver may optionally allocate and return some driver-private state in
+ *	@driver_state, which will be passed to the corresponding post().  The
+ *	driver may NOT commit any changes to hardware.  Return 0 if @cfg is
+ *	valid or an error code (<0) otherwise.
+ * @complete_fence: create a hardware-backed sync fence to be signaled when
+ *	@cfg is removed from the screen.  If unimplemented, ADF automatically
+ *	creates an sw_sync fence.  Return the sync fence on success or a
+ *	PTR_ERR() on failure.
+ * @post: flip @cfg onto the screen.  Wait for the display to begin scanning out
+ *	@cfg before returning.
+ * @advance_timeline: signal the sync fence for the last configuration to leave
+ *	the display.  If unimplemented, ADF automatically advances an sw_sync
+ *	timeline.
+ * @state_free: free driver-private state allocated during validate()
+ */
+struct adf_device_ops {
+	/* required */
+	struct module *owner;
+	const struct adf_obj_ops base;
+	/* optional */
+	const struct adf_device_quirks quirks;
+
+	/* optional */
+	int (*attach)(struct adf_device *dev, struct adf_overlay_engine *eng,
+			struct adf_interface *intf);
+	/* optional */
+	int (*detach)(struct adf_device *dev, struct adf_overlay_engine *eng,
+			struct adf_interface *intf);
+
+	/* required if any of the device's overlay engines supports at least one
+	   custom format */
+	int (*validate_custom_format)(struct adf_device *dev,
+			struct adf_buffer *buf);
+
+	/* required */
+	int (*validate)(struct adf_device *dev, struct adf_post *cfg,
+			void **driver_state);
+	/* optional */
+	struct sync_fence *(*complete_fence)(struct adf_device *dev,
+			struct adf_post *cfg, void *driver_state);
+	/* required */
+	void (*post)(struct adf_device *dev, struct adf_post *cfg,
+			void *driver_state);
+	/* required if complete_fence is implemented */
+	void (*advance_timeline)(struct adf_device *dev,
+			struct adf_post *cfg, void *driver_state);
+	/* required if validate allocates driver state */
+	void (*state_free)(struct adf_device *dev, void *driver_state);
+};
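+
+/*
+ * Illustrative sketch only (hypothetical driver): a minimal ops table needs
+ * just the required members; example_validate() and example_post() stand in
+ * for real driver code:
+ *
+ *	static const struct adf_device_ops example_dev_ops = {
+ *		.owner = THIS_MODULE,
+ *		.validate = example_validate,
+ *		.post = example_post,
+ *	};
+ */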
+
+struct adf_attachment_list {
+	struct adf_attachment attachment;
+	struct list_head head;
+};
+
+struct adf_device {
+	struct adf_obj base;
+	struct device *dev;
+
+	const struct adf_device_ops *ops;
+
+	struct mutex client_lock;
+
+	struct idr interfaces;
+	size_t n_interfaces;
+	struct idr overlay_engines;
+
+	struct list_head post_list;
+	struct mutex post_lock;
+	struct kthread_worker post_worker;
+	struct task_struct *post_thread;
+	struct kthread_work post_work;
+
+	struct list_head attached;
+	size_t n_attached;
+	struct list_head attach_allowed;
+	size_t n_attach_allowed;
+
+	struct adf_pending_post *onscreen;
+
+	struct sw_sync_timeline *timeline;
+	int timeline_max;
+};
+
+/**
+ * struct adf_interface_ops - display interface implementation ops
+ *
+ * @base: common operations (see &struct adf_obj_ops)
+ *
+ * @blank: change the display's DPMS state.  Return 0 on success or error
+ *	code (<0) on failure.
+ *
+ * @alloc_simple_buffer: allocate a buffer with the specified @w, @h, and
+ *	@format.  @format will be a standard RGB format (i.e.,
+ *	adf_format_is_rgb(@format) == true).  Return 0 on success or error code
+ *	(<0) on failure.  On success, return the buffer, offset, and pitch in
+ *	@dma_buf, @offset, and @pitch respectively.
+ * @describe_simple_post: provide driver-private data needed to post a single
+ *	buffer @fb.  Copy up to %ADF_MAX_CUSTOM_DATA_SIZE bytes into @data
+ *	(allocated by ADF) and return the number of bytes in @size.  Return 0 on
+ *	success or error code (<0) on failure.
+ *
+ * @modeset: change the interface's mode.  @mode is not necessarily part of the
+ *	modelist passed to adf_hotplug_notify_connected(); the driver may
+ *	accept or reject custom modes at its discretion.  Return 0 on success or
+ *	error code (<0) if the mode could not be set.
+ *
+ * @screen_size: copy the screen dimensions in millimeters into @width_mm
+ *	and @height_mm.  Return 0 on success or error code (<0) if the display
+ *	dimensions are unknown.
+ *
+ * @type_str: return a string representation of custom @intf->type
+ *	(@intf->type >= %ADF_INTF_TYPE_DEVICE_CUSTOM).
+ */
+struct adf_interface_ops {
+	const struct adf_obj_ops base;
+
+	/* optional */
+	int (*blank)(struct adf_interface *intf, u8 state);
+
+	/* optional */
+	int (*alloc_simple_buffer)(struct adf_interface *intf,
+			u16 w, u16 h, u32 format,
+			struct dma_buf **dma_buf, u32 *offset, u32 *pitch);
+	/* optional */
+	int (*describe_simple_post)(struct adf_interface *intf,
+			struct adf_buffer *fb, void *data, size_t *size);
+
+	/* optional */
+	int (*modeset)(struct adf_interface *intf,
+			struct drm_mode_modeinfo *mode);
+
+	/* optional */
+	int (*screen_size)(struct adf_interface *intf, u16 *width_mm,
+			u16 *height_mm);
+
+	/* optional */
+	const char *(*type_str)(struct adf_interface *intf);
+};
+
+struct adf_interface {
+	struct adf_obj base;
+	const struct adf_interface_ops *ops;
+
+	struct drm_mode_modeinfo current_mode;
+
+	enum adf_interface_type type;
+	u32 idx;
+	u32 flags;
+
+	wait_queue_head_t vsync_wait;
+	ktime_t vsync_timestamp;
+	rwlock_t vsync_lock;
+
+	u8 dpms_state;
+
+	bool hotplug_detect;
+	struct drm_mode_modeinfo *modelist;
+	size_t n_modes;
+	rwlock_t hotplug_modelist_lock;
+};
+
+/**
+ * struct adf_overlay_engine_ops - overlay engine implementation ops
+ *
+ * @base: common operations (see &struct adf_obj_ops)
+ *
+ * @supported_formats: list of fourccs the overlay engine can scan out
+ * @n_supported_formats: length of @supported_formats, up to
+ *	%ADF_MAX_SUPPORTED_FORMATS
+ */
+struct adf_overlay_engine_ops {
+	const struct adf_obj_ops base;
+
+	/* required */
+	const u32 *supported_formats;
+	/* required */
+	const size_t n_supported_formats;
+};
+
+struct adf_overlay_engine {
+	struct adf_obj base;
+
+	const struct adf_overlay_engine_ops *ops;
+};
+
+#define adf_obj_to_device(ptr) \
+	container_of((ptr), struct adf_device, base)
+
+#define adf_obj_to_interface(ptr) \
+	container_of((ptr), struct adf_interface, base)
+
+#define adf_obj_to_overlay_engine(ptr) \
+	container_of((ptr), struct adf_overlay_engine, base)
+
+int __printf(4, 5) adf_device_init(struct adf_device *dev,
+		struct device *parent, const struct adf_device_ops *ops,
+		const char *fmt, ...);
+void adf_device_destroy(struct adf_device *dev);
+int __printf(7, 8) adf_interface_init(struct adf_interface *intf,
+		struct adf_device *dev, enum adf_interface_type type, u32 idx,
+		u32 flags, const struct adf_interface_ops *ops, const char *fmt,
+		...);
+void adf_interface_destroy(struct adf_interface *intf);
+static inline struct adf_device *adf_interface_parent(
+		struct adf_interface *intf)
+{
+	return intf->base.parent;
+}
+int __printf(4, 5) adf_overlay_engine_init(struct adf_overlay_engine *eng,
+		struct adf_device *dev,
+		const struct adf_overlay_engine_ops *ops, const char *fmt, ...);
+void adf_overlay_engine_destroy(struct adf_overlay_engine *eng);
+static inline struct adf_device *adf_overlay_engine_parent(
+		struct adf_overlay_engine *eng)
+{
+	return eng->base.parent;
+}
+
+int adf_attachment_allow(struct adf_device *dev, struct adf_overlay_engine *eng,
+		struct adf_interface *intf);
+
+const char *adf_obj_type_str(enum adf_obj_type type);
+const char *adf_interface_type_str(struct adf_interface *intf);
+const char *adf_event_type_str(struct adf_obj *obj, enum adf_event_type type);
+
+#define ADF_FORMAT_STR_SIZE 5
+void adf_format_str(u32 format, char buf[ADF_FORMAT_STR_SIZE]);
+int adf_format_validate_yuv(struct adf_device *dev, struct adf_buffer *buf,
+		u8 num_planes, u8 hsub, u8 vsub, u8 cpp[]);
+/**
+ * adf_format_validate_rgb - validate the number and size of planes in buffers
+ * with a custom RGB format.
+ *
+ * @dev: ADF device performing the validation
+ * @buf: buffer to validate
+ * @cpp: expected bytes per pixel
+ *
+ * adf_format_validate_rgb() is intended to be called as a helper from @dev's
+ * validate_custom_format() op.  @buf must have a single RGB plane.
+ *
+ * Returns 0 if @buf has a single plane with sufficient size, or -EINVAL
+ * otherwise.
+ */
+static inline int adf_format_validate_rgb(struct adf_device *dev,
+		struct adf_buffer *buf, u8 cpp)
+{
+	return adf_format_validate_yuv(dev, buf, 1, 1, 1, &cpp);
+}
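+
+/*
+ * Illustrative sketch only (hypothetical driver): a validate_custom_format
+ * op for a 4-byte-per-pixel custom RGB format can simply delegate to the
+ * helper above:
+ *
+ *	static int example_validate_custom_format(struct adf_device *dev,
+ *			struct adf_buffer *buf)
+ *	{
+ *		return adf_format_validate_rgb(dev, buf, 4);
+ *	}
+ */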
+
+int adf_event_get(struct adf_obj *obj, enum adf_event_type type);
+int adf_event_put(struct adf_obj *obj, enum adf_event_type type);
+int adf_event_notify(struct adf_obj *obj, struct adf_event *event);
+
+static inline void adf_vsync_get(struct adf_interface *intf)
+{
+	adf_event_get(&intf->base, ADF_EVENT_VSYNC);
+}
+
+static inline void adf_vsync_put(struct adf_interface *intf)
+{
+	adf_event_put(&intf->base, ADF_EVENT_VSYNC);
+}
+
+int adf_vsync_wait(struct adf_interface *intf, long timeout);
+void adf_vsync_notify(struct adf_interface *intf, ktime_t timestamp);
+
+int adf_hotplug_notify_connected(struct adf_interface *intf,
+		struct drm_mode_modeinfo *modelist, size_t n_modes);
+void adf_hotplug_notify_disconnected(struct adf_interface *intf);
+
+void adf_modeinfo_set_name(struct drm_mode_modeinfo *mode);
+void adf_modeinfo_set_vrefresh(struct drm_mode_modeinfo *mode);
+
+#endif /* _VIDEO_ADF_H */
diff --git a/include/video/adf_client.h b/include/video/adf_client.h
new file mode 100644
index 0000000..983f2b6
--- /dev/null
+++ b/include/video/adf_client.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIDEO_ADF_CLIENT_H_
+#define _VIDEO_ADF_CLIENT_H_
+
+#include <video/adf.h>
+
+int adf_interface_blank(struct adf_interface *intf, u8 state);
+u8 adf_interface_dpms_state(struct adf_interface *intf);
+
+void adf_interface_current_mode(struct adf_interface *intf,
+		struct drm_mode_modeinfo *mode);
+size_t adf_interface_modelist(struct adf_interface *intf,
+		struct drm_mode_modeinfo *modelist, size_t n_modes);
+int adf_interface_set_mode(struct adf_interface *intf,
+		struct drm_mode_modeinfo *mode);
+int adf_interface_get_screen_size(struct adf_interface *intf, u16 *width,
+		u16 *height);
+int adf_interface_simple_buffer_alloc(struct adf_interface *intf, u16 w, u16 h,
+		u32 format, struct dma_buf **dma_buf, u32 *offset, u32 *pitch);
+struct sync_fence *adf_interface_simple_post(struct adf_interface *intf,
+		struct adf_buffer *buf);
+
+bool adf_overlay_engine_supports_format(struct adf_overlay_engine *eng,
+		u32 format);
+
+size_t adf_device_attachments(struct adf_device *dev,
+		struct adf_attachment *attachments, size_t n_attachments);
+size_t adf_device_attachments_allowed(struct adf_device *dev,
+		struct adf_attachment *attachments, size_t n_attachments);
+bool adf_device_attached(struct adf_device *dev, struct adf_overlay_engine *eng,
+		struct adf_interface *intf);
+bool adf_device_attach_allowed(struct adf_device *dev,
+		struct adf_overlay_engine *eng, struct adf_interface *intf);
+int adf_device_attach(struct adf_device *dev, struct adf_overlay_engine *eng,
+		struct adf_interface *intf);
+int adf_device_detach(struct adf_device *dev, struct adf_overlay_engine *eng,
+		struct adf_interface *intf);
+
+struct sync_fence *adf_device_post(struct adf_device *dev,
+		struct adf_interface **intfs, size_t n_intfs,
+		struct adf_buffer *bufs, size_t n_bufs, void *custom_data,
+		size_t custom_data_size);
+struct sync_fence *adf_device_post_nocopy(struct adf_device *dev,
+		struct adf_interface **intfs, size_t n_intfs,
+		struct adf_buffer *bufs, size_t n_bufs, void *custom_data,
+		size_t custom_data_size);
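+
+/*
+ * Illustrative usage only (hypothetical variables): posting one buffer to a
+ * single interface.  The returned fence signals when this configuration is
+ * later removed from the screen.
+ *
+ *	struct sync_fence *fence;
+ *
+ *	fence = adf_device_post(dev, &intf, 1, &buf, 1, NULL, 0);
+ *	if (IS_ERR(fence))
+ *		return PTR_ERR(fence);
+ *	sync_fence_put(fence);
+ */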
+
+#endif /* _VIDEO_ADF_CLIENT_H_ */
diff --git a/include/video/adf_fbdev.h b/include/video/adf_fbdev.h
new file mode 100644
index 0000000..b722c6b
--- /dev/null
+++ b/include/video/adf_fbdev.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIDEO_ADF_FBDEV_H_
+#define _VIDEO_ADF_FBDEV_H_
+
+#include <linux/fb.h>
+#include <linux/mutex.h>
+#include <video/adf.h>
+
+struct adf_fbdev {
+	struct adf_interface *intf;
+	struct adf_overlay_engine *eng;
+	struct fb_info *info;
+	u32 pseudo_palette[16];
+
+	unsigned int refcount;
+	struct mutex refcount_lock;
+
+	struct dma_buf *dma_buf;
+	u32 offset;
+	u32 pitch;
+	void *vaddr;
+	u32 format;
+
+	u16 default_xres_virtual;
+	u16 default_yres_virtual;
+	u32 default_format;
+};
+
+#if IS_ENABLED(CONFIG_ADF_FBDEV)
+void adf_modeinfo_to_fb_videomode(const struct drm_mode_modeinfo *mode,
+		struct fb_videomode *vmode);
+void adf_modeinfo_from_fb_videomode(const struct fb_videomode *vmode,
+		struct drm_mode_modeinfo *mode);
+
+int adf_fbdev_init(struct adf_fbdev *fbdev, struct adf_interface *interface,
+		struct adf_overlay_engine *eng,
+		u16 xres_virtual, u16 yres_virtual, u32 format,
+		struct fb_ops *fbops, const char *fmt, ...);
+void adf_fbdev_destroy(struct adf_fbdev *fbdev);
+
+int adf_fbdev_open(struct fb_info *info, int user);
+int adf_fbdev_release(struct fb_info *info, int user);
+int adf_fbdev_check_var(struct fb_var_screeninfo *var, struct fb_info *info);
+int adf_fbdev_set_par(struct fb_info *info);
+int adf_fbdev_blank(int blank, struct fb_info *info);
+int adf_fbdev_pan_display(struct fb_var_screeninfo *var, struct fb_info *info);
+int adf_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma);
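+
+/*
+ * Illustrative sketch only (hypothetical driver): these helpers are meant to
+ * be wired into a driver's fb_ops, e.g.:
+ *
+ *	static struct fb_ops example_fb_ops = {
+ *		.owner = THIS_MODULE,
+ *		.fb_open = adf_fbdev_open,
+ *		.fb_release = adf_fbdev_release,
+ *		.fb_check_var = adf_fbdev_check_var,
+ *		.fb_set_par = adf_fbdev_set_par,
+ *		.fb_blank = adf_fbdev_blank,
+ *		.fb_pan_display = adf_fbdev_pan_display,
+ *		.fb_mmap = adf_fbdev_mmap,
+ *	};
+ */
+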
+#else
+static inline void adf_modeinfo_to_fb_videomode(const struct drm_mode_modeinfo *mode,
+		struct fb_videomode *vmode)
+{
+	WARN_ONCE(1, "%s: CONFIG_ADF_FBDEV is disabled\n", __func__);
+}
+
+static inline void adf_modeinfo_from_fb_videomode(const struct fb_videomode *vmode,
+		struct drm_mode_modeinfo *mode)
+{
+	WARN_ONCE(1, "%s: CONFIG_ADF_FBDEV is disabled\n", __func__);
+}
+
+static inline int adf_fbdev_init(struct adf_fbdev *fbdev,
+		struct adf_interface *interface,
+		struct adf_overlay_engine *eng,
+		u16 xres_virtual, u16 yres_virtual, u32 format,
+		struct fb_ops *fbops, const char *fmt, ...)
+{
+	return -ENODEV;
+}
+
+static inline void adf_fbdev_destroy(struct adf_fbdev *fbdev) { }
+
+static inline int adf_fbdev_open(struct fb_info *info, int user)
+{
+	return -ENODEV;
+}
+
+static inline int adf_fbdev_release(struct fb_info *info, int user)
+{
+	return -ENODEV;
+}
+
+static inline int adf_fbdev_check_var(struct fb_var_screeninfo *var,
+		struct fb_info *info)
+{
+	return -ENODEV;
+}
+
+static inline int adf_fbdev_set_par(struct fb_info *info)
+{
+	return -ENODEV;
+}
+
+static inline int adf_fbdev_blank(int blank, struct fb_info *info)
+{
+	return -ENODEV;
+}
+
+static inline int adf_fbdev_pan_display(struct fb_var_screeninfo *var,
+		struct fb_info *info)
+{
+	return -ENODEV;
+}
+
+static inline int adf_fbdev_mmap(struct fb_info *info,
+		struct vm_area_struct *vma)
+{
+	return -ENODEV;
+}
+#endif
+
+#endif /* _VIDEO_ADF_FBDEV_H_ */
diff --git a/include/video/adf_format.h b/include/video/adf_format.h
new file mode 100644
index 0000000..e03182c
--- /dev/null
+++ b/include/video/adf_format.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIDEO_ADF_FORMAT_H
+#define _VIDEO_ADF_FORMAT_H
+
+bool adf_format_is_standard(u32 format);
+bool adf_format_is_rgb(u32 format);
+u8 adf_format_num_planes(u32 format);
+u8 adf_format_bpp(u32 format);
+u8 adf_format_plane_cpp(u32 format, int plane);
+u8 adf_format_horz_chroma_subsampling(u32 format);
+u8 adf_format_vert_chroma_subsampling(u32 format);
+
+#endif /* _VIDEO_ADF_FORMAT_H */
diff --git a/include/video/adf_memblock.h b/include/video/adf_memblock.h
new file mode 100644
index 0000000..6256e0e
--- /dev/null
+++ b/include/video/adf_memblock.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIDEO_ADF_MEMBLOCK_H_
+#define _VIDEO_ADF_MEMBLOCK_H_
+
+struct dma_buf *adf_memblock_export(phys_addr_t base, size_t size, int flags);
+
+#endif /* _VIDEO_ADF_MEMBLOCK_H_ */
diff --git a/init/Kconfig b/init/Kconfig
index 8fa4f75..4d9465f 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1251,6 +1251,12 @@
 config HAVE_PCSPKR_PLATFORM
 	bool
 
+config PANIC_TIMEOUT
+	int "Default panic timeout"
+	default 0
+	help
+	  Set the default panic timeout in seconds (0 disables the
+	  automatic reboot after a panic).
+
 menuconfig EXPERT
 	bool "Configure standard kernel features (expert users)"
 	# Unhide debug options, to make the on-by-default options visible
@@ -1261,6 +1267,31 @@
           environments which can tolerate a "non-standard" kernel.
           Only use this if you really know what you are doing.
 
+config UPTIME_LIMITED_KERNEL
+	bool "Create a kernel with uptime limitations"
+	default n
+	help
+	  Limit the amount of time a kernel can run. The associated UPTIME_LIMIT*
+	  kernel config options should be used to tune the behaviour.
+
+config UPTIME_LIMIT_DURATION
+	int "Kernel uptime limit in minutes"
+	depends on UPTIME_LIMITED_KERNEL
+	range 0 14400
+	default 0
+	help
+	  Define the kernel's uptime limit in minutes. Once the
+	  defined time expires, the kernel emits a warning, ceases
+	  to be usable and eventually restarts. The valid range is
+	  0 (disabled) to 14400 (10 days).
+
+config UPTIME_LIMIT_KERNEL_REBOOT
+	bool "Reboot a time limited kernel at expiration"
+	depends on UPTIME_LIMITED_KERNEL
+	default y
+	help
+	  Reboot an uptime limited kernel at expiration.
+
 config UID16
 	bool "Enable 16-bit UID system calls" if EXPERT
 	depends on HAVE_UID16
diff --git a/init/Makefile b/init/Makefile
index 7bc47ee..692b91f 100644
--- a/init/Makefile
+++ b/init/Makefile
@@ -3,11 +3,8 @@
 #
 
 obj-y                          := main.o version.o mounts.o
-ifneq ($(CONFIG_BLK_DEV_INITRD),y)
 obj-y                          += noinitramfs.o
-else
 obj-$(CONFIG_BLK_DEV_INITRD)   += initramfs.o
-endif
 obj-$(CONFIG_GENERIC_CALIBRATE_DELAY) += calibrate.o
 
 ifneq ($(CONFIG_ARCH_INIT_TASK),y)
diff --git a/init/do_mounts.c b/init/do_mounts.c
index a2b49f2..4b4936d 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -412,7 +412,9 @@
 		printk("DEBUG_BLOCK_EXT_DEVT is enabled, you need to specify "
 		       "explicit textual name for \"root=\" boot option.\n");
 #endif
-		panic("VFS: Unable to mount root fs on %s", b);
+		printk(KERN_EMERG "VFS: Unable to mount root fs on %s\n", b);
+		printk(KERN_EMERG "User configuration error - no valid root filesystem found\n");
+		panic("Invalid configuration from end user prevents continuing");
 	}
 
 	printk("List of all partitions:\n");
@@ -424,7 +426,9 @@
 #ifdef CONFIG_BLOCK
 	__bdevname(ROOT_DEV, b);
 #endif
-	panic("VFS: Unable to mount root fs on %s", b);
+	printk(KERN_EMERG "VFS: Unable to mount root fs on %s\n", b);
+	printk(KERN_EMERG "User configuration error - no valid root filesystem found\n");
+	panic("Invalid configuration from end user prevents continuing");
 out:
 	put_page(page);
 }
diff --git a/init/initramfs.c b/init/initramfs.c
index a67ef9d..43ae0cc 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -18,6 +18,7 @@
 #include <linux/dirent.h>
 #include <linux/syscalls.h>
 #include <linux/utime.h>
+#include <linux/initramfs.h>
 
 static __initdata char *message;
 static void __init error(char *x)
@@ -579,9 +580,25 @@
 }
 #endif
 
+static int __initdata do_skip_initramfs;
+
+static int __init skip_initramfs_param(char *str)
+{
+	if (*str)
+		return 0;
+	do_skip_initramfs = 1;
+	return 1;
+}
+__setup("skip_initramfs", skip_initramfs_param);
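+
+/*
+ * Note: only the bare "skip_initramfs" token on the kernel command line sets
+ * the flag; any "skip_initramfs=..." form is rejected above because the
+ * handler returns 0 when an argument is present.
+ */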
+
 static int __init populate_rootfs(void)
 {
-	char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
+	char *err;
+
+	if (do_skip_initramfs)
+		return default_rootfs();
+
+	err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
 	if (err)
 		panic(err);	/* Failed to decompress INTERNAL initramfs */
 	if (initrd_start) {
diff --git a/init/main.c b/init/main.c
index 2132ffd..a881535 100644
--- a/init/main.c
+++ b/init/main.c
@@ -669,11 +669,13 @@
 	int ret;
 
 	pr_debug("calling  %pF @ %i\n", fn, task_pid_nr(current));
+
 	calltime = ktime_get();
 	ret = fn();
 	rettime = ktime_get();
 	delta = ktime_sub(rettime, calltime);
 	duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+
 	pr_debug("initcall %pF returned %d after %lld usecs\n",
 		 fn, ret, duration);
 
@@ -856,6 +858,7 @@
 
 static noinline void __init kernel_init_freeable(void)
 {
+	struct stat console_stat;
 	/*
 	 * Wait until kthreadd is all set-up.
 	 */
@@ -885,6 +888,12 @@
 
 	do_basic_setup();
 
+	/* Use /dev/console to infer if the rootfs is setup properly */
+	if (sys_newlstat((char __user *) "/dev/console", (struct stat __user *) &console_stat)
+			|| !S_ISCHR(console_stat.st_mode)) {
+		panic("/dev/console is missing or not a character device!\nPlease ensure your rootfs is properly configured\n");
+	}
+
 	/* Open the /dev/console on the rootfs, this should never fail */
 	if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
 		pr_err("Warning: unable to open an initial console.\n");
diff --git a/init/noinitramfs.c b/init/noinitramfs.c
index 267739d..bcc8bcb0 100644
--- a/init/noinitramfs.c
+++ b/init/noinitramfs.c
@@ -21,11 +21,16 @@
 #include <linux/stat.h>
 #include <linux/kdev_t.h>
 #include <linux/syscalls.h>
+#include <linux/kconfig.h>
+#include <linux/initramfs.h>
 
 /*
  * Create a simple rootfs that is similar to the default initramfs
  */
-static int __init default_rootfs(void)
+#if !IS_BUILTIN(CONFIG_BLK_DEV_INITRD)
+static
+#endif
+int __init default_rootfs(void)
 {
 	int err;
 
@@ -49,4 +54,6 @@
 	printk(KERN_WARNING "Failed to create a rootfs\n");
 	return err;
 }
+#if !IS_BUILTIN(CONFIG_BLK_DEV_INITRD)
 rootfs_initcall(default_rootfs);
+#endif
diff --git a/kernel/Makefile b/kernel/Makefile
index 271fd31..5d7a07f 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -31,6 +31,7 @@
 obj-$(CONFIG_PROFILING) += profile.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-y += time/
+obj-$(CONFIG_UPTIME_LIMITED_KERNEL) += uptime_limit.o
 obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
 obj-$(CONFIG_LOCKDEP) += lockdep.o
 ifeq ($(CONFIG_PROC_FS),y)
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 6bd4a90..358dd23 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -423,7 +423,7 @@
 		f->lsm_rule = NULL;
 
 		/* Support legacy tests for a valid loginuid */
-		if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295)) {
+		if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295UL)) {
 			f->type = AUDIT_LOGINUID_SET;
 			f->val = 0;
 		}
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index ef13060..fcf0aef 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2106,6 +2106,43 @@
 	return retval;
 }
 
+static int cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+{
+	struct cgroup_subsys *ss;
+	int ret;
+
+	for_each_subsys(cgrp->root, ss) {
+		if (ss->allow_attach) {
+			ret = ss->allow_attach(cgrp, tset);
+			if (ret)
+				return ret;
+		} else {
+			return -EACCES;
+		}
+	}
+
+	return 0;
+}
+
+int subsys_cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+{
+	const struct cred *cred = current_cred(), *tcred;
+	struct task_struct *task;
+
+	if (capable(CAP_SYS_NICE))
+		return 0;
+
+	cgroup_taskset_for_each(task, cgrp, tset) {
+		tcred = __task_cred(task);
+
+		if (current != task && cred->euid != tcred->uid &&
+		    cred->euid != tcred->suid)
+			return -EACCES;
+	}
+
+	return 0;
+}
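+
+/*
+ * A subsystem opts in to the relaxed check by pointing its allow_attach hook
+ * at this helper, as the cpu cgroup does later in this patch:
+ *
+ *	.can_attach	= cpu_cgroup_can_attach,
+ *	.attach		= cpu_cgroup_attach,
+ *	.allow_attach	= subsys_cgroup_allow_attach,
+ */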
+
 /*
  * Find the task_struct of the task to attach by vpid and pass it along to the
  * function to attach either it or all tasks in its threadgroup. Will lock
@@ -2137,9 +2174,18 @@
 		if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
 		    !uid_eq(cred->euid, tcred->uid) &&
 		    !uid_eq(cred->euid, tcred->suid)) {
-			rcu_read_unlock();
-			ret = -EACCES;
-			goto out_unlock_cgroup;
+			/*
+			 * if the default permission check fails, give each
+			 * cgroup a chance to extend the permission check
+			 */
+			struct cgroup_taskset tset = { };
+			tset.single.task = tsk;
+			tset.single.cgrp = cgrp;
+			ret = cgroup_allow_attach(cgrp, &tset);
+			if (ret) {
+				rcu_read_unlock();
+				goto out_unlock_cgroup;
+			}
 		}
 	} else
 		tsk = current;
diff --git a/kernel/cpu.c b/kernel/cpu.c
index bc255e2..7d47556 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -728,3 +728,23 @@
 {
 	cpumask_copy(to_cpumask(cpu_online_bits), src);
 }
+
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
+
+void idle_notifier_register(struct notifier_block *n)
+{
+	atomic_notifier_chain_register(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_register);
+
+void idle_notifier_unregister(struct notifier_block *n)
+{
+	atomic_notifier_chain_unregister(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_unregister);
+
+void idle_notifier_call_chain(unsigned long val)
+{
+	atomic_notifier_call_chain(&idle_notifier, val, NULL);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_call_chain);
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 0506d44..2d4438b 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -86,6 +86,10 @@
 bool dbg_is_early = true;
 /* Next cpu to become the master debug core */
 int dbg_switch_cpu;
+/* Flag for entering kdb when a panic occurs */
+static bool break_on_panic = true;
+/* Flag for entering kdb when an exception occurs */
+static bool break_on_exception = true;
 
 /* Use kdb or gdbserver mode */
 int dbg_kdb_mode = 1;
@@ -100,6 +104,8 @@
 
 module_param(kgdb_use_con, int, 0644);
 module_param(kgdbreboot, int, 0644);
+module_param(break_on_panic, bool, 0644);
+module_param(break_on_exception, bool, 0644);
 
 /*
  * Holds information about breakpoints in a kernel. These breakpoints are
@@ -678,6 +684,9 @@
 	if (arch_kgdb_ops.enable_nmi)
 		arch_kgdb_ops.enable_nmi(0);
 
+	if (unlikely(signo != SIGTRAP && !break_on_exception))
+		return 1;
+
 	ks->cpu			= raw_smp_processor_id();
 	ks->ex_vector		= evector;
 	ks->signo		= signo;
@@ -784,6 +793,9 @@
 			    unsigned long val,
 			    void *data)
 {
+	if (!break_on_panic)
+		return NOTIFY_DONE;
+
 	if (dbg_kdb_mode)
 		kdb_printf("PANIC: %s\n", (char *)data);
 	kgdb_breakpoint();
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 14ff484..4b0fb2f 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -216,7 +216,7 @@
 	int i;
 	int diag, dtab_count;
 	int key;
-
+	static int last_crlf;
 
 	diag = kdbgetintenv("DTABCOUNT", &dtab_count);
 	if (diag)
@@ -237,6 +237,9 @@
 		return buffer;
 	if (key != 9)
 		tab = 0;
+	if (key != 10 && key != 13)
+		last_crlf = 0;
+
 	switch (key) {
 	case 8: /* backspace */
 		if (cp > buffer) {
@@ -254,7 +257,12 @@
 			*cp = tmp;
 		}
 		break;
-	case 13: /* enter */
+	case 10: /* new line */
+	case 13: /* carriage return */
+		/* handle \n after \r */
+		if (last_crlf && last_crlf != key)
+			break;
+		last_crlf = key;
 		*lastchar++ = '\n';
 		*lastchar++ = '\0';
 		if (!KDB_STATE(KGDB_TRANS)) {
diff --git a/kernel/exit.c b/kernel/exit.c
index 717efbd..33fde71 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -841,7 +841,7 @@
 	/*
 	 * Make sure we are holding no locks:
 	 */
-	debug_check_no_locks_held(tsk);
+	debug_check_no_locks_held();
 	/*
 	 * We can do this unlocked here. The futex code uses this flag
 	 * just to verify whether the pi state cleanup has been done
diff --git a/kernel/fork.c b/kernel/fork.c
index 612e78d..a21335b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -198,6 +198,9 @@
 /* SLAB cache for mm_struct structures (tsk->mm) */
 static struct kmem_cache *mm_cachep;
 
+/* Notifier list called when a task struct is freed */
+static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
+
 static void account_kernel_stack(struct thread_info *ti, int account)
 {
 	struct zone *zone = page_zone(virt_to_page(ti));
@@ -231,6 +234,18 @@
 		free_signal_struct(sig);
 }
 
+int task_free_register(struct notifier_block *n)
+{
+	return atomic_notifier_chain_register(&task_free_notifier, n);
+}
+EXPORT_SYMBOL(task_free_register);
+
+int task_free_unregister(struct notifier_block *n)
+{
+	return atomic_notifier_chain_unregister(&task_free_notifier, n);
+}
+EXPORT_SYMBOL(task_free_unregister);
+
 void __put_task_struct(struct task_struct *tsk)
 {
 	WARN_ON(!tsk->exit_state);
@@ -242,6 +257,7 @@
 	delayacct_tsk_free(tsk);
 	put_signal_struct(tsk->signal);
 
+	atomic_notifier_call_chain(&task_free_notifier, 0, tsk);
 	if (!profile_handoff_task(tsk))
 		free_task(tsk);
 }
@@ -311,6 +327,15 @@
 		goto free_ti;
 
 	tsk->stack = ti;
+#ifdef CONFIG_SECCOMP
+	/*
+	 * We must handle setting up seccomp filters once we're under
+	 * the sighand lock in case orig has changed between now and
+	 * then. Until then, filter must be NULL to avoid messing up
+	 * the usage counts on the error path calling free_task.
+	 */
+	tsk->seccomp.filter = NULL;
+#endif
 
 	setup_thread_stack(tsk, orig);
 	clear_user_return_notifier(tsk);
@@ -418,6 +443,8 @@
 			struct address_space *mapping = file->f_mapping;
 
 			get_file(file);
+			if (tmp->vm_prfile)
+				get_file(tmp->vm_prfile);
 			if (tmp->vm_flags & VM_DENYWRITE)
 				atomic_dec(&inode->i_writecount);
 			mutex_lock(&mapping->i_mmap_mutex);
@@ -697,7 +724,8 @@
 
 	mm = get_task_mm(task);
 	if (mm && mm != current->mm &&
-			!ptrace_may_access(task, mode)) {
+			!ptrace_may_access(task, mode) &&
+			!capable(CAP_SYS_RESOURCE)) {
 		mmput(mm);
 		mm = ERR_PTR(-EACCES);
 	}
@@ -1089,6 +1117,39 @@
 	p->flags = new_flags;
 }
 
+static void copy_seccomp(struct task_struct *p)
+{
+#ifdef CONFIG_SECCOMP
+	/*
+	 * Must be called with sighand->lock held, which is common to
+	 * all threads in the group. Holding cred_guard_mutex is not
+	 * needed because this new task is not yet running and cannot
+	 * be racing exec.
+	 */
+	assert_spin_locked(&current->sighand->siglock);
+
+	/* Ref-count the new filter user, and assign it. */
+	get_seccomp_filter(current);
+	p->seccomp = current->seccomp;
+
+	/*
+	 * Explicitly enable no_new_privs here in case it got set
+	 * between the task_struct being duplicated and holding the
+	 * sighand lock. The seccomp state and nnp must be in sync.
+	 */
+	if (task_no_new_privs(current))
+		task_set_no_new_privs(p);
+
+	/*
+	 * If the parent gained a seccomp mode after copying thread
+	 * flags, but before we held the sighand lock, we have
+	 * to manually enable the seccomp thread flag here.
+	 */
+	if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
+		set_tsk_thread_flag(p, TIF_SECCOMP);
+#endif
+}
+
 SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
 {
 	current->clear_child_tid = tidptr;
@@ -1193,7 +1254,6 @@
 		goto fork_out;
 
 	ftrace_graph_init_task(p);
-	get_seccomp_filter(p);
 
 	rt_mutex_init_task(p);
 
@@ -1239,6 +1299,7 @@
 
 	p->utime = p->stime = p->gtime = 0;
 	p->utimescaled = p->stimescaled = 0;
+	p->cpu_power = 0;
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	p->prev_cputime.utime = p->prev_cputime.stime = 0;
 #endif
@@ -1436,6 +1497,12 @@
 	spin_lock(&current->sighand->siglock);
 
 	/*
+	 * Copy seccomp details explicitly here, in case they were changed
+	 * before holding sighand lock.
+	 */
+	copy_seccomp(p);
+
+	/*
 	 * Process group and session signals need to be delivered to just the
 	 * parent before the fork or both the parent and the child after the
 	 * fork. Restart if a signal comes in before we add the new process to
diff --git a/kernel/freezer.c b/kernel/freezer.c
index bd733f6..4ada72f 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -119,6 +119,18 @@
 {
 	unsigned long flags;
 
+	/*
+	 * This check can race with freezer_do_not_count, but worst case that
+	 * will result in an extra wakeup being sent to the task.  It does not
+	 * race with freezer_count(), the barriers in freezer_count() and
+	 * freezer_should_skip() ensure that either freezer_count() sees
+	 * freezing == true in try_to_freeze() and freezes, or
+	 * freezer_should_skip() sees !PF_FREEZER_SKIP and freezes the task
+	 * normally.
+	 */
+	if (freezer_should_skip(p))
+		return false;
+
 	spin_lock_irqsave(&freezer_lock, flags);
 	if (!freezing(p) || frozen(p)) {
 		spin_unlock_irqrestore(&freezer_lock, flags);
diff --git a/kernel/futex.c b/kernel/futex.c
index edc4bea..fea18ac 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -62,6 +62,7 @@
 #include <linux/ptrace.h>
 #include <linux/sched/rt.h>
 #include <linux/hugetlb.h>
+#include <linux/freezer.h>
 
 #include <asm/futex.h>
 
@@ -1935,7 +1936,7 @@
 		 * is no timeout, or if it has yet to expire.
 		 */
 		if (!timeout || timeout->task)
-			schedule();
+			freezable_schedule();
 	}
 	__set_current_state(TASK_RUNNING);
 }
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index aadf4b7..1e84be1 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -47,6 +47,7 @@
 #include <linux/sched/sysctl.h>
 #include <linux/sched/rt.h>
 #include <linux/timer.h>
+#include <linux/freezer.h>
 
 #include <asm/uaccess.h>
 
@@ -1565,7 +1566,7 @@
 			t->task = NULL;
 
 		if (likely(t->task))
-			schedule();
+			freezable_schedule();
 
 		hrtimer_cancel(&t->timer);
 		mode = HRTIMER_MODE_ABS;
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index cbd97ce..63af23a 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -672,6 +672,7 @@
 		irq_settings_set_noprobe(desc);
 		irq_settings_set_norequest(desc);
 		irq_settings_set_nothread(desc);
+		irq_settings_set_chained(desc);
 		irq_startup(desc, true);
 	}
 out:
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index abcd6ca..da5f2ff 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -10,7 +10,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/syscore_ops.h>
-
+#include <linux/wakeup_reason.h>
 #include "internals.h"
 
 /**
@@ -103,14 +103,18 @@
 	int irq;
 
 	for_each_irq_desc(irq, desc) {
-		/*
-		 * Only interrupts which are marked as wakeup source
-		 * and have not been disabled before the suspend check
-		 * can abort suspend.
-		 */
 		if (irqd_is_wakeup_set(&desc->irq_data)) {
-			if (desc->depth == 1 && desc->istate & IRQS_PENDING)
+			if (desc->istate & IRQS_PENDING) {
+				log_suspend_abort_reason("Wakeup IRQ %d %s pending",
+					irq,
+					desc->action && desc->action->name ?
+					desc->action->name : "");
+				pr_info("Wakeup IRQ %d %s pending, suspend aborted\n",
+					irq,
+					desc->action && desc->action->name ?
+					desc->action->name : "");
 				return -EBUSY;
+			}
 			continue;
 		}
 		/*
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index 1162f10..4ea2f96 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -15,6 +15,7 @@
 	_IRQ_NESTED_THREAD	= IRQ_NESTED_THREAD,
 	_IRQ_PER_CPU_DEVID	= IRQ_PER_CPU_DEVID,
 	_IRQF_MODIFY_MASK	= IRQF_MODIFY_MASK,
+	_IRQ_CHAINED		= IRQ_CHAINED,
 };
 
 #define IRQ_PER_CPU		GOT_YOU_MORON
@@ -28,6 +29,7 @@
 #define IRQ_PER_CPU_DEVID	GOT_YOU_MORON
 #undef IRQF_MODIFY_MASK
 #define IRQF_MODIFY_MASK	GOT_YOU_MORON
+#define IRQ_CHAINED		GOT_YOU_MORON
 
 static inline void
 irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
@@ -147,3 +149,8 @@
 {
 	return desc->status_use_accessors & _IRQ_NESTED_THREAD;
 }
+
+static inline void irq_settings_set_chained(struct irq_desc *desc)
+{
+	desc->status_use_accessors |= _IRQ_CHAINED;
+}
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 1f3186b..e16c45b 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -4090,7 +4090,7 @@
 }
 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
 
-static void print_held_locks_bug(struct task_struct *curr)
+static void print_held_locks_bug(void)
 {
 	if (!debug_locks_off())
 		return;
@@ -4099,22 +4099,21 @@
 
 	printk("\n");
 	printk("=====================================\n");
-	printk("[ BUG: lock held at task exit time! ]\n");
+	printk("[ BUG: %s/%d still has locks held! ]\n",
+	       current->comm, task_pid_nr(current));
 	print_kernel_ident();
 	printk("-------------------------------------\n");
-	printk("%s/%d is exiting with locks still held!\n",
-		curr->comm, task_pid_nr(curr));
-	lockdep_print_held_locks(curr);
-
+	lockdep_print_held_locks(current);
 	printk("\nstack backtrace:\n");
 	dump_stack();
 }
 
-void debug_check_no_locks_held(struct task_struct *task)
+void debug_check_no_locks_held(void)
 {
-	if (unlikely(task->lockdep_depth > 0))
-		print_held_locks_bug(task);
+	if (unlikely(current->lockdep_depth > 0))
+		print_held_locks_bug();
 }
+EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
 
 void debug_show_all_locks(void)
 {
diff --git a/kernel/panic.c b/kernel/panic.c
index d3d74c4..59bf94e 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -27,13 +27,19 @@
 #define PANIC_TIMER_STEP 100
 #define PANIC_BLINK_SPD 18
 
+/* Machine specific panic information string */
+char *mach_panic_string;
+
 int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
 static unsigned long tainted_mask;
 static int pause_on_oops;
 static int pause_on_oops_flag;
 static DEFINE_SPINLOCK(pause_on_oops_lock);
 
-int panic_timeout;
+#ifndef CONFIG_PANIC_TIMEOUT
+#define CONFIG_PANIC_TIMEOUT 0
+#endif
+int panic_timeout = CONFIG_PANIC_TIMEOUT;
 EXPORT_SYMBOL_GPL(panic_timeout);
 
 ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
@@ -378,6 +384,11 @@
 void print_oops_end_marker(void)
 {
 	init_oops_id();
+
+	if (mach_panic_string)
+		printk(KERN_WARNING "Board Information: %s\n",
+		       mach_panic_string);
+
 	printk(KERN_WARNING "---[ end trace %016llx ]---\n",
 		(unsigned long long)oops_id);
 }
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 5dfdc9e..f8cc6c4 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -18,6 +18,14 @@
 
 	  Turning OFF this setting is NOT recommended! If in doubt, say Y.
 
+config HAS_WAKELOCK
+	bool
+	default y
+
+config WAKELOCK
+	bool
+	default y
+
 config HIBERNATE_CALLBACKS
 	bool
 
@@ -274,3 +282,10 @@
 config CPU_PM
 	bool
 	depends on SUSPEND || CPU_IDLE
+
+config SUSPEND_TIME
+	bool "Log time spent in suspend"
+	---help---
+	  Prints the time spent in suspend in the kernel log, and
+	  keeps statistics on the time spent in suspend in
+	  /sys/kernel/debug/suspend_time
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 29472bf..74c713b 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -11,5 +11,8 @@
 				   block_io.o
 obj-$(CONFIG_PM_AUTOSLEEP)	+= autosleep.o
 obj-$(CONFIG_PM_WAKELOCKS)	+= wakelock.o
+obj-$(CONFIG_SUSPEND_TIME)	+= suspend_time.o
 
 obj-$(CONFIG_MAGIC_SYSRQ)	+= poweroff.o
+
+obj-$(CONFIG_SUSPEND)	+= wakeup_reason.o
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 0695319..d1ccce3 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -17,7 +17,7 @@
 #include <linux/delay.h>
 #include <linux/workqueue.h>
 #include <linux/kmod.h>
-
+#include <linux/wakeup_reason.h>
 /* 
  * Timeout for stopping processes
  */
@@ -30,9 +30,13 @@
 	unsigned int todo;
 	bool wq_busy = false;
 	struct timeval start, end;
-	u64 elapsed_csecs64;
-	unsigned int elapsed_csecs;
+	u64 elapsed_msecs64;
+	unsigned int elapsed_msecs;
 	bool wakeup = false;
+	int sleep_usecs = USEC_PER_MSEC;
+#ifdef CONFIG_PM_SLEEP
+	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
+#endif
 
 	do_gettimeofday(&start);
 
@@ -62,42 +66,51 @@
 			break;
 
 		if (pm_wakeup_pending()) {
+#ifdef CONFIG_PM_SLEEP
+			pm_get_active_wakeup_sources(suspend_abort,
+				MAX_SUSPEND_ABORT_LEN);
+			log_suspend_abort_reason(suspend_abort);
+#endif
 			wakeup = true;
 			break;
 		}
 
 		/*
 		 * We need to retry, but first give the freezing tasks some
-		 * time to enter the refrigerator.
+		 * time to enter the refrigerator.  Start with an initial
+		 * 1 ms sleep followed by exponential backoff until 8 ms.
 		 */
-		msleep(10);
+		usleep_range(sleep_usecs / 2, sleep_usecs);
+		if (sleep_usecs < 8 * USEC_PER_MSEC)
+			sleep_usecs *= 2;
 	}
 
 	do_gettimeofday(&end);
-	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
-	do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
-	elapsed_csecs = elapsed_csecs64;
+	elapsed_msecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
+	do_div(elapsed_msecs64, NSEC_PER_MSEC);
+	elapsed_msecs = elapsed_msecs64;
 
-	if (todo) {
+	if (wakeup) {
 		printk("\n");
-		printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
-		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
-		       wakeup ? "aborted" : "failed",
-		       elapsed_csecs / 100, elapsed_csecs % 100,
+		printk(KERN_ERR "Freezing of tasks aborted after %d.%03d seconds\n",
+		       elapsed_msecs / 1000, elapsed_msecs % 1000);
+	} else if (todo) {
+		printk("\n");
+		printk(KERN_ERR "Freezing of tasks failed after %d.%03d seconds"
+		       " (%d tasks refusing to freeze, wq_busy=%d):\n",
+		       elapsed_msecs / 1000, elapsed_msecs % 1000,
 		       todo - wq_busy, wq_busy);
 
-		if (!wakeup) {
-			read_lock(&tasklist_lock);
-			do_each_thread(g, p) {
-				if (p != current && !freezer_should_skip(p)
-				    && freezing(p) && !frozen(p))
-					sched_show_task(p);
-			} while_each_thread(g, p);
-			read_unlock(&tasklist_lock);
-		}
+		read_lock(&tasklist_lock);
+		do_each_thread(g, p) {
+			if (p != current && !freezer_should_skip(p)
+			    && freezing(p) && !frozen(p))
+				sched_show_task(p);
+		} while_each_thread(g, p);
+		read_unlock(&tasklist_lock);
 	} else {
-		printk("(elapsed %d.%02d seconds) ", elapsed_csecs / 100,
-			elapsed_csecs % 100);
+		printk("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
+			elapsed_msecs % 1000);
 	}
 
 	return todo ? -EBUSY : 0;
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 903c517..e2446e0 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -25,7 +25,9 @@
 #include <linux/suspend.h>
 #include <linux/syscore_ops.h>
 #include <linux/ftrace.h>
+#include <linux/rtc.h>
 #include <trace/events/power.h>
+#include <linux/wakeup_reason.h>
 
 #include "power.h"
 
@@ -138,7 +140,7 @@
 	error = suspend_freeze_processes();
 	if (!error)
 		return 0;
-
+	log_suspend_abort_reason("One or more tasks refusing to freeze");
 	suspend_stats.failed_freeze++;
 	dpm_save_failed_step(SUSPEND_FREEZE);
  Finish:
@@ -168,7 +170,8 @@
  */
 static int suspend_enter(suspend_state_t state, bool *wakeup)
 {
-	int error;
+	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
+	int error, last_dev;
 
 	if (need_suspend_ops(state) && suspend_ops->prepare) {
 		error = suspend_ops->prepare();
@@ -178,7 +181,11 @@
 
 	error = dpm_suspend_end(PMSG_SUSPEND);
 	if (error) {
+		last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
+		last_dev %= REC_FAILED_NUM;
 		printk(KERN_ERR "PM: Some devices failed to power down\n");
+		log_suspend_abort_reason("%s device failed to power down",
+			suspend_stats.failed_devs[last_dev]);
 		goto Platform_finish;
 	}
 
@@ -203,8 +210,10 @@
 	}
 
 	error = disable_nonboot_cpus();
-	if (error || suspend_test(TEST_CPUS))
+	if (error || suspend_test(TEST_CPUS)) {
+		log_suspend_abort_reason("Disabling non-boot cpus failed");
 		goto Enable_cpus;
+	}
 
 	arch_suspend_disable_irqs();
 	BUG_ON(!irqs_disabled());
@@ -215,6 +224,11 @@
 		if (!(suspend_test(TEST_CORE) || *wakeup)) {
 			error = suspend_ops->enter(state);
 			events_check_enabled = false;
+		} else if (*wakeup) {
+			pm_get_active_wakeup_sources(suspend_abort,
+				MAX_SUSPEND_ABORT_LEN);
+			log_suspend_abort_reason(suspend_abort);
+			error = -EBUSY;
 		}
 		syscore_resume();
 	}
@@ -262,6 +276,7 @@
 	error = dpm_suspend_start(PMSG_SUSPEND);
 	if (error) {
 		printk(KERN_ERR "PM: Some devices failed to suspend\n");
+		log_suspend_abort_reason("Some devices failed to suspend");
 		goto Recover_platform;
 	}
 	suspend_test_finish("suspend devices");
@@ -358,6 +373,18 @@
 	return error;
 }
 
+static void pm_suspend_marker(char *annotation)
+{
+	struct timespec ts;
+	struct rtc_time tm;
+
+	getnstimeofday(&ts);
+	rtc_time_to_tm(ts.tv_sec, &tm);
+	pr_info("PM: suspend %s %d-%02d-%02d %02d:%02d:%02d.%09lu UTC\n",
+		annotation, tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+		tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
+}
+
 /**
  * pm_suspend - Externally visible function for suspending the system.
  * @state: System sleep state to enter.
@@ -372,6 +399,7 @@
 	if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
 		return -EINVAL;
 
+	pm_suspend_marker("entry");
 	error = enter_state(state);
 	if (error) {
 		suspend_stats.fail++;
@@ -379,6 +407,7 @@
 	} else {
 		suspend_stats.success++;
 	}
+	pm_suspend_marker("exit");
 	return error;
 }
 EXPORT_SYMBOL(pm_suspend);
diff --git a/kernel/power/suspend_time.c b/kernel/power/suspend_time.c
new file mode 100644
index 0000000..d2a65da
--- /dev/null
+++ b/kernel/power/suspend_time.c
@@ -0,0 +1,111 @@
+/*
+ * debugfs file to track time spent in suspend
+ *
+ * Copyright (c) 2011, Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/syscore_ops.h>
+#include <linux/time.h>
+
+static struct timespec suspend_time_before;
+static unsigned int time_in_suspend_bins[32];
+
+#ifdef CONFIG_DEBUG_FS
+static int suspend_time_debug_show(struct seq_file *s, void *data)
+{
+	int bin;
+	seq_printf(s, "time (secs)  count\n");
+	seq_printf(s, "------------------\n");
+	for (bin = 0; bin < 32; bin++) {
+		if (time_in_suspend_bins[bin] == 0)
+			continue;
+		seq_printf(s, "%4d - %4d %4u\n",
+			bin ? 1 << (bin - 1) : 0, 1 << bin,
+				time_in_suspend_bins[bin]);
+	}
+	return 0;
+}
+
+static int suspend_time_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, suspend_time_debug_show, NULL);
+}
+
+static const struct file_operations suspend_time_debug_fops = {
+	.open		= suspend_time_debug_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int __init suspend_time_debug_init(void)
+{
+	struct dentry *d;
+
+	d = debugfs_create_file("suspend_time", 0755, NULL, NULL,
+		&suspend_time_debug_fops);
+	if (!d) {
+		pr_err("Failed to create suspend_time debug file\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+late_initcall(suspend_time_debug_init);
+#endif
+
+static int suspend_time_syscore_suspend(void)
+{
+	read_persistent_clock(&suspend_time_before);
+
+	return 0;
+}
+
+static void suspend_time_syscore_resume(void)
+{
+	struct timespec after;
+
+	read_persistent_clock(&after);
+
+	after = timespec_sub(after, suspend_time_before);
+
+	time_in_suspend_bins[fls(after.tv_sec)]++;
+
+	pr_info("Suspended for %lu.%03lu seconds\n", after.tv_sec,
+		after.tv_nsec / NSEC_PER_MSEC);
+}
+
+static struct syscore_ops suspend_time_syscore_ops = {
+	.suspend = suspend_time_syscore_suspend,
+	.resume = suspend_time_syscore_resume,
+};
+
+static int suspend_time_syscore_init(void)
+{
+	register_syscore_ops(&suspend_time_syscore_ops);
+
+	return 0;
+}
+
+static void suspend_time_syscore_exit(void)
+{
+	unregister_syscore_ops(&suspend_time_syscore_ops);
+}
+module_init(suspend_time_syscore_init);
+module_exit(suspend_time_syscore_exit);
diff --git a/kernel/power/wakeup_reason.c b/kernel/power/wakeup_reason.c
new file mode 100644
index 0000000..f18fb16
--- /dev/null
+++ b/kernel/power/wakeup_reason.c
@@ -0,0 +1,216 @@
+/*
+ * kernel/power/wakeup_reason.c
+ *
+ * Logs the reasons which caused the kernel to resume from
+ * the suspend mode.
+ *
+ * Copyright (C) 2014 Google, Inc.
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/wakeup_reason.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/notifier.h>
+#include <linux/suspend.h>
+
+
+#define MAX_WAKEUP_REASON_IRQS 32
+static int irq_list[MAX_WAKEUP_REASON_IRQS];
+static int irqcount;
+static bool suspend_abort;
+static char abort_reason[MAX_SUSPEND_ABORT_LEN];
+static struct kobject *wakeup_reason;
+static DEFINE_SPINLOCK(resume_reason_lock);
+
+static struct timespec last_xtime; /* wall time before last suspend */
+static struct timespec curr_xtime; /* wall time after last suspend */
+static struct timespec last_stime; /* total_sleep_time before last suspend */
+static struct timespec curr_stime; /* total_sleep_time after last suspend */
+
+static ssize_t last_resume_reason_show(struct kobject *kobj, struct kobj_attribute *attr,
+		char *buf)
+{
+	int irq_no, buf_offset = 0;
+	struct irq_desc *desc;
+	spin_lock(&resume_reason_lock);
+	if (suspend_abort) {
+		buf_offset = sprintf(buf, "Abort: %s", abort_reason);
+	} else {
+		for (irq_no = 0; irq_no < irqcount; irq_no++) {
+			desc = irq_to_desc(irq_list[irq_no]);
+			if (desc && desc->action && desc->action->name)
+				buf_offset += sprintf(buf + buf_offset, "%d %s\n",
+						irq_list[irq_no], desc->action->name);
+			else
+				buf_offset += sprintf(buf + buf_offset, "%d\n",
+						irq_list[irq_no]);
+		}
+	}
+	spin_unlock(&resume_reason_lock);
+	return buf_offset;
+}
+
+static ssize_t last_suspend_time_show(struct kobject *kobj,
+			struct kobj_attribute *attr, char *buf)
+{
+	struct timespec sleep_time;
+	struct timespec total_time;
+	struct timespec suspend_resume_time;
+
+	sleep_time = timespec_sub(curr_stime, last_stime);
+	total_time = timespec_sub(curr_xtime, last_xtime);
+	suspend_resume_time = timespec_sub(total_time, sleep_time);
+
+	/*
+	 * suspend_resume_time is calculated from sleep_time. Userspace would
+	 * always need both. Export them in pair here.
+	 */
+	return sprintf(buf, "%lu.%09lu %lu.%09lu\n",
+				suspend_resume_time.tv_sec, suspend_resume_time.tv_nsec,
+				sleep_time.tv_sec, sleep_time.tv_nsec);
+}
+
+static struct kobj_attribute resume_reason = __ATTR_RO(last_resume_reason);
+static struct kobj_attribute suspend_time = __ATTR_RO(last_suspend_time);
+
+static struct attribute *attrs[] = {
+	&resume_reason.attr,
+	&suspend_time.attr,
+	NULL,
+};
+static struct attribute_group attr_group = {
+	.attrs = attrs,
+};
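+
+/*
+ * With the kobject created in wakeup_reason_init() below, these attributes
+ * are exposed as /sys/kernel/wakeup_reasons/last_resume_reason and
+ * /sys/kernel/wakeup_reasons/last_suspend_time.
+ */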
+
+/*
+ * Logs the wake-up reason to the kernel log and stores the IRQ number
+ * so it can be exposed to userspace via sysfs.
+ */
+void log_wakeup_reason(int irq)
+{
+	struct irq_desc *desc;
+	desc = irq_to_desc(irq);
+	if (desc && desc->action && desc->action->name)
+		printk(KERN_INFO "Resume caused by IRQ %d, %s\n", irq,
+				desc->action->name);
+	else
+		printk(KERN_INFO "Resume caused by IRQ %d\n", irq);
+
+	spin_lock(&resume_reason_lock);
+	if (irqcount == MAX_WAKEUP_REASON_IRQS) {
+		spin_unlock(&resume_reason_lock);
+		printk(KERN_WARNING "Resume caused by more than %d IRQs\n",
+				MAX_WAKEUP_REASON_IRQS);
+		return;
+	}
+
+	irq_list[irqcount++] = irq;
+	spin_unlock(&resume_reason_lock);
+}
+
+int check_wakeup_reason(int irq)
+{
+	int irq_no;
+	int ret = false;
+
+	spin_lock(&resume_reason_lock);
+	for (irq_no = 0; irq_no < irqcount; irq_no++) {
+		if (irq_list[irq_no] == irq) {
+			ret = true;
+			break;
+		}
+	}
+	spin_unlock(&resume_reason_lock);
+	return ret;
+}
+
+void log_suspend_abort_reason(const char *fmt, ...)
+{
+	va_list args;
+
+	spin_lock(&resume_reason_lock);
+
+	/* Suspend abort reason has already been logged. */
+	if (suspend_abort) {
+		spin_unlock(&resume_reason_lock);
+		return;
+	}
+
+	suspend_abort = true;
+	va_start(args, fmt);
+	vsnprintf(abort_reason, MAX_SUSPEND_ABORT_LEN, fmt, args);
+	va_end(args);
+	spin_unlock(&resume_reason_lock);
+}
+
+/* Detects a suspend and clears all the previous wake-up reasons */
+static int wakeup_reason_pm_event(struct notifier_block *notifier,
+		unsigned long pm_event, void *unused)
+{
+	struct timespec xtom; /* wall_to_monotonic, ignored */
+
+	switch (pm_event) {
+	case PM_SUSPEND_PREPARE:
+		spin_lock(&resume_reason_lock);
+		irqcount = 0;
+		suspend_abort = false;
+		spin_unlock(&resume_reason_lock);
+
+		get_xtime_and_monotonic_and_sleep_offset(&last_xtime, &xtom,
+			&last_stime);
+		break;
+	case PM_POST_SUSPEND:
+		get_xtime_and_monotonic_and_sleep_offset(&curr_xtime, &xtom,
+			&curr_stime);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block wakeup_reason_pm_notifier_block = {
+	.notifier_call = wakeup_reason_pm_event,
+};
+
+/*
+ * Initializes the sysfs attributes and registers the pm_event notifier.
+ */
+int __init wakeup_reason_init(void)
+{
+	int retval;
+
+	retval = register_pm_notifier(&wakeup_reason_pm_notifier_block);
+	if (retval)
+		printk(KERN_WARNING "[%s] failed to register PM notifier %d\n",
+				__func__, retval);
+
+	wakeup_reason = kobject_create_and_add("wakeup_reasons", kernel_kobj);
+	if (!wakeup_reason) {
+		printk(KERN_WARNING "[%s] failed to create a sysfs kobject\n",
+				__func__);
+		return 1;
+	}
+	retval = sysfs_create_group(wakeup_reason, &attr_group);
+	if (retval) {
+		kobject_put(wakeup_reason);
+		printk(KERN_WARNING "[%s] failed to create a sysfs group %d\n",
+				__func__, retval);
+	}
+	return 0;
+}
+
+late_initcall(wakeup_reason_init);
diff --git a/kernel/printk.c b/kernel/printk.c
index ee8f6be..76687e1 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -51,6 +51,10 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/printk.h>
 
+#ifdef CONFIG_EARLY_PRINTK_DIRECT
+extern void printascii(char *);
+#endif
+
 /* printk's without a loglevel use this.. */
 #define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
 
@@ -363,6 +367,13 @@
 	log_next_seq++;
 }
 
+/* Clears the ring-buffer */
+void log_buf_clear(void)
+{
+	clear_seq = log_next_seq;
+	clear_idx = log_next_idx;
+}
+
 #ifdef CONFIG_SECURITY_DMESG_RESTRICT
 int dmesg_restrict = 1;
 #else
@@ -1353,7 +1364,7 @@
 {
 	int retval = 0, wake = 0;
 
-	if (console_trylock()) {
+	if (!in_nmi() && console_trylock()) {
 		retval = 1;
 
 		/*
@@ -1532,7 +1543,13 @@
 	}
 
 	lockdep_off();
-	raw_spin_lock(&logbuf_lock);
+	if (unlikely(in_nmi())) {
+		if (!raw_spin_trylock(&logbuf_lock))
+			goto out_restore_lockdep_irqs;
+	} else {
+		raw_spin_lock(&logbuf_lock);
+	}
+
 	logbuf_cpu = this_cpu;
 
 	if (recursion_bug) {
@@ -1578,6 +1595,10 @@
 		}
 	}
 
+#ifdef CONFIG_EARLY_PRINTK_DIRECT
+	printascii(text);
+#endif
+
 	if (level == -1)
 		level = default_message_loglevel;
 
@@ -1628,6 +1649,7 @@
 	if (console_trylock_for_printk(this_cpu))
 		console_unlock();
 
+out_restore_lockdep_irqs:
 	lockdep_on();
 out_restore_irqs:
 	local_irq_restore(flags);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6a366f9..aa61ee1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7145,13 +7145,24 @@
 	return (nested == preempt_offset);
 }
 
+static int __might_sleep_init_called;
+int __init __might_sleep_init(void)
+{
+	__might_sleep_init_called = 1;
+	return 0;
+}
+early_initcall(__might_sleep_init);
+
 void __might_sleep(const char *file, int line, int preempt_offset)
 {
 	static unsigned long prev_jiffy;	/* ratelimiting */
 
 	rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
 	if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
-	    system_state != SYSTEM_RUNNING || oops_in_progress)
+	    oops_in_progress)
+		return;
+	if (system_state != SYSTEM_RUNNING &&
+	    (!__might_sleep_init_called || system_state != SYSTEM_BOOTING))
 		return;
 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
 		return;
@@ -8123,6 +8134,7 @@
 	.css_offline	= cpu_cgroup_css_offline,
 	.can_attach	= cpu_cgroup_can_attach,
 	.attach		= cpu_cgroup_attach,
+	.allow_attach	= subsys_cgroup_allow_attach,
 	.exit		= cpu_cgroup_exit,
 	.subsys_id	= cpu_cgroup_subsys_id,
 	.base_cftypes	= cpu_files,
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index c23a8fd..8cda97c 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -1,3 +1,4 @@
+#include <linux/cpufreq.h>
 #include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/tsacct_kern.h>
@@ -149,6 +150,11 @@
 
 	/* Account for user time used */
 	acct_account_cputime(p);
+
+#ifdef CONFIG_CPU_FREQ_STAT
+	/* Account power usage for user time */
+	acct_update_power(p, cputime);
+#endif
 }
 
 /*
@@ -199,6 +205,11 @@
 
 	/* Account for system time used */
 	acct_account_cputime(p);
+
+#ifdef CONFIG_CPU_FREQ_STAT
+	/* Account power usage for system time */
+	acct_update_power(p, cputime);
+#endif
 }
 
 /*
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index b7a1004..18d57c7 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -18,15 +18,17 @@
 #include <linux/compat.h>
 #include <linux/sched.h>
 #include <linux/seccomp.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
 
 /* #define SECCOMP_DEBUG 1 */
 
 #ifdef CONFIG_SECCOMP_FILTER
 #include <asm/syscall.h>
 #include <linux/filter.h>
+#include <linux/pid.h>
 #include <linux/ptrace.h>
 #include <linux/security.h>
-#include <linux/slab.h>
 #include <linux/tracehook.h>
 #include <linux/uaccess.h>
 
@@ -95,7 +97,7 @@
 	if (off == BPF_DATA(nr))
 		return syscall_get_nr(current, regs);
 	if (off == BPF_DATA(arch))
-		return syscall_get_arch(current, regs);
+		return syscall_get_arch();
 	if (off >= BPF_DATA(args[0]) && off < BPF_DATA(args[6])) {
 		unsigned long value;
 		int arg = (off - BPF_DATA(args[0])) / sizeof(u64);
@@ -201,32 +203,170 @@
  */
 static u32 seccomp_run_filters(int syscall)
 {
-	struct seccomp_filter *f;
+	struct seccomp_filter *f = ACCESS_ONCE(current->seccomp.filter);
 	u32 ret = SECCOMP_RET_ALLOW;
 
 	/* Ensure unexpected behavior doesn't result in failing open. */
-	if (WARN_ON(current->seccomp.filter == NULL))
+	if (unlikely(WARN_ON(f == NULL)))
 		return SECCOMP_RET_KILL;
 
+	/* Make sure cross-thread synced filter points somewhere sane. */
+	smp_read_barrier_depends();
+
 	/*
 	 * All filters in the list are evaluated and the lowest BPF return
 	 * value always takes priority (ignoring the DATA).
 	 */
-	for (f = current->seccomp.filter; f; f = f->prev) {
+	for (; f; f = f->prev) {
 		u32 cur_ret = sk_run_filter(NULL, f->insns);
+
 		if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
 			ret = cur_ret;
 	}
 	return ret;
 }
+#endif /* CONFIG_SECCOMP_FILTER */
+
+static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
+{
+	assert_spin_locked(&current->sighand->siglock);
+
+	if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
+		return false;
+
+	return true;
+}
+
+static inline void seccomp_assign_mode(struct task_struct *task,
+				       unsigned long seccomp_mode)
+{
+	assert_spin_locked(&task->sighand->siglock);
+
+	task->seccomp.mode = seccomp_mode;
+	/*
+	 * Make sure TIF_SECCOMP cannot be set before the mode (and
+	 * filter) is set.
+	 */
+	smp_mb();
+	set_tsk_thread_flag(task, TIF_SECCOMP);
+}
+
+#ifdef CONFIG_SECCOMP_FILTER
+/* Returns 1 if the parent is an ancestor of the child. */
+static int is_ancestor(struct seccomp_filter *parent,
+		       struct seccomp_filter *child)
+{
+	/* NULL is the root ancestor. */
+	if (parent == NULL)
+		return 1;
+	for (; child; child = child->prev)
+		if (child == parent)
+			return 1;
+	return 0;
+}
 
 /**
- * seccomp_attach_filter: Attaches a seccomp filter to current.
+ * seccomp_can_sync_threads: checks if all threads can be synchronized
+ *
+ * Expects sighand and cred_guard_mutex locks to be held.
+ *
+ * Returns 0 on success, -ve on error, or the pid of a thread which was
+ * either not in the correct seccomp mode or did not have an ancestral
+ * seccomp filter.
+ */
+static inline pid_t seccomp_can_sync_threads(void)
+{
+	struct task_struct *thread, *caller;
+
+	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
+	assert_spin_locked(&current->sighand->siglock);
+
+	/* Validate all threads being eligible for synchronization. */
+	caller = current;
+	for_each_thread(caller, thread) {
+		pid_t failed;
+
+		/* Skip current, since it is initiating the sync. */
+		if (thread == caller)
+			continue;
+
+		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
+		    (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
+		     is_ancestor(thread->seccomp.filter,
+				 caller->seccomp.filter)))
+			continue;
+
+		/* Return the first thread that cannot be synchronized. */
+		failed = task_pid_vnr(thread);
+		/* If the pid cannot be resolved, then return -ESRCH */
+		if (unlikely(WARN_ON(failed == 0)))
+			failed = -ESRCH;
+		return failed;
+	}
+
+	return 0;
+}
+
+/**
+ * seccomp_sync_threads: sets all threads to use current's filter
+ *
+ * Expects sighand and cred_guard_mutex locks to be held, and for
+ * seccomp_can_sync_threads() to have returned success already
+ * without dropping the locks.
+ *
+ */
+static inline void seccomp_sync_threads(void)
+{
+	struct task_struct *thread, *caller;
+
+	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
+	assert_spin_locked(&current->sighand->siglock);
+
+	/* Synchronize all threads. */
+	caller = current;
+	for_each_thread(caller, thread) {
+		/* Skip current, since it needs no changes. */
+		if (thread == caller)
+			continue;
+
+		/* Get a task reference for the new leaf node. */
+		get_seccomp_filter(caller);
+		/*
+		 * Drop the task reference to the shared ancestor since
+		 * current's path will hold a reference.  (This also
+		 * allows a put before the assignment.)
+		 */
+		put_seccomp_filter(thread);
+		smp_store_release(&thread->seccomp.filter,
+				  caller->seccomp.filter);
+
+		/*
+		 * Don't let an unprivileged task work around
+		 * the no_new_privs restriction by creating
+		 * a thread that sets it up, enters seccomp,
+		 * then dies.
+		 */
+		if (task_no_new_privs(caller))
+			task_set_no_new_privs(thread);
+
+		/*
+		 * Opt the other thread into seccomp if needed.
+		 * As threads are considered to be trust-realm
+		 * equivalent (see ptrace_may_access), it is safe to
+		 * allow one thread to transition the other.
+		 */
+		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
+			seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
+	}
+}
+
+/**
+ * seccomp_prepare_filter: Prepares a seccomp filter for use.
  * @fprog: BPF program to install
  *
- * Returns 0 on success or an errno on failure.
+ * Returns filter on success or an ERR_PTR on failure.
  */
-static long seccomp_attach_filter(struct sock_fprog *fprog)
+static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
 {
 	struct seccomp_filter *filter;
 	unsigned long fp_size = fprog->len * sizeof(struct sock_filter);
@@ -234,12 +374,13 @@
 	long ret;
 
 	if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
+	BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));
 
 	for (filter = current->seccomp.filter; filter; filter = filter->prev)
 		total_insns += filter->len + 4;  /* include a 4 instr penalty */
 	if (total_insns > MAX_INSNS_PER_PATH)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	/*
 	 * Installing a seccomp filter requires that the task have
@@ -247,16 +388,16 @@
 	 * This avoids scenarios where unprivileged tasks can affect the
 	 * behavior of privileged children.
 	 */
-	if (!current->no_new_privs &&
+	if (!task_no_new_privs(current) &&
 	    security_capable_noaudit(current_cred(), current_user_ns(),
 				     CAP_SYS_ADMIN) != 0)
-		return -EACCES;
+		return ERR_PTR(-EACCES);
 
 	/* Allocate a new seccomp_filter */
 	filter = kzalloc(sizeof(struct seccomp_filter) + fp_size,
 			 GFP_KERNEL|__GFP_NOWARN);
 	if (!filter)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	atomic_set(&filter->usage, 1);
 	filter->len = fprog->len;
 
@@ -275,28 +416,24 @@
 	if (ret)
 		goto fail;
 
-	/*
-	 * If there is an existing filter, make it the prev and don't drop its
-	 * task reference.
-	 */
-	filter->prev = current->seccomp.filter;
-	current->seccomp.filter = filter;
-	return 0;
+	return filter;
+
 fail:
 	kfree(filter);
-	return ret;
+	return ERR_PTR(ret);
 }
 
 /**
- * seccomp_attach_user_filter - attaches a user-supplied sock_fprog
+ * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
  * @user_filter: pointer to the user data containing a sock_fprog.
  *
  * Returns 0 on success and non-zero otherwise.
  */
-long seccomp_attach_user_filter(char __user *user_filter)
+static struct seccomp_filter *
+seccomp_prepare_user_filter(const char __user *user_filter)
 {
 	struct sock_fprog fprog;
-	long ret = -EFAULT;
+	struct seccomp_filter *filter = ERR_PTR(-EFAULT);
 
 #ifdef CONFIG_COMPAT
 	if (is_compat_task()) {
@@ -309,9 +446,56 @@
 #endif
 	if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
 		goto out;
-	ret = seccomp_attach_filter(&fprog);
+	filter = seccomp_prepare_filter(&fprog);
 out:
-	return ret;
+	return filter;
+}
+
+/**
+ * seccomp_attach_filter: validate and attach filter
+ * @flags:  flags to change filter behavior
+ * @filter: seccomp filter to add to the current process
+ *
+ * Caller must be holding current->sighand->siglock lock.
+ *
+ * Returns 0 on success, -ve on error.
+ */
+static long seccomp_attach_filter(unsigned int flags,
+				  struct seccomp_filter *filter)
+{
+	unsigned long total_insns;
+	struct seccomp_filter *walker;
+
+	assert_spin_locked(&current->sighand->siglock);
+
+	/* Validate resulting filter length. */
+	total_insns = filter->len;
+	for (walker = current->seccomp.filter; walker; walker = walker->prev)
+		total_insns += walker->len + 4;  /* 4 instr penalty */
+	if (total_insns > MAX_INSNS_PER_PATH)
+		return -ENOMEM;
+
+	/* If thread sync has been requested, check that it is possible. */
+	if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
+		int ret;
+
+		ret = seccomp_can_sync_threads();
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * If there is an existing filter, make it the prev and don't drop its
+	 * task reference.
+	 */
+	filter->prev = current->seccomp.filter;
+	current->seccomp.filter = filter;
+
+	/* Now that the new filter is in place, synchronize to all threads. */
+	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
+		seccomp_sync_threads();
+
+	return 0;
 }
 
 /* get_seccomp_filter - increments the reference count of the filter on @tsk */
@@ -324,6 +508,13 @@
 	atomic_inc(&orig->usage);
 }
 
+static inline void seccomp_filter_free(struct seccomp_filter *filter)
+{
+	if (filter) {
+		kfree(filter);
+	}
+}
+
 /* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
 void put_seccomp_filter(struct task_struct *tsk)
 {
@@ -332,7 +523,7 @@
 	while (orig && atomic_dec_and_test(&orig->usage)) {
 		struct seccomp_filter *freeme = orig;
 		orig = orig->prev;
-		kfree(freeme);
+		seccomp_filter_free(freeme);
 	}
 }
 
@@ -351,7 +542,7 @@
 	info.si_code = SYS_SECCOMP;
 	info.si_call_addr = (void __user *)KSTK_EIP(current);
 	info.si_errno = reason;
-	info.si_arch = syscall_get_arch(current, task_pt_regs(current));
+	info.si_arch = syscall_get_arch();
 	info.si_syscall = syscall;
 	force_sig_info(SIGSYS, &info, current);
 }
@@ -376,12 +567,17 @@
 
 int __secure_computing(int this_syscall)
 {
-	int mode = current->seccomp.mode;
 	int exit_sig = 0;
 	int *syscall;
 	u32 ret;
 
-	switch (mode) {
+	/*
+	 * Make sure that any changes to mode from another thread have
+	 * been seen after TIF_SECCOMP was seen.
+	 */
+	rmb();
+
+	switch (current->seccomp.mode) {
 	case SECCOMP_MODE_STRICT:
 		syscall = mode1_syscalls;
 #ifdef CONFIG_COMPAT
@@ -404,7 +600,9 @@
 		ret &= SECCOMP_RET_ACTION;
 		switch (ret) {
 		case SECCOMP_RET_ERRNO:
-			/* Set the low-order 16-bits as a errno. */
+			/* Set low-order bits as an errno, capped at MAX_ERRNO. */
+			if (data > MAX_ERRNO)
+				data = MAX_ERRNO;
 			syscall_set_return_value(current, regs,
 						 -data, 0);
 			goto skip;
@@ -467,47 +665,152 @@
 }
 
 /**
- * prctl_set_seccomp: configures current->seccomp.mode
- * @seccomp_mode: requested mode to use
- * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
- *
- * This function may be called repeatedly with a @seccomp_mode of
- * SECCOMP_MODE_FILTER to install additional filters.  Every filter
- * successfully installed will be evaluated (in reverse order) for each system
- * call the task makes.
+ * seccomp_set_mode_strict: internal function for setting strict seccomp
  *
  * Once current->seccomp.mode is non-zero, it may not be changed.
  *
  * Returns 0 on success or -EINVAL on failure.
  */
-long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
+static long seccomp_set_mode_strict(void)
 {
+	const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
 	long ret = -EINVAL;
 
-	if (current->seccomp.mode &&
-	    current->seccomp.mode != seccomp_mode)
+	spin_lock_irq(&current->sighand->siglock);
+
+	if (!seccomp_may_assign_mode(seccomp_mode))
 		goto out;
 
+#ifdef TIF_NOTSC
+	disable_TSC();
+#endif
+	seccomp_assign_mode(current, seccomp_mode);
+	ret = 0;
+
+out:
+	spin_unlock_irq(&current->sighand->siglock);
+
+	return ret;
+}
+
+#ifdef CONFIG_SECCOMP_FILTER
+/**
+ * seccomp_set_mode_filter: internal function for setting seccomp filter
+ * @flags:  flags to change filter behavior
+ * @filter: struct sock_fprog containing filter
+ *
+ * This function may be called repeatedly to install additional filters.
+ * Every filter successfully installed will be evaluated (in reverse order)
+ * for each system call the task makes.
+ *
+ * Once current->seccomp.mode is non-zero, it may not be changed.
+ *
+ * Returns 0 on success or -EINVAL on failure.
+ */
+static long seccomp_set_mode_filter(unsigned int flags,
+				    const char __user *filter)
+{
+	const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
+	struct seccomp_filter *prepared = NULL;
+	long ret = -EINVAL;
+
+	/* Validate flags. */
+	if (flags & ~SECCOMP_FILTER_FLAG_MASK)
+		return -EINVAL;
+
+	/* Prepare the new filter before holding any locks. */
+	prepared = seccomp_prepare_user_filter(filter);
+	if (IS_ERR(prepared))
+		return PTR_ERR(prepared);
+
+	/*
+	 * Make sure we cannot change seccomp or nnp state via TSYNC
+	 * while another thread is in the middle of calling exec.
+	 */
+	if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
+	    mutex_lock_killable(&current->signal->cred_guard_mutex))
+		goto out_free;
+
+	spin_lock_irq(&current->sighand->siglock);
+
+	if (!seccomp_may_assign_mode(seccomp_mode))
+		goto out;
+
+	ret = seccomp_attach_filter(flags, prepared);
+	if (ret)
+		goto out;
+	/* Do not free the successfully attached filter. */
+	prepared = NULL;
+
+	seccomp_assign_mode(current, seccomp_mode);
+out:
+	spin_unlock_irq(&current->sighand->siglock);
+	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
+		mutex_unlock(&current->signal->cred_guard_mutex);
+out_free:
+	seccomp_filter_free(prepared);
+	return ret;
+}
+#else
+static inline long seccomp_set_mode_filter(unsigned int flags,
+					   const char __user *filter)
+{
+	return -EINVAL;
+}
+#endif
+
+/* Common entry point for both prctl and syscall. */
+static long do_seccomp(unsigned int op, unsigned int flags,
+		       const char __user *uargs)
+{
+	switch (op) {
+	case SECCOMP_SET_MODE_STRICT:
+		if (flags != 0 || uargs != NULL)
+			return -EINVAL;
+		return seccomp_set_mode_strict();
+	case SECCOMP_SET_MODE_FILTER:
+		return seccomp_set_mode_filter(flags, uargs);
+	default:
+		return -EINVAL;
+	}
+}
+
+SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
+			 const char __user *, uargs)
+{
+	return do_seccomp(op, flags, uargs);
+}
+
+/**
+ * prctl_set_seccomp: configures current->seccomp.mode
+ * @seccomp_mode: requested mode to use
+ * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
+ *
+ * Returns 0 on success or -EINVAL on failure.
+ */
+long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
+{
+	unsigned int op;
+	char __user *uargs;
+
 	switch (seccomp_mode) {
 	case SECCOMP_MODE_STRICT:
-		ret = 0;
-#ifdef TIF_NOTSC
-		disable_TSC();
-#endif
+		op = SECCOMP_SET_MODE_STRICT;
+		/*
+		 * Setting strict mode through prctl always ignored the filter,
+		 * so make sure it is always NULL here to pass the internal
+		 * check in do_seccomp().
+		 */
+		uargs = NULL;
 		break;
-#ifdef CONFIG_SECCOMP_FILTER
 	case SECCOMP_MODE_FILTER:
-		ret = seccomp_attach_user_filter(filter);
-		if (ret)
-			goto out;
+		op = SECCOMP_SET_MODE_FILTER;
+		uargs = filter;
 		break;
-#endif
 	default:
-		goto out;
+		return -EINVAL;
 	}
 
-	current->seccomp.mode = seccomp_mode;
-	set_thread_flag(TIF_SECCOMP);
-out:
-	return ret;
+	/* prctl interface doesn't have flags, so they are always zero. */
+	return do_seccomp(op, 0, uargs);
 }
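With the syscall wired up above, userspace can reach the filter API without prctl. A hedged sketch of installing a trivial allow-all filter across every thread with SECCOMP_FILTER_FLAG_TSYNC; __NR_seccomp may be missing from older userspace headers, so the syscall invocation is an assumption:

#include <linux/filter.h>
#include <linux/seccomp.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* Trivial program: allow every syscall. */
	struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	/* Required unless the caller holds CAP_SYS_ADMIN; see
	 * seccomp_prepare_filter() above. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return 1;

	/* __NR_seccomp may be absent from older headers (assumption).
	 * On failure with TSYNC, the return value can be the TID
	 * reported by seccomp_can_sync_threads(). */
	return syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
		       SECCOMP_FILTER_FLAG_TSYNC, &prog);
}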
diff --git a/kernel/signal.c b/kernel/signal.c
index 7b81c53..408f944 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2849,7 +2849,7 @@
 		recalc_sigpending();
 		spin_unlock_irq(&tsk->sighand->siglock);
 
-		timeout = schedule_timeout_interruptible(timeout);
+		timeout = freezable_schedule_timeout_interruptible(timeout);
 
 		spin_lock_irq(&tsk->sighand->siglock);
 		__set_task_blocked(tsk, &tsk->real_blocked);
diff --git a/kernel/sys.c b/kernel/sys.c
index 2bbd9a7..a3bef5b 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -42,6 +42,9 @@
 #include <linux/syscore_ops.h>
 #include <linux/version.h>
 #include <linux/ctype.h>
+#include <linux/mm.h>
+#include <linux/mempolicy.h>
+#include <linux/sched.h>
 
 #include <linux/compat.h>
 #include <linux/syscalls.h>
@@ -2099,10 +2102,158 @@
 }
 #endif
 
+#ifdef CONFIG_MMU
+static int prctl_update_vma_anon_name(struct vm_area_struct *vma,
+		struct vm_area_struct **prev,
+		unsigned long start, unsigned long end,
+		const char __user *name_addr)
+{
+	struct mm_struct * mm = vma->vm_mm;
+	int error = 0;
+	pgoff_t pgoff;
+
+	if (name_addr == vma_get_anon_name(vma)) {
+		*prev = vma;
+		goto out;
+	}
+
+	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
+	*prev = vma_merge(mm, *prev, start, end, vma->vm_flags, vma->anon_vma,
+				vma->vm_file, pgoff, vma_policy(vma),
+				name_addr);
+	if (*prev) {
+		vma = *prev;
+		goto success;
+	}
+
+	*prev = vma;
+
+	if (start != vma->vm_start) {
+		error = split_vma(mm, vma, start, 1);
+		if (error)
+			goto out;
+	}
+
+	if (end != vma->vm_end) {
+		error = split_vma(mm, vma, end, 0);
+		if (error)
+			goto out;
+	}
+
+success:
+	if (!vma->vm_file)
+		vma->shared.anon_name = name_addr;
+
+out:
+	if (error == -ENOMEM)
+		error = -EAGAIN;
+	return error;
+}
+
+static int prctl_set_vma_anon_name(unsigned long start, unsigned long end,
+			unsigned long arg)
+{
+	unsigned long tmp;
+	struct vm_area_struct * vma, *prev;
+	int unmapped_error = 0;
+	int error = -EINVAL;
+
+	/*
+	 * If the interval [start,end) covers some unmapped address
+	 * ranges, just ignore them, but return -ENOMEM at the end.
+	 * - this matches the handling in madvise.
+	 */
+	vma = find_vma_prev(current->mm, start, &prev);
+	if (vma && start > vma->vm_start)
+		prev = vma;
+
+	for (;;) {
+		/* Still start < end. */
+		error = -ENOMEM;
+		if (!vma)
+			return error;
+
+		/* Here start < (end|vma->vm_end). */
+		if (start < vma->vm_start) {
+			unmapped_error = -ENOMEM;
+			start = vma->vm_start;
+			if (start >= end)
+				return error;
+		}
+
+		/* Here vma->vm_start <= start < (end|vma->vm_end) */
+		tmp = vma->vm_end;
+		if (end < tmp)
+			tmp = end;
+
+		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
+		error = prctl_update_vma_anon_name(vma, &prev, start, tmp,
+				(const char __user *)arg);
+		if (error)
+			return error;
+		start = tmp;
+		if (prev && start < prev->vm_end)
+			start = prev->vm_end;
+		error = unmapped_error;
+		if (start >= end)
+			return error;
+		if (prev)
+			vma = prev->vm_next;
+		else	/* madvise_remove dropped mmap_sem */
+			vma = find_vma(current->mm, start);
+	}
+}
+
+static int prctl_set_vma(unsigned long opt, unsigned long start,
+		unsigned long len_in, unsigned long arg)
+{
+	struct mm_struct *mm = current->mm;
+	int error;
+	unsigned long len;
+	unsigned long end;
+
+	if (start & ~PAGE_MASK)
+		return -EINVAL;
+	len = (len_in + ~PAGE_MASK) & PAGE_MASK;
+
+	/* Check to see whether len was rounded up from small -ve to zero */
+	if (len_in && !len)
+		return -EINVAL;
+
+	end = start + len;
+	if (end < start)
+		return -EINVAL;
+
+	if (end == start)
+		return 0;
+
+	down_write(&mm->mmap_sem);
+
+	switch (opt) {
+	case PR_SET_VMA_ANON_NAME:
+		error = prctl_set_vma_anon_name(start, end, arg);
+		break;
+	default:
+		error = -EINVAL;
+	}
+
+	up_write(&mm->mmap_sem);
+
+	return error;
+}
+#else /* CONFIG_MMU */
+static int prctl_set_vma(unsigned long opt, unsigned long start,
+		unsigned long len_in, unsigned long arg)
+{
+	return -EINVAL;
+}
+#endif
+
 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 		unsigned long, arg4, unsigned long, arg5)
 {
 	struct task_struct *me = current;
+	struct task_struct *tsk;
 	unsigned char comm[sizeof(me->comm)];
 	long error;
 
@@ -2245,6 +2396,26 @@
 	case PR_GET_TID_ADDRESS:
 		error = prctl_get_tid_address(me, (int __user **)arg2);
 		break;
+	case PR_SET_TIMERSLACK_PID:
+		if (task_pid_vnr(current) != (pid_t)arg3 &&
+				!capable(CAP_SYS_NICE))
+			return -EPERM;
+		rcu_read_lock();
+		tsk = find_task_by_vpid((pid_t)arg3);
+		if (tsk == NULL) {
+			rcu_read_unlock();
+			return -EINVAL;
+		}
+		get_task_struct(tsk);
+		rcu_read_unlock();
+		if (arg2 <= 0)
+			tsk->timer_slack_ns =
+				tsk->default_timer_slack_ns;
+		else
+			tsk->timer_slack_ns = arg2;
+		put_task_struct(tsk);
+		error = 0;
+		break;
 	case PR_SET_CHILD_SUBREAPER:
 		me->signal->is_child_subreaper = !!arg2;
 		break;
@@ -2256,12 +2427,15 @@
 		if (arg2 != 1 || arg3 || arg4 || arg5)
 			return -EINVAL;
 
-		current->no_new_privs = 1;
+		task_set_no_new_privs(current);
 		break;
 	case PR_GET_NO_NEW_PRIVS:
 		if (arg2 || arg3 || arg4 || arg5)
 			return -EINVAL;
-		return current->no_new_privs ? 1 : 0;
+		return task_no_new_privs(current) ? 1 : 0;
+	case PR_SET_VMA:
+		error = prctl_set_vma(arg2, arg3, arg4, arg5);
+		break;
 	default:
 		error = -EINVAL;
 		break;
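A hedged userspace sketch of the new PR_SET_VMA option. The numeric values of PR_SET_VMA and PR_SET_VMA_ANON_NAME are assumed from the uapi prctl.h additions elsewhere in this patch, and the [anon:<name>] rendering in /proc/<pid>/maps depends on the matching procfs change:

#include <stdio.h>
#include <sys/mman.h>
#include <sys/prctl.h>

/* Values assumed from the prctl.h additions elsewhere in this patch. */
#ifndef PR_SET_VMA
#define PR_SET_VMA		0x53564d41
#define PR_SET_VMA_ANON_NAME	0
#endif

/* The kernel keeps the user pointer, so the string must outlive the
 * mapping (see prctl_update_vma_anon_name() above). */
static const char name[] = "example:scratch";

int main(void)
{
	size_t len = 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
		  (unsigned long)p, len, (unsigned long)name))
		perror("prctl");
	return 0;
}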
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 7078052..7e7fc0a 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -209,3 +209,6 @@
 
 /* compare kernel pointers */
 cond_syscall(sys_kcmp);
+
+/* operate on Secure Computing state */
+cond_syscall(sys_seccomp);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4fd49fe..443f72d 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -105,6 +105,8 @@
 extern unsigned int core_pipe_limit;
 #endif
 extern int pid_max;
+extern int extra_free_kbytes;
+extern int min_free_order_shift;
 extern int pid_max_min, pid_max_max;
 extern int percpu_pagelist_fraction;
 extern int compat_log;
@@ -1282,6 +1284,21 @@
 		.extra1		= &zero,
 	},
 	{
+		.procname	= "extra_free_kbytes",
+		.data		= &extra_free_kbytes,
+		.maxlen		= sizeof(extra_free_kbytes),
+		.mode		= 0644,
+		.proc_handler	= min_free_kbytes_sysctl_handler,
+		.extra1		= &zero,
+	},
+	{
+		.procname	= "min_free_order_shift",
+		.data		= &min_free_order_shift,
+		.maxlen		= sizeof(min_free_order_shift),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec
+	},
+	{
 		.procname	= "percpu_pagelist_fraction",
 		.data		= &percpu_pagelist_fraction,
 		.maxlen		= sizeof(percpu_pagelist_fraction),
@@ -1459,6 +1476,28 @@
 		.mode		= 0644,
 		.proc_handler	= proc_doulongvec_minmax,
 	},
+#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
+	{
+		.procname	= "mmap_rnd_bits",
+		.data		= &mmap_rnd_bits,
+		.maxlen		= sizeof(mmap_rnd_bits),
+		.mode		= 0600,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= (void *)&mmap_rnd_bits_min,
+		.extra2		= (void *)&mmap_rnd_bits_max,
+	},
+#endif
+#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
+	{
+		.procname	= "mmap_rnd_compat_bits",
+		.data		= &mmap_rnd_compat_bits,
+		.maxlen		= sizeof(mmap_rnd_compat_bits),
+		.mode		= 0600,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= (void *)&mmap_rnd_compat_bits_min,
+		.extra2		= (void *)&mmap_rnd_compat_bits_max,
+	},
+#endif
 	{ }
 };
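Both new knobs are runtime-tunable. A small sketch that raises extra_free_kbytes, assuming the entries land in the vm sysctl table (their placement next to percpu_pagelist_fraction suggests /proc/sys/vm/):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Path assumed; see the note above. */
	int fd = open("/proc/sys/vm/extra_free_kbytes", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Keep an extra 16 MiB free beyond min_free_kbytes. */
	if (write(fd, "16384", 5) != 5)
		perror("write");
	close(fd);
	return 0;
}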
 
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 7d19fca..bb08d09 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -199,6 +199,12 @@
 
 }
 
+ktime_t alarm_expires_remaining(const struct alarm *alarm)
+{
+	struct alarm_base *base = &alarm_bases[alarm->type];
+	return ktime_sub(alarm->node.expires, base->gettime());
+}
+
 #ifdef CONFIG_RTC_CLASS
 /**
  * alarmtimer_suspend - Suspend time callback
@@ -305,7 +311,7 @@
 }
 
 /**
- * alarm_start - Sets an alarm to fire
+ * alarm_start - Sets an absolute alarm to fire
  * @alarm: ptr to alarm to set
  * @start: time to run the alarm
  */
@@ -325,6 +331,31 @@
 }
 
 /**
+ * alarm_start_relative - Sets a relative alarm to fire
+ * @alarm: ptr to alarm to set
+ * @start: time relative to now to run the alarm
+ */
+int alarm_start_relative(struct alarm *alarm, ktime_t start)
+{
+	struct alarm_base *base = &alarm_bases[alarm->type];
+
+	start = ktime_add(start, base->gettime());
+	return alarm_start(alarm, start);
+}
+
+void alarm_restart(struct alarm *alarm)
+{
+	struct alarm_base *base = &alarm_bases[alarm->type];
+	unsigned long flags;
+
+	spin_lock_irqsave(&base->lock, flags);
+	hrtimer_set_expires(&alarm->timer, alarm->node.expires);
+	hrtimer_restart(&alarm->timer);
+	alarmtimer_enqueue(base, alarm);
+	spin_unlock_irqrestore(&base->lock, flags);
+}
+
+/**
  * alarm_try_to_cancel - Tries to cancel an alarm timer
  * @alarm: ptr to alarm to be canceled
  *
@@ -394,6 +425,12 @@
 	return overrun;
 }
 
+u64 alarm_forward_now(struct alarm *alarm, ktime_t interval)
+{
+	struct alarm_base *base = &alarm_bases[alarm->type];
+
+	return alarm_forward(alarm, base->gettime(), interval);
+}
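A hedged in-kernel sketch of the new helpers: an alarm armed with alarm_start_relative() that re-arms itself from its handler via alarm_forward_now(). The my_* names are illustrative, not part of this patch:

#include <linux/alarmtimer.h>
#include <linux/ktime.h>

static struct alarm my_alarm;

static enum alarmtimer_restart my_handler(struct alarm *alarm, ktime_t now)
{
	/* Push the expiry forward in whole one-second intervals. */
	alarm_forward_now(alarm, ktime_set(1, 0));
	return ALARMTIMER_RESTART;
}

static void my_alarm_setup(void)
{
	alarm_init(&my_alarm, ALARM_REALTIME, my_handler);
	/* First expiry one second from now, via the relative variant. */
	alarm_start_relative(&my_alarm, ktime_set(1, 0));
}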
 
 
 
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 015f85a..e24c188 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -82,6 +82,9 @@
 	select CONTEXT_SWITCH_TRACER
 	bool
 
+config GPU_TRACEPOINTS
+	bool
+
 config CONTEXT_SWITCH_TRACER
 	bool
 
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index d7e2068..4501212 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -60,5 +60,6 @@
 endif
 obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o
 obj-$(CONFIG_UPROBE_EVENT) += trace_uprobe.o
+obj-$(CONFIG_GPU_TRACEPOINTS) += gpu-traces.o
 
 libftrace-y := ftrace.o
diff --git a/kernel/trace/gpu-traces.c b/kernel/trace/gpu-traces.c
new file mode 100644
index 0000000..a4b3f00
--- /dev/null
+++ b/kernel/trace/gpu-traces.c
@@ -0,0 +1,23 @@
+/*
+ * GPU tracepoints
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/gpu.h>
+
+EXPORT_TRACEPOINT_SYMBOL(gpu_sched_switch);
+EXPORT_TRACEPOINT_SYMBOL(gpu_job_enqueue);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d6e7252..620b20c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -730,6 +730,7 @@
 	"irq-info",
 	"markers",
 	"function-trace",
+	"print-tgid",
 	NULL
 };
 
@@ -1242,6 +1243,7 @@
 static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
+static unsigned saved_tgids[SAVED_CMDLINES];
 static int cmdline_idx;
 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
@@ -1443,6 +1445,7 @@
 	}
 
 	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
+	saved_tgids[idx] = tsk->tgid;
 
 	arch_spin_unlock(&trace_cmdline_lock);
 
@@ -1480,6 +1483,25 @@
 	preempt_enable();
 }
 
+int trace_find_tgid(int pid)
+{
+	unsigned map;
+	int tgid;
+
+	preempt_disable();
+	arch_spin_lock(&trace_cmdline_lock);
+	map = map_pid_to_cmdline[pid];
+	if (map != NO_CMDLINE_MAP)
+		tgid = saved_tgids[map];
+	else
+		tgid = -1;
+
+	arch_spin_unlock(&trace_cmdline_lock);
+	preempt_enable();
+
+	return tgid;
+}
+
 void tracing_record_cmdline(struct task_struct *tsk)
 {
 	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
@@ -2435,6 +2457,13 @@
 	seq_puts(m, "#              | |       |          |         |\n");
 }
 
+static void print_func_help_header_tgid(struct trace_buffer *buf, struct seq_file *m)
+{
+	print_event_info(buf, m);
+	seq_puts(m, "#           TASK-PID    TGID   CPU#      TIMESTAMP  FUNCTION\n");
+	seq_puts(m, "#              | |        |      |          |         |\n");
+}
+
 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
 {
 	print_event_info(buf, m);
@@ -2447,6 +2476,18 @@
 	seq_puts(m, "#              | |       |   ||||       |         |\n");
 }
 
+static void print_func_help_header_irq_tgid(struct trace_buffer *buf, struct seq_file *m)
+{
+	print_event_info(buf, m);
+	seq_puts(m, "#                                      _-----=> irqs-off\n");
+	seq_puts(m, "#                                     / _----=> need-resched\n");
+	seq_puts(m, "#                                    | / _---=> hardirq/softirq\n");
+	seq_puts(m, "#                                    || / _--=> preempt-depth\n");
+	seq_puts(m, "#                                    ||| /     delay\n");
+	seq_puts(m, "#           TASK-PID    TGID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
+	seq_puts(m, "#              | |        |      |   ||||       |         |\n");
+}
+
 void
 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 {
@@ -2747,9 +2788,15 @@
 	} else {
 		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
 			if (trace_flags & TRACE_ITER_IRQ_INFO)
-				print_func_help_header_irq(iter->trace_buffer, m);
+				if (trace_flags & TRACE_ITER_TGID)
+					print_func_help_header_irq_tgid(iter->trace_buffer, m);
+				else
+					print_func_help_header_irq(iter->trace_buffer, m);
 			else
-				print_func_help_header(iter->trace_buffer, m);
+				if (trace_flags & TRACE_ITER_TGID)
+					print_func_help_header_tgid(iter->trace_buffer, m);
+				else
+					print_func_help_header(iter->trace_buffer, m);
 		}
 	}
 }
@@ -3601,9 +3648,53 @@
 }
 
 static const struct file_operations tracing_saved_cmdlines_fops = {
-    .open       = tracing_open_generic,
-    .read       = tracing_saved_cmdlines_read,
-    .llseek	= generic_file_llseek,
+	.open	= tracing_open_generic,
+	.read	= tracing_saved_cmdlines_read,
+	.llseek	= generic_file_llseek,
+};
+
+static ssize_t
+tracing_saved_tgids_read(struct file *file, char __user *ubuf,
+				size_t cnt, loff_t *ppos)
+{
+	char *file_buf;
+	char *buf;
+	int len = 0;
+	int pid;
+	int i;
+
+	file_buf = kmalloc(SAVED_CMDLINES*(16+1+16), GFP_KERNEL);
+	if (!file_buf)
+		return -ENOMEM;
+
+	buf = file_buf;
+
+	for (i = 0; i < SAVED_CMDLINES; i++) {
+		int tgid;
+		int r;
+
+		pid = map_cmdline_to_pid[i];
+		if (pid == -1 || pid == NO_CMDLINE_MAP)
+			continue;
+
+		tgid = trace_find_tgid(pid);
+		r = sprintf(buf, "%d %d\n", pid, tgid);
+		buf += r;
+		len += r;
+	}
+
+	len = simple_read_from_buffer(ubuf, cnt, ppos,
+				      file_buf, len);
+
+	kfree(file_buf);
+
+	return len;
+}
+
+static const struct file_operations tracing_saved_tgids_fops = {
+	.open	= tracing_open_generic,
+	.read	= tracing_saved_tgids_read,
+	.llseek	= generic_file_llseek,
 };
 
 static ssize_t
@@ -6162,6 +6253,9 @@
 	trace_create_file("trace_marker", 0220, d_tracer,
 			  tr, &tracing_mark_fops);
 
+	trace_create_file("saved_tgids", 0444, d_tracer,
+			  tr, &tracing_saved_tgids_fops);
+
 	trace_create_file("trace_clock", 0644, d_tracer, tr,
 			  &trace_clock_fops);
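The saved_tgids file emits one "<pid> <tgid>" pair per line, mirroring tracing_saved_tgids_read() above. A hedged reader sketch, assuming the usual debugfs mount point:

#include <stdio.h>

int main(void)
{
	int pid, tgid;
	FILE *f = fopen("/sys/kernel/debug/tracing/saved_tgids", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* trace_find_tgid() yields -1 when no mapping was recorded. */
	while (fscanf(f, "%d %d", &pid, &tgid) == 2)
		printf("pid %d -> tgid %d\n", pid, tgid);
	fclose(f);
	return 0;
}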
 
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index fe57607..b8a1ac7 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -654,6 +654,7 @@
 extern cycle_t ftrace_now(int cpu);
 
 extern void trace_find_cmdline(int pid, char comm[]);
+extern int trace_find_tgid(int pid);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 extern unsigned long ftrace_update_tot_cnt;
@@ -867,6 +868,7 @@
 	TRACE_ITER_IRQ_INFO		= 0x800000,
 	TRACE_ITER_MARKERS		= 0x1000000,
 	TRACE_ITER_FUNCTION		= 0x2000000,
+	TRACE_ITER_TGID 		= 0x4000000,
 };
 
 /*
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 8388bc9..28dd40c 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -46,6 +46,8 @@
 #define TRACE_GRAPH_PRINT_DURATION	0x10
 #define TRACE_GRAPH_PRINT_ABS_TIME	0x20
 #define TRACE_GRAPH_PRINT_IRQS		0x40
+#define TRACE_GRAPH_PRINT_FLAT		0x80
+
 
 static unsigned int max_depth;
 
@@ -64,6 +66,8 @@
 	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
 	/* Display interrupts */
 	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
+	/* Use standard trace formatting rather than hierarchical */
+	{ TRACER_OPT(funcgraph-flat, TRACE_GRAPH_PRINT_FLAT) },
 	{ } /* Empty entry */
 };
 
@@ -1234,6 +1238,9 @@
 	int cpu = iter->cpu;
 	int ret;
 
+	if (flags & TRACE_GRAPH_PRINT_FLAT)
+		return TRACE_TYPE_UNHANDLED;
+
 	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
 		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
 		return TRACE_TYPE_HANDLED;
@@ -1291,13 +1298,6 @@
 	return print_graph_function_flags(iter, tracer_flags.val);
 }
 
-static enum print_line_t
-print_graph_function_event(struct trace_iterator *iter, int flags,
-			   struct trace_event *event)
-{
-	return print_graph_function(iter);
-}
-
 static void print_lat_header(struct seq_file *s, u32 flags)
 {
 	static const char spaces[] = "                "	/* 16 spaces */
@@ -1364,6 +1364,11 @@
 {
 	struct trace_iterator *iter = s->private;
 
+	if (flags & TRACE_GRAPH_PRINT_FLAT) {
+		trace_default_header(s);
+		return;
+	}
+
 	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
 		return;
 
@@ -1434,20 +1439,6 @@
 	return 0;
 }
 
-static struct trace_event_functions graph_functions = {
-	.trace		= print_graph_function_event,
-};
-
-static struct trace_event graph_trace_entry_event = {
-	.type		= TRACE_GRAPH_ENT,
-	.funcs		= &graph_functions,
-};
-
-static struct trace_event graph_trace_ret_event = {
-	.type		= TRACE_GRAPH_RET,
-	.funcs		= &graph_functions
-};
-
 static struct tracer graph_trace __read_mostly = {
 	.name		= "function_graph",
 	.open		= graph_trace_open,
@@ -1523,16 +1514,6 @@
 {
 	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
 
-	if (!register_ftrace_event(&graph_trace_entry_event)) {
-		pr_warning("Warning: could not register graph trace events\n");
-		return 1;
-	}
-
-	if (!register_ftrace_event(&graph_trace_ret_event)) {
-		pr_warning("Warning: could not register graph trace events\n");
-		return 1;
-	}
-
 	return register_tracer(&graph_trace);
 }
 
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index bb922d9..a68e5e3 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -702,11 +702,25 @@
 	unsigned long secs, usec_rem;
 	char comm[TASK_COMM_LEN];
 	int ret;
+	int tgid;
 
 	trace_find_cmdline(entry->pid, comm);
 
-	ret = trace_seq_printf(s, "%16s-%-5d [%03d] ",
-			       comm, entry->pid, iter->cpu);
+	ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
+	if (!ret)
+		return 0;
+
+	if (trace_flags & TRACE_ITER_TGID) {
+		tgid = trace_find_tgid(entry->pid);
+		if (tgid < 0)
+			ret = trace_seq_puts(s, "(-----) ");
+		else
+			ret = trace_seq_printf(s, "(%5d) ", tgid);
+		if (!ret)
+			return 0;
+	}
+
+	ret = trace_seq_printf(s, "[%03d] ", iter->cpu);
 	if (!ret)
 		return 0;
 
@@ -1035,6 +1049,168 @@
 	.funcs		= &trace_fn_funcs,
 };
 
+/* TRACE_GRAPH_ENT */
+static enum print_line_t trace_graph_ent_trace(struct trace_iterator *iter, int flags,
+					struct trace_event *event)
+{
+	struct trace_seq *s = &iter->seq;
+	struct ftrace_graph_ent_entry *field;
+
+	trace_assign_type(field, iter->ent);
+
+	if (!trace_seq_puts(s, "graph_ent: func="))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	if (!seq_print_ip_sym(s, field->graph_ent.func, flags))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	if (!trace_seq_puts(s, "\n"))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ent_raw(struct trace_iterator *iter, int flags,
+				      struct trace_event *event)
+{
+	struct ftrace_graph_ent_entry *field;
+
+	trace_assign_type(field, iter->ent);
+
+	if (!trace_seq_printf(&iter->seq, "%lx %d\n",
+			      field->graph_ent.func,
+			      field->graph_ent.depth))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ent_hex(struct trace_iterator *iter, int flags,
+				      struct trace_event *event)
+{
+	struct ftrace_graph_ent_entry *field;
+	struct trace_seq *s = &iter->seq;
+
+	trace_assign_type(field, iter->ent);
+
+	SEQ_PUT_HEX_FIELD_RET(s, field->graph_ent.func);
+	SEQ_PUT_HEX_FIELD_RET(s, field->graph_ent.depth);
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ent_bin(struct trace_iterator *iter, int flags,
+				      struct trace_event *event)
+{
+	struct ftrace_graph_ent_entry *field;
+	struct trace_seq *s = &iter->seq;
+
+	trace_assign_type(field, iter->ent);
+
+	SEQ_PUT_FIELD_RET(s, field->graph_ent.func);
+	SEQ_PUT_FIELD_RET(s, field->graph_ent.depth);
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static struct trace_event_functions trace_graph_ent_funcs = {
+	.trace		= trace_graph_ent_trace,
+	.raw		= trace_graph_ent_raw,
+	.hex		= trace_graph_ent_hex,
+	.binary		= trace_graph_ent_bin,
+};
+
+static struct trace_event trace_graph_ent_event = {
+	.type		= TRACE_GRAPH_ENT,
+	.funcs		= &trace_graph_ent_funcs,
+};
+
+/* TRACE_GRAPH_RET */
+static enum print_line_t trace_graph_ret_trace(struct trace_iterator *iter, int flags,
+					struct trace_event *event)
+{
+	struct trace_seq *s = &iter->seq;
+	struct trace_entry *entry = iter->ent;
+	struct ftrace_graph_ret_entry *field;
+
+	trace_assign_type(field, entry);
+
+	if (!trace_seq_puts(s, "graph_ret: func="))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	if (!seq_print_ip_sym(s, field->ret.func, flags))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	if (!trace_seq_puts(s, "\n"))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ret_raw(struct trace_iterator *iter, int flags,
+				      struct trace_event *event)
+{
+	struct ftrace_graph_ret_entry *field;
+
+	trace_assign_type(field, iter->ent);
+
+	if (!trace_seq_printf(&iter->seq, "%lx %lld %lld %ld %d\n",
+			      field->ret.func,
+			      field->ret.calltime,
+			      field->ret.rettime,
+			      field->ret.overrun,
+			      field->ret.depth))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ret_hex(struct trace_iterator *iter, int flags,
+				      struct trace_event *event)
+{
+	struct ftrace_graph_ret_entry *field;
+	struct trace_seq *s = &iter->seq;
+
+	trace_assign_type(field, iter->ent);
+
+	SEQ_PUT_HEX_FIELD_RET(s, field->ret.func);
+	SEQ_PUT_HEX_FIELD_RET(s, field->ret.calltime);
+	SEQ_PUT_HEX_FIELD_RET(s, field->ret.rettime);
+	SEQ_PUT_HEX_FIELD_RET(s, field->ret.overrun);
+	SEQ_PUT_HEX_FIELD_RET(s, field->ret.depth);
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ret_bin(struct trace_iterator *iter, int flags,
+				      struct trace_event *event)
+{
+	struct ftrace_graph_ret_entry *field;
+	struct trace_seq *s = &iter->seq;
+
+	trace_assign_type(field, iter->ent);
+
+	SEQ_PUT_FIELD_RET(s, field->ret.func);
+	SEQ_PUT_FIELD_RET(s, field->ret.calltime);
+	SEQ_PUT_FIELD_RET(s, field->ret.rettime);
+	SEQ_PUT_FIELD_RET(s, field->ret.overrun);
+	SEQ_PUT_FIELD_RET(s, field->ret.depth);
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static struct trace_event_functions trace_graph_ret_funcs = {
+	.trace		= trace_graph_ret_trace,
+	.raw		= trace_graph_ret_raw,
+	.hex		= trace_graph_ret_hex,
+	.binary		= trace_graph_ret_bin,
+};
+
+static struct trace_event trace_graph_ret_event = {
+	.type		= TRACE_GRAPH_RET,
+	.funcs		= &trace_graph_ret_funcs,
+};
+
 /* TRACE_CTX an TRACE_WAKE */
 static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
 					     char *delim)
@@ -1425,6 +1601,8 @@
 
 static struct trace_event *events[] __initdata = {
 	&trace_fn_event,
+	&trace_graph_ent_event,
+	&trace_graph_ret_event,
 	&trace_ctx_event,
 	&trace_wake_event,
 	&trace_stack_event,
diff --git a/kernel/uptime_limit.c b/kernel/uptime_limit.c
new file mode 100644
index 0000000..b6a1a5e
--- /dev/null
+++ b/kernel/uptime_limit.c
@@ -0,0 +1,166 @@
+/*
+ * uptime_limit.c
+ *
+ * This file contains the functions which can limit kernel uptime
+ *
+ * Copyright (C) 2011 Bruce Ashfield (bruce.ashfield@windriver.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * This functionality is somewhat close to the softdog watchdog
+ * implementation, but it cannot be used directly for several reasons:
+ *
+ *   - The soft watchdog should be available while this functionality is active
+ *   - The duration range is different between this and the softdog. The
+ *     timeout available here is potentially quite a bit longer.
+ *   - At expiration, there are different expiration requirements and actions.
+ *   - This functionality is specific to a particular use case and should
+ *     not impact mainline functionality
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+
+#define UPTIME_LIMIT_IN_SECONDS (CONFIG_UPTIME_LIMIT_DURATION * 60)
+#define MIN(X, Y) ((X) <= (Y) ? (X) : (Y))
+#define TEN_MINUTES_IN_SECONDS 600
+
+enum uptime_expiration_type {
+	uptime_no_action,
+	uptime_reboot
+};
+
+static enum uptime_expiration_type uptime_expiration_action = uptime_no_action;
+static struct timer_list timelimit_timer;
+static struct task_struct *uptime_worker_task;
+
+static void timelimit_expire(unsigned long timeout_seconds)
+{
+	char msg[128];
+	int msglen = 127;
+
+	if (timeout_seconds) {
+		if (timeout_seconds >= 60)
+			snprintf(msg, msglen,
+				 "Uptime: kernel validity duration has %d %s remaining\n",
+				 (int) timeout_seconds / 60, "minute(s)");
+		else
+			snprintf(msg, msglen,
+				 "Uptime: kernel validity duration has %d %s remaining\n",
+				 (int) timeout_seconds, "seconds");
+
+		printk(KERN_CRIT "%s", msg);
+
+		timelimit_timer.expires = jiffies + timeout_seconds * HZ;
+		timelimit_timer.data = 0;
+		add_timer_on(&timelimit_timer, cpumask_first(cpu_online_mask));
+	} else {
+		printk(KERN_CRIT "Uptime: Kernel validity timeout has expired\n");
+#ifdef CONFIG_UPTIME_LIMIT_KERNEL_REBOOT
+		uptime_expiration_action = uptime_reboot;
+		wake_up_process(uptime_worker_task);
+#endif
+	}
+}
+
+/*
+ * This thread starts and then immediately goes to sleep. When it is woken
+ * up, it carries out the instructions left in uptime_expiration_action. If
+ * no action was specified it simply goes back to sleep.
+ */
+static int uptime_worker(void *unused)
+{
+	set_current_state(TASK_INTERRUPTIBLE);
+
+	while (!kthread_should_stop()) {
+		schedule();
+
+		if (kthread_should_stop())
+			break;
+
+		if (uptime_expiration_action == uptime_reboot) {
+			printk(KERN_CRIT "Uptime: restarting machine\n");
+			kernel_restart(NULL);
+		}
+
+		set_current_state(TASK_INTERRUPTIBLE);
+	}
+	__set_current_state(TASK_RUNNING);
+
+	return 0;
+}
+
+static int timeout_enable(int cpu)
+{
+	int err = 0;
+	int warning_limit;
+
+	/*
+	 * Create an uptime worker thread. This thread is required since the
+	 * safe version of kernel restart cannot be called from a
+	 * non-interruptible context, which means we cannot call it directly
+	 * from a timer callback.  So we arrange for the timer expiration to
+	 * wakeup a thread, which performs the action.
+	 */
+	uptime_worker_task = kthread_create(uptime_worker,
+					    (void *)(unsigned long)cpu,
+					    "uptime_worker/%d", cpu);
+	if (IS_ERR(uptime_worker_task)) {
+		printk(KERN_ERR "Uptime: task for cpu %i failed\n", cpu);
+		err = PTR_ERR(uptime_worker_task);
+		goto out;
+	}
+	/* bind to cpu0 to avoid migration and hot plug nastiness */
+	kthread_bind(uptime_worker_task, cpu);
+	wake_up_process(uptime_worker_task);
+
+	/* Create the timer that will wake the uptime thread at expiration */
+	init_timer(&timelimit_timer);
+	timelimit_timer.function = timelimit_expire;
+	/*
+	 * Fire two timers. One warning timeout and the final timer
+	 * which will carry out the expiration action. The warning timer will
+	 * expire at the minimum of half the original time or ten minutes.
+	 */
+	warning_limit = MIN(UPTIME_LIMIT_IN_SECONDS/2, TEN_MINUTES_IN_SECONDS);
+	timelimit_timer.expires = jiffies + warning_limit * HZ;
+	timelimit_timer.data = UPTIME_LIMIT_IN_SECONDS - warning_limit;
+
+	add_timer_on(&timelimit_timer, cpumask_first(cpu_online_mask));
+out:
+	return err;
+}
+
+static int __init timelimit_init(void)
+{
+	int err = 0;
+
+	printk(KERN_INFO "Uptime: system uptime restrictions enabled\n");
+
+	/*
+	 * Enable the timeout thread for cpu 0 only, assuming that the
+	 * uptime limit is non-zero, to protect against any cpu
+	 * migration issues.
+	 */
+	if (UPTIME_LIMIT_IN_SECONDS)
+		err = timeout_enable(0);
+
+	return err;
+}
+device_initcall(timelimit_init);
diff --git a/kernel/wait.c b/kernel/wait.c
index 6698e0c..bca170e 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -287,3 +287,12 @@
 	return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
 }
 EXPORT_SYMBOL(bit_waitqueue);
+
+__sched int bit_wait_io(void *word)
+{
+	if (signal_pending_state(current->state, current))
+		return 1;
+	io_schedule();
+	return 0;
+}
+EXPORT_SYMBOL(bit_wait_io);
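Since the wait_on_bit() family on this kernel takes an action callback, the new helper can be passed directly wherever a bit-wait should sleep in io_schedule(). A hedged sketch with illustrative my_* names, assuming the matching declaration is added to linux/wait.h elsewhere in this patch:

#include <linux/sched.h>
#include <linux/wait.h>

#define MY_BIT_BUSY	0

static unsigned long my_flags;

static int my_wait_for_io(void)
{
	/* Sleeps via io_schedule() until the bit clears; returns
	 * non-zero if a signal ended the wait first. */
	return wait_on_bit(&my_flags, MY_BIT_BUSY, bit_wait_io,
			   TASK_KILLABLE);
}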
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 05039e3..e092e5a 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -45,6 +45,11 @@
 static DEFINE_PER_CPU(bool, hard_watchdog_warn);
 static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
+#endif
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
+static cpumask_t __read_mostly watchdog_cpus;
+#endif
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 #endif
 
@@ -178,7 +183,7 @@
 	__raw_get_cpu_var(watchdog_touch_ts) = 0;
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
 /* watchdog detector functions */
 static int is_hardlockup(void)
 {
@@ -192,6 +197,76 @@
 }
 #endif
 
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
+static unsigned int watchdog_next_cpu(unsigned int cpu)
+{
+	cpumask_t cpus = watchdog_cpus;
+	unsigned int next_cpu;
+
+	next_cpu = cpumask_next(cpu, &cpus);
+	if (next_cpu >= nr_cpu_ids)
+		next_cpu = cpumask_first(&cpus);
+
+	if (next_cpu == cpu)
+		return nr_cpu_ids;
+
+	return next_cpu;
+}
+
+static int is_hardlockup_other_cpu(unsigned int cpu)
+{
+	unsigned long hrint = per_cpu(hrtimer_interrupts, cpu);
+
+	if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
+		return 1;
+
+	per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
+	return 0;
+}
+
+static void watchdog_check_hardlockup_other_cpu(void)
+{
+	unsigned int next_cpu;
+
+	/*
+	 * Test for hardlockups every 3 samples.  The sample period is
+	 *  watchdog_thresh * 2 / 5, so 3 samples gets us back to slightly over
+	 *  watchdog_thresh (over by 20%).
+	 */
+	if (__this_cpu_read(hrtimer_interrupts) % 3 != 0)
+		return;
+
+	/* check for a hardlockup on the next cpu */
+	next_cpu = watchdog_next_cpu(smp_processor_id());
+	if (next_cpu >= nr_cpu_ids)
+		return;
+
+	smp_rmb();
+
+	if (per_cpu(watchdog_nmi_touch, next_cpu) == true) {
+		per_cpu(watchdog_nmi_touch, next_cpu) = false;
+		return;
+	}
+
+	if (is_hardlockup_other_cpu(next_cpu)) {
+		/* only warn once */
+		if (per_cpu(hard_watchdog_warn, next_cpu) == true)
+			return;
+
+		if (hardlockup_panic)
+			panic("Watchdog detected hard LOCKUP on cpu %u", next_cpu);
+		else
+			WARN(1, "Watchdog detected hard LOCKUP on cpu %u", next_cpu);
+
+		per_cpu(hard_watchdog_warn, next_cpu) = true;
+	} else {
+		per_cpu(hard_watchdog_warn, next_cpu) = false;
+	}
+}
+#else
+static inline void watchdog_check_hardlockup_other_cpu(void) { return; }
+#endif
+
 static int is_softlockup(unsigned long touch_ts)
 {
 	unsigned long now = get_timestamp();
@@ -203,7 +278,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
 
 static struct perf_event_attr wd_hw_attr = {
 	.type		= PERF_TYPE_HARDWARE,
@@ -251,7 +326,7 @@
 	__this_cpu_write(hard_watchdog_warn, false);
 	return;
 }
-#endif /* CONFIG_HARDLOCKUP_DETECTOR */
+#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */
 
 static void watchdog_interrupt_count(void)
 {
@@ -271,6 +346,9 @@
 	/* kick the hardlockup detector */
 	watchdog_interrupt_count();
 
+	/* test for hardlockups on the next cpu */
+	watchdog_check_hardlockup_other_cpu();
+
 	/* kick the softlockup detector */
 	wake_up_process(__this_cpu_read(softlockup_watchdog));
 
@@ -395,7 +473,7 @@
 	__touch_watchdog();
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
 /*
  * People like the simple clean cpu node info on boot.
  * Reduce the watchdog noise by only printing messages
@@ -471,9 +549,44 @@
 	return;
 }
 #else
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
+static int watchdog_nmi_enable(unsigned int cpu)
+{
+	/*
+	 * The new cpu will be marked online before the first hrtimer interrupt
+	 * runs on it.  If another cpu tests for a hardlockup on the new cpu
+	 * before it has run its first hrtimer, it will get a false positive.
+	 * Touch the watchdog on the new cpu to delay the first check for at
+	 * least 3 sampling periods to guarantee one hrtimer has run on the new
+	 * cpu.
+	 */
+	per_cpu(watchdog_nmi_touch, cpu) = true;
+	smp_wmb();
+	cpumask_set_cpu(cpu, &watchdog_cpus);
+	return 0;
+}
+
+static void watchdog_nmi_disable(unsigned int cpu)
+{
+	unsigned int next_cpu = watchdog_next_cpu(cpu);
+
+	/*
+	 * Offlining this cpu will cause the cpu before this one to start
+	 * checking the one after this one.  If this cpu just finished checking
+	 * the next cpu and updating hrtimer_interrupts_saved, and then the
+	 * previous cpu checks it within one sample period, it will trigger a
+	 * false positive.  Touch the watchdog on the next cpu to prevent it.
+	 */
+	if (next_cpu < nr_cpu_ids)
+		per_cpu(watchdog_nmi_touch, next_cpu) = true;
+	smp_wmb();
+	cpumask_clear_cpu(cpu, &watchdog_cpus);
+}
+#else
 static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
 static void watchdog_nmi_disable(unsigned int cpu) { return; }
-#endif /* CONFIG_HARDLOCKUP_DETECTOR */
+#endif /* CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU */
+#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */
 
 /* prepare/enable/disable routines */
 /* sysctl functions */
diff --git a/lib/Kconfig b/lib/Kconfig
index fe01d41..06d94d8 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -189,6 +189,15 @@
 config LZO_DECOMPRESS
 	tristate
 
+config LZ4_COMPRESS
+	tristate
+
+config LZ4HC_COMPRESS
+	tristate
+
+config LZ4_DECOMPRESS
+	tristate
+
 source "lib/xz/Kconfig"
 
 #
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 74fdc5c..d317c1a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -191,15 +191,27 @@
 	  The overhead should be minimal.  A periodic hrtimer runs to
 	  generate interrupts and kick the watchdog task every 4 seconds.
 	  An NMI is generated every 10 seconds or so to check for hardlockups.
+	  If NMIs are not available on the platform, every 12 seconds the
+	  hrtimer interrupt on one cpu will be used to check for hardlockups
+	  on the next cpu.
 
 	  The frequency of hrtimer and NMI events and the soft and hard lockup
 	  thresholds can be controlled through the sysctl watchdog_thresh.
 
-config HARDLOCKUP_DETECTOR
+config HARDLOCKUP_DETECTOR_NMI
 	def_bool y
 	depends on LOCKUP_DETECTOR && !HAVE_NMI_WATCHDOG
 	depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
 
+config HARDLOCKUP_DETECTOR_OTHER_CPU
+	def_bool y
+	depends on LOCKUP_DETECTOR && SMP
+	depends on !HARDLOCKUP_DETECTOR_NMI && !HAVE_NMI_WATCHDOG
+
+config HARDLOCKUP_DETECTOR
+	def_bool y
+	depends on HARDLOCKUP_DETECTOR_NMI || HARDLOCKUP_DETECTOR_OTHER_CPU
+
 config BOOTPARAM_HARDLOCKUP_PANIC
 	bool "Panic (Reboot) On Hard Lockups"
 	depends on HARDLOCKUP_DETECTOR
@@ -669,8 +681,9 @@
 	  mutexes and rwsems.
 
 config STACKTRACE
-	bool
+	bool "Stacktrace"
 	depends on STACKTRACE_SUPPORT
+	default y
 
 config DEBUG_STACK_USAGE
 	bool "Stack utilization instrumentation"
diff --git a/lib/Makefile b/lib/Makefile
index 9efe480..b6d7848 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -76,6 +76,9 @@
 obj-$(CONFIG_BCH) += bch.o
 obj-$(CONFIG_LZO_COMPRESS) += lzo/
 obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
+obj-$(CONFIG_LZ4_COMPRESS) += lz4/
+obj-$(CONFIG_LZ4HC_COMPRESS) += lz4/
+obj-$(CONFIG_LZ4_DECOMPRESS) += lz4/
 obj-$(CONFIG_XZ_DEC) += xz/
 obj-$(CONFIG_RAID6_PQ) += raid6/
 
diff --git a/lib/lz4/Makefile b/lib/lz4/Makefile
new file mode 100644
index 0000000..8085d04
--- /dev/null
+++ b/lib/lz4/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_LZ4_COMPRESS) += lz4_compress.o
+obj-$(CONFIG_LZ4HC_COMPRESS) += lz4hc_compress.o
+obj-$(CONFIG_LZ4_DECOMPRESS) += lz4_decompress.o
diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c
new file mode 100644
index 0000000..fd94058
--- /dev/null
+++ b/lib/lz4/lz4_compress.c
@@ -0,0 +1,443 @@
+/*
+ * LZ4 - Fast LZ compression algorithm
+ * Copyright (C) 2011-2012, Yann Collet.
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You can contact the author at :
+ * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
+ * - LZ4 source repository : http://code.google.com/p/lz4/
+ *
+ *  Changed for kernel use by:
+ *  Chanho Min <chanho.min@lge.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/lz4.h>
+#include <asm/unaligned.h>
+#include "lz4defs.h"
+
+/*
+ * LZ4_compressCtx :
+ * -----------------
+ * Compress 'isize' bytes from 'source' into an output buffer 'dest' of
+ * maximum size 'maxOutputSize'. If it cannot achieve it, compression
+ * will stop, and the result of the function will be zero.
+ * return : the number of bytes written in buffer 'dest', or 0 if the
+ * compression fails
+ */
+static inline int lz4_compressctx(void *ctx,
+		const char *source,
+		char *dest,
+		int isize,
+		int maxoutputsize)
+{
+	HTYPE *hashtable = (HTYPE *)ctx;
+	const u8 *ip = (u8 *)source;
+#if LZ4_ARCH64
+	const BYTE * const base = ip;
+#else
+	const int base = 0;
+#endif
+	const u8 *anchor = ip;
+	const u8 *const iend = ip + isize;
+	const u8 *const mflimit = iend - MFLIMIT;
+	#define MATCHLIMIT (iend - LASTLITERALS)
+
+	u8 *op = (u8 *) dest;
+	u8 *const oend = op + maxoutputsize;
+	int length;
+	const int skipstrength = SKIPSTRENGTH;
+	u32 forwardh;
+	int lastrun;
+
+	/* Init */
+	if (isize < MINLENGTH)
+		goto _last_literals;
+
+	memset((void *)hashtable, 0, LZ4_MEM_COMPRESS);
+
+	/* First Byte */
+	hashtable[LZ4_HASH_VALUE(ip)] = ip - base;
+	ip++;
+	forwardh = LZ4_HASH_VALUE(ip);
+
+	/* Main Loop */
+	for (;;) {
+		int findmatchattempts = (1U << skipstrength) + 3;
+		const u8 *forwardip = ip;
+		const u8 *ref;
+		u8 *token;
+
+		/* Find a match */
+		do {
+			u32 h = forwardh;
+			int step = findmatchattempts++ >> skipstrength;
+			ip = forwardip;
+			forwardip = ip + step;
+
+			if (unlikely(forwardip > mflimit))
+				goto _last_literals;
+
+			forwardh = LZ4_HASH_VALUE(forwardip);
+			ref = base + hashtable[h];
+			hashtable[h] = ip - base;
+		} while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip)));
+
+		/* Catch up */
+		while ((ip > anchor) && (ref > (u8 *)source) &&
+			unlikely(ip[-1] == ref[-1])) {
+			ip--;
+			ref--;
+		}
+
+		/* Encode Literal length */
+		length = (int)(ip - anchor);
+		token = op++;
+		/* Check output limit */
+		if (unlikely(op + length + (2 + 1 + LASTLITERALS) +
+			(length >> 8) > oend))
+			return 0;
+
+		if (length >= (int)RUN_MASK) {
+			int len;
+			*token = (RUN_MASK << ML_BITS);
+			len = length - RUN_MASK;
+			for (; len > 254 ; len -= 255)
+				*op++ = 255;
+			*op++ = (u8)len;
+		} else
+			*token = (length << ML_BITS);
+
+		/* Copy Literals */
+		LZ4_BLINDCOPY(anchor, op, length);
+_next_match:
+		/* Encode Offset */
+		LZ4_WRITE_LITTLEENDIAN_16(op, (u16)(ip - ref));
+
+		/* Start Counting */
+		ip += MINMATCH;
+		/* MinMatch verified */
+		ref += MINMATCH;
+		anchor = ip;
+		while (likely(ip < MATCHLIMIT - (STEPSIZE - 1))) {
+			#if LZ4_ARCH64
+			u64 diff = A64(ref) ^ A64(ip);
+			#else
+			u32 diff = A32(ref) ^ A32(ip);
+			#endif
+			if (!diff) {
+				ip += STEPSIZE;
+				ref += STEPSIZE;
+				continue;
+			}
+			ip += LZ4_NBCOMMONBYTES(diff);
+			goto _endcount;
+		}
+		#if LZ4_ARCH64
+		if ((ip < (MATCHLIMIT - 3)) && (A32(ref) == A32(ip))) {
+			ip += 4;
+			ref += 4;
+		}
+		#endif
+		if ((ip < (MATCHLIMIT - 1)) && (A16(ref) == A16(ip))) {
+			ip += 2;
+			ref += 2;
+		}
+		if ((ip < MATCHLIMIT) && (*ref == *ip))
+			ip++;
+_endcount:
+		/* Encode MatchLength */
+		length = (int)(ip - anchor);
+		/* Check output limit */
+		if (unlikely(op + (1 + LASTLITERALS) + (length >> 8) > oend))
+			return 0;
+		if (length >= (int)ML_MASK) {
+			*token += ML_MASK;
+			length -= ML_MASK;
+			for (; length > 509 ; length -= 510) {
+				*op++ = 255;
+				*op++ = 255;
+			}
+			if (length > 254) {
+				length -= 255;
+				*op++ = 255;
+			}
+			*op++ = (u8)length;
+		} else
+			*token += length;
+
+		/* Test end of chunk */
+		if (ip > mflimit) {
+			anchor = ip;
+			break;
+		}
+
+		/* Fill table */
+		hashtable[LZ4_HASH_VALUE(ip-2)] = ip - 2 - base;
+
+		/* Test next position */
+		ref = base + hashtable[LZ4_HASH_VALUE(ip)];
+		hashtable[LZ4_HASH_VALUE(ip)] = ip - base;
+		if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) {
+			token = op++;
+			*token = 0;
+			goto _next_match;
+		}
+
+		/* Prepare next loop */
+		anchor = ip++;
+		forwardh = LZ4_HASH_VALUE(ip);
+	}
+
+_last_literals:
+	/* Encode Last Literals */
+	lastrun = (int)(iend - anchor);
+	if (((char *)op - dest) + lastrun + 1
+		+ ((lastrun + 255 - RUN_MASK) / 255) > (u32)maxoutputsize)
+		return 0;
+
+	if (lastrun >= (int)RUN_MASK) {
+		*op++ = (RUN_MASK << ML_BITS);
+		lastrun -= RUN_MASK;
+		for (; lastrun > 254 ; lastrun -= 255)
+			*op++ = 255;
+		*op++ = (u8)lastrun;
+	} else
+		*op++ = (lastrun << ML_BITS);
+	memcpy(op, anchor, iend - anchor);
+	op += iend - anchor;
+
+	/* End */
+	return (int)(((char *)op) - dest);
+}
+
+static inline int lz4_compress64kctx(void *ctx,
+		const char *source,
+		char *dest,
+		int isize,
+		int maxoutputsize)
+{
+	u16 *hashtable = (u16 *)ctx;
+	const u8 *ip = (u8 *) source;
+	const u8 *anchor = ip;
+	const u8 *const base = ip;
+	const u8 *const iend = ip + isize;
+	const u8 *const mflimit = iend - MFLIMIT;
+	#define MATCHLIMIT (iend - LASTLITERALS)
+
+	u8 *op = (u8 *) dest;
+	u8 *const oend = op + maxoutputsize;
+	int len, length;
+	const int skipstrength = SKIPSTRENGTH;
+	u32 forwardh;
+	int lastrun;
+
+	/* Init */
+	if (isize < MINLENGTH)
+		goto _last_literals;
+
+	memset((void *)hashtable, 0, LZ4_MEM_COMPRESS);
+
+	/* First Byte */
+	ip++;
+	forwardh = LZ4_HASH64K_VALUE(ip);
+
+	/* Main Loop */
+	for (;;) {
+		int findmatchattempts = (1U << skipstrength) + 3;
+		const u8 *forwardip = ip;
+		const u8 *ref;
+		u8 *token;
+
+		/* Find a match */
+		do {
+			u32 h = forwardh;
+			int step = findmatchattempts++ >> skipstrength;
+			ip = forwardip;
+			forwardip = ip + step;
+
+			if (forwardip > mflimit)
+				goto _last_literals;
+
+			forwardh = LZ4_HASH64K_VALUE(forwardip);
+			ref = base + hashtable[h];
+			hashtable[h] = (u16)(ip - base);
+		} while (A32(ref) != A32(ip));
+
+		/* Catch up */
+		while ((ip > anchor) && (ref > (u8 *)source)
+			&& (ip[-1] == ref[-1])) {
+			ip--;
+			ref--;
+		}
+
+		/* Encode Literal length */
+		length = (int)(ip - anchor);
+		token = op++;
+		/* Check output limit */
+		if (unlikely(op + length + (2 + 1 + LASTLITERALS)
+			+ (length >> 8) > oend))
+			return 0;
+		if (length >= (int)RUN_MASK) {
+			*token = (RUN_MASK << ML_BITS);
+			len = length - RUN_MASK;
+			for (; len > 254 ; len -= 255)
+				*op++ = 255;
+			*op++ = (u8)len;
+		} else
+			*token = (length << ML_BITS);
+
+		/* Copy Literals */
+		LZ4_BLINDCOPY(anchor, op, length);
+
+_next_match:
+		/* Encode Offset */
+		LZ4_WRITE_LITTLEENDIAN_16(op, (u16)(ip - ref));
+
+		/* Start Counting */
+		ip += MINMATCH;
+		/* MinMatch verified */
+		ref += MINMATCH;
+		anchor = ip;
+
+		while (ip < MATCHLIMIT - (STEPSIZE - 1)) {
+			#if LZ4_ARCH64
+			u64 diff = A64(ref) ^ A64(ip);
+			#else
+			u32 diff = A32(ref) ^ A32(ip);
+			#endif
+
+			if (!diff) {
+				ip += STEPSIZE;
+				ref += STEPSIZE;
+				continue;
+			}
+			ip += LZ4_NBCOMMONBYTES(diff);
+			goto _endcount;
+		}
+		#if LZ4_ARCH64
+		if ((ip < (MATCHLIMIT - 3)) && (A32(ref) == A32(ip))) {
+			ip += 4;
+			ref += 4;
+		}
+		#endif
+		if ((ip < (MATCHLIMIT - 1)) && (A16(ref) == A16(ip))) {
+			ip += 2;
+			ref += 2;
+		}
+		if ((ip < MATCHLIMIT) && (*ref == *ip))
+			ip++;
+_endcount:
+
+		/* Encode MatchLength */
+		len = (int)(ip - anchor);
+		/* Check output limit */
+		if (unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend))
+			return 0;
+		if (len >= (int)ML_MASK) {
+			*token += ML_MASK;
+			len -= ML_MASK;
+			for (; len > 509 ; len -= 510) {
+				*op++ = 255;
+				*op++ = 255;
+			}
+			if (len > 254) {
+				len -= 255;
+				*op++ = 255;
+			}
+			*op++ = (u8)len;
+		} else
+			*token += len;
+
+		/* Test end of chunk */
+		if (ip > mflimit) {
+			anchor = ip;
+			break;
+		}
+
+		/* Fill table */
+		hashtable[LZ4_HASH64K_VALUE(ip-2)] = (u16)(ip - 2 - base);
+
+		/* Test next position */
+		ref = base + hashtable[LZ4_HASH64K_VALUE(ip)];
+		hashtable[LZ4_HASH64K_VALUE(ip)] = (u16)(ip - base);
+		if (A32(ref) == A32(ip)) {
+			token = op++;
+			*token = 0;
+			goto _next_match;
+		}
+
+		/* Prepare next loop */
+		anchor = ip++;
+		forwardh = LZ4_HASH64K_VALUE(ip);
+	}
+
+_last_literals:
+	/* Encode Last Literals */
+	lastrun = (int)(iend - anchor);
+	if (op + lastrun + 1 + (lastrun - RUN_MASK + 255) / 255 > oend)
+		return 0;
+	if (lastrun >= (int)RUN_MASK) {
+		*op++ = (RUN_MASK << ML_BITS);
+		lastrun -= RUN_MASK;
+		for (; lastrun > 254 ; lastrun -= 255)
+			*op++ = 255;
+		*op++ = (u8)lastrun;
+	} else
+		*op++ = (lastrun << ML_BITS);
+	memcpy(op, anchor, iend - anchor);
+	op += iend - anchor;
+	/* End */
+	return (int)(((char *)op) - dest);
+}
+
+int lz4_compress(const unsigned char *src, size_t src_len,
+			unsigned char *dst, size_t *dst_len, void *wrkmem)
+{
+	int ret = -1;
+	int out_len = 0;
+
+	if (src_len < LZ4_64KLIMIT)
+		out_len = lz4_compress64kctx(wrkmem, src, dst, src_len,
+				lz4_compressbound(src_len));
+	else
+		out_len = lz4_compressctx(wrkmem, src, dst, src_len,
+				lz4_compressbound(src_len));
+
+	if (out_len < 0)
+		goto exit;
+
+	*dst_len = out_len;
+
+	return 0;
+exit:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(lz4_compress);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("LZ4 compressor");
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
new file mode 100644
index 0000000..677d1ea
--- /dev/null
+++ b/lib/lz4/lz4_decompress.c
@@ -0,0 +1,326 @@
+/*
+ * LZ4 Decompressor for Linux kernel
+ *
+ * Copyright (C) 2013 LG Electronics Co., Ltd. (http://www.lge.com/)
+ *
+ * Based on LZ4 implementation by Yann Collet.
+ *
+ * LZ4 - Fast LZ compression algorithm
+ * Copyright (C) 2011-2012, Yann Collet.
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You can contact the author at :
+ *  - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
+ *  - LZ4 source repository : http://code.google.com/p/lz4/
+ */
+
+#ifndef STATIC
+#include <linux/module.h>
+#include <linux/kernel.h>
+#endif
+#include <linux/lz4.h>
+
+#include <asm/unaligned.h>
+
+#include "lz4defs.h"
+
+static int lz4_uncompress(const char *source, char *dest, int osize)
+{
+	const BYTE *ip = (const BYTE *) source;
+	const BYTE *ref;
+	BYTE *op = (BYTE *) dest;
+	BYTE * const oend = op + osize;
+	BYTE *cpy;
+	unsigned token;
+	size_t length;
+	size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
+#if LZ4_ARCH64
+	size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
+#endif
+
+	while (1) {
+
+		/* get runlength */
+		token = *ip++;
+		length = (token >> ML_BITS);
+		if (length == RUN_MASK) {
+			size_t len;
+
+			len = *ip++;
+			for (; len == 255; length += 255)
+				len = *ip++;
+			length += len;
+		}
+
+		/* copy literals */
+		cpy = op + length;
+		if (unlikely(cpy > oend - COPYLENGTH)) {
+			/*
+			 * Error: not enough space for another match
+			 * (min 4) + 5 literals
+			 */
+			if (cpy != oend)
+				goto _output_error;
+
+			memcpy(op, ip, length);
+			ip += length;
+			break; /* EOF */
+		}
+		LZ4_WILDCOPY(ip, op, cpy);
+		ip -= (op - cpy);
+		op = cpy;
+
+		/* get offset */
+		LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
+		ip += 2;
+
+		/* Error: offset creates a reference outside the destination buffer */
+		if (unlikely(ref < (BYTE *const) dest))
+			goto _output_error;
+
+		/* get matchlength */
+		length = token & ML_MASK;
+		if (length == ML_MASK) {
+			for (; *ip == 255; length += 255)
+				ip++;
+			length += *ip++;
+		}
+
+		/* copy repeated sequence */
+		if (unlikely((op - ref) < STEPSIZE)) {
+#if LZ4_ARCH64
+			size_t dec64 = dec64table[op - ref];
+#else
+			const int dec64 = 0;
+#endif
+			op[0] = ref[0];
+			op[1] = ref[1];
+			op[2] = ref[2];
+			op[3] = ref[3];
+			op += 4;
+			ref += 4;
+			ref -= dec32table[op-ref];
+			PUT4(ref, op);
+			op += STEPSIZE - 4;
+			ref -= dec64;
+		} else {
+			LZ4_COPYSTEP(ref, op);
+		}
+		cpy = op + length - (STEPSIZE - 4);
+		if (cpy > (oend - COPYLENGTH)) {
+
+			/* Error: request to write beyond destination buffer */
+			if (cpy > oend)
+				goto _output_error;
+			LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
+			while (op < cpy)
+				*op++ = *ref++;
+			op = cpy;
+			/*
+			 * Check EOF (should never happen, since last 5 bytes
+			 * are supposed to be literals)
+			 */
+			if (op == oend)
+				goto _output_error;
+			continue;
+		}
+		LZ4_SECURECOPY(ref, op, cpy);
+		op = cpy; /* correction */
+	}
+	/* end of decoding */
+	return (int) (((char *)ip) - source);
+
+	/* write overflow error detected */
+_output_error:
+	return (int) (-(((char *)ip) - source));
+}
+
+static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
+				int isize, size_t maxoutputsize)
+{
+	const BYTE *ip = (const BYTE *) source;
+	const BYTE *const iend = ip + isize;
+	const BYTE *ref;
+
+	BYTE *op = (BYTE *) dest;
+	BYTE * const oend = op + maxoutputsize;
+	BYTE *cpy;
+
+	size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
+#if LZ4_ARCH64
+	size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
+#endif
+
+	/* Main Loop */
+	while (ip < iend) {
+
+		unsigned token;
+		size_t length;
+
+		/* get runlength */
+		token = *ip++;
+		length = (token >> ML_BITS);
+		if (length == RUN_MASK) {
+			int s = 255;
+			while ((ip < iend) && (s == 255)) {
+				s = *ip++;
+				length += s;
+			}
+		}
+		/* copy literals */
+		cpy = op + length;
+		if ((cpy > oend - COPYLENGTH) ||
+			(ip + length > iend - COPYLENGTH)) {
+
+			if (cpy > oend)
+				goto _output_error; /* writes beyond buffer */
+
+			/*
+			 * Error: the LZ4 format requires all input
+			 * to be consumed at this stage
+			 */
+			if (ip + length != iend)
+				goto _output_error;
+			memcpy(op, ip, length);
+			op += length;
+			break;/* Necessarily EOF, due to parsing restrictions */
+		}
+		LZ4_WILDCOPY(ip, op, cpy);
+		ip -= (op - cpy);
+		op = cpy;
+
+		/* get offset */
+		LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
+		ip += 2;
+		/*
+		 * Error: offset creates a reference
+		 * outside of the destination buffer
+		 */
+		if (ref < (BYTE * const) dest)
+			goto _output_error;
+
+		/* get matchlength */
+		length = (token & ML_MASK);
+		if (length == ML_MASK) {
+			while (ip < iend) {
+				int s = *ip++;
+				length += s;
+				if (s == 255)
+					continue;
+				break;
+			}
+		}
+
+		/* copy repeated sequence */
+		if (unlikely((op - ref) < STEPSIZE)) {
+#if LZ4_ARCH64
+			size_t dec64 = dec64table[op - ref];
+#else
+			const int dec64 = 0;
+#endif
+			op[0] = ref[0];
+			op[1] = ref[1];
+			op[2] = ref[2];
+			op[3] = ref[3];
+			op += 4;
+			ref += 4;
+			ref -= dec32table[op - ref];
+			PUT4(ref, op);
+			op += STEPSIZE - 4;
+			ref -= dec64;
+		} else {
+			LZ4_COPYSTEP(ref, op);
+		}
+		cpy = op + length - (STEPSIZE-4);
+		if (cpy > oend - COPYLENGTH) {
+			if (cpy > oend)
+				goto _output_error; /* write outside of buf */
+
+			LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
+			while (op < cpy)
+				*op++ = *ref++;
+			op = cpy;
+			/*
+			 * Check EOF (should never happen, since last 5 bytes
+			 * are supposed to be literals)
+			 */
+			if (op == oend)
+				goto _output_error;
+			continue;
+		}
+		LZ4_SECURECOPY(ref, op, cpy);
+		op = cpy; /* correction */
+	}
+	/* end of decoding */
+	return (int) (((char *) op) - dest);
+
+	/* write overflow error detected */
+_output_error:
+	return (int) (-(((char *) ip) - source));
+}
+
+int lz4_decompress(const unsigned char *src, size_t *src_len,
+		unsigned char *dest, size_t actual_dest_len)
+{
+	int ret = -1;
+	int input_len = 0;
+
+	input_len = lz4_uncompress(src, dest, actual_dest_len);
+	if (input_len < 0)
+		goto exit_0;
+	*src_len = input_len;
+
+	return 0;
+exit_0:
+	return ret;
+}
+#ifndef STATIC
+EXPORT_SYMBOL_GPL(lz4_decompress);
+#endif
+
+int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len,
+		unsigned char *dest, size_t *dest_len)
+{
+	int ret = -1;
+	int out_len = 0;
+
+	out_len = lz4_uncompress_unknownoutputsize(src, dest, src_len,
+					*dest_len);
+	if (out_len < 0)
+		goto exit_0;
+	*dest_len = out_len;
+
+	return 0;
+exit_0:
+	return ret;
+}
+#ifndef STATIC
+EXPORT_SYMBOL_GPL(lz4_decompress_unknownoutputsize);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("LZ4 Decompressor");
+#endif
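
A matching decompression sketch, again assuming the linux/lz4.h interface. lz4_decompress() covers the common case where the caller already knows the exact decompressed size; lz4_decompress_unknownoutputsize() is for when only an upper bound on the output is known. The helper itself is hypothetical:

	#include <linux/lz4.h>

	static int example_decompress(const unsigned char *comp, size_t comp_len,
				      unsigned char *out, size_t out_len)
	{
		size_t consumed = comp_len;	/* set to bytes consumed on success */

		/* out_len must be the exact original (uncompressed) length */
		if (lz4_decompress(comp, &consumed, out, out_len) < 0)
			return -EINVAL;	/* corrupt or truncated stream */
		return 0;
	}
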
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
new file mode 100644
index 0000000..abcecdc
--- /dev/null
+++ b/lib/lz4/lz4defs.h
@@ -0,0 +1,156 @@
+/*
+ * lz4defs.h -- architecture specific defines
+ *
+ * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Detect 64-bit mode
+ */
+#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \
+	|| defined(__ppc64__) || defined(__LP64__))
+#define LZ4_ARCH64 1
+#else
+#define LZ4_ARCH64 0
+#endif
+
+/*
+ * Architecture-specific macros
+ */
+#define BYTE	u8
+typedef struct _U16_S { u16 v; } U16_S;
+typedef struct _U32_S { u32 v; } U32_S;
+typedef struct _U64_S { u64 v; } U64_S;
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)		\
+	|| defined(CONFIG_ARM) && __LINUX_ARM_ARCH__ >= 6	\
+	&& defined(ARM_EFFICIENT_UNALIGNED_ACCESS)
+
+#define A16(x) (((U16_S *)(x))->v)
+#define A32(x) (((U32_S *)(x))->v)
+#define A64(x) (((U64_S *)(x))->v)
+
+#define PUT4(s, d) (A32(d) = A32(s))
+#define PUT8(s, d) (A64(d) = A64(s))
+#define LZ4_WRITE_LITTLEENDIAN_16(p, v)	\
+	do {	\
+		A16(p) = v; \
+		p += 2; \
+	} while (0)
+#else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+
+#define A64(x) get_unaligned((u64 *)&(((U16_S *)(x))->v))
+#define A32(x) get_unaligned((u32 *)&(((U16_S *)(x))->v))
+#define A16(x) get_unaligned((u16 *)&(((U16_S *)(x))->v))
+
+#define PUT4(s, d) \
+	put_unaligned(get_unaligned((const u32 *) s), (u32 *) d)
+#define PUT8(s, d) \
+	put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
+
+#define LZ4_WRITE_LITTLEENDIAN_16(p, v)	\
+	do {	\
+		put_unaligned(v, (u16 *)(p)); \
+		p += 2; \
+	} while (0)
+#endif
+
+#define COPYLENGTH 8
+#define ML_BITS  4
+#define ML_MASK  ((1U << ML_BITS) - 1)
+#define RUN_BITS (8 - ML_BITS)
+#define RUN_MASK ((1U << RUN_BITS) - 1)
+#define MEMORY_USAGE	14
+#define MINMATCH	4
+#define SKIPSTRENGTH	6
+#define LASTLITERALS	5
+#define MFLIMIT		(COPYLENGTH + MINMATCH)
+#define MINLENGTH	(MFLIMIT + 1)
+#define MAXD_LOG	16
+#define MAXD		(1 << MAXD_LOG)
+#define MAXD_MASK	(u32)(MAXD - 1)
+#define MAX_DISTANCE	(MAXD - 1)
+#define HASH_LOG	(MAXD_LOG - 1)
+#define HASHTABLESIZE	(1 << HASH_LOG)
+#define MAX_NB_ATTEMPTS	256
+#define OPTIMAL_ML	(int)((ML_MASK-1)+MINMATCH)
+#define LZ4_64KLIMIT	((1<<16) + (MFLIMIT - 1))
+#define HASHLOG64K	((MEMORY_USAGE - 2) + 1)
+#define HASH64KTABLESIZE	(1U << HASHLOG64K)
+#define LZ4_HASH_VALUE(p)	(((A32(p)) * 2654435761U) >> \
+				((MINMATCH * 8) - (MEMORY_USAGE-2)))
+#define LZ4_HASH64K_VALUE(p)	(((A32(p)) * 2654435761U) >> \
+				((MINMATCH * 8) - HASHLOG64K))
+#define HASH_VALUE(p)		(((A32(p)) * 2654435761U) >> \
+				((MINMATCH * 8) - HASH_LOG))
+
+#if LZ4_ARCH64	/* 64-bit */
+#define STEPSIZE 8
+
+#define LZ4_COPYSTEP(s, d)	\
+	do {			\
+		PUT8(s, d);	\
+		d += 8;		\
+		s += 8;		\
+	} while (0)
+
+#define LZ4_COPYPACKET(s, d)	LZ4_COPYSTEP(s, d)
+
+#define LZ4_SECURECOPY(s, d, e)			\
+	do {					\
+		if (d < e) {			\
+			LZ4_WILDCOPY(s, d, e);	\
+		}				\
+	} while (0)
+#define HTYPE u32
+
+#ifdef __BIG_ENDIAN
+#define LZ4_NBCOMMONBYTES(val) (__builtin_clzll(val) >> 3)
+#else
+#define LZ4_NBCOMMONBYTES(val) (__builtin_ctzll(val) >> 3)
+#endif
+
+#else	/* 32-bit */
+#define STEPSIZE 4
+
+#define LZ4_COPYSTEP(s, d)	\
+	do {			\
+		PUT4(s, d);	\
+		d += 4;		\
+		s += 4;		\
+	} while (0)
+
+#define LZ4_COPYPACKET(s, d)		\
+	do {				\
+		LZ4_COPYSTEP(s, d);	\
+		LZ4_COPYSTEP(s, d);	\
+	} while (0)
+
+#define LZ4_SECURECOPY	LZ4_WILDCOPY
+#define HTYPE const u8*
+
+#ifdef __BIG_ENDIAN
+#define LZ4_NBCOMMONBYTES(val) (__builtin_clz(val) >> 3)
+#else
+#define LZ4_NBCOMMONBYTES(val) (__builtin_ctz(val) >> 3)
+#endif
+
+#endif
+
+#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
+	(d = s - get_unaligned_le16(p))
+
+#define LZ4_WILDCOPY(s, d, e)		\
+	do {				\
+		LZ4_COPYPACKET(s, d);	\
+	} while (d < e)
+
+#define LZ4_BLINDCOPY(s, d, l)	\
+	do {	\
+		u8 *e = (d) + l;	\
+		LZ4_WILDCOPY(s, d, e);	\
+		d = e;	\
+	} while (0)
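
Working through the hash constants above as a sanity check: LZ4_HASH_VALUE shifts the 32-bit product right by (MINMATCH * 8) - (MEMORY_USAGE - 2) = 32 - 12 = 20 bits, leaving a 12-bit index, i.e. 1 << 12 = 4096 buckets; 2654435761 is the Knuth multiplicative-hash constant. A small illustrative sketch of the same computation in plain C (userspace, not part of the patch):

	#include <stdint.h>

	/* seq is the 4-byte sequence A32(p), as read by LZ4_HASH_VALUE */
	static uint32_t lz4_hash(uint32_t seq)
	{
		/* multiply, then keep the top 12 bits: 4096 hash buckets */
		return (seq * 2654435761U) >> (32 - 12);
	}
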
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c
new file mode 100644
index 0000000..eb1a74f
--- /dev/null
+++ b/lib/lz4/lz4hc_compress.c
@@ -0,0 +1,539 @@
+/*
+ * LZ4 HC - High Compression Mode of LZ4
+ * Copyright (C) 2011-2012, Yann Collet.
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You can contact the author at :
+ * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
+ * - LZ4 source repository : http://code.google.com/p/lz4/
+ *
+ *  Changed for kernel use by:
+ *  Chanho Min <chanho.min@lge.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/lz4.h>
+#include <asm/unaligned.h>
+#include "lz4defs.h"
+
+struct lz4hc_data {
+	const u8 *base;
+	HTYPE hashtable[HASHTABLESIZE];
+	u16 chaintable[MAXD];
+	const u8 *nexttoupdate;
+} __attribute__((__packed__));
+
+static inline int lz4hc_init(struct lz4hc_data *hc4, const u8 *base)
+{
+	memset((void *)hc4->hashtable, 0, sizeof(hc4->hashtable));
+	memset(hc4->chaintable, 0xFF, sizeof(hc4->chaintable));
+
+#if LZ4_ARCH64
+	hc4->nexttoupdate = base + 1;
+#else
+	hc4->nexttoupdate = base;
+#endif
+	hc4->base = base;
+	return 1;
+}
+
+/* Update chains up to ip (excluded) */
+static inline void lz4hc_insert(struct lz4hc_data *hc4, const u8 *ip)
+{
+	u16 *chaintable = hc4->chaintable;
+	HTYPE *hashtable  = hc4->hashtable;
+#if LZ4_ARCH64
+	const BYTE * const base = hc4->base;
+#else
+	const int base = 0;
+#endif
+
+	while (hc4->nexttoupdate < ip) {
+		const u8 *p = hc4->nexttoupdate;
+		size_t delta = p - (hashtable[HASH_VALUE(p)] + base);
+		if (delta > MAX_DISTANCE)
+			delta = MAX_DISTANCE;
+		chaintable[(size_t)(p) & MAXD_MASK] = (u16)delta;
+		hashtable[HASH_VALUE(p)] = (p) - base;
+		hc4->nexttoupdate++;
+	}
+}
+
+static inline size_t lz4hc_commonlength(const u8 *p1, const u8 *p2,
+		const u8 *const matchlimit)
+{
+	const u8 *p1t = p1;
+
+	while (p1t < matchlimit - (STEPSIZE - 1)) {
+#if LZ4_ARCH64
+		u64 diff = A64(p2) ^ A64(p1t);
+#else
+		u32 diff = A32(p2) ^ A32(p1t);
+#endif
+		if (!diff) {
+			p1t += STEPSIZE;
+			p2 += STEPSIZE;
+			continue;
+		}
+		p1t += LZ4_NBCOMMONBYTES(diff);
+		return p1t - p1;
+	}
+#if LZ4_ARCH64
+	if ((p1t < (matchlimit-3)) && (A32(p2) == A32(p1t))) {
+		p1t += 4;
+		p2 += 4;
+	}
+#endif
+
+	if ((p1t < (matchlimit - 1)) && (A16(p2) == A16(p1t))) {
+		p1t += 2;
+		p2 += 2;
+	}
+	if ((p1t < matchlimit) && (*p2 == *p1t))
+		p1t++;
+	return p1t - p1;
+}
+
+static inline int lz4hc_insertandfindbestmatch(struct lz4hc_data *hc4,
+		const u8 *ip, const u8 *const matchlimit, const u8 **matchpos)
+{
+	u16 *const chaintable = hc4->chaintable;
+	HTYPE *const hashtable = hc4->hashtable;
+	const u8 *ref;
+#if LZ4_ARCH64
+	const BYTE * const base = hc4->base;
+#else
+	const int base = 0;
+#endif
+	int nbattempts = MAX_NB_ATTEMPTS;
+	size_t repl = 0, ml = 0;
+	u16 delta;
+
+	/* HC4 match finder */
+	lz4hc_insert(hc4, ip);
+	ref = hashtable[HASH_VALUE(ip)] + base;
+
+	/* potential repetition */
+	if (ref >= ip-4) {
+		/* confirmed */
+		if (A32(ref) == A32(ip)) {
+			delta = (u16)(ip-ref);
+			repl = ml  = lz4hc_commonlength(ip + MINMATCH,
+					ref + MINMATCH, matchlimit) + MINMATCH;
+			*matchpos = ref;
+		}
+		ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK];
+	}
+
+	while ((ref >= ip - MAX_DISTANCE) && nbattempts) {
+		nbattempts--;
+		if (*(ref + ml) == *(ip + ml)) {
+			if (A32(ref) == A32(ip)) {
+				size_t mlt =
+					lz4hc_commonlength(ip + MINMATCH,
+					ref + MINMATCH, matchlimit) + MINMATCH;
+				if (mlt > ml) {
+					ml = mlt;
+					*matchpos = ref;
+				}
+			}
+		}
+		ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK];
+	}
+
+	/* Complete table */
+	if (repl) {
+		const BYTE *ptr = ip;
+		const BYTE *end;
+		end = ip + repl - (MINMATCH-1);
+		/* Pre-Load */
+		while (ptr < end - delta) {
+			chaintable[(size_t)(ptr) & MAXD_MASK] = delta;
+			ptr++;
+		}
+		do {
+			chaintable[(size_t)(ptr) & MAXD_MASK] = delta;
+			/* Head of chain */
+			hashtable[HASH_VALUE(ptr)] = (ptr) - base;
+			ptr++;
+		} while (ptr < end);
+		hc4->nexttoupdate = end;
+	}
+
+	return (int)ml;
+}
+
+static inline int lz4hc_insertandgetwidermatch(struct lz4hc_data *hc4,
+	const u8 *ip, const u8 *startlimit, const u8 *matchlimit, int longest,
+	const u8 **matchpos, const u8 **startpos)
+{
+	u16 *const chaintable = hc4->chaintable;
+	HTYPE *const hashtable = hc4->hashtable;
+#if LZ4_ARCH64
+	const BYTE * const base = hc4->base;
+#else
+	const int base = 0;
+#endif
+	const u8 *ref;
+	int nbattempts = MAX_NB_ATTEMPTS;
+	int delta = (int)(ip - startlimit);
+
+	/* First Match */
+	lz4hc_insert(hc4, ip);
+	ref = hashtable[HASH_VALUE(ip)] + base;
+
+	while ((ref >= ip - MAX_DISTANCE) && (ref >= hc4->base)
+		&& (nbattempts)) {
+		nbattempts--;
+		if (*(startlimit + longest) == *(ref - delta + longest)) {
+			if (A32(ref) == A32(ip)) {
+				const u8 *reft = ref + MINMATCH;
+				const u8 *ipt = ip + MINMATCH;
+				const u8 *startt = ip;
+
+				while (ipt < matchlimit-(STEPSIZE - 1)) {
+					#if LZ4_ARCH64
+					u64 diff = A64(reft) ^ A64(ipt);
+					#else
+					u32 diff = A32(reft) ^ A32(ipt);
+					#endif
+
+					if (!diff) {
+						ipt += STEPSIZE;
+						reft += STEPSIZE;
+						continue;
+					}
+					ipt += LZ4_NBCOMMONBYTES(diff);
+					goto _endcount;
+				}
+				#if LZ4_ARCH64
+				if ((ipt < (matchlimit - 3))
+					&& (A32(reft) == A32(ipt))) {
+					ipt += 4;
+					reft += 4;
+				}
+				ipt += 2;
+				#endif
+				if ((ipt < (matchlimit - 1))
+					&& (A16(reft) == A16(ipt))) {
+					reft += 2;
+				}
+				if ((ipt < matchlimit) && (*reft == *ipt))
+					ipt++;
+_endcount:
+				reft = ref;
+
+				while ((startt > startlimit)
+					&& (reft > hc4->base)
+					&& (startt[-1] == reft[-1])) {
+					startt--;
+					reft--;
+				}
+
+				if ((ipt - startt) > longest) {
+					longest = (int)(ipt - startt);
+					*matchpos = reft;
+					*startpos = startt;
+				}
+			}
+		}
+		ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK];
+	}
+	return longest;
+}
+
+static inline int lz4_encodesequence(const u8 **ip, u8 **op, const u8 **anchor,
+		int ml, const u8 *ref)
+{
+	int length, len;
+	u8 *token;
+
+	/* Encode Literal length */
+	length = (int)(*ip - *anchor);
+	token = (*op)++;
+	if (length >= (int)RUN_MASK) {
+		*token = (RUN_MASK << ML_BITS);
+		len = length - RUN_MASK;
+		for (; len > 254 ; len -= 255)
+			*(*op)++ = 255;
+		*(*op)++ = (u8)len;
+	} else
+		*token = (length << ML_BITS);
+
+	/* Copy Literals */
+	LZ4_BLINDCOPY(*anchor, *op, length);
+
+	/* Encode Offset */
+	LZ4_WRITE_LITTLEENDIAN_16(*op, (u16)(*ip - ref));
+
+	/* Encode MatchLength */
+	len = (int)(ml - MINMATCH);
+	if (len >= (int)ML_MASK) {
+		*token += ML_MASK;
+		len -= ML_MASK;
+		for (; len > 509 ; len -= 510) {
+			*(*op)++ = 255;
+			*(*op)++ = 255;
+		}
+		if (len > 254) {
+			len -= 255;
+			*(*op)++ = 255;
+		}
+		*(*op)++ = (u8)len;
+	} else
+		*token += len;
+
+	/* Prepare next loop */
+	*ip += ml;
+	*anchor = *ip;
+
+	return 0;
+}
+
+static int lz4_compresshcctx(struct lz4hc_data *ctx,
+		const char *source,
+		char *dest,
+		int isize)
+{
+	const u8 *ip = (const u8 *)source;
+	const u8 *anchor = ip;
+	const u8 *const iend = ip + isize;
+	const u8 *const mflimit = iend - MFLIMIT;
+	const u8 *const matchlimit = (iend - LASTLITERALS);
+
+	u8 *op = (u8 *)dest;
+
+	int ml, ml2, ml3, ml0;
+	const u8 *ref = NULL;
+	const u8 *start2 = NULL;
+	const u8 *ref2 = NULL;
+	const u8 *start3 = NULL;
+	const u8 *ref3 = NULL;
+	const u8 *start0;
+	const u8 *ref0;
+	int lastrun;
+
+	ip++;
+
+	/* Main Loop */
+	while (ip < mflimit) {
+		ml = lz4hc_insertandfindbestmatch(ctx, ip, matchlimit, (&ref));
+		if (!ml) {
+			ip++;
+			continue;
+		}
+
+		/* saved, in case we would skip too much */
+		start0 = ip;
+		ref0 = ref;
+		ml0 = ml;
+_search2:
+		if (ip+ml < mflimit)
+			ml2 = lz4hc_insertandgetwidermatch(ctx, ip + ml - 2,
+				ip + 1, matchlimit, ml, &ref2, &start2);
+		else
+			ml2 = ml;
+		/* No better match */
+		if (ml2 == ml) {
+			lz4_encodesequence(&ip, &op, &anchor, ml, ref);
+			continue;
+		}
+
+		if (start0 < ip) {
+			/* empirical */
+			if (start2 < ip + ml0) {
+				ip = start0;
+				ref = ref0;
+				ml = ml0;
+			}
+		}
+		/*
+		 * Here, start0==ip
+		 * First match too small: removed
+		 */
+		if ((start2 - ip) < 3) {
+			ml = ml2;
+			ip = start2;
+			ref = ref2;
+			goto _search2;
+		}
+
+_search3:
+		/*
+		 * Currently we have :
+		 * ml2 > ml1, and
+		 * ip1+3 <= ip2 (usually < ip1+ml1)
+		 */
+		if ((start2 - ip) < OPTIMAL_ML) {
+			int correction;
+			int new_ml = ml;
+			if (new_ml > OPTIMAL_ML)
+				new_ml = OPTIMAL_ML;
+			if (ip + new_ml > start2 + ml2 - MINMATCH)
+				new_ml = (int)(start2 - ip) + ml2 - MINMATCH;
+			correction = new_ml - (int)(start2 - ip);
+			if (correction > 0) {
+				start2 += correction;
+				ref2 += correction;
+				ml2 -= correction;
+			}
+		}
+		/*
+		 * Now, we have start2 = ip+new_ml,
+		 * with new_ml=min(ml, OPTIMAL_ML=18)
+		 */
+		if (start2 + ml2 < mflimit)
+			ml3 = lz4hc_insertandgetwidermatch(ctx,
+				start2 + ml2 - 3, start2, matchlimit,
+				ml2, &ref3, &start3);
+		else
+			ml3 = ml2;
+
+		/* No better match: 2 sequences to encode */
+		if (ml3 == ml2) {
+			/* ip & ref are known; Now for ml */
+			if (start2 < ip+ml)
+				ml = (int)(start2 - ip);
+
+			/* Now, encode 2 sequences */
+			lz4_encodesequence(&ip, &op, &anchor, ml, ref);
+			ip = start2;
+			lz4_encodesequence(&ip, &op, &anchor, ml2, ref2);
+			continue;
+		}
+
+		/* Not enough space for match 2: remove it */
+		if (start3 < ip + ml + 3) {
+			/*
+			 * can write Seq1 immediately ==> Seq2 is removed,
+			 * so Seq3 becomes Seq1
+			 */
+			if (start3 >= (ip + ml)) {
+				if (start2 < ip + ml) {
+					int correction =
+						(int)(ip + ml - start2);
+					start2 += correction;
+					ref2 += correction;
+					ml2 -= correction;
+					if (ml2 < MINMATCH) {
+						start2 = start3;
+						ref2 = ref3;
+						ml2 = ml3;
+					}
+				}
+
+				lz4_encodesequence(&ip, &op, &anchor, ml, ref);
+				ip  = start3;
+				ref = ref3;
+				ml  = ml3;
+
+				start0 = start2;
+				ref0 = ref2;
+				ml0 = ml2;
+				goto _search2;
+			}
+
+			start2 = start3;
+			ref2 = ref3;
+			ml2 = ml3;
+			goto _search3;
+		}
+
+		/*
+		 * OK, now we have 3 ascending matches; let's write at least
+		 * the first one. ip & ref are known; now for ml.
+		 */
+		if (start2 < ip + ml) {
+			if ((start2 - ip) < (int)ML_MASK) {
+				int correction;
+				if (ml > OPTIMAL_ML)
+					ml = OPTIMAL_ML;
+				if (ip + ml > start2 + ml2 - MINMATCH)
+					ml = (int)(start2 - ip) + ml2
+						- MINMATCH;
+				correction = ml - (int)(start2 - ip);
+				if (correction > 0) {
+					start2 += correction;
+					ref2 += correction;
+					ml2 -= correction;
+				}
+			} else
+				ml = (int)(start2 - ip);
+		}
+		lz4_encodesequence(&ip, &op, &anchor, ml, ref);
+
+		ip = start2;
+		ref = ref2;
+		ml = ml2;
+
+		start2 = start3;
+		ref2 = ref3;
+		ml2 = ml3;
+
+		goto _search3;
+	}
+
+	/* Encode Last Literals */
+	lastrun = (int)(iend - anchor);
+	if (lastrun >= (int)RUN_MASK) {
+		*op++ = (RUN_MASK << ML_BITS);
+		lastrun -= RUN_MASK;
+		for (; lastrun > 254 ; lastrun -= 255)
+			*op++ = 255;
+		*op++ = (u8) lastrun;
+	} else
+		*op++ = (lastrun << ML_BITS);
+	memcpy(op, anchor, iend - anchor);
+	op += iend - anchor;
+	/* End */
+	return (int) (((char *)op) - dest);
+}
+
+int lz4hc_compress(const unsigned char *src, size_t src_len,
+			unsigned char *dst, size_t *dst_len, void *wrkmem)
+{
+	int ret = -1;
+	int out_len = 0;
+
+	struct lz4hc_data *hc4 = (struct lz4hc_data *)wrkmem;
+	lz4hc_init(hc4, (const u8 *)src);
+	out_len = lz4_compresshcctx((struct lz4hc_data *)hc4, (const u8 *)src,
+		(char *)dst, (int)src_len);
+
+	if (out_len < 0)
+		goto exit;
+
+	*dst_len = out_len;
+	return 0;
+
+exit:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(lz4hc_compress);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("LZ4HC compressor");
diff --git a/lib/rbtree.c b/lib/rbtree.c
index c0e31fe..65f4eff 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -518,3 +518,43 @@
 	*new = *victim;
 }
 EXPORT_SYMBOL(rb_replace_node);
+
+static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
+{
+	for (;;) {
+		if (node->rb_left)
+			node = node->rb_left;
+		else if (node->rb_right)
+			node = node->rb_right;
+		else
+			return (struct rb_node *)node;
+	}
+}
+
+struct rb_node *rb_next_postorder(const struct rb_node *node)
+{
+	const struct rb_node *parent;
+	if (!node)
+		return NULL;
+	parent = rb_parent(node);
+
+	/* If we're sitting on node, we've already seen our children */
+	if (parent && node == parent->rb_left && parent->rb_right) {
+		/* If we are the parent's left node, go to the parent's right
+		 * node then all the way down to the left */
+		return rb_left_deepest_node(parent->rb_right);
+	} else
+		/* Otherwise we are the parent's right node, and the parent
+		 * should be next */
+		return (struct rb_node *)parent;
+}
+EXPORT_SYMBOL(rb_next_postorder);
+
+struct rb_node *rb_first_postorder(const struct rb_root *root)
+{
+	if (!root->rb_node)
+		return NULL;
+
+	return rb_left_deepest_node(root->rb_node);
+}
+EXPORT_SYMBOL(rb_first_postorder);
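
The two helpers above give a post-order walk in which every node is visited after both of its children, so a node may be freed during the walk. A minimal sketch (the struct and helper names are hypothetical):

	#include <linux/rbtree.h>
	#include <linux/slab.h>

	struct item {
		struct rb_node node;
		int key;
	};

	static void free_all(struct rb_root *root)
	{
		struct rb_node *pos = rb_first_postorder(root);

		while (pos) {
			struct rb_node *next = rb_next_postorder(pos);

			/* children were already visited, so freeing is safe */
			kfree(rb_entry(pos, struct item, node));
			pos = next;
		}
		*root = RB_ROOT;
	}
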
diff --git a/mm/filemap.c b/mm/filemap.c
index 725a100..d299dcf 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -684,6 +684,47 @@
 }
 
 /**
+ * page_cache_next_hole - find the next hole (not-present entry)
+ * @mapping: mapping
+ * @index: index
+ * @max_scan: maximum range to search
+ *
+ * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
+ * lowest indexed hole.
+ *
+ * Returns: the index of the hole if found, otherwise returns an index
+ * outside of the set specified (in which case 'return - index >=
+ * max_scan' will be true). In rare cases of index wrap-around, 0 will
+ * be returned.
+ *
+ * page_cache_next_hole may be called under rcu_read_lock. However,
+ * like radix_tree_gang_lookup, this will not atomically search a
+ * snapshot of the tree at a single point in time. For example, if a
+ * hole is created at index 5, then subsequently a hole is created at
+ * index 10, page_cache_next_hole covering both indexes may return 10
+ * if called under rcu_read_lock.
+ */
+pgoff_t page_cache_next_hole(struct address_space *mapping,
+			     pgoff_t index, unsigned long max_scan)
+{
+	unsigned long i;
+
+	for (i = 0; i < max_scan; i++) {
+		struct page *page;
+
+		page = radix_tree_lookup(&mapping->page_tree, index);
+		if (!page || radix_tree_exceptional_entry(page))
+			break;
+		index++;
+		if (index == 0)
+			break;
+	}
+
+	return index;
+}
+EXPORT_SYMBOL(page_cache_next_hole);
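
A hedged example of the calling convention described in the kernel-doc above, probing for the first hole after a given index under RCU (the surrounding function is hypothetical, and the declaration is assumed to live in a pagecache header such as linux/pagemap.h):

	static pgoff_t first_hole_after(struct address_space *mapping,
					pgoff_t index)
	{
		pgoff_t hole;

		rcu_read_lock();
		hole = page_cache_next_hole(mapping, index, 64);
		rcu_read_unlock();
		/* hole - index >= 64 means no hole in the scanned range */
		return hole;
	}
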
+
+/**
  * find_get_page - find and get a page reference
  * @mapping: the address_space to search
  * @offset: the page index
@@ -691,7 +732,8 @@
  * Is there a pagecache struct page at the given (mapping, offset) tuple?
  * If yes, increment its refcount and return it; if no, return NULL.
  */
-struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
+struct page *find_get_page_flags(struct address_space *mapping, pgoff_t offset,
+				 int fgp_flags)
 {
 	void **pagep;
 	struct page *page;
@@ -728,6 +770,8 @@
 		}
 	}
 out:
+	if (page && (fgp_flags & FGP_ACCESSED))
+		mark_page_accessed(page);
 	rcu_read_unlock();
 
 	return page;
@@ -1109,6 +1153,8 @@
 	unsigned int prev_offset;
 	int error;
 
+	trace_mm_filemap_do_generic_file_read(filp, *ppos, desc->count, 1);
+
 	index = *ppos >> PAGE_CACHE_SHIFT;
 	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
 	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
@@ -1503,7 +1549,7 @@
 static int page_cache_read(struct file *file, pgoff_t offset)
 {
 	struct address_space *mapping = file->f_mapping;
-	struct page *page; 
+	struct page *page;
 	int ret;
 
 	do {
@@ -1520,7 +1566,7 @@
 		page_cache_release(page);
 
 	} while (ret == AOP_TRUNCATED_PAGE);
-		
+
 	return ret;
 }
 
@@ -2307,6 +2353,8 @@
 	ssize_t written = 0;
 	unsigned int flags = 0;
 
+	trace_mm_filemap_generic_perform_write(file, pos, iov_iter_count(i), 0);
+
 	/*
 	 * Copies from kernel address space cannot fail (NFSD is a big user).
 	 */
@@ -2406,7 +2454,7 @@
 		written += status;
 		*ppos = pos + status;
   	}
-	
+
 	return written ? written : status;
 }
 EXPORT_SYMBOL(generic_file_buffered_write);
diff --git a/mm/madvise.c b/mm/madvise.c
index 7055883..85d1a54 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -102,7 +102,8 @@
 
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
 	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
-				vma->vm_file, pgoff, vma_policy(vma));
+				vma->vm_file, pgoff, vma_policy(vma),
+				vma_get_anon_name(vma));
 	if (*prev) {
 		vma = *prev;
 		goto success;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 437ae2c..460218d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6796,6 +6796,12 @@
 	return ret;
 }
 
+static int mem_cgroup_allow_attach(struct cgroup *cgroup,
+				   struct cgroup_taskset *tset)
+{
+	return subsys_cgroup_allow_attach(cgroup, tset);
+}
+
 static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
 				     struct cgroup_taskset *tset)
 {
@@ -6964,6 +6970,11 @@
 {
 	return 0;
 }
+static int mem_cgroup_allow_attach(struct cgroup *cgroup,
+				   struct cgroup_taskset *tset)
+{
+	return 0;
+}
 static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
 				     struct cgroup_taskset *tset)
 {
@@ -6999,6 +7010,7 @@
 	.can_attach = mem_cgroup_can_attach,
 	.cancel_attach = mem_cgroup_cancel_attach,
 	.attach = mem_cgroup_move_task,
+	.allow_attach = mem_cgroup_allow_attach,
 	.bind = mem_cgroup_bind,
 	.base_cftypes = mem_cgroup_files,
 	.early_init = 0,
diff --git a/mm/memory.c b/mm/memory.c
index 2ca2ee1..051e157 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3481,6 +3481,8 @@
 		/* file_update_time outside page_lock */
 		if (vma->vm_file && !page_mkwrite)
 			file_update_time(vma->vm_file);
+		if (vma->vm_prfile && !page_mkwrite)
+			file_update_time(vma->vm_prfile);
 	} else {
 		unlock_page(vmf.page);
 		if (anon)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index b2061bb..420e4b9 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -725,7 +725,7 @@
 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
 				  vma->anon_vma, vma->vm_file, pgoff,
-				  new_pol);
+				  new_pol, vma_get_anon_name(vma));
 		if (prev) {
 			vma = prev;
 			next = vma->vm_next;
diff --git a/mm/mlock.c b/mm/mlock.c
index 713e462..b680dfd 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -23,10 +23,10 @@
 
 int can_do_mlock(void)
 {
-	if (capable(CAP_IPC_LOCK))
-		return 1;
 	if (rlimit(RLIMIT_MEMLOCK) != 0)
 		return 1;
+	if (capable(CAP_IPC_LOCK))
+		return 1;
 	return 0;
 }
 EXPORT_SYMBOL(can_do_mlock);
@@ -289,7 +289,8 @@
 
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
 	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
-			  vma->vm_file, pgoff, vma_policy(vma));
+			  vma->vm_file, pgoff, vma_policy(vma),
+			  vma_get_anon_name(vma));
 	if (*prev) {
 		vma = *prev;
 		goto success;
diff --git a/mm/mmap.c b/mm/mmap.c
index 70ff9b4..295c965 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -52,6 +52,18 @@
 #define arch_rebalance_pgtables(addr, len)		(addr)
 #endif
 
+#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
+const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
+const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX;
+int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
+#endif
+#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
+const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
+const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
+int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
+#endif
+
 static void unmap_region(struct mm_struct *mm,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
 		unsigned long start, unsigned long end);
@@ -251,6 +263,8 @@
 		vma->vm_ops->close(vma);
 	if (vma->vm_file)
 		fput(vma->vm_file);
+	if (vma->vm_prfile)
+		fput(vma->vm_prfile);
 	mpol_put(vma_policy(vma));
 	kmem_cache_free(vm_area_cachep, vma);
 	return next;
@@ -861,6 +875,8 @@
 		if (file) {
 			uprobe_munmap(next, next->vm_start, next->vm_end);
 			fput(file);
+			if (vma->vm_prfile)
+				fput(vma->vm_prfile);
 		}
 		if (next->anon_vma)
 			anon_vma_merge(vma, next);
@@ -893,7 +909,8 @@
  * per-vma resources, so we don't attempt to merge those.
  */
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
-			struct file *file, unsigned long vm_flags)
+			struct file *file, unsigned long vm_flags,
+			const char __user *anon_name)
 {
 	if (vma->vm_flags ^ vm_flags)
 		return 0;
@@ -901,6 +918,8 @@
 		return 0;
 	if (vma->vm_ops && vma->vm_ops->close)
 		return 0;
+	if (vma_get_anon_name(vma) != anon_name)
+		return 0;
 	return 1;
 }
 
@@ -931,9 +950,10 @@
  */
 static int
 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
-	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
+	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff,
+	const char __user *anon_name)
 {
-	if (is_mergeable_vma(vma, file, vm_flags) &&
+	if (is_mergeable_vma(vma, file, vm_flags, anon_name) &&
 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
 		if (vma->vm_pgoff == vm_pgoff)
 			return 1;
@@ -950,9 +970,10 @@
  */
 static int
 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
-	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
+	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff,
+	const char __user *anon_name)
 {
-	if (is_mergeable_vma(vma, file, vm_flags) &&
+	if (is_mergeable_vma(vma, file, vm_flags, anon_name) &&
 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
 		pgoff_t vm_pglen;
 		vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
@@ -963,9 +984,9 @@
 }
 
 /*
- * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
- * whether that can be merged with its predecessor or its successor.
- * Or both (it neatly fills a hole).
+ * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
+ * figure out whether that can be merged with its predecessor or its
+ * successor.  Or both (it neatly fills a hole).
  *
  * In most cases - when called for mmap, brk or mremap - [addr,end) is
  * certain not to be mapped by the time vma_merge is called; but when
@@ -995,7 +1016,8 @@
 			struct vm_area_struct *prev, unsigned long addr,
 			unsigned long end, unsigned long vm_flags,
 		     	struct anon_vma *anon_vma, struct file *file,
-			pgoff_t pgoff, struct mempolicy *policy)
+			pgoff_t pgoff, struct mempolicy *policy,
+			const char __user *anon_name)
 {
 	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
 	struct vm_area_struct *area, *next;
@@ -1021,15 +1043,15 @@
 	 */
 	if (prev && prev->vm_end == addr &&
   			mpol_equal(vma_policy(prev), policy) &&
-			can_vma_merge_after(prev, vm_flags,
-						anon_vma, file, pgoff)) {
+			can_vma_merge_after(prev, vm_flags, anon_vma,
+						file, pgoff, anon_name)) {
 		/*
 		 * OK, it can.  Can we now merge in the successor as well?
 		 */
 		if (next && end == next->vm_start &&
 				mpol_equal(policy, vma_policy(next)) &&
-				can_vma_merge_before(next, vm_flags,
-					anon_vma, file, pgoff+pglen) &&
+				can_vma_merge_before(next, vm_flags, anon_vma,
+						file, pgoff+pglen, anon_name) &&
 				is_mergeable_anon_vma(prev->anon_vma,
 						      next->anon_vma, NULL)) {
 							/* cases 1, 6 */
@@ -1049,8 +1071,8 @@
 	 */
 	if (next && end == next->vm_start &&
  			mpol_equal(policy, vma_policy(next)) &&
-			can_vma_merge_before(next, vm_flags,
-					anon_vma, file, pgoff+pglen)) {
+			can_vma_merge_before(next, vm_flags, anon_vma,
+					file, pgoff+pglen, anon_name)) {
 		if (prev && addr < prev->vm_end)	/* case 4 */
 			err = vma_adjust(prev, prev->vm_start,
 				addr, prev->vm_pgoff, NULL);
@@ -1519,7 +1541,8 @@
 	/*
 	 * Can we just expand an old mapping?
 	 */
-	vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
+	vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff,
+			NULL, NULL);
 	if (vma)
 		goto out;
 
@@ -2445,6 +2468,8 @@
 
 	if (new->vm_file)
 		get_file(new->vm_file);
+	if (new->vm_prfile)
+		get_file(new->vm_prfile);
 
 	if (new->vm_ops && new->vm_ops->open)
 		new->vm_ops->open(new);
@@ -2464,6 +2489,8 @@
 		new->vm_ops->close(new);
 	if (new->vm_file)
 		fput(new->vm_file);
+	if (new->vm_prfile)
+		fput(new->vm_prfile);
 	unlink_anon_vmas(new);
  out_free_mpol:
 	mpol_put(pol);
@@ -2666,7 +2693,7 @@
 
 	/* Can we just expand an old private anonymous mapping? */
 	vma = vma_merge(mm, prev, addr, addr + len, flags,
-					NULL, NULL, pgoff, NULL);
+					NULL, NULL, pgoff, NULL, NULL);
 	if (vma)
 		goto out;
 
@@ -2824,7 +2851,8 @@
 	if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
 		return NULL;	/* should never get here */
 	new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
-			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
+			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
+			vma_get_anon_name(vma));
 	if (new_vma) {
 		/*
 		 * Source vma may have been merged into new_vma
@@ -2863,6 +2891,8 @@
 				goto out_free_mempol;
 			if (new_vma->vm_file)
 				get_file(new_vma->vm_file);
+			if (new_vma->vm_prfile)
+				get_file(new_vma->vm_prfile);
 			if (new_vma->vm_ops && new_vma->vm_ops->open)
 				new_vma->vm_ops->open(new_vma);
 			vma_link(mm, new_vma, prev, rb_link, rb_parent);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index e9f65aa..3c44c97 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -278,7 +278,8 @@
 	 */
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
 	*pprev = vma_merge(mm, *pprev, start, end, newflags,
-			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
+			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
+			vma_get_anon_name(vma));
 	if (*pprev) {
 		vma = *pprev;
 		goto success;
diff --git a/mm/nommu.c b/mm/nommu.c
index d9d07a5..c4d716d 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -651,6 +651,8 @@
 
 		if (region->vm_file)
 			fput(region->vm_file);
+		if (region->vm_prfile)
+			fput(region->vm_prfile);
 
 		/* IO memory and memory shared directly out of the pagecache
 		 * from ramfs/tmpfs mustn't be released here */
@@ -809,6 +811,8 @@
 		vma->vm_ops->close(vma);
 	if (vma->vm_file)
 		fput(vma->vm_file);
+	if (vma->vm_prfile)
+		fput(vma->vm_prfile);
 	put_nommu_region(vma->vm_region);
 	kmem_cache_free(vm_area_cachep, vma);
 }
@@ -1375,6 +1379,8 @@
 				}
 			}
 			fput(region->vm_file);
+			if (region->vm_prfile)
+				fput(region->vm_prfile);
 			kmem_cache_free(vm_region_jar, region);
 			region = pregion;
 			result = start;
@@ -1451,9 +1457,13 @@
 error:
 	if (region->vm_file)
 		fput(region->vm_file);
+	if (region->vm_prfile)
+		fput(region->vm_prfile);
 	kmem_cache_free(vm_region_jar, region);
 	if (vma->vm_file)
 		fput(vma->vm_file);
+	if (vma->vm_prfile)
+		fput(vma->vm_prfile);
 	kmem_cache_free(vm_area_cachep, vma);
 	kleave(" = %d", ret);
 	return ret;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b034f79..2e8a975 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2249,7 +2249,7 @@
 	return ret;
 }
 
-int test_set_page_writeback(struct page *page)
+int __test_set_page_writeback(struct page *page, bool keep_write)
 {
 	struct address_space *mapping = page_mapping(page);
 	int ret;
@@ -2271,9 +2271,10 @@
 			radix_tree_tag_clear(&mapping->page_tree,
 						page_index(page),
 						PAGECACHE_TAG_DIRTY);
-		radix_tree_tag_clear(&mapping->page_tree,
-				     page_index(page),
-				     PAGECACHE_TAG_TOWRITE);
+		if (!keep_write)
+			radix_tree_tag_clear(&mapping->page_tree,
+						page_index(page),
+						PAGECACHE_TAG_TOWRITE);
 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
 	} else {
 		ret = TestSetPageWriteback(page);
@@ -2283,7 +2284,7 @@
 	return ret;
 
 }
-EXPORT_SYMBOL(test_set_page_writeback);
+EXPORT_SYMBOL(__test_set_page_writeback);
 
 /*
  * Return true if any of the pages in the mapping are marked with the
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 494a081..025ebfc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -196,7 +196,20 @@
 	 "Movable",
 };
 
+/*
+ * Try to keep at least this much lowmem free.  Do not allow normal
+ * allocations below this point, only high priority ones. Automatically
+ * tuned according to the amount of memory in the system.
+ */
 int min_free_kbytes = 1024;
+int min_free_order_shift = 1;
+
+/*
+ * Extra memory for the system to try freeing. Used to temporarily
+ * free memory, to make space for new workloads. Anyone can allocate
+ * down to the min watermarks controlled by min_free_kbytes above.
+ */
+int extra_free_kbytes = 0;
 
 static unsigned long __meminitdata nr_kernel_pages;
 static unsigned long __meminitdata nr_all_pages;
@@ -1650,7 +1663,7 @@
 		free_pages -= z->free_area[o].nr_free << o;
 
 		/* Require fewer higher order pages to be free */
-		min >>= 1;
+		min >>= min_free_order_shift;
 
 		if (free_pages <= min)
 			return false;
@@ -5330,6 +5343,7 @@
 static void __setup_per_zone_wmarks(void)
 {
 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
+	unsigned long pages_low = extra_free_kbytes >> (PAGE_SHIFT - 10);
 	unsigned long lowmem_pages = 0;
 	struct zone *zone;
 	unsigned long flags;
@@ -5341,11 +5355,14 @@
 	}
 
 	for_each_zone(zone) {
-		u64 tmp;
+		u64 min, low;
 
 		spin_lock_irqsave(&zone->lock, flags);
-		tmp = (u64)pages_min * zone->managed_pages;
-		do_div(tmp, lowmem_pages);
+		min = (u64)pages_min * zone->managed_pages;
+		do_div(min, lowmem_pages);
+		low = (u64)pages_low * zone->managed_pages;
+		do_div(low, vm_total_pages);
+
 		if (is_highmem(zone)) {
 			/*
 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
@@ -5366,11 +5383,13 @@
 			 * If it's a lowmem zone, reserve a number of pages
 			 * proportionate to the zone's size.
 			 */
-			zone->watermark[WMARK_MIN] = tmp;
+			zone->watermark[WMARK_MIN] = min;
 		}
 
-		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
-		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
+		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) +
+					low + (min >> 2);
+		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) +
+					low + (min >> 1);
 
 		setup_zone_migrate_reserve(zone);
 		spin_unlock_irqrestore(&zone->lock, flags);
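
Worked through, the new per-zone watermarks become WMARK_LOW = min + low + min/4 and WMARK_HIGH = min + low + min/2, where 'low' is the zone's proportional share of extra_free_kbytes; with extra_free_kbytes = 0 this degenerates to the stock min + min/4 and min + min/2. A small illustrative arithmetic check (values are made up):

	/* zone with a min watermark of 1000 pages and 200 extra pages */
	unsigned long min = 1000, low = 200;
	unsigned long wmark_low  = min + low + (min >> 2);	/* 1450 pages */
	unsigned long wmark_high = min + low + (min >> 1);	/* 1700 pages */
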
@@ -5483,7 +5502,7 @@
 /*
  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 
  *	that we can call two helper functions whenever min_free_kbytes
- *	changes.
+ *	or extra_free_kbytes changes.
  */
 int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 
 	void __user *buffer, size_t *length, loff_t *ppos)
diff --git a/mm/page_io.c b/mm/page_io.c
index a8a3ef4..ba05b64 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -21,6 +21,7 @@
 #include <linux/writeback.h>
 #include <linux/frontswap.h>
 #include <linux/aio.h>
+#include <linux/blkdev.h>
 #include <asm/pgtable.h>
 
 static struct bio *get_swap_bio(gfp_t gfp_flags,
@@ -80,9 +81,54 @@
 				imajor(bio->bi_bdev->bd_inode),
 				iminor(bio->bi_bdev->bd_inode),
 				(unsigned long long)bio->bi_sector);
-	} else {
-		SetPageUptodate(page);
+		goto out;
 	}
+
+	SetPageUptodate(page);
+
+	/*
+	 * There is no guarantee that the page is in swap cache - the software
+	 * suspend code (at least) uses end_swap_bio_read() against a non-
+	 * swapcache page.  So we must check PG_swapcache before proceeding with
+	 * this optimization.
+	 */
+	if (likely(PageSwapCache(page))) {
+		struct swap_info_struct *sis;
+
+		sis = page_swap_info(page);
+		if (sis->flags & SWP_BLKDEV) {
+			/*
+			 * The swap subsystem performs lazy swap slot freeing,
+			 * expecting that the page will be swapped out again.
+			 * So we can avoid an unnecessary write if the page
+			 * isn't redirtied.
+			 * This is good for real swap storage because we can
+			 * reduce unnecessary I/O and enhance wear-leveling
+			 * if an SSD is used as the swap device.
+			 * But if an in-memory swap device (e.g. zram) is used,
+			 * this causes a duplicated copy between uncompressed
+			 * data in VM-owned memory and compressed data in
+			 * zram-owned memory.  So let's free zram-owned memory
+			 * and make the VM-owned decompressed page *dirty*,
+			 * so the page should be swapped out somewhere again if
+			 * we again wish to reclaim it.
+			 */
+			struct gendisk *disk = sis->bdev->bd_disk;
+			if (disk->fops->swap_slot_free_notify) {
+				swp_entry_t entry;
+				unsigned long offset;
+
+				entry.val = page_private(page);
+				offset = swp_offset(entry);
+
+				SetPageDirty(page);
+				disk->fops->swap_slot_free_notify(sis->bdev,
+						offset);
+			}
+		}
+	}
+
+out:
 	unlock_page(page);
 	bio_put(bio);
 }
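
The optimization above only triggers when the block driver advertises a swap_slot_free_notify hook in its block_device_operations; zram is the intended user. A hedged sketch of the driver side (everything except the ops field name is illustrative):

	static void mydrv_swap_slot_free_notify(struct block_device *bdev,
						unsigned long offset)
	{
		/* drop the driver's copy of the now-redundant slot */
		mydrv_free_slot(bdev->bd_disk->private_data, offset);
	}

	static const struct block_device_operations mydrv_ops = {
		.owner			= THIS_MODULE,
		.swap_slot_free_notify	= mydrv_swap_slot_free_notify,
	};
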
diff --git a/mm/readahead.c b/mm/readahead.c
index daed28d..829a77c 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -48,7 +48,7 @@
 		if (!trylock_page(page))
 			BUG();
 		page->mapping = mapping;
-		do_invalidatepage(page, 0);
+		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
 		page->mapping = NULL;
 		unlock_page(page);
 	}
diff --git a/mm/shmem.c b/mm/shmem.c
index cc02b6c..87e496f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3028,6 +3028,14 @@
 }
 EXPORT_SYMBOL_GPL(shmem_file_setup);
 
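+/*
+ * shmem_set_file - back a VMA with a shmem file
+ * Drops any file reference the VMA already holds and takes over the
+ * caller's reference to @file.
+ */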
+void shmem_set_file(struct vm_area_struct *vma, struct file *file)
+{
+	if (vma->vm_file)
+		fput(vma->vm_file);
+	vma->vm_file = file;
+	vma->vm_ops = &shmem_vm_ops;
+}
+
 /**
  * shmem_zero_setup - setup a shared anonymous mapping
  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
@@ -3041,10 +3049,7 @@
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	if (vma->vm_file)
-		fput(vma->vm_file);
-	vma->vm_file = file;
-	vma->vm_ops = &shmem_vm_ops;
+	shmem_set_file(vma, file);
 	return 0;
 }
 
diff --git a/mm/truncate.c b/mm/truncate.c
index 2d6151f..ea8ba2a 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -27,7 +27,8 @@
 /**
  * do_invalidatepage - invalidate part or all of a page
  * @page: the page which is affected
- * @offset: the index of the truncation point
+ * @offset: start of the range to invalidate
+ * @length: length of the range to invalidate
  *
  * do_invalidatepage() is called when all or part of the page has become
  * invalidated by a truncate operation.
@@ -38,16 +39,18 @@
  * point.  Because the caller is about to free (and possibly reuse) those
  * blocks on-disk.
  */
-void do_invalidatepage(struct page *page, unsigned long offset)
+void do_invalidatepage(struct page *page, unsigned int offset,
+		       unsigned int length)
 {
-	void (*invalidatepage)(struct page *, unsigned long);
+	void (*invalidatepage)(struct page *, unsigned int, unsigned int);
+
 	invalidatepage = page->mapping->a_ops->invalidatepage;
 #ifdef CONFIG_BLOCK
 	if (!invalidatepage)
 		invalidatepage = block_invalidatepage;
 #endif
 	if (invalidatepage)
-		(*invalidatepage)(page, offset);
+		(*invalidatepage)(page, offset, length);
 }
 
 static inline void truncate_partial_page(struct page *page, unsigned partial)
@@ -55,7 +58,7 @@
 	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
 	cleancache_invalidate_page(page->mapping, page);
 	if (page_has_private(page))
-		do_invalidatepage(page, partial);
+		do_invalidatepage(page, partial, PAGE_CACHE_SIZE - partial);
 }
 
 /*
@@ -104,7 +107,7 @@
 		return -EIO;
 
 	if (page_has_private(page))
-		do_invalidatepage(page, 0);
+		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
 
 	cancel_dirty_page(page, PAGE_CACHE_SIZE);
 
@@ -603,6 +606,7 @@
 	struct page *page;
 	pgoff_t index;
 
+	WARN_ON(!mutex_is_locked(&inode->i_mutex));
 	WARN_ON(to > inode->i_size);
 
 	if (from >= to || bsize == PAGE_CACHE_SIZE)
diff --git a/mm/util.c b/mm/util.c
index 0b17252..f68b2db 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -7,6 +7,7 @@
 #include <linux/security.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
+#include <linux/vmalloc.h>
 #include <asm/uaccess.h>
 
 #include "internal.h"
@@ -381,6 +382,15 @@
 }
 EXPORT_SYMBOL(vm_mmap);
 
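+/*
+ * kvfree - free memory allocated with either kmalloc() or vmalloc()
+ * @addr: pointer returned by kmalloc()/kzalloc() or vmalloc()/vzalloc()
+ */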
+void kvfree(const void *addr)
+{
+	if (is_vmalloc_addr(addr))
+		vfree(addr);
+	else
+		kfree(addr);
+}
+EXPORT_SYMBOL(kvfree);
+
 struct address_space *page_mapping(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d456560..bd4f271 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -28,6 +28,7 @@
 #include <linux/kmemleak.h>
 #include <linux/atomic.h>
 #include <linux/llist.h>
+#include <linux/sizes.h>
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
 #include <asm/shmparam.h>
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a1e3bec..9c786b9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -43,6 +43,7 @@
 #include <linux/sysctl.h>
 #include <linux/oom.h>
 #include <linux/prefetch.h>
+#include <linux/debugfs.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -155,6 +156,40 @@
 	return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
 }
 
+struct dentry *debug_file;
+
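+/* Report every registered shrinker's current object count via seq_file */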
+static int debug_shrinker_show(struct seq_file *s, void *unused)
+{
+	struct shrinker *shrinker;
+	struct shrink_control sc;
+
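+	/* nr_to_scan == 0: query each shrinker's object count, don't shrink */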
+	sc.gfp_mask = -1;
+	sc.nr_to_scan = 0;
+
+	down_read(&shrinker_rwsem);
+	list_for_each_entry(shrinker, &shrinker_list, list) {
+		char name[64];
+		int num_objs;
+
+		num_objs = shrinker->shrink(shrinker, &sc);
+		seq_printf(s, "%pf %d\n", shrinker->shrink, num_objs);
+	}
+	up_read(&shrinker_rwsem);
+	return 0;
+}
+
+static int debug_shrinker_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, debug_shrinker_show, inode->i_private);
+}
+
+static const struct file_operations debug_shrinker_fops = {
+	.open = debug_shrinker_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
 /*
  * Add a shrinker callback to be called from the vm
  */
@@ -167,6 +202,15 @@
 }
 EXPORT_SYMBOL(register_shrinker);
 
+static int __init add_shrinker_debug(void)
+{
+	debugfs_create_file("shrinker", 0644, NULL, NULL,
+			    &debug_shrinker_fops);
+	return 0;
+}
+
+late_initcall(add_shrinker_debug);
+
 /*
  * Remove one
  */
diff --git a/net/Kconfig b/net/Kconfig
index 2ddc904..c5e34cf 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -81,6 +81,20 @@
 
 endif # if INET
 
+config ANDROID_PARANOID_NETWORK
+	bool "Only allow certain groups to create sockets"
+	default y
+	help
+	  Restricts socket creation to processes in certain groups (such
+	  as the Android AID_INET group) or with certain capabilities.
+
+config NET_ACTIVITY_STATS
+	bool "Network activity statistics tracking"
+	default y
+	help
+	  Network activity statistics are useful for tracking wireless
+	  modem activity on 2G, 3G and 4G wireless networks. Counts the
+	  number of transmissions and groups them into power-of-two time
+	  buckets.
+
 config NETWORK_SECMARK
 	bool "Security Marking"
 	help
@@ -220,7 +234,7 @@
 source "net/netlink/Kconfig"
 
 config RPS
-	boolean
+	boolean "RPS"
 	depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
 	default y
 
diff --git a/net/Makefile b/net/Makefile
index 091e7b04..67d460a 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -70,3 +70,4 @@
 obj-$(CONFIG_NFC)		+= nfc/
 obj-$(CONFIG_OPENVSWITCH)	+= openvswitch/
 obj-$(CONFIG_VSOCKETS)	+= vmw_vsock/
+obj-$(CONFIG_NET_ACTIVITY_STATS)		+= activity_stats.o
diff --git a/net/activity_stats.c b/net/activity_stats.c
new file mode 100644
index 0000000..4609ce2
--- /dev/null
+++ b/net/activity_stats.c
@@ -0,0 +1,119 @@
+/* net/activity_stats.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ */
+
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/suspend.h>
+#include <net/net_namespace.h>
+
+/*
+ * Track transmission rates in buckets (power of 2).
+ * 1,2,4,8...512 seconds.
+ *
+ * Buckets represent the count of network transmissions at least
+ * N seconds apart, where N is 1 << bucket index.
+ */
+#define BUCKET_MAX 10
+
+/* Track network activity frequency */
+static unsigned long activity_stats[BUCKET_MAX];
+static ktime_t last_transmit;
+static ktime_t suspend_time;
+static DEFINE_SPINLOCK(activity_lock);
+
+void activity_stats_update(void)
+{
+	int i;
+	unsigned long flags;
+	ktime_t now;
+	s64 delta;
+
+	spin_lock_irqsave(&activity_lock, flags);
+	now = ktime_get();
+	delta = ktime_to_ns(ktime_sub(now, last_transmit));
+
+	for (i = BUCKET_MAX - 1; i >= 0; i--) {
+		/*
+		 * Check if the time delta since the last transmission is
+		 * within this bucket's minimum range.
+		 */
+		if (delta < (1000000000ULL << i))
+			continue;
+
+		activity_stats[i]++;
+		last_transmit = now;
+		break;
+	}
+	spin_unlock_irqrestore(&activity_lock, flags);
+}
+
+static int activity_stats_show(struct seq_file *m, void *v)
+{
+	int i;
+	int ret;
+
+	seq_printf(m, "Min Bucket(sec) Count\n");
+
+	for (i = 0; i < BUCKET_MAX; i++) {
+		ret = seq_printf(m, "%15d %lu\n", 1 << i, activity_stats[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int activity_stats_notifier(struct notifier_block *nb,
+					unsigned long event, void *dummy)
+{
+	switch (event) {
+		case PM_SUSPEND_PREPARE:
+			suspend_time = ktime_get_real();
+			break;
+
+		case PM_POST_SUSPEND:
+			suspend_time = ktime_sub(ktime_get_real(), suspend_time);
+			last_transmit = ktime_sub(last_transmit, suspend_time);
+	}
+
+	return 0;
+}
+
+static int activity_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, activity_stats_show, PDE_DATA(inode));
+}
+
+static const struct file_operations activity_stats_fops = {
+	.open		= activity_stats_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static struct notifier_block activity_stats_notifier_block = {
+	.notifier_call = activity_stats_notifier,
+};
+
+static int  __init activity_stats_init(void)
+{
+	proc_create("activity", S_IRUGO,
+		    init_net.proc_net_stat, &activity_stats_fops);
+	return register_pm_notifier(&activity_stats_notifier_block);
+}
+
+subsys_initcall(activity_stats_init);
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 6629cdc..f7c3682 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -30,6 +30,11 @@
 #include <net/bluetooth/bluetooth.h>
 #include <linux/proc_fs.h>
 
+#ifndef CONFIG_BT_SOCK_DEBUG
+#undef  BT_DBG
+#define BT_DBG(D...)
+#endif
+
 #define VERSION "2.16"
 
 /* Bluetooth sockets */
@@ -103,11 +108,40 @@
 }
 EXPORT_SYMBOL(bt_sock_unregister);
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+static inline int current_has_bt_admin(void)
+{
+	return !current_euid();
+}
+
+static inline int current_has_bt(void)
+{
+	return current_has_bt_admin();
+}
+#else
+static inline int current_has_bt_admin(void)
+{
+	return 1;
+}
+
+static inline int current_has_bt(void)
+{
+	return 1;
+}
+#endif
+
 static int bt_sock_create(struct net *net, struct socket *sock, int proto,
 			  int kern)
 {
 	int err;
 
+	if (proto == BTPROTO_RFCOMM || proto == BTPROTO_SCO ||
+			proto == BTPROTO_L2CAP) {
+		if (!current_has_bt())
+			return -EPERM;
+	} else if (!current_has_bt_admin())
+		return -EPERM;
+
 	if (net != &init_net)
 		return -EAFNOSUPPORT;
 
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
index d459ed4..a3f3380 100644
--- a/net/bluetooth/amp.c
+++ b/net/bluetooth/amp.c
@@ -113,7 +113,7 @@
 	bdaddr_t *dst = mgr->l2cap_conn->dst;
 	struct hci_conn *hcon;
 
-	hcon = hci_conn_add(hdev, AMP_LINK, dst);
+	hcon = hci_conn_add(hdev, AMP_LINK, 0, dst);
 	if (!hcon)
 		return NULL;
 
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 8e7290a..c9b2d50 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -354,7 +354,8 @@
 		     &conn->dst);
 }
 
-struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
+struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type,
+					__u16 pkt_type, bdaddr_t *dst)
 {
 	struct hci_conn *conn;
 
@@ -382,14 +383,22 @@
 		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
 		break;
 	case SCO_LINK:
-		if (lmp_esco_capable(hdev))
-			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
-					(hdev->esco_type & EDR_ESCO_MASK);
-		else
-			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
-		break;
+		if (!pkt_type)
+			pkt_type = SCO_ESCO_MASK;
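+		/* fall through */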
 	case ESCO_LINK:
-		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
+		if (!pkt_type)
+			pkt_type = ALL_ESCO_MASK;
+		if (lmp_esco_capable(hdev)) {
+			/* HCI Setup Synchronous Connection Command uses
+			   reverse logic on the EDR_ESCO_MASK bits */
+			conn->pkt_type = (pkt_type ^ EDR_ESCO_MASK) &
+					hdev->esco_type;
+		} else {
+			/* Legacy HCI Add Sco Connection Command uses a
+			   shifted bitmask */
+			conn->pkt_type = (pkt_type << 5) & hdev->pkt_type &
+					SCO_PTYPE_MASK;
+		}
 		break;
 	}
 
@@ -520,7 +529,7 @@
 		if (le)
 			return ERR_PTR(-EBUSY);
 
-		le = hci_conn_add(hdev, LE_LINK, dst);
+		le = hci_conn_add(hdev, LE_LINK, 0, dst);
 		if (!le)
 			return ERR_PTR(-ENOMEM);
 
@@ -543,7 +552,7 @@
 
 	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
 	if (!acl) {
-		acl = hci_conn_add(hdev, ACL_LINK, dst);
+		acl = hci_conn_add(hdev, ACL_LINK, 0, dst);
 		if (!acl)
 			return ERR_PTR(-ENOMEM);
 	}
@@ -561,7 +570,8 @@
 }
 
 static struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type,
-				bdaddr_t *dst, u8 sec_level, u8 auth_type)
+					__u16 pkt_type, bdaddr_t *dst,
+					u8 sec_level, u8 auth_type)
 {
 	struct hci_conn *acl;
 	struct hci_conn *sco;
@@ -572,7 +582,7 @@
 
 	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
 	if (!sco) {
-		sco = hci_conn_add(hdev, type, dst);
+		sco = hci_conn_add(hdev, type, pkt_type, dst);
 		if (!sco) {
 			hci_conn_drop(acl);
 			return ERR_PTR(-ENOMEM);
@@ -602,7 +612,8 @@
 }
 
 /* Create SCO, ACL or LE connection. */
-struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
+struct hci_conn *hci_connect(struct hci_dev *hdev, int type,
+			     __u16 pkt_type, bdaddr_t *dst,
 			     __u8 dst_type, __u8 sec_level, __u8 auth_type)
 {
 	BT_DBG("%s dst %pMR type 0x%x", hdev->name, dst, type);
@@ -614,7 +625,7 @@
 		return hci_connect_acl(hdev, dst, sec_level, auth_type);
 	case SCO_LINK:
 	case ESCO_LINK:
-		return hci_connect_sco(hdev, type, dst, sec_level, auth_type);
+		return hci_connect_sco(hdev, type, pkt_type, dst, sec_level, auth_type);
 	}
 
 	return ERR_PTR(-EINVAL);
@@ -883,6 +894,15 @@
 		(ci + n)->out   = c->out;
 		(ci + n)->state = c->state;
 		(ci + n)->link_mode = c->link_mode;
+		if (c->type == SCO_LINK) {
+			(ci + n)->mtu = hdev->sco_mtu;
+			(ci + n)->cnt = hdev->sco_cnt;
+			(ci + n)->pkts = hdev->sco_pkts;
+		} else {
+			(ci + n)->mtu = hdev->acl_mtu;
+			(ci + n)->cnt = hdev->acl_cnt;
+			(ci + n)->pkts = hdev->acl_pkts;
+		}
 		if (++n >= req.conn_num)
 			break;
 	}
@@ -919,6 +939,15 @@
 		ci.out   = conn->out;
 		ci.state = conn->state;
 		ci.link_mode = conn->link_mode;
+		if (req.type == SCO_LINK) {
+			ci.mtu = hdev->sco_mtu;
+			ci.cnt = hdev->sco_cnt;
+			ci.pkts = hdev->sco_pkts;
+		} else {
+			ci.mtu = hdev->acl_mtu;
+			ci.cnt = hdev->acl_cnt;
+			ci.pkts = hdev->acl_pkts;
+		}
 	}
 	hci_dev_unlock(hdev);
 
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 5daf7ab..1526fb2
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1119,7 +1119,7 @@
 		}
 	} else {
 		if (!conn) {
-			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
+			conn = hci_conn_add(hdev, ACL_LINK, 0, &cp->bdaddr);
 			if (conn) {
 				conn->out = true;
 				conn->link_mode |= HCI_LM_MASTER;
@@ -1748,6 +1748,15 @@
 	hci_conn_check_pending(hdev);
 }
 
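+/* True if this adapter currently has a connected SCO or eSCO link */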
+static inline bool is_sco_active(struct hci_dev *hdev)
+{
+	if (hci_conn_hash_lookup_state(hdev, SCO_LINK, BT_CONNECTED) ||
+			(hci_conn_hash_lookup_state(hdev, ESCO_LINK,
+						    BT_CONNECTED)))
+		return true;
+	return false;
+}
+
 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_conn_request *ev = (void *) skb->data;
@@ -1775,7 +1784,8 @@
 		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
 					       &ev->bdaddr);
 		if (!conn) {
-			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
+			/* pkt_type not yet used for incoming connections */
+			conn = hci_conn_add(hdev, ev->link_type, 0, &ev->bdaddr);
 			if (!conn) {
 				BT_ERR("No memory for new connection");
 				hci_dev_unlock(hdev);
@@ -1794,7 +1804,8 @@
 
 			bacpy(&cp.bdaddr, &ev->bdaddr);
 
-			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
+			if (lmp_rswitch_capable(hdev) && ((mask & HCI_LM_MASTER)
+						|| is_sco_active(hdev)))
 				cp.role = 0x00; /* Become master */
 			else
 				cp.role = 0x01; /* Remain slave */
@@ -2963,6 +2974,7 @@
 		hci_conn_add_sysfs(conn);
 		break;
 
+	case 0x10:	/* Connection Accept Timeout */
 	case 0x11:	/* Unsupported Feature or Parameter Value */
 	case 0x1c:	/* SCO interval rejected */
 	case 0x1a:	/* Unsupported Remote Feature */
@@ -3540,7 +3552,7 @@
 
 	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
 	if (!conn) {
-		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
+		conn = hci_conn_add(hdev, LE_LINK, 0, &ev->bdaddr);
 		if (!conn) {
 			BT_ERR("No memory for new connection");
 			goto unlock;
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index c605633..66550c9 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -85,16 +85,14 @@
 		ci->product = session->input->id.product;
 		ci->version = session->input->id.version;
 		if (session->input->name)
-			strncpy(ci->name, session->input->name, 128);
+			strlcpy(ci->name, session->input->name, 128);
 		else
-			strncpy(ci->name, "HID Boot Device", 128);
-	}
-
-	if (session->hid) {
+			strlcpy(ci->name, "HID Boot Device", 128);
+	} else if (session->hid) {
 		ci->vendor  = session->hid->vendor;
 		ci->product = session->hid->product;
 		ci->version = session->hid->version;
-		strncpy(ci->name, session->hid->name, 128);
+		strlcpy(ci->name, session->hid->name, 128);
 	}
 }
 
@@ -160,7 +158,7 @@
 		  (!!test_bit(LED_COMPOSE, dev->led) << 3) |
 		  (!!test_bit(LED_SCROLLL, dev->led) << 2) |
 		  (!!test_bit(LED_CAPSL,   dev->led) << 1) |
-		  (!!test_bit(LED_NUML,    dev->led));
+		  (!!test_bit(LED_NUML,    dev->led) << 0);
 
 	if (session->leds == newleds)
 		return 0;
@@ -229,26 +227,6 @@
 	input_sync(dev);
 }
 
-static int hidp_send_report(struct hidp_session *session, struct hid_report *report)
-{
-	unsigned char hdr;
-	u8 *buf;
-	int rsize, ret;
-
-	buf = hid_alloc_report_buf(report, GFP_ATOMIC);
-	if (!buf)
-		return -EIO;
-
-	hid_output_report(report, buf);
-	hdr = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
-
-	rsize = ((report->size - 1) >> 3) + 1 + (report->id > 0);
-	ret = hidp_send_intr_message(session, hdr, buf, rsize);
-
-	kfree(buf);
-	return ret;
-}
-
 static int hidp_get_raw_report(struct hid_device *hid,
 		unsigned char report_number,
 		unsigned char *data, size_t count,
@@ -334,17 +312,24 @@
 	return ret;
 }
 
-static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count,
-		unsigned char report_type)
+static int hidp_set_raw_report(struct hid_device *hid, unsigned char reportnum,
+			       unsigned char *data, size_t count,
+			       unsigned char report_type)
 {
 	struct hidp_session *session = hid->driver_data;
 	int ret;
 
-	if (report_type == HID_OUTPUT_REPORT) {
-		report_type = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
-		return hidp_send_intr_message(session, report_type,
-					      data, count);
-	} else if (report_type != HID_FEATURE_REPORT) {
+	switch (report_type) {
+	case HID_FEATURE_REPORT:
+		report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE;
+		break;
+	case HID_INPUT_REPORT:
+		report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_INPUT;
+		break;
+	case HID_OUTPUT_REPORT:
+		report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_OUPUT;
+		break;
+	default:
 		return -EINVAL;
 	}
 
@@ -352,8 +337,8 @@
 		return -ERESTARTSYS;
 
 	/* Set up our wait, and send the report request to the device. */
+	data[0] = reportnum;
 	set_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
-	report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE;
 	ret = hidp_send_ctrl_message(session, report_type, data, count);
 	if (ret)
 		goto err;
@@ -392,6 +377,29 @@
 	return ret;
 }
 
+static int hidp_output_report(struct hid_device *hid, __u8 *data, size_t count)
+{
+	struct hidp_session *session = hid->driver_data;
+
+	return hidp_send_intr_message(session,
+				      HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT,
+				      data, count);
+}
+
+static int hidp_raw_request(struct hid_device *hid, unsigned char reportnum,
+			    __u8 *buf, size_t len, unsigned char rtype,
+			    int reqtype)
+{
+	switch (reqtype) {
+	case HID_REQ_GET_REPORT:
+		return hidp_get_raw_report(hid, reportnum, buf, len, rtype);
+	case HID_REQ_SET_REPORT:
+		return hidp_set_raw_report(hid, reportnum, buf, len, rtype);
+	default:
+		return -EIO;
+	}
+}
+
 static void hidp_idle_timeout(unsigned long arg)
 {
 	struct hidp_session *session = (struct hidp_session *) arg;
@@ -425,6 +433,16 @@
 		del_timer(&session->timer);
 }
 
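+/* Stage incoming reports in a session-owned buffer, clamped to
+ * HID_MAX_BUFFER_SIZE, before handing them to the HID core.
+ */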
+static void hidp_process_report(struct hidp_session *session,
+				int type, const u8 *data, int len, int intr)
+{
+	if (len > HID_MAX_BUFFER_SIZE)
+		len = HID_MAX_BUFFER_SIZE;
+
+	memcpy(session->input_buf, data, len);
+	hid_input_report(session->hid, type, session->input_buf, len, intr);
+}
+
 static void hidp_process_handshake(struct hidp_session *session,
 					unsigned char param)
 {
@@ -497,7 +515,8 @@
 			hidp_input_report(session, skb);
 
 		if (session->hid)
-			hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 0);
+			hidp_process_report(session, HID_INPUT_REPORT,
+					    skb->data, skb->len, 0);
 		break;
 
 	case HIDP_DATA_RTYPE_OTHER:
@@ -579,7 +598,8 @@
 			hidp_input_report(session, skb);
 
 		if (session->hid) {
-			hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 1);
+			hidp_process_report(session, HID_INPUT_REPORT,
+					    skb->data, skb->len, 1);
 			BT_DBG("report len %d", skb->len);
 		}
 	} else {
@@ -703,20 +723,6 @@
 
 static int hidp_start(struct hid_device *hid)
 {
-	struct hidp_session *session = hid->driver_data;
-	struct hid_report *report;
-
-	if (hid->quirks & HID_QUIRK_NO_INIT_REPORTS)
-		return 0;
-
-	list_for_each_entry(report, &hid->report_enum[HID_INPUT_REPORT].
-			report_list, list)
-		hidp_send_report(session, report);
-
-	list_for_each_entry(report, &hid->report_enum[HID_FEATURE_REPORT].
-			report_list, list)
-		hidp_send_report(session, report);
-
 	return 0;
 }
 
@@ -736,6 +742,8 @@
 	.stop = hidp_stop,
 	.open  = hidp_open,
 	.close = hidp_close,
+	.raw_request = hidp_raw_request,
+	.output_report = hidp_output_report,
 };
 
 /* This function sets up the hid device. It does not add it
@@ -746,14 +754,10 @@
 	struct hid_device *hid;
 	int err;
 
-	session->rd_data = kzalloc(req->rd_size, GFP_KERNEL);
-	if (!session->rd_data)
-		return -ENOMEM;
+	session->rd_data = memdup_user(req->rd_data, req->rd_size);
+	if (IS_ERR(session->rd_data))
+		return PTR_ERR(session->rd_data);
 
-	if (copy_from_user(session->rd_data, req->rd_data, req->rd_size)) {
-		err = -EFAULT;
-		goto fault;
-	}
 	session->rd_size = req->rd_size;
 
 	hid = hid_allocate_device();
@@ -775,17 +779,17 @@
 	strncpy(hid->name, req->name, sizeof(req->name) - 1);
 
 	snprintf(hid->phys, sizeof(hid->phys), "%pMR",
-		 &bt_sk(session->ctrl_sock->sk)->src);
+		 &l2cap_pi(session->ctrl_sock->sk)->chan->src);
 
+	/* NOTE: Some device modules depend on the dst address being stored in
+	 * uniq. Please be aware of this before making changes to this behavior.
+	 */
 	snprintf(hid->uniq, sizeof(hid->uniq), "%pMR",
-		 &bt_sk(session->ctrl_sock->sk)->dst);
+		 &l2cap_pi(session->ctrl_sock->sk)->chan->dst);
 
 	hid->dev.parent = &session->conn->hcon->dev;
 	hid->ll_driver = &hidp_hid_driver;
 
-	hid->hid_get_raw_report = hidp_get_raw_report;
-	hid->hid_output_raw_report = hidp_output_raw_report;
-
 	/* True if device is blacklisted in drivers/hid/hid-core.c */
 	if (hid_ignore(hid)) {
 		hid_destroy_device(session->hid);
@@ -870,6 +874,29 @@
 }
 
 /*
+ * Asynchronous device registration
+ * HID device drivers might want to perform I/O during initialization to
+ * detect device types. Therefore, call device registration in a separate
+ * worker so the HIDP thread can schedule I/O operations.
+ * Note that this must be called after the worker thread was initialized
+ * successfully. This will then add the devices and increase session state
+ * on success, otherwise it will terminate the session thread.
+ */
+static void hidp_session_dev_work(struct work_struct *work)
+{
+	struct hidp_session *session = container_of(work,
+						    struct hidp_session,
+						    dev_init);
+	int ret;
+
+	ret = hidp_session_dev_add(session);
+	if (!ret)
+		atomic_inc(&session->state);
+	else
+		hidp_session_terminate(session);
+}
+
+/*
  * Create new session object
  * Allocate session object, initialize static fields, copy input data into the
  * object and take a reference to all sub-objects.
@@ -902,7 +929,7 @@
 
 	/* connection management */
 	bacpy(&session->bdaddr, bdaddr);
-	session->conn = conn;
+	session->conn = l2cap_conn_get(conn);
 	session->user.probe = hidp_session_probe;
 	session->user.remove = hidp_session_remove;
 	session->ctrl_sock = ctrl_sock;
@@ -916,6 +943,7 @@
 	session->idle_to = req->idle_to;
 
 	/* device management */
+	INIT_WORK(&session->dev_init, hidp_session_dev_work);
 	setup_timer(&session->timer, hidp_idle_timeout,
 		    (unsigned long)session);
 
@@ -927,13 +955,13 @@
 	if (ret)
 		goto err_free;
 
-	l2cap_conn_get(session->conn);
 	get_file(session->intr_sock->file);
 	get_file(session->ctrl_sock->file);
 	*out = session;
 	return 0;
 
 err_free:
+	l2cap_conn_put(session->conn);
 	kfree(session);
 	return ret;
 }
@@ -1054,8 +1082,8 @@
  * Probe HIDP session
  * This is called from the l2cap_conn core when our l2cap_user object is bound
  * to the hci-connection. We get the session via the \user object and can now
- * start the session thread, register the HID/input devices and link it into
- * the global session list.
+ * start the session thread, link it into the global session list and
+ * schedule HID/input device registration.
  * The global session-list owns its own reference to the session object so you
  * can drop your own reference after registering the l2cap_user object.
  */
@@ -1077,21 +1105,30 @@
 		goto out_unlock;
 	}
 
+	if (session->input) {
+		ret = hidp_session_dev_add(session);
+		if (ret)
+			goto out_unlock;
+	}
+
 	ret = hidp_session_start_sync(session);
 	if (ret)
-		goto out_unlock;
+		goto out_del;
 
-	ret = hidp_session_dev_add(session);
-	if (ret)
-		goto out_stop;
+	/* HID device registration is async to allow I/O during probe */
+	if (session->input)
+		atomic_inc(&session->state);
+	else
+		schedule_work(&session->dev_init);
 
 	hidp_session_get(session);
 	list_add(&session->list, &hidp_session_list);
 	ret = 0;
 	goto out_unlock;
 
-out_stop:
-	hidp_session_terminate(session);
+out_del:
+	if (session->input)
+		hidp_session_dev_del(session);
 out_unlock:
 	up_write(&hidp_session_sem);
 	return ret;
@@ -1121,7 +1158,12 @@
 	down_write(&hidp_session_sem);
 
 	hidp_session_terminate(session);
-	hidp_session_dev_del(session);
+
+	cancel_work_sync(&session->dev_init);
+	if (session->input ||
+	    atomic_read(&session->state) > HIDP_SESSION_PREPARING)
+		hidp_session_dev_del(session);
+
 	list_del(&session->list);
 
 	up_write(&hidp_session_sem);
@@ -1253,23 +1295,29 @@
 static int hidp_verify_sockets(struct socket *ctrl_sock,
 			       struct socket *intr_sock)
 {
+	struct l2cap_chan *ctrl_chan, *intr_chan;
 	struct bt_sock *ctrl, *intr;
 	struct hidp_session *session;
 
 	if (!l2cap_is_socket(ctrl_sock) || !l2cap_is_socket(intr_sock))
 		return -EINVAL;
 
+	ctrl_chan = l2cap_pi(ctrl_sock->sk)->chan;
+	intr_chan = l2cap_pi(intr_sock->sk)->chan;
+
+	if (bacmp(&ctrl_chan->src, &intr_chan->src) ||
+	    bacmp(&ctrl_chan->dst, &intr_chan->dst))
+		return -ENOTUNIQ;
+
 	ctrl = bt_sk(ctrl_sock->sk);
 	intr = bt_sk(intr_sock->sk);
 
-	if (bacmp(&ctrl->src, &intr->src) || bacmp(&ctrl->dst, &intr->dst))
-		return -ENOTUNIQ;
 	if (ctrl->sk.sk_state != BT_CONNECTED ||
 	    intr->sk.sk_state != BT_CONNECTED)
 		return -EBADFD;
 
 	/* early session check, we check again during session registration */
-	session = hidp_session_find(&ctrl->dst);
+	session = hidp_session_find(&ctrl_chan->dst);
 	if (session) {
 		hidp_session_put(session);
 		return -EEXIST;
@@ -1284,25 +1332,24 @@
 {
 	struct hidp_session *session;
 	struct l2cap_conn *conn;
-	struct l2cap_chan *chan = l2cap_pi(ctrl_sock->sk)->chan;
+	struct l2cap_chan *chan;
 	int ret;
 
 	ret = hidp_verify_sockets(ctrl_sock, intr_sock);
 	if (ret)
 		return ret;
 
+	chan = l2cap_pi(ctrl_sock->sk)->chan;
 	conn = NULL;
 	l2cap_chan_lock(chan);
-	if (chan->conn) {
-		l2cap_conn_get(chan->conn);
-		conn = chan->conn;
-	}
+	if (chan->conn)
+		conn = l2cap_conn_get(chan->conn);
 	l2cap_chan_unlock(chan);
 
 	if (!conn)
 		return -EBADFD;
 
-	ret = hidp_session_new(&session, &bt_sk(ctrl_sock->sk)->dst, ctrl_sock,
+	ret = hidp_session_new(&session, &chan->dst, ctrl_sock,
 			       intr_sock, req, conn);
 	if (ret)
 		goto out_conn;
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index 6162ce8..20d5ea4 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -24,6 +24,7 @@
 #define __HIDP_H
 
 #include <linux/types.h>
+#include <linux/hid.h>
 #include <linux/kref.h>
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/l2cap.h>
@@ -128,6 +129,7 @@
 
 enum hidp_session_state {
 	HIDP_SESSION_IDLING,
+	HIDP_SESSION_PREPARING,
 	HIDP_SESSION_RUNNING,
 };
 
@@ -156,6 +158,7 @@
 	unsigned long idle_to;
 
 	/* device management */
+	struct work_struct dev_init;
 	struct input_dev *input;
 	struct hid_device *hid;
 	struct timer_list timer;
@@ -177,6 +180,9 @@
 
 	/* Used in hidp_output_raw_report() */
 	int output_report_success; /* boolean */
+
+	/* temporary input buffer */
+	u8 input_buf[HID_MAX_BUFFER_SIZE];
 };
 
 /* HIDP init defines */
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 68843a2..81372db 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -58,6 +58,18 @@
 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
 		     struct sk_buff_head *skbs, u8 event);
 
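+/* Map an hci_conn address type onto the socket-layer BDADDR_* constants */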
+static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
+{
+	if (hcon->type == LE_LINK) {
+		if (type == ADDR_LE_DEV_PUBLIC)
+			return BDADDR_LE_PUBLIC;
+		else
+			return BDADDR_LE_RANDOM;
+	}
+
+	return BDADDR_BREDR;
+}
+
 /* ---- L2CAP channels ---- */
 
 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
@@ -1364,6 +1376,8 @@
 
 	bacpy(&bt_sk(sk)->src, conn->src);
 	bacpy(&bt_sk(sk)->dst, conn->dst);
+	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
+	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
 
 	l2cap_chan_add(conn, chan);
 
@@ -1655,9 +1669,10 @@
 	kfree(conn);
 }
 
-void l2cap_conn_get(struct l2cap_conn *conn)
+struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
 {
 	kref_get(&conn->ref);
+	return conn;
 }
 EXPORT_SYMBOL(l2cap_conn_get);
 
@@ -1785,6 +1800,7 @@
 	/* Set destination address and psm */
 	lock_sock(sk);
 	bacpy(&bt_sk(sk)->dst, dst);
+	chan->dst_type = dst_type;
 	release_sock(sk);
 
 	chan->psm = psm;
@@ -1793,10 +1809,10 @@
 	auth_type = l2cap_get_auth_type(chan);
 
 	if (chan->dcid == L2CAP_CID_LE_DATA)
-		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
+		hcon = hci_connect(hdev, LE_LINK, 0, dst, dst_type,
 				   chan->sec_level, auth_type);
 	else
-		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
+		hcon = hci_connect(hdev, ACL_LINK, 0, dst, dst_type,
 				   chan->sec_level, auth_type);
 
 	if (IS_ERR(hcon)) {
@@ -1825,6 +1841,7 @@
 
 	/* Update source addr of the socket */
 	bacpy(src, conn->src);
+	chan->src_type = bdaddr_type(hcon, hcon->src_type);
 
 	l2cap_chan_unlock(chan);
 	l2cap_chan_add(conn, chan);
@@ -3755,6 +3772,8 @@
 
 	bacpy(&bt_sk(sk)->src, conn->src);
 	bacpy(&bt_sk(sk)->dst, conn->dst);
+	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
+	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
 	chan->psm  = psm;
 	chan->dcid = scid;
 	chan->local_amp_id = amp_id;
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 4b966c6..1a9b151 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -103,7 +103,8 @@
 	    __le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM)
 		chan->sec_level = BT_SECURITY_SDP;
 
-	bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
+	bacpy(&chan->src, &la.l2_bdaddr);
+	chan->src_type = la.l2_bdaddr_type;
 
 	chan->state = BT_BOUND;
 	sk->sk_state = BT_BOUND;
@@ -267,10 +268,12 @@
 		la->l2_psm = chan->psm;
 		bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
 		la->l2_cid = cpu_to_le16(chan->dcid);
+		la->l2_bdaddr_type = chan->dst_type;
 	} else {
 		la->l2_psm = chan->sport;
 		bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
 		la->l2_cid = cpu_to_le16(chan->scid);
+		la->l2_bdaddr_type = chan->src_type;
 	}
 
 	return 0;
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 8208a13..3e57454 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2205,10 +2205,10 @@
 		auth_type = HCI_AT_DEDICATED_BONDING_MITM;
 
 	if (cp->addr.type == BDADDR_BREDR)
-		conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
+		conn = hci_connect(hdev, ACL_LINK, 0, &cp->addr.bdaddr,
 				   cp->addr.type, sec_level, auth_type);
 	else
-		conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
+		conn = hci_connect(hdev, LE_LINK, 0, &cp->addr.bdaddr,
 				   cp->addr.type, sec_level, auth_type);
 
 	if (IS_ERR(conn)) {
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 19ba192..3ca5e40 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -436,7 +436,6 @@
 
 	switch (d->state) {
 	case BT_CONNECT:
-	case BT_CONFIG:
 		if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
 			set_bit(RFCOMM_AUTH_REJECT, &d->flags);
 			rfcomm_schedule();
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index b586a32..2443a4f 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -158,6 +158,7 @@
 {
 	bdaddr_t *src = &bt_sk(sk)->src;
 	bdaddr_t *dst = &bt_sk(sk)->dst;
+	__u16 pkt_type = sco_pi(sk)->pkt_type;
 	struct sco_conn *conn;
 	struct hci_conn *hcon;
 	struct hci_dev  *hdev;
@@ -173,11 +174,13 @@
 
 	if (lmp_esco_capable(hdev) && !disable_esco)
 		type = ESCO_LINK;
-	else
+	else {
 		type = SCO_LINK;
+		pkt_type &= SCO_ESCO_MASK;
+	}
 
-	hcon = hci_connect(hdev, type, dst, BDADDR_BREDR, BT_SECURITY_LOW,
-			   HCI_AT_NO_BONDING);
+	hcon = hci_connect(hdev, type, pkt_type, dst, BDADDR_BREDR,
+			   BT_SECURITY_LOW, HCI_AT_NO_BONDING);
 	if (IS_ERR(hcon)) {
 		err = PTR_ERR(hcon);
 		goto done;
@@ -445,19 +448,22 @@
 	return 0;
 }
 
-static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
 {
-	struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
+	struct sockaddr_sco sa;
 	struct sock *sk = sock->sk;
-	int err = 0;
+	int len, err = 0;
 
-	BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
-
 	if (!addr || addr->sa_family != AF_BLUETOOTH)
 		return -EINVAL;
 
-	if (addr_len < sizeof(struct sockaddr_sco))
+	if (alen < sizeof(struct sockaddr_sco))
 		return -EINVAL;
+	memset(&sa, 0, sizeof(sa));
+	len = min_t(unsigned int, sizeof(sa), alen);
+	memcpy(&sa, addr, len);
+
+	BT_DBG("sk %p %pMR", sk, &sa.sco_bdaddr);
 
 	lock_sock(sk);
 
@@ -471,7 +477,8 @@
 		goto done;
 	}
 
-	bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
+	bacpy(&bt_sk(sk)->src, &sa.sco_bdaddr);
+	sco_pi(sk)->pkt_type = sa.sco_pkt_type;
 
 	sk->sk_state = BT_BOUND;
 
@@ -482,26 +489,34 @@
 
 static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
 {
-	struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
 	struct sock *sk = sock->sk;
-	int err;
+	struct sockaddr_sco sa;
+	int len, err;
 
 	BT_DBG("sk %p", sk);
 
-	if (alen < sizeof(struct sockaddr_sco) ||
-	    addr->sa_family != AF_BLUETOOTH)
+	if (!addr || addr->sa_family != AF_BLUETOOTH)
 		return -EINVAL;
 
-	if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
-		return -EBADFD;
-
-	if (sk->sk_type != SOCK_SEQPACKET)
-		return -EINVAL;
+	memset(&sa, 0, sizeof(sa));
+	len = min_t(unsigned int, sizeof(sa), alen);
+	memcpy(&sa, addr, len);
 
 	lock_sock(sk);
 
+	if (sk->sk_type != SOCK_SEQPACKET) {
+		err = -EINVAL;
+		goto done;
+	}
+
+	if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
+		err = -EBADFD;
+		goto done;
+	}
+
 	/* Set destination address and psm */
-	bacpy(&bt_sk(sk)->dst, &sa->sco_bdaddr);
+	bacpy(&bt_sk(sk)->dst, &sa.sco_bdaddr);
+	sco_pi(sk)->pkt_type = sa.sco_pkt_type;
 
 	err = sco_connect(sk);
 	if (err)
@@ -625,6 +640,7 @@
 		bacpy(&sa->sco_bdaddr, &bt_sk(sk)->dst);
 	else
 		bacpy(&sa->sco_bdaddr, &bt_sk(sk)->src);
+	sa->sco_pkt_type = sco_pi(sk)->pkt_type;
 
 	return 0;
 }
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 9673128..239e0e8 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -41,11 +41,6 @@
 	}
 #endif
 
-	u64_stats_update_begin(&brstats->syncp);
-	brstats->tx_packets++;
-	brstats->tx_bytes += skb->len;
-	u64_stats_update_end(&brstats->syncp);
-
 	if (!br_allowed_ingress(br, br_get_vlan_info(br), skb, &vid))
 		goto out;
 
@@ -54,6 +49,12 @@
 	skb_reset_mac_header(skb);
 	skb_pull(skb, ETH_HLEN);
 
+	u64_stats_update_begin(&brstats->syncp);
+	brstats->tx_packets++;
+	/* Exclude ETH_HLEN from byte stats for consistency with Rx chain */
+	brstats->tx_bytes += skb->len;
+	u64_stats_update_end(&brstats->syncp);
+
 	if (is_broadcast_ether_addr(dest))
 		br_flood_deliver(br, skb);
 	else if (is_multicast_ether_addr(dest)) {
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 84340a2..ba1b50e 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -31,6 +31,8 @@
 	r->pref = pref;
 	r->table = table;
 	r->flags = flags;
+	r->uid_start = INVALID_UID;
+	r->uid_end = INVALID_UID;
 	r->fr_net = hold_net(ops->fro_net);
 
 	/* The lock is not required here, the list is unreachable
@@ -179,6 +181,23 @@
 }
 EXPORT_SYMBOL_GPL(fib_rules_unregister);
 
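+/* Convert a netlink u32 UID attribute into a kuid in the current user ns */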
+static inline kuid_t fib_nl_uid(struct nlattr *nla)
+{
+	return make_kuid(current_user_ns(), nla_get_u32(nla));
+}
+
+static int nla_put_uid(struct sk_buff *skb, int idx, kuid_t uid)
+{
+	return nla_put_u32(skb, idx, from_kuid_munged(current_user_ns(), uid));
+}
+
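+/* A rule with no UID range matches every flow; otherwise the flow's UID
+ * must lie within [uid_start, uid_end], both inclusive.
+ */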
+static int fib_uid_range_match(struct flowi *fl, struct fib_rule *rule)
+{
+	return (!uid_valid(rule->uid_start) && !uid_valid(rule->uid_end)) ||
+	       (uid_gte(fl->flowi_uid, rule->uid_start) &&
+		uid_lte(fl->flowi_uid, rule->uid_end));
+}
+
 static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
 			  struct flowi *fl, int flags)
 {
@@ -193,6 +212,9 @@
 	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
 		goto out;
 
+	if (!fib_uid_range_match(fl, rule))
+		goto out;
+
 	ret = ops->match(rule, fl, flags);
 out:
 	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
@@ -363,6 +385,19 @@
 	} else if (rule->action == FR_ACT_GOTO)
 		goto errout_free;
 
+	/* UID start and end must either both be valid or both unspecified. */
+	rule->uid_start = rule->uid_end = INVALID_UID;
+	if (tb[FRA_UID_START] || tb[FRA_UID_END]) {
+		if (tb[FRA_UID_START] && tb[FRA_UID_END]) {
+			rule->uid_start = fib_nl_uid(tb[FRA_UID_START]);
+			rule->uid_end = fib_nl_uid(tb[FRA_UID_END]);
+		}
+		if (!uid_valid(rule->uid_start) ||
+		    !uid_valid(rule->uid_end) ||
+		    !uid_lte(rule->uid_start, rule->uid_end))
+			goto errout_free;
+	}
+
 	err = ops->configure(rule, skb, frh, tb);
 	if (err < 0)
 		goto errout_free;
@@ -469,6 +504,14 @@
 		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
 			continue;
 
+		if (tb[FRA_UID_START] &&
+		    !uid_eq(rule->uid_start, fib_nl_uid(tb[FRA_UID_START])))
+			continue;
+
+		if (tb[FRA_UID_END] &&
+		    !uid_eq(rule->uid_end, fib_nl_uid(tb[FRA_UID_END])))
+			continue;
+
 		if (!ops->compare(rule, frh, tb))
 			continue;
 
@@ -525,7 +568,9 @@
 			 + nla_total_size(4) /* FRA_PRIORITY */
 			 + nla_total_size(4) /* FRA_TABLE */
 			 + nla_total_size(4) /* FRA_FWMARK */
-			 + nla_total_size(4); /* FRA_FWMASK */
+			 + nla_total_size(4) /* FRA_FWMASK */
+			 + nla_total_size(4) /* FRA_UID_START */
+			 + nla_total_size(4); /* FRA_UID_END */
 
 	if (ops->nlmsg_payload)
 		payload += ops->nlmsg_payload(rule);
@@ -579,7 +624,11 @@
 	    ((rule->mark_mask || rule->mark) &&
 	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
 	    (rule->target &&
-	     nla_put_u32(skb, FRA_GOTO, rule->target)))
+	     nla_put_u32(skb, FRA_GOTO, rule->target)) ||
+	    (uid_valid(rule->uid_start) &&
+	     nla_put_uid(skb, FRA_UID_START, rule->uid_start)) ||
+	    (uid_valid(rule->uid_end) &&
+	     nla_put_uid(skb, FRA_UID_END, rule->uid_end)))
 		goto nla_put_failure;
 	if (ops->fill(rule, skb, frh) < 0)
 		goto nla_put_failure;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index b49e8ba..cdd7716 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -928,6 +928,7 @@
 			neigh->nud_state = NUD_PROBE;
 			neigh->updated = jiffies;
 			atomic_set(&neigh->probes, 0);
+			notify = 1;
 			next = now + neigh->parms->retrans_time;
 		}
 	} else {
@@ -1155,6 +1156,8 @@
 
 	if (new != old) {
 		neigh_del_timer(neigh);
+		if (new & NUD_PROBE)
+			atomic_set(&neigh->probes, 0);
 		if (new & NUD_IN_TIMER)
 			neigh_add_timer(neigh, (jiffies +
 						((new & NUD_REACHABLE) ?
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 089cb9f..5a9af0a 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -15,6 +15,7 @@
 
 obj-$(CONFIG_NET_IP_TUNNEL) += ip_tunnel.o
 obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o
+obj-$(CONFIG_SYSFS) += sysfs_net_ipv4.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
 obj-$(CONFIG_IP_MROUTE) += ipmr.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 975c369..14ad21e 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -119,6 +119,19 @@
 #include <linux/mroute.h>
 #endif
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+
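+/* Only processes in the AID_INET group or with CAP_NET_RAW may create
+ * inet sockets when paranoid networking is enabled.
+ */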
+static inline int current_has_network(void)
+{
+	return in_egroup_p(AID_INET) || capable(CAP_NET_RAW);
+}
+#else
+static inline int current_has_network(void)
+{
+	return 1;
+}
+#endif
 
 /* The inetsw table contains everything that inet_create needs to
  * build a new socket.
@@ -284,6 +297,9 @@
 	int try_loading_module = 0;
 	int err;
 
+	if (!current_has_network())
+		return -EACCES;
+
 	if (unlikely(!inet_ehash_secret))
 		if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
 			build_ehash_secret();
@@ -339,8 +355,7 @@
 	}
 
 	err = -EPERM;
-	if (sock->type == SOCK_RAW && !kern &&
-	    !ns_capable(net->user_ns, CAP_NET_RAW))
+	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
 		goto out_rcu_unlock;
 
 	sock->ops = answer->ops;
@@ -908,6 +923,7 @@
 	case SIOCSIFPFLAGS:
 	case SIOCGIFPFLAGS:
 	case SIOCSIFFLAGS:
+	case SIOCKILLADDR:
 		err = devinet_ioctl(net, cmd, (void __user *)arg);
 		break;
 	default:
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 247ec19..791c19f 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -912,7 +912,14 @@
 		   agents are active. Taking the first reply prevents
 		   arp trashing and chooses the fastest router.
 		 */
-		override = time_after(jiffies, n->updated + n->parms->locktime);
+		/*
+		 * If n->updated is after jiffies, the clock has wrapped and
+		 * we are *well* past the locktime, so set the override flag.
+		 */
+		if (time_after(n->updated, jiffies))
+			override = 1;
+		else
+			override = time_after(jiffies, n->updated + n->parms->locktime);
 
 		/* Broadcast replies and request packets
 		   do not assert neighbour reachability.
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index e40eef4..b151e0a 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -59,6 +59,7 @@
 
 #include <net/arp.h>
 #include <net/ip.h>
+#include <net/tcp.h>
 #include <net/route.h>
 #include <net/ip_fib.h>
 #include <net/rtnetlink.h>
@@ -918,6 +919,7 @@
 	case SIOCSIFBRDADDR:	/* Set the broadcast address */
 	case SIOCSIFDSTADDR:	/* Set the destination address */
 	case SIOCSIFNETMASK: 	/* Set the netmask for the interface */
+	case SIOCKILLADDR:	/* Nuke all sockets on this address */
 		ret = -EPERM;
 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			goto out;
@@ -969,7 +971,8 @@
 	}
 
 	ret = -EADDRNOTAVAIL;
-	if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS)
+	if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS
+	    && cmd != SIOCKILLADDR)
 		goto done;
 
 	switch (cmd) {
@@ -1096,6 +1099,9 @@
 			inet_insert_ifa(ifa);
 		}
 		break;
+	case SIOCKILLADDR:	/* Nuke all connections on this address */
+		ret = tcp_nuke_addr(net, (struct sockaddr *) sin);
+		break;
 	}
 done:
 	rtnl_unlock();
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 4556cd2..4a7cf03 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -250,7 +250,7 @@
 	bool dev_match;
 
 	fl4.flowi4_oif = 0;
-	fl4.flowi4_iif = oif;
+	fl4.flowi4_iif = oif ? : LOOPBACK_IFINDEX;
 	fl4.daddr = src;
 	fl4.saddr = dst;
 	fl4.flowi4_tos = tos;
@@ -531,6 +531,7 @@
 	[RTA_METRICS]		= { .type = NLA_NESTED },
 	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
 	[RTA_FLOW]		= { .type = NLA_U32 },
+	[RTA_UID]		= { .type = NLA_U32 },
 };
 
 static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index bc773a1..d00d8f9 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -629,6 +629,7 @@
 				.daddr = nh->nh_gw,
 				.flowi4_scope = cfg->fc_scope + 1,
 				.flowi4_oif = nh->nh_oif,
+				.flowi4_iif = LOOPBACK_IFINDEX,
 			};
 
 			/* It is not necessary, but requires a bit of thinking */
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index ea78ef5..5af8781 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -337,6 +337,7 @@
 	struct sock *sk;
 	struct inet_sock *inet;
 	__be32 daddr, saddr;
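+	/* Reflect the incoming packet's fwmark on the reply when enabled */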
+	u32 mark = IP4_REPLY_MARK(net, skb->mark);
 
 	if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb))
 		return;
@@ -349,6 +350,7 @@
 	icmp_param->data.icmph.checksum = 0;
 
 	inet->tos = ip_hdr(skb)->tos;
+	sk->sk_mark = mark;
 	daddr = ipc.addr = ip_hdr(skb)->saddr;
 	saddr = fib_compute_spec_dst(skb);
 	ipc.opt = NULL;
@@ -361,6 +363,7 @@
 	memset(&fl4, 0, sizeof(fl4));
 	fl4.daddr = daddr;
 	fl4.saddr = saddr;
+	fl4.flowi4_mark = mark;
 	fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
 	fl4.flowi4_proto = IPPROTO_ICMP;
 	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
@@ -379,7 +382,7 @@
 					struct flowi4 *fl4,
 					struct sk_buff *skb_in,
 					const struct iphdr *iph,
-					__be32 saddr, u8 tos,
+					__be32 saddr, u8 tos, u32 mark,
 					int type, int code,
 					struct icmp_bxm *param)
 {
@@ -391,6 +394,7 @@
 	fl4->daddr = (param->replyopts.opt.opt.srr ?
 		      param->replyopts.opt.opt.faddr : iph->saddr);
 	fl4->saddr = saddr;
+	fl4->flowi4_mark = mark;
 	fl4->flowi4_tos = RT_TOS(tos);
 	fl4->flowi4_proto = IPPROTO_ICMP;
 	fl4->fl4_icmp_type = type;
@@ -488,6 +492,7 @@
 	struct flowi4 fl4;
 	__be32 saddr;
 	u8  tos;
+	u32 mark;
 	struct net *net;
 	struct sock *sk;
 
@@ -584,6 +589,7 @@
 	tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) |
 					   IPTOS_PREC_INTERNETCONTROL) :
 					  iph->tos;
+	mark = IP4_REPLY_MARK(net, skb_in->mark);
 
 	if (ip_options_echo(&icmp_param.replyopts.opt.opt, skb_in))
 		goto out_unlock;
@@ -600,11 +606,12 @@
 	icmp_param.skb	  = skb_in;
 	icmp_param.offset = skb_network_offset(skb_in);
 	inet_sk(sk)->tos = tos;
+	sk->sk_mark = mark;
 	ipc.addr = iph->saddr;
 	ipc.opt = &icmp_param.replyopts.opt;
 	ipc.tx_flags = 0;
 
-	rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos,
+	rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark,
 			       type, code, &icmp_param);
 	if (IS_ERR(rt))
 		goto out_unlock;
@@ -937,7 +944,8 @@
 void icmp_err(struct sk_buff *skb, u32 info)
 {
 	struct iphdr *iph = (struct iphdr *)skb->data;
-	struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2));
+	int offset = iph->ihl<<2;
+	struct icmphdr *icmph = (struct icmphdr *)(skb->data + offset);
 	int type = icmp_hdr(skb)->type;
 	int code = icmp_hdr(skb)->code;
 	struct net *net = dev_net(skb->dev);
@@ -947,7 +955,7 @@
 	 * triggered by ICMP_ECHOREPLY which sent from kernel.
 	 */
 	if (icmph->type != ICMP_ECHOREPLY) {
-		ping_err(skb, info);
+		ping_err(skb, offset, info);
 		return;
 	}
 
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 6acb541..6dfec2f 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -417,12 +417,13 @@
 	struct net *net = sock_net(sk);
 	int flags = inet_sk_flowi_flags(sk);
 
-	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
+	flowi4_init_output(fl4, sk->sk_bound_dev_if, ireq->ir_mark,
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 			   sk->sk_protocol,
 			   flags,
 			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
-			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
+			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport,
+			   sock_i_uid(sk));
 	security_req_classify_flow(req, flowi4_to_flowi(fl4));
 	rt = ip_route_output_flow(net, fl4, sk);
 	if (IS_ERR(rt))
@@ -454,11 +455,12 @@
 
 	rcu_read_lock();
 	opt = rcu_dereference(newinet->inet_opt);
-	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
+	flowi4_init_output(fl4, sk->sk_bound_dev_if, inet_rsk(req)->ir_mark,
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 			   sk->sk_protocol, inet_sk_flowi_flags(sk),
 			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
-			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
+			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport,
+			   sock_i_uid(sk));
 	security_req_classify_flow(req, flowi4_to_flowi(fl4));
 	rt = ip_route_output_flow(net, fl4, sk);
 	if (IS_ERR(rt))
@@ -688,6 +690,8 @@
 		inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port;
 		newsk->sk_write_space = sk_stream_write_space;
 
+		newsk->sk_mark = inet_rsk(req)->ir_mark;
+
 		newicsk->icsk_retransmits = 0;
 		newicsk->icsk_backoff	  = 0;
 		newicsk->icsk_probes_out  = 0;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 5f077ef..650873c 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1485,12 +1485,14 @@
 			daddr = replyopts.opt.opt.faddr;
 	}
 
-	flowi4_init_output(&fl4, arg->bound_dev_if, 0,
+	flowi4_init_output(&fl4, arg->bound_dev_if,
+			   IP4_REPLY_MARK(net, skb->mark),
 			   RT_TOS(arg->tos),
 			   RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
 			   ip_reply_arg_flowi_flags(arg),
 			   daddr, saddr,
-			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
+			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
+			   arg->uid);
 	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
 	rt = ip_route_output_key(net, &fl4);
 	if (IS_ERR(rt))
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index a429ac6..91941b0 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -454,7 +454,7 @@
 	struct mr_table *mrt;
 	struct flowi4 fl4 = {
 		.flowi4_oif	= dev->ifindex,
-		.flowi4_iif	= skb->skb_iif,
+		.flowi4_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
 		.flowi4_mark	= skb->mark,
 	};
 	int err;
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index e7916c1..23dfd4a 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -110,6 +110,18 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config IP_NF_TARGET_REJECT_SKERR
+	bool "Force socket error when rejecting with icmp*"
+	depends on IP_NF_TARGET_REJECT
+	default n
+	help
+	  This option also turns a "--reject-with icmp*" into a matching
+	  socket error.
+	  The REJECT target normally only sends an ICMP message and leaves
+	  the local socket unaware of any ingress rejects.
+
+	  If unsure, say N.
+
 config IP_NF_TARGET_ULOG
 	tristate "ULOG target support"
 	default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 04b18c1..452e8a5 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -129,6 +129,14 @@
 static inline void send_unreach(struct sk_buff *skb_in, int code)
 {
 	icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
+#ifdef CONFIG_IP_NF_TARGET_REJECT_SKERR
+	if (skb_in->sk) {
+		skb_in->sk->sk_err = icmp_err_convert[code].errno;
+		skb_in->sk->sk_error_report(skb_in->sk);
+		pr_debug("ipt_REJECT: sk_err=%d for skb=%p sk=%p\n",
+			skb_in->sk->sk_err, skb_in, skb_in->sk);
+	}
+#endif
 }
 
 static unsigned int
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index c49dcd0..4bfaedf 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -89,11 +89,8 @@
 	if (ipv4_is_multicast(iph->daddr)) {
 		if (ipv4_is_zeronet(iph->saddr))
 			return ipv4_is_local_multicast(iph->daddr) ^ invert;
-		flow.flowi4_iif = 0;
-	} else {
-		flow.flowi4_iif = LOOPBACK_IFINDEX;
 	}
-
+	flow.flowi4_iif = LOOPBACK_IFINDEX;
 	flow.daddr = iph->saddr;
 	flow.saddr = rpfilter_get_saddr(iph->daddr);
 	flow.flowi4_oif = 0;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 459b957..ab3d86a 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -33,7 +33,6 @@
 #include <linux/netdevice.h>
 #include <net/snmp.h>
 #include <net/ip.h>
-#include <net/ipv6.h>
 #include <net/icmp.h>
 #include <net/protocol.h>
 #include <linux/skbuff.h>
@@ -46,8 +45,18 @@
 #include <net/inet_common.h>
 #include <net/checksum.h>
 
+#if IS_ENABLED(CONFIG_IPV6)
+#include <linux/in6.h>
+#include <linux/icmpv6.h>
+#include <net/addrconf.h>
+#include <net/ipv6.h>
+#include <net/transp_v6.h>
+#endif
 
-static struct ping_table ping_table;
+
+struct ping_table ping_table;
+struct pingv6_ops pingv6_ops;
+EXPORT_SYMBOL_GPL(pingv6_ops);
 
 static u16 ping_port_rover;
 
@@ -58,6 +67,7 @@
 	pr_debug("hash(%d) = %d\n", num, res);
 	return res;
 }
+EXPORT_SYMBOL_GPL(ping_hash);
 
 static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table,
 					     struct net *net, unsigned int num)
@@ -65,7 +75,7 @@
 	return &table->hash[ping_hashfn(net, num, PING_HTABLE_MASK)];
 }
 
-static int ping_v4_get_port(struct sock *sk, unsigned short ident)
+int ping_get_port(struct sock *sk, unsigned short ident)
 {
 	struct hlist_nulls_node *node;
 	struct hlist_nulls_head *hlist;
@@ -103,6 +113,10 @@
 		ping_portaddr_for_each_entry(sk2, node, hlist) {
 			isk2 = inet_sk(sk2);
 
+			/* BUG? Why is this reuse and not reuseaddr? ping.c
+			 * doesn't turn off SO_REUSEADDR, and it doesn't expect
+			 * that other ping processes can steal its packets.
+			 */
 			if ((isk2->inet_num == ident) &&
 			    (sk2 != sk) &&
 			    (!sk2->sk_reuse || !sk->sk_reuse))
@@ -125,17 +139,18 @@
 	write_unlock_bh(&ping_table.lock);
 	return 1;
 }
+EXPORT_SYMBOL_GPL(ping_get_port);
 
-static void ping_v4_hash(struct sock *sk)
+void ping_hash(struct sock *sk)
 {
-	pr_debug("ping_v4_hash(sk->port=%u)\n", inet_sk(sk)->inet_num);
+	pr_debug("ping_hash(sk->port=%u)\n", inet_sk(sk)->inet_num);
 	BUG(); /* "Please do not press this button again." */
 }
 
-static void ping_v4_unhash(struct sock *sk)
+void ping_unhash(struct sock *sk)
 {
 	struct inet_sock *isk = inet_sk(sk);
-	pr_debug("ping_v4_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
+	pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
 	if (sk_hashed(sk)) {
 		write_lock_bh(&ping_table.lock);
 		hlist_nulls_del(&sk->sk_nulls_node);
@@ -147,31 +162,63 @@
 		write_unlock_bh(&ping_table.lock);
 	}
 }
+EXPORT_SYMBOL_GPL(ping_unhash);
 
-static struct sock *ping_v4_lookup(struct net *net, __be32 saddr, __be32 daddr,
-				   u16 ident, int dif)
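+/* Find the ping socket bound to @ident whose family, local address and
+ * bound device (if any) match the incoming skb.
+ */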
+static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
 {
 	struct hlist_nulls_head *hslot = ping_hashslot(&ping_table, net, ident);
 	struct sock *sk = NULL;
 	struct inet_sock *isk;
 	struct hlist_nulls_node *hnode;
+	int dif = skb->dev->ifindex;
 
-	pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n",
-		 (int)ident, &daddr, dif);
+	if (skb->protocol == htons(ETH_P_IP)) {
+		pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n",
+			 (int)ident, &ip_hdr(skb)->daddr, dif);
+#if IS_ENABLED(CONFIG_IPV6)
+	} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		pr_debug("try to find: num = %d, daddr = %pI6c, dif = %d\n",
+			 (int)ident, &ipv6_hdr(skb)->daddr, dif);
+#endif
+	}
+
 	read_lock_bh(&ping_table.lock);
 
 	ping_portaddr_for_each_entry(sk, hnode, hslot) {
 		isk = inet_sk(sk);
 
-		pr_debug("found: %p: num = %d, daddr = %pI4, dif = %d\n", sk,
-			 (int)isk->inet_num, &isk->inet_rcv_saddr,
-			 sk->sk_bound_dev_if);
-
 		pr_debug("iterate\n");
 		if (isk->inet_num != ident)
 			continue;
-		if (isk->inet_rcv_saddr && isk->inet_rcv_saddr != daddr)
+
+		if (skb->protocol == htons(ETH_P_IP) &&
+		    sk->sk_family == AF_INET) {
+			pr_debug("found: %p: num=%d, daddr=%pI4, dif=%d\n", sk,
+				 (int) isk->inet_num, &isk->inet_rcv_saddr,
+				 sk->sk_bound_dev_if);
+
+			if (isk->inet_rcv_saddr &&
+			    isk->inet_rcv_saddr != ip_hdr(skb)->daddr)
+				continue;
+#if IS_ENABLED(CONFIG_IPV6)
+		} else if (skb->protocol == htons(ETH_P_IPV6) &&
+			   sk->sk_family == AF_INET6) {
+			struct ipv6_pinfo *np = inet6_sk(sk);
+
+			pr_debug("found: %p: num=%d, daddr=%pI6c, dif=%d\n", sk,
+				 (int) isk->inet_num,
+				 &inet6_sk(sk)->rcv_saddr,
+				 sk->sk_bound_dev_if);
+
+			if (!ipv6_addr_any(&np->rcv_saddr) &&
+			    !ipv6_addr_equal(&np->rcv_saddr,
+					     &ipv6_hdr(skb)->daddr))
+				continue;
+#endif
+		} else {
 			continue;
+		}
+
 		if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
 			continue;
 
@@ -201,7 +248,7 @@
 }
 
 
-static int ping_init_sock(struct sock *sk)
+int ping_init_sock(struct sock *sk)
 {
 	struct net *net = sock_net(sk);
 	kgid_t group = current_egid();
@@ -210,6 +257,9 @@
 	kgid_t low, high;
 	int ret = 0;
 
+	if (sk->sk_family == AF_INET6)
+		inet6_sk(sk)->ipv6only = 1;
+
 	inet_get_ping_group_range_net(net, &low, &high);
 	if (gid_lte(low, group) && gid_lte(group, high))
 		return 0;
@@ -233,8 +283,9 @@
 	put_group_info(group_info);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ping_init_sock);
 
-static void ping_close(struct sock *sk, long timeout)
+void ping_close(struct sock *sk, long timeout)
 {
 	pr_debug("ping_close(sk=%p,sk->num=%u)\n",
 		 inet_sk(sk), inet_sk(sk)->inet_num);
@@ -242,36 +293,130 @@
 
 	sk_common_release(sk);
 }
+EXPORT_SYMBOL_GPL(ping_close);
 
+/* Checks the bind address and possibly modifies sk->sk_bound_dev_if. */
+int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
+			 struct sockaddr *uaddr, int addr_len)
+{
+	struct net *net = sock_net(sk);
+	if (sk->sk_family == AF_INET) {
+		struct sockaddr_in *addr = (struct sockaddr_in *) uaddr;
+		int chk_addr_ret;
+
+		if (addr_len < sizeof(*addr))
+			return -EINVAL;
+
+		if (addr->sin_family != AF_INET &&
+		    !(addr->sin_family == AF_UNSPEC &&
+		      addr->sin_addr.s_addr == htonl(INADDR_ANY)))
+			return -EAFNOSUPPORT;
+
+		pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
+			 sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
+
+		chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr);
+
+		if (addr->sin_addr.s_addr == htonl(INADDR_ANY))
+			chk_addr_ret = RTN_LOCAL;
+
+		if ((sysctl_ip_nonlocal_bind == 0 &&
+		    isk->freebind == 0 && isk->transparent == 0 &&
+		     chk_addr_ret != RTN_LOCAL) ||
+		    chk_addr_ret == RTN_MULTICAST ||
+		    chk_addr_ret == RTN_BROADCAST)
+			return -EADDRNOTAVAIL;
+
+#if IS_ENABLED(CONFIG_IPV6)
+	} else if (sk->sk_family == AF_INET6) {
+		struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr;
+		int addr_type, scoped, has_addr;
+		struct net_device *dev = NULL;
+
+		if (addr_len < sizeof(*addr))
+			return -EINVAL;
+
+		if (addr->sin6_family != AF_INET6)
+			return -EAFNOSUPPORT;
+
+		pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n",
+			 sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port));
+
+		addr_type = ipv6_addr_type(&addr->sin6_addr);
+		scoped = __ipv6_addr_needs_scope_id(addr_type);
+		if ((addr_type != IPV6_ADDR_ANY &&
+		     !(addr_type & IPV6_ADDR_UNICAST)) ||
+		    (scoped && !addr->sin6_scope_id))
+			return -EINVAL;
+
+		rcu_read_lock();
+		if (addr->sin6_scope_id) {
+			dev = dev_get_by_index_rcu(net, addr->sin6_scope_id);
+			if (!dev) {
+				rcu_read_unlock();
+				return -ENODEV;
+			}
+		}
+		has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
+						    scoped);
+		rcu_read_unlock();
+
+		if (!(isk->freebind || isk->transparent || has_addr ||
+		      addr_type == IPV6_ADDR_ANY))
+			return -EADDRNOTAVAIL;
+
+		if (scoped)
+			sk->sk_bound_dev_if = addr->sin6_scope_id;
+#endif
+	} else {
+		return -EAFNOSUPPORT;
+	}
+	return 0;
+}
+
+void ping_set_saddr(struct sock *sk, struct sockaddr *saddr)
+{
+	if (saddr->sa_family == AF_INET) {
+		struct inet_sock *isk = inet_sk(sk);
+		struct sockaddr_in *addr = (struct sockaddr_in *) saddr;
+		isk->inet_rcv_saddr = isk->inet_saddr = addr->sin_addr.s_addr;
+#if IS_ENABLED(CONFIG_IPV6)
+	} else if (saddr->sa_family == AF_INET6) {
+		struct sockaddr_in6 *addr = (struct sockaddr_in6 *) saddr;
+		struct ipv6_pinfo *np = inet6_sk(sk);
+		np->rcv_saddr = np->saddr = addr->sin6_addr;
+#endif
+	}
+}
+
+void ping_clear_saddr(struct sock *sk, int dif)
+{
+	sk->sk_bound_dev_if = dif;
+	if (sk->sk_family == AF_INET) {
+		struct inet_sock *isk = inet_sk(sk);
+		isk->inet_rcv_saddr = isk->inet_saddr = 0;
+#if IS_ENABLED(CONFIG_IPV6)
+	} else if (sk->sk_family == AF_INET6) {
+		struct ipv6_pinfo *np = inet6_sk(sk);
+		memset(&np->rcv_saddr, 0, sizeof(np->rcv_saddr));
+		memset(&np->saddr, 0, sizeof(np->saddr));
+#endif
+	}
+}
+
 /*
  * We need our own bind because there are no privileged id's == local ports.
  * Moreover, we don't allow binding to multi- and broadcast addresses.
  */
 
-static int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
-	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
 	struct inet_sock *isk = inet_sk(sk);
 	unsigned short snum;
-	int chk_addr_ret;
 	int err;
+	int dif = sk->sk_bound_dev_if;
 
-	if (addr_len < sizeof(struct sockaddr_in))
-		return -EINVAL;
-
-	pr_debug("ping_v4_bind(sk=%p,sa_addr=%08x,sa_port=%d)\n",
-		 sk, addr->sin_addr.s_addr, ntohs(addr->sin_port));
-
-	chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
-	if (addr->sin_addr.s_addr == htonl(INADDR_ANY))
-		chk_addr_ret = RTN_LOCAL;
-
-	if ((sysctl_ip_nonlocal_bind == 0 &&
-	    isk->freebind == 0 && isk->transparent == 0 &&
-	     chk_addr_ret != RTN_LOCAL) ||
-	    chk_addr_ret == RTN_MULTICAST ||
-	    chk_addr_ret == RTN_BROADCAST)
-		return -EADDRNOTAVAIL;
+	err = ping_check_bind_addr(sk, isk, uaddr, addr_len);
+	if (err)
+		return err;
 
 	lock_sock(sk);
 
@@ -280,42 +425,50 @@
 		goto out;
 
 	err = -EADDRINUSE;
-	isk->inet_rcv_saddr = isk->inet_saddr = addr->sin_addr.s_addr;
-	snum = ntohs(addr->sin_port);
-	if (ping_v4_get_port(sk, snum) != 0) {
-		isk->inet_saddr = isk->inet_rcv_saddr = 0;
+	ping_set_saddr(sk, uaddr);
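+	/* sin_port and sin6_port sit at the same offset in their
+	 * respective sockaddr structures, so this read is also correct
+	 * for AF_INET6.
+	 */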
+	snum = ntohs(((struct sockaddr_in *)uaddr)->sin_port);
+	if (ping_get_port(sk, snum) != 0) {
+		ping_clear_saddr(sk, dif);
 		goto out;
 	}
 
-	pr_debug("after bind(): num = %d, daddr = %pI4, dif = %d\n",
+	pr_debug("after bind(): num = %d, dif = %d\n",
 		 (int)isk->inet_num,
-		 &isk->inet_rcv_saddr,
 		 (int)sk->sk_bound_dev_if);
 
 	err = 0;
-	if (isk->inet_rcv_saddr)
+	if ((sk->sk_family == AF_INET && isk->inet_rcv_saddr) ||
+	    (sk->sk_family == AF_INET6 &&
+	     !ipv6_addr_any(&inet6_sk(sk)->rcv_saddr)))
 		sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
+
 	if (snum)
 		sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
 	isk->inet_sport = htons(isk->inet_num);
 	isk->inet_daddr = 0;
 	isk->inet_dport = 0;
+
+#if IS_ENABLED(CONFIG_IPV6)
+	if (sk->sk_family == AF_INET6)
+		memset(&inet6_sk(sk)->daddr, 0, sizeof(inet6_sk(sk)->daddr));
+#endif
+
 	sk_dst_reset(sk);
 out:
 	release_sock(sk);
 	pr_debug("ping_v4_bind -> %d\n", err);
 	return err;
 }
+EXPORT_SYMBOL_GPL(ping_bind);
 
 /*
  * Is this a supported type of ICMP message?
  */
 
-static inline int ping_supported(int type, int code)
+static inline int ping_supported(int family, int type, int code)
 {
-	if (type == ICMP_ECHO && code == 0)
-		return 1;
-	return 0;
+	return (family == AF_INET && type == ICMP_ECHO && code == 0) ||
+	       (family == AF_INET6 && type == ICMPV6_ECHO_REQUEST && code == 0);
 }
 
 /*
@@ -323,30 +476,42 @@
  * sort of error condition.
  */
 
-static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
-
-void ping_err(struct sk_buff *skb, u32 info)
+void ping_err(struct sk_buff *skb, int offset, u32 info)
 {
-	struct iphdr *iph = (struct iphdr *)skb->data;
-	struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2));
+	int family;
+	struct icmphdr *icmph;
 	struct inet_sock *inet_sock;
-	int type = icmp_hdr(skb)->type;
-	int code = icmp_hdr(skb)->code;
+	int type;
+	int code;
 	struct net *net = dev_net(skb->dev);
 	struct sock *sk;
 	int harderr;
 	int err;
 
+	if (skb->protocol == htons(ETH_P_IP)) {
+		family = AF_INET;
+		type = icmp_hdr(skb)->type;
+		code = icmp_hdr(skb)->code;
+		icmph = (struct icmphdr *)(skb->data + offset);
+	} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		family = AF_INET6;
+		type = icmp6_hdr(skb)->icmp6_type;
+		code = icmp6_hdr(skb)->icmp6_code;
+		icmph = (struct icmphdr *) (skb->data + offset);
+	} else {
+		BUG();
+	}
+
 	/* We assume the packet has already been checked by icmp_unreach */
 
-	if (!ping_supported(icmph->type, icmph->code))
+	if (!ping_supported(family, icmph->type, icmph->code))
 		return;
 
-	pr_debug("ping_err(type=%04x,code=%04x,id=%04x,seq=%04x)\n", type,
-		 code, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence));
+	pr_debug("ping_err(proto=0x%x,type=%d,code=%d,id=%04x,seq=%04x)\n",
+		 skb->protocol, type, code, ntohs(icmph->un.echo.id),
+		 ntohs(icmph->un.echo.sequence));
 
-	sk = ping_v4_lookup(net, iph->daddr, iph->saddr,
-			    ntohs(icmph->un.echo.id), skb->dev->ifindex);
+	sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
 	if (sk == NULL) {
 		pr_debug("no socket, dropping\n");
 		return;	/* No socket for error */
@@ -357,72 +522,83 @@
 	harderr = 0;
 	inet_sock = inet_sk(sk);
 
-	switch (type) {
-	default:
-	case ICMP_TIME_EXCEEDED:
-		err = EHOSTUNREACH;
-		break;
-	case ICMP_SOURCE_QUENCH:
-		/* This is not a real error but ping wants to see it.
-		 * Report it with some fake errno. */
-		err = EREMOTEIO;
-		break;
-	case ICMP_PARAMETERPROB:
-		err = EPROTO;
-		harderr = 1;
-		break;
-	case ICMP_DEST_UNREACH:
-		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
-			ipv4_sk_update_pmtu(skb, sk, info);
-			if (inet_sock->pmtudisc != IP_PMTUDISC_DONT) {
-				err = EMSGSIZE;
-				harderr = 1;
-				break;
+	if (skb->protocol == htons(ETH_P_IP)) {
+		switch (type) {
+		default:
+		case ICMP_TIME_EXCEEDED:
+			err = EHOSTUNREACH;
+			break;
+		case ICMP_SOURCE_QUENCH:
+			/* This is not a real error but ping wants to see it.
+			 * Report it with some fake errno.
+			 */
+			err = EREMOTEIO;
+			break;
+		case ICMP_PARAMETERPROB:
+			err = EPROTO;
+			harderr = 1;
+			break;
+		case ICMP_DEST_UNREACH:
+			if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
+				ipv4_sk_update_pmtu(skb, sk, info);
+				if (inet_sock->pmtudisc != IP_PMTUDISC_DONT) {
+					err = EMSGSIZE;
+					harderr = 1;
+					break;
+				}
+				goto out;
 			}
-			goto out;
+			err = EHOSTUNREACH;
+			if (code <= NR_ICMP_UNREACH) {
+				harderr = icmp_err_convert[code].fatal;
+				err = icmp_err_convert[code].errno;
+			}
+			break;
+		case ICMP_REDIRECT:
+			/* See ICMP_SOURCE_QUENCH */
+			ipv4_sk_redirect(skb, sk);
+			err = EREMOTEIO;
+			break;
 		}
-		err = EHOSTUNREACH;
-		if (code <= NR_ICMP_UNREACH) {
-			harderr = icmp_err_convert[code].fatal;
-			err = icmp_err_convert[code].errno;
-		}
-		break;
-	case ICMP_REDIRECT:
-		/* See ICMP_SOURCE_QUENCH */
-		ipv4_sk_redirect(skb, sk);
-		err = EREMOTEIO;
-		break;
+#if IS_ENABLED(CONFIG_IPV6)
+	} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
+#endif
 	}
 
 	/*
 	 *      RFC1122: OK.  Passes ICMP errors back to application, as per
 	 *	4.1.3.3.
 	 */
-	if (!inet_sock->recverr) {
+	if ((family == AF_INET && !inet_sock->recverr) ||
+	    (family == AF_INET6 && !inet6_sk(sk)->recverr)) {
 		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
 			goto out;
 	} else {
-		ip_icmp_error(sk, skb, err, 0 /* no remote port */,
-			 info, (u8 *)icmph);
+		if (family == AF_INET) {
+			ip_icmp_error(sk, skb, err, 0 /* no remote port */,
+				      info, (u8 *)icmph);
+#if IS_ENABLED(CONFIG_IPV6)
+		} else if (family == AF_INET6) {
+			pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
+						   info, (u8 *)icmph);
+#endif
+		}
 	}
 	sk->sk_err = err;
 	sk->sk_error_report(sk);
 out:
 	sock_put(sk);
 }
+EXPORT_SYMBOL_GPL(ping_err);
 
 /*
- *	Copy and checksum an ICMP Echo packet from user space into a buffer.
+ *	Copy and checksum an ICMP Echo packet from user space into a buffer
+ *	starting from the payload.
  */
 
-struct pingfakehdr {
-	struct icmphdr icmph;
-	struct iovec *iov;
-	__wsum wcheck;
-};
-
-static int ping_getfrag(void *from, char *to,
-			int offset, int fraglen, int odd, struct sk_buff *skb)
+int ping_getfrag(void *from, char *to,
+		 int offset, int fraglen, int odd, struct sk_buff *skb)
 {
 	struct pingfakehdr *pfh = (struct pingfakehdr *)from;
 
@@ -433,20 +609,33 @@
 			    pfh->iov, 0, fraglen - sizeof(struct icmphdr),
 			    &pfh->wcheck))
 			return -EFAULT;
-
-		return 0;
+	} else if (offset < sizeof(struct icmphdr)) {
+		BUG();
+	} else {
+		if (csum_partial_copy_fromiovecend
+				(to, pfh->iov, offset - sizeof(struct icmphdr),
+				 fraglen, &pfh->wcheck))
+			return -EFAULT;
 	}
-	if (offset < sizeof(struct icmphdr))
-		BUG();
-	if (csum_partial_copy_fromiovecend
-			(to, pfh->iov, offset - sizeof(struct icmphdr),
-			 fraglen, &pfh->wcheck))
-		return -EFAULT;
+
+#if IS_ENABLED(CONFIG_IPV6)
+	/* For IPv6, checksum each skb as we go along, as expected by
+	 * icmpv6_push_pending_frames. For IPv4, accumulate the checksum
+	 * in wcheck; it will be finalized in ping_v4_push_pending_frames.
+	 */
+	if (pfh->family == AF_INET6) {
+		skb->csum = pfh->wcheck;
+		skb->ip_summed = CHECKSUM_NONE;
+		pfh->wcheck = 0;
+	}
+#endif
+
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ping_getfrag);
 
-static int ping_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
-				    struct flowi4 *fl4)
+static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
+				       struct flowi4 *fl4)
 {
 	struct sk_buff *skb = skb_peek(&sk->sk_write_queue);
 
@@ -458,24 +647,9 @@
 	return ip_push_pending_frames(sk, fl4);
 }
 
-static int ping_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-			size_t len)
-{
-	struct net *net = sock_net(sk);
-	struct flowi4 fl4;
-	struct inet_sock *inet = inet_sk(sk);
-	struct ipcm_cookie ipc;
-	struct icmphdr user_icmph;
-	struct pingfakehdr pfh;
-	struct rtable *rt = NULL;
-	struct ip_options_data opt_copy;
-	int free = 0;
-	__be32 saddr, daddr, faddr;
-	u8  tos;
-	int err;
-
-	pr_debug("ping_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num);
-
+int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
+			void *user_icmph, size_t icmph_len)
+{
+	u8 type, code;
 
 	if (len > 0xFFFF)
 		return -EMSGSIZE;
@@ -490,15 +664,53 @@
 
 	/*
 	 *	Fetch the ICMP header provided by the userland.
-	 *	iovec is modified!
+	 *	iovec is modified! The ICMP header is consumed.
 	 */
-
-	if (memcpy_fromiovec((u8 *)&user_icmph, msg->msg_iov,
-			     sizeof(struct icmphdr)))
+	if (memcpy_fromiovec(user_icmph, msg->msg_iov, icmph_len))
 		return -EFAULT;
-	if (!ping_supported(user_icmph.type, user_icmph.code))
+
+	if (family == AF_INET) {
+		type = ((struct icmphdr *) user_icmph)->type;
+		code = ((struct icmphdr *) user_icmph)->code;
+#if IS_ENABLED(CONFIG_IPV6)
+	} else if (family == AF_INET6) {
+		type = ((struct icmp6hdr *) user_icmph)->icmp6_type;
+		code = ((struct icmp6hdr *) user_icmph)->icmp6_code;
+#endif
+	} else {
+		BUG();
+	}
+
+	if (!ping_supported(family, type, code))
 		return -EINVAL;
 
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ping_common_sendmsg);
+
+int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+		    size_t len)
+{
+	struct net *net = sock_net(sk);
+	struct flowi4 fl4;
+	struct inet_sock *inet = inet_sk(sk);
+	struct ipcm_cookie ipc;
+	struct icmphdr user_icmph;
+	struct pingfakehdr pfh;
+	struct rtable *rt = NULL;
+	struct ip_options_data opt_copy;
+	int free = 0;
+	__be32 saddr, daddr, faddr;
+	u8  tos;
+	int err;
+
+	pr_debug("ping_v4_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num);
+
+	err = ping_common_sendmsg(AF_INET, msg, len, &user_icmph,
+				  sizeof(user_icmph));
+	if (err)
+		return err;
+
 	/*
 	 *	Get and verify the address.
 	 */
@@ -508,7 +720,7 @@
 		if (msg->msg_namelen < sizeof(*usin))
 			return -EINVAL;
 		if (usin->sin_family != AF_INET)
-			return -EINVAL;
+			return -EAFNOSUPPORT;
 		daddr = usin->sin_addr.s_addr;
 		/* no remote port */
 	} else {
@@ -570,7 +782,8 @@
 
 	flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
 			   RT_SCOPE_UNIVERSE, sk->sk_protocol,
-			   inet_sk_flowi_flags(sk), faddr, saddr, 0, 0);
+			   inet_sk_flowi_flags(sk), faddr, saddr, 0, 0,
+			   sock_i_uid(sk));
 
 	security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
 	rt = ip_route_output_flow(net, &fl4, sk);
@@ -603,13 +816,14 @@
 	pfh.icmph.un.echo.sequence = user_icmph.un.echo.sequence;
 	pfh.iov = msg->msg_iov;
 	pfh.wcheck = 0;
+	pfh.family = AF_INET;
 
 	err = ip_append_data(sk, &fl4, ping_getfrag, &pfh, len,
 			0, &ipc, &rt, msg->msg_flags);
 	if (err)
 		ip_flush_pending_frames(sk);
 	else
-		err = ping_push_pending_frames(sk, &pfh, &fl4);
+		err = ping_v4_push_pending_frames(sk, &pfh, &fl4);
 	release_sock(sk);
 
 out:
@@ -630,10 +844,13 @@
 	goto out;
 }
 
-static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-			size_t len, int noblock, int flags, int *addr_len)
+int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+		 size_t len, int noblock, int flags, int *addr_len)
 {
 	struct inet_sock *isk = inet_sk(sk);
+	int family = sk->sk_family;
+	struct sockaddr_in *sin;
+	struct sockaddr_in6 *sin6;
 	struct sk_buff *skb;
 	int copied, err;
 
@@ -643,8 +860,23 @@
 	if (flags & MSG_OOB)
 		goto out;
 
-	if (flags & MSG_ERRQUEUE)
-		return ip_recv_error(sk, msg, len, addr_len);
+	if (addr_len) {
+		if (family == AF_INET)
+			*addr_len = sizeof(*sin);
+		else if (family == AF_INET6)
+			*addr_len = sizeof(*sin6);
+	}
+
+	if (flags & MSG_ERRQUEUE) {
+		if (family == AF_INET) {
+			return ip_recv_error(sk, msg, len, addr_len);
+#if IS_ENABLED(CONFIG_IPV6)
+		} else if (family == AF_INET6) {
+			return pingv6_ops.ipv6_recv_error(sk, msg, len,
+							addr_len);
+#endif
+		}
+	}
 
 	skb = skb_recv_datagram(sk, flags, noblock, &err);
 	if (!skb)
@@ -663,18 +895,44 @@
 
 	sock_recv_timestamp(msg, sk, skb);
 
-	/* Copy the address. */
-	if (msg->msg_name) {
-		struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
+	/* Copy the address and add cmsg data. */
+	if (family == AF_INET) {
+		sin = (struct sockaddr_in *) msg->msg_name;
+		if (sin) {
+			sin->sin_family = AF_INET;
+			sin->sin_port = 0 /* skb->h.uh->source */;
+			sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+			memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
+		}
 
-		sin->sin_family = AF_INET;
-		sin->sin_port = 0 /* skb->h.uh->source */;
-		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
-		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
-		*addr_len = sizeof(*sin);
+		if (isk->cmsg_flags)
+			ip_cmsg_recv(msg, skb);
+
+#if IS_ENABLED(CONFIG_IPV6)
+	} else if (family == AF_INET6) {
+		struct ipv6_pinfo *np = inet6_sk(sk);
+		struct ipv6hdr *ip6 = ipv6_hdr(skb);
+		sin6 = (struct sockaddr_in6 *) msg->msg_name;
+
+		if (sin6) {
+			sin6->sin6_family = AF_INET6;
+			sin6->sin6_port = 0;
+			sin6->sin6_addr = ip6->saddr;
+			sin6->sin6_flowinfo = 0;
+			if (np->sndflow)
+				sin6->sin6_flowinfo = ip6_flowinfo(ip6);
+			sin6->sin6_scope_id =
+				ipv6_iface_scope_id(&sin6->sin6_addr,
+						    IP6CB(skb)->iif);
+		}
+
+		if (inet6_sk(sk)->rxopt.all)
+			pingv6_ops.ip6_datagram_recv_ctl(sk, msg, skb);
+#endif
+	} else {
+		BUG();
 	}
-	if (isk->cmsg_flags)
-		ip_cmsg_recv(msg, skb);
+
 	err = copied;
 
 done:
@@ -683,8 +941,9 @@
 	pr_debug("ping_recvmsg -> %d\n", err);
 	return err;
 }
+EXPORT_SYMBOL_GPL(ping_recvmsg);
 
-static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n",
 		 inet_sk(sk), inet_sk(sk)->inet_num, skb);
@@ -695,6 +954,7 @@
 	}
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ping_queue_rcv_skb);
 
 
 /*
@@ -705,10 +965,7 @@
 {
 	struct sock *sk;
 	struct net *net = dev_net(skb->dev);
-	struct iphdr *iph = ip_hdr(skb);
 	struct icmphdr *icmph = icmp_hdr(skb);
-	__be32 saddr = iph->saddr;
-	__be32 daddr = iph->daddr;
 
 	/* We assume the packet has already been checked by icmp_rcv */
 
@@ -718,8 +975,7 @@
 	/* Push ICMP header back */
 	skb_push(skb, skb->data - (u8 *)icmph);
 
-	sk = ping_v4_lookup(net, saddr, daddr, ntohs(icmph->un.echo.id),
-			    skb->dev->ifindex);
+	sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
 	if (sk != NULL) {
 		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 
@@ -733,6 +989,7 @@
 
 	/* We're called from icmp_rcv(). kfree_skb() is done there. */
 }
+EXPORT_SYMBOL_GPL(ping_rcv);
 
 struct proto ping_prot = {
 	.name =		"PING",
@@ -743,14 +1000,14 @@
 	.disconnect =	udp_disconnect,
 	.setsockopt =	ip_setsockopt,
 	.getsockopt =	ip_getsockopt,
-	.sendmsg =	ping_sendmsg,
+	.sendmsg =	ping_v4_sendmsg,
 	.recvmsg =	ping_recvmsg,
 	.bind =		ping_bind,
 	.backlog_rcv =	ping_queue_rcv_skb,
 	.release_cb =	ip4_datagram_release_cb,
-	.hash =		ping_v4_hash,
-	.unhash =	ping_v4_unhash,
-	.get_port =	ping_v4_get_port,
+	.hash =		ping_hash,
+	.unhash =	ping_unhash,
+	.get_port =	ping_get_port,
 	.obj_size =	sizeof(struct inet_sock),
 };
 EXPORT_SYMBOL(ping_prot);
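For reference, the functions renamed above remain reachable from userspace through
the unprivileged ping-socket API. A minimal sketch, assuming the caller's GID falls
within net.ipv4.ping_group_range (illustrative only, not part of the patch):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/ip_icmp.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	/* SOCK_DGRAM + IPPROTO_ICMP enters ping_v4_sendmsg() above. */
	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);
	struct icmphdr icmp = { .type = ICMP_ECHO };	/* code 0, see ping_supported() */
	struct sockaddr_in dst = { .sin_family = AF_INET };
	char reply[192];

	if (fd < 0) {
		perror("socket");	/* EACCES if GID is outside ping_group_range */
		return 1;
	}
	icmp.un.echo.sequence = htons(1);	/* echo.id is rewritten to the bound "port" */
	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);
	sendto(fd, &icmp, sizeof(icmp), 0, (struct sockaddr *)&dst, sizeof(dst));
	recv(fd, reply, sizeof(reply), 0);	/* ICMP echo reply, header included */
	close(fd);
	return 0;
}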
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index b4a1c42..2dfe804 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -571,9 +571,9 @@
 	flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
 			   RT_SCOPE_UNIVERSE,
 			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
-			   inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP |
-			    (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
-			   daddr, saddr, 0, 0);
+			   inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP |
+			    (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
+			   daddr, saddr, 0, 0,
+			   sock_i_uid(sk));
 
 	if (!inet->hdrincl) {
 		err = raw_probe_proto_opt(&fl4, msg);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index e59d633..70edc33 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -515,7 +515,7 @@
 }
 EXPORT_SYMBOL(__ip_select_ident);
 
-static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
+static void __build_flow_key(struct flowi4 *fl4, struct sock *sk,
 			     const struct iphdr *iph,
 			     int oif, u8 tos,
 			     u8 prot, u32 mark, int flow_flags)
@@ -531,11 +531,12 @@
 	flowi4_init_output(fl4, oif, mark, tos,
 			   RT_SCOPE_UNIVERSE, prot,
 			   flow_flags,
-			   iph->daddr, iph->saddr, 0, 0);
+			   iph->daddr, iph->saddr, 0, 0,
+			   sk ? sock_i_uid(sk) : 0);
 }
 
 static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
-			       const struct sock *sk)
+			       struct sock *sk)
 {
 	const struct iphdr *iph = ip_hdr(skb);
 	int oif = skb->dev->ifindex;
@@ -546,7 +547,7 @@
 	__build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0);
 }
 
-static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
+static void build_sk_flow_key(struct flowi4 *fl4, struct sock *sk)
 {
 	const struct inet_sock *inet = inet_sk(sk);
 	const struct ip_options_rcu *inet_opt;
@@ -560,11 +561,12 @@
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
 			   inet_sk_flowi_flags(sk),
-			   daddr, inet->inet_saddr, 0, 0);
+			   daddr, inet->inet_saddr, 0, 0,
+			   sock_i_uid(sk));
 	rcu_read_unlock();
 }
 
-static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
+static void ip_rt_build_flow_key(struct flowi4 *fl4, struct sock *sk,
 				 const struct sk_buff *skb)
 {
 	if (skb)
@@ -977,6 +979,9 @@
 	struct flowi4 fl4;
 	struct rtable *rt;
 
+	if (!mark)
+		mark = IP4_REPLY_MARK(net, skb->mark);
+
 	__build_flow_key(&fl4, NULL, iph, oif,
 			 RT_TOS(iph->tos), protocol, mark, flow_flags);
 	rt = __ip_route_output_key(net, &fl4);
@@ -994,6 +999,10 @@
 	struct rtable *rt;
 
 	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
+
+	if (!fl4.flowi4_mark)
+		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
+
 	rt = __ip_route_output_key(sock_net(sk), &fl4);
 	if (!IS_ERR(rt)) {
 		__ip_rt_update_pmtu(rt, &fl4, mtu);
@@ -2319,6 +2328,11 @@
 	    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
 		goto nla_put_failure;
 
+	if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
+	    nla_put_u32(skb, RTA_UID,
+			from_kuid_munged(current_user_ns(), fl4->flowi4_uid)))
+		goto nla_put_failure;
+
 	error = rt->dst.error;
 
 	if (rt_is_input_route(rt)) {
@@ -2369,6 +2383,7 @@
 	int err;
 	int mark;
 	struct sk_buff *skb;
+	kuid_t uid;
 
 	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
 	if (err < 0)
@@ -2396,6 +2411,10 @@
 	dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
 	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
 	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
+	if (tb[RTA_UID])
+		uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
+	else
+		uid = (iif ? INVALID_UID : current_uid());
 
 	memset(&fl4, 0, sizeof(fl4));
 	fl4.daddr = dst;
@@ -2403,6 +2422,7 @@
 	fl4.flowi4_tos = rtm->rtm_tos;
 	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
 	fl4.flowi4_mark = mark;
+	fl4.flowi4_uid = uid;
 
 	if (iif) {
 		struct net_device *dev;
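The new RTA_UID attribute lets userspace ask which route a given UID would get.
A rough RTM_GETROUTE sketch, assuming the uapi headers from this tree (which
export RTA_UID; mainline headers of this era do not):

#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Build an RTM_GETROUTE request carrying RTA_DST and RTA_UID. */
static int route_get_for_uid(__be32 daddr, uint32_t uid)
{
	struct {
		struct nlmsghdr nh;
		struct rtmsg rtm;
		char attrs[64];
	} req;
	struct rtattr *rta;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
	req.nh.nlmsg_type = RTM_GETROUTE;
	req.nh.nlmsg_flags = NLM_F_REQUEST;
	req.rtm.rtm_family = AF_INET;
	req.rtm.rtm_dst_len = 32;

	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nh.nlmsg_len));
	rta->rta_type = RTA_DST;
	rta->rta_len = RTA_LENGTH(sizeof(daddr));
	memcpy(RTA_DATA(rta), &daddr, sizeof(daddr));
	req.nh.nlmsg_len = NLMSG_ALIGN(req.nh.nlmsg_len) + rta->rta_len;

	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nh.nlmsg_len));
	rta->rta_type = RTA_UID;	/* consumed by inet_rtm_getroute() above */
	rta->rta_len = RTA_LENGTH(sizeof(uid));
	memcpy(RTA_DATA(rta), &uid, sizeof(uid));
	req.nh.nlmsg_len = NLMSG_ALIGN(req.nh.nlmsg_len) + rta->rta_len;

	if (send(fd, &req, req.nh.nlmsg_len, 0) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* caller recv()s the RTM_NEWROUTE answer */
}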
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index b05c96e..c94032b 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -312,6 +312,7 @@
 	ireq->rmt_port		= th->source;
 	ireq->loc_addr		= ip_hdr(skb)->daddr;
 	ireq->rmt_addr		= ip_hdr(skb)->saddr;
+	ireq->ir_mark		= inet_request_mark(sk, skb);
 	ireq->ecn_ok		= ecn_ok;
 	ireq->snd_wscale	= tcp_opt.snd_wscale;
 	ireq->sack_ok		= tcp_opt.sack_ok;
@@ -348,11 +349,12 @@
 	 * hasn't changed since we received the original syn, but I see
 	 * no easy way to do this.
 	 */
-	flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
+	flowi4_init_output(&fl4, sk->sk_bound_dev_if, ireq->ir_mark,
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
 			   inet_sk_flowi_flags(sk),
 			   (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
-			   ireq->loc_addr, th->source, th->dest);
+			   ireq->loc_addr, th->source, th->dest,
+			   sock_i_uid(sk));
 	security_req_classify_flow(req, flowi4_to_flowi(&fl4));
 	rt = ip_route_output_key(sock_net(sk), &fl4);
 	if (IS_ERR(rt)) {
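For readers without the rest of the series: ir_mark is filled in by
inet_request_mark(), added to include/net/inet_sock.h elsewhere in this patch
set. Its behaviour is roughly the following sketch (shown for context only):

/* Inherit the SYN's fwmark when the listener carries no mark of its
 * own and net.ipv4.tcp_fwmark_accept is enabled.
 */
static inline u32 inet_request_mark(struct sock *sk, struct sk_buff *skb)
{
	if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)
		return skb->mark;
	return sk->sk_mark;
}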
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 90b26be..f955750 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -138,6 +138,21 @@
 	return ret;
 }
 
+/* Validate changes from /proc interface. */
+static int proc_tcp_default_init_rwnd(ctl_table *ctl, int write,
+				      void __user *buffer,
+				      size_t *lenp, loff_t *ppos)
+{
+	int old_value = *(int *)ctl->data;
+	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+	int new_value = *(int *)ctl->data;
+
+	if (write && ret == 0 && (new_value < 3 || new_value > 100))
+		*(int *)ctl->data = old_value;
+
+	return ret;
+}
+
 static int proc_tcp_congestion_control(ctl_table *ctl, int write,
 				       void __user *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -737,7 +752,7 @@
 		.mode           = 0644,
 		.proc_handler   = proc_dointvec
 	},
-        {
+	{
 		.procname       = "tcp_thin_dupack",
 		.data           = &sysctl_tcp_thin_dupack,
 		.maxlen         = sizeof(int),
@@ -754,6 +769,13 @@
 		.extra2		= &four,
 	},
 	{
+		.procname       = "tcp_default_init_rwnd",
+		.data           = &sysctl_tcp_default_init_rwnd,
+		.maxlen         = sizeof(int),
+		.mode           = 0644,
+		.proc_handler   = proc_tcp_default_init_rwnd
+	},
+	{
 		.procname	= "tcp_min_tso_segs",
 		.data		= &sysctl_tcp_min_tso_segs,
 		.maxlen		= sizeof(int),
@@ -851,6 +873,20 @@
 		.mode		= 0644,
 		.proc_handler	= ipv4_tcp_mem,
 	},
+	{
+		.procname	= "fwmark_reflect",
+		.data		= &init_net.ipv4.sysctl_fwmark_reflect,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "tcp_fwmark_accept",
+		.data		= &init_net.ipv4.sysctl_tcp_fwmark_accept,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
 	{ }
 };
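Note that proc_tcp_default_init_rwnd() validates by reverting: proc_dointvec()
has already stored the new value, so an out-of-range write is undone afterwards
while the handler still returns 0. From userspace the write appears to succeed
either way; a quick check (illustrative only):

#include <stdio.h>

/* Write a candidate value, then read back what the kernel kept. */
static int set_init_rwnd(int val)
{
	const char *path = "/proc/sys/net/ipv4/tcp_default_init_rwnd";
	int kept = -1;
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", val);	/* "succeeds" even for val < 3 or val > 100 */
	fclose(f);

	f = fopen(path, "r");
	if (f) {
		fscanf(f, "%d", &kept);	/* out-of-range writes leave this unchanged */
		fclose(f);
	}
	return kept;
}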
 
diff --git a/net/ipv4/sysfs_net_ipv4.c b/net/ipv4/sysfs_net_ipv4.c
new file mode 100644
index 0000000..0cbbf10
--- /dev/null
+++ b/net/ipv4/sysfs_net_ipv4.c
@@ -0,0 +1,88 @@
+/*
+ * net/ipv4/sysfs_net_ipv4.c
+ *
+ * sysfs-based networking knobs (so we can, unlike with sysctl, control perms)
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/init.h>
+#include <net/tcp.h>
+
+#define CREATE_IPV4_FILE(_name, _var) \
+static ssize_t _name##_show(struct kobject *kobj, \
+			    struct kobj_attribute *attr, char *buf) \
+{ \
+	return sprintf(buf, "%d\n", _var); \
+} \
+static ssize_t _name##_store(struct kobject *kobj, \
+			     struct kobj_attribute *attr, \
+			     const char *buf, size_t count) \
+{ \
+	int val, ret; \
+	ret = sscanf(buf, "%d", &val); \
+	if (ret != 1) \
+		return -EINVAL; \
+	if (val < 0) \
+		return -EINVAL; \
+	_var = val; \
+	return count; \
+} \
+static struct kobj_attribute _name##_attr = \
+	__ATTR(_name, 0644, _name##_show, _name##_store)
+
+CREATE_IPV4_FILE(tcp_wmem_min, sysctl_tcp_wmem[0]);
+CREATE_IPV4_FILE(tcp_wmem_def, sysctl_tcp_wmem[1]);
+CREATE_IPV4_FILE(tcp_wmem_max, sysctl_tcp_wmem[2]);
+
+CREATE_IPV4_FILE(tcp_rmem_min, sysctl_tcp_rmem[0]);
+CREATE_IPV4_FILE(tcp_rmem_def, sysctl_tcp_rmem[1]);
+CREATE_IPV4_FILE(tcp_rmem_max, sysctl_tcp_rmem[2]);
+
+static struct attribute *ipv4_attrs[] = {
+	&tcp_wmem_min_attr.attr,
+	&tcp_wmem_def_attr.attr,
+	&tcp_wmem_max_attr.attr,
+	&tcp_rmem_min_attr.attr,
+	&tcp_rmem_def_attr.attr,
+	&tcp_rmem_max_attr.attr,
+	NULL
+};
+
+static struct attribute_group ipv4_attr_group = {
+	.attrs = ipv4_attrs,
+};
+
+static __init int sysfs_ipv4_init(void)
+{
+	struct kobject *ipv4_kobject;
+	int ret;
+
+	ipv4_kobject = kobject_create_and_add("ipv4", kernel_kobj);
+	if (!ipv4_kobject)
+		return -ENOMEM;
+
+	ret = sysfs_create_group(ipv4_kobject, &ipv4_attr_group);
+	if (ret) {
+		kobject_put(ipv4_kobject);
+		return ret;
+	}
+
+	return 0;
+}
+
+subsys_initcall(sysfs_ipv4_init);
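Since the kobject hangs off kernel_kobj, these knobs appear under
/sys/kernel/ipv4/ rather than /proc/sys/. A minimal consumer (illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	ssize_t n;
	int fd = open("/sys/kernel/ipv4/tcp_wmem_max", O_RDWR);

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);	/* tcp_wmem_max_show() */
	if (n > 0) {
		buf[n] = '\0';
		printf("tcp_wmem_max = %s", buf);
	}
	/* tcp_wmem_max_store() rejects non-numeric or negative input. */
	lseek(fd, 0, SEEK_SET);
	write(fd, "4194304\n", 8);
	close(fd);
	return 0;
}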
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5d4bd6c..02e743e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -268,12 +268,16 @@
 #include <linux/crypto.h>
 #include <linux/time.h>
 #include <linux/slab.h>
+#include <linux/uid_stat.h>
 
 #include <net/icmp.h>
 #include <net/inet_common.h>
 #include <net/tcp.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
+#include <net/ip6_route.h>
+#include <net/ipv6.h>
+#include <net/transp_v6.h>
 #include <net/netdma.h>
 #include <net/sock.h>
 
@@ -1240,6 +1244,9 @@
 		tcp_push(sk, flags, mss_now, tp->nonagle);
 out_nopush:
 	release_sock(sk);
+
+	if (copied + copied_syn)
+		uid_stat_tcp_snd(current_uid(), copied + copied_syn);
 	return copied + copied_syn;
 
 do_fault:
@@ -1544,6 +1551,7 @@
 	if (copied > 0) {
 		tcp_recv_skb(sk, seq, &offset);
 		tcp_cleanup_rbuf(sk, copied);
+		uid_stat_tcp_rcv(current_uid(), copied);
 	}
 	return copied;
 }
@@ -1948,6 +1956,9 @@
 	tcp_cleanup_rbuf(sk, copied);
 
 	release_sock(sk);
+
+	if (copied > 0)
+		uid_stat_tcp_rcv(current_uid(), copied);
 	return copied;
 
 out:
@@ -1956,6 +1967,8 @@
 
 recv_urg:
 	err = tcp_recv_urg(sk, msg, len, flags);
+	if (err > 0)
+		uid_stat_tcp_rcv(current_uid(), err);
 	goto out;
 
 recv_sndq:
@@ -3468,3 +3481,107 @@
 
 	tcp_tasklet_init();
 }
+
+static int tcp_is_local(struct net *net, __be32 addr)
+{
+	struct rtable *rt;
+	struct flowi4 fl4 = { .daddr = addr };
+	rt = ip_route_output_key(net, &fl4);
+	if (IS_ERR_OR_NULL(rt))
+		return 0;
+	return rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK);
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static int tcp_is_local6(struct net *net, struct in6_addr *addr)
+{
+	struct rt6_info *rt6 = rt6_lookup(net, addr, addr, 0, 0);
+	return rt6 && rt6->dst.dev && (rt6->dst.dev->flags & IFF_LOOPBACK);
+}
+#endif
+
+/*
+ * tcp_nuke_addr - destroy all sockets on the given local address
+ * If the local address is the unspecified address (0.0.0.0 or ::), destroy
+ * all sockets with local addresses that are not configured.
+ */
+int tcp_nuke_addr(struct net *net, struct sockaddr *addr)
+{
+	int family = addr->sa_family;
+	unsigned int bucket;
+
+	struct in_addr *in;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	struct in6_addr *in6;
+#endif
+	if (family == AF_INET) {
+		in = &((struct sockaddr_in *)addr)->sin_addr;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	} else if (family == AF_INET6) {
+		in6 = &((struct sockaddr_in6 *)addr)->sin6_addr;
+#endif
+	} else {
+		return -EAFNOSUPPORT;
+	}
+
+	for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
+		struct hlist_nulls_node *node;
+		struct sock *sk;
+		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
+
+restart:
+		spin_lock_bh(lock);
+		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
+			struct inet_sock *inet = inet_sk(sk);
+
+			if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
+				continue;
+			if (sock_flag(sk, SOCK_DEAD))
+				continue;
+
+			if (family == AF_INET) {
+				__be32 s4 = inet->inet_rcv_saddr;
+				if (s4 == LOOPBACK4_IPV6)
+					continue;
+
+				if (in->s_addr != s4 &&
+				    !(in->s_addr == INADDR_ANY &&
+				      !tcp_is_local(net, s4)))
+					continue;
+			}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+			if (family == AF_INET6) {
+				struct in6_addr *s6;
+				if (!inet->pinet6)
+					continue;
+
+				s6 = &inet->pinet6->rcv_saddr;
+				if (ipv6_addr_type(s6) == IPV6_ADDR_MAPPED)
+					continue;
+
+				if (!ipv6_addr_equal(in6, s6) &&
+				    !(ipv6_addr_equal(in6, &in6addr_any) &&
+				      !tcp_is_local6(net, s6)))
+					continue;
+			}
+#endif
+
+			sock_hold(sk);
+			spin_unlock_bh(lock);
+
+			local_bh_disable();
+			bh_lock_sock(sk);
+			sk->sk_err = ETIMEDOUT;
+			sk->sk_error_report(sk);
+
+			tcp_done(sk);
+			bh_unlock_sock(sk);
+			local_bh_enable();
+			sock_put(sk);
+
+			goto restart;
+		}
+		spin_unlock_bh(lock);
+	}
+
+	return 0;
+}
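The uid_stat_tcp_snd()/uid_stat_tcp_rcv() hooks above feed Android's per-UID
traffic counters (CONFIG_UID_STAT; the driver is not part of this section).
Assuming that driver's usual /proc layout, a reader might look like:

#include <stdio.h>

/* Bytes this UID has sent over TCP, as accounted by uid_stat_tcp_snd().
 * The path is assumed from the Android uid_stat driver; hypothetical here.
 */
static long uid_tcp_snd(unsigned int uid)
{
	char path[64];
	long bytes = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/proc/uid_stat/%u/tcp_snd", uid);
	f = fopen(path, "r");
	if (f) {
		fscanf(f, "%ld", &bytes);
		fclose(f);
	}
	return bytes;
}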
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index f3b15bb..cd4cfb1 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -99,6 +99,7 @@
 
 int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
 int sysctl_tcp_early_retrans __read_mostly = 3;
+int sysctl_tcp_default_init_rwnd __read_mostly = TCP_DEFAULT_INIT_RCVWND;
 
 #define FLAG_DATA		0x01 /* Incoming frame contained data.		*/
 #define FLAG_WIN_UPDATE		0x02 /* Incoming ACK was a window update.	*/
@@ -352,14 +353,14 @@
 static void tcp_fixup_rcvbuf(struct sock *sk)
 {
 	u32 mss = tcp_sk(sk)->advmss;
-	u32 icwnd = TCP_DEFAULT_INIT_RCVWND;
+	u32 icwnd = sysctl_tcp_default_init_rwnd;
 	int rcvmem;
 
 	/* Limit to 10 segments if mss <= 1460,
 	 * or 14600/mss segments, with a minimum of two segments.
 	 */
 	if (mss > 1460)
-		icwnd = max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
+		icwnd = max_t(u32, (1460 * icwnd) / mss, 2);
 
 	rcvmem = SKB_TRUESIZE(mss + MAX_TCP_HEADER);
 	while (tcp_win_from_space(rcvmem) < mss)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 6504a08..3ae0b26 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1532,6 +1532,7 @@
 	ireq->rmt_addr = saddr;
 	ireq->no_srccheck = inet_sk(sk)->transparent;
 	ireq->opt = tcp_v4_save_options(skb);
+	ireq->ir_mark = inet_request_mark(sk, skb);
 
 	if (security_inet_conn_request(sk, skb, req))
 		goto drop_and_free;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 1f2f6b5..86b9957 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -232,14 +232,13 @@
 	}
 
 	/* Set initial window to a value enough for senders starting with
-	 * initial congestion window of TCP_DEFAULT_INIT_RCVWND. Place
+	 * initial congestion window of sysctl_tcp_default_init_rwnd. Place
 	 * a limit on the initial window when mss is larger than 1460.
 	 */
 	if (mss > (1 << *rcv_wscale)) {
-		int init_cwnd = TCP_DEFAULT_INIT_RCVWND;
+		int init_cwnd = sysctl_tcp_default_init_rwnd;
 		if (mss > 1460)
-			init_cwnd =
-			max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
+			init_cwnd = max_t(u32, (1460 * init_cwnd) / mss, 2);
 		/* when initializing use the value from init_rcv_wnd
 		 * rather than the default from above
 		 */
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 68174e4..364076e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -963,7 +963,8 @@
 		flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
 				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
 				   inet_sk_flowi_flags(sk)|FLOWI_FLAG_CAN_SLEEP,
-				   faddr, saddr, dport, inet->inet_sport);
+				   faddr, saddr, dport, inet->inet_sport,
+				   sock_i_uid(sk));
 
 		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
 		rt = ip_route_output_flow(net, fl4, sk);
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 9af088d..470a9c0 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -7,7 +7,7 @@
 ipv6-objs :=	af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \
 		addrlabel.o \
 		route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o udplite.o \
-		raw.o icmp.o mcast.o reassembly.o tcp_ipv6.o \
+		raw.o icmp.o mcast.o reassembly.o tcp_ipv6.o ping.o \
 		exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o
 
 ipv6-offload :=	ip6_offload.o tcpv6_offload.o udp_offload.o exthdrs_offload.o
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a3e2c34..0a2ca26 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -200,10 +200,12 @@
 	.accept_ra_rt_info_max_plen = 0,
 #endif
 #endif
+	.accept_ra_rt_table	= 0,
 	.proxy_ndp		= 0,
 	.accept_source_route	= 0,	/* we do not accept RH0 by default. */
 	.disable_ipv6		= 0,
 	.accept_dad		= 1,
+	.use_oif_addrs_only	= 0,
 };
 
 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -234,10 +236,12 @@
 	.accept_ra_rt_info_max_plen = 0,
 #endif
 #endif
+	.accept_ra_rt_table	= 0,
 	.proxy_ndp		= 0,
 	.accept_source_route	= 0,	/* we do not accept RH0 by default. */
 	.disable_ipv6		= 0,
 	.accept_dad		= 1,
+	.use_oif_addrs_only	= 0,
 };
 
 /* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */
@@ -1174,6 +1178,9 @@
 #endif
 	IPV6_SADDR_RULE_ORCHID,
 	IPV6_SADDR_RULE_PREFIX,
+#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
+	IPV6_SADDR_RULE_NOT_OPTIMISTIC,
+#endif
 	IPV6_SADDR_RULE_MAX
 };
 
@@ -1201,6 +1208,15 @@
 	return 0;
 }
 
+static inline bool ipv6_use_optimistic_addr(struct inet6_dev *idev)
+{
+#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
+	return idev && idev->cnf.optimistic_dad && idev->cnf.use_optimistic;
+#else
+	return false;
+#endif
+}
+
 static int ipv6_get_saddr_eval(struct net *net,
 			       struct ipv6_saddr_score *score,
 			       struct ipv6_saddr_dst *dst,
@@ -1261,10 +1277,16 @@
 		score->scopedist = ret;
 		break;
 	case IPV6_SADDR_RULE_PREFERRED:
+	    {
 		/* Rule 3: Avoid deprecated and optimistic addresses */
+		u8 avoid = IFA_F_DEPRECATED;
+
+		if (!ipv6_use_optimistic_addr(score->ifa->idev))
+			avoid |= IFA_F_OPTIMISTIC;
 		ret = ipv6_saddr_preferred(score->addr_type) ||
-		      !(score->ifa->flags & (IFA_F_DEPRECATED|IFA_F_OPTIMISTIC));
+		      !(score->ifa->flags & avoid);
 		break;
+	    }
 #ifdef CONFIG_IPV6_MIP6
 	case IPV6_SADDR_RULE_HOA:
 	    {
@@ -1312,6 +1334,14 @@
 			ret = score->ifa->prefix_len;
 		score->matchlen = ret;
 		break;
+#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
+	case IPV6_SADDR_RULE_NOT_OPTIMISTIC:
+		/* Optimistic addresses still have lower precedence than other
+		 * preferred addresses.
+		 */
+		ret = !(score->ifa->flags & IFA_F_OPTIMISTIC);
+		break;
+#endif
 	default:
 		ret = 0;
 	}
@@ -1359,9 +1389,15 @@
 		 *    include addresses assigned to interfaces
 		 *    belonging to the same site as the outgoing
 		 *    interface.)
+		 *  - "It is RECOMMENDED that the candidate source addresses
+		 *    be the set of unicast addresses assigned to the
+		 *    interface that will be used to send to the destination
+		 *    (the 'outgoing' interface)." (RFC 6724)
 		 */
+		idev = dst_dev ? __in6_dev_get(dst_dev) : NULL;
 		if (((dst_type & IPV6_ADDR_MULTICAST) ||
-		     dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL) &&
+		     dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
+		     (idev && idev->cnf.use_oif_addrs_only)) &&
 		    dst.ifindex && dev->ifindex != dst.ifindex)
 			continue;
 
@@ -1507,7 +1543,9 @@
 		if (!net_eq(dev_net(ifp->idev->dev), net))
 			continue;
 		if (ipv6_addr_equal(&ifp->addr, addr) &&
-		    !(ifp->flags&IFA_F_TENTATIVE) &&
+		    (!(ifp->flags&IFA_F_TENTATIVE) ||
+		     (ipv6_use_optimistic_addr(ifp->idev) &&
+		      ifp->flags&IFA_F_OPTIMISTIC)) &&
 		    (dev == NULL || ifp->idev->dev == dev ||
 		     !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) {
 			rcu_read_unlock_bh();
@@ -1959,6 +1997,31 @@
 }
 #endif
 
+u32 addrconf_rt_table(const struct net_device *dev, u32 default_table)
+{
+	/* Determines into what table to put autoconf PIO/RIO/default routes
+	 * learned on this device.
+	 *
+	 * - If 0, use the same table for every device. This puts routes into
+	 *   one of RT_TABLE_{PREFIX,INFO,DFLT} depending on the type of route
+	 *   (but note that these three are currently all equal to
+	 *   RT6_TABLE_MAIN).
+	 * - If > 0, use the specified table.
+	 * - If < 0, put routes into table dev->ifindex + (-rt_table).
+	 */
+	struct inet6_dev *idev = in6_dev_get(dev);
+	u32 table;
+	int sysctl = idev->cnf.accept_ra_rt_table;
+	if (sysctl == 0) {
+		table = default_table;
+	} else if (sysctl > 0) {
+		table = (u32) sysctl;
+	} else {
+		table = (unsigned) dev->ifindex + (-sysctl);
+	}
+	in6_dev_put(idev);
+	return table;
+}
+
 /*
  *	Add prefix route.
  */
@@ -1968,7 +2031,7 @@
 		      unsigned long expires, u32 flags)
 {
 	struct fib6_config cfg = {
-		.fc_table = RT6_TABLE_PREFIX,
+		.fc_table = addrconf_rt_table(dev, RT6_TABLE_PREFIX),
 		.fc_metric = IP6_RT_PRIO_ADDRCONF,
 		.fc_ifindex = dev->ifindex,
 		.fc_expires = expires,
@@ -2002,7 +2065,8 @@
 	struct rt6_info *rt = NULL;
 	struct fib6_table *table;
 
-	table = fib6_get_table(dev_net(dev), RT6_TABLE_PREFIX);
+	table = fib6_get_table(dev_net(dev),
+			       addrconf_rt_table(dev, RT6_TABLE_PREFIX));
 	if (table == NULL)
 		return NULL;
 
@@ -3160,11 +3224,13 @@
 
 	write_unlock_bh(&idev->lock);
 
-	/* Step 5: Discard multicast list */
-	if (how)
+	/* Step 5: Discard anycast and multicast list */
+	if (how) {
+		ipv6_ac_destroy_dev(idev);
 		ipv6_mc_destroy_dev(idev);
-	else
+	} else {
 		ipv6_mc_down(idev);
+	}
 
 	idev->tstamp = jiffies;
 
@@ -3279,8 +3345,15 @@
 	 * Optimistic nodes can start receiving
 	 * Frames right away
 	 */
-	if (ifp->flags & IFA_F_OPTIMISTIC)
+	if (ifp->flags & IFA_F_OPTIMISTIC) {
 		ip6_ins_rt(ifp->rt);
+		if (ipv6_use_optimistic_addr(idev)) {
+			/* Because optimistic nodes can use this address,
+			 * notify listeners. If DAD fails, RTM_DELADDR is sent.
+			 */
+			ipv6_ifa_notify(RTM_NEWADDR, ifp);
+		}
+	}
 
 	addrconf_dad_kick(ifp);
 out:
@@ -4287,10 +4360,12 @@
 	array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
 #endif
 #endif
+	array[DEVCONF_ACCEPT_RA_RT_TABLE] = cnf->accept_ra_rt_table;
 	array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
 	array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
 	array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
+	array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic;
 #endif
 #ifdef CONFIG_IPV6_MROUTE
 	array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding;
@@ -4299,6 +4374,7 @@
 	array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
 	array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
 	array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
+	array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
 }
 
 static inline size_t inet6_ifla6_size(void)
@@ -5002,6 +5078,13 @@
 #endif
 #endif
 		{
+			.procname	= "accept_ra_rt_table",
+			.data		= &ipv6_devconf.accept_ra_rt_table,
+			.maxlen		= sizeof(int),
+			.mode		= 0644,
+			.proc_handler	= proc_dointvec,
+		},
+		{
 			.procname	= "proxy_ndp",
 			.data		= &ipv6_devconf.proxy_ndp,
 			.maxlen		= sizeof(int),
@@ -5024,6 +5107,14 @@
 			.proc_handler   = proc_dointvec,
 
 		},
+		{
+			.procname       = "use_optimistic",
+			.data           = &ipv6_devconf.use_optimistic,
+			.maxlen         = sizeof(int),
+			.mode           = 0644,
+			.proc_handler   = proc_dointvec,
+
+		},
 #endif
 #ifdef CONFIG_IPV6_MROUTE
 		{
@@ -5063,6 +5154,14 @@
 			.proc_handler   = proc_dointvec
 		},
 		{
+			.procname       = "use_oif_addrs_only",
+			.data           = &ipv6_devconf.use_oif_addrs_only,
+			.maxlen         = sizeof(int),
+			.mode           = 0644,
+			.proc_handler   = proc_dointvec,
+
+		},
+		{
 			/* sentinel */
 		}
 	},
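A worked example of the addrconf_rt_table() mapping (hypothetical numbers):
with accept_ra_rt_table = -1000, RA routes learned on a device with ifindex 5
go to table 1005, ifindex 6 to table 1006, and so on. A userspace mirror of
the logic:

/* Mirrors addrconf_rt_table() for illustration; not kernel code. */
static unsigned int ra_route_table(int sysctl, int ifindex,
				   unsigned int default_table)
{
	if (sysctl == 0)
		return default_table;	/* RT6_TABLE_{PREFIX,INFO,DFLT} */
	if (sysctl > 0)
		return (unsigned int)sysctl;	/* one fixed table for everyone */
	return (unsigned int)ifindex + (unsigned int)(-sysctl);	/* per device */
}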
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 9443af7..220fd38 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -49,6 +49,7 @@
 #include <net/udp.h>
 #include <net/udplite.h>
 #include <net/tcp.h>
+#include <net/ping.h>
 #include <net/protocol.h>
 #include <net/inet_common.h>
 #include <net/route.h>
@@ -62,6 +63,20 @@
 #include <asm/uaccess.h>
 #include <linux/mroute6.h>
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+
+static inline int current_has_network(void)
+{
+	return in_egroup_p(AID_INET) || capable(CAP_NET_RAW);
+}
+#else
+static inline int current_has_network(void)
+{
+	return 1;
+}
+#endif
+
 MODULE_AUTHOR("Cast of dozens");
 MODULE_DESCRIPTION("IPv6 protocol stack for Linux");
 MODULE_LICENSE("GPL");
@@ -108,6 +123,9 @@
 	int try_loading_module = 0;
 	int err;
 
+	if (!current_has_network())
+		return -EACCES;
+
 	if (sock->type != SOCK_RAW &&
 	    sock->type != SOCK_DGRAM &&
 	    !inet_ehash_secret)
@@ -162,8 +180,7 @@
 	}
 
 	err = -EPERM;
-	if (sock->type == SOCK_RAW && !kern &&
-	    !ns_capable(net->user_ns, CAP_NET_RAW))
+	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
 		goto out_rcu_unlock;
 
 	sock->ops = answer->ops;
@@ -480,6 +497,21 @@
 }
 EXPORT_SYMBOL(inet6_getname);
 
+int inet6_killaddr_ioctl(struct net *net, void __user *arg)
+{
+	struct in6_ifreq ireq;
+	struct sockaddr_in6 sin6;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EACCES;
+
+	if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
+		return -EFAULT;
+
+	sin6.sin6_family = AF_INET6;
+	sin6.sin6_addr = ireq.ifr6_addr;
+	return tcp_nuke_addr(net, (struct sockaddr *) &sin6);
+}
+
 int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 {
 	struct sock *sk = sock->sk;
@@ -503,6 +535,8 @@
 		return addrconf_del_ifaddr(net, (void __user *) arg);
 	case SIOCSIFDSTADDR:
 		return addrconf_set_dstaddr(net, (void __user *) arg);
+	case SIOCKILLADDR:
+		return inet6_killaddr_ioctl(net, (void __user *) arg);
 	default:
 		if (!sk->sk_prot->ioctl)
 			return -ENOIOCTLCMD;
@@ -663,6 +697,7 @@
 		fl6.flowi6_mark = sk->sk_mark;
 		fl6.fl6_dport = inet->inet_dport;
 		fl6.fl6_sport = inet->inet_sport;
+		fl6.flowi6_uid = sock_i_uid(sk);
 		security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
 		final_p = fl6_update_dst(&fl6, np->opt, &final);
@@ -843,6 +878,9 @@
 	if (err)
 		goto out_unregister_udplite_proto;
 
+	err = proto_register(&pingv6_prot, 1);
+	if (err)
+		goto out_unregister_ping_proto;
 
 	/* We MUST register RAW sockets before we create the ICMP6,
 	 * IGMP6, or NDISC control sockets.
@@ -936,6 +974,10 @@
 	if (err)
 		goto ipv6_packet_fail;
 
+	err = pingv6_init();
+	if (err)
+		goto pingv6_fail;
+
 #ifdef CONFIG_SYSCTL
 	err = ipv6_sysctl_register();
 	if (err)
@@ -948,6 +990,8 @@
 sysctl_fail:
 	ipv6_packet_cleanup();
 #endif
+pingv6_fail:
+	pingv6_exit();
 ipv6_packet_fail:
 	tcpv6_exit();
 tcpv6_fail:
@@ -993,6 +1037,8 @@
 	rtnl_unregister_all(PF_INET6);
 out_sock_register_fail:
 	rawv6_exit();
+out_unregister_ping_proto:
+	proto_unregister(&pingv6_prot);
 out_unregister_raw_proto:
 	proto_unregister(&rawv6_prot);
 out_unregister_udplite_proto:
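SIOCKILLADDR is the userspace entry point to tcp_nuke_addr() for IPv6;
Android's connectivity code issues it when an interface loses an address so
that stale TCP connections fail fast. A hedged sketch (SIOCKILLADDR and
struct in6_ifreq come from this tree's uapi headers, not mainline):

#include <netinet/in.h>
#include <linux/ipv6.h>		/* struct in6_ifreq */
#include <linux/sockios.h>	/* SIOCKILLADDR, in this tree only */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

/* Reset every TCP socket bound to the given (now stale) address. */
static int kill_addr6(const struct in6_addr *addr)
{
	struct in6_ifreq ireq;
	int ret;
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ireq, 0, sizeof(ireq));
	ireq.ifr6_addr = *addr;		/* :: kills all unconfigured addresses */
	ret = ioctl(fd, SIOCKILLADDR, &ireq);	/* needs CAP_NET_ADMIN */
	close(fd);
	return ret;			/* affected sockets see ETIMEDOUT */
}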
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index bb02e17..b903e19 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -630,7 +630,7 @@
 	if (type == NDISC_REDIRECT)
 		ip6_redirect(skb, net, 0, 0);
 	else
-		ip6_update_pmtu(skb, net, info, 0, 0);
+		ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID);
 	xfrm_state_put(x);
 }
 
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index c59083c..a2ffd09 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -353,6 +353,27 @@
 	return __ipv6_dev_ac_dec(idev, addr);
 }
 
+void ipv6_ac_destroy_dev(struct inet6_dev *idev)
+{
+	struct ifacaddr6 *aca;
+
+	write_lock_bh(&idev->lock);
+	while ((aca = idev->ac_list) != NULL) {
+		idev->ac_list = aca->aca_next;
+		write_unlock_bh(&idev->lock);
+
+		addrconf_leave_solict(idev, &aca->aca_addr);
+
+		dst_hold(&aca->aca_rt->dst);
+		ip6_del_rt(aca->aca_rt);
+
+		aca_put(aca);
+
+		write_lock_bh(&idev->lock);
+	}
+	write_unlock_bh(&idev->lock);
+}
+
 /*
  *	check if the interface has this anycast address
  *	called with rcu_read_lock()
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index b0d5d7e..c560b6b 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -162,6 +162,7 @@
 	fl6.flowi6_mark = sk->sk_mark;
 	fl6.fl6_dport = inet->inet_dport;
 	fl6.fl6_sport = inet->inet_sport;
+	fl6.flowi6_uid = sock_i_uid(sk);
 
 	if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
 		fl6.flowi6_oif = np->mcast_oif;
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 40ffd72..fdc81cb 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -449,7 +449,7 @@
 	if (type == NDISC_REDIRECT)
 		ip6_redirect(skb, net, 0, 0);
 	else
-		ip6_update_pmtu(skb, net, info, 0, 0);
+		ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID);
 	xfrm_state_put(x);
 }
 
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
index f66c1b6..06fc6bf 100644
--- a/net/ipv6/exthdrs_core.c
+++ b/net/ipv6/exthdrs_core.c
@@ -166,15 +166,15 @@
  * to explore inner IPv6 header, eg. ICMPv6 error messages.
  *
  * If target header is found, its offset is set in *offset and return protocol
- * number. Otherwise, return -1.
+ * number. Otherwise, return -ENOENT or -EBADMSG.
  *
  * If the first fragment doesn't contain the final protocol header or
  * NEXTHDR_NONE it is considered invalid.
  *
  * Note that non-1st fragment is special case that "the protocol number
  * of last header" is "next header" field in Fragment header. In this case,
- * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
- * isn't NULL.
+ * *offset is meaningless. If fragoff is not NULL, the fragment offset is
+ * stored in *fragoff; if it is NULL, return -EINVAL.
  *
  * if flags is not NULL and it's a fragment, then the frag flag
  * IP6_FH_F_FRAG will be set. If it's an AH header, the
@@ -253,9 +253,12 @@
 				if (target < 0 &&
 				    ((!ipv6_ext_hdr(hp->nexthdr)) ||
 				     hp->nexthdr == NEXTHDR_NONE)) {
-					if (fragoff)
+					if (fragoff) {
 						*fragoff = _frag_off;
-					return hp->nexthdr;
+						return hp->nexthdr;
+					} else {
+						return -EINVAL;
+					}
 				}
 				if (!found)
 					return -ENOENT;
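With the stricter return values above, a caller that passes a non-NULL fragoff
still gets the next-header value for non-first fragments, while a NULL fragoff
now yields -EINVAL instead of a bogus success. A hypothetical kernel-side
caller:

/* Hypothetical caller: locate the TCP header in an IPv6 skb. */
static int find_tcp_offset(const struct sk_buff *skb)
{
	unsigned int thoff = 0;
	unsigned short frag_off = 0;
	int nexthdr = ipv6_find_hdr(skb, &thoff, IPPROTO_TCP, &frag_off, NULL);

	if (nexthdr < 0)	/* -ENOENT, -EBADMSG or (now) -EINVAL */
		return nexthdr;
	if (frag_off)		/* non-first fragment: no TCP header here */
		return -EINVAL;
	return (int)thoff;
}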
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 2dee1d9..c1b611c 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -57,6 +57,7 @@
 
 #include <net/ipv6.h>
 #include <net/ip6_checksum.h>
+#include <net/ping.h>
 #include <net/protocol.h>
 #include <net/raw.h>
 #include <net/rawv6.h>
@@ -84,12 +85,18 @@
 static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		       u8 type, u8 code, int offset, __be32 info)
 {
+	/* icmpv6_notify() checks that 8 bytes can be pulled; icmp6hdr is 8 bytes */
+	struct icmp6hdr *icmp6 = (struct icmp6hdr *) (skb->data + offset);
 	struct net *net = dev_net(skb->dev);
 
 	if (type == ICMPV6_PKT_TOOBIG)
-		ip6_update_pmtu(skb, net, info, 0, 0);
+		ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID);
 	else if (type == NDISC_REDIRECT)
 		ip6_redirect(skb, net, 0, 0);
+
+	if (!(type & ICMPV6_INFOMSG_MASK))
+		if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
+			ping_err(skb, offset, info);
 }
 
 static int icmpv6_rcv(struct sk_buff *skb);
@@ -224,7 +231,8 @@
 	return (*op & 0xC0) == 0x80;
 }
 
-static int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, struct icmp6hdr *thdr, int len)
+int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
+			       struct icmp6hdr *thdr, int len)
 {
 	struct sk_buff *skb;
 	struct icmp6hdr *icmp6h;
@@ -307,8 +315,8 @@
 static inline void mip6_addr_swap(struct sk_buff *skb) {}
 #endif
 
-static struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb,
-					     struct sock *sk, struct flowi6 *fl6)
+struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb,
+				      struct sock *sk, struct flowi6 *fl6)
 {
 	struct dst_entry *dst, *dst2;
 	struct flowi6 fl2;
@@ -389,6 +397,7 @@
 	int len;
 	int hlimit;
 	int err = 0;
+	u32 mark = IP6_REPLY_MARK(net, skb->mark);
 
 	if ((u8 *)hdr < skb->head ||
 	    (skb->network_header + sizeof(*hdr)) > skb->tail)
@@ -454,6 +463,7 @@
 	fl6.daddr = hdr->saddr;
 	if (saddr)
 		fl6.saddr = *saddr;
+	fl6.flowi6_mark = mark;
 	fl6.flowi6_oif = iif;
 	fl6.fl6_icmp_type = type;
 	fl6.fl6_icmp_code = code;
@@ -462,6 +472,7 @@
 	sk = icmpv6_xmit_lock(net);
 	if (sk == NULL)
 		return;
+	sk->sk_mark = mark;
 	np = inet6_sk(sk);
 
 	if (!icmpv6_xrlim_allow(sk, type, &fl6))
@@ -543,6 +554,7 @@
 	struct dst_entry *dst;
 	int err = 0;
 	int hlimit;
+	u32 mark = IP6_REPLY_MARK(net, skb->mark);
 
 	saddr = &ipv6_hdr(skb)->daddr;
 
@@ -559,11 +571,13 @@
 		fl6.saddr = *saddr;
 	fl6.flowi6_oif = skb->dev->ifindex;
 	fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
+	fl6.flowi6_mark = mark;
 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 
 	sk = icmpv6_xmit_lock(net);
 	if (sk == NULL)
 		return;
+	sk->sk_mark = mark;
 	np = inet6_sk(sk);
 
 	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
@@ -697,7 +711,8 @@
 		skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len,
 					     IPPROTO_ICMPV6, 0));
 		if (__skb_checksum_complete(skb)) {
-			LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [%pI6 > %pI6]\n",
+			LIMIT_NETDEBUG(KERN_DEBUG
+				       "ICMPv6 checksum failed [%pI6c > %pI6c]\n",
 				       saddr, daddr);
 			goto csum_error;
 		}
@@ -718,7 +733,7 @@
 		break;
 
 	case ICMPV6_ECHO_REPLY:
-		/* we couldn't care less */
+		ping_rcv(skb);
 		break;
 
 	case ICMPV6_PKT_TOOBIG:
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index e4311cb..65a4605 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -81,9 +81,10 @@
 	final_p = fl6_update_dst(fl6, np->opt, &final);
 	fl6->saddr = treq->loc_addr;
 	fl6->flowi6_oif = treq->iif;
-	fl6->flowi6_mark = sk->sk_mark;
+	fl6->flowi6_mark = inet_rsk(req)->ir_mark;
 	fl6->fl6_dport = inet_rsk(req)->rmt_port;
 	fl6->fl6_sport = inet_rsk(req)->loc_port;
+	fl6->flowi6_uid = sock_i_uid(sk);
 	security_req_classify_flow(req, flowi6_to_flowi(fl6));
 
 	dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
@@ -211,6 +212,7 @@
 	fl6->flowi6_mark = sk->sk_mark;
 	fl6->fl6_sport = inet->inet_sport;
 	fl6->fl6_dport = inet->inet_dport;
+	fl6->flowi6_uid = sock_i_uid(sk);
 	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
 
 	final_p = fl6_update_dst(fl6, np->opt, &final);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 8344f68..4147716 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -701,7 +701,7 @@
 	struct mr6_table *mrt;
 	struct flowi6 fl6 = {
 		.flowi6_oif	= dev->ifindex,
-		.flowi6_iif	= skb->skb_iif,
+		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
 		.flowi6_mark	= skb->mark,
 	};
 	int err;
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 7af5aee..a1beb59 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -78,7 +78,7 @@
 	if (type == NDISC_REDIRECT)
 		ip6_redirect(skb, net, 0, 0);
 	else
-		ip6_update_pmtu(skb, net, info, 0, 0);
+		ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID);
 	xfrm_state_put(x);
 }
 
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 4433ab40..7f45af3 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -153,6 +153,18 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config IP6_NF_TARGET_REJECT_SKERR
+	bool "Force socket error when rejecting with icmp*"
+	depends on IP6_NF_TARGET_REJECT
+	default n
+	help
+	  This option additionally turns a "--reject-with icmp*" into a
+	  matching socket error.
+	  The REJECT target normally only sends an ICMP message, which
+	  leaves the local socket unaware of any ingress rejects.
+
+	  If unsure, say N.
+
 config IP6_NF_MANGLE
 	tristate "Packet mangling"
 	default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 70f9abc..573c232 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -180,6 +180,15 @@
 		skb_in->dev = net->loopback_dev;
 
 	icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
+#ifdef CONFIG_IP6_NF_TARGET_REJECT_SKERR
+	if (skb_in->sk) {
+		icmpv6_err_convert(ICMPV6_DEST_UNREACH, code,
+				   &skb_in->sk->sk_err);
+		skb_in->sk->sk_error_report(skb_in->sk);
+		pr_debug("ip6t_REJECT: sk_err=%d for skb=%p sk=%p\n",
+			skb_in->sk->sk_err, skb_in, skb_in->sk);
+	}
+#endif
 }
 
 static unsigned int
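
With CONFIG_IP6_NF_TARGET_REJECT_SKERR enabled, the errno produced by icmpv6_err_convert() above becomes visible on the rejected local socket. A hypothetical userspace check (illustrative only, not part of this patch):

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>

	/* Hypothetical helper: read the asynchronous error that ip6t_REJECT
	 * now injects via sk_err, instead of waiting for a timeout.
	 */
	static void report_pending_sk_error(int fd)
	{
		int err = 0;
		socklen_t len = sizeof(err);

		if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) == 0 && err)
			fprintf(stderr, "socket error: %s\n", strerror(err));
	}
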
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
new file mode 100644
index 0000000..fc05fdc
--- /dev/null
+++ b/net/ipv6/ping.c
@@ -0,0 +1,225 @@
+/*
+ * INET		An implementation of the TCP/IP protocol suite for the LINUX
+ *		operating system.  INET is implemented using the  BSD Socket
+ *		interface as the means of communication with the user level.
+ *
+ *		"Ping" sockets
+ *
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		as published by the Free Software Foundation; either version
+ *		2 of the License, or (at your option) any later version.
+ *
+ * Based on ipv4/ping.c code.
+ *
+ * Authors:	Lorenzo Colitti (IPv6 support)
+ *		Vasiliy Kulikov / Openwall (IPv4 implementation, for Linux 2.6),
+ *		Pavel Kankovsky (IPv4 implementation, for Linux 2.4.32)
+ *
+ */
+
+#include <net/addrconf.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/protocol.h>
+#include <net/udp.h>
+#include <net/transp_v6.h>
+#include <net/ping.h>
+
+struct proto pingv6_prot = {
+	.name =		"PINGv6",
+	.owner =	THIS_MODULE,
+	.init =		ping_init_sock,
+	.close =	ping_close,
+	.connect =	ip6_datagram_connect,
+	.disconnect =	udp_disconnect,
+	.setsockopt =	ipv6_setsockopt,
+	.getsockopt =	ipv6_getsockopt,
+	.sendmsg =	ping_v6_sendmsg,
+	.recvmsg =	ping_recvmsg,
+	.bind =		ping_bind,
+	.backlog_rcv =	ping_queue_rcv_skb,
+	.hash =		ping_hash,
+	.unhash =	ping_unhash,
+	.get_port =	ping_get_port,
+	.obj_size =	sizeof(struct raw6_sock),
+};
+EXPORT_SYMBOL_GPL(pingv6_prot);
+
+static struct inet_protosw pingv6_protosw = {
+	.type =      SOCK_DGRAM,
+	.protocol =  IPPROTO_ICMPV6,
+	.prot =      &pingv6_prot,
+	.ops =       &inet6_dgram_ops,
+	.no_check =  UDP_CSUM_DEFAULT,
+	.flags =     INET_PROTOSW_REUSE,
+};
+
+/* Compatibility glue so we can support IPv6 when it's compiled as a module */
+int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
+				int *addr_len)
+{
+	return -EAFNOSUPPORT;
+}
+
+int dummy_ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
+				 struct sk_buff *skb)
+{
+	return -EAFNOSUPPORT;
+}
+
+int dummy_icmpv6_err_convert(u8 type, u8 code, int *err)
+{
+	return -EAFNOSUPPORT;
+}
+
+void dummy_ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
+			    __be16 port, u32 info, u8 *payload) {}
+
+int dummy_ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
+			const struct net_device *dev, int strict)
+{
+	return 0;
+}
+
+int __init pingv6_init(void)
+{
+	pingv6_ops.ipv6_recv_error = ipv6_recv_error;
+	pingv6_ops.ip6_datagram_recv_ctl = ip6_datagram_recv_ctl;
+	pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
+	pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
+	pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
+	return inet6_register_protosw(&pingv6_protosw);
+}
+
+/* This never gets called because it's not possible to unload the ipv6 module,
+ * but just in case.
+ */
+void pingv6_exit(void)
+{
+	pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
+	pingv6_ops.ip6_datagram_recv_ctl = dummy_ip6_datagram_recv_ctl;
+	pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
+	pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
+	pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
+	inet6_unregister_protosw(&pingv6_protosw);
+}
+
+int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+		    size_t len)
+{
+	struct inet_sock *inet = inet_sk(sk);
+	struct ipv6_pinfo *np = inet6_sk(sk);
+	struct icmp6hdr user_icmph;
+	int addr_type;
+	struct in6_addr *daddr;
+	int iif = 0;
+	struct flowi6 fl6;
+	int err;
+	int hlimit;
+	struct dst_entry *dst;
+	struct rt6_info *rt;
+	struct pingfakehdr pfh;
+
+	pr_debug("ping_v6_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num);
+
+	err = ping_common_sendmsg(AF_INET6, msg, len, &user_icmph,
+				  sizeof(user_icmph));
+	if (err)
+		return err;
+
+	if (msg->msg_name) {
+		struct sockaddr_in6 *u = (struct sockaddr_in6 *) msg->msg_name;
+		if (msg->msg_namelen < sizeof(*u))
+			return -EINVAL;
+		if (u->sin6_family != AF_INET6) {
+			return -EAFNOSUPPORT;
+		}
+		if (sk->sk_bound_dev_if &&
+		    sk->sk_bound_dev_if != u->sin6_scope_id) {
+			return -EINVAL;
+		}
+		daddr = &(u->sin6_addr);
+		iif = u->sin6_scope_id;
+	} else {
+		if (sk->sk_state != TCP_ESTABLISHED)
+			return -EDESTADDRREQ;
+		daddr = &np->daddr;
+	}
+
+	if (!iif)
+		iif = sk->sk_bound_dev_if;
+
+	addr_type = ipv6_addr_type(daddr);
+	if (__ipv6_addr_needs_scope_id(addr_type) && !iif)
+		return -EINVAL;
+	if (addr_type & IPV6_ADDR_MAPPED)
+		return -EINVAL;
+
+	/* TODO: use ip6_datagram_send_ctl to get options from cmsg */
+
+	memset(&fl6, 0, sizeof(fl6));
+
+	fl6.flowi6_proto = IPPROTO_ICMPV6;
+	fl6.saddr = np->saddr;
+	fl6.daddr = *daddr;
+	fl6.flowi6_mark = sk->sk_mark;
+	fl6.flowi6_uid = sock_i_uid(sk);
+	fl6.fl6_icmp_type = user_icmph.icmp6_type;
+	fl6.fl6_icmp_code = user_icmph.icmp6_code;
+	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+
+	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
+		fl6.flowi6_oif = np->mcast_oif;
+	else if (!fl6.flowi6_oif)
+		fl6.flowi6_oif = np->ucast_oif;
+
+	dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr, 1);
+	if (IS_ERR(dst))
+		return PTR_ERR(dst);
+	rt = (struct rt6_info *) dst;
+
+	np = inet6_sk(sk);
+	if (!np)
+		return -EBADF;
+
+	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
+		fl6.flowi6_oif = np->mcast_oif;
+	else if (!fl6.flowi6_oif)
+		fl6.flowi6_oif = np->ucast_oif;
+
+	pfh.icmph.type = user_icmph.icmp6_type;
+	pfh.icmph.code = user_icmph.icmp6_code;
+	pfh.icmph.checksum = 0;
+	pfh.icmph.un.echo.id = inet->inet_sport;
+	pfh.icmph.un.echo.sequence = user_icmph.icmp6_sequence;
+	pfh.iov = msg->msg_iov;
+	pfh.wcheck = 0;
+	pfh.family = AF_INET6;
+
+	if (ipv6_addr_is_multicast(&fl6.daddr))
+		hlimit = np->mcast_hops;
+	else
+		hlimit = np->hop_limit;
+	if (hlimit < 0)
+		hlimit = ip6_dst_hoplimit(dst);
+
+	lock_sock(sk);
+	err = ip6_append_data(sk, ping_getfrag, &pfh, len,
+			      0, hlimit,
+			      np->tclass, NULL, &fl6, rt,
+			      MSG_DONTWAIT, np->dontfrag);
+
+	if (err) {
+		ICMP6_INC_STATS_BH(sock_net(sk), rt->rt6i_idev,
+				   ICMP6_MIB_OUTERRORS);
+		ip6_flush_pending_frames(sk);
+	} else {
+		err = icmpv6_push_pending_frames(sk, &fl6,
+						 (struct icmp6hdr *) &pfh.icmph,
+						 len);
+	}
+	release_sock(sk);
+
+	if (err)
+		return err;
+
+	return len;
+}
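
As a usage sketch, the protosw registered above lets unprivileged code open an ICMPv6 datagram socket, gated by the same ping_group_range sysctl as IPv4 ping sockets (hypothetical example, not part of the patch):

	#include <netinet/icmp6.h>
	#include <netinet/in.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <unistd.h>

	/* Hypothetical sender: the kernel fills in the echo id and checksum. */
	static int send_echo_request(const struct in6_addr *dst)
	{
		struct icmp6_hdr hdr;
		struct sockaddr_in6 sa;
		int fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_ICMPV6);

		if (fd < 0)
			return -1;

		memset(&hdr, 0, sizeof(hdr));
		hdr.icmp6_type = ICMP6_ECHO_REQUEST;

		memset(&sa, 0, sizeof(sa));
		sa.sin6_family = AF_INET6;
		sa.sin6_addr = *dst;

		if (sendto(fd, &hdr, sizeof(hdr), 0,
			   (struct sockaddr *)&sa, sizeof(sa)) < 0) {
			close(fd);
			return -1;
		}
		return close(fd);
	}
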
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 464b1c9..a9db8d2 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -759,6 +759,7 @@
 	memset(&fl6, 0, sizeof(fl6));
 
 	fl6.flowi6_mark = sk->sk_mark;
+	fl6.flowi6_uid = sock_i_uid(sk);
 
 	if (sin6) {
 		if (addr_len < SIN6_LEN_RFC2133)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index fb5010c..e55a8eb 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -93,13 +93,12 @@
 					struct sk_buff *skb);
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
-static struct rt6_info *rt6_add_route_info(struct net *net,
+static struct rt6_info *rt6_add_route_info(struct net_device *dev,
 					   const struct in6_addr *prefix, int prefixlen,
-					   const struct in6_addr *gwaddr, int ifindex,
-					   unsigned int pref);
-static struct rt6_info *rt6_get_route_info(struct net *net,
+					   const struct in6_addr *gwaddr, unsigned int pref);
+static struct rt6_info *rt6_get_route_info(struct net_device *dev,
 					   const struct in6_addr *prefix, int prefixlen,
-					   const struct in6_addr *gwaddr, int ifindex);
+					   const struct in6_addr *gwaddr);
 #endif
 
 static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
@@ -685,7 +684,6 @@
 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
 		  const struct in6_addr *gwaddr)
 {
-	struct net *net = dev_net(dev);
 	struct route_info *rinfo = (struct route_info *) opt;
 	struct in6_addr prefix_buf, *prefix;
 	unsigned int pref;
@@ -727,11 +725,7 @@
 		prefix = &prefix_buf;
 	}
 
-	if (rinfo->prefix_len == 0)
-		rt = rt6_get_dflt_router(gwaddr, dev);
-	else
-		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
-					gwaddr, dev->ifindex);
+	rt = rt6_get_route_info(dev, prefix, rinfo->prefix_len, gwaddr);
 
 	if (rt && !lifetime) {
 		ip6_del_rt(rt);
@@ -739,8 +733,7 @@
 	}
 
 	if (!rt && lifetime)
-		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
-					pref);
+		rt = rt6_add_route_info(dev, prefix, rinfo->prefix_len, gwaddr, pref);
 	else if (rt)
 		rt->rt6i_flags = RTF_ROUTEINFO |
 				 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
@@ -1150,7 +1143,7 @@
 }
 
 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
-		     int oif, u32 mark)
+		     int oif, u32 mark, kuid_t uid)
 {
 	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
 	struct dst_entry *dst;
@@ -1158,11 +1151,12 @@
 
 	memset(&fl6, 0, sizeof(fl6));
 	fl6.flowi6_oif = oif;
-	fl6.flowi6_mark = mark;
+	fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
 	fl6.flowi6_flags = 0;
 	fl6.daddr = iph->daddr;
 	fl6.saddr = iph->saddr;
 	fl6.flowlabel = ip6_flowinfo(iph);
+	fl6.flowi6_uid = uid;
 
 	dst = ip6_route_output(net, NULL, &fl6);
 	if (!dst->error)
@@ -1174,7 +1168,7 @@
 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
 {
 	ip6_update_pmtu(skb, sock_net(sk), mtu,
-			sk->sk_bound_dev_if, sk->sk_mark);
+			sk->sk_bound_dev_if, sk->sk_mark, sock_i_uid(sk));
 }
 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
 
@@ -1847,15 +1841,16 @@
 }
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
-static struct rt6_info *rt6_get_route_info(struct net *net,
+static struct rt6_info *rt6_get_route_info(struct net_device *dev,
 					   const struct in6_addr *prefix, int prefixlen,
-					   const struct in6_addr *gwaddr, int ifindex)
+					   const struct in6_addr *gwaddr)
 {
 	struct fib6_node *fn;
 	struct rt6_info *rt = NULL;
 	struct fib6_table *table;
 
-	table = fib6_get_table(net, RT6_TABLE_INFO);
+	table = fib6_get_table(dev_net(dev),
+			       addrconf_rt_table(dev, RT6_TABLE_INFO));
 	if (!table)
 		return NULL;
 
@@ -1865,7 +1860,7 @@
 		goto out;
 
 	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
-		if (rt->dst.dev->ifindex != ifindex)
+		if (rt->dst.dev->ifindex != dev->ifindex)
 			continue;
 		if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
 			continue;
@@ -1879,21 +1874,20 @@
 	return rt;
 }
 
-static struct rt6_info *rt6_add_route_info(struct net *net,
+static struct rt6_info *rt6_add_route_info(struct net_device *dev,
 					   const struct in6_addr *prefix, int prefixlen,
-					   const struct in6_addr *gwaddr, int ifindex,
-					   unsigned int pref)
+					   const struct in6_addr *gwaddr, unsigned int pref)
 {
 	struct fib6_config cfg = {
-		.fc_table	= RT6_TABLE_INFO,
+		.fc_table	= addrconf_rt_table(dev, RT6_TABLE_INFO),
 		.fc_metric	= IP6_RT_PRIO_USER,
-		.fc_ifindex	= ifindex,
+		.fc_ifindex	= dev->ifindex,
 		.fc_dst_len	= prefixlen,
 		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
 				  RTF_UP | RTF_PREF(pref),
 		.fc_nlinfo.portid = 0,
 		.fc_nlinfo.nlh = NULL,
-		.fc_nlinfo.nl_net = net,
+		.fc_nlinfo.nl_net = dev_net(dev),
 	};
 
 	cfg.fc_dst = *prefix;
@@ -1905,7 +1899,7 @@
 
 	ip6_route_add(&cfg);
 
-	return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
+	return rt6_get_route_info(dev, prefix, prefixlen, gwaddr);
 }
 #endif
 
@@ -1914,7 +1908,8 @@
 	struct rt6_info *rt;
 	struct fib6_table *table;
 
-	table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
+	table = fib6_get_table(dev_net(dev),
+			       addrconf_rt_table(dev, RT6_TABLE_MAIN));
 	if (!table)
 		return NULL;
 
@@ -1936,7 +1931,7 @@
 				     unsigned int pref)
 {
 	struct fib6_config cfg = {
-		.fc_table	= RT6_TABLE_DFLT,
+		.fc_table	= addrconf_rt_table(dev, RT6_TABLE_DFLT),
 		.fc_metric	= IP6_RT_PRIO_USER,
 		.fc_ifindex	= dev->ifindex,
 		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
@@ -1953,28 +1948,17 @@
 	return rt6_get_dflt_router(gwaddr, dev);
 }
 
+
+int rt6_addrconf_purge(struct rt6_info *rt, void *arg)
+{
+	if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
+	    (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2))
+		return -1;
+	return 0;
+}
+
 void rt6_purge_dflt_routers(struct net *net)
 {
-	struct rt6_info *rt;
-	struct fib6_table *table;
-
-	/* NOTE: Keep consistent with rt6_get_dflt_router */
-	table = fib6_get_table(net, RT6_TABLE_DFLT);
-	if (!table)
-		return;
-
-restart:
-	read_lock_bh(&table->tb6_lock);
-	for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
-		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
-		    (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
-			dst_hold(&rt->dst);
-			read_unlock_bh(&table->tb6_lock);
-			ip6_del_rt(rt);
-			goto restart;
-		}
-	}
-	read_unlock_bh(&table->tb6_lock);
+	fib6_clean_all(net, rt6_addrconf_purge, 0, NULL);
 }
 
 static void rtmsg_to_fib6_config(struct net *net,
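
The rt6_purge_dflt_routers() rewrite above leans on the fib6_clean_all() walker; a brief note on the contract assumed here:

	/* Assumed contract: fib6_clean_all() visits every route in every
	 * FIB table and deletes (via fib6_del()) each entry for which the
	 * callback returns a negative value, so rt6_addrconf_purge()
	 * returning -1 for RA-learned default routes reproduces the old
	 * restart-based loop.
	 */
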
@@ -2259,6 +2243,7 @@
 	[RTA_PRIORITY]          = { .type = NLA_U32 },
 	[RTA_METRICS]           = { .type = NLA_NESTED },
 	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
+	[RTA_UID]		= { .type = NLA_U32 },
 };
 
 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -2647,6 +2632,15 @@
 	if (tb[RTA_OIF])
 		oif = nla_get_u32(tb[RTA_OIF]);
 
+	if (tb[RTA_MARK])
+		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
+
+	if (tb[RTA_UID])
+		fl6.flowi6_uid = make_kuid(current_user_ns(),
+					   nla_get_u32(tb[RTA_UID]));
+	else
+		fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
+
 	if (iif) {
 		struct net_device *dev;
 		int flags = 0;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index d5dda20..ba8622d 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -212,6 +212,8 @@
 	    ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
 		ireq6->iif = inet6_iif(skb);
 
+	ireq->ir_mark = inet_request_mark(sk, skb);
+
 	req->expires = 0UL;
 	req->num_retrans = 0;
 	ireq->ecn_ok		= ecn_ok;
@@ -238,9 +240,10 @@
 		final_p = fl6_update_dst(&fl6, np->opt, &final);
 		fl6.saddr = ireq6->loc_addr;
 		fl6.flowi6_oif = sk->sk_bound_dev_if;
-		fl6.flowi6_mark = sk->sk_mark;
+		fl6.flowi6_mark = ireq->ir_mark;
 		fl6.fl6_dport = inet_rsk(req)->rmt_port;
 		fl6.fl6_sport = inet_sk(sk)->inet_sport;
+		fl6.flowi6_uid = sock_i_uid(sk);
 		security_req_classify_flow(req, flowi6_to_flowi(&fl6));
 
 		dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
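
inet_request_mark(), used here and in the tcp_ipv6.c hunk below, is assumed to come from the companion tcp_fwmark_accept change; a sketch of its expected shape (signature and sysctl name are assumptions):

	/* Sketch: a request socket inherits the incoming skb's mark when the
	 * listener itself is unmarked and the accept-mark sysctl allows it.
	 */
	static inline u32 inet_request_mark(struct sock *sk, struct sk_buff *skb)
	{
		if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)
			return skb->mark;
		return sk->sk_mark;
	}
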
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index e85c48b..53a9f5a 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -24,6 +24,13 @@
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec
 	},
+	{
+		.procname	= "fwmark_reflect",
+		.data		= &init_net.ipv6.sysctl.fwmark_reflect,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec
+	},
 	{ }
 };
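
The new entry surfaces as /proc/sys/net/ipv6/fwmark_reflect; a hypothetical snippet toggling it at runtime:

	#include <fcntl.h>
	#include <unistd.h>

	/* Hypothetical helper: enable reply-mark reflection by writing to
	 * the procfs node registered above.
	 */
	static int enable_fwmark_reflect(void)
	{
		int fd = open("/proc/sys/net/ipv6/fwmark_reflect", O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, "1", 1) != 1) {
			close(fd);
			return -1;
		}
		return close(fd);
	}
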
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 70b10ed..89ef9e5 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -252,6 +252,7 @@
 	fl6.flowi6_mark = sk->sk_mark;
 	fl6.fl6_dport = usin->sin6_port;
 	fl6.fl6_sport = inet->inet_sport;
+	fl6.flowi6_uid = sock_i_uid(sk);
 
 	final_p = fl6_update_dst(&fl6, np->opt, &final);
 
@@ -791,6 +792,7 @@
 	fl6.flowi6_proto = IPPROTO_TCP;
 	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
 		fl6.flowi6_oif = inet6_iif(skb);
+	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
 	fl6.fl6_dport = t1->dest;
 	fl6.fl6_sport = t1->source;
 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
@@ -1005,6 +1007,7 @@
 		TCP_ECN_create_request(req, skb, sock_net(sk));
 
 	treq->iif = sk->sk_bound_dev_if;
+	inet_rsk(req)->ir_mark = inet_request_mark(sk, skb);
 
 	/* So that link locals have meaning */
 	if (!sk->sk_bound_dev_if &&
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d234e6f..2d3bc06 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1147,6 +1147,7 @@
 		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
 
 	fl6.flowi6_mark = sk->sk_mark;
+	fl6.flowi6_uid = sock_i_uid(sk);
 
 	if (msg->msg_controllen) {
 		opt = &opt_space;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 56d22ca..bdf9c60 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -1089,6 +1089,8 @@
 	based on who created the socket: the user or group. It is also
 	possible to check whether a socket actually exists.
 
+	Conflicts with the '"quota, tag, owner" match'.
+
 config NETFILTER_XT_MATCH_POLICY
 	tristate 'IPsec "policy" match support'
 	depends on XFRM
@@ -1122,6 +1124,22 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config NETFILTER_XT_MATCH_QTAGUID
+	bool '"quota, tag, owner" match and stats support'
+	depends on NETFILTER_XT_MATCH_SOCKET
+	depends on NETFILTER_XT_MATCH_OWNER=n
+	help
+	  This option replaces the `owner' match. In addition to matching
+	  on uid, it keeps stats based on a tag assigned to a socket.
+	  The full tag is comprised of a UID and an accounting tag.
+	  The tags are assignable to sockets from user space (e.g. a download
+	  manager can assign the socket to another UID for accounting).
+	  Stats and control are done via /proc/net/xt_qtaguid/.
+	  It replaces the owner match in that it takes the same arguments,
+	  but it must still be recognized by the iptables tool.
+
+	  If unsure, say `N'.
+
 config NETFILTER_XT_MATCH_QUOTA
 	tristate '"quota" match support'
 	depends on NETFILTER_ADVANCED
@@ -1132,6 +1150,30 @@
 	  If you want to compile it as a module, say M here and read
 	  <file:Documentation/kbuild/modules.txt>.  If unsure, say `N'.
 
+config NETFILTER_XT_MATCH_QUOTA2
+	tristate '"quota2" match support'
+	depends on NETFILTER_ADVANCED
+	help
+	  This option adds a `quota2' match, which matches on a byte counter
+	  that is maintained correctly rather than per CPU.
+	  It allows naming the quotas.
+	  This is based on http://xtables-addons.git.sourceforge.net
+
+	  If you want to compile it as a module, say M here and read
+	  <file:Documentation/kbuild/modules.txt>.  If unsure, say `N'.
+
+config NETFILTER_XT_MATCH_QUOTA2_LOG
+	bool '"quota2" Netfilter LOG support'
+	depends on NETFILTER_XT_MATCH_QUOTA2
+	depends on IP_NF_TARGET_ULOG=n    # not yes, not module, just no
+	default n
+	help
+	  This option allows `quota2' to log ONCE when a quota limit is
+	  crossed. It logs via NETLINK using the NETLINK_NFLOG family.
+	  It logs similarly to ipt_ULOG, but without the packet data.
+
+	  If unsure, say `N'.
+
 config NETFILTER_XT_MATCH_RATEEST
 	tristate '"rateest" match support'
 	depends on NETFILTER_ADVANCED
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index a1abf87..d9655f6 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -127,7 +127,9 @@
 obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_QTAGUID) += xt_qtaguid_print.o xt_qtaguid.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA) += xt_quota.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA2) += xt_quota2.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_RATEEST) += xt_rateest.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_RECENT) += xt_recent.o
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index f407ebc1..ddf77f7 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -5,6 +5,7 @@
  * After timer expires a kevent will be sent.
  *
  * Copyright (C) 2004, 2010 Nokia Corporation
+ *
  * Written by Timo Teras <ext-timo.teras@nokia.com>
  *
  * Converted to x_tables and reworked for upstream inclusion
@@ -38,8 +39,16 @@
 #include <linux/netfilter/xt_IDLETIMER.h>
 #include <linux/kdev_t.h>
 #include <linux/kobject.h>
+#include <linux/skbuff.h>
 #include <linux/workqueue.h>
 #include <linux/sysfs.h>
+#include <linux/rtc.h>
+#include <linux/time.h>
+#include <linux/math64.h>
+#include <linux/suspend.h>
+#include <linux/notifier.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
 
 struct idletimer_tg_attr {
 	struct attribute attr;
@@ -55,14 +64,110 @@
 	struct kobject *kobj;
 	struct idletimer_tg_attr attr;
 
+	struct timespec delayed_timer_trigger;
+	struct timespec last_modified_timer;
+	struct timespec last_suspend_time;
+	struct notifier_block pm_nb;
+
+	int timeout;
 	unsigned int refcnt;
+	bool work_pending;
+	bool send_nl_msg;
+	bool active;
+	uid_t uid;
 };
 
 static LIST_HEAD(idletimer_tg_list);
 static DEFINE_MUTEX(list_mutex);
+static DEFINE_SPINLOCK(timestamp_lock);
 
 static struct kobject *idletimer_tg_kobj;
 
+static bool check_for_delayed_trigger(struct idletimer_tg *timer,
+		struct timespec *ts)
+{
+	bool state;
+	struct timespec temp;
+	spin_lock_bh(&timestamp_lock);
+	timer->work_pending = false;
+	if ((ts->tv_sec - timer->last_modified_timer.tv_sec) > timer->timeout ||
+			timer->delayed_timer_trigger.tv_sec != 0) {
+		state = false;
+		temp.tv_sec = timer->timeout;
+		temp.tv_nsec = 0;
+		if (timer->delayed_timer_trigger.tv_sec != 0) {
+			temp = timespec_add(timer->delayed_timer_trigger, temp);
+			ts->tv_sec = temp.tv_sec;
+			ts->tv_nsec = temp.tv_nsec;
+			timer->delayed_timer_trigger.tv_sec = 0;
+			timer->work_pending = true;
+			schedule_work(&timer->work);
+		} else {
+			temp = timespec_add(timer->last_modified_timer, temp);
+			ts->tv_sec = temp.tv_sec;
+			ts->tv_nsec = temp.tv_nsec;
+		}
+	} else {
+		state = timer->active;
+	}
+	spin_unlock_bh(&timestamp_lock);
+	return state;
+}
+
+static void notify_netlink_uevent(const char *iface, struct idletimer_tg *timer)
+{
+	char iface_msg[NLMSG_MAX_SIZE];
+	char state_msg[NLMSG_MAX_SIZE];
+	char timestamp_msg[NLMSG_MAX_SIZE];
+	char uid_msg[NLMSG_MAX_SIZE];
+	char *envp[] = { iface_msg, state_msg, timestamp_msg, uid_msg, NULL };
+	int res;
+	struct timespec ts;
+	uint64_t time_ns;
+	bool state;
+
+	res = snprintf(iface_msg, NLMSG_MAX_SIZE, "INTERFACE=%s",
+		       iface);
+	if (NLMSG_MAX_SIZE <= res) {
+		pr_err("message too long (%d)", res);
+		return;
+	}
+
+	get_monotonic_boottime(&ts);
+	state = check_for_delayed_trigger(timer, &ts);
+	res = snprintf(state_msg, NLMSG_MAX_SIZE, "STATE=%s",
+			state ? "active" : "inactive");
+
+	if (NLMSG_MAX_SIZE <= res) {
+		pr_err("message too long (%d)", res);
+		return;
+	}
+
+	if (state) {
+		res = snprintf(uid_msg, NLMSG_MAX_SIZE, "UID=%u", timer->uid);
+		if (NLMSG_MAX_SIZE <= res)
+			pr_err("message too long (%d)", res);
+	} else {
+		res = snprintf(uid_msg, NLMSG_MAX_SIZE, "UID=");
+		if (NLMSG_MAX_SIZE <= res)
+			pr_err("message too long (%d)", res);
+	}
+
+	time_ns = timespec_to_ns(&ts);
+	res = snprintf(timestamp_msg, NLMSG_MAX_SIZE, "TIME_NS=%llu", time_ns);
+	if (NLMSG_MAX_SIZE <= res) {
+		timestamp_msg[0] = '\0';
+		pr_err("message too long (%d)", res);
+	}
+
+	pr_debug("putting nlmsg: <%s> <%s> <%s> <%s>\n", iface_msg, state_msg,
+		 timestamp_msg, uid_msg);
+	kobject_uevent_env(idletimer_tg_kobj, KOBJ_CHANGE, envp);
+}
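
For reference, the uevent assembled above reaches userspace roughly as follows (values hypothetical):

	/* Example environment delivered with the KOBJ_CHANGE uevent:
	 *
	 *   INTERFACE=rmnet0
	 *   STATE=active
	 *   TIME_NS=123456789012345
	 *   UID=10061
	 *
	 * An inactive notification carries an empty UID= since no socket
	 * woke the interface in that case.
	 */
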
+
 static
 struct idletimer_tg *__idletimer_tg_find_by_label(const char *label)
 {
@@ -83,6 +188,7 @@
 {
 	struct idletimer_tg *timer;
 	unsigned long expires = 0;
+	unsigned long now = jiffies;
 
 	mutex_lock(&list_mutex);
 
@@ -92,11 +198,15 @@
 
 	mutex_unlock(&list_mutex);
 
-	if (time_after(expires, jiffies))
+	if (time_after(expires, now))
 		return sprintf(buf, "%u\n",
-			       jiffies_to_msecs(expires - jiffies) / 1000);
+			       jiffies_to_msecs(expires - now) / 1000);
 
-	return sprintf(buf, "0\n");
+	if (timer->send_nl_msg)
+		return sprintf(buf, "0 %d\n",
+			jiffies_to_msecs(now - expires) / 1000);
+	else
+		return sprintf(buf, "0\n");
 }
 
 static void idletimer_tg_work(struct work_struct *work)
@@ -105,6 +215,9 @@
 						  work);
 
 	sysfs_notify(idletimer_tg_kobj, NULL, timer->attr.attr.name);
+
+	if (timer->send_nl_msg)
+		notify_netlink_uevent(timer->attr.attr.name, timer);
 }
 
 static void idletimer_tg_expired(unsigned long data)
@@ -112,8 +225,55 @@
 	struct idletimer_tg *timer = (struct idletimer_tg *) data;
 
 	pr_debug("timer %s expired\n", timer->attr.attr.name);
-
+	spin_lock_bh(&timestamp_lock);
+	timer->active = false;
+	timer->work_pending = true;
 	schedule_work(&timer->work);
+	spin_unlock_bh(&timestamp_lock);
+}
+
+static int idletimer_resume(struct notifier_block *notifier,
+		unsigned long pm_event, void *unused)
+{
+	struct timespec ts;
+	unsigned long time_diff, now = jiffies;
+	struct idletimer_tg *timer = container_of(notifier,
+			struct idletimer_tg, pm_nb);
+	if (!timer)
+		return NOTIFY_DONE;
+	switch (pm_event) {
+	case PM_SUSPEND_PREPARE:
+		get_monotonic_boottime(&timer->last_suspend_time);
+		break;
+	case PM_POST_SUSPEND:
+		spin_lock_bh(&timestamp_lock);
+		if (!timer->active) {
+			spin_unlock_bh(&timestamp_lock);
+			break;
+		}
+		/* Jiffies are not updated while suspended, so 'now'
+		 * represents the time at which the system suspended. */
+		if (time_after(timer->timer.expires, now)) {
+			get_monotonic_boottime(&ts);
+			ts = timespec_sub(ts, timer->last_suspend_time);
+			time_diff = timespec_to_jiffies(&ts);
+			if (timer->timer.expires > (time_diff + now)) {
+				mod_timer_pending(&timer->timer,
+						(timer->timer.expires - time_diff));
+			} else {
+				del_timer(&timer->timer);
+				timer->timer.expires = 0;
+				timer->active = false;
+				timer->work_pending = true;
+				schedule_work(&timer->work);
+			}
+		}
+		spin_unlock_bh(&timestamp_lock);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_DONE;
 }
 
 static int idletimer_tg_create(struct idletimer_tg_info *info)
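
A worked example of the resume-time adjustment in idletimer_resume() above, with hypothetical jiffy values (HZ=1000):

	/* Suppose timer.expires = 10000, now = 4000, and the suspend lasted
	 * 2 seconds, so time_diff = 2000:
	 *   expires (10000) > time_diff + now (6000)
	 *   => mod_timer_pending(timer, 10000 - 2000 = 8000)
	 * Had the suspend lasted 7 seconds (time_diff = 7000):
	 *   expires (10000) <= time_diff + now (11000)
	 *   => the timer is deleted and the expiry work is scheduled.
	 */
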
@@ -145,6 +305,21 @@
 	setup_timer(&info->timer->timer, idletimer_tg_expired,
 		    (unsigned long) info->timer);
 	info->timer->refcnt = 1;
+	info->timer->send_nl_msg = (info->send_nl_msg == 0) ? false : true;
+	info->timer->active = true;
+	info->timer->timeout = info->timeout;
+
+	info->timer->delayed_timer_trigger.tv_sec = 0;
+	info->timer->delayed_timer_trigger.tv_nsec = 0;
+	info->timer->work_pending = false;
+	info->timer->uid = 0;
+	get_monotonic_boottime(&info->timer->last_modified_timer);
+
+	info->timer->pm_nb.notifier_call = idletimer_resume;
+	ret = register_pm_notifier(&info->timer->pm_nb);
+	if (ret)
+		printk(KERN_WARNING "[%s] Failed to register pm notifier %d\n",
+				__func__, ret);
 
 	mod_timer(&info->timer->timer,
 		  msecs_to_jiffies(info->timeout * 1000) + jiffies);
@@ -161,6 +336,46 @@
 	return ret;
 }
 
+static void reset_timer(const struct idletimer_tg_info *info,
+			struct sk_buff *skb)
+{
+	unsigned long now = jiffies;
+	struct idletimer_tg *timer = info->timer;
+	bool timer_prev;
+
+	spin_lock_bh(&timestamp_lock);
+	timer_prev = timer->active;
+	timer->active = true;
+	/* timer_prev guards against jiffies wraparound in time_before() */
+	if (!timer_prev || time_before(timer->timer.expires, now)) {
+		pr_debug("Starting Checkentry timer (Expired, Jiffies): %lu, %lu\n",
+				timer->timer.expires, now);
+
+		/* Store the uid responsible for waking up the radio */
+		if (skb && (skb->sk)) {
+			struct sock *sk = skb->sk;
+			read_lock_bh(&sk->sk_callback_lock);
+			if ((sk->sk_socket) && (sk->sk_socket->file) &&
+			    (sk->sk_socket->file->f_cred))
+				timer->uid = sk->sk_socket->file->f_cred->uid;
+			read_unlock_bh(&sk->sk_callback_lock);
+		}
+
+		/* Check whether there is a pending inactive notification */
+		if (timer->work_pending)
+			timer->delayed_timer_trigger = timer->last_modified_timer;
+		else {
+			timer->work_pending = true;
+			schedule_work(&timer->work);
+		}
+	}
+
+	get_monotonic_boottime(&timer->last_modified_timer);
+	mod_timer(&timer->timer,
+			msecs_to_jiffies(info->timeout * 1000) + now);
+	spin_unlock_bh(&timestamp_lock);
+}
+
 /*
  * The actual xt_tables plugin.
  */
@@ -168,15 +383,23 @@
 					 const struct xt_action_param *par)
 {
 	const struct idletimer_tg_info *info = par->targinfo;
+	unsigned long now = jiffies;
 
 	pr_debug("resetting timer %s, timeout period %u\n",
 		 info->label, info->timeout);
 
 	BUG_ON(!info->timer);
 
-	mod_timer(&info->timer->timer,
-		  msecs_to_jiffies(info->timeout * 1000) + jiffies);
+	info->timer->active = true;
 
+	if (time_before(info->timer->timer.expires, now)) {
+		schedule_work(&info->timer->work);
+		pr_debug("Starting timer %s (Expired, Jiffies): %lu, %lu\n",
+			 info->label, info->timer->timer.expires, now);
+	}
+
+	/* TODO: Avoid modifying timers on each packet */
+	reset_timer(info, skb);
 	return XT_CONTINUE;
 }
 
@@ -185,7 +408,7 @@
 	struct idletimer_tg_info *info = par->targinfo;
 	int ret;
 
-	pr_debug("checkentry targinfo%s\n", info->label);
+	pr_debug("checkentry targinfo %s\n", info->label);
 
 	if (info->timeout == 0) {
 		pr_debug("timeout value is zero\n");
@@ -204,9 +427,7 @@
 	info->timer = __idletimer_tg_find_by_label(info->label);
 	if (info->timer) {
 		info->timer->refcnt++;
-		mod_timer(&info->timer->timer,
-			  msecs_to_jiffies(info->timeout * 1000) + jiffies);
-
+		reset_timer(info, NULL);
 		pr_debug("increased refcnt of timer %s to %u\n",
 			 info->label, info->timer->refcnt);
 	} else {
@@ -219,6 +440,7 @@
 	}
 
 	mutex_unlock(&list_mutex);
+
 	return 0;
 }
 
@@ -236,11 +458,12 @@
 		list_del(&info->timer->entry);
 		del_timer_sync(&info->timer->timer);
 		sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
+		unregister_pm_notifier(&info->timer->pm_nb);
 		kfree(info->timer->attr.attr.name);
 		kfree(info->timer);
 	} else {
 		pr_debug("decreased refcnt of timer %s to %u\n",
-			 info->label, info->timer->refcnt);
+		info->label, info->timer->refcnt);
 	}
 
 	mutex_unlock(&list_mutex);
@@ -248,6 +471,7 @@
 
 static struct xt_target idletimer_tg __read_mostly = {
 	.name		= "IDLETIMER",
+	.revision	= 1,
 	.family		= NFPROTO_UNSPEC,
 	.target		= idletimer_tg_target,
 	.targetsize     = sizeof(struct idletimer_tg_info),
@@ -313,3 +537,4 @@
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("ipt_IDLETIMER");
 MODULE_ALIAS("ip6t_IDLETIMER");
+MODULE_ALIAS("arpt_IDLETIMER");
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
new file mode 100644
index 0000000..bcc622e
--- /dev/null
+++ b/net/netfilter/xt_qtaguid.c
@@ -0,0 +1,3017 @@
+/*
+ * Kernel iptables module to track stats for packets based on user tags.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * There are run-time debug flags enabled via the debug_mask module param, or
+ * via the DEFAULT_DEBUG_MASK. See xt_qtaguid_internal.h.
+ */
+#define DEBUG
+
+#include <linux/file.h>
+#include <linux/inetdevice.h>
+#include <linux/module.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_qtaguid.h>
+#include <linux/ratelimit.h>
+#include <linux/seq_file.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <net/addrconf.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#endif
+
+#include <linux/netfilter/xt_socket.h>
+#include "xt_qtaguid_internal.h"
+#include "xt_qtaguid_print.h"
+#include "../../fs/proc/internal.h"
+
+/*
+ * We only use the xt_socket funcs within a similar context to avoid unexpected
+ * return values.
+ */
+#define XT_SOCKET_SUPPORTED_HOOKS \
+	((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN))
+
+static const char *module_procdirname = "xt_qtaguid";
+static struct proc_dir_entry *xt_qtaguid_procdir;
+
+static unsigned int proc_iface_perms = S_IRUGO;
+module_param_named(iface_perms, proc_iface_perms, uint, S_IRUGO | S_IWUSR);
+
+static struct proc_dir_entry *xt_qtaguid_stats_file;
+static unsigned int proc_stats_perms = S_IRUGO;
+module_param_named(stats_perms, proc_stats_perms, uint, S_IRUGO | S_IWUSR);
+
+static struct proc_dir_entry *xt_qtaguid_ctrl_file;
+
+/* Everybody can write. But proc_ctrl_write_limited is true by default which
+ * limits what can be controlled. See the can_*() functions.
+ */
+static unsigned int proc_ctrl_perms = S_IRUGO | S_IWUGO;
+module_param_named(ctrl_perms, proc_ctrl_perms, uint, S_IRUGO | S_IWUSR);
+
+/* Limited by default, so the gid of the ctrl and stats proc entries
+ * will limit what can be done. See the can_*() functions.
+ */
+static bool proc_stats_readall_limited = true;
+static bool proc_ctrl_write_limited = true;
+
+module_param_named(stats_readall_limited, proc_stats_readall_limited, bool,
+		   S_IRUGO | S_IWUSR);
+module_param_named(ctrl_write_limited, proc_ctrl_write_limited, bool,
+		   S_IRUGO | S_IWUSR);
+
+/*
+ * Limit the number of active tags (via socket tags) for a given UID.
+ * Multiple processes could share the UID.
+ */
+static int max_sock_tags = DEFAULT_MAX_SOCK_TAGS;
+module_param(max_sock_tags, int, S_IRUGO | S_IWUSR);
+
+/*
+ * After the kernel has initialized this module, it is still possible
+ * to make it passive.
+ * Setting passive to Y:
+ *  - the iface stats handling will not act on notifications.
+ *  - iptables matches will never match.
+ *  - ctrl commands silently succeed.
+ *  - stats are always empty.
+ * This is mostly useful when a bug is suspected.
+ */
+static bool module_passive;
+module_param_named(passive, module_passive, bool, S_IRUGO | S_IWUSR);
+
+/*
+ * Control how qtaguid data is tracked per proc/uid.
+ * Setting tag_tracking_passive to Y:
+ *  - don't create proc specific structs to track tags
+ *  - don't check that active tag stats exceed some limits.
+ *  - don't clean up socket tags on process exits.
+ * This is mostly useful when a bug is suspected.
+ */
+static bool qtu_proc_handling_passive;
+module_param_named(tag_tracking_passive, qtu_proc_handling_passive, bool,
+		   S_IRUGO | S_IWUSR);
+
+#define QTU_DEV_NAME "xt_qtaguid"
+
+uint qtaguid_debug_mask = DEFAULT_DEBUG_MASK;
+module_param_named(debug_mask, qtaguid_debug_mask, uint, S_IRUGO | S_IWUSR);
+
+/*---------------------------------------------------------------------------*/
+static const char *iface_stat_procdirname = "iface_stat";
+static struct proc_dir_entry *iface_stat_procdir;
+/*
+ * The iface_stat_all* will go away once userspace gets used to the new fields
+ * that have a format line.
+ */
+static const char *iface_stat_all_procfilename = "iface_stat_all";
+static struct proc_dir_entry *iface_stat_all_procfile;
+static const char *iface_stat_fmt_procfilename = "iface_stat_fmt";
+static struct proc_dir_entry *iface_stat_fmt_procfile;
+
+static LIST_HEAD(iface_stat_list);
+static DEFINE_SPINLOCK(iface_stat_list_lock);
+
+static struct rb_root sock_tag_tree = RB_ROOT;
+static DEFINE_SPINLOCK(sock_tag_list_lock);
+
+static struct rb_root tag_counter_set_tree = RB_ROOT;
+static DEFINE_SPINLOCK(tag_counter_set_list_lock);
+
+static struct rb_root uid_tag_data_tree = RB_ROOT;
+static DEFINE_SPINLOCK(uid_tag_data_tree_lock);
+
+static struct rb_root proc_qtu_data_tree = RB_ROOT;
+/* No proc_qtu_data_tree_lock; use uid_tag_data_tree_lock */
+
+static struct qtaguid_event_counts qtu_events;
+/*----------------------------------------------*/
+static bool can_manipulate_uids(void)
+{
+	/* root pwnd */
+	return in_egroup_p(xt_qtaguid_ctrl_file->gid)
+		|| unlikely(!current_fsuid()) || unlikely(!proc_ctrl_write_limited)
+		|| unlikely(current_fsuid() == xt_qtaguid_ctrl_file->uid);
+}
+
+static bool can_impersonate_uid(uid_t uid)
+{
+	return uid == current_fsuid() || can_manipulate_uids();
+}
+
+static bool can_read_other_uid_stats(uid_t uid)
+{
+	/* root pwnd */
+	return in_egroup_p(xt_qtaguid_stats_file->gid)
+		|| unlikely(!current_fsuid()) || uid == current_fsuid()
+		|| unlikely(!proc_stats_readall_limited)
+		|| unlikely(current_fsuid() == xt_qtaguid_ctrl_file->uid);
+}
+
+static inline void dc_add_byte_packets(struct data_counters *counters, int set,
+				  enum ifs_tx_rx direction,
+				  enum ifs_proto ifs_proto,
+				  int bytes,
+				  int packets)
+{
+	counters->bpc[set][direction][ifs_proto].bytes += bytes;
+	counters->bpc[set][direction][ifs_proto].packets += packets;
+}
+
+static struct tag_node *tag_node_tree_search(struct rb_root *root, tag_t tag)
+{
+	struct rb_node *node = root->rb_node;
+
+	while (node) {
+		struct tag_node *data = rb_entry(node, struct tag_node, node);
+		int result;
+		RB_DEBUG("qtaguid: tag_node_tree_search(0x%llx): "
+			 " node=%p data=%p\n", tag, node, data);
+		result = tag_compare(tag, data->tag);
+		RB_DEBUG("qtaguid: tag_node_tree_search(0x%llx): "
+			 " data.tag=0x%llx (uid=%u) res=%d\n",
+			 tag, data->tag, get_uid_from_tag(data->tag), result);
+		if (result < 0)
+			node = node->rb_left;
+		else if (result > 0)
+			node = node->rb_right;
+		else
+			return data;
+	}
+	return NULL;
+}
+
+static void tag_node_tree_insert(struct tag_node *data, struct rb_root *root)
+{
+	struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+	/* Figure out where to put new node */
+	while (*new) {
+		struct tag_node *this = rb_entry(*new, struct tag_node,
+						 node);
+		int result = tag_compare(data->tag, this->tag);
+		RB_DEBUG("qtaguid: %s(): tag=0x%llx"
+			 " (uid=%u)\n", __func__,
+			 this->tag,
+			 get_uid_from_tag(this->tag));
+		parent = *new;
+		if (result < 0)
+			new = &((*new)->rb_left);
+		else if (result > 0)
+			new = &((*new)->rb_right);
+		else
+			BUG();
+	}
+
+	/* Add new node and rebalance tree. */
+	rb_link_node(&data->node, parent, new);
+	rb_insert_color(&data->node, root);
+}
+
+static void tag_stat_tree_insert(struct tag_stat *data, struct rb_root *root)
+{
+	tag_node_tree_insert(&data->tn, root);
+}
+
+static struct tag_stat *tag_stat_tree_search(struct rb_root *root, tag_t tag)
+{
+	struct tag_node *node = tag_node_tree_search(root, tag);
+	if (!node)
+		return NULL;
+	return rb_entry(&node->node, struct tag_stat, tn.node);
+}
+
+static void tag_counter_set_tree_insert(struct tag_counter_set *data,
+					struct rb_root *root)
+{
+	tag_node_tree_insert(&data->tn, root);
+}
+
+static struct tag_counter_set *tag_counter_set_tree_search(struct rb_root *root,
+							   tag_t tag)
+{
+	struct tag_node *node = tag_node_tree_search(root, tag);
+	if (!node)
+		return NULL;
+	return rb_entry(&node->node, struct tag_counter_set, tn.node);
+}
+
+static void tag_ref_tree_insert(struct tag_ref *data, struct rb_root *root)
+{
+	tag_node_tree_insert(&data->tn, root);
+}
+
+static struct tag_ref *tag_ref_tree_search(struct rb_root *root, tag_t tag)
+{
+	struct tag_node *node = tag_node_tree_search(root, tag);
+	if (!node)
+		return NULL;
+	return rb_entry(&node->node, struct tag_ref, tn.node);
+}
+
+static struct sock_tag *sock_tag_tree_search(struct rb_root *root,
+					     const struct sock *sk)
+{
+	struct rb_node *node = root->rb_node;
+
+	while (node) {
+		struct sock_tag *data = rb_entry(node, struct sock_tag,
+						 sock_node);
+		if (sk < data->sk)
+			node = node->rb_left;
+		else if (sk > data->sk)
+			node = node->rb_right;
+		else
+			return data;
+	}
+	return NULL;
+}
+
+static void sock_tag_tree_insert(struct sock_tag *data, struct rb_root *root)
+{
+	struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+	/* Figure out where to put new node */
+	while (*new) {
+		struct sock_tag *this = rb_entry(*new, struct sock_tag,
+						 sock_node);
+		parent = *new;
+		if (data->sk < this->sk)
+			new = &((*new)->rb_left);
+		else if (data->sk > this->sk)
+			new = &((*new)->rb_right);
+		else
+			BUG();
+	}
+
+	/* Add new node and rebalance tree. */
+	rb_link_node(&data->sock_node, parent, new);
+	rb_insert_color(&data->sock_node, root);
+}
+
+static void sock_tag_tree_erase(struct rb_root *st_to_free_tree)
+{
+	struct rb_node *node;
+	struct sock_tag *st_entry;
+
+	node = rb_first(st_to_free_tree);
+	while (node) {
+		st_entry = rb_entry(node, struct sock_tag, sock_node);
+		node = rb_next(node);
+		CT_DEBUG("qtaguid: %s(): "
+			 "erase st: sk=%p tag=0x%llx (uid=%u)\n", __func__,
+			 st_entry->sk,
+			 st_entry->tag,
+			 get_uid_from_tag(st_entry->tag));
+		rb_erase(&st_entry->sock_node, st_to_free_tree);
+		sockfd_put(st_entry->socket);
+		kfree(st_entry);
+	}
+}
+
+static struct proc_qtu_data *proc_qtu_data_tree_search(struct rb_root *root,
+						       const pid_t pid)
+{
+	struct rb_node *node = root->rb_node;
+
+	while (node) {
+		struct proc_qtu_data *data = rb_entry(node,
+						      struct proc_qtu_data,
+						      node);
+		if (pid < data->pid)
+			node = node->rb_left;
+		else if (pid > data->pid)
+			node = node->rb_right;
+		else
+			return data;
+	}
+	return NULL;
+}
+
+static void proc_qtu_data_tree_insert(struct proc_qtu_data *data,
+				      struct rb_root *root)
+{
+	struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+	/* Figure out where to put new node */
+	while (*new) {
+		struct proc_qtu_data *this = rb_entry(*new,
+						      struct proc_qtu_data,
+						      node);
+		parent = *new;
+		if (data->pid < this->pid)
+			new = &((*new)->rb_left);
+		else if (data->pid > this->pid)
+			new = &((*new)->rb_right);
+		else
+			BUG();
+	}
+
+	/* Add new node and rebalance tree. */
+	rb_link_node(&data->node, parent, new);
+	rb_insert_color(&data->node, root);
+}
+
+static void uid_tag_data_tree_insert(struct uid_tag_data *data,
+				     struct rb_root *root)
+{
+	struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+	/* Figure out where to put new node */
+	while (*new) {
+		struct uid_tag_data *this = rb_entry(*new,
+						     struct uid_tag_data,
+						     node);
+		parent = *new;
+		if (data->uid < this->uid)
+			new = &((*new)->rb_left);
+		else if (data->uid > this->uid)
+			new = &((*new)->rb_right);
+		else
+			BUG();
+	}
+
+	/* Add new node and rebalance tree. */
+	rb_link_node(&data->node, parent, new);
+	rb_insert_color(&data->node, root);
+}
+
+static struct uid_tag_data *uid_tag_data_tree_search(struct rb_root *root,
+						     uid_t uid)
+{
+	struct rb_node *node = root->rb_node;
+
+	while (node) {
+		struct uid_tag_data *data = rb_entry(node,
+						     struct uid_tag_data,
+						     node);
+		if (uid < data->uid)
+			node = node->rb_left;
+		else if (uid > data->uid)
+			node = node->rb_right;
+		else
+			return data;
+	}
+	return NULL;
+}
+
+/*
+ * Allocates a new uid_tag_data struct if needed.
+ * Returns a pointer to the found or allocated uid_tag_data.
+ * Returns a PTR_ERR on failures; the lock is not held on return.
+ * If found_res is not NULL:
+ *   sets *found_res to true if the entry already existed,
+ *   and to false if it was newly allocated.
+ */
+struct uid_tag_data *get_uid_data(uid_t uid, bool *found_res)
+{
+	struct uid_tag_data *utd_entry;
+
+	/* Look for top level uid_tag_data for the UID */
+	utd_entry = uid_tag_data_tree_search(&uid_tag_data_tree, uid);
+	DR_DEBUG("qtaguid: get_uid_data(%u) utd=%p\n", uid, utd_entry);
+
+	if (found_res)
+		*found_res = utd_entry;
+	if (utd_entry)
+		return utd_entry;
+
+	utd_entry = kzalloc(sizeof(*utd_entry), GFP_ATOMIC);
+	if (!utd_entry) {
+		pr_err("qtaguid: get_uid_data(%u): "
+		       "tag data alloc failed\n", uid);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	utd_entry->uid = uid;
+	utd_entry->tag_ref_tree = RB_ROOT;
+	uid_tag_data_tree_insert(utd_entry, &uid_tag_data_tree);
+	DR_DEBUG("qtaguid: get_uid_data(%u) new utd=%p\n", uid, utd_entry);
+	return utd_entry;
+}
+
+/* Never returns NULL. Either PTR_ERR or a valid ptr. */
+static struct tag_ref *new_tag_ref(tag_t new_tag,
+				   struct uid_tag_data *utd_entry)
+{
+	struct tag_ref *tr_entry;
+	int res;
+
+	if (utd_entry->num_active_tags + 1 > max_sock_tags) {
+		pr_info("qtaguid: new_tag_ref(0x%llx): "
+			"tag ref alloc quota exceeded. max=%d\n",
+			new_tag, max_sock_tags);
+		res = -EMFILE;
+		goto err_res;
+	}
+
+	tr_entry = kzalloc(sizeof(*tr_entry), GFP_ATOMIC);
+	if (!tr_entry) {
+		pr_err("qtaguid: new_tag_ref(0x%llx): "
+		       "tag ref alloc failed\n",
+		       new_tag);
+		res = -ENOMEM;
+		goto err_res;
+	}
+	tr_entry->tn.tag = new_tag;
+	/* tr_entry->num_sock_tags  handled by caller */
+	utd_entry->num_active_tags++;
+	tag_ref_tree_insert(tr_entry, &utd_entry->tag_ref_tree);
+	DR_DEBUG("qtaguid: new_tag_ref(0x%llx): "
+		 " inserted new tag ref %p\n",
+		 new_tag, tr_entry);
+	return tr_entry;
+
+err_res:
+	return ERR_PTR(res);
+}
+
+static struct tag_ref *lookup_tag_ref(tag_t full_tag,
+				      struct uid_tag_data **utd_res)
+{
+	struct uid_tag_data *utd_entry;
+	struct tag_ref *tr_entry;
+	bool found_utd;
+	uid_t uid = get_uid_from_tag(full_tag);
+
+	DR_DEBUG("qtaguid: lookup_tag_ref(tag=0x%llx (uid=%u))\n",
+		 full_tag, uid);
+
+	utd_entry = get_uid_data(uid, &found_utd);
+	if (IS_ERR_OR_NULL(utd_entry)) {
+		if (utd_res)
+			*utd_res = utd_entry;
+		return NULL;
+	}
+
+	tr_entry = tag_ref_tree_search(&utd_entry->tag_ref_tree, full_tag);
+	if (utd_res)
+		*utd_res = utd_entry;
+	DR_DEBUG("qtaguid: lookup_tag_ref(0x%llx) utd_entry=%p tr_entry=%p\n",
+		 full_tag, utd_entry, tr_entry);
+	return tr_entry;
+}
+
+/* Never returns NULL. Either PTR_ERR or a valid ptr. */
+static struct tag_ref *get_tag_ref(tag_t full_tag,
+				   struct uid_tag_data **utd_res)
+{
+	struct uid_tag_data *utd_entry;
+	struct tag_ref *tr_entry;
+
+	DR_DEBUG("qtaguid: get_tag_ref(0x%llx)\n",
+		 full_tag);
+	spin_lock_bh(&uid_tag_data_tree_lock);
+	tr_entry = lookup_tag_ref(full_tag, &utd_entry);
+	BUG_ON(IS_ERR_OR_NULL(utd_entry));
+	if (!tr_entry)
+		tr_entry = new_tag_ref(full_tag, utd_entry);
+
+	spin_unlock_bh(&uid_tag_data_tree_lock);
+	if (utd_res)
+		*utd_res = utd_entry;
+	DR_DEBUG("qtaguid: get_tag_ref(0x%llx) utd=%p tr=%p\n",
+		 full_tag, utd_entry, tr_entry);
+	return tr_entry;
+}
+
+/* Checks and maybe frees the UID Tag Data entry */
+static void put_utd_entry(struct uid_tag_data *utd_entry)
+{
+	/* Are we done with the UID tag data entry? */
+	if (RB_EMPTY_ROOT(&utd_entry->tag_ref_tree) &&
+		!utd_entry->num_pqd) {
+		DR_DEBUG("qtaguid: %s(): "
+			 "erase utd_entry=%p uid=%u "
+			 "by pid=%u tgid=%u uid=%u\n", __func__,
+			 utd_entry, utd_entry->uid,
+			 current->pid, current->tgid, current_fsuid());
+		BUG_ON(utd_entry->num_active_tags);
+		rb_erase(&utd_entry->node, &uid_tag_data_tree);
+		kfree(utd_entry);
+	} else {
+		DR_DEBUG("qtaguid: %s(): "
+			 "utd_entry=%p still has %d tags %d proc_qtu_data\n",
+			 __func__, utd_entry, utd_entry->num_active_tags,
+			 utd_entry->num_pqd);
+		BUG_ON(!(utd_entry->num_active_tags ||
+			 utd_entry->num_pqd));
+	}
+}
+
+/*
+ * If no sock_tags are using this tag_ref,
+ * decrements refcount of utd_entry, removes tr_entry
+ * from utd_entry->tag_ref_tree and frees.
+ */
+static void free_tag_ref_from_utd_entry(struct tag_ref *tr_entry,
+					struct uid_tag_data *utd_entry)
+{
+	DR_DEBUG("qtaguid: %s(): %p tag=0x%llx (uid=%u)\n", __func__,
+		 tr_entry, tr_entry->tn.tag,
+		 get_uid_from_tag(tr_entry->tn.tag));
+	if (!tr_entry->num_sock_tags) {
+		BUG_ON(!utd_entry->num_active_tags);
+		utd_entry->num_active_tags--;
+		rb_erase(&tr_entry->tn.node, &utd_entry->tag_ref_tree);
+		DR_DEBUG("qtaguid: %s(): erased %p\n", __func__, tr_entry);
+		kfree(tr_entry);
+	}
+}
+
+static void put_tag_ref_tree(tag_t full_tag, struct uid_tag_data *utd_entry)
+{
+	struct rb_node *node;
+	struct tag_ref *tr_entry;
+	tag_t acct_tag;
+
+	DR_DEBUG("qtaguid: %s(tag=0x%llx (uid=%u))\n", __func__,
+		 full_tag, get_uid_from_tag(full_tag));
+	acct_tag = get_atag_from_tag(full_tag);
+	node = rb_first(&utd_entry->tag_ref_tree);
+	while (node) {
+		tr_entry = rb_entry(node, struct tag_ref, tn.node);
+		node = rb_next(node);
+		if (!acct_tag || tr_entry->tn.tag == full_tag)
+			free_tag_ref_from_utd_entry(tr_entry, utd_entry);
+	}
+}
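
The tag helpers used throughout this file (get_uid_from_tag() and friends) live in xt_qtaguid_internal.h; a sketch of the assumed 64-bit tag layout:

	/* Assumed layout: the 32-bit accounting tag occupies the upper half
	 * of the tag, the 32-bit UID the lower half.
	 */
	typedef u64 tag_t;

	static inline uid_t get_uid_from_tag(tag_t tag)  { return tag & 0xFFFFFFFFULL; }
	static inline tag_t get_utag_from_tag(tag_t tag) { return tag & 0xFFFFFFFFULL; }
	static inline tag_t get_atag_from_tag(tag_t tag) { return tag & ~0xFFFFFFFFULL; }
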
+
+static ssize_t read_proc_u64(struct file *file, char __user *buf,
+			 size_t size, loff_t *ppos)
+{
+	uint64_t *valuep = PDE_DATA(file_inode(file));
+	char tmp[24];
+	size_t tmp_size;
+
+	tmp_size = scnprintf(tmp, sizeof(tmp), "%llu\n", *valuep);
+	return simple_read_from_buffer(buf, size, ppos, tmp, tmp_size);
+}
+
+static ssize_t read_proc_bool(struct file *file, char __user *buf,
+			  size_t size, loff_t *ppos)
+{
+	bool *valuep = PDE_DATA(file_inode(file));
+	char tmp[24];
+	size_t tmp_size;
+
+	tmp_size = scnprintf(tmp, sizeof(tmp), "%u\n", *valuep);
+	return simple_read_from_buffer(buf, size, ppos, tmp, tmp_size);
+}
+
+static int get_active_counter_set(tag_t tag)
+{
+	int active_set = 0;
+	struct tag_counter_set *tcs;
+
+	MT_DEBUG("qtaguid: get_active_counter_set(tag=0x%llx)"
+		 " (uid=%u)\n",
+		 tag, get_uid_from_tag(tag));
+	/* For now we only handle UID tags for active sets */
+	tag = get_utag_from_tag(tag);
+	spin_lock_bh(&tag_counter_set_list_lock);
+	tcs = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
+	if (tcs)
+		active_set = tcs->active_set;
+	spin_unlock_bh(&tag_counter_set_list_lock);
+	return active_set;
+}
+
+/*
+ * Find the entry for tracking the specified interface.
+ * Caller must hold iface_stat_list_lock
+ */
+static struct iface_stat *get_iface_entry(const char *ifname)
+{
+	struct iface_stat *iface_entry;
+
+	/* Reject a NULL device name before iterating */
+	if (ifname == NULL) {
+		pr_info("qtaguid: iface_stat: get() NULL device name\n");
+		return NULL;
+	}
+
+	/* Iterate over interfaces */
+	list_for_each_entry(iface_entry, &iface_stat_list, list) {
+		if (!strcmp(ifname, iface_entry->ifname))
+			goto done;
+	}
+	iface_entry = NULL;
+done:
+	return iface_entry;
+}
+
+/* This is for fmt2 only */
+static void pp_iface_stat_header(struct seq_file *m)
+{
+	seq_puts(m,
+		 "ifname "
+		 "total_skb_rx_bytes total_skb_rx_packets "
+		 "total_skb_tx_bytes total_skb_tx_packets "
+		 "rx_tcp_bytes rx_tcp_packets "
+		 "rx_udp_bytes rx_udp_packets "
+		 "rx_other_bytes rx_other_packets "
+		 "tx_tcp_bytes tx_tcp_packets "
+		 "tx_udp_bytes tx_udp_packets "
+		 "tx_other_bytes tx_other_packets\n"
+	);
+}
+
+static void pp_iface_stat_line(struct seq_file *m,
+			       struct iface_stat *iface_entry)
+{
+	struct data_counters *cnts;
+	int cnt_set = 0;   /* We only use one set for the device */
+	cnts = &iface_entry->totals_via_skb;
+	seq_printf(m, "%s %llu %llu %llu %llu %llu %llu %llu %llu "
+		   "%llu %llu %llu %llu %llu %llu %llu %llu\n",
+		   iface_entry->ifname,
+		   dc_sum_bytes(cnts, cnt_set, IFS_RX),
+		   dc_sum_packets(cnts, cnt_set, IFS_RX),
+		   dc_sum_bytes(cnts, cnt_set, IFS_TX),
+		   dc_sum_packets(cnts, cnt_set, IFS_TX),
+		   cnts->bpc[cnt_set][IFS_RX][IFS_TCP].bytes,
+		   cnts->bpc[cnt_set][IFS_RX][IFS_TCP].packets,
+		   cnts->bpc[cnt_set][IFS_RX][IFS_UDP].bytes,
+		   cnts->bpc[cnt_set][IFS_RX][IFS_UDP].packets,
+		   cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].bytes,
+		   cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].packets,
+		   cnts->bpc[cnt_set][IFS_TX][IFS_TCP].bytes,
+		   cnts->bpc[cnt_set][IFS_TX][IFS_TCP].packets,
+		   cnts->bpc[cnt_set][IFS_TX][IFS_UDP].bytes,
+		   cnts->bpc[cnt_set][IFS_TX][IFS_UDP].packets,
+		   cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].bytes,
+		   cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].packets);
+}
+
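
A hypothetical fmt2 line as emitted by pp_iface_stat_line(), matching the header fields above:

	/*   wlan0 123456 789 23456 678 100000 600 20000 150 3456 39
	 *   20000 500 3000 170 456 8
	 *
	 * i.e. ifname, skb-based rx/tx byte and packet totals, then
	 * per-protocol (tcp/udp/other) rx and tx byte/packet counters,
	 * all on one line.
	 */
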
+struct proc_iface_stat_fmt_info {
+	int fmt;
+};
+
+static void *iface_stat_fmt_proc_start(struct seq_file *m, loff_t *pos)
+{
+	struct proc_iface_stat_fmt_info *p = m->private;
+	loff_t n = *pos;
+
+	/*
+	 * This lock will prevent iface_stat_update() from changing active,
+	 * and in turn prevent an interface from unregistering itself.
+	 */
+	spin_lock_bh(&iface_stat_list_lock);
+
+	if (unlikely(module_passive))
+		return NULL;
+
+	if (!n && p->fmt == 2)
+		pp_iface_stat_header(m);
+
+	return seq_list_start(&iface_stat_list, n);
+}
+
+static void *iface_stat_fmt_proc_next(struct seq_file *m, void *p, loff_t *pos)
+{
+	return seq_list_next(p, &iface_stat_list, pos);
+}
+
+static void iface_stat_fmt_proc_stop(struct seq_file *m, void *p)
+{
+	spin_unlock_bh(&iface_stat_list_lock);
+}
+
+static int iface_stat_fmt_proc_show(struct seq_file *m, void *v)
+{
+	struct proc_iface_stat_fmt_info *p = m->private;
+	struct iface_stat *iface_entry;
+	struct rtnl_link_stats64 dev_stats, *stats;
+	struct rtnl_link_stats64 no_dev_stats = {0};
+
+	CT_DEBUG("qtaguid:proc iface_stat_fmt pid=%u tgid=%u uid=%u\n",
+		 current->pid, current->tgid, current_fsuid());
+
+	iface_entry = list_entry(v, struct iface_stat, list);
+
+	if (iface_entry->active) {
+		stats = dev_get_stats(iface_entry->net_dev,
+				      &dev_stats);
+	} else {
+		stats = &no_dev_stats;
+	}
+	/*
+	 * If the meaning of the data changes, then update the fmtX
+	 * string.
+	 */
+	if (p->fmt == 1) {
+		seq_printf(m, "%s %d %llu %llu %llu %llu %llu %llu %llu %llu\n",
+			   iface_entry->ifname,
+			   iface_entry->active,
+			   iface_entry->totals_via_dev[IFS_RX].bytes,
+			   iface_entry->totals_via_dev[IFS_RX].packets,
+			   iface_entry->totals_via_dev[IFS_TX].bytes,
+			   iface_entry->totals_via_dev[IFS_TX].packets,
+			   stats->rx_bytes, stats->rx_packets,
+			   stats->tx_bytes, stats->tx_packets
+			   );
+	} else {
+		pp_iface_stat_line(m, iface_entry);
+	}
+	return 0;
+}
+
+static const struct file_operations read_u64_fops = {
+	.read		= read_proc_u64,
+	.llseek		= default_llseek,
+};
+
+static const struct file_operations read_bool_fops = {
+	.read		= read_proc_bool,
+	.llseek		= default_llseek,
+};
+
+static void iface_create_proc_worker(struct work_struct *work)
+{
+	struct proc_dir_entry *proc_entry;
+	struct iface_stat_work *isw = container_of(work, struct iface_stat_work,
+						   iface_work);
+	struct iface_stat *new_iface  = isw->iface_entry;
+
+	/* iface_entries are not deleted, so safe to manipulate. */
+	proc_entry = proc_mkdir(new_iface->ifname, iface_stat_procdir);
+	if (IS_ERR_OR_NULL(proc_entry)) {
+		pr_err("qtaguid: iface_stat: create_proc(): alloc failed.\n");
+		kfree(isw);
+		return;
+	}
+
+	new_iface->proc_ptr = proc_entry;
+
+	proc_create_data("tx_bytes", proc_iface_perms, proc_entry,
+			 &read_u64_fops,
+			 &new_iface->totals_via_dev[IFS_TX].bytes);
+	proc_create_data("rx_bytes", proc_iface_perms, proc_entry,
+			 &read_u64_fops,
+			 &new_iface->totals_via_dev[IFS_RX].bytes);
+	proc_create_data("tx_packets", proc_iface_perms, proc_entry,
+			 &read_u64_fops,
+			 &new_iface->totals_via_dev[IFS_TX].packets);
+	proc_create_data("rx_packets", proc_iface_perms, proc_entry,
+			 &read_u64_fops,
+			 &new_iface->totals_via_dev[IFS_RX].packets);
+	proc_create_data("active", proc_iface_perms, proc_entry,
+			 &read_bool_fops, &new_iface->active);
+
+	IF_DEBUG("qtaguid: iface_stat: create_proc(): done "
+		 "entry=%p dev=%s\n", new_iface, new_iface->ifname);
+	kfree(isw);
+}
+
+/*
+ * Will set the entry's active state, and
+ * update the net_dev accordingly also.
+ */
+static void _iface_stat_set_active(struct iface_stat *entry,
+				   struct net_device *net_dev,
+				   bool activate)
+{
+	if (activate) {
+		entry->net_dev = net_dev;
+		entry->active = true;
+		IF_DEBUG("qtaguid: %s(%s): "
+			 "enable tracking. rfcnt=%d\n", __func__,
+			 entry->ifname,
+			 __this_cpu_read(*net_dev->pcpu_refcnt));
+	} else {
+		entry->active = false;
+		entry->net_dev = NULL;
+		IF_DEBUG("qtaguid: %s(%s): "
+			 "disable tracking. rfcnt=%d\n", __func__,
+			 entry->ifname,
+			 __this_cpu_read(*net_dev->pcpu_refcnt));
+	}
+}
+
+/* Caller must hold iface_stat_list_lock */
+static struct iface_stat *iface_alloc(struct net_device *net_dev)
+{
+	struct iface_stat *new_iface;
+	struct iface_stat_work *isw;
+
+	new_iface = kzalloc(sizeof(*new_iface), GFP_ATOMIC);
+	if (new_iface == NULL) {
+		pr_err("qtaguid: iface_stat: create(%s): "
+		       "iface_stat alloc failed\n", net_dev->name);
+		return NULL;
+	}
+	new_iface->ifname = kstrdup(net_dev->name, GFP_ATOMIC);
+	if (new_iface->ifname == NULL) {
+		pr_err("qtaguid: iface_stat: create(%s): "
+		       "ifname alloc failed\n", net_dev->name);
+		kfree(new_iface);
+		return NULL;
+	}
+	spin_lock_init(&new_iface->tag_stat_list_lock);
+	new_iface->tag_stat_tree = RB_ROOT;
+	_iface_stat_set_active(new_iface, net_dev, true);
+
+	/*
+	 * ipv6 notifier chains are atomic :(
+	 * No create_proc_read_entry() for you!
+	 */
+	isw = kmalloc(sizeof(*isw), GFP_ATOMIC);
+	if (!isw) {
+		pr_err("qtaguid: iface_stat: create(%s): "
+		       "work alloc failed\n", new_iface->ifname);
+		_iface_stat_set_active(new_iface, net_dev, false);
+		kfree(new_iface->ifname);
+		kfree(new_iface);
+		return NULL;
+	}
+	isw->iface_entry = new_iface;
+	INIT_WORK(&isw->iface_work, iface_create_proc_worker);
+	schedule_work(&isw->iface_work);
+	list_add(&new_iface->list, &iface_stat_list);
+	return new_iface;
+}
+
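+/*
+ * Detect a device whose counters went backwards (e.g. the driver
+ * reset them across a down/up cycle). If so, fold the snapshot
+ * stashed at NETDEV_DOWN time into totals_via_dev so the exported
+ * totals stay monotonic.
+ */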
+static void iface_check_stats_reset_and_adjust(struct net_device *net_dev,
+					       struct iface_stat *iface)
+{
+	struct rtnl_link_stats64 dev_stats, *stats;
+	bool stats_rewound;
+
+	stats = dev_get_stats(net_dev, &dev_stats);
+	/* Detect counters that went backwards (the device reset its stats). */
+	stats_rewound =
+		(stats->rx_bytes < iface->last_known[IFS_RX].bytes)
+		|| (stats->tx_bytes < iface->last_known[IFS_TX].bytes);
+
+	IF_DEBUG("qtaguid: %s(%s): iface=%p netdev=%p "
+		 "bytes rx/tx=%llu/%llu "
+		 "active=%d last_known=%d "
+		 "stats_rewound=%d\n", __func__,
+		 net_dev ? net_dev->name : "?",
+		 iface, net_dev,
+		 stats->rx_bytes, stats->tx_bytes,
+		 iface->active, iface->last_known_valid, stats_rewound);
+
+	if (iface->active && iface->last_known_valid && stats_rewound) {
+		pr_warn_once("qtaguid: iface_stat: %s(%s): "
+			     "iface reset its stats unexpectedly\n", __func__,
+			     net_dev->name);
+
+		iface->totals_via_dev[IFS_TX].bytes +=
+			iface->last_known[IFS_TX].bytes;
+		iface->totals_via_dev[IFS_TX].packets +=
+			iface->last_known[IFS_TX].packets;
+		iface->totals_via_dev[IFS_RX].bytes +=
+			iface->last_known[IFS_RX].bytes;
+		iface->totals_via_dev[IFS_RX].packets +=
+			iface->last_known[IFS_RX].packets;
+		iface->last_known_valid = false;
+		IF_DEBUG("qtaguid: %s(%s): iface=%p "
+			 "used last known bytes rx/tx=%llu/%llu\n", __func__,
+			 iface->ifname, iface, iface->last_known[IFS_RX].bytes,
+			 iface->last_known[IFS_TX].bytes);
+	}
+}
+
+/*
+ * Create a new entry for tracking the specified interface.
+ * Do nothing if the entry already exists.
+ * Called when an interface is configured with a valid IP address.
+ */
+static void iface_stat_create(struct net_device *net_dev,
+			      struct in_ifaddr *ifa)
+{
+	struct in_device *in_dev = NULL;
+	const char *ifname;
+	struct iface_stat *entry;
+	__be32 ipaddr = 0;
+	struct iface_stat *new_iface;
+
+	IF_DEBUG("qtaguid: iface_stat: create(%s): ifa=%p netdev=%p\n",
+		 net_dev ? net_dev->name : "?",
+		 ifa, net_dev);
+	if (!net_dev) {
+		pr_err("qtaguid: iface_stat: create(): no net dev\n");
+		return;
+	}
+
+	ifname = net_dev->name;
+	if (!ifa) {
+		in_dev = in_dev_get(net_dev);
+		if (!in_dev) {
+			pr_err("qtaguid: iface_stat: create(%s): no inet dev\n",
+			       ifname);
+			return;
+		}
+		IF_DEBUG("qtaguid: iface_stat: create(%s): in_dev=%p\n",
+			 ifname, in_dev);
+		for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
+			IF_DEBUG("qtaguid: iface_stat: create(%s): "
+				 "ifa=%p ifa_label=%s\n",
+				 ifname, ifa,
+				 ifa->ifa_label ? ifa->ifa_label : "(null)");
+			if (ifa->ifa_label && !strcmp(ifname, ifa->ifa_label))
+				break;
+		}
+	}
+
+	if (!ifa) {
+		IF_DEBUG("qtaguid: iface_stat: create(%s): no matching IP\n",
+			 ifname);
+		goto done_put;
+	}
+	ipaddr = ifa->ifa_local;
+
+	spin_lock_bh(&iface_stat_list_lock);
+	entry = get_iface_entry(ifname);
+	if (entry != NULL) {
+		IF_DEBUG("qtaguid: iface_stat: create(%s): entry=%p\n",
+			 ifname, entry);
+		iface_check_stats_reset_and_adjust(net_dev, entry);
+		_iface_stat_set_active(entry, net_dev, true);
+		IF_DEBUG("qtaguid: %s(%s): "
+			 "tracking now %d on ip=%pI4\n", __func__,
+			 entry->ifname, true, &ipaddr);
+		goto done_unlock_put;
+	}
+
+	new_iface = iface_alloc(net_dev);
+	IF_DEBUG("qtaguid: iface_stat: create(%s): done "
+		 "entry=%p ip=%pI4\n", ifname, new_iface, &ipaddr);
+done_unlock_put:
+	spin_unlock_bh(&iface_stat_list_lock);
+done_put:
+	if (in_dev)
+		in_dev_put(in_dev);
+}
+
+static void iface_stat_create_ipv6(struct net_device *net_dev,
+				   struct inet6_ifaddr *ifa)
+{
+	struct in_device *in_dev;
+	const char *ifname;
+	struct iface_stat *entry;
+	struct iface_stat *new_iface;
+	int addr_type;
+
+	IF_DEBUG("qtaguid: iface_stat: create6(): ifa=%p netdev=%p->name=%s\n",
+		 ifa, net_dev, net_dev ? net_dev->name : "");
+	if (!net_dev) {
+		pr_err("qtaguid: iface_stat: create6(): no net dev!\n");
+		return;
+	}
+	ifname = net_dev->name;
+
+	in_dev = in_dev_get(net_dev);
+	if (!in_dev) {
+		pr_err("qtaguid: iface_stat: create6(%s): no inet dev\n",
+		       ifname);
+		return;
+	}
+
+	IF_DEBUG("qtaguid: iface_stat: create6(%s): in_dev=%p\n",
+		 ifname, in_dev);
+
+	if (!ifa) {
+		IF_DEBUG("qtaguid: iface_stat: create6(%s): no matching IP\n",
+			 ifname);
+		goto done_put;
+	}
+	addr_type = ipv6_addr_type(&ifa->addr);
+
+	spin_lock_bh(&iface_stat_list_lock);
+	entry = get_iface_entry(ifname);
+	if (entry != NULL) {
+		IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
+			 ifname, entry);
+		iface_check_stats_reset_and_adjust(net_dev, entry);
+		_iface_stat_set_active(entry, net_dev, true);
+		IF_DEBUG("qtaguid: %s(%s): "
+			 "tracking now %d on ip=%pI6c\n", __func__,
+			 entry->ifname, true, &ifa->addr);
+		goto done_unlock_put;
+	}
+
+	new_iface = iface_alloc(net_dev);
+	IF_DEBUG("qtaguid: iface_stat: create6(%s): done "
+		 "entry=%p ip=%pI6c\n", ifname, new_iface, &ifa->addr);
+
+done_unlock_put:
+	spin_unlock_bh(&iface_stat_list_lock);
+done_put:
+	in_dev_put(in_dev);
+}
+
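+/* "_nl" (no lock) variant: caller must hold sock_tag_list_lock. */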
+static struct sock_tag *get_sock_stat_nl(const struct sock *sk)
+{
+	MT_DEBUG("qtaguid: get_sock_stat_nl(sk=%p)\n", sk);
+	return sock_tag_tree_search(&sock_tag_tree, sk);
+}
+
+static struct sock_tag *get_sock_stat(const struct sock *sk)
+{
+	struct sock_tag *sock_tag_entry;
+
+	MT_DEBUG("qtaguid: get_sock_stat(sk=%p)\n", sk);
+	if (!sk)
+		return NULL;
+	spin_lock_bh(&sock_tag_list_lock);
+	sock_tag_entry = get_sock_stat_nl(sk);
+	spin_unlock_bh(&sock_tag_list_lock);
+	return sock_tag_entry;
+}
+
+static int ipx_proto(const struct sk_buff *skb,
+		     struct xt_action_param *par)
+{
+	int thoff = 0, tproto;
+
+	switch (par->family) {
+	case NFPROTO_IPV6:
+		tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
+		if (tproto < 0)
+			MT_DEBUG("%s(): transport header not found in ipv6"
+				 " skb=%p\n", __func__, skb);
+		break;
+	case NFPROTO_IPV4:
+		tproto = ip_hdr(skb)->protocol;
+		break;
+	default:
+		tproto = IPPROTO_RAW;
+	}
+	return tproto;
+}
+
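+/*
+ * Bucket the byte/packet count into the given counter set and
+ * direction, keyed by L4 protocol (TCP, UDP, everything else).
+ */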
+static void
+data_counters_update(struct data_counters *dc, int set,
+		     enum ifs_tx_rx direction, int proto, int bytes)
+{
+	switch (proto) {
+	case IPPROTO_TCP:
+		dc_add_byte_packets(dc, set, direction, IFS_TCP, bytes, 1);
+		break;
+	case IPPROTO_UDP:
+		dc_add_byte_packets(dc, set, direction, IFS_UDP, bytes, 1);
+		break;
+	case IPPROTO_IP:
+	default:
+		dc_add_byte_packets(dc, set, direction, IFS_PROTO_OTHER, bytes,
+				    1);
+		break;
+	}
+}
+
+/*
+ * Update stats for the specified interface. Do nothing if the entry
+ * does not exist (when a device was never configured with an IP address).
+ * Called when a device is being unregistered.
+ */
+static void iface_stat_update(struct net_device *net_dev, bool stash_only)
+{
+	struct rtnl_link_stats64 dev_stats, *stats;
+	struct iface_stat *entry;
+
+	stats = dev_get_stats(net_dev, &dev_stats);
+	spin_lock_bh(&iface_stat_list_lock);
+	entry = get_iface_entry(net_dev->name);
+	if (entry == NULL) {
+		IF_DEBUG("qtaguid: iface_stat: update(%s): not tracked\n",
+			 net_dev->name);
+		spin_unlock_bh(&iface_stat_list_lock);
+		return;
+	}
+
+	IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
+		 net_dev->name, entry);
+	if (!entry->active) {
+		IF_DEBUG("qtaguid: %s(%s): already disabled\n", __func__,
+			 net_dev->name);
+		spin_unlock_bh(&iface_stat_list_lock);
+		return;
+	}
+
+	if (stash_only) {
+		entry->last_known[IFS_TX].bytes = stats->tx_bytes;
+		entry->last_known[IFS_TX].packets = stats->tx_packets;
+		entry->last_known[IFS_RX].bytes = stats->rx_bytes;
+		entry->last_known[IFS_RX].packets = stats->rx_packets;
+		entry->last_known_valid = true;
+		IF_DEBUG("qtaguid: %s(%s): "
+			 "dev stats stashed rx/tx=%llu/%llu\n", __func__,
+			 net_dev->name, stats->rx_bytes, stats->tx_bytes);
+		spin_unlock_bh(&iface_stat_list_lock);
+		return;
+	}
+	entry->totals_via_dev[IFS_TX].bytes += stats->tx_bytes;
+	entry->totals_via_dev[IFS_TX].packets += stats->tx_packets;
+	entry->totals_via_dev[IFS_RX].bytes += stats->rx_bytes;
+	entry->totals_via_dev[IFS_RX].packets += stats->rx_packets;
+	/* We don't need the last_known[] anymore */
+	entry->last_known_valid = false;
+	_iface_stat_set_active(entry, net_dev, false);
+	IF_DEBUG("qtaguid: %s(%s): "
+		 "disable tracking. rx/tx=%llu/%llu\n", __func__,
+		 net_dev->name, stats->rx_bytes, stats->tx_bytes);
+	spin_unlock_bh(&iface_stat_list_lock);
+}
+
+/*
+ * Update stats for the specified interface from the skb.
+ * Do nothing if the entry does not exist (when a device was never
+ * configured with an IP address).
+ * Called on each sk.
+ */
+static void iface_stat_update_from_skb(const struct sk_buff *skb,
+				       struct xt_action_param *par)
+{
+	struct iface_stat *entry;
+	const struct net_device *el_dev;
+	enum ifs_tx_rx direction = par->in ? IFS_RX : IFS_TX;
+	int bytes = skb->len;
+	int proto;
+
+	if (!skb->dev) {
+		MT_DEBUG("qtaguid[%d]: no skb->dev\n", par->hooknum);
+		el_dev = par->in ? : par->out;
+	} else {
+		const struct net_device *other_dev;
+		el_dev = skb->dev;
+		other_dev = par->in ? : par->out;
+		if (el_dev != other_dev) {
+			MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs "
+				 "par->(in/out)=%p %s\n",
+				 par->hooknum, el_dev, el_dev->name, other_dev,
+				 other_dev->name);
+		}
+	}
+
+	if (unlikely(!el_dev)) {
+		pr_err_ratelimited("qtaguid[%d]: %s(): no par->in/out?!!\n",
+				   par->hooknum, __func__);
+		BUG();
+	} else if (unlikely(!el_dev->name)) {
+		pr_err_ratelimited("qtaguid[%d]: %s(): no dev->name?!!\n",
+				   par->hooknum, __func__);
+		BUG();
+	} else {
+		proto = ipx_proto(skb, par);
+		MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n",
+			 par->hooknum, el_dev->name, el_dev->type,
+			 par->family, proto);
+	}
+
+	spin_lock_bh(&iface_stat_list_lock);
+	entry = get_iface_entry(el_dev->name);
+	if (entry == NULL) {
+		IF_DEBUG("qtaguid: iface_stat: %s(%s): not tracked\n",
+			 __func__, el_dev->name);
+		spin_unlock_bh(&iface_stat_list_lock);
+		return;
+	}
+
+	IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
+		 el_dev->name, entry);
+
+	data_counters_update(&entry->totals_via_skb, 0, direction, proto,
+			     bytes);
+	spin_unlock_bh(&iface_stat_list_lock);
+}
+
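+/*
+ * Update the counters of the given tag_stat in its active counter
+ * set. If the entry is a child {acct_tag, uid_tag}, the linked
+ * parent {0, uid_tag} counters are updated as well.
+ */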
+static void tag_stat_update(struct tag_stat *tag_entry,
+			enum ifs_tx_rx direction, int proto, int bytes)
+{
+	int active_set = get_active_counter_set(tag_entry->tn.tag);
+
+	MT_DEBUG("qtaguid: tag_stat_update(tag=0x%llx (uid=%u) set=%d "
+		 "dir=%d proto=%d bytes=%d)\n",
+		 tag_entry->tn.tag, get_uid_from_tag(tag_entry->tn.tag),
+		 active_set, direction, proto, bytes);
+	data_counters_update(&tag_entry->counters, active_set, direction,
+			     proto, bytes);
+	if (tag_entry->parent_counters)
+		data_counters_update(tag_entry->parent_counters, active_set,
+				     direction, proto, bytes);
+}
+
+/*
+ * Create a new entry for tracking the specified {acct_tag,uid_tag} within
+ * the interface.
+ * iface_entry->tag_stat_list_lock should be held.
+ */
+static struct tag_stat *create_if_tag_stat(struct iface_stat *iface_entry,
+					   tag_t tag)
+{
+	struct tag_stat *new_tag_stat_entry = NULL;
+
+	IF_DEBUG("qtaguid: iface_stat: %s(): ife=%p tag=0x%llx"
+		 " (uid=%u)\n", __func__,
+		 iface_entry, tag, get_uid_from_tag(tag));
+	new_tag_stat_entry = kzalloc(sizeof(*new_tag_stat_entry), GFP_ATOMIC);
+	if (!new_tag_stat_entry) {
+		pr_err("qtaguid: iface_stat: tag stat alloc failed\n");
+		goto done;
+	}
+	new_tag_stat_entry->tn.tag = tag;
+	tag_stat_tree_insert(new_tag_stat_entry, &iface_entry->tag_stat_tree);
+done:
+	return new_tag_stat_entry;
+}
+
+static void if_tag_stat_update(const char *ifname, uid_t uid,
+			       const struct sock *sk, enum ifs_tx_rx direction,
+			       int proto, int bytes)
+{
+	struct tag_stat *tag_stat_entry;
+	tag_t tag, acct_tag;
+	tag_t uid_tag;
+	struct data_counters *uid_tag_counters;
+	struct sock_tag *sock_tag_entry;
+	struct iface_stat *iface_entry;
+	struct tag_stat *new_tag_stat = NULL;
+
+	MT_DEBUG("qtaguid: if_tag_stat_update(ifname=%s "
+		"uid=%u sk=%p dir=%d proto=%d bytes=%d)\n",
+		 ifname, uid, sk, direction, proto, bytes);
+
+	iface_entry = get_iface_entry(ifname);
+	if (!iface_entry) {
+		pr_err_ratelimited("qtaguid: iface_stat: stat_update() "
+				   "%s not found\n", ifname);
+		return;
+	}
+	/* It is ok to process data when an iface_entry is inactive */
+
+	MT_DEBUG("qtaguid: iface_stat: stat_update() dev=%s entry=%p\n",
+		 ifname, iface_entry);
+
+	/*
+	 * Look for a tagged sock.
+	 * It will have an acct_uid.
+	 */
+	sock_tag_entry = get_sock_stat(sk);
+	if (sock_tag_entry) {
+		tag = sock_tag_entry->tag;
+		acct_tag = get_atag_from_tag(tag);
+		uid_tag = get_utag_from_tag(tag);
+	} else {
+		acct_tag = make_atag_from_value(0);
+		tag = combine_atag_with_uid(acct_tag, uid);
+		uid_tag = make_tag_from_uid(uid);
+	}
+	MT_DEBUG("qtaguid: iface_stat: stat_update(): "
+		 " looking for tag=0x%llx (uid=%u) in ife=%p\n",
+		 tag, get_uid_from_tag(tag), iface_entry);
+	/* Loop over tag list under this interface for {acct_tag,uid_tag} */
+	spin_lock_bh(&iface_entry->tag_stat_list_lock);
+
+	tag_stat_entry = tag_stat_tree_search(&iface_entry->tag_stat_tree,
+					      tag);
+	if (tag_stat_entry) {
+		/*
+		 * Updating the {acct_tag, uid_tag} entry handles both stats:
+		 * {0, uid_tag} will also get updated.
+		 */
+		tag_stat_update(tag_stat_entry, direction, proto, bytes);
+		spin_unlock_bh(&iface_entry->tag_stat_list_lock);
+		return;
+	}
+
+	/* Loop over tag list under this interface for {0,uid_tag} */
+	tag_stat_entry = tag_stat_tree_search(&iface_entry->tag_stat_tree,
+					      uid_tag);
+	if (!tag_stat_entry) {
+		/* Here: the base uid_tag did not exist */
+		/*
+		 * No parent counters. So
+		 *  - No {0, uid_tag} stats and no {acc_tag, uid_tag} stats.
+		 */
+		new_tag_stat = create_if_tag_stat(iface_entry, uid_tag);
+		if (!new_tag_stat)
+			goto unlock;
+		uid_tag_counters = &new_tag_stat->counters;
+	} else {
+		uid_tag_counters = &tag_stat_entry->counters;
+	}
+
+	if (acct_tag) {
+		/* Create the child {acct_tag, uid_tag} and hook up parent. */
+		new_tag_stat = create_if_tag_stat(iface_entry, tag);
+		if (!new_tag_stat)
+			goto unlock;
+		new_tag_stat->parent_counters = uid_tag_counters;
+	} else {
+		/*
+		 * For new_tag_stat to be still NULL here would require:
+		 *  {0, uid_tag} exists
+		 *  and {acct_tag, uid_tag} doesn't exist
+		 *  AND acct_tag == 0.
+		 * Impossible. This reassures us that new_tag_stat
+		 * below will always be assigned.
+		 */
+		BUG_ON(!new_tag_stat);
+	}
+	tag_stat_update(new_tag_stat, direction, proto, bytes);
+unlock:
+	spin_unlock_bh(&iface_entry->tag_stat_list_lock);
+}
+
+static int iface_netdev_event_handler(struct notifier_block *nb,
+				      unsigned long event, void *ptr)
+{
+	struct net_device *dev = ptr;
+
+	if (unlikely(module_passive))
+		return NOTIFY_DONE;
+
+	IF_DEBUG("qtaguid: iface_stat: netdev_event(): "
+		 "ev=0x%lx/%s netdev=%p->name=%s\n",
+		 event, netdev_evt_str(event), dev, dev ? dev->name : "");
+
+	switch (event) {
+	case NETDEV_UP:
+		iface_stat_create(dev, NULL);
+		atomic64_inc(&qtu_events.iface_events);
+		break;
+	case NETDEV_DOWN:
+	case NETDEV_UNREGISTER:
+		iface_stat_update(dev, event == NETDEV_DOWN);
+		atomic64_inc(&qtu_events.iface_events);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static int iface_inet6addr_event_handler(struct notifier_block *nb,
+					 unsigned long event, void *ptr)
+{
+	struct inet6_ifaddr *ifa = ptr;
+	struct net_device *dev;
+
+	if (unlikely(module_passive))
+		return NOTIFY_DONE;
+
+	IF_DEBUG("qtaguid: iface_stat: inet6addr_event(): "
+		 "ev=0x%lx/%s ifa=%p\n",
+		 event, netdev_evt_str(event), ifa);
+
+	switch (event) {
+	case NETDEV_UP:
+		BUG_ON(!ifa || !ifa->idev);
+		dev = (struct net_device *)ifa->idev->dev;
+		iface_stat_create_ipv6(dev, ifa);
+		atomic64_inc(&qtu_events.iface_events);
+		break;
+	case NETDEV_DOWN:
+	case NETDEV_UNREGISTER:
+		BUG_ON(!ifa || !ifa->idev);
+		dev = (struct net_device *)ifa->idev->dev;
+		iface_stat_update(dev, event == NETDEV_DOWN);
+		atomic64_inc(&qtu_events.iface_events);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static int iface_inetaddr_event_handler(struct notifier_block *nb,
+					unsigned long event, void *ptr)
+{
+	struct in_ifaddr *ifa = ptr;
+	struct net_device *dev;
+
+	if (unlikely(module_passive))
+		return NOTIFY_DONE;
+
+	IF_DEBUG("qtaguid: iface_stat: inetaddr_event(): "
+		 "ev=0x%lx/%s ifa=%p\n",
+		 event, netdev_evt_str(event), ifa);
+
+	switch (event) {
+	case NETDEV_UP:
+		BUG_ON(!ifa || !ifa->ifa_dev);
+		dev = ifa->ifa_dev->dev;
+		iface_stat_create(dev, ifa);
+		atomic64_inc(&qtu_events.iface_events);
+		break;
+	case NETDEV_DOWN:
+	case NETDEV_UNREGISTER:
+		BUG_ON(!ifa || !ifa->ifa_dev);
+		dev = ifa->ifa_dev->dev;
+		iface_stat_update(dev, event == NETDEV_DOWN);
+		atomic64_inc(&qtu_events.iface_events);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block iface_netdev_notifier_blk = {
+	.notifier_call = iface_netdev_event_handler,
+};
+
+static struct notifier_block iface_inetaddr_notifier_blk = {
+	.notifier_call = iface_inetaddr_event_handler,
+};
+
+static struct notifier_block iface_inet6addr_notifier_blk = {
+	.notifier_call = iface_inet6addr_event_handler,
+};
+
+static const struct seq_operations iface_stat_fmt_proc_seq_ops = {
+	.start	= iface_stat_fmt_proc_start,
+	.next	= iface_stat_fmt_proc_next,
+	.stop	= iface_stat_fmt_proc_stop,
+	.show	= iface_stat_fmt_proc_show,
+};
+
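+/* The output format (1 or 2) rides in the proc entry's data pointer. */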
+static int proc_iface_stat_fmt_open(struct inode *inode, struct file *file)
+{
+	struct proc_iface_stat_fmt_info *s;
+
+	s = __seq_open_private(file, &iface_stat_fmt_proc_seq_ops,
+			sizeof(struct proc_iface_stat_fmt_info));
+	if (!s)
+		return -ENOMEM;
+
+	s->fmt = (uintptr_t)PDE_DATA(inode);
+	return 0;
+}
+
+static const struct file_operations proc_iface_stat_fmt_fops = {
+	.open		= proc_iface_stat_fmt_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release_private,
+};
+
+static int __init iface_stat_init(struct proc_dir_entry *parent_procdir)
+{
+	int err;
+
+	iface_stat_procdir = proc_mkdir(iface_stat_procdirname, parent_procdir);
+	if (!iface_stat_procdir) {
+		pr_err("qtaguid: iface_stat: init failed to create proc entry\n");
+		err = -ENOMEM;
+		goto err;
+	}
+
+	iface_stat_all_procfile = proc_create_data(iface_stat_all_procfilename,
+						   proc_iface_perms,
+						   parent_procdir,
+						   &proc_iface_stat_fmt_fops,
+						   (void *)1 /* fmt1 */);
+	if (!iface_stat_all_procfile) {
+		pr_err("qtaguid: iface_stat: init "
+		       " failed to create stat_old proc entry\n");
+		err = -1;
+		goto err_zap_entry;
+	}
+
+	iface_stat_fmt_procfile = proc_create_data(iface_stat_fmt_procfilename,
+						   proc_iface_perms,
+						   parent_procdir,
+						   &proc_iface_stat_fmt_fops,
+						   (void *)2 /* fmt2 */);
+	if (!iface_stat_fmt_procfile) {
+		pr_err("qtaguid: iface_stat: init "
+		       " failed to create stat_all proc entry\n");
+		err = -1;
+		goto err_zap_all_stats_entry;
+	}
+
+	err = register_netdevice_notifier(&iface_netdev_notifier_blk);
+	if (err) {
+		pr_err("qtaguid: iface_stat: init "
+		       "failed to register dev event handler\n");
+		goto err_zap_all_stats_entries;
+	}
+	err = register_inetaddr_notifier(&iface_inetaddr_notifier_blk);
+	if (err) {
+		pr_err("qtaguid: iface_stat: init "
+		       "failed to register ipv4 dev event handler\n");
+		goto err_unreg_nd;
+	}
+
+	err = register_inet6addr_notifier(&iface_inet6addr_notifier_blk);
+	if (err) {
+		pr_err("qtaguid: iface_stat: init "
+		       "failed to register ipv6 dev event handler\n");
+		goto err_unreg_ip4_addr;
+	}
+	return 0;
+
+err_unreg_ip4_addr:
+	unregister_inetaddr_notifier(&iface_inetaddr_notifier_blk);
+err_unreg_nd:
+	unregister_netdevice_notifier(&iface_netdev_notifier_blk);
+err_zap_all_stats_entries:
+	remove_proc_entry(iface_stat_fmt_procfilename, parent_procdir);
+err_zap_all_stats_entry:
+	remove_proc_entry(iface_stat_all_procfilename, parent_procdir);
+err_zap_entry:
+	remove_proc_entry(iface_stat_procdirname, parent_procdir);
+err:
+	return err;
+}
+
+static struct sock *qtaguid_find_sk(const struct sk_buff *skb,
+				    struct xt_action_param *par)
+{
+	struct sock *sk;
+	unsigned int hook_mask = (1 << par->hooknum);
+
+	MT_DEBUG("qtaguid: find_sk(skb=%p) hooknum=%d family=%d\n", skb,
+		 par->hooknum, par->family);
+
+	/*
+	 * Let's not abuse the xt_socket_get*_sk(), or else it will
+	 * return garbage SKs.
+	 */
+	if (!(hook_mask & XT_SOCKET_SUPPORTED_HOOKS))
+		return NULL;
+
+	switch (par->family) {
+	case NFPROTO_IPV6:
+		sk = xt_socket_get6_sk(skb, par);
+		break;
+	case NFPROTO_IPV4:
+		sk = xt_socket_get4_sk(skb, par);
+		break;
+	default:
+		return NULL;
+	}
+
+	if (sk) {
+		MT_DEBUG("qtaguid: %p->sk_proto=%u "
+			 "->sk_state=%d\n", sk, sk->sk_protocol, sk->sk_state);
+		/*
+		 * When in TCP_TIME_WAIT the sk is not a "struct sock" but
+		 * "struct inet_timewait_sock" which is missing fields.
+		 */
+		if (sk->sk_state  == TCP_TIME_WAIT) {
+			xt_socket_put_sk(sk);
+			sk = NULL;
+		}
+	}
+	return sk;
+}
+
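+/*
+ * Charge the skb against the given uid on the interface it is
+ * traversing, falling back to alternate_sk (the conntrack lookup
+ * result) when the skb carries no socket.
+ */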
+static void account_for_uid(const struct sk_buff *skb,
+			    const struct sock *alternate_sk, uid_t uid,
+			    struct xt_action_param *par)
+{
+	const struct net_device *el_dev;
+
+	if (!skb->dev) {
+		MT_DEBUG("qtaguid[%d]: no skb->dev\n", par->hooknum);
+		el_dev = par->in ? : par->out;
+	} else {
+		const struct net_device *other_dev;
+		el_dev = skb->dev;
+		other_dev = par->in ? : par->out;
+		if (el_dev != other_dev) {
+			MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs "
+				"par->(in/out)=%p %s\n",
+				par->hooknum, el_dev, el_dev->name, other_dev,
+				other_dev->name);
+		}
+	}
+
+	if (unlikely(!el_dev)) {
+		pr_info("qtaguid[%d]: no par->in/out?!!\n", par->hooknum);
+	} else if (unlikely(!el_dev->name)) {
+		pr_info("qtaguid[%d]: no dev->name?!!\n", par->hooknum);
+	} else {
+		int proto = ipx_proto(skb, par);
+		MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n",
+			 par->hooknum, el_dev->name, el_dev->type,
+			 par->family, proto);
+
+		if_tag_stat_update(el_dev->name, uid,
+				skb->sk ? skb->sk : alternate_sk,
+				par->in ? IFS_RX : IFS_TX,
+				proto, skb->len);
+	}
+}
+
+static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	const struct xt_qtaguid_match_info *info = par->matchinfo;
+	const struct file *filp;
+	bool got_sock = false;
+	struct sock *sk;
+	uid_t sock_uid;
+	bool res;
+	bool set_sk_callback_lock = false;
+
+	if (unlikely(module_passive))
+		return (info->match ^ info->invert) == 0;
+
+	MT_DEBUG("qtaguid[%d]: entered skb=%p par->in=%p/out=%p fam=%d\n",
+		 par->hooknum, skb, par->in, par->out, par->family);
+
+	atomic64_inc(&qtu_events.match_calls);
+	if (skb == NULL) {
+		res = (info->match ^ info->invert) == 0;
+		goto ret_res;
+	}
+
+	switch (par->hooknum) {
+	case NF_INET_PRE_ROUTING:
+	case NF_INET_POST_ROUTING:
+		atomic64_inc(&qtu_events.match_calls_prepost);
+		iface_stat_update_from_skb(skb, par);
+		/*
+		 * We are done in pre/post. The skb will get processed
+		 * further later.
+		 */
+		res = (info->match ^ info->invert);
+		goto ret_res;
+		break;
+	/* default: Fall through and do UID related work */
+	}
+
+	sk = skb->sk;
+	/*
+	 * When in TCP_TIME_WAIT the sk is not a "struct sock" but
+	 * "struct inet_timewait_sock" which is missing fields.
+	 * So we ignore it.
+	 */
+	if (sk && sk->sk_state == TCP_TIME_WAIT)
+		sk = NULL;
+	if (sk == NULL) {
+		/*
+		 * A missing sk->sk_socket happens when packets are in-flight
+		 * and the matching socket is already closed and gone.
+		 */
+		sk = qtaguid_find_sk(skb, par);
+		/*
+		 * If we got the socket from the find_sk(), we will need to put
+		 * it back, as nf_tproxy_get_sock_v4() got it.
+		 */
+		got_sock = sk != NULL;
+		if (sk)
+			atomic64_inc(&qtu_events.match_found_sk_in_ct);
+		else
+			atomic64_inc(&qtu_events.match_found_no_sk_in_ct);
+	} else {
+		atomic64_inc(&qtu_events.match_found_sk);
+	}
+	MT_DEBUG("qtaguid[%d]: sk=%p got_sock=%d fam=%d proto=%d\n",
+		 par->hooknum, sk, got_sock, par->family, ipx_proto(skb, par));
+	if (sk != NULL) {
+		set_sk_callback_lock = true;
+		read_lock_bh(&sk->sk_callback_lock);
+		MT_DEBUG("qtaguid[%d]: sk=%p->sk_socket=%p->file=%p\n",
+			par->hooknum, sk, sk->sk_socket,
+			sk->sk_socket ? sk->sk_socket->file : (void *)-1LL);
+		filp = sk->sk_socket ? sk->sk_socket->file : NULL;
+		MT_DEBUG("qtaguid[%d]: filp...uid=%u\n",
+			par->hooknum, filp ? filp->f_cred->fsuid : -1);
+	}
+
+	if (sk == NULL || sk->sk_socket == NULL) {
+		/*
+		 * Here, the qtaguid_find_sk() using connection tracking
+		 * couldn't find the owner, so for now we just count them
+		 * against the system.
+		 */
+		/*
+		 * TODO: unhack how to force just accounting.
+		 * For now we only do iface stats when the uid-owner is not
+		 * requested.
+		 */
+		if (!(info->match & XT_QTAGUID_UID))
+			account_for_uid(skb, sk, 0, par);
+		MT_DEBUG("qtaguid[%d]: leaving (sk?sk->sk_socket)=%p\n",
+			par->hooknum,
+			sk ? sk->sk_socket : NULL);
+		res = (info->match ^ info->invert) == 0;
+		atomic64_inc(&qtu_events.match_no_sk);
+		goto put_sock_ret_res;
+	} else if (info->match & info->invert & XT_QTAGUID_SOCKET) {
+		res = false;
+		goto put_sock_ret_res;
+	}
+	filp = sk->sk_socket->file;
+	if (filp == NULL) {
+		MT_DEBUG("qtaguid[%d]: leaving filp=NULL\n", par->hooknum);
+		account_for_uid(skb, sk, 0, par);
+		res = ((info->match ^ info->invert) &
+			(XT_QTAGUID_UID | XT_QTAGUID_GID)) == 0;
+		atomic64_inc(&qtu_events.match_no_sk_file);
+		goto put_sock_ret_res;
+	}
+	sock_uid = filp->f_cred->fsuid;
+	/*
+	 * TODO: unhack how to force just accounting.
+	 * For now we only do iface stats when the uid-owner is not requested
+	 */
+	if (!(info->match & XT_QTAGUID_UID))
+		account_for_uid(skb, sk, sock_uid, par);
+
+	/*
+	 * The following two tests fail the match when:
+	 *    id not in range AND no inverted condition requested
+	 * or id     in range AND    inverted condition requested
+	 * Thus (!a && b) || (a && !b) == a ^ b
+	 */
+	if (info->match & XT_QTAGUID_UID)
+		if ((filp->f_cred->fsuid >= info->uid_min &&
+		     filp->f_cred->fsuid <= info->uid_max) ^
+		    !(info->invert & XT_QTAGUID_UID)) {
+			MT_DEBUG("qtaguid[%d]: leaving uid not matching\n",
+				 par->hooknum);
+			res = false;
+			goto put_sock_ret_res;
+		}
+	if (info->match & XT_QTAGUID_GID)
+		if ((filp->f_cred->fsgid >= info->gid_min &&
+				filp->f_cred->fsgid <= info->gid_max) ^
+			!(info->invert & XT_QTAGUID_GID)) {
+			MT_DEBUG("qtaguid[%d]: leaving gid not matching\n",
+				par->hooknum);
+			res = false;
+			goto put_sock_ret_res;
+		}
+
+	MT_DEBUG("qtaguid[%d]: leaving matched\n", par->hooknum);
+	res = true;
+
+put_sock_ret_res:
+	if (got_sock)
+		xt_socket_put_sk(sk);
+	if (set_sk_callback_lock)
+		read_unlock_bh(&sk->sk_callback_lock);
+ret_res:
+	MT_DEBUG("qtaguid[%d]: left %d\n", par->hooknum, res);
+	return res;
+}
+
+#ifdef DDEBUG
+/* This function is not in xt_qtaguid_print.c because of locks visibility */
+static void prdebug_full_state(int indent_level, const char *fmt, ...)
+{
+	va_list args;
+	char *fmt_buff;
+	char *buff;
+
+	if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+		return;
+
+	fmt_buff = kasprintf(GFP_ATOMIC,
+			     "qtaguid: %s(): %s {\n", __func__, fmt);
+	BUG_ON(!fmt_buff);
+	va_start(args, fmt);
+	buff = kvasprintf(GFP_ATOMIC,
+			  fmt_buff, args);
+	BUG_ON(!buff);
+	pr_debug("%s", buff);
+	kfree(fmt_buff);
+	kfree(buff);
+	va_end(args);
+
+	spin_lock_bh(&sock_tag_list_lock);
+	prdebug_sock_tag_tree(indent_level, &sock_tag_tree);
+	spin_unlock_bh(&sock_tag_list_lock);
+
+	spin_lock_bh(&sock_tag_list_lock);
+	spin_lock_bh(&uid_tag_data_tree_lock);
+	prdebug_uid_tag_data_tree(indent_level, &uid_tag_data_tree);
+	prdebug_proc_qtu_data_tree(indent_level, &proc_qtu_data_tree);
+	spin_unlock_bh(&uid_tag_data_tree_lock);
+	spin_unlock_bh(&sock_tag_list_lock);
+
+	spin_lock_bh(&iface_stat_list_lock);
+	prdebug_iface_stat_list(indent_level, &iface_stat_list);
+	spin_unlock_bh(&iface_stat_list_lock);
+
+	pr_debug("qtaguid: %s(): }\n", __func__);
+}
+#else
+static void prdebug_full_state(int indent_level, const char *fmt, ...) {}
+#endif
+
+struct proc_ctrl_print_info {
+	struct sock *sk; /* socket found by reading to sk_pos */
+	loff_t sk_pos;
+};
+
+static void *qtaguid_ctrl_proc_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct proc_ctrl_print_info *pcpi = m->private;
+	struct sock_tag *sock_tag_entry = v;
+	struct rb_node *node;
+
+	(*pos)++;
+
+	if (!v || v == SEQ_START_TOKEN)
+		return NULL;
+
+	node = rb_next(&sock_tag_entry->sock_node);
+	if (!node) {
+		pcpi->sk = NULL;
+		sock_tag_entry = SEQ_START_TOKEN;
+	} else {
+		sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
+		pcpi->sk = sock_tag_entry->sk;
+	}
+	pcpi->sk_pos = *pos;
+	return sock_tag_entry;
+}
+
+static void *qtaguid_ctrl_proc_start(struct seq_file *m, loff_t *pos)
+{
+	struct proc_ctrl_print_info *pcpi = m->private;
+	struct sock_tag *sock_tag_entry;
+	struct rb_node *node;
+
+	spin_lock_bh(&sock_tag_list_lock);
+
+	if (unlikely(module_passive))
+		return NULL;
+
+	if (*pos == 0) {
+		pcpi->sk_pos = 0;
+		node = rb_first(&sock_tag_tree);
+		if (!node) {
+			pcpi->sk = NULL;
+			return SEQ_START_TOKEN;
+		}
+		sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
+		pcpi->sk = sock_tag_entry->sk;
+	} else {
+		sock_tag_entry = (pcpi->sk ? get_sock_stat_nl(pcpi->sk) :
+						NULL) ?: SEQ_START_TOKEN;
+		if (*pos != pcpi->sk_pos) {
+			/* seq_read skipped a next call */
+			*pos = pcpi->sk_pos;
+			return qtaguid_ctrl_proc_next(m, sock_tag_entry, pos);
+		}
+	}
+	return sock_tag_entry;
+}
+
+static void qtaguid_ctrl_proc_stop(struct seq_file *m, void *v)
+{
+	spin_unlock_bh(&sock_tag_list_lock);
+}
+
+/*
+ * Procfs reader to get all active socket tags using style "1)" as described in
+ * fs/proc/generic.c
+ */
+static int qtaguid_ctrl_proc_show(struct seq_file *m, void *v)
+{
+	struct sock_tag *sock_tag_entry = v;
+	uid_t uid;
+	long f_count;
+
+	CT_DEBUG("qtaguid: proc ctrl pid=%u tgid=%u uid=%u\n",
+		 current->pid, current->tgid, current_fsuid());
+
+	if (sock_tag_entry != SEQ_START_TOKEN) {
+		uid = get_uid_from_tag(sock_tag_entry->tag);
+		CT_DEBUG("qtaguid: proc_read(): sk=%p tag=0x%llx (uid=%u) "
+			 "pid=%u\n",
+			 sock_tag_entry->sk,
+			 sock_tag_entry->tag,
+			 uid,
+			 sock_tag_entry->pid
+			);
+		f_count = atomic_long_read(
+			&sock_tag_entry->socket->file->f_count);
+		seq_printf(m, "sock=%p tag=0x%llx (uid=%u) pid=%u "
+			   "f_count=%lu\n",
+			   sock_tag_entry->sk,
+			   sock_tag_entry->tag, uid,
+			   sock_tag_entry->pid, f_count);
+	} else {
+		seq_printf(m, "events: sockets_tagged=%llu "
+			   "sockets_untagged=%llu "
+			   "counter_set_changes=%llu "
+			   "delete_cmds=%llu "
+			   "iface_events=%llu "
+			   "match_calls=%llu "
+			   "match_calls_prepost=%llu "
+			   "match_found_sk=%llu "
+			   "match_found_sk_in_ct=%llu "
+			   "match_found_no_sk_in_ct=%llu "
+			   "match_no_sk=%llu "
+			   "match_no_sk_file=%llu\n",
+			   (u64)atomic64_read(&qtu_events.sockets_tagged),
+			   (u64)atomic64_read(&qtu_events.sockets_untagged),
+			   (u64)atomic64_read(&qtu_events.counter_set_changes),
+			   (u64)atomic64_read(&qtu_events.delete_cmds),
+			   (u64)atomic64_read(&qtu_events.iface_events),
+			   (u64)atomic64_read(&qtu_events.match_calls),
+			   (u64)atomic64_read(&qtu_events.match_calls_prepost),
+			   (u64)atomic64_read(&qtu_events.match_found_sk),
+			   (u64)atomic64_read(&qtu_events.match_found_sk_in_ct),
+			   (u64)atomic64_read(&qtu_events.match_found_no_sk_in_ct),
+			   (u64)atomic64_read(&qtu_events.match_no_sk),
+			   (u64)atomic64_read(&qtu_events.match_no_sk_file));
+
+		/* Count the following as part of the last item_index */
+		prdebug_full_state(0, "proc ctrl");
+	}
+
+	return 0;
+}
+
+/*
+ * Delete socket tags and stat tags associated with a given
+ * accounting tag and uid.
+ */
+static int ctrl_cmd_delete(const char *input)
+{
+	char cmd;
+	uid_t uid;
+	uid_t entry_uid;
+	tag_t acct_tag;
+	tag_t tag;
+	int res, argc;
+	struct iface_stat *iface_entry;
+	struct rb_node *node;
+	struct sock_tag *st_entry;
+	struct rb_root st_to_free_tree = RB_ROOT;
+	struct tag_stat *ts_entry;
+	struct tag_counter_set *tcs_entry;
+	struct tag_ref *tr_entry;
+	struct uid_tag_data *utd_entry;
+
+	argc = sscanf(input, "%c %llu %u", &cmd, &acct_tag, &uid);
+	CT_DEBUG("qtaguid: ctrl_delete(%s): argc=%d cmd=%c "
+		 "user_tag=0x%llx uid=%u\n", input, argc, cmd,
+		 acct_tag, uid);
+	if (argc < 2) {
+		res = -EINVAL;
+		goto err;
+	}
+	if (!valid_atag(acct_tag)) {
+		pr_info("qtaguid: ctrl_delete(%s): invalid tag\n", input);
+		res = -EINVAL;
+		goto err;
+	}
+	if (argc < 3) {
+		uid = current_fsuid();
+	} else if (!can_impersonate_uid(uid)) {
+		pr_info("qtaguid: ctrl_delete(%s): "
+			"insufficient priv from pid=%u tgid=%u uid=%u\n",
+			input, current->pid, current->tgid, current_fsuid());
+		res = -EPERM;
+		goto err;
+	}
+
+	tag = combine_atag_with_uid(acct_tag, uid);
+	CT_DEBUG("qtaguid: ctrl_delete(%s): "
+		 "looking for tag=0x%llx (uid=%u)\n",
+		 input, tag, uid);
+
+	/* Delete socket tags */
+	spin_lock_bh(&sock_tag_list_lock);
+	node = rb_first(&sock_tag_tree);
+	while (node) {
+		st_entry = rb_entry(node, struct sock_tag, sock_node);
+		entry_uid = get_uid_from_tag(st_entry->tag);
+		node = rb_next(node);
+		if (entry_uid != uid)
+			continue;
+
+		CT_DEBUG("qtaguid: ctrl_delete(%s): st tag=0x%llx (uid=%u)\n",
+			 input, st_entry->tag, entry_uid);
+
+		if (!acct_tag || st_entry->tag == tag) {
+			rb_erase(&st_entry->sock_node, &sock_tag_tree);
+			/* Can't sockfd_put() within spinlock, do it later. */
+			sock_tag_tree_insert(st_entry, &st_to_free_tree);
+			tr_entry = lookup_tag_ref(st_entry->tag, NULL);
+			BUG_ON(tr_entry->num_sock_tags <= 0);
+			tr_entry->num_sock_tags--;
+			/*
+			 * TODO: remove if, and start failing.
+			 * This is a hack to work around the fact that in some
+			 * places we have "if (IS_ERR_OR_NULL(pqd_entry))"
+			 * and are trying to work around apps
+			 * that didn't open the /dev/xt_qtaguid.
+			 */
+			if (st_entry->list.next && st_entry->list.prev)
+				list_del(&st_entry->list);
+		}
+	}
+	spin_unlock_bh(&sock_tag_list_lock);
+
+	sock_tag_tree_erase(&st_to_free_tree);
+
+	/* Delete tag counter-sets */
+	spin_lock_bh(&tag_counter_set_list_lock);
+	/* Counter sets are only on the uid tag, not full tag */
+	tcs_entry = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
+	if (tcs_entry) {
+		CT_DEBUG("qtaguid: ctrl_delete(%s): "
+			 "erase tcs: tag=0x%llx (uid=%u) set=%d\n",
+			 input,
+			 tcs_entry->tn.tag,
+			 get_uid_from_tag(tcs_entry->tn.tag),
+			 tcs_entry->active_set);
+		rb_erase(&tcs_entry->tn.node, &tag_counter_set_tree);
+		kfree(tcs_entry);
+	}
+	spin_unlock_bh(&tag_counter_set_list_lock);
+
+	/*
+	 * If acct_tag is 0, then all entries belonging to uid are
+	 * erased.
+	 */
+	spin_lock_bh(&iface_stat_list_lock);
+	list_for_each_entry(iface_entry, &iface_stat_list, list) {
+		spin_lock_bh(&iface_entry->tag_stat_list_lock);
+		node = rb_first(&iface_entry->tag_stat_tree);
+		while (node) {
+			ts_entry = rb_entry(node, struct tag_stat, tn.node);
+			entry_uid = get_uid_from_tag(ts_entry->tn.tag);
+			node = rb_next(node);
+
+			CT_DEBUG("qtaguid: ctrl_delete(%s): "
+				 "ts tag=0x%llx (uid=%u)\n",
+				 input, ts_entry->tn.tag, entry_uid);
+
+			if (entry_uid != uid)
+				continue;
+			if (!acct_tag || ts_entry->tn.tag == tag) {
+				CT_DEBUG("qtaguid: ctrl_delete(%s): "
+					 "erase ts: %s 0x%llx %u\n",
+					 input, iface_entry->ifname,
+					 get_atag_from_tag(ts_entry->tn.tag),
+					 entry_uid);
+				rb_erase(&ts_entry->tn.node,
+					 &iface_entry->tag_stat_tree);
+				kfree(ts_entry);
+			}
+		}
+		spin_unlock_bh(&iface_entry->tag_stat_list_lock);
+	}
+	spin_unlock_bh(&iface_stat_list_lock);
+
+	/* Cleanup the uid_tag_data */
+	spin_lock_bh(&uid_tag_data_tree_lock);
+	node = rb_first(&uid_tag_data_tree);
+	while (node) {
+		utd_entry = rb_entry(node, struct uid_tag_data, node);
+		entry_uid = utd_entry->uid;
+		node = rb_next(node);
+
+		CT_DEBUG("qtaguid: ctrl_delete(%s): "
+			 "utd uid=%u\n",
+			 input, entry_uid);
+
+		if (entry_uid != uid)
+			continue;
+		/*
+		 * Go over the tag_refs, and those that don't have
+		 * sock_tags using them are freed.
+		 */
+		put_tag_ref_tree(tag, utd_entry);
+		put_utd_entry(utd_entry);
+	}
+	spin_unlock_bh(&uid_tag_data_tree_lock);
+
+	atomic64_inc(&qtu_events.delete_cmds);
+	res = 0;
+
+err:
+	return res;
+}
+
+static int ctrl_cmd_counter_set(const char *input)
+{
+	char cmd;
+	uid_t uid = 0;
+	tag_t tag;
+	int res, argc;
+	struct tag_counter_set *tcs;
+	int counter_set;
+
+	argc = sscanf(input, "%c %d %u", &cmd, &counter_set, &uid);
+	CT_DEBUG("qtaguid: ctrl_counterset(%s): argc=%d cmd=%c "
+		 "set=%d uid=%u\n", input, argc, cmd,
+		 counter_set, uid);
+	if (argc != 3) {
+		res = -EINVAL;
+		goto err;
+	}
+	if (counter_set < 0 || counter_set >= IFS_MAX_COUNTER_SETS) {
+		pr_info("qtaguid: ctrl_counterset(%s): invalid counter_set range\n",
+			input);
+		res = -EINVAL;
+		goto err;
+	}
+	if (!can_manipulate_uids()) {
+		pr_info("qtaguid: ctrl_counterset(%s): "
+			"insufficient priv from pid=%u tgid=%u uid=%u\n",
+			input, current->pid, current->tgid, current_fsuid());
+		res = -EPERM;
+		goto err;
+	}
+
+	tag = make_tag_from_uid(uid);
+	spin_lock_bh(&tag_counter_set_list_lock);
+	tcs = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
+	if (!tcs) {
+		tcs = kzalloc(sizeof(*tcs), GFP_ATOMIC);
+		if (!tcs) {
+			spin_unlock_bh(&tag_counter_set_list_lock);
+			pr_err("qtaguid: ctrl_counterset(%s): "
+			       "failed to alloc counter set\n",
+			       input);
+			res = -ENOMEM;
+			goto err;
+		}
+		tcs->tn.tag = tag;
+		tag_counter_set_tree_insert(tcs, &tag_counter_set_tree);
+		CT_DEBUG("qtaguid: ctrl_counterset(%s): added tcs tag=0x%llx "
+			 "(uid=%u) set=%d\n",
+			 input, tag, get_uid_from_tag(tag), counter_set);
+	}
+	tcs->active_set = counter_set;
+	spin_unlock_bh(&tag_counter_set_list_lock);
+	atomic64_inc(&qtu_events.counter_set_changes);
+	res = 0;
+
+err:
+	return res;
+}
+
+static int ctrl_cmd_tag(const char *input)
+{
+	char cmd;
+	int sock_fd = 0;
+	uid_t uid = 0;
+	tag_t acct_tag = make_atag_from_value(0);
+	tag_t full_tag;
+	struct socket *el_socket;
+	int res, argc;
+	struct sock_tag *sock_tag_entry;
+	struct tag_ref *tag_ref_entry;
+	struct uid_tag_data *uid_tag_data_entry;
+	struct proc_qtu_data *pqd_entry;
+
+	/* Unassigned args will get defaulted later. */
+	argc = sscanf(input, "%c %d %llu %u", &cmd, &sock_fd, &acct_tag, &uid);
+	CT_DEBUG("qtaguid: ctrl_tag(%s): argc=%d cmd=%c sock_fd=%d "
+		 "acct_tag=0x%llx uid=%u\n", input, argc, cmd, sock_fd,
+		 acct_tag, uid);
+	if (argc < 2) {
+		res = -EINVAL;
+		goto err;
+	}
+	el_socket = sockfd_lookup(sock_fd, &res);  /* This locks the file */
+	if (!el_socket) {
+		pr_info("qtaguid: ctrl_tag(%s): failed to lookup"
+			" sock_fd=%d err=%d pid=%u tgid=%u uid=%u\n",
+			input, sock_fd, res, current->pid, current->tgid,
+			current_fsuid());
+		goto err;
+	}
+	CT_DEBUG("qtaguid: ctrl_tag(%s): socket->...->f_count=%ld ->sk=%p\n",
+		 input, atomic_long_read(&el_socket->file->f_count),
+		 el_socket->sk);
+	if (argc < 3) {
+		acct_tag = make_atag_from_value(0);
+	} else if (!valid_atag(acct_tag)) {
+		pr_info("qtaguid: ctrl_tag(%s): invalid tag\n", input);
+		res = -EINVAL;
+		goto err_put;
+	}
+	CT_DEBUG("qtaguid: ctrl_tag(%s): "
+		 "pid=%u tgid=%u uid=%u euid=%u fsuid=%u "
+		 "ctrl.gid=%u in_group()=%d in_egroup()=%d\n",
+		 input, current->pid, current->tgid, current_uid(),
+		 current_euid(), current_fsuid(),
+		 xt_qtaguid_ctrl_file->gid,
+		 in_group_p(xt_qtaguid_ctrl_file->gid),
+		 in_egroup_p(xt_qtaguid_ctrl_file->gid));
+	if (argc < 4) {
+		uid = current_fsuid();
+	} else if (!can_impersonate_uid(uid)) {
+		pr_info("qtaguid: ctrl_tag(%s): "
+			"insufficient priv from pid=%u tgid=%u uid=%u\n",
+			input, current->pid, current->tgid, current_fsuid());
+		res = -EPERM;
+		goto err_put;
+	}
+	full_tag = combine_atag_with_uid(acct_tag, uid);
+
+	spin_lock_bh(&sock_tag_list_lock);
+	sock_tag_entry = get_sock_stat_nl(el_socket->sk);
+	tag_ref_entry = get_tag_ref(full_tag, &uid_tag_data_entry);
+	if (IS_ERR(tag_ref_entry)) {
+		res = PTR_ERR(tag_ref_entry);
+		spin_unlock_bh(&sock_tag_list_lock);
+		goto err_put;
+	}
+	tag_ref_entry->num_sock_tags++;
+	if (sock_tag_entry) {
+		struct tag_ref *prev_tag_ref_entry;
+
+		CT_DEBUG("qtaguid: ctrl_tag(%s): retag for sk=%p "
+			 "st@%p ...->f_count=%ld\n",
+			 input, el_socket->sk, sock_tag_entry,
+			 atomic_long_read(&el_socket->file->f_count));
+		/*
+		 * This is a re-tagging, so release the sock_fd that was
+		 * locked at the time of the 1st tagging.
+		 * There is still the ref from this call's sockfd_lookup() so
+		 * it can be done within the spinlock.
+		 */
+		sockfd_put(sock_tag_entry->socket);
+		prev_tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag,
+						    &uid_tag_data_entry);
+		BUG_ON(IS_ERR_OR_NULL(prev_tag_ref_entry));
+		BUG_ON(prev_tag_ref_entry->num_sock_tags <= 0);
+		prev_tag_ref_entry->num_sock_tags--;
+		sock_tag_entry->tag = full_tag;
+	} else {
+		CT_DEBUG("qtaguid: ctrl_tag(%s): newtag for sk=%p\n",
+			 input, el_socket->sk);
+		sock_tag_entry = kzalloc(sizeof(*sock_tag_entry),
+					 GFP_ATOMIC);
+		if (!sock_tag_entry) {
+			pr_err("qtaguid: ctrl_tag(%s): "
+			       "socket tag alloc failed\n",
+			       input);
+			spin_unlock_bh(&sock_tag_list_lock);
+			res = -ENOMEM;
+			goto err_tag_unref_put;
+		}
+		sock_tag_entry->sk = el_socket->sk;
+		sock_tag_entry->socket = el_socket;
+		sock_tag_entry->pid = current->tgid;
+		sock_tag_entry->tag = combine_atag_with_uid(acct_tag,
+							    uid);
+		spin_lock_bh(&uid_tag_data_tree_lock);
+		pqd_entry = proc_qtu_data_tree_search(
+			&proc_qtu_data_tree, current->tgid);
+		/*
+		 * TODO: remove if, and start failing.
+		 * At first, we want to catch user-space code that is not
+		 * opening the /dev/xt_qtaguid.
+		 */
+		if (IS_ERR_OR_NULL(pqd_entry))
+			pr_warn_once(
+				"qtaguid: %s(): "
+				"User space forgot to open /dev/xt_qtaguid? "
+				"pid=%u tgid=%u uid=%u\n", __func__,
+				current->pid, current->tgid,
+				current_fsuid());
+		else
+			list_add(&sock_tag_entry->list,
+				 &pqd_entry->sock_tag_list);
+		spin_unlock_bh(&uid_tag_data_tree_lock);
+
+		sock_tag_tree_insert(sock_tag_entry, &sock_tag_tree);
+		atomic64_inc(&qtu_events.sockets_tagged);
+	}
+	spin_unlock_bh(&sock_tag_list_lock);
+	/* We keep the ref to the socket (file) until it is untagged */
+	CT_DEBUG("qtaguid: ctrl_tag(%s): done st@%p ...->f_count=%ld\n",
+		 input, sock_tag_entry,
+		 atomic_long_read(&el_socket->file->f_count));
+	return 0;
+
+err_tag_unref_put:
+	BUG_ON(tag_ref_entry->num_sock_tags <= 0);
+	tag_ref_entry->num_sock_tags--;
+	free_tag_ref_from_utd_entry(tag_ref_entry, uid_tag_data_entry);
+err_put:
+	CT_DEBUG("qtaguid: ctrl_tag(%s): done. ...->f_count=%ld\n",
+		 input, atomic_long_read(&el_socket->file->f_count) - 1);
+	/* Release the sock_fd that was grabbed by sockfd_lookup(). */
+	sockfd_put(el_socket);
+	return res;
+
+err:
+	CT_DEBUG("qtaguid: ctrl_tag(%s): done.\n", input);
+	return res;
+}
+
+static int ctrl_cmd_untag(const char *input)
+{
+	char cmd;
+	int sock_fd = 0;
+	struct socket *el_socket;
+	int res, argc;
+	struct sock_tag *sock_tag_entry;
+	struct tag_ref *tag_ref_entry;
+	struct uid_tag_data *utd_entry;
+	struct proc_qtu_data *pqd_entry;
+
+	argc = sscanf(input, "%c %d", &cmd, &sock_fd);
+	CT_DEBUG("qtaguid: ctrl_untag(%s): argc=%d cmd=%c sock_fd=%d\n",
+		 input, argc, cmd, sock_fd);
+	if (argc < 2) {
+		res = -EINVAL;
+		goto err;
+	}
+	el_socket = sockfd_lookup(sock_fd, &res);  /* This locks the file */
+	if (!el_socket) {
+		pr_info("qtaguid: ctrl_untag(%s): failed to lookup"
+			" sock_fd=%d err=%d pid=%u tgid=%u uid=%u\n",
+			input, sock_fd, res, current->pid, current->tgid,
+			current_fsuid());
+		goto err;
+	}
+	CT_DEBUG("qtaguid: ctrl_untag(%s): socket->...->f_count=%ld ->sk=%p\n",
+		 input, atomic_long_read(&el_socket->file->f_count),
+		 el_socket->sk);
+	spin_lock_bh(&sock_tag_list_lock);
+	sock_tag_entry = get_sock_stat_nl(el_socket->sk);
+	if (!sock_tag_entry) {
+		spin_unlock_bh(&sock_tag_list_lock);
+		res = -EINVAL;
+		goto err_put;
+	}
+	/*
+	 * The socket already belongs to the current process
+	 * so it can do whatever it wants to it.
+	 */
+	rb_erase(&sock_tag_entry->sock_node, &sock_tag_tree);
+
+	tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag, &utd_entry);
+	BUG_ON(!tag_ref_entry);
+	BUG_ON(tag_ref_entry->num_sock_tags <= 0);
+	spin_lock_bh(&uid_tag_data_tree_lock);
+	pqd_entry = proc_qtu_data_tree_search(
+		&proc_qtu_data_tree, current->tgid);
+	/*
+	 * TODO: remove if, and start failing.
+	 * At first, we want to catch user-space code that is not
+	 * opening the /dev/xt_qtaguid.
+	 */
+	if (IS_ERR_OR_NULL(pqd_entry))
+		pr_warn_once("qtaguid: %s(): "
+			     "User space forgot to open /dev/xt_qtaguid? "
+			     "pid=%u tgid=%u uid=%u\n", __func__,
+			     current->pid, current->tgid, current_fsuid());
+	else
+		list_del(&sock_tag_entry->list);
+	spin_unlock_bh(&uid_tag_data_tree_lock);
+	/*
+	 * We don't free tag_ref from the utd_entry here,
+	 * only during a cmd_delete().
+	 */
+	tag_ref_entry->num_sock_tags--;
+	spin_unlock_bh(&sock_tag_list_lock);
+	/*
+	 * Release the sock_fd that was grabbed at tag time,
+	 * and once more for the sockfd_lookup() here.
+	 */
+	sockfd_put(sock_tag_entry->socket);
+	CT_DEBUG("qtaguid: ctrl_untag(%s): done. st@%p ...->f_count=%ld\n",
+		 input, sock_tag_entry,
+		 atomic_long_read(&el_socket->file->f_count) - 1);
+	sockfd_put(el_socket);
+
+	kfree(sock_tag_entry);
+	atomic64_inc(&qtu_events.sockets_untagged);
+
+	return 0;
+
+err_put:
+	CT_DEBUG("qtaguid: ctrl_untag(%s): done. socket->...->f_count=%ld\n",
+		 input, atomic_long_read(&el_socket->file->f_count) - 1);
+	/* Release the sock_fd that was grabbed by sockfd_lookup(). */
+	sockfd_put(el_socket);
+	return res;
+
+err:
+	CT_DEBUG("qtaguid: ctrl_untag(%s): done.\n", input);
+	return res;
+}
+
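+/*
+ * Dispatch a command written to the ctrl file:
+ *   "d <acct_tag> [<uid>]"               delete tags and stats
+ *   "s <counter_set> <uid>"              switch the active counter set
+ *   "t <sock_fd> [<acct_tag> [<uid>]]"   tag a socket
+ *   "u <sock_fd>"                        untag a socket
+ */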
+static ssize_t qtaguid_ctrl_parse(const char *input, size_t count)
+{
+	char cmd;
+	ssize_t res;
+
+	CT_DEBUG("qtaguid: ctrl(%s): pid=%u tgid=%u uid=%u\n",
+		 input, current->pid, current->tgid, current_fsuid());
+
+	cmd = input[0];
+	/* Collect params for commands */
+	switch (cmd) {
+	case 'd':
+		res = ctrl_cmd_delete(input);
+		break;
+
+	case 's':
+		res = ctrl_cmd_counter_set(input);
+		break;
+
+	case 't':
+		res = ctrl_cmd_tag(input);
+		break;
+
+	case 'u':
+		res = ctrl_cmd_untag(input);
+		break;
+
+	default:
+		res = -EINVAL;
+		goto err;
+	}
+	if (!res)
+		res = count;
+err:
+	CT_DEBUG("qtaguid: ctrl(%s): res=%zd\n", input, res);
+	return res;
+}
+
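+/* Longest accepted ctrl command, excluding the NUL added on copy. */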
+#define MAX_QTAGUID_CTRL_INPUT_LEN 255
+static ssize_t qtaguid_ctrl_proc_write(struct file *file,
+				       const char __user *buffer,
+				       size_t count, loff_t *offp)
+{
+	char input_buf[MAX_QTAGUID_CTRL_INPUT_LEN];
+
+	if (unlikely(module_passive))
+		return count;
+
+	if (count >= MAX_QTAGUID_CTRL_INPUT_LEN)
+		return -EINVAL;
+
+	if (copy_from_user(input_buf, buffer, count))
+		return -EFAULT;
+
+	input_buf[count] = '\0';
+	return qtaguid_ctrl_parse(input_buf, count);
+}
+
+struct proc_print_info {
+	struct iface_stat *iface_entry;
+	int item_index;
+	tag_t tag; /* tag found by reading to tag_pos */
+	off_t tag_pos;
+	int tag_item_index;
+};
+
+static void pp_stats_header(struct seq_file *m)
+{
+	seq_puts(m,
+		 "idx iface acct_tag_hex uid_tag_int cnt_set "
+		 "rx_bytes rx_packets "
+		 "tx_bytes tx_packets "
+		 "rx_tcp_bytes rx_tcp_packets "
+		 "rx_udp_bytes rx_udp_packets "
+		 "rx_other_bytes rx_other_packets "
+		 "tx_tcp_bytes tx_tcp_packets "
+		 "tx_udp_bytes tx_udp_packets "
+		 "tx_other_bytes tx_other_packets\n");
+}
+
+static int pp_stats_line(struct seq_file *m, struct tag_stat *ts_entry,
+			 int cnt_set)
+{
+	int ret;
+	struct data_counters *cnts;
+	tag_t tag = ts_entry->tn.tag;
+	uid_t stat_uid = get_uid_from_tag(tag);
+	struct proc_print_info *ppi = m->private;
+	/* Detailed tags are not available to everybody */
+	if (get_atag_from_tag(tag) && !can_read_other_uid_stats(stat_uid)) {
+		CT_DEBUG("qtaguid: stats line: "
+			 "%s 0x%llx %u: insufficient priv "
+			 "from pid=%u tgid=%u uid=%u stats.gid=%u\n",
+			 ppi->iface_entry->ifname,
+			 get_atag_from_tag(tag), stat_uid,
+			 current->pid, current->tgid, current_fsuid(),
+			 xt_qtaguid_stats_file->gid);
+		return 0;
+	}
+	ppi->item_index++;
+	cnts = &ts_entry->counters;
+	ret = seq_printf(m, "%d %s 0x%llx %u %u "
+		"%llu %llu "
+		"%llu %llu "
+		"%llu %llu "
+		"%llu %llu "
+		"%llu %llu "
+		"%llu %llu "
+		"%llu %llu "
+		"%llu %llu\n",
+		ppi->item_index,
+		ppi->iface_entry->ifname,
+		get_atag_from_tag(tag),
+		stat_uid,
+		cnt_set,
+		dc_sum_bytes(cnts, cnt_set, IFS_RX),
+		dc_sum_packets(cnts, cnt_set, IFS_RX),
+		dc_sum_bytes(cnts, cnt_set, IFS_TX),
+		dc_sum_packets(cnts, cnt_set, IFS_TX),
+		cnts->bpc[cnt_set][IFS_RX][IFS_TCP].bytes,
+		cnts->bpc[cnt_set][IFS_RX][IFS_TCP].packets,
+		cnts->bpc[cnt_set][IFS_RX][IFS_UDP].bytes,
+		cnts->bpc[cnt_set][IFS_RX][IFS_UDP].packets,
+		cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].bytes,
+		cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].packets,
+		cnts->bpc[cnt_set][IFS_TX][IFS_TCP].bytes,
+		cnts->bpc[cnt_set][IFS_TX][IFS_TCP].packets,
+		cnts->bpc[cnt_set][IFS_TX][IFS_UDP].bytes,
+		cnts->bpc[cnt_set][IFS_TX][IFS_UDP].packets,
+		cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].bytes,
+		cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].packets);
+	return ret ?: 1;
+}
+
+static bool pp_sets(struct seq_file *m, struct tag_stat *ts_entry)
+{
+	int ret;
+	int counter_set;
+	for (counter_set = 0; counter_set < IFS_MAX_COUNTER_SETS;
+	     counter_set++) {
+		ret = pp_stats_line(m, ts_entry, counter_set);
+		if (ret < 0)
+			return false;
+	}
+	return true;
+}
+
+static int qtaguid_stats_proc_iface_stat_ptr_valid(struct iface_stat *ptr)
+{
+	struct iface_stat *iface_entry;
+
+	if (!ptr)
+		return false;
+
+	list_for_each_entry(iface_entry, &iface_stat_list, list)
+		if (iface_entry == ptr)
+			return true;
+	return false;
+}
+
+static void qtaguid_stats_proc_next_iface_entry(struct proc_print_info *ppi)
+{
+	spin_unlock_bh(&ppi->iface_entry->tag_stat_list_lock);
+	list_for_each_entry_continue(ppi->iface_entry, &iface_stat_list, list) {
+		spin_lock_bh(&ppi->iface_entry->tag_stat_list_lock);
+		return;
+	}
+	ppi->iface_entry = NULL;
+}
+
+static void *qtaguid_stats_proc_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct proc_print_info *ppi = m->private;
+	struct tag_stat *ts_entry;
+	struct rb_node *node;
+
+	if (!v) {
+		pr_err("qtaguid: %s(): unexpected v: NULL\n", __func__);
+		return NULL;
+	}
+
+	(*pos)++;
+
+	if (!ppi->iface_entry || unlikely(module_passive))
+		return NULL;
+
+	if (v == SEQ_START_TOKEN)
+		node = rb_first(&ppi->iface_entry->tag_stat_tree);
+	else
+		node = rb_next(&((struct tag_stat *)v)->tn.node);
+
+	while (!node) {
+		qtaguid_stats_proc_next_iface_entry(ppi);
+		if (!ppi->iface_entry)
+			return NULL;
+		node = rb_first(&ppi->iface_entry->tag_stat_tree);
+	}
+
+	ts_entry = rb_entry(node, struct tag_stat, tn.node);
+	ppi->tag = ts_entry->tn.tag;
+	ppi->tag_pos = *pos;
+	ppi->tag_item_index = ppi->item_index;
+	return ts_entry;
+}
+
+static void *qtaguid_stats_proc_start(struct seq_file *m, loff_t *pos)
+{
+	struct proc_print_info *ppi = m->private;
+	struct tag_stat *ts_entry = NULL;
+
+	spin_lock_bh(&iface_stat_list_lock);
+
+	if (*pos == 0) {
+		ppi->item_index = 1;
+		ppi->tag_pos = 0;
+		if (list_empty(&iface_stat_list)) {
+			ppi->iface_entry = NULL;
+		} else {
+			ppi->iface_entry = list_first_entry(&iface_stat_list,
+							    struct iface_stat,
+							    list);
+			spin_lock_bh(&ppi->iface_entry->tag_stat_list_lock);
+		}
+		return SEQ_START_TOKEN;
+	}
+	if (!qtaguid_stats_proc_iface_stat_ptr_valid(ppi->iface_entry)) {
+		if (ppi->iface_entry) {
+			pr_err("qtaguid: %s(): iface_entry %p not found\n",
+			       __func__, ppi->iface_entry);
+			ppi->iface_entry = NULL;
+		}
+		return NULL;
+	}
+
+	spin_lock_bh(&ppi->iface_entry->tag_stat_list_lock);
+
+	if (!ppi->tag_pos) {
+		/* seq_read skipped first next call */
+		ts_entry = SEQ_START_TOKEN;
+	} else {
+		ts_entry = tag_stat_tree_search(
+				&ppi->iface_entry->tag_stat_tree, ppi->tag);
+		if (!ts_entry) {
+			pr_info("qtaguid: %s(): tag_stat.tag 0x%llx not found. Abort.\n",
+				__func__, ppi->tag);
+			return NULL;
+		}
+	}
+
+	if (*pos == ppi->tag_pos) { /* normal resume */
+		ppi->item_index = ppi->tag_item_index;
+	} else {
+		/* seq_read skipped a next call */
+		*pos = ppi->tag_pos;
+		ts_entry = qtaguid_stats_proc_next(m, ts_entry, pos);
+	}
+
+	return ts_entry;
+}
+
+static void qtaguid_stats_proc_stop(struct seq_file *m, void *v)
+{
+	struct proc_print_info *ppi = m->private;
+	if (ppi->iface_entry)
+		spin_unlock_bh(&ppi->iface_entry->tag_stat_list_lock);
+	spin_unlock_bh(&iface_stat_list_lock);
+}
+
+/*
+ * Procfs reader to get all tag stats using style "1)" as described in
+ * fs/proc/generic.c
+ * Groups all protocols tx/rx bytes.
+ */
+static int qtaguid_stats_proc_show(struct seq_file *m, void *v)
+{
+	struct tag_stat *ts_entry = v;
+
+	if (v == SEQ_START_TOKEN)
+		pp_stats_header(m);
+	else
+		pp_sets(m, ts_entry);
+
+	return 0;
+}
+
+/*------------------------------------------*/
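+/*
+ * /dev/xt_qtaguid: a process opens this so the module can track
+ * which tagged sockets it owns and clean them up on release()
+ * (last fd close / process exit).
+ */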
+static int qtudev_open(struct inode *inode, struct file *file)
+{
+	struct uid_tag_data *utd_entry;
+	struct proc_qtu_data  *pqd_entry;
+	struct proc_qtu_data  *new_pqd_entry;
+	int res;
+	bool utd_entry_found;
+
+	if (unlikely(qtu_proc_handling_passive))
+		return 0;
+
+	DR_DEBUG("qtaguid: qtudev_open(): pid=%u tgid=%u uid=%u\n",
+		 current->pid, current->tgid, current_fsuid());
+
+	spin_lock_bh(&uid_tag_data_tree_lock);
+
+	/* Look for existing uid data, or alloc one. */
+	utd_entry = get_uid_data(current_fsuid(), &utd_entry_found);
+	if (IS_ERR_OR_NULL(utd_entry)) {
+		res = PTR_ERR(utd_entry);
+		goto err_unlock;
+	}
+
+	/* Look for existing PID based proc_data */
+	pqd_entry = proc_qtu_data_tree_search(&proc_qtu_data_tree,
+					      current->tgid);
+	if (pqd_entry) {
+		pr_err("qtaguid: qtudev_open(): %u/%u %u "
+		       "%s already opened\n",
+		       current->pid, current->tgid, current_fsuid(),
+		       QTU_DEV_NAME);
+		res = -EBUSY;
+		goto err_unlock_free_utd;
+	}
+
+	new_pqd_entry = kzalloc(sizeof(*new_pqd_entry), GFP_ATOMIC);
+	if (!new_pqd_entry) {
+		pr_err("qtaguid: qtudev_open(): %u/%u %u: "
+		       "proc data alloc failed\n",
+		       current->pid, current->tgid, current_fsuid());
+		res = -ENOMEM;
+		goto err_unlock_free_utd;
+	}
+	new_pqd_entry->pid = current->tgid;
+	INIT_LIST_HEAD(&new_pqd_entry->sock_tag_list);
+	new_pqd_entry->parent_tag_data = utd_entry;
+	utd_entry->num_pqd++;
+
+	proc_qtu_data_tree_insert(new_pqd_entry,
+				  &proc_qtu_data_tree);
+
+	spin_unlock_bh(&uid_tag_data_tree_lock);
+	DR_DEBUG("qtaguid: tracking data for uid=%u in pqd=%p\n",
+		 current_fsuid(), new_pqd_entry);
+	file->private_data = new_pqd_entry;
+	return 0;
+
+err_unlock_free_utd:
+	if (!utd_entry_found) {
+		rb_erase(&utd_entry->node, &uid_tag_data_tree);
+		kfree(utd_entry);
+	}
+err_unlock:
+	spin_unlock_bh(&uid_tag_data_tree_lock);
+	return res;
+}
+
+static int qtudev_release(struct inode *inode, struct file *file)
+{
+	struct proc_qtu_data  *pqd_entry = file->private_data;
+	struct uid_tag_data  *utd_entry = pqd_entry->parent_tag_data;
+	struct sock_tag *st_entry;
+	struct rb_root st_to_free_tree = RB_ROOT;
+	struct list_head *entry, *next;
+	struct tag_ref *tr;
+
+	if (unlikely(qtu_proc_handling_passive))
+		return 0;
+
+	/*
+	 * Do not trust the current->pid, it might just be a kworker cleaning
+	 * up after a dead proc.
+	 */
+	DR_DEBUG("qtaguid: qtudev_release(): "
+		 "pid=%u tgid=%u uid=%u "
+		 "pqd_entry=%p->pid=%u utd_entry=%p->active_tags=%d\n",
+		 current->pid, current->tgid, pqd_entry->parent_tag_data->uid,
+		 pqd_entry, pqd_entry->pid, utd_entry,
+		 utd_entry->num_active_tags);
+
+	spin_lock_bh(&sock_tag_list_lock);
+	spin_lock_bh(&uid_tag_data_tree_lock);
+
+	list_for_each_safe(entry, next, &pqd_entry->sock_tag_list) {
+		st_entry = list_entry(entry, struct sock_tag, list);
+		DR_DEBUG("qtaguid: %s(): "
+			 "erase sock_tag=%p->sk=%p pid=%u tgid=%u uid=%u\n",
+			 __func__,
+			 st_entry, st_entry->sk,
+			 current->pid, current->tgid,
+			 pqd_entry->parent_tag_data->uid);
+
+		utd_entry = uid_tag_data_tree_search(
+			&uid_tag_data_tree,
+			get_uid_from_tag(st_entry->tag));
+		BUG_ON(IS_ERR_OR_NULL(utd_entry));
+		DR_DEBUG("qtaguid: %s(): "
+			 "looking for tag=0x%llx in utd_entry=%p\n", __func__,
+			 st_entry->tag, utd_entry);
+		tr = tag_ref_tree_search(&utd_entry->tag_ref_tree,
+					 st_entry->tag);
+		BUG_ON(!tr);
+		BUG_ON(tr->num_sock_tags <= 0);
+		tr->num_sock_tags--;
+		free_tag_ref_from_utd_entry(tr, utd_entry);
+
+		rb_erase(&st_entry->sock_node, &sock_tag_tree);
+		list_del(&st_entry->list);
+		/* Can't sockfd_put() within spinlock, do it later. */
+		sock_tag_tree_insert(st_entry, &st_to_free_tree);
+
+		/*
+		 * Try to free the utd_entry if no other proc_qtu_data is
+		 * using it (num_pqd is 0) and it doesn't have active tags
+		 * (num_active_tags is 0).
+		 */
+		put_utd_entry(utd_entry);
+	}
+
+	rb_erase(&pqd_entry->node, &proc_qtu_data_tree);
+	BUG_ON(pqd_entry->parent_tag_data->num_pqd < 1);
+	pqd_entry->parent_tag_data->num_pqd--;
+	put_utd_entry(pqd_entry->parent_tag_data);
+	kfree(pqd_entry);
+	file->private_data = NULL;
+
+	spin_unlock_bh(&uid_tag_data_tree_lock);
+	spin_unlock_bh(&sock_tag_list_lock);
+
+	sock_tag_tree_erase(&st_to_free_tree);
+
+	prdebug_full_state(0, "%s(): pid=%u tgid=%u", __func__,
+			   current->pid, current->tgid);
+	return 0;
+}
+
+/*------------------------------------------*/
+static const struct file_operations qtudev_fops = {
+	.owner = THIS_MODULE,
+	.open = qtudev_open,
+	.release = qtudev_release,
+};
+
+static struct miscdevice qtu_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = QTU_DEV_NAME,
+	.fops = &qtudev_fops,
+	/* How sad it doesn't allow for defaults: .mode = S_IRUGO | S_IWUSR */
+};
+
+static const struct seq_operations proc_qtaguid_ctrl_seqops = {
+	.start = qtaguid_ctrl_proc_start,
+	.next = qtaguid_ctrl_proc_next,
+	.stop = qtaguid_ctrl_proc_stop,
+	.show = qtaguid_ctrl_proc_show,
+};
+
+static int proc_qtaguid_ctrl_open(struct inode *inode, struct file *file)
+{
+	return seq_open_private(file, &proc_qtaguid_ctrl_seqops,
+				sizeof(struct proc_ctrl_print_info));
+}
+
+static const struct file_operations proc_qtaguid_ctrl_fops = {
+	.open		= proc_qtaguid_ctrl_open,
+	.read		= seq_read,
+	.write		= qtaguid_ctrl_proc_write,
+	.llseek		= seq_lseek,
+	.release	= seq_release_private,
+};
+
+static const struct seq_operations proc_qtaguid_stats_seqops = {
+	.start = qtaguid_stats_proc_start,
+	.next = qtaguid_stats_proc_next,
+	.stop = qtaguid_stats_proc_stop,
+	.show = qtaguid_stats_proc_show,
+};
+
+static int proc_qtaguid_stats_open(struct inode *inode, struct file *file)
+{
+	return seq_open_private(file, &proc_qtaguid_stats_seqops,
+				sizeof(struct proc_print_info));
+}
+
+static const struct file_operations proc_qtaguid_stats_fops = {
+	.open		= proc_qtaguid_stats_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release_private,
+};
+
+/*------------------------------------------*/
+static int __init qtaguid_proc_register(struct proc_dir_entry **res_procdir)
+{
+	int ret;
+	*res_procdir = proc_mkdir(module_procdirname, init_net.proc_net);
+	if (!*res_procdir) {
+		pr_err("qtaguid: failed to create proc/.../xt_qtaguid\n");
+		ret = -ENOMEM;
+		goto no_dir;
+	}
+
+	xt_qtaguid_ctrl_file = proc_create_data("ctrl", proc_ctrl_perms,
+						*res_procdir,
+						&proc_qtaguid_ctrl_fops,
+						NULL);
+	if (!xt_qtaguid_ctrl_file) {
+		pr_err("qtaguid: failed to create xt_qtaguid/ctrl "
+			" file\n");
+		ret = -ENOMEM;
+		goto no_ctrl_entry;
+	}
+
+	xt_qtaguid_stats_file = proc_create_data("stats", proc_stats_perms,
+						 *res_procdir,
+						 &proc_qtaguid_stats_fops,
+						 NULL);
+	if (!xt_qtaguid_stats_file) {
+		pr_err("qtaguid: failed to create xt_qtaguid/stats "
+			"file\n");
+		ret = -ENOMEM;
+		goto no_stats_entry;
+	}
+	/*
+	 * TODO: add support for counter hacking
+	 * xt_qtaguid_stats_file->write_proc = qtaguid_stats_proc_write;
+	 */
+	return 0;
+
+no_stats_entry:
+	remove_proc_entry("ctrl", *res_procdir);
+no_ctrl_entry:
+	remove_proc_entry("xt_qtaguid", NULL);
+no_dir:
+	return ret;
+}
+
+static struct xt_match qtaguid_mt_reg __read_mostly = {
+	/*
+	 * This module masquerades as the "owner" module so that iptables
+	 * tools can deal with it.
+	 */
+	.name       = "owner",
+	.revision   = 1,
+	.family     = NFPROTO_UNSPEC,
+	.match      = qtaguid_mt,
+	.matchsize  = sizeof(struct xt_qtaguid_match_info),
+	.me         = THIS_MODULE,
+};
+
+static int __init qtaguid_mt_init(void)
+{
+	if (qtaguid_proc_register(&xt_qtaguid_procdir)
+	    || iface_stat_init(xt_qtaguid_procdir)
+	    || xt_register_match(&qtaguid_mt_reg)
+	    || misc_register(&qtu_device))
+		return -1;
+	return 0;
+}
+
+/*
+ * TODO: allow unloading of the module.
+ * For now stats are permanent.
+ * Kconfig forces 'y/n' and never an 'm'.
+ */
+
+module_init(qtaguid_mt_init);
+MODULE_AUTHOR("jpa <jpa@google.com>");
+MODULE_DESCRIPTION("Xtables: socket owner+tag matching and associated stats");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_owner");
+MODULE_ALIAS("ip6t_owner");
+MODULE_ALIAS("ipt_qtaguid");
+MODULE_ALIAS("ip6t_qtaguid");
diff --git a/net/netfilter/xt_qtaguid_internal.h b/net/netfilter/xt_qtaguid_internal.h
new file mode 100644
index 0000000..6dc14a9
--- /dev/null
+++ b/net/netfilter/xt_qtaguid_internal.h
@@ -0,0 +1,352 @@
+/*
+ * Kernel iptables module to track stats for packets based on user tags.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __XT_QTAGUID_INTERNAL_H__
+#define __XT_QTAGUID_INTERNAL_H__
+
+#include <linux/types.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock_types.h>
+#include <linux/workqueue.h>
+
+/* Iface handling */
+#define IDEBUG_MASK (1<<0)
+/* Iptables matching. Per packet. */
+#define MDEBUG_MASK (1<<1)
+/* Red-black tree handling. Per packet. */
+#define RDEBUG_MASK (1<<2)
+/* procfs ctrl/stats handling */
+#define CDEBUG_MASK (1<<3)
+/* dev and resource tracking */
+#define DDEBUG_MASK (1<<4)
+
+/* E.g (IDEBUG_MASK | CDEBUG_MASK | DDEBUG_MASK) */
+#define DEFAULT_DEBUG_MASK 0
+
+/*
+ * (Un)Define these *DEBUG to compile out/in the pr_debug calls.
+ * All undef: text size ~ 0x3030; all def: ~ 0x4404.
+ */
+#define IDEBUG
+#define MDEBUG
+#define RDEBUG
+#define CDEBUG
+#define DDEBUG
+
+#define MSK_DEBUG(mask, ...) do {                           \
+		if (unlikely(qtaguid_debug_mask & (mask)))  \
+			pr_debug(__VA_ARGS__);              \
+	} while (0)
+#ifdef IDEBUG
+#define IF_DEBUG(...) MSK_DEBUG(IDEBUG_MASK, __VA_ARGS__)
+#else
+#define IF_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef MDEBUG
+#define MT_DEBUG(...) MSK_DEBUG(MDEBUG_MASK, __VA_ARGS__)
+#else
+#define MT_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef RDEBUG
+#define RB_DEBUG(...) MSK_DEBUG(RDEBUG_MASK, __VA_ARGS__)
+#else
+#define RB_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef CDEBUG
+#define CT_DEBUG(...) MSK_DEBUG(CDEBUG_MASK, __VA_ARGS__)
+#else
+#define CT_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef DDEBUG
+#define DR_DEBUG(...) MSK_DEBUG(DDEBUG_MASK, __VA_ARGS__)
+#else
+#define DR_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+
+extern uint qtaguid_debug_mask;
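+
+/*
+ * Illustration only (not part of the build): assuming the mask above is
+ * exposed as a module parameter named "debug_mask", the per-area output
+ * could be toggled at runtime with something like:
+ *
+ *   echo 0x1f > /sys/module/xt_qtaguid/parameters/debug_mask  # all areas
+ *   echo 0x10 > /sys/module/xt_qtaguid/parameters/debug_mask  # DDEBUG only
+ *
+ * The parameter name and sysfs path are assumptions for illustration.
+ */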
+
+/*---------------------------------------------------------------------------*/
+/*
+ * Tags:
+ *
+ * They represent what the data usage counters will be tracked against.
+ * By default a tag is just based on the UID.
+ * The UID is used as the base for policing, and cannot be ignored.
+ * So a tag will always at least represent a UID (uid_tag).
+ *
+ * A tag can be augmented with an "accounting tag" which is associated
+ * with a UID.
+ * User space can set the acct_tag portion of the tag which is then used
+ * with sockets: all data belonging to that socket will be counted against the
+ * tag. The policing is then based on the tag's uid_tag portion,
+ * and stats are collected for the acct_tag portion separately.
+ *
+ * There could be
+ * a:  {acct_tag=1, uid_tag=10003}
+ * b:  {acct_tag=2, uid_tag=10003}
+ * c:  {acct_tag=3, uid_tag=10003}
+ * d:  {acct_tag=0, uid_tag=10003}
+ * a, b, and c represent tags associated with specific sockets.
+ * d is for the totals for that uid, including all untagged traffic.
+ * Typically d is used with policing/quota rules.
+ *
+ * We want tag_t big enough to distinguish uid_t and acct_tag.
+ * It might become a struct if needed.
+ * Nothing should be using it as an int.
+ */
+typedef uint64_t tag_t;  /* Only used via accessors */
+
+#define TAG_UID_MASK 0xFFFFFFFFULL
+#define TAG_ACCT_MASK (~0xFFFFFFFFULL)
+
+static inline int tag_compare(tag_t t1, tag_t t2)
+{
+	return t1 < t2 ? -1 : t1 == t2 ? 0 : 1;
+}
+
+static inline tag_t combine_atag_with_uid(tag_t acct_tag, uid_t uid)
+{
+	return acct_tag | uid;
+}
+static inline tag_t make_tag_from_uid(uid_t uid)
+{
+	return uid;
+}
+static inline uid_t get_uid_from_tag(tag_t tag)
+{
+	return tag & TAG_UID_MASK;
+}
+static inline tag_t get_utag_from_tag(tag_t tag)
+{
+	return tag & TAG_UID_MASK;
+}
+static inline tag_t get_atag_from_tag(tag_t tag)
+{
+	return tag & TAG_ACCT_MASK;
+}
+
+static inline bool valid_atag(tag_t tag)
+{
+	return !(tag & TAG_UID_MASK);
+}
+static inline tag_t make_atag_from_value(uint32_t value)
+{
+	return (uint64_t)value << 32;
+}
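+
+/*
+ * Worked example, derived from the accessors above (illustration only):
+ *
+ *   tag_t t = combine_atag_with_uid(make_atag_from_value(2), 10003);
+ *   // t == 0x0000000200002713: acct_tag 2 in the high 32 bits,
+ *   // uid 10003 (0x2713) in the low 32 bits.
+ *   get_uid_from_tag(t);   // 10003
+ *   get_atag_from_tag(t);  // 0x0000000200000000
+ */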
+/*---------------------------------------------------------------------------*/
+
+/*
+ * Maximum number of socket tags that a UID is allowed to have active.
+ * Multiple processes belonging to the same UID contribute towards this limit.
+ * Special UIDs that can impersonate a UID also contribute (e.g. download
+ * manager, ...)
+ */
+#define DEFAULT_MAX_SOCK_TAGS 1024
+
+/*
+ * For now we only track 2 sets of counters.
+ * The default set is 0.
+ * Userspace can activate another set for a given uid being tracked.
+ */
+#define IFS_MAX_COUNTER_SETS 2
+
+enum ifs_tx_rx {
+	IFS_TX,
+	IFS_RX,
+	IFS_MAX_DIRECTIONS
+};
+
+/* For now, TCP, UDP, the rest */
+enum ifs_proto {
+	IFS_TCP,
+	IFS_UDP,
+	IFS_PROTO_OTHER,
+	IFS_MAX_PROTOS
+};
+
+struct byte_packet_counters {
+	uint64_t bytes;
+	uint64_t packets;
+};
+
+struct data_counters {
+	struct byte_packet_counters bpc[IFS_MAX_COUNTER_SETS][IFS_MAX_DIRECTIONS][IFS_MAX_PROTOS];
+};
+
+static inline uint64_t dc_sum_bytes(struct data_counters *counters,
+				    int set,
+				    enum ifs_tx_rx direction)
+{
+	return counters->bpc[set][direction][IFS_TCP].bytes
+		+ counters->bpc[set][direction][IFS_UDP].bytes
+		+ counters->bpc[set][direction][IFS_PROTO_OTHER].bytes;
+}
+
+static inline uint64_t dc_sum_packets(struct data_counters *counters,
+				      int set,
+				      enum ifs_tx_rx direction)
+{
+	return counters->bpc[set][direction][IFS_TCP].packets
+		+ counters->bpc[set][direction][IFS_UDP].packets
+		+ counters->bpc[set][direction][IFS_PROTO_OTHER].packets;
+}
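+
+/*
+ * Indexing sketch (illustration only; the real update path lives in
+ * xt_qtaguid.c): accounting a transmitted TCP skb of len bytes against
+ * counter set 'set' amounts to
+ *
+ *   dc->bpc[set][IFS_TX][IFS_TCP].bytes   += len;
+ *   dc->bpc[set][IFS_TX][IFS_TCP].packets += 1;
+ *
+ * which is exactly what dc_sum_bytes()/dc_sum_packets() above re-aggregate.
+ */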
+
+/* Generic tag-based nodes used as a base for rb_tree ops */
+struct tag_node {
+	struct rb_node node;
+	tag_t tag;
+};
+
+struct tag_stat {
+	struct tag_node tn;
+	struct data_counters counters;
+	/*
+	 * If this tag is acct_tag based, we need to count against the
+	 * matching parent uid_tag.
+	 */
+	struct data_counters *parent_counters;
+};
+
+struct iface_stat {
+	struct list_head list;  /* in iface_stat_list */
+	char *ifname;
+	bool active;
+	/* net_dev is only valid for active iface_stat */
+	struct net_device *net_dev;
+
+	struct byte_packet_counters totals_via_dev[IFS_MAX_DIRECTIONS];
+	struct data_counters totals_via_skb;
+	/*
+	 * We keep the last_known, because some devices reset their counters
+	 * just before NETDEV_UP, while some will reset just before
+	 * NETDEV_REGISTER (which is more normal).
+	 * So now, if the device didn't do a NETDEV_UNREGISTER and we see
+	 * its current dev stats smaller than what was previously known, we
+	 * assume an UNREGISTER and just use the last_known.
+	 */
+	struct byte_packet_counters last_known[IFS_MAX_DIRECTIONS];
+	/* last_known is usable when last_known_valid is true */
+	bool last_known_valid;
+
+	struct proc_dir_entry *proc_ptr;
+
+	struct rb_root tag_stat_tree;
+	spinlock_t tag_stat_list_lock;
+};
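+
+/*
+ * Sketch of the last_known heuristic described above (assumption: this
+ * mirrors the consumer in xt_qtaguid.c; shown only to clarify the fields):
+ *
+ *   if (is->last_known_valid && dev_stats_went_backwards)
+ *           // no NETDEV_UNREGISTER was seen, so assume a hidden counter
+ *           // reset and fall back to is->last_known[] instead.
+ */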
+
+/* This is needed to create proc_dir_entries from atomic context. */
+struct iface_stat_work {
+	struct work_struct iface_work;
+	struct iface_stat *iface_entry;
+};
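+
+/*
+ * Deferral sketch (assumption: mirrors how xt_qtaguid.c uses this struct):
+ * the notifier queues an iface_stat_work, and the worker recovers the
+ * iface_stat before creating its proc entries:
+ *
+ *   struct iface_stat_work *isw =
+ *           container_of(work, struct iface_stat_work, iface_work);
+ *   // ... proc_mkdir()/proc_create_data() for isw->iface_entry ...
+ */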
+
+/*
+ * Track the tag that this socket is transferring data for, which is not
+ * necessarily the uid that owns the socket.
+ * This is the tag against which tag_stat.counters will be billed.
+ * These structs need to be looked up by sock and pid.
+ */
+struct sock_tag {
+	struct rb_node sock_node;
+	struct sock *sk;  /* Only used as a number, never dereferenced */
+	/* The socket is needed for sockfd_put() */
+	struct socket *socket;
+	/* Used to associate with a given pid */
+	struct list_head list;   /* in proc_qtu_data.sock_tag_list */
+	pid_t pid;
+
+	tag_t tag;
+};
+
+struct qtaguid_event_counts {
+	/* Various successful events */
+	atomic64_t sockets_tagged;
+	atomic64_t sockets_untagged;
+	atomic64_t counter_set_changes;
+	atomic64_t delete_cmds;
+	atomic64_t iface_events;  /* Number of NETDEV_* events handled */
+
+	atomic64_t match_calls;   /* Number of times iptables called mt */
+	/* Number of times iptables called mt from pre or post routing hooks */
+	atomic64_t match_calls_prepost;
+	/*
+	 * match_found_sk_*: numbers related to the netfilter matching
+	 * function finding a sock for the sk_buff.
+	 * Total skbs processed is sum(match_found*).
+	 */
+	atomic64_t match_found_sk;   /* An sk was already in the sk_buff. */
+	/* The connection tracker had or didn't have the sk. */
+	atomic64_t match_found_sk_in_ct;
+	atomic64_t match_found_no_sk_in_ct;
+	/*
+	 * No sk could be found. No apparent owner. Could happen with
+	 * unsolicited traffic.
+	 */
+	atomic64_t match_no_sk;
+	/*
+	 * The file ptr in the sk_socket wasn't there.
+	 * This might happen for traffic while the socket is being closed.
+	 */
+	atomic64_t match_no_sk_file;
+};
+
+/* Track the set active_set for the given tag. */
+struct tag_counter_set {
+	struct tag_node tn;
+	int active_set;
+};
+
+/*----------------------------------------------*/
+/*
+ * The qtu uid data is used to track resources that are created directly or
+ * indirectly by processes (uid tracked).
+ * It is shared by the processes with the same uid.
+ * Some of the resources will be counted to prevent further rogue allocations,
+ * some will need freeing once the owner process (uid) exits.
+ */
+struct uid_tag_data {
+	struct rb_node node;
+	uid_t uid;
+
+	/*
+	 * For the uid, how many accounting tags have been set.
+	 */
+	int num_active_tags;
+	/* Track the number of proc_qtu_data that reference it */
+	int num_pqd;
+	struct rb_root tag_ref_tree;
+	/* No tag_node_tree_lock; use uid_tag_data_tree_lock */
+};
+
+struct tag_ref {
+	struct tag_node tn;
+
+	/*
+	 * This tracks the number of active sockets that have a tag on them
+	 * which matches this tag_ref.tn.tag.
+	 * A tag ref can live on after the sockets are untagged.
+	 * A tag ref can only be removed during a tag delete command.
+	 */
+	int num_sock_tags;
+};
+
+struct proc_qtu_data {
+	struct rb_node node;
+	pid_t pid;
+
+	struct uid_tag_data *parent_tag_data;
+
+	/* Tracks the sock_tags that need freeing upon this proc's death */
+	struct list_head sock_tag_list;
+	/* No spinlock_t sock_tag_list_lock; use the global one. */
+};
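+
+/*
+ * Ownership sketch of the structures above, per the comments in this file:
+ *
+ *   proc_qtu_data (per tgid, pinned by an open qtu dev fd)
+ *     -> parent_tag_data: uid_tag_data (per uid, refcounted via num_pqd)
+ *          -> tag_ref_tree: tag_ref (per tag; removable only by a tag
+ *                           delete command once num_sock_tags is 0)
+ *     -> sock_tag_list: sock_tags to release when the process dies
+ */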
+
+/*----------------------------------------------*/
+#endif  /* ifndef __XT_QTAGUID_INTERNAL_H__ */
diff --git a/net/netfilter/xt_qtaguid_print.c b/net/netfilter/xt_qtaguid_print.c
new file mode 100644
index 0000000..f6a00a3
--- /dev/null
+++ b/net/netfilter/xt_qtaguid_print.c
@@ -0,0 +1,566 @@
+/*
+ * Pretty printing Support for iptables xt_qtaguid module.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Most of the functions in this file just waste time if DEBUG is not defined.
+ * The matching xt_qtaguid_print.h will static inline empty funcs if the needed
+ * debug flags are not defined.
+ * Those funcs that fail to allocate memory will panic as there is no need to
+ * hobble along just pretending to do the requested work.
+ */
+
+#define DEBUG
+
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/net.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/spinlock_types.h>
+
+#include "xt_qtaguid_internal.h"
+#include "xt_qtaguid_print.h"
+
+#ifdef DDEBUG
+
+static void _bug_on_err_or_null(void *ptr)
+{
+	if (IS_ERR_OR_NULL(ptr)) {
+		pr_err("qtaguid: kmalloc failed\n");
+		BUG();
+	}
+}
+
+char *pp_tag_t(tag_t *tag)
+{
+	char *res;
+
+	if (!tag)
+		res = kasprintf(GFP_ATOMIC, "tag_t@null{}");
+	else
+		res = kasprintf(GFP_ATOMIC,
+				"tag_t@%p{tag=0x%llx, uid=%u}",
+				tag, *tag, get_uid_from_tag(*tag));
+	_bug_on_err_or_null(res);
+	return res;
+}
+
+char *pp_data_counters(struct data_counters *dc, bool showValues)
+{
+	char *res;
+
+	if (!dc)
+		res = kasprintf(GFP_ATOMIC, "data_counters@null{}");
+	else if (showValues)
+		res = kasprintf(
+			GFP_ATOMIC, "data_counters@%p{"
+			"set0{"
+			"rx{"
+			"tcp{b=%llu, p=%llu}, "
+			"udp{b=%llu, p=%llu},"
+			"other{b=%llu, p=%llu}}, "
+			"tx{"
+			"tcp{b=%llu, p=%llu}, "
+			"udp{b=%llu, p=%llu},"
+			"other{b=%llu, p=%llu}}}, "
+			"set1{"
+			"rx{"
+			"tcp{b=%llu, p=%llu}, "
+			"udp{b=%llu, p=%llu},"
+			"other{b=%llu, p=%llu}}, "
+			"tx{"
+			"tcp{b=%llu, p=%llu}, "
+			"udp{b=%llu, p=%llu},"
+			"other{b=%llu, p=%llu}}}}",
+			dc,
+			dc->bpc[0][IFS_RX][IFS_TCP].bytes,
+			dc->bpc[0][IFS_RX][IFS_TCP].packets,
+			dc->bpc[0][IFS_RX][IFS_UDP].bytes,
+			dc->bpc[0][IFS_RX][IFS_UDP].packets,
+			dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].bytes,
+			dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].packets,
+			dc->bpc[0][IFS_TX][IFS_TCP].bytes,
+			dc->bpc[0][IFS_TX][IFS_TCP].packets,
+			dc->bpc[0][IFS_TX][IFS_UDP].bytes,
+			dc->bpc[0][IFS_TX][IFS_UDP].packets,
+			dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].bytes,
+			dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].packets,
+			dc->bpc[1][IFS_RX][IFS_TCP].bytes,
+			dc->bpc[1][IFS_RX][IFS_TCP].packets,
+			dc->bpc[1][IFS_RX][IFS_UDP].bytes,
+			dc->bpc[1][IFS_RX][IFS_UDP].packets,
+			dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].bytes,
+			dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].packets,
+			dc->bpc[1][IFS_TX][IFS_TCP].bytes,
+			dc->bpc[1][IFS_TX][IFS_TCP].packets,
+			dc->bpc[1][IFS_TX][IFS_UDP].bytes,
+			dc->bpc[1][IFS_TX][IFS_UDP].packets,
+			dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].bytes,
+			dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].packets);
+	else
+		res = kasprintf(GFP_ATOMIC, "data_counters@%p{...}", dc);
+	_bug_on_err_or_null(res);
+	return res;
+}
+
+char *pp_tag_node(struct tag_node *tn)
+{
+	char *tag_str;
+	char *res;
+
+	if (!tn) {
+		res = kasprintf(GFP_ATOMIC, "tag_node@null{}");
+		_bug_on_err_or_null(res);
+		return res;
+	}
+	tag_str = pp_tag_t(&tn->tag);
+	res = kasprintf(GFP_ATOMIC,
+			"tag_node@%p{tag=%s}",
+			tn, tag_str);
+	_bug_on_err_or_null(res);
+	kfree(tag_str);
+	return res;
+}
+
+char *pp_tag_ref(struct tag_ref *tr)
+{
+	char *tn_str;
+	char *res;
+
+	if (!tr) {
+		res = kasprintf(GFP_ATOMIC, "tag_ref@null{}");
+		_bug_on_err_or_null(res);
+		return res;
+	}
+	tn_str = pp_tag_node(&tr->tn);
+	res = kasprintf(GFP_ATOMIC,
+			"tag_ref@%p{%s, num_sock_tags=%d}",
+			tr, tn_str, tr->num_sock_tags);
+	_bug_on_err_or_null(res);
+	kfree(tn_str);
+	return res;
+}
+
+char *pp_tag_stat(struct tag_stat *ts)
+{
+	char *tn_str;
+	char *counters_str;
+	char *parent_counters_str;
+	char *res;
+
+	if (!ts) {
+		res = kasprintf(GFP_ATOMIC, "tag_stat@null{}");
+		_bug_on_err_or_null(res);
+		return res;
+	}
+	tn_str = pp_tag_node(&ts->tn);
+	counters_str = pp_data_counters(&ts->counters, true);
+	parent_counters_str = pp_data_counters(ts->parent_counters, false);
+	res = kasprintf(GFP_ATOMIC,
+			"tag_stat@%p{%s, counters=%s, parent_counters=%s}",
+			ts, tn_str, counters_str, parent_counters_str);
+	_bug_on_err_or_null(res);
+	kfree(tn_str);
+	kfree(counters_str);
+	kfree(parent_counters_str);
+	return res;
+}
+
+char *pp_iface_stat(struct iface_stat *is)
+{
+	char *res;
+	if (!is) {
+		res = kasprintf(GFP_ATOMIC, "iface_stat@null{}");
+	} else {
+		struct data_counters *cnts = &is->totals_via_skb;
+		res = kasprintf(GFP_ATOMIC, "iface_stat@%p{"
+				"list=list_head{...}, "
+				"ifname=%s, "
+				"total_dev={rx={bytes=%llu, "
+				"packets=%llu}, "
+				"tx={bytes=%llu, "
+				"packets=%llu}}, "
+				"total_skb={rx={bytes=%llu, "
+				"packets=%llu}, "
+				"tx={bytes=%llu, "
+				"packets=%llu}}, "
+				"last_known_valid=%d, "
+				"last_known={rx={bytes=%llu, "
+				"packets=%llu}, "
+				"tx={bytes=%llu, "
+				"packets=%llu}}, "
+				"active=%d, "
+				"net_dev=%p, "
+				"proc_ptr=%p, "
+				"tag_stat_tree=rb_root{...}}",
+				is,
+				is->ifname,
+				is->totals_via_dev[IFS_RX].bytes,
+				is->totals_via_dev[IFS_RX].packets,
+				is->totals_via_dev[IFS_TX].bytes,
+				is->totals_via_dev[IFS_TX].packets,
+				dc_sum_bytes(cnts, 0, IFS_RX),
+				dc_sum_packets(cnts, 0, IFS_RX),
+				dc_sum_bytes(cnts, 0, IFS_TX),
+				dc_sum_packets(cnts, 0, IFS_TX),
+				is->last_known_valid,
+				is->last_known[IFS_RX].bytes,
+				is->last_known[IFS_RX].packets,
+				is->last_known[IFS_TX].bytes,
+				is->last_known[IFS_TX].packets,
+				is->active,
+				is->net_dev,
+				is->proc_ptr);
+	}
+	_bug_on_err_or_null(res);
+	return res;
+}
+
+char *pp_sock_tag(struct sock_tag *st)
+{
+	char *tag_str;
+	char *res;
+
+	if (!st) {
+		res = kasprintf(GFP_ATOMIC, "sock_tag@null{}");
+		_bug_on_err_or_null(res);
+		return res;
+	}
+	tag_str = pp_tag_t(&st->tag);
+	res = kasprintf(GFP_ATOMIC, "sock_tag@%p{"
+			"sock_node=rb_node{...}, "
+			"sk=%p socket=%p (f_count=%lu), list=list_head{...}, "
+			"pid=%u, tag=%s}",
+			st, st->sk, st->socket, atomic_long_read(
+				&st->socket->file->f_count),
+			st->pid, tag_str);
+	_bug_on_err_or_null(res);
+	kfree(tag_str);
+	return res;
+}
+
+char *pp_uid_tag_data(struct uid_tag_data *utd)
+{
+	char *res;
+
+	if (!utd)
+		res = kasprintf(GFP_ATOMIC, "uid_tag_data@null{}");
+	else
+		res = kasprintf(GFP_ATOMIC, "uid_tag_data@%p{"
+				"uid=%u, num_active_acct_tags=%d, "
+				"num_pqd=%d, "
+				"tag_node_tree=rb_root{...}, "
+				"proc_qtu_data_tree=rb_root{...}}",
+				utd, utd->uid,
+				utd->num_active_tags, utd->num_pqd);
+	_bug_on_err_or_null(res);
+	return res;
+}
+
+char *pp_proc_qtu_data(struct proc_qtu_data *pqd)
+{
+	char *parent_tag_data_str;
+	char *res;
+
+	if (!pqd) {
+		res = kasprintf(GFP_ATOMIC, "proc_qtu_data@null{}");
+		_bug_on_err_or_null(res);
+		return res;
+	}
+	parent_tag_data_str = pp_uid_tag_data(pqd->parent_tag_data);
+	res = kasprintf(GFP_ATOMIC, "proc_qtu_data@%p{"
+			"node=rb_node{...}, pid=%u, "
+			"parent_tag_data=%s, "
+			"sock_tag_list=list_head{...}}",
+			pqd, pqd->pid, parent_tag_data_str
+		);
+	_bug_on_err_or_null(res);
+	kfree(parent_tag_data_str);
+	return res;
+}
+
+/*------------------------------------------*/
+void prdebug_sock_tag_tree(int indent_level,
+			   struct rb_root *sock_tag_tree)
+{
+	struct rb_node *node;
+	struct sock_tag *sock_tag_entry;
+	char *str;
+
+	if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+		return;
+
+	if (RB_EMPTY_ROOT(sock_tag_tree)) {
+		str = "sock_tag_tree=rb_root{}";
+		pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+		return;
+	}
+
+	str = "sock_tag_tree=rb_root{";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+	indent_level++;
+	for (node = rb_first(sock_tag_tree);
+	     node;
+	     node = rb_next(node)) {
+		sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
+		str = pp_sock_tag(sock_tag_entry);
+		pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
+		kfree(str);
+	}
+	indent_level--;
+	str = "}";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_sock_tag_list(int indent_level,
+			   struct list_head *sock_tag_list)
+{
+	struct sock_tag *sock_tag_entry;
+	char *str;
+
+	if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+		return;
+
+	if (list_empty(sock_tag_list)) {
+		str = "sock_tag_list=list_head{}";
+		pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+		return;
+	}
+
+	str = "sock_tag_list=list_head{";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+	indent_level++;
+	list_for_each_entry(sock_tag_entry, sock_tag_list, list) {
+		str = pp_sock_tag(sock_tag_entry);
+		pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
+		kfree(str);
+	}
+	indent_level--;
+	str = "}";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_proc_qtu_data_tree(int indent_level,
+				struct rb_root *proc_qtu_data_tree)
+{
+	char *str;
+	struct rb_node *node;
+	struct proc_qtu_data *proc_qtu_data_entry;
+
+	if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+		return;
+
+	if (RB_EMPTY_ROOT(proc_qtu_data_tree)) {
+		str = "proc_qtu_data_tree=rb_root{}";
+		pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+		return;
+	}
+
+	str = "proc_qtu_data_tree=rb_root{";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+	indent_level++;
+	for (node = rb_first(proc_qtu_data_tree);
+	     node;
+	     node = rb_next(node)) {
+		proc_qtu_data_entry = rb_entry(node,
+					       struct proc_qtu_data,
+					       node);
+		str = pp_proc_qtu_data(proc_qtu_data_entry);
+		pr_debug("%*d: %s,\n", indent_level*2, indent_level,
+			 str);
+		kfree(str);
+		indent_level++;
+		prdebug_sock_tag_list(indent_level,
+				      &proc_qtu_data_entry->sock_tag_list);
+		indent_level--;
+
+	}
+	indent_level--;
+	str = "}";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree)
+{
+	char *str;
+	struct rb_node *node;
+	struct tag_ref *tag_ref_entry;
+
+	if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+		return;
+
+	if (RB_EMPTY_ROOT(tag_ref_tree)) {
+		str = "tag_ref_tree{}";
+		pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+		return;
+	}
+
+	str = "tag_ref_tree{";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+	indent_level++;
+	for (node = rb_first(tag_ref_tree);
+	     node;
+	     node = rb_next(node)) {
+		tag_ref_entry = rb_entry(node,
+					 struct tag_ref,
+					 tn.node);
+		str = pp_tag_ref(tag_ref_entry);
+		pr_debug("%*d: %s,\n", indent_level*2, indent_level,
+			 str);
+		kfree(str);
+	}
+	indent_level--;
+	str = "}";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_uid_tag_data_tree(int indent_level,
+			       struct rb_root *uid_tag_data_tree)
+{
+	char *str;
+	struct rb_node *node;
+	struct uid_tag_data *uid_tag_data_entry;
+
+	if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+		return;
+
+	if (RB_EMPTY_ROOT(uid_tag_data_tree)) {
+		str = "uid_tag_data_tree=rb_root{}";
+		pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+		return;
+	}
+
+	str = "uid_tag_data_tree=rb_root{";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+	indent_level++;
+	for (node = rb_first(uid_tag_data_tree);
+	     node;
+	     node = rb_next(node)) {
+		uid_tag_data_entry = rb_entry(node, struct uid_tag_data,
+					      node);
+		str = pp_uid_tag_data(uid_tag_data_entry);
+		pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
+		kfree(str);
+		if (!RB_EMPTY_ROOT(&uid_tag_data_entry->tag_ref_tree)) {
+			indent_level++;
+			prdebug_tag_ref_tree(indent_level,
+					     &uid_tag_data_entry->tag_ref_tree);
+			indent_level--;
+		}
+	}
+	indent_level--;
+	str = "}";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_tag_stat_tree(int indent_level,
+				  struct rb_root *tag_stat_tree)
+{
+	char *str;
+	struct rb_node *node;
+	struct tag_stat *ts_entry;
+
+	if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+		return;
+
+	if (RB_EMPTY_ROOT(tag_stat_tree)) {
+		str = "tag_stat_tree{}";
+		pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+		return;
+	}
+
+	str = "tag_stat_tree{";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+	indent_level++;
+	for (node = rb_first(tag_stat_tree);
+	     node;
+	     node = rb_next(node)) {
+		ts_entry = rb_entry(node, struct tag_stat, tn.node);
+		str = pp_tag_stat(ts_entry);
+		pr_debug("%*d: %s\n", indent_level*2, indent_level,
+			 str);
+		kfree(str);
+	}
+	indent_level--;
+	str = "}";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_iface_stat_list(int indent_level,
+			     struct list_head *iface_stat_list)
+{
+	char *str;
+	struct iface_stat *iface_entry;
+
+	if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+		return;
+
+	if (list_empty(iface_stat_list)) {
+		str = "iface_stat_list=list_head{}";
+		pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+		return;
+	}
+
+	str = "iface_stat_list=list_head{";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+	indent_level++;
+	list_for_each_entry(iface_entry, iface_stat_list, list) {
+		str = pp_iface_stat(iface_entry);
+		pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+		kfree(str);
+
+		spin_lock_bh(&iface_entry->tag_stat_list_lock);
+		if (!RB_EMPTY_ROOT(&iface_entry->tag_stat_tree)) {
+			indent_level++;
+			prdebug_tag_stat_tree(indent_level,
+					      &iface_entry->tag_stat_tree);
+			indent_level--;
+		}
+		spin_unlock_bh(&iface_entry->tag_stat_list_lock);
+	}
+	indent_level--;
+	str = "}";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+#endif  /* ifdef DDEBUG */
+/*------------------------------------------*/
+static const char * const netdev_event_strings[] = {
+	"netdev_unknown",
+	"NETDEV_UP",
+	"NETDEV_DOWN",
+	"NETDEV_REBOOT",
+	"NETDEV_CHANGE",
+	"NETDEV_REGISTER",
+	"NETDEV_UNREGISTER",
+	"NETDEV_CHANGEMTU",
+	"NETDEV_CHANGEADDR",
+	"NETDEV_GOING_DOWN",
+	"NETDEV_CHANGENAME",
+	"NETDEV_FEAT_CHANGE",
+	"NETDEV_BONDING_FAILOVER",
+	"NETDEV_PRE_UP",
+	"NETDEV_PRE_TYPE_CHANGE",
+	"NETDEV_POST_TYPE_CHANGE",
+	"NETDEV_POST_INIT",
+	"NETDEV_UNREGISTER_BATCH",
+	"NETDEV_RELEASE",
+	"NETDEV_NOTIFY_PEERS",
+	"NETDEV_JOIN",
+};
+
+const char *netdev_evt_str(int netdev_event)
+{
+	if (netdev_event < 0
+	    || netdev_event >= ARRAY_SIZE(netdev_event_strings))
+		return "bad event num";
+	return netdev_event_strings[netdev_event];
+}
diff --git a/net/netfilter/xt_qtaguid_print.h b/net/netfilter/xt_qtaguid_print.h
new file mode 100644
index 0000000..b63871a
--- /dev/null
+++ b/net/netfilter/xt_qtaguid_print.h
@@ -0,0 +1,120 @@
+/*
+ * Pretty printing Support for iptables xt_qtaguid module.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __XT_QTAGUID_PRINT_H__
+#define __XT_QTAGUID_PRINT_H__
+
+#include "xt_qtaguid_internal.h"
+
+#ifdef DDEBUG
+
+char *pp_tag_t(tag_t *tag);
+char *pp_data_counters(struct data_counters *dc, bool showValues);
+char *pp_tag_node(struct tag_node *tn);
+char *pp_tag_ref(struct tag_ref *tr);
+char *pp_tag_stat(struct tag_stat *ts);
+char *pp_iface_stat(struct iface_stat *is);
+char *pp_sock_tag(struct sock_tag *st);
+char *pp_uid_tag_data(struct uid_tag_data *qtd);
+char *pp_proc_qtu_data(struct proc_qtu_data *pqd);
+
+/*------------------------------------------*/
+void prdebug_sock_tag_list(int indent_level,
+			   struct list_head *sock_tag_list);
+void prdebug_sock_tag_tree(int indent_level,
+			   struct rb_root *sock_tag_tree);
+void prdebug_proc_qtu_data_tree(int indent_level,
+				struct rb_root *proc_qtu_data_tree);
+void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree);
+void prdebug_uid_tag_data_tree(int indent_level,
+			       struct rb_root *uid_tag_data_tree);
+void prdebug_tag_stat_tree(int indent_level,
+			   struct rb_root *tag_stat_tree);
+void prdebug_iface_stat_list(int indent_level,
+			     struct list_head *iface_stat_list);
+
+#else
+
+/*------------------------------------------*/
+static inline char *pp_tag_t(tag_t *tag)
+{
+	return NULL;
+}
+static inline char *pp_data_counters(struct data_counters *dc, bool showValues)
+{
+	return NULL;
+}
+static inline char *pp_tag_node(struct tag_node *tn)
+{
+	return NULL;
+}
+static inline char *pp_tag_ref(struct tag_ref *tr)
+{
+	return NULL;
+}
+static inline char *pp_tag_stat(struct tag_stat *ts)
+{
+	return NULL;
+}
+static inline char *pp_iface_stat(struct iface_stat *is)
+{
+	return NULL;
+}
+static inline char *pp_sock_tag(struct sock_tag *st)
+{
+	return NULL;
+}
+static inline char *pp_uid_tag_data(struct uid_tag_data *qtd)
+{
+	return NULL;
+}
+static inline char *pp_proc_qtu_data(struct proc_qtu_data *pqd)
+{
+	return NULL;
+}
+
+/*------------------------------------------*/
+static inline
+void prdebug_sock_tag_list(int indent_level,
+			   struct list_head *sock_tag_list)
+{
+}
+static inline
+void prdebug_sock_tag_tree(int indent_level,
+			   struct rb_root *sock_tag_tree)
+{
+}
+static inline
+void prdebug_proc_qtu_data_tree(int indent_level,
+				struct rb_root *proc_qtu_data_tree)
+{
+}
+static inline
+void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree)
+{
+}
+static inline
+void prdebug_uid_tag_data_tree(int indent_level,
+			       struct rb_root *uid_tag_data_tree)
+{
+}
+static inline
+void prdebug_tag_stat_tree(int indent_level,
+			   struct rb_root *tag_stat_tree)
+{
+}
+static inline
+void prdebug_iface_stat_list(int indent_level,
+			     struct list_head *iface_stat_list)
+{
+}
+#endif
+/*------------------------------------------*/
+const char *netdev_evt_str(int netdev_event);
+#endif  /* ifndef __XT_QTAGUID_PRINT_H__ */
diff --git a/net/netfilter/xt_quota2.c b/net/netfilter/xt_quota2.c
new file mode 100644
index 0000000..4328562
--- /dev/null
+++ b/net/netfilter/xt_quota2.c
@@ -0,0 +1,385 @@
+/*
+ * xt_quota2 - enhanced xt_quota that can count upwards and in packets
+ * as a minimal accounting match.
+ * by Jan Engelhardt <jengelh@medozas.de>, 2008
+ *
+ * Originally based on xt_quota.c:
+ * 	netfilter module to enforce network quotas
+ * 	Sam Johnston <samj@samj.net>
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License; either
+ *	version 2 of the License, as published by the Free Software Foundation.
+ */
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_quota2.h>
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+#include <linux/netfilter_ipv4/ipt_ULOG.h>
+#endif
+
+/**
+ * @lock:	lock to protect quota writers from each other
+ */
+struct xt_quota_counter {
+	u_int64_t quota;
+	spinlock_t lock;
+	struct list_head list;
+	atomic_t ref;
+	char name[sizeof(((struct xt_quota_mtinfo2 *)NULL)->name)];
+	struct proc_dir_entry *procfs_entry;
+};
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+/* Harald's favorite number +1 :D From ipt_ULOG.c */
+static unsigned int qlog_nl_event = 112;
+module_param_named(event_num, qlog_nl_event, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(event_num,
+		 "Event number for NETLINK_NFLOG message. 0 disables log. "
+		 "111 is what ipt_ULOG uses.");
+static struct sock *nflognl;
+#endif
+
+static LIST_HEAD(counter_list);
+static DEFINE_SPINLOCK(counter_list_lock);
+
+static struct proc_dir_entry *proc_xt_quota;
+static unsigned int quota_list_perms = S_IRUGO | S_IWUSR;
+static unsigned int quota_list_uid   = 0;
+static unsigned int quota_list_gid   = 0;
+module_param_named(perms, quota_list_perms, uint, S_IRUGO | S_IWUSR);
+module_param_named(uid, quota_list_uid, uint, S_IRUGO | S_IWUSR);
+module_param_named(gid, quota_list_gid, uint, S_IRUGO | S_IWUSR);
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+static void quota2_log(unsigned int hooknum,
+		       const struct sk_buff *skb,
+		       const struct net_device *in,
+		       const struct net_device *out,
+		       const char *prefix)
+{
+	ulog_packet_msg_t *pm;
+	struct sk_buff *log_skb;
+	size_t size;
+	struct nlmsghdr *nlh;
+
+	if (!qlog_nl_event)
+		return;
+
+	size = NLMSG_SPACE(sizeof(*pm));
+	size = max(size, (size_t)NLMSG_GOODSIZE);
+	log_skb = alloc_skb(size, GFP_ATOMIC);
+	if (!log_skb) {
+		pr_err("xt_quota2: cannot alloc skb for logging\n");
+		return;
+	}
+
+	nlh = nlmsg_put(log_skb, /*pid*/0, /*seq*/0, qlog_nl_event,
+			sizeof(*pm), 0);
+	if (!nlh) {
+		pr_err("xt_quota2: nlmsg_put failed\n");
+		kfree_skb(log_skb);
+		return;
+	}
+	pm = nlmsg_data(nlh);
+	if (skb->tstamp.tv64 == 0)
+		__net_timestamp((struct sk_buff *)skb);
+	pm->data_len = 0;
+	pm->hook = hooknum;
+	if (prefix != NULL)
+		strlcpy(pm->prefix, prefix, sizeof(pm->prefix));
+	else
+		*(pm->prefix) = '\0';
+	if (in)
+		strlcpy(pm->indev_name, in->name, sizeof(pm->indev_name));
+	else
+		pm->indev_name[0] = '\0';
+
+	if (out)
+		strlcpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
+	else
+		pm->outdev_name[0] = '\0';
+
+	NETLINK_CB(log_skb).dst_group = 1;
+	pr_debug("throwing 1 packet to netlink group 1\n");
+	netlink_broadcast(nflognl, log_skb, 0, 1, GFP_ATOMIC);
+}
+#else
+static void quota2_log(unsigned int hooknum,
+		       const struct sk_buff *skb,
+		       const struct net_device *in,
+		       const struct net_device *out,
+		       const char *prefix)
+{
+}
+#endif  /* if+else CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG */
+
+static ssize_t quota_proc_read(struct file *file, char __user *buf,
+			       size_t size, loff_t *ppos)
+{
+	struct xt_quota_counter *e = PDE_DATA(file_inode(file));
+	char tmp[24];
+	size_t tmp_size;
+
+	spin_lock_bh(&e->lock);
+	tmp_size = scnprintf(tmp, sizeof(tmp), "%llu\n", e->quota);
+	spin_unlock_bh(&e->lock);
+	return simple_read_from_buffer(buf, size, ppos, tmp, tmp_size);
+}
+
+static ssize_t quota_proc_write(struct file *file, const char __user *input,
+				size_t size, loff_t *ppos)
+{
+	struct xt_quota_counter *e = PDE_DATA(file_inode(file));
+	char buf[sizeof("18446744073709551616")];
+
+	if (size > sizeof(buf) - 1)
+		size = sizeof(buf) - 1;
+	if (copy_from_user(buf, input, size) != 0)
+		return -EFAULT;
+	/* Terminate right after the user data so simple_strtoull() never
+	 * reads uninitialized stack bytes. */
+	buf[size] = '\0';
+
+	spin_lock_bh(&e->lock);
+	e->quota = simple_strtoull(buf, NULL, 0);
+	spin_unlock_bh(&e->lock);
+	return size;
+}
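+
+/*
+ * Usage sketch: each named counter is exposed as a procfs file under the
+ * "xt_quota" directory created in quota_mt2_init() below, e.g.:
+ *
+ *   cat /proc/net/xt_quota/dl_budget             # read the current value
+ *   echo 5000000 > /proc/net/xt_quota/dl_budget  # reset/reprogram it
+ *
+ * ("dl_budget" is a hypothetical counter name for illustration.)
+ */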
+
+static const struct file_operations q2_counter_fops = {
+	.read		= quota_proc_read,
+	.write		= quota_proc_write,
+	.llseek		= default_llseek,
+};
+
+static struct xt_quota_counter *
+q2_new_counter(const struct xt_quota_mtinfo2 *q, bool anon)
+{
+	struct xt_quota_counter *e;
+	unsigned int size;
+
+	/* Do not need all the procfs things for anonymous counters. */
+	size = anon ? offsetof(typeof(*e), list) : sizeof(*e);
+	e = kmalloc(size, GFP_KERNEL);
+	if (e == NULL)
+		return NULL;
+
+	e->quota = q->quota;
+	spin_lock_init(&e->lock);
+	if (!anon) {
+		INIT_LIST_HEAD(&e->list);
+		atomic_set(&e->ref, 1);
+		strlcpy(e->name, q->name, sizeof(e->name));
+	}
+	return e;
+}
+
+/**
+ * q2_get_counter - get ref to counter or create new
+ * @name:	name of counter
+ */
+static struct xt_quota_counter *
+q2_get_counter(const struct xt_quota_mtinfo2 *q)
+{
+	struct proc_dir_entry *p;
+	struct xt_quota_counter *e = NULL;
+	struct xt_quota_counter *new_e;
+
+	if (*q->name == '\0')
+		return q2_new_counter(q, true);
+
+	/* No need to hold a lock while getting a new counter */
+	new_e = q2_new_counter(q, false);
+	if (new_e == NULL)
+		goto out;
+
+	spin_lock_bh(&counter_list_lock);
+	list_for_each_entry(e, &counter_list, list)
+		if (strcmp(e->name, q->name) == 0) {
+			atomic_inc(&e->ref);
+			spin_unlock_bh(&counter_list_lock);
+			kfree(new_e);
+			pr_debug("xt_quota2: old counter name=%s", e->name);
+			return e;
+		}
+	e = new_e;
+	pr_debug("xt_quota2: new_counter name=%s", e->name);
+	list_add_tail(&e->list, &counter_list);
+	/* The entry having a refcount of 1 is not directly destructible.
+	 * This func has not yet returned the new entry, thus iptables
+	 * has no references for destroying this entry.
+	 * For another rule to try to destroy it, this func would first have
+	 * to be re-invoked and acquire a new ref for the same named quota.
+	 * Nobody will access e->procfs_entry either.
+	 * So it is safe to release the lock. */
+	spin_unlock_bh(&counter_list_lock);
+
+	/* proc_create_data() is not spin_lock happy */
+	p = e->procfs_entry = proc_create_data(e->name, quota_list_perms,
+					       proc_xt_quota, &q2_counter_fops, e);
+
+	if (IS_ERR_OR_NULL(p)) {
+		spin_lock_bh(&counter_list_lock);
+		list_del(&e->list);
+		spin_unlock_bh(&counter_list_lock);
+		goto out;
+	}
+	proc_set_user(p, quota_list_uid, quota_list_gid);
+	return e;
+
+ out:
+	kfree(e);
+	return NULL;
+}
+
+static int quota_mt2_check(const struct xt_mtchk_param *par)
+{
+	struct xt_quota_mtinfo2 *q = par->matchinfo;
+
+	pr_debug("xt_quota2: check() flags=0x%04x", q->flags);
+
+	if (q->flags & ~XT_QUOTA_MASK)
+		return -EINVAL;
+
+	q->name[sizeof(q->name)-1] = '\0';
+	if (*q->name == '.' || strchr(q->name, '/') != NULL) {
+		printk(KERN_ERR "xt_quota.3: illegal name\n");
+		return -EINVAL;
+	}
+
+	q->master = q2_get_counter(q);
+	if (q->master == NULL) {
+		printk(KERN_ERR "xt_quota.3: memory alloc failure\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void quota_mt2_destroy(const struct xt_mtdtor_param *par)
+{
+	struct xt_quota_mtinfo2 *q = par->matchinfo;
+	struct xt_quota_counter *e = q->master;
+
+	if (*q->name == '\0') {
+		kfree(e);
+		return;
+	}
+
+	spin_lock_bh(&counter_list_lock);
+	if (!atomic_dec_and_test(&e->ref)) {
+		spin_unlock_bh(&counter_list_lock);
+		return;
+	}
+
+	list_del(&e->list);
+	remove_proc_entry(e->name, proc_xt_quota);
+	spin_unlock_bh(&counter_list_lock);
+	kfree(e);
+}
+
+static bool
+quota_mt2(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	struct xt_quota_mtinfo2 *q = (void *)par->matchinfo;
+	struct xt_quota_counter *e = q->master;
+	bool ret = q->flags & XT_QUOTA_INVERT;
+
+	spin_lock_bh(&e->lock);
+	if (q->flags & XT_QUOTA_GROW) {
+		/*
+		 * While no_change is pointless in "grow" mode, we will
+		 * implement it here simply to have a consistent behavior.
+		 */
+		if (!(q->flags & XT_QUOTA_NO_CHANGE)) {
+			e->quota += (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len;
+		}
+		ret = true;
+	} else {
+		if (e->quota >= skb->len) {
+			if (!(q->flags & XT_QUOTA_NO_CHANGE))
+				e->quota -= (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len;
+			ret = !ret;
+		} else {
+			/* We are transitioning, log that fact. */
+			if (e->quota) {
+				quota2_log(par->hooknum,
+					   skb,
+					   par->in,
+					   par->out,
+					   q->name);
+			}
+			/* we do not allow even small packets from now on */
+			e->quota = 0;
+		}
+	}
+	spin_unlock_bh(&e->lock);
+	return ret;
+}
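+
+/*
+ * Example rules (hedged: option names are those of the xt_quota2 userspace
+ * match, which is not part of this file):
+ *
+ *   iptables -A OUTPUT -m quota2 --name dl_budget --quota 5000000 -j ACCEPT
+ *   iptables -A OUTPUT -m quota2 --name tx_total --grow -j ACCEPT
+ *
+ * The first behaves as a countdown quota; the second uses XT_QUOTA_GROW to
+ * count upwards as a plain byte counter.
+ */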
+
+static struct xt_match quota_mt2_reg[] __read_mostly = {
+	{
+		.name       = "quota2",
+		.revision   = 3,
+		.family     = NFPROTO_IPV4,
+		.checkentry = quota_mt2_check,
+		.match      = quota_mt2,
+		.destroy    = quota_mt2_destroy,
+		.matchsize  = sizeof(struct xt_quota_mtinfo2),
+		.me         = THIS_MODULE,
+	},
+	{
+		.name       = "quota2",
+		.revision   = 3,
+		.family     = NFPROTO_IPV6,
+		.checkentry = quota_mt2_check,
+		.match      = quota_mt2,
+		.destroy    = quota_mt2_destroy,
+		.matchsize  = sizeof(struct xt_quota_mtinfo2),
+		.me         = THIS_MODULE,
+	},
+};
+
+static int __init quota_mt2_init(void)
+{
+	int ret;
+	pr_debug("xt_quota2: init()");
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+	nflognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, NULL);
+	if (!nflognl)
+		return -ENOMEM;
+#endif
+
+	proc_xt_quota = proc_mkdir("xt_quota", init_net.proc_net);
+	if (proc_xt_quota == NULL)
+		return -EACCES;
+
+	ret = xt_register_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
+	if (ret < 0)
+		remove_proc_entry("xt_quota", init_net.proc_net);
+	pr_debug("xt_quota2: init() %d", ret);
+	return ret;
+}
+
+static void __exit quota_mt2_exit(void)
+{
+	xt_unregister_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
+	remove_proc_entry("xt_quota", init_net.proc_net);
+}
+
+module_init(quota_mt2_init);
+module_exit(quota_mt2_exit);
+MODULE_DESCRIPTION("Xtables: countdown quota match; up counter");
+MODULE_AUTHOR("Sam Johnston <samj@samj.net>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_quota2");
+MODULE_ALIAS("ip6t_quota2");
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 63b2bdb..030f33c 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -35,7 +35,7 @@
 #include <net/netfilter/nf_conntrack.h>
 #endif
 
-static void
+void
 xt_socket_put_sk(struct sock *sk)
 {
 	if (sk->sk_state == TCP_TIME_WAIT)
@@ -43,6 +43,7 @@
 	else
 		sock_put(sk);
 }
+EXPORT_SYMBOL(xt_socket_put_sk);
 
 static int
 extract_icmp4_fields(const struct sk_buff *skb,
@@ -101,9 +102,8 @@
 	return 0;
 }
 
-static bool
-socket_match(const struct sk_buff *skb, struct xt_action_param *par,
-	     const struct xt_socket_mtinfo1 *info)
+struct sock *
+xt_socket_get4_sk(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	const struct iphdr *iph = ip_hdr(skb);
 	struct udphdr _hdr, *hp = NULL;
@@ -120,7 +120,7 @@
 		hp = skb_header_pointer(skb, ip_hdrlen(skb),
 					sizeof(_hdr), &_hdr);
 		if (hp == NULL)
-			return false;
+			return NULL;
 
 		protocol = iph->protocol;
 		saddr = iph->saddr;
@@ -131,9 +131,9 @@
 	} else if (iph->protocol == IPPROTO_ICMP) {
 		if (extract_icmp4_fields(skb, &protocol, &saddr, &daddr,
 					&sport, &dport))
-			return false;
+			return NULL;
 	} else {
-		return false;
+		return NULL;
 	}
 
 #ifdef XT_SOCKET_HAVE_CONNTRACK
@@ -157,6 +157,23 @@
 
 	sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol,
 				   saddr, daddr, sport, dport, par->in, NFT_LOOKUP_ANY);
+
+	pr_debug("proto %hhu %pI4:%hu -> %pI4:%hu (orig %pI4:%hu) sock %p\n",
+		 protocol, &saddr, ntohs(sport),
+		 &daddr, ntohs(dport),
+		 &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
+
+	return sk;
+}
+EXPORT_SYMBOL(xt_socket_get4_sk);
+
+static bool
+socket_match(const struct sk_buff *skb, struct xt_action_param *par,
+	     const struct xt_socket_mtinfo1 *info)
+{
+	struct sock *sk;
+
+	sk = xt_socket_get4_sk(skb, par);
 	if (sk != NULL) {
 		bool wildcard;
 		bool transparent = true;
@@ -179,11 +196,6 @@
 			sk = NULL;
 	}
 
-	pr_debug("proto %hhu %pI4:%hu -> %pI4:%hu (orig %pI4:%hu) sock %p\n",
-		 protocol, &saddr, ntohs(sport),
-		 &daddr, ntohs(dport),
-		 &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
-
 	return (sk != NULL);
 }
 
@@ -255,8 +267,8 @@
 	return 0;
 }
 
-static bool
-socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
+struct sock *
+xt_socket_get6_sk(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	struct ipv6hdr *iph = ipv6_hdr(skb);
 	struct udphdr _hdr, *hp = NULL;
@@ -264,7 +276,6 @@
 	struct in6_addr *daddr = NULL, *saddr = NULL;
 	__be16 uninitialized_var(dport), uninitialized_var(sport);
 	int thoff = 0, uninitialized_var(tproto);
-	const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
 
 	tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
 	if (tproto < 0) {
@@ -276,7 +287,7 @@
 		hp = skb_header_pointer(skb, thoff,
 					sizeof(_hdr), &_hdr);
 		if (hp == NULL)
-			return false;
+			return NULL;
 
 		saddr = &iph->saddr;
 		sport = hp->source;
@@ -286,13 +297,30 @@
 	} else if (tproto == IPPROTO_ICMPV6) {
 		if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr,
 					 &sport, &dport))
-			return false;
+			return NULL;
 	} else {
-		return false;
+		return NULL;
 	}
 
 	sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto,
 				   saddr, daddr, sport, dport, par->in, NFT_LOOKUP_ANY);
+	pr_debug("proto %hhd %pI6:%hu -> %pI6:%hu "
+		 "(orig %pI6:%hu) sock %p\n",
+		 tproto, saddr, ntohs(sport),
+		 daddr, ntohs(dport),
+		 &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
+	return sk;
+}
+EXPORT_SYMBOL(xt_socket_get6_sk);
+
+static bool
+socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	struct sock *sk;
+	const struct xt_socket_mtinfo1 *info;
+
+	info = (struct xt_socket_mtinfo1 *) par->matchinfo;
+	sk = xt_socket_get6_sk(skb, par);
 	if (sk != NULL) {
 		bool wildcard;
 		bool transparent = true;
@@ -315,12 +343,6 @@
 			sk = NULL;
 	}
 
-	pr_debug("proto %hhd %pI6:%hu -> %pI6:%hu "
-		 "(orig %pI6:%hu) sock %p\n",
-		 tproto, saddr, ntohs(sport),
-		 daddr, ntohs(dport),
-		 &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
-
 	return (sk != NULL);
 }
 #endif
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index 78efe89..8e12c8a 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -10,6 +10,11 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called rfkill.
 
+config RFKILL_PM
+	bool "Power off on suspend"
+	depends on RFKILL && PM
+	default y
+
 # LED trigger support
 config RFKILL_LEDS
 	bool
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 6563cc0..352cb55 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -792,6 +792,7 @@
 }
 EXPORT_SYMBOL(rfkill_pause_polling);
 
+#ifdef CONFIG_RFKILL_PM
 void rfkill_resume_polling(struct rfkill *rfkill)
 {
 	BUG_ON(!rfkill);
@@ -826,14 +827,17 @@
 
 	return 0;
 }
+#endif
 
 static struct class rfkill_class = {
 	.name		= "rfkill",
 	.dev_release	= rfkill_release,
 	.dev_attrs	= rfkill_dev_attrs,
 	.dev_uevent	= rfkill_dev_uevent,
+#ifdef CONFIG_RFKILL_PM
 	.suspend	= rfkill_suspend,
 	.resume		= rfkill_resume,
+#endif
 };
 
 bool rfkill_blocked(struct rfkill *rfkill)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 5356b12..77d251e 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -254,7 +254,7 @@
 {
 	if (fatal_signal_pending(current))
 		return -ERESTARTSYS;
-	freezable_schedule();
+	freezable_schedule_unsafe();
 	return 0;
 }
 
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 8f118c7..331ff78 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -114,6 +114,7 @@
 #include <linux/mount.h>
 #include <net/checksum.h>
 #include <linux/security.h>
+#include <linux/freezer.h>
 
 struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
 EXPORT_SYMBOL_GPL(unix_socket_table);
@@ -2055,7 +2056,7 @@
 
 		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
 		unix_state_unlock(sk);
-		timeo = schedule_timeout(timeo);
+		timeo = freezable_schedule_timeout(timeo);
 		unix_state_lock(sk);
 
 		if (sock_flag(sk, SOCK_DEAD))
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 16d08b3..4c602d1 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -166,3 +166,14 @@
 	  from lib80211.
 
 	  If unsure, say N.
+
+config CFG80211_ALLOW_RECONNECT
+	bool "Allow reconnect while already connected"
+	depends on CFG80211
+	default n
+	help
+	  The cfg80211 stack doesn't allow connecting if you are already
+	  connected. This option allows making a connection in this case.
+
+	  Select this option ONLY for wlan drivers that are specifically
+	  built for such purposes.
diff --git a/net/wireless/core.h b/net/wireless/core.h
index d06da43..45af054 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -78,9 +78,7 @@
 
 	struct mutex sched_scan_mtx;
 
-#ifdef CONFIG_NL80211_TESTMODE
-	struct genl_info *testmode_info;
-#endif
+	struct genl_info *cur_cmd_info;
 
 	struct work_struct conn_work;
 	struct work_struct event_work;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index dd3dbed..6293e58 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -378,6 +378,12 @@
 	[NL80211_ATTR_MDID] = { .type = NLA_U16 },
 	[NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY,
 				  .len = IEEE80211_MAX_DATA_LEN },
+	[NL80211_ATTR_PEER_AID] = { .type = NLA_U16 },
+	[NL80211_ATTR_STA_SUPPORTED_CHANNELS] = { .type = NLA_BINARY },
+	[NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES] = { .type = NLA_BINARY },
+	[NL80211_ATTR_VENDOR_ID] = { .type = NLA_U32 },
+	[NL80211_ATTR_VENDOR_SUBCMD] = { .type = NLA_U32 },
+	[NL80211_ATTR_VENDOR_DATA] = { .type = NLA_BINARY },
 };
 
 /* policy for the key attributes */
@@ -1520,6 +1526,39 @@
 				dev->wiphy.max_acl_mac_addrs))
 			goto nla_put_failure;
 
+		if (dev->wiphy.n_vendor_commands) {
+			const struct nl80211_vendor_cmd_info *info;
+			struct nlattr *nested;
+
+			nested = nla_nest_start(msg, NL80211_ATTR_VENDOR_DATA);
+			if (!nested)
+				goto nla_put_failure;
+
+			for (i = 0; i < dev->wiphy.n_vendor_commands; i++) {
+				info = &dev->wiphy.vendor_commands[i].info;
+				if (nla_put(msg, i + 1, sizeof(*info), info))
+					goto nla_put_failure;
+			}
+			nla_nest_end(msg, nested);
+		}
+
+		if (dev->wiphy.n_vendor_events) {
+			const struct nl80211_vendor_cmd_info *info;
+			struct nlattr *nested;
+
+			nested = nla_nest_start(msg,
+				NL80211_ATTR_VENDOR_EVENTS);
+			if (!nested)
+				goto nla_put_failure;
+
+			for (i = 0; i < dev->wiphy.n_vendor_events; i++) {
+				info = &dev->wiphy.vendor_events[i];
+				if (nla_put(msg, i + 1, sizeof(*info), info))
+					goto nla_put_failure;
+			}
+			nla_nest_end(msg, nested);
+		}
+
 		/*
 		 * Any information below this point is only available to
 		 * applications that can deal with it being split. This
@@ -2638,8 +2677,8 @@
 
 	hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
 			     NL80211_CMD_NEW_KEY);
-	if (IS_ERR(hdr))
-		return PTR_ERR(hdr);
+	if (!hdr)
+		goto nla_put_failure;
 
 	cookie.msg = msg;
 	cookie.idx = key_idx;
@@ -5285,6 +5324,7 @@
 	enum ieee80211_band band;
 	size_t ie_len;
 	struct nlattr *tb[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1];
+	s32 default_match_rssi = NL80211_SCAN_RSSI_THOLD_OFF;
 
 	if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
 	    !rdev->ops->sched_scan_start)
@@ -5323,11 +5363,40 @@
 	if (n_ssids > wiphy->max_sched_scan_ssids)
 		return -EINVAL;
 
-	if (info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH])
+	/*
+	 * First, count the number of 'real' matchsets. Due to an issue with
+	 * the old implementation, matchsets containing only the RSSI attribute
+	 * (NL80211_SCHED_SCAN_MATCH_ATTR_RSSI) are considered as the 'default'
+	 * RSSI for all matchsets, rather than their own matchset for reporting
+	 * all APs with a strong RSSI. This is needed to be compatible with
+	 * older userspace that treated a matchset with only the RSSI as the
+	 * global RSSI for all other matchsets - if there are other matchsets.
+	 */
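+	/*
+	 * Example: matchsets { {ssid="foo"}, {rssi=-70} } are treated as a
+	 * single matchset for "foo" whose rssi_thold defaults to -70, not
+	 * as two independent matchsets.
+	 */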
+	if (info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH]) {
 		nla_for_each_nested(attr,
 				    info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH],
-				    tmp)
-			n_match_sets++;
+				    tmp) {
+			struct nlattr *rssi;
+
+			err = nla_parse(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX,
+					nla_data(attr), nla_len(attr),
+					nl80211_match_policy);
+			if (err)
+				return err;
+			/* add other standalone attributes here */
+			if (tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID]) {
+				n_match_sets++;
+				continue;
+			}
+			rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI];
+			if (rssi)
+				default_match_rssi = nla_get_s32(rssi);
+		}
+	}
+
+	/* However, if there's no other matchset, add the RSSI one */
+	if (!n_match_sets && default_match_rssi != NL80211_SCAN_RSSI_THOLD_OFF)
+		n_match_sets = 1;
 
 	if (n_match_sets > wiphy->max_match_sets)
 		return -EINVAL;
@@ -5455,6 +5524,15 @@
 				  nl80211_match_policy);
 			ssid = tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID];
 			if (ssid) {
+				if (WARN_ON(i >= n_match_sets)) {
+					/* this indicates a programming error,
+					 * the loop above should have verified
+					 * things properly
+					 */
+					err = -EINVAL;
+					goto out_free;
+				}
+
 				if (nla_len(ssid) > IEEE80211_MAX_SSID_LEN) {
 					err = -EINVAL;
 					goto out_free;
@@ -5463,17 +5541,32 @@
 				       nla_data(ssid), nla_len(ssid));
 				request->match_sets[i].ssid.ssid_len =
 					nla_len(ssid);
+				/* special attribute - old implementation workaround */
+				request->match_sets[i].rssi_thold =
+					default_match_rssi;
+				rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI];
+				if (rssi)
+					request->match_sets[i].rssi_thold =
+						nla_get_s32(rssi);
 			}
-			rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI];
-			if (rssi)
-				request->rssi_thold = nla_get_u32(rssi);
-			else
-				request->rssi_thold =
-						   NL80211_SCAN_RSSI_THOLD_OFF;
 			i++;
 		}
+
+		/* there was no other matchset, so the RSSI one is alone */
+		if (i == 0)
+			request->match_sets[0].rssi_thold = default_match_rssi;
+
+		request->min_rssi_thold = INT_MAX;
+		for (i = 0; i < n_match_sets; i++)
+			request->min_rssi_thold =
+				min(request->match_sets[i].rssi_thold,
+				    request->min_rssi_thold);
+	} else {
+		request->min_rssi_thold = NL80211_SCAN_RSSI_THOLD_OFF;
 	}
 
+	request->rssi_thold = request->min_rssi_thold;
+
 	if (info->attrs[NL80211_ATTR_IE]) {
 		request->ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
 		memcpy((void *)request->ie,
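
The matchset logic above reduces to three rules: only SSID-bearing matchsets are 'real', an RSSI-only matchset supplies the default threshold for every real one, and the request keeps the minimum threshold across all sets. A standalone C sketch of those semantics (the struct, the RSSI_OFF sentinel and the sample values are illustrative stand-ins, not kernel API):

#include <limits.h>
#include <stdio.h>

/* illustrative stand-in for NL80211_SCAN_RSSI_THOLD_OFF */
#define RSSI_OFF INT_MIN

struct match_set { int has_ssid, has_rssi, rssi; };

int main(void)
{
	/* one SSID-only matchset plus one RSSI-only 'default' matchset */
	struct match_set in[] = { { 1, 0, 0 }, { 0, 1, -70 } };
	int default_rssi = RSSI_OFF, min_thold = INT_MAX;
	int thold[8], n = 0;

	for (int i = 0; i < 2; i++) {
		if (in[i].has_ssid)		/* a 'real' matchset */
			thold[n++] = in[i].has_rssi ? in[i].rssi : RSSI_OFF;
		else if (in[i].has_rssi)	/* RSSI-only: global default */
			default_rssi = in[i].rssi;
	}
	/* the kernel also promotes a lone RSSI-only set to one matchset */
	for (int i = 0; i < n; i++) {
		if (thold[i] == RSSI_OFF)	/* inherit the default */
			thold[i] = default_rssi;
		if (thold[i] < min_thold)
			min_thold = thold[i];
	}
	printf("n_match_sets=%d min_rssi_thold=%d\n", n, min_thold);
	return 0;
}
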
@@ -6402,12 +6495,62 @@
 	return err;
 }
 
+static struct sk_buff *
+__cfg80211_alloc_vendor_skb(struct cfg80211_registered_device *rdev,
+			    int approxlen, u32 portid, u32 seq,
+			    enum nl80211_commands cmd,
+			    enum nl80211_attrs attr,
+			    const struct nl80211_vendor_cmd_info *info,
+			    gfp_t gfp)
+{
+	struct sk_buff *skb;
+	void *hdr;
+	struct nlattr *data;
+
+	skb = nlmsg_new(approxlen + 100, gfp);
+	if (!skb)
+		return NULL;
+
+	hdr = nl80211hdr_put(skb, portid, seq, 0, cmd);
+	if (!hdr) {
+		kfree_skb(skb);
+		return NULL;
+	}
+
+	if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx))
+		goto nla_put_failure;
+
+	if (info) {
+		if (nla_put_u32(skb, NL80211_ATTR_VENDOR_ID,
+				info->vendor_id))
+			goto nla_put_failure;
+		if (nla_put_u32(skb, NL80211_ATTR_VENDOR_SUBCMD,
+				info->subcmd))
+			goto nla_put_failure;
+	}
+
+	data = nla_nest_start(skb, attr);
+
+	((void **)skb->cb)[0] = rdev;
+	((void **)skb->cb)[1] = hdr;
+	((void **)skb->cb)[2] = data;
+
+	return skb;
+
+ nla_put_failure:
+	kfree_skb(skb);
+	return NULL;
+}
 
 #ifdef CONFIG_NL80211_TESTMODE
 static struct genl_multicast_group nl80211_testmode_mcgrp = {
 	.name = "testmode",
 };
 
+static struct genl_multicast_group nl80211_vendor_mcgrp = {
+	.name = "vendor",
+};
+
 static int nl80211_testmode_do(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -6418,11 +6561,11 @@
 
 	err = -EOPNOTSUPP;
 	if (rdev->ops->testmode_cmd) {
-		rdev->testmode_info = info;
+		rdev->cur_cmd_info = info;
 		err = rdev_testmode_cmd(rdev,
 				nla_data(info->attrs[NL80211_ATTR_TESTDATA]),
 				nla_len(info->attrs[NL80211_ATTR_TESTDATA]));
-		rdev->testmode_info = NULL;
+		rdev->cur_cmd_info = NULL;
 	}
 
 	return err;
@@ -6491,6 +6634,9 @@
 					   NL80211_CMD_TESTMODE);
 		struct nlattr *tmdata;
 
+		if (!hdr)
+			break;
+
 		if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) {
 			genlmsg_cancel(skb, hdr);
 			break;
@@ -6523,84 +6669,37 @@
 	return err;
 }
 
-static struct sk_buff *
-__cfg80211_testmode_alloc_skb(struct cfg80211_registered_device *rdev,
-			      int approxlen, u32 portid, u32 seq, gfp_t gfp)
-{
-	struct sk_buff *skb;
-	void *hdr;
-	struct nlattr *data;
-
-	skb = nlmsg_new(approxlen + 100, gfp);
-	if (!skb)
-		return NULL;
-
-	hdr = nl80211hdr_put(skb, portid, seq, 0, NL80211_CMD_TESTMODE);
-	if (!hdr) {
-		kfree_skb(skb);
-		return NULL;
-	}
-
-	if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx))
-		goto nla_put_failure;
-	data = nla_nest_start(skb, NL80211_ATTR_TESTDATA);
-
-	((void **)skb->cb)[0] = rdev;
-	((void **)skb->cb)[1] = hdr;
-	((void **)skb->cb)[2] = data;
-
-	return skb;
-
- nla_put_failure:
-	kfree_skb(skb);
-	return NULL;
-}
-
-struct sk_buff *cfg80211_testmode_alloc_reply_skb(struct wiphy *wiphy,
-						  int approxlen)
+struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy,
+					   enum nl80211_commands cmd,
+					   enum nl80211_attrs attr,
+					   int vendor_event_idx,
+					   int approxlen, gfp_t gfp)
 {
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+	const struct nl80211_vendor_cmd_info *info;
 
-	if (WARN_ON(!rdev->testmode_info))
+	switch (cmd) {
+	case NL80211_CMD_TESTMODE:
+		if (WARN_ON(vendor_event_idx != -1))
+			return NULL;
+		info = NULL;
+		break;
+	case NL80211_CMD_VENDOR:
+		if (WARN_ON(vendor_event_idx < 0 ||
+			    vendor_event_idx >= wiphy->n_vendor_events))
+			return NULL;
+		info = &wiphy->vendor_events[vendor_event_idx];
+		break;
+	default:
+		WARN_ON(1);
 		return NULL;
-
-	return __cfg80211_testmode_alloc_skb(rdev, approxlen,
-				rdev->testmode_info->snd_portid,
-				rdev->testmode_info->snd_seq,
-				GFP_KERNEL);
-}
-EXPORT_SYMBOL(cfg80211_testmode_alloc_reply_skb);
-
-int cfg80211_testmode_reply(struct sk_buff *skb)
-{
-	struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0];
-	void *hdr = ((void **)skb->cb)[1];
-	struct nlattr *data = ((void **)skb->cb)[2];
-
-	/* clear CB data for netlink core to own from now on */
-	memset(skb->cb, 0, sizeof(skb->cb));
-
-	if (WARN_ON(!rdev->testmode_info)) {
-		kfree_skb(skb);
-		return -EINVAL;
 	}
-
-	nla_nest_end(skb, data);
-	genlmsg_end(skb, hdr);
-	return genlmsg_reply(skb, rdev->testmode_info);
+	return __cfg80211_alloc_vendor_skb(rdev, approxlen, 0, 0,
+					   cmd, attr, info, gfp);
 }
-EXPORT_SYMBOL(cfg80211_testmode_reply);
+EXPORT_SYMBOL(__cfg80211_alloc_event_skb);
 
-struct sk_buff *cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy,
-						  int approxlen, gfp_t gfp)
-{
-	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
-
-	return __cfg80211_testmode_alloc_skb(rdev, approxlen, 0, 0, gfp);
-}
-EXPORT_SYMBOL(cfg80211_testmode_alloc_event_skb);
-
-void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
+void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp)
 {
 	struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0];
 	void *hdr = ((void **)skb->cb)[1];
@@ -6611,10 +6710,15 @@
 
 	nla_nest_end(skb, data);
 	genlmsg_end(skb, hdr);
-	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), skb, 0,
-				nl80211_testmode_mcgrp.id, gfp);
+
+	if (data->nla_type == NL80211_ATTR_VENDOR_DATA)
+		genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), skb, 0,
+			nl80211_vendor_mcgrp.id, gfp);
+	else
+		genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), skb, 0,
+			nl80211_testmode_mcgrp.id, gfp);
 }
-EXPORT_SYMBOL(cfg80211_testmode_event);
+EXPORT_SYMBOL(__cfg80211_send_event_skb);
 #endif
 
 static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
@@ -6935,9 +7039,8 @@
 
 	hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
 			     NL80211_CMD_REMAIN_ON_CHANNEL);
-
-	if (IS_ERR(hdr)) {
-		err = PTR_ERR(hdr);
+	if (!hdr) {
+		err = -ENOBUFS;
 		goto free_msg;
 	}
 
@@ -7224,9 +7327,8 @@
 
 		hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
 				     NL80211_CMD_FRAME);
-
-		if (IS_ERR(hdr)) {
-			err = PTR_ERR(hdr);
+		if (!hdr) {
+			err = -ENOBUFS;
 			goto free_msg;
 		}
 	}
@@ -8087,9 +8189,8 @@
 
 	hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
 			     NL80211_CMD_PROBE_CLIENT);
-
-	if (IS_ERR(hdr)) {
-		err = PTR_ERR(hdr);
+	if (!hdr) {
+		err = -ENOBUFS;
 		goto free_msg;
 	}
 
@@ -8306,6 +8407,108 @@
 	return 0;
 }
 
+static int nl80211_vendor_cmd(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct wireless_dev *wdev =
+		__cfg80211_wdev_from_attrs(genl_info_net(info), info->attrs);
+	int i, err;
+	u32 vid, subcmd;
+
+	if (!rdev || !rdev->wiphy.vendor_commands)
+		return -EOPNOTSUPP;
+
+	if (IS_ERR(wdev)) {
+		err = PTR_ERR(wdev);
+		if (err != -EINVAL)
+			return err;
+		wdev = NULL;
+	} else if (wdev->wiphy != &rdev->wiphy) {
+		return -EINVAL;
+	}
+
+	if (!info->attrs[NL80211_ATTR_VENDOR_ID] ||
+	    !info->attrs[NL80211_ATTR_VENDOR_SUBCMD])
+		return -EINVAL;
+
+	vid = nla_get_u32(info->attrs[NL80211_ATTR_VENDOR_ID]);
+	subcmd = nla_get_u32(info->attrs[NL80211_ATTR_VENDOR_SUBCMD]);
+	for (i = 0; i < rdev->wiphy.n_vendor_commands; i++) {
+		const struct wiphy_vendor_command *vcmd;
+		void *data = NULL;
+		int len = 0;
+
+		vcmd = &rdev->wiphy.vendor_commands[i];
+
+		if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd)
+			continue;
+
+		if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV |
+				   WIPHY_VENDOR_CMD_NEED_NETDEV)) {
+			if (!wdev)
+				return -EINVAL;
+			if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV &&
+			    !wdev->netdev)
+				return -EINVAL;
+
+			if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) {
+				if (!wdev->netdev ||
+				    !netif_running(wdev->netdev))
+					return -ENETDOWN;
+			}
+		} else {
+			wdev = NULL;
+		}
+
+		if (info->attrs[NL80211_ATTR_VENDOR_DATA]) {
+			data = nla_data(info->attrs[NL80211_ATTR_VENDOR_DATA]);
+			len = nla_len(info->attrs[NL80211_ATTR_VENDOR_DATA]);
+		}
+
+		rdev->cur_cmd_info = info;
+		err = rdev->wiphy.vendor_commands[i].doit(&rdev->wiphy, wdev,
+								data, len);
+		rdev->cur_cmd_info = NULL;
+		return err;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy,
+					   enum nl80211_commands cmd,
+					   enum nl80211_attrs attr,
+					   int approxlen)
+{
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+	if (WARN_ON(!rdev->cur_cmd_info))
+		return NULL;
+
+	return __cfg80211_alloc_vendor_skb(rdev, approxlen,
+					   0,
+					   0,
+					   cmd, attr, NULL, GFP_KERNEL);
+}
+EXPORT_SYMBOL(__cfg80211_alloc_reply_skb);
+
+int cfg80211_vendor_cmd_reply(struct sk_buff *skb)
+{
+	struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0];
+	void *hdr = ((void **)skb->cb)[1];
+	struct nlattr *data = ((void **)skb->cb)[2];
+
+	if (WARN_ON(!rdev->cur_cmd_info)) {
+		kfree_skb(skb);
+		return -EINVAL;
+	}
+
+	nla_nest_end(skb, data);
+	genlmsg_end(skb, hdr);
+	return genlmsg_reply(skb, rdev->cur_cmd_info);
+}
+EXPORT_SYMBOL(cfg80211_vendor_cmd_reply);
+
 #define NL80211_FLAG_NEED_WIPHY		0x01
 #define NL80211_FLAG_NEED_NETDEV	0x02
 #define NL80211_FLAG_NEED_RTNL		0x04
@@ -9010,7 +9213,15 @@
 		.flags = GENL_ADMIN_PERM,
 		.internal_flags = NL80211_FLAG_NEED_WDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
-	}
+	},
+	{
+		.cmd = NL80211_CMD_VENDOR,
+		.doit = nl80211_vendor_cmd,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_WIPHY |
+				  NL80211_FLAG_NEED_RTNL,
+	},
 };
 
 static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -10829,6 +11040,10 @@
 	err = genl_register_mc_group(&nl80211_fam, &nl80211_testmode_mcgrp);
 	if (err)
 		goto err_out;
+
+	err = genl_register_mc_group(&nl80211_fam, &nl80211_vendor_mcgrp);
+	if (err)
+		goto err_out;
 #endif
 
 	err = netlink_register_notifier(&nl80211_netlink_notifier);
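
With nl80211_vendor_cmd() and cfg80211_vendor_cmd_reply() in place, a driver exposes vendor commands through wiphy->vendor_commands. A hypothetical driver-side sketch follows; the OUI, subcommand number, attribute id and the cfg80211_vendor_cmd_alloc_reply_skb() wrapper (assumed to wrap the __cfg80211_alloc_reply_skb() path added above) are illustrative assumptions:

#include <net/cfg80211.h>

#define ACME_OUI	0x001234	/* hypothetical vendor OUI */
#define ACME_GET_FOO	1		/* hypothetical subcommand */

static int acme_vendor_get_foo(struct wiphy *wiphy,
			       struct wireless_dev *wdev,
			       const void *data, int data_len)
{
	struct sk_buff *reply;
	u32 foo = 42;			/* hypothetical value to report */

	/* assumed helper; allocates via __cfg80211_alloc_reply_skb() with
	 * NL80211_CMD_VENDOR / NL80211_ATTR_VENDOR_DATA */
	reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, sizeof(foo));
	if (!reply)
		return -ENOMEM;

	if (nla_put_u32(reply, 1 /* vendor-private attr id */, foo)) {
		kfree_skb(reply);
		return -ENOBUFS;
	}
	/* ends the nest, finalizes the header, replies to cur_cmd_info */
	return cfg80211_vendor_cmd_reply(reply);
}

static const struct wiphy_vendor_command acme_vendor_cmds[] = {
	{
		.info = { .vendor_id = ACME_OUI, .subcmd = ACME_GET_FOO },
		.flags = WIPHY_VENDOR_CMD_NEED_WDEV,
		.doit = acme_vendor_get_foo,
	},
};
/* hooked up via wiphy->vendor_commands / wiphy->n_vendor_commands */
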
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 3ed35c3..e2f74e6 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -707,8 +707,10 @@
 		    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
 		return;
 
+#ifndef CONFIG_CFG80211_ALLOW_RECONNECT
 	if (wdev->sme_state != CFG80211_SME_CONNECTED)
 		return;
+#endif
 
 	if (wdev->current_bss) {
 		cfg80211_unhold_bss(wdev->current_bss);
@@ -785,10 +787,14 @@
 
 	ASSERT_WDEV_LOCK(wdev);
 
+#ifndef CONFIG_CFG80211_ALLOW_RECONNECT
 	if (wdev->sme_state != CFG80211_SME_IDLE)
 		return -EALREADY;
 
 	if (WARN_ON(wdev->connect_keys)) {
+#else
+	if (wdev->connect_keys) {
+#endif
 		kfree(wdev->connect_keys);
 		wdev->connect_keys = NULL;
 	}
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index f97869f..ef2142a 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -273,6 +273,12 @@
 
 dtc-tmp = $(subst $(comma),_,$(dot-target).dts.tmp)
 
+# cat
+# ---------------------------------------------------------------------------
+# Concatenate multiple files together
+quiet_cmd_cat = CAT     $@
+cmd_cat = (cat $(filter-out FORCE,$^) > $@) || (rm -f $@; false)
+
 # Bzip2
 # ---------------------------------------------------------------------------
 
diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst
index ecbb447..8900c5c5 100644
--- a/scripts/Makefile.modinst
+++ b/scripts/Makefile.modinst
@@ -24,7 +24,7 @@
 INSTALL_MOD_DIR ?= extra
 ext-mod-dir = $(INSTALL_MOD_DIR)$(subst $(patsubst %/,%,$(KBUILD_EXTMOD)),,$(@D))
 
-modinst_dir = $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D))
+modinst_dir ?= $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D))
 
 $(modules):
 	$(call cmd,modules_install,$(MODLIB)/$(modinst_dir))
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 43eda40..35e0f16 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -1083,7 +1083,7 @@
 	csym->flags &= ~(SYMBOL_VALID);
 }
 
-static void set_all_choice_values(struct symbol *csym)
+void set_all_choice_values(struct symbol *csym)
 {
 	struct property *prop;
 	struct symbol *sym;
@@ -1100,7 +1100,7 @@
 	}
 	csym->flags |= SYMBOL_DEF_USER;
 	/* clear VALID to get value calculated */
-	csym->flags &= ~(SYMBOL_VALID);
+	csym->flags &= ~(SYMBOL_VALID | SYMBOL_NEED_SET_CHOICE_VALUES);
 }
 
 void conf_set_all_new_symbols(enum conf_def_mode mode)
@@ -1202,6 +1202,14 @@
 	 * selected in a choice block and we set it to yes,
 	 * and the rest to no.
 	 */
+	if (mode != def_random) {
+		for_all_symbols(i, csym) {
+			if ((sym_is_choice(csym) && !sym_has_value(csym)) ||
+			    sym_is_choice_value(csym))
+				csym->flags |= SYMBOL_NEED_SET_CHOICE_VALUES;
+		}
+	}
+
 	for_all_symbols(i, csym) {
 		if (sym_has_value(csym) || !sym_is_choice(csym))
 			continue;
@@ -1209,7 +1217,5 @@
 		sym_calc_value(csym);
 		if (mode == def_random)
 			randomize_choice_values(csym);
-		else
-			set_all_choice_values(csym);
 	}
 }
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index cdd4860..df198a5 100644
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -106,6 +106,9 @@
 #define SYMBOL_DEF3       0x40000  /* symbol.def[S_DEF_3] is valid */
 #define SYMBOL_DEF4       0x80000  /* symbol.def[S_DEF_4] is valid */
 
+/* choice values need to be set before calculating this symbol value */
+#define SYMBOL_NEED_SET_CHOICE_VALUES  0x100000
+
 #define SYMBOL_MAXLENGTH	256
 #define SYMBOL_HASHSIZE		9973
 
diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h
index f8aee5f..0c8d419 100644
--- a/scripts/kconfig/lkc.h
+++ b/scripts/kconfig/lkc.h
@@ -87,6 +87,7 @@
 void sym_set_change_count(int count);
 void sym_add_change_count(int count);
 void conf_set_all_new_symbols(enum conf_def_mode mode);
+void set_all_choice_values(struct symbol *csym);
 
 struct conf_printer {
 	void (*print_symbol)(FILE *, struct symbol *, const char *, void *);
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index ecc5aa5..ab8f4c8 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -300,6 +300,14 @@
 
 	if (sym->flags & SYMBOL_VALID)
 		return;
+
+	if (sym_is_choice_value(sym) &&
+	    sym->flags & SYMBOL_NEED_SET_CHOICE_VALUES) {
+		sym->flags &= ~SYMBOL_NEED_SET_CHOICE_VALUES;
+		prop = sym_get_choice_prop(sym);
+		sym_calc_value(prop_get_symbol(prop));
+	}
+
 	sym->flags |= SYMBOL_VALID;
 
 	oldval = sym->curr;
@@ -425,6 +433,9 @@
 
 	if (sym->flags & SYMBOL_AUTO)
 		sym->flags &= ~SYMBOL_WRITE;
+
+	if (sym->flags & SYMBOL_NEED_SET_CHOICE_VALUES)
+		set_all_choice_values(sym);
 }
 
 void sym_clear_all_valid(void)
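
The kconfig hunks above replace the eager set_all_choice_values() call in conf_set_all_new_symbols() with a SYMBOL_NEED_SET_CHOICE_VALUES flag that sym_calc_value() consumes lazily on first evaluation. A minimal standalone sketch of that defer-then-consume pattern (simplified stand-ins, not the real kconfig structures):

#include <stdio.h>

#define SYM_VALID		0x1
#define SYM_NEED_SET_CHOICE	0x2	/* mirrors SYMBOL_NEED_SET_CHOICE_VALUES */

struct sym { const char *name; unsigned flags; };

static void set_choice_values(struct sym *s)
{
	printf("setting choice values for %s\n", s->name);
	s->flags &= ~SYM_NEED_SET_CHOICE;	/* consume the flag */
}

static void calc_value(struct sym *s)
{
	if (s->flags & SYM_VALID)
		return;
	if (s->flags & SYM_NEED_SET_CHOICE)	/* deferred work, run once */
		set_choice_values(s);
	s->flags |= SYM_VALID;
}

int main(void)
{
	struct sym choice = { "CHOICE_A", SYM_NEED_SET_CHOICE };

	calc_value(&choice);	/* performs the deferred work */
	calc_value(&choice);	/* already valid: no-op */
	return 0;
}
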
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
index e66d4d2..5e74595 100644
--- a/scripts/mod/devicetable-offsets.c
+++ b/scripts/mod/devicetable-offsets.c
@@ -174,6 +174,9 @@
 	DEVID_FIELD(x86_cpu_id, model);
 	DEVID_FIELD(x86_cpu_id, vendor);
 
+	DEVID(cpu_feature);
+	DEVID_FIELD(cpu_feature, feature);
+
 	DEVID(mei_cl_device_id);
 	DEVID_FIELD(mei_cl_device_id, name);
 
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 4319a38..09632f0 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -1133,6 +1133,16 @@
 }
 ADD_TO_DEVTABLE("x86cpu", x86_cpu_id, do_x86cpu_entry);
 
+/* Looks like: cpu:type:*:feature:*FEAT* */
+static int do_cpu_entry(const char *filename, void *symval, char *alias)
+{
+	DEF_FIELD(symval, cpu_feature, feature);
+
+	sprintf(alias, "cpu:type:*:feature:*%04X*", feature);
+	return 1;
+}
+ADD_TO_DEVTABLE("cpu", cpu_feature, do_cpu_entry);
+
 /* Looks like: mei:S */
 static int do_mei_entry(const char *filename, void *symval,
 			char *alias)
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index a2c361c..4fb2583 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1709,7 +1709,7 @@
 	char *version;
 	char *license;
 	struct module *mod;
-	struct elf_info info = { };
+	struct elf_info info = { .hdr = NULL };
 	Elf_Sym *sym;
 
 	if (!parse_elf(&info, modname))
@@ -2122,7 +2122,7 @@
 
 static void write_dump(const char *fname)
 {
-	struct buffer buf = { };
+	struct buffer buf = { NULL, 0, 0 };
 	struct symbol *symbol;
 	int n;
 
@@ -2148,7 +2148,7 @@
 int main(int argc, char **argv)
 {
 	struct module *mod;
-	struct buffer buf = { };
+	struct buffer buf = { NULL, 0, 0 };
 	char *kernel_read = NULL, *module_read = NULL;
 	char *dump_write = NULL, *files_source = NULL;
 	int opt;
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index 859abda..9aaa4e7 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -629,7 +629,7 @@
 	 * There is no exception for unconfined as change_hat is not
 	 * available.
 	 */
-	if (current->no_new_privs)
+	if (task_no_new_privs(current))
 		return -EPERM;
 
 	/* released below */
@@ -780,7 +780,7 @@
 	 * no_new_privs is set because this aways results in a reduction
 	 * of permissions.
 	 */
-	if (current->no_new_privs && !unconfined(profile)) {
+	if (task_no_new_privs(current) && !unconfined(profile)) {
 		put_cred(cred);
 		return -EPERM;
 	}
diff --git a/security/apparmor/include/apparmor.h b/security/apparmor/include/apparmor.h
index 40aedd9..4a8cbfe 100644
--- a/security/apparmor/include/apparmor.h
+++ b/security/apparmor/include/apparmor.h
@@ -65,7 +65,6 @@
 char *aa_split_fqname(char *args, char **ns_name);
 void aa_info_message(const char *str);
 void *kvmalloc(size_t size);
-void kvfree(void *buffer);
 
 
 /**
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index 7430298..ce8d9a8 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -103,35 +103,3 @@
 	}
 	return buffer;
 }
-
-/**
- * do_vfree - workqueue routine for freeing vmalloced memory
- * @work: data to be freed
- *
- * The work_struct is overlaid to the data being freed, as at the point
- * the work is scheduled the data is no longer valid, be its freeing
- * needs to be delayed until safe.
- */
-static void do_vfree(struct work_struct *work)
-{
-	vfree(work);
-}
-
-/**
- * kvfree - free an allocation do by kvmalloc
- * @buffer: buffer to free (MAYBE_NULL)
- *
- * Free a buffer allocated by kvmalloc
- */
-void kvfree(void *buffer)
-{
-	if (is_vmalloc_addr(buffer)) {
-		/* Data is no longer valid so just use the allocated space
-		 * as the work_struct
-		 */
-		struct work_struct *work = (struct work_struct *) buffer;
-		INIT_WORK(work, do_vfree);
-		schedule_work(work);
-	} else
-		kfree(buffer);
-}
diff --git a/security/capability.c b/security/capability.c
index 1728d4e..6e4fc77 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -12,6 +12,26 @@
 
 #include <linux/security.h>
 
+static int cap_binder_set_context_mgr(struct task_struct *mgr)
+{
+	return 0;
+}
+
+static int cap_binder_transaction(struct task_struct *from, struct task_struct *to)
+{
+	return 0;
+}
+
+static int cap_binder_transfer_binder(struct task_struct *from, struct task_struct *to)
+{
+	return 0;
+}
+
+static int cap_binder_transfer_file(struct task_struct *from, struct task_struct *to, struct file *file)
+{
+	return 0;
+}
+
 static int cap_syslog(int type)
 {
 	return 0;
@@ -903,6 +923,10 @@
 
 void __init security_fixup_ops(struct security_operations *ops)
 {
+	set_to_cap_if_null(ops, binder_set_context_mgr);
+	set_to_cap_if_null(ops, binder_transaction);
+	set_to_cap_if_null(ops, binder_transfer_binder);
+	set_to_cap_if_null(ops, binder_transfer_file);
 	set_to_cap_if_null(ops, ptrace_access_check);
 	set_to_cap_if_null(ops, ptrace_traceme);
 	set_to_cap_if_null(ops, capget);
diff --git a/security/commoncap.c b/security/commoncap.c
index 4fd7bf2..ba9d5f0 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -31,6 +31,10 @@
 #include <linux/binfmts.h>
 #include <linux/personality.h>
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+#endif
+
 /*
  * If a non-root user executes a setuid-root binary in
  * !secure(SECURE_NOROOT) mode, then we raise capabilities.
@@ -78,6 +82,13 @@
 {
 	struct user_namespace *ns = targ_ns;
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+	if (cap == CAP_NET_RAW && in_egroup_p(AID_NET_RAW))
+		return 0;
+	if (cap == CAP_NET_ADMIN && in_egroup_p(AID_NET_ADMIN))
+		return 0;
+#endif
+
 	/* See if cred has the capability in the target user namespace
 	 * by examining the target user namespace and all of the target
 	 * user namespace's parents.
@@ -996,9 +1007,11 @@
 	}
 	return ret;
 }
+EXPORT_SYMBOL(cap_mmap_addr);
 
 int cap_mmap_file(struct file *file, unsigned long reqprot,
 		  unsigned long prot, unsigned long flags)
 {
 	return 0;
 }
+EXPORT_SYMBOL(cap_mmap_file);
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index dd0dc57..9760ecb6 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -7,6 +7,7 @@
 #include <linux/device_cgroup.h>
 #include <linux/cgroup.h>
 #include <linux/ctype.h>
+#include <linux/export.h>
 #include <linux/list.h>
 #include <linux/uaccess.h>
 #include <linux/seq_file.h>
@@ -789,6 +790,7 @@
 	return __devcgroup_check_permission(type, imajor(inode), iminor(inode),
 			access);
 }
+EXPORT_SYMBOL(__devcgroup_inode_permission);
 
 int devcgroup_inode_mknod(int mode, dev_t dev)
 {
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 8d8d97d..8f6b8e8 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -245,6 +245,21 @@
 		}
 		break;
 	}
+	case LSM_AUDIT_DATA_IOCTL_OP: {
+		struct inode *inode;
+
+		audit_log_d_path(ab, " path=", &a->u.op->path);
+
+		inode = a->u.op->path.dentry->d_inode;
+		if (inode) {
+			audit_log_format(ab, " dev=");
+			audit_log_untrustedstring(ab, inode->i_sb->s_id);
+			audit_log_format(ab, " ino=%lu", inode->i_ino);
+		}
+
+		audit_log_format(ab, " ioctlcmd=%hx", a->u.op->cmd);
+		break;
+	}
 	case LSM_AUDIT_DATA_DENTRY: {
 		struct inode *inode;
 
diff --git a/security/security.c b/security/security.c
index a3dce87..3ccb622 100644
--- a/security/security.c
+++ b/security/security.c
@@ -134,6 +134,26 @@
 
 /* Security operations */
 
+int security_binder_set_context_mgr(struct task_struct *mgr)
+{
+	return security_ops->binder_set_context_mgr(mgr);
+}
+
+int security_binder_transaction(struct task_struct *from, struct task_struct *to)
+{
+	return security_ops->binder_transaction(from, to);
+}
+
+int security_binder_transfer_binder(struct task_struct *from, struct task_struct *to)
+{
+	return security_ops->binder_transfer_binder(from, to);
+}
+
+int security_binder_transfer_file(struct task_struct *from, struct task_struct *to, struct file *file)
+{
+	return security_ops->binder_transfer_file(from, to, file);
+}
+
 int security_ptrace_access_check(struct task_struct *child, unsigned int mode)
 {
 #ifdef CONFIG_SECURITY_YAMA_STACKED
@@ -396,6 +416,7 @@
 		return 0;
 	return security_ops->path_rmdir(dir, dentry);
 }
+EXPORT_SYMBOL(security_path_rmdir);
 
 int security_path_unlink(struct path *dir, struct dentry *dentry)
 {
@@ -412,6 +433,7 @@
 		return 0;
 	return security_ops->path_symlink(dir, dentry, old_name);
 }
+EXPORT_SYMBOL(security_path_symlink);
 
 int security_path_link(struct dentry *old_dentry, struct path *new_dir,
 		       struct dentry *new_dentry)
@@ -420,6 +442,7 @@
 		return 0;
 	return security_ops->path_link(old_dentry, new_dir, new_dentry);
 }
+EXPORT_SYMBOL(security_path_link);
 
 int security_path_rename(struct path *old_dir, struct dentry *old_dentry,
 			 struct path *new_dir, struct dentry *new_dentry)
@@ -438,6 +461,7 @@
 		return 0;
 	return security_ops->path_truncate(path);
 }
+EXPORT_SYMBOL(security_path_truncate);
 
 int security_path_chmod(struct path *path, umode_t mode)
 {
@@ -445,6 +469,7 @@
 		return 0;
 	return security_ops->path_chmod(path, mode);
 }
+EXPORT_SYMBOL(security_path_chmod);
 
 int security_path_chown(struct path *path, kuid_t uid, kgid_t gid)
 {
@@ -452,6 +477,7 @@
 		return 0;
 	return security_ops->path_chown(path, uid, gid);
 }
+EXPORT_SYMBOL(security_path_chown);
 
 int security_path_chroot(struct path *path)
 {
@@ -528,6 +554,7 @@
 		return 0;
 	return security_ops->inode_readlink(dentry);
 }
+EXPORT_SYMBOL(security_inode_readlink);
 
 int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
@@ -542,6 +569,7 @@
 		return 0;
 	return security_ops->inode_permission(inode, mask);
 }
+EXPORT_SYMBOL(security_inode_permission);
 
 int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
 {
@@ -663,6 +691,7 @@
 
 	return fsnotify_perm(file, mask);
 }
+EXPORT_SYMBOL(security_file_permission);
 
 int security_file_alloc(struct file *file)
 {
@@ -723,6 +752,7 @@
 		return ret;
 	return ima_file_mmap(file, prot);
 }
+EXPORT_SYMBOL(security_mmap_file);
 
 int security_mmap_addr(unsigned long addr)
 {
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index dad36a6..386aafb 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -22,6 +22,7 @@
 #include <linux/init.h>
 #include <linux/skbuff.h>
 #include <linux/percpu.h>
+#include <linux/list.h>
 #include <net/sock.h>
 #include <linux/un.h>
 #include <net/af_unix.h>
@@ -48,6 +49,7 @@
 	u32			tsid;
 	u16			tclass;
 	struct av_decision	avd;
+	struct avc_xperms_node	*xp_node;
 };
 
 struct avc_node {
@@ -56,6 +58,16 @@
 	struct rcu_head		rhead;
 };
 
+struct avc_xperms_decision_node {
+	struct extended_perms_decision xpd;
+	struct list_head xpd_list; /* list of extended_perms_decision */
+};
+
+struct avc_xperms_node {
+	struct extended_perms xp;
+	struct list_head xpd_head; /* list head of extended_perms_decision */
+};
+
 struct avc_cache {
 	struct hlist_head	slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
 	spinlock_t		slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */
@@ -80,6 +92,9 @@
 static struct avc_cache avc_cache;
 static struct avc_callback_node *avc_callbacks;
 static struct kmem_cache *avc_node_cachep;
+static struct kmem_cache *avc_xperms_data_cachep;
+static struct kmem_cache *avc_xperms_decision_cachep;
+static struct kmem_cache *avc_xperms_cachep;
 
 static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
 {
@@ -170,7 +185,17 @@
 	atomic_set(&avc_cache.lru_hint, 0);
 
 	avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node),
-					     0, SLAB_PANIC, NULL);
+					0, SLAB_PANIC, NULL);
+	avc_xperms_cachep = kmem_cache_create("avc_xperms_node",
+					sizeof(struct avc_xperms_node),
+					0, SLAB_PANIC, NULL);
+	avc_xperms_decision_cachep = kmem_cache_create(
+					"avc_xperms_decision_node",
+					sizeof(struct avc_xperms_decision_node),
+					0, SLAB_PANIC, NULL);
+	avc_xperms_data_cachep = kmem_cache_create("avc_xperms_data",
+					sizeof(struct extended_perms_data),
+					0, SLAB_PANIC, NULL);
 
 	audit_log(current->audit_context, GFP_KERNEL, AUDIT_KERNEL, "AVC INITIALIZED\n");
 }
@@ -205,9 +230,261 @@
 			 slots_used, AVC_CACHE_SLOTS, max_chain_len);
 }
 
+/*
+ * Use a linked list for extended_perms_decision lookups because the list is
+ * always small, i.e. fewer than 5 entries and typically just 1.
+ */
+static struct extended_perms_decision *avc_xperms_decision_lookup(u8 driver,
+					struct avc_xperms_node *xp_node)
+{
+	struct avc_xperms_decision_node *xpd_node;
+
+	list_for_each_entry(xpd_node, &xp_node->xpd_head, xpd_list) {
+		if (xpd_node->xpd.driver == driver)
+			return &xpd_node->xpd;
+	}
+	return NULL;
+}
+
+static inline unsigned int
+avc_xperms_has_perm(struct extended_perms_decision *xpd,
+					u8 perm, u8 which)
+{
+	unsigned int rc = 0;
+
+	if ((which == XPERMS_ALLOWED) &&
+			(xpd->used & XPERMS_ALLOWED))
+		rc = security_xperm_test(xpd->allowed->p, perm);
+	else if ((which == XPERMS_AUDITALLOW) &&
+			(xpd->used & XPERMS_AUDITALLOW))
+		rc = security_xperm_test(xpd->auditallow->p, perm);
+	else if ((which == XPERMS_DONTAUDIT) &&
+			(xpd->used & XPERMS_DONTAUDIT))
+		rc = security_xperm_test(xpd->dontaudit->p, perm);
+	return rc;
+}
+
+static void avc_xperms_allow_perm(struct avc_xperms_node *xp_node,
+				u8 driver, u8 perm)
+{
+	struct extended_perms_decision *xpd;
+	security_xperm_set(xp_node->xp.drivers.p, driver);
+	xpd = avc_xperms_decision_lookup(driver, xp_node);
+	if (xpd && xpd->allowed)
+		security_xperm_set(xpd->allowed->p, perm);
+}
+
+static void avc_xperms_decision_free(struct avc_xperms_decision_node *xpd_node)
+{
+	struct extended_perms_decision *xpd;
+
+	xpd = &xpd_node->xpd;
+	if (xpd->allowed)
+		kmem_cache_free(avc_xperms_data_cachep, xpd->allowed);
+	if (xpd->auditallow)
+		kmem_cache_free(avc_xperms_data_cachep, xpd->auditallow);
+	if (xpd->dontaudit)
+		kmem_cache_free(avc_xperms_data_cachep, xpd->dontaudit);
+	kmem_cache_free(avc_xperms_decision_cachep, xpd_node);
+}
+
+static void avc_xperms_free(struct avc_xperms_node *xp_node)
+{
+	struct avc_xperms_decision_node *xpd_node, *tmp;
+
+	if (!xp_node)
+		return;
+
+	list_for_each_entry_safe(xpd_node, tmp, &xp_node->xpd_head, xpd_list) {
+		list_del(&xpd_node->xpd_list);
+		avc_xperms_decision_free(xpd_node);
+	}
+	kmem_cache_free(avc_xperms_cachep, xp_node);
+}
+
+static void avc_copy_xperms_decision(struct extended_perms_decision *dest,
+					struct extended_perms_decision *src)
+{
+	dest->driver = src->driver;
+	dest->used = src->used;
+	if (dest->used & XPERMS_ALLOWED)
+		memcpy(dest->allowed->p, src->allowed->p,
+				sizeof(src->allowed->p));
+	if (dest->used & XPERMS_AUDITALLOW)
+		memcpy(dest->auditallow->p, src->auditallow->p,
+				sizeof(src->auditallow->p));
+	if (dest->used & XPERMS_DONTAUDIT)
+		memcpy(dest->dontaudit->p, src->dontaudit->p,
+				sizeof(src->dontaudit->p));
+}
+
+/*
+ * similar to avc_copy_xperms_decision, but only copy decision
+ * information relevant to this perm
+ */
+static inline void avc_quick_copy_xperms_decision(u8 perm,
+			struct extended_perms_decision *dest,
+			struct extended_perms_decision *src)
+{
+	/*
+	 * compute the index of the u32 within the 256-bit (8 x u32) bitmap
+	 * that contains this command permission
+	 */
+	u8 i = perm >> 5;
+
+	dest->used = src->used;
+	if (dest->used & XPERMS_ALLOWED)
+		dest->allowed->p[i] = src->allowed->p[i];
+	if (dest->used & XPERMS_AUDITALLOW)
+		dest->auditallow->p[i] = src->auditallow->p[i];
+	if (dest->used & XPERMS_DONTAUDIT)
+		dest->dontaudit->p[i] = src->dontaudit->p[i];
+}
+
+static struct avc_xperms_decision_node
+		*avc_xperms_decision_alloc(u8 which)
+{
+	struct avc_xperms_decision_node *xpd_node;
+	struct extended_perms_decision *xpd;
+
+	xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep,
+				GFP_ATOMIC | __GFP_NOMEMALLOC);
+	if (!xpd_node)
+		return NULL;
+
+	xpd = &xpd_node->xpd;
+	if (which & XPERMS_ALLOWED) {
+		xpd->allowed = kmem_cache_zalloc(avc_xperms_data_cachep,
+						GFP_ATOMIC | __GFP_NOMEMALLOC);
+		if (!xpd->allowed)
+			goto error;
+	}
+	if (which & XPERMS_AUDITALLOW) {
+		xpd->auditallow = kmem_cache_zalloc(avc_xperms_data_cachep,
+						GFP_ATOMIC | __GFP_NOMEMALLOC);
+		if (!xpd->auditallow)
+			goto error;
+	}
+	if (which & XPERMS_DONTAUDIT) {
+		xpd->dontaudit = kmem_cache_zalloc(avc_xperms_data_cachep,
+						GFP_ATOMIC | __GFP_NOMEMALLOC);
+		if (!xpd->dontaudit)
+			goto error;
+	}
+	return xpd_node;
+error:
+	avc_xperms_decision_free(xpd_node);
+	return NULL;
+}
+
+static int avc_add_xperms_decision(struct avc_node *node,
+			struct extended_perms_decision *src)
+{
+	struct avc_xperms_decision_node *dest_xpd;
+
+	node->ae.xp_node->xp.len++;
+	dest_xpd = avc_xperms_decision_alloc(src->used);
+	if (!dest_xpd)
+		return -ENOMEM;
+	avc_copy_xperms_decision(&dest_xpd->xpd, src);
+	list_add(&dest_xpd->xpd_list, &node->ae.xp_node->xpd_head);
+	return 0;
+}
+
+static struct avc_xperms_node *avc_xperms_alloc(void)
+{
+	struct avc_xperms_node *xp_node;
+
+	xp_node = kmem_cache_zalloc(avc_xperms_cachep,
+				GFP_ATOMIC|__GFP_NOMEMALLOC);
+	if (!xp_node)
+		return xp_node;
+	INIT_LIST_HEAD(&xp_node->xpd_head);
+	return xp_node;
+}
+
+static int avc_xperms_populate(struct avc_node *node,
+				struct avc_xperms_node *src)
+{
+	struct avc_xperms_node *dest;
+	struct avc_xperms_decision_node *dest_xpd;
+	struct avc_xperms_decision_node *src_xpd;
+
+	if (src->xp.len == 0)
+		return 0;
+	dest = avc_xperms_alloc();
+	if (!dest)
+		return -ENOMEM;
+
+	memcpy(dest->xp.drivers.p, src->xp.drivers.p, sizeof(dest->xp.drivers.p));
+	dest->xp.len = src->xp.len;
+
+	/* for each source xpd allocate a destination xpd and copy */
+	list_for_each_entry(src_xpd, &src->xpd_head, xpd_list) {
+		dest_xpd = avc_xperms_decision_alloc(src_xpd->xpd.used);
+		if (!dest_xpd)
+			goto error;
+		avc_copy_xperms_decision(&dest_xpd->xpd, &src_xpd->xpd);
+		list_add(&dest_xpd->xpd_list, &dest->xpd_head);
+	}
+	node->ae.xp_node = dest;
+	return 0;
+error:
+	avc_xperms_free(dest);
+	return -ENOMEM;
+
+
+static inline u32 avc_xperms_audit_required(u32 requested,
+					struct av_decision *avd,
+					struct extended_perms_decision *xpd,
+					u8 perm,
+					int result,
+					u32 *deniedp)
+{
+	u32 denied, audited;
+
+	denied = requested & ~avd->allowed;
+	if (unlikely(denied)) {
+		audited = denied & avd->auditdeny;
+		if (audited && xpd) {
+			if (avc_xperms_has_perm(xpd, perm, XPERMS_DONTAUDIT))
+				audited &= ~requested;
+		}
+	} else if (result) {
+		audited = denied = requested;
+	} else {
+		audited = requested & avd->auditallow;
+		if (audited && xpd) {
+			if (!avc_xperms_has_perm(xpd, perm, XPERMS_AUDITALLOW))
+				audited &= ~requested;
+		}
+	}
+
+	*deniedp = denied;
+	return audited;
+}
+
+static inline int avc_xperms_audit(u32 ssid, u32 tsid, u16 tclass,
+				u32 requested, struct av_decision *avd,
+				struct extended_perms_decision *xpd,
+				u8 perm, int result,
+				struct common_audit_data *ad)
+{
+	u32 audited, denied;
+
+	audited = avc_xperms_audit_required(
+			requested, avd, xpd, perm, result, &denied);
+	if (likely(!audited))
+		return 0;
+	return slow_avc_audit(ssid, tsid, tclass, requested,
+			audited, denied, result, ad, 0);
+}
+
 static void avc_node_free(struct rcu_head *rhead)
 {
 	struct avc_node *node = container_of(rhead, struct avc_node, rhead);
+	avc_xperms_free(node->ae.xp_node);
 	kmem_cache_free(avc_node_cachep, node);
 	avc_cache_stats_incr(frees);
 }
@@ -221,6 +498,7 @@
 
 static void avc_node_kill(struct avc_node *node)
 {
+	avc_xperms_free(node->ae.xp_node);
 	kmem_cache_free(avc_node_cachep, node);
 	avc_cache_stats_incr(frees);
 	atomic_dec(&avc_cache.active_nodes);
@@ -367,6 +645,7 @@
  * @tsid: target security identifier
  * @tclass: target security class
  * @avd: resulting av decision
+ * @xp_node: resulting extended permissions
  *
  * Insert an AVC entry for the SID pair
  * (@ssid, @tsid) and class @tclass.
@@ -378,7 +657,9 @@
  * the access vectors into a cache entry, returns
  * avc_node inserted. Otherwise, this function returns NULL.
  */
-static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd)
+static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass,
+				struct av_decision *avd,
+				struct avc_xperms_node *xp_node)
 {
 	struct avc_node *pos, *node = NULL;
 	int hvalue;
@@ -391,10 +672,15 @@
 	if (node) {
 		struct hlist_head *head;
 		spinlock_t *lock;
+		int rc = 0;
 
 		hvalue = avc_hash(ssid, tsid, tclass);
 		avc_node_populate(node, ssid, tsid, tclass, avd);
-
+		rc = avc_xperms_populate(node, xp_node);
+		if (rc) {
+			kmem_cache_free(avc_node_cachep, node);
+			return NULL;
+		}
 		head = &avc_cache.slots[hvalue];
 		lock = &avc_cache.slots_lock[hvalue];
 
@@ -444,11 +730,15 @@
 	avc_dump_query(ab, ad->selinux_audit_data->ssid,
 			   ad->selinux_audit_data->tsid,
 			   ad->selinux_audit_data->tclass);
+	if (ad->selinux_audit_data->denied) {
+		audit_log_format(ab, " permissive=%u",
+				 ad->selinux_audit_data->result ? 0 : 1);
+	}
 }
 
 /* This is the slow part of avc audit with big stack footprint */
 noinline int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
-		u32 requested, u32 audited, u32 denied,
+		u32 requested, u32 audited, u32 denied, int result,
 		struct common_audit_data *a,
 		unsigned flags)
 {
@@ -477,6 +767,7 @@
 	sad.tsid = tsid;
 	sad.audited = audited;
 	sad.denied = denied;
+	sad.result = result;
 
 	a->selinux_audit_data = &sad;
 
@@ -523,14 +814,17 @@
  * @perms : Permission mask bits
  * @ssid,@tsid,@tclass : identifier of an AVC entry
  * @seqno : sequence number when decision was made
+ * @xpd: extended_perms_decision to be added to the node
  *
  * if a valid AVC entry doesn't exist,this function returns -ENOENT.
  * if kmalloc() called internal returns NULL, this function returns -ENOMEM.
  * otherwise, this function updates the AVC entry. The original AVC-entry object
  * will release later by RCU.
  */
-static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
-			   u32 seqno)
+static int avc_update_node(u32 event, u32 perms, u8 driver, u8 xperm, u32 ssid,
+			u32 tsid, u16 tclass, u32 seqno,
+			struct extended_perms_decision *xpd,
+			u32 flags)
 {
 	int hvalue, rc = 0;
 	unsigned long flag;
@@ -574,9 +868,19 @@
 
 	avc_node_populate(node, ssid, tsid, tclass, &orig->ae.avd);
 
+	if (orig->ae.xp_node) {
+		rc = avc_xperms_populate(node, orig->ae.xp_node);
+		if (rc) {
+			kmem_cache_free(avc_node_cachep, node);
+			goto out_unlock;
+		}
+	}
+
 	switch (event) {
 	case AVC_CALLBACK_GRANT:
 		node->ae.avd.allowed |= perms;
+		if (node->ae.xp_node && (flags & AVC_EXTENDED_PERMS))
+			avc_xperms_allow_perm(node->ae.xp_node, driver, xperm);
 		break;
 	case AVC_CALLBACK_TRY_REVOKE:
 	case AVC_CALLBACK_REVOKE:
@@ -594,6 +898,9 @@
 	case AVC_CALLBACK_AUDITDENY_DISABLE:
 		node->ae.avd.auditdeny &= ~perms;
 		break;
+	case AVC_CALLBACK_ADD_XPERMS:
+		avc_add_xperms_decision(node, xpd);
+		break;
 	}
 	avc_node_replace(node, orig);
 out_unlock:
@@ -665,18 +972,20 @@
  * results in a bigger stack frame.
  */
 static noinline struct avc_node *avc_compute_av(u32 ssid, u32 tsid,
-			 u16 tclass, struct av_decision *avd)
+			 u16 tclass, struct av_decision *avd,
+			 struct avc_xperms_node *xp_node)
 {
 	rcu_read_unlock();
-	security_compute_av(ssid, tsid, tclass, avd);
+	INIT_LIST_HEAD(&xp_node->xpd_head);
+	security_compute_av(ssid, tsid, tclass, avd, &xp_node->xp);
 	rcu_read_lock();
-	return avc_insert(ssid, tsid, tclass, avd);
+	return avc_insert(ssid, tsid, tclass, avd, xp_node);
 }
 
 static noinline int avc_denied(u32 ssid, u32 tsid,
-			 u16 tclass, u32 requested,
-			 unsigned flags,
-			 struct av_decision *avd)
+				u16 tclass, u32 requested,
+				u8 driver, u8 xperm, unsigned flags,
+				struct av_decision *avd)
 {
 	if (flags & AVC_STRICT)
 		return -EACCES;
@@ -684,11 +993,91 @@
 	if (selinux_enforcing && !(avd->flags & AVD_FLAGS_PERMISSIVE))
 		return -EACCES;
 
-	avc_update_node(AVC_CALLBACK_GRANT, requested, ssid,
-				tsid, tclass, avd->seqno);
+	avc_update_node(AVC_CALLBACK_GRANT, requested, driver, xperm, ssid,
+				tsid, tclass, avd->seqno, NULL, flags);
 	return 0;
 }
 
+/*
+ * The avc extended permissions logic adds an additional 256 bits of
+ * permissions to an avc node when extended permissions for that node are
+ * specified in the avtab. If the additional 256 permissions is not adequate,
+ * as-is the case with ioctls, then multiple may be chained together and the
+ * driver field is used to specify which set contains the permission.
+ */
+int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested,
+			u8 driver, u8 xperm, struct common_audit_data *ad)
+{
+	struct avc_node *node;
+	struct av_decision avd;
+	u32 denied;
+	struct extended_perms_decision local_xpd;
+	struct extended_perms_decision *xpd = NULL;
+	struct extended_perms_data allowed;
+	struct extended_perms_data auditallow;
+	struct extended_perms_data dontaudit;
+	struct avc_xperms_node local_xp_node;
+	struct avc_xperms_node *xp_node;
+	int rc = 0, rc2;
+
+	xp_node = &local_xp_node;
+	BUG_ON(!requested);
+
+	rcu_read_lock();
+
+	node = avc_lookup(ssid, tsid, tclass);
+	if (unlikely(!node)) {
+		node = avc_compute_av(ssid, tsid, tclass, &avd, xp_node);
+	} else {
+		memcpy(&avd, &node->ae.avd, sizeof(avd));
+		xp_node = node->ae.xp_node;
+	}
+	/* if extended permissions are not defined, only consider av_decision */
+	if (!xp_node || !xp_node->xp.len)
+		goto decision;
+
+	local_xpd.allowed = &allowed;
+	local_xpd.auditallow = &auditallow;
+	local_xpd.dontaudit = &dontaudit;
+
+	xpd = avc_xperms_decision_lookup(driver, xp_node);
+	if (unlikely(!xpd)) {
+		/*
+		 * Compute the extended_perms_decision only if the driver
+		 * is flagged
+		 */
+		if (!security_xperm_test(xp_node->xp.drivers.p, driver)) {
+			avd.allowed &= ~requested;
+			goto decision;
+		}
+		rcu_read_unlock();
+		security_compute_xperms_decision(ssid, tsid, tclass, driver,
+						&local_xpd);
+		rcu_read_lock();
+		avc_update_node(AVC_CALLBACK_ADD_XPERMS, requested, driver, xperm,
+				ssid, tsid, tclass, avd.seqno, &local_xpd, 0);
+	} else {
+		avc_quick_copy_xperms_decision(xperm, &local_xpd, xpd);
+	}
+	xpd = &local_xpd;
+
+	if (!avc_xperms_has_perm(xpd, xperm, XPERMS_ALLOWED))
+		avd.allowed &= ~requested;
+
+decision:
+	denied = requested & ~(avd.allowed);
+	if (unlikely(denied))
+		rc = avc_denied(ssid, tsid, tclass, requested, driver, xperm,
+				AVC_EXTENDED_PERMS, &avd);
+
+	rcu_read_unlock();
+
+	rc2 = avc_xperms_audit(ssid, tsid, tclass, requested,
+			&avd, xpd, xperm, rc, ad);
+	if (rc2)
+		return rc2;
+	return rc;
+}
 
 /**
  * avc_has_perm_noaudit - Check permissions but perform no auditing.
@@ -716,6 +1105,7 @@
 			 struct av_decision *avd)
 {
 	struct avc_node *node;
+	struct avc_xperms_node xp_node;
 	int rc = 0;
 	u32 denied;
 
@@ -724,16 +1114,14 @@
 	rcu_read_lock();
 
 	node = avc_lookup(ssid, tsid, tclass);
-	if (unlikely(!node)) {
-		node = avc_compute_av(ssid, tsid, tclass, avd);
-	} else {
+	if (unlikely(!node))
+		node = avc_compute_av(ssid, tsid, tclass, avd, &xp_node);
+	else
 		memcpy(avd, &node->ae.avd, sizeof(*avd));
-		avd = &node->ae.avd;
-	}
 
 	denied = requested & ~(avd->allowed);
 	if (unlikely(denied))
-		rc = avc_denied(ssid, tsid, tclass, requested, flags, avd);
+		rc = avc_denied(ssid, tsid, tclass, requested, 0, 0, flags, avd);
 
 	rcu_read_unlock();
 	return rc;
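
To make the driver/xperm split used by avc_has_extended_perms() concrete: the 16-bit ioctl command is cut into a driver byte, selecting one chained 256-bit permission set, and an xperm byte, selecting one bit within it. A standalone sketch using the bit macros this patch adds to security.h (the sample command value is just an illustration):

#include <stdio.h>

typedef unsigned char u8;
typedef unsigned short u16;
typedef unsigned int u32;

/* copied from the security.h hunk in this patch */
#define security_xperm_set(perms, x)  (perms[x >> 5] |= 1 << (x & 0x1f))
#define security_xperm_test(perms, x) (1 & (perms[x >> 5] >> (x & 0x1f)))

int main(void)
{
	u32 allowed[8] = { 0 };		/* one 256-bit extended perms set */
	u16 cmd = 0x890b;		/* e.g. the classic SIOCADDRT value */
	u8 driver = cmd >> 8;		/* 0x89: selects the decision node */
	u8 xperm = cmd & 0xff;		/* 0x0b: bit inside that node */

	security_xperm_set(allowed, xperm);
	printf("driver=0x%02x xperm=0x%02x allowed=%d\n",
	       driver, xperm, security_xperm_test(allowed, xperm));
	return 0;
}
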
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index fdd6e4f..507267b 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -147,6 +147,17 @@
 	return (atomic_read(&selinux_secmark_refcount) > 0);
 }
 
+static int selinux_netcache_avc_callback(u32 event)
+{
+	if (event == AVC_CALLBACK_RESET) {
+		sel_netif_flush();
+		sel_netnode_flush();
+		sel_netport_flush();
+		synchronize_net();
+	}
+	return 0;
+}
+
 /*
  * initialise the security for the init task
  */
@@ -419,8 +430,11 @@
 	    sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
 		sbsec->flags &= ~SE_SBLABELSUPP;
 
-	/* Special handling for sysfs. Is genfs but also has setxattr handler*/
-	if (strncmp(sb->s_type->name, "sysfs", sizeof("sysfs")) == 0)
+	/* Special handling. Is genfs but also has in-core setxattr handler */
+	if (!strcmp(sb->s_type->name, "sysfs") ||
+	    !strcmp(sb->s_type->name, "pstore") ||
+	    !strcmp(sb->s_type->name, "debugfs") ||
+	    !strcmp(sb->s_type->name, "rootfs"))
 		sbsec->flags |= SE_SBLABELSUPP;
 
 	/* Initialize the root inode. */
@@ -685,7 +699,12 @@
 	}
 
 	if (strcmp(sb->s_type->name, "proc") == 0)
-		sbsec->flags |= SE_SBPROC;
+		sbsec->flags |= SE_SBPROC | SE_SBGENFS;
+
+	if (!strcmp(sb->s_type->name, "debugfs") ||
+	    !strcmp(sb->s_type->name, "sysfs") ||
+	    !strcmp(sb->s_type->name, "pstore"))
+		sbsec->flags |= SE_SBGENFS;
 
 	/* Determine the labeling behavior to use for this filesystem type. */
 	rc = security_fs_use((sbsec->flags & SE_SBPROC) ? "proc" : sb->s_type->name, &sbsec->behavior, &sbsec->sid);
@@ -1170,12 +1189,13 @@
 	return SECCLASS_SOCKET;
 }
 
-#ifdef CONFIG_PROC_FS
-static int selinux_proc_get_sid(struct dentry *dentry,
-				u16 tclass,
-				u32 *sid)
+static int selinux_genfs_get_sid(struct dentry *dentry,
+				 u16 tclass,
+				 u16 flags,
+				 u32 *sid)
 {
 	int rc;
+	struct super_block *sb = dentry->d_inode->i_sb;
 	char *buffer, *path;
 
 	buffer = (char *)__get_free_page(GFP_KERNEL);
@@ -1186,26 +1206,20 @@
 	if (IS_ERR(path))
 		rc = PTR_ERR(path);
 	else {
-		/* each process gets a /proc/PID/ entry. Strip off the
-		 * PID part to get a valid selinux labeling.
-		 * e.g. /proc/1/net/rpc/nfs -> /net/rpc/nfs */
-		while (path[1] >= '0' && path[1] <= '9') {
-			path[1] = '/';
-			path++;
+		if (flags & SE_SBPROC) {
+			/* each process gets a /proc/PID/ entry. Strip off the
+			 * PID part to get a valid selinux labeling.
+			 * e.g. /proc/1/net/rpc/nfs -> /net/rpc/nfs */
+			while (path[1] >= '0' && path[1] <= '9') {
+				path[1] = '/';
+				path++;
+			}
 		}
-		rc = security_genfs_sid("proc", path, tclass, sid);
+		rc = security_genfs_sid(sb->s_type->name, path, tclass, sid);
 	}
 	free_page((unsigned long)buffer);
 	return rc;
 }
-#else
-static int selinux_proc_get_sid(struct dentry *dentry,
-				u16 tclass,
-				u32 *sid)
-{
-	return -EINVAL;
-}
-#endif
 
 /* The inode's security attributes must be initialized before first use. */
 static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dentry)
@@ -1360,7 +1374,7 @@
 		/* Default to the fs superblock SID. */
 		isec->sid = sbsec->sid;
 
-		if ((sbsec->flags & SE_SBPROC) && !S_ISLNK(inode->i_mode)) {
+		if ((sbsec->flags & SE_SBGENFS) && !S_ISLNK(inode->i_mode)) {
 			/* We must have a dentry to determine the label on
 			 * procfs inodes */
 			if (opt_dentry)
@@ -1383,7 +1397,8 @@
 			if (!dentry)
 				goto out_unlock;
 			isec->sclass = inode_mode_to_security_class(inode->i_mode);
-			rc = selinux_proc_get_sid(dentry, isec->sclass, &sid);
+			rc = selinux_genfs_get_sid(dentry, isec->sclass,
+						   sbsec->flags, &sid);
 			dput(dentry);
 			if (rc)
 				goto out_unlock;
@@ -1856,6 +1871,65 @@
 
 /* Hook functions begin here. */
 
+static int selinux_binder_set_context_mgr(struct task_struct *mgr)
+{
+	u32 mysid = current_sid();
+	u32 mgrsid = task_sid(mgr);
+
+	return avc_has_perm(mysid, mgrsid, SECCLASS_BINDER, BINDER__SET_CONTEXT_MGR, NULL);
+}
+
+static int selinux_binder_transaction(struct task_struct *from, struct task_struct *to)
+{
+	u32 mysid = current_sid();
+	u32 fromsid = task_sid(from);
+	u32 tosid = task_sid(to);
+	int rc;
+
+	if (mysid != fromsid) {
+		rc = avc_has_perm(mysid, fromsid, SECCLASS_BINDER, BINDER__IMPERSONATE, NULL);
+		if (rc)
+			return rc;
+	}
+
+	return avc_has_perm(fromsid, tosid, SECCLASS_BINDER, BINDER__CALL, NULL);
+}
+
+static int selinux_binder_transfer_binder(struct task_struct *from, struct task_struct *to)
+{
+	u32 fromsid = task_sid(from);
+	u32 tosid = task_sid(to);
+	return avc_has_perm(fromsid, tosid, SECCLASS_BINDER, BINDER__TRANSFER, NULL);
+}
+
+static int selinux_binder_transfer_file(struct task_struct *from, struct task_struct *to, struct file *file)
+{
+	u32 sid = task_sid(to);
+	struct file_security_struct *fsec = file->f_security;
+	struct inode *inode = file->f_path.dentry->d_inode;
+	struct inode_security_struct *isec = inode->i_security;
+	struct common_audit_data ad;
+	int rc;
+
+	ad.type = LSM_AUDIT_DATA_PATH;
+	ad.u.path = file->f_path;
+
+	if (sid != fsec->sid) {
+		rc = avc_has_perm(sid, fsec->sid,
+				  SECCLASS_FD,
+				  FD__USE,
+				  &ad);
+		if (rc)
+			return rc;
+	}
+
+	if (unlikely(IS_PRIVATE(inode)))
+		return 0;
+
+	return avc_has_perm(sid, isec->sid, isec->sclass, file_to_av(file),
+			    &ad);
+}
+
 static int selinux_ptrace_access_check(struct task_struct *child,
 				     unsigned int mode)
 {
@@ -2672,6 +2746,7 @@
 
 static noinline int audit_inode_permission(struct inode *inode,
 					   u32 perms, u32 audited, u32 denied,
+					   int result,
 					   unsigned flags)
 {
 	struct common_audit_data ad;
@@ -2682,7 +2757,7 @@
 	ad.u.inode = inode;
 
 	rc = slow_avc_audit(current_sid(), isec->sid, isec->sclass, perms,
-			    audited, denied, &ad, flags);
+			    audited, denied, result, &ad, flags);
 	if (rc)
 		return rc;
 	return 0;
@@ -2724,7 +2799,7 @@
 	if (likely(!audited))
 		return rc;
 
-	rc2 = audit_inode_permission(inode, perms, audited, denied, flags);
+	rc2 = audit_inode_permission(inode, perms, audited, denied, rc, flags);
 	if (rc2)
 		return rc2;
 	return rc;
@@ -2748,7 +2823,8 @@
 			ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET))
 		return dentry_has_perm(cred, dentry, FILE__SETATTR);
 
-	if (selinux_policycap_openperm && (ia_valid & ATTR_SIZE))
+	if (selinux_policycap_openperm && (ia_valid & ATTR_SIZE)
+			&& !(ia_valid & ATTR_FILE))
 		av |= FILE__OPEN;
 
 	return dentry_has_perm(cred, dentry, av);
@@ -3035,6 +3111,46 @@
 	file_free_security(file);
 }
 
+/*
+ * Check whether a task has the ioctl permission on an inode and is
+ * allowed the specific cmd operation.
+ */
+int ioctl_has_perm(const struct cred *cred, struct file *file,
+		u32 requested, u16 cmd)
+{
+	struct common_audit_data ad;
+	struct file_security_struct *fsec = file->f_security;
+	struct inode *inode = file_inode(file);
+	struct inode_security_struct *isec = inode->i_security;
+	struct lsm_ioctlop_audit ioctl;
+	u32 ssid = cred_sid(cred);
+	int rc;
+	u8 driver = cmd >> 8;
+	u8 xperm = cmd & 0xff;
+
+	ad.type = LSM_AUDIT_DATA_IOCTL_OP;
+	ad.u.op = &ioctl;
+	ad.u.op->cmd = cmd;
+	ad.u.op->path = file->f_path;
+
+	if (ssid != fsec->sid) {
+		rc = avc_has_perm(ssid, fsec->sid,
+				SECCLASS_FD,
+				FD__USE,
+				&ad);
+		if (rc)
+			goto out;
+	}
+
+	if (unlikely(IS_PRIVATE(inode)))
+		return 0;
+
+	rc = avc_has_extended_perms(ssid, isec->sid, isec->sclass,
+			requested, driver, xperm, &ad);
+out:
+	return rc;
+}
+
 static int selinux_file_ioctl(struct file *file, unsigned int cmd,
 			      unsigned long arg)
 {
@@ -3077,7 +3193,7 @@
 	 * to the file's ioctl() function.
 	 */
 	default:
-		error = file_has_perm(cred, file, FILE__IOCTL);
+		error = ioctl_has_perm(cred, file, FILE__IOCTL, (u16) cmd);
 	}
 	return error;
 }
@@ -3121,24 +3237,20 @@
 
 static int selinux_mmap_addr(unsigned long addr)
 {
-	int rc = 0;
-	u32 sid = current_sid();
-
-	/*
-	 * notice that we are intentionally putting the SELinux check before
-	 * the secondary cap_file_mmap check.  This is such a likely attempt
-	 * at bad behaviour/exploit that we always want to get the AVC, even
-	 * if DAC would have also denied the operation.
-	 */
-	if (addr < CONFIG_LSM_MMAP_MIN_ADDR) {
-		rc = avc_has_perm(sid, sid, SECCLASS_MEMPROTECT,
-				  MEMPROTECT__MMAP_ZERO, NULL);
-		if (rc)
-			return rc;
-	}
+	int rc;
 
 	/* do DAC check on address space usage */
-	return cap_mmap_addr(addr);
+	rc = cap_mmap_addr(addr);
+	if (rc)
+		return rc;
+
+	if (addr < CONFIG_LSM_MMAP_MIN_ADDR) {
+		u32 sid = current_sid();
+		rc = avc_has_perm(sid, sid, SECCLASS_MEMPROTECT,
+				  MEMPROTECT__MMAP_ZERO, NULL);
+	}
+
+	return rc;
 }
 
 static int selinux_mmap_file(struct file *file, unsigned long reqprot,
@@ -4153,15 +4265,15 @@
 			    &ad);
 }
 
-static int selinux_inet_sys_rcv_skb(int ifindex, char *addrp, u16 family,
-				    u32 peer_sid,
+static int selinux_inet_sys_rcv_skb(struct net *ns, int ifindex,
+				    char *addrp, u16 family, u32 peer_sid,
 				    struct common_audit_data *ad)
 {
 	int err;
 	u32 if_sid;
 	u32 node_sid;
 
-	err = sel_netif_sid(ifindex, &if_sid);
+	err = sel_netif_sid(ns, ifindex, &if_sid);
 	if (err)
 		return err;
 	err = avc_has_perm(peer_sid, if_sid,
@@ -4254,8 +4366,8 @@
 		err = selinux_skb_peerlbl_sid(skb, family, &peer_sid);
 		if (err)
 			return err;
-		err = selinux_inet_sys_rcv_skb(skb->skb_iif, addrp, family,
-					       peer_sid, &ad);
+		err = selinux_inet_sys_rcv_skb(sock_net(sk), skb->skb_iif,
+					       addrp, family, peer_sid, &ad);
 		if (err) {
 			selinux_netlbl_err(skb, err, 0);
 			return err;
@@ -4598,7 +4710,8 @@
 
 #ifdef CONFIG_NETFILTER
 
-static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex,
+static unsigned int selinux_ip_forward(struct sk_buff *skb,
+				       const struct net_device *indev,
 				       u16 family)
 {
 	int err;
@@ -4624,14 +4737,14 @@
 
 	ad.type = LSM_AUDIT_DATA_NET;
 	ad.u.net = &net;
-	ad.u.net->netif = ifindex;
+	ad.u.net->netif = indev->ifindex;
 	ad.u.net->family = family;
 	if (selinux_parse_skb(skb, &ad, &addrp, 1, NULL) != 0)
 		return NF_DROP;
 
 	if (peerlbl_active) {
-		err = selinux_inet_sys_rcv_skb(ifindex, addrp, family,
-					       peer_sid, &ad);
+		err = selinux_inet_sys_rcv_skb(dev_net(indev), indev->ifindex,
+					       addrp, family, peer_sid, &ad);
 		if (err) {
 			selinux_netlbl_err(skb, err, 1);
 			return NF_DROP;
@@ -4660,7 +4773,7 @@
 					 const struct net_device *out,
 					 int (*okfn)(struct sk_buff *))
 {
-	return selinux_ip_forward(skb, in->ifindex, PF_INET);
+	return selinux_ip_forward(skb, in, PF_INET);
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -4670,7 +4783,7 @@
 					 const struct net_device *out,
 					 int (*okfn)(struct sk_buff *))
 {
-	return selinux_ip_forward(skb, in->ifindex, PF_INET6);
+	return selinux_ip_forward(skb, in, PF_INET6);
 }
 #endif	/* IPV6 */
 
@@ -4758,11 +4871,13 @@
 	return NF_ACCEPT;
 }
 
-static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
+static unsigned int selinux_ip_postroute(struct sk_buff *skb,
+					 const struct net_device *outdev,
 					 u16 family)
 {
 	u32 secmark_perm;
 	u32 peer_sid;
+	int ifindex = outdev->ifindex;
 	struct sock *sk;
 	struct common_audit_data ad;
 	struct lsm_network_audit net = {0,};
@@ -4875,7 +4990,7 @@
 		u32 if_sid;
 		u32 node_sid;
 
-		if (sel_netif_sid(ifindex, &if_sid))
+		if (sel_netif_sid(dev_net(outdev), ifindex, &if_sid))
 			return NF_DROP;
 		if (avc_has_perm(peer_sid, if_sid,
 				 SECCLASS_NETIF, NETIF__EGRESS, &ad))
@@ -4897,7 +5012,7 @@
 					   const struct net_device *out,
 					   int (*okfn)(struct sk_buff *))
 {
-	return selinux_ip_postroute(skb, out->ifindex, PF_INET);
+	return selinux_ip_postroute(skb, out, PF_INET);
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -4907,7 +5022,7 @@
 					   const struct net_device *out,
 					   int (*okfn)(struct sk_buff *))
 {
-	return selinux_ip_postroute(skb, out->ifindex, PF_INET6);
+	return selinux_ip_postroute(skb, out, PF_INET6);
 }
 #endif	/* IPV6 */
 
@@ -5656,6 +5771,11 @@
 static struct security_operations selinux_ops = {
 	.name =				"selinux",
 
+	.binder_set_context_mgr =	selinux_binder_set_context_mgr,
+	.binder_transaction =		selinux_binder_transaction,
+	.binder_transfer_binder =	selinux_binder_transfer_binder,
+	.binder_transfer_file =		selinux_binder_transfer_file,
+
 	.ptrace_access_check =		selinux_ptrace_access_check,
 	.ptrace_traceme =		selinux_ptrace_traceme,
 	.capget =			selinux_capget,
@@ -5882,6 +6002,9 @@
 	if (register_security(&selinux_ops))
 		panic("SELinux: Unable to register with kernel.\n");
 
+	if (avc_add_callback(selinux_netcache_avc_callback, AVC_CALLBACK_RESET))
+		panic("SELinux: Unable to register AVC netcache callback\n");
+
 	if (selinux_enforcing)
 		printk(KERN_DEBUG "SELinux:  Starting in enforcing mode\n");
 	else
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index 92d0ab5..6d65f77 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -102,7 +102,7 @@
 }
 
 int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
-		   u32 requested, u32 audited, u32 denied,
+		   u32 requested, u32 audited, u32 denied, int result,
 		   struct common_audit_data *a,
 		   unsigned flags);
 
@@ -137,11 +137,12 @@
 	if (likely(!audited))
 		return 0;
 	return slow_avc_audit(ssid, tsid, tclass,
-			      requested, audited, denied,
+			      requested, audited, denied, result,
 			      a, flags);
 }
 
 #define AVC_STRICT 1 /* Ignore permissive mode. */
+#define AVC_EXTENDED_PERMS 2	/* update extended permissions */
 int avc_has_perm_noaudit(u32 ssid, u32 tsid,
 			 u16 tclass, u32 requested,
 			 unsigned flags,
@@ -159,6 +160,9 @@
 	return avc_has_perm_flags(ssid, tsid, tclass, requested, auditdata, 0);
 }
 
+int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested,
+		u8 driver, u8 perm, struct common_audit_data *ad);
+
 u32 avc_policy_seqno(void);
 
 #define AVC_CALLBACK_GRANT		1
@@ -169,6 +173,7 @@
 #define AVC_CALLBACK_AUDITALLOW_DISABLE	32
 #define AVC_CALLBACK_AUDITDENY_ENABLE	64
 #define AVC_CALLBACK_AUDITDENY_DISABLE	128
+#define AVC_CALLBACK_ADD_XPERMS		256
 
 int avc_add_callback(int (*callback)(u32 event), u32 events);
 
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index 14d04e6..c32ff7b 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -151,5 +151,6 @@
 	{ "kernel_service", { "use_as_override", "create_files_as", NULL } },
 	{ "tun_socket",
 	  { COMMON_SOCK_PERMS, "attach_queue", NULL } },
+	{ "binder", { "impersonate", "call", "set_context_mgr", "transfer", NULL } },
 	{ NULL }
   };
diff --git a/security/selinux/include/netif.h b/security/selinux/include/netif.h
index 43d5072..c721454 100644
--- a/security/selinux/include/netif.h
+++ b/security/selinux/include/netif.h
@@ -17,7 +17,11 @@
 #ifndef _SELINUX_NETIF_H_
 #define _SELINUX_NETIF_H_
 
-int sel_netif_sid(int ifindex, u32 *sid);
+#include <net/net_namespace.h>
+
+void sel_netif_flush(void);
+
+int sel_netif_sid(struct net *ns, int ifindex, u32 *sid);
 
 #endif	/* _SELINUX_NETIF_H_ */
 
diff --git a/security/selinux/include/netnode.h b/security/selinux/include/netnode.h
index df7a5ed..937668d 100644
--- a/security/selinux/include/netnode.h
+++ b/security/selinux/include/netnode.h
@@ -27,6 +27,8 @@
 #ifndef _SELINUX_NETNODE_H
 #define _SELINUX_NETNODE_H
 
+void sel_netnode_flush(void);
+
 int sel_netnode_sid(void *addr, u16 family, u32 *sid);
 
 #endif
diff --git a/security/selinux/include/netport.h b/security/selinux/include/netport.h
index 4d965b8..d1ce896 100644
--- a/security/selinux/include/netport.h
+++ b/security/selinux/include/netport.h
@@ -26,6 +26,8 @@
 #ifndef _SELINUX_NETPORT_H
 #define _SELINUX_NETPORT_H
 
+void sel_netport_flush(void);
+
 int sel_netport_sid(u8 protocol, u16 pnum, u32 *sid);
 
 #endif
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index 6fd9dd2..2a3be7a 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -24,6 +24,7 @@
 #include <linux/binfmts.h>
 #include <linux/in.h>
 #include <linux/spinlock.h>
+#include <net/net_namespace.h>
 #include "flask.h"
 #include "avc.h"
 
@@ -78,6 +79,7 @@
 };
 
 struct netif_security_struct {
+	struct net *ns;			/* network namespace */
 	int ifindex;			/* device index */
 	u32 sid;			/* SID for this interface */
 };
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 6d38851..c290ec3 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -33,13 +33,15 @@
 #define POLICYDB_VERSION_ROLETRANS	26
 #define POLICYDB_VERSION_NEW_OBJECT_DEFAULTS	27
 #define POLICYDB_VERSION_DEFAULT_TYPE	28
+#define POLICYDB_VERSION_CONSTRAINT_NAMES	29
+#define POLICYDB_VERSION_XPERMS_IOCTL	30
 
 /* Range of policy versions we understand*/
 #define POLICYDB_VERSION_MIN   POLICYDB_VERSION_BASE
 #ifdef CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX
 #define POLICYDB_VERSION_MAX	CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX_VALUE
 #else
-#define POLICYDB_VERSION_MAX	POLICYDB_VERSION_DEFAULT_TYPE
+#define POLICYDB_VERSION_MAX	POLICYDB_VERSION_XPERMS_IOCTL
 #endif
 
 /* Mask for just the mount related flags */
@@ -53,6 +55,7 @@
 #define SE_SBINITIALIZED	0x10
 #define SE_SBPROC		0x20
 #define SE_SBLABELSUPP	0x40
+#define SE_SBGENFS	0x80
 
 #define CONTEXT_STR	"context="
 #define FSCONTEXT_STR	"fscontext="
@@ -102,11 +105,38 @@
 	u32 flags;
 };
 
+#define XPERMS_ALLOWED 1
+#define XPERMS_AUDITALLOW 2
+#define XPERMS_DONTAUDIT 4
+
+#define security_xperm_set(perms, x) (perms[x >> 5] |= 1 << (x & 0x1f))
+#define security_xperm_test(perms, x) (1 & (perms[x >> 5] >> (x & 0x1f)))
+struct extended_perms_data {
+	u32 p[8];
+};
+
+struct extended_perms_decision {
+	u8 used;
+	u8 driver;
+	struct extended_perms_data *allowed;
+	struct extended_perms_data *auditallow;
+	struct extended_perms_data *dontaudit;
+};
+
+struct extended_perms {
+	u16 len;	/* length of the associated decision chain */
+	struct extended_perms_data drivers; /* flag drivers that are used */
+};
+
 /* definitions of av_decision.flags */
 #define AVD_FLAGS_PERMISSIVE	0x0001
 
 void security_compute_av(u32 ssid, u32 tsid,
-			 u16 tclass, struct av_decision *avd);
+			 u16 tclass, struct av_decision *avd,
+			 struct extended_perms *xperms);
+
+void security_compute_xperms_decision(u32 ssid, u32 tsid, u16 tclass,
+			 u8 driver, struct extended_perms_decision *xpermd);
 
 void security_compute_av_user(u32 ssid, u32 tsid,
 			     u16 tclass, struct av_decision *avd);
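The security_xperm_set/test macros above index a 256-bit bitmap stored as u32 p[8]: x >> 5 selects the word and x & 0x1f the bit within it. For example, permission number 0x43 (decimal 67) lands in word 2, bit 3:

	struct extended_perms_data d = { { 0 } };

	security_xperm_set(d.p, 0x43);		/* d.p[2] |= 1 << 3 */
	/* security_xperm_test(d.p, 0x43) now evaluates to 1 */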
diff --git a/security/selinux/netif.c b/security/selinux/netif.c
index 47a49d1..548651a 100644
--- a/security/selinux/netif.c
+++ b/security/selinux/netif.c
@@ -45,6 +45,7 @@
 
 /**
  * sel_netif_hashfn - Hashing function for the interface table
+ * @ns: the network namespace
  * @ifindex: the network interface
  *
  * Description:
@@ -52,13 +53,14 @@
  * bucket number for the given interface.
  *
  */
-static inline u32 sel_netif_hashfn(int ifindex)
+static inline u32 sel_netif_hashfn(const struct net *ns, int ifindex)
 {
-	return (ifindex & (SEL_NETIF_HASH_SIZE - 1));
+	return (((uintptr_t)ns + ifindex) & (SEL_NETIF_HASH_SIZE - 1));
 }
 
 /**
  * sel_netif_find - Search for an interface record
+ * @ns: the network namespace
  * @ifindex: the network interface
  *
  * Description:
@@ -66,15 +68,15 @@
  * If an entry can not be found in the table return NULL.
  *
  */
-static inline struct sel_netif *sel_netif_find(int ifindex)
+static inline struct sel_netif *sel_netif_find(const struct net *ns,
+					       int ifindex)
 {
-	int idx = sel_netif_hashfn(ifindex);
+	int idx = sel_netif_hashfn(ns, ifindex);
 	struct sel_netif *netif;
 
 	list_for_each_entry_rcu(netif, &sel_netif_hash[idx], list)
-		/* all of the devices should normally fit in the hash, so we
-		 * optimize for that case */
-		if (likely(netif->nsec.ifindex == ifindex))
+		if (net_eq(netif->nsec.ns, ns) &&
+		    netif->nsec.ifindex == ifindex)
 			return netif;
 
 	return NULL;
@@ -96,7 +98,7 @@
 	if (sel_netif_total >= SEL_NETIF_HASH_MAX)
 		return -ENOSPC;
 
-	idx = sel_netif_hashfn(netif->nsec.ifindex);
+	idx = sel_netif_hashfn(netif->nsec.ns, netif->nsec.ifindex);
 	list_add_rcu(&netif->list, &sel_netif_hash[idx]);
 	sel_netif_total++;
 
@@ -120,6 +122,7 @@
 
 /**
  * sel_netif_sid_slow - Lookup the SID of a network interface using the policy
+ * @ns: the network namespace
  * @ifindex: the network interface
  * @sid: interface SID
  *
@@ -130,7 +133,7 @@
  * failure.
  *
  */
-static int sel_netif_sid_slow(int ifindex, u32 *sid)
+static int sel_netif_sid_slow(struct net *ns, int ifindex, u32 *sid)
 {
 	int ret;
 	struct sel_netif *netif;
@@ -140,7 +143,7 @@
-	/* NOTE: we always use init's network namespace since we don't
-	 * currently support containers */
 
-	dev = dev_get_by_index(&init_net, ifindex);
+	dev = dev_get_by_index(ns, ifindex);
 	if (unlikely(dev == NULL)) {
 		printk(KERN_WARNING
 		       "SELinux: failure in sel_netif_sid_slow(),"
@@ -149,7 +152,7 @@
 	}
 
 	spin_lock_bh(&sel_netif_lock);
-	netif = sel_netif_find(ifindex);
+	netif = sel_netif_find(ns, ifindex);
 	if (netif != NULL) {
 		*sid = netif->nsec.sid;
 		ret = 0;
@@ -163,6 +166,7 @@
 	ret = security_netif_sid(dev->name, &new->nsec.sid);
 	if (ret != 0)
 		goto out;
+	new->nsec.ns = ns;
 	new->nsec.ifindex = ifindex;
 	ret = sel_netif_insert(new);
 	if (ret != 0)
@@ -184,6 +188,7 @@
 
 /**
  * sel_netif_sid - Lookup the SID of a network interface
+ * @ns: the network namespace
  * @ifindex: the network interface
  * @sid: interface SID
  *
@@ -195,12 +200,12 @@
  * on failure.
  *
  */
-int sel_netif_sid(int ifindex, u32 *sid)
+int sel_netif_sid(struct net *ns, int ifindex, u32 *sid)
 {
 	struct sel_netif *netif;
 
 	rcu_read_lock();
-	netif = sel_netif_find(ifindex);
+	netif = sel_netif_find(ns, ifindex);
 	if (likely(netif != NULL)) {
 		*sid = netif->nsec.sid;
 		rcu_read_unlock();
@@ -208,11 +213,12 @@
 	}
 	rcu_read_unlock();
 
-	return sel_netif_sid_slow(ifindex, sid);
+	return sel_netif_sid_slow(ns, ifindex, sid);
 }
 
 /**
  * sel_netif_kill - Remove an entry from the network interface table
+ * @ns: the network namespace
  * @ifindex: the network interface
  *
  * Description:
@@ -220,13 +226,13 @@
  * table if it exists.
  *
  */
-static void sel_netif_kill(int ifindex)
+static void sel_netif_kill(const struct net *ns, int ifindex)
 {
 	struct sel_netif *netif;
 
 	rcu_read_lock();
 	spin_lock_bh(&sel_netif_lock);
-	netif = sel_netif_find(ifindex);
+	netif = sel_netif_find(ns, ifindex);
 	if (netif)
 		sel_netif_destroy(netif);
 	spin_unlock_bh(&sel_netif_lock);
@@ -240,7 +246,7 @@
  * Remove all entries from the network interface table.
  *
  */
-static void sel_netif_flush(void)
+void sel_netif_flush(void)
 {
 	int idx;
 	struct sel_netif *netif;
@@ -252,25 +258,13 @@
 	spin_unlock_bh(&sel_netif_lock);
 }
 
-static int sel_netif_avc_callback(u32 event)
-{
-	if (event == AVC_CALLBACK_RESET) {
-		sel_netif_flush();
-		synchronize_net();
-	}
-	return 0;
-}
-
 static int sel_netif_netdev_notifier_handler(struct notifier_block *this,
 					     unsigned long event, void *ptr)
 {
 	struct net_device *dev = ptr;
 
-	if (dev_net(dev) != &init_net)
-		return NOTIFY_DONE;
-
 	if (event == NETDEV_DOWN)
-		sel_netif_kill(dev->ifindex);
+		sel_netif_kill(dev_net(dev), dev->ifindex);
 
 	return NOTIFY_DONE;
 }
@@ -291,10 +285,6 @@
 
 	register_netdevice_notifier(&sel_netif_netdev_notifier);
 
-	err = avc_add_callback(sel_netif_avc_callback, AVC_CALLBACK_RESET);
-	if (err)
-		panic("avc_add_callback() failed, error %d\n", err);
-
 	return err;
 }
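With the changes above, the interface cache is keyed by the (namespace, ifindex) pair: the namespace pointer is folded into the hash bucket index, lookups compare both fields, and the netdev notifier no longer filters on &init_net. Callers derive the namespace from the device itself, as in the postroute hook hunks earlier; a condensed sketch of the lookup path:

	u32 if_sid;

	/* key the cache lookup by the device's own namespace */
	if (sel_netif_sid(dev_net(outdev), outdev->ifindex, &if_sid))
		return NF_DROP;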
 
diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c
index c5454c0..bb8de9d 100644
--- a/security/selinux/netnode.c
+++ b/security/selinux/netnode.c
@@ -281,7 +281,7 @@
  * Remove all entries from the network address table.
  *
  */
-static void sel_netnode_flush(void)
+void sel_netnode_flush(void)
 {
 	unsigned int idx;
 	struct sel_netnode *node, *node_tmp;
@@ -298,15 +298,6 @@
 	spin_unlock_bh(&sel_netnode_lock);
 }
 
-static int sel_netnode_avc_callback(u32 event)
-{
-	if (event == AVC_CALLBACK_RESET) {
-		sel_netnode_flush();
-		synchronize_net();
-	}
-	return 0;
-}
-
 static __init int sel_netnode_init(void)
 {
 	int iter;
@@ -320,10 +311,6 @@
 		sel_netnode_hash[iter].size = 0;
 	}
 
-	ret = avc_add_callback(sel_netnode_avc_callback, AVC_CALLBACK_RESET);
-	if (ret != 0)
-		panic("avc_add_callback() failed, error %d\n", ret);
-
 	return ret;
 }
 
diff --git a/security/selinux/netport.c b/security/selinux/netport.c
index d353797..73ac678 100644
--- a/security/selinux/netport.c
+++ b/security/selinux/netport.c
@@ -217,7 +217,7 @@
  * Remove all entries from the network address table.
  *
  */
-static void sel_netport_flush(void)
+void sel_netport_flush(void)
 {
 	unsigned int idx;
 	struct sel_netport *port, *port_tmp;
@@ -234,15 +234,6 @@
 	spin_unlock_bh(&sel_netport_lock);
 }
 
-static int sel_netport_avc_callback(u32 event)
-{
-	if (event == AVC_CALLBACK_RESET) {
-		sel_netport_flush();
-		synchronize_net();
-	}
-	return 0;
-}
-
 static __init int sel_netport_init(void)
 {
 	int iter;
@@ -256,10 +247,6 @@
 		sel_netport_hash[iter].size = 0;
 	}
 
-	ret = avc_add_callback(sel_netport_avc_callback, AVC_CALLBACK_RESET);
-	if (ret != 0)
-		panic("avc_add_callback() failed, error %d\n", ret);
-
 	return ret;
 }
 
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 855e464..4c29bcc 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -17,6 +17,7 @@
 #include <linux/inet_diag.h>
 #include <linux/xfrm.h>
 #include <linux/audit.h>
+#include <linux/sock_diag.h>
 
 #include "flask.h"
 #include "av_permissions.h"
@@ -78,6 +79,7 @@
 {
 	{ TCPDIAG_GETSOCK,	NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
 	{ DCCPDIAG_GETSOCK,	NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
+	{ SOCK_DIAG_BY_FAMILY,	NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
 };
 
 static struct nlmsg_perm nlmsg_xfrm_perms[] =
@@ -98,6 +100,13 @@
 	{ XFRM_MSG_FLUSHPOLICY,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
 	{ XFRM_MSG_NEWAE,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
 	{ XFRM_MSG_GETAE,	NETLINK_XFRM_SOCKET__NLMSG_READ  },
+	{ XFRM_MSG_REPORT,	NETLINK_XFRM_SOCKET__NLMSG_READ  },
+	{ XFRM_MSG_MIGRATE,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+	{ XFRM_MSG_NEWSADINFO,	NETLINK_XFRM_SOCKET__NLMSG_READ  },
+	{ XFRM_MSG_GETSADINFO,	NETLINK_XFRM_SOCKET__NLMSG_READ  },
+	{ XFRM_MSG_NEWSPDINFO,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
+	{ XFRM_MSG_GETSPDINFO,	NETLINK_XFRM_SOCKET__NLMSG_READ  },
+	{ XFRM_MSG_MAPPING,	NETLINK_XFRM_SOCKET__NLMSG_READ  },
 };
 
 static struct nlmsg_perm nlmsg_audit_perms[] =
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
index a3dd9fa..a6bef63 100644
--- a/security/selinux/ss/avtab.c
+++ b/security/selinux/ss/avtab.c
@@ -24,6 +24,7 @@
 #include "policydb.h"
 
 static struct kmem_cache *avtab_node_cachep;
+static struct kmem_cache *avtab_xperms_cachep;
 
 static inline int avtab_hash(struct avtab_key *keyp, u16 mask)
 {
@@ -37,11 +38,24 @@
 		  struct avtab_key *key, struct avtab_datum *datum)
 {
 	struct avtab_node *newnode;
+	struct avtab_extended_perms *xperms;
 	newnode = kmem_cache_zalloc(avtab_node_cachep, GFP_KERNEL);
 	if (newnode == NULL)
 		return NULL;
 	newnode->key = *key;
-	newnode->datum = *datum;
+
+	if (key->specified & AVTAB_XPERMS) {
+		xperms = kmem_cache_zalloc(avtab_xperms_cachep, GFP_KERNEL);
+		if (xperms == NULL) {
+			kmem_cache_free(avtab_node_cachep, newnode);
+			return NULL;
+		}
+		*xperms = *(datum->u.xperms);
+		newnode->datum.u.xperms = xperms;
+	} else {
+		newnode->datum.u.data = datum->u.data;
+	}
+
 	if (prev) {
 		newnode->next = prev->next;
 		prev->next = newnode;
@@ -70,8 +84,12 @@
 		if (key->source_type == cur->key.source_type &&
 		    key->target_type == cur->key.target_type &&
 		    key->target_class == cur->key.target_class &&
-		    (specified & cur->key.specified))
+		    (specified & cur->key.specified)) {
+			/* extended perms may not be unique */
+			if (specified & AVTAB_XPERMS)
+				break;
 			return -EEXIST;
+		}
 		if (key->source_type < cur->key.source_type)
 			break;
 		if (key->source_type == cur->key.source_type &&
@@ -232,6 +250,9 @@
 		while (cur) {
 			temp = cur;
 			cur = cur->next;
+			if (temp->key.specified & AVTAB_XPERMS)
+				kmem_cache_free(avtab_xperms_cachep,
+						temp->datum.u.xperms);
 			kmem_cache_free(avtab_node_cachep, temp);
 		}
 		h->htable[i] = NULL;
@@ -320,7 +341,10 @@
 	AVTAB_AUDITALLOW,
 	AVTAB_TRANSITION,
 	AVTAB_CHANGE,
-	AVTAB_MEMBER
+	AVTAB_MEMBER,
+	AVTAB_XPERMS_ALLOWED,
+	AVTAB_XPERMS_AUDITALLOW,
+	AVTAB_XPERMS_DONTAUDIT
 };
 
 int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
@@ -330,10 +354,11 @@
 {
 	__le16 buf16[4];
 	u16 enabled;
-	__le32 buf32[7];
 	u32 items, items2, val, vers = pol->policyvers;
 	struct avtab_key key;
 	struct avtab_datum datum;
+	struct avtab_extended_perms xperms;
+	__le32 buf32[ARRAY_SIZE(xperms.perms.p)];
 	int i, rc;
 	unsigned set;
 
@@ -390,11 +415,15 @@
 			printk(KERN_ERR "SELinux: avtab: entry has both access vectors and types\n");
 			return -EINVAL;
 		}
+		if (val & AVTAB_XPERMS) {
+			printk(KERN_ERR "SELinux: avtab: entry has extended permissions\n");
+			return -EINVAL;
+		}
 
 		for (i = 0; i < ARRAY_SIZE(spec_order); i++) {
 			if (val & spec_order[i]) {
 				key.specified = spec_order[i] | enabled;
-				datum.data = le32_to_cpu(buf32[items++]);
+				datum.u.data = le32_to_cpu(buf32[items++]);
 				rc = insertf(a, &key, &datum, p);
 				if (rc)
 					return rc;
@@ -437,14 +466,42 @@
 		return -EINVAL;
 	}
 
-	rc = next_entry(buf32, fp, sizeof(u32));
-	if (rc) {
-		printk(KERN_ERR "SELinux: avtab: truncated entry\n");
-		return rc;
+	if ((vers < POLICYDB_VERSION_XPERMS_IOCTL) &&
+			(key.specified & AVTAB_XPERMS)) {
+		printk(KERN_ERR "SELinux:  avtab:  policy version %u does not "
+				"support extended permissions rules and one "
+				"was specified\n", vers);
+		return -EINVAL;
+	} else if (key.specified & AVTAB_XPERMS) {
+		memset(&xperms, 0, sizeof(struct avtab_extended_perms));
+		rc = next_entry(&xperms.specified, fp, sizeof(u8));
+		if (rc) {
+			printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+			return rc;
+		}
+		rc = next_entry(&xperms.driver, fp, sizeof(u8));
+		if (rc) {
+			printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+			return rc;
+		}
+		rc = next_entry(buf32, fp, sizeof(u32)*ARRAY_SIZE(xperms.perms.p));
+		if (rc) {
+			printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+			return rc;
+		}
+		for (i = 0; i < ARRAY_SIZE(xperms.perms.p); i++)
+			xperms.perms.p[i] = le32_to_cpu(buf32[i]);
+		datum.u.xperms = &xperms;
+	} else {
+		rc = next_entry(buf32, fp, sizeof(u32));
+		if (rc) {
+			printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+			return rc;
+		}
+		datum.u.data = le32_to_cpu(*buf32);
 	}
-	datum.data = le32_to_cpu(*buf32);
 	if ((key.specified & AVTAB_TYPE) &&
-	    !policydb_type_isvalid(pol, datum.data)) {
+	    !policydb_type_isvalid(pol, datum.u.data)) {
 		printk(KERN_ERR "SELinux: avtab: invalid type\n");
 		return -EINVAL;
 	}
@@ -504,8 +561,9 @@
 int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp)
 {
 	__le16 buf16[4];
-	__le32 buf32[1];
+	__le32 buf32[ARRAY_SIZE(cur->datum.u.xperms->perms.p)];
 	int rc;
+	unsigned int i;
 
 	buf16[0] = cpu_to_le16(cur->key.source_type);
 	buf16[1] = cpu_to_le16(cur->key.target_type);
@@ -514,8 +572,22 @@
 	rc = put_entry(buf16, sizeof(u16), 4, fp);
 	if (rc)
 		return rc;
-	buf32[0] = cpu_to_le32(cur->datum.data);
-	rc = put_entry(buf32, sizeof(u32), 1, fp);
+
+	if (cur->key.specified & AVTAB_XPERMS) {
+		rc = put_entry(&cur->datum.u.xperms->specified, sizeof(u8), 1, fp);
+		if (rc)
+			return rc;
+		rc = put_entry(&cur->datum.u.xperms->driver, sizeof(u8), 1, fp);
+		if (rc)
+			return rc;
+		for (i = 0; i < ARRAY_SIZE(cur->datum.u.xperms->perms.p); i++)
+			buf32[i] = cpu_to_le32(cur->datum.u.xperms->perms.p[i]);
+		rc = put_entry(buf32, sizeof(u32),
+				ARRAY_SIZE(cur->datum.u.xperms->perms.p), fp);
+	} else {
+		buf32[0] = cpu_to_le32(cur->datum.u.data);
+		rc = put_entry(buf32, sizeof(u32), 1, fp);
+	}
 	if (rc)
 		return rc;
 	return 0;
@@ -548,9 +620,13 @@
 	avtab_node_cachep = kmem_cache_create("avtab_node",
 					      sizeof(struct avtab_node),
 					      0, SLAB_PANIC, NULL);
+	avtab_xperms_cachep = kmem_cache_create("avtab_extended_perms",
+						sizeof(struct avtab_extended_perms),
+						0, SLAB_PANIC, NULL);
 }
 
 void avtab_cache_destroy(void)
 {
 	kmem_cache_destroy(avtab_node_cachep);
+	kmem_cache_destroy(avtab_xperms_cachep);
 }
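Two details worth noting in the avtab changes above. First, avtab_read_item() builds the extended-perms payload in a stack variable and points datum.u.xperms at it, so avtab_insert_node() must deep-copy it into the new avtab_xperms_cachep slab (plain u32 data is still copied by value, and avtab_destroy() frees the slab object alongside the node). Second, the on-disk record for an xperms entry, as read and written above, is:

	/* layout of an AVTAB_XPERMS datum (policy version >= 30):
	 *	u8   specified;	AVTAB_XPERMS_IOCTLFUNCTION or _IOCTLDRIVER
	 *	u8   driver;
	 *	le32 perms[8];	256-bit permission bitmap
	 */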
diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h
index 63ce2f9..8133523 100644
--- a/security/selinux/ss/avtab.h
+++ b/security/selinux/ss/avtab.h
@@ -23,6 +23,8 @@
 #ifndef _SS_AVTAB_H_
 #define _SS_AVTAB_H_
 
+#include "security.h"
+
 struct avtab_key {
 	u16 source_type;	/* source type */
 	u16 target_type;	/* target type */
@@ -35,13 +37,43 @@
 #define AVTAB_MEMBER		0x0020
 #define AVTAB_CHANGE		0x0040
 #define AVTAB_TYPE		(AVTAB_TRANSITION | AVTAB_MEMBER | AVTAB_CHANGE)
+/* extended permissions */
+#define AVTAB_XPERMS_ALLOWED	0x0100
+#define AVTAB_XPERMS_AUDITALLOW	0x0200
+#define AVTAB_XPERMS_DONTAUDIT	0x0400
+#define AVTAB_XPERMS		(AVTAB_XPERMS_ALLOWED | \
+				AVTAB_XPERMS_AUDITALLOW | \
+				AVTAB_XPERMS_DONTAUDIT)
 #define AVTAB_ENABLED_OLD   0x80000000 /* reserved for use in cond_avtab */
 #define AVTAB_ENABLED		0x8000 /* reserved for use in cond_avtab */
 	u16 specified;	/* what field is specified */
 };
 
+/*
+ * For operations that require more than the 32 permissions provided by
+ * the avc, extended permissions may be used to provide 256 bits of
+ * permissions.
+ */
+struct avtab_extended_perms {
+/* These are not flags. All 256 values may be used */
+#define AVTAB_XPERMS_IOCTLFUNCTION	0x01
+#define AVTAB_XPERMS_IOCTLDRIVER	0x02
+	/* extension of the avtab_key specified */
+	u8 specified; /* ioctl, netfilter, ... */
+	/*
+	 * if 256 bits is not adequate as is often the case with ioctls, then
+	 * multiple extended perms may be used and the driver field
+	 * specifies which permissions are included.
+	 */
+	u8 driver;
+	/* 256 bits of permissions */
+	struct extended_perms_data perms;
+};
+
 struct avtab_datum {
-	u32 data; /* access vector or type value */
+	union {
+		u32 data; /* access vector or type value */
+		struct avtab_extended_perms *xperms;
+	} u;
 };
 
 struct avtab_node {
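The two 'specified' kinds give two granularities: an AVTAB_XPERMS_IOCTLDRIVER entry treats perms as a bitmap over the 256 possible driver bytes (whole drivers granted at once), while an AVTAB_XPERMS_IOCTLFUNCTION entry fixes one driver byte and treats perms as a bitmap over the 256 function bytes within it. A hedged example for a hypothetical command range 0x8910-0x8913:

	struct avtab_extended_perms x = {
		.specified = AVTAB_XPERMS_IOCTLFUNCTION,
		.driver    = 0x89,		/* upper byte of the commands */
	};

	security_xperm_set(x.perms.p, 0x10);	/* lower bytes 0x10..0x13 */
	security_xperm_set(x.perms.p, 0x11);
	security_xperm_set(x.perms.p, 0x12);
	security_xperm_set(x.perms.p, 0x13);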
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index 377d148..6618fb4 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -15,6 +15,7 @@
 
 #include "security.h"
 #include "conditional.h"
+#include "services.h"
 
 /*
  * cond_evaluate_expr evaluates a conditional expr
@@ -617,21 +618,39 @@
 
 	return 0;
 }
-/* Determine whether additional permissions are granted by the conditional
- * av table, and if so, add them to the result
- */
-void cond_compute_av(struct avtab *ctab, struct avtab_key *key, struct av_decision *avd)
+
+void cond_compute_xperms(struct avtab *ctab, struct avtab_key *key,
+		struct extended_perms_decision *xpermd)
 {
 	struct avtab_node *node;
 
-	if (!ctab || !key || !avd)
+	if (!ctab || !key || !xpermd)
+		return;
+
+	for (node = avtab_search_node(ctab, key); node;
+			node = avtab_search_node_next(node, key->specified)) {
+		if (node->key.specified & AVTAB_ENABLED)
+			services_compute_xperms_decision(xpermd, node);
+	}
+}
+
+/* Determine whether additional permissions are granted by the conditional
+ * av table, and if so, add them to the result
+ */
+void cond_compute_av(struct avtab *ctab, struct avtab_key *key,
+		struct av_decision *avd, struct extended_perms *xperms)
+{
+	struct avtab_node *node;
+
+	if (!ctab || !key || !avd || !xperms)
 		return;
 
 	for (node = avtab_search_node(ctab, key); node;
 				node = avtab_search_node_next(node, key->specified)) {
 		if ((u16)(AVTAB_ALLOWED|AVTAB_ENABLED) ==
 		    (node->key.specified & (AVTAB_ALLOWED|AVTAB_ENABLED)))
-			avd->allowed |= node->datum.data;
+			avd->allowed |= node->datum.u.data;
 		if ((u16)(AVTAB_AUDITDENY|AVTAB_ENABLED) ==
 		    (node->key.specified & (AVTAB_AUDITDENY|AVTAB_ENABLED)))
 			/* Since a '0' in an auditdeny mask represents a
@@ -639,10 +658,13 @@
 			 * the '&' operand to ensure that all '0's in the mask
 			 * are retained (much unlike the allow and auditallow cases).
 			 */
-			avd->auditdeny &= node->datum.data;
+			avd->auditdeny &= node->datum.u.data;
 		if ((u16)(AVTAB_AUDITALLOW|AVTAB_ENABLED) ==
 		    (node->key.specified & (AVTAB_AUDITALLOW|AVTAB_ENABLED)))
-			avd->auditallow |= node->datum.data;
+			avd->auditallow |= node->datum.u.data;
+		if ((node->key.specified & AVTAB_ENABLED) &&
+				(node->key.specified & AVTAB_XPERMS))
+			services_compute_xperms_drivers(xperms, node);
 	}
 	return;
 }
diff --git a/security/selinux/ss/conditional.h b/security/selinux/ss/conditional.h
index 4d1f874..ddb43e7 100644
--- a/security/selinux/ss/conditional.h
+++ b/security/selinux/ss/conditional.h
@@ -73,8 +73,10 @@
 int cond_write_bool(void *key, void *datum, void *ptr);
 int cond_write_list(struct policydb *p, struct cond_node *list, void *fp);
 
-void cond_compute_av(struct avtab *ctab, struct avtab_key *key, struct av_decision *avd);
-
+void cond_compute_av(struct avtab *ctab, struct avtab_key *key,
+		struct av_decision *avd, struct extended_perms *xperms);
+void cond_compute_xperms(struct avtab *ctab, struct avtab_key *key,
+		struct extended_perms_decision *xpermd);
 int evaluate_cond_node(struct policydb *p, struct cond_node *node);
 
 #endif /* _CONDITIONAL_H_ */
diff --git a/security/selinux/ss/constraint.h b/security/selinux/ss/constraint.h
index 149dda7..96fd947 100644
--- a/security/selinux/ss/constraint.h
+++ b/security/selinux/ss/constraint.h
@@ -48,6 +48,7 @@
 	u32 op;			/* operator */
 
 	struct ebitmap names;	/* names */
+	struct type_set *type_names;
 
 	struct constraint_expr *next;   /* next expression */
 };
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index bcdca73..ca2c6f4 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -143,6 +143,16 @@
 		.sym_num	= SYM_NUM,
 		.ocon_num	= OCON_NUM,
 	},
+	{
+		.version	= POLICYDB_VERSION_CONSTRAINT_NAMES,
+		.sym_num	= SYM_NUM,
+		.ocon_num	= OCON_NUM,
+	},
+	{
+		.version	= POLICYDB_VERSION_XPERMS_IOCTL,
+		.sym_num	= SYM_NUM,
+		.ocon_num	= OCON_NUM,
+	},
 };
 
 static struct policydb_compat_info *policydb_lookup_compat(int version)
@@ -613,6 +623,19 @@
 	return 0;
 }
 
+static void constraint_expr_destroy(struct constraint_expr *expr)
+{
+	if (expr) {
+		ebitmap_destroy(&expr->names);
+		if (expr->type_names) {
+			ebitmap_destroy(&expr->type_names->types);
+			ebitmap_destroy(&expr->type_names->negset);
+			kfree(expr->type_names);
+		}
+		kfree(expr);
+	}
+}
+
 static int cls_destroy(void *key, void *datum, void *p)
 {
 	struct class_datum *cladatum;
@@ -628,10 +651,9 @@
 		while (constraint) {
 			e = constraint->expr;
 			while (e) {
-				ebitmap_destroy(&e->names);
 				etmp = e;
 				e = e->next;
-				kfree(etmp);
+				constraint_expr_destroy(etmp);
 			}
 			ctemp = constraint;
 			constraint = constraint->next;
@@ -642,16 +664,14 @@
 		while (constraint) {
 			e = constraint->expr;
 			while (e) {
-				ebitmap_destroy(&e->names);
 				etmp = e;
 				e = e->next;
-				kfree(etmp);
+				constraint_expr_destroy(etmp);
 			}
 			ctemp = constraint;
 			constraint = constraint->next;
 			kfree(ctemp);
 		}
-
 		kfree(cladatum->comkey);
 	}
 	kfree(datum);
@@ -1156,8 +1176,34 @@
 	return rc;
 }
 
-static int read_cons_helper(struct constraint_node **nodep, int ncons,
-			    int allowxtarget, void *fp)
+static void type_set_init(struct type_set *t)
+{
+	ebitmap_init(&t->types);
+	ebitmap_init(&t->negset);
+}
+
+static int type_set_read(struct type_set *t, void *fp)
+{
+	__le32 buf[1];
+	int rc;
+
+	if (ebitmap_read(&t->types, fp))
+		return -EINVAL;
+	if (ebitmap_read(&t->negset, fp))
+		return -EINVAL;
+
+	rc = next_entry(buf, fp, sizeof(u32));
+	if (rc < 0)
+		return -EINVAL;
+	t->flags = le32_to_cpu(buf[0]);
+
+	return 0;
+}
+
+
+static int read_cons_helper(struct policydb *p,
+				struct constraint_node **nodep,
+				int ncons, int allowxtarget, void *fp)
 {
 	struct constraint_node *c, *lc;
 	struct constraint_expr *e, *le;
@@ -1225,6 +1271,18 @@
 				rc = ebitmap_read(&e->names, fp);
 				if (rc)
 					return rc;
+				if (p->policyvers >=
+				    POLICYDB_VERSION_CONSTRAINT_NAMES) {
+					e->type_names = kzalloc(sizeof(*e->type_names),
+								GFP_KERNEL);
+					if (!e->type_names)
+						return -ENOMEM;
+					type_set_init(e->type_names);
+					rc = type_set_read(e->type_names, fp);
+					if (rc)
+						return rc;
+				}
 				break;
 			default:
 				return -EINVAL;
@@ -1301,7 +1359,7 @@
 			goto bad;
 	}
 
-	rc = read_cons_helper(&cladatum->constraints, ncons, 0, fp);
+	rc = read_cons_helper(p, &cladatum->constraints, ncons, 0, fp);
 	if (rc)
 		goto bad;
 
@@ -1311,7 +1369,8 @@
 		if (rc)
 			goto bad;
 		ncons = le32_to_cpu(buf[0]);
-		rc = read_cons_helper(&cladatum->validatetrans, ncons, 1, fp);
+		rc = read_cons_helper(p, &cladatum->validatetrans,
+				ncons, 1, fp);
 		if (rc)
 			goto bad;
 	}
@@ -2762,6 +2821,24 @@
 	return 0;
 }
 
+static int type_set_write(struct type_set *t, void *fp)
+{
+	int rc;
+	__le32 buf[1];
+
+	if (ebitmap_write(&t->types, fp))
+		return -EINVAL;
+	if (ebitmap_write(&t->negset, fp))
+		return -EINVAL;
+
+	buf[0] = cpu_to_le32(t->flags);
+	rc = put_entry(buf, sizeof(u32), 1, fp);
+	if (rc)
+		return -EINVAL;
+
+	return 0;
+}
+
 static int write_cons_helper(struct policydb *p, struct constraint_node *node,
 			     void *fp)
 {
@@ -2793,6 +2870,12 @@
 				rc = ebitmap_write(&e->names, fp);
 				if (rc)
 					return rc;
+				if (p->policyvers >=
+					POLICYDB_VERSION_CONSTRAINT_NAMES) {
+					rc = type_set_write(e->type_names, fp);
+					if (rc)
+						return rc;
+				}
 				break;
 			default:
 				break;
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index da63747..725d594 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -154,6 +154,17 @@
 struct cond_node;
 
 /*
+ * type set preserves data needed to determine constraint info from
+ * policy source. This is not used by the kernel policy but allows
+ * utilities such as audit2allow to determine constraint denials.
+ */
+struct type_set {
+	struct ebitmap types;
+	struct ebitmap negset;
+	u32 flags;
+};
+
+/*
  * The configuration data includes security contexts for
  * initial SIDs, unlabeled file systems, TCP and UDP port numbers,
  * network interfaces, and nodes.  This structure stores the
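The struct type_set above is populated by type_set_read() and written back by type_set_write() in policydb.c; the serialized form appended to each names-bearing constraint expression from POLICYDB_VERSION_CONSTRAINT_NAMES onward is:

	/*	ebitmap types;		via ebitmap_read()/ebitmap_write()
	 *	ebitmap negset;
	 *	le32    flags;
	 */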
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 18caa16..cf905cd 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -92,9 +92,10 @@
 				    u32 *scontext_len);
 
 static void context_struct_compute_av(struct context *scontext,
-				      struct context *tcontext,
-				      u16 tclass,
-				      struct av_decision *avd);
+					struct context *tcontext,
+					u16 tclass,
+					struct av_decision *avd,
+					struct extended_perms *xperms);
 
 struct selinux_mapping {
 	u16 value; /* policy value */
@@ -564,7 +565,8 @@
 		context_struct_compute_av(&lo_scontext,
 					  tcontext,
 					  tclass,
-					  &lo_avd);
+					  &lo_avd,
+					  NULL);
 		if ((lo_avd.allowed & avd->allowed) == avd->allowed)
 			return;		/* no masked permission */
 		masked = ~lo_avd.allowed & avd->allowed;
@@ -579,7 +581,8 @@
 		context_struct_compute_av(scontext,
 					  &lo_tcontext,
 					  tclass,
-					  &lo_avd);
+					  &lo_avd,
+					  NULL);
 		if ((lo_avd.allowed & avd->allowed) == avd->allowed)
 			return;		/* no masked permission */
 		masked = ~lo_avd.allowed & avd->allowed;
@@ -595,7 +598,8 @@
 		context_struct_compute_av(&lo_scontext,
 					  &lo_tcontext,
 					  tclass,
-					  &lo_avd);
+					  &lo_avd,
+					  NULL);
 		if ((lo_avd.allowed & avd->allowed) == avd->allowed)
 			return;		/* no masked permission */
 		masked = ~lo_avd.allowed & avd->allowed;
@@ -612,13 +616,39 @@
 }
 
 /*
- * Compute access vectors based on a context structure pair for
- * the permissions in a particular class.
+ * flag which drivers have permissions,
+ * looking only at ioctl-based extended permissions
+ */
+void services_compute_xperms_drivers(
+		struct extended_perms *xperms,
+		struct avtab_node *node)
+{
+	unsigned int i;
+
+	if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLDRIVER) {
+		/* if one or more driver has all permissions allowed */
+		for (i = 0; i < ARRAY_SIZE(xperms->drivers.p); i++)
+			xperms->drivers.p[i] |= node->datum.u.xperms->perms.p[i];
+	} else if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLFUNCTION) {
+		/* if allowing permissions within a driver */
+		security_xperm_set(xperms->drivers.p,
+					node->datum.u.xperms->driver);
+	}
+
+	/* If no ioctl commands are allowed, ignore auditallow and auditdeny */
+	if (node->key.specified & AVTAB_XPERMS_ALLOWED)
+		xperms->len = 1;
+}
+
+/*
+ * Compute access vectors and extended permissions based on a context
+ * structure pair for the permissions in a particular class.
  */
 static void context_struct_compute_av(struct context *scontext,
-				      struct context *tcontext,
-				      u16 tclass,
-				      struct av_decision *avd)
+					struct context *tcontext,
+					u16 tclass,
+					struct av_decision *avd,
+					struct extended_perms *xperms)
 {
 	struct constraint_node *constraint;
 	struct role_allow *ra;
@@ -632,6 +662,10 @@
 	avd->allowed = 0;
 	avd->auditallow = 0;
 	avd->auditdeny = 0xffffffff;
+	if (xperms) {
+		memset(&xperms->drivers, 0, sizeof(xperms->drivers));
+		xperms->len = 0;
+	}
 
 	if (unlikely(!tclass || tclass > policydb.p_classes.nprim)) {
 		if (printk_ratelimit())
@@ -646,7 +680,7 @@
 	 * this permission check, then use it.
 	 */
 	avkey.target_class = tclass;
-	avkey.specified = AVTAB_AV;
+	avkey.specified = AVTAB_AV | AVTAB_XPERMS;
 	sattr = flex_array_get(policydb.type_attr_map_array, scontext->type - 1);
 	BUG_ON(!sattr);
 	tattr = flex_array_get(policydb.type_attr_map_array, tcontext->type - 1);
@@ -659,15 +693,18 @@
 			     node;
 			     node = avtab_search_node_next(node, avkey.specified)) {
 				if (node->key.specified == AVTAB_ALLOWED)
-					avd->allowed |= node->datum.data;
+					avd->allowed |= node->datum.u.data;
 				else if (node->key.specified == AVTAB_AUDITALLOW)
-					avd->auditallow |= node->datum.data;
+					avd->auditallow |= node->datum.u.data;
 				else if (node->key.specified == AVTAB_AUDITDENY)
-					avd->auditdeny &= node->datum.data;
+					avd->auditdeny &= node->datum.u.data;
+				else if (xperms && (node->key.specified & AVTAB_XPERMS))
+					services_compute_xperms_drivers(xperms, node);
 			}
 
 			/* Check conditional av table for additional permissions */
-			cond_compute_av(&policydb.te_cond_avtab, &avkey, avd);
+			cond_compute_av(&policydb.te_cond_avtab, &avkey,
+					avd, xperms);
 
 		}
 	}
@@ -898,6 +935,139 @@
 	avd->flags = 0;
 }
 
+void services_compute_xperms_decision(struct extended_perms_decision *xpermd,
+					struct avtab_node *node)
+{
+	unsigned int i;
+
+	if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLFUNCTION) {
+		if (xpermd->driver != node->datum.u.xperms->driver)
+			return;
+	} else if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLDRIVER) {
+		if (!security_xperm_test(node->datum.u.xperms->perms.p,
+					xpermd->driver))
+			return;
+	} else {
+		BUG();
+	}
+
+	if (node->key.specified == AVTAB_XPERMS_ALLOWED) {
+		xpermd->used |= XPERMS_ALLOWED;
+		if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLDRIVER) {
+			memset(xpermd->allowed->p, 0xff,
+					sizeof(xpermd->allowed->p));
+		}
+		if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLFUNCTION) {
+			for (i = 0; i < ARRAY_SIZE(xpermd->allowed->p); i++)
+				xpermd->allowed->p[i] |=
+					node->datum.u.xperms->perms.p[i];
+		}
+	} else if (node->key.specified == AVTAB_XPERMS_AUDITALLOW) {
+		xpermd->used |= XPERMS_AUDITALLOW;
+		if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLDRIVER) {
+			memset(xpermd->auditallow->p, 0xff,
+					sizeof(xpermd->auditallow->p));
+		}
+		if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLFUNCTION) {
+			for (i = 0; i < ARRAY_SIZE(xpermd->auditallow->p); i++)
+				xpermd->auditallow->p[i] |=
+					node->datum.u.xperms->perms.p[i];
+		}
+	} else if (node->key.specified == AVTAB_XPERMS_DONTAUDIT) {
+		xpermd->used |= XPERMS_DONTAUDIT;
+		if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLDRIVER) {
+			memset(xpermd->dontaudit->p, 0xff,
+					sizeof(xpermd->dontaudit->p));
+		}
+		if (node->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLFUNCTION) {
+			for (i = 0; i < ARRAY_SIZE(xpermd->dontaudit->p); i++)
+				xpermd->dontaudit->p[i] |=
+					node->datum.u.xperms->perms.p[i];
+		}
+	} else {
+		BUG();
+	}
+}
+
+void security_compute_xperms_decision(u32 ssid,
+				u32 tsid,
+				u16 orig_tclass,
+				u8 driver,
+				struct extended_perms_decision *xpermd)
+{
+	u16 tclass;
+	struct context *scontext, *tcontext;
+	struct avtab_key avkey;
+	struct avtab_node *node;
+	struct ebitmap *sattr, *tattr;
+	struct ebitmap_node *snode, *tnode;
+	unsigned int i, j;
+
+	xpermd->driver = driver;
+	xpermd->used = 0;
+	memset(xpermd->allowed->p, 0, sizeof(xpermd->allowed->p));
+	memset(xpermd->auditallow->p, 0, sizeof(xpermd->auditallow->p));
+	memset(xpermd->dontaudit->p, 0, sizeof(xpermd->dontaudit->p));
+
+	read_lock(&policy_rwlock);
+	if (!ss_initialized)
+		goto allow;
+
+	scontext = sidtab_search(&sidtab, ssid);
+	if (!scontext) {
+		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
+		       __func__, ssid);
+		goto out;
+	}
+
+	tcontext = sidtab_search(&sidtab, tsid);
+	if (!tcontext) {
+		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
+		       __func__, tsid);
+		goto out;
+	}
+
+	tclass = unmap_class(orig_tclass);
+	if (unlikely(orig_tclass && !tclass)) {
+		if (policydb.allow_unknown)
+			goto allow;
+		goto out;
+	}
+
+
+		pr_warn_ratelimited("SELinux:  Invalid class %hu\n", tclass);
+		goto out;
+	}
+
+	avkey.target_class = tclass;
+	avkey.specified = AVTAB_XPERMS;
+	sattr = flex_array_get(policydb.type_attr_map_array,
+				scontext->type - 1);
+	BUG_ON(!sattr);
+	tattr = flex_array_get(policydb.type_attr_map_array,
+				tcontext->type - 1);
+	BUG_ON(!tattr);
+	ebitmap_for_each_positive_bit(sattr, snode, i) {
+		ebitmap_for_each_positive_bit(tattr, tnode, j) {
+			avkey.source_type = i + 1;
+			avkey.target_type = j + 1;
+			for (node = avtab_search_node(&policydb.te_avtab, &avkey);
+			     node;
+			     node = avtab_search_node_next(node, avkey.specified))
+				services_compute_xperms_decision(xpermd, node);
+
+			cond_compute_xperms(&policydb.te_cond_avtab,
+						&avkey, xpermd);
+		}
+	}
+out:
+	read_unlock(&policy_rwlock);
+	return;
+allow:
+	memset(xpermd->allowed->p, 0xff, sizeof(xpermd->allowed->p));
+	goto out;
+}
 
 /**
  * security_compute_av - Compute access vector decisions.
@@ -905,6 +1075,7 @@
  * @tsid: target security identifier
  * @tclass: target security class
  * @avd: access vector decisions
+ * @xperms: extended permissions
  *
  * Compute a set of access vector decisions based on the
  * SID pair (@ssid, @tsid) for the permissions in @tclass.
@@ -912,13 +1083,15 @@
 void security_compute_av(u32 ssid,
 			 u32 tsid,
 			 u16 orig_tclass,
-			 struct av_decision *avd)
+			 struct av_decision *avd,
+			 struct extended_perms *xperms)
 {
 	u16 tclass;
 	struct context *scontext = NULL, *tcontext = NULL;
 
 	read_lock(&policy_rwlock);
 	avd_init(avd);
+	xperms->len = 0;
 	if (!ss_initialized)
 		goto allow;
 
@@ -946,7 +1119,7 @@
 			goto allow;
 		goto out;
 	}
-	context_struct_compute_av(scontext, tcontext, tclass, avd);
+	context_struct_compute_av(scontext, tcontext, tclass, avd, xperms);
 	map_decision(orig_tclass, avd, policydb.allow_unknown);
 out:
 	read_unlock(&policy_rwlock);
@@ -992,7 +1165,7 @@
 		goto out;
 	}
 
-	context_struct_compute_av(scontext, tcontext, tclass, avd);
+	context_struct_compute_av(scontext, tcontext, tclass, avd, NULL);
  out:
 	read_unlock(&policy_rwlock);
 	return;
@@ -1512,7 +1685,7 @@
 
 	if (avdatum) {
 		/* Use the type from the type transition/member/change rule. */
-		newcontext.type = avdatum->data;
+		newcontext.type = avdatum->u.data;
 	}
 
 	/* if we have a objname this is a file trans check so check those rules */
diff --git a/security/selinux/ss/services.h b/security/selinux/ss/services.h
index e8d907e..6abcd87 100644
--- a/security/selinux/ss/services.h
+++ b/security/selinux/ss/services.h
@@ -11,5 +11,11 @@
 
 extern struct policydb policydb;
 
+void services_compute_xperms_drivers(struct extended_perms *xperms,
+				struct avtab_node *node);
+
+void services_compute_xperms_decision(struct extended_perms_decision *xpermd,
+					struct avtab_node *node);
+
 #endif	/* _SS_SERVICES_H_ */
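Taken together, the services changes split the extended-permissions check into two phases: security_compute_av() reports whether any xperms rules exist (xperms->len) and which driver bytes they cover (xperms->drivers), and only when the driver bit is set is security_compute_xperms_decision() consulted for the full 256-bit verdict. A sketch of the logic, ignoring auditing and the AVC's caching of both results:

	static bool xperm_allowed(u32 ssid, u32 tsid, u16 tclass,
				  struct extended_perms *xperms,
				  u8 driver, u8 perm)
	{
		struct extended_perms_data allowed, auditallow, dontaudit;
		struct extended_perms_decision xpermd = {
			.allowed    = &allowed,
			.auditallow = &auditallow,
			.dontaudit  = &dontaudit,
		};

		if (!xperms->len)
			return true;	/* no xperms rules; the base av governs */
		if (!security_xperm_test(xperms->drivers.p, driver))
			return false;	/* no rule mentions this driver byte */
		security_compute_xperms_decision(ssid, tsid, tclass, driver,
						 &xpermd);
		return security_xperm_test(xpermd.allowed->p, perm);
	}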
 
diff --git a/sound/core/Kconfig b/sound/core/Kconfig
index b413ed0..27a27bd 100644
--- a/sound/core/Kconfig
+++ b/sound/core/Kconfig
@@ -15,6 +15,9 @@
 config SND_COMPRESS_OFFLOAD
 	tristate
 
+config SND_EFFECTS_OFFLOAD
+	tristate
+
 # To be effective this also requires INPUT - users should say:
 #    select SND_JACK if INPUT=y || INPUT=SND
 # to avoid having to force INPUT on.
diff --git a/sound/core/Makefile b/sound/core/Makefile
index 43d4117..9e37a2b 100644
--- a/sound/core/Makefile
+++ b/sound/core/Makefile
@@ -23,6 +23,7 @@
 snd-hwdep-objs    := hwdep.o
 
 snd-compress-objs := compress_offload.o
+snd-effects-objs := effects_offload.o
 
 obj-$(CONFIG_SND) 		+= snd.o
 obj-$(CONFIG_SND_HWDEP)		+= snd-hwdep.o
@@ -36,3 +37,4 @@
 obj-$(CONFIG_SND_SEQUENCER)	+= seq/
 
 obj-$(CONFIG_SND_COMPRESS_OFFLOAD)	+= snd-compress.o
+obj-$(CONFIG_SND_EFFECTS_OFFLOAD)	+= snd-effects.o
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 572f951..9174d84 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -130,6 +130,7 @@
 	}
 	runtime->state = SNDRV_PCM_STATE_OPEN;
 	init_waitqueue_head(&runtime->sleep);
+	init_waitqueue_head(&runtime->wait);
 	data->stream.runtime = runtime;
 	f->private_data = (void *)data;
 	mutex_lock(&compr->lock);
@@ -146,6 +147,18 @@
 static int snd_compr_free(struct inode *inode, struct file *f)
 {
 	struct snd_compr_file *data = f->private_data;
+	struct snd_compr_runtime *runtime = data->stream.runtime;
+
+	switch (runtime->state) {
+	case SNDRV_PCM_STATE_RUNNING:
+	case SNDRV_PCM_STATE_DRAINING:
+	case SNDRV_PCM_STATE_PAUSED:
+		data->stream.ops->trigger(&data->stream, SNDRV_PCM_TRIGGER_STOP);
+		break;
+	default:
+		break;
+	}
+
 	data->stream.ops->free(&data->stream);
 	kfree(data->stream.runtime->buffer);
 	kfree(data->stream.runtime);
@@ -263,16 +276,25 @@
 	struct snd_compr_file *data = f->private_data;
 	struct snd_compr_stream *stream;
 	size_t avail;
-	int retval;
+	int retval = 0;
 
 	if (snd_BUG_ON(!data))
 		return -EFAULT;
 
 	stream = &data->stream;
 	mutex_lock(&stream->device->lock);
-	/* write is allowed when stream is running or has been steup */
+	/*
+	 * if the stream is in the paused state, report the number
+	 * of bytes consumed as 0
+	 */
+	if (stream->runtime->state == SNDRV_PCM_STATE_PAUSED) {
+		mutex_unlock(&stream->device->lock);
+		return retval;
+	}
+	/* write is allowed when stream is running or prepared or in setup */
 	if (stream->runtime->state != SNDRV_PCM_STATE_SETUP &&
-			stream->runtime->state != SNDRV_PCM_STATE_RUNNING) {
+			stream->runtime->state != SNDRV_PCM_STATE_RUNNING &&
+			stream->runtime->state != SNDRV_PCM_STATE_PREPARED) {
 		mutex_unlock(&stream->device->lock);
 		return -EBADFD;
 	}
@@ -379,8 +401,7 @@
 		return -EFAULT;
 
 	mutex_lock(&stream->device->lock);
-	if (stream->runtime->state == SNDRV_PCM_STATE_PAUSED ||
-			stream->runtime->state == SNDRV_PCM_STATE_OPEN) {
+	if (stream->runtime->state == SNDRV_PCM_STATE_OPEN) {
 		retval = -EBADFD;
 		goto out;
 	}
@@ -404,6 +425,7 @@
 			retval = snd_compr_get_poll(stream);
 		break;
 	default:
+		pr_err("poll returns err!...\n");
 		if (stream->direction == SND_COMPRESS_PLAYBACK)
 			retval = POLLOUT | POLLWRNORM | POLLERR;
 		else
@@ -499,9 +521,6 @@
 	if (params->codec.ch_in == 0 || params->codec.ch_out == 0)
 		return -EINVAL;
 
-	if (!(params->codec.sample_rate & SNDRV_PCM_RATE_8000_192000))
-		return -EINVAL;
-
 	return 0;
 }
 
@@ -636,7 +655,8 @@
 {
 	int retval;
 
-	if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
+	if ((stream->runtime->state != SNDRV_PCM_STATE_RUNNING) &&
+		(stream->runtime->state != SNDRV_PCM_STATE_DRAINING))
 		return -EPERM;
 	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
 	if (!retval)
@@ -651,8 +671,10 @@
 	if (stream->runtime->state != SNDRV_PCM_STATE_PAUSED)
 		return -EPERM;
 	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
-	if (!retval)
+	if (!retval) {
 		stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
+		wake_up(&stream->runtime->sleep);
+	}
 	return retval;
 }
 
@@ -668,21 +690,24 @@
 	return retval;
 }
 
-static int snd_compr_stop(struct snd_compr_stream *stream)
+int snd_compr_stop(struct snd_compr_stream *stream)
 {
-	int retval;
+	int retval = 0;
 
-	if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
-			stream->runtime->state == SNDRV_PCM_STATE_SETUP)
+	if (stream->runtime->state == SNDRV_PCM_STATE_SETUP)
 		return -EPERM;
-	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
+	if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
+		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
 	if (!retval) {
+		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+		wake_up(&stream->runtime->sleep);
 		snd_compr_drain_notify(stream);
 		stream->runtime->total_bytes_available = 0;
 		stream->runtime->total_bytes_transferred = 0;
 	}
 	return retval;
 }
+EXPORT_SYMBOL(snd_compr_stop);
 
 static int snd_compress_wait_for_drain(struct snd_compr_stream *stream)
 {
@@ -721,20 +746,30 @@
 
 static int snd_compr_drain(struct snd_compr_stream *stream)
 {
-	int retval;
+	int retval = 0;
 
-	if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
-			stream->runtime->state == SNDRV_PCM_STATE_SETUP)
+	if (stream->runtime->state == SNDRV_PCM_STATE_SETUP)
 		return -EPERM;
 
-	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
+	stream->runtime->drain_wake = 0;
+
+	/* this is hackish for our tree, but carry it for now while we fix
+	 * the user-mode behaviour
+	 */
+	if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
+		retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
+	else
+		return 0;
+
 	if (retval) {
-		pr_debug("SND_COMPR_TRIGGER_DRAIN failed %d\n", retval);
+		pr_err("SND_COMPR_TRIGGER_DRAIN failed %d\n", retval);
 		wake_up(&stream->runtime->sleep);
 		return retval;
 	}
 
-	return snd_compress_wait_for_drain(stream);
+	retval = snd_compress_wait_for_drain(stream);
+	stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+	return retval;
 }
 
 static int snd_compr_next_track(struct snd_compr_stream *stream)
@@ -761,23 +796,31 @@
 
 static int snd_compr_partial_drain(struct snd_compr_stream *stream)
 {
-	int retval;
-	if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
-			stream->runtime->state == SNDRV_PCM_STATE_SETUP)
+	int retval = 0;
+
+	/* again, the same hackish change as in snd_compr_drain() */
+	if (stream->runtime->state == SNDRV_PCM_STATE_SETUP)
 		return -EPERM;
 	/* stream can be drained only when next track has been signalled */
 	if (stream->next_track == false)
 		return -EPERM;
 
-	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
+	stream->runtime->drain_wake = 0;
+	if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
+		retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
+	else
+		return 0;
+
 	if (retval) {
-		pr_debug("Partial drain returned failure\n");
+		pr_err("Partial drain returned failure\n");
 		wake_up(&stream->runtime->sleep);
 		return retval;
 	}
 
 	stream->next_track = false;
-	return snd_compress_wait_for_drain(stream);
+	retval = snd_compress_wait_for_drain(stream);
+	stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+	return retval;
 }
 
 static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
@@ -857,6 +900,9 @@
 		.write =	snd_compr_write,
 		.read =		snd_compr_read,
 		.unlocked_ioctl = snd_compr_ioctl,
+#ifdef CONFIG_COMPAT
+		.compat_ioctl =	snd_compr_ioctl,
+#endif
 		.mmap =		snd_compr_mmap,
 		.poll =		snd_compr_poll,
 };
@@ -871,7 +917,7 @@
 		return -EBADFD;
 	compr = device->device_data;
 
-	sprintf(str, "comprC%iD%i", compr->card->number, compr->device);
+	snprintf(str, sizeof(str), "comprC%iD%i", compr->card->number, compr->device);
 	pr_debug("reg %s for device %s, direction %d\n", str, compr->name,
 			compr->direction);
 	/* register compressed device */
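Summarizing the compress-offload state machine after these hunks: writes are accepted in SETUP, PREPARED and RUNNING; a write while PAUSED reports 0 bytes consumed; PAUSE is also legal while DRAINING; DRAIN, PARTIAL_DRAIN and STOP return the stream to SETUP; and closing a RUNNING, DRAINING or PAUSED stream triggers STOP first. A hedged userspace sketch (the device path is illustrative):

	int fd = open("/dev/snd/comprC0D0", O_WRONLY);

	ioctl(fd, SNDRV_COMPRESS_SET_PARAMS, &params);	/* -> SETUP */
	write(fd, buf, n);				/* -> PREPARED */
	write(fd, buf, n);		/* second pre-START write now allowed */
	ioctl(fd, SNDRV_COMPRESS_START);		/* -> RUNNING */
	ioctl(fd, SNDRV_COMPRESS_PAUSE);		/* -> PAUSED */
	write(fd, buf, n);		/* returns 0: nothing consumed */
	ioctl(fd, SNDRV_COMPRESS_RESUME);	/* -> RUNNING, wakes sleepers */
	ioctl(fd, SNDRV_COMPRESS_DRAIN);	/* blocks, then -> SETUP */
	close(fd);			/* close while active triggers STOP */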
diff --git a/sound/core/effects_offload.c b/sound/core/effects_offload.c
new file mode 100644
index 0000000..def0a89
--- /dev/null
+++ b/sound/core/effects_offload.c
@@ -0,0 +1,307 @@
+/*
+ *  effects_offload.c - effects offload core
+ *
+ *  Copyright (C) 2013 Intel Corporation
+ *  Authors:	Lakshmi N Vinnakota <lakshmi.n.vinnakota@intel.com>
+ *		Vinod Koul <vinod.koul@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+#define FORMAT(fmt) "%s: %d: " fmt, __func__, __LINE__
+#define pr_fmt(fmt) KBUILD_MODNAME ": " FORMAT(fmt)
+
+#include <linux/module.h>
+#include <linux/uio.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/effect_offload.h>
+#include <sound/effect_driver.h>
+
+static DEFINE_MUTEX(effect_mutex);
+
+int snd_ctl_effect_create(struct snd_card *card, void *arg)
+{
+	int retval = 0;
+	struct snd_effect *effect;
+
+	effect = kmalloc(sizeof(*effect), GFP_KERNEL);
+	if (!effect)
+		return -ENOMEM;
+	if (copy_from_user(effect, (void __user *)arg, sizeof(*effect))) {
+		retval = -EFAULT;
+		goto out;
+	}
+	pr_debug("effect_offload: device %d, pos %d, mode%d\n",
+			effect->device, effect->pos, effect->mode);
+
+	mutex_lock(&card->effect_lock);
+	retval = card->effect_ops->create(card, effect);
+	mutex_unlock(&card->effect_lock);
+out:
+	kfree(effect);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(snd_ctl_effect_create);
+
+int snd_ctl_effect_destroy(struct snd_card *card, void *arg)
+{
+	int retval = 0;
+	struct snd_effect *effect;
+
+	effect = kmalloc(sizeof(*effect), GFP_KERNEL);
+	if (!effect)
+		return -ENOMEM;
+	if (copy_from_user(effect, (void __user *)arg, sizeof(*effect))) {
+		retval = -EFAULT;
+		goto out;
+	}
+	mutex_lock(&card->effect_lock);
+	retval = card->effect_ops->destroy(card, effect);
+	mutex_unlock(&card->effect_lock);
+out:
+	kfree(effect);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(snd_ctl_effect_destroy);
+
+int snd_ctl_effect_set_params(struct snd_card *card, void *arg)
+{
+	int retval = 0;
+	struct snd_effect_params *params;
+	char __user *argp = (char __user *)arg;
+	char __user *bufp;
+
+	params = kmalloc(sizeof(*params), GFP_KERNEL);
+	if (!params)
+		return -ENOMEM;
+
+	if (copy_from_user(params, argp, sizeof(*params))) {
+		retval = -EFAULT;
+		goto out;
+	}
+	bufp = params->buffer;
+	params->buffer = kmalloc(params->size, GFP_KERNEL);
+	if (!params->buffer) {
+		retval = -ENOMEM;
+		goto out;
+	}
+
+	if (copy_from_user((void *)params->buffer, bufp, params->size)) {
+		retval = -EFAULT;
+		goto free_buf;
+	}
+
+	mutex_lock(&card->effect_lock);
+	retval = card->effect_ops->set_params(card, params);
+	mutex_unlock(&card->effect_lock);
+free_buf:
+	kfree(params->buffer);
+out:
+	kfree(params);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(snd_ctl_effect_set_params);
+
+int snd_ctl_effect_get_params(struct snd_card *card, void *arg)
+{
+	int retval = 0;
+	struct snd_effect_params inparams;
+	struct snd_effect_params *outparams;
+	unsigned int offset;
+	char __user *argp = (char __user *)arg;
+
+	if (copy_from_user((void *)&inparams, argp, sizeof(inparams)))
+		return -EFAULT;
+
+	outparams = kmalloc(sizeof(*outparams), GFP_KERNEL);
+	if (!outparams)
+		return -ENOMEM;
+
+	memcpy(outparams, &inparams, sizeof(inparams));
+	outparams->buffer = kmalloc(inparams.size, GFP_KERNEL);
+	if (!outparams->buffer) {
+		retval = -ENOMEM;
+		goto free_out;
+	}
+
+	if (copy_from_user((void *)outparams->buffer, inparams.buffer,
+							inparams.size)) {
+		retval = -EFAULT;
+		goto free_buf;
+	}
+
+	mutex_lock(&card->effect_lock);
+	retval = card->effect_ops->get_params(card, outparams);
+	mutex_unlock(&card->effect_lock);
+
+	if (retval)
+		goto free_buf;
+
+	if (!outparams->size)
+		goto free_buf;
+
+	if (outparams->size > inparams.size) {
+		pr_err("mem insufficient to copy\n");
+		retval = -EMSGSIZE;
+		goto free_buf;
+	} else {
+		offset = offsetof(struct snd_effect_params, size);
+		if (copy_to_user((argp + offset), (void *)&outparams->size,
+								sizeof(u32)))
+			retval = -EFAULT;
+
+		if (copy_to_user(inparams.buffer, outparams->buffer,
+							outparams->size))
+			retval = -EFAULT;
+	}
+free_buf:
+	kfree(outparams->buffer);
+free_out:
+	kfree(outparams);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(snd_ctl_effect_get_params);
+
+int snd_ctl_effect_query_num_effects(struct snd_card *card, void *arg)
+{
+	int retval = 0;
+	int __user *ip = arg;
+
+	mutex_lock(&card->effect_lock);
+	retval = card->effect_ops->query_num_effects(card);
+	mutex_unlock(&card->effect_lock);
+
+	if (retval < 0)
+		goto out;
+	retval = put_user(retval, ip) ? -EFAULT : 0;
+out:
+	return retval;
+}
+EXPORT_SYMBOL_GPL(snd_ctl_effect_query_num_effects);
+
+int snd_ctl_effect_query_effect_caps(struct snd_card *card, void *arg)
+{
+	int retval = 0;
+	struct snd_effect_caps *caps;
+	unsigned int offset, insize;
+	char __user *argp = (char __user *)arg;
+	char __user *bufp;
+
+	caps = kmalloc(sizeof(*caps), GFP_KERNEL);
+	if (!caps)
+		return -ENOMEM;
+
+	if (copy_from_user(caps, argp, sizeof(*caps))) {
+		retval = -EFAULT;
+		goto out;
+	}
+	bufp = caps->buffer;
+	insize = caps->size;
+	caps->buffer = kmalloc(caps->size, GFP_KERNEL);
+	if (!caps->buffer) {
+		retval = -ENOMEM;
+		goto out;
+	}
+
+	mutex_lock(&card->effect_lock);
+	retval = card->effect_ops->query_effect_caps(card, caps);
+	mutex_unlock(&card->effect_lock);
+
+	if (retval)
+		goto free_buf;
+
+	if (insize < caps->size) {
+		pr_err("mem insufficient to copy\n");
+		retval = -EMSGSIZE;
+		goto free_buf;
+	}
+
+	offset = offsetof(struct snd_effect_caps, size);
+	if (copy_to_user((argp + offset), (void *)&caps->size, sizeof(u32))) {
+		retval = -EFAULT;
+		goto free_buf;
+	}
+
+	if (copy_to_user(bufp, caps->buffer, caps->size))
+		retval = -EFAULT;
+
+free_buf:
+	kfree(caps->buffer);
+out:
+	kfree(caps);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(snd_ctl_effect_query_effect_caps);
+
+/**
+ * snd_effect_register - register effect ops with a card
+ *
+ * @card : snd card to which the effect is registered
+ * @ops : effect_ops to register
+ */
+int snd_effect_register(struct snd_card *card, struct snd_effect_ops *ops)
+{
+	if (card == NULL || ops == NULL)
+		return -EINVAL;
+
+	if (snd_BUG_ON(!ops->create))
+		return -EINVAL;
+	if (snd_BUG_ON(!ops->destroy))
+		return -EINVAL;
+	if (snd_BUG_ON(!ops->set_params))
+		return -EINVAL;
+
+	mutex_init(&card->effect_lock);
+
+	pr_debug("Registering Effects to card %s\n", card->shortname);
+	/* register the effect ops with the card */
+	mutex_lock(&effect_mutex);
+	card->effect_ops = ops;
+	mutex_unlock(&effect_mutex);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_effect_register);
+
+int snd_effect_deregister(struct snd_card *card)
+{
+	pr_debug("Removing effects for card %s\n", card->shortname);
+	mutex_lock(&effect_mutex);
+	card->effect_ops = NULL;
+	mutex_unlock(&effect_mutex);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_effect_deregister);
+
+static int __init snd_effect_init(void)
+{
+	return 0;
+}
+
+static void __exit snd_effect_exit(void)
+{
+}
+
+module_init(snd_effect_init);
+module_exit(snd_effect_exit);
+
+MODULE_DESCRIPTION("ALSA Effect offload framework");
+MODULE_AUTHOR("Lakshmi N Vinnakota <lakshmi.n.vinnakota@intel.com>");
+MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 9e675c7..23f39d2 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -45,7 +45,7 @@
 source "sound/soc/nuc900/Kconfig"
 source "sound/soc/omap/Kconfig"
 source "sound/soc/kirkwood/Kconfig"
-source "sound/soc/mid-x86/Kconfig"
+source "sound/soc/intel/Kconfig"
 source "sound/soc/mxs/Kconfig"
 source "sound/soc/pxa/Kconfig"
 source "sound/soc/samsung/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index 197b6ae..25201f53 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -20,7 +20,7 @@
 obj-$(CONFIG_SND_SOC)	+= dwc/
 obj-$(CONFIG_SND_SOC)	+= fsl/
 obj-$(CONFIG_SND_SOC)	+= jz4740/
-obj-$(CONFIG_SND_SOC)	+= mid-x86/
+obj-$(CONFIG_SND_SOC)	+= intel/
 obj-$(CONFIG_SND_SOC)	+= mxs/
 obj-$(CONFIG_SND_SOC)	+= nuc900/
 obj-$(CONFIG_SND_SOC)	+= omap/
diff --git a/sound/soc/codecs/sn95031.c b/sound/soc/codecs/sn95031.c
index d1ae869d..fc12d6e 100644
--- a/sound/soc/codecs/sn95031.c
+++ b/sound/soc/codecs/sn95031.c
@@ -30,7 +30,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 
-#include <asm/intel_scu_ipc.h>
+#include <asm/intel_scu_pmic.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index cda3cf2..50880af 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -16,6 +16,7 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/pm.h>
+#include <linux/gcd.h>
 #include <linux/i2c.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
@@ -93,9 +94,9 @@
 
 static const struct wm8958_micd_rate micdet_rates[] = {
 	{ 32768,       true,  1, 4 },
-	{ 32768,       false, 1, 1 },
+	{ 32768,       false, 1, 0 },
 	{ 44100 * 256, true,  7, 10 },
-	{ 44100 * 256, false, 7, 10 },
+	{ 44100 * 256, false, 7, 9 },
 };
 
 static const struct wm8958_micd_rate jackdet_rates[] = {
@@ -539,13 +540,13 @@
 static const struct snd_kcontrol_new wm8994_snd_controls[] = {
 SOC_DOUBLE_R_TLV("AIF1ADC1 Volume", WM8994_AIF1_ADC1_LEFT_VOLUME,
 		 WM8994_AIF1_ADC1_RIGHT_VOLUME,
-		 1, 119, 0, digital_tlv),
+		 1, 120, 0, digital_tlv),
 SOC_DOUBLE_R_TLV("AIF1ADC2 Volume", WM8994_AIF1_ADC2_LEFT_VOLUME,
 		 WM8994_AIF1_ADC2_RIGHT_VOLUME,
-		 1, 119, 0, digital_tlv),
+		 1, 120, 0, digital_tlv),
 SOC_DOUBLE_R_TLV("AIF2ADC Volume", WM8994_AIF2_ADC_LEFT_VOLUME,
 		 WM8994_AIF2_ADC_RIGHT_VOLUME,
-		 1, 119, 0, digital_tlv),
+		 1, 120, 0, digital_tlv),
 
 SOC_ENUM("AIF1ADCL Source", aif1adcl_src),
 SOC_ENUM("AIF1ADCR Source", aif1adcr_src),
@@ -607,12 +608,12 @@
 SOC_ENUM("DAC OSR", dac_osr),
 
 SOC_DOUBLE_R_TLV("DAC1 Volume", WM8994_DAC1_LEFT_VOLUME,
-		 WM8994_DAC1_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
+		 WM8994_DAC1_RIGHT_VOLUME, 1, 112, 0, digital_tlv),
 SOC_DOUBLE_R("DAC1 Switch", WM8994_DAC1_LEFT_VOLUME,
 	     WM8994_DAC1_RIGHT_VOLUME, 9, 1, 1),
 
 SOC_DOUBLE_R_TLV("DAC2 Volume", WM8994_DAC2_LEFT_VOLUME,
-		 WM8994_DAC2_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
+		 WM8994_DAC2_RIGHT_VOLUME, 1, 112, 0, digital_tlv),
 SOC_DOUBLE_R("DAC2 Switch", WM8994_DAC2_LEFT_VOLUME,
 	     WM8994_DAC2_RIGHT_VOLUME, 9, 1, 1),
 
@@ -638,6 +639,11 @@
 	       10, 15, 0, wm8994_3d_tlv),
 SOC_SINGLE("AIF2DAC 3D Stereo Switch", WM8994_AIF2_DAC_FILTERS_2,
 	   8, 1, 0),
+
+SOC_SINGLE_TLV("MIXINL MIXOUTL Volume", WM8994_INPUT_MIXER_3, 0, 7, 0,
+	       mixin_boost_tlv),
+SOC_SINGLE_TLV("MIXINR MIXOUTR Volume", WM8994_INPUT_MIXER_4, 0, 7, 0,
+	       mixin_boost_tlv),
 };
 
 static const struct snd_kcontrol_new wm8994_eq_controls[] = {
@@ -866,7 +872,7 @@
 					    WM8994_BIAS_SRC |
 					    WM8994_STARTUP_BIAS_ENA |
 					    WM8994_VMID_BUF_ENA |
-					    (0x2 << WM8994_VMID_RAMP_SHIFT));
+					    (0x3 << WM8994_VMID_RAMP_SHIFT));
 
 			/* Main bias enable, VMID=2x40k */
 			snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_1,
@@ -874,7 +880,14 @@
 					    WM8994_VMID_SEL_MASK,
 					    WM8994_BIAS_ENA | 0x2);
 
-			msleep(300);
+			/*
+			 * The 300ms delay was recommended to support pop-free
+			 * startup of the line output driver. As we don't use
+			 * that feature, reduce the delay to 50ms as recommended
+			 * in the spec, and change VMID_RAMP to soft fast start
+			 * accordingly. The same applies to the VMID_FORCE and
+			 * VMID dereference paths below.
+			 */
+			msleep(50);
 
 			snd_soc_update_bits(codec, WM8994_ANTIPOP_2,
 					    WM8994_VMID_RAMP_MASK |
@@ -893,15 +906,14 @@
 					    WM8994_BIAS_SRC |
 					    WM8994_STARTUP_BIAS_ENA |
 					    WM8994_VMID_BUF_ENA |
-					    (0x2 << WM8994_VMID_RAMP_SHIFT));
+					    (0x3 << WM8994_VMID_RAMP_SHIFT));
 
 			/* Main bias enable, VMID=2x40k */
 			snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_1,
 					    WM8994_BIAS_ENA |
 					    WM8994_VMID_SEL_MASK,
 					    WM8994_BIAS_ENA | 0x2);
-
-			msleep(400);
+			msleep(50);
 
 			snd_soc_update_bits(codec, WM8994_ANTIPOP_2,
 					    WM8994_VMID_RAMP_MASK |
@@ -946,7 +958,7 @@
 		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_1,
 				    WM8994_VMID_SEL_MASK, 0);
 
-		msleep(400);
+		msleep(50);
 
 		/* Active discharge */
 		snd_soc_update_bits(codec, WM8994_ANTIPOP_1,
@@ -969,7 +981,7 @@
 				    WM8994_VMID_RAMP_MASK, 0);
 
 		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_1,
-				    WM8994_VMID_SEL_MASK, 0);
+				    WM8994_BIAS_ENA | WM8994_VMID_SEL_MASK, 0);
 	}
 
 	pm_runtime_put(codec->dev);
@@ -1055,7 +1067,7 @@
 	switch (event) {
 	case SND_SOC_DAPM_PRE_PMU:
 		/* Don't enable timeslot 2 if not in use */
-		if (wm8994->channels[0] <= 2)
+		if ((wm8994->channels[0] <= 2) && (wm8994->slots <= 2))
 			mask &= ~(WM8994_AIF1DAC2L_ENA | WM8994_AIF1DAC2R_ENA);
 
 		val = snd_soc_read(codec, WM8994_AIF1_CONTROL_1);
@@ -1331,6 +1343,26 @@
 	struct snd_soc_codec *codec = w->codec;
 	unsigned int mask = 1 << w->shift;
 
+	/* Don't propagate FIFO errors unless the DAC is running */
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		/* Clear FIFO error status */
+		snd_soc_update_bits(codec, WM8994_INTERRUPT_STATUS_2,
+				    WM8994_FIFOS_ERR_EINT_MASK,
+				    1 << WM8994_FIFOS_ERR_EINT_SHIFT);
+		/* Unmask FIFO error interrupts */
+		snd_soc_update_bits(codec, WM8994_INTERRUPT_STATUS_2_MASK,
+				    WM8994_IM_FIFOS_ERR_EINT_MASK,
+				    0 << WM8994_IM_FIFOS_ERR_EINT_SHIFT);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		/* Mask FIFO error interrupts */
+		snd_soc_update_bits(codec, WM8994_INTERRUPT_STATUS_2_MASK,
+				    WM8994_IM_FIFOS_ERR_EINT_MASK,
+				    1 << WM8994_IM_FIFOS_ERR_EINT_SHIFT);
+		break;
+	}
+
 	snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
 			    mask, mask);
 	return 0;
@@ -1498,6 +1530,24 @@
 	"AIF1DACDAT", "AIF3DACDAT",
 };
 
+static const char *loopback_text[] = {
+	"None", "ADCDAT",
+};
+
+static const struct soc_enum aif1_loopback_enum =
+	SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, WM8994_AIF1_LOOPBACK_SHIFT, 2,
+			loopback_text);
+
+static const struct snd_kcontrol_new aif1_loopback =
+	SOC_DAPM_ENUM("AIF1 Loopback", aif1_loopback_enum);
+
+static const struct soc_enum aif2_loopback_enum =
+	SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, WM8994_AIF2_LOOPBACK_SHIFT, 2,
+			loopback_text);
+
+static const struct snd_kcontrol_new aif2_loopback =
+	SOC_DAPM_ENUM("AIF2 Loopback", aif2_loopback_enum);
+
 static const struct soc_enum aif1dac_enum =
 	SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 0, 2, aif1dac_text);
 
@@ -1616,13 +1666,17 @@
 
 static const struct snd_soc_dapm_widget wm8994_dac_revd_widgets[] = {
 SND_SOC_DAPM_DAC_E("DAC2L", NULL, SND_SOC_NOPM, 3, 0,
-	dac_ev, SND_SOC_DAPM_PRE_PMU),
+	dac_ev, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+	SND_SOC_DAPM_PRE_PMD),
 SND_SOC_DAPM_DAC_E("DAC2R", NULL, SND_SOC_NOPM, 2, 0,
-	dac_ev, SND_SOC_DAPM_PRE_PMU),
+	dac_ev, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+	SND_SOC_DAPM_PRE_PMD),
 SND_SOC_DAPM_DAC_E("DAC1L", NULL, SND_SOC_NOPM, 1, 0,
-	dac_ev, SND_SOC_DAPM_PRE_PMU),
+	dac_ev, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+	SND_SOC_DAPM_PRE_PMD),
 SND_SOC_DAPM_DAC_E("DAC1R", NULL, SND_SOC_NOPM, 0, 0,
-	dac_ev, SND_SOC_DAPM_PRE_PMU),
+	dac_ev, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+	SND_SOC_DAPM_PRE_PMD),
 };
 
 static const struct snd_soc_dapm_widget wm8994_dac_widgets[] = {
@@ -1633,15 +1687,15 @@
 };
 
 static const struct snd_soc_dapm_widget wm8994_adc_revd_widgets[] = {
-SND_SOC_DAPM_VIRT_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
+SND_SOC_DAPM_VIRT_MUX_E("ADCL Mux", SND_SOC_NOPM, 1, 0, &adcl_mux,
 			adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
-SND_SOC_DAPM_VIRT_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
+SND_SOC_DAPM_VIRT_MUX_E("ADCR Mux", SND_SOC_NOPM, 0, 0, &adcr_mux,
 			adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
 };
 
 static const struct snd_soc_dapm_widget wm8994_adc_widgets[] = {
-SND_SOC_DAPM_VIRT_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
-SND_SOC_DAPM_VIRT_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
+SND_SOC_DAPM_VIRT_MUX("ADCL Mux", SND_SOC_NOPM, 1, 0, &adcl_mux),
+SND_SOC_DAPM_VIRT_MUX("ADCR Mux", SND_SOC_NOPM, 0, 0, &adcr_mux),
 };
 
 static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = {
@@ -1737,12 +1791,11 @@
 SND_SOC_DAPM_ADC("DMIC1L", NULL, WM8994_POWER_MANAGEMENT_4, 3, 0),
 SND_SOC_DAPM_ADC("DMIC1R", NULL, WM8994_POWER_MANAGEMENT_4, 2, 0),
 
-/* Power is done with the muxes since the ADC power also controls the
- * downsampling chain, the chip will automatically manage the analogue
- * specific portions.
- */
-SND_SOC_DAPM_ADC("ADCL", NULL, SND_SOC_NOPM, 1, 0),
-SND_SOC_DAPM_ADC("ADCR", NULL, SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_ADC("ADCL", NULL, WM8994_POWER_MANAGEMENT_4, 1, 0),
+SND_SOC_DAPM_ADC("ADCR", NULL, WM8994_POWER_MANAGEMENT_4, 0, 0),
+
+SND_SOC_DAPM_MUX("AIF1 Loopback", SND_SOC_NOPM, 0, 0, &aif1_loopback),
+SND_SOC_DAPM_MUX("AIF2 Loopback", SND_SOC_NOPM, 0, 0, &aif2_loopback),
 
 SND_SOC_DAPM_POST("Debug log", post_ev),
 };
@@ -1875,9 +1928,9 @@
 	{ "AIF1DAC2L", NULL, "AIF1DAC Mux" },
 	{ "AIF1DAC2R", NULL, "AIF1DAC Mux" },
 
-	{ "AIF1DAC Mux", "AIF1DACDAT", "AIF1DACDAT" },
+	{ "AIF1DAC Mux", "AIF1DACDAT", "AIF1 Loopback" },
 	{ "AIF1DAC Mux", "AIF3DACDAT", "AIF3DACDAT" },
-	{ "AIF2DAC Mux", "AIF2DACDAT", "AIF2DACDAT" },
+	{ "AIF2DAC Mux", "AIF2DACDAT", "AIF2 Loopback" },
 	{ "AIF2DAC Mux", "AIF3DACDAT", "AIF3DACDAT" },
 	{ "AIF2ADC Mux", "AIF2ADCDAT", "AIF2ADCL" },
 	{ "AIF2ADC Mux", "AIF2ADCDAT", "AIF2ADCR" },
@@ -1928,6 +1981,12 @@
 	{ "AIF3ADCDAT", "AIF2DACDAT", "AIF2DACL" },
 	{ "AIF3ADCDAT", "AIF2DACDAT", "AIF2DACR" },
 
+	/* Loopback */
+	{ "AIF1 Loopback", "ADCDAT", "AIF1ADCDAT" },
+	{ "AIF1 Loopback", "None", "AIF1DACDAT" },
+	{ "AIF2 Loopback", "ADCDAT", "AIF2ADCDAT" },
+	{ "AIF2 Loopback", "None", "AIF2DACDAT" },
+
 	/* Sidetone */
 	{ "Left Sidetone", "ADC/DMIC1", "ADCL Mux" },
 	{ "Left Sidetone", "DMIC2", "DMIC2L" },
@@ -2010,15 +2069,16 @@
 	u16 outdiv;
 	u16 n;
 	u16 k;
+	u16 lambda;
 	u16 clk_ref_div;
 	u16 fll_fratio;
 };
 
-static int wm8994_get_fll_config(struct fll_div *fll,
+static int wm8994_get_fll_config(struct wm8994 *control, struct fll_div *fll,
 				 int freq_in, int freq_out)
 {
 	u64 Kpart;
-	unsigned int K, Ndiv, Nmod;
+	unsigned int K, Ndiv, Nmod, gcd_fll;
 
 	pr_debug("FLL input=%dHz, output=%dHz\n", freq_in, freq_out);
 
@@ -2067,20 +2127,32 @@
 	Nmod = freq_out % freq_in;
 	pr_debug("Nmod=%d\n", Nmod);
 
-	/* Calculate fractional part - scale up so we can round. */
-	Kpart = FIXED_FLL_SIZE * (long long)Nmod;
+	switch (control->type) {
+	case WM8994:
+		/* Calculate fractional part - scale up so we can round. */
+		Kpart = FIXED_FLL_SIZE * (long long)Nmod;
 
-	do_div(Kpart, freq_in);
+		do_div(Kpart, freq_in);
 
-	K = Kpart & 0xFFFFFFFF;
+		K = Kpart & 0xFFFFFFFF;
 
-	if ((K % 10) >= 5)
-		K += 5;
+		if ((K % 10) >= 5)
+			K += 5;
 
-	/* Move down to proper range now rounding is done */
-	fll->k = K / 10;
+		/* Move down to proper range now rounding is done */
+		fll->k = K / 10;
+		fll->lambda = 0;
 
-	pr_debug("N=%x K=%x\n", fll->n, fll->k);
+		pr_debug("N=%x K=%x\n", fll->n, fll->k);
+		break;
+
+	default:
+		/* Express the remainder exactly in lowest terms as K/LAMBDA */
+		gcd_fll = gcd(freq_out, freq_in);
+
+		fll->k = (freq_out - (freq_in * fll->n)) / gcd_fll;
+		fll->lambda = freq_in / gcd_fll;
+		break;
+	}
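+	/*
+	 * Illustrative check of the K/LAMBDA math above (hypothetical
+	 * numbers): with freq_in = 32768Hz and freq_out = 90316800Hz,
+	 * n = 2756 with a remainder of 8192Hz; gcd(90316800, 32768) = 8192,
+	 * so k = 1 and lambda = 4, i.e. an exact ratio of 2756 + 1/4.
+	 */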
 
 	return 0;
 }
@@ -2144,9 +2216,9 @@
 	 * analysis bugs spewing warnings.
 	 */
 	if (freq_out)
-		ret = wm8994_get_fll_config(&fll, freq_in, freq_out);
+		ret = wm8994_get_fll_config(control, &fll, freq_in, freq_out);
 	else
-		ret = wm8994_get_fll_config(&fll, wm8994->fll[id].in,
+		ret = wm8994_get_fll_config(control, &fll, wm8994->fll[id].in,
 					    wm8994->fll[id].out);
 	if (ret < 0)
 		return ret;
@@ -2191,6 +2263,17 @@
 			    WM8994_FLL1_N_MASK,
 			    fll.n << WM8994_FLL1_N_SHIFT);
 
+	if (fll.lambda) {
+		snd_soc_update_bits(codec, WM8958_FLL1_EFS_1 + reg_offset,
+				    WM8958_FLL1_LAMBDA_MASK,
+				    fll.lambda);
+		snd_soc_update_bits(codec, WM8958_FLL1_EFS_2 + reg_offset,
+				    WM8958_FLL1_EFS_ENA, WM8958_FLL1_EFS_ENA);
+	} else {
+		snd_soc_update_bits(codec, WM8958_FLL1_EFS_2 + reg_offset,
+				    WM8958_FLL1_EFS_ENA, 0);
+	}
+
 	snd_soc_update_bits(codec, WM8994_FLL1_CONTROL_5 + reg_offset,
 			    WM8994_FLL1_FRC_NCO | WM8958_FLL1_BYP |
 			    WM8994_FLL1_REFCLK_DIV_MASK |
@@ -2271,7 +2354,8 @@
 	 * If SYSCLK will be less than 50kHz adjust AIFnCLK dividers
 	 * for detection.
 	 */
-	if (max(wm8994->aifclk[0], wm8994->aifclk[1]) < 50000) {
+	if (max(wm8994->aifclk[0], wm8994->aifclk[1]) < 50000 &&
+		!wm8994->aifdiv[0]) {
 		dev_dbg(codec->dev, "Configuring AIFs for 128fs\n");
 
 		wm8994->aifdiv[0] = snd_soc_read(codec, WM8994_AIF1_RATE)
@@ -2555,17 +2639,24 @@
 	struct wm8994 *control = wm8994->wm8994;
 	int ms_reg;
 	int aif1_reg;
+	int dac_reg;
+	int adc_reg;
 	int ms = 0;
 	int aif1 = 0;
+	int lrclk = 0;
 
 	switch (dai->id) {
 	case 1:
 		ms_reg = WM8994_AIF1_MASTER_SLAVE;
 		aif1_reg = WM8994_AIF1_CONTROL_1;
+		dac_reg = WM8994_AIF1DAC_LRCLK;
+		adc_reg = WM8994_AIF1ADC_LRCLK;
 		break;
 	case 2:
 		ms_reg = WM8994_AIF2_MASTER_SLAVE;
 		aif1_reg = WM8994_AIF2_CONTROL_1;
+		dac_reg = WM8994_AIF2DAC_LRCLK;
+		adc_reg = WM8994_AIF2ADC_LRCLK;
 		break;
 	default:
 		return -EINVAL;
@@ -2584,6 +2675,7 @@
 	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
 	case SND_SOC_DAIFMT_DSP_B:
 		aif1 |= WM8994_AIF1_LRCLK_INV;
+		lrclk |= WM8958_AIF1_LRCLK_INV;
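+		/* fall through */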
 	case SND_SOC_DAIFMT_DSP_A:
 		aif1 |= 0x18;
 		break;
@@ -2622,12 +2714,14 @@
 			break;
 		case SND_SOC_DAIFMT_IB_IF:
 			aif1 |= WM8994_AIF1_BCLK_INV | WM8994_AIF1_LRCLK_INV;
+			lrclk |= WM8958_AIF1_LRCLK_INV;
 			break;
 		case SND_SOC_DAIFMT_IB_NF:
 			aif1 |= WM8994_AIF1_BCLK_INV;
 			break;
 		case SND_SOC_DAIFMT_NB_IF:
 			aif1 |= WM8994_AIF1_LRCLK_INV;
+			lrclk |= WM8958_AIF1_LRCLK_INV;
 			break;
 		default:
 			return -EINVAL;
@@ -2658,6 +2752,10 @@
 			    aif1);
 	snd_soc_update_bits(codec, ms_reg, WM8994_AIF1_MSTR,
 			    ms);
+	snd_soc_update_bits(codec, dac_reg,
+			    WM8958_AIF1_LRCLK_INV, lrclk);
+	snd_soc_update_bits(codec, adc_reg,
+			    WM8958_AIF1_LRCLK_INV, lrclk);
 
 	return 0;
 }
@@ -2706,9 +2804,40 @@
 	int lrclk = 0;
 	int rate_val = 0;
 	int id = dai->id - 1;
+	struct snd_pcm_hw_params hw_params;
 
 	int i, cur_val, best_val, bclk_rate, best;
 
+	if (!params)
+		return -EINVAL;
+
+	memcpy(&hw_params, params, sizeof(*params));
+
+	/* If platform data supplies a custom config, override the params */
+	if (pdata->custom_cfg) {
+		dev_dbg(codec->dev, "%s: overriding with custom params\n",
+							__func__);
+
+		snd_mask_none(hw_param_mask(&hw_params,
+					SNDRV_PCM_HW_PARAM_FORMAT));
+		snd_mask_set(hw_param_mask(&hw_params,
+					SNDRV_PCM_HW_PARAM_FORMAT),
+					pdata->custom_cfg->format);
+
+		hw_param_interval(&hw_params, SNDRV_PCM_HW_PARAM_RATE)->min =
+						pdata->custom_cfg->rate;
+		hw_param_interval(&hw_params, SNDRV_PCM_HW_PARAM_RATE)->max =
+						pdata->custom_cfg->rate;
+
+		hw_param_interval(&hw_params,
+					SNDRV_PCM_HW_PARAM_CHANNELS)->min =
+						pdata->custom_cfg->channels;
+		hw_param_interval(&hw_params,
+					SNDRV_PCM_HW_PARAM_CHANNELS)->max =
+						pdata->custom_cfg->channels;
+	}
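+	/*
+	 * From here on the local hw_params copy (possibly overridden above)
+	 * is used instead of the caller's params, so a custom config feeds
+	 * every rate, format and divider calculation below.
+	 */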
+
 	switch (dai->id) {
 	case 1:
 		aif1_reg = WM8994_AIF1_CONTROL_1;
@@ -2740,8 +2869,9 @@
 		return -EINVAL;
 	}
 
-	bclk_rate = params_rate(params);
-	switch (params_format(params)) {
+	bclk_rate = params_rate(&hw_params);
+
+	switch (params_format(&hw_params)) {
 	case SNDRV_PCM_FORMAT_S16_LE:
 		bclk_rate *= 16;
 		break;
@@ -2761,7 +2891,7 @@
 		return -EINVAL;
 	}
 
-	wm8994->channels[id] = params_channels(params);
+	wm8994->channels[id] = params_channels(&hw_params);
 	if (pdata->max_channels_clocked[id] &&
 	    wm8994->channels[id] > pdata->max_channels_clocked[id]) {
 		dev_dbg(dai->dev, "Constraining channels to %d from %d\n",
@@ -2781,7 +2911,7 @@
 
 	/* Try to find an appropriate sample rate; look for an exact match. */
 	for (i = 0; i < ARRAY_SIZE(srs); i++)
-		if (srs[i].rate == params_rate(params))
+		if (srs[i].rate == params_rate(&hw_params))
 			break;
 	if (i == ARRAY_SIZE(srs))
 		return -EINVAL;
@@ -2802,10 +2932,10 @@
 
 	/* AIFCLK/fs ratio; look for a close match in either direction */
 	best = 0;
-	best_val = abs((fs_ratios[0] * params_rate(params))
+	best_val = abs((fs_ratios[0] * params_rate(&hw_params))
 		       - wm8994->aifclk[id]);
 	for (i = 1; i < ARRAY_SIZE(fs_ratios); i++) {
-		cur_val = abs((fs_ratios[i] * params_rate(params))
+		cur_val = abs((fs_ratios[i] * params_rate(&hw_params))
 			      - wm8994->aifclk[id]);
 		if (cur_val >= best_val)
 			continue;
@@ -2833,7 +2963,7 @@
 		bclk_divs[best], bclk_rate);
 	bclk |= best << WM8994_AIF1_BCLK_DIV_SHIFT;
 
-	lrclk = bclk_rate / params_rate(params);
+	lrclk = bclk_rate / params_rate(&hw_params);
 	if (!lrclk) {
 		dev_err(dai->dev, "Unable to generate LRCLK from %dHz BCLK\n",
 			bclk_rate);
@@ -2853,12 +2983,12 @@
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
 		switch (dai->id) {
 		case 1:
-			wm8994->dac_rates[0] = params_rate(params);
+			wm8994->dac_rates[0] = params_rate(&hw_params);
 			wm8994_set_retune_mobile(codec, 0);
 			wm8994_set_retune_mobile(codec, 1);
 			break;
 		case 2:
-			wm8994->dac_rates[1] = params_rate(params);
+			wm8994->dac_rates[1] = params_rate(&hw_params);
 			wm8994_set_retune_mobile(codec, 2);
 			break;
 		}
@@ -2911,6 +3041,13 @@
 	return snd_soc_update_bits(codec, aif1_reg, WM8994_AIF1_WL_MASK, aif1);
 }
 
+#if IS_ENABLED(CONFIG_SND_MRFLD_MACHINE) || \
+	IS_ENABLED(CONFIG_SND_MOOR_MACHINE)
+static int wm8994_aif_mute(struct snd_soc_dai *codec_dai, int mute)
+{
+	return 0;
+}
+#else
 static int wm8994_aif_mute(struct snd_soc_dai *codec_dai, int mute)
 {
 	struct snd_soc_codec *codec = codec_dai->codec;
@@ -2937,6 +3074,7 @@
 
 	return 0;
 }
+#endif
 
 static int wm8994_set_tristate(struct snd_soc_dai *codec_dai, int tristate)
 {
@@ -2964,6 +3102,25 @@
 	return snd_soc_update_bits(codec, reg, mask, val);
 }
 
+static int wm8994_set_tdm_slots(struct snd_soc_dai *dai,
+	unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	struct wm8994 *control = wm8994->wm8994;
+
+	switch (control->type) {
+	case WM8958:
+		wm8994->slots = slots;
+		break;
+	default:
+		pr_err("we dont support tdm for non 8958!");
+		return -EINVAL;
+		break;
+	}
+	return 0;
+}
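+
+/*
+ * Machine drivers reach this through snd_soc_dai_set_tdm_slot(); the
+ * Merrifield boards in this patch pass 4 slots so that the AIF1 timeslot-2
+ * DACs stay enabled when extra slots are in use (see the channels/slots
+ * check added to the AIF1 clocking event earlier in this patch).
+ */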
+
 static int wm8994_aif2_probe(struct snd_soc_dai *dai)
 {
 	struct snd_soc_codec *codec = dai->codec;
@@ -2991,6 +3148,7 @@
 	.digital_mute	= wm8994_aif_mute,
 	.set_pll	= wm8994_set_fll,
 	.set_tristate	= wm8994_set_tristate,
+	.set_tdm_slot	= wm8994_set_tdm_slots,
 };
 
 static const struct snd_soc_dai_ops wm8994_aif2_dai_ops = {
@@ -3096,24 +3254,7 @@
 static int wm8994_codec_resume(struct snd_soc_codec *codec)
 {
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
-	struct wm8994 *control = wm8994->wm8994;
 	int i, ret;
-	unsigned int val, mask;
-
-	if (control->revision < 4) {
-		/* force a HW read */
-		ret = regmap_read(control->regmap,
-				  WM8994_POWER_MANAGEMENT_5, &val);
-
-		/* modify the cache only */
-		codec->cache_only = 1;
-		mask =  WM8994_DAC1R_ENA | WM8994_DAC1L_ENA |
-			WM8994_DAC2R_ENA | WM8994_DAC2L_ENA;
-		val &= mask;
-		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
-				    mask, val);
-		codec->cache_only = 0;
-	}
 
 	for (i = 0; i < ARRAY_SIZE(wm8994->fll); i++) {
 		if (!wm8994->fll_suspend[i].out)
@@ -3495,6 +3636,32 @@
 			    wm8994->btn_mask);
 }
 
+static void wm8958_open_circuit_work(struct work_struct *work)
+{
+	struct wm8994_priv *wm8994 = container_of(work,
+						  struct wm8994_priv,
+						  open_circuit_work.work);
+	struct device *dev = wm8994->wm8994->dev;
+
+	wm1811_micd_stop(wm8994->hubs.codec);
+
+	mutex_lock(&wm8994->accdet_lock);
+
+	dev_dbg(dev, "Reporting open circuit\n");
+
+	wm8994->jack_mic = false;
+	wm8994->mic_detecting = true;
+	wm8994->headphone_detected = false;
+
+	wm8958_micd_set_rate(wm8994->hubs.codec);
+
+	snd_soc_jack_report(wm8994->micdet[0].jack, 0,
+			    wm8994->btn_mask |
+			    SND_JACK_HEADSET);
+
+	mutex_unlock(&wm8994->accdet_lock);
+}
+
 static void wm8958_mic_id(void *data, u16 status)
 {
 	struct snd_soc_codec *codec = data;
@@ -3504,16 +3671,9 @@
 	if (!(status & WM8958_MICD_STS)) {
 		/* If nothing present then clear our statuses */
 		dev_dbg(codec->dev, "Detected open circuit\n");
-		wm8994->jack_mic = false;
-		wm8994->mic_detecting = true;
 
-		wm1811_micd_stop(codec);
-
-		wm8958_micd_set_rate(codec);
-
-		snd_soc_jack_report(wm8994->micdet[0].jack, 0,
-				    wm8994->btn_mask |
-				    SND_JACK_HEADSET);
+		schedule_delayed_work(&wm8994->open_circuit_work,
+				      msecs_to_jiffies(2500));
 		return;
 	}
 
@@ -3568,10 +3728,8 @@
 
 	dev_dbg(codec->dev, "Starting mic detection\n");
 
-	/* Use a user-supplied callback if we have one */
-	if (wm8994->micd_cb) {
-		wm8994->micd_cb(wm8994->micd_cb_data);
-	} else {
+	/* If there's a callback it'll be called out of the lock */
+	if (!wm8994->micd_cb) {
 		/*
 		 * Start off measument of microphone impedence to find out
 		 * what's actually there.
@@ -3585,6 +3743,10 @@
 
 	mutex_unlock(&wm8994->accdet_lock);
 
+	/* Custom callbacks may reasonably wish to take the same locks */
+	if (wm8994->micd_cb)
+		wm8994->micd_cb(wm8994->micd_cb_data);
+
 	pm_runtime_put(codec->dev);
 }
 
@@ -3596,8 +3758,12 @@
 	int reg, delay;
 	bool present;
 
+	cancel_delayed_work_sync(&wm8994->mic_work);
+
 	pm_runtime_get_sync(codec->dev);
 
+	cancel_delayed_work_sync(&wm8994->mic_complete_work);
+
 	mutex_lock(&wm8994->accdet_lock);
 
 	reg = snd_soc_read(codec, WM1811_JACKDET_CTRL);
@@ -3630,8 +3796,6 @@
 	} else {
 		dev_dbg(codec->dev, "Jack not detected\n");
 
-		cancel_delayed_work_sync(&wm8994->mic_work);
-
 		snd_soc_update_bits(codec, WM8958_MICBIAS2,
 				    WM8958_MICB2_DISCH, WM8958_MICB2_DISCH);
 
@@ -3720,6 +3884,7 @@
 		} else {
 			wm8994->mic_detecting = true;
 			wm8994->jack_mic = false;
+			wm8994->headphone_detected = false;
 		}
 
 		if (id_cb) {
@@ -3780,11 +3945,70 @@
 }
 EXPORT_SYMBOL_GPL(wm8958_mic_detect);
 
+int wm8958_micd_set_custom_rate(struct snd_soc_codec *codec,
+		wm8958_micd_set_custom_rate_cb micd_custom_rate_cb,
+		void *micd_custom_rate_cb_data)
+{
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+	if (micd_custom_rate_cb) {
+		wm8994->micd_custom_rate_cb = micd_custom_rate_cb;
+		wm8994->micd_custom_rate_cb_data = micd_custom_rate_cb_data;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(wm8958_micd_set_custom_rate);
+
+static void wm8958_mic_work(struct work_struct *work)
+{
+	struct wm8994_priv *wm8994 = container_of(work,
+						  struct wm8994_priv,
+						  mic_complete_work.work);
+	struct snd_soc_codec *codec = wm8994->hubs.codec;
+
+	dev_crit(codec->dev, "MIC WORK %x\n", wm8994->mic_status);
+
+	pm_runtime_get_sync(codec->dev);
+
+	mutex_lock(&wm8994->accdet_lock);
+
+	wm8994->mic_id_cb(wm8994->mic_id_cb_data, wm8994->mic_status);
+
+	mutex_unlock(&wm8994->accdet_lock);
+
+	pm_runtime_put(codec->dev);
+
+	dev_crit(codec->dev, "MIC WORK %x DONE\n", wm8994->mic_status);
+}
+
+static void wm8958_micd_set_custom_rate_work(struct work_struct *work)
+{
+	struct wm8994_priv *wm8994 = container_of(work,
+						  struct wm8994_priv,
+						  micd_set_custom_rate_work.work);
+	struct snd_soc_codec *codec = wm8994->hubs.codec;
+
+	dev_dbg(codec->dev, "%s: Set custom rates\n", __func__);
+
+	pm_runtime_get_sync(codec->dev);
+
+	mutex_lock(&wm8994->accdet_lock);
+
+	wm8994->micd_custom_rate_cb(wm8994->micd_custom_rate_cb_data);
+
+	mutex_unlock(&wm8994->accdet_lock);
+
+	pm_runtime_put(codec->dev);
+}
+
 static irqreturn_t wm8958_mic_irq(int irq, void *data)
 {
 	struct wm8994_priv *wm8994 = data;
 	struct snd_soc_codec *codec = wm8994->hubs.codec;
-	int reg, count, ret;
+	struct wm8994 *control = wm8994->wm8994;
+	int reg, count, ret, id_delay;
 
 	/*
 	 * Jack detection may have detected a removal simulataneously
@@ -3794,6 +4018,9 @@
 	if (!(snd_soc_read(codec, WM8958_MIC_DETECT_1) & WM8958_MICD_ENA))
 		return IRQ_HANDLED;
 
+	cancel_delayed_work_sync(&wm8994->mic_complete_work);
+	cancel_delayed_work_sync(&wm8994->open_circuit_work);
+
 	pm_runtime_get_sync(codec->dev);
 
 	/* We may occasionally read a detection without an impedence
@@ -3842,14 +4069,30 @@
 		snd_soc_jack_report(wm8994->micdet[0].jack, 0,
 				    SND_JACK_MECHANICAL | SND_JACK_HEADSET |
 				    wm8994->btn_mask);
+		wm8994->jack_mic = false;
+		wm8994->headphone_detected = false;
 		wm8994->mic_detecting = true;
 		goto out;
 	}
 
-	if (wm8994->mic_detecting)
-		wm8994->mic_id_cb(wm8994->mic_id_cb_data, reg);
-	else
+	wm8994->mic_status = reg;
+	id_delay = wm8994->wm8994->pdata.mic_id_delay;
+
+	if (wm8994->mic_detecting) {
+		if (control->type == WM8958) {
+			/*
+			 * Keep the mic bias high during the detection phase
+			 * (micb_en_delay); a field value of 0 means continuous.
+			 */
+			dev_dbg(codec->dev, "Set MICBIAS high for micb_en_delay time\n");
+			snd_soc_update_bits(codec, WM8958_MIC_DETECT_1,
+				    WM8958_MICD_BIAS_STARTTIME_MASK |
+				    WM8958_MICD_RATE_MASK, 0);
+		}
+
+		schedule_delayed_work(&wm8994->mic_complete_work,
+				      msecs_to_jiffies(id_delay));
+	} else {
 		wm8958_button_det(codec, reg);
+	}
 
 out:
 	pm_runtime_put(codec->dev);
@@ -3888,6 +4131,7 @@
 	struct wm8994 *control = dev_get_drvdata(codec->dev->parent);
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
 	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	unsigned int dcs_done_irq;
 	unsigned int reg;
 	int ret, i;
 
@@ -3899,6 +4143,8 @@
 	mutex_init(&wm8994->accdet_lock);
 	INIT_DELAYED_WORK(&wm8994->jackdet_bootstrap,
 			  wm1811_jackdet_bootstrap);
+	INIT_DELAYED_WORK(&wm8994->open_circuit_work,
+			  wm8958_open_circuit_work);
 
 	switch (control->type) {
 	case WM8994:
@@ -3907,10 +4153,16 @@
 	case WM1811:
 		INIT_DELAYED_WORK(&wm8994->mic_work, wm1811_mic_work);
 		break;
+	case WM8958:
+		INIT_DELAYED_WORK(&wm8994->micd_set_custom_rate_work,
+					wm8958_micd_set_custom_rate_work);
+		break;
 	default:
 		break;
 	}
 
+	INIT_DELAYED_WORK(&wm8994->mic_complete_work, wm8958_mic_work);
+
 	for (i = 0; i < ARRAY_SIZE(wm8994->fll_locked); i++)
 		init_completion(&wm8994->fll_locked[i]);
 
@@ -3983,6 +4235,9 @@
 	wm8994_request_irq(wm8994->wm8994, WM8994_IRQ_TEMP_SHUT,
 			   wm8994_temp_shut, "Thermal shutdown", codec);
 
+	dcs_done_irq = regmap_irq_get_virq(wm8994->wm8994->irq_data,
+					   WM8994_IRQ_DCS_DONE);
+	irq_set_status_flags(dcs_done_irq, IRQ_NOAUTOEN);
 	ret = wm8994_request_irq(wm8994->wm8994, WM8994_IRQ_DCS_DONE,
 				 wm_hubs_dcs_done, "DC servo done",
 				 &wm8994->hubs);
@@ -4089,7 +4344,6 @@
 	}
 	if ((reg & WM8994_GPN_FN_MASK) != WM8994_GP_FN_PIN_SPECIFIC) {
 		wm8994->lrclk_shared[0] = 1;
-		wm8994_dai[0].symmetric_rates = 1;
 	} else {
 		wm8994->lrclk_shared[0] = 0;
 	}
@@ -4219,6 +4473,7 @@
 	}
 
 	wm_hubs_add_analogue_routes(codec, 0, 0);
+	enable_irq(dcs_done_irq);
 	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	switch (control->type) {
@@ -4261,6 +4516,11 @@
 		break;
 	}
 
+	/* Make sure FIFO errors are masked */
+	snd_soc_update_bits(codec, WM8994_INTERRUPT_STATUS_2_MASK,
+			    WM8994_IM_FIFOS_ERR_EINT_MASK,
+			    1 << WM8994_IM_FIFOS_ERR_EINT_SHIFT);
+
 	return 0;
 
 err_irq:
@@ -4365,6 +4625,11 @@
 static int wm8994_suspend(struct device *dev)
 {
 	struct wm8994_priv *wm8994 = dev_get_drvdata(dev);
+	struct wm8994 *control = wm8994->wm8994;
+	struct snd_soc_codec *codec = wm8994->hubs.codec;
+	unsigned int reg;
+	int ret;
 
 	/* Drop down to power saving mode when system is suspended */
 	if (wm8994->jackdet && !wm8994->active_refcount)
@@ -4372,18 +4637,62 @@
 				   WM1811_JACKDET_MODE_MASK,
 				   wm8994->jackdet_mode);
 
+	/* Disable the MIC Detection when suspended */
+	if ((control->type == WM8958) && wm8994->mic_id_cb) {
+		reg = snd_soc_read(codec, WM8958_MIC_DETECT_3);
+
+		dev_dbg(codec->dev, "%s: WM8958_MIC_DETECT_3 0x%x\n", __func__, reg);
+		dev_dbg(codec->dev, "mic_detect %d jack_mic %d headphone %d\n",
+					wm8994->mic_detecting, wm8994->jack_mic,
+					wm8994->headphone_detected);
+
+		if (!wm8994->jack_mic && !wm8994->headphone_detected) {
+			dev_dbg(codec->dev, "Jack not connected, masking interrupt\n");
+			snd_soc_write(codec, WM8994_INTERRUPT_CONTROL, 0x01);
+
+			ret = regcache_sync_region(wm8994->wm8994->regmap,
+					WM8994_INTERRUPT_CONTROL,
+					WM8994_INTERRUPT_CONTROL);
+			if (ret != 0)
+				dev_err(dev, "Failed to sync register: %d\n", ret);
+			synchronize_irq(control->irq);
+
+			dev_dbg(codec->dev, "Disable MIC Detection!!!\n");
+			snd_soc_update_bits(codec, WM8958_MIC_DETECT_1,
+						WM8958_MICD_ENA, 0);
+
+			snd_soc_dapm_disable_pin(&codec->dapm, "CLK_SYS");
+			snd_soc_dapm_sync(&codec->dapm);
+		}
+	}
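+	/*
+	 * wm8994_resume() below mirrors this path: it re-enables CLK_SYS,
+	 * sets MICD_ENA again and clears WM8994_INTERRUPT_CONTROL, so the
+	 * two sides need to stay symmetric.
+	 */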
+
 	return 0;
 }
 
 static int wm8994_resume(struct device *dev)
 {
 	struct wm8994_priv *wm8994 = dev_get_drvdata(dev);
+	struct wm8994 *control = wm8994->wm8994;
+	struct snd_soc_codec *codec = wm8994->hubs.codec;
 
 	if (wm8994->jackdet && wm8994->jackdet_mode)
 		regmap_update_bits(wm8994->wm8994->regmap, WM8994_ANTIPOP_2,
 				   WM1811_JACKDET_MODE_MASK,
 				   WM1811_JACKDET_MODE_AUDIO);
 
+	/* Enable the MIC Detection when resumed */
+	if ((control->type == WM8958) && wm8994->mic_id_cb) {
+		dev_dbg(codec->dev, "Enable MIC Detection!!!\n");
+		snd_soc_dapm_force_enable_pin(&codec->dapm, "CLK_SYS");
+		snd_soc_dapm_sync(&codec->dapm);
+
+		snd_soc_update_bits(codec, WM8958_MIC_DETECT_1,
+					WM8958_MICD_ENA, WM8958_MICD_ENA);
+		snd_soc_write(codec, WM8994_INTERRUPT_CONTROL, 0x00);
+	}
+
 	return 0;
 }
 #endif
diff --git a/sound/soc/codecs/wm8994.h b/sound/soc/codecs/wm8994.h
index 55ddf4d..0a834df 100644
--- a/sound/soc/codecs/wm8994.h
+++ b/sound/soc/codecs/wm8994.h
@@ -41,6 +41,7 @@
 
 typedef void (*wm1811_micdet_cb)(void *data);
 typedef void (*wm1811_mic_id_cb)(void *data, u16 status);
+typedef void (*wm8958_micd_set_custom_rate_cb)(struct snd_soc_codec *codec);
 
 int wm8994_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
 		      int micbias);
@@ -55,6 +56,10 @@
 
 void wm8958_dsp2_init(struct snd_soc_codec *codec);
 
+int wm8958_micd_set_custom_rate(struct snd_soc_codec *codec,
+			wm8958_micd_set_custom_rate_cb micd_custom_rate_cb,
+			void *micd_custom_rate_cb_data);
+
 struct wm8994_micdet {
 	struct snd_soc_jack *jack;
 	bool detecting;
@@ -86,6 +91,7 @@
 	bool fll_locked_irq;
 	bool fll_byp;
 	bool clk_has_run;
+	int slots;
 
 	int vmid_refcount;
 	int active_refcount;
@@ -134,8 +140,14 @@
 	struct mutex accdet_lock;
 	struct wm8994_micdet micdet[2];
 	struct delayed_work mic_work;
+	struct delayed_work open_circuit_work;
+	struct delayed_work mic_complete_work;
+	struct delayed_work micd_set_custom_rate_work;
+
+	u16 mic_status;
 	bool mic_detecting;
 	bool jack_mic;
+	bool headphone_detected;
 	int btn_mask;
 	bool jackdet;
 	int jackdet_mode;
@@ -146,6 +158,8 @@
 	void *micd_cb_data;
 	wm1811_mic_id_cb mic_id_cb;
 	void *mic_id_cb_data;
+	wm8958_micd_set_custom_rate_cb micd_custom_rate_cb;
+	void *micd_custom_rate_cb_data;
 
 	unsigned int aif1clk_enable:1;
 	unsigned int aif2clk_enable:1;
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 7a0466e..2874ec9 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -1223,11 +1223,6 @@
 				    WM8993_LINEOUT2_MODE,
 				    WM8993_LINEOUT2_MODE);
 
-	if (!lineout1_diff && !lineout2_diff)
-		snd_soc_update_bits(codec, WM8993_ANTIPOP1,
-				    WM8993_LINEOUT_VMID_BUF_ENA,
-				    WM8993_LINEOUT_VMID_BUF_ENA);
-
 	if (lineout1fb)
 		snd_soc_update_bits(codec, WM8993_ADDITIONAL_CONTROL,
 				    WM8993_LINEOUT1_FB, WM8993_LINEOUT1_FB);
@@ -1253,6 +1248,13 @@
 	struct wm_hubs_data *hubs = snd_soc_codec_get_drvdata(codec);
 	int val = 0;
 
+	if ((hubs->lineout1_se && hubs->lineout2_se) &&
+			(hubs->lineout1n_ena || hubs->lineout1p_ena ||
+			hubs->lineout2n_ena || hubs->lineout2p_ena))
+		snd_soc_update_bits(codec, WM8993_ANTIPOP1,
+			WM8993_LINEOUT_VMID_BUF_ENA,
+			WM8993_LINEOUT_VMID_BUF_ENA);
+
 	if (hubs->lineout1_se)
 		val |= WM8993_LINEOUT1N_ENA | WM8993_LINEOUT1P_ENA;
 
@@ -1282,6 +1284,13 @@
 		val = 0;
 		mask = 0;
 
+		if ((hubs->lineout1_se && hubs->lineout2_se) &&
+				(hubs->lineout1n_ena || hubs->lineout1p_ena ||
+				hubs->lineout2n_ena || hubs->lineout2p_ena))
+			snd_soc_update_bits(codec, WM8993_ANTIPOP1,
+				WM8993_LINEOUT_VMID_BUF_ENA,
+				WM8993_LINEOUT_VMID_BUF_ENA);
+
 		if (hubs->lineout1_se)
 			mask |= WM8993_LINEOUT1N_ENA | WM8993_LINEOUT1P_ENA;
 
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
new file mode 100644
index 0000000..b6efaac
--- /dev/null
+++ b/sound/soc/intel/Kconfig
@@ -0,0 +1,58 @@
+config SND_MFLD_MACHINE
+	tristate "SOC Machine Audio driver for Intel Medfield MID platform"
+	depends on INTEL_SCU_IPC && INTEL_SCU_IPC_UTIL && X86 && GPIO_LANGWELL
+	depends on MSIC_GPADC
+	select SND_SOC_SN95031
+	select SND_SST_PLATFORM
+	select SND_SST_MACHINE
+	select SND_INTEL_SST
+	default n
+	help
+	  This adds support for the ASoC machine driver for the Intel(R) MID
+	  Medfield platform, used as an ALSA device in the audio subsystem on
+	  Intel(R) MID devices.
+	  Say Y if you have such a device.
+	  If unsure, select "N".
+
+config SND_MRFLD_MACHINE
+	tristate "SOC Machine Audio driver for Intel Merrifield MID platform"
+	depends on INTEL_SCU_IPC && X86
+	select SND_SOC_LM49453
+	select SND_SOC_WM8994
+	select MFD_CORE
+	select MFD_WM8994
+	select REGULATOR_WM8994
+	select SND_SST_PLATFORM
+	select SND_SST_MACHINE
+	select SND_INTEL_SST
+	select SND_EFFECTS_OFFLOAD
+	default n
+	help
+	  This adds support for the ASoC machine driver for the Intel(R) MID
+	  Merrifield platform, used as an ALSA device in the audio subsystem
+	  on Intel(R) MID devices.
+	  Say Y if you have such a device.
+	  If unsure, select "N".
+
+config SND_INTEL_SST
+	tristate
+
+config SND_SST_PLATFORM
+	tristate
+
+config SND_SOC_COMMS_SSP
+	tristate "Use ASoC framework to drive AudioComms SSP BT and Modem"
+	depends on SND_INTEL_MID_I2S
+	help
+	  ASoC cards usually used for BT VoIP and modem mixing use cases.
+	  This will add devices for these use cases to the list of ALSA cards.
+	  Say Y if you need these sound cards (BT chipset or modem present).
+	  Requires the INTEL_MID_I2S low-level SSP I2S driver to be enabled.
+
+config SST_MRFLD_DPCM
+	bool "Use DPCM based Merrifield Machine Audio driver"
+	default n
+	help
+	  This enables the DPCM-based Merrifield (MRFLD) machine driver.
+
+config SND_SST_MACHINE
+	tristate
+
diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
new file mode 100644
index 0000000..02d5120
--- /dev/null
+++ b/sound/soc/intel/Makefile
@@ -0,0 +1,22 @@
+#EXTRA CFLAGS
+ccflags-y += -Werror
+
+ifeq (${TARGET_BUILD_VARIANT},$(filter ${TARGET_BUILD_VARIANT}, eng))
+ccflags-y += -DCONFIG_SND_VERBOSE_PRINTK -DCONFIG_SND_DEBUG -DCONFIG_SND_DEBUG_VERBOSE
+endif
+
+# SST Platform Driver
+PLATFORM_LIBS = platform-libs/controls_v1.o platform-libs/controls_v2.o platform-libs/controls_v2_dpcm.o \
+		platform-libs/ipc_lib_v2.o
+
+snd-soc-sst-platform-objs := pcm.o compress.o effects.o $(PLATFORM_LIBS)
+obj-$(CONFIG_SND_SST_PLATFORM) += snd-soc-sst-platform.o
+
+# Relevant Machine driver
+obj-$(CONFIG_SND_SST_MACHINE) += board/
+
+# DSP driver
+obj-$(CONFIG_SND_INTEL_SST) += sst/
+
+# Audio Comms
+obj-$(CONFIG_SND_SOC_COMMS_SSP) += ssp/
diff --git a/sound/soc/intel/board/Makefile b/sound/soc/intel/board/Makefile
new file mode 100644
index 0000000..3581ae1
--- /dev/null
+++ b/sound/soc/intel/board/Makefile
@@ -0,0 +1,14 @@
+#EXTRA CFLAGS
+ccflags-y += -Werror
+
+# Merrifield board
+snd-merr-saltbay-wm8958-objs := merr_saltbay_wm8958.o
+snd-merr-dpcm-wm8958-objs := merr_dpcm_wm8958.o
+snd-merr-dpcm-dummy-objs := merr_dpcm_dummy.o
+
+ifdef CONFIG_SST_MRFLD_DPCM
+	obj-$(CONFIG_SND_MRFLD_MACHINE) += snd-merr-dpcm-wm8958.o
+	obj-$(CONFIG_SND_MRFLD_MACHINE) += snd-merr-dpcm-dummy.o
+else
+	obj-$(CONFIG_SND_MRFLD_MACHINE) += snd-merr-saltbay-wm8958.o
+endif
diff --git a/sound/soc/intel/board/merr_dpcm_dummy.c b/sound/soc/intel/board/merr_dpcm_dummy.c
new file mode 100644
index 0000000..5603b21
--- /dev/null
+++ b/sound/soc/intel/board/merr_dpcm_dummy.c
@@ -0,0 +1,201 @@
+/*
+ *  ASoC dummy DPCM machine driver for the Intel Edison MID platform
+ *
+ *  Copyright (C) 2014 Intel Corp
+ *  Author: Michael Soares <michaelx.soares@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_sst_mrfld.h>
+#include <sound/soc.h>
+#include <sound/pcm_params.h>
+
+#ifdef CONFIG_PM_SLEEP
+static int snd_merr_dpcm_prepare(struct device *dev)
+{
+	pr_debug("In %s device name\n", __func__);
+	snd_soc_suspend(dev);
+	return 0;
+}
+
+static void snd_merr_dpcm_complete(struct device *dev)
+{
+	pr_debug("In %s\n", __func__);
+	snd_soc_resume(dev);
+}
+
+static int snd_merr_dpcm_poweroff(struct device *dev)
+{
+	pr_debug("In %s\n", __func__);
+	snd_soc_poweroff(dev);
+	return 0;
+}
+#else
+#define snd_merr_dpcm_prepare NULL
+#define snd_merr_dpcm_complete NULL
+#define snd_merr_dpcm_poweroff NULL
+#endif
+
+static unsigned int rates_48000[] = {
+	48000,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_48000 = {
+	.count = ARRAY_SIZE(rates_48000),
+	.list  = rates_48000,
+};
+
+static int merr_dummy_startup(struct snd_pcm_substream *substream)
+{
+	return snd_pcm_hw_constraint_list(substream->runtime, 0,
+			SNDRV_PCM_HW_PARAM_RATE,
+			&constraints_48000);
+}
+
+static struct snd_soc_ops merr_dummy_ops = {
+		.startup = merr_dummy_startup,
+};
+
+static int merr_codec_fixup(struct snd_soc_pcm_runtime *rtd,
+		struct snd_pcm_hw_params *params)
+{
+	struct snd_interval *rate = hw_param_interval(params,
+			SNDRV_PCM_HW_PARAM_RATE);
+	struct snd_interval *channels = hw_param_interval(params,
+			SNDRV_PCM_HW_PARAM_CHANNELS);
+
+	pr_debug("Invoked %s for dailink %s\n", __func__, rtd->dai_link->name);
+
+	/* The DSP will convert the FE rate to 48k, stereo, 24bits */
+	rate->min = rate->max = 48000;
+	channels->min = channels->max = 2;
+
+	/* set SSP2 to 24-bit */
+	snd_mask_set(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT -
+				SNDRV_PCM_HW_PARAM_FIRST_MASK],
+				SNDRV_PCM_FORMAT_S24_LE);
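+	/*
+	 * The mask index arithmetic above is the open-coded equivalent of
+	 * snd_mask_set(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT),
+	 * SNDRV_PCM_FORMAT_S24_LE).
+	 */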
+	return 0;
+}
+
+struct snd_soc_dai_link merr_msic_dailink[] = {
+	[MERR_DPCM_AUDIO] = {
+		.name = "Media Audio Port",
+		.stream_name = "Edison Audio",
+		.cpu_dai_name = "Headset-cpu-dai",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.platform_name = "sst-platform",
+		.ignore_suspend = 1,
+		.dynamic = 1,
+		.ops = &merr_dummy_ops,
+	},
+	/* back ends */
+	{
+		.name = "SSP2-Codec",
+		.be_id = 1,
+		.cpu_dai_name = "ssp2-codec",
+		.platform_name = "sst-platform",
+		.no_pcm = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.be_hw_params_fixup = merr_codec_fixup,
+		.ignore_suspend = 1,
+	},
+	{
+		.name = "SSP1-BT",
+		.be_id = 2,
+		.cpu_dai_name = "snd-soc-dummy-dai",
+		.platform_name = "snd-soc-dummy",
+		.no_pcm = 1,
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.ignore_suspend = 1,
+	},
+
+};
+
+static const struct snd_soc_dapm_route map[] = {
+	{ "Dummy Playback", NULL, "codec_out0"  },
+	{ "Dummy Playback", NULL, "codec_out1"  },
+	{ "codec_in0", NULL, "Dummy Capture" },
+	{ "codec_in1", NULL, "Dummy Capture" },
+};
+
+/* SoC card */
+static struct snd_soc_card snd_soc_card_merr = {
+	.name = "dummy-audio",
+	.dai_link = merr_msic_dailink,
+	.num_links = ARRAY_SIZE(merr_msic_dailink),
+	.dapm_routes = map,
+	.num_dapm_routes = ARRAY_SIZE(map),
+};
+
+static int snd_merr_dpcm_probe(struct platform_device *pdev)
+{
+	int ret_val = 0;
+	pr_debug("%s enter\n", __func__);
+
+	/* register the soc card */
+	snd_soc_card_merr.dev = &pdev->dev;
+	ret_val = snd_soc_register_card(&snd_soc_card_merr);
+	if (ret_val) {
+		pr_err("snd_soc_register_card failed %d\n", ret_val);
+		return ret_val;
+	}
+	platform_set_drvdata(pdev, &snd_soc_card_merr);
+	pr_info("%s successful\n", __func__);
+	return ret_val;
+}
+
+static int snd_merr_dpcm_remove(struct platform_device *pdev)
+{
+	struct snd_soc_card *soc_card = platform_get_drvdata(pdev);
+	pr_err("snd_merr_dpcm_remove");
+	snd_soc_unregister_card(soc_card);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static const struct dev_pm_ops snd_merr_dpcm_mc_pm_ops = {
+	.prepare = snd_merr_dpcm_prepare,
+	.complete = snd_merr_dpcm_complete,
+	.poweroff = snd_merr_dpcm_poweroff,
+};
+
+static struct platform_driver snd_merr_dpcm_drv = {
+	.driver = {
+			.owner = THIS_MODULE,
+			.name = "merr_dpcm_dummy",
+			.pm = &snd_merr_dpcm_mc_pm_ops,
+	},
+	.probe = snd_merr_dpcm_probe,
+	.remove = snd_merr_dpcm_remove,
+};
+
+module_platform_driver(snd_merr_dpcm_drv);
+
+MODULE_DESCRIPTION("ASoC Intel(R) Edison dummy MID Machine driver");
+MODULE_AUTHOR("Michael Soares <michaelx.soares@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:merr_dpcm_dummy");
+
diff --git a/sound/soc/intel/board/merr_dpcm_wm8958.c b/sound/soc/intel/board/merr_dpcm_wm8958.c
new file mode 100644
index 0000000..3b89a82
--- /dev/null
+++ b/sound/soc/intel/board/merr_dpcm_wm8958.c
@@ -0,0 +1,931 @@
+/*
+ *  merr_dpcm_wm8958.c - ASoC DPCM machine driver for the Intel Merrifield MID platform
+ *
+ *  Copyright (C) 2013 Intel Corp
+ *  Author: Vinod Koul <vinod.koul@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/async.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/intel_sst_mrfld.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include <linux/input.h>
+
+#include <linux/mfd/wm8994/core.h>
+#include <linux/mfd/wm8994/registers.h>
+#include <linux/mfd/wm8994/pdata.h>
+#include "../../codecs/wm8994.h"
+
+/* Codec PLL output clk rate */
+#define CODEC_SYSCLK_RATE			24576000
+/* Input clock to codec at MCLK1 PIN */
+#define CODEC_IN_MCLK1_RATE			19200000
+/* Input clock to codec at MCLK2 PIN */
+#define CODEC_IN_MCLK2_RATE			32768
+/*  define to select between MCLK1 and MCLK2 input to codec as its clock */
+#define CODEC_IN_MCLK1				1
+#define CODEC_IN_MCLK2				2
+
+/* Register address for OSC Clock */
+#define MERR_OSC_CLKOUT_CTRL0_REG_ADDR  0xFF00BC04
+/* Size of osc clock register */
+#define MERR_OSC_CLKOUT_CTRL0_REG_SIZE  4
+
+struct mrfld_8958_mc_private {
+	struct snd_soc_jack jack;
+	int jack_retry;
+	u8 pmic_id;
+	void __iomem    *osc_clk0_reg;
+};
+
+
+/* set_soc_osc_clk0 -	enable/disable the osc clock0
+ * addr:		address of the register to write to
+ * enable:		bool to enable or disable the clock
+ */
+static inline void set_soc_osc_clk0(void __iomem *addr, bool enable)
+{
+	u32 osc_clk_ctrl;
+
+	osc_clk_ctrl = readl(addr);
+	if (enable)
+		osc_clk_ctrl |= BIT(31);
+	else
+		osc_clk_ctrl &= ~(BIT(31));
+
+	pr_debug("%s: enable:%d val 0x%x\n", __func__, enable, osc_clk_ctrl);
+
+	writel(osc_clk_ctrl, addr);
+}
+
+static inline struct snd_soc_codec *mrfld_8958_get_codec(struct snd_soc_card *card)
+{
+	bool found = false;
+	struct snd_soc_codec *codec;
+
+	list_for_each_entry(codec, &card->codec_dev_list, card_list) {
+		if (strstr(codec->name, "wm8994-codec")) {
+			found = true;
+			break;
+		}
+		pr_debug("codec was %s\n", codec->name);
+	}
+	if (!found) {
+		pr_warn("%s: can't find codec\n", __func__);
+		return NULL;
+	}
+	return codec;
+}
+
+/* TODO: find better way of doing this */
+static struct snd_soc_dai *find_codec_dai(struct snd_soc_card *card, const char *dai_name)
+{
+	int i;
+	for (i = 0; i < card->num_rtd; i++) {
+		if (!strcmp(card->rtd[i].codec_dai->name, dai_name))
+			return card->rtd[i].codec_dai;
+	}
+	pr_err("%s: unable to find codec dai\n", __func__);
+	/* this should never occur */
+	WARN_ON(1);
+	return NULL;
+}
+
+/* Function to switch the input clock for the codec. When audio is in
+ * progress the codec is clocked through MCLK1 at 19.2MHz, while in the
+ * off state it is clocked through MCLK2 at 32kHz.
+ * card	: Sound card structure
+ * src	: Input clock source to codec
+ */
+static int mrfld_8958_set_codec_clk(struct snd_soc_card *card, int src)
+{
+	struct snd_soc_dai *aif1_dai = find_codec_dai(card, "wm8994-aif1");
+	int ret;
+
+	if (!aif1_dai)
+		return -ENODEV;
+
+	switch (src) {
+	case CODEC_IN_MCLK1:
+		/* Turn ON the PLL to generate required sysclk rate
+		 * from MCLK1 */
+		ret = snd_soc_dai_set_pll(aif1_dai,
+			WM8994_FLL1, WM8994_FLL_SRC_MCLK1,
+			CODEC_IN_MCLK1_RATE, CODEC_SYSCLK_RATE);
+		if (ret < 0) {
+			pr_err("Failed to start FLL: %d\n", ret);
+			return ret;
+		}
+		/* Switch to MCLK1 input */
+		ret = snd_soc_dai_set_sysclk(aif1_dai, WM8994_SYSCLK_FLL1,
+				CODEC_SYSCLK_RATE, SND_SOC_CLOCK_IN);
+		if (ret < 0) {
+			pr_err("Failed to set codec sysclk configuration %d\n",
+				 ret);
+			return ret;
+		}
+		break;
+	case CODEC_IN_MCLK2:
+		/* Switch to MCLK2 */
+		ret = snd_soc_dai_set_sysclk(aif1_dai, WM8994_SYSCLK_MCLK2,
+				32768, SND_SOC_CLOCK_IN);
+		if (ret < 0) {
+			pr_err("Failed to switch to MCLK2: %d", ret);
+			return ret;
+		}
+		/* Turn off PLL for MCLK1 */
+		ret = snd_soc_dai_set_pll(aif1_dai, WM8994_FLL1, 0, 0, 0);
+		if (ret < 0) {
+			pr_err("Failed to stop the FLL: %d", ret);
+			return ret;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int mrfld_wm8958_set_clk_fmt(struct snd_soc_dai *codec_dai)
+{
+	unsigned int fmt;
+	int ret = 0;
+	struct snd_soc_card *card = codec_dai->card;
+	struct mrfld_8958_mc_private *ctx = snd_soc_card_get_drvdata(card);
+
+	/* Enable the osc clock at start so that it gets settling time */
+	set_soc_osc_clk0(ctx->osc_clk0_reg, true);
+
+	pr_err("setting snd_soc_dai_set_tdm_slot\n");
+	ret = snd_soc_dai_set_tdm_slot(codec_dai, 0, 0, 4, SNDRV_PCM_FORMAT_S24_LE);
+	if (ret < 0) {
+		pr_err("can't set codec pcm format %d\n", ret);
+		return ret;
+	}
+
+	/* WM8958 slave Mode */
+	fmt =   SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_IB_NF
+		| SND_SOC_DAIFMT_CBS_CFS;
+	ret = snd_soc_dai_set_fmt(codec_dai, fmt);
+	if (ret < 0) {
+		pr_err("can't set codec DAI configuration %d\n", ret);
+		return ret;
+	}
+
+	/* FIXME: move this to SYS_CLOCK event handler when codec driver
+	 * dependency is clean.
+	 */
+	/* Switch to 19.2MHz MCLK1 input clock for codec */
+	ret = mrfld_8958_set_codec_clk(card, CODEC_IN_MCLK1);
+
+	return ret;
+}
+
+static int mrfld_8958_hw_params(struct snd_pcm_substream *substream,
+			   struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+
+	if (!strcmp(codec_dai->name, "wm8994-aif1"))
+		return mrfld_wm8958_set_clk_fmt(codec_dai);
+	return 0;
+}
+
+static int mrfld_wm8958_compr_set_params(struct snd_compr_stream *cstream)
+{
+	return 0;
+}
+
+static const struct snd_soc_pcm_stream mrfld_wm8958_dai_params = {
+	.formats = SNDRV_PCM_FMTBIT_S24_LE,
+	.rate_min = 48000,
+	.rate_max = 48000,
+	.channels_min = 2,
+	.channels_max = 2,
+};
+
+static int merr_codec_fixup(struct snd_soc_pcm_runtime *rtd,
+			    struct snd_pcm_hw_params *params)
+{
+	struct snd_interval *rate = hw_param_interval(params,
+			SNDRV_PCM_HW_PARAM_RATE);
+	struct snd_interval *channels = hw_param_interval(params,
+						SNDRV_PCM_HW_PARAM_CHANNELS);
+
+	pr_debug("Invoked %s for dailink %s\n", __func__, rtd->dai_link->name);
+
+	/* The DSP will convert the FE rate to 48k, stereo, 24bits */
+	rate->min = rate->max = 48000;
+	channels->min = channels->max = 2;
+
+	/* set SSP2 to 24-bit */
+	snd_mask_set(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT -
+				    SNDRV_PCM_HW_PARAM_FIRST_MASK],
+				    SNDRV_PCM_FORMAT_S24_LE);
+	return 0;
+}
+
+static int mrfld_8958_set_bias_level(struct snd_soc_card *card,
+				struct snd_soc_dapm_context *dapm,
+				enum snd_soc_bias_level level)
+{
+	struct snd_soc_dai *aif1_dai = find_codec_dai(card, "wm8994-aif1");
+	int ret = 0;
+
+	if (!aif1_dai)
+		return -ENODEV;
+
+	if (dapm->dev != aif1_dai->dev)
+		return 0;
+	switch (level) {
+	case SND_SOC_BIAS_PREPARE:
+		if (card->dapm.bias_level == SND_SOC_BIAS_STANDBY)
+			ret = mrfld_wm8958_set_clk_fmt(aif1_dai);
+		break;
+	default:
+		break;
+	}
+	pr_debug("%s card(%s)->bias_level %u\n", __func__, card->name,
+			card->dapm.bias_level);
+	return ret;
+}
+
+static int mrfld_8958_set_bias_level_post(struct snd_soc_card *card,
+		 struct snd_soc_dapm_context *dapm,
+		 enum snd_soc_bias_level level)
+{
+	struct snd_soc_dai *aif1_dai = find_codec_dai(card, "wm8994-aif1");
+	struct mrfld_8958_mc_private *ctx = snd_soc_card_get_drvdata(card);
+	int ret = 0;
+
+	if (!aif1_dai)
+		return -ENODEV;
+
+	if (dapm->dev != aif1_dai->dev)
+		return 0;
+
+	switch (level) {
+	case SND_SOC_BIAS_STANDBY:
+		/*
+		 * We are in standby, so switch to the 32kHz MCLK2 input
+		 * clock for the codec.
+		 */
+		ret = mrfld_8958_set_codec_clk(card, CODEC_IN_MCLK2);
+		/* Turn off 19.2MHz soc osc clock */
+		set_soc_osc_clk0(ctx->osc_clk0_reg, false);
+		break;
+	default:
+		break;
+	}
+	card->dapm.bias_level = level;
+	pr_debug("%s card(%s)->bias_level %u\n", __func__, card->name,
+			card->dapm.bias_level);
+	return ret;
+}
+
+#define PMIC_ID_ADDR		0x00
+#define PMIC_CHIP_ID_A0_VAL	0xC0
+
+static int mrfld_8958_set_vflex_vsel(struct snd_soc_dapm_widget *w,
+				struct snd_kcontrol *k, int event)
+{
+#define VFLEXCNT		0xAB
+#define VFLEXVSEL_5V		0x01
+#define VFLEXVSEL_B0_VSYS_PT	0x80	/* B0: Vsys pass-through */
+#define VFLEXVSEL_A0_4P5V	0x41	/* A0: 4.5V */
+
+	struct snd_soc_dapm_context *dapm = w->dapm;
+	struct snd_soc_card *card = dapm->card;
+	struct mrfld_8958_mc_private *ctx = snd_soc_card_get_drvdata(card);
+
+	u8 vflexvsel, pmic_id = ctx->pmic_id;
+	int retval = 0;
+
+	pr_debug("%s: ON? %d\n", __func__, SND_SOC_DAPM_EVENT_ON(event));
+
+	vflexvsel = (pmic_id == PMIC_CHIP_ID_A0_VAL) ?
+				VFLEXVSEL_A0_4P5V : VFLEXVSEL_B0_VSYS_PT;
+	pr_debug("pmic_id %#x vflexvsel %#x\n", pmic_id,
+		SND_SOC_DAPM_EVENT_ON(event) ? VFLEXVSEL_5V : vflexvsel);
+
+	if (SND_SOC_DAPM_EVENT_ON(event))
+		retval = intel_scu_ipc_iowrite8(VFLEXCNT, VFLEXVSEL_5V);
+	else if (SND_SOC_DAPM_EVENT_OFF(event))
+		retval = intel_scu_ipc_iowrite8(VFLEXCNT, vflexvsel);
+	if (retval)
+		pr_err("Error writing to VFLEXCNT register\n");
+
+	return retval;
+}
+
+static const struct snd_soc_dapm_widget widgets[] = {
+	SND_SOC_DAPM_HP("Headphones", NULL),
+	SND_SOC_DAPM_MIC("AMIC", NULL),
+	SND_SOC_DAPM_MIC("DMIC", NULL),
+	SND_SOC_DAPM_SUPPLY("VFLEXCNT", SND_SOC_NOPM, 0, 0,
+			mrfld_8958_set_vflex_vsel,
+			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+};
+
+static const struct snd_soc_dapm_route map[] = {
+	{ "Headphones", NULL, "HPOUT1L" },
+	{ "Headphones", NULL, "HPOUT1R" },
+
+	/* saltbay uses 2 DMICs, other configs may use more so change below
+	 * accordingly
+	 */
+	{ "DMIC1DAT", NULL, "DMIC" },
+	{ "DMIC2DAT", NULL, "DMIC" },
+	/*{ "DMIC3DAT", NULL, "DMIC" },*/
+	/*{ "DMIC4DAT", NULL, "DMIC" },*/
+
+	/* MICBIAS2 is connected as Bias for AMIC so we link it
+	 * here. Also AMIC wires up to IN1LP pin.
+	 * DMIC is externally connected to the 1.8V rail, so no link is required.
+	 */
+	{ "AMIC", NULL, "MICBIAS2" },
+	{ "IN1LP", NULL, "AMIC" },
+
+	/* SWM map link the SWM outs to codec AIF */
+	{ "AIF1 Playback", NULL, "codec_out0"  },
+	{ "AIF1 Playback", NULL, "codec_out1"  },
+	{ "codec_in0", NULL, "AIF1 Capture" },
+	{ "codec_in1", NULL, "AIF1 Capture" },
+
+	{ "AIF1 Playback", NULL, "VFLEXCNT" },
+	{ "AIF1 Capture", NULL, "VFLEXCNT" },
+};
+
+static const struct wm8958_micd_rate micdet_rates[] = {
+	{ 32768,       true,  1, 4 },
+	{ 32768,       false, 1, 1 },
+	{ 44100 * 256, true,  7, 10 },
+	{ 44100 * 256, false, 7, 10 },
+};
+
+static void wm8958_custom_micd_set_rate(struct snd_soc_codec *codec)
+{
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	struct wm8994 *control = dev_get_drvdata(codec->dev->parent);
+	int best, i, sysclk, val;
+	bool idle;
+	const struct wm8958_micd_rate *rates;
+	int num_rates;
+
+	idle = !wm8994->jack_mic;
+
+	sysclk = snd_soc_read(codec, WM8994_CLOCKING_1);
+	if (sysclk & WM8994_SYSCLK_SRC)
+		sysclk = wm8994->aifclk[1];
+	else
+		sysclk = wm8994->aifclk[0];
+
+	if (control->pdata.micd_rates) {
+		rates = control->pdata.micd_rates;
+		num_rates = control->pdata.num_micd_rates;
+	} else {
+		rates = micdet_rates;
+		num_rates = ARRAY_SIZE(micdet_rates);
+	}
+
+	best = 0;
+	for (i = 0; i < num_rates; i++) {
+		if (rates[i].idle != idle)
+			continue;
+		if (abs(rates[i].sysclk - sysclk) <
+		    abs(rates[best].sysclk - sysclk))
+			best = i;
+		else if (rates[best].idle != idle)
+			best = i;
+	}
+
+	val = rates[best].start << WM8958_MICD_BIAS_STARTTIME_SHIFT
+		| rates[best].rate << WM8958_MICD_RATE_SHIFT;
+
+	dev_dbg(codec->dev, "MICD rate %d,%d for %dHz %s\n",
+		rates[best].start, rates[best].rate, sysclk,
+		idle ? "idle" : "active");
+
+	snd_soc_update_bits(codec, WM8958_MIC_DETECT_1,
+			    WM8958_MICD_BIAS_STARTTIME_MASK |
+			    WM8958_MICD_RATE_MASK, val);
+}
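+
+/*
+ * Registered with the codec through wm8958_micd_set_custom_rate() in
+ * mrfld_8958_init() below; wm8958_custom_mic_id() schedules the codec's
+ * micd_set_custom_rate_work, which then invokes this callback so the rate
+ * reprogramming happens outside the mic-detect IRQ path (see the wm8994.c
+ * changes earlier in this patch).
+ */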
+
+static void wm8958_custom_mic_id(void *data, u16 status)
+{
+	struct snd_soc_codec *codec = data;
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "wm8958 custom mic id called with status %x\n",
+		status);
+
+	/* Either nothing present or just starting detection */
+	if (!(status & WM8958_MICD_STS)) {
+		/* If nothing present then clear our statuses */
+		dev_dbg(codec->dev, "Detected open circuit\n");
+
+		schedule_delayed_work(&wm8994->open_circuit_work,
+				      msecs_to_jiffies(2500));
+		return;
+	}
+
+	schedule_delayed_work(&wm8994->micd_set_custom_rate_work,
+		msecs_to_jiffies(wm8994->wm8994->pdata.micb_en_delay));
+
+	/* If the measurement is showing a high impedance we've got a
+	 * microphone.
+	 */
+	if (status & 0x600) {
+		dev_dbg(codec->dev, "Detected microphone\n");
+
+		wm8994->mic_detecting = false;
+		wm8994->jack_mic = true;
+
+		snd_soc_jack_report(wm8994->micdet[0].jack, SND_JACK_HEADSET,
+				    SND_JACK_HEADSET);
+	}
+
+	if (status & 0xfc) {
+		dev_dbg(codec->dev, "Detected headphone\n");
+
+		/* Partial headset inserts that complete after an
+		 * indeterminate amount of time require continuous mic
+		 * detection to stay enabled (until an open circuit or
+		 * a headset is detected).
+		 */
+		wm8994->mic_detecting = true;
+
+		snd_soc_jack_report(wm8994->micdet[0].jack, SND_JACK_HEADPHONE,
+				    SND_JACK_HEADSET);
+	}
+}
+
+static int mrfld_8958_init(struct snd_soc_pcm_runtime *runtime)
+{
+	int ret;
+	unsigned int fmt;
+	struct snd_soc_codec *codec;
+	struct snd_soc_card *card = runtime->card;
+	struct snd_soc_dai *aif1_dai = find_codec_dai(card, "wm8994-aif1");
+	struct mrfld_8958_mc_private *ctx = snd_soc_card_get_drvdata(card);
+
+	if (!aif1_dai)
+		return -ENODEV;
+
+	pr_debug("Entry %s\n", __func__);
+
+	ret = snd_soc_dai_set_tdm_slot(aif1_dai, 0, 0, 4, SNDRV_PCM_FORMAT_S24_LE);
+	if (ret < 0) {
+		pr_err("can't set codec pcm format %d\n", ret);
+		return ret;
+	}
+
+	/* WM8958 slave Mode */
+	fmt =   SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_IB_NF
+		| SND_SOC_DAIFMT_CBS_CFS;
+	ret = snd_soc_dai_set_fmt(aif1_dai, fmt);
+	if (ret < 0) {
+		pr_err("can't set codec DAI configuration %d\n", ret);
+		return ret;
+	}
+
+	mrfld_8958_set_bias_level(card, &card->dapm, SND_SOC_BIAS_OFF);
+	card->dapm.idle_bias_off = true;
+
+	/* these pins are not used in SB config so mark as nc
+	 *
+	 * LINEOUT1, 2
+	 * IN1R
+	 * DMICDAT2
+	 */
+	snd_soc_dapm_nc_pin(&card->dapm, "DMIC2DAT");
+	snd_soc_dapm_nc_pin(&card->dapm, "LINEOUT1P");
+	snd_soc_dapm_nc_pin(&card->dapm, "LINEOUT1N");
+	snd_soc_dapm_nc_pin(&card->dapm, "LINEOUT2P");
+	snd_soc_dapm_nc_pin(&card->dapm, "LINEOUT2N");
+	snd_soc_dapm_nc_pin(&card->dapm, "IN1RN");
+	snd_soc_dapm_nc_pin(&card->dapm, "IN1RP");
+
+	snd_soc_dapm_sync(&card->dapm);
+
+	codec = mrfld_8958_get_codec(card);
+	if (!codec) {
+		pr_err("%s: we didnt find the codec pointer!\n", __func__);
+		return 0;
+	}
+
+	ctx->jack_retry = 0;
+	ret = snd_soc_jack_new(codec, "Intel MID Audio Jack",
+			       SND_JACK_HEADSET | SND_JACK_HEADPHONE |
+				SND_JACK_BTN_0 | SND_JACK_BTN_1,
+				&ctx->jack);
+	if (ret) {
+		pr_err("jack creation failed\n");
+		return ret;
+	}
+
+	snd_jack_set_key(ctx->jack.jack, SND_JACK_BTN_1, KEY_MEDIA);
+	snd_jack_set_key(ctx->jack.jack, SND_JACK_BTN_0, KEY_MEDIA);
+
+	wm8958_mic_detect(codec, &ctx->jack, NULL, NULL,
+			  wm8958_custom_mic_id, codec);
+
+	wm8958_micd_set_custom_rate(codec, wm8958_custom_micd_set_rate, codec);
+
+	snd_soc_update_bits(codec, WM8994_AIF1_DAC1_FILTERS_1, WM8994_AIF1DAC1_MUTE, 0);
+	snd_soc_update_bits(codec, WM8994_AIF1_DAC2_FILTERS_1, WM8994_AIF1DAC2_MUTE, 0);
+
+	/* Micbias1 is always off, so for pm optimizations make sure the micbias1
+	 * discharge bit is set to floating to avoid discharge in disable state
+	 */
+	snd_soc_update_bits(codec, WM8958_MICBIAS1, WM8958_MICB1_DISCH, 0);
+
+	return 0;
+}
+
+static unsigned int rates_8000_16000[] = {
+	8000,
+	16000,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_8000_16000 = {
+	.count = ARRAY_SIZE(rates_8000_16000),
+	.list  = rates_8000_16000,
+};
+
+static unsigned int rates_48000[] = {
+	48000,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_48000 = {
+	.count = ARRAY_SIZE(rates_48000),
+	.list  = rates_48000,
+};
+
+static int mrfld_8958_startup(struct snd_pcm_substream *substream)
+{
+	return snd_pcm_hw_constraint_list(substream->runtime, 0,
+			SNDRV_PCM_HW_PARAM_RATE,
+			&constraints_48000);
+}
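+
+/* Usage sketch (illustrative, from userspace via alsa-lib): with
+ * constraints_48000 installed by mrfld_8958_startup(), rate negotiation
+ * on ports using mrfld_8958_ops can only settle at 48kHz, e.g.
+ *
+ *	snd_pcm_hw_params_set_rate(pcm, params, 48000, 0);	// succeeds
+ *	snd_pcm_hw_params_set_rate(pcm, params, 44100, 0);	// rejected
+ */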
+
+static struct snd_soc_ops mrfld_8958_ops = {
+	.startup = mrfld_8958_startup,
+};
+
+static int mrfld_8958_8k_16k_startup(struct snd_pcm_substream *substream)
+{
+	return snd_pcm_hw_constraint_list(substream->runtime, 0,
+			SNDRV_PCM_HW_PARAM_RATE,
+			&constraints_8000_16000);
+}
+
+static struct snd_soc_ops mrfld_8958_8k_16k_ops = {
+	.startup = mrfld_8958_8k_16k_startup,
+	.hw_params = mrfld_8958_hw_params,
+};
+
+static struct snd_soc_ops mrfld_8958_be_ssp2_ops = {
+	.hw_params = mrfld_8958_hw_params,
+};
+
+static struct snd_soc_compr_ops mrfld_compr_ops = {
+	.set_params = mrfld_wm8958_compr_set_params,
+};
+
+struct snd_soc_dai_link mrfld_8958_msic_dailink[] = {
+	[MERR_DPCM_AUDIO] = {
+		.name = "Merrifield Audio Port",
+		.stream_name = "Saltbay Audio",
+		.cpu_dai_name = "Headset-cpu-dai",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.platform_name = "sst-platform",
+		.init = mrfld_8958_init,
+		.ignore_suspend = 1,
+		.dynamic = 1,
+		.ops = &mrfld_8958_ops,
+	},
+	[MERR_DPCM_DB] = {
+		.name = "Merrifield DB Audio Port",
+		.stream_name = "Deep Buffer Audio",
+		.cpu_dai_name = "Deepbuffer-cpu-dai",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.platform_name = "sst-platform",
+		.init = mrfld_8958_init,
+		.ignore_suspend = 1,
+		.dynamic = 1,
+		.ops = &mrfld_8958_ops,
+	},
+	[MERR_DPCM_LL] = {
+		.name = "Merrifield LL Audio Port",
+		.stream_name = "Low Latency Audio",
+		.cpu_dai_name = "Lowlatency-cpu-dai",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.platform_name = "sst-platform",
+		.init = mrfld_8958_init,
+		.ignore_suspend = 1,
+		.dynamic = 1,
+		.ops = &mrfld_8958_ops,
+	},
+	[MERR_DPCM_COMPR] = {
+		.name = "Merrifield Compress Port",
+		.stream_name = "Saltbay Compress",
+		.platform_name = "sst-platform",
+		.cpu_dai_name = "Compress-cpu-dai",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.dynamic = 1,
+		.init = mrfld_8958_init,
+		.compr_ops = &mrfld_compr_ops,
+	},
+	[MERR_DPCM_VOIP] = {
+		.name = "Merrifield VOIP Port",
+		.stream_name = "Saltbay Voip",
+		.cpu_dai_name = "Voip-cpu-dai",
+		.platform_name = "sst-platform",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.init = NULL,
+		.ignore_suspend = 1,
+		.ops = &mrfld_8958_8k_16k_ops,
+		.dynamic = 1,
+	},
+	[MERR_DPCM_PROBE] = {
+		.name = "Merrifield Probe Port",
+		.stream_name = "Saltbay Probe",
+		.cpu_dai_name = "Probe-cpu-dai",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.platform_name = "sst-platform",
+		.playback_count = 8,
+		.capture_count = 8,
+	},
+	/* CODEC<->CODEC link */
+	{
+		.name = "Merrifield Codec-Loop Port",
+		.stream_name = "Saltbay Codec-Loop",
+		.cpu_dai_name = "snd-soc-dummy-dai",
+		.codec_dai_name = "wm8994-aif1",
+		.codec_name = "wm8994-codec",
+		.dai_fmt = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_IB_NF
+						| SND_SOC_DAIFMT_CBS_CFS,
+		.params = &mrfld_wm8958_dai_params,
+	},
+
+	/* back ends */
+	{
+		.name = "SSP2-Codec",
+		.be_id = 1,
+		.cpu_dai_name = "ssp2-codec",
+		.platform_name = "sst-platform",
+		.no_pcm = 1,
+		.codec_dai_name = "wm8994-aif1",
+		.codec_name = "wm8994-codec",
+		.be_hw_params_fixup = merr_codec_fixup,
+		.ignore_suspend = 1,
+		.ops = &mrfld_8958_be_ssp2_ops,
+	},
+	{
+		.name = "SSP1-BTFM",
+		.be_id = 2,
+		.cpu_dai_name = "snd-soc-dummy-dai",
+		.platform_name = "snd-soc-dummy",
+		.no_pcm = 1,
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.ignore_suspend = 1,
+	},
+	{
+		.name = "SSP0-Modem",
+		.be_id = 3,
+		.cpu_dai_name = "snd-soc-dummy-dai",
+		.platform_name = "snd-soc-dummy",
+		.no_pcm = 1,
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.ignore_suspend = 1,
+	},
+};
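+
+/* Topology note: the front-end ports above marked .dynamic = 1 are DPCM
+ * streams bound to the dummy codec and are routed at runtime onto the
+ * SSP2-Codec back end, which owns the real wm8994-aif1 DAI; SSP1-BTFM and
+ * SSP0-Modem are dummy back ends reserved for the BT/FM and modem paths.
+ */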
+
+#ifdef CONFIG_PM_SLEEP
+static int snd_mrfld_8958_prepare(struct device *dev)
+{
+	pr_debug("In %s\n", __func__);
+	snd_soc_suspend(dev);
+	return 0;
+}
+
+static void snd_mrfld_8958_complete(struct device *dev)
+{
+	pr_debug("In %s\n", __func__);
+	snd_soc_resume(dev);
+	return;
+}
+
+static int snd_mrfld_8958_poweroff(struct device *dev)
+{
+	pr_debug("In %s\n", __func__);
+	snd_soc_poweroff(dev);
+	return 0;
+}
+#else
+#define snd_mrfld_8958_prepare NULL
+#define snd_mrfld_8958_complete NULL
+#define snd_mrfld_8958_poweroff NULL
+#endif
+
+/* SoC card */
+static struct snd_soc_card snd_soc_card_mrfld = {
+	.name = "wm8958-audio",
+	.dai_link = mrfld_8958_msic_dailink,
+	.num_links = ARRAY_SIZE(mrfld_8958_msic_dailink),
+	.set_bias_level = mrfld_8958_set_bias_level,
+	.set_bias_level_post = mrfld_8958_set_bias_level_post,
+	.dapm_widgets = widgets,
+	.num_dapm_widgets = ARRAY_SIZE(widgets),
+	.dapm_routes = map,
+	.num_dapm_routes = ARRAY_SIZE(map),
+};
+
+static int snd_mrfld_8958_mc_probe(struct platform_device *pdev)
+{
+	int ret_val = 0;
+	struct mrfld_8958_mc_private *drv;
+
+	pr_debug("Entry %s\n", __func__);
+
+	drv = kzalloc(sizeof(*drv), GFP_ATOMIC);
+	if (!drv) {
+		pr_err("allocation failed\n");
+		return -ENOMEM;
+	}
+
+	/* ioremap the register */
+	drv->osc_clk0_reg = devm_ioremap_nocache(&pdev->dev,
+					MERR_OSC_CLKOUT_CTRL0_REG_ADDR,
+					MERR_OSC_CLKOUT_CTRL0_REG_SIZE);
+	if (!drv->osc_clk0_reg) {
+		pr_err("osc clk0 ctrl ioremap failed\n");
+		ret_val = -1;
+		goto unalloc;
+	}
+
+	ret_val = intel_scu_ipc_ioread8(PMIC_ID_ADDR, &drv->pmic_id);
+	if (ret_val) {
+		pr_err("Error reading PMIC ID register\n");
+		goto unalloc;
+	}
+
+	/* register the soc card */
+	snd_soc_card_mrfld.dev = &pdev->dev;
+	snd_soc_card_set_drvdata(&snd_soc_card_mrfld, drv);
+	ret_val = snd_soc_register_card(&snd_soc_card_mrfld);
+	if (ret_val) {
+		pr_err("snd_soc_register_card failed %d\n", ret_val);
+		goto unalloc;
+	}
+	platform_set_drvdata(pdev, &snd_soc_card_mrfld);
+	pr_info("%s successful\n", __func__);
+	return ret_val;
+
+unalloc:
+	kfree(drv);
+	return ret_val;
+}
+
+static int snd_mrfld_8958_mc_remove(struct platform_device *pdev)
+{
+	struct snd_soc_card *soc_card = platform_get_drvdata(pdev);
+	struct mrfld_8958_mc_private *drv = snd_soc_card_get_drvdata(soc_card);
+
+	pr_debug("In %s\n", __func__);
+	kfree(drv);
+	snd_soc_card_set_drvdata(soc_card, NULL);
+	snd_soc_unregister_card(soc_card);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+const struct dev_pm_ops snd_mrfld_8958_mc_pm_ops = {
+	.prepare = snd_mrfld_8958_prepare,
+	.complete = snd_mrfld_8958_complete,
+	.poweroff = snd_mrfld_8958_poweroff,
+};
+
+static struct platform_driver snd_mrfld_8958_mc_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "mrfld_wm8958",
+		.pm = &snd_mrfld_8958_mc_pm_ops,
+	},
+	.probe = snd_mrfld_8958_mc_probe,
+	.remove = snd_mrfld_8958_mc_remove,
+};
+
+static int snd_mrfld_8958_driver_init(void)
+{
+	pr_info("Merrifield Machine Driver mrfld_wm8958 registered\n");
+	return platform_driver_register(&snd_mrfld_8958_mc_driver);
+}
+
+static void snd_mrfld_8958_driver_exit(void)
+{
+	pr_debug("In %s\n", __func__);
+	platform_driver_unregister(&snd_mrfld_8958_mc_driver);
+}
+
+static int snd_mrfld_8958_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed snd_mrfld wm8958 rpmsg device\n");
+
+	ret = snd_mrfld_8958_driver_init();
+
+out:
+	return ret;
+}
+
+static void snd_mrfld_8958_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	snd_mrfld_8958_driver_exit();
+	dev_info(&rpdev->dev, "Removed snd_mrfld wm8958 rpmsg device\n");
+}
+
+static void snd_mrfld_8958_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+				int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+			data, len,  true);
+}
+
+static struct rpmsg_device_id snd_mrfld_8958_rpmsg_id_table[] = {
+	{ .name = "rpmsg_mrfld_wm8958_audio" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, snd_mrfld_8958_rpmsg_id_table);
+
+static struct rpmsg_driver snd_mrfld_8958_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= snd_mrfld_8958_rpmsg_id_table,
+	.probe		= snd_mrfld_8958_rpmsg_probe,
+	.callback	= snd_mrfld_8958_rpmsg_cb,
+	.remove		= snd_mrfld_8958_rpmsg_remove,
+};
+
+static int __init snd_mrfld_8958_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&snd_mrfld_8958_rpmsg);
+}
+late_initcall(snd_mrfld_8958_rpmsg_init);
+
+static void __exit snd_mrfld_8958_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&snd_mrfld_8958_rpmsg);
+}
+module_exit(snd_mrfld_8958_rpmsg_exit);
+
+MODULE_DESCRIPTION("ASoC Intel(R) Merrifield MID Machine driver");
+MODULE_AUTHOR("Vinod Koul <vinod.koul@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mrfld_wm8958");
diff --git a/sound/soc/intel/board/merr_saltbay_wm8958.c b/sound/soc/intel/board/merr_saltbay_wm8958.c
new file mode 100644
index 0000000..ef63154
--- /dev/null
+++ b/sound/soc/intel/board/merr_saltbay_wm8958.c
@@ -0,0 +1,887 @@
+/*
+ *  merr_saltbay_wm8958.c - ASoC Machine driver for Intel Merrifield MID platform
+ *
+ *  Copyright (C) 2013 Intel Corp
+ *  Author: Vinod Koul <vinod.koul@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/async.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_mid_rpmsg.h>
+#include <asm/platform_mrfld_audio.h>
+#include <asm/intel_sst_mrfld.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include <linux/input.h>
+#include <asm/intel-mid.h>
+
+#include <linux/mfd/wm8994/core.h>
+#include <linux/mfd/wm8994/registers.h>
+#include <linux/mfd/wm8994/pdata.h>
+#include "../../codecs/wm8994.h"
+
+/* Codec PLL output clk rate */
+#define CODEC_SYSCLK_RATE			24576000
+/* Input clock to codec at MCLK1 PIN */
+#define CODEC_IN_MCLK1_RATE			19200000
+/* Input clock to codec at MCLK2 PIN */
+#define CODEC_IN_MCLK2_RATE			32768
+/*  define to select between MCLK1 and MCLK2 input to codec as its clock */
+#define CODEC_IN_MCLK1				1
+#define CODEC_IN_MCLK2				2
+
+/* Register address for OSC Clock */
+#define MERR_OSC_CLKOUT_CTRL0_REG_ADDR  0xFF00BC04
+/* Size of osc clock register */
+#define MERR_OSC_CLKOUT_CTRL0_REG_SIZE  4
+
+struct mrfld_8958_mc_private {
+	struct snd_soc_jack jack;
+	int jack_retry;
+	u8 pmic_id;
+	void __iomem    *osc_clk0_reg;
+};
+
+/* set_soc_osc_clk0 -	enables/disables the osc clock0
+ * addr:		address of the register to write to
+ * enable:		bool to enable or disable the clock
+ */
+static inline void set_soc_osc_clk0(void __iomem *addr, bool enable)
+{
+	u32 osc_clk_ctrl;
+
+	osc_clk_ctrl = readl(addr);
+	if (enable)
+		osc_clk_ctrl |= BIT(31);
+	else
+		osc_clk_ctrl &= ~(BIT(31));
+
+	pr_debug("%s: enable:%d val 0x%x\n", __func__, enable, osc_clk_ctrl);
+
+	writel(osc_clk_ctrl, addr);
+}
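+
+/* Call-pattern sketch (as wired up later in this file): the 19.2MHz
+ * oscillator is gated around audio activity, e.g.
+ *
+ *	set_soc_osc_clk0(ctx->osc_clk0_reg, true);	// before starting FLL1
+ *	...
+ *	set_soc_osc_clk0(ctx->osc_clk0_reg, false);	// after moving to MCLK2
+ */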
+
+static inline struct snd_soc_codec *mrfld_8958_get_codec(struct snd_soc_card *card)
+{
+	bool found = false;
+	struct snd_soc_codec *codec;
+
+	list_for_each_entry(codec, &card->codec_dev_list, card_list) {
+		if (!strstr(codec->name, "wm8994-codec")) {
+			pr_debug("codec was %s\n", codec->name);
+			continue;
+		} else {
+			found = true;
+			break;
+		}
+	}
+	if (!found) {
+		pr_err("%s: can't find codec\n", __func__);
+		return NULL;
+	}
+	return codec;
+}
+
+/* Function to switch the input clock for codec,  When audio is in
+ * progress input clock to codec will be through MCLK1 which is 19.2MHz
+ * while in off state input clock to codec will be through 32KHz through
+ * MCLK2
+ * card	: Sound card structure
+ * src	: Input clock source to codec
+ */
+static int mrfld_8958_set_codec_clk(struct snd_soc_card *card, int src)
+{
+	struct snd_soc_dai *aif1_dai = card->rtd[0].codec_dai;
+	int ret;
+
+	switch (src) {
+	case CODEC_IN_MCLK1:
+		/* Turn ON the PLL to generate required sysclk rate
+		 * from MCLK1 */
+		ret = snd_soc_dai_set_pll(aif1_dai,
+			WM8994_FLL1, WM8994_FLL_SRC_MCLK1,
+			CODEC_IN_MCLK1_RATE, CODEC_SYSCLK_RATE);
+		if (ret < 0) {
+			pr_err("Failed to start FLL: %d\n", ret);
+			return ret;
+		}
+		/* Switch to MCLK1 input */
+		ret = snd_soc_dai_set_sysclk(aif1_dai, WM8994_SYSCLK_FLL1,
+				CODEC_SYSCLK_RATE, SND_SOC_CLOCK_IN);
+		if (ret < 0) {
+			pr_err("Failed to set codec sysclk configuration %d\n",
+				 ret);
+			return ret;
+		}
+		break;
+	case CODEC_IN_MCLK2:
+		/* Switch to MCLK2 */
+		ret = snd_soc_dai_set_sysclk(aif1_dai, WM8994_SYSCLK_MCLK2,
+				32768, SND_SOC_CLOCK_IN);
+		if (ret < 0) {
+			pr_err("Failed to switch to MCLK2: %d", ret);
+			return ret;
+		}
+		/* Turn off PLL for MCLK1 */
+		ret = snd_soc_dai_set_pll(aif1_dai, WM8994_FLL1, 0, 0, 0);
+		if (ret < 0) {
+			pr_err("Failed to stop the FLL: %d", ret);
+			return ret;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
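+
+/* Worked example of the two clock states above: when active, FLL1
+ * multiplies the 19.2MHz MCLK1 up to the 24.576MHz sysclk (512 * 48kHz)
+ * and AIF1 runs from WM8994_SYSCLK_FLL1; when idle, AIF1 runs directly
+ * from the always-on 32768Hz MCLK2 and FLL1 is stopped to save power.
+ */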
+
+static int mrfld_wm8958_set_clk_fmt(struct snd_soc_dai *codec_dai)
+{
+	unsigned int fmt;
+	int ret = 0;
+	struct snd_soc_card *card = codec_dai->card;
+	struct mrfld_8958_mc_private *ctx = snd_soc_card_get_drvdata(card);
+
+	/* Enable the osc clock at start so that it gets settling time */
+	set_soc_osc_clk0(ctx->osc_clk0_reg, true);
+
+	ret = snd_soc_dai_set_tdm_slot(codec_dai, 0, 0, 4, SNDRV_PCM_FORMAT_S24_LE);
+	if (ret < 0) {
+		pr_err("can't set codec pcm format %d\n", ret);
+		return ret;
+	}
+
+	/* WM8958 slave Mode */
+	fmt =   SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_IB_NF
+		| SND_SOC_DAIFMT_CBS_CFS;
+	ret = snd_soc_dai_set_fmt(codec_dai, fmt);
+	if (ret < 0) {
+		pr_err("can't set codec DAI configuration %d\n", ret);
+		return ret;
+	}
+
+	/* FIXME: move this to SYS_CLOCK event handler when codec driver
+	 * dependency is clean.
+	 */
+	/* Switch to 19.2MHz MCLK1 input clock for codec */
+	ret = mrfld_8958_set_codec_clk(card, CODEC_IN_MCLK1);
+
+	return ret;
+}
+
+static int mrfld_8958_hw_params(struct snd_pcm_substream *substream,
+			   struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+
+	return mrfld_wm8958_set_clk_fmt(codec_dai);
+}
+
+static int mrfld_wm8958_compr_set_params(struct snd_compr_stream *cstream)
+{
+	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+
+	return mrfld_wm8958_set_clk_fmt(codec_dai);
+}
+
+static int mrfld_8958_set_bias_level(struct snd_soc_card *card,
+		struct snd_soc_dapm_context *dapm,
+		enum snd_soc_bias_level level)
+{
+	struct snd_soc_dai *aif1_dai = card->rtd[0].codec_dai;
+	int ret = 0;
+
+	if (dapm->dev != aif1_dai->dev)
+		return 0;
+	switch (level) {
+	case SND_SOC_BIAS_PREPARE:
+		if (card->dapm.bias_level == SND_SOC_BIAS_STANDBY)
+			ret = mrfld_wm8958_set_clk_fmt(aif1_dai);
+		break;
+	default:
+		break;
+	}
+	pr_debug("%s card(%s)->bias_level %u\n", __func__, card->name,
+			card->dapm.bias_level);
+	return ret;
+}
+
+static int mrfld_8958_set_bias_level_post(struct snd_soc_card *card,
+		 struct snd_soc_dapm_context *dapm,
+		 enum snd_soc_bias_level level)
+{
+	struct snd_soc_dai *aif1_dai = card->rtd[0].codec_dai;
+	struct mrfld_8958_mc_private *ctx = snd_soc_card_get_drvdata(card);
+	int ret = 0;
+
+	if (dapm->dev != aif1_dai->dev)
+		return 0;
+
+	switch (level) {
+	case SND_SOC_BIAS_STANDBY:
+		/* We are in standby, so switch to the 32KHz MCLK2
+		 * input clock for the codec
+		 */
+		ret = mrfld_8958_set_codec_clk(card, CODEC_IN_MCLK2);
+		/* Turn off 19.2MHz soc osc clock */
+		set_soc_osc_clk0(ctx->osc_clk0_reg, false);
+		break;
+	default:
+		break;
+	}
+	card->dapm.bias_level = level;
+	pr_debug("%s card(%s)->bias_level %u\n", __func__, card->name,
+			card->dapm.bias_level);
+	return ret;
+}
+
+#define PMIC_ID_ADDR		0x00
+#define PMIC_CHIP_ID_A0_VAL	0xC0
+
+static int mrfld_8958_set_vflex_vsel(struct snd_soc_dapm_widget *w,
+				struct snd_kcontrol *k, int event)
+{
+#define VFLEXCNT		0xAB
+#define VFLEXVSEL_5V		0x01
+#define VFLEXVSEL_B0_VSYS_PT	0x80	/* B0: Vsys pass-through */
+#define VFLEXVSEL_A0_4P5V	0x41	/* A0: 4.5V */
+
+	struct snd_soc_dapm_context *dapm = w->dapm;
+	struct snd_soc_card *card = dapm->card;
+	struct mrfld_8958_mc_private *ctx = snd_soc_card_get_drvdata(card);
+
+	u8 vflexvsel, pmic_id = ctx->pmic_id;
+	int retval = 0;
+
+	pr_debug("%s: ON? %d\n", __func__, SND_SOC_DAPM_EVENT_ON(event));
+
+	vflexvsel = (pmic_id == PMIC_CHIP_ID_A0_VAL) ? VFLEXVSEL_A0_4P5V : VFLEXVSEL_B0_VSYS_PT;
+	pr_debug("pmic_id %#x vflexvsel %#x\n", pmic_id,
+		SND_SOC_DAPM_EVENT_ON(event) ? VFLEXVSEL_5V : vflexvsel);
+
+	/* FIXME: there seems to be an issue with bypass mode on MOOR, so for
+	 * now force the bias-off voltage to VFLEXVSEL_5V
+	 */
+	if ((INTEL_MID_BOARD(1, PHONE, MOFD)) ||
+			(INTEL_MID_BOARD(1, TABLET, MOFD)))
+		vflexvsel = VFLEXVSEL_5V;
+
+	if (SND_SOC_DAPM_EVENT_ON(event))
+		retval = intel_scu_ipc_iowrite8(VFLEXCNT, VFLEXVSEL_5V);
+	else if (SND_SOC_DAPM_EVENT_OFF(event))
+		retval = intel_scu_ipc_iowrite8(VFLEXCNT, vflexvsel);
+	if (retval)
+		pr_err("Error writing to VFLEXCNT register\n");
+
+	return retval;
+}
+
+static const struct snd_soc_dapm_widget widgets[] = {
+	SND_SOC_DAPM_HP("Headphones", NULL),
+	SND_SOC_DAPM_MIC("AMIC", NULL),
+	SND_SOC_DAPM_MIC("DMIC", NULL),
+	SND_SOC_DAPM_SUPPLY("VFLEXCNT", SND_SOC_NOPM, 0, 0,
+			mrfld_8958_set_vflex_vsel,
+			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+};
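+
+/* The VFLEXCNT supply widget above runs mrfld_8958_set_vflex_vsel() at
+ * PRE_PMU/POST_PMD, so any DAPM path routed through "VFLEXCNT" (see the
+ * AIF1 DAC/ADC routes below) raises the PMIC flex rail to 5V before the
+ * audio path powers up and restores the idle selection after power-down.
+ */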
+
+static const struct snd_soc_dapm_route map[] = {
+	{ "Headphones", NULL, "HPOUT1L" },
+	{ "Headphones", NULL, "HPOUT1R" },
+
+	/* saltbay uses 2 DMICs, other configs may use more so change below
+	 * accordingly
+	 */
+	{ "DMIC1DAT", NULL, "DMIC" },
+	{ "DMIC2DAT", NULL, "DMIC" },
+	/*{ "DMIC3DAT", NULL, "DMIC" },*/
+	/*{ "DMIC4DAT", NULL, "DMIC" },*/
+
+	/* MICBIAS2 is connected as Bias for AMIC so we link it
+	 * here. Also AMIC wires up to IN1LP pin.
+	 * DMIC is externally connected to 1.8V rail, so no link rqd.
+	 */
+	{ "AMIC", NULL, "MICBIAS2" },
+	{ "IN1LP", NULL, "AMIC" },
+
+	/* SWM map link the SWM outs to codec AIF */
+	{ "AIF1DAC1L", NULL, "Codec OUT0"  },
+	{ "AIF1DAC1R", NULL, "Codec OUT0"  },
+	{ "AIF1DAC2L", NULL, "Codec OUT1"  },
+	{ "AIF1DAC2R", NULL, "Codec OUT1"  },
+	{ "Codec IN0", NULL, "AIF1ADC1L" },
+	{ "Codec IN0", NULL, "AIF1ADC1R" },
+	{ "Codec IN1", NULL, "AIF1ADC1L" },
+	{ "Codec IN1", NULL, "AIF1ADC1R" },
+
+	{ "AIF1DAC1L", NULL, "VFLEXCNT" },
+	{ "AIF1DAC1R", NULL, "VFLEXCNT" },
+	{ "AIF1DAC2L", NULL, "VFLEXCNT" },
+	{ "AIF1DAC2R", NULL, "VFLEXCNT" },
+
+	{ "AIF1ADC1L", NULL, "VFLEXCNT" },
+	{ "AIF1ADC1R", NULL, "VFLEXCNT" },
+
+};
+
+static const struct wm8958_micd_rate micdet_rates[] = {
+	{ 32768,       true,  1, 4 },
+	{ 32768,       false, 1, 1 },
+	{ 44100 * 256, true,  7, 10 },
+	{ 44100 * 256, false, 7, 10 },
+};
+
+static void wm8958_custom_micd_set_rate(struct snd_soc_codec *codec)
+{
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	struct wm8994 *control = dev_get_drvdata(codec->dev->parent);
+	int best, i, sysclk, val;
+	bool idle;
+	const struct wm8958_micd_rate *rates;
+	int num_rates;
+
+	idle = !wm8994->jack_mic;
+
+	sysclk = snd_soc_read(codec, WM8994_CLOCKING_1);
+	if (sysclk & WM8994_SYSCLK_SRC)
+		sysclk = wm8994->aifclk[1];
+	else
+		sysclk = wm8994->aifclk[0];
+
+	if (control->pdata.micd_rates) {
+		rates = control->pdata.micd_rates;
+		num_rates = control->pdata.num_micd_rates;
+	} else {
+		rates = micdet_rates;
+		num_rates = ARRAY_SIZE(micdet_rates);
+	}
+
+	best = 0;
+	for (i = 0; i < num_rates; i++) {
+		if (rates[i].idle != idle)
+			continue;
+		if (abs(rates[i].sysclk - sysclk) <
+		    abs(rates[best].sysclk - sysclk))
+			best = i;
+		else if (rates[best].idle != idle)
+			best = i;
+	}
+
+	val = rates[best].start << WM8958_MICD_BIAS_STARTTIME_SHIFT
+		| rates[best].rate << WM8958_MICD_RATE_SHIFT;
+
+	dev_dbg(codec->dev, "MICD rate %d,%d for %dHz %s\n",
+		rates[best].start, rates[best].rate, sysclk,
+		idle ? "idle" : "active");
+
+	snd_soc_update_bits(codec, WM8958_MIC_DETECT_1,
+			    WM8958_MICD_BIAS_STARTTIME_MASK |
+			    WM8958_MICD_RATE_MASK, val);
+}
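+
+/* Worked example with the default micdet_rates table above: when AIF1 is
+ * clocked from FLL1 the sysclk is 24576000Hz, so for an active jack
+ * (idle == false) the closest row is { 44100 * 256, false, 7, 10 }, since
+ * |11289600 - 24576000| < |32768 - 24576000|; micdetect then runs with
+ * bias start time 7 and rate 10 in WM8958_MIC_DETECT_1.
+ */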
+
+static void wm8958_custom_mic_id(void *data, u16 status)
+{
+	struct snd_soc_codec *codec = data;
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "wm8958 custom mic id called with status %x\n",
+		status);
+
+	/* Either nothing present or just starting detection */
+	if (!(status & WM8958_MICD_STS)) {
+		/* If nothing present then clear our statuses */
+		dev_dbg(codec->dev, "Detected open circuit\n");
+
+		schedule_delayed_work(&wm8994->open_circuit_work,
+				      msecs_to_jiffies(2500));
+		return;
+	}
+
+	schedule_delayed_work(&wm8994->micd_set_custom_rate_work,
+		msecs_to_jiffies(wm8994->wm8994->pdata.micb_en_delay));
+
+	/* If the measurement is showing a high impedance we've got a
+	 * microphone.
+	 */
+	if (status & 0x600) {
+		dev_dbg(codec->dev, "Detected microphone\n");
+
+		wm8994->mic_detecting = false;
+		wm8994->jack_mic = true;
+		wm8994->headphone_detected = false;
+
+		snd_soc_jack_report(wm8994->micdet[0].jack, SND_JACK_HEADSET,
+				    SND_JACK_HEADSET);
+	}
+
+	if (status & 0xfc) {
+		dev_dbg(codec->dev, "Detected headphone\n");
+
+		/* Partial inserts of headsets with complete insert
+		 * after an indeterminate amount of time require
+		 * continuous micdetect enabled (until open circuit
+		 * or headset is detected)
+		 */
+		wm8994->mic_detecting = true;
+
+		wm8994->jack_mic = false;
+		wm8994->headphone_detected = true;
+
+		snd_soc_jack_report(wm8994->micdet[0].jack, SND_JACK_HEADPHONE,
+				    SND_JACK_HEADSET);
+	}
+}
+
+static int mrfld_8958_init(struct snd_soc_pcm_runtime *runtime)
+{
+	int ret;
+	unsigned int fmt;
+	struct snd_soc_codec *codec = runtime->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	struct snd_soc_card *card = runtime->card;
+	struct snd_soc_dai *aif1_dai = card->rtd[0].codec_dai;
+	struct mrfld_8958_mc_private *ctx = snd_soc_card_get_drvdata(card);
+
+	pr_debug("Entry %s\n", __func__);
+
+	ret = snd_soc_dai_set_tdm_slot(aif1_dai, 0, 0, 4, SNDRV_PCM_FORMAT_S24_LE);
+	if (ret < 0) {
+		pr_err("can't set codec pcm format %d\n", ret);
+		return ret;
+	}
+
+	/* WM8958 slave Mode */
+	fmt =   SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_IB_NF
+		| SND_SOC_DAIFMT_CBS_CFS;
+	ret = snd_soc_dai_set_fmt(aif1_dai, fmt);
+	if (ret < 0) {
+		pr_err("can't set codec DAI configuration %d\n", ret);
+		return ret;
+	}
+
+	mrfld_8958_set_bias_level(card, dapm, SND_SOC_BIAS_OFF);
+	card->dapm.idle_bias_off = true;
+
+	/* these pins are not used in SB config so mark as nc
+	 *
+	 * LINEOUT1, 2
+	 * IN1R
+	 * DMICDAT2
+	 */
+	snd_soc_dapm_nc_pin(dapm, "DMIC2DAT");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT1P");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT1N");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT2P");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT2N");
+	snd_soc_dapm_nc_pin(dapm, "IN1RN");
+	snd_soc_dapm_nc_pin(dapm, "IN1RP");
+
+	/* Force enable VMID to avoid cold latency constraints */
+	snd_soc_dapm_force_enable_pin(dapm, "VMID");
+	snd_soc_dapm_sync(dapm);
+
+	ctx->jack_retry = 0;
+	ret = snd_soc_jack_new(codec, "Intel MID Audio Jack",
+			       SND_JACK_HEADSET | SND_JACK_HEADPHONE |
+				SND_JACK_BTN_0 | SND_JACK_BTN_1,
+				&ctx->jack);
+	if (ret) {
+		pr_err("jack creation failed\n");
+		return ret;
+	}
+
+	snd_jack_set_key(ctx->jack.jack, SND_JACK_BTN_1, KEY_MEDIA);
+	snd_jack_set_key(ctx->jack.jack, SND_JACK_BTN_0, KEY_MEDIA);
+
+	wm8958_mic_detect(codec, &ctx->jack, NULL, NULL,
+			  wm8958_custom_mic_id, codec);
+
+	wm8958_micd_set_custom_rate(codec, wm8958_custom_micd_set_rate, codec);
+
+	snd_soc_update_bits(codec, WM8994_AIF1_DAC1_FILTERS_1, WM8994_AIF1DAC1_MUTE, 0);
+	snd_soc_update_bits(codec, WM8994_AIF1_DAC2_FILTERS_1, WM8994_AIF1DAC2_MUTE, 0);
+
+	/* Micbias1 is always off, so for pm optimizations make sure the micbias1
+	 * discharge bit is set to floating to avoid discharge in disable state
+	 */
+	snd_soc_update_bits(codec, WM8958_MICBIAS1, WM8958_MICB1_DISCH, 0);
+
+	return 0;
+}
+
+static unsigned int rates_8000_16000[] = {
+	8000,
+	16000,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_8000_16000 = {
+	.count = ARRAY_SIZE(rates_8000_16000),
+	.list  = rates_8000_16000,
+};
+
+static unsigned int rates_48000[] = {
+	48000,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_48000 = {
+	.count = ARRAY_SIZE(rates_48000),
+	.list  = rates_48000,
+};
+
+static int mrfld_8958_startup(struct snd_pcm_substream *substream)
+{
+	return snd_pcm_hw_constraint_list(substream->runtime, 0,
+			SNDRV_PCM_HW_PARAM_RATE,
+			&constraints_48000);
+}
+
+static struct snd_soc_ops mrfld_8958_ops = {
+	.startup = mrfld_8958_startup,
+	.hw_params = mrfld_8958_hw_params,
+};
+
+static int mrfld_8958_8k_16k_startup(struct snd_pcm_substream *substream)
+{
+	return snd_pcm_hw_constraint_list(substream->runtime, 0,
+			SNDRV_PCM_HW_PARAM_RATE,
+			&constraints_8000_16000);
+}
+
+static struct snd_soc_ops mrfld_8958_8k_16k_ops = {
+	.startup = mrfld_8958_8k_16k_startup,
+	.hw_params = mrfld_8958_hw_params,
+};
+
+static struct snd_soc_compr_ops mrfld_compr_ops = {
+	.set_params = mrfld_wm8958_compr_set_params,
+};
+
+struct snd_soc_dai_link mrfld_8958_msic_dailink[] = {
+	[MERR_SALTBAY_AUDIO] = {
+		.name = "Merrifield Audio Port",
+		.stream_name = "Audio",
+		.cpu_dai_name = "Headset-cpu-dai",
+		.codec_dai_name = "wm8994-aif1",
+		.codec_name = "wm8994-codec",
+		.platform_name = "sst-platform",
+		.init = mrfld_8958_init,
+		.ignore_suspend = 1,
+		.ops = &mrfld_8958_ops,
+		.playback_count = 3,
+	},
+	[MERR_SALTBAY_COMPR] = {
+		.name = "Merrifield Compress Port",
+		.stream_name = "Compress",
+		.platform_name = "sst-platform",
+		.cpu_dai_name = "Compress-cpu-dai",
+		.codec_dai_name = "wm8994-aif1",
+		.codec_name = "wm8994-codec",
+		.compr_ops = &mrfld_compr_ops,
+	},
+	[MERR_SALTBAY_VOIP] = {
+		.name = "Merrifield VOIP Port",
+		.stream_name = "Voip",
+		.cpu_dai_name = "Voip-cpu-dai",
+		.codec_dai_name = "wm8994-aif1",
+		.codec_name = "wm8994-codec",
+		.platform_name = "sst-platform",
+		.init = NULL,
+		.ignore_suspend = 1,
+		.ops = &mrfld_8958_8k_16k_ops,
+	},
+	[MERR_SALTBAY_PROBE] = {
+		.name = "Merrifield Probe Port",
+		.stream_name = "Probe",
+		.cpu_dai_name = "Probe-cpu-dai",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.platform_name = "sst-platform",
+		.playback_count = 8,
+		.capture_count = 8,
+	},
+	[MERR_SALTBAY_AWARE] = {
+		.name = "Merrifield Aware Port",
+		.stream_name = "Aware",
+		.cpu_dai_name = "Loopback-cpu-dai",
+		.codec_dai_name = "wm8994-aif1",
+		.codec_name = "wm8994-codec",
+		.platform_name = "sst-platform",
+		.init = NULL,
+		.ignore_suspend = 1,
+		.ops = &mrfld_8958_8k_16k_ops,
+	},
+	[MERR_SALTBAY_VAD] = {
+		.name = "Merrifield VAD Port",
+		.stream_name = "Vad",
+		.cpu_dai_name = "Loopback-cpu-dai",
+		.codec_dai_name = "wm8994-aif1",
+		.codec_name = "wm8994-codec",
+		.platform_name = "sst-platform",
+		.init = NULL,
+		.ignore_suspend = 1,
+		.ops = &mrfld_8958_8k_16k_ops,
+	},
+	[MERR_SALTBAY_POWER] = {
+		.name = "Virtual Power Port",
+		.stream_name = "Power",
+		.cpu_dai_name = "Power-cpu-dai",
+		.platform_name = "sst-platform",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+	},
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int snd_mrfld_8958_prepare(struct device *dev)
+{
+	struct snd_soc_card *card = dev_get_drvdata(dev);
+	struct snd_soc_codec *codec;
+	struct snd_soc_dapm_context *dapm;
+
+	pr_debug("In %s\n", __func__);
+
+	codec = mrfld_8958_get_codec(card);
+	if (!codec) {
+		pr_err("%s: couldn't find the codec pointer!\n", __func__);
+		return -EAGAIN;
+	}
+
+	pr_debug("found codec %s\n", codec->name);
+	dapm = &codec->dapm;
+
+	snd_soc_dapm_disable_pin(dapm, "VMID");
+	snd_soc_dapm_sync(dapm);
+
+	snd_soc_suspend(dev);
+	return 0;
+}
+
+static void snd_mrfld_8958_complete(struct device *dev)
+{
+	struct snd_soc_card *card = dev_get_drvdata(dev);
+	struct snd_soc_codec *codec;
+	struct snd_soc_dapm_context *dapm;
+
+	pr_debug("In %s\n", __func__);
+
+	codec = mrfld_8958_get_codec(card);
+	if (!codec) {
+		pr_err("%s: couldn't find the codec pointer!\n", __func__);
+		return;
+	}
+
+	pr_debug("found codec %s\n", codec->name);
+	dapm = &codec->dapm;
+
+	snd_soc_dapm_force_enable_pin(dapm, "VMID");
+	snd_soc_dapm_sync(dapm);
+
+	snd_soc_resume(dev);
+	return;
+}
+
+static int snd_mrfld_8958_poweroff(struct device *dev)
+{
+	pr_debug("In %s\n", __func__);
+	snd_soc_poweroff(dev);
+	return 0;
+}
+#else
+#define snd_mrfld_8958_prepare NULL
+#define snd_mrfld_8958_complete NULL
+#define snd_mrfld_8958_poweroff NULL
+#endif
+
+/* SoC card */
+static struct snd_soc_card snd_soc_card_mrfld = {
+	.name = "wm8958-audio",
+	.dai_link = mrfld_8958_msic_dailink,
+	.num_links = ARRAY_SIZE(mrfld_8958_msic_dailink),
+	.set_bias_level = mrfld_8958_set_bias_level,
+	.set_bias_level_post = mrfld_8958_set_bias_level_post,
+	.dapm_widgets = widgets,
+	.num_dapm_widgets = ARRAY_SIZE(widgets),
+	.dapm_routes = map,
+	.num_dapm_routes = ARRAY_SIZE(map),
+};
+
+static int snd_mrfld_8958_mc_probe(struct platform_device *pdev)
+{
+	int ret_val = 0;
+	struct mrfld_8958_mc_private *drv;
+
+	pr_debug("Entry %s\n", __func__);
+
+	drv = kzalloc(sizeof(*drv), GFP_ATOMIC);
+	if (!drv) {
+		pr_err("allocation failed\n");
+		return -ENOMEM;
+	}
+
+	/* ioremap the register */
+	drv->osc_clk0_reg = devm_ioremap_nocache(&pdev->dev,
+					MERR_OSC_CLKOUT_CTRL0_REG_ADDR,
+					MERR_OSC_CLKOUT_CTRL0_REG_SIZE);
+	if (!drv->osc_clk0_reg) {
+		pr_err("osc clk0 ctrl ioremap failed\n");
+		ret_val = -1;
+		goto unalloc;
+	}
+
+	ret_val = intel_scu_ipc_ioread8(PMIC_ID_ADDR, &drv->pmic_id);
+	if (ret_val) {
+		pr_err("Error reading PMIC ID register\n");
+		goto unalloc;
+	}
+
+	/* register the soc card */
+	snd_soc_card_mrfld.dev = &pdev->dev;
+	snd_soc_card_set_drvdata(&snd_soc_card_mrfld, drv);
+	ret_val = snd_soc_register_card(&snd_soc_card_mrfld);
+	if (ret_val) {
+		pr_err("snd_soc_register_card failed %d\n", ret_val);
+		goto unalloc;
+	}
+	platform_set_drvdata(pdev, &snd_soc_card_mrfld);
+	pr_info("%s successful\n", __func__);
+	return ret_val;
+
+unalloc:
+	kfree(drv);
+	return ret_val;
+}
+
+static int snd_mrfld_8958_mc_remove(struct platform_device *pdev)
+{
+	struct snd_soc_card *soc_card = platform_get_drvdata(pdev);
+	struct mrfld_8958_mc_private *drv = snd_soc_card_get_drvdata(soc_card);
+
+	pr_debug("In %s\n", __func__);
+	kfree(drv);
+	snd_soc_card_set_drvdata(soc_card, NULL);
+	snd_soc_unregister_card(soc_card);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+const struct dev_pm_ops snd_mrfld_8958_mc_pm_ops = {
+	.prepare = snd_mrfld_8958_prepare,
+	.complete = snd_mrfld_8958_complete,
+	.poweroff = snd_mrfld_8958_poweroff,
+};
+
+static struct platform_driver snd_mrfld_8958_mc_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "mrfld_wm8958",
+		.pm = &snd_mrfld_8958_mc_pm_ops,
+	},
+	.probe = snd_mrfld_8958_mc_probe,
+	.remove = snd_mrfld_8958_mc_remove,
+};
+
+static int snd_mrfld_8958_driver_init(void)
+{
+	pr_info("Merrifield Machine Driver mrfld_wm8958 registered\n");
+	return platform_driver_register(&snd_mrfld_8958_mc_driver);
+}
+
+static void snd_mrfld_8958_driver_exit(void)
+{
+	pr_debug("In %s\n", __func__);
+	platform_driver_unregister(&snd_mrfld_8958_mc_driver);
+}
+
+static int snd_mrfld_8958_rpmsg_probe(struct rpmsg_channel *rpdev)
+{
+	int ret = 0;
+
+	if (rpdev == NULL) {
+		pr_err("rpmsg channel not created\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dev_info(&rpdev->dev, "Probed snd_mrfld wm8958 rpmsg device\n");
+
+	ret = snd_mrfld_8958_driver_init();
+
+out:
+	return ret;
+}
+
+static void snd_mrfld_8958_rpmsg_remove(struct rpmsg_channel *rpdev)
+{
+	snd_mrfld_8958_driver_exit();
+	dev_info(&rpdev->dev, "Removed snd_mrfld wm8958 rpmsg device\n");
+}
+
+static void snd_mrfld_8958_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
+				int len, void *priv, u32 src)
+{
+	dev_warn(&rpdev->dev, "unexpected message\n");
+
+	print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1,
+			data, len,  true);
+}
+
+static struct rpmsg_device_id snd_mrfld_8958_rpmsg_id_table[] = {
+	{ .name = "rpmsg_mrfld_wm8958_audio" },
+	{ },
+};
+MODULE_DEVICE_TABLE(rpmsg, snd_mrfld_8958_rpmsg_id_table);
+
+static struct rpmsg_driver snd_mrfld_8958_rpmsg = {
+	.drv.name	= KBUILD_MODNAME,
+	.drv.owner	= THIS_MODULE,
+	.id_table	= snd_mrfld_8958_rpmsg_id_table,
+	.probe		= snd_mrfld_8958_rpmsg_probe,
+	.callback	= snd_mrfld_8958_rpmsg_cb,
+	.remove		= snd_mrfld_8958_rpmsg_remove,
+};
+
+static int __init snd_mrfld_8958_rpmsg_init(void)
+{
+	return register_rpmsg_driver(&snd_mrfld_8958_rpmsg);
+}
+late_initcall(snd_mrfld_8958_rpmsg_init);
+
+static void __exit snd_mrfld_8958_rpmsg_exit(void)
+{
+	return unregister_rpmsg_driver(&snd_mrfld_8958_rpmsg);
+}
+module_exit(snd_mrfld_8958_rpmsg_exit);
+
+MODULE_DESCRIPTION("ASoC Intel(R) Merrifield MID Machine driver");
+MODULE_AUTHOR("Vinod Koul <vinod.koul@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mrfld_wm8958");
diff --git a/sound/soc/intel/compress.c b/sound/soc/intel/compress.c
new file mode 100644
index 0000000..f067468
--- /dev/null
+++ b/sound/soc/intel/compress.c
@@ -0,0 +1,254 @@
+/*
+ *  compress.c - Intel MID Platform driver for Compress stream operations
+ *
+ *  Copyright (C) 2010-2013 Intel Corp
+ *  Author: Vinod Koul <vinod.koul@intel.com>
+ *  Author: Harsha Priya <priya.harsha@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <sound/soc.h>
+#include <sound/intel_sst_ioctl.h>
+#include "platform_ipc_v2.h"
+#include "sst_platform.h"
+#include "sst_platform_pvt.h"
+
+static void sst_compr_fragment_elapsed(void *arg)
+{
+	struct snd_compr_stream *cstream = (struct snd_compr_stream *)arg;
+
+	pr_debug("fragment elapsed by driver\n");
+	if (cstream)
+		snd_compr_fragment_elapsed(cstream);
+}
+
+static void sst_drain_notify(void *arg)
+{
+	struct snd_compr_stream *cstream = (struct snd_compr_stream *)arg;
+
+	pr_debug("drain notify by driver\n");
+	if (cstream)
+		snd_compr_drain_notify(cstream);
+}
+
+static int sst_platform_compr_open(struct snd_compr_stream *cstream)
+{
+	int ret_val = 0;
+	struct snd_compr_runtime *runtime = cstream->runtime;
+	struct sst_runtime_stream *stream;
+	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+	struct snd_soc_dai_link *dai_link = rtd->dai_link;
+
+	pr_debug("%s called:%s\n", __func__, dai_link->cpu_dai_name);
+
+	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+	if (!stream)
+		return -ENOMEM;
+
+	spin_lock_init(&stream->status_lock);
+
+	/* get the sst ops */
+	if (!sst_dsp || !try_module_get(sst_dsp->dev->driver->owner)) {
+		pr_err("no device available to run\n");
+		ret_val = -ENODEV;
+		goto out_ops;
+	}
+	stream->compr_ops = sst_dsp->compr_ops;
+
+	stream->id = 0;
+	sst_set_stream_status(stream, SST_PLATFORM_INIT);
+	runtime->private_data = stream;
+	return 0;
+out_ops:
+	kfree(stream);
+	return ret_val;
+}
+
+static int sst_platform_compr_free(struct snd_compr_stream *cstream)
+{
+	struct sst_runtime_stream *stream;
+	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+	struct snd_soc_dai_link *dai_link = rtd->dai_link;
+	int ret_val = 0, str_id;
+
+	stream = cstream->runtime->private_data;
+	/* close the DSP stream only if it was actually allocated (str_id != 0) */
+	str_id = stream->id;
+	if (str_id)
+		ret_val = stream->compr_ops->close(str_id);
+	module_put(sst_dsp->dev->driver->owner);
+	kfree(stream);
+	pr_debug("%s called for dai %s: ret = %d\n", __func__,
+				dai_link->cpu_dai_name, ret_val);
+	return 0;
+}
+
+static int sst_platform_compr_set_params(struct snd_compr_stream *cstream,
+					struct snd_compr_params *params)
+{
+	struct sst_runtime_stream *stream;
+	int retval = 0;
+	struct snd_sst_params str_params;
+	struct sst_compress_cb cb;
+	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+	struct snd_soc_platform *platform = rtd->platform;
+	struct sst_data *ctx = snd_soc_platform_get_drvdata(platform);
+
+	pr_debug("In function %s\n", __func__);
+	stream = cstream->runtime->private_data;
+	/* construct fw structure for this*/
+	memset(&str_params, 0, sizeof(str_params));
+
+	/* fill the device type and stream id to pass to SST driver */
+	retval = sst_fill_stream_params(cstream, ctx, &str_params, true);
+	pr_debug("compr_set_params: fill stream params ret_val = 0x%x\n", retval);
+	if (retval < 0)
+		return retval;
+
+	switch (params->codec.id) {
+	case SND_AUDIOCODEC_MP3: {
+		str_params.codec = SST_CODEC_TYPE_MP3;
+		str_params.sparams.uc.mp3_params.num_chan = params->codec.ch_in;
+		str_params.sparams.uc.mp3_params.pcm_wd_sz = 16;
+		break;
+	}
+
+	case SND_AUDIOCODEC_AAC: {
+		str_params.codec = SST_CODEC_TYPE_AAC;
+		str_params.sparams.uc.aac_params.num_chan = params->codec.ch_in;
+		str_params.sparams.uc.aac_params.pcm_wd_sz = 16;
+		if (params->codec.format == SND_AUDIOSTREAMFORMAT_MP4ADTS)
+			str_params.sparams.uc.aac_params.bs_format =
+							AAC_BIT_STREAM_ADTS;
+		else if (params->codec.format == SND_AUDIOSTREAMFORMAT_RAW)
+			str_params.sparams.uc.aac_params.bs_format =
+							AAC_BIT_STREAM_RAW;
+		else {
+			pr_err("Undefined format%d\n", params->codec.format);
+			return -EINVAL;
+		}
+		str_params.sparams.uc.aac_params.externalsr =
+						params->codec.sample_rate;
+		break;
+	}
+
+	default:
+		pr_err("codec not supported, id =%d\n", params->codec.id);
+		return -EINVAL;
+	}
+
+	str_params.aparams.ring_buf_info[0].addr  =
+					virt_to_phys(cstream->runtime->buffer);
+	str_params.aparams.ring_buf_info[0].size =
+					cstream->runtime->buffer_size;
+	str_params.aparams.sg_count = 1;
+	str_params.aparams.frag_size = cstream->runtime->fragment_size;
+
+	cb.param = cstream;
+	cb.compr_cb = sst_compr_fragment_elapsed;
+	cb.drain_cb_param = cstream;
+	cb.drain_notify = sst_drain_notify;
+
+	retval = stream->compr_ops->open(&str_params, &cb);
+	if (retval < 0) {
+		pr_err("stream allocation failed %d\n", retval);
+		return retval;
+	}
+
+	stream->id = retval;
+	return 0;
+}
+
+static int sst_platform_compr_trigger(struct snd_compr_stream *cstream, int cmd)
+{
+	struct sst_runtime_stream *stream =
+		cstream->runtime->private_data;
+
+	return stream->compr_ops->control(cmd, stream->id);
+}
+
+static int sst_platform_compr_pointer(struct snd_compr_stream *cstream,
+					struct snd_compr_tstamp *tstamp)
+{
+	struct sst_runtime_stream *stream;
+
+	stream  = cstream->runtime->private_data;
+	stream->compr_ops->tstamp(stream->id, tstamp);
+	tstamp->byte_offset = tstamp->copied_total %
+				 (u32)cstream->runtime->buffer_size;
+	pr_debug("calc bytes offset/copied bytes as %d\n", tstamp->byte_offset);
+	return 0;
+}
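+
+/* Worked example: with a 65536-byte ring buffer and copied_total at 70000,
+ * byte_offset becomes 70000 % 65536 = 4464; the app-visible offset wraps
+ * with the ring buffer while copied_total grows monotonically.
+ */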
+
+static int sst_platform_compr_ack(struct snd_compr_stream *cstream,
+					size_t bytes)
+{
+	struct sst_runtime_stream *stream;
+
+	stream  = cstream->runtime->private_data;
+	stream->compr_ops->ack(stream->id, (unsigned long)bytes);
+	stream->bytes_written += bytes;
+
+	return 0;
+}
+
+static int sst_platform_compr_get_caps(struct snd_compr_stream *cstream,
+					struct snd_compr_caps *caps)
+{
+	struct sst_runtime_stream *stream =
+		cstream->runtime->private_data;
+
+	return stream->compr_ops->get_caps(caps);
+}
+
+static int sst_platform_compr_get_codec_caps(struct snd_compr_stream *cstream,
+					struct snd_compr_codec_caps *codec)
+{
+	struct sst_runtime_stream *stream =
+		cstream->runtime->private_data;
+
+	return stream->compr_ops->get_codec_caps(codec);
+}
+
+static int sst_platform_compr_set_metadata(struct snd_compr_stream *cstream,
+					struct snd_compr_metadata *metadata)
+{
+	struct sst_runtime_stream *stream  =
+		 cstream->runtime->private_data;
+
+	return stream->compr_ops->set_metadata(stream->id, metadata);
+}
+
+struct snd_compr_ops sst_platform_compr_ops = {
+	.open = sst_platform_compr_open,
+	.free = sst_platform_compr_free,
+	.set_params = sst_platform_compr_set_params,
+	.set_metadata = sst_platform_compr_set_metadata,
+	.trigger = sst_platform_compr_trigger,
+	.pointer = sst_platform_compr_pointer,
+	.ack = sst_platform_compr_ack,
+	.get_caps = sst_platform_compr_get_caps,
+	.get_codec_caps = sst_platform_compr_get_codec_caps,
+};
diff --git a/sound/soc/intel/effects.c b/sound/soc/intel/effects.c
new file mode 100644
index 0000000..d03cdfa
--- /dev/null
+++ b/sound/soc/intel/effects.c
@@ -0,0 +1,405 @@
+/*
+ *  effects.c - platform file for effects interface
+ *
+ *  Copyright (C) 2013 Intel Corporation
+ *  Authors:	Samreen Nilofer <samreen.nilofer@intel.com>
+ *		Vinod Koul <vinod.koul@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+#include <linux/slab.h>
+#include <asm/platform_sst_audio.h>
+#include "platform_ipc_v2.h"
+#include "sst_platform.h"
+#include "sst_platform_pvt.h"
+
+extern struct sst_device *sst_dsp;
+extern struct device *sst_pdev;
+
+struct effect_uuid {
+	uint32_t timeLow;
+	uint16_t timeMid;
+	uint16_t timeHiAndVersion;
+	uint16_t clockSeq;
+	uint8_t node[6];
+};
+
+#define EFFECT_STRING_LEN_MAX 64
+
+enum sst_effect {
+	EFFECTS_CREATE = 0,
+	EFFECTS_DESTROY,
+	EFFECTS_SET_PARAMS,
+	EFFECTS_GET_PARAMS,
+};
+
+enum sst_mixer_output_mode {
+	SST_MEDIA0_OUT,
+	SST_MEDIA1_OUT,
+};
+
+static inline void sst_fill_byte_stream(struct snd_sst_bytes_v2 *bytes, u8 type,
+			u8 msg, u8 block, u8 task, u8 pipe_id, u16 len,
+			struct ipc_effect_payload *payload)
+{
+	u32 size = sizeof(struct ipc_effect_dsp_hdr);
+
+	bytes->type = type;
+	bytes->ipc_msg = msg;
+	bytes->block = block;
+	bytes->task_id = task;
+	bytes->pipe_id = pipe_id;
+	bytes->len = len;
+
+	/* Copy the ipc_effect_dsp_hdr followed by the data */
+	memcpy(bytes->bytes, payload, size);
+	memcpy(bytes->bytes + size, payload->data, len - size);
+}
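+
+/* Layout sketch of the byte stream built above:
+ *
+ *	bytes->bytes: [ ipc_effect_dsp_hdr ][ payload->data ........... ]
+ *	              |<--- header size --->|<-- len - sizeof(dsp hdr) -->|
+ *
+ * so 'len' always covers the DSP header plus the raw parameter data.
+ */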
+
+static int sst_send_effects(struct ipc_effect_payload *dsp_payload, int data_len,
+					enum sst_effect effect_type)
+{
+	struct snd_sst_bytes_v2 *bytes;
+	u32 len;
+	int ret;
+	u8 type, msg = IPC_INVALID, pipe, payload_len;
+	struct sst_data *sst;
+
+	if (!sst_pdev)
+		return -ENODEV;
+	sst =  dev_get_drvdata(sst_pdev);
+
+	len = sizeof(*bytes) + sizeof(struct ipc_effect_dsp_hdr) + data_len;
+
+	bytes = kzalloc(len, GFP_KERNEL);
+	if (!bytes) {
+		pr_err("kzalloc failed to allocate bytes\n");
+		return -ENOMEM;
+	}
+
+	switch (effect_type) {
+	case EFFECTS_CREATE:
+	case EFFECTS_DESTROY:
+		type = SND_SST_BYTES_SET;
+		msg = IPC_CMD;
+		break;
+
+	case EFFECTS_SET_PARAMS:
+		type = SND_SST_BYTES_SET;
+		msg = IPC_SET_PARAMS;
+		break;
+
+	case EFFECTS_GET_PARAMS:
+		type = SND_SST_BYTES_GET;
+		msg =  IPC_GET_PARAMS;
+		break;
+	default:
+		pr_err("No such effect %#x\n", effect_type);
+		ret = -EINVAL;
+		goto free_bytes;
+	}
+
+	pipe = dsp_payload->dsp_hdr.pipe_id;
+	payload_len = sizeof(struct ipc_effect_dsp_hdr) + data_len;
+	sst_fill_byte_stream(bytes, type, msg, 1, SST_TASK_ID_MEDIA,
+				pipe, payload_len, dsp_payload);
+
+	mutex_lock(&sst->lock);
+	ret = sst_dsp->ops->set_generic_params(SST_SET_BYTE_STREAM, bytes);
+	mutex_unlock(&sst->lock);
+
+	if (ret) {
+		pr_err("byte_stream failed err %d pipe_id %#x\n", ret,
+				dsp_payload->dsp_hdr.pipe_id);
+		goto free_bytes;
+	}
+
+	/* Copy only the data - skip the dsp header */
+	if (msg == IPC_GET_PARAMS)
+		memcpy(dsp_payload->data, bytes->bytes, data_len);
+
+free_bytes:
+	kfree(bytes);
+	return ret;
+}
+
+static int sst_get_algo_id(const struct sst_dev_effects *pdev_effs,
+					char *uuid, u16 *algo_id)
+{
+	int i, len;
+
+	len = pdev_effs->effs_num_map;
+
+	for (i = 0; i < len; i++) {
+		if (!strncmp(pdev_effs->effs_map[i].uuid, uuid, sizeof(struct effect_uuid))) {
+			*algo_id = pdev_effs->effs_map[i].algo_id;
+			return 0;
+		}
+	}
+	pr_err("no such uuid\n");
+	return -EINVAL;
+}
+
+static int sst_fill_effects_info(const struct sst_dev_effects *pdev_effs,
+					char *uuid, u16 pos,
+					struct ipc_dsp_effects_info *effs_info, u16 cmd_id)
+{
+	int i, len;
+
+	len = pdev_effs->effs_num_map;
+
+	for (i = 0; i < len; i++) {
+		if (!strncmp(pdev_effs->effs_map[i].uuid, uuid, sizeof(struct effect_uuid))) {
+
+			effs_info->cmd_id = cmd_id;
+			effs_info->length = (sizeof(struct ipc_dsp_effects_info) -
+						offsetof(struct ipc_dsp_effects_info, sel_pos));
+			effs_info->sel_pos = pos;
+			effs_info->sel_algo_id = pdev_effs->effs_map[i].algo_id;
+			effs_info->cpu_load = pdev_effs->effs_res_map[i].cpuLoad;
+			effs_info->memory_usage = pdev_effs->effs_res_map[i].memoryUsage;
+			effs_info->flags = pdev_effs->effs_res_map[i].flags;
+
+			return 0;
+		}
+	}
+
+	pr_err("no such uuid\n");
+	return -EINVAL;
+}
+
+static inline void sst_fill_dsp_payload(struct ipc_effect_payload *dsp_payload,
+					u8 pipe_id, u16 mod_id, char *data)
+{
+	dsp_payload->dsp_hdr.mod_index_id = 0xFF;
+	dsp_payload->dsp_hdr.pipe_id = pipe_id;
+	dsp_payload->dsp_hdr.mod_id = mod_id;
+	dsp_payload->data = data;
+}
+
+static int sst_get_pipe_id(struct sst_dev_stream_map *map, int map_size,
+				int dev, int mode, u8 *pipe_id)
+{
+	int index;
+
+	if (map == NULL)
+		return -EINVAL;
+
+	/* In case of global effects, dev will be 0xff */
+	if (dev == 0xFF) {
+		*pipe_id = (mode == SST_MEDIA0_OUT) ? PIPE_MEDIA0_OUT : PIPE_MEDIA1_OUT;
+		return 0;
+	}
+
+	for (index = 1; index < map_size; index++) {
+		if (map[index].dev_num == dev) {
+			*pipe_id = map[index].device_id;
+			break;
+		}
+	}
+
+	if (index == map_size) {
+		pr_err("no such device %d\n", dev);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static int sst_effects_create(struct snd_card *card, struct snd_effect *effect)
+{
+	int ret = 0;
+	u8 pipe_id = 0;
+	struct ipc_effect_payload dsp_payload;
+	struct ipc_dsp_effects_info effects_info;
+	struct sst_data *sst;
+
+	if (!sst_pdev)
+		return -ENODEV;
+	sst =  dev_get_drvdata(sst_pdev);
+
+	ret = sst_fill_effects_info(&sst->pdata->pdev_effs, effect->uuid, effect->pos,
+				 &effects_info, IPC_EFFECTS_CREATE);
+	if (ret < 0)
+		return ret;
+
+	ret = sst_get_pipe_id(sst->pdata->pdev_strm_map,
+				sst->pdata->strm_map_size,
+				effect->device, effect->mode, &pipe_id);
+	if (ret < 0)
+		return ret;
+
+	sst_fill_dsp_payload(&dsp_payload, pipe_id, 0xFF, (char *)&effects_info);
+
+	ret = sst_send_effects(&dsp_payload, sizeof(effects_info), EFFECTS_CREATE);
+
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int sst_effects_destroy(struct snd_card *card, struct snd_effect *effect)
+{
+	int ret = 0;
+	u8 pipe_id = 0;
+	struct ipc_effect_payload dsp_payload;
+	struct ipc_dsp_effects_info effects_info;
+	struct sst_data *sst;
+
+	if (!sst_pdev)
+		return -ENODEV;
+	sst =  dev_get_drvdata(sst_pdev);
+
+	ret = sst_fill_effects_info(&sst->pdata->pdev_effs, effect->uuid, effect->pos,
+				&effects_info, IPC_EFFECTS_DESTROY);
+	if (ret < 0)
+		return ret;
+
+	ret = sst_get_pipe_id(sst->pdata->pdev_strm_map,
+				sst->pdata->strm_map_size,
+				effect->device, effect->mode, &pipe_id);
+	if (ret < 0)
+		return ret;
+
+	sst_fill_dsp_payload(&dsp_payload, pipe_id, 0xFF, (char *)&effects_info);
+
+	ret = sst_send_effects(&dsp_payload, sizeof(effects_info), EFFECTS_DESTROY);
+
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int sst_effects_set_params(struct snd_card *card,
+					struct snd_effect_params *params)
+{
+	int ret = 0;
+	u8 pipe_id = 0;
+	u16 algo_id;
+	struct ipc_effect_payload dsp_payload;
+	struct sst_data *sst;
+
+	if (!sst_pdev)
+		return -ENODEV;
+	sst =  dev_get_drvdata(sst_pdev);
+
+	ret = sst_get_algo_id(&sst->pdata->pdev_effs, params->uuid, &algo_id);
+	if (ret < 0)
+		return ret;
+
+	ret = sst_get_pipe_id(sst->pdata->pdev_strm_map,
+				sst->pdata->strm_map_size,
+				params->device, SST_MEDIA0_OUT, &pipe_id);
+	if (ret < 0)
+		return ret;
+
+	sst_fill_dsp_payload(&dsp_payload, pipe_id, algo_id, params->buffer);
+
+	ret = sst_send_effects(&dsp_payload, params->size, EFFECTS_SET_PARAMS);
+
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int sst_effects_get_params(struct snd_card *card,
+					struct snd_effect_params *params)
+{
+	int ret = 0;
+	u8 pipe_id = 0;
+	u16 algo_id;
+	struct ipc_effect_payload dsp_payload;
+	struct sst_data *sst;
+
+	if (!sst_pdev)
+		return -ENODEV;
+	sst =  dev_get_drvdata(sst_pdev);
+
+	ret = sst_get_algo_id(&sst->pdata->pdev_effs, params->uuid, &algo_id);
+	if (ret < 0)
+		return ret;
+
+	ret = sst_get_pipe_id(sst->pdata->pdev_strm_map,
+				sst->pdata->strm_map_size,
+				params->device, SST_MEDIA0_OUT, &pipe_id);
+	if (ret < 0)
+		return ret;
+
+	sst_fill_dsp_payload(&dsp_payload, pipe_id, algo_id, params->buffer);
+
+	ret = sst_send_effects(&dsp_payload, params->size, EFFECTS_GET_PARAMS);
+
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int sst_query_num_effects(struct snd_card *card)
+{
+	struct sst_data *sst;
+
+	if (!sst_pdev)
+		return -ENODEV;
+	sst =  dev_get_drvdata(sst_pdev);
+
+	return sst->pdata->pdev_effs.effs_num_map;
+}
+
+static int sst_query_effects_caps(struct snd_card *card,
+					struct snd_effect_caps *caps)
+{
+	struct sst_data *sst;
+	struct sst_dev_effects_map *effs_map;
+	unsigned int num_effects, offset = 0;
+	char *dstn;
+	int i;
+
+	if (!sst_pdev)
+		return -ENODEV;
+	sst =  dev_get_drvdata(sst_pdev);
+
+	effs_map = sst->pdata->pdev_effs.effs_map;
+	num_effects = sst->pdata->pdev_effs.effs_num_map;
+
+	if (caps->size < (num_effects * MAX_DESCRIPTOR_SIZE)) {
+		pr_err("buffer size is insufficient\n");
+		return -ENOMEM;
+	}
+
+	dstn = caps->buffer;
+	for (i = 0; i < num_effects; i++) {
+		memcpy(dstn + offset, effs_map[i].descriptor, MAX_DESCRIPTOR_SIZE);
+		offset += MAX_DESCRIPTOR_SIZE;
+	}
+	caps->size = offset;
+
+	return 0;
+}
+
+struct snd_effect_ops effects_ops = {
+	.create = sst_effects_create,
+	.destroy = sst_effects_destroy,
+	.set_params = sst_effects_set_params,
+	.get_params = sst_effects_get_params,
+	.query_num_effects = sst_query_num_effects,
+	.query_effect_caps = sst_query_effects_caps,
+};
diff --git a/sound/soc/intel/pcm.c b/sound/soc/intel/pcm.c
new file mode 100644
index 0000000..8600147
--- /dev/null
+++ b/sound/soc/intel/pcm.c
@@ -0,0 +1,1021 @@
+/*
+ *  pcm.c - Intel MID Platform driver file implementing PCM functionality
+ *
+ *  Copyright (C) 2010-2013 Intel Corp
+ *  Author: Vinod Koul <vinod.koul@intel.com>
+ *  Author: Harsha Priya <priya.harsha@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/intel_sst_ioctl.h>
+#include <asm/platform_sst_audio.h>
+#include <asm/intel_sst_mrfld.h>
+#include <asm/intel-mid.h>
+#include "platform_ipc_v2.h"
+#include "sst_platform.h"
+#include "sst_platform_pvt.h"
+
+struct device *sst_pdev;
+struct sst_device *sst_dsp;
+extern struct snd_compr_ops sst_platform_compr_ops;
+extern struct snd_effect_ops effects_ops;
+
+static DEFINE_MUTEX(sst_dsp_lock);
+
+static struct snd_pcm_hardware sst_platform_pcm_hw = {
+	.info =	(SNDRV_PCM_INFO_INTERLEAVED |
+			SNDRV_PCM_INFO_DOUBLE |
+			SNDRV_PCM_INFO_PAUSE |
+			SNDRV_PCM_INFO_RESUME |
+			SNDRV_PCM_INFO_MMAP |
+			SNDRV_PCM_INFO_MMAP_VALID |
+			SNDRV_PCM_INFO_BLOCK_TRANSFER |
+			SNDRV_PCM_INFO_SYNC_START),
+	.formats = (SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_U16 |
+			SNDRV_PCM_FMTBIT_S24 | SNDRV_PCM_FMTBIT_U24 |
+			SNDRV_PCM_FMTBIT_S32 | SNDRV_PCM_FMTBIT_U32),
+	.rates = (SNDRV_PCM_RATE_8000|
+			SNDRV_PCM_RATE_16000 |
+			SNDRV_PCM_RATE_44100 |
+			SNDRV_PCM_RATE_48000),
+	.rate_min = SST_MIN_RATE,
+	.rate_max = SST_MAX_RATE,
+	.channels_min =	SST_MIN_CHANNEL,
+	.channels_max =	SST_MAX_CHANNEL,
+	.buffer_bytes_max = SST_MAX_BUFFER,
+	.period_bytes_min = SST_MIN_PERIOD_BYTES,
+	.period_bytes_max = SST_MAX_PERIOD_BYTES,
+	.periods_min = SST_MIN_PERIODS,
+	.periods_max = SST_MAX_PERIODS,
+	.fifo_size = SST_FIFO_SIZE,
+};
+
+static int sst_platform_ihf_set_tdm_slot(struct snd_soc_dai *dai,
+			unsigned int tx_mask, unsigned int rx_mask,
+			int slots, int slot_width)
+{
+	struct snd_sst_runtime_params params_data;
+	int channels = slots;
+
+	/* registering with SST driver to get access to SST APIs to use */
+	if (!sst_dsp) {
+		pr_err("sst: DSP not registered\n");
+		return -EIO;
+	}
+	params_data.type = SST_SET_CHANNEL_INFO;
+	params_data.str_id = SND_SST_DEVICE_IHF;
+	params_data.size = sizeof(channels);
+	params_data.addr = &channels;
+	return sst_dsp->ops->set_generic_params(SST_SET_RUNTIME_PARAMS,
+							(void *)&params_data);
+}
+
+static int sst_media_digital_mute(struct snd_soc_dai *dai, int mute, int stream)
+{
+	pr_debug("%s: enter, mute=%d dai-name=%s dir=%d\n",
+		 __func__, mute, dai->name, stream);
+
+#if IS_BUILTIN(CONFIG_SST_MRFLD_DPCM)
+	sst_send_pipe_gains(dai, stream, mute);
+#endif
+
+	return 0;
+}
+
+/* helper functions */
+void sst_set_stream_status(struct sst_runtime_stream *stream,
+					int state)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&stream->status_lock, flags);
+	stream->stream_status = state;
+	spin_unlock_irqrestore(&stream->status_lock, flags);
+}
+
+static inline int sst_get_stream_status(struct sst_runtime_stream *stream)
+{
+	int state;
+	unsigned long flags;
+
+	spin_lock_irqsave(&stream->status_lock, flags);
+	state = stream->stream_status;
+	spin_unlock_irqrestore(&stream->status_lock, flags);
+	return state;
+}
+
+static void sst_fill_alloc_params(struct snd_pcm_substream *substream,
+				struct snd_sst_alloc_params_ext *alloc_param)
+{
+	unsigned int channels;
+	snd_pcm_uframes_t period_size;
+	ssize_t periodbytes;
+	ssize_t buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
+	u32 buffer_addr = virt_to_phys(substream->dma_buffer.area);
+
+	channels = substream->runtime->channels;
+	period_size = substream->runtime->period_size;
+	periodbytes = samples_to_bytes(substream->runtime, period_size);
+	alloc_param->ring_buf_info[0].addr = buffer_addr;
+	alloc_param->ring_buf_info[0].size = buffer_bytes;
+	alloc_param->sg_count = 1;
+	alloc_param->reserved = 0;
+	alloc_param->frag_size = periodbytes * channels;
+
+	pr_debug("frag_size = %u\n", alloc_param->frag_size);
+	pr_debug("ring_buf_addr = 0x%x\n", alloc_param->ring_buf_info[0].addr);
+}
+
+static void sst_fill_pcm_params(struct snd_pcm_substream *substream,
+				struct snd_sst_stream_params *param)
+{
+	param->uc.pcm_params.num_chan = (u8) substream->runtime->channels;
+	param->uc.pcm_params.pcm_wd_sz = substream->runtime->sample_bits;
+	param->uc.pcm_params.sfreq = substream->runtime->rate;
+
+	/* PCM stream via ALSA interface */
+	param->uc.pcm_params.use_offload_path = 0;
+	param->uc.pcm_params.reserved2 = 0;
+	/* zero the whole channel map, not just its first byte */
+	memset(param->uc.pcm_params.channel_map, 0,
+	       sizeof(param->uc.pcm_params.channel_map));
+	pr_debug("sfreq = %d, wd_sz = %d\n",
+		 param->uc.pcm_params.sfreq, param->uc.pcm_params.pcm_wd_sz);
+}
+
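+/*
+ * Pick a PCM pipe from the stream's period time: at or below the
+ * low-latency threshold use the low-latency pipe, at or above the
+ * deep-buffer threshold use the deep-buffer pipe, otherwise fall back
+ * to the default media pipe.
+ */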
+#define ASSIGN_PIPE_ID(periodtime, lowlatency, deepbuffer) \
+	((periodtime) <= (lowlatency) ? PIPE_LOW_PCM0_IN : \
+	((periodtime) >= (deepbuffer) ? PIPE_MEDIA3_IN : PIPE_MEDIA1_IN))
+
+static int sst_get_stream_mapping(int dev, int sdev, int dir,
+	struct sst_dev_stream_map *map, int size, u8 pipe_id,
+	const struct sst_lowlatency_deepbuff *ll_db)
+{
+	int index;
+	unsigned long pt = 0, ll = 0, db = 0;
+
+	if (map == NULL)
+		return -EINVAL;
+
+	pr_debug("dev %d sdev %d dir %d\n", dev, sdev, dir);
+
+	/* index 0 is not used in stream map */
+	for (index = 1; index < size; index++) {
+		if ((map[index].dev_num == dev) &&
+		    (map[index].subdev_num == sdev) &&
+		    (map[index].direction == dir)) {
+			/* device id for the probe is assigned dynamically */
+			if (map[index].status == SST_DEV_MAP_IN_USE) {
+				return index;
+			} else if (map[index].status == SST_DEV_MAP_FREE) {
+				map[index].status = SST_DEV_MAP_IN_USE;
+
+				if (map[index].dev_num == MERR_SALTBAY_PROBE) {
+					map[index].device_id = pipe_id;
+
+				} else if (map[index].dev_num == MERR_SALTBAY_AUDIO) {
+					if (!ll_db->low_latency || !ll_db->deep_buffer)
+						return -EINVAL;
+
+					pt = ll_db->period_time;
+					ll = *(ll_db->low_latency);
+					db = *(ll_db->deep_buffer);
+
+					pr_debug("PT %lu LL %lu DB %lu\n", pt, ll, db);
+
+					map[index].device_id = ASSIGN_PIPE_ID(pt,
+								ll, db);
+				}
+				pr_debug("%s: pipe_id 0x%x index %d\n", __func__,
+						map[index].device_id, index);
+
+				return index;
+			}
+		}
+	}
+	return 0;
+}
+
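+/*
+ * Resolve an ALSA PCM or compressed substream to its entry in the SST
+ * stream map and fill in the stream id, device type and task id that
+ * the DSP driver expects.
+ */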
+int sst_fill_stream_params(void *substream,
+	const struct sst_data *ctx, struct snd_sst_params *str_params, bool is_compress)
+{
+	int map_size;
+	int index;
+	struct sst_dev_stream_map *map;
+	struct snd_pcm_substream *pstream = NULL;
+	struct snd_compr_stream *cstream = NULL;
+
+	map = ctx->pdata->pdev_strm_map;
+	map_size = ctx->pdata->strm_map_size;
+
+	if (is_compress)
+		cstream = (struct snd_compr_stream *)substream;
+	else
+		pstream = (struct snd_pcm_substream *)substream;
+
+	str_params->stream_type = SST_STREAM_TYPE_MUSIC;
+
+	/* For pcm streams */
+	if (pstream) {
+		index = sst_get_stream_mapping(pstream->pcm->device,
+					  pstream->number, pstream->stream,
+					  map, map_size, ctx->pipe_id, &ctx->ll_db);
+		if (index <= 0)
+			return -EINVAL;
+
+		str_params->stream_id = index;
+		str_params->device_type = map[index].device_id;
+		str_params->task = map[index].task_id;
+
+		if (str_params->device_type == SST_PROBE_IN)
+			str_params->stream_type = SST_STREAM_TYPE_PROBE;
+
+		pr_debug("str_id = %d, device_type = 0x%x, task = %d\n",
+			 str_params->stream_id, str_params->device_type,
+			 str_params->task);
+
+		str_params->ops = (u8)pstream->stream;
+	}
+
+	if (cstream) {
+		/* FIXME: Add support for subdevice number in
+		 * snd_compr_stream */
+		index = sst_get_stream_mapping(cstream->device->device,
+					       0, cstream->direction,
+					       map, map_size, ctx->pipe_id, &ctx->ll_db);
+		if (index <= 0)
+			return -EINVAL;
+		str_params->stream_id = index;
+		str_params->device_type = map[index].device_id;
+		str_params->task = map[index].task_id;
+		pr_debug("compress str_id = %d, device_type = 0x%x, task = %d\n",
+			 str_params->stream_id, str_params->device_type,
+			 str_params->task);
+
+		str_params->ops = (u8)cstream->direction;
+	}
+	return 0;
+}
+
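+/* period time in milliseconds: frames-per-period * 1000 / rate */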
+#define CALC_PERIODTIME(period_size, rate) (((period_size) * 1000) / (rate))
+
+static int sst_platform_alloc_stream(struct snd_pcm_substream *substream,
+		struct snd_soc_platform *platform)
+{
+	struct sst_runtime_stream *stream =
+			substream->runtime->private_data;
+	struct snd_sst_stream_params param = {{{0,},},};
+	struct snd_sst_params str_params = {0};
+	struct snd_sst_alloc_params_ext alloc_params = {0};
+	int ret_val = 0;
+	struct sst_data *ctx = snd_soc_platform_get_drvdata(platform);
+
+	/* set codec params and inform SST driver the same */
+	sst_fill_pcm_params(substream, &param);
+	sst_fill_alloc_params(substream, &alloc_params);
+	substream->runtime->dma_area = substream->dma_buffer.area;
+	str_params.sparams = param;
+	str_params.aparams = alloc_params;
+	str_params.codec = SST_CODEC_TYPE_PCM;
+
+	ctx->ll_db.period_time = CALC_PERIODTIME(substream->runtime->period_size,
+					substream->runtime->rate);
+
+	/* fill the device type and stream id to pass to SST driver */
+	ret_val = sst_fill_stream_params(substream, ctx, &str_params, false);
+	pr_debug("platform prepare: fill stream params ret_val = 0x%x\n", ret_val);
+	if (ret_val < 0)
+		return ret_val;
+
+	stream->stream_info.str_id = str_params.stream_id;
+
+	ret_val = stream->ops->open(&str_params);
+	pr_debug("platform prepare: stream open ret_val = 0x%x\n", ret_val);
+	if (ret_val <= 0)
+		return ret_val;
+
+	pr_debug("platform allocated strid: %d\n", stream->stream_info.str_id);
+
+	return ret_val;
+}
+
+static void sst_period_elapsed(void *mad_substream)
+{
+	struct snd_pcm_substream *substream = mad_substream;
+	struct sst_runtime_stream *stream;
+	int status;
+
+	if (!substream || !substream->runtime) {
+		pr_debug("In %s : Null Substream pointer\n", __func__);
+		return;
+	}
+	stream = substream->runtime->private_data;
+	if (!stream) {
+		pr_debug("In %s : Null Stream pointer\n", __func__);
+		return;
+	}
+	status = sst_get_stream_status(stream);
+	if (status != SST_PLATFORM_RUNNING) {
+		pr_debug("In %s : Stream Status=%d\n", __func__, status);
+		return;
+	}
+	snd_pcm_period_elapsed(substream);
+}
+
+static int sst_platform_init_stream(struct snd_pcm_substream *substream)
+{
+	struct sst_runtime_stream *stream =
+			substream->runtime->private_data;
+	int ret_val;
+
+	pr_debug("setting buffer ptr param\n");
+	sst_set_stream_status(stream, SST_PLATFORM_INIT);
+	stream->stream_info.period_elapsed = sst_period_elapsed;
+	stream->stream_info.mad_substream = substream;
+	stream->stream_info.buffer_ptr = 0;
+	stream->stream_info.sfreq = substream->runtime->rate;
+	pr_debug("pcm_substream %p, period_elapsed %p\n",
+			stream->stream_info.mad_substream, stream->stream_info.period_elapsed);
+	ret_val = stream->ops->device_control(
+			SST_SND_STREAM_INIT, &stream->stream_info);
+	if (ret_val)
+		pr_err("control_set ret error %d\n", ret_val);
+	return ret_val;
+}
+
+static inline int power_up_sst(struct sst_runtime_stream *sst)
+{
+	return sst->ops->power(true);
+}
+
+static inline int power_down_sst(struct sst_runtime_stream *sst)
+{
+	return sst->ops->power(false);
+}
+/* end -- helper functions */
+
+static int sst_media_open(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	int ret_val = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct sst_runtime_stream *stream;
+
+	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+	if (!stream)
+		return -ENOMEM;
+
+	spin_lock_init(&stream->status_lock);
+
+	/* get the sst ops */
+	mutex_lock(&sst_dsp_lock);
+	if (!sst_dsp ||
+	    !try_module_get(sst_dsp->dev->driver->owner)) {
+		pr_err("no device available to run\n");
+		ret_val = -ENODEV;
+		goto out_ops;
+	}
+	stream->ops = sst_dsp->ops;
+	mutex_unlock(&sst_dsp_lock);
+
+	stream->stream_info.str_id = 0;
+	sst_set_stream_status(stream, SST_PLATFORM_UNINIT);
+	stream->stream_info.mad_substream = substream;
+	runtime->private_data = stream;
+
+	if (strstr(dai->name, "Power-cpu-dai"))
+		return power_up_sst(stream);
+
+	/* Make sure, that the period size is always even */
+	snd_pcm_hw_constraint_step(substream->runtime, 0,
+			   SNDRV_PCM_HW_PARAM_PERIODS, 2);
+
+	pr_debug("buf_ptr %llu\n", stream->stream_info.buffer_ptr);
+	return snd_pcm_hw_constraint_integer(runtime,
+			 SNDRV_PCM_HW_PARAM_PERIODS);
+out_ops:
+	kfree(stream);
+	mutex_unlock(&sst_dsp_lock);
+	return ret_val;
+}
+
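+/*
+ * Release a dynamically assigned pipe in the stream map so it can be
+ * reused.  With the DPCM driver built in the map is managed elsewhere,
+ * so this is a no-op in that configuration.
+ */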
+static void sst_free_stream_in_use(struct sst_dev_stream_map *map, int str_id)
+{
+#if IS_BUILTIN(CONFIG_SST_MRFLD_DPCM)
+	return;
+#else
+	if ((map[str_id].dev_num == MERR_SALTBAY_AUDIO) ||
+	    (map[str_id].dev_num == MERR_SALTBAY_PROBE)) {
+
+		/* Do nothing in capture for audio device */
+		if ((map[str_id].dev_num == MERR_SALTBAY_AUDIO) &&
+		    (map[str_id].direction == SNDRV_PCM_STREAM_CAPTURE))
+			return;
+		if ((map[str_id].task_id == SST_TASK_ID_MEDIA) &&
+		    (map[str_id].status == SST_DEV_MAP_IN_USE)) {
+			pr_debug("str_id %d device_id 0x%x\n", str_id, map[str_id].device_id);
+			map[str_id].status = SST_DEV_MAP_FREE;
+			map[str_id].device_id = PIPE_RSVD;
+		}
+	}
+	return;
+#endif
+}
+
+static void sst_media_close(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct sst_runtime_stream *stream;
+	int ret_val = 0, str_id;
+	struct sst_data *ctx = snd_soc_platform_get_drvdata(dai->platform);
+
+	stream = substream->runtime->private_data;
+	if (strstr(dai->name, "Power-cpu-dai"))
+		ret_val = power_down_sst(stream);
+
+	str_id = stream->stream_info.str_id;
+	if (str_id)
+		ret_val = stream->ops->close(str_id);
+	sst_free_stream_in_use(ctx->pdata->pdev_strm_map, str_id);
+	module_put(sst_dsp->dev->driver->owner);
+	kfree(stream);
+	pr_debug("%s: %d\n", __func__, ret_val);
+}
+
+static int sst_dpcm_probe_cmd(struct snd_soc_platform *platform,
+		struct snd_pcm_substream *substream, u16 pipe_id, bool on)
+{
+	int ret = 0;
+#if IS_BUILTIN(CONFIG_SST_MRFLD_DPCM)
+	if (substream->pcm->device == MERR_DPCM_PROBE)
+		ret = sst_dpcm_probe_send(platform, pipe_id, substream->number,
+					      substream->stream, on);
+#endif
+	return ret;
+}
+
+static inline unsigned int get_current_pipe_id(struct snd_soc_platform *platform,
+					       struct snd_pcm_substream *substream)
+{
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	struct sst_dev_stream_map *map = sst->pdata->pdev_strm_map;
+	struct sst_runtime_stream *stream =
+			substream->runtime->private_data;
+	u32 str_id = stream->stream_info.str_id;
+	unsigned int pipe_id;
+	pipe_id = map[str_id].device_id;
+
+	pr_debug("%s: got pipe_id = %#x for str_id = %d\n",
+		 __func__, pipe_id, str_id);
+	return pipe_id;
+}
+
+static void sst_probe_close(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	u16 probe_pipe_id = get_current_pipe_id(dai->platform, substream);
+
+	sst_dpcm_probe_cmd(dai->platform, substream, probe_pipe_id, false);
+	sst_media_close(substream, dai);
+}
+
+static int sst_media_prepare(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct sst_runtime_stream *stream;
+	int ret_val = 0, str_id;
+
+	pr_debug("%s\n", __func__);
+
+	stream = substream->runtime->private_data;
+	str_id = stream->stream_info.str_id;
+	if (str_id)
+		return ret_val;
+
+	ret_val = sst_platform_alloc_stream(substream, dai->platform);
+	if (ret_val <= 0)
+		return ret_val;
+	snprintf(substream->pcm->id, sizeof(substream->pcm->id),
+			"%d", stream->stream_info.str_id);
+
+	ret_val = sst_platform_init_stream(substream);
+	if (ret_val)
+		return ret_val;
+	substream->runtime->hw.info = SNDRV_PCM_INFO_BLOCK_TRANSFER;
+
+	return ret_val;
+}
+
+static int sst_probe_prepare(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	u16 probe_pipe_id;
+
+	sst_media_prepare(substream, dai);
+	probe_pipe_id = get_current_pipe_id(dai->platform, substream);
+
+	return sst_dpcm_probe_cmd(dai->platform, substream, probe_pipe_id, true);
+}
+
+static int sst_media_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	int ret;
+
+	pr_debug("%s\n", __func__);
+
+	ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
+	if (ret < 0)
+		return ret;
+	memset(substream->runtime->dma_area, 0, params_buffer_bytes(params));
+	return 0;
+}
+
+static int sst_media_hw_free(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	return snd_pcm_lib_free_pages(substream);
+}
+
+static struct snd_soc_dai_ops sst_media_dai_ops = {
+	.startup = sst_media_open,
+	.shutdown = sst_media_close,
+	.prepare = sst_media_prepare,
+	.hw_params = sst_media_hw_params,
+	.hw_free = sst_media_hw_free,
+	.set_tdm_slot = sst_platform_ihf_set_tdm_slot,
+	.mute_stream = sst_media_digital_mute,
+};
+
+static struct snd_soc_dai_ops sst_probe_dai_ops = {
+	.startup = sst_media_open,
+	.hw_params = sst_media_hw_params,
+	.hw_free = sst_media_hw_free,
+	.shutdown = sst_probe_close,
+	.prepare = sst_probe_prepare,
+};
+
+static struct snd_soc_dai_ops sst_loopback_dai_ops = {
+	.startup = sst_media_open,
+	.shutdown = sst_media_close,
+	.prepare = sst_media_prepare,
+};
+
+static struct snd_soc_dai_ops sst_compr_dai_ops = {
+	.mute_stream = sst_media_digital_mute,
+};
+
+static struct snd_soc_dai_driver sst_platform_dai[] = {
+{
+	.name = SST_HEADSET_DAI,
+	.ops = &sst_media_dai_ops,
+	.playback = {
+		.stream_name = "Headset Playback",
+		.channels_min = SST_STEREO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+	.capture = {
+		.stream_name = "Headset Capture",
+		.channels_min = 1,
+		.channels_max = 2,
+		.rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = SST_DEEPBUFFER_DAI,
+	.ops = &sst_media_dai_ops,
+	.playback = {
+		.stream_name = "Deepbuffer Playback",
+		.channels_min = SST_STEREO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = SST_LOWLATENCY_DAI,
+	.ops = &sst_media_dai_ops,
+	.playback = {
+		.stream_name = "Low Latency Playback",
+		.channels_min = SST_STEREO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = SST_SPEAKER_DAI,
+	.ops = &sst_media_dai_ops,
+	.playback = {
+		.stream_name = "Speaker Playback",
+		.channels_min = SST_MONO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = SST_VOICE_DAI,
+	.playback = {
+		.stream_name = "Voice Downlink",
+		.channels_min = SST_MONO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+			 SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+	.capture = {
+		.stream_name = "Voice Uplink",
+		.channels_min = SST_MONO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+			 SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = SST_COMPRESS_DAI,
+	.compress_dai = 1,
+	.ops = &sst_compr_dai_ops,
+	.playback = {
+		.stream_name = "Compress Playback",
+		.channels_min = SST_STEREO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = SST_VIRTUAL_DAI,
+	.playback = {
+		.stream_name = "Virtual Playback",
+		.channels_min = SST_STEREO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = SST_POWER_DAI,
+	.ops = &sst_media_dai_ops,
+	.playback = {
+		.stream_name = "Dummy Power Stream",
+		.channels_min = SST_MONO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_8000_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+},
+{
+	.name = SST_PROBE_DAI,
+	.ops = &sst_probe_dai_ops,
+	.playback = {
+		.stream_name = "Probe Playback",
+		.channels_min = SST_MONO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_8000 |
+				SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+	.capture = {
+		.stream_name = "Probe Capture",
+		.channels_min = SST_MONO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_8000 |
+				SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+},
+{
+	.name = SST_VOIP_DAI,
+	.ops = &sst_media_dai_ops,
+	.playback = {
+		.stream_name = "VOIP Playback",
+		.channels_min = SST_MONO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+	.capture = {
+		.stream_name = "VOIP Capture",
+		.channels_min = SST_MONO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = SST_LOOPBACK_DAI,
+	.ops = &sst_loopback_dai_ops,
+	.capture = {
+		.stream_name = "Loopback Capture",
+		.channels_min = SST_MONO,
+		.channels_max = SST_MONO,
+		.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+/*BE CPU  Dais */
+{
+	.name = "ssp2-codec",
+	.playback = {
+		.stream_name = "ssp2 playback",
+		.channels_min = SST_STEREO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+	.capture = {
+		.stream_name = "ssp2 Capture",
+		.channels_min = SST_STEREO,
+		.channels_max = SST_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+};
+
+static int sst_platform_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai_link *dai_link = rtd->dai_link;
+
+	pr_debug("sst_platform_open called:%s\n", dai_link->cpu_dai_name);
+	if (substream->pcm->internal)
+		return 0;
+	runtime = substream->runtime;
+	runtime->hw = sst_platform_pcm_hw;
+	return 0;
+}
+
+static int sst_platform_close(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai_link *dai_link = rtd->dai_link;
+	pr_debug("sst_platform_close called:%s\n", dai_link->cpu_dai_name);
+	return 0;
+}
+
+static int sst_platform_pcm_trigger(struct snd_pcm_substream *substream,
+					int cmd)
+{
+	int ret_val = 0, str_id;
+	struct sst_runtime_stream *stream;
+	int str_cmd, status, alsa_state;
+
+	if (substream->pcm->internal)
+		return 0;
+	pr_debug("sst_platform_pcm_trigger called\n");
+	stream = substream->runtime->private_data;
+	str_id = stream->stream_info.str_id;
+	alsa_state = substream->runtime->status->state;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		pr_debug("Trigger Start\n");
+		str_cmd = SST_SND_START;
+		status = SST_PLATFORM_RUNNING;
+		stream->stream_info.mad_substream = substream;
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+		pr_debug("Trigger stop\n");
+		str_cmd = SST_SND_DROP;
+		status = SST_PLATFORM_DROPPED;
+		break;
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		pr_debug("Trigger pause\n");
+		str_cmd = SST_SND_PAUSE;
+		status = SST_PLATFORM_PAUSED;
+		break;
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		pr_debug("Trigger pause release\n");
+		str_cmd = SST_SND_RESUME;
+		status = SST_PLATFORM_RUNNING;
+		break;
+	default:
+		return -EINVAL;
+	}
+	ret_val = stream->ops->device_control(str_cmd, &str_id);
+	if (!ret_val)
+		sst_set_stream_status(stream, status);
+
+	return ret_val;
+}
+
+static snd_pcm_uframes_t sst_platform_pcm_pointer
+			(struct snd_pcm_substream *substream)
+{
+	struct sst_runtime_stream *stream;
+	int ret_val, status;
+	struct pcm_stream_info *str_info;
+
+	stream = substream->runtime->private_data;
+	status = sst_get_stream_status(stream);
+	if (status == SST_PLATFORM_INIT)
+		return 0;
+	str_info = &stream->stream_info;
+	ret_val = stream->ops->device_control(
+				SST_SND_BUFFER_POINTER, str_info);
+	if (ret_val) {
+		pr_err("sst: error code = %d\n", ret_val);
+		return ret_val;
+	}
+	substream->runtime->soc_delay = str_info->pcm_delay;
+	return str_info->buffer_ptr;
+}
+
+static struct snd_pcm_ops sst_platform_ops = {
+	.open = sst_platform_open,
+	.close = sst_platform_close,
+	.ioctl = snd_pcm_lib_ioctl,
+	.trigger = sst_platform_pcm_trigger,
+	.pointer = sst_platform_pcm_pointer,
+};
+
+static void sst_pcm_free(struct snd_pcm *pcm)
+{
+	pr_debug("sst_pcm_free called\n");
+	snd_pcm_lib_preallocate_free_for_all(pcm);
+}
+
+static int sst_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_dai *dai = rtd->cpu_dai;
+	struct snd_pcm *pcm = rtd->pcm;
+	int retval = 0;
+
+	pr_debug("sst_pcm_new called\n");
+	if (dai->driver->playback.channels_min ||
+			dai->driver->capture.channels_min) {
+		retval =  snd_pcm_lib_preallocate_pages_for_all(pcm,
+			SNDRV_DMA_TYPE_CONTINUOUS,
+			snd_dma_continuous_data(GFP_DMA),
+			SST_MAX_BUFFER, SST_MAX_BUFFER);
+		if (retval) {
+			pr_err("dma buffer allocation failed\n");
+			return retval;
+		}
+	}
+	return retval;
+}
+
+static int sst_soc_probe(struct snd_soc_platform *platform)
+{
+	struct sst_data *ctx = snd_soc_platform_get_drvdata(platform);
+	struct soft_platform_id spid;
+	int ret = 0;
+
+	memcpy(&spid, ctx->pdata->spid, sizeof(spid));
+	pr_debug("Enter:%s\n", __func__);
+
+#if IS_BUILTIN(CONFIG_SST_MRFLD_DPCM)
+	ret = sst_dsp_init_v2_dpcm(platform);
+#else
+	ret = sst_dsp_init(platform);
+#endif
+	/*
+	 * Effect registration is currently disabled:
+	 * if (ret)
+	 *	return ret;
+	 * ret = snd_soc_register_effect(platform->card, &effects_ops);
+	 */
+
+	return ret;
+}
+
+static int sst_soc_remove(struct snd_soc_platform *platform)
+{
+	pr_debug("%s called\n", __func__);
+	return 0;
+}
+
+static struct snd_soc_platform_driver sst_soc_platform_drv  = {
+	.probe		= sst_soc_probe,
+	.remove		= sst_soc_remove,
+	.ops		= &sst_platform_ops,
+	.compr_ops	= &sst_platform_compr_ops,
+	.pcm_new	= sst_pcm_new,
+	.pcm_free	= sst_pcm_free,
+	.read		= sst_soc_read,
+	.write		= sst_soc_write,
+};
+
+int sst_register_dsp(struct sst_device *sst_dev)
+{
+	if (!sst_dev)
+		return -ENODEV;
+	mutex_lock(&sst_dsp_lock);
+	if (sst_dsp) {
+		pr_err("we already have a device %s\n", sst_dsp->name);
+		mutex_unlock(&sst_dsp_lock);
+		return -EEXIST;
+	}
+	pr_debug("registering device %s\n", sst_dev->name);
+
+	sst_dsp = sst_dev;
+	mutex_unlock(&sst_dsp_lock);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sst_register_dsp);
+
+int sst_unregister_dsp(struct sst_device *dev)
+{
+	if (dev != sst_dsp)
+		return -EINVAL;
+
+	mutex_lock(&sst_dsp_lock);
+	if (!sst_dsp) {
+		mutex_unlock(&sst_dsp_lock);
+		return -EIO;
+	}
+	pr_debug("unregistering %s\n", sst_dsp->name);
+
+	sst_dsp = NULL;
+	mutex_unlock(&sst_dsp_lock);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sst_unregister_dsp);
+
+static const struct snd_soc_component_driver pcm_component = {
+	.name           = "pcm",
+};
+
+static int sst_platform_probe(struct platform_device *pdev)
+{
+	struct sst_data *sst;
+	int ret;
+	struct sst_platform_data *pdata = pdev->dev.platform_data;
+
+	pr_debug("sst_platform_probe called\n");
+	sst = devm_kzalloc(&pdev->dev, sizeof(*sst), GFP_KERNEL);
+	if (sst == NULL) {
+		pr_err("kzalloc failed\n");
+		return -ENOMEM;
+	}
+
+	sst_pdev = &pdev->dev;
+	sst->pdata = pdata;
+	mutex_init(&sst->lock);
+	dev_set_drvdata(&pdev->dev, sst);
+
+	ret = snd_soc_register_platform(&pdev->dev,
+					 &sst_soc_platform_drv);
+	if (ret) {
+		pr_err("registering soc platform failed\n");
+		return ret;
+	}
+	ret = snd_soc_register_component(&pdev->dev, &pcm_component,
+				sst_platform_dai, ARRAY_SIZE(sst_platform_dai));
+	if (ret) {
+		pr_err("registering cpu dais failed\n");
+		snd_soc_unregister_platform(&pdev->dev);
+	}
+
+	return ret;
+}
+
+static int sst_platform_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_component(&pdev->dev);
+	snd_soc_unregister_platform(&pdev->dev);
+	pr_debug("sst_platform_remove success\n");
+	return 0;
+}
+
+static struct platform_driver sst_platform_driver = {
+	.driver		= {
+		.name		= "sst-platform",
+		.owner		= THIS_MODULE,
+	},
+	.probe		= sst_platform_probe,
+	.remove		= sst_platform_remove,
+};
+
+module_platform_driver(sst_platform_driver);
+
+MODULE_DESCRIPTION("ASoC Intel(R) MID Platform driver");
+MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
+MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sst-platform");
diff --git a/sound/soc/intel/platform-libs/controls_v1.c b/sound/soc/intel/platform-libs/controls_v1.c
new file mode 100644
index 0000000..b6a5fa7
--- /dev/null
+++ b/sound/soc/intel/platform-libs/controls_v1.c
@@ -0,0 +1,184 @@
+/*
+ *  controls_v1.c - Intel MID Platform driver ALSA controls for CTP
+ *
+ *  Copyright (C) 2012 Intel Corp
+ *  Author: Jeeja KP <jeeja.kp@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include <linux/slab.h>
+#include <sound/intel_sst_ioctl.h>
+#include <sound/soc.h>
+#include "../sst_platform.h"
+#include "../sst_platform_pvt.h"
+
+
+static int sst_set_mixer_param(unsigned int device_input_mixer)
+{
+	if (!sst_dsp) {
+		pr_err("sst: DSP not registered\n");
+		return -ENODEV;
+	}
+
+	/* hand the mixer input selection down to the DSP driver */
+	return sst_dsp->ops->set_generic_params(SST_SET_ALGO_PARAMS,
+						(void *)&device_input_mixer);
+}
+
+static int lpe_mixer_ihf_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+	ucontrol->value.integer.value[0] = sst->lpe_mixer_input_ihf;
+	return 0;
+}
+
+static int lpe_mixer_ihf_set(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	int device_input_mixer;
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+	switch (ucontrol->value.integer.value[0]) {
+	case 0:
+		pr_debug("input is None\n");
+		device_input_mixer = SST_STREAM_DEVICE_IHF
+					| SST_INPUT_STREAM_NONE;
+		break;
+	case 1:
+		pr_debug("input is PCM stream\n");
+		device_input_mixer = SST_STREAM_DEVICE_IHF
+					| SST_INPUT_STREAM_PCM;
+		break;
+	case 2:
+		pr_debug("input is Compress stream\n");
+		device_input_mixer = SST_STREAM_DEVICE_IHF
+					| SST_INPUT_STREAM_COMPRESS;
+		break;
+	case 3:
+		pr_debug("input is Mixed stream\n");
+		device_input_mixer = SST_STREAM_DEVICE_IHF
+					| SST_INPUT_STREAM_MIXED;
+		break;
+	default:
+		pr_err("Invalid Input:%ld\n", ucontrol->value.integer.value[0]);
+		return -EINVAL;
+	}
+	sst->lpe_mixer_input_ihf = ucontrol->value.integer.value[0];
+	return sst_set_mixer_param(device_input_mixer);
+}
+
+static int lpe_mixer_headset_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+	ucontrol->value.integer.value[0] = sst->lpe_mixer_input_hs;
+	return 0;
+}
+
+static int lpe_mixer_headset_set(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	int mixer_input_stream;
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+	switch (ucontrol->value.integer.value[0]) {
+	case 0:
+		pr_debug("input is None\n");
+		mixer_input_stream = SST_STREAM_DEVICE_HS
+					| SST_INPUT_STREAM_NONE;
+		break;
+	case 1:
+		pr_debug("input is PCM stream\n");
+		mixer_input_stream = SST_STREAM_DEVICE_HS
+					 | SST_INPUT_STREAM_PCM;
+		break;
+	case 2:
+		pr_debug("input is Compress stream\n");
+		mixer_input_stream = SST_STREAM_DEVICE_HS
+					 | SST_INPUT_STREAM_COMPRESS;
+		break;
+	case 3:
+		pr_debug("input is Mixed stream\n");
+		mixer_input_stream = SST_STREAM_DEVICE_HS
+					 | SST_INPUT_STREAM_MIXED;
+		break;
+	default:
+		pr_err("Invalid Input:%ld\n", ucontrol->value.integer.value[0]);
+		return -EINVAL;
+	}
+	sst->lpe_mixer_input_hs = ucontrol->value.integer.value[0];
+	return sst_set_mixer_param(mixer_input_stream);
+}
+
+static int sst_probe_byte_control_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	if (!sst_dsp) {
+		pr_err("sst: DSP not registered\n");
+		return -ENODEV;
+	}
+
+	return sst_dsp->ops->set_generic_params(SST_GET_PROBE_BYTE_STREAM,
+				ucontrol->value.bytes.data);
+}
+
+static int sst_probe_byte_control_set(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	if (!sst_dsp) {
+		pr_err("sst: DSP not registered\n");
+		return -ENODEV;
+	}
+
+	return sst_dsp->ops->set_generic_params(SST_SET_PROBE_BYTE_STREAM,
+				ucontrol->value.bytes.data);
+}
+
+static const char *lpe_mixer_text[] = {
+	"None", "PCM", "Compressed", "Mixed",
+};
+
+static const struct soc_enum lpe_mixer_enum =
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(lpe_mixer_text), lpe_mixer_text);
+
+static const struct snd_kcontrol_new sst_controls_clv[] = {
+	SOC_ENUM_EXT("LPE IHF mixer", lpe_mixer_enum,
+		lpe_mixer_ihf_get, lpe_mixer_ihf_set),
+	SOC_ENUM_EXT("LPE headset mixer", lpe_mixer_enum,
+		lpe_mixer_headset_get, lpe_mixer_headset_set),
+	SND_SOC_BYTES_EXT("SST Probe Byte Control", SST_MAX_BIN_BYTES,
+		sst_probe_byte_control_get,
+		sst_probe_byte_control_set),
+};
+
+int sst_platform_clv_init(struct snd_soc_platform *platform)
+{
+	struct sst_data *ctx = snd_soc_platform_get_drvdata(platform);
+	ctx->lpe_mixer_input_hs = 0;
+	ctx->lpe_mixer_input_ihf = 0;
+	snd_soc_add_platform_controls(platform, sst_controls_clv,
+						ARRAY_SIZE(sst_controls_clv));
+	return 0;
+}
diff --git a/sound/soc/intel/platform-libs/controls_v2.c b/sound/soc/intel/platform-libs/controls_v2.c
new file mode 100644
index 0000000..3e80276
--- /dev/null
+++ b/sound/soc/intel/platform-libs/controls_v2.c
@@ -0,0 +1,1770 @@
+/*
+ *  controls_v2.c - Intel MID Platform driver ALSA controls for Mrfld
+ *
+ *  Copyright (C) 2012 Intel Corp
+ *  Author: Vinod Koul <vinod.koul@linux.intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <sound/soc.h>
+#include <sound/asound.h>
+#include <asm/platform_sst_audio.h>
+#include "../platform_ipc_v2.h"
+#include "../sst_platform.h"
+#include "../sst_platform_pvt.h"
+#include "ipc_lib.h"
+#include "controls_v2.h"
+
+
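+/*
+ * Build an integer algorithm kcontrol whose private data carries the
+ * DSP module, pipe and instance ids alongside the usual mixer fields.
+ */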
+#define SST_ALGO_KCONTROL_INT(xname, xreg, xshift, xmax, xinvert,\
+	xhandler_get, xhandler_put, xmod, xpipe, xinstance, default_val) \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+	.info = sst_algo_int_ctl_info, \
+	.get = xhandler_get, .put = xhandler_put, \
+	.private_value = (unsigned long)&(struct sst_algo_int_control_v2) \
+		{.mc.reg = xreg, .mc.rreg = xreg, .mc.shift = xshift, \
+		.mc.rshift = xshift, .mc.max = xmax, .mc.platform_max = xmax, \
+		.mc.invert = xinvert, .module_id = xmod, .pipe_id = xpipe, \
+		.instance_id = xinstance, .value = default_val } }
+/* Thresholds for Low Latency & Deep Buffer */
+#define DEFAULT_LOW_LATENCY 10	/* in ms */
+#define DEFAULT_DEEP_BUFFER 96	/* in ms */
+
+unsigned long ll_threshold = DEFAULT_LOW_LATENCY;
+unsigned long db_threshold = DEFAULT_DEEP_BUFFER;
+
+int sst_algo_int_ctl_info(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_info *uinfo)
+{
+	struct sst_algo_int_control_v2 *amc = (void *)kcontrol->private_value;
+	struct soc_mixer_control *mc = &amc->mc;
+	int platform_max;
+
+	if (!mc->platform_max)
+		mc->platform_max = mc->max;
+	platform_max = mc->platform_max;
+
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = 1;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = platform_max;
+	return 0;
+}
+
+unsigned int sst_soc_read(struct snd_soc_platform *platform,
+			unsigned int reg)
+{
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+	BUG_ON(reg > (SST_NUM_WIDGETS - 1));
+	pr_debug("%s for reg %d val=%d\n", __func__, reg, sst->widget[reg]);
+	return sst->widget[reg];
+}
+
+int sst_soc_write(struct snd_soc_platform *platform,
+		  unsigned int reg, unsigned int val)
+{
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+	pr_debug("%s for reg %d val %d\n", __func__, reg, val);
+	BUG_ON(reg > (SST_NUM_WIDGETS - 1));
+	sst->widget[reg] = val;
+	return 0;
+}
+
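+/* extract a bitfield of up to fls(max) bits from a virtual widget register */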
+unsigned int sst_reg_read(struct sst_data *sst, unsigned int reg,
+			  unsigned int shift, unsigned int max)
+{
+	unsigned int mask = (1 << fls(max)) - 1;
+
+	return (sst->widget[reg] >> shift) & mask;
+}
+
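+/* write a bitfield into a virtual widget register; returns the shifted value */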
+unsigned int sst_reg_write(struct sst_data *sst, unsigned int reg,
+			   unsigned int shift, unsigned int max, unsigned int val)
+{
+	unsigned int mask = (1 << fls(max)) - 1;
+
+	val &= mask;
+	val <<= shift;
+	sst->widget[reg] &= ~(mask << shift);
+	sst->widget[reg] |= val;
+	return val;
+}
+
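+/*
+ * Mixer put handler: update the virtual register backing the widget,
+ * then let DAPM propagate the resulting power change.
+ */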
+int sst_mix_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(widget->platform);
+	unsigned int mask = (1 << fls(mc->max)) - 1;
+	unsigned int val;
+	int connect;
+	struct snd_soc_dapm_update update;
+
+	pr_debug("%s called set %ld for %s\n", __func__,
+			ucontrol->value.integer.value[0], widget->name);
+	val = sst_reg_write(sst, mc->reg, mc->shift, mc->max, ucontrol->value.integer.value[0]);
+	connect = !!val;
+
+	widget->value = val;
+	update.kcontrol = kcontrol;
+	update.widget = widget;
+	update.reg = mc->reg;
+	update.mask = mask;
+	update.val = val;
+
+	widget->dapm->update = &update;
+	snd_soc_dapm_mixer_update_power(widget, kcontrol, connect);
+	widget->dapm->update = NULL;
+	return 0;
+}
+
+int sst_mix_get(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_dapm_widget *w = wlist->widgets[0];
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(w->platform);
+
+	pr_debug("%s called for %s\n", __func__, w->name);
+	ucontrol->value.integer.value[0] = !!sst_reg_read(sst, mc->reg, mc->shift, mc->max);
+	return 0;
+}
+
+static const struct snd_kcontrol_new sst_mix_modem_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_MODEM, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_MODEM, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_MODEM, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_MODEM, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_MODEM, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_MODEM, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_MODEM, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_MODEM, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_MODEM, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_MODEM, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_MODEM, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_MODEM, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_MODEM, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_MODEM, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_MODEM, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_MODEM, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_MODEM, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_MODEM, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_MODEM, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_codec0_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_CODEC0, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_CODEC0, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_CODEC0, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_CODEC0, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_CODEC0, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_CODEC0, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_CODEC0, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_CODEC0, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_CODEC0, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_CODEC0, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_CODEC0, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_CODEC0, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_CODEC0, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_CODEC0, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_CODEC0, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_CODEC0, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_CODEC0, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_CODEC0, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_CODEC0, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_codec1_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_CODEC1, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_CODEC1, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_CODEC1, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_CODEC1, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_CODEC1, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_CODEC1, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_CODEC1, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_CODEC1, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_CODEC1, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_CODEC1, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_CODEC1, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_CODEC1, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_CODEC1, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_CODEC1, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_CODEC1, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_CODEC1, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_CODEC1, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_CODEC1, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_CODEC1, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_sprot_l0_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_LOOP0, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_LOOP0, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_LOOP0, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_LOOP0, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_LOOP0, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_LOOP0, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_LOOP0, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_LOOP0, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_LOOP0, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_LOOP0, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_LOOP0, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_LOOP0, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_LOOP0, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_LOOP0, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_LOOP0, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_LOOP0, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_LOOP0, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_LOOP0, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_LOOP0, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_media_l1_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_LOOP1, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_LOOP1, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_LOOP1, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_LOOP1, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_LOOP1, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_LOOP1, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_LOOP1, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_LOOP1, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_LOOP1, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_LOOP1, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_LOOP1, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_LOOP1, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_LOOP1, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_LOOP1, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_LOOP1, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_LOOP1, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_LOOP1, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_LOOP1, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_LOOP1, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_media_l2_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_LOOP2, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_LOOP2, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_LOOP2, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_LOOP2, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_LOOP2, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_LOOP2, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_LOOP2, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_LOOP2, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_LOOP2, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_LOOP2, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_LOOP2, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_LOOP2, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_LOOP2, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_LOOP2, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_LOOP2, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_LOOP2, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_LOOP2, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_LOOP2, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_LOOP2, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_speech_tx_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_SPEECH, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_SPEECH, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_SPEECH, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_SPEECH, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_SPEECH, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_SPEECH, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_SPEECH, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_SPEECH, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_SPEECH, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_SPEECH, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_SPEECH, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_SPEECH, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_SPEECH, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_SPEECH, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_SPEECH, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_SPEECH, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_SPEECH, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_SPEECH, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_SPEECH, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_speech_rx_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_RXSPEECH, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_RXSPEECH, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_RXSPEECH, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_RXSPEECH, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_RXSPEECH, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_RXSPEECH, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_RXSPEECH, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_RXSPEECH, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_RXSPEECH, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_RXSPEECH, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_RXSPEECH, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_RXSPEECH, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_RXSPEECH, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_RXSPEECH, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_RXSPEECH, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_RXSPEECH, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_RXSPEECH, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_RXSPEECH, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_RXSPEECH, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_voip_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_VOIP, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_VOIP, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_VOIP, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_VOIP, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_VOIP, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_VOIP, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_VOIP, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_VOIP, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_VOIP, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_VOIP, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_VOIP, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_VOIP, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_VOIP, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_VOIP, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_VOIP, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_VOIP, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_VOIP, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_VOIP, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_VOIP, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_pcm0_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_PCM0, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_PCM0, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_PCM0, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_PCM0, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_PCM0, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_PCM0, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_PCM0, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_PCM0, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_PCM0, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_PCM0, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_PCM0, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_PCM0, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_PCM0, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_PCM0, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_PCM0, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_PCM0, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_PCM0, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_PCM0, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_PCM0, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_pcm1_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_PCM1, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_PCM1, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_PCM1, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_PCM1, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_PCM1, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_PCM1, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_PCM1, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_PCM1, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_PCM1, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_PCM1, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_PCM1, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_PCM1, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_PCM1, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_PCM1, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_PCM1, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_PCM1, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_PCM1, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_PCM1, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_PCM1, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_pcm2_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_PCM2, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_PCM2, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_PCM2, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_PCM2, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_PCM2, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_PCM2, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_PCM2, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_PCM2, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_PCM2, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_PCM2, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_PCM2, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_PCM2, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_PCM2, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_PCM2, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_PCM2, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_PCM2, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_PCM2, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_PCM2, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_PCM2, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_aware_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_AWARE, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_AWARE, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_AWARE, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_AWARE, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_AWARE, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_AWARE, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_AWARE, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_AWARE, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_AWARE, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_AWARE, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_AWARE, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_AWARE, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_AWARE, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_AWARE, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_AWARE, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_AWARE, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_AWARE, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_AWARE, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_AWARE, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_vad_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_VAD, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_VAD, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_VAD, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_VAD, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_VAD, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_VAD, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_VAD, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_VAD, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_VAD, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_VAD, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_VAD, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_VAD, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_VAD, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_VAD, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_VAD, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_VAD, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_VAD, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_VAD, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_VAD, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_media0_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_MEDIA0, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_MEDIA0, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_MEDIA0, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_MEDIA0, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_MEDIA0, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_MEDIA0, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_MEDIA0, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_MEDIA0, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_MEDIA0, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_MEDIA0, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_MEDIA0, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_MEDIA0, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_MEDIA0, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_MEDIA0, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_MEDIA0, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_MEDIA0, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_MEDIA0, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_MEDIA0, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_MEDIA0, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_media1_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_MEDIA1, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_MEDIA1, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_MEDIA1, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_MEDIA1, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_MEDIA1, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_MEDIA1, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_MEDIA1, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_MEDIA1, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_MEDIA1, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_MEDIA1, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_MEDIA1, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_MEDIA1, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_MEDIA1, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_MEDIA1, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_MEDIA1, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_MEDIA1, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_MEDIA1, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_MEDIA1, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_MEDIA1, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_fm_controls[] = {
+	SOC_SINGLE_EXT("Modem", SST_MIX_FM, 0, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("BT", SST_MIX_FM, 1, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec0", SST_MIX_FM, 2, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Codec1", SST_MIX_FM, 3, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sprot_L0", SST_MIX_FM, 4, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L1", SST_MIX_FM, 5, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media_L2", SST_MIX_FM, 6, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Probe", SST_MIX_FM, 7, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Sidetone", SST_MIX_FM, 8, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Tx", SST_MIX_FM, 9, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Speech_Rx", SST_MIX_FM, 10, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Tone", SST_MIX_FM, 11, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Voip", SST_MIX_FM, 12, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM0", SST_MIX_FM, 13, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("PCM1", SST_MIX_FM, 14, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media0", SST_MIX_FM, 15, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media1", SST_MIX_FM, 16, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("Media2", SST_MIX_FM, 17, 1, 0,
+		sst_mix_get, sst_mix_put),
+	SOC_SINGLE_EXT("FM", SST_MIX_FM, 18, 1, 0,
+		sst_mix_get, sst_mix_put),
+};
+
+static const struct snd_kcontrol_new sst_mix_sw_modem =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 0, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_codec0 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 1, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_codec1 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 2, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_sprot_l0 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 3, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_media_l1 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 4, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_media_l2 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 5, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_speech_tx =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 6, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_speech_rx =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 7, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_voip =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 8, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_pcm0 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 9, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_pcm1 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 10, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_pcm2 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 11, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_aware =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 12, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_vad =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 13, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_media0 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 14, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_media1 =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 15, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_mix_sw_fm =
+	SOC_SINGLE_EXT("Switch", SST_MIX_SWITCH, 16, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_modem =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 0, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_codec0 =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 1, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_codec1 =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 2, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_speech_tx =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 6, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_speech_rx =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 7, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_voip =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 8, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_pcm0 =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 9, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_pcm1 =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 10, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_pcm2 =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 11, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_aware =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 12, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_vad =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 13, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_media0 =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 14, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_media1 =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 15, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_out_sw_fm =
+	SOC_SINGLE_EXT("Switch", SST_OUT_SWITCH, 16, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_modem =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 0, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_codec0 =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 1, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_codec1 =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 2, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_sidetone =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 3, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_speech_tx =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 4, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_speech_rx =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 5, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_tone =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 6, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_voip =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 7, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_pcm0 =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 8, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_pcm1 =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 9, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_media0 =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 10, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_media1 =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 11, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_media2 =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 12, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_kcontrol_new sst_in_sw_fm =
+	SOC_SINGLE_EXT("Switch", SST_IN_SWITCH, 13, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const struct snd_soc_dapm_widget sst_dapm_widgets[] = {
+	SND_SOC_DAPM_INPUT("Modem IN"),
+	SND_SOC_DAPM_INPUT("Codec IN0"),
+	SND_SOC_DAPM_INPUT("Codec IN1"),
+	SND_SOC_DAPM_INPUT("Tone IN"),
+	SND_SOC_DAPM_INPUT("FM IN"),
+	SND_SOC_DAPM_OUTPUT("Modem OUT"),
+	SND_SOC_DAPM_OUTPUT("Codec OUT0"),
+	SND_SOC_DAPM_OUTPUT("Codec OUT1"),
+	SND_SOC_DAPM_OUTPUT("FM OUT"),
+	SND_SOC_DAPM_AIF_IN("Voip IN", "VoIP", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("Media IN0", "Compress", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("Media IN1", "PCM", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("Voip OUT", "VoIP", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("PCM1 OUT", "Capture", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("Aware OUT", "Aware", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("VAD OUT", "VAD", 0, SND_SOC_NOPM, 0, 0),
+
+	/* output mixers */
+	SND_SOC_DAPM_MIXER("MIX Modem", SND_SOC_NOPM, 0, 0,
+		sst_mix_modem_controls, ARRAY_SIZE(sst_mix_modem_controls)),
+	SND_SOC_DAPM_MIXER("MIX Codec0", SND_SOC_NOPM, 0, 0,
+		sst_mix_codec0_controls, ARRAY_SIZE(sst_mix_codec0_controls)),
+	SND_SOC_DAPM_MIXER("MIX Codec1", SND_SOC_NOPM, 0, 0,
+		sst_mix_codec1_controls, ARRAY_SIZE(sst_mix_codec1_controls)),
+	SND_SOC_DAPM_MIXER("MIX Sprot L0", SND_SOC_NOPM, 0, 0,
+		sst_mix_sprot_l0_controls, ARRAY_SIZE(sst_mix_sprot_l0_controls)),
+	SND_SOC_DAPM_MIXER("MIX Media L1", SND_SOC_NOPM, 0, 0,
+		sst_mix_media_l1_controls, ARRAY_SIZE(sst_mix_media_l1_controls)),
+	SND_SOC_DAPM_MIXER("MIX Media L2", SND_SOC_NOPM, 0, 0,
+		sst_mix_media_l2_controls, ARRAY_SIZE(sst_mix_media_l2_controls)),
+	SND_SOC_DAPM_MIXER("MIX Speech Tx", SND_SOC_NOPM, 0, 0,
+		sst_mix_speech_tx_controls, ARRAY_SIZE(sst_mix_speech_tx_controls)),
+	SND_SOC_DAPM_MIXER("MIX Speech Rx", SND_SOC_NOPM, 0, 0,
+		sst_mix_speech_rx_controls, ARRAY_SIZE(sst_mix_speech_rx_controls)),
+	SND_SOC_DAPM_MIXER("MIX Voip", SND_SOC_NOPM, 0, 0,
+		sst_mix_voip_controls, ARRAY_SIZE(sst_mix_voip_controls)),
+	SND_SOC_DAPM_MIXER("MIX PCM0", SND_SOC_NOPM, 0, 0,
+		sst_mix_pcm0_controls, ARRAY_SIZE(sst_mix_pcm0_controls)),
+	SND_SOC_DAPM_MIXER("MIX PCM1", SND_SOC_NOPM, 0, 0,
+		sst_mix_pcm1_controls, ARRAY_SIZE(sst_mix_pcm1_controls)),
+	SND_SOC_DAPM_MIXER("MIX PCM2", SND_SOC_NOPM, 0, 0,
+		sst_mix_pcm2_controls, ARRAY_SIZE(sst_mix_pcm2_controls)),
+	SND_SOC_DAPM_MIXER("MIX Aware", SND_SOC_NOPM, 0, 0,
+		sst_mix_aware_controls, ARRAY_SIZE(sst_mix_aware_controls)),
+	SND_SOC_DAPM_MIXER("MIX VAD", SND_SOC_NOPM, 0, 0,
+		sst_mix_vad_controls, ARRAY_SIZE(sst_mix_vad_controls)),
+	SND_SOC_DAPM_MIXER("MIX Media0", SND_SOC_NOPM, 0, 0,
+		sst_mix_media0_controls, ARRAY_SIZE(sst_mix_media0_controls)),
+	SND_SOC_DAPM_MIXER("MIX Media1", SND_SOC_NOPM, 0, 0,
+		sst_mix_media1_controls, ARRAY_SIZE(sst_mix_media1_controls)),
+	SND_SOC_DAPM_MIXER("MIX FM", SND_SOC_NOPM, 0, 0,
+		sst_mix_fm_controls, ARRAY_SIZE(sst_mix_fm_controls)),
+
+	/* switches for mixer outputs */
+	SND_SOC_DAPM_SWITCH("Mix Modem Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_modem),
+	SND_SOC_DAPM_SWITCH("Mix Codec0 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_codec0),
+	SND_SOC_DAPM_SWITCH("Mix Codec1 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_codec1),
+	SND_SOC_DAPM_SWITCH("Mix Sprot L0 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_sprot_l0),
+	SND_SOC_DAPM_SWITCH("Mix Media L1 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_media_l1),
+	SND_SOC_DAPM_SWITCH("Mix Media L2 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_media_l2),
+	SND_SOC_DAPM_SWITCH("Mix Speech Tx Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_speech_tx),
+	SND_SOC_DAPM_SWITCH("Mix Speech Rx Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_speech_rx),
+	SND_SOC_DAPM_SWITCH("Mix Voip Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_voip),
+	SND_SOC_DAPM_SWITCH("Mix PCM0 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_pcm0),
+	SND_SOC_DAPM_SWITCH("Mix PCM1 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_pcm1),
+	SND_SOC_DAPM_SWITCH("Mix PCM2 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_pcm2),
+	SND_SOC_DAPM_SWITCH("Mix Aware Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_aware),
+	SND_SOC_DAPM_SWITCH("Mix VAD Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_vad),
+	SND_SOC_DAPM_SWITCH("Mix Media0 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_media0),
+	SND_SOC_DAPM_SWITCH("Mix Media1 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_media1),
+	SND_SOC_DAPM_SWITCH("Mix FM Switch", SND_SOC_NOPM, 0, 0,
+			&sst_mix_sw_fm),
+
+	/* output pipeline switches */
+	SND_SOC_DAPM_SWITCH("Out Modem Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_modem),
+	SND_SOC_DAPM_SWITCH("Out Codec0 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_codec0),
+	SND_SOC_DAPM_SWITCH("Out Codec1 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_codec1),
+	SND_SOC_DAPM_SWITCH("Out Speech Tx Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_speech_tx),
+	SND_SOC_DAPM_SWITCH("Out Speech Rx Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_speech_rx),
+	SND_SOC_DAPM_SWITCH("Out Voip Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_voip),
+	SND_SOC_DAPM_SWITCH("Out PCM0 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_pcm0),
+	SND_SOC_DAPM_SWITCH("Out PCM1 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_pcm1),
+	SND_SOC_DAPM_SWITCH("Out PCM2 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_pcm2),
+	SND_SOC_DAPM_SWITCH("Out Aware Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_aware),
+	SND_SOC_DAPM_SWITCH("Out VAD Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_vad),
+	SND_SOC_DAPM_SWITCH("Out Media0 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_media0),
+	SND_SOC_DAPM_SWITCH("Out Media1 Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_media1),
+	SND_SOC_DAPM_SWITCH("Out FM Switch", SND_SOC_NOPM, 0, 0,
+			&sst_out_sw_fm),
+
+	/* Input pipeline switches */
+	SND_SOC_DAPM_SWITCH("In Modem Switch", SND_SOC_NOPM, 0, 0,
+		       &sst_in_sw_modem),
+	SND_SOC_DAPM_SWITCH("In Codec0 Switch", SND_SOC_NOPM, 0, 0,
+		       &sst_in_sw_codec0),
+	SND_SOC_DAPM_SWITCH("In Codec1 Switch", SND_SOC_NOPM, 0, 0,
+		       &sst_in_sw_codec1),
+	SND_SOC_DAPM_SWITCH("In Speech Tx Switch", SND_SOC_NOPM, 0, 0,
+		       &sst_in_sw_speech_tx),
+	SND_SOC_DAPM_SWITCH("In Speech Rx Switch", SND_SOC_NOPM, 0, 0,
+		       &sst_in_sw_speech_rx),
+	SND_SOC_DAPM_SWITCH("In Tone Switch", SND_SOC_NOPM, 0, 0,
+		       &sst_in_sw_tone),
+	SND_SOC_DAPM_SWITCH("In Voip Switch", SND_SOC_NOPM, 0, 0,
+		       &sst_in_sw_voip),
+	SND_SOC_DAPM_SWITCH("In PCM0 Switch", SND_SOC_NOPM, 0, 0,
+		       &sst_in_sw_pcm0),
+	SND_SOC_DAPM_SWITCH("In PCM1 Switch", SND_SOC_NOPM, 0, 0,
+		       &sst_in_sw_pcm1),
+	SND_SOC_DAPM_SWITCH("In Media0 Switch", SND_SOC_NOPM, 0, 0,
+		       &sst_in_sw_media0),
+	SND_SOC_DAPM_SWITCH("In Media1 Switch", SND_SOC_NOPM, 0, 0,
+		       &sst_in_sw_media1),
+	SND_SOC_DAPM_SWITCH("In Media2 Switch", SND_SOC_NOPM, 0, 0,
+		       &sst_in_sw_media2),
+	SND_SOC_DAPM_SWITCH("In FM Switch", SND_SOC_NOPM, 0, 0,
+		       &sst_in_sw_fm),
+};
+
+static const struct snd_soc_dapm_route intercon[] = {
+	/* media mixer settings */
+	{ "In Media0 Switch", "Switch", "Media IN0"},
+	{ "In Media1 Switch", "Switch", "Media IN1"},
+	{ "MIX Media0", "Media0", "In Media0 Switch"},
+	{ "MIX Media0", "Media1", "In Media1 Switch"},
+	{ "MIX Media0", "Media2", "In Media2 Switch"},
+	{ "MIX Media1", "Media0", "In Media0 Switch"},
+	{ "MIX Media1", "Media1", "In Media1 Switch"},
+	{ "MIX Media1", "Media2", "In Media2 Switch"},
+
+	/* media to main mixer intercon */
+	/* two media paths from media to main */
+	{ "Mix Media0 Switch", "Switch", "MIX Media0"},
+	{ "Out Media0 Switch", "Switch", "Mix Media0 Switch"},
+	{ "In PCM0 Switch", "Switch", "Out Media0 Switch"},
+	{ "Mix Media1 Switch", "Switch", "MIX Media1"},
+	{ "Out Media1 Switch", "Switch", "Mix Media1 Switch"},
+	{ "In PCM1 Switch", "Switch", "Out Media1 Switch"},
+	/* one back from main to media */
+	{ "Mix PCM0 Switch", "Switch", "MIX PCM0"},
+	{ "Out PCM0 Switch", "Switch", "Mix PCM0 Switch"},
+	{ "In Media2 Switch", "Switch", "Out PCM0 Switch"},
+
+	/* main mixer inputs - all inputs connect to mixer */
+	{ "MIX Modem", "Modem", "In Modem Switch"},
+	{ "MIX Modem", "Codec0", "In Codec0 Switch"},
+	{ "MIX Modem", "Codec1", "In Codec1 Switch"},
+	{ "MIX Modem", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Modem", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Modem", "Tone", "In Tone Switch"},
+	{ "MIX Modem", "Voip", "In Voip Switch"},
+	{ "MIX Modem", "PCM0", "In PCM0 Switch"},
+	{ "MIX Modem", "PCM1", "In PCM1 Switch"},
+	{ "MIX Modem", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Modem", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Modem", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Modem", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Modem", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Codec0", "Modem", "In Modem Switch"},
+	{ "MIX Codec0", "Codec0", "In Codec0 Switch"},
+	{ "MIX Codec0", "Codec1", "In Codec1 Switch"},
+	{ "MIX Codec0", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Codec0", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Codec0", "Tone", "In Tone Switch"},
+	{ "MIX Codec0", "Voip", "In Voip Switch"},
+	{ "MIX Codec0", "PCM0", "In PCM0 Switch"},
+	{ "MIX Codec0", "PCM1", "In PCM1 Switch"},
+	{ "MIX Codec0", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Codec0", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Codec0", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Codec0", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Codec0", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Codec1", "Modem", "In Modem Switch"},
+	{ "MIX Codec1", "Codec0", "In Codec0 Switch"},
+	{ "MIX Codec1", "Codec1", "In Codec1 Switch"},
+	{ "MIX Codec1", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Codec1", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Codec1", "Tone", "In Tone Switch"},
+	{ "MIX Codec1", "Voip", "In Voip Switch"},
+	{ "MIX Codec1", "PCM0", "In PCM0 Switch"},
+	{ "MIX Codec1", "PCM1", "In PCM1 Switch"},
+	{ "MIX Codec1", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Codec1", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Codec1", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Codec1", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Codec1", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Sprot L0", "Modem", "In Modem Switch"},
+	{ "MIX Sprot L0", "Codec0", "In Codec0 Switch"},
+	{ "MIX Sprot L0", "Codec1", "In Codec1 Switch"},
+	{ "MIX Sprot L0", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Sprot L0", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Sprot L0", "Tone", "In Tone Switch"},
+	{ "MIX Sprot L0", "Voip", "In Voip Switch"},
+	{ "MIX Sprot L0", "PCM0", "In PCM0 Switch"},
+	{ "MIX Sprot L0", "PCM1", "In PCM1 Switch"},
+	{ "MIX Sprot L0", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Sprot L0", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Sprot L0", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Sprot L0", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Sprot L0", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Media L1", "Modem", "In Modem Switch"},
+	{ "MIX Media L1", "Codec0", "In Codec0 Switch"},
+	{ "MIX Media L1", "Codec1", "In Codec1 Switch"},
+	{ "MIX Media L1", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Media L1", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Media L1", "Tone", "In Tone Switch"},
+	{ "MIX Media L1", "Voip", "In Voip Switch"},
+	{ "MIX Media L1", "PCM0", "In PCM0 Switch"},
+	{ "MIX Media L1", "PCM1", "In PCM1 Switch"},
+	{ "MIX Media L1", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Media L1", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Media L1", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Media L1", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Media L1", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Media L2", "Modem", "In Modem Switch"},
+	{ "MIX Media L2", "Codec0", "In Codec0 Switch"},
+	{ "MIX Media L2", "Codec1", "In Codec1 Switch"},
+	{ "MIX Media L2", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Media L2", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Media L2", "Tone", "In Tone Switch"},
+	{ "MIX Media L2", "Voip", "In Voip Switch"},
+	{ "MIX Media L2", "PCM0", "In PCM0 Switch"},
+	{ "MIX Media L2", "PCM1", "In PCM1 Switch"},
+	{ "MIX Media L2", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Media L2", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Media L2", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Media L2", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Media L2", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Speech Rx", "Modem", "In Modem Switch"},
+	{ "MIX Speech Rx", "Codec0", "In Codec0 Switch"},
+	{ "MIX Speech Rx", "Codec1", "In Codec1 Switch"},
+	{ "MIX Speech Rx", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Speech Rx", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Speech Rx", "Tone", "In Tone Switch"},
+	{ "MIX Speech Rx", "Voip", "In Voip Switch"},
+	{ "MIX Speech Rx", "PCM0", "In PCM0 Switch"},
+	{ "MIX Speech Rx", "PCM1", "In PCM1 Switch"},
+	{ "MIX Speech Rx", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Speech Rx", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Speech Rx", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Speech Rx", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Speech Rx", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Speech Tx", "Modem", "In Modem Switch"},
+	{ "MIX Speech Tx", "Codec0", "In Codec0 Switch"},
+	{ "MIX Speech Tx", "Codec1", "In Codec1 Switch"},
+	{ "MIX Speech Tx", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Speech Tx", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Speech Tx", "Tone", "In Tone Switch"},
+	{ "MIX Speech Tx", "Voip", "In Voip Switch"},
+	{ "MIX Speech Tx", "PCM0", "In PCM0 Switch"},
+	{ "MIX Speech Tx", "PCM1", "In PCM1 Switch"},
+	{ "MIX Speech Tx", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Speech Tx", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Speech Tx", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Speech Tx", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Speech Tx", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Voip", "Modem", "In Modem Switch"},
+	{ "MIX Voip", "Codec0", "In Codec0 Switch"},
+	{ "MIX Voip", "Codec1", "In Codec1 Switch"},
+	{ "MIX Voip", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Voip", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Voip", "Tone", "In Tone Switch"},
+	{ "MIX Voip", "Voip", "In Voip Switch"},
+	{ "MIX Voip", "PCM0", "In PCM0 Switch"},
+	{ "MIX Voip", "PCM1", "In PCM1 Switch"},
+	{ "MIX Voip", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Voip", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Voip", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Voip", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Voip", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX PCM0", "Modem", "In Modem Switch"},
+	{ "MIX PCM0", "Codec0", "In Codec0 Switch"},
+	{ "MIX PCM0", "Codec1", "In Codec1 Switch"},
+	{ "MIX PCM0", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX PCM0", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX PCM0", "Tone", "In Tone Switch"},
+	{ "MIX PCM0", "Voip", "In Voip Switch"},
+	{ "MIX PCM0", "PCM0", "In PCM0 Switch"},
+	{ "MIX PCM0", "PCM1", "In PCM1 Switch"},
+	{ "MIX PCM0", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX PCM0", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX PCM0", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX PCM0", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX PCM0", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX PCM1", "Modem", "In Modem Switch"},
+	{ "MIX PCM1", "Codec0", "In Codec0 Switch"},
+	{ "MIX PCM1", "Codec1", "In Codec1 Switch"},
+	{ "MIX PCM1", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX PCM1", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX PCM1", "Tone", "In Tone Switch"},
+	{ "MIX PCM1", "Voip", "In Voip Switch"},
+	{ "MIX PCM1", "PCM0", "In PCM0 Switch"},
+	{ "MIX PCM1", "PCM1", "In PCM1 Switch"},
+	{ "MIX PCM1", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX PCM1", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX PCM1", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX PCM1", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX PCM1", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX PCM2", "Modem", "In Modem Switch"},
+	{ "MIX PCM2", "Codec0", "In Codec0 Switch"},
+	{ "MIX PCM2", "Codec1", "In Codec1 Switch"},
+	{ "MIX PCM2", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX PCM2", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX PCM2", "Tone", "In Tone Switch"},
+	{ "MIX PCM2", "Voip", "In Voip Switch"},
+	{ "MIX PCM2", "PCM0", "In PCM0 Switch"},
+	{ "MIX PCM2", "PCM1", "In PCM1 Switch"},
+	{ "MIX PCM2", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX PCM2", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX PCM2", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX PCM2", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX PCM2", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX Aware", "Modem", "In Modem Switch"},
+	{ "MIX Aware", "Codec0", "In Codec0 Switch"},
+	{ "MIX Aware", "Codec1", "In Codec1 Switch"},
+	{ "MIX Aware", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX Aware", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX Aware", "Tone", "In Tone Switch"},
+	{ "MIX Aware", "Voip", "In Voip Switch"},
+	{ "MIX Aware", "PCM0", "In PCM0 Switch"},
+	{ "MIX Aware", "PCM1", "In PCM1 Switch"},
+	{ "MIX Aware", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX Aware", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX Aware", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX Aware", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX Aware", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX VAD", "Modem", "In Modem Switch"},
+	{ "MIX VAD", "Codec0", "In Codec0 Switch"},
+	{ "MIX VAD", "Codec1", "In Codec1 Switch"},
+	{ "MIX VAD", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX VAD", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX VAD", "Tone", "In Tone Switch"},
+	{ "MIX VAD", "Voip", "In Voip Switch"},
+	{ "MIX VAD", "PCM0", "In PCM0 Switch"},
+	{ "MIX VAD", "PCM1", "In PCM1 Switch"},
+	{ "MIX VAD", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX VAD", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX VAD", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX VAD", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX VAD", "Sidetone", "Mix Speech Tx Switch"},
+
+	{ "MIX FM", "Modem", "In Modem Switch"},
+	{ "MIX FM", "Codec0", "In Codec0 Switch"},
+	{ "MIX FM", "Codec1", "In Codec1 Switch"},
+	{ "MIX FM", "Speech_Tx", "In Speech Tx Switch"},
+	{ "MIX FM", "Speech_Rx", "In Speech Rx Switch"},
+	{ "MIX FM", "Tone", "In Tone Switch"},
+	{ "MIX FM", "Voip", "In Voip Switch"},
+	{ "MIX FM", "PCM0", "In PCM0 Switch"},
+	{ "MIX FM", "PCM1", "In PCM1 Switch"},
+	{ "MIX FM", "FM", "In FM Switch"},
+	/* loops have output switches coming back to mixers */
+	{ "MIX FM", "Sprot_L0", "Mix Sprot L0 Switch"},
+	{ "MIX FM", "Media_L1", "Mix Media L1 Switch"},
+	{ "MIX FM", "Media_L2", "Mix Media L2 Switch"},
+	/* sidetone comes from speech out */
+	{ "MIX FM", "Sidetone", "Mix Speech Tx Switch"},
+
+	/* now connect the mixers to output switches */
+	{ "Mix Modem Switch", "Switch", "MIX Modem"},
+	{ "Out Modem Switch", "Switch", "Mix Modem Switch"},
+	{ "Mix Codec0 Switch", "Switch", "MIX Codec0"},
+	{ "Out Codec0 Switch", "Switch", "Mix Codec0 Switch"},
+	{ "Mix Codec1 Switch", "Switch", "MIX Codec1"},
+	{ "Out Codec1 Switch", "Switch", "Mix Codec1 Switch"},
+	{ "Mix Speech Tx Switch", "Switch", "MIX Speech Tx"},
+	{ "Out Speech Tx Switch", "Switch", "Mix Speech Tx Switch"},
+	{ "Mix Speech Rx Switch", "Switch", "MIX Speech Rx"},
+	{ "Out Speech Rx Switch", "Switch", "Mix Speech Rx Switch"},
+	{ "Mix Voip Switch", "Switch", "MIX Voip"},
+	{ "Out Voip Switch", "Switch", "Mix Voip Switch"},
+	{ "Mix Aware Switch", "Switch", "MIX Aware"},
+	{ "Out Aware Switch", "Switch", "Mix Aware Switch"},
+	{ "Mix VAD Switch", "Switch", "MIX VAD"},
+	{ "Out VAD Switch", "Switch", "Mix VAD Switch"},
+	{ "Mix FM Switch", "Switch", "MIX FM"},
+	{ "Out FM Switch", "Switch", "Mix FM Switch"},
+	{ "Mix PCM1 Switch", "Switch", "MIX PCM1"},
+	{ "Out PCM1 Switch", "Switch", "Mix PCM1 Switch"},
+	{ "Mix PCM2 Switch", "Switch", "MIX PCM2"},
+	{ "Out PCM2 Switch", "Switch", "Mix PCM2 Switch"},
+
+	/* the loops:
+	 * media loops don't have input/output switches, just a mixer enable
+	 */
+	{ "Mix Sprot L0 Switch", "Switch", "MIX Sprot L0"},
+	{ "Mix Media L1 Switch", "Switch", "MIX Media L1"},
+	{ "Mix Media L2 Switch", "Switch", "MIX Media L2"},
+	/* no extra routes are needed there, since the mixer switches are
+	 * already inputs to all mixers; the speech loops, however, do need
+	 * to be connected here
+	 */
+	{ "In Speech Rx Switch", "Switch", "Out Speech Rx Switch"},
+	{ "In Speech Tx Switch", "Switch", "Out Speech Tx Switch"},
+	/* finally, connect the output switches to the inputs
+	 * and outputs, and hook up the AIFs
+	 */
+	{ "In Modem Switch", "Switch", "Modem IN"},
+	{ "In Codec0 Switch", "Switch", "Codec IN0"},
+	{ "In Codec1 Switch", "Switch", "Codec IN1"},
+	{ "In Tone Switch", "Switch", "Tone IN"},
+	{ "In FM Switch", "Switch", "FM IN"},
+
+	{ "Modem OUT", NULL, "Out Modem Switch"},
+	{ "Codec OUT0", NULL, "Out Codec0 Switch"},
+	{ "Codec OUT1", NULL, "Out Codec1 Switch"},
+	{ "FM OUT", NULL, "Out FM Switch"},
+
+	{ "In Voip Switch", "Switch", "Voip IN"},
+
+	{ "Voip OUT", NULL, "Out Voip Switch"},
+	{ "PCM1 OUT", NULL, "Out PCM1 Switch"},
+	{ "Aware OUT", NULL, "Out Aware Switch"},
+	{ "VAD OUT", NULL, "Out VAD Switch"},
+};
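+
+/*
+ * Example route walk, read off the table above: a stream entering on
+ * Media IN0 and leaving on Codec OUT0 traverses
+ *   Media IN0 -> In Media0 Switch -> MIX Media0 -> Mix Media0 Switch
+ *   -> Out Media0 Switch -> In PCM0 Switch -> MIX Codec0 ("PCM0" input)
+ *   -> Mix Codec0 Switch -> Out Codec0 Switch -> Codec OUT0
+ * with every switch and mixer input along the way enabled via its
+ * kcontrol.
+ */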
+
+int sst_byte_control_get(struct snd_kcontrol *kcontrol,
+			 struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+	pr_debug("in %s\n", __func__);
+	memcpy(ucontrol->value.bytes.data, sst->byte_stream, SST_MAX_BIN_BYTES);
+	print_hex_dump_bytes(__func__, DUMP_PREFIX_OFFSET,
+			     (const void *)sst->byte_stream, 32);
+	return 0;
+}
+
+static int sst_check_binary_input(char *stream)
+{
+	struct snd_sst_bytes_v2 *bytes = (struct snd_sst_bytes_v2 *)stream;
+
+	if (bytes->len == 0 || bytes->len > 1000) {
+		pr_err("length out of bounds %d\n", bytes->len);
+		return -EINVAL;
+	}
+	if (bytes->type == 0 || bytes->type > SND_SST_BYTES_GET) {
+		pr_err("type out of bounds: %d\n", bytes->type);
+		return -EINVAL;
+	}
+	if (bytes->block > 1) {
+		pr_err("block invalid %d\n", bytes->block);
+		return -EINVAL;
+	}
+	if (bytes->task_id == SST_TASK_ID_NONE || bytes->task_id > SST_TASK_ID_MAX) {
+		pr_err("taskid invalid %d\n", bytes->task_id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
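+
+/*
+ * Illustrative example (field values assumed): a header with .len = 64,
+ * .type = SND_SST_BYTES_SET, .block = 0 and a .task_id in the range
+ * (SST_TASK_ID_NONE, SST_TASK_ID_MAX] passes all of the checks above.
+ */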
+
+int sst_byte_control_set(struct snd_kcontrol *kcontrol,
+			 struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	int ret = 0;
+
+	pr_debug("in %s\n", __func__);
+	mutex_lock(&sst->lock);
+	memcpy(sst->byte_stream, ucontrol->value.bytes.data, SST_MAX_BIN_BYTES);
+	if (sst_check_binary_input(sst->byte_stream)) {
+		mutex_unlock(&sst->lock);
+		return -EINVAL;
+	}
+	print_hex_dump_bytes(__func__, DUMP_PREFIX_OFFSET,
+			     (const void *)sst->byte_stream, 32);
+	ret = sst_dsp->ops->set_generic_params(SST_SET_BYTE_STREAM, sst->byte_stream);
+	mutex_unlock(&sst->lock);
+
+	return ret;
+}
+
+static int sst_pipe_id_control_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	int ret = 0;
+
+	ucontrol->value.integer.value[0] = sst->pipe_id;
+
+	return ret;
+}
+
+static int sst_pipe_id_control_set(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	int ret = 0;
+
+	sst->pipe_id = ucontrol->value.integer.value[0];
+	pr_debug("%s: pipe_id %d", __func__, sst->pipe_id);
+
+	return ret;
+}
+
+/* dB range for mrfld compress volume is -144dB to +36dB.
+ * Gain library expects user input in terms of 0.1dB, for example,
+ * 60 (in decimal) represents 6dB.
+ * MW will pass 2's complement value for negative dB values.
+ */
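+/*
+ * Worked example: +6 dB is sent as 60 (0x003C); -1 dB is -10, i.e.
+ * 0xFFF6 as a u16; the -144 dB mute level is -1440, i.e. 0xFA60 (see
+ * SST_COMPR_VOL_MAX_INTEG_GAIN and SST_COMPR_VOL_MUTE below).
+ */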
+static int sst_compr_vol_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	struct sst_algo_int_control_v2 *amc = (void *)kcontrol->private_value;
+	u16 gain;
+	unsigned int gain_offset;
+	int ret;
+
+	sst_create_compr_vol_ipc(sst->byte_stream, SND_SST_BYTES_GET, amc);
+	mutex_lock(&sst->lock);
+	ret = sst_dsp->ops->set_generic_params(SST_SET_BYTE_STREAM,
+						sst->byte_stream);
+	mutex_unlock(&sst->lock);
+	if (ret) {
+		pr_err("failed to get compress vol from fw: %d\n", ret);
+		return ret;
+	}
+	gain_offset = sizeof(struct snd_sst_bytes_v2) +
+				sizeof(struct ipc_dsp_hdr);
+
+	/* Get params format for the vol ctrl lib, size 6 bytes:
+	 * u16 left_gain, u16 right_gain, u16 ramp
+	 */
+	memcpy(&gain,
+		(unsigned int *)(sst->byte_stream + gain_offset),
+		sizeof(u16));
+	pr_debug("%s: cell_gain = %d\n", __func__, gain);
+	amc->value = gain;
+	ucontrol->value.integer.value[0] = gain;
+	return 0;
+}
+
+static int sst_compr_vol_set(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	struct sst_algo_int_control_v2 *amc = (void *)kcontrol->private_value;
+	int ret = 0;
+	unsigned int old_val;
+
+	pr_debug("%s: cell_gain = %ld\n", __func__,\
+				ucontrol->value.integer.value[0]);
+	old_val = amc->value;
+	amc->value = ucontrol->value.integer.value[0];
+	sst_create_compr_vol_ipc(sst->byte_stream, SND_SST_BYTES_SET,
+					amc);
+
+	mutex_lock(&sst->lock);
+	ret = sst_dsp->ops->set_generic_params(SST_SET_BYTE_STREAM,
+						sst->byte_stream);
+	mutex_unlock(&sst->lock);
+	if (ret) {
+		pr_err("failed to set compress vol in fw: %d\n", ret);
+		amc->value = old_val;
+		return ret;
+	}
+	return 0;
+}
+
+static int sst_vtsv_enroll_set(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	int ret = 0;
+
+	sst->vtsv_enroll = ucontrol->value.integer.value[0];
+	mutex_lock(&sst->lock);
+	if (sst->vtsv_enroll)
+		ret = sst_dsp->ops->set_generic_params(SST_SET_VTSV_INFO,
+					(void *)&sst->vtsv_enroll);
+	mutex_unlock(&sst->lock);
+	return ret;
+}
+
+static int sst_vtsv_enroll_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+	ucontrol->value.integer.value[0] = sst->vtsv_enroll;
+	return 0;
+}
+
+/* Two's complement of -10, i.e. -1 dB (in 0.1 dB units) */
+#define SST_COMPR_VOL_MAX_INTEG_GAIN 0xFFF6
+#define SST_COMPR_VOL_MUTE 0xFA60 /* 2's complement of -1440 or -144 dB */
+
+static const struct snd_kcontrol_new sst_mrfld_controls[] = {
+	SND_SOC_BYTES_EXT("SST Byte control", SST_MAX_BIN_BYTES,
+		       sst_byte_control_get, sst_byte_control_set),
+	SOC_SINGLE_EXT("SST Pipe_id control", SST_PIPE_CONTROL, 0, 0x9A, 0,
+		sst_pipe_id_control_get, sst_pipe_id_control_set),
+	SST_ALGO_KCONTROL_INT("Compress Volume", SST_COMPRESS_VOL,
+		0, SST_COMPR_VOL_MAX_INTEG_GAIN, 0,
+		sst_compr_vol_get, sst_compr_vol_set,
+		SST_ALGO_VOLUME_CONTROL, PIPE_MEDIA0_IN, 0,
+		SST_COMPR_VOL_MUTE),
+	SOC_SINGLE_BOOL_EXT("SST VTSV Enroll", 0, sst_vtsv_enroll_get,
+		       sst_vtsv_enroll_set),
+};
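+
+/*
+ * From userspace these appear as ordinary ALSA controls; e.g. (card
+ * number and value illustrative):
+ *   amixer -c 0 cset name='SST Pipe_id control' 13
+ */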
+
+static DEVICE_ULONG_ATTR(low_latency_threshold, 0644, ll_threshold);
+static DEVICE_ULONG_ATTR(deep_buffer_threshold, 0644, db_threshold);
+
+static struct attribute *device_sysfs_attrs[] = {
+	&dev_attr_low_latency_threshold.attr.attr,
+	&dev_attr_deep_buffer_threshold.attr.attr,
+	NULL,
+};
+
+static struct attribute_group attr_group = {
+	.attrs = device_sysfs_attrs,
+};
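+
+/*
+ * These thresholds surface as writable sysfs files under the platform
+ * device directory, e.g. (path illustrative):
+ *   echo 96 > /sys/devices/.../low_latency_threshold
+ * DEVICE_ULONG_ATTR binds each file directly to the ll_threshold and
+ * db_threshold variables referenced in sst_dsp_init() below.
+ */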
+
+int sst_dsp_init(struct snd_soc_platform *platform)
+{
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	int error = 0;
+
+	sst->byte_stream = devm_kzalloc(platform->dev,
+			SST_MAX_BIN_BYTES, GFP_KERNEL);
+	if (sst->byte_stream == NULL) {
+		pr_err("kzalloc failed\n");
+		return -ENOMEM;
+	}
+
+	sst->widget = devm_kzalloc(platform->dev,
+				   SST_NUM_WIDGETS * sizeof(*sst->widget),
+				   GFP_KERNEL);
+	if (sst->widget == NULL) {
+		pr_err("kzalloc failed\n");
+		return -ENOMEM;
+	}
+
+	sst->vtsv_enroll = false;
+	/* Assign the pointer variables */
+	sst->ll_db.low_latency = &ll_threshold;
+	sst->ll_db.deep_buffer = &db_threshold;
+
+	pr_debug("Default ll thres %lu db thres %lu\n", ll_threshold, db_threshold);
+
+	snd_soc_dapm_new_controls(&platform->dapm, sst_dapm_widgets,
+			ARRAY_SIZE(sst_dapm_widgets));
+	snd_soc_dapm_add_routes(&platform->dapm, intercon,
+			ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_widgets(&platform->dapm);
+	snd_soc_add_platform_controls(platform, sst_mrfld_controls,
+			ARRAY_SIZE(sst_mrfld_controls));
+
+	error = sysfs_create_group(&platform->dev->kobj, &attr_group);
+	if (error)
+		pr_err("failed to create sysfs files  %d\n", error);
+
+	return error;
+}
diff --git a/sound/soc/intel/platform-libs/controls_v2.h b/sound/soc/intel/platform-libs/controls_v2.h
new file mode 100644
index 0000000..b54c947
--- /dev/null
+++ b/sound/soc/intel/platform-libs/controls_v2.h
@@ -0,0 +1,708 @@
+/*
+ *  controls_v2.h - Intel MID Platform driver header file
+ *
+ *  Copyright (C) 2013 Intel Corp
+ *  Author: Ramesh Babu <ramesh.babu.koul@intel.com>
+ *  Author: Omair M Abdullah <omair.m.abdullah@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#ifndef __SST_CONTROLS_V2_H__
+#define __SST_CONTROLS_V2_H__
+
+/*
+ * This section defines the map for the mixer widgets.
+ *
+ * Each mixer is represented by a single value, and that value has one
+ * bit per input.
+ *
+ * Each out_id corresponds to one mixer and one path. Each input is
+ * represented by a single bit in the register.
+ */
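+
+/*
+ * For example: the "Codec0" input of the PCM0 mixer lives at bit
+ * SST_IP_CODEC0 (2) of register SST_MIX_PCM0 (13); the corresponding
+ * SOC_SINGLE_EXT("Codec0", SST_MIX_PCM0, 2, ...) kcontrol in the
+ * platform driver reads and writes exactly that bit.
+ */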
+
+/* mixer register ids here */
+#define SST_MIX(x)		(x)
+
+#define SST_MIX_MODEM		SST_MIX(0)
+#define SST_MIX_BT		SST_MIX(1)
+#define SST_MIX_CODEC0		SST_MIX(2)
+#define SST_MIX_CODEC1		SST_MIX(3)
+#define SST_MIX_LOOP0		SST_MIX(4)
+#define SST_MIX_LOOP1		SST_MIX(5)
+#define SST_MIX_LOOP2		SST_MIX(6)
+#define SST_MIX_PROBE		SST_MIX(7)
+#define SST_MIX_HF_SNS		SST_MIX(8)
+#define SST_MIX_HF		SST_MIX(9)
+#define SST_MIX_SPEECH		SST_MIX(10)
+#define SST_MIX_RXSPEECH	SST_MIX(11)
+#define SST_MIX_VOIP		SST_MIX(12)
+#define SST_MIX_PCM0		SST_MIX(13)
+#define SST_MIX_PCM1		SST_MIX(14)
+#define SST_MIX_PCM2		SST_MIX(15)
+#define SST_MIX_AWARE		SST_MIX(16)
+#define SST_MIX_VAD		SST_MIX(17)
+#define SST_MIX_FM		SST_MIX(18)
+
+#define SST_MIX_MEDIA0		SST_MIX(19)
+#define SST_MIX_MEDIA1		SST_MIX(20)
+
+#define SST_NUM_MIX		(SST_MIX_MEDIA1 + 1)
+
+#define SST_MIX_SWITCH		(SST_NUM_MIX + 1)
+#define SST_OUT_SWITCH		(SST_NUM_MIX + 2)
+#define SST_IN_SWITCH		(SST_NUM_MIX + 3)
+#define SST_MUX_REG		(SST_NUM_MIX + 4)
+#define SST_REG_LAST		(SST_MUX_REG)
+
+/* last entry defines array size */
+#define SST_NUM_WIDGETS		(SST_REG_LAST + 1)
+
+#define SST_BT_FM_MUX_SHIFT	0
+#define SST_VOICE_MODE_SHIFT	1
+#define SST_BT_MODE_SHIFT	2
+
+/* in each mixer register we will define one bit for each input */
+#define SST_MIX_IP(x)		(x)
+
+#define SST_IP_MODEM		SST_MIX_IP(0)
+#define SST_IP_BT		SST_MIX_IP(1)
+#define SST_IP_CODEC0		SST_MIX_IP(2)
+#define SST_IP_CODEC1		SST_MIX_IP(3)
+#define SST_IP_LOOP0		SST_MIX_IP(4)
+#define SST_IP_LOOP1		SST_MIX_IP(5)
+#define SST_IP_LOOP2		SST_MIX_IP(6)
+#define SST_IP_PROBE		SST_MIX_IP(7)
+#define SST_IP_SIDETONE		SST_MIX_IP(8)
+#define SST_IP_TXSPEECH		SST_MIX_IP(9)
+#define SST_IP_SPEECH		SST_MIX_IP(10)
+#define SST_IP_TONE		SST_MIX_IP(11)
+#define SST_IP_VOIP		SST_MIX_IP(12)
+#define SST_IP_PCM0		SST_MIX_IP(13)
+#define SST_IP_PCM1		SST_MIX_IP(14)
+#define SST_IP_LOW_PCM0		SST_MIX_IP(15)
+#define SST_IP_FM		SST_MIX_IP(16)
+#define SST_IP_MEDIA0		SST_MIX_IP(17)
+#define SST_IP_MEDIA1		SST_MIX_IP(18)
+#define SST_IP_MEDIA2		SST_MIX_IP(19)
+#define SST_IP_MEDIA3		SST_MIX_IP(20)
+
+#define SST_IP_LAST		SST_IP_MEDIA3
+
+#define SST_SWM_INPUT_COUNT	(SST_IP_LAST + 1)
+#define SST_CMD_SWM_MAX_INPUTS	6
+
+#define SST_PATH_ID_SHIFT	8
+#define SST_DEFAULT_LOCATION_ID	0xFFFF
+#define SST_DEFAULT_CELL_NBR	0xFF
+#define SST_DEFAULT_MODULE_ID	0xFFFF
+
+/*
+ * Audio DSP path IDs, as specified by the audio DSP FW
+ */
+enum sst_path_index {
+	SST_PATH_INDEX_MODEM_OUT                = (0x00 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_BT_OUT                   = (0x01 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_CODEC_OUT0               = (0x02 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_CODEC_OUT1               = (0x03 << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_SPROT_LOOP_OUT           = (0x04 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_MEDIA_LOOP1_OUT          = (0x05 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_MEDIA_LOOP2_OUT          = (0x06 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE_OUT                = (0x07 << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_HF_SNS_OUT               = (0x08 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_VOICE_UPLINK_REF2	= (0x08 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_HF_OUT                   = (0x09 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_VOICE_UPLINK_REF1	= (0x09 << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_SPEECH_OUT               = (0x0A << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_VOICE_UPLINK		= (0x0A << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_RX_SPEECH_OUT            = (0x0B << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_VOICE_DOWNLINK		= (0x0B << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_VOIP_OUT                 = (0x0C << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PCM0_OUT                 = (0x0D << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PCM1_OUT                 = (0x0E << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PCM2_OUT                 = (0x0F << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_AWARE_OUT                = (0x10 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_VAD_OUT                  = (0x11 << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_MEDIA0_OUT               = (0x12 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_MEDIA1_OUT               = (0x13 << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_FM_OUT                   = (0x14 << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_PROBE1_PIPE_OUT		= (0x15 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE2_PIPE_OUT		= (0x16 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE3_PIPE_OUT		= (0x17 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE4_PIPE_OUT		= (0x18 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE5_PIPE_OUT		= (0x19 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE6_PIPE_OUT		= (0x1A << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE7_PIPE_OUT		= (0x1B << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE8_PIPE_OUT		= (0x1C << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_SIDETONE_OUT		= (0x1D << SST_PATH_ID_SHIFT),
+
+	/* Start of input paths */
+	SST_PATH_INDEX_MODEM_IN                 = (0x80 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_BT_IN                    = (0x81 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_CODEC_IN0                = (0x82 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_CODEC_IN1                = (0x83 << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_SPROT_LOOP_IN            = (0x84 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_MEDIA_LOOP1_IN           = (0x85 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_MEDIA_LOOP2_IN           = (0x86 << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_PROBE_IN                 = (0x87 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_SIDETONE_IN              = (0x88 << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_TX_SPEECH_IN             = (0x89 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_SPEECH_IN                = (0x8A << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_TONE_IN                  = (0x8B << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_VOIP_IN                  = (0x8C << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_PCM0_IN                  = (0x8D << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PCM1_IN                  = (0x8E << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_MEDIA0_IN                = (0x8F << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_MEDIA1_IN                = (0x90 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_MEDIA2_IN                = (0x91 << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_FM_IN                    = (0x92 << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_PROBE1_PIPE_IN           = (0x93 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE2_PIPE_IN           = (0x94 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE3_PIPE_IN           = (0x95 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE4_PIPE_IN           = (0x96 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE5_PIPE_IN           = (0x97 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE6_PIPE_IN           = (0x98 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE7_PIPE_IN           = (0x99 << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_PROBE8_PIPE_IN           = (0x9A << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_MEDIA3_IN		= (0x9C << SST_PATH_ID_SHIFT),
+	SST_PATH_INDEX_LOW_PCM0_IN		= (0x9D << SST_PATH_ID_SHIFT),
+
+	SST_PATH_INDEX_RESERVED                 = (0xFF << SST_PATH_ID_SHIFT),
+};
+
+/*
+ * switch matrix input path IDs
+ */
+enum sst_swm_inputs {
+	SST_SWM_IN_MODEM	= (SST_PATH_INDEX_MODEM_IN	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_BT		= (SST_PATH_INDEX_BT_IN		  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_CODEC0	= (SST_PATH_INDEX_CODEC_IN0	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_CODEC1	= (SST_PATH_INDEX_CODEC_IN1	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_SPROT_LOOP	= (SST_PATH_INDEX_SPROT_LOOP_IN	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_MEDIA_LOOP1	= (SST_PATH_INDEX_MEDIA_LOOP1_IN  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_MEDIA_LOOP2	= (SST_PATH_INDEX_MEDIA_LOOP2_IN  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_PROBE	= (SST_PATH_INDEX_PROBE_IN	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_SIDETONE	= (SST_PATH_INDEX_SIDETONE_IN	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_TXSPEECH	= (SST_PATH_INDEX_TX_SPEECH_IN	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_SPEECH	= (SST_PATH_INDEX_SPEECH_IN	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_TONE		= (SST_PATH_INDEX_TONE_IN	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_VOIP		= (SST_PATH_INDEX_VOIP_IN	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_PCM0		= (SST_PATH_INDEX_PCM0_IN	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_PCM1		= (SST_PATH_INDEX_PCM1_IN	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_MEDIA0	= (SST_PATH_INDEX_MEDIA0_IN	  | SST_DEFAULT_CELL_NBR), /* Part of Media Mixer */
+	SST_SWM_IN_MEDIA1	= (SST_PATH_INDEX_MEDIA1_IN	  | SST_DEFAULT_CELL_NBR), /* Part of Media Mixer */
+	SST_SWM_IN_MEDIA2	= (SST_PATH_INDEX_MEDIA2_IN	  | SST_DEFAULT_CELL_NBR), /* Part of Media Mixer */
+	SST_SWM_IN_FM		= (SST_PATH_INDEX_FM_IN		  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_MEDIA3	= (SST_PATH_INDEX_MEDIA3_IN	  | SST_DEFAULT_CELL_NBR), /* Part of Media Mixer */
+	SST_SWM_IN_LOW_PCM0	= (SST_PATH_INDEX_LOW_PCM0_IN	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_IN_END		= (SST_PATH_INDEX_RESERVED	  | SST_DEFAULT_CELL_NBR)
+};
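+
+/*
+ * Worked example: SST_SWM_IN_CODEC0 above expands to
+ * (0x82 << SST_PATH_ID_SHIFT) | SST_DEFAULT_CELL_NBR = 0x82FF; the
+ * upper byte is the FW path index and the lower byte the cell number.
+ */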
+
+/*
+ * switch matrix output path IDs
+ */
+enum sst_swm_outputs {
+	SST_SWM_OUT_MODEM	= (SST_PATH_INDEX_MODEM_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_BT		= (SST_PATH_INDEX_BT_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_CODEC0	= (SST_PATH_INDEX_CODEC_OUT0	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_CODEC1	= (SST_PATH_INDEX_CODEC_OUT1	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_SPROT_LOOP	= (SST_PATH_INDEX_SPROT_LOOP_OUT  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_MEDIA_LOOP1	= (SST_PATH_INDEX_MEDIA_LOOP1_OUT | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_MEDIA_LOOP2	= (SST_PATH_INDEX_MEDIA_LOOP2_OUT | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_PROBE	= (SST_PATH_INDEX_PROBE_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_HF_SNS	= (SST_PATH_INDEX_HF_SNS_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_HF		= (SST_PATH_INDEX_HF_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_SPEECH	= (SST_PATH_INDEX_SPEECH_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_RXSPEECH	= (SST_PATH_INDEX_RX_SPEECH_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_VOIP	= (SST_PATH_INDEX_VOIP_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_PCM0	= (SST_PATH_INDEX_PCM0_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_PCM1	= (SST_PATH_INDEX_PCM1_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_PCM2	= (SST_PATH_INDEX_PCM2_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_AWARE	= (SST_PATH_INDEX_AWARE_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_VAD		= (SST_PATH_INDEX_VAD_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_MEDIA0	= (SST_PATH_INDEX_MEDIA0_OUT	  | SST_DEFAULT_CELL_NBR), /* Part of Media Mixer */
+	SST_SWM_OUT_MEDIA1	= (SST_PATH_INDEX_MEDIA1_OUT	  | SST_DEFAULT_CELL_NBR), /* Part of Media Mixer */
+	SST_SWM_OUT_FM		= (SST_PATH_INDEX_FM_OUT	  | SST_DEFAULT_CELL_NBR),
+	SST_SWM_OUT_END		= (SST_PATH_INDEX_RESERVED	  | SST_DEFAULT_CELL_NBR),
+};
+
+enum sst_ipc_msg {
+	SST_IPC_IA_CMD = 1,
+	SST_IPC_IA_SET_PARAMS,
+	SST_IPC_IA_GET_PARAMS,
+};
+
+enum sst_cmd_type {
+	SST_CMD_BYTES_SET = 1,
+	SST_CMD_BYTES_GET = 2,
+};
+
+enum sst_task {
+	SST_TASK_SBA = 1,
+	SST_TASK_FBA_UL,
+	SST_TASK_MMX,
+	SST_TASK_AWARE,
+	SST_TASK_FBA_DL,
+};
+
+enum sst_type {
+	SST_TYPE_CMD = 1,
+	SST_TYPE_PARAMS,
+};
+
+enum sst_flag {
+	SST_FLAG_BLOCKED = 1,
+	SST_FLAG_NONBLOCK,
+};
+
+/*
+ * Enumeration for indexing the gain cells in VB_SET_GAIN DSP command
+ */
+enum sst_gain_index {
+	/* GAIN IDs for SB task start here */
+	SST_GAIN_INDEX_MODEM_OUT,
+	SST_GAIN_INDEX_MODEM_IN,
+	SST_GAIN_INDEX_BT_OUT,
+	SST_GAIN_INDEX_BT_IN,
+	SST_GAIN_INDEX_FM_OUT,
+
+	SST_GAIN_INDEX_FM_IN,
+	SST_GAIN_INDEX_CODEC_OUT0,
+	SST_GAIN_INDEX_CODEC_OUT1,
+	SST_GAIN_INDEX_CODEC_IN0,
+	SST_GAIN_INDEX_CODEC_IN1,
+
+	SST_GAIN_INDEX_SPROT_LOOP_OUT,
+	SST_GAIN_INDEX_MEDIA_LOOP1_OUT,
+	SST_GAIN_INDEX_MEDIA_LOOP2_OUT,
+	SST_GAIN_INDEX_RX_SPEECH_OUT,
+	SST_GAIN_INDEX_TX_SPEECH_IN,
+
+	SST_GAIN_INDEX_SPEECH_OUT,
+	SST_GAIN_INDEX_SPEECH_IN,
+	SST_GAIN_INDEX_HF_OUT,
+	SST_GAIN_INDEX_HF_SNS_OUT,
+	SST_GAIN_INDEX_TONE_IN,
+
+	SST_GAIN_INDEX_SIDETONE_IN,
+	SST_GAIN_INDEX_PROBE_OUT,
+	SST_GAIN_INDEX_PROBE_IN,
+	SST_GAIN_INDEX_PCM0_IN_LEFT,
+	SST_GAIN_INDEX_PCM0_IN_RIGHT,
+
+	SST_GAIN_INDEX_PCM1_OUT_LEFT,
+	SST_GAIN_INDEX_PCM1_OUT_RIGHT,
+	SST_GAIN_INDEX_PCM1_IN_LEFT,
+	SST_GAIN_INDEX_PCM1_IN_RIGHT,
+	SST_GAIN_INDEX_PCM2_OUT_LEFT,
+
+	SST_GAIN_INDEX_PCM2_OUT_RIGHT,
+	SST_GAIN_INDEX_VOIP_OUT,
+	SST_GAIN_INDEX_VOIP_IN,
+	SST_GAIN_INDEX_AWARE_OUT,
+	SST_GAIN_INDEX_VAD_OUT,
+
+	/* Gain IDs for FBA task start here */
+	SST_GAIN_INDEX_VOICE_UL,
+
+	/* Gain IDs for MMX task start here */
+	SST_GAIN_INDEX_MEDIA0_IN_LEFT,
+	SST_GAIN_INDEX_MEDIA0_IN_RIGHT,
+	SST_GAIN_INDEX_MEDIA1_IN_LEFT,
+	SST_GAIN_INDEX_MEDIA1_IN_RIGHT,
+
+	SST_GAIN_INDEX_MEDIA2_IN_LEFT,
+	SST_GAIN_INDEX_MEDIA2_IN_RIGHT,
+
+	SST_GAIN_INDEX_GAIN_END
+};
+
+/*
+ * Audio DSP module IDs specified by FW spec
+ * TODO: Update with all modules
+ */
+enum sst_module_id {
+	SST_MODULE_ID_GAIN_CELL		  = 0x0067,
+	SST_MODULE_ID_SPROT		  = 0x006D,
+	SST_MODULE_ID_NR		  = 0x0076,
+	SST_MODULE_ID_BWX		  = 0x0077,
+	SST_MODULE_ID_DRP		  = 0x0078,
+	SST_MODULE_ID_MDRP		  = 0x0079,
+
+	SST_MODULE_ID_ANA		  = 0x007A,
+	SST_MODULE_ID_AEC		  = 0x007B,
+	SST_MODULE_ID_NR_SNS		  = 0x007C,
+	SST_MODULE_ID_SER		  = 0x007D,
+	SST_MODULE_ID_AGC		  = 0x007E,
+
+	SST_MODULE_ID_CNI		  = 0x007F,
+	SST_MODULE_ID_CONTEXT_ALGO_AWARE  = 0x0080,
+	SST_MODULE_ID_FIR_24		  = 0x0081,
+	SST_MODULE_ID_IIR_24		  = 0x0082,
+	SST_MODULE_ID_FILT_DCR		  = 0x0082,
+
+	SST_MODULE_ID_ASRC		  = 0x0083,
+	SST_MODULE_ID_TONE_GEN		  = 0x0084,
+	SST_MODULE_ID_BMF		  = 0x0086,
+	SST_MODULE_ID_EDL		  = 0x0087,
+	SST_MODULE_ID_GLC		  = 0x0088,
+
+	SST_MODULE_ID_FIR_16		  = 0x0089,
+	SST_MODULE_ID_IIR_16		  = 0x008A,
+	SST_MODULE_ID_DNR		  = 0x008B,
+
+	SST_MODULE_ID_CNI_TX		  = 0x0090,
+	SST_MODULE_ID_REF_LINE		  = 0x0091,
+	SST_MODULE_ID_VOLUME		  = 0x0092,
+
+	SST_MODULE_ID_TASK		  = 0xFFFF
+};
+
+enum sst_cmd {
+	SBA_IDLE		= 14,
+	SBA_VB_SET_SPEECH_PATH	= 26,
+	MMX_SET_GAIN		= 33,
+	SBA_VB_SET_GAIN		= 33,
+	FBA_VB_RX_CNI		= 35,
+	MMX_SET_GAIN_TIMECONST	= 36,
+	SBA_VB_SET_TIMECONST	= 36,
+	FBA_VB_ANA		= 37,
+	FBA_VB_SET_FIR		= 38,
+	FBA_VB_SET_IIR		= 39,
+	FBA_VB_AEC		= 47,
+	FBA_VB_NR_UL		= 48,
+	FBA_VB_AGC		= 49,
+	FBA_VB_NR_DL		= 55,
+	SBA_PROBE		= 66,
+	MMX_PROBE		= 66,
+	FBA_VB_SET_BIQUAD_D_C	= 69,
+	FBA_VB_DUAL_BAND_COMP	= 70,
+	FBA_VB_SNS		= 72,
+	FBA_VB_SER		= 78,
+	FBA_VB_TX_CNI		= 80,
+	SBA_VB_START		= 85,
+	FBA_VB_SET_REF_LINE	= 94,
+	FBA_VB_SET_DELAY_LINE	= 95,
+	FBA_VB_BWX		= 104,
+	FBA_VB_GMM		= 105,
+	FBA_VB_GLC		= 107,
+	FBA_VB_BMF		= 111,
+	FBA_VB_DNR		= 113,
+	MMX_SET_SWM		= 114,
+	SBA_SET_SWM		= 114,
+	SBA_SET_MDRP		= 116,
+	SBA_HW_SET_SSP		= 117,
+	SBA_SET_MEDIA_LOOP_MAP	= 118,
+	SBA_SET_MEDIA_PATH	= 119,
+	MMX_SET_MEDIA_PATH	= 119,
+	SBA_VB_LPRO		= 126,
+	SBA_VB_SET_FIR		= 128,
+	SBA_VB_SET_IIR		= 129,
+	SBA_SET_SSP_SLOT_MAP	= 130,
+	AWARE_ENV_CLASS_PARAMS	= 130,
+};
+
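+/*
+ * Note: several command ids above are intentionally shared between
+ * tasks, e.g. MMX_SET_GAIN == SBA_VB_SET_GAIN (33), MMX_PROBE ==
+ * SBA_PROBE (66) and MMX_SET_SWM == SBA_SET_SWM (114); the task id
+ * carried in the byte-control header decides which task consumes the
+ * command.
+ */
+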
+enum sst_dsp_switch {
+	SST_SWITCH_OFF = 0,
+	SST_SWITCH_ON = 3,
+};
+
+enum sst_path_switch {
+	SST_PATH_OFF = 0,
+	SST_PATH_ON = 1,
+};
+
+enum sst_swm_state {
+	SST_SWM_OFF = 0,
+	SST_SWM_ON = 3,
+};
+
+#define SST_FILL_LOCATION_IDS(dst, cell_idx, pipe_id)		do {	\
+		dst.location_id.p.cell_nbr_idx = (cell_idx);		\
+		dst.location_id.p.path_id = (pipe_id);			\
+	} while (0)
+#define SST_FILL_LOCATION_ID(dst, loc_id)				(\
+	dst.location_id.f = (loc_id))
+#define SST_FILL_MODULE_ID(dst, mod_id)					(\
+	dst.module_id = (mod_id))
+
+#define SST_FILL_DESTINATION1(dst, id)				do {	\
+		SST_FILL_LOCATION_ID(dst, (id) & 0xFFFF);		\
+		SST_FILL_MODULE_ID(dst, ((id) & 0xFFFF0000) >> 16);	\
+	} while (0)
+#define SST_FILL_DESTINATION2(dst, loc_id, mod_id)		do {	\
+		SST_FILL_LOCATION_ID(dst, loc_id);			\
+		SST_FILL_MODULE_ID(dst, mod_id);			\
+	} while (0)
+#define SST_FILL_DESTINATION3(dst, cell_idx, path_id, mod_id)	do {	\
+		SST_FILL_LOCATION_IDS(dst, cell_idx, path_id);		\
+		SST_FILL_MODULE_ID(dst, mod_id);			\
+	} while (0)
+
+#define SST_FILL_DESTINATION(level, dst, ...)				\
+	SST_FILL_DESTINATION##level(dst, __VA_ARGS__)
+#define SST_FILL_DEFAULT_DESTINATION(dst)				\
+	SST_FILL_DESTINATION(2, dst, SST_DEFAULT_LOCATION_ID, SST_DEFAULT_MODULE_ID)
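+
+/*
+ * Usage sketch (illustrative values, not part of the FW interface):
+ * the level argument selects which SST_FILL_DESTINATION<level> variant
+ * expands, so
+ *
+ *	SST_FILL_DESTINATION(2, cmd.header.dst, SST_SWM_OUT_PCM0,
+ *			     SST_DEFAULT_MODULE_ID);
+ *
+ * writes a 16-bit location id plus a 16-bit module id, while the
+ * level-3 form first combines a cell index and a path id via
+ * SST_FILL_LOCATION_IDS().
+ */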
+
+struct sst_destination_id {
+	union sst_location_id {
+		struct {
+			u8 cell_nbr_idx;	/* module index */
+			u8 path_id;		/* pipe_id */
+		} __packed	p;		/* part */
+		u16		f;		/* full */
+	} __packed location_id;
+	u16	   module_id;
+} __packed;
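+
+/*
+ * Layout note (an assumption from this header, not FW documentation):
+ * on the little-endian LPE, and with SST_PATH_ID_SHIFT == 8,
+ * location_id.f reads as (path_id << 8) | cell_nbr_idx, which is why
+ * the SST_PATH_INDEX_* values are pre-shifted and simply OR'ed with
+ * SST_DEFAULT_CELL_NBR to form the SST_SWM_* ids above.
+ */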
+
+struct sst_dsp_header {
+	struct sst_destination_id dst;
+	u16 command_id;
+	u16 length;
+} __packed;
+
+/*
+ *
+ * Common Commands
+ *
+ */
+struct sst_cmd_generic {
+	struct sst_dsp_header header;
+} __packed;
+
+struct swm_input_ids {
+	struct sst_destination_id input_id;
+} __packed;
+
+struct sst_cmd_set_swm {
+	struct sst_dsp_header header;
+	struct sst_destination_id output_id;
+	u16    switch_state;
+	u16    nb_inputs;
+	struct swm_input_ids input[SST_CMD_SWM_MAX_INPUTS];
+} __packed;
+
+struct sst_cmd_set_media_path {
+	struct sst_dsp_header header;
+	u16    switch_state;
+} __packed;
+
+struct sst_cmd_set_speech_path {
+	struct sst_dsp_header header;
+	u16    switch_state;
+	struct {
+		u16 rsvd:8;
+		u16 sample_length:2;
+		u16 rate:3;
+		u16 format:3;
+	} config;
+} __packed;
+
+struct gain_cell {
+	struct sst_destination_id dest;
+	s16 cell_gain_left;
+	s16 cell_gain_right;
+	u16 gain_time_constant;
+} __packed;
+
+#define NUM_GAIN_CELLS 1
+struct sst_cmd_set_gain_dual {
+	struct sst_dsp_header header;
+	u16    gain_cell_num;
+	struct gain_cell cell_gains[NUM_GAIN_CELLS];
+} __packed;
+
+struct sst_cmd_set_params {
+	struct sst_destination_id dst;
+	u16 command_id;
+	char params[0];
+} __packed;
+
+/*
+ *
+ * Media (MMX) commands
+ *
+ */
+
+/*
+ *
+ * SBA commands
+ *
+ */
+struct sst_cmd_sba_vb_start {
+	struct sst_dsp_header header;
+} __packed;
+
+union sba_media_loop_params {
+	struct {
+		u16 rsvd:8;
+		u16 sample_length:2;
+		u16 rate:3;
+		u16 format:3;
+	} part;
+	u16 full;
+} __packed;
+
+struct sst_cmd_sba_set_media_loop_map {
+	struct	sst_dsp_header header;
+	u16	switch_state;
+	union	sba_media_loop_params param;
+	u16	map;
+} __packed;
+
+enum sst_ssp_mode {
+	SSP_MODE_MASTER = 0,
+	SSP_MODE_SLAVE = 1,
+};
+
+enum sst_ssp_pcm_mode {
+	SSP_PCM_MODE_NORMAL = 0,
+	SSP_PCM_MODE_NETWORK = 1,
+};
+
+enum sst_ssp_duplex {
+	SSP_DUPLEX = 0,
+	SSP_RX = 1,
+	SSP_TX = 2,
+};
+
+enum sst_ssp_fs_frequency {
+	SSP_FS_8_KHZ = 0,
+	SSP_FS_16_KHZ = 1,
+	SSP_FS_44_1_KHZ = 2,
+	SSP_FS_48_KHZ = 3,
+};
+
+enum sst_ssp_fs_polarity {
+	SSP_FS_ACTIVE_LOW = 0,
+	SSP_FS_ACTIVE_HIGH = 1,
+};
+
+enum sst_ssp_protocol {
+	SSP_MODE_PCM = 0,
+	SSP_MODE_I2S = 1,
+};
+
+enum sst_ssp_port_id {
+	SSP_MODEM = 0,
+	SSP_BT = 1,
+	SSP_FM = 2,
+	SSP_CODEC = 3,
+};
+
+struct sst_cmd_sba_hw_set_ssp {
+	struct sst_dsp_header header;
+	u16 selection;			/* 0:SSP0(def), 1:SSP1, 2:SSP2 */
+
+	u16 switch_state;
+
+	u16 nb_bits_per_slots:6;        /* 0-32 bits, 24 (def) */
+	u16 nb_slots:4;			/* 0-8: slots per frame  */
+	u16 mode:3;			/* 0:Master, 1: Slave  */
+	u16 duplex:3;
+
+	u16 active_tx_slot_map:8;       /* Bit map, 0:off, 1:on */
+	u16 reserved1:8;
+
+	u16 active_rx_slot_map:8;       /* Bit map 0: Off, 1:On */
+	u16 reserved2:8;
+
+	u16 frame_sync_frequency;
+
+	u16 frame_sync_polarity:8;
+	u16 data_polarity:8;
+
+	u16 frame_sync_width;           /* 1 to N clocks */
+	u16 ssp_protocol:8;
+	u16 start_delay:8;		/* Start delay in terms of clock ticks */
+} __packed;
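+
+/*
+ * Note: .mode is wider than the one-bit comment suggests because
+ * sst_ssp_event() in controls_v2_dpcm.c packs two fields into it as
+ * (ssp_mode | pcm_mode << 1): bit 0 selects master/slave, bit 1
+ * selects normal vs. network PCM mode.
+ */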
+
+#define SST_MAX_TDM_SLOTS 8
+
+struct sst_param_sba_ssp_slot_map {
+	struct sst_dsp_header header;
+
+	u16 param_id;
+	u16 param_len;
+	u16 ssp_index;
+
+	u8 rx_slot_map[SST_MAX_TDM_SLOTS];
+	u8 tx_slot_map[SST_MAX_TDM_SLOTS];
+} __packed;
+
+enum {
+	SST_PROBE_EXTRACTOR = 0,
+	SST_PROBE_INJECTOR = 1,
+};
+
+struct sst_cmd_probe {
+	struct sst_dsp_header header;
+
+	u16 switch_state;
+	struct sst_destination_id probe_dst;
+
+	u16 shared_mem:1;
+	u16 probe_in:1;
+	u16 probe_out:1;
+	u16 rsvd_1:13;
+
+	u16 rsvd_2:5;
+	u16 probe_mode:2;
+	u16 rsvd_3:1;
+	u16 sample_length:2;
+	u16 rate:3;
+	u16 format:3;
+
+	u16 sm_buf_id;
+
+	u16 gain[6];
+	u16 rsvd_4[9];
+} __packed;
+
+struct sst_probe_config {
+	const char *name;
+	u16 loc_id;
+	u16 mod_id;
+	u8 task_id;
+	struct pcm_cfg {
+		u8 s_length:2;
+		u8 rate:3;
+		u8 format:3;
+	} cfg;
+};
+
+int sst_mix_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol);
+int sst_mix_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol);
+#endif
diff --git a/sound/soc/intel/platform-libs/controls_v2_dpcm.c b/sound/soc/intel/platform-libs/controls_v2_dpcm.c
new file mode 100644
index 0000000..311c043
--- /dev/null
+++ b/sound/soc/intel/platform-libs/controls_v2_dpcm.c
@@ -0,0 +1,1803 @@
+/*
+ *  controls_v2_dpcm.c - Intel MID Platform driver DPCM ALSA controls for Mrfld
+ *
+ *  Copyright (C) 2013 Intel Corp
+ *  Author: Omair Mohammed Abdullah <omair.m.abdullah@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+#include "../platform_ipc_v2.h"
+#include "../sst_platform.h"
+#include "../sst_platform_pvt.h"
+#include "controls_v2.h"
+#include "sst_widgets.h"
+
+static inline void sst_fill_byte_control(char *param,
+					 u8 ipc_msg, u8 block,
+					 u8 task_id, u8 pipe_id,
+					 u16 len, void *cmd_data)
+{
+	struct snd_sst_bytes_v2 *byte_data = (struct snd_sst_bytes_v2 *)param;
+
+	byte_data->type = SST_CMD_BYTES_SET;
+	byte_data->ipc_msg = ipc_msg;
+	byte_data->block = block;
+	byte_data->task_id = task_id;
+	byte_data->pipe_id = pipe_id;
+
+	if (len > SST_MAX_BIN_BYTES - sizeof(*byte_data)) {
+		pr_err("%s: command length too big (%u)\n", __func__, len);
+		len = SST_MAX_BIN_BYTES - sizeof(*byte_data);
+		WARN_ON(1); /* this happens only if code is wrong */
+	}
+	byte_data->len = len;
+	memcpy(byte_data->bytes, cmd_data, len);
+	print_hex_dump_bytes("writing to lpe: ", DUMP_PREFIX_OFFSET,
+			     byte_data, len + sizeof(*byte_data));
+}
+
+static int sst_fill_and_send_cmd(struct sst_data *sst,
+				 u8 ipc_msg, u8 block, u8 task_id, u8 pipe_id,
+				 void *cmd_data, u16 len)
+{
+	int ret = 0;
+
+	mutex_lock(&sst->lock);
+	sst_fill_byte_control(sst->byte_stream, ipc_msg, block, task_id, pipe_id,
+			      len, cmd_data);
+	ret = sst_dsp->ops->set_generic_params(SST_SET_BYTE_STREAM,
+					       sst->byte_stream);
+	mutex_unlock(&sst->lock);
+
+	return ret;
+}
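+
+/*
+ * Wire format recap: what reaches the LPE is a struct snd_sst_bytes_v2
+ * header (type, ipc_msg, block, task_id, pipe_id, len) immediately
+ * followed by len bytes of DSP command, i.e. the sst_dsp_header plus
+ * payload built by the event handlers below.
+ */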
+
+static int sst_probe_get(struct snd_kcontrol *kcontrol,
+			 struct snd_ctl_elem_value *ucontrol)
+{
+	struct sst_probe_value *v = (void *)kcontrol->private_value;
+
+	ucontrol->value.enumerated.item[0] = v->val;
+	return 0;
+}
+
+static int sst_probe_put(struct snd_kcontrol *kcontrol,
+			 struct snd_ctl_elem_value *ucontrol)
+{
+	struct sst_probe_value *v = (void *)kcontrol->private_value;
+	const struct soc_enum *e = v->p_enum;
+
+	if (ucontrol->value.enumerated.item[0] > e->max - 1)
+		return -EINVAL;
+	v->val = ucontrol->value.enumerated.item[0];
+	return 0;
+}
+
+int sst_probe_enum_info(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_info *uinfo)
+{
+	struct sst_probe_value *v = (void *)kcontrol->private_value;
+	const struct soc_enum *e = v->p_enum;
+
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+	uinfo->count = 1;
+	uinfo->value.enumerated.items = e->max;
+
+	if (uinfo->value.enumerated.item > e->max - 1)
+		uinfo->value.enumerated.item = e->max - 1;
+	strcpy(uinfo->value.enumerated.name,
+		e->texts[uinfo->value.enumerated.item]);
+	return 0;
+}
+
+/*
+ * slot map value is a bitfield where each bit represents a FW channel
+ *
+ *			3 2 1 0		# 0 = codec0, 1 = codec1
+ *			RLRLRLRL	# 2, 3 = reserved
+ *
+ * e.g. slot 0 rx map =	00001100b -> data from slot 0 goes into codec_in1 L,R
+ */
+static u8 sst_ssp_slot_map[SST_MAX_TDM_SLOTS] = {
+	0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80, /* default rx map */
+};
+
+/*
+ * channel map value is a bitfield where each bit represents a slot
+ *
+ *			  76543210	# 0 = slot 0, 1 = slot 1
+ *
+ * e.g. codec1_0 tx map = 00000101b -> data from codec_out1_0 goes into slot 0, 2
+ */
+static u8 sst_ssp_channel_map[SST_MAX_TDM_SLOTS] = {
+	0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80, /* default tx map */
+};
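+
+/*
+ * Worked example (hypothetical routing): sending codec_out1_0 on TDM
+ * slots 0 and 2 means sst_ssp_channel_map[2] == 00000101b, while
+ * feeding slot 1 into codec_in0_1 means sst_ssp_slot_map[1] == 00000010b.
+ */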
+
+static int sst_slot_get(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_enum *e = (void *)kcontrol->private_value;
+	unsigned int ctl_no = e->reg;
+	unsigned int is_tx = e->reg2;
+	unsigned int val, mux;
+	u8 *map = is_tx ? sst_ssp_channel_map : sst_ssp_slot_map;
+
+	val = 1 << ctl_no;
+	/* search which slot/channel has this bit set - there should be only one */
+	for (mux = e->max; mux > 0;  mux--)
+		if (map[mux - 1] & val)
+			break;
+
+	ucontrol->value.enumerated.item[0] = mux;
+	pr_debug("%s: %s - %s map = %#x\n", __func__, is_tx ? "tx channel" : "rx slot",
+		 e->texts[mux], mux ? map[mux - 1] : -1);
+	return 0;
+}
+
+/*
+ * The (de)interleaver controls are defined in the opposite sense to be
+ * user-friendly.
+ *
+ * Instead of the enum value being the value set to the register, it is the
+ * register address; and the kcontrol_no is the value written to the register.
+ *
+ * This means that whenever an enum is set, we need to clear the bit
+ * for that kcontrol_no in all the interleaver OR deinterleaver registers.
+ */
+static int sst_slot_put(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_enum *e = (void *)kcontrol->private_value;
+	int i;
+	unsigned int ctl_no = e->reg;
+	unsigned int is_tx = e->reg2;
+	unsigned int slot_channel_no;
+	unsigned int val, mux;
+	u8 *map = is_tx ? sst_ssp_channel_map : sst_ssp_slot_map;
+
+	val = 1 << ctl_no;
+	mux = ucontrol->value.enumerated.item[0];
+	if (mux > e->max - 1)
+		return -EINVAL;
+
+	/* first clear all registers of this bit */
+	for (i = 0; i < e->max; i++)
+		map[i] &= ~val;
+
+	if (mux == 0) /* kctl set to 'none' */
+		return 0;
+
+	/* offset by one to take "None" into account */
+	slot_channel_no = mux - 1;
+	map[slot_channel_no] |= val;
+
+	pr_debug("%s: %s %s map = %#x\n", __func__, is_tx ? "tx channel" : "rx slot",
+		 e->texts[mux], map[slot_channel_no]);
+	return 0;
+}
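+
+/*
+ * Example (hypothetical kcontrol write): setting the "codec_out slot 0"
+ * interleaver to "codec_out1_0" clears bit 0 in every
+ * sst_ssp_channel_map entry and then sets bit 0 in entry 2, so no two
+ * channels can ever claim the same slot.
+ */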
+
+/* assumes a boolean mux */
+static inline bool get_mux_state(struct sst_data *sst, unsigned int reg, unsigned int shift)
+{
+	return (sst_reg_read(sst, reg, shift, 1) == 1);
+}
+
+static int sst_mux_get(struct snd_kcontrol *kcontrol,
+		       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct sst_data *sst = snd_soc_platform_get_drvdata(widget->platform);
+	struct soc_enum *e = (void *)kcontrol->private_value;
+	unsigned int max = e->max - 1;
+
+	ucontrol->value.enumerated.item[0] = sst_reg_read(sst, e->reg, e->shift_l, max);
+	return 0;
+}
+
+static int sst_mux_put(struct snd_kcontrol *kcontrol,
+		       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+	struct sst_data *sst = snd_soc_platform_get_drvdata(widget->platform);
+	struct soc_enum *e = (void *)kcontrol->private_value;
+	struct snd_soc_dapm_update update;
+	unsigned int max = e->max - 1;
+	unsigned int mask = (1 << fls(max)) - 1;
+	unsigned int mux, val;
+
+	if (ucontrol->value.enumerated.item[0] > e->max - 1)
+		return -EINVAL;
+
+	mux = ucontrol->value.enumerated.item[0];
+	val = sst_reg_write(sst, e->reg, e->shift_l, max, mux);
+
+	pr_debug("%s: reg[%d] = %#x\n", __func__, e->reg, val);
+
+	widget->value = val;
+	update.kcontrol = kcontrol;
+	update.widget = widget;
+	update.reg = e->reg;
+	update.mask = mask;
+	update.val = val;
+
+	widget->dapm->update = &update;
+	snd_soc_dapm_mux_update_power(widget, kcontrol, mux, e);
+	widget->dapm->update = NULL;
+	return 0;
+}
+
+static int sst_mode_get(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	struct soc_enum *e = (void *)kcontrol->private_value;
+	unsigned int max = e->max - 1;
+
+	ucontrol->value.enumerated.item[0] = sst_reg_read(sst, e->reg, e->shift_l, max);
+	return 0;
+}
+
+static int sst_mode_put(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	struct soc_enum *e = (void *)kcontrol->private_value;
+	unsigned int max = e->max - 1;
+	unsigned int val;
+
+	if (ucontrol->value.enumerated.item[0] > e->max - 1)
+		return -EINVAL;
+
+	val = sst_reg_write(sst, e->reg, e->shift_l, max, ucontrol->value.enumerated.item[0]);
+	pr_debug("%s: reg[%d] - %#x\n", __func__, e->reg, val);
+	return 0;
+}
+
+static void sst_send_algo_cmd(struct sst_data *sst,
+			      struct sst_algo_control *bc)
+{
+	int len;
+	struct sst_cmd_set_params *cmd;
+
+	if (bc->params == NULL)
+		return;
+
+	/* bc->max already accounts for the params payload copied below */
+	len = sizeof(cmd->dst) + sizeof(cmd->command_id) + bc->max;
+
+	cmd = kzalloc(len, GFP_KERNEL);
+	if (cmd == NULL) {
+		pr_err("Failed to send cmd, kzalloc failed\n");
+		return;
+	}
+
+	SST_FILL_DESTINATION(2, cmd->dst, bc->pipe_id, bc->module_id);
+	cmd->command_id = bc->cmd_id;
+	memcpy(cmd->params, bc->params, bc->max);
+
+	sst_fill_and_send_cmd(sst, SST_IPC_IA_SET_PARAMS, SST_FLAG_BLOCKED,
+				bc->task_id, 0, cmd, len);
+	kfree(cmd);
+}
+
+static void sst_find_and_send_pipe_algo(struct snd_soc_platform *platform,
+					struct snd_soc_dapm_widget *w)
+{
+	struct sst_algo_control *bc;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	struct sst_ids *ids = w->priv;
+	struct module *algo = NULL;
+
+	pr_debug("Enter:%s, widget=%s\n", __func__, w->name);
+
+	list_for_each_entry(algo, &ids->algo_list, node) {
+		bc = (void *)algo->kctl->private_value;
+
+		pr_debug("Found algo control name=%s pipe=%s\n",
+			 algo->kctl->id.name, w->name);
+		sst_send_algo_cmd(sst, bc);
+	}
+}
+
+int sst_algo_bytes_ctl_info(struct snd_kcontrol *kcontrol,
+			    struct snd_ctl_elem_info *uinfo)
+{
+	struct sst_algo_control *bc = (void *)kcontrol->private_value;
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+	uinfo->count = bc->max;
+
+	if (bc->params == NULL) {
+		bc->params = devm_kzalloc(platform->dev, bc->max, GFP_KERNEL);
+		if (bc->params == NULL) {
+			pr_err("kzalloc failed\n");
+			return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
+static int sst_algo_control_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct sst_algo_control *bc = (void *)kcontrol->private_value;
+
+	pr_debug("in %s\n", __func__);
+	switch (bc->type) {
+	case SST_ALGO_PARAMS:
+		if (bc->params)
+			memcpy(ucontrol->value.bytes.data, bc->params, bc->max);
+		break;
+	case SST_ALGO_BYPASS:
+		ucontrol->value.integer.value[0] = bc->bypass ? 1 : 0;
+		pr_debug("%s: bypass  %d\n", __func__, bc->bypass);
+		break;
+	default:
+		pr_err("Invalid Input- algo type:%d\n", bc->type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int sst_algo_control_set(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	struct sst_algo_control *bc = (void *)kcontrol->private_value;
+
+	pr_debug("in %s control_name=%s\n", __func__, kcontrol->id.name);
+	switch (bc->type) {
+	case SST_ALGO_PARAMS:
+		if (bc->params)
+			memcpy(bc->params, ucontrol->value.bytes.data, bc->max);
+		break;
+	case SST_ALGO_BYPASS:
+		bc->bypass = !!ucontrol->value.integer.value[0];
+		pr_debug("%s: Mute %d\n", __func__, bc->bypass);
+		break;
+	default:
+		pr_err("Invalid Input- algo type:%d\n", bc->type);
+		return -EINVAL;
+	}
+	/* if pipe is enabled, we need to send the algo params from here */
+	if (bc->w && bc->w->power)
+		sst_send_algo_cmd(sst, bc);
+
+	return 0;
+}
+
+static int sst_gain_ctl_info(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_info *uinfo)
+{
+	struct sst_gain_mixer_control *mc = (void *)kcontrol->private_value;
+
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = mc->stereo ? 2 : 1;
+	uinfo->value.integer.min = mc->min;
+	uinfo->value.integer.max = mc->max;
+	return 0;
+}
+
+static void sst_send_gain_cmd(struct sst_data *sst, struct sst_gain_value *gv,
+			      u16 task_id, u16 loc_id, u16 module_id, int mute)
+{
+	struct sst_cmd_set_gain_dual cmd;
+
+	pr_debug("%s\n", __func__);
+
+	cmd.header.command_id = MMX_SET_GAIN;
+	SST_FILL_DEFAULT_DESTINATION(cmd.header.dst);
+	cmd.gain_cell_num = 1;
+
+	if (mute || gv->mute) {
+		cmd.cell_gains[0].cell_gain_left = SST_GAIN_MIN_VALUE;
+		cmd.cell_gains[0].cell_gain_right = SST_GAIN_MIN_VALUE;
+	} else {
+		cmd.cell_gains[0].cell_gain_left = gv->l_gain;
+		cmd.cell_gains[0].cell_gain_right = gv->r_gain;
+	}
+	SST_FILL_DESTINATION(2, cmd.cell_gains[0].dest,
+			     loc_id, module_id);
+	cmd.cell_gains[0].gain_time_constant = gv->ramp_duration;
+
+	cmd.header.length = sizeof(struct sst_cmd_set_gain_dual)
+				- sizeof(struct sst_dsp_header);
+
+	sst_fill_and_send_cmd(sst, SST_IPC_IA_SET_PARAMS, SST_FLAG_BLOCKED,
+			      task_id, 0, &cmd,
+			      sizeof(cmd.header) + cmd.header.length);
+}
+
+static int sst_gain_get(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct sst_gain_mixer_control *mc = (void *)kcontrol->private_value;
+	struct sst_gain_value *gv = mc->gain_val;
+
+	switch (mc->type) {
+	case SST_GAIN_TLV:
+		ucontrol->value.integer.value[0] = gv->l_gain;
+		ucontrol->value.integer.value[1] = gv->r_gain;
+		pr_debug("%s: Volume %d, %d\n", __func__, gv->l_gain, gv->r_gain);
+		break;
+	case SST_GAIN_MUTE:
+		ucontrol->value.integer.value[0] = gv->mute ? 1 : 0;
+		pr_debug("%s: Mute %d\n", __func__, gv->mute);
+		break;
+	case SST_GAIN_RAMP_DURATION:
+		ucontrol->value.integer.value[0] = gv->ramp_duration;
+		pr_debug("%s: RampDuration %d\n", __func__, gv->ramp_duration);
+		break;
+	default:
+		pr_err("Invalid Input- gain type:%d\n", mc->type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int sst_gain_put(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_platform *platform = snd_kcontrol_chip(kcontrol);
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	struct sst_gain_mixer_control *mc = (void *)kcontrol->private_value;
+	struct sst_gain_value *gv = mc->gain_val;
+
+	switch (mc->type) {
+	case SST_GAIN_TLV:
+		gv->l_gain = ucontrol->value.integer.value[0];
+		gv->r_gain = ucontrol->value.integer.value[1];
+		pr_debug("%s: Volume %d, %d\n", __func__, gv->l_gain, gv->r_gain);
+		break;
+	case SST_GAIN_MUTE:
+		gv->mute = !!ucontrol->value.integer.value[0];
+		pr_debug("%s: Mute %d\n", __func__, gv->mute);
+		break;
+	case SST_GAIN_RAMP_DURATION:
+		gv->ramp_duration = ucontrol->value.integer.value[0];
+		pr_debug("%s: RampDuration %d\n", __func__, gv->ramp_duration);
+		break;
+	default:
+		pr_err("Invalid Input- gain type:%d\n", mc->type);
+		return -EINVAL;
+	}
+
+	if (mc->w && mc->w->power)
+		sst_send_gain_cmd(sst, gv, mc->task_id,
+				mc->pipe_id | mc->instance_id, mc->module_id, 0);
+	return 0;
+}
+
+static const DECLARE_TLV_DB_SCALE(sst_gain_tlv_common, SST_GAIN_MIN_VALUE * 10, 10, 0);
+
+/* Look up table to convert MIXER SW bit regs to SWM inputs */
+static const uint swm_mixer_input_ids[SST_SWM_INPUT_COUNT] = {
+	[SST_IP_MODEM]		= SST_SWM_IN_MODEM,
+	[SST_IP_BT]		= SST_SWM_IN_BT,
+	[SST_IP_CODEC0]		= SST_SWM_IN_CODEC0,
+	[SST_IP_CODEC1]		= SST_SWM_IN_CODEC1,
+	[SST_IP_LOOP0]		= SST_SWM_IN_SPROT_LOOP,
+	[SST_IP_LOOP1]		= SST_SWM_IN_MEDIA_LOOP1,
+	[SST_IP_LOOP2]		= SST_SWM_IN_MEDIA_LOOP2,
+	[SST_IP_SIDETONE]	= SST_SWM_IN_SIDETONE,
+	[SST_IP_TXSPEECH]	= SST_SWM_IN_TXSPEECH,
+	[SST_IP_SPEECH]		= SST_SWM_IN_SPEECH,
+	[SST_IP_TONE]		= SST_SWM_IN_TONE,
+	[SST_IP_VOIP]		= SST_SWM_IN_VOIP,
+	[SST_IP_PCM0]		= SST_SWM_IN_PCM0,
+	[SST_IP_PCM1]		= SST_SWM_IN_PCM1,
+	[SST_IP_LOW_PCM0]	= SST_SWM_IN_LOW_PCM0,
+	[SST_IP_FM]		= SST_SWM_IN_FM,
+	[SST_IP_MEDIA0]		= SST_SWM_IN_MEDIA0,
+	[SST_IP_MEDIA1]		= SST_SWM_IN_MEDIA1,
+	[SST_IP_MEDIA2]		= SST_SWM_IN_MEDIA2,
+	[SST_IP_MEDIA3]		= SST_SWM_IN_MEDIA3,
+};
+
+static int fill_swm_input(struct swm_input_ids *swm_input, unsigned int reg)
+{
+	uint i, is_set, nb_inputs = 0;
+	u16 input_loc_id;
+
+	pr_debug("%s:reg value:%#x\n", __func__, reg);
+	for (i = 0; i < SST_SWM_INPUT_COUNT; i++) {
+		is_set = reg & BIT(i);
+		if (!is_set)
+			continue;
+
+		input_loc_id = swm_mixer_input_ids[i];
+		SST_FILL_DESTINATION(2, swm_input->input_id,
+				     input_loc_id, SST_DEFAULT_MODULE_ID);
+		nb_inputs++;
+		swm_input++;
+		pr_debug("input id:%#x, nb_inputs:%d\n", input_loc_id, nb_inputs);
+
+		if (nb_inputs == SST_CMD_SWM_MAX_INPUTS) {
+			pr_warn("%s: SET_SWM cmd max inputs reached\n", __func__);
+			break;
+		}
+	}
+	return nb_inputs;
+}
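+
+/*
+ * Example (hypothetical register value): with reg == BIT(SST_IP_PCM0) |
+ * BIT(SST_IP_TONE), fill_swm_input() emits two entries taken from
+ * swm_mixer_input_ids[] and returns nb_inputs == 2.
+ */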
+
+static void sst_set_pipe_gain(struct sst_ids *ids, struct sst_data *sst, int mute)
+{
+	struct sst_gain_mixer_control *mc;
+	struct sst_gain_value *gv;
+	struct module *gain = NULL;
+
+	list_for_each_entry(gain, &ids->gain_list, node) {
+		struct snd_kcontrol *kctl = gain->kctl;
+
+		pr_debug("control name=%s\n", kctl->id.name);
+		mc = (void *)kctl->private_value;
+		gv = mc->gain_val;
+
+		sst_send_gain_cmd(sst, gv, mc->task_id,
+				mc->pipe_id | mc->instance_id, mc->module_id, mute);
+	}
+}
+
+static int sst_swm_mixer_event(struct snd_soc_dapm_widget *w,
+			struct snd_kcontrol *k, int event)
+{
+	struct sst_cmd_set_swm cmd;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(w->platform);
+	struct sst_ids *ids = w->priv;
+	bool set_mixer = false;
+	int val = sst->widget[ids->reg];
+
+	pr_debug("%s: widget=%s\n", __func__, w->name);
+	pr_debug("%s: reg[%d] = %#x\n", __func__, ids->reg, val);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+	case SND_SOC_DAPM_POST_PMD:
+		set_mixer = true;
+		break;
+	case SND_SOC_DAPM_POST_REG:
+		if (w->power)
+			set_mixer = true;
+		break;
+	default:
+		set_mixer = false;
+	}
+
+	if (set_mixer == false)
+		return 0;
+
+	if (SND_SOC_DAPM_EVENT_ON(event) ||
+	    event == SND_SOC_DAPM_POST_REG)
+		cmd.switch_state = SST_SWM_ON;
+	else
+		cmd.switch_state = SST_SWM_OFF;
+
+	SST_FILL_DEFAULT_DESTINATION(cmd.header.dst);
+	/* MMX_SET_SWM == SBA_SET_SWM */
+	cmd.header.command_id = SBA_SET_SWM;
+
+	SST_FILL_DESTINATION(2, cmd.output_id,
+			     ids->location_id, SST_DEFAULT_MODULE_ID);
+	cmd.nb_inputs =	fill_swm_input(&cmd.input[0], val);
+	cmd.header.length = offsetof(struct sst_cmd_set_swm, input) - sizeof(struct sst_dsp_header)
+				+ (cmd.nb_inputs * sizeof(cmd.input[0]));
+
+	sst_fill_and_send_cmd(sst, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+			      ids->task_id, 0, &cmd,
+			      sizeof(cmd.header) + cmd.header.length);
+	return 0;
+}
+
+/* SBA mixers - 16 inputs */
+#define SST_SBA_DECLARE_MIX_CONTROLS(kctl_name, mixer_reg)			\
+	static const struct snd_kcontrol_new kctl_name[] = {			\
+		SOC_SINGLE_EXT("modem_in", mixer_reg, SST_IP_MODEM, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("bt_in", mixer_reg, SST_IP_BT, 1, 0,		\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("codec_in0", mixer_reg, SST_IP_CODEC0, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("codec_in1", mixer_reg, SST_IP_CODEC1, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("sprot_loop_in", mixer_reg, SST_IP_LOOP0, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("media_loop1_in", mixer_reg, SST_IP_LOOP1, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("media_loop2_in", mixer_reg, SST_IP_LOOP2, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("sidetone_in", mixer_reg, SST_IP_SIDETONE, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("txspeech_in", mixer_reg, SST_IP_TXSPEECH, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("speech_in", mixer_reg, SST_IP_SPEECH, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("tone_in", mixer_reg, SST_IP_TONE, 1, 0,		\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("voip_in", mixer_reg, SST_IP_VOIP, 1, 0,		\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("pcm0_in", mixer_reg, SST_IP_PCM0, 1, 0,		\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("pcm1_in", mixer_reg, SST_IP_PCM1, 1, 0,		\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("low_pcm0_in", mixer_reg, SST_IP_LOW_PCM0, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("fm_in", mixer_reg, SST_IP_FM, 1, 0,		\
+				sst_mix_get, sst_mix_put),			\
+	}
+
+#define SST_SBA_MIXER_GRAPH_MAP(mix_name)			\
+	{ mix_name, "modem_in",		"modem_in" },		\
+	{ mix_name, "bt_in",		"bt_in" },		\
+	{ mix_name, "codec_in0",	"codec_in0" },		\
+	{ mix_name, "codec_in1",	"codec_in1" },		\
+	{ mix_name, "sprot_loop_in",	"sprot_loop_in" },	\
+	{ mix_name, "media_loop1_in",	"media_loop1_in" },	\
+	{ mix_name, "media_loop2_in",	"media_loop2_in" },	\
+	{ mix_name, "sidetone_in",	"sidetone_in" },	\
+	{ mix_name, "txspeech_in",	"txspeech_in" },	\
+	{ mix_name, "speech_in",	"speech_in" },		\
+	{ mix_name, "tone_in",		"tone_in" },		\
+	{ mix_name, "voip_in",		"voip_in" },		\
+	{ mix_name, "pcm0_in",		"pcm0_in" },		\
+	{ mix_name, "pcm1_in",		"pcm1_in" },		\
+	{ mix_name, "low_pcm0_in",	"low_pcm0_in" },	\
+	{ mix_name, "fm_in",		"fm_in" }
+
+#define SST_MMX_DECLARE_MIX_CONTROLS(kctl_name, mixer_reg)			\
+	static const struct snd_kcontrol_new kctl_name[] = {			\
+		SOC_SINGLE_EXT("media0_in", mixer_reg, SST_IP_MEDIA0, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("media1_in", mixer_reg, SST_IP_MEDIA1, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("media2_in", mixer_reg, SST_IP_MEDIA2, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+		SOC_SINGLE_EXT("media3_in", mixer_reg, SST_IP_MEDIA3, 1, 0,	\
+				sst_mix_get, sst_mix_put),			\
+	}
+
+SST_MMX_DECLARE_MIX_CONTROLS(sst_mix_media0_controls, SST_MIX_MEDIA0);
+SST_MMX_DECLARE_MIX_CONTROLS(sst_mix_media1_controls, SST_MIX_MEDIA1);
+
+/* 18 SBA mixers */
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_pcm0_controls, SST_MIX_PCM0);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_pcm1_controls, SST_MIX_PCM1);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_pcm2_controls, SST_MIX_PCM2);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_sprot_l0_controls, SST_MIX_LOOP0);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_media_l1_controls, SST_MIX_LOOP1);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_media_l2_controls, SST_MIX_LOOP2);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_voip_controls, SST_MIX_VOIP);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_aware_controls, SST_MIX_AWARE);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_vad_controls, SST_MIX_VAD);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_hf_sns_controls, SST_MIX_HF_SNS);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_hf_controls, SST_MIX_HF);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_speech_controls, SST_MIX_SPEECH);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_rxspeech_controls, SST_MIX_RXSPEECH);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_codec0_controls, SST_MIX_CODEC0);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_codec1_controls, SST_MIX_CODEC1);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_bt_controls, SST_MIX_BT);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_fm_controls, SST_MIX_FM);
+SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_modem_controls, SST_MIX_MODEM);
+
+static int sst_vb_trigger_event(struct snd_soc_dapm_widget *w,
+			struct snd_kcontrol *k, int event)
+{
+	struct sst_cmd_generic cmd;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(w->platform);
+
+	pr_debug("Enter:%s, widget=%s\n", __func__, w->name);
+	if (SND_SOC_DAPM_EVENT_ON(event))
+		cmd.header.command_id = SBA_VB_START;
+	else
+		cmd.header.command_id = SBA_IDLE;
+
+	SST_FILL_DEFAULT_DESTINATION(cmd.header.dst);
+	cmd.header.length = 0;
+
+	if (SND_SOC_DAPM_EVENT_ON(event))
+		sst_dsp->ops->power(true);
+
+	sst_fill_and_send_cmd(sst, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+			      SST_TASK_SBA, 0, &cmd,
+			      sizeof(cmd.header) + cmd.header.length);
+
+	if (!SND_SOC_DAPM_EVENT_ON(event))
+		sst_dsp->ops->power(false);
+	return 0;
+}
+
+static void sst_send_slot_map(struct sst_data *sst)
+{
+	struct sst_param_sba_ssp_slot_map cmd;
+
+	pr_debug("Enter: %s\n", __func__);
+
+	SST_FILL_DEFAULT_DESTINATION(cmd.header.dst);
+	cmd.header.command_id = SBA_SET_SSP_SLOT_MAP;
+	cmd.header.length = sizeof(struct sst_param_sba_ssp_slot_map)
+				- sizeof(struct sst_dsp_header);
+
+	cmd.param_id = SBA_SET_SSP_SLOT_MAP;
+	cmd.param_len = sizeof(cmd.rx_slot_map) + sizeof(cmd.tx_slot_map) + sizeof(cmd.ssp_index);
+	cmd.ssp_index = SSP_CODEC;
+
+	memcpy(cmd.rx_slot_map, &sst_ssp_slot_map[0], sizeof(cmd.rx_slot_map));
+	memcpy(cmd.tx_slot_map, &sst_ssp_channel_map[0], sizeof(cmd.tx_slot_map));
+
+	sst_fill_and_send_cmd(sst, SST_IPC_IA_SET_PARAMS, SST_FLAG_BLOCKED,
+			      SST_TASK_SBA, 0, &cmd,
+			      sizeof(cmd.header) + cmd.header.length);
+}
+
+static int sst_ssp_event(struct snd_soc_dapm_widget *w,
+			 struct snd_kcontrol *k, int event)
+{
+	struct sst_cmd_sba_hw_set_ssp cmd;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(w->platform);
+	struct sst_ids *ids = w->priv;
+	static int ssp_active[SST_NUM_SSPS];
+	unsigned int domain, mux;
+	unsigned int ssp_no = ids->ssp->ssp_number;
+	int domain_shift, mux_shift;
+	const struct sst_ssp_config *config;
+
+	pr_debug("Enter:%s, widget=%s\n", __func__, w->name);
+
+	SST_FILL_DEFAULT_DESTINATION(cmd.header.dst);
+	cmd.header.command_id = SBA_HW_SET_SSP;
+	cmd.header.length = sizeof(struct sst_cmd_sba_hw_set_ssp)
+				- sizeof(struct sst_dsp_header);
+	mux_shift = *ids->ssp->mux_shift;
+	mux = (mux_shift == -1) ? 0 : get_mux_state(sst, SST_MUX_REG, mux_shift);
+	domain_shift = (*ids->ssp->domain_shift)[mux];
+	domain = (domain_shift == -1) ? 0 : get_mux_state(sst, SST_MUX_REG, domain_shift);
+
+	config = &(*ids->ssp->ssp_config)[mux][domain];
+	pr_debug("%s: ssp_id: %u, mux: %d, domain: %d\n", __func__,
+		 config->ssp_id, mux, domain);
+
+	if (SND_SOC_DAPM_EVENT_ON(event))
+		ssp_active[ssp_no]++;
+	else
+		ssp_active[ssp_no]--;
+
+	pr_debug("%s: ssp_no: %u ssp_active: %d\n", __func__, ssp_no, ssp_active[ssp_no]);
+	if (ssp_active[ssp_no])
+		cmd.switch_state = SST_SWITCH_ON;
+	else
+		cmd.switch_state = SST_SWITCH_OFF;
+
+	cmd.selection = config->ssp_id;
+	cmd.nb_bits_per_slots = config->bits_per_slot;
+	cmd.nb_slots = config->slots;
+	cmd.mode = config->ssp_mode | (config->pcm_mode << 1);
+	cmd.duplex = config->duplex;
+	cmd.active_tx_slot_map = config->active_slot_map;
+	cmd.active_rx_slot_map = config->active_slot_map;
+	cmd.frame_sync_frequency = config->fs_frequency;
+	cmd.frame_sync_polarity = SSP_FS_ACTIVE_HIGH;
+	cmd.data_polarity = 1;
+	cmd.frame_sync_width = config->fs_width;
+	cmd.ssp_protocol = config->ssp_protocol;
+	cmd.start_delay = config->start_delay;
+	cmd.reserved1 = cmd.reserved2 = 0xFF;
+
+	sst_fill_and_send_cmd(sst, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+			      SST_TASK_SBA, 0, &cmd,
+			      sizeof(cmd.header) + cmd.header.length);
+
+	if (SND_SOC_DAPM_EVENT_ON(event)) {
+		sst_find_and_send_pipe_algo(w->platform, w);
+		sst_send_slot_map(sst);
+		sst_set_pipe_gain(ids, sst, 0);
+	}
+	return 0;
+}
+
+static int sst_set_speech_path(struct snd_soc_dapm_widget *w,
+			       struct snd_kcontrol *k, int event)
+{
+	struct sst_cmd_set_speech_path cmd;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(w->platform);
+	struct sst_ids *ids = w->priv;
+	bool is_wideband;
+	static int speech_active;
+
+	pr_debug("%s: widget=%s\n", __func__, w->name);
+
+	if (SND_SOC_DAPM_EVENT_ON(event)) {
+		speech_active++;
+		cmd.switch_state = SST_SWITCH_ON;
+	} else {
+		speech_active--;
+		cmd.switch_state = SST_SWITCH_OFF;
+	}
+
+	SST_FILL_DEFAULT_DESTINATION(cmd.header.dst);
+
+	cmd.header.command_id = SBA_VB_SET_SPEECH_PATH;
+	cmd.header.length = sizeof(struct sst_cmd_set_speech_path)
+				- sizeof(struct sst_dsp_header);
+	cmd.config.sample_length = 0;
+	cmd.config.rate = 0;		/* 8 kHz */
+	cmd.config.format = 0;
+
+	is_wideband = get_mux_state(sst, SST_MUX_REG, SST_VOICE_MODE_SHIFT);
+	if (is_wideband)
+		cmd.config.rate = 1;	/* 16 kHz */
+
+	if ((SND_SOC_DAPM_EVENT_ON(event) && (speech_active == 1)) ||
+			(SND_SOC_DAPM_EVENT_OFF(event) && (speech_active == 0)))
+		sst_fill_and_send_cmd(sst, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+				SST_TASK_SBA, 0, &cmd,
+				sizeof(cmd.header) + cmd.header.length);
+
+	if (SND_SOC_DAPM_EVENT_ON(event)) {
+		sst_find_and_send_pipe_algo(w->platform, w);
+		sst_set_pipe_gain(ids, sst, 0);
+	}
+
+	return 0;
+}
+
+static int sst_set_media_path(struct snd_soc_dapm_widget *w,
+			      struct snd_kcontrol *k, int event)
+{
+	struct sst_cmd_set_media_path cmd;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(w->platform);
+	struct sst_ids *ids = w->priv;
+
+	pr_debug("%s: widget=%s\n", __func__, w->name);
+	pr_debug("%s: task=%u, location=%#x\n", __func__,
+				ids->task_id, ids->location_id);
+
+	if (SND_SOC_DAPM_EVENT_ON(event))
+		cmd.switch_state = SST_PATH_ON;
+	else
+		cmd.switch_state = SST_PATH_OFF;
+
+	SST_FILL_DESTINATION(2, cmd.header.dst,
+			     ids->location_id, SST_DEFAULT_MODULE_ID);
+
+	/* MMX_SET_MEDIA_PATH == SBA_SET_MEDIA_PATH */
+	cmd.header.command_id = MMX_SET_MEDIA_PATH;
+	cmd.header.length = sizeof(struct sst_cmd_set_media_path)
+				- sizeof(struct sst_dsp_header);
+
+	sst_fill_and_send_cmd(sst, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+			      ids->task_id, 0, &cmd,
+			      sizeof(cmd.header) + cmd.header.length);
+
+	if (SND_SOC_DAPM_EVENT_ON(event)) {
+		sst_find_and_send_pipe_algo(w->platform, w);
+		sst_set_pipe_gain(ids, sst, 0);
+	}
+
+	return 0;
+}
+
+static int sst_set_media_loop(struct snd_soc_dapm_widget *w,
+			struct snd_kcontrol *k, int event)
+{
+	struct sst_cmd_sba_set_media_loop_map cmd;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(w->platform);
+	struct sst_ids *ids = w->priv;
+
+	pr_debug("Enter:%s, widget=%s\n", __func__, w->name);
+	if (SND_SOC_DAPM_EVENT_ON(event))
+		cmd.switch_state = SST_SWITCH_ON;
+	else
+		cmd.switch_state = SST_SWITCH_OFF;
+
+	SST_FILL_DESTINATION(2, cmd.header.dst,
+			     ids->location_id, SST_DEFAULT_MODULE_ID);
+
+	cmd.header.command_id = SBA_SET_MEDIA_LOOP_MAP;
+	cmd.header.length = sizeof(struct sst_cmd_sba_set_media_loop_map)
+				 - sizeof(struct sst_dsp_header);
+	cmd.param.part.rate = 2;		/* 48 kHz */
+	cmd.param.part.format = ids->format;	/* stereo/mono */
+	cmd.param.part.sample_length = 1;	/* 24-bit left justified */
+	cmd.map = 0;	/* Algo sequence: Gain - DRP - FIR - IIR */
+
+	sst_fill_and_send_cmd(sst, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+			      SST_TASK_SBA, 0, &cmd,
+			      sizeof(cmd.header) + cmd.header.length);
+	if (SND_SOC_DAPM_EVENT_ON(event)) {
+		sst_find_and_send_pipe_algo(w->platform, w);
+		sst_set_pipe_gain(ids, sst, 0);
+	}
+	return 0;
+}
+
+static int sst_send_probe_cmd(struct sst_data *sst, u16 probe_pipe_id,
+			      int mode, int switch_state,
+			      const struct sst_probe_config *probe_cfg)
+{
+	struct sst_cmd_probe cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	SST_FILL_DESTINATION(3, cmd.header.dst, SST_DEFAULT_CELL_NBR,
+			     probe_pipe_id, SST_DEFAULT_MODULE_ID);
+	cmd.header.command_id = SBA_PROBE;
+	cmd.header.length = sizeof(struct sst_cmd_probe)
+				 - sizeof(struct sst_dsp_header);
+	cmd.switch_state = switch_state;
+
+	SST_FILL_DESTINATION(2, cmd.probe_dst,
+			     probe_cfg->loc_id, probe_cfg->mod_id);
+
+	cmd.shared_mem = 1;
+	cmd.probe_in = 0;
+	cmd.probe_out = 0;
+
+	cmd.probe_mode = mode;
+	cmd.sample_length = probe_cfg->cfg.s_length;
+	cmd.rate = probe_cfg->cfg.rate;
+	cmd.format = probe_cfg->cfg.format;
+	cmd.sm_buf_id = 1;
+
+	return sst_fill_and_send_cmd(sst, SST_IPC_IA_CMD, SST_FLAG_BLOCKED,
+				     probe_cfg->task_id, 0, &cmd,
+				     sizeof(cmd.header) + cmd.header.length);
+}
+
+static const struct snd_kcontrol_new sst_probe_controls[];
+static const struct sst_probe_config sst_probes[];
+
+#define SST_MAX_PROBE_STREAMS 8
+int sst_dpcm_probe_send(struct snd_soc_platform *platform, u16 probe_pipe_id,
+			int substream, int direction, bool on)
+{
+	int switch_state = on ? SST_SWITCH_ON : SST_SWITCH_OFF;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	const struct sst_probe_config *probe_cfg;
+	struct sst_probe_value *probe_val;
+	const char *type;
+	int offset;
+	int mode;
+
+	if (direction == SNDRV_PCM_STREAM_CAPTURE) {
+		mode = SST_PROBE_EXTRACTOR;
+		offset = 0;
+		type = "extractor";
+	} else {
+		mode = SST_PROBE_INJECTOR;
+		offset = SST_MAX_PROBE_STREAMS;
+		type = "injector";
+	}
+	/* get the value of the probe connection kcontrol */
+	probe_val = (void *)sst_probe_controls[substream + offset].private_value;
+	probe_cfg = &sst_probes[probe_val->val];
+
+	pr_debug("%s: substream=%d, direction=%d\n", __func__, substream, direction);
+	pr_debug("%s: %s probe point at %s\n", __func__, type, probe_cfg->name);
+
+	return sst_send_probe_cmd(sst, probe_pipe_id, mode, switch_state, probe_cfg);
+}
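+
+/*
+ * Probe kcontrols are indexed as substream + offset above, i.e. the
+ * first SST_MAX_PROBE_STREAMS (8) entries are extractor connections
+ * and the next 8 are injector connections.
+ */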
+
+static const struct snd_kcontrol_new sst_mix_sw_aware =
+	SOC_SINGLE_EXT("switch", SST_MIX_SWITCH, 0, 1, 0,
+		sst_mix_get, sst_mix_put);
+
+static const char * const sst_bt_fm_texts[] = {
+	"fm", "bt",
+};
+
+static const struct snd_kcontrol_new sst_bt_fm_mux =
+	SST_SSP_MUX_CTL("ssp1_out", 0, SST_MUX_REG, SST_BT_FM_MUX_SHIFT, sst_bt_fm_texts,
+			sst_mux_get, sst_mux_put);
+
+#define SST_SSP_CODEC_MUX		0
+#define SST_SSP_CODEC_DOMAIN		0
+#define SST_SSP_MODEM_MUX		0
+#define SST_SSP_MODEM_DOMAIN		0
+#define SST_SSP_FM_MUX			0
+#define SST_SSP_FM_DOMAIN		0
+#define SST_SSP_BT_MUX			1
+#define SST_SSP_BT_NB_DOMAIN		0
+#define SST_SSP_BT_WB_DOMAIN		1
+
+static const int sst_ssp_mux_shift[SST_NUM_SSPS] = {
+	[SST_SSP0] = -1,			/* no register shift, i.e. single mux value */
+	[SST_SSP1] = SST_BT_FM_MUX_SHIFT,
+	[SST_SSP2] = -1,
+};
+
+static const int sst_ssp_domain_shift[SST_NUM_SSPS][SST_MAX_SSP_MUX] = {
+	[SST_SSP0][0] = -1,			/* no domain shift, i.e. single domain */
+	[SST_SSP1] = {
+		[SST_SSP_FM_MUX] = -1,
+		[SST_SSP_BT_MUX] = SST_BT_MODE_SHIFT,
+	},
+	[SST_SSP2][0] = -1,
+};
+
+static const struct sst_ssp_config
+sst_ssp_configs[SST_NUM_SSPS][SST_MAX_SSP_MUX][SST_MAX_SSP_DOMAINS] = {
+	[SST_SSP0] = {
+		[SST_SSP_MODEM_MUX] = {
+			[SST_SSP_MODEM_DOMAIN] = {
+				.ssp_id = SSP_MODEM,
+				.bits_per_slot = 16,
+				.slots = 1,
+				.ssp_mode = SSP_MODE_MASTER,
+				.pcm_mode = SSP_PCM_MODE_NETWORK,
+				.duplex = SSP_DUPLEX,
+				.ssp_protocol = SSP_MODE_PCM,
+				.fs_width = 1,
+				.fs_frequency = SSP_FS_48_KHZ,
+				.active_slot_map = 0x1,
+				.start_delay = 1,
+			},
+		},
+	},
+	[SST_SSP1] = {
+		[SST_SSP_FM_MUX] = {
+			[SST_SSP_FM_DOMAIN] = {
+				.ssp_id = SSP_FM,
+				.bits_per_slot = 16,
+				.slots = 2,
+				.ssp_mode = SSP_MODE_MASTER,
+				.pcm_mode = SSP_PCM_MODE_NORMAL,
+				.duplex = SSP_DUPLEX,
+				.ssp_protocol = SSP_MODE_I2S,
+				.fs_width = 32,
+				.fs_frequency = SSP_FS_48_KHZ,
+				.active_slot_map = 0x3,
+				.start_delay = 0,
+			},
+		},
+		[SST_SSP_BT_MUX] = {
+			[SST_SSP_BT_NB_DOMAIN] = {
+				.ssp_id = SSP_BT,
+				.bits_per_slot = 16,
+				.slots = 1,
+				.ssp_mode = SSP_MODE_MASTER,
+				.pcm_mode = SSP_PCM_MODE_NORMAL,
+				.duplex = SSP_DUPLEX,
+				.ssp_protocol = SSP_MODE_PCM,
+				.fs_width = 1,
+				.fs_frequency = SSP_FS_8_KHZ,
+				.active_slot_map = 0x1,
+				.start_delay = 1,
+			},
+			[SST_SSP_BT_WB_DOMAIN] = {
+				.ssp_id = SSP_BT,
+				.bits_per_slot = 16,
+				.slots = 1,
+				.ssp_mode = SSP_MODE_MASTER,
+				.pcm_mode = SSP_PCM_MODE_NORMAL,
+				.duplex = SSP_DUPLEX,
+				.ssp_protocol = SSP_MODE_PCM,
+				.fs_width = 1,
+				.fs_frequency = SSP_FS_16_KHZ,
+				.active_slot_map = 0x1,
+				.start_delay = 1,
+			},
+		},
+	},
+	[SST_SSP2] = {
+		[SST_SSP_CODEC_MUX] = {
+			[SST_SSP_CODEC_DOMAIN] = {
+				.ssp_id = SSP_CODEC,
+				.bits_per_slot = 24,
+				.slots = 4,
+				.ssp_mode = SSP_MODE_MASTER,
+				.pcm_mode = SSP_PCM_MODE_NETWORK,
+				.duplex = SSP_DUPLEX,
+				.ssp_protocol = SSP_MODE_PCM,
+				.fs_width = 1,
+				.fs_frequency = SSP_FS_48_KHZ,
+				.active_slot_map = 0xF,
+				.start_delay = 0,
+			},
+		},
+	},
+};
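+
+/*
+ * Selection sketch: sst_ssp_event() indexes this table as
+ * [ssp][mux][domain]. For example, SSP1 with the "ssp1_out" mux set to
+ * "bt" and "domain bt mode" set to "wideband" picks the 16 kHz SSP_BT
+ * configuration above.
+ */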
+
+#define SST_SSP_CFG(wssp_no)								\
+	(const struct sst_ssp_cfg){ .ssp_config = &sst_ssp_configs[wssp_no],		\
+				    .ssp_number = wssp_no,				\
+				    .mux_shift = &sst_ssp_mux_shift[wssp_no],		\
+				    .domain_shift = &sst_ssp_domain_shift[wssp_no], }
+
+static const struct snd_soc_dapm_widget sst_dapm_widgets[] = {
+	SND_SOC_DAPM_INPUT("tone"),
+	SND_SOC_DAPM_OUTPUT("aware"),
+	SND_SOC_DAPM_OUTPUT("vad"),
+	SST_SSP_INPUT("modem_in",  sst_ssp_event, SST_SSP_CFG(SST_SSP0)),
+	SST_SSP_AIF_IN("codec_in0", sst_ssp_event, SST_SSP_CFG(SST_SSP2)),
+	SST_SSP_AIF_IN("codec_in1", sst_ssp_event, SST_SSP_CFG(SST_SSP2)),
+	SST_SSP_INPUT("bt_fm_in", sst_ssp_event, SST_SSP_CFG(SST_SSP1)),
+	SST_SSP_OUTPUT("modem_out", sst_ssp_event, SST_SSP_CFG(SST_SSP0)),
+	SST_SSP_AIF_OUT("codec_out0", sst_ssp_event, SST_SSP_CFG(SST_SSP2)),
+	SST_SSP_AIF_OUT("codec_out1", sst_ssp_event, SST_SSP_CFG(SST_SSP2)),
+	SST_SSP_OUTPUT("bt_fm_out", sst_ssp_event, SST_SSP_CFG(SST_SSP1)),
+
+	/* Media Paths */
+	/* MediaX IN paths are set via ALLOC, so no SET_MEDIA_PATH command */
+	SST_PATH_INPUT("media0_in", SST_TASK_MMX, SST_SWM_IN_MEDIA0, NULL),
+	SST_PATH_INPUT("media1_in", SST_TASK_MMX, SST_SWM_IN_MEDIA1, NULL),
+	SST_PATH_INPUT("media2_in", SST_TASK_MMX, SST_SWM_IN_MEDIA2, sst_set_media_path),
+	SST_PATH_INPUT("media3_in", SST_TASK_MMX, SST_SWM_IN_MEDIA3, NULL),
+	SST_PATH_OUTPUT("media0_out", SST_TASK_MMX, SST_SWM_OUT_MEDIA0, sst_set_media_path),
+	SST_PATH_OUTPUT("media1_out", SST_TASK_MMX, SST_SWM_OUT_MEDIA1, sst_set_media_path),
+
+	/* SBA PCM Paths */
+	SST_PATH_INPUT("pcm0_in", SST_TASK_SBA, SST_SWM_IN_PCM0, sst_set_media_path),
+	SST_PATH_INPUT("pcm1_in", SST_TASK_SBA, SST_SWM_IN_PCM1, sst_set_media_path),
+	SST_PATH_OUTPUT("pcm0_out", SST_TASK_SBA, SST_SWM_OUT_PCM0, sst_set_media_path),
+	SST_PATH_OUTPUT("pcm1_out", SST_TASK_SBA, SST_SWM_OUT_PCM1, sst_set_media_path),
+	SST_PATH_OUTPUT("pcm2_out", SST_TASK_SBA, SST_SWM_OUT_PCM2, sst_set_media_path),
+	/* TODO: check if this needs SET_MEDIA_PATH command */
+	SST_PATH_INPUT("low_pcm0_in", SST_TASK_SBA, SST_SWM_IN_LOW_PCM0, NULL),
+
+	SST_PATH_INPUT("voip_in", SST_TASK_SBA, SST_SWM_IN_VOIP, sst_set_media_path),
+	SST_PATH_OUTPUT("voip_out", SST_TASK_SBA, SST_SWM_OUT_VOIP, sst_set_media_path),
+	SST_PATH_OUTPUT("aware_out", SST_TASK_SBA, SST_SWM_OUT_AWARE, sst_set_media_path),
+	SST_PATH_OUTPUT("vad_out", SST_TASK_SBA, SST_SWM_OUT_VAD, sst_set_media_path),
+
+	/* SBA Loops */
+	SST_PATH_INPUT("sprot_loop_in", SST_TASK_SBA, SST_SWM_IN_SPROT_LOOP, NULL),
+	SST_PATH_INPUT("media_loop1_in", SST_TASK_SBA, SST_SWM_IN_MEDIA_LOOP1, NULL),
+	SST_PATH_INPUT("media_loop2_in", SST_TASK_SBA, SST_SWM_IN_MEDIA_LOOP2, NULL),
+	SST_PATH_MEDIA_LOOP_OUTPUT("sprot_loop_out", SST_TASK_SBA, SST_SWM_OUT_SPROT_LOOP, SST_FMT_MONO, sst_set_media_loop),
+	SST_PATH_MEDIA_LOOP_OUTPUT("media_loop1_out", SST_TASK_SBA, SST_SWM_OUT_MEDIA_LOOP1, SST_FMT_MONO, sst_set_media_loop),
+	SST_PATH_MEDIA_LOOP_OUTPUT("media_loop2_out", SST_TASK_SBA, SST_SWM_OUT_MEDIA_LOOP2, SST_FMT_STEREO, sst_set_media_loop),
+
+	/* TODO: need to send command */
+	SST_PATH_INPUT("sidetone_in", SST_TASK_SBA, SST_SWM_IN_SIDETONE, NULL),
+	SST_PATH_INPUT("tone_in", SST_TASK_SBA, SST_SWM_IN_TONE, NULL),
+	SST_PATH_INPUT("bt_in", SST_TASK_SBA, SST_SWM_IN_BT, NULL),
+	SST_PATH_INPUT("fm_in", SST_TASK_SBA, SST_SWM_IN_FM, NULL),
+	SST_PATH_OUTPUT("bt_out", SST_TASK_SBA, SST_SWM_OUT_BT, NULL),
+	SST_PATH_OUTPUT("fm_out", SST_TASK_SBA, SST_SWM_OUT_FM, NULL),
+
+	/* SBA Voice Paths */
+	SST_PATH_INPUT("speech_in", SST_TASK_SBA, SST_SWM_IN_SPEECH, sst_set_speech_path),
+	SST_PATH_INPUT("txspeech_in", SST_TASK_SBA, SST_SWM_IN_TXSPEECH, sst_set_speech_path),
+	SST_PATH_OUTPUT("hf_sns_out", SST_TASK_SBA, SST_SWM_OUT_HF_SNS, sst_set_speech_path),
+	SST_PATH_OUTPUT("hf_out", SST_TASK_SBA, SST_SWM_OUT_HF, sst_set_speech_path),
+	SST_PATH_OUTPUT("speech_out", SST_TASK_SBA, SST_SWM_OUT_SPEECH, sst_set_speech_path),
+	SST_PATH_OUTPUT("rxspeech_out", SST_TASK_SBA, SST_SWM_OUT_RXSPEECH, sst_set_speech_path),
+
+	/* Media Mixers */
+	SST_SWM_MIXER("media0_out mix 0", SST_MIX_MEDIA0, SST_TASK_MMX, SST_SWM_OUT_MEDIA0,
+		      sst_mix_media0_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("media1_out mix 0", SST_MIX_MEDIA1, SST_TASK_MMX, SST_SWM_OUT_MEDIA1,
+		      sst_mix_media1_controls, sst_swm_mixer_event),
+
+	/* SBA PCM mixers */
+	SST_SWM_MIXER("pcm0_out mix 0", SST_MIX_PCM0, SST_TASK_SBA, SST_SWM_OUT_PCM0,
+		      sst_mix_pcm0_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("pcm1_out mix 0", SST_MIX_PCM1, SST_TASK_SBA, SST_SWM_OUT_PCM1,
+		      sst_mix_pcm1_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("pcm2_out mix 0", SST_MIX_PCM2, SST_TASK_SBA, SST_SWM_OUT_PCM2,
+		      sst_mix_pcm2_controls, sst_swm_mixer_event),
+
+	/* SBA Loop mixers */
+	SST_SWM_MIXER("sprot_loop_out mix 0", SST_MIX_LOOP0, SST_TASK_SBA, SST_SWM_OUT_SPROT_LOOP,
+		      sst_mix_sprot_l0_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("media_loop1_out mix 0", SST_MIX_LOOP1, SST_TASK_SBA, SST_SWM_OUT_MEDIA_LOOP1,
+		      sst_mix_media_l1_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("media_loop2_out mix 0", SST_MIX_LOOP2, SST_TASK_SBA, SST_SWM_OUT_MEDIA_LOOP2,
+		      sst_mix_media_l2_controls, sst_swm_mixer_event),
+
+	SST_SWM_MIXER("voip_out mix 0", SST_MIX_VOIP, SST_TASK_SBA, SST_SWM_OUT_VOIP,
+		      sst_mix_voip_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("aware_out mix 0", SST_MIX_AWARE, SST_TASK_SBA, SST_SWM_OUT_AWARE,
+		      sst_mix_aware_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("vad_out mix 0", SST_MIX_VAD, SST_TASK_SBA, SST_SWM_OUT_VAD,
+		      sst_mix_vad_controls, sst_swm_mixer_event),
+
+	/* SBA Voice mixers */
+	SST_SWM_MIXER("hf_sns_out mix 0", SST_MIX_HF_SNS, SST_TASK_SBA, SST_SWM_OUT_HF_SNS,
+		      sst_mix_hf_sns_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("hf_out mix 0", SST_MIX_HF, SST_TASK_SBA, SST_SWM_OUT_HF,
+		      sst_mix_hf_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("speech_out mix 0", SST_MIX_SPEECH, SST_TASK_SBA, SST_SWM_OUT_SPEECH,
+		      sst_mix_speech_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("rxspeech_out mix 0", SST_MIX_RXSPEECH, SST_TASK_SBA, SST_SWM_OUT_RXSPEECH,
+		      sst_mix_rxspeech_controls, sst_swm_mixer_event),
+
+	/* SBA Backend mixers */
+	SST_SWM_MIXER("codec_out0 mix 0", SST_MIX_CODEC0, SST_TASK_SBA, SST_SWM_OUT_CODEC0,
+		      sst_mix_codec0_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("codec_out1 mix 0", SST_MIX_CODEC1, SST_TASK_SBA, SST_SWM_OUT_CODEC1,
+		      sst_mix_codec1_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("bt_out mix 0", SST_MIX_BT, SST_TASK_SBA, SST_SWM_OUT_BT,
+		      sst_mix_bt_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("fm_out mix 0", SST_MIX_FM, SST_TASK_SBA, SST_SWM_OUT_FM,
+		      sst_mix_fm_controls, sst_swm_mixer_event),
+	SST_SWM_MIXER("modem_out mix 0", SST_MIX_MODEM, SST_TASK_SBA, SST_SWM_OUT_MODEM,
+		      sst_mix_modem_controls, sst_swm_mixer_event),
+
+	SND_SOC_DAPM_SWITCH("aware_out aware 0", SND_SOC_NOPM, 0, 0, &sst_mix_sw_aware),
+	SND_SOC_DAPM_MUX("ssp1_out mux 0", SND_SOC_NOPM, 0, 0, &sst_bt_fm_mux),
+
+	SND_SOC_DAPM_SUPPLY("VBTimer", SND_SOC_NOPM, 0, 0,
+			    sst_vb_trigger_event,
+			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+};
+
+static const struct snd_soc_dapm_route intercon[] = {
+	{"media0_in", NULL, "Compress Playback"},
+	{"media1_in", NULL, "Headset Playback"},
+	{"media2_in", NULL, "pcm0_out"},
+	{"media3_in", NULL, "Deepbuffer Playback"},
+
+	{"media0_out mix 0", "media0_in", "media0_in"},
+	{"media0_out mix 0", "media1_in", "media1_in"},
+	{"media0_out mix 0", "media2_in", "media2_in"},
+	{"media0_out mix 0", "media3_in", "media3_in"},
+	{"media1_out mix 0", "media0_in", "media0_in"},
+	{"media1_out mix 0", "media1_in", "media1_in"},
+	{"media1_out mix 0", "media2_in", "media2_in"},
+	{"media1_out mix 0", "media3_in", "media3_in"},
+
+	{"media0_out", NULL, "media0_out mix 0"},
+	{"media1_out", NULL, "media1_out mix 0"},
+	{"pcm0_in", NULL, "media0_out"},
+	{"pcm1_in", NULL, "media1_out"},
+
+	{"Headset Capture", NULL, "pcm1_out"},
+	{"Headset Capture", NULL, "pcm2_out"},
+	{"pcm0_out", NULL, "pcm0_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("pcm0_out mix 0"),
+	{"pcm1_out", NULL, "pcm1_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("pcm1_out mix 0"),
+	{"pcm2_out", NULL, "pcm2_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("pcm2_out mix 0"),
+
+	{"media_loop1_in", NULL, "media_loop1_out"},
+	{"media_loop1_out", NULL, "media_loop1_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("media_loop1_out mix 0"),
+	{"media_loop2_in", NULL, "media_loop2_out"},
+	{"media_loop2_out", NULL, "media_loop2_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("media_loop2_out mix 0"),
+	{"sprot_loop_in", NULL, "sprot_loop_out"},
+	{"sprot_loop_out", NULL, "sprot_loop_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("sprot_loop_out mix 0"),
+
+	{"voip_in", NULL, "VOIP Playback"},
+	{"VOIP Capture", NULL, "voip_out"},
+	{"voip_out", NULL, "voip_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("voip_out mix 0"),
+
+	{"aware", NULL, "aware_out"},
+	{"aware_out", NULL, "aware_out aware 0"},
+	{"aware_out aware 0", "switch", "aware_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("aware_out mix 0"),
+	{"vad", NULL, "vad_out"},
+	{"vad_out", NULL, "vad_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("vad_out mix 0"),
+
+	{"codec_out0", NULL, "codec_out0 mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("codec_out0 mix 0"),
+	{"codec_out1", NULL, "codec_out1 mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("codec_out1 mix 0"),
+	{"modem_out", NULL, "modem_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("modem_out mix 0"),
+
+	{"bt_fm_out", NULL, "ssp1_out mux 0"},
+	{"ssp1_out mux 0", "bt", "bt_out"},
+	{"ssp1_out mux 0", "fm", "fm_out"},
+	{"bt_out", NULL, "bt_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("bt_out mix 0"),
+	{"fm_out", NULL, "fm_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("fm_out mix 0"),
+	{"bt_in", NULL, "bt_fm_in"},
+	{"fm_in", NULL, "bt_fm_in"},
+
+	/* Uplink processing */
+	{"txspeech_in", NULL, "hf_sns_out"},
+	{"txspeech_in", NULL, "hf_out"},
+	{"txspeech_in", NULL, "speech_out"},
+
+	{"hf_sns_out", NULL, "hf_sns_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("hf_sns_out mix 0"),
+	{"hf_out", NULL, "hf_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("hf_out mix 0"),
+	{"speech_out", NULL, "speech_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("speech_out mix 0"),
+
+	/* Downlink processing */
+	{"speech_in", NULL, "rxspeech_out"},
+	{"rxspeech_out", NULL, "rxspeech_out mix 0"},
+	SST_SBA_MIXER_GRAPH_MAP("rxspeech_out mix 0"),
+
+	/* TODO: add Tone inputs */
+	/* TODO: add Low Latency stream support */
+
+	{"Headset Capture", NULL, "VBTimer"},
+	{"Headset Playback", NULL, "VBTimer"},
+	{"Deepbuffer Playback", NULL, "VBTimer"},
+	{"Compress Playback", NULL, "VBTimer"},
+	{"VOIP Playback", NULL, "VBTimer"},
+	{"aware", NULL, "VBTimer"},
+	{"modem_in", NULL, "VBTimer"},
+	{"modem_out", NULL, "VBTimer"},
+	{"bt_fm_in", NULL, "VBTimer"},
+	{"bt_fm_out", NULL, "VBTimer"},
+};
+
+static const char * const sst_nb_wb_texts[] = {
+	"narrowband", "wideband",
+};
+
+static const struct snd_kcontrol_new sst_mux_controls[] = {
+	SST_SSP_MUX_CTL("domain voice mode", 0, SST_MUX_REG, SST_VOICE_MODE_SHIFT, sst_nb_wb_texts,
+			sst_mode_get, sst_mode_put),
+	SST_SSP_MUX_CTL("domain bt mode", 0, SST_MUX_REG, SST_BT_MODE_SHIFT, sst_nb_wb_texts,
+			sst_mode_get, sst_mode_put),
+};
+
+static const char * const slot_names[] = {
+	"none",
+	"slot 0", "slot 1", "slot 2", "slot 3",
+	"slot 4", "slot 5", "slot 6", "slot 7", /* not supported by FW */
+};
+
+static const char * const channel_names[] = {
+	"none",
+	"codec_out0_0", "codec_out0_1", "codec_out1_0", "codec_out1_1",
+	"codec_out2_0", "codec_out2_1", "codec_out3_0", "codec_out3_1", /* not supported by FW */
+};
+
+#define SST_INTERLEAVER(xpname, slot_name, slotno) \
+	SST_SSP_SLOT_CTL(xpname, "interleaver", slot_name, slotno, 1, \
+			 channel_names, sst_slot_get, sst_slot_put)
+
+#define SST_DEINTERLEAVER(xpname, channel_name, channel_no) \
+	SST_SSP_SLOT_CTL(xpname, "deinterleaver", channel_name, channel_no, 0, \
+			 slot_names, sst_slot_get, sst_slot_put)
+
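+/*
+ * Each interleaver control selects, for one codec_out slot, the output
+ * channel that feeds it; each deinterleaver control selects, for one
+ * codec_in channel, the slot it is taken from.
+ */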
+static const struct snd_kcontrol_new sst_slot_controls[] = {
+	SST_INTERLEAVER("codec_out", "slot 0", 0),
+	SST_INTERLEAVER("codec_out", "slot 1", 1),
+	SST_INTERLEAVER("codec_out", "slot 2", 2),
+	SST_INTERLEAVER("codec_out", "slot 3", 3),
+	SST_DEINTERLEAVER("codec_in", "codec_in0_0", 0),
+	SST_DEINTERLEAVER("codec_in", "codec_in0_1", 1),
+	SST_DEINTERLEAVER("codec_in", "codec_in1_0", 2),
+	SST_DEINTERLEAVER("codec_in", "codec_in1_1", 3),
+};
+
+#define SST_NUM_PROBE_CONNECTION_PTS 31
+static const struct sst_probe_config sst_probes[SST_NUM_PROBE_CONNECTION_PTS] = {
+	/* TODO: get this struct from FW config data */
+	/* TODO: only gain outputs supported currently */
+	{ "media0_in gain", SST_PATH_INDEX_MEDIA0_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media1_in gain", SST_PATH_INDEX_MEDIA1_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media2_in gain", SST_PATH_INDEX_MEDIA2_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "media3_in gain", SST_PATH_INDEX_MEDIA3_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_MMX, { 1, 2, 1 } },
+	{ "pcm0_in gain", SST_PATH_INDEX_PCM0_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "pcm1_in gain", SST_PATH_INDEX_PCM1_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "pcm1_out gain", SST_PATH_INDEX_PCM1_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "pcm2_out gain", SST_PATH_INDEX_PCM2_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "voip_in gain", SST_PATH_INDEX_VOIP_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "voip_out gain", SST_PATH_INDEX_VOIP_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "aware_out gain", SST_PATH_INDEX_AWARE_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "vad_out gain", SST_PATH_INDEX_VAD_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "hf_sns_out gain", SST_PATH_INDEX_HF_SNS_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "hf_out gain", SST_PATH_INDEX_HF_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "speech_out gain", SST_PATH_INDEX_SPEECH_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "txspeech_in gain", SST_PATH_INDEX_TX_SPEECH_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "rxspeech_out gain", SST_PATH_INDEX_RX_SPEECH_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "speech_in gain", SST_PATH_INDEX_SPEECH_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "media_loop1_out gain", SST_PATH_INDEX_MEDIA_LOOP1_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "media_loop2_out gain", SST_PATH_INDEX_MEDIA_LOOP2_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "tone_in gain", SST_PATH_INDEX_TONE_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "codec_out0 gain", SST_PATH_INDEX_CODEC_OUT0, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "codec_out1 gain", SST_PATH_INDEX_CODEC_OUT1, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "bt_out gain", SST_PATH_INDEX_BT_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "fm_out gain", SST_PATH_INDEX_FM_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "modem_out gain", SST_PATH_INDEX_MODEM_OUT, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "codec_in0 gain", SST_PATH_INDEX_CODEC_IN0, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "codec_in1 gain", SST_PATH_INDEX_CODEC_IN1, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "bt_in gain", SST_PATH_INDEX_BT_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "fm_in gain", SST_PATH_INDEX_FM_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+	{ "modem_in gain", SST_PATH_INDEX_MODEM_IN, SST_MODULE_ID_GAIN_CELL, SST_TASK_SBA, { 1, 2, 1 } },
+};
+
+/* initialized based on names in sst_probes array */
+static const char *sst_probe_enum_texts[SST_NUM_PROBE_CONNECTION_PTS];
+static const SOC_ENUM_SINGLE_EXT_DECL(sst_probe_enum, sst_probe_enum_texts);
+
+#define SST_PROBE_CTL(name, num)						\
+	SST_PROBE_ENUM(SST_PROBE_CTL_NAME(name, num, "connection"),		\
+		       sst_probe_enum, sst_probe_get, sst_probe_put)
+	/* TODO: implement probe gains
+	SOC_SINGLE_EXT_TLV(SST_PROBE_CTL_NAME(name, num, "gains"), xreg, xshift,
+		xmax, xinv, xget, xput, sst_gain_tlv_common)
+	*/
+
+static const struct snd_kcontrol_new sst_probe_controls[] = {
+	SST_PROBE_CTL("probe out", 0),
+	SST_PROBE_CTL("probe out", 1),
+	SST_PROBE_CTL("probe out", 2),
+	SST_PROBE_CTL("probe out", 3),
+	SST_PROBE_CTL("probe out", 4),
+	SST_PROBE_CTL("probe out", 5),
+	SST_PROBE_CTL("probe out", 6),
+	SST_PROBE_CTL("probe out", 7),
+	SST_PROBE_CTL("probe in", 0),
+	SST_PROBE_CTL("probe in", 1),
+	SST_PROBE_CTL("probe in", 2),
+	SST_PROBE_CTL("probe in", 3),
+	SST_PROBE_CTL("probe in", 4),
+	SST_PROBE_CTL("probe in", 5),
+	SST_PROBE_CTL("probe in", 6),
+	SST_PROBE_CTL("probe in", 7),
+};
+
+/* Gain helper with min/max set */
+#define SST_GAIN(name, path_id, task_id, instance, gain_var)				\
+	SST_GAIN_KCONTROLS(name, "gain", SST_GAIN_MIN_VALUE, SST_GAIN_MAX_VALUE,	\
+		SST_GAIN_TC_MIN, SST_GAIN_TC_MAX,					\
+		sst_gain_get, sst_gain_put,						\
+		SST_MODULE_ID_GAIN_CELL, path_id, instance, task_id,			\
+		sst_gain_tlv_common, gain_var)
+
+#define SST_VOLUME(name, path_id, task_id, instance, gain_var)				\
+	SST_GAIN_KCONTROLS(name, "volume", SST_GAIN_MIN_VALUE, SST_GAIN_MAX_VALUE,	\
+		SST_GAIN_TC_MIN, SST_GAIN_TC_MAX,					\
+		sst_gain_get, sst_gain_put,						\
+		SST_MODULE_ID_VOLUME, path_id, instance, task_id,			\
+		sst_gain_tlv_common, gain_var)
+
+#define SST_NUM_GAINS 36
+static struct sst_gain_value sst_gains[SST_NUM_GAINS];
+
+static const struct snd_kcontrol_new sst_gain_controls[] = {
+	SST_GAIN("media0_in", SST_PATH_INDEX_MEDIA0_IN, SST_TASK_MMX, 0, &sst_gains[0]),
+	SST_GAIN("media1_in", SST_PATH_INDEX_MEDIA1_IN, SST_TASK_MMX, 0, &sst_gains[1]),
+	SST_GAIN("media2_in", SST_PATH_INDEX_MEDIA2_IN, SST_TASK_MMX, 0, &sst_gains[2]),
+	SST_GAIN("media3_in", SST_PATH_INDEX_MEDIA3_IN, SST_TASK_MMX, 0, &sst_gains[3]),
+
+	SST_GAIN("pcm0_in", SST_PATH_INDEX_PCM0_IN, SST_TASK_SBA, 0, &sst_gains[4]),
+	SST_GAIN("pcm1_in", SST_PATH_INDEX_PCM1_IN, SST_TASK_SBA, 0, &sst_gains[5]),
+	SST_GAIN("low_pcm0_in", SST_PATH_INDEX_LOW_PCM0_IN, SST_TASK_SBA, 0, &sst_gains[6]),
+	SST_GAIN("pcm1_out", SST_PATH_INDEX_PCM1_OUT, SST_TASK_SBA, 0, &sst_gains[7]),
+	SST_GAIN("pcm2_out", SST_PATH_INDEX_PCM2_OUT, SST_TASK_SBA, 0, &sst_gains[8]),
+
+	SST_GAIN("voip_in", SST_PATH_INDEX_VOIP_IN, SST_TASK_SBA, 0, &sst_gains[9]),
+	SST_GAIN("voip_out", SST_PATH_INDEX_VOIP_OUT, SST_TASK_SBA, 0, &sst_gains[10]),
+	SST_GAIN("tone_in", SST_PATH_INDEX_TONE_IN, SST_TASK_SBA, 0, &sst_gains[11]),
+
+	SST_GAIN("aware_out", SST_PATH_INDEX_AWARE_OUT, SST_TASK_SBA, 0, &sst_gains[12]),
+	SST_GAIN("vad_out", SST_PATH_INDEX_VAD_OUT, SST_TASK_SBA, 0, &sst_gains[13]),
+
+	SST_GAIN("hf_sns_out", SST_PATH_INDEX_HF_SNS_OUT, SST_TASK_SBA, 0, &sst_gains[14]),
+	SST_GAIN("hf_out", SST_PATH_INDEX_HF_OUT, SST_TASK_SBA, 0, &sst_gains[15]),
+	SST_GAIN("speech_out", SST_PATH_INDEX_SPEECH_OUT, SST_TASK_SBA, 0, &sst_gains[16]),
+	SST_GAIN("txspeech_in", SST_PATH_INDEX_TX_SPEECH_IN, SST_TASK_SBA, 0, &sst_gains[17]),
+	SST_GAIN("rxspeech_out", SST_PATH_INDEX_RX_SPEECH_OUT, SST_TASK_SBA, 0, &sst_gains[18]),
+	SST_GAIN("speech_in", SST_PATH_INDEX_SPEECH_IN, SST_TASK_SBA, 0, &sst_gains[19]),
+
+	SST_GAIN("codec_in0", SST_PATH_INDEX_CODEC_IN0, SST_TASK_SBA, 0, &sst_gains[20]),
+	SST_GAIN("codec_in1", SST_PATH_INDEX_CODEC_IN1, SST_TASK_SBA, 0, &sst_gains[21]),
+	SST_GAIN("codec_out0", SST_PATH_INDEX_CODEC_OUT0, SST_TASK_SBA, 0, &sst_gains[22]),
+	SST_GAIN("codec_out1", SST_PATH_INDEX_CODEC_OUT1, SST_TASK_SBA, 0, &sst_gains[23]),
+	SST_GAIN("bt_out", SST_PATH_INDEX_BT_OUT, SST_TASK_SBA, 0, &sst_gains[24]),
+	SST_GAIN("fm_out", SST_PATH_INDEX_FM_OUT, SST_TASK_SBA, 0, &sst_gains[25]),
+	SST_GAIN("bt_in", SST_PATH_INDEX_BT_IN, SST_TASK_SBA, 0, &sst_gains[26]),
+	SST_GAIN("fm_in", SST_PATH_INDEX_FM_IN, SST_TASK_SBA, 0, &sst_gains[27]),
+	SST_GAIN("modem_in", SST_PATH_INDEX_MODEM_IN, SST_TASK_SBA, 0, &sst_gains[28]),
+	SST_GAIN("modem_out", SST_PATH_INDEX_MODEM_OUT, SST_TASK_SBA, 0, &sst_gains[29]),
+	SST_GAIN("media_loop1_out", SST_PATH_INDEX_MEDIA_LOOP1_OUT, SST_TASK_SBA, 0, &sst_gains[30]),
+	SST_GAIN("media_loop2_out", SST_PATH_INDEX_MEDIA_LOOP2_OUT, SST_TASK_SBA, 0, &sst_gains[31]),
+	SST_GAIN("sprot_loop_out", SST_PATH_INDEX_SPROT_LOOP_OUT, SST_TASK_SBA, 0, &sst_gains[32]),
+	SST_VOLUME("media0_in", SST_PATH_INDEX_MEDIA0_IN, SST_TASK_MMX, 0, &sst_gains[33]),
+	SST_GAIN("sidetone_in", SST_PATH_INDEX_SIDETONE_IN, SST_TASK_SBA, 0, &sst_gains[34]),
+	SST_GAIN("speech_out", SST_PATH_INDEX_SPEECH_OUT, SST_TASK_FBA_UL, 1, &sst_gains[35]),
+};
+
+static const struct snd_kcontrol_new sst_algo_controls[] = {
+	SST_ALGO_KCONTROL_BYTES("media_loop1_out", "fir", 138, SST_MODULE_ID_FIR_24,
+		 SST_PATH_INDEX_MEDIA_LOOP1_OUT, 0, SST_TASK_SBA, SBA_VB_SET_FIR),
+	SST_ALGO_KCONTROL_BYTES("media_loop1_out", "iir", 300, SST_MODULE_ID_IIR_24,
+		SST_PATH_INDEX_MEDIA_LOOP1_OUT, 0, SST_TASK_SBA, SBA_VB_SET_IIR),
+	SST_ALGO_KCONTROL_BYTES("media_loop1_out", "mdrp", 76, SST_MODULE_ID_MDRP,
+		SST_PATH_INDEX_MEDIA_LOOP1_OUT, 0, SST_TASK_SBA, SBA_SET_MDRP),
+	SST_ALGO_KCONTROL_BYTES("media_loop2_out", "fir", 272, SST_MODULE_ID_FIR_24,
+		SST_PATH_INDEX_MEDIA_LOOP2_OUT, 0, SST_TASK_SBA, SBA_VB_SET_FIR),
+	SST_ALGO_KCONTROL_BYTES("media_loop2_out", "iir", 300, SST_MODULE_ID_IIR_24,
+		SST_PATH_INDEX_MEDIA_LOOP2_OUT, 0, SST_TASK_SBA, SBA_VB_SET_IIR),
+	SST_ALGO_KCONTROL_BYTES("media_loop2_out", "mdrp", 76, SST_MODULE_ID_MDRP,
+		SST_PATH_INDEX_MEDIA_LOOP2_OUT, 0, SST_TASK_SBA, SBA_SET_MDRP),
+	SST_ALGO_KCONTROL_BYTES("aware_out", "fir", 272, SST_MODULE_ID_FIR_24,
+		SST_PATH_INDEX_AWARE_OUT, 0, SST_TASK_SBA, SBA_VB_SET_FIR),
+	SST_ALGO_KCONTROL_BYTES("aware_out", "iir", 300, SST_MODULE_ID_IIR_24,
+		SST_PATH_INDEX_AWARE_OUT, 0, SST_TASK_SBA, SBA_VB_SET_IIR),
+	SST_ALGO_KCONTROL_BYTES("aware_out", "aware", 48, SST_MODULE_ID_CONTEXT_ALGO_AWARE,
+		SST_PATH_INDEX_AWARE_OUT, 0, SST_TASK_AWARE, AWARE_ENV_CLASS_PARAMS),
+	SST_ALGO_KCONTROL_BYTES("vad_out", "fir", 272, SST_MODULE_ID_FIR_24,
+		SST_PATH_INDEX_VAD_OUT, 0, SST_TASK_SBA, SBA_VB_SET_FIR),
+	SST_ALGO_KCONTROL_BYTES("vad_out", "iir", 300, SST_MODULE_ID_IIR_24,
+		SST_PATH_INDEX_VAD_OUT, 0, SST_TASK_SBA, SBA_VB_SET_IIR),
+	SST_ALGO_KCONTROL_BYTES("sprot_loop_out", "lpro", 192, SST_MODULE_ID_SPROT,
+		SST_PATH_INDEX_SPROT_LOOP_OUT, 0, SST_TASK_SBA, SBA_VB_LPRO),
+	SST_ALGO_KCONTROL_BYTES("codec_in0", "dcr", 300, SST_MODULE_ID_FILT_DCR,
+		SST_PATH_INDEX_CODEC_IN0, 0, SST_TASK_SBA, SBA_VB_SET_IIR),
+	SST_ALGO_KCONTROL_BYTES("codec_in1", "dcr", 300, SST_MODULE_ID_FILT_DCR,
+		SST_PATH_INDEX_CODEC_IN1, 0, SST_TASK_SBA, SBA_VB_SET_IIR),
+	/* Uplink */
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "fir_speech", 136, SST_MODULE_ID_FIR_16,
+		SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_SET_FIR),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "fir_hf_sns", 136, SST_MODULE_ID_FIR_16,
+		SST_PATH_INDEX_HF_SNS_OUT, 0, SST_TASK_FBA_UL, FBA_VB_SET_FIR),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "iir_speech", 48, SST_MODULE_ID_IIR_16,
+		SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_SET_IIR),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "iir_hf_sns", 48, SST_MODULE_ID_IIR_16,
+		SST_PATH_INDEX_HF_SNS_OUT, 0, SST_TASK_FBA_UL, FBA_VB_SET_IIR),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "aec", 640, SST_MODULE_ID_AEC,
+		SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_AEC),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "nr", 38, SST_MODULE_ID_NR,
+		SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_NR_UL),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "agc", 58, SST_MODULE_ID_AGC,
+		SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_AGC),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "biquad", 22, SST_MODULE_ID_DRP,
+		SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_SET_BIQUAD_D_C),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "compr", 36, SST_MODULE_ID_DRP,
+		SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_DUAL_BAND_COMP),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "sns", 324, SST_MODULE_ID_NR_SNS,
+		SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_SNS),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "ser", 42, SST_MODULE_ID_SER,
+		SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_SER),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "cni", 48, SST_MODULE_ID_CNI_TX,
+		SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_TX_CNI),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "ref", 24, SST_MODULE_ID_REF_LINE,
+		SST_PATH_INDEX_HF_OUT, 0, SST_TASK_FBA_UL, FBA_VB_SET_REF_LINE),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "delay", 6, SST_MODULE_ID_EDL,
+		SST_PATH_INDEX_HF_OUT, 0, SST_TASK_FBA_UL, FBA_VB_SET_DELAY_LINE),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "bmf", 264, SST_MODULE_ID_BMF,
+		SST_PATH_INDEX_HF_SNS_OUT, 0, SST_TASK_FBA_UL, FBA_VB_BMF),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_out", "ul_module", "dnr", 18, SST_MODULE_ID_DNR,
+		SST_PATH_INDEX_SPEECH_OUT, 0, SST_TASK_FBA_UL, FBA_VB_DNR),
+	/* Downlink */
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "ana", 52, SST_MODULE_ID_ANA,
+		SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_ANA),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "fir", 136, SST_MODULE_ID_FIR_16,
+		SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_SET_FIR),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "iir", 48, SST_MODULE_ID_IIR_16,
+		SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_SET_IIR),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "nr", 38, SST_MODULE_ID_NR,
+		SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_NR_DL),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "biquad", 22, SST_MODULE_ID_DRP,
+		SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_SET_BIQUAD_D_C),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "compr", 36, SST_MODULE_ID_DRP,
+		SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_DUAL_BAND_COMP),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "cni", 48, SST_MODULE_ID_CNI,
+		SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_RX_CNI),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "bwx", 54, SST_MODULE_ID_BWX,
+		SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_BWX),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "gmm", 586, SST_MODULE_ID_BWX,
+		SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_GMM),
+	SST_COMBO_ALGO_KCONTROL_BYTES("speech_in", "dl_module", "glc", 18, SST_MODULE_ID_GLC,
+		SST_PATH_INDEX_SPEECH_IN, 0, SST_TASK_FBA_DL, FBA_VB_GLC),
+};
+
+static const struct snd_kcontrol_new sst_debug_controls[] = {
+	SND_SOC_BYTES_EXT("sst debug byte control", SST_MAX_BIN_BYTES,
+		       sst_byte_control_get, sst_byte_control_set),
+};
+
+static inline bool is_sst_dapm_widget(struct snd_soc_dapm_widget *w)
+{
+	return (w->id == snd_soc_dapm_pga) ||
+	       (w->id == snd_soc_dapm_aif_in) ||
+	       (w->id == snd_soc_dapm_aif_out) ||
+	       (w->id == snd_soc_dapm_input) ||
+	       (w->id == snd_soc_dapm_output) ||
+	       (w->id == snd_soc_dapm_mixer);
+}
+
+int sst_send_pipe_gains(struct snd_soc_dai *dai, int stream, int mute)
+{
+	struct snd_soc_platform *platform = dai->platform;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+	struct snd_soc_dapm_widget *w;
+	struct snd_soc_dapm_path *p = NULL;
+
+	pr_debug("%s: enter, dai-name=%s dir=%d\n", __func__, dai->name, stream);
+
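+	/* Walk one hop in the DAPM graph from the DAI widget and program
+	 * the gains of every connected, powered SST widget. */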
+	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		pr_debug("Stream name=%s\n", dai->playback_widget->name);
+		w = dai->playback_widget;
+		list_for_each_entry(p, &w->sinks, list_source) {
+			if (p->connected && !p->connected(w, p->sink))
+				continue;
+
+			if (p->connect && p->sink->power && is_sst_dapm_widget(p->sink)) {
+				struct sst_ids *ids = p->sink->priv;
+
+				pr_debug("send gains for widget=%s\n", p->sink->name);
+				sst_set_pipe_gain(ids, sst, mute);
+			}
+		}
+	} else {
+		pr_debug("Stream name=%s\n", dai->capture_widget->name);
+		w = dai->capture_widget;
+		list_for_each_entry(p, &w->sources, list_sink) {
+			if (p->connected && !p->connected(w, p->source))
+				continue;
+
+			if (p->connect && p->source->power && is_sst_dapm_widget(p->source)) {
+				struct sst_ids *ids = p->source->priv;
+
+				pr_debug("send gain for widget=%s\n", p->source->name);
+				sst_set_pipe_gain(ids, sst, mute);
+			}
+		}
+	}
+	return 0;
+}
+
+static int sst_fill_module_list(struct snd_kcontrol *kctl,
+	 struct snd_soc_dapm_widget *w, int type)
+{
+	struct module *module = NULL;
+	struct sst_ids *ids = w->priv;
+
+	module = devm_kzalloc(w->platform->dev, sizeof(*module), GFP_KERNEL);
+	if (!module) {
+		pr_err("kzalloc block failed\n");
+		return -ENOMEM;
+	}
+
+	if (type == SST_MODULE_GAIN) {
+		struct sst_gain_mixer_control *mc = (void *)kctl->private_value;
+
+		mc->w = w;
+		module->kctl = kctl;
+		list_add_tail(&module->node, &ids->gain_list);
+	} else if (type == SST_MODULE_ALGO) {
+		struct sst_algo_control *bc = (void *)kctl->private_value;
+
+		bc->w = w;
+		module->kctl = kctl;
+		list_add_tail(&module->node, &ids->algo_list);
+	}
+
+	return 0;
+}
+
+static int sst_fill_widget_module_info(struct snd_soc_dapm_widget *w,
+	struct snd_soc_platform *platform)
+{
+	struct snd_kcontrol *kctl;
+	int index, ret = 0;
+	struct snd_card *card = platform->card->snd_card;
+	char *idx;
+
+	down_read(&card->controls_rwsem);
+
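+	/*
+	 * Match each card control to this widget by name prefix: the text
+	 * up to the control's first space must equal the widget name; the
+	 * "volume"/"params"/"mute" suffix selects how the match is filed.
+	 */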
+	list_for_each_entry(kctl, &card->controls, list) {
+		idx = strstr(kctl->id.name, " ");
+		if (idx == NULL)
+			continue;
+		index = strlen(kctl->id.name) - strlen(idx);
+		if (strstr(kctl->id.name, "volume") &&
+		    !strncmp(kctl->id.name, w->name, index))
+			ret = sst_fill_module_list(kctl, w, SST_MODULE_GAIN);
+		else if (strstr(kctl->id.name, "params") &&
+			 !strncmp(kctl->id.name, w->name, index))
+			ret = sst_fill_module_list(kctl, w, SST_MODULE_ALGO);
+		else if (strstr(kctl->id.name, "mute") &&
+			 !strncmp(kctl->id.name, w->name, index)) {
+			struct sst_gain_mixer_control *mc = (void *)kctl->private_value;
+			mc->w = w;
+		}
+		if (ret < 0) {
+			up_read(&card->controls_rwsem);
+			return ret;
+		}
+	}
+	up_read(&card->controls_rwsem);
+	return 0;
+}
+
+static int sst_map_modules_to_pipe(struct snd_soc_platform *platform)
+{
+	struct snd_soc_dapm_widget *w;
+	struct snd_soc_dapm_context *dapm = &platform->dapm;
+	int ret = 0;
+
+	list_for_each_entry(w, &dapm->card->widgets, list) {
+		if (w->platform && is_sst_dapm_widget(w) && (w->priv)) {
+			struct sst_ids *ids = w->priv;
+
+			pr_debug("widget type=%d name=%s", w->id, w->name);
+			INIT_LIST_HEAD(&ids->algo_list);
+			INIT_LIST_HEAD(&ids->gain_list);
+			ret = sst_fill_widget_module_info(w, platform);
+			if (ret < 0)
+				return ret;
+		}
+	}
+	return 0;
+}
+
+int sst_dsp_init_v2_dpcm(struct snd_soc_platform *platform)
+{
+	int i, ret = 0;
+	struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+
+	sst->byte_stream = devm_kzalloc(platform->dev,
+					SST_MAX_BIN_BYTES, GFP_KERNEL);
+	if (!sst->byte_stream) {
+		pr_err("%s: kzalloc failed\n", __func__);
+		return -ENOMEM;
+	}
+	sst->widget = devm_kzalloc(platform->dev,
+				   SST_NUM_WIDGETS * sizeof(*sst->widget),
+				   GFP_KERNEL);
+	if (!sst->widget) {
+		pr_err("%s: kzalloc failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	snd_soc_dapm_new_controls(&platform->dapm, sst_dapm_widgets,
+			ARRAY_SIZE(sst_dapm_widgets));
+	snd_soc_dapm_add_routes(&platform->dapm, intercon,
+			ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_widgets(&platform->dapm);
+
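+	/* Start every gain cell muted, at the default volume and ramp duration. */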
+	for (i = 0; i < SST_NUM_GAINS; i++) {
+		sst_gains[i].mute = SST_GAIN_MUTE_DEFAULT;
+		sst_gains[i].l_gain = SST_GAIN_VOLUME_DEFAULT;
+		sst_gains[i].r_gain = SST_GAIN_VOLUME_DEFAULT;
+		sst_gains[i].ramp_duration = SST_GAIN_RAMP_DURATION_DEFAULT;
+	}
+
+	snd_soc_add_platform_controls(platform, sst_gain_controls,
+			ARRAY_SIZE(sst_gain_controls));
+
+	snd_soc_add_platform_controls(platform, sst_algo_controls,
+			ARRAY_SIZE(sst_algo_controls));
+	snd_soc_add_platform_controls(platform, sst_slot_controls,
+			ARRAY_SIZE(sst_slot_controls));
+	snd_soc_add_platform_controls(platform, sst_mux_controls,
+			ARRAY_SIZE(sst_mux_controls));
+
+	/* initialize the names of the probe points */
+	for (i = 0; i < SST_NUM_PROBE_CONNECTION_PTS; i++)
+		sst_probe_enum_texts[i] = sst_probes[i].name;
+
+	snd_soc_add_platform_controls(platform, sst_probe_controls,
+			ARRAY_SIZE(sst_probe_controls));
+
+	ret = sst_map_modules_to_pipe(platform);
+
+	return ret;
+}
diff --git a/sound/soc/intel/platform-libs/ipc_lib.h b/sound/soc/intel/platform-libs/ipc_lib.h
new file mode 100644
index 0000000..6ba50c4
--- /dev/null
+++ b/sound/soc/intel/platform-libs/ipc_lib.h
@@ -0,0 +1,33 @@
+/*
+ *  ipc_lib.h - Intel MID Platform driver header file
+ *
+ *  Copyright (C) 2013 Intel Corp
+ *  Author: Lakshmi N Vinnakota <lakshmi.n.vinnakota@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+
+#ifndef __PLATFORMDRV_IPC_LIB_H__
+#define __PLATFORMDRV_IPC_LIB_H__
+
+struct sst_algo_int_control_v2;
+
+void sst_create_compr_vol_ipc(char *bytes, unsigned int type,
+		struct sst_algo_int_control_v2 *kdata);
+#endif
diff --git a/sound/soc/intel/platform-libs/ipc_lib_v2.c b/sound/soc/intel/platform-libs/ipc_lib_v2.c
new file mode 100644
index 0000000..105c756
--- /dev/null
+++ b/sound/soc/intel/platform-libs/ipc_lib_v2.c
@@ -0,0 +1,116 @@
+/*
+ *  ipc_lib_v2.c - Intel MID Platform Driver IPC wrappers for mrfld
+ *
+ *  Copyright (C) 2013 Intel Corp
+ *  Author: Lakshmi N Vinnakota <lakshmi.n.vinnakota@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+#include <sound/soc.h>
+#include <asm/platform_sst_audio.h>
+#include "../platform_ipc_v2.h"
+#include "../sst_platform.h"
+#include "../sst_platform_pvt.h"
+
+static inline void sst_fill_dsp_hdr(struct ipc_dsp_hdr *hdr, u8 index, u8 pipe,
+	u16 module, u16 cmd, u16 len)
+{
+	hdr->mod_index_id = index;
+	hdr->pipe_id = pipe;
+	hdr->mod_id = module;
+	hdr->cmd_id = cmd;
+	hdr->length = len;
+}
+
+static inline void sst_fill_byte_control_hdr(struct snd_sst_bytes_v2 *hdr,
+	u8 type, u8 msg, u8 block, u8 task, u8 pipe, u16 len)
+{
+	hdr->type = type;
+	hdr->ipc_msg = msg;
+	hdr->block = block;
+	hdr->task_id = task;
+	hdr->pipe_id = pipe;
+	hdr->rsvd = 0;
+	hdr->len = len;
+}
+
+#define SST_GAIN_V2_TIME_CONST 50
+
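+/*
+ * Layout sketch of the byte stream assembled below (sizes are those of
+ * the structs in platform_ipc_v2.h; the gain block is appended for SET
+ * requests only):
+ *
+ *	| snd_sst_bytes_v2 | ipc_dsp_hdr | snd_sst_gain_v2 |
+ *
+ * For a GET, dsp_hdr.length is set to the 6-byte reply format instead.
+ */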
+void sst_create_compr_vol_ipc(char *bytes, unsigned int type,
+	struct sst_algo_int_control_v2 *kdata)
+{
+	struct snd_sst_gain_v2 gain1;
+	struct snd_sst_bytes_v2 byte_hdr;
+	struct ipc_dsp_hdr dsp_hdr;
+	char *tmp;
+	u16 len;
+	u8 ipc_msg;
+
+	/* Fill gain params */
+	gain1.gain_cell_num = 1; /* num of gain cells to modify */
+	gain1.cell_nbr_idx = kdata->instance_id; /* instance index */
+	gain1.cell_path_idx = kdata->pipe_id; /* pipe id */
+	gain1.module_id = kdata->module_id; /* module id */
+	gain1.left_cell_gain = kdata->value; /* left gain value in dB */
+	gain1.right_cell_gain = kdata->value; /* same value as left, in dB */
+	/* set to the default recommended value */
+	gain1.gain_time_const = SST_GAIN_V2_TIME_CONST;
+
+	/* fill dsp header */
+	/* Get params format for vol ctrl lib, size 6 bytes :
+	 * u16 left_gain, u16 right_gain, u16 ramp
+	 */
+	memset(&dsp_hdr, 0, sizeof(dsp_hdr));
+	if (type == SND_SST_BYTES_GET) {
+		len = 6;
+		ipc_msg = IPC_GET_PARAMS;
+	} else {
+		len = sizeof(gain1);
+		ipc_msg = IPC_SET_PARAMS;
+	}
+
+	sst_fill_dsp_hdr(&dsp_hdr, 0, kdata->pipe_id, kdata->module_id,
+				IPC_IA_SET_GAIN_MRFLD, len);
+
+	/* fill byte control header */
+	memset(&byte_hdr, 0, sizeof(byte_hdr));
+	len = sizeof(dsp_hdr) + dsp_hdr.length;
+	sst_fill_byte_control_hdr(&byte_hdr, type, ipc_msg, 1,
+			SST_TASK_ID_MEDIA, kdata->pipe_id, len);
+
+	/* fill complete byte stream as ipc payload */
+	tmp = bytes;
+	memcpy(tmp, &byte_hdr, sizeof(byte_hdr));
+	memcpy((tmp + sizeof(byte_hdr)), &dsp_hdr, sizeof(dsp_hdr));
+	if (type != SND_SST_BYTES_GET)
+		memcpy((tmp + sizeof(byte_hdr) + sizeof(dsp_hdr)), &gain1,
+			sizeof(gain1));
+#ifdef DEBUG_HEX_DUMP_BYTES
+	print_hex_dump_bytes(__func__, DUMP_PREFIX_NONE, bytes, 32);
+#endif
+}
diff --git a/sound/soc/intel/platform-libs/sst_widgets.h b/sound/soc/intel/platform-libs/sst_widgets.h
new file mode 100644
index 0000000..d39ec89
--- /dev/null
+++ b/sound/soc/intel/platform-libs/sst_widgets.h
@@ -0,0 +1,342 @@
+/*
+ *  sst_widgets.h - Intel helpers to generate FW widgets
+ *
+ *  Copyright (C) 2013 Intel Corp
+ *  Author: Omair Mohammed Abdullah <omair.m.abdullah@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#ifndef __SST_WIDGETS_H__
+#define __SST_WIDGETS_H__
+
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
+#define SST_MODULE_GAIN 1
+#define SST_MODULE_ALGO 2
+
+#define SST_FMT_MONO 0
+#define SST_FMT_STEREO 3
+
+/* physical SSP numbers */
+enum {
+	SST_SSP0 = 0,
+	SST_SSP1,
+	SST_SSP2,
+	SST_SSP_LAST = SST_SSP2,
+};
+
+#define SST_NUM_SSPS		(SST_SSP_LAST + 1)	/* physical SSPs */
+#define SST_MAX_SSP_MUX		2			/* single SSP muxed between pipes */
+#define SST_MAX_SSP_DOMAINS	2			/* domains present in each pipe */
+
+struct module {
+	struct snd_kcontrol *kctl;
+	struct list_head node;
+};
+
+struct sst_ssp_config {
+	u8 ssp_id;
+	u8 bits_per_slot;
+	u8 slots;
+	u8 ssp_mode;
+	u8 pcm_mode;
+	u8 duplex;
+	u8 ssp_protocol;
+	u8 fs_frequency;
+	u8 active_slot_map;
+	u8 start_delay;
+	u16 fs_width;
+};
+
+struct sst_ssp_cfg {
+	const u8 ssp_number;
+	const int *mux_shift;
+	const int (*domain_shift)[SST_MAX_SSP_MUX];
+	const struct sst_ssp_config (*ssp_config)[SST_MAX_SSP_MUX][SST_MAX_SSP_DOMAINS];
+};
+
+struct sst_ids {
+	u16 location_id;
+	u16 module_id;
+	u8  task_id;
+	u8  format;
+	u8  reg;
+	struct list_head algo_list;
+	struct list_head gain_list;
+	const struct sst_ssp_cfg *ssp;
+};
+
+#define SST_SSP_AIF_IN(wname, wevent, wssp_cfg)						\
+{	.id = snd_soc_dapm_aif_in, .name = wname, .sname = NULL,			\
+	.reg = SND_SOC_NOPM, .shift = 0, .invert = 0,					\
+	.event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD,	\
+	.priv = (void *)&(struct sst_ids) { .ssp = &wssp_cfg, }				\
+}
+
+#define SST_SSP_AIF_OUT(wname, wevent, wssp_cfg)					\
+{	.id = snd_soc_dapm_aif_out, .name = wname, .sname = NULL,			\
+	.reg = SND_SOC_NOPM, .shift = 0, .invert = 0,					\
+	.event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD,	\
+	.priv = (void *)&(struct sst_ids) { .ssp = &wssp_cfg, }				\
+}
+
+#define SST_SSP_INPUT(wname, wevent, wssp_cfg)						\
+{	.id = snd_soc_dapm_input, .name = wname, .sname = NULL,				\
+	.reg = SND_SOC_NOPM, .shift = 0, .invert = 0,					\
+	.event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD,	\
+	.priv = (void *)&(struct sst_ids) { .ssp = &wssp_cfg, }				\
+}
+
+#define SST_SSP_OUTPUT(wname, wevent, wssp_cfg)						\
+{	.id = snd_soc_dapm_output, .name = wname, .sname = NULL,			\
+	.reg = SND_SOC_NOPM, .shift = 0, .invert = 0,					\
+	.event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD,	\
+	.priv = (void *)&(struct sst_ids) { .ssp = &wssp_cfg, }				\
+}
+
+#define SST_PATH(wname, wtask, wloc_id, wevent, wflags)					\
+{	.id = snd_soc_dapm_pga, .name = wname, .reg = SND_SOC_NOPM, .shift = 0,		\
+	.invert = 0, .kcontrol_news = NULL, .num_kcontrols = 0,				\
+	.event = wevent, .event_flags = wflags,						\
+	.priv = (void *)&(struct sst_ids) { .task_id = wtask, .location_id = wloc_id, }	\
+}
+
+#define SST_PATH_MEDIA_LOOP(wname, wtask, wloc_id, wformat, wevent, wflags)             \
+{	.id = snd_soc_dapm_pga, .name = wname, .reg = SND_SOC_NOPM, .shift = 0,         \
+	.invert = 0, .kcontrol_news = NULL, .num_kcontrols = 0,                         \
+	.event = wevent, .event_flags = wflags,                                         \
+	.priv = (void *)&(struct sst_ids) { .task_id = wtask, .location_id = wloc_id,	\
+					    .format = wformat,}				\
+}
+
+/* output is triggered before input */
+#define SST_PATH_INPUT(name, task_id, loc_id, event)					\
+	SST_PATH(name, task_id, loc_id, event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD)
+
+#define SST_PATH_OUTPUT(name, task_id, loc_id, event)					\
+	SST_PATH(name, task_id, loc_id, event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD)
+
+#define SST_PATH_MEDIA_LOOP_OUTPUT(name, task_id, loc_id, format, event)		\
+	SST_PATH_MEDIA_LOOP(name, task_id, loc_id, format, event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD)
+
+#define SST_SWM_MIXER(wname, wreg, wtask, wloc_id, wcontrols, wevent)			\
+{	.id = snd_soc_dapm_mixer, .name = wname, .reg = SND_SOC_NOPM, .shift = 0,	\
+	.invert = 0, .kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols),\
+	.event = wevent, .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD |	\
+					SND_SOC_DAPM_POST_REG,				\
+	.priv = (void *)&(struct sst_ids) { .task_id = wtask, .location_id = wloc_id,	\
+					    .reg = wreg }				\
+}
+
+enum sst_gain_kcontrol_type {
+	SST_GAIN_TLV,
+	SST_GAIN_MUTE,
+	SST_GAIN_RAMP_DURATION,
+};
+
+struct sst_gain_mixer_control {
+	bool stereo;
+	enum sst_gain_kcontrol_type type;
+	struct sst_gain_value *gain_val;
+	int max;
+	int min;
+	u16 instance_id;
+	u16 module_id;
+	u16 pipe_id;
+	u16 task_id;
+	char pname[44];
+	struct snd_soc_dapm_widget *w;
+};
+
+struct sst_gain_value {
+	u16 ramp_duration;
+	s16 l_gain;
+	s16 r_gain;
+	bool mute;
+};
+
+#define SST_GAIN_VOLUME_DEFAULT		(-1440)
+#define SST_GAIN_RAMP_DURATION_DEFAULT	5 /* timeconstant */
+#define SST_GAIN_MUTE_DEFAULT		true
+
+#define SST_GAIN_KCONTROL_TLV(xname, xhandler_get, xhandler_put, \
+			      xmod, xpipe, xinstance, xtask, tlv_array, xgain_val, \
+			      xmin, xmax, xpname) \
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+	.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
+		  SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+	.tlv.p = (tlv_array), \
+	.info = sst_gain_ctl_info,\
+	.get = xhandler_get, .put = xhandler_put, \
+	.private_value = (unsigned long)&(struct sst_gain_mixer_control) \
+	{ .stereo = true, .max = xmax, .min = xmin, .type = SST_GAIN_TLV, \
+	  .module_id = xmod, .pipe_id = xpipe, .task_id = xtask,\
+	  .instance_id = xinstance, .gain_val = xgain_val, .pname = xpname}
+
+#define SST_GAIN_KCONTROL_INT(xname, xhandler_get, xhandler_put, \
+			      xmod, xpipe, xinstance, xtask, xtype, xgain_val, \
+			      xmin, xmax, xpname) \
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+	.info = sst_gain_ctl_info, \
+	.get = xhandler_get, .put = xhandler_put, \
+	.private_value = (unsigned long)&(struct sst_gain_mixer_control) \
+	{ .stereo = false, .max = xmax, .min = xmin, .type = xtype, \
+	  .module_id = xmod, .pipe_id = xpipe, .task_id = xtask,\
+	  .instance_id = xinstance, .gain_val = xgain_val, .pname =  xpname}
+
+#define SST_GAIN_KCONTROL_BOOL(xname, xhandler_get, xhandler_put,\
+			       xmod, xpipe, xinstance, xtask, xgain_val, xpname) \
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+	.info = snd_soc_info_bool_ext, \
+	.get = xhandler_get, .put = xhandler_put, \
+	.private_value = (unsigned long)&(struct sst_gain_mixer_control) \
+	{ .stereo = false, .type = SST_GAIN_MUTE, \
+	  .module_id = xmod, .pipe_id = xpipe, .task_id = xtask,\
+	  .instance_id = xinstance, .gain_val = xgain_val, .pname = xpname}
+
+#define SST_CONTROL_NAME(xpname, xmname, xinstance, xtype) \
+	xpname " " xmname " " #xinstance " " xtype
+
+#define SST_COMBO_CONTROL_NAME(xpname, xmname, xinstance, xtype, xsubmodule) \
+	xpname " " xmname " " #xinstance " " xtype " " xsubmodule
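+/* e.g. "speech_out ul_module 0 params aec" for a combo byte control */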
+
+/*
+ * 3 Controls for each Gain module
+ * e.g.	- pcm0_in gain 0 volume
+ *	- pcm0_in gain 0 rampduration
+ *	- pcm0_in gain 0 mute
+ */
+#define SST_GAIN_KCONTROLS(xpname, xmname, xmin_gain, xmax_gain, xmin_tc, xmax_tc, \
+			   xhandler_get, xhandler_put, \
+			   xmod, xpipe, xinstance, xtask, tlv_array, xgain_val) \
+	{ SST_GAIN_KCONTROL_INT(SST_CONTROL_NAME(xpname, xmname, xinstance, "rampduration"), \
+		xhandler_get, xhandler_put, xmod, xpipe, xinstance, xtask, SST_GAIN_RAMP_DURATION, \
+		xgain_val, xmin_tc, xmax_tc, xpname) }, \
+	{ SST_GAIN_KCONTROL_BOOL(SST_CONTROL_NAME(xpname, xmname, xinstance, "mute"), \
+		xhandler_get, xhandler_put, xmod, xpipe, xinstance, xtask, \
+		xgain_val, xpname) }, \
+	{ SST_GAIN_KCONTROL_TLV(SST_CONTROL_NAME(xpname, xmname, xinstance, "volume"), \
+		xhandler_get, xhandler_put, xmod, xpipe, xinstance, xtask, tlv_array, \
+		xgain_val, xmin_gain, xmax_gain, xpname) }
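+/*
+ * For example, SST_GAIN("pcm0_in", SST_PATH_INDEX_PCM0_IN, SST_TASK_SBA, 0,
+ * &sst_gains[4]) in the platform driver expands via SST_GAIN_KCONTROLS to
+ * the three kcontrols "pcm0_in gain 0 rampduration/mute/volume".
+ */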
+
+#define SST_GAIN_TC_MIN		5
+#define SST_GAIN_TC_MAX		5000
+#define SST_GAIN_MIN_VALUE	-1440 /* in 0.1 dB units */
+#define SST_GAIN_MAX_VALUE	360
+
+enum sst_algo_kcontrol_type {
+	SST_ALGO_PARAMS,
+	SST_ALGO_BYPASS,
+};
+
+struct sst_algo_control {
+	enum sst_algo_kcontrol_type type;
+	int max;
+	u16 module_id;
+	u16 pipe_id;
+	u16 instance_id;
+	u16 task_id;
+	u16 cmd_id;
+	bool bypass;
+	unsigned char *params;
+	char pname[44];
+	struct snd_soc_dapm_widget *w;
+};
+
+#define SST_ALGO_KCONTROL_BYTES(xpname, xmname, xcount, xmod, \
+				xpipe, xinstance, xtask, xcmd) \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,\
+	.name = SST_CONTROL_NAME(xpname, xmname, xinstance, "params"), \
+	.info = sst_algo_bytes_ctl_info, \
+	.get = sst_algo_control_get, .put = sst_algo_control_set, \
+	.private_value = (unsigned long)&(struct sst_algo_control) \
+	{.max = xcount, .type = SST_ALGO_PARAMS, .module_id = xmod, .pname = xpname, \
+	.pipe_id = xpipe, .instance_id = xinstance, .task_id = xtask, .cmd_id = xcmd} }
+
+#define SST_ALGO_KCONTROL_BOOL(xpname, xmname, xmod, xpipe, xinstance, xtask) \
+{      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+	.name = SST_CONTROL_NAME(xpname, xmname, xinstance, "bypass"), \
+	.info = snd_soc_info_bool_ext, \
+	.get = sst_algo_control_get, .put = sst_algo_control_set, \
+	.private_value = (unsigned long)&(struct sst_algo_control) \
+	{.type = SST_ALGO_BYPASS, .module_id = xmod, .pipe_id = xpipe, .pname = xpname, \
+	.task_id = xtask, .instance_id = xinstance, .bypass = 0 } }
+
+#define SST_ALGO_BYPASS_PARAMS(xpname, xmname, xcount, xmod, xpipe, \
+				xinstance, xtask, xcmd)  \
+	SST_ALGO_KCONTROL_BOOL(xpname, xmname, xmod, xpipe, xinstance, xtask), \
+	SST_ALGO_KCONTROL_BYTES(xpname, xmname, xcount, xmod, xpipe, xinstance, xtask, xcmd)
+
+#define SST_COMBO_ALGO_KCONTROL_BYTES(xpname, xmname, xsubmod, xcount, xmod, \
+				      xpipe, xinstance, xtask, xcmd) \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,\
+	.name = SST_COMBO_CONTROL_NAME(xpname, xmname, xinstance, "params", xsubmod), \
+	.info = sst_algo_bytes_ctl_info, \
+	.get = sst_algo_control_get, .put = sst_algo_control_set, \
+	.private_value = (unsigned long)&(struct sst_algo_control) \
+	{.max = xcount, .type = SST_ALGO_PARAMS, .module_id = xmod, .pname = xpname, \
+	.pipe_id = xpipe, .instance_id = xinstance, .task_id = xtask, .cmd_id = xcmd} }
+
+/* only 4 slots/channels are supported at the moment */
+#define SST_SSP_SLOT_ENUM(s_ch_no, is_tx, xtexts) \
+	(struct soc_enum){ .reg = s_ch_no, .reg2 = is_tx, .max = 4+1, .texts = xtexts, }
+
+#define SST_SLOT_CTL_NAME(xpname, xmname, s_ch_name) \
+	xpname " " xmname " " s_ch_name
+
+#define SST_SSP_SLOT_CTL(xpname, xmname, s_ch_name, s_ch_no, is_tx, xtexts, xget, xput) \
+	SOC_DAPM_ENUM_EXT(SST_SLOT_CTL_NAME(xpname, xmname, s_ch_name), \
+			  SST_SSP_SLOT_ENUM(s_ch_no, is_tx, xtexts), \
+			  xget, xput)
+
+#define SST_MUX_CTL_NAME(xpname, xinstance) \
+	xpname " " #xinstance
+
+#define SST_SSP_MUX_ENUM(xreg, xshift, xtexts) \
+	(struct soc_enum){ .reg = xreg, .texts = xtexts, .shift_l = xshift, \
+			   .shift_r = xshift, .max = ARRAY_SIZE(xtexts), }
+
+#define SST_SSP_MUX_CTL(xpname, xinstance, xreg, xshift, xtexts, xget, xput) \
+	SOC_DAPM_ENUM_EXT(SST_MUX_CTL_NAME(xpname, xinstance), \
+			  SST_SSP_MUX_ENUM(xreg, xshift, xtexts), \
+			  xget, xput)
+
+struct sst_probe_value {
+	unsigned int val;
+	const struct soc_enum *p_enum;
+};
+
+#define SST_PROBE_CTL_NAME(dir, num, type) \
+	dir #num " " type
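+/* e.g. SST_PROBE_CTL("probe out", 0) yields "probe out0 connection" */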
+
+#define SST_PROBE_ENUM(xname, xenum, xhandler_get, xhandler_put) \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+	.info = sst_probe_enum_info, \
+	.get = xhandler_get, .put = xhandler_put, \
+	.private_value = (unsigned long)&(struct sst_probe_value) \
+	{ .val = 0, .p_enum = &xenum } }
+
+#endif
diff --git a/sound/soc/intel/platform_ipc_v2.h b/sound/soc/intel/platform_ipc_v2.h
new file mode 100644
index 0000000..a5db16b
--- /dev/null
+++ b/sound/soc/intel/platform_ipc_v2.h
@@ -0,0 +1,699 @@
+/*
+*  platform_ipc_v2.h - Intel MID Platform driver FW IPC definitions
+*
+*  Copyright (C) 2008-10 Intel Corporation
+*  Author:	Vinod Koul <vinod.koul@intel.com>
+*		Harsha Priya <priya.harsha@intel.com>
+*		Dharageswari R <dharageswari.r@intel.com>
+*		KP Jeeja <jeeja.kp@intel.com>
+*  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*
+*  This program is free software; you can redistribute it and/or modify
+*  it under the terms of the GNU General Public License as published by
+*  the Free Software Foundation; version 2 of the License.
+*
+*  This program is distributed in the hope that it will be useful, but
+*  WITHOUT ANY WARRANTY; without even the implied warranty of
+*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+*  General Public License for more details.
+*
+*  You should have received a copy of the GNU General Public License along
+*  with this program; if not, write to the Free Software Foundation, Inc.,
+*  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+*
+* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*
+*  This driver exposes the audio engine functionalities to the ALSA
+*	and middleware.
+*  This file has definitions shared between the firmware and driver
+*/
+#ifndef __PLATFORM_IPC_V2_H__
+#define __PLATFORM_IPC_V2_H__
+
+#define MAX_DBG_RW_BYTES 80
+#define MAX_NUM_SCATTER_BUFFERS 8
+#define MAX_LOOP_BACK_DWORDS 8
+/* IPC base address and mailbox, timestamp offsets */
+#define SST_MAILBOX_SIZE 0x0400
+#define SST_MAILBOX_SEND 0x0000
+#define SST_TIME_STAMP 0x1800
+#define SST_TIME_STAMP_MRFLD 0x680
+#define SST_TIME_STAMP_BYT 0x800
+#define SST_RESERVED_OFFSET 0x1A00
+#define SST_SCU_LPE_MAILBOX 0x1000
+#define SST_LPE_SCU_MAILBOX 0x1400
+#define SST_SCU_LPE_LOG_BUF (SST_SCU_LPE_MAILBOX+16)
+#define PROCESS_MSG 0x80
+
+/* Message ID's for IPC messages */
+/* Bits B7: SST or IA/SC ; B6-B4: Msg Category; B3-B0: Msg Type */
+
+/* I2L Firmware/Codec Download msgs */
+#define IPC_IA_PREP_LIB_DNLD 0x01
+#define IPC_IA_LIB_DNLD_CMPLT 0x02
+#define IPC_IA_GET_FW_VERSION 0x04
+#define IPC_IA_GET_FW_BUILD_INF 0x05
+#define IPC_IA_GET_FW_INFO 0x06
+#define IPC_IA_GET_FW_CTXT 0x07
+#define IPC_IA_SET_FW_CTXT 0x08
+#define IPC_IA_PREPARE_SHUTDOWN 0x31
+/* I2L Codec Config/control msgs */
+#define IPC_PREP_D3 0x10
+#define IPC_IA_SET_CODEC_PARAMS 0x10
+#define IPC_IA_GET_CODEC_PARAMS 0x11
+#define IPC_IA_SET_PPP_PARAMS 0x12
+#define IPC_IA_GET_PPP_PARAMS 0x13
+#define IPC_SST_PERIOD_ELAPSED_MRFLD 0xA
+#define IPC_IA_ALG_PARAMS 0x1A
+#define IPC_IA_TUNING_PARAMS 0x1B
+#define IPC_IA_SET_RUNTIME_PARAMS 0x1C
+#define IPC_IA_SET_PARAMS 0x1
+#define IPC_IA_GET_PARAMS 0x2
+
+#define IPC_EFFECTS_CREATE 0xE
+#define IPC_EFFECTS_DESTROY 0xF
+
+/* I2L Stream config/control msgs */
+#define IPC_IA_ALLOC_STREAM_MRFLD 0x2
+#define IPC_IA_ALLOC_STREAM 0x20 /* Allocate a stream ID */
+#define IPC_IA_FREE_STREAM_MRFLD 0x03
+#define IPC_IA_FREE_STREAM 0x21 /* Free the stream ID */
+#define IPC_IA_SET_STREAM_PARAMS 0x22
+#define IPC_IA_SET_STREAM_PARAMS_MRFLD 0x12
+#define IPC_IA_GET_STREAM_PARAMS 0x23
+#define IPC_IA_PAUSE_STREAM 0x24
+#define IPC_IA_PAUSE_STREAM_MRFLD 0x4
+#define IPC_IA_RESUME_STREAM 0x25
+#define IPC_IA_RESUME_STREAM_MRFLD 0x5
+#define IPC_IA_DROP_STREAM 0x26
+#define IPC_IA_DROP_STREAM_MRFLD 0x07
+#define IPC_IA_DRAIN_STREAM 0x27 /* Short msg with str_id */
+#define IPC_IA_DRAIN_STREAM_MRFLD 0x8
+#define IPC_IA_CONTROL_ROUTING 0x29
+#define IPC_IA_VTSV_UPDATE_MODULES 0x20
+#define IPC_IA_VTSV_DETECTED 0x21
+
+#define IPC_IA_START_STREAM_MRFLD 0x06
+#define IPC_IA_START_STREAM 0x30 /* Short msg with str_id */
+
+#define IPC_IA_SET_GAIN_MRFLD 0x21
+/* Debug msgs */
+#define IPC_IA_DBG_MEM_READ 0x40
+#define IPC_IA_DBG_MEM_WRITE 0x41
+#define IPC_IA_DBG_LOOP_BACK 0x42
+#define IPC_IA_DBG_LOG_ENABLE 0x45
+#define IPC_IA_DBG_SET_PROBE_PARAMS 0x47
+
+/* L2I Firmware/Codec Download msgs */
+#define IPC_IA_FW_INIT_CMPLT 0x81
+#define IPC_IA_FW_INIT_CMPLT_MRFLD 0x01
+#define IPC_IA_FW_ASYNC_ERR_MRFLD 0x11
+
+/* L2I Codec Config/control msgs */
+#define IPC_SST_FRAGMENT_ELPASED 0x90 /* Request IA more data */
+
+#define IPC_SST_BUF_UNDER_RUN 0x92 /* PB Under run and stopped */
+#define IPC_SST_BUF_OVER_RUN 0x93 /* CAP Over run and stopped */
+#define IPC_SST_DRAIN_END 0x94 /* PB Drain complete and stopped */
+#define IPC_SST_CHNGE_SSP_PARAMS 0x95 /* PB SSP parameters changed */
+#define IPC_SST_STREAM_PROCESS_FATAL_ERR 0x96/* error in processing a stream */
+#define IPC_SST_PERIOD_ELAPSED 0x97 /* period elapsed */
+
+#define IPC_SST_ERROR_EVENT 0x99 /* Buffer over run occurred */
+/* L2S messages */
+#define IPC_SC_DDR_LINK_UP 0xC0
+#define IPC_SC_DDR_LINK_DOWN 0xC1
+#define IPC_SC_SET_LPECLK_REQ 0xC2
+#define IPC_SC_SSP_BIT_BANG 0xC3
+
+/* L2I Error reporting msgs */
+#define IPC_IA_MEM_ALLOC_FAIL 0xE0
+#define IPC_IA_PROC_ERR 0xE1	/* error in processing a stream;
+				 * can be used by playback and
+				 * capture modules */
+
+/* L2I Debug msgs */
+#define IPC_IA_PRINT_STRING 0xF0
+
+/* Buffer under-run */
+#define IPC_IA_BUF_UNDER_RUN_MRFLD 0x0B
+
+/* Mrfld specific defines:
+ * For asynchronous messages (INIT_CMPLT, PERIOD_ELAPSED, ASYNC_ERROR)
+ * received from FW, the format is:
+ *  - IPC High: pvt_id is set to zero. Always short message.
+ *  - msg_id is in lower 16-bits of IPC low payload.
+ *  - pipe_id is in higher 16-bits of IPC low payload for period_elapsed.
+ *  - error id is in higher 16-bits of IPC low payload for async errors.
+ */
+#define SST_ASYNC_DRV_ID 0
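+/* A sketch of decoding the low payload of such an async message, per the
+ * note above (illustrative only):
+ *	msg_id  = header_low_payload & 0xffff;
+ *	pipe_id = header_low_payload >> 16;	(period_elapsed)
+ *	err_id  = header_low_payload >> 16;	(async errors)
+ */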
+
+/* The command response or acknowledgement to any IPC message carries the
+ * same message ID and stream ID as the message being acknowledged.
+ * There is no specific ack message ID; the data field carries the
+ * response.
+ */
+
+/* SCU IPC for resetting & power gating the LPE through SCU */
+#define IPC_SCU_LPE_RESET 0xA3
+
+enum ackData {
+	IPC_ACK_SUCCESS = 0,
+	IPC_ACK_FAILURE,
+};
+
+enum ipc_ia_msg_id {
+	IPC_CMD = 1,		/*!< Task Control message ID */
+	IPC_SET_PARAMS = 2,	/*!< Task Set param message ID */
+	IPC_GET_PARAMS = 3,	/*!< Task Get param message ID */
+	IPC_INVALID = 0xFF,	/*!< Invalid message ID */
+};
+
+enum sst_codec_types {
+	/*  AUDIO/MUSIC	CODEC Type Definitions */
+	SST_CODEC_TYPE_UNKNOWN = 0,
+	SST_CODEC_TYPE_PCM,	/* Pass through Audio codec */
+	SST_CODEC_TYPE_MP3,
+	SST_CODEC_TYPE_MP24,
+	SST_CODEC_TYPE_AAC,
+	SST_CODEC_TYPE_AACP,
+	SST_CODEC_TYPE_eAACP,
+	SST_CODEC_TYPE_WMA9,
+	SST_CODEC_TYPE_WMA10,
+	SST_CODEC_TYPE_WMA10P,
+	SST_CODEC_TYPE_RA,
+	SST_CODEC_TYPE_DDAC3,
+	SST_CODEC_TYPE_STEREO_TRUE_HD,
+	SST_CODEC_TYPE_STEREO_HD_PLUS,
+
+	/*  VOICE CODEC Type Definitions */
+	SST_CODEC_TYPE_VOICE_PCM = 0x21, /* Pass through voice codec */
+};
+
+enum sst_algo_types {
+	SST_ALGO_SRC = 0x64,
+	SST_ALGO_MIXER = 0x65,
+	SST_ALGO_DOWN_MIXER = 0x66,
+	SST_ALGO_VTSV = 0x73,
+	SST_ALGO_AUDCLASSIFIER = 0x80,
+	SST_ALGO_VOLUME_CONTROL = 0x92,
+	SST_ALGO_GEQ = 0x99,
+};
+
+enum stream_type {
+	SST_STREAM_TYPE_NONE = 0,
+	SST_STREAM_TYPE_MUSIC = 1,
+	SST_STREAM_TYPE_NORMAL = 2,
+	SST_STREAM_TYPE_PROBE = 3,
+	SST_STREAM_TYPE_LONG_PB = 4,
+	SST_STREAM_TYPE_LOW_LATENCY = 5,
+};
+
+enum sst_error_codes {
+	/* Error code,response to msgId: Description */
+	/* Common error codes */
+	SST_SUCCESS = 0,	/* Success */
+	SST_ERR_INVALID_STREAM_ID = 1,
+	SST_ERR_INVALID_MSG_ID = 2,
+	SST_ERR_INVALID_STREAM_OP = 3,
+	SST_ERR_INVALID_PARAMS = 4,
+	SST_ERR_INVALID_CODEC = 5,
+	SST_ERR_INVALID_MEDIA_TYPE = 6,
+	SST_ERR_STREAM_ERR = 7,
+
+	/* IPC specific error codes */
+	SST_IPC_ERR_CALL_BACK_NOT_REGD = 8,
+	SST_IPC_ERR_STREAM_NOT_ALLOCATED = 9,
+	SST_IPC_ERR_STREAM_ALLOC_FAILED = 10,
+	SST_IPC_ERR_GET_STREAM_FAILED = 11,
+	SST_ERR_MOD_NOT_AVAIL = 12,
+	SST_ERR_MOD_DNLD_RQD = 13,
+	SST_ERR_STREAM_STOPPED = 14,
+	SST_ERR_STREAM_IN_USE = 15,
+
+	/* Capture specific error codes */
+	SST_CAP_ERR_INCMPLTE_CAPTURE_MSG = 16,
+	SST_CAP_ERR_CAPTURE_FAIL = 17,
+	SST_CAP_ERR_GET_DDR_NEW_SGLIST = 18,
+	SST_CAP_ERR_UNDER_RUN = 19,
+	SST_CAP_ERR_OVERFLOW = 20,
+
+	/* Playback specific error codes*/
+	SST_PB_ERR_INCMPLTE_PLAY_MSG = 21,
+	SST_PB_ERR_PLAY_FAIL = 22,
+	SST_PB_ERR_GET_DDR_NEW_SGLIST = 23,
+
+	/* Codec manager specific error codes */
+	SST_LIB_ERR_LIB_DNLD_REQUIRED = 24,
+	SST_LIB_ERR_LIB_NOT_SUPPORTED = 25,
+
+	/* Library manager specific error codes */
+	SST_SCC_ERR_PREP_DNLD_FAILED = 26,
+	SST_SCC_ERR_LIB_DNLD_RES_FAILED = 27,
+	/* Scheduler specific error codes */
+	SST_SCH_ERR_FAIL = 28,
+
+	/* DMA specific error codes */
+	SST_DMA_ERR_NO_CHNL_AVAILABLE = 29,
+	SST_DMA_ERR_INVALID_INPUT_PARAMS = 30,
+	SST_DMA_ERR_CHNL_ALREADY_SUSPENDED = 31,
+	SST_DMA_ERR_CHNL_ALREADY_STARTED = 32,
+	SST_DMA_ERR_CHNL_NOT_ENABLED = 33,
+	SST_DMA_ERR_TRANSFER_FAILED = 34,
+
+	SST_SSP_ERR_ALREADY_ENABLED = 35,
+	SST_SSP_ERR_ALREADY_DISABLED = 36,
+	SST_SSP_ERR_NOT_INITIALIZED = 37,
+	SST_SSP_ERR_SRAM_NO_DMA_DATA = 38,
+
+	/* Other error codes */
+	SST_ERR_MOD_INIT_FAIL = 39,
+
+	/* FW init error codes */
+	SST_RDR_ERR_IO_DEV_SEL_NOT_ALLOWED = 40,
+	SST_RDR_ERR_ROUTE_ALREADY_STARTED = 41,
+	SST_RDR_ERR_IO_DEV_SEL_FAILED = 42,
+	SST_RDR_PREP_CODEC_DNLD_FAILED = 43,
+
+	/* Memory debug error codes */
+	SST_ERR_DBG_MEM_READ_FAIL = 44,
+	SST_ERR_DBG_MEM_WRITE_FAIL = 45,
+	SST_ERR_INSUFFICIENT_INPUT_SG_LIST = 46,
+	SST_ERR_INSUFFICIENT_OUTPUT_SG_LIST = 47,
+
+	SST_ERR_BUFFER_NOT_AVAILABLE = 48,
+	SST_ERR_BUFFER_NOT_ALLOCATED = 49,
+	SST_ERR_INVALID_REGION_TYPE = 50,
+	SST_ERR_NULL_PTR = 51,
+	SST_ERR_INVALID_BUFFER_SIZE = 52,
+	SST_ERR_INVALID_BUFFER_INDEX = 53,
+
+	/*IIPC specific error codes */
+	SST_IIPC_QUEUE_FULL = 54,
+	SST_IIPC_ERR_MSG_SND_FAILED = 55,
+	SST_PB_ERR_UNDERRUN_OCCURED = 56,
+	SST_RDR_INSUFFICIENT_MIXER_BUFFER = 57,
+	SST_INVALID_TIME_SLOTS = 58,
+};
+
+enum dbg_mem_data_type {
+	/* Data type of debug read/write */
+	DATA_TYPE_U32,
+	DATA_TYPE_U16,
+	DATA_TYPE_U8,
+};
+
+enum dbg_type {
+	NO_DEBUG = 0,
+	SRAM_DEBUG,
+	PTI_DEBUG,
+};
+
+struct ipc_dsp_hdr {
+	u16 mod_index_id:8;	/*!< instance of the module in the pipeline */
+	u16 pipe_id:8;		/*!< pipe id */
+	u16 mod_id;		/*!< module ID = lpe_algo_types_t */
+	u16 cmd_id;		/*!< DSP Command ID specific to tasks */
+	u16 length;		/*!< Length of the payload only */
+} __packed;
+
+struct ipc_dsp_effects_info {
+	u16	cmd_id;
+	u16	length;
+	u16	sel_pos;
+	u16	sel_algo_id;
+	u16	cpu_load;       /* CPU load indication */
+	u16	memory_usage;   /* Data Memory usage */
+	u32	flags;         /* effect engine caps/requirements flags */
+} __packed;
+
+struct ipc_effect_dsp_hdr {
+	u16 mod_index_id:8;	/*!< instance of the module in the pipeline */
+	u16 pipe_id:8;		/*!< pipe id */
+	u16 mod_id;		/*!< module ID */
+} __packed;
+
+struct ipc_effect_payload {
+	struct ipc_effect_dsp_hdr dsp_hdr;
+	char *data;
+};
+
+union ipc_header_high {
+	struct {
+		u32  msg_id:8;	    /* Message ID - Max 256 Message Types */
+		u32  task_id:4;	    /* Task ID associated with this command */
+		u32  drv_id:4;    /* Identifier for the driver to track*/
+		u32  rsvd1:8;	    /* Reserved */
+		u32  result:4;	    /* Reserved */
+		u32  res_rqd:1;	    /* Response rqd */
+		u32  large:1;	    /* Large Message if large = 1 */
+		u32  done:1;	    /* bit 30 - Done bit */
+		u32  busy:1;	    /* bit 31 - busy bit*/
+	} part;
+	u32 full;
+} __packed;
+
+/* IPC header */
+union ipc_header_mrfld {
+	struct {
+		u32 header_low_payload;
+		union ipc_header_high header_high;
+	} p;
+	u64 full;
+} __packed;
+
+/* CAUTION NOTE: All IPC message bodies must be a multiple of 32 bits. */
+
+/* IPC Header */
+union ipc_header {
+	struct {
+		u32  msg_id:8; /* Message ID - Max 256 Message Types */
+		u32  str_id:5;
+		u32  large:1;	/* Large Message if large = 1 */
+		u32  reserved:2;	/* Reserved for future use */
+		u32  data:14;	/* Ack/Info for msg, size of msg in Mailbox */
+		u32  done:1; /* bit 30 */
+		u32  busy:1; /* bit 31 */
+	} part;
+	u32 full;
+} __packed;
+
+/* Firmware build info */
+struct sst_fw_build_info {
+	unsigned char  date[16]; /* Firmware build date */
+	unsigned char  time[16]; /* Firmware build time */
+} __packed;
+
+/* Firmware Version info */
+struct snd_sst_fw_version {
+	u8 build;	/* build number*/
+	u8 minor;	/* minor number*/
+	u8 major;	/* major number*/
+	u8 type;	/* build type */
+};
+
+struct ipc_header_fw_init {
+	struct snd_sst_fw_version fw_version;/* Firmware version details */
+	struct sst_fw_build_info build_info;
+	u16 result;	/* Fw init result */
+	u8 module_id; /* Module ID in case of error */
+	u8 debug_info; /* Debug info from Module ID in case of fail */
+} __packed;
+
+struct snd_sst_tstamp {
+	u64 ring_buffer_counter;	/* PB/CP: Bytes copied from/to DDR. */
+	u64 hardware_counter;	    /* PB/CP: Bytes DMAed to/from SSP. */
+	u64 frames_decoded;
+	u64 bytes_decoded;
+	u64 bytes_copied;
+	u32 sampling_frequency;
+	u32 channel_peak[8];
+} __packed;
+
+/* SST to IA memory read debug message  */
+struct ipc_sst_ia_dbg_mem_rw  {
+	u16  num_bytes;/* Maximum of MAX_DBG_RW_BYTES */
+	u16  data_type;/* enum: dbg_mem_data_type */
+	u32  address;	/* Memory address of data memory of data_type */
+	u8	rw_bytes[MAX_DBG_RW_BYTES];/* Maximum of 64 bytes can be RW */
+} __packed;
+
+struct ipc_sst_ia_dbg_loop_back {
+	u16 num_dwords; /* Maximum of MAX_LOOP_BACK_DWORDS */
+	u16 increment_val;/* Increments dwords by this value, 0- no increment */
+	u32 lpbk_dwords[MAX_LOOP_BACK_DWORDS];/* Maximum of 8 dwords loopback */
+} __packed;
+
+/* Stream type params structure for Alloc stream */
+struct snd_sst_str_type {
+	u8 codec_type;		/* Codec type */
+	u8 str_type;		/* 1 = voice 2 = music */
+	u8 operation;		/* Playback or Capture */
+	u8 protected_str;	/* 0=Non DRM, 1=DRM */
+	u8 time_slots;
+	u8 reserved;		/* Reserved */
+	u16 result;		/* Result used for acknowledgment */
+} __packed;
+
+/* Library info structure */
+struct module_info {
+	u32 lib_version;
+	u32 lib_type;	/* TBD - Klocwork: u8 lib_type */
+	u32 media_type;
+	u8  lib_name[12];
+	u32 lib_caps;
+	unsigned char  b_date[16]; /* Lib build date */
+	unsigned char  b_time[16]; /* Lib build time */
+} __packed;
+
+/* Library slot info */
+struct lib_slot_info {
+	u8  slot_num; /* 1 or 2 */
+	u8  reserved1;
+	u16 reserved2;
+	u32 iram_size; /* slot size in IRAM */
+	u32 dram_size; /* slot size in DRAM */
+	u32 iram_offset; /* starting offset of slot in IRAM */
+	u32 dram_offset; /* starting offset of slot in DRAM */
+} __packed;
+
+struct snd_ppp_mixer_params {
+	__u32			type; /* Type of the parameter */
+	__u32			size;
+	__u32			input_stream_bitmap; /* Input stream bit map */
+} __packed;
+
+struct snd_sst_lib_download {
+	struct module_info lib_info; /* library info type, capabilities etc */
+	struct lib_slot_info slot_info; /* slot info to be downloaded */
+	u32 mod_entry_pt;
+};
+
+struct snd_sst_lib_download_info {
+	struct snd_sst_lib_download dload_lib;
+	u16 result;	/* Result used for acknowledgment */
+	u8 pvt_id; /* Private ID */
+	u8 reserved;  /* for alignment */
+};
+
+struct snd_pcm_params {
+	u8 num_chan;	/* 1=Mono, 2=Stereo */
+	u8 pcm_wd_sz;	/* 16/24 - bit*/
+	u8 use_offload_path;	/* 0-PCM using period elapsed & ALSA interfaces
+				   1-PCM stream via compressed interface  */
+	u8 reserved2;
+	u32 sfreq;    /* Sampling rate in Hz */
+	u8 channel_map[8];
+} __packed;
+
+/* MP3 Music Parameters Message */
+struct snd_mp3_params {
+	u8  num_chan;	/* 1=Mono, 2=Stereo	*/
+	u8  pcm_wd_sz; /* 16/24 - bit*/
+	u8  crc_check; /* crc_check - disable (0) or enable (1) */
+	u8  reserved1; /* unused*/
+};
+
+#define AAC_BIT_STREAM_ADTS		0
+#define AAC_BIT_STREAM_ADIF		1
+#define AAC_BIT_STREAM_RAW		2
+
+/* AAC Music Parameters Message */
+struct snd_aac_params {
+	u8 num_chan; /* 1=Mono, 2=Stereo*/
+	u8 pcm_wd_sz; /* 16/24 - bit*/
+	u8 bdownsample; /*SBR downsampling 0 - disable 1 -enabled AAC+ only */
+	u8 bs_format; /* input bit stream format adts=0, adif=1, raw=2 */
+	u32 externalsr; /*sampling rate of basic AAC raw bit stream*/
+	u8 sbr_signalling; /* disable/enable/auto mode for the SBR tool; AAC+ */
+	u8 reser1;
+	u16  reser2;
+};
+
+/* WMA Music Parameters Message */
+struct snd_wma_params {
+	u8  num_chan;	/* 1=Mono, 2=Stereo */
+	u8  pcm_wd_sz;	/* 16/24 - bit*/
+	u16 reserved1;
+	u32 brate;	/* Use the hard coded value. */
+	u32 sfreq;	/* Sampling freq eg. 8000, 44100, 48000 */
+	u32 channel_mask;  /* Channel Mask */
+	u16 format_tag;	/* Format Tag */
+	u16 block_align;	/* packet size */
+	u16 wma_encode_opt;/* Encoder option */
+	u8 op_align;	/* op align 0- 16 bit, 1- MSB, 2 LSB */
+	u8 reserved;	/* reserved */
+};
+
+/* Codec params structure */
+union  snd_sst_codec_params {
+	struct snd_pcm_params pcm_params;
+	struct snd_mp3_params mp3_params;
+	struct snd_aac_params aac_params;
+	struct snd_wma_params wma_params;
+};
+
+/* Address and size info of a frame buffer in DDR */
+struct sst_address_info {
+	__u32 addr; /* Address at IA */
+	__u32 size; /* Size of the buffer */
+} __packed;
+
+/* Additional params for Alloc struct*/
+struct snd_sst_alloc_params_ext {
+	__u16 sg_count;
+	__u16 reserved;
+	__u32 frag_size;	/* Number of samples after which a period-
+				   elapsed message is sent; valid only if
+				   path = 0 */
+	struct sst_address_info  ring_buf_info[8];
+};
+
+struct snd_sst_stream_params {
+	union snd_sst_codec_params uc;
+} __packed;
+
+struct snd_sst_params {
+	u32 result;
+	u32 stream_id;
+	u8 codec;
+	u8 ops;
+	u8 stream_type;
+	u8 device_type;
+	u8 task;
+	struct snd_sst_stream_params sparams;
+	struct snd_sst_alloc_params_ext aparams;
+};
+
+struct snd_sst_alloc_mrfld {
+	u16 codec_type;
+	u8 operation;
+	u8 sg_count;
+	struct sst_address_info ring_buf_info[8];
+	u32 frag_size;
+	u32 ts;
+	struct snd_sst_stream_params codec_params;
+} __packed;
+
+/* Alloc stream params structure */
+struct snd_sst_alloc_params {
+	struct snd_sst_str_type str_type;
+	struct snd_sst_stream_params stream_params;
+	struct snd_sst_alloc_params_ext alloc_params;
+} __packed;
+
+/* Alloc stream response message */
+struct snd_sst_alloc_response {
+	struct snd_sst_str_type str_type; /* Stream type for allocation */
+	struct snd_sst_lib_download lib_dnld; /* Valid only for codec dnld */
+};
+
+/* Drop response */
+struct snd_sst_drop_response {
+	u32 result;
+	u32 bytes;
+};
+
+struct snd_sst_async_msg {
+	u32 msg_id; /* Async msg id */
+	u32 payload[0];
+};
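+
+/*
+ * Usage sketch (illustrative only, not part of the firmware ABI):
+ * payload[0] is a flexible array member, so a message carrying n
+ * payload dwords is allocated as header plus payload, e.g.:
+ *
+ *	struct snd_sst_async_msg *msg =
+ *		kzalloc(sizeof(*msg) + n * sizeof(u32), GFP_KERNEL);
+ *	if (msg)
+ *		msg->msg_id = id;
+ */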
+
+struct snd_sst_async_err_msg {
+	u32 fw_resp; /* Firmware Result */
+	u32 lib_resp; /*Library result */
+} __packed;
+
+struct snd_sst_vol {
+	u32	stream_id;
+	s32	volume;
+	u32	ramp_duration;
+	u32	ramp_type;		/* Ramp type, default=0 */
+};
+
+/* Gain library parameters for mrfld
+ * based on DSP command spec v0.82
+ */
+struct snd_sst_gain_v2 {
+	u16 gain_cell_num;  /* num of gain cells to modify*/
+	u8 cell_nbr_idx; /* instance index*/
+	u8 cell_path_idx; /* pipe-id */
+	u16 module_id; /*module id */
+	u16 left_cell_gain; /* left gain value in dB*/
+	u16 right_cell_gain; /* right gain value in dB*/
+	u16 gain_time_const; /* gain time constant*/
+} __packed;
+
+struct snd_sst_mute {
+	u32	stream_id;
+	u32	mute;
+};
+
+struct snd_sst_runtime_params {
+	u8 type;
+	u8 str_id;
+	u8 size;
+	u8 rsvd;
+	void *addr;
+} __packed;
+
+enum stream_param_type {
+	SST_SET_TIME_SLOT = 0,
+	SST_SET_CHANNEL_INFO = 1,
+	OTHERS = 2, /*reserved for future params*/
+};
+
+/* CSV Voice call routing structure */
+struct snd_sst_control_routing {
+	u8 control; /* 0=start, 1=Stop */
+	u8 reserved[3];	/* Reserved- for 32 bit alignment */
+};
+
+struct ipc_post {
+	struct list_head node;
+	union ipc_header header; /* driver specific */
+	bool is_large;
+	bool is_process_reply;
+	union ipc_header_mrfld mrfld_header;
+	char *mailbox_data;
+};
+
+struct snd_sst_ctxt_params {
+	u32 address; /* Physical Address in DDR where the context is stored */
+	u32 size; /* size of the context */
+};
+
+struct snd_sst_lpe_log_params {
+	u8 dbg_type;
+	u8 module_id;
+	u8 log_level;
+	u8 reserved;
+} __packed;
+
+enum snd_sst_bytes_type {
+	SND_SST_BYTES_SET = 0x1,
+	SND_SST_BYTES_GET = 0x2,
+};
+
+struct snd_sst_bytes_v2 {
+	u8 type;
+	u8 ipc_msg;
+	u8 block;
+	u8 task_id;
+	u8 pipe_id;
+	u8 rsvd;
+	u16 len;
+	char bytes[0];
+};
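+
+/*
+ * Usage sketch (illustrative only): bytes[] is a flexible array member,
+ * so a SET request wrapping a parameter blob of blob_len bytes (both
+ * hypothetical caller-provided values) would be built roughly as:
+ *
+ *	struct snd_sst_bytes_v2 *bv2;
+ *
+ *	bv2 = kzalloc(sizeof(*bv2) + blob_len, GFP_KERNEL);
+ *	if (bv2) {
+ *		bv2->type = SND_SST_BYTES_SET;
+ *		bv2->len = blob_len;
+ *		memcpy(bv2->bytes, blob, blob_len);
+ *	}
+ */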
+
+#define MAX_VTSV_FILES 2
+struct snd_sst_vtsv_info {
+	struct sst_address_info vfiles[MAX_VTSV_FILES];
+} __packed;
+
+#endif /* __PLATFORMDRV_IPC_V2_H__ */
diff --git a/sound/soc/intel/ssp/Makefile b/sound/soc/intel/ssp/Makefile
new file mode 100644
index 0000000..b3935f5
--- /dev/null
+++ b/sound/soc/intel/ssp/Makefile
@@ -0,0 +1,5 @@
+#EXTRA CFLAGS
+ccflags-y += -Werror
+
+# Audio Comms
+obj-$(CONFIG_SND_SOC_COMMS_SSP) += mid_ssp.o
diff --git a/sound/soc/intel/ssp/mid_ssp.c b/sound/soc/intel/ssp/mid_ssp.c
new file mode 100644
index 0000000..3cd09d2
--- /dev/null
+++ b/sound/soc/intel/ssp/mid_ssp.c
@@ -0,0 +1,1436 @@
+/*
+ *	mid_ssp.c - ASoC CPU DAI driver for the Intel MID SSP interface
+ *
+ *  Copyright (C) 2011-12 Intel Corp
+ *  Author: Selma Bensaid<selma.bensaid@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+
+#define FORMAT(fmt) "%s: " fmt, __func__
+#define pr_fmt(fmt) KBUILD_MODNAME ": " FORMAT(fmt)
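+
+/*
+ * With the two macros above, a call such as pr_debug("enters\n") inside
+ * ssp_probe() is printed as "mid_ssp: ssp_probe: enters":
+ * KBUILD_MODNAME first, then __func__ supplied by FORMAT().
+ */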
+
+#include <linux/module.h>
+#include "mid_ssp.h"
+
+
+/*
+ * Default I2S configuration
+ */
+/*
+ * TO BE DONE: use mixer to make it more flexible
+ */
+const struct intel_mid_i2s_settings ssp_platform_i2s_config = {
+	.master_mode_clk_selection = SSP_MASTER_CLOCK_UNDEFINED,
+	.master_mode_standard_freq = 0xFFFF,
+	.tx_tristate_phase = TXD_TRISTATE_LAST_PHASE_OFF,
+	.slave_clk_free_running_status =
+			SLAVE_SSPCLK_ON_DURING_TRANSFER_ONLY,
+	.ssp_duplex_mode = RX_AND_TX_MODE,
+	.ssp_trailing_byte_mode = SSP_TRAILING_BYTE_HDL_BY_IA,
+	.ssp_tx_dma = SSP_TX_DMA_ENABLE,
+	.ssp_rx_dma = SSP_RX_DMA_ENABLE,
+	.rx_fifo_interrupt = SSP_RX_FIFO_OVER_INT_ENABLE,
+	.tx_fifo_interrupt = SSP_TX_FIFO_UNDER_INT_ENABLE,
+	.ssp_rx_timeout_interrupt_status = SSP_RX_TIMEOUT_INT_DISABLE,
+	.ssp_trailing_byte_interrupt_status =
+			SSP_TRAILING_BYTE_INT_ENABLE,
+	.ssp_loopback_mode_status = SSP_LOOPBACK_OFF,
+	.ssp_rx_fifo_threshold = MID_SSP_RX_FIFO_THRESHOLD,
+	.ssp_tx_fifo_threshold = MID_SSP_TX_FIFO_THRESHOLD,
+	.ssp_frmsync_pol_bit = SSP_FRMS_ACTIVE_HIGH,
+	.ssp_end_transfer_state =
+			SSP_END_DATA_TRANSFER_STATE_LOW,
+	.ssp_psp_T1 = 0,
+	.ssp_psp_T2 = 0,
+	.ssp_psp_T4 = 0,
+	.ssp_psp_T5 = 0,
+	.ssp_psp_T6 = 1,
+};
+
+/*
+ * SSP DAI Internal functions
+ */
+
+/**
+ * ssp_dma_req - This function programs a write or read request
+ * to the Intel I2S driver
+ *
+ * @param substream Pointer to stream structure
+ * return ret_val Status
+ */
+static int ssp_dma_req(struct snd_pcm_substream *substream)
+{
+
+	struct intel_alsa_ssp_stream_info *str_info;
+	struct intel_ssp_config *ssp_config;
+	struct snd_pcm_runtime *pl_runtime;
+	int ret;
+#ifdef _LLI_ENABLED_
+	struct intel_mid_i2s_lli *sg_table = NULL;
+	int i;
+#else
+	u32 *dma_addr;
+#endif /* _LLI_ENABLED_ */
+
+	WARN(!substream, "SSP DAI: "
+			"ERROR NULL substream\n");
+	if (!substream)
+		return -EINVAL;
+
+
+	pl_runtime = substream->runtime;
+
+	str_info = pl_runtime->private_data;
+
+	WARN(!str_info, "SSP DAI: "
+			"ERROR NULL str_info\n");
+	if (!str_info)
+		return -EINVAL;
+
+	ssp_config = str_info->ssp_config;
+
+	WARN(!ssp_config, "SSP DAI: "
+			"ERROR NULL ssp_config\n");
+	if (!ssp_config)
+		return -EINVAL;
+
+
+	WARN(!ssp_config->i2s_handle, "SSP DAI: "
+			"ERROR: trying to play a stream but "
+			"ssp_config->i2s_handle is NULL\n");
+
+	if (!ssp_config->i2s_handle)
+		return -EINVAL;
+
+#ifdef _LLI_ENABLED_
+	if (!test_bit(INTEL_ALSA_SSP_STREAM_STARTED,
+				&str_info->stream_status)) {
+		pr_err("%s: Stream has been stopped before SSP DMA request has been taken into account",
+			__func__);
+		return 0;
+	}
+
+	/* Will be executed once until next DAI shutdown */
+	if (!test_bit(INTEL_ALSA_SSP_STREAM_INIT,
+				  &str_info->stream_status)) {
+
+		str_info->length = frames_to_bytes(pl_runtime,
+						pl_runtime->period_size);
+
+		str_info->addr = substream->runtime->dma_area;
+
+		sg_table = kzalloc(sizeof(struct intel_mid_i2s_lli) *
+						   pl_runtime->periods,
+						   GFP_KERNEL);
+		if (sg_table == NULL) {
+			pr_err("sg_table allocation failed!");
+			return -EINVAL;
+		}
+
+		for (i = 0; i < pl_runtime->periods; i++) {
+			sg_table[i].addr = (u32 *) (str_info->addr +
+							str_info->length * i);
+			sg_table[i].leng = (u32) str_info->length;
+		}
+
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+			ret = intel_mid_i2s_lli_wr_req(ssp_config->i2s_handle,
+							sg_table,
+							pl_runtime->periods,
+							I2S_CIRCULAR_MODE,
+							substream);
+		else
+			ret = intel_mid_i2s_lli_rd_req(ssp_config->i2s_handle,
+							sg_table,
+							pl_runtime->periods,
+							I2S_CIRCULAR_MODE,
+							substream);
+		kfree(sg_table);
+
+		if (ret != 0) {
+			pr_err("SSP DAI: %s request error",
+				(substream->stream ==
+				SNDRV_PCM_STREAM_PLAYBACK) ?
+					   "write" : "read");
+		}
+
+		set_bit(INTEL_ALSA_SSP_STREAM_INIT, &str_info->stream_status);
+
+		intel_mid_i2s_command(ssp_config->i2s_handle,
+					SSP_CMD_ENABLE_SSP, NULL);
+	}
+
+	/* Executed at each TRIGGER_START */
+	intel_mid_i2s_command(ssp_config->i2s_handle,
+			(substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
+			SSP_CMD_ENABLE_DMA_TX_INTR :
+			SSP_CMD_ENABLE_DMA_RX_INTR, NULL);
+
+	return 0;
+#else
+	str_info->length = frames_to_bytes(pl_runtime, pl_runtime->period_size);
+
+	str_info->addr = substream->runtime->dma_area;
+	pr_debug("SSP DAI: FCT %s substream->runtime->dma_area = %p",
+		 __func__, substream->runtime->dma_area);
+
+	dma_addr = (u32 *)(str_info->addr + str_info->length
+			   * str_info->period_req_index);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = intel_mid_i2s_wr_req(ssp_config->i2s_handle, dma_addr,
+					   str_info->length, substream);
+	else
+		ret = intel_mid_i2s_rd_req(ssp_config->i2s_handle, dma_addr,
+					   str_info->length, substream);
+
+	if (ret == 0) {
+		intel_mid_i2s_command(ssp_config->i2s_handle,
+				      SSP_CMD_ENABLE_SSP, NULL);
+
+		if (test_and_set_bit(INTEL_ALSA_SSP_STREAM_RUNNING,
+				     &str_info->stream_status)) {
+			pr_err("SSP DAI: ERROR previous request not handled\n");
+			return -EBUSY;
+		}
+
+		if (++(str_info->period_req_index) >= pl_runtime->periods)
+			str_info->period_req_index = 0;
+		return 0;
+	} else {
+		pr_err("SSP DAI: FCT %s read/write req ERROR\n", __func__);
+		return -EINVAL;
+	}
+#endif /* _LLI_ENABLED_ */
+} /* ssp_dma_req */
+
+/**
+ * ssp_dma_complete - end of capture or playback callback,
+ * called in DMA complete tasklet context.
+ * This callback is in charge of re-programming a new read or write
+ * request to the Intel MID I2S driver if the stream has not been closed.
+ * It also calls snd_pcm_period_elapsed, when the stream is not PAUSED
+ * or SUSPENDED, to inform the ALSA kernel that a ring buffer period
+ * has been sent or received properly.
+ *
+ * @param param Pointer to a user data
+ * return status
+ */
+static int ssp_dma_complete(void *param)
+{
+	struct snd_pcm_substream *substream;
+	struct intel_alsa_ssp_stream_info *str_info;
+	struct snd_pcm_runtime *pl_runtime;
+#ifndef _LLI_ENABLED_
+	bool call_back = false;
+	bool reset_index = false;
+#endif /* _LLI_ENABLED_ */
+
+	substream = (struct snd_pcm_substream *)param;
+	pl_runtime = substream->runtime;
+	str_info = substream->runtime->private_data;
+
+	WARN(!str_info, "SSP DAI: ERROR NULL str_info\n");
+	if (str_info == NULL)
+		return -EINVAL;
+
+#ifdef _LLI_ENABLED_
+	if (!test_bit(INTEL_ALSA_SSP_STREAM_INIT,
+		      &str_info->stream_status)) {
+		pr_err("Stream already not initialized");
+		return 0;
+	}
+
+	if (++(str_info->period_cb_index) >= pl_runtime->periods)
+		str_info->period_cb_index = 0;
+
+	if (test_bit(INTEL_ALSA_SSP_STREAM_STARTED, &str_info->stream_status))
+		snd_pcm_period_elapsed(substream);
+	else
+		pr_debug("No call to snd_period_elapsed, stream is not started");
+#else
+	if (test_and_clear_bit(INTEL_ALSA_SSP_STREAM_RUNNING,
+			       &str_info->stream_status)) {
+		bool dropped = test_and_clear_bit(INTEL_ALSA_SSP_STREAM_DROPPED,
+						  &str_info->stream_status);
+		bool started = test_bit(INTEL_ALSA_SSP_STREAM_STARTED,
+					&str_info->stream_status);
+
+		if (started) {
+			/*
+			 * Whether dropped or not,
+			 * the stream is ongoing
+			 */
+			call_back = true;
+		}
+		if (started && dropped) {
+			/*
+			 * The stream has been dropped and restarted
+			 * before the callback occurred;
+			 * in this case we have to reprogram the
+			 * requests to the SSP driver
+			 * and reset the stream's indexes
+			 */
+			reset_index = true;
+		}
+		if (!started && !dropped) {
+			pr_err("SSP DAI: FCT %s neither started nor dropped",
+			       __func__);
+			return -EBUSY;
+		}
+	} else {
+		pr_err("SSP DAI: FCT %s called while not running ", __func__);
+		return -EBUSY;
+	}
+
+	if (call_back == true) {
+		pr_debug("SSP DAI: playback/capture (REQ=%d,CB=%d): DMA_REQ_COMPLETE\n",
+			 str_info->period_req_index,
+			 str_info->period_cb_index);
+
+		if (reset_index) {
+			str_info->period_cb_index = 0;
+			str_info->period_req_index = 0;
+		} else if (++(str_info->period_cb_index) >= pl_runtime->periods)
+			str_info->period_cb_index = 0;
+
+		/*
+		 * Launch the next Capture/Playback request if
+		 * no CLOSE has been requested
+		 */
+		ssp_dma_req(substream);
+
+		/*
+		 * Call the snd_pcm_period_elapsed to inform ALSA kernel
+		 * that a ringbuffer period has been played
+		 */
+		snd_pcm_period_elapsed(substream);
+	}
+#endif /* _LLI_ENABLED_ */
+
+	return 0;
+} /* ssp_dma_complete */
+
+/**
+ * intel_mid_ssp_transfer_data - send data buffers
+ *
+ * @param work Pointer to stream structure
+ * return void
+ */
+void intel_mid_ssp_transfer_data(struct work_struct *work)
+{
+	struct intel_alsa_ssp_stream_info *str_info;
+	struct snd_pcm_substream *substream;
+
+	BUG_ON(!work);
+
+	str_info = container_of(work, struct intel_alsa_ssp_stream_info,
+			ssp_ws);
+
+	BUG_ON(!str_info);
+
+	substream = str_info->substream;
+
+	BUG_ON(!substream);
+
+	ssp_dma_req(substream);
+
+} /* intel_mid_ssp_transfer_data */
+
+/*
+ * SSP PLATFORM
+ */
+
+/*
+ * SSP Platform functions
+ */
+static int ssp_platform_pcm_new(struct snd_soc_pcm_runtime *soc_runtime)
+{
+	int retval = 0;
+	struct snd_soc_dai *dai;
+	struct snd_pcm *pcm;
+
+	pr_debug("SSP DAI: FCT %s enters\n",
+			__func__);
+	/*
+	 * Do pre-allocation to all substreams of the given pcm for the
+	 * specified DMA type.
+	 *
+	 */
+	dai = soc_runtime->cpu_dai;
+	pcm = soc_runtime->pcm;
+
+	if (dai->driver->playback.channels_min ||
+			dai->driver->capture.channels_min) {
+		retval =  snd_pcm_lib_preallocate_pages_for_all(pcm,
+			SNDRV_DMA_TYPE_CONTINUOUS,
+			snd_dma_continuous_data(GFP_KERNEL),
+			SSP_MIN_BUFFER, SSP_MAX_BUFFER);
+
+		if (retval) {
+			pr_err("DMA buffer allocation fail\n");
+			return retval;
+		}
+	}
+	return retval;
+} /* ssp_platform_pcm_new */
+
+static void ssp_platform_pcm_free(struct snd_pcm *pcm)
+{
+	pr_debug("SSP DAI: FCT %s enter\n",
+			__func__);
+	/*
+	 * release all pre-allocated buffers on the pcm
+	 *
+	 */
+	snd_pcm_lib_preallocate_free_for_all(pcm);
+
+} /* ssp_platform_pcm_free */
+
+/**
+ * ssp_platform_hw_params - Allocate memory for Ring Buffer according
+ * to hw_params.
+ * It's called in a non-atomic context
+ *
+ * @param substream Substream for which the stream function is called
+ * @param hw_params Hardware parameters requested from the upper layer
+ * return status 0 ==> OK
+ *
+ */
+static int ssp_platform_hw_params(struct snd_pcm_substream *substream,
+		struct snd_pcm_hw_params *hw_params)
+{
+	int ret_val;
+
+	/*
+	 * Allocates the DMA buffer for the substream
+	 * This callback may be called several times;
+	 * snd_pcm_lib_malloc_pages avoids leaking memory
+	 * by releasing any buffer that was already allocated
+	 */
+	ret_val = snd_pcm_lib_malloc_pages(substream,
+			params_buffer_bytes(hw_params));
+
+	if (ret_val < 0)
+		return ret_val;
+
+	memset(substream->runtime->dma_area, 0, params_buffer_bytes(hw_params));
+
+	return 0;
+} /* ssp_platform_hw_params */
+
+/*
+ * ssp_platform_pointer - report the current buffer pointer
+ * processed by the HW
+ * This function is called by ALSA framework to get the current HW buffer ptr
+ * to check the Ring Buffer Status
+ *
+ * @param substream Pointer to the substream for which the function
+ *		is called
+ *
+ * return pcm_pointer Indicates the number of frames played
+ *
+ */
+static
+snd_pcm_uframes_t ssp_platform_pointer(struct snd_pcm_substream *substream)
+{
+	struct intel_alsa_ssp_stream_info *str_info;
+	unsigned long pcm_pointer = 0;
+
+	str_info = substream->runtime->private_data;
+
+	WARN(!str_info, "SSP DAI: ERROR NULL str_info\n");
+	if (!str_info)
+		return -EINVAL;
+
+	/* Identical computation for playback and capture */
+	pcm_pointer = (unsigned long) (str_info->period_cb_index
+			* substream->runtime->period_size);
+
+	pr_debug("SSP DAI: FCT %s Frame bits = %d, period_size = %d,  periods = %d\n",
+		 __func__,
+		 (int) substream->runtime->frame_bits,
+		 (int) substream->runtime->period_size,
+		 (int) substream->runtime->periods);
+
+	pr_debug("SSP DAI: FCT %s returns %ld\n",
+			__func__, pcm_pointer);
+
+	return pcm_pointer;
+} /* ssp_platform_pointer */
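+
+/*
+ * Worked example for the computation above: with a period size of
+ * 240 frames, 4 periods and period_cb_index == 2, the reported
+ * position is 2 * 240 = 480 frames; when the index wraps back to 0
+ * the position wraps with it, as ALSA expects for a ring buffer.
+ */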
+
+static struct snd_pcm_ops ssp_platform_ops = {
+	.open = NULL,
+	.close = NULL,
+	.ioctl = snd_pcm_lib_ioctl,
+	.hw_params = ssp_platform_hw_params,
+	.hw_free = NULL,
+	.prepare = NULL,
+	.trigger = NULL,
+	.pointer = ssp_platform_pointer,
+};
+
+struct snd_soc_platform_driver soc_ssp_platform_drv = {
+	.ops		= &ssp_platform_ops,
+	.probe		= NULL,
+	.pcm_new	= ssp_platform_pcm_new,
+	.pcm_free	= ssp_platform_pcm_free,
+};
+
+/*
+ * SND SOC DAI OPs
+ */
+static int ssp_probe(struct snd_soc_dai *cpu_dai)
+{
+	struct intel_ssp_config *ssp_config;
+
+	pr_info("SSP DAI: FCT %s enters for CPU_DAI %d\n",
+			__func__, cpu_dai->id);
+
+	ssp_config = kzalloc(sizeof(struct intel_ssp_config), GFP_KERNEL);
+
+
+	if (ssp_config == NULL) {
+		pr_err("Unable to allocate ssp_config\n");
+		return -ENOMEM;
+	}
+
+#ifndef _LLI_ENABLED_
+	ssp_config->intel_mid_dma_alloc = false;
+#endif /* _LLI_ENABLED_ */
+	ssp_config->ssp_dai_tx_allocated = false;
+	ssp_config->ssp_dai_rx_allocated = false;
+
+	ssp_config->i2s_settings = ssp_platform_i2s_config;
+	pr_info("SSP DAI: FCT %s ssp_config %p\n",
+				__func__, ssp_config);
+
+	cpu_dai->playback_dma_data = cpu_dai->capture_dma_data = ssp_config;
+
+	return 0;
+
+} /* ssp_probe */
+
+static int ssp_remove(struct snd_soc_dai *cpu_dai)
+{
+	struct intel_ssp_config *ssp_config;
+
+	WARN(!cpu_dai, "SSP DAI: "
+			"ERROR NULL cpu_dai\n");
+	if (!cpu_dai)
+		return -EINVAL;
+
+	ssp_config = cpu_dai->playback_dma_data;
+
+	kfree(ssp_config);
+
+	return 0;
+} /* ssp_remove */
+
+static int ssp_dai_startup(struct snd_pcm_substream *substream,
+			   struct snd_soc_dai *cpu_dai)
+{
+	struct intel_ssp_config *ssp_config;
+	struct snd_pcm_runtime *pl_runtime;
+	struct intel_alsa_ssp_stream_info *str_info;
+	struct intel_ssp_info *ssp_info;
+	struct snd_soc_dai_driver *cpudai_drv = cpu_dai->driver;
+	unsigned int device;
+	int ret = 0;
+
+	WARN(!cpu_dai->driver, "SSP DAI: "
+			"FCT %s ERROR NULL cpu_dai->driver\n",
+			__func__);
+	if (!cpu_dai->driver)
+		return -EINVAL;
+
+	pr_info("SSP DAI: FCT %s enters for DAI Id = %d\n",
+			__func__, cpu_dai->driver->id);
+
+	ssp_info = dev_get_drvdata(cpu_dai->dev);
+
+	WARN(!ssp_info, "SSP DAI: ERROR NULL ssp_info\n");
+	if (!ssp_info)
+		return -EINVAL;
+
+	pl_runtime = substream->runtime;
+
+	ssp_config = snd_soc_dai_get_dma_data(cpu_dai, substream);
+
+	WARN(!ssp_config, "SSP DAI: "
+			"FCT %s ERROR NULL ssp_config\n",
+			__func__);
+	if (!ssp_config)
+		return -EINVAL;
+
+
+	device = cpu_dai->driver->id;
+
+	/*
+	 * setup the internal data structure stream pointers based on it being
+	 * playback or capture stream
+	 */
+	str_info = kzalloc(sizeof(*str_info), GFP_KERNEL);
+
+	if (!str_info) {
+		pr_err("SSP DAI: str_info alloc failure\n");
+		return -ENOMEM;
+	}
+	str_info->substream = substream;
+	str_info->ssp_config = ssp_config;
+	str_info->stream_status = 0;
+
+	INIT_WORK(&str_info->ssp_ws, intel_mid_ssp_transfer_data);
+
+	/*
+	 * Initialize SSPx [x=0,1] driver
+	 * Store the Stream information
+	 */
+	pl_runtime->private_data = str_info;
+
+	pr_debug("SSP DAI: FCT %s enters cpu_dai->card->name = %s\n",
+			__func__, cpu_dai->card->name);
+
+	if (!cpu_dai->active) {
+		if (!strcmp(cpudai_drv->name, SSP_BT_DAI_NAME)) {
+			ssp_config->i2s_handle =
+				intel_mid_i2s_open(SSP_USAGE_BLUETOOTH_FM);
+			pr_debug("opening the CPU_DAI for "\
+					"SSP_USAGE_BLUETOOTH_FM, i2s_handle = %p\n",
+					ssp_config->i2s_handle);
+
+		} else if (!strcmp(cpudai_drv->name, SSP_MODEM_DAI_NAME)) {
+			ssp_config->i2s_handle =
+				intel_mid_i2s_open(SSP_USAGE_MODEM);
+			pr_debug("opening the CPU_DAI for "\
+					"SSP_USAGE_MODEM, i2s_handle = %p\n",
+					ssp_config->i2s_handle);
+
+		} else {
+			pr_err("SSP DAI: invalid SoC card\n");
+			pl_runtime->private_data = NULL;
+			kfree(str_info);
+			return -EINVAL;
+		}
+
+		/* Set the Write Callback */
+		ret = intel_mid_i2s_set_wr_cb(ssp_config->i2s_handle,
+				ssp_dma_complete);
+		if (ret)
+			return ret;
+
+		/* Set the Default Read Callback */
+		ret = intel_mid_i2s_set_rd_cb(ssp_config->i2s_handle,
+				ssp_dma_complete);
+		if (ret)
+			return ret;
+
+	} else {
+		/*
+		 * do nothing, already opened by a sibling substream
+		 */
+		pr_debug("SSP DAI: FCT %s Open DO NOTHING\n",
+				__func__);
+	}
+	return 0;
+} /* ssp_dai_startup */
+
+static void ssp_dai_shutdown(struct snd_pcm_substream *substream,
+			     struct snd_soc_dai *cpu_dai)
+{
+	struct intel_ssp_config *ssp_config;
+	struct intel_alsa_ssp_stream_info *str_info;
+	struct snd_pcm_runtime *runtime;
+	struct intel_ssp_info *ssp_info;
+
+	ssp_config = snd_soc_dai_get_dma_data(cpu_dai, substream);
+
+	BUG_ON(!ssp_config);
+
+	runtime = substream->runtime;
+	BUG_ON(!runtime->private_data);
+
+	str_info = runtime->private_data;
+	BUG_ON(!str_info);
+
+	ssp_info = dev_get_drvdata(cpu_dai->dev);
+	BUG_ON(!ssp_info);
+
+	/* Cancel pending work */
+	cancel_work_sync(&str_info->ssp_ws);
+
+	switch (substream->stream) {
+	case SNDRV_PCM_STREAM_PLAYBACK:
+		/*
+		 * Only Free Tx channel if no playback streams are active
+		 * Shutdown can be called right after a startup if something
+		 * failed (e.g. a concurrency issue),
+		 * so this case can happen
+		 */
+		if ((!cpu_dai->playback_active) &&
+			(ssp_config->i2s_settings.ssp_active_tx_slots_map)) {
+			intel_mid_i2s_command(ssp_config->i2s_handle,
+					SSP_CMD_FREE_TX, NULL);
+			pr_debug("SSP DAI: FCT %s TX DMA Channel released\n",
+					__func__);
+		}
+		ssp_config->ssp_dai_tx_allocated = false;
+		break;
+
+	case SNDRV_PCM_STREAM_CAPTURE:
+		/*
+		 * Only Free Rx channel if no capture streams are active
+		 */
+		if ((!cpu_dai->capture_active) &&
+			(ssp_config->i2s_settings.ssp_active_rx_slots_map)) {
+			intel_mid_i2s_command(ssp_config->i2s_handle,
+					SSP_CMD_FREE_RX, NULL);
+			pr_debug("SSP DAI: FCT %s RX DMA Channel released\n",
+					__func__);
+		}
+		ssp_config->ssp_dai_rx_allocated = false;
+		break;
+
+	default:
+		pr_err("SSP DAI: FCT %s Bad stream_dir: %d\n",
+				__func__, substream->stream);
+		break;
+	}
+
+#ifdef _LLI_ENABLED_
+	clear_bit(INTEL_ALSA_SSP_STREAM_INIT, &str_info->stream_status);
+#endif /* _LLI_ENABLED_ */
+
+	kfree(str_info);
+
+	if (!cpu_dai->active) {
+		pr_info("SSP DAI: FCT %s closing I2S\n",
+				__func__);
+		/*
+		 * Close the Intel MID I2S connection
+		 */
+		intel_mid_i2s_close(ssp_config->i2s_handle);
+
+		ssp_config->i2s_handle = NULL;
+#ifndef _LLI_ENABLED_
+		ssp_config->intel_mid_dma_alloc = false;
+#endif /* _LLI_ENABLED_ */
+	}
+
+} /* ssp_dai_shutdown */
+
+static int ssp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
+		unsigned int fmt)
+{
+	struct intel_ssp_config *ssp_config;
+	struct intel_mid_i2s_settings *i2s_config;
+
+	ssp_config = cpu_dai->playback_dma_data;
+
+	WARN(!ssp_config, "SSP DAI: FCT %s ssp_config=NULL\n",
+			__func__);
+	if (!ssp_config)
+		return -EINVAL;
+
+	pr_debug("SSP DAI: FCT %s fmt = %d\n",
+			__func__, fmt);
+
+	i2s_config = &(ssp_config->i2s_settings);
+
+	/*
+	 * SSP CLK Direction
+	 * SSP FRMSYNC Direction
+	 */
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		i2s_config->sspslclk_direction = SSPSCLK_MASTER_MODE;
+		i2s_config->sspsfrm_direction = SSPSCLK_MASTER_MODE;
+		/*
+		 * Mandatory to be able to perform only RX without TX
+		 * in SSP CLK Master Mode
+		 *
+		 */
+		i2s_config->ssp_duplex_mode = RX_WITHOUT_TX_MODE;
+		break;
+	case SND_SOC_DAIFMT_CBM_CFS:
+		i2s_config->sspslclk_direction = SSPSCLK_MASTER_MODE;
+		i2s_config->sspsfrm_direction = SSPSCLK_SLAVE_MODE;
+		/*
+		 * Mandatory to be able to perform only RX without TX
+		 * in SSP CLK Master Mode
+		 *
+		 */
+		i2s_config->ssp_duplex_mode = RX_WITHOUT_TX_MODE;
+		break;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		i2s_config->sspslclk_direction = SSPSCLK_SLAVE_MODE;
+		i2s_config->sspsfrm_direction = SSPSCLK_SLAVE_MODE;
+		i2s_config->ssp_duplex_mode = RX_AND_TX_MODE;
+		break;
+	case SND_SOC_DAIFMT_CBS_CFM:
+		i2s_config->sspslclk_direction = SSPSCLK_SLAVE_MODE;
+		i2s_config->sspsfrm_direction = SSPSCLK_MASTER_MODE;
+		i2s_config->ssp_duplex_mode = RX_AND_TX_MODE;
+		break;
+	default:
+		pr_err("SSP DAI: %s Bad DAI CLK/FS Mode=%d\n",
+					__func__,
+					(fmt & SND_SOC_DAIFMT_MASTER_MASK));
+		return -EINVAL;
+	}
+	/*
+	 * SSP Signal Inversion Mode
+	 * Use clock gating bitfield for
+	 * Serial bit-rate Clock Mode
+	 */
+	switch (fmt & SND_SOC_DAIFMT_CLOCK_MASK) {
+	case SSP_DAI_SCMODE_0:
+		i2s_config->ssp_serial_clk_mode = SSP_CLK_MODE_0;
+		break;
+	case SSP_DAI_SCMODE_1:
+		i2s_config->ssp_serial_clk_mode = SSP_CLK_MODE_1;
+		break;
+	case SSP_DAI_SCMODE_2:
+		i2s_config->ssp_serial_clk_mode = SSP_CLK_MODE_2;
+		break;
+	case SSP_DAI_SCMODE_3:
+		i2s_config->ssp_serial_clk_mode = SSP_CLK_MODE_3;
+		break;
+	default:
+		pr_err("SSP DAI: %s Bad DAI Signal Inversion Mode=%d\n",
+				__func__,
+				(fmt & SND_SOC_DAIFMT_INV_MASK));
+		return -EINVAL;
+	}
+
+	/*
+	 * SSP FS Inversion Mode
+	 */
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+	case SND_SOC_DAIFMT_IB_NF:
+		i2s_config->ssp_frmsync_pol_bit = SSP_FRMS_ACTIVE_HIGH;
+		break;
+	case SND_SOC_DAIFMT_NB_IF:
+	case SND_SOC_DAIFMT_IB_IF:
+		i2s_config->ssp_frmsync_pol_bit = SSP_FRMS_ACTIVE_LOW;
+		break;
+	default:
+		pr_err("SSP DAI: %s Bad DAI FS Inversion Mode=%d\n",
+				__func__,
+				(fmt & SND_SOC_DAIFMT_INV_MASK));
+		return -EINVAL;
+	}
+
+	/*
+	 * SSP Format Mode
+	 */
+
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		i2s_config->frame_format = PSP_FORMAT;
+		break;
+
+	default:
+		pr_err("SSP DAI: %s Bad DAI format Mode=%d\n",
+				__func__,
+				(fmt & SND_SOC_DAIFMT_FORMAT_MASK));
+		return -EINVAL;
+	}
+	return 0;
+} /* ssp_set_dai_fmt */
+
+static int ssp_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai,
+	unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width)
+{
+	struct intel_ssp_config *ssp_config;
+	struct intel_mid_i2s_settings *i2s_config;
+
+	ssp_config = cpu_dai->playback_dma_data;
+
+	WARN(!ssp_config, "SSP DAI: FCT %s ssp_config=NULL\n",
+			__func__);
+	if (!ssp_config)
+		return -EINVAL;
+
+
+	i2s_config = &(ssp_config->i2s_settings);
+
+	i2s_config->frame_rate_divider_control = slots;
+	i2s_config->data_size = slot_width;
+	i2s_config->mode = SSP_IN_NETWORK_MODE;
+	i2s_config->ssp_active_tx_slots_map = tx_mask;
+	i2s_config->ssp_active_rx_slots_map = rx_mask;
+
+	pr_debug("i2s_config->frame_rate_divider_control = %d\n",
+			i2s_config->frame_rate_divider_control);
+	pr_debug("i2s_config->data_size = %d\n",
+			i2s_config->data_size);
+	pr_debug("i2s_config->mode = %d\n",
+			i2s_config->mode);
+	pr_debug("i2s_config->ssp_active_tx_slots_map = %d\n",
+			i2s_config->ssp_active_tx_slots_map);
+	pr_debug("i2s_config->ssp_active_rx_slots_map = %d\n",
+			i2s_config->ssp_active_rx_slots_map);
+
+	return 0;
+}
+
+static int ssp_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
+	int clk_id, unsigned int freq, int dir)
+{
+	struct intel_ssp_config *ssp_config;
+	struct intel_mid_i2s_settings *i2s_config;
+
+	ssp_config = cpu_dai->playback_dma_data;
+
+	BUG_ON(!ssp_config);
+
+	i2s_config = &(ssp_config->i2s_settings);
+
+	pr_debug("SSP DAI: FCT %s clk_id = %d\n",
+			__func__, clk_id);
+
+	switch (clk_id) {
+	case SSP_CLK_ONCHIP:
+		i2s_config->master_mode_clk_selection = SSP_ONCHIP_CLOCK;
+		break;
+	case SSP_CLK_NET:
+		i2s_config->master_mode_clk_selection = SSP_NETWORK_CLOCK;
+		break;
+	case SSP_CLK_EXT:
+		i2s_config->master_mode_clk_selection = SSP_EXTERNAL_CLOCK;
+		break;
+	case SSP_CLK_AUDIO:
+		i2s_config->master_mode_clk_selection = SSP_ONCHIP_AUDIO_CLOCK;
+		break;
+	default:
+		i2s_config->master_mode_standard_freq =
+						SSP_MASTER_CLOCK_UNDEFINED;
+		pr_err("SSP DAI: %s Bad clk_id=%d\n",
+				__func__,
+				clk_id);
+		return -EINVAL;
+	}
+
+	pr_debug("SSP DAI:FCT %s freq = %d\n",
+				__func__, freq);
+
+	switch (freq) {
+	case 8000:
+		i2s_config->master_mode_standard_freq = SSP_FRM_FREQ_8_000;
+		i2s_config->ssp_psp_T1 = 0;
+		i2s_config->ssp_psp_T2 = 1;
+		i2s_config->ssp_psp_T4 = 0;
+		i2s_config->ssp_psp_T5 = 0;
+		i2s_config->ssp_psp_T6 = 1;
+		break;
+
+	case 11025:
+		i2s_config->master_mode_standard_freq = SSP_FRM_FREQ_11_025;
+		pr_err("SSP DAI: %s Bad freq_out=%d\n",
+						__func__,
+						freq);
+		return -EINVAL;
+
+	case 16000:
+		i2s_config->master_mode_standard_freq = SSP_FRM_FREQ_16_000;
+		i2s_config->ssp_psp_T1 = 6;
+		i2s_config->ssp_psp_T2 = 2;
+		i2s_config->ssp_psp_T4 = 0;
+		i2s_config->ssp_psp_T5 = 14;
+		i2s_config->ssp_psp_T6 = 16;
+		break;
+
+	case 22050:
+		i2s_config->master_mode_standard_freq = SSP_FRM_FREQ_22_050;
+		pr_err("SSP DAI: %s Bad freq_out=%d\n",
+						__func__,
+						freq);
+		return -EINVAL;
+
+	case 44100:
+		i2s_config->master_mode_standard_freq = SSP_FRM_FREQ_44_100;
+		pr_err("SSP DAI: %s Bad freq_out=%d\n",
+						__func__,
+						freq);
+		return -EINVAL;
+
+	case 48000:
+		i2s_config->master_mode_standard_freq = SSP_FRM_FREQ_48_000;
+		i2s_config->ssp_psp_T1 = 6;
+		i2s_config->ssp_psp_T2 = 2;
+		i2s_config->ssp_psp_T4 = 0;
+		i2s_config->ssp_psp_T5 = 14;
+		i2s_config->ssp_psp_T6 = 16;
+		break;
+
+	default:
+		i2s_config->master_mode_standard_freq = SSP_FRM_FREQ_UNDEFINED;
+		pr_err("SSP DAI: %s Bad freq_out=%d\n",
+				__func__,
+				freq);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int ssp_set_dai_tristate(struct snd_soc_dai *cpu_dai,
+	int tristate)
+{
+	struct intel_ssp_config *ssp_config;
+	struct intel_mid_i2s_settings *i2s_config;
+
+	ssp_config = cpu_dai->playback_dma_data;
+
+	BUG_ON(!ssp_config);
+
+	i2s_config = &(ssp_config->i2s_settings);
+
+	if (IS_TRISTATE_ENABLED(tristate))
+		i2s_config->tx_tristate_enable = TXD_TRISTATE_ON;
+	else
+		i2s_config->tx_tristate_enable = TXD_TRISTATE_OFF;
+
+	if (IS_NEXT_FRMS_ASSERTED_WITH_LSB_PREVIOUS_FRM(tristate))
+		i2s_config->ssp_frmsync_timing_bit =
+				NEXT_FRMS_ASS_WITH_LSB_PREVIOUS_FRM;
+	else
+		i2s_config->ssp_frmsync_timing_bit =
+				NEXT_FRMS_ASS_AFTER_END_OF_T4;
+
+	pr_debug("FCT %s tristate %x\n", __func__, tristate);
+
+	return 0;
+}
+
+/**
+ * ssp_dai_trigger - stream activities are handled here
+ * This function is called whenever a stream activity is invoked
+ * The Trigger function is called in an atomic context
+ *
+ * @param substream Substream for which the stream function is called
+ * @param cmd The stream command that is requested from the upper layer
+ * return status 0 ==> OK
+ *
+ */
+static int ssp_dai_trigger(struct snd_pcm_substream *substream,
+		int cmd, struct snd_soc_dai *cpu_dai)
+{
+	int ret_val = 0;
+	struct intel_alsa_ssp_stream_info *str_info;
+	struct snd_pcm_runtime *pl_runtime;
+	struct intel_ssp_info *ssp_info;
+#ifdef _LLI_ENABLED_
+	struct intel_ssp_config *ssp_config;
+#endif /* _LLI_ENABLED_ */
+
+	bool trigger_start = true;
+	int stream = 0;
+
+	pr_debug("SSP DAI: FCT %s enters\n",
+			__func__);
+
+	stream = substream->stream;
+
+
+	pl_runtime = substream->runtime;
+
+	WARN(!pl_runtime->private_data, "SSP DAI: ERROR "
+			"NULL pl_runtime->private_data\n");
+	if (!pl_runtime->private_data)
+		return -EINVAL;
+
+	WARN(!cpu_dai, "SSP DAI: ERROR NULL cpu_dai\n");
+	if (!cpu_dai)
+		return -EINVAL;
+
+	WARN(!cpu_dai->dev, "SSP DAI: ERROR NULL cpu_dai->dev\n");
+	if (!cpu_dai->dev)
+		return -EINVAL;
+
+	ssp_info = dev_get_drvdata(cpu_dai->dev);
+
+
+	WARN(!ssp_info->ssp_dai_wq, "SSP DAI: ERROR NULL ssp_dai_wq\n");
+	if (!ssp_info->ssp_dai_wq)
+		return -EINVAL;
+
+	str_info = pl_runtime->private_data;
+
+#ifdef _LLI_ENABLED_
+	ssp_config = str_info->ssp_config;
+
+	WARN(!ssp_config, "SSP DAI: ERROR NULL ssp_config\n");
+	if (!ssp_config)
+		return -EINVAL;
+#endif /* _LLI_ENABLED_ */
+
+	pr_debug("SSP DAI: FCT %s CMD = 0x%04X\n",
+			__func__, cmd);
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		if (!test_and_set_bit(INTEL_ALSA_SSP_STREAM_STARTED,
+				      &str_info->stream_status)) {
+#ifndef _LLI_ENABLED_
+			if (test_bit(INTEL_ALSA_SSP_STREAM_DROPPED,
+				     &str_info->stream_status)) {
+				pr_debug("SSP DAI: FCT %s do not restart the trigger stream running already\n",
+					 __func__);
+				trigger_start = false;
+			} else
+#endif /* _LLI_ENABLED_ */
+				trigger_start = true;
+		} else {
+			pr_err("SSP DAI: ERROR 2 consecutive TRIGGER_START\n");
+			return -EBUSY;
+		}
+
+		/* Store the substream locally */
+		if (trigger_start) {
+			if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+
+				pr_debug("SSP DAI: queue Playback Work\n");
+				queue_work(ssp_info->ssp_dai_wq,
+						&str_info->ssp_ws);
+			} else if (stream == SNDRV_PCM_STREAM_CAPTURE) {
+
+				pr_debug("SSP DAI: queue Capture Work\n");
+				queue_work(ssp_info->ssp_dai_wq,
+						&str_info->ssp_ws);
+			} else {
+				pr_err("SSP DAI: SNDRV_PCM_TRIGGER_START Bad Stream: %d\n",
+						substream->stream);
+				return -EINVAL;
+			}
+		}
+		break;
+
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		if (test_and_clear_bit(INTEL_ALSA_SSP_STREAM_STARTED,
+				       &str_info->stream_status)) {
+#ifdef _LLI_ENABLED_
+			intel_mid_i2s_command(ssp_config->i2s_handle,
+					      (stream == SNDRV_PCM_STREAM_PLAYBACK) ?
+					      SSP_CMD_DISABLE_DMA_TX_INTR :
+					      SSP_CMD_DISABLE_DMA_RX_INTR,
+					      NULL);
+#else
+			set_bit(INTEL_ALSA_SSP_STREAM_DROPPED,
+				&str_info->stream_status);
+#endif /* _LLI_ENABLED_ */
+		} else {
+			pr_err("SSP DAI: trigger START/STOP mismatch\n");
+			return -EBUSY;
+		}
+		break;
+
+	default:
+		pr_err("SSP DAI: snd_i2s_alsa_pcm_trigger Bad Command\n");
+		return -EINVAL;
+	}
+	return ret_val;
+} /* ssp_dai_trigger */
+
+/**
+ * ssp_dai_hw_params - Allocate memory for Ring Buffer according
+ * to hw_params.
+ * It's called in a non-atomic context
+ *
+ * @param substream Substream for which the stream function is called
+ * @param hw_params Hardware parameters requested from the upper layer
+ * @param cpu_dai Pointer to the CPU DAI that is used
+ * return status 0 ==> OK
+ *
+ */
+static int ssp_dai_hw_params(struct snd_pcm_substream *substream,
+		struct snd_pcm_hw_params *hw_params,
+		struct snd_soc_dai *cpu_dai)
+{
+
+	return 0;
+}
+
+static int ssp_dai_prepare(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *cpu_dai)
+{
+	struct intel_ssp_config *ssp_config;
+	struct intel_ssp_info *ssp_info;
+
+	pr_debug("SSP DAI: FCT %s enters\n",
+				__func__);
+
+	WARN(!cpu_dai, "SSP DAI: ERROR NULL cpu_dai\n");
+	if (!cpu_dai)
+		return -EINVAL;
+
+	ssp_info = dev_get_drvdata(cpu_dai->dev);
+	WARN(!ssp_info, "SSP DAI: ERROR NULL ssp_info\n");
+	if (!ssp_info)
+		return -EINVAL;
+
+	ssp_config = snd_soc_dai_get_dma_data(cpu_dai, substream);
+	pr_debug("SSP DAI: FCT %s ssp_dai_tx_allocated %d "\
+			"ssp_dai_rx_allocated %d\n",
+			__func__,
+			ssp_config->ssp_dai_tx_allocated,
+			ssp_config->ssp_dai_rx_allocated);
+
+	/*
+	 * The HW config is set only once per CPU DAI
+	 */
+
+	if (!ssp_config->ssp_dai_tx_allocated &&
+			!ssp_config->ssp_dai_rx_allocated) {
+		intel_mid_i2s_command(ssp_config->i2s_handle,
+						SSP_CMD_SET_HW_CONFIG,
+						&(ssp_config->i2s_settings));
+	}
+
+	switch (substream->stream) {
+	case SNDRV_PCM_STREAM_PLAYBACK:
+		if (!ssp_config->ssp_dai_tx_allocated) {
+			if (intel_mid_i2s_command(ssp_config->i2s_handle,
+					SSP_CMD_ALLOC_TX, NULL)) {
+				pr_err("can not alloc TX DMA Channel\n");
+				return -EBUSY;
+			}
+			ssp_config->ssp_dai_tx_allocated = true;
+		}
+		break;
+
+	case SNDRV_PCM_STREAM_CAPTURE:
+		if (!ssp_config->ssp_dai_rx_allocated) {
+			if (intel_mid_i2s_command(ssp_config->i2s_handle,
+					SSP_CMD_ALLOC_RX, NULL)) {
+				pr_err("can not alloc RX DMA Channel\n");
+				return -EBUSY;
+			}
+			ssp_config->ssp_dai_rx_allocated = true;
+		}
+		break;
+
+	default:
+		pr_err("SSP DAI: FCT %s Bad stream_dir: %d\n",
+				__func__, substream->stream);
+		return -EINVAL;
+	}
+
+#ifndef _LLI_ENABLED_
+	ssp_config->intel_mid_dma_alloc = true;
+#endif /* _LLI_ENABLED_ */
+
+	pr_debug("SSP DAI: FCT %s leaves\n",
+			__func__);
+
+	return 0;
+}
+
+
+
+/* BT/FM */
+static struct snd_soc_dai_ops ssp_dai_ops = {
+	.startup	= ssp_dai_startup,
+	.shutdown	= ssp_dai_shutdown,
+	.trigger	= ssp_dai_trigger,
+	.hw_params	= ssp_dai_hw_params,
+	.prepare    = ssp_dai_prepare,
+	.set_sysclk	= ssp_set_dai_sysclk,
+	.set_pll    = NULL,
+	.set_fmt	= ssp_set_dai_fmt,
+	.set_tdm_slot	= ssp_set_dai_tdm_slot,
+	.set_tristate	= ssp_set_dai_tristate,
+};
+
+#define SSP_SUPPORTED_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 | \
+			SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 | \
+			SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000)
+
+#define SSP_SUPPORTED_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+			SNDRV_PCM_FMTBIT_U16_LE | \
+			SNDRV_PCM_FMTBIT_S8 | \
+			SNDRV_PCM_FMTBIT_U8)
+
+struct snd_soc_dai_driver intel_ssp_platform_dai[] = {
+{
+	.name = SSP_MODEM_DAI_NAME,
+	.id = 0,
+	.playback = {
+		.channels_min = 1,
+		.channels_max = 8,
+		.rates = SSP_SUPPORTED_RATES,
+		.formats = SSP_SUPPORTED_FORMATS,
+	},
+	.capture = {
+		.channels_min = 1,
+		.channels_max = 8,
+		.rates = SSP_SUPPORTED_RATES,
+		.formats = SSP_SUPPORTED_FORMATS,
+	},
+	.ops = &ssp_dai_ops,
+	.probe = ssp_probe,
+	.remove = ssp_remove,
+},
+{
+	.name = SSP_BT_DAI_NAME,
+	.id = 1,
+	.playback = {
+		.channels_min = 1,
+		.channels_max = 8,
+		.rates = SSP_SUPPORTED_RATES,
+		.formats = SSP_SUPPORTED_FORMATS,
+	},
+	.capture = {
+		.channels_min = 1,
+		.channels_max = 8,
+		.rates = SSP_SUPPORTED_RATES,
+		.formats = SSP_SUPPORTED_FORMATS,
+	},
+	.ops = &ssp_dai_ops,
+	.probe = ssp_probe,
+	.remove = ssp_remove,
+},
+};
+
+static const struct snd_soc_component_driver ssp_component = {
+	.name           = "ssp",
+};
+
+static int ssp_dai_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct intel_ssp_info *ssp_info;
+
+	pr_info("SSP DAI: FCT %s enters\n",
+			__func__);
+
+	ssp_info = kzalloc(sizeof(struct intel_ssp_info), GFP_KERNEL);
+
+	if (ssp_info == NULL) {
+		pr_err("Unable to allocate ssp_info\n");
+		return -ENOMEM;
+	}
+	pr_info("ssp_info address %p", ssp_info);
+
+	ret = snd_soc_register_platform(&pdev->dev,
+				&soc_ssp_platform_drv);
+	if (ret) {
+		pr_err("registering SSP PLATFORM failed\n");
+		kfree(ssp_info);
+		return -EBUSY;
+	}
+
+	ret = snd_soc_register_component(&pdev->dev, &ssp_component,
+			intel_ssp_platform_dai,
+			ARRAY_SIZE(intel_ssp_platform_dai));
+
+	if (ret) {
+		pr_err("registering cpu DAIs failed\n");
+		snd_soc_unregister_platform(&pdev->dev);
+		kfree(ssp_info);
+		return -EBUSY;
+	}
+
+	ssp_info->ssp_dai_wq = create_workqueue("ssp_transfer_data");
+
+	if (!ssp_info->ssp_dai_wq) {
+		pr_err("work queue creation failed\n");
+		snd_soc_unregister_component(&pdev->dev);
+		snd_soc_unregister_platform(&pdev->dev);
+		kfree(ssp_info);
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, ssp_info);
+
+	pr_info("SSP DAI: FCT %s leaves %d\n",
+			__func__, ret);
+
+	return ret;
+}
+
+static int ssp_dai_remove(struct platform_device *pdev)
+{
+	struct intel_ssp_info *ssp_info = platform_get_drvdata(pdev);
+
+	pr_debug("SSP DAI: FCT %s enters\n",
+			__func__);
+
+	if (ssp_info == NULL) {
+		pr_err("ssp_info is NULL, nothing to remove\n");
+		return -EINVAL;
+	}
+	pr_info("ssp_info address %p\n", ssp_info);
+
+	flush_workqueue(ssp_info->ssp_dai_wq);
+
+	destroy_workqueue(ssp_info->ssp_dai_wq);
+
+	platform_set_drvdata(pdev, NULL);
+
+	snd_soc_unregister_component(&pdev->dev);
+
+	snd_soc_unregister_platform(&pdev->dev);
+
+	pr_debug("SSP DAI: FCT %s leaves\n",
+			__func__);
+
+	return 0;
+}
+
+static struct platform_driver intel_ssp_dai_driver = {
+	.driver		= {
+		.name		= "mid-ssp-dai",
+		.owner		= THIS_MODULE,
+	},
+	.probe		= ssp_dai_probe,
+	.remove		= ssp_dai_remove,
+};
+
+
+static int __init ssp_soc_dai_init(void)
+{
+	pr_info("SSP DAI: FCT %s called\n",
+			__func__);
+
+	return  platform_driver_register(&intel_ssp_dai_driver);
+}
+module_init(ssp_soc_dai_init);
+
+static void __exit ssp_soc_dai_exit(void)
+{
+	pr_debug("SSP DAI: FCT %s called\n",
+			__func__);
+
+	platform_driver_unregister(&intel_ssp_dai_driver);
+
+}
+module_exit(ssp_soc_dai_exit);
+
+MODULE_DESCRIPTION("ASoC Intel(R) MID Platform driver");
+MODULE_AUTHOR("Selma Bensaid");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:ssp-cpu-dai");
diff --git a/sound/soc/intel/ssp/mid_ssp.h b/sound/soc/intel/ssp/mid_ssp.h
new file mode 100644
index 0000000..4df4eac
--- /dev/null
+++ b/sound/soc/intel/ssp/mid_ssp.h
@@ -0,0 +1,124 @@
+/*
+ *	mid_ssp.h - ASoC CPU DAI driver for the Intel MID SSP interface
+ *
+ *  Copyright (C) 2011-12 Intel Corp
+ *  Authors:	Selma Bensaid <selma.bensaid@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#ifndef MID_SSP_H_
+#define MID_SSP_H_
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <sound/core.h>
+#include <sound/control.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/info.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include <linux/intel_mid_i2s_if.h>
+
+#define SSP_MODEM_DAI_NAME "ssp-modem-cpu-dai"
+#define SSP_BT_DAI_NAME "ssp-bt-cpu-dai"
+
+#define SSP_MAX_BUFFER		(640*1024)
+#define SSP_MIN_BUFFER		(640*1024)
+
+#define TRISTATE_BIT			0
+#define FRAME_SYNC_RELATIVE_TIMING_BIT	1
+#define DUMMY_START_ONE_PERIOD_OFFSET	2
+#define DUMMY_START_ONE_PERIOD_MASK     0x3
+
+#define IS_TRISTATE_ENABLED(x) (x & BIT(TRISTATE_BIT))
+#define IS_NEXT_FRMS_ASSERTED_WITH_LSB_PREVIOUS_FRM(x) \
+			((x & BIT(FRAME_SYNC_RELATIVE_TIMING_BIT)) \
+					>> FRAME_SYNC_RELATIVE_TIMING_BIT)
+#define IS_DUMMY_START_ONE_PERIOD_OFFSET(x) \
+			((x >> DUMMY_START_ONE_PERIOD_OFFSET) \
+					& DUMMY_START_ONE_PERIOD_MASK)
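+
+/*
+ * Illustrative helper (hypothetical, not used by the driver): packs a
+ * set_tristate() argument from the two bit-fields decoded by the
+ * IS_* macros above.
+ */
+static inline int ssp_dai_pack_tristate(bool tristate_on, bool frms_with_lsb)
+{
+	return (tristate_on ? BIT(TRISTATE_BIT) : 0) |
+		(frms_with_lsb ? BIT(FRAME_SYNC_RELATIVE_TIMING_BIT) : 0);
+}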
+
+#define MID_SSP_RX_FIFO_THRESHOLD 8
+#define MID_SSP_TX_FIFO_THRESHOLD 7
+
+
+/* data driven FALLING, data sampled RISING, idle LOW */
+#define SSP_DAI_SCMODE_0        (1 << 4)
+/* data driven RISING, data sampled FALLING, idle LOW */
+#define SSP_DAI_SCMODE_1		(2 << 4)
+/* data driven RISING, data sampled FALLING, idle HIGH */
+#define SSP_DAI_SCMODE_2		(3 << 4)
+/* data driven FALLING, data sampled RISING, idle HIGH */
+#define SSP_DAI_SCMODE_3		(4 << 4)
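+
+/*
+ * Usage sketch (machine-driver side, illustrative only): the SCMODE
+ * values occupy the DAI-format clock-gating field, so they are OR'ed
+ * into the snd_soc_dai_set_fmt() argument, e.g.:
+ *
+ *	snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
+ *			    SSP_DAI_SCMODE_2 | SND_SOC_DAIFMT_CBS_CFS);
+ */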
+
+
+/*
+ * Structures Definition
+ */
+
+
+struct intel_ssp_config {
+	struct intel_mid_i2s_hdl *i2s_handle;
+	struct intel_mid_i2s_settings i2s_settings;
+#ifndef _LLI_ENABLED_
+	bool intel_mid_dma_alloc;
+#endif /* _LLI_ENABLED_ */
+	bool ssp_dai_tx_allocated;
+	bool ssp_dai_rx_allocated;
+};
+
+struct intel_ssp_info {
+	struct workqueue_struct *ssp_dai_wq;
+};
+
+struct intel_alsa_ssp_stream_info {
+	struct snd_pcm_substream *substream;
+	struct work_struct ssp_ws;
+	struct intel_ssp_config *ssp_config;
+	unsigned long stream_status;
+	u32 period_req_index;
+	s32 period_cb_index;
+	u8 *addr;
+	int length;
+};
+
+
+/*
+ * Enum Definition
+ */
+
+enum intel_alsa_ssp_stream_status {
+	INTEL_ALSA_SSP_STREAM_INIT = 0,
+	INTEL_ALSA_SSP_STREAM_STARTED,
+	INTEL_ALSA_SSP_STREAM_RUNNING,
+	INTEL_ALSA_SSP_STREAM_PAUSED,
+	INTEL_ALSA_SSP_STREAM_DROPPED,
+};
+enum ssp_clk_def {
+	SSP_CLK_ONCHIP = 0x0,
+	SSP_CLK_NET,
+	SSP_CLK_EXT,
+	SSP_CLK_AUDIO
+};
+
+
+#endif /* MID_SSP_H_ */
diff --git a/sound/soc/intel/sst/Makefile b/sound/soc/intel/sst/Makefile
new file mode 100644
index 0000000..3834491
--- /dev/null
+++ b/sound/soc/intel/sst/Makefile
@@ -0,0 +1,14 @@
+# Makefile for SST Audio driver
+snd-intel-sst-objs := sst.o sst_ipc.o sst_stream.o sst_drv_interface.o sst_dsp.o sst_pvt.o sst_app_interface.o sst_acpi.o
+
+ifdef CONFIG_DEBUG_FS
+	snd-intel-sst-objs += sst_debug.o
+endif
+
+obj-$(CONFIG_SND_INTEL_SST) += snd-intel-sst.o
+
+ccflags-y += -I$(src)                   # needed for trace events
+
+CFLAGS_snd-intel-sst.o = -I$(src)
+
+ccflags-y += -DMRFLD_WORD_WA -Werror
diff --git a/sound/soc/intel/sst/sst.c b/sound/soc/intel/sst/sst.c
new file mode 100644
index 0000000..0f59e47
--- /dev/null
+++ b/sound/soc/intel/sst/sst.c
@@ -0,0 +1,1255 @@
+/*
+ *  sst.c - Intel SST Driver for audio engine
+ *
+ *  Copyright (C) 2008-10	Intel Corp
+ *  Authors:	Vinod Koul <vinod.koul@intel.com>
+ *		Harsha Priya <priya.harsha@intel.com>
+ *		Dharageswari R <dharageswari.r@intel.com>
+ *		KP Jeeja <jeeja.kp@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This driver enumerates the SST audio engine as a PCI or ACPI device and
+ *  provides interface to the platform driver to interact with the SST audio
+ *  Firmware.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/firmware.h>
+#include <linux/miscdevice.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+#include <linux/async.h>
+#include <linux/lnw_gpio.h>
+#include <linux/delay.h>
+#include <linux/acpi.h>
+#include <asm/intel-mid.h>
+#include <asm/platform_sst_audio.h>
+#include <asm/platform_sst.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+
+#define CREATE_TRACE_POINTS
+#include "sst_trace.h"
+
+MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
+MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
+MODULE_AUTHOR("Dharageswari R <dharageswari.r@intel.com>");
+MODULE_AUTHOR("KP Jeeja <jeeja.kp@intel.com>");
+MODULE_DESCRIPTION("Intel (R) SST(R) Audio Engine Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(SST_DRIVER_VERSION);
+
+struct intel_sst_drv *sst_drv_ctx;
+static DEFINE_MUTEX(drv_ctx_lock);
+
+/* ioctl32 compat */
+#ifdef CONFIG_COMPAT
+#include "sst_app_compat_interface.c"
+#else
+#define intel_sst_ioctl_compat NULL
+#endif
+
+static const struct file_operations intel_sst_fops_cntrl = {
+	.owner = THIS_MODULE,
+	.open = intel_sst_open_cntrl,
+	.release = intel_sst_release_cntrl,
+	.unlocked_ioctl = intel_sst_ioctl,
+	.compat_ioctl = intel_sst_ioctl_compat,
+};
+
+struct miscdevice lpe_ctrl = {
+	.minor = MISC_DYNAMIC_MINOR,/* dynamic allocation */
+	.name = "intel_sst_ctrl",/* /dev/intel_sst_ctrl */
+	.fops = &intel_sst_fops_cntrl
+};
+
+static inline void set_imr_interrupts(struct intel_sst_drv *ctx, bool enable)
+{
+	union interrupt_reg imr;
+
+	spin_lock(&ctx->ipc_spin_lock);
+	imr.full = sst_shim_read(ctx->shim, SST_IMRX);
+	if (enable) {
+		imr.part.done_interrupt = 0;
+		imr.part.busy_interrupt = 0;
+	} else {
+		imr.part.done_interrupt = 1;
+		imr.part.busy_interrupt = 1;
+	}
+	sst_shim_write(ctx->shim, SST_IMRX, imr.full);
+	spin_unlock(&ctx->ipc_spin_lock);
+}
+
+#define SST_IS_PROCESS_REPLY(header) ((header & PROCESS_MSG) ? true : false)
+#define SST_VALIDATE_MAILBOX_SIZE(size) ((size <= SST_MAILBOX_SIZE) ? true : false)
+
+static irqreturn_t intel_sst_interrupt_mrfld(int irq, void *context)
+{
+	union interrupt_reg_mrfld isr;
+	union ipc_header_mrfld header;
+	union sst_imr_reg_mrfld imr;
+	struct ipc_post *msg = NULL;
+	unsigned int size = 0;
+	struct intel_sst_drv *drv = (struct intel_sst_drv *) context;
+	irqreturn_t retval = IRQ_HANDLED;
+
+	/* Interrupt arrived, check src */
+	isr.full = sst_shim_read64(drv->shim, SST_ISRX);
+	if (isr.part.done_interrupt) {
+		/* Clear done bit */
+		spin_lock(&drv->ipc_spin_lock);
+		header.full = sst_shim_read64(drv->shim,
+					drv->ipc_reg.ipcx);
+		header.p.header_high.part.done = 0;
+		sst_shim_write64(drv->shim, drv->ipc_reg.ipcx, header.full);
+		/* write 1 to clear status register */
+		isr.part.done_interrupt = 1;
+		sst_shim_write64(drv->shim, SST_ISRX, isr.full);
+		spin_unlock(&drv->ipc_spin_lock);
+		trace_sst_ipc("ACK   <-", header.p.header_high.full,
+					  header.p.header_low_payload,
+					  header.p.header_high.part.drv_id);
+		queue_work(drv->post_msg_wq, &drv->ipc_post_msg.wq);
+		retval = IRQ_HANDLED;
+	}
+	if (isr.part.busy_interrupt) {
+		spin_lock(&drv->ipc_spin_lock);
+		imr.full = sst_shim_read64(drv->shim, SST_IMRX);
+		imr.part.busy_interrupt = 1;
+		sst_shim_write64(drv->shim, SST_IMRX, imr.full);
+		spin_unlock(&drv->ipc_spin_lock);
+		header.full =  sst_shim_read64(drv->shim, drv->ipc_reg.ipcd);
+		if (sst_create_ipc_msg(&msg, header.p.header_high.part.large)) {
+			pr_err("No memory available\n");
+			drv->ops->clear_interrupt();
+			return IRQ_HANDLED;
+		}
+		if (header.p.header_high.part.large) {
+			size = header.p.header_low_payload;
+			if (SST_VALIDATE_MAILBOX_SIZE(size)) {
+				memcpy_fromio(msg->mailbox_data,
+					drv->mailbox + drv->mailbox_recv_offset, size);
+			} else {
+				pr_err("Mailbox not copied, payload siz is: %u\n", size);
+				header.p.header_low_payload = 0;
+			}
+		}
+		msg->mrfld_header = header;
+		msg->is_process_reply =
+			SST_IS_PROCESS_REPLY(header.p.header_high.part.msg_id);
+		trace_sst_ipc("REPLY <-", msg->mrfld_header.p.header_high.full,
+					  msg->mrfld_header.p.header_low_payload,
+					  msg->mrfld_header.p.header_high.part.drv_id);
+		spin_lock(&drv->rx_msg_lock);
+		list_add_tail(&msg->node, &drv->rx_list);
+		spin_unlock(&drv->rx_msg_lock);
+		drv->ops->clear_interrupt();
+		retval = IRQ_WAKE_THREAD;
+	}
+	return retval;
+}
+
+static irqreturn_t intel_sst_irq_thread_mfld(int irq, void *context)
+{
+	struct intel_sst_drv *drv = (struct intel_sst_drv *) context;
+	struct ipc_post *__msg, *msg = NULL;
+	unsigned long irq_flags;
+
+	if (list_empty(&drv->rx_list))
+		return IRQ_HANDLED;
+
+	spin_lock_irqsave(&drv->rx_msg_lock, irq_flags);
+	list_for_each_entry_safe(msg, __msg, &drv->rx_list, node) {
+
+		list_del(&msg->node);
+		spin_unlock_irqrestore(&drv->rx_msg_lock, irq_flags);
+		if (msg->is_process_reply)
+			drv->ops->process_message(msg);
+		else
+			drv->ops->process_reply(msg);
+
+		if (msg->is_large)
+			kfree(msg->mailbox_data);
+		kfree(msg);
+		spin_lock_irqsave(&drv->rx_msg_lock, irq_flags);
+	}
+	spin_unlock_irqrestore(&drv->rx_msg_lock, irq_flags);
+	return IRQ_HANDLED;
+}
+/**
+ * intel_sst_intr_mfld - interrupt service routine for SST
+ *
+ * @irq:	irq number of the interrupt
+ * @context:	pointer to the device structure
+ *
+ * This function is called by the OS when the SST device raises
+ * an interrupt, as the result of a write to an IPC register.
+ * The source can be the busy or the done interrupt.
+*/
+static irqreturn_t intel_sst_intr_mfld(int irq, void *context)
+{
+	union interrupt_reg isr;
+	union ipc_header header;
+	irqreturn_t retval = IRQ_HANDLED;
+	struct ipc_post *msg = NULL;
+	unsigned int size = 0;
+	struct intel_sst_drv *drv = (struct intel_sst_drv *) context;
+
+	/* Interrupt arrived, check src */
+	isr.full = sst_shim_read(drv->shim, SST_ISRX);
+	if (isr.part.done_interrupt) {
+		/* Mask all interrupts till this one is processed */
+		set_imr_interrupts(drv, false);
+		/* Clear done bit */
+		spin_lock(&drv->ipc_spin_lock);
+		header.full = sst_shim_read(drv->shim, drv->ipc_reg.ipcx);
+		header.part.done = 0;
+		sst_shim_write(drv->shim, drv->ipc_reg.ipcx, header.full);
+		/* write 1 to clear status register */
+		isr.part.done_interrupt = 1;
+		sst_shim_write(drv->shim, SST_ISRX, isr.full);
+		spin_unlock(&drv->ipc_spin_lock);
+		queue_work(drv->post_msg_wq, &drv->ipc_post_msg.wq);
+
+		/* Unmask done and busy interrupts */
+		set_imr_interrupts(drv, true);
+		retval = IRQ_HANDLED;
+	}
+	if (isr.part.busy_interrupt) {
+		/* Mask all interrupts till we process it in bottom half */
+		set_imr_interrupts(drv, false);
+		header.full = sst_shim_read(drv->shim, drv->ipc_reg.ipcd);
+		if (sst_create_ipc_msg(&msg, header.part.large)) {
+			pr_err("No memory available\n");
+			drv->ops->clear_interrupt();
+			return IRQ_HANDLED;
+		}
+		if (header.part.large) {
+			size = header.part.data;
+			if (SST_VALIDATE_MAILBOX_SIZE(size)) {
+				memcpy_fromio(msg->mailbox_data,
+					drv->mailbox + drv->mailbox_recv_offset + 4, size);
+			} else {
+				pr_err("Mailbox not copied, payload siz is: %u\n", size);
+				header.part.data = 0;
+			}
+		}
+		msg->header = header;
+		msg->is_process_reply =
+				SST_IS_PROCESS_REPLY(msg->header.part.msg_id);
+		spin_lock(&drv->rx_msg_lock);
+		list_add_tail(&msg->node, &drv->rx_list);
+		spin_unlock(&drv->rx_msg_lock);
+		drv->ops->clear_interrupt();
+		retval = IRQ_WAKE_THREAD;
+	}
+	return retval;
+}
+
+static int sst_save_dsp_context_v2(struct intel_sst_drv *sst)
+{
+	unsigned int pvt_id;
+	struct ipc_post *msg = NULL;
+	struct ipc_dsp_hdr dsp_hdr;
+	struct sst_block *block;
+
+	/*send msg to fw*/
+	pvt_id = sst_assign_pvt_id(sst);
+	if (sst_create_block_and_ipc_msg(&msg, true, sst, &block,
+				IPC_CMD, pvt_id)) {
+		pr_err("msg/block alloc failed. Not proceeding with context save\n");
+		return 0;
+	}
+
+	sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+			      SST_TASK_ID_MEDIA, 1, pvt_id);
+	msg->mrfld_header.p.header_low_payload = sizeof(dsp_hdr);
+	msg->mrfld_header.p.header_high.part.res_rqd = 1;
+	sst_fill_header_dsp(&dsp_hdr, IPC_PREP_D3, PIPE_RSVD, pvt_id);
+	memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+
+	sst_add_to_dispatch_list_and_post(sst, msg);
+	/* wait for reply */
+	if (sst_wait_timeout(sst, block)) {
+		pr_err("sst: err fw context save timeout ...\n");
+		pr_err("not suspending FW!!!\n");
+		sst_free_block(sst, block);
+		return -EIO;
+	}
+	if (block->ret_code) {
+		pr_err("fw responded w/ error %d", block->ret_code);
+		sst_free_block(sst, block);
+		return -EIO;
+	}
+
+	sst_free_block(sst, block);
+	return 0;
+}
+
+static int sst_save_dsp_context(struct intel_sst_drv *sst)
+{
+	struct snd_sst_ctxt_params fw_context;
+	unsigned int pvt_id;
+	struct ipc_post *msg = NULL;
+	struct sst_block *block;
+	pr_debug("%s: Enter\n", __func__);
+
+	/* send msg to fw */
+	pvt_id = sst_assign_pvt_id(sst);
+	if (sst_create_block_and_ipc_msg(&msg, true, sst, &block,
+				IPC_IA_GET_FW_CTXT, pvt_id)) {
+		pr_err("msg/block alloc failed. Not proceeding with context save\n");
+		return -ENOMEM;
+	}
+	sst_fill_header(&msg->header, IPC_IA_GET_FW_CTXT, 1, pvt_id);
+	msg->header.part.data = sizeof(fw_context) + sizeof(u32);
+	fw_context.address = virt_to_phys((void *)sst->fw_cntx);
+	fw_context.size = FW_CONTEXT_MEM;
+	memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
+	memcpy(msg->mailbox_data + sizeof(u32),
+				&fw_context, sizeof(fw_context));
+	sst_add_to_dispatch_list_and_post(sst, msg);
+	/* wait for reply */
+	if (sst_wait_timeout(sst, block))
+		pr_err("sst: err fw context save timeout ...\n");
+	else
+		pr_debug("fw context saved ...\n");
+	if (block->ret_code)
+		sst->fw_cntx_size = 0;
+	else
+		sst->fw_cntx_size = *sst->fw_cntx;
+	pr_debug("fw copied data %x\n", sst->fw_cntx_size);
+	sst_free_block(sst, block);
+	return 0;
+}
+
+static struct intel_sst_ops mrfld_ops = {
+	.interrupt = intel_sst_interrupt_mrfld,
+	.irq_thread = intel_sst_irq_thread_mfld,
+	.clear_interrupt = intel_sst_clear_intr_mrfld,
+	.start = sst_start_mrfld,
+	.reset = intel_sst_reset_dsp_mrfld,
+	.post_message = sst_post_message_mrfld,
+	.sync_post_message = sst_sync_post_message_mrfld,
+	.process_message = sst_process_message_mrfld,
+	.process_reply = sst_process_reply_mrfld,
+	.save_dsp_context =  sst_save_dsp_context_v2,
+	.alloc_stream = sst_alloc_stream_mrfld,
+	.post_download = sst_post_download_mrfld,
+	.do_recovery = sst_do_recovery_mrfld,
+};
+
+static struct intel_sst_ops mrfld_32_ops = {
+	.interrupt = intel_sst_intr_mfld,
+	.irq_thread = intel_sst_irq_thread_mfld,
+	.clear_interrupt = intel_sst_clear_intr_mfld,
+	.start = sst_start_mrfld,
+	.reset = intel_sst_reset_dsp_mrfld,
+	.post_message = sst_post_message_mfld,
+	.sync_post_message = sst_sync_post_message_mfld,
+	.process_message = sst_process_message_mfld,
+	.process_reply = sst_process_reply_mfld,
+	.save_dsp_context =  sst_save_dsp_context,
+	.restore_dsp_context = sst_restore_fw_context,
+	.alloc_stream = sst_alloc_stream_ctp,
+	.post_download = sst_post_download_byt,
+	.do_recovery = sst_do_recovery,
+};
+
+static struct intel_sst_ops ctp_ops = {
+	.interrupt = intel_sst_intr_mfld,
+	.irq_thread = intel_sst_irq_thread_mfld,
+	.clear_interrupt = intel_sst_clear_intr_mfld,
+	.start = sst_start_mfld,
+	.reset = intel_sst_reset_dsp_mfld,
+	.post_message = sst_post_message_mfld,
+	.sync_post_message = sst_sync_post_message_mfld,
+	.process_message = sst_process_message_mfld,
+	.process_reply = sst_process_reply_mfld,
+	.set_bypass = intel_sst_set_bypass_mfld,
+	.save_dsp_context =  sst_save_dsp_context,
+	.restore_dsp_context = sst_restore_fw_context,
+	.alloc_stream = sst_alloc_stream_ctp,
+	.post_download = sst_post_download_ctp,
+	.do_recovery = sst_do_recovery,
+};
+
+int sst_driver_ops(struct intel_sst_drv *sst)
+{
+
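+	/* Pick the ops table and timestamp offset for the detected DSP generation */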
+	switch (sst->pci_id) {
+	case SST_MRFLD_PCI_ID:
+		sst->tstamp = SST_TIME_STAMP_MRFLD;
+		sst->ops = &mrfld_ops;
+		return 0;
+	case SST_BYT_PCI_ID:
+		sst->tstamp = SST_TIME_STAMP_BYT;
+		sst->ops = &mrfld_32_ops;
+		return 0;
+	case SST_CLV_PCI_ID:
+		sst->tstamp =  SST_TIME_STAMP;
+		sst->ops = &ctp_ops;
+		return 0;
+	default:
+		pr_err("SST Driver capablities missing for pci_id: %x", sst->pci_id);
+		return -EINVAL;
+	}
+}
+
+int sst_alloc_drv_context(struct device *dev)
+{
+	struct intel_sst_drv *ctx;
+	mutex_lock(&drv_ctx_lock);
+	if (sst_drv_ctx) {
+		pr_err("Only one sst handle is supported\n");
+		mutex_unlock(&drv_ctx_lock);
+		return -EBUSY;
+	}
+	pr_debug("%s: %d", __func__, __LINE__);
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx) {
+		pr_err("malloc fail\n");
+		mutex_unlock(&drv_ctx_lock);
+		return -ENOMEM;
+	}
+	sst_drv_ctx = ctx;
+	mutex_unlock(&drv_ctx_lock);
+	return 0;
+}
+
+static ssize_t sst_sysfs_get_recovery(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%d\n", ctx->sst_state);
+}
+
+
+static ssize_t sst_sysfs_set_recovery(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t len)
+{
+	long val;
+	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+	if (kstrtol(buf, 0, &val))
+		return -EINVAL;
+
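+	/* Writing 1 forces the DSP back to UN_INIT, but only when no PM reference is held */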
+	if (val == 1) {
+		if (!atomic_read(&ctx->pm_usage_count)) {
+			pr_debug("%s: set sst state to uninit...\n", __func__);
+			sst_set_fw_state_locked(ctx, SST_UN_INIT);
+		} else {
+			pr_err("%s: not setting sst state... %d\n", __func__,
+					atomic_read(&ctx->pm_usage_count));
+			pr_err("Unrecoverable state....\n");
+			BUG();
+			return -EPERM;
+		}
+	}
+
+	return len;
+}
+
+static DEVICE_ATTR(audio_recovery, S_IRUGO | S_IWUSR,
+			sst_sysfs_get_recovery, sst_sysfs_set_recovery);
+
+int sst_request_firmware_async(struct intel_sst_drv *ctx)
+{
+	int ret = 0;
+
+	snprintf(ctx->firmware_name, sizeof(ctx->firmware_name),
+			"%s%04x%s", "fw_sst_",
+			ctx->pci_id, ".bin");
+	pr_debug("Requesting FW %s now...\n", ctx->firmware_name);
+
+	trace_sst_fw_download("Request firmware async", ctx->sst_state);
+
+	ret = request_firmware_nowait(THIS_MODULE, 1, ctx->firmware_name,
+			ctx->dev, GFP_KERNEL, ctx, sst_firmware_load_cb);
+	if (ret)
+		pr_err("could not load firmware %s error %d\n", ctx->firmware_name, ret);
+
+	return ret;
+}
+
+/**
+ * intel_sst_probe - PCI probe function
+ *
+ * @pci:	PCI device structure
+ * @pci_id:	PCI device ID structure
+ *
+ * This function is called by the OS when a matching device is found.
+ * It enables the device, maps its resources and registers the interrupt.
+ */
+static int intel_sst_probe(struct pci_dev *pci,
+			const struct pci_device_id *pci_id)
+{
+	int i, ret = 0;
+	struct intel_sst_ops *ops;
+	struct sst_platform_info *sst_pdata = pci->dev.platform_data;
+	int ddr_base;
+	u32 ssp_base_add;
+	u32 dma_base_add;
+	u32 len;
+
+	pr_debug("Probe for DID %x\n", pci->device);
+	ret = sst_alloc_drv_context(&pci->dev);
+	if (ret)
+		return ret;
+
+	sst_drv_ctx->dev = &pci->dev;
+	sst_drv_ctx->pci_id = pci->device;
+	if (!sst_pdata)
+		return -EINVAL;
+	sst_drv_ctx->pdata = sst_pdata;
+
+	if (!sst_drv_ctx->pdata->probe_data)
+		return -EINVAL;
+	memcpy(&sst_drv_ctx->info, sst_drv_ctx->pdata->probe_data,
+					sizeof(sst_drv_ctx->info));
+
+	sst_drv_ctx->use_32bit_ops = sst_drv_ctx->pdata->ipc_info->use_32bit_ops;
+	sst_drv_ctx->mailbox_recv_offset = sst_drv_ctx->pdata->ipc_info->mbox_recv_off;
+
+	if (0 != sst_driver_ops(sst_drv_ctx))
+		return -EINVAL;
+	ops = sst_drv_ctx->ops;
+	mutex_init(&sst_drv_ctx->stream_lock);
+	mutex_init(&sst_drv_ctx->sst_lock);
+	mutex_init(&sst_drv_ctx->mixer_ctrl_lock);
+	mutex_init(&sst_drv_ctx->csr_lock);
+
+	sst_drv_ctx->stream_cnt = 0;
+	sst_drv_ctx->fw_in_mem = NULL;
+	sst_drv_ctx->vcache.file1_in_mem = NULL;
+	sst_drv_ctx->vcache.file2_in_mem = NULL;
+	sst_drv_ctx->vcache.size1 = 0;
+	sst_drv_ctx->vcache.size2 = 0;
+
+	/* we don't use dma, so set to 0*/
+	sst_drv_ctx->use_dma = 0;
+	sst_drv_ctx->use_lli = 1;
+
+	INIT_LIST_HEAD(&sst_drv_ctx->memcpy_list);
+	INIT_LIST_HEAD(&sst_drv_ctx->libmemcpy_list);
+
+	INIT_LIST_HEAD(&sst_drv_ctx->ipc_dispatch_list);
+	INIT_LIST_HEAD(&sst_drv_ctx->block_list);
+	INIT_LIST_HEAD(&sst_drv_ctx->rx_list);
+	INIT_WORK(&sst_drv_ctx->ipc_post_msg.wq, ops->post_message);
+	init_waitqueue_head(&sst_drv_ctx->wait_queue);
+
+	sst_drv_ctx->mad_wq = create_singlethread_workqueue("sst_mad_wq");
+	if (!sst_drv_ctx->mad_wq) {
+		ret = -ENOMEM;
+		goto do_free_drv_ctx;
+	}
+	sst_drv_ctx->post_msg_wq =
+		create_singlethread_workqueue("sst_post_msg_wq");
+	if (!sst_drv_ctx->post_msg_wq) {
+		ret = -ENOMEM;
+		goto free_mad_wq;
+	}
+
+	spin_lock_init(&sst_drv_ctx->ipc_spin_lock);
+	spin_lock_init(&sst_drv_ctx->block_lock);
+	spin_lock_init(&sst_drv_ctx->pvt_id_lock);
+	spin_lock_init(&sst_drv_ctx->rx_msg_lock);
+
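+	/* IPCX/IPCD register locations vary by platform, so apply the pdata IPC offset */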
+	sst_drv_ctx->ipc_reg.ipcx = SST_IPCX + sst_drv_ctx->pdata->ipc_info->ipc_offset;
+	sst_drv_ctx->ipc_reg.ipcd = SST_IPCD + sst_drv_ctx->pdata->ipc_info->ipc_offset;
+	pr_debug("ipcx 0x%x ipxd 0x%x", sst_drv_ctx->ipc_reg.ipcx,
+					sst_drv_ctx->ipc_reg.ipcd);
+
+	pr_info("Got drv data max stream %d\n",
+				sst_drv_ctx->info.max_streams);
+	for (i = 1; i <= sst_drv_ctx->info.max_streams; i++) {
+		struct stream_info *stream = &sst_drv_ctx->streams[i];
+		memset(stream, 0, sizeof(*stream));
+		stream->pipe_id = PIPE_RSVD;
+		mutex_init(&stream->lock);
+	}
+
+	ret = sst_request_firmware_async(sst_drv_ctx);
+	if (ret) {
+		pr_err("Firmware download failed:%d\n", ret);
+		goto do_free_mem;
+	}
+	/* Init the device */
+	ret = pci_enable_device(pci);
+	if (ret) {
+		pr_err("device can't be enabled\n");
+		goto do_free_mem;
+	}
+	sst_drv_ctx->pci = pci_dev_get(pci);
+	ret = pci_request_regions(pci, SST_DRV_NAME);
+	if (ret)
+		goto do_disable_device;
+	/*
+	 * Map the PCI BARs: BAR0 = LPE DDR (MRFLD only), BAR1 = SST shim,
+	 * BAR2 = mailbox SRAM, BAR3 = IRAM, BAR4 = DRAM.
+	 */
+
+	if (sst_drv_ctx->pci_id == SST_MRFLD_PCI_ID) {
+		sst_drv_ctx->ddr_base = pci_resource_start(pci, 0);
+		/*
+		* check that the relocated IMR base matches with FW Binary
+		* put temporary check till better soln is available for FW
+		*/
+		ddr_base = relocate_imr_addr_mrfld(sst_drv_ctx->ddr_base);
+		if (!sst_drv_ctx->pdata->lib_info) {
+			pr_err("%s:lib_info pointer NULL\n", __func__);
+			ret = -EINVAL;
+			goto do_release_regions;
+		}
+		if (ddr_base != sst_drv_ctx->pdata->lib_info->mod_base) {
+			pr_err("FW LSP DDR BASE does not match with IFWI\n");
+			ret = -EINVAL;
+			goto do_release_regions;
+		}
+		sst_drv_ctx->ddr_end = pci_resource_end(pci, 0);
+
+		sst_drv_ctx->ddr = pci_ioremap_bar(pci, 0);
+		if (!sst_drv_ctx->ddr) {
+			ret = -ENOMEM;
+			goto do_unmap_ddr;
+		}
+		pr_debug("sst: DDR Ptr %p\n", sst_drv_ctx->ddr);
+	} else {
+		sst_drv_ctx->ddr = NULL;
+	}
+
+	/* SHIM */
+	sst_drv_ctx->shim_phy_add = pci_resource_start(pci, 1);
+	sst_drv_ctx->shim = pci_ioremap_bar(pci, 1);
+	if (!sst_drv_ctx->shim) {
+		ret = -ENOMEM;
+		goto do_release_regions;
+	}
+	pr_debug("SST Shim Ptr %p\n", sst_drv_ctx->shim);
+
+	/* Shared SRAM */
+	sst_drv_ctx->mailbox_add = pci_resource_start(pci, 2);
+	sst_drv_ctx->mailbox = pci_ioremap_bar(pci, 2);
+	if (!sst_drv_ctx->mailbox) {
+		ret = -ENOMEM;
+		goto do_unmap_shim;
+	}
+	pr_debug("SRAM Ptr %p\n", sst_drv_ctx->mailbox);
+
+	/* IRAM */
+	sst_drv_ctx->iram_end = pci_resource_end(pci, 3);
+	sst_drv_ctx->iram_base = pci_resource_start(pci, 3);
+	sst_drv_ctx->iram = pci_ioremap_bar(pci, 3);
+	if (!sst_drv_ctx->iram) {
+		ret = -ENOMEM;
+		goto do_unmap_sram;
+	}
+	pr_debug("IRAM Ptr %p\n", sst_drv_ctx->iram);
+
+	/* DRAM */
+	sst_drv_ctx->dram_end = pci_resource_end(pci, 4);
+	sst_drv_ctx->dram_base = pci_resource_start(pci, 4);
+	sst_drv_ctx->dram = pci_ioremap_bar(pci, 4);
+	if (!sst_drv_ctx->dram) {
+		ret = -ENOMEM;
+		goto do_unmap_iram;
+	}
+	pr_debug("DRAM Ptr %p\n", sst_drv_ctx->dram);
+
+	if ((sst_pdata->pdata != NULL) &&
+			(sst_pdata->debugfs_data != NULL)) {
+		if (sst_pdata->ssp_data != NULL) {
+			/* SSP Register */
+			ssp_base_add = sst_pdata->ssp_data->base_add;
+			len = sst_pdata->debugfs_data->ssp_reg_size;
+			for (i = 0; i < sst_pdata->debugfs_data->num_ssp; i++) {
+				sst_drv_ctx->debugfs.ssp[i] =
+					devm_ioremap(&pci->dev,
+						ssp_base_add + (len * i), len);
+				if (!sst_drv_ctx->debugfs.ssp[i]) {
+					pr_warn("ssp ioremap failed\n");
+					continue;
+				}
+
+				pr_debug("\n ssp io 0x%p ssp 0x%x size 0x%x",
+					sst_drv_ctx->debugfs.ssp[i],
+						ssp_base_add, len);
+			}
+		}
+
+		/* DMA Register */
+		dma_base_add = sst_pdata->pdata->sst_dma_base[0];
+		len = sst_pdata->debugfs_data->dma_reg_size;
+		for (i = 0; i < sst_pdata->debugfs_data->num_dma; i++) {
+			sst_drv_ctx->debugfs.dma_reg[i] =
+				devm_ioremap(&pci->dev,
+					dma_base_add + (len * i), len);
+			if (!sst_drv_ctx->debugfs.dma_reg[i]) {
+				pr_warn("dma ioremap failed\n");
+				continue;
+			}
+
+			pr_debug("\n dma io 0x%p ssp 0x%x size 0x%x",
+				sst_drv_ctx->debugfs.dma_reg[i],
+					dma_base_add, len);
+		}
+	}
+
+	/* Do not access iram/dram etc before LPE is reset */
+
+	sst_drv_ctx->dump_buf.iram_buf.size = pci_resource_len(pci, 3);
+	sst_drv_ctx->dump_buf.iram_buf.buf = kzalloc(sst_drv_ctx->dump_buf.iram_buf.size,
+						GFP_KERNEL);
+	if (!sst_drv_ctx->dump_buf.iram_buf.buf) {
+		pr_err("%s: no memory\n", __func__);
+		ret = -ENOMEM;
+		goto do_unmap_dram;
+	}
+
+	sst_drv_ctx->dump_buf.dram_buf.size = pci_resource_len(pci, 4);
+	sst_drv_ctx->dump_buf.dram_buf.buf = kzalloc(sst_drv_ctx->dump_buf.dram_buf.size,
+						GFP_KERNEL);
+	if (!sst_drv_ctx->dump_buf.dram_buf.buf) {
+		pr_err("%s: no memory\n", __func__);
+		ret = -ENOMEM;
+		goto do_free_iram_buf;
+	}
+
+	pr_debug("\niram len 0x%x dram len 0x%x",
+			sst_drv_ctx->dump_buf.iram_buf.size,
+			sst_drv_ctx->dump_buf.dram_buf.size);
+
+	if (sst_drv_ctx->pci_id == SST_CLV_PCI_ID) {
+		sst_drv_ctx->probe_bytes = kzalloc(SST_MAX_BIN_BYTES, GFP_KERNEL);
+		if (!sst_drv_ctx->probe_bytes) {
+			pr_err("%s: no memory\n", __func__);
+			ret = -ENOMEM;
+			goto do_free_dram_buf;
+		}
+	}
+
+	sst_set_fw_state_locked(sst_drv_ctx, SST_UN_INIT);
+	sst_drv_ctx->irq_num = pci->irq;
+	/* Register the ISR */
+	ret = request_threaded_irq(pci->irq, sst_drv_ctx->ops->interrupt,
+		sst_drv_ctx->ops->irq_thread, 0, SST_DRV_NAME,
+		sst_drv_ctx);
+	if (ret)
+		goto do_free_probe_bytes;
+	pr_debug("Registered IRQ 0x%x\n", pci->irq);
+
+	/*Register LPE Control as misc driver*/
+	ret = misc_register(&lpe_ctrl);
+	if (ret) {
+		pr_err("couldn't register control device\n");
+		goto do_free_irq;
+	}
+	/* default intr are unmasked so set this as masked */
+	if (sst_drv_ctx->pci_id == SST_MRFLD_PCI_ID)
+		sst_shim_write64(sst_drv_ctx->shim, SST_IMRX, 0xFFFF0038);
+
+	if (sst_drv_ctx->use_32bit_ops) {
+		pr_debug("allocate mem for context save/restore\n ");
+		/*allocate mem for fw context save during suspend*/
+		sst_drv_ctx->fw_cntx = kzalloc(FW_CONTEXT_MEM, GFP_KERNEL);
+		if (!sst_drv_ctx->fw_cntx) {
+			ret = -ENOMEM;
+			goto do_free_misc;
+		}
+		/*setting zero as that is valid mem to restore*/
+		sst_drv_ctx->fw_cntx_size = 0;
+	}
+	if (sst_drv_ctx->pci_id == SST_CLV_PCI_ID) {
+		u32 csr;
+		u32 csr2;
+		u32 clkctl;
+
+		/*set lpe start clock and ram size*/
+		csr = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+		csr |= 0x30000;
+		/*make sure clksel set to OSC for SSP0,1 (default)*/
+		csr &= 0xFFFFFFF3;
+		sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr);
+
+		/*set clock output enable for SSP0,1,3*/
+		clkctl = sst_shim_read(sst_drv_ctx->shim, SST_CLKCTL);
+		if (sst_drv_ctx->pci_id == SST_CLV_PCI_ID)
+			clkctl |= (0x7 << 16);
+		else
+			clkctl |= ((1<<16)|(1<<17));
+		sst_shim_write(sst_drv_ctx->shim, SST_CLKCTL, clkctl);
+
+		/* set SSP0 & SSP1 disable DMA Finish*/
+		csr2 = sst_shim_read(sst_drv_ctx->shim, SST_CSR2);
+		/* set SSP3 disable DMA finish */
+		csr2 |= BIT(1)|BIT(2);
+		sst_shim_write(sst_drv_ctx->shim, SST_CSR2, csr2);
+	}
+	if (sst_drv_ctx->pdata->ssp_data) {
+		if (sst_drv_ctx->pdata->ssp_data->gpio_in_use)
+			sst_set_gpio_conf(&sst_drv_ctx->pdata->ssp_data->gpio);
+	}
+	pci_set_drvdata(pci, sst_drv_ctx);
+	pm_runtime_allow(sst_drv_ctx->dev);
+	pm_runtime_put_noidle(sst_drv_ctx->dev);
+	register_sst(sst_drv_ctx->dev);
+	sst_debugfs_init(sst_drv_ctx);
+	sst_drv_ctx->qos = kzalloc(sizeof(struct pm_qos_request),
+				GFP_KERNEL);
+	if (!sst_drv_ctx->qos) {
+		ret = -ENOMEM;
+		goto do_free_misc;
+	}
+	pm_qos_add_request(sst_drv_ctx->qos, PM_QOS_CPU_DMA_LATENCY,
+				PM_QOS_DEFAULT_VALUE);
+
+	ret = device_create_file(sst_drv_ctx->dev, &dev_attr_audio_recovery);
+	if (ret) {
+		pr_err("could not create sysfs %s file\n",
+			dev_attr_audio_recovery.attr.name);
+		goto do_free_qos;
+	}
+
+	pr_info("%s successfully done!\n", __func__);
+	return ret;
+
+do_free_qos:
+	pm_qos_remove_request(sst_drv_ctx->qos);
+	kfree(sst_drv_ctx->qos);
+do_free_misc:
+	misc_deregister(&lpe_ctrl);
+do_free_irq:
+	free_irq(pci->irq, sst_drv_ctx);
+do_free_probe_bytes:
+	if (sst_drv_ctx->pci_id == SST_CLV_PCI_ID)
+		kfree(sst_drv_ctx->probe_bytes);
+do_free_dram_buf:
+#ifdef CONFIG_DEBUG_FS
+	if (sst_drv_ctx->pci_id == SST_CLV_PCI_ID)
+		kfree(sst_drv_ctx->dump_buf.dram_buf.buf);
+do_free_iram_buf:
+	if (sst_drv_ctx->pci_id == SST_CLV_PCI_ID)
+		kfree(sst_drv_ctx->dump_buf.iram_buf.buf);
+#endif
+do_unmap_dram:
+	iounmap(sst_drv_ctx->dram);
+do_unmap_iram:
+	iounmap(sst_drv_ctx->iram);
+do_unmap_sram:
+	iounmap(sst_drv_ctx->mailbox);
+do_unmap_shim:
+	iounmap(sst_drv_ctx->shim);
+
+do_unmap_ddr:
+	if (sst_drv_ctx->ddr)
+		iounmap(sst_drv_ctx->ddr);
+
+do_release_regions:
+	pci_release_regions(pci);
+do_disable_device:
+	pci_disable_device(pci);
+do_free_mem:
+	destroy_workqueue(sst_drv_ctx->post_msg_wq);
+free_mad_wq:
+	destroy_workqueue(sst_drv_ctx->mad_wq);
+do_free_drv_ctx:
+	sst_drv_ctx = NULL;
+	pr_err("Probe failed with %d\n", ret);
+	return ret;
+}
+
+/**
+ * intel_sst_remove - PCI remove function
+ *
+ * @pci:	PCI device structure
+ *
+ * This function is called by the OS when the device is removed.
+ * It frees the interrupt and other resources.
+ */
+static void intel_sst_remove(struct pci_dev *pci)
+{
+	struct intel_sst_drv *sst_drv_ctx = pci_get_drvdata(pci);
+	sst_debugfs_exit(sst_drv_ctx);
+	pm_runtime_get_noresume(sst_drv_ctx->dev);
+	pm_runtime_forbid(sst_drv_ctx->dev);
+	unregister_sst(sst_drv_ctx->dev);
+	pci_dev_put(sst_drv_ctx->pci);
+	sst_set_fw_state_locked(sst_drv_ctx, SST_UN_INIT);
+	misc_deregister(&lpe_ctrl);
+	free_irq(pci->irq, sst_drv_ctx);
+
+	iounmap(sst_drv_ctx->dram);
+	iounmap(sst_drv_ctx->iram);
+	iounmap(sst_drv_ctx->mailbox);
+	iounmap(sst_drv_ctx->shim);
+#ifdef CONFIG_DEBUG_FS
+	if (sst_drv_ctx->pci_id == SST_CLV_PCI_ID) {
+		kfree(sst_drv_ctx->dump_buf.iram_buf.buf);
+		kfree(sst_drv_ctx->dump_buf.dram_buf.buf);
+	}
+#endif
+	if (sst_drv_ctx->pci_id == SST_CLV_PCI_ID)
+		kfree(sst_drv_ctx->probe_bytes);
+
+	device_remove_file(sst_drv_ctx->dev, &dev_attr_audio_recovery);
+	kfree(sst_drv_ctx->fw_cntx);
+	kfree(sst_drv_ctx->runtime_param.param.addr);
+	flush_scheduled_work();
+	destroy_workqueue(sst_drv_ctx->post_msg_wq);
+	destroy_workqueue(sst_drv_ctx->mad_wq);
+	pm_qos_remove_request(sst_drv_ctx->qos);
+	kfree(sst_drv_ctx->qos);
+	kfree(sst_drv_ctx->fw_sg_list.src);
+	kfree(sst_drv_ctx->fw_sg_list.dst);
+	sst_drv_ctx->fw_sg_list.list_len = 0;
+	kfree(sst_drv_ctx->fw_in_mem);
+	sst_drv_ctx->fw_in_mem = NULL;
+	sst_memcpy_free_resources();
+	sst_drv_ctx = NULL;
+	pci_release_regions(pci);
+	pci_disable_device(pci);
+	pci_set_drvdata(pci, NULL);
+}
+
+inline void sst_save_shim64(struct intel_sst_drv *ctx,
+			    void __iomem *shim,
+			    struct sst_shim_regs64 *shim_regs)
+{
+	unsigned long irq_flags;
+	spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags);
+
+	shim_regs->csr = sst_shim_read64(shim, SST_CSR);
+	shim_regs->pisr = sst_shim_read64(shim, SST_PISR);
+	shim_regs->pimr = sst_shim_read64(shim, SST_PIMR);
+	shim_regs->isrx = sst_shim_read64(shim, SST_ISRX);
+	shim_regs->isrd = sst_shim_read64(shim, SST_ISRD);
+	shim_regs->imrx = sst_shim_read64(shim, SST_IMRX);
+	shim_regs->imrd = sst_shim_read64(shim, SST_IMRD);
+	shim_regs->ipcx = sst_shim_read64(shim, ctx->ipc_reg.ipcx);
+	shim_regs->ipcd = sst_shim_read64(shim, ctx->ipc_reg.ipcd);
+	shim_regs->isrsc = sst_shim_read64(shim, SST_ISRSC);
+	shim_regs->isrlpesc = sst_shim_read64(shim, SST_ISRLPESC);
+	shim_regs->imrsc = sst_shim_read64(shim, SST_IMRSC);
+	shim_regs->imrlpesc = sst_shim_read64(shim, SST_IMRLPESC);
+	shim_regs->ipcsc = sst_shim_read64(shim, SST_IPCSC);
+	shim_regs->ipclpesc = sst_shim_read64(shim, SST_IPCLPESC);
+	shim_regs->clkctl = sst_shim_read64(shim, SST_CLKCTL);
+	shim_regs->csr2 = sst_shim_read64(shim, SST_CSR2);
+
+	spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags);
+}
+
+static inline void sst_restore_shim64(struct intel_sst_drv *ctx,
+				      void __iomem *shim,
+				      struct sst_shim_regs64 *shim_regs)
+{
+	unsigned long irq_flags;
+	spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags);
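+	/* Only the interrupt mask (IMRX) captured by sst_save_shim64() is restored */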
+	sst_shim_write64(shim, SST_IMRX, shim_regs->imrx);
+	spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags);
+}
+
+/*
+ * The runtime_suspend/resume is pretty much similar to the legacy
+ * suspend/resume with the noted exception below: The PCI core takes care of
+ * taking the system through D3hot and restoring it back to D0 and so there is
+ * no need to duplicate that here.
+ */
+static int intel_sst_runtime_suspend(struct device *dev)
+{
+	union config_status_reg csr;
+	int ret = 0;
+	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+	pr_info("runtime_suspend called\n");
+	if (ctx->sst_state == SST_UN_INIT) {
+		pr_debug("LPE is already in UNINIT state, No action");
+		return 0;
+	}
+	/*save fw context*/
+	if (ctx->ops->save_dsp_context(ctx))
+		return -EBUSY;
+
+	if (ctx->pci_id == SST_CLV_PCI_ID) {
+		/*Assert RESET on LPE Processor*/
+		csr.full = sst_shim_read(ctx->shim, SST_CSR);
+		ctx->csr_value = csr.full;
+		csr.full = csr.full | 0x2;
+		sst_shim_write(ctx->shim, SST_CSR, csr.full);
+	}
+
+	/* Move the SST state to Suspended */
+	sst_set_fw_state_locked(ctx, SST_SUSPENDED);
+
+	flush_workqueue(ctx->post_msg_wq);
+	synchronize_irq(ctx->irq_num);
+
+	if (ctx->pci_id == SST_BYT_PCI_ID || ctx->pci_id == SST_CHT_PCI_ID) {
+		/* save the shim registers because PMC doesn't save state */
+		sst_save_shim64(ctx, ctx->shim, ctx->shim_regs64);
+	}
+	return ret;
+}
+
+static int intel_sst_runtime_resume(struct device *dev)
+{
+	u32 csr;
+	int ret = 0;
+	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+	pr_info("runtime_resume called\n");
+
+	if (ctx->pci_id == SST_BYT_PCI_ID || ctx->pci_id == SST_CHT_PCI_ID) {
+		/* wait for the device to power up, per the PCI spec */
+		usleep_range(10000, 11000);
+		sst_restore_shim64(ctx, ctx->shim, ctx->shim_regs64);
+	}
+
+	if (ctx->pci_id == SST_CLV_PCI_ID) {
+		csr = sst_shim_read(ctx->shim, SST_CSR);
+		/*
+		 * To restore the csr_value after S0ix and S3 states.
+		 * The value 0x30000 is to enable LPE dram high and low addresses.
+		 * Reference:
+		 * Penwell Audio Voice Module HAS 1.61 Section - 13.12.1 -
+		 * CSR - Configuration and Status Register.
+		 */
+		csr |= (ctx->csr_value | 0x30000);
+		sst_shim_write(ctx->shim, SST_CSR, csr);
+		if (ctx->pdata->ssp_data) {
+			if (ctx->pdata->ssp_data->gpio_in_use)
+				sst_set_gpio_conf(&ctx->pdata->ssp_data->gpio);
+		}
+	}
+	/* When fw_clear_cache is set, clear the cached firmware copy */
+	/* fw_clear_cache is set through debugfs support */
+	if (atomic_read(&ctx->fw_clear_cache) && ctx->fw_in_mem) {
+		pr_debug("Clearing the cached firmware\n");
+		kfree(ctx->fw_in_mem);
+		ctx->fw_in_mem = NULL;
+		atomic_set(&ctx->fw_clear_cache, 0);
+	}
+
+	sst_set_fw_state_locked(ctx, SST_UN_INIT);
+	return ret;
+}
+
+static int intel_sst_suspend(struct device *dev)
+{
+	int retval = 0, usage_count;
+	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+	usage_count = atomic_read(&ctx->pm_usage_count);
+	if (usage_count) {
+		pr_err("Ret error for suspend:%d\n", usage_count);
+		return -EBUSY;
+	}
+	retval = intel_sst_runtime_suspend(dev);
+
+	return retval;
+}
+
+static int intel_sst_runtime_idle(struct device *dev)
+{
+	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+	pr_info("runtime_idle called\n");
+	if (ctx->sst_state != SST_UN_INIT) {
+		pm_schedule_suspend(dev, SST_SUSPEND_DELAY);
+		return -EBUSY;
+	}
+	return 0;
+
+}
+
+static void sst_do_shutdown(struct intel_sst_drv *ctx)
+{
+	int retval = 0;
+	unsigned int pvt_id;
+	struct ipc_post *msg = NULL;
+	struct sst_block *block = NULL;
+
+	pr_debug(" %s called\n", __func__);
+	if (ctx->sst_state == SST_SUSPENDED ||
+			ctx->sst_state == SST_UN_INIT) {
+		sst_set_fw_state_locked(ctx, SST_SHUTDOWN);
+		pr_debug("sst is already in suspended/un-int state\n");
+		return;
+	}
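+	/*
+	 * Only firmwares using the 32-bit IPC protocol are sent an
+	 * explicit IPC_IA_PREPARE_SHUTDOWN message below.
+	 */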
+	if (!ctx->use_32bit_ops)
+		return;
+
+	sst_set_fw_state_locked(ctx, SST_SHUTDOWN);
+	flush_workqueue(ctx->post_msg_wq);
+	pvt_id = sst_assign_pvt_id(ctx);
+	retval = sst_create_block_and_ipc_msg(&msg, false,
+			ctx, &block,
+			IPC_IA_PREPARE_SHUTDOWN, pvt_id);
+	if (retval) {
+		pr_err("sst_create_block returned error!\n");
+		return;
+	}
+	sst_fill_header(&msg->header, IPC_IA_PREPARE_SHUTDOWN, 0, pvt_id);
+	sst_add_to_dispatch_list_and_post(ctx, msg);
+	sst_wait_timeout(ctx, block);
+	sst_free_block(ctx, block);
+}
+
+/**
+ * sst_pci_shutdown - PCI shutdown function
+ *
+ * @pci:	PCI device structure
+ *
+ * This function is called by the OS at device shutdown/reboot.
+ */
+static void sst_pci_shutdown(struct pci_dev *pci)
+{
+	struct intel_sst_drv *ctx = pci_get_drvdata(pci);
+
+	pr_debug(" %s called\n", __func__);
+
+	sst_do_shutdown(ctx);
+	disable_irq_nosync(pci->irq);
+}
+
+/**
+ * sst_acpi_shutdown - platform shutdown function
+ *
+ * @pdev:	platform device structure
+ *
+ * This function is called by the OS at device shutdown/reboot.
+ */
+static void sst_acpi_shutdown(struct platform_device *pdev)
+{
+	struct intel_sst_drv *ctx = platform_get_drvdata(pdev);
+	int irq = platform_get_irq(pdev, 0);
+
+	pr_debug(" %s called\n", __func__);
+
+	sst_do_shutdown(ctx);
+	disable_irq_nosync(irq);
+}
+
+static const struct dev_pm_ops intel_sst_pm = {
+	.suspend = intel_sst_suspend,
+	.resume = intel_sst_runtime_resume,
+	.runtime_suspend = intel_sst_runtime_suspend,
+	.runtime_resume = intel_sst_runtime_resume,
+	.runtime_idle = intel_sst_runtime_idle,
+};
+
+static const struct acpi_device_id sst_acpi_ids[];
+
+struct sst_platform_info *sst_get_acpi_driver_data(const char *hid)
+{
+	const struct acpi_device_id *id;
+
+	pr_debug("%s", __func__);
+	for (id = sst_acpi_ids; id->id[0]; id++)
+		if (!strncmp(id->id, hid, 16))
+			return (struct sst_platform_info *)id->driver_data;
+	return NULL;
+}
+
+/* PCI Routines */
+static DEFINE_PCI_DEVICE_TABLE(intel_sst_ids) = {
+	{ PCI_VDEVICE(INTEL, SST_CLV_PCI_ID), 0},
+	{ PCI_VDEVICE(INTEL, SST_MRFLD_PCI_ID), 0},
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, intel_sst_ids);
+
+static const struct acpi_device_id sst_acpi_ids[] = {
+	{ "LPE0F28",  (kernel_ulong_t) &byt_rvp_platform_data },
+	{ "LPE0F281", (kernel_ulong_t) &byt_ffrd8_platform_data },
+	{ "80860F28", (kernel_ulong_t) &byt_ffrd8_platform_data },
+	{ "808622A8", (kernel_ulong_t) &cht_platform_data },
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, sst_acpi_ids);
+
+static struct pci_driver driver = {
+	.name = SST_DRV_NAME,
+	.id_table = intel_sst_ids,
+	.probe = intel_sst_probe,
+	.remove = intel_sst_remove,
+	.shutdown = sst_pci_shutdown,
+#ifdef CONFIG_PM
+	.driver = {
+		.pm = &intel_sst_pm,
+	},
+#endif
+};
+
+static struct platform_driver sst_acpi_driver = {
+	.driver = {
+		.name			= "intel_sst_acpi",
+		.owner			= THIS_MODULE,
+		.acpi_match_table	= ACPI_PTR(sst_acpi_ids),
+		.pm			= &intel_sst_pm,
+	},
+	.probe	= sst_acpi_probe,
+	.remove	= sst_acpi_remove,
+	.shutdown = sst_acpi_shutdown,
+};
+
+
+/**
+ * intel_sst_init - module init function
+ *
+ * Registers with PCI and the platform (ACPI) bus,
+ * registers with /dev, and initializes all data structures.
+ */
+static int __init intel_sst_init(void)
+{
+	/* Init all variables, data structure etc....*/
+	int ret = 0;
+	pr_info("INFO: ******** SST DRIVER loading.. Ver: %s\n",
+				       SST_DRIVER_VERSION);
+
+	mutex_init(&drv_ctx_lock);
+	/* Register with PCI */
+	ret = pci_register_driver(&driver);
+	if (ret)
+		pr_err("PCI register failed\n");
+
+	ret = platform_driver_register(&sst_acpi_driver);
+	if (ret)
+		pr_err("ACPI register failed\n");
+	return ret;
+}
+
+/**
+ * intel_sst_exit - module exit function
+ *
+ * Unregisters with PCI and the platform bus,
+ * unregisters with /dev, and frees all data structures.
+ */
+static void __exit intel_sst_exit(void)
+{
+	pci_unregister_driver(&driver);
+	platform_driver_unregister(&sst_acpi_driver);
+
+	pr_debug("driver unloaded\n");
+	sst_drv_ctx = NULL;
+}
+
+module_init(intel_sst_init);
+module_exit(intel_sst_exit);
diff --git a/sound/soc/intel/sst/sst.h b/sound/soc/intel/sst/sst.h
new file mode 100644
index 0000000..4ef3efa
--- /dev/null
+++ b/sound/soc/intel/sst/sst.h
@@ -0,0 +1,932 @@
+/*
+ *  sst.h - Intel SST Driver for audio engine
+ *
+ *  Copyright (C) 2008-10 Intel Corporation
+ *  Authors:	Vinod Koul <vinod.koul@intel.com>
+ *		Harsha Priya <priya.harsha@intel.com>
+ *		Dharageswari R <dharageswari.r@intel.com>
+ *		KP Jeeja <jeeja.kp@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  Common private declarations for SST
+ */
+#ifndef __SST_H__
+#define __SST_H__
+
+#include <linux/dmaengine.h>
+#include <linux/pm_runtime.h>
+#include <linux/firmware.h>
+#include <linux/intel_mid_dma.h>
+#include <linux/lnw_gpio.h>
+#include <asm/platform_sst.h>
+#include <sound/intel_sst_ioctl.h>
+
+#define SST_DRIVER_VERSION "3.0.8"
+
+/* driver names */
+#define SST_DRV_NAME "intel_sst_driver"
+#define SST_CLV_PCI_ID	0x08E7
+#define SST_MRFLD_PCI_ID 0x119A
+#define SST_BYT_PCI_ID  0x0F28
+#define SST_CHT_PCI_ID 0x22A8
+
+#define SST_SUSPEND_DELAY 2000
+#define FW_CONTEXT_MEM (64*1024)
+#define SST_ICCM_BOUNDARY 4
+#define SST_CONFIG_SSP_SIGN 0x7ffe8001
+
+/* FIXME: All this info should come from platform data
+ * move this when the base framework is ready to pass
+ * platform data to SST driver
+ */
+#define MRFLD_FW_VIRTUAL_BASE 0xC0000000
+#define MRFLD_FW_DDR_BASE_OFFSET 0x0
+#define MRFLD_FW_FEATURE_BASE_OFFSET 0x4
+#define MRFLD_FW_BSS_RESET_BIT 0
+enum sst_states {
+	SST_FW_LOADED = 1,
+	SST_FW_RUNNING,
+	SST_START_INIT,
+	SST_UN_INIT,
+	SST_ERROR,
+	SST_SUSPENDED,
+	SST_FW_CTXT_RESTORE,
+	SST_SHUTDOWN,
+	SST_FW_LIB_LOAD,
+};
+
+enum sst_algo_ops {
+	SST_SET_ALGO = 0,
+	SST_GET_ALGO = 1,
+};
+
+#define SST_BLOCK_TIMEOUT	1000
+
+/* SST register map */
+#define SST_CSR			0x00
+#define SST_PISR		0x08
+#define SST_PIMR		0x10
+#define SST_ISRX		0x18
+#define SST_ISRD		0x20
+#define SST_IMRX		0x28
+#define SST_IMRD		0x30
+#define SST_IPCX		0x38 /* IPC IA-SST */
+#define SST_IPCD		0x40 /* IPC SST-IA */
+#define SST_ISRSC		0x48
+#define SST_ISRLPESC		0x50
+#define SST_IMRSC		0x58
+#define SST_IMRLPESC		0x60
+#define SST_IPCSC		0x68
+#define SST_IPCLPESC		0x70
+#define SST_CLKCTL		0x78
+#define SST_CSR2		0x80
+
+#define SST_SHIM_BEGIN		SST_CSR
+#define SST_SHIM_END		SST_CSR2
+#define SST_SHIM_SIZE		0x88
+
+#define FW_SIGNATURE_SIZE	4
+
+/* stream states */
+enum sst_stream_states {
+	STREAM_UN_INIT	= 0,	/* Freed/Not used stream */
+	STREAM_RUNNING	= 1,	/* Running */
+	STREAM_PAUSED	= 2,	/* Paused stream */
+	STREAM_DECODE	= 3,	/* stream is in decoding only state */
+	STREAM_INIT	= 4,	/* stream init, waiting for data */
+	STREAM_RESET	= 5,	/* force reset on recovery */
+};
+
+enum sst_ram_type {
+	SST_IRAM	= 1,
+	SST_DRAM	= 2,
+};
+
+/* SST shim registers to structure mapping  */
+union config_status_reg {
+	struct {
+		u32 mfld_strb:1;
+		u32 sst_reset:1;
+		u32 clk_sel:3;
+		u32 sst_clk:2;
+		u32 bypass:3;
+		u32 run_stall:1;
+		u32 rsvd1:2;
+		u32 strb_cntr_rst:1;
+		u32 rsvd:18;
+	} part;
+	u32 full;
+};
+
+union interrupt_reg {
+	struct {
+		u64 done_interrupt:1;
+		u64 busy_interrupt:1;
+		u64 rsvd:62;
+	} part;
+	u64 full;
+};
+
+union sst_imr_reg {
+	struct {
+		u32 done_interrupt:1;
+		u32 busy_interrupt:1;
+		u32 rsvd:30;
+	} part;
+	u32 full;
+};
+
+union sst_pisr_reg {
+	struct {
+		u32 pssp0:1;
+		u32 pssp1:1;
+		u32 rsvd0:3;
+		u32 dmac:1;
+		u32 rsvd1:26;
+	} part;
+	u32 full;
+};
+
+union sst_pimr_reg {
+	struct {
+		u32 ssp0:1;
+		u32 ssp1:1;
+		u32 rsvd0:3;
+		u32 dmac:1;
+		u32 rsvd1:10;
+		u32 ssp0_sc:1;
+		u32 ssp1_sc:1;
+		u32 rsvd2:3;
+		u32 dmac_sc:1;
+		u32 rsvd3:10;
+	} part;
+	u32 full;
+};
+
+union config_status_reg_mrfld {
+	struct {
+		u64 lpe_reset:1;
+		u64 lpe_reset_vector:1;
+		u64 runstall:1;
+		u64 pwaitmode:1;
+		u64 clk_sel:3;
+		u64 rsvd2:1;
+		u64 sst_clk:3;
+		u64 xt_snoop:1;
+		u64 rsvd3:4;
+		u64 clk_sel1:6;
+		u64 clk_enable:3;
+		u64 rsvd4:6;
+		u64 slim0baseclk:1;
+		u64 rsvd:32;
+	} part;
+	u64 full;
+};
+
+union interrupt_reg_mrfld {
+	struct {
+		u64 done_interrupt:1;
+		u64 busy_interrupt:1;
+		u64 rsvd:62;
+	} part;
+	u64 full;
+};
+
+union sst_imr_reg_mrfld {
+	struct {
+		u64 done_interrupt:1;
+		u64 busy_interrupt:1;
+		u64 rsvd:62;
+	} part;
+	u64 full;
+};
+
+/*
+ * This structure is used to block a user/fw data call until the
+ * corresponding fw/user reply arrives.
+ */
+struct sst_block {
+	bool	condition; /* condition for blocking check */
+	int	ret_code; /* ret code when block is released */
+	void	*data; /* data to be passed for the block, if any */
+	u32     size;
+	bool	on;
+	u32     msg_id;  /*msg_id = msgid in mfld/ctp, mrfld = 0 */
+	u32     drv_id; /* = str_id in mfld/ctp, = drv_id in mrfld*/
+	struct list_head node;
+};
+
+/**
+ * struct stream_info - structure that holds the stream information
+ *
+ * @status : stream current state
+ * @prev : stream previous state
+ * @ops : stream operation pb/cp/drm...
+ * @lock : stream mutex for protecting state
+ * @pcm_substream : PCM substream
+ * @period_elapsed : PCM period elapsed callback
+ * @sfreq : stream sampling freq
+ * @cumm_bytes : cumulative bytes decoded
+ * @compr_cb : compressed fragment elapsed callback
+ * @drain_notify : drain complete callback
+ * @num_ch : number of channels
+ * @pipe_id : pipe id of the stream
+ * @str_id : stream id
+ * @task_id : task id owning the stream
+ */
+struct stream_info {
+	unsigned int		status;
+	unsigned int		prev;
+	unsigned int		ops;
+	struct mutex		lock; /* mutex */
+	void			*pcm_substream;
+	void (*period_elapsed)	(void *pcm_substream);
+	unsigned int		sfreq;
+	u32			cumm_bytes;
+	void			*compr_cb_param;
+	void (*compr_cb)	(void *compr_cb_param);
+	void			*drain_cb_param;
+	void (*drain_notify)	(void *drain_cb_param);
+
+	unsigned int		num_ch;
+	unsigned int		pipe_id;
+	unsigned int		str_id;
+	unsigned int		task_id;
+};
+
+#define SST_FW_SIGN "$SST"
+#define SST_FW_LIB_SIGN "$LIB"
+
+/*
+ * struct fw_header - FW file headers
+ *
+ * @signature : FW signature
+ * @file_size : size of the FW image minus this header
+ * @modules : # of modules
+ * @file_format : version of header format
+ * @reserved : reserved fields
+ */
+struct fw_header {
+	unsigned char signature[FW_SIGNATURE_SIZE]; /* FW signature */
+	u32 file_size; /* size of fw minus this header */
+	u32 modules; /*  # of modules */
+	u32 file_format; /* version of header format */
+	u32 reserved[4];
+};
+
+struct fw_module_header {
+	unsigned char signature[FW_SIGNATURE_SIZE]; /* module signature */
+	u32 mod_size; /* size of module */
+	u32 blocks; /* # of blocks */
+	u32 type; /* codec type, pp lib */
+	u32 entry_point;
+};
+
+struct fw_block_info {
+	enum sst_ram_type	type;	/* IRAM/DRAM */
+	u32			size;	/* Bytes */
+	u32			ram_offset; /* Offset in I/DRAM */
+	u32			rsvd;	/* Reserved field */
+};
+
+struct sst_ipc_msg_wq {
+	union ipc_header_mrfld mrfld_header;
+	struct ipc_dsp_hdr dsp_hdr;
+	char mailbox[SST_MAILBOX_SIZE];
+	struct work_struct	wq;
+	union ipc_header header;
+};
+
+struct sst_dma {
+	struct dma_chan *ch;
+	struct intel_mid_dma_slave slave;
+	struct device *dev;
+};
+
+struct sst_runtime_param {
+	struct snd_sst_runtime_params param;
+};
+
+struct sst_sg_list {
+	struct scatterlist *src;
+	struct scatterlist *dst;
+	int list_len;
+	unsigned int sg_idx;
+};
+
+struct sst_memcpy_list {
+	struct list_head memcpylist;
+	void *dstn;
+	const void *src;
+	u32 size;
+	bool is_io;
+};
+
+struct sst_debugfs {
+#ifdef CONFIG_DEBUG_FS
+	struct dentry		*root;
+#endif
+	int			runtime_pm_status;
+	void __iomem            *ssp[SST_MAX_SSP_PORTS];
+	void __iomem            *dma_reg[SST_MAX_DMA];
+	unsigned char get_params_data[1024];
+	ssize_t get_params_len;
+};
+
+struct lpe_log_buf_hdr {
+	u32 base_addr;
+	u32 end_addr;
+	u32 rd_addr;
+	u32 wr_addr;
+};
+
+struct snd_ssp_config {
+	int size;
+	char bytes[0];
+};
+
+struct snd_sst_probe_bytes {
+	u16 len;
+	char bytes[0];
+};
+
+#define PCI_DMAC_CLV_ID 0x08F0
+#define PCI_DMAC_MRFLD_ID 0x119B
+
+struct sst_ram_buf {
+	u32 size;
+	char *buf;
+};
+
+/* Firmware Module Information*/
+
+enum sst_lib_dwnld_status {
+	SST_LIB_NOT_FOUND = 0,
+	SST_LIB_FOUND,
+	SST_LIB_DOWNLOADED,
+};
+
+struct sst_module_info {
+	const char *name; /* Library name */
+	u32	id; /* Module ID */
+	u32	entry_pt; /* Module entry point */
+	u8	status; /* module status*/
+	u8	rsvd1;
+	u16	rsvd2;
+};
+
+/* Structure for managing the Library Region(1.5MB)
+ * in DDR in Merrifield
+ */
+struct sst_mem_mgr {
+	phys_addr_t current_base;
+	int avail;
+	unsigned int count;
+};
+
+struct sst_dump_buf {
+	/* buffers for iram-dram dump crash */
+	struct sst_ram_buf iram_buf;
+	struct sst_ram_buf dram_buf;
+};
+
+struct sst_ipc_reg {
+	int ipcx;
+	int ipcd;
+};
+
+struct sst_shim_regs64 {
+	u64 csr;
+	u64 pisr;
+	u64 pimr;
+	u64 isrx;
+	u64 isrd;
+	u64 imrx;
+	u64 imrd;
+	u64 ipcx;
+	u64 ipcd;
+	u64 isrsc;
+	u64 isrlpesc;
+	u64 imrsc;
+	u64 imrlpesc;
+	u64 ipcsc;
+	u64 ipclpesc;
+	u64 clkctl;
+	u64 csr2;
+};
+
+struct sst_vtsv_cache {
+	void *file1_in_mem;
+	u32 size1;
+	void *file2_in_mem;
+	u32 size2;
+};
+
+/**
+ * struct intel_sst_drv - driver ops
+ *
+ * @sst_state : current sst device state
+ * @pci_id : PCI device id loaded
+ * @shim : SST shim pointer
+ * @mailbox : SST mailbox pointer
+ * @iram : SST IRAM pointer
+ * @dram : SST DRAM pointer
+ * @pdata : SST info passed as a part of pci platform data
+ * @shim_phy_add : SST shim phy addr
+ * @shim_regs64 : struct to save shim registers
+ * @ipc_dispatch_list : ipc messages dispatched
+ * @rx_list : to copy the process_reply/process_msg from DSP
+ * @ipc_post_msg_wq : wq to post IPC messages context
+ * @ipc_post_msg : wq to post reply from FW context
+ * @mad_wq : MAD driver wq
+ * @post_msg_wq : wq to post IPC messages
+ * @streams : sst stream contexts
+ * @ipc_spin_lock : spin lock to handle audio shim access and ipc queue
+ * @rx_msg_lock : spin lock to handle the rx messages from the DSP
+ * @pci : sst pci device structure
+ * @dev : pointer to current device struct
+ * @sst_lock : sst device lock
+ * @stream_lock : sst stream lock
+ * @pvt_id : sst private id
+ * @stream_cnt : total sst active stream count
+ * @qos : PM QoS struct
+ * @firmware_name : firmware / library name
+ */
+struct intel_sst_drv {
+	int			sst_state;
+	int			irq_num;
+	unsigned int		pci_id;
+	bool			use_32bit_ops;
+	void __iomem		*ddr;
+	void __iomem		*shim;
+	void __iomem		*mailbox;
+	void __iomem		*iram;
+	void __iomem		*dram;
+	unsigned int		mailbox_add;
+	unsigned int		iram_base;
+	unsigned int		dram_base;
+	unsigned int		shim_phy_add;
+	unsigned int		iram_end;
+	unsigned int		dram_end;
+	unsigned int		ddr_end;
+	unsigned int		ddr_base;
+	unsigned int		mailbox_recv_offset;
+	atomic_t		pm_usage_count;
+	struct sst_shim_regs64	*shim_regs64;
+	struct list_head        block_list;
+	struct list_head	ipc_dispatch_list;
+	struct sst_platform_info *pdata;
+	struct sst_ipc_msg_wq   ipc_post_msg;
+	struct list_head	rx_list;
+	struct work_struct      ipc_post_msg_wq;
+	wait_queue_head_t	wait_queue;
+	struct workqueue_struct *mad_wq;
+	struct workqueue_struct *post_msg_wq;
+	unsigned int		tstamp;
+	struct stream_info	streams[MAX_NUM_STREAMS+1]; /*str_id 0 is not used*/
+	spinlock_t		ipc_spin_lock; /* lock for Shim reg access and ipc queue */
+	spinlock_t              block_lock; /* lock for adding block to block_list */
+	spinlock_t              pvt_id_lock; /* lock for allocating private id */
+	spinlock_t		rx_msg_lock;
+	struct pci_dev		*pci;
+	struct device		*dev;
+	unsigned int		pvt_id;
+	struct mutex            sst_lock;
+	struct mutex		stream_lock;
+	unsigned int		stream_cnt;
+	unsigned int		*fw_cntx;
+	unsigned int		fw_cntx_size;
+	unsigned int		csr_value;
+	struct sst_dma		dma;
+	void			*fw_in_mem;
+	struct sst_runtime_param runtime_param;
+	unsigned int		device_input_mixer;
+	struct mutex		mixer_ctrl_lock;
+	struct dma_async_tx_descriptor *desc;
+	struct sst_sg_list	fw_sg_list, library_list;
+	struct intel_sst_ops	*ops;
+	struct sst_debugfs	debugfs;
+	struct pm_qos_request	*qos;
+	struct sst_info	info;
+	unsigned int		use_dma;
+	unsigned int		use_lli;
+	atomic_t		fw_clear_context;
+	atomic_t		fw_clear_cache;
+	bool			lib_dwnld_reqd;
+	/* list used during FW download in memcpy mode */
+	struct list_head	memcpy_list;
+	/* list used during LIB download in memcpy mode */
+	struct list_head	libmemcpy_list;
+	/* holds the structs of iram/dram local buffers for dump */
+	struct sst_dump_buf	dump_buf;
+	/* Lock for CSR register change */
+	struct mutex		csr_lock;
+	/* byte control to set the probe stream */
+	struct snd_sst_probe_bytes *probe_bytes;
+	/* contains the ipc registers */
+	struct sst_ipc_reg	ipc_reg;
+	/* IMR region Library space memory manager */
+	struct sst_mem_mgr      lib_mem_mgr;
+	/* Contains the cached vtsv files*/
+	struct sst_vtsv_cache	vcache;
+	/* Pointer to device ID; for the same PCI_ID, the HID will
+	 * differ between FDK and EDK2. This is used for devices
+	 * where the PCI or ACPI id is the same but the HID differs.
+	 */
+	const char *hid;
+	/* Holder for firmware name. Due to async call it needs to be
+	 * persistent till worker thread gets called
+	 */
+	char firmware_name[20];
+};
+
+extern struct intel_sst_drv *sst_drv_ctx;
+extern struct sst_platform_info byt_rvp_platform_data;
+extern struct sst_platform_info byt_ffrd8_platform_data;
+extern struct sst_platform_info cht_platform_data;
+
+/* misc definitions */
+#define FW_DWNL_ID 0xFF
+
+struct sst_fill_config {
+	u32 sign;
+	struct sst_board_config_data sst_bdata;
+	struct sst_platform_config_data sst_pdata;
+	u32 shim_phy_add;
+	u32 mailbox_add;
+} __packed;
+
+struct intel_sst_ops {
+	irqreturn_t (*interrupt) (int, void *);
+	irqreturn_t (*irq_thread) (int, void *);
+	void (*clear_interrupt) (void);
+	int (*start) (void);
+	int (*reset) (void);
+	void (*process_reply) (struct ipc_post *msg);
+	void (*post_message) (struct work_struct *work);
+	int (*sync_post_message) (struct ipc_post *msg);
+	void (*process_message) (struct ipc_post *msg);
+	void (*set_bypass)(bool set);
+	int (*save_dsp_context) (struct intel_sst_drv *sst);
+	void (*restore_dsp_context) (void);
+	int (*alloc_stream) (char *params, struct sst_block *block);
+	void (*post_download)(struct intel_sst_drv *sst);
+	void (*do_recovery)(struct intel_sst_drv *sst);
+};
+
+int sst_alloc_stream(char *params, struct sst_block *block);
+int sst_pause_stream(int id);
+int sst_resume_stream(int id);
+int sst_drop_stream(int id);
+int sst_next_track(void);
+int sst_free_stream(int id);
+int sst_start_stream(int str_id);
+int sst_send_byte_stream_mrfld(void *sbytes);
+int sst_send_probe_bytes(struct intel_sst_drv *sst);
+int sst_set_stream_param(int str_id, struct snd_sst_params *str_param);
+int sst_set_metadata(int str_id, char *params);
+int sst_get_stream(struct snd_sst_params *str_param);
+int sst_get_stream_allocated(struct snd_sst_params *str_param,
+				struct snd_sst_lib_download **lib_dnld);
+int sst_drain_stream(int str_id, bool partial_drain);
+
+
+int sst_sync_post_message_mfld(struct ipc_post *msg);
+void sst_post_message_mfld(struct work_struct *work);
+void sst_process_message_mfld(struct ipc_post *msg);
+void sst_process_reply_mfld(struct ipc_post *msg);
+int sst_start_mfld(void);
+int intel_sst_reset_dsp_mfld(void);
+void intel_sst_clear_intr_mfld(void);
+void intel_sst_set_bypass_mfld(bool set);
+
+int sst_sync_post_message_mrfld(struct ipc_post *msg);
+void sst_post_message_mrfld(struct work_struct *work);
+void sst_process_message_mrfld(struct ipc_post *msg);
+void sst_process_reply_mrfld(struct ipc_post *msg);
+int sst_start_mrfld(void);
+int intel_sst_reset_dsp_mrfld(void);
+void intel_sst_clear_intr_mrfld(void);
+void sst_process_mad_ops(struct work_struct *work);
+
+long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd,
+			unsigned long arg);
+int intel_sst_open_cntrl(struct inode *i_node, struct file *file_ptr);
+int intel_sst_release_cntrl(struct inode *i_node, struct file *file_ptr);
+
+int sst_load_fw(void);
+int sst_load_library(struct snd_sst_lib_download *lib, u8 ops);
+int sst_load_all_modules_elf(struct intel_sst_drv *ctx,
+		struct sst_module_info *mod_table, int mod_table_size);
+int sst_get_next_lib_mem(struct sst_mem_mgr *mgr, int size,
+			unsigned long *lib_base);
+void sst_post_download_ctp(struct intel_sst_drv *ctx);
+void sst_post_download_mrfld(struct intel_sst_drv *ctx);
+void sst_post_download_byt(struct intel_sst_drv *ctx);
+int sst_get_block_stream(struct intel_sst_drv *sst_drv_ctx);
+void sst_memcpy_free_resources(void);
+
+int sst_wait_interruptible(struct intel_sst_drv *sst_drv_ctx,
+				struct sst_block *block);
+int sst_wait_timeout(struct intel_sst_drv *sst_drv_ctx,
+			struct sst_block *block);
+int sst_create_ipc_msg(struct ipc_post **arg, bool large);
+int sst_download_fw(void);
+int free_stream_context(unsigned int str_id);
+void sst_clean_stream(struct stream_info *stream);
+int intel_sst_register_compress(struct intel_sst_drv *sst);
+int intel_sst_remove_compress(struct intel_sst_drv *sst);
+void sst_cdev_fragment_elapsed(int str_id);
+int sst_send_sync_msg(int ipc, int str_id);
+int sst_get_num_channel(struct snd_sst_params *str_param);
+int sst_get_sfreq(struct snd_sst_params *str_param);
+int intel_sst_check_device(void);
+int sst_alloc_stream_ctp(char *params, struct sst_block *block);
+int sst_alloc_stream_mrfld(char *params, struct sst_block *block);
+void sst_restore_fw_context(void);
+struct sst_block *sst_create_block(struct intel_sst_drv *ctx,
+				u32 msg_id, u32 drv_id);
+int sst_create_block_and_ipc_msg(struct ipc_post **arg, bool large,
+		struct intel_sst_drv *sst_drv_ctx, struct sst_block **block,
+		u32 msg_id, u32 drv_id);
+int sst_free_block(struct intel_sst_drv *ctx, struct sst_block *freed);
+int sst_wake_up_block(struct intel_sst_drv *ctx, int result,
+		u32 drv_id, u32 ipc, void *data, u32 size);
+int sst_alloc_drv_context(struct device *dev);
+int sst_request_firmware_async(struct intel_sst_drv *ctx);
+int sst_driver_ops(struct intel_sst_drv *sst);
+struct sst_platform_info *sst_get_acpi_driver_data(const char *hid);
+int sst_acpi_probe(struct platform_device *pdev);
+int sst_acpi_remove(struct platform_device *pdev);
+void sst_save_shim64(struct intel_sst_drv *ctx, void __iomem *shim,
+		     struct sst_shim_regs64 *shim_regs);
+void sst_firmware_load_cb(const struct firmware *fw, void *context);
+int sst_send_vtsv_data_to_fw(struct intel_sst_drv *ctx);
+
+void sst_do_recovery_mrfld(struct intel_sst_drv *sst);
+void sst_do_recovery(struct intel_sst_drv *sst);
+long intel_sst_ioctl_dsp(unsigned int cmd,
+		struct snd_ppp_params *algo_params, unsigned long arg);
+
+void sst_dump_to_buffer(const void *from, size_t from_len, char *buf);
+
+extern int intel_scu_ipc_simple_command(int, int);
+
+static inline int sst_pm_runtime_put(struct intel_sst_drv *sst_drv)
+{
+	int ret;
+
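+	/*
+	 * pm_usage_count shadows the runtime-PM reference count;
+	 * intel_sst_suspend() refuses to suspend while it is non-zero.
+	 */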
+	ret = pm_runtime_put_sync(sst_drv->dev);
+	if (ret < 0)
+		return ret;
+	atomic_dec(&sst_drv->pm_usage_count);
+
+	pr_debug("%s: count is %d now..\n", __func__,
+			atomic_read(&sst_drv->pm_usage_count));
+	return 0;
+}
+/*
+ * sst_fill_header - inline to fill sst header
+ *
+ * @header : ipc header
+ * @msg : IPC message to be sent
+ * @large : is ipc large msg
+ * @str_id : stream id
+ *
+ * this function is an inline function that sets the headers before
+ * sending a message
+ */
+static inline void sst_fill_header(union ipc_header *header,
+				int msg, int large, int str_id)
+{
+	header->part.msg_id = msg;
+	header->part.str_id = str_id;
+	header->part.large = large;
+	header->part.done = 0;
+	header->part.busy = 1;
+	header->part.data = 0;
+}
+
+
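+/*
+ * sst_fill_header_mrfld - fill the 64-bit MRFLD IPC header
+ *
+ * Counterpart of sst_fill_header() above for the 64-bit header layout
+ * used by the MRFLD-class (non-32-bit-ops) firmware.
+ */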
+static inline void sst_fill_header_mrfld(union ipc_header_mrfld *header,
+				int msg, int task_id, int large, int drv_id)
+{
+	header->full = 0;
+	header->p.header_high.part.msg_id = msg;
+	header->p.header_high.part.task_id = task_id;
+	header->p.header_high.part.large = large;
+	header->p.header_high.part.drv_id = drv_id;
+	header->p.header_high.part.done = 0;
+	header->p.header_high.part.busy = 1;
+	header->p.header_high.part.res_rqd = 1;
+}
+
+static inline void sst_fill_header_dsp(struct ipc_dsp_hdr *dsp, int msg,
+					int pipe_id, int len)
+{
+	dsp->cmd_id = msg;
+	dsp->mod_index_id = 0xff;
+	dsp->pipe_id = pipe_id;
+	dsp->length = len;
+	dsp->mod_id = 0;
+}
+
+#define MAX_BLOCKS 15
+
+/*
+ * sst_assign_pvt_id - assign a pvt id for stream
+ *
+ * @sst_drv_ctx : driver context
+ *
+ * this inline function assigns a private id for calls that don't have a
+ * stream context yet
+ */
+static inline unsigned int sst_assign_pvt_id(struct intel_sst_drv *sst_drv_ctx)
+{
+	unsigned int local;
+
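+	/* ids cycle through 1..MAX_BLOCKS; 0 is reserved and never handed out */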
+	spin_lock(&sst_drv_ctx->pvt_id_lock);
+	sst_drv_ctx->pvt_id++;
+	if (sst_drv_ctx->pvt_id > MAX_BLOCKS)
+		sst_drv_ctx->pvt_id = 1;
+	local = sst_drv_ctx->pvt_id;
+	spin_unlock(&sst_drv_ctx->pvt_id_lock);
+	return local;
+}
+
+
+/*
+ * sst_init_stream - this function initializes the stream context
+ *
+ * @stream : stream structure
+ * @codec : codec for stream
+ * @sst_id : stream id
+ * @ops : stream operation
+ * @slot : stream pcm slot
+ *
+ * this inline function initializes the stream context for an allocated stream
+ */
+static inline void sst_init_stream(struct stream_info *stream,
+		int codec, int sst_id, int ops, u8 slot)
+{
+	stream->status = STREAM_INIT;
+	stream->prev = STREAM_UN_INIT;
+	stream->ops = ops;
+}
+
+static inline void sst_set_gpio_conf(const struct sst_gpio_config *gpio_conf)
+{
+	lnw_gpio_set_alt(gpio_conf->i2s_rx_alt, gpio_conf->alt_function);
+	lnw_gpio_set_alt(gpio_conf->i2s_tx_alt, gpio_conf->alt_function);
+	lnw_gpio_set_alt(gpio_conf->i2s_frame, gpio_conf->alt_function);
+	lnw_gpio_set_alt(gpio_conf->i2s_clock, gpio_conf->alt_function);
+}
+
+
+/*
+ * sst_validate_strid - this function validates the stream id
+ *
+ * @str_id : stream id to be validated
+ *
+ * returns 0 if valid stream
+ */
+static inline int sst_validate_strid(int str_id)
+{
+	if (str_id <= 0 || str_id > sst_drv_ctx->info.max_streams) {
+		pr_err("SST ERR: invalid stream id : %d, max %d\n",
+					str_id, sst_drv_ctx->info.max_streams);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static inline int sst_shim_write(void __iomem *addr, int offset, int value)
+{
+	writel(value, addr + offset);
+	return 0;
+}
+
+static inline u32 sst_shim_read(void __iomem *addr, int offset)
+{
+	return readl(addr + offset);
+}
+
+static inline u32 sst_reg_read(void __iomem *addr, int offset)
+{
+	return readl(addr + offset);
+}
+
+static inline u64 sst_reg_read64(void __iomem *addr, int offset)
+{
+	u64 val = 0;
+
+	memcpy_fromio(&val, addr + offset, sizeof(val));
+
+	return val;
+}
+
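+/*
+ * The 64-bit shim accessors go through memcpy_toio/memcpy_fromio rather
+ * than readq/writeq, presumably so they also build on 32-bit kernels.
+ */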
+static inline int sst_shim_write64(void __iomem *addr, int offset, u64 value)
+{
+	memcpy_toio(addr + offset, &value, sizeof(value));
+	return 0;
+}
+
+static inline u64 sst_shim_read64(void __iomem *addr, int offset)
+{
+	u64 val = 0;
+
+	memcpy_fromio(&val, addr + offset, sizeof(val));
+	return val;
+}
+
+static inline void
+sst_set_fw_state_locked(struct intel_sst_drv *sst_drv_ctx, int sst_state)
+{
+	mutex_lock(&sst_drv_ctx->sst_lock);
+	sst_drv_ctx->sst_state = sst_state;
+	mutex_unlock(&sst_drv_ctx->sst_lock);
+}
+
+static inline struct stream_info *get_stream_info(int str_id)
+{
+	if (sst_validate_strid(str_id))
+		return NULL;
+	return &sst_drv_ctx->streams[str_id];
+}
+
+static inline int get_stream_id_mrfld(u32 pipe_id)
+{
+	int i;
+
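+	/*
+	 * str_id 0 is unused, so the pipe lookup starts at 1;
+	 * -1 means no active stream owns this pipe.
+	 */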
+	for (i = 1; i <= sst_drv_ctx->info.max_streams; i++)
+		if (pipe_id == sst_drv_ctx->streams[i].pipe_id)
+			return i;
+
+	pr_debug("%s: no such pipe_id(%u)", __func__, pipe_id);
+	return -1;
+}
+
+int register_sst(struct device *);
+int unregister_sst(struct device *);
+
+#ifdef CONFIG_DEBUG_FS
+void sst_debugfs_init(struct intel_sst_drv *sst);
+void sst_debugfs_exit(struct intel_sst_drv *sst);
+#else
+static inline void sst_debugfs_init(struct intel_sst_drv *sst)
+{
+}
+
+static inline void sst_debugfs_exit(struct intel_sst_drv *sst)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * FW should use virtual address 0xC000_0000 to map to the DDR
+ * reserved 2MB region at 512MB boundary. Currently the address of
+ * DDR region allocated by IA FW is not 512MB aligned. So FW is
+ * statically linking the DDR region at 0xDF600000. So we need to
+ * use the translated address to identify the DDR regions in the FW
+ * ELF binary.
+ */
+static inline u32 relocate_imr_addr_mrfld(u32 base_addr)
+{
+	/* Get the difference from 512MB aligned base addr */
+	/* relocate the base */
+	base_addr = MRFLD_FW_VIRTUAL_BASE + (base_addr % (512 * 1024 * 1024));
+	return base_addr;
+}
+
+static inline void sst_add_to_dispatch_list_and_post(struct intel_sst_drv *sst,
+						struct ipc_post *msg)
+{
+	unsigned long irq_flags;
+	spin_lock_irqsave(&sst->ipc_spin_lock, irq_flags);
+	list_add_tail(&msg->node, &sst->ipc_dispatch_list);
+	spin_unlock_irqrestore(&sst->ipc_spin_lock, irq_flags);
+	sst->ops->post_message(&sst->ipc_post_msg_wq);
+}
+#endif
diff --git a/sound/soc/intel/sst/sst_acpi.c b/sound/soc/intel/sst/sst_acpi.c
new file mode 100644
index 0000000..a898c2a
--- /dev/null
+++ b/sound/soc/intel/sst/sst_acpi.c
@@ -0,0 +1,669 @@
+/* sst_acpi.c - SST (LPE) driver init file for ACPI enumeration.
+ *
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ *  Authors:	Ramesh Babu K V <Ramesh.Babu@intel.com>
+ *  Authors:	Omair Mohammed Abdullah <omair.m.abdullah@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+#include <asm/platform_byt_audio.h>
+#include <asm/platform_sst.h>
+#include <acpi/acpi_bus.h>
+#include <sound/intel_sst_ioctl.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+
+extern struct miscdevice lpe_ctrl;
+
+static const struct sst_platform_config_data sst_byt_pdata = {
+	.sst_sram_buff_base	= 0xffffffff,
+	.sst_dma_base[0]	= SST_BYT_DMA0_PHY_ADDR,
+	.sst_dma_base[1]	= SST_BYT_DMA1_PHY_ADDR,
+};
+
+/* use array[0] for ssp_platform_data even though SSP2 is used */
+static const struct sst_board_config_data sst_byt_rvp_bdata = {
+	.active_ssp_ports = 1,
+	.platform_id = 3,
+	.board_id = 1,
+	.ihf_num_chan = 2,
+	.osc_clk_freq = 25000000,
+	.ssp_platform_data = {
+		[0] = {
+			.ssp_cfg_sst = 1,
+			.port_number = 2,
+			.is_master = 1,
+			.pack_mode = 1,
+			.num_slots_per_frame = 2,
+			.num_bits_per_slot = 24,
+			.active_tx_map = 3,
+			.active_rx_map = 3,
+			.ssp_frame_format = 3,
+			.frame_polarity = 1,
+			.serial_bitrate_clk_mode = 0,
+			.frame_sync_width = 24,
+			.dma_handshake_interface_tx = 5,
+			.dma_handshake_interface_rx = 4,
+			.network_mode = 0,
+			.start_delay = 1,
+			.ssp_base_add = SST_BYT_SSP2_PHY_ADDR,
+		},
+	},
+};
+
+static const struct sst_board_config_data sst_byt_ffrd8_bdata = {
+	.active_ssp_ports = 1,
+	.platform_id = 3,
+	.board_id = 1,
+	.ihf_num_chan = 2,
+	.osc_clk_freq = 25000000,
+	.ssp_platform_data = {
+		[0] = {
+			.ssp_cfg_sst = 1,
+			.port_number = 0,
+			.is_master = 1,
+			.pack_mode = 1,
+			.num_slots_per_frame = 2,
+			.num_bits_per_slot = 24,
+			.active_tx_map = 3,
+			.active_rx_map = 3,
+			.ssp_frame_format = 3,
+			.frame_polarity = 1,
+			.serial_bitrate_clk_mode = 0,
+			.frame_sync_width = 24,
+			.dma_handshake_interface_tx = 1,
+			.dma_handshake_interface_rx = 0,
+			.network_mode = 0,
+			.start_delay = 1,
+			.ssp_base_add = SST_BYT_SSP0_PHY_ADDR,
+		},
+	},
+};
+
+static const struct sst_board_config_data sst_byt_crv2_bdata = {
+	.active_ssp_ports = 1,
+	.platform_id = 3,
+	.board_id = 1,
+	.ihf_num_chan = 1,
+	.osc_clk_freq = 25000000,
+	.ssp_platform_data = {
+		[0] = {
+			.ssp_cfg_sst = 1,
+			.port_number = 0,
+			.is_master = 1,
+			.pack_mode = 1,
+			.num_slots_per_frame = 2,
+			.num_bits_per_slot = 24,
+			.active_tx_map = 3,
+			.active_rx_map = 3,
+			.ssp_frame_format = 3,
+			.frame_polarity = 1,
+			.serial_bitrate_clk_mode = 0,
+			.frame_sync_width = 24,
+			.dma_handshake_interface_tx = 1,
+			.dma_handshake_interface_rx = 0,
+			.network_mode = 0,
+			.start_delay = 1,
+			.ssp_base_add = SST_BYT_SSP0_PHY_ADDR,
+		},
+	},
+};
+
+static const struct sst_info byt_fwparse_info = {
+	.use_elf	= true,
+	.max_streams	= 4,
+	.dma_max_len	= SST_MAX_DMA_LEN_MRFLD,
+	.iram_start	= SST_BYT_IRAM_PHY_START,
+	.iram_end	= SST_BYT_IRAM_PHY_END,
+	.iram_use	= true,
+	.dram_start	= SST_BYT_DRAM_PHY_START,
+	.dram_end	= SST_BYT_DRAM_PHY_END,
+	.dram_use	= true,
+	.imr_start	= SST_BYT_IMR_VIRT_START,
+	.imr_end	= SST_BYT_IMR_VIRT_END,
+	.imr_use	= true,
+	.mailbox_start	= SST_BYT_MBOX_PHY_ADDR,
+	.num_probes	= 0,
+	.lpe_viewpt_rqd  = true,
+};
+
+static const struct sst_info cht_fwparse_info = {
+	.use_elf	= true,
+	.max_streams	= MAX_NUM_STREAMS_MRFLD,
+	.dma_max_len	= SST_MAX_DMA_LEN_MRFLD,
+	.iram_start	= SST_BYT_IRAM_PHY_START,
+	.iram_end	= SST_BYT_IRAM_PHY_END,
+	.iram_use	= true,
+	.dram_start	= SST_BYT_DRAM_PHY_START,
+	.dram_end	= SST_BYT_DRAM_PHY_END,
+	.dram_use	= true,
+	.imr_start	= SST_BYT_IMR_VIRT_START,
+	.imr_end	= SST_BYT_IMR_VIRT_END,
+	.imr_use	= true,
+	.mailbox_start	= SST_BYT_MBOX_PHY_ADDR,
+	.num_probes	= 0,
+	.lpe_viewpt_rqd = true,
+};
+
+static const struct sst_ipc_info byt_ipc_info = {
+	.use_32bit_ops = true,
+	.ipc_offset = 4,
+	.mbox_recv_off = 0x400,
+};
+
+static const struct sst_lib_dnld_info  byt_lib_dnld_info = {
+	.mod_base           = SST_BYT_IMR_VIRT_START,
+	.mod_end            = SST_BYT_IMR_VIRT_END,
+	.mod_table_offset   = BYT_FW_MOD_TABLE_OFFSET,
+	.mod_table_size     = BYT_FW_MOD_TABLE_SIZE,
+	.mod_ddr_dnld       = true,
+};
+
+static const struct sst_ipc_info cht_ipc_info = {
+	.use_32bit_ops = false,
+	.ipc_offset = 0,
+	.mbox_recv_off = 0x400,
+};
+
+struct sst_platform_info cht_platform_data = {
+	.probe_data = &cht_fwparse_info,
+	.ssp_data = NULL,
+	.bdata = NULL,
+	.pdata = NULL,
+	.ipc_info = &cht_ipc_info,
+	.lib_info = NULL,
+};
+
+struct sst_platform_info byt_rvp_platform_data = {
+	.probe_data = &byt_fwparse_info,
+	.ssp_data = NULL,
+	.bdata = &sst_byt_rvp_bdata,
+	.pdata = &sst_byt_pdata,
+	.ipc_info = &byt_ipc_info,
+	.lib_info = &byt_lib_dnld_info,
+};
+
+struct sst_platform_info byt_ffrd8_platform_data = {
+	.probe_data = &byt_fwparse_info,
+	.ssp_data = NULL,
+	.bdata = &sst_byt_ffrd8_bdata,
+	.pdata = &sst_byt_pdata,
+	.ipc_info = &byt_ipc_info,
+	.lib_info = &byt_lib_dnld_info,
+};
+
+int sst_workqueue_init(struct intel_sst_drv *ctx)
+{
+	pr_debug("%s", __func__);
+
+	INIT_LIST_HEAD(&ctx->memcpy_list);
+	INIT_LIST_HEAD(&ctx->libmemcpy_list);
+	INIT_LIST_HEAD(&ctx->rx_list);
+	INIT_LIST_HEAD(&ctx->ipc_dispatch_list);
+	INIT_LIST_HEAD(&ctx->block_list);
+	INIT_WORK(&ctx->ipc_post_msg.wq, ctx->ops->post_message);
+	init_waitqueue_head(&ctx->wait_queue);
+
+	ctx->mad_wq = create_singlethread_workqueue("sst_mad_wq");
+	if (!ctx->mad_wq)
+		goto err_wq;
+	ctx->post_msg_wq =
+		create_singlethread_workqueue("sst_post_msg_wq");
+	if (!ctx->post_msg_wq)
+		goto err_wq;
+	return 0;
+err_wq:
+	return -EBUSY;
+}
+
+void sst_init_locks(struct intel_sst_drv *ctx)
+{
+	mutex_init(&ctx->stream_lock);
+	mutex_init(&ctx->sst_lock);
+	mutex_init(&ctx->mixer_ctrl_lock);
+	mutex_init(&ctx->csr_lock);
+	spin_lock_init(&ctx->rx_msg_lock);
+	spin_lock_init(&ctx->ipc_spin_lock);
+	spin_lock_init(&ctx->block_lock);
+	spin_lock_init(&ctx->pvt_id_lock);
+}
+
+int sst_destroy_workqueue(struct intel_sst_drv *ctx)
+{
+	pr_debug("%s", __func__);
+	if (ctx->mad_wq)
+		destroy_workqueue(ctx->mad_wq);
+	if (ctx->post_msg_wq)
+		destroy_workqueue(ctx->post_msg_wq);
+	return 0;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+static int sst_platform_get_resources_fdk(struct intel_sst_drv *ctx,
+				      struct platform_device *pdev)
+{
+	struct resource *rsrc;
+
+	pr_debug("%s", __func__);
+
+	/* All ACPI resource request here */
+	/* Get DDR addr from platform resource table */
+	rsrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!rsrc) {
+		pr_err("Invalid DDR base from IFWI");
+		return -EIO;
+	}
+	ctx->ddr_base = rsrc->start;
+	ctx->ddr_end = rsrc->end;
+	pr_debug("DDR base: %#x", ctx->ddr_base);
+	ctx->ddr = devm_ioremap_nocache(ctx->dev, ctx->ddr_base,
+					resource_size(rsrc));
+	if (!ctx->ddr) {
+		pr_err("unable to map DDR");
+		return -EIO;
+	}
+
+	/* Get Shim addr from platform resource table */
+	rsrc = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!rsrc) {
+		pr_err("Invalid SHIM base from IFWI");
+		return -EIO;
+	}
+	ctx->shim_phy_add = rsrc->start;
+	pr_debug("SHIM base: %#x", ctx->shim_phy_add);
+	ctx->shim = devm_ioremap_nocache(ctx->dev, ctx->shim_phy_add,
+					 resource_size(rsrc));
+	if (!ctx->shim) {
+		pr_err("unable to map SHIM");
+		return -EIO;
+	}
+	/* reassign physical address to LPE viewpoint address */
+	ctx->shim_phy_add = SST_BYT_SHIM_PHY_ADDR;
+
+	/* Get mailbox addr from platform resource table */
+	rsrc = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	if (!rsrc) {
+		pr_err("Invalid Mailbox base from IFWI");
+		return -EIO;
+	}
+	ctx->mailbox_add = rsrc->start;
+	pr_debug("Mailbox base: %#x", ctx->mailbox_add);
+	ctx->mailbox = devm_ioremap_nocache(ctx->dev, ctx->mailbox_add,
+					    resource_size(rsrc));
+	if (!ctx->mailbox) {
+		pr_err("unable to map mailbox");
+		return -EIO;
+	}
+	/* reassign physical address to LPE viewpoint address */
+	ctx->mailbox_add = ctx->info.mailbox_start;
+
+	/* Get iram/iccm addr from platform resource table */
+	rsrc = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+	if (!rsrc) {
+		pr_err("Invalid IRAM base from IFWI");
+		return -EIO;
+	}
+	ctx->iram_base = rsrc->start;
+	ctx->iram_end =  rsrc->end;
+	pr_debug("IRAM base: %#x", ctx->iram_base);
+	ctx->iram = devm_ioremap_nocache(ctx->dev, ctx->iram_base,
+					 resource_size(rsrc));
+	if (!ctx->iram) {
+		pr_err("unable to map IRAM");
+		return -EIO;
+	}
+
+	/* Get dram/dccm addr from platform resource table */
+	rsrc = platform_get_resource(pdev, IORESOURCE_MEM, 4);
+	if (!rsrc) {
+		pr_err("Invalid DRAM base from IFWI");
+		return -EIO;
+	}
+	ctx->dram_base = rsrc->start;
+	ctx->dram_end = rsrc->end;
+	pr_debug("DRAM base: %#x", ctx->dram_base);
+	ctx->dram = devm_ioremap_nocache(ctx->dev, ctx->dram_base,
+					 resource_size(rsrc));
+	if (!ctx->dram) {
+		pr_err("unable to map DRAM");
+		return -EIO;
+	}
+
+	/* Register the ISR */
+	ctx->irq_num = platform_get_irq(pdev, 0);
+	pr_debug("irq from pdev is:%d", ctx->irq_num);
+	return 0;
+}
+
+#define LPE_IRAM_OFFSET 0x0C0000
+#define LPE_IRAM_SIZE 0x040000
+#define LPE_DRAM_OFFSET 0x100000
+#define LPE_DRAM_SIZE 0x040000
+#define LPE_SHIM_OFFSET 0x140000
+#define LPE_SHIM_SIZE 0x004000
+#define LPE_MBOX_OFFSET 0x144000
+#define LPE_MBOX_SIZE 0x004000
+
+static int sst_platform_get_resources_edk(struct intel_sst_drv *ctx,
+				      struct platform_device *pdev)
+{
+	struct resource *rsrc;
+
+	pr_debug("%s", __func__);
+
+	/* All ACPI resource request here */
+	/* Get Shim addr */
+	rsrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!rsrc) {
+		pr_err("Invalid SHIM base from IFWI");
+		return -EIO;
+	}
+	pr_debug("LPE base: %#x size:%#x", (unsigned int) rsrc->start,
+					(unsigned int)resource_size(rsrc));
+	ctx->iram_base = rsrc->start + LPE_IRAM_OFFSET;
+	ctx->iram_end =  ctx->iram_base + LPE_IRAM_SIZE - 1;
+	pr_debug("IRAM base: %#x", ctx->iram_base);
+	ctx->iram = devm_ioremap_nocache(ctx->dev, ctx->iram_base,
+					 LPE_IRAM_SIZE);
+	if (!ctx->iram) {
+		pr_err("unable to map IRAM");
+		return -EIO;
+	}
+
+	ctx->dram_base = rsrc->start + LPE_DRAM_OFFSET;
+	ctx->dram_end = ctx->dram_base + LPE_DRAM_SIZE - 1;
+	pr_debug("DRAM base: %#x", ctx->dram_base);
+	ctx->dram = devm_ioremap_nocache(ctx->dev, ctx->dram_base,
+					 LPE_DRAM_SIZE);
+	if (!ctx->dram) {
+		pr_err("unable to map DRAM");
+		return -EIO;
+	}
+
+	ctx->shim_phy_add = rsrc->start + LPE_SHIM_OFFSET;
+	pr_debug("SHIM base: %#x", ctx->shim_phy_add);
+	ctx->shim = devm_ioremap_nocache(ctx->dev, ctx->shim_phy_add,
+							LPE_SHIM_SIZE);
+	if (!ctx->shim) {
+		pr_err("unable to map SHIM");
+		return -EIO;
+	}
+	/* reassign physical address to LPE viewpoint address */
+	ctx->shim_phy_add = SST_BYT_SHIM_PHY_ADDR;
+
+	/* Get mailbox addr */
+	ctx->mailbox_add = rsrc->start + LPE_MBOX_OFFSET;
+	pr_debug("Mailbox base: %#x", ctx->mailbox_add);
+	ctx->mailbox = devm_ioremap_nocache(ctx->dev, ctx->mailbox_add,
+					    LPE_MBOX_SIZE);
+	if (!ctx->mailbox) {
+		pr_err("unable to map mailbox");
+		return -EIO;
+	}
+
+	/* reassign physical address to LPE viewpoint address */
+	ctx->mailbox_add = ctx->info.mailbox_start;
+
+	rsrc = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	if (!rsrc) {
+		pr_err("Invalid DDR base from IFWI");
+		return -EIO;
+	}
+	ctx->ddr_base = rsrc->start;
+	ctx->ddr_end = rsrc->end;
+	pr_debug("DDR base: %#x", ctx->ddr_base);
+	ctx->ddr = devm_ioremap_nocache(ctx->dev, ctx->ddr_base,
+					resource_size(rsrc));
+	if (!ctx->ddr) {
+		pr_err("unable to map DDR");
+		return -EIO;
+	}
+	/* Register the ISR */
+	if (!strncmp(ctx->hid, "80860F28", 8))
+		ctx->irq_num = platform_get_irq(pdev, 0);
+	else if (!strncmp(ctx->hid, "808622A8", 8)) {
+		/* FIXME: IRQ number will be moved to 0 once the BIOS fix is done */
+		ctx->irq_num = platform_get_irq(pdev, 5);
+	} else
+		return -EINVAL;
+	return 0;
+}
+
+static int sst_platform_get_resources(const char *hid,
+		struct intel_sst_drv *ctx, struct platform_device *pdev)
+{
+	pr_debug("%s", __func__);
+
+	if (!strncmp(hid, "LPE0F281", 8)) {
+		ctx->pci_id = SST_BYT_PCI_ID;
+		return sst_platform_get_resources_fdk(ctx, pdev);
+	}
+	if (!strncmp(hid, "808622A8", 8)) {
+		ctx->pci_id = SST_CHT_PCI_ID;
+		return sst_platform_get_resources_edk(ctx, pdev);
+	}
+	if (!strncmp(hid, "80860F28", 8)) {
+		ctx->pci_id = SST_BYT_PCI_ID;
+		return sst_platform_get_resources_edk(ctx, pdev);
+	} else if (!strncmp(hid, "LPE0F28", 7)) {
+		ctx->pci_id = SST_BYT_PCI_ID;
+		return sst_platform_get_resources_fdk(ctx, pdev);
+	} else {
+		pr_err("Invalid device\n");
+		return -EINVAL;
+	}
+}
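+
+/*
+ * Editor's summary (derived from the dispatch above, not part of the
+ * original patch) of how the ACPI HIDs map to resource layouts:
+ *
+ *	HID		pci_id		resource layout
+ *	LPE0F281	SST_BYT_PCI_ID	FDK (one MEM resource per block)
+ *	LPE0F28		SST_BYT_PCI_ID	FDK (one MEM resource per block)
+ *	80860F28	SST_BYT_PCI_ID	EDK (single LPE window + offsets)
+ *	808622A8	SST_CHT_PCI_ID	EDK (single LPE window + offsets)
+ */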
+
+int sst_acpi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	acpi_handle handle = ACPI_HANDLE(dev);
+	struct acpi_device *device;
+	const char *hid;
+	int i, ret = 0;
+	struct intel_sst_drv *ctx;
+
+	ret = acpi_bus_get_device(handle, &device);
+	if (ret) {
+		pr_err("%s: could not get acpi device - %d\n", __func__, ret);
+		return -ENODEV;
+	}
+
+	if (acpi_bus_get_status(device) || !device->status.present) {
+		pr_err("%s: device has invalid status", __func__);
+		return -ENODEV;
+	}
+
+	hid = acpi_device_hid(device);
+	pr_debug("%s for %s", __func__, hid);
+	ret = sst_alloc_drv_context(dev);
+	if (ret)
+		return ret;
+	ctx = sst_drv_ctx;
+	ctx->dev = dev;
+	ctx->hid = hid;
+
+	ret = sst_platform_get_resources(hid, ctx, pdev);
+	if (ret)
+		return ret;
+	/* need to save shim registers in BYT */
+	ctx->shim_regs64 = devm_kzalloc(dev, sizeof(*ctx->shim_regs64),
+					GFP_KERNEL);
+	if (!ctx->shim_regs64)
+		return -ENOMEM;
+
+	ret = sst_driver_ops(ctx);
+	if (ret != 0)
+		return -EINVAL;
+
+	sst_init_locks(ctx);
+
+	ctx->stream_cnt = 0;
+	ctx->fw_in_mem = NULL;
+	ctx->use_dma = 1;
+	ctx->use_lli = 1;
+
+	ret = sst_workqueue_init(ctx);
+	if (ret)
+		goto do_free_wq;
+
+	ctx->pdata = sst_get_acpi_driver_data(hid);
+	if (!ctx->pdata) {
+		ret = -EINVAL;
+		goto do_free_wq;
+	}
+	if (INTEL_MID_BOARD(3, TABLET, BYT, BLK, PRO, CRV2)) {
+		/*
+		 * BYT-CR V2 has a mono speaker while BYT has stereo.
+		 * Both share the same HID and hence the same platform
+		 * data, so override bdata based on the SPID.
+		 */
+		ctx->pdata->bdata = &sst_byt_crv2_bdata;
+		pr_info("Overriding bdata for byt-crv2\n");
+	}
+
+	ctx->use_32bit_ops = ctx->pdata->ipc_info->use_32bit_ops;
+	ctx->mailbox_recv_offset = ctx->pdata->ipc_info->mbox_recv_off;
+
+	memcpy(&ctx->info, ctx->pdata->probe_data, sizeof(ctx->info));
+
+	ctx->ipc_reg.ipcx = SST_IPCX + ctx->pdata->ipc_info->ipc_offset;
+	ctx->ipc_reg.ipcd = SST_IPCD + ctx->pdata->ipc_info->ipc_offset;
+
+	pr_debug("Got drv data max stream %d\n",
+				ctx->info.max_streams);
+	for (i = 1; i <= ctx->info.max_streams; i++) {
+		struct stream_info *stream = &ctx->streams[i];
+		mutex_init(&stream->lock);
+	}
+	ret = sst_request_firmware_async(ctx);
+	if (ret) {
+		pr_err("Firmware download failed:%d\n", ret);
+		goto do_free_wq;
+	}
+
+	ret = devm_request_threaded_irq(ctx->dev, ctx->irq_num, ctx->ops->interrupt,
+					ctx->ops->irq_thread, 0, SST_DRV_NAME,
+					ctx);
+	if (ret)
+		goto do_free_wq;
+	pr_debug("Registered IRQ %#x\n", ctx->irq_num);
+
+	/* Register LPE Control as misc driver */
+	ret = misc_register(&lpe_ctrl);
+	if (ret) {
+		pr_err("couldn't register control device\n");
+		goto do_free_wq;
+	}
+	/* mask all SSP and DMA irq to IA - enabled in acpi kernel driver */
+	sst_shim_write64(ctx->shim, SST_IMRX, 0xFFFF0038);
+
+	if (ctx->use_32bit_ops) {
+		pr_debug("allocate mem for context save/restore\n ");
+		/*allocate mem for fw context save during suspend*/
+		ctx->fw_cntx = devm_kzalloc(ctx->dev, FW_CONTEXT_MEM, GFP_KERNEL);
+		if (!ctx->fw_cntx) {
+			ret = -ENOMEM;
+			goto do_free_misc;
+		}
+		/* setting zero as that is valid mem to restore */
+		ctx->fw_cntx_size = 0;
+	}
+
+	platform_set_drvdata(pdev, ctx);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+	register_sst(dev);
+	sst_debugfs_init(ctx);
+	sst_set_fw_state_locked(ctx, SST_UN_INIT);
+	sst_save_shim64(ctx, ctx->shim, ctx->shim_regs64);
+	pr_info("%s successfully done!\n", __func__);
+	return ret;
+
+do_free_misc:
+	misc_deregister(&lpe_ctrl);
+do_free_wq:
+	sst_destroy_workqueue(ctx);
+
+	sst_drv_ctx = NULL;
+	platform_set_drvdata(pdev, NULL);
+	pr_err("%s: failed with %d\n", __func__, ret);
+	return ret;
+}
+
+/**
+ * sst_acpi_remove - remove function
+ *
+ * @pdev:	platform device structure
+ *
+ * This function is called by the OS when a device is unloaded.
+ * It frees the interrupt and other driver resources.
+ */
+int sst_acpi_remove(struct platform_device *pdev)
+{
+	struct intel_sst_drv *ctx;
+
+	ctx = platform_get_drvdata(pdev);
+	sst_debugfs_exit(ctx);
+	pm_runtime_get_noresume(ctx->dev);
+	pm_runtime_disable(ctx->dev);
+	unregister_sst(ctx->dev);
+	sst_set_fw_state_locked(ctx, SST_UN_INIT);
+	misc_deregister(&lpe_ctrl);
+	kfree(ctx->runtime_param.param.addr);
+	flush_scheduled_work();
+	sst_destroy_workqueue(ctx);
+	kfree(ctx->fw_sg_list.src);
+	kfree(ctx->fw_sg_list.dst);
+	ctx->fw_sg_list.list_len = 0;
+	kfree(ctx->fw_in_mem);
+	ctx->fw_in_mem = NULL;
+	sst_memcpy_free_resources();
+	sst_drv_ctx = NULL;
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+#else
+int sst_acpi_probe(struct platform_device *pdev)
+{
+	return -EINVAL;
+}
+
+int sst_acpi_remove(struct platform_device *pdev)
+{
+	return -EINVAL;
+}
+#endif
+
+MODULE_DESCRIPTION("Intel (R) SST(R) Audio Engine ACPI Driver");
+MODULE_AUTHOR("Ramesh Babu K V");
+MODULE_AUTHOR("Omair Mohammed Abdullah");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("sst");
diff --git a/sound/soc/intel/sst/sst_app_compat_interface.c b/sound/soc/intel/sst/sst_app_compat_interface.c
new file mode 100644
index 0000000..4babd76
--- /dev/null
+++ b/sound/soc/intel/sst/sst_app_compat_interface.c
@@ -0,0 +1,85 @@
+/*
+ *  sst_app_compat_interface.c - Intel SST Driver for audio engine
+ *
+ *  Copyright (C) 2013-14 Intel Corp
+ *  Authors: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *  This driver exposes the audio engine functionalities to the ALSA
+ *	and middleware.
+ */
+
+/* This file is included from sst.c */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/compat.h>
+#include <linux/types.h>
+#include <sound/intel_sst_ioctl.h>
+#include "sst.h"
+
+struct snd_ppp_params32 {
+	__u8			algo_id;	/* Post/Pre processing algorithm ID */
+	__u8			str_id;		/* only 5 bits used, 0 - 31 are valid */
+	__u8			enable;		/* 0 = disable, 1 = enable */
+	__u8			operation;
+	__u32			size;		/* Size of parameters for all blocks */
+	__u32			params;
+} __packed;
+
+enum {
+	SNDRV_SST_SET_ALGO32 = _IOW('L', 0x30, struct snd_ppp_params32),
+	SNDRV_SST_GET_ALGO32 = _IOWR('L', 0x31, struct snd_ppp_params32),
+};
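+
+/*
+ * Editor's note (not in the original patch): _IOW()/_IOWR() encode the
+ * argument size into the command word, so on a 64-bit kernel
+ * SNDRV_SST_SET_ALGO32 differs numerically from the native
+ * SNDRV_SST_SET_ALGO even though both use nr 0x30. Matching on
+ * _IOC_NR(cmd) below therefore handles both:
+ *
+ *	_IOC_NR(_IOW('L', 0x30, struct snd_ppp_params32)) == 0x30
+ *	_IOC_NR(_IOW('L', 0x30, struct snd_ppp_params))   == 0x30
+ */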
+
+static long sst_algo_compat(unsigned int cmd,
+				struct snd_ppp_params32 __user *arg32)
+{
+	int retval = 0;
+	struct snd_ppp_params32 algo_params32;
+	struct snd_ppp_params algo_params;
+
+	if (copy_from_user(&algo_params32, arg32, sizeof(algo_params32))) {
+		pr_debug("%s: copy from user failed: %d\n", __func__, retval);
+		return -EINVAL;
+	}
+
+	memcpy(&algo_params, &algo_params32, sizeof(algo_params32)-sizeof(__u32));
+	algo_params.params = compat_ptr(algo_params32.params);
+	retval = intel_sst_ioctl_dsp(cmd, &algo_params, (unsigned long)arg32);
+	return retval;
+}
+
+static long intel_sst_ioctl_compat(struct file *file_ptr,
+				unsigned int cmd, unsigned long arg)
+{
+	void __user *argp = compat_ptr(arg);
+
+	switch (_IOC_NR(cmd)) {
+	case _IOC_NR(SNDRV_SST_DRIVER_INFO):
+	case _IOC_NR(SNDRV_SST_TUNING_PARAMS):
+		return intel_sst_ioctl(file_ptr, cmd, (unsigned long)argp);
+	case _IOC_NR(SNDRV_SST_SET_ALGO32):
+		return sst_algo_compat(SNDRV_SST_SET_ALGO, argp);
+	case _IOC_NR(SNDRV_SST_GET_ALGO32):
+		return sst_algo_compat(SNDRV_SST_GET_ALGO, argp);
+
+	default:
+		return -ENOTTY;
+	}
+}
diff --git a/sound/soc/intel/sst/sst_app_interface.c b/sound/soc/intel/sst/sst_app_interface.c
new file mode 100644
index 0000000..cfbbf4f
--- /dev/null
+++ b/sound/soc/intel/sst/sst_app_interface.c
@@ -0,0 +1,342 @@
+/*
+ *  sst_app_interface.c - Intel SST Driver for audio engine
+ *
+ *  Copyright (C) 2008-10 Intel Corp
+ *  Authors:	Vinod Koul <vinod.koul@intel.com>
+ *  Harsha Priya <priya.harsha@intel.com>
+ *  Dharageswari R <dharageswari.r@intel.com>
+ *  Jeeja KP <jeeja.kp@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *  This driver exposes the audio engine functionalities to the ALSA
+ *	and middleware.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <linux/uio.h>
+#include <linux/aio.h>
+#include <linux/uaccess.h>
+#include <linux/firmware.h>
+#include <linux/ioctl.h>
+#include <sound/intel_sst_ioctl.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+
+#define AM_MODULE 1
+
+/**
+ * intel_sst_open_cntrl - opens a handle to the driver
+ *
+ * @i_node:	inode structure
+ * @file_ptr:	pointer to file
+ *
+ * This function is called by the OS when a user space component
+ * tries to get a driver handle to /dev/intel_sst_control.
+ * Only one handle at a time is allowed; this is for control
+ * operations only.
+ */
+int intel_sst_open_cntrl(struct inode *i_node, struct file *file_ptr)
+{
+	int retval;
+
+	/* audio manager open */
+	mutex_lock(&sst_drv_ctx->stream_lock);
+	retval = intel_sst_check_device();
+	if (retval) {
+		mutex_unlock(&sst_drv_ctx->stream_lock);
+		return retval;
+	}
+	pr_debug("AM handle opened\n");
+
+	mutex_unlock(&sst_drv_ctx->stream_lock);
+	return retval;
+}
+
+
+int intel_sst_release_cntrl(struct inode *i_node, struct file *file_ptr)
+{
+	/* audio manager close */
+	mutex_lock(&sst_drv_ctx->stream_lock);
+	sst_pm_runtime_put(sst_drv_ctx);
+	mutex_unlock(&sst_drv_ctx->stream_lock);
+	pr_debug("AM handle closed\n");
+	return 0;
+}
+
+/**
+ * sst_get_max_streams - populate the driver info structure with the
+ *				max stream count
+ * @info: out param that holds the driver info
+ *
+ * This function is called when the max stream count is required.
+ */
+void sst_get_max_streams(struct snd_sst_driver_info *info)
+{
+	pr_debug("info.max_streams %d num_probes %d\n", sst_drv_ctx->info.max_streams,
+					sst_drv_ctx->info.num_probes);
+	info->max_streams = sst_drv_ctx->info.max_streams - sst_drv_ctx->info.num_probes;
+}
+
+/**
+ * sst_create_algo_ipc - create an IPC msg for algorithm parameters
+ *
+ * @algo_params: algorithm parameters
+ * @msg: post msg pointer
+ * @pvt_id: checked by wake_up_block
+ *
+ * This function creates the IPC msg and, for copying the mailbox
+ * data, returns the offset in bytes into mailbox memory at which the
+ * mailbox data should be copied, after the msg header.
+ */
+static int sst_create_algo_ipc(struct snd_ppp_params *algo_params,
+					struct ipc_post **msg, int pvt_id)
+{
+	u32 header_size = 0;
+	u32 ipc_msg_size = sizeof(u32) + sizeof(*algo_params)
+			 - sizeof(algo_params->params) + algo_params->size;
+	u32 offset = 0;
+
+	if (ipc_msg_size > SST_MAILBOX_SIZE)
+		return -ENOMEM;
+	if (sst_create_ipc_msg(msg, true))
+		return -ENOMEM;
+	sst_fill_header(&(*msg)->header,
+			IPC_IA_ALG_PARAMS, 1, pvt_id);
+	(*msg)->header.part.data = ipc_msg_size;
+	memcpy((*msg)->mailbox_data, &(*msg)->header, sizeof(u32));
+	offset = sizeof(u32);
+	header_size = sizeof(*algo_params) - sizeof(algo_params->params);
+	memcpy((*msg)->mailbox_data + offset, algo_params, header_size);
+	offset += header_size;
+	return offset;
+}
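+
+/*
+ * Editor's sketch of the mailbox message assembled above (read off the
+ * memcpy sequence, not from the original patch's documentation):
+ *
+ *	offset 0                : IPC header (u32)
+ *	offset 4                : snd_ppp_params minus the params pointer
+ *	offset 4 + header_size  : algo payload of algo_params->size bytes,
+ *	                          copied in afterwards by the caller
+ */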
+
+static long sst_send_algo(struct snd_ppp_params *algo_params,
+		struct sst_block *block, enum sst_algo_ops algo)
+{
+	struct ipc_post *msg;
+	int retval;
+	int offset;
+
+	pr_debug("Algo ID %d Str id %d Enable %d Size %d\n",
+		algo_params->algo_id, algo_params->str_id,
+		algo_params->enable, algo_params->size);
+
+	algo_params->operation = algo;
+
+	offset = sst_create_algo_ipc(algo_params, &msg, block->drv_id);
+	if (offset < 0)
+		return offset;
+
+	if (copy_from_user(msg->mailbox_data + offset,
+			algo_params->params, algo_params->size)) {
+		kfree(msg);
+		return -EFAULT;
+	}
+
+	sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+	retval = sst_wait_timeout(sst_drv_ctx, block);
+	if (retval) {
+		pr_debug("%s: failed for algo ops %s with retval %d\n",
+				__func__, algo ? "SST_GET_ALGO" : "SST_SET_ALGO", retval);
+		return -EIO;
+	}
+	return 0;
+}
+
+/**
+ * intel_sst_ioctl_dsp - receives the device ioctls
+ *
+ * @cmd:	ioctl cmd
+ * @algo_params: algorithm parameters
+ * @arg:	data
+ *
+ * This function is called when a user space component
+ * sends a DSP ioctl to the SST driver.
+ */
+long intel_sst_ioctl_dsp(unsigned int cmd,
+		struct snd_ppp_params *algo_params, unsigned long arg)
+{
+	int retval = 0;
+	struct snd_ppp_params *algo_params_copied;
+	struct sst_block *block;
+	int pvt_id;
+
+	pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+	block = sst_create_block(sst_drv_ctx, IPC_IA_ALG_PARAMS, pvt_id);
+	if (block == NULL)
+		return -ENOMEM;
+
+	switch (_IOC_NR(cmd)) {
+	case _IOC_NR(SNDRV_SST_SET_ALGO):
+		retval = sst_send_algo(algo_params, block, SST_SET_ALGO);
+		break;
+
+	case _IOC_NR(SNDRV_SST_GET_ALGO):
+		retval = sst_send_algo(algo_params, block, SST_GET_ALGO);
+		if (retval)
+			break;
+		algo_params_copied = (struct snd_ppp_params *)block->data;
+
+		if (algo_params_copied->size > algo_params->size) {
+			pr_debug("mem insufficient to copy\n");
+			retval = -EMSGSIZE;
+			break;
+		} else {
+			char __user *tmp;
+			struct snd_ppp_params *get_params;
+			char *pp;
+
+			tmp = (char __user *)arg + offsetof(
+					struct snd_ppp_params, size);
+			if (copy_to_user(tmp, &algo_params_copied->size,
+						 sizeof(u32))) {
+				retval = -EFAULT;
+				break;
+			}
+			tmp = (char __user *)arg + offsetof(
+					struct snd_ppp_params, enable);
+			if (copy_to_user(tmp, &algo_params_copied->enable,
+						 sizeof(u8))) {
+				retval = -EFAULT;
+				break;
+			}
+			if (algo_params_copied->size == 0)
+				break;
+
+			get_params = kmalloc(sizeof(*get_params), GFP_KERNEL);
+			if (!get_params) {
+				pr_err("sst: mem alloc failed\n");
+				retval = -ENOMEM;
+				break;
+			}
+			memcpy(get_params, algo_params_copied,
+							sizeof(*get_params));
+
+			get_params->params = kmalloc(get_params->size, GFP_KERNEL);
+			if (!get_params->params) {
+				pr_err("sst: mem alloc failed\n");
+				retval = -ENOMEM;
+				goto free_mem;
+			}
+			pp = (char *)algo_params_copied;
+			pp = pp + sizeof(*get_params) -
+						sizeof(get_params->params);
+			memcpy(get_params->params, pp, get_params->size);
+			if (copy_to_user(algo_params->params,
+					get_params->params,
+					get_params->size)) {
+				retval = -EFAULT;
+			}
+			kfree(get_params->params);
+
+free_mem:
+			kfree(get_params);
+
+		}
+		break;
+	}
+	sst_free_block(sst_drv_ctx, block);
+	pr_debug("ioctl dsp return = %d, for cmd = %x\n", retval, cmd);
+	return retval;
+}
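+
+/*
+ * Hypothetical userspace use of SNDRV_SST_GET_ALGO (editor's sketch,
+ * untested; the algo_id value and buffer size are made up):
+ *
+ *	char buf[256];
+ *	struct snd_ppp_params p = {
+ *		.algo_id = 0x10, .str_id = 1,
+ *		.size = sizeof(buf), .params = buf,
+ *	};
+ *
+ *	ioctl(fd, SNDRV_SST_GET_ALGO, &p);
+ *	// on success p.size and p.enable are updated and up to p.size
+ *	// bytes of algorithm data are written into buf
+ */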
+
+static long sst_ioctl_tuning_params(unsigned int cmd, unsigned long arg)
+{
+	struct snd_sst_tuning_params params;
+	struct ipc_post *msg;
+	unsigned long address;
+
+	if (copy_from_user(&params, (void __user *)arg, sizeof(params)))
+		return -EFAULT;
+	pr_debug("sst: Parameter %d, Stream %d, Size %d\n", params.type,
+			params.str_id, params.size);
+	if (sst_create_ipc_msg(&msg, true))
+		return -ENOMEM;
+	address = (unsigned long)params.addr;
+
+	switch (_IOC_NR(cmd)) {
+	case _IOC_NR(SNDRV_SST_TUNING_PARAMS):
+		sst_fill_header(&msg->header, IPC_IA_TUNING_PARAMS, 1,
+				params.str_id);
+		break;
+	}
+	msg->header.part.data = sizeof(u32) + sizeof(params) + params.size;
+	memcpy(msg->mailbox_data, &msg->header.full, sizeof(u32));
+	memcpy(msg->mailbox_data + sizeof(u32), &params, sizeof(params));
+	/* driver doesn't need to send address, so overwrite addr with data */
+	if (copy_from_user(msg->mailbox_data + sizeof(u32)
+				+ sizeof(params) - sizeof(params.addr),
+			(void __user *)address, params.size)) {
+		kfree(msg->mailbox_data);
+		kfree(msg);
+		return -EFAULT;
+	}
+
+	sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+	return 0;
+}
+
+/**
+ * intel_sst_ioctl - receives the device ioctls
+ * @file_ptr:	pointer to file
+ * @cmd:	ioctl cmd
+ * @arg:	data
+ *
+ * This function is called by the OS when a user space component
+ * sends an ioctl to the SST driver.
+ */
+long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
+{
+	int retval = 0;
+	struct snd_ppp_params algo_params;
+
+	if (sst_drv_ctx->sst_state != SST_FW_RUNNING)
+		return -EBUSY;
+
+	switch (_IOC_NR(cmd)) {
+	case _IOC_NR(SNDRV_SST_DRIVER_INFO): {
+		struct snd_sst_driver_info info;
+
+		pr_debug("SNDRV_SST_DRIVER_INFO received\n");
+		sst_get_max_streams(&info);
+
+		if (copy_to_user((void __user *)arg, &info,
+				sizeof(info)))
+			retval = -EFAULT;
+		break;
+	}
+	case _IOC_NR(SNDRV_SST_GET_ALGO):
+	case _IOC_NR(SNDRV_SST_SET_ALGO):
+		if (copy_from_user(&algo_params, (void __user *)arg,
+						sizeof(algo_params))) {
+			return -EFAULT;
+		}
+		retval = intel_sst_ioctl_dsp(cmd, &algo_params, arg);
+		break;
+
+	case _IOC_NR(SNDRV_SST_TUNING_PARAMS):
+		retval = sst_ioctl_tuning_params(cmd, arg);
+		break;
+
+	default:
+		retval = -EINVAL;
+	}
+	pr_debug("intel_sst_ioctl:complete ret code = %d for command = %x\n", retval, cmd);
+	return retval;
+}
diff --git a/sound/soc/intel/sst/sst_debug.c b/sound/soc/intel/sst/sst_debug.c
new file mode 100644
index 0000000..ce771c5
--- /dev/null
+++ b/sound/soc/intel/sst/sst_debug.c
@@ -0,0 +1,1328 @@
+/*
+ *  sst_debug.c - Intel SST Driver debugfs support
+ *
+ *  Copyright (C) 2012	Intel Corp
+ *  Authors:	Vinod Koul <vinod.koul@intel.com>
+ *		Omair Mohammed Abdullah <omair.m.abdullah@linux.intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This file contains all debugfs functions
+ *  Support includes:
+ *   - Disabling/Enabling runtime PM for SST
+ *   - Reading/Writing SST SHIM registers
+ *   - Reading/Enabling Input OSC Clock
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": debugfs: " fmt
+
+#include <linux/fs.h>
+#include <linux/pci.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/pm_runtime.h>
+#include <linux/uaccess.h>
+#include <asm/intel_scu_pmic.h>
+#include <asm/intel_scu_ipcutil.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+
+#define DMA_NUM_CH	8
+#define DEBUGFS_SSP_BUF_SIZE	300  /* 22 chars * 12 regs */
+#define DEBUGFS_DMA_BUF_SIZE	2500 /* 32 chars * 78 regs */
+
+/* Register Offsets of SSP3 and LPE DMA */
+u32 ssp_reg_off[] = {0x0, 0x4, 0x8, 0xC, 0x10, 0x28, 0x2C, 0x30, 0x34, 0x38,
+			0x3C, 0x40};
+/* Excludes the channel registers */
+u32 dma_reg_off[] = {0x2C0, 0x2C8, 0x2D0, 0x2D8, 0x2E0, 0x2E8,
+		0x2F0, 0x2F8, 0x300, 0x308, 0x310, 0x318, 0x320, 0x328, 0x330,
+		0x338, 0x340, 0x348, 0x350, 0x358, 0x360, 0x368, 0x370, 0x378,
+		0x380, 0x388, 0x390, 0x398, 0x3A0, 0x3A8, 0x3B0, 0x3C8, 0x3D0,
+		0x3D8, 0x3E0, 0x3E8, 0x3F0, 0x3F8};
+
+static ssize_t sst_debug_shim_read(struct file *file, char __user *user_buf,
+				   size_t count, loff_t *ppos)
+{
+	struct intel_sst_drv *drv = file->private_data;
+	unsigned long long val = 0;
+	unsigned int addr;
+	char buf[512];
+	char name[8];
+	int pos = 0;
+
+	buf[0] = 0;
+	if (drv->sst_state == SST_SUSPENDED) {
+		pr_err("FW suspended, cannot read SHIM registers\n");
+		return -EFAULT;
+	}
+
+	for (addr = SST_SHIM_BEGIN; addr <= SST_SHIM_END; addr += 8) {
+		switch (drv->pci_id) {
+		case SST_CLV_PCI_ID:
+			val = sst_shim_read(drv->shim, addr);
+			break;
+		case SST_MRFLD_PCI_ID:
+		case SST_BYT_PCI_ID:
+		case SST_CHT_PCI_ID:
+			val = sst_shim_read64(drv->shim, addr);
+			break;
+		}
+
+		name[0] = 0;
+		switch (addr) {
+		case SST_ISRX:
+			strcpy(name, "ISRX"); break;
+		case SST_ISRD:
+			strcpy(name, "ISRD"); break;
+		case SST_IPCX:
+			strcpy(name, "IPCX"); break;
+		case SST_IPCD:
+			strcpy(name, "IPCD"); break;
+		case SST_IMRX:
+			strcpy(name, "IMRX"); break;
+		case SST_IMRD:
+			strcpy(name, "IMRD"); break;
+		}
+		pos += sprintf(buf + pos, "0x%.2x: %.8llx  %s\n", addr, val, name);
+	}
+
+	return simple_read_from_buffer(user_buf, count, ppos,
+			buf, strlen(buf));
+}
+
+static ssize_t sst_debug_shim_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct intel_sst_drv *drv = file->private_data;
+	char buf[32];
+	char *start = buf, *end;
+	unsigned long long value;
+	unsigned long reg_addr;
+	int ret_val;
+	size_t buf_size = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	buf[buf_size] = 0;
+
+	if (drv->sst_state == SST_SUSPENDED) {
+		pr_err("FW suspended, cannot write SHIM registers\n");
+		return -EFAULT;
+	}
+
+	while (*start == ' ')
+		start++;
+	end = start;
+	while (isalnum(*end))
+		end++;
+	*end = 0;
+
+	ret_val = kstrtoul(start, 16, &reg_addr);
+	if (ret_val) {
+		pr_err("kstrtoul failed, ret_val = %d\n", ret_val);
+		return ret_val;
+	}
+	if (!(SST_SHIM_BEGIN < reg_addr && reg_addr < SST_SHIM_END)) {
+		pr_err("invalid shim address: 0x%lx\n", reg_addr);
+		return -EINVAL;
+	}
+
+	start = end + 1;
+	while (*start == ' ')
+		start++;
+
+	ret_val = kstrtoull(start, 16, &value);
+	if (ret_val) {
+		pr_err("kstrtoul failed, ret_val = %d\n", ret_val);
+		return ret_val;
+	}
+
+	pr_debug("writing shim: 0x%.2lx=0x%.8llx", reg_addr, value);
+
+	if (drv->pci_id == SST_CLV_PCI_ID)
+		sst_shim_write(drv->shim, reg_addr, (u32) value);
+	else if (drv->pci_id == SST_MRFLD_PCI_ID)
+		sst_shim_write64(drv->shim, reg_addr, (u64) value);
+
+	/* Userspace has been fiddling around behind the kernel's back */
+	add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE);
+	return buf_size;
+}
+
+static const struct file_operations sst_debug_shim_ops = {
+	.open = simple_open,
+	.read = sst_debug_shim_read,
+	.write = sst_debug_shim_write,
+	.llseek = default_llseek,
+};
+
+#define RESVD_DUMP_SZ		40
+#define IA_LPE_MAILBOX_DUMP_SZ	100
+#define LPE_IA_MAILBOX_DUMP_SZ	100
+#define SCU_LPE_MAILBOX_DUMP_SZ	256
+#define LPE_SCU_MAILBOX_DUMP_SZ	256
+
+static inline int is_fw_running(struct intel_sst_drv *drv)
+{
+	pm_runtime_get_sync(drv->dev);
+	atomic_inc(&drv->pm_usage_count);
+	if (drv->sst_state != SST_FW_RUNNING) {
+		pr_err("FW not running\n");
+		sst_pm_runtime_put(drv);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static inline int read_buffer_fromio(char *dest, unsigned int sz,
+				     const u32 __iomem *from,
+				     unsigned int num_dwords)
+{
+	int i;
+	const unsigned int rowsz = 16, groupsz = 4;
+	const unsigned int size = num_dwords * sizeof(u32);
+	unsigned int linelen, printed = 0, remaining = size;
+
+	u8 *tmp = kmalloc(size, GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+	memcpy_fromio(tmp, from, size);
+	for (i = 0; i < size; i += rowsz) {
+		linelen = min(remaining, rowsz);
+		remaining -= rowsz;
+		hex_dump_to_buffer(tmp + i, linelen, rowsz, groupsz,
+				   dest + printed, sz - printed, false);
+		printed += linelen * 2 + linelen / groupsz - 1;
+		*(dest + printed++) = '\n';
+		*(dest + printed) = 0;
+	}
+	kfree(tmp);
+	return 0;
+}
+
+static inline int copy_sram_to_user_buffer(char __user *user_buf, size_t count, loff_t *ppos,
+					   unsigned int num_dwords, const u32 __iomem *from,
+					   u32 offset)
+{
+	ssize_t bytes_read;
+	char *buf;
+	int pos;
+	unsigned int bufsz = 48 + sizeof(u32) * num_dwords * (2 + 1) + 1;
+
+	buf = kmalloc(bufsz, GFP_KERNEL);
+	if (!buf) {
+		pr_err("%s: no memory\n", __func__);
+		return -ENOMEM;
+	}
+	*buf = 0;
+	pos = scnprintf(buf, 48, "Reading %u dwords from offset %#x\n",
+			num_dwords, offset);
+	read_buffer_fromio(buf + pos, bufsz - pos, from, num_dwords);
+	bytes_read = simple_read_from_buffer(user_buf, count, ppos,
+					     buf, strlen(buf));
+	kfree(buf);
+	return bytes_read;
+}
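+
+/*
+ * Editor's note on the bufsz arithmetic above: the "* (2 + 1)" budgets
+ * roughly three output chars per dumped byte (two hex digits plus
+ * separators and newlines). For example, with RESVD_DUMP_SZ (40 dwords):
+ * bufsz = 48 + 4 * 40 * 3 + 1 = 529 bytes, comfortably above the
+ * ~360 chars that hex_dump_to_buffer() emits for 160 bytes.
+ */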
+
+static ssize_t sst_debug_sram_lpe_debug_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct intel_sst_drv *drv = file->private_data;
+	int ret = 0;
+
+	ret = is_fw_running(drv);
+	if (ret)
+		return ret;
+
+	ret = copy_sram_to_user_buffer(user_buf, count, ppos, RESVD_DUMP_SZ,
+				       (u32 *)(drv->mailbox + SST_RESERVED_OFFSET),
+				       SST_RESERVED_OFFSET);
+	sst_pm_runtime_put(drv);
+	return ret;
+}
+
+static const struct file_operations sst_debug_sram_lpe_debug_ops = {
+	.open = simple_open,
+	.read = sst_debug_sram_lpe_debug_read,
+	.llseek = default_llseek,
+};
+
+static ssize_t sst_debug_sram_lpe_checkpoint_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct intel_sst_drv *drv = file->private_data;
+	int ret = 0;
+	u32 offset;
+
+	ret = is_fw_running(drv);
+	if (ret)
+		return ret;
+
+	offset = sst_drv_ctx->pdata->debugfs_data->checkpoint_offset;
+
+	ret = copy_sram_to_user_buffer(user_buf, count, ppos,
+				sst_drv_ctx->pdata->debugfs_data->checkpoint_size,
+				(u32 *)(drv->mailbox + offset), offset);
+	sst_pm_runtime_put(drv);
+	return ret;
+}
+
+static const struct file_operations sst_debug_sram_lpe_checkpoint_ops = {
+	.open = simple_open,
+	.read = sst_debug_sram_lpe_checkpoint_read,
+	.llseek = default_llseek,
+};
+
+static ssize_t sst_debug_sram_ia_lpe_mbox_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct intel_sst_drv *drv = file->private_data;
+	int ret = 0;
+
+	ret = is_fw_running(drv);
+	if (ret)
+		return ret;
+	ret = copy_sram_to_user_buffer(user_buf, count, ppos, IA_LPE_MAILBOX_DUMP_SZ,
+				       (u32 *)(drv->mailbox + SST_MAILBOX_SEND),
+				       SST_MAILBOX_SEND);
+	sst_pm_runtime_put(drv);
+	return ret;
+}
+
+static const struct file_operations sst_debug_sram_ia_lpe_mbox_ops = {
+	.open = simple_open,
+	.read = sst_debug_sram_ia_lpe_mbox_read,
+	.llseek = default_llseek,
+};
+
+static ssize_t sst_debug_sram_lpe_ia_mbox_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct intel_sst_drv *drv = file->private_data;
+	int ret = 0;
+
+	ret = is_fw_running(drv);
+	if (ret)
+		return ret;
+
+	ret = copy_sram_to_user_buffer(user_buf, count, ppos, LPE_IA_MAILBOX_DUMP_SZ,
+				       (u32 *)(drv->mailbox + drv->mailbox_recv_offset),
+				       drv->mailbox_recv_offset);
+	sst_pm_runtime_put(drv);
+	return ret;
+}
+
+static const struct file_operations sst_debug_sram_lpe_ia_mbox_ops = {
+	.open = simple_open,
+	.read = sst_debug_sram_lpe_ia_mbox_read,
+	.llseek = default_llseek,
+};
+
+static ssize_t sst_debug_sram_lpe_scu_mbox_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct intel_sst_drv *drv = file->private_data;
+	int ret = 0;
+
+	ret = is_fw_running(drv);
+	if (ret)
+		return ret;
+	ret = copy_sram_to_user_buffer(user_buf, count, ppos, LPE_SCU_MAILBOX_DUMP_SZ,
+				       (u32 *)(drv->mailbox + SST_LPE_SCU_MAILBOX),
+				       SST_LPE_SCU_MAILBOX);
+	sst_pm_runtime_put(drv);
+	return ret;
+}
+
+static const struct file_operations sst_debug_sram_lpe_scu_mbox_ops = {
+	.open = simple_open,
+	.read = sst_debug_sram_lpe_scu_mbox_read,
+	.llseek = default_llseek,
+};
+
+static ssize_t sst_debug_sram_scu_lpe_mbox_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct intel_sst_drv *drv = file->private_data;
+	int ret = 0;
+
+	ret = is_fw_running(drv);
+	if (ret)
+		return ret;
+	ret = copy_sram_to_user_buffer(user_buf, count, ppos, SCU_LPE_MAILBOX_DUMP_SZ,
+				       (u32 *)(drv->mailbox + SST_SCU_LPE_MAILBOX),
+				       SST_SCU_LPE_MAILBOX);
+	sst_pm_runtime_put(drv);
+	return ret;
+}
+
+static const struct file_operations sst_debug_sram_scu_lpe_mbox_ops = {
+	.open = simple_open,
+	.read = sst_debug_sram_scu_lpe_mbox_read,
+	.llseek = default_llseek,
+};
+
+static ssize_t sst_debug_lpe_log_enable_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct intel_sst_drv *drv = file->private_data;
+	struct ipc_post *msg = NULL;
+	char buf[32];
+	int str_id = 0;	/* DUMMY, required by post message */
+	struct snd_sst_lpe_log_params params;
+	int ret_val = 0;
+	char *start = buf, *end;
+	int i = 0;
+	u8 *addr;
+	unsigned long tmp;
+
+	size_t buf_size = min(count, sizeof(buf)-1);
+	memset(&params, 0, sizeof(params));
+
+	ret_val = is_fw_running(drv);
+	if (ret_val)
+		return ret_val;
+
+	if (copy_from_user(buf, user_buf, buf_size)) {
+		ret_val = -EFAULT;
+		goto put_pm_runtime;
+	}
+
+	buf[buf_size] = 0;
+
+	addr = &params.dbg_type;
+	for (i = 0; i < (sizeof(params) - sizeof(u8)); i++) {
+		while (*start == ' ')
+			start++;
+		end = start;
+		while (isalnum(*end))
+			end++;
+		*end = 0;
+		ret_val = kstrtoul(start, 16, &tmp);
+		if (ret_val) {
+			pr_err("kstrtoul failed, ret_val = %d\n", ret_val);
+			goto put_pm_runtime;
+		}
+		*addr++ = (u8)tmp;
+		start = end + 1;
+	}
+
+	pr_debug("dbg_type = %d module_id = %d log_level = %d\n",
+			params.dbg_type, params.module_id, params.log_level);
+
+	if (params.dbg_type < NO_DEBUG || params.dbg_type > PTI_DEBUG) {
+		ret_val = -EINVAL;
+		goto put_pm_runtime;
+	}
+
+	ret_val = sst_create_ipc_msg(&msg, true);
+	if (ret_val != 0)
+		goto put_pm_runtime;
+
+	if (sst_drv_ctx->pci_id != SST_MRFLD_PCI_ID) {
+		sst_fill_header(&msg->header, IPC_IA_DBG_LOG_ENABLE, 1,
+							str_id);
+		msg->header.part.data = sizeof(u32) + sizeof(params);
+		memcpy(msg->mailbox_data, &msg->header.full, sizeof(u32));
+		memcpy(msg->mailbox_data + sizeof(u32), &params,
+							sizeof(params));
+	}
+	drv->ops->sync_post_message(msg);
+	ret_val = buf_size;
+put_pm_runtime:
+	sst_pm_runtime_put(drv);
+	return ret_val;
+}
+
+/*
+ * Circular buffer hdr -> 0x1000
+ * log data starts at 0x1010
+ */
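+/*
+ * Editor's sketch of the two read cases handled below (reconstructed
+ * from the code, not from the original patch):
+ *
+ *	base_addr   rd_addr     wr_addr    end_addr
+ *	|-----------|===========|----------|	rd < wr: one linear copy
+ *
+ *	base_addr   wr_addr     rd_addr    end_addr
+ *	|===========|-----------|==========|	rd > wr: copy rd..end,
+ *						then wrap to base..wr
+ */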
+static ssize_t sst_debug_lpe_log_enable_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct intel_sst_drv *drv = file->private_data;
+	struct lpe_log_buf_hdr buf_hdr;
+	size_t size1, size2, offset, bytes_read;
+	char *buf = NULL;
+	int ret;
+
+	ret = is_fw_running(drv);
+	if (ret)
+		return ret;
+
+	/* Get the sram lpe log buffer header */
+	memcpy_fromio(&buf_hdr, (u32 *)(drv->mailbox + SST_SCU_LPE_MAILBOX),
+							sizeof(buf_hdr));
+	if (buf_hdr.rd_addr == buf_hdr.wr_addr) {
+		pr_err("SRAM emptry\n");
+		ret = -ENODATA;
+		goto put_pm_runtime;
+	} else if (buf_hdr.rd_addr < buf_hdr.wr_addr) {
+		size1 = buf_hdr.wr_addr - buf_hdr.rd_addr;
+		offset = (buf_hdr.rd_addr - buf_hdr.base_addr)
+						+ SST_SCU_LPE_LOG_BUF;
+		pr_debug("Size = %zu, offset = %zx\n", size1, offset);
+		buf = vmalloc(size1);
+		if (buf == NULL) {
+			pr_err("Not enough memory to allocate\n");
+			ret = -ENOMEM;
+			goto put_pm_runtime;
+		}
+		memcpy_fromio(buf, (u32 *)(drv->mailbox + offset), size1);
+		bytes_read = simple_read_from_buffer(user_buf, count, ppos,
+							buf, size1);
+
+		buf_hdr.rd_addr = buf_hdr.rd_addr + bytes_read;
+
+	} else {
+		/* Read including the end address as well */
+		size1 = buf_hdr.end_addr - buf_hdr.rd_addr + 1;
+		offset = (buf_hdr.rd_addr - buf_hdr.base_addr)
+						+ SST_SCU_LPE_LOG_BUF;
+		pr_debug("Size = %zu, offset = %zx\n", size1, offset);
+		buf = vmalloc(size1);
+		if (buf == NULL) {
+			pr_err("Not enough memory to allocate\n");
+			ret = -ENOMEM;
+			goto put_pm_runtime;
+		}
+		memcpy_fromio(buf, (u32 *)(drv->mailbox + offset), size1);
+		bytes_read = simple_read_from_buffer(user_buf, count, ppos,
+							buf, size1);
+		if (bytes_read != size1) {
+			buf_hdr.rd_addr = buf_hdr.rd_addr + bytes_read;
+			goto update_rd_ptr;
+		}
+
+		/* Wrap around lpe log buffer here */
+		vfree(buf);
+		buf = NULL;
+		size2 = (buf_hdr.wr_addr - buf_hdr.base_addr);
+		offset = SST_SCU_LPE_LOG_BUF;
+		pr_debug("Size = %zu, offset = %zx\n", size2, offset);
+		buf = vmalloc(size2);
+		if (buf == NULL) {
+			pr_err("Not enough memory to allocate\n");
+			ret = -ENOMEM;
+			goto put_pm_runtime;
+		}
+		memcpy_fromio(buf, (u32 *)(drv->mailbox + offset), size2);
+		bytes_read += simple_read_from_buffer(user_buf,
+				(count - bytes_read), ppos, buf, size2);
+		buf_hdr.rd_addr = buf_hdr.base_addr + bytes_read - size1;
+
+	}
+update_rd_ptr:
+	if (bytes_read != 0) {
+		memcpy_toio((u32 *)(drv->mailbox + SST_SCU_LPE_MAILBOX +
+				2 * sizeof(u32)), &(buf_hdr.rd_addr), sizeof(u32));
+		pr_debug("read pointer restored\n");
+	}
+	vfree(buf);
+	buf = NULL;
+	ret = bytes_read;
+put_pm_runtime:
+	sst_pm_runtime_put(drv);
+	return ret;
+}
+
+static const struct file_operations sst_debug_lpe_log_enable_ops = {
+	.open = simple_open,
+	.write = sst_debug_lpe_log_enable_write,
+	.read = sst_debug_lpe_log_enable_read,
+	.llseek = default_llseek,
+};
+
+static ssize_t sst_debug_rtpm_read(struct file *file, char __user *user_buf,
+				   size_t count, loff_t *ppos)
+{
+	struct intel_sst_drv *drv = file->private_data;
+	char *status;
+
+	int usage = atomic_read(&drv->pm_usage_count);
+
+	pr_debug("RTPM usage: %d\n", usage);
+	status = drv->debugfs.runtime_pm_status ? "enabled\n" : "disabled\n";
+	return simple_read_from_buffer(user_buf, count, ppos,
+			status, strlen(status));
+}
+
+static ssize_t sst_debug_rtpm_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct intel_sst_drv *drv = file->private_data;
+	char buf[16];
+	int sz = min(count, sizeof(buf)-1);
+
+	int usage = atomic_read(&drv->pm_usage_count);
+
+	pr_debug("RTPM Usage: %d\n", usage);
+	if (copy_from_user(buf, user_buf, sz))
+		return -EFAULT;
+	buf[sz] = 0;
+
+	if (!strncmp(buf, "enable\n", sz)) {
+		/* already enabled? */
+		if (drv->debugfs.runtime_pm_status)
+			return -EINVAL;
+		drv->debugfs.runtime_pm_status = 1;
+		pm_runtime_allow(drv->dev);
+		sz = 6; /* strlen("enable") */
+	} else if (!strncmp(buf, "disable\n", sz)) {
+		if (!drv->debugfs.runtime_pm_status)
+			return -EINVAL;
+		drv->debugfs.runtime_pm_status = 0;
+		pm_runtime_forbid(drv->dev);
+		sz = 7; /* strlen("disable") */
+	} else
+		return -EINVAL;
+	return sz;
+}
+
+static const struct file_operations sst_debug_rtpm_ops = {
+	.open = simple_open,
+	.read = sst_debug_rtpm_read,
+	.write = sst_debug_rtpm_write,
+	.llseek = default_llseek,
+};
+
+static ssize_t sst_debug_readme_read(struct file *file, char __user *user_buf,
+				   size_t count, loff_t *ppos)
+{
+	const char *buf =
+		"\nAll files can be read using 'cat'\n"
+		"1. 'echo disable > runtime_pm' disables runtime PM and will prevent SST from suspending.\n"
+		"To enable runtime PM, echo 'enable' to runtime_pm. Dmesg will print the runtime pm usage\n"
+		"if logs are enabled.\n"
+		"2. Write to shim register using 'echo <addr> <value> > shim_dump'.\n"
+		"Valid address range is between 0x00 to 0x80 in increments of 8.\n"
+		"3. echo 1 > fw_clear_context , This sets the flag to skip the context restore\n"
+		"4. echo 1 > fw_clear_cache , This sets the flag to clear the cached copy of firmware\n"
+		"5. echo 1 > fw_reset_state ,This sets the fw state to uninit\n"
+		"6. echo memcpy > fw_dwnld_mode, This will set the firmware download mode to memcpy\n"
+		"   echo lli > fw_dwnld_mode, This will set the firmware download mode to\n"
+					"dma lli mode\n"
+		"   echo dma > fw_dwnld_mode, This will set the firmware download mode to\n"
+					"dma single block mode\n"
+		"7. iram_dump, dram_dump, interfaces provide mmap support to\n"
+		"get the iram and dram dump, these buffers will have data only\n"
+		"after the recovery is triggered\n";
+
+	const char *ctp_buf =
+		"8. Enable input clock by 'echo enable > osc_clk0'.\n"
+		"This prevents the input OSC clock from switching off till it is disabled by\n"
+		"'echo disable > osc_clk0'. The status of the clock indicated who are using it.\n"
+		"9. lpe_log_enable usage:\n"
+		"	echo <dbg_type> <module_id> <log_level> > lpe_log_enable.\n"
+		"10. cat fw_ssp_reg,This will dump the ssp register contents\n"
+		"11. cat fw_dma_reg,This will dump the dma register contents\n";
+
+	const char *mrfld_buf =
+		"8. lpe_log_enable usage:\n"
+		"	echo <dbg_type> <module_id> <log_level> > lpe_log_enable.\n"
+		"9. cat fw_ssp_reg,This will dump the ssp register contents\n"
+		"10. cat fw_dma_reg,This will dump the dma register contents\n"
+		"11. ddr_imr_dump interface provides mmap support to get the imr dump,\n"
+		"this buffer will have data only after the recovery is triggered\n"
+		"12. ipc usage:\n"
+		"\t ipc file works only in binary mode. The ipc format is <IPC hdr><dsp hdr><payload>.\n"
+		"\t drv_id in the ipc header will be overwritten with unique driver id in the driver\n";
+
+	char *readme = NULL;
+	const char *buf2 = NULL;
+	int size, ret = 0;
+
+	switch (sst_drv_ctx->pci_id) {
+	case SST_CLV_PCI_ID:
+		size = strlen(buf) + strlen(ctp_buf) + 2;
+		buf2 = ctp_buf;
+		break;
+	case SST_MRFLD_PCI_ID:
+		size = strlen(buf) + strlen(mrfld_buf) + 2;
+		buf2 = mrfld_buf;
+		break;
+	default:
+		size = strlen(buf) + 1;
+	}
+
+	readme = kmalloc(size, GFP_KERNEL);
+	if (readme == NULL) {
+		pr_err("%s: no memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	if (buf2)
+		sprintf(readme, "%s%s\n", buf, buf2);
+	else
+		sprintf(readme, "%s\n", buf);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos,
+			readme, strlen(readme));
+	kfree(readme);
+	return ret;
+}
+
+static const struct file_operations sst_debug_readme_ops = {
+	.open = simple_open,
+	.read = sst_debug_readme_read,
+};
+
+static ssize_t sst_debug_osc_clk0_read(struct file *file, char __user *user_buf,
+				   size_t count, loff_t *ppos)
+{
+	char status[16];
+	int mode = -1;
+#ifdef CONFIG_INTEL_SCU_IPC_UTIL
+	mode = intel_scu_ipc_set_osc_clk0(0, CLK0_QUERY);
+#endif
+
+	snprintf(status, 16, "0x%x\n", mode);
+	return simple_read_from_buffer(user_buf, count, ppos,
+			status, strlen(status));
+}
+
+static ssize_t sst_debug_osc_clk0_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[16];
+	int sz = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, user_buf, sz))
+		return -EFAULT;
+	buf[sz] = 0;
+
+#ifdef CONFIG_INTEL_SCU_IPC_UTIL
+	if (!strncmp(buf, "enable\n", sz)) {
+		intel_scu_ipc_set_osc_clk0(true, CLK0_DEBUG);
+		sz = 6; /* strlen("enable") */
+	} else if (!strncmp(buf, "disable\n", sz)) {
+		intel_scu_ipc_set_osc_clk0(false, CLK0_DEBUG);
+		sz = 7; /* strlen("disable") */
+	} else
+		return -EINVAL;
+#endif
+	return sz;
+}
+
+static const struct file_operations sst_debug_osc_clk0_ops = {
+	.open = simple_open,
+	.read = sst_debug_osc_clk0_read,
+	.write = sst_debug_osc_clk0_write,
+};
+
+static ssize_t sst_debug_fw_clear_cntx_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char *status;
+
+	status = atomic_read(&sst_drv_ctx->fw_clear_context) ?
+			"clear fw cntx\n" : "do not clear fw cntx\n";
+
+	return simple_read_from_buffer(user_buf, count, ppos,
+			status, strlen(status));
+}
+
+static ssize_t sst_debug_fw_clear_cntx_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[16];
+	int sz = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, user_buf, sz))
+		return -EFAULT;
+	buf[sz] = 0;
+
+	if (!strncmp(buf, "1\n", sz))
+		atomic_set(&sst_drv_ctx->fw_clear_context, 1);
+	else
+		atomic_set(&sst_drv_ctx->fw_clear_context, 0);
+
+	return sz;
+}
+
+static const struct file_operations sst_debug_fw_clear_cntx = {
+	.open = simple_open,
+	.read = sst_debug_fw_clear_cntx_read,
+	.write = sst_debug_fw_clear_cntx_write,
+};
+
+static ssize_t sst_debug_fw_clear_cache_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char *status;
+
+	status = atomic_read(&sst_drv_ctx->fw_clear_cache) ?
+			"cache clear flag set\n" : "cache clear flag not set\n";
+
+	return simple_read_from_buffer(user_buf, count, ppos,
+			status, strlen(status));
+}
+
+static ssize_t sst_debug_fw_clear_cache_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[16];
+	int sz = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, user_buf, sz))
+		return -EFAULT;
+	buf[sz] = 0;
+
+	if (!strncmp(buf, "1\n", sz))
+		atomic_set(&sst_drv_ctx->fw_clear_cache, 1);
+	else
+		return -EINVAL;
+
+	return sz;
+}
+
+static const struct file_operations sst_debug_fw_clear_cache = {
+	.open = simple_open,
+	.read = sst_debug_fw_clear_cache_read,
+	.write = sst_debug_fw_clear_cache_write,
+};
+
+static ssize_t sst_debug_fw_reset_state_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char state[16];
+
+	sprintf(state, "%d\n", sst_drv_ctx->sst_state);
+
+	return simple_read_from_buffer(user_buf, count, ppos,
+			state, strlen(state));
+}
+
+static ssize_t sst_debug_fw_reset_state_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[16];
+	int sz = min(count, sizeof(buf)-1);
+
+	if (copy_from_user(buf, user_buf, sz))
+		return -EFAULT;
+	buf[sz] = 0;
+
+	if (!strncmp(buf, "1\n", sz))
+		sst_set_fw_state_locked(sst_drv_ctx, SST_UN_INIT);
+	else
+		return -EINVAL;
+
+	return sz;
+}
+
+static const struct file_operations sst_debug_fw_reset_state = {
+	.open = simple_open,
+	.read = sst_debug_fw_reset_state_read,
+	.write = sst_debug_fw_reset_state_write,
+};
+
+static ssize_t sst_debug_dwnld_mode_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char *state = "error\n";
+
+	if (sst_drv_ctx->use_dma == 0)
+		state = "memcpy\n";
+	else if (sst_drv_ctx->use_dma == 1)
+		state = sst_drv_ctx->use_lli ? "lli\n" : "dma\n";
+
+	return simple_read_from_buffer(user_buf, count, ppos,
+			state, strlen(state));
+}
+
+static ssize_t sst_debug_dwnld_mode_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[16];
+	int sz = min(count, sizeof(buf)-1);
+
+	if (sst_drv_ctx->sst_state != SST_SUSPENDED &&
+	    sst_drv_ctx->sst_state != SST_UN_INIT) {
+		pr_err("FW should be in suspended/uninit state\n");
+		return -EFAULT;
+	}
+
+	if (copy_from_user(buf, user_buf, sz))
+		return -EFAULT;
+	buf[sz] = '\0';
+
+	/* Firmware needs to be downloaded again to populate the lists */
+	atomic_set(&sst_drv_ctx->fw_clear_cache, 1);
+
+	if (!strncmp(buf, "memcpy\n", sz)) {
+		sst_drv_ctx->use_dma = 0;
+	} else if (!strncmp(buf, "lli\n", sz)) {
+		sst_drv_ctx->use_dma = 1;
+		sst_drv_ctx->use_lli = 1;
+	} else if (!strncmp(buf, "dma\n", sz)) {
+		sst_drv_ctx->use_dma = 1;
+		sst_drv_ctx->use_lli = 0;
+	}
+	return sz;
+
+}
+
+static const struct file_operations sst_debug_dwnld_mode = {
+	.open = simple_open,
+	.read = sst_debug_dwnld_mode_read,
+	.write = sst_debug_dwnld_mode_write,
+};
+
+static int dump_ssp_port(void __iomem *ssp_base, char *buf, int pos)
+{
+	int index = 0;
+
+	while (index < ARRAY_SIZE(ssp_reg_off)) {
+		pos += sprintf(buf + pos, "Reg: 0x%x: 0x%x\n", ssp_reg_off[index],
+			sst_reg_read(ssp_base, ssp_reg_off[index]));
+		index++;
+	}
+	return pos;
+}
+
+static ssize_t sst_debug_ssp_reg_read(struct file *file,
+			char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char *buf;
+	int i, pos = 0;
+	struct intel_sst_drv *drv = file->private_data;
+	int num_ssp, buf_size, ret;
+
+	num_ssp = sst_drv_ctx->pdata->debugfs_data->num_ssp;
+	buf_size = DEBUGFS_SSP_BUF_SIZE * num_ssp;
+
+	buf = kmalloc(buf_size, GFP_KERNEL);
+	if (!buf) {
+		pr_err("%s: no memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	ret = is_fw_running(drv);
+	if (ret)
+		goto err;
+
+	buf[0] = 0;
+
+	for (i = 0; i < num_ssp; i++) {
+		if (!sst_drv_ctx->debugfs.ssp[i]) {
+			pr_err("ssp %d port not mapped\n", i);
+			continue;
+		}
+		pos = dump_ssp_port((sst_drv_ctx->debugfs.ssp[i]), buf, pos);
+	}
+	sst_pm_runtime_put(drv);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+err:
+	kfree(buf);
+	return ret;
+}
+
+static const struct file_operations sst_debug_ssp_reg = {
+		.open = simple_open,
+		.read = sst_debug_ssp_reg_read,
+};
+
+static int dump_dma_reg(char *buf, int pos, int dma)
+{
+	int i, index = 0;
+	int off = 0;
+	void __iomem *dma_reg;
+
+	if (!sst_drv_ctx->debugfs.dma_reg[dma]) {
+		pr_err("dma %d not mapped\n", dma);
+		return pos;
+	}
+
+	pos += sprintf(buf + pos, "\nDump DMA%d Reg\n\n", dma);
+
+	dma_reg = sst_drv_ctx->debugfs.dma_reg[dma];
+
+	/* Dump the DMA channel registers */
+	for (i = 0; i < DMA_NUM_CH; i++) {
+		pos += sprintf(buf + pos, "SAR%d: 0x%x: 0x%llx\n", i, off,
+			sst_reg_read64(dma_reg, off));
+		off += 8;
+
+		pos += sprintf(buf + pos, "DAR%d: 0x%x: 0x%llx\n", i, off,
+			sst_reg_read64(dma_reg, off));
+		off += 8;
+
+		pos += sprintf(buf + pos, "LLP%d: 0x%x: 0x%llx\n", i, off,
+			sst_reg_read64(dma_reg, off));
+		off += 8;
+
+		pos += sprintf(buf + pos, "CTL%d: 0x%x: 0x%llx\n", i, off,
+			sst_reg_read64(dma_reg, off));
+		off += 0x28;
+
+		pos += sprintf(buf + pos, "CFG%d: 0x%x: 0x%llx\n", i, off,
+			sst_reg_read64(dma_reg, off));
+		off += 0x18;
+	}
+
+	/* Dump the remaining DMA registers */
+	while (index < ARRAY_SIZE(dma_reg_off)) {
+		pos += sprintf(buf + pos, "Reg: 0x%x: 0x%llx\n", dma_reg_off[index],
+				sst_reg_read64(dma_reg, dma_reg_off[index]));
+		index++;
+	}
+	return pos;
+}
+
+static ssize_t sst_debug_dma_reg_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char *buf;
+	int pos = 0;
+	int ret, i;
+	struct intel_sst_drv *drv = file->private_data;
+	int num_dma, buf_size;
+
+	num_dma = sst_drv_ctx->pdata->debugfs_data->num_dma;
+	buf_size = DEBUGFS_DMA_BUF_SIZE * num_dma;
+
+	buf = kmalloc(buf_size, GFP_KERNEL);
+	if (!buf) {
+		pr_err("%s: no memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	ret = is_fw_running(drv);
+	if (ret)
+		goto err;
+
+	buf[0] = 0;
+
+	for (i = 0; i < num_dma; i++)
+		pos = dump_dma_reg(buf, pos, i);
+
+	sst_pm_runtime_put(drv);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+err:
+	kfree(buf);
+	return ret;
+}
+
+static const struct file_operations sst_debug_dma_reg = {
+		.open = simple_open,
+		.read = sst_debug_dma_reg_read,
+};
+
+/**
+ * sst_debug_remap - function remaps the iram/dram buff to userspace
+ *
+ * @vma: vm_area_struct passed from userspace
+ * @buf: Physical addr of the pointer to be remapped
+ * @type: type of the buffer
+ *
+ * Remaps the kernel buffer to the userspace
+ */
+static int sst_debug_remap(struct vm_area_struct *vma, char *buf,
+					enum sst_ram_type type)
+{
+	int retval, length;
+	void *mem_area;
+
+	if (!buf)
+		return -EIO;
+
+	length = vma->vm_end - vma->vm_start;
+	pr_debug("iram length 0x%x\n", length);
+
+	/* round it up to the page boundary */
+	mem_area = (void *)PAGE_ALIGN((unsigned long)buf);
+
+	/* map the whole physically contiguous area in one piece  */
+	retval = remap_pfn_range(vma,
+			vma->vm_start,
+			virt_to_phys((void *)mem_area) >> PAGE_SHIFT,
+			length,
+			vma->vm_page_prot);
+	if (retval)
+		pr_err("mapping failed %d\n", retval);
+	return retval;
+}
+
+int sst_debug_iram_dump_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	int retval;
+	struct intel_sst_drv *sst = sst_drv_ctx;
+
+	retval = sst_debug_remap(vma, sst->dump_buf.iram_buf.buf, SST_IRAM);
+
+	return retval;
+}
+
+static const struct file_operations sst_debug_iram_dump = {
+	.open = simple_open,
+	.mmap = sst_debug_iram_dump_mmap,
+};
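+
+/*
+ * Illustrative userspace sketch (not part of the driver): iram_dump and
+ * dram_dump implement only mmap, so the dump is read by mapping the
+ * file rather than with read(2). Assuming the default debugfs mount:
+ *
+ *   int fd = open("/sys/kernel/debug/sst/iram_dump", O_RDONLY);
+ *   void *p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
+ *
+ * where 'len' must not exceed the size of the kernel dump buffer.
+ */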
+
+int sst_debug_dram_dump_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	int retval;
+	struct intel_sst_drv *sst = sst_drv_ctx;
+
+	retval = sst_debug_remap(vma, sst->dump_buf.dram_buf.buf, SST_DRAM);
+
+	return retval;
+}
+
+static const struct file_operations sst_debug_dram_dump = {
+	.open = simple_open,
+	.mmap = sst_debug_dram_dump_mmap,
+};
+
+int sst_debug_ddr_imr_dump_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	int retval;
+	struct intel_sst_drv *sst = sst_drv_ctx;
+
+	retval = sst_debug_remap(vma, sst->ddr, 0);
+
+	return retval;
+}
+
+static const struct file_operations sst_debug_ddr_imr_dump = {
+	.open = simple_open,
+	.mmap = sst_debug_ddr_imr_dump_mmap,
+};
+
+static ssize_t sst_debug_ipc_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct intel_sst_drv *ctx = (struct intel_sst_drv *)file->private_data;
+	unsigned char *buf;
+	struct sst_block *block = NULL;
+	struct ipc_dsp_hdr *dsp_hdr;
+	struct ipc_post *msg = NULL;
+	int ret, res_rqd, msg_id, drv_id;
+	u32 low_payload;
+
+	if (count > 1024)
+		return -EINVAL;
+
+	ret = is_fw_running(ctx);
+	if (ret)
+		return ret;
+
+	buf = kzalloc(count, GFP_KERNEL);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto put_pm_runtime;
+	}
+	if (copy_from_user(buf, user_buf, count)) {
+		ret = -EFAULT;
+		goto free_mem;
+	}
+
+	if (sst_create_ipc_msg(&msg, true)) {
+		ret = -ENOMEM;
+		goto free_mem;
+	}
+
+	msg->mrfld_header.full = *((u64 *)buf);
+	pr_debug("ipc hdr: %llx\n", msg->mrfld_header.full);
+
+	/* Override the drv id with unique drv id */
+	drv_id = sst_assign_pvt_id(ctx);
+	msg->mrfld_header.p.header_high.part.drv_id = drv_id;
+
+	res_rqd = msg->mrfld_header.p.header_high.part.res_rqd;
+	msg_id = msg->mrfld_header.p.header_high.part.msg_id;
+	pr_debug("res_rqd: %d, msg_id: %d, drv_id: %d\n",
+					res_rqd, msg_id, drv_id);
+	if (res_rqd) {
+		block = sst_create_block(ctx, msg_id, drv_id);
+		if (block == NULL) {
+			ret = -ENOMEM;
+			kfree(msg);
+			goto free_mem;
+		}
+	}
+
+	dsp_hdr = (struct ipc_dsp_hdr *)(buf + 8);
+	pr_debug("dsp hdr: %llx\n", *((u64 *)(dsp_hdr)));
+	low_payload = msg->mrfld_header.p.header_low_payload;
+	if (low_payload > (1024 - sizeof(union ipc_header_mrfld))) {
+		pr_err("Invalid low payload length: %x\n", low_payload);
+		ret = -EINVAL;
+		kfree(msg);
+		goto free_block;
+	}
+
+	memcpy(msg->mailbox_data, (buf+(sizeof(union ipc_header_mrfld))),
+			low_payload);
+	sst_add_to_dispatch_list_and_post(ctx, msg);
+	if (res_rqd) {
+		ret = sst_wait_timeout(ctx, block);
+		if (ret) {
+			pr_err("%s: fw returned err %d\n", __func__, ret);
+			goto free_block;
+		}
+
+		if (msg_id == IPC_GET_PARAMS) {
+			unsigned char *r = block->data;
+			memcpy(ctx->debugfs.get_params_data, r, dsp_hdr->length);
+			ctx->debugfs.get_params_len = dsp_hdr->length;
+		}
+
+	}
+	ret = count;
+free_block:
+	if (res_rqd)
+		sst_free_block(sst_drv_ctx, block);
+free_mem:
+	kfree(buf);
+put_pm_runtime:
+	sst_pm_runtime_put(ctx);
+	return ret;
+}
+
+static ssize_t sst_debug_ipc_read(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct intel_sst_drv *ctx = (struct intel_sst_drv *)file->private_data;
+	return simple_read_from_buffer(user_buf, count, ppos,
+			ctx->debugfs.get_params_data,
+			ctx->debugfs.get_params_len);
+}
+
+static const struct file_operations sst_debug_ipc_ops = {
+	.open = simple_open,
+	.write = sst_debug_ipc_write,
+	.read = sst_debug_ipc_read,
+};
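+
+/*
+ * Wire format expected by sst_debug_ipc_write(), as a sketch derived
+ * from the code above (for reference only):
+ *
+ *   bytes 0..7 : union ipc_header_mrfld; its drv_id field is
+ *                overwritten with a freshly assigned private id
+ *   bytes 8..  : struct ipc_dsp_hdr followed by the payload;
+ *                header_low_payload bytes of this region are copied
+ *                into the mailbox, capped at
+ *                1024 - sizeof(union ipc_header_mrfld)
+ */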
+
+struct sst_debug {
+	const char *name;
+	const struct file_operations *fops;
+	umode_t mode;
+};
+
+static const struct sst_debug sst_common_dbg_entries[] = {
+	{"runtime_pm", &sst_debug_rtpm_ops, 0600},
+	{"shim_dump", &sst_debug_shim_ops, 0600},
+	{"fw_clear_context", &sst_debug_fw_clear_cntx, 0600},
+	{"fw_clear_cache", &sst_debug_fw_clear_cache, 0600},
+	{"fw_reset_state", &sst_debug_fw_reset_state, 0600},
+	{"fw_dwnld_mode", &sst_debug_dwnld_mode, 0600},
+	{"iram_dump", &sst_debug_iram_dump, 0400},
+	{"dram_dump", &sst_debug_dram_dump, 0400},
+	{"sram_ia_lpe_mailbox", &sst_debug_sram_ia_lpe_mbox_ops, 0400},
+	{"sram_lpe_ia_mailbox", &sst_debug_sram_lpe_ia_mbox_ops, 0400},
+	{"README", &sst_debug_readme_ops, 0400},
+};
+
+static const struct sst_debug ctp_dbg_entries[] = {
+	{"sram_lpe_debug", &sst_debug_sram_lpe_debug_ops, 0400},
+	{"sram_lpe_checkpoint", &sst_debug_sram_lpe_checkpoint_ops, 0400},
+	{"sram_lpe_scu_mailbox", &sst_debug_sram_lpe_scu_mbox_ops, 0400},
+	{"sram_scu_lpe_mailbox", &sst_debug_sram_scu_lpe_mbox_ops, 0400},
+	{"lpe_log_enable", &sst_debug_lpe_log_enable_ops, 0400},
+	{"fw_ssp_reg", &sst_debug_ssp_reg, 0400},
+	{"fw_dma_reg", &sst_debug_dma_reg, 0400},
+	{"osc_clk0", &sst_debug_osc_clk0_ops, 0600},
+};
+
+static const struct sst_debug mrfld_dbg_entries[] = {
+	{"sram_lpe_checkpoint", &sst_debug_sram_lpe_checkpoint_ops, 0400},
+	{"fw_ssp_reg", &sst_debug_ssp_reg, 0400},
+	{"fw_dma_reg", &sst_debug_dma_reg, 0400},
+	{"ddr_imr_dump", &sst_debug_ddr_imr_dump, 0400},
+	{"ipc", &sst_debug_ipc_ops, 0400},
+};
+
+void sst_debugfs_create_files(struct intel_sst_drv *sst,
+			const struct sst_debug *entries, int size)
+{
+	int i;
+
+	for (i = 0; i < size; i++) {
+		struct dentry *dentry;
+		const struct sst_debug *entry = &entries[i];
+
+		dentry = debugfs_create_file(entry->name, entry->mode,
+				sst->debugfs.root, sst, entry->fops);
+		if (dentry == NULL) {
+			pr_err("Failed to create %s file\n", entry->name);
+			return;
+		}
+	}
+}
+
+void sst_debugfs_init(struct intel_sst_drv *sst)
+{
+	int size = 0;
+	const struct sst_debug *debug = NULL;
+
+	sst->debugfs.root = debugfs_create_dir("sst", NULL);
+	if (IS_ERR(sst->debugfs.root) || !sst->debugfs.root) {
+		pr_err("Failed to create debugfs directory\n");
+		return;
+	}
+
+	sst_debugfs_create_files(sst, sst_common_dbg_entries,
+				ARRAY_SIZE(sst_common_dbg_entries));
+
+	/* Initial status is enabled */
+	sst->debugfs.runtime_pm_status = 1;
+
+	if (sst->pci_id == SST_MRFLD_PCI_ID) {
+		debug = mrfld_dbg_entries;
+		size = ARRAY_SIZE(mrfld_dbg_entries);
+	} else if (sst->pci_id == SST_CLV_PCI_ID) {
+		debug = ctp_dbg_entries;
+		size = ARRAY_SIZE(ctp_dbg_entries);
+	}
+
+	if (debug)
+		sst_debugfs_create_files(sst, debug, size);
+}
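+
+/*
+ * For reference, a successful sst_debugfs_init() on Merrifield creates
+ * (assuming debugfs is mounted at /sys/kernel/debug):
+ *
+ *   /sys/kernel/debug/sst/
+ *     runtime_pm  shim_dump  fw_clear_context  fw_clear_cache
+ *     fw_reset_state  fw_dwnld_mode  iram_dump  dram_dump
+ *     sram_ia_lpe_mailbox  sram_lpe_ia_mailbox  README
+ *     sram_lpe_checkpoint  fw_ssp_reg  fw_dma_reg  ddr_imr_dump  ipc
+ */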
+
+void sst_debugfs_exit(struct intel_sst_drv *sst)
+{
+	if (sst->debugfs.runtime_pm_status)
+		pm_runtime_allow(sst->dev);
+	debugfs_remove_recursive(sst->debugfs.root);
+}
diff --git a/sound/soc/intel/sst/sst_drv_interface.c b/sound/soc/intel/sst/sst_drv_interface.c
new file mode 100644
index 0000000..07ecd20
--- /dev/null
+++ b/sound/soc/intel/sst/sst_drv_interface.c
@@ -0,0 +1,1123 @@
+/*
+ *  sst_drv_interface.c - Intel SST Driver for audio engine
+ *
+ *  Copyright (C) 2008-10 Intel Corp
+ *  Authors:	Vinod Koul <vinod.koul@intel.com>
+ *		Harsha Priya <priya.harsha@intel.com>
+ *		Dharageswari R <dharageswari.r@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This file defines the interface between the platform driver and the SST
+ * driver.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <linux/firmware.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+#include <linux/math64.h>
+#include <linux/intel_mid_pm.h>
+#include <sound/compress_offload.h>
+#include <sound/pcm.h>
+#include <sound/intel_sst_ioctl.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+
+#define NUM_CODEC 2
+#define MIN_FRAGMENT 2
+#define MAX_FRAGMENT 4
+#define MIN_FRAGMENT_SIZE (50 * 1024)
+#define MAX_FRAGMENT_SIZE (1024 * 1024)
+#define SST_GET_BYTES_PER_SAMPLE(pcm_wd_sz)  (((pcm_wd_sz + 15) >> 4) << 1)
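+
+/*
+ * Worked example for the macro above: for pcm_wd_sz = 16,
+ * ((16 + 15) >> 4) << 1 = 1 << 1 = 2 bytes per sample; for
+ * pcm_wd_sz = 24, ((24 + 15) >> 4) << 1 = 2 << 1 = 4 bytes, i.e. a
+ * 24-bit sample is carried in a 32-bit container.
+ */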
+
+void sst_restore_fw_context(void)
+{
+	struct snd_sst_ctxt_params fw_context;
+	struct ipc_post *msg = NULL;
+	int retval = 0;
+	struct sst_block *block;
+
+	/*
+	 * Skip the context restore when fw_clear_context is set;
+	 * fw_clear_context is set through debugfs.
+	 */
+	if (atomic_read(&sst_drv_ctx->fw_clear_context)) {
+		pr_debug("Skipping restore_fw_context\n");
+		atomic_set(&sst_drv_ctx->fw_clear_context, 0);
+		return;
+	}
+
+	pr_debug("restore_fw_context\n");
+	/* nothing to restore */
+	if (!sst_drv_ctx->fw_cntx_size)
+		return;
+	pr_debug("restoring context......\n");
+	/* send msg to fw */
+	retval = sst_create_block_and_ipc_msg(&msg, true, sst_drv_ctx, &block,
+			IPC_IA_SET_FW_CTXT, 0);
+	if (retval) {
+		pr_err("Can't allocate block/msg. No restore fw_context\n");
+		return;
+	}
+
+	sst_drv_ctx->sst_state = SST_FW_CTXT_RESTORE;
+	sst_fill_header(&msg->header, IPC_IA_SET_FW_CTXT, 1, 0);
+
+	msg->header.part.data = sizeof(fw_context) + sizeof(u32);
+	fw_context.address = virt_to_phys((void *)sst_drv_ctx->fw_cntx);
+	fw_context.size = sst_drv_ctx->fw_cntx_size;
+	memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
+	memcpy(msg->mailbox_data + sizeof(u32),
+				&fw_context, sizeof(fw_context));
+	sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+	retval = sst_wait_timeout(sst_drv_ctx, block);
+	sst_free_block(sst_drv_ctx, block);
+	if (retval)
+		pr_err("sst_restore_fw_context..timeout!\n");
+	return;
+}
+
+/*
+ * sst_download_fw - download the audio firmware to DSP
+ *
+ * This function is called when the FW needs to be downloaded to SST DSP engine
+ */
+int sst_download_fw(void)
+{
+	int retval = 0;
+
+	retval = sst_load_fw();
+	if (retval)
+		return retval;
+	pr_debug("fw loaded successfully\n");
+
+	if (sst_drv_ctx->ops->restore_dsp_context)
+		sst_drv_ctx->ops->restore_dsp_context();
+	sst_drv_ctx->sst_state = SST_FW_RUNNING;
+	return retval;
+}
+
+int free_stream_context(unsigned int str_id)
+{
+	struct stream_info *stream;
+	int ret = 0;
+
+	stream = get_stream_info(str_id);
+	if (stream) {
+		/* str_id is valid, so stream is allocated */
+		ret = sst_free_stream(str_id);
+		if (ret)
+			sst_clean_stream(&sst_drv_ctx->streams[str_id]);
+		return ret;
+	}
+	return ret;
+}
+
+/*
+ * sst_send_algo_param - send LPE Mixer param to SST
+ *
+ * this function sends the algo parameter to sst dsp engine
+ */
+static int sst_send_algo_param(struct snd_ppp_params *algo_params)
+{
+	u32 header_size = 0;
+	struct ipc_post *msg = NULL;
+	u32 ipc_msg_size = sizeof(u32) + sizeof(*algo_params)
+			 - sizeof(algo_params->params) + algo_params->size;
+	u32 offset = 0;
+
+	if (ipc_msg_size > SST_MAILBOX_SIZE)
+		return -ENOMEM;
+	if (sst_create_ipc_msg(&msg, true))
+		return -ENOMEM;
+	sst_fill_header(&msg->header,
+			IPC_IA_ALG_PARAMS, 1, algo_params->str_id);
+	msg->header.part.data = ipc_msg_size;
+	memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
+	offset = sizeof(u32);
+	header_size = sizeof(*algo_params) - sizeof(algo_params->params);
+	memcpy(msg->mailbox_data + sizeof(u32), algo_params,
+		sizeof(*algo_params) - sizeof(algo_params->params));
+	offset += header_size;
+	memcpy(msg->mailbox_data + offset, algo_params->params,
+			algo_params->size);
+	sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+	return 0;
+}
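+
+/*
+ * Mailbox layout produced above, as a sketch for reference:
+ *
+ *   [0..3]     : the 32-bit IPC header
+ *   [4..]      : struct snd_ppp_params minus its trailing 'params'
+ *                pointer
+ *   [.. +size] : algo_params->size bytes of raw parameter data
+ */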
+
+static int sst_send_lpe_mixer_algo_params(void)
+{
+	struct snd_ppp_params algo_param;
+	struct snd_ppp_mixer_params mixer_param;
+	unsigned int input_mixer, stream_device_id;
+	int retval = 0;
+
+	retval = intel_sst_check_device();
+	if (retval) {
+		pr_err("sst_check_device failed %d\n", retval);
+		return retval;
+	}
+
+	mutex_lock(&sst_drv_ctx->mixer_ctrl_lock);
+	input_mixer = (sst_drv_ctx->device_input_mixer)
+				& SST_INPUT_STREAM_MIXED;
+	pr_debug("Input Mixer settings %d", input_mixer);
+	stream_device_id = sst_drv_ctx->device_input_mixer - input_mixer;
+	algo_param.algo_id = SST_ALGO_MIXER;
+	algo_param.str_id = stream_device_id;
+	algo_param.enable = 1;
+	algo_param.operation = SST_SET_ALGO;
+	algo_param.size = sizeof(mixer_param);
+	mixer_param.type = SST_ALGO_PARAM_MIXER_STREAM_CFG;
+	mixer_param.input_stream_bitmap = input_mixer;
+	mixer_param.size = sizeof(input_mixer);
+	algo_param.params = &mixer_param;
+	mutex_unlock(&sst_drv_ctx->mixer_ctrl_lock);
+	pr_debug("setting pp param\n");
+	pr_debug("Algo ID %d Str id %d Enable %d Size %d\n",
+			algo_param.algo_id, algo_param.str_id,
+			algo_param.enable, algo_param.size);
+	sst_send_algo_param(&algo_param);
+	sst_pm_runtime_put(sst_drv_ctx);
+	return retval;
+}
+
+/*
+ * sst_get_stream_allocated - this function gets a stream allocated with
+ * the given params
+ *
+ * @str_param : stream params
+ * @lib_dnld : pointer to pointer of lib download struct
+ *
+ * This creates a new stream id for the stream; if a library has to be
+ * downloaded to the DSP, it downloads it first
+ */
+int sst_get_stream_allocated(struct snd_sst_params *str_param,
+		struct snd_sst_lib_download **lib_dnld)
+{
+	int retval, str_id;
+	struct sst_block *block;
+	struct snd_sst_alloc_response *response;
+	struct stream_info *str_info;
+
+	pr_debug("In %s\n", __func__);
+	block = sst_create_block(sst_drv_ctx, 0, 0);
+	if (block == NULL)
+		return -ENOMEM;
+
+	retval = sst_drv_ctx->ops->alloc_stream((char *) str_param, block);
+	str_id = retval;
+	if (retval < 0) {
+		pr_err("sst_alloc_stream failed %d\n", retval);
+		goto free_block;
+	}
+	pr_debug("Stream allocated %d\n", retval);
+	str_info = get_stream_info(str_id);
+	if (str_info == NULL) {
+		pr_err("get stream info returned null\n");
+		str_id = -EINVAL;
+		goto free_block;
+	}
+
+	/* Block the call for reply */
+	retval = sst_wait_timeout(sst_drv_ctx, block);
+	if (block->data) {
+		response = (struct snd_sst_alloc_response *)block->data;
+		retval = response->str_type.result;
+		if (!retval)
+			goto free_block;
+
+		pr_err("sst: FW alloc failed retval %d\n", retval);
+		if (retval == SST_ERR_STREAM_IN_USE) {
+			pr_err("sst:FW not in clean state, send free for:%d\n",
+					str_id);
+			sst_free_stream(str_id);
+			*lib_dnld = NULL;
+		}
+		if (retval == SST_LIB_ERR_LIB_DNLD_REQUIRED) {
+			*lib_dnld = kzalloc(sizeof(**lib_dnld), GFP_KERNEL);
+			if (*lib_dnld == NULL) {
+				str_id = -ENOMEM;
+				goto free_block;
+			}
+			memcpy(*lib_dnld, &response->lib_dnld, sizeof(**lib_dnld));
+			sst_clean_stream(str_info);
+		} else {
+			*lib_dnld = NULL;
+		}
+		str_id = -retval;
+	} else if (retval != 0) {
+		pr_err("sst: FW alloc failed retval %d\n", retval);
+		/* alloc failed, so reset the state to uninit */
+		str_info->status = STREAM_UN_INIT;
+		str_id = retval;
+	}
+free_block:
+	sst_free_block(sst_drv_ctx, block);
+	return str_id; /* returns an error (set above) or a valid stream id */
+}
+
+/*
+ * sst_get_sfreq - this function returns the frequency of the stream
+ *
+ * @str_param : stream params
+ */
+int sst_get_sfreq(struct snd_sst_params *str_param)
+{
+	switch (str_param->codec) {
+	case SST_CODEC_TYPE_PCM:
+		return str_param->sparams.uc.pcm_params.sfreq;
+	case SST_CODEC_TYPE_AAC:
+		return str_param->sparams.uc.aac_params.externalsr;
+	case SST_CODEC_TYPE_MP3:
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * sst_get_num_channel - this function returns the number of channels of the stream
+ *
+ * @str_param : stream params
+ */
+int sst_get_num_channel(struct snd_sst_params *str_param)
+{
+	switch (str_param->codec) {
+	case SST_CODEC_TYPE_PCM:
+		return str_param->sparams.uc.pcm_params.num_chan;
+	case SST_CODEC_TYPE_MP3:
+		return str_param->sparams.uc.mp3_params.num_chan;
+	case SST_CODEC_TYPE_AAC:
+		return str_param->sparams.uc.aac_params.num_chan;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * sst_get_stream - this function prepares for stream allocation
+ *
+ * @str_param : stream param
+ */
+int sst_get_stream(struct snd_sst_params *str_param)
+{
+	int retval;
+	struct stream_info *str_info;
+	struct snd_sst_lib_download *lib_dnld;
+
+	pr_debug("In %s\n", __func__);
+	/* stream is not allocated, we are allocating */
+	retval = sst_get_stream_allocated(str_param, &lib_dnld);
+
+	if (retval == -(SST_LIB_ERR_LIB_DNLD_REQUIRED)) {
+		/* codec download is required */
+
+		pr_debug("Codec is required.... trying that\n");
+		if (lib_dnld == NULL) {
+			pr_err("lib download null!!! abort\n");
+			return -EIO;
+		}
+
+		retval = sst_load_library(lib_dnld, str_param->ops);
+		kfree(lib_dnld);
+
+		if (!retval) {
+			pr_debug("codec was downloaded successfully\n");
+
+			retval = sst_get_stream_allocated(str_param, &lib_dnld);
+			if (retval <= 0) {
+				retval = -EIO;
+				goto err;
+			}
+
+			pr_debug("Alloc done stream id %d\n", retval);
+		} else {
+			pr_debug("codec download failed\n");
+			retval = -EIO;
+			goto err;
+		}
+	} else if  (retval <= 0) {
+		retval = -EIO;
+		goto err;
+	}
+	/* store sampling freq */
+	str_info = &sst_drv_ctx->streams[retval];
+	str_info->sfreq = sst_get_sfreq(str_param);
+
+err:
+	return retval;
+}
+
+/**
+ * intel_sst_check_device - checks SST device
+ *
+ * This utility function checks the state of the SST device and
+ * downloads FW if not done already, or resumes the device if suspended.
+ */
+int intel_sst_check_device(void)
+{
+	int retval = 0;
+
+	pr_debug("In %s\n", __func__);
+
+	pm_runtime_get_sync(sst_drv_ctx->dev);
+	atomic_inc(&sst_drv_ctx->pm_usage_count);
+
+	pr_debug("%s: count is %d now\n", __func__,
+				atomic_read(&sst_drv_ctx->pm_usage_count));
+
+	mutex_lock(&sst_drv_ctx->sst_lock);
+	if (sst_drv_ctx->sst_state == SST_UN_INIT)
+		sst_drv_ctx->sst_state = SST_START_INIT;
+
+	if (sst_drv_ctx->sst_state == SST_START_INIT ||
+		sst_drv_ctx->sst_state == SST_FW_LIB_LOAD) {
+
+		/* FW is not downloaded */
+		pr_debug("DSP Downloading FW now...\n");
+		retval = sst_download_fw();
+		if (retval) {
+			pr_err("FW download fail %x\n", retval);
+			sst_drv_ctx->sst_state = SST_UN_INIT;
+			mutex_unlock(&sst_drv_ctx->sst_lock);
+			sst_pm_runtime_put(sst_drv_ctx);
+			return retval;
+		}
+	}
+	mutex_unlock(&sst_drv_ctx->sst_lock);
+	return retval;
+}
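+
+/*
+ * Note: every successful intel_sst_check_device() call must be balanced
+ * by sst_pm_runtime_put(), as in this illustrative pattern:
+ *
+ *   ret = intel_sst_check_device();
+ *   if (ret)
+ *           return ret;
+ *   // ... talk to the DSP ...
+ *   sst_pm_runtime_put(sst_drv_ctx);
+ */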
+
+void sst_process_mad_ops(struct work_struct *work)
+{
+	struct mad_ops_wq *mad_ops =
+			container_of(work, struct mad_ops_wq, wq);
+	int retval = 0;
+
+	switch (mad_ops->control_op) {
+	case SST_SND_PAUSE:
+		retval = sst_pause_stream(mad_ops->stream_id);
+		break;
+	case SST_SND_RESUME:
+		retval = sst_resume_stream(mad_ops->stream_id);
+		break;
+	default:
+		pr_err("wrong control_op reported\n");
+	}
+	if (retval)
+		pr_err("%s(): op: %d, retval: %d\n",
+				__func__, mad_ops->control_op, retval);
+	kfree(mad_ops);
+	return;
+}
+
+static int sst_power_control(bool state)
+{
+	pr_debug("%s for %d\n", __func__, state);
+
+	/* should we do ref count here, or rely on pcm handle?? */
+	if (state)
+		return intel_sst_check_device();
+	else
+		return sst_pm_runtime_put(sst_drv_ctx);
+}
+
+/*
+ * sst_open_pcm_stream - Open PCM interface
+ *
+ * @str_param: parameters of pcm stream
+ *
+ * This function is called by MID sound card driver to open
+ * a new pcm interface
+ */
+static int sst_open_pcm_stream(struct snd_sst_params *str_param)
+{
+	int retval;
+
+	if (!str_param)
+		return -EINVAL;
+
+	pr_debug("%s: doing rtpm_get\n", __func__);
+
+	retval = intel_sst_check_device();
+
+	if (retval)
+		return retval;
+	retval = sst_get_stream(str_param);
+	if (retval > 0) {
+		sst_drv_ctx->stream_cnt++;
+	} else {
+		pr_err("sst_get_stream returned err %d\n", retval);
+		sst_pm_runtime_put(sst_drv_ctx);
+	}
+
+	return retval;
+}
+
+static int sst_cdev_open(struct snd_sst_params *str_params,
+		struct sst_compress_cb *cb)
+{
+	int str_id, retval;
+	struct stream_info *stream;
+
+	pr_debug("%s: doing rtpm_get\n", __func__);
+
+	retval = intel_sst_check_device();
+	if (retval)
+		return retval;
+
+	str_id = sst_get_stream(str_params);
+	if (str_id > 0) {
+		pr_debug("stream allocated in sst_cdev_open %d\n", str_id);
+		stream = &sst_drv_ctx->streams[str_id];
+		stream->compr_cb = cb->compr_cb;
+		stream->compr_cb_param = cb->param;
+		stream->drain_notify = cb->drain_notify;
+		stream->drain_cb_param = cb->drain_cb_param;
+	} else {
+		pr_err("stream encountered error during alloc %d\n", str_id);
+		str_id = -EINVAL;
+		sst_pm_runtime_put(sst_drv_ctx);
+	}
+	return str_id;
+}
+
+static int sst_cdev_close(unsigned int str_id)
+{
+	int retval;
+	struct stream_info *stream;
+
+	pr_debug("%s: Entry\n", __func__);
+	stream = get_stream_info(str_id);
+	if (!stream) {
+		pr_err("stream info is NULL for str %d!!!\n", str_id);
+		return -EINVAL;
+	}
+
+	if (stream->status == STREAM_RESET) {
+		/* silently fail here as we have cleaned the stream */
+		pr_debug("stream in reset state...\n");
+		stream->status = STREAM_UN_INIT;
+
+		retval = 0;
+		goto put;
+	}
+
+	retval = sst_free_stream(str_id);
+put:
+	stream->compr_cb_param = NULL;
+	stream->compr_cb = NULL;
+
+	/*
+	 * free_stream returns an error if there is no stream to free (i.e.
+	 * the alloc failure case); open already did the put on that error
+	 * path, so skip it here. On close we do the put in the success case
+	 * and in the timeout error (-EBUSY) case.
+	 */
+	if (!retval || (retval == -EBUSY))
+		sst_pm_runtime_put(sst_drv_ctx);
+	else
+		pr_err("%s: free stream returned err %d\n", __func__, retval);
+
+	pr_debug("%s: End\n", __func__);
+	return retval;
+}
+
+static int sst_cdev_ack(unsigned int str_id, unsigned long bytes)
+{
+	struct stream_info *stream;
+	struct snd_sst_tstamp fw_tstamp = {0,};
+	int offset;
+	void __iomem *addr;
+
+	pr_debug("sst: ack for %d\n", str_id);
+	stream = get_stream_info(str_id);
+	if (!stream)
+		return -EINVAL;
+
+	/* update bytes sent */
+	stream->cumm_bytes += bytes;
+	pr_debug("bytes copied %d inc by %lu\n", stream->cumm_bytes, bytes);
+
+	memcpy_fromio(&fw_tstamp,
+		((void *)(sst_drv_ctx->mailbox + sst_drv_ctx->tstamp)
+		+(str_id * sizeof(fw_tstamp))),
+		sizeof(fw_tstamp));
+
+	fw_tstamp.bytes_copied = stream->cumm_bytes;
+	pr_debug("bytes sent to fw %llu inc by %lu\n", fw_tstamp.bytes_copied,
+							 bytes);
+
+	addr =  ((void *)(sst_drv_ctx->mailbox + sst_drv_ctx->tstamp)) +
+			(str_id * sizeof(fw_tstamp));
+	offset =  offsetof(struct snd_sst_tstamp, bytes_copied);
+	sst_shim_write(addr, offset, fw_tstamp.bytes_copied);
+	return 0;
+}
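+
+/*
+ * Timestamp slot addressing used above, as a worked sketch: the mailbox
+ * region at sst_drv_ctx->tstamp holds one struct snd_sst_tstamp per
+ * stream, so stream N lives at
+ *
+ *   mailbox + tstamp + N * sizeof(struct snd_sst_tstamp)
+ *
+ * e.g. with an (illustrative) 32-byte snd_sst_tstamp, str_id 2 starts
+ * 64 bytes into the region.
+ */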
+
+static int sst_cdev_set_metadata(unsigned int str_id,
+				struct snd_compr_metadata *metadata)
+{
+	int retval = 0, pvt_id, len;
+	struct ipc_post *msg = NULL;
+	struct stream_info *str_info;
+	struct ipc_dsp_hdr dsp_hdr;
+
+	pr_debug("set metadata for stream %d\n", str_id);
+
+	str_info = get_stream_info(str_id);
+	if (!str_info)
+		return -EINVAL;
+
+	if (sst_create_ipc_msg(&msg, 1))
+		return -ENOMEM;
+
+	if (!sst_drv_ctx->use_32bit_ops) {
+		pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+		pr_debug("pvt id = %d\n", pvt_id);
+		pr_debug("pipe id = %d\n", str_info->pipe_id);
+		sst_fill_header_mrfld(&msg->mrfld_header,
+			IPC_CMD, str_info->task_id, 1, pvt_id);
+
+		len = sizeof(*metadata) + sizeof(dsp_hdr);
+		msg->mrfld_header.p.header_low_payload = len;
+		sst_fill_header_dsp(&dsp_hdr, IPC_IA_SET_STREAM_PARAMS_MRFLD,
+				str_info->pipe_id, sizeof(*metadata));
+		memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+		memcpy(msg->mailbox_data + sizeof(dsp_hdr),
+				metadata, sizeof(*metadata));
+	} else {
+		sst_fill_header(&msg->header, IPC_IA_SET_STREAM_PARAMS,
+					1, str_id);
+		msg->header.part.data = sizeof(u32) + sizeof(*metadata);
+		memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
+		memcpy(msg->mailbox_data + sizeof(u32),
+				metadata, sizeof(*metadata));
+	}
+
+	sst_drv_ctx->ops->sync_post_message(msg);
+	return retval;
+}
+
+static int sst_cdev_control(unsigned int cmd, unsigned int str_id)
+{
+	pr_debug("received cmd %d on stream %d\n", cmd, str_id);
+
+	if (sst_drv_ctx->sst_state == SST_UN_INIT)
+		return 0;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		return sst_pause_stream(str_id);
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		return sst_resume_stream(str_id);
+	case SNDRV_PCM_TRIGGER_START: {
+		struct stream_info *str_info;
+		str_info = get_stream_info(str_id);
+		if (!str_info)
+			return -EINVAL;
+		str_info->prev = str_info->status;
+		str_info->status = STREAM_RUNNING;
+		return sst_start_stream(str_id);
+	}
+	case SNDRV_PCM_TRIGGER_STOP:
+		return sst_drop_stream(str_id);
+	case SND_COMPR_TRIGGER_DRAIN:
+		return sst_drain_stream(str_id, false);
+	case SND_COMPR_TRIGGER_NEXT_TRACK:
+		return sst_next_track();
+	case SND_COMPR_TRIGGER_PARTIAL_DRAIN:
+		return sst_drain_stream(str_id, true);
+	default:
+		return -EINVAL;
+	}
+}
+
+static int sst_cdev_tstamp(unsigned int str_id, struct snd_compr_tstamp *tstamp)
+{
+	struct snd_sst_tstamp fw_tstamp = {0,};
+	struct stream_info *stream;
+
+	memcpy_fromio(&fw_tstamp,
+		((void *)(sst_drv_ctx->mailbox + sst_drv_ctx->tstamp)
+		+(str_id * sizeof(fw_tstamp))),
+		sizeof(fw_tstamp));
+
+	stream = get_stream_info(str_id);
+	if (!stream)
+		return -EINVAL;
+	pr_debug("rb_counter %llu in bytes\n", fw_tstamp.ring_buffer_counter);
+
+	tstamp->copied_total = fw_tstamp.ring_buffer_counter;
+	tstamp->pcm_frames = fw_tstamp.frames_decoded;
+	tstamp->pcm_io_frames = div_u64(fw_tstamp.hardware_counter,
+			(u64)((stream->num_ch) * SST_GET_BYTES_PER_SAMPLE(24)));
+	tstamp->sampling_rate = fw_tstamp.sampling_frequency;
+	pr_debug("PCM  = %u\n", tstamp->pcm_io_frames);
+	pr_debug("Pointer Query on strid = %d copied_total %d, decoded %d\n",
+		str_id, tstamp->copied_total, tstamp->pcm_frames);
+	pr_debug("rendered %d\n", tstamp->pcm_io_frames);
+	return 0;
+}
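+
+/*
+ * Worked example for the pcm_io_frames computation above: a stereo
+ * stream has num_ch = 2 and SST_GET_BYTES_PER_SAMPLE(24) = 4, so a
+ * hardware_counter of 8000 bytes corresponds to 8000 / (2 * 4) = 1000
+ * rendered frames.
+ */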
+
+static int sst_cdev_caps(struct snd_compr_caps *caps)
+{
+	caps->num_codecs = NUM_CODEC;
+	caps->min_fragment_size = MIN_FRAGMENT_SIZE;  /* 50KB */
+	caps->max_fragment_size = MAX_FRAGMENT_SIZE;  /* 1024KB */
+	caps->min_fragments = MIN_FRAGMENT;
+	caps->max_fragments = MAX_FRAGMENT;
+	caps->codecs[0] = SND_AUDIOCODEC_MP3;
+	caps->codecs[1] = SND_AUDIOCODEC_AAC;
+	return 0;
+}
+
+static int sst_cdev_codec_caps(struct snd_compr_codec_caps *codec)
+{
+	if (codec->codec == SND_AUDIOCODEC_MP3) {
+		codec->num_descriptors = 2;
+		codec->descriptor[0].max_ch = 2;
+		codec->descriptor[1].sample_rates[0] = SNDRV_PCM_RATE_8000;
+		codec->descriptor[1].sample_rates[1] = SNDRV_PCM_RATE_11025;
+		codec->descriptor[1].sample_rates[2] = SNDRV_PCM_RATE_16000;
+		codec->descriptor[1].sample_rates[3] = SNDRV_PCM_RATE_22050;
+		codec->descriptor[1].sample_rates[4] = SNDRV_PCM_RATE_32000;
+		codec->descriptor[1].sample_rates[5] = SNDRV_PCM_RATE_44100;
+		codec->descriptor[1].sample_rates[6] = SNDRV_PCM_RATE_48000;
+		codec->descriptor[0].bit_rate[0] = 320; /* 320kbps */
+		codec->descriptor[0].bit_rate[1] = 192;
+		codec->descriptor[0].num_bitrates = 2;
+		codec->descriptor[0].profiles = 0;
+		codec->descriptor[0].modes = SND_AUDIOCHANMODE_MP3_STEREO;
+		codec->descriptor[0].formats = 0;
+	} else if (codec->codec == SND_AUDIOCODEC_AAC) {
+		codec->num_descriptors = 2;
+		codec->descriptor[1].max_ch = 2;
+		codec->descriptor[1].sample_rates[0] = SNDRV_PCM_RATE_8000;
+		codec->descriptor[1].sample_rates[1] = SNDRV_PCM_RATE_11025;
+		codec->descriptor[1].sample_rates[2] = SNDRV_PCM_RATE_16000;
+		codec->descriptor[1].sample_rates[3] = SNDRV_PCM_RATE_22050;
+		codec->descriptor[1].sample_rates[4] = SNDRV_PCM_RATE_32000;
+		codec->descriptor[1].sample_rates[5] = SNDRV_PCM_RATE_44100;
+		codec->descriptor[1].sample_rates[6] = SNDRV_PCM_RATE_48000;
+		codec->descriptor[1].bit_rate[0] = 320; /* 320kbps */
+		codec->descriptor[1].bit_rate[1] = 192;
+		codec->descriptor[1].num_bitrates = 2;
+		codec->descriptor[1].profiles = 0;
+		codec->descriptor[1].modes = 0;
+		codec->descriptor[1].formats =
+			(SND_AUDIOSTREAMFORMAT_MP4ADTS |
+				SND_AUDIOSTREAMFORMAT_RAW);
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void sst_cdev_fragment_elapsed(int str_id)
+{
+	struct stream_info *stream;
+
+	pr_debug("fragment elapsed from firmware for str_id %d\n", str_id);
+	stream = &sst_drv_ctx->streams[str_id];
+	if (stream->compr_cb)
+		stream->compr_cb(stream->compr_cb_param);
+}
+
+/*
+ * sst_close_pcm_stream - Close PCM interface
+ *
+ * @str_id: stream id to be closed
+ *
+ * This function is called by MID sound card driver to close
+ * an existing pcm interface
+ */
+static int sst_close_pcm_stream(unsigned int str_id)
+{
+	struct stream_info *stream;
+	int retval = 0;
+
+	pr_debug("%s: Entry\n", __func__);
+	stream = get_stream_info(str_id);
+	if (!stream) {
+		pr_err("stream info is NULL for str %d!!!\n", str_id);
+		return -EINVAL;
+	}
+
+	if (stream->status == STREAM_RESET) {
+		/* silently fail here as we have cleaned the stream */
+		pr_debug("stream in reset state...\n");
+
+		retval = 0;
+		goto put;
+	}
+
+	retval = free_stream_context(str_id);
+put:
+	stream->pcm_substream = NULL;
+	stream->status = STREAM_UN_INIT;
+	stream->period_elapsed = NULL;
+	sst_drv_ctx->stream_cnt--;
+
+	/*
+	 * free_stream returns an error if there is no stream to free (i.e.
+	 * the alloc failure case); open already did the put on that error
+	 * path, so skip it here. On close we do the put in the success case
+	 * and in the timeout error (-EBUSY) case.
+	 */
+	if (!retval || (retval == -EBUSY))
+		sst_pm_runtime_put(sst_drv_ctx);
+	else
+		pr_err("%s: free stream returned err %d\n", __func__, retval);
+
+	pr_debug("%s: Exit\n", __func__);
+	return 0;
+}
+
+int sst_send_sync_msg(int ipc, int str_id)
+{
+	struct ipc_post *msg = NULL;
+
+	if (sst_create_ipc_msg(&msg, false))
+		return -ENOMEM;
+	sst_fill_header(&msg->header, ipc, 0, str_id);
+	return sst_drv_ctx->ops->sync_post_message(msg);
+}
+
+static inline int sst_calc_tstamp(struct pcm_stream_info *info,
+		struct snd_pcm_substream *substream,
+		struct snd_sst_tstamp *fw_tstamp)
+{
+	size_t delay_bytes, delay_frames;
+	size_t buffer_sz;
+	u32 pointer_bytes, pointer_samples;
+
+	pr_debug("mrfld ring_buffer_counter %llu in bytes\n",
+			fw_tstamp->ring_buffer_counter);
+	pr_debug("mrfld hardware_counter %llu in bytes\n",
+			 fw_tstamp->hardware_counter);
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		delay_bytes = (size_t) (fw_tstamp->ring_buffer_counter -
+					fw_tstamp->hardware_counter);
+	else
+		delay_bytes = (size_t) (fw_tstamp->hardware_counter -
+					fw_tstamp->ring_buffer_counter);
+	delay_frames = bytes_to_frames(substream->runtime, delay_bytes);
+	buffer_sz = snd_pcm_lib_buffer_bytes(substream);
+	div_u64_rem(fw_tstamp->ring_buffer_counter, buffer_sz, &pointer_bytes);
+	pointer_samples = bytes_to_samples(substream->runtime, pointer_bytes);
+
+	pr_debug("pcm delay %zu in bytes\n", delay_bytes);
+
+	info->buffer_ptr = pointer_samples / substream->runtime->channels;
+
+	info->pcm_delay = delay_frames / substream->runtime->channels;
+	pr_debug("buffer ptr %llu pcm_delay rep: %llu\n",
+			info->buffer_ptr, info->pcm_delay);
+	return 0;
+}
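+
+/*
+ * Worked example for the math above (illustrative numbers): with a
+ * 16 KB buffer and ring_buffer_counter = 40960, div_u64_rem() leaves
+ * pointer_bytes = 40960 % 16384 = 8192; for S16_LE stereo that is
+ * 4096 samples and buffer_ptr = 4096 / 2 = 2048 frames. The delay is
+ * likewise the byte gap between what was queued (ring_buffer_counter)
+ * and what was rendered (hardware_counter), converted to frames.
+ */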
+
+static int sst_read_timestamp(struct pcm_stream_info *info)
+{
+	struct stream_info *stream;
+	struct snd_pcm_substream *substream;
+	struct snd_sst_tstamp fw_tstamp;
+	unsigned int str_id;
+
+	str_id = info->str_id;
+	stream = get_stream_info(str_id);
+	if (!stream)
+		return -EINVAL;
+
+	if (!stream->pcm_substream)
+		return -EINVAL;
+	substream = stream->pcm_substream;
+
+	memcpy_fromio(&fw_tstamp,
+		((void *)(sst_drv_ctx->mailbox + sst_drv_ctx->tstamp)
+			+ (str_id * sizeof(fw_tstamp))),
+		sizeof(fw_tstamp));
+	return sst_calc_tstamp(info, substream, &fw_tstamp);
+}
+
+/*
+ * sst_device_control - Set Control params
+ *
+ * @cmd: control cmd to be set
+ * @arg: command argument
+ *
+ * This function is called by MID sound card driver to set
+ * SST/Sound card controls for an opened stream.
+ * This is registered with MID driver
+ */
+static int sst_device_control(int cmd, void *arg)
+{
+	int retval = 0, str_id = 0;
+
+	if (sst_drv_ctx->sst_state == SST_UN_INIT)
+		return 0;
+
+	switch (cmd) {
+	case SST_SND_PAUSE:
+	case SST_SND_RESUME: {
+		struct mad_ops_wq *work = kzalloc(sizeof(*work), GFP_ATOMIC);
+		if (!work)
+			return -ENOMEM;
+		INIT_WORK(&work->wq, sst_process_mad_ops);
+		work->control_op = cmd;
+		work->stream_id = *(int *)arg;
+		queue_work(sst_drv_ctx->mad_wq, &work->wq);
+		break;
+	}
+	case SST_SND_START: {
+		struct stream_info *str_info;
+
+		str_id = *(int *)arg;
+		str_info = get_stream_info(str_id);
+		if (!str_info)
+			return -EINVAL;
+		str_info->prev = str_info->status;
+		str_info->status = STREAM_RUNNING;
+		sst_start_stream(str_id);
+		break;
+	}
+	case SST_SND_DROP: {
+		struct stream_info *str_info;
+		int ipc;
+		str_id = *(int *)arg;
+		str_info = get_stream_info(str_id);
+		if (!str_info)
+			return -EINVAL;
+		ipc = IPC_IA_DROP_STREAM;
+		str_info->prev = STREAM_UN_INIT;
+		str_info->status = STREAM_INIT;
+		if (sst_drv_ctx->use_32bit_ops)
+			retval = sst_send_sync_msg(ipc, str_id);
+		else
+			retval = sst_drop_stream(str_id);
+		break;
+	}
+	case SST_SND_STREAM_INIT: {
+		struct pcm_stream_info *str_info;
+		struct stream_info *stream;
+
+		pr_debug("stream init called\n");
+		str_info = (struct pcm_stream_info *)arg;
+		str_id = str_info->str_id;
+		stream = get_stream_info(str_id);
+		if (!stream) {
+			retval = -EINVAL;
+			break;
+		}
+		pr_debug("setting the period ptrs\n");
+		stream->pcm_substream = str_info->mad_substream;
+		stream->period_elapsed = str_info->period_elapsed;
+		stream->sfreq = str_info->sfreq;
+		stream->prev = stream->status;
+		stream->status = STREAM_INIT;
+		pr_debug("pcm_substream %p, period_elapsed %p, sfreq %d, status %d\n",
+				stream->pcm_substream, stream->period_elapsed, stream->sfreq, stream->status);
+		break;
+	}
+
+	case SST_SND_BUFFER_POINTER: {
+		struct pcm_stream_info *stream_info;
+
+		stream_info = (struct pcm_stream_info *)arg;
+		retval = sst_read_timestamp(stream_info);
+		pr_debug("pointer %llu, delay %llu\n",
+			stream_info->buffer_ptr, stream_info->pcm_delay);
+		break;
+	}
+	default:
+		/* Illegal case */
+		pr_warn("illegal req\n");
+		return -EINVAL;
+	}
+
+	return retval;
+}
+
+/*
+ * sst_copy_runtime_param - copy runtime params from src to dst structure
+ *
+ * @dst: destination runtime structure
+ * @src: source runtime structure
+ *
+ * This helper function is called to copy the runtime parameter
+ * structure.
+ */
+static int sst_copy_runtime_param(struct snd_sst_runtime_params *dst,
+			struct snd_sst_runtime_params *src)
+{
+	dst->type = src->type;
+	dst->str_id = src->str_id;
+	dst->size = src->size;
+	if (dst->addr) {
+		pr_err("mem allocated in prev setting, use the same memory\n");
+		return -EINVAL;
+	}
+	dst->addr = kzalloc(dst->size, GFP_KERNEL);
+	if (!dst->addr)
+		return -ENOMEM;
+	memcpy(dst->addr, src->addr, dst->size);
+	return 0;
+}
+
+/*
+ * sst_set_generic_params - Set generic params
+ *
+ * @cmd: control cmd to be set
+ * @arg: command argument
+ *
+ * This function is called by MID sound card driver to configure
+ * SST runtime params.
+ */
+static int sst_set_generic_params(enum sst_controls cmd, void *arg)
+{
+	int ret_val = 0;
+	pr_debug("Enter:%s, cmd:%d\n", __func__, cmd);
+
+	if (!arg)
+		return -EINVAL;
+
+	switch (cmd) {
+	case SST_SET_RUNTIME_PARAMS: {
+		struct snd_sst_runtime_params *src;
+		struct snd_sst_runtime_params *dst;
+
+		src = (struct snd_sst_runtime_params *)arg;
+		dst = &(sst_drv_ctx->runtime_param.param);
+		ret_val = sst_copy_runtime_param(dst, src);
+		break;
+		}
+	case SST_SET_ALGO_PARAMS: {
+		unsigned int device_input_mixer = *((unsigned int *)arg);
+		pr_debug("LPE mixer algo param set %x\n", device_input_mixer);
+		mutex_lock(&sst_drv_ctx->mixer_ctrl_lock);
+		sst_drv_ctx->device_input_mixer = device_input_mixer;
+		mutex_unlock(&sst_drv_ctx->mixer_ctrl_lock);
+		ret_val = sst_send_lpe_mixer_algo_params();
+		break;
+	}
+	case SST_SET_BYTE_STREAM: {
+		ret_val = intel_sst_check_device();
+		if (ret_val)
+			return ret_val;
+
+		ret_val = sst_send_byte_stream_mrfld(arg);
+		sst_pm_runtime_put(sst_drv_ctx);
+		break;
+	}
+	case SST_GET_PROBE_BYTE_STREAM: {
+		struct snd_sst_probe_bytes *prb_bytes = (struct snd_sst_probe_bytes *)arg;
+
+		if (sst_drv_ctx->probe_bytes) {
+			prb_bytes->len = sst_drv_ctx->probe_bytes->len;
+			memcpy(prb_bytes->bytes, &sst_drv_ctx->probe_bytes->bytes, prb_bytes->len);
+		}
+		break;
+	}
+	case SST_SET_PROBE_BYTE_STREAM: {
+		struct snd_sst_probe_bytes *prb_bytes = (struct snd_sst_probe_bytes *)arg;
+
+		if (sst_drv_ctx->probe_bytes) {
+			sst_drv_ctx->probe_bytes->len = prb_bytes->len;
+			memcpy(&sst_drv_ctx->probe_bytes->bytes, prb_bytes->bytes, prb_bytes->len);
+		}
+
+		ret_val = intel_sst_check_device();
+		if (ret_val)
+			return ret_val;
+
+		ret_val = sst_send_probe_bytes(sst_drv_ctx);
+		break;
+	}
+	case SST_SET_VTSV_INFO: {
+		ret_val = intel_sst_check_device();
+		if (ret_val)
+			return ret_val;
+
+		ret_val = sst_send_vtsv_data_to_fw(sst_drv_ctx);
+		if (ret_val)
+			pr_err("vtsv data send failed\n");
+		sst_pm_runtime_put(sst_drv_ctx);
+		break;
+	}
+	default:
+		pr_err("Invalid cmd request:%d\n", cmd);
+		ret_val = -EINVAL;
+	}
+	return ret_val;
+}
+
+static struct sst_ops pcm_ops = {
+	.open = sst_open_pcm_stream,
+	.device_control = sst_device_control,
+	.set_generic_params = sst_set_generic_params,
+	.close = sst_close_pcm_stream,
+	.power = sst_power_control,
+};
+
+static struct compress_sst_ops compr_ops = {
+	.open = sst_cdev_open,
+	.close = sst_cdev_close,
+	.control = sst_cdev_control,
+	.tstamp = sst_cdev_tstamp,
+	.ack = sst_cdev_ack,
+	.get_caps = sst_cdev_caps,
+	.get_codec_caps = sst_cdev_codec_caps,
+	.set_metadata = sst_cdev_set_metadata,
+};
+
+static struct sst_device sst_dsp_device = {
+	.name = "Intel(R) SST LPE",
+	.dev = NULL,
+	.ops = &pcm_ops,
+	.compr_ops = &compr_ops,
+};
+
+/*
+ * register_sst - function to register DSP
+ *
+ * This functions registers DSP with the platform driver
+ */
+int register_sst(struct device *dev)
+{
+	int ret_val;
+	sst_dsp_device.dev = dev;
+	ret_val = sst_register_dsp(&sst_dsp_device);
+	if (ret_val)
+		pr_err("Unable to register DSP with platform driver\n");
+
+	return ret_val;
+}
+
+int unregister_sst(struct device *dev)
+{
+	return sst_unregister_dsp(&sst_dsp_device);
+}
diff --git a/sound/soc/intel/sst/sst_dsp.c b/sound/soc/intel/sst/sst_dsp.c
new file mode 100644
index 0000000..29950f9
--- /dev/null
+++ b/sound/soc/intel/sst/sst_dsp.c
@@ -0,0 +1,1992 @@
+/*
+ *  sst_dsp.c - Intel SST Driver for audio engine
+ *
+ *  Copyright (C) 2008-10	Intel Corp
+ *  Authors:	Vinod Koul <vinod.koul@intel.com>
+ *		Harsha Priya <priya.harsha@intel.com>
+ *		Dharageswari R <dharageswari.r@intel.com>
+ *		KP Jeeja <jeeja.kp@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This file contains all dsp controlling functions like firmware download,
+ * setting/resetting dsp cores, etc
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/firmware.h>
+#include <linux/dmaengine.h>
+#include <linux/intel_mid_dma.h>
+#include <linux/pm_qos.h>
+#include <linux/intel_mid_pm.h>
+#include <linux/elf.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+#include "sst_trace.h"
+
+#ifndef CONFIG_X86_64
+#define MEMCPY_TOIO memcpy_toio
+#else
+#define MEMCPY_TOIO memcpy32_toio
+#endif
+
+/*
+ * All the static libraries to be downloaded at boot-up
+ */
+static struct sst_module_info sst_modules_mrfld[] = {};
+
+static struct sst_module_info sst_modules_byt[] = {
+	{"mp3_dec", SST_CODEC_TYPE_MP3, 0, SST_LIB_NOT_FOUND},
+	{"aac_dec", SST_CODEC_TYPE_AAC, 0, SST_LIB_NOT_FOUND},
+};
+
+/**
+ * memcpy32_toio: Copy using writel commands
+ *
+ * This is needed because the hardware does not support
+ * 64-bit moveq instructions while writing to PCI MMIO
+ */
+void memcpy32_toio(void *dst, const void *src, int count)
+{
+	int i;
+	const u32 *src_32 = src;
+	u32 *dst_32 = dst;
+
+	for (i = 0; i < count/sizeof(u32); i++)
+		writel(*src_32++, dst_32++);
+}
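+
+/*
+ * Example: MEMCPY_TOIO(dst, src, 16) issues four 32-bit writel() calls
+ * on X86_64 builds, while 32-bit builds fall back to plain
+ * memcpy_toio(). Note that 'count' is assumed to be a multiple of 4;
+ * any trailing remainder bytes are not copied.
+ */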
+
+/**
+ * intel_sst_reset_dsp_mfld - Resetting SST DSP
+ *
+ * This resets the DSP on Medfield platforms
+ */
+int intel_sst_reset_dsp_mfld(void)
+{
+	union config_status_reg csr;
+
+	pr_debug("Resetting the DSP in medfield\n");
+	mutex_lock(&sst_drv_ctx->csr_lock);
+	csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+	csr.full |= 0x382;
+	csr.part.run_stall = 0x1;
+	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
+	mutex_unlock(&sst_drv_ctx->csr_lock);
+
+	return 0;
+}
+
+/**
+ * sst_start_mfld - Start the SST DSP processor
+ *
+ * This starts the DSP on Medfield platforms
+ */
+int sst_start_mfld(void)
+{
+	union config_status_reg csr;
+
+	mutex_lock(&sst_drv_ctx->csr_lock);
+	csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+	csr.part.bypass = 0;
+	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
+	csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+	csr.part.mfld_strb = 1;
+	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
+	csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+	csr.part.run_stall = 0;
+	csr.part.sst_reset = 0;
+	pr_debug("Starting the DSP_medfld %x\n", csr.full);
+	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
+	pr_debug("Starting the DSP_medfld\n");
+	mutex_unlock(&sst_drv_ctx->csr_lock);
+
+	return 0;
+}
+
+/**
+ * intel_sst_reset_dsp_mrfld - Resetting SST DSP
+ *
+ * This resets the DSP on Merrifield platforms
+ */
+int intel_sst_reset_dsp_mrfld(void)
+{
+	union config_status_reg_mrfld csr;
+
+	pr_debug("sst: Resetting the DSP in mrfld\n");
+	mutex_lock(&sst_drv_ctx->csr_lock);
+	csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR);
+
+	pr_debug("value:0x%llx\n", csr.full);
+
+	csr.full |= 0x7;
+	sst_shim_write64(sst_drv_ctx->shim, SST_CSR, csr.full);
+	csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR);
+
+	pr_debug("value:0x%llx\n", csr.full);
+
+	csr.full &= ~(0x1);
+	sst_shim_write64(sst_drv_ctx->shim, SST_CSR, csr.full);
+
+	csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR);
+	pr_debug("value:0x%llx\n", csr.full);
+	mutex_unlock(&sst_drv_ctx->csr_lock);
+	return 0;
+}
+
+/**
+ * sst_start_mrfld - Start the SST DSP processor
+ *
+ * This starts the DSP on Merrifield platforms
+ */
+int sst_start_mrfld(void)
+{
+	union config_status_reg_mrfld csr;
+
+	pr_debug("sst: Starting the DSP in mrfld\n");
+	mutex_lock(&sst_drv_ctx->csr_lock);
+	csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR);
+	pr_debug("value:0x%llx\n", csr.full);
+
+	csr.full |= 0x7;
+	sst_shim_write64(sst_drv_ctx->shim, SST_CSR, csr.full);
+
+	csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR);
+	pr_debug("value:0x%llx\n", csr.full);
+
+	csr.part.xt_snoop = 1;
+	csr.full &= ~(0x5);
+	sst_shim_write64(sst_drv_ctx->shim, SST_CSR, csr.full);
+
+	csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR);
+	pr_debug("sst: Starting the DSP_merrifield:%llx\n", csr.full);
+	mutex_unlock(&sst_drv_ctx->csr_lock);
+	return 0;
+}
+
+/**
+ * intel_sst_set_bypass - Sets/clears the bypass bits
+ *
+ * This sets/clears the bypass bits
+ */
+void intel_sst_set_bypass_mfld(bool set)
+{
+	union config_status_reg csr;
+
+	mutex_lock(&sst_drv_ctx->csr_lock);
+	csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+	if (set)
+		csr.full |= 0x380;
+	else
+		csr.part.bypass = 0;
+	pr_debug("SetupByPass set %d Val 0x%x\n", set, csr.full);
+	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
+	mutex_unlock(&sst_drv_ctx->csr_lock);
+}
+
+#define SST_CALC_DMA_DSTN(lpe_viewpt_rqd, ia_viewpt_addr, elf_paddr, \
+			lpe_viewpt_addr) ((lpe_viewpt_rqd) ? \
+		elf_paddr : (ia_viewpt_addr + elf_paddr - lpe_viewpt_addr))
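+
+/*
+ * Sketch of what the macro computes: when the LPE viewpoint is
+ * required, the ELF physical address is used as the DMA destination
+ * as-is; otherwise it is translated into the IA view, e.g. with
+ * illustrative numbers:
+ *
+ *   ia_viewpt_addr 0xff2c0000 + elf_paddr 0x1000 - lpe_viewpt_addr 0x0
+ *     = 0xff2c1000
+ */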
+
+static int sst_fill_dstn(struct intel_sst_drv *sst, struct sst_info info,
+			Elf32_Phdr *pr, void **dstn, unsigned int *dstn_phys, int *mem_type)
+{
+#ifdef MRFLD_WORD_WA
+	/* workaround: only 4-byte-aligned copies are allowed for ICCM */
+	if ((pr->p_paddr >= info.iram_start) && (pr->p_paddr < info.iram_end)) {
+		size_t data_size = pr->p_filesz % SST_ICCM_BOUNDARY;
+
+		if (data_size)
+			pr->p_filesz += 4 - data_size;
+		*dstn = sst->iram + (pr->p_paddr - info.iram_start);
+		*dstn_phys = SST_CALC_DMA_DSTN(info.lpe_viewpt_rqd,
+				sst->iram_base, pr->p_paddr, info.iram_start);
+		*mem_type = 1;
+	}
+#else
+	if ((pr->p_paddr >= info.iram_start) &&
+	    (pr->p_paddr < info.iram_end)) {
+
+		*dstn = sst->iram + (pr->p_paddr - info.iram_start);
+		*dstn_phys = SST_CALC_DMA_DSTN(info.lpe_viewpt_rqd,
+				sst->iram_base, pr->p_paddr, info.iram_start);
+		*mem_type = 1;
+	}
+#endif
+	else if ((pr->p_paddr >= info.dram_start) &&
+		 (pr->p_paddr < info.dram_end)) {
+
+		*dstn = sst->dram + (pr->p_paddr - info.dram_start);
+		*dstn_phys = SST_CALC_DMA_DSTN(info.lpe_viewpt_rqd,
+				sst->dram_base, pr->p_paddr, info.dram_start);
+		*mem_type = 1;
+	} else if ((pr->p_paddr >= info.imr_start) &&
+		   (pr->p_paddr < info.imr_end)) {
+
+		*dstn = sst->ddr + (pr->p_paddr - info.imr_start);
+		*dstn_phys =  sst->ddr_base + pr->p_paddr - info.imr_start;
+		*mem_type = 0;
+	} else {
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void sst_fill_info(struct intel_sst_drv *sst,
+			struct sst_info *info)
+{
+	/* first we setup addresses to be used for elf sections */
+	if (sst->info.iram_use) {
+		info->iram_start = sst->info.iram_start;
+		info->iram_end = sst->info.iram_end;
+	} else {
+		info->iram_start = sst->iram_base;
+		info->iram_end = sst->iram_end;
+	}
+	if (sst->info.dram_use) {
+		info->dram_start = sst->info.dram_start;
+		info->dram_end = sst->info.dram_end;
+	} else {
+		info->dram_start = sst->dram_base;
+		info->dram_end = sst->dram_end;
+	}
+	if (sst->info.imr_use) {
+		info->imr_start = sst->info.imr_start;
+		info->imr_end = sst->info.imr_end;
+	} else {
+		info->imr_start = relocate_imr_addr_mrfld(sst->ddr_base);
+		info->imr_end = relocate_imr_addr_mrfld(sst->ddr_end);
+	}
+
+	info->lpe_viewpt_rqd = sst->info.lpe_viewpt_rqd;
+	info->dma_max_len = sst->info.dma_max_len;
+	pr_debug("%s: dma_max_len 0x%x", __func__, info->dma_max_len);
+}
+
+static inline int sst_validate_elf(const struct firmware *sst_bin, bool dynamic)
+{
+	Elf32_Ehdr *elf;
+
+	BUG_ON(!sst_bin);
+
+	pr_debug("IN %s\n", __func__);
+
+	elf = (Elf32_Ehdr *)sst_bin->data;
+
+	if ((elf->e_ident[0] != 0x7F) || (elf->e_ident[1] != 'E') ||
+	    (elf->e_ident[2] != 'L') || (elf->e_ident[3] != 'F')) {
+		pr_debug("ELF Header Not found! %zu\n", sst_bin->size);
+		return -EINVAL;
+	}
+
+	if (dynamic && elf->e_type != ET_DYN) {
+		pr_err("Not a dynamic loadable library\n");
+		return -EINVAL;
+	}
+	pr_debug("Valid ELF Header...%zu\n", sst_bin->size);
+	return 0;
+}
+
+/**
+ * sst_validate_fw_image - validates the firmware signature
+ *
+ * @sst_fw_in_mem	: pointer to audio FW
+ * @size		: size of the firmware
+ * @module		: points to the FW modules
+ * @num_modules		: points to the num of modules
+ * This function validates the header signature in the FW image
+ */
+static int sst_validate_fw_image(const void *sst_fw_in_mem, unsigned long size,
+		struct fw_module_header **module, u32 *num_modules)
+{
+	struct fw_header *header;
+
+	pr_debug("%s\n", __func__);
+
+	/* Read the header information from the data pointer */
+	header = (struct fw_header *)sst_fw_in_mem;
+	pr_debug("header sign=%s file_size=%x modules=%x fmt=%x hdr_size=%zx\n",
+			header->signature, header->file_size, header->modules,
+			header->file_format, sizeof(*header));
+
+	/* verify FW */
+	if ((strncmp(header->signature, SST_FW_SIGN, 4) != 0) ||
+		(size != header->file_size + sizeof(*header))) {
+		/* Invalid FW signature */
+		pr_err("InvalidFW sign/filesize mismatch\n");
+		return -EINVAL;
+	}
+	*num_modules = header->modules;
+	*module = (void *)sst_fw_in_mem + sizeof(*header);
+
+	return 0;
+}
+
+/**
+ * sst_validate_library - validates the library signature
+ *
+ * @fw_lib			: pointer to FW library
+ * @slot			: pointer to the lib slot info
+ * @entry_point		: out param, which contains the module entry point
+ * This function is called before downloading the codec/postprocessing
+ * library
+ */
+static int sst_validate_library(const struct firmware *fw_lib,
+		struct lib_slot_info *slot,
+		u32 *entry_point)
+{
+	struct fw_header *header;
+	struct fw_module_header *module;
+	struct fw_block_info *block;
+	unsigned int n_blk, isize = 0, dsize = 0;
+	int err = 0;
+
+	header = (struct fw_header *)fw_lib->data;
+	if (header->modules != 1) {
+		pr_err("Module number mismatch\n");
+		err = -EINVAL;
+		goto exit;
+	}
+	module = (void *)fw_lib->data + sizeof(*header);
+	*entry_point = module->entry_point;
+	pr_debug("Module entry point 0x%x\n", *entry_point);
+	pr_debug("Module Sign %s, Size 0x%x, Blocks 0x%x Type 0x%x\n",
+			module->signature, module->mod_size,
+			module->blocks, module->type);
+
+	block = (void *)module + sizeof(*module);
+	for (n_blk = 0; n_blk < module->blocks; n_blk++) {
+		switch (block->type) {
+		case SST_IRAM:
+			isize += block->size;
+			break;
+		case SST_DRAM:
+			dsize += block->size;
+			break;
+		default:
+			pr_err("Invalid block type for 0x%x\n", n_blk);
+			err = -EINVAL;
+			goto exit;
+		}
+		block = (void *)block + sizeof(*block) + block->size;
+	}
+	if (isize > slot->iram_size || dsize > slot->dram_size) {
+		pr_err("library exceeds size allocated\n");
+		err = -EINVAL;
+		goto exit;
+	} else
+		pr_debug("Library is safe for download...\n");
+
+	pr_debug("iram 0x%x, dram 0x%x, iram 0x%x, dram 0x%x\n",
+			isize, dsize, slot->iram_size, slot->dram_size);
+exit:
+	return err;
+}
+
+static bool chan_filter(struct dma_chan *chan, void *param)
+{
+	struct sst_dma *dma = (struct sst_dma *)param;
+
+	/* we only need MID_DMAC1 as that can access DSP RAMs */
+	if (chan->device->dev == dma->dev)
+		return true;
+
+	return false;
+}
+
+static unsigned int
+sst_get_elf_sg_len(struct intel_sst_drv *sst, Elf32_Ehdr *elf, Elf32_Phdr *pr,
+		struct sst_info info)
+{
+	unsigned int i = 0, count = 0;
+
+	pr_debug("in %s: dma_max_len 0x%x\n", __func__, info.dma_max_len);
+
+	while (i < elf->e_phnum) {
+		if (pr[i].p_type == PT_LOAD) {
+
+			if ((pr[i].p_paddr >= info.iram_start) &&
+					(pr[i].p_paddr < info.iram_end &&
+						pr[i].p_filesz)) {
+				count += (pr[i].p_filesz) / info.dma_max_len;
+
+				if ((pr[i].p_filesz) % info.dma_max_len)
+					count++;
+
+			} else if ((pr[i].p_paddr >= info.dram_start) &&
+					(pr[i].p_paddr < info.dram_end &&
+						pr[i].p_filesz)) {
+				count += (pr[i].p_filesz) / info.dma_max_len;
+
+				if ((pr[i].p_filesz) % info.dma_max_len)
+					count++;
+
+			} else if ((pr[i].p_paddr >= info.imr_start) &&
+					(pr[i].p_paddr < info.imr_end &&
+						pr[i].p_filesz)) {
+				count += (pr[i].p_filesz) / info.dma_max_len;
+
+				if ((pr[i].p_filesz) % info.dma_max_len)
+					count++;
+			}
+		}
+		i++;
+	}
+
+	pr_debug("gotcha count %d\n", count);
+	return count;
+}
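+
+/*
+ * Descriptor-count arithmetic above, as a worked example: a PT_LOAD
+ * segment with p_filesz = 0x30000 and dma_max_len = 0x20000 adds
+ * 0x30000 / 0x20000 = 1 descriptor plus one more for the 0x10000
+ * remainder, i.e. count += 2.
+ */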
+
+static int
+sst_init_dma_sg_list(struct intel_sst_drv *sst, unsigned int len,
+		struct scatterlist **src, struct scatterlist **dstn)
+{
+	struct scatterlist *sg_src = NULL, *sg_dst = NULL;
+
+	sg_src = kzalloc(sizeof(*sg_src)*(len), GFP_KERNEL);
+	if (!sg_src)
+		return -ENOMEM;
+	sg_init_table(sg_src, len);
+	sg_dst = kzalloc(sizeof(*sg_dst)*(len), GFP_KERNEL);
+	if (!sg_dst) {
+		kfree(sg_src);
+		return -ENOMEM;
+	}
+	sg_init_table(sg_dst, len);
+	*src = sg_src;
+	*dstn = sg_dst;
+
+	return 0;
+}
+
+static int sst_alloc_dma_chan(struct sst_dma *dma)
+{
+	dma_cap_mask_t mask;
+	struct intel_mid_dma_slave *slave = &dma->slave;
+	int retval;
+	struct pci_dev *dmac = NULL;
+	const char *hid;
+
+	pr_debug("%s\n", __func__);
+	dma->dev = NULL;
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+
+	if (sst_drv_ctx->pci_id == SST_CLV_PCI_ID)
+		dmac = pci_get_device(PCI_VENDOR_ID_INTEL,
+				      PCI_DMAC_CLV_ID, NULL);
+	else if (sst_drv_ctx->pci_id == SST_MRFLD_PCI_ID)
+		dmac = pci_get_device(PCI_VENDOR_ID_INTEL,
+				      PCI_DMAC_MRFLD_ID, NULL);
+	else if (sst_drv_ctx->pci_id == SST_BYT_PCI_ID) {
+		hid = sst_drv_ctx->hid;
+		if (!strncmp(hid, "LPE0F281", 8))
+			dma->dev = intel_mid_get_acpi_dma("DMA0F28");
+		else if (!strncmp(hid, "80860F28", 8))
+			dma->dev = intel_mid_get_acpi_dma("ADMA0F28");
+		else if (!strncmp(hid, "808622A8", 8))
+			dma->dev = intel_mid_get_acpi_dma("ADMA22A8");
+		else if (!strncmp(hid, "LPE0F28", 7))
+			dma->dev = intel_mid_get_acpi_dma("DMA0F28");
+	}
+
+	if (!dmac && !dma->dev) {
+		pr_err("Can't find DMAC\n");
+		return -ENODEV;
+	}
+	if (dmac)
+		dma->dev = &dmac->dev;
+
+	dma->ch = dma_request_channel(mask, chan_filter, dma);
+	if (!dma->ch) {
+		pr_err("unable to request dma channel\n");
+		return -EIO;
+	}
+
+	slave->dma_slave.direction = DMA_MEM_TO_MEM;
+	slave->hs_mode = 0;
+	slave->cfg_mode = LNW_DMA_MEM_TO_MEM;
+	slave->dma_slave.src_addr_width = slave->dma_slave.dst_addr_width =
+						DMA_SLAVE_BUSWIDTH_4_BYTES;
+	slave->dma_slave.src_maxburst = slave->dma_slave.dst_maxburst =
+							LNW_DMA_MSIZE_16;
+
+	retval = dmaengine_slave_config(dma->ch, &slave->dma_slave);
+	if (retval) {
+		pr_err("unable to set slave config, err %d\n", retval);
+		dma_release_channel(dma->ch);
+		return -EIO;
+	}
+	return retval;
+}
+
+static void sst_dma_transfer_complete(void *arg)
+{
+	sst_drv_ctx  = (struct intel_sst_drv *)arg;
+	pr_debug(" sst_dma_transfer_complete\n");
+	sst_wake_up_block(sst_drv_ctx, 0, FW_DWNL_ID, FW_DWNL_ID, NULL, 0);
+}
+
+static inline int sst_dma_wait_for_completion(struct intel_sst_drv *sst)
+{
+	int ret = 0;
+	struct sst_block *block;
+	/* call prep and wait */
+	sst->desc->callback = sst_dma_transfer_complete;
+	sst->desc->callback_param = sst;
+
+	block = sst_create_block(sst, FW_DWNL_ID, FW_DWNL_ID);
+	if (block == NULL)
+		return -ENOMEM;
+
+	sst->desc->tx_submit(sst->desc);
+	ret = sst_wait_timeout(sst, block);
+	if (ret)
+		dma_wait_for_async_tx(sst->desc);
+	sst_free_block(sst, block);
+	return ret;
+}
+
+static int sst_dma_firmware(struct sst_dma *dma, struct sst_sg_list *sg_list)
+{
+	int retval = 0;
+	enum dma_ctrl_flags flag = DMA_CTRL_ACK;
+	struct scatterlist *sg_src_list, *sg_dst_list;
+	int length;
+	pr_debug("%s: use_lli %d\n", __func__, sst_drv_ctx->use_lli);
+
+	sg_src_list = sg_list->src;
+	sg_dst_list = sg_list->dst;
+	length = sg_list->list_len;
+
+	/* By default PIMR is unmasked.
+	 * FW gets the unmasked dma intr too, so mask it for FW to execute on mrfld
+	 */
+	if (sst_drv_ctx->pci_id == SST_MRFLD_PCI_ID ||
+	    sst_drv_ctx->pci_id == SST_BYT_PCI_ID)
+		sst_shim_write(sst_drv_ctx->shim, SST_PIMR, 0xFFFF0034);
+
+	if (sst_drv_ctx->use_lli) {
+		sst_drv_ctx->desc = dma->ch->device->device_prep_dma_sg(dma->ch,
+					sg_dst_list, length,
+					sg_src_list, length, flag);
+		if (!sst_drv_ctx->desc)
+			return -EFAULT;
+		retval = sst_dma_wait_for_completion(sst_drv_ctx);
+		if (retval)
+			pr_err("sst_dma_firmware..timeout!\n");
+	} else {
+		struct scatterlist *sg;
+		dma_addr_t src_addr, dstn_addr;
+		int i = 0;
+
+		/* dma single block mode */
+		for_each_sg(sg_src_list, sg, length, i) {
+			pr_debug("dma desc %d, length %d\n", i, sg->length);
+			src_addr = sg_phys(sg);
+			dstn_addr = sg_phys(sg_dst_list);
+			if (sg_dst_list)
+				sg_dst_list = sg_next(sg_dst_list);
+			sst_drv_ctx->desc = dma->ch->device->device_prep_dma_memcpy(
+					dma->ch, dstn_addr, src_addr, sg->length, flag);
+			if (!sst_drv_ctx->desc)
+				return -EFAULT;
+			retval = sst_dma_wait_for_completion(sst_drv_ctx);
+			if (retval)
+				pr_err("sst_dma_firmware..timeout!\n");
+
+		}
+	}
+
+	return retval;
+}
+
+/*
+ * sst_fill_sglist - Fill the sg list
+ *
+ * @from: src address of the fw
+ * @to: virtual address of IRAM/DRAM
+ * @block_size: size of the block
+ * @sg_src: source scatterlist pointer
+ * @sg_dstn: destination scatterlist pointer
+ * @fw_sg_list: pointer to the sg_list
+ * @dma_max_len: maximum length of a DMA block
+ *
+ * Splits a module block into chunks of at most @dma_max_len bytes and
+ * stores them in the source/destination scatterlists for transfer.
+ * Returns 0 on success, or a negative error code if list creation fails.
+ */
+static int sst_fill_sglist(unsigned long from, unsigned long to,
+		u32 block_size, struct scatterlist **sg_src, struct scatterlist **sg_dstn,
+		struct sst_sg_list *fw_sg_list, u32 dma_max_len)
+{
+	u32 offset = 0;
+	int len = 0;
+	unsigned long dstn, src;
+
+	pr_debug("%s entry", __func__);
+	if (!sg_src || !sg_dstn)
+		return -EINVAL;
+
+	do {
+		dstn = (unsigned long) (to + offset);
+		src = (unsigned long) (from + offset);
+
+		/* split blocks to dma_max_len */
+
+		len = block_size - offset;
+		pr_debug("DMA blk src %lx,dstn %lx,len %d,offset %d, size %d\n",
+			src, dstn, len, offset, block_size);
+		if (len > dma_max_len) {
+			pr_debug("block size exceeds %d\n", dma_max_len);
+			len = dma_max_len;
+			offset += len;
+		} else {
+			pr_debug("Node length less that %d\n", dma_max_len);
+			offset = 0;
+		}
+
+		if (!(*sg_src) || !(*sg_dstn))
+			return -ENOMEM;
+
+		sg_set_page(*sg_src, virt_to_page((void *) src), len,
+				offset_in_page((void *) src));
+		sg_set_page(*sg_dstn, virt_to_page((void *) dstn), len,
+				offset_in_page((void *) dstn));
+
+		*sg_src = sg_next(*sg_src);
+		*sg_dstn = sg_next(*sg_dstn);
+
+		/* TODO: is sg_idx required? */
+		if (sst_drv_ctx->info.use_elf)
+			fw_sg_list->sg_idx++;
+	} while (offset > 0);
+
+	return 0;
+}
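+
+/*
+ * Worked example of the split above, assuming dma_max_len = 0x20000
+ * (the real value comes from platform data): a 0x48000-byte block
+ * becomes three nodes of 0x20000, 0x20000 and 0x8000 bytes; offset
+ * runs 0 -> 0x20000 -> 0x40000 -> 0, and the loop exits once the
+ * remainder fits in a single node.
+ */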
+
+static int sst_parse_elf_module_dma(struct intel_sst_drv *sst, const void *fw,
+		 struct sst_info info, Elf32_Phdr *pr,
+		 struct scatterlist **sg_src, struct scatterlist **sg_dstn,
+		 struct sst_sg_list *fw_sg_list)
+{
+	unsigned long dstn, src;
+	unsigned int dstn_phys;
+	int ret_val = 0;
+	int mem_type;
+
+	ret_val = sst_fill_dstn(sst, info, pr, (void *)&dstn, &dstn_phys, &mem_type);
+	if (ret_val)
+		return ret_val;
+
+	dstn = (unsigned long) phys_to_virt(dstn_phys);
+	src = (unsigned long) (fw + pr->p_offset);
+
+	ret_val = sst_fill_sglist(src, dstn, pr->p_filesz,
+				sg_src, sg_dstn, fw_sg_list, sst->info.dma_max_len);
+
+	return ret_val;
+}
+
+static int
+sst_parse_elf_fw_dma(struct intel_sst_drv *sst, const void *fw_in_mem,
+			struct sst_sg_list *fw_sg_list)
+{
+	int i = 0, ret = 0;
+	Elf32_Ehdr *elf;
+	Elf32_Phdr *pr;
+	struct sst_info info;
+	struct scatterlist *sg_src = NULL, *sg_dst = NULL;
+	unsigned int sg_len;
+
+	BUG_ON(!fw_in_mem);
+
+	elf = (Elf32_Ehdr *)fw_in_mem;
+	pr = (Elf32_Phdr *) (fw_in_mem + elf->e_phoff);
+	pr_debug("%s entry\n", __func__);
+
+	sst_fill_info(sst, &info);
+
+	sg_len = sst_get_elf_sg_len(sst, elf, pr, info);
+	if (sg_len == 0) {
+		pr_err("we got NULL sz ELF, abort\n");
+		return -EIO;
+	}
+
+	if (sst_init_dma_sg_list(sst, sg_len, &sg_src, &sg_dst)) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	fw_sg_list->src = sg_src;
+	fw_sg_list->dst = sg_dst;
+	fw_sg_list->list_len = sg_len;
+	fw_sg_list->sg_idx = 0;
+
+	while (i < elf->e_phnum) {
+		if ((pr[i].p_type == PT_LOAD) && (pr[i].p_filesz)) {
+			ret = sst_parse_elf_module_dma(sst, fw_in_mem, info,
+					&pr[i], &sg_src, &sg_dst, fw_sg_list);
+			if (ret)
+				goto err;
+		}
+		i++;
+	}
+	return 0;
+err:
+	kfree(fw_sg_list->src);
+	kfree(fw_sg_list->dst);
+err1:
+	fw_sg_list->src = NULL;
+	fw_sg_list->dst = NULL;
+	fw_sg_list->list_len = 0;
+	fw_sg_list->sg_idx = 0;
+
+	return ret;
+}
+
+/**
+ * sst_parse_module_dma - Parse audio FW modules and populate the dma list
+ *
+ * @sst_ctx	: sst driver context
+ * @module	: FW module header
+ * @sg_list	: Pointer to the sg_list to be populated
+ *
+ * Counts the number of scatter-gather nodes needed, creates source and
+ * destination scatterlists of that length and fills them.
+ * Returns 0 if the module sizes are proper, or a negative error code.
+ */
+static int sst_parse_module_dma(struct intel_sst_drv *sst_ctx,
+				struct fw_module_header *module,
+				struct sst_sg_list *sg_list)
+{
+	struct fw_block_info *block;
+	u32 count;
+	unsigned long ram, src;
+	int retval, sg_len = 0;
+	struct scatterlist *sg_src, *sg_dst;
+
+	pr_debug("module sign %s size %x blocks %x type %x\n",
+			module->signature, module->mod_size,
+			module->blocks, module->type);
+	pr_debug("module entrypoint 0x%x\n", module->entry_point);
+
+	block = (void *)module + sizeof(*module);
+
+	for (count = 0; count < module->blocks; count++) {
+		sg_len += (block->size) / sst_drv_ctx->info.dma_max_len;
+
+		if (block->size % sst_drv_ctx->info.dma_max_len)
+			sg_len = sg_len + 1;
+		block = (void *)block + sizeof(*block) + block->size;
+	}
+
+	if (sst_init_dma_sg_list(sst_ctx, sg_len, &sg_src, &sg_dst)) {
+		retval = -ENOMEM;
+		goto err1;
+	}
+
+	sg_list->src = sg_src;
+	sg_list->dst = sg_dst;
+	sg_list->list_len = sg_len;
+
+	block = (void *)module + sizeof(*module);
+
+	for (count = 0; count < module->blocks; count++) {
+		if (block->size <= 0) {
+			pr_err("block size invalid\n");
+			retval = -EINVAL;
+			goto err;
+		}
+		switch (block->type) {
+		case SST_IRAM:
+			ram = sst_ctx->iram_base;
+			break;
+		case SST_DRAM:
+			ram = sst_ctx->dram_base;
+			break;
+		default:
+			pr_err("wrong ram type0x%x in block0x%x\n",
+					block->type, count);
+			retval = -EINVAL;
+			goto err;
+		}
+
+		/* converting from physical to virtual because the
+		 * scatter-gather list works on virtual pointers */
+		ram = (unsigned long) phys_to_virt(ram);
+		ram = (unsigned long)(ram + block->ram_offset);
+		src = (unsigned long) (void *)block + sizeof(*block);
+
+		retval = sst_fill_sglist(src, ram,
+				block->size, &sg_src, &sg_dst,
+				sg_list, sst_ctx->info.dma_max_len);
+		if (retval)
+			goto err;
+
+		block = (void *)block + sizeof(*block) + block->size;
+	}
+	return 0;
+err:
+	kfree(sg_list->src);
+	kfree(sg_list->dst);
+err1:
+	sg_list->src = NULL;
+	sg_list->dst = NULL;
+	sg_list->list_len = 0;
+
+	return retval;
+}
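+
+/*
+ * FW image layout walked above (per struct fw_module_header and
+ * struct fw_block_info):
+ *
+ *	[module hdr][block hdr][data][block hdr][data]...[module hdr]...
+ *
+ * Each step advances by sizeof(*block) + block->size, and the next
+ * module starts sizeof(*module) + module->mod_size past its header.
+ */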
+
+/**
+ * sst_parse_fw_dma - parse the firmware image & populate the list for dma
+ *
+ * @sst_fw_in_mem	: pointer to audio fw
+ * @size		: size of the firmware
+ * @fw_list		: pointer to sst_sg_list to be populated
+ * This function parses the FW image and saves the parsed image in the list
+ * for dma
+ */
+static int sst_parse_fw_dma(const void *sst_fw_in_mem, unsigned long size,
+				struct sst_sg_list *fw_list)
+{
+	struct fw_module_header *module;
+	u32 count, num_modules;
+	int ret_val;
+
+	ret_val = sst_validate_fw_image(sst_fw_in_mem, size,
+				&module, &num_modules);
+	if (ret_val)
+		return ret_val;
+
+	for (count = 0; count < num_modules; count++) {
+		/* module */
+		ret_val = sst_parse_module_dma(sst_drv_ctx, module, fw_list);
+		if (ret_val)
+			return ret_val;
+		module = (void *)module + sizeof(*module) + module->mod_size;
+	}
+
+	return 0;
+}
+
+static void sst_dma_free_resources(struct sst_dma *dma)
+{
+	pr_debug("entry:%s\n", __func__);
+
+	dma_release_channel(dma->ch);
+}
+
+void sst_fill_config(struct intel_sst_drv *sst_ctx, unsigned int offset)
+{
+	struct sst_fill_config sst_config;
+
+	if (!(sst_ctx->pdata->bdata && sst_ctx->pdata->pdata))
+		return;
+
+	sst_config.sign = SST_CONFIG_SSP_SIGN;
+	memcpy(&sst_config.sst_bdata, sst_ctx->pdata->bdata, sizeof(struct sst_board_config_data));
+	memcpy(&sst_config.sst_pdata, sst_ctx->pdata->pdata, sizeof(struct sst_platform_config_data));
+	sst_config.shim_phy_add = sst_ctx->shim_phy_add;
+	sst_config.mailbox_add = sst_ctx->mailbox_add;
+	MEMCPY_TOIO(sst_ctx->dram + offset, &sst_config, sizeof(sst_config));
+}
+
+/**
+ * sst_do_dma - allocates a DMA channel and initiates the transfer
+ *
+ * @sg_list: Pointer to dma list on which the dma needs to be initiated
+ *
+ * Triggers the DMA
+ */
+static int sst_do_dma(struct sst_sg_list *sg_list)
+{
+	int ret_val;
+
+	/* get a dmac channel */
+	ret_val = sst_alloc_dma_chan(&sst_drv_ctx->dma);
+	if (ret_val)
+		return ret_val;
+
+	/* allocate desc for transfer and submit */
+	ret_val = sst_dma_firmware(&sst_drv_ctx->dma, sg_list);
+
+	sst_dma_free_resources(&sst_drv_ctx->dma);
+
+	return ret_val;
+}
+
+/*
+ * sst_fill_memcpy_list - Fill the memcpy list
+ *
+ * @memcpy_list: List to be filled
+ * @destn: Destination addr to be filled in the list
+ * @src: Source addr to be filled in the list
+ * @size: Size to be filled in the list
+ *
+ * Adds the node to the list after required fields
+ * are populated in the node
+ */
+
+static int sst_fill_memcpy_list(struct list_head *memcpy_list,
+			void *destn, const void *src, u32 size, bool is_io)
+{
+	struct sst_memcpy_list *listnode;
+
+	listnode = kzalloc(sizeof(*listnode), GFP_KERNEL);
+	if (listnode == NULL)
+		return -ENOMEM;
+	listnode->dstn = destn;
+	listnode->src = src;
+	listnode->size = size;
+	listnode->is_io = is_io;
+	list_add_tail(&listnode->memcpylist, memcpy_list);
+
+	return 0;
+}
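+
+/*
+ * The nodes queued here are replayed later by sst_do_memcpy(): entries
+ * flagged is_io are copied with MEMCPY_TOIO() (the destination is
+ * iomem such as IRAM/DRAM), the rest with plain memcpy().
+ */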
+
+static int sst_parse_elf_module_memcpy(struct intel_sst_drv *sst,
+		const void *fw, struct sst_info info, Elf32_Phdr *pr,
+		struct list_head *memcpy_list)
+{
+	void *dstn;
+	unsigned int dstn_phys;
+	int ret_val = 0;
+	int mem_type;
+
+	ret_val = sst_fill_dstn(sst, info, pr, &dstn, &dstn_phys, &mem_type);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = sst_fill_memcpy_list(memcpy_list, dstn,
+			(void *)fw + pr->p_offset, pr->p_filesz, mem_type);
+	if (ret_val)
+		return ret_val;
+
+	return 0;
+}
+
+static int
+sst_parse_elf_fw_memcpy(struct intel_sst_drv *sst, const void *fw_in_mem,
+			struct list_head *memcpy_list)
+{
+	int i = 0;
+
+	Elf32_Ehdr *elf;
+	Elf32_Phdr *pr;
+	struct sst_info info;
+
+	BUG_ON(!fw_in_mem);
+
+	elf = (Elf32_Ehdr *)fw_in_mem;
+	pr = (Elf32_Phdr *) (fw_in_mem + elf->e_phoff);
+	pr_debug("%s entry\n", __func__);
+
+	sst_fill_info(sst, &info);
+
+	while (i < elf->e_phnum) {
+		if (pr[i].p_type == PT_LOAD)
+			sst_parse_elf_module_memcpy(sst, fw_in_mem, info,
+					&pr[i], memcpy_list);
+		i++;
+	}
+	return 0;
+}
+
+/**
+ * sst_parse_module_memcpy - Parse audio FW modules and populate the memcpy list
+ *
+ * @module		: FW module header
+ * @memcpy_list	: Pointer to the list to be populated
+ *
+ * Creates one memcpy list node per block to be copied.
+ * Returns 0 if the module sizes are proper, or a negative error code.
+ */
+static int sst_parse_module_memcpy(struct fw_module_header *module,
+				struct list_head *memcpy_list)
+{
+	struct fw_block_info *block;
+	u32 count;
+	int ret_val = 0;
+	void __iomem *ram_iomem;
+
+	pr_debug("module sign %s size %x blocks %x type %x\n",
+			module->signature, module->mod_size,
+			module->blocks, module->type);
+	pr_debug("module entrypoint 0x%x\n", module->entry_point);
+
+	block = (void *)module + sizeof(*module);
+
+	for (count = 0; count < module->blocks; count++) {
+		if (block->size <= 0) {
+			pr_err("block size invalid\n");
+			return -EINVAL;
+		}
+		switch (block->type) {
+		case SST_IRAM:
+			ram_iomem = sst_drv_ctx->iram;
+			break;
+		case SST_DRAM:
+			ram_iomem = sst_drv_ctx->dram;
+			break;
+		default:
+			pr_err("wrong ram type0x%x in block0x%x\n",
+					block->type, count);
+			return -EINVAL;
+		}
+
+		ret_val = sst_fill_memcpy_list(memcpy_list,
+				ram_iomem + block->ram_offset,
+				(void *)block + sizeof(*block), block->size, 1);
+		if (ret_val)
+			return ret_val;
+
+		block = (void *)block + sizeof(*block) + block->size;
+	}
+	return 0;
+}
+
+/**
+ * sst_parse_fw_memcpy - parse the firmware image & populate the list for memcpy
+ *
+ * @sst_fw_in_mem	: pointer to audio fw
+ * @size		: size of the firmware
+ * @fw_list		: pointer to list_head to be populated
+ * This function parses the FW image and saves the parsed image in the list
+ * for memcpy
+ */
+static int sst_parse_fw_memcpy(const void *sst_fw_in_mem, unsigned long size,
+				struct list_head *fw_list)
+{
+	struct fw_module_header *module;
+	u32 count, num_modules;
+	int ret_val;
+
+	ret_val = sst_validate_fw_image(sst_fw_in_mem, size,
+				&module, &num_modules);
+	if (ret_val)
+		return ret_val;
+
+	for (count = 0; count < num_modules; count++) {
+		/* module */
+		ret_val = sst_parse_module_memcpy(module, fw_list);
+		if (ret_val)
+			return ret_val;
+		module = (void *)module + sizeof(*module) + module->mod_size;
+	}
+
+	return 0;
+}
+
+/**
+ * sst_do_memcpy - function initiates the memcpy
+ *
+ * @memcpy_list: Pointer to the memcpy list on which the memcpy needs to be initiated
+ *
+ * Triggers the memcpy
+ */
+static void sst_do_memcpy(struct list_head *memcpy_list)
+{
+	struct sst_memcpy_list *listnode;
+
+	list_for_each_entry(listnode, memcpy_list, memcpylist) {
+		if (listnode->is_io)
+			MEMCPY_TOIO((void __iomem *)listnode->dstn, listnode->src,
+							listnode->size);
+		else
+			memcpy(listnode->dstn, listnode->src, listnode->size);
+	}
+}
+
+static void sst_memcpy_free_lib_resources(void)
+{
+	struct sst_memcpy_list *listnode, *tmplistnode;
+
+	pr_debug("entry:%s\n", __func__);
+
+	/* Free the list */
+	if (!list_empty(&sst_drv_ctx->libmemcpy_list)) {
+		list_for_each_entry_safe(listnode, tmplistnode,
+				&sst_drv_ctx->libmemcpy_list, memcpylist) {
+			list_del(&listnode->memcpylist);
+			kfree(listnode);
+		}
+	}
+}
+
+void sst_memcpy_free_resources(void)
+{
+	struct sst_memcpy_list *listnode, *tmplistnode;
+
+	pr_debug("entry:%s\n", __func__);
+
+	/* Free the list */
+	if (!list_empty(&sst_drv_ctx->memcpy_list)) {
+		list_for_each_entry_safe(listnode, tmplistnode,
+				&sst_drv_ctx->memcpy_list, memcpylist) {
+			list_del(&listnode->memcpylist);
+			kfree(listnode);
+		}
+	}
+	sst_memcpy_free_lib_resources();
+}
+
+void sst_firmware_load_cb(const struct firmware *fw, void *context)
+{
+	struct intel_sst_drv *ctx = context;
+	int ret = 0;
+
+	pr_debug("In %s\n", __func__);
+
+	if (fw == NULL) {
+		pr_err("request fw failed\n");
+		goto out;
+	}
+
+	if (sst_drv_ctx->sst_state != SST_UN_INIT ||
+			ctx->fw_in_mem != NULL)
+		goto exit;
+
+	pr_debug("Request Fw completed\n");
+	trace_sst_fw_download("End of FW request", ctx->sst_state);
+
+	if (ctx->info.use_elf)
+		ret = sst_validate_elf(fw, false);
+
+	if (ret != 0) {
+		pr_err("FW image invalid...\n");
+		goto out;
+	}
+
+	ctx->fw_in_mem = kzalloc(fw->size, GFP_KERNEL);
+	if (!ctx->fw_in_mem) {
+		pr_err("%s unable to allocate memory\n", __func__);
+		goto out;
+	}
+
+	pr_debug("copied fw to %p", ctx->fw_in_mem);
+	pr_debug("phys: %lx", (unsigned long)virt_to_phys(ctx->fw_in_mem));
+	memcpy(ctx->fw_in_mem, fw->data, fw->size);
+
+	trace_sst_fw_download("Start FW parsing", ctx->sst_state);
+	if (ctx->use_dma) {
+		if (ctx->info.use_elf)
+			ret = sst_parse_elf_fw_dma(ctx, ctx->fw_in_mem,
+							&ctx->fw_sg_list);
+		else
+			ret = sst_parse_fw_dma(ctx->fw_in_mem, fw->size,
+							&ctx->fw_sg_list);
+	} else {
+		if (ctx->info.use_elf)
+			ret = sst_parse_elf_fw_memcpy(ctx, ctx->fw_in_mem,
+							&ctx->memcpy_list);
+		else
+			ret = sst_parse_fw_memcpy(ctx->fw_in_mem, fw->size,
+							&ctx->memcpy_list);
+	}
+	trace_sst_fw_download("End FW parsing", ctx->sst_state);
+	if (ret) {
+		kfree(ctx->fw_in_mem);
+		ctx->fw_in_mem = NULL;
+		goto out;
+	}
+
+	/* If static module download (download at boot time) is supported,
+	 * set the flag to indicate lib download is to be done
+	 */
+	if (ctx->pdata->lib_info)
+		if (ctx->pdata->lib_info->mod_ddr_dnld)
+			ctx->lib_dwnld_reqd = true;
+
+	sst_set_fw_state_locked(sst_drv_ctx, SST_FW_LIB_LOAD);
+	goto exit;
+out:
+	sst_set_fw_state_locked(sst_drv_ctx, SST_UN_INIT);
+exit:
+	if (fw != NULL)
+		release_firmware(fw);
+}
+
+/*
+ * sst_request_fw - requests audio fw from kernel and saves a copy
+ *
+ * This function requests the SST FW from the kernel, parses it and
+ * saves a copy in the driver context
+ */
+static int sst_request_fw(struct intel_sst_drv *sst)
+{
+	int retval = 0;
+	char name[20];
+	const struct firmware *fw;
+
+	snprintf(name, sizeof(name), "%s%04x%s", "fw_sst_",
+				sst->pci_id, ".bin");
+	pr_debug("Requesting FW %s now...\n", name);
+
+	retval = request_firmware(&fw, name, sst->dev);
+	if (retval) {
+		pr_err("request fw failed %d\n", retval);
+		return retval;
+	}
+	if (fw == NULL) {
+		pr_err("fw is returning as null\n");
+		return -EINVAL;
+	}
+	trace_sst_fw_download("End of FW request", sst->sst_state);
+	if (sst->info.use_elf)
+		retval = sst_validate_elf(fw, false);
+	if (retval != 0) {
+		pr_err("FW image invalid...\n");
+		goto end_release;
+	}
+	sst->fw_in_mem = kzalloc(fw->size, GFP_KERNEL);
+	if (!sst->fw_in_mem) {
+		pr_err("%s unable to allocate memory\n", __func__);
+		retval = -ENOMEM;
+		goto end_release;
+	}
+	pr_debug("copied fw to %p", sst->fw_in_mem);
+	pr_debug("phys: %lx", (unsigned long)virt_to_phys(sst->fw_in_mem));
+	memcpy(sst->fw_in_mem, fw->data, fw->size);
+	trace_sst_fw_download("Start FW parsing", sst->sst_state);
+	if (sst->use_dma) {
+		if (sst->info.use_elf)
+			retval = sst_parse_elf_fw_dma(sst, sst->fw_in_mem,
+							&sst->fw_sg_list);
+		else
+			retval = sst_parse_fw_dma(sst->fw_in_mem, fw->size,
+							&sst->fw_sg_list);
+	} else {
+		if (sst->info.use_elf)
+			retval = sst_parse_elf_fw_memcpy(sst, sst->fw_in_mem,
+							&sst->memcpy_list);
+		else
+			retval = sst_parse_fw_memcpy(sst->fw_in_mem, fw->size,
+							&sst->memcpy_list);
+	}
+	trace_sst_fw_download("End FW parsing", sst->sst_state);
+	if (retval) {
+		kfree(sst->fw_in_mem);
+		sst->fw_in_mem = NULL;
+	}
+
+	/* If static module download (download at boot time) is supported,
+	 * set the flag to indicate lib download is to be done
+	 */
+	if (sst->pdata->lib_info)
+		if (sst->pdata->lib_info->mod_ddr_dnld)
+			sst->lib_dwnld_reqd = true;
+end_release:
+	release_firmware(fw);
+	return retval;
+}
+
+static inline void print_lib_info(struct snd_sst_lib_download_info *resp)
+{
+	pr_debug("codec Type %d Ver %d Built %s: %s\n",
+		resp->dload_lib.lib_info.lib_type,
+		resp->dload_lib.lib_info.lib_version,
+		resp->dload_lib.lib_info.b_date,
+		resp->dload_lib.lib_info.b_time);
+}
+
+/*
+ * sst_download_library - download a codec/post-processing library
+ *
+ * This function is called when any codec or post-processing library
+ * needs to be downloaded to the DSP.
+ */
+static int sst_download_library(const struct firmware *fw_lib,
+				struct snd_sst_lib_download_info *lib)
+{
+	int ret_val = 0;
+
+	/* send IPC message and wait */
+	u8 pvt_id;
+	struct ipc_post *msg = NULL;
+	union config_status_reg csr;
+	struct snd_sst_str_type str_type = {0};
+	int retval = 0;
+	void *codec_fw;
+	struct sst_block *block;
+
+	pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+	ret_val = sst_create_block_and_ipc_msg(&msg, true, sst_drv_ctx, &block,
+				IPC_IA_PREP_LIB_DNLD, pvt_id);
+	if (ret_val) {
+		pr_err("library download failed\n");
+		return ret_val;
+	}
+
+	sst_fill_header(&msg->header, IPC_IA_PREP_LIB_DNLD, 1, pvt_id);
+	msg->header.part.data = sizeof(u32) + sizeof(str_type);
+	str_type.codec_type = lib->dload_lib.lib_info.lib_type;
+	/*str_type.pvt_id = pvt_id;*/
+	memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
+	memcpy(msg->mailbox_data + sizeof(u32), &str_type, sizeof(str_type));
+	sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+	retval = sst_wait_timeout(sst_drv_ctx, block);
+	if (block->data) {
+		struct snd_sst_str_type *str_type =
+			(struct snd_sst_str_type *)block->data;
+		if (str_type->result) {
+			/* error */
+			pr_err("Prep codec downloaded failed %d\n",
+					str_type->result);
+			retval = -EIO;
+			goto free_block;
+		}
+		kfree(block->data);
+		/* clear so sst_free_block() does not free it again */
+		block->data = NULL;
+	} else if (retval != 0) {
+		retval = -EIO;
+		goto free_block;
+	}
+	pr_debug("FW responded, ready for download now...\n");
+	codec_fw = kzalloc(fw_lib->size, GFP_KERNEL);
+	if (!codec_fw) {
+		memset(lib, 0, sizeof(*lib));
+		retval = -ENOMEM;
+		goto send_ipc;
+	}
+	memcpy(codec_fw, fw_lib->data, fw_lib->size);
+
+	if (sst_drv_ctx->use_dma)
+		retval = sst_parse_fw_dma(codec_fw, fw_lib->size,
+				 &sst_drv_ctx->library_list);
+	else
+		retval = sst_parse_fw_memcpy(codec_fw, fw_lib->size,
+				 &sst_drv_ctx->libmemcpy_list);
+
+	if (retval) {
+		memset(lib, 0, sizeof(*lib));
+		goto send_ipc;
+	}
+
+	/* downloading on success */
+	sst_drv_ctx->sst_state = SST_FW_LOADED;
+	mutex_lock(&sst_drv_ctx->csr_lock);
+	csr.full = readl(sst_drv_ctx->shim + SST_CSR);
+	csr.part.run_stall = 1;
+	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
+
+	csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+	csr.part.bypass = 0x7;
+	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
+	mutex_unlock(&sst_drv_ctx->csr_lock);
+
+	if (sst_drv_ctx->use_dma) {
+		ret_val = sst_do_dma(&sst_drv_ctx->library_list);
+		if (ret_val) {
+			pr_err("sst_do_dma failed, abort\n");
+			memset(lib, 0, sizeof(*lib));
+		}
+	} else {
+		sst_do_memcpy(&sst_drv_ctx->libmemcpy_list);
+	}
+	/* set the FW to running again */
+	mutex_lock(&sst_drv_ctx->csr_lock);
+	csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+	csr.part.bypass = 0x0;
+	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
+
+	csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+	csr.part.run_stall = 0;
+	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
+	mutex_unlock(&sst_drv_ctx->csr_lock);
+send_ipc:
+	/* send download complete and wait */
+	if (sst_create_ipc_msg(&msg, true)) {
+		retval = -ENOMEM;
+		goto free_resources;
+	}
+
+	block->condition = false;
+	block->msg_id = IPC_IA_LIB_DNLD_CMPLT;
+	sst_fill_header(&msg->header, IPC_IA_LIB_DNLD_CMPLT, 1, pvt_id);
+	msg->header.part.data = sizeof(u32) + sizeof(*lib);
+	lib->pvt_id = pvt_id;
+	memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
+	memcpy(msg->mailbox_data + sizeof(u32), lib, sizeof(*lib));
+	sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+	pr_debug("Waiting for FW response Download complete\n");
+	retval = sst_wait_timeout(sst_drv_ctx, block);
+	sst_drv_ctx->sst_state = SST_FW_RUNNING;
+	if (block->data) {
+		struct snd_sst_lib_download_info *resp = block->data;
+		retval = resp->result;
+		if (retval) {
+			pr_err("err in lib dload %x\n", resp->result);
+			goto free_resources;
+		} else {
+			pr_debug("Codec download complete...\n");
+			print_lib_info(resp);
+		}
+	} else if (retval) {
+		/* error */
+		retval = -EIO;
+		goto free_resources;
+	}
+
+	pr_debug("FW success on Download complete\n");
+
+free_resources:
+	if (sst_drv_ctx->use_dma) {
+		kfree(sst_drv_ctx->library_list.src);
+		kfree(sst_drv_ctx->library_list.dst);
+		sst_drv_ctx->library_list.list_len = 0;
+	}
+
+	kfree(codec_fw);
+	mutex_unlock(&sst_drv_ctx->sst_lock);
+free_block:
+	sst_free_block(sst_drv_ctx, block);
+	return retval;
+}
+
+/*
+ * Writing the DDR physical base to DCCM offset
+ * so that FW can use it to setup TLB
+ */
+static void sst_dccm_config_write(void __iomem *dram_base, unsigned int ddr_base)
+{
+	void __iomem *addr;
+	u32 bss_reset = 0;
+
+	addr = (void __iomem *)(dram_base + MRFLD_FW_DDR_BASE_OFFSET);
+	MEMCPY_TOIO(addr, (void *)&ddr_base, sizeof(u32));
+	bss_reset |= (1 << MRFLD_FW_BSS_RESET_BIT);
+	addr = (void __iomem *)(dram_base + MRFLD_FW_FEATURE_BASE_OFFSET);
+	MEMCPY_TOIO(addr, &bss_reset, sizeof(u32));
+	pr_debug("%s: config written to DCCM\n", __func__);
+}
+
+void sst_post_download_mrfld(struct intel_sst_drv *ctx)
+{
+	sst_dccm_config_write(ctx->dram, ctx->ddr_base);
+	/* For mrfld, download all libraries the first time fw is
+	 * downloaded */
+	pr_debug("%s: lib_dwnld = %u\n", __func__, ctx->lib_dwnld_reqd);
+	if (ctx->lib_dwnld_reqd) {
+		sst_load_all_modules_elf(ctx, sst_modules_mrfld, ARRAY_SIZE(sst_modules_mrfld));
+		ctx->lib_dwnld_reqd = false;
+	}
+}
+
+void sst_post_download_ctp(struct intel_sst_drv *ctx)
+{
+	sst_fill_config(ctx, 0);
+}
+
+void sst_post_download_byt(struct intel_sst_drv *ctx)
+{
+	sst_dccm_config_write(ctx->dram, ctx->ddr_base);
+	sst_fill_config(ctx, 2 * sizeof(u32));
+
+	pr_debug("%s: lib_dwnld = %u\n", __func__, ctx->lib_dwnld_reqd);
+	if (ctx->lib_dwnld_reqd) {
+		sst_load_all_modules_elf(ctx, sst_modules_byt,
+					ARRAY_SIZE(sst_modules_byt));
+		ctx->lib_dwnld_reqd = false;
+	}
+}
+
+static void sst_init_lib_mem_mgr(struct intel_sst_drv *ctx)
+{
+	struct sst_mem_mgr *mgr = &ctx->lib_mem_mgr;
+	const struct sst_lib_dnld_info *lib_info = ctx->pdata->lib_info;
+
+	memset(mgr, 0, sizeof(*mgr));
+	mgr->current_base = lib_info->mod_base + lib_info->mod_table_offset
+						+ lib_info->mod_table_size;
+	mgr->avail = lib_info->mod_end - mgr->current_base + 1;
+
+	pr_debug("current base = 0x%lx , avail = 0x%x\n",
+		(unsigned long)mgr->current_base, mgr->avail);
+}
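+
+/*
+ * Worked example (illustrative values; the real ones come from
+ * pdata->lib_info): with mod_base = 0x06000000, mod_table_offset =
+ * 0x100, mod_table_size = 0x100 and mod_end = 0x06ffffff, the manager
+ * starts at current_base = 0x06000200 with avail = 0x00fffe00 bytes.
+ */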
+
+/**
+ * sst_load_fw - function to load FW into DSP
+ *
+ * Transfers the FW to the DSP using dma or memcpy
+ */
+int sst_load_fw(void)
+{
+	int ret_val = 0;
+	struct sst_block *block;
+
+	pr_debug("sst_load_fw\n");
+
+	if (sst_drv_ctx->sst_state != SST_START_INIT &&
+			sst_drv_ctx->sst_state != SST_FW_LIB_LOAD)
+		return -EAGAIN;
+
+	if (!sst_drv_ctx->fw_in_mem) {
+		if (sst_drv_ctx->sst_state != SST_START_INIT) {
+			pr_err("sst: wait for FW to be downloaded\n");
+			return -EBUSY;
+		} else {
+			trace_sst_fw_download("Req FW sent in check device",
+						sst_drv_ctx->sst_state);
+			pr_debug("sst: FW not in memory retry to download\n");
+			ret_val = sst_request_fw(sst_drv_ctx);
+			if (ret_val)
+				return ret_val;
+		}
+	}
+
+	BUG_ON(!sst_drv_ctx->fw_in_mem);
+	block = sst_create_block(sst_drv_ctx, 0, FW_DWNL_ID);
+	if (block == NULL)
+		return -ENOMEM;
+
+	/* Prevent C-states beyond C6 */
+	pm_qos_update_request(sst_drv_ctx->qos, CSTATE_EXIT_LATENCY_S0i1 - 1);
+
+	ret_val = sst_drv_ctx->ops->reset();
+	if (ret_val)
+		goto restore;
+
+	trace_sst_fw_download("Start FW copy", sst_drv_ctx->sst_state);
+	if (sst_drv_ctx->use_dma) {
+		ret_val = sst_do_dma(&sst_drv_ctx->fw_sg_list);
+		if (ret_val) {
+			pr_err("sst_do_dma failed, abort\n");
+			goto restore;
+		}
+	} else {
+		sst_do_memcpy(&sst_drv_ctx->memcpy_list);
+	}
+
+	trace_sst_fw_download("Post download for Lib start",
+			sst_drv_ctx->sst_state);
+	/* Write the DRAM/DCCM config before enabling FW */
+	if (sst_drv_ctx->ops->post_download)
+		sst_drv_ctx->ops->post_download(sst_drv_ctx);
+	trace_sst_fw_download("Post download for Lib end",
+			sst_drv_ctx->sst_state);
+	sst_drv_ctx->sst_state = SST_FW_LOADED;
+
+	/* bring sst out of reset */
+	ret_val = sst_drv_ctx->ops->start();
+	if (ret_val)
+		goto restore;
+	trace_sst_fw_download("DSP reset done",
+			sst_drv_ctx->sst_state);
+
+	ret_val = sst_wait_timeout(sst_drv_ctx, block);
+	if (ret_val) {
+		pr_err("fw download failed %d\n" , ret_val);
+		/* assume FW d/l failed due to timeout*/
+		ret_val = -EBUSY;
+
+	}
+
+restore:
+	/* Re-enable Deeper C-states beyond C6 */
+	pm_qos_update_request(sst_drv_ctx->qos, PM_QOS_DEFAULT_VALUE);
+	sst_free_block(sst_drv_ctx, block);
+
+	return ret_val;
+}
+
+/**
+ * sst_load_library - function to load a codec library into the DSP
+ *
+ * @lib: Pointer to the lib download structure
+ * @ops: Contains the stream ops
+ * This function is called when FW requests for a particular library download
+ * This function prepares & downloads the library
+ */
+int sst_load_library(struct snd_sst_lib_download *lib, u8 ops)
+{
+	char buf[20];
+	const char *type, *dir;
+	int len = 0, error = 0;
+	u32 entry_point;
+	const struct firmware *fw_lib;
+	struct snd_sst_lib_download_info dload_info = {{{0},},};
+
+	memset(buf, 0, sizeof(buf));
+
+	pr_debug("Lib Type 0x%x, Slot 0x%x, ops 0x%x\n",
+			lib->lib_info.lib_type, lib->slot_info.slot_num, ops);
+	pr_debug("Version 0x%x, name %s, caps 0x%x media type 0x%x\n",
+		lib->lib_info.lib_version, lib->lib_info.lib_name,
+		lib->lib_info.lib_caps, lib->lib_info.media_type);
+
+	pr_debug("IRAM Size 0x%x, offset 0x%x\n",
+		lib->slot_info.iram_size, lib->slot_info.iram_offset);
+	pr_debug("DRAM Size 0x%x, offset 0x%x\n",
+		lib->slot_info.dram_size, lib->slot_info.dram_offset);
+
+	switch (lib->lib_info.lib_type) {
+	case SST_CODEC_TYPE_MP3:
+		type = "mp3_";
+		break;
+	case SST_CODEC_TYPE_AAC:
+		type = "aac_";
+		break;
+	case SST_CODEC_TYPE_AACP:
+		type = "aac_v1_";
+		break;
+	case SST_CODEC_TYPE_eAACP:
+		type = "aac_v2_";
+		break;
+	case SST_CODEC_TYPE_WMA9:
+		type = "wma9_";
+		break;
+	default:
+		pr_err("Invalid codec type\n");
+		error = -EINVAL;
+		goto wake;
+	}
+
+	if (ops == STREAM_OPS_CAPTURE)
+		dir = "enc_";
+	else
+		dir = "dec_";
+	len = strlen(type) + strlen(dir);
+	strncpy(buf, type, sizeof(buf)-1);
+	strncpy(buf + strlen(type), dir, sizeof(buf)-strlen(type)-1);
+	len += snprintf(buf + len, sizeof(buf) - len, "%d",
+			lib->slot_info.slot_num);
+	len += snprintf(buf + len, sizeof(buf) - len, ".bin");
+
+	pr_debug("Requesting %s\n", buf);
+
+	error = request_firmware(&fw_lib, buf, sst_drv_ctx->dev);
+	if (error) {
+		pr_err("library load failed %d\n", error);
+		goto wake;
+	}
+	if (fw_lib == NULL) {
+		pr_err("fw_lib pointer is returning null\n");
+		error = -EINVAL;
+		goto wake;
+	}
+	error = sst_validate_library(fw_lib, &lib->slot_info, &entry_point);
+	if (error)
+		goto wake_free;
+
+	lib->mod_entry_pt = entry_point;
+	memcpy(&dload_info.dload_lib, lib, sizeof(*lib));
+	/* Prevent C-states beyond C6 */
+	pm_qos_update_request(sst_drv_ctx->qos, CSTATE_EXIT_LATENCY_S0i1 - 1);
+	error = sst_download_library(fw_lib, &dload_info);
+	/* Re-enable Deeper C-states beyond C6 */
+	pm_qos_update_request(sst_drv_ctx->qos, PM_QOS_DEFAULT_VALUE);
+	if (error)
+		goto wake_free;
+
+	/* lib is downloaded and init send alloc again */
+	pr_debug("Library is downloaded now...\n");
+wake_free:
+	/* sst_wake_up_alloc_block(sst_drv_ctx, pvt_id, error, NULL); */
+	release_firmware(fw_lib);
+wake:
+	return error;
+}
+
+/* In a relocatable ELF file there can be relocatable variables and
+ * functions. Variables are kept in the Global Offset Table (GOT) and
+ * functions in the Procedure Linkage Table (PLT). In the current codec
+ * binaries only relocatable variables are seen, so we use the GOT table.
+ */
+static int sst_find_got_table(Elf32_Shdr *shdr, int nsec, char *in_elf,
+		Elf32_Rela **got, unsigned int *cnt)
+{
+	int i = 0;
+	while (i < nsec) {
+		if (shdr[i].sh_type == SHT_RELA) {
+			*got = (Elf32_Rela *)(in_elf + shdr[i].sh_offset);
+			*cnt = shdr[i].sh_size / sizeof(Elf32_Rela);
+			break;
+		}
+		i++;
+	}
+	if (i == nsec)
+		return -EINVAL;
+
+	return 0;
+}
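+
+/*
+ * Sketch of the walk above: shdr[] is the ELF section header table;
+ * the first section of type SHT_RELA holds an array of
+ *
+ *	Elf32_Rela { r_offset; r_info; r_addend; }
+ *
+ * entries, and cnt = sh_size / sizeof(Elf32_Rela) is the entry count.
+ */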
+
+/* For each entry in the GOT table, find the unrelocated offset. Then
+ * add the relocation base to the offset and write back the new address to the
+ * original variable location.
+ */
+static int sst_relocate_got_entries(Elf32_Rela *table, unsigned int size,
+	char *in_elf, int elf_size, u32 rel_base)
+{
+	int i;
+	Elf32_Rela *entry;
+	Elf32_Addr *target_addr, unreloc_addr;
+
+	for (i = 0; i < size; i++) {
+		entry = &table[i];
+		if (ELF32_R_SYM(entry->r_info) != 0)
+			return -EINVAL;
+		/* the slot must leave room for a full Elf32_Addr write */
+		if (entry->r_offset > elf_size - sizeof(Elf32_Addr)) {
+			pr_err("GOT table target addr out of range\n");
+			return -EINVAL;
+		}
+		target_addr = (Elf32_Addr *)(in_elf + entry->r_offset);
+		unreloc_addr = *target_addr + entry->r_addend;
+		if (unreloc_addr > elf_size) {
+			pr_err("GOT table entry invalid\n");
+			continue;
+		}
+		*target_addr = unreloc_addr + rel_base;
+	}
+	return 0;
+}
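+
+/*
+ * Relocation arithmetic by example (addresses illustrative): for an
+ * entry with r_offset = 0x40 and r_addend = 0, a pre-link slot value
+ * of 0x1000 and rel_base = 0x06000000 rewrite the slot to 0x06001000,
+ * i.e. new = (*target_addr + r_addend) + rel_base.
+ */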
+
+static int sst_relocate_elf(char *in_elf, int elf_size, phys_addr_t rel_base,
+		Elf32_Addr *entry_pt)
+{
+	int retval = 0;
+	Elf32_Ehdr *ehdr = (Elf32_Ehdr *)in_elf;
+	Elf32_Shdr *shdr = (Elf32_Shdr *) (in_elf + ehdr->e_shoff);
+	Elf32_Phdr *phdr = (Elf32_Phdr *) (in_elf + ehdr->e_phoff);
+	int i, num_sec;
+	Elf32_Rela *rel_table = NULL;
+	unsigned int rela_cnt = 0;
+	u32 rbase;
+
+	BUG_ON(rel_base > (u32)(-1));
+	rbase = (u32)rel_base;
+
+	/* relocate the entry_pt */
+	*entry_pt = (Elf32_Addr)(ehdr->e_entry + rbase);
+	num_sec = ehdr->e_shnum;
+
+	/* Find the relocation(GOT) table through the section header */
+	retval = sst_find_got_table(shdr, num_sec, in_elf,
+					&rel_table, &rela_cnt);
+	if (retval < 0)
+		return retval;
+
+	/* Relocate all the entries in the GOT */
+	retval = sst_relocate_got_entries(rel_table, rela_cnt, in_elf,
+						elf_size, rbase);
+	if (retval < 0)
+		return retval;
+
+	pr_debug("GOT entries relocated\n");
+
+	/* Update the program headers in the ELF */
+	for (i = 0; i < ehdr->e_phnum; i++) {
+		if (phdr[i].p_type == PT_LOAD) {
+			phdr[i].p_vaddr += rbase;
+			phdr[i].p_paddr += rbase;
+		}
+	}
+	pr_debug("program header entries updated\n");
+
+	return retval;
+}
+
+#define ALIGN_256 0x100
+
+int sst_get_next_lib_mem(struct sst_mem_mgr *mgr, int size,
+			unsigned long *lib_base)
+{
+	int retval = 0;
+
+	pr_debug("library orig size = 0x%x", size);
+	if (size % ALIGN_256)
+		size += (ALIGN_256 - (size % ALIGN_256));
+	if (size > mgr->avail)
+		return -ENOMEM;
+
+	*lib_base = mgr->current_base;
+	mgr->current_base += size;
+	mgr->avail -= size;
+	mgr->count++;
+	pr_debug("library base = 0x%lx", *lib_base);
+	pr_debug("library aligned size = 0x%x", size);
+	pr_debug("lib count = %d\n", mgr->count);
+	return retval;
+
+}
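+
+/*
+ * Alignment example: a request of 0x1234 bytes is rounded up to the
+ * next 256-byte boundary, 0x1300, before being carved out of the
+ * manager, so successive libraries always start 0x100-aligned.
+ */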
+
+static int sst_download_lib_elf(struct intel_sst_drv *sst, const void *lib,
+		int size)
+{
+	int retval = 0;
+
+	pr_debug("In %s\n", __func__);
+
+	if (sst->use_dma) {
+		retval = sst_parse_elf_fw_dma(sst, lib,
+				 &sst->library_list);
+		if (retval)
+			goto free_dma_res;
+		retval = sst_do_dma(&sst->library_list);
+		if (retval)
+			pr_err("sst_do_dma failed, abort\n");
+free_dma_res:
+		kfree(sst->library_list.src);
+		kfree(sst->library_list.dst);
+		sst->library_list.list_len = 0;
+	} else {
+		retval = sst_parse_elf_fw_memcpy(sst, lib,
+				 &sst->libmemcpy_list);
+		if (retval)
+			return retval;
+		sst_do_memcpy(&sst->libmemcpy_list);
+		sst_memcpy_free_lib_resources();
+	}
+	pr_debug("download lib complete");
+	return retval;
+}
+
+static void sst_fill_fw_module_table(struct sst_module_info *mod_list,
+		int list_size, unsigned long ddr_base)
+{
+	int i;
+	u32 *write_ptr = (u32 *)ddr_base;
+
+	pr_debug("In %s\n", __func__);
+
+	for (i = 0; i < list_size; i++) {
+		if (mod_list[i].status == SST_LIB_DOWNLOADED) {
+			pr_debug("status dnwld for %d\n", i);
+			pr_debug("module id %d\n", mod_list[i].id);
+			pr_debug("entry pt 0x%x\n", mod_list[i].entry_pt);
+
+			*write_ptr++ = mod_list[i].id;
+			*write_ptr++ = mod_list[i].entry_pt;
+		}
+	}
+}
+
+static int sst_request_lib_elf(struct sst_module_info *mod_entry,
+	const struct firmware **fw_lib, int pci_id, struct device *dev)
+{
+	char name[25];
+	int retval = 0;
+
+	snprintf(name, sizeof(name), "%s%s%04x%s", mod_entry->name,
+			"_", pci_id, ".bin");
+	pr_debug("Requesting %s\n", name);
+
+	retval = request_firmware(fw_lib, name, dev);
+	if (retval) {
+		pr_err("%s library load failed %d\n", name, retval);
+		return retval;
+	}
+	pr_debug("got lib\n");
+	mod_entry->status = SST_LIB_FOUND;
+	return 0;
+}
+
+static int sst_allocate_lib_mem(const struct firmware *lib, int size,
+	struct sst_mem_mgr *mem_mgr, char **out_elf, unsigned long *lib_start)
+{
+	int retval = 0;
+
+	*out_elf = kzalloc(size, GFP_KERNEL);
+	if (!*out_elf) {
+		pr_err("cannot alloc mem for elf copy %d\n", retval);
+		goto mem_error;
+	}
+
+	memcpy(*out_elf, lib->data, size);
+	retval = sst_get_next_lib_mem(mem_mgr, size, lib_start);
+	if (retval < 0) {
+		pr_err("cannot alloc ddr mem for lib: %d\n", retval);
+		kfree(*out_elf);
+		goto mem_error;
+	}
+	return 0;
+
+mem_error:
+	release_firmware(lib);
+	return -ENOMEM;
+}
+
+int sst_load_all_modules_elf(struct intel_sst_drv *ctx, struct sst_module_info *mod_table,
+								int num_modules)
+{
+	int retval = 0;
+	int i;
+	const struct firmware *fw_lib;
+	struct sst_module_info *mod = NULL;
+	char *out_elf;
+	unsigned int lib_size = 0;
+	unsigned int mod_table_offset = ctx->pdata->lib_info->mod_table_offset;
+	unsigned long lib_base;
+
+	pr_debug("In %s", __func__);
+
+	sst_init_lib_mem_mgr(ctx);
+
+	for (i = 0; i < num_modules; i++) {
+		mod = &mod_table[i];
+		trace_sst_lib_download("Start of Request Lib", mod->name);
+		retval = sst_request_lib_elf(mod, &fw_lib,
+						ctx->pci_id, ctx->dev);
+		if (retval < 0)
+			continue;
+		lib_size = fw_lib->size;
+
+		trace_sst_lib_download("End of Request Lib", mod->name);
+		retval = sst_validate_elf(fw_lib, true);
+		if (retval < 0) {
+			pr_err("library is not valid elf %d\n", retval);
+			release_firmware(fw_lib);
+			continue;
+		}
+		pr_debug("elf validated\n");
+		retval = sst_allocate_lib_mem(fw_lib, lib_size,
+				&ctx->lib_mem_mgr, &out_elf, &lib_base);
+		if (retval < 0) {
+			pr_err("lib mem allocation failed: %d\n", retval);
+			continue;
+		}
+		pr_debug("lib space allocated\n");
+
+		/* relocate in place */
+		retval = sst_relocate_elf(out_elf, lib_size,
+						lib_base, &mod->entry_pt);
+		if (retval < 0) {
+			pr_err("lib elf relocation failed: %d\n", retval);
+			release_firmware(fw_lib);
+			kfree(out_elf);
+			continue;
+		}
+		pr_debug("relocation done\n");
+		release_firmware(fw_lib);
+		trace_sst_lib_download("Start of download Lib", mod->name);
+		/* write to ddr imr region, use memcpy method */
+		retval = sst_download_lib_elf(ctx, out_elf, lib_size);
+		trace_sst_lib_download("End of download Lib", mod->name);
+		if (!retval)
+			mod->status = SST_LIB_DOWNLOADED;
+		kfree(out_elf);
+	}
+
+	/* write module table to DDR */
+	sst_fill_fw_module_table(mod_table, num_modules,
+			(unsigned long)(ctx->ddr + mod_table_offset));
+	return retval;
+}
diff --git a/sound/soc/intel/sst/sst_ipc.c b/sound/soc/intel/sst/sst_ipc.c
new file mode 100644
index 0000000..097c715
--- /dev/null
+++ b/sound/soc/intel/sst/sst_ipc.c
@@ -0,0 +1,768 @@
+/*
+ *  sst_ipc.c - Intel SST Driver for audio engine
+ *
+ *  Copyright (C) 2008-10 Intel Corporation
+ *  Authors:	Vinod Koul <vinod.koul@intel.com>
+ *		Harsha Priya <priya.harsha@intel.com>
+ *		Dharageswari R <dharageswari.r@intel.com>
+ *		KP Jeeja <jeeja.kp@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This file defines all ipc functions
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <sound/intel_sst_ioctl.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+#include "sst_trace.h"
+
+void sst_dump_to_buffer(const void *from, size_t len, char *buf)
+{
+	int i, end;
+	const unsigned char *cmd = from;
+
+	if (len == 0) {
+		buf[0] = '\0';
+		return;
+	}
+
+	for (end = len - 1; end >= 0; end--)
+		if (cmd[end])
+			break;
+	end++;
+
+	buf += snprintf(buf, 3, "%02x", cmd[0]);
+	for (i = 1; i < len; i++) {
+		buf += snprintf(buf, 4, " %02x", cmd[i]);
+		if (i == end && end != len - 1) {
+			sprintf(buf, "...");
+			break;
+		}
+	}
+}
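+
+/*
+ * Example output (illustrative): for the 8-byte payload
+ * 01 02 00 00 00 00 00 00 the buffer reads "01 02 00..." - the run of
+ * trailing zero bytes after the last non-zero byte is collapsed to "...".
+ */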
+
+struct sst_block *sst_create_block(struct intel_sst_drv *ctx,
+					u32 msg_id, u32 drv_id)
+{
+	struct sst_block *msg = NULL;
+
+	pr_debug("in %s\n", __func__);
+	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg) {
+		pr_err("kzalloc block failed\n");
+		return NULL;
+	}
+	msg->condition = false;
+	msg->on = true;
+	msg->msg_id = msg_id;
+	msg->drv_id = drv_id;
+	spin_lock_bh(&ctx->block_lock);
+	list_add_tail(&msg->node, &ctx->block_list);
+	spin_unlock_bh(&ctx->block_lock);
+
+	return msg;
+}
+
+int sst_wake_up_block(struct intel_sst_drv *ctx, int result,
+		u32 drv_id, u32 ipc, void *data, u32 size)
+{
+	struct sst_block *block = NULL;
+
+	pr_debug("in %s\n", __func__);
+	spin_lock_bh(&ctx->block_lock);
+	list_for_each_entry(block, &ctx->block_list, node) {
+		pr_debug("Block ipc %d, drv_id %d\n", block->msg_id,
+							block->drv_id);
+		if (block->msg_id == ipc && block->drv_id == drv_id) {
+			pr_debug("free up the block\n");
+			block->ret_code = result;
+			block->data = data;
+			block->size = size;
+			block->condition = true;
+			spin_unlock_bh(&ctx->block_lock);
+			wake_up(&ctx->wait_queue);
+			return 0;
+		}
+	}
+	spin_unlock_bh(&ctx->block_lock);
+	pr_debug("Block not found or a response is received for a short message for ipc %d, drv_id %d\n",
+			ipc, drv_id);
+	return -EINVAL;
+}
+
+int sst_free_block(struct intel_sst_drv *ctx, struct sst_block *freed)
+{
+	struct sst_block *block = NULL, *__block;
+
+	pr_debug("in %s\n", __func__);
+	spin_lock_bh(&ctx->block_lock);
+	list_for_each_entry_safe(block, __block, &ctx->block_list, node) {
+		if (block == freed) {
+			list_del(&freed->node);
+			kfree(freed->data);
+			freed->data = NULL;
+			kfree(freed);
+			spin_unlock_bh(&ctx->block_lock);
+			return 0;
+		}
+	}
+	spin_unlock_bh(&ctx->block_lock);
+	return -EINVAL;
+}
+
+/*
+ * sst_send_runtime_param - send runtime param to SST
+ *
+ * this function sends the runtime parameter to sst dsp engine
+ */
+static int sst_send_runtime_param(struct snd_sst_runtime_params *params)
+{
+	struct ipc_post *msg = NULL;
+	int ret_val;
+
+	pr_debug("Enter:%s\n", __func__);
+	ret_val = sst_create_ipc_msg(&msg, true);
+	if (ret_val)
+		return ret_val;
+	sst_fill_header(&msg->header, IPC_IA_SET_RUNTIME_PARAMS, 1,
+							params->str_id);
+	msg->header.part.data = sizeof(u32) + sizeof(*params) - sizeof(params->addr)
+				+ params->size;
+	memcpy(msg->mailbox_data, &msg->header.full, sizeof(u32));
+	memcpy(msg->mailbox_data + sizeof(u32), params, sizeof(*params)
+				- sizeof(params->addr));
+	/* driver doesn't need to send address, so overwrite addr with data */
+	memcpy(msg->mailbox_data + sizeof(u32) + sizeof(*params)
+			- sizeof(params->addr),
+			params->addr, params->size);
+	sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+	return 0;
+}
+
+void sst_post_message_mrfld(struct work_struct *work)
+{
+	struct ipc_post *msg;
+	union ipc_header_mrfld header;
+	unsigned long irq_flags;
+
+	pr_debug("Enter:%s\n", __func__);
+	spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+	/* check list */
+	if (list_empty(&sst_drv_ctx->ipc_dispatch_list)) {
+		/* queue is empty, nothing to send */
+		spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+		pr_debug("Empty msg queue... NO Action\n");
+		return;
+	}
+
+	/* check busy bit */
+	header.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCX);
+	if (header.p.header_high.part.busy) {
+		spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+		pr_debug("Busy not free... post later\n");
+		return;
+	}
+	/* copy msg from list */
+	msg = list_entry(sst_drv_ctx->ipc_dispatch_list.next,
+			struct ipc_post, node);
+	list_del(&msg->node);
+	pr_debug("sst: size: = %x\n", msg->mrfld_header.p.header_low_payload);
+	if (msg->mrfld_header.p.header_high.part.large)
+		memcpy_toio(sst_drv_ctx->mailbox + SST_MAILBOX_SEND,
+			    msg->mailbox_data, msg->mrfld_header.p.header_low_payload);
+
+	trace_sst_ipc("POST  ->", msg->mrfld_header.p.header_high.full,
+				  msg->mrfld_header.p.header_low_payload,
+				  msg->mrfld_header.p.header_high.part.drv_id);
+	trace_sst_ipc_mailbox(msg->mailbox_data, msg->mrfld_header.p.header_low_payload);
+	sst_shim_write64(sst_drv_ctx->shim, SST_IPCX, msg->mrfld_header.full);
+	spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+	pr_debug("sst: Post message: header = %x\n",
+					msg->mrfld_header.p.header_high.full);
+	kfree(msg->mailbox_data);
+	kfree(msg);
+}
+
+/**
+ * sst_post_message_mfld - Posts message to SST
+ *
+ * @work: Pointer to work structure
+ *
+ * This function is called by any component in the driver which
+ * wants to send an IPC message. It posts the message only if
+ * the busy bit is free.
+ */
+void sst_post_message_mfld(struct work_struct *work)
+{
+	struct ipc_post *msg;
+	union ipc_header header;
+	unsigned long irq_flags;
+
+	pr_debug("Enter:%s\n", __func__);
+
+	spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+	/* check list */
+	if (list_empty(&sst_drv_ctx->ipc_dispatch_list)) {
+		/* queue is empty, nothing to send */
+		spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+		pr_debug("Empty msg queue... NO Action\n");
+		return;
+	}
+
+	/* check busy bit */
+	header.full = sst_shim_read(sst_drv_ctx->shim, sst_drv_ctx->ipc_reg.ipcx);
+	if (header.part.busy) {
+		spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+		pr_debug("Busy not free... Post later\n");
+		return;
+	}
+	/* copy msg from list */
+	msg = list_entry(sst_drv_ctx->ipc_dispatch_list.next,
+			struct ipc_post, node);
+	list_del(&msg->node);
+	pr_debug("size: = %x\n", msg->header.part.data);
+	if (msg->header.part.large)
+		memcpy_toio(sst_drv_ctx->mailbox + SST_MAILBOX_SEND,
+			msg->mailbox_data, msg->header.part.data);
+
+	sst_shim_write(sst_drv_ctx->shim, sst_drv_ctx->ipc_reg.ipcx, msg->header.full);
+	spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+	pr_debug("Posted message: header = %x\n", msg->header.full);
+
+	kfree(msg->mailbox_data);
+	kfree(msg);
+}
+
+int sst_sync_post_message_mrfld(struct ipc_post *msg)
+{
+	union ipc_header_mrfld header;
+	unsigned int loop_count = 0;
+	int retval = 0;
+	unsigned long irq_flags;
+
+	pr_debug("Enter:%s\n", __func__);
+	spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+
+	/* check busy bit */
+	header.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCX);
+	while (header.p.header_high.part.busy) {
+		if (loop_count > 10) {
+			pr_err("sst: Busy wait failed, cant send this msg\n");
+			retval = -EBUSY;
+			goto out;
+		}
+		udelay(500);
+		loop_count++;
+		header.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCX);
+	}
+	pr_debug("sst: Post message: header = %x\n",
+					msg->mrfld_header.p.header_high.full);
+	pr_debug("sst: size = 0x%x\n", msg->mrfld_header.p.header_low_payload);
+	if (msg->mrfld_header.p.header_high.part.large)
+		memcpy_toio(sst_drv_ctx->mailbox + SST_MAILBOX_SEND,
+			msg->mailbox_data, msg->mrfld_header.p.header_low_payload);
+
+	trace_sst_ipc("POST  ->", msg->mrfld_header.p.header_high.full,
+				  msg->mrfld_header.p.header_low_payload,
+				  msg->mrfld_header.p.header_high.part.drv_id);
+	trace_sst_ipc_mailbox(msg->mailbox_data, msg->mrfld_header.p.header_low_payload);
+	sst_shim_write64(sst_drv_ctx->shim, SST_IPCX, msg->mrfld_header.full);
+
+out:
+	spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+	kfree(msg->mailbox_data);
+	kfree(msg);
+	return retval;
+}
+
+/* use this for trigger ops to post synchronous msgs
+ */
+int sst_sync_post_message_mfld(struct ipc_post *msg)
+{
+	union ipc_header header;
+	unsigned int loop_count = 0;
+	int retval = 0;
+	unsigned long irq_flags;
+
+	pr_debug("Enter:%s\n", __func__);
+	spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+
+	/* check busy bit */
+	header.full = sst_shim_read(sst_drv_ctx->shim, sst_drv_ctx->ipc_reg.ipcx);
+	while (header.part.busy) {
+		if (loop_count > 10) {
+			pr_err("busy wait failed, cant send this msg\n");
+			retval = -EBUSY;
+			goto out;
+		}
+		udelay(500);
+		loop_count++;
+		header.full = sst_shim_read(sst_drv_ctx->shim, sst_drv_ctx->ipc_reg.ipcx);
+	}
+	pr_debug("sst: Post message: header = %x\n", msg->header.full);
+	if (msg->header.part.large)
+		memcpy_toio(sst_drv_ctx->mailbox + SST_MAILBOX_SEND,
+			msg->mailbox_data, msg->header.part.data);
+	sst_shim_write(sst_drv_ctx->shim, sst_drv_ctx->ipc_reg.ipcx, msg->header.full);
+
+out:
+	spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+	kfree(msg->mailbox_data);
+	kfree(msg);
+
+	return retval;
+}
+
+/*
+ * intel_sst_clear_intr_mfld - clear the SST FW interrupt
+ *
+ * This function clears the interrupt register after the interrupt
+ * bottom half is complete, allowing the next interrupt to arrive
+ */
+void intel_sst_clear_intr_mfld(void)
+{
+	union interrupt_reg isr;
+	union interrupt_reg imr;
+	union ipc_header clear_ipc;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+	imr.full = sst_shim_read(sst_drv_ctx->shim, SST_IMRX);
+	isr.full = sst_shim_read(sst_drv_ctx->shim, SST_ISRX);
+	/* write 1 to clear */
+	isr.part.busy_interrupt = 1;
+	sst_shim_write(sst_drv_ctx->shim, SST_ISRX, isr.full);
+	/* Set IA done bit */
+	clear_ipc.full = sst_shim_read(sst_drv_ctx->shim, sst_drv_ctx->ipc_reg.ipcd);
+	clear_ipc.part.busy = 0;
+	clear_ipc.part.done = 1;
+	clear_ipc.part.data = IPC_ACK_SUCCESS;
+	sst_shim_write(sst_drv_ctx->shim, sst_drv_ctx->ipc_reg.ipcd, clear_ipc.full);
+	/* un mask busy interrupt */
+	imr.part.busy_interrupt = 0;
+	imr.part.done_interrupt = 0;
+	sst_shim_write(sst_drv_ctx->shim, SST_IMRX, imr.full);
+	spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+}
+
+
+void intel_sst_clear_intr_mrfld(void)
+{
+	union interrupt_reg_mrfld isr;
+	union interrupt_reg_mrfld imr;
+	union ipc_header_mrfld clear_ipc;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+	imr.full = sst_shim_read64(sst_drv_ctx->shim, SST_IMRX);
+	isr.full = sst_shim_read64(sst_drv_ctx->shim, SST_ISRX);
+
+	/* write 1 to clear */
+	isr.part.busy_interrupt = 1;
+	sst_shim_write64(sst_drv_ctx->shim, SST_ISRX, isr.full);
+
+	/* Set IA done bit */
+	clear_ipc.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCD);
+
+	clear_ipc.p.header_high.part.busy = 0;
+	clear_ipc.p.header_high.part.done = 1;
+	clear_ipc.p.header_low_payload = IPC_ACK_SUCCESS;
+	sst_shim_write64(sst_drv_ctx->shim, SST_IPCD, clear_ipc.full);
+	/* un mask busy interrupt */
+	imr.part.busy_interrupt = 0;
+	sst_shim_write64(sst_drv_ctx->shim, SST_IMRX, imr.full);
+	spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+}
+
+
+/*
+ * process_fw_init - process the FW init msg
+ *
+ * @msg: IPC message mailbox data from FW
+ *
+ * This function processes the FW init msg from FW
+ * marks FW state and prints debug info of loaded FW
+ */
+static void process_fw_init(void *msg)
+{
+	struct ipc_header_fw_init *init =
+		(struct ipc_header_fw_init *)msg;
+	int retval = 0;
+
+	pr_debug("*** FW Init msg came***\n");
+	if (init->result) {
+		sst_drv_ctx->sst_state = SST_ERROR;
+		pr_err("FW Init failed, Error %x\n", init->result);
+		retval = init->result;
+		goto ret;
+	}
+	pr_info("FW Version %02x.%02x.%02x.%02x\n",
+		init->fw_version.type, init->fw_version.major,
+		init->fw_version.minor, init->fw_version.build);
+	pr_info("Build date %s Time %s\n",
+			init->build_info.date, init->build_info.time);
+
+	/* If there is any runtime parameter to set, send it */
+	if (sst_drv_ctx->runtime_param.param.addr)
+		sst_send_runtime_param(&(sst_drv_ctx->runtime_param.param));
+
+ret:
+	sst_wake_up_block(sst_drv_ctx, retval, FW_DWNL_ID, 0, NULL, 0);
+}
+
+/**
+ * sst_process_message_mfld - Processes message from SST
+ *
+ * @msg: Pointer to the IPC message
+ *
+ * This function is scheduled by the ISR.
+ * It takes a msg from the process_queue and acts based on the msg.
+ */
+void sst_process_message_mfld(struct ipc_post *msg)
+{
+	int str_id;
+	struct stream_info *stream;
+
+	str_id = msg->header.part.str_id;
+	pr_debug("IPC process for %x\n", msg->header.full);
+	/* based on msg in list call respective handler */
+	switch (msg->header.part.msg_id) {
+	case IPC_SST_PERIOD_ELAPSED:
+		if (sst_validate_strid(str_id)) {
+			pr_err("stream id %d invalid\n", str_id);
+			break;
+		}
+		stream = &sst_drv_ctx->streams[str_id];
+		if (stream->period_elapsed)
+			stream->period_elapsed(stream->pcm_substream);
+		break;
+	case IPC_SST_BUF_UNDER_RUN:
+	case IPC_SST_BUF_OVER_RUN:
+		if (sst_validate_strid(str_id)) {
+			pr_err("stream id %d invalid\n", str_id);
+			break;
+		}
+		pr_err("Buffer under/overrun for %d\n",
+				msg->header.part.str_id);
+		pr_err("Got Underrun & not to send data...ignore\n");
+		break;
+
+	case IPC_SST_FRAGMENT_ELPASED: {
+		pr_debug("IPC_SST_FRAGMENT_ELPASED for %d", str_id);
+		sst_cdev_fragment_elapsed(str_id);
+		break;
+	}
+
+	case IPC_IA_PRINT_STRING:
+		pr_debug("been asked to print something by fw\n");
+		/* TBD */
+		break;
+
+	case IPC_IA_FW_INIT_CMPLT: {
+		/* send next data to FW */
+		process_fw_init(msg->mailbox_data);
+		break;
+	}
+
+	case IPC_SST_STREAM_PROCESS_FATAL_ERR:
+		if (sst_validate_strid(str_id)) {
+			pr_err("stream id %d invalid\n", str_id);
+			break;
+		}
+		pr_err("codec fatal error %x stream %d...\n",
+				msg->header.full, msg->header.part.str_id);
+		pr_err("Dropping the stream\n");
+		sst_drop_stream(msg->header.part.str_id);
+		break;
+	default:
+		/* Illegal case */
+		pr_err("Unhandled msg %x header %x\n",
+		msg->header.part.msg_id, msg->header.full);
+	}
+	return;
+}
+
+/**
+ * sst_process_message_mrfld - Processes message from SST
+ *
+ * @msg: Pointer to the IPC message
+ *
+ * This function is scheduled by the ISR.
+ * It takes a msg from the process_queue and acts based on the msg.
+ */
+void sst_process_message_mrfld(struct ipc_post *msg)
+{
+	int str_id;
+
+	str_id = msg->mrfld_header.p.header_high.part.drv_id;
+
+	pr_debug("IPC process message header %x payload %x\n",
+			msg->mrfld_header.p.header_high.full,
+			msg->mrfld_header.p.header_low_payload);
+
+	return;
+}
+
+#define VTSV_MAX_NUM_RESULTS 6
+#define VTSV_SIZE_PER_RESULT 7 /* 7 16 bit words */
+/* Max 6 results each of size 7 words + 1 num results word */
+#define VTSV_MAX_TOTAL_RESULT_SIZE \
+	(VTSV_MAX_NUM_RESULTS*VTSV_SIZE_PER_RESULT + 1)
+/*
+ * Each data word in the result is sent as a string in the format
+ * "DATAn=d", where n is the data word index, ranging from 0 to
+ * VTSV_MAX_TOTAL_RESULT_SIZE-1, and d is the decimal representation
+ * of the data; unsigned 16-bit data needs at most 5 chars.
+ * So the total data string size = 4("DATA") + 2("n") + 1("=")
+ * + 5("d") + 1(null) + 5(reserved) = 18
+ */
+#define VTSV_DATA_STRING_SIZE 18
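+
+/*
+ * Illustrative envp layout (hypothetical values) as built by
+ * send_vtsv_result_event() below:
+ *	{ "EVENT_TYPE=SST_VTSV", "VTSV_RESULT_SIZE=2",
+ *	  "DATA0=513", "DATA1=7", NULL }
+ */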
+
+static int send_vtsv_result_event(void *data, int size)
+{
+	char *envp[VTSV_MAX_TOTAL_RESULT_SIZE+3];
+	char res_size[30];
+	char ev_type[30];
+	char result[VTSV_MAX_TOTAL_RESULT_SIZE][VTSV_DATA_STRING_SIZE];
+	int offset = 0;
+	u16 *tmp;
+	int i;
+	int ret;
+
+	if (!data) {
+		pr_err("Data pointer Null into %s\n", __func__);
+		return -EINVAL;
+	}
+	size = size / (sizeof(u16)); /* Number of 16-bit data words */
+	if (size > VTSV_MAX_TOTAL_RESULT_SIZE) {
+		pr_err("VTSV result size exceeds expected value, no uevent sent\n");
+		return -EINVAL;
+	}
+
+	snprintf(ev_type, sizeof(ev_type), "EVENT_TYPE=SST_VTSV");
+	envp[offset++] = ev_type;
+	snprintf(res_size, sizeof(res_size), "VTSV_RESULT_SIZE=%u", size);
+	envp[offset++] = res_size;
+	tmp = (u16 *)(data);
+	for (i = 0; i < size; i++) {
+		/* Driver assumes all data to be u16; the VTSV service
+		 * layer will type cast to u16 or s16 as appropriate for
+		 * a given data word */
+		snprintf(result[i], VTSV_DATA_STRING_SIZE,
+				"DATA%u=%u", i, *tmp++);
+		envp[offset++] = result[i];
+	}
+	envp[offset] = NULL;
+	ret = kobject_uevent_env(&sst_drv_ctx->dev->kobj, KOBJ_CHANGE, envp);
+	if (ret)
+		pr_err("VTSV event send failed: ret = %d\n", ret);
+	return ret;
+}
+
+static void process_fw_async_msg(struct ipc_post *msg)
+{
+	u32 msg_id;
+	int str_id;
+	int res_size, ret;
+	u32 data_size, i;
+	void *data_offset;
+	struct stream_info *stream;
+	union ipc_header_high msg_high;
+	u32 msg_low, pipe_id;
+
+	msg_high = msg->mrfld_header.p.header_high;
+	msg_low = msg->mrfld_header.p.header_low_payload;
+	msg_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->cmd_id;
+	data_offset = (msg->mailbox_data + sizeof(struct ipc_dsp_hdr));
+	data_size =  msg_low - (sizeof(struct ipc_dsp_hdr));
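+	/*
+	 * Async mailbox layout, as consumed below: a struct ipc_dsp_hdr
+	 * followed by the payload; header_low_payload carries the total
+	 * length, so the payload size is that minus the dsp header size.
+	 */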
+
+	switch (msg_id) {
+	case IPC_SST_PERIOD_ELAPSED_MRFLD:
+		pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
+		str_id = get_stream_id_mrfld(pipe_id);
+		if (str_id > 0) {
+			pr_debug("Period elapsed rcvd for pipe id 0x%x\n", pipe_id);
+			stream = &sst_drv_ctx->streams[str_id];
+			if (stream->period_elapsed)
+				stream->period_elapsed(stream->pcm_substream);
+			if (stream->compr_cb)
+				stream->compr_cb(stream->compr_cb_param);
+		}
+		break;
+
+	case IPC_IA_DRAIN_STREAM_MRFLD:
+		pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
+		str_id = get_stream_id_mrfld(pipe_id);
+		if (str_id > 0) {
+			stream = &sst_drv_ctx->streams[str_id];
+			if (stream->drain_notify)
+				stream->drain_notify(stream->drain_cb_param);
+		}
+		break;
+
+	case IPC_IA_FW_ASYNC_ERR_MRFLD:
+		pr_err("FW sent async error msg:\n");
+		for (i = 0; i < (data_size/4); i++)
+			pr_err("0x%x\n", (*((unsigned int *)data_offset + i)));
+		break;
+
+	case IPC_IA_VTSV_DETECTED:
+		res_size = data_size;
+		ret = send_vtsv_result_event(data_offset, res_size);
+		if (ret)
+			pr_err("VTSV uevent send failed: %d\n", ret);
+		else
+			pr_debug("VTSV uevent sent\n");
+		break;
+
+	case IPC_IA_FW_INIT_CMPLT_MRFLD:
+		process_fw_init(data_offset);
+		break;
+
+	case IPC_IA_BUF_UNDER_RUN_MRFLD:
+		pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
+		str_id = get_stream_id_mrfld(pipe_id);
+		if (str_id > 0)
+			pr_err("Buffer under-run for pipe:%#x str_id:%d\n",
+					pipe_id, str_id);
+		break;
+
+	default:
+		pr_err("Unrecognized async msg from FW msg_id %#x\n", msg_id);
+	}
+}
+
+void sst_process_reply_mrfld(struct ipc_post *msg)
+{
+	unsigned int drv_id;
+	void *data;
+	union ipc_header_high msg_high;
+	u32 msg_low;
+
+	msg_high = msg->mrfld_header.p.header_high;
+	msg_low = msg->mrfld_header.p.header_low_payload;
+
+	pr_debug("IPC process message header %x payload %x\n",
+			msg->mrfld_header.p.header_high.full,
+			msg->mrfld_header.p.header_low_payload);
+
+	drv_id = msg_high.part.drv_id;
+
+	/* Check for async messages */
+	if (drv_id == SST_ASYNC_DRV_ID) {
+		/* FW sent async large message */
+		process_fw_async_msg(msg);
+		goto end;
+	}
+
+	/* FW sent short error response for an IPC */
+	if (msg_high.part.result && drv_id && !msg_high.part.large) {
+		/* 32-bit FW error code in msg_low */
+		pr_err("FW sent error response 0x%x", msg_low);
+		sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
+			msg_high.part.drv_id,
+			msg_high.part.msg_id, NULL, 0);
+		goto end;
+	}
+
+	/* Process all valid responses */
+	/* if it is a large message, the payload contains the size to
+	 * copy from mailbox */
+	if (msg_high.part.large) {
+		data = kzalloc(msg_low, GFP_KERNEL);
+		if (!data)
+			goto end;
+		memcpy(data, (void *) msg->mailbox_data, msg_low);
+		if (sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
+				msg_high.part.drv_id,
+				msg_high.part.msg_id, data, msg_low))
+			kfree(data);
+	} else {
+		sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
+				msg_high.part.drv_id,
+				msg_high.part.msg_id, NULL, 0);
+	}
+
+end:
+	return;
+}
+
+/**
+* sst_process_reply_mfld - Processes reply message from SST
+*
+* @msg:	Pointer to the IPC reply message
+*
+* This function is scheduled by the ISR.
+* It takes a reply msg from the response_queue and
+* acts based on the msg
+*/
+void sst_process_reply_mfld(struct ipc_post *msg)
+{
+	void *data;
+	int str_id;
+	struct stream_info *stream;
+
+	str_id = msg->header.part.str_id;
+
+	pr_debug("sst: IPC process reply for %x\n", msg->header.full);
+
+	/* handle drain notify first */
+	if (msg->header.part.msg_id == IPC_IA_DRAIN_STREAM) {
+		pr_debug("drain message notify\n");
+		if (str_id > 0) {
+			stream = &sst_drv_ctx->streams[str_id];
+			if (stream->drain_notify)
+				stream->drain_notify(stream->drain_cb_param);
+		}
+		return;
+	}
+
+	if (!msg->header.part.large) {
+		if (!msg->header.part.data)
+			pr_debug("Success\n");
+		else
+			pr_err("Error from firmware: %d\n", msg->header.part.data);
+		sst_wake_up_block(sst_drv_ctx, msg->header.part.data,
+				str_id, msg->header.part.msg_id, NULL, 0);
+	} else {
+		pr_debug("Allocating %d\n", msg->header.part.data);
+		data = kzalloc(msg->header.part.data, GFP_KERNEL);
+		if (!data) {
+			pr_err("sst: mem alloc failed\n");
+			return;
+		}
+
+		memcpy(data, (void *)msg->mailbox_data, msg->header.part.data);
+		if (sst_wake_up_block(sst_drv_ctx, 0, str_id,
+				msg->header.part.msg_id, data,
+				msg->header.part.data))
+			kfree(data);
+	}
+	return;
+}
diff --git a/sound/soc/intel/sst/sst_pvt.c b/sound/soc/intel/sst/sst_pvt.c
new file mode 100644
index 0000000..7638cb8
--- /dev/null
+++ b/sound/soc/intel/sst/sst_pvt.c
@@ -0,0 +1,577 @@
+/*
+ *  sst_pvt.c - Intel SST Driver for audio engine
+ *
+ *  Copyright (C) 2008-10	Intel Corp
+ *  Authors:	Vinod Koul <vinod.koul@intel.com>
+ *		Harsha Priya <priya.harsha@intel.com>
+ *		Dharageswari R <dharageswari.r@intel.com>
+ *		KP Jeeja <jeeja.kp@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This file contains all private functions
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kobject.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <linux/firmware.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <sound/asound.h>
+#include <sound/pcm.h>
+#include <sound/compress_offload.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+
+#define SST_EXCE_DUMP_BASE	0xFFFF2c00
+#define SST_EXCE_DUMP_WORD	4
+#define SST_EXCE_DUMP_LEN	32
+#define SST_EXCE_DUMP_SIZE	((SST_EXCE_DUMP_LEN)*(SST_EXCE_DUMP_WORD))
+#define SST_EXCE_DUMP_OFFSET	0xA00
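+/* The dump window is SST_EXCE_DUMP_LEN (32) words of SST_EXCE_DUMP_WORD
+ * (4) bytes each, i.e. 128 bytes of exception context */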
+/*
+ * sst_wait_interruptible - wait on event
+ *
+ * @sst_drv_ctx: Driver context
+ * @block: Driver block to wait on
+ *
+ * This function waits without a timeout (and is interruptible) for a
+ * given block event
+ */
+int sst_wait_interruptible(struct intel_sst_drv *sst_drv_ctx,
+				struct sst_block *block)
+{
+	int retval = 0;
+
+	if (!wait_event_interruptible(sst_drv_ctx->wait_queue,
+				block->condition)) {
+		/* event wake */
+		if (block->ret_code < 0) {
+			pr_err("stream failed %d\n", block->ret_code);
+			retval = -EBUSY;
+		} else {
+			pr_debug("event up\n");
+			retval = 0;
+		}
+	} else {
+		pr_err("signal interrupted\n");
+		retval = -EINTR;
+	}
+	return retval;
+}
+
+unsigned long long read_shim_data(struct intel_sst_drv *sst, int addr)
+{
+	unsigned long long val = 0;
+
+	switch (sst->pci_id) {
+	case SST_CLV_PCI_ID:
+		val = sst_shim_read(sst->shim, addr);
+		break;
+	case SST_MRFLD_PCI_ID:
+	case SST_BYT_PCI_ID:
+		val = sst_shim_read64(sst->shim, addr);
+		break;
+	}
+	return val;
+}
+
+void write_shim_data(struct intel_sst_drv *sst, int addr,
+				unsigned long long data)
+{
+	switch (sst->pci_id) {
+	case SST_CLV_PCI_ID:
+		sst_shim_write(sst->shim, addr, (u32) data);
+		break;
+	case SST_MRFLD_PCI_ID:
+	case SST_BYT_PCI_ID:
+		sst_shim_write64(sst->shim, addr, (u64) data);
+		break;
+	}
+}
+
+
+void dump_sst_shim(struct intel_sst_drv *sst)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&sst->ipc_spin_lock, irq_flags);
+	pr_err("audio shim registers:\n"
+		"CSR: %.8llx\n"
+		"PISR: %.8llx\n"
+		"PIMR: %.8llx\n"
+		"ISRX: %.8llx\n"
+		"ISRD: %.8llx\n"
+		"IMRX: %.8llx\n"
+		"IMRD: %.8llx\n"
+		"IPCX: %.8llx\n"
+		"IPCD: %.8llx\n"
+		"ISRSC: %.8llx\n"
+		"ISRLPESC: %.8llx\n"
+		"IMRSC: %.8llx\n"
+		"IMRLPESC: %.8llx\n"
+		"IPCSC: %.8llx\n"
+		"IPCLPESC: %.8llx\n"
+		"CLKCTL: %.8llx\n"
+		"CSR2: %.8llx\n",
+		read_shim_data(sst, SST_CSR),
+		read_shim_data(sst, SST_PISR),
+		read_shim_data(sst, SST_PIMR),
+		read_shim_data(sst, SST_ISRX),
+		read_shim_data(sst, SST_ISRD),
+		read_shim_data(sst, SST_IMRX),
+		read_shim_data(sst, SST_IMRD),
+		read_shim_data(sst, sst->ipc_reg.ipcx),
+		read_shim_data(sst, sst->ipc_reg.ipcd),
+		read_shim_data(sst, SST_ISRSC),
+		read_shim_data(sst, SST_ISRLPESC),
+		read_shim_data(sst, SST_IMRSC),
+		read_shim_data(sst, SST_IMRLPESC),
+		read_shim_data(sst, SST_IPCSC),
+		read_shim_data(sst, SST_IPCLPESC),
+		read_shim_data(sst, SST_CLKCTL),
+		read_shim_data(sst, SST_CSR2));
+	spin_unlock_irqrestore(&sst->ipc_spin_lock, irq_flags);
+}
+
+void reset_sst_shim(struct intel_sst_drv *sst)
+{
+	union config_status_reg_mrfld csr;
+
+	pr_err("Resetting few Shim registers\n");
+	write_shim_data(sst, sst->ipc_reg.ipcx, 0x0);
+	write_shim_data(sst, sst->ipc_reg.ipcd, 0x0);
+	write_shim_data(sst, SST_ISRX, 0x0);
+	write_shim_data(sst, SST_ISRD, 0x0);
+	write_shim_data(sst, SST_IPCSC, 0x0);
+	write_shim_data(sst, SST_IPCLPESC, 0x0);
+	write_shim_data(sst, SST_ISRSC, 0x0);
+	write_shim_data(sst, SST_ISRLPESC, 0x0);
+	write_shim_data(sst, SST_PISR, 0x0);
+
+	/* Reset the CSR value to the default value. i.e 0x1e40001*/
+	csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR);
+	csr.part.xt_snoop = 0;
+	csr.full &= ~(0xf);
+	csr.full |= 0x01;
+	sst_shim_write64(sst_drv_ctx->shim, SST_CSR, csr.full);
+}
+
+static void dump_sst_crash_area(void)
+{
+	void __iomem *fw_dump_area;
+	u32 dump_word;
+	u8 i;
+
+	/* dump the firmware SRAM where the exception details are stored */
+	fw_dump_area = ioremap_nocache(SST_EXCE_DUMP_BASE, SST_EXCE_DUMP_SIZE);
+	if (!fw_dump_area) {
+		pr_err("ioremap of the FW exception dump area failed\n");
+		return;
+	}
+
+	pr_err("Firmware exception dump begins:\n");
+	pr_err("Exception start signature:%#x\n", readl(fw_dump_area + SST_EXCE_DUMP_WORD));
+	pr_err("EXCCAUSE:\t\t\t%#x\n", readl(fw_dump_area + SST_EXCE_DUMP_WORD*2));
+	pr_err("EXCVADDR:\t\t\t%#x\n", readl(fw_dump_area + (SST_EXCE_DUMP_WORD*3)));
+	pr_err("Firmware additional data:\n");
+
+	/* dump remaining FW debug data */
+	for (i = 1; i < (SST_EXCE_DUMP_LEN-4+1); i++) {
+		dump_word = readl(fw_dump_area + (SST_EXCE_DUMP_WORD*3)
+						+ (i*SST_EXCE_DUMP_WORD));
+		pr_err("Data[%d]=%#x\n", i, dump_word);
+	}
+	iounmap(fw_dump_area);
+	pr_err("Firmware exception dump ends\n");
+}
+
+/**
+ * dump_ram_area - dumps the iram/dram into a local buff
+ *
+ * @sst			: pointer to driver context
+ * @dump_buf		: pointer to the struct containing the dump buffers
+ * @type		: ram type to dump, SST_IRAM or SST_DRAM
+ *
+ * This function dumps the iram/dram data into the respective buffers
+ */
+static void dump_ram_area(struct intel_sst_drv *sst,
+			struct sst_dump_buf *dump_buf, enum sst_ram_type type)
+{
+	if (type == SST_IRAM) {
+		pr_err("Iram dumped in buffer\n");
+		memcpy_fromio(dump_buf->iram_buf.buf, sst->iram,
+				dump_buf->iram_buf.size);
+	} else {
+		pr_err("Dram dumped in buffer\n");
+		memcpy_fromio(dump_buf->dram_buf.buf, sst->dram,
+				dump_buf->dram_buf.size);
+	}
+}
+
+/* FIXME: IRAM/DRAM dump disabled due to timeout issues */
+static void sst_stream_recovery(struct intel_sst_drv *sst)
+{
+	struct stream_info *str_info;
+	u8 i;
+	for (i = 1; i <= sst->info.max_streams; i++) {
+		pr_err("Audio: Stream %d, state %d\n", i, sst->streams[i].status);
+		if (sst->streams[i].status != STREAM_UN_INIT) {
+			str_info = &sst_drv_ctx->streams[i];
+			if (str_info->pcm_substream)
+				snd_pcm_stop(str_info->pcm_substream, SNDRV_PCM_STATE_SETUP);
+			else if (str_info->compr_cb_param)
+				snd_compr_stop(str_info->compr_cb_param);
+			sst->streams[i].status = STREAM_RESET;
+		}
+	}
+}
+
+static void sst_dump_ipc_dispatch_lists(struct intel_sst_drv *sst)
+{
+	struct ipc_post *m, *_m;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&sst->ipc_spin_lock, irq_flags);
+	if (list_empty(&sst->ipc_dispatch_list))
+		pr_err("ipc dispatch list is Empty\n");
+
+	list_for_each_entry_safe(m, _m, &sst->ipc_dispatch_list, node) {
+		pr_err("ipc-dispatch:pending msg header %#x\n", m->header.full);
+		list_del(&m->node);
+		kfree(m->mailbox_data);
+		kfree(m);
+	}
+	spin_unlock_irqrestore(&sst->ipc_spin_lock, irq_flags);
+}
+
+static void sst_dump_rx_lists(struct intel_sst_drv *sst)
+{
+	struct ipc_post *m, *_m;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&sst->rx_msg_lock, irq_flags);
+	if (list_empty(&sst->rx_list))
+		pr_err("rx msg list is empty\n");
+
+	list_for_each_entry_safe(m, _m, &sst->rx_list, node) {
+		pr_err("rx: pending msg header %#x\n", m->header.full);
+		list_del(&m->node);
+		kfree(m->mailbox_data);
+		kfree(m);
+	}
+	spin_unlock_irqrestore(&sst->rx_msg_lock, irq_flags);
+}
+
+/* num_dwords: should be multiple of 4 */
+static void dump_buffer_fromio(void __iomem *from,
+				     unsigned int num_dwords)
+{
+	int i;
+	u32 val[4];
+
+	if (num_dwords % 4) {
+		pr_err("%s: num_dwords %d not multiple of 4\n",
+				__func__, num_dwords);
+		return;
+	}
+
+	pr_err("****** Start *******\n");
+	pr_err("Dump %d dwords, from location %p\n", num_dwords, from);
+
+	for (i = 0; i < num_dwords; ) {
+		val[0] = ioread32(from + (i++ * 4));
+		val[1] = ioread32(from + (i++ * 4));
+		val[2] = ioread32(from + (i++ * 4));
+		val[3] = ioread32(from + (i++ * 4));
+		pr_err("%.8x %.8x %.8x %.8x\n", val[0], val[1], val[2], val[3]);
+	}
+	pr_err("****** End *********\n\n\n");
+}
+
+static void sst_stall_lpe_n_wait(struct intel_sst_drv *sst)
+{
+	union config_status_reg_mrfld csr;
+	void __iomem *dma_reg0 = sst->debugfs.dma_reg[0];
+	void __iomem *dma_reg1 = sst->debugfs.dma_reg[1];
+	int offset = 0x3A0; /* ChEnReg of DMA */
+
+	pr_err("Before stall: DMA_0 Ch_EN %#llx DMA_1 Ch_EN %#llx\n",
+				sst_reg_read64(dma_reg0, offset),
+				sst_reg_read64(dma_reg1, offset));
+
+	/* Stall LPE */
+	csr.full = sst_shim_read64(sst->shim, SST_CSR);
+	csr.part.runstall  = 1;
+	sst_shim_write64(sst->shim, SST_CSR, csr.full);
+
+	/* A 5ms delay, before resetting the LPE */
+	usleep_range(5000, 5100);
+
+	pr_err("After stall: DMA_0 Ch_EN %#llx DMA_1 Ch_EN %#llx\n",
+				sst_reg_read64(dma_reg0, offset),
+				sst_reg_read64(dma_reg1, offset));
+}
+
+#if IS_ENABLED(CONFIG_INTEL_SCU_IPC)
+static void sst_send_scu_reset_ipc(struct intel_sst_drv *sst)
+{
+	int ret = 0;
+
+	/* Reset and power gate the LPE */
+	ret = intel_scu_ipc_simple_command(IPC_SCU_LPE_RESET, 0);
+	if (ret) {
+		pr_err("Power gating LPE failed %d\n", ret);
+		reset_sst_shim(sst);
+	} else {
+		pr_err("LPE reset via SCU is success!!\n");
+		pr_err("dump after LPE power cycle\n");
+		dump_sst_shim(sst);
+
+		/* Mask the DMA & SSP interrupts */
+		sst_shim_write64(sst->shim, SST_IMRX, 0xFFFF0038);
+	}
+}
+#else
+static void sst_send_scu_reset_ipc(struct intel_sst_drv *sst)
+{
+	pr_debug("%s: do nothing, just return\n", __func__);
+}
+#endif
+
+#define SRAM_OFFSET_MRFLD	0xc00
+#define NUM_DWORDS		256
+void sst_do_recovery_mrfld(struct intel_sst_drv *sst)
+{
+	char iram_event[30], dram_event[30], ddr_imr_event[65], event_type[30];
+	char *envp[5];
+	int env_offset = 0;
+
+	/*
+	 * Set the firmware state to uninit so that the firmware will be
+	 * re-downloaded on the next request. This is because the firmware
+	 * not responding for 1 sec is treated as an unrecoverable FW error.
+	 */
+	pr_err("Audio: Intel SST engine encountered an unrecoverable error\n");
+	pr_err("Audio: trying to reset the dsp now\n");
+
+	mutex_lock(&sst->sst_lock);
+	sst->sst_state = SST_UN_INIT;
+	sst_stream_recovery(sst);
+	mutex_unlock(&sst->sst_lock);
+
+	dump_stack();
+	dump_sst_shim(sst);
+
+	sst_stall_lpe_n_wait(sst);
+
+	/* dump mailbox and sram */
+	pr_err("Dumping Mailbox...\n");
+	dump_buffer_fromio(sst->mailbox, NUM_DWORDS);
+	pr_err("Dumping SRAM...\n");
+	dump_buffer_fromio(sst->mailbox + SRAM_OFFSET_MRFLD, NUM_DWORDS);
+
+	if (sst_drv_ctx->ops->set_bypass) {
+		sst_drv_ctx->ops->set_bypass(true);
+		dump_ram_area(sst, &(sst->dump_buf), SST_IRAM);
+		dump_ram_area(sst, &(sst->dump_buf), SST_DRAM);
+		sst_drv_ctx->ops->set_bypass(false);
+	}
+
+	snprintf(event_type, sizeof(event_type), "EVENT_TYPE=SST_RECOVERY");
+	envp[env_offset++] = event_type;
+	snprintf(iram_event, sizeof(iram_event), "IRAM_DUMP_SIZE=%d",
+					sst->dump_buf.iram_buf.size);
+	envp[env_offset++] = iram_event;
+	snprintf(dram_event, sizeof(dram_event), "DRAM_DUMP_SIZE=%d",
+					sst->dump_buf.dram_buf.size);
+	envp[env_offset++] = dram_event;
+
+	if (sst->ddr != NULL) {
+		snprintf(ddr_imr_event, sizeof(ddr_imr_event),
+			"DDR_IMR_DUMP_SIZE=%d DDR_IMR_ADDRESS=%p",
+			(sst->ddr_end - sst->ddr_base), sst->ddr);
+		envp[env_offset++] = ddr_imr_event;
+	}
+	envp[env_offset] = NULL;
+	kobject_uevent_env(&sst->dev->kobj, KOBJ_CHANGE, envp);
+	pr_err("Recovery Uevent Sent!!\n");
+
+	/* Send IPC to SCU to power gate and reset the LPE */
+	sst_send_scu_reset_ipc(sst);
+
+	pr_err("reset the pvt id from val %d\n", sst_drv_ctx->pvt_id);
+	spin_lock(&sst_drv_ctx->pvt_id_lock);
+	sst_drv_ctx->pvt_id = 0;
+	spin_unlock(&sst_drv_ctx->pvt_id_lock);
+	sst_dump_ipc_dispatch_lists(sst_drv_ctx);
+	sst_dump_rx_lists(sst_drv_ctx);
+
+	if (sst_drv_ctx->fw_in_mem) {
+		pr_err("Clearing the cached FW copy...\n");
+		kfree(sst_drv_ctx->fw_in_mem);
+		sst_drv_ctx->fw_in_mem = NULL;
+	}
+}
+
+void sst_do_recovery(struct intel_sst_drv *sst)
+{
+	pr_err("Audio: Intel SST engine encountered an unrecoverable error\n");
+
+	dump_stack();
+	dump_sst_shim(sst);
+
+	if (sst->sst_state == SST_FW_RUNNING &&
+		sst_drv_ctx->pci_id == SST_CLV_PCI_ID)
+		dump_sst_crash_area();
+
+	sst_dump_ipc_dispatch_lists(sst_drv_ctx);
+}
+
+/*
+ * sst_wait_timeout - wait on event for timeout
+ *
+ * @sst_drv_ctx: Driver context
+ * @block: Driver block to wait on
+ *
+ * This function waits with a timeout value (and is not interruptible) on a
+ * given block event
+ */
+int sst_wait_timeout(struct intel_sst_drv *sst_drv_ctx, struct sst_block *block)
+{
+	int retval = 0;
+
+	/* NOTE:
+	 * Observed that FW processes the alloc msg and replies even
+	 * before the alloc thread has finished execution */
+	pr_debug("sst: waiting for condition %x ipc %d drv_id %d\n",
+		       block->condition, block->msg_id, block->drv_id);
+	if (wait_event_timeout(sst_drv_ctx->wait_queue,
+				block->condition,
+				msecs_to_jiffies(SST_BLOCK_TIMEOUT))) {
+		/* event wake */
+		pr_debug("sst: Event wake %x\n", block->condition);
+		pr_debug("sst: message ret: %d\n", block->ret_code);
+		retval = -block->ret_code;
+	} else {
+		block->on = false;
+		pr_err("sst: Wait timed-out condition:%#x, msg_id:%#x fw_state %#x\n",
+				block->condition, block->msg_id, sst_drv_ctx->sst_state);
+
+		if (sst_drv_ctx->sst_state == SST_FW_LOADED ||
+			sst_drv_ctx->sst_state ==  SST_START_INIT) {
+			pr_err("Can't recover as timedout while downloading the FW\n");
+			pr_err("reseting fw state to unint from %d ...\n", sst_drv_ctx->sst_state);
+			sst_drv_ctx->sst_state = SST_UN_INIT;
+
+			dump_sst_shim(sst_drv_ctx);
+
+			/* Reset & Power Off the LPE only for MRFLD */
+			if (sst_drv_ctx->pci_id == SST_MRFLD_PCI_ID) {
+				sst_stall_lpe_n_wait(sst_drv_ctx);
+
+				/* Send IPC to SCU to power gate and reset the LPE */
+				sst_send_scu_reset_ipc(sst_drv_ctx);
+			}
+
+		} else {
+			if (sst_drv_ctx->ops->do_recovery)
+				sst_drv_ctx->ops->do_recovery(sst_drv_ctx);
+		}
+
+		retval = -EBUSY;
+	}
+	return retval;
+}
+
+/*
+ * sst_create_ipc_msg - create an IPC message
+ *
+ * @arg: ipc message
+ * @large: large or short message
+ *
+ * this function allocates structures to send a large or short
+ * message to the firmware
+ */
+int sst_create_ipc_msg(struct ipc_post **arg, bool large)
+{
+	struct ipc_post *msg;
+
+	msg = kzalloc(sizeof(struct ipc_post), GFP_ATOMIC);
+	if (!msg) {
+		pr_err("kzalloc ipc msg failed\n");
+		return -ENOMEM;
+	}
+	if (large) {
+		msg->mailbox_data = kzalloc(SST_MAILBOX_SIZE, GFP_ATOMIC);
+		if (!msg->mailbox_data) {
+			kfree(msg);
+			pr_err("kzalloc mailbox_data failed");
+			return -ENOMEM;
+		}
+	} else {
+		msg->mailbox_data = NULL;
+	}
+	msg->is_large = large;
+	*arg = msg;
+	return 0;
+}
+
+/*
+ * sst_create_block_and_ipc_msg - Creates IPC message and sst block
+ * @arg: passed to sst_create_ipc_message API
+ * @large: large or short message
+ * @sst_drv_ctx: sst driver context
+ * @block: return block allocated
+ * @msg_id: IPC message id
+ * @drv_id: stream id or private id
+ */
+int sst_create_block_and_ipc_msg(struct ipc_post **arg, bool large,
+		struct intel_sst_drv *sst_drv_ctx, struct sst_block **block,
+		u32 msg_id, u32 drv_id)
+{
+	int retval = 0;
+
+	retval = sst_create_ipc_msg(arg, large);
+	if (retval)
+		return retval;
+	*block = sst_create_block(sst_drv_ctx, msg_id, drv_id);
+	if (*block == NULL) {
+		kfree(*arg);
+		return -ENOMEM;
+	}
+	return retval;
+}
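+
+/*
+ * Sketch of the blocking-IPC pattern built from the two helpers above,
+ * as used throughout this driver (msg_id/drv_id are caller-chosen):
+ *
+ *	struct ipc_post *msg;
+ *	struct sst_block *block;
+ *	int ret;
+ *
+ *	ret = sst_create_block_and_ipc_msg(&msg, true, sst_drv_ctx,
+ *					   &block, msg_id, drv_id);
+ *	if (ret)
+ *		return ret;
+ *	(fill the msg headers and msg->mailbox_data for the command)
+ *	sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+ *	ret = sst_wait_timeout(sst_drv_ctx, block);
+ *	sst_free_block(sst_drv_ctx, block);
+ */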
+
+/*
+ * sst_clean_stream - clean the stream context
+ *
+ * @stream: stream structure
+ *
+ * This function resets the stream context;
+ * it should be called when the stream is freed
+ */
+void sst_clean_stream(struct stream_info *stream)
+{
+	stream->status = STREAM_UN_INIT;
+	stream->prev = STREAM_UN_INIT;
+	mutex_lock(&stream->lock);
+	stream->cumm_bytes = 0;
+	mutex_unlock(&stream->lock);
+}
+
diff --git a/sound/soc/intel/sst/sst_stream.c b/sound/soc/intel/sst/sst_stream.c
new file mode 100644
index 0000000..9e71b44
--- /dev/null
+++ b/sound/soc/intel/sst/sst_stream.c
@@ -0,0 +1,831 @@
+/*
+ *  sst_stream.c - Intel SST Driver for audio engine
+ *
+ *  Copyright (C) 2008-10 Intel Corp
+ *  Authors:	Vinod Koul <vinod.koul@intel.com>
+ *		Harsha Priya <priya.harsha@intel.com>
+ *		Dharageswari R <dharageswari.r@intel.com>
+ *		KP Jeeja <jeeja.kp@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This file contains the stream operations of SST driver
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <asm/platform_sst_audio.h>
+#include "../sst_platform.h"
+#include "../platform_ipc_v2.h"
+#include "sst.h"
+#include "sst_trace.h"
+
+/**
+ * sst_alloc_stream_ctp - Send msg for a new stream ID
+ *
+ * @params:	stream params
+ * @block:	sst block used to wait for the FW reply
+ *
+ * This function is called by any function which wants to start
+ * a new stream. It also checks if an idle stream exists and, if so,
+ * initializes that idle stream id for this request
+ */
+int sst_alloc_stream_ctp(char *params, struct sst_block *block)
+{
+	struct ipc_post *msg = NULL;
+	struct snd_sst_alloc_params alloc_param;
+	unsigned int pcm_slot = 0x03, num_ch;
+	int str_id;
+	struct snd_sst_params *str_params;
+	struct snd_sst_stream_params *sparams;
+	struct snd_sst_alloc_params_ext *aparams;
+	struct stream_info *str_info;
+	unsigned int stream_ops, device;
+	u8 codec;
+
+	pr_debug("In %s\n", __func__);
+
+	BUG_ON(!params);
+	str_params = (struct snd_sst_params *)params;
+	stream_ops = str_params->ops;
+	codec = str_params->codec;
+	device = str_params->device_type;
+	sparams = &str_params->sparams;
+	aparams = &str_params->aparams;
+	num_ch = sst_get_num_channel(str_params);
+
+	pr_debug("period_size = %d\n", aparams->frag_size);
+	pr_debug("ring_buf_addr = 0x%x\n", aparams->ring_buf_info[0].addr);
+	pr_debug("ring_buf_size = %d\n", aparams->ring_buf_info[0].size);
+	pr_debug("In alloc device_type=%d\n", str_params->device_type);
+	pr_debug("In alloc sg_count =%d\n", aparams->sg_count);
+
+	str_id = str_params->stream_id;
+	if (str_id <= 0)
+		return -EBUSY;
+
+	/*allocate device type context*/
+	sst_init_stream(&sst_drv_ctx->streams[str_id], codec,
+			str_id, stream_ops, pcm_slot);
+	/* send msg to FW to allocate a stream */
+	if (sst_create_ipc_msg(&msg, true))
+		return -ENOMEM;
+
+	alloc_param.str_type.codec_type = codec;
+	alloc_param.str_type.str_type = str_params->stream_type;
+	alloc_param.str_type.operation = stream_ops;
+	alloc_param.str_type.protected_str = 0; /* non drm */
+	alloc_param.str_type.time_slots = pcm_slot;
+	alloc_param.str_type.reserved = 0;
+	alloc_param.str_type.result = 0;
+	memcpy(&alloc_param.stream_params, sparams,
+			sizeof(struct snd_sst_stream_params));
+	memcpy(&alloc_param.alloc_params, aparams,
+			sizeof(struct snd_sst_alloc_params_ext));
+	block->drv_id = str_id;
+	block->msg_id = IPC_IA_ALLOC_STREAM;
+	sst_fill_header(&msg->header, IPC_IA_ALLOC_STREAM, 1, str_id);
+	msg->header.part.data = sizeof(alloc_param) + sizeof(u32);
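+	/* CTP alloc mailbox layout: the 32-bit IPC header followed by the
+	 * alloc params, copied below in that order */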
+	memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
+	memcpy(msg->mailbox_data + sizeof(u32), &alloc_param,
+			sizeof(alloc_param));
+	str_info = &sst_drv_ctx->streams[str_id];
+	str_info->num_ch = num_ch;
+	sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+	return str_id;
+}
+
+int sst_alloc_stream_mrfld(char *params, struct sst_block *block)
+{
+	struct ipc_post *msg = NULL;
+	struct snd_sst_alloc_mrfld alloc_param;
+	struct ipc_dsp_hdr dsp_hdr;
+	struct snd_sst_params *str_params;
+	struct snd_sst_tstamp fw_tstamp;
+	unsigned int str_id, pipe_id, pvt_id, task_id;
+	u32 len = 0;
+	struct stream_info *str_info;
+	int i, num_ch;
+
+	pr_debug("In %s\n", __func__);
+	BUG_ON(!params);
+
+	str_params = (struct snd_sst_params *)params;
+	memset(&alloc_param, 0, sizeof(alloc_param));
+	alloc_param.operation = str_params->ops;
+	alloc_param.codec_type = str_params->codec;
+	alloc_param.sg_count = str_params->aparams.sg_count;
+	alloc_param.ring_buf_info[0].addr = str_params->aparams.ring_buf_info[0].addr;
+	alloc_param.ring_buf_info[0].size = str_params->aparams.ring_buf_info[0].size;
+	alloc_param.frag_size = str_params->aparams.frag_size;
+
+	memcpy(&alloc_param.codec_params, &str_params->sparams,
+			sizeof(struct snd_sst_stream_params));
+
+	/* Fill channel map params for multichannel support.
+	 * Ideally the channel map should be received from upper layers
+	 * for multichannel support.
+	 * Currently it is hardcoded as per FW requirements.
+	 */
+	num_ch = sst_get_num_channel(str_params);
+	for (i = 0; i < 8; i++) {
+		if (i < num_ch)
+			alloc_param.codec_params.uc.pcm_params.channel_map[i] = i;
+		else
+			alloc_param.codec_params.uc.pcm_params.channel_map[i] = 0xFF;
+	}
+
+	str_id = str_params->stream_id;
+	pipe_id = str_params->device_type;
+	task_id = str_params->task;
+	sst_drv_ctx->streams[str_id].pipe_id = pipe_id;
+	sst_drv_ctx->streams[str_id].task_id = task_id;
+	sst_drv_ctx->streams[str_id].num_ch = num_ch;
+
+	pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+	if (sst_drv_ctx->info.lpe_viewpt_rqd)
+		alloc_param.ts = sst_drv_ctx->info.mailbox_start +
+			sst_drv_ctx->tstamp + (str_id * sizeof(fw_tstamp));
+	else
+		alloc_param.ts = sst_drv_ctx->mailbox_add +
+			sst_drv_ctx->tstamp + (str_id * sizeof(fw_tstamp));
+
+	pr_debug("alloc tstamp location = 0x%x\n", alloc_param.ts);
+	pr_debug("assigned pipe id 0x%x to task %d\n", pipe_id, task_id);
+
+	/*allocate device type context*/
+	sst_init_stream(&sst_drv_ctx->streams[str_id], alloc_param.codec_type,
+			str_id, alloc_param.operation, 0);
+	/* send msg to FW to allocate a stream */
+	if (sst_create_ipc_msg(&msg, true))
+		return -ENOMEM;
+
+	block->drv_id = pvt_id;
+	block->msg_id = IPC_CMD;
+
+	sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+			      task_id, 1, pvt_id);
+	pr_debug("header:%x\n", msg->mrfld_header.p.header_high.full);
+	msg->mrfld_header.p.header_high.part.res_rqd = 1;
+
+	len = msg->mrfld_header.p.header_low_payload = sizeof(alloc_param) + sizeof(dsp_hdr);
+	sst_fill_header_dsp(&dsp_hdr, IPC_IA_ALLOC_STREAM_MRFLD, pipe_id, sizeof(alloc_param));
+	memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+	memcpy(msg->mailbox_data + sizeof(dsp_hdr), &alloc_param,
+			sizeof(alloc_param));
+	trace_sst_stream("ALLOC ->", str_id, pipe_id);
+	str_info = &sst_drv_ctx->streams[str_id];
+	pr_debug("header:%x\n", msg->mrfld_header.p.header_high.full);
+	pr_debug("response rqd: %x", msg->mrfld_header.p.header_high.part.res_rqd);
+	pr_debug("calling post_message\n");
+	pr_info("Alloc for str %d pipe %#x\n", str_id, pipe_id);
+
+	sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+	return str_id;
+}
+
+/**
+* sst_start_stream - Send msg for starting stream
+* @str_id:	 stream ID
+*
+* This function is called by any function which wants to start
+* a stream.
+*/
+int sst_start_stream(int str_id)
+{
+	int retval = 0, pvt_id;
+	u32 len = 0;
+	struct ipc_post *msg = NULL;
+	struct ipc_dsp_hdr dsp_hdr;
+	struct stream_info *str_info;
+
+	pr_debug("sst_start_stream for %d\n", str_id);
+	str_info = get_stream_info(str_id);
+	if (!str_info)
+		return -EINVAL;
+	if (str_info->status != STREAM_RUNNING)
+		return -EBADRQC;
+
+	if (sst_create_ipc_msg(&msg, true))
+		return -ENOMEM;
+
+	if (!sst_drv_ctx->use_32bit_ops) {
+		pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+		pr_debug("pvt_id = %d, pipe id = %d, task = %d\n",
+			 pvt_id, str_info->pipe_id, str_info->task_id);
+		sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+				      str_info->task_id, 1, pvt_id);
+
+		len = sizeof(u16) + sizeof(dsp_hdr);
+		msg->mrfld_header.p.header_low_payload = len;
+		sst_fill_header_dsp(&dsp_hdr, IPC_IA_START_STREAM_MRFLD,
+				str_info->pipe_id, sizeof(u16));
+		memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+		memset(msg->mailbox_data + sizeof(dsp_hdr), 0, sizeof(u16));
+		trace_sst_stream("START ->", str_id, str_info->pipe_id);
+		pr_info("Start for str %d pipe %#x\n", str_id, str_info->pipe_id);
+
+	} else {
+		pr_debug("fill START_STREAM for CTP\n");
+		sst_fill_header(&msg->header, IPC_IA_START_STREAM, 1, str_id);
+		msg->header.part.data = sizeof(u32) + sizeof(u32);
+		memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
+		memset(msg->mailbox_data + sizeof(u32), 0, sizeof(u32));
+	}
+	sst_drv_ctx->ops->sync_post_message(msg);
+	return retval;
+}
+
+int sst_send_byte_stream_mrfld(void *sbytes)
+{
+	struct ipc_post *msg = NULL;
+	struct snd_sst_bytes_v2 *bytes = (struct snd_sst_bytes_v2 *) sbytes;
+	u32 length;
+	int pvt_id, ret = 0;
+	struct sst_block *block = NULL;
+
+	pr_debug("%s: type:%u ipc_msg:%u block:%u task_id:%u pipe: %#x length:%#x\n",
+		__func__, bytes->type, bytes->ipc_msg,
+		bytes->block, bytes->task_id,
+		bytes->pipe_id, bytes->len);
+
+	/* need some err check as this is user data, perhaps move this to the
+	 * platform driver and pass the struct
+	 */
+	if (sst_create_ipc_msg(&msg, true))
+		return -ENOMEM;
+
+	pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+	sst_fill_header_mrfld(&msg->mrfld_header, bytes->ipc_msg, bytes->task_id,
+			      1, pvt_id);
+	msg->mrfld_header.p.header_high.part.res_rqd = bytes->block;
+	length = bytes->len;
+	msg->mrfld_header.p.header_low_payload = length;
+	pr_debug("length is %d\n", length);
+	memcpy(msg->mailbox_data, &bytes->bytes, bytes->len);
+	trace_sst_stream("BYTES ->", bytes->type, bytes->pipe_id);
+	if (bytes->block) {
+		block = sst_create_block(sst_drv_ctx, bytes->ipc_msg, pvt_id);
+		if (block == NULL) {
+			kfree(msg);
+			return -ENOMEM;
+		}
+	}
+	sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+	pr_debug("msg->mrfld_header.p.header_low_payload:%d", msg->mrfld_header.p.header_low_payload);
+	if (bytes->block) {
+		ret = sst_wait_timeout(sst_drv_ctx, block);
+		if (ret) {
+			pr_err("%s: fw returned err %d\n", __func__, ret);
+			sst_free_block(sst_drv_ctx, block);
+			return ret;
+		}
+	}
+	if (bytes->type == SND_SST_BYTES_GET) {
+		/* copy the reply and send back
+		 * we need to update only sz and payload
+		 */
+		if (bytes->block) {
+			unsigned char *r = block->data;
+			pr_debug("read back %d bytes", bytes->len);
+			memcpy(bytes->bytes, r, bytes->len);
+			trace_sst_stream("BYTES <-", bytes->type, bytes->pipe_id);
+		}
+	}
+	if (bytes->block)
+		sst_free_block(sst_drv_ctx, block);
+	return 0;
+}
+
+int sst_send_probe_bytes(struct intel_sst_drv *sst)
+{
+	struct ipc_post *msg = NULL;
+	struct sst_block *block;
+	int ret_val = 0;
+
+	ret_val = sst_create_block_and_ipc_msg(&msg, true, sst,
+			&block, IPC_IA_DBG_SET_PROBE_PARAMS, 0);
+	if (ret_val) {
+		pr_err("Can't allocate block/msg: Probe Byte Stream\n");
+		return ret_val;
+	}
+
+	sst_fill_header(&msg->header, IPC_IA_DBG_SET_PROBE_PARAMS, 1, 0);
+
+	msg->header.part.data = sizeof(u32) + sst->probe_bytes->len;
+	memcpy(msg->mailbox_data, &msg->header.full, sizeof(u32));
+	memcpy(msg->mailbox_data + sizeof(u32), sst->probe_bytes->bytes,
+				sst->probe_bytes->len);
+
+	sst_add_to_dispatch_list_and_post(sst, msg);
+	ret_val = sst_wait_timeout(sst, block);
+	sst_free_block(sst, block);
+	if (ret_val)
+		pr_err("set probe stream param..timeout!\n");
+	return ret_val;
+}
+
+/*
+ * sst_pause_stream - Send msg for a pausing stream
+ * @str_id:	 stream ID
+ *
+ * This function is called by any function which wants to pause
+ * an already running stream.
+ */
+int sst_pause_stream(int str_id)
+{
+	int retval = 0, pvt_id, len;
+	struct ipc_post *msg = NULL;
+	struct stream_info *str_info;
+	struct intel_sst_ops *ops;
+	struct sst_block *block;
+	struct ipc_dsp_hdr dsp_hdr;
+
+	pr_debug("SST DBG:sst_pause_stream for %d\n", str_id);
+	str_info = get_stream_info(str_id);
+	if (!str_info)
+		return -EINVAL;
+	ops = sst_drv_ctx->ops;
+	if (str_info->status == STREAM_PAUSED)
+		return 0;
+	if (str_info->status == STREAM_RUNNING ||
+		str_info->status == STREAM_INIT) {
+		if (str_info->prev == STREAM_UN_INIT)
+			return -EBADRQC;
+		if (!sst_drv_ctx->use_32bit_ops) {
+			pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+			retval = sst_create_block_and_ipc_msg(&msg, true,
+					sst_drv_ctx, &block, IPC_CMD, pvt_id);
+			if (retval)
+				return retval;
+			sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+					str_info->task_id, 1, pvt_id);
+			msg->mrfld_header.p.header_high.part.res_rqd = 1;
+			len = sizeof(dsp_hdr);
+			msg->mrfld_header.p.header_low_payload = len;
+			sst_fill_header_dsp(&dsp_hdr, IPC_IA_PAUSE_STREAM_MRFLD,
+						str_info->pipe_id, 0);
+			memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+			trace_sst_stream("PAUSE ->", str_id, str_info->pipe_id);
+		} else {
+			retval = sst_create_block_and_ipc_msg(&msg, false,
+					sst_drv_ctx, &block,
+					IPC_IA_PAUSE_STREAM, str_id);
+			if (retval)
+				return retval;
+			sst_fill_header(&msg->header, IPC_IA_PAUSE_STREAM,
+								0, str_id);
+		}
+		sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+		retval = sst_wait_timeout(sst_drv_ctx, block);
+		sst_free_block(sst_drv_ctx, block);
+		if (retval == 0) {
+			str_info->prev = str_info->status;
+			str_info->status = STREAM_PAUSED;
+		} else if (retval == -SST_ERR_INVALID_STREAM_ID) {
+			retval = -EINVAL;
+			mutex_lock(&sst_drv_ctx->stream_lock);
+			sst_clean_stream(str_info);
+			mutex_unlock(&sst_drv_ctx->stream_lock);
+		}
+	} else {
+		retval = -EBADRQC;
+		pr_debug("SST DBG:BADRQC for stream\n ");
+	}
+
+	return retval;
+}
+
+/**
+ * sst_resume_stream - Send msg for resuming stream
+ * @str_id:		stream ID
+ *
+ * This function is called by any function which wants to resume
+ * an already paused stream.
+ */
+int sst_resume_stream(int str_id)
+{
+	int retval = 0;
+	struct ipc_post *msg = NULL;
+	struct stream_info *str_info;
+	struct intel_sst_ops *ops;
+	struct sst_block *block = NULL;
+	int pvt_id, len;
+	struct ipc_dsp_hdr dsp_hdr;
+
+	pr_debug("SST DBG:sst_resume_stream for %d\n", str_id);
+	str_info = get_stream_info(str_id);
+	if (!str_info)
+		return -EINVAL;
+	ops = sst_drv_ctx->ops;
+	if (str_info->status == STREAM_RUNNING)
+		return 0;
+	if (str_info->status == STREAM_PAUSED) {
+		if (!sst_drv_ctx->use_32bit_ops) {
+			pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+			retval = sst_create_block_and_ipc_msg(&msg, true,
+					sst_drv_ctx, &block, IPC_CMD, pvt_id);
+			if (retval)
+				return retval;
+			sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+					str_info->task_id, 1, pvt_id);
+			msg->mrfld_header.p.header_high.part.res_rqd = 1;
+			len = sizeof(dsp_hdr);
+			msg->mrfld_header.p.header_low_payload = len;
+			sst_fill_header_dsp(&dsp_hdr,
+						IPC_IA_RESUME_STREAM_MRFLD,
+						str_info->pipe_id, 0);
+			memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+			trace_sst_stream("RESUME->", str_id, str_info->pipe_id);
+		} else {
+			retval = sst_create_block_and_ipc_msg(&msg, false,
+					sst_drv_ctx, &block,
+					IPC_IA_RESUME_STREAM, str_id);
+			if (retval)
+				return retval;
+			sst_fill_header(&msg->header, IPC_IA_RESUME_STREAM,
+								0, str_id);
+		}
+		sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+		retval = sst_wait_timeout(sst_drv_ctx, block);
+		sst_free_block(sst_drv_ctx, block);
+		if (!retval) {
+			if (str_info->prev == STREAM_RUNNING)
+				str_info->status = STREAM_RUNNING;
+			else
+				str_info->status = STREAM_INIT;
+			str_info->prev = STREAM_PAUSED;
+		} else if (retval == -SST_ERR_INVALID_STREAM_ID) {
+			retval = -EINVAL;
+			mutex_lock(&sst_drv_ctx->stream_lock);
+			sst_clean_stream(str_info);
+			mutex_unlock(&sst_drv_ctx->stream_lock);
+		}
+	} else {
+		retval = -EBADRQC;
+		pr_err("SST ERR: BADQRC for stream\n");
+	}
+
+	return retval;
+}
+
+/**
+ * sst_drop_stream - Send msg for stopping stream
+ * @str_id:		stream ID
+ *
+ * This function is called by any function which wants to stop
+ * a stream.
+ */
+int sst_drop_stream(int str_id)
+{
+	int retval = 0, pvt_id;
+	struct stream_info *str_info;
+	struct ipc_post *msg = NULL;
+	struct ipc_dsp_hdr dsp_hdr;
+
+	pr_debug("SST DBG:sst_drop_stream for %d\n", str_id);
+	str_info = get_stream_info(str_id);
+	if (!str_info)
+		return -EINVAL;
+
+	if (str_info->status != STREAM_UN_INIT) {
+		if (sst_drv_ctx->use_32bit_ops == true) {
+			str_info->prev = STREAM_UN_INIT;
+			str_info->status = STREAM_INIT;
+			str_info->cumm_bytes = 0;
+			sst_send_sync_msg(IPC_IA_DROP_STREAM, str_id);
+		} else {
+			if (sst_create_ipc_msg(&msg, true))
+				return -ENOMEM;
+			str_info->prev = STREAM_UN_INIT;
+			str_info->status = STREAM_INIT;
+			str_info->cumm_bytes = 0;
+			pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+			sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+					      str_info->task_id, 1, pvt_id);
+
+			msg->mrfld_header.p.header_low_payload = sizeof(dsp_hdr);
+			sst_fill_header_dsp(&dsp_hdr, IPC_IA_DROP_STREAM_MRFLD,
+					str_info->pipe_id, 0);
+			memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+			trace_sst_stream("STOP  ->", str_id, str_info->pipe_id);
+			pr_info("Stop for str %d pipe %#x\n", str_id, str_info->pipe_id);
+
+			sst_drv_ctx->ops->sync_post_message(msg);
+		}
+	} else {
+		retval = -EBADRQC;
+		pr_debug("BADQRC for stream, state %x\n", str_info->status);
+	}
+	return retval;
+}
+
+/**
+ * sst_next_track - notify next track
+ *
+ * This function is called by any function which wants to set the
+ * next track. Currently this is a NOP as the FW doesn't care
+ */
+int sst_next_track(void)
+{
+	pr_debug("SST DBG: next_track");
+	return 0;
+}
+
+/**
+* sst_drain_stream - Send msg for draining stream
+* @str_id:		stream ID
+* @partial_drain:	whether this is a partial drain
+*
+* This function is called by any function which wants to drain
+* a stream.
+*/
+int sst_drain_stream(int str_id, bool partial_drain)
+{
+	int retval = 0, pvt_id, len;
+	struct ipc_post *msg = NULL;
+	struct stream_info *str_info;
+	struct intel_sst_ops *ops;
+	struct sst_block *block = NULL;
+	struct ipc_dsp_hdr dsp_hdr;
+
+	pr_debug("SST DBG:sst_drain_stream for %d\n", str_id);
+	str_info = get_stream_info(str_id);
+	if (!str_info)
+		return -EINVAL;
+	ops = sst_drv_ctx->ops;
+	if (str_info->status != STREAM_RUNNING &&
+		str_info->status != STREAM_INIT &&
+		str_info->status != STREAM_PAUSED) {
+			pr_err("SST ERR: BADQRC for stream = %d\n",
+				       str_info->status);
+			return -EBADRQC;
+	}
+
+	if (!sst_drv_ctx->use_32bit_ops) {
+		pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+		retval = sst_create_block_and_ipc_msg(&msg, true,
+				sst_drv_ctx, &block, IPC_CMD, pvt_id);
+		if (retval)
+			return retval;
+		sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+				str_info->task_id, 1, pvt_id);
+		pr_debug("header:%x\n",
+			(unsigned int)msg->mrfld_header.p.header_high.full);
+		msg->mrfld_header.p.header_high.part.res_rqd = 1;
+
+		len = sizeof(u8) + sizeof(dsp_hdr);
+		msg->mrfld_header.p.header_low_payload = len;
+		sst_fill_header_dsp(&dsp_hdr, IPC_IA_DRAIN_STREAM_MRFLD,
+					str_info->pipe_id, sizeof(u8));
+		memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+		memcpy(msg->mailbox_data + sizeof(dsp_hdr),
+				&partial_drain, sizeof(u8));
+		trace_sst_stream("DRAIN ->", str_id, str_info->pipe_id);
+	} else {
+		retval = sst_create_block_and_ipc_msg(&msg, false,
+				sst_drv_ctx, &block,
+				IPC_IA_DRAIN_STREAM, str_id);
+		if (retval)
+			return retval;
+		sst_fill_header(&msg->header, IPC_IA_DRAIN_STREAM, 0, str_id);
+		msg->header.part.data = partial_drain;
+	}
+	sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg);
+	/* With the new non-blocking drain implementation in the core we
+	 * don't need to wait for a response; we only need to invoke the
+	 * callback for drain complete
+	 */
+
+	sst_free_block(sst_drv_ctx, block);
+	return retval;
+}
+
+/**
+ * sst_free_stream - Frees a stream
+ * @str_id:		stream ID
+ *
+ * This function is called by any function which wants to free
+ * a stream.
+ */
+int sst_free_stream(int str_id)
+{
+	int retval = 0;
+	unsigned int pvt_id;
+	struct ipc_post *msg = NULL;
+	struct stream_info *str_info;
+	struct intel_sst_ops *ops;
+	unsigned long irq_flags;
+	struct ipc_dsp_hdr dsp_hdr;
+	struct sst_block *block;
+
+	pr_debug("SST DBG:sst_free_stream for %d\n", str_id);
+
+	mutex_lock(&sst_drv_ctx->sst_lock);
+	if (sst_drv_ctx->sst_state == SST_UN_INIT) {
+		mutex_unlock(&sst_drv_ctx->sst_lock);
+		return -ENODEV;
+	}
+	mutex_unlock(&sst_drv_ctx->sst_lock);
+	str_info = get_stream_info(str_id);
+	if (!str_info)
+		return -EINVAL;
+	ops = sst_drv_ctx->ops;
+
+	mutex_lock(&str_info->lock);
+	if (str_info->status != STREAM_UN_INIT) {
+		str_info->prev = str_info->status;
+		str_info->status = STREAM_UN_INIT;
+		mutex_unlock(&str_info->lock);
+
+		if (!sst_drv_ctx->use_32bit_ops) {
+			pvt_id = sst_assign_pvt_id(sst_drv_ctx);
+			retval = sst_create_block_and_ipc_msg(&msg, true,
+					sst_drv_ctx, &block, IPC_CMD, pvt_id);
+			if (retval)
+				return retval;
+
+			sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+					      str_info->task_id, 1, pvt_id);
+			msg->mrfld_header.p.header_low_payload =
+							sizeof(dsp_hdr);
+			sst_fill_header_dsp(&dsp_hdr, IPC_IA_FREE_STREAM_MRFLD,
+						str_info->pipe_id,  0);
+			memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+			trace_sst_stream("FREE  ->", str_id, str_info->pipe_id);
+			pr_info("Free for str %d pipe %#x\n", str_id, str_info->pipe_id);
+
+		} else {
+			retval = sst_create_block_and_ipc_msg(&msg, false,
+						sst_drv_ctx, &block,
+						IPC_IA_FREE_STREAM, str_id);
+			if (retval)
+				return retval;
+			sst_fill_header(&msg->header, IPC_IA_FREE_STREAM,
+								 0, str_id);
+		}
+		spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+		list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
+		spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
+		if (!sst_drv_ctx->use_32bit_ops) {
+			/*FIXME: do we need to wake up drain stream here,
+			 * how to get the pvt_id and msg_id
+			 */
+		} else {
+			sst_wake_up_block(sst_drv_ctx, 0, str_id,
+				IPC_IA_DRAIN_STREAM, NULL, 0);
+		}
+		ops->post_message(&sst_drv_ctx->ipc_post_msg_wq);
+		retval = sst_wait_timeout(sst_drv_ctx, block);
+		pr_debug("sst: wait for free returned %d\n", retval);
+		mutex_lock(&sst_drv_ctx->stream_lock);
+		sst_clean_stream(str_info);
+		mutex_unlock(&sst_drv_ctx->stream_lock);
+		pr_debug("SST DBG:Stream freed\n");
+		sst_free_block(sst_drv_ctx, block);
+	} else {
+		mutex_unlock(&str_info->lock);
+		retval = -EBADRQC;
+		pr_debug("SST DBG:BADQRC for stream\n");
+	}
+
+	return retval;
+}
+
+int sst_request_vtsv_file(char *fname, struct intel_sst_drv *ctx,
+		void **out_file, u32 *out_size)
+{
+	int retval = 0;
+	const struct firmware *file;
+	void *ddr_virt_addr;
+	unsigned long file_base;
+
+	if (!ctx->pdata->lib_info) {
+		pr_err("lib_info pointer NULL\n");
+		return -EINVAL;
+	}
+
+	pr_debug("Requesting VTSV file %s now...\n", fname);
+	retval = request_firmware(&file, fname, ctx->dev);
+	if (retval) {
+		pr_err("request fw failed %d\n", retval);
+		return retval;
+	}
+	if (file == NULL) {
+		pr_err("VTSV file is returning as null\n");
+		return -EINVAL;
+	}
+
+	if ((*out_file == NULL) || (*out_size < file->size)) {
+		retval = sst_get_next_lib_mem(&ctx->lib_mem_mgr, file->size,
+			&file_base);
+		*out_file = (void *)file_base;
+	}
+	ddr_virt_addr = (unsigned char *)ctx->ddr +
+		(unsigned long)(*out_file - ctx->pdata->lib_info->mod_base);
+	memcpy(ddr_virt_addr, file->data, file->size);
+
+	*out_size = file->size;
+	release_firmware(file);
+	return 0;
+}
+
+int sst_format_vtsv_message(struct intel_sst_drv *ctx,
+	struct ipc_post **msgptr, struct sst_block **block)
+{
+	int retval = 0, pvt_id, len;
+	struct ipc_dsp_hdr dsp_hdr;
+	struct snd_sst_vtsv_info vinfo;
+	struct ipc_post *msg;
+
+	BUG_ON((unsigned long)(ctx->vcache.file1_in_mem) & 0xffffffff00000000ULL);
+	BUG_ON((unsigned long)(ctx->vcache.file2_in_mem) & 0xffffffff00000000ULL);
+
+	vinfo.vfiles[0].addr = (u32)((unsigned long)ctx->vcache.file1_in_mem
+				& 0xffffffff);
+	vinfo.vfiles[0].size = ctx->vcache.size1;
+	vinfo.vfiles[1].addr = (u32)((unsigned long)ctx->vcache.file2_in_mem
+				& 0xffffffff);
+	vinfo.vfiles[1].size = ctx->vcache.size2;
+
+	/* Create the vtsv message */
+	pvt_id = sst_assign_pvt_id(ctx);
+	retval = sst_create_block_and_ipc_msg(msgptr, true,
+			ctx, block, IPC_CMD, pvt_id);
+	if (retval)
+		return retval;
+	msg = *msgptr;
+	sst_fill_header_mrfld(&msg->mrfld_header, IPC_CMD,
+			SST_TASK_ID_AWARE, 1, pvt_id);
+	pr_debug("header:%x\n",
+			(unsigned int)msg->mrfld_header.p.header_high.full);
+	msg->mrfld_header.p.header_high.part.res_rqd = 1;
+
+	len = sizeof(vinfo) + sizeof(dsp_hdr);
+	msg->mrfld_header.p.header_low_payload = len;
+	sst_fill_header_dsp(&dsp_hdr, IPC_IA_VTSV_UPDATE_MODULES,
+				PIPE_VAD_OUT, sizeof(u8));
+	dsp_hdr.mod_id = SST_ALGO_VTSV;
+	memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
+	memcpy(msg->mailbox_data + sizeof(dsp_hdr),
+			&vinfo, sizeof(vinfo));
+	return 0;
+}
+
+int sst_send_vtsv_data_to_fw(struct intel_sst_drv *ctx)
+{
+	int retval = 0;
+	struct ipc_post *msg = NULL;
+	struct sst_block *block = NULL;
+
+	/* Download both the data files */
+	retval = sst_request_vtsv_file("vtsv_net.bin", ctx,
+			&ctx->vcache.file1_in_mem, &ctx->vcache.size1);
+	if (retval) {
+		pr_err("vtsv data file1 request failed %d\n", retval);
+		return retval;
+	}
+
+	retval = sst_request_vtsv_file("vtsv_grammar.bin", ctx,
+			&ctx->vcache.file2_in_mem, &ctx->vcache.size2);
+	if (retval) {
+		pr_err("vtsv data file2 request failed %d\n", retval);
+		return retval;
+	}
+
+	retval = sst_format_vtsv_message(ctx, &msg, &block);
+	if (retval) {
+		pr_err("vtsv msg format failed %d\n", retval);
+		return retval;
+	}
+	sst_add_to_dispatch_list_and_post(ctx, msg);
+	retval = sst_wait_timeout(ctx, block);
+	if (retval)
+		pr_err("vtsv msg send to fw failed %d\n", retval);
+
+	sst_free_block(ctx, block);
+	return retval;
+}
diff --git a/sound/soc/intel/sst/sst_trace.h b/sound/soc/intel/sst/sst_trace.h
new file mode 100644
index 0000000..8d34ef3
--- /dev/null
+++ b/sound/soc/intel/sst/sst_trace.h
@@ -0,0 +1,147 @@
+/*
+ *  sst_trace.h - Intel SST Driver tracing support
+ *
+ *  Copyright (C) 2013	Intel Corp
+ *  Authors: Omair Mohammed Abdullah <omair.m.abdullah@linux.intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sst
+
+#if !defined(_TRACE_SST_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SST_H
+
+#include <linux/types.h>
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
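+
+/*
+ * Usage sketch (standard tracepoint convention, assumed for this
+ * driver): exactly one .c file defines CREATE_TRACE_POINTS before
+ * including this header so the trace_sst_*() symbols are emitted.
+ */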
+
+TRACE_EVENT(sst_ipc,
+
+	TP_PROTO(const char *msg, u32 header_high, u32 header_low, int pvt_id),
+
+	TP_ARGS(msg, header_high, header_low, pvt_id),
+
+	TP_STRUCT__entry(
+		__string(info_msg,	msg)
+		__field(unsigned int,	val_l)
+		__field(unsigned int,	val_h)
+		__field(unsigned int,	id)
+	),
+
+	TP_fast_assign(
+		__assign_str(info_msg, msg);
+		__entry->val_l = header_low;
+		__entry->val_h = header_high;
+		__entry->id = pvt_id;
+	),
+
+	TP_printk("\t%s\t [%2u] = %#8.8x:%.4x", __get_str(info_msg),
+		  (unsigned int)__entry->id,
+		  (unsigned int)__entry->val_h, (unsigned int)__entry->val_l)
+
+);
+
+TRACE_EVENT(sst_stream,
+
+	TP_PROTO(const char *msg, int str_id, int pipe_id),
+
+	TP_ARGS(msg, str_id, pipe_id),
+
+	TP_STRUCT__entry(
+		__string(info_msg,	msg)
+		__field(unsigned int,	str_id)
+		__field(unsigned int,	pipe_id)
+	),
+
+	TP_fast_assign(
+		__assign_str(info_msg, msg);
+		__entry->str_id = str_id;
+		__entry->pipe_id = pipe_id;
+	),
+
+	TP_printk("\t%s\t str  = %2u, pipe = %#x", __get_str(info_msg),
+		  (unsigned int)__entry->str_id, (unsigned int)__entry->pipe_id)
+);
+
+TRACE_EVENT(sst_ipc_mailbox,
+
+	TP_PROTO(const char *mailbox, int mbox_len),
+
+	TP_ARGS(mailbox, mbox_len),
+
+	TP_STRUCT__entry(
+		__dynamic_array(char,	mbox,	(3 * mbox_len))
+	),
+
+	TP_fast_assign(
+		sst_dump_to_buffer(mailbox, mbox_len,
+				   __get_dynamic_array(mbox));
+	),
+
+	TP_printk("  %s", __get_str(mbox))
+
+);
+
+TRACE_EVENT(sst_lib_download,
+
+	TP_PROTO(const char *msg, const char *lib_name),
+
+	TP_ARGS(msg, lib_name),
+
+	TP_STRUCT__entry(
+		__string(info_msg, msg)
+		__string(info_lib_name, lib_name)
+	),
+
+	TP_fast_assign(
+		__assign_str(info_msg, msg);
+		__assign_str(info_lib_name, lib_name);
+	),
+
+	TP_printk("\t%s %s", __get_str(info_msg),
+			__get_str(info_lib_name))
+);
+
+TRACE_EVENT(sst_fw_download,
+
+	TP_PROTO(const char *msg, int fw_state),
+
+	TP_ARGS(msg, fw_state),
+
+	TP_STRUCT__entry(
+		__string(info_msg, msg)
+		__field(unsigned int,   fw_state)
+	),
+
+	TP_fast_assign(
+		__assign_str(info_msg, msg);
+		__entry->fw_state = fw_state;
+	),
+
+	TP_printk("\t%s\tFW state = %d", __get_str(info_msg),
+				(unsigned int)__entry->fw_state)
+);
+
+#endif /* _TRACE_SST_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE sst_trace
+#include <trace/define_trace.h>
diff --git a/sound/soc/intel/sst_platform.h b/sound/soc/intel/sst_platform.h
new file mode 100644
index 0000000..5d231b5
--- /dev/null
+++ b/sound/soc/intel/sst_platform.h
@@ -0,0 +1,155 @@
+/*
+ *  sst_platform.h - Intel MID Platform driver header file
+ *
+ *  Copyright (C) 2010 Intel Corp
+ *  Author: Vinod Koul <vinod.koul@intel.com>
+ *  Author: Harsha Priya <priya.harsha@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+
+#ifndef __SST_PLATFORM_H__
+#define __SST_PLATFORM_H__
+
+#include <sound/soc.h>
+
+#define SST_MAX_BIN_BYTES 1024
+
+struct sst_data;
+
+enum sst_audio_device_type {
+	SND_SST_DEVICE_HEADSET = 1,
+	SND_SST_DEVICE_IHF,
+	SND_SST_DEVICE_VIBRA,
+	SND_SST_DEVICE_HAPTIC,
+	SND_SST_DEVICE_CAPTURE,
+	SND_SST_DEVICE_COMPRESS,
+};
+
+enum snd_sst_input_stream {
+	SST_INPUT_STREAM_NONE = 0x0,
+	SST_INPUT_STREAM_PCM = 0x6,
+	SST_INPUT_STREAM_COMPRESS = 0x8,
+	SST_INPUT_STREAM_MIXED = 0xE,
+};
+
+enum sst_stream_ops {
+	STREAM_OPS_PLAYBACK = 0,        /* Decode */
+	STREAM_OPS_CAPTURE,             /* Encode */
+	STREAM_OPS_COMPRESSED_PATH,     /* Offload playback/capture */
+
+};
+enum snd_sst_stream_type {
+	SST_STREAM_DEVICE_HS = 32,
+	SST_STREAM_DEVICE_IHF = 33,
+	SST_STREAM_DEVICE_MIC0 = 34,
+	SST_STREAM_DEVICE_MIC1 = 35,
+};
+
+enum sst_controls {
+	SST_SND_ALLOC =			0x1000,
+	SST_SND_PAUSE =			0x1001,
+	SST_SND_RESUME =		0x1002,
+	SST_SND_DROP =			0x1003,
+	SST_SND_FREE =			0x1004,
+	SST_SND_BUFFER_POINTER =	0x1005,
+	SST_SND_STREAM_INIT =		0x1006,
+	SST_SND_START	 =		0x1007,
+	SST_SET_RUNTIME_PARAMS =	0x1008,
+	SST_SET_ALGO_PARAMS =		0x1009,
+	SST_SET_BYTE_STREAM =		0x100A,
+	SST_GET_BYTE_STREAM =		0x100B,
+	SST_SET_SSP_CONFIG =		0x100C,
+	SST_SET_PROBE_BYTE_STREAM =     0x100D,
+	SST_GET_PROBE_BYTE_STREAM =	0x100E,
+	SST_SET_VTSV_INFO =		0x100F,
+};
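+
+/*
+ * The command IDs above are handed to sst_ops.device_control() and
+ * sst_ops.set_generic_params() below; the 0x1000 base presumably keeps
+ * them distinct from the 0x00-based controls of the older mid-x86 driver.
+ */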
+
+struct pcm_stream_info {
+	int str_id;
+	void *mad_substream;
+	void (*period_elapsed) (void *mad_substream);
+	unsigned long long buffer_ptr;
+	unsigned long long pcm_delay;
+	int sfreq;
+};
+
+struct sst_compress_cb {
+	void *param;
+	void (*compr_cb)(void *param);
+	void *drain_cb_param;
+	void (*drain_notify)(void *param);
+};
+
+struct snd_sst_params;
+
+struct compress_sst_ops {
+	const char *name;
+	int (*open) (struct snd_sst_params *str_params,
+			struct sst_compress_cb *cb);
+	int (*control) (unsigned int cmd, unsigned int str_id);
+	int (*tstamp) (unsigned int str_id, struct snd_compr_tstamp *tstamp);
+	int (*ack) (unsigned int str_id, unsigned long bytes);
+	int (*close) (unsigned int str_id);
+	int (*get_caps) (struct snd_compr_caps *caps);
+	int (*get_codec_caps) (struct snd_compr_codec_caps *codec);
+	int (*set_metadata) (unsigned int str_id,
+			struct snd_compr_metadata *metadata);
+};
+
+enum lpe_param_types_mixer {
+	SST_ALGO_PARAM_MIXER_STREAM_CFG = 0x801,
+};
+
+struct mad_ops_wq {
+	int stream_id;
+	enum sst_controls control_op;
+	struct work_struct wq;
+};
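+
+/*
+ * mad_ops_wq above carries a deferred stream control request: control_op
+ * is applied to stream_id from workqueue context via the embedded work
+ * item.
+ */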
+
+struct sst_ops {
+	int (*open) (struct snd_sst_params *str_param);
+	int (*device_control) (int cmd, void *arg);
+	int (*set_generic_params) (enum sst_controls cmd, void *arg);
+	int (*close) (unsigned int str_id);
+	int (*power) (bool state);
+};
+
+struct sst_runtime_stream {
+	int     stream_status;
+	unsigned int id;
+	size_t bytes_written;
+	struct pcm_stream_info stream_info;
+	struct sst_ops *ops;
+	struct compress_sst_ops *compr_ops;
+	spinlock_t	status_lock;
+};
+
+struct sst_device {
+	char *name;
+	struct device *dev;
+	struct sst_ops *ops;
+	struct platform_device *pdev;
+	struct compress_sst_ops *compr_ops;
+};
+
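+/*
+ * A low-level SST driver fills in one struct sst_device and registers it
+ * at probe time; only a single device may be registered at once (minimal
+ * sketch; "mrfld_ops" and "mrfld_compr_ops" are hypothetical driver
+ * tables):
+ *
+ *	static struct sst_device sst_dev = {
+ *		.name      = "sst-dsp",
+ *		.dev       = &pdev->dev,
+ *		.ops       = &mrfld_ops,
+ *		.compr_ops = &mrfld_compr_ops,
+ *	};
+ *	ret = sst_register_dsp(&sst_dev);
+ */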
+int sst_register_dsp(struct sst_device *sst);
+int sst_unregister_dsp(struct sst_device *sst);
+#endif
diff --git a/sound/soc/intel/sst_platform_pvt.h b/sound/soc/intel/sst_platform_pvt.h
new file mode 100644
index 0000000..f74dd9c
--- /dev/null
+++ b/sound/soc/intel/sst_platform_pvt.h
@@ -0,0 +1,127 @@
+/*
+ *  sst_platform_pvt.h - Intel MID Platform driver header file
+ *
+ *  Copyright (C) 2010 Intel Corp
+ *  Author: Vinod Koul <vinod.koul@intel.com>
+ *  Author: Harsha Priya <priya.harsha@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *
+ */
+
+#ifndef __SST_PLATFORM_PVT_H__
+#define __SST_PLATFORM_PVT_H__
+
+/* TODO: remove this global */
+extern struct sst_device *sst_dsp;
+
+#define SST_MONO		1
+#define SST_STEREO		2
+
+#define SST_MIN_RATE		8000
+#define SST_MAX_RATE		48000
+#define SST_MIN_CHANNEL		1
+#define SST_MAX_CHANNEL		2
+
+#define SST_MAX_BUFFER		96000 /*500ms@48K,16bit,2ch - CLV*/
+#define SST_MIN_PERIOD_BYTES	1536  /*24ms@16K,16bit,2ch - For VoIP on Mrfld*/
+#define SST_MAX_PERIOD_BYTES	48000 /*250ms@48K,16bit,2ch - CLV*/
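+/*
+ * Buffer math for the limits above:
+ *   SST_MAX_BUFFER:       48000 Hz * 2 ch * 2 B * 0.500 s = 96000 bytes
+ *   SST_MAX_PERIOD_BYTES: 48000 Hz * 2 ch * 2 B * 0.250 s = 48000 bytes
+ *   SST_MIN_PERIOD_BYTES: 16000 Hz * 2 ch * 2 B * 0.024 s =  1536 bytes
+ */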
+
+#define SST_MIN_PERIODS		2
+#define SST_MAX_PERIODS		50
+#define SST_FIFO_SIZE		0
+#define SST_CODEC_TYPE_PCM	1
+
+#define SST_HEADSET_DAI		"Headset-cpu-dai"
+#define SST_SPEAKER_DAI		"Speaker-cpu-dai"
+#define SST_VOICE_DAI		"Voice-cpu-dai"
+#define SST_VIRTUAL_DAI		"Virtual-cpu-dai"
+#define SST_LOOPBACK_DAI	"Loopback-cpu-dai"
+#define SST_POWER_DAI		"Power-cpu-dai"
+#define SST_COMPRESS_DAI	"Compress-cpu-dai"
+#define SST_PROBE_DAI		"Probe-cpu-dai"
+#define SST_VOIP_DAI		"Voip-cpu-dai"
+#define SST_DEEPBUFFER_DAI	"Deepbuffer-cpu-dai"
+#define SST_LOWLATENCY_DAI	"Lowlatency-cpu-dai"
+
+struct sst_device;
+
+enum sst_drv_status {
+	SST_PLATFORM_UNINIT,
+	SST_PLATFORM_INIT,
+	SST_PLATFORM_RUNNING,
+	SST_PLATFORM_PAUSED,
+	SST_PLATFORM_DROPPED,
+};
+
+#define SST_PIPE_CONTROL	0x0
+#define SST_COMPRESS_VOL	0x01
+
+int sst_platform_clv_init(struct snd_soc_platform *platform);
+int sst_dsp_init(struct snd_soc_platform *platform);
+int sst_dsp_init_v2_dpcm(struct snd_soc_platform *platform);
+int sst_send_pipe_gains(struct snd_soc_dai *dai, int stream, int mute);
+
+unsigned int sst_soc_read(struct snd_soc_platform *platform, unsigned int reg);
+int sst_soc_write(struct snd_soc_platform *platform, unsigned int reg, unsigned int val);
+unsigned int sst_reg_read(struct sst_data *sst, unsigned int reg,
+			  unsigned int shift, unsigned int max);
+unsigned int sst_reg_write(struct sst_data *sst, unsigned int reg,
+			   unsigned int shift, unsigned int max, unsigned int val);
+
+int sst_algo_int_ctl_info(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_info *uinfo);
+void sst_set_stream_status(struct sst_runtime_stream *stream, int state);
+int sst_fill_stream_params(void *substream, const struct sst_data *ctx,
+			   struct snd_sst_params *str_params, bool is_compress);
+int sst_dpcm_probe_send(struct snd_soc_platform *platform, u16 probe_pipe,
+			int substream, int direction, bool on);
+int sst_byte_control_get(struct snd_kcontrol *kcontrol,
+			 struct snd_ctl_elem_value *ucontrol);
+int sst_byte_control_set(struct snd_kcontrol *kcontrol,
+			 struct snd_ctl_elem_value *ucontrol);
+
+struct sst_algo_int_control_v2 {
+	struct soc_mixer_control mc;
+	u16 module_id; /* module identifier */
+	u16 pipe_id; /* location info: pipe_id + instance_id */
+	u16 instance_id;
+	unsigned int value; /* Value received is stored here */
+};
+
+struct sst_lowlatency_deepbuff {
+	/* Thresholds for low latency & deep buffer */
+	unsigned long	*low_latency;
+	unsigned long	*deep_buffer;
+	unsigned long	period_time;
+};
+
+struct sst_data {
+	struct platform_device *pdev;
+	struct sst_platform_data *pdata;
+	unsigned int lpe_mixer_input_ihf;
+	unsigned int lpe_mixer_input_hs;
+	u32 *widget;
+	char *byte_stream;
+	struct mutex lock;
+	/* Pipe_id for probe_stream to be saved in stream map */
+	u8 pipe_id;
+	bool vtsv_enroll;
+	struct sst_lowlatency_deepbuff ll_db;
+};
+#endif
diff --git a/sound/soc/mid-x86/Kconfig b/sound/soc/mid-x86/Kconfig
deleted file mode 100644
index 61c10bf..0000000
--- a/sound/soc/mid-x86/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-config SND_MFLD_MACHINE
-	tristate "SOC Machine Audio driver for Intel Medfield MID platform"
-	depends on INTEL_SCU_IPC
-	select SND_SOC_SN95031
-	select SND_SST_PLATFORM
-	help
-          This adds support for ASoC machine driver for Intel(R) MID Medfield platform
-          used as alsa device in audio substem in Intel(R) MID devices
-          Say Y if you have such a device
-          If unsure select "N".
-
-config SND_SST_PLATFORM
-	tristate
diff --git a/sound/soc/mid-x86/Makefile b/sound/soc/mid-x86/Makefile
deleted file mode 100644
index 6398833..0000000
--- a/sound/soc/mid-x86/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-snd-soc-sst-platform-objs := sst_platform.o
-snd-soc-mfld-machine-objs := mfld_machine.o
-
-obj-$(CONFIG_SND_SST_PLATFORM) += snd-soc-sst-platform.o
-obj-$(CONFIG_SND_MFLD_MACHINE) += snd-soc-mfld-machine.o
diff --git a/sound/soc/mid-x86/mfld_machine.c b/sound/soc/mid-x86/mfld_machine.c
deleted file mode 100644
index 4139116..0000000
--- a/sound/soc/mid-x86/mfld_machine.c
+++ /dev/null
@@ -1,447 +0,0 @@
-/*
- *  mfld_machine.c - ASoc Machine driver for Intel Medfield MID platform
- *
- *  Copyright (C) 2010 Intel Corp
- *  Author: Vinod Koul <vinod.koul@intel.com>
- *  Author: Harsha Priya <priya.harsha@intel.com>
- *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; version 2 of the License.
- *
- *  This program is distributed in the hope that it will be useful, but
- *  WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *  General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-#include <sound/jack.h>
-#include "../codecs/sn95031.h"
-
-#define MID_MONO 1
-#define MID_STEREO 2
-#define MID_MAX_CAP 5
-#define MFLD_JACK_INSERT 0x04
-
-enum soc_mic_bias_zones {
-	MFLD_MV_START = 0,
-	/* mic bias volutage range for Headphones*/
-	MFLD_MV_HP = 400,
-	/* mic bias volutage range for American Headset*/
-	MFLD_MV_AM_HS = 650,
-	/* mic bias volutage range for Headset*/
-	MFLD_MV_HS = 2000,
-	MFLD_MV_UNDEFINED,
-};
-
-static unsigned int	hs_switch;
-static unsigned int	lo_dac;
-
-struct mfld_mc_private {
-	void __iomem *int_base;
-	u8 interrupt_status;
-};
-
-struct snd_soc_jack mfld_jack;
-
-/*Headset jack detection DAPM pins */
-static struct snd_soc_jack_pin mfld_jack_pins[] = {
-	{
-		.pin = "Headphones",
-		.mask = SND_JACK_HEADPHONE,
-	},
-	{
-		.pin = "AMIC1",
-		.mask = SND_JACK_MICROPHONE,
-	},
-};
-
-/* jack detection voltage zones */
-static struct snd_soc_jack_zone mfld_zones[] = {
-	{MFLD_MV_START, MFLD_MV_AM_HS, SND_JACK_HEADPHONE},
-	{MFLD_MV_AM_HS, MFLD_MV_HS, SND_JACK_HEADSET},
-};
-
-/* sound card controls */
-static const char *headset_switch_text[] = {"Earpiece", "Headset"};
-
-static const char *lo_text[] = {"Vibra", "Headset", "IHF", "None"};
-
-static const struct soc_enum headset_enum =
-	SOC_ENUM_SINGLE_EXT(2, headset_switch_text);
-
-static const struct soc_enum lo_enum =
-	SOC_ENUM_SINGLE_EXT(4, lo_text);
-
-static int headset_get_switch(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
-{
-	ucontrol->value.integer.value[0] = hs_switch;
-	return 0;
-}
-
-static int headset_set_switch(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec =  snd_kcontrol_chip(kcontrol);
-
-	if (ucontrol->value.integer.value[0] == hs_switch)
-		return 0;
-
-	if (ucontrol->value.integer.value[0]) {
-		pr_debug("hs_set HS path\n");
-		snd_soc_dapm_enable_pin(&codec->dapm, "Headphones");
-		snd_soc_dapm_disable_pin(&codec->dapm, "EPOUT");
-	} else {
-		pr_debug("hs_set EP path\n");
-		snd_soc_dapm_disable_pin(&codec->dapm, "Headphones");
-		snd_soc_dapm_enable_pin(&codec->dapm, "EPOUT");
-	}
-	snd_soc_dapm_sync(&codec->dapm);
-	hs_switch = ucontrol->value.integer.value[0];
-
-	return 0;
-}
-
-static void lo_enable_out_pins(struct snd_soc_codec *codec)
-{
-	snd_soc_dapm_enable_pin(&codec->dapm, "IHFOUTL");
-	snd_soc_dapm_enable_pin(&codec->dapm, "IHFOUTR");
-	snd_soc_dapm_enable_pin(&codec->dapm, "LINEOUTL");
-	snd_soc_dapm_enable_pin(&codec->dapm, "LINEOUTR");
-	snd_soc_dapm_enable_pin(&codec->dapm, "VIB1OUT");
-	snd_soc_dapm_enable_pin(&codec->dapm, "VIB2OUT");
-	if (hs_switch) {
-		snd_soc_dapm_enable_pin(&codec->dapm, "Headphones");
-		snd_soc_dapm_disable_pin(&codec->dapm, "EPOUT");
-	} else {
-		snd_soc_dapm_disable_pin(&codec->dapm, "Headphones");
-		snd_soc_dapm_enable_pin(&codec->dapm, "EPOUT");
-	}
-}
-
-static int lo_get_switch(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
-{
-	ucontrol->value.integer.value[0] = lo_dac;
-	return 0;
-}
-
-static int lo_set_switch(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec =  snd_kcontrol_chip(kcontrol);
-
-	if (ucontrol->value.integer.value[0] == lo_dac)
-		return 0;
-
-	/* we dont want to work with last state of lineout so just enable all
-	 * pins and then disable pins not required
-	 */
-	lo_enable_out_pins(codec);
-	switch (ucontrol->value.integer.value[0]) {
-	case 0:
-		pr_debug("set vibra path\n");
-		snd_soc_dapm_disable_pin(&codec->dapm, "VIB1OUT");
-		snd_soc_dapm_disable_pin(&codec->dapm, "VIB2OUT");
-		snd_soc_update_bits(codec, SN95031_LOCTL, 0x66, 0);
-		break;
-
-	case 1:
-		pr_debug("set hs  path\n");
-		snd_soc_dapm_disable_pin(&codec->dapm, "Headphones");
-		snd_soc_dapm_disable_pin(&codec->dapm, "EPOUT");
-		snd_soc_update_bits(codec, SN95031_LOCTL, 0x66, 0x22);
-		break;
-
-	case 2:
-		pr_debug("set spkr path\n");
-		snd_soc_dapm_disable_pin(&codec->dapm, "IHFOUTL");
-		snd_soc_dapm_disable_pin(&codec->dapm, "IHFOUTR");
-		snd_soc_update_bits(codec, SN95031_LOCTL, 0x66, 0x44);
-		break;
-
-	case 3:
-		pr_debug("set null path\n");
-		snd_soc_dapm_disable_pin(&codec->dapm, "LINEOUTL");
-		snd_soc_dapm_disable_pin(&codec->dapm, "LINEOUTR");
-		snd_soc_update_bits(codec, SN95031_LOCTL, 0x66, 0x66);
-		break;
-	}
-	snd_soc_dapm_sync(&codec->dapm);
-	lo_dac = ucontrol->value.integer.value[0];
-	return 0;
-}
-
-static const struct snd_kcontrol_new mfld_snd_controls[] = {
-	SOC_ENUM_EXT("Playback Switch", headset_enum,
-			headset_get_switch, headset_set_switch),
-	SOC_ENUM_EXT("Lineout Mux", lo_enum,
-			lo_get_switch, lo_set_switch),
-};
-
-static const struct snd_soc_dapm_widget mfld_widgets[] = {
-	SND_SOC_DAPM_HP("Headphones", NULL),
-	SND_SOC_DAPM_MIC("Mic", NULL),
-};
-
-static const struct snd_soc_dapm_route mfld_map[] = {
-	{"Headphones", NULL, "HPOUTR"},
-	{"Headphones", NULL, "HPOUTL"},
-	{"Mic", NULL, "AMIC1"},
-};
-
-static void mfld_jack_check(unsigned int intr_status)
-{
-	struct mfld_jack_data jack_data;
-
-	jack_data.mfld_jack = &mfld_jack;
-	jack_data.intr_id = intr_status;
-
-	sn95031_jack_detection(&jack_data);
-	/* TODO: add american headset detection post gpiolib support */
-}
-
-static int mfld_init(struct snd_soc_pcm_runtime *runtime)
-{
-	struct snd_soc_codec *codec = runtime->codec;
-	struct snd_soc_dapm_context *dapm = &codec->dapm;
-	int ret_val;
-
-	/* Add jack sense widgets */
-	snd_soc_dapm_new_controls(dapm, mfld_widgets, ARRAY_SIZE(mfld_widgets));
-
-	/* Set up the map */
-	snd_soc_dapm_add_routes(dapm, mfld_map, ARRAY_SIZE(mfld_map));
-
-	/* always connected */
-	snd_soc_dapm_enable_pin(dapm, "Headphones");
-	snd_soc_dapm_enable_pin(dapm, "Mic");
-
-	ret_val = snd_soc_add_codec_controls(codec, mfld_snd_controls,
-				ARRAY_SIZE(mfld_snd_controls));
-	if (ret_val) {
-		pr_err("soc_add_controls failed %d", ret_val);
-		return ret_val;
-	}
-	/* default is earpiece pin, userspace sets it explcitly */
-	snd_soc_dapm_disable_pin(dapm, "Headphones");
-	/* default is lineout NC, userspace sets it explcitly */
-	snd_soc_dapm_disable_pin(dapm, "LINEOUTL");
-	snd_soc_dapm_disable_pin(dapm, "LINEOUTR");
-	lo_dac = 3;
-	hs_switch = 0;
-	/* we dont use linein in this so set to NC */
-	snd_soc_dapm_disable_pin(dapm, "LINEINL");
-	snd_soc_dapm_disable_pin(dapm, "LINEINR");
-
-	/* Headset and button jack detection */
-	ret_val = snd_soc_jack_new(codec, "Intel(R) MID Audio Jack",
-			SND_JACK_HEADSET | SND_JACK_BTN_0 |
-			SND_JACK_BTN_1, &mfld_jack);
-	if (ret_val) {
-		pr_err("jack creation failed\n");
-		return ret_val;
-	}
-
-	ret_val = snd_soc_jack_add_pins(&mfld_jack,
-			ARRAY_SIZE(mfld_jack_pins), mfld_jack_pins);
-	if (ret_val) {
-		pr_err("adding jack pins failed\n");
-		return ret_val;
-	}
-	ret_val = snd_soc_jack_add_zones(&mfld_jack,
-			ARRAY_SIZE(mfld_zones), mfld_zones);
-	if (ret_val) {
-		pr_err("adding jack zones failed\n");
-		return ret_val;
-	}
-
-	/* we want to check if anything is inserted at boot,
-	 * so send a fake event to codec and it will read adc
-	 * to find if anything is there or not */
-	mfld_jack_check(MFLD_JACK_INSERT);
-	return ret_val;
-}
-
-static struct snd_soc_dai_link mfld_msic_dailink[] = {
-	{
-		.name = "Medfield Headset",
-		.stream_name = "Headset",
-		.cpu_dai_name = "Headset-cpu-dai",
-		.codec_dai_name = "SN95031 Headset",
-		.codec_name = "sn95031",
-		.platform_name = "sst-platform",
-		.init = mfld_init,
-	},
-	{
-		.name = "Medfield Speaker",
-		.stream_name = "Speaker",
-		.cpu_dai_name = "Speaker-cpu-dai",
-		.codec_dai_name = "SN95031 Speaker",
-		.codec_name = "sn95031",
-		.platform_name = "sst-platform",
-		.init = NULL,
-	},
-	{
-		.name = "Medfield Vibra",
-		.stream_name = "Vibra1",
-		.cpu_dai_name = "Vibra1-cpu-dai",
-		.codec_dai_name = "SN95031 Vibra1",
-		.codec_name = "sn95031",
-		.platform_name = "sst-platform",
-		.init = NULL,
-	},
-	{
-		.name = "Medfield Haptics",
-		.stream_name = "Vibra2",
-		.cpu_dai_name = "Vibra2-cpu-dai",
-		.codec_dai_name = "SN95031 Vibra2",
-		.codec_name = "sn95031",
-		.platform_name = "sst-platform",
-		.init = NULL,
-	},
-	{
-		.name = "Medfield Compress",
-		.stream_name = "Speaker",
-		.cpu_dai_name = "Compress-cpu-dai",
-		.codec_dai_name = "SN95031 Speaker",
-		.codec_name = "sn95031",
-		.platform_name = "sst-platform",
-		.init = NULL,
-	},
-};
-
-/* SoC card */
-static struct snd_soc_card snd_soc_card_mfld = {
-	.name = "medfield_audio",
-	.owner = THIS_MODULE,
-	.dai_link = mfld_msic_dailink,
-	.num_links = ARRAY_SIZE(mfld_msic_dailink),
-};
-
-static irqreturn_t snd_mfld_jack_intr_handler(int irq, void *dev)
-{
-	struct mfld_mc_private *mc_private = (struct mfld_mc_private *) dev;
-
-	memcpy_fromio(&mc_private->interrupt_status,
-			((void *)(mc_private->int_base)),
-			sizeof(u8));
-	return IRQ_WAKE_THREAD;
-}
-
-static irqreturn_t snd_mfld_jack_detection(int irq, void *data)
-{
-	struct mfld_mc_private *mc_drv_ctx = (struct mfld_mc_private *) data;
-
-	if (mfld_jack.codec == NULL)
-		return IRQ_HANDLED;
-	mfld_jack_check(mc_drv_ctx->interrupt_status);
-
-	return IRQ_HANDLED;
-}
-
-static int snd_mfld_mc_probe(struct platform_device *pdev)
-{
-	int ret_val = 0, irq;
-	struct mfld_mc_private *mc_drv_ctx;
-	struct resource *irq_mem;
-
-	pr_debug("snd_mfld_mc_probe called\n");
-
-	/* retrive the irq number */
-	irq = platform_get_irq(pdev, 0);
-
-	/* audio interrupt base of SRAM location where
-	 * interrupts are stored by System FW */
-	mc_drv_ctx = kzalloc(sizeof(*mc_drv_ctx), GFP_ATOMIC);
-	if (!mc_drv_ctx) {
-		pr_err("allocation failed\n");
-		return -ENOMEM;
-	}
-
-	irq_mem = platform_get_resource_byname(
-				pdev, IORESOURCE_MEM, "IRQ_BASE");
-	if (!irq_mem) {
-		pr_err("no mem resource given\n");
-		ret_val = -ENODEV;
-		goto unalloc;
-	}
-	mc_drv_ctx->int_base = ioremap_nocache(irq_mem->start,
-					resource_size(irq_mem));
-	if (!mc_drv_ctx->int_base) {
-		pr_err("Mapping of cache failed\n");
-		ret_val = -ENOMEM;
-		goto unalloc;
-	}
-	/* register for interrupt */
-	ret_val = request_threaded_irq(irq, snd_mfld_jack_intr_handler,
-			snd_mfld_jack_detection,
-			IRQF_SHARED, pdev->dev.driver->name, mc_drv_ctx);
-	if (ret_val) {
-		pr_err("cannot register IRQ\n");
-		goto unalloc;
-	}
-	/* register the soc card */
-	snd_soc_card_mfld.dev = &pdev->dev;
-	ret_val = snd_soc_register_card(&snd_soc_card_mfld);
-	if (ret_val) {
-		pr_debug("snd_soc_register_card failed %d\n", ret_val);
-		goto freeirq;
-	}
-	platform_set_drvdata(pdev, mc_drv_ctx);
-	pr_debug("successfully exited probe\n");
-	return ret_val;
-
-freeirq:
-	free_irq(irq, mc_drv_ctx);
-unalloc:
-	kfree(mc_drv_ctx);
-	return ret_val;
-}
-
-static int snd_mfld_mc_remove(struct platform_device *pdev)
-{
-	struct mfld_mc_private *mc_drv_ctx = platform_get_drvdata(pdev);
-
-	pr_debug("snd_mfld_mc_remove called\n");
-	free_irq(platform_get_irq(pdev, 0), mc_drv_ctx);
-	snd_soc_unregister_card(&snd_soc_card_mfld);
-	kfree(mc_drv_ctx);
-	platform_set_drvdata(pdev, NULL);
-	return 0;
-}
-
-static struct platform_driver snd_mfld_mc_driver = {
-	.driver = {
-		.owner = THIS_MODULE,
-		.name = "msic_audio",
-	},
-	.probe = snd_mfld_mc_probe,
-	.remove = snd_mfld_mc_remove,
-};
-
-module_platform_driver(snd_mfld_mc_driver);
-
-MODULE_DESCRIPTION("ASoC Intel(R) MID Machine driver");
-MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
-MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:msic-audio");
diff --git a/sound/soc/mid-x86/sst_dsp.h b/sound/soc/mid-x86/sst_dsp.h
deleted file mode 100644
index 0fce1de..0000000
--- a/sound/soc/mid-x86/sst_dsp.h
+++ /dev/null
@@ -1,134 +0,0 @@
-#ifndef __SST_DSP_H__
-#define __SST_DSP_H__
-/*
- *  sst_dsp.h - Intel SST Driver for audio engine
- *
- *  Copyright (C) 2008-12 Intel Corporation
- *  Authors:	Vinod Koul <vinod.koul@linux.intel.com>
- *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; version 2 of the License.
- *
- *  This program is distributed in the hope that it will be useful, but
- *  WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *  General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-
-enum sst_codec_types {
-	/*  AUDIO/MUSIC	CODEC Type Definitions */
-	SST_CODEC_TYPE_UNKNOWN = 0,
-	SST_CODEC_TYPE_PCM,	/* Pass through Audio codec */
-	SST_CODEC_TYPE_MP3,
-	SST_CODEC_TYPE_MP24,
-	SST_CODEC_TYPE_AAC,
-	SST_CODEC_TYPE_AACP,
-	SST_CODEC_TYPE_eAACP,
-};
-
-enum stream_type {
-	SST_STREAM_TYPE_NONE = 0,
-	SST_STREAM_TYPE_MUSIC = 1,
-};
-
-struct snd_pcm_params {
-	u16 codec;	/* codec type */
-	u8 num_chan;	/* 1=Mono, 2=Stereo */
-	u8 pcm_wd_sz;	/* 16/24 - bit*/
-	u32 reserved;	/* Bitrate in bits per second */
-	u32 sfreq;	/* Sampling rate in Hz */
-	u8 use_offload_path;
-	u8 reserved2;
-	u16 reserved3;
-	u8 channel_map[8];
-} __packed;
-
-/* MP3 Music Parameters Message */
-struct snd_mp3_params {
-	u16 codec;
-	u8  num_chan;	/* 1=Mono, 2=Stereo	*/
-	u8  pcm_wd_sz; /* 16/24 - bit*/
-	u8  crc_check; /* crc_check - disable (0) or enable (1) */
-	u8  reserved1; /* unused*/
-	u16 reserved2;	/* Unused */
-} __packed;
-
-#define AAC_BIT_STREAM_ADTS		0
-#define AAC_BIT_STREAM_ADIF		1
-#define AAC_BIT_STREAM_RAW		2
-
-/* AAC Music Parameters Message */
-struct snd_aac_params {
-	u16 codec;
-	u8 num_chan; /* 1=Mono, 2=Stereo*/
-	u8 pcm_wd_sz; /* 16/24 - bit*/
-	u8 bdownsample; /*SBR downsampling 0 - disable 1 -enabled AAC+ only */
-	u8 bs_format; /* input bit stream format adts=0, adif=1, raw=2 */
-	u16  reser2;
-	u32 externalsr; /*sampling rate of basic AAC raw bit stream*/
-	u8 sbr_signalling;/*disable/enable/set automode the SBR tool.AAC+*/
-	u8 reser1;
-	u16  reser3;
-} __packed;
-
-/* WMA Music Parameters Message */
-struct snd_wma_params {
-	u16 codec;
-	u8  num_chan;	/* 1=Mono, 2=Stereo */
-	u8  pcm_wd_sz;	/* 16/24 - bit*/
-	u32 brate;	/* Use the hard coded value. */
-	u32 sfreq;	/* Sampling freq eg. 8000, 441000, 48000 */
-	u32 channel_mask;  /* Channel Mask */
-	u16 format_tag;	/* Format Tag */
-	u16 block_align;	/* packet size */
-	u16 wma_encode_opt;/* Encoder option */
-	u8 op_align;	/* op align 0- 16 bit, 1- MSB, 2 LSB */
-	u8 reserved;	/* reserved */
-} __packed;
-
-/* Codec params struture */
-union  snd_sst_codec_params {
-	struct snd_pcm_params pcm_params;
-	struct snd_mp3_params mp3_params;
-	struct snd_aac_params aac_params;
-	struct snd_wma_params wma_params;
-} __packed;
-
-/* Address and size info of a frame buffer */
-struct sst_address_info {
-	u32 addr; /* Address at IA */
-	u32 size; /* Size of the buffer */
-};
-
-struct snd_sst_alloc_params_ext {
-	struct sst_address_info  ring_buf_info[8];
-	u8 sg_count;
-	u8 reserved;
-	u16 reserved2;
-	u32 frag_size;	/*Number of samples after which period elapsed
-				  message is sent valid only if path  = 0*/
-} __packed;
-
-struct snd_sst_stream_params {
-	union snd_sst_codec_params uc;
-} __packed;
-
-struct snd_sst_params {
-	u32 stream_id;
-	u8 codec;
-	u8 ops;
-	u8 stream_type;
-	u8 device_type;
-	struct snd_sst_stream_params sparams;
-	struct snd_sst_alloc_params_ext aparams;
-};
-
-#endif /* __SST_DSP_H__ */
diff --git a/sound/soc/mid-x86/sst_platform.c b/sound/soc/mid-x86/sst_platform.c
deleted file mode 100644
index 392fc0b..0000000
--- a/sound/soc/mid-x86/sst_platform.c
+++ /dev/null
@@ -1,733 +0,0 @@
-/*
- *  sst_platform.c - Intel MID Platform driver
- *
- *  Copyright (C) 2010-2013 Intel Corp
- *  Author: Vinod Koul <vinod.koul@intel.com>
- *  Author: Harsha Priya <priya.harsha@intel.com>
- *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; version 2 of the License.
- *
- *  This program is distributed in the hope that it will be useful, but
- *  WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *  General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-#include <sound/compress_driver.h>
-#include "sst_platform.h"
-
-static struct sst_device *sst;
-static DEFINE_MUTEX(sst_lock);
-
-int sst_register_dsp(struct sst_device *dev)
-{
-	BUG_ON(!dev);
-	if (!try_module_get(dev->dev->driver->owner))
-		return -ENODEV;
-	mutex_lock(&sst_lock);
-	if (sst) {
-		pr_err("we already have a device %s\n", sst->name);
-		module_put(dev->dev->driver->owner);
-		mutex_unlock(&sst_lock);
-		return -EEXIST;
-	}
-	pr_debug("registering device %s\n", dev->name);
-	sst = dev;
-	mutex_unlock(&sst_lock);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(sst_register_dsp);
-
-int sst_unregister_dsp(struct sst_device *dev)
-{
-	BUG_ON(!dev);
-	if (dev != sst)
-		return -EINVAL;
-
-	mutex_lock(&sst_lock);
-
-	if (!sst) {
-		mutex_unlock(&sst_lock);
-		return -EIO;
-	}
-
-	module_put(sst->dev->driver->owner);
-	pr_debug("unreg %s\n", sst->name);
-	sst = NULL;
-	mutex_unlock(&sst_lock);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(sst_unregister_dsp);
-
-static struct snd_pcm_hardware sst_platform_pcm_hw = {
-	.info =	(SNDRV_PCM_INFO_INTERLEAVED |
-			SNDRV_PCM_INFO_DOUBLE |
-			SNDRV_PCM_INFO_PAUSE |
-			SNDRV_PCM_INFO_RESUME |
-			SNDRV_PCM_INFO_MMAP|
-			SNDRV_PCM_INFO_MMAP_VALID |
-			SNDRV_PCM_INFO_BLOCK_TRANSFER |
-			SNDRV_PCM_INFO_SYNC_START),
-	.formats = (SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_U16 |
-			SNDRV_PCM_FMTBIT_S24 | SNDRV_PCM_FMTBIT_U24 |
-			SNDRV_PCM_FMTBIT_S32 | SNDRV_PCM_FMTBIT_U32),
-	.rates = (SNDRV_PCM_RATE_8000|
-			SNDRV_PCM_RATE_44100 |
-			SNDRV_PCM_RATE_48000),
-	.rate_min = SST_MIN_RATE,
-	.rate_max = SST_MAX_RATE,
-	.channels_min =	SST_MIN_CHANNEL,
-	.channels_max =	SST_MAX_CHANNEL,
-	.buffer_bytes_max = SST_MAX_BUFFER,
-	.period_bytes_min = SST_MIN_PERIOD_BYTES,
-	.period_bytes_max = SST_MAX_PERIOD_BYTES,
-	.periods_min = SST_MIN_PERIODS,
-	.periods_max = SST_MAX_PERIODS,
-	.fifo_size = SST_FIFO_SIZE,
-};
-
-/* MFLD - MSIC */
-static struct snd_soc_dai_driver sst_platform_dai[] = {
-{
-	.name = "Headset-cpu-dai",
-	.id = 0,
-	.playback = {
-		.channels_min = SST_STEREO,
-		.channels_max = SST_STEREO,
-		.rates = SNDRV_PCM_RATE_48000,
-		.formats = SNDRV_PCM_FMTBIT_S24_LE,
-	},
-	.capture = {
-		.channels_min = 1,
-		.channels_max = 5,
-		.rates = SNDRV_PCM_RATE_48000,
-		.formats = SNDRV_PCM_FMTBIT_S24_LE,
-	},
-},
-{
-	.name = "Speaker-cpu-dai",
-	.id = 1,
-	.playback = {
-		.channels_min = SST_MONO,
-		.channels_max = SST_STEREO,
-		.rates = SNDRV_PCM_RATE_48000,
-		.formats = SNDRV_PCM_FMTBIT_S24_LE,
-	},
-},
-{
-	.name = "Vibra1-cpu-dai",
-	.id = 2,
-	.playback = {
-		.channels_min = SST_MONO,
-		.channels_max = SST_MONO,
-		.rates = SNDRV_PCM_RATE_48000,
-		.formats = SNDRV_PCM_FMTBIT_S24_LE,
-	},
-},
-{
-	.name = "Vibra2-cpu-dai",
-	.id = 3,
-	.playback = {
-		.channels_min = SST_MONO,
-		.channels_max = SST_STEREO,
-		.rates = SNDRV_PCM_RATE_48000,
-		.formats = SNDRV_PCM_FMTBIT_S24_LE,
-	},
-},
-{
-	.name = "Compress-cpu-dai",
-	.compress_dai = 1,
-	.playback = {
-		.channels_min = SST_STEREO,
-		.channels_max = SST_STEREO,
-		.rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,
-	},
-},
-};
-
-static const struct snd_soc_component_driver sst_component = {
-	.name		= "sst",
-};
-
-/* helper functions */
-static inline void sst_set_stream_status(struct sst_runtime_stream *stream,
-					int state)
-{
-	unsigned long flags;
-	spin_lock_irqsave(&stream->status_lock, flags);
-	stream->stream_status = state;
-	spin_unlock_irqrestore(&stream->status_lock, flags);
-}
-
-static inline int sst_get_stream_status(struct sst_runtime_stream *stream)
-{
-	int state;
-	unsigned long flags;
-
-	spin_lock_irqsave(&stream->status_lock, flags);
-	state = stream->stream_status;
-	spin_unlock_irqrestore(&stream->status_lock, flags);
-	return state;
-}
-
-static void sst_fill_pcm_params(struct snd_pcm_substream *substream,
-				struct sst_pcm_params *param)
-{
-
-	param->codec = SST_CODEC_TYPE_PCM;
-	param->num_chan = (u8) substream->runtime->channels;
-	param->pcm_wd_sz = substream->runtime->sample_bits;
-	param->reserved = 0;
-	param->sfreq = substream->runtime->rate;
-	param->ring_buffer_size = snd_pcm_lib_buffer_bytes(substream);
-	param->period_count = substream->runtime->period_size;
-	param->ring_buffer_addr = virt_to_phys(substream->dma_buffer.area);
-	pr_debug("period_cnt = %d\n", param->period_count);
-	pr_debug("sfreq= %d, wd_sz = %d\n", param->sfreq, param->pcm_wd_sz);
-}
-
-static int sst_platform_alloc_stream(struct snd_pcm_substream *substream)
-{
-	struct sst_runtime_stream *stream =
-			substream->runtime->private_data;
-	struct sst_pcm_params param = {0};
-	struct sst_stream_params str_params = {0};
-	int ret_val;
-
-	/* set codec params and inform SST driver the same */
-	sst_fill_pcm_params(substream, &param);
-	substream->runtime->dma_area = substream->dma_buffer.area;
-	str_params.sparams = param;
-	str_params.codec =  param.codec;
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		str_params.ops = STREAM_OPS_PLAYBACK;
-		str_params.device_type = substream->pcm->device + 1;
-		pr_debug("Playbck stream,Device %d\n",
-					substream->pcm->device);
-	} else {
-		str_params.ops = STREAM_OPS_CAPTURE;
-		str_params.device_type = SND_SST_DEVICE_CAPTURE;
-		pr_debug("Capture stream,Device %d\n",
-					substream->pcm->device);
-	}
-	ret_val = stream->ops->open(&str_params);
-	pr_debug("SST_SND_PLAY/CAPTURE ret_val = %x\n", ret_val);
-	if (ret_val < 0)
-		return ret_val;
-
-	stream->stream_info.str_id = ret_val;
-	pr_debug("str id :  %d\n", stream->stream_info.str_id);
-	return ret_val;
-}
-
-static void sst_period_elapsed(void *mad_substream)
-{
-	struct snd_pcm_substream *substream = mad_substream;
-	struct sst_runtime_stream *stream;
-	int status;
-
-	if (!substream || !substream->runtime)
-		return;
-	stream = substream->runtime->private_data;
-	if (!stream)
-		return;
-	status = sst_get_stream_status(stream);
-	if (status != SST_PLATFORM_RUNNING)
-		return;
-	snd_pcm_period_elapsed(substream);
-}
-
-static int sst_platform_init_stream(struct snd_pcm_substream *substream)
-{
-	struct sst_runtime_stream *stream =
-			substream->runtime->private_data;
-	int ret_val;
-
-	pr_debug("setting buffer ptr param\n");
-	sst_set_stream_status(stream, SST_PLATFORM_INIT);
-	stream->stream_info.period_elapsed = sst_period_elapsed;
-	stream->stream_info.mad_substream = substream;
-	stream->stream_info.buffer_ptr = 0;
-	stream->stream_info.sfreq = substream->runtime->rate;
-	ret_val = stream->ops->device_control(
-			SST_SND_STREAM_INIT, &stream->stream_info);
-	if (ret_val)
-		pr_err("control_set ret error %d\n", ret_val);
-	return ret_val;
-
-}
-/* end -- helper functions */
-
-static int sst_platform_open(struct snd_pcm_substream *substream)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct sst_runtime_stream *stream;
-	int ret_val;
-
-	pr_debug("sst_platform_open called\n");
-
-	snd_soc_set_runtime_hwparams(substream, &sst_platform_pcm_hw);
-	ret_val = snd_pcm_hw_constraint_integer(runtime,
-						SNDRV_PCM_HW_PARAM_PERIODS);
-	if (ret_val < 0)
-		return ret_val;
-
-	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
-	if (!stream)
-		return -ENOMEM;
-	spin_lock_init(&stream->status_lock);
-
-	/* get the sst ops */
-	mutex_lock(&sst_lock);
-	if (!sst) {
-		pr_err("no device available to run\n");
-		mutex_unlock(&sst_lock);
-		kfree(stream);
-		return -ENODEV;
-	}
-	if (!try_module_get(sst->dev->driver->owner)) {
-		mutex_unlock(&sst_lock);
-		kfree(stream);
-		return -ENODEV;
-	}
-	stream->ops = sst->ops;
-	mutex_unlock(&sst_lock);
-
-	stream->stream_info.str_id = 0;
-	sst_set_stream_status(stream, SST_PLATFORM_INIT);
-	stream->stream_info.mad_substream = substream;
-	/* allocate memory for SST API set */
-	runtime->private_data = stream;
-
-	return 0;
-}
-
-static int sst_platform_close(struct snd_pcm_substream *substream)
-{
-	struct sst_runtime_stream *stream;
-	int ret_val = 0, str_id;
-
-	pr_debug("sst_platform_close called\n");
-	stream = substream->runtime->private_data;
-	str_id = stream->stream_info.str_id;
-	if (str_id)
-		ret_val = stream->ops->close(str_id);
-	module_put(sst->dev->driver->owner);
-	kfree(stream);
-	return ret_val;
-}
-
-static int sst_platform_pcm_prepare(struct snd_pcm_substream *substream)
-{
-	struct sst_runtime_stream *stream;
-	int ret_val = 0, str_id;
-
-	pr_debug("sst_platform_pcm_prepare called\n");
-	stream = substream->runtime->private_data;
-	str_id = stream->stream_info.str_id;
-	if (stream->stream_info.str_id) {
-		ret_val = stream->ops->device_control(
-				SST_SND_DROP, &str_id);
-		return ret_val;
-	}
-
-	ret_val = sst_platform_alloc_stream(substream);
-	if (ret_val < 0)
-		return ret_val;
-	snprintf(substream->pcm->id, sizeof(substream->pcm->id),
-			"%d", stream->stream_info.str_id);
-
-	ret_val = sst_platform_init_stream(substream);
-	if (ret_val)
-		return ret_val;
-	substream->runtime->hw.info = SNDRV_PCM_INFO_BLOCK_TRANSFER;
-	return ret_val;
-}
-
-static int sst_platform_pcm_trigger(struct snd_pcm_substream *substream,
-					int cmd)
-{
-	int ret_val = 0, str_id;
-	struct sst_runtime_stream *stream;
-	int str_cmd, status;
-
-	pr_debug("sst_platform_pcm_trigger called\n");
-	stream = substream->runtime->private_data;
-	str_id = stream->stream_info.str_id;
-	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
-		pr_debug("sst: Trigger Start\n");
-		str_cmd = SST_SND_START;
-		status = SST_PLATFORM_RUNNING;
-		stream->stream_info.mad_substream = substream;
-		break;
-	case SNDRV_PCM_TRIGGER_STOP:
-		pr_debug("sst: in stop\n");
-		str_cmd = SST_SND_DROP;
-		status = SST_PLATFORM_DROPPED;
-		break;
-	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-		pr_debug("sst: in pause\n");
-		str_cmd = SST_SND_PAUSE;
-		status = SST_PLATFORM_PAUSED;
-		break;
-	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-		pr_debug("sst: in pause release\n");
-		str_cmd = SST_SND_RESUME;
-		status = SST_PLATFORM_RUNNING;
-		break;
-	default:
-		return -EINVAL;
-	}
-	ret_val = stream->ops->device_control(str_cmd, &str_id);
-	if (!ret_val)
-		sst_set_stream_status(stream, status);
-
-	return ret_val;
-}
-
-
-static snd_pcm_uframes_t sst_platform_pcm_pointer
-			(struct snd_pcm_substream *substream)
-{
-	struct sst_runtime_stream *stream;
-	int ret_val, status;
-	struct pcm_stream_info *str_info;
-
-	stream = substream->runtime->private_data;
-	status = sst_get_stream_status(stream);
-	if (status == SST_PLATFORM_INIT)
-		return 0;
-	str_info = &stream->stream_info;
-	ret_val = stream->ops->device_control(
-				SST_SND_BUFFER_POINTER, str_info);
-	if (ret_val) {
-		pr_err("sst: error code = %d\n", ret_val);
-		return ret_val;
-	}
-	return stream->stream_info.buffer_ptr;
-}
-
-static int sst_platform_pcm_hw_params(struct snd_pcm_substream *substream,
-		struct snd_pcm_hw_params *params)
-{
-	snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
-	memset(substream->runtime->dma_area, 0, params_buffer_bytes(params));
-
-	return 0;
-}
-
-static int sst_platform_pcm_hw_free(struct snd_pcm_substream *substream)
-{
-	return snd_pcm_lib_free_pages(substream);
-}
-
-static struct snd_pcm_ops sst_platform_ops = {
-	.open = sst_platform_open,
-	.close = sst_platform_close,
-	.ioctl = snd_pcm_lib_ioctl,
-	.prepare = sst_platform_pcm_prepare,
-	.trigger = sst_platform_pcm_trigger,
-	.pointer = sst_platform_pcm_pointer,
-	.hw_params = sst_platform_pcm_hw_params,
-	.hw_free = sst_platform_pcm_hw_free,
-};
-
-static void sst_pcm_free(struct snd_pcm *pcm)
-{
-	pr_debug("sst_pcm_free called\n");
-	snd_pcm_lib_preallocate_free_for_all(pcm);
-}
-
-static int sst_pcm_new(struct snd_soc_pcm_runtime *rtd)
-{
-	struct snd_pcm *pcm = rtd->pcm;
-	int retval = 0;
-
-	pr_debug("sst_pcm_new called\n");
-	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream ||
-			pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
-		retval =  snd_pcm_lib_preallocate_pages_for_all(pcm,
-			SNDRV_DMA_TYPE_CONTINUOUS,
-			snd_dma_continuous_data(GFP_KERNEL),
-			SST_MIN_BUFFER, SST_MAX_BUFFER);
-		if (retval) {
-			pr_err("dma buffer allocationf fail\n");
-			return retval;
-		}
-	}
-	return retval;
-}
-
-/* compress stream operations */
-static void sst_compr_fragment_elapsed(void *arg)
-{
-	struct snd_compr_stream *cstream = (struct snd_compr_stream *)arg;
-
-	pr_debug("fragment elapsed by driver\n");
-	if (cstream)
-		snd_compr_fragment_elapsed(cstream);
-}
-
-static int sst_platform_compr_open(struct snd_compr_stream *cstream)
-{
-
-	int ret_val = 0;
-	struct snd_compr_runtime *runtime = cstream->runtime;
-	struct sst_runtime_stream *stream;
-
-	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
-	if (!stream)
-		return -ENOMEM;
-
-	spin_lock_init(&stream->status_lock);
-
-	/* get the sst ops */
-	if (!sst || !try_module_get(sst->dev->driver->owner)) {
-		pr_err("no device available to run\n");
-		ret_val = -ENODEV;
-		goto out_ops;
-	}
-	stream->compr_ops = sst->compr_ops;
-
-	stream->id = 0;
-	sst_set_stream_status(stream, SST_PLATFORM_INIT);
-	runtime->private_data = stream;
-	return 0;
-out_ops:
-	kfree(stream);
-	return ret_val;
-}
-
-static int sst_platform_compr_free(struct snd_compr_stream *cstream)
-{
-	struct sst_runtime_stream *stream;
-	int ret_val = 0, str_id;
-
-	stream = cstream->runtime->private_data;
-	/*need to check*/
-	str_id = stream->id;
-	if (str_id)
-		ret_val = stream->compr_ops->close(str_id);
-	module_put(sst->dev->driver->owner);
-	kfree(stream);
-	pr_debug("%s: %d\n", __func__, ret_val);
-	return 0;
-}
-
-static int sst_platform_compr_set_params(struct snd_compr_stream *cstream,
-					struct snd_compr_params *params)
-{
-	struct sst_runtime_stream *stream;
-	int retval;
-	struct snd_sst_params str_params;
-	struct sst_compress_cb cb;
-
-	stream = cstream->runtime->private_data;
-	/* construct fw structure for this*/
-	memset(&str_params, 0, sizeof(str_params));
-
-	str_params.ops = STREAM_OPS_PLAYBACK;
-	str_params.stream_type = SST_STREAM_TYPE_MUSIC;
-	str_params.device_type = SND_SST_DEVICE_COMPRESS;
-
-	switch (params->codec.id) {
-	case SND_AUDIOCODEC_MP3: {
-		str_params.codec = SST_CODEC_TYPE_MP3;
-		str_params.sparams.uc.mp3_params.codec = SST_CODEC_TYPE_MP3;
-		str_params.sparams.uc.mp3_params.num_chan = params->codec.ch_in;
-		str_params.sparams.uc.mp3_params.pcm_wd_sz = 16;
-		break;
-	}
-
-	case SND_AUDIOCODEC_AAC: {
-		str_params.codec = SST_CODEC_TYPE_AAC;
-		str_params.sparams.uc.aac_params.codec = SST_CODEC_TYPE_AAC;
-		str_params.sparams.uc.aac_params.num_chan = params->codec.ch_in;
-		str_params.sparams.uc.aac_params.pcm_wd_sz = 16;
-		if (params->codec.format == SND_AUDIOSTREAMFORMAT_MP4ADTS)
-			str_params.sparams.uc.aac_params.bs_format =
-							AAC_BIT_STREAM_ADTS;
-		else if (params->codec.format == SND_AUDIOSTREAMFORMAT_RAW)
-			str_params.sparams.uc.aac_params.bs_format =
-							AAC_BIT_STREAM_RAW;
-		else {
-			pr_err("Undefined format%d\n", params->codec.format);
-			return -EINVAL;
-		}
-		str_params.sparams.uc.aac_params.externalsr =
-						params->codec.sample_rate;
-		break;
-	}
-
-	default:
-		pr_err("codec not supported, id =%d\n", params->codec.id);
-		return -EINVAL;
-	}
-
-	str_params.aparams.ring_buf_info[0].addr  =
-					virt_to_phys(cstream->runtime->buffer);
-	str_params.aparams.ring_buf_info[0].size =
-					cstream->runtime->buffer_size;
-	str_params.aparams.sg_count = 1;
-	str_params.aparams.frag_size = cstream->runtime->fragment_size;
-
-	cb.param = cstream;
-	cb.compr_cb = sst_compr_fragment_elapsed;
-
-	retval = stream->compr_ops->open(&str_params, &cb);
-	if (retval < 0) {
-		pr_err("stream allocation failed %d\n", retval);
-		return retval;
-	}
-
-	stream->id = retval;
-	return 0;
-}
-
-static int sst_platform_compr_trigger(struct snd_compr_stream *cstream, int cmd)
-{
-	struct sst_runtime_stream *stream =
-		cstream->runtime->private_data;
-
-	return stream->compr_ops->control(cmd, stream->id);
-}
-
-static int sst_platform_compr_pointer(struct snd_compr_stream *cstream,
-					struct snd_compr_tstamp *tstamp)
-{
-	struct sst_runtime_stream *stream;
-
-	stream  = cstream->runtime->private_data;
-	stream->compr_ops->tstamp(stream->id, tstamp);
-	tstamp->byte_offset = tstamp->copied_total %
-				 (u32)cstream->runtime->buffer_size;
-	pr_debug("calc bytes offset/copied bytes as %d\n", tstamp->byte_offset);
-	return 0;
-}
-
-static int sst_platform_compr_ack(struct snd_compr_stream *cstream,
-					size_t bytes)
-{
-	struct sst_runtime_stream *stream;
-
-	stream  = cstream->runtime->private_data;
-	stream->compr_ops->ack(stream->id, (unsigned long)bytes);
-	stream->bytes_written += bytes;
-
-	return 0;
-}
-
-static int sst_platform_compr_get_caps(struct snd_compr_stream *cstream,
-					struct snd_compr_caps *caps)
-{
-	struct sst_runtime_stream *stream =
-		cstream->runtime->private_data;
-
-	return stream->compr_ops->get_caps(caps);
-}
-
-static int sst_platform_compr_get_codec_caps(struct snd_compr_stream *cstream,
-					struct snd_compr_codec_caps *codec)
-{
-	struct sst_runtime_stream *stream =
-		cstream->runtime->private_data;
-
-	return stream->compr_ops->get_codec_caps(codec);
-}
-
-static int sst_platform_compr_set_metadata(struct snd_compr_stream *cstream,
-					struct snd_compr_metadata *metadata)
-{
-	struct sst_runtime_stream *stream  =
-		 cstream->runtime->private_data;
-
-	return stream->compr_ops->set_metadata(stream->id, metadata);
-}
-
-static struct snd_compr_ops sst_platform_compr_ops = {
-
-	.open = sst_platform_compr_open,
-	.free = sst_platform_compr_free,
-	.set_params = sst_platform_compr_set_params,
-	.set_metadata = sst_platform_compr_set_metadata,
-	.trigger = sst_platform_compr_trigger,
-	.pointer = sst_platform_compr_pointer,
-	.ack = sst_platform_compr_ack,
-	.get_caps = sst_platform_compr_get_caps,
-	.get_codec_caps = sst_platform_compr_get_codec_caps,
-};
-
-static struct snd_soc_platform_driver sst_soc_platform_drv = {
-	.ops		= &sst_platform_ops,
-	.compr_ops	= &sst_platform_compr_ops,
-	.pcm_new	= sst_pcm_new,
-	.pcm_free	= sst_pcm_free,
-};
-
-static int sst_platform_probe(struct platform_device *pdev)
-{
-	int ret;
-
-	pr_debug("sst_platform_probe called\n");
-	sst = NULL;
-	ret = snd_soc_register_platform(&pdev->dev, &sst_soc_platform_drv);
-	if (ret) {
-		pr_err("registering soc platform failed\n");
-		return ret;
-	}
-
-	ret = snd_soc_register_component(&pdev->dev, &sst_component,
-				sst_platform_dai, ARRAY_SIZE(sst_platform_dai));
-	if (ret) {
-		pr_err("registering cpu dais failed\n");
-		snd_soc_unregister_platform(&pdev->dev);
-	}
-	return ret;
-}
-
-static int sst_platform_remove(struct platform_device *pdev)
-{
-
-	snd_soc_unregister_component(&pdev->dev);
-	snd_soc_unregister_platform(&pdev->dev);
-	pr_debug("sst_platform_remove success\n");
-	return 0;
-}
-
-static struct platform_driver sst_platform_driver = {
-	.driver		= {
-		.name		= "sst-platform",
-		.owner		= THIS_MODULE,
-	},
-	.probe		= sst_platform_probe,
-	.remove		= sst_platform_remove,
-};
-
-module_platform_driver(sst_platform_driver);
-
-MODULE_DESCRIPTION("ASoC Intel(R) MID Platform driver");
-MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
-MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:sst-platform");
diff --git a/sound/soc/mid-x86/sst_platform.h b/sound/soc/mid-x86/sst_platform.h
deleted file mode 100644
index cacc906..0000000
--- a/sound/soc/mid-x86/sst_platform.h
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- *  sst_platform.h - Intel MID Platform driver header file
- *
- *  Copyright (C) 2010 Intel Corp
- *  Author: Vinod Koul <vinod.koul@intel.com>
- *  Author: Harsha Priya <priya.harsha@intel.com>
- *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; version 2 of the License.
- *
- *  This program is distributed in the hope that it will be useful, but
- *  WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *  General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *
- */
-
-#ifndef __SST_PLATFORMDRV_H__
-#define __SST_PLATFORMDRV_H__
-
-#include "sst_dsp.h"
-
-#define SST_MONO		1
-#define SST_STEREO		2
-#define SST_MAX_CAP		5
-
-#define SST_MIN_RATE		8000
-#define SST_MAX_RATE		48000
-#define SST_MIN_CHANNEL		1
-#define SST_MAX_CHANNEL		5
-#define SST_MAX_BUFFER		(800*1024)
-#define SST_MIN_BUFFER		(800*1024)
-#define SST_MIN_PERIOD_BYTES	32
-#define SST_MAX_PERIOD_BYTES	SST_MAX_BUFFER
-#define SST_MIN_PERIODS		2
-#define SST_MAX_PERIODS		(1024*2)
-#define SST_FIFO_SIZE		0
-
-struct pcm_stream_info {
-	int str_id;
-	void *mad_substream;
-	void (*period_elapsed) (void *mad_substream);
-	unsigned long long buffer_ptr;
-	int sfreq;
-};
-
-enum sst_drv_status {
-	SST_PLATFORM_INIT = 1,
-	SST_PLATFORM_STARTED,
-	SST_PLATFORM_RUNNING,
-	SST_PLATFORM_PAUSED,
-	SST_PLATFORM_DROPPED,
-};
-
-enum sst_controls {
-	SST_SND_ALLOC =			0x00,
-	SST_SND_PAUSE =			0x01,
-	SST_SND_RESUME =		0x02,
-	SST_SND_DROP =			0x03,
-	SST_SND_FREE =			0x04,
-	SST_SND_BUFFER_POINTER =	0x05,
-	SST_SND_STREAM_INIT =		0x06,
-	SST_SND_START	 =		0x07,
-	SST_MAX_CONTROLS =		0x07,
-};
-
-enum sst_stream_ops {
-	STREAM_OPS_PLAYBACK = 0,
-	STREAM_OPS_CAPTURE,
-};
-
-enum sst_audio_device_type {
-	SND_SST_DEVICE_HEADSET = 1,
-	SND_SST_DEVICE_IHF,
-	SND_SST_DEVICE_VIBRA,
-	SND_SST_DEVICE_HAPTIC,
-	SND_SST_DEVICE_CAPTURE,
-	SND_SST_DEVICE_COMPRESS,
-};
-
-/* PCM Parameters */
-struct sst_pcm_params {
-	u16 codec;	/* codec type */
-	u8 num_chan;	/* 1=Mono, 2=Stereo */
-	u8 pcm_wd_sz;	/* 16/24 - bit*/
-	u32 reserved;	/* Bitrate in bits per second */
-	u32 sfreq;	/* Sampling rate in Hz */
-	u32 ring_buffer_size;
-	u32 period_count;	/* period elapsed in samples*/
-	u32 ring_buffer_addr;
-};
-
-struct sst_stream_params {
-	u32 result;
-	u32 stream_id;
-	u8 codec;
-	u8 ops;
-	u8 stream_type;
-	u8 device_type;
-	struct sst_pcm_params sparams;
-};
-
-struct sst_compress_cb {
-	void *param;
-	void (*compr_cb)(void *param);
-};
-
-struct compress_sst_ops {
-	const char *name;
-	int (*open) (struct snd_sst_params *str_params,
-			struct sst_compress_cb *cb);
-	int (*control) (unsigned int cmd, unsigned int str_id);
-	int (*tstamp) (unsigned int str_id, struct snd_compr_tstamp *tstamp);
-	int (*ack) (unsigned int str_id, unsigned long bytes);
-	int (*close) (unsigned int str_id);
-	int (*get_caps) (struct snd_compr_caps *caps);
-	int (*get_codec_caps) (struct snd_compr_codec_caps *codec);
-	int (*set_metadata) (unsigned int str_id,
-			struct snd_compr_metadata *mdata);
-
-};
-
-struct sst_ops {
-	int (*open) (struct sst_stream_params *str_param);
-	int (*device_control) (int cmd, void *arg);
-	int (*close) (unsigned int str_id);
-};
-
-struct sst_runtime_stream {
-	int     stream_status;
-	unsigned int id;
-	size_t bytes_written;
-	struct pcm_stream_info stream_info;
-	struct sst_ops *ops;
-	struct compress_sst_ops *compr_ops;
-	spinlock_t	status_lock;
-};
-
-struct sst_device {
-	char *name;
-	struct device *dev;
-	struct sst_ops *ops;
-	struct compress_sst_ops *compr_ops;
-};
-
-int sst_register_dsp(struct sst_device *sst);
-int sst_unregister_dsp(struct sst_device *sst);
-#endif
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index 2340554..e68d5ce 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -24,6 +24,7 @@
 #include <sound/compress_driver.h>
 #include <sound/soc.h>
 #include <sound/initval.h>
+#include <sound/soc-dpcm.h>
 
 static int soc_compr_open(struct snd_compr_stream *cstream)
 {
@@ -75,6 +76,98 @@
 	return ret;
 }
 
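+/*
+ * Open handler for a DPCM front-end (FE) compressed stream: open the
+ * platform and machine layers, compute the FE<->BE DAPM paths, then start
+ * the connected back-end DAIs before marking the FE state as open.
+ */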
+static int soc_compr_open_fe(struct snd_compr_stream *cstream)
+{
+	struct snd_soc_pcm_runtime *fe = cstream->private_data;
+	struct snd_pcm_substream *fe_substream = fe->pcm->streams[0].substream;
+	struct snd_soc_platform *platform = fe->platform;
+	struct snd_soc_dai *cpu_dai = fe->cpu_dai;
+	struct snd_soc_dai *codec_dai = fe->codec_dai;
+	struct snd_soc_dpcm *dpcm;
+	struct snd_soc_dapm_widget_list *list;
+	int stream;
+	int ret = 0;
+
+	if (cstream->direction == SND_COMPRESS_PLAYBACK)
+		stream = SNDRV_PCM_STREAM_PLAYBACK;
+	else
+		stream = SNDRV_PCM_STREAM_CAPTURE;
+
+	mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+
+	if (platform->driver->compr_ops && platform->driver->compr_ops->open) {
+		ret = platform->driver->compr_ops->open(cstream);
+		if (ret < 0) {
+			pr_err("compress asoc: can't open platform %s\n", platform->name);
+			goto out;
+		}
+	}
+
+	if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->startup) {
+		ret = fe->dai_link->compr_ops->startup(cstream);
+		if (ret < 0) {
+			pr_err("compress asoc: %s startup failed\n", fe->dai_link->name);
+			goto machine_err;
+		}
+	}
+
+	fe->dpcm[stream].runtime = fe_substream->runtime;
+
+	if (dpcm_path_get(fe, stream, &list) <= 0) {
+		dev_dbg(fe->dev, "ASoC: %s no valid %s route\n",
+			fe->dai_link->name, stream ? "capture" : "playback");
+	}
+
+	/* calculate valid and active FE <-> BE dpcms */
+	dpcm_process_paths(fe, stream, &list, 1);
+
+	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+
+	ret = dpcm_be_dai_startup(fe, stream);
+	if (ret < 0) {
+		/* clean up all links */
+		list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be)
+			dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
+
+		dpcm_be_disconnect(fe, stream);
+		fe->dpcm[stream].runtime = NULL;
+		goto fe_err;
+	}
+
+	dpcm_clear_pending_state(fe, stream);
+	dpcm_path_put(&list);
+
+	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_OPEN;
+	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+
+	if (cstream->direction == SND_COMPRESS_PLAYBACK) {
+		cpu_dai->playback_active++;
+		codec_dai->playback_active++;
+	} else {
+		cpu_dai->capture_active++;
+		codec_dai->capture_active++;
+	}
+
+	cpu_dai->active++;
+	codec_dai->active++;
+	fe->codec->active++;
+
+	mutex_unlock(&fe->card->mutex);
+
+	return 0;
+
+fe_err:
+	if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->shutdown)
+		fe->dai_link->compr_ops->shutdown(cstream);
+machine_err:
+	if (platform->driver->compr_ops && platform->driver->compr_ops->free)
+		platform->driver->compr_ops->free(cstream);
+out:
+	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+	mutex_unlock(&fe->card->mutex);
+	return ret;
+}
+
 /*
  * Power down the audio subsystem pmdown_time msecs after close is called.
  * This is to ensure there are no pops or clicks in between any music tracks
@@ -88,18 +181,17 @@
 
 	mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
 
-	dev_dbg(rtd->dev, "ASoC: pop wq checking: %s status: %s waiting: %s\n",
-		 codec_dai->driver->playback.stream_name,
-		 codec_dai->playback_active ? "active" : "inactive",
-		 rtd->pop_wait ? "yes" : "no");
+	dev_dbg(rtd->dev, "ASoC: pop wq checking: %s status: %s waiting: %s\n",
+		 codec_dai->driver->playback.stream_name,
+		 codec_dai->playback_active ? "active" : "inactive",
+		 rtd->pop_wait ? "yes" : "no");
 
 	/* are we waiting on this codec DAI stream */
-	if (rtd->pop_wait == 1) {
+	if (rtd->pop_wait == 1 && !codec_dai->active) {
 		rtd->pop_wait = 0;
 		snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
 					  SND_SOC_DAPM_STREAM_STOP);
 	}
-
 	mutex_unlock(&rtd->pcm_mutex);
 }
 
@@ -120,8 +212,8 @@
 		cpu_dai->capture_active--;
 		codec_dai->capture_active--;
 	}
-
-	snd_soc_dai_digital_mute(codec_dai, 1, cstream->direction);
+	if (!codec_dai->playback_active)
+		snd_soc_dai_digital_mute(codec_dai, 1, cstream->direction);
 
 	cpu_dai->active--;
 	codec_dai->active--;
@@ -133,7 +225,6 @@
 	if (!codec_dai->active)
 		codec_dai->rate = 0;
 
-
 	if (rtd->dai_link->compr_ops && rtd->dai_link->compr_ops->shutdown)
 		rtd->dai_link->compr_ops->shutdown(cstream);
 
@@ -141,7 +232,8 @@
 		platform->driver->compr_ops->free(cstream);
 	cpu_dai->runtime = NULL;
 
-	if (cstream->direction == SND_COMPRESS_PLAYBACK) {
+	if (cstream->direction == SND_COMPRESS_PLAYBACK
+				&& !codec_dai->playback_active) {
 		if (!rtd->pmdown_time || codec->ignore_pmdown_time ||
 		    rtd->dai_link->ignore_pmdown_time) {
 			snd_soc_dapm_stream_event(rtd,
@@ -152,7 +244,8 @@
 			schedule_delayed_work(&rtd->delayed_work,
 				msecs_to_jiffies(rtd->pmdown_time));
 		}
-	} else {
+	} else if (cstream->direction == SND_COMPRESS_CAPTURE
+					&& !codec_dai->capture_active) {
 		/* capture streams can be powered down now */
 		snd_soc_dapm_stream_event(rtd,
 			SNDRV_PCM_STREAM_CAPTURE,
@@ -163,33 +256,126 @@
 	return 0;
 }
 
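+/*
+ * Close handler for a DPCM front-end compressed stream: drop the DAI
+ * activity counts, hw_free and shut down the back-end DAIs, signal the
+ * DAPM stream-stop event and finally release the machine and platform
+ * layers.
+ */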
+static int soc_compr_free_fe(struct snd_compr_stream *cstream)
+{
+	struct snd_soc_pcm_runtime *fe = cstream->private_data;
+	struct snd_soc_platform *platform = fe->platform;
+	struct snd_soc_dai *cpu_dai = fe->cpu_dai;
+	struct snd_soc_dai *codec_dai = fe->codec_dai;
+	struct snd_soc_dpcm *dpcm;
+	int stream, ret;
+
+	mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+
+	if (cstream->direction == SND_COMPRESS_PLAYBACK) {
+		stream = SNDRV_PCM_STREAM_PLAYBACK;
+		cpu_dai->playback_active--;
+		codec_dai->playback_active--;
+	} else {
+		stream = SNDRV_PCM_STREAM_CAPTURE;
+		cpu_dai->capture_active--;
+		codec_dai->capture_active--;
+	}
+
+	cpu_dai->active--;
+	codec_dai->active--;
+	fe->codec->active--;
+
+	snd_soc_dai_digital_mute(cpu_dai, 1, stream);
+
+	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+
+	ret = dpcm_be_dai_hw_free(fe, stream);
+	if (ret < 0)
+		dev_err(fe->dev, "compressed hw_free failed %d\n", ret);
+
+	ret = dpcm_be_dai_shutdown(fe, stream);
+
+	/* mark FE's links ready to prune */
+	list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be)
+		dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
+
+	/* both directions take the same stop event */
+	dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
+
+	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE;
+	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+
+	dpcm_be_disconnect(fe, stream);
+
+	fe->dpcm[stream].runtime = NULL;
+
+	if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->shutdown)
+		fe->dai_link->compr_ops->shutdown(cstream);
+
+	if (platform->driver->compr_ops && platform->driver->compr_ops->free)
+		platform->driver->compr_ops->free(cstream);
+
+	mutex_unlock(&fe->card->mutex);
+	return 0;
+}
+
 static int soc_compr_trigger(struct snd_compr_stream *cstream, int cmd)
 {
 
 	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
 	struct snd_soc_platform *platform = rtd->platform;
-	struct snd_soc_dai *codec_dai = rtd->codec_dai;
 	int ret = 0;
 
-	mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
-
 	if (platform->driver->compr_ops && platform->driver->compr_ops->trigger) {
 		ret = platform->driver->compr_ops->trigger(cstream, cmd);
 		if (ret < 0)
 			goto out;
 	}
 
+out:
+	return ret;
+}
+
+static int soc_compr_trigger_fe(struct snd_compr_stream *cstream, int cmd)
+{
+	struct snd_soc_pcm_runtime *fe = cstream->private_data;
+	struct snd_soc_platform *platform = fe->platform;
+	int ret = 0, stream;
+
+	if (cstream->direction == SND_COMPRESS_PLAYBACK)
+		stream = SNDRV_PCM_STREAM_PLAYBACK;
+	else
+		stream = SNDRV_PCM_STREAM_CAPTURE;
+
+	mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+
+	if (platform->driver->compr_ops && platform->driver->compr_ops->trigger) {
+		ret = platform->driver->compr_ops->trigger(cstream, cmd);
+		if (ret < 0)
+			goto out;
+	}
+
+	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+
+	ret = dpcm_be_dai_trigger(fe, stream, cmd);
+
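+	/* record the FE DPCM state that matches the trigger command */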
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
-		snd_soc_dai_digital_mute(codec_dai, 0, cstream->direction);
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		fe->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
 		break;
 	case SNDRV_PCM_TRIGGER_STOP:
-		snd_soc_dai_digital_mute(codec_dai, 1, cstream->direction);
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		fe->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP;
+		break;
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PAUSED;
 		break;
 	}
 
 out:
-	mutex_unlock(&rtd->pcm_mutex);
+	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+	mutex_unlock(&fe->card->mutex);
 	return ret;
 }
 
@@ -220,26 +406,91 @@
 			goto err;
 	}
 
-	if (cstream->direction == SND_COMPRESS_PLAYBACK)
-		snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
-					SND_SOC_DAPM_STREAM_START);
-	else
-		snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_CAPTURE,
-					SND_SOC_DAPM_STREAM_START);
-
 	/* cancel any delayed stream shutdown that is pending */
-	rtd->pop_wait = 0;
-	mutex_unlock(&rtd->pcm_mutex);
+	if (cstream->direction == SND_COMPRESS_PLAYBACK &&
+	    rtd->pop_wait) {
+		rtd->pop_wait = 0;
+		cancel_delayed_work(&rtd->delayed_work);
+	}
 
-	cancel_delayed_work_sync(&rtd->delayed_work);
+	snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
+					SND_SOC_DAPM_STREAM_START);
 
-	return ret;
-
+	snd_soc_dai_digital_mute(rtd->codec_dai, 0, cstream->direction);
 err:
 	mutex_unlock(&rtd->pcm_mutex);
 	return ret;
 }
 
+static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
+					struct snd_compr_params *params)
+{
+	struct snd_soc_pcm_runtime *fe = cstream->private_data;
+	struct snd_pcm_substream *fe_substream = fe->pcm->streams[0].substream;
+	struct snd_soc_platform *platform = fe->platform;
+	struct snd_soc_dai *cpu_dai = fe->cpu_dai;
+	int ret = 0, stream;
+
+	if (cstream->direction == SND_COMPRESS_PLAYBACK)
+		stream = SNDRV_PCM_STREAM_PLAYBACK;
+	else
+		stream = SNDRV_PCM_STREAM_CAPTURE;
+
+	mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+
+	/* first we call set_params for the platform driver
+	 * this should configure the soc side
+	 * if the machine has compressed ops then we call that as well
+	 * expectation is that platform and machine will configure everything
+	 * for this compress path, like configuring pcm port for codec
+	 */
+	if (platform->driver->compr_ops && platform->driver->compr_ops->set_params) {
+		ret = platform->driver->compr_ops->set_params(cstream, params);
+		if (ret < 0)
+			goto out;
+	}
+
+	if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->set_params) {
+		ret = fe->dai_link->compr_ops->set_params(cstream);
+		if (ret < 0)
+			goto out;
+	}
+
+	memcpy(&fe->dpcm[fe_substream->stream].hw_params, params,
+			sizeof(*params));
+
+	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+
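+	/* configure and prepare every connected back end */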
+	ret = dpcm_be_dai_hw_params(fe, stream);
+	if (ret < 0)
+		goto out;
+
+	ret = dpcm_be_dai_prepare(fe, stream);
+	if (ret < 0)
+		goto out;
+
+	dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_START);
+
+	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
+
+	snd_soc_dai_digital_mute(cpu_dai, 0, stream);
+
+out:
+	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+	mutex_unlock(&fe->card->mutex);
+	return ret;
+}
+
 static int soc_compr_get_params(struct snd_compr_stream *cstream,
 					struct snd_codec *params)
 {
@@ -359,6 +610,7 @@
 
 	return ret;
 }
+
 /* ASoC Compress operations */
 static struct snd_compr_ops soc_compr_ops = {
 	.open		= soc_compr_open,
@@ -374,6 +626,21 @@
 	.get_codec_caps = soc_compr_get_codec_caps
 };
 
+/* ASoC Dynamic Compress operations */
+static struct snd_compr_ops soc_compr_dyn_ops = {
+	.open		= soc_compr_open_fe,
+	.free		= soc_compr_free_fe,
+	.set_params	= soc_compr_set_params_fe,
+	.get_params	= soc_compr_get_params,
+	.set_metadata   = sst_compr_set_metadata,
+	.get_metadata	= sst_compr_get_metadata,
+	.trigger	= soc_compr_trigger_fe,
+	.pointer	= soc_compr_pointer,
+	.ack		= soc_compr_ack,
+	.get_caps	= soc_compr_get_caps,
+	.get_codec_caps = soc_compr_get_codec_caps
+};
+
 /* create a new compress */
 int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
 {
@@ -382,6 +649,7 @@
 	struct snd_soc_dai *codec_dai = rtd->codec_dai;
 	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 	struct snd_compr *compr;
+	struct snd_pcm *be_pcm;
 	char new_name[64];
 	int ret = 0, direction = 0;
 	int playback = 0, capture = 0;
@@ -426,7 +694,26 @@
 		ret = -ENOMEM;
 		goto compr_err;
 	}
-	memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops));
+
+	if (rtd->dai_link->dynamic) {
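+		/* a dynamic (DPCM) link gets an internal PCM that is never
+		 * exposed to userspace; it only gives the DPCM core a
+		 * substream to attach front-end state to
+		 */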
+		snprintf(new_name, sizeof(new_name), "(%s)",
+			rtd->dai_link->stream_name);
+
+		ret = snd_pcm_new_internal(rtd->card->snd_card, new_name, num,
+				1, 0, &be_pcm);
+		if (ret < 0) {
+			dev_err(rtd->card->dev, "ASoC: can't create compressed PCM for %s\n",
+				rtd->dai_link->name);
+			goto compr_err;
+		}
+
+		rtd->pcm = be_pcm;
+		rtd->fe_compr = 1;
+		be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
+		/*be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;*/
+		memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops));
+	} else {
+		memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops));
+	}
 
 	/* Add copy callback for not memory mapped DSPs */
 	if (platform->driver->compr_ops && platform->driver->compr_ops->copy)
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index d56bbea..717e6e9 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -530,6 +530,15 @@
 }
 #endif
 
+static void codec2codec_close_delayed_work(struct work_struct *work)
+{
+	/* Currently nothing to do for c2c links.
+	 * Since c2c links are internal nodes in the DAPM graph and don't
+	 * interface with the outside world or the application layer, we
+	 * don't have to do any special handling on close.
+	 */
+}
+
 #ifdef CONFIG_PM_SLEEP
 /* powers down audio subsystem for suspend */
 int snd_soc_suspend(struct device *dev)
@@ -1163,6 +1172,11 @@
 		if (dai->dev != platform->dev)
 			continue;
 
+		/* the dummy platform doesn't have any DAIs, so don't add
+		 * dummy-codec widgets here (since dev is the same)
+		 */
+		if (!strcmp(dai->name, "snd-soc-dummy-dai"))
+			continue;
 		snd_soc_dapm_new_dai_widgets(&platform->dapm, dai);
 	}
 
@@ -1224,9 +1238,6 @@
 	}
 	rtd->card = card;
 
-	/* Make sure all DAPM widgets are instantiated */
-	snd_soc_dapm_new_widgets(&codec->dapm);
-
 	/* machine controls, routes and widgets are not prefixed */
 	temp = codec->name_prefix;
 	codec->name_prefix = NULL;
@@ -1362,7 +1373,6 @@
 				return -ENODEV;
 
 			list_add(&cpu_dai->dapm.list, &card->dapm_list);
-			snd_soc_dapm_new_dai_widgets(&cpu_dai->dapm, cpu_dai);
 		}
 
 		if (cpu_dai->driver->probe) {
@@ -1429,6 +1439,9 @@
 				return ret;
 			}
 		} else {
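+			/* c2c links have no shutdown work to do, so install
+			 * a no-op handler
+			 */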
+			INIT_DELAYED_WORK(&rtd->delayed_work,
+						codec2codec_close_delayed_work);
+
 			/* link the DAI widgets */
 			play_w = codec_dai->playback_widget;
 			capture_w = cpu_dai->capture_widget;
@@ -1717,8 +1730,6 @@
 		snd_soc_dapm_add_routes(&card->dapm, card->dapm_routes,
 					card->num_dapm_routes);
 
-	snd_soc_dapm_new_widgets(&card->dapm);
-
 	for (i = 0; i < card->num_links; i++) {
 		dai_link = &card->dai_link[i];
 		dai_fmt = dai_link->dai_fmt;
@@ -3174,6 +3185,19 @@
 }
 EXPORT_SYMBOL_GPL(snd_soc_bytes_put);
 
+int snd_soc_info_bytes_ext(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_info *ucontrol)
+{
+	struct soc_bytes_ext *params = (void *)kcontrol->private_value;
+
+	ucontrol->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+	ucontrol->count = params->max;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_info_bytes_ext);
+
 /**
  * snd_soc_info_xr_sx - signed multi register info callback
  * @kcontrol: mreg control
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index c2ecb4e..98055eb 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -172,6 +172,17 @@
 	return kmemdup(_widget, sizeof(*_widget), GFP_KERNEL);
 }
 
+/**
+ * snd_soc_dapm_kcontrol_codec() - Returns the codec associated with a kcontrol
+ * @kcontrol: The kcontrol
+ */
+struct snd_soc_codec *snd_soc_dapm_kcontrol_codec(struct snd_kcontrol *kcontrol)
+{
+	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
+
+	return wlist->widgets[0]->codec;
+}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_kcontrol_codec);
+
 /* get snd_card from DAPM context */
 static inline struct snd_card *dapm_get_snd_card(
 	struct snd_soc_dapm_context *dapm)
@@ -3440,7 +3451,7 @@
 				break;
 			}
 
-			if (!w->sname)
+			if (!w->sname || !strstr(w->sname, dai_w->name))
 				continue;
 
 			if (dai->driver->playback.stream_name &&
diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
index 0bb5ccc..9fd4ed8 100644
--- a/sound/soc/soc-jack.c
+++ b/sound/soc/soc-jack.c
@@ -65,6 +65,7 @@
 	struct snd_soc_codec *codec;
 	struct snd_soc_dapm_context *dapm;
 	struct snd_soc_jack_pin *pin;
+	unsigned int sync = 0;
 	int enable;
 
 	trace_snd_soc_jack_report(jack, mask, status);
@@ -92,12 +93,16 @@
 			snd_soc_dapm_enable_pin(dapm, pin->pin);
 		else
 			snd_soc_dapm_disable_pin(dapm, pin->pin);
+
+		/* at least one pin changed, so a DAPM sync is needed */
+		sync = 1;
 	}
 
 	/* Report before the DAPM sync to help users updating micbias status */
 	blocking_notifier_call_chain(&jack->notifier, jack->status, jack);
 
-	snd_soc_dapm_sync(dapm);
+	if (sync)
+		snd_soc_dapm_sync(dapm);
 
 	snd_jack_report(jack->jack, jack->status);
 
@@ -318,10 +323,13 @@
 		INIT_DELAYED_WORK(&gpios[i].work, gpio_work);
 		gpios[i].jack = jack;
 
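+		/* default to both edges unless the caller set irq_flags */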
+		if (!gpios[i].irq_flags)
+			gpios[i].irq_flags =
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+
 		ret = request_any_context_irq(gpio_to_irq(gpios[i].gpio),
 					      gpio_handler,
-					      IRQF_TRIGGER_RISING |
-					      IRQF_TRIGGER_FALLING,
+					      gpios[i].irq_flags,
 					      gpios[i].name,
 					      &gpios[i]);
 		if (ret < 0)
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index c6d408c..4b32ddd 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -34,7 +34,7 @@
 #define DPCM_MAX_BE_USERS	8
 
 /* DPCM stream event, send event to FE and all active BEs. */
-static int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
+int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
 	int event)
 {
 	struct snd_soc_dpcm *dpcm;
@@ -337,7 +337,7 @@
 		 rtd->pop_wait ? "yes" : "no");
 
 	/* are we waiting on this codec DAI stream */
-	if (rtd->pop_wait == 1) {
+	if (rtd->pop_wait == 1 && !codec_dai->active) {
 		rtd->pop_wait = 0;
 		snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
 					  SND_SOC_DAPM_STREAM_STOP);
@@ -383,7 +383,10 @@
 	/* Muting the DAC suppresses artifacts caused during digital
 	 * shutdown, for example from stopping clocks.
 	 */
-	snd_soc_dai_digital_mute(codec_dai, 1, substream->stream);
+	if (!codec_dai->playback_active)
+		snd_soc_dai_digital_mute(codec_dai, 1, substream->stream);
+
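+	/* the CPU DAI is muted unconditionally on shutdown */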
+	snd_soc_dai_digital_mute(cpu_dai, 1, substream->stream);
 
 	if (cpu_dai->driver->ops->shutdown)
 		cpu_dai->driver->ops->shutdown(substream, cpu_dai);
@@ -398,7 +401,8 @@
 		platform->driver->ops->close(substream);
 	cpu_dai->runtime = NULL;
 
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
+	    !codec_dai->playback_active) {
 		if (!rtd->pmdown_time || codec->ignore_pmdown_time ||
 		    rtd->dai_link->ignore_pmdown_time) {
 			/* powered down playback stream now */
@@ -411,14 +415,14 @@
 			schedule_delayed_work(&rtd->delayed_work,
 				msecs_to_jiffies(rtd->pmdown_time));
 		}
-	} else {
+	} else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE &&
+		   !codec_dai->capture_active) {
 		/* capture streams can be powered down now */
 		snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_CAPTURE,
 					  SND_SOC_DAPM_STREAM_STOP);
 	}
 
 	mutex_unlock(&rtd->pcm_mutex);
-
 	pm_runtime_put(platform->dev);
 	pm_runtime_put(codec_dai->dev);
 	pm_runtime_put(cpu_dai->dev);
@@ -488,6 +492,7 @@
 			SND_SOC_DAPM_STREAM_START);
 
 	snd_soc_dai_digital_mute(codec_dai, 0, substream->stream);
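+	/* unmute the CPU DAI side as well */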
+	snd_soc_dai_digital_mute(cpu_dai, 0, substream->stream);
 
 out:
 	mutex_unlock(&rtd->pcm_mutex);
@@ -757,7 +762,7 @@
 }
 
 /* disconnect a BE and FE */
-static void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
+void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
 {
 	struct snd_soc_dpcm *dpcm, *d;
 
@@ -853,7 +858,7 @@
 	return 0;
 }
 
-static int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
+int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
 	int stream, struct snd_soc_dapm_widget_list **list_)
 {
 	struct snd_soc_dai *cpu_dai = fe->cpu_dai;
@@ -875,11 +880,6 @@
 	return paths;
 }
 
-static inline void dpcm_path_put(struct snd_soc_dapm_widget_list **list)
-{
-	kfree(*list);
-}
-
 static int dpcm_prune_paths(struct snd_soc_pcm_runtime *fe, int stream,
 	struct snd_soc_dapm_widget_list **list_)
 {
@@ -949,7 +949,7 @@
 			continue;
 
 		/* don't connect if FE is not running */
-		if (!fe->dpcm[stream].runtime)
+		if (!fe->dpcm[stream].runtime && !fe->fe_compr)
 			continue;
 
 		/* newly connected FE and BE */
@@ -974,7 +974,7 @@
  * Find the corresponding BE DAIs that source or sink audio to this
  * FE substream.
  */
-static int dpcm_process_paths(struct snd_soc_pcm_runtime *fe,
+int dpcm_process_paths(struct snd_soc_pcm_runtime *fe,
 	int stream, struct snd_soc_dapm_widget_list **list, int new)
 {
 	if (new)
@@ -983,7 +983,7 @@
 		return dpcm_prune_paths(fe, stream, list);
 }
 
-static void dpcm_clear_pending_state(struct snd_soc_pcm_runtime *fe, int stream)
+void dpcm_clear_pending_state(struct snd_soc_pcm_runtime *fe, int stream)
 {
 	struct snd_soc_dpcm *dpcm;
 
@@ -1021,7 +1021,7 @@
 	}
 }
 
-static int dpcm_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream)
+int dpcm_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream)
 {
 	struct snd_soc_dpcm *dpcm;
 	int err, count = 0;
@@ -1149,7 +1149,6 @@
 	}
 
 	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_OPEN;
-
 	dpcm_set_fe_runtime(fe_substream);
 	snd_pcm_limit_hw_rates(runtime);
 
@@ -1163,7 +1162,7 @@
 	return ret;
 }
 
-static int dpcm_be_dai_shutdown(struct snd_soc_pcm_runtime *fe, int stream)
+int dpcm_be_dai_shutdown(struct snd_soc_pcm_runtime *fe, int stream)
 {
 	struct snd_soc_dpcm *dpcm;
 
@@ -1224,7 +1223,7 @@
 	return 0;
 }
 
-static int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
+int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
 {
 	struct snd_soc_dpcm *dpcm;
 
@@ -1290,7 +1289,7 @@
 	return 0;
 }
 
-static int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream)
+int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream)
 {
 	struct snd_soc_dpcm *dpcm;
 	int ret;
@@ -1396,7 +1395,7 @@
 	if (ret < 0) {
 		dev_err(fe->dev,"ASoC: hw_params FE failed %d\n", ret);
 		dpcm_be_dai_hw_free(fe, stream);
-	 } else
+	} else
 		fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_PARAMS;
 
 out:
@@ -1420,7 +1419,7 @@
 	return ret;
 }
 
-static int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
+int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
 			       int cmd)
 {
 	struct snd_soc_dpcm *dpcm;
@@ -1512,7 +1511,6 @@
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger);
 
 static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
 {
@@ -1588,7 +1586,7 @@
 	return ret;
 }
 
-static int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
+int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
 {
 	struct snd_soc_dpcm *dpcm;
 	int ret = 0;
@@ -2013,17 +2011,36 @@
 	int ret = 0, playback = 0, capture = 0;
 
 	if (rtd->dai_link->dynamic || rtd->dai_link->no_pcm) {
-		if (cpu_dai->driver->playback.channels_min)
-			playback = 1;
-		if (cpu_dai->driver->capture.channels_min)
-			capture = 1;
+		if (cpu_dai->driver->playback.channels_min) {
+			if (rtd->dai_link->playback_count)
+				playback = rtd->dai_link->playback_count;
+			else
+				playback = 1;
+		}
+
+		if (cpu_dai->driver->capture.channels_min) {
+			if (rtd->dai_link->capture_count)
+				capture = rtd->dai_link->capture_count;
+			else
+				capture = 1;
+		}
 	} else {
 		if (codec_dai->driver->playback.channels_min &&
-		    cpu_dai->driver->playback.channels_min)
-			playback = 1;
+		    cpu_dai->driver->playback.channels_min) {
+			if (rtd->dai_link->playback_count)
+				playback = rtd->dai_link->playback_count;
+			else
+				playback = 1;
+		}
+
 		if (codec_dai->driver->capture.channels_min &&
-		    cpu_dai->driver->capture.channels_min)
-			capture = 1;
+		    cpu_dai->driver->capture.channels_min) {
+			if (rtd->dai_link->capture_count)
+				capture = rtd->dai_link->capture_count;
+			else
+				capture = 1;
+		}
 	}
 
 	/* create the PCM */
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index 4b3be6c..8ecf448 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -75,7 +75,11 @@
 
 static int dummy_dma_open(struct snd_pcm_substream *substream)
 {
-	snd_soc_set_runtime_hwparams(substream, &dummy_dma_hardware);
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+
+	/* BEs don't need dummy params */
+	if (!rtd->dai_link->no_pcm)
+		snd_soc_set_runtime_hwparams(substream, &dummy_dma_hardware);
 
 	return 0;
 }
@@ -89,7 +93,28 @@
 	.ops = &dummy_dma_ops,
 };
 
-static struct snd_soc_codec_driver dummy_codec;
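+/* DAPM endpoints so routes through the dummy codec can be fully connected */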
+static struct snd_soc_dapm_widget dapm_widgets[] = {
+	SND_SOC_DAPM_INPUT("Dummy Input"),
+	SND_SOC_DAPM_OUTPUT("Dummy Output"),
+};
+
+static struct snd_soc_dapm_route intercon[] = {
+	{ "Dummy Output", NULL, "Dummy Playback"},
+	{ "Dummy Capture", NULL, "Dummy Input"},
+};
+
+static int dummy_codec_probe(struct snd_soc_codec *codec)
+{
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	snd_soc_dapm_new_controls(dapm, dapm_widgets,
+			ARRAY_SIZE(dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
+	return 0;
+}
+
+static struct snd_soc_codec_driver dummy_codec = {
+	.probe = dummy_codec_probe,
+};
 
 #define STUB_RATES	SNDRV_PCM_RATE_8000_192000
 #define STUB_FORMATS	(SNDRV_PCM_FMTBIT_S8 | \
@@ -101,17 +126,18 @@
 			SNDRV_PCM_FMTBIT_S32_LE | \
 			SNDRV_PCM_FMTBIT_U32_LE | \
 			SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE)
+
 static struct snd_soc_dai_driver dummy_dai = {
 	.name = "snd-soc-dummy-dai",
 	.playback = {
-		.stream_name	= "Playback",
+		.stream_name	= "Dummy Playback",
 		.channels_min	= 1,
 		.channels_max	= 384,
 		.rates		= STUB_RATES,
 		.formats	= STUB_FORMATS,
 	},
 	.capture = {
-		.stream_name	= "Capture",
+		.stream_name	= "Dummy Capture",
 		.channels_min	= 1,
 		.channels_max	= 384,
 		.rates = STUB_RATES,
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index b0f164b..ecdd885 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -691,9 +691,10 @@
 	ifneq ($(call try-cc,$(SOURCE_SLANG),$(FLAGS_SLANG),libslang),y)
 		msg := $(warning slang not found, disables TUI support. Please install slang-devel or libslang-dev);
 	else
-		# Fedora has /usr/include/slang/slang.h, but ubuntu /usr/include/slang.h
-		BASIC_CFLAGS += -I/usr/include/slang
+		# Some distributions, such as Fedora, have /usr/include/slang/slang.h rather than /usr/include/slang.h
+		SLANG_INC ?= -idirafter =/usr/include/slang
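+		# '?=' lets SLANG_INC be overridden for other header layouts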
 		BASIC_CFLAGS += -DSLANG_SUPPORT
+		BASIC_CFLAGS += $(SLANG_INC)
 		EXTLIBS += -lslang
 		LIB_OBJS += $(OUTPUT)ui/browser.o
 		LIB_OBJS += $(OUTPUT)ui/browsers/annotate.o
@@ -742,6 +743,7 @@
 	FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
 
 	ifneq ($(call try-cc,$(SOURCE_PERL_EMBED),$(FLAGS_PERL_EMBED),perl),y)
+		msg := $(warning libperl not found, disables Perl scripting support. Please install libperl-dev or perl-devel);
 		BASIC_CFLAGS += -DNO_LIBPERL
 	else
                ALL_LDFLAGS += $(PERL_EMBED_LDFLAGS)
@@ -780,10 +782,10 @@
 
       PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG))
 
-      PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null)
+      PYTHON_EMBED_LDOPTS := $(shell pkg-config --libs python 2>/dev/null)
       PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS))
       PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS))
-      PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null)
+      PYTHON_EMBED_CCOPTS := $(shell pkg-config --cflags python 2>/dev/null)
       FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
 
       ifneq ($(call try-cc,$(SOURCE_PYTHON_EMBED),$(FLAGS_PYTHON_EMBED),python),y)
@@ -1194,7 +1196,7 @@
 install: install-bin try-install-man
 
 install-python_ext:
-	$(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)'
+	$(PYTHON_WORD) util/setup.py --quiet install --prefix='$(DESTDIR_SQ)/usr'
 
 # 'make install-doc' should call 'make -C Documentation install'
 $(INSTALL_DOC_TARGETS):
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index 30d1c32..f5c0834 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -21,6 +21,7 @@
 #include <string.h>
 #include <unistd.h>
 #include <pthread.h>
+#include <inttypes.h>
 #include <sys/mman.h>
 #include <sys/time.h>
 #include <sys/wait.h>
@@ -1110,7 +1111,7 @@
 		/* Check whether our max runtime timed out: */
 		if (g->p.nr_secs) {
 			timersub(&stop, &start0, &diff);
-			if (diff.tv_sec >= g->p.nr_secs) {
+			if (diff.tv_sec >= (long int)g->p.nr_secs) {
 				g->stop_work = true;
 				break;
 			}
@@ -1157,7 +1158,7 @@
 			runtime_ns_max += diff.tv_usec * 1000;
 
 			if (details >= 0) {
-				printf(" #%2d / %2d: %14.2lf nsecs/op [val: %016lx]\n",
+				printf(" #%2d / %2d: %14.2lf nsecs/op [val: %016" PRIx64 "]\n",
 					process_nr, thread_nr, runtime_ns_max / bytes_done, val);
 			}
 			fflush(stdout);
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 2da2a6c..2eddd71 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1663,28 +1663,29 @@
 	return cmd_record(i, rec_argv, NULL);
 }
 
+static const char default_sort_order[] = "avg, max, switch, runtime";
+static struct perf_sched sched = {
+	.tool = {
+		.sample		 = perf_sched__process_tracepoint_sample,
+		.comm		 = perf_event__process_comm,
+		.lost		 = perf_event__process_lost,
+		.fork		 = perf_event__process_fork,
+		.ordered_samples = true,
+	},
+	.cmp_pid	      = LIST_HEAD_INIT(sched.cmp_pid),
+	.sort_list	      = LIST_HEAD_INIT(sched.sort_list),
+	.start_work_mutex     = PTHREAD_MUTEX_INITIALIZER,
+	.work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
+	.curr_pid	      = { [0 ... MAX_CPUS - 1] = -1 },
+	.sort_order	      = default_sort_order,
+	.replay_repeat	      = 10,
+	.profile_cpu	      = -1,
+	.next_shortname1      = 'A',
+	.next_shortname2      = '0',
+};
+
 int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
 {
-	const char default_sort_order[] = "avg, max, switch, runtime";
-	struct perf_sched sched = {
-		.tool = {
-			.sample		 = perf_sched__process_tracepoint_sample,
-			.comm		 = perf_event__process_comm,
-			.lost		 = perf_event__process_lost,
-			.fork		 = perf_event__process_fork,
-			.ordered_samples = true,
-		},
-		.cmp_pid	      = LIST_HEAD_INIT(sched.cmp_pid),
-		.sort_list	      = LIST_HEAD_INIT(sched.sort_list),
-		.start_work_mutex     = PTHREAD_MUTEX_INITIALIZER,
-		.work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
-		.curr_pid	      = { [0 ... MAX_CPUS - 1] = -1 },
-		.sort_order	      = default_sort_order,
-		.replay_repeat	      = 10,
-		.profile_cpu	      = -1,
-		.next_shortname1      = 'A',
-		.next_shortname2      = '0',
-	};
 	const struct option latency_options[] = {
 	OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
 		   "sort by key(s): runtime, switch, avg, max"),
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 32bd102..6128be7 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -3,6 +3,8 @@
 
 #include <asm/unistd.h>
 
+#include <stdbool.h>
+
 #if defined(__i386__)
 #define rmb()		asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
 #define cpu_relax()	asm volatile("rep; nop" ::: "memory");
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Context.c b/tools/perf/scripts/python/Perf-Trace-Util/Context.c
index 315067b..57d3aa3 100644
--- a/tools/perf/scripts/python/Perf-Trace-Util/Context.c
+++ b/tools/perf/scripts/python/Perf-Trace-Util/Context.c
@@ -19,6 +19,10 @@
  *
  */
 
+#ifdef __mips__
+#include <sgidefs.h>
+#endif
+
 #include <Python.h>
 #include "../../../perf.h"
 #include "../../../util/trace-event.h"
diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
index 00218f5..f045c2c 100644
--- a/tools/perf/tests/attr.c
+++ b/tools/perf/tests/attr.c
@@ -71,7 +71,7 @@
 	char path[PATH_MAX];
 
 	snprintf(path, PATH_MAX, "%s/event-%d-%llu-%d", dir,
-		 attr->type, attr->config, fd);
+		 attr->type, (unsigned long long)attr->config, fd);
 
 	file = fopen(path, "w+");
 	if (!file) {
@@ -80,7 +80,7 @@
 	}
 
 	if (fprintf(file, "[event-%d-%llu-%d]\n",
-		    attr->type, attr->config, fd) < 0) {
+		    attr->type, (unsigned long long)attr->config, fd) < 0) {
 		perror("test attr - failed to write event file");
 		fclose(file);
 		return -1;
@@ -96,10 +96,10 @@
 	/* struct perf_event_attr */
 	WRITE_ASS(type,   PRIu32);
 	WRITE_ASS(size,   PRIu32);
-	WRITE_ASS(config,  "llu");
-	WRITE_ASS(sample_period, "llu");
-	WRITE_ASS(sample_type,   "llu");
-	WRITE_ASS(read_format,   "llu");
+	__WRITE_ASS(config,        "llu", (unsigned long long)attr->config);
+	__WRITE_ASS(sample_period, "llu", (unsigned long long)attr->sample_period);
+	__WRITE_ASS(sample_type,   "llu", (unsigned long long)attr->sample_type);
+	__WRITE_ASS(read_format,   "llu", (unsigned long long)attr->read_format);
 	WRITE_ASS(disabled,       "d");
 	WRITE_ASS(inherit,        "d");
 	WRITE_ASS(pinned,         "d");
@@ -124,10 +124,10 @@
 	WRITE_ASS(exclude_callchain_user, "d");
 	WRITE_ASS(wakeup_events, PRIu32);
 	WRITE_ASS(bp_type, PRIu32);
-	WRITE_ASS(config1, "llu");
-	WRITE_ASS(config2, "llu");
-	WRITE_ASS(branch_sample_type, "llu");
-	WRITE_ASS(sample_regs_user,   "llu");
+	__WRITE_ASS(config1,            "llu", (unsigned long long)attr->config1);
+	__WRITE_ASS(config2,            "llu", (unsigned long long)attr->config2);
+	__WRITE_ASS(branch_sample_type, "llu", (unsigned long long)attr->branch_sample_type);
+	__WRITE_ASS(sample_regs_user,   "llu", (unsigned long long)attr->sample_regs_user);
 	WRITE_ASS(sample_stack_user,  PRIu32);
 
 	fclose(file);
diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
index 68daa28..bf3a094 100644
--- a/tools/perf/tests/bp_signal.c
+++ b/tools/perf/tests/bp_signal.c
@@ -74,7 +74,7 @@
 
 	fd = sys_perf_event_open(&pe, 0, -1, -1, 0);
 	if (fd < 0) {
-		pr_debug("failed opening event %llx\n", pe.config);
+		pr_debug("failed opening event %llx\n", (unsigned long long)pe.config);
 		return TEST_FAIL;
 	}
 
diff --git a/tools/perf/tests/bp_signal_overflow.c b/tools/perf/tests/bp_signal_overflow.c
index fe7ed28..3662b15 100644
--- a/tools/perf/tests/bp_signal_overflow.c
+++ b/tools/perf/tests/bp_signal_overflow.c
@@ -87,7 +87,7 @@
 
 	fd = sys_perf_event_open(&pe, 0, -1, -1, 0);
 	if (fd < 0) {
-		pr_debug("failed opening event %llx\n", pe.config);
+		pr_debug("failed opening event %llx\n", (unsigned long long)pe.config);
 		return TEST_FAIL;
 	}
 
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index d102716..c0ec9f5 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -898,7 +898,7 @@
 	snprintf(command, sizeof(command),
 		 "%s %s%s --start-address=0x%016" PRIx64
 		 " --stop-address=0x%016" PRIx64
-		 " -d %s %s -C %s|grep -v %s|expand",
+		 " -d %s %s -C %s|grep -v %s|sed 's/\t/        /g'",
 		 objdump_path ? objdump_path : "objdump",
 		 disassembler_style ? "-M " : "",
 		 disassembler_style ? disassembler_style : "",
diff --git a/tools/perf/util/include/linux/compiler.h b/tools/perf/util/include/linux/compiler.h
index 96b919d..d0c6cd6 100644
--- a/tools/perf/util/include/linux/compiler.h
+++ b/tools/perf/util/include/linux/compiler.h
@@ -4,6 +4,7 @@
 #ifndef __always_inline
 #define __always_inline	inline
 #endif
+#undef __user
 #define __user
 #ifndef __attribute_const__
 #define __attribute_const__
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index e87aa5d..0aa746f 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -19,6 +19,10 @@
  *
  */
 
+#ifdef __mips__
+#include <sgidefs.h>
+#endif
+
 #include <Python.h>
 
 #include <stdio.h>
diff --git a/usr/Makefile b/usr/Makefile
index 029ffe6..ddb506b 100644
--- a/usr/Makefile
+++ b/usr/Makefile
@@ -35,7 +35,7 @@
 # Generate the initramfs cpio archive
 
 hostprogs-y := gen_init_cpio
-initramfs   := $(CONFIG_SHELL) $(srctree)/scripts/gen_initramfs_list.sh
+initramfs   := $(INITRAMFS_WRAPPER) $(CONFIG_SHELL) $(srctree)/scripts/gen_initramfs_list.sh
 ramfs-input := $(if $(filter-out "",$(CONFIG_INITRAMFS_SOURCE)), \
 			$(shell echo $(CONFIG_INITRAMFS_SOURCE)),-d)
 ramfs-args  := \